id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6480340 | <filename>otp/level/DistributedInteractiveEntityAI.py
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedInteractiveEntityAI(DistributedObjectAI):
    """Distributed-object counterpart of an interactive level entity (AI side).

    Only the notify category is declared here; behavior presumably lives in
    subclasses or the base class — confirm against the rest of the package.
    """
    # Logging category for this class.
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedInteractiveEntityAI')
| StarcoderdataPython |
8038791 | #!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import datetime
import inspect
import logging
import os
import random
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import test_env
test_env.setup_test_env()
from google.appengine.api import datastore_errors
from google.appengine.api import search
from google.appengine.ext import deferred
from google.appengine.ext import ndb
# From tools/third_party/
import webtest
from components import auth_testing
from components import datastore_utils
from components import stats_framework
from components import utils
from server import config
from server import stats
from server import task_pack
from server import task_request
from server import task_result
from server import task_scheduler
from server import task_to_run
from support import test_case
from server.task_result import State
# pylint: disable=W0212,W0612
def _gen_request_data(name='Request name', properties=None, **kwargs):
# Do not include optional arguments.
base_data = {
'name': name,
'user': 'Jesus',
'properties': {
'commands': [[u'command1']],
'data': [],
'dimensions': {},
'env': {},
'execution_timeout_secs': 24*60*60,
'io_timeout_secs': None,
},
'priority': 50,
'scheduling_expiration_secs': 60,
'tags': [u'tag:1'],
}
base_data.update(kwargs)
base_data['properties'].update(properties or {})
return base_data
def get_results(request_key):
  """Fetches all task results for a specified TaskRequest ndb.Key.

  Returns:
    tuple(TaskResultSummary, list of TaskRunResult that exist).
  """
  summary_key = task_pack.request_key_to_result_summary_key(request_key)
  # Two options: a DB query, or fetching all entities that could exist (at
  # most 255). In practice fewer than 3 exist, and this helper is only used
  # in unit tests, so an ancestor query keyed in order is fine.
  run_results = task_result.TaskRunResult.query(
      ancestor=summary_key).order(task_result.TaskRunResult.key).fetch()
  return summary_key.get(), run_results
def _quick_reap():
  """Schedules a Windows-3.1.1 task and immediately reaps it as 'localhost'.

  Returns:
    The TaskRunResult of the reaped task.
  """
  request = task_request.make_request(
      _gen_request_data(
          properties=dict(dimensions={u'OS': u'Windows-3.1.1'})))
  task_scheduler.schedule_request(request)
  _reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  return run_result
class TaskSchedulerApiTest(test_case.TestCase):
APP_DIR = ROOT_DIR
def setUp(self):
  """Freezes the clock and wires up search, taskqueue and auth stubs."""
  super(TaskSchedulerApiTest, self).setUp()
  self.testbed.init_search_stub()
  # Frozen 'now' makes all the timestamps in the expected dicts deterministic.
  self.now = datetime.datetime(2014, 1, 2, 3, 4, 5, 6)
  self.mock_now(self.now)
  # WSGI test app used to execute deferred taskqueue work.
  self.app = webtest.TestApp(
      deferred.application,
      extra_environ={
          'REMOTE_ADDR': '192.168.127.12',
          'SERVER_SOFTWARE': os.environ['SERVER_SOFTWARE'],
      })
  # Every stats line emitted during a test must parse cleanly.
  self.mock(stats_framework, 'add_entry', self._parse_line)
  auth_testing.mock_get_current_identity(self)
def _parse_line(self, line):
  """Asserts that a stats_framework line parses cleanly."""
  # pylint: disable=W0212
  parsed = stats._parse_line(line, stats._Snapshot(), {}, {}, {})
  self.assertIs(True, parsed, line)
def test_all_apis_are_tested(self):
  """Ensures there's a test for each public API of task_scheduler."""
  # TODO(maruel): Remove this once coverage is asserted.
  module = task_scheduler
  public_funcs = set(
      name for name in dir(module)
      if not name.startswith('_')
      and hasattr(getattr(module, name), 'func_name'))
  tested = set(
      name[len('test_'):] for name in dir(self) if name.startswith('test_'))
  self.assertFalse(public_funcs - tested)
def test_bot_reap_task(self):
  """A bot whose dimensions are a superset of the request's reaps the task."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  actual_request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(request, actual_request)
  self.assertEqual('localhost', run_result.bot_id)
  # Once reaped, the TaskToRun's queue_number is cleared.
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_exponential_backoff(self):
  """Checks the backoff curve when the quick-comeback path is NOT taken."""
  # random() returning exactly the threshold keeps the normal backoff path.
  self.mock(
      task_scheduler.random, 'random',
      lambda: task_scheduler._PROBABILITY_OF_QUICK_COMEBACK)
  self.mock(utils, 'is_canary', lambda: False)
  # (attempt number, expected rounded backoff in seconds); capped at 60.
  data = [
      (0, 2),
      (1, 2),
      (2, 3),
      (3, 5),
      (4, 8),
      (5, 11),
      (6, 17),
      (7, 26),
      (8, 38),
      (9, 58),
      (10, 60),
      (11, 60),
  ]
  for value, expected in data:
    actual = int(round(task_scheduler.exponential_backoff(value)))
    self.assertEqual(expected, actual, (value, expected, actual))
def test_exponential_backoff_quick(self):
  """When random() is under the threshold, backoff collapses to 1 second."""
  self.mock(
      task_scheduler.random, 'random',
      lambda: task_scheduler._PROBABILITY_OF_QUICK_COMEBACK - 0.01)
  # Even a huge attempt number returns the quick comeback of 1.0s.
  self.assertEqual(1.0, task_scheduler.exponential_backoff(235))
def _task_ran_successfully(self):
  """Runs an idempotent task successfully and returns the task_id."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  actual_request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(request, actual_request)
  self.assertEqual('localhost', run_result.bot_id)
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
  # It's important to terminate the task with success, since only successful
  # idempotent runs can be deduped against.
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False,
          0.1))
  return unicode(run_result.key_string)
def _task_deduped(
    self, new_ts, deduped_from, task_id='1d8dc670a0008810', now=None):
  """Schedules an idempotent task and asserts it was deduped.

  The new task must complete immediately with the results of the run
  identified by *deduped_from*, without ever reaching a bot.

  Arguments:
    new_ts: creation timestamp expected on the deduped result summary.
    deduped_from: task_id of the run whose results are reused.
    task_id: expected id of the new (deduped) result summary.
    now: expected completed_ts/started_ts; defaults to self.now.
  """
  data = _gen_request_data(
      name='yay',
      user='Raoul',
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  # The deduped task was never enqueued for a bot.
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
  actual_request_2, run_result_2 = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(None, actual_request_2)
  result_summary_duped, run_results_duped = get_results(request.key)
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': now or self.now,
      'costs_usd': [],
      'cost_saved_usd': 0.1,
      'created_ts': new_ts,
      'deduped_from': deduped_from,
      'durations': [0.1],
      'exit_codes': [0],
      'failure': False,
      'id': task_id,
      'internal_failure': False,
      # Only this value is updated to 'now', the rest uses the previous run
      # timestamps.
      'modified_ts': new_ts,
      'name': u'yay',
      # A deduped task cannot be deduped against.
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': now or self.now,
      'state': State.COMPLETED,
      'try_number': 0,
      'user': u'Raoul',
  }
  self.assertEqual(expected, result_summary_duped.to_dict())
  # No TaskRunResult was ever created for the deduped task.
  self.assertEqual([], run_results_duped)
def test_task_idempotent(self):
  """A second identical idempotent task is deduped against the first."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  # First task is idempotent.
  task_id = self._task_ran_successfully()
  # Second task is deduped against first task: it is still within the
  # reusable age window (one second short of the limit).
  new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs-1)
  self._task_deduped(new_ts, task_id)
def test_task_idempotent_old(self):
  """An idempotent task too old to be reused is scheduled normally."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  # First task is idempotent.
  self._task_ran_successfully()
  # Second task is scheduled, first task is exactly at the age limit so it is
  # too old to be reused.
  new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs)
  data = _gen_request_data(
      name='yay',
      user='Raoul',
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  # The task was enqueued for execution.
  self.assertNotEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_task_idempotent_three(self):
  """A deduped task cannot itself be deduped against once the first is old."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  # First task is idempotent.
  task_id = self._task_ran_successfully()
  # Second task is deduped against first task.
  new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs-1)
  self._task_deduped(new_ts, task_id)
  # Third task is scheduled, second task is not dedupable, first task is too
  # old.
  new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs)
  data = _gen_request_data(
      name='yay',
      user='Jesus',
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  # The task was enqueued for execution.
  self.assertNotEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_task_idempotent_variable(self):
  """Dedupes against the most recent match after the age window is widened."""
  # Test the edge case where GlobalConfig.reusable_task_age_secs is being
  # modified. This ensures TaskResultSummary.order(TRS.key) works.
  self.mock(random, 'getrandbits', lambda _: 0x88)
  cfg = config.settings()
  cfg.reusable_task_age_secs = 10
  cfg.store()
  # First task is idempotent.
  self._task_ran_successfully()
  # Second task is scheduled, first task is too old to be reused.
  second_ts = self.mock_now(self.now, 10)
  task_id = self._task_ran_successfully()
  # Now any of the 2 tasks could be reused. Assert the right one (the most
  # recent) is reused.
  cfg = config.settings()
  cfg.reusable_task_age_secs = 100
  cfg.store()
  # Third task is deduped against second task. That ensures ordering works
  # correctly.
  third_ts = self.mock_now(self.now, 20)
  self._task_deduped(third_ts, task_id, '1d69ba3ea8008810', second_ts)
def test_task_parent_children(self):
  """A child task is recorded in its parent's children_task_ids."""
  # Parent task creates a child task.
  parent_id = self._task_ran_successfully()
  data = _gen_request_data(
      parent_task_id=parent_id,
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  self.assertEqual([], result_summary.children_task_ids)
  self.assertEqual(parent_id, request.parent_task_id)
  parent_run_result_key = task_pack.unpack_run_result_key(parent_id)
  parent_res_summary_key = task_pack.run_result_key_to_result_summary_key(
      parent_run_result_key)
  expected = [result_summary.key_string]
  # Both the parent's run result and its result summary track the child.
  self.assertEqual(expected, parent_run_result_key.get().children_task_ids)
  self.assertEqual(expected, parent_res_summary_key.get().children_task_ids)
def test_get_results(self):
  """Walks a task through PENDING -> RUNNING -> COMPLETED via get_results().

  Asserts the full TaskResultSummary and TaskRunResult contents after
  scheduling, after a bot reaps the task, and after the bot completes it.
  """
  # TODO(maruel): Split in more focused tests.
  self.mock(random, 'getrandbits', lambda _: 0x88)
  created_ts = self.now
  self.mock_now(created_ts)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  # The TaskRequest was enqueued, the TaskResultSummary was created but no
  # TaskRunResult exist yet since the task was not scheduled on any bot.
  result_summary, run_results = get_results(request.key)
  expected = {
      'abandoned_ts': None,
      'bot_id': None,
      'bot_version': None,
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [],
      'cost_saved_usd': None,
      'created_ts': created_ts,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': created_ts,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [],
      'started_ts': None,
      'state': State.PENDING,
      'try_number': None,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.to_dict())
  self.assertEqual([], run_results)
  # A bot reaps the TaskToRun.
  reaped_ts = self.now + datetime.timedelta(seconds=60)
  self.mock_now(reaped_ts)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  self.assertEqual(request, reaped_request)
  self.assertTrue(run_result)
  result_summary, run_results = get_results(request.key)
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': created_ts,  # Time the TaskRequest was created.
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': reaped_ts,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': reaped_ts,
      'state': State.RUNNING,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.to_dict())
  expected = [
      {
          'abandoned_ts': None,
          'bot_id': u'localhost',
          'bot_version': u'abc',
          'children_task_ids': [],
          'completed_ts': None,
          'cost_usd': 0.,
          'durations': [],
          'exit_codes': [],
          'failure': False,
          'id': '1d69b9f088008811',
          'internal_failure': False,
          'modified_ts': reaped_ts,
          'server_versions': [u'default-version'],
          'started_ts': reaped_ts,
          'state': State.RUNNING,
          'try_number': 1,
      },
  ]
  self.assertEqual(expected, [i.to_dict() for i in run_results])
  # The bot completes the task with two successive updates.
  done_ts = self.now + datetime.timedelta(seconds=120)
  self.mock_now(done_ts)
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False,
          0.1))
  # NOTE(review): this output literal was lost to dataset anonymization; its
  # exact text is irrelevant since only durations/exit codes are asserted.
  self.assertEqual(
      (True, False),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Bar22', 0, 0, 0.2, False, False, 0.1))
  result_summary, run_results = get_results(request.key)
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': done_ts,
      'costs_usd': [0.1],
      'cost_saved_usd': None,
      'created_ts': created_ts,
      'deduped_from': None,
      'durations': [0.1, 0.2],
      'exit_codes': [0, 0],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': done_ts,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': reaped_ts,
      'state': State.COMPLETED,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.to_dict())
  expected = [
      {
          'abandoned_ts': None,
          'bot_id': u'localhost',
          'bot_version': u'abc',
          'children_task_ids': [],
          'completed_ts': done_ts,
          'cost_usd': 0.1,
          'durations': [0.1, 0.2],
          'exit_codes': [0, 0],
          'failure': False,
          'id': '1d69b9f088008811',
          'internal_failure': False,
          'modified_ts': done_ts,
          'server_versions': [u'default-version'],
          'started_ts': reaped_ts,
          'state': State.COMPLETED,
          'try_number': 1,
      },
  ]
  self.assertEqual(expected, [t.to_dict() for t in run_results])
def test_exit_code_failure(self):
  """A non-zero exit code marks the task COMPLETED with failure=True."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  self.assertEqual(request, reaped_request)
  # Exit code 1 -> failure, but not an internal failure.
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Foo1', 0, 1, 0.1, False, False, 0.1))
  result_summary, run_results = get_results(request.key)
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': self.now,
      'costs_usd': [0.1],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [0.1],
      'exit_codes': [1],
      'failure': True,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': self.now,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': State.COMPLETED,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.to_dict())
  expected = [
      {
          'abandoned_ts': None,
          'bot_id': u'localhost',
          'bot_version': u'abc',
          'children_task_ids': [],
          'completed_ts': self.now,
          'cost_usd': 0.1,
          'durations': [0.1],
          'exit_codes': [1],
          'failure': True,
          'id': '1d69b9f088008811',
          'internal_failure': False,
          'modified_ts': self.now,
          'server_versions': [u'default-version'],
          'started_ts': self.now,
          'state': State.COMPLETED,
          'try_number': 1,
      },
  ]
  self.assertEqual(expected, [t.to_dict() for t in run_results])
def test_schedule_request(self):
  """Smoke test; schedule_request() is tested indirectly everywhere else."""
  request = task_request.make_request(
      _gen_request_data(
          properties=dict(dimensions={u'OS': u'Windows-3.1.1'})))
  self.assertTrue(task_scheduler.schedule_request(request))
def test_bot_update_task(self):
  """Two output chunks at increasing offsets are concatenated."""
  run_result = _quick_reap()
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hi', 0, 0, 0.1, False, False, 0.1))
  self.assertEqual(
      (True, False),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hey', 2, 0, 0.1, False, False,
          0.1))
  # 'hey' was written at offset 2, directly after 'hi'.
  self.assertEqual(['hihey'], list(run_result.key.get().get_outputs()))
def test_bot_update_task_new_overwrite(self):
  """An output chunk written at a lower offset overwrites earlier content."""
  run_result = _quick_reap()
  self.assertEqual(
      (True, False),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hi', 0, None, None, False, False,
          0.1))
  self.assertEqual(
      (True, False),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hey', 1, None, None, False, False,
          0.1))
  # 'hey' at offset 1 overwrote the trailing 'i' of 'hi'.
  self.assertEqual(['hhey'], list(run_result.key.get().get_outputs()))
def test_bot_update_exception(self):
  """A datastore commit failure makes bot_update_task report (False, False)."""
  run_result = _quick_reap()
  def r(*_):
    raise datastore_utils.CommitError('Sorry!')
  # Force every entity write to fail.
  self.mock(ndb, 'put_multi', r)
  self.assertEqual(
      (False, False),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hi', 0, 0, 0.1, False, False, 0.1))
def _bot_update_timeouts(self, hard, io):
  """Runs a task that times out and asserts the resulting entities.

  Arguments:
    hard: if True, the bot update signals an execution (hard) timeout.
    io: if True, the bot update signals an I/O timeout.
  """
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hi', 0, 0, 0.1, hard, io, 0.1))
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': self.now,
      'costs_usd': [0.1],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [0.1],
      'exit_codes': [0],
      'failure': True,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': self.now,
      # The request is built with _gen_request_data()'s default name.
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': State.TIMED_OUT,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': self.now,
      'cost_usd': 0.1,
      'durations': [0.1],
      'exit_codes': [0],
      'failure': True,
      'id': '1d69b9f088008811',
      'internal_failure': False,
      'modified_ts': self.now,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': State.TIMED_OUT,
      'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
def test_bot_update_hard_timeout(self):
  """An execution (hard) timeout results in State.TIMED_OUT."""
  self._bot_update_timeouts(True, False)
def test_bot_update_io_timeout(self):
  """An I/O timeout results in State.TIMED_OUT."""
  self._bot_update_timeouts(False, True)
def test_bot_kill_task(self):
  """The owning bot can kill its running task; state becomes BOT_DIED."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  # None means no error message: the kill was accepted.
  self.assertEqual(
      None, task_scheduler.bot_kill_task(run_result.key, 'localhost'))
  expected = {
      'abandoned_ts': self.now,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': True,
      'modified_ts': self.now,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': State.BOT_DIED,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
  expected = {
      'abandoned_ts': self.now,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'cost_usd': 0.,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008811',
      'internal_failure': True,
      'modified_ts': self.now,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': State.BOT_DIED,
      'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
def test_bot_kill_task_wrong_bot(self):
  """A bot that does not own the task gets an error string, not a kill."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  expected = (
      'Bot bot1 sent task kill for task 1d69b9f088008811 owned by bot '
      'localhost')
  self.assertEqual(
      expected, task_scheduler.bot_kill_task(run_result.key, 'bot1'))
def test_cancel_task(self):
  """Canceling a still-pending task succeeds and moves it to CANCELED."""
  request = task_request.make_request(
      _gen_request_data(
          properties=dict(dimensions={u'OS': u'Windows-3.1.1'})))
  result_summary = task_scheduler.schedule_request(request)
  ok, was_running = task_scheduler.cancel_task(result_summary.key)
  self.assertEqual(True, ok)
  self.assertEqual(False, was_running)
  # Reload and check the final state.
  result_summary = result_summary.key.get()
  self.assertEqual(task_result.State.CANCELED, result_summary.state)
def test_cancel_task_running(self):
  """A task already running on a bot cannot be canceled."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  ok, was_running = task_scheduler.cancel_task(result_summary.key)
  self.assertEqual(False, ok)
  self.assertEqual(True, was_running)
  # The task keeps running.
  result_summary = result_summary.key.get()
  self.assertEqual(task_result.State.RUNNING, result_summary.state)
def test_cron_abort_expired_task_to_run(self):
  """A never-reaped task past its scheduling expiration becomes EXPIRED."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  abandoned_ts = self.mock_now(self.now, data['scheduling_expiration_secs']+1)
  self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
  # No bot ever ran it, so no TaskRunResult exists.
  self.assertEqual([], task_result.TaskRunResult.query().fetch())
  expected = {
      'abandoned_ts': abandoned_ts,
      'bot_id': None,
      'bot_version': None,
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': abandoned_ts,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [],
      'started_ts': None,
      'state': task_result.State.EXPIRED,
      'try_number': None,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
def test_cron_abort_expired_task_to_run_retry(self):
  """A retried task whose first try BOT_DIED keeps BOT_DIED when expiring."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      scheduling_expiration_secs=600)
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  # Fake first try bot died.
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
  self.assertEqual(task_result.State.BOT_DIED, run_result.key.get().state)
  # The summary went back to PENDING for a retry.
  self.assertEqual(
      task_result.State.PENDING, run_result.result_summary_key.get().state)
  # BOT_DIED is kept instead of EXPIRED.
  abandoned_ts = self.mock_now(self.now, data['scheduling_expiration_secs']+1)
  self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
  self.assertEqual(1, len(task_result.TaskRunResult.query().fetch()))
  expected = {
      'abandoned_ts': abandoned_ts,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': True,
      'modified_ts': abandoned_ts,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': task_result.State.BOT_DIED,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
def test_cron_handle_bot_died(self):
  """First try's bot dies, the task is retried and the second try succeeds."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      scheduling_expiration_secs=600)
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(1, run_result.try_number)
  self.assertEqual(task_result.State.RUNNING, run_result.state)
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
  # Refresh and compare:
  expected = {
      'abandoned_ts': now_1,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'cost_usd': 0.,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008811',
      'internal_failure': True,
      'modified_ts': now_1,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': task_result.State.BOT_DIED,
      'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': now_1,
      # The request was built with _gen_request_data()'s default name.
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': None,
      'state': task_result.State.PENDING,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
  # Task was retried.
  now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost-second', 'abc')
  logging.info('%s', [t.to_dict() for t in task_to_run.TaskToRun.query()])
  self.assertEqual(2, run_result.try_number)
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost-second', 'Foo1', 0, 0, 0.1, False, False,
          0.1))
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost-second',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': now_2,
      'costs_usd': [0., 0.1],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [0.1],
      'exit_codes': [0],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': now_2,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': now_2,
      'state': task_result.State.COMPLETED,
      'try_number': 2,
      'user': u'Jesus',
  }
  self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
  self.assertEqual(0.1, run_result.key.get().cost_usd)
def test_cron_handle_bot_died_same_bot_denied(self):
  """The bot that 'died' on try 1 is refused the retry of the same task."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      scheduling_expiration_secs=600)
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(1, run_result.try_number)
  self.assertEqual(task_result.State.RUNNING, run_result.state)
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
  # Refresh and compare:
  expected = {
      'abandoned_ts': now_1,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'cost_usd': 0.,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008811',
      'internal_failure': True,
      'modified_ts': now_1,
      'server_versions': [u'default-version'],
      'started_ts': self.now,
      'state': task_result.State.BOT_DIED,
      'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
  expected = {
      'abandoned_ts': None,
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': now_1,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': None,
      'state': task_result.State.PENDING,
      'try_number': 1,
      'user': u'Jesus',
  }
  self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
  # Task was retried but the same bot polls again, it's denied the task.
  now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
  request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(None, request)
  self.assertEqual(None, run_result)
  logging.info('%s', [t.to_dict() for t in task_to_run.TaskToRun.query()])
def test_cron_handle_bot_died_second(self):
  """Two tries with internal_failure lead to a final BOT_DIED status."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      scheduling_expiration_secs=600)
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(1, run_result.try_number)
  self.assertEqual(task_result.State.RUNNING, run_result.state)
  self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  # First cron run: one task retried.
  self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
  # It must be a different bot.
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost-second', 'abc')
  now_2 = self.mock_now(self.now + 2 * task_result.BOT_PING_TOLERANCE, 3)
  # Second cron run: the second try dies too, no more retries.
  self.assertEqual((1, 0, 0), task_scheduler.cron_handle_bot_died())
  self.assertEqual((0, 0, 0), task_scheduler.cron_handle_bot_died())
  expected = {
      'abandoned_ts': now_2,
      'bot_id': u'localhost-second',
      'bot_version': u'abc',
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0., 0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'durations': [],
      'exit_codes': [],
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': True,
      'modified_ts': now_2,
      'name': u'Request name',
      'properties_hash': None,
      'server_versions': [u'default-version'],
      'started_ts': now_1,
      'state': task_result.State.BOT_DIED,
      'try_number': 2,
      'user': u'Jesus',
  }
  self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
  def test_cron_handle_bot_died_ignored_expired(self):
    """A task whose scheduling deadline has passed is not retried.

    The bot dies after the request's 600s expiration, so the cron job
    counts it as dead (first tuple slot) instead of re-enqueuing it.
    """
    self.mock(random, 'getrandbits', lambda _: 0x88)
    data = _gen_request_data(
        properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
        scheduling_expiration_secs=600)
    request = task_request.make_request(data)
    _result_summary = task_scheduler.schedule_request(request)
    bot_dimensions = {
      u'OS': [u'Windows', u'Windows-3.1.1'],
      u'hostname': u'localhost',
      u'foo': u'bar',
    }
    _request, run_result = task_scheduler.bot_reap_task(
        bot_dimensions, 'localhost', 'abc')
    self.assertEqual(1, run_result.try_number)
    self.assertEqual(task_result.State.RUNNING, run_result.state)
    # 601s > scheduling_expiration_secs=600: past the retry window.
    self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 601)
    self.assertEqual((1, 0, 0), task_scheduler.cron_handle_bot_died())
  def test_search_by_name(self):
    """Full-text search finds a task by either word of its name."""
    data = _gen_request_data(
        properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
    request = task_request.make_request(data)
    result_summary = task_scheduler.schedule_request(request)
    # Assert that search is not case-sensitive by using unexpected casing.
    actual, _cursor = task_result.search_by_name('requEST', None, 10)
    self.assertEqual([result_summary], actual)
    actual, _cursor = task_result.search_by_name('name', None, 10)
    self.assertEqual([result_summary], actual)
  def test_search_by_name_failures(self):
    """Search matches whole tokens only: no hit for absent or partial words."""
    data = _gen_request_data(
        properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
    request = task_request.make_request(data)
    result_summary = task_scheduler.schedule_request(request)
    actual, _cursor = task_result.search_by_name('foo', None, 10)
    self.assertEqual([], actual)
    # Partial match doesn't work.
    actual, _cursor = task_result.search_by_name('nam', None, 10)
    self.assertEqual([], actual)
  def test_search_by_name_broken_tasks(self):
    """Search stays usable even when some tasks were only partially stored.

    Schedules 100 tasks while forcing every third ndb.put_multi() and every
    third search.Index.put_async() to fail, then verifies pagination over
    the surviving (fully committed) 67 entities.
    """
    # Create tasks where task_scheduler.schedule_request() fails in the middle.
    # This is done by mocking the functions to fail every SKIP call and running
    # it in a loop.
    class RandomFailure(Exception):
      pass
    # First call fails ndb.put_multi(), second call fails search.Index.put(),
    # third call work.
    index = [0]
    SKIP = 3
    def put_multi(*args, **kwargs):
      # Only fail the datastore writes issued by the scheduling code paths.
      callers = [i[3] for i in inspect.stack()]
      self.assertTrue(
          'make_request' in callers or 'schedule_request' in callers, callers)
      if (index[0] % SKIP) == 1:
        raise RandomFailure()
      return old_put_multi(*args, **kwargs)
    def put_async(*args, **kwargs):
      callers = [i[3] for i in inspect.stack()]
      self.assertIn('schedule_request', callers)
      out = ndb.Future()
      if (index[0] % SKIP) == 2:
        # Simulate the search index write failing after the datastore write.
        out.set_exception(search.Error())
      else:
        out.set_result(old_put_async(*args, **kwargs).get_result())
      return out
    old_put_multi = self.mock(ndb, 'put_multi', put_multi)
    old_put_async = self.mock(search.Index, 'put_async', put_async)
    saved = []
    for i in xrange(100):
      index[0] = i
      data = _gen_request_data(
          name='Request %d' % i,
          properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
      try:
        request = task_request.make_request(data)
        result_summary = task_scheduler.schedule_request(request)
        saved.append(result_summary)
      except RandomFailure:
        pass
    # 2 of every 3 attempts survive the injected failures.
    self.assertEqual(67, len(saved))
    self.assertEqual(67, task_request.TaskRequest.query().count())
    self.assertEqual(67, task_result.TaskResultSummary.query().count())
    # Now the DB is full of half-corrupted entities.
    cursor = None
    actual, cursor = task_result.search_by_name('Request', cursor, 31)
    self.assertEqual(31, len(actual))
    # NOTE(review): only 3 results on the second page (not 31) — presumably
    # entities missing from either store are filtered out; confirm intent.
    actual, cursor = task_result.search_by_name('Request', cursor, 31)
    self.assertEqual(3, len(actual))
    actual, cursor = task_result.search_by_name('Request', cursor, 31)
    self.assertEqual(0, len(actual))
if __name__ == '__main__':
  # '-v' enables full assertion diffs and debug-level logging.
  verbose = '-v' in sys.argv
  if verbose:
    unittest.TestCase.maxDiff = None
  logging.basicConfig(level=logging.DEBUG if verbose else logging.CRITICAL)
  unittest.main()
| StarcoderdataPython |
5113565 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import unittest
from stormshield.sns.sslclient import SSLClient
APPLIANCE = os.getenv('APPLIANCE', "")
PASSWORD = os.getenv('PASSWORD', "")
@unittest.skipIf(APPLIANCE=="", "APPLIANCE env var must be set to the ip/hostname of a running SNS appliance")
@unittest.skipIf(PASSWORD=="", "PASSWORD env var must be set to the firewall password")
class TestUtf8(unittest.TestCase):
    """Round-trip UTF-8 content (accents, check mark) through the SNS API."""
    def setUp(self):
        # Create a host object whose comment contains non-ASCII characters.
        self.client = SSLClient(host=APPLIANCE, user='admin', password=PASSWORD, sslverifyhost=False)
        self.client.send_command('CONFIG OBJECT HOST NEW type=host name=hostutf8 ip=1.2.3.4 comment="comment with utf8 characters éè\u2713"')
        self.maxDiff = 5000
    def tearDown(self):
        # Clean up the test object so runs are repeatable.
        self.client.send_command('CONFIG OBJECT HOST delete name=hostutf8')
        self.client.disconnect()
    def test_utf8(self):
        """ send and receive utf-8 content """
        expected = """101 code=00a01000 msg="Begin" format="section_line"
[Object]
type=host global=0 name=hostutf8 ip=1.2.3.4 modify=1 comment="comment with utf8 characters éè\u2713" type=host
100 code=00a00100 msg="Ok\""""
        response = self.client.send_command('CONFIG OBJECT LIST type=host search=hostutf8 start=0')
        self.assertEqual(response.output, expected)
        self.assertEqual(response.ret, 100)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8049103 | #!/usr/bin/env python
import time
import math
from common import Common
from metnum.helpers.run_function import run_function
class LinearInterpolation(Common):
    """Linear interpolation through two points.

    execute() returns the interpolating polynomial a_0 + a_1*x as a string.
    """
    def __init__(self, argv):
        if len(argv) < 2:
            raise Exception('Must have 2 points')
        # Each argument is an "x,y" pair.
        self.x_low, self.y_low = [float(x) for x in argv[0].split(',')]
        self.x_high, self.y_high = [float(x) for x in argv[1].split(',')]

    @staticmethod
    def help():
        # Fixed: the usage line previously read "x_low,x_high x_high,y_high".
        return """x_low,y_low x_high,y_high
Arguments:
    x_low,y_low : Low point
    x_high,y_high : High point
"""

    def execute(self):
        # Slope and intercept of the line through the two points.
        a_1 = (self.y_high - self.y_low)/(self.x_high - self.x_low)
        a_0 = (self.x_high*self.y_low - self.x_low*self.y_high)/(self.x_high - self.x_low)
        return str(a_0) + ' + ' + str(a_1) + '*x'

    def print_performance(self):
        # print() with a single argument is valid on both Python 2 and 3;
        # the previous print-statement form was Python 2 only.
        print('No performance can be calculated')
class LagrangeInterpolation(Common):
    """Lagrange interpolation through n >= 2 points.

    execute() returns the interpolating polynomial as a string of
    Lagrange basis terms.
    """
    def __init__(self, argv):
        if len(argv) < 2:
            raise Exception('Minimum 2 points')
        self.data_x = []
        self.data_y = []
        self.total_point = len(argv)
        for el in argv:
            x, y = (float(iel) for iel in el.split(','))
            self.data_x.append(x)
            self.data_y.append(y)

    @staticmethod
    def help():
        return """x_0,y_0 x_1,y_1 [x_2,y_2, [x_3, y_3], ...]
Arguments:
    x_i,y_i : Point(s) at i for 0 < i <= n
"""

    def execute(self):
        # One "(coefficient * basis)" term per data point.
        retval = []
        for index in range(0, self.total_point):
            retval.append('(' + str(self.__calculate_y_per_x(index)) + '*' + self.__get_x(index) + ')')
        return ' + '.join(retval)

    def __calculate_y_per_x(self, index):
        # y_index divided by the product of (x_index - x_j) for j != index.
        retval = self.data_y[index]
        for index_j in range(0, self.total_point):
            if index_j != index:
                retval /= self.data_x[index] - self.data_x[index_j]
        return retval

    def __get_x(self, index):
        # Product of "(x - x_j)" factors for j != index, as a string.
        retval = []
        for index_j in range(0, self.total_point):
            if index_j != index:
                if self.data_x[index_j] < 0:
                    retval.append('(x + ' + str(abs(self.data_x[index_j])) + ')')
                else:
                    retval.append('(x - ' + str(self.data_x[index_j]) + ')')
        return '*'.join(retval)

    def print_performance(self):
        # print() with a single argument is valid on both Python 2 and 3;
        # the previous print-statement form was Python 2 only.
        print('No performance can be calculated')
class NewtonInterpolation(Common):
    """Newton divided-difference interpolation through n >= 2 points.

    execute() returns the interpolating polynomial as a string; the
    divided-difference table is accumulated in self.data_st.
    """
    def __init__(self, argv):
        if len(argv) < 2:
            raise Exception('Minimum 2 points')
        self.data_x = []
        self.data_y = []
        # Divided-difference table, one row appended per order.
        self.data_st = []
        self.total_point = len(argv)
        for el in argv:
            x, y = (float(iel) for iel in el.split(','))
            self.data_x.append(x)
            self.data_y.append(y)

    @staticmethod
    def help():
        return """x_0,y_0 x_1,y_1 [x_2,y_2, [x_3, y_3], ...]
Arguments:
    x_i,y_i : Point(s) at i for 0 < i <= n
"""

    def execute(self):
        retval = []
        for index in range(0, self.total_point):
            retval.append('(' + str(self.__calculate_y(index)) + self.__get_x(index) + ')')
        return ' + '.join(retval)

    def __calculate_y(self, index):
        # Build row `index` of the divided-difference table and return its
        # leading coefficient f[x_0..x_index].
        retval = []
        for index_j in range(0, self.total_point - index):
            if index == 0:
                retval.append(self.data_y[index_j])
            else:
                retval.append((self.data_st[index - 1][index_j + 1] - self.data_st[index - 1][index_j])/(self.data_x[index_j + index] - self.data_x[index_j]))
        self.data_st.append(retval)
        return self.data_st[index][0]

    def __get_x(self, index):
        # Product of "(x - x_j)" for j < index, as a string ('' for index 0).
        retval = []
        for index_j in range(0, index):
            if self.data_x[index_j] < 0:
                retval.append('(x + ' + str(abs(self.data_x[index_j])) + ')')
            else:
                retval.append('(x - ' + str(self.data_x[index_j]) + ')')
        if len(retval) > 0:
            return '*' + '*'.join(retval)
        return ''

    def print_performance(self):
        # print() with a single argument is valid on both Python 2 and 3;
        # the previous print-statement form was Python 2 only.
        print('No performance can be calculated')
class Calculate(Common):
    """Base for the Calculate* commands: builds an interpolating function
    (self.interpolation, set by subclasses) and evaluates it at x."""
    def __init__(self, argv):
        self.val_x = float(argv[0])
        # Subclasses assign a concrete *Interpolation instance.
        self.interpolation = None

    @staticmethod
    def help():
        # Fixed: the usage line previously read "x_low,x_high x_high,y_high".
        return """x_low,y_low x_high,y_high
Arguments:
    x_low,y_low : Low point
    x_high,y_high : High point
"""

    def execute(self):
        function = self.interpolation.execute()
        # print() with a single argument is valid on both Python 2 and 3;
        # the previous print-statement form was Python 2 only.
        print('Generated function: y = ' + function)
        return run_function(function, self.val_x)

    def print_performance(self):
        self.interpolation.print_performance()
class CalculateLinear(Calculate):
    """Evaluate a linear interpolant, built from 2 points, at x (first arg)."""
    def __init__(self, argv):
        # argv[0] is the x value; the remaining args are the points.
        if len(argv) < 3:
            raise Exception('Must have 2 points')
        super(CalculateLinear, self).__init__(argv)
        self.interpolation = LinearInterpolation(argv[1:])

    @staticmethod
    def help():
        return 'x_val ' + LinearInterpolation.help() + """ x_val : X value
"""
class CalculateLagrange(Calculate):
    """Evaluate a Lagrange interpolant, built from >= 2 points, at x."""
    def __init__(self, argv):
        # argv[0] is the x value; the remaining args are the points.
        if len(argv) < 3:
            raise Exception('Minimum have 2 points')
        super(CalculateLagrange, self).__init__(argv)
        self.interpolation = LagrangeInterpolation(argv[1:])

    @staticmethod
    def help():
        return 'x_val ' + LagrangeInterpolation.help() + """ x_val : X value
"""
class CalculateNewton(Calculate):
    """Evaluate a Newton interpolant, built from >= 2 points, at x."""
    def __init__(self, argv):
        # argv[0] is the x value; the remaining args are the points.
        if len(argv) < 3:
            raise Exception('Minimum have 2 points')
        super(CalculateNewton, self).__init__(argv)
        self.interpolation = NewtonInterpolation(argv[1:])

    @staticmethod
    def help():
        return 'x_val ' + NewtonInterpolation.help() + """ x_val : X value
"""
| StarcoderdataPython |
12841636 | <gh_stars>0
from enum import Enum
import os
import sys
import inspect
import logging
import datetime
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from Utils.Utils import Actions
class Trade():
    """A single portfolio transaction (deposit, withdrawal, buy, sell, ...).

    NOTE(review): __compute_total divides price by 100, so price appears to
    be quoted in pence/cents while fee is a flat amount and sdr (stamp duty)
    a percentage — confirm with the data source.
    """
    def __init__(self, date_string, action, quantity, symbol, price, fee, sdr):
        try:
            # Dates are day/month/year strings.
            self.date = datetime.datetime.strptime(date_string, '%d/%m/%Y')
            if not isinstance(action, Actions):
                raise ValueError("Invalid action")
            self.action = action
            self.quantity = quantity
            self.symbol = symbol
            self.price = price
            self.fee = fee
            self.sdr = sdr
            self.total = self.__compute_total()
        except Exception as e:
            # Any parse/validation failure is normalised into ValueError.
            logging.error(e)
            raise ValueError("Invalid argument")
    def to_dict(self):
        """Serialise to a plain dict (date and action as strings)."""
        return {
            'date': self.date.strftime('%d/%m/%Y'),
            'action': self.action.name,
            'quantity': self.quantity,
            'symbol': self.symbol,
            'price': self.price,
            'fee': self.fee,
            'stamp_duty': self.sdr
        }
    @staticmethod
    def from_dict(item):
        """Inverse of to_dict(); raises ValueError if any key is missing."""
        if any(['date' not in item, 'action' not in item, 'quantity' not in item, 'symbol' not in item, 'price' not in item, 'fee' not in item, 'stamp_duty' not in item]):
            raise ValueError('item not well formatted')
        return Trade(item['date'], Actions[item['action']], item['quantity'],
                     item['symbol'], float(item['price']), float(item['fee']), float(item['stamp_duty']))
    def __compute_total(self):
        # Cash movements count at face value; buys cost cash (positive),
        # sells return cash (negative total).
        if self.action in (Actions.DEPOSIT, Actions.WITHDRAW, Actions.DIVIDEND):
            return self.quantity
        elif self.action == Actions.BUY:
            cost = (self.price / 100) * self.quantity
            return cost + self.fee + ((cost * self.sdr) / 100)
        elif self.action == Actions.SELL:
            cost = (self.price / 100) * self.quantity
            total = cost + self.fee + ((cost * self.sdr) / 100)
            return total * -1
        return 0
| StarcoderdataPython |
4904944 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from niftynet.layer.base_layer import TrainableLayer
from niftynet.utilities.util_common import look_up_operations
def prelu(f_in, channelwise_params):
    """Parametric ReLU: identity for positive inputs, a learnable
    channelwise slope applied to the negative part."""
    positive_part = tf.nn.relu(f_in)
    # (f_in - |f_in|) / 2 isolates the negative part of the input.
    negative_part = channelwise_params * (f_in - tf.abs(f_in)) * 0.5
    return positive_part + negative_part
def selu(x, name):
    # Scaled Exponential Linear Unit; alpha/scale are the fixed
    # self-normalising constants from Klambauer et al. (2017).
    # `name` is accepted for signature symmetry with the other activations
    # but is unused here.
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
def leaky_relu(x, name):
    """Leaky ReLU with slope 0.01 for negative inputs, written in the
    branch-free form a*x + b*|x|. `name` is unused (signature symmetry)."""
    half_alpha = 0.01
    positive_weight = 0.5 + half_alpha
    negative_weight = 0.5 - half_alpha
    return positive_weight * x + negative_weight * abs(x)
# Lowercase activation-function name -> implementing callable.
# 'prelu', 'selu' and 'leakyrelu' use the local helpers defined above;
# everything else maps straight onto tf.nn.
SUPPORTED_OP = {'relu': tf.nn.relu,
                'relu6': tf.nn.relu6,
                'elu': tf.nn.elu,
                'softplus': tf.nn.softplus,
                'softsign': tf.nn.softsign,
                'sigmoid': tf.nn.sigmoid,
                'tanh': tf.nn.tanh,
                'prelu': prelu,
                'selu': selu,
                'leakyrelu': leaky_relu,
                'dropout': tf.nn.dropout}
class ActiLayer(TrainableLayer):
    """
    Apply an element-wise non-linear activation function.
    'Prelu' uses trainable parameters and those are initialised to zeros
    Dropout function is also supported
    """
    def __init__(self, func, regularizer=None, name='activation'):
        # Lowercased so lookups in SUPPORTED_OP are case-insensitive.
        self.func = func.lower()
        self.layer_name = '{}_{}'.format(self.func, name)
        super(ActiLayer, self).__init__(name=self.layer_name)
        # these are used for prelu variables
        self.initializers = {'alpha': tf.constant_initializer(0.0)}
        self.regularizers = {'alpha': regularizer}
    def layer_op(self, input_tensor, keep_prob=None):
        """Apply the configured activation to input_tensor.

        keep_prob is only meaningful when func == 'dropout' and must then
        be in (0, 1].
        """
        func_ = look_up_operations(self.func, SUPPORTED_OP)
        if self.func == 'prelu':
            # One trainable slope per channel (last dimension).
            alphas = tf.get_variable(
                'alpha', input_tensor.shape[-1],
                initializer=self.initializers['alpha'],
                regularizer=self.regularizers['alpha'])
            output_tensor = func_(input_tensor, alphas)
        elif self.func == 'dropout':
            assert keep_prob > 0.0
            assert keep_prob <= 1.0
            output_tensor = func_(input_tensor,
                                  keep_prob=keep_prob,
                                  name='dropout')
        else:
            output_tensor = func_(input_tensor, name='acti')
        return output_tensor
| StarcoderdataPython |
296292 | <filename>21 - ML/DataScience1/script.py
# Use Google Colab
import seaborn as sns
from bokeh.models import HoverTool
from bokeh.plotting import figure, show
import pandas as pd
# Load the player dataset and take a quick look at its shape and stats.
# NOTE: bare expressions like .shape only display output in a notebook.
data_frame = pd.read_csv('data.csv')
data_frame.shape
data_frame.describe()
data_frame.values
# Keep just the columns needed for the wage-vs-value analysis.
df1 = pd.DataFrame(data_frame, columns=['Name', 'Wage', 'Value'])
def value_to_float(x):
    """Parse a money string like '10K', '2.5M' or '1B' into a float.

    Numeric inputs are returned unchanged; a bare suffix ('K', 'M') means
    one unit of that magnitude; anything unparseable yields 0.0.
    """
    # isinstance is the idiomatic (and subclass-friendly) type check,
    # replacing the previous `type(x) == float or type(x) == int`.
    if isinstance(x, (int, float)):
        return x
    if 'K' in x:
        if len(x) > 1:
            return float(x.replace('K', '')) * 1000
        return 1000.0
    if 'M' in x:
        if len(x) > 1:
            return float(x.replace('M', '')) * 1000000
        return 1000000.0
    if 'B' in x:
        return float(x.replace('B', '')) * 1000000000
    return 0.0
# Normalise Wage/Value: strip euro signs and commas, expand K/M/B suffixes.
wage = df1['Wage'].replace('[\€,]', '', regex=True).apply(value_to_float)
value = df1['Value'].replace('[\€,]', '', regex=True).apply(value_to_float)
df1['Wage'] = wage
df1['Value'] = value
# A player's value surplus over their wage, highest first.
df1['difference'] = df1['Value']-df1['Wage']
df1.sort_values('difference', ascending=False)
# Static scatter plot (seaborn).
sns.set()
graph = sns.scatterplot(x='Wage', y='Value', data=df1)
graph
# Interactive bokeh scatter with a hover tooltip showing player details.
Tooltips = HoverTool(tooltips=[("index", "$index"),
                               ("(Wage,Value)", "(@Wage, @Value)"),
                               ("Name", "@Name"), ])
p = figure(title="Soccer 2019", x_axis_label='Wage', y_axis_label='Value',
           plot_width=700, plot_height=700, tools=[Tooltips])
p.circle('Wage', 'Value', size=10, source=df1)
show(p)
1819139 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: i13_speckle_loader
:platform: Unix
:synopsis: A class for loading I13's speckle tracking data
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from savu.plugins.loaders.base_loader import BaseLoader
from savu.plugins.utils import register_plugin
import h5py
@register_plugin
class I13SpeckleLoader(BaseLoader):
    """
    A class to load tomography data from an NXstxm file
    :param signal_key: Path to the signals.Default:'/entry/sample'.
    :param reference_key: Path to the reference.Default:'/entry/reference'.
    :param angle_key: Path to the reference.Default:'/entry/theta'.
    :param dataset_names: the output sets.Default: ['signal','reference'].
    """
    def __init__(self, name='I13SpeckleLoader'):
        super(I13SpeckleLoader, self).__init__(name)
    def setup(self):
        """Create the 'signal' and 'reference' in_data objects from the same
        HDF5 backing file and attach axis labels, rotation angles and
        slicing patterns to each.
        """
        name_signal, name_reference = self.parameters['dataset_names']
        exp = self.exp
        signal = exp.create_data_object("in_data", name_signal)
        signal.backing_file = h5py.File(exp.meta_data.get("data_file"), 'r')
        reference = exp.create_data_object("in_data", name_reference)
        reference.backing_file = h5py.File(exp.meta_data.get("data_file"), 'r')
        signal.data = signal.backing_file[self.parameters['signal_key']]
        signal.set_shape(signal.data.shape)
        # NOTE(review): reference shape is taken from signal.data, not
        # reference.data — confirm both datasets always share a shape.
        reference.data = signal.backing_file[self.parameters['reference_key']]
        reference.set_shape(signal.data.shape)
        signal.set_axis_labels('rotation_angle.degrees',
                               'idx.units',
                               'detector_y.pixel',
                               'detector_x.pixel')
        reference.set_axis_labels('rotation_angle.degrees',
                                  'idx.units',
                                  'detector_y.pixel',
                                  'detector_x.pixel')
        theta = signal.backing_file[self.parameters['angle_key']][0]
        signal.meta_data.set('rotation_angle', theta)
        ## hard code this stuff for now since it's probably going to change anyway
        signal.add_pattern("PROJECTION", core_dims=(-2,-1), slice_dims=(0,1))
        signal.add_pattern("SINOGRAM", core_dims=(0,-2), slice_dims=(1,-1))
        signal.add_pattern("4D_SCAN", core_dims=(1,-2,-1),
                           slice_dims=(0,))
        # NOTE(review): reference SINOGRAM core_dims are (0,-1) where the
        # signal uses (0,-2) — verify whether this asymmetry is intentional.
        reference.add_pattern("PROJECTION", core_dims=(-2,-1), slice_dims=(0,1))
        reference.add_pattern("SINOGRAM", core_dims=(0,-1), slice_dims=(1,-1))
        reference.add_pattern("4D_SCAN", core_dims=(1,-2,-1),
                              slice_dims=(0,))
        self.set_data_reduction_params(signal)
        self.set_data_reduction_params(reference)
| StarcoderdataPython |
262375 | #
# LOFAR Transients Key Project
"""
General purpose astronomical coordinate handling routines.
"""
import sys
import math
from astropy import wcs as pywcs
import logging
import datetime
import pytz
from casacore.measures import measures
from casacore.quanta import quantity
logger = logging.getLogger(__name__)
# Note that we take a +ve longitude as WEST.
CORE_LAT = 52.9088
CORE_LON = -6.8689
# ITRF position of CS002
# Should be a good approximation for anything refering to the LOFAR core.
ITRF_X = 3826577.066110000
ITRF_Y = 461022.947639000
ITRF_Z = 5064892.786
# Useful constants
SECONDS_IN_HOUR = 60**2
SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
def julian_date(time=None, modified=False):
    """
    Calculate the Julian date at a given timestamp.

    Args:
        time (datetime.datetime): Timezone-aware timestamp to calculate JD
            for; defaults to the current UTC time.
        modified (bool): If True, return the Modified Julian Date:
            the number of days (including fractions) which have elapsed between
            the start of 17 November 1858 AD and the specified time.

    Returns:
        float: Julian date value.
    """
    if not time:
        time = datetime.datetime.now(pytz.utc)
    # Days elapsed since the MJD epoch, 1858-11-17T00:00:00 UTC.
    mjdstart = datetime.datetime(1858, 11, 17, tzinfo=pytz.utc)
    mjd = time - mjdstart
    mjd_daynumber = (mjd.days + mjd.seconds / (24. * 60**2) +
                     mjd.microseconds / (24. * 60**2 * 1000**2))
    if modified:
        return mjd_daynumber
    # JD = MJD + 2400000.5 by definition.
    return 2400000.5 + mjd_daynumber
def mjd2datetime(mjd):
    """
    Convert a Modified Julian Date to datetime via 'unix time' representation.

    NB 'unix time' is defined by the casacore/casacore package.
    """
    q = quantity("%sd" % mjd)
    # NOTE(review): datetime.fromtimestamp() returns *local* time, not UTC —
    # confirm callers expect that.
    return datetime.datetime.fromtimestamp(q.to_unix_time())
def mjd2lst(mjd, position=None):
    """
    Converts a Modified Julian Date into Local Apparent Sidereal Time in
    seconds at a given position. If position is None, we default to the
    reference position of CS002.

    mjd -- Modified Julian Date (float, in days)
    position -- Position (casacore measure)
    """
    dm = measures()
    # Default to the ITRF position of LOFAR station CS002.
    position = position or dm.position(
        "ITRF", "%fm" % ITRF_X, "%fm" % ITRF_Y, "%fm" % ITRF_Z
    )
    dm.do_frame(position)
    last = dm.measure(dm.epoch("UTC", "%fd" % mjd), "LAST")
    # Keep only the fractional day and convert it to seconds of LST.
    fractional_day = last['m0']['value'] % 1
    return fractional_day * 24 * SECONDS_IN_HOUR
def mjds2lst(mjds, position=None):
    """Local Apparent Sidereal Time for an MJD given in seconds.

    Args:
        mjds (float): Modified Julian Date (in seconds)
        position (casacore measure): Position for LST calcs
    """
    mjd_days = mjds / SECONDS_IN_DAY
    return mjd2lst(mjd_days, position)
def jd2lst(jd, position=None):
    """Local Apparent Sidereal Time (seconds) for a Julian Date.

    Defaults to the CS002 reference position when position is None.

    Args:
        jd (float): Julian Date
        position (casacore measure): Position for LST calcs.
    """
    # JD and MJD differ by a fixed offset of 2400000.5 days.
    mjd = jd - 2400000.5
    return mjd2lst(mjd, position)
# NB: datetime is not sensitive to leap seconds.
# However, leap seconds were first introduced in 1972.
# So there are no leap seconds between the start of the
# Modified Julian epoch and the start of the Unix epoch,
# so this calculation is safe.
#julian_epoch = datetime.datetime(1858, 11, 17)
#unix_epoch = datetime.datetime(1970, 1, 1, 0, 0)
#delta = unix_epoch - julian_epoch
#deltaseconds = delta.total_seconds()
#unix_epoch = 3506716800
# The above is equivalent to this:
unix_epoch = quantity("1970-01-01T00:00:00").get_value('s')
def julian2unix(timestamp):
    """
    Convert a modifed julian timestamp (number of seconds since 17 November
    1858) to Unix timestamp (number of seconds since 1 January 1970).

    Args:
        timestamp (numbers.Number): Number of seconds since the modified
            Julian epoch.

    Returns:
        numbers.Number: Number of seconds since the Unix epoch.
    """
    return timestamp - unix_epoch
def unix2julian(timestamp):
    """
    Convert a Unix timestamp (number of seconds since 1 January 1970) to a
    modified Julian timestamp (number of seconds since 17 November 1858).

    Args:
        timestamp (numbers.Number): Number of seconds since the Unix epoch.

    Returns:
        numbers.Number: Number of seconds since the modified Julian epoch.
    """
    return timestamp + unix_epoch
def sec2deg(seconds):
    """Seconds of time to degrees of arc (15 degrees per hour of time)."""
    degrees_per_hour = 15.0
    return degrees_per_hour * seconds / 3600.0
def sec2days(seconds):
    """Seconds to (fractional) number of days."""
    seconds_per_day = 24.0 * 3600
    return seconds / seconds_per_day
def sec2hms(seconds):
    """Split seconds into an (hours, minutes, seconds) tuple.

    Hours and minutes are ints; the seconds component keeps any fraction.
    """
    hrs, remainder = divmod(seconds, 60**2)
    mins, secs = divmod(remainder, 60)
    return (int(hrs), int(mins), secs)
def altaz(mjds, ra, dec, lat=CORE_LAT):
    """Calculates the azimuth and elevation of source from time and position
    on sky. Takes MJD in seconds and ra, dec in degrees. Returns (alt, az) in
    degrees."""
    # compute hour angle in degrees
    ha = mjds2lst(mjds) - ra
    if (ha < 0):
        ha = ha + 360
    # convert degrees to radians
    ha, dec, lat = [math.radians(value) for value in (ha, dec, lat)]
    # compute altitude in radians
    sin_alt = (math.sin(dec) * math.sin(lat) +
               math.cos(dec) * math.cos(lat) * math.cos(ha))
    alt = math.asin(sin_alt)
    # compute azimuth in radians
    # divide by zero error at poles or if alt = 90 deg
    cos_az = ((math.sin(dec) - math.sin(alt) * math.sin(lat)) /
              (math.cos(alt) * math.cos(lat)))
    az = math.acos(cos_az)
    # convert radians to degrees
    hrz_altitude, hrz_azimuth = [math.degrees(value) for value in (alt, az)]
    # choose hemisphere
    # Sources west of the meridian (sin(ha) > 0) are mirrored to 360 - az.
    if (math.sin(ha) > 0):
        hrz_azimuth = 360 - hrz_azimuth
    return hrz_altitude, hrz_azimuth
def ratohms(radegs):
    """Convert RA in decimal degrees format to hours, minutes,
    seconds format.

    Keyword arguments:
    radegs -- RA in degrees format

    Return value:
    ra -- tuple of 3 values, [hours,minutes,seconds]
    """
    # Normalise into [0, 360), then convert degrees to seconds of time.
    radegs %= 360
    seconds_of_time = radegs * 3600 / 15.0
    return sec2hms(seconds_of_time)
def dectodms(decdegs):
    """Convert Declination in decimal degrees format to hours, minutes,
    seconds format.

    Keyword arguments:
    decdegs -- Dec. in degrees format

    Return value:
    dec -- list of 3 values, [degrees,minutes,seconds]; the sign is carried
    on the most significant non-zero component (so -0d30m comes back as
    (0, -30, s)).
    """
    # Work on the absolute value; re-apply the sign at the end.
    sign = -1 if decdegs < 0 else 1
    decdegs = abs(decdegs)
    if decdegs > 90:
        raise ValueError("coordinate out of range")
    decd = int(decdegs)
    decm = int((decdegs - decd) * 60)
    decs = (((decdegs - decd) * 60) - decm) * 60
    # Necessary because of potential roundoff errors
    if decs - 60 > -1e-7:
        decm += 1
        decs = 0
    if decm == 60:
        decd += 1
        decm = 0
    # Re-check the range: the carry above may have pushed us past 90.
    if decd > 90:
        raise ValueError("coordinate out of range")
    if sign == -1:
        # Attach the minus sign to the first non-zero component.
        if decd == 0:
            if decm == 0:
                decs = -decs
            else:
                decm = -decm
        else:
            decd = -decd
    return (decd, decm, decs)
def propagate_sign(val1, val2, val3):
    """
    casacore (reasonably enough) demands that a minus sign (if required)
    comes at the start of the quantity. Thus "-0D30M" rather than "0D-30M".
    Python regards "-0" as equal to "0"; we need to split off a separate sign
    field.

    If more than one of our inputs is negative, it's not clear what the user
    meant: we raise.

    Args:
        val1(float): (,val2,val3) input values (hour/min/sec or deg/min/sec)

    Returns:
        tuple: "+" or "-" string denoting sign,
            val1, val2, val3 (numeric) denoting absolute values of inputs.
    """
    negative_count = sum(1 for value in (val1, val2, val3) if value < 0)
    if negative_count == 0:
        return "+", val1, val2, val3
    if negative_count == 1:
        return "-", abs(val1), abs(val2), abs(val3)
    raise ValueError("Too many negative coordinates")
def hmstora(rah, ram, ras):
    """Convert RA in hours, minutes, seconds format to decimal
    degrees format.

    Keyword arguments:
    rah,ram,ras -- RA values (h,m,s); at most one may be negative

    Return value:
    radegs -- RA in decimal degrees

    Raises ValueError for values at or beyond 360 degrees.
    """
    # casacore requires the sign on the front of the quantity string.
    sign, rah, ram, ras = propagate_sign(rah, ram, ras)
    ra = quantity("%s%dH%dM%f" % (sign, rah, ram, ras)).get_value()
    if abs(ra) >= 360:
        raise ValueError("coordinates out of range")
    return ra
def dmstodec(decd, decm, decs):
    """Convert Dec in degrees, minutes, seconds format to decimal
    degrees format.

    Keyword arguments:
    decd, decm, decs -- Dec values (d,m,s); at most one may be negative

    Return value:
    decdegs -- Dec in decimal degrees

    Raises ValueError for values beyond +/-90 degrees.
    """
    # casacore requires the sign on the front of the quantity string.
    sign, decd, decm, decs = propagate_sign(decd, decm, decs)
    dec = quantity("%s%dD%dM%f" % (sign, decd, decm, decs)).get_value()
    if abs(dec) > 90:
        raise ValueError("coordinates out of range")
    return dec
def angsep(ra1, dec1, ra2, dec2):
    """Find the angular separation of two sources, in arcseconds,
    using the proper spherical trig formula

    Keyword arguments:
    ra1,dec1 - RA and Dec of the first source, in decimal degrees
    ra2,dec2 - RA and Dec of the second source, in decimal degrees

    Return value:
    angsep - Angular separation, in arcseconds
    """
    # Spherical law of cosines on the colatitudes b and c.
    b = (math.pi / 2) - math.radians(dec1)
    c = (math.pi / 2) - math.radians(dec2)
    temp = (math.cos(b) * math.cos(c)) + (math.sin(b) * math.sin(c) * math.cos(math.radians(ra1 - ra2)))
    # Truncate the value of temp at +- 1: it makes no sense to do math.acos()
    # of a value outside this range, but occasionally we might get one due to
    # rounding errors. NB this previously used the Python 2-only builtin
    # cmp(); math.copysign(1.0, temp) is the Python 3-safe equivalent.
    if abs(temp) > 1.0:
        temp = math.copysign(1.0, temp)
    return 3600 * math.degrees(math.acos(temp))
def alphasep(ra1, ra2, dec1, dec2):
    """Find the angular separation of two sources in RA, in arcseconds.

    Note the argument order: both RAs first, then both Decs.

    Keyword arguments:
    ra1,dec1 - RA and Dec of the first source, in decimal degrees
    ra2,dec2 - RA and Dec of the second source, in decimal degrees

    Return value:
    angsep - Angular separation, in arcseconds
    """
    # Scale the RA difference by cos(mean declination).
    mean_dec = math.radians((dec1 + dec2) / 2.0)
    return 3600 * (ra1 - ra2) * math.cos(mean_dec)
def deltasep(dec1, dec2):
    """Find the angular separation of two sources in Dec, in arcseconds.

    Keyword arguments:
    dec1 - Dec of the first source, in decimal degrees
    dec2 - Dec of the second source, in decimal degrees

    Return value:
    angsep - Angular separation, in arcseconds
    """
    arcsec_per_degree = 3600
    return arcsec_per_degree * (dec1 - dec2)
# Find angular separation in Dec of 2 positions, in arcseconds
def alpha(l, m, alpha0, delta0):
    """Convert a coordinate in l,m into an coordinate in RA.

    Keyword arguments:
    l,m -- direction cosines, given by (offset in cells) x cellsi (radians)
    alpha_0, delta_0 -- centre of the field

    Return value:
    alpha -- RA in decimal degrees
    """
    delta0_rad = math.radians(delta0)
    # n is the third direction cosine.
    n = math.sqrt(1 - (l*l) - (m*m))
    denominator = (n * math.cos(delta0_rad)) - (m * math.sin(delta0_rad))
    return alpha0 + math.degrees(math.atan2(l, denominator))
def alpha_inflate(theta, decl):
    """Compute the ra expansion for a given theta at a given declination.

    Keyword arguments:
    theta, decl are both in decimal degrees.

    Return value:
    alpha -- RA inflation in decimal degrees

    For a derivation, see MSR TR 2006 52, Section 2.1
    http://research.microsoft.com/apps/pubs/default.aspx?id=64524
    """
    # Close to the pole the RA circle wraps all the way around.
    if abs(decl) + theta > 89.9:
        return 180.0
    denominator = math.sqrt(abs(math.cos(math.radians(decl - theta)) *
                                math.cos(math.radians(decl + theta))))
    return math.degrees(abs(math.atan(math.sin(math.radians(theta)) / denominator)))
# Find the RA of a point in a radio image, given l,m and field centre
def delta(l, m, delta0):
    """Convert a coordinate in l, m into an coordinate in Dec.

    Keyword arguments:
    l, m -- direction cosines, given by (offset in cells) x cellsi (radians)
    delta_0 -- Dec of the centre of the field

    Return value:
    delta -- Dec in decimal degrees
    """
    delta0_rad = math.radians(delta0)
    # n is the third direction cosine.
    n = math.sqrt(1 - (l*l) - (m*m))
    return math.degrees(math.asin(m * math.cos(delta0_rad) +
                                  n * math.sin(delta0_rad)))
def l(ra, dec, cra, incr):
    """Convert a coordinate in RA,Dec into a direction cosine l.

    Keyword arguments:
    ra,dec -- Source location
    cra -- RA centre of the field
    incr -- number of degrees per pixel (negative in the case of RA)

    Return value:
    l -- Direction cosine
    """
    numerator = math.cos(math.radians(dec)) * math.sin(math.radians(ra - cra))
    return numerator / (math.radians(incr))
def m(ra, dec, cra, cdec, incr):
    """Convert a coordinate in RA,Dec into a direction cosine m.

    Keyword arguments:
    ra,dec -- Source location
    cra,cdec -- centre of the field
    incr -- number of degrees per pixel

    Return value:
    m -- direction cosine
    """
    dec_rad = math.radians(dec)
    cdec_rad = math.radians(cdec)
    numerator = ((math.sin(dec_rad) * math.cos(cdec_rad)) -
                 (math.cos(dec_rad) * math.sin(cdec_rad) *
                  math.cos(math.radians(ra-cra))))
    return numerator / math.radians(incr)
def lm_to_radec(ra0, dec0, l, m):
    """
    Find the (ra, dec) corresponding to direction cosines (l, m) about the
    phase centre (ra0, dec0). All angles are in radians.
    """
    # This function should be the inverse of radec_to_lmn, but it is
    # not. There is likely an error here.
    # NOTE(review): treat results from this function with suspicion until
    # the above is resolved.
    sind0 = math.sin(dec0)
    cosd0 = math.cos(dec0)
    dl = l
    dm = m
    d0 = dm * dm * sind0 * sind0 + dl * dl - 2 * dm * cosd0 * sind0
    sind = math.sqrt(abs(sind0 * sind0 - d0))
    cosd = math.sqrt(abs(cosd0 * cosd0 + d0))
    # Keep the declination in the same hemisphere as the phase centre.
    if (sind0 > 0):
        sind = abs(sind)
    else:
        sind = -abs(sind)
    dec = math.atan2(sind, cosd)
    # The l == 0 branch substitutes a tiny value to avoid atan2(0, ...)
    # degeneracies at the phase centre.
    if l != 0:
        ra = math.atan2(-dl, (cosd0 - dm * sind0)) + ra0
    else:
        ra = math.atan2((1e-10), (cosd0 - dm * sind0)) + ra0
    # Calculate RA,Dec from l,m and phase center. Note: As done in
    # Meqtrees, which seems to differ from l, m functions above. Meqtrees
    # equation may have problems, judging from my difficulty fitting a
    # fringe to L4086 data. Pandey's equation is now used in radec_to_lmn
    return (ra, dec)
def radec_to_lmn(ra0, dec0, ra, dec):
    """Direction cosines (l, m, n) of (ra, dec) relative to the phase
    centre (ra0, dec0). All angles are in radians."""
    delta_ra = ra - ra0
    l = math.cos(dec) * math.sin(delta_ra)
    sind0 = math.sin(dec0)
    if sind0 != 0:
        # from pandey; gives same results for casa and cyga
        m = (math.sin(dec) * math.cos(dec0) -
             math.cos(dec) * math.sin(dec0) * math.cos(delta_ra))
    else:
        m = 0
    n = math.sqrt(1 - l**2 - m**2)
    return (l, m, n)
def eq_to_gal(ra, dec):
    """Find the Galactic co-ordinates of a source given the equatorial
    co-ordinates

    Keyword arguments:
    (alpha,delta) -- J2000 RA, Dec in decimal degrees

    Return value:
    (l,b) -- Galactic longitude and latitude, in decimal degrees
    """
    dm = measures()
    # The reference frame is "J2000": this previously read "J200", which is
    # not a casacore direction reference (cf. gal_to_eq below).
    result = dm.measure(
        dm.direction("J2000", "%fdeg" % ra, "%fdeg" % dec),
        "GALACTIC"
    )
    lon_l = math.degrees(result['m0']['value']) % 360 # 0 < l < 360
    lat_b = math.degrees(result['m1']['value'])
    return lon_l, lat_b
def gal_to_eq(lon_l, lat_b):
    """Find the J2000 equatorial co-ordinates of a source given the Galactic
    co-ordinates.

    Keyword arguments:
    (l, b) -- Galactic longitude and latitude, in decimal degrees

    Return value:
    (alpha, delta) -- RA, Dec in decimal degrees
    """
    dm = measures()
    result = dm.measure(
        dm.direction("GALACTIC", "%fdeg" % lon_l, "%fdeg" % lat_b),
        "J2000"
    )
    ra = math.degrees(result['m0']['value']) % 360 # 0 < ra < 360
    dec = math.degrees(result['m1']['value'])
    return ra, dec
def eq_to_cart(ra, dec):
    """Cartesian (x, y, z) coordinates on the unit sphere for equatorial
    ra, dec given in degrees."""
    ra_rad = math.radians(ra)
    dec_rad = math.radians(dec)
    x = math.cos(dec_rad) * math.cos(ra_rad)
    y = math.cos(dec_rad) * math.sin(ra_rad)
    z = math.sin(dec_rad)
    return (x, y, z)
class CoordSystem(object):
    """A container for constant strings representing different coordinate
    systems."""
    # Equatorial reference frames; the string values double as display labels.
    FK4 = "B1950 (FK4)"
    FK5 = "J2000 (FK5)"
def coordsystem(name):
    """Given a string, return a constant from class CoordSystem.

    Lookup is case-insensitive; an unrecognised name raises KeyError.
    """
    aliases = {
        'j2000': CoordSystem.FK5,
        'fk5': CoordSystem.FK5,
        CoordSystem.FK5.lower(): CoordSystem.FK5,
        'b1950': CoordSystem.FK4,
        'fk4': CoordSystem.FK4,
        CoordSystem.FK4.lower(): CoordSystem.FK4,
    }
    return aliases[name.lower()]
def convert_coordsystem(ra, dec, insys, outsys):
    """
    Convert RA & dec (given in decimal degrees) between equinoxes.
    """
    dm = measures()
    # Translate CoordSystem constants into the frame codes casacore expects.
    frame_codes = {CoordSystem.FK4: "B1950", CoordSystem.FK5: "J2000"}
    if insys not in frame_codes:
        raise Exception("Unknown Coordinate System")
    if outsys not in frame_codes:
        raise Exception("Unknown Coordinate System")
    result = dm.measure(
        dm.direction(frame_codes[insys], "%fdeg" % ra, "%fdeg" % dec),
        frame_codes[outsys]
    )
    ra = math.degrees(result['m0']['value']) % 360  # 0 < ra < 360
    dec = math.degrees(result['m1']['value'])
    return ra, dec
class WCS(object):
    """
    Wrapper around pywcs.WCS.

    This is primarily to preserve API compatibility with the earlier,
    home-brewed python-wcslib wrapper. It includes:

        * A fix for the reference pixel lying at the zenith;
        * Raises RuntimeError if converted coordinates are not a number.
          (An earlier docstring said ValueError; the code raises
          RuntimeError.)
    """
    # ORIGIN is the upper-left corner of the image. pywcs supports both 0
    # (NumPy, C-style) or 1 (FITS, Fortran-style). The TraP uses 1.
    ORIGIN = 1

    # We can set these attributes on the pywcs.WCS().wcs object to configure
    # the coordinate system.
    WCS_ATTRS = ("crpix", "cdelt", "crval", "ctype", "cunit", "crota")

    def __init__(self):
        # Currently, we only support two dimensional images.
        self.wcs = pywcs.WCS(naxis=2)

    def __setattr__(self, attrname, value):
        # Forward WCS configuration attributes to the wrapped pywcs object;
        # everything else is stored on the wrapper as usual.
        if attrname in self.WCS_ATTRS:
            # Account for arbitrary coordinate rotations in images pointing at
            # the North Celestial Pole. We set the reference direction to
            # infintesimally less than 90 degrees to avoid any ambiguity. See
            # discussion at #4599.
            if attrname == "crval" and (value[1] == 90 or value[1] == math.pi/2):
                value = (value[0], value[1] * (1 - sys.float_info.epsilon))
            self.wcs.wcs.__setattr__(attrname, value)
        else:
            super(WCS, self).__setattr__(attrname, value)

    def __getattr__(self, attrname):
        # Read WCS configuration attributes from the wrapped pywcs object.
        if attrname in self.WCS_ATTRS:
            return getattr(self.wcs.wcs, attrname)
        # Bug fix: the original fell through to super(WCS, self).__getattr__,
        # which does not exist on object (and its result was never returned).
        # Raise the conventional AttributeError instead -- callers still see
        # AttributeError, but now with the attribute name in the message.
        raise AttributeError(attrname)

    def p2s(self, pixpos):
        """
        Pixel to Spatial coordinate conversion.

        Args:
            pixpos (list): [x, y] pixel position

        Returns:
            tuple: ra (float) Right ascension corresponding to position [x, y]
                   dec (float) Declination corresponding to position [x, y]

        Raises:
            RuntimeError: if the converted position is not a number.
        """
        ra, dec = self.wcs.wcs_pix2world(pixpos[0], pixpos[1], self.ORIGIN)
        if math.isnan(ra) or math.isnan(dec):
            raise RuntimeError("Spatial position is not a number")
        return float(ra), float(dec)

    def s2p(self, spatialpos):
        """
        Spatial to Pixel coordinate conversion.

        Args:
            spatialpos (list): [ra, dec] spatial position

        Returns:
            tuple: X pixel value corresponding to position [ra, dec],
                   Y pixel value corresponding to position [ra, dec]

        Raises:
            RuntimeError: if the converted position is not a number.
        """
        x, y = self.wcs.wcs_world2pix(spatialpos[0], spatialpos[1], self.ORIGIN)
        if math.isnan(x) or math.isnan(y):
            raise RuntimeError("Pixel position is not a number")
        return float(x), float(y)
| StarcoderdataPython |
12845915 | <reponame>jml/pyhazard
# Copyright (c) 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple example client
"""
import random
from pyrsistent import pmap
# TODO: Export from public names.
from hazard._client import (
get_game_info,
get_round_info,
join_game,
play_turn,
register_game,
register_user,
)
from hazard._rules import iter_valid_plays
from hazard._client import _make_credentials
"""
The server running Hazard. https://haverer.jml.io/ in production.
"""
BASE_URL = 'http://localhost:3000'
# TODO: These endpoints ought to be in the library, rather than something that
# users need to know.
USERS_ENDPOINT = BASE_URL + '/users'
GAMES_ENDPOINT = BASE_URL + '/games'
def get_game_endpoint(game):
    """Return the absolute URL for *game*'s resource.

    TODO: This also should be in the library.
    """
    endpoint = BASE_URL + game['url']
    return endpoint
def get_round_endpoint(round_url):
    """Return the absolute URL for a round, given its relative URL.

    TODO: This also should be in the library.
    """
    endpoint = BASE_URL + round_url
    return endpoint
def player_info(round_info, player_id):
    """Return the player dict with id *player_id*, or None if absent."""
    matches = (info for info in round_info['players'] if info['id'] == player_id)
    return next(matches, None)
def get_status(player):
    """Return a human-readable status suffix for *player*."""
    if not player['active']:
        return ' (eliminated)'
    return ' (protected)' if player['protected'] else ''
def print_round_info(round_info):
    """Print each player's id, status and discards for the round.

    The current player, if any, is marked with a leading '*'.
    (Python 2: uses print statements.)
    """
    current_player = round_info['currentPlayer']
    print 'Players:'
    for player in round_info['players']:
        status = get_status(player)
        # Mark whose turn it is.
        if player['id'] == current_player:
            current = '* '
        else:
            current = '  '
        print '{}{}{}: {}'.format(
            current, player['id'], status, player['discards'])
    print
def choose_play(hand, dealt_card, myself, others):
    """Pick a uniformly random valid play, or None if there are none.

    NOTE(review): play_round() calls this as choose_play(dealt_card, hand, ...),
    i.e. with the first two arguments swapped relative to this signature --
    confirm whether iter_valid_plays is insensitive to that order.
    """
    valid_plays = list(iter_valid_plays(hand, dealt_card, myself, others))
    try:
        return random.choice(valid_plays)
    except IndexError:
        # No valid plays available.
        return None
def play_round(users, round_url):
    """Drive a single round to completion and return the list of winners.

    Repeatedly asks the server whose turn it is and plays a random valid
    move on that player's behalf.  (Python 2: uses print statements.)
    """
    while True:
        # Figure out whose turn it is
        round_info = get_round_info(None, round_url)
        print_round_info(round_info)
        current_player_id = round_info.get('currentPlayer', None)
        # No current player means the round has finished.
        if not current_player_id:
            return round_info['winners']
        # Play as that person
        current_player = users[current_player_id]
        current_player_creds = _make_credentials(current_player)
        current_player_view = get_round_info(current_player_creds, round_url)
        # Figure out their hand
        dealt_card = current_player_view['dealtCard']
        hand = player_info(current_player_view, current_player_id)['hand']
        others = [
            p['id'] for p in round_info['players']
            if p['id'] != current_player_id]
        # Choose a play at random.
        # NOTE(review): argument order (dealt_card, hand) is the reverse of
        # choose_play's signature (hand, dealt_card) -- confirm intended.
        play = choose_play(dealt_card, hand, current_player_id, others)
        print 'Playing: {}'.format(play)
        response = play_turn(current_player_creds, round_url, play)
        print 'Result: {}'.format(response)
def main():
    """Play a full two-player game between two freshly registered users.

    (Python 2: uses print statements.)
    """
    # Register two users, 'foo' and 'bar'.
    foo = register_user(USERS_ENDPOINT, 'foo')
    foo_creds = _make_credentials(foo)
    bar = register_user(USERS_ENDPOINT, 'bar')
    bar_creds = _make_credentials(bar)
    # Immutable map from user id to user record.
    users = pmap({
        foo['id']: foo,
        bar['id']: bar,
    })
    # 'foo' creates a 2-player game
    game = register_game(foo_creds, GAMES_ENDPOINT, 2)
    game_url = get_game_endpoint(game)
    # 'bar' joins the game, and the game begins.
    join_game(bar_creds, game_url)
    # Play rounds until the server reports the game is no longer in progress.
    while True:
        game = get_game_info(None, game_url)
        print 'Game: {}'.format(game)
        if game['state'] != 'in-progress':
            break
        current_round_url = get_round_endpoint(game['currentRound'])
        winners = play_round(users, current_round_url)
        print 'Round over. Winners: {}'.format(winners)
    print 'Game over. Winners: {}'.format(game['winners'])
if __name__ == '__main__':
main()
| StarcoderdataPython |
9601411 | #!/usr/bin/env python
import os
import re
import sys
ORDERING_FIELD = "propertyOrder"
SCHEMA_DIRECTORY = "schemas"
class OrderingGenerator(object):
    """Stateful callable for re.sub that renumbers ordering fields.

    Each call produces the next '"<field>": <count>' replacement string;
    the counter advances by *incr* on every call, starting from
    *start* + *incr*.  The regex match object passed in is ignored.
    """

    def __init__(self, field, start=0, incr=10):
        self.field, self.count, self.incr = field, start, incr

    def __call__(self, match):
        # Only the call sequence matters, not the matched text.
        self.count += self.incr
        return '"%s": %d' % (self.field, self.count)
def renumber_schema(schema_contents):
    """Rewrite every '"<ORDERING_FIELD>": N' value as 10, 20, 30, ..."""
    pattern = re.compile('"%s": (\\d+)' % ORDERING_FIELD)
    replacer = OrderingGenerator(ORDERING_FIELD)
    return pattern.sub(replacer, schema_contents)
def main():
    """Renumber the ordering fields of the schema file named on the command line.

    The file is looked up under SCHEMA_DIRECTORY and rewritten in place.
    (Python 2: uses a print statement and str.decode/encode.)
    """
    if len(sys.argv) < 2:
        print "Usage: " + sys.argv[0] + " <schema_file>"
        exit(1)
    schema_file = os.path.join(SCHEMA_DIRECTORY, sys.argv[1])
    with open(schema_file, "r") as f:
        schema_contents = f.read().decode("utf-8")
    new_contents = renumber_schema(schema_contents)
    with open(schema_file, "w") as f:
        # NOTE(review): misleading assignment -- f.write() returns a byte
        # count, not the schema contents; the value is unused.
        schema_contents = f.write(new_contents.encode("utf-8"))
if __name__ == '__main__':
main()
| StarcoderdataPython |
9634749 | <gh_stars>0
#!/usr/bin/env python
# A basic test that checks that we can build a simple makefile.
# Build description consumed by run(): 'output' concatenates node1
# (source1+source2) and node2 (source3+source4); 'all' depends on 'output'.
makefile = '''
{
"rules":
[
{
"inputs": [ "output" ],
"outputs": [ "all" ]
},
{
"inputs": [ "node1", "node2" ],
"outputs": [ "output" ],
"cmd": "cat node1 > output && cat node2 >> output"
},
{
"inputs": [ "source1", "source2" ],
"outputs": [ "node1" ],
"cmd": "cat source1 > node1 && cat source2 >> node1"
},
{
"inputs": [ "source3", "source4" ],
"outputs": [ "node2" ],
"cmd": "cat source3 > node2 && cat source4 >> node2"
}
]
}
'''
def run(test):
    """Exercise a simple four-source build graph end to end.

    Verifies that all sources start dirty, that a build cleans them, and
    that every generated file has the expected concatenated contents.
    *test* is the harness object supplied by the test framework.
    """
    test.create_makefile(makefile)
    test.write_file("source1", "1")
    test.write_file("source2", "2")
    test.write_file("source3", "3")
    test.write_file("source4", "4")
    test.start()
    # You can debug the current state of the graph by creating a png image:
    # test.gen_graphviz("graph1.png")
    # Before we build, every source file should be dirty
    dirty = test.get_dirty_sources()
    assert("source1" in dirty)
    assert("source2" in dirty)
    assert("source3" in dirty)
    assert("source4" in dirty)
    test.build()
    # After we build, nothing should be dirty
    dirty = test.get_dirty_sources()
    assert(not dirty)
    # check the content of all generated files
    assert(test.get_file_content('node1') == '12')
    assert(test.get_file_content('node2') == '34')
    assert(test.get_file_content('output') == '1234')
| StarcoderdataPython |
3260547 | #encoding=utf-8
import codecs
import os
import sys
from os import path
import subprocess
import traceback
import urllib
from urlparse import urlparse
def RedIt(s):
    """Wrap *s* in ANSI red escape codes when stderr is a terminal."""
    if not sys.__stderr__.isatty():
        return s
    return "\033[1;31;40m%s\033[0m" % (s)
def GreenIt(s):
    """Wrap *s* in ANSI green escape codes when stderr is a terminal."""
    if not sys.__stderr__.isatty():
        return s
    return "\033[1;32;40m%s\033[0m" % (s)
def PullRepo(repo, path):
    """Force *repo* onto origin/master, discarding local changes.

    NOTE(review): *repo* appears to be a GitPython Repo (repo.heads,
    repo.git.reset) -- confirm.  Assumes the remote branch is
    origin/master.  (Python 2: uses a print statement.)
    """
    # may have another way to do this
    repo.head.ref = repo.heads.master
    repo.head.reset(index=True, working_tree=True)
    repo.git.reset('--hard','origin/master')
    print GreenIt("[NOTICE] {0} ({1}) {2} set success.".format(path.split(os.path.sep)[-1], repo.head.ref, path))
def GetComake(comake_url, write_path):
    """Download a COMAKE file from *comake_url* and write it to *write_path*.

    Prints an error and returns early on a non-200 response; on any other
    failure removes the output file and prints the traceback.
    (Python 2: urllib.urlopen and print statements.)
    """
    try:
        f = urllib.urlopen(comake_url)
        if f.getcode() != 200:
            print RedIt("[error] {} doesn't exist".format(comake_url))
            return
        res = f.read()
        print "[NOTICE] start writing COMAKE " + write_path
        with codecs.open(write_path, "w", "utf-8") as ff:
            ff.write(res.decode('utf-8'))
        print GreenIt("[NOTICE] get {} success".format(comake_url))
    except Exception as e:
        # NOTE(review): if the failure happens before the file is created,
        # os.remove itself raises -- confirm whether that is intended.
        os.remove(write_path)
        traceback.print_exc()
        print RedIt("[error] {} get failed: ".format(comake_url))
def CallCmd(cmd):
    """Run *cmd* through the shell and return (returncode, stdout, stderr)."""
    proc = subprocess.Popen(
        '%s' % (cmd),
        shell=True,
        bufsize=0,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return (proc.returncode, out, err)
def GetPathFromUri(uri):
    """Map a dependency URI to a local filesystem path.

    file:// URIs map directly to netloc + path.  Other URIs must end in
    '.git' and are placed under $COMAKEPATH/<host>/<path-without-.git>;
    otherwise an error is printed and None is returned.
    (Python 2: relies on the urlparse module and a print statement.)
    """
    url = urlparse(uri)
    if url.scheme == u"file":
        return path.sep.join([url.netloc, url.path])
    else:
        # Build <COMAKEPATH>/<host>/<path components>.
        local_path = [os.getenv("COMAKEPATH"), url.netloc]
        local_path.extend([x for x in url.path.split('/') if x])
        if local_path[-1].endswith('.git'):
            # Strip the trailing '.git' to get the checkout directory name.
            local_path[-1] = local_path[-1][0:-4]
            return os.path.sep.join(local_path)
        else:
            print RedIt("[error] wrong' dependency uri format: {}".format(uri))
            return None
| StarcoderdataPython |
6698550 | <reponame>MartinHeinz/IoT-Cloud<gh_stars>10-100
from random import randint
import pytest
from pyope.ope import OPE
from client.crypto_utils import instantiate_ope_cipher
@pytest.fixture(scope="module", autouse=True)
def cipher():
    """Module-scoped OPE cipher built from a freshly generated random key."""
    random_key = OPE.generate_key()
    c = instantiate_ope_cipher(random_key)
    return c
def test_ope_encrypt(benchmark, cipher):
    """Benchmark OPE encryption of a random int in [0, 100000]."""
    benchmark.pedantic(cipher.encrypt, args=(randint(0, 100000),), iterations=100, rounds=100)
def test_ope_decrypt(benchmark, cipher):
    """Benchmark OPE decryption of a freshly encrypted random int."""
    benchmark.pedantic(cipher.decrypt, args=(cipher.encrypt(randint(0, 100000)),), iterations=100, rounds=100)
| StarcoderdataPython |
394339 | # MOCKS for autodoc
import quantities as pq
if pq.mV.__class__.__module__ == 'sphinx.ext.autodoc.importer':
pq.mV = pq.ms = pq.Hz = pq.nA = 1.0
# END MOCKS
from abc import abstractmethod
from neuronunit import capabilities as ncap
from neuronunit.tests.base import VmTest
from olfactorybulb.neuronunit.tests.utilities import get_APs, cache
from sciunit import capabilities as scap
from olfactorybulb.neuronunit import capabilities as obncap
from olfactorybulb.neuronunit.tests import publications
SHOW_ERRORS = False
class OlfactoryBulbCellTest(VmTest):
    """Base class for olfactory-bulb NeuronUnit tests.

    Adds prediction caching keyed on (model, test) hashes, a check that
    required properties are declared, and a mechanism for composing a
    generic test class with a publication-specific parameter class.
    """

    @abstractmethod
    def generate_prediction_nocache(self, model):
        """Compute the prediction for *model* without consulting the cache."""
        pass

    def generate_prediction(self, model):
        """Return the (possibly cached) prediction for *model*.

        On any exception the formatted traceback string is cached and
        returned in place of a prediction.
        """
        # import pydevd
        # pydevd.settrace('192.168.0.100', port=4200, suspend=False)
        result = self.fetch_cached(model)
        if result is None:
            # Check that self has all the required properties
            self.check_required_properties()
            # Perform the uncached test
            try:
                result = self.generate_prediction_nocache(model)
            except:
                import traceback
                result = traceback.format_exc()
                if SHOW_ERRORS:
                    print(result)
            # Store result in cache
            self.store_in_cache(model, result)
        return result

    def check_required_properties(self):
        """Raise if any name listed in self.required_properties is missing."""
        if hasattr(self, "required_properties"):
            for prop in self.required_properties:
                if not hasattr(self, prop):
                    raise Exception("Property '" + prop + "' not found. Make sure the property is declared either in the"
                                    " generic test class or in the publication class.")

    def fetch_cached(self, model):
        """Return the cached result for (model, self), or None if absent."""
        return cache.get(self.get_hash(model))

    def store_in_cache(self, model, result):
        """Store *result* in the cache under the (model, self) key."""
        cache.store(self.get_hash(model), result)

    def get_hash(self, model):
        """Return the cache key combining the model's and the test's hashes."""
        # The cache key is a hash of the model and the test - we want to store the model-test_result combination
        model_hash = model.__hash__()
        self_hash = self.__hash__()
        return hash((model_hash, self_hash))

    def __hash__(self):
        # Tests hash by class name, so two instances of the same test class
        # share cache entries.
        return hash(self.__class__.__name__)

    def get_dependent_prediction(self, dependent_test_class_generic, model):
        """Run another generic test under this test's publication parameters.

        Dynamically builds a class combining this test's publication class
        (first base in the MRO) with *dependent_test_class_generic*, then
        returns its (possibly cached) prediction for *model*.
        """
        # import pydevd
        # pydevd.settrace('192.168.0.100', port=4200)
        mro = self.__class__.mro()
        if len(mro) < 4:
            raise Exception("The test should be a class that inherits from a publications class"
                            "AND from a generic tests class, in that order. E.g. "
                            "'class MyTest(UrbanBurton2014, InputResistanceTest):'")
        # Create a temp class that inherits from the generic test and from the specific publication
        # Aways first parent class (by convention and to preserve inheritance)
        publication_class = mro[1]
        if not issubclass(publication_class, publications.BasePublication):
            raise Exception("The first parent class '"+str(publication_class)+"' of the test should be a publication class. E.g. 'class MyTest(UrbanBurton2014, InputResistanceTest):'")
        if not issubclass(dependent_test_class_generic, OlfactoryBulbCellTest):
            raise Exception("The second parent class '"+dependent_test_class_generic.__class__.__name__+"' of the test should be a class that inherits from OlfactoryBulbCellTest. E.g. 'class MyTest(UrbanBurton2014, InputResistanceTest):'")
        # Use SomeTestSomeAuthor1984 class name form - as descibed in BasePublication
        dependent_test_class_name = dependent_test_class_generic.__name__ + publication_class.__name__
        # Create the type dynamically
        dependent_test_class = type(dependent_test_class_name,
                                    (publication_class, dependent_test_class_generic),
                                    {})
        # Instantiate the dynamic class
        dependent_test = dependent_test_class()
        # Get the prediction (from cache if there)
        return dependent_test.generate_prediction(model)
class OlfactoryBulbCellSpikeTest(OlfactoryBulbCellTest):
required_capabilities = (ncap.ReceivesSquareCurrent,
ncap.ProducesMembranePotential,
scap.Runnable,
obncap.SupportsSettingTemperature,
obncap.SupportsSettingStopTime)
def get_aps(self, voltage):
return get_APs(voltage, self.ss_delay, self.threshold_method) | StarcoderdataPython |
1977109 | # coding: utf-8
import socket
import threading
import logging
import pickle
from utils import *
import queue
import time
class Node(threading.Thread):
    """One entity in a UDP token-ring of four roles ('RESTAURANT', 'CLERK',
    'CHEF', 'WAITER').

    Each node knows only its successor's address; pickled messages
    circulate around the ring.  queue_in/queue_out bridge the ring thread
    and the entity's own logic.
    """

    def __init__(self, id, address, name, successor_addr, timeout=1):
        threading.Thread.__init__(self)
        self.id = id
        self.address = address
        self.name=name
        self.successor_id = 0
        # Messages addressed to this node / waiting to be sent on the ring.
        self.queue_in = queue.Queue()
        self.queue_out = queue.Queue()
        # Maps role name -> node id; filled in during NODE_DISCOVERY.
        self.table = {'RESTAURANT':None,'CLERK':None,'CHEF':None,'WAITER':None}
        self.table[self.name]=self.id
        self.discovered=False
        # A node created without a successor address starts the ring alone.
        if successor_addr is None:
            self.successor_addr = address
            self.inside_ring = True
        else:
            self.successor_addr = successor_addr
            self.inside_ring = False
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Timeout so run() never blocks forever on recvfrom.
        self.socket.settimeout(timeout)
        self.logger = logging.getLogger("Node {}: {}".format(self.name, self.id))

    def send(self, address, o):
        """Pickle *o* and send it as a single datagram to *address*."""
        p = pickle.dumps(o)
        self.socket.sendto(p, address)

    def recv(self):
        """Receive one datagram; return (payload, addr), (None, addr) for an
        empty datagram, or (None, None) on timeout."""
        try:
            p, addr = self.socket.recvfrom(1024)
        except socket.timeout:
            return None, None
        else:
            if len(p) == 0:
                return None, addr
            else:
                return p, addr

    def queuein(self):
        """Pop the next inbound message for this entity, or None if empty."""
        if self.queue_in.empty():
            return None
        return self.queue_in.get()

    def queueout(self, o):
        """Queue *o* to be sent on the ring by run()."""
        self.queue_out.put(o)

    def discover(self, table, discovered_table):
        """Merge a peer's role *table* into ours and update discovery state.

        Returns the NODE_DISCOVERY message to forward to the successor, or
        None once every node reports its table complete (stops the message).
        """
        # Adopt any role ids the peer knows that we don't.
        for key in table:
            if not table[key] == None and self.table[key] == None:
                self.table[key]=table[key]
        if not self.discovered and not [x for x in self.table if self.table[x] == None]:#check if table is complete
            self.discovered=True
            self.logger.debug('Discovered all entities-> {}'.format(self.table))
        discovered_table[self.id] = self.discovered
        if check_lst_true(lst=discovered_table): #utils
            return None
        return {'id':self.successor_id, 'method':'NODE_DISCOVERY', 'args':{'table':self.table,'discovered_table':discovered_table}}

    def run(self):
        """Main ring loop: join the ring, run discovery, then relay messages.

        Messages carrying an 'id' are addressed to the immediate successor;
        messages without one come either from a joining node (NODE_JOIN) or
        from an external client and are routed to the CLERK.
        """
        self.socket.bind(self.address)
        send_discover = True
        first_msg = True
        if self.inside_ring:
            self.logger.debug('Joined the ring')
        while True:
            # Keep asking to join until the successor acknowledges.
            if not self.inside_ring:
                o = {'method': 'NODE_JOIN', 'args': {'id':self.id, 'address':self.address}}
                self.send(self.successor_addr, o)
            #just need to be sendeed one time by the last node to join the ring
            elif send_discover:
                o = {'id': self.successor_id, 'method':'NODE_DISCOVERY', 'args':{'table':self.table,'discovered_table':[False]*4}}
                self.send(self.successor_addr, o)
                send_discover=False
            p, addr = self.recv()
            if p is not None:
                o = pickle.loads(p)
                args = o['args']
                method = o['method']
                #uncomment to see all the msgs
                #self.logger.info('O: {}'.format(o))
                if not 'id' in o:
                    if method == 'NODE_JOIN':
                        # Insert the joiner between us and our old successor.
                        if contains_successor(self_id=self.id, successor_id=args['id']):
                            self.successor_addr = args['address']
                            self.successor_id = args['id']
                            o={'id':self.successor_id, 'method':'JOIN_REP', 'args':{}}
                            self.send(self.successor_addr, o)
                    else: #msg from the client
                        t={}
                        t['order']=args
                        t['address']=addr
                        o = {'id':self.table['CLERK'],'method': method, 'args': t}
                        if first_msg: #make sure there are only one msg on the ring
                            self.send(self.successor_addr, o)
                            first_msg=False
                        else:
                            self.queue_out.put(o)
                else:
                    id = o['id']
                    if id == self.id:
                        if method == 'JOIN_REP':
                            self.logger.debug('Joined the ring')
                            self.inside_ring = True
                        #make sure the msg stops after everyone complete their tables
                        elif method == 'NODE_DISCOVERY':
                            o = self.discover(table = args['table'], discovered_table=args['discovered_table'])
                            if o is not None:
                                self.send(self.successor_addr, o)
                        else:
                            # Message for this entity: deliver it, then pass a
                            # queued (or empty placeholder) message onward.
                            self.queue_in.put({'method':method,'args':args})
                            if not self.queue_out.empty():
                                o = self.queue_out.get()
                            else:
                                o = {'id':None,'method':None,'args':None}
                            self.send(self.successor_addr, o)
                    elif id == None:
                        # Placeholder token: replace it with a queued message
                        # if we have one, otherwise keep it circulating.
                        if not self.queue_out.empty():
                            o = self.queue_out.get()
                            self.send(self.successor_addr, o)
                        else:
                            self.send(self.successor_addr, o)

    def __str__(self):
        return 'Successor: {} InsideRing: {} Table: {}'.format( self.successor_id,self.inside_ring, self.table)

    def __repr__(self):
        return self.__str__()
| StarcoderdataPython |
8119755 | from ..config import config
class TemplateParser:
    """TemplateParser.

    Helper class to extract useful information from Wiki templates.
    All methods are static.
    """

    @staticmethod
    def is_citation(t):
        """Return True if template *t* counts as a "citation".

        A template qualifies when its title appears in the
        ``citation_template_whitelist`` config entry, or when the title
        contains "cite" (case-insensitive).
        """
        title = TemplateParser.title(t)
        if title in config.get('citation_template_whitelist', []):
            return True
        return 'cite' in title.lower()

    @staticmethod
    def parse_args(t):
        """Return the template arguments (second element of *t*) as a dict.

        ``key=value`` arguments map key -> value (split on the first '=');
        bare arguments are stored under the key ``None``.  Later duplicates
        win.
        """
        parsed = {}
        for raw in t[1]:
            pieces = raw.split('=', 1)
            if len(pieces) == 2:
                parsed[pieces[0]] = pieces[1]
            else:
                parsed[None] = pieces[0]
        return parsed

    @staticmethod
    def title(t):
        """Return the template's title (first element of *t*), title-cased."""
        return t[0].title()
| StarcoderdataPython |
6534136 | import os
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path, pad_to_multiple_of=1):
# Synthetic elements used to pad the dictionary length.
# It is assumed that these synthetic elements do not appear in the actual data files.
self.synthetic = ["vvvvvvvv" + str(i) for i in range(pad_to_multiple_of-1)]
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
# Pad dictionary size to desired multiple. For example, padding to a multiple of 8
# is necessary to ensure Tensor Core usage for the decoder.
pad_elem = pad_to_multiple_of - len(self.dictionary)%pad_to_multiple_of
if pad_elem != pad_to_multiple_of:
for i in range(pad_elem):
self.dictionary.add_word(self.synthetic[i])
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
| StarcoderdataPython |
6400690 |
### BIFS class ###
### ###
### Class for performing Bayesian Image ###
### Restoration in Fourier Space (BIFS) ###
## The filename is bifscore, not bifs or BIFS to avoid trouble
## with the import machinery. In particular, import bifs, at
## least when executed in this directory, could import bifs.py
## (old name for this file) rather than the package. And thus
## from bifs.priors would fail with an error that bifs was not
## a package.
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from pylab import *
import multiprocessing as mp
from scipy import stats
import imageio
from scipy.optimize import minimize_scalar as ms
from multiprocessing import Pool, TimeoutError
from datetime import datetime
import jsonpickle as jsp
from bifs.priors import FunctionalPrior as FP
from bifs.priors import EmpiricalPrior as EP
import copy
class BIFS:
"""
Class BIFS for performing Bayesian image restoration
in k-space
Class Variables
----------------
In the next group, clients should always use the accessor functions listed:
init_image() - initial loaded image
k_image() - initial k-space image
mod_image() - initial modulus image in k-space
phase_image() - initial phase image in k-space
bifsk_image() - final BIFS modulus image in k-space
final_image() - final reconstructed image in image space
NOTE: We're keeping all these images in the BIFS object for
now re. testing and experimentation - might be useful to
eventually have production run options that are more
parsimonius re. storage.
image_file_loaded - whether an image is loaded (True,False)
initial_image_file_name - file name of initial image
imdim - int image dimension (1,2 or 3)
kdist() = distance on the shifted k-space lattice
view3Dslice - for 3D data this is a 2D array [a,b] where:
a = axis perpindicular to slice
b = fraction of maximum along that direction
for slice location
prior - string specifying prior distribution to use
current choices:
'Gaussian'
prior_choices - list of current prior choices (see above)
prior_scale - the overall scale of the prior variance
prior_scale_orig - prior scale at the origin - generally set huge
to allow data to determine overall scale
All of the above prior* variables are obsolescent. Prefer the new
_prior - <AbstractPrior>
The next 2 items might be obsolescent. However, their presence permits us to
set the parameter type before the image is loaded; _prior needs image info,
specifically dimensions, for construction.
param_func_type is replaced by _prior, specifically _prior.name(),
and param_func_choices should come from import priors
param_func_type - string specifying the k-space BIFS paramter
function to use
current choices:
"Inverse Power Decay"
"Banded Inverse Power Decay"
"Linear Decay"
"Empirical"
param_func_choices - list of current choices (see above)
likelihood - string specifying likelihood distribution to use
current choices:
'Gaussian'
'Rician'
likelihood_choices - list of current choices (see above)
likelihood_scale - the assumed (const) noise level in k-space
bessel_approx_lims - limits for bessel approximtion for rice
distribution - see paper referenced in code
bessel_approx_array - array for bessel approximtion for rice
distribution - see paper referenced in code
rice_denom_cutoff - cutoff for the demoninator of the closed form
of the posterior with a Gaussian prior and
Rician likelihood derived from bessel approximation
see paper referenced in code
basis - string specifying the basis to use - currently ony choice
is "Fourier"
basis_choices - list of current choices (see above)
We continue to use bumps, but they mostly live in the prior. We retain some info
here for compatibility with the GUI.
bumps - dictionary containing set of "bump" filters to implement
bump_types - set of choices for "bump" filter types to add to k-space
paramter function; uses scipy.signal window types
so consult that documentation for available types -
currently only types that only require window type name
and size are used - current choices are:
"boxcar"
"blackman"
"hann"
"bartlett"
"flattop"
"parzen"
"bohman"
"blackmanharris"
"nuttall"
"barthann"
bump_default_type - the default window type used (currently "blackman")
"""
def __init__(self,prior = "Gaussian",likelihood = "Gaussian",likelihood_scale = 0.05,prior_scale=0.0005,basis="Fourier"):
"""
initializes class
Inputs:
prior - function type for prior
likelihood - function type for likelihood
likelihood_scale_orig - likelihood scale at k-space origin
prior_scale - prior scale at k-space origin
basis - basis type for transfrom space
Outputs:
initializes class
"""
self._invalidate_initial_image()
self.view3Dslice = [0,0.5]
self.prior = prior
self.prior_choices = ["Gaussian"]
self.prior_scale = prior_scale
self.prior_scale_orig = 10.**7
self._prior = None # will hold <AbstractPrior> object
self.param_func_choices = FP.param_func_choices()
self.likelihood = likelihood
self.likelihood_choices = ["Gaussian","Rician"]
self.likelihood_scale = likelihood_scale
self.rice_denom_cutoff = 0.0002
# The following 3 parameters are used in conjuction
# with a closed form for the posterior MAP estimate
# with a Gaussian prior and Rician likelihood obtained
# using sympy and based on an approximation to the first
# order modified Bessel function discussed in:
# A Simple and Efficient Approximation to the Modified Bessel
# Functions and Its Applications to Rician Fading
# <NAME>, <NAME>, <NAME> and <NAME>
# 2013 IEEE GCC Conference and exhibition, November 17-20, Doha, Qatar
#
self.bessel_approx_lims = np.array([0.0,11.5,20.0,37.5])
self.bessel_approx_array = np.array([[0.7536,0.4710,0.9807,1.144],
[0.9739,-163.40,0.8672,0.995],
[-0.715,0.9852,1.0795,0.5686],
[0.2343,0.8554,1.0385,0.946]])
self.basis_choices = ["Fourier"]
self.basis = basis
self.bas = None
self.bumps = {}
# Expand the following to if/else when more basis choices added
# For now set default parameters here; make them editable via
# pop widget
if self.basis == "Fourier":
from bifs.bases import fourier as fb
self.bas = fb
self.param_func_type = self.bas.param_func_type
##### Test 1 ##############
##### - These are the 'ideal' settings for a Rician
##### likelihood analysis
# self.rice_denom_cutoff = 0.00025
# self.likelihood = "Rician"
# self.likelihood_scale = 0.0005
# self.prior_scale = 0.00009
##### Test 1 ###############
def _invalidate_initial_image(self):
"Existing image, if any, and all that depends on it, is obsolete"
self._init_image = None
self.imdim = None
self.image_file_loaded = False
self.initial_image_file_name = ''
self._prior = None # depends on image dimensions
self._invalidate_kspace()
def _invalidate_kspace(self):
"""
The transformation to k-space has or will change.
Everything that depends on it is invalid
"""
self._kdist = None
self._k_image = None
self._mod_image = None
self._phase_image = None
self._invalidate_final()
def _invalidate_final(self):
"""
The mapping from the k-space image to the final image has changed.
Invalidate everything that depends on it.
"""
self._final_image = None
self._bifsk_image = None
def save_results(self):
"""
pickles current bifs object and saves to a time stamped file
using jsonpickle; in addition saves processed final image and
final transform space image to files with the same time stamp
in case user just wants copies of final images without having
to reload bifs object.
Inputs:
current bifs object
Outputs:
saves time stamped files containing images and bifs parameters
"""
# Date stamp for parameter output file
date_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
# Output file names
if self.image_file_loaded:
# Assume here that input file is in standard 2 section,
# period seperated form with file suffix as image type
# First strip off any possible preceeding directory names
in_file_name = self.initial_image_file_name
in_file = in_file_name.split('/')[-1]
file_prefix = in_file.split('.')[0]
image_type = in_file.split('.')[1]
else:
# file_prefix = 'bifs_np_array_input'
in_file_name = "Numpy Array"
image_type = 'bmp'
# Parameter file:
out_p = 'bifs_params_'+date_stamp+'.bifspout'
out_im = 'bifs_output_image_'+date_stamp+'.'+image_type
out_k = 'bifs_output_kspace_'+date_stamp+'.'+image_type
# Center final K-space image
out_k_im = np.roll(np.roll(self.bifsk_image(),self.bifsk_image().shape[0]//2+1,0),self.bifsk_image().shape[1]//2,1)
# Output images
plt.imsave(out_im,self.final_image(),cmap = cm.Greys_r)
plt.imsave(out_k,np.log(out_k_im),cmap = cm.Greys_r)
# Output paramter file
param_file = open(out_p, "w")
param_file.write(jsp.encode(self))
param_file.close()
return
def copy_params(self):
"""
Return a new bifs object that has all the basic parameter values found in self.
Does not copy the image or filename
"""
newbifs = BIFS()
newbifs.param_func_type = self.param_func_type
newbifs.prior = self.prior
newbifs.prior_scale = self.prior_scale
newbifs._prior = copy.deepcopy(self._prior)
newbifs.likelihood = self.likelihood
newbifs.likelihood_scale = self.likelihood_scale
newbifs.bumps = self.bumps
newbifs.view3Dslice = self.view3Dslice
return newbifs
def load_image_file(self,fileName):
"""
load an image file using name fileName; can use to
examine image without invoking full bifs initialization.
Inputs:
fileName - name of file to load
Outputs:
loads file into bifs object
RB makes read_imfile persistent in some cases so bulk scans can get the headers.
"""
self._invalidate_initial_image()
self.initial_image_file_name = fileName
try:
# For now the convention is that 1D data sets
# will have filennames ending in '.txt' and
# will be in numpy text file form, e.g. as created
# with numpy.savetxt with the conventions that
# header comments start with # and there is no
# sperator between lines
if self.initial_image_file_name.lower().endswith('.txt'):
# numpy.loadtext automatacilly reads to numpy array
read_image = np.loadtxt(self.initial_image_file_name)
else:
try:
import nibabel
# if nibabel is not installed, move on
# if nibabel can't read file, move on
# nibabel.streamlines.is_supported(fname) would seem to be a good test, but files
# that fail it can still be read, e.g., nii.gz files.
# So we use the Python idiom "Better to ask forgiveness than permission"
self.read_imfile = nibabel.load(self.initial_image_file_name)
read_image = self.read_imfile.get_fdata()
except:
try:
self.read_imfile = imageio.volread(self.initial_image_file_name)
assert len(self.read_imfile) > 2
read_image = np.asarray(self.read_imfile)
except:
# we have a 2D, or maybe 1D, image
self.read_imfile = imageio.imread(self.initial_image_file_name)
read_image = np.asarray(self.read_imfile)
self.load_image(read_image, invalidate=False)
except:
print("BIFS Couldn't read image file: ",fileName)
return
return
def load_image(self, init_image, invalidate=True):
"""
intializes the bifs object so as to be ready for
analysis steps.
load_image_file calls this
Inputs:
init_image - array generated by loading image file using
load_image_file()
invalidate - if True, clear caches. Only very knowledgeable clients should use
this set to False
"""
if invalidate:
self._invalidate_initial_image()
self._init_image = init_image
self._init_image[np.isnan(init_image)] = 0.0
self.imdim = len(init_image.shape)
self.image_file_loaded = True
return
def init_image(self):
"Return initial image, if any"
# For consistency with other accessors
# if it's not there, we can't do anything
return self._init_image
def kdist(self):
if self._kdist is None:
myShape = self._init_image.shape
if self.imdim == 1:
self._kdist = self.bas.kdist1D(*myShape)
elif self.imdim == 2:
self._kdist = self.bas.kdist2D(*myShape)
elif self.imdim == 3:
self._kdist = self.bas.kdist3D(*myShape)
return self._kdist
def k_image(self):
if self._k_image is None:
if self.imdim == 1:
self._k_image = self.bas.tx1(self._init_image) # Get k-space image
elif self.imdim == 2:
self._k_image = self.bas.tx2(self._init_image) # Get k-space image
elif self.imdim == 3:
self._k_image = self.bas.txn(self._init_image) # Get k-space image
return self._k_image
    def _final_setup(self):
        """Setup after image loaded and all other parameters are set.
        This used to be part of load_image, but was vulnerable to
        making calculations based on parameters that would later change.
        This should mostly be delegated to a suitable basis object.
        """
        if self.basis == "Fourier": # Add other basis functions as else...
            self._mod_image = abs(self.k_image()) # Get modulus image in k-space
            # sp.angle is deprecated (np.angle is the supported spelling)
            self._phase_image = np.angle(self.k_image()) # Get phase image in k-space
            # self.data_std = self.likelihood_scale*self.mod_image
            self.image_exists = True
        # NOTE(review): for a non-Fourier basis the modulus/phase caches stay
        # None -- presumably other bases are not implemented yet; confirm.
        # Set prior via orthogonal-space parameter function
        self.set_prior_func_type(self.param_func_type)
        # Set Rice parameter in case you use it
        # self.rice_arg = self.mod_image/self.likelihood_scale
        #### - The following doesn't seem to work, i.e. de-emphasizes - ####
        #### - prior to too large an extent - need to investigate this - ####
        # Do normalization
        # # Since modulus and parameter functions are positive definite
        # don't need to square them (i.e. k-space integral) to get power
        # self.norm = (np.sum(self.mod_image))/(np.sum(self.prior_mean))
        # self.prior_mean = self.norm*self.prior_mean
        # self.prior_std = self.norm*self.prior_std
        # data std is regarded as constant, related to SNR
        # need to figure out best way to estimate and normalize
        #### ####
        #### ####
        return
def mod_image(self):
"Return modulus of k-space image"
if self._mod_image is None:
self._final_setup()
return self._mod_image
def phase_image(self):
"return phase of k-space image"
if self._phase_image is None:
self._final_setup()
return self._phase_image
def final_image(self):
"return final image in conventional space"
if self._final_image is None:
self.BIFS_MAP()
return self._final_image
def bifsk_image(self):
"return final image in k-space"
if self._bifsk_image is None:
self.BIFS_MAP()
return self._bifsk_image
def add_bump(self,my_key,position,amplitude,width):
"""
adds a bump filter to the self.bumps dictionalry
Inputs:
my_key - the name of an appropriate scipy.signal shape
position - fraction of kmax for location of bump filter
amplitude - fraction of maximum of parameter function
for amplitudeof of bump filter
width - fraction of kmax for width of bump filter
Outputs:
adds bump filter to prior
"""
self._invalidate_final()
return self.prior_object().add_bump(my_key, position, amplitude, width)
    def set_prior_func_type(self, pft : str):
        """ Set up prior object.

        pft is the name of a type of prior function (one of
        self.param_func_choices).

        If no image is loaded yet, only the requested type is recorded;
        the actual prior object is built lazily once an image exists.
        Raises RuntimeError for an unrecognized type (Fourier basis only).
        """
        if not self.image_file_loaded:
            self.param_func_type = pft
            self._prior = None
            # Nothing more can be done until image is loaded
            return
        if self._prior is not None and pft == self._prior.name():
            # already have a prior of the requested type; nothing to do
            return
        self._invalidate_final()
        # test ##########
        # The following doesn't work and I don't know why
        # It seems that the prior function (in particular the decay
        # function) should be normalized by the zero k-space value
        # of the image (i.e. the total "power") rather than an arbitrary
        # value like 500
        # self.bvec[1] = self.mod_image[0,0]
        # print("Zero k-space value:",self.mod_image[0,0])
        # self.bvec[1] = 50.0
        # test ##########
        if self.basis == "Fourier": # the usual re. elses for other tx spaces
            # Try setting bvec[1] = self.mod_image[0]
            # if self.imdim == 1:
            #     self.bvec[1] = self.mod_image[0]
            # elif self.imdim == 2:
            #     self.bvec[1] = self.mod_image[0,0]
            # elif self.imdim == 3:
            #     self.bvec[1] = self.mod_image[0,0,0]
            # else:
            #     pass
            if pft == "Inverse Power Decay":
                self._prior = FP.InversePowerDecayPrior(self.bas, self.kdist(), scale = self.prior_scale, scale_origin = self.prior_scale_orig)
            elif pft == "Banded Inverse Power Decay":
                self._prior = FP.BandedInversePowerDecayPrior(self.bas, self.kdist(), scale = self.prior_scale, scale_origin = self.prior_scale_orig)
            elif pft == "Linear Decay":
                self._prior = FP.LinearDecayPrior(self.bas, self.kdist(), scale = self.prior_scale, scale_origin = self.prior_scale_orig)
            elif pft == "Empirical":
                # This should already have been handled via set_empirical()
                assert self._prior.name() == "Empirical"
            else:
                raise RuntimeError("Please specify recognized transform space parameter function, one of:"+
                    ", ".join(self.param_func_choices))
        self.param_func_type = pft
def prior_object(self, invalidate=True):
""" Return object representing the prior for possible editing.
Edits may change the type-specific parameters of the object,
but not the type (functional form) of the object.
The ugly name is a result of prior already being used as a string.
It should be the case that self.prior == self.prior_object().distribution().
Also, the whole interface is ugly.
Set invalidate=False only if you are not modifying the prior.
"""
if invalidate:
self._invalidate_final()
if self._prior is None:
self.set_prior_func_type(self.param_func_type)
return self._prior
def load_empirical(self, fname):
"""Load empirical prior from named file and set mode to Empirical
"""
x = np.load(fname)
return self.set_empirical(x)
def set_empirical(self, x):
"""x is an object with the mean and sd of distn at each voxel
It is presumed to be empirically derived, though you could make one up
if you wanted to.
Sets prior scale to 1, since the default value is very small.
You can and probably should make it larger via the Gaussian gui specification.
Note that because this requires self.bas and self.kdist the *image must be loaded first*.
"""
self._invalidate_final()
self._prior = EP.EmpiricalPrior(x, self.bas, self.kdist(), scale = 1.0, scale_origin = self.prior_scale_orig)
self.param_func_type = "Empirical"
def bifs_map_gauss_gauss(self, prior, mf, ds2):
"""
returns MAP estimate for posterior function using
gaussian prior and likelihood and uses analytic
conjugate function
Inputs:
prior - <AbstractPrior>
mf - k-space modulus (from data)
ds2 - square of likelihood std
Outputs:
MAP estimate
"""
# return((pm/ps2 + mf/ms2) / (1./ps2 + 1./ms2))
# more efficient:
return((prior.mean()*ds2 + mf*prior.var()) / (prior.var() + ds2))
# Don't think the arguments to the prior and likelihood are
# right yet, e.g. maybe need self.prior_mean_orig as argument
# for scale factor of Gaussian prior - need to estimate the
# the scale factor for the Rician from the signal to noise
# in the image... KY
def bifs_map_gauss_rice(self, prior, mf, ds):
"""
returns MAP estimate for posterior function using
gaussian prior and rician likelihood and uses uses analytic
function derived using sympy and the modified Bessel
function approximation from the paper cited above
Inputs:
prior - <AbstractPrior>
mf - k-space modulus (from data)
ds - data std
Outputs:
MAP estimate
"""
# Kludge - slows things down but we use the Gaussian conjugate
# prior calculation to estimate where the MAP value will be and
# ergo estimate which of the exponential coefficient values to use
# for the modified Bessel function approximation
#
conj = self.bifs_map_gauss_gauss(prior, self.mod_image, self.ksd2)
d = (2*ds**2 - 2*prior.var())
dabs = np.abs(d)
# Estimate which Bessel approximation coefficients
# to use based on the Gaussian conjugate MAP value
sn = np.zeros(d.shape)
sn = where(np.abs(d) > 0,np.sign(d),-1.0)
denom = sn*np.maximum(np.abs(d),self.rice_denom_cutoff)
b = mf/ds
result = 0.0
for i in range(self.bsa.shape[-1]):
if self.imdim == 1:
ba = self.bsa[:,i]
elif self.imdim == 2:
ba = self.bsa[:,:,i]
elif self.imdim == 3:
ba = self.bsa[:,:,:,i]
else:
pass
num = (-(b*ba*ds*prior.var() - mf*ds**2 + 2*mf*prior.var() - ds**2*prior.mean() +
ds*np.sqrt(b**2*ba**2*prior.var()**2 + 2*b*ba*mf*ds*prior.var() - 2*b*ba*ds*prior.mean()*prior.var() +
mf**2*ds**2 - 2*mf*ds**2*prior.mean() + ds**2*prior.mean()**2 - 4*ds**2*prior.var() + 4*prior.var()**2)))
### Note - the closer the scales get to each other
# (ergo the pathalogical blow up), the MAP estimate
# should actually get closer to the average of the
# the prior mean and likelihood mean - that's the
# logic for:
# denom = np.where(dabs < self.rice_denom_cutoff,(2*num)/(mf+pm),dabs)
### Doesn't seem to work so back to above denom setting with
### cutoff
result += (num/denom)/len(ba.shape)
# Need to include the sign part here in case the setting
# for the denominator screwed up the sign
return np.sign(result)*result
    def BIFS_MAP(self):
        """
        performs MAP optimization individually at each k-space
        point using the MAP estimation functions above,
        recombines the results, performs inverse transform and returns final image.

        This seemed like a good place to use the multiprocessing package, but initial
        testing found that to be slower, and so we removed the code.

        Inputs:

        Outputs:
            Sets final_image based on performing k-space MAP estimation
        """
        # Break up image(s) into linear array(s) for sending off
        # to multiprocessing - this is stupid and unecessarily
        # time consuming but I still need to figure out how to send
        # chunks of multivariate arrays to a multiprocessing pool
        if not self.image_file_loaded:
            print ("Error: Need to load an image before running MAP")
            return
        self._final_setup()
        # Square of likelihood
        self.ksd2 = self.likelihood_scale**2
        # self.data_std2 = self.data_std**2
        # Rician parameter
        # self.rice_arg = self.mod_image/self.likelihood_scale
        # NOTE(review): self._prior is used directly below; presumably
        # _final_setup() has already built it -- confirm it cannot be None.
        if self.prior == "Gaussian" and self.likelihood == "Gaussian":
            self._bifsk_image = self.bifs_map_gauss_gauss(self._prior, self.mod_image(), self.ksd2)
        elif self.prior == "Gaussian" and self.likelihood == "Rician":
            # Use the Gaussian conjugate estimate to choose, per voxel,
            # which band of Bessel-approximation coefficients to use.
            conj = self.bifs_map_gauss_gauss(self._prior, self.mod_image(), self.ksd2)
            besind = np.zeros(self._init_image.shape, dtype=int)
            besind[np.where(conj > self.bessel_approx_lims[1])] = 1
            besind[np.where(conj > self.bessel_approx_lims[2])] = 2
            besind[np.where(conj > self.bessel_approx_lims[3])] = 3
            self.bsa = self.bessel_approx_array[besind,:]
            self._bifsk_image = self.bifs_map_gauss_rice(self._prior, self.mod_image(), self.likelihood_scale)
        else:
            # unsupported prior/likelihood combination: leaves
            # self._bifsk_image unset (None) -- the inverse transform
            # below would then fail; presumably callers only use the
            # combinations above.
            pass
        # Send back to image space
        if self.basis == "Fourier": # usual add else for other tx
            if self.imdim == 1:
                self._final_image = np.real(self.bas.itx1(self._bifsk_image*np.exp(1j*self.phase_image())))
            elif self.imdim == 2:
                self._final_image = np.real(self.bas.itx2(self._bifsk_image*np.exp(1j*self.phase_image())))
            elif self.imdim == 3:
                self._final_image = np.real(self.bas.itxn(self._bifsk_image*np.exp(1j*self.phase_image())))
        return
| StarcoderdataPython |
3334228 | #The MIT License (MIT)
#
#Copyright (c) 2016 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import machine
class max31856(object):
    """Read temperature from a MAX31856 thermocouple chip over bit-banged
    SPI on GPIO pins (tested on a Feather HUZZAH ESP8266 with
    MicroPython 1.8.5, esp8266-20161017-v1.8.5.bin).

    Fix vs. original: the fault checks compared ``fault & mask == 1``,
    which is only ever true for the 0x01 mask, so seven of the eight
    fault conditions could never raise.  They are now plain truthiness
    tests on the masked value.
    """

    # Fault Status register (0x0F) bits, checked MSB to LSB.
    _FAULTS = (
        (0x80, "Cold Junction Out-of-Range"),
        (0x40, "Thermocouple Out-of-Range"),
        (0x20, "Cold-Junction High Fault"),
        (0x10, "Cold-Junction Low Fault"),
        (0x08, "Thermocouple Temperature High Fault"),
        (0x04, "Thermocouple Temperature Low Fault"),
        (0x02, "Overvoltage or Undervoltage Input Fault"),
        (0x01, "Thermocouple Open-Circuit Fault"),
    )

    def __init__(self, csPin = 15, misoPin = 12, mosiPin = 13, clkPin = 14):
        """Record pin assignments, configure the GPIO lines, and select
        the thermocouple type."""
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()
        #
        # Config Register 1 (written at address 0x01)
        # ------------------
        # bit 7: Reserved -> 0
        # bits 6-4: Averaging Mode, 1 sample -> 000 (default)
        # bits 3-0: Thermocouple Type -> K Type -> 0011 (default)
        #
        self.writeRegister(1, 0x03)

    def setupGPIO(self):
        """Set pin directions and idle levels: CS high (deselected),
        clock and MOSI low."""
        machine.Pin(self.csPin, machine.Pin.OUT)
        machine.Pin(self.misoPin, machine.Pin.IN)
        machine.Pin(self.mosiPin, machine.Pin.OUT)
        machine.Pin(self.clkPin, machine.Pin.OUT)
        machine.Pin(self.csPin).value(1)
        machine.Pin(self.clkPin).value(0)
        machine.Pin(self.mosiPin).value(0)

    def readThermocoupleTemp(self):
        """Trigger a conversion and return the thermocouple temperature
        in degrees Celsius.

        Raises FaultError if the chip's fault status register reports
        any fault condition.
        """
        self.requestTempConv()
        # read 4 registers starting with register 0x0C:
        # LTCBH, LTCBM, LTCBL (19-bit temperature) and the fault register
        out = self.readRegisters(0x0c, 4)
        [tc_highByte, tc_middleByte, tc_lowByte] = [out[0], out[1], out[2]]
        temp = ((tc_highByte << 16) | (tc_middleByte << 8) | tc_lowByte) >> 5
        if (tc_highByte & 0x80):
            # negative value: sign-extend the 19-bit two's-complement number
            temp -= 0x80000
        # LSB = 2^-7 degC
        temp_C = temp * 0.0078125
        fault = out[3]
        for mask, message in self._FAULTS:
            if fault & mask:
                raise FaultError(message)
        return temp_C

    def readJunctionTemp(self):
        """Trigger a conversion and return the cold-junction temperature
        in degrees Celsius."""
        self.requestTempConv()
        # read 3 registers starting with register 9:
        # cold-junction offset, then CJTH/CJTL (14-bit temperature)
        out = self.readRegisters(0x09, 3)
        offset = out[0]
        [junc_msb, junc_lsb] = [out[1], out[2]]
        temp = ((junc_msb << 8) | junc_lsb) >> 2
        # NOTE(review): the raw offset register is added unscaled and
        # unsigned here, exactly as in the original -- confirm against
        # the datasheet if the offset register is ever non-zero.
        temp = offset + temp
        if (junc_msb & 0x80):
            # negative value: sign-extend the 14-bit two's-complement number
            temp -= 0x4000
        # LSB = 2^-6 degC
        temp_C = temp * 0.015625
        return temp_C

    def requestTempConv(self):
        """Request a one-shot temperature conversion and wait for it.

        Config Register 0 is written with 0x42:
          bit 7: Conversion Mode -> 0 (Normally Off Mode)
          bit 6: 1-shot -> 1 (ON)
          bits 5-4: open-circuit fault detection -> 00 (off)
          bit 3: Cold-junction temperature sensor enabled -> 0 (default)
          bit 2: Fault Mode -> 0 (default)
          bit 1: fault status clear -> 1 (clear any fault)
          bit 0: 50/60 Hz filter select -> 0 (60Hz)
        """
        self.writeRegister(0, 0x42)
        # conversion time is less than 150ms; give it 200ms
        time.sleep(.2)

    def writeRegister(self, regNum, dataByte):
        """Write one data byte to register regNum."""
        machine.Pin(self.csPin).value(0)
        # set the top bit of the address to select 'write register value'
        addressByte = 0x80 | regNum
        # first byte is the address byte, then the data byte
        self.sendByte(addressByte)
        self.sendByte(dataByte)
        machine.Pin(self.csPin).value(1)

    def readRegisters(self, regNumStart, numRegisters):
        """Read numRegisters consecutive registers starting at
        regNumStart and return their values as a list of ints."""
        out = []
        machine.Pin(self.csPin).value(0)
        # top bit clear selects 'read register value'
        self.sendByte(regNumStart)
        for byte in range(numRegisters):
            out.append(self.recvByte())
        machine.Pin(self.csPin).value(1)
        return out

    def sendByte(self, byte):
        """Clock one byte out on MOSI, MSB first."""
        for bit in range(8):
            machine.Pin(self.clkPin).value(1)
            if (byte & 0x80):
                machine.Pin(self.mosiPin).value(1)
            else:
                machine.Pin(self.mosiPin).value(0)
            byte <<= 1
            machine.Pin(self.clkPin).value(0)

    def recvByte(self):
        """Clock one byte in from MISO, MSB first, and return it."""
        byte = 0x00
        for bit in range(8):
            machine.Pin(self.clkPin).value(1)
            byte <<= 1
            if machine.Pin(self.misoPin).value():
                byte |= 0x1
            machine.Pin(self.clkPin).value(0)
        return byte
class FaultError(Exception):
    """Raised when the MAX31856 fault status register reports a fault."""
if __name__ == "__main__":
    # Demo: read and print the thermocouple and cold-junction temperatures.
    # Fix vs. original: use the class defined above directly instead of
    # re-importing this module as `max31856`, and avoid binding the result
    # to `max` (which shadowed the builtin).
    csPin = 15
    misoPin = 12
    mosiPin = 13
    clkPin = 14
    sensor = max31856(csPin, misoPin, mosiPin, clkPin)
    thermoTempC = sensor.readThermocoupleTemp()
    thermoTempF = (thermoTempC * 9.0/5.0) + 32
    print ("Thermocouple Temp: %f degF" % thermoTempF)
    print ("Thermocouple Temp: %f degC" % thermoTempC)
    juncTempC = sensor.readJunctionTemp()
    juncTempF = (juncTempC * 9.0/5.0) + 32
    print ("Cold Junction Temp: %f degF" % juncTempF)
    print ("Cold Junction Temp: %f degC" % juncTempC)
6509619 | <gh_stars>10-100
""" The Proxy Pattern
Notes:
As the name suggests, the pattern involves creating a proxy or an intermediary
interface between an object or service and its user. The proxy allows the user
to access the object or service through it, while adding the possibility to
wrap calls with additional functionality. This pattern is particularly useful in
situations where intercepting a call can create meaningful optimizations
(caching/queuing) or add security (authentication/sanitization).
This example simulates the process of sending a request to interact with a
resource on a server via a proxy. The proxy implemented here stores a pointer to
a cached version of the resource. For read requests, it checks the cache before
pinging the server. It also handles updating the cache for writes and fresh
reads.
"""
class ServerResource:
    """Interface for reading and writing a resource that lives on a server.

    It knows nothing about the proxy, but we assume all access goes
    through the proxy (otherwise the caching model would not work).
    """

    def read(self, query):
        """Simulate a server read; this demo only logs the access."""
        print("\t" + "Reading from {}...".format(self.__class__.__name__))

    def write(self, data):
        """Simulate a server write; this demo only logs the access."""
        print("\t" + "Writing to {}...".format(self.__class__.__name__))
class CachedResource(ServerResource):
    """A clone of the server's resource, sharing the same interface.

    Essentially a copy of the original resource that the proxy is
    responsible for keeping up to date.
    """
    pass
class Proxy:
    """All requests to read or write a server resource pass through here.

    The interface happens to mirror ServerResource in this example, but
    deliberately without an inheritance relationship: a proxy may front
    more than one resource and need not be an exact representation of the
    original -- it only has to give the user indirect access to it.
    """

    def __init__(self):
        """Connect to both the server resource and its cache."""
        self._resource = ServerResource()
        self._cache = CachedResource()

    def read(self, query):
        """Serve a read from the cache when possible.

        On a cache miss, query the server, refresh the cache with the
        response, and return the response to the user.
        """
        hit = self._cache.read(query)
        if hit:
            print("\t" + "Returning data from cache...")
            return hit
        print("\t" + "No cached data found...")
        fresh = self._resource.read(query)
        self._cache.write(fresh)
        print("\t" + "Returning data from server...")
        return fresh

    def write(self, data):
        """Write-through: update the cache first, then the server's
        resource, and return the server's response."""
        self._cache.write(data)
        return self._resource.write(data)
if __name__ == "__main__":
    # Small demonstration run: one read and one write through the proxy.
    demo = Proxy()
    print("Making Read Query:")
    demo.read("Sample Query String")
    print("\n")
    print("Making Write Query:")
    demo.write("Sample Data")
    print("\n")
| StarcoderdataPython |
8174966 | # -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import torch
def _potentialnet_config(subset, split, num_epochs):
    """Build a PotentialNet configuration for a PDBBind subset and split.

    The 14 hand-copied dicts in the original file differed only in
    subset-dependent widths/steps, the split name, and num_epochs; this
    builder removes that duplication while producing identical dicts
    (including key order).
    """
    cfg = {
        'dataset': 'PDBBind',
        'subset': subset,
        'load_binding_pocket': True,
    }
    if subset == 'refined':
        # the core set is held out from the refined training data
        cfg['remove_coreset_from_refinedset'] = True
    width = 128 if subset == 'core' else 90
    cfg.update({
        'random_seed': 123,
        'frac_train': 0.8,
        'frac_val': 0.2,
        'frac_test': 0.,
        'batch_size': 40,
        'shuffle': False,
        'max_num_neighbors': 5,
        'distance_bins': [1.5, 2.5, 3.5, 4.5],
        'f_in': 40,
        'f_bond': 73,  # has to be larger than f_in
        'f_gather': width,
        'f_spatial': width,  # better to be the same as f_gather
        'n_rows_fc': [32],
        'n_bond_conv_steps': 2,
        'n_spatial_conv_steps': 1 if subset == 'core' else 2,
        'dropouts': [0.25, 0.25, 0.25],
        'lr': 0.001,
        'num_epochs': num_epochs,
        'wd': 1e-07,
        'metrics': ['r2', 'mae'],
        'split': split,
    })
    return cfg

def _acnn_config(subset, split, num_epochs):
    """Build an ACNN configuration for a PDBBind subset and split.

    Model hyperparameters (hidden sizes, init stddevs, dropouts, atomic
    numbers, radial bins) depend only on the subset; the split name and
    num_epochs vary per experiment.
    """
    if subset == 'core':
        hidden_sizes = [32, 32, 16]
        weight_init_stddevs = [1. / float(np.sqrt(32)), 1. / float(np.sqrt(32)),
                               1. / float(np.sqrt(16)), 0.01]
        dropouts = [0., 0., 0.]
        atomic_numbers = [
            1., 6., 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35., 53.]
        radial = [[12.0], [0.0, 4.0, 8.0], [4.0]]
    else:
        hidden_sizes = [128, 128, 64]
        weight_init_stddevs = [0.125, 0.125, 0.177, 0.01]
        dropouts = [0.4, 0.4, 0.]
        atomic_numbers = [
            1., 6., 7., 8., 9., 11., 12., 15., 16., 17., 19., 20., 25., 26., 27., 28.,
            29., 30., 34., 35., 38., 48., 53., 55., 80.]
        radial = [[12.0], [0.0, 2.0, 4.0, 6.0, 8.0], [4.0]]
    return {
        'dataset': 'PDBBind',
        'subset': subset,
        'load_binding_pocket': True,
        'random_seed': 123,
        'frac_train': 0.8,
        'frac_val': 0.,
        'frac_test': 0.2,
        'batch_size': 24,
        'shuffle': False,
        'hidden_sizes': hidden_sizes,
        'weight_init_stddevs': weight_init_stddevs,
        'dropouts': dropouts,
        'atomic_numbers_considered': torch.tensor(atomic_numbers),
        'radial': radial,
        'lr': 0.001,
        'wd': 1e-07,
        'num_epochs': num_epochs,
        'metrics': ['r2', 'mae'],
        'split': split,
    }

PotentialNet_PDBBind_core_pocket_random = _potentialnet_config('core', 'random', 2)
PotentialNet_PDBBind_core_pocket_scaffold = _potentialnet_config('core', 'scaffold', 2)
PotentialNet_PDBBind_core_pocket_stratified = _potentialnet_config('core', 'stratified', 2)
PotentialNet_PDBBind_refined_pocket_random = _potentialnet_config('refined', 'random', 2)
PotentialNet_PDBBind_refined_pocket_scaffold = _potentialnet_config('refined', 'scaffold', 1)
PotentialNet_PDBBind_refined_pocket_stratified = _potentialnet_config('refined', 'stratified', 3)

ACNN_PDBBind_core_pocket_random = _acnn_config('core', 'random', 120)
ACNN_PDBBind_core_pocket_scaffold = _acnn_config('core', 'scaffold', 170)
ACNN_PDBBind_core_pocket_stratified = _acnn_config('core', 'stratified', 110)
ACNN_PDBBind_core_pocket_temporal = _acnn_config('core', 'temporal', 80)
ACNN_PDBBind_refined_pocket_random = _acnn_config('refined', 'random', 200)
ACNN_PDBBind_refined_pocket_scaffold = _acnn_config('refined', 'scaffold', 350)
ACNN_PDBBind_refined_pocket_stratified = _acnn_config('refined', 'stratified', 400)
ACNN_PDBBind_refined_pocket_temporal = _acnn_config('refined', 'temporal', 350)

# Registry of all experiment configurations, keyed by experiment name.
experiment_configures = {
    'ACNN_PDBBind_core_pocket_random': ACNN_PDBBind_core_pocket_random,
    'ACNN_PDBBind_core_pocket_scaffold': ACNN_PDBBind_core_pocket_scaffold,
    'ACNN_PDBBind_core_pocket_stratified': ACNN_PDBBind_core_pocket_stratified,
    'ACNN_PDBBind_core_pocket_temporal': ACNN_PDBBind_core_pocket_temporal,
    'ACNN_PDBBind_refined_pocket_random': ACNN_PDBBind_refined_pocket_random,
    'ACNN_PDBBind_refined_pocket_scaffold': ACNN_PDBBind_refined_pocket_scaffold,
    'ACNN_PDBBind_refined_pocket_stratified': ACNN_PDBBind_refined_pocket_stratified,
    'ACNN_PDBBind_refined_pocket_temporal': ACNN_PDBBind_refined_pocket_temporal,
    'PotentialNet_PDBBind_core_pocket_random': PotentialNet_PDBBind_core_pocket_random,
    'PotentialNet_PDBBind_core_pocket_scaffold': PotentialNet_PDBBind_core_pocket_scaffold,
    'PotentialNet_PDBBind_core_pocket_stratified': PotentialNet_PDBBind_core_pocket_stratified,
    'PotentialNet_PDBBind_refined_pocket_random': PotentialNet_PDBBind_refined_pocket_random,
    'PotentialNet_PDBBind_refined_pocket_scaffold': PotentialNet_PDBBind_refined_pocket_scaffold,
    'PotentialNet_PDBBind_refined_pocket_stratified': PotentialNet_PDBBind_refined_pocket_stratified,
}

def get_exp_configure(exp_name):
    """Return the experiment configuration registered under ``exp_name``.

    Raises KeyError (listing the valid names) for unknown experiments.
    """
    try:
        return experiment_configures[exp_name]
    except KeyError:
        raise KeyError(
            'Unknown experiment {!r}; expected one of {}'.format(
                exp_name, sorted(experiment_configures))) from None
| StarcoderdataPython |
295309 | from sklearn.svm import SVC as _SVC
from dlex.configs import Params
class SVC(_SVC):
    """Support-vector classifier wrapping sklearn's SVC for the dlex framework.

    The kernel comes from ``params.model.kernel`` (falling back to 'rbf');
    ``gamma`` is fixed to 'scale'.

    Note: ``dataset`` is accepted for interface compatibility with other
    dlex models but is not used here.
    """

    def __init__(self, params: Params, dataset):
        super().__init__(
            gamma='scale',
            kernel=params.model.kernel or 'rbf')
        self.params = params

    def score(self, X, y, metric="acc"):
        """Return the model's score on (X, y) for the given metric.

        'acc' yields mean accuracy as a percentage (0-100).  Fix vs.
        original: an unsupported metric previously returned None silently;
        it now raises ValueError.
        """
        if metric == "acc":
            return super().score(X, y) * 100
        raise ValueError("Unsupported metric: %r" % (metric,))

    def fit(self, X, y, sample_weight=None):
        """Fit the SVM on training data; thin pass-through to sklearn."""
        return super().fit(X, y, sample_weight)

    def predict(self, X):
        """Predict class labels; thin pass-through to sklearn."""
        return super().predict(X)
| StarcoderdataPython |
265707 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import logging
from bs4 import Tag
from lncrawl.core.crawler import Crawler
logger = logging.getLogger(__name__)
search_url = '%s/?s=%s&post_type=wp-manga&author=&artist=&release='
class BoxNovelCrawler(Crawler):
    """Crawler for boxnovel.com (a WordPress "wp-manga" style site)."""

    base_url = [
        'https://boxnovel.com/',
    ]

    def search_novel(self, query):
        """Search the site and return a list of result dicts
        (``title``, ``url``, ``info``)."""
        # The site's search expects '+'-joined lowercase terms.
        query = query.lower().replace(' ', '+')
        soup = self.get_soup(search_url % (self.home_url, query))
        results = []
        for tab in soup.select('.c-tabs-item__content'):
            a = tab.select_one('.post-title h4 a')
            if not isinstance(a, Tag):
                # Skip result cards without a title link.
                continue
            latest = tab.select_one('.latest-chap .chapter a')
            latest = latest.text if isinstance(latest, Tag) else ''
            votes = tab.select_one('.rating .total_votes')
            votes = votes.text if isinstance(votes, Tag) else ''
            results.append({
                'title': a.text.strip(),
                'url': self.absolute_url(a['href']),
                'info': '%s | Rating: %s' % (latest, votes),
            })
        # end for
        return results
    # end def

    def read_novel_info(self):
        """Populate novel title/cover/author/id and the chapter list."""
        logger.debug('Visiting %s', self.novel_url)
        soup = self.get_soup(self.novel_url)
        # Title and cover are taken from the OpenGraph meta tags.
        possible_title = soup.select_one('meta[property="og:title"]')
        assert isinstance(possible_title, Tag), 'No novel title'
        self.novel_title = possible_title['content']
        logger.info('Novel title: %s', self.novel_title)
        possible_image = soup.select_one('meta[property="og:image"]')
        if isinstance(possible_image, Tag):
            self.novel_cover = possible_image['content']
        logger.info('Novel cover: %s', self.novel_cover)
        try:
            # Two links usually mean "author (translator)".
            author = soup.select('.author-content a')
            if len(author) == 2:
                self.novel_author = author[0].text + ' (' + author[1].text + ')'
            else:
                self.novel_author = author[0].text
        except Exception as e:
            logger.debug('Failed to parse novel author. Error: %s', e)
        logger.info('Novel author: %s', self.novel_author)
        possible_novel_id = soup.select_one("#manga-chapters-holder")
        assert isinstance(possible_novel_id, Tag), 'No novel id'
        self.novel_id = possible_novel_id["data-id"]
        logger.info("Novel id: %s", self.novel_id)
        # The chapter list is served by a separate AJAX endpoint.
        response = self.submit_form(self.novel_url.strip('/') + '/ajax/chapters')
        soup = self.make_soup(response)
        # The site lists chapters newest-first; reversed() restores
        # reading order.  Chapters are grouped into volumes of 100.
        for a in reversed(soup.select("li.wp-manga-chapter a")):
            chap_id = len(self.chapters) + 1
            vol_id = 1 + len(self.chapters) // 100
            if chap_id % 100 == 1:
                self.volumes.append({"id": vol_id})
            # end if
            self.chapters.append(
                {
                    "id": chap_id,
                    "volume": vol_id,
                    "title": a.text.strip(),
                    "url": self.absolute_url(a["href"]),
                }
            )
        # end for
    # end def

    def download_chapter_body(self, chapter):
        """Fetch one chapter page and return its cleaned HTML contents."""
        soup = self.get_soup(chapter['url'])
        contents = soup.select_one('div.text-left')
        assert isinstance(contents, Tag), 'No contents'
        return self.cleaner.extract_contents(contents)
    # end def
# end class
| StarcoderdataPython |
4810278 | <reponame>imaroger/sot-talos-balance
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal as assertApprox
from sot_talos_balance.euler_to_quat import EulerToQuat
from sot_talos_balance.pose_quaternion_to_matrix_homo import PoseQuaternionToMatrixHomo
from sot_talos_balance.quat_to_euler import QuatToEuler
# Smoke-tests for the SoT signal conversion entities.  Signals appear to be
# laid out as [x, y, z, <orientation...>, q1, q2] where q1/q2 are extra
# joint values (0.2, 0.6 pass through unchanged) -- TODO confirm layout.

# --- Euler to quat ---
print("--- Euler to quat ---")
# Yaw of pi about z: expected quaternion components below are (0, 0, 1, 0).
signal_in = [0.0, 0.0, 0.5, 0.0, 0.0, np.pi, 0.2, 0.6]
e2q = EulerToQuat('e2q')
e2q.euler.value = signal_in
print(e2q.euler.value)
e2q.quaternion.recompute(0)
print(e2q.quaternion.value)
assertApprox(e2q.quaternion.value, [0.0, 0.0, 0.5, 0.0, 0.0, 1.0, 0.0, 0.2, 0.6], 6)
# --- Quat to Euler ---
print("--- Quat to Euler ---")
# Inverse of the conversion above: quaternion (0, 0, 1, 0) -> yaw pi.
signal_in = [0.0, 0.0, 0.5, 0.0, 0.0, 1.0, 0.0, 0.2, 0.6]
q2e = QuatToEuler('q2e')
q2e.quaternion.value = signal_in
print(q2e.quaternion.value)
q2e.euler.recompute(0)
print(q2e.euler.value)
assertApprox(q2e.euler.value, [0.0, 0.0, 0.5, 0.0, 0.0, np.pi, 0.2, 0.6], 6)
# --- Quat to homogeneous ---
print("--- Quat to homogeneous ---")
# Same pose as a 4x4 homogeneous matrix: rotation of pi about z plus a
# translation of 0.5 along z.
signal_in = [0.0, 0.0, 0.5, 0.0, 0.0, 1.0, 0.0]
q2m = PoseQuaternionToMatrixHomo('q2m')
q2m.sin.value = signal_in
print(q2m.sin.value)
q2m.sout.recompute(0)
print(q2m.sout.value)
expected = ((-1.0, 0.0, 0.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.5), (0.0, 0.0, 0.0, 1.0))
assertApprox(q2m.sout.value, expected, 6)
| StarcoderdataPython |
11343525 | <reponame>rbotter/pyDEA
''' This module contains classes responsible for updating solution progress.
'''
class NullProgress(object):
    ''' No-op progress tracker.

        Used by the terminal application, where there is no progress bar
        and progress notifications are simply discarded.
    '''

    def set_position(self, position):
        ''' Discard the requested position.
        '''
        pass

    def increment_step(self):
        ''' Discard the increment request.
        '''
        pass
class GuiProgress(NullProgress):
    ''' Progress tracker that drives a GUI progress bar while a given
        problem is being solved.

        Attributes:
            progress_bar (ProgressBar): progress bar widget.
            step_size (double): amount added to the bar for each step.

        Args:
            progress_bar (ProgressBar): progress bar widget.
            nb_models (int): total number of DEA models, can take values
                1, 2 or 4.
            nb_sheets (int): number of sheets in solution.
    '''
    def __init__(self, progress_bar, nb_models, nb_sheets):
        self.progress_bar = progress_bar
        self.set_position(0)
        # 99.99 rather than 100 because of precision errors: the bar is
        # reset to 0 whenever its maximum value is exceeded.
        total_steps = nb_models * nb_sheets
        self.step_size = 99.99 / total_steps

    def set_position(self, position):
        ''' Move the progress bar to an absolute position.

            Args:
                position (double): new progress bar position.
        '''
        self.progress_bar['value'] = position
        self.progress_bar.update()

    def increment_step(self):
        ''' Advance the progress bar by one step.
        '''
        self.progress_bar.step(self.step_size)
        self.progress_bar.update()
| StarcoderdataPython |
211818 | <gh_stars>0
from datetime import datetime
from logging import getLogger
from typing import Any, Dict, List, Optional, Tuple
from hs_build_tools.pytest import assert_text
from hashkernel import Jsonable
from hashkernel.mold import Flator, FunctionMold, Mold
from hashkernel.smattr import SmAttr
log = getLogger(__name__)
class A:
    """ An example of SmAttr usage
    Attributes:
        i: integer
        s: string with
            default
        d: optional datetime
            attribute contributed
    """

    # NOTE: the docstring above is machinery input -- Mold(A) merges it with
    # the annotations below, and test_docstring() asserts the enriched
    # A.__doc__ verbatim.  Do not edit the docstring text.
    i: int
    s: str = "xyz"
    d: Optional[datetime]
    z: List[datetime]
    y: Dict[str, str]
def test_docstring():
    """Check that Mold(A) renders the expected field spec and enriches
    A.__doc__ with the resolved attribute types."""
    amold = Mold(A)
    # The mold's string form lists each field as name:type[=default].
    assert (
        str(amold)
        == '["i:Required[int]", "s:Required[str]=\\"xyz\\"", "d:Optional[datetime:datetime]", "z:List[datetime:datetime]", "y:Dict[str,str]"]'
    )
    # Creating the mold rewrites A.__doc__: each attribute line now carries
    # its resolved type, and defaults are spelled out.
    assert_text(
        A.__doc__,
        """
        An example of SmAttr usage
        Attributes:
            i: Required[int] integer
            s: Required[str] string with default. Default is: 'xyz'.
            d: Optional[datetime:datetime] optional datetime
            z: List[datetime:datetime]
            y: Dict[str,str]
            attribute contributed
        """,
    )
def pack_wolves(i: int, s: str = "xyz") -> Tuple[int, str]:
    """ Greeting protocol
    Args:
        s: string with
            default
    Returns:
        num_of_wolves: Pack size
        pack_name: Name of the pack
    """
    # NOTE: this docstring is parsed by FunctionMold (the named return
    # values become the output mold) and the rendered doc is asserted in
    # test_extract_molds_from_function() -- do not edit the docstring.
    return i, s
class PackId(SmAttr):
    # Structured result type for pack_wolves2(); as an SmAttr it can be
    # constructed positionally from an (nw, name) tuple.
    nw: int
    name: str
def pack_wolves2(i: int, s: str = "xyz") -> PackId:
    # Same as pack_wolves() but returns a typed PackId record; FunctionMold
    # derives the output mold from the PackId annotations instead of a
    # docstring.  (No docstring on purpose -- it would be fed to the mold.)
    return PackId((i, s))
def test_extract_molds_from_function():
    """Check that FunctionMold extracts input/output molds from both a
    docstring-annotated function and a SmAttr-returning one."""
    fn_mold = FunctionMold(pack_wolves)
    # Output mold comes from the named returns in pack_wolves' docstring.
    assert (
        str(fn_mold.out_mold)
        == '["num_of_wolves:Required[int]", "pack_name:Required[str]"]'
    )
    # The rendered doc interleaves resolved types with the docstring text.
    assert_text(
        fn_mold.dst.doc(),
        """ Greeting protocol
        Args:
            s: Required[str] string with default. Default is: 'xyz'.
            i: Required[int]
        Returns:
            num_of_wolves: Required[int] Pack size
            pack_name: Required[str] Name of the pack
        """,
    )
    # Calling the mold maps a kwargs dict to a named-output dict,
    # applying the declared default for 's'.
    assert fn_mold({"i": 5}) == {"num_of_wolves": 5, "pack_name": "xyz"}
    assert fn_mold({"i": 7, "s": "A-pack"}) == {
        "num_of_wolves": 7,
        "pack_name": "A-pack",
    }
    # pack_wolves2 returns a PackId, so output keys come from its fields.
    fn_mold2 = FunctionMold(pack_wolves2)
    assert fn_mold2({"i": 5}) == {"nw": 5, "name": "xyz"}
    assert fn_mold2({"i": 7, "s": "A-pack"}) == {"nw": 7, "name": "A-pack"}
class JsonableMemoryFlator(Flator):
def __init__(self):
self.store = []
def is_applied(self, cls: type):
return issubclass(cls, Jsonable)
def inflate(self, k: str, cls: type):
return cls(self.store[int(k)])
def deflate(self, data: Any):
k = str(len(self))
self.store.append(str(data))
return k
def __len__(self):
return len(self.store)
def test_flator():
    """Round-trip a SmAttr argument through deflate/inflate on an in-memory
    flator, and check the mold call still works on the original input."""
    jmf = JsonableMemoryFlator()

    class X(SmAttr):
        a: int
        x: str
        q: bool

    def fn(z: X, t: int) -> bool:
        return True

    fn_mold = FunctionMold(fn)
    orig = {"z": X(a=5, x="s", q=False), "t": 6}
    # Only the SmAttr value is deflated (replaced by a store key);
    # the plain int passes through untouched.
    deflated = fn_mold.in_mold.deflate(orig, jmf)
    assert deflated == {"z": "0", "t": 6}
    assert len(jmf) == 1
    # Inflating restores an equal X instance from the stored string.
    back = fn_mold.in_mold.inflate(deflated, jmf)
    assert orig == back
    # '_' is the mold's name for an anonymous single return value.
    result = fn_mold(orig)
    assert result == {"_": True}
11343716 | # -*- coding: utf-8 -*-
# Copyright 2016-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - <NAME> <<EMAIL>>, 2016
# - <NAME> <<EMAIL>>, 2018
# - <NAME> <<EMAIL>>, 2018
# - <NAME> <<EMAIL>>, 2018
# - <NAME> <<EMAIL>>, 2019
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
from __future__ import print_function
from json import loads
from traceback import format_exc
from flask import Flask, Blueprint, request
from flask.views import MethodView
from rucio.api.temporary_did import (add_temporary_dids)
from rucio.common.exception import RucioException
from rucio.web.rest.flaskapi.v1.common import request_auth_env, response_headers
from rucio.web.rest.utils import generate_http_error_flask
class BulkDIDS(MethodView):

    def post(self):
        """
        Bulk add temporary data identifiers.

        .. :quickref: BulkDIDS; Bulk add temporary dids.

        :<json list dids: A list of dids.
        :status 201: Created.
        :status 400: Cannot decode json parameter list.
        :status 401: Invalid Auth Token.
        :status 500: Internal Error.
        """
        # Reject bodies that are not valid JSON with a 400.
        try:
            dids = loads(request.data)
        except ValueError:
            return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')

        try:
            add_temporary_dids(dids=dids,
                               issuer=request.environ.get('issuer'),
                               vo=request.environ.get('vo'))
        except RucioException as error:
            # Known Rucio failures are reported with their class name.
            return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
        except Exception as error:
            # Unexpected failure: dump the traceback and return the text.
            print(format_exc())
            return str(error), 500

        return 'Created', 201
def blueprint():
    """Assemble the Flask blueprint for the temporary-DID endpoints."""
    temp_did_bp = Blueprint('temporary_did', __name__, url_prefix='/tmp_dids')

    temp_did_bp.add_url_rule('', view_func=BulkDIDS.as_view('bulk_dids'),
                             methods=['post', ])

    # Authentication runs before every request; common headers after.
    temp_did_bp.before_request(request_auth_env)
    temp_did_bp.after_request(response_headers)
    return temp_did_bp
def make_doc():
    """ Only used for sphinx documentation """
    app = Flask(__name__)
    app.register_blueprint(blueprint())
    return app
| StarcoderdataPython |
6520881 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["<NAME> - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "20/04/2017"
#from comsyl.autocorrelation.SigmaMatrix import SigmaWaist, SigmaMatrixFromCovariance
from syned.storage_ring.electron_beam import ElectronBeam
# Predefined ESRF electron-beam lattices.  Second moments are given as the
# squares of the rms sizes/divergences (metres and radians).
# NOTE(review): naming convention (lb = low beta, hb = high beta, ebs = new
# EBS lattice) inferred from latticeByName() below -- confirm with authors.

# Low-beta lattice, 6.04 GeV.
lb_Ob = ElectronBeam(energy_in_GeV=6.04,
                     current        =0.2,
                     moment_xx      =(37.4e-6)**2,
                     moment_yy      =(3.5e-6)**2,
                     moment_xpxp    =(106.9e-6)**2,
                     moment_ypyp    =(1.2e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=1.06e-03)

# High-beta lattice: much larger horizontal size, smaller divergence.
hb_Ob = ElectronBeam(energy_in_GeV=6.04,
                     current        =0.2,
                     moment_xx      =(387.8e-6)**2,
                     moment_yy      =(3.5e-6)**2,
                     moment_xpxp    =(10.3e-6)**2,
                     moment_ypyp    =(1.2e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=1.06e-03)

# "New" lattice with corrected energy spread (0.95e-3).
correct_new_Ob = ElectronBeam(energy_in_GeV=6.04,
                     current        =0.2,
                     moment_xx      =(27.2e-6)**2,
                     moment_yy      =(3.4e-6)**2,
                     moment_xpxp    =(5.2e-6)**2,
                     moment_ypyp    =(1.4e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=0.95e-03)

# Same lattice expressed with explicit covariances (non-zero xx'/yy'
# cross terms, i.e. non-zero alpha).
correct_new_Ob_alpha = ElectronBeam(energy_in_GeV=6.04,
                     current        =0.2,
                     moment_xx      =9.38000281183e-10,
                     moment_yy      =1.42697722687e-11,
                     moment_xpxp    =1.942035404e-11,
                     moment_ypyp    =1.88728844475e-12,
                     moment_xxp     =-1.4299803854e-11,
                     moment_yyp     =-1.38966769839e-12,
                     energy_spread=0.95e-03)

# "New" lattice variant with 0.89e-3 energy spread.
new_Ob = ElectronBeam(energy_in_GeV=6.04,
                     current        =0.2,
                     moment_xx      =(27.2e-6)**2,
                     moment_yy      =(3.4e-6)**2,
                     moment_xpxp    =(5.2e-6)**2,
                     moment_ypyp    =(1.4e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=0.89e-03)

# like newOb but E=6.0
ebs_Ob = ElectronBeam(energy_in_GeV=6.0,
                     current        =0.2,
                     moment_xx      =(27.2e-6)**2,
                     moment_yy      =(3.4e-6)**2,
                     moment_xpxp    =(5.2e-6)**2,
                     moment_ypyp    =(1.4e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=0.89e-03)

# like newOb but E=6.0
ebs_S28D = ElectronBeam(energy_in_GeV=6.0,
                     current        =0.2,
                     moment_xx      =(30.2e-6)**2,
                     moment_yy      =(3.64e-6)**2,
                     moment_xpxp    =(4.37e-6)**2,
                     moment_ypyp    =(1.37e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=0.89e-03)

# ALBA synchrotron, 3 GeV.
alba = ElectronBeam(energy_in_GeV=3.0,
                     current        =0.2,
                     moment_xx      =(127e-6)**2,
                     moment_yy      =(5.2e-6)**2,
                     moment_xpxp    =(36.2e-6)**2,
                     moment_ypyp    =(3.5e-6)**2,
                     moment_xxp     =0.0,
                     moment_yyp     =0.0,
                     energy_spread=1e-03)
#
#
# round_new_Ob = SigmaWaist(sigma_x=0.275*27.2e-6,
# sigma_y=4*3.4e-6,
# sigma_x_prime=0.275*5.2e-6,
# sigma_y_prime=4*1.4e-6,
# sigma_dd=0.95e-03)
#
# new_Ob_alpha = SigmaMatrixFromCovariance(xx=9.38000281183e-10,
# yy=1.42697722687e-11,
# xpxp=1.942035404e-11,
# ypyp=1.88728844475e-12,
# sigma_dd=0.89e-03,
# xxp=-1.4299803854e-11,
# yyp=-1.38966769839e-12)
#
#
# new_Ob_alpha_red = SigmaMatrixFromCovariance(xx=9.38000281183e-10,
# yy=1.42697722687e-11,
# xpxp=1.942035404e-11,
# ypyp=1.88728844475e-12,
# sigma_dd=0.89e-03,
# xxp=-1.4299803854e-11 * 10**-3,
# yyp=-1.38966769839e-12 * 10**-3)
#
# new_Ob_nd = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=1e-10,
# sigma_y_prime=1e-10,
# sigma_dd=0.89e-03)
#
# delta_Ob = SigmaWaist(sigma_x=1e-10,
# sigma_y=1e-10,
# sigma_x_prime=1e-10,
# sigma_y_prime=1e-10,
# sigma_dd=1e-06)
#
# dream_Ob_01 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 0.1,
# sigma_y=3.4e-6 * 0.05 * 0.1,
# sigma_x_prime=5.2e-6 * 0.05 * 0.1,
# sigma_y_prime=1.4e-6 * 0.05 * 0.1,
# sigma_dd=0.89e-03 * 0.05 * 0.1)
#
# dream_Ob_05 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 0.5,
# sigma_y=3.4e-6 * 0.05 * 0.5,
# sigma_x_prime=5.2e-6 * 0.05 * 0.5,
# sigma_y_prime=1.4e-6 * 0.05 * 0.5,
# sigma_dd=0.89e-03 * 0.05 * 0.5)
#
# dream_Ob_1 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 1,
# sigma_y=3.4e-6 * 0.05 * 1,
# sigma_x_prime=5.2e-6 * 0.05 * 1,
# sigma_y_prime=1.4e-6 * 0.05 * 1,
# sigma_dd=0.89e-03 * 0.05 * 1)
#
# dream_Ob_4 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 4,
# sigma_y=3.4e-6 * 0.05 * 4,
# sigma_x_prime=5.2e-6 * 0.05 * 4,
# sigma_y_prime=1.4e-6 * 0.05 * 4,
# sigma_dd=0.89e-03 * 0.05 * 4)
#
# dream_Ob_8 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 8,
# sigma_y=3.4e-6 * 0.05 * 8,
# sigma_x_prime=5.2e-6 * 0.05 * 8,
# sigma_y_prime=1.4e-6 * 0.05 * 8,
# sigma_dd=0.89e-03 * 0.05 * 8)
#
# dream_Ob_12 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 12,
# sigma_y=3.4e-6 * 0.05 * 12,
# sigma_x_prime=5.2e-6 * 0.05 * 12,
# sigma_y_prime=1.4e-6 * 0.05 * 12,
# sigma_dd=0.89e-03 * 0.05 * 12)
#
# dream_Ob_16 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 16,
# sigma_y=3.4e-6 * 0.05 * 16,
# sigma_x_prime=5.2e-6 * 0.05 * 16,
# sigma_y_prime=1.4e-6 * 0.05 * 16,
# sigma_dd=0.89e-03 * 0.05 * 16)
#
# dream_Ob_20 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 20,
# sigma_y=3.4e-6 * 0.05 * 20,
# sigma_x_prime=5.2e-6 * 0.05 * 20,
# sigma_y_prime=1.4e-6 * 0.05 * 20,
# sigma_dd=0.89e-03 * 0.05 * 20)
#
# dream_Ob_24 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 24,
# sigma_y=3.4e-6 * 0.05 * 24,
# sigma_x_prime=5.2e-6 * 0.05 * 24,
# sigma_y_prime=1.4e-6 * 0.05 * 24,
# sigma_dd=0.89e-03 * 0.05 * 24)
#
# dream_Ob_28 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 28,
# sigma_y=3.4e-6 * 0.05 * 28,
# sigma_x_prime=5.2e-6 * 0.05 * 28,
# sigma_y_prime=1.4e-6 * 0.05 * 28,
# sigma_dd=0.89e-03 * 0.05 * 28)
#
# dream_trans_Ob_01 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 0.1,
# sigma_y=3.4e-6 * 0.05 * 0.1,
# sigma_x_prime=5.2e-6 * 0.05 * 0.1,
# sigma_y_prime=1.4e-6 * 0.05 * 0.1,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_05 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 0.5,
# sigma_y=3.4e-6 * 0.05 * 0.5,
# sigma_x_prime=5.2e-6 * 0.05 * 0.5,
# sigma_y_prime=1.4e-6 * 0.05 * 0.5,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_1 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 1,
# sigma_y=3.4e-6 * 0.05 * 1,
# sigma_x_prime=5.2e-6 * 0.05 * 1,
# sigma_y_prime=1.4e-6 * 0.05 * 1,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_4 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 4,
# sigma_y=3.4e-6 * 0.05 * 4,
# sigma_x_prime=5.2e-6 * 0.05 * 4,
# sigma_y_prime=1.4e-6 * 0.05 * 4,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_8 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 8,
# sigma_y=3.4e-6 * 0.05 * 8,
# sigma_x_prime=5.2e-6 * 0.05 * 8,
# sigma_y_prime=1.4e-6 * 0.05 * 8,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_12 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 12,
# sigma_y=3.4e-6 * 0.05 * 12,
# sigma_x_prime=5.2e-6 * 0.05 * 12,
# sigma_y_prime=1.4e-6 * 0.05 * 12,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_16 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 16,
# sigma_y=3.4e-6 * 0.05 * 16,
# sigma_x_prime=5.2e-6 * 0.05 * 16,
# sigma_y_prime=1.4e-6 * 0.05 * 16,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_20 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 20,
# sigma_y=3.4e-6 * 0.05 * 20,
# sigma_x_prime=5.2e-6 * 0.05 * 20,
# sigma_y_prime=1.4e-6 * 0.05 * 20,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_24 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 24,
# sigma_y=3.4e-6 * 0.05 * 24,
# sigma_x_prime=5.2e-6 * 0.05 * 24,
# sigma_y_prime=1.4e-6 * 0.05 * 24,
# sigma_dd=0.89e-03)
#
# dream_trans_Ob_28 = SigmaWaist(sigma_x=27.2e-6 * 0.05 * 28,
# sigma_y=3.4e-6 * 0.05 * 28,
# sigma_x_prime=5.2e-6 * 0.05 * 28,
# sigma_y_prime=1.4e-6 * 0.05 * 28,
# sigma_dd=0.89e-03)
#
#
# example10_ob= SigmaWaist(sigma_x=33.3317e-06,
# sigma_y=2.91204e-06,
# sigma_x_prime=16.5008e-06,
# sigma_y_prime=2.74721e-06)
#
# new_Ob_es02 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 0.2)
#
# new_Ob_es04 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 0.4)
#
# new_Ob_es06 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 0.6)
#
# new_Ob_es08 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 0.8)
#
# new_Ob_es10 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 1.0)
#
# new_Ob_es12 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 1.2)
#
# new_Ob_es14 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 1.4)
#
# new_Ob_es16 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 1.6)
#
# new_Ob_es18 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 1.8)
#
# new_Ob_es20 = SigmaWaist(sigma_x=27.2e-6,
# sigma_y=3.4e-6,
# sigma_x_prime=5.2e-6,
# sigma_y_prime=1.4e-6,
# sigma_dd=0.95e-03 * 2.0)
def latticeByName(name):
    """Return a predefined ElectronBeam lattice by its short name.

    Known names: 'low_beta', 'high_beta', 'new', 'ebs_Ob', 'ebs_S28D',
    'correct_new' and 'alba'.  Any other name raises KeyError.
    (Additional historical lattices exist commented out above.)
    """
    lattices = {
        "low_beta": lb_Ob,
        "high_beta": hb_Ob,
        "new": new_Ob,
        "ebs_Ob": ebs_Ob,
        "ebs_S28D": ebs_S28D,
        "correct_new": correct_new_Ob,
        "alba": alba,
    }
    return lattices[name]
| StarcoderdataPython |
11341086 | <reponame>benjyw/materiality.commons
# coding=utf-8
# Copyright 2016 Materiality Labs.
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands.collectstatic import Command as CollectStaticCommand
class Command(CollectStaticCommand):
    """collectstatic variant that also reads ignore patterns from a file.

    The file is named by settings.MATERIALITY_DJANGO_STATIC_IGNORE_FILE;
    blank lines and '#' comment lines in it are skipped.
    """

    help = 'Copies or symlinks static files into settings.STATIC_ROOT, reading ignore patterns from a file.'
    can_import_settings = True
    fixed_ignore_patterns = (
    )

    @classmethod
    def load_pattern_file(cls, path):
        # Keep every non-empty, non-comment line, stripped of whitespace.
        with open(path, 'r') as infile:
            stripped = (line.strip() for line in infile)
            return [pattern for pattern in stripped
                    if pattern and not pattern.startswith('#')]

    @classmethod
    def maybe_load_pattern_file(cls, path):
        # None when the path does not name an existing file (e.g. the
        # setting is unset).
        if os.path.isfile(path):
            return cls.load_pattern_file(path)
        return None

    def set_options(self, **options):
        super(Command, self).set_options(**options)
        ignore_file = getattr(settings, 'MATERIALITY_DJANGO_STATIC_IGNORE_FILE', '')
        extra_patterns = self.maybe_load_pattern_file(ignore_file)
        if extra_patterns:
            # Merge with the patterns already configured, dropping duplicates.
            merged = set(self.ignore_patterns) | set(extra_patterns)
            self.ignore_patterns = list(merged)
| StarcoderdataPython |
5050671 | <filename>src/qutip_cupy/expectation.py
"""Contains specialization functions for calculating expectation."""
import cupy as cp
def expect_cupydense(op, state):
    """
    Get the expectation value of the operator `op` over the state `state`.

    A column vector (shape ``(n, 1)``) is treated as a ket, giving
    ``state.adjoint() @ op @ state``; anything else is treated as a
    density matrix, giving ``tr(op @ state)``.
    """
    is_ket = state.shape[1] == 1
    if is_ket:
        return _expect_dense_ket(op, state)
    return _expect_dense_dense_dm(op, state)
def _expect_dense_ket(op, state):
    # <state| op |state> for a ket state; .item() converts the 0-d CuPy
    # result to a Python scalar.
    _check_shape_ket(op, state)
    applied = op._cp @ state._cp
    return cp.vdot(state._cp, applied).item()
def _check_shape_dm(op, state):
if (
op.shape[1] != state.shape[0] # Matrix multiplication
or state.shape[0] != state.shape[1] # State is square
or op.shape[0] != op.shape[1] # Op is square
):
raise ValueError(
"incorrect input shapes " + str(op.shape) + " and " + str(state.shape)
)
_expect_dense_kernel = cp.RawKernel(
r"""
#include <cupy/complex.cuh>
extern "C" __global__
void expect_dens(const complex<double>* op,const complex<double>* dm,
const int size, complex<double>* y) {
for (unsigned int tidx = blockDim.x * blockIdx.x + threadIdx.x; tidx < size;
tidx += gridDim.x * blockDim.x) {
for(unsigned int j= 0; j<size; j++){
y[tidx] += op[j*size+tidx] * dm[tidx*size+j];
};
};
}""",
"expect_dens",
)
def _expect_dense_dense_dm(op, state):
    """tr(op @ state) for a dense density matrix, via the raw kernel.

    The kernel writes one partial (per-row) contribution into ``out``;
    the on-device sum of ``out`` is the trace, returned as a scalar.
    """
    _check_shape_dm(op, state)
    size = op.shape[0]
    out = cp.zeros((size,), dtype=cp.complex128)
    # NOTE (from original): a batch size this large may hinder performance
    # when other applications run on the same GPU.
    threads_per_block = 1024
    blocks = (size + threads_per_block - 1) // threads_per_block
    _expect_dense_kernel((blocks,), (threads_per_block,),
                         (op._cp, state._cp, size, out))
    return out.sum().item()
def _check_shape_ket(op, state):
if (
op.shape[1] != state.shape[0] # Matrix multiplication
or state.shape[1] != 1 # State is ket
or op.shape[0] != op.shape[1] # op must be square matrix
):
raise ValueError(
"incorrect input shapes " + str(op.shape) + " and " + str(state.shape)
)
| StarcoderdataPython |
12813235 | <filename>config/helpers.py
import os
import random
import secrets
import string

import sendgrid
from sendgrid.helpers.mail import *

from config import mails
def random_string(length):
    """Return a random alphanumeric string of ``length`` characters.

    Uses the ``secrets`` module rather than ``random`` because callers use
    the result as a secret key delivered by e-mail (see ``sendMail``), so
    the value must be unpredictable; ``random`` is not cryptographically
    secure.

    Args:
        length (int): number of characters to generate.

    Returns:
        str: random string drawn from ASCII letters and digits.
    """
    pool = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(pool) for _ in range(length))
def sendMail(to_addr_list, subject, message):
    """Send an HTML e-mail through SendGrid.

    Args:
        to_addr_list: recipient address.  NOTE(review): despite the name,
            this is passed straight to ``Email(...)``, which appears to
            expect a single address string, not a list -- confirm callers.
        subject: message subject line.
        message: HTML body.

    Returns:
        dict: ``{"status": "success", ...}`` with the SendGrid response
        code/body on success, or ``{"status": "fail", ...}`` if anything
        raised (the broad except hides the real cause -- see note below).
    """
    sg = sendgrid.SendGridAPIClient(apikey=mails.apikey)
    from_email = Email("<EMAIL>")
    #subject = "Hello World from the SendGrid Python Library!"
    try:
        to_email = Email(to_addr_list)
        content = Content("text/html", message)
        mail = Mail(from_email, subject, to_email, content)
        response = sg.client.mail.send.post(request_body=mail.get())
        print(response.status_code)
        print(response.body)
        print(response.headers)
        # Success message mentions a "secret key" because this helper is
        # used to deliver keys generated by random_string().
        return {"status":"success", "message":"A secret key is sent, Please check your mail", "code":response.status_code, "body":response.body}
    except Exception as e:
        # NOTE(review): the exception is swallowed; the returned message
        # blames "invalid mail setup" regardless of the actual error.
        return {"status":"fail","message":"Failed to send key, invalid mail setup"}
6628445 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Number of departments per French region.
# NOTE(review): 'Auvergne-Rhônes-Alpes' is spelled with an extra 's'
# (official name: Auvergne-Rhône-Alpes); the same spelling is used in
# both dicts, so cross-lookups remain consistent -- fix both together
# if ever corrected.
combien_de_departements = {
    'Auvergne-Rhônes-Alpes': 12,
    'Île-de-France': 8,
    'Normandie': 5,
    'Provence-Alpes-Côte d\'Azur': 8,
    'Nouvelle-Aquitaine': 12,
    'Grand Est': 10,
    'Occitanie': 13,
    'Bretagne': 4,
    'Nord-Pas-de-Calais': 5
}

# City -> region lookup; region names match the keys of
# combien_de_departements above.
villes_et_regions = {
    'Lyon': 'Auvergne-Rhônes-Alpes',
    'Paris': 'Île-de-France',
    'Caen': 'Normandie',
    'Marseille': 'Provence-Alpes-Côte d\'Azur',
    'Le Mont-Saint-Michel': 'Normandie',
    'Grenoble': 'Auvergne-Rhônes-Alpes',
    'Bordeaux': 'Nouvelle-Aquitaine',
    'Strasbourg': 'Grand Est',
    'Perpignan': 'Occitanie',
    'Saint-Malo': 'Bretagne',
    'Lille': 'Nord-Pas-de-Calais'
}
| StarcoderdataPython |
9740699 | <gh_stars>0
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from docutils.utils import Reporter as _Reporter
def create_logger(verbose=False):
    """Create autodoc logger.

    Attaches a stream handler with a message-only format to the
    'autodoc' logger and sets its level.

    Args:
        verbose: Set debug level.

    Returns:
        Logger instance.
    """
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(message)s'))

    logger = logging.getLogger('autodoc')
    logger.addHandler(handler)
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    return logger
# Levels mapping to convert from docutils levels to logging levels.
# docutils Reporter levels are small integers (DEBUG_LEVEL..SEVERE_LEVEL);
# SEVERE is mapped to logging.FATAL since logging has no SEVERE level.
_levels = {
    _Reporter.DEBUG_LEVEL: logging.DEBUG,
    _Reporter.INFO_LEVEL: logging.INFO,
    _Reporter.WARNING_LEVEL: logging.WARNING,
    _Reporter.ERROR_LEVEL: logging.ERROR,
    _Reporter.SEVERE_LEVEL: logging.FATAL
}
class Codes:
    """Report codes.

    Grouping (as used below): D30x are docstring-analysis findings,
    D4xx cover transform and I/O errors; INTERNAL is reserved for
    internal failures.
    """
    # -- Docstring analysis codes ---------------------------------------------
    #: Something is too complex (like 'specification too complex').
    COMPLEX = 'D301'
    #: Duplicate (duplicate declaration).
    DUPLICATE = 'D302'
    #: Something is incorrect (incorrect signature).
    INCORRECT = 'D303'
    #: Something unknown.
    UNKNOWN = 'D304'
    #: Empty state.
    EMPTY = 'D305'
    #: Missing state.
    MISSING = 'D306'
    #: Mismatch in something.
    MISMATCH = 'D307'
    #: Empty/missing docstring.
    NODOC = 'D308'
    #: Docstring parsing error.
    PARSERR = 'D309'
    # -- Other codes ----------------------------------------------------------
    #: Internal errors.
    INTERNAL = 'INTERNAL'
    #: Information.
    INFO = 'D300'
    #: Transform errors.
    ERROR = 'D401'
    #: I/O errors.
    IOERROR = 'D402'
class BaseReporter:
    """Base class for autodoc reporters.

    Tracks the definition currently being processed and converts
    docutils system messages into :meth:`add_report` calls; subclasses
    decide how reports are actually emitted.
    """

    def __init__(self):
        self.definition = None

    def reset(self):
        """Reset reporter's state."""
        self.definition = None

    def document_message(self, msg):
        """Collect a docutils reporter message and forward it as a report."""
        if self.definition is None:
            line = col = None
        else:
            line, col = self.definition.get_start_pos()

        if msg.hasattr('autodoc'):
            # Message produced by autodoc itself: use its own code
            # (falling back to D201).
            self.add_report(msg.get('code', 'D201'), msg.children[0].astext(),
                            line, col)
            return

        # Plain docutils message: derive a D1xx code and a logging level
        # from the docutils severity; newlines are flattened to spaces.
        level = msg.get('level')
        text = msg.children[0].astext().replace('\n', ' ')
        self.add_report('D1{:02d}'.format(level), text, line, col,
                        _levels.get(level, logging.DEBUG))

    def add_report(self, code, message, line=None, col=None, level=None):
        """Add report.

        Args:
            code: Report code.
            message: Report message.
            line: Line number in the content.
            col: Column number in the content.
            level: Logging level. Info level is used if not specified.
        """
        pass
class DomainReporter(BaseReporter):
    """Reporter that logs formatted reports through a domain's logger.

    Reports look like ``<file>:<line>:<col>: [<code>:<domain>:<name>] <msg>``,
    where the position and definition name parts are included only when
    known.
    """

    #: Message format.
    fmt = u'{path}: [{code}] {msg}'

    def __init__(self, domain):
        super(DomainReporter, self).__init__()
        self.domain = domain
        self._env = None
        self._filename = None

    @property
    def env(self):
        return self._env

    @env.setter
    def env(self, value):
        # Setting the environment also pulls out the current definition
        # and the filename to report against.
        self._env = value
        self.definition = value.get('definition')
        self._filename = value.get('report_filename')

    def reset(self):
        super(DomainReporter, self).reset()
        self._filename = None
        self._env = None

    def add_report(self, code, message, line=0, col=0, level=None):
        level = level or logging.INFO

        name = None
        if self.definition is not None:
            def_line, def_col = self.definition.get_start_pos()
            # Fall back to the definition's own position when the caller
            # passed 0 (but a caller-supplied None is kept as-is).
            if line == 0:
                line = def_line
            if col == 0:
                col = def_col
            name = self.definition.name

        segments = [self._filename] if self._filename else []
        if line:
            # NOTE(review): the original comment claimed positions are
            # converted from zero-based to 1-based ("+1"), but no
            # increment is applied; behaviour preserved as-is.
            segments.append(str(line))
            segments.append(str(col))

        code_parts = [code, self.domain.name]
        if name:
            code_parts.append(name)

        text = self.fmt.format(path=':'.join(segments),
                               code=':'.join(code_parts), msg=message)
        self.domain.logger.log(level, text)
| StarcoderdataPython |
9606490 | import numpy as np
import gensim
from numba import njit
from gensim.models import Word2Vec
from distutils.version import LooseVersion
from sklearn.linear_model import LogisticRegression
from graphgallery import functional as gf
from graphgallery.gallery import Common
from .sklearn_model import SklearnModel
@Common.register()
class Deepwalk(SklearnModel):
    """
    Implementation of DeepWalk Unsupervised Graph Neural Networks (DeepWalk).
    `DeepWalk: Online Learning of Social Representations <https://arxiv.org/abs/1403.6652>`
    Implementation: <https://github.com/phanein/deepwalk>
    """

    def process_step(self,
                     adj_transform=None,
                     attr_transform=None,
                     graph_transform=None,
                     walk_length=80,
                     walks_per_node=10):
        """Generate random walks over the (transformed) adjacency matrix
        and cache them for model_builder().

        NOTE(review): deepwalk_random_walk is a generator, so the cached
        walks can only be consumed once -- confirm model_builder is the
        sole consumer.
        """
        graph = gf.get(graph_transform)(self.graph)
        adj_matrix = gf.get(adj_transform)(graph.adj_matrix)
        # CSR indices/indptr are passed directly so the numba-compiled
        # walker can work without the scipy object.
        walks = self.deepwalk_random_walk(adj_matrix.indices,
                                          adj_matrix.indptr,
                                          walk_length=walk_length,
                                          walks_per_node=walks_per_node)
        self.register_cache(walks=walks)

    def model_builder(self,
                      name="Word2Vec",
                      embedding_dim=64,
                      window_size=5,
                      workers=16,
                      epochs=1,
                      num_neg_samples=1):
        """Train a skip-gram Word2Vec model on the cached walks.

        Node ids are stringified because gensim treats tokens as words.
        The two branches exist because gensim 4.x renamed the
        ``size``/``iter`` constructor arguments to
        ``vector_size``/``epochs``.
        """
        assert name == "Word2Vec"
        walks = self.cache.walks
        sentences = [list(map(str, walk)) for walk in walks]
        if LooseVersion(gensim.__version__) <= LooseVersion("4.0.0"):
            model = Word2Vec(sentences,
                             size=embedding_dim,
                             window=window_size,
                             min_count=0,
                             sg=1,
                             workers=workers,
                             iter=epochs,
                             negative=num_neg_samples,
                             hs=0,
                             compute_loss=True)
        else:
            model = Word2Vec(sentences,
                             vector_size=embedding_dim,
                             window=window_size,
                             min_count=0,
                             sg=1,
                             workers=workers,
                             epochs=epochs,
                             negative=num_neg_samples,
                             hs=0,
                             compute_loss=True)
        return model

    def classifier_builder(self):
        """Build the downstream node classifier from ``cfg.classifier``."""
        cfg = self.cfg.classifier
        assert cfg.name == "LogisticRegression"
        classifier = LogisticRegression(solver=cfg.solver,
                                        max_iter=cfg.max_iter,
                                        multi_class=cfg.multi_class,
                                        random_state=cfg.random_state)
        return classifier

    @staticmethod
    @njit
    def deepwalk_random_walk(indices,
                             indptr,
                             walk_length=80,
                             walks_per_node=10):
        """Yield uniform random walks over a CSR graph.

        For each of ``walks_per_node`` rounds, starts one walk at every
        node; a walk stops early at a node with no outgoing edges.
        """
        N = len(indptr) - 1
        for _ in range(walks_per_node):
            for n in range(N):
                single_walk = [n]
                current_node = n
                for _ in range(walk_length - 1):
                    # Neighbors of current_node in CSR form.
                    neighbors = indices[
                        indptr[current_node]:indptr[current_node + 1]]
                    if neighbors.size == 0:
                        break
                    current_node = np.random.choice(neighbors)
                    single_walk.append(current_node)
                yield single_walk

    @property
    def embeddings(self, norm=True):
        # Reorder gensim's vocabulary-ordered vectors back into node-id
        # order (tokens are stringified node ids); the attribute holding
        # the vocabulary was renamed in gensim 4.x.
        # NOTE(review): a property cannot receive the ``norm`` argument --
        # it is unused; normalization is controlled solely by
        # cfg.normalize_embedding.
        if LooseVersion(gensim.__version__) <= LooseVersion("4.0.0"):
            embeddings = self.model.wv.vectors[np.fromiter(
                map(int, self.model.wv.index2word), np.int32).argsort()]
        else:
            embeddings = self.model.wv.vectors[np.fromiter(
                map(int, self.model.wv.index_to_key), np.int32).argsort()]
        if self.cfg.normalize_embedding:
            embeddings = self.normalize_embedding(embeddings)
        return embeddings
| StarcoderdataPython |
5113218 | <filename>simstat/bootstrapTest.py<gh_stars>0
from . import bootstrap
import numpy as np
__all__=['bootstrapTest'] #everything that will be imported by import *, like in __init__
def bootstrapTest(data, n=10000):
    """
    Wrapper for 'bootstrap'.

    input:  data: 1D ndarray (NaN entries are ignored)
    output: p -- p-value for whether data's bootstrap distribution
            differs from zero (1 when the 90% CI includes zero)
    """
    resampled = bootstrap(data[~np.isnan(data)], n)
    lo, hi = np.nanpercentile(resampled, [5, 95])
    # CI straddles (or touches) zero, or is undefined: not significant.
    if not (lo * hi > 0):
        return 1
    # CI excludes zero: p is the fraction of resamples on the "wrong" side.
    wrong_side = resampled < 0 if lo > 0 else resampled > 0
    return np.count_nonzero(wrong_side) / n
if __name__ == '__main__':
    # Smoke test: a normal sample with a small nonzero mean should usually
    # yield a small p-value.
    data=np.random.normal(loc=.1, scale=3, size=150)
    p=bootstrapTest(data,n=10000)
    print(p)
| StarcoderdataPython |
1959250 | <filename>mcradar/radarOperator/zeOperator.py
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import subprocess
import numpy as np
import xarray as xr
from glob import glob
from pytmatrix.tmatrix import Scatterer
from pytmatrix import psd, orientation, radar
from pytmatrix import refractive, tmatrix_aux
from mcradar.tableOperator import creatRadarCols
# TODO: this function should deal with the LUTs
def calcScatPropOneFreq(wl, radii, as_ratio,
                        rho, elv, ndgs=30,
                        canting=False, cantingStd=1,
                        meanAngle=0, safeTmatrix=False):
    """
    Calculates the Ze at H and V polarization, Kdp for one wavelength
    TODO: LDR???

    Parameters
    ----------
    wl: wavelength [mm] (single value)
    radii: radius [mm] of the particle (array[n])
    as_ratio: aspect ratio of the super particle (array[n])
    rho: density [g/mmˆ3] of the super particle (array[n])
    elv: elevation angle [°]
    ndgs: division points used to integrate over the particle surface
    canting: boolean (default = False)
    cantingStd: standard deviation of the canting angle [°] (default = 1)
    meanAngle: mean value of the canting angle [°] (default = 0)
    safeTmatrix: if True, run each T-matrix calculation in an external
        'spheroidMcRadar' subprocess; particles whose run fails produce NaNs
        instead of crashing the session

    Returns
    -------
    reflect_h: super particle horizontal reflectivity[mm^6/m^3] (array[n])
    reflect_v: super particle vertical reflectivity[mm^6/m^3] (array[n])
    refIndex: refractive index from each super particle (array[n])
    kdp: calculated kdp from each particle (array[n])
    """
    #---pyTmatrix setup
    # initialize a scatterer object (reused and mutated for every particle)
    scatterer = Scatterer(wavelength=wl)
    scatterer.radius_type = Scatterer.RADIUS_MAXIMUM
    scatterer.ndgs = ndgs
    scatterer.ddelta = 1e-6

    if canting==True:
        # Gaussian canting-angle distribution, averaged with the fixed scheme.
        scatterer.or_pdf = orientation.gaussian_pdf(std=cantingStd, mean=meanAngle)
        # scatterer.orient = orientation.orient_averaged_adaptive
        scatterer.orient = orientation.orient_averaged_fixed

    # geometric parameters - incident direction
    scatterer.thet0 = 90. - elv
    scatterer.phi0 = 0.

    # parameters for backscattering (NaN-initialized so skipped/failed
    # particles stay NaN in the output)
    refIndex = np.ones_like(radii, np.complex128)*np.nan
    reflect_h = np.ones_like(radii)*np.nan
    reflect_v = np.ones_like(radii)*np.nan

    # S matrix for Kdp
    sMat = np.ones_like(radii)*np.nan
    for i, radius in enumerate(radii):
        # A quick function to save the distribution of values used in the test
        #with open('/home/dori/table_McRadar.txt', 'a') as f:
        #    f.write('{0:f} {1:f} {2:f} {3:f} {4:f} {5:f} {6:f}\n'.format(wl, elv,
        #            meanAngle, cantingStd, radius, rho[i], as_ratio[i]))
        # scattering geometry backward
        # radius = 100.0 # just a test to force nans
        scatterer.thet = 180. - scatterer.thet0
        scatterer.phi = (180. + scatterer.phi0) % 360.
        scatterer.radius = radius
        # pytmatrix expects the inverse of the (oblate) aspect ratio here
        scatterer.axis_ratio = 1./as_ratio[i]
        scatterer.m = refractive.mi(wl, rho[i])
        refIndex[i] = refractive.mi(wl, rho[i])

        if safeTmatrix:
            # Run the computation out-of-process; the helper prints the
            # results after a 'Results ' marker on stdout.
            inputs = [str(scatterer.radius),
                      str(scatterer.wavelength),
                      str(scatterer.m),
                      str(scatterer.axis_ratio),
                      str(int(canting)),
                      str(cantingStd),
                      str(meanAngle),
                      str(ndgs),
                      str(scatterer.thet0),
                      str(scatterer.phi0)]
            arguments = ' '.join(inputs)
            a = subprocess.run(['spheroidMcRadar'] + inputs, # this script should be installed by McRadar
                               capture_output=True)
            # print(str(a))
            try:
                back_hh, back_vv, sMatrix, _ = str(a.stdout).split('Results ')[-1].split()
                back_hh = float(back_hh)
                back_vv = float(back_vv)
                sMatrix = float(sMatrix)
            except:
                # Any parse/run failure yields NaN for this particle.
                back_hh = np.nan
                back_vv = np.nan
                sMatrix = np.nan
            # print(back_hh, radar.radar_xsect(scatterer, True))
            # print(back_vv, radar.radar_xsect(scatterer, False))
            reflect_h[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * back_hh # radar.radar_xsect(scatterer, True)  # Kwsqrt is not correct by default at every frequency
            reflect_v[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * back_vv # radar.radar_xsect(scatterer, False)
            # scattering geometry forward
            # scatterer.thet = scatterer.thet0
            # scatterer.phi = (scatterer.phi0) % 360. #KDP geometry
            # S = scatterer.get_S()
            sMat[i] = sMatrix # (S[1,1]-S[0,0]).real
            # print(sMatrix, sMat[i])
            # print(sMatrix)
        else:
            reflect_h[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * radar.radar_xsect(scatterer, True)  # Kwsqrt is not correct by default at every frequency
            reflect_v[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * radar.radar_xsect(scatterer, False)

            # scattering geometry forward (forward amplitudes are needed for Kdp)
            scatterer.thet = scatterer.thet0
            scatterer.phi = (scatterer.phi0) % 360. #KDP geometry
            S = scatterer.get_S()
            sMat[i] = (S[1,1]-S[0,0]).real

    # Standard Kdp from the real part of the forward amplitude difference.
    kdp = 1e-3* (180.0/np.pi)*scatterer.wavelength*sMat

    del scatterer # TODO: Evaluate the chance to have one Scatterer object already initiated instead of having it locally
    return reflect_h, reflect_v, refIndex, kdp
def radarScat(sp, wl, K2=0.93):
    """
    Calculates the single scattering radar quantities from the matrix values

    Parameters
    ----------
    sp: dataArray [n] superparticles containing backscattering matrix
        and forward amplitude matrix information needed to compute
        spectral radar quantities
    wl: wavelength [mm]
    K2: Rayleigh dielectric factor |(m^2-1)/(m^2+2)|^2

    Returns
    -------
    reflect_h: super particle horizontal reflectivity[mm^6/m^3] (array[n])
    reflect_v: super particle vertical reflectivity[mm^6/m^3] (array[n])
    kdp: calculated kdp from each particle (array[n])
    ldr_h: linear depolarization ratio horizontal (array[n])
    rho_hv: correlation coefficient (array[n]) -- currently disabled (NaN)
    """
    prefactor = 2 * np.pi * wl**4 / (np.pi**5 * K2)

    z11 = sp.Z11.values
    z12 = sp.Z12.values
    z21 = sp.Z21.values
    z22 = sp.Z22.values

    reflect_hh = prefactor * (z11 - z12 - z21 + z22)
    reflect_vv = prefactor * (z11 + z12 + z21 + z22)
    kdp = 1e-3 * (180.0 / np.pi) * wl * sp.S22r_S11r.values

    # Cross-polar return relative to the co-polar horizontal channel.
    reflect_hv = prefactor * (z11 - z12 + z21 - z22)
    ldr_h = reflect_hv / reflect_hh

    # rho_hv is intentionally disabled for now and returned as NaN.
    rho_hv = np.full_like(reflect_hh, np.nan)

    return reflect_hh, reflect_vv, kdp, ldr_h, rho_hv
def calcParticleZe(wls, elv, mcTable, ndgs=30,
                   scatSet={'mode':'full', 'safeTmatrix':False}):#zeOperator
    """
    Calculates the horizontal and vertical reflectivity of
    each superparticle from a given distribution of super
    particles

    Parameters
    ----------
    wls: wavelength [mm] (iterable)
    elv: elevation angle [°] # TODO: maybe also this can become iterable
    mcTable: McSnow table returned from getMcSnowTable()
    ndgs: division points used to integrate over the particle surface
    scatSet: dict selecting the scattering backend; mode 'full' runs the
        T-matrix code, otherwise a pre-computed LUT is interpolated
        (NOTE: mutable default argument -- harmless here because it is
        only read, never mutated)

    Returns
    -------
    mcTable including the horizontal and vertical reflectivity
    of each super particle calculated for X, Ka and W band. The
    calculation is made separetely for aspect ratio < 1 and >=1.
    Kdp is also included. TODO spectral ldr and rho_hv
    """
    #calling the function to create output columns
    mcTable = creatRadarCols(mcTable, wls)
    #print('mcTable has ', len(mcTable))

    if scatSet['mode'] == 'full':
        print('Full mode Tmatrix calculation')
        ##calculation of the reflectivity for AR < 1
        # Oblate particles (aspect ratio < 1): mean canting angle 0°.
        tmpTable = mcTable[mcTable['sPhi']<1].copy()

        #particle properties
        canting = True
        meanAngle=0
        cantingStd=1

        radii_M1 = tmpTable['radii_mm'].values #[mm]
        as_ratio_M1 = tmpTable['sPhi'].values
        rho_M1 = tmpTable['sRho'].values #[g/cm^3]

        for wl in wls:
            singleScat = calcScatPropOneFreq(wl, radii_M1, as_ratio_M1,
                                             rho_M1, elv, canting=canting,
                                             cantingStd=cantingStd,
                                             meanAngle=meanAngle, ndgs=ndgs,
                                             safeTmatrix=scatSet['safeTmatrix'])
            reflect_h, reflect_v, refInd, kdp_M1 = singleScat
            wlStr = '{:.2e}'.format(wl)
            # Write results back only into the AR < 1 rows.
            mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sPhi']<1] = reflect_h
            mcTable['sZeV_{0}'.format(wlStr)].values[mcTable['sPhi']<1] = reflect_v
            mcTable['sKDP_{0}'.format(wlStr)].values[mcTable['sPhi']<1] = kdp_M1

        ##calculation of the reflectivity for AR >= 1
        # Prolate particles (aspect ratio >= 1): mean canting angle 90°.
        tmpTable = mcTable[mcTable['sPhi']>=1].copy()

        #particle properties
        canting=True
        meanAngle=90
        cantingStd=1

        radii_M1 = (tmpTable['radii_mm']).values #[mm]
        as_ratio_M1 = tmpTable['sPhi'].values
        rho_M1 = tmpTable['sRho'].values #[g/cm^3]

        for wl in wls:
            singleScat = calcScatPropOneFreq(wl, radii_M1, as_ratio_M1,
                                             rho_M1, elv, canting=canting,
                                             cantingStd=cantingStd,
                                             meanAngle=meanAngle, ndgs=ndgs,
                                             safeTmatrix=scatSet['safeTmatrix'])
            reflect_h, reflect_v, refInd, kdp_M1 = singleScat
            wlStr = '{:.2e}'.format(wl)
            mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = reflect_h
            mcTable['sZeV_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = reflect_v
            mcTable['sKDP_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = kdp_M1

    elif len(mcTable): # interpolation fails if no selection is possible
        # LUT mode: pick the nearest tabulated elevation/frequency and do a
        # nearest-neighbour lookup per particle.
        elvSel = scatSet['lutElev'][np.argmin(np.abs(np.array(scatSet['lutElev'])-elv))]
        print('elevation ', elv,'lut elevation ', elvSel)

        for wl in wls:
            f = 299792458e3/wl
            freSel = scatSet['lutFreq'][np.argmin(np.abs(np.array(scatSet['lutFreq'])-f))]
            print('frequency ', f/1.e9, 'lut frequency ', freSel/1.e9)
            dataset_filename = scatSet['lutPath'] + 'testLUT_{:3.1f}e9Hz_{:d}.nc'.format(freSel/1e9, int(elvSel))
            lut = xr.open_dataset(dataset_filename)#.sel(wavelength=wl,
                                                   #     elevation=elv,
                                                   #     canting=1.0,
                                                   #     method='nearest')
            points = lut.sel(wavelength=wl, elevation=elv, canting=1.0,
                             size=xr.DataArray(mcTable['radii_mm'].values, dims='points'),
                             aspect=xr.DataArray(mcTable['sPhi'].values, dims='points'),
                             density=xr.DataArray(mcTable['sRho'].values, dims='points'),
                             method='nearest')
            reflect_h, reflect_v, kdp_M1, ldr, rho_hv = radarScat(points, wl)
            wlStr = '{:.2e}'.format(wl)
            # NOTE(review): 'points' was computed for ALL particles, yet the
            # results are stored only into the sPhi >= 1 rows (mask likely
            # copied from the 'full' branch) -- confirm whether the mask is
            # intended here; shapes will disagree whenever any sPhi < 1.
            mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = reflect_h
            mcTable['sZeV_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = reflect_v
            mcTable['sKDP_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = kdp_M1

        if scatSet['mode'] == 'table':
            print('fast LUT mode')

        elif scatSet['mode'] == 'wisdom':
            print('less fast cache adaptive mode')

    return mcTable
| StarcoderdataPython |
3506092 | <reponame>mobergd/interfaces
""" fit rate constants to Arrhenius expressions
"""
import os
import numpy as np
from scipy.optimize import leastsq
from ratefit.fit.arrhenius import dsarrfit_io
RC = 1.98720425864083e-3 # Gas Constant in kcal/mol.K
def single(temps, rate_constants, t_ref, method,
           a_guess=8.1e-11, n_guess=-0.01, ea_guess=2000.0,
           dsarrfit_path=None, a_conv_factor=1.00):
    """Fit k(T) to a single Arrhenius form with the requested backend.

    method: 'dsarrfit' (external fitter; requires dsarrfit_path) or
    'python' (in-process numpy least squares).  Returns [A, n, Ea].
    """
    if method == 'python':
        return _single_arrhenius_numpy(temps, rate_constants, t_ref)
    if method == 'dsarrfit':
        assert dsarrfit_path is not None
        return _dsarrfit(temps, rate_constants, a_guess, n_guess, ea_guess,
                         'single', dsarrfit_path, a_conv_factor)
    raise NotImplementedError
def double(temps, rate_constants, t_ref, method,
           a_guess=8.1e-11, n_guess=-0.01, ea_guess=2000.0,
           dsarrfit_path=None, a_conv_factor=1.00):
    """Fit k(T) to a sum of two Arrhenius terms with the requested backend.

    method: 'dsarrfit' (external fitter; requires dsarrfit_path) or
    'python' (scipy least squares seeded with the single-fit guess).
    """
    if method == 'python':
        return _double_arrhenius_scipy(
            temps, rate_constants, t_ref, a_guess, n_guess, ea_guess)
    if method == 'dsarrfit':
        assert dsarrfit_path is not None
        return _dsarrfit(temps, rate_constants, a_guess, n_guess, ea_guess,
                         'double', dsarrfit_path, a_conv_factor)
    raise NotImplementedError
def _single_arrhenius_numpy(temps, rate_constants, t_ref):
    """Linear least-squares fit of k(T) = A * (T/t_ref)**n * exp(-Ea/(R*T)).

    The number of fitted parameters degrades gracefully with the number of
    valid rate constants: 0 -> all zeros, 1 -> A only, 2-3 -> A and Ea,
    more than 3 -> A, n and Ea.  Returns [A, n, Ea].
    """
    n_vals = rate_constants.size

    # No positive rate constants: nothing to fit.
    if n_vals == 0:
        return [0.0, 0.0, 0.0]

    # A single value: pin A to it.
    if n_vals == 1:
        return [rate_constants[0], 0.0, 0.0]

    ln_k = np.log(rate_constants)
    ones_col = np.ones(len(temps))
    ea_col = (-1.0 / RC) * (1.0 / temps)

    if n_vals in (2, 3):
        # Two-parameter fit: ln k = ln A - Ea/(R*T).
        design = np.column_stack([ones_col, ea_col]).astype(np.float64)
        theta = np.linalg.lstsq(design, ln_k, rcond=None)[0]
        return [np.exp(theta[0]), 0.0, theta[1]]

    # Three-parameter fit: ln k = ln A + n*ln(T/t_ref) - Ea/(R*T).
    n_col = np.log(temps / t_ref)
    design = np.column_stack([ones_col, n_col, ea_col]).astype(np.float64)
    theta = np.linalg.lstsq(design, ln_k, rcond=None)[0]
    return [np.exp(theta[0]), theta[1], theta[2]]
def _double_arrhenius_scipy(temps, rate_constants, t_ref,
                            sgl_a, sgl_n, sgl_ea):
    """Refine a double-Arrhenius fit, seeded from a single-Arrhenius result.

    The initial guess splits the single-fit A evenly between the two
    channels and perturbs n in opposite directions.  Returns the six fitted
    parameters [A1, n1, Ea1, A2, n2, Ea2].
    """
    half_a = sgl_a / 2.0
    guess = [half_a, sgl_n + 0.1, sgl_ea,
             half_a, sgl_n - 0.1, sgl_ea]

    # Nonlinear least squares on the log10 residuals.
    result = leastsq(_mod_arr_residuals, guess,
                     args=(rate_constants, temps, t_ref),
                     ftol=1.0E-9, xtol=1.0E-9, maxfev=100000)
    return result[0]
def _mod_arr_residuals(guess_params, rate_constant, temp, t_ref):
    """Residual (in log10 space) between the data and a sum of two modified
    Arrhenius terms; used by the scipy leastsq driver."""
    a1, n1, ea1, a2, n2, ea2 = guess_params

    # Evaluate each channel in log space to avoid overflow of A*(T/Tref)**n.
    k1 = np.exp(np.log(a1) + n1 * np.log(temp / t_ref) - ea1 / (RC * temp))
    k2 = np.exp(np.log(a2) + n2 * np.log(temp / t_ref) - ea2 / (RC * temp))

    # Error between the data and the summed fit.
    return np.log10(rate_constant) - np.log10(k1 + k2)
def _dsarrfit(temps, rate_constants,
              a_guess, n_guess, ea_guess,
              fit_type, dsarrfit_path, a_conv_factor):
    """Run the external dsarrfit program and parse its Arrhenius parameters.

    fit_type: 'single' or 'double'.  The fitter reads 'arrfit.dat' and
    writes 'arrfit.out' inside dsarrfit_path.
    """
    # Write the fitter input deck.
    inp_str = dsarrfit_io.write_input(
        temps, rate_constants, a_guess, n_guess, ea_guess)
    inp_path = os.path.join(dsarrfit_path, 'arrfit.dat')
    print('writing dsarrfit input in {}'.format(dsarrfit_path))
    with open(inp_path, 'w') as inp_file:
        inp_file.write(inp_str)

    # Execute the fitter in place.
    print('running dsarrfit')
    dsarrfit_io.run_dsarrfit(dsarrfit_path)

    # Read back and parse the fitted parameters.
    out_path = os.path.join(dsarrfit_path, 'arrfit.out')
    with open(out_path, 'r') as out_file:
        out_str = out_file.read()
    return dsarrfit_io.read_params(out_str, fit_type, a_conv_factor)
| StarcoderdataPython |
6506365 | <reponame>ElLorans/PythonCrashCourse<gh_stars>1-10
def write(string, name):
    """Write *string* to the file *name* (a filename or full path),
    replacing any existing contents.

    The file is opened explicitly as UTF-8 so the written bytes do not
    depend on the platform's default locale encoding.
    """
    # ALWAYS use the with statement: the file is closed even if the
    # write raises.  'w' stands for 'writing mode'.
    with open(name, 'w', encoding='utf-8') as file:
        file.write(string)
# here we used a VOID function (no return)

# Interactive driver: ask the user where to write and what to write.
fl = input('Insert file name or file path + name')
text = input('Insert text you want to write')
if '.' not in fl: # it's better to have a file extension
    # so we check if it is present. If not, we add it
    fl += '.txt'
write(text, fl)
# for instance, if user insert 'a' and 'Hello world' this program will create a
# file named 'a.txt' in the same folder with written 'Hello world'
| StarcoderdataPython |
6653581 | import os.path
import unittest
import clrypt.openssl
TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'test-cert')
class TestBignumToMPI(unittest.TestCase):
    """Make sure we're encoding integers correctly."""
    # These are regression byte-vectors: the expected MPI encodings were
    # captured from a known-good run and must never change.

    def test_exponent(self):
        # Common RSA public exponent 65537 -> 3-byte big-endian MPI payload.
        self.assertEqual(
            clrypt.openssl.bignum_to_mpi(65537),
            b'\x00\x00\x00\x03\x01\x00\x01')

    def test_modulus(self):
        # A full 2048-bit RSA modulus; the 4-byte length prefix is followed
        # by the big-endian magnitude.
        self.assertEqual(
            clrypt.openssl.bignum_to_mpi(
                17652187434302119818626903597973364646688501428151181083346396650190495499971143555821153235865918471667488379960690165155890225429359619542780951438912782907420720337860016081609304413559138090809578303571230455363722891195091112107003860558073312270213669052061287420596945410842819944331551665414956709934244337764287956982989490636986364084315970710464920930036478862933506058288047831177960977170956029647528492070455408969834953275116251472162035375269818449753491792832735260819579628653112578006009233208029743042292911927382613736571054059145327226830704584124567079108161933244408783987994310178893677777563
            ),
            b'\x00\x00\x01\x01\x00\x8b\xd5\x0e\xf7s\xde\xce\xcayg\xe5s\xaf'
            b'\xa5\\\x95\xd9\xbd\xb3\xff4\xa9\x98T\xe6^\x91\xcc\xb9X\xda*'
            b'\xf3W@\xed\x8b\xd7E\rB\xa7\x17l\x83_s\x8479\xa2\x92}SL\x007g'
            b'\x829\xfdz\x1bwf\x060}\xd1\xaagXF\xf1\x12n\x96z\xba\xa3\xd9'
            b'\xb1\x91\x98\x99\xf4.\xbfo\xd1\x13\xb8\x97p^*\x16\x0bi~\xd5'
            b'\x10\x07\xa7\x7f\x86D\x9a\xf3]0YZ4\xea\xe9\x17\xe1\x86\x96'
            b'\xad\xe9;\xcf\xd3T+\x91U#K.\xdb\xcc\x06\x90e]\x88\x0e[hs\xde'
            b'\xbbm\x16\xc9\x19@\xd9{FI\x04\xe7\xf6\xd5\xcb\xff\xe7&\xce'
            b'\xaa\x0e\x88{\xc7\xfa\xe6\x94d\x1d\xf9\x00\x18\xa2[\xeaf\xf1'
            b'\xea\xe7\xc2ZG\x99\xfc\xe8\xb9|\xc7\xa4r\x06\x7f\x1e\tA\xaa'
            b'\x1a\xe6\xe0\x86\x85\x11\xf0q?\xdc\xa0c\xbey\x05[u\xe5}>\xf5'
            b'\xfc\x85\xaa\xff\x93v\xf7\xdf\xc6\xffv\xcei47\x03\xb1\xd0vR'
            b'\x90\x16\xf5\x1a\xad\x1eH\x9dRW(\xea\xa4\xd2\x9b',
        )
class TestOpenSSLKeypair(unittest.TestCase):
    """Exercise the OpenSSLKeypair wrapper against the bundled test certificate."""

    def setUp(self):
        cert_path = os.path.join(TEST_CERT_DIR, 'test.crt')
        key_path = os.path.join(TEST_CERT_DIR, 'test.dem')
        self.keypair = clrypt.openssl.OpenSSLKeypair(cert_path, key_path)

    def test_key_id(self):
        """A regression test: key IDs should never change."""
        self.assertEqual(
            "05f8ef9229fe21844aacfe2ec6e63e2b",
            self.keypair.get_key_id())

    def test_encryption_cycle(self):
        """A smoke test: decrypt(encrypt(text)) == text, and the ciphertext
        actually differs from the plaintext."""
        plaintext = b"Some message"
        ciphertext = self.keypair.encrypt(plaintext)
        self.assertNotEqual(plaintext, ciphertext)
        self.assertEqual(plaintext, self.keypair.decrypt(ciphertext))
| StarcoderdataPython |
6401542 | <gh_stars>0
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.views import APIView
import os
import requests
import sys
import json
# Serve the built Vue single-page application; never_cache stops browsers
# from holding on to a stale index.html after a redeploy.
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
'''
What is 'TransmogrifyView'? It exists because Tink's OAuth flow requires
a callback URL for obtaining a client secret. Yes, I know this is poor
coding, but I don't remember the specific details; I will update this if
my memory comes back.
Update: Auth flow. Get a 'code' from Tink via the callback and use that
code in subsequent data requests.
'''
class TransmogrifyView(APIView):
    """OAuth callback endpoint for Tink.

    Tink redirects here with a one-time ``code``; we exchange it for an
    access token and immediately use that token to fetch the user's
    transactions.
    """

    base_url = 'https://api.tink.se/api/v1'

    def post(self, request):
        # Build the token-exchange payload.  A missing 'code' in the request
        # body, or missing TINK_* environment variables, raises KeyError
        # (the original bare `except:` hid every other failure too).
        try:
            body = {
                'code': request.data['code'],
                'client_id': os.environ['TINK_CLIENT_ID'],
                'client_secret': os.environ['TINK_CLIENT_SECRET'],
                'grant_type': 'authorization_code'
            }
        except KeyError:
            return Response(status=422, data={'message': 'No code'})

        body_headers = {
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"
        }
        response_token = requests.post(self.base_url + '/oauth/token',
                                       data=body, headers=body_headers)

        # Token missing from the payload (KeyError) or the payload was not
        # valid JSON (ValueError, which JSONDecodeError subclasses).
        try:
            token = json.loads(response_token.text)['access_token']
        except (KeyError, ValueError):
            return Response(status=422, data={'message': 'No token'})

        # TODO: Can't we just return the token to our client
        # and let it handle the rest?
        response_data = requests.post(self.base_url + '/search',
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer " + token
            },
            data=json.dumps({
                "limit": 100
            })
        )
        return Response(response_data.json())
6528151 | <gh_stars>0
class Configuration(object):
    """Connection settings (server, port, application context, user) for an
    SBML4j service.

    Mutating any connection-relevant attribute clears ``isInSync`` so that
    callers know the remote state may no longer match this configuration.
    """

    def __init__(self, server="http://localhost", port=8080, application_context="/sbml4j", user="sbml4j"):
        self._server = server
        self._port = port
        self._application_context = application_context
        print(f"Server is: {self._server}:{self._port}{self._application_context}")
        # Default request headers; 'user' identifies the caller to the service.
        self._headers = {'Accept': 'application/json', 'user': user}
        self._isInSync = False

    @property
    def isInSync(self):
        """True when the remote state is known to match this configuration."""
        return self._isInSync

    @isInSync.setter
    def isInSync(self, value):
        self._isInSync = value

    @property
    def server(self):
        return self._server

    @server.setter
    def server(self, value):
        # Any endpoint change invalidates the sync state.
        self._server = value
        self._isInSync = False

    @property
    def port(self):
        return self._port

    @port.setter
    def port(self, value):
        self._port = value
        self._isInSync = False

    @property
    def application_context(self):
        return self._application_context

    @application_context.setter
    def application_context(self, value):
        self._application_context = value
        self._isInSync = False

    @property
    def user(self):
        return self._headers['user']

    @user.setter
    def user(self, value):
        self._headers['user'] = value
        self._isInSync = False

    @property
    def headers(self):
        """HTTP headers sent with every request."""
        return self._headers

    @property
    def accept(self):
        return self._headers['Accept']

    @accept.setter
    def accept(self, value):
        # NOTE: changing the Accept header intentionally does not touch
        # the sync flag (it does not alter the target endpoint).
        self._headers['Accept'] = value

    @property
    def url(self):
        """Base URL assembled from server, port and application context."""
        return f"{self._server}:{self._port}{self._application_context}"

    def __str__(self):
        return "Server: {}:{}{} with headers: {}".format(self._server, self._port, self._application_context, self._headers)
| StarcoderdataPython |
9611667 | from django.db import models
from customer import models as customer_models
class Invoice(models.Model):
    """An invoice belonging to a customer."""
    # Owning customer; deleting the customer cascades to their invoices.
    # NOTE(review): the field is called 'name' but holds a Customer FK --
    # confirm whether a rename (and migration) is intended.
    name = models.ForeignKey(customer_models.Customer, on_delete=models.CASCADE)
    # Outstanding balance (nullable). Units/currency are not specified here.
    balance = models.IntegerField(null=True)
| StarcoderdataPython |
6545839 | <filename>MillerArrays/millerArrayCountBijvoetPairs.py<gh_stars>0
# Count the Bijvoet (Friedel) mates in the first Miller array (cctbx API).
# NOTE(review): the return value is discarded -- presumably this snippet is
# meant to run interactively, where the result is echoed.
miller_arrays[0].n_bijvoet_pairs()
| StarcoderdataPython |
3418051 | """
This script adversarially trains a model using iterative attacks on multiple
GPUs.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from collections import namedtuple
from cleverhans.compat import app, flags
from trainer import TrainerMultiGPU
from trainer import TrainerSingleGPU
def run_trainer(hparams):
    """Build the appropriate trainer for *hparams*, train, run a final
    evaluation, and return the trainer's result."""
    logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)

    # The training attack name decides single- vs multi-GPU execution.
    if "multigpu" in hparams.attack_type_train:
        logging.info("Multi GPU Trainer.")
        trainer = TrainerMultiGPU(hparams)
    else:
        logging.info("Single GPU Trainer.")
        trainer = TrainerSingleGPU(hparams)

    trainer.model_train()
    trainer.eval(inc_epoch=False)
    return trainer.finish()
def main(argv=None):
    """Snapshot all command-line flags into an immutable HParams tuple and
    hand it to the trainer."""
    flag_values = {name: flags.FLAGS[name].value for name in dir(flags.FLAGS)}
    HParams = namedtuple("HParams", flag_values.keys())
    run_trainer(HParams(**flag_values))
if __name__ == "__main__":
flags.DEFINE_integer("train_start", 0, "Index of first training set example.")
flags.DEFINE_integer("train_end", 60000, "Index of last training set example.")
flags.DEFINE_integer("test_start", 0, "Index of first test set example.")
flags.DEFINE_integer("test_end", 10000, "Index of last test set example.")
flags.DEFINE_integer("nb_epochs", 6, "Number of epochs to train model.")
flags.DEFINE_integer("batch_size", 128, "Size of training batches.")
flags.DEFINE_boolean("adv_train", False, "Whether to do adversarial training.")
flags.DEFINE_boolean("save", True, "Whether to save from a checkpoint.")
flags.DEFINE_string("save_dir", "runs/X", "Location to store logs/model.")
flags.DEFINE_string("model_type", "madry", "Model type: basic|madry|resnet_tf.")
flags.DEFINE_string(
"attack_type_train",
"MadryEtAl_y_multigpu",
"Attack type for adversarial training:\
FGSM|MadryEtAl{,_y}{,_multigpu}.",
)
flags.DEFINE_string(
"attack_type_test", "FGSM", "Attack type for test: FGSM|MadryEtAl{,_y}."
)
flags.DEFINE_string("dataset", "mnist", "Dataset mnist|cifar10.")
flags.DEFINE_boolean(
"only_adv_train", False, "Do not train with clean examples when adv training."
)
flags.DEFINE_integer("save_steps", 50, "Save model per X steps.")
flags.DEFINE_integer(
"attack_nb_iter_train", None, "Number of iterations of training attack."
)
flags.DEFINE_integer("eval_iters", 1, "Evaluate every X steps.")
flags.DEFINE_integer(
"lrn_step", 30000, "Step to decrease learning rate" "for ResNet."
)
flags.DEFINE_float("adam_lrn", 0.001, "Learning rate for Adam Optimizer.")
flags.DEFINE_float("mom_lrn", 0.1, "Learning rate for Momentum Optimizer.")
flags.DEFINE_integer("ngpu", 1, "Number of gpus.")
flags.DEFINE_integer("sync_step", 1, "Sync params frequency.")
flags.DEFINE_boolean("fast_tests", False, "Fast tests against attacks.")
flags.DEFINE_string(
"data_path",
"./datasets/",
"Path to datasets." "Each dataset should be in a subdirectory.",
)
app.run()
| StarcoderdataPython |
386881 | <reponame>ShiNik/wiki_ml
# user define imports
from my_package.factory import ParserGenerator as ParserGenerator
from my_package import wiki_extractor as extractor
from my_package import util as util
from my_package.database_manager import DatabaseManager
from my_package.parser import TableParser
from my_package.log_manager import LogManager
# python imports
import re
class DataFetchManager:
    """Crawls a Wikipedia 'most visited museums' table, scrapes the per-city
    and per-museum infoboxes, joins in city-visitor CSV data, and stores the
    combined records through DatabaseManager."""

    def __init__(self):
        # Stateless: all work happens in the static fetch_data().
        return

    @staticmethod
    def fetch_data(config):
        logger = LogManager.instance()
        page_name = config.main_page_name
        parser_list = [ParserGenerator.parser_types['table'], ParserGenerator.parser_types['infobox']]
        parser_instance = ParserGenerator(parser_list)

        # todo: move this to a parser for csv table
        # Ad-hoc CSV parsing of the city-visitor table: a ',,' separates the
        # city cell from the numeric columns, quotes wrap the visitor count.
        table2 = extractor.make_request_csv()
        tables_txt = table2.splitlines()
        head = ["city", "city_visitor", "city_visitor_reported_year"]
        import pandas as pd
        df_parsed_table_2 = pd.DataFrame(columns=head)
        for i in range(1, len(tables_txt), 1):
            city_info = tables_txt[i]
            delimiter1 = ","
            delimiter2 = '"'
            delimiter3 = ",,"
            test = city_info.split(delimiter3)
            test2 = test[0].split(delimiter2)
            city_name = test2[0]
            city_name = city_name.replace(delimiter1, "")
            city_visitors = test2[1]
            values = test[1].split(delimiter1)
            data = [city_name, city_visitors, values[0]]
            df_parsed_table_2 = df_parsed_table_2.append(pd.Series(data, index=head), ignore_index=True)

        # Main museum table from the wiki page.
        text, _ = extractor.make_request(page_name)
        df_parsed_table = parser_instance.run_function(ParserGenerator.parser_types['table'], text)
        parsed_table = df_parsed_table.values.tolist()

        # todo: group by city so you retrive city page only once
        # take advantage of panda
        for i in range(1, len(parsed_table), 1):
            city_name = parsed_table[i][1]
            print(city_name)
            # Follow one level of wiki redirect ([[target]]) if present.
            text, redirect_page = extractor.make_request(city_name)
            if redirect_page:
                city_name = text.split('[[')[1].split(']]')[0]
                text = extractor.make_request(city_name)[0]
            extracted_city_infos = parser_instance.run_function(ParserGenerator.parser_types['infobox'], text)

            if logger.debug_enabled():
                # Dump the raw infobox for inspection.
                file_name = city_name + "_info.txt"
                full_path = util.get_full_output_path(file_name)
                if len(extracted_city_infos) > 0:
                    with open(full_path, "w", encoding="utf-8") as file:
                        for key, value in extracted_city_infos.items():
                            file.write(key + ": " + value + "\n")

            museum_name = parsed_table[i][0]
            print(museum_name)
            # I might look at category for "Tokyo Metropolitan Art Museum"
            # there I might have link to real website
            # Category: National Museum of Nature and Science
            # Hard-coded skip list: these pages cannot be parsed (missing
            # data case) -- escape them.
            if 'Zhejiang Museum' in museum_name or \
                    'Chongqing Museum of Natural History' in museum_name or \
                    "Mevlana Museum" in museum_name or \
                    "Tokyo Metropolitan Art Museum" in museum_name or \
                    "Chengdu Museum" in museum_name or \
                    "Royal Museums Greenwich" in museum_name or \
                    "National Museum of Nature and Science" in museum_name or \
                    "Suzhou Museum" in museum_name or \
                    "Three Gorges Museum" in museum_name or \
                    "Russian Museum" in museum_name:
                # bad website can not extract it is information, missing data case
                # escape it
                continue;

            # invalid case, page does not exist
            if "<NAME>" in museum_name or \
                    "National Art Center" in museum_name or \
                    "Museo Nacional de Historia" in museum_name or \
                    "NGV International" in museum_name:
                continue

            text, redirect_page = extractor.make_request(museum_name)
            if redirect_page:
                museum_name = text.split('[[')[1].split(']]')[0]
                text = extractor.make_request(museum_name)[0]
            extracted_museum_infos = parser_instance.run_function(ParserGenerator.parser_types['infobox'], text)

            # Remove all special characters, punctuation and spaces from string
            new_name = re.sub('[^A-Za-z0-9]+', '', museum_name)
            if logger.debug_enabled():
                file_name = new_name + "_info.txt"
                full_path = util.get_full_output_path(file_name)
                if len(extracted_museum_infos) > 0:
                    with open(full_path, "w", encoding="utf-8") as file:
                        for key, value in extracted_museum_infos.items():
                            file.write(key + ": " + value + "\n")

            # todo: move this to its ovn function to post-process
            # save city and one of its museums in a database
            extracted_city_infos["name"] = parsed_table[i][TableParser.column_type["city"]]
            extracted_museum_infos["name"] = parsed_table[i][TableParser.column_type["museum"]]
            extracted_museum_infos["visitors"] = parsed_table[i][TableParser.column_type["visitor"]]
            extracted_museum_infos["year"] = parsed_table[i][TableParser.column_type["year"]]
            # Join in the per-city visitor numbers parsed from the CSV above.
            city_visitor_info = df_parsed_table_2[df_parsed_table_2['city'] == extracted_city_infos["name"]]
            if (len(city_visitor_info) > 0):
                extracted_city_infos["city_visitor"] = city_visitor_info["city_visitor"].to_string(index=False)
                extracted_city_infos["city_visitor_reported_year"] = city_visitor_info[
                    "city_visitor_reported_year"].to_string(index=False)
            argument_list = {'city': extracted_city_infos, "museum": extracted_museum_infos}  # percent of original size
            database_manager = DatabaseManager.instance()
            database_manager.save(**argument_list)
| StarcoderdataPython |
9790921 | """ Test Jupyter notebooks with examples
:Author: <NAME> <<EMAIL>>
:Date: 2019-02-20
:Copyright: 2019, Karr Lab
:License: MIT
"""
import glob
import itertools
import json
import nbconvert.preprocessors
import nbformat
import os
import requests
import shutil
import sys
import tempfile
import unittest
# Probe the MODOMICS web service once at import time; tests that depend on it
# are skipped when the server is unreachable or slow (> 2 s round trip).
try:
    response = requests.get('https://iimcb.genesilico.pl/modomics/')
    modomics_available = response.status_code == 200 and response.elapsed.total_seconds() < 2.0
except requests.exceptions.ConnectionError:
    modomics_available = False


@unittest.skipIf(os.getenv('CIRCLECI', '0') in ['1', 'true'], 'Jupyter server not setup in CircleCI')
class ExamplesTestCase(unittest.TestCase):
    """Execute the example notebooks and scripts and check their outputs."""

    # Maximum number of seconds a single notebook may take to execute.
    TIMEOUT = 600

    @classmethod
    def setUpClass(cls):
        # Make the example scripts importable as top-level modules.
        sys.path.insert(0, 'examples')

    @classmethod
    def tearDownClass(cls):
        sys.path.remove('examples')

    def setUp(self):
        # Scratch directory for files produced by the examples.
        self.dirname = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_jupyter(self):
        """Run every notebook under examples/ and fail on any cell error."""
        for filename in itertools.chain(glob.glob('examples/*.ipynb'), glob.glob('examples/**/*.ipynb')):
            # Read the declared nbformat version first, then parse with it.
            with open(filename) as file:
                version = json.load(file)['nbformat']
            with open(filename) as file:
                notebook = nbformat.read(file, as_version=version)
            execute_preprocessor = nbconvert.preprocessors.ExecutePreprocessor(timeout=self.TIMEOUT)
            execute_preprocessor.preprocess(notebook, {'metadata': {'path': 'examples/'}})

    def test_Bouhaddou_model(self):
        """The Bouhaddou et al. example should (re)create its output file."""
        import bouhaddou_et_al_plos_comput_biol_2018
        if os.path.isfile(bouhaddou_et_al_plos_comput_biol_2018.OUT_FILENAME):
            os.remove(bouhaddou_et_al_plos_comput_biol_2018.OUT_FILENAME)
        bouhaddou_et_al_plos_comput_biol_2018.run()
        self.assertTrue(os.path.isfile(bouhaddou_et_al_plos_comput_biol_2018.OUT_FILENAME))

    @unittest.skipIf(not modomics_available, 'MODOMICS server not accesssible')
    def test_modomics(self):
        """The MODOMICS example should write the rRNA and tRNA TSV files."""
        import modomics
        modomics.run()
        filename = os.path.join('examples', 'modomics.rrna.tsv')
        self.assertTrue(os.path.isfile(filename))
        filename = os.path.join('examples', 'modomics.trna.tsv')
        self.assertTrue(os.path.isfile(filename))

    def test_pro(self):
        """The PRO example should produce its TSV output file."""
        import pro
        in_pkl_filename = os.path.join(self.dirname, 'in.pkl')
        out_pickle_filename = os.path.join(self.dirname, 'out.pkl')
        out_pickle_filename_2 = os.path.join(self.dirname, 'out.2.pkl')
        out_tsv_filename = os.path.join(self.dirname, 'out.tsv')
        out_fasta_filename = os.path.join(self.dirname, 'out.fasta')
        out_fig_filename = os.path.join(self.dirname, 'out.svg')
        pro.run(in_pkl_filename=in_pkl_filename, max_num_proteins=100,
                out_pickle_filename=out_pickle_filename,
                out_pickle_filename_2=out_pickle_filename_2,
                out_tsv_filename=out_tsv_filename, out_fasta_filename=out_fasta_filename,
                out_fig_filename=out_fig_filename,
                )
        self.assertTrue(os.path.isfile(out_tsv_filename))
| StarcoderdataPython |
3588357 | <gh_stars>0
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# DQM harvesting client that derives HLT HT-path efficiency histograms from
# the numerator/denominator pairs filled under HLT/JME/HT/.
# Each "efficiency" entry is: "<out_name> '<title; x-label; y-label>' <num> <den>".
htEfficiency = DQMEDHarvester("DQMGenericClient",
    subDirs = cms.untracked.vstring("HLT/JME/HT/*"),
    verbose = cms.untracked.uint32(0), # Set to 2 for all messages
    resolution = cms.vstring(),
    efficiency = cms.vstring(
        "effic_ht 'HT turnON; PF HT [GeV]; efficiency' ht_numerator ht_denominator",
        "effic_ht_variable 'HT turnON; PF HT [GeV]; efficiency' ht_variable_numerator ht_variable_denominator",
        "effic_METPhi 'METPhi turnON; MET #phi; efficiency' METPhi_numerator METPhi_denominator",
        "effic_jetPt1 'JET_PT1 turnON; LEADING JET PT [GeV]; efficiency' jetPt1_numerator jetPt1_denominator",
        "effic_jetPt2 'JET_PT2 turnON; SUBLEADING JET PT [GeV]; efficiency' jetPt2_numerator jetPt2_denominator",
        "effic_jetEta1 'JET_ETA1 turnON; LEADING JET #eta; efficiency' jetEta1_numerator jetEta1_denominator",
        "effic_jetEta2 'JET_ETA2 turnON; SUBLEADING JET #eta; efficiency' jetEta2_numerator jetEta2_denominator",
        "effic_jetPhi1 'JET_PHI1 turnON; LEADING JET #phi; efficiency' jetPhi1_numerator jetPhi1_denominator",
        "effic_jetPhi2 'JET_PHI2 turnON; SUBLEADING JET #phi; efficiency' jetPhi2_numerator jetPhi2_denominator",
        "effic_nJets 'nJets; nJets; efficiency' nJets_numerator nJets_denominator",
        "effic_nJets_HT 'nJets (Pt>30 && eta<2.5); nJets_; efficiency' nJetsHT_numerator nJetsHT_denominator"
    ),
    # Efficiency-vs-lumisection profile.
    efficiencyProfile = cms.untracked.vstring(
        "effic_ht_vs_LS 'HT efficiency vs LS; LS; PF HT efficiency' htVsLS_numerator htVsLS_denominator"
    ),
)
# Sequence exported to the harvesting configuration.
htClient = cms.Sequence(
    htEfficiency
)
| StarcoderdataPython |
4901447 | <filename>memomodel/memo_objects.py
import collections
from memodb import h5db
import enum
import yaml
import numpy
import pandas
# --- Configuration schema -------------------------------------------------
# The classes below only declare h5db/YAML field descriptors; they carry no
# behaviour of their own.  Each mirrors a section of the YAML configuration
# and is persisted through the h5db mapping layer.
class SimConfig(h5db.H5DBObject, yaml.YAMLObject):
    """Simulator invocation configuration (a list of argument objects)."""
    yaml_tag = '!SimConfig'
    arguments = h5db.ObjectList


class VirtualState(h5db.H5DBObject, yaml.YAMLObject):
    """A named virtual state with its update/init attribute names."""
    yaml_tag = '!VirtualState'
    name = h5db.Scalar
    update_attribute = h5db.Scalar
    init_attribute = h5db.Scalar


class ModelStructure(h5db.H5DBObject, yaml.YAMLObject):
    """Interface description of a simulation model (parameters, I/O, states)."""
    yaml_tag = '!ModelStructure'
    simulator_parameters = h5db.List
    model_parameters = h5db.List
    model_inputs = h5db.List
    model_outputs = h5db.List
    virtual_states = h5db.ObjectList


class SamplerConfig(h5db.H5DBObject, yaml.YAMLObject):
    """Ties a sampling strategy to a simulator and its parameter variations."""
    yaml_tag = '!SamplerConfig'
    name = h5db.Scalar
    strategy = h5db.Object
    sim_config = h5db.Object
    model_structure = h5db.Object
    parameter_variations = h5db.ObjectList


class ParameterVariation(h5db.H5DBObject, yaml.YAMLObject):
    """How a single model parameter is varied during sampling."""
    yaml_tag = '!ParameterVariation'
    parameter_name = h5db.Scalar
    variation_mode = h5db.Scalar
    variation_arguments = h5db.ObjectList


class ParameterVariationMode(enum.Enum):
    """Supported variation modes; strategy support differs per mode."""
    CONSTANT = 'constant'
    RANGE_OF_REAL_NUMBERS = 'range_of_real_numbers' # supported by LHS sampling strategy
    RANGE_OF_INTEGERS = 'range_of_integers' # not supported by a sampling strategy yet
    NUMERICAL_LEVELS = 'numerical_levels' # supported by FullFactorial sampling strategy
    NOMINAL_LEVELS = 'nominal_levels' # supported by FullFactorial sampling strategy


class StrategyConfig(h5db.H5DBObject, yaml.YAMLObject):
    """A named sampling strategy and its arguments."""
    yaml_tag = u'!StrategyConfig'
    name = h5db.Scalar
    arguments = h5db.ObjectList


class KeyValuePair(h5db.H5DBObject, yaml.YAMLObject):
    """Generic scalar key/value entry."""
    yaml_tag = u'!KeyValuePair'
    key = h5db.Scalar
    value = h5db.Scalar


class KeyListPair(h5db.H5DBObject, yaml.YAMLObject):
    """Key mapped to a list of values."""
    yaml_tag = u'!KeyListPair'
    key = h5db.Scalar
    value = h5db.List


class KeyObjectPair(h5db.H5DBObject, yaml.YAMLObject):
    """Key mapped to a nested object."""
    yaml_tag = u'!KeyObjectPair'
    key = h5db.Scalar
    value = h5db.Object


class ApproximationFunctionConfig(h5db.H5DBObject, yaml.YAMLObject):
    """Configuration of one approximation (regression) function."""
    yaml_tag = '!ApproximationFunctionConfig'
    inputs = h5db.List
    outputs = h5db.List
    model_type = h5db.Scalar
    model_arguments = h5db.ObjectList
    trainer_options = h5db.ObjectList
    trainer_score= h5db.List


class SurrogateModelConfig(h5db.H5DBObject, yaml.YAMLObject):
    """A surrogate model: approximation functions plus the sampler used."""
    yaml_tag = '!SurrogateModelConfig'
    name = h5db.Scalar
    approximation_functions = h5db.ObjectList
    sampler_configuration = h5db.Object


class DatasetOwnership(h5db.H5DBObject):
    """Associates a dataset with its owning object."""
    dataset = h5db.Object
    owner = h5db.Object

    def __init__(self, **kwargs):
        self.dataset = None
        self.owner = None
        h5db.H5DBObject.__init__(self, **kwargs)
class InputResponseDataset(h5db.H5DBObject):
    """Paired input/response samples stored as two row-aligned DataFrames."""
    inputs = h5db.DataFrame
    responses = h5db.DataFrame

    def __init__(self, input_cols=[], response_cols=[]):
        """
        :param input_cols: List of input column names
        :param response_cols: List of response column names
        """
        # NOTE(review): the mutable default arguments are only read here,
        # never mutated, so the classic shared-default pitfall does not bite.
        # initialize empty dataframes
        self.inputs = pandas.DataFrame(columns=input_cols, dtype=numpy.float64)
        self.responses = pandas.DataFrame(columns=response_cols, dtype=numpy.float64)
        h5db.H5DBObject.__init__(self)

    def update(self, sample, response):
        """Append one sample/response pair as a new row in both frames."""
        next_idx = self.inputs.shape[0]
        # update inputs
        self.inputs.loc[next_idx,:] = sample
        # update responses
        # Assumes each response value is a sequence and only its first
        # element is kept -- TODO confirm against the simulator output format.
        stripped_response = {key: values[0] for key,values in response.items()}
        self.responses.loc[next_idx, :] = stripped_response

    def select(self, selected_inputs=[], selected_responses=[]):
        """Return a new dataset restricted to the given input/response columns."""
        result = InputResponseDataset()
        result.inputs = self.inputs[selected_inputs]
        result.responses = self.responses[selected_responses]
        return result

    def __len__(self):
        # Number of stored samples (rows of the input frame).
        return self.inputs.shape[0]

    def __repr__(self):
        return str(self.__dict__)
class TrainingResult(h5db.H5DBObject):
    """Result of training one approximation function: the train/test data
    split, the fitted metamodel blob, and its evaluation scores."""
    train_data = h5db.Object
    test_data = h5db.Object
    metamodel = h5db.Blob
    score_r2 = h5db.Scalar
    score_avg = h5db.Scalar
    score_hae = h5db.Scalar
    score_mse = h5db.Scalar

    def __init__(self):
        self.train_data = None
        self.test_data = None
        self.metamodel = None
        # Bug fix: the original rebound *local* names (score_r2 = h5db.Scalar,
        # ...) to the descriptor classes -- dead code that left the instance
        # score attributes uninitialized.  Initialize them like the others.
        self.score_r2 = None
        self.score_avg = None
        self.score_hae = None
        self.score_mse = None
        h5db.H5DBObject.__init__(self)
class SurrogateModelTrainingResult(h5db.H5DBObject):
    """Collects the per-function training results of one surrogate model."""
    training_results = h5db.ObjectList
    surrogate_model_name = h5db.Scalar

    def __init__(self):
        self.training_results = None
        self.surrogate_model_name = None
        h5db.H5DBObject.__init__(self)
class SurrogateModel(h5db.H5DBObject):
    """A trained surrogate model ready for persistence."""
    name = h5db.Scalar
    metamodels = h5db.Blob
    model_structure = h5db.Object

    def __init__(self):
        # NOTE(review): __init__ initializes `surrogate_model_name` and
        # `metamodel`, but the declared h5db fields are `name` and
        # `metamodels` -- the declared fields are never initialized here.
        # Verify which attribute names the persistence layer expects.
        self.surrogate_model_name = None
        self.metamodel = None
        self.model_structure = None
        h5db.H5DBObject.__init__(self)


class SimulationModelDescription(h5db.H5DBObject):
    """Links a model structure with its fitted regression model."""
    model_structure = h5db.Object
    regression_model = h5db.Object

    def __init__(self, *args, **kwargs):
        self.model_structure = None
        self.regression_model = None
        h5db.H5DBObject.__init__(self, *args, **kwargs)
class GenericModelDescription(h5db.H5DBObject):
    """Wraps a pickled scikit-learn estimator for persistence.

    Bug fix: the class previously inherited from nothing even though it
    declares an h5db field descriptor, calls ``h5db.H5DBObject.__init__``
    and is registered as a mapped class in ``MeMoSimDB`` -- like every
    sibling description class it must derive from ``h5db.H5DBObject``.
    """
    # Pickled estimator stored as an opaque blob.
    sklearn_estimator = h5db.Blob

    def __init__(self, *args, **kwargs):
        self.sklearn_estimator = None
        h5db.H5DBObject.__init__(self, *args, **kwargs)
class OLSModelDescription(h5db.H5DBObject): # TODO: Refactor this to LinearModelDescription
    """Persisted description of a fitted linear (OLS) model."""
    intercept = h5db.Vector
    coefs = h5db.Matrix

    def __init__(self, *args, **kwargs):
        self.intercept = None
        self.coefs = None
        h5db.H5DBObject.__init__(self, *args, **kwargs)
class KernelRidgeRegressionModelDescription(h5db.H5DBObject):
    """Persisted description of a fitted kernel ridge regression model."""
    kernel = h5db.Scalar
    gamma = h5db.Scalar
    degree = h5db.Scalar
    coef0 = h5db.Scalar
    X_fit = h5db.Matrix
    dual_coef = h5db.Matrix

    def __init__(self, *args, **kwargs):
        # Bug fix: __init__ previously set `intercept`/`coefs` (copy-paste
        # from OLSModelDescription) and left this class's declared fields
        # uninitialized.
        self.kernel = None
        self.gamma = None
        self.degree = None
        self.coef0 = None
        self.X_fit = None
        self.dual_coef = None
        h5db.H5DBObject.__init__(self, *args, **kwargs)
class MeMoSimDB(h5db.H5DB):
    """HDF5 database holding simulation model descriptions."""
    def __init__(self, h5filename):
        # Classes this database knows how to (de)serialize.
        mapped_classes = [SimulationModelDescription, OLSModelDescription, GenericModelDescription, KernelRidgeRegressionModelDescription, ModelStructure, VirtualState]
        h5db.H5DB.__init__(self, h5filename, mapped_classes)


class MeMoDB(h5db.H5DB):
    """HDF5 database holding sampler configurations, datasets and results."""
    def __init__(self, h5filename):
        mapped_classes = [SimConfig, ModelStructure, VirtualState, SamplerConfig, ParameterVariation,
                          StrategyConfig, KeyValuePair, SurrogateModelConfig, ApproximationFunctionConfig,
                          InputResponseDataset, TrainingResult, SurrogateModelTrainingResult, DatasetOwnership,
                          SurrogateModel]
        h5db.H5DB.__init__(self, h5filename, mapped_classes)
| StarcoderdataPython |
11294158 | import numpy as np
from pysteps import motion, nowcasts
from pysteps.utils import conversion, transformation
import tensorflow as tf
# Optical-flow estimator (Lucas-Kanade) and extrapolation nowcast from
# pysteps, instantiated once at import time.
oflow_method = motion.get_method("LK")
extrap_method = nowcasts.get_method("extrapolation")


def motion_extrapolate(motion_field, extrap_field, num_frames=12):
    """Estimate motion from the last 3 frames of `motion_field` and advect
    the last frame of `extrap_field` forward by `num_frames` steps.

    Both arguments are indexed as (time, y, x) arrays; the result is the
    sequence of extrapolated fields returned by the pysteps extrapolator.
    """
    V = oflow_method(motion_field[-3:, :, :])
    extrapolated = extrap_method(extrap_field[-1, :, :], V, num_frames)
    return extrapolated
def motion_extrapolate_batch(motion_field, extrap_field, num_frames=12):
    """Apply :func:`motion_extrapolate` to every sample of a batch.

    Both inputs are indexed as (batch, time, y, x, channel) with a single
    channel; returns an array of shape (batch, num_frames, y, x, 1)-like
    (same trailing dims as `extrap_field`).
    """
    out_shape = (extrap_field.shape[0], num_frames) + extrap_field.shape[2:]
    extrapolated = np.empty(out_shape, dtype=extrap_field.dtype)
    for i in range(extrap_field.shape[0]):
        # Bug fix: forward the caller's `num_frames` instead of the
        # hard-coded 12 -- with any other value the output array was
        # mis-sized relative to what was written into it.
        extrapolated[i,...,0] = motion_extrapolate(
            motion_field[i,...,0], extrap_field[i,...,0],
            num_frames=num_frames
        )
    return extrapolated
def conf_matrix_extrap(
    batch_gen, motion_var="RZC",
    extrap_var="occurrence-8-10", nan_threshold=-3.690,
    num_batches=None, dataset='test',
    separate_leadtimes=False,
):
    """Confusion matrix of the extrapolation baseline against ground truth.

    Binarizes prediction and target at 0.5 and accumulates TP/FP/FN/TN
    counts over the batches, either summed over all lead times or per
    lead time when `separate_leadtimes` is set.  Returns the normalized
    matrix reshaped to (2, 2, 1) or (2, 2, 1, num_frames).
    """
    names = batch_gen.pred_names_past
    motion_index = names.index(motion_var)
    extrap_index = names.index(extrap_var)
    if num_batches is None:
        num_batches = len(batch_gen.time_coords[dataset]) // \
            batch_gen.batch_size
    # Peek at the first batch to learn the number of lead-time frames.
    (X,Y) = batch_gen.batch(0, dataset=dataset)
    num_frames = Y[0].shape[1]
    if separate_leadtimes:
        tp = np.zeros(num_frames, dtype=int)
        fp = np.zeros(num_frames, dtype=int)
        fn = np.zeros(num_frames, dtype=int)
        tn = np.zeros(num_frames, dtype=int)
    else:
        tp = fp = fn = tn = 0
    for i in range(num_batches):
        print(f"{i+1}/{num_batches}")
        (X,Y) = batch_gen.batch(i, dataset=dataset)
        motion_field = X[motion_index].astype(np.float32)
        # Mask values at/below the threshold so the optical flow ignores them.
        motion_field[motion_field <= nan_threshold] = np.nan
        extrap_field = X[extrap_index].astype(np.float32)
        Y_pred = motion_extrapolate_batch(motion_field, extrap_field,
            num_frames=num_frames)
        # Binarize prediction and target at 0.5.
        Y_pred = (Y_pred >= 0.5)
        Y = (Y[0] >= 0.5)
        tp_batch = Y_pred & Y
        fp_batch = Y_pred & ~Y
        fn_batch = ~Y_pred & Y
        tn_batch = ~Y_pred & ~Y
        if separate_leadtimes:
            for t in range(num_frames):
                tp[t] += np.count_nonzero(tp_batch[:,t,...])
                fp[t] += np.count_nonzero(fp_batch[:,t,...])
                fn[t] += np.count_nonzero(fn_batch[:,t,...])
                tn[t] += np.count_nonzero(tn_batch[:,t,...])
        else:
            tp += np.count_nonzero(tp_batch)
            fp += np.count_nonzero(fp_batch)
            fn += np.count_nonzero(fn_batch)
            tn += np.count_nonzero(tn_batch)
    # Normalize counts to frequencies.
    N = tp + fp + fn + tn
    conf_matrix = np.array(((tp, fn), (fp, tn))) / N
    if separate_leadtimes:
        conf_matrix = conf_matrix.reshape((2,2,1,num_frames))
    else:
        conf_matrix = conf_matrix.reshape((2,2,1))
    return conf_matrix
def loss_extrap(
    batch_gen, loss_func, smooth=None, motion_var="RZC",
    extrap_var="occurrence-8-10", nan_threshold=-3.690,
    num_batches=None, dataset='test',
    separate_leadtimes=False,
):
    """Mean loss of the extrapolation baseline under `loss_func`.

    `smooth`, when given, squeezes predictions away from 0/1 (label
    smoothing) before the loss is evaluated.  Returns a scalar mean over
    all batches, or an array over lead times when `separate_leadtimes`
    is set.
    """
    names = batch_gen.pred_names_past
    motion_index = names.index(motion_var)
    extrap_index = names.index(extrap_var)
    if num_batches is None:
        num_batches = len(batch_gen.time_coords[dataset]) // \
            batch_gen.batch_size
    # Peek at the first batch to learn the number of lead-time frames.
    (X,Y) = batch_gen.batch(0, dataset=dataset)
    num_frames = Y[0].shape[1]
    if separate_leadtimes:
        loss = np.zeros(num_frames, dtype=float)
    else:
        loss = 0
    for i in range(num_batches):
        print(f"{i+1}/{num_batches}")
        (X,Y) = batch_gen.batch(i, dataset=dataset)
        motion_field = X[motion_index].astype(np.float32)
        motion_field[motion_field <= nan_threshold] = np.nan
        extrap_field = X[extrap_index].astype(np.float32)
        Y_pred = motion_extrapolate_batch(motion_field, extrap_field,
            num_frames=num_frames)
        # Pixels the extrapolation could not fill are treated as zero.
        Y_pred[np.isnan(Y_pred)] = 0
        if smooth is not None:
            Y_pred = Y_pred * (1-2*smooth) + smooth
        Y_pred = tf.convert_to_tensor(Y_pred.astype(np.float32))
        Y = tf.convert_to_tensor(Y[0].astype(np.float32))
        if separate_leadtimes:
            loss_batch = np.array([
                loss_func(Y[:,t:t+1,...], Y_pred[:,t:t+1,...]).numpy().mean()
                for t in range(num_frames)
            ])
        else:
            loss_batch = loss_func(Y, Y_pred).numpy().mean()
        loss += loss_batch
    # Average the accumulated per-batch losses.
    loss /= num_batches
    return loss
| StarcoderdataPython |
4827364 | import rfidsecuritysvc.exception as exception
from rfidsecuritysvc.api import RECORD_COUNT_HEADER
from rfidsecuritysvc.model import guest as model
def get(id):
    """Return the guest with the given id as JSON, or a 404 message tuple."""
    record = model.get(id)
    if not record:
        return f'Object with id "{id}" does not exist.', 404
    return record.to_json()
def search():
    """Return the JSON representation of every guest."""
    return [guest.to_json() for guest in model.list()]
def post(body):
    """Create a guest from the request body; 201 on success."""
    try:
        model.create(**body)
        return None, 201
    except exception.SoundNotFoundError:
        return f'Sound with id "{body["sound"]}" does not exist.', 400
    except exception.DuplicateGuestError:
        return f'Object with first_name "{body["first_name"]}" and last_name "{body["last_name"]}" already exists.', 409


def delete(id):
    """Delete the guest with the given id, reporting the affected row count."""
    return None, 200, {RECORD_COUNT_HEADER: model.delete(id)}


def put(id, body):
    """Update the guest with the given id, creating it when missing (upsert)."""
    try:
        return None, 200, {RECORD_COUNT_HEADER: model.update(id, **body)}
    except exception.SoundNotFoundError:
        return f'Sound with id "{body["sound"]}" does not exist.', 400
    except exception.GuestNotFoundError:
        # Fall back to creation so PUT behaves as an upsert.
        try:
            model.create(**body)
            return None, 201, {RECORD_COUNT_HEADER: 1}
        except exception.SoundNotFoundError:
            return f'Sound with id "{body["sound"]}" does not exist.', 400
| StarcoderdataPython |
3365367 | <filename>move_comments.py
#!/usr/bin/env python3
# move_comments.py -- print this script's install location versus the
# current working directory (exploratory pathlib usage).
from pathlib import Path
# from dataclasses import dataclass, field, Field
p = Path('.')
# parents[0] (printed below) is the directory containing this script.
script_path = Path(__file__).resolve().parents
here = Path().cwd()
print(f"script location: {str(script_path[0]):<55.55}")
print(f"current path: {str(here):<55.55}")
print(p)
# with Path()
| StarcoderdataPython |
4817840 |
from nose.tools import *
import numpy as np
import pandas as pd
from .. import pvl_pres2alt
def test_proper():
    """pvl_pres2alt should return a positive altitude for scalar input and
    an array for vector input."""
    alt=pvl_pres2alt.pvl_pres2alt(pressure=222)
    assert(alt>0)
    alt=pvl_pres2alt.pvl_pres2alt(pressure=[222,4434,32453,212])
    assert(np.size(alt)>1)
    #include physical checks


@raises(Exception)
def test_fail():
    """Invalid input must raise."""
    # NOTE(review): `doy` is not a pvl_pres2alt parameter, so this presumably
    # raises TypeError rather than exercising a validation path -- confirm
    # whether a negative `pressure` was intended instead.
    alt=pvl_pres2alt.pvl_pres2alt(doy=-600)
    assert(alt>0)
def main():
    """Run this module's tests with the unittest runner."""
    # Bug fix: `unittest` was never imported at module level, so calling
    # main() raised NameError; import it locally before use.
    import unittest
    unittest.main()


if __name__ == '__main__':
    main()
189808 | # -*- coding: utf-8 -*-
from django.conf.urls import url, include
from users.views import LoginView, LogoutView
# URL routes for the users app: session login/logout views.
urlpatterns = [
    url(r'^login/$', LoginView.as_view(), name='user_login'),
    url('^logout/$', LogoutView.as_view(), name="logout"),
]
| StarcoderdataPython |
1680739 |
import os
import numpy as np
path_praat_script=os.path.dirname(os.path.abspath(__file__))
def multi_find(s, r):
    """Return the start indices of all (possibly overlapping) occurrences of
    the substring `r` in `s`.

    Internal helper used to decode the Formants file generated by Praat.

    :param s: string to search in
    :param r: substring to search for
    :returns: list of 0-based start indices; empty when there is no match
    """
    # Scan every start position in s, exactly like the original loop: tail
    # positions where s[i:i + len(r)] is shorter than r simply never compare
    # equal.  This drops the original's dead `n = -1` assignment and the
    # no-op `else: i = i + 1` (rebinding a for-loop variable does nothing).
    return [i for i in range(len(s)) if s[i:i + len(r)] == r]
def praat_vuv(audio_filaname, resultsp, resultst, time_stepF0=0, minf0=75, maxf0=600, maxVUVPeriod=0.02, averageVUVPeriod=0.01):
    """
    runs vuv_praat script to obtain pitch and voicing decisions for a wav file.
    It writes the results into two text files, one for the pitch and another
    for the voicing decisions. These results can then be read using the function
    read_textgrid_trans and decodeF0
    :param audio_filaname: Full path to the wav file
    :param resultsp: Full path to the resulting file with the pitch
    :param resultst: Full path to the resulting file with the voiced/unvoiced decisions
    :param time_stepF0: time step to compute the pitch, default value is 0 and Praat will use 0.75 / minf0
    :param minf0: minimum frequency for the pitch in Hz, default is 75Hz
    :param maxf0: maximum frequency for the pitch in Hz, default is 600
    :param maxVUVPeriod: maximum interval that considered part of a larger voiced interval, default 0.02
    :param averageVUVPeriod: half of this value will be taken to be the amount to which a voiced interval will extend beyond its initial and final points, default is 0.01
    :returns: nothing

    .. warning:: the command line is assembled by plain string concatenation
       and executed through ``os.system``; paths containing spaces or shell
       metacharacters will break or be interpreted by the shell.  Do not
       pass untrusted input.
    """
    command='praat '+path_praat_script+'/vuv_praat.praat '
    command+=audio_filaname+' '+resultsp +' '+ resultst+' '
    command+=str(minf0)+' '+str(maxf0)+' '
    command+=str(time_stepF0)+' '+str(maxVUVPeriod)+' '+str(averageVUVPeriod)
    os.system(command)


def praat_formants(audio_filename, results_filename,sizeframe,step, n_formants=5, max_formant=5500):
    """
    runs FormantsPraat script to obtain the formants for a wav file.
    It writes the results into a text file.
    These results can then be read using the function decodeFormants.
    :param audio_filaname: Full path to the wav file, string
    :param results_filename: Full path to the resulting file with the formants
    :param sizeframe: window size
    :param step: time step to compute formants
    :param n_formants: number of formants to look for
    :param max_formant: maximum frequencyof formants to look for
    :returns: nothing

    .. warning:: same shell-injection / quoting caveat as :func:`praat_vuv`.
    """
    command='praat '+path_praat_script+'/FormantsPraat.praat '
    command+=audio_filename + ' '+results_filename+' '
    command+=str(n_formants)+' '+ str(max_formant) + ' '
    command+=str(float(sizeframe)/2)+' '
    command+=str(float(step))
    os.system(command) #formant extraction praat
def read_textgrid_trans(file_textgrid, data_audio, fs, win_trans=0.04):
    """
    This function reads a text file with the text grid with voiced/unvoiced
    decisions then finds the onsets (unvoiced -> voiced) and
    offsets (voiced -> unvoiced) and then reads the audio data to returns
    lists of segments of lenght win_trans around these transitions.
    :param file_textgrid: The text file with the text grid with voicing decisions.
    :param data_audio: the audio signal.
    :param fs: sampling frequency of the audio signal.
    :param win_trans: the transition window lenght, default 0.04
    :returns segments: List with both onset and offset transition segments.
    :returns segments_onset: List with onset transition segments
    :returns segments_offset: List with offset transition segments
    """
    segments=[]
    segments_onset=[]
    segments_offset=[]
    prev_trans=""
    prev_line=0
    with open(file_textgrid) as fp:
        for line in fp:
            line = line.strip('\n')
            # Lines containing '"V"' or '"U"' label an interval boundary; the
            # line just before them carries the boundary's time stamp.
            if line in ('"V"', '"U"'):
                # Convert the boundary time to a sample index.
                transVal=int(float(prev_line)*fs)-1
                segment=data_audio[int(transVal-win_trans*fs):int(transVal+win_trans*fs)]
                segments.append(segment)
                # Classify by the *previous* label: coming from the start or a
                # voiced interval -> onset list, coming from unvoiced -> offset.
                if prev_trans in ('"V"', ""):
                    segments_onset.append(segment)
                elif prev_trans=='"U"':
                    segments_offset.append(segment)
                prev_trans=line
            prev_line=line
    return segments,segments_onset,segments_offset
def decodeF0(fileTxt,len_signal=0, time_stepF0=0):
    """
    Reads the content of a pitch file created with praat_vuv function.
    By default it will return the contents of the file in two arrays,
    one for the actual values of pitch and the other with the time stamps.
    Optionally the lenght of the signal and the time step of the pitch
    values can be provided to return an array with the full pitch contour
    for the signal, with padded zeros for unvoiced segments.
    :param fileTxt: File with the pitch, which can be generated using the function praat_vuv
    :param len_signal: Lenght of the audio signal in
    :param time_stepF0: The time step of pitch values. Optional.
    :returns pitch: Numpy array with the values of the pitch.
    :returns time_voiced: time stamp for each pitch value.
    """
    if os.stat(fileTxt).st_size==0:
        # Empty pitch file: no voiced frames were detected.
        return np.array([0]), np.array([0])
    pitch_data=np.loadtxt(fileTxt)
    if len(pitch_data.shape)>1:
        time_voiced=pitch_data[:,0] # First column is the time stamp vector
        pitch=pitch_data[:,1] # Second column
    elif len(pitch_data.shape)==1: # Only one point of data
        time_voiced=pitch_data[0] # First datum is the time stamp
        pitch=pitch_data[1] # Second datum is the pitch value
    if len_signal>0:
        # Expand the sparse voiced-frame values onto a regular time grid,
        # leaving zeros in unvoiced regions: each pitch value is placed at
        # the grid point closest to its time stamp.
        n_frames=int(len_signal/time_stepF0)
        t=np.linspace(0.0,len_signal,n_frames)
        pitch_zeros=np.zeros(int(n_frames))
        if len(pitch_data.shape)>1:
            for idx,time_p in enumerate(time_voiced):
                argmin=np.argmin(np.abs(t-time_p))
                pitch_zeros[argmin]=pitch[idx]
        else:
            argmin=np.argmin(np.abs(t-time_voiced))
            pitch_zeros[argmin]=pitch
        return pitch_zeros, t
    return pitch, time_voiced
def decodeFormants(fileTxt):
    """
    Read the praat textgrid file for formants and return the arrays.

    :param fileTxt: File with the formants, which can be generated using the '''praat_formants'''
    :returns F1: Numpy array containing the values for the first formant
    :returns F2: Numpy array containing the values for the second formant
    """
    # Bug fix: the file handle was previously opened with open() and never
    # closed (resource leak); use a context manager instead.
    with open(fileTxt) as fid:
        datam = fid.read()
    end_line1 = multi_find(datam, '\n')
    F1 = []
    F2 = []
    # The first 10 newline-delimited lines are header content; payload
    # parsing starts afterwards.
    ji = 10
    while (ji < len(end_line1) - 1):
        line1 = datam[end_line1[ji] + 1:end_line1[ji + 1]]
        # Lines whose content is '3', '4' or '5' appear to mark an entry
        # whose next two lines hold the F1 and F2 values -- confirm against
        # the Praat FormantsPraat.praat output format.
        cond = (line1 in ('3', '4', '5'))
        if (cond):
            F1.append(float(datam[end_line1[ji + 1] + 1:end_line1[ji + 2]]))
            F2.append(float(datam[end_line1[ji + 3] + 1:end_line1[ji + 4]]))
        ji = ji + 1
    F1 = np.asarray(F1)
    F2 = np.asarray(F2)
    return F1, F2
| StarcoderdataPython |
3475345 | <filename>__init__.py
__about__ = """
Django Packages is a directory of reusable apps, sites, tools, and more for your Django projects.
"""
| StarcoderdataPython |
9734854 | <filename>src/clikraken/api/private/list_open_orders.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
clikraken.api.private.list_open_orders
This module queries the OpenOrders method of Kraken's API
and outputs the results in a tabular format.
Licensed under the Apache License, Version 2.0. See the LICENSE file.
"""
import argparse
from decimal import Decimal
from clikraken.api.api_utils import parse_order_res, query_api
from clikraken.clikraken_utils import asset_pair_short
from clikraken.clikraken_utils import _tabulate as tabulate
from clikraken.clikraken_utils import csv
from clikraken.clikraken_utils import process_options
# Help string shared by the --pair option.
pair_help = "asset pair"

# Command-line options for the `olist` sub-command; also consumed by
# process_options() when list_open_orders() is called programmatically.
OPTIONS = (
    (("-p", "--pair"), {"default": None, "help": pair_help}),
    (
        ("-i", "--txid"),
        {
            "default": None,
            "help": "comma delimited list of transaction ids to query info about (20 maximum)",
        },
    ),
)
def list_open_orders(**kwargs):
    """List open orders (programmatic entry point; kwargs mirror OPTIONS)."""
    args = process_options(kwargs, OPTIONS)
    return list_open_orders_api(args)


def list_open_orders_api(args):
    """Query Kraken for open orders and return them sorted by price.

    Buy orders come first, then sell orders, each sorted ascending by
    price; an optional pair filter ('all' disables filtering) is applied.
    """
    # Parameters to pass to the API
    api_params = {
        # TODO
    }
    if args.txid:
        # Query specific orders by transaction id.
        api_params.update({"txid": args.txid})
        res_ol = query_api("private", "QueryOrders", api_params, args)
    else:
        res = query_api("private", "OpenOrders", api_params, args)
        # extract list of orders from API results
        res_ol = res["open"]
    # the parsing is done in an helper function
    ol = parse_order_res(res_ol, ["open"])
    # filter and sort orders by price in each category
    for otype in ol:
        # filter orders based on currency pair
        if "pair" in args and args.pair:
            ol[otype] = [
                odict
                for odict in ol[otype]
                if (
                    odict["pair"] in [args.pair, asset_pair_short(args.pair)]
                    or args.pair == "all"
                )
            ]
        # sort orders by price
        ol[otype] = sorted(ol[otype], key=lambda odict: Decimal(odict["price"]))
    # final list is concatenation of buy orders followed by sell orders
    ol_all = ol["buy"] + ol["sell"]
    return ol_all


def list_open_orders_cmd(args):
    """CLI handler: print the open orders as CSV or a text table."""
    ol_all = list_open_orders_api(args)
    if not ol_all:
        return
    if args.csv:
        print(csv(ol_all, headers="keys"))
    else:
        print(tabulate(ol_all, headers="keys"))


def init(subparsers):
    """Register the `olist`/`ol` sub-command on the argparse subparsers."""
    parser_olist = subparsers.add_parser(
        "olist",
        aliases=["ol"],
        help="[private] Get a list of your open orders",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    for (args, kwargs) in OPTIONS:
        parser_olist.add_argument(*args, **kwargs)
    parser_olist.set_defaults(sub_func=list_open_orders_cmd)
| StarcoderdataPython |
3472718 | <filename>resolwe/permissions/filters.py
""".. Ignore pydocstyle D400.
==================
Permissions Filter
==================
"""
from rest_framework.filters import BaseFilterBackend
from resolwe.permissions.utils import model_has_permissions
class ResolwePermissionsFilter(BaseFilterBackend):
    """Restrict a queryset to the objects the requesting user may access."""

    def filter_queryset(self, request, queryset, view):
        """Apply per-user permission filtering.

        Models without a permission group are returned unfiltered.
        """
        if not model_has_permissions(queryset.model):
            return queryset
        return queryset.filter_for_user(request.user)
| StarcoderdataPython |
9795812 | #!/usr/bin/python
import sys
import time
import RPi.GPIO as GPIO
import os
import json
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
teramon_dir = os.path.dirname(os.path.realpath(__file__))
json_data = open(teramon_dir + "/teramon.json").read()
config = json.loads(json_data)
data_raw = open(config['measurement_save_path']).read()
data = json.loads(data_raw)
GPIO.setup(config['gpio_heat'], GPIO.OUT)
GPIO.setup(config['gpio_light'], GPIO.OUT)
# --- Decide heating / lighting state for this run -------------------------

actualHour = int(time.strftime('%H'))
# "Day" is the inclusive hour window [day_begin, day_end] from the config.
isDay = (actualHour >= config['day_begin']) and (actualHour <= config['day_end'])


def evaluate_heating(period):
    """Return (heat_on, heat_change) for the limit period 'day' or 'night'.

    heat_on     -- desired state of the heating relay
    heat_change -- False when every probe already sits inside its
                   [low, high] band, i.e. the relay must not be touched
    """
    heat_on = False
    heat_change = True
    primary_defined = any(p['primary'] for p in config['probes'].values())
    if primary_defined:
        # Only the primary probe drives the decision: heating switches on
        # when it drops to/below its low limit.
        for name, probe in config['probes'].items():
            if probe['primary'] and data[name]['temp'] <= probe['temp_limits'][period]['low']:
                heat_on = True
    else:
        # Any probe at/below its low limit switches heating on ...
        for name, probe in config['probes'].items():
            if data[name]['temp'] <= probe['temp_limits'][period]['low']:
                heat_on = True
        # ... but any probe at/above its high limit forces it off again.
        for name, probe in config['probes'].items():
            if data[name]['temp'] >= probe['temp_limits'][period]['high']:
                heat_on = False
        # Bug fix: the original day branch set a stray `stillFalse` variable
        # (so the flag was never cleared), and the night branch referenced an
        # undefined `zatimTrue` plus a misspelled 'noght' key, crashing the
        # script -- the "all probes in band" check therefore never worked.
        all_in_band = all(
            probe['temp_limits'][period]['low']
            <= data[name]['temp']
            <= probe['temp_limits'][period]['high']
            for name, probe in config['probes'].items()
        )
        if all_in_band:
            heat_change = False
    return heat_on, heat_change


heatOn, heatChange = evaluate_heating('day' if isDay else 'night')

# The light simply follows the day/night period.
GPIO.output(config['gpio_light'], GPIO.HIGH if isDay else GPIO.LOW)

# Only drive the heating relay when a change is required.
if heatChange:
    GPIO.output(config['gpio_heat'], GPIO.HIGH if heatOn else GPIO.LOW)
| StarcoderdataPython |
6543382 | <filename>backend/polls/models.py<gh_stars>1-10
import datetime
from django.db import models
from django.utils import timezone
# from django.contrib.auth.models import User
class Word(models.Model):
    """A dictionary word with its corpus frequency and length."""
    word = models.CharField(max_length=200)
    freq = models.FloatField()
    length = models.IntegerField()

    def __str__(self):
        return ("%s : %s" % (self.word, str(self.freq)))
class Game(models.Model):
    """A single game round with a target word, invite code and timeout."""
    # target = models.ForeignKey(Word, on_delete=models.PROTECT)
    target = models.CharField(max_length=200)
    invite = models.CharField(max_length=200) # three 4-letter words?
    created_datetime = models.DateTimeField(default=timezone.now)
    timeout = models.DurationField(default=datetime.timedelta(minutes=2))
    state = models.IntegerField(default=0)

    def seconds_remaining(self):
        """Return the seconds until this game times out (negative once expired)."""
        # Bug fix: the original read the nonexistent `self.created_date`
        # (the field is `created_datetime`) and would have returned a
        # timedelta despite the method name promising seconds.
        return (self.created_datetime + self.timeout - timezone.now()).total_seconds()
class Team(models.Model):
    """A team within a game (no extra attributes yet)."""
    game = models.ForeignKey(Game, on_delete=models.CASCADE)


class Player(models.Model):
    """A named participant holding a word contribution for their team."""
    name = models.CharField(max_length=200)
    # word = models.ForeignKey(Word, on_delete=models.PROTECT)
    word = models.CharField(max_length=200)
    word_add = models.BooleanField(default=True) # add or sub
    team = models.ForeignKey(Team, on_delete=models.CASCADE)
    master = models.BooleanField(default=False) # is game master
# class Question(models.Model):
# question_text = models.CharField(max_length=200)
# pub_date = models.DateTimeField('date published')
# def __str__(self):
# return self.question_text
# def was_published_recently(self):
# return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
# class Choice(models.Model):
# question = models.ForeignKey(Question, on_delete=models.CASCADE)
# choice_text = models.CharField(max_length=200)
# votes = models.IntegerField(default=0)
# def __str__(self):
# return self.choice_text
| StarcoderdataPython |
8007052 | <reponame>Oorzhakau/TeamForce_bot
"""Загрузка переменных окружения."""
from environs import Env
env = Env()
env.read_env()
BOT_TOKEN = env.str("BOT_TOKEN")
ADMIN = env.int("ADMIN")
DEBUG = env.list("DEBUG")
DJANGO_ALLOWED_HOSTS = env.str("DJANGO_ALLOWED_HOSTS")
SECRET_KEY = env.str("SECRET_KEY")
POSTGRES_DB = env.str("POSTGRES_DB")
POSTGRES_USER = env.str("POSTGRES_USER")
POSTGRES_PASSWORD = env.str("POSTGRES_PASSWORD")
DB_HOST = env.str("DB_HOST")
DB_PORT = env.str("DB_PORT")
POSTGRES_URI = f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{DB_HOST}:{DB_PORT}/{POSTGRES_DB}"
| StarcoderdataPython |
301049 | <reponame>richardycao/hummingbird_python
class PipelineNode(object):
    """One node of a processing pipeline: a module path plus its parameters."""

    def __init__(self, module_path, params):
        """Remember where the node's module lives and which parameters it takes."""
        self.params = params
        self.module_path = module_path
| StarcoderdataPython |
6587151 | import json
import requests
import urllib.parse
from django.conf import settings
from rest_framework.exceptions import ValidationError
class ClashRoyaleAPI(object):
    """Thin client for the official Clash Royale REST API (v1)."""

    def __init__(self):
        # 'Accept': '*/*' is the requests default; we ask for JSON instead.
        self.host = 'https://api.clashroyale.com/v1'
        # NOTE(review): both bearer tokens below are placeholders ('xxx');
        # real credentials must be configured and should not be hard-coded
        # in source at all.
        if settings.DEBUG:
            self.headers = {
                'Accept': 'application/json',
                'Authorization': 'Bearer xxx'}
        else:
            self.headers = {
                'Accept': 'application/json',
                'Authorization': 'Bearer xxx'}

    @staticmethod
    def validate(player_tag):
        """Reject player tags that do not start with '#'."""
        if not player_tag.startswith('#'):
            raise ValidationError('玩家标签格式不对')

    @staticmethod
    def handle_response(response):
        """Decode a requests.Response into a uniform result/message dict."""
        message = None
        if response.text:
            result_json = json.loads(response.text)
        else:
            # Empty body: report a generic API error message.
            message = '皇室战争接口出错啦'
        if message:
            return {'message': message, 'status': response.status_code}
        return {'results': result_json, 'status': response.status_code}

    # --- Player endpoints -------------------------------------------------
    def upcomingchests(self, player_tag):
        """Get information about a player's upcoming chests."""
        self.validate(player_tag)
        url_player_tag = urllib.parse.quote(player_tag)
        url = self.host + '/players/{}/upcomingchests/'.format(url_player_tag)
        response = requests.get(url, headers=self.headers)
        return self.handle_response(response)

    def players(self, player_tag):
        """Get a player's profile information."""
        self.validate(player_tag)
        url_player_tag = urllib.parse.quote(player_tag)
        url = self.host + '/players/{}/'.format(url_player_tag)
        response = requests.get(url, headers=self.headers)
        return self.handle_response(response)

    def battlelog(self, player_tag):
        """Get a player's recent battle log."""
        self.validate(player_tag)
        url_player_tag = urllib.parse.quote(player_tag)
        url = self.host + '/players/{}/battlelog/'.format(url_player_tag)
        response = requests.get(url, headers=self.headers)
        return self.handle_response(response)

    # --- Clan endpoints (not implemented yet) -----------------------------
    # --- Card endpoints (not implemented yet) -----------------------------
    # --- Match endpoints (not implemented yet) ----------------------------
    # --- Ranking endpoints (not implemented yet) --------------------------
    # --- Tournament endpoints (not implemented yet) -----------------------


if __name__ == "__main__":
    # Ad-hoc smoke test; '#xxx' is a placeholder player tag.
    c_r_api = ClashRoyaleAPI()
    res = c_r_api.upcomingchests('#xxx')
| StarcoderdataPython |
11304676 | <gh_stars>0
from .. import db
from .. import bcrypt
import datetime
# from app.main.models.blacklist import BlacklistToken
from app.main.models.Flat import Flat
from app.main.models.Society import Society
from ..config import key
import jwt
from flask_bcrypt import generate_password_hash, check_password_hash
class User(db.Model):
    """SQLAlchemy model of an application user for the visitor-management system."""

    __tablename__ = 'user_table'
    __table_args__ = {'schema': 'visitor_management_schema'}

    # Identity and contact details
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), unique=True, nullable=False)
    username = db.Column(db.String(50), nullable=False)
    first_name = db.Column(db.String(50), nullable=False)
    middle_name = db.Column(db.String(50), nullable=True)
    last_name = db.Column(db.String(50), nullable=False)
    # Stored as a bcrypt hash (see hash_password below), never plaintext.
    password = db.Column(db.String(100), nullable=False)
    # society_id = db.relationship(Society, backref = 'user_table')
    # Plain integer foreign keys; the relationship() versions are kept
    # commented out for reference.
    flat_id = db.Column(db.Integer, nullable=False)
    society_id = db.Column(db.Integer, nullable=False)
    # flat_id = db.relationship(Flat, backref='flat_details')
    isadmin = db.Column(db.String(50), nullable=False)
    email_confirmed = db.Column(db.Boolean, nullable=True, default=False)
    user_entity = db.Column(db.Integer, nullable=False)
    # Optional government-ID details and profile photo.
    # NOTE(review): photo is stored as text — presumably a base64 blob; confirm.
    identification_type = db.Column(db.String(50), nullable=True)
    identification_no = db.Column(db.String(50), nullable=True)
    photo = db.Column(db.Text(), nullable=True)

    def hash_password(self):
        # Replace the plaintext password currently held on the instance
        # with its bcrypt hash, in place.
        self.password = generate_password_hash(self.password).decode('utf8')

    def check_password(self, password):
        # True when *password* matches the stored bcrypt hash.
        return check_password_hash(self.password, password)

    # @staticmethod
    # def encode_auth_token(user_id):
    #     """
    #     Generates the Auth Token
    #     :return: string
    #     """
    #     try:
    #         payload = {
    #             'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),
    #             'iat': datetime.datetime.utcnow(),
    #             'sub': user_id
    #         }
    #         return jwt.encode(
    #             payload,
    #             key,
    #             algorithm='HS256'
    #         )
    #     except Exception as e:
    #         return e
    # @staticmethod
    # def decode_auth_token(auth_token):
    #     """
    #     Decodes the auth token
    #     :param auth_token:
    #     :return: integer|string
    #     """
    #     try:
    #         payload = jwt.decode(auth_token, key)
    #         is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
    #         if is_blacklisted_token:
    #             return 'Token blacklisted. Please log in again.'
    #         else:
    #             return payload['sub']
    #     except jwt.ExpiredSignatureError:
    #         return 'Signature expired. Please log in again.'
    #     except jwt.InvalidTokenError:
    #         return 'Invalid token. Please log in again.'

    def __repr__(self):
        return "<User '{}'>".format(self.email)
| StarcoderdataPython |
1821538 | <filename>vega/search_space/fine_grained_space/networks/__init__.py
from .mobilenetV3tiny import MobileNetV3Tiny
from .resnet import *
from .sr import *
| StarcoderdataPython |
11203620 | <reponame>iorala/PYTH1
# Exercises on conditionals
# 1. Even and odd
print("1. Gerade und Ungerade")
#
# Read an integer from the user and report whether it is even or odd.
zahl = int(input("Geben Sie eine Zahl ein: "))
if zahl % 2 == 0:
    print("Ihre Zahl ist gerade")
else:
    print("Ihre Zahl ist ungerade")
#
# <NAME>
print("\n<NAME>")
#
# Convert human years to dog years: each of the first two human years counts
# as 10.5 dog years, every further year as 4 dog years. Negative (or zero)
# input is rejected with an error message.
mj = int(input("Geben Sie eine Anzahl Menschenjahre ein: "))
if mj <= 0:
    print("Bitte kein negative Jahre eingeben, die Welt ist schon negativ genug!")
else:
    # BUG FIX: the result line is now printed only for valid input — the
    # original's `hundejahre = 0` pre-initialisation indicates it printed
    # "0 Menschenjahre entsprechen 0 Hundejahren" even after the error above.
    if mj <= 2:
        hundejahre = mj * 10.5
    else:
        hundejahre = 21 + (mj - 2) * 4
    print(mj, "Menschenjahre entsprechen", hundejahre, "Hundejahren")
#
# 3. Vowels
print("\n3. Vokale")
# Read a single letter and classify it as vowel, consonant, or — for "y" —
# sometimes-vowel/sometimes-consonant.
vokale = ("a", "e", "i", "o", "u")
buchstabe = input("Geben Sie einen Buchstaben ein: ")
# Robustness: compare case-insensitively so "A" counts as a vowel too;
# the original character is still echoed back unchanged.
if buchstabe.lower() == "y":
    print("Der Buchstabe", buchstabe, "ist manchmal eine Vokal und manchmal ein Konsonant")
elif buchstabe.lower() in vokale:
    print("Der Buchstabe", buchstabe, "ist ein Vokal")
else:
    print("Der Buchstabe", buchstabe, "ist ein Konsonant")
3242163 | <gh_stars>0
import time
def test_is_opencart_works(get_driver):
    # Smoke test: the storefront heading must be present once the page settles.
    driver = get_driver
    time.sleep(5)
    store_heading = driver.find_elements_by_xpath('//h1/a[text()="Your Store"]')
    assert store_heading
| StarcoderdataPython |
4874275 | <filename>FBA_tutorials/utils/flux_pie_plot_function.py
# coding: utf-8
# In[ ]:
def flux_pie_plot(df):
    """Render an annular ("pie") Bokeh plot of metabolic fluxes per subsystem.

    Each subsystem gets one big wedge coloured by its subsystem type, with
    small inner bar-wedges showing the flux of each cell line on a log scale.
    Displays the figure inline in a notebook and returns the show() handle.

    NOTE(review): assumes *df* carries columns ``subSystems``, ``ss_type`` and
    per-cell-line flux columns ``MCF7``, ``MCF7_T``, ``MCF7_F``, ``LTED`` —
    confirm against the caller.
    """
    import pandas as pd
    import numpy as np
    from math import log, sqrt
    from collections import OrderedDict
    from bokeh.plotting import figure, show, output_notebook
    from bokeh.palettes import brewer
    import warnings

    # Change name of subSystem in something shorter
    for i in range(len(df.subSystems.values)):
        if 'Glycolysis/gluconeogenesis' in df.subSystems.values[i]:
            df.at[i, 'subSystems'] = 'Glycolysis'
        if 'Squalene and cholesterol synthesis' in df.subSystems.values[i]:
            df.at[i, 'subSystems'] = 'Cholesterol synthesis'

    # Fixed colour per cell line for the inner bar-wedges and the legend.
    cell_line_color = OrderedDict([
        ("MCF7", 'black'),
        ("MCF7_T", 'crimson'),
        ("MCF7_F", 'gold'),
        ("LTED", 'blueviolet'),
    ])

    # In this specification I assume no more than 2 subSystem types
    ss_type_color = {}
    ss_c_list = ['lightskyblue', 'lightsalmon', ]
    for i in range(len(df.ss_type.unique())):
        ss_type_color[df.ss_type.unique()[i]] = ss_c_list[i]
    ss_type_color  # no-op expression (leftover from the notebook origin)

    width = 800
    height = 800
    inner_radius = 90
    outer_radius = 300 - 10

    # Linear map from sqrt(log(flux * 1E4)) onto [inner_radius, outer_radius].
    minr = sqrt(log(1000 * 1E4))
    maxr = sqrt(log(0.001 * 1E4))
    a = (outer_radius - inner_radius) / (minr - maxr)
    b = inner_radius - a * maxr

    def rad(mic):
        # Map a flux value onto a radius on the log scale defined above.
        return a * np.sqrt(np.log(mic * 1E4)) + b

    big_angle = 2.0 * np.pi / (len(df) + 1)
    small_angle = big_angle / 7

    #_______________________________________________________________________________
    p = figure(plot_width=width, plot_height=height, title="",
               x_axis_type=None, y_axis_type=None,
               x_range=(-500, 500), y_range=(-500, 500),
               min_border=0, outline_line_color="white",
               background_fill_color="white")
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    #_______________________________________________________________________________
    # # subSystem TYPE ('Central Carbon Metabolism' or 'Peripheral metabolism')
    angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
    colors = [ss_type_color[ss_type] for ss_type in df.ss_type]
    p.annular_wedge(
        0, 0, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
    )
    #_______________________________________________________________________________
    # subsystems BARS small wedges (bar plots)
    # NOTE(review): this first wedge colours as MCF7_F but plots df.LTED —
    # probably intended to be rad(df.MCF7_F); confirm before relying on it.
    p.annular_wedge(0, 0, inner_radius, rad(df.LTED),
                    -big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
                    color=cell_line_color['MCF7_F'])
    p.annular_wedge(0, 0, inner_radius, rad(df.MCF7),
                    -big_angle+angles+3.5*small_angle, -big_angle+angles+4.5*small_angle,
                    color=cell_line_color['MCF7'])
    p.annular_wedge(0, 0, inner_radius, rad(df.MCF7_T),
                    -big_angle+angles+2*small_angle, -big_angle+angles+3*small_angle,
                    color=cell_line_color['MCF7_T'])
    p.annular_wedge(0, 0, inner_radius, rad(df.LTED),
                    -big_angle+angles+0.5*small_angle, -big_angle+angles+1.5*small_angle,
                    color=cell_line_color['LTED'])
    #_______________________________________________________________________________
    # circular axes
    labels = np.power(10.0, np.arange(-3, 4))
    radii = a * np.sqrt(np.log(labels * 1E4)) + b
    p.circle(0, 0, radius=radii, fill_color=None, line_color="white")
    # y-axis labels
    p.text(0, radii[:-1], [str(r) for r in labels[:-1]],
           text_font_size="10pt", text_align="center", text_baseline="middle")
    # radial axes
    p.annular_wedge(0, 0, inner_radius-10, outer_radius+10,
                    -big_angle+angles, -big_angle+angles, color="black")
    #_______________________________________________________________________________
    # Inverted radial scale used only to place the subsystem labels.
    minr_i = sqrt(log(0.001 * 1E4))
    maxr_i = sqrt(log(1000 * 1E4))
    a_i = (outer_radius - inner_radius) / (minr_i - maxr_i)
    b_i = inner_radius - a_i * maxr_i
    big_angle_i = 2.0 * np.pi / (len(df) + 1)
    small_angle_i = big_angle / 7
    radii = a_i * np.sqrt(np.log(labels * 1E4)) + b_i
    # subSystem labels
    xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
    yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
    label_angle = np.array(-big_angle/2+angles)
    label_angle[label_angle < -np.pi/2] += np.pi  # easier to read labels on the left side
    for i in range(len(xr)):
        # Left/right text alignment so labels always point away from the circle.
        if xr[i] > 0:
            p.text(xr[i], yr[i], pd.Series(df.subSystems.loc[i]), angle=label_angle[i],
                   text_font_size="10pt", text_align="left", text_baseline="middle")
        else:
            p.text(xr[i], yr[i], pd.Series(df.subSystems.loc[i]), angle=label_angle[i],
                   text_font_size="10pt", text_align="right", text_baseline="middle")
    #_______________________________________________________________________________
    # LEGEND
    # subSystem type
    p.circle([+140, +140], [+350, +370], color=list(ss_type_color.values()), radius=5)
    p.text([+160, +160], [+350, +370], text=[gr for gr in ss_type_color.keys()],
           text_font_size="10pt", text_align="left", text_baseline="middle")
    # cell lines
    p.rect([-40, -40, -40, -40], [30, 10, -10, -30], width=30, height=13,
           color=list(cell_line_color.values()))
    p.text([-15, -15, -15, -15], [30, 10, -10, -30], text=list(cell_line_color),
           text_font_size="9pt", text_align="left", text_baseline="middle")
    #_______________________________________________________________________________
    warnings.filterwarnings("ignore")
    output_notebook()
    return show(p)
| StarcoderdataPython |
8189163 | import factory.random
import pytest
from rest_framework.test import APIClient
from signups.tests.factories import SignupFactory, SignupTargetFactory, UserFactory
@pytest.fixture(autouse=True)
def no_more_mark_django_db(transactional_db):
    # Depending on ``transactional_db`` gives every test database access,
    # removing the need for explicit ``pytest.mark.django_db`` markers.
    pass
@pytest.fixture(autouse=True)
def set_random_seed():
    # Re-seed factory_boy before every test so factory output is deterministic.
    factory.random.reseed_random(777)
@pytest.fixture(autouse=True)
def email_setup(settings):
    # Keep outgoing mail in memory (django.core.mail.outbox) instead of
    # actually sending it, and make sure notification emails are generated.
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    settings.NOTIFICATIONS_ENABLED = True
@pytest.fixture
def api_client():
    # Unauthenticated DRF test client.
    return APIClient()
@pytest.fixture
def user_api_client(user):
    # DRF test client pre-authenticated as ``user``; the user object is also
    # attached to the client for convenient access inside tests.
    api_client = APIClient()
    api_client.force_authenticate(user=user)
    api_client.user = user
    return api_client
@pytest.fixture
def user():
    # A freshly created user record.
    return UserFactory()
@pytest.fixture
def signup_target():
    # A target that users can sign up for.
    return SignupTargetFactory()
@pytest.fixture
def signup(signup_target, user):
    # A signup linking ``user`` to ``signup_target``.
    return SignupFactory(user=user, target=signup_target)
| StarcoderdataPython |
11295153 | #!/usr/bin/python3
import os
import subprocess
class ANTS_Route():
    """Configures Linux NAT routing between a server and a client interface
    (iptables SNAT/DNAT rules plus static IP routes and ARP entries)."""

    def __init__(self, server_ip, client_ip):
        """Remember the endpoint IPs and enumerate network interfaces.

        Falls back to built-in defaults when an IP is not supplied.
        """
        # Set a default server ip if none is given
        if server_ip is None:
            self.server_ip = "10.1.1.120"
        else:
            self.server_ip = server_ip
        # Set a default client ip if none is given.
        # BUG FIX: the original used assignment ("=") instead of a
        # comparison here, which is a SyntaxError in Python.
        if client_ip is None:
            self.client_ip = "10.1.11.115"
        else:
            self.client_ip = client_ip
        self.server_device_name = None
        self.server_device_mac = None
        self.client_device_name = None
        self.client_device_mac = None
        # All network interfaces except loopback.
        self.device_list = os.listdir("/sys/class/net")
        self.device_list.remove("lo")

    def flush_routing(self):
        """Flush every NAT rule currently configured in iptables."""
        self.flush_args = "iptables -t nat -F".split(" ")
        # BUG FIX: subprocess.call() blocks until the command exits and
        # returns an int, so the original poll()/returncode busy-loop raised
        # AttributeError; the exit code is simply stored instead.
        self.flush_proc = subprocess.call(self.flush_args)
        print("iptables settings now cleared.")

    def get_mac_addrs(self, server_device, client_device):
        """Pick the interfaces to use and read their MAC addresses from sysfs."""
        # BUG FIX: both checks below used "=" instead of "is None" (a
        # SyntaxError) and the second one also misspelled client_device.
        if server_device is None:
            self.server_device_name = self.device_list[2]
        else:
            self.server_device_name = server_device
        if client_device is None:
            self.client_device_name = self.device_list[0]
        else:
            self.client_device_name = client_device
        print("{0} and {1} will now be configured for routing.\n".format(self.server_device_name, self.client_device_name))
        with open("/sys/class/net/{0}/address".format(self.server_device_name)) as f:
            self.server_device_mac = f.readline().rstrip("\n")
        with open("/sys/class/net/{0}/address".format(self.client_device_name)) as f:
            self.client_device_mac = f.readline().rstrip("\n")
        print("Server device MAC address is {0}\n".format(self.server_device_mac))
        print("Client device MAC address is {0}\n".format(self.client_device_mac))

    def configure_device_ips(self):
        """Assign the configured IPs to the chosen interfaces via `ip addr`."""
        print("Setting up IP addresses...\n")
        self.server_ip_args = "ip addr add {0}/24 dev {1}".format(self.server_ip, self.server_device_name).split(" ")
        self.client_ip_args = "ip addr add {0}/24 dev {1}".format(self.client_ip, self.client_device_name).split(" ")
        subprocess.call(self.server_ip_args)
        subprocess.call(self.client_ip_args)

    def configure_iptables_rules(self):
        """Install the SNAT/DNAT rules that rewrite traffic between endpoints."""
        print("Configuring iptables...\n")
        self.iptables_postrouting_one = "iptables -t nat -A POSTROUTING -s {0} -d 10.2.11.115 -j SNAT --to-source 10.2.1.120".format(self.server_ip).split(" ")
        self.iptables_prerouting_one = "iptables -t nat -A PREROUTING -d 10.2.1.120 -j DNAT --to-destination {0}".format(self.server_ip).split(" ")
        self.iptables_postrouting_two = "iptables -t nat -A POSTROUTING -s {0} -d 10.2.1.120 -j SNAT --to-source 10.2.11.115".format(self.client_ip).split(" ")
        self.iptables_prerouting_two = "iptables -t nat -A PREROUTING -d 10.2.11.115 -j DNAT --to-destination {0}".format(self.client_ip).split(" ")
        subprocess.call(self.iptables_postrouting_one)
        subprocess.call(self.iptables_prerouting_one)
        subprocess.call(self.iptables_postrouting_two)
        subprocess.call(self.iptables_prerouting_two)

    def list_iptables_config(self):
        """Print the current NAT table for inspection."""
        print("iptables configuration status:\n")
        self.iptables_list_args = "iptables -t nat -L".split(" ")
        subprocess.call(self.iptables_list_args)

    def add_routes(self):
        """Add the static routes and ARP entries linking the two interfaces."""
        print("Adding routes for {0} ({1}) and {2} ({3})\n".format(self.server_device_name, self.server_device_mac, self.client_device_name, self.client_device_mac))
        self.ip_route_one_args = "ip route add 10.2.11.115 dev {0}".format(self.server_device_name).split(" ")
        self.arp_one_args = "arp -i {0} -s 10.2.11.115 {1}".format(self.server_device_name, self.client_device_mac).split(" ")
        self.ip_route_two_args = "ip route add 10.2.1.120 dev {0}".format(self.client_device_name).split(" ")
        self.arp_two_args = "arp -i {0} -s 10.2.1.120 {1}".format(self.client_device_name, self.server_device_mac).split(" ")
        subprocess.call(self.ip_route_one_args)
        subprocess.call(self.arp_one_args)
        subprocess.call(self.ip_route_two_args)
        subprocess.call(self.arp_two_args)
| StarcoderdataPython |
1890693 | <reponame>basilboli/tagli<gh_stars>0
# CONFIGURATION FILE
# Application settings for the "tagli" project.
DEBUG = True  # development mode; disable in production
DATABASE = "tagli"
# NOTE(review): hard-coded secret — should come from the environment in production.
SECRET = "tagli"
GOOGLE_MAP_KEY = "BLABLA"  # placeholder API key
# Twitter OAuth credentials (left empty here)
TWITTER = dict(
    consumer_key='',
    consumer_secret=''
)
# Outgoing mail configuration (Gmail SMTP over SSL)
EMAIL_FROM="BLABLA"
MAIL_SERVER = "smtp.gmail.com"
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = "BLABLA"
MAIL_PASSWORD = "<PASSWORD>"
DOMAIN_NAME = "localhost:5000"
#EMAIL CONFIGURATION OPTIONS
EMAIL_CONF_SUBJ_FR = "Bienvenue chez Tagli"
EMAIL_PWD_FR = "<PASSWORD> de passe "
SITE_URL = "http://" + DOMAIN_NAME
class UserStatus:
    """Enumeration of user account states."""
    # BUG FIX: the original unpacked a *set* literal
    # ({"UNCONFIRMED", "BLOCKED", "ACTIVE", "INACTIVE"}) into the four names.
    # Set iteration order is arbitrary, so each constant could receive any of
    # the four strings on a given run. Explicit assignments pin the intended
    # positional mapping.
    Default = "UNCONFIRMED"
    Blocked = "BLOCKED"
    Active = "ACTIVE"
    Inactive = "INACTIVE"
1972813 | """
Created on Fri March 22, 2019
@author: <NAME>
"""
import scipy.io.wavfile
import numpy as np
from sklearn import preprocessing
if __name__ == "__main__":
"""
Script that gets the raw audio from the IEMOCAP dataset. Should be executed only once to get the FC_raw_audio.csv
file, which contains the ids and audio samples for the data that is used by our model. We truncated/zero-padded
everything to 150.000 samples
"""
# output file
out_file = "../data/processed-data/FC_raw_audio.npy"
# reading all of the ids that are going to be used
with open("../data/processed-data/FC_ordered_ids.txt") as f:
ordered_ids = f.readlines()
file_count = 0
# every audio should have the same length (150.000) for the batches
audio_data = np.zeros((len(ordered_ids), 150000))
with open(out_file, "w") as f:
# finding the corresponding .wav files specified in ordered_ids
for row, id in enumerate(ordered_ids):
current_session = id[4]
partial_id = id[0:-6]
audio_file = "../data/raw-data/IEMOCAP_full_release/Session" + current_session + "/sentences/wav/" + \
partial_id + "/" + id[0:-1] + ".wav"
# reading the audio file
_, samples = scipy.io.wavfile.read(audio_file)
# standardizing the audio samples to have zero mean and unit variance
samples = preprocessing.scale(samples.astype(float))
# zero padding the audio samples
if samples.shape[0] < 150000:
len_pad = 150000 - samples.shape[0]
zero_pad = np.zeros(len_pad)
padded_samples = np.concatenate((samples, zero_pad))
audio_data[row, :] = padded_samples
elif samples.shape[0] > 150000:
samples = samples[:150000]
audio_data[row, :] = samples
file_count += 1
if file_count % 100 == 0:
print(str(round(100 * file_count/len(ordered_ids), 2)) + "% of the files read...")
print("Done!")
# saving the padded audio data
np.save(out_file, audio_data)
| StarcoderdataPython |
3557741 | <filename>devkit_road/python/simpleExample_generateBEVResults.py
#!/usr/bin/env python
#
# THE KITTI VISION BENCHMARK SUITE: ROAD BENCHMARK
#
# File: simpleExample_transformTestResults2BEV.py
#
# Copyright (C) 2013
# Honda Research Institute Europe GmbH
# Carl-Legien-Str. 30
# 63073 Offenbach/Main
# Germany
#
# UNPUBLISHED PROPRIETARY MATERIAL.
# ALL RIGHTS RESERVED.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
import os, sys
import computingPipeline, transform2BEV
#########################################################################
# test script to process testing data in perspective domain and
# transform the results to the metric BEV
#########################################################################
if __name__ == "__main__":
#datasetDir = '/hri/storage/user/rtds/KITTI_Road_Data'
#outputDir = '/hri/storage/user/rtds/KITTI_Road_Data/test_baseline_bev'
# check for correct number of arguments.
if len(sys.argv)<2:
print "Usage: python simpleExample_generateBEVResults.py <datasetDir> <outputDir>"
print "<datasetDir> = base directory of the KITTI Road benchmark dataset (has to contain training and testing), e.g., /home/elvis/kitti_road/"
print "<outputDir> = Here the baseline results will be saved, e.g., /home/elvis/kitti_road/results/"
sys.exit(1)
# parse parameters
datasetDir = sys.argv[1]
assert os.path.isdir(datasetDir), 'Error <datasetDir>=%s does not exist' %datasetDir
if len(sys.argv)>2:
outputDir = sys.argv[2]
else:
# default
outputDir = os.path.join(datasetDir, 'results')
# path2data
testData_pathToCalib = os.path.join(datasetDir, 'testing/calib')
outputDir_perspective = os.path.join(outputDir, 'segmentation_perspective_test')
outputDir_bev = os.path.join(outputDir, 'segmentation_bev_test')
# Run computeBaseline script to generate example classification results on testing set
# Replace by your algorithm to generate real results
trainDir = os.path.join(datasetDir, 'training')
testDir = os.path.join(datasetDir, 'testing')
computingPipeline.main(testDir, outputDir_perspective)
# Convert baseline in perspective space into BEV space
# If your algorithm provides results in perspective space,
# you need to run this script before submission!
inputFiles = os.path.join(outputDir_perspective, '*.png')
transform2BEV.main(inputFiles, testData_pathToCalib, outputDir_bev)
# now zip the contents in the directory 'outputDir_bev' and upload
# the zip file to the KITTI server
| StarcoderdataPython |
3218366 | <gh_stars>10-100
import numpy as np
import tensorflow as tf
import itertools
import math
class TFStringKernel(object):
"""
Code to run the SSK of Moss et al. 2020 on a GPU
"""
def __init__(self, _gap_decay=1.0, _match_decay=1.0,batch_size=1000,
_order_coefs=[1.0], alphabet = [], maxlen=0,normalize=True):
self._gap_decay = _gap_decay
self._match_decay = _match_decay
self._order_coefs = _order_coefs
self.alphabet = alphabet
self.batch_size = batch_size
self.normalize = normalize
# build a lookup table of the alphabet to encode input strings
self.table = tf.lookup.StaticHashTable(
initializer=tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(["PAD"]+alphabet),
values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)
self.maxlen = maxlen
    def Kdiag(self, X):
        """Return the kernel diagonal k(x, x) for every string in *X*.

        X looks like np.array([[s1],[s2],[s3]]) where s1 is a string with
        spaces between characters.
        """
        # check if string is not longer than max length
        observed_maxlen = max([len(x[0].split(" ")) for x in X])
        if observed_maxlen > self.maxlen:
            raise ValueError("An input string is longer that max-length so refit the kernel with a larger maxlen param")
        # if normalizing then diagonal will just be ones
        if self.normalize:
            return np.ones(X.shape[0])
        else:
            # otherwise have to calc
            # first split up strings and pad to fixed length and prep for gpu
            # pad until all same length
            X = tf.strings.split(tf.squeeze(tf.convert_to_tensor(X), 1)).to_tensor("PAD")
            # pad until all have length of self.maxlen
            if X.shape[1] < self.maxlen:
                paddings = tf.constant([[0, 0,], [0, self.maxlen - X.shape[1]]])
                X = tf.pad(X, paddings, "CONSTANT", constant_values="PAD")
            # X has shape (#strings, # characters in longest string)
            # now map from chars to indicies
            X = self.table.lookup(X)
            return self._diag_calculations(X)[0]
def K(self, X, X2=None):
# input of form X = np.array([[s1],[s2],[s3]])
# check if symmetric (no provided X2), if so then only need to calc upper gram matrix
symmetric = True if (X2 is None) else False
# check if input strings are longer than max allowed length
observed_maxlen = max([len(x[0].split(" ")) for x in X])
if not symmetric:
observed_maxlen_2 = max([len(x[0].split(" ")) for x in X])
observed_maxlen = max(observed_maxlen,observed_maxlen_2)
if observed_maxlen > self.maxlen:
raise ValueError("An input string is longer that max-length so refit the kernel with a larger maxlen param")
# Turn our inputs into lists of integers using one-hot embedding
# pad until all same length
X = tf.strings.split(tf.squeeze(tf.convert_to_tensor(X),1)).to_tensor("PAD")
# pad until all have length of self.maxlen
if X.shape[1]<self.maxlen:
paddings = tf.constant([[0, 0,], [0, self.maxlen-X.shape[1]]])
X = tf.pad(X, paddings, "CONSTANT",constant_values="PAD")
X = self.table.lookup(X)
if symmetric:
X2 = X
else:
# pad until all same length
X2 = tf.strings.split(tf.squeeze(tf.convert_to_tensor(X2),1)).to_tensor("PAD")
# pad until all have length of self.maxlen
if X2.shape[1]<self.maxlen:
paddings = tf.constant([[0, 0,], [0, self.maxlen-X2.shape[1]]])
X2 = tf.pad(X2, paddings, "CONSTANT",constant_values="PAD")
X2 = self.table.lookup(X2)
# Make D: a upper triangular matrix over decay powers.
tf_tril = tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0)
power = [[0]*i+list(range(0,self.maxlen-i)) for i in range(1,self.maxlen)]+[[0]*self.maxlen]
tf_power=tf.constant(np.array(power).reshape(self.maxlen,self.maxlen), dtype=tf.float64) + tf_tril
tf_tril = tf.transpose(tf_tril)-tf.eye(self.maxlen,dtype=tf.float64)
tf_gap_decay = tf.constant(self._gap_decay,dtype=tf.float64)
gaps = tf.fill([self.maxlen, self.maxlen], tf_gap_decay)
D = tf.pow(gaps*tf_tril, tf_power)
dD_dgap = tf.pow((tf_tril * gaps), (tf_power - 1.0)) * tf_tril * tf_power
#if needed calculate the values needed for normalization
if self.normalize:
X_diag_Ks, X_diag_gap_grads, X_diag_match_grads, X_diag_coef_grads = self._diag_calculations(X)
if not symmetric:
X2_diag_Ks, X2_diag_gap_grads, X2_diag_match_grads, X2_diag_coef_grads = self._diag_calculations(X2)
# Initialize return values
k_results = np.zeros(shape=(len(X), len(X2)))
gap_grads = np.zeros(shape=(len(X), len(X2)))
match_grads = np.zeros(shape=(len(X), len(X2)))
coef_grads = np.zeros(shape=(len(X), len(X2), len(self._order_coefs)))
# prepare batches to send to _k
# get indicies of all possible pairings from X and X2
# this way allows maximum number of kernel calcs to be squished onto the GPU (rather than just doing individual rows of gram)
tuples = list(itertools.product(range(X.shape[0]), range(X2.shape[0])))
# if symmetric only need to calc upper gram matrix
if symmetric:
tuples = [t for t in tuples if t[0]<=t[1]]
num_batches = math.ceil(len(tuples)/self.batch_size)
for i in range(num_batches):
tuples_batch = tuples[self.batch_size*i:self.batch_size*(i+1)]
X_batch_indicies = [t[0] for t in tuples_batch]
X2_batch_indicies = [t[1] for t in tuples_batch]
# collect strings for this batch
X_batch = tf.gather(X,X_batch_indicies,axis=0)
X2_batch = tf.gather(X2,X2_batch_indicies,axis=0)
result = self._k(X_batch, X2_batch,D,dD_dgap)
# this bit is probably slow, should vectorize
# put results into the right places in the return values and normalize if required
for i in range(0,len(tuples_batch)):
if not self.normalize:
k_results[tuples_batch[i][0],tuples_batch[i][1]] =result[0][i]
gap_grads[tuples_batch[i][0],tuples_batch[i][1]] =result[1][i]
match_grads[tuples_batch[i][0],tuples_batch[i][1]] =result[2][i]
coef_grads[tuples_batch[i][0],tuples_batch[i][1],:] =result[3][i]
else:
if symmetric:
k_result_norm, gap_grad_norm, match_grad_norm, coef_grad_norm = self._normalize(result[0][i], result[1][i], result[2][i] ,result[3][i],
X_diag_Ks[tuples_batch[i][0]], X_diag_Ks[tuples_batch[i][1]],
X_diag_gap_grads[tuples_batch[i][0]], X_diag_match_grads[tuples_batch[i][0]],X_diag_coef_grads[tuples_batch[i][0]],
X_diag_gap_grads[tuples_batch[i][1]], X_diag_match_grads[tuples_batch[i][1]],X_diag_coef_grads[tuples_batch[i][1]])
else:
k_result_norm, gap_grad_norm, match_grad_norm, coef_grad_norm = self._normalize(result[0][i], result[1][i], result[2][i] ,result[3][i],
X_diag_Ks[tuples_batch[i][0]], X2_diag_Ks[tuples_batch[i][1]],
X_diag_gap_grads[tuples_batch[i][0]], X_diag_match_grads[tuples_batch[i][0]],X_diag_coef_grads[tuples_batch[i][0]],
X2_diag_gap_grads[tuples_batch[i][1]], X2_diag_match_grads[tuples_batch[i][1]],X2_diag_coef_grads[tuples_batch[i][1]])
k_results[tuples_batch[i][0],tuples_batch[i][1]] = k_result_norm
gap_grads[tuples_batch[i][0],tuples_batch[i][1]] = gap_grad_norm
match_grads[tuples_batch[i][0],tuples_batch[i][1]] = match_grad_norm
coef_grads[tuples_batch[i][0],tuples_batch[i][1],:] = coef_grad_norm
# if symmetric then need to fill in rest of matrix (lower gram)
if symmetric:
for i in range(X.shape[0]):
for j in range(i):
k_results[i, j] = k_results[j, i]
gap_grads[i, j] = gap_grads[j, i]
match_grads[i, j] = match_grads[j, i]
coef_grads[i, j,:] = coef_grads[j, i,:]
return k_results, gap_grads, match_grads, coef_grads
    def _diag_calculations(self, X):
        """
        Calculate the K(x,x) values first because
        they are used in normalization.
        This is pre-normalization (otherwise diag is just ones)
        This function is not to be called directly, as requires preprocessing on X
        (X must already be an integer-encoded, padded tensor).
        """
        # Make D: a upper triangular matrix over decay powers.
        # D and dD_dgap unroll the subsequence recursion so _k can vectorize it.
        tf_tril = tf.linalg.band_part(tf.ones((self.maxlen, self.maxlen), dtype=tf.float64), -1, 0)
        power = [[0]*i + list(range(0, self.maxlen - i)) for i in range(1, self.maxlen)] + [[0]*self.maxlen]
        tf_power = tf.constant(np.array(power).reshape(self.maxlen, self.maxlen), dtype=tf.float64) + tf_tril
        tf_tril = tf.transpose(tf_tril) - tf.eye(self.maxlen, dtype=tf.float64)
        tf_gap_decay = tf.constant(self._gap_decay, dtype=tf.float64)
        gaps = tf.fill([self.maxlen, self.maxlen], tf_gap_decay)
        D = tf.pow(gaps*tf_tril, tf_power)
        dD_dgap = tf.pow((tf_tril * gaps), (tf_power - 1.0)) * tf_tril * tf_power
        # initialize return values
        k_result = np.zeros(shape=(len(X)))
        gap_grads = np.zeros(shape=(len(X)))
        match_grads = np.zeros(shape=(len(X)))
        coef_grads = np.zeros(shape=(len(X), len(self._order_coefs)))
        # All set up. Proceed with kernel matrix calculations (in batches if required)
        num_batches = math.ceil(len(X)/self.batch_size)
        for i in range(num_batches):
            X_batch = X[self.batch_size*i:self.batch_size*(i+1), :]
            # k(x, x) for this batch: compare each string against itself.
            result = self._k(X_batch, X_batch, D, dD_dgap)
            k_result[self.batch_size*i:self.batch_size*(i+1)] = result[0].numpy()
            gap_grads[self.batch_size*i:self.batch_size*(i+1)] = result[1].numpy()
            match_grads[self.batch_size*i:self.batch_size*(i+1)] = result[2].numpy()
            coef_grads[self.batch_size*i:self.batch_size*(i+1), :] = result[3].numpy()
        return (k_result, gap_grads, match_grads, coef_grads)
    def _k(self, X1, X2, D, dD_dgap):
        """
        TF code for the vectorized string-kernel calculation.
        Following notation from Beck (2017), i.e. tensors S, D, Kpp, Kp.

        Input is two integer-encoded tensors of shape (# strings, # characters);
        we compute the pair-wise kernel between corresponding elements (i.e. n
        kernel evaluations for two lists of length n). D and dD_dgap are the
        matrices that unroll the gap recursion and allow vectorization
        (precomputed by the caller).

        Returns (k, dk_dgap, dk_dmatch, dk_dcoefs, Ki) — note the extra fifth
        element Ki (per-order subkernel values); callers typically use only the
        first four.
        """
        # init — snapshot current hyperparameters as float64 TF constants
        tf_gap_decay = tf.constant(self._gap_decay,dtype=tf.float64)
        tf_match_decay = tf.constant(self._match_decay,dtype=tf.float64)
        tf_order_coefs = tf.convert_to_tensor(self._order_coefs, dtype=tf.float64)
        # Strings will be represented as matrices of
        # one-hot embeddings and the similarity is just the dot product (i.e. checking for matches of characters).
        # turn into one-hot i.e. shape (# strings, # characters, alphabet size + 1)
        X1 = tf.one_hot(X1,len(self.alphabet)+1,dtype=tf.float64)
        X2 = tf.one_hot(X2,len(self.alphabet)+1,dtype=tf.float64)
        # remove the ones in the first column that encode the padding (i.e. we don't want them to count as a match)
        paddings = tf.constant([[0, 0], [0, 0],[0,len(self.alphabet)]])
        X1 = X1 - tf.pad(tf.expand_dims(X1[:,:,0], 2),paddings,"CONSTANT")
        X2 = X2 - tf.pad(tf.expand_dims(X2[:,:,0], 2),paddings,"CONSTANT")
        # store squared match coef
        match_sq = tf.square(tf_match_decay)
        # Make S: the similarity tensor of shape (# strings, # characters, # characters)
        S = tf.matmul( X1,tf.transpose(X2,perm=(0,2,1)))
        # Main loop, where Kp, Kpp values and gradients are calculated.
        # Index 0 holds the base case of the recursion (ones / zeros).
        Kp = []
        dKp_dgap = []
        dKp_dmatch = []
        Kp.append(tf.ones(shape=(X1.shape[0],self.maxlen, self.maxlen), dtype=tf.float64))
        dKp_dgap.append(tf.zeros(shape=(X1.shape[0],self.maxlen, self.maxlen),dtype=tf.float64))
        dKp_dmatch.append(tf.zeros(shape=(X1.shape[0],self.maxlen, self.maxlen),dtype=tf.float64))
        for i in range(len(tf_order_coefs)-1):
            # calc subkernels for each subsequence length.
            # The recursion S*Kp is propagated along both string axes by two
            # matmuls with D (reshaped so the batch dim folds into the rows).
            aux = tf.multiply(S, Kp[i])
            aux1 = tf.reshape(aux, tf.stack([X1.shape[0] * self.maxlen, self.maxlen]))
            aux2 = tf.matmul(aux1, D)
            aux = aux2 * match_sq
            aux = tf.reshape(aux, tf.stack([X1.shape[0], self.maxlen, self.maxlen]))
            aux = tf.transpose(aux, perm=[0, 2, 1])
            aux3 = tf.reshape(aux, tf.stack([X1.shape[0] * self.maxlen, self.maxlen]))
            aux = tf.matmul(aux3, D)
            aux = tf.reshape(aux, tf.stack([X1.shape[0], self.maxlen, self.maxlen]))
            Kp.append(tf.transpose(aux, perm=[0, 2, 1]))
            # gap-decay gradient of the recursion (product rule: D vs dD_dgap)
            aux = tf.multiply(S, dKp_dgap[i])
            aux = tf.reshape(aux, tf.stack([X1.shape[0] *self.maxlen,self.maxlen]))
            aux = tf.matmul(aux, D) + tf.matmul(aux1, dD_dgap)
            aux = aux * match_sq
            aux = tf.reshape(aux, tf.stack([X1.shape[0],self.maxlen,self.maxlen]))
            aux = tf.transpose(aux, perm=[0, 2, 1])
            aux = tf.reshape(aux, tf.stack([X1.shape[0] *self.maxlen,self.maxlen]))
            aux = tf.matmul(aux, D) + tf.matmul(aux3, dD_dgap)
            aux = tf.reshape(aux, tf.stack([X1.shape[0],self.maxlen,self.maxlen]))
            dKp_dgap.append(tf.transpose(aux, perm=[0, 2, 1]))
            # match-decay gradient (extra 2*match_decay term from match_sq)
            aux = tf.multiply(S, dKp_dmatch[i])
            aux = tf.reshape(aux, tf.stack([X1.shape[0] *self.maxlen,self.maxlen]))
            aux = tf.matmul(aux, D)
            aux = (aux * match_sq) + (2 * tf_match_decay * aux2)
            aux = tf.reshape(aux, tf.stack([X1.shape[0],self.maxlen,self.maxlen]))
            aux = tf.transpose(aux, perm=[0, 2, 1])
            aux = tf.reshape(aux, tf.stack([X1.shape[0] *self.maxlen,self.maxlen]))
            aux = tf.matmul(aux, D)
            aux = tf.reshape(aux, tf.stack([X1.shape[0],self.maxlen,self.maxlen]))
            dKp_dmatch.append(tf.transpose(aux, perm=[0, 2, 1]))
        Kp = tf.stack(Kp)
        dKp_dgap = tf.stack(dKp_dgap)
        dKp_dmatch = tf.stack(dKp_dmatch)
        # Final calculation. We gather all Kps and
        # multiply them by their coefficients.
        # get k
        aux = tf.multiply(S, Kp)
        aux = tf.reduce_sum(aux, 2)
        sum2 = tf.reduce_sum(aux, 2, keepdims=True)
        Ki = tf.multiply(sum2, match_sq)
        Ki = tf.squeeze(Ki, [2])
        # reshape in case batch size 1
        k = tf.reshape(tf.squeeze(tf.matmul(tf.reshape(tf_order_coefs,(1,-1)), Ki)),(X1.shape[0],))
        # get gap decay grads
        aux = tf.multiply(S, dKp_dgap)
        aux = tf.reduce_sum(aux, 2)
        aux = tf.reduce_sum(aux, 2, keepdims=True)
        aux = tf.multiply(aux, match_sq)
        aux = tf.squeeze(aux, [2])
        dk_dgap = tf.reshape(tf.squeeze(tf.matmul(tf.reshape(tf_order_coefs,(1,-1)), aux)),(X1.shape[0],))
        # get match decay grads
        aux = tf.multiply(S, dKp_dmatch)
        aux = tf.reduce_sum(aux, 2)
        aux = tf.reduce_sum(aux, 2, keepdims=True)
        aux = tf.multiply(aux, match_sq) + (2 * tf_match_decay * sum2)
        aux = tf.squeeze(aux, [2])
        dk_dmatch = tf.reshape( tf.squeeze(tf.matmul(tf.reshape(tf_order_coefs,(1,-1)), aux)),(X1.shape[0],))
        # get coefs grads: d k / d coef_i is simply the i-th subkernel value
        dk_dcoefs = tf.transpose(Ki)
        return (k, dk_dgap, dk_dmatch, dk_dcoefs, Ki)
def _normalize(self, K_result, gap_grads, match_grads, coef_grads,diag_Ks_i,
diag_Ks_j, diag_gap_grads_i, diag_match_grads_i, diag_coef_grads_i,
diag_gap_grads_j, diag_match_grads_j, diag_coef_grads_j,):
"""
Normalize the kernel and kernel derivatives.
Following the derivation of Beck (2015)
"""
norm = diag_Ks_i * diag_Ks_j
sqrt_norm = np.sqrt(norm)
K_norm = K_result / sqrt_norm
diff_gap = ((diag_gap_grads_i * diag_Ks_j) +
(diag_Ks_i * diag_gap_grads_j))
diff_gap /= 2 * norm
gap_grads_norm = ((gap_grads / sqrt_norm) -
(K_norm * diff_gap))
diff_match = ((diag_match_grads_i * diag_Ks_j) +
(diag_Ks_i * diag_match_grads_j))
diff_match /= 2 * norm
match_grads_norm = ((match_grads / sqrt_norm) -
(K_norm * diff_match))
diff_coef = ((diag_coef_grads_i * diag_Ks_j) +
(diag_Ks_i * diag_coef_grads_j))
diff_coef /= 2 * norm
coef_grads_norm = ((coef_grads / sqrt_norm) -
(K_norm * diff_coef))
return K_norm, gap_grads_norm, match_grads_norm, coef_grads_norm
| StarcoderdataPython |
3431756 | <filename>sara_flexbe_states/src/sara_flexbe_states/Look_at_sound.py
#!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from geometry_msgs.msg import PoseStamped
from tf.transformations import euler_from_quaternion
from flexbe_core.proxy import ProxySubscriberCached
from flexbe_core.proxy import ProxyActionClient
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Pose, Pose2D, Quaternion, Point
from move_base_msgs.msg import *
from std_msgs.msg import Float64
from tf import transformations
"""
Created on 10/25/2018
@author: <NAME>
"""
class LookAtSound(EventState):
    """
    Make Sara's head keep looking at the strongest source of sounds.

    Subscribes to /direction_of_arrival, converts the reported orientation
    into head pitch/yaw commands, and sends any yaw beyond the neck's
    mechanical range to move_base as a base-rotation goal.

    NOTE(review): the `moveBase` flag is stored but never checked before
    sending the move_base goal — confirm intended behaviour.

    <= done    Emitted once a sound direction has been processed.
    """
    def __init__(self, moveBase=False):
        """Constructor"""
        super(LookAtSound, self).__init__(outcomes=['done'])
        self.MoveBase = moveBase
        # Subscriber config
        self.soundsTopic = "/direction_of_arrival"
        self._sub = ProxySubscriberCached({self.soundsTopic: PoseStamped})
        # Publisher config
        self.pubPitch = rospy.Publisher("/sara_head_pitch_controller/command", Float64, queue_size=1)
        self.pubYaw = rospy.Publisher("/sara_head_yaw_controller/command", Float64, queue_size=1)
        self.msg = Float64()
        # action client
        self._action_topic = "/move_base"
        self._client = ProxyActionClient({self._action_topic: MoveBaseAction})
    def execute(self, userdata):
        # If a new sound direction is detected.
        if self._sub.has_msg(self.soundsTopic):
            message = self._sub.get_last_msg(self.soundsTopic)
            orientation = message.pose.orientation
            orient_quat = [orientation.x, orientation.y, orientation.z, orientation.w]
            roll, pitch, yaw = euler_from_quaternion(orient_quat)
            # Publish the head commands
            # pitch command clamped to [0, 1] (head never commanded below level)
            self.msg.data = min(max(-pitch, -0), 1)
            self.pubPitch.publish(self.msg)
            # yaw clamped to the neck's +/-1.2 rad range
            angle = min(max(yaw, -1.2), 1.2)
            self.msg.data = angle
            self.pubYaw.publish(self.msg)
            # rotate the base by the leftover yaw the neck could not cover
            GoalPose = Pose()
            qt = transformations.quaternion_from_euler(0, 0, yaw-angle)
            GoalPose.orientation.w = qt[3]
            GoalPose.orientation.x = qt[0]
            GoalPose.orientation.y = qt[1]
            GoalPose.orientation.z = qt[2]
            self.setGoal(GoalPose)
            return "done"
    def on_exit(self, userdata):
        # Cancel any navigation goal still in flight when the state is left.
        if not self._client.has_result(self._action_topic):
            self._client.cancel(self._action_topic)
            Logger.loginfo('Cancelled active action goal.')
    def setGoal(self, pose):
        """Send `pose` (relative to base_link) to move_base for execution."""
        goal = MoveBaseGoal()
        goal.target_pose.pose = pose
        goal.target_pose.header.frame_id = "base_link"
        # Send the action goal for execution
        try:
            Logger.loginfo("sending goal" + str(goal))
            self._client.send_goal(self._action_topic, goal)
        except Exception as e:
            Logger.logwarn("Unable to send navigation action goal:\n%s" % str(e))
            self._failed = True
8119791 | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.decorators import action
from .models import Collection
from .serializers import CollectionSerializer
from users.models import User
from users.serializers import UserSerializer
from digital_books.models import Digital_Book
from digital_books.serializers import digital_bookSerializer
from django.http.response import JsonResponse
import json
from permissions.services import APIPermissionClassFactory
def is_logged(user, obj, request):
    """Permission check: the requesting user must be the object's owner (same email)."""
    owner_email = obj.email
    return owner_email == user.email
class CollectionViewset(viewsets.ModelViewSet):
    """CRUD + custom actions for a user's purchased-book collection.

    Standard retrieve/destroy/update/list are disabled via the permission
    factory; only `create`, `add_to_collection` and `get_collection`
    (owner-only) are reachable.
    """
    queryset = Collection.objects.all()
    serializer_class = CollectionSerializer
    permission_classes = (
        APIPermissionClassFactory(
            name='UserPermission',
            permission_configuration={
                'base': {
                    'create': True,
                    'list': False,
                },
                'instance': {
                    'retrieve': False,
                    'destroy': False,
                    'update': False,
                    'add_to_collection': is_logged,
                    'get_collection' : is_logged,
                }
            }
        ),
    )
    @action(detail=False, url_path='add-to-collection', methods=['post'])
    def add_to_collection(self, request):
        """Charge the user and add each posted book id to their collection.

        Expects POST data: `user` (id), `ammount` (price), `books` (list of ids).
        NOTE(review): `ammount` is deducted once per book inside the loop —
        confirm whether it is meant to be a total (deduct once) or per-book price.
        """
        userId = request.data['user']
        ammount = request.data['ammount']
        for bookId in request.data['books']:
            book_user = User.objects.get(pk= userId)
            balance = book_user.balance
            balance -= float(ammount)
            book_user.balance = balance
            book_user.save()
            book_object = Digital_Book.objects.get(pk= bookId)
            collection = Collection()
            collection.user = book_user
            collection.book = book_object
            collection.save()
        return Response({
            'status': 'Buyed Succesfully'
        })
    @action(detail=False, url_path='get-collection', methods=['get'])
    def get_collection(self, request):
        """Return the serialized collection entries for `?user=<id>`."""
        books_array = []
        response = []
        userId = request.query_params['user']
        user = User.objects.get(pk = userId)
        books = Collection.objects.filter(user = user)
        # books_serialized = digital_bookSerializer(books, many=True).data
        collection = CollectionSerializer(books, many = True).data
        print(collection)
        return Response(collection)
| StarcoderdataPython |
3536946 | <reponame>alexdawn/risk
from typing import Callable, Any, Tuple, Dict, List
from itertools import product, chain
from functools import lru_cache
import warnings
import logging
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import identity
from scipy.sparse.linalg import inv
warnings.filterwarnings('ignore') # scipy generates tons of errors
# Battle Estimator
#
# Markov chains can be used to efficiently calculate all outcomes from
# a battle of A attackers and D defenders, to calculate the probability of the
# attacker or defender winning, as well as the expected number of survivors
# and even the range of likely outcomes.
# This allows an A.I. to consider only likely outcomes
def probY(y1: int, y2: int = None) -> float:
    """Probability distribution of the top two of three ordered fair dice.

    With `y2` given, returns the joint P(Y1=y1, Y2=y2) where Y1 >= Y2 are
    the two highest of three dice. With `y2` omitted, returns the marginal
    P(Y1=y1) = (y1**3 - (y1-1)**3) / 216 = (3*y1**2 - 3*y1 + 1) / 216.
    """
    assert y1 > 0 and y1 <= 7
    if y2:
        assert y2 > 0 and y2 <= 7
    if y2:
        if y1 == y2:
            return (3 * y1 - 2) / 216
        elif y1 > y2:
            return (6 * y2 - 3) / 216
        else:
            # ordered dice: Y1 < Y2 is impossible
            return 0
    else:
        # Bug fix: the whole polynomial must be divided by 216. Previously
        # only the quadratic term was, yielding values far outside [0, 1].
        return (1 - 3 * y1 + 3 * pow(float(y1), 2)) / 216
def probZ(z1: int, z2: int = None) -> float:
    """Probability mass for two ordered fair dice Z1 >= Z2.

    With `z2` given, returns the joint P(Z1=z1, Z2=z2); otherwise the
    marginal P(Z1=z1) = (2*z1 - 1) / 36.
    """
    assert z1 > 0 and z1 <= 7
    if z2:
        assert z2 > 0 and z2 <= 7
        if z1 < z2:
            # ordered dice: Z1 < Z2 is impossible
            return 0
        # a tie has one realisation; strict inequality has two orderings
        return (1 / 36) if z1 == z2 else (2 / 36)
    return (2 * z1 - 1) / 36
def probSingle(x: int, _: None) -> float:
    """Probability of a single fair die showing x: uniform over the six faces."""
    return 1.0 / 6.0
def dice(dice: int) -> Callable[[Any, Any], float]:
    """Select the probability function matching the number of dice rolled (1-3).

    Raises KeyError for any other count.
    """
    lookup: Dict[int, Callable[[Any, Any], float]] = {
        1: probSingle,
        2: probZ,
        3: probY,
    }
    return lookup[dice]
@lru_cache()
def probable_outcome(
        attackers: int, defenders: int, defender_loses: int) -> float:
    """Probability P(i,j,k) of Defender losing k units given i attacking and
    j defending dice, by exhaustive enumeration of the ordered dice rolls.

    NOTE(review): the assert admits attackers == 4, but dice() only maps
    counts 1-3 — a value of 4 would raise KeyError. Callers pass min(a, 3).
    """
    assert attackers >= 1 and attackers <= 4, "Invalid attackers {}".format(attackers)
    assert defenders >= 1 and defenders <= 2, "Invalid defenders {}".format(defenders)
    assert defender_loses >= 0 and defender_loses <= 2, "Invalid losers {}".format(defender_loses)
    Attacker = dice(attackers)
    Defender = dice(defenders)
    die = range(1, 7)
    prob = 0.0
    # One unit is at stake per side compared: y* are the attacker's top dice,
    # z* the defender's; ties favour the defender.
    if attackers == 1 and defenders == 1:
        for y1, z1 in product(*([die] * 2)):
            if (y1 > z1 and defender_loses == 1) or (y1 <= z1 and defender_loses == 0):
                prob += Attacker(y1, None) * Defender(z1, None)
    elif attackers == 1:
        for y1, z1, z2 in product(*([die] * 3)):
            if (y1 > z1 and defender_loses == 1) or (y1 <= z1 and defender_loses == 0):
                prob += Attacker(y1, None) * Defender(z1, z2)
    elif defenders == 1:
        for y1, y2, z1 in product(*([die] * 3)):
            if (y1 > z1 and defender_loses == 1) or (y1 <= z1 and defender_loses == 0):
                prob += Attacker(y1, y2) * Defender(z1, None)
    else:
        # two comparisons: top vs top and second vs second
        for y1, y2, z1, z2 in product(*([die] * 4)):
            if ((y1 > z1 and y2 > z2 and defender_loses == 2) or
                    (((y1 > z1 and y2 <= z2) or (y1 <= z1 and y2 > z2)) and defender_loses == 1) or
                    (y1 <= z1 and y2 <= z2 and defender_loses == 0)):
                prob += Attacker(y1, y2) * Defender(z1, z2)
    return prob
def generate_states(A: int, D: int)\
        -> Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]:
    """Enumerate all battle states reachable from A attackers vs D defenders.

    Transient states keep at least one unit on each side; absorbing states
    are those where one side has been wiped out (defender-survives states
    first, then attacker-survives states).
    """
    transient_state = list(product(range(1, A + 1), range(1, D + 1)))
    defenders_survive = [(0, d) for d in range(1, D + 1)]
    attackers_survive = [(a, 0) for a in range(1, A + 1)]
    return transient_state, defenders_survive + attackers_survive
def generate_prob_matrix(A: int, D: int)\
        -> Tuple[Dict[Tuple[int, int], int], Dict[Tuple[int, int], int], np.ndarray]:
    """Generate the absorption-probability matrix of the battle Markov chain.

    Builds the transient-to-transient matrix Q and transient-to-absorbing
    matrix R as sparse matrices, then returns the fundamental-matrix product
    F = (I - Q)^-1 * R along with index lookups for both state sets.
    """
    transient_state, absorbing_state = generate_states(A, D)
    transient_state_lookup = {s: i for i, s in enumerate(transient_state)}
    absorbing_state_lookup = {s: i for i, s in enumerate(absorbing_state)}
    transient_length, absorbing_length = len(transient_state), len(absorbing_state)
    # Add probability to transition elements (COO-style triplet lists)
    Qrow = []
    Qcol = []
    Qdata = []
    Rrow = []
    Rcol = []
    Rdata = []
    for i, (a, d) in enumerate(transient_state):
        # two units are at stake only when both sides have more than one
        max_deaths = 2 if a > 1 and d > 1 else 1
        for dl in range(0, max_deaths + 1):
            al = max_deaths - dl
            na, nd = a - al, d - dl
            if a - al > 0 and d - dl > 0:
                Qrow.append(i)
                Qcol.append(transient_state_lookup[(na, nd)])
                # at most 3 attacking and 2 defending dice can be rolled
                Qdata.append(probable_outcome(min(a, 3), min(d, 2), dl))
            else:
                Rrow.append(i)
                Rcol.append(absorbing_state_lookup[(na, nd)])
                Rdata.append(probable_outcome(min(a, 3), min(d, 2), dl))
    Q = csc_matrix((Qdata, (Qrow, Qcol)), shape=(transient_length, transient_length))
    R = csc_matrix((Rdata, (Rrow, Rcol)), shape=(transient_length, absorbing_length))
    iden = identity(transient_length)
    F = inv(iden - Q) * R
    return transient_state_lookup, absorbing_state_lookup, F
def filter_states(states: Dict[Tuple[int, int], int], probs: np.ndarray, a: int, d: int)\
        -> Tuple[List[Tuple[int, int]], np.ndarray]:
    """Drop states that cannot arise from a battle of `a` attackers vs `d` defenders.

    A state (att, def) is kept when att <= a or def <= d. Returns the kept
    states and their probabilities as two parallel tuples.
    """
    index_to_state = {idx: state for state, idx in states.items()}
    kept = [(index_to_state[i], p) for i, p in enumerate(probs)
            if index_to_state[i][0] <= a or index_to_state[i][1] <= d]
    new_states, new_probs = zip(*kept)
    return new_states, new_probs
def get_matrix_row(F: np.ndarray, row: int) -> np.ndarray:
    """Return row `row` of the absorption matrix as a dense 1-D array.

    A 1-D input is already a single row (degenerate starting state) and is
    returned unchanged; a sparse 2-D matrix has the selected row densified.
    """
    if len(F.shape) <= 1:
        return F
    dense = F[row][:].toarray()
    return dense[0]
def wrap_probabilities()\
        -> Callable[[int, int], Tuple[List[Tuple[int, int]], np.ndarray]]:
    """Avoids generating the probability matrix if a larger one already exists.

    Returns a closure that caches the fundamental matrix F and its state
    lookups; on a cache miss it rebuilds a square matrix sized max(a, d)
    so later smaller queries can reuse it.
    """
    F = []  # type: List[List[int]]
    transient_state_lookup = {}  # type: Dict[Tuple[int, int], int]
    absorbing_state_lookup = {}  # type: Dict[Tuple[int, int], int]
    def get_prob(a: int, d: int) -> Tuple[List[Tuple[int, int]], np.ndarray]:
        nonlocal F, transient_state_lookup, absorbing_state_lookup
        if (a, d) in transient_state_lookup.keys():
            # cache hit: slice the row for (a, d) and drop unreachable states
            return filter_states(
                absorbing_state_lookup, get_matrix_row(F, transient_state_lookup[(a, d)]), a, d)
        else:
            logging.critical("State outcomes not calculated for ({},{})".format(a, d))
            b = max(a, d)  # avoid shrinking the matrix
            transient_state_lookup, absorbing_state_lookup, F = generate_prob_matrix(b, b)
            logging.critical("Calculated")
            # need the lookup for where
            return filter_states(
                absorbing_state_lookup, get_matrix_row(F, transient_state_lookup[(a, d)]), a, d)
    return get_prob
# Module-level closure caching the absorption matrix between calls.
# TODO: revisit — a class or functools-based cache may be cleaner.
get_cached_probabilities = wrap_probabilities()
@lru_cache()
def calculate_win_prob(a: int, d: int) -> float:
    """Probability the attacker wins a battle of `a` attackers vs `d` defenders.

    Absorbing states are ordered defender-survives first (d entries), then
    attacker-survives, so summing from index d counts attacker victories.
    """
    _, probs = get_cached_probabilities(a, d)
    return sum(probs[d:])
@lru_cache()
def calculate_expected_remainder(a: int, d: int) -> Tuple[float, float, float, float]:
    """Calculate expectations and variances of surviving units from a battle.

    Returns (E[attackers], Var[attackers], E[defenders], Var[defenders])
    over the absorbing-state distribution.
    """
    states, probs = get_cached_probabilities(a, d)
    ea = sum(a * p for (a, d), p in zip(states, probs))
    ed = sum(d * p for (a, d), p in zip(states, probs))
    va = sum(p * pow(a - ea, 2) for (a, d), p in zip(states, probs))
    vd = sum(p * pow(d - ed, 2) for (a, d), p in zip(states, probs))
    return ea, va, ed, vd
def generate_outcome(a: int, d: int, repeats: int = 1) -> List[Tuple[int, int]]:
    """Run a battle using the matrix instead of simulated dice.

    Samples `repeats` absorbing end-states directly from the outcome
    distribution for `a` attackers vs `d` defenders.
    """
    states, probs = get_cached_probabilities(a, d)
    return [states[x] for x in np.random.choice(range(len(states)), repeats, p=probs)]
| StarcoderdataPython |
6489098 | <filename>account/urls.py
from django.urls import path
from rest_framework.authtoken import views as special_views
from account import views
# Account routes: DRF token login plus user registration.
urlpatterns = [
    path('login', special_views.obtain_auth_token),
    path('register',views.register_view,name="register")
] | StarcoderdataPython |
48308 | <filename>books/booksdatasourcetests.py
'''
booksdatasourcetest.py
<NAME>, 24 September 2021
<NAME>, <NAME>, 11 October 2021
'''
import booksdatasource
import unittest
class BooksDataSourceTester(unittest.TestCase):
    """Tests for BooksDataSource author/book search against books_medium.csv."""
    def setUp(self):
        self.data_source = booksdatasource.BooksDataSource('books_medium.csv')
    def tearDown(self):
        pass
    def test_unique_author(self):
        # 'Pratchett' should match exactly one author
        authors = self.data_source.authors('Pratchett')
        for author in authors:
            self.assertEqual(author, booksdatasource.Author('Pratchett', 'Terry'))
        self.assertTrue(authors[0] == booksdatasource.Author('Pratchett', 'Terry'))
        self.assertTrue(len(authors) == 1)
    def test_blank_author(self):
        # no search string returns every author in the data set
        authors = self.data_source.authors()
        self.assertTrue(len(authors) == 8)
    def test_authors(self):
        authors = self.data_source.authors('Jane')
        self.assertTrue(booksdatasource.Author('Austen', 'Jane') in authors)
        self.assertTrue(len(authors) == 1)
    def test_sorted_authors(self):
        # results are expected sorted by surname
        authors = self.data_source.authors('te')
        self.assertTrue(authors[0] == booksdatasource.Author('Austen', 'Jane'))
        self.assertTrue(len(authors) == 3)
    '''
    Book Tests
    '''
    def test_blank_books(self):
        books = self.data_source.books()
        self.assertTrue(books)
        self.assertTrue(len(books) == 10)
    def test_books(self):
        books = self.data_source.books('Sula', 'year')
        self.assertTrue(booksdatasource.Book('Sula', 1973, [booksdatasource.Author('Morrison', 'Toni')]) in books)
    def test_sorted_title(self):
        books = self.data_source.books('There', 'title')
        self.assertTrue(books[0] == booksdatasource.Book('And Then There Were None', 1939, [booksdatasource.Author('Christie', 'Agatha')]))
    def test_sorted_year(self):
        books = self.data_source.books('the', 'year')
        self.assertTrue(books[0] == booksdatasource.Book('The Life and Opinions of Tristram Shandy, Gentleman', 1759, [booksdatasource.Author('Sterne', 'Laurence')]))
    '''
    Between Years Tests
    '''
    def test_blank_years(self):
        books = self.data_source.books_between_years()
        self.assertTrue(len(books) == 10)
    def test_no_books(self):
        books = self.data_source.books_between_years(1500, 1550)
        self.assertTrue(len(books) == 0)
        self.assertFalse(books)
    def test_inclusive(self):
        # boundary years are included in the range
        books = self.data_source.books_between_years(1700, 1759)
        self.assertTrue(books)
        self.assertTrue(len(books) == 1)
    def test_sorted(self):
        books = self.data_source.books_between_years(1813, 1815)
        self.assertTrue(books[0] == booksdatasource.Book('Pride and Prejudice', 1813, [booksdatasource.Author('Austen', 'Jane')]))
        self.assertTrue(books[1] == booksdatasource.Book('Sense and Sensibility', 1813, [booksdatasource.Author('Austen', 'Jane')]))
        self.assertTrue(books[2] == booksdatasource.Book('Emma', 1815, [booksdatasource.Author('Austen', 'Jane')]))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
9783256 | <filename>p2p/graph_manager.py
import networkx as nx
import numpy as np
class DummyNode:
    """Minimal stand-in node carrying only an integer id (used for demos/tests)."""
    def __init__(self, node_id):
        self.id = node_id
def sample_neighbors(client_num, num_clients, self_ind):
    """Sample `num_clients` distinct neighbor indices from range(client_num),
    excluding `self_ind`."""
    candidates = [c for c in range(client_num) if c != self_ind]
    picks = np.random.choice(len(candidates), size=num_clients, replace=False)
    return np.array(candidates)[picks]
def sparse_graph(n, num_neighbors, create_using):
    """Build an n-node graph where every node has `num_neighbors` neighbors.

    Directed graphs use the Havel-Hakimi construction with uniform in/out
    degrees; undirected graphs use a random d-regular graph.
    """
    if create_using.is_directed():
        in_n = [num_neighbors] * n
        out_n = [num_neighbors] * n
        g = nx.directed_havel_hakimi_graph(in_n, out_n, create_using)
        # The string below is dead code kept as a reference for a previous
        # random-sampling construction.
        """
        m = np.zeros(shape=(n, n))
        for i in range(n):
            nb = sample_neighbors(n, num_neighbors, i)
            m[i][nb] = 1
        g = nx.from_numpy_matrix(np.asmatrix(m), create_using=create_using)
        """
    else:
        # undirected d-regular graph (sum row == sum column)
        g = nx.random_regular_graph(num_neighbors, n)
    return g
# Factory table mapping a topology name to a networkx constructor.
# NOTE(review): the 'erdos_renyi'/'binomial' entries read kwargs['p'] and the
# 'grid' entry reads kwargs['periodic'], none of which GraphManager supplies —
# selecting those types would raise KeyError as written.
_graph_type_dict = {
    'complete': lambda **kwargs: nx.complete_graph(kwargs['n'], create_using=kwargs['create_using']),
    'ring': lambda **kwargs: nx.cycle_graph(kwargs['n'], create_using=kwargs['create_using']),
    'sparse': lambda **kwargs: sparse_graph(kwargs['n'], kwargs['num_neighbors'], create_using=kwargs['create_using']),
    'erdos_renyi': lambda **kwargs: nx.erdos_renyi_graph(kwargs['n'], kwargs['p'], directed=kwargs['directed']),
    'binomial': lambda **kwargs: nx.binomial_graph(kwargs['n'], kwargs['p'], directed=kwargs['directed']),
    'grid': lambda **kwargs: nx.grid_2d_graph(kwargs['num_neighbors'], kwargs['n'], kwargs['periodic'],
                                              kwargs['create_using'])
}
class GraphManager:
    """Manages the communication topology between peer nodes.

    Wraps a networkx graph built from a named topology, assigns uniform
    mixing weights to each node's neighborhood (including a self-loop so
    Wii > 0), and can periodically regenerate the topology (time-varying
    graphs).
    """
    def __init__(self, graph_type, nodes, directed=False, time_varying=-1, num_neighbors=1):
        self.n = len(nodes)
        self.nodes = nodes
        self.directed = directed
        # regenerate the graph every `time_varying` iterations; <= 0 disables
        self.time_varying = time_varying
        self.num_neighbors = num_neighbors
        self.graph_type = graph_type
        self._nx_graph = self._resolve_graph_type()
        self._resolve_weights_mixing()
    @property
    def nodes_num(self):
        return self.n
    def check_time_varying(self, time_iter):
        """Rebuild topology and weights when the time-varying period elapses."""
        if self.time_varying > 0 and time_iter % self.time_varying == 0:
            print('Changing graph communication matrix')
            self._nx_graph = self._resolve_graph_type()
            self._resolve_weights_mixing()
    def _resolve_graph_type(self):
        """Construct the networkx graph for the configured topology name."""
        assert self.graph_type in _graph_type_dict
        graph_fn = _graph_type_dict[self.graph_type]
        kwargs = {'n': self.n,
                  'create_using': nx.DiGraph() if self.directed else nx.Graph(),
                  'directed': self.directed,
                  'num_neighbors': self.num_neighbors
                  }
        graph = graph_fn(**kwargs)
        return graph
    def _resolve_weights_mixing(self):
        """Assign uniform edge weights 1/deg(node) over each node's neighborhood."""
        for node in self._nx_graph.nodes:
            # For now, all algorithms require Wii > 0, so we add self as an edge
            self._nx_graph.add_edge(node, node)
            nbs = self._graph_neighbors(node)
            # For now, uniform weights
            weight = 1 / len(nbs)
            for nb in nbs:
                self._nx_graph[node][nb]['weight'] = weight
    def get_edge_weight(self, i, j):
        """Mixing weight of edge (i, j); 0 when the edge does not exist."""
        edge_data = self._nx_graph.get_edge_data(i, j)
        if edge_data is None:
            return 0
        return edge_data['weight']
    def get_self_node_weight(self, node_id):
        return self.get_edge_weight(node_id, node_id)
    def _graph_neighbors(self, node_id):
        return list(self._nx_graph.neighbors(node_id))
    def get_peers(self, node_id):
        """Neighbor node objects of `node_id`, excluding the node itself."""
        nb = self._graph_neighbors(node_id)
        return [n for n in self.nodes if n.id in nb and n.id != node_id]
    def get_weighted_peers(self, node_id):
        """Return (peer nodes, matching mixing weights) for `node_id`."""
        nb = self.get_peers(node_id)
        wb = [self.get_edge_weight(node_id, n.id) for n in nb]
        return nb, wb
    def get_node(self, node_id):
        # NOTE(review): indexes the node list by id — assumes node.id equals
        # its position in `nodes`; confirm for non-sequential ids.
        return self.nodes[node_id]
    def draw(self):
        nx.draw(self._nx_graph)
    def graph_info(self):
        """Human-readable one-line summary of the configured topology."""
        nb_num = self.num_neighbors
        if self.graph_type == 'ring':
            nb_num = 1 if self.directed else 2
        elif self.graph_type == 'complete':
            nb_num = self.n - 1
        info = "{} ({}), N: {}, NB: {}, TV: {}".format(self.graph_type,
                                                       'directed' if self.directed else 'undirected',
                                                       self.n, nb_num,
                                                       self.time_varying)
        return info
    def as_numpy_array(self):
        return nx.to_numpy_array(self._nx_graph)
def nx_graph_from_saved_lists(np_array, directed=False):
    """Rebuild a (di)graph from a previously saved adjacency-matrix list."""
    return nx.from_numpy_array(np.asarray(np_array), create_using=nx.DiGraph() if directed else nx.Graph())
# Manual smoke test: build and draw a small sparse directed-off topology.
if __name__ == "__main__":
    gm = GraphManager('sparse', [DummyNode(_) for _ in range(10)], directed=False, num_neighbors=3)
    gm.draw()
| StarcoderdataPython |
1796619 | from talon import Context, actions, ui, Module, app, clip
# Talon voice-command context scoped to the Gwent game window.
ctx = Context()
mod = Module()
mod.apps.gwent = "app.name: Gwent.exe"
ctx.matches = r"""
app: gwent
"""
| StarcoderdataPython |
6638979 | # Generated by Django 2.0 on 2021-04-06 08:18
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
    # Adds a multi-select `countries` field to ObjectType; django_countries
    # presumably stores the selection as a delimited code string (hence the
    # large max_length) — confirm against the model definition.
    dependencies = [
        ('api', '0037_auto_20210203_2216'),
    ]
    operations = [
        migrations.AddField(
            model_name='objecttype',
            name='countries',
            field=django_countries.fields.CountryField(default='', max_length=746, multiple=True),
        ),
    ]
| StarcoderdataPython |
4933809 | <filename>07/solve_2.py
from address import is_compatible, load_addresses
def main():
    """Count the number of addresses compatible with
    the second protocol, i.e., matching 'aba[bab]'.
    """
    addresses = load_addresses()
    # sum over a generator — no need to materialize the intermediate list
    print(sum(is_compatible(address, protocol=2) for address in addresses))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
154207 | from django.urls import path, include
from rest_framework import routers
from . import views
# DRF router exposing signup, plus explicit login route.
router = routers.SimpleRouter()
router.register('signup',views.UserAccountsViewSet)
urlpatterns = [
    path('',include(router.urls)),
    path('login/',views.UserLoginApiView.as_view()),
] | StarcoderdataPython |
9648121 | <gh_stars>0
import os
import json
import requests
def getAppDetails(envs):
    """Build the dashboard payload for the given Marathon environments.

    For each of 'tasks', 'apps', 'cpus' and 'mem', returns a dict with
    parallel 'name'/'data' lists per team, the 'total' across teams, a
    display 'title', and table 'headers'.
    """
    appDetails = getCuratedAppDetails(envs)
    retDict = {}
    # (removed unused locals: names/counts/appCpus/appMem/totalTasks were
    # initialized here but never read before reassignment)
    for item in ['tasks', 'apps', 'cpus', 'mem']:
        dictItem = {}
        names = []
        values = []
        totalValue = 0
        for team, value in appDetails[item].items():
            names.append(team)
            values.append(value)
            totalValue += value
        dictItem['name'] = names
        dictItem['data'] = values
        dictItem['total'] = totalValue
        retDict[item] = dictItem
    retDict['tasks']['title'] = '{} tasks are running on RogerOS ({})...'.format(retDict['tasks']['total'], '+'.join(envs))
    retDict['apps']['title'] = '...which are instances of {} applicatons.'.format(retDict['apps']['total'])
    retDict['cpus']['title'] = '{} cores (cpus) are currently allocated to them...'.format(retDict['cpus']['total'])
    retDict['mem']['title'] = ' ...along with a total of {} mb of memory.'.format(retDict['mem']['total'])
    retDict['tasks']['headers'] = ['name', 'count']
    retDict['apps']['headers'] = ['name', 'count']
    retDict['cpus']['headers'] = ['name', 'allocation']
    retDict['mem']['headers'] = ['name', 'allocation']
    return retDict
def getTeamNamesDict():
    """Load the team-name -> app-id-substring-patterns mapping from the
    GROUP_DATA environment variable (JSON-encoded)."""
    raw_mapping = os.environ['GROUP_DATA']
    return json.loads(raw_mapping)
def getCuratedAppDetails(envs):
    """Aggregate per-team task/app/cpu/mem usage across the given environments.

    Each app is attributed to the first team whose name patterns match its
    id, otherwise to 'others'. Environments that fail to respond are skipped
    (best effort) after logging the error.
    """
    teamNames = getTeamNamesDict()
    tasks = {}
    apps = {}
    cpus = {}
    mem = {}
    for env in envs:
        try:
            rawData = getRawAppDetails(env)
            for id, (running, cpusAlloc, memAlloc) in rawData.items():
                # attribute the app to the first matching team
                team = 'others'
                for team_name, patterns in teamNames.items():
                    if isMatchingName(id, patterns):
                        team = team_name
                        break
                if team in tasks:
                    apps[team] += 1
                    tasks[team] += running
                    cpus[team] += cpusAlloc
                    mem[team] += memAlloc
                else:
                    apps[team] = 1
                    tasks[team] = running
                    cpus[team] = cpusAlloc
                    mem[team] = memAlloc
        except Exception as e:
            # Fixed: was Python-2 `print e` (a syntax error under Python 3)
            # followed by a call to Flask's `flash`, which was never imported
            # and raised NameError. Log and move on to the next environment.
            print('Had trouble accessing {} environment: {}'.format(env, e))
            continue  # possible connection error.. continue to next env
    return {'tasks': tasks, 'apps': apps, 'cpus': cpus, 'mem': mem}
def isMatchingName(name, patterns):
    """Return True when any pattern occurs as a substring of `name`."""
    return any(pattern in name for pattern in patterns)
def getRawAppDetails(env):
    """Fetch app stats from the Marathon API for one environment.

    Returns a dict of app id -> [running tasks, total cpus allocated,
    total memory allocated] where totals are instances * per-instance
    allocation. Endpoint URLs and credentials come from environment
    variables (MARATHON_ENDPOINTS / MARATHON_USER / MARATHON_PASSWD).
    """
    endpoints = json.loads(os.environ['MARATHON_ENDPOINTS'])
    url = endpoints[env] + '/v2/apps'
    resp = requests.get(url, auth=(os.environ['MARATHON_USER'], os.environ['MARATHON_PASSWD']))
    rdata = resp.json()
    apps = {}
    for app in rdata['apps']:
        apps [app['id']] = [ app['tasksRunning'], float(app['instances']) * app['cpus'], float(app['instances']) * app['mem'] ]
    return apps
| StarcoderdataPython |
1745052 | # -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: JFZhou 2020-05-31)
import numpy as np
import os
import sys
sys.path.insert(0, 'subtools/pytorch')
import libs.support.kaldi_io as kaldi_io
from plda_base import PLDA
class CORAL(object):
    """CORAL-style domain adaptation of PLDA covariance statistics.

    Accumulates mean/second-moment statistics over in-domain vectors and
    recolours the out-of-domain PLDA within/between covariances to match the
    adaptation-data covariance.
    """
    def __init__(self,
                 mean_diff_scale=1.0,
                 within_covar_scale=0.8,
                 between_covar_scale=0.8):
        self.tot_weight = 0
        self.mean_stats = 0
        self.variance_stats = 0
        self.mean_diff_scale = 1.0
        self.mean_diff_scale = mean_diff_scale
        # NOTE(review): within/between scales are stored but not read in
        # update_plda below — confirm whether they were meant to be applied.
        self.within_covar_scale = within_covar_scale
        self.between_covar_scale = between_covar_scale
    def add_stats(self, weight, ivector):
        """Accumulate weighted first/second-order stats for one vector."""
        ivector = np.reshape(ivector,(-1,1))
        if type(self.mean_stats)==int:
            # lazily size the accumulators on first use
            self.mean_stats = np.zeros(ivector.shape)
            self.variance_stats = np.zeros((ivector.shape[0],ivector.shape[0]))
        self.tot_weight += weight
        self.mean_stats += weight * ivector
        self.variance_stats += weight * np.matmul(ivector,ivector.T)
    def update_plda(self,):
        """Re-estimate mean/within/between covariances from accumulated stats."""
        dim = self.mean_stats.shape[0]
        #TODO:Add assert
        '''
        // mean_diff of the adaptation data from the training data. We optionally add
        // this to our total covariance matrix
        '''
        mean = (1.0 / self.tot_weight) * self.mean_stats
        '''
        D(x)= E[x^2]-[E(x)]^2
        '''
        variance = (1.0 / self.tot_weight) * self.variance_stats - np.matmul(mean,mean.T)
        '''
        // update the plda's mean data-member with our adaptation-data mean.
        '''
        mean_diff = mean - self.mean
        variance += self.mean_diff_scale * np.matmul(mean_diff,mean_diff.T)
        self.mean = mean
        # Whiten w.r.t. the old total covariance, recolour with the
        # adaptation covariance: A = C_i * C_o, then S = A S A^T.
        o_covariance = self.within_var + self.between_var
        eigh_o, Q_o = np.linalg.eigh(o_covariance)
        self.sort_svd(eigh_o, Q_o)
        eigh_i, Q_i = np.linalg.eigh(variance)
        self.sort_svd(eigh_i, Q_i)
        EIGH_O = np.diag(eigh_o)
        EIGH_I = np.diag(eigh_i)
        C_o = np.matmul(np.matmul(Q_o,np.linalg.inv(np.sqrt(EIGH_O))),Q_o.T)
        C_i = np.matmul(np.matmul(Q_i,np.sqrt(EIGH_I)),Q_i.T)
        A = np.matmul(C_i,C_o)
        S_w = np.matmul(np.matmul(A,self.within_var),A.T)
        S_b = np.matmul(np.matmul(A,self.between_var),A.T)
        self.between_var = S_b
        self.within_var = S_w
    def sort_svd(self,s, d):
        """Bubble-sort eigenvalues ascending, permuting rows of d alongside.

        NOTE(review): np.linalg.eigh already returns ascending eigenvalues,
        so the swaps normally never fire; if they did, the numpy row swap
        `d[i], d[j] = d[j], d[i]` aliases views and would corrupt `d` —
        and eigh's eigenvectors are columns, not rows. Confirm intent.
        """
        for i in range(len(s)-1):
            for j in range(i+1,len(s)):
                if s[i] > s[j]:
                    s[i], s[j] = s[j], s[i]
                    d[i], d[j] = d[j], d[i]
    def plda_read(self,plda):
        """Load mean/within_var/between_var from a Kaldi-format PLDA archive."""
        with kaldi_io.open_or_fd(plda,'rb') as f:
            for key,vec in kaldi_io.read_vec_flt_ark(f):
                if key == 'mean':
                    self.mean = vec.reshape(-1,1)
                    self.dim = self.mean.shape[0]
                elif key == 'within_var':
                    self.within_var = vec.reshape(self.dim, self.dim)
                else:
                    self.between_var = vec.reshape(self.dim, self.dim)
    def plda_write(self,plda):
        """Write mean/within_var/between_var as flat Kaldi vectors."""
        with kaldi_io.open_or_fd(plda,'wb') as f:
            kaldi_io.write_vec_flt(f, self.mean, key='mean')
            kaldi_io.write_vec_flt(f, self.within_var.reshape(-1,1), key='within_var')
            kaldi_io.write_vec_flt(f, self.between_var.reshape(-1,1), key='between_var')
class CIP(object):
    """
    Covariance interpolation between out-of-domain (CORAL-adapted) and
    in-domain PLDA parameters.

    Reference:
    Wang Q., Okabe K., Lee K.A., et al. A Generalized Framework for Domain
    Adaptation of PLDA in Speaker Recognition. ICASSP 2020: 6619-6623.
    """
    def __init__(self,
                 interpolation_weight=0.5):
        # weight on the (CORAL-adapted) out-of-domain covariances
        self.interpolation_weight = interpolation_weight
    def interpolation(self,coral,plda_in_domain):
        """Blend coral's covariances with the in-domain PLDA's; keep the
        in-domain mean."""
        mean_in,between_var_in,within_var_in = self.plda_read(plda_in_domain)
        self.mean = mean_in
        self.between_var = self.interpolation_weight*coral.between_var+(1-self.interpolation_weight)*between_var_in
        self.within_var = self.interpolation_weight*coral.within_var+(1-self.interpolation_weight)*within_var_in
    def plda_read(self,plda):
        """Read (mean, between_var, within_var) from a Kaldi-format PLDA archive."""
        with kaldi_io.open_or_fd(plda,'rb') as f:
            for key,vec in kaldi_io.read_vec_flt_ark(f):
                if key == 'mean':
                    mean = vec.reshape(-1,1)
                    dim = mean.shape[0]
                elif key == 'within_var':
                    within_var = vec.reshape(dim, dim)
                else:
                    between_var = vec.reshape(dim, dim)
        return mean,between_var,within_var
def main():
    """CLI driver: CORAL-adapt an out-of-domain PLDA with in-domain
    i-vectors, interpolate with the in-domain PLDA, write the result."""
    if len(sys.argv) != 5:
        print('<plda-out-domain> <adapt-ivector-rspecifier> <plda-in-domain> <plda-adapt> \n')
        sys.exit()
    plda_out_domain, train_vecs_adapt, plda_in_domain, plda_adapt = sys.argv[1:5]

    coral = CORAL()
    coral.plda_read(plda_out_domain)
    # Accumulate first-order stats from every adaptation i-vector.
    for _, vec in kaldi_io.read_vec_flt_auto(train_vecs_adapt):
        coral.add_stats(1, vec)
    coral.update_plda()

    cip = CIP()
    cip.interpolation(coral, plda_in_domain)

    # Assemble the adapted PLDA and write it in Kaldi's transformed format.
    adapted = PLDA()
    adapted.mean = cip.mean
    adapted.within_var = cip.within_var
    adapted.between_var = cip.between_var
    adapted.get_output()
    adapted.plda_trans_write(plda_adapt)
if __name__ == "__main__":
main() | StarcoderdataPython |
1780205 | <reponame>3xistentialcrisis/Blog
import unittest
from app.models import Comment, Blog, User
from app import db
class CommentModelTest(unittest.TestCase):
    # NOTE(review): this class is shadowed by the second `CommentModelTest`
    # defined later in this module, so unittest never collects it.  It also
    # reads `self.user_karanja` and `self.new_blog` in setUp without ever
    # assigning them, so it would raise AttributeError if it did run.
    # Candidate for deletion or renaming -- confirm intent with the author.

    def setUp(self):
        # Build the comment under test (depends on the undefined attributes
        # noted above).
        self.new_comment = Comment(id=1, comment='Test comment', user=self.user_karanja, blog_id=self.new_blog)

    def tearDown(self):
        # Clear persisted rows so test cases stay independent.
        Blog.query.delete()
        User.query.delete()

    def test_check_instance_variables(self):
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(self.new_comment.comment, 'Test comment')
        self.assertEquals(self.new_comment.user, self.user_karanja)
        self.assertEquals(self.new_comment.blog_id, self.new_blog)
class CommentModelTest(unittest.TestCase):
    """Unit tests for the Comment model: construction, saving, retrieval."""

    def setUp(self):
        # Fresh user / blog / comment fixtures for every test case.
        self.user_ciku = User(username='ciku', password='<PASSWORD>', email='<EMAIL>')
        self.new_blog = Blog(id=1, title='Test', content='test blog', user_id=self.user_ciku.id)
        self.new_comment = Comment(id=1, comment='test comment', user_id=self.user_ciku.id,
                                   blog_id=self.new_blog.id)

    def tearDown(self):
        # Remove any rows the test persisted so cases stay independent.
        Blog.query.delete()
        User.query.delete()
        Comment.query.delete()

    def test_check_instance_variables(self):
        """Constructor stores the attributes it was given."""
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(self.new_comment.comment, 'test comment')
        self.assertEqual(self.new_comment.user_id, self.user_ciku.id)
        self.assertEqual(self.new_comment.blog_id, self.new_blog.id)

    def test_save_comment(self):
        """save() persists the comment to the database."""
        self.new_comment.save()
        self.assertTrue(len(Comment.query.all()) > 0)

    def test_get_comment(self):
        """get_comment() retrieves a previously saved comment by id."""
        self.new_comment.save()
        got_comment = Comment.get_comment(1)
        self.assertTrue(got_comment is not None)
5059632 | <reponame>mlaugharn/EB_GFN<gh_stars>0
import numpy as np
import torch
import torch.nn as nn
import torchvision
import os, sys
import copy
import time
import random
import ipdb
from tqdm import tqdm
import argparse
import network
sys.path.insert(0, "..")
from gflownet import get_GFlowNet
import utils_data
def makedirs(path):
    """Create `path` (including parents) if missing, logging what happened.

    Passes exist_ok=True so a directory created concurrently between the
    exists() check and the makedirs() call (a TOCTOU race in the original)
    no longer raises FileExistsError.
    """
    if not os.path.exists(path):
        print('creating dir: {}'.format(path))
        os.makedirs(path, exist_ok=True)
    else:
        print(path, "already exist!")
class EBM(nn.Module):
    """Energy-based model: a network score plus an optional independent
    Bernoulli base log-density over binary inputs."""

    def __init__(self, net, mean=None):
        super().__init__()
        self.net = net
        if mean is not None:
            # Frozen base-distribution mean; stored as a Parameter so it
            # travels with the module across devices.
            self.mean = nn.Parameter(mean, requires_grad=False)
            self.base_dist = torch.distributions.Bernoulli(probs=self.mean)
        else:
            self.mean = None

    def forward(self, x):
        score = self.net(x).squeeze()
        if self.mean is None:
            return score + 0.
        # Add the factorized Bernoulli base log-likelihood per sample.
        return score + self.base_dist.log_prob(x).sum(-1)
if __name__ == "__main__":
    # Command-line configuration for joint EBM + GFlowNet training.
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", "--d", default=0, type=int)
    # data
    parser.add_argument('--save_dir', type=str, default="./")
    parser.add_argument('--data', type=str, default='dmnist')
    parser.add_argument("--down_sample", "--ds", default=0, type=int, choices=[0, 1])
    parser.add_argument('--ckpt_path', type=str, default=None)
    # models
    parser.add_argument('--model', type=str, default='mlp-256')
    parser.add_argument('--base_dist', "--bd", type=int, default=1, choices=[0, 1])
    parser.add_argument('--gradnorm', "--gn", type=float, default=0.0)
    parser.add_argument('--l2', type=float, default=0.0)
    parser.add_argument('--n_iters', "--ni", type=lambda x: int(float(x)), default=5e4)
    parser.add_argument('--batch_size', "--bs", type=int, default=100)
    parser.add_argument('--test_batch_size', type=int, default=100)
    parser.add_argument('--print_every', "--pe", type=int, default=100)
    parser.add_argument('--viz_every', "--ve", type=int, default=2000)
    parser.add_argument('--eval_every', type=int, default=2000)
    parser.add_argument('--lr', type=float, default=.0001)
    parser.add_argument("--ebm_every", "--ee", type=int, default=1, help="EBM training frequency")
    # for GFN
    parser.add_argument("--type", type=str)
    parser.add_argument("--hid", type=int, default=256)
    parser.add_argument("--hid_layers", "--hl", type=int, default=5)
    parser.add_argument("--leaky", type=int, default=1, choices=[0, 1])
    parser.add_argument("--gfn_bn", "--gbn", type=int, default=0, choices=[0, 1])
    parser.add_argument("--init_zero", "--iz", type=int, default=0, choices=[0, 1])
    parser.add_argument("--gmodel", "--gm", type=str, default="mlp")
    parser.add_argument("--train_steps", "--ts", type=int, default=1)
    parser.add_argument("--l1loss", "--l1l", type=int, default=0, choices=[0, 1], help="use soft l1 loss instead of l2")
    parser.add_argument("--with_mh", "--wm", type=int, default=0, choices=[0, 1])
    parser.add_argument("--rand_k", "--rk", type=int, default=0, choices=[0, 1])
    parser.add_argument("--lin_k", "--lk", type=int, default=0, choices=[0, 1])
    parser.add_argument("--warmup_k", "--wk", type=lambda x: int(float(x)), default=0, help="need to use w/ lin_k")
    parser.add_argument("--K", type=int, default=-1, help="for gfn back forth negative sample generation")
    parser.add_argument("--rand_coef", "--rc", type=float, default=0, help="for tb")
    parser.add_argument("--back_ratio", "--br", type=float, default=0.)
    parser.add_argument("--clip", type=float, default=-1., help="for gfn's linf gradient clipping")
    parser.add_argument("--temp", type=float, default=1)
    parser.add_argument("--opt", type=str, default="adam", choices=["adam", "sgd"])
    parser.add_argument("--glr", type=float, default=1e-3)
    parser.add_argument("--zlr", type=float, default=1e-1)
    parser.add_argument("--momentum", "--mom", type=float, default=0.0)
    parser.add_argument("--gfn_weight_decay", "--gwd", type=float, default=0.0)
    parser.add_argument('--mc_num', "--mcn", type=int, default=5)
    args = parser.parse_args()

    # Device selection: a negative --device means CPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = "{:}".format(args.device)
    device = torch.device("cpu") if args.device < 0 else torch.device("cuda")
    args.device = device
    args.save_dir = os.path.join(args.save_dir, "test")
    makedirs(args.save_dir)
    print("Device:" + str(device))
    print("Args:" + str(args))

    # Dataset loading; load_dataset also augments `args` (e.g. input_size,
    # dynamic_binarization -- set inside utils_data, not visible here).
    before_load = time.time()
    train_loader, val_loader, test_loader, args = utils_data.load_dataset(args)
    plot = lambda p, x: torchvision.utils.save_image(x.view(x.size(0), args.input_size[0],
        args.input_size[1], args.input_size[2]), p, normalize=True, nrow=int(x.size(0) ** .5))
    print(f"It takes {time.time() - before_load:.3f}s to load {args.data} dataset.")

    def preprocess(data):
        # Resample binary pixels each epoch when dynamic binarization is on.
        if args.dynamic_binarization:
            return torch.bernoulli(data)
        else:
            return data

    if args.down_sample:
        assert args.model.startswith("mlp-")

    # Build the EBM backbone from the "<arch>-<width>" model string.
    if args.model.startswith("mlp-"):
        nint = int(args.model.split('-')[1])
        net = network.mlp_ebm(np.prod(args.input_size), nint)
    elif args.model.startswith("cnn-"):
        nint = int(args.model.split('-')[1])
        net = network.MNISTConvNet(nint)
    elif args.model.startswith("resnet-"):
        nint = int(args.model.split('-')[1])
        net = network.ResNetEBM(nint)
    else:
        raise ValueError("invalid model definition")

    # Data-dependent base distribution: smoothed mean of the training set.
    init_batch = []
    for x, _ in train_loader:
        init_batch.append(preprocess(x))
    init_batch = torch.cat(init_batch, 0)
    eps = 1e-2
    init_mean = init_batch.mean(0) * (1. - 2 * eps) + eps

    if args.base_dist:
        model = EBM(net, init_mean)
    else:
        model = EBM(net)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    xdim = np.prod(args.input_size)
    assert args.gmodel == "mlp"
    gfn = get_GFlowNet(args.type, xdim, args, device)
    model.to(device)
    print("model: {:}".format(model))

    itr = 0
    while itr < args.n_iters:
        for x in train_loader:
            st = time.time()
            x = preprocess(x[0].to(device))  # -> (bs, 784)
            if args.gradnorm > 0:
                x.requires_grad_()
            update_success_rate = -1.
            # Only trajectory-balance GFN variants are supported here.
            assert "tb" in args.type
            train_loss, train_logZ = gfn.train(args.batch_size, scorer=lambda inp: model(inp).detach(),
                silent=itr % args.print_every != 0, data=x, back_ratio=args.back_ratio)

            # Negative samples: either a back-and-forth perturbation of the
            # data (K steps, optionally Metropolis-Hastings corrected), or a
            # fresh GFN sample.
            if args.rand_k or args.lin_k or (args.K > 0):
                if args.rand_k:
                    K = random.randrange(xdim) + 1
                elif args.lin_k:
                    # Linear warm-up of K up to the full dimensionality.
                    K = min(xdim, int(xdim * float(itr + 1) / args.warmup_k))
                    K = max(K, 1)
                elif args.K > 0:
                    K = args.K
                else:
                    raise ValueError
                gfn.model.eval()
                x_fake, delta_logp_traj = gfn.backforth_sample(x, K)
                delta_logp_traj = delta_logp_traj.detach()
                if args.with_mh:
                    # MH step, calculate log p(x') - log p(x)
                    lp_update = model(x_fake).squeeze() - model(x).squeeze()
                    update_dist = torch.distributions.Bernoulli(logits=lp_update + delta_logp_traj)
                    updates = update_dist.sample()
                    # Accept/reject per sample; rejected rows keep the data x.
                    x_fake = x_fake * updates[:, None] + x * (1. - updates[:, None])
                    update_success_rate = updates.mean().item()
            else:
                x_fake = gfn.sample(args.batch_size)

            # Contrastive-divergence style EBM update, every `ebm_every` iters.
            if itr % args.ebm_every == 0:
                st = time.time() - st
                model.train()
                logp_real = model(x).squeeze()
                if args.gradnorm > 0:
                    # Gradient-norm regularizer on the real-data energy.
                    grad_ld = torch.autograd.grad(logp_real.sum(), x,
                        create_graph=True)[0].flatten(start_dim=1).norm(2, 1)
                    grad_reg = (grad_ld ** 2. / 2.).mean()
                else:
                    grad_reg = torch.tensor(0.).to(device)
                logp_fake = model(x_fake).squeeze()
                obj = logp_real.mean() - logp_fake.mean()
                l2_reg = (logp_real ** 2.).mean() + (logp_fake ** 2.).mean()
                # Maximize obj (real minus fake log-density gap).
                loss = -obj + grad_reg * args.gradnorm + args.l2 * l2_reg
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            if itr % args.print_every == 0 or itr == args.n_iters - 1:
                print("({:5d}) | ({:.3f}s/iter) |log p(real)={:.2e}, "
                    "log p(fake)={:.2e}, diff={:.2e}, grad_reg={:.2e}, l2_reg={:.2e} update_rate={:.1f}".format(itr, st,
                    logp_real.mean().item(), logp_fake.mean().item(), obj.item(), grad_reg.item(), l2_reg.item(), update_success_rate))

            # Periodic evaluation + checkpointing (model moved to CPU so the
            # saved state_dict is device-independent).
            if (itr + 1) % args.eval_every == 0:
                model.eval()
                print("GFN TEST")
                gfn.model.eval()
                gfn_test_ll = gfn.evaluate(test_loader, preprocess, args.mc_num)
                print("GFN Test log-likelihood ({}) with {} samples: {}".format(itr, args.mc_num, gfn_test_ll.item()))

                model.cpu()
                d = {}
                d['model'] = model.state_dict()
                d['optimizer'] = optimizer.state_dict()
                gfn_ckpt = {"model": gfn.model.state_dict(), "optimizer": gfn.optimizer.state_dict(),}
                gfn_ckpt["logZ"] = gfn.logZ.detach().cpu()
                torch.save(d, "{}/ckpt.pt".format(args.save_dir))
                torch.save(gfn_ckpt, "{}/gfn_ckpt.pt".format(args.save_dir))
                model.to(device)

            itr += 1
            if itr > args.n_iters:
                print("Training finished!")
                quit(0)
| StarcoderdataPython |
6449297 | import hashlib
import binascii
import sys,os
import time
import evernote.edam.userstore.constants as UserStoreConstants
import evernote.edam.type.ttypes as Types
from evernote.api.client import EvernoteClient
# Python 2 script: uploads every file in a camera volume to Evernote as a
# note with the image attached, then deletes the local copy.
# NOTE(review): the developer token is hard-coded below (redacted in this
# copy) -- it should be loaded from the environment or a config file.
dev_token = "S=s345:U=3<PASSWORD>d5:E=14b41f1d370:C=143ea40a774:P=1cd:A=en-devtoken:V=2:H=db1266a393bb7007ee80ec1c27e5c7ec"
client = EvernoteClient(token=dev_token,sandbox=False)
userStore = client.get_user_store()
noteStore = client.get_note_store()
#user = userStore.getUser()
#print user.username
#noteStore = client.get_note_store()
#noteStore = client.get_note_store()
#note = Types.Note()
for filer in os.listdir("/Volumes/VOLUME1/DCIM/100MEDIA"):
    print filer
    note = Types.Note()
    # Title the note with the file's modification timestamp.
    note.title = str(time.ctime(os.path.getmtime("/Volumes/VOLUME1/DCIM/100MEDIA/"+filer)))
    note.content = '<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
    note.content += '<en-note>'
    #os.path.getmtime(file)
    # To include an attachment such as an image in a note, first create a Resource
    # for the attachment. At a minimum, the Resource contains the binary attachment
    # data, an MD5 hash of the binary data, and the attachment MIME type.
    # It can also include attributes such as filename and location.
    image = open("/Volumes/VOLUME1/DCIM/100MEDIA/"+filer, 'rb').read()
    md5 = hashlib.md5()
    md5.update(image)
    # Evernote wants the raw digest in Data.bodyHash but the hex digest
    # inside the ENML <en-media> hash attribute.
    hashi = md5.digest()
    hashis = md5.hexdigest()
    print str(hashis)
    note.content+= '<en-media type="image/jpeg" hash="'+str(hashis)+'" /></en-note>'

    data = Types.Data()
    data.size = len(image)
    data.bodyHash = hashi
    data.body = image

    resource = Types.Resource()
    resource.mime = 'image/jpeg'
    resource.data = data

    # Now, add the new Resource to the note's list of resources
    note.resources = [resource]
    notenew = noteStore.createNote(note)
    # NOTE(review): the local file is removed even though createNote's
    # success is not checked -- a failed upload loses the image.
    os.remove("/Volumes/VOLUME1/DCIM/100MEDIA/"+filer)
#import hashlib
#import binascii
#import evernote.edam.userstore.constants as UserStoreConstants
#import evernote.edam.type.ttypes as Types
| StarcoderdataPython |
8187828 | <gh_stars>1-10
from midiutil.MidiFile import MIDIFile
import music_models, argparse, random, note_timing, copy, json, subprocess
#If this value is supplied, the Key class will write down all notes it generates in the file specified.
dir_write_note = ''
def set_dir_write_note(new_dir):
    """Point the module-level note-logging path at `new_dir`.

    An empty string (the module default) disables note logging in Key.
    """
    global dir_write_note  # rebind the module-level setting, not a local
    dir_write_note = new_dir
def gen_notes_for_key(track, number_notes, root_note, scale, channel, duration=1, bias_same_note=0, low_end='A0', high_end='G#8', base_notes=None, notes_bias=None, markov_values=None):
    """Generate `number_notes` Note objects drawn from a Key.

    Fix: the original declared `base_notes=[]` and `notes_bias={}` --
    mutable default arguments shared across every call, so any mutation
    inside music_models.Key would leak between invocations.  They now
    default to None and are replaced with fresh containers per call
    (assumed equivalent for Key -- callers already pass None explicitly).
    """
    if base_notes is None:
        base_notes = []
    if notes_bias is None:
        notes_bias = {}
    k = music_models.Key(root_note, scale, base_notes, notes_bias, low_end, high_end, markov_values)
    notes = []
    # Seed the Markov walk, then draw the requested number of notes.
    prev_note = k.generate_note(None, 3)
    for _ in range(number_notes):
        prev_note = k.generate_note(prev_note, 7, bias_same_note, dir_write_note=dir_write_note)
        notes.append(music_models.Note(channel=channel, track=track, note=prev_note, duration=duration, volume=100))
    return notes
def calculate_number_of_notes(block):
    """Total note count for a block: an explicit 'number_of_notes', or
    'number_of_bars' times 'number_of_beats_per_bar'."""
    explicit = block.get('number_of_notes')
    if explicit:
        return explicit
    bars = block.get('number_of_bars')
    if bars:
        return bars * block['number_of_beats_per_bar']
    raise Exception('There was an error calculating the number of notes in block ', block.get('name'))
def generate_generic_notes(b):
    """Generate the block's raw (untimed) notes from its key configuration,
    filling in defaults for any missing settings."""
    return gen_notes_for_key(
        track=b['track'],
        channel=b.get('channel', 1),
        number_notes=calculate_number_of_notes(b),
        root_note=b.get('root_note', 'A'),
        scale=b.get('scale', 'minor'),
        bias_same_note=b.get('bias_same_note'),
        high_end=b.get('high_end', 'G#7'),
        low_end=b.get('low_end', 'C1'),
        base_notes=b.get('base_notes'),
        notes_bias=b.get('notes_bias', {}),
        markov_values=b.get('markov_values'),
    )
def group_generic_notes(b, generic_notes, starting_point):
    """Arrange a deep copy of `generic_notes` into bars/beats beginning at
    `starting_point`, applying the block's timing configuration."""
    ungrouped = copy.deepcopy(generic_notes)  # leave the caller's notes untouched
    raw_accents = b.get('accents')
    # JSON object keys are strings; the timing code wants integer beat indices.
    accents = {int(beat): raw_accents[beat] for beat in raw_accents} if raw_accents else {}
    return note_timing.group_notes_for_time_signature(
        notes=ungrouped,
        no_beats=b.get('number_of_beats_per_bar'),
        time_signature=b.get('time_signature'),
        bias_separate_notes=b.get('bias_separate_notes'),
        accents=accents,
        start_at=starting_point,
        pattern=b.get('pattern', []),
        default_accent=b.get('default_accent', 50),
    )
def handle_block(b, mid):
    # Recursively render a (possibly nested) block description into a list
    # of bars, registering tempo / program changes on `mid` as a side effect.
    # NOTE(review): this mutates the input dicts (`b['play_at']` and child
    # blocks) in place, and `block.keys() + [...]` below is Python 2 only.
    if b.get('repeat', 1) > 1:
        # Extend play_at with one extra start point per repetition.
        b['play_at'] += [i * b.get('number_of_beats_per_bar', 1) * b.get('number_of_bars') for i in range(1, b['repeat']+1)]
    if b.get('block_type') == 'complex' :
        mid.addTempo(b['track'], b['play_at'][0], b['bpm'])
        complex_track = []
        for block in b['blocks']:
            if not block: continue
            # Inherit parent settings the child doesn't override, except the
            # structural keys listed below.
            for key in b:
                if key not in block.keys() + ['blocks', 'block_type', 'play_at', 'repeat', 'number_of_blocks']:
                    block[key] = b[key]
            complex_track += handle_block(block, mid)
        entire_track = []
        # Stamp a shifted deep copy of the rendered children at each start.
        for starting_point in b['play_at']:
            temp_track = copy.deepcopy(complex_track)
            for bar in temp_track:
                for note in bar.notes:
                    note.time += starting_point
            entire_track += temp_track
    else:
        entire_track = []
        generic_notes = generate_generic_notes(b)
        for starting_point in b['play_at']:
            # Channel is 1-based in the config, 0-based for MIDI.
            mid.addProgramChange(b['track'], b.get('channel', 2) - 1, starting_point, b.get('program_number', 0))
            grouped_notes = group_generic_notes(b, generic_notes, starting_point)
            entire_track += grouped_notes
    # print 'notes for ', b['name'], ':', [[t.time for t in x.notes] for x in entire_track], '\n\n'
    return entire_track
def main():
    """CLI entry point: read a JSON song description, write a .mid file and
    optionally render it to .wav via a soundfont."""
    parser = argparse.ArgumentParser(description="Basic arguments")
    parser.add_argument('--use_soundfont', help = 'Soundfont to use for creating a wav file. If not specified, will just create a mid file', default = '')
    parser.add_argument('--output', help = 'Path for output midi file. ', default = 'output')
    parser.add_argument('--input', help = 'Path for input JSON file. ', default = 'input.JSON')
    parser.add_argument('--no_tracks', help = 'Number of tracks. Default is 100. ', default = 100)
    args = parser.parse_args()
    # Close the input file deterministically (the original leaked the handle).
    with open(args.input, 'r') as f:
        blocks = json.loads(f.read())
    mid = generate(blocks, args.no_tracks)
    # BUG FIX: the original called write_mid(mid, output, soundfont), but
    # write_mid() only takes (mid, output) -- a guaranteed TypeError.  The
    # soundfont belongs to to_wav(), which renders the written .mid to .wav.
    write_mid(mid, args.output)
    if args.use_soundfont:
        to_wav(args.output, args.use_soundfont)
def generate(blocks, no_tracks=100):
    """Build and return a MIDIFile from a block-tree description."""
    midi_file = MIDIFile(no_tracks)
    # handle_block also registers tempo/program changes on midi_file.
    bars = handle_block(blocks, midi_file)
    return generate_from_track(midi_file, bars, no_tracks)
def generate_from_track(mid, entire_track, no_tracks=100):
    """Write every note of every bar into `mid` and return it.

    `no_tracks` is accepted for interface compatibility but unused here.
    """
    all_notes = (note for bar in entire_track for note in bar.notes)
    for note in all_notes:
        mid.addNote(*note.get_values())
    return mid
def write_mid(mid, output):
    """Write `mid` to '<output>.mid' and return the base path.

    Fix: the handle is now closed via a context manager even when
    writeFile() raises (the original leaked the open file on error).
    The 'w+b' mode is kept in case writeFile seeks/reads back.
    """
    with open(output + '.mid', 'w+b') as binfile:
        mid.writeFile(binfile)
    return output
def to_wav(song_path, soundfont):
    """Render '<song_path>.mid' to '<song_path>.wav' using fluidsynth.

    Argument-list form (no shell) keeps paths containing spaces safe.
    """
    subprocess.call([
        'fluidsynth',
        '-F', song_path + '.wav',
        soundfont,
        song_path + '.mid',
    ])
if __name__ == '__main__' :
main()
| StarcoderdataPython |
1727336 | <filename>scripts/scrape_oup.py
# scrapes Oxford university press and returns a list of feeds
# get the main page
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Flat script: scrape Oxford University Press's A-Z journal index and print
# "journal name | RSS link" for every journal's current-issue feed.
html = urlopen("https://academic.oup.com/journals/pages/journals_a_to_z").read().decode('utf-8')
# get all links from this page
soup = BeautifulSoup(html, 'html.parser')
all_journal_links = []
all_journal_names = []
for link in soup.find_all('a'):
    all_journal_links.append(link.get('href'))
    all_journal_names.append(link.get_text())
# Drop anchors without an href, keeping names aligned with links.
jl = [x for x,y in zip(all_journal_links, all_journal_names) if x is not None]
jn = [y for x,y in zip(all_journal_links, all_journal_names) if x is not None]
all_journal_names = jn
all_journal_links = jl
# Keep only journal-homepage links (OUP domain, not the /journals/ index).
jl = [x for x,y in zip(all_journal_links, all_journal_names) if x.find('https://academic.oup.com/') > -1 and x.find('journals') == -1]
jn = [y for x,y in zip(all_journal_links, all_journal_names) if x.find('https://academic.oup.com/') > -1 and x.find('journals') == -1]
all_journal_names = jn
all_journal_links = jl
# now add /issue/ to each, that is the correct link with the RSS link
all_journal_links = [x.strip() + '/issue/' for x in all_journal_links]
rss_links = [None]*len(all_journal_links)
for i in range(0,len(all_journal_links)):
    this_link = all_journal_links[i]
    html = urlopen(this_link).read().decode('utf-8')
    soup = BeautifulSoup(html, 'html.parser')
    all_links = soup.find_all('a')
    for j in range(0,len(all_links)):
        if all_links[j].get_text().find('RSS Feed - Current Issue Only') > -1:
            rss_links[i] = all_links[j].get('href')
    # NOTE(review): if no RSS anchor was found, rss_links[i] is still None
    # and this str + None concatenation raises TypeError.
    print(all_journal_names[i] + " | " + rss_links[i])
6459233 | <reponame>junlulocky/BGMM
"""
Author: <NAME>
Contact: <EMAIL>
Date: 2014
"""
import math
import numpy as np
import numpy.testing as npt
from bayes_gmm.gaussian_components_fixedvar import (
GaussianComponentsFixedVar, FixedVarPrior, log_norm_pdf, log_post_pred_unvectorized
)
def test_log_prod_norm():
    """log_prior of a single-point component equals the product (sum of
    logs) of per-dimension normal pdfs under the prior."""
    np.random.seed(1)

    # Prior
    D = 10
    var = 1*np.random.rand(D)
    mu_0 = 5*np.random.rand(D) - 2
    var_0 = 2*np.random.rand(D)
    prior = FixedVarPrior(var, mu_0, var_0)

    # GMM will be used to access `_log_prod_norm`
    x = 3*np.random.rand(D) + 4
    gmm = GaussianComponentsFixedVar(np.array([x]), prior)

    # Dimensions are independent, so the log prior is the sum of 1-D terms.
    expected_prior = np.sum([log_norm_pdf(x[i], mu_0[i], var_0[i]) for i in range(len(x))])

    npt.assert_almost_equal(gmm.log_prior(0), expected_prior)
def test_log_post_pred_k():
    """Per-component posterior predictive matches the closed-form
    Gaussian posterior computed by hand, including after deletions."""
    np.random.seed(1)

    # Generate data
    D = 10
    N_1 = 10
    N_2 = 5
    N_3 = 5
    X = 5*np.random.rand(N_1 + N_2 + N_3, D) - 1
    X_1 = X[:N_1]
    X_2 = X[N_1:N_1 + N_2]
    X_3 = X[N_1 + N_2:]

    # Prior
    var = 1*np.random.rand(D)
    mu_0 = 5*np.random.rand(D) - 2
    var_0 = 2*np.random.rand(D)
    prior = FixedVarPrior(var, mu_0, var_0)
    precision = 1./var
    precision_0 = 1./var_0

    # Setup GMM
    assignments = np.concatenate([np.zeros(N_1), np.ones(N_2), 2*np.ones(N_3)])
    gmm = GaussianComponentsFixedVar(X, prior, assignments=assignments)

    # Remove everything from component 2 (additional check)
    for i in range(N_1, N_1 + N_2):
        gmm.del_item(i)

    # Calculate posterior for first component by hand
    x_1 = X_1[0]
    precision_N_1 = precision_0 + N_1*precision
    mu_N_1 = (mu_0 * precision_0 + precision*N_1*X_1.mean(axis=0)) / precision_N_1
    # Predictive variance adds the observation variance to the posterior's.
    precision_pred = 1./(1./precision_N_1 + 1./precision)
    expected_posterior = np.sum(
        [log_norm_pdf(x_1[i], mu_N_1[i], 1./precision_pred[i]) for i in range(len(x_1))]
        )
    npt.assert_almost_equal(gmm.log_post_pred_k(0, 0), expected_posterior)

    # Calculate posterior for second component by hand
    # (after deleting component 2's items, X_3 becomes component index 1).
    x_3 = X_3[0]
    precision_N_3 = precision_0 + N_3*precision
    mu_N_3 = (mu_0 * precision_0 + precision*N_3*X_3.mean(axis=0)) / precision_N_3
    precision_pred = 1./(1./precision_N_3 + 1./precision)
    expected_posterior = np.sum(
        [log_norm_pdf(x_3[i], mu_N_3[i], 1./precision_pred[i]) for i in range(len(x_3))]
        )
    npt.assert_almost_equal(gmm.log_post_pred_k(N_1 + N_2, 1), expected_posterior)
def test_log_post_pred():
    """Vectorized log_post_pred agrees with the unvectorized reference."""
    np.random.seed(1)

    # Generate data
    X = np.random.rand(11, 10)
    N, D = X.shape

    # Prior
    var = 1*np.random.rand(D)
    mu_0 = 5*np.random.rand(D) - 2
    var_0 = 2*np.random.rand(D)
    prior = FixedVarPrior(var, mu_0, var_0)

    # Setup GMM
    # Item 10 is left unassigned (-1) and used as the query point.
    assignments = [0, 0, 0, 1, 0, 1, 3, 4, 3, 2, -1]
    gmm = GaussianComponentsFixedVar(X, prior, assignments=assignments)

    expected_log_post_pred = log_post_pred_unvectorized(gmm, 10)
    npt.assert_almost_equal(gmm.log_post_pred(10), expected_log_post_pred)
def test_log_marg_k():
    """Component marginal likelihood matches the closed-form product of
    per-dimension Gaussian marginals computed by hand."""
    np.random.seed(1)

    # Generate data
    D = 10
    N_1 = 10
    X_1 = 5*np.random.rand(N_1, D) - 1

    # Prior
    var = 10*np.random.rand(D)
    mu_0 = 5*np.random.rand(D) - 2
    var_0 = 2*np.random.rand(D)
    prior = FixedVarPrior(var, mu_0, var_0)
    precision = 1./var
    precision_0 = 1./var_0

    # Setup GMM
    assignments = np.concatenate([np.zeros(N_1)])
    gmm = GaussianComponentsFixedVar(X_1, prior, assignments=assignments)

    # Calculate marginal for component by hand
    # (standard fixed-variance Gaussian marginal, per independent dimension).
    expected_log_marg = np.sum(np.log([
        np.sqrt(var[i])/(np.sqrt(2*np.pi*var[i])**N_1*np.sqrt(N_1*var_0[i] + var[i])) *
        np.exp(-0.5*np.square(X_1).sum(axis=0)[i] / var[i] - mu_0[i]**2/(2*var_0[i])) *
        np.exp(
            (var_0[i]*N_1**2*X_1.mean(axis=0)[i]**2/var[i] + var[i]*mu_0[i]**2/var_0[i] +
            2*N_1*X_1.mean(axis=0)[i]*mu_0[i]) / (2. * (N_1*var_0[i] + var[i]))
            )
        for i in range(D)
        ]))

    npt.assert_almost_equal(gmm.log_marg_k(0), expected_log_marg)
| StarcoderdataPython |
1902435 | # -*- coding: utf-8 -*-
from model.contact import Contact
# Fixture data for contact-form tests: two fully populated Contact records
# (names/values are Cyrillic sample data; emails/passwords are anonymized
# dataset placeholders, not literal values).
test_data = [
    Contact(firstname='Степан ', middlename='Иванович ',
            lastname='Петров ', nickname='Иваныч ',
            title='title', company='ОАО ОмПО Радиозавод им. А.С.Попова(РЕЛЕРО) ',
            address='г. Омск ул.Ленина д.1 кв. 99', homephone=r'+79509999999',
            mobilephone=r'+79509999999', workphone=r'+79509999999',
            faxphone=r'+79509999999', mail='<EMAIL>',
            email2='<EMAIL>', email3='<EMAIL>',
            homepage='www.homepage.ru',
            bday="1", bmonth="January", byear="1999",
            aday="11", amonth="February", ayear="2000",
            address2='г. Омск ул. Маршала Жукова д.№ 79 кв.№ 105 ', secondaryphone='84952000600'),
    Contact(firstname='Петр ', middlename='Степанович ',
            lastname='Степанович ', nickname='Иваныч ',
            title='title', company='ОАО ОмПО Радиозавод им. А.С.Попова(РЕЛЕРО) ',
            address='г. Омск ул.Ленина д.1 кв. 99', homephone=r'+79509999999',
            mobilephone=r'+79509999999', workphone=r'+79509999999',
            faxphone=r'+79509999999', mail='<EMAIL>',
            email2='<EMAIL>', email3='<EMAIL>',
            homepage='www.homepage.ru',
            bday="1", bmonth="January", byear="1999",
            aday="11", amonth="February", ayear="2000",
            address2='г. Омск ул. Маршала Жукова д.№ 79 кв.№ 105 ', secondaryphone='84952000600')
]
| StarcoderdataPython |
11212948 | <reponame>amitkumarj441/QNET<gh_stars>10-100
from functools import partial
import pytest
from sympy import symbols, sqrt, exp, I, Rational, IndexedBase
from qnet import (
CircuitSymbol, CIdentity, CircuitZero, CPermutation, SeriesProduct,
Feedback, SeriesInverse, circuit_identity as cid, Beamsplitter,
OperatorSymbol, IdentityOperator, ZeroOperator, Create, Destroy, Jz, Jplus,
Jminus, Phase, Displace, Squeeze, LocalSigma, LocalProjector, tr, Adjoint,
PseudoInverse, NullSpaceProjector, Commutator, LocalSpace, TrivialSpace,
FullSpace, Matrix, KetSymbol, ZeroKet, TrivialKet, BasisKet,
CoherentStateKet, UnequalSpaces, ScalarTimesKet, OperatorTimesKet, Bra,
OverlappingSpaces, SpaceTooLargeError, BraKet, KetBra, SuperOperatorSymbol,
IdentitySuperOperator, ZeroSuperOperator, SuperAdjoint, SPre, SPost,
SuperOperatorTimesOperator, FockIndex, StrLabel, IdxSym, latex,
configure_printing, QuantumDerivative, Scalar, ScalarExpression, SpinSpace,
SpinBasisKet, Eq)
from qnet.printing.latexprinter import QnetLatexPrinter
def test_ascii_scalar():
    """Test rendering of scalar values"""
    assert latex(2) == '2'
    latex.printer.cache = {}
    # we always want 2.0 to be printed as '2'. Without this normalization, the
    # state of the cache might introduce non-reproducible behavior, as 2==2.0
    assert latex(2.0) == '2'
    assert latex(1j) == '1i'
    assert latex('foo') == 'foo'

    # Indexed sympy symbols render with a subscript.
    i = IdxSym('i')
    alpha = IndexedBase('alpha')
    assert latex(i) == 'i'
    assert latex(alpha[i]) == r'\alpha_{i}'
def test_tex_render_string():
    """Test rendering of ascii to latex strings"""
    printer = QnetLatexPrinter()
    # Single letters pass through; longer words get wrapped in \text{}.
    assert printer._render_str('a') == r'a'
    assert printer._render_str('A') == r'A'
    assert printer._render_str('longword') == r'\text{longword}'
    assert printer._render_str('alpha') == r'\alpha'
    assert latex('alpha') == r'\alpha'
    # Greek-letter names map to their LaTeX macro, or to the visually
    # identical Latin capital where LaTeX has no macro (e.g. Alpha -> A).
    assert printer._render_str('Alpha') == r'A'
    assert printer._render_str('Beta') == r'B'
    assert printer._render_str('Gamma') == r'\Gamma'
    assert printer._render_str('Delta') == r'\Delta'
    assert printer._render_str('Epsilon') == r'E'
    assert printer._render_str('Zeta') == r'Z'
    assert printer._render_str('Eta') == r'H'
    assert printer._render_str('Theta') == r'\Theta'
    assert printer._render_str('Iota') == r'I'
    assert printer._render_str('Kappa') == r'K'
    assert printer._render_str('Lambda') == r'\Lambda'
    assert printer._render_str('Mu') == r'M'
    assert printer._render_str('Nu') == r'N'
    assert printer._render_str('Xi') == r'\Xi'
    assert printer._render_str('Omicron') == r'O'
    assert printer._render_str('Pi') == r'\Pi'
    assert printer._render_str('Rho') == r'P'
    assert printer._render_str('Sigma') == r'\Sigma'
    assert printer._render_str('Tau') == r'T'
    # Both spellings of upsilon are accepted.
    assert printer._render_str('Ypsilon') == r'\Upsilon'
    assert printer._render_str('Upsilon') == r'\Upsilon'
    assert printer._render_str('ypsilon') == r'\upsilon'
    assert printer._render_str('upsilon') == r'\upsilon'
    assert printer._render_str('Phi') == r'\Phi'
    assert printer._render_str('Chi') == r'X'
    assert printer._render_str('Psi') == r'\Psi'
    assert printer._render_str('Omega') == r'\Omega'
    # Underscore-separated suffixes become (nested) subscripts.
    assert printer._render_str('xi_1') == r'\xi_{1}'
    assert printer._render_str('xi_1^2') == r'\xi_{1}^{2}'
    assert printer._render_str('Xi_1') == r'\Xi_{1}'
    assert printer._render_str('Xi_long') == r'\Xi_{\text{long}}'
    assert printer._render_str('Xi_1+2') == r'\Xi_{1+2}'
    assert printer._render_str('Lambda_i,j') == r'\Lambda_{i,j}'
    assert printer._render_str('epsilon_mu,nu') == r'\epsilon_{\mu,\nu}'
def test_tex_circuit_elements():
    """Test the tex representation of "atomic" circuit algebra elements"""
    alpha, t = symbols('alpha, t')
    theta = symbols('theta', positive=True)
    assert latex(CircuitSymbol("C", cdim=2)) == 'C'
    assert latex(CircuitSymbol("C_1", cdim=2)) == 'C_{1}'
    assert latex(CircuitSymbol("Xi_2", cdim=2)) == r'\Xi_{2}'
    assert latex(CircuitSymbol("Xi_full", cdim=2)) == r'\Xi_{\text{full}}'
    # Symbols with arguments render them in parentheses.
    assert (
        latex(CircuitSymbol("C", alpha, t, cdim=2)) ==
        r'C\left(\alpha, t\right)')
    assert latex(CIdentity) == r'{\rm cid}(1)'
    assert latex(cid(4)) == r'{\rm cid}(4)'
    assert latex(CircuitZero) == r'{\rm cid}(0)'
    # Default beamsplitter mixing angle is pi/4.
    assert latex(Beamsplitter()) == r'{\rm BS}\left(\frac{\pi}{4}\right)'
    assert (
        latex(Beamsplitter(mixing_angle=theta)) ==
        r'{\rm BS}\left(\theta\right)')
def test_tex_circuit_operations():
    """Test the tex representation of circuit algebra operations"""
    A = CircuitSymbol("A_test", cdim=2)
    B = CircuitSymbol("B_test", cdim=2)
    C = CircuitSymbol("C_test", cdim=2)
    beta = CircuitSymbol("beta", cdim=1)
    gamma = CircuitSymbol("gamma", cdim=1)
    perm = CPermutation.create((2, 1, 0, 3))

    # Series product renders as \lhd; concatenation as \boxplus.
    assert (latex(A << B << C) ==
            r'A_{\text{test}} \lhd B_{\text{test}} \lhd C_{\text{test}}')
    assert (latex(A + B + C) ==
            r'A_{\text{test}} \boxplus B_{\text{test}} '
            r'\boxplus C_{\text{test}}')
    assert (latex(A << (beta + gamma)) ==
            r'A_{\text{test}} \lhd \left(\beta \boxplus \gamma\right)')
    assert (latex(A + (B << C)) ==
            r'A_{\text{test}} \boxplus '
            r'\left(B_{\text{test}} \lhd C_{\text{test}}\right)')
    # Channel permutations render as a two-row matrix.
    assert (latex(perm) ==
            r'\mathbf{P}_{\sigma}\begin{pmatrix} 0 & 1 & 2 & 3 \\ '
            r'2 & 1 & 0 & 3 \end{pmatrix}')
    assert (latex(SeriesProduct(perm, (A+B))) ==
            r'\mathbf{P}_{\sigma}\begin{pmatrix} 0 & 1 & 2 & 3 \\ '
            r'2 & 1 & 0 & 3 \end{pmatrix} '
            r'\lhd \left(A_{\text{test}} \boxplus B_{\text{test}}\right)')
    assert (latex(Feedback((A+B), out_port=3, in_port=0)) ==
            r'\left\lfloor{A_{\text{test}} \boxplus B_{\text{test}}}'
            r'\right\rfloor_{3\rightarrow{}0}')
    assert (latex(SeriesInverse(A+B)) ==
            r'\left[A_{\text{test}} \boxplus B_{\text{test}}\right]^{\rhd}')
def test_tex_hilbert_elements():
    """Test the tex representation of "atomic" Hilbert space algebra
    elements"""
    assert latex(LocalSpace(1)) == r'\mathcal{H}_{1}'
    # dimension/basis metadata does not change the rendered label.
    assert latex(LocalSpace(1, dimension=2)) == r'\mathcal{H}_{1}'
    assert latex(LocalSpace(1, basis=(r'g', 'e'))) == r'\mathcal{H}_{1}'
    assert latex(LocalSpace('local')) == r'\mathcal{H}_{\text{local}}'
    assert latex(LocalSpace('kappa')) == r'\mathcal{H}_{\kappa}'
    assert latex(TrivialSpace) == r'\mathcal{H}_{\text{null}}'
    assert latex(FullSpace) == r'\mathcal{H}_{\text{total}}'
    # Symbolic labels render as the underlying index symbol.
    assert latex(LocalSpace(StrLabel(IdxSym('i')))) == r'\mathcal{H}_{i}'
def test_tex_hilbert_operations():
    """Test the tex representation of Hilbert space algebra operations"""
    H1 = LocalSpace(1)
    H2 = LocalSpace(2)
    # Tensor product of spaces renders with \otimes.
    assert latex(H1 * H2) == r'\mathcal{H}_{1} \otimes \mathcal{H}_{2}'
def test_tex_matrix():
    """Test tex representation of the Matrix class"""
    A = OperatorSymbol("A", hs=1)
    B = OperatorSymbol("B", hs=1)
    C = OperatorSymbol("C", hs=1)
    D = OperatorSymbol("D", hs=1)
    assert latex(OperatorSymbol("A", hs=1)) == r'\hat{A}^{(1)}'
    # 2x2 matrix of operators.
    assert (latex(Matrix([[A, B], [C, D]])) ==
            r'\begin{pmatrix}\hat{A}^{(1)} & \hat{B}^{(1)} \\'
            r'\hat{C}^{(1)} & \hat{D}^{(1)}\end{pmatrix}')
    # Column vector vs row vector.
    assert (latex(Matrix([A, B, C, D])) ==
            r'\begin{pmatrix}\hat{A}^{(1)} \\\hat{B}^{(1)} \\'
            r'\hat{C}^{(1)} \\\hat{D}^{(1)}\end{pmatrix}')
    assert (latex(Matrix([[A, B, C, D]])) ==
            r'\begin{pmatrix}\hat{A}^{(1)} & \hat{B}^{(1)} & '
            r'\hat{C}^{(1)} & \hat{D}^{(1)}\end{pmatrix}')
    assert (latex(Matrix([[0, 1], [-1, 0]])) ==
            r'\begin{pmatrix}0 & 1 \\-1 & 0\end{pmatrix}')
    # Degenerate/empty matrices still produce a (vacuous) pmatrix.
    assert latex(Matrix([[], []])) == r'\begin{pmatrix} \\\end{pmatrix}'
    assert latex(Matrix([])) == r'\begin{pmatrix} \\\end{pmatrix}'
def test_tex_equation():
    """Test printing of the Eq class"""
    eq_1 = Eq(
        lhs=OperatorSymbol('H', hs=0),
        rhs=Create(hs=0) * Destroy(hs=0))
    # Build a chained derivation that modifies lhs, rhs, and both sides,
    # with intermediate tags, to exercise the multi-line align output.
    eq = (
        eq_1
        .apply_to_lhs(lambda expr: expr + 1, cont=True)
        .apply_to_rhs(lambda expr: expr + 1, cont=True)
        .apply_to_rhs(lambda expr: expr**2, cont=True, tag=3)
        .apply(lambda expr: expr + 1, cont=True, tag=4)
        .apply_mtd_to_rhs('expand', cont=True)
        .apply_to_lhs(lambda expr: expr**2, cont=True, tag=5)
        .apply_mtd('expand', cont=True)
        .apply_to_lhs(lambda expr: expr**2, cont=True, tag=6)
        .apply_mtd_to_lhs('expand', cont=True)
        .apply_to_rhs(lambda expr: expr + 1, cont=True)
    )
    # a single untagged equation renders as an equation environment
    assert (
        latex(eq_1).split("\n") == [
            r'\begin{equation}',
            r' \hat{H}^{(0)} = \hat{a}^{(0)\dagger} \hat{a}^{(0)}',
            r'\end{equation}',
            ''])
    # a tag renders as \tag{...} just before \end{equation}
    assert (
        latex(eq_1.set_tag(1)).split("\n") == [
            r'\begin{equation}',
            r' \hat{H}^{(0)} = \hat{a}^{(0)\dagger} \hat{a}^{(0)}',
            r'\tag{1}\end{equation}',
            ''])
    tex_lines = (
        latex(eq, show_hs_label=False, tex_op_macro=r'\Op{{{name}}}')
        .split("\n"))
    expected = [
        r'\begin{align}',
        r' \Op{H} &= \Op{a}^{\dagger} \Op{a}\\',
        r' \mathbb{1} + \Op{H} &= \Op{a}^{\dagger} \Op{a}\\',
        r' &= \mathbb{1} + \Op{a}^{\dagger} \Op{a}\\',
        r' &= \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right) \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right)\tag{3}\\',
        r' 2 + \Op{H} &= \mathbb{1} + \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right) \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right)\tag{4}\\',
        r' &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\\',
        r' \left(2 + \Op{H}\right) \left(2 + \Op{H}\right) &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\tag{5}\\',
        r' 4 + 4 \Op{H} + \Op{H} \Op{H} &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\\',
        r' \left(4 + 4 \Op{H} + \Op{H} \Op{H}\right) \left(4 + 4 \Op{H} + \Op{H} \Op{H}\right) &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\tag{6}\\',
        r' 16 + 32 \Op{H} + \Op{H} \Op{H} \Op{H} \Op{H} + 8 \Op{H} \Op{H} \Op{H} + 24 \Op{H} \Op{H} &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\\',
        r' &= 3 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}',
        r'\end{align}',
        r'']
    # Compare line-by-line for readable failures, but check the lengths
    # first: the previous enumerate-based loop raised IndexError (instead
    # of a clean assertion failure) when tex_lines had extra lines, and
    # never noticed missing trailing lines.
    assert len(tex_lines) == len(expected)
    for line, expected_line in zip(tex_lines, expected):
        assert line == expected_line
def test_tex_operator_elements():
    """Test the tex representation of "atomic" operator algebra elements"""
    hs1 = LocalSpace('q1', dimension=2)
    hs2 = LocalSpace('q2', dimension=2)
    alpha, beta = symbols('alpha, beta')
    # fock1 / spin1 override the default operator identifiers, so e.g.
    # Create(hs=fock1) must print as "b", not "a".
    fock1 = LocalSpace(
        1, local_identifiers={'Create': 'b', 'Destroy': 'b', 'Phase': 'Phi'})
    spin1 = SpinSpace(
        1, spin=1, local_identifiers={'Jz': 'Z', 'Jplus': 'Jp', 'Jminus': 'Jm'})
    assert latex(OperatorSymbol("A", hs=hs1)) == r'\hat{A}^{(q_{1})}'
    assert (latex(OperatorSymbol("A_1", hs=hs1*hs2)) ==
            r'\hat{A}_{1}^{(q_{1} \otimes q_{2})}')
    assert (latex(OperatorSymbol("Xi_2", hs=(r'q1', 'q2'))) ==
            r'\hat{\Xi}_{2}^{(q_{1} \otimes q_{2})}')
    assert (latex(OperatorSymbol("Xi_full", hs=1)) ==
            r'\hat{\Xi}_{\text{full}}^{(1)}')
    # symbols with scalar arguments render those in parentheses
    assert latex(OperatorSymbol("Xi", alpha, beta, hs=1)) == (
        r'\hat{\Xi}^{(1)}\left(\alpha, \beta\right)')
    assert latex(IdentityOperator) == r'\mathbb{1}'
    assert latex(IdentityOperator, tex_identity_sym='I') == 'I'
    assert latex(ZeroOperator) == r'\mathbb{0}'
    assert latex(Create(hs=1)) == r'\hat{a}^{(1)\dagger}'
    assert latex(Create(hs=fock1)) == r'\hat{b}^{(1)\dagger}'
    assert latex(Destroy(hs=1)) == r'\hat{a}^{(1)}'
    assert latex(Destroy(hs=fock1)) == r'\hat{b}^{(1)}'
    assert latex(Jz(hs=SpinSpace(1, spin=1))) == r'\hat{J}_{z}^{(1)}'
    assert latex(Jz(hs=spin1)) == r'\hat{Z}^{(1)}'
    assert latex(Jplus(hs=SpinSpace(1, spin=1))) == r'\hat{J}_{+}^{(1)}'
    assert latex(Jplus(hs=spin1)) == r'\text{Jp}^{(1)}'
    assert latex(Jminus(hs=SpinSpace(1, spin=1))) == r'\hat{J}_{-}^{(1)}'
    assert latex(Jminus(hs=spin1)) == r'\text{Jm}^{(1)}'
    assert (latex(Phase(Rational(1, 2), hs=1)) ==
            r'\text{Phase}^{(1)}\left(\frac{1}{2}\right)')
    assert (latex(Phase(0.5, hs=1)) ==
            r'\text{Phase}^{(1)}\left(0.5\right)')
    assert (latex(Phase(0.5, hs=fock1)) ==
            r'\hat{\Phi}^{(1)}\left(0.5\right)')
    assert (latex(Displace(0.5, hs=1)) ==
            r'\hat{D}^{(1)}\left(0.5\right)')
    assert (latex(Squeeze(0.5, hs=1)) ==
            r'\text{Squeeze}^{(1)}\left(0.5\right)')
    hs_tls = LocalSpace('1', basis=('g', 'e'))
    sig_e_g = LocalSigma('e', 'g', hs=hs_tls)
    # LocalSigma renders either as sigma with indices or as a ket-bra
    assert (
        latex(sig_e_g, sig_as_ketbra=False) ==
        r'\hat{\sigma}_{e,g}^{(1)}')
    assert (
        latex(sig_e_g) ==
        r'\left\lvert e \middle\rangle\!\middle\langle g \right\rvert^{(1)}')
    hs_tls = LocalSpace('1', basis=('excited', 'ground'))
    sig_excited_ground = LocalSigma('excited', 'ground', hs=hs_tls)
    # multi-letter labels are wrapped in \text{...}
    assert (
        latex(sig_excited_ground, sig_as_ketbra=False) ==
        r'\hat{\sigma}_{\text{excited},\text{ground}}^{(1)}')
    assert (
        latex(sig_excited_ground) ==
        r'\left\lvert \text{excited} \middle\rangle\!'
        r'\middle\langle \text{ground} \right\rvert^{(1)}')
    hs_tls = LocalSpace('1', basis=('mu', 'nu'))
    sig_mu_nu = LocalSigma('mu', 'nu', hs=hs_tls)
    assert (
        latex(sig_mu_nu) ==
        r'\left\lvert \mu \middle\rangle\!'
        r'\middle\langle \nu \right\rvert^{(1)}')
    hs_tls = LocalSpace('1', basis=('excited', 'ground'))
    sig_excited_excited = LocalProjector('excited', hs=hs_tls)
    # projectors print as \Pi with a single index
    assert (
        latex(sig_excited_excited, sig_as_ketbra=False) ==
        r'\hat{\Pi}_{\text{excited}}^{(1)}')
    hs_tls = LocalSpace('1', basis=('g', 'e'))
    sig_e_e = LocalProjector('e', hs=hs_tls)
    assert (
        latex(sig_e_e, sig_as_ketbra=False) == r'\hat{\Pi}_{e}^{(1)}')
def test_tex_operator_operations():
    """Test the tex representation of operator algebra operations"""
    hs1 = LocalSpace('q_1', dimension=2)
    hs2 = LocalSpace('q_2', dimension=2)
    A = OperatorSymbol("A", hs=hs1)
    B = OperatorSymbol("B", hs=hs1)
    C = OperatorSymbol("C", hs=hs2)
    psi = KetSymbol('Psi', hs=hs1)
    gamma = symbols('gamma', positive=True)
    assert latex(A.dag()) == r'\hat{A}^{(q_{1})\dagger}'
    assert latex(A + B) == r'\hat{A}^{(q_{1})} + \hat{B}^{(q_{1})}'
    assert latex(A * B) == r'\hat{A}^{(q_{1})} \hat{B}^{(q_{1})}'
    assert latex(A * C) == r'\hat{A}^{(q_{1})} \hat{C}^{(q_{2})}'
    # scalar coefficients: integer, complex, and symbolic
    assert latex(2 * A) == r'2 \hat{A}^{(q_{1})}'
    assert latex(2j * A) == r'2i \hat{A}^{(q_{1})}'
    assert latex((1+2j) * A) == r'(1+2i) \hat{A}^{(q_{1})}'
    assert latex(gamma**2 * A) == r'\gamma^{2} \hat{A}^{(q_{1})}'
    assert (
        latex(-gamma**2/2 * A) == r'- \frac{\gamma^{2}}{2} \hat{A}^{(q_{1})}')
    # partial trace over hs2 leaves the hs1 factor outside the tr[...]
    assert (
        latex(tr(A * C, over_space=hs2)) ==
        r'{\rm tr}_{q_{2}}\left[\hat{C}^{(q_{2})}\right] '
        r'\hat{A}^{(q_{1})}')
    assert latex(Adjoint(A)) == r'\hat{A}^{(q_{1})\dagger}'
    # adjoint of a compound expression needs parentheses ...
    assert (
        latex(Adjoint(A**2)) ==
        r'\left(\hat{A}^{(q_{1})} \hat{A}^{(q_{1})}\right)^\dagger')
    # ... but a power of an adjoint does not
    assert (
        latex(Adjoint(A)**2) ==
        r'\hat{A}^{(q_{1})\dagger} \hat{A}^{(q_{1})\dagger}')
    assert latex(Adjoint(Create(hs=1))) == r'\hat{a}^{(1)}'
    assert (
        latex(Adjoint(A + B)) ==
        r'\left(\hat{A}^{(q_{1})} + \hat{B}^{(q_{1})}\right)^\dagger')
    assert latex(PseudoInverse(A)) == r'\left(\hat{A}^{(q_{1})}\right)^+'
    assert (
        latex(PseudoInverse(A)**2) ==
        r'\left(\hat{A}^{(q_{1})}\right)^+ \left(\hat{A}^{(q_{1})}\right)^+')
    assert (latex(NullSpaceProjector(A)) ==
            r'\hat{P}_{Ker}\left(\hat{A}^{(q_{1})}\right)')
    assert latex(A - B) == r'\hat{A}^{(q_{1})} - \hat{B}^{(q_{1})}'
    assert (latex(A - B + C) ==
            r'\hat{A}^{(q_{1})} - \hat{B}^{(q_{1})} + \hat{C}^{(q_{2})}')
    assert (latex(2 * A - sqrt(gamma) * (B + C)) ==
            r'2 \hat{A}^{(q_{1})} - \sqrt{\gamma} \left(\hat{B}^{(q_{1})} + '
            r'\hat{C}^{(q_{2})}\right)')
    assert (latex(Commutator(A, B)) ==
            r'\left[\hat{A}^{(q_{1})}, \hat{B}^{(q_{1})}\right]')
    # adjoint of (commutator applied to a ket) becomes a bra expression
    expr = (Commutator(A, B) * psi).dag()
    assert (
        latex(expr, show_hs_label=False) ==
        r'\left\langle \Psi \right\rvert \left[\hat{A}, '
        r'\hat{B}\right]^{\dagger}')
def test_tex_ket_elements():
    """Test the tex representation of "atomic" kets"""
    hs1 = LocalSpace('q1', basis=('g', 'e'))
    hs2 = LocalSpace('q2', basis=('g', 'e'))
    alpha, beta = symbols('alpha, beta')
    psi = KetSymbol('Psi', hs=hs1)
    assert (latex(psi) == r'\left\lvert \Psi \right\rangle^{(q_{1})}')
    assert (
        latex(KetSymbol('Psi', alpha, beta, hs=1)) ==
        r'\left\lvert \Psi\left(\alpha, \beta\right) \right\rangle^{(1)}')
    # tex_use_braket switches to the braket.sty \Ket macro
    assert (latex(psi, tex_use_braket=True) == r'\Ket{\Psi}^{(q_{1})}')
    assert (
        latex(psi, tex_use_braket=True, show_hs_label='subscript') ==
        r'\Ket{\Psi}_{(q_{1})}')
    assert (
        latex(psi, tex_use_braket=True, show_hs_label=False) == r'\Ket{\Psi}')
    assert (latex(KetSymbol('Psi', hs=1)) ==
            r'\left\lvert \Psi \right\rangle^{(1)}')
    assert (latex(KetSymbol('Psi', hs=(1, 2))) ==
            r'\left\lvert \Psi \right\rangle^{(1 \otimes 2)}')
    assert (latex(KetSymbol('Psi', hs=hs1*hs2)) ==
            r'\left\lvert \Psi \right\rangle^{(q_{1} \otimes q_{2})}')
    # NOTE(review): exact duplicate of the `hs=1` assertion above
    assert (latex(KetSymbol('Psi', hs=1)) ==
            r'\left\lvert \Psi \right\rangle^{(1)}')
    assert latex(ZeroKet) == '0'
    assert latex(TrivialKet) == '1'
    assert (latex(BasisKet('e', hs=hs1)) ==
            r'\left\lvert e \right\rangle^{(q_{1})}')
    hs_tls = LocalSpace('1', basis=('excited', 'ground'))
    assert (latex(BasisKet('excited', hs=hs_tls)) ==
            r'\left\lvert \text{excited} \right\rangle^{(1)}')
    assert (latex(BasisKet(1, hs=1)) ==
            r'\left\lvert 1 \right\rangle^{(1)}')
    # spin-space basis kets print as (signed) fractional labels
    spin = SpinSpace('s', spin=(3, 2))
    assert (
        latex(SpinBasisKet(-3, 2, hs=spin)) ==
        r'\left\lvert -3/2 \right\rangle^{(s)}')
    assert (
        latex(SpinBasisKet(1, 2, hs=spin)) ==
        r'\left\lvert +1/2 \right\rangle^{(s)}')
    assert (
        latex(SpinBasisKet(-3, 2, hs=spin), tex_frac_for_spin_labels=True) ==
        r'\left\lvert -\frac{3}{2} \right\rangle^{(s)}')
    assert (
        latex(SpinBasisKet(1, 2, hs=spin), tex_frac_for_spin_labels=True) ==
        r'\left\lvert +\frac{1}{2} \right\rangle^{(s)}')
    assert (latex(CoherentStateKet(2.0, hs=1)) ==
            r'\left\lvert \alpha=2 \right\rangle^{(1)}')
def test_tex_symbolic_labels():
    """Test tex representation of symbols with symbolic labels"""
    i = IdxSym('i')
    j = IdxSym('j')
    hs0 = LocalSpace(0)
    hs1 = LocalSpace(1)
    Psi = IndexedBase('Psi')
    # All \Ket/\Bra expectations below rely on braket mode being active.
    with configure_printing(tex_use_braket=True):
        assert (
            latex(BasisKet(FockIndex(2 * i), hs=hs0)) ==
            r'\Ket{2 i}^{(0)}')
        assert (latex(
            KetSymbol(StrLabel(2 * i), hs=hs0)) ==
            r'\Ket{2 i}^{(0)}')
        assert (
            latex(KetSymbol(StrLabel(Psi[i, j]), hs=hs0*hs1)) ==
            r'\Ket{\Psi_{i j}}^{(0 \otimes 1)}')
        expr = BasisKet(FockIndex(i), hs=hs0) * BasisKet(FockIndex(j), hs=hs1)
        assert latex(expr) == r'\Ket{i,j}^{(0 \otimes 1)}'
        assert (
            latex(Bra(BasisKet(FockIndex(2 * i), hs=hs0))) ==
            r'\Bra{2 i}^{(0)}')
        assert (
            latex(LocalSigma(FockIndex(i), FockIndex(j), hs=hs0)) ==
            r'\Ket{i}\!\Bra{j}^{(0)}')
        alpha = symbols('alpha')
        # the Fock representation is an exponential-weighted sum over n
        expr = CoherentStateKet(alpha, hs=1).to_fock_representation()
        assert (
            latex(expr) ==
            r'e^{- \frac{\alpha \overline{\alpha}}{2}} '
            r'\left(\sum_{n \in \mathcal{H}_{1}} '
            r'\frac{\alpha^{n}}{\sqrt{n!}} \Ket{n}^{(1)}\right)')
        # conjg_style='star' switches \overline{...} to {...}^*
        assert (
            latex(expr, conjg_style='star') ==
            r'e^{- \frac{\alpha {\alpha}^*}{2}} '
            r'\left(\sum_{n \in \mathcal{H}_{1}} '
            r'\frac{\alpha^{n}}{\sqrt{n!}} \Ket{n}^{(1)}\right)')
        tls = SpinSpace(label='s', spin='1/2', basis=('down', 'up'))
        Sig = IndexedBase('sigma')
        n = IdxSym('n')
        Sig_n = OperatorSymbol(StrLabel(Sig[n]), hs=tls)
        assert latex(Sig_n, show_hs_label=False) == r'\hat{\sigma}_{n}'
def test_tex_bra_elements():
    """Test the tex representation of "atomic" bras"""
    hs1 = LocalSpace('q1', basis=('g', 'e'))
    hs2 = LocalSpace('q2', basis=('g', 'e'))
    alpha, beta = symbols('alpha, beta')
    bra = Bra(KetSymbol('Psi', hs=hs1))
    assert (latex(bra) == r'\left\langle \Psi \right\rvert^{(q_{1})}')
    assert latex(Bra(KetSymbol('Psi', alpha, beta, hs=hs1))) == (
        r'\left\langle \Psi\left(\alpha, \beta\right) \right\rvert^{(q_{1})}')
    # braket.sty mode, with the three hs-label display variants
    assert (latex(bra, tex_use_braket=True) == r'\Bra{\Psi}^{(q_{1})}')
    assert (
        latex(bra, tex_use_braket=True, show_hs_label='subscript') ==
        r'\Bra{\Psi}_{(q_{1})}')
    assert (
        latex(bra, tex_use_braket=True, show_hs_label=False) ==
        r'\Bra{\Psi}')
    assert (
        latex(Bra(KetSymbol('Psi', hs=1))) ==
        r'\left\langle \Psi \right\rvert^{(1)}')
    assert (
        latex(Bra(KetSymbol('Psi', hs=(1, 2)))) ==
        r'\left\langle \Psi \right\rvert^{(1 \otimes 2)}')
    assert (
        latex(Bra(KetSymbol('Psi', hs=hs1*hs2))) ==
        r'\left\langle \Psi \right\rvert^{(q_{1} \otimes q_{2})}')
    # .dag() on a ket must render identically to an explicit Bra(...)
    assert (
        latex(KetSymbol('Psi', hs=1).dag()) ==
        r'\left\langle \Psi \right\rvert^{(1)}')
    assert latex(Bra(ZeroKet)) == '0'
    assert latex(Bra(TrivialKet)) == '1'
    assert (
        latex(BasisKet('e', hs=hs1).adjoint()) ==
        r'\left\langle e \right\rvert^{(q_{1})}')
    assert (
        latex(BasisKet(1, hs=1).adjoint()) ==
        r'\left\langle 1 \right\rvert^{(1)}')
    assert (
        latex(CoherentStateKet(2.0, hs=1).dag()) ==
        r'\left\langle \alpha=2 \right\rvert^{(1)}')
def test_tex_ket_operations():
    """Test the tex representation of ket operations"""
    hs1 = LocalSpace('q_1', basis=('g', 'e'))
    hs2 = LocalSpace('q_2', basis=('g', 'e'))
    ket_g1 = BasisKet('g', hs=hs1)
    ket_e1 = BasisKet('e', hs=hs1)
    ket_g2 = BasisKet('g', hs=hs2)
    ket_e2 = BasisKet('e', hs=hs2)
    psi1 = KetSymbol("Psi_1", hs=hs1)
    # (removed an accidental duplicate assignment of psi2)
    psi2 = KetSymbol("Psi_2", hs=hs1)
    psi3 = KetSymbol("Psi_3", hs=hs1)
    phi = KetSymbol("Phi", hs=hs2)
    A = OperatorSymbol("A_0", hs=hs1)
    gamma = symbols('gamma', positive=True)
    alpha = symbols('alpha')
    beta = symbols('beta')
    phase = exp(-I * gamma)
    i = IdxSym('i')
    assert (
        latex(psi1 + psi2) ==
        r'\left\lvert \Psi_{1} \right\rangle^{(q_{1})} + '
        r'\left\lvert \Psi_{2} \right\rangle^{(q_{1})}')
    assert (
        latex(psi1 - psi2 + psi3) ==
        r'\left\lvert \Psi_{1} \right\rangle^{(q_{1})} - '
        r'\left\lvert \Psi_{2} \right\rangle^{(q_{1})} + '
        r'\left\lvert \Psi_{3} \right\rangle^{(q_{1})}')
    # tensor product of kets on different spaces
    assert (
        latex(psi1 * phi) ==
        r'\left\lvert \Psi_{1} \right\rangle^{(q_{1})} \otimes '
        r'\left\lvert \Phi \right\rangle^{(q_{2})}')
    assert (
        latex(phase * psi1) ==
        r'e^{- i \gamma} \left\lvert \Psi_{1} \right\rangle^{(q_{1})}')
    assert (
        latex((alpha + 1) * KetSymbol('Psi', hs=0)) ==
        r'\left(\alpha + 1\right) \left\lvert \Psi \right\rangle^{(0)}')
    assert (
        latex(A * psi1) ==
        r'\hat{A}_{0}^{(q_{1})} \left\lvert \Psi_{1} \right\rangle^{(q_{1})}')
    braket = BraKet(psi1, psi2)
    assert (
        latex(braket, show_hs_label='subscript') ==
        r'\left\langle \Psi_{1} \middle\vert \Psi_{2} \right\rangle_{(q_{1})}')
    assert (
        latex(braket, show_hs_label=False) ==
        r'\left\langle \Psi_{1} \middle\vert \Psi_{2} \right\rangle')
    expr = BraKet(
        KetSymbol('Psi_1', alpha, hs=hs1), KetSymbol('Psi_2', beta, hs=hs1))
    assert (
        latex(expr) ==
        r'\left\langle \Psi_{1}\left(\alpha\right) \middle\vert '
        r'\Psi_{2}\left(\beta\right) \right\rangle^{(q_{1})}')
    # products of basis kets collapse into a single multi-letter label
    assert (
        latex(ket_e1 * ket_e2) ==
        r'\left\lvert ee \right\rangle^{(q_{1} \otimes q_{2})}')
    # orthonormality of the basis
    assert latex(ket_e1.dag() * ket_e1) == r'1'
    assert latex(ket_g1.dag() * ket_e1) == r'0'
    ketbra = KetBra(psi1, psi2)
    assert (
        latex(ketbra) ==
        r'\left\lvert \Psi_{1} \middle\rangle\!'
        r'\middle\langle \Psi_{2} \right\rvert^{(q_{1})}')
    assert (
        latex(ketbra, show_hs_label='subscript') ==
        r'\left\lvert \Psi_{1} \middle\rangle\!'
        r'\middle\langle \Psi_{2} \right\rvert_{(q_{1})}')
    assert (
        latex(ketbra, show_hs_label=False) ==
        r'\left\lvert \Psi_{1} \middle\rangle\!'
        r'\middle\langle \Psi_{2} \right\rvert')
    expr = KetBra(
        KetSymbol('Psi_1', alpha, hs=hs1), KetSymbol('Psi_2', beta, hs=hs1))
    assert (
        latex(expr) ==
        r'\left\lvert \Psi_{1}\left(\alpha\right) \middle\rangle\!'
        r'\middle\langle \Psi_{2}\left(\beta\right) \right\rvert^{(q_{1})}')
    # two (entangled) Bell-type superpositions
    bell1 = (ket_e1 * ket_g2 - I * ket_g1 * ket_e2) / sqrt(2)
    bell2 = (ket_e1 * ket_e2 - ket_g1 * ket_g2) / sqrt(2)
    assert (
        latex(bell1) ==
        r'\frac{1}{\sqrt{2}} \left(\left\lvert eg \right\rangle^{(q_{1} '
        r'\otimes q_{2})} - i \left\lvert ge \right\rangle'
        r'^{(q_{1} \otimes q_{2})}\right)')
    assert (
        latex(bell2) ==
        r'\frac{1}{\sqrt{2}} \left(\left\lvert ee \right\rangle^{(q_{1} '
        r'\otimes q_{2})} - \left\lvert gg \right\rangle'
        r'^{(q_{1} \otimes q_{2})}\right)')
    assert (
        latex(bell2, show_hs_label=False) ==
        r'\frac{1}{\sqrt{2}} \left(\left\lvert ee \right\rangle - '
        r'\left\lvert gg \right\rangle\right)')
    assert BraKet.create(bell1, bell2).expand() == 0
    assert (
        latex(BraKet.create(bell1, bell2)) ==
        r'\frac{1}{2} \left(\left\langle eg \right\rvert'
        r'^{(q_{1} \otimes q_{2})} + i \left\langle ge \right\rvert'
        r'^{(q_{1} \otimes q_{2})}\right) '
        r'\left(\left\lvert ee \right\rangle^{(q_{1} \otimes q_{2})} '
        r'- \left\lvert gg \right\rangle^{(q_{1} \otimes q_{2})}\right)')
    assert (
        latex(KetBra.create(bell1, bell2)) ==
        r'\frac{1}{2} \left(\left\lvert eg \right\rangle'
        r'^{(q_{1} \otimes q_{2})} - i \left\lvert ge \right\rangle'
        r'^{(q_{1} \otimes q_{2})}\right)\left(\left\langle ee \right\rvert'
        r'^{(q_{1} \otimes q_{2})} - \left\langle gg \right\rvert'
        r'^{(q_{1} \otimes q_{2})}\right)')
    # the same operations with symbolic (indexed) labels in braket mode
    with configure_printing(tex_use_braket=True):
        expr = KetBra(KetSymbol('Psi', hs=0), BasisKet(FockIndex(i), hs=0))
        assert latex(expr) == r'\Ket{\Psi}\!\Bra{i}^{(0)}'
        expr = KetBra(BasisKet(FockIndex(i), hs=0), KetSymbol('Psi', hs=0))
        assert latex(expr) == r'\Ket{i}\!\Bra{\Psi}^{(0)}'
        expr = BraKet(KetSymbol('Psi', hs=0), BasisKet(FockIndex(i), hs=0))
        assert latex(expr) == r'\Braket{\Psi | i}^(0)'
        expr = BraKet(BasisKet(FockIndex(i), hs=0), KetSymbol('Psi', hs=0))
        assert latex(expr) == r'\Braket{i | \Psi}^(0)'
def test_tex_bra_operations():
    """Test the tex representation of bra operations"""
    hs1 = LocalSpace('q_1', dimension=2)
    hs2 = LocalSpace('q_2', dimension=2)
    psi1 = KetSymbol("Psi_1", hs=hs1)
    # (removed accidental duplicate assignments of psi2 and bra_psi2)
    psi2 = KetSymbol("Psi_2", hs=hs1)
    bra_psi1 = KetSymbol("Psi_1", hs=hs1).dag()
    bra_psi2 = KetSymbol("Psi_2", hs=hs1).dag()
    bra_psi3 = KetSymbol("Psi_3", hs=hs1).dag()
    bra_phi = KetSymbol("Phi", hs=hs2).dag()
    A = OperatorSymbol("A_0", hs=hs1)
    gamma = symbols('gamma', positive=True)
    phase = exp(-I * gamma)
    # adjoint of a sum distributes over the terms
    assert (
        latex((psi1 + psi2).dag()) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} + '
        r'\left\langle \Psi_{2} \right\rvert^{(q_{1})}')
    assert (
        latex((psi1 + psi2).dag(), tex_use_braket=True) ==
        r'\Bra{\Psi_{1}}^{(q_{1})} + \Bra{\Psi_{2}}^{(q_{1})}')
    assert (
        latex(bra_psi1 + bra_psi2) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} + '
        r'\left\langle \Psi_{2} \right\rvert^{(q_{1})}')
    assert (
        latex(bra_psi1 - bra_psi2 + bra_psi3) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} - '
        r'\left\langle \Psi_{2} \right\rvert^{(q_{1})} + '
        r'\left\langle \Psi_{3} \right\rvert^{(q_{1})}')
    assert (
        latex(bra_psi1 * bra_phi) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} \otimes '
        r'\left\langle \Phi \right\rvert^{(q_{2})}')
    assert (
        latex(bra_psi1 * bra_phi, tex_use_braket=True) ==
        r'\Bra{\Psi_{1}}^{(q_{1})} \otimes \Bra{\Phi}^{(q_{2})}')
    # taking the bra conjugates the scalar prefactor
    assert (
        latex(Bra(phase * psi1)) ==
        r'e^{i \gamma} \left\langle \Psi_{1} \right\rvert^{(q_{1})}')
    # (A |psi>)^dagger = <psi| A^dagger
    assert (
        latex((A * psi1).dag()) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} '
        r'\hat{A}_{0}^{(q_{1})\dagger}')
def test_tex_sop_elements():
    """Test the tex representation of "atomic" Superoperators"""
    hs1 = LocalSpace('q1', dimension=2)
    hs2 = LocalSpace('q2', dimension=2)
    alpha, beta = symbols('alpha, beta')
    # superoperators print upright (\mathrm), unlike hatted operators
    assert latex(SuperOperatorSymbol("A", hs=hs1)) == r'\mathrm{A}^{(q_{1})}'
    assert (latex(SuperOperatorSymbol("A_1", hs=hs1*hs2)) ==
            r'\mathrm{A}_{1}^{(q_{1} \otimes q_{2})}')
    assert (latex(SuperOperatorSymbol("Xi", alpha, beta, hs=hs1)) ==
            r'\mathrm{\Xi}^{(q_{1})}\left(\alpha, \beta\right)')
    assert (latex(SuperOperatorSymbol("Xi_2", hs=('q1', 'q2'))) ==
            r'\mathrm{\Xi}_{2}^{(q_{1} \otimes q_{2})}')
    assert (latex(SuperOperatorSymbol("Xi_full", hs=1)) ==
            r'\mathrm{\Xi}_{\text{full}}^{(1)}')
    assert latex(IdentitySuperOperator) == r'\mathbb{1}'
    assert latex(ZeroSuperOperator) == r'\mathbb{0}'
def test_tex_sop_operations():
    """Test the tex representation of super operator algebra operations"""
    hs1 = LocalSpace('q_1', dimension=2)
    hs2 = LocalSpace('q_2', dimension=2)
    A = SuperOperatorSymbol("A", hs=hs1)
    B = SuperOperatorSymbol("B", hs=hs1)
    C = SuperOperatorSymbol("C", hs=hs2)
    L = SuperOperatorSymbol("L", hs=1)
    M = SuperOperatorSymbol("M", hs=1)
    A_op = OperatorSymbol("A", hs=1)
    gamma = symbols('gamma', positive=True)
    assert latex(A + B) == r'\mathrm{A}^{(q_{1})} + \mathrm{B}^{(q_{1})}'
    assert latex(A * B) == r'\mathrm{A}^{(q_{1})} \mathrm{B}^{(q_{1})}'
    assert latex(A * C) == r'\mathrm{A}^{(q_{1})} \mathrm{C}^{(q_{2})}'
    # scalar coefficients: integer, complex, and symbolic
    assert latex(2 * A) == r'2 \mathrm{A}^{(q_{1})}'
    assert latex(2j * A) == r'2i \mathrm{A}^{(q_{1})}'
    assert latex((1+2j) * A) == r'(1+2i) \mathrm{A}^{(q_{1})}'
    assert latex(gamma**2 * A) == r'\gamma^{2} \mathrm{A}^{(q_{1})}'
    assert (latex(-gamma**2/2 * A) ==
            r'- \frac{\gamma^{2}}{2} \mathrm{A}^{(q_{1})}')
    assert latex(SuperAdjoint(A)) == r'\mathrm{A}^{(q_{1})\dagger}'
    assert (latex(SuperAdjoint(A + B)) ==
            r'\left(\mathrm{A}^{(q_{1})} + '
            r'\mathrm{B}^{(q_{1})}\right)^\dagger')
    assert latex(A - B) == r'\mathrm{A}^{(q_{1})} - \mathrm{B}^{(q_{1})}'
    assert (latex(A - B + C) ==
            r'\mathrm{A}^{(q_{1})} - \mathrm{B}^{(q_{1})} + '
            r'\mathrm{C}^{(q_{2})}')
    assert (latex(2 * A - sqrt(gamma) * (B + C)) ==
            r'2 \mathrm{A}^{(q_{1})} - \sqrt{\gamma} '
            r'\left(\mathrm{B}^{(q_{1})} + \mathrm{C}^{(q_{2})}\right)')
    # pre-/post-multiplication superoperators wrap their operator argument
    assert latex(SPre(A_op)) == r'\mathrm{SPre}\left(\hat{A}^{(1)}\right)'
    assert latex(SPost(A_op)) == r'\mathrm{SPost}\left(\hat{A}^{(1)}\right)'
    # a superoperator applied to an operator uses square brackets
    assert (latex(SuperOperatorTimesOperator(L, A_op)) ==
            r'\mathrm{L}^{(1)}\left[\hat{A}^{(1)}\right]')
    assert (latex(SuperOperatorTimesOperator(L, sqrt(gamma) * A_op)) ==
            r'\mathrm{L}^{(1)}\left[\sqrt{\gamma} \hat{A}^{(1)}\right]')
    assert (latex(SuperOperatorTimesOperator((L + 2*M), A_op)) ==
            r'\left(\mathrm{L}^{(1)} + 2 \mathrm{M}^{(1)}\right)'
            r'\left[\hat{A}^{(1)}\right]')
def test_tex_spin_arrows():
    """Test the representation of spin-1/2 spaces with special labels "down",
    "up" as arrows"""
    tls1 = SpinSpace('1', spin='1/2', basis=("down", "up"))
    tls2 = SpinSpace('2', spin='1/2', basis=("down", "up"))
    tls3 = SpinSpace('3', spin='1/2', basis=("down", "up"))
    down1 = BasisKet('down', hs=tls1)
    up1 = BasisKet('up', hs=tls1)
    down2 = BasisKet('down', hs=tls2)
    up3 = BasisKet('up', hs=tls3)
    # the "down"/"up" labels are rendered as arrow symbols
    assert latex(down1) == r'\left\lvert \downarrow \right\rangle^{(1)}'
    assert latex(up1) == r'\left\lvert \uparrow \right\rangle^{(1)}'
    # ... also inside a multi-space product ket
    ket = down1 * down2 * up3
    assert (
        latex(ket) ==
        r'\left\lvert \downarrow\downarrow\uparrow \right\rangle'
        r'^{(1 \otimes 2 \otimes 3)}')
    # ... and inside a single LocalSigma (ket-bra)
    sig = LocalSigma("up", "down", hs=tls1)
    assert (
        latex(sig) ==
        r'\left\lvert \uparrow \middle\rangle\!'
        r'\middle\langle \downarrow \right\rvert^{(1)}')
@pytest.mark.xfail
def test_tex_spin_arrows_multi_sigma():
    # when fixed, combine with test_tex_spin_arrows
    tls1 = SpinSpace('1', spin='1/2', basis=("down", "up"))
    tls2 = SpinSpace('2', spin='1/2', basis=("down", "up"))
    tls3 = SpinSpace('3', spin='1/2', basis=("down", "up"))
    sig1 = LocalSigma("up", "down", hs=tls1)
    sig2 = LocalSigma("up", "up", hs=tls2)
    sig3 = LocalSigma("down", "down", hs=tls3)
    # Placeholder expectation: the empty string guarantees an (expected)
    # failure until the desired rendering of multi-space sigma products
    # is decided.
    assert latex(sig1 * sig2 * sig3) == r''
def test_repr_latex():
    """Test the automatic representation in the notebook"""
    op_a = OperatorSymbol("A", hs=1)
    op_b = OperatorSymbol("B", hs=1)
    # _repr_latex_ must wrap the latex() rendering in inline-math dollars,
    # for a plain symbol as well as for a compound expression.
    for expr in (op_a, op_a + op_b):
        assert expr._repr_latex_() == "$%s$" % latex(expr)
@pytest.fixture
def MyScalarFunc():
    """Fixture providing a minimal symbolic scalar-function class.

    The returned class is instantiated as ``ScalarFunc(name, *sym_args)``
    and supports differentiation (producing MyScalarDerivative instances)
    and a custom latex rendering ``name(arg1, arg2, ...)``.
    """
    class MyScalarDerivative(QuantumDerivative, Scalar):
        # derivative type paired with ScalarFunc; no extra behavior
        pass

    class ScalarFunc(ScalarExpression):

        def __init__(self, name, *sym_args):
            self._name = name
            self._sym_args = sym_args
            super().__init__(name, *sym_args)

        def _adjoint(self):
            # real-valued scalar: self-adjoint
            return self

        @property
        def args(self):
            return (self._name, *self._sym_args)

        def _diff(self, sym):
            # first-order derivative with respect to `sym`
            return MyScalarDerivative(self, derivs={sym: 1})

        def _latex(self, *args, **kwargs):
            # render as e.g. "f(s, t)"
            return "%s(%s)" % (
                self._name, ", ".join(
                    [latex(sym) for sym in self._sym_args]))

    return ScalarFunc
def test_tex_derivative(MyScalarFunc):
    """Test tex rendering of symbolic derivatives, their evaluation at
    specific points, and series expansions."""
    s, s0, t, t0, gamma = symbols('s, s_0, t, t_0, gamma', real=True)
    m = IdxSym('m')
    n = IdxSym('n')
    S = IndexedBase('s')
    T = IndexedBase('t')
    f = partial(MyScalarFunc, "f")
    g = partial(MyScalarFunc, "g")
    # simple first-order and mixed higher-order derivatives
    expr = f(s, t).diff(t)
    assert latex(expr) == r'\frac{\partial}{\partial t} f(s, t)'
    expr = f(s, t).diff(s, n=2).diff(t)
    assert latex(expr) == (
        r'\frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t)')
    # evaluation at a point renders with an "evaluated-at" bar
    expr = f(s, t).diff(s, n=2).diff(t).evaluate_at({s: s0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{s=s_{0}}')
    expr = f(S[m], T[n]).diff(S[m], n=2).diff(T[n]).evaluate_at({S[m]: s0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s_{m}^{2} \partial t_{n}} '
        r'f(s_{m}, t_{n}) \right\vert_{s_{m}=s_{0}}')
    expr = f(s, t).diff(s, n=2).diff(t).evaluate_at({s: 0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{s=0}')
    expr = f(gamma, t).diff(gamma, n=2).diff(t).evaluate_at({gamma: 0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial \gamma^{2} \partial t} '
        r'f(\gamma, t) \right\vert_{\gamma=0}')
    expr = f(s, t).diff(s, n=2).diff(t).evaluate_at({s: s0, t: t0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{s=s_{0}, t=t_{0}}')
    # construct derivatives directly via the derivative class
    D = expr.__class__
    expr = D(f(s, t) + g(s, t), derivs={s: 2, t: 1}, vals={s: s0, t: t0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} '
        r'\left(f(s, t) + g(s, t)\right) \right\vert_{s=s_{0}, t=t_{0}}')
    expr = D(2 * f(s, t), derivs={s: 2, t: 1}, vals={s: s0, t: t0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} '
        r'\left(2 f(s, t)\right) \right\vert_{s=s_{0}, t=t_{0}}')
    # derivatives as factors/terms of larger expressions
    expr = f(s, t).diff(t) * g(s, t)
    assert latex(expr) == (
        r'\left(\frac{\partial}{\partial t} f(s, t)\right) g(s, t)')
    expr = f(s, t).diff(t).evaluate_at({t: 0}) * g(s, t)
    assert latex(expr) == (
        r'\left(\left. \frac{\partial}{\partial t} f(s, t) '
        r'\right\vert_{t=0}\right) g(s, t)')
    expr = f(s, t).diff(t) + g(s, t)
    assert latex(expr) == r'\frac{\partial}{\partial t} f(s, t) + g(s, t)'
    # series expansions (tuple of coefficient expressions)
    f = MyScalarFunc("f", S[m], T[n])
    series = f.series_expand(T[n], about=0, order=3)
    assert latex(series) == (
        r'\left(f(s_{m}, 0), \left. \frac{\partial}{\partial t_{n}} '
        r'f(s_{m}, t_{n}) \right\vert_{t_{n}=0}, \frac{1}{2} \left(\left. '
        r'\frac{\partial^{2}}{\partial t_{n}^{2}} f(s_{m}, t_{n}) '
        r'\right\vert_{t_{n}=0}\right), \frac{1}{6} \left(\left. '
        r'\frac{\partial^{3}}{\partial t_{n}^{3}} f(s_{m}, t_{n}) '
        r'\right\vert_{t_{n}=0}\right)\right)')
    f = MyScalarFunc("f", s, t)
    series = f.series_expand(t, about=0, order=2)
    assert (
        latex(series) ==
        r'\left(f(s, 0), \left. \frac{\partial}{\partial t} f(s, t) '
        r'\right\vert_{t=0}, \frac{1}{2} \left(\left. '
        r'\frac{\partial^{2}}{\partial t^{2}} f(s, t) '
        r'\right\vert_{t=0}\right)\right)')
    expr = (  # nested derivative
        MyScalarFunc("f", s, t)
        .diff(s, n=2)
        .diff(t)
        .evaluate_at({t: t0})
        .diff(t0))
    assert latex(expr) == (
        r'\frac{\partial}{\partial t_{0}} \left(\left. '
        r'\frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{t=t_{0}}\right)')
| StarcoderdataPython |
3551387 | <reponame>gaarangoa/genomic-scripts<filename>GeneTools/fasta_subset.py
import sys
import click
from Bio import SeqIO
import logging
import gzip
import json
@click.command()
@click.option('--fasta', required=True, help='fasta input file')
@click.option('--entries', required=True, help='tabular file with entries')
def fasta_subset(fasta, entries):
    '''
    Search and retrieve sequences from fasta file

    This script hashes the --entries and traverses the --fasta file until
    all entries are found. The running time depends on the length of the
    file.
    '''
    # Hash the requested ids; a set gives O(1) membership tests.
    with open(entries) as entries_handle:
        wanted = {line.strip() for line in entries_handle}
    with open(fasta) as fasta_handle:
        for record in SeqIO.parse(fasta_handle, "fasta"):
            # Terminate early once every requested id has been emitted.
            # (The original kept the full dict forever, so its early-exit
            # check could never trigger.)
            if not wanted:
                break
            if record.id in wanted:
                # Membership test replaces the old try/assert/except-pass,
                # which silently swallowed every error and broke entirely
                # under `python -O` (asserts are stripped).
                print(">" + record.id + "\n" + str(record.seq))
                wanted.discard(record.id)
| StarcoderdataPython |
1629915 | <reponame>pkfec/regulations-parser
import logging
import click
from regparser.tree.depth import optional_rules
from regparser.tree.depth.derive import derive_depths
# Module-level logger (not used in the code visible in this module).
logger = logging.getLogger(__name__)
@click.command()
@click.argument('markers', type=click.STRING, required=True)
def outline_depths(markers):
    """
    Infer an outline's structure.
    Return a list of outline depths for a given list of space-separated
    markers.
    """
    # The MARKERS argument is a single space-separated string.
    tokens = markers.split(' ')
    solutions = derive_depths(
        tokens,
        [optional_rules.limit_sequence_gap(1)])
    # Collapse equivalent solutions into unique depth tuples and pick one.
    unique_depth_tuples = {
        tuple(str(assignment.depth) for assignment in solution)
        for solution in solutions}
    chosen = unique_depth_tuples.pop()
    # Output mirrors the input format: space-separated.
    click.echo(' '.join(chosen))
if __name__ == '__main__':
    # Enable running this command directly. E.g.,
    # `$ python regparser/commands/outline_depths.py`. This can save 1.5
    # seconds or more of startup time.
    # (Converted the original stray triple-quoted string -- a no-op
    # expression statement, not a docstring -- into a real comment.)
    outline_depths()
| StarcoderdataPython |
1657416 | <reponame>HippoInWindow20/YPHS-HW
import sqlite3
from string import Template
from datetime import datetime
from ftplib import FTP
import requests
import os
# FTP credentials come from the environment; a missing variable raises
# KeyError at import time.
usr = os.environ['ftpusr']
psw = os.environ['ftppsw']
def get_current_time():
    """Return today's date in the Asia/Taipei timezone as 'YYYY/MM/DD'.

    Queries the worldtimeapi.org web service, so this requires network
    access and raises on connection failure.
    """
    site=requests.get("https://worldtimeapi.org/api/timezone/Asia/Taipei")
    data=site.json()
    # the API returns an ISO-8601 timestamp with a UTC offset
    day=datetime.fromisoformat(data["datetime"])
    return day.strftime('%Y/%m/%d')
def remote_connect(server, file_name, usr, psw):
    """Download ./database/<file_name> from the FTP server into the
    current working directory.

    NOTE(review): the host address is hard-coded below -- presumably a
    private server; confirm before reuse.
    """
    server.set_debuglevel(0)
    server.connect("172.16.17.32")
    server.login(usr, psw)
    server.cwd("./database")
    with open(f"./{file_name}", "wb") as w:
        server.retrbinary(f'RETR ./{file_name}', w.write)
    server.quit()
def remote_upload(server, file_name, usr, psw):
    """Upload ./<file_name> to ./database/<file_name> on the FTP server.

    NOTE(review): set_debuglevel(2) prints verbose FTP protocol traces,
    unlike remote_connect's level 0 -- looks like leftover debugging;
    confirm intent before silencing.
    """
    server.set_debuglevel(2)
    server.connect("172.16.17.32")
    server.login(usr, psw)
    server.cwd("./database")
    with open(f"./{file_name}", "rb") as r:
        server.storbinary(f"STOR ./{file_name}", r)
    server.quit()
class database:
    """A SQLite database mirrored on a remote FTP server.

    The database file is downloaded on construction and uploaded back
    when the instance is destroyed.
    """

    def __init__(self, name):
        global usr, psw
        self.name = name
        self.server = FTP()
        try:
            # Best effort: if the remote copy cannot be fetched, fall
            # through and let sqlite3.connect() create a fresh local file.
            remote_connect(self.server, name, usr, psw)
        except Exception:
            pass
        self.db = sqlite3.connect(name)

    def __del__(self):
        # NOTE(review): committing/uploading from __del__ is fragile --
        # __del__ is not guaranteed to run at interpreter shutdown.  An
        # explicit close() would be safer; kept for interface
        # compatibility with existing callers.
        global usr, psw
        self.db.commit()
        self.db.close()
        remote_upload(self.server, self.name, usr, psw)

    def create_table(self, table_name):
        # Identifiers (table names) cannot be bound as SQL parameters,
        # so table_name must come from trusted code.
        self.db.execute(
            "CREATE TABLE {0}(id INTEGER PRIMARY KEY AUTOINCREMENT,"
            "type TEXT,day TEXT,subject TEXT,content TEXT)".format(table_name))

    def insert(self, table_name, subject, type_, content):
        # Values are bound as parameters: the previous Template-based
        # string substitution broke on quotes in the content and was
        # vulnerable to SQL injection.
        self.db.execute(
            "INSERT INTO {0}(type, day, subject, content) "
            "VALUES(?, ?, ?, ?)".format(table_name),
            (type_, get_current_time(), subject, content))
        self.db.commit()

    def select(self, table_name, date):
        # Return all rows for a given day ('YYYY/MM/DD').
        results = self.db.execute(
            "SELECT * FROM {0} WHERE day=?".format(table_name), (date,))
        return results.fetchall()

    def select_by_id(self, table_name, id):
        # Return the single row with the given primary key (or None).
        result = self.db.execute(
            "SELECT * FROM {0} WHERE id=?".format(table_name),
            (id,)).fetchone()
        return result

    def update(self, table_name, id, content):
        self.db.execute(
            "UPDATE {0} SET content = ? WHERE id = ?".format(table_name),
            (content, id))
        self.db.commit()

    def delete(self, table_name, id):
        self.db.execute(
            "DELETE FROM {0} WHERE id=?".format(table_name), (id,))
        self.db.commit()
| StarcoderdataPython |
4937097 | from __future__ import absolute_import
from __future__ import print_function
import os
# Read the package version (first line of the adjacent VERSION file).
# A context manager closes the file handle, which the original bare
# open(...).read() chain leaked.
with open(os.path.join(os.path.dirname(__file__), "VERSION")) as _version_file:
    __version__ = _version_file.read().splitlines()[0]
| StarcoderdataPython |
3479533 | #!/usr/bin/env python2.7
import os
import sys
from collections import defaultdict
from hashlib import sha1
from optparse import OptionParser
class TreeMatcher(object):
"""Locate and report identical files from two directory trees.
A tree can be a sub-tree of the other.
"""
def __init__(self, exts=[], stop_on_error=False, verbose=False):
self._exts = [x.lstrip('.') for x in exts]
self._stop_on_error = stop_on_error
self._verbose = verbose
self._digests = ({}, {})
self._paths = (None, None)
def scan(self, root, nolink, discard):
if self._verbose:
print "Scan %s" % root
digests = defaultdict(list)
rules = [lambda p, d: not d.startswith('.'),
lambda p, d: discard != os.path.join(p, d),
lambda p, d: not nolink or
not os.path.islink(os.path.join(p, d))]
for (dirpath, dirnames, filenames) in os.walk(root):
dirnames[:] = [d for d in dirnames
if all([r(dirpath, d) for r in rules])]
for fn in filenames:
if self._exts:
name, ext = os.path.splitext(fn)
if ext[1:] not in self._exts:
continue
pn = os.path.join(dirpath, fn)
try:
with open(pn, 'rb') as fp:
hmd = sha1(fp.read()).hexdigest()
except IOError:
if self._stop_on_error:
if self._verbose:
print ""
raise
continue
rn = pn[len(root)+1:]
digests[hmd].append(rn)
if self._verbose:
print "\r%s %d files" % (root, len(digests)),
if self._verbose:
print ""
return digests
def scanall(self, dir1, dir2, nolink):
ad1 = os.path.abspath(dir1)
ad2 = os.path.abspath(dir2)
discard1 = self._discard_nested(ad1, ad2)
discard2 = self._discard_nested(ad2, ad1)
d1 = self.scan(ad1, nolink, discard1)
d2 = self.scan(ad2, nolink, discard2)
self._digests = (d1, d2)
self._paths = (ad1, ad2)
@classmethod
def _discard_nested(cls, p1, p2):
relpath = os.path.relpath(p2, p1)
if not relpath.startswith(''.join((os.pardir, os.sep))):
return p2
return None
def show_match(self, script_mode=False, relative_mode=False):
d1, d2 = self._digests
if not d1:
print "No file found"
return
m = set(d1) & set(d2)
if not script_mode:
l1 = max([len(d1[x][0]) for x in m])
l2 = max([max([len(s) for s in d2[x]]) for x in m])
fmttpl = "%%(dst)-%(ldst)ds <-- %%(src)-%(ldst)ds"
fmt = fmttpl % {'lsrc': l1, 'ldst': l2}
for h in sorted(m, key=lambda x: sorted(d2[x])[0]):
if len(d1[h]) == 1:
s1 = d1[h][0]
else:
s1 = "%s (%d)" % (d1[h][0], len(d1[h]))
print fmt % {'src': s1, 'dst': d2[h][0]}
for s2 in d2[h][1:]:
print s2
else:
fmt = "%(src)s %(dst)s"
for h in sorted(m, key=lambda x: sorted(d2[x])[0]):
for dst in d2[h]:
src = d1[h][0]
if relative_mode:
p1, p2 = self._paths
src = os.path.abspath(os.path.join(p1, src))
tmp = os.path.abspath(os.path.join(p2, dst))
src = os.path.relpath(src, os.path.dirname(tmp))
print fmt % {'src': src, 'dst': dst}
def show_unmatch(self, prefix, pos):
if pos:
d1, d2 = self._digests
else:
d2, d1 = self._digests
m = set(d2) - set(d1)
for h in sorted(m, key=lambda x: d2[x]):
print "%s" % os.path.relpath(os.path.join(prefix, d2[h][0]))
def main():
    """Command-line entry point: parse options, scan both trees and print
    matching (or unmatched) files."""
    # Use example to replace duplicate header files with symlinks:
    #  matchtrees.py -l -x "h" -s -r . sysroot/usr/include | \
    #     while read a b; do rm sysroot/usr/include/$b &&
    #        ln -s $a sysroot/usr/include/$b; done
    try:
        # debug is initialised before option parsing so the except clause
        # below can reference it even if parsing itself fails.
        debug = False
        usage = 'Usage: %prog [options] <src> <dst>\n'\
                ' Track and match files between two directory structures'
        optparser = OptionParser(usage=usage)
        optparser.add_option('-x', '--extension', dest='exts',
                             action='append', default=[],
                             help='Extension filter, may be repeated')
        # -u counts occurrences: once shows unmatched files of <src>,
        # twice also shows unmatched files of <dst>.
        optparser.add_option('-u', '--unmatch', dest='unmatch',
                             action='count',
                             help='Show unmatch')
        optparser.add_option('-l', '--nolink', dest='nolink',
                             action='store_true',
                             help='Do not descent in symlinked dirs')
        optparser.add_option('-a', '--abort', dest='abort',
                             action='store_true',
                             help='Abort on error')
        optparser.add_option('-s', '--script', dest='script',
                             action='store_true',
                             help='Make the output script-friendly')
        optparser.add_option('-r', '--relative', dest='relative',
                             action='store_true',
                             help='Use relative paths for output')
        optparser.add_option('-v', '--verbose', dest='verbose',
                             action='store_true',
                             help='Show progress')
        optparser.add_option('-d', '--debug', dest='debug',
                             action='store_true',
                             help='Show debug information')
        (options, args) = optparser.parse_args(sys.argv[1:])
        debug = options.debug
        if len(args) < 2:
            optparser.error('Missing tree directory')
        tm = TreeMatcher(exts=options.exts, stop_on_error=options.abort,
                         verbose=options.verbose)
        tm.scanall(args[0], args[1], options.nolink)
        if options.unmatch:
            tm.show_unmatch(args[0], 0)
            if options.unmatch > 1:
                tm.show_unmatch(args[1], 1)
        else:
            tm.show_match(options.script, options.relative)
    except (IOError, ), e:
        # Python 2 except syntax.  With -d show the full traceback,
        # otherwise print a one-line error and exit non-zero.
        if debug:
            import traceback
            traceback.print_exc()
        else:
            print >> sys.stderr, 'Error:', str(e) or 'Internal error, use -d'
        sys.exit(1)
# Standard script guard: run main() only when executed directly, not when
# this file is imported as a module.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3375497 | <reponame>Ben435/BensLoadTestTool
from bad_requests.request import Request
# Default request headers used whenever a caller does not supply its own.
# The User-Agent mimics a desktop Firefox client.
STANDARD_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0",
    "Accept-Language": "en,en-US",
    "Accept": "text/html,application/xhtml+xml",
    "Accept-Encoding": 'deflate'
}

# Fallback multipart/form-data part boundary, used when the caller's
# Content-Type header does not name one.
DEFAULT_BOUNDARY = "=="
def get(uri, args=None, headers=None):
    """Build a GET Request for *uri*.

    :param args: optional dict of query parameters; URL-encoded and
        appended to the resource path as "?k=v&...".
    :param headers: optional header dict; defaults to STANDARD_HEADERS.
    :return: an unsent Request object.
    """
    host, resource = parse_uri(uri)
    if headers is None:
        headers = STANDARD_HEADERS
    header_str = serialize_headers(headers)
    args_str = url_encode_dict(args)
    if args_str:
        resource += "?" + args_str
    # serialize_headers() terminates every header line with "\n", so one
    # extra "\n" produces the blank line ending the header block.
    # FIX: the original appended "\n\n", emitting a stray blank line
    # after the terminator.
    # NOTE(review): HTTP/1.1 formally requires CRLF line endings; this
    # module consistently uses bare "\n" -- confirm the target servers
    # tolerate it before tightening.
    message = "GET {} HTTP/1.1\nHost: {}\n{}\n".format(resource, host, header_str)
    return Request(host, message)
def head(uri, args=None, headers=None):
    """Build a HEAD Request for *uri*.

    Identical to get() except for the method and that the returned
    Request is told not to expect a response body (get_body=False).

    :param args: optional dict of query parameters appended to the path.
    :param headers: optional header dict; defaults to STANDARD_HEADERS.
    :return: an unsent Request object.
    """
    host, resource = parse_uri(uri)
    if headers is None:
        headers = STANDARD_HEADERS
    header_str = serialize_headers(headers)
    args_str = url_encode_dict(args)
    if args_str:
        resource += "?" + args_str
    # FIX: single "\n" after the already newline-terminated header block;
    # the original's "\n\n" emitted a stray blank line after the header
    # terminator (see get()).
    message = "HEAD {} HTTP/1.1\nHost: {}\n{}\n".format(resource, host, header_str)
    return Request(host, message, get_body=False)
def post(uri, args=None, headers=None):
    """Build a POST Request, dispatching on the Content-Type header.

    - no Content-Type or "application/x-www-form-urlencoded":
      delegate to urlencoded_post().
    - "multipart/form-data" (optionally with "; boundary=..."):
      delegate to multipart_post(), falling back to DEFAULT_BOUNDARY
      when no usable boundary parameter is present.
    - anything else raises Exception.

    :param headers: optional header dict; defaults to STANDARD_HEADERS.
    """
    host, resource = parse_uri(uri)
    if headers is None:
        # BUG FIX: the original evaluated `"Content-Type" in headers`
        # without this guard, so post(uri) with the default headers=None
        # raised TypeError instead of using the standard headers.
        headers = STANDARD_HEADERS
    content_type = headers.get("Content-Type")
    if content_type is None or content_type == "application/x-www-form-urlencoded":
        # Default POST encoding.
        return urlencoded_post(host, resource, args=args, headers=headers)
    if "multipart/form-data" in content_type:
        boundary = DEFAULT_BOUNDARY
        parts = content_type.split(";")
        if len(parts) >= 2:
            # BUG FIX: the original indexed split("=")[1], which raised
            # IndexError when the parameter carried no "=".  partition()
            # is total and also keeps any "=" inside the boundary value.
            key, _, value = parts[1].strip().partition("=")
            if key.lower() == "boundary" and value:
                boundary = value
        return multipart_post(host, resource, boundary, args=args, headers=headers)
    raise Exception("Invalid Content-Type: {}".format(content_type))
def urlencoded_post(host, resource, args=None, headers=None):
    # TODO: unimplemented stub.  Should build a POST Request whose body is
    # the url-encoded *args* (see url_encode_dict) with an appropriate
    # Content-Length header.  Currently returns None.
    pass
def multipart_post(host, resource, boundary, args=None, headers=None):
    # TODO: unimplemented stub.  Should build a multipart/form-data POST
    # Request, separating the encoded *args* parts with *boundary*.
    # Currently returns None.
    pass
def parse_uri(uri):
    """Split a URI into (host, resource path).

    "http://www.example.com/what_we_want.html"
        -> ("www.example.com", "/what_we_want.html")

    BUG FIX: for a URI with no path component (e.g. "http://host") the
    original re-joined and returned the *entire* URI as the resource,
    producing request lines like "GET http://host HTTP/1.1".  The root
    path "/" is returned instead.
    """
    parts = uri.split("/")
    # e.g. ['http:', '', 'www.example.com', 'what_we_want.html']
    host = parts[2]
    if len(parts) < 4:
        resource = "/"
    else:
        resource = "/" + "/".join(parts[3:])
    return host, resource
def serialize_headers(headers):
    """Render a header mapping as "Name: value" lines.

    Every line, including the last, is terminated by "\n"; an empty
    mapping yields the empty string.
    """
    return "".join(name + ": " + value + "\n"
                   for name, value in headers.items())
def url_encode_dict(args):
    """Encode *args* as an application/x-www-form-urlencoded query string.

    Keys and values are percent-encoded and joined as "k=v&k2=v2".
    Returns None when *args* is not a dict (including None), so callers
    can test the result for truthiness.
    """
    if not isinstance(args, dict):
        return None
    return "&".join(
        percent_encode_string(key) + "=" + percent_encode_string(value)
        for key, value in args.items())
def percent_encode_string(string):
    """Percent-encode *string* for use in a URL query component.

    Unreserved characters (alphanumerics, '_' and '-') pass through;
    every other character is UTF-8 encoded and each byte emitted as
    "%XX" with two uppercase hex digits, matching urllib.parse.quote.

    BUG FIXES vs. the original:
    - a debug print() ran on every encoded character;
    - characters above U+00FF were encoded as the hex digits of the raw
      codepoint (e.g. U+2665 -> "%26%65", which decodes to "&e") instead
      of their UTF-8 bytes ("%E2%99%A5");
    - characters with ord < 16 produced malformed one-digit escapes
      (e.g. "\n" -> "%A" instead of "%0A").
    """
    safe_chars = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-"
    encoded = []
    for char in string:
        if char in safe_chars:
            encoded.append(char)
        else:
            # Encode to UTF-8 first, then escape each byte separately.
            for byte in char.encode("utf-8"):
                encoded.append("%%%02X" % byte)
    return "".join(encoded)
if __name__ == "__main__":
    # Ad-hoc manual test: compare this module's percent-encoding of a
    # multi-byte character against the stdlib's urllib.parse.quote.
    import urllib.parse
    test_str = "♥"
    print(test_str)
    print(urllib.parse.quote(test_str))
    print(percent_encode_string(test_str))
    # Earlier manual smoke test against a live server, kept for reference:
    # test_url = "http://www.theuselessweb.com"
    #
    # resp_head = head(test_url)
    # print(resp_head)
    #
    # req_get = get(test_url, args={"hello": "world"})
    # print(req_get)
    # resp_get = req_get.send()
    # print(resp_get)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.