id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handles the selection of the revision operators.
"""
import logging
from abc import ABC, abstractmethod
# ``Collection`` lives in ``collections.abc``; importing it from
# ``collections`` was deprecated in 3.3 and removed in Python 3.10.
from collections.abc import Collection

import neurallog.structure_learning.structure_learning_system as sls
from neurallog.knowledge.examples import Examples
from neurallog.knowledge.theory import TheoryRevisionException
from neurallog.knowledge.theory.evaluation.metric.theory_metric import \
    TheoryMetric
from neurallog.knowledge.theory.manager.revision.revision_operator_evaluator \
    import \
    RevisionOperatorEvaluator
from neurallog.util import Initializable
logger = logging.getLogger(__name__)
class RevisionOperatorSelector(Initializable):
    """
    Base class responsible for selecting the best suited revision operator.
    """

    def __init__(self, learning_system=None, operator_evaluators=None):
        """
        Creates a revision operator selector.

        :param learning_system: the learning system
        :type learning_system: sls.StructureLearningSystem
        :param operator_evaluators: the operator evaluators
        :type operator_evaluators: Collection[RevisionOperatorEvaluator]
            or None
        """
        self.learning_system = learning_system
        self.operator_evaluators = operator_evaluators

    # noinspection PyMissingOrEmptyDocstring
    def initialize(self):
        super().initialize()
        # Propagate the learning system to each evaluator before
        # initializing it, so evaluators are ready for use.
        for evaluator in self.operator_evaluators:
            evaluator.learning_system = self.learning_system
            evaluator.initialize()

    # noinspection PyMissingOrEmptyDocstring
    def required_fields(self):
        return ["learning_system", "operator_evaluators"]

    @abstractmethod
    def select_operator(self, examples, theory_metric, minimum_threshold=None):
        """
        Selects the best operator to revise the theory, based on the
        examples and the metric.

        :param examples: the examples
        :type examples: Examples
        :param theory_metric: the theory metric
        :type theory_metric: TheoryMetric
        :param minimum_threshold: a minimum threshold to consider by the
            operator; implementations may use this threshold to improve
            performance by skipping the evaluation of candidates
        :type minimum_threshold: Optional[float]
        :return: the best revision operator
        :rtype: RevisionOperatorEvaluator
        """
        pass
class SelectFirstRevisionOperator(RevisionOperatorSelector):
    """
    Selects the first operator evaluator to revise the theory.
    """

    # noinspection PyMissingOrEmptyDocstring,PyAttributeOutsideInit
    def initialize(self):
        super().initialize()
        # Cache the first evaluator of the collection; it is the only
        # operator this selector ever returns.
        iterator = iter(self.operator_evaluators)
        self._operator = next(iterator)

    # noinspection PyMissingOrEmptyDocstring
    def select_operator(self, examples, theory_metric, minimum_threshold=None):
        return self._operator
class BestRevisionOperatorSelector(RevisionOperatorSelector):
    """
    Selects the best possible revision operator.
    """

    # noinspection PyMissingOrEmptyDocstring,PyAttributeOutsideInit
    def initialize(self):
        super().initialize()
        # With fewer than two evaluators there is nothing to compare,
        # so a cheaper pass-through selector is used instead.
        if len(self.operator_evaluators) < 2:
            selector = SingleRevisionOperatorEvaluator(
                self.operator_evaluators)
        else:
            selector = BestSelector(self.operator_evaluators)
        self.selector: RevisionOperatorEvaluatorSelector = selector

    # noinspection PyMissingOrEmptyDocstring,PyAttributeOutsideInit
    def select_operator(self, examples, theory_metric, minimum_threshold=None):
        # Delegate the decision to the selector chosen at initialization.
        return self.selector.select_operator(
            examples, theory_metric, minimum_threshold)
class RevisionOperatorEvaluatorSelector(ABC):
    """
    Interface to select the proper operator, given the target examples
    and the metric.
    """

    @abstractmethod
    def select_operator(self, targets, metric, minimum_threshold=None):
        """
        Selects the proper operator, based on the target examples and the
        metric.

        :param targets: the target examples
        :type targets: Examples
        :param metric: the metric
        :type metric: TheoryMetric
        :param minimum_threshold: a minimum threshold to consider by the
            operator; implementations may use this threshold to improve
            performance by skipping the evaluation of candidates
        :type minimum_threshold: Optional[float]
        :return: the proper revision operator evaluator
        :rtype: RevisionOperatorEvaluator
        """
        pass
class SingleRevisionOperatorEvaluator(RevisionOperatorEvaluatorSelector):
    """
    Selects the only operator.
    """

    def __init__(self, operator_evaluators):
        """
        Create a single revision operator selector.

        :param operator_evaluators: the operator evaluators
        :type operator_evaluators: Collection[RevisionOperatorEvaluator]
        """
        # Only the first evaluator of the collection is ever used.
        self.operator_evaluator = next(iter(operator_evaluators))

    # noinspection PyMissingOrEmptyDocstring
    def select_operator(self, targets, metric, minimum_threshold=None):
        evaluator = self.operator_evaluator
        if evaluator is not None:
            # Drop any cached theory so the next evaluation starts fresh.
            evaluator.clear_cached_theory()
        return evaluator
class BestSelector(RevisionOperatorEvaluatorSelector):
    """
    Selects the best possible operator among all evaluators.
    """

    def __init__(self, operator_evaluators):
        """
        Create a best operator selector.

        :param operator_evaluators: the operator evaluators
        :type operator_evaluators: Collection[RevisionOperatorEvaluator]
        """
        self.operator_evaluators = operator_evaluators
        # Fallback returned when no evaluator beats the metric's default.
        self.preferred_operator = next(iter(operator_evaluators))

    # noinspection PyMissingOrEmptyDocstring
    def select_operator(self, targets, metric, minimum_threshold=None):
        best_evaluator = self.preferred_operator
        best_evaluation = metric.default_value
        for candidate in self.operator_evaluators:
            try:
                candidate.clear_cached_theory()
                evaluation = candidate.evaluate_operator(
                    targets, metric, minimum_threshold)
                # ``compare`` > 0 means ``evaluation`` is strictly better.
                if metric.compare(evaluation, best_evaluation) > 0:
                    best_evaluation = evaluation
                    best_evaluator = candidate
            except TheoryRevisionException:
                # A failing evaluator is skipped rather than fatal.
                logger.exception(
                    "Error when evaluating the revision operator, reason:")
        return best_evaluator
| StarcoderdataPython |
7248 | <reponame>kykrueger/redash
import calendar
import datetime
from unittest import TestCase
import pytz
from dateutil.parser import parse as date_parse
from tests import BaseTestCase
from redash import models, redis_connection
from redash.models import db, types
from redash.utils import gen_query_hash, utcnow
class DashboardTest(BaseTestCase):
    """Tests for dashboard slug generation."""

    def test_appends_suffix_to_slug_when_duplicate(self):
        """Dashboards with duplicate names get distinct, suffixed slugs."""
        d1 = self.factory.create_dashboard()
        db.session.flush()
        self.assertEqual(d1.slug, 'test')

        d2 = self.factory.create_dashboard(user=d1.user)
        db.session.flush()
        self.assertNotEqual(d1.slug, d2.slug)

        # A third duplicate must differ from both previous slugs.
        d3 = self.factory.create_dashboard(user=d1.user)
        db.session.flush()
        self.assertNotEqual(d1.slug, d3.slug)
        self.assertNotEqual(d2.slug, d3.slug)
class ShouldScheduleNextTest(TestCase):
    """Unit tests for ``models.should_schedule_next``.

    The function decides whether a scheduled query is due for execution,
    given its previous run time, the current time, the refresh interval in
    seconds and, optionally, an exact time of day, a day of the week and a
    failure count (used for exponential backoff).
    """

    def test_interval_schedule_that_needs_reschedule(self):
        """An hourly schedule is overdue two hours after the last run."""
        now = utcnow()
        two_hours_ago = now - datetime.timedelta(hours=2)
        self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))

    def test_interval_schedule_that_doesnt_need_reschedule(self):
        """An hourly schedule is not due half an hour after the last run."""
        now = utcnow()
        half_an_hour_ago = now - datetime.timedelta(minutes=30)
        self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))

    def test_exact_time_that_needs_reschedule(self):
        """A daily schedule whose time-of-day already passed is overdue."""
        now = utcnow()
        yesterday = now - datetime.timedelta(days=1)
        scheduled_datetime = now - datetime.timedelta(hours=3)
        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
        self.assertTrue(models.should_schedule_next(yesterday, now, "86400",
                                                    scheduled_time))

    def test_exact_time_that_doesnt_need_reschedule(self):
        """A daily schedule is not due before its time-of-day arrives."""
        now = date_parse("2015-10-16 20:10")
        yesterday = date_parse("2015-10-15 23:07")
        schedule = "23:00"
        self.assertFalse(models.should_schedule_next(yesterday, now, "86400", schedule))

    def test_exact_time_with_day_change(self):
        """Crossing midnight must not mask an overdue daily schedule."""
        now = utcnow().replace(hour=0, minute=1)
        previous = (now - datetime.timedelta(days=2)).replace(hour=23,
                                                              minute=59)
        # Bug fix: this was ``"23:59".format(now.hour + 3)``, a no-op
        # ``format`` call on a string with no placeholders.
        schedule = "23:59"
        self.assertTrue(models.should_schedule_next(previous, now, "86400", schedule))

    def test_exact_time_every_x_days_that_needs_reschedule(self):
        """A 3-day schedule is overdue 4 days after the last run."""
        now = utcnow()
        four_days_ago = now - datetime.timedelta(days=4)
        three_day_interval = "259200"
        scheduled_datetime = now - datetime.timedelta(hours=3)
        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
        self.assertTrue(models.should_schedule_next(four_days_ago, now, three_day_interval,
                                                    scheduled_time))

    def test_exact_time_every_x_days_that_doesnt_need_reschedule(self):
        """A 3-day schedule is not due only 2 days after the last run."""
        now = utcnow()
        # Bug fix: this variable was misleadingly named ``four_days_ago``
        # although it is only two days in the past.
        two_days_ago = now - datetime.timedelta(days=2)
        three_day_interval = "259200"
        scheduled_datetime = now - datetime.timedelta(hours=3)
        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
        self.assertFalse(models.should_schedule_next(two_days_ago, now, three_day_interval,
                                                     scheduled_time))

    def test_exact_time_every_x_days_with_day_change(self):
        """Crossing midnight must not mask an overdue multi-day schedule."""
        now = utcnow().replace(hour=23, minute=59)
        previous = (now - datetime.timedelta(days=2)).replace(hour=0, minute=1)
        schedule = "23:58"
        three_day_interval = "259200"
        self.assertTrue(models.should_schedule_next(previous, now, three_day_interval, schedule))

    def test_exact_time_every_x_weeks_that_needs_reschedule(self):
        # Setup:
        #
        # 1) The query should run every 3 weeks on Tuesday
        # 2) The last time it ran was 3 weeks ago from this week's Thursday
        # 3) It is now Wednesday of this week
        #
        # Expectation: Even though less than 3 weeks have passed since the
        #              last run 3 weeks ago on Thursday, it's overdue since
        #              it should be running on Tuesdays.
        this_thursday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Thursday") - utcnow().weekday())
        three_weeks_ago = this_thursday - datetime.timedelta(weeks=3)
        now = this_thursday - datetime.timedelta(days=1)
        three_week_interval = "1814400"
        scheduled_datetime = now - datetime.timedelta(hours=3)
        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
        self.assertTrue(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
                                                    scheduled_time, "Tuesday"))

    def test_exact_time_every_x_weeks_that_doesnt_need_reschedule(self):
        # Setup:
        #
        # 1) The query should run every 3 weeks on Thurday
        # 2) The last time it ran was 3 weeks ago from this week's Tuesday
        # 3) It is now Wednesday of this week
        #
        # Expectation: Even though more than 3 weeks have passed since the
        #              last run 3 weeks ago on Tuesday, it's not overdue since
        #              it should be running on Thursdays.
        this_tuesday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Tuesday") - utcnow().weekday())
        three_weeks_ago = this_tuesday - datetime.timedelta(weeks=3)
        now = this_tuesday + datetime.timedelta(days=1)
        three_week_interval = "1814400"
        scheduled_datetime = now - datetime.timedelta(hours=3)
        scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
        self.assertFalse(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
                                                     scheduled_time, "Thursday"))

    def test_backoff(self):
        """Failures extend the effective interval exponentially."""
        now = utcnow()
        two_hours_ago = now - datetime.timedelta(hours=2)
        self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
                                                    failures=5))
        self.assertFalse(models.should_schedule_next(two_hours_ago, now,
                                                     "3600", failures=10))

    def test_next_iteration_overflow(self):
        """A huge failure count must not overflow the next-run computation."""
        now = utcnow()
        two_hours_ago = now - datetime.timedelta(hours=2)
        self.assertFalse(models.should_schedule_next(two_hours_ago, now, "3600", failures=32))
class QueryOutdatedQueriesTest(BaseTestCase):
    """Tests for ``models.Query.outdated_queries``."""

    # TODO: this test can be refactored to use mock version of should_schedule_next to simplify it.
    def test_outdated_queries_skips_unscheduled_queries(self):
        """Queries with an empty or null schedule are never outdated."""
        query = self.factory.create_query(schedule={'interval':None, 'time': None, 'until':None, 'day_of_week':None})
        query_with_none = self.factory.create_query(schedule=None)

        queries = models.Query.outdated_queries()
        self.assertNotIn(query, queries)
        self.assertNotIn(query_with_none, queries)

    def test_outdated_queries_works_with_ttl_based_schedule(self):
        """A query whose interval elapsed since its last result is outdated."""
        two_hours_ago = utcnow() - datetime.timedelta(hours=2)
        query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
        query.latest_query_data = query_result

        queries = models.Query.outdated_queries()
        self.assertIn(query, queries)

    def test_outdated_queries_works_scheduled_queries_tracker(self):
        """A query tracked as currently executing is not re-reported."""
        two_hours_ago = utcnow() - datetime.timedelta(hours=2)
        # NOTE(review): sibling tests pass ``query.query_text`` here; passing
        # the query object itself looks inconsistent -- confirm against the
        # factory's expected keyword type.
        query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query, retrieved_at=two_hours_ago)
        query.latest_query_data = query_result

        models.scheduled_queries_executions.update(query.id)

        queries = models.Query.outdated_queries()
        self.assertNotIn(query, queries)

    def test_skips_fresh_queries(self):
        """A query refreshed within its interval is not outdated."""
        half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
        query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago)
        query.latest_query_data = query_result

        queries = models.Query.outdated_queries()
        self.assertNotIn(query, queries)

    def test_outdated_queries_works_with_specific_time_schedule(self):
        """A daily schedule whose time-of-day passed is reported as outdated."""
        half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
        query = self.factory.create_query(schedule={'interval':'86400', 'time':half_an_hour_ago.strftime('%H:%M'), 'until':None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago - datetime.timedelta(days=1))
        query.latest_query_data = query_result

        queries = models.Query.outdated_queries()
        self.assertIn(query, queries)

    def test_enqueues_query_only_once(self):
        """
        Only one query per data source with the same text will be reported by
        Query.outdated_queries().
        """
        query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
        query2 = self.factory.create_query(
            schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
            query_hash=query.query_hash)
        retrieved_at = utcnow() - datetime.timedelta(minutes=10)
        query_result = self.factory.create_query_result(
            retrieved_at=retrieved_at, query_text=query.query_text,
            query_hash=query.query_hash)
        query.latest_query_data = query_result
        query2.latest_query_data = query_result

        self.assertEqual(list(models.Query.outdated_queries()), [query2])

    def test_enqueues_query_with_correct_data_source(self):
        """
        Queries from different data sources will be reported by
        Query.outdated_queries() even if they have the same query text.
        """
        query = self.factory.create_query(
            schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, data_source=self.factory.create_data_source())
        query2 = self.factory.create_query(
            schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
            query_hash=query.query_hash)
        retrieved_at = utcnow() - datetime.timedelta(minutes=10)
        query_result = self.factory.create_query_result(
            retrieved_at=retrieved_at, query_text=query.query_text,
            query_hash=query.query_hash)
        query.latest_query_data = query_result
        query2.latest_query_data = query_result

        outdated_queries = models.Query.outdated_queries()
        self.assertEqual(len(outdated_queries), 2)
        self.assertIn(query, outdated_queries)
        self.assertIn(query2, outdated_queries)

    def test_enqueues_only_for_relevant_data_source(self):
        """
        If multiple queries with the same text exist, only ones that are
        scheduled to be refreshed are reported by Query.outdated_queries().
        """
        query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
        query2 = self.factory.create_query(
            schedule={'interval':'3600', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
            query_hash=query.query_hash)
        retrieved_at = utcnow() - datetime.timedelta(minutes=10)
        query_result = self.factory.create_query_result(
            retrieved_at=retrieved_at, query_text=query.query_text,
            query_hash=query.query_hash)
        query.latest_query_data = query_result
        query2.latest_query_data = query_result

        self.assertEqual(list(models.Query.outdated_queries()), [query])

    def test_failure_extends_schedule(self):
        """
        Execution failures recorded for a query result in exponential backoff
        for scheduling future execution.
        """
        query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, schedule_failures=4)
        retrieved_at = utcnow() - datetime.timedelta(minutes=16)
        query_result = self.factory.create_query_result(
            retrieved_at=retrieved_at, query_text=query.query_text,
            query_hash=query.query_hash)
        query.latest_query_data = query_result

        # 4 failures with a 60s interval back off to 16 minutes: just inside
        # the window it is not outdated, just outside it is.
        self.assertEqual(list(models.Query.outdated_queries()), [])

        query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
        self.assertEqual(list(models.Query.outdated_queries()), [query])

    def test_schedule_until_after(self):
        """
        Queries with non-null ``schedule['until']`` are not reported by
        Query.outdated_queries() after the given time is past.
        """
        one_day_ago = (utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        two_hours_ago = utcnow() - datetime.timedelta(hours=2)
        query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_ago, 'time':None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
        query.latest_query_data = query_result

        queries = models.Query.outdated_queries()
        self.assertNotIn(query, queries)

    def test_schedule_until_before(self):
        """
        Queries with non-null ``schedule['until']`` are reported by
        Query.outdated_queries() before the given time is past.
        """
        one_day_from_now = (utcnow() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        two_hours_ago = utcnow() - datetime.timedelta(hours=2)
        query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_from_now, 'time': None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
        query.latest_query_data = query_result

        queries = models.Query.outdated_queries()
        self.assertIn(query, queries)
class QueryArchiveTest(BaseTestCase):
    """Tests for the side effects of ``Query.archive``."""

    def test_archive_query_sets_flag(self):
        query = self.factory.create_query()
        db.session.flush()
        query.archive()

        self.assertEqual(query.is_archived, True)

    def test_archived_query_doesnt_return_in_all(self):
        """Archived queries disappear from listings and the outdated set."""
        query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
        yesterday = utcnow() - datetime.timedelta(days=1)
        query_result = models.QueryResult.store_result(
            query.org_id, query.data_source, query.query_hash, query.query_text,
            "1", 123, yesterday)

        query.latest_query_data = query_result
        groups = list(models.Group.query.filter(models.Group.id.in_(query.groups)))
        self.assertIn(query, list(models.Query.all_queries([g.id for g in groups])))
        self.assertIn(query, models.Query.outdated_queries())
        db.session.flush()
        query.archive()

        self.assertNotIn(query, list(models.Query.all_queries([g.id for g in groups])))
        self.assertNotIn(query, models.Query.outdated_queries())

    def test_removes_associated_widgets_from_dashboards(self):
        widget = self.factory.create_widget()
        query = widget.visualization.query_rel
        db.session.commit()
        query.archive()
        db.session.flush()
        self.assertEqual(models.Widget.query.get(widget.id), None)

    def test_removes_scheduling(self):
        query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})

        query.archive()

        self.assertIsNone(query.schedule)

    def test_deletes_alerts(self):
        subscription = self.factory.create_alert_subscription()
        query = subscription.alert.query_rel
        db.session.commit()
        query.archive()
        db.session.flush()
        # Archiving cascades: both the alert and its subscription are gone.
        self.assertEqual(models.Alert.query.get(subscription.alert.id), None)
        self.assertEqual(models.AlertSubscription.query.get(subscription.id), None)
class TestUnusedQueryResults(BaseTestCase):
    """Tests for ``models.QueryResult.unused``."""

    def test_returns_only_unused_query_results(self):
        """Results still referenced by a query are not reported as unused."""
        two_weeks_ago = utcnow() - datetime.timedelta(days=14)
        qr = self.factory.create_query_result()
        self.factory.create_query(latest_query_data=qr)
        db.session.flush()
        unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
        self.assertIn(unused_qr, list(models.QueryResult.unused()))
        self.assertNotIn(qr, list(models.QueryResult.unused()))

    def test_returns_only_over_a_week_old_results(self):
        """Unreferenced results newer than a week are kept."""
        two_weeks_ago = utcnow() - datetime.timedelta(days=14)
        unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
        db.session.flush()
        new_unused_qr = self.factory.create_query_result()

        self.assertIn(unused_qr, list(models.QueryResult.unused()))
        self.assertNotIn(new_unused_qr, list(models.QueryResult.unused()))
class TestQueryAll(BaseTestCase):
    """Tests for ``models.Query.all_queries`` visibility rules."""

    def test_returns_only_queries_in_given_groups(self):
        """Queries are visible only through their data source's groups."""
        ds1 = self.factory.create_data_source()
        ds2 = self.factory.create_data_source()

        group1 = models.Group(name="g1", org=ds1.org, permissions=['create', 'view'])
        group2 = models.Group(name="g2", org=ds1.org, permissions=['create', 'view'])

        q1 = self.factory.create_query(data_source=ds1)
        q2 = self.factory.create_query(data_source=ds2)

        db.session.add_all([
            ds1, ds2,
            group1, group2,
            q1, q2,
            models.DataSourceGroup(
                group=group1, data_source=ds1),
            models.DataSourceGroup(group=group2, data_source=ds2)
            ])
        db.session.flush()
        self.assertIn(q1, list(models.Query.all_queries([group1.id])))
        self.assertNotIn(q2, list(models.Query.all_queries([group1.id])))
        self.assertIn(q1, list(models.Query.all_queries([group1.id, group2.id])))
        self.assertIn(q2, list(models.Query.all_queries([group1.id, group2.id])))

    def test_skips_drafts(self):
        q = self.factory.create_query(is_draft=True)
        self.assertNotIn(q, models.Query.all_queries([self.factory.default_group.id]))

    def test_includes_drafts_of_given_user(self):
        q = self.factory.create_query(is_draft=True)
        self.assertIn(q, models.Query.all_queries([self.factory.default_group.id], user_id=q.user_id))

    def test_order_by_relationship(self):
        """``all_queries`` can be re-ordered by a joined relationship."""
        u1 = self.factory.create_user(name='alice')
        u2 = self.factory.create_user(name='bob')
        self.factory.create_query(user=u1)
        self.factory.create_query(user=u2)
        db.session.commit()
        # have to reset the order here with None since all_queries orders by
        # created_at by default
        base = models.Query.all_queries([self.factory.default_group.id]).order_by(None)
        qs1 = base.order_by(models.User.name)
        self.assertEqual(['alice', 'bob'], [q.user.name for q in qs1])
        qs2 = base.order_by(models.User.name.desc())
        self.assertEqual(['bob', 'alice'], [q.user.name for q in qs2])
class TestGroup(BaseTestCase):
    """Tests for ``models.Group.find_by_name``."""

    def test_returns_groups_with_specified_names(self):
        org1 = self.factory.create_org()
        org2 = self.factory.create_org()

        # NOTE(review): the groups are never explicitly added to the session;
        # presumably the ``org`` relationship cascades them in -- confirm.
        matching_group1 = models.Group(id=999, name="g1", org=org1)
        matching_group2 = models.Group(id=888, name="g2", org=org1)
        non_matching_group = models.Group(id=777, name="g1", org=org2)

        groups = models.Group.find_by_name(org1, ["g1", "g2"])
        self.assertIn(matching_group1, groups)
        self.assertIn(matching_group2, groups)
        self.assertNotIn(non_matching_group, groups)

    def test_returns_no_groups(self):
        org1 = self.factory.create_org()

        models.Group(id=999, name="g1", org=org1)
        self.assertEqual([], models.Group.find_by_name(org1, ["non-existing"]))
class TestQueryResultStoreResult(BaseTestCase):
    """Tests for ``models.QueryResult.store_result``."""

    def setUp(self):
        super(TestQueryResultStoreResult, self).setUp()
        self.data_source = self.factory.data_source
        self.query = "SELECT 1"
        self.query_hash = gen_query_hash(self.query)
        self.runtime = 123
        self.utcnow = utcnow()
        self.data = '{"a": 1}'

    def test_stores_the_result(self):
        """All fields passed to ``store_result`` round-trip onto the model."""
        query_result = models.QueryResult.store_result(
            self.data_source.org_id, self.data_source, self.query_hash,
            self.query, self.data, self.runtime, self.utcnow)

        self.assertEqual(query_result._data, self.data)
        self.assertEqual(query_result.runtime, self.runtime)
        self.assertEqual(query_result.retrieved_at, self.utcnow)
        self.assertEqual(query_result.query_text, self.query)
        self.assertEqual(query_result.query_hash, self.query_hash)
        self.assertEqual(query_result.data_source, self.data_source)
class TestEvents(BaseTestCase):
    """Tests for ``models.Event.record``."""

    def raw_event(self):
        """Build a raw event dict plus the user and timestamp it refers to."""
        timestamp = 1411778709.791
        user = self.factory.user
        created_at = datetime.datetime.utcfromtimestamp(timestamp)
        db.session.flush()
        raw_event = {"action": "view",
                     "timestamp": timestamp,
                     "object_type": "dashboard",
                     "user_id": user.id,
                     "object_id": 1,
                     "org_id": 1}

        return raw_event, user, created_at

    def test_records_event(self):
        raw_event, user, created_at = self.raw_event()

        event = models.Event.record(raw_event)
        db.session.flush()

        self.assertEqual(event.user, user)
        self.assertEqual(event.action, "view")
        self.assertEqual(event.object_type, "dashboard")
        self.assertEqual(event.object_id, 1)
        self.assertEqual(event.created_at, created_at)

    def test_records_additional_properties(self):
        """Keys beyond the known schema land in ``additional_properties``."""
        raw_event, _, _ = self.raw_event()
        additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
        raw_event.update(additional_properties)

        event = models.Event.record(raw_event)

        self.assertDictEqual(event.additional_properties, additional_properties)
def _set_up_dashboard_test(d):
    """Attach a shared dashboard fixture to test instance *d*.

    Creates two groups/users/data sources, one query+visualization per data
    source, and five widgets spread over three published dashboards, so that
    w5's dashboard mixes visualizations from both data sources.
    """
    d.g1 = d.factory.create_group(name='First', permissions=['create', 'view'])
    d.g2 = d.factory.create_group(name='Second', permissions=['create', 'view'])
    d.ds1 = d.factory.create_data_source()
    d.ds2 = d.factory.create_data_source()
    db.session.flush()
    d.u1 = d.factory.create_user(group_ids=[d.g1.id])
    d.u2 = d.factory.create_user(group_ids=[d.g2.id])
    db.session.add_all([
        models.DataSourceGroup(group=d.g1, data_source=d.ds1),
        models.DataSourceGroup(group=d.g2, data_source=d.ds2)
    ])
    d.q1 = d.factory.create_query(data_source=d.ds1)
    d.q2 = d.factory.create_query(data_source=d.ds2)
    d.v1 = d.factory.create_visualization(query_rel=d.q1)
    d.v2 = d.factory.create_visualization(query_rel=d.q2)
    d.w1 = d.factory.create_widget(visualization=d.v1)
    d.w2 = d.factory.create_widget(visualization=d.v2)
    d.w3 = d.factory.create_widget(visualization=d.v2, dashboard=d.w2.dashboard)
    d.w4 = d.factory.create_widget(visualization=d.v2)
    # w5 shares w4's dashboard but shows a visualization from the other
    # data source, giving u1 only partial access to that dashboard.
    d.w5 = d.factory.create_widget(visualization=d.v1, dashboard=d.w4.dashboard)
    d.w1.dashboard.is_draft = False
    d.w2.dashboard.is_draft = False
    d.w4.dashboard.is_draft = False
class TestDashboardAll(BaseTestCase):
    """Tests for ``models.Dashboard.all`` visibility rules."""

    def setUp(self):
        super(TestDashboardAll, self).setUp()
        _set_up_dashboard_test(self)

    def test_requires_group_or_user_id(self):
        d1 = self.factory.create_dashboard()
        self.assertNotIn(d1, list(models.Dashboard.all(
            d1.user.org, d1.user.group_ids, None)))
        l2 = list(models.Dashboard.all(
            d1.user.org, [0], d1.user.id))
        self.assertIn(d1, l2)

    def test_returns_dashboards_based_on_groups(self):
        self.assertIn(self.w1.dashboard, list(models.Dashboard.all(
            self.u1.org, self.u1.group_ids, None)))
        self.assertIn(self.w2.dashboard, list(models.Dashboard.all(
            self.u2.org, self.u2.group_ids, None)))
        self.assertNotIn(self.w1.dashboard, list(models.Dashboard.all(
            self.u2.org, self.u2.group_ids, None)))
        self.assertNotIn(self.w2.dashboard, list(models.Dashboard.all(
            self.u1.org, self.u1.group_ids, None)))

    def test_returns_each_dashboard_once(self):
        # u2's dashboards contain two widgets each; they must not duplicate.
        dashboards = list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
        self.assertEqual(len(dashboards), 2)

    def test_returns_dashboard_you_have_partial_access_to(self):
        self.assertIn(self.w5.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))

    def test_returns_dashboards_created_by_user(self):
        d1 = self.factory.create_dashboard(user=self.u1)
        db.session.flush()
        self.assertIn(d1, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, self.u1.id)))
        self.assertIn(d1, list(models.Dashboard.all(self.u1.org, [0], self.u1.id)))
        self.assertNotIn(d1, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, self.u2.id)))

    def test_returns_dashboards_with_text_widgets(self):
        w1 = self.factory.create_widget(visualization=None)

        self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
        self.assertIn(w1.dashboard, models.Dashboard.all(self.u2.org, self.u2.group_ids, None))

    def test_returns_dashboards_from_current_org_only(self):
        w1 = self.factory.create_widget(visualization=None)
        user = self.factory.create_user(org=self.factory.create_org())

        self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
        self.assertNotIn(w1.dashboard, models.Dashboard.all(user.org, user.group_ids, None))
| StarcoderdataPython |
import numpy as np
import pandas as pd
from climate.s3_bucket_operations.s3_operations import S3_Operation
from sklearn.impute import KNNImputer
from sklearn.preprocessing import StandardScaler
from utils.logger import App_Logger
from utils.read_params import read_params
class Preprocessor:
"""
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
def __init__(self, log_file):
self.log_writer = App_Logger()
self.config = read_params()
self.class_name = self.__class__.__name__
self.log_file = log_file
self.null_values_file = self.config["null_values_csv_file"]
self.n_components = self.config["pca_model"]["n_components"]
self.knn_n_neighbors = self.config["n_neighbors"]
self.knn_weights = (self.config["weights"],)
self.input_files_bucket = self.config["s3_bucket"]["input_files"]
self.s3 = S3_Operation()
def remove_columns(self, data, columns):
"""
Method Name : remove_columns
Description : This method removes the given columns from a pandas dataframe.
Output : A pandas dataframe after removing the specified columns.
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.remove_columns.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
self.data = data
self.columns = columns
try:
self.useful_data = self.data.drop(labels=self.columns, axis=1)
self.log_writer.log(self.log_file, "Column removal Successful")
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return self.useful_data
except Exception as e:
self.log_writer.log(self.log_file, "Column removal Unsuccessful")
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
    def separate_label_feature(self, data, label_column_name):
        """
        Method Name : separate_label_feature
        Description : This method separates the features and a Label Columns.
        Output : Returns two separate Dataframes, one containing features and the other containing Labels .
        On Failure : Write an exception log and then raise an exception

        Written By : iNeuron Intelligence
        Version : 1.2
        Revisions : moved setup to cloud
        """
        method_name = self.separate_label_feature.__name__

        self.log_writer.start_log(
            "start",
            self.class_name,
            method_name,
            self.log_file,
        )

        try:
            # Features are everything except the label column.
            self.X = data.drop(labels=label_column_name, axis=1)

            self.Y = data[label_column_name]

            self.log_writer.log(
                self.log_file,
                "Label Separation Successful",
            )

            self.log_writer.start_log(
                "exit",
                self.class_name,
                method_name,
                self.log_file,
            )

            return self.X, self.Y

        except Exception as e:
            self.log_writer.log(
                self.log_file,
                "Label Separation Unsuccessful",
            )

            # exception_log is expected to log and re-raise -- TODO confirm.
            self.log_writer.exception_log(
                e,
                self.class_name,
                method_name,
                self.log_file,
            )
def drop_unnecessary_columns(self, data, cols):
"""
Method Name : drop_unnecessary_columns
Description : This method drop unnecessary columns in the dataframe
Output : Unnecessary columns are dropped in the dataframe
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.drop_unnecessary_columns.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
try:
data = data.drop(cols, axis=1)
self.log_writer.log(self.log_file, "Dropped unnecessary columns")
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return data
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
def replace_invalid_with_null(self, data):
"""
Method Name : replace_invalid_with_null
Description : This method replaces invalid values with null
Output : A dataframe where invalid values are replaced with null
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.replace_invalid_with_null.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
try:
for column in data.columns:
count = data[column][data[column] == "?"].count()
if count != 0:
data[column] = data[column].replace("?", np.nan)
self.log_writer.log(
self.log_file,
"Replaced invalid values with np.nan",
)
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return data
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
    def is_null_present(self, data):
        """
        Method Name : is_null_present
        Description : This method checks whether there are null values present in the pandas dataframe.
                      As a side effect it records the affected columns in
                      ``self.cols_with_missing_values`` and, when nulls exist, uploads a
                      per-column null-count CSV to S3.
        Output      : Returns True if null values are present in the dataframe, False otherwise.
        On Failure  : Write an exception log and then raise an exception
        Written By  : iNeuron Intelligence
        Version     : 1.2
        Revisions   : moved setup to cloud
        """
        method_name = self.is_null_present.__name__
        self.log_writer.start_log(
            "start",
            self.class_name,
            method_name,
            self.log_file,
        )
        self.null_present = False
        self.cols_with_missing_values = []
        self.cols = data.columns
        try:
            # Per-column count of NaN/None values.
            self.null_counts = data.isna().sum()
            for i in range(len(self.null_counts)):
                if self.null_counts[i] > 0:
                    self.null_present = True
                    self.cols_with_missing_values.append(self.cols[i])
            if self.null_present:
                # Build a small report frame (column name -> missing count) and
                # push it to the input-files bucket for later inspection.
                self.null_df = pd.DataFrame()
                self.null_df["columns"] = data.columns
                self.null_df["missing values count"] = np.asarray(data.isna().sum())
                self.log_writer.log(self.log_file, "Created data frame with null values")
                self.s3.upload_df_as_csv(
                    self.null_df,
                    self.null_values_file,
                    self.input_files_bucket,
                    self.null_values_file,
                )
            self.log_writer.start_log(
                "exit", self.class_name, method_name, self.log_file
            )
            return self.null_present
        except Exception as e:
            self.log_writer.log(self.log_file, "Finding missing values failed")
            self.log_writer.exception_log(
                e, self.class_name, method_name, self.log_file
            )
def encode_target_cols(self, data):
"""
Method Name : encode_target_cols
Description : This method encodes all the categorical values in the training set.
Output : A dataframe which has all the categorical values encoded.
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.encode_target_cols.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
try:
data["class"] = data["class"].map({"p": 1, "e": 2})
for column in data.drop(["class"], axis=1).columns:
data = pd.get_dummies(data, columns=[column])
self.log_writer.log(self.log_file, "Encoded target columns")
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return data
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
    def apply_standard_scaler(self, X):
        """
        Method Name : apply_standard_scaler
        Description : This method standardizes the features with sklearn's StandardScaler
                      (zero mean, unit variance per column).
        Output      : A numpy array with the scaled feature values.
        On Failure  : Raise Exception
        Written By  : iNeuron Intelligence
        Version     : 1.2
        Revisions   : moved setup to cloud
        """
        method_name = self.apply_standard_scaler.__name__
        self.log_writer.start_log(
            "start",
            self.class_name,
            method_name,
            self.log_file,
        )
        try:
            # NOTE: fit_transform returns a plain ndarray, not a DataFrame.
            scalar = StandardScaler()
            X_scaled = scalar.fit_transform(X)
            self.log_writer.log(
                self.log_file,
                f"Transformed data using {scalar.__class__.__name__}",
            )
            self.log_writer.start_log(
                "exit",
                self.class_name,
                method_name,
                self.log_file,
            )
            return X_scaled
        except Exception as e:
            self.log_writer.exception_log(
                e,
                self.class_name,
                method_name,
                self.log_file,
            )
def impute_missing_values(self, data):
"""
Method Name : impute_missing_values
Description : This method replaces all the missing values in the dataframe using KNN Imputer.
Output : A dataframe which has all the missing values imputed.
On Failure : Raise Exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.impute_missing_values.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
self.data = data
try:
imputer = KNNImputer(
n_neighbors=self.knn_n_neighbors,
weights=self.knn_weights,
missing_values=np.nan,
)
self.log_writer.log(
self.log_file,
f"Initialized {imputer.__class__.__name__}",
)
self.new_array = imputer.fit_transform(self.data)
self.log_writer.log(
self.log_file,
"Imputed missing values using KNN imputer",
)
self.new_data = pd.dataframe(
data=(self.new_array), columns=self.data.columns
)
self.log_writer.log(
self.log_file,
"Created new dataframe with imputed values",
)
self.log_writer.log(
self.log_file,
"Imputing missing values Successful",
)
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return self.new_data
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
def get_columns_with_zero_std_deviation(self, data):
"""
Method Name : get_columns_with_zero_std_deviation
Description : This method finds out the columns which have a standard deviation of zero.
Output : List of the columns with standard deviation of zero
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_columns_with_zero_std_deviation.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
self.columns = data.columns
self.data_n = data.describe()
self.col_drop = []
try:
for x in self.columns:
if self.data_n[x]["std"] == 0:
self.col_drop.append(x)
self.log_writer.log(
self.log_file,
"Column search for Standard Deviation of Zero Successful",
)
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return self.col_drop
except Exception as e:
self.log_writer.log(
self.log_file,
"Column search for Standard Deviation of Zero Failed",
)
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
| StarcoderdataPython |
3393578 | import os
from typing import List
import numpy as np
import torch
import pytest
from persia.helper import ensure_persia_service
from persia.ctx import BaseCtx, DataCtx
from .utils import random_port
EMBEDDING_CONFIG = {"slots_config": {"age": {"dim": 8}}}
RAW_EMBEDDING_CONFIG = {
"slots_config": {
"user_id": {"dim": 8},
"user_id_follower_list": {"dim": 8, "embedding_summation": False},
}
}
GLOBAL_CONFIG = {
"embedding_worker_config": {"forward_buffer_size": 1000},
"common_config": {"metrics_config": {"enable_metrics": False}},
}
def assert_ndarray_base_data(
    ndarray_base_data_list: List[np.ndarray],
    tensors: List[torch.Tensor],
    use_cuda: bool,
):
    """Assert each expected ndarray equals the corresponding tensor's values."""
    assert len(ndarray_base_data_list) == len(tensors)
    for expected, actual in zip(ndarray_base_data_list, tensors):
        # Tensors must be on the host before converting to numpy.
        host_tensor = actual.cpu() if use_cuda else actual
        np.testing.assert_equal(expected, host_tensor.numpy())
def assert_id_type_feature_data(tensors: List[torch.Tensor], config: dict):
    """Assert embedding tensor shapes match their slot configurations.

    For slots with ``embedding_summation`` (the default) the tensor is 2-D with
    last dimension ``dim``; otherwise it is 3-D with last dimension ``dim + 1``.
    """
    # BUG FIX: iterating the dict directly yielded the slot *names* (strings),
    # so ``embedding_config["dim"]`` failed; iterate the config dicts instead.
    embedding_configs = list(config["slots_config"].values())
    for tensor, embedding_config in zip(tensors, embedding_configs):
        # BUG FIX: use the same default as above -- a plain ``["embedding_summation"]``
        # raised KeyError for slots that omit the key.
        summation = embedding_config.get("embedding_summation", True)
        expected_dim = embedding_config["dim"] if summation else embedding_config["dim"] + 1
        expected_ndim = 2 if summation else 3
        assert len(tensor.shape) == expected_ndim
        assert tensor.shape[-1] == expected_dim
# FIXME: Try no-singleton PersiaCommonContext.
# Every time init the PersiaCommonContext, it will reuse the instance created
# before. Any environment update makes no effects on singleton instance PersiaCommonContext,
# such as PERSIA_NATS_URL.
if torch.cuda.is_available():
parameter_list = [True]
ids = ["cuda"]
else:
parameter_list = [False]
ids = ["cpu"]
@pytest.mark.parametrize("use_cuda", parameter_list, ids=ids)
def test_data_ctx(use_cuda: bool):
    """End-to-end test: send a PersiaBatch through DataCtx and read it back.

    Spins up the persia services, pushes one batch from ``data_loader``, then
    consumes it via ``StreamingDataset`` and checks the round-tripped
    non-id-type features and labels match what was sent.
    """
    non_id_type_features = [np.array([1], dtype=np.float32)]
    labels = [
        np.array(
            [
                1,
            ],
            dtype=np.float32,
        )
    ]
    def data_loader():
        # Imported lazily so the batch types are resolved inside the service
        # environment set up by ensure_persia_service.
        from persia.embedding.data import (
            PersiaBatch,
            IDTypeFeature,
            NonIDTypeFeature,
            Label,
        )
        persia_batch = PersiaBatch(
            [
                IDTypeFeature(
                    "age",
                    [
                        np.array(
                            [
                                1,
                                2,
                                3,
                            ],
                            dtype=np.uint64,
                        )
                    ],
                )
            ],
            non_id_type_features=[
                NonIDTypeFeature(non_id_type_feature)
                for non_id_type_feature in non_id_type_features
            ],
            labels=[Label(label) for label in labels],
            requires_grad=False,
        )
        with DataCtx() as data_ctx:
            data_ctx.send_data(persia_batch)
    # Single-process distributed setup expected by the persia context.
    os.environ["WORLD_SIZE"] = str(1)
    os.environ["RANK"] = str(0)
    os.environ["LOCAL_RANK"] = str(0)
    from persia.ctx import PreprocessMode, _prepare_feature
    from persia.data import DataLoader, StreamingDataset
    from persia.embedding import get_default_embedding_config
    from persia.env import get_world_size
    device_id = 0 if use_cuda else None
    with ensure_persia_service(
        data_loader_func=data_loader,
        embedding_config=EMBEDDING_CONFIG,
        global_config=GLOBAL_CONFIG,
        embedding_worker_port=random_port(),
        embedding_parameter_server_port=random_port(),
        nats_server_port=random_port(),
    ):
        embedding_config = get_default_embedding_config()
        with BaseCtx(device_id=device_id) as ctx:
            ctx.common_context.init_nats_publisher(get_world_size())
            ctx.common_context.configure_embedding_parameter_servers(
                embedding_config.emb_initialization[0],
                embedding_config.emb_initialization[1],
                embedding_config.admit_probability,
                embedding_config.weight_bound > 0,
                embedding_config.weight_bound,
            )
            ctx.common_context.wait_servers_ready()
            # Pull exactly one training batch back out of the streaming pipeline.
            data_loader = DataLoader(
                StreamingDataset(buffer_size=10), timeout_ms=1000 * 30
            )
            data_generator = iter(data_loader)
            persia_training_batch = next(data_generator)
            (
                non_id_type_tensors,
                id_type_embedding_tensors,
                label_tensors,
            ) = _prepare_feature(persia_training_batch, PreprocessMode.EVAL)
            assert_ndarray_base_data(
                non_id_type_features, non_id_type_tensors, use_cuda
            )
            assert_ndarray_base_data(labels, label_tensors, use_cuda)
            # assert_id_type_feature_data(id_type_embedding_tensors, EMBEDDING_CONFIG)
| StarcoderdataPython |
1778167 | <reponame>maroozm/AliPhysics
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.struct.DataContainers import TrackContainer, ClusterContainer
from PWGJE.EMCALJetTasks.Tracks.analysis.base.struct.EventHistogram import EventHistogramOld, EventHistogramNew
from PWGJE.EMCALJetTasks.Tracks.analysis.base.struct.ParticleTHnSparse import CreatePartcileTHnSparse
from copy import deepcopy
class DataContainerFactory(object):
    """Factory creating data containers matching a given data format ("new" or "old")."""

    def __init__(self, dataformat):
        """
        Constructor

        :param dataformat: data format identifier; "new" selects the new-format
                           event histogram / THnSparse wrappers.
        """
        self.__dataformat = dataformat

    def SetDataFormat(self, df):
        """Change the data format used for subsequently created containers."""
        self.__dataformat = df

    def CreateTrackContainer(self, eventhist, trackhist):
        """Create a track container with a format-appropriate event histogram."""
        return TrackContainer(self.MakeEventHist(eventhist), trackhist, self.__dataformat)

    def CreateClusterContainer(self, eventhist, clusterhist):
        """Create a cluster container with a format-appropriate event histogram."""
        return ClusterContainer(self.MakeEventHist(eventhist), clusterhist, self.__dataformat)

    def CreateParticleContainer(self, particlehist):
        """Create a particle THnSparse wrapper; "new" format selects the new layout."""
        return CreatePartcileTHnSparse(particlehist, True if self.__dataformat == "new" else False)

    def MakeEventHist(self, eventhist):
        """Wrap a deep copy of the event histogram in the class matching the format."""
        # Deep-copied so the factory never shares mutable histogram state.
        if self.__dataformat == "new":
            return EventHistogramNew(deepcopy(eventhist))
        # NOTE: the original final line carried a dataset-extraction artifact
        # ("| StarcoderdataPython |") that made the file unparsable; removed.
        return EventHistogramOld(deepcopy(eventhist))
52047 | <reponame>cloudmesh/cloudmesh-flow
from cloudmesh.compute.aws import Provider as AWSProvider
from cloudmesh.compute.azure import AzProvider
from cloudmesh.flow.Flow import Flow
class MyFlow(Flow):
    """Example workflow with AWS and Azure provisioning/ping steps (all stubs)."""
    def spawn_aws(self):
        # TODO: provision resources on AWS (not implemented yet).
        pass
    def spawn_azure(self):
        # TODO: provision resources on Azure (not implemented yet).
        pass
    def ping_aws(self):
        # TODO: health-check the AWS resources (not implemented yet).
        pass
    def ping_azure(self):
        # TODO: health-check the Azure resources (not implemented yet).
        pass
if __name__ == "__main__":
    # BUG FIX: `sys` was used below but never imported anywhere in this file,
    # so running the script raised NameError.
    import sys

    # NOTE(review): sys.argv[0] is the *script path*; if a workflow argument
    # was intended, this should probably be sys.argv[1] -- confirm with callers.
    flow = MyFlow(sys.argv[0])
3293490 | <gh_stars>0
from skimage import io, data
import numpy as np
from matplotlib import pyplot as plt
# Load the built-in "camera" test image and work in float to avoid
# uint8 overflow during the convolution arithmetic.
im = data.camera()
im = im.astype('float')
lin, col = im.shape
#im2 = im.copy()
im2 = np.zeros((lin,col))
# 3x3 mean (box blur) kernel...
filtro = np.array([[1/9, 1/9, 1/9],
                   [1/9, 1/9, 1/9],
                   [1/9, 1/9, 1/9]])
# ...immediately overwritten by a Prewitt-style vertical-edge kernel (scaled
# by 1/6), so only the edge filter is actually applied below.
filtro = np.array([[1, 0, -1],
                   [1, 0, -1],
                   [1, 0, -1]]) / 6
# Manual 3x3 convolution; the one-pixel border is skipped and stays zero.
for i in range(1, lin-1):
    for j in range(1, col-1):
#        soma = im[i-1,j-1]+im[i-1,j]+im[i-1,j+1]
#        soma = soma + im[i,j-1]+im[i,j]+im[i,j+1]
#        soma = soma + im[i+1,j-1]+im[i+1,j]+im[i+1,j+1]
#        media = soma / 9
        sub_im = im[i-1:i+2, j-1:j+2]
#        media = sub_im.mean()
#        media = (sub_im * (1/9)).sum()
        # Element-wise multiply with the kernel and sum = one convolution step.
        mult = sub_im * filtro
        media = mult.sum()
        im2[i,j] = media
# Show original and filtered images side by side.
plt.figure()
plt.subplot(121)
plt.imshow(im.astype('uint8'), cmap='gray')
plt.subplot(122)
plt.imshow(im2.astype('uint8'), cmap='gray')
189555 | <reponame>gjbadros/elkm1<gh_stars>10-100
"""Definition of an ElkM1 Custom Value"""
from .const import Max, TextDescriptions
from .elements import Element, Elements
from .message import cp_encode, cw_encode
class Setting(Element):
    """Class representing a single ElkM1 custom value (setting)."""
    def __init__(self, index, elk):
        super().__init__(index, elk)
        # Format code reported by the panel; updated by Settings._cr_handler.
        self.value_format = 0
        # Current value; None until the first CR message is received.
        self.value = None
    def set(self, value):
        """(Helper) Send a CW message writing this custom value to the panel."""
        self._elk.send(cw_encode(self._index, value, self.value_format))
class Settings(Elements):
    """Handling for the full collection of ElkM1 custom values."""
    def __init__(self, elk):
        super().__init__(elk, Setting, Max.SETTINGS.value)
        # CR messages carry custom-value updates from the panel.
        elk.add_handler("CR", self._cr_handler)
    def sync(self):
        """Retrieve custom values (and their descriptions) from the ElkM1."""
        self.elk.send(cp_encode())
        self.get_descriptions(TextDescriptions.SETTING.value)
    def _cr_handler(self, values):
        # Apply each reported value/format to the matching Setting element.
        for value in values:
            custom_value = self.elements[value["index"]]
            custom_value.value_format = value["value_format"]
            custom_value.value = value["value"]
| StarcoderdataPython |
# Bubble Sort (animated with matplotlib)
from random import shuffle, randint
import matplotlib.pyplot as plt
import matplotlib.animation as ani
# 101 random values to sort, plus their fixed x positions for the bar chart.
raw = [randint(0, 100) for i in range(101)]
shuffle(raw)
x = [i for i in range(101)]
# Upper bound of the region still being sorted (never shrunk in this demo).
tempmax = len(raw)
fig, ax=plt.subplots()
ax.set_xlim(-1, 101)
def bubble_sort_step():
    """Run one full bubble-sort pass over `raw` and redraw the bar chart."""
    global tempmax, raw, x
    # One bubble pass: swap adjacent out-of-order elements in place.
    for j in range(0, tempmax-1):
        if raw[j] > raw[j+1]:
            raw[j], raw[j+1] = raw[j+1], raw[j]
    # Remove the previously drawn bars before plotting the updated ordering.
    for bar in ax.containers:
        bar.remove()
    return plt.bar(x,raw, color='cornflowerblue')
def animate(i):
    """FuncAnimation callback: advance the sort one pass and return the bars."""
    # The frame index `i` is unused; every frame simply performs the next pass.
    return bubble_sort_step()
# Drive 100 sorting passes at 100 ms per frame and display the animation.
anim= ani.FuncAnimation(fig,animate,frames=100,
                       interval=100)
plt.grid(True)
plt.show()
136101 | #!/usr/bin/env python
# coding=utf-8
"""Message Handler.
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
Plays the server role in communication_node
Relations
----------
subscribes from /message_server topic,
publishes on corresponding nodes /ns/message_status topic
"""
import os
import signal
import sys
from time import gmtime,strftime
import rospy
from communication_node.msg import *
from nav_msgs.msg import *
debuger_mode=False;
information_logger=None;
rate=None
message_handlers_list=[]
def on_exit(*args):
    """Signal handler: stamp and close the test log (if open), then exit."""
    global information_logger
    print ( "\n EXITING MESSAGE HANDLER")
    if information_logger is not None:
        # Record the finish time before closing the log file.
        finished = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        information_logger.write("\n The Test has finished on " + finished + " GMT time \n")
        information_logger.write("\n ======================== \n ======================== \n \n \n")
        information_logger.close()
    sys.exit(0)
class message_handle:
    """
    Wrapper around a ROS publisher/subscriber pair for one message type.

    For each data type given to this script (via launch file or parameter
    server) one instance is created; its callback relays messages from the
    central server topic to the destination robot's topic, but only when the
    connection matrix says the two robots can currently communicate.
    """
    def __init__(self,sub_topic="",pub_topic="",data_type=None,tag="",alt_type=None):
        """Constructor.

        :param sub_topic: string, prefix of the topic to subscribe to
        :param pub_topic: string, suffix of the per-robot topic to publish to
        :param data_type: type, message type being handled
        :param tag: string, tag identifying the data type (appended to sub_topic)
        :param alt_type: type or None, alternative type used when republishing
            the payload (e.g. raw map data) instead of the wrapper message

        Relations
        ----------
        subscribes from "sub_topic" + "tag"
        """
        self.sub_topic=sub_topic;
        self.pub_topic=pub_topic;
        self.data_type=data_type;
        self.alt_type=alt_type;
        self.tag=tag;
        self.subscriber=rospy.Subscriber(self.sub_topic+self.tag,self.data_type, self.callback_function,queue_size=20);
        # Created lazily in the callback, once the destination is known.
        self.message_publisher=None;
    def callback_function(self,data):
        """Callback for self.subscriber: relay `data` if the link is up.

        :param data: self.data_type, message received by the subscriber;
            must expose .source and .destination robot names

        Relations
        ----------
        publishes to "data.destination" + self.pub_topic (or
        "data.source" + "/g_map" when alt_type is set)
        """
        global information_logger
        # NOTE(review): `propagation_parameters` is never defined in this
        # module -- confirm it is injected elsewhere or remove.
        global propagation_parameters
        # TODO handle for different message types
        # TODO prop_model = data.prop_model
        if(self.tag!="map"and self.tag!="Odom"):
            print("new "+self.tag+" received")
        # Ignore messages to/from robots that are not registered.
        robots_list=rospy.get_param("/robots_list")
        if ((data.source not in robots_list )or(data.destination not in robots_list) ):
            return;
        connection_list=[];
        connection_list=(rospy.get_param("/connection_list_"+data.source));
        source_index=robots_list.index(data.destination);
        # connection_list[1+i] == 1 means the link to robot i is currently up.
        if (connection_list[1+source_index]==1):
        #if (True):
            if (self.alt_type!=None):
                # Republish only the payload, on the source robot's map topic.
                self.message_publisher = rospy.Publisher(data.source +"/g_map", self.alt_type, queue_size=10)
                i=0
                # Publish twice (i goes 0,1) to reduce the chance of a drop.
                while not ( rospy.is_shutdown() or i>1):
                    self.message_publisher.publish(data.data)
                    print("from",data.source," to",data.destination);
                    i+=1
                    rate.sleep()
                i=0
            else:
                # Forward the whole message to the destination robot's inbox.
                self.message_publisher = rospy.Publisher(data.destination + self.pub_topic, self.data_type, queue_size=10)
                i=0
                while not ( rospy.is_shutdown() or i>1):
                    self.message_publisher.publish(data)
                    print("from",data.source," to",data.destination);
                    #print("sent messagne",self.tag)
                    i+=1
                    rate.sleep()
                i=0
            #print "communication is possible"
            if debuger_mode==True :
                # we write information to the log file
                information_logger.write(self.tag+"".join(["-" for k in range(0,11-len(self.tag))]))
                information_logger.write(data.source+"".join(["-" for k in range(0,11-len(data.source))]))
                information_logger.write(data.destination+"".join(["-" for k in range(0,20-len(data.destination))]))
                information_logger.write("message sent"+"\n")
        else:
            # TODO, ignore the message, send feedback
            if debuger_mode==True :
                # we write information to the log file
                information_logger.write(self.tag+"".join(["-" for k in range(0,11-len(self.tag))]))
                information_logger.write(data.source+"".join(["-" for k in range(0,11-len(data.source))]))
                information_logger.write(data.destination+"".join(["-" for k in range(0,20-len(data.destination))]))
                information_logger.write("failed"+"\n")
            #print "communication is not possible"
def listener():
    """Initialize the node, optional test log, and one handler per message type."""
    global information_logger
    global rate
    global debuger_mode
    global message_handlers_list;
    # NOTE(review): `propagation_models` is never defined in this module --
    # confirm it is used/injected elsewhere or remove.
    global propagation_models;
    rospy.init_node('communication_node_message_handler')
    debuger_mode=rospy.get_param("debuger_mode",default=False)
    if debuger_mode==True :
        # Open (and header-stamp) the per-run log file under test_results/.
        log_file=rospy.get_param("log_file",default="results")
        if not os.path.exists("/home/user/project_franchesco/communication_node/test_results/"+log_file):
            os.makedirs("/home/user/project_franchesco/communication_node/test_results/"+log_file)
        information_logger = open("/home/user/project_franchesco/communication_node/test_results/"+log_file+"/"+log_file+".log", "w")
        information_logger.write("\n \n \n ###################### \n ###################### \n")
        information_logger.write("\n This is the result of test on "+strftime("%Y-%m-%d %H:%M:%S", gmtime()) + " GMT time \n")
        information_logger.write("Type-------Source-----Destination---------Outcome\n");
    # Close the log cleanly on Ctrl-C / termination.
    signal.signal(signal.SIGINT, on_exit)
    signal.signal(signal.SIGTERM, on_exit)
    rate=rospy.Rate(50)
    # [sub_prefix, pub_suffix, message type, tag, alt_type] per handled type.
    message_list=[["/message_server_","/inbox_Goal",Data_Goal,"Goal",None],["/message_server_","/inbox_Map",Data_Map,"map",None]];
    for i in range (0,len(message_list)):
        message_handlers_list.append(message_handle(message_list[i][0],message_list[i][1],message_list[i][2],message_list[i][3],message_list[i][4]));
    rospy.spin()
| StarcoderdataPython |
3246197 | # Generated by Django 2.0.3 on 2020-06-01 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``SshInfo`` table storing SSH connection credentials."""
    dependencies = [
        ('ApiManager', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='SshInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('ip', models.CharField(max_length=20, unique=True)),
                # NOTE(review): IntegerField does not use max_length; Django
                # flags this as an ignored option -- confirm intent.
                ('port', models.IntegerField(default=22, max_length=10, null=True)),
                ('login_user', models.CharField(max_length=20)),
                # NOTE(review): stored as plain CharField -- presumably the
                # password is hashed/encrypted elsewhere; verify.
                ('password', models.CharField(max_length=64)),
            ],
            options={
                'verbose_name': 'ssh信息',
                'db_table': 'SshInfo',
            },
        ),
    ]
| StarcoderdataPython |
3252129 | # Mail Service
from threading import Thread
from flask_mail import Message
from run import flask_app, mail
def send_async_email(app, msg):
    """Send *msg* inside *app*'s application context (runs in a worker thread)."""
    with app.app_context():
        try:
            mail.send(msg)
            print('Mail sent')
        except ConnectionRefusedError:
            # BUG FIX: this function runs as a Thread target, so the previous
            # Flask-style ``return (body, 500)`` was silently discarded.
            # Report the failure instead.
            print('[MAIL SERVER] not working')
def send_email(subject, sender, recipients, text_body):
    """Build a plain-text message and dispatch it on a background thread."""
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    # Send asynchronously so the caller is not blocked by the SMTP round-trip.
    worker = Thread(target=send_async_email, args=(flask_app, msg))
    worker.start()
| StarcoderdataPython |
3281176 | <gh_stars>0
# Interactive unit-price calculator: asks for a price and net content and
# prints the cost per 500 g/ml, looping until the user enters 'b'.
while True:
    try:
        print('\nPlease input number only')
        price = input('The price of the product: ')
        while True:
            a = input('If this product include multiple packages, please input a to provide more information, '
                      'else press enter to continue. ')
            if a == 'a':
                # Multi-package product: total net = per-package net * count.
                net_each = input('   The net content of each package:')
                quantity = input('   Quantity: ')
                net = float(net_each) * float(quantity)
                print('The overall net content of this product is:' + str(net) + ' g/ml')
                break
            else:
                net = input('The net content of this product:')
                break
        # Normalize to price per 500 g/ml.
        x = float(price) / float(net) * 500
        print('This product is: \n' + str(round(x, 4)) + ' £/$/€/¥ per 500 g/ml.')
    except Exception:
        # Broad catch: any bad numeric input just restarts the prompt loop.
        print('\nSorry, please input number only. Or an unknown error occurred.')
    keep_running = input('Input any keys to continue or input b to exit the program ')
    if keep_running == 'b':
        print('')
        break
4822435 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Functions to call when running the module.
"""
import numpy as np
import os
import pandas as pd
from delphi_utils import read_params
from .qualtrics import make_fetchers,get
def run_module():
    """Fetch Qualtrics survey data into the configured input directory."""
    params = read_params()
    qualtrics_params = params['qualtrics']
    qualtrics_params['qualtrics_dir'] = params['input_dir']

    target_dir = qualtrics_params['qualtrics_dir']
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # Without an API token we only announce a dry run; `get` handles the rest.
    if not qualtrics_params['token']:
        print("\nDRY-RUN MODE\n")

    fetcher, poster = make_fetchers(qualtrics_params)
    get(fetcher, poster, qualtrics_params)
| StarcoderdataPython |
27238 | <reponame>PingHuskar/hackerrank<filename>algorithms/warmup/compare-the-triplets.py
# Algorithms > Warmup > Compare the Triplets
# Compare the elements in two triplets.
#
# https://www.hackerrank.com/challenges/compare-the-triplets/problem
#
# Read both triplets; materialize as lists so each can be iterated twice.
a = list(map(int, input().split()))
b = list(map(int, input().split()))

# One point per position where a contestant's rating is strictly higher.
alice = sum(x > y for x, y in zip(a, b))
bob = sum(x < y for x, y in zip(a, b))

print(alice, bob)
| StarcoderdataPython |
3365309 | from django.core.exceptions import ValidationError
from django.forms import inlineformset_factory
from django.utils import translation
from parler.forms import TranslatableModelForm
from .utils import AppTestCase
from .testapp.models import (SimpleModel, UniqueTogetherModel, ForeignKeyTranslationModel, RegularModel,
CleanFieldModel, UUIDPrimaryKeyModel, UUIDPrimaryKeyRelatedModel,
IntegerPrimaryKeyModel, IntegerPrimaryKeyRelatedModel)
class SimpleForm(TranslatableModelForm):
    """Translatable form exposing all fields of SimpleModel."""
    class Meta:
        model = SimpleModel
        fields = '__all__'
class CleanFieldForm(TranslatableModelForm):
    """Translatable form for CleanFieldModel (model with clean_* hooks)."""
    class Meta:
        model = CleanFieldModel
        fields = '__all__'
class UniqueTogetherForm(TranslatableModelForm):
    """Translatable form for the model with a (language, slug) unique constraint."""
    class Meta:
        model = UniqueTogetherModel
        fields = '__all__'
class ForeignKeyTranslationModelForm(TranslatableModelForm):
    """Translatable form for a model whose translation holds a non-null FK."""
    class Meta:
        model = ForeignKeyTranslationModel
        fields = '__all__'
class IntegerPrimaryKeyForm(TranslatableModelForm):
    """Translatable form for the integer-PK parent model."""
    class Meta:
        model = IntegerPrimaryKeyModel
        fields = '__all__'
class UUIDPrimaryKeyForm(TranslatableModelForm):
    """Translatable form for the UUID-PK parent model."""
    class Meta:
        model = UUIDPrimaryKeyModel
        fields = '__all__'
class FormTests(AppTestCase):
    """
    Tests for TranslatableModelForm construction, validation and saving.
    """
    def test_form_language_validation(self):
        # Valid language tags are accepted; unknown ones raise ValueError.
        form_instance = SimpleForm(_current_language='fr-FR')
        self.assertEqual(form_instance.language_code, 'fr-FR')
        with self.assertRaises(ValueError):
            SimpleForm(_current_language='fa')
        with self.assertRaises(ValueError):
            SimpleForm(_current_language='va_VN')
    def test_form_fields(self):
        """
        Check if the form fields exist.
        """
        self.assertTrue('shared' in SimpleForm.base_fields)
        self.assertTrue('tr_title' in SimpleForm.base_fields)
    def test_form_save(self):
        """
        Check if the form receives and stores data.
        """
        with translation.override('fr'):
            # Initialize form in another language than the active one.
            x = SimpleForm(data={'shared': 'SHARED', 'tr_title': 'TRANS'})
            x.language_code = 'nl'
            self.assertFalse(x.errors)
            # Data should come out
            self.assertEqual(x.cleaned_data['shared'], 'SHARED')
            self.assertEqual(x.cleaned_data['tr_title'], 'TRANS')
            # Data should be saved under the form's language, not the active one.
            instance = x.save()
            self.assertEqual(instance.get_current_language(), 'nl')
            x = SimpleModel.objects.language('nl').get(pk=instance.pk)
            self.assertEqual(x.shared, 'SHARED')
            self.assertEqual(x.tr_title, 'TRANS')
    def test_form_save_clean(self):
        """
        Check that the model's clean_* hooks run on both shared and translated fields.
        """
        with translation.override('fr'):
            # Initialize form in another language than the active one.
            x = CleanFieldForm(data={'shared': 'TRANS', 'tr_title': 'TEST'})
            x.language_code = 'nl'
            self.assertFalse(x.errors)
            # Data should come out
            self.assertEqual(x.cleaned_data['shared'], 'TRANS')
            self.assertEqual(x.cleaned_data['tr_title'], 'TEST')
            # Data should be saved with the clean-hook suffixes applied.
            instance = x.save()
            self.assertEqual(instance.get_current_language(), 'nl')
            x = CleanFieldModel.objects.language('nl').get(pk=instance.pk)
            self.assertEqual(x.shared, 'TRANS_cleanchar_cleanshared')
            self.assertEqual(x.tr_title, 'TEST_cleanchar_cleantrans')
    def test_form_save_clean_exclude(self):
        """
        Check that non-form fields are properly excluded.
        """
        class CleanPartialFieldForm(TranslatableModelForm):
            class Meta:
                model = CleanFieldModel
                fields = ('shared',)
                exclude = ('tr_title',)
        self.assertEqual(list(CleanPartialFieldForm.base_fields.keys()), ['shared'])
        with translation.override('fr'):
            x = CleanPartialFieldForm(data={'shared': 'TRANS'})
            x.language_code = 'nl'
            self.assertFalse(x.errors)
    def test_unique_together(self):
        UniqueTogetherModel(_current_language='en', slug='foo').save()
        # Different language code, no problem
        form = UniqueTogetherForm(data={'slug': 'foo'})
        form.language_code = 'fr'
        self.assertTrue(form.is_valid())
        # Same language code, should raise unique_together check
        form = UniqueTogetherForm(data={'slug': 'foo'})
        form.language_code = 'en'
        self.assertFalse(form.is_valid())
        self.assertRaises(ValidationError, lambda: form.instance.validate_unique())
    def test_not_null_foreignkey_in_translation(self):
        """
        Simulate the scenario for a model whose translation has a not-null FK field:
        1. User creates the model with one translation (EN)
        2. User switches to another language in the admin (FR)
        """
        # create object with translation
        r1 = RegularModel.objects.create(original_field='r1')
        a = ForeignKeyTranslationModel.objects.create(translated_foreign=r1, shared='EN')
        # same way as TranslatableAdmin.get_object() initializing translation, when user switches to a new translation language
        a.set_current_language('fr', initialize=True)
        # initialize form
        form = ForeignKeyTranslationModelForm(instance=a)
        # Reaching this point without an exception is the actual assertion.
        self.assertTrue(True)
class InlineFormTests(AppTestCase):
    """Tests for inline formsets attached to translatable parent models."""
    def test_integer_primary_key(self):
        parent_form = IntegerPrimaryKeyForm(data={'tr_title': 'TRANS'})
        self.assertTrue(parent_form.is_valid())
        parent = parent_form.save(commit=False)
        InlineFormSet = inlineformset_factory(IntegerPrimaryKeyModel, IntegerPrimaryKeyRelatedModel, fields=())
        formset = InlineFormSet(instance=parent, data={'children-TOTAL_FORMS': 1, 'children-INITIAL_FORMS': 0})
        self.assertTrue(formset.is_valid())
        parent.save()
        # Saving the parent must also persist its single translation.
        self.assertEqual(parent.translations.count(), 1)
    def test_uuid_primary_key(self):
        parent_form = UUIDPrimaryKeyForm(data={'tr_title': 'TRANS'})
        self.assertTrue(parent_form.is_valid())
        parent = parent_form.save(commit=False)
        self.assertIsNotNone(parent.pk)  # UUID primary key set on instantiation
        InlineFormSet = inlineformset_factory(UUIDPrimaryKeyModel, UUIDPrimaryKeyRelatedModel, fields=())
        formset = InlineFormSet(instance=parent, data={'children-TOTAL_FORMS': 1, 'children-INITIAL_FORMS': 0})
        self.assertTrue(formset.is_valid())
        self.assertIsNone(parent.pk)  # The formset above will reset the primary key
        parent.save()
        self.assertEqual(parent.translations.count(), 1)
3308910 | #
# @lc app=leetcode id=489 lang=python
#
# [489] Robot Room Cleaner
#
# https://leetcode.com/problems/robot-room-cleaner/description/
#
# algorithms
# Hard (68.53%)
# Likes: 899
# Dislikes: 53
# Total Accepted: 45.5K
# Total Submissions: 66.4K
# Testcase Example: '[[1,1,1,1,1,0,1,1],[1,1,1,1,1,0,1,1],[1,0,1,1,1,1,1,1],[0,0,0,1,0,0,0,0],[1,1,1,1,1,1,1,1]]\n' +
'1\n' +
'3'
#
# Given a robot cleaner in a room modeled as a grid.
#
# Each cell in the grid can be empty or blocked.
#
# The robot cleaner with 4 given APIs can move forward, turn left or turn
# right. Each turn it made is 90 degrees.
#
# When it tries to move into a blocked cell, its bumper sensor detects the
# obstacle and it stays on the current cell.
#
# Design an algorithm to clean the entire room using only the 4 given APIs
# shown below.
#
#
# interface Robot {
# // returns true if next cell is open and robot moves into the cell.
# // returns false if next cell is obstacle and robot stays on the current
# cell.
# boolean move();
#
# // Robot will stay on the same cell after calling turnLeft/turnRight.
# // Each turn will be 90 degrees.
# void turnLeft();
# void turnRight();
#
# // Clean the current cell.
# void clean();
# }
#
#
# Example:
#
#
# Input:
# room = [
# [1,1,1,1,1,0,1,1],
# [1,1,1,1,1,0,1,1],
# [1,0,1,1,1,1,1,1],
# [0,0,0,1,0,0,0,0],
# [1,1,1,1,1,1,1,1]
# ],
# row = 1,
# col = 3
#
# Explanation:
# All grids in the room are marked by either 0 or 1.
# 0 means the cell is blocked, while 1 means the cell is accessible.
# The robot initially starts at the position of row=1, col=3.
# From the top left corner, its position is one row below and three columns
# right.
#
#
# Notes:
#
#
# The input is only given to initialize the room and the robot's position
# internally. You must solve this problem "blindfolded". In other words, you
# must control the robot using only the mentioned 4 APIs, without knowing the
# room layout and the initial robot's position.
# The robot's initial position will always be in an accessible cell.
# The initial direction of the robot will be facing up.
# All accessible cells are connected, which means the all cells marked as 1
# will be accessible by the robot.
# Assume all four edges of the grid are all surrounded by wall.
#
#
#
# @lc code=start
def cleanRoomAux(robot, grid, pos, direction):
    """Recursively clean every reachable cell via DFS with backtracking.

    ``grid`` is the set of already-visited virtual positions, ``pos`` the
    robot's position in a virtual coordinate system rooted at the start
    cell, and ``direction`` the current facing as a (d0, d1) offset.
    """
    robot.clean()
    grid.add(pos)
    # Try all four directions, rotating 90 degrees right after each attempt.
    for i in range(4):
        next_pos = (pos[0] + direction[0], pos[1] + direction[1])
        if next_pos not in grid and robot.move():
            cleanRoomAux(robot, grid, next_pos, direction)
            # Backtrack: turn around, step back, turn around again, so the
            # robot ends up at `pos` facing `direction` once more.
            robot.turnRight()
            robot.turnRight()
            robot.move()
            robot.turnRight()
            robot.turnRight()
        # Rotate the virtual direction in lockstep with the physical turn.
        # NOTE(review): (-d1, d0) paired with turnRight() gives a mirrored
        # virtual map; any consistent bijection keeps the DFS correct.
        direction = (-direction[1], direction[0])
        robot.turnRight()
class Solution(object):
    """LeetCode entry point: clean the whole room from the robot's cell."""

    def cleanRoom(self, robot):
        """
        :type robot: Robot
        :rtype: None
        """
        visited = set()
        origin = (0, 0)
        initial_direction = (0, 1)
        cleanRoomAux(robot, visited, origin, initial_direction)
# @lc code=end
| StarcoderdataPython |
1726517 | import logging
logging.basicConfig(filename=r'travel.log', filemode='w', level=logging.INFO, format='%(message)s')
def get_log(msg):
    # NOTE(review): despite the name, this *writes* `msg` at INFO level to the
    # file configured by basicConfig above and returns nothing.
    logging.info(msg)
| StarcoderdataPython |
3322698 | <gh_stars>1-10
"""
Num
8-bit : TinyInt UnsignedTinyInt
16-bit: SmallInt UnsignedSmallInt
24-bit: MediumInt UnsignedMediumInt
32-bit: Int UnsignedInt
64-bit: BigInt  UnsignedBigInt
FloatField: DOUBLE
DecimalField
EnumField
IntEnumField 0 <= value < 32768
CharEnumField
"""
from typing import Any
from tortoise.fields.base import Field
class TinyIntField(Field, int):
    """
    Tiny integer field. (8-bit signed; range -128..127)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "TINYINT"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        if pk:
            # Primary keys default to DB-generated unless explicitly disabled.
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        # Generated / foreign-key values start at 1; plain columns use the
        # full signed 8-bit range.
        return {
            "ge": 1 if self.generated or self.reference else -128,
            "le": 127,
        }

    class _db_mysql:
        GENERATED_SQL = "TINYINT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class MediumIntField(Field, int):
    """
    Medium integer field. (24-bit signed; range -8388608..8388607)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "MEDIUMINT"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        if pk:
            # Primary keys default to DB-generated unless explicitly disabled.
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        # Generated / foreign-key values start at 1; plain columns use the
        # full signed 24-bit range.
        return {
            "ge": 1 if self.generated or self.reference else -8388608,
            "le": 8388607,
        }

    class _db_mysql:
        GENERATED_SQL = "MEDIUMINT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class UnsignedTinyIntField(Field, int):
    """
    Unsigned Tiny integer field. (8-bit unsigned; range 0..255)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "TINYINT UNSIGNED"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        # Primary keys are DB-generated by default unless the caller opts out.
        if pk:
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        lower_bound = 1 if (self.generated or self.reference) else 0
        return {"ge": lower_bound, "le": 255}

    class _db_mysql:
        GENERATED_SQL = "TINYINT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT"
class UnsignedSmallIntField(Field, int):
    """
    Unsigned Small integer field. (16-bit unsigned; range 0..65535)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "SMALLINT UNSIGNED"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        # Primary keys are DB-generated by default unless the caller opts out.
        if pk:
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        lower_bound = 1 if (self.generated or self.reference) else 0
        return {"ge": lower_bound, "le": 65535}

    class _db_mysql:
        GENERATED_SQL = "SMALLINT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT"
class UnsignedMediumIntField(Field, int):
    """
    Unsigned Medium integer field. (24-bit unsigned; range 0..16777215)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "MEDIUMINT UNSIGNED"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        # Primary keys are DB-generated by default unless the caller opts out.
        if pk:
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        lower_bound = 1 if (self.generated or self.reference) else 0
        return {"ge": lower_bound, "le": 16777215}

    class _db_mysql:
        GENERATED_SQL = "MEDIUMINT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT"
class UnsignedIntField(Field, int):
    """
    Unsigned Int integer field. (32-bit unsigned; range 0..4294967295)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "INT UNSIGNED"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        # Primary keys are DB-generated by default unless the caller opts out.
        if pk:
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        lower_bound = 1 if (self.generated or self.reference) else 0
        return {"ge": lower_bound, "le": 4294967295}

    class _db_mysql:
        GENERATED_SQL = "INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT"
class UnsignedBigIntField(Field, int):
    """
    Unsigned Big integer field. (64-bit unsigned; range 0..18446744073709551615)

    ``pk`` (bool):
        True if field is Primary Key.
    """

    SQL_TYPE = "BIGINT UNSIGNED"
    allows_generated = True

    def __init__(self, pk: bool = False, **kwargs: Any) -> None:
        # Primary keys are DB-generated by default unless the caller opts out.
        if pk:
            kwargs["generated"] = bool(kwargs.get("generated", True))
        super().__init__(pk=pk, **kwargs)

    @property
    def constraints(self) -> dict:
        lower_bound = 1 if (self.generated or self.reference) else 0
        return {"ge": lower_bound, "le": 18446744073709551615}

    class _db_mysql:
        GENERATED_SQL = "BIGINT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT"
| StarcoderdataPython |
# Password check: reject whitespace outright; require at least one lowercase
# letter, one uppercase letter and one digit.
# (Fixed: the first line had a dataset ID fused into it, which made the
# script a syntax error.)
password = input()

has_lower = False
has_upper = False
has_digit = False
for ch in password:
    if ch.isspace():
        # Whitespace is forbidden; report and stop scanning.
        print("NOPE")
        break
    if ch.isdigit():
        has_digit = True
    if ch.isalpha():
        if ch.isupper():
            has_upper = True
        if ch.islower():
            has_lower = True
else:
    # for/else: runs only when no whitespace forced an early break.
    if has_lower and has_upper and has_digit:
        print("OK")
    else:
        print("NOPE")
| StarcoderdataPython |
3290697 | """Rules for generating code from Arcs schemas.
Rules are re-exported in build_defs.bzl -- use those instead.
"""
load("//devtools/build_cleaner/skylark:build_defs.bzl", "register_extension_info")
load(":kotlin.bzl", "arcs_kt_library", "arcs_kt_plan")
load(":manifest.bzl", "arcs_manifest")
load(":tools.oss.bzl", "arcs_tool_schema2wasm")
load(":util.bzl", "manifest_only", "replace_arcs_suffix")
def _run_schema2wasm(
        name,
        src,
        deps,
        out,
        language_name,
        language_flag,
        wasm,
        test_harness = False):
    """Generates source code for the given .arcs schema file.

    Runs sigh schema2wasm to generate the output.

    Validates the inputs, then registers a single arcs_tool_schema2wasm
    action producing `out` from `src`.
    """
    # schema2wasm only understands .arcs manifests, and `deps` must be a
    # list of labels (passing a bare string is a common mistake).
    if not src.endswith(".arcs"):
        fail("src must be a .arcs file")
    if type(deps) == str:
        fail("deps must be a list")

    arcs_tool_schema2wasm(
        name = name,
        srcs = [src],
        outs = [out],
        deps = deps,
        language_name = language_name,
        language_flag = language_flag,
        wasm = wasm,
        test_harness = test_harness,
    )
def arcs_cc_schema(name, src, deps = [], out = None):
    """Generates a C++ header file for the given .arcs schema file."""

    # Default the output header name to the manifest name with a .h suffix.
    header_out = out or replace_arcs_suffix(src, ".h")
    _run_schema2wasm(
        name = name + "_genrule",
        src = src,
        deps = deps,
        out = header_out,
        language_name = "C++",
        language_flag = "--cpp",
        wasm = False,
    )
def arcs_kt_schema(
        name,
        srcs,
        arcs_sdk_deps,
        data = [],
        deps = [],
        platforms = ["jvm"],
        test_harness = False,
        visibility = None):
    """Generates a Kotlin schemas, entities, specs, handle holders, and base particles for input .arcs manifest files.

    Example:

      Direct dependency on this target is required for use.

      ```
          arcs_kt_schema(
            name = "foo_schemas",
            srcs = ["foo.arcs"],
          )

          arcs_kt_library(
            name = "arcs_lib",
            srcs = glob("*.kt"),
            deps = [":foo_schemas"],
          )
      ```

    Args:
      name: name of the target to create
      srcs: list of Arcs manifest files to include
      arcs_sdk_deps: build targets for the Arcs SDK to be included
      data: list of Arcs manifests needed at runtime
      deps: list of imported manifests
      platforms: list of target platforms (currently, `jvm` and `wasm` supported).
      test_harness: whether to generate a test harness target
      visibility: visibility of the generated arcs_kt_library

    Returns:
      Dictionary of:
        "outs": output files. other rules can use this to bundle outputs.
        "deps": deps of those outputs.
    """
    supported = ["jvm", "wasm"]

    # TODO(#5018): the append deliberately mutates the caller's list so that
    # sibling macros (e.g. arcs_kt_plan inside arcs_kt_gen) also see "jvm".
    if "jvm" not in platforms:
        platforms.append("jvm")
    outs = []
    outdeps = []
    for src in srcs:
        for ext in platforms:
            if ext not in supported:
                # Fixed: previously this used "%s" placeholders with .format()
                # (which never substitutes) and `supported.join(",")` (join is
                # a string method, not a list method), so the fail message
                # could never be rendered.
                fail("Platform %s not allowed; only %s supported." % (ext, ", ".join(supported)))
            wasm = ext == "wasm"
            genrule_name = replace_arcs_suffix(src, "_genrule_" + ext)
            out = replace_arcs_suffix(src, "_GeneratedSchemas.%s.kt" % ext)
            outs.append(out)
            _run_schema2wasm(
                name = genrule_name,
                src = src,
                out = out,
                deps = deps + data,
                wasm = wasm,
                language_flag = "--kotlin",
                language_name = "Kotlin",
            )
    arcs_kt_library(
        name = name,
        srcs = outs,
        platforms = platforms,
        deps = arcs_sdk_deps,
        visibility = visibility,
    )
    outdeps = outdeps + arcs_sdk_deps
    if (test_harness):
        test_harness_outs = []
        for src in srcs:
            out = replace_arcs_suffix(src, "_TestHarness.kt")
            test_harness_outs.append(out)
            _run_schema2wasm(
                name = replace_arcs_suffix(src, "_genrule_test_harness"),
                src = src,
                out = out,
                deps = deps,
                wasm = False,
                test_harness = True,
                language_flag = "--kotlin",
                language_name = "Kotlin",
            )
        arcs_kt_library(
            name = name + "_test_harness",
            testonly = 1,
            srcs = test_harness_outs,
            deps = arcs_sdk_deps + [
                ":" + name,
                "//third_party/java/arcs:testing",
                "//third_party/kotlin/kotlinx_coroutines",
            ],
        )
    return {"outs": outs, "deps": outdeps}
def arcs_kt_gen(
        name,
        srcs,
        arcs_sdk_deps,
        data = [],
        deps = [],
        platforms = ["jvm"],
        test_harness = False,
        visibility = None):
    """Generates Kotlin files for the given .arcs files.

    This is a convenience wrapper that combines all code generation targets based on arcs files:
    an arcs_manifest, an arcs_kt_schema, an arcs_kt_plan and a combined arcs_kt_library.

    Args:
      name: name of the target to create
      srcs: list of Arcs manifest files to include
      arcs_sdk_deps: build targets for the Arcs SDK to be included
      data: list of Arcs manifests needed at runtime
      deps: list of dependent arcs targets, such as an arcs_kt_gen target in a different package
      platforms: list of target platforms (currently, `jvm` and `wasm` supported).
      test_harness: whether to generate a test harness target
      visibility: visibility of the generated arcs_kt_library
    """
    manifest_name = name + "_manifest"
    schema_name = name + "_schema"
    plan_name = name + "_plan"

    # Bundle the manifests (plus any manifest-only deps) for the generators below.
    arcs_manifest(
        name = manifest_name,
        srcs = srcs,
        manifest_proto = False,
        deps = manifest_only(deps) + data,
    )

    # Schema / entity / particle code generation.
    schema = arcs_kt_schema(
        name = schema_name,
        srcs = srcs,
        arcs_sdk_deps = arcs_sdk_deps,
        deps = deps + [":" + manifest_name],
        platforms = platforms,
        test_harness = test_harness,
        visibility = visibility,
    )
    # Plan code generation; consumes the schema outputs.
    plan = arcs_kt_plan(
        name = plan_name,
        srcs = srcs,
        arcs_sdk_deps = arcs_sdk_deps,
        data = [":" + manifest_name],
        deps = deps + [":" + schema_name],
        platforms = platforms,
        visibility = visibility,
    )

    # generates combined library. This allows developers to more easily see what is generated.
    arcs_kt_library(
        name = name,
        srcs = depset(schema["outs"] + plan["outs"]).to_list(),
        platforms = platforms,
        deps = depset(schema["deps"] + plan["deps"] + manifest_only(deps, inverse = True)).to_list(),
        visibility = visibility,
    )
# Lets build_cleaner map the generated "-kt" targets back to this macro.
register_extension_info(
    extension = arcs_kt_gen,
    label_regex_for_dep = "{extension_name}\\-kt(_DO_NOT_DEPEND_JVM)?",
)
| StarcoderdataPython |
80057 | <filename>erigam/__init__.py
import logging
import os
import traceback
from flask import Flask, request, render_template
from erigam.lib.request_methods import (
connect_redis,
create_session,
set_cookie,
db_commit,
disconnect_redis,
disconnect_sql,
cache_breaker
)
from erigam.views import (
main,
backend,
chat,
admin,
log
)
app = Flask(__name__)

# Cache breaking
app.url_defaults(cache_breaker)

# Pre and post request stuff
app.before_request(connect_redis)
app.before_request(create_session)
app.after_request(set_cookie)
app.after_request(db_commit)
app.teardown_request(disconnect_redis)
app.teardown_request(disconnect_sql)

# Flask settings
app.url_map.strict_slashes = False

# Jinja settings
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True

# Throw tracebacks to console
app.config["SENTRY_PRIVATE_DSN"] = os.environ.get("SENTRY_PRIVATE_DSN", None)
app.config["SENTRY_PUBLIC_DSN"] = os.environ.get("SENTRY_PUBLIC_DSN", None)
app.config['PROPAGATE_EXCEPTIONS'] = True

# Debug mode is enabled by the mere presence of DEBUG in the environment.
if 'DEBUG' in os.environ:
    app.config['DEBUG'] = True

# Sentry error reporting: only configured when a private DSN is provided.
if app.config["SENTRY_PRIVATE_DSN"]: # pragma: no cover
    from raven.contrib.flask import Sentry
    app.config["SENTRY_INCLUDE_PATHS"] = ["erigam"]
    sentry = Sentry(app,
        dsn=app.config["SENTRY_PRIVATE_DSN"],
        logging=True,
        level=logging.ERROR,
    )
    # Silence the noisy "uncaught" logger emitted by raven itself.
    logging.getLogger("sentry.errors.uncaught").setLevel(logging.CRITICAL)
else:
    sentry = None

# Register Blueprints
app.register_blueprint(main.blueprint)
app.register_blueprint(backend.blueprint, url_prefix='/chat_ajax')
app.register_blueprint(chat.blueprint, url_prefix='/chat')
app.register_blueprint(admin.blueprint, url_prefix='/admin')
app.register_blueprint(log.blueprint)
@app.errorhandler(404)
def notfound_error(e):
    """Serve the custom "not found" page for any unmatched URL."""
    body = render_template("errors/404.html")
    return body, 404
# Outside debug mode, register a catch-all handler rendering a 500 page.
if not app.config['DEBUG']:
    @app.errorhandler(Exception)
    def production_error(e):
        # For AJAX requests, re-raise unless debugging was explicitly
        # requested via a `debug` query/form parameter.
        # NOTE(review): `request.is_xhr` was removed in Werkzeug 1.0 —
        # confirm the pinned Flask/Werkzeug versions still provide it.
        if request.is_xhr:
            if 'debug' not in request.args and 'debug' not in request.form:
                raise
        # Otherwise show the traceback on a friendly 500 page.
        return render_template("errors/exception.html",
            traceback=traceback.format_exc()
        ), 500
| StarcoderdataPython |
197010 | <reponame>hongyuanChrisLi/StartupInsights
import os
import logging
import sys
import __init__ as init
def get_env_variable(var_name):
    """
    Return the value of environment variable *var_name*.

    Logs the failure and exits the process with ``init.ENV_VAR_NOT_FOUND``
    when the variable is not set.
    """
    # Fixed: the module only does `import logging`, which does not load the
    # `logging.config` submodule, so `logging.config.fileConfig` raised
    # AttributeError at runtime.
    import logging.config

    logging.config.fileConfig('logging.ini')
    logger = logging.getLogger(__name__)
    try:
        return os.environ[var_name]
    except KeyError:
        # exception() records the traceback alongside the message.
        logger.exception("Environment Variable Not Found: " + str(var_name))
        sys.exit(init.ENV_VAR_NOT_FOUND)
| StarcoderdataPython |
1604453 | """
Solvers
-------
This part of the package provides wrappers around Assimulo solvers.
"""
from assimulo.problem import Explicit_Problem
import numpy as np
import sys
from means.simulation import SensitivityTerm
from means.simulation.trajectory import Trajectory, TrajectoryWithSensitivityData
import inspect
from means.util.memoisation import memoised_property, MemoisableObject
from means.util.sympyhelpers import to_one_dim_array
NP_FLOATING_POINT_PRECISION = np.double
#-- Easy initialisation utilities -------------------------------------------------------------
class UniqueNameInitialisationMixin(object):
    """Mixin for solver classes that expose a unique registry name.

    Concrete solvers override :meth:`unique_name` to return the string under
    which :func:`available_solvers` registers them (lowercased).
    """

    @classmethod
    def unique_name(cls):
        # Sentinel meaning "not overridden"; concrete solvers return a string.
        # (Fixed: the classmethod's first argument was misleadingly named
        # `self` instead of `cls`.)
        return NotImplemented
class SolverException(Exception):
    """Wrapper for exceptions raised by the underlying assimulo solvers.

    The wrapped exception is stored as a (class, kwargs) pair rather than as
    an object because some solver exceptions (e.g. ``CVodeError``) do not
    pickle well.
    """

    # Name-mangled storage for the wrapped exception's class and attributes.
    __base_exception_class = None
    __base_exception_kwargs = None

    def __init__(self, message, base_exception=None):
        if base_exception is not None:
            if message is None:
                message = ''
            # We need to take message argument as otherwise SolverException is unpickleable
            message += '{0.__class__.__name__}: {0!s}'.format(base_exception)

        super(SolverException, self).__init__(message)

        # CVodeError does not serialise well, so let's store it as a set of arguments and create the base exception
        # on the fly, rather than storing the actual object
        if base_exception is not None:
            self.__base_exception_class = base_exception.__class__
            self.__base_exception_kwargs = base_exception.__dict__.copy()

    @property
    def base_exception(self):
        # Reconstruct the wrapped exception lazily from its stored arguments.
        if self.__base_exception_class is not None:
            return self.__base_exception_class(**self.__base_exception_kwargs)

    def __eq__(self, other):
        # NOTE(review): relies on Exception.message (a Python 2 attribute),
        # and no matching __hash__ is defined alongside __eq__.
        return isinstance(other, self.__class__) and \
            self.message == other.message and self.__base_exception_class == other.__base_exception_class and \
            self.__base_exception_kwargs == other.__base_exception_kwargs
def available_solvers(with_sensitivity_support=False):
    """Return a dict mapping unique (lowercased) solver names to solver classes.

    :param with_sensitivity_support: if True, return only solvers that support
        parameter sensitivity computation; otherwise only plain solvers.
    :rtype: dict
    """
    members = inspect.getmembers(sys.modules[__name__])
    initialisable_solvers = {}
    # Some metaprogramming here: look for all classes at this module that are subclasses of
    # `UniqueNameInitialisationMixin` and `SolverBase`. Compile a dictionary of these.
    for name, candidate in members:
        if inspect.isclass(candidate) and issubclass(candidate, SolverBase) \
                and issubclass(candidate, UniqueNameInitialisationMixin) \
                and candidate != UniqueNameInitialisationMixin:

            if with_sensitivity_support and not issubclass(candidate, SensitivitySolverBase):
                # If we need sensitivity support, skip all non-sensitivity solvers
                continue
            elif not with_sensitivity_support and issubclass(candidate, SensitivitySolverBase):
                # If we don't need sensitivity support, skip all solvers with sensitivity support
                continue

            key = candidate.unique_name().lower()
            # Guard against two solvers claiming the same name.
            # (Fixed: the assertion previously tested the *bound method*
            # `unique_name` for membership instead of its return value, which
            # could never fail; the loop variable also shadowed the builtin
            # `object`.)
            assert key not in initialisable_solvers
            initialisable_solvers[key] = candidate

    return initialisable_solvers
#-- Exception handling utilities -----------------------------------------------------------
def parse_flag(exception_message):
    """
    Parse the flag from the solver exception.

    e.g.

    >>> parse_flag("Exception: Dopri5 failed with flag -3")
    -3

    :param exception_message: message from the exception
    :type exception_message: str
    :return: flag id, or None when no flag can be parsed
    :rtype: int
    """
    import re
    match = re.match('.* failed with flag (-\d+)', exception_message)
    # (Fixed: a broad `except Exception` previously masked the only expected
    # failure mode — no regex match — along with any real programming error.)
    if match is None:
        return None
    # group(1) is guaranteed by the regex to be a negative integer literal.
    return int(match.group(1))
#-- Base solver functionality ---------------------------------------------------------------
def _set_kwargs_as_attributes(instance, **kwargs):
for attribute, value in kwargs.iteritems():
setattr(instance, attribute, value)
return instance
def _wrap_results_to_trajectories(simulated_timepoints, simulated_values, descriptions):
    """Pair each column of `simulated_values` with its description.

    Returns one :class:`Trajectory` per description/column; all trajectories
    share the same timepoints.
    """
    n_timepoints, n_columns = simulated_values.shape
    assert(len(descriptions) == n_columns)
    assert(len(simulated_timepoints) == n_timepoints)

    # One trajectory per value column, in description order.
    return [Trajectory(simulated_timepoints, column, description)
            for description, column in zip(descriptions, simulated_values.T)]
class SolverBase(MemoisableObject):
    """
    This acts as a base class for ODE solvers used in `means`.
    It wraps around the solvers available in :module:`assimulo` package, and provides some basic functionality
    that allows solvers to be used with `means` objects.
    """

    _parameters = None
    _initial_conditions = None
    _problem = None
    _starting_time = None
    _options = None

    def __init__(self, problem, parameters, initial_conditions, starting_time=0.0, **options):
        """
        :param problem: Problem to simulate
        :type problem: :class:`~means.approximation.ODEProblem`
        :param parameters: Parameters of the solver. One entry for each constant in `problem`
        :type parameters: :class:`iterable`
        :param initial_conditions: Initial conditions of the system. One for each of the equations.
                                   Assumed to be zero, if not specified
        :type initial_conditions: :class:`iterable`
        :param starting_time: Starting time for the solver, defaults to 0.0
        :type starting_time: float
        :param options: Options to be passed to the specific instance of the solver.
        """
        parameters = to_one_dim_array(parameters, dtype=NP_FLOATING_POINT_PRECISION)
        initial_conditions = to_one_dim_array(initial_conditions, dtype=NP_FLOATING_POINT_PRECISION)

        # Sanity-check the shapes against the problem definition.
        assert(parameters.shape == (len(problem.parameters),))
        assert(initial_conditions.shape[0] == problem.number_of_equations)

        self._parameters = parameters
        self._initial_conditions = initial_conditions
        self._starting_time = float(starting_time)
        self._problem = problem
        self._options = options

    def simulate(self, timepoints):
        """
        Simulate initialised solver for the specified timepoints

        :param timepoints: timepoints that will be returned from simulation
        :return: a list of trajectories for each of the equations in the problem.
        """
        solver = self._solver
        last_timepoint = timepoints[-1]
        try:
            # ncp_list makes assimulo report values exactly at `timepoints`.
            simulated_timepoints, simulated_values = solver.simulate(last_timepoint, ncp_list=timepoints)
        except (Exception, self._solver_exception_class) as e:
            # NOTE(review): `_solver_exception_class` may be None; `Exception`
            # is listed first, so matching does not reach the None entry for
            # ordinary exceptions.
            # The exceptions thrown by solvers are usually hiding the real cause, try to see if it is
            # our right_hand_side_as_function that is broken first
            try:
                self._problem.right_hand_side_as_function(self._initial_conditions, self._parameters)
            except:
                # If it is broken, throw that exception instead
                raise
            else:
                # If it is not, handle the original exception
                self._handle_solver_exception(e)

        trajectories = self._results_to_trajectories(simulated_timepoints, simulated_values)
        return trajectories

    def _handle_solver_exception(self, solver_exception):
        """
        This function handles any exceptions that occurred in the solver and have been proven not to be
        related to our right_hand_side function.
        Subclasses can override it.

        :param solver_exception: the exception raised by the solver
        :type solver_exception: Exception
        """
        # By default just re-raise it with our wrapper
        raise SolverException(None, solver_exception)

    def _default_solver_instance(self):
        # Subclasses must return a configured assimulo solver instance.
        raise NotImplementedError

    @property
    def _solver_exception_class(self):
        """
        Property returning the exception class thrown by a specific solver; subclasses can override.
        """
        return None

    @memoised_property
    def _solver(self):
        solver = self._default_solver_instance()
        # Quiet output by default; remaining options become solver attributes.
        verbosity = self._options.pop('verbosity', 50)
        return _set_kwargs_as_attributes(solver, verbosity=verbosity, **self._options)

    @memoised_property
    def _assimulo_problem(self):
        rhs = self._problem.right_hand_side_as_function
        parameters = self._parameters
        initial_conditions = self._initial_conditions
        initial_timepoint = self._starting_time

        # Parameters are bound via closure here; sensitivity subclasses pass
        # them through the problem instead.
        model = Explicit_Problem(lambda t, x: rhs(x, parameters),
                                 initial_conditions, initial_timepoint)

        return model

    def _results_to_trajectories(self, simulated_timepoints, simulated_values):
        """
        Convert the resulting results into a list of trajectories

        :param simulated_timepoints: timepoints output from a solver
        :param simulated_values: values returned by the solver
        :return:
        """
        descriptions = self._problem.left_hand_side_descriptors

        return _wrap_results_to_trajectories(simulated_timepoints, simulated_values, descriptions)
class CVodeMixin(UniqueNameInitialisationMixin, object):
    """Mixin providing CVode solver construction and its exception class."""

    @classmethod
    def unique_name(cls):
        return 'cvode'

    @property
    def _solver_exception_class(self):
        # Imported lazily so the module can be imported without sundials.
        from assimulo.solvers.sundials import CVodeError
        return CVodeError

    def _cvode_instance(self, model, options):
        from assimulo.solvers.sundials import CVode
        solver = CVode(model)

        # `usesens` is managed by the Solver/SensitivitySolver subclasses;
        # callers may not set it directly.
        if 'usesens' in options:
            raise AttributeError('Cannot set \'usesens\' parameter. Use Simulation or SimulationWithSensitivities for '
                                 'sensitivity calculations')

        return solver
class CVodeSolver(SolverBase, CVodeMixin):
    """Plain CVode solver, without sensitivity calculation."""

    def _default_solver_instance(self):
        instance = self._cvode_instance(self._assimulo_problem, self._options)
        # Sensitivities are explicitly disabled for this non-parametric solver.
        instance.usesens = False
        return instance
class ODE15sMixin(CVodeMixin):
    """
    A CVODE solver that mimics the parameters used in `ode15s`_ solver in MATLAB.

    The different parameters that are set differently by default are:

    ``discr``
        Set to ``'BDF'`` by default

    ``atol``
        Set to ``1e-6``

    ``rtol``
        Set to ``1e-3``

    .. _`ode15s`: http://www.mathworks.ch/ch/help/matlab/ref/ode15s.html
    """
    # MATLAB ode15s-like defaults (see class docstring).
    ATOL = 1e-6
    RTOL = 1e-3
    MINH = 5.684342e-14

    @classmethod
    def unique_name(cls):
        return 'ode15s'

    def _cvode_instance(self, model, options):
        solver = super(ODE15sMixin, self)._cvode_instance(model, options)

        # Each pop() removes the key from `options` so SolverBase._solver does
        # not re-apply it as a plain attribute afterwards.
        # BDF method below makes it a key similarity to the ode15s
        solver.discr = options.pop('discr', 'BDF')
        solver.atol = options.pop('atol', self.ATOL)
        solver.rtol = options.pop('rtol', self.RTOL)
        solver.maxord = options.pop('maxord', 5)
        # If minh is not set, CVODE would try to continue the simulation, issuing a warning
        # We set it here so this simulation fails.
        solver.minh = options.pop('minh', self.MINH)
        return solver
class ODE15sLikeSolver(SolverBase, ODE15sMixin):
    """CVode solver configured with MATLAB ode15s-like defaults."""

    def _default_solver_instance(self):
        instance = self._cvode_instance(self._assimulo_problem, self._options)
        # Sensitivities are explicitly disabled for this non-parametric solver.
        instance.usesens = False
        return instance
class Dopri5Solver(SolverBase, UniqueNameInitialisationMixin):
    """Dormand-Prince (dopri5) explicit Runge-Kutta solver."""

    def _default_solver_instance(self):
        from assimulo.solvers.runge_kutta import Dopri5
        return Dopri5(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'dopri5'

    def _handle_solver_exception(self, solver_exception):
        # Attach human-readable documentation for known Dopri5 failure flags.
        flag_descriptions = {-1: 'Input is not consistent',
                             -2: 'Larger NMAX is needed',
                             -3: 'Step size becomes too small',
                             -4: 'Problem is probably stiff'}
        flag = parse_flag(solver_exception.message)
        description = flag_descriptions.get(flag)
        if description is None:
            # Unknown flag: fall back to the original exception.
            exception = solver_exception
        else:
            exception = Exception('Dopri5 failed with flag {0}: {1}'.format(flag, description))
        # Use the superclass method to rethrow the exception with our wrapper.
        super(Dopri5Solver, self)._handle_solver_exception(exception)
class LSODARSolver(SolverBase, UniqueNameInitialisationMixin):
    """LSODAR solver from ODEPACK, with automatic stiff/non-stiff switching."""

    @property
    def _solver_exception_class(self):
        from assimulo.exception import ODEPACK_Exception
        return ODEPACK_Exception

    def _default_solver_instance(self):
        from assimulo.solvers import LSODAR
        return LSODAR(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'lsodar'

    def _handle_solver_exception(self, solver_exception):
        from assimulo.exception import ODEPACK_Exception
        # Attach human-readable documentation for known LSODAR failure flags.
        flag_descriptions = {
            -1: 'Excess work done on this call (perhaps wrong jt)',
            -2: 'Excess accuracy requested (tolerances too small)',
            -3: 'Illegal input detected (see printed message)',
            -4: 'Repeated error test failures (check all inputs)',
            -5: 'Repeated convergence failures (perhaps bad jacobian supplied or wrong choice of '
                'jt or tolerances)',
            -6: 'Error weight became zero during problem.',
            -7: 'Work space insufficient to finish (see messages)',
        }
        flag = parse_flag(solver_exception.message)
        description = flag_descriptions.get(flag)
        if description is None:
            # Unknown flag: fall back to the original exception.
            exception = solver_exception
        else:
            exception = ODEPACK_Exception('LSODAR failed with flag {0}: {1}'.format(flag, description))
        # Use the superclass method to rethrow the exception with our wrapper.
        super(LSODARSolver, self)._handle_solver_exception(exception)
class ExplicitEulerSolver(SolverBase, UniqueNameInitialisationMixin):
    """Fixed-step explicit Euler solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import ExplicitEuler
        return ExplicitEuler(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'euler'

    def simulate(self, timepoints):
        # The Euler solver does not honour the requested timepoints exactly,
        # so resample every returned trajectory onto them.
        raw_trajectories = super(ExplicitEulerSolver, self).simulate(timepoints)
        return [trajectory.resample(timepoints) for trajectory in raw_trajectories]
class RungeKutta4Solver(SolverBase, UniqueNameInitialisationMixin):
    """Classic fixed-step 4th-order Runge-Kutta solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import RungeKutta4
        return RungeKutta4(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rungekutta4'

    def simulate(self, timepoints):
        # RungeKutta4 does not honour the requested timepoints exactly,
        # so resample every returned trajectory onto them.
        raw_trajectories = super(RungeKutta4Solver, self).simulate(timepoints)
        return [trajectory.resample(timepoints) for trajectory in raw_trajectories]
class RungeKutta34Solver(SolverBase, UniqueNameInitialisationMixin):
    """Adaptive Runge-Kutta 3(4) solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import RungeKutta34
        return RungeKutta34(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rungekutta34'
class Radau5Solver(SolverBase, UniqueNameInitialisationMixin):
    """Implicit Radau IIA solver of order 5."""

    def _default_solver_instance(self):
        from assimulo.solvers import Radau5ODE
        return Radau5ODE(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'radau5'

    def _handle_solver_exception(self, solver_exception):
        # Attach human-readable documentation for known Radau5 failure flags.
        flag_descriptions = {-1: 'Input is not consistent',
                             -2: 'Larger NMAX is needed',
                             -3: 'Step size becomes too small',
                             -4: 'Matrix is repeatedly singular'}
        flag = parse_flag(solver_exception.message)
        description = flag_descriptions.get(flag)
        if description is None:
            # Unknown flag: fall back to the original exception.
            exception = solver_exception
        else:
            exception = Exception('Radau5 failed with flag {0}: {1}'.format(flag, description))
        # Use the superclass method to rethrow the exception with our wrapper.
        super(Radau5Solver, self)._handle_solver_exception(exception)
class RodasSolver(SolverBase, UniqueNameInitialisationMixin):
    """Rosenbrock (Rodas) solver for stiff problems."""

    def _default_solver_instance(self):
        from assimulo.solvers import RodasODE
        return RodasODE(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rodas'

    def _handle_solver_exception(self, solver_exception):
        # Attach human-readable documentation for known Rodas failure flags.
        flag_descriptions = {-1: 'Input is not consistent',
                             -2: 'Larger NMAX is needed',
                             -3: 'Step size becomes too small',
                             -4: 'Matrix is repeatedly singular'}
        flag = parse_flag(solver_exception.message)
        description = flag_descriptions.get(flag)
        if description is None:
            # Unknown flag: fall back to the original exception.
            exception = solver_exception
        else:
            exception = Exception('Rodas failed with flag {0}: {1}'.format(flag, description))
        # Use the superclass method to rethrow the exception with our wrapper.
        super(RodasSolver, self)._handle_solver_exception(exception)
#-- Solvers with sensitivity support -----------------------------------------------------------------------------------
def _add_sensitivity_data_to_trajectories(trajectories, raw_sensitivity_data, parameters):
    """Attach per-parameter sensitivity trajectories to each trajectory.

    ``raw_sensitivity_data[j, :, i]`` holds the sensitivity of term ``i`` to
    parameter ``j`` over time.
    """
    augmented = []
    for i, trajectory in enumerate(trajectories):
        # Collect the sensitivities into a list of Trajectory objects,
        # one per parameter, in parameter order.
        sensitivity_trajectories = []
        for j, parameter in enumerate(parameters):
            term = SensitivityTerm(trajectory.description, parameter)
            sensitivity_trajectories.append(
                Trajectory(trajectory.timepoints, raw_sensitivity_data[j, :, i], term))

        augmented.append(
            TrajectoryWithSensitivityData.from_trajectory(trajectory, sensitivity_trajectories))

    return augmented
class SensitivitySolverBase(SolverBase):
    """Base class for solvers that also compute parameter sensitivities."""

    @property
    def _assimulo_problem(self):
        # NOTE(review): plain property here (not memoised_property as in
        # SolverBase) — a fresh Explicit_Problem is built on each access.
        rhs = self._problem.right_hand_side_as_function
        parameters = self._parameters
        initial_conditions = self._initial_conditions
        initial_timepoint = self._starting_time

        # Solvers with sensitivity support should be able to accept parameters
        # into rhs function directly
        model = Explicit_Problem(lambda t, x, p: rhs(x, p),
                                 initial_conditions, initial_timepoint)

        model.p0 = np.array(parameters)
        return model

    def _results_to_trajectories(self, simulated_timepoints, simulated_values):
        trajectories = super(SensitivitySolverBase, self)._results_to_trajectories(simulated_timepoints,
                                                                                  simulated_values)
        # Sensitivities accumulated by the solver; indexed [parameter, timepoint, term].
        sensitivities_raw = np.array(self._solver.p_sol)

        trajectories_with_sensitivity_data = _add_sensitivity_data_to_trajectories(trajectories, sensitivities_raw,
                                                                                   self._problem.parameters)

        return trajectories_with_sensitivity_data
class CVodeSolverWithSensitivities(SensitivitySolverBase, CVodeMixin):
    """CVode solver that also computes parameter sensitivities."""

    def _default_solver_instance(self):
        instance = self._cvode_instance(self._assimulo_problem, self._options)
        # Sensitivity calculation must be enabled, with continuous reporting
        # so p_sol is populated along the way.
        instance.usesens = True
        instance.report_continuously = True
        return instance
class ODE15sSolverWithSensitivities(SensitivitySolverBase, ODE15sMixin):
    """ode15s-like CVode solver that also computes parameter sensitivities."""

    def _default_solver_instance(self):
        instance = self._cvode_instance(self._assimulo_problem, self._options)
        # Sensitivity calculation must be enabled, with continuous reporting
        # so p_sol is populated along the way.
        instance.usesens = True
        instance.report_continuously = True
        return instance
| StarcoderdataPython |
1686476 | <reponame>MaxRichter/fastapi-celery
from app.db import models
def test_get_users(client, test_superuser, superuser_token_headers):
    """Listing users as superuser returns exactly the one seeded superuser."""
    response = client.get("/api/v1/users", headers=superuser_token_headers)
    assert response.status_code == 200
    expected = [
        {
            "id": test_superuser.id,
            "email": test_superuser.email,
            "is_active": test_superuser.is_active,
            "is_superuser": test_superuser.is_superuser,
        }
    ]
    assert response.json() == expected
def test_delete_user(client, test_superuser, test_db, superuser_token_headers):
    """Deleting an existing user succeeds and empties the users table."""
    url = f"/api/v1/users/{test_superuser.id}"
    response = client.delete(url, headers=superuser_token_headers)
    assert response.status_code == 200
    # The only user in the test DB was the superuser, so the table is empty now.
    assert test_db.query(models.User).all() == []
def test_delete_user_not_found(client, superuser_token_headers):
    """Deleting an unknown user id yields HTTP 404."""
    status = client.delete(
        "/api/v1/users/4321", headers=superuser_token_headers
    ).status_code
    assert status == 404
def test_edit_user(client, test_superuser, superuser_token_headers):
    """A superuser can edit any user; the response echoes the stored fields."""
    payload = {
        "email": "<EMAIL>",
        "is_active": False,
        "is_superuser": True,
        "first_name": "Joe",
        "last_name": "Smith",
        "password": "<PASSWORD>",
    }
    response = client.put(
        f"/api/v1/users/{test_superuser.id}",
        json=payload,
        headers=superuser_token_headers,
    )
    assert response.status_code == 200
    # The API must never echo the password back; everything else round-trips.
    expected = {k: v for k, v in payload.items() if k != "password"}
    expected["id"] = test_superuser.id
    assert response.json() == expected
def test_edit_user_not_found(client, test_db, superuser_token_headers):
    """Editing a nonexistent user id returns HTTP 404."""
    payload = {
        "email": "<EMAIL>",
        "is_active": False,
        "is_superuser": False,
        "password": "<PASSWORD>",
    }
    status = client.put(
        "/api/v1/users/1234", json=payload, headers=superuser_token_headers
    ).status_code
    assert status == 404
def test_get_user(client, test_user, superuser_token_headers):
    """Fetching a single user returns its public representation."""
    response = client.get(
        f"/api/v1/users/{test_user.id}", headers=superuser_token_headers
    )
    assert response.status_code == 200
    expected = {
        "id": test_user.id,
        "email": test_user.email,
        # The DB may store is_active as 0/1; the API serializes a real bool.
        "is_active": bool(test_user.is_active),
        "is_superuser": test_user.is_superuser,
    }
    assert response.json() == expected
def test_user_not_found(client, superuser_token_headers):
    """GET on an unknown user id returns HTTP 404."""
    status = client.get(
        "/api/v1/users/123", headers=superuser_token_headers
    ).status_code
    assert status == 404
def test_authenticated_user_me(client, user_token_headers):
    """Any authenticated user can fetch their own profile."""
    status = client.get("/api/v1/users/me", headers=user_token_headers).status_code
    assert status == 200
def test_authenticated_edit_user_me(client, test_user, user_token_headers):
    """A regular user can edit their own profile via /users/me.

    The response must echo the updated record (with the user's id) and
    must never include the password.
    """
    new_user = {
        "email": "<EMAIL>",
        "is_active": False,
        "first_name": "Joe",
        "last_name": "Smith",
        "password": "<PASSWORD>",
    }
    # Fixed: the path was a pointless f-string (no placeholders, F541).
    response = client.put(
        "/api/v1/users/me", json=new_user, headers=user_token_headers,
    )
    assert response.status_code == 200
    new_user["id"] = test_user.id
    new_user.pop("password")
    assert response.json() == new_user
def test_unauthenticated_routes(client):
    """Every user route rejects requests lacking credentials with 401."""
    calls = [
        (client.get, "/api/v1/users/me"),
        (client.get, "/api/v1/users"),
        (client.get, "/api/v1/users/123"),
        (client.put, "/api/v1/users/123"),
        (client.put, "/api/v1/users/me"),
        (client.delete, "/api/v1/users/123"),
    ]
    for request, path in calls:
        assert request(path).status_code == 401
def test_unauthorized_routes(client, user_token_headers):
    """Non-superuser tokens are forbidden (403) on admin-only routes."""
    for request, path in (
        (client.get, "/api/v1/users"),
        (client.get, "/api/v1/users/123"),
        (client.put, "/api/v1/users/123"),
    ):
        assert request(path, headers=user_token_headers).status_code == 403
| StarcoderdataPython |
3234443 | from pathlib import Path
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
# covered by cleaner
# Spark session; Arrow speeds up pandas interop for columnar transfers.
spark = (
    SparkSession.builder.appName("Process ChEMBL25 Assays")
    .config("spark.sql.execution.arrow.enabled", "true")
    .getOrCreate()
)
# Root of the shared data directory on this cluster.
_data_root = Path("/local00/bioinf/tpp")
df = spark.read.parquet(
    (_data_root / "chembl_25/merged_data_mixed.parquet/").as_posix()
)
# One row per (compound, fingerprint feature): explode the ECFC4 array.
# NOTE(review): the join below matches these exploded ECFC4 values against
# ids built from the ECFP column -- confirm both columns share the same
# feature vocabulary, otherwise the join silently drops rows.
df_test = df.select("inchikey", F.explode("ECFC4"))
# Schema for the (feature string, dense integer id) lookup table.
ecfp_ids_schema = T.StructType(
    [
        T.StructField("ecfp", T.StringType(), False),
        T.StructField("id", T.IntegerType(), False),
    ]
)
# Count occurrences of each (whitespace-trimmed) ECFP feature.
ecfp_frequencies = (
    df.select(F.explode("ECFP").alias("ECFP"))
    .select(F.trim(F.col("ECFP")).alias("ECFP"))
    .groupby("ECFP")
    .count()
)
# Keep features seen more than 25 times and assign each a dense integer id.
ecfp_ids = (
    ecfp_frequencies.filter(F.col("count") > 25)
    .select("ECFP")
    .rdd.map(lambda x: x[0])
    .zipWithIndex()
    .toDF(ecfp_ids_schema)
)
# Map each compound's exploded feature values ("col" from explode) to ids.
tmp = (
    df_test.alias("ta")
    .join(ecfp_ids.alias("tb"), F.col("ta.col") == F.col("tb.ECFP"))
    .select("inchikey", "id")
)
# Collect the distinct feature ids per compound, sorted ascending.
df_ecfp = tmp.groupby("inchikey").agg(
    F.sort_array(F.collect_set("id")).alias("ecfp_ids")
)
df_ecfp.write.parquet((_data_root / "tmp/ecfp_formatted.parquet").as_posix())
# Read the output back as a sanity check that the parquet file is loadable.
df_ecfp_test = spark.read.parquet(
    (_data_root / "tmp/ecfp_formatted.parquet").as_posix()
)
| StarcoderdataPython |
3239253 | from django.db import models
# Model Tasks 1-5
#####################################
class Teacher(models.Model):
    """A teacher, identified by first name and surname."""
    firstname = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    def __str__(self):
        # Admin/listing display uses the first name only.
        return self.firstname
class Student(models.Model):
    """A student with basic demographics and a classroom/teacher assignment."""
    firstname = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    age = models.IntegerField()
    classroom = models.IntegerField()
    # NOTE(review): teacher is stored as free text rather than a
    # ForeignKey(Teacher) -- presumably intentional for this exercise, but
    # it cannot enforce referential integrity; confirm before relying on it.
    teacher = models.CharField(max_length=100)
    def __str__(self):
        return self.firstname
###################################### | StarcoderdataPython |
3291379 | <reponame>githubhjx/Tfrecords<gh_stars>0
from skimage import io, transform
import glob
import os
import numpy as np
import tensorflow as tf
# Location of the TFRecord file produced by the companion writer script.
path = '../train.tfrecords'
def read_and_decode(filename):
    """Build a TF1 queue-based pipeline yielding one (image, label) pair.

    The returned tensors are symbolic: they must be evaluated inside a
    session with queue runners started (legacy tf.train input pipeline).
    """
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw' : tf.FixedLenFeature([], tf.string),
                                       })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    # Images were serialized as raw 224x224x3 byte buffers.
    img = tf.reshape(img, [224, 224, 3])
    # Rescale pixel values from [0, 255] to [-0.5, 0.5].
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label
img, label = read_and_decode(path)
| StarcoderdataPython |
3262961 | <reponame>NeonDaniel/combo-lock
from memory_tempfile import MemoryTempfile
import os
def get_ram_directory(folder):
    """Return the path of *folder* inside a RAM-backed temp dir, creating it.

    Uses MemoryTempfile (falling back to an on-disk temp dir when no
    in-memory filesystem is available) and guarantees the directory exists.

    :param folder: name of the subdirectory to create under the temp dir
    :return: absolute path of the (now existing) directory
    """
    tempfile = MemoryTempfile(fallback=True)
    path = os.path.join(tempfile.gettempdir(), folder)
    # exist_ok avoids the check-then-create race of the previous
    # `if not exists: makedirs` pattern (two processes could both pass the
    # check, and the loser would crash with FileExistsError).
    os.makedirs(path, exist_ok=True)
    return path
| StarcoderdataPython |
82227 | from typing import Dict, Optional
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import when
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.data_types.expression import AutoMapperDataTypeExpression
from spark_auto_mapper.helpers.value_parser import AutoMapperValueParser
from spark_auto_mapper.type_definitions.wrapper_types import (
AutoMapperColumnOrColumnLikeType,
AutoMapperAnyDataType,
)
class AutoMapperMapDataType(AutoMapperDataTypeExpression):
    """
    Applies the supplied mapping to the value of column: the column value is
    compared (null-safely) against each mapping key and replaced by the
    corresponding mapped expression, with an optional fallback default.
    """
    def __init__(
        self,
        column: AutoMapperColumnOrColumnLikeType,
        mapping: Dict[Optional[AutoMapperTextInputType], AutoMapperAnyDataType],
        default: Optional[AutoMapperAnyDataType] = None,
    ):
        super().__init__(value="")
        # Column whose value is looked up in the mapping.
        self.column: AutoMapperColumnOrColumnLikeType = column
        # Normalize every mapped value into an AutoMapperDataTypeBase so
        # get_column_spec can be called on it uniformly later.
        self.mapping: Dict[AutoMapperAnyDataType, AutoMapperDataTypeBase] = {
            key: (
                value
                if isinstance(value, AutoMapperDataTypeBase)
                else AutoMapperValueParser.parse_value(value)
            )
            for key, value in mapping.items()
        }
        # An empty mapping would leave column_spec as None in get_column_spec.
        assert self.mapping
        # Fallback used when no mapping key matches (None is parsed too).
        self.default: AutoMapperDataTypeBase = (
            default
            if isinstance(default, AutoMapperDataTypeBase)
            else AutoMapperValueParser.parse_value(default)
        )
    def get_column_spec(
        self, source_df: Optional[DataFrame], current_column: Optional[Column]
    ) -> Column:
        """Build the Spark CASE WHEN ... OTHERWISE expression for this mapping."""
        inner_column_spec: Column = self.column.get_column_spec(
            source_df=source_df, current_column=current_column
        )
        column_spec: Optional[Column] = None
        key: AutoMapperAnyDataType
        value: AutoMapperDataTypeBase
        # Chain one when() clause per mapping entry; the first clause must use
        # the free function when(), subsequent ones chain off the Column.
        # eqNullSafe lets a None key match a NULL column value.
        for key, value in self.mapping.items():
            if column_spec is not None:
                column_spec = column_spec.when(
                    inner_column_spec.eqNullSafe(key),  # type: ignore
                    value.get_column_spec(
                        source_df=source_df, current_column=current_column
                    ),
                )
            else:
                column_spec = when(
                    inner_column_spec.eqNullSafe(key),  # type: ignore
                    value.get_column_spec(
                        source_df=source_df, current_column=current_column
                    ),
                )
        # Attach the default as the OTHERWISE branch.
        if column_spec is not None:
            column_spec = column_spec.otherwise(
                self.default.get_column_spec(
                    source_df=source_df, current_column=current_column
                )
            )
        # Guaranteed non-None because __init__ asserted a non-empty mapping.
        assert column_spec is not None
        return column_spec
| StarcoderdataPython |
5196 | <filename>pyopenproject/business/services/command/configuration/find.py<gh_stars>1-10
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.configuration.configuration_command import ConfigurationCommand
from pyopenproject.model.configuration import Configuration
class Find(ConfigurationCommand):
    """Command that fetches the instance-wide configuration resource."""
    def __init__(self, connection):
        """Constructor for class Find, from ConfigurationCommand.

        :param connection: The connection data
        """
        super().__init__(connection)
    def execute(self):
        """Retrieve the configuration, wrapping transport failures.

        :raises BusinessError: when the underlying request fails
        """
        try:
            raw = GetRequest(self.connection, f"{self.CONTEXT}").execute()
            return Configuration(raw)
        except RequestError as re:
            raise BusinessError("Error listing configuration") from re
| StarcoderdataPython |
3370485 | # -*- coding: <utf-8> -*-
"""
Module that implements a client and server interface useful for controlling a
vim server.
This module could be used for unit testing or integration testing
for a Vim plugin written in Python. Or you can use it to interactively control
a Vim editor by Python code, for example, in an Ipython session.
This work tries to be the python equivalent of Vimrunner ruby gem found at:
http://rubydoc.info/gems/vimrunner/index
I thank the author(s) for the effort and nice level of abstraction
they put in this gem.
"""
import os.path
import shutil
import multiprocessing
import subprocess
import random
import time
#vimrc = os.path.join(os.path.dirname(
# os.path.abspath(__file__)), 'default_vimrc')
### utility functions ###
def create_vim_list(values):
    """creates the Vim editor's equivalent of python's repr(a_list).

    >>> create_vim_list(['first line', 'second line'])
    '["first line", "second line"]'

    values - a list of strings

    We need double quotes not single quotes to create a Vim list.
    Backslashes and embedded double quotes are escaped; previously such
    elements produced a malformed Vim expression.

    Returns a string that is a properly written Vim list of strings.
    This result can be fed to vim's eval function to create a list in vim.
    """
    def _quote(elem):
        # Escape backslashes first so the quote escapes aren't double-escaped.
        return '"' + elem.replace('\\', '\\\\').replace('"', '\\"') + '"'
    return '[%s]' % ', '.join(_quote(elem) for elem in values)
class Server(object):
    """
    Represents a remote Vim editor server. A Server has the responsibility of
    starting a Vim process and communicating with it through the
    client - server interface. The process can be started with one of the
    "start*" family of methods:
        start_in_other_terminal()
        start_gvim()
        start()
    The server can be stopped with "kill" method, but it is recommended to
    use client's "quit" method.
    If given the servername of an existing Vim instance, it can
    control that instance without the need to start a new process.
    A Client would be necessary as an actual interface, though it is possible
    to use a Server directly to invoke --remote-send and --remote-expr
    commands on its Vim instance.
    Example:
        >>> vim = Server("My_server")
        >>> client = vim.start_in_other_terminal()
        >>> client.edit("some_file.txt")
    """
    # Default vimrc shipped next to this module; required for the
    # client-server helper functions to work.
    vimrc = os.path.join(os.path.dirname(
        os.path.abspath(__file__)), 'default_vimrc')

    def __init__(self, name='', executable='vim', vimrc='', noplugin=True,
                 extra_args=None):
        """
        Initialize a Server.
        name       - The String name of the Vim server
                     (default: "VIMRUNNER#{random.randint}").
        executable - The String 'vim' or 'gvim' (for GUI) or an absolute
                     path of Vim executable to use (default: vim).
        vimrc      - The String vimrc file to source in the client. The
                     default Server.vimrc file is used, it is needed by
                     this module in order to work fine. If user wants to
                     use a custom vimrc, it should be sourced using client.
                     (default: Server.vimrc).
        noplugin   - Do not load any plugins.
        extra_args - command line args that can be given to vim before it
                     is started. It is used especially by start_gvim()
                     (default: ['-n'] to prevent using swap files.)
        Example:
            >>> # no swap file will be used:
            >>> vim = Server(extra_args=['-n'])
        """
        self.name = name or 'VIMRUNNER#%s' % random.randint(1, 10000)
        self.executable = executable if os.path.isabs(executable) else \
            self._get_abs_path(executable)
        vimrc = vimrc if vimrc else Server.vimrc
        self.vimrc = '-u %s' % vimrc
        self.noplugin = '--noplugin' if noplugin else ''
        # Fixed: a mutable default argument (extra_args=['-n']) was shared
        # between all instances, so start_gvim()'s append('-f') leaked into
        # every Server created afterwards. Copy the caller's list instead.
        self.extra_args = list(extra_args) if extra_args is not None else ['-n']
        self.cmd = None
        self._format_vim_args()

    def _format_vim_args(self):
        """Utility function used by start_*() family of functions.
        Rebuilds self.vim_args and self.cmd. Returns nothing."""
        # format arguments list for the Vim subprocess, order matters
        self.vim_args = [self.executable, self.vimrc, self.noplugin,
                         '--servername', self.name]
        # get rid of empty strings, False arguments
        self.vim_args = [arg for arg in self.vim_args if arg]
        # add extra vim arguments
        self.vim_args.extend(self.extra_args)
        self.cmd = " ".join(self.vim_args)
        # Eg:
        # >>> self.cmd
        # "/usr/bin/gvim -n --noplugin --servername VIMRUNNER#1"

    def start(self, timeout=5, testing=False):
        """Starts Vim server in a subprocess, eg.:
        >>> subprocess.call("vim -n --servername GOTOWORD", shell=True)
        but we don't want to wait for Vim to complete and to block this script
        so we need some thread like behaviour that is obtained using the
        multiprocessing module.
        testing - flag useful for tests when you don't want to start Vim server
        Returns a client connected to Vim server.
        """
        self.server = multiprocessing.Process(
            name=self.name,
            target=subprocess.call,
            args=(self.cmd,),
            kwargs={'shell': True}
            # we need to start in a shell otherwise vim complains with error:
            # Garbage after option argument: "-u /path/to/custom/vimrc"
        )
        if not testing:
            self.server.start()
            self.check_is_running(timeout)
        return Client(self)

    def start_headless(self, timeout=5, testing=False):
        """Starts headless Vim server in a subprocess.
        vim -n --servername GOTOWORD >/dev/null 2>&1 <&1
        No input and output is connected to Vim server,
        so that you can run a unit test without a dirty log.
        testing - flag useful for tests when you don't want to start Vim server
        Returns a client connected to Vim server.
        """
        if not testing:
            self.server = subprocess.Popen(
                args=self.cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                shell=True
            )
            self.check_is_running(timeout)
        return Client(self)

    def start_in_other_terminal(self):
        """Start vim in a terminal other than the one used to run this script
        (test script) because vim will pollute the output of the test script
        and vim will malfunction.
        Returns a Client.
        We need something like:
        x-terminal-emulator -e 'sh -c "python vim_server_no_gui.py"'
        It is useful when testing a vim plugin to launch vim in other
        terminal so that the test script's output doesn't get polluted by vim.
        """
        self._format_vim_args()
        self.cmd = "x-terminal-emulator -e '%s'" % self.cmd
        # x-terminal-emulator chooses the default terminal in a cross-desktop
        # way (debian, ubuntu, mint, etc.)
        return self.start()

    def start_gvim(self):
        """Start a GUI Vim. Returns a Client()."""
        self.executable = self._get_abs_path(exe='gvim')
        # Gvim needs to be started with the -f flag so it doesn't fork and
        # kill its original process
        self.extra_args.append('-f')
        # -f seems not to work
        self._format_vim_args()
        return self.start()

    def connect(self, timeout=5):
        """Connect to a running instance of Vim server.
        Returns a client.
        Eg:
            >>> vim = Server(name="SOME_SERVER_NAME")
            >>> client = vim.connect()
        """
        self.check_is_running(timeout)
        return self.start(testing=True)
        # with testing=True we prevent Server from starting a new Vim server
        # in a subprocess

    def kill(self):
        """Kills the Vim instance started in a subprocess. Returns nothing.
        It is useless if you connected to server with connect(). In that case
        use quit() instead.
        kill() works with vim, but not with gvim.
        """
        if hasattr(self, 'server'):
            # this one is the parent of gvim: vim.server._popen.pid
            # how can I find the pid of gvim? only if I do `ps aux | grep
            # self.name`?
            #os.kill(int(self.server.pid), signal.SIGTERM)
            self.server.terminate()
        else:
            raise AttributeError("Server needs to be started first.")

    def quit(self):
        """Used to send to server the :qa! command. Useful when we connected
        to server instead of starting it in a subprocess with start().
        """
        self.remote_send(':qa!<Enter>')

    def remote_send(self, keys):
        """Sends the given keys to Vim server. A wrapper around --remote-send.
        keys - a String with a sequence of Vim-compatible keystrokes.
        Returns nothing.
        Eg:
            $ vim --servername VIMRUNNER --remote-send ':qa! <Enter>'
        """
        subprocess.call(
            [self.executable, '--servername', self.name, '--remote-send',
             keys]
        )

    def remote_expr(self, expression):
        """Evaluates an expression in the Vim server and returns the result.
        A wrapper around --remote-expr.
        Note that a command is not an expression, but a function call or a
        variable is.
        expression - a String with a Vim expression to evaluate.
        Returns the String output of the expression. Eg:
            remote_expr('&shiftwidth')
        """
        result = subprocess.check_output(
            [self.executable, '--servername', self.name, '--remote-expr',
             expression])
        return result.decode('utf-8')

    def server_list(self):
        """Retrieves a list of names of currently running Vim servers.
        Returns a List of String server names currently running.
        """
        path = subprocess.check_output([self.executable,
                                        '--serverlist'])
        path = path.decode('utf-8')
        return path.split('\n')

    def is_running(self):
        "Returns a Boolean indicating wheather server exists and is running."
        return self.name.upper() in [s.strip() for s in self.server_list()]

    def check_is_running(self, timeout):
        """Raises a RuntimeError exception if it can't find, during timeout,
        a Vim server with the same name as the one given at initialization
        during timeout.
        """
        while timeout:
            if self.is_running():
                break
            time.sleep(1)
            timeout -= 1
        else:
            raise RuntimeError("Could not connect to vim server before "
                               "timeout expired. Maybe you should try again.")

    @staticmethod
    def _get_abs_path(exe):
        """Resolve *exe* to an absolute path via a PATH lookup.
        Raises RuntimeError when the executable cannot be found.
        Fixed: the previous implementation *executed* the resolved program
        with subprocess.check_output and returned its captured output, which
        is not a path at all (and could block waiting for input).
        """
        path = shutil.which(exe)
        if path is None:
            raise RuntimeError("Could not find executable %r on PATH" % exe)
        return path
class Client(object):
    """
    Client that has a reference to a Vim server. Useful to send keys,
    commands, expressions to manipulate Vim.
    """
    def __init__(self, server):
        # The Server instance this client talks to.
        self.server = server

    def type(self, keys):
        """
        Invokes one of the basic actions the Vim server supports, sending a
        key sequence. The keys are sent as-is, so it'd probably be better to
        use the wrapper methods, normal(), insert() and so on. Eg:
            >>> client.type(':ls <Enter>')
        """
        self.server.remote_send(keys)

    def command(self, cmd):
        """Send commands to a Vim server.
        Used for Vim cmds and everything except for calling functions. Eg:
            >>> client.command("ls")
        Returns the String output captured by the helper in the vimrc.
        """
        output = self.eval("VimrunnerPyEvaluateCommandOutput('%s')" % cmd)
        return output
        # could have been implemented like:
        # self.type(':%s <Enter>' % cmd)

    def eval(self, expression):
        """
        Calls the server's remote_expr() method to evaluate the expression.
        Returns the String output of the expression, stripped by useless
        whitespaces. Eg:
            >>> # get the line number of the cursor
            >>> client.eval('line(".")')
        Note that Vim makes a clear distinction between ' and ".
        """
        return self.server.remote_expr(expression).strip()

    def edit(self, filename):
        """Edits the file filename with Vim.
        Note that this doesn't use the '--remote' Vim flag, it simply types
        in the command manually. This is necessary to avoid the Vim instance
        getting focus.
        filename - a String that can be a relative or absolute path
        Returns, if the file is found, a string with the name of the document
        otherwise it returns an empty string.
        Eg:
            >>> # suppose 'test' folder is in pwd:
            >>> result = client.edit('test/a-file.txt')
            >>> result
            '"test/a-file.txt" 10L, 304C'
            >>> # otherwise an absolute path is needed:
            >>> client.edit('/home/user/path_to_file/file.txt')
        """
        return self.command("edit %s" % filename)

    def feedkeys(self, keys):
        """
        Send keys as if they come from a mapping or typed by a user.
        Vim's usual remote-send functionality to send keys to a server does
        not respect mappings. As a workaround, the feedkeys() function can be
        used to more closely simulate user input.
        Example:
        We want to send 3 keys: Ctrl w p and according to Vim docs you would
        write: '<C-w>p' but these keys need to be escaped with a backslash '\\':
            >>> # in Vim you would write
            >>> :call feedkeys("\\<C-w>p")
            >>> # this function can be used like this:
            >>> client = Client(server)
            >>> client.feedkeys('\\<C-w>p')
            >>> client.feedkeys('\\<C-w>k')
        """
        #self.command('call feedkeys("%s")' % keys)
        self.eval('feedkeys("%s")' % keys)
        #self.server.remote_expr('%Q{feedkeys("%s")}' % keys)

    def source(self, script):
        """Source a script in Vim server.
        script - a filename with an absolute path.
        You can see all sourced scripts with command('script')
        """
        self.command('source %s' % script)

    def normal(self, keys=''):
        """
        Switches Vim to normal mode and types in the given keys.
        """
        #self.server.remote_send("<C-\\><C-n>%s" % keys)
        # OR
        self.type("<C-\\><C-n>%s" % keys)

    def insert(self, text):
        """
        Switches Vim to insert mode and types in the given text at current
        cursor position. Eg:
            >>> client.insert('Hello World!')
        """
        self.normal("i%s" % text)

    def search(self, text, flags='', stopline='', timeout=''):
        """
        Starts a search in Vim for the given text. The result is that the
        cursor is positioned on its first occurrence.
        For info about the rest of the args, check :help search.
        """
        #self.type('/%s<CR>' % text)
        return self.eval('search("%s", "%s", "%s", "%s")' %
                         (text, flags, stopline, timeout))

    def append_runtimepath(self, dir):
        """
        Appends a directory to Vim's runtimepath.
        dir - The directory added to the path
        Returns nothing. Eg:
            >>> client.append_runtimepath("/path/to/a/plugin/dir")
        """
        dir_path = os.path.abspath(dir)
        self.command("set runtimepath+=%s" % dir_path)

    def echo(self, expression):
        """
        Echo the expression in Vim. Eg:
            >>> # get list of directories where plugins reside
            >>> client.echo("&runtimepath")
            >>> # output color brightness
            >>> client.echo("&bg")
            >>> # echo a string in Vim
            >>> client.echo('"testing echo function with a string"')
            >>> # or double quotes need to be escaped
            >>> client.echo("\\"testing echo function with a string\\"")
        Returns the String output.
        """
        ## redirect message to variable local to buffer
        #self.command("redir => b:command_output")
        #self.command("silent echo %s" % expression)
        ## end redirection:
        #self.command("redir END")
        ## get value of variable from current buffer:
        #output = self.eval('getbufvar("%", "command_output")')
        ## remove variable:
        #self.command("unlet b:command_output")
        #return output
        return self.command("echo %s" % expression)

    def prepend_runtimepath(self, dir):
        """
        Prepends a directory to Vim's runtimepath. Use this instead of
        append_runtimepath() to give the directory higher priority when Vim
        runtime's a file.
        dir - The directory added to the path
        Eg:
            >>> client.prepend_runtimepath('/home/user/plugin_dir')
        """
        dir_path = os.path.abspath(dir)
        runtimepath = self.echo("&runtimepath")
        self.command("set runtimepath=%s,%s" % (dir_path, runtimepath))

    def add_plugin(self, dir, entry_script=''):
        """
        Adds a plugin to Vim's runtime. Initially, Vim is started without
        sourcing any plugins to ensure a clean state. This method can be used
        to populate the instance's environment.
        dir          - The base directory of the plugin, the one that contains
                       its autoload, plugin, ftplugin, etc. directories.
        entry_script - The Vim script that's runtime'd to initialize the plugin
                       (optional)
        Examples:
            >>> client.add_plugin('/home/andrei/.vim/my_plugin/', 'plugin/rails.vim')
        Returns nothing.
        """
        self.append_runtimepath(dir)
        if entry_script:
            self.command("runtime %s" % entry_script)

    def read_buffer(self, lnum, end='', buf=None):
        """Reads lines from buffer with index 'buf' or, by default, from the
        current buffer in the range lnum -> end.
        Uses vim's getbufline().
        Returns one string with the lines joined with newlines '\\\\n' marking
        the end of each line.
        Eg:
            >>> one_line = client.read_buffer("1")
            >>> two_lines = client.read_buffer("1", "2")
            >>> all_lines = client.read_buffer("1", "$")
            >>> two_lines = client.read_buffer("line('$') - 1", "'$'")
        """
        if not buf:
            buf = self.get_active_buffer()
        return self.eval("getbufline(%s, %s, %s)" % (buf, lnum, end))

    def write_buffer(self, lnum, text):
        """Writes one or more lines to current buffer, starting from line
        'lnum'. Calls vim's setline() function.
        lnum - can be a number or a special character like $, '.'. etc.
        text - can be a string or a list of strings.
        Returns '0' or '1', as strings.
        Eg:
        Input is a string
            >>> client.write_buffer("2", "write to line number 2")
            >>> client.write_buffer("'$'", "write to last line")
            >>> client.write_buffer("\\"$\\"", "write to last line")
            >>> client.write_buffer("'$'", "['last line', 'add after last line']")
            >>> client.write_buffer("line('$') + 1", "add after last line")
        Input is a list
            >>> l = ['last line', 'add after last line']
            >>> client.write_buffer("'$'",l)
        Pay attention, simple and double quotes matter.
        """
        # Fixed idiom: use isinstance() for the type check rather than
        # comparing type(text) == list.
        if isinstance(text, list):
            return self.eval("setline(%s, %s)" % (lnum, create_vim_list(text)))
        # text must be quoted in Vim editor:
        return self.eval("setline(%s, \"%s\")" % (lnum, text))

    def get_active_buffer(self):
        """
        Get the current (active) vim buffer. Returns a string with the buffer number.
        """
        return self.eval("winbufnr(0)")

    def quit(self):
        "Exit Vim."
        self.server.quit()
#if __name__ == '__main__':
# vim = Server()
#
# gvim = Server()
| StarcoderdataPython |
1722464 | from __future__ import absolute_import
from changes.api.base import APIView
from changes.config import db
from changes.constants import Result
from changes.models.build import Build
from changes.models.job import Job
from changes.models.test import TestCase
from changes.models.source import Source
# This constant must match MAX_TESTS_TO_ADD in citools' quarantine keeper
MAX_TESTS_TO_ADD = 2
class BuildFlakyTestsAPIView(APIView):
    """API view listing the flaky tests of a build.

    A test is considered flaky here when it passed but needed more than
    one rerun within the build's jobs.
    """
    def get(self, build_id):
        """Return the flaky tests for the build, or 404 if it doesn't exist."""
        build = Build.query.get(build_id)
        if build is None:
            return '', 404
        jobs = list(Job.query.filter(
            Job.build_id == build.id,
        ))
        if jobs:
            # Passed-after-rerun tests across all of this build's jobs,
            # ordered by name for a stable response.
            flaky_tests_query = db.session.query(
                TestCase.id,
                TestCase.name,
                TestCase.name_sha,
                TestCase.job_id
            ).filter(
                TestCase.job_id.in_([j.id for j in jobs]),
                TestCase.result == Result.passed,
                TestCase.reruns > 1
            ).order_by(TestCase.name.asc()).all()
        else:
            flaky_tests_query = []
        flaky_tests = []
        for test in flaky_tests_query:
            item = {
                'id': test.id,
                'name': test.name,
                'name_sha': test.name_sha,
                'job_id': test.job_id,
            }
            # Quarantine Keeper only needs the author if there are at most
            # MAX_TESTS_TO_ADD to add. If there are less, it will only send
            # an alert and we don't want to waste time querying the DB
            if len(flaky_tests_query) <= MAX_TESTS_TO_ADD:
                # NOTE(review): _get_last_testcase can in principle return
                # None; here a matching TestCase row is known to exist (it
                # came from flaky_tests_query), so the fallback query should
                # always find one -- confirm before relying on .owner.
                last_test = self._get_last_testcase(build.project_id, test.name_sha)
                possible_authors = [
                    last_test.owner,
                ]
                for author in possible_authors:
                    if author:
                        item['author'] = {'email': author}
                        break
            flaky_tests.append(item)
        context = {
            'projectSlug': build.project.slug,
            'repositoryUrl': build.project.repository.url,
            'flakyTests': {
                'count': len(flaky_tests),
                'items': flaky_tests
            }
        }
        return self.respond(context)
    @staticmethod
    def _get_last_testcase(project_id, test_name_sha):
        """Get the most recent TestCase instance from a commit build for the specified name.
        Args:
            :param project_id: string
            :param test_name_sha: string
        Returns:
            TestCase
        """
        # Prefer tests from commit builds (no patch, with a revision sha).
        most_recent_test = TestCase.query.join(TestCase.job).join(Job.build).join(Build.source).filter(
            TestCase.project_id == project_id,
            TestCase.name_sha == test_name_sha,
            Source.patch_id.is_(None),
            Source.revision_sha.isnot(None),
        ).order_by(TestCase.date_created.desc()).first()
        # Fall back to any test if this was never committed
        if not most_recent_test:
            most_recent_test = TestCase.query.filter(
                TestCase.project_id == project_id,
                TestCase.name_sha == test_name_sha,
            ).order_by(TestCase.date_created.desc()).first()
        return most_recent_test
| StarcoderdataPython |
1792028 | #!/usr/bin/env python3
"""
Extracts images from the data stored with generate_data.py or optimal_trajectory_gen.py. Applies several transformation
to all images, if wished.
"""
import pathlib
import random
import argparse
import json
import shutil
from typing import List, Callable
import yaml
import numpy as np
import cv2
from gym_duckietown.envs import SimpleSimEnv
import src.graphics
class Transform:
    """Composable image pipeline: registered callables run in order."""
    def __init__(self):
        # Callables applied first-registered-first.
        self.transforms = []
    def add_transform(self, transform: Callable):
        """Append *transform* to the end of the pipeline."""
        self.transforms.append(transform)
    def __call__(self, img):
        """Thread *img* through every registered transform and return the result."""
        result = img
        for step in self.transforms:
            result = step(result)
        return result
    def __bool__(self):
        """True when at least one transform has been registered."""
        return len(self.transforms) > 0
def write_imgs_from_map(map_name: str, save_dir: pathlib.Path, test_percentage=0.3):
    """Render trajectory positions on *map_name* and save them as labeled images.

    Reads the demos JSON produced by the trajectory generator, replays each
    recorded pose in the simulator, and writes one .jpg plus a .txt label
    (the recorded action) per pose into save_dir/train and save_dir/test.

    :param map_name: name of the gym-duckietown map to render on
    :param save_dir: output root; "train" and "test" subdirs are created
    :param test_percentage: probability a sample lands in the test split
    :raises ValueError: if the demos file for the map does not exist
    """
    env = SimpleSimEnv(map_name=map_name)
    file_path = pathlib.Path('experiments/demos_{}.json'.format(map_name))
    if not file_path.is_file():
        raise ValueError("Could not find the file containing the generated trajectories: {}".format(file_path))
    data = json.loads(file_path.read_text())
    demos = data['demos']
    positions = map(lambda d: d['positions'], demos)
    actions = map(lambda d: d['actions'], demos)
    # Flatten the per-demo lists into one flat list each.
    positions = sum(positions, [])
    actions = sum(actions, [])
    test_dir = save_dir / "test"
    train_dir = save_dir / "train"
    test_dir.mkdir(parents=True, exist_ok=True)
    train_dir.mkdir(parents=True, exist_ok=True)
    print("Found {} positions to be converted to images...".format(len(positions)))
    for idx, position in enumerate(positions):
        cur_pos = np.array(position[0])
        cur_angle = position[1]
        vels = actions[idx]
        # Teleport the agent to the recorded pose before rendering.
        env.cur_pos = cur_pos
        env.cur_angle = cur_angle
        obs = env.render_obs().copy()
        # Reverse the channel order (RGB -> BGR) for cv2.imwrite.
        obs = obs[..., ::-1]
        if random.random() < test_percentage:
            img_path = test_dir / "{0:06d}.jpg".format(idx)
            lbl_path = test_dir / "{0:06d}.txt".format(idx)
        else:
            img_path = train_dir / "{0:06d}.jpg".format(idx)
            lbl_path = train_dir / "{0:06d}.txt".format(idx)
        cv2.imwrite(img_path.as_posix(), obs)
        # Label file holds the recorded action values, space-separated.
        lbl_path.write_text(" ".join(map(str, vels)))
def write_imgs_from_srcdir(src_dir: pathlib.Path, tgt_dir: pathlib.Path, keep_zeros_prob=1.0, only_road=False) -> None:
    """Copy sequence images described by the .yaml files in src_dir into a
    train/test split under tgt_dir, writing one .txt label (omega) per image.

    :param src_dir: directory containing sequence .yaml metadata files
    :param tgt_dir: output root; "train" and "test" subdirs are created
    :param keep_zeros_prob: probability of keeping samples with near-zero
        steering (|omega| < 0.1); < 1.0 flattens the label distribution
    :param only_road: copy the "only road" variant of each image instead of
        the full frame
    """
    test_dir = tgt_dir / "test"
    train_dir = tgt_dir / "train"
    test_dir.mkdir(exist_ok=True)
    train_dir.mkdir(exist_ok=True)
    test_percentage = 0.3
    test_count = 0
    train_count = 0
    for path in src_dir.iterdir():
        if path.suffix != ".yaml":
            continue
        seq_info = yaml.load(path.read_text())
        for entry in seq_info:
            # Randomly drop near-zero-steering samples to flatten the dist.
            if abs(entry["omega"]) < 0.1 and random.random() > keep_zeros_prob:
                continue
            if random.random() < test_percentage:
                img_tgt = test_dir / "{0:06d}.jpg".format(test_count)
                lbl_tgt = test_dir / "{0:06d}.txt".format(test_count)
                test_count += 1
            else:
                img_tgt = train_dir / "{0:06d}.jpg".format(train_count)
                lbl_tgt = train_dir / "{0:06d}.txt".format(train_count)
                train_count += 1
            img_src = src_dir / entry["only_road_pth"] if only_road else src_dir / entry["path"]
            shutil.copy(img_src.as_posix(), img_tgt.as_posix())
            lbl_tgt.write_text(str(entry["omega"]))
def transform_images(src_dir: pathlib.Path, transform: Callable):
    """Apply *transform* to every .jpg in *src_dir*, overwriting each file in place."""
    for img_path in src_dir.iterdir():
        if img_path.suffix == ".jpg":
            posix = img_path.as_posix()
            cv2.imwrite(posix, transform(cv2.imread(posix)))
def main():
    """CLI entry point: generate a labelled steering dataset.

    Either renders positions on a map (``--map``) or copies images from
    recorded sequences (``--src_dir``), then optionally applies image
    transforms (color filter / invert / overflow) to the written files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--map", help="Name of the map")
    parser.add_argument("--src_dir", help="If specified, the data is assumed to be from sequences")
    parser.add_argument("--tgt_dir", required=True, help="place to store the images")
    parser.add_argument("--flatten_dist", action="store_true", help="if the data distribution should be flattened")
    parser.add_argument("--only_road", action="store_true", help="use the only road images")
    parser.add_argument("--transform_only_road", action="store_true", help="transforms the only road images")
    parser.add_argument("--invert", action="store_true")
    parser.add_argument("--overflow", action="store_true")
    args = parser.parse_args()
    if args.transform_only_road:
        # transforming the only-road images implies reading them as source
        args.only_road = True
    # keep only 3% of near-zero-steering samples when flattening the label
    # distribution, otherwise keep everything
    keep_zeros_prob = 0.03 if args.flatten_dist else 1.0
    if args.src_dir is None:
        if args.map is None:
            raise ValueError("You need to specify either --src_dir or --map")
        if args.only_road:
            raise ValueError("You cant specify both --map and --only_road")
        if args.transform_only_road:
            raise ValueError("You cant specify both --map and --transform_only_road")
        write_imgs_from_map(map_name=args.map, save_dir=pathlib.Path(args.tgt_dir))
    else:
        if args.map is not None:
            raise ValueError("You can't specify both --map and --src_dir")
        write_imgs_from_srcdir(pathlib.Path(args.src_dir),
                               pathlib.Path(args.tgt_dir),
                               keep_zeros_prob=keep_zeros_prob,
                               only_road=args.only_road)
    transform = Transform()
    # BUG FIX: was `args.tranform_only_road` (typo), which would raise
    # AttributeError at runtime since argparse defined `transform_only_road`
    if args.transform_only_road:
        transform.add_transform(src.graphics.apply_color_filter)
    if args.invert:
        transform.add_transform(src.graphics.invert)
    if args.overflow:
        transform.add_transform(src.graphics.overflow)
    # NOTE(review): relies on Transform truthiness meaning "has at least one
    # transform" -- confirm Transform defines __bool__/__len__ accordingly
    if transform:
        print("transforming images...")
        transform_images(pathlib.Path(args.tgt_dir) / "train", transform)
        transform_images(pathlib.Path(args.tgt_dir) / "test", transform)
if __name__ == "__main__":
    # run the dataset-generation CLI only when executed as a script
    main()
| StarcoderdataPython |
1708865 | import copy
import dateutil
from bson import ObjectId
from aquascope.tests.aquascope.webserver.data_access.db.dummy_uploads import DUMMY_UPLOADS
from aquascope.webserver.data_access.db import Item
from aquascope.webserver.data_access.db.items import DEFAULT_ITEM_PROJECTION, ANNOTABLE_FIELDS
from aquascope.webserver.data_access.db.util import project_dict
def add_tags_to_items(item_list):
    """Return a deep copy of ``item_list`` where each item gains the ``tags``
    of the dummy upload whose ``_id`` equals the item's ``upload_id``.

    Items without a matching upload are left without a ``tags`` key; when
    several uploads share an ``_id``, the last one wins (as in a linear scan).
    """
    tags_by_upload = {upload._id: upload.tags for upload in DUMMY_UPLOADS}
    tagged_items = copy.deepcopy(item_list)
    for tagged in tagged_items:
        uid = tagged['upload_id']
        if uid in tags_by_upload:
            tagged['tags'] = tags_by_upload[uid]
    return tagged_items
# Morphology + per-channel intensity statistics shared verbatim by every dummy
# item; extracted here to avoid repeating ~65 identical fields five times.
_COMMON_FEATURES = {
    "file_size": 1.0,
    "aspect_ratio": 1.0,
    "maj_axis_len": 1.0,
    "min_axis_len": 1.0,
    "orientation": 1.0,
    "eccentricity": 1.0,
    "solidity": 1.0,
    "estimated_volume": 1.0,
    "area": 1.0,
    "intensity_gray_mass_displace_in_images": 0.008681178096776,
    "intensity_gray_moment_hu_4": 6.44407178777693e-12,
    "intensity_gray_moment_hu_5": 1.58949394588895e-22,
    "intensity_gray_moment_hu_6": 4.87549139791229e-15,
    "intensity_gray_moment_hu_7": 9.29907144602865e-23,
    "intensity_gray_std_intensity": 44.4518848935367,
    "intensity_gray_moment_hu_1": 0.00162911271992,
    "intensity_gray_moment_hu_2": 6.02185718988395e-07,
    "intensity_gray_moment_hu_3": 1.26728600174152e-10,
    "intensity_gray_median_intensity": 87.0,
    "intensity_gray_mass_displace_in_minors": 0.021203842389859,
    "intensity_gray_mean_intensity": 84.08,
    "intensity_gray_perc_25_intensity": 39.0,
    "intensity_gray_perc_75_intensity": 124.0,
    "intensity_red_mass_displace_in_images": 0.016106449593647,
    "intensity_red_moment_hu_4": 6.23065353203622e-12,
    "intensity_red_moment_hu_5": 2.82161622605635e-23,
    "intensity_red_moment_hu_6": 4.08980777153774e-15,
    "intensity_red_moment_hu_7": 2.18575645069997e-22,
    "intensity_red_std_intensity": 33.6895555570746,
    "intensity_red_moment_hu_1": 0.002005703596185,
    "intensity_red_moment_hu_2": 8.66777471825988e-07,
    "intensity_red_moment_hu_3": 2.00807713614223e-10,
    "intensity_red_median_intensity": 68.0,
    "intensity_red_mass_displace_in_minors": 0.039831665466914,
    "intensity_red_mean_intensity": 67.5858823529412,
    "intensity_red_perc_25_intensity": 34.0,
    "intensity_red_perc_75_intensity": 96.0,
    "intensity_green_mass_displace_in_images": 0.009051942893981,
    "intensity_green_moment_hu_4": 7.3328871508961e-12,
    "intensity_green_moment_hu_5": 1.61308614662726e-22,
    "intensity_green_moment_hu_6": 5.41478886935591e-15,
    "intensity_green_moment_hu_7": 1.46066546290528e-22,
    "intensity_green_std_intensity": 49.2534002268882,
    "intensity_green_moment_hu_1": 0.001597936986465,
    "intensity_green_moment_hu_2": 6.4019059128102e-07,
    "intensity_green_moment_hu_3": 1.20101695144045e-10,
    "intensity_green_median_intensity": 84.0,
    "intensity_green_mass_displace_in_minors": 0.020957246911431,
    "intensity_green_mean_intensity": 87.0611764705882,
    "intensity_green_perc_25_intensity": 37.0,
    "intensity_green_perc_75_intensity": 128.0,
    "intensity_blue_mass_displace_in_images": 0.003477996783235,
    "intensity_blue_moment_hu_4": 5.33414678545634e-12,
    "intensity_blue_moment_hu_5": 1.13689838407033e-22,
    "intensity_blue_moment_hu_6": 3.34282757211662e-15,
    "intensity_blue_moment_hu_7": -8.77804907314849e-24,
    "intensity_blue_std_intensity": 52.6562716093001,
    "intensity_blue_moment_hu_1": 0.001380066669561,
    "intensity_blue_moment_hu_2": 4.06557804086171e-07,
    "intensity_blue_moment_hu_3": 8.56701947118446e-11,
    "intensity_blue_median_intensity": 98.0,
    "intensity_blue_mass_displace_in_minors": 0.008827539385204,
    "intensity_blue_mean_intensity": 98.6188235294118,
    "intensity_blue_perc_25_intensity": 46.0,
    "intensity_blue_perc_75_intensity": 144.0,
}

# Annotation fields whose value is identical on every dummy item unless the
# item overrides it (overrides below: "eating" always, plus "female"/"allman"
# on individual items).
_COMMON_ANNOTATIONS = {
    "with_eggs": None,
    "dividing": False,
    "dead": False,
    "with_epibiont": None,
    "with_parasite": None,
    "broken": False,
    "colony": False,
    "cluster": None,
    "multiple_species": False,
    "partially_cropped": False,
    "male": None,
    "female": None,
    "juvenile": None,
    "adult": None,
    "ephippium": None,
    "resting_egg": None,
    "heterocyst": None,
    "akinete": None,
    "with_spines": None,
    "beatles": None,
    "stones": None,
    "zeppelin": None,
    "floyd": None,
    "acdc": None,
    "hendrix": None,
    "alan_parsons": None,
    "allman": None,
    "dire_straits": None,
    "eagles": None,
    "guns": None,
    "purple": None,
    "van_halen": None,
    "skynyrd": None,
    "zz_top": None,
    "iron": None,
    "police": None,
    "moore": None,
    "inxs": None,
    "chilli_peppers": None,
}


def _annotation_metadata(modified_by, modification_time):
    """Return ``<field>_modified_by`` / ``<field>_modification_time`` entries
    with the same user and timestamp for every annotable field."""
    meta = {f'{k}_modified_by': modified_by for k in ANNOTABLE_FIELDS}
    meta.update({f'{k}_modification_time': modification_time
                 for k in ANNOTABLE_FIELDS})
    return meta


# Five dummy items: shared feature/annotation defaults merged with each item's
# identity, taxonomy, flag overrides and annotation metadata. Later keys in a
# dict literal override earlier ones, so per-item entries win over the
# defaults. (Dict key order differs from the original flat literals, which is
# irrelevant for dict equality and DB insertion.)
_DUMMY_ITEMS = [
    {
        **_COMMON_FEATURES,
        **_COMMON_ANNOTATIONS,
        "_id": ObjectId('000000000000000000000000'),
        "upload_id": ObjectId('000000000000000000001003'),
        "empire": "prokaryota",
        "kingdom": "bacteria",
        "phylum": "cyanobacteria",
        "class": "cyanophyceae",
        "order": "nostocales",
        "family": "nostocaceae",
        "genus": "anabaena",
        "species": "sp",
        "eating": True,
        "filename": 'image_000.jpeg',
        "extension": ".jpeg",
        "group_id": "processed",
        "acquisition_time": dateutil.parser.parse('2019-01-20 10:00:00'),
        "image_width": 48,
        "image_height": 32,
        **_annotation_metadata('user1', dateutil.parser.parse('2019-01-21 10:00:00')),
    },
    {
        **_COMMON_FEATURES,
        **_COMMON_ANNOTATIONS,
        "_id": ObjectId('000000000000000000000001'),
        "upload_id": ObjectId('000000000000000000001001'),
        "empire": "prokaryota",
        "kingdom": "bacteria",
        "phylum": "cyanobacteria",
        "class": "cyanophyceae",
        "order": "nostocales",
        "family": "nostocaceae",
        "genus": "anabaena",
        "species": None,
        "eating": True,
        "female": False,
        "filename": "image_001.jpeg",
        "extension": ".jpeg",
        "group_id": "processed",
        "acquisition_time": dateutil.parser.parse('2019-01-20 06:00:00'),
        "image_width": 100,
        "image_height": 100,
        **_annotation_metadata('user2', dateutil.parser.parse('2019-01-21 06:00:00')),
    },
    {
        **_COMMON_FEATURES,
        **_COMMON_ANNOTATIONS,
        "_id": ObjectId('000000000000000000000002'),
        "upload_id": ObjectId('000000000000000000001003'),
        "empire": "prokaryota",
        "kingdom": "bacteria",
        "phylum": "cyanobacteria",
        "class": "cyanophyceae",
        "order": "nostocales",
        "family": "nostocaceae",
        "genus": "anabaena",
        "species": "sp",
        "eating": False,
        "female": True,
        "filename": "image_002.jpeg",
        "extension": ".jpeg",
        "group_id": "processed",
        "acquisition_time": dateutil.parser.parse('2019-01-10 10:00:00'),
        "image_width": 100,
        "image_height": 100,
        **_annotation_metadata(None, None),
    },
    {
        **_COMMON_FEATURES,
        **_COMMON_ANNOTATIONS,
        "_id": ObjectId('000000000000000000000003'),
        "upload_id": ObjectId('000000000000000000001002'),
        "empire": "prokaryota",
        "kingdom": "bacteria",
        "phylum": "cyanobacteria",
        "class": "cyanophyceae",
        "order": "sphaeropleales",
        "family": "scenedesmaceae",
        "genus": "coelastrum",
        "species": None,
        "eating": None,
        "allman": False,
        "filename": "image_003.jpeg",
        "extension": ".jpeg",
        "group_id": "processed",
        "acquisition_time": dateutil.parser.parse('2019-01-05 10:00:00'),
        "image_width": 100,
        "image_height": 100,
        # even-indexed annotable fields were annotated by user1, odd-indexed
        # by user2, at different times
        **({f'{k}_modified_by': 'user1' for k in ANNOTABLE_FIELDS[0::2]}),
        **({f'{k}_modification_time': dateutil.parser.parse('2019-01-05 10:00:00') for k in ANNOTABLE_FIELDS[0::2]}),
        **({f'{k}_modified_by': 'user2' for k in ANNOTABLE_FIELDS[1::2]}),
        **({f'{k}_modification_time': dateutil.parser.parse('2019-01-06 10:00:00') for k in ANNOTABLE_FIELDS[1::2]}),
    },
    {
        **_COMMON_FEATURES,
        **_COMMON_ANNOTATIONS,
        "_id": ObjectId('000000000000000000000004'),
        "upload_id": ObjectId('000000000000000000001000'),
        "empire": None,
        "kingdom": "bacteria",
        "phylum": "cyanobacteria",
        "class": "cyanophyceae",
        "order": "sphaeropleales",
        "family": "scenedesmaceae",
        "genus": "coelastrum",
        "species": "reticulatum",
        "eating": None,
        "filename": "image_004.jpeg",
        "extension": ".jpeg",
        "group_id": "processed",
        "acquisition_time": dateutil.parser.parse('2019-01-01 10:00:00'),
        "image_width": 100,
        "image_height": 100,
        **_annotation_metadata(None, None),
    }
]
# Dummy items augmented with the tags of their originating uploads.
_DUMMY_ITEMS_WITH_TAGS = add_tags_to_items(_DUMMY_ITEMS)
# Items reduced to the default projection, as a typical item query returns them.
DUMMY_ITEMS_WITH_DEFAULT_PROJECTION = [
    Item(project_dict(copy.deepcopy(item), DEFAULT_ITEM_PROJECTION)) for item in _DUMMY_ITEMS_WITH_TAGS
]
# Full, unprojected items wrapped in the Item model.
DUMMY_ITEMS = [Item(item) for item in _DUMMY_ITEMS_WITH_TAGS]
| StarcoderdataPython |
3282752 | """
plot data from isingWorm2d.py
"""
from __future__ import division, print_function
import cPickle as pickle # for reading/writing data to text files.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib import rcParams # for turning off legend frame
from scipy import stats
input_name = 'pottsWorm2d_highT'  # base name of the pickled result files under ..\data
# Empty placeholder classes with the same names used when the data was
# pickled; pickle needs them to reconstruct the stored objects.
class Lattice(object): pass
class Worm(object): pass
class Observables(object): pass
def load_data():
    """Unpickle the simulation results named by the module-level `input_name`.

    Reads three files from ..\data (observables, lattice and worm pickles)
    and returns the tuple (lattice, worm, observables).
    """
    observables_file = ((r'..\data\%s.pkl') % (input_name))
    lattice_file = ((r'..\data\%s_lattice.pkl') % (input_name))
    worm_file = ((r'..\data\%s_worm.pkl') % (input_name))
    # Use context managers so the file handles are closed; the original
    # pickle.load(open(...)) calls leaked them. The redundant local
    # `import cPickle as pickle` is removed -- the module already imports it.
    with open(observables_file, 'rb') as f:
        observables = pickle.load(f)
    with open(lattice_file, 'rb') as f:
        lattice = pickle.load(f)
    with open(worm_file, 'rb') as f:
        worm = pickle.load(f)
    return lattice, worm, observables
def plot_correlation_loglog(observables):
    """
    Interactive log-log plot of the correlation function g(r) with a live
    least-squares linear fit (estimating a power-law exponent).

    Two sliders control the fit: r_max limits how many distances are used,
    and T selects the temperature index. Currently only the Worm algorithm
    measures correlation data.
    """
    fig = plt.figure(figsize=(6,5))
    ax = fig.add_subplot(111)
    # make room for slider
    plt.subplots_adjust(bottom=0.22, top=0.9, right=0.95, left=0.15)
    ax.set_xlabel('$\log\,r_{ij}$', fontsize=14)
    ax.set_ylabel('$\\rm \log\,g\\left(r_{ij}\\right)$', fontsize=14)
    # order of magnitude of the final MC-step count, used to typeset the title
    digits = int(np.log10(observables.Z[-1]))
    ax.set_title(r'$\rm{\bf Ising\,2D:}\,%s^2 Grid,\,%.1f\!\times 10^{%u}MCSteps$'
                 % (observables.L, observables.Z[-1]/(10**digits), digits),
                 fontsize=14, loc=('center'))
    # distances 1..L at which the correlation was measured
    r_range = np.linspace(1, observables.L+1, observables.L)
    correlation = observables.correlation
    #correlation = np.cumsum(correlation[::-1], axis=0)[::-1]
    # initialize correlation function plot.
    correlation_plot = ax.plot([], [], 'o', markersize=6, color='b')[0]
    # initialize least squares fit plot.
    least_squares_fit = ax.plot([], [], '-r', label='y=mx+b')[0]
    rcParams['legend.frameon'] = 'False'
    # create position index slider
    # r_max = number of nonzero correlation entries at the last temperature
    r_max = len(np.log(correlation[correlation[:, -1]>0, -1]))
    slider_axes = plt.axes([0.2, 0.03, 0.7, 0.03], axisbg='lightgoldenrodyellow')
    r_slider = Slider(slider_axes, '$r_{max}$', 3, r_max, valinit=r_max,
                      facecolor='b', valfmt ='%u')
    # create temperature index slider
    T_range = observables.T_range
    slider_axes = plt.axes([0.2, 0.07, 0.7, 0.03], axisbg='lightgoldenrodyellow')
    T_slider = Slider(slider_axes, '$T$', 1, len(T_range), valinit=len(T_range),
                      facecolor='b', valfmt ='%u')
    def slider_update(value):
        # Slider callback; `value` is ignored -- both sliders are read
        # directly so either slider triggers a full refresh.
        r_idx, T_idx = int(r_slider.val), int(T_slider.val)-1
        # normalise by the first correlation entry (presumably g at r=0)
        correlation_function = correlation[1:,T_idx]/correlation[0,T_idx]
        # use only nonzero correlation values for fitting
        r = np.log(r_range[correlation_function>0])
        y = np.log(correlation_function[correlation_function>0])
        correlation_plot.set_xdata(r[0:r_idx])
        correlation_plot.set_ydata(y[0:r_idx])
        # least squares fit using scipy package.
        fit_data = stats.linregress(correlation_plot.get_xdata(), correlation_plot.get_ydata())
        slope, intercept, r_value = fit_data[0], fit_data[1], fit_data[2]
        least_squares_fit.set_label(r'${\rmFit:}\; m = %.3f,\;r^2 = %.3f,\;T=%.3f$'
                                    % (slope, r_value**2, T_range[T_idx]))
        # plot least squares fit.
        least_squares_fit.set_ydata((slope*correlation_plot.get_xdata()+intercept))
        least_squares_fit.set_xdata(correlation_plot.get_xdata())
        # set new axes bounds.
        ax.set_xlim(min(correlation_plot.get_xdata()), max(correlation_plot.get_xdata()))
        ax.set_ylim(min(correlation_plot.get_ydata()), max(correlation_plot.get_ydata()))
        # refresh figure.
        ax.legend(loc='lower left')
        fig.canvas.draw_idle()
    r_slider.on_changed(slider_update) # set slider callback function.
    T_slider.on_changed(slider_update) # set slider callback function.
    slider_update(True) # initialize plot
    plt.show()
def plot_bond_lattice(lattice, worm, observables):
    """
    Displays the bond lattice corresponding to the most recent temperature used.

    Draws the L x L grid, overlays the active bonds in red, and marks the
    worm's tail (blue square) and head (green triangle).
    """
    # create bond grid for plotting
    line_range = np.linspace(0, lattice.L, lattice.L+1)
    x_grid, y_grid = np.meshgrid(line_range, line_range)
    # initialize figure.
    fig = plt.figure(figsize=(9, 9))
    ax = plt.axes(xlim=(0, lattice.L), ylim=(0, lattice.L))
    # show the final temperature and mean energy in the x-label
    ax.set_xlabel(r'$T = %.2f,\;\langle H \rangle = %.3f$'
                  % (observables.T_range[-1], observables.mean_energy[0, -1]),
                  fontsize=16, position=(0.5,-0.085))
    plt.subplots_adjust(bottom=0.1, top=0.96, right=0.96, left=0.04)
    # create grid (gray lines).
    plt.plot(x_grid, y_grid, c='#dddddd', lw=1)
    plt.plot(y_grid, x_grid, c='#dddddd', lw=1)
    ax.set_title(r'$\rm{\bf High\ Temperature\ Domain\!\ }$',
                 fontsize=14, loc=('center'))
    # convert boolean bond data to numeric arrays for plotting.
    # NOTE(review): `colors` and `cm` are currently unused -- the color-cycle
    # line below is commented out, so all bonds are drawn in red.
    colors = ['aquamarine', 'midnightblue', 'skyblue', 'blueviolet', 'cadetblue', 'cornflowerblue', 'coral', 'firebrick', 'purple']
    #colors = ['azure']*8
    # plot bond lines.
    cm = plt.get_cmap('jet')
    #ax.set_color_cycle([cm(1.*i/(worm.q-1)) for i in range(worm.q-1)])
    for i in range(1, 2):
        # bonds[0] / bonds[1] appear to hold horizontal / vertical bond
        # states respectively (judging by the +1 offsets below) -- TODO confirm
        xh = x_grid[lattice.bonds[0]==i].flatten()
        yh = y_grid[lattice.bonds[0]==i].flatten()
        xv = x_grid[lattice.bonds[1]==i].flatten()
        yv = y_grid[lattice.bonds[1]==i].flatten()
        # each column of (h_bonds, v_bonds) is one bond segment's endpoints
        h_bonds = np.hstack((np.vstack((xh, xh+1)), np.vstack((xv, xv))))
        v_bonds = np.hstack((np.vstack((yh, yh)), np.vstack((yv, yv+1))))
        plt.plot(h_bonds, v_bonds, 'r', lw=3)
    # plot worm head and tail.
    plt.plot(worm.tail[0], worm.tail[1], 'bs', ms=10)
    plt.plot(worm.head[0], worm.head[1], 'g>', ms=15)
    # disable clipping to show periodic bonds.
    for o in fig.findobj():
        o.set_clip_on(False)
def plot_observables(observables):
    """Plot the measured mean energy against temperature."""
    temps = observables.T_range
    energies = observables.mean_energy[0, :]
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111,
                         xlim=(temps[0], temps[-1]),
                         ylim=(energies[0], energies[-1]))
    ax.set_xlabel("Temperature [K]")
    ax.set_ylabel("Energy [$k_b$]")
    # order of magnitude of the MC-step count, for the title
    digits = int(np.log10(observables.mcsteps))
    ax.set_title(r'$\rm{\bf Ising\,2D:}\,%s^2 Grid,\,%.1f\!\times 10^{%u}MCSteps$'
                 % (observables.L, observables.mcsteps/(10**digits), digits),
                 fontsize=14, loc=('center'))
    plt.subplots_adjust(bottom=0.15, top=0.9, right=0.95, left=0.15)
    ax.plot(temps, energies, 'bo')
def main():
    """Load the pickled simulation data and show the bond-lattice plot.

    The correlation and observables plots are available but currently
    disabled (commented out below).
    """
    lattice, worm, observables = load_data()
    #plot_correlation_loglog(observables)
    plot_bond_lattice(lattice, worm, observables)
    #plot_observables(observables)
    plt.show()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1666501 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Contact : <EMAIL>
# @Time : 10/9/2020 5:34 PM
# @File : CirculantMatrixTracker.py
# @Software: PyCharm
from __future__ import print_function
import os
import os.path
import sys
import glob
import time
from optparse import OptionParser
import scipy.misc
import pylab
debug = False  # module-level debug switch; not referenced in the visible code
class CirculantMatrixTracker:
    """Skeleton of a circulant-matrix (KCF-style) object tracker.

    NOTE(review): every method below is a stub -- no tracking logic is
    implemented yet; all methods return None.
    """

    def __init__(self, object_example):
        """Initialise the tracker from an example image of the target object.

        Stub: no state is stored yet.
        """
        pass

    def find(self, image):
        """Search ``image`` for the tracked object.

        Intended to return the x/y coordinates where the object was found
        together with a score; currently a stub returning None.
        """
        pass

    def update_template(self, new_example, forget_factor=1):
        """Blend ``new_example`` into the tracking template.

        ``new_example`` is expected to match the size of the example provided
        to the constructor; ``forget_factor`` would weight old vs. new data.
        Currently a stub with no effect.
        """
        pass
30221 | <reponame>mengelhard/cft
import numpy as np
import pandas as pd
import tensorflow as tf
import itertools
import datetime
import os

from sklearn.metrics import roc_auc_score

# Project-local helpers: model building blocks and batch loading/metrics.
# NOTE(review): `rae` appears unused in this file — confirm before removing.
from model import mlp, lognormal_nlogpdf, lognormal_nlogsurvival
from model_reddit import load_batch
from train_reddit import get_files
from train_mimic import rae_over_samples, rae, ci

# Location of the preprocessed reddit batches (cluster path by default;
# local alternatives kept commented out).
REDDIT_DIR = '/scratch/mme4/reddit'
#REDDIT_DIR = '/Users/mme/projects/cft/data/reddit_subset'
#REDDIT_DIR = '/home/rapiduser/dictionary_collection_2'
def main():
    """Run the reddit baseline grid and append one CSV row per configuration.

    Iterates 3 model types x 10 repeats x 2 censoring factors and writes
    results to ../results/reddit_baselines_<utc>.csv.
    """
    train_fns, val_fns, test_fns = get_files(REDDIT_DIR)

    # Timestamp used to make the results filename unique.
    # NOTE(review): strftime('%s') is platform-specific (epoch seconds on
    # glibc); confirm it behaves as intended on the target system.
    utc = datetime.datetime.utcnow().strftime('%s')

    n_outputs = 9

    results_fn = os.path.join(
        os.path.split(os.getcwd())[0],
        'results',
        'reddit_baselines_' + utc + '.csv')

    # CSV header: run metadata followed by per-output metric columns.
    head = ['status', 'model_type', 'hidden_layer_size', 'censoring_factor',
            'n_iter', 'final_train_nll', 'final_val_nll']
    head += ['mean_auc'] + [('auc%i' % i) for i in range (n_outputs)]
    head += ['mean_raem'] + [('raem%i' % i) for i in range(n_outputs)]
    head += ['mean_raea'] + [('raea%i' % i) for i in range(n_outputs)]
    head += ['mean_ci'] + [('ci%i' % i) for i in range(n_outputs)]

    with open(results_fn, 'w+') as results_file:
        print(', '.join(head), file=results_file)

    # 3 model types x 10 repeats x 2 censoring factors.
    for i in range(3 * 10 * 2):

        hidden_layer_sizes = (750, )
        model_type = ['survival', 'c_mlp', 's_mlp'][i % 3]
        censoring_factor = [2., 3.][i // 30]

        print('Running', model_type, 'with layers', hidden_layer_sizes)

        # The try/except guard was disabled to surface failures during debugging.
        #try:
        n_iter, final_train_nll, final_val_nll, aucs, raes_median, raes_all, cis = train_baseline(
            model_type, censoring_factor, hidden_layer_sizes,
            train_fns, val_fns, test_fns)
        status = 'complete'

        # except:
        #     n_iter, final_train_nll, final_val_nll = [np.nan] * 3
        #     aucs = [np.nan] * n_outputs
        #     raes_median = [np.nan] * n_outputs
        #     raes_all = [np.nan] * n_outputs
        #     cis = [np.nan] * n_outputs
        #     status = 'failed'

        results = [status, model_type, hidden_layer_sizes[0],
                   censoring_factor, n_iter, final_train_nll,
                   final_val_nll]
        results += [np.nanmean(aucs)] + aucs
        results += [np.nanmean(raes_median)] + raes_median
        results += [np.nanmean(raes_all)] + raes_all
        results += [np.nanmean(cis)] + cis
        results = [str(r) for r in results]

        # Append this run's row immediately so partial grids are preserved.
        with open(results_fn, 'a') as results_file:
            print(', '.join(results), file=results_file)

        print('Run complete with status:', status)
def train_baseline(model_type, censoring_factor, hidden_layer_sizes, train_fns, val_fns, test_fns):
    """Train one baseline model and evaluate it on the test files.

    model_type is one of 'survival', 'c_mlp', 's_mlp'. Returns
    (n_iter, final_train_nll, final_val_nll, aucs, raes_median, raes_all, cis)
    where the last four are per-output lists.
    """
    tf.reset_default_graph()

    # BUGFIX: compare strings with ==, not `is` — identity of string literals
    # is a CPython interning detail and raises SyntaxWarning on 3.8+.
    if model_type == 'survival':
        mdl = SurvivalModel(
            decoder_layer_sizes=hidden_layer_sizes, dropout_pct=.5,
            censoring_factor=censoring_factor)
    else:
        mdl = PosNegModel(
            encoder_layer_sizes=hidden_layer_sizes, dropout_pct=.5,
            censoring_factor=censoring_factor)

    with tf.Session() as sess:

        train_stats, val_stats = mdl.train(
            sess, train_fns, val_fns,
            100, train_type=model_type,
            max_epochs_no_improve=3, learning_rate=3e-4,
            verbose=False)

        predictions, c_val, t_val, s_val = mdl.predict(
            sess, test_fns)

    # Transpose [(idx, nll, ...), ...] into per-field tuples.
    train_stats = list(zip(*train_stats))
    val_stats = list(zip(*val_stats))

    n_iter = train_stats[0][-1]
    final_train_nll = train_stats[1][-1]
    final_val_nll = val_stats[1][-1]

    n_out = np.shape(c_val)[1]

    if model_type == 'survival':
        # Rank-transform predicted times into pseudo-probabilities for AUC.
        c_prob_pred = t_to_prob(predictions)
        raes = [rae_over_samples(t_val[:, i], s_val[:, i], predictions[..., i]) for i in range(n_out)]
        cis = [ci(t_val[:, i], s_val[:, i], predictions[..., i]) for i in range(n_out)]
        raes_median, raes_all = list(zip(*raes))
    else:
        # Classifier baselines predict probabilities directly; time-based
        # metrics are not defined for them.
        c_prob_pred = predictions
        raes_median = [np.nan] * n_out
        raes_all = [np.nan] * n_out
        cis = [np.nan] * n_out

    aucs = [roc_auc_score(c_val[:, i], c_prob_pred[:, i]) for i in range(n_out)]

    return n_iter, final_train_nll, final_val_nll, aucs, list(raes_median), list(raes_all), cis
def time_to_prob(x):
    """Map each time in *x* to 1 - rank/len(x) (higher time -> lower prob).

    Duplicate values take the rank of their last occurrence in sorted order.
    """
    rank = {}
    for position, value in enumerate(sorted(x)):
        rank[value] = position
    n = len(x)
    return np.array([1 - rank[value] / n for value in x])
def t_to_prob(x):
    """Apply time_to_prob to each column of the 2-D array *x*."""
    per_column = [time_to_prob(column) for column in x.T]
    return np.column_stack(per_column)
class SurvivalModel:
    """Lognormal survival baseline.

    An MLP decoder predicts the (mu, logvar) parameters of a lognormal
    event-time distribution for each of the 9 outputs; training minimizes the
    censored negative log-likelihood.
    """

    def __init__(self,
                 embedding_layer_sizes=(),
                 decoder_layer_sizes=(),
                 dropout_pct=0.,
                 censoring_factor=2.,
                 activation_fn=tf.nn.relu):

        self.embedding_layer_sizes = embedding_layer_sizes
        self.decoder_layer_sizes = decoder_layer_sizes
        self.dropout_pct = dropout_pct
        self.censoring_factor = censoring_factor
        self.activation_fn = activation_fn

    def train(self, sess,
              train_files,
              val_files,
              max_epochs,
              train_type='survival',
              max_epochs_no_improve=0,
              learning_rate=1e-3,
              batch_size=300, batch_eval_freq=1,
              verbose=False):
        """Build the graph and fit; returns (train_stats, val_stats).

        Stops early when validation NLL fails to improve for more than
        max_epochs_no_improve consecutive epochs.
        """
        # BUGFIX: use == for string comparison, not `is` (identity of string
        # literals is an interning detail; SyntaxWarning on Python 3.8+).
        assert train_type == 'survival'

        self.n_outputs = 9
        self.opt = tf.train.AdamOptimizer(learning_rate)

        self._build_placeholders()
        self._build_x()
        self._build_model()

        sess.run(tf.global_variables_initializer())

        train_stats = []
        val_stats = []

        best_val_nloglik = np.inf
        n_epochs_no_improve = 0

        batches_per_epoch = len(train_files)

        for epoch_idx in range(max_epochs):

            for batch_idx, batch_file in enumerate(train_files):

                xvb, xfb, cb, tb, sb = load_batch(
                    batch_file, censoring_factor=self.censoring_factor)

                lgnrm_nlogp_, lgnrm_nlogs_, _ = sess.run(
                    [self.lgnrm_nlogp, self.lgnrm_nlogs, self.train_op],
                    feed_dict={self.xv: xvb, self.xf: xfb, self.t: tb, self.s: sb, self.is_training: True})

                # Numerical-stability diagnostics.
                if np.isnan(np.mean(lgnrm_nlogp_)):
                    print('Warning: lgnrm_nlogp is NaN')
                if np.isnan(np.mean(lgnrm_nlogs_)):
                    print('Warning: lgnrm_nlogs is NaN')

                if batch_idx % batch_eval_freq == 0:
                    idx = epoch_idx * batches_per_epoch + batch_idx
                    train_stats.append(
                        (idx, ) + self._get_train_stats(
                            sess, xvb, xfb, tb, sb))

            idx = (epoch_idx + 1) * batches_per_epoch

            current_val_stats = []

            for val_batch_idx, batch_file in enumerate(val_files):
                xvb, xfb, cb, tb, sb = load_batch(
                    batch_file, censoring_factor=self.censoring_factor)
                current_val_stats.append(
                    self._get_train_stats(
                        sess, xvb, xfb, tb, sb))

            print('current val stats are:', current_val_stats)
            val_stats.append((idx, ) + tuple(np.mean(current_val_stats, axis=0)))

            print('Completed Epoch %i' % epoch_idx)

            if verbose:
                self._summarize(
                    np.mean(train_stats[-batches_per_epoch:], axis=0),
                    val_stats[-1],
                    batches_per_epoch)

            # Early stopping on validation negative log-likelihood.
            if val_stats[-1][1] < best_val_nloglik:
                best_val_nloglik = val_stats[-1][1]
                n_epochs_no_improve = 0
            else:
                n_epochs_no_improve += 1
            if n_epochs_no_improve > max_epochs_no_improve:
                break

        return train_stats, val_stats

    def _build_model(self):
        """Decoder outputs -> mean censored lognormal NLL and Adam train op."""
        self.t_mu, self.t_logvar = self._decoder(self.x)
        nll = self._nloglik(self.t_mu, self.t_logvar)
        self.nll = tf.reduce_mean(nll)
        self.train_op = self.opt.minimize(self.nll)

    def _nloglik(self, t_mu, t_logvar):
        """Censored lognormal NLL: density term for observed events (s == 1),
        survival term for censored ones (s == 0).

        NOTE(review): reads self.t_mu / self.t_logvar rather than the
        arguments; callers pass those same tensors, so behavior is unchanged.
        """
        self.lgnrm_nlogp = lognormal_nlogpdf(self.t, self.t_mu, self.t_logvar)
        self.lgnrm_nlogs = lognormal_nlogsurvival(self.t, self.t_mu, self.t_logvar)
        nll = self.s * self.lgnrm_nlogp + (1 - self.s) * self.lgnrm_nlogs
        return nll

    def _build_placeholders(self):
        # xv: 20 token embeddings of width 512 per example; xf: 2 extra features.
        self.xv = tf.placeholder(
            shape=(None, 20, 512),
            dtype=tf.float32)
        self.xf = tf.placeholder(
            shape=(None, 2),
            dtype=tf.float32)
        # t: event/censoring times; s: event indicators (per output).
        self.t = tf.placeholder(
            shape=(None, self.n_outputs),
            dtype=tf.float32)
        self.s = tf.placeholder(
            shape=(None, self.n_outputs),
            dtype=tf.float32)
        self.is_training = tf.placeholder(
            shape=(),
            dtype=tf.bool)

    def _build_x(self):
        """Pool refined token embeddings (max + mean) and append xf."""
        with tf.variable_scope('embeddings'):
            x_refined = mlp(
                self.xv, self.embedding_layer_sizes,
                dropout_pct=0.,
                activation_fn=tf.nn.tanh)
        x_max = tf.reduce_max(x_refined, axis=1)
        x_mean = tf.reduce_mean(x_refined, axis=1)
        self.x = tf.concat([x_max, x_mean, self.xf], axis=1)

    def _decoder(self, h, reuse=False):
        """MLP head producing per-output lognormal mu and log-variance."""
        with tf.variable_scope('decoder', reuse=reuse):
            hidden_layer = mlp(
                h, self.decoder_layer_sizes,
                dropout_pct=self.dropout_pct,
                activation_fn=self.activation_fn,
                training=self.is_training,
                reuse=reuse)
            mu = tf.layers.dense(
                hidden_layer, self.n_outputs,
                activation=None,
                name='mu',
                reuse=reuse)
            logvar = tf.layers.dense(
                hidden_layer, self.n_outputs,
                activation=None,
                name='logvar',
                reuse=reuse)
            # exp(mu) is the lognormal median, used as the point prediction.
            self.t_pred = tf.exp(mu)
            self.decoder_vars = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES,
                scope='decoder')
        return mu, logvar

    def _get_train_stats(self, sess, xvs, xfs, ts, ss):
        """Evaluate (nll, mean mu, mean logvar) on one batch without dropout."""
        nloglik, mean, logvar = sess.run(
            [self.nll,
             self.t_mu,
             self.t_logvar],
            feed_dict={self.xv: xvs, self.xf: xfs, self.t: ts, self.s: ss, self.is_training: False})
        return nloglik, np.mean(mean), np.mean(logvar)

    def _summarize(self, train_stats, val_stats, batches_per_epoch):
        print('nloglik (train) = %.2e' % train_stats[1])
        print('t_mu: %.2e' % train_stats[2], 't_logvar: %.2e' % train_stats[3])
        print('nloglik (val) = %.2e' % val_stats[1])
        print('t_mu: %.2e' % val_stats[2], 't_logvar: %.2e\n' % val_stats[3])

    def predict(self, sess, batch_files):
        """Return (t_pred, c, t, s) concatenated over all batch files."""
        t_pred = []
        c = []
        t = []
        s = []

        for idx, batch_file in enumerate(batch_files):

            xvb, xfb, cb, tb, sb = load_batch(
                batch_file, censoring_factor=self.censoring_factor)

            t_pred_ = sess.run(
                self.t_pred,
                feed_dict={self.xv: xvb, self.xf: xfb, self.is_training: False})

            t_pred.append(t_pred_)
            c.append(cb)
            t.append(tb)
            s.append(sb)

        t_pred = np.concatenate(t_pred, axis=0)
        c = np.concatenate(c, axis=0)
        t = np.concatenate(t, axis=0)
        s = np.concatenate(s, axis=0)

        return t_pred, c, t, s
class PosNegModel: ## amend this to use c vs s
    """Binary-outcome MLP baseline.

    Predicts the 9 labels directly with a sigmoid cross-entropy loss.
    `train_type` selects which label column feeds the loss: 's_mlp' uses the
    event indicators s, 'c_mlp' loads the c labels into the s placeholder.
    """

    def __init__(self,
                 embedding_layer_sizes=(),
                 encoder_layer_sizes=(),
                 dropout_pct=0.,
                 censoring_factor=2.,
                 activation_fn=tf.nn.relu):

        self.embedding_layer_sizes = embedding_layer_sizes
        self.encoder_layer_sizes = encoder_layer_sizes
        self.dropout_pct = dropout_pct
        self.censoring_factor = censoring_factor
        self.activation_fn = activation_fn

    def train(self, sess,
              train_files,
              val_files,
              max_epochs,
              train_type='s_mlp', max_epochs_no_improve=1,
              learning_rate=1e-3,
              batch_size=300, batch_eval_freq=10,
              verbose=False):
        """Build the graph and fit; returns (train_stats, val_stats)."""

        self.train_type = train_type
        self.n_outputs = 9
        self.opt = tf.train.AdamOptimizer(learning_rate)

        self._build_placeholders()
        self._build_x()
        self._build_model()

        sess.run(tf.global_variables_initializer())

        train_stats = []
        val_stats = []

        best_val_nloglik = np.inf
        n_epochs_no_improve = 0

        batches_per_epoch = len(train_files)

        for epoch_idx in range(max_epochs):

            for batch_idx, batch_file in enumerate(train_files):

                # Default load; also the fallback if train_type is unknown.
                xvb, xfb, cb, tb, sb = load_batch(
                    batch_file, censoring_factor=self.censoring_factor)

                # BUGFIX: compare strings with ==, not `is` (identity of
                # string literals is an interning detail; SyntaxWarning 3.8+).
                if train_type == 's_mlp':
                    xvb, xfb, _, tb, sb = load_batch(
                        batch_file, censoring_factor=self.censoring_factor)
                elif train_type == 'c_mlp':
                    # Load the c labels into sb so the same loss applies.
                    xvb, xfb, sb, tb, _ = load_batch(
                        batch_file, censoring_factor=self.censoring_factor)

                sess.run(
                    self.train_op,
                    feed_dict={self.xv: xvb, self.xf: xfb, self.s: sb, self.is_training: True})

                if batch_idx % batch_eval_freq == 0:
                    idx = epoch_idx * batches_per_epoch + batch_idx
                    train_stats.append((idx, self._get_train_stats(sess, xvb, xfb, sb)))

            idx = (epoch_idx + 1) * batches_per_epoch

            current_val_stats = []

            for val_batch_idx, batch_file in enumerate(val_files):
                if train_type == 's_mlp':
                    xvb, xfb, _, tb, sb = load_batch(
                        batch_file, censoring_factor=self.censoring_factor)
                elif train_type == 'c_mlp':
                    xvb, xfb, sb, tb, _ = load_batch(
                        batch_file, censoring_factor=self.censoring_factor)
                current_val_stats.append(self._get_train_stats(sess, xvb, xfb, sb))

            val_stats.append((idx, np.mean(current_val_stats)))

            print('Completed Epoch %i' % epoch_idx)

            if verbose:
                self._summarize(
                    np.mean(train_stats[-batches_per_epoch:], axis=0),
                    val_stats[-1],
                    batches_per_epoch)

            # Early stopping on validation negative log-likelihood.
            if val_stats[-1][1] < best_val_nloglik:
                best_val_nloglik = val_stats[-1][1]
                n_epochs_no_improve = 0
            else:
                n_epochs_no_improve += 1
            if n_epochs_no_improve > max_epochs_no_improve:
                break

        return train_stats, val_stats

    def _build_model(self):
        """Encoder logits -> mean sigmoid cross-entropy and Adam train op."""
        self.s_logits, self.s_probs = self._encoder(self.x)
        nll = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.s,
            logits=self.s_logits)
        self.nll = tf.reduce_mean(nll)
        self.train_op = self.opt.minimize(self.nll)

    def _build_placeholders(self):
        # xv: 20 token embeddings of width 512 per example; xf: 2 extra features.
        self.xv = tf.placeholder(
            shape=(None, 20, 512),
            dtype=tf.float32)
        self.xf = tf.placeholder(
            shape=(None, 2),
            dtype=tf.float32)
        self.s = tf.placeholder(
            shape=(None, self.n_outputs),
            dtype=tf.float32)
        self.is_training = tf.placeholder(
            shape=(),
            dtype=tf.bool)

    def _build_x(self):
        """Pool refined token embeddings (max + mean) and append xf."""
        with tf.variable_scope('embeddings'):
            x_refined = mlp(
                self.xv, self.embedding_layer_sizes,
                dropout_pct=0.,
                activation_fn=tf.nn.tanh)
        x_max = tf.reduce_max(x_refined, axis=1)
        x_mean = tf.reduce_mean(x_refined, axis=1)
        self.x = tf.concat([x_max, x_mean, self.xf], axis=1)

    def _encoder(self, h):
        """MLP head producing per-output logits and sigmoid probabilities."""
        with tf.variable_scope('encoder'):
            hidden_layer = mlp(
                h, self.encoder_layer_sizes,
                dropout_pct=self.dropout_pct,
                training=self.is_training,
                activation_fn=self.activation_fn)
            logits = tf.layers.dense(
                hidden_layer, self.n_outputs,
                activation=None, name='logit_weights')
            probs = tf.nn.sigmoid(logits)
            self.encoder_vars = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES,
                scope='encoder')
        return logits, probs

    def _get_train_stats(self, sess, xvs, xfs, ss):
        """Evaluate the batch NLL without dropout."""
        nloglik = sess.run(
            self.nll,
            feed_dict={self.xv: xvs, self.xf: xfs, self.s: ss, self.is_training: False})
        return nloglik

    def _summarize(self, train_stats, val_stats, batches_per_epoch):
        print('nloglik (train) = %.2e' % train_stats)
        print('nloglik (val) = %.2e' % val_stats)

    def predict(self, sess, batch_files):
        """Return (s_probs, c, t, s) concatenated over all batch files."""
        s_probs = []
        c = []
        t = []
        s = []

        for idx, batch_file in enumerate(batch_files):

            xvb, xfb, cb, tb, sb = load_batch(
                batch_file, censoring_factor=self.censoring_factor)

            s_probs_ = sess.run(
                self.s_probs,
                feed_dict={self.xv: xvb, self.xf: xfb, self.is_training: False})

            s_probs.append(s_probs_)
            c.append(cb)
            t.append(tb)
            s.append(sb)

        s_probs = np.concatenate(s_probs, axis=0)
        c = np.concatenate(c, axis=0)
        t = np.concatenate(t, axis=0)
        s = np.concatenate(s, axis=0)

        return s_probs, c, t, s
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
197729 | from app import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
class User(db.Model):
    """Account of a bar owner; passwords are stored only as werkzeug hashes."""
    id = db.Column(db.Integer, primary_key=True)
    token = db.Column(db.String(80), index=True, unique=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))
    nome_bar = db.Column(db.String(120), index=True, unique=True)

    def __repr__(self):
        # SECURITY FIX: the password hash was previously included in the
        # repr, leaking it into logs and debug output; it is omitted now.
        return '<User {}>'.format(self.username) + '<Id {}>'.format(self.id) + '<Email {}>'.format(self.email)

    def set_password(self, password):
        """Hash and store the given plaintext password."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True iff the plaintext password matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Food(db.Model):
    """Menu item offered by a bar."""
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(120))        # dish name
    image = db.Column(db.BLOB)              # raw image bytes
    tipologia = db.Column(db.String(120))   # category/type
    descrizione = db.Column(db.String(120))
    prezzo = db.Column(db.Integer)          # price
    sempre_attivo = db.Column(db.Boolean)   # "always available" flag
    data = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # NOTE(review): this FK targets User.nome_bar (the bar's unique name),
    # not the user's id — confirm that is intentional.
    nome_food = db.Column(db.String(120), db.ForeignKey('user.nome_bar'))

    def __repr__(self):
        return '<Primo piatto {}>'.format(self.nome) + '<Id {}>'.format(self.id)
class Building(db.Model):
    """Course-to-building mapping (presumably imported from an external
    system, given the *_ga_id columns — confirm with the data source)."""
    id_corso = db.Column(db.Integer, primary_key=True)
    struttura_des = db.Column(db.String(120))  # building description
    struttura_id = db.Column(db.String(10))    # building code
    struttura_ga_id = db.Column(db.Integer)
    corso_ga_id = db.Column(db.String(10))

    def __repr__(self):
        return '<Id Corso {}>'.format(self.id_corso)
57843 | <reponame>zxdavb/evohome-radio<filename>evohome/logger.py<gh_stars>0
"""Evohome serial."""
import logging
from .const import LOGGING_FILE
# CON_FORMAT = "%(message).164s" # Virtual
CON_FORMAT = "%(message).220s" # Laptop
# CON_FORMAT = "%(message).292s" # Monitor
LOG_FORMAT = "%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s"
logging.basicConfig(
level=logging.INFO,
format=LOG_FORMAT,
datefmt="%Y-%m-%d %H:%M:%S",
filename=LOGGING_FILE,
filemode="a",
)
_CONSOLE = logging.StreamHandler()
_CONSOLE.setLevel(logging.DEBUG)
_CONSOLE.setFormatter(logging.Formatter(CON_FORMAT, datefmt="%H:%M:%S"))
_LOGGER = logging.getLogger(__name__)
_LOGGER.addHandler(_CONSOLE)
| StarcoderdataPython |
1713316 | <reponame>glstr/python_learning
#!/usr/bin/python
# coding=utf-8
def keyvaidir():
    # Demonstrates two equivalent ways of iterating a dict's key/value pairs.
    # NOTE: Python 2 syntax (print statement) — this will not run on Python 3.
    d = {"hello": 1}
    # Iterate (key, value) pairs directly.
    for key, value in d.items():
        print key, 'corresponds to', value
    # Iterate keys and look each value up.
    for key in d:
        print key, 'corresponds to', d[key]
175377 | <filename>tests/core/test_utils.py
import asyncio
from unittest import mock
import pytest
from tests.utils import async
from waterbutler.core import utils
class TestAsyncRetry:
    """Tests for waterbutler.core.utils.async_retry.

    NOTE(review): these use pre-3.5 generator-based coroutines and import a
    decorator literally named `async`, which became a reserved keyword in
    Python 3.7 — this module only runs on older interpreters.
    """

    @async
    def test_returns_success(self):
        # A function that succeeds immediately is called exactly once.
        mock_func = mock.Mock(return_value='Foo')
        retryable = utils.async_retry(5, 0, raven=None)(mock_func)

        x = yield from retryable()
        assert x == 'Foo'
        assert mock_func.call_count == 1

    @async
    def test_retries_until(self):
        # One failure, then success: yielding the returned task again
        # resolves to the eventual result after a single retry.
        mock_func = mock.Mock(side_effect=[Exception(), 'Foo'])
        retryable = utils.async_retry(5, 0, raven=None)(mock_func)

        first = yield from retryable()
        x = yield from first
        assert x == 'Foo'
        assert mock_func.call_count == 2

    @async
    def test_retries_then_raises(self):
        # Always failing: after 5 retries (6 calls total) the original
        # exception propagates out of the final task.
        mock_func = mock.Mock(side_effect=Exception('Foo'))
        retryable = utils.async_retry(5, 0, raven=None)(mock_func)

        coro = yield from retryable()
        with pytest.raises(Exception) as e:
            for _ in range(10):
                assert isinstance(coro, asyncio.Task)
                coro = yield from coro

        assert e.type == Exception
        assert e.value.args == ('Foo',)
        assert mock_func.call_count == 6

    @async
    def test_retries_by_its_self(self):
        # Fire-and-forget: the retry chain progresses without being awaited.
        mock_func = mock.Mock(side_effect=Exception())
        retryable = utils.async_retry(8, 0, raven=None)(mock_func)

        retryable()
        yield from asyncio.sleep(.1)

        assert mock_func.call_count == 9

    def test_docstring_survives(self):
        # The decorator must preserve __doc__ (i.e. use functools.wraps).
        def mytest():
            '''This is a docstring'''
            pass

        retryable = utils.async_retry(8, 0, raven=None)(mytest)

        assert retryable.__doc__ == '''This is a docstring'''

    @async
    def test_kwargs_work(self):
        # Positional and keyword arguments are forwarded on every retry.
        def mytest(mack, *args, **kwargs):
            mack()
            assert args == ('test', 'Foo')
            assert kwargs == {'test': 'Foo', 'baz': 'bam'}
            return True

        retryable = utils.async_retry(8, 0, raven=None)(mytest)
        merk = mock.Mock(side_effect=[Exception(''), 5])

        fut = retryable(merk, 'test', 'Foo', test='Foo', baz='bam')
        assert (yield from (yield from fut))
        assert merk.call_count == 2

    @async
    def test_all_retry(self):
        # Two independent invocations retry independently (9 calls each).
        mock_func = mock.Mock(side_effect=Exception())
        retryable = utils.async_retry(8, 0, raven=None)(mock_func)

        retryable()
        retryable()
        yield from asyncio.sleep(.1)

        assert mock_func.call_count == 18
| StarcoderdataPython |
4838944 | <gh_stars>0
# -*-coding:utf8-*-
""" SendPulse REST API usage example
Documentation:
https://login.sendpulse.com/manual/rest-api/
https://sendpulse.com/api
"""
from pysendpulse.pysendpulse import PySendPulse

if __name__ == "__main__":
    # NOTE(review): this is documentation-style example code. The ALL-CAPS
    # names used below (WEBSITE_ID, SUBSCRIBER_ID, STATE, ADDRESSBOOK_ID,
    # CAMPAIGN_ID, SENDER_NAME) are placeholders the reader must define;
    # running the file verbatim raises NameError.
    REST_API_ID = ''
    REST_API_SECRET = ''
    TOKEN_STORAGE = 'memcached'
    SPApiProxy = PySendPulse(REST_API_ID, REST_API_SECRET, TOKEN_STORAGE)

    # Get list of tasks
    SPApiProxy.push_get_tasks()
    # Get list of websites
    SPApiProxy.push_get_websites()
    # Get amount of websites
    SPApiProxy.push_count_websites()
    # Get list of variables for website
    SPApiProxy.push_get_variables(WEBSITE_ID)
    # Get list of subscriptions for website
    SPApiProxy.push_get_subscriptions(WEBSITE_ID)
    # Get amount of subscriptions for website
    SPApiProxy.push_count_subscriptions(WEBSITE_ID)
    # Activate/Deactivate subscriber, state=1 - activate, state=2 - deactivate
    SPApiProxy.push_set_subscription_state(SUBSCRIBER_ID, STATE)
    # Create new push task
    SPApiProxy.push_create('Hello!', WEBSITE_ID, 'This is my first push message', '10',
                           {'filter_lang': 'en', 'filter': '{"variable_name":"some","operator":"or","conditions":[{"condition":"likewith","value":"a"},{"condition":"notequal","value":"b"}]}'})
    # Get balance in Japanese Yen
    SPApiProxy.get_balance('JPY')
    # Get Mailing Lists list example
    SPApiProxy.get_list_of_addressbooks()
    # Get Mailing Lists list with limit and offset example
    SPApiProxy.get_list_of_addressbooks(offset=5, limit=2)

    # Add emails with variables to addressbook
    emails_for_add = [
        {
            'email': '<EMAIL>',
            'variables': {
                'name': 'test11',
                'number': '11'
            }
        },
        {'email': '<EMAIL>'},
        {
            'email': '<EMAIL>',
            'variables': {
                'firstname': 'test33',
                'age': 33,
                'date': '2015-09-30'
            }
        }
    ]
    SPApiProxy.add_emails_to_addressbook(ADDRESSBOOK_ID, emails_for_add)

    # Delete email from addressbook
    emails_for_delete = ['<EMAIL>']
    SPApiProxy.delete_emails_from_addressbook(ADDRESSBOOK_ID, emails_for_delete)
    # Get a list of variables available on a mailing list
    SPApiProxy.get_addressbook_variables(ADDRESSBOOK_ID)
    # Changing a variable for an email contact
    SPApiProxy.set_variables_for_email(ADDRESSBOOK_ID, '<EMAIL>', [{'name': 'foo', 'value': 'bar'}])
    # Get campaigns statistic for list of emails
    emails_list = ['<EMAIL>']
    SPApiProxy.get_emails_stat_by_campaigns(emails_list)
    # Add sender "FROM" email
    SPApiProxy.add_sender('<EMAIL>', '<NAME>')
    # Get list of senders
    SPApiProxy.get_list_of_senders()
    # Add emails to unsubscribe list
    SPApiProxy.smtp_add_emails_to_unsubscribe([
        {'email': '<EMAIL>', 'comment': 'comment_1'},
        {'email': '<EMAIL>', 'comment': 'comment_2'}
    ])

    # Create new email campaign with attaches
    task_body = "<h1>Hello, John!</h1><p>This is the test task from https://sendpulse.com/api REST API!</p>"
    SPApiProxy.add_campaign(from_email='<EMAIL>',
                            from_name='<NAME>',
                            subject='Test campaign from REST API',
                            body=task_body,
                            addressbook_id=ADDRESSBOOK_ID,
                            campaign_name='Test campaign from REST API',
                            attachments={'attach1.txt': '12345\n', 'attach2.txt': '54321\n'})

    # Send mail using SMTP
    email = {
        'subject': 'This is the test task from REST API',
        'html': '<h1>Hello, John!</h1><p>This is the test task from https://sendpulse.com/api REST API!</p>',
        'text': 'Hello, John!\nThis is the test task from https://sendpulse.com/api REST API!',
        'from': {'name': '<NAME>', 'email': '<EMAIL>'},
        'to': [
            {'name': '<NAME>', 'email': '<EMAIL>'}
        ],
        'bcc': [
            {'name': '<NAME>', 'email': '<EMAIL>'}
        ]
    }
    SPApiProxy.smtp_send_mail(email)

    # Send mail with template using SMTP
    email = {
        'subject': 'This is the test task from REST API',
        'from': {'name': '<NAME>', 'email': '<EMAIL>'},
        'to': [
            {'name': '<NAME>', 'email': '<EMAIL>'}
        ],
        "template": {
            'id': '73606', # ID of the template uploaded in the service. Use this
                           # (https://sendpulse.com/integrations/api/bulk-email#template-list)
                           # method to get the template ID (use either real_id or id parameter from the reply)
            'variables': {
                'foo': 'value',
                'bar': 'value'
            }
        },
    }
    SPApiProxy.smtp_send_mail_with_template(email)

    # **************** SMS ***************
    # Add phones to address book
    phones_for_add = [
        '11111111111',
        '22222222222'
    ]
    SPApiProxy.sms_add_phones(ADDRESSBOOK_ID, phones_for_add)

    # Add phones to address book
    phones_for_add = {
        "11111111111":
            [
                [
                    {"name": "test1", "type": "date", "value": "2018-10-10 23:00:00"},
                    {"name": "test2", "type": "string", "value": "asdasd"},
                    {"name": "test3", "type": "number", "value": "123"}
                ]
            ],
        "22222222222":
            [
                [
                    {"name": "test1", "type": "date", "value": "2018-10-10 23:00:00"},
                    {"name": "test2", "type": "string", "value": "czxczx"},
                    {"name": "test3", "type": "number", "value": "456"}
                ]
            ]
    }
    SPApiProxy.sms_add_phones_with_variables(ADDRESSBOOK_ID, phones_for_add)

    # Update phones variables from the address book
    phones_for_update = [
        '11111111111'
    ]
    variables = [
        {
            "name": "name", "type": "string", "value": "Michael"
        }
    ]
    SPApiProxy.sms_update_phones_variables(ADDRESSBOOK_ID, phones_for_update, variables)

    # Get information about phone from the address book
    SPApiProxy.sms_get_phone_info(ADDRESSBOOK_ID, '1111111111')
    # Remove phones to address book
    phones_for_remove = [
        '11111111111',
        '22222222222'
    ]
    SPApiProxy.sms_delete_phones(ADDRESSBOOK_ID, phones_for_remove)
    # Get phones from the blacklist
    SPApiProxy.sms_get_blacklist()
    # Add phones to blacklist
    phones_for_add_to_blacklist = [
        '111222227',
        '222333337'
    ]
    SPApiProxy.sms_add_phones_to_blacklist(phones_for_add_to_blacklist, 'test')
    # Remove phones from blacklist
    phones_for_remove = [
        '11111111111',
        '22222222222'
    ]
    SPApiProxy.sms_delete_phones_from_blacklist(phones_for_remove)
    # Get info by phones from the blacklist
    phones = [
        '11111111111',
        '22222222222'
    ]
    SPApiProxy.sms_get_phones_info_from_blacklist(phones)
    # Create new sms campaign
    SPApiProxy.sms_add_campaign(SENDER_NAME, ADDRESSBOOK_ID, 'test')
    # Send sms by some phones
    phones_for_send = [
        '11111111111'
    ]
    SPApiProxy.sms_send(SENDER_NAME, phones_for_send, 'test')
    # Get list of sms campaigns
    date_from = '2018-04-10 23:00:00'
    date_to = '2018-05-10 23:00:00'
    SPApiProxy.sms_get_list_campaigns(date_from, date_to)
    # Get information about sms campaign
    SPApiProxy.sms_get_campaign_info(CAMPAIGN_ID)
    # Cancel sms campaign
    SPApiProxy.sms_cancel_campaign(CAMPAIGN_ID)
    # Get cost sms campaign
    SPApiProxy.sms_get_campaign_cost('sender', 'test', ADDRESSBOOK_ID)
    # SPApiProxy.sms_get_campaign_cost('sender', 'test', None, ['111111111'])
    # Remove sms campaign
    SPApiProxy.sms_delete_campaign(CAMPAIGN_ID)
| StarcoderdataPython |
3302062 | from .base import Job
| StarcoderdataPython |
3212577 | <reponame>8by8-org/usvotes<gh_stars>1-10
from app.services.steps import Step
from flask import g
# this is a placeholder. No action required, just routing to change-or-apply
class Step_1(Step):
    """Placeholder step: no action required, just routes to change-or-apply."""
    form_requirements = []        # no form fields required for this step
    step_requirements = []        # no prerequisite steps
    endpoint = '/change-or-apply'
    prev_step = 'Step_0'
    next_step = None              # next step decided elsewhere
    is_complete = False

    def run(self):
        # Nothing to do; this step always succeeds.
        return True
| StarcoderdataPython |
161785 | # michaelpeterswa
# kulo.py
import csv
import geojson
import datetime
import numpy as np
from shapely.geometry import shape, MultiPolygon, Polygon, Point
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

# Path to the input GeoJSON of large WA fires.
# NOTE(review): Windows-style backslashes in a non-raw string — "\d" and "\W"
# only survive because they are not valid escapes; prefer a raw string.
input_file = "..\data\Washington_Large_Fires_1973-2019.geojson"
def loadData(filename):
    """Read *filename* and return its parsed GeoJSON content."""
    with open(filename) as handle:
        parsed = geojson.load(handle)
    return parsed
def returnMaxAcreage(fire_data):
    """Return the largest ACRES value among the fires (never below 0)."""
    largest = 0
    for fire in fire_data:
        largest = max(largest, fire["properties"]["ACRES"])
    return largest
def createPolygon(fire):
    """Build a shapely Polygon from the feature's outer coordinate ring."""
    ring = [tuple(coordinate) for coordinate in fire["geometry"]["coordinates"][0]]
    return Polygon(ring)
def createPolygonFromMulti(fire):
    """Return the largest polygon within a MultiPolygon feature.

    Adapted from:
    https://gis.stackexchange.com/questions/166934/python-library-for-converting-geojson-multi-polygon-to-polygon
    """
    pieces = [geom.buffer(0) for geom in shape(fire["geometry"]).buffer(0).geoms]
    return max(pieces, key=lambda piece: piece.area)
def generateCentroid(polygon):
    """Return the polygon's centroid coordinates as a list of tuples."""
    centroid = polygon.centroid
    return list(centroid.coords)
def isMultiPolygonal(fire):
    """Return True iff the feature's geometry type is MultiPolygon."""
    # Idiom fix: the comparison already yields a bool, so the redundant
    # `True if ... else False` conditional was dropped.
    return fire["geometry"]["type"] == "MultiPolygon"
def normalizeFireData(fire_data, max_acres, lat_div=100, long_div=200):
    """Scale fire centroids and sizes into roughly unit ranges.

    fire_data is a list of (centroid, properties) pairs where centroid is
    [(long, lat)]. Returns an (n, 3) float array of rows
    [lat / lat_div, long / long_div, acres / max_acres].
    """
    fire_data_list = []
    for centroid, properties in fire_data:
        # BUGFIX: use the max_acres parameter. The original divided by the
        # global `max_acreage` defined in the __main__ block, so the
        # parameter was silently ignored and the function raised NameError
        # when imported elsewhere.
        fire_size = properties["ACRES"] / max_acres
        fire_lat = centroid[0][1] / lat_div
        fire_long = centroid[0][0] / long_div
        fire_data_list.append([fire_lat, fire_long, fire_size])
    return np.array(fire_data_list)
if __name__ == "__main__":
fire_data = loadData(input_file)
fire_data = fire_data["features"]
max_acreage = returnMaxAcreage(fire_data)
print(max_acreage)
lat_amt = 100
long_amt = 100
results = []
for fire in fire_data:
poly = createPolygonFromMulti(fire) if isMultiPolygonal(fire) else createPolygon(fire)
fire_centroid = generateCentroid(poly)
results.append((fire_centroid, fire["properties"]))
normalized_fire_data = normalizeFireData(results, max_acreage, lat_amt, long_amt)
with open("../data/cleaned_data.csv", 'w', newline='') as f:
csv_obj = csv.writer(f)
csv_obj.writerows(normalized_fire_data)
# #-----------------------------
# X = normalized_fire_data[:,0:2]
# y = normalized_fire_data[:,2]
# model = Sequential()
# model.add(Dense(4, input_dim=2, activation='relu'))
# model.add(Dense(4, activation='relu'))
# model.add(Dense(1, activation='linear'))
# model.compile(loss='mse', optimizer='adam')
# log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
# model.fit(X, y, batch_size=3, epochs=1000, callbacks=[tensorboard_callback], verbose=1)
# results = model.evaluate(X, y)
# model.save("../kulo_model")
# print("Loss: ", results)
# test_lat = 48.383549
# test_long = -120.009935
# samples = [(test_lat / lat_amt, test_long / long_amt)]
# npsamples = np.array(samples)
# predictions = model.predict(samples)
# result_acres = predictions[0][0] * max_acreage
# print("final result for: (", test_lat, ",", test_long, ") at ", result_acres, "acres" ) | StarcoderdataPython |
3238297 | <gh_stars>0
from django.db import models
class Aluno(models.Model):
    """Student record; save() enforces a non-empty, unique login."""

    def save(self, *args, **kwargs):
        # Default the email rather than storing an empty string.
        if (self.email == ''):
            self.email = 'email nao fornecido'
        if (self.login == ''):
            raise Exception("Esta faltando o login")
        # NOTE(review): this also rejects re-saving an existing Aluno, since
        # the record itself matches the filter — consider excluding self.pk.
        if (len(Aluno.objects.filter(login=self.login)) > 0):
            raise Exception("Aluno já cadastrado")
        # if (len(Professor.objects.filter(login=self.login)) > 0):
        #     raise Exception("Login já cadastrado")
        # BUGFIX: forward Django's save options (force_insert, using,
        # update_fields, ...) instead of silently dropping them.
        super(Aluno, self).save(*args, **kwargs)

    nome = models.TextField(max_length=255)
    email = models.TextField(max_length=255)
    celular = models.TextField(max_length=20)
    login = models.TextField(max_length=20)
    senha = models.TextField(max_length=20)
34181 |
import re
import time
import requests
from telethon import events
from userbot import CMD_HELP
from userbot.utils import register
import asyncio
import random
# Pool of emojis sampled at random by the .cp (copypasta) handler below.
# Duplicates are intentional: they weight the random choice.
EMOJIS = [
    "😂",
    "😂",
    "👌",
    "💞",
    "👍",
    "👌",
    "💯",
    "🎶",
    "👀",
    "😂",
    "👓",
    "👏",
    "👐",
    "🍕",
    "💥",
    "😩",
    "😏",
    "😞",
    "👀",
    "👅",
    "😩",
    "🤒",
    "😳",
    "🤯",
    "😵",
    "🥵",
    "🤒",
    "😠",
    "😪",
    "😴",
    "🤤",
    "👿",
    "👽",
    "😏",
    "😒",
    "😣",
    "🤔",
    "🤨",
    "🧐",
    "😝",
    "🤪",
    "🤩",
    "☺️",
    "😭",
    "🥺",
]
ZALG_LIST = [["̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
    """ Vaporize everything! """
    # Only act on command-style messages (non-alphabetic first char that is
    # not another bot's prefix).
    if not vpr.text[0].isalpha() and vpr.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await vpr.get_reply_message()
        message = vpr.pattern_match.group(1)
        if message:
            pass
        elif textx:
            # No inline text given: fall back to the replied-to message.
            message = textx.text
        else:
            await vpr.edit("`Give some text for vapor!`")
            return
        for charac in message:
            if 0x21 <= ord(charac) <= 0x7F:
                # Shift printable ASCII into the fullwidth Unicode block.
                reply_text.append(chr(ord(charac) + 0xFEE0))
            elif ord(charac) == 0x20:
                # Regular space -> ideographic space.
                reply_text.append(chr(0x3000))
            else:
                reply_text.append(charac)
        await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
    """ Stretch it."""
    # Only act on command-style messages (non-alphabetic first char that is
    # not another bot's prefix).
    if not stret.text[0].isalpha() and stret.text[0] not in ("/", "#", "@", "!"):
        textx = await stret.get_reply_message()
        # CLEANUP: removed the dead `message = stret.text` assignment, which
        # was unconditionally overwritten by the pattern-match group below.
        message = stret.pattern_match.group(1)
        if message:
            pass
        elif textx:
            # No inline text given: fall back to the replied-to message.
            message = textx.text
        else:
            await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
            return
        # Repeat every (Latin or Cyrillic) vowel 3-10 times.
        count = random.randint(3, 10)
        reply_text = re.sub(
            r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])",
            (r"\1"*count),
            message
        )
        await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
    """ Invoke the feeling of chaos. """
    # Only act on command-style messages (non-alphabetic first char that is
    # not another bot's prefix).
    if not zgfy.text[0].isalpha() and zgfy.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await zgfy.get_reply_message()
        message = zgfy.pattern_match.group(1)
        if message:
            pass
        elif textx:
            # No inline text given: fall back to the replied-to message.
            message = textx.text
        else:
            await zgfy.edit(
                "`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
            )
            return
        for charac in message:
            if not charac.isalpha():
                reply_text.append(charac)
                continue
            # Attach three random combining marks (one draw per pass, taken
            # from one of the three mark groups) to each letter.
            for _ in range(0, 3):
                randint = random.randint(0, 2)
                if randint == 0:
                    charac = charac.strip() + \
                        random.choice(ZALG_LIST[0]).strip()
                elif randint == 1:
                    charac = charac.strip() + \
                        random.choice(ZALG_LIST[1]).strip()
                else:
                    charac = charac.strip() + \
                        random.choice(ZALG_LIST[2]).strip()
            reply_text.append(charac)
        await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """ Copypasta the famous meme

    Randomly upper/lower-cases letters, sprinkles emojis around words and
    swaps one randomly chosen character for the B-button emoji.  The text
    comes from the command argument or from the replied-to message.
    """
    if not cp_e.text[0].isalpha() and cp_e.text[0] not in ("/", "#", "@", "!"):
        textx = await cp_e.get_reply_message()
        message = cp_e.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
            return
        # start with an emoji, end with an emoji (appended after the loop)
        reply_text = random.choice(EMOJIS)
        b_char = random.choice(
            message
        ).lower()  # choose a random character in the message to be substituted with 🅱️
        for owo in message:
            if owo == " ":
                # word boundary: replace the space with a random emoji
                reply_text += random.choice(EMOJIS)
            elif owo in EMOJIS:
                # keep existing emojis and double them up with a random one
                reply_text += owo
                reply_text += random.choice(EMOJIS)
            elif owo.lower() == b_char:
                reply_text += "🅱️"
            else:
                # coin flip decides the letter's case for the mocking look
                if bool(random.getrandbits(1)):
                    reply_text += owo.upper()
                else:
                    reply_text += owo.lower()
        reply_text += random.choice(EMOJIS)
        await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """ Do it and find the real fun.

    "Mocking SpongeBob" text: each letter has a 50% chance of having its
    case flipped; everything else is copied verbatim.  The text comes from
    the command argument or from the replied-to message.
    """
    if not mock.text[0].isalpha() and mock.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await mock.get_reply_message()
        message = mock.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
            return
        for charac in message:
            # coin flip decides whether this letter's case gets inverted
            if charac.isalpha() and random.randint(0, 1):
                to_app = charac.upper() if charac.islower() else charac.lower()
                reply_text.append(to_app)
            else:
                reply_text.append(charac)
        await mock.edit("".join(reply_text))
CMD_HELP.update({
"fontstyles": ".cp (text) or .cp reply to message \
\nUsage: inserts some emojis in between the texts\
\n\n.vapor (text) or .vapor reply to message \
\nUsage: Vaporize the given text. \
\n\n.str (text) or .str reply to message \
\nUsage: Stretchs the given message.\
\n\n.zal (text) or .zal reply to message \
\nUsage: Invoke the feeling of chaos.\
\n\n.mock (text) or .mock reply to message \
\nUsage: random capital and small letters in given text.\
"
})
| StarcoderdataPython |
4811935 | <filename>src/TestScale.py
#!/usr/bin/python3
import threading
import time
import random
class Scale(threading.Thread):
    """Daemon thread that simulates a scale posting readings.

    Every 0.5 s it writes the string ``<ID><cycle>`` into
    ``container[<ID>]``, where ``<ID>`` is the letter ``'A' + index``,
    until :meth:`stop` is called.  ``comPort`` is accepted for interface
    compatibility with the real serial-port scale; this test double never
    opens it.
    """

    def __init__(self, comPort, container, index):
        threading.Thread.__init__(self, daemon=True)
        # Use a distinctly named private event: the original assigned to
        # ``self._stop``, which shadows threading.Thread's internal
        # ``_stop()`` method (CPython calls it when the thread finishes),
        # producing "'Event' object is not callable".
        self._stop_event = threading.Event()
        self.com_port = comPort
        self.container = container
        self.index = index
        self.cycle = 0

    def run(self):
        """Publish one reading per half-second until stopped."""
        scale_id = chr(ord('A') + self.index)
        while not self.stopped():
            time.sleep(.5)
            self.container[scale_id] = scale_id + str(self.cycle)
            self.cycle += 1

    # A stop function lets the loop exit cleanly; it would also be the
    # place to close the serial port in the real implementation.
    def stop(self):
        """Request the worker loop to exit."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been requested."""
        # is_set() replaces the deprecated camelCase alias isSet()
        return self._stop_event.is_set()
| StarcoderdataPython |
3333989 | # -*- coding: utf-8 -*-
# Author: sunzheng
# Time: 2018/12/26
from selenium.webdriver.common.by import By
class Retail_Sys:
    """Selenium page-object locators for the retail-management UI.

    Every attribute is a ``(By.XPATH, selector)`` tuple intended for
    ``driver.find_element``.  Covers the login page, the retail menu and
    the bulk-order ("large goods") creation / checkout flow.
    """
    # login form fields and submit button
    login_username=(By.XPATH,'//input[@id="username"]')
    login_pwd=(By.XPATH,'//input[@id="password"]')
    login_submit=(By.XPATH,'//button[@id="submit_btn"]')
    # logged-in account indicator (orig: 登录账号)
    login_account=(By.XPATH,'//div[@class="rightUser"]')
    # retail-management menu entry (orig: 零售管理)
    retail_sys=(By.XPATH,'//span[@class="icon-shopping-cart"]')
    # "smart bulk-goods customization" submenu (orig: 智化大货定制)
    big_huo=(By.XPATH,'//dl[@class="cur"]//dd//a[contains(text(),"智化大货定制")]')
    # bulk-goods list iframe (orig: 大货列表)
    bighuo_list=(By.XPATH,'//iframe[@src="/Customization/LargeGoodsMade/Index2"]')
    '''
    下面是大货定制页面对象,应该封装在其他class里面
    '''
    # (above, orig.) the locators below belong to the bulk-order page and
    # should arguably live in their own page-object class
    # member phone number input (orig: 会员手机号)
    mobile_number=(By.XPATH,'//input[@id="SearchKey"]')
    # phone-lookup button (orig: 点击查询按钮)
    search=(By.XPATH,'//button[@id="searchphone"]')
    # product code input (orig: 商品编码)
    sp_number=(By.XPATH,'//input[@id="ItemCode"]')
    # product search button (orig: 点击搜索按钮)
    click_serach=(By.XPATH,'//button[@id="sectionnumber"]')
    # open the shopping-guide (sales rep) dropdown (orig: 选择点击导购框)
    daogou=(By.XPATH,'//button[@data-id="SalesRepNames"]')
    # pick a sales rep from the dropdown (orig: 选择导购)
    xuanz1=(By.XPATH,'//ul[@role="listbox"]//li[1]')
    xuanze=(By.XPATH,'//button//span[text()="陈文凤"]')
    # select "current store" as the delivery address (orig: 选择当前门店)
    click_mlist=(By.XPATH,'//input[@placeholder="请选择收货地址"]')
    mendian=(By.XPATH,'//dd[text()="当前门店"]')
    # create-order button; validation could be added here
    # (orig: 创建订单,可以增加校验)
    caert_order=(By.XPATH,'//button[@id="btn_createCustom"]')
    '''
    订单创建成功,进行订单收银
    2.可以判断是否出现收银,如果出现说明订单创建成功
    '''
    # (above, orig.) after order creation, proceed to checkout; the
    # presence of the checkout button doubles as a creation-success check
    to_collect_money=(By.XPATH,'//button[@id="btn_collectmoney"]')
    # checkout modal page (commented-out candidate locator, kept for
    # reference; orig: 进入收银弹框页面)
    # kmoner=(By.XPATH,'//div[@class="layui-layer layui-layer-page OverflowNoContent"]')
    # order amount label (orig: 订单金额)
    Amoner=(By.XPATH,'//label[@id="DifferenceAmt"]')
    # payment-method dropdown; "现金" = cash
    click_moner=(By.XPATH,'//input[@placeholder="请选择"]')
    moner_type=(By.XPATH,'//dd[text()="现金"]')
    output_moner=(By.XPATH,'//input[@id="CollectionAmount"]')
    add=(By.XPATH,'//a[@id="collectmoneyadd"]')
    # "完成" = finish; "取消" = cancel receipt printing (orig: 取消打印小票)
    gemover=(By.XPATH,'//a[text()="完成"]')
    quit=(By.XPATH,'//a[text()="取消"]')
    #// td[text() = "18820941844"][1]
line_one=(By.XPATH,'//tr[@id="1"]') | StarcoderdataPython |
57018 | <gh_stars>10-100
"""
Problem:
You are given an array X of floating-point numbers x1, x2, ... xn. These can be rounded
up or down to create a corresponding array Y of integers y1, y2, ... yn.
Write an algorithm that finds an appropriate Y array with the following properties:
The rounded sums of both arrays should be equal.
The absolute pairwise difference between elements is minimized. In other words,
|x1- y1| + |x2- y2| + ... + |xn- yn| should be as small as possible.
For example, suppose your input is [1.3, 2.3, 4.4]. In this case you cannot do better
than [1, 2, 5], which has an absolute difference of
|1.3 - 1| + |2.3 - 2| + |4.4 - 5| = 1.
"""
from typing import List, Tuple
def get_fraction_from_tuple(tup: Tuple[int, float]) -> float:
    """Return the fractional part of the value in an (index, value) pair."""
    _, elem = tup
    return elem - int(elem)


def round_arr(arr: List[float]) -> List[int]:
    """Round each element of ``arr`` so the rounded sum equals the
    (truncated) sum of ``arr`` while minimising the total absolute
    pairwise difference ``|x1-y1| + ... + |xn-yn|``.

    Elements are first rounded normally; the residual difference between
    the two sums is then distributed one unit at a time to the elements
    whose fractional part makes the adjustment cheapest (largest fractions
    when incrementing, smallest when decrementing).
    """
    rounded_arr = [round(elem) for elem in arr]
    sum_arr = int(sum(arr))
    sum_rounded_arr = sum(rounded_arr)
    # if the sums are equal, the rounding has been properly implemented
    if sum_arr == sum_rounded_arr:
        return rounded_arr
    # equalizing the sums
    should_increment = sum_arr > sum_rounded_arr
    num_map = sorted(
        [(index, elem) for index, elem in enumerate(arr)],
        key=get_fraction_from_tuple,
        reverse=should_increment,
    )
    # incrementing and decrementing the values as per requirement (while
    # minimizing the pair-wise sum)
    # BUG FIX: the original iterated ``range(sum_arr - sum_rounded_arr)``,
    # which is empty when the rounded sum exceeds the target, so the
    # decrement branch never ran; use the absolute residual instead.
    for i in range(abs(sum_arr - sum_rounded_arr)):
        index, _ = num_map[i]
        rounded_arr[index] = (
            rounded_arr[index] + 1 if should_increment else rounded_arr[index] - 1
        )
    return rounded_arr


if __name__ == "__main__":
    print(round_arr([1.3, 2.3, 4.4]))
    print(round_arr([1.8, 2.8, 4.4]))
"""
SPECS:
TIME COMPLEXITY: O(n x log(n))
SPACE COMPLEXITY: O(n)
"""
| StarcoderdataPython |
4823874 | <filename>ledger/address/__init__.py
# Point Django (pre-3.2 style) at this app's custom AppConfig subclass.
default_app_config = 'ledger.address.config.AddressConfig'
| StarcoderdataPython |
3262883 |
# TODO: replace this hand-rolled conversion with colorsys.rgb_to_yiq
# from the standard library (same NTSC coefficients)
def rgb_to_yiq(r, g, b):
    """Convert an RGB triple to the NTSC YIQ colour space.

    Uses the same coefficients as the standard library's
    ``colorsys.rgb_to_yiq``; returns a ``(y, i, q)`` tuple where ``y`` is
    the luma and ``i``/``q`` are the chrominance components.
    """
    luma = 0.30*r + 0.59*g + 0.11*b
    in_phase = 0.74*(r-luma) - 0.27*(b-luma)
    quadrature = 0.48*(r-luma) + 0.41*(b-luma)
    return (luma, in_phase, quadrature)
| StarcoderdataPython |
30958 | <gh_stars>0
import json
import typing
from collections.abc import Collection
from decimal import Decimal
from functools import reduce
class QueryPredicate:
    """Composable filter that renders Django-style keyword lookups as a
    textual query expression.

    ``QueryPredicate(age__gte=30, name="bob")`` renders as
    ``age >= 30 AND name = "bob"``; leading double-underscore segments
    become nested scopes (``user__age__gt=5`` -> ``user(age > 5)``).
    Predicates combine with ``|`` and ``&``.
    """

    AND = "AND"
    OR = "OR"

    # Lookup suffix -> rendered operator.  The tuple for ``is_defined``
    # holds the (truthy, falsy) variants.
    # NOTE(review): "=<" for ``lte`` looks like a typo for "<=" -- confirm
    # against the grammar of the target query language before changing it.
    _operators = {
        "exact": "=",
        "gte": ">=",
        "lte": "=<",
        "lt": "<",
        "gt": ">",
        "is_defined": ("is defined", "is not defined"),
        "contains_all": "contains all",
        "contains_any": "contains any",
    }

    def __init__(self, **filters: str):
        # ``_connector`` is reserved: it selects AND/OR joining instead of
        # acting as a filter.
        self._connector = filters.pop("_connector", self.AND)
        self._filters = filters

    def __str__(self) -> str:
        clauses = []
        for key, value in self._filters.items():
            parts = key.split("__")
            lookup = parts.pop()
            if lookup not in self._operators:
                # no explicit lookup suffix: treat it as an equality test
                parts.append(lookup)
                lookup = "exact"
            field = parts.pop()
            parts.append(self._clause(field, lookup, value))
            # remaining leading parts are nesting scopes, applied from the
            # inside out: ['user', 'age > 5'] -> 'user(age > 5)'
            clauses.append(
                reduce(lambda inner, scope: f"{scope}({inner})", parts[::-1])
            )
        separator = " OR " if self._connector == self.OR else " AND "
        return separator.join(clauses)

    def __or__(self, other):
        # NOTE(review): merging flattens both filter dicts, so duplicate
        # keys collapse (right-hand side wins) and grouping of mixed
        # AND/OR chains is lost.
        return self.__class__(
            **{**self._filters, **other._filters}, _connector=self.OR
        )

    def __and__(self, other):
        return self.__class__(
            **{**self._filters, **other._filters}, _connector=self.AND
        )

    def _clause(self, lhs, operator, rhs) -> str:
        """Render one ``field <op> value`` clause (or a nested scope)."""
        assert operator in self._operators
        if isinstance(rhs, dict):
            # dict values recurse into a nested predicate scope
            return f"{lhs}({self.__class__(**rhs)})"
        if isinstance(rhs, self.__class__):
            return f"{lhs}({rhs})"
        rendered_op = self._operators[operator]
        if isinstance(rendered_op, tuple):
            # ``is_defined`` takes no right-hand side; the truthiness of
            # the supplied value picks the variant
            return f"{lhs} {rendered_op[0] if rhs else rendered_op[1]}"
        return f"{lhs} {rendered_op} {self._escape_value(rhs)}"

    def _escape_value(self, value) -> str:
        """JSON-quote scalars; recurse into predicates and collections."""
        if isinstance(value, self.__class__):
            return f"({value})"
        if isinstance(value, Decimal):
            # Decimal keeps its exact textual form (json.dumps cannot
            # serialise Decimal)
            return str(value)
        if not isinstance(value, str) and isinstance(value, Collection):
            return "({})".format(
                ", ".join(self._escape_value(item) for item in value)
            )
        return json.dumps(value)
| StarcoderdataPython |
163838 | from django.http import HttpResponseRedirect, HttpResponse
#, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
from django.shortcuts import render
from datetime import datetime
import simplejson
import sys
import threading
import time
import traceback
from src.helper import *
from src.helper_form import *
from src.helper_html import *
from src.models import *
from src.views import result_json, error400
def design_3d(request, form=None, from_1d=False, from_2d=False):
    """Render the 3-D design page.

    ``form`` may be a pre-filled Design3DForm (e.g. from the 1-D/2-D
    referrer views); when omitted, a fresh unbound form is created.
    ``from_1d``/``from_2d`` flag which referrer banner the template shows.

    Fixed: the old default ``form=Design3DForm()`` was evaluated once at
    import time, so every default-rendered request shared the same form
    instance (mutable-default pitfall).
    """
    if form is None:
        form = Design3DForm()
    return render(request, PATH.HTML_PATH['design_3d'], {'3d_form': form, 'from_1d': from_1d, 'from_2d': from_2d})
def design_3d_run(request):
    """POST endpoint: validate the 3-D design form, persist a job record
    and launch the design computation on a background thread.

    Returns a JSON payload with the new job id on success, an error
    HttpResponse from validation, or a JSON ``{'error': '00'}`` payload
    when the form itself is invalid.
    """
    if request.method != 'POST': return error400(request)
    form = Design3DForm(request.POST)
    # NOTE(review): ``msg`` is never used and the final ``render`` call is
    # unreachable (both branches of the if/else return earlier).
    msg = ''
    if form.is_valid():
        (sequence, tag) = form_data_clean_common(form.cleaned_data)
        (primers, offset, min_muts, max_muts, which_muts, which_lib, structures, is_exclude, is_single, is_fill_WT, num_mutations) = form_data_clean_3d(form.cleaned_data, sequence)
        # is_valid is either an error HttpResponse or a tuple whose second
        # element is the (possibly normalised) primer set
        is_valid = form_check_valid_job(3, sequence, primers=primers, min_muts=min_muts, max_muts=max_muts, structures=structures)
        if isinstance(is_valid, HttpResponse):
            return is_valid
        else:
            primers = is_valid[1]
        job_id = random_job_id()
        # publish a placeholder page immediately; the worker thread
        # replaces it when the computation finishes
        create_HTML_page_wait(job_id, 3)
        # status '1' marks the job as running; params/result are stored as
        # pretty-printed JSON blobs
        job_entry = Design3D(date=datetime.now(), job_id=job_id, sequence=sequence, structures=simplejson.dumps(structures, sort_keys=True, indent=' ' * 4), tag=tag, status='1', params=simplejson.dumps({'offset': offset, 'min_muts': min_muts, 'max_muts': max_muts, 'which_lib': which_lib, 'num_mutations': num_mutations, 'is_exclude': is_exclude, 'is_single': is_single, 'is_fill_WT': is_fill_WT}, sort_keys=True, indent=' ' * 4), result=simplejson.dumps({'primer_set': primers}, sort_keys=True, indent=' ' * 4))
        job_entry.save()
        job_list_entry = JobIDs(job_id=job_id, type=3, date=datetime.now())
        job_list_entry.save()
        # fire-and-forget worker; progress is tracked via the DB record
        job = threading.Thread(target=design_3d_wrapper, args=(sequence, structures, primers, tag, offset, which_muts, which_lib, num_mutations, is_exclude, is_single, is_fill_WT, job_id))
        job.start()
        return result_json(job_id)
    else:
        return HttpResponse(simplejson.dumps({'error': '00', 'type': 3}, sort_keys=True, indent=' ' * 4), content_type='application/json')
    return render(request, PATH.HTML_PATH['design_3d'], {'3d_form': form})
def demo_3d(request):
    """Redirect straight to the cached result page of the first 3-D demo."""
    demo_job = ARG['DEMO_3D_ID_1']
    return HttpResponseRedirect('/result/?job_id=' + demo_job)
def demo_3d_run(request):
    """Launch a canned 3-D demo job on the P4P6 sequence in the background.

    ``?mode=2`` selects the two-structure demo; any other mode runs the
    single-structure demo with the exclude/single/fill-WT flags inverted.
    Returns the JSON job-id payload immediately.
    """
    if 'mode' in request.GET and len(request.GET.get('mode')):
        # NOTE(review): only the first character of the parameter is used,
        # so "mode=12" behaves like "mode=1" -- confirm this is intended
        mode = str(request.GET.get('mode')[0])
    else:
        mode = '1'
    # demo jobs reuse fixed, pre-allocated job ids
    job_id = ARG['DEMO_3D_ID_' + mode]
    create_HTML_page_wait(job_id, 3)
    which_muts = range(ARG['MIN_MUTS'], ARG['MAX_MUTS'] + 1)
    is_exclude = ARG['IS_EXCLUDE']
    is_single = ARG['IS_SINGLE']
    is_fill_WT = ARG['IS_FILLWT']
    if mode == '2':
        structures = [STR['P4P6_1'], STR['P4P6_2']]
    else:
        structures = [STR['P4P6']]
        # single-structure demo flips the default flags
        is_exclude = (not is_exclude)
        is_single = (not is_single)
        is_fill_WT = (not is_fill_WT)
    # same worker as the regular run; fire-and-forget thread
    job = threading.Thread(target=design_3d_wrapper, args=(SEQ['P4P6'], structures, SEQ['PRIMER_SET'], 'P4P6_2HP', ARG['OFFSET'], which_muts, [int(ARG['LIB'])], ARG['NUM_MUT'], is_exclude, is_single, is_fill_WT, job_id))
    job.start()
    return result_json(job_id)
def design_3d_wrapper(sequence, structures, primer_set, tag, offset, which_muts, which_lib, num_mutations, is_exclude, is_single, is_fillWT, job_id):
    """Background worker: run the 3-D primer design and publish the result.

    Runs on a plain thread started by design_3d_run/demo_3d_run.  All
    outcomes are reported by writing a static HTML page for ``job_id``:
    a result page on success, a failure page when no solution exists, or
    an error page when the computation or rendering raises.  (This module
    is Python 2: note the bare ``print`` statements.)
    """
    try:
        t0 = time.time()
        # time.sleep(15)
        plate = prm_3d.design(sequence, primer_set, structures, offset, num_mutations, which_lib, which_muts, tag, is_exclude, is_single, is_fillWT, True)
        save_result_data(plate, job_id, tag, 3)
        t_total = time.time() - t0
    except Exception:
        # computation failed: log the traceback and publish the error page
        t_total = time.time() - t0
        print "\033[41mError(s)\033[0m encountered: \033[94m", sys.exc_info()[0], "\033[0m"
        print traceback.format_exc()
        return create_HTML_page_error(job_id, t_total, 3)
    # when no solution found
    if (not plate.is_success): return create_HTML_page_fail(job_id, 3)
    try:
        # 'NORMAL' designs against one structure, 'DIFF' against two
        mode = 'NORMAL' if len(structures) == 1 else 'DIFF'
        script = HTML_elem_header(job_id, False, 3)
        script += '<div class="alert alert-default" title="Sequence Illustration"><p><span class="glyphicon glyphicon-question-sign"></span> <b>INFO</b>: <b style="color:#ff69bc;">%s</b> <i>Mode</i>; <span>(<span class="glyphicon glyphicon-stats" style="color:#b7bac5;"></span> <u>%d</u>)</span>.<small class="pull-right">(hover on sequence to locate plate coordinates)</small></p><p class="monospace" style="overflow-x:scroll;">__SEQ_ANNOT__</p></div>' % (mode, plate.get('N_CONSTRUCT'))
        script += HTML_elem_time_elapsed(t_total, 3)
        (script, flag) = HTML_comp_plates(plate, script, job_id, 3)
        script += HTML_comp_assembly(plate.echo('assembly'))
        script += HTML_elem_whats_next() + '</p>'
        script = HTML_comp_warnings(flag, script, plate, 3)
        script = HTML_comp_illustration(plate, script, 3)
        job_entry = Design3D.objects.get(job_id=job_id)
        # status '2' = finished; demo jobs stay at '0' so they can be rerun
        job_entry.status = '2' if job_id not in (ARG['DEMO_3D_ID_1'], ARG['DEMO_3D_ID_2']) else '0'
        job_entry.result = simplejson.dumps({'primer_set': plate.primer_set, 'primers': plate._data['assembly'].primers.tolist()[0:-1], 'tm_overlaps': map(lambda x: round(x, 2), plate._data['assembly'].Tm_overlaps), 'plates': [plate.get('N_PLATE'), plate.get('N_PRIMER')], 'constructs': len(plate._data['constructs']), 'warnings': flag}, sort_keys=True, indent=' ' * 4)
        job_entry.time = t_total
        job_entry.save()
        create_HTML_page_result(script, job_id, 3)
    except Exception:
        # rendering/DB update failed: log and publish the error page
        print "\033[41mError(s)\033[0m encountered: \033[94m", sys.exc_info()[0], "\033[0m"
        print traceback.format_exc()
        create_HTML_page_error(job_id, t_total, 3)
def design_3d_from_1d(request):
    """Pre-fill the 3-D design form from a finished 1-D job.

    Expects a ``from`` query parameter holding the 1-D job id; responds
    with HTTP 400 when the parameter is missing or no such job exists.
    """
    if 'from' not in request.GET:
        # Fixed: the original fell through with ``form``/``from_1d``
        # unbound and crashed with NameError (HTTP 500); a missing
        # referrer is a bad request instead.
        return error400(request)
    referer_job_id = request.GET.get('from')
    if not Design1D.objects.filter(job_id=referer_job_id).exists():
        return error400(request)
    job_entry = Design1D.objects.get(job_id=referer_job_id)
    # carry the 1-D job's sequence, tag and primer set into the 3-D form
    primers = ','.join(simplejson.loads(job_entry.result)['primer_set'])
    form = Design3DForm(initial={'sequence': job_entry.sequence, 'tag': job_entry.tag, 'primers': primers})
    return design_3d(request, form, from_1d=True)
def design_3d_from_2d(request):
    """Pre-fill the 3-D design form from a finished 2-D job.

    Expects a ``from`` query parameter holding the 2-D job id; responds
    with HTTP 400 when the parameter is missing or no such job exists.
    """
    if 'from' not in request.GET:
        # Fixed: the original fell through with ``form``/``from_2d``
        # unbound and crashed with NameError (HTTP 500); a missing
        # referrer is a bad request instead.
        return error400(request)
    referer_job_id = request.GET.get('from')
    if not Design2D.objects.filter(job_id=referer_job_id).exists():
        return error400(request)
    job_entry = Design2D.objects.get(job_id=referer_job_id)
    # carry over sequence/tag/primers plus the 2-D mutation window
    params = simplejson.loads(job_entry.params)
    primers = ','.join(simplejson.loads(job_entry.result)['primer_set'])
    form = Design3DForm(initial={'sequence': job_entry.sequence, 'tag': job_entry.tag, 'primers': primers, 'max_muts': params['max_muts'], 'min_muts': params['min_muts'], 'offset': params['offset']})
    return design_3d(request, form, from_2d=True)
| StarcoderdataPython |
1691079 | <filename>tests/unit/test_config.py
import json
from ward import test, raises
import pydantic
from hautomate.settings import HautoConfig
from tests.fixtures import cfg_data_hauto
@test('HAutoConfig validates input', tags=['unit'])
def _(opts=cfg_data_hauto):
    # Round-trip check: a model built from the fixture must serialise
    # back to the same data.  ``apps_dir`` is excluded on both sides.
    model = HautoConfig(**opts)
    # remove because comparing relative paths is impossibru
    _opts = {k: v for k, v in opts.items() if k != 'apps_dir'}
    # exclude every model field the trimmed fixture does not cover
    excludes = {f for f in model.fields if f not in _opts}
    model_data = model.json(exclude=excludes)
    assert json.loads(model_data) == _opts
    # negative scenarios
    # an invalid timezone name must be rejected
    with raises(pydantic.ValidationError):
        HautoConfig(**{**opts, **{'timezone': 'America/Gotham City'}})
    # api_configs entries must not be None
    with raises(pydantic.ValidationError):
        HautoConfig(**{**opts, **{'api_configs': {'time_travel': None}}})
| StarcoderdataPython |
68395 | <filename>rheia/CASES/H2_MOBILITY/h2_mobility.py<gh_stars>1-10
"""
The :py:mod:`h2_mobility` module contains a class to read the required data and
a class to evaluate the power-to-mobility system.
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pvlib
class ReadData:
    """
    Reads the climate time series and the deterministic model parameters
    used by the power-to-mobility evaluation.

    Parameters
    ----------
    filename_climate : str
        The directory of the file with information on the
        solar irradiance.
    """

    def __init__(self, filename_climate):
        self.filename_climate = filename_climate
        # folder containing this module; the design_space file lives here
        self.path = os.path.dirname(os.path.abspath(__file__))

    def load_climate(self):
        """
        Load the hourly solar irradiance ('sol_irr') and ambient
        temperature ('T_amb') columns from the climate data file.

        Returns
        -------
        sol_irr : ndarray
            The hourly solar irradiance data for a Typical
            Meteorological Year.
        t_amb : ndarray
            The hourly ambient temperature data for a Typical
            Meteorological Year.
        """
        climate = pd.read_csv(self.filename_climate)
        return climate['sol_irr'].to_numpy(), climate['T_amb'].to_numpy()

    def load_parameters(self):
        """
        Load the deterministic values of the model parameters from the
        design_space file (whitespace-separated lines of the form
        ``name par value``).  Useful when the deterministic performance
        of a specific design needs to be evaluated.

        Returns
        -------
        param_dict : dict
            Parameter names mapped to their deterministic values.
        """
        design_space = os.path.join(self.path, 'design_space')
        with open(design_space, 'r') as handle:
            rows = [line.split() for line in handle]
        # keep only 'par' rows; column 0 is the name, column 2 the value
        return {row[0]: float(row[2]) for row in rows if row[1] == 'par'}
class Evaluation:
"""
This class evaluates the power-to-mobility system.
For a given design, the solar irradiance, ambient temperature
and the characterization of the model parameters,
the levelized cost of driving, carbon intensity and the annual
grid consumption are quantified.
Parameters
----------
sol_irr : ndarray
The hourly solar irradiance for the evaluated year.
t_amb : ndarray
The hourly ambient temperature for the evaluated year.
parameters : dict
Dictionary with the model parameters and design variables values.
"""
    def __init__(self, sol_irr, t_amb, par):
        """Pre-compute profiles, tank size and electrolyzer/compressor
        fits for one evaluation run.

        NOTE: ``par`` is mutated in place (e.g. ``life_sys`` is added
        here, bus-fleet figures in demand_profiles).
        """
        self.par = par
        # the solar irradiance and ambient temperature are scaled with the
        # corresponding uncertainty (multiplicative for irradiance,
        # additive for temperature)
        self.sol_irr = sol_irr * self.par['u_sol_irr']
        self.t_amb = t_amb + self.par['u_t_amb']
        self.length = len(self.sol_irr)
        # the result dictionary
        self.res = {}
        # the system lifetime [years]
        self.par['life_sys'] = 20.
        # initialize the operating hours of the electrolyzer array
        self.res['running_hours_pemel'] = 0.
        # initialize the storage tank size and its starting status
        # (the tank starts at 5 % of its capacity)
        self.m_h2_max = self.tank()
        self.m_h2_min = 0.05 * self.m_h2_max
        self.m_h2 = self.m_h2_min
        # instantiate the profiles for grid electricity price
        self.demand_profiles()
        # the number of PEM electrolyzer cells, corresponding to the
        # nominal capacity of the considered PEM cell (0.4 kW per cell)
        # and the provided PEM capacity
        self.n_pemel_array = self.par['n_pemel'] / 0.4
        # the fitted polynomials on the electrolyzer and compressor
        self.polyfit_pemel()
        self.polyfit_pemel_compr()
    def demand_profiles(self):
        """
        Set the grid electricity price for buying and selling electricity.
        A contract with fixed electricity price is considered, for which the
        price for buying electricity consists of three segments: the energy
        price itself (i.e. 'elec_cost'), the profit made on this price by the
        electricity provider (i.e. 'elec_cost_profit') and the fraction of the
        energy price to the final retail price (i.e. 'elec_cost_ratio', e.g.
        when this value equals 0.3, the energy price corresponds to 30% of
        the final bill, while 70% corresponds to transportation cost,
        taxes,...). The price for selling electricity back to the grid
        corresponds to the energy price.
        In addition, the demand profiles from the hydrogen buses and diesel
        buses is determined, based on the European daily refueling profile [1].

        [1] <NAME>, I.Williamson, <NAME>, and <NAME>,
        “Decarbonising city bus networks in ireland with renewable hydrogen,”
        International Journal of Hydrogen Energy, 2020.
        """
        # electricity cost profile [euro/Wh]
        self.elec_profile = np.ones(self.length) * (
            (self.par['elec_cost'] +
             self.par['elec_cost_profit']) /
            self.par['elec_cost_ratio']) / 1e6
        # electricity selling profile [euro/Wh]
        self.elec_profile_sale = np.ones(
            self.length) * self.par['elec_cost'] / 1e6
        self.diesel_profile = np.ones(self.length) * self.par['diesel_cost']
        # number of km driven per day per bus
        self.par['n_km_bus'] = 250.
        # number of buses in the fleet
        self.par['n_bus'] = 40.
        # energy consumed by the diesel buses and hydrogen buses per day
        # [kWh/day]
        energy_h2 = (self.par['cons_h2_bus'] * self.par['n_km_bus'] *
                     self.par['n_h2_bus'])
        energy_diesel = (self.par['cons_diesel_bus'] * self.par['n_km_bus'] *
                         (self.par['n_bus'] - self.par['n_h2_bus']))
        # kWh -> kg using hydrogen's energy content (33.33 kWh/kg) and
        # kWh -> litre using diesel's (10 kWh/l) -- TODO confirm constants
        h2_required = energy_h2 / 33.33  # kg
        diesel_required = energy_diesel / 10.  # litre
        # the daily refueling profile of the buses (24 hourly fractions
        # summing to ~1, per the profile of [1])
        fill_profile = np.array([0.09, 0.015, 0.005, 0.04, 0.04, 0., 0.01,
                                 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                                 0.08, 0.08, 0.13, 0.13, 0.13, 0.13, 0.11])
        # daily refueling profile for the hydrogen buses and diesel buses
        day_h2 = np.ones(24) * fill_profile * h2_required
        day_diesel = np.ones(24) * fill_profile * diesel_required
        # annual refueling profiles (daily pattern tiled over the horizon)
        self.load_h2 = list(day_h2) * int(365 * self.length / 8760)
        self.load_diesel = list(day_diesel) * int(365 * self.length / 8760)
        # dispenser capacity such that the hourly hydrogen demand can be
        # complied with (assumed 33.333 kg/h per dispenser)
        dispenser_mass_flow_rate = 33.333
        self.par['n_disp'] = max(self.load_h2) / dispenser_mass_flow_rate
#############################
# photovoltaic array module #
#############################
    def quantify_mpp(self, sol_irr, t_amb, pv_system):
        """
        Quantify the maximum power of the photovoltaic array
        for a given solar irradiance and ambient temperature.

        Parameters
        ----------
        sol_irr : float
            The solar irradiance [W/m2].
        t_amb : float
            The ambient temperature [C].
        pv_system : pandas.core.series.Series
            The pv system characteristics

        Returns
        -------
        pmp : float
            The maximum power.
        """
        # quantify the parameters for the pv system using De Soto method;
        # EgRef/dEgdT are the silicon band-gap defaults of that model.
        # NOTE(review): t_amb is passed where the De Soto model expects
        # the cell temperature -- confirm the ambient~cell approximation
        # is intended here.
        pv_inputs = pvlib.pvsystem.calcparams_desoto(sol_irr,
                                                     t_amb,
                                                     pv_system['alpha_sc'],
                                                     pv_system['a_ref'],
                                                     pv_system['I_L_ref'],
                                                     pv_system['I_o_ref'],
                                                     pv_system['R_sh_ref'],
                                                     pv_system['R_s'],
                                                     EgRef=1.121,
                                                     dEgdT=-0.0002677,
                                                     irrad_ref=1000.,
                                                     temp_ref=25.)
        # determine the maximum power for the given pv system
        pmp = pvlib.pvsystem.max_power_point(pv_inputs[0],
                                             pv_inputs[1],
                                             pv_inputs[2],
                                             pv_inputs[3],
                                             pv_inputs[4],
                                             method='newton')['p_mp']
        return pmp
    def photovoltaic(self):
        """
        The hourly photovoltaic power is quantified via the PVlib package.
        Using this package, first the characteristics for a typical
        photovoltaic panel are defined. Based on these characteristics,
        the maximum power point is quantified for each hour, based on the
        corresponding solar irradiance and ambient temperature. Finally, the
        hourly power production is scaled by the considered photovoltaic array
        capacity.

        Side effects: stores the hourly power in ``self.res['p_pv']`` [W]
        and the DC-DC converter size in ``self.res['n_dcdc_pv']`` [kW].
        """
        p_pv = np.zeros(len(self.sol_irr))
        # get the specific photovoltaic panel characteristics
        pv_database = pvlib.pvsystem.retrieve_sam('CECmod')
        pv_system = pv_database.SunPower_SPR_X19_240_BLK
        # determine the maximum power point at reference conditions
        # (1000 W/m2 irradiance, 25 C)
        p_mpp_ref = self.quantify_mpp(1000., 25., pv_system)  # W
        # maximum power point determination for each hour in the timeframe
        for i, irr in enumerate(self.sol_irr):
            if irr > 0.:
                p_mpp = self.quantify_mpp(irr, self.t_amb[i], pv_system)
                # normalise by the reference panel and scale to the
                # installed capacity n_pv [kW] -> W
                p_pv[i] = p_mpp / p_mpp_ref * self.par['n_pv'] * 1e3  # W
            else:
                # no irradiance, no production
                p_pv[i] = 0.
        # store the hourly pv power in the result dictionary
        self.res['p_pv'] = p_pv
        # the dc-dc converter capacity in kW (sized to the peak PV output)
        self.res['n_dcdc_pv'] = max(p_pv) / 1e3
#############################
# electrolyzer array module #
#############################
    def pemel(self, i_pemel):
        """
        The electrolyzer model, based on the work of Saeed et al. [2]. For a
        given current, the model determines the operating voltage by
        considering the activation, concentration and ohmic overpotentials.
        The model quantifies the operating voltage, power, efficiency and
        hydrogen production.

        [2] <NAME>., & <NAME>. (2015). Modeling and Analysis of
        Renewable PEM Fuel Cell System. Energy Procedia, 74, 87–101.
        https://doi.org/10.1016/j.egypro.2015.07.527

        Parameters
        ----------
        i_pemel : float
            The electrolyzer input current [A].

        Returns
        -------
        res : dict
            Dictionary with the operating conditions of the electrolyzer for a
            given current. It contains items on the operating voltage, power,
            efficiency and hydrogen mass flow rate.
        """
        # cell-level constants of the correlation [2]: temperature [K],
        # water activity ``a``, partial pressures, limiting/exchange
        # current densities, cell area, electrons per H2 (``n``), membrane
        # thickness, charge-transfer coefficient, gas constant, Faraday
        # constant and hydrogen heating value (units per the source [2]).
        par_pemel = {'T': 353.,
                     'a': 1.,
                     'p_o2': 1.,
                     'p_h2': 1.,
                     'p_h2o': 1.,
                     'i_L': 2.,
                     'A': 100.,
                     'i_0': 1e-4,
                     'n': 2.,
                     't_mem': 50e-4,
                     'alpha': 0.3,
                     'R': 8.3143,
                     'F': 96485.,
                     'HHV': 141.7e6,
                     }
        res = {}
        # current density: stack current divided by the cell area
        i = i_pemel / par_pemel['A']
        # minimum operating voltage of electrolyzer
        e_0 = (1.48 - 0.85e-3 * (par_pemel['T'] - 298.15) + 4.3085e-5 *
               par_pemel['T'] * np.log(par_pemel['p_h2'] *
                                       np.sqrt(par_pemel['p_o2']) /
                                       par_pemel['p_h2o']))
        # activation overpotential
        v_act = (np.log(i / par_pemel['i_0']) /
                 (par_pemel['alpha'] * par_pemel['n'] * par_pemel['F']) *
                 par_pemel['R'] * par_pemel['T'])
        # ohmic overpotential: membrane water content from the activity
        # ``a`` (empirical cubic correlation), then membrane conductivity
        lambda_mem = (0.043 + 17.81 * par_pemel['a'] -
                      39.85 * par_pemel['a']**2. +
                      36. * par_pemel['a']**3.)
        sigma_mem = ((0.005139 * lambda_mem - 0.00326) *
                     np.exp(1268 * (1. / 303. - 1. / par_pemel['T'])))
        v_ohm = i * par_pemel['t_mem'] / sigma_mem
        # the concentration overpotential (diverges as i -> i_L)
        v_con = - (par_pemel['R'] * par_pemel['T'] /
                   (par_pemel['n'] * par_pemel['F']) *
                   np.log(1. - i / par_pemel['i_L']))
        # model outputs, scaled from one cell to the whole array
        res['v_pemel'] = (e_0 + v_act + v_ohm + v_con) * self.n_pemel_array
        res['m_pemel'] = self.current_to_mh2(i_pemel) * self.n_pemel_array
        res['p_pemel'] = i_pemel * res['v_pemel']
        res['eff_pemel'] = (res['m_pemel'] * par_pemel['HHV'] /
                            (res['p_pemel'] * 3600.))
        return res
def current_to_mh2(self, current):
"""
When current is provided, this function determines the
corresponding hydrogen mass flow rate per hour.
Parameters
----------
current : float
The electrolyzer input current [A].
Returns
-------
m_h2 : float
The produced hydrogen mass flow rate [kg/h].
"""
far_cons = 96485.
m_h2 = current / (2. * far_cons) * 2.02e-3 * 3600.
return m_h2
    def mh2_to_power(self, m_h2):
        """
        When the hydrogen mass flow rate is provided, this function determines
        the corresponding required power per hour.

        Parameters
        ----------
        m_h2 : float
            The produced hydrogen mass flow rate [kg/h].

        Returns
        -------
        power : float
            The required power to produce the hydrogen [W].
        """
        far_cons = 96485.
        # invert Faraday's law; divide by the number of cells because
        # ``m_h2`` is the production of the whole array while ``current``
        # feeds the per-cell correlation in pemel()
        current = m_h2 * (2. * far_cons) / (2.02e-3 * 3600. *
                                            self.n_pemel_array)
        power = self.pemel(current)['p_pemel']
        return power
    def polyfit_pemel(self):
        """
        The electrolyzer stack is evaluated over a range of input currents.
        Following these evaluations, a polynomial is fitted on the
        power - current relation of the electrolyzer. This polynomial enables
        to rapidly determine the input current when a certain amount of power
        is available. Since this relation is fairly linear, the polynomial
        should reach good agreement with the actual power - current relation,
        while maintaining the level of fidelity of the actual model.

        Side effect: stores the fitted power->current callable in
        ``self.p_to_i_pemel``.
        """
        # evaluate the electrolyzer stack for a set of currents (3..199 A)
        i_list = np.arange(start=3, stop=200, step=4)
        p_pemel = np.zeros(len(i_list))
        for index, i in enumerate(i_list):
            res = self.pemel(i)
            p_pemel[index] = res['p_pemel']
        # generate a polynomial fitted on the power - current points
        # (polyfit_func is a project helper returning a callable fit --
        # defined elsewhere in the package)
        self.p_to_i_pemel = polyfit_func(p_pemel, i_list)
#####################
# compressor module #
#####################
def compressor(self, m_h2):
"""
The compressor module defined the required compression power to
compress the hydrogen mass flow rate [3].
[3] <NAME>., <NAME>., & <NAME>. (2014). Dynamic analysis of
a self-sustainable renewable hydrogen fueling station. ASME 2014 12th
International Conference on Fuel Cell Science, Engineering and
Technology, FUELCELL 2014 Collocated with the ASME 2014 8th
International Conference on Energy Sustainability.
https://doi.org/10.1115/FuelCell2014-6330
Parameters
----------
m_h2 : float
Hydrogen mass flow rate [kg/h].
Returns
-------
power : float
The required compression power [W].
"""
# convert the flow rate into kg/s
m_h2 *= 1. / 3600.
par_c = {
'T_in': 353.,
'p_in': 1.,
'p_out': 350.,
'eta_c': 0.85,
'R': 4.124,
'n': 1.609,
}
power = (m_h2 *
par_c['n'] *
par_c['R'] *
par_c['T_in'] *
((par_c['p_out'] /
par_c['p_in'])**((par_c['n'] -
1.) /
par_c['n']) -
1.) *
1000. /
(par_c['eta_c'] *
(par_c['n'] -
1.)))
return power
    def polyfit_pemel_compr(self):
        """
        The power consumption by the electrolyzer stack and compressor are
        evaluated over a range of hydrogen mass flow rates. Following these
        evaluations, a polynomial is fitted on the mass flow rate - power
        relation. This polynomial enables to rapidly determine the input
        mass flow rate when a certain amount of power is available.
        Since this relation is fairly linear, the polynomial should reach good
        agreement with the actual mass flow rate - power relation, while
        maintaining the level of fidelity of the actual model.

        Side effects: stores the operating limits in ``self.bounds`` and
        the fitted power->mass-flow callable in ``self.p_to_m_pemel_comp``.
        """
        # the electrolyzer array operating limits: n_pemel is in kW, so
        # the lower limit is 10 W per installed kW (1 % of nominal) and
        # the upper limit is the nominal power (1e3 W per kW)
        pemel_lower_lim = self.par['n_pemel'] * 10.
        pemel_upper_lim = self.par['n_pemel'] * 1e3
        # the operating current at these limits (via the fitted polynomial)
        current_ub = self.p_to_i_pemel(pemel_upper_lim)
        current_lb = self.p_to_i_pemel(pemel_lower_lim)
        # the characteristics of the electrolyzer at the limits
        pemel_ub = self.pemel(current_ub)
        pemel_lb = self.pemel(current_lb)
        # the compression power at the hydrogen production limits
        p_compr_ub = self.compressor(pemel_ub['m_pemel'])
        p_compr_lb = self.compressor(pemel_lb['m_pemel'])
        # the compression and electrolyzer power at the limits
        pemel_compr_ub = pemel_ub['p_pemel'] + p_compr_ub
        pemel_compr_lb = pemel_lb['p_pemel'] + p_compr_lb
        # the operating bounds for the compressor and electrolyzer
        self.bounds = {'pemel_lb': pemel_lb,
                       'pemel_ub': pemel_ub,
                       'pemel_compr_lb': pemel_compr_lb,
                       'pemel_compr_ub': pemel_compr_ub
                       }
        # evaluate the electrolyzer and compressor for a set of mass flow
        # rates (50 steps across the feasible production range)
        step = pemel_ub['m_pemel'] / 50.
        m_h2_list = np.arange(start=step, stop=pemel_ub['m_pemel'] - step,
                              step=step)
        p_pemel_compr = np.zeros(len(m_h2_list))
        for index, m_h2 in enumerate(m_h2_list):
            p_pemel = self.mh2_to_power(m_h2)
            p_compr = self.compressor(m_h2)
            p_pemel_compr[index] = p_pemel + p_compr
        # generate a polynomial fitted on the power - mass flow rate points
        # (polyfit_func is a project helper returning a callable fit)
        self.p_to_m_pemel_comp = polyfit_func(p_pemel_compr, m_h2_list)
#############################
# tank and dispenser module #
#############################
def tank(self):
"""
The maximum storage capacity of the hydrogen tank.
Returns
-------
m_max : float
The hydrogen storage capacity. [kg]
"""
# conversion from energy (kWh) into mass (kg)
m_max = self.par['n_tank'] / 33.33
return m_max
def dispenser(self, m_h2):
"""
The cooling power is determined before the dispensation of the hydrogen
into the bus fuel tank [4].
[4] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“Techno-economic and thermodynamic analysis of pre-cooling systems at
gaseous hydrogen refueling stations,” Int. J. Hydrogen Energy, vol. 42,
no. 49, pp. 29067–29079, Dec. 2017.
Parameters
----------
m_h2 : float
Hydrogen mass flow rate [kg/h].
Returns
-------
power : float
The required cooling power for dispensation [W].
"""
demand_day_h2 = sum(self.load_h2[:24])
if m_h2 < 1e-6 or demand_day_h2 < 1e-4:
power = 0.
else:
t_amb = 10.4
m_h2 *= 1. / 3600.
ei_pcu = ((0.3 / (1.6 * np.exp(-0.018 * t_amb))) +
(25. * np.log(t_amb) - 21.) / demand_day_h2) # kWhe/kgH2
power = ei_pcu * m_h2 * 3.6e6 # W
return power
##############################
# management strategy module #
##############################
def p_for_inst_demand(self, m_h2):
"""
When a hydrogen demand is provided, this method determines the power
to generate the hydrogen in the electrolyzer array and the power to
compress the hydrogen in the compressor. The method indicates when the
electrolyzer array capacity is not sufficient to produce the desired
hydrogen mass flow rate.
Parameters
----------
m_h2 : float
Hydrogen mass flow rate [kg/h].
Returns
-------
p_pemel : float
The required electrolyzer array power [W].
p_compr : float
The required compressor power [W].
bool
True when the electrolyzer capacity is insufficient to produce the
hydrogen.
"""
# check if the desired hydrogen can be produced by the PEM
if m_h2 > self.bounds['pemel_ub']['m_pemel']:
return 0., 0., True
# determine the power needed from electrolyzer and compressor to
# deliver the required hydrogen mass flow rate
p_pemel = self.mh2_to_power(m_h2)
p_compr = self.compressor(m_h2)
self.res['running_hours_pemel'] += 1.
return p_pemel, p_compr, False
    def prod_mh2(self, p_in):
        """
        When there is power available to produce hydrogen, this method
        distributes this power over the electrolyzer array and compressor,
        such that the hydrogen is produced and compressed with this given
        power. If the given power will lead to an excess of hydrogen (i.e
        the storage tank is full before the energy is fully consumed), the
        power supplied to the electrolyzer array and compressor is recalculated
        such that no excess hydrogen is produced.
        Parameters
        ----------
        p_in : float
            The power available to produce hydrogen [W].
        Returns
        -------
        p_pemel : float
            The power consumed by the electrolyzer array [W].
        p_compr : float
            The power consumed by the compressor [W].
        """
        no_run = False
        # produce H2 only if tank is not yet full
        # nothing happens when the desired power is under the lower limit
        if self.m_h2 >= self.m_h2_max or p_in < self.bounds['pemel_compr_lb']:
            p_pemel = 0.
            p_compr = 0.
            no_run = True # the electrolyzer did not run
        # if the power is higher than the upper limit, work at the upper limit
        elif p_in > self.bounds['pemel_compr_ub']:
            # this is what can be produced at the upper limit for H2
            m_h2 = self.bounds['pemel_ub']['m_pemel']
            # produced hydrogen is added to the tank
            self.m_h2 += m_h2
        else:
            # quantify the hydrogen created with the available power
            # (p_to_m_pemel_comp is the fitted power -> mass polynomial)
            m_h2 = self.p_to_m_pemel_comp(p_in)
            # produced hydrogen is added to the tank
            self.m_h2 += m_h2
        # if the new hydrogen capacity exceeds the maximum storage capacity
        # (only reachable when one of the two producing branches above ran,
        # so m_h2 is always bound here)
        if self.m_h2 > self.m_h2_max:
            # the hydrogen that is still allowed in the tank
            m_h2 -= (self.m_h2 - self.m_h2_max)
            # the power for the PEM to generate this hydrogen
            p_rev = self.mh2_to_power(m_h2)
            # check if this power is higher than the lower limit for the PEM
            if p_rev > self.bounds['pemel_lb']['p_pemel']:
                self.m_h2 = self.m_h2_max
            # the space left in the tank is too small
            else:
                # revert the tank to its level before production: the reduced
                # m_h2 plus the current overshoot equals the full amount that
                # was just added, so subtracting both undoes the production
                self.m_h2 -= (m_h2 + self.m_h2 - self.m_h2_max)
                p_pemel = 0.
                p_compr = 0.
                no_run = True
        # if power is applied to the PEM and compressor
        if not no_run:
            p_compr = self.compressor(m_h2)
            p_pemel = self.mh2_to_power(m_h2)
            # count the operating hour towards the electrolyzer lifetime
            self.res['running_hours_pemel'] += 1.
        return p_pemel, p_compr
def extract_h2_from_tank(self, demand):
"""
Extract the hydrogen demand from the storage tank. If more hydrogen is
demanded than available in the tank, extract only the available amount
of hydrogen.
Parameters
----------
demand : float
The hydrogen demand. [kg]
Returns
-------
demand_left : float
The amount of hydrogen demand that is not covered by the tank. [kg]
"""
# extract the hydrogen from the tank
self.m_h2 -= demand
if self.m_h2 < self.m_h2_min:
# if the demand was too high, set the tank to its minimum level and
# define the demand that was not covered by the tank
demand_left = self.m_h2_min - self.m_h2
self.m_h2 = self.m_h2_min
else:
# if the tank covers the demand, there is no hydrogen demand left
demand_left = 0.
return demand_left
##########################################
# model evaluation
##########################################
    def evaluation(self):
        """
        This is the main method of the Evaluation class.
        For each hour, the power management strategy is applied.
        The hydrogen demand is extracted from the hydrogen tank, when
        sufficient hydrogen is available in the tank. When the hydrogen in the
        tank does not comply with the hydrogen demand, the power to run the
        electrolyzer array and compressor is calculated to generate and
        compress the remaining hydrogen. To generate this power and the
        dispensation power, the photovoltaic power is called upon first. If
        necessary, the remaining power is covered by the grid.
        When the hydrogen tank does comply with the hydrogen demand, the
        photovoltaic power is used to cover the required dispensation power.
        When excess photovoltaic power is present, this power is used to
        generate and compress hydrogen in the electrolyzer array and
        compressor, respectively. Finally, the lifetime, cost and
        CO2-emission of the system are quantified.
        Returns
        -------
        bool
            True when the capacity of the electrolyzer array is sufficient to
            cover the instantaneous hydrogen demand during the year.
        """
        # hourly traces used afterwards to size converters/compressor/cooling
        n_compr = np.zeros(self.length)
        n_disp = np.zeros(self.length)
        n_dcdc_pem = np.zeros(self.length)
        n_dcac = np.zeros(self.length)
        # every element is overwritten inside the hourly loop below
        self.res['m_h2_array'] = np.ones(self.length)
        self.res['grid_e_buy'] = np.ones(self.length)
        self.res['grid_e_sold'] = np.ones(self.length)
        self.res['grid_co2'] = 0.
        # get the hourly photovoltaic array power
        self.photovoltaic()
        for t in range(self.length):
            e_grid_buy = 0.
            e_grid_sold = 0.
            # define the power for dispensation
            p_disp = self.dispenser(self.load_h2[t])
            n_disp[t] = p_disp
            # define if there is any H2 demand left after assessing the tank
            demand_left = self.extract_h2_from_tank(self.load_h2[t])
            if demand_left > 0.:
                # power needed by the PEM and compressor to generate the demand
                p_pemel, p_compr, check = self.p_for_inst_demand(demand_left)
                n_dcdc_pem[t] = p_pemel
                n_compr[t] = p_compr
                if check:
                    # electrolyzer array too small: abort the evaluation
                    return False
                # positive if the PV energy can comply with the PEM power
                net_p_1 = self.res['p_pv'][t] - p_pemel
                if net_p_1 > 0.:
                    # if yes, check if remaining PV power can comply with the
                    # compressor and dispensation demand
                    net_p_2 = net_p_1 - (p_compr + p_disp)
                    if net_p_2 > 0.:
                        # when still excess power available, sell to the grid
                        e_grid_sold = net_p_2
                        n_dcac[t] = e_grid_sold
                    else:
                        # if the compressor and dispensation demand cannot be
                        # covered, buy remaining demand from the grid
                        # (compressor and dispensation are AC-powered)
                        e_grid_buy = abs(net_p_2)
                else:
                    # if PEM, compressor and dispensation cannot be covered by
                    # PV energy, buy required electricity from the grid
                    # only power for PEM has to pass through AC-DC conversion
                    p_to_buy = abs(net_p_1) + p_compr + p_disp
                    e_grid_buy = p_to_buy
                    n_dcac[t] = abs(net_p_1)
            else:  # excess PV energy available to generate hydrogen
                # use solar energy to power the dispensation
                net_p = self.res['p_pv'][t] - p_disp
                if net_p <= 0.:
                    # remaining power covered by the grid
                    e_grid_buy = abs(net_p)
                    # the solar power is all used for dispensation and thus
                    # sent through the DC-AC converter
                    n_dcac[t] = self.res['p_pv'][t]
                else:
                    # quantify how much of the remaining PV energy can be used
                    # by the PEM and compressor to generate hydrogen
                    p_pemel, p_compr = self.prod_mh2(net_p)
                    n_compr[t] = p_compr
                    # the remaining excess PV energy can be sold
                    e_grid_sold = net_p - p_pemel - p_compr
                    n_dcac[t] = p_disp + e_grid_sold + p_compr
                    n_dcdc_pem[t] = p_pemel
            # store evolution of storage tank, electricity bought/sold
            # and the CO2 amount from buying grid electricity
            # (tank level is normalized between its min and max levels)
            self.res['m_h2_array'][t] = ((self.m_h2 - self.m_h2_min) /
                                         (self.m_h2_max - self.m_h2_min))
            self.res['grid_e_buy'][t] = e_grid_buy
            self.res['grid_e_sold'][t] = e_grid_sold
            self.res['grid_co2'] += e_grid_buy * self.par['co2_elec']
        # define the capacity of the converters, compression and cooling
        # (peak hourly power, converted from W to kW)
        self.res['n_compr'] = max(n_compr) / 1e3
        self.res['n_cooling'] = max(n_disp) / 1e3
        self.res['n_dcdc_pemel'] = max(n_dcdc_pem) / 1e3
        self.res['n_dcac'] = max(n_dcac) / 1e3
        # the cost of annual diesel consumption
        self.res['diesel_cost'] = self.load_diesel * self.diesel_profile
        # determine the lifetime of the electrolyzer
        self.lifetime()
        # determine the system cost
        self.cost()
        # determine the system annual CO2 emission
        self.lca()
        return True
def lifetime(self):
"""
The lifetime method determines the lifetime of
the electrolyzer array, based on the number of
operating hours during the evaluated year.
"""
# lifetime of the electrolyzer array
if self.res['running_hours_pemel'] == 0.:
self.res['life_pemel'] = 1e8
else:
self.res['life_pemel'] = (self.par['life_pemel'] /
self.res['running_hours_pemel'])
    def cost(self):
        """
        Based on the capital recovery factor, the CAPEX,
        OPEX and replacement cost of the system components,
        the levelized cost of mobility [euro/km] is determined. The formula
        for the annualized system cost is adopted from Coppitters et al. [5].
        [5] <NAME>., <NAME>., & <NAME>. (2020). Robust design
        optimization and stochastic performance analysis of a
        grid-connected photovoltaic system with battery storage and
        hydrogen storage. Energy, 213, 118798.
        https://doi.org/10.1016/j.energy.2020.118798
        """
        # the capital recovery factor
        # (real interest rate, corrected for inflation)
        inv_rate = ((self.par['int_rate'] - self.par['infl_rate']) /
                    (1. + self.par['infl_rate']))
        crf = (((1. + inv_rate)**self.par['life_sys'] - 1.) /
               (inv_rate * (1. + inv_rate)**self.par['life_sys']))**(-1)
        # annual cost of photovoltaic array and DC-DC converter
        # NOTE(review): pv_cost uses crf*capex + opex while all other
        # components use capex*(crf + opex) -- confirm opex_pv is an
        # absolute cost rather than a fraction of CAPEX
        pv_cost = self.par['n_pv'] * (crf * self.par['capex_pv'] +
                                      self.par['opex_pv'])
        pv_dcdc_cost = self.res['n_dcdc_pv'] * (self.par['capex_dcdc'] *
                                                (crf + self.par['opex_dcdc']))
        components_cost = pv_cost + pv_dcdc_cost
        # annual cost of electrolyzer array and DC-DC converter
        pemel_cost = self.par['n_pemel'] * (self.par['capex_pemel'] *
                                            (crf + self.par['opex_pemel']))
        pemel_dcdc_cost = (self.res['n_dcdc_pemel'] *
                           (self.par['capex_dcdc'] *
                            (crf + self.par['opex_dcdc'])))
        components_cost += pemel_cost + pemel_dcdc_cost
        # annual cost of hydrogen storage tank
        tank_cost = self.par['n_tank'] * (self.par['capex_tank'] *
                                          (crf + self.par['opex_tank']))
        components_cost += tank_cost
        # annual cost of compressor
        # (capacity scaled by the 0.5861 exponent -- economy of scale)
        compressor_cost = (self.par['capex_compr'] *
                           (self.res['n_compr']**0.5861) *
                           (crf + self.par['opex_compr']))
        components_cost += compressor_cost
        # annual cost of dispenser
        dispenser_cost = self.par['n_disp'] * (self.par['capex_disp'] *
                                               (crf + self.par['opex_disp']))
        components_cost += dispenser_cost
        # annual cost of dispensation cooling
        cooling_cost = self.res['n_cooling'] * (self.par['capex_cool'] *
                                                (crf + self.par['opex_cool']))
        components_cost += cooling_cost
        # annual cost of DC-AC inverter
        dcac_cost = self.res['n_dcac'] * (self.par['capex_dcac'] *
                                          (crf + self.par['opex_dcac']))
        components_cost += dcac_cost
        # annual cost of buses
        # (OPEX is per km: daily km * 365 days * cost per km)
        h2_bus_cost = (self.par['capex_h2_bus'] * crf + self.par['n_km_bus'] *
                       365. * self.par['opex_h2_bus']) * self.par['n_h2_bus']
        diesel_bus_cost = (self.par['capex_diesel_bus'] * crf +
                           self.par['opex_diesel_bus'] * 365. *
                           self.par['n_km_bus']) * (self.par['n_bus'] -
                                                    self.par['n_h2_bus'])
        components_cost += h2_bus_cost + diesel_bus_cost
        # annual replacement cost of the electrolyzer and buses
        # (each replacement is discounted to present value before annualizing;
        # buses are assumed to be replaced every 10 years)
        arc = crf * sum([(1. + inv_rate)**(-(i + 1.) *
                                           self.res['life_pemel']) *
                         self.par['n_pemel'] *
                         self.par['repl_pemel'] *
                         self.par['capex_pemel'] for
                         i in range(int(self.par['life_sys'] /
                                        self.res['life_pemel']))])
        arc += crf * sum([(1. + inv_rate)**(-(i + 1.) * 10.) *
                          self.par['n_h2_bus'] * self.par['repl_h2_bus']
                          for i in range(int(self.par['life_sys'] / 10.))])
        arc += crf * sum([(1. + inv_rate)**(-(i + 1.) * 10.) *
                          (self.par['n_bus'] - self.par['n_h2_bus']) *
                          self.par['repl_diesel_bus'] for i in
                          range(int(self.par['life_sys'] / 10.))])
        # cost/gain of the electricity exchanged with the grid
        grid_e_cost = sum(self.res['grid_e_buy'] * self.elec_profile)
        grid_e_gain = sum(self.res['grid_e_sold'] * self.elec_profile_sale)
        # total annual cost
        cost = (arc + components_cost + grid_e_cost - grid_e_gain +
                sum(self.res['diesel_cost']))
        # levelized cost of mobility in euro/km
        self.res['lcom'] = cost / (self.par['n_km_bus'] * self.par['n_bus'] *
                                   365.)
    def lca(self):
        """
        The life cycle assessment is performed based on the CO2 emissions from
        constructing the system components and the emissions related to
        consuming grid electricity and diesel. The annual CO2-equivalent
        emissions of the system is divided by the annual distance covered by
        the bus fleet, resulting in the levelized CO2 emission per km
        (stored in ``res['lco2']``).
        """
        # annual CO2 emission of photovoltaic array production
        pv_co2 = self.par['n_pv'] * self.par['co2_pv']
        pv_dcdc_co2 = self.res['n_dcdc_pv'] * self.par['co2_dcdc']
        comp_co2 = pv_co2 + pv_dcdc_co2
        # annual CO2 emission of electrolyzer array production
        # (scaled by the number of replacements over the system lifetime)
        pemel_co2 = (self.par['n_pemel'] * self.par['co2_pemel'] *
                     (1 + int(self.par['life_sys'] / self.res['life_pemel'])))
        pemel_dcdc_co2 = self.res['n_dcdc_pemel'] * self.par['co2_dcdc']
        comp_co2 += pemel_co2 + pemel_dcdc_co2
        # annual CO2 emission of hydrogen storage tank production
        # (n_tank [kWh] / 33.33 [kWh/kg] gives the stored hydrogen mass;
        # the factor 16 presumably converts it into tank material mass --
        # TODO confirm)
        tank_co2 = self.par['n_tank'] / 33.33 * 16. * self.par['co2_tank']
        comp_co2 += tank_co2
        # annual CO2 emission of compressor production
        compressor_co2 = self.res['n_compr'] * self.par['co2_compr']
        comp_co2 += compressor_co2
        # annual CO2 emission of dispenser production
        dispenser_co2 = self.par['n_disp'] * self.par['co2_disp']
        comp_co2 += dispenser_co2
        # annual CO2 emission of cooling unit production
        cooling_co2 = self.res['n_cooling'] * self.par['co2_cool']
        comp_co2 += cooling_co2
        # annual CO2 emission of DC-AC inverter production
        dcac_co2 = self.res['n_dcac'] * self.par['co2_dcac']
        comp_co2 += dcac_co2
        # annual CO2 emission of diesel and hydrogen engine production
        diesel_engine_co2 = (self.par['co2_diesel_engine'] * 200. *  # 200 kW
                             (self.par['n_bus'] - self.par['n_h2_bus']))
        h2_engine_co2 = self.par['co2_fc_engine'] * 200. * self.par['n_h2_bus']
        comp_co2 += ((diesel_engine_co2 + h2_engine_co2) *
                     (1 + int(self.par['life_sys'] / 10.)))  # 10y lifetime
        # annual CO2 emission of diesel consumption
        diesel_co2 = sum(self.load_diesel) * self.par['co2_diesel']
        # annual CO2 emission of system
        # (construction emissions spread over the system lifetime)
        co2 = (comp_co2 / self.par['life_sys'] + self.res['grid_co2'] +
               diesel_co2)
        # CO2 emitted per km driven by the fleet
        # (length/8760 scales a partial-year evaluation to a full year)
        self.res['lco2'] = co2 / (self.par['n_km_bus'] * self.par['n_bus'] *
                                  365. * self.length / 8760.)
    def print_results(self, succes=True):
        """
        Print the evaluation results: levelized cost of mobility, levelized
        CO2 emission, PV generation, grid electricity bought/sold, the
        compressor and cooling capacities and the electrolyzer lifetime,
        and plot the normalized hydrogen tank level. When the evaluation
        failed, print the failure message instead.

        Parameters
        ----------
        succes : bool, optional
            Whether the evaluation succeeded. The default is True.
            (NOTE: misspelling of "success" kept for API compatibility.)
        """
        if not succes:
            print("""Evaluation failed: the electrolyzer array capacity
                  of %f kW was not sufficient to cover the instantaneous
                  hydrogen demand.""" % self.par['n_pemel'])
        else:
            print('outputs:')
            # NOTE(review): the 'LCOE:' label actually reports the levelized
            # cost of mobility (res['lcom'], euro/km), not a cost of energy
            print('LCOE:'.ljust(30) + '%.5f euro/km' % self.res['lcom'])
            print('LCO2:'.ljust(30) + '%.5f kg co2-eq/km' % self.res['lco2'])
            print(
                'PV electricity generated:'.ljust(30) +
                '%.5f MWh' %
                (sum(self.res['p_pv']) / 1e6))
            print('grid energy bought:'.ljust(30) + '%.5f MWh' %
                  (sum(self.res['grid_e_buy']) / 1e6))
            print('grid energy sold:'.ljust(30) + '%.5f MWh' %
                  (sum(self.res['grid_e_sold']) / 1e6))
            print(
                'compressor capacity:'.ljust(30) +
                '%.5f kW' %
                self.res['n_compr'])
            print(
                'cooling capacity:'.ljust(30) +
                '%.5f kW' %
                self.res['n_cooling'])
            print('life electrolyzer:'.ljust(30) + '%.5f year' %
                  self.res['life_pemel'])
            # plot the normalized hydrogen tank level over the year
            plt.plot(self.res['m_h2_array'])
            plt.show(block=False)
def polyfit_func(x_in, y_in, threshold=0.99999999):
    """
    Fit a polynomial to the points of x_in and y_in. The polynomial starts
    with order 1. To evaluate its performance, the R-squared performance
    indicator is quantified. If the value for R-squared does not reach the
    defined threshold, the polynomial order is increased and the polynomial
    is fitted again on the points, until the threshold is satisfied. Once
    satisfied, the function returns the polynomial. The order is capped at
    ``len(x_in) - 1`` (an exact interpolation), which guarantees
    termination even when the threshold cannot be reached.

    Parameters
    ----------
    x_in : ndarray
        The x-coordinates for the sample points.
    y_in : ndarray
        The y-coordinates for the sample points.
    threshold : float, optional
        The threshold for the R-squared parameter. The default is
        0.99999999.

    Returns
    -------
    poly_func : numpy.poly1d
        A one-dimensional polynomial.
    """
    n_points = len(x_in)
    order = 0
    r_squared = 0.
    while r_squared < threshold:
        order += 1
        # the polynomial of the current order
        poly_coeff = np.polyfit(x_in, y_in, order)
        poly_func = np.poly1d(poly_coeff)
        # r-squared (explained variance of the least-squares fit)
        yhat = poly_func(x_in)
        ybar = np.sum(y_in) / len(y_in)
        ssreg = np.sum((yhat - ybar)**2.)
        sstot = np.sum((y_in - ybar)**2.)
        r_squared = ssreg / sstot
        if order >= n_points - 1:
            # an order of n-1 already interpolates the points exactly;
            # increasing it further cannot improve the fit and could make
            # the loop run forever on data that never reaches the threshold
            break
    return poly_func
| StarcoderdataPython |
3266880 | import sqlite3
# Sample (last_name, start_day) rows inserted into the in-memory table.
DATASET = [
    ("Tencho", "2018-12-03"),
    ("Bessho", "2018-12-03"),
    ("Emoto", "2020-12-03"),
    ("Gamo", "2020-12-03"),
    ("Funakoshi", "2020-12-03"),
    ("Funakoshi", "2020-12-03"),
    ("Doigaki", "2020-12-03"),
    # NOTE(review): month "20" is invalid -- confirm whether this dirty row
    # is intentional test data or a typo for "2020-12-03"
    ("Doigaki", "2020-20-03"),
    ("Chikura", "2020-12-03"),
    ("Akabane", "2020-12-03"),
]
def main():
    """Load DATASET into an in-memory SQLite table and print the most
    frequent (last_name, start_day) pairs starting in 2019 or later."""
    conn = sqlite3.connect(":memory:")
    try:
        conn.executescript(
            """
            DROP TABLE IF EXISTS foobar;
            CREATE TABLE foobar (
                last_name TEXT NOT NULL,
                start_day TEXT NOT NULL
            );
            """
        )
        conn.executemany("INSERT INTO foobar VALUES (?, ?)", DATASET)
        query = """
        SELECT last_name,
               start_day,
               COUNT(*) AS num_entries
        FROM foobar
        WHERE start_day >= '2019-01-01'
        GROUP BY last_name, start_day
        ORDER BY num_entries DESC
        LIMIT 10;
        """
        print(conn.execute(query).fetchall())
    finally:
        # always release the in-memory database, even when a query fails
        conn.close()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1783256 | <reponame>cccaaannn/fizzbuzz_api
# Write a program that prints the numbers from 1 to 100.
# But for multiples of three print “Fizz” instead of the number and for the multiples of five print “Buzz”.
# For numbers which are multiples of both three and five print “FizzBuzz”.
class FizzBuzz:
    """Generate classic or customized fizzbuzz sequences.

    Rules with longer divisor lists are tested first, so a number divisible
    by both 3 and 5 yields "fizzbuzz" rather than "fizz" or "buzz".
    """

    def __init__(self):
        # default rule set for the classic fizzbuzz game
        self.custom_fizzbuzz_array = {
            "custom_fizzbuzz": [
                {"word": "fizz", "divisors": [3]},
                {"word": "buzz", "divisors": [5]},
                {"word": "fizzbuzz", "divisors": [3, 5]}
            ]
        }

    def __is_divisor(self, num, divisors):
        """Return True if num is divisible by every number in divisors."""
        return all(num % divisor == 0 for divisor in divisors)

    def __get_fizzbuzz_result(self, num, custom_fizzbuzz_array):
        """Return the word of the first matching rule, else str(num)."""
        for rule in custom_fizzbuzz_array:
            if self.__is_divisor(num, rule["divisors"]):
                return rule["word"]
        return str(num)

    def fizzbuzz_custom(self, custom_fizzbuzz_array, start=1, stop=100, to_list=False):
        """Generate a custom fizzbuzz sequence.

        Parameters
        ----------
        custom_fizzbuzz_array : dict
            Rule structure of the form::

                {
                    "custom_fizzbuzz": [
                        {"word": "fizz", "divisors": [3]},
                        {"word": "buzz", "divisors": [5]},
                        {"word": "fizzbuzz", "divisors": [3, 5]},
                    ]
                }

        start : int, optional
            Start point (inclusive). Defaults to 1.
        stop : int, optional
            End point (inclusive). Defaults to 100.
        to_list : bool, optional
            Return a list of strings instead of a newline-joined string.
            Defaults to False.
        """
        # sort the rules by descending divisor-list length so that rules
        # with more divisors (e.g. [3, 5]) take priority over subsets
        rules = sorted(custom_fizzbuzz_array["custom_fizzbuzz"],
                       key=lambda rule: len(rule["divisors"]), reverse=True)
        # build the results as a list: O(n) instead of quadratic string +=,
        # and an empty range now yields [] / "" rather than ['']
        lines = [self.__get_fizzbuzz_result(num, rules)
                 for num in range(start, stop + 1)]
        if to_list:
            return lines
        return "\n".join(lines)

    def fizzbuzz(self, **kwargs):
        """Generate the classic fizzbuzz sequence (3/5/15 rules)."""
        return self.fizzbuzz_custom(self.custom_fizzbuzz_array, **kwargs)
| StarcoderdataPython |
174747 | <reponame>DavidLutton/Coursework<filename>labtoolkit/Switch/HP3488A.py
from ..GenericInstrument import GenericInstrument
from ..IEEE488 import IEEE488
from ..SCPI import SCPI
import pandas as pd
class Switch:
    """Mixin translating named switch paths into relay open/close actions."""

    def load_switchpath(self, switchpaths):
        """Load switch-path definitions from an Excel workbook.

        Reads the 'Data' sheet into a DataFrame with columns Path,
        Requires, Switch and State. Rows whose Path has a 'Requires'
        entry get the required path's switch steps appended as extra
        steps, after which the Requires column is dropped.
        """
        df = pd.read_excel(switchpaths, sheet_name='Data').dropna(how='all')
        df = df.astype({'Path': 'category', 'Requires': 'category', 'Switch': 'int', 'State': 'bool'})
        # rearrange requires as extra steps for paths that call for it;
        # collect the extra rows first and concatenate once at the end
        # (DataFrame.append was removed in pandas 2.0, and the per-path
        # frames are always computed from the original rows anyway)
        extra_rows = []
        for switch_path in set(df[df['Requires'].notna()]['Path']):
            frame = df[df['Path'] == switch_path]
            req = df[df['Path'] == list(set(frame.Requires))[0]]
            for values in req.iterrows():
                extra_rows.append({'Path': str(switch_path),
                                   'Switch': int(values[1].Switch),
                                   'State': bool(values[1].State)})
        if extra_rows:
            df = pd.concat([df, pd.DataFrame(extra_rows)], ignore_index=True)
        df = df.drop(columns=['Requires'])
        self._switchpaths = df

    @property
    def switchpath(self):
        """Names of all loaded switch paths."""
        return list(set(self._switchpaths.Path))

    @switchpath.setter
    def switchpath(self, path):
        # drive every relay listed for this path to its configured state
        for route in self._switchpaths[self._switchpaths['Path'] == path].iterrows():
            self.switch(route[1].Switch, route[1].State)
class HP3488A(GenericInstrument, Switch):
    """Driver for the HP 3488A switch/control unit."""

    def __post__(self):
        # line terminators required by the 3488A
        self.inst.read_termination = '\r\n'
        self.inst.write_termination = '\n'

    def switch(self, switch, state):
        """Open ('OPEN'/False) or close ('CLOSE'/True) the given channel."""
        if state in ('OPEN', False):
            self.write(f'OPEN {switch}')
        if state in ('CLOSE', True):
            self.write(f'CLOSE {switch}')

    def viewtext(self, switch):
        """Return the channel state as text ('CLOSED' or 'OPEN')."""
        # the instrument answers 'CLOSED 0' or 'OPEN 1'
        return self.query(f'VIEW {switch}').split(' ')[0]

    def view(self, switch):
        """Return True when the channel is closed, False when open."""
        # the instrument answers 'CLOSED 0' or 'OPEN 1'
        status = int(self.query(f'VIEW {switch}').split(' ')[-1])
        return status != 1

    def cards(self):
        """Return the card-type reply for each of the five slots."""
        return tuple(self.query(f'CTYPE {slot}') for slot in (1, 2, 3, 4, 5))

    def switches(self):
        """List every channel address available on the installed cards."""
        channels = []
        for slot, reply in enumerate(self.cards(), 1):
            card = reply.split(' ')[-1]
            if card == '44471':
                # 10-channel relay card: channels slot00..slot02
                channels.extend(f'{slot}0{dex}' for dex in (0, 1, 2))
            elif card == '44472':
                # dual 4-channel card: channels slot{segment}{dex}
                channels.extend(f'{slot}{segment}{dex}'
                                for segment in (0, 1)
                                for dex in (0, 1, 2, 3))
        return channels

    def state(self):
        """Map every available channel address to its closed/open state."""
        return {sw: self.view(sw) for sw in self.switches()}
| StarcoderdataPython |
1713225 | """Module to hold the Invoice resource."""
from fintoc.mixins import ResourceMixin
class Invoice(ResourceMixin):
    """Represents a Fintoc Invoice."""

    # NOTE(review): ``mappings`` is consumed by ResourceMixin (not visible
    # here); it appears to map nested resource fields to the resource type
    # used to build them -- confirm against the mixin implementation.
    mappings = {
        "issuer": "taxpayer",
        "receiver": "taxpayer",
    }
| StarcoderdataPython |
43139 | <reponame>vlegoff/tsunami
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la classe PGFormat."""
# flag flipped to False below when the PostgreSQL driver is missing
driver = True
# NOTE(review): cgi.escape was removed in Python 3.8; html.escape is the
# modern replacement -- confirm the Python version this module targets
from cgi import escape
import re
try:
    import postgresql
    from postgresql.exceptions import ClientCannotConnectError
except ImportError:
    driver = False
from primaires.format.fonctions import supprimer_accents
from secondaires.exportaide.formats.pgsql.config import TEXTE_CFG
class PGFormat:
    """Class holding the definition of the 'pgsql' export format."""
    def __init__(self):
        # anaconf configuration object, set by config()
        self.cfg = None
        # NOTE(review): init() assigns self.connexion (French spelling), so
        # this 'connection' attribute appears unused -- confirm and unify
        self.connection = None
        # base URL prepended to command slugs when building HTML links
        self.adresse_commandes = ""
    def peut_tourner(self):
        """Return True when the PostgreSQL driver could be imported."""
        return driver
    def config(self):
        """Configure the format (load the anaconf configuration file)."""
        self.cfg = type(importeur).anaconf.get_config("exportaide.pg",
                "exportaide/pgsql/config.cfg", "modele export PG", TEXTE_CFG)
        self.adresse_commandes = self.cfg.adresse_commandes
    def init(self):
        """Initialize the module.
        Try to connect to the database; return False when the connection
        cannot be established, True otherwise.
        """
        host = self.cfg.host
        port = self.cfg.port
        dbuser = self.cfg.dbuser
        dbpass = self.cfg.dbpass
        dbname = self.cfg.dbname
        try:
            # NOTE(review): "<PASSWORD>" is a redaction placeholder and is
            # not valid Python -- restore e.g. password=dbpass before use
            self.connexion = postgresql.open(
                    "pq://{user}:{password}@{host}:{port}/{database}".format(
                    user=dbuser, password=<PASSWORD>, host=host, port=port,
                    database=dbname))
        except ClientCannotConnectError:
            return False
        return True
    def exporter_commandes(self):
        """Export all top-level commands to the 'commands' table."""
        commandes = [noeud.commande for noeud in \
                importeur.interpreteur.commandes]
        # select the slugs of the commands already created
        query = self.connexion.prepare("SELECT slug FROM commands")
        crees = list(query())
        crees = [ligne[0] for ligne in crees]
        nb_commandes = 0
        for commande in commandes:
            nb_commandes += self.exporter_commande(commande, crees)
        print(nb_commandes, "commandes migrées")
    def exporter_sujets(self):
        """Export the player-visible help topics to the 'topics' table."""
        sujets = [sujet for sujet in importeur.information.sujets.values() if \
                sujet.str_groupe in ("aucun", "joueur", "pnj")]
        # select the slugs of the topics already created
        query = self.connexion.prepare("SELECT slug FROM topics")
        crees = list(query())
        crees = [ligne[0] for ligne in crees]
        nb_sujets = 0
        for sujet in sujets:
            nb_sujets += self.exporter_sujet(sujet, crees)
        print(nb_sujets, "sujets d'aide migrées")
    def get_slug_commande(self, commande):
        """Return the command's slug (accents stripped, ':' -> '_')."""
        nom = supprimer_accents(commande.adresse)
        nom = nom.replace(":", "_")
        return nom
    def get_nom_commande(self, commande):
        """Return the command's French and English names joined by '/'."""
        return commande.nom_francais + "/" + commande.nom_anglais
    def transformer_texte(self, texte):
        """Return the transformed text.
        Special markers such as |att| and |cmd| are converted into HTML
        tags, and %command% references become links.
        """
        texte = escape(texte)
        # %adresse% references to other commands become bilingual links
        re_cmd = r"\%(.*?)\%"
        for autre_cmd in list(re.findall(re_cmd, texte)):
            autre = importeur.interpreteur.trouver_commande(autre_cmd)
            slug = supprimer_accents(autre_cmd).replace(":", "_")
            link = "<a href=\"" + self.adresse_commandes + slug + "\""
            link_fr = link + " class=\"comm_lang_fr\">"
            link_en = link + " class=\"comm_lang_en\">"
            link_fr = link_fr + autre.nom_francais + "</a>"
            link_en = link_en + autre.nom_anglais + "</a>"
            link = link_fr + link_en
            texte = texte.replace("%" + autre_cmd + "%", link)
        # inline style markers become span tags; any leftover |xx| marker
        # is stripped by the final catch-all pattern
        balises = (
            (r"\|cmd\|(.*?)\|ff\|", r"<span class=\"commande\">\1</span>"),
            (r"\|ent\|(.*?)\|ff\|", r"<span class=\"commande\">\1</span>"),
            (r"\|att\|(.*?)\|ff\|", r"<span class=\"attention\">\1</span>"),
            (r"\|err\|(.*?)\|ff\|", r"<span class=\"erreur\">\1</span>"),
            (r"\|[a-z]+?\|", r""),
        )
        for pattern, repl in balises:
            texte = re.sub(pattern, repl, texte)
        return texte
    def exporter_commande(self, commande, crees):
        """Export the given command (and its parameters, recursively).
        Returns the number of commands exported (0 when the command's
        group is not exportable).
        """
        if commande.groupe not in ("pnj", "joueur"):
            return 0
        nb = 1
        slug = self.get_slug_commande(commande)
        parent = ""
        if commande.parente:
            parent = self.get_slug_commande(commande.parente)
        aide_courte = self.transformer_texte(commande.aide_courte)
        aide_longue = self.transformer_texte(commande.aide_longue)
        syntaxe = commande.noeud.afficher()
        if slug in crees:
            query = \
                "UPDATE commands SET french_name=$1, " \
                "english_name=$2, category=$3, " \
                "syntax=$4, synopsis=$5, help=$6, parent_id=$7 " \
                "WHERE slug=$8"
            preparation = self.connexion.prepare(query)
            # NOTE(review): the update path uses commande.categorie.nom
            # while the insert path uses commande.nom_categorie -- confirm
            # both yield the same value
            preparation(commande.nom_francais, commande.nom_anglais,
                    commande.categorie.nom, syntaxe, aide_courte,
                    aide_longue, parent, slug)
        else:
            query = \
                "INSERT INTO commands (slug, french_name, " \
                "english_name, category, syntax, synopsis, " \
                "help, parent_id) values($1, $2, $3, $4, $5, $6, $7, $8)"
            preparation = self.connexion.prepare(query)
            preparation(slug, commande.nom_francais, commande.nom_anglais,
                    commande.nom_categorie, syntaxe, aide_courte,
                    aide_longue, parent)
            crees.append(slug)
        # recursively export sub-commands (parameters)
        if commande.parametres:
            for parametre in commande.parametres.values():
                nb += self.exporter_commande(parametre.commande, crees)
        return nb
    def exporter_sujet(self, sujet, crees):
        """Export the given help topic.
        Returns the number of topics exported (0 when the topic's group
        is not exportable).
        """
        if sujet.str_groupe not in ("aucun", "pnj", "joueur"):
            return 0
        nb = 1
        cle = sujet.cle
        profondeur = self.get_profondeur_sujet(sujet)
        position = self.get_position_sujet(sujet)
        parent = ""
        if sujet.pere:
            parent = sujet.pere.cle
        # transform each paragraph, then convert blank lines into paragraph
        # breaks and single newlines into <br />
        contenu = []
        for paragraphe in sujet.contenu.paragraphes:
            contenu.append(self.transformer_texte(paragraphe))
        contenu = "\n".join(contenu)
        contenu = contenu.replace("\n\n", "</p><p>")
        contenu = "<p>" + contenu.replace("\n", "<br />") + "</p>"
        if cle in crees:
            query = \
                "UPDATE topics SET title=$1, content=$2, parent_id=$3, " \
                "depth=$4, position=$5 WHERE slug=$6"
            preparation = self.connexion.prepare(query)
            preparation(sujet.titre, contenu, parent, profondeur, position,
                    cle)
        else:
            query = \
                "INSERT INTO topics (slug, title, content, parent_id, " \
                "depth, position) values($1, $2, $3, $4, $5, $6)"
            preparation = self.connexion.prepare(query)
            preparation(cle, sujet.titre, contenu, parent, profondeur,
                    position)
            crees.append(cle)
        return nb
    def get_profondeur_sujet(self, sujet):
        """Return the topic's depth (0 for a root topic)."""
        if sujet.pere is None:
            return 0
        return 1 + self.get_profondeur_sujet(sujet.pere)
    def get_position_sujet(self, sujet):
        """Return the topic's position.
        The position is a string: "" when the topic has no parent,
        otherwise a dotted position such as "1.1.5".
        """
        if sujet.pere is None:
            return ""
        # 1-based index among the parent's children
        pos = sujet.pere.sujets_fils.index(sujet) + 1
        parent = self.get_position_sujet(sujet.pere)
        if parent:
            return parent + "." + str(pos)
        return str(pos)
| StarcoderdataPython |
3369302 | <reponame>svaswani/STEALTH
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-14 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tools', '0003_tool_model_pic'),
]
operations = [
migrations.AlterField(
model_name='tool',
name='model_pic',
field=models.ImageField(default='/static/img/no-img.jpg', upload_to=''),
),
]
| StarcoderdataPython |
3340746 | <filename>option_2_dict.py
#!/usr/bin/env python
'''
This caching option uses a global dictionary to store results.
This is a better result than using global variables, since we now have "keys"
for each operation.
The downside is (possibly) we need to control key generation, and each function
needs to check the cache. Not very DRY!
'''
import time
from pprint import pprint as pp
CACHE = {}
def do_long_op_one(arg1, arg2):
key = 'do_long_op_one|%r|%r' % (arg1, arg2)
if key in CACHE:
return CACHE[key]
# simulate long operation
time.sleep(5)
CACHE[key] = arg1 * arg2
return CACHE[key]
def clear_cache():
CACHE.clear()
def main():
print time.asctime()
print "The result:", do_long_op_one(1, 2)
print time.asctime()
print "The result:", do_long_op_one(1, 2)
print "The result:", do_long_op_one(3, 4)
print "The result:", do_long_op_one(3, 4)
print "The result:", do_long_op_one(10, 10)
print "CACHE is:"
pp(CACHE)
if __name__ == '__main__':
main()
| StarcoderdataPython |
134460 | from __future__ import unicode_literals
from utils import CanadianJurisdiction
from pupa.scrape import Organization
class Oshawa(CanadianJurisdiction):
    """Pupa jurisdiction scraper for the Oshawa City Council."""

    classification = 'legislature'
    division_id = 'ocd-division/country:ca/csd:3518013'
    division_name = 'Oshawa'
    name = 'Oshawa City Council'
    url = 'http://www.oshawa.ca'

    def get_organizations(self):
        """Yield the council organization with its mayor and seat posts."""
        council = Organization(self.name, classification=self.classification)
        council.add_post(role='Mayor', label='Oshawa', division_id=self.division_id)
        # seven regional councillor seats followed by three city councillor seats
        for role, seats in (('Regional Councillor', 7), ('Councillor', 3)):
            for seat_number in range(1, seats + 1):
                council.add_post(role=role, label='Oshawa (seat {})'.format(seat_number), division_id=self.division_id)
        yield council
| StarcoderdataPython |
1745418 | from setuptools import setup
# Minimal packaging metadata: publishes the single module ``index`` under
# the distribution name "search".
setup(
    name='search',
    version='0.0.1',
    py_modules=['index'],
)
| StarcoderdataPython |
131607 | # -*- coding: utf-8 -*-
'''
Watch files and translate the changes into salt events
:depends: - pyinotify Python module >= 0.9.5
:Caution: Using generic mask options like open, access, ignored, and
closed_nowrite with reactors can easily cause the reactor
to loop on itself.
'''
# Import Python libs
from __future__ import absolute_import
import collections
# Import salt libs
import salt.ext.six
# Import third party libs
try:
    import pyinotify
    HAS_PYINOTIFY = True
    # Default watch mask: file creation, deletion and modification.
    DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
    # Map pyinotify's IN_* constants to lowercase option names
    # (e.g. IN_CLOSE_WRITE -> 'close_write') for config lookups.
    MASKS = {}
    for var in dir(pyinotify):
        if var.startswith('IN_'):
            key = var[3:].lower()
            MASKS[key] = getattr(pyinotify, var)
except ImportError:
    HAS_PYINOTIFY = False
    DEFAULT_MASK = None
__virtualname__ = 'inotify'
import logging
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this beacon when the pyinotify bindings imported cleanly.
    '''
    return __virtualname__ if HAS_PYINOTIFY else False
def _get_mask(mask):
    '''
    Return the int that represents the mask
    '''
    # Unknown mask names map to 0, i.e. they contribute no bits.
    return MASKS.get(mask, 0)
def _enqueue(revent):
    '''
    Enqueue the event
    '''
    # __context__ is injected by the salt loader; the deque is created in
    # _get_notifier() before this callback can fire.
    __context__['inotify.queue'].append(revent)
def _get_notifier():
    '''
    Check the context for the notifier and construct it if not present
    '''
    # Lazily build one Notifier per loader context; every captured inotify
    # event is pushed onto the shared 'inotify.queue' deque by _enqueue.
    if 'inotify.notifier' not in __context__:
        __context__['inotify.queue'] = collections.deque()
        wm = pyinotify.WatchManager()
        __context__['inotify.notifier'] = pyinotify.Notifier(wm, _enqueue)
    return __context__['inotify.notifier']
def validate(config):
    '''
    Validate the beacon configuration.

    ``config`` must be a dictionary mapping each watched path to a dictionary
    of watch options containing at least one of ``mask``, ``recurse`` or
    ``auto_add``. Returns True when valid; otherwise logs the reason and
    returns False.
    '''
    # Full set of mask names accepted in a per-path 'mask' list.
    valid_mask = frozenset([
        'access',
        'attrib',
        'close_nowrite',
        'close_write',
        'create',
        'delete',
        'delete_self',
        'excl_unlink',
        'ignored',
        'modify',
        'moved_from',
        'moved_to',
        'move_self',
        'oneshot',
        'onlydir',
        'open',
        'unmount',
    ])
    # (The old comment here wrongly referenced the "diskusage" beacon and a
    # "list of dicts"; the check below requires a dict of dicts.)
    if not isinstance(config, dict):
        log.info('Configuration for inotify beacon must be a dictionary.')
        return False
    for path, attrs in config.items():
        if not isinstance(attrs, dict):
            log.info('Configuration for inotify beacon must '
                     'be a dictionary of dictionaries.')
            return False
        if not any(key in ('mask', 'recurse', 'auto_add') for key in attrs):
            log.info('Configuration for inotify beacon must '
                     'contain mask, recurse or auto_add items.')
            return False
        if 'auto_add' in attrs and not isinstance(attrs['auto_add'], bool):
            log.info('Configuration for inotify beacon '
                     'auto_add must be boolean.')
            return False
        if 'recurse' in attrs and not isinstance(attrs['recurse'], bool):
            log.info('Configuration for inotify beacon '
                     'recurse must be boolean.')
            return False
        if 'mask' in attrs:
            if not isinstance(attrs['mask'], list):
                log.info('Configuration for inotify beacon '
                         'mask must be list.')
                return False
            for mask in attrs['mask']:
                if mask not in valid_mask:
                    log.info('Configuration for inotify beacon '
                             'invalid mask option {0}.'.format(mask))
                    return False
    return True
def beacon(config):
    '''
    Watch the configured files and return one event dict per observed
    filesystem change since the previous beacon interval.
    Example Config
    .. code-block:: yaml
        beacons:
          inotify:
            /path/to/file/or/dir:
              mask:
                - open
                - create
                - close_write
              recurse: True
              auto_add: True
    The mask list can contain the following events (the default mask is create,
    delete, and modify):
    * access            File accessed
    * attrib            File metadata changed
    * close_nowrite     Unwritable file closed
    * close_write       Writable file closed
    * create            File created in watched directory
    * delete            File deleted from watched directory
    * delete_self       Watched file or directory deleted
    * modify            File modified
    * moved_from        File moved out of watched directory
    * moved_to          File moved into watched directory
    * move_self         Watched file moved
    * open              File opened
    The mask can also contain the following options:
    * dont_follow       Don't dereference symbolic links
    * excl_unlink       Omit events for children after they have been unlinked
    * oneshot           Remove watch after one event
    * onlydir           Operate only if name is directory
    recurse:
      Recursively watch files in the directory
    auto_add:
      Automatically start watching files that are created in the watched directory
    '''
    ret = []
    notifier = _get_notifier()
    wm = notifier._watch_manager
    # Read in existing events
    # Drain whatever pyinotify queued since the last beacon run into `ret`.
    if notifier.check_events(1):
        notifier.read_events()
        notifier.process_events()
        queue = __context__['inotify.queue']
        while queue:
            event = queue.popleft()
            sub = {'tag': event.path,
                   'path': event.pathname,
                   'change': event.maskname}
            ret.append(sub)
    # Get paths currently being watched
    current = set()
    for wd in wm.watches:
        current.add(wm.watches[wd].path)
    # Update existing watches and add new ones
    # TODO: make the config handle more options
    for path in config:
        if isinstance(config[path], dict):
            # Normalize the configured mask (list of names or single name)
            # into a single integer bitmask.
            mask = config[path].get('mask', DEFAULT_MASK)
            if isinstance(mask, list):
                r_mask = 0
                for sub in mask:
                    r_mask |= _get_mask(sub)
            elif isinstance(mask, salt.ext.six.binary_type):
                # NOTE(review): six.binary_type is ``bytes`` on Python 3, but
                # YAML yields ``str`` mask values there, so this branch may
                # never match on Py3 — confirm the intended type.
                r_mask = _get_mask(mask)
            else:
                r_mask = mask
            mask = r_mask
            rec = config[path].get('recurse', False)
            auto_add = config[path].get('auto_add', False)
        else:
            mask = DEFAULT_MASK
            rec = False
            auto_add = False
        if path in current:
            # Path already watched: refresh the watch only when its mask or
            # auto_add flag changed.
            for wd in wm.watches:
                if path == wm.watches[wd].path:
                    update = False
                    if wm.watches[wd].mask != mask:
                        update = True
                    if wm.watches[wd].auto_add != auto_add:
                        update = True
                    if update:
                        wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
        else:
            wm.add_watch(path, mask, rec=rec, auto_add=auto_add)
    # Return event data
    return ret
| StarcoderdataPython |
1755371 | <reponame>ramezrawas/galaxy-1
from galaxy.util import bunch
import logging
log = logging.getLogger( __name__ )
def form( *args, **kwargs ):
    """Convenience factory returning a :class:`FormBuilder`."""
    return FormBuilder( *args, **kwargs )
class FormBuilder( object ):
    """
    Simple class describing an HTML form.
    Collects FormInput/SelectInput descriptions via the chainable ``add_*``
    methods; ``inputs`` holds them in insertion order.
    """
    def __init__( self, action="", title="", name="form", submit_text="submit", use_panels=False ):
        self.title = title
        self.name = name
        self.action = action
        self.submit_text = submit_text
        self.inputs = []
        self.use_panels = use_panels
    def add_input( self, type, name, label, value=None, error=None, help=None, use_label=True ):
        # NOTE(review): arguments are forwarded to FormInput as
        # (type, label, name) while FormInput's signature is
        # (type, name, label) -- i.e. name/label are transposed here.  The
        # add_checkbox/add_text/add_password helpers below transpose them a
        # second time, so the two swaps cancel for those helpers, but direct
        # add_input callers get FormInput.name/.label swapped.  Confirm
        # caller expectations before changing either order.
        self.inputs.append( FormInput( type, label, name, value, error, help, use_label ) )
        return self
    def add_checkbox( self, name, label, value=None, error=None, help=None ):
        return self.add_input( 'checkbox', label, name, value, error, help )
    def add_text( self, name, label, value=None, error=None, help=None ):
        return self.add_input( 'text', label, name, value, error, help )
    def add_password( self, name, label, value=None, error=None, help=None ):
        return self.add_input( 'password', label, name, value, error, help )
    def add_select( self, name, label, value=None, options=[], error=None, help=None, use_label=True ):
        # NOTE(review): mutable default ``options=[]`` is shared between
        # calls; safe only while callers never mutate it in place.
        self.inputs.append( SelectInput( name, label, value=value, options=options, error=error, help=help, use_label=use_label ) )
        return self
class FormInput( object ):
    """
    Simple class describing a form input element.

    BUG FIX: ``extra_attributes`` previously defaulted to a single shared
    mutable dict, so attributes added on one input silently appeared on every
    other input created without an explicit value. Each instance now gets its
    own dict while the call signature stays backward-compatible.
    """
    def __init__( self, type, name, label, value=None, error=None, help=None, use_label=True, extra_attributes=None, **kwargs ):
        self.type = type
        self.name = name
        self.label = label
        self.value = value
        self.error = error
        self.help = help
        self.use_label = use_label
        # Fresh dict per instance instead of the shared-default pitfall.
        self.extra_attributes = {} if extra_attributes is None else extra_attributes
class DatalistInput( FormInput ):
    """ Data list input: a text input backed by an HTML <datalist>. """
    def __init__( self, name, *args, **kwargs ):
        # Ensure the rendered <input> carries list="<name>" so it binds to
        # the <datalist> emitted by body_html().
        if 'extra_attributes' not in kwargs:
            kwargs[ 'extra_attributes' ] = {}
        kwargs[ 'extra_attributes' ][ 'list' ] = name
        # type is None: the rendering template decides the input type.
        FormInput.__init__( self, None, name, *args, **kwargs )
        self.options = kwargs.get( 'options', {} )
    def body_html( self ):
        # NOTE(review): dict.iteritems() is Python 2 only; this module
        # predates Python 3.
        options = "".join( [ "<option value='%s'>%s</option>" % ( key, value ) for key, value in self.options.iteritems() ] )
        return """<datalist id="%s">%s</datalist>""" % ( self.name, options )
class SelectInput( FormInput ):
    """ A select form input. """
    def __init__( self, name, label, value=None, options=[], error=None, help=None, use_label=True ):
        # NOTE(review): shared mutable default ``options=[]`` -- safe only if
        # callers never mutate the list in place.
        FormInput.__init__( self, "select", name, label, value=value, error=error, help=help, use_label=use_label )
        self.options = options
class FormData( object ):
    """
    Class for passing data about a form to a template, very rudimentary, could
    be combined with the tool form handling to build something more general.
    """
    def __init__( self ):
        # values/errors are attribute-accessible containers keyed by input
        # name, filled in by form-processing code.
        # TODO: galaxy's two Bunchs are defined differently. Is this right?
        self.values = bunch.Bunch()
        self.errors = bunch.Bunch()
| StarcoderdataPython |
1667106 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sys
'''
A basic script to graph test score quantities and calculate statistical data.
'''
def retrieve():
    """Read integer test scores (one per line) from the file named as the
    first command-line argument and return them as a list.

    Exits the process with a message when the file is missing or unreadable.
    """
    try:
        with open(sys.argv[1]) as data:
            return [int(x.strip()) for x in data.readlines()]
    except FileNotFoundError:
        # (dropped the unused ``as e`` bindings on both handlers)
        sys.exit('\nFile was not found.\n')
    except Exception:
        sys.exit('''
    Something went wrong...
    Usage:
        python stats.py <file>
    ''')
def build(values, title):
    '''
    Builds a DataFrame of the statistical results. The DataFrame is output to
    both the console and a results.txt file.

    ``title`` labels the single DataFrame column.  (BUG FIX: renamed from
    ``entry`` -- main() calls this function with the keyword ``title=``,
    which previously raised TypeError.)
    '''
    df = pd.DataFrame(pd.Series([
        np.amin(values),
        np.amax(values),
        np.mean(values),
        np.median(values),
        np.var(values),
        np.std(values)
    ], [
        'Min',
        'Max',
        'Mean',
        'Median',
        'Variance',
        'Standard Deviation'
    ]),
        columns=[
            title
        ]
    )
    print("\n", df, "\n")
    try:
        with open('results.txt', 'w') as output:
            output.write(df.to_string())
    except Exception as e:
        # Best-effort persistence: report but don't crash if the file
        # cannot be written.
        print(e)
def display(x, y, title=None):
    """Show a bar chart of score quantities; blocks until the window closes."""
    plt.bar(x, y, label='Scores')
    if title:
        plt.title(title)
    plt.xlabel('Score')
    plt.ylabel('Quantity')
    plt.legend()
    plt.show()
def main():
    """Entry point: load scores, report statistics, and plot quantities."""
    values = retrieve()
    title = input('\n\tEnter a name for these scores:\n\t')
    # BUG FIX: build() takes its second argument positionally; the old call
    # build(values, title=title) raised TypeError because the parameter is
    # named ``entry``.
    build(values, title)
    # x is the domain of scores, y is the quantities of each score
    x = np.array([i for i in range(min(values), max(values) + 1)])
    y = np.array([0 for i in range(max(values) + 1)])
    # Adds up score quantities
    for j in values:
        y[j] += 1
    # Trim y to the same domain as x before plotting.
    display(x, y[min(values):], title)
| StarcoderdataPython |
88623 | # Generated by Django 3.1.8 on 2021-06-24 15:13
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames Employee.dept_role to the
    # clearer department_role (data-preserving column rename).
    dependencies = [
        ('employees', '0011_auto_20210624_2313'),
    ]
    operations = [
        migrations.RenameField(
            model_name='employee',
            old_name='dept_role',
            new_name='department_role',
        ),
    ]
| StarcoderdataPython |
67374 | """
Module for representing PredPatt and UDS graphs
This module represents PredPatt and UDS graphs using networkx. It
incorporates the dependency parse-based graphs from the syntax module
as subgraphs.
"""
| StarcoderdataPython |
89360 | # coding:utf-8
from django.contrib.auth.models import User
from .models import WeChatUser, PhoneUser, FeedBack, StarList, BookListComment
from rest_framework.serializers import (
SerializerMethodField,
ModelSerializer,
ValidationError,
DateTimeField,
CharField,
IntegerField,
)
from bookdata.models import NoteComment
from .accounts_lib.vlidators import CheckString,PhoneValid
from rest_framework import serializers
class FeedBackDetailSerializer(ModelSerializer):
    """
    Serializer exposing every field of a user feedback record (detail view).
    """
    class Meta:
        model = FeedBack
        fields = '__all__'
class FeedBackSerializer(ModelSerializer):
    """
    Serializer for submitting user feedback (content only).
    """
    class Meta:
        model = FeedBack
        fields = [
            'content'
        ]
class UserProfileDetailSerializer(ModelSerializer):
"""
用户基本信息序列化器
"""
have_phone = SerializerMethodField()
phone_number = SerializerMethodField()
email = SerializerMethodField()
real_name = SerializerMethodField()
message_info = SerializerMethodField()
money = SerializerMethodField()
recommend_times = SerializerMethodField()
class Meta:
model = WeChatUser
fields = [
'nickname',
'headimgurl',
'openid',
'sex',
'phone_number',
'email',
'real_name',
'have_phone',
'money',
'message_info',
'recommend_times',
]
def get_message_info(self,obj):
username = obj.openid
try:
user =User.objects.get(username=username)
phone_user = PhoneUser.objects.get(user=user)
reply = dict()
order_message = phone_user.order_message
return_message = phone_user.return_message
reply['order_msg'] = order_message
reply['return_msg'] = return_message
return reply
except:
return None
def get_phone_number(self,obj):
username = obj.openid
u = User.objects.get(username=username)
try:
p = PhoneUser.objects.get(user=u)
return p.phone_number
except:
return None
def get_email(self,obj):
username = obj.openid
u = User.objects.get(username=username)
try:
p = PhoneUser.objects.get(user=u)
return p.email
except:
return None
def get_real_name(self,obj):
username = obj.openid
u = User.objects.get(username=username)
try:
p = PhoneUser.objects.get(user=u)
return p.real_name
except:
return None
def get_have_phone(self,obj):
username = obj.openid
u = User.objects.get(username=username)
try:
p = PhoneUser.objects.get(user=u)
return 1
except:
return 0
def get_money(self,obj):
openid = obj.openid
try:
user =User.objects.get(username=openid)
phone_user = PhoneUser.objects.get(user=user)
money = phone_user.money
return money
except:
return -1
def get_recommend_times(self,obj):
openid = obj.openid
try:
user =User.objects.get(username=openid)
phone_user = PhoneUser.objects.get(user=user)
money = phone_user.recommend_times
return money
except:
return None
class PhoneUserCreateSerializer(ModelSerializer):
    """
    Serializer for binding a phone number to a user account
    (phone number + SMS captcha).
    """
    captcha = CharField(allow_null=False)
    class Meta:
        model = PhoneUser
        fields = [
            'phone_number',
            'captcha'
        ]
    def validate(self, data):
        # Both fields are mandatory; reject the payload otherwise.
        phone_number = data.get('phone_number')
        captcha = data.get('captcha')
        if not phone_number:
            raise ValidationError('lack phone_number')
        if not captcha:
            raise ValidationError('lack captcha')
        return data
class CheckAPISerializer(serializers.Serializer):
    """
    Serializer for the duplicate-check API (phone number / optional email).
    """
    phone_number = serializers.CharField()
    email = serializers.EmailField(allow_null=True)
    def validate(self, data):
        phone_number = data.get('phone_number')
        if not phone_number:
            # BUG FIX: the ValidationError was instantiated but never
            # raised, so validation silently passed with no phone number.
            raise serializers.ValidationError('lack phone number')
        return data
class SendMessageSerializer(serializers.Serializer):
    """
    Serializer for sending an SMS captcha; currently registration only.
    """
    phone_number = serializers.CharField()
    def validate(self, data):
        phone_number = data.get('phone_number')
        if not phone_number:
            # BUG FIX: the ValidationError was instantiated but never
            # raised, so validation silently passed with no phone number.
            raise serializers.ValidationError('lack phone number')
        return data
class ChangeTimesSerializer(serializers.Serializer):
    """
    Serializer for changing the recommendation frequency.
    """
    recommend_times = serializers.IntegerField()
    def validate(self, data):
        recommend_times = data.get('recommend_times')
        if not recommend_times:
            # BUG FIX: the ValidationError was instantiated but never raised.
            # NOTE(review): ``not recommend_times`` also rejects 0 -- confirm
            # whether 0 is a legal frequency.
            raise serializers.ValidationError('lack recommend_times')
        return data
class AddLabelSerializer(serializers.Serializer):
    """
    Serializer for adding a label (tag) to the home page.
    """
    label_name = serializers.CharField(max_length=1000)
class LabelSerializer(ModelSerializer):
    """
    Serializer listing the home-page categories (labels) the user added;
    internal/user-linkage fields are excluded from the payload.
    """
    class Meta:
        model = StarList
        exclude = ['user_list_id', 'user', 'list_type']
class BookListCreateSerializer(serializers.Serializer):
    """Payload for creating a book list: title, comment and ISBN-13 list."""
    title = serializers.CharField(max_length=1000)
    comment = serializers.CharField(max_length=10000)
    isbn13_list = serializers.ListField(child=IntegerField())
class BookListIdSerializer(serializers.Serializer):
    """Payload carrying a single book-list identifier."""
    list_id = serializers.CharField(max_length=200)
class CycleCommnetSerializer(serializers.Serializer):
    """Payload for posting a comment.

    NOTE: the class name misspells "Comment"; kept as-is because external
    code may import it by this name.
    """
    content = serializers.CharField(max_length=1000)
class ListCommentDetailSerializer(ModelSerializer):
    """Read serializer for a book-list comment with the author's nickname."""
    nickname = SerializerMethodField()
    class Meta:
        model = BookListComment
        fields = [
            'nickname',
            'content',
        ]
    def get_nickname(self, obj):
        """WeChat nickname of the commenter, or '--' when unresolvable."""
        username = obj.user.username
        try:
            wechat_user = WeChatUser.objects.get(openid=username)
            return wechat_user.nickname
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt; the '--' fallback is kept.
            return '--'
class NoteCommentDetailSerializer(ModelSerializer):
    """Read serializer for a note comment with the author's nickname."""
    nickname = SerializerMethodField()
    class Meta:
        model = NoteComment
        fields = [
            'nickname',
            'content',
        ]
    def get_nickname(self, obj):
        """WeChat nickname of the commenter, or '--' when unresolvable."""
        username = obj.user.username
        try:
            wechat_user = WeChatUser.objects.get(openid=username)
            return wechat_user.nickname
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt; the '--' fallback is kept.
            return '--'
| StarcoderdataPython |
3281823 | #!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import collections
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
    """
    Collects the per-node best (minimum) elapsed times for a given task-graph
    configuration and dumps them as CSV on stdout.
    """
    def __init__(self, ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect):
        self.ngraphs = ngraphs
        # Dependence names use spaces in the data, underscores on the CLI.
        self.dependence = dependence.replace('_', ' ')
        self.system = system
        self.max_problem_size = int(max_problem_size) if max_problem_size is not None else None
        self.min_problem_size = int(min_problem_size) if min_problem_size is not None else None
        self.csv_dialect = csv_dialect
        self.header = []
        # table[nodes][column] -> best elapsed time, seeded with +inf.
        self.table = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
        self.metg = collections.defaultdict(lambda: float('inf'))
    def filter(self, row):
        # Keep rows matching the requested graph count and dependence type
        # (and, when given, the system name).
        return row['ngraphs'] == self.ngraphs and row['type'] == self.dependence and (self.system is None or row['name'] == self.system)
    def process(self, row, data, metg=None):
        if self.system is not None:
            assert metg is not None
            self.metg[row['nodes']] = min(metg, self.metg[row['nodes']], key=float)
        # Transpose the column-oriented `data` dict into per-sample records.
        for values in zip(*list(data.values())):
            items = dict(zip(data.keys(), values))
            if (self.max_problem_size is None or items['iterations'] <= self.max_problem_size) and (self.min_problem_size is None or self.min_problem_size <= items['iterations']):
                # Column key: problem size when a system is fixed, otherwise
                # the system name.
                name = items['iterations'] if self.system is not None else row['name']
                if name not in self.header:
                    self.header.append(name)
                self.table[row['nodes']][name] = min(
                    items['elapsed'],
                    self.table[row['nodes']][name],
                    key=float)
    def error_value(self):
        # Value recorded for rows that failed to parse: nothing to tabulate.
        return {}
    def complete(self):
        # FIXME: This isn't actually the criteria we'd like to sort on,
        # we'd prefer to sort so that the list of names roughly parallels
        # the order of the bars in the graph.
        self.header.sort()
        if self.system is not None:
            self.header.reverse()
        self.header.insert(0, 'nodes')
        if self.system is not None:
            self.header.append('metg')
        out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
        out.writeheader()
        for nodes in sorted(self.table.keys()):
            row = self.table[nodes]
            # Replace the +inf sentinels with blanks in the CSV output.
            row = {k: None if v == float('inf') else v for k, v in row.items()}
            row['nodes'] = nodes
            if self.system is not None:
                row['metg'] = self.metg[nodes]
            out.writerow(row)
def driver(ngraphs, dependence, system, max_problem_size, min_problem_size, machine, resource, threshold, csv_dialect, verbose):
    """Build a Parser for the requested slice and stream its CSV to stdout."""
    parser = Parser(ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect)
    parser.parse(machine, resource, threshold, False, verbose)
if __name__ == '__main__':
    # CLI mirror of driver(): the argparse dest names match driver()'s
    # parameter names, so the namespace can be splatted straight in.
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--ngraphs', type=int, required=True)
    parser.add_argument('-d', '--dependence', required=True)
    parser.add_argument('-s', '--system')
    parser.add_argument('--max-problem-size')
    parser.add_argument('--min-problem-size')
    parser.add_argument('-m', '--machine', required=True)
    parser.add_argument('-r', '--resource', default='flops')
    parser.add_argument('-t', '--threshold', type=float, default=0.5)
    parser.add_argument('--csv-dialect', default='excel-tab')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    driver(**vars(args))
| StarcoderdataPython |
4803701 | from matern import Matern52
from sum_kernel import SumKernel
from product_kernel import ProductKernel
from noise import Noise
from scale import Scale
from transform_kernel import TransformKernel
__all__ = ["Matern52", "SumKernel", "ProductKernel", "Noise", "Scale", "TransformKernel"]
| StarcoderdataPython |
1792910 | <filename>modules/search.py<gh_stars>0
from whoosh import qparser
def searchParsed(q, index, modelScoring, limit):
    """Run the pre-parsed query *q* against *index* with *modelScoring*.

    NOTE(review): the searcher context closes when this returns; the results
    appear to stay usable because of closereader=False -- confirm callers do
    not rely on searcher-level state after the return.
    """
    with index.searcher(closereader=False, weighting=modelScoring) as s:
        return s.search(q, limit=limit)
def search(query, index, weighting, limit, wildcard):
    """Parse *query* against the index's "body" field (OR semantics) and run it.

    Field prefixes are always disabled; wildcard terms are honored only when
    *wildcard* is true.
    """
    body_parser = qparser.QueryParser("body", schema=index.schema, group=qparser.OrGroup)
    body_parser.remove_plugin_class(qparser.FieldsPlugin)
    if not wildcard:
        body_parser.remove_plugin_class(qparser.WildcardPlugin)
    parsed = body_parser.parse(query)
    return searchParsed(parsed, index, weighting, limit)
| StarcoderdataPython |
3311161 | import random
# The noun pool used by get_noun(). Built as a single list literal instead of
# 100 individual .append() calls; entry spelling and whitespace are preserved
# verbatim (several entries deliberately keep stray spaces, e.g. " salvage ").
noun_list = [
    "Doubt",
    "Homeland",
    "Marsh",
    "Tilt",
    "Piss",
    "Meaning",
    "Omnivore",
    "Cloth",
    "Vignette",
    "Nerve",
    "Chutney",
    "Communication",
    "Dinner",
    "Lode",
    "Perch",
    "Permit",
    "Reciprocity",
    "Step ",
    "Stimulation ",
    "Wetsuit ",
    "Coin ",
    "Cook",
    "Eggnog",
    "Headline",
    "Journal",
    "Pimp",
    "Pint",
    "Shoemaker",
    "Survey",
    "Timeout",
    "Author",
    "Breadcrumb",
    "Canvas",
    "Colleague",
    "Grandmom",
    "Jackal",
    "Length",
    "Original",
    "Spectrograph",
    "Webinar",
    "Assembly",
    "Bayou",
    "Homonym",
    "Middle",
    "Parenting",
    "Pirate",
    "Princess",
    "Reservoir",
    "Scholar",
    "Steel",
    "Silence",
    "Fox",
    "Owl",
    "Firehouse",
    "INSERT TITLE",
    "Salesmen",
    "Reflection",
    "Prostitue",
    "Skybox",
    "Print",
    " salvage ",
    " fancy ",
    " damaged ",
    " weave ",
    " cars ",
    " hop ",
    " stingy ",
    " question ",
    " trees ",
    " complete ",
    " boundary ",
    " quarrelsome ",
    " sew ",
    " carve ",
    " cling ",
    " internal ",
    " callous ",
    " scam ",
    " shed ",
    " feel ",
    " literate ",
    " needless ",
    " lyrical ",
    " oven ",
    " shiver ",
    " quarrel ",
    " outstanding ",
    " jump ",
    " grandiose ",
    " few ",
    " stew ",
    " selfish ",
    " occupy ",
    " dump ",
    " simple ",
    " building ",
    " ",
    " ",
    " ",
    " ",
]
def sort_list(list):
    """Strip leading/trailing spaces from every entry of *list* in place.

    BUG FIX: the old body was ``list.strip(" ")`` -- ``strip`` is a str
    method, so any call raised AttributeError (and the value was discarded
    anyway). The parameter name shadows the builtin ``list``; kept for
    interface compatibility.
    """
    for index, entry in enumerate(list):
        list[index] = entry.strip(" ")
def get_noun(list):
    """Return one noun chosen uniformly at random from *list*."""
    chosen = random.choice(list)
    return chosen
3329809 | import requests
BASE_URL = "https://internshala.com/"
| StarcoderdataPython |
178487 | <reponame>edimatt/CS2qif
import argparse
import csv
import json
import logging
import os
import re
from datetime import datetime
from io import open
import chardet
__all__ = ["QifConverter", "processCsv"]
class QifConverter:
    """
    Converts a Credit Suisse online-banking CSV export into a QIF file.

    Intended to be used as a context manager: the input/output file handles
    are opened in __enter__ and closed in __exit__.
    """
    def __init__(
        self,
        file_name_inp,
        file_name_out,
        cc=False,
        start_dt=datetime(1900, 1, 1),
        log_level="ERROR",
    ):
        # file_name_inp: CSV exported from CS online banking
        # file_name_out: destination QIF file
        # cc: True when the export is a credit-card statement (shifted layout)
        # start_dt: transactions registered before this date are skipped
        logging.basicConfig()
        self._mlogger = logging.getLogger(__name__)
        self._mlogger.setLevel(log_level)
        self.__file_name_inp = file_name_inp
        self.__file_name_out = file_name_out
        # Sniff the input encoding once; fall back to latin-1 when undetected.
        with open(self.__file_name_inp, "rb") as inp:
            self.__encoding = chardet.detect(inp.read())["encoding"] or "iso-8859-1"
        self.cc = cc
        self.start_dt = start_dt
        self.transactions = []
        self.categories = {}
        # Optional user-supplied mapping {category: regex} used to classify rows.
        _cf = os.path.join(
            os.environ.get("HOME"), ".config", "cs2qif", "categories.json"
        )
        if os.path.isfile(_cf):
            self._mlogger.info("Reading categories from %s", _cf)
            with open(_cf, "r") as cat:
                self.categories = json.load(cat)
        else:
            self._mlogger.warning("Categories file does not exist: %s.", _cf)
            self.categories = {}
        self._mlogger.info(
            "Run parameters: %s, %s, %s, %s",
            self.__file_name_inp,
            self.__file_name_out,
            self.cc,
            self.start_dt,
        )
    def __enter__(self):
        # Open both handles using the sniffed input encoding.
        self._mlogger.debug("Opening " + self.__file_name_inp)
        self.__file_handler_inp = open(
            self.__file_name_inp, "r", encoding=self.__encoding
        )
        self._mlogger.debug("Opening " + self.__file_name_out)
        self.__file_handler_out = open(
            self.__file_name_out, "w", encoding=self.__encoding
        )
        return self
    def __exit__(self, type, value, traceback):
        self._mlogger.debug("Closing " + self.__file_name_inp)
        self.__file_handler_inp.close()
        self._mlogger.debug("Closing " + self.__file_name_out)
        self.__file_handler_out.close()
        # Returning False lets any exception raised in the block propagate.
        return False
    def convertCsv(self):
        """
        Read the CS CSV, emit the QIF account header plus one QIF record per
        transaction, and collect the converted rows in self.transactions.
        """
        row_to_process = 0
        account_name = None
        account_type = None
        # The export starts with a short preamble: the account line sits on
        # row 3 (row 4 for credit cards) and the preamble ends one row later.
        for line in csv.reader(self.__file_handler_inp):
            row_to_process = row_to_process + 1
            CC_LINE = 4 if self.cc else 3
            CC_STOP = 6 if self.cc else 5
            if row_to_process == CC_LINE:
                account_type = (
                    "CCard"
                    if re.search("Carta di credito", line[0], re.IGNORECASE) is not None
                    else "Bank"
                )
                account_name = line[1]
                self._mlogger.info("Processing: " + account_name)
            if row_to_process == CC_STOP:
                break
        # QIF account header.
        self.__file_handler_out.write(u"!Account\n")
        self.__file_handler_out.write(u"N" + account_name + "\n")
        self.__file_handler_out.write(u"T" + account_type + "\n^\n")
        self.__file_handler_out.write(u"!Type:" + account_type + "\n")
        csv.register_dialect(
            "CREDIT_SUISSE",
            delimiter=",",
            doublequote=True,
            escapechar="\\",
            quotechar='"',
            lineterminator="\n",
        )
        # QIF output is produced through the csv module: one field per line,
        # records terminated with "^".
        csv.register_dialect(
            "QIF",
            delimiter="\n",
            quoting=csv.QUOTE_NONE,
            doublequote=True,
            escapechar="",
            quotechar="",
            lineterminator="\n^\n",
        )
        csfile = csv.DictReader(self.__file_handler_inp, dialect="CREDIT_SUISSE")
        qcsfile = csv.DictWriter(
            self.__file_handler_out,
            dialect="QIF",
            fieldnames=[
                "Data di registrazione",
                "Addebito",
                "Categoria",
                "Testo",
                "Payee",
            ],
        )
        # Column names differ between account and credit-card exports
        # (Italian: data = date, addebito = debit, accredito = credit).
        data = "Data di transazione" if self.cc else "Data di registrazione"
        addebito = "Addebito"
        accredito = "Accredito"
        if self.cc:
            addebito = addebito + " CHF"
            accredito = accredito + " CHF"
        skipped = False
        for row in csfile:
            # Skip account name
            if self.cc and not skipped:
                skipped = True
                continue
            # Skip trailer
            if (
                row[data].lower() == "totale della colonna"
                or row[data].lower() == "registrazione provv."
                or row[data] == "Totale"
            ):
                continue
            # Set up registration date
            try:
                d = datetime.strptime(row[data], "%d.%m.%Y")
                if d < self.start_dt:
                    continue
            except ValueError:
                # Unparseable date: keep the row, stamped with "now".
                d = datetime.now()
            # Set up transaction amount
            amount = "-" + row[addebito] if len(row[addebito]) != 0 else row[accredito]
            # Identify categories
            cat = None
            payee = None
            try:
                text = row["Testo"]
            except KeyError:
                text = row["Descrizione"]
            # First category regex that matches the row text wins; the matched
            # substring becomes the payee. Falls back to "Other".
            while cat is None:
                for c, expr in self.categories.items():
                    m = re.search(expr, text, re.IGNORECASE)
                    if m is not None:
                        self._mlogger.debug("Match for %s found", m.group(0))
                        cat = c
                        payee = m.group(0)
                        break
                if cat is None:
                    cat = "Other"
            # Write the row
            outrow = {
                "Data di registrazione": "D" + d.strftime("%d-%m-%y"),
                "Addebito": "T" + amount,
                "Categoria": "L" + cat,
                "Testo": "M" + text,
            }
            if payee is not None:
                outrow["Payee"] = "P" + payee.upper()
            self.transactions.append(outrow)
            qcsfile.writerow(outrow)
        self._mlogger.info("{0} transactions converted.".format(len(self.transactions)))
def processCsv(file_name_inp, file_name_out, cc, start_dt, log_level):
    """Convert one CSV export to QIF using QifConverter as a context manager."""
    with QifConverter(file_name_inp, file_name_out, cc, start_dt, log_level) as qif:
        qif.convertCsv()
def main():
    """Command-line entry point: parse arguments and run the conversion."""
    parser = argparse.ArgumentParser(
        description="Credit Suisse online banking csv to qif format."
    )
    parser.add_argument(
        "--filein",
        default="export.csv",
        required=True,
        help="Input csv containing the CS transactions.",
    )
    parser.add_argument(
        "--fileout", default="export.qif", help="Output file in the QIF format."
    )
    parser.add_argument(
        "--cc", action="store_true", help="The file to convert is a credit card."
    )
    # Default start date: first day of the current month.
    parser.add_argument(
        "--start_dt",
        default=datetime(datetime.today().year, datetime.today().month, 1),
        type=lambda s: datetime.strptime(s, "%Y%m%d"),
        help="Start date for analysis. Format YYYYMMDD.",
    )
    parser.add_argument(
        "--log-level", dest="log_level", default=logging.ERROR, help="Logging level"
    )
    args = parser.parse_args()
    processCsv(args.filein, args.fileout, args.cc, args.start_dt, args.log_level)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1707433 | <gh_stars>0
import pytest
from rpy2 import rinterface
from rpy2.rinterface import memorymanagement
rinterface.initr()
def test_rmemory_manager():
    # Protecting a SEXP raises the manager's count; releasing the last Python
    # reference is expected to drop it back to zero.
    with memorymanagement.rmemory() as rmemory:
        assert rmemory.count == 0
        foo = rmemory.protect(rinterface.conversion._str_to_charsxp('foo'))
        assert rmemory.count == 1
        del(foo)
        assert rmemory.count == 0
def test_rmemory_manager_unprotect():
    # unprotect() must reject counts larger than the number of protected
    # objects, and succeed for a valid count.
    with memorymanagement.rmemory() as rmemory:
        assert rmemory.count == 0
        foo = rmemory.protect(rinterface.conversion._str_to_charsxp('foo'))
        with pytest.raises(ValueError):
            rmemory.unprotect(2)
        rmemory.unprotect(1)
        assert rmemory.count == 0
        del(foo)
        assert rmemory.count == 0
def test_rmemory_manager_unprotect_invalid():
    # Unprotecting with nothing protected must raise, leaving the count at 0.
    with memorymanagement.rmemory() as rmemory:
        assert rmemory.count == 0
        with pytest.raises(ValueError):
            rmemory.unprotect(2)
        assert rmemory.count == 0
1718345 | # Copyright 2000 - 2015 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mimetypes import MimeTypes
from ntpath import basename
class File:
    """Client wrapper around the ``/script/1.0/file`` service endpoints.

    ``connection`` must expose ``get``, ``delete`` and ``post_multi_part``;
    ``id`` identifies an existing file (None for new uploads).
    """
    def __init__(self, connection, id):
        self.connection = connection
        self.id = id
        self.service = "/script/1.0/file"
    def list(self):
        """List all existing files in account."""
        return self.connection.get(self.service)
    def retrieve(self):
        """Retrieve an existing file info by id."""
        if self.id is None:
            # BUG FIX: error text said "monitor ID" (copy-pasted from a
            # monitor class); this endpoint needs a file ID.
            raise Exception("Missing id: This API requires a file ID be supplied.")
        return self.connection.get(self.service + "/" + self.id)
    def delete(self):
        """Delete an existing file by id."""
        if self.id is None:
            raise Exception("Missing id: This API requires a file ID be supplied.")
        return self.connection.delete(self.service + "/" + self.id)
    def upload(self, file_path, mime_type=None):
        """Upload a new data file.
        Arguments:
        file_path -- Path to the file on the system making the request.
        Keyword Arguments:
        mime_type -- The MIME type of the file. If not specified, the client
                     will attempt to use the mimetypes library to guess.
        """
        if mime_type is None:  # BUG FIX: was "== None"
            mime = MimeTypes()
            mime_type = mime.guess_type(file_path)[0]
        file_name = basename(file_path)
        params = {'qqfile': file_name}
        # BUG FIX: the file handle was previously opened and never closed;
        # the with-block keeps it open for the request, then releases it.
        with open(file_path, 'rb') as handle:
            file = {'file': (file_name, handle, mime_type)}
            return self.connection.post_multi_part(self.service, file, params=params)
| StarcoderdataPython |
1611007 | <reponame>yk/jina
from typing import Dict, Any, Type
from ..base import VersionedYAMLParser
from ....flow.base import BaseFlow
from ....helper import expand_env_var
class LegacyParser(VersionedYAMLParser):
    """Parser/serializer for the legacy (pre-versioned) flow YAML syntax."""

    version = 'legacy'  # the version number this parser is designed for

    def parse(self, cls: Type['BaseFlow'], data: Dict) -> 'BaseFlow':
        """Build a flow object from a legacy-syntax YAML document.

        :param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
        :param data: flow yaml file loaded as python dict
        """
        with_block = data.get('with', {})  # type: Dict[str, Any]
        raw_args = with_block.pop('args', ())
        raw_kwargs = with_block.pop('kwargs', {})
        # Any keys left in the 'with' block are treated as extra kwargs;
        # environment variables are expanded in every value.
        ctor_args = (expand_env_var(v) for v in raw_args)
        ctor_kwargs = {key: expand_env_var(val)
                       for key, val in {**raw_kwargs, **with_block}.items()}
        flow = cls(*ctor_args, **ctor_kwargs)

        for pod_name, pod_attr in data.get('pods', {}).items():
            if pod_name == 'gateway':
                # ignore gateway when reading, it will be added during build()
                continue
            expanded_attr = {key: expand_env_var(val) for key, val in pod_attr.items()}
            flow.add(name=pod_name, **expanded_attr, copy_flow=False)
        return flow

    def dump(self, data: 'BaseFlow') -> Dict:
        """Return the dictionary given a versioned flow object.

        :param data: versioned flow object
        """
        result = {}
        if data._version:
            result['version'] = data._version
        if data._kwargs:
            result['with'] = data._kwargs
        if data._pod_nodes:
            result['pods'] = {}
            if 'gateway' in data._pod_nodes:
                # always dump gateway as the first pod, if it exists
                result['pods']['gateway'] = {}
        for pod_name, pod in data._pod_nodes.items():
            if pod_name == 'gateway':
                continue
            attrs = {'needs': list(pod.needs)} if pod.needs else {}
            attrs.update(pod._kwargs)
            # the pod name is the mapping key, so drop it from the attrs
            attrs.pop('name', None)
            result['pods'][pod_name] = attrs
        return result
| StarcoderdataPython |
1654234 | """
Script to export features and labels from database into csv file.
"""
import argparse
import os
import pandas as pd
from research_database.research_database_communication import ResearchDBConnection
def main():
    """Read stays/features/inputs from the database, merge them and write csv."""
    arg_parser = argparse.ArgumentParser(description='Merge feature and labels and output as csv.')
    arg_parser.add_argument('password', type=str, help='Database password.')
    arg_parser.add_argument('output_file', type=str, help='File to store output data.')
    arg_parser.add_argument('--force', action='store_true', help='Force to override output file.')
    args, _ = arg_parser.parse_known_args()

    connection = ResearchDBConnection(args.password)
    stays = connection.read_table_from_db('stays')
    features = connection.read_table_from_db('features')
    inputs = connection.read_table_from_db('inputs')

    merged = pivot_inputs_for_features(features, inputs)
    assert merged.shape[0] == stays.shape[0]

    # Derive the year the ICU stay ended and attach it together with the label.
    stays.loc[:, 'year_icu_out'] = pd.to_datetime(stays.loc[:, 'intbis'].dt.year, format="%Y")
    stays = stays.set_index('id')
    merged = merged.merge(stays[['year_icu_out', 'label']],
                          left_index=True, right_index=True, how='inner')
    assert merged.shape[0] == stays.shape[0]
    # Two extra columns were added (year_icu_out, label); the rest are features.
    assert merged.shape[1] - 2 == features.shape[0]

    print('Write features and labels to', args.output_file)
    if args.force and os.path.exists(args.output_file):
        os.remove(args.output_file)
    merged.to_csv(args.output_file, sep=',', header=True, index=False)
def pivot_inputs_for_features(features, inputs):
    """Pivot the long-format ``inputs`` table into one wide row per stay.

    :param features: frame with at least 'name' and 'datacolumn' columns;
        'datacolumn' is either 'textvalue' or 'numericvalue' and selects
        which value column of ``inputs`` carries the feature's data.
    :param inputs: long-format frame with 'stayid', 'featurename',
        'textvalue' and 'numericvalue' columns.
    :return: frame indexed by stay id with one column per feature
        (numeric columns first, then text columns).
    """
    # Sort a copy. The previous version used inplace=True and thereby
    # mutated the caller's data frame as a hidden side effect.
    features = features.sort_values(['name'], ascending=True, ignore_index=True)
    text_features = features.loc[features['datacolumn'] == 'textvalue']
    numeric_features = features.loc[features['datacolumn'] == 'numericvalue']
    # Every feature must be exactly one of text/numeric.
    assert features.shape[0] == text_features.shape[0] + numeric_features.shape[0]

    # Normalize ids on a copy so the caller's frame stays untouched as well.
    inputs = inputs.copy()
    inputs['stayid'] = inputs['stayid'].astype('int')
    text_outputs = inputs.loc[inputs['featurename'].isin(text_features['name']),
                              ['stayid', 'featurename', 'textvalue']]\
        .pivot(index='stayid', columns='featurename', values='textvalue')
    numeric_outputs = inputs.loc[inputs['featurename'].isin(numeric_features['name']),
                                 ['stayid', 'featurename', 'numericvalue']]\
        .pivot(index='stayid', columns='featurename', values='numericvalue')
    # Each feature must end up as exactly one pivoted column.
    assert features.shape[0] == text_outputs.shape[1] + numeric_outputs.shape[1]
    # Inner join keeps only stays present in both pivoted frames.
    return numeric_outputs.join(text_outputs, how='inner')
# Allow use both as a command-line script and as an importable module.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1704172 | <reponame>havron/horseradish
import os

# Absolute path of the directory containing this config file.
_basedir = os.path.abspath(os.path.dirname(__file__))

THREADS_PER_PAGE = 8

# General
# These will need to be set to `True` if you are developing locally
CORS = False
debug = False

# this is the secret key used by flask session management
# NOTE(review): the committed secrets were redacted to "<KEY>"; the closing
# quotes were restored so the module is importable again. Replace the
# placeholders with real values supplied outside version control.
SECRET_KEY = "<KEY>"

# You should consider storing these separately from your config
HORSERADISH_TOKEN_SECRET = "<KEY>"
HORSERADISH_ENCRYPTION_KEYS = "faieHLYEOndlOmTH3JiouzvnhM0rQ1u4ZFboYpIwfDI="

# Logging
LOG_LEVEL = "DEBUG"
LOG_FILE = "horseradish.log"
LOG_UPGRADE_FILE = "db_upgrade.log"

# Database
# modify this if you are not using a local database
SQLALCHEMY_DATABASE_URI = (
    "postgresql://horseradish:horseradish@localhost:5432/horseradish"
)
| StarcoderdataPython |
62473 | import unicodedata
import unicodedata
chars = []
for c in range(1, 65536):
c = unichr(c)
name = unicodedata.name(c, '')
if name.startswith("FULLWIDTH") or name.startswith("HALFWIDTH"):
chars.append((name, c))
d = {}
for name, c in chars:
p = name.split()
if p[0] in ('HALFWIDTH', 'FULLWIDTH'):
name = " ".join(p[1:])
normal = full = half = None
try:
normal = unicodedata.lookup(name)
except KeyError:
pass
try:
full = unicodedata.lookup("FULLWIDTH "+name)
except KeyError:
pass
try:
half = unicodedata.lookup("HALFWIDTH "+name)
except KeyError:
pass
if normal or full or half:
d[name] = (normal, full, half)
d2 = {}
for name, (normal, full, half) in d.items():
if full:
if normal:
pair = (full, normal)
elif half:
pair = (full, half)
if half:
if normal:
pair = (normal, half)
elif full:
pair = (full, half)
try:
pair[0].encode("cp932")
pair[1].encode("cp932")
except UnicodeEncodeError:
continue
d2[name] = pair
d2['YEN SIGN'] = (u'\uffe5', u'\x5c')
l = []
for name, (full, half) in d2.items():
print "%r:%r,\t# %s" % (full, half, name)
| StarcoderdataPython |
165885 | <reponame>TheMagnat/IAR_Self-Improving-Reactive-Agents
import numpy as np
import glob
import os
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
def load(path):
    """Load every run csv found under the method sub-directories of *path*.

    Each immediate sub-directory is one method; each ``*.csv`` inside it is
    one run. The first (header) row of every csv is dropped.

    Returns a tuple ``(method names, numpy array of runs)``.
    """
    names = []
    runs = []
    with os.scandir(path) as entries:
        for entry in entries:
            if not entry.is_dir():
                continue
            names.append(entry.name)
            method_runs = []
            for csv_path in glob.glob(f"{entry.path}/*.csv"):
                table = np.genfromtxt(csv_path, delimiter=',')
                method_runs.append(table[1:])  # drop the header row
            runs.append(method_runs)
    return names, np.array(runs)
def visualize(methodsNames, methodsData, which=2):
    """Plot, per method, the smoothed mean of column *which* with a ±1 std band.

    :param methodsNames: one label per method
    :param methodsData: per method, a list of runs (2-D arrays of per-play stats)
    :param which: column index to plot (2 is the food column)
    """
    n_plays = methodsData[0][0].shape[0]
    xs = np.arange(n_plays)
    fig, axis = plt.subplots(1)
    for label, method_runs in zip(methodsNames, methodsData):
        values = np.array(method_runs)[:, :, which]
        mean = values.mean(axis=0)
        std = values.std(axis=0)
        # Savitzky-Golay smoothing of the mean curve only; the band uses
        # the raw mean ± std.
        smoothed_mean = savgol_filter(mean, 11, 2)
        axis.plot(xs, smoothed_mean, lw=2, label=label)
        axis.fill_between(xs, mean + std, mean - std, alpha=0.4)
    axis.legend(loc='lower right')
    axis.set_xlabel('Plays')
    axis.set_ylabel('Food')
    axis.grid()
    plt.show()
# --- Script entry point ---
# Directory containing one sub-directory of run csv files per method.
runsPath = "runs_v3"
allNames, allRuns = load(runsPath)
# Threshold ("seuil" is French) applied to column 2, plotted as 'Food'.
seuil = 12.0
print("Number of Plays with mea")
# Per method: count (run, play) entries whose food value exceeds the threshold.
for name, run in zip(allNames, allRuns):
    print(name, ":", (run[:, :, 2] > seuil).sum())
# print(allNames[0], (allRuns[0, :, 200:300, 2].mean()))
visualize(allNames, allRuns, which=2)
4295 | <gh_stars>1-10
# coding: utf-8
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 3.25.0-2b3f843a-20210115-164628
"""
The administration REST API for IBM Event Streams on Cloud.
"""
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class AdminrestV1(BaseService):
    """The adminrest V1 service."""

    DEFAULT_SERVICE_URL = 'https://adminrest.cloud.ibm.com'
    DEFAULT_SERVICE_NAME = 'adminrest'

    @classmethod
    def new_instance(cls,
                     service_name: str = DEFAULT_SERVICE_NAME,
                     ) -> 'AdminrestV1':
        """
        Return a new client for the adminrest service using the specified
        parameters and external configuration.
        """
        # Credentials and endpoint are resolved from external configuration
        # (environment variables / credentials file) keyed by `service_name`.
        authenticator = get_authenticator_from_environment(service_name)
        service = cls(
            authenticator
            )
        service.configure_service(service_name)
        return service

    def __init__(self,
                 authenticator: Authenticator = None,
                 ) -> None:
        """
        Construct a new client for the adminrest service.

        :param Authenticator authenticator: The authenticator specifies the authentication mechanism.
               Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
               about initializing the authenticator of your choice.
        """
        BaseService.__init__(self,
                             service_url=self.DEFAULT_SERVICE_URL,
                             authenticator=authenticator)

    #########################
    # default
    #########################

    def create_topic(self,
                     *,
                     name: str = None,
                     partitions: int = None,
                     partition_count: int = None,
                     configs: List['ConfigCreate'] = None,
                     **kwargs
                     ) -> DetailedResponse:
        """
        Create a new topic.

        Create a new topic.

        :param str name: (optional) The name of topic to be created.
        :param int partitions: (optional) The number of partitions.
        :param int partition_count: (optional) The number of partitions, this field
               takes precedence over 'partitions'. Default value is 1 if not specified.
        :param List[ConfigCreate] configs: (optional) The config properties to be
               set for the new topic.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if configs is not None:
            # Serialize model objects into plain dicts for the JSON body.
            configs = [convert_model(x) for x in configs]
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='create_topic')
        headers.update(sdk_headers)
        data = {
            'name': name,
            'partitions': partitions,
            'partition_count': partition_count,
            'configs': configs
        }
        # Drop any fields the caller left unset before serializing.
        data = {k: v for (k, v) in data.items() if v is not None}
        data = json.dumps(data)
        headers['content-type'] = 'application/json'
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        url = '/admin/topics'
        request = self.prepare_request(method='POST',
                                       url=url,
                                       headers=headers,
                                       data=data)
        response = self.send(request)
        return response

    def list_topics(self,
                    *,
                    topic_filter: str = None,
                    per_page: int = None,
                    page: int = None,
                    **kwargs
                    ) -> DetailedResponse:
        """
        Get a list of topics.

        Returns a list containing information about all of the Kafka topics that are
        defined for an instance of the Event Streams service. If there are currently no
        topics defined then an empty list is returned.

        :param str topic_filter: (optional) A filter to be applied to the topic
               names. A simple filter can be specified as a string with asterisk (`*`)
               wildcards representing 0 or more characters, e.g. `topic-name*` will filter
               all topic names that begin with the string `topic-name` followed by any
               character sequence. A more complex filter pattern can be used by
               surrounding a regular expression in forward slash (`/`) delimiters, e.g.
               `/topic-name.* /`.
        :param int per_page: (optional) The number of topic names to be returns.
        :param int page: (optional) The page number to be returned. The number 1
               represents the first page. The default value is 1.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse with `List[TopicDetail]` result
        """
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='list_topics')
        headers.update(sdk_headers)
        params = {
            'topic_filter': topic_filter,
            'per_page': per_page,
            'page': page
        }
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        url = '/admin/topics'
        request = self.prepare_request(method='GET',
                                       url=url,
                                       headers=headers,
                                       params=params)
        response = self.send(request)
        return response

    def get_topic(self,
                  topic_name: str,
                  **kwargs
                  ) -> DetailedResponse:
        """
        Get detailed information on a topic.

        Get detailed information on a topic.

        :param str topic_name: The topic name for the topic to be listed.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse with `dict` result representing a `TopicDetail` object
        """
        if topic_name is None:
            raise ValueError('topic_name must be provided')
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='get_topic')
        headers.update(sdk_headers)
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        # URL-encode the topic name before substituting it into the path.
        path_param_keys = ['topic_name']
        path_param_values = self.encode_path_vars(topic_name)
        path_param_dict = dict(zip(path_param_keys, path_param_values))
        url = '/admin/topics/{topic_name}'.format(**path_param_dict)
        request = self.prepare_request(method='GET',
                                       url=url,
                                       headers=headers)
        response = self.send(request)
        return response

    def delete_topic(self,
                     topic_name: str,
                     **kwargs
                     ) -> DetailedResponse:
        """
        Delete a topic.

        Delete a topic.

        :param str topic_name: The topic name for the topic to be listed.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if topic_name is None:
            raise ValueError('topic_name must be provided')
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='delete_topic')
        headers.update(sdk_headers)
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        path_param_keys = ['topic_name']
        path_param_values = self.encode_path_vars(topic_name)
        path_param_dict = dict(zip(path_param_keys, path_param_values))
        url = '/admin/topics/{topic_name}'.format(**path_param_dict)
        request = self.prepare_request(method='DELETE',
                                       url=url,
                                       headers=headers)
        response = self.send(request)
        return response

    def update_topic(self,
                     topic_name: str,
                     *,
                     new_total_partition_count: int = None,
                     configs: List['ConfigUpdate'] = None,
                     **kwargs
                     ) -> DetailedResponse:
        """
        Increase the number of partitions and/or update one or more topic configuration parameters.

        Increase the number of partitions and/or update one or more topic configuration
        parameters.

        :param str topic_name: The topic name for the topic to be listed.
        :param int new_total_partition_count: (optional) The new partition number
               to be increased.
        :param List[ConfigUpdate] configs: (optional) The config properties to be
               updated for the topic. Valid config keys are 'cleanup.policy',
               'retention.ms', 'retention.bytes', 'segment.bytes', 'segment.ms',
               'segment.index.bytes'.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if topic_name is None:
            raise ValueError('topic_name must be provided')
        if configs is not None:
            # Serialize model objects into plain dicts for the JSON body.
            configs = [convert_model(x) for x in configs]
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='update_topic')
        headers.update(sdk_headers)
        data = {
            'new_total_partition_count': new_total_partition_count,
            'configs': configs
        }
        # Drop any fields the caller left unset before serializing.
        data = {k: v for (k, v) in data.items() if v is not None}
        data = json.dumps(data)
        headers['content-type'] = 'application/json'
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        path_param_keys = ['topic_name']
        path_param_values = self.encode_path_vars(topic_name)
        path_param_dict = dict(zip(path_param_keys, path_param_values))
        url = '/admin/topics/{topic_name}'.format(**path_param_dict)
        request = self.prepare_request(method='PATCH',
                                       url=url,
                                       headers=headers,
                                       data=data)
        response = self.send(request)
        return response

    def get_mirroring_topic_selection(self,
                                      **kwargs
                                      ) -> DetailedResponse:
        """
        Get current topic selection for mirroring.

        Get current topic selection for mirroring.

        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object
        """
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='get_mirroring_topic_selection')
        headers.update(sdk_headers)
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        url = '/admin/mirroring/topic-selection'
        request = self.prepare_request(method='GET',
                                       url=url,
                                       headers=headers)
        response = self.send(request)
        return response

    def replace_mirroring_topic_selection(self,
                                          *,
                                          includes: List[str] = None,
                                          **kwargs
                                          ) -> DetailedResponse:
        """
        Replace topic selection for mirroring.

        Replace topic selection for mirroring. This operation replaces the complete set of
        mirroring topic selections.

        :param List[str] includes: (optional)
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object
        """
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='replace_mirroring_topic_selection')
        headers.update(sdk_headers)
        data = {
            'includes': includes
        }
        # Drop any fields the caller left unset before serializing.
        data = {k: v for (k, v) in data.items() if v is not None}
        data = json.dumps(data)
        headers['content-type'] = 'application/json'
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        url = '/admin/mirroring/topic-selection'
        request = self.prepare_request(method='POST',
                                       url=url,
                                       headers=headers,
                                       data=data)
        response = self.send(request)
        return response

    def get_mirroring_active_topics(self,
                                    **kwargs
                                    ) -> DetailedResponse:
        """
        Get topics that are being actively mirrored.

        Get topics that are being actively mirrored.

        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse with `dict` result representing a `MirroringActiveTopics` object
        """
        headers = {}
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V1',
                                      operation_id='get_mirroring_active_topics')
        headers.update(sdk_headers)
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        headers['Accept'] = 'application/json'
        url = '/admin/mirroring/active-topics'
        request = self.prepare_request(method='GET',
                                       url=url,
                                       headers=headers)
        response = self.send(request)
        return response
##############################################################################
# Models
##############################################################################
class ReplicaAssignmentBrokers():
    """
    ReplicaAssignmentBrokers.

    :attr List[int] replicas: (optional)
    """

    def __init__(self,
                 *,
                 replicas: List[int] = None) -> None:
        """
        Initialize a ReplicaAssignmentBrokers object.

        :param List[int] replicas: (optional)
        """
        self.replicas = replicas

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ReplicaAssignmentBrokers':
        """Initialize a ReplicaAssignmentBrokers object from a json dictionary."""
        # Only forward the keys actually present in the payload.
        return cls(replicas=_dict['replicas']) if 'replicas' in _dict else cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ReplicaAssignmentBrokers object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        # Unset (None) attributes are omitted from the serialized form.
        replicas = getattr(self, 'replicas', None)
        return {} if replicas is None else {'replicas': replicas}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ReplicaAssignmentBrokers object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ReplicaAssignmentBrokers') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ReplicaAssignmentBrokers') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigCreate():
    """
    ConfigCreate.

    :attr str name: (optional) The name of the config property.
    :attr str value: (optional) The value for a config property.
    """

    def __init__(self,
                 *,
                 name: str = None,
                 value: str = None) -> None:
        """
        Initialize a ConfigCreate object.

        :param str name: (optional) The name of the config property.
        :param str value: (optional) The value for a config property.
        """
        self.name = name
        self.value = value

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigCreate':
        """Initialize a ConfigCreate object from a json dictionary."""
        # Only forward the keys actually present in the payload.
        known = {key: _dict[key] for key in ('name', 'value') if key in _dict}
        return cls(**known)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigCreate object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        # Unset (None) attributes are omitted from the serialized form.
        return {key: val
                for key, val in ((k, getattr(self, k, None)) for k in ('name', 'value'))
                if val is not None}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigCreate object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigCreate') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ConfigCreate') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigUpdate():
    """
    ConfigUpdate.

    :attr str name: (optional) The name of the config property.
    :attr str value: (optional) The value for a config property.
    :attr bool reset_to_default: (optional) When true, the value of the config
          property is reset to its default value.
    """

    def __init__(self,
                 *,
                 name: str = None,
                 value: str = None,
                 reset_to_default: bool = None) -> None:
        """
        Initialize a ConfigUpdate object.

        :param str name: (optional) The name of the config property.
        :param str value: (optional) The value for a config property.
        :param bool reset_to_default: (optional) When true, the value of the config
               property is reset to its default value.
        """
        self.name = name
        self.value = value
        self.reset_to_default = reset_to_default

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigUpdate':
        """Initialize a ConfigUpdate object from a json dictionary."""
        # Only forward the keys actually present in the payload.
        known = {key: _dict[key]
                 for key in ('name', 'value', 'reset_to_default')
                 if key in _dict}
        return cls(**known)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigUpdate object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        # Unset (None) attributes are omitted from the serialized form.
        return {key: val
                for key, val in ((k, getattr(self, k, None))
                                 for k in ('name', 'value', 'reset_to_default'))
                if val is not None}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigUpdate object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigUpdate') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ConfigUpdate') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class MirroringActiveTopics():
    """
    Topics that are being actively mirrored.

    :attr List[str] active_topics: (optional)
    """

    def __init__(self,
                 *,
                 active_topics: List[str] = None) -> None:
        """
        Initialize a MirroringActiveTopics object.

        :param List[str] active_topics: (optional)
        """
        self.active_topics = active_topics

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'MirroringActiveTopics':
        """Initialize a MirroringActiveTopics object from a json dictionary."""
        # Only forward the key when it is actually present in the payload.
        if 'active_topics' in _dict:
            return cls(active_topics=_dict['active_topics'])
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a MirroringActiveTopics object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        # An unset (None) attribute is omitted from the serialized form.
        topics = getattr(self, 'active_topics', None)
        return {} if topics is None else {'active_topics': topics}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this MirroringActiveTopics object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'MirroringActiveTopics') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'MirroringActiveTopics') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class MirroringTopicSelection():
    """
    Mirroring topic selection payload.

    :attr List[str] includes: (optional)
    """

    def __init__(self,
                 *,
                 includes: List[str] = None) -> None:
        """
        Initialize a MirroringTopicSelection object.

        :param List[str] includes: (optional)
        """
        self.includes = includes

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'MirroringTopicSelection':
        """Initialize a MirroringTopicSelection object from a json dictionary."""
        # Only forward the key when it is actually present in the payload.
        if 'includes' in _dict:
            return cls(includes=_dict['includes'])
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a MirroringTopicSelection object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        # An unset (None) attribute is omitted from the serialized form.
        includes = getattr(self, 'includes', None)
        return {} if includes is None else {'includes': includes}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this MirroringTopicSelection object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'MirroringTopicSelection') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'MirroringTopicSelection') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ReplicaAssignment():
    """
    ReplicaAssignment.

    :attr int id: (optional) The ID of the partition.
    :attr ReplicaAssignmentBrokers brokers: (optional)
    """

    def __init__(self,
                 *,
                 id: int = None,
                 brokers: 'ReplicaAssignmentBrokers' = None) -> None:
        """
        Initialize a ReplicaAssignment object.

        :param int id: (optional) The ID of the partition.
        :param ReplicaAssignmentBrokers brokers: (optional)
        """
        self.id = id
        self.brokers = brokers

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ReplicaAssignment':
        """Initialize a ReplicaAssignment object from a json dictionary."""
        kwargs = {}
        if 'id' in _dict:
            kwargs['id'] = _dict['id']
        if 'brokers' in _dict:
            # The nested model is deserialized recursively.
            kwargs['brokers'] = ReplicaAssignmentBrokers.from_dict(_dict['brokers'])
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ReplicaAssignment object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        if getattr(self, 'id', None) is not None:
            result['id'] = self.id
        if getattr(self, 'brokers', None) is not None:
            # The nested model is serialized recursively.
            result['brokers'] = self.brokers.to_dict()
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ReplicaAssignment object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ReplicaAssignment') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ReplicaAssignment') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TopicConfigs():
    """
    Configuration values of a topic.

    :attr str cleanup_policy: (optional) The value of config property
        'cleanup.policy'.
    :attr str min_insync_replicas: (optional) The value of config property
        'min.insync.replicas'.
    :attr str retention_bytes: (optional) The value of config property
        'retention.bytes'.
    :attr str retention_ms: (optional) The value of config property 'retention.ms'.
    :attr str segment_bytes: (optional) The value of config property
        'segment.bytes'.
    :attr str segment_index_bytes: (optional) The value of config property
        'segment.index.bytes'.
    :attr str segment_ms: (optional) The value of config property 'segment.ms'.
    """

    # (json property name, python attribute name) pairs used by
    # from_dict()/to_dict() to translate between wire format and model.
    _JSON_KEYS = (
        ('cleanup.policy', 'cleanup_policy'),
        ('min.insync.replicas', 'min_insync_replicas'),
        ('retention.bytes', 'retention_bytes'),
        ('retention.ms', 'retention_ms'),
        ('segment.bytes', 'segment_bytes'),
        ('segment.index.bytes', 'segment_index_bytes'),
        ('segment.ms', 'segment_ms'),
    )

    def __init__(self,
                 *,
                 cleanup_policy: str = None,
                 min_insync_replicas: str = None,
                 retention_bytes: str = None,
                 retention_ms: str = None,
                 segment_bytes: str = None,
                 segment_index_bytes: str = None,
                 segment_ms: str = None) -> None:
        """
        Initialize a TopicConfigs object.

        :param str cleanup_policy: (optional) value of 'cleanup.policy'
        :param str min_insync_replicas: (optional) value of 'min.insync.replicas'
        :param str retention_bytes: (optional) value of 'retention.bytes'
        :param str retention_ms: (optional) value of 'retention.ms'
        :param str segment_bytes: (optional) value of 'segment.bytes'
        :param str segment_index_bytes: (optional) value of 'segment.index.bytes'
        :param str segment_ms: (optional) value of 'segment.ms'
        """
        self.cleanup_policy = cleanup_policy
        self.min_insync_replicas = min_insync_replicas
        self.retention_bytes = retention_bytes
        self.retention_ms = retention_ms
        self.segment_bytes = segment_bytes
        self.segment_index_bytes = segment_index_bytes
        self.segment_ms = segment_ms

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TopicConfigs':
        """Initialize a TopicConfigs object from a json dictionary."""
        kwargs = {attr: _dict[key]
                  for key, attr in cls._JSON_KEYS
                  if key in _dict}
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TopicConfigs object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for key, attr in self._JSON_KEYS:
            value = getattr(self, attr, None)
            if value is not None:
                result[key] = value
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TopicConfigs object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TopicConfigs') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'TopicConfigs') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TopicDetail():
    """
    Details of a topic.

    :attr str name: (optional) The name of the topic.
    :attr int partitions: (optional) The number of partitions.
    :attr int replication_factor: (optional) The number of replication factor.
    :attr int retention_ms: (optional) The value of config property 'retention.ms'.
    :attr str cleanup_policy: (optional) The value of config property
        'cleanup.policy'.
    :attr TopicConfigs configs: (optional)
    :attr List[ReplicaAssignment] replica_assignments: (optional) The replica
        assignment of the topic.
    """

    # (json property name, python attribute name) pairs for the scalar
    # fields; 'configs' and 'replicaAssignments' need model conversion and
    # are handled separately in from_dict()/to_dict().
    _SCALAR_KEYS = (
        ('name', 'name'),
        ('partitions', 'partitions'),
        ('replicationFactor', 'replication_factor'),
        ('retentionMs', 'retention_ms'),
        ('cleanupPolicy', 'cleanup_policy'),
    )

    def __init__(self,
                 *,
                 name: str = None,
                 partitions: int = None,
                 replication_factor: int = None,
                 retention_ms: int = None,
                 cleanup_policy: str = None,
                 configs: 'TopicConfigs' = None,
                 replica_assignments: List['ReplicaAssignment'] = None) -> None:
        """
        Initialize a TopicDetail object.

        :param str name: (optional) The name of the topic.
        :param int partitions: (optional) The number of partitions.
        :param int replication_factor: (optional) The number of replication factor.
        :param int retention_ms: (optional) value of 'retention.ms'.
        :param str cleanup_policy: (optional) value of 'cleanup.policy'.
        :param TopicConfigs configs: (optional)
        :param List[ReplicaAssignment] replica_assignments: (optional) The
            replica assignment of the topic.
        """
        self.name = name
        self.partitions = partitions
        self.replication_factor = replication_factor
        self.retention_ms = retention_ms
        self.cleanup_policy = cleanup_policy
        self.configs = configs
        self.replica_assignments = replica_assignments

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TopicDetail':
        """Initialize a TopicDetail object from a json dictionary."""
        args = {attr: _dict[key]
                for key, attr in cls._SCALAR_KEYS
                if key in _dict}
        if 'configs' in _dict:
            args['configs'] = TopicConfigs.from_dict(_dict.get('configs'))
        if 'replicaAssignments' in _dict:
            args['replica_assignments'] = [
                ReplicaAssignment.from_dict(x)
                for x in _dict.get('replicaAssignments')]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TopicDetail object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        for key, attr in self._SCALAR_KEYS:
            value = getattr(self, attr, None)
            if value is not None:
                _dict[key] = value
        if getattr(self, 'configs', None) is not None:
            _dict['configs'] = self.configs.to_dict()
        if getattr(self, 'replica_assignments', None) is not None:
            _dict['replicaAssignments'] = [
                x.to_dict() for x in self.replica_assignments]
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TopicDetail object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TopicDetail') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'TopicDetail') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
| StarcoderdataPython |
65744 | import os
import time
from parsons.etl.table import Table
from parsons.utilities.check_env import check
from slackclient import SlackClient
from slackclient.exceptions import SlackClientError
import requests
class Slack(object):
    """
    Connector for the Slack Web API.

    `Args:`
        api_key: str
            A Slack API token. Not required if the ``SLACK_API_TOKEN``
            environment variable is set.
    """

    def __init__(self, api_key=None):
        if api_key is None:
            try:
                self.api_key = os.environ["SLACK_API_TOKEN"]
            except KeyError:
                raise KeyError('Missing api_key. It must be passed as an '
                               'argument or stored as environmental variable')
        else:
            self.api_key = api_key

        self.client = SlackClient(self.api_key)

    def channels(self, fields=['id', 'name'], exclude_archived=False,
                 types=['public_channel']):
        """
        Return a list of all channels in a Slack team.
        `Args:`
            fields: list
                A list of the fields to return. By default, only the channel
                `id` and `name` are returned. See
                https://api.slack.com/methods/conversations.list for a full
                list of available fields. `Notes:` nested fields are unpacked.
            exclude_archived: bool
                Set to `True` to exclude archived channels from the list.
                Default is false.
            types: list
                Mix and match channel types by providing a list of any
                combination of `public_channel`, `private_channel`,
                `mpim` (aka group messages), or `im` (aka 1-1 messages).
        `Returns:`
            Parsons Table
                See :ref:`parsons-table` for output options.
        """
        tbl = self._paginate_request(
            "conversations.list", "channels", types=types,
            exclude_archived=exclude_archived)

        # Flatten the nested "topic" and "purpose" dicts into prefixed columns
        # so callers can select them via `fields`.
        tbl.unpack_dict("topic", include_original=False, prepend=True,
                        prepend_value="topic")
        tbl.unpack_dict("purpose", include_original=False,
                        prepend=True, prepend_value="purpose")

        rm_cols = [x for x in tbl.columns if x not in fields]
        tbl.remove_column(*rm_cols)
        return tbl

    def users(self, fields=['id', 'name', 'deleted', 'profile_real_name_normalized',
                            'profile_email']):
        """
        Return a list of all users in a Slack team.
        `Args:`
            fields: list
                A list of the fields to return. By default, only the user
                `id` and `name` and `deleted` status are returned. See
                https://api.slack.com/methods/users.list for a full list of
                available fields. `Notes:` nested fields are unpacked.
        `Returns:`
            Parsons Table
                See :ref:`parsons-table` for output options.
        """
        tbl = self._paginate_request("users.list", "members", include_locale=True)

        # Flatten the nested "profile" dict into "profile_*" columns.
        tbl.unpack_dict("profile", include_original=False, prepend=True,
                        prepend_value="profile")

        rm_cols = [x for x in tbl.columns if x not in fields]
        tbl.remove_column(*rm_cols)
        return tbl

    @classmethod
    def message(cls, channel, text, webhook=None, parent_message_id=None):
        """
        Send a message to a Slack channel with a webhook instead of an api_key.
        You might not have the full-access API key but still want to notify a channel
        `Args:`
            channel: str
                The name or id of a `public_channel`, a `private_channel`, or
                an `im` (aka 1-1 message).
            text: str
                Text of the message to send.
            webhook: str
                If you have a webhook url instead of an api_key
                Looks like: https://<KEY>
                If omitted, read from the ``SLACK_API_WEBHOOK`` environment
                variable.
            parent_message_id: str
                The `ts` value of the parent message. If used, this will thread the message.
        """
        webhook = check('SLACK_API_WEBHOOK', webhook, optional=True)

        payload = {'channel': channel, 'text': text}
        if parent_message_id:
            payload['thread_ts'] = parent_message_id

        return requests.post(webhook, json=payload)

    def message_channel(self, channel, text, as_user=False, parent_message_id=None):
        """
        Send a message to a Slack channel
        `Args:`
            channel: str
                The name or id of a `public_channel`, a `private_channel`, or
                an `im` (aka 1-1 message).
            text: str
                Text of the message to send.
            as_user: str
                Pass true to post the message as the authenticated user,
                instead of as a bot. Defaults to false. See
                https://api.slack.com/methods/chat.postMessage#authorship for
                more information about Slack authorship.
            parent_message_id: str
                The `ts` value of the parent message. If used, this will thread the message.
        `Returns:`
            `dict`:
                A response json
        `Raises:`
            SlackClientError
                If the API call fails (after one retry on rate limiting).
        """
        resp = self.client.api_call(
            "chat.postMessage", channel=channel, text=text,
            as_user=as_user, thread_ts=parent_message_id)

        if not resp['ok'] and resp['error'] == 'ratelimited':
            # Slack asks us to back off; wait the advised time and retry once.
            # (Previously the retry dropped `thread_ts` and the error was
            # re-raised even when the retry succeeded.)
            time.sleep(int(resp['headers']['Retry-After']))
            resp = self.client.api_call(
                "chat.postMessage", channel=channel, text=text,
                as_user=as_user, thread_ts=parent_message_id)

        if not resp['ok']:
            raise SlackClientError(resp['error'])

        return resp

    def upload_file(self, channels, filename, filetype=None,
                    initial_comment=None, title=None, is_binary=False):
        """
        Upload a file to Slack channel(s).
        `Args:`
            channels: list
                The list of channel names or IDs where the file will be shared.
            filename: str
                The path to the file to be uploaded.
            filetype: str
                A file type identifier. If None, type will be inferred base on
                file extension. This is used to determine what fields are
                available for that object. See https://api.slack.com/types/file
                for a list of valid types and for more information about the
                file object.
            initial_comment: str
                The text of the message to send along with the file.
            title: str
                Title of the file to be uploaded.
            is_binary: bool
                If True, open this file in binary mode. This is needed if
                uploading binary files. Defaults to False.
        `Returns:`
            `dict`:
                A response json
        `Raises:`
            SlackClientError
                If the API call fails (after one retry on rate limiting).
        """
        if filetype is None and '.' in filename:
            filetype = filename.split('.')[-1]

        mode = 'rb' if is_binary else 'r'
        with open(filename, mode) as file_content:
            resp = self.client.api_call(
                "files.upload", channels=channels, file=file_content,
                filetype=filetype, initial_comment=initial_comment,
                title=title)

            if not resp['ok'] and resp['error'] == 'ratelimited':
                # Back off and retry once; rewind the file handle so the
                # retry uploads the full content, not an exhausted stream.
                time.sleep(int(resp['headers']['Retry-After']))
                file_content.seek(0)
                resp = self.client.api_call(
                    "files.upload", channels=channels, file=file_content,
                    filetype=filetype, initial_comment=initial_comment,
                    title=title)

        if not resp['ok']:
            raise SlackClientError(resp['error'])

        return resp

    def _paginate_request(self, endpoint, collection, **kwargs):
        """Collect every page of `collection` items from `endpoint` into a Table."""
        # The max objects we're requesting at a time.
        # This is an internal limit to not overload the Slack API.
        LIMIT = 200

        items = []
        next_page = True
        cursor = None
        while next_page:
            resp = self.client.api_call(
                endpoint, cursor=cursor, limit=LIMIT, **kwargs)

            if not resp['ok']:
                if resp['error'] == 'ratelimited':
                    time.sleep(int(resp['headers']['Retry-After']))
                    continue
                raise SlackClientError(resp['error'])

            items.extend(resp[collection])

            # NOTE(review): assumes every paginated endpoint returns
            # "response_metadata" — true for conversations.list/users.list.
            if resp["response_metadata"]["next_cursor"]:
                cursor = resp["response_metadata"]["next_cursor"]
            else:
                next_page = False

        return Table(items)
| StarcoderdataPython |
3343261 | <reponame>ncilfone/mabwiser<filename>tests/test_popularity.py
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from mabwiser.mab import LearningPolicy, NeighborhoodPolicy, _Popularity
from tests.test_base import BaseTest
class PopularityTest(BaseTest):
    """Unit tests for the Popularity learning policy.

    Popularity samples arms with probability proportional to their observed
    mean reward. Expected arms/expectations below are pinned to fixed RNG
    seeds, so the literal values are part of each test's contract.
    """

    # Equal rewards: predictions still favor one arm per the seeded RNG,
    # and expectations are nearly (but not exactly) 1/0 due to sampling.
    def test_2arm_equal_prob(self):
        arm, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 1, 1, 1, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=5,
                                is_predict=True)
        self.assertEqual(arm, [1, 1, 1, 2, 1])
        exp, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 1, 1, 1, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        self.assertAlmostEqual(exp[1], 0.9948500327379373)
        self.assertAlmostEqual(exp[2], 0.005149967262062828)

    # Different mean rewards per arm.
    def test_2arm_diff_prob(self):
        arm, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 0, 1, 0, 0, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=5,
                                is_predict=True)
        self.assertEqual(arm, [1, 1, 1, 2, 1])

    # Another reward split, same seeded prediction sequence.
    def test_2arm_diff_prob_2(self):
        arm, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 1, 1, 0, 0, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=5,
                                is_predict=True)
        self.assertEqual(arm, [1, 1, 1, 2, 1])

    # Three arms with identical rewards.
    def test_3arm_equal_prob(self):
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
                                rewards=[1, 1, 1, 1, 1, 1, 1, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=5,
                                is_predict=True)
        self.assertEqual(arm, [3, 2, 3, 3, 3])

    # Three arms with differing reward rates.
    def test_3arm_diff_prob(self):
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
                                rewards=[1, 0, 0, 1, 1, 1, 1, 0, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=5,
                                is_predict=True)
        self.assertEqual(arm, [3, 2, 3, 3, 3])

    # Popularity combined with a KNearest neighborhood policy and contexts.
    def test_with_context(self):
        arm, mab = self.predict(arms=[1, 2, 3, 4],
                                decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                neighborhood_policy=NeighborhoodPolicy.KNearest(),
                                context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                 [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                 [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                 [0, 2, 1, 0, 0]],
                                contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                seed=123456,
                                num_run=3,
                                is_predict=True)
        self.assertListEqual(arm[0], [1, 1])
        self.assertListEqual(arm[1], [1, 1])
        self.assertListEqual(arm[2], [3, 1])

    # All-zero rewards must not crash; arms still get sampled.
    def test_zero_reward(self):
        arm, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[0, 0, 0, 0, 0, 0],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=5,
                                is_predict=True)
        self.assertEqual(arm, [1, 1, 1, 2, 1])

    # Popularity derives from an epsilon-greedy bandit; epsilon must be inert.
    def test_epsilon_has_no_impact(self):
        # This is super hack test to check that epsilon has no impact
        # on popularity results
        arms = ['Arm1', 'Arm2']
        mab = _Popularity(rng=np.random.RandomState(seed=123456),
                          arms=arms, n_jobs=1, backend=None)
        decisions = ['Arm1', 'Arm1', 'Arm2', 'Arm1']
        rewards = [20, 17, 25, 9]
        mab.fit(np.array(decisions), np.array(rewards))
        # Original result
        self.assertDictEqual({'Arm1': 0.03702697841958926, 'Arm2': 0.9629730215804108},
                             mab.predict_expectations())
        # Hack into epsilon from underlying greedy bandit
        mab = _Popularity(rng=np.random.RandomState(seed=123456),
                          arms=arms, n_jobs=1, backend=None)
        mab.epsilon = 5
        mab.fit(np.array(decisions), np.array(rewards))
        # Assert epsilon change has no impact
        # self.assertEqual("Arm1", mab.predict())
        self.assertDictEqual({'Arm1': 0.03702697841958926, 'Arm2': 0.9629730215804108},
                             mab.predict_expectations())

    # partial_fit must update expectations incrementally.
    def test_2arm_partial_fit(self):
        exp, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 1, 1, 0, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        # Initial probabilities
        self.assertAlmostEqual(exp[1], 0.9991137956839969)
        self.assertAlmostEqual(exp[2], 0.0008862043160030626)
        # Partial fit
        mab.partial_fit([1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1])
        exp = mab.predict_expectations()
        self.assertAlmostEqual(exp[1], 0.9162612769403672)
        self.assertAlmostEqual(exp[2], 0.08373872305963273)

    # A second fit() call retrains from scratch (RNG state advances, so
    # expectations differ from the first fit).
    def test_fit_twice(self):
        exp, mab = self.predict(arms=[1, 2],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 1, 1, 0, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        # Initial probabilities
        self.assertAlmostEqual(exp[1], 0.9991137956839969)
        self.assertAlmostEqual(exp[2], 0.0008862043160030626)
        # Fit the other way around
        mab.fit([2, 2, 2, 1, 1, 1], [1, 1, 1, 0, 1, 1])
        exp = mab.predict_expectations()
        self.assertAlmostEqual(exp[1], 0.9262956187781518)
        self.assertAlmostEqual(exp[2], 0.07370438122184816)

    # An arm with no training decisions gets zero expectation.
    def test_unused_arm(self):
        exp, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 2],
                                rewards=[1, 1, 1, 0, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        # Initial probabilities
        self.assertAlmostEqual(exp[3], 0.0)

    # Arms added after fitting must propagate to the internal bandits.
    def test_add_arm(self):
        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
                                 learning_policy=LearningPolicy.Popularity(),
                                 neighborhood_policy=NeighborhoodPolicy.Clusters(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)
        mab.add_arm(5)
        self.assertTrue(5 in mab.arms)
        self.assertTrue(5 in mab._imp.arms)
        self.assertTrue(5 in mab._imp.lp_list[0].arms)
        self.assertTrue(5 in mab._imp.lp_list[0].arm_to_expectation.keys())

    # Non-numeric (string) arm labels are supported.
    def test_string_arms(self):
        arms, mab = self.predict(arms=["one", "two"],
                                 decisions=["one", "one", "one", "two", "two", "two"],
                                 rewards=[1, 1, 1, 0, 1, 1],
                                 learning_policy=LearningPolicy.Popularity(),
                                 seed=123456,
                                 num_run=5,
                                 is_predict=True)
        self.assertEqual(arms, ['one', 'one', 'one', 'two', 'one'])

    # Different seeds can sample different arms from the same expectations.
    def test_different_seeds(self):
        arm, mab = self.predict(arms=["one", "two"],
                                decisions=["one", "one", "one", "two", "two", "two"],
                                rewards=[1, 1, 1, 0, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=True)
        self.assertEqual('one', arm)
        # Same setup but change seed that prefers the other arm
        arm, mab = self.predict(arms=["one", "two"],
                                decisions=["one", "one", "one", "two", "two", "two"],
                                rewards=[1, 1, 1, 0, 1, 1],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=12234,
                                num_run=1,
                                is_predict=True)
        self.assertEqual('two', arm)

    # numpy array rewards behave like lists.
    def test_numpy_rewards(self):
        exp, mab = self.predict(arms=["one", "two"],
                                decisions=["one", "one", "one", "two", "two", "two"],
                                rewards=np.array([1, 1, 1, 0, 1, 1]),
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        self.assertAlmostEqual(exp["one"], 0.9991137956839969)
        self.assertAlmostEqual(exp["two"], 0.0008862043160030626)

    # pandas Series inputs behave like lists.
    def test_data_frame(self):
        df = pd.DataFrame({"decisions": ["one", "one", "one", "two", "two", "two"],
                           "rewards": [1, 1, 1, 0, 1, 1]})
        exp, mab = self.predict(arms=["one", "two"],
                                decisions=df["decisions"],
                                rewards=df["rewards"],
                                learning_policy=LearningPolicy.Popularity(),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        self.assertAlmostEqual(exp["one"], 0.9991137956839969)
        self.assertAlmostEqual(exp["two"], 0.0008862043160030626)

    # Negative rewards are invalid for Popularity and must raise.
    def test_negative_rewards(self):
        with self.assertRaises(ValueError):
            arm, mab = self.predict(arms=[1, 2],
                                    decisions=[1, 1, 1, 2, 2, 2],
                                    rewards=[-1, -1, 1, 1, 1, 1],
                                    learning_policy=LearningPolicy.Popularity(),
                                    seed=123456,
                                    num_run=5,
                                    is_predict=True)
            self.assertEqual(arm, [2, 2, 2, 2, 2])

    # remove_arm must purge the arm from the implementation's state.
    def test_remove_arm(self):
        arms, mab = self.predict(arms=[1, 2, 3],
                                 decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3],
                                 rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1],
                                 learning_policy=LearningPolicy.Popularity(),
                                 seed=123456,
                                 num_run=4,
                                 is_predict=True)
        mab.remove_arm(3)
        self.assertTrue(3 not in mab.arms)
        self.assertTrue(3 not in mab._imp.arms)
        self.assertTrue(3 not in mab._imp.arm_to_expectation)

    # warm_start copies expectations to untrained arms with similar features.
    def test_warm_start(self):
        _, mab = self.predict(arms=[1, 2, 3],
                              decisions=[1, 1, 1, 2, 2, 2, 1, 1, 1],
                              rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
                              learning_policy=LearningPolicy.Popularity(),
                              seed=7,
                              num_run=1,
                              is_predict=False)
        # Before warm start
        self.assertEqual(mab._imp.trained_arms, [1, 2])
        self.assertDictEqual(mab._imp.arm_to_expectation, {1: 1.0, 2: 0.0, 3: 0.0})
        # Warm start
        mab.warm_start(arm_to_features={1: [0, 1], 2: [0, 0], 3: [0, 1]}, distance_quantile=0.5)
        self.assertDictEqual(mab._imp.arm_to_expectation, {1: 1.0, 2: 0.0, 3: 1.0})

    # Contexts (empty or not) are ignored when no neighborhood policy is set.
    def test_popularity_contexts(self):
        arms, mab = self.predict(arms=[1, 2, 3],
                                 decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3],
                                 rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1],
                                 learning_policy=LearningPolicy.Popularity(),
                                 contexts=[[]] * 10,
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)
        self.assertEqual(arms, [3, 2, 3, 3, 3, 2, 2, 3, 2, 3])
        arms, mab = self.predict(arms=[1, 2, 3],
                                 decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3],
                                 rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1],
                                 learning_policy=LearningPolicy.Popularity(),
                                 contexts=[[1, 2, 3]] * 10,
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)
        self.assertEqual(arms, [3, 2, 3, 3, 3, 2, 2, 3, 2, 3])
| StarcoderdataPython |
3229256 | """ Advent of code 2017 day 15/1 """
from argparse import ArgumentParser
class Generator(object):
""" Value generators """
def __init__(self, factor, initial, name):
""" Constructor for the object """
self.factor = factor
self.prev_value = initial
self.name = name
def __next__(self):
""" To make it iterable """
return_value = (self.prev_value * self.factor) % 2147483647
#print(self.name, self.prev_value, return_value)
self.prev_value = return_value
return return_value
def __iter__(self):
return self
class Judge(object):
    """Counts how many generator pairs agree in their lowest 16 bits."""

    def __init__(self, generators, pair_count):
        """Store the two generators and the number of pairs to examine."""
        self.gen_a, self.gen_b = generators
        self.pair_count = pair_count

    def process(self):
        """Draw pair_count value pairs and return how many of them match."""
        matched = 0
        for index in range(self.pair_count):
            # Coarse progress indicator for long runs.
            if (index % 1000000) == 0:
                print(index)
            value_a = next(self.gen_a)
            value_b = next(self.gen_b)
            if self.compare(value_a, value_b):
                matched += 1
        return matched

    @staticmethod
    def compare(val_1, val_2):
        """Return True when the lowest 16 bits of both values are equal."""
        # XOR cancels identical bits, so the low halves match exactly when
        # the masked XOR is zero.
        return (val_1 ^ val_2) & 0xffff == 0
def read_data(data):
    """Parse the puzzle input: the integer after the last space of each line."""
    values = []
    for line in data.split('\n'):
        values.append(int(line[line.rfind(" "):]))
    return values
def solution(data):
    """Solve the puzzle: count low-16-bit matches over 40 million pairs."""
    initial_a, initial_b = read_data(data)
    generators = [Generator(16807, initial_a, 'A'),
                  Generator(48271, initial_b, 'B')]
    return Judge(generators, 40000000).process()
if __name__ == "__main__":
    # CLI: --input reads ./input.txt, --test VALUE uses VALUE as the puzzle
    # input; with no flags, run the built-in example from the problem text.
    PARSER = ArgumentParser()
    PARSER.add_argument("--input", dest='input', action='store_true')
    PARSER.add_argument("--test")
    ARGS = PARSER.parse_args()
    if ARGS.input:
        with(open('input.txt', 'r')) as input_file:
            print(solution(input_file.read()))
    elif ARGS.test:
        print(solution(str(ARGS.test)))
    else:
        DEBUG = """Generator A starts with 65
Generator B starts with 8921"""
        print(solution(DEBUG))
| StarcoderdataPython |
85743 | #
# schedule.py - Contains Hue 'schedule' definitions
#
import enum
import re
from . import common
class Schedule(common.Object):
    """
    Represents a Hue schedule.

    Thin read-only wrapper over the bridge's schedule JSON (``self._data``,
    populated by the ``common.Object`` base class).
    """

    @property
    def name(self):
        """The schedule's display name."""
        return self._data['name']

    @property
    def is_enabled(self):
        """True when the schedule's status is 'enabled'."""
        return Status(self._data['status']) is Status.ENABLED

    @property
    def timer_time(self):
        """The raw 'localtime' string describing when the schedule fires."""
        return self._data['localtime']

    @property
    def created_time(self):
        """Creation time, wrapped in common.Time."""
        return common.Time(self._data['created'])

    @property
    def start_time(self):
        """Start time, wrapped in common.Time."""
        return common.Time(self._data['starttime'])

    @property
    def auto_delete(self):
        """Whether the bridge removes the schedule after it fires."""
        return self._data['autodelete']

    @property
    def recycle(self):
        """Whether the bridge may garbage-collect this schedule."""
        return self._data['recycle']

    @property
    def command_action(self):
        """The Action object the schedule will execute when triggered.

        Resolves the command's REST address to the target resource via the
        bridge, then lets that resource parse the command body.
        """
        command = self._data['command']
        # Address is /api/<username>/<resource>/<id>/<item>
        # where /<item> may be omitted
        match = re.match(r"/api/.*(/\w+/\d+)/?(.*)", command['address'])
        full_id, item_addr = match.groups()
        obj = self.bridge.get_from_full_id(full_id)
        return obj.parse_action(item_addr, command['body'])

    def parse_action(self, item_addr, body):
        """Build an Action targeting this schedule from a command body.

        Recognized body keys: 'localtime' and 'status' (converted to Status).
        """
        status = body.get("status")
        return Action(self, body.get("localtime"),
                      None if status is None else Status(status))
class Status(enum.Enum):
    """Enabled/disabled state of a Hue schedule, as reported by the bridge."""
    ENABLED = "enabled"
    DISABLED = "disabled"
class Action(common.Action):
    """An action that updates a schedule's status and/or trigger time."""

    def __init__(self, schedule, localtime=None, status=None):
        """
        :param schedule: the Schedule this action targets
        :param localtime: optional new trigger-time string
        :param status: optional new Status value
        """
        self._schedule = schedule
        self._localtime = localtime
        self._status = status

    @property
    def address(self):
        # Fixed: the original read `self._sensor`, which is never set on this
        # class (__init__ stores `self._schedule`) and always raised
        # AttributeError.
        # NOTE(review): the "/state" suffix looks copy-pasted from a sensor
        # action — confirm the correct REST path for schedule updates.
        return "{}/state".format(self._schedule.full_id)

    @property
    def body(self):
        """The JSON body for the update: only the fields that are set."""
        out = {}
        if self._status is not None:
            out['status'] = self._status.value
        if self._localtime is not None:
            out['localtime'] = self._localtime
        return out

    def __str__(self):
        actions = []
        if self._status is not None:
            actions.append("set status = {}".format(self._status.value))
        if self._localtime is not None:
            actions.append("set time = {}".format(self._localtime))
        return "Schedule '{}': {}".format(self._schedule.name,
                                          ", ".join(actions))
| StarcoderdataPython |
def sort_words(words):
    """
    Return the words of *words* sorted alphabetically, ignoring case.

    The original casing of each word is preserved in the result.

    :param words: space-separated words
    :return: list of words in case-insensitive alphabetical order
    """
    # key=str.lower gives a case-insensitive order, and Python's sort is
    # stable, so words that compare equal keep their original relative order.
    # (The previous implementation appended each word's index to its
    # lowercased form and recovered it via the string's last character,
    # which broke for inputs with more than ten words.)
    return sorted(words.split(' '), key=str.lower)
if __name__ == '__main__':
    # Demo: case-insensitive alphabetical sort that preserves original casing.
    print(sort_words('banana ORANGE apple'))
# Read three integers and report whether they were entered in strictly
# increasing order.
numero1 = int(input("Digite o primeiro número: "))
numero2 = int(input("Digite o segundo número: "))
numero3 = int(input("Digite o terceiro número: "))

# Chained comparison: true only when the values are strictly increasing.
if numero1 < numero2 < numero3:
    print("crescente")
else:
    print("não está em ordem crescente")
1601815 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from checksumdir import dirhash
import subprocess
PACKAGE_DIR = "packages/sensor_certification/apps/"
# Add color format codes to the given text
def colorize(text, style):
formats = {
"GOOD": "\033[92m{}\033[0m",
"BAD": "\033[91m{}\033[0m",
"HEADER": "\033[1m{}\033[0m",
"WARN": "\033[33m{}\033[0m",
}
return formats[style].format(text)
# Computes checksum on the directory present in input_file
def checksum(input_file, directory):
    """
    Look up *directory*'s value (a quoted path) in *input_file* and return
    the md5 hash of that directory tree.

    :param input_file: text file whose lines contain key/value settings
    :param directory: key to search for on each (whitespace-stripped) line
    :return: md5 dirhash string on a match, or the int ``0`` when no line
        matches (NOTE(review): mixed str/int return — callers must handle both)
    """
    dir = ""
    chksum = ""
    file = open(input_file, "r")
    lines = file.readlines()
    file.close()
    for line in lines:
        # Remove ALL whitespace so the key and its value become adjacent.
        line = ''.join(line.split())
        if (line.find(directory) != -1):
            # Assumes the key appears at the START of the line (the slice
            # below is by key length, not by match position) — TODO confirm
            # the input format guarantees this.
            dir = line[len(directory):].strip('"')
            chksum = dirhash(dir, 'md5')
            return chksum
    return 0
# Returns platform info such as target platform, jetpack sdk version
def get_platform_info(platform, search_info):
    """
    Source the given platform file in bash and return the value of the
    environment variable named *search_info*.

    :param platform: platform file name, resolved relative to PACKAGE_DIR
    :param search_info: environment variable name to look up
    :return: the variable's value (stripped), or "" when not found
    """
    platform = PACKAGE_DIR + platform
    command = ['bash', '-c', 'source {} && env'.format(platform)]
    # Use Popen as a context manager so the stdout pipe is closed and the
    # child process is reaped even on early return (the original leaked the
    # Popen handle and never waited on the process).
    with subprocess.Popen(command, stdout=subprocess.PIPE,
                          universal_newlines=True) as proc:
        for line in proc.stdout:
            # universal_newlines=True already yields str lines.
            key, _, value = line.partition("=")
            if key == search_info:
                return value.strip()
    return ""
# Construct and return a function that will check if a value is in a given range
def range_checker(minimum, maximum):
    """Return a validator raising ValueError outside [minimum, maximum]."""
    def check(value):
        # Out of range on either side -> reject.
        if value < minimum or value > maximum:
            raise ValueError("Value {} is outside allowed range [{}, {}]".format(
                value, minimum, maximum))
    return check
| StarcoderdataPython |
1652724 | ################################################################################
# Copyright 2019-2020 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from . import Properties
class HardwarePredicate(Properties.Predicate):
    """Predicate matching a target GPU: gfx architecture, optionally with an
    exact compute-unit (CU) count embedded as an 'And' of child predicates."""

    @classmethod
    def FromISA(cls, isa):
        """Build a predicate matching a processor, e.g. (9, 0, 0) -> 'gfx900'."""
        gfxArch = 'gfx'+''.join(map(str, isa))
        return cls("AMDGPU", value=cls("Processor", value=gfxArch))

    @classmethod
    def FromHardware(cls, isa, cuCount=None):
        """Build a predicate matching a processor and, when given, a CU count."""
        gfxArch = 'gfx'+''.join(map(str, isa))
        if cuCount == None:
            return cls("AMDGPU", value=cls("Processor", value=gfxArch))
        else:
            return cls("AMDGPU", value=cls.And([cls("Processor", value=gfxArch),
                                                cls("CUCount", value=cuCount)]))

    def __lt__(self, other):
        """Priority ordering for predicate selection.

        CU-count predicates outrank plain processor predicates (more specific
        hardware match wins); ties on "no CU count" fall back to comparing the
        processor predicates.
        """
        # Use superclass logic for TruePreds
        if other.tag == 'TruePred' or self.tag == 'TruePred':
            return super().__lt__(other)

        # Compute unit counts are embedded as 'And' with
        # 'Processor' and 'ComputeUnitCount' as children
        if self.value.tag == 'And':
            myAndPred = self.value
            myProcPred = next(iter(x for x in myAndPred.value if x.tag == "Processor"), None)
            myCUPred = next(iter(x for x in myAndPred.value if x.tag == "CUCount"), None)
            myCUCount = myCUPred.value if myCUPred != None else 0
        else:
            myProcPred = self.value
            myCUCount = 0

        if other.value.tag == 'And':
            otherAndPred = other.value
            otherProcPred = next(iter(x for x in otherAndPred.value if x.tag == "Processor"), None)
            otherCUPred = next(iter(x for x in otherAndPred.value if x.tag == "CUCount"), None)
            otherCUCount = otherCUPred.value if otherCUPred != None else 0
        else:
            otherProcPred = other.value
            otherCUCount = 0

        # If CU properties are empty, then compare processor predicates
        if myCUCount == otherCUCount == 0:
            # Make sure that we have valid processor preds
            assert myProcPred != None and otherProcPred != None, "Missing processor predicate"
            assert myProcPred.tag == otherProcPred.tag == "Processor", "Invalid processor predicate"

            # Downgrade to base class so that we don't recurse
            # NOTE(review): this mutates the operands' __class__ in place so
            # the base-class __lt__ runs — a side effect visible to callers.
            myProcPred.__class__ = otherProcPred.__class__ = Properties.Predicate
            return myProcPred < otherProcPred

        # Higher priority given to higher CU count
        return myCUCount > otherCUCount
| StarcoderdataPython |
3227207 | import argparse
from datetime import datetime
from pathlib import Path
from tqdm import tqdm
import pandas as pd
# androzoo_latest_path = Path("latest.csv")
# Path to the AndroZoo "latest.csv" metadata dump (machine-specific; adjust
# for your environment).
androzoo_latest_path = Path("/Users/kuznetsov/work/workspace/autogranted/latest.csv")
# Only apps published on this market are kept.
market = "play.google.com"
# Columns read from latest.csv.
required_columns = ['sha256', 'dex_date', 'pkg_name', 'vercode', 'markets']
# Register tqdm's progress_apply/progress_map helpers with pandas.
tqdm.pandas()
def query(start: datetime, end: datetime, out_file: Path):
    """Write a CSV of the newest Google Play APK version per package.

    Loads the required columns of the AndroZoo index, keeps rows whose
    ``markets`` field contains the Google Play market and whose ``dex_date``
    falls in ``[start, end)``, and for every ``pkg_name`` retains the row
    with the highest ``vercode``. The resulting (pkg_name, vercode, sha256)
    triples are written to ``out_file``.

    :param start: inclusive lower bound on ``dex_date``.
    :param end: exclusive upper bound on ``dex_date``.
    :param out_file: destination CSV path.
    """
    column_idx = get_column_indices()
    df = pd.read_csv(androzoo_latest_path, sep=',', usecols=column_idx)
    # Missing version codes become 0 so the astype(int) below cannot fail.
    df.fillna(0, inplace=True)
    print("latest.csv loaded")
    df.vercode = df.vercode.astype(int)
    df['dex_date'] = pd.to_datetime(df['dex_date'])  # e.g. '%Y-%m-%d %H:%M:%S'
    # Restrict to Google Play, then to the requested dex_date window.
    df_market = df[df['markets'].str.contains(market)]
    df_date = df_market[(df_market['dex_date'] >= start) & (df_market['dex_date'] < end)]
    # Keep only the highest version code observed for each package.
    df_lastver = df_date.groupby('pkg_name').progress_apply(lambda x: x.nlargest(1, "vercode"))
    df_res = df_lastver.loc[:, ('pkg_name', 'vercode', 'sha256')]
    df_res.to_csv(out_file, index=False)
def get_column_indices():
    """Locate each required column's position in the AndroZoo CSV header.

    Reads only the first line of ``androzoo_latest_path`` and returns the
    indices of ``required_columns`` within it, in the same order.
    """
    with androzoo_latest_path.open() as handle:
        header_fields = handle.readline().strip().split(",")
    return [header_fields.index(name) for name in required_columns]
if __name__ == '__main__':
    # CLI entry point: period boundaries are parsed as naive Y-m-d dates.
    parser = argparse.ArgumentParser()
    parser.add_argument("--start", type=lambda s: datetime.strptime(s, '%Y-%m-%d'), help="start of the period Y-m-d")
    parser.add_argument("--end", type=lambda s: datetime.strptime(s, '%Y-%m-%d'), help="end of the period Y-m-d")
    parser.add_argument("--output", help="output csv")
    args = parser.parse_args()
    query(args.start, args.end, Path(args.output))
| StarcoderdataPython |
4837866 | <reponame>ruohoruotsi/pyro
from __future__ import absolute_import, division, print_function
import numbers
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import torch
from six import add_metaclass
import pyro.poutine as poutine
from pyro.distributions import Categorical, Empirical
from pyro.ops.stats import waic
from .util import site_is_subsample
class EmpiricalMarginal(Empirical):
    """
    Marginal distribution over a single site (or multiple, provided they have the same
    shape) from the ``TracePosterior``'s model.
    .. note:: If multiple sites are specified, they must have the same tensor shape.
        Samples from each site will be stacked and stored within a single tensor. See
        :class:`~pyro.distributions.Empirical`. To hold the marginal distribution of sites
        having different shapes, use :class:`~pyro.infer.abstract_infer.Marginals` instead.
    :param TracePosterior trace_posterior: a ``TracePosterior`` instance representing
        a Monte Carlo posterior.
    :param list sites: optional list of sites for which we need to generate
        the marginal distribution.
    """

    def __init__(self, trace_posterior, sites=None, validate_args=None):
        assert isinstance(trace_posterior, TracePosterior), \
            "trace_dist must be trace posterior distribution object"
        # Default to the special "_RETURN" site (the model's return value).
        if sites is None:
            sites = "_RETURN"
        self._num_chains = 1
        # Per-chain staging buffers; flattened into tensors by
        # _get_samples_and_weights once all traces have been consumed.
        self._samples_buffer = defaultdict(list)
        self._weights_buffer = defaultdict(list)
        self._populate_traces(trace_posterior, sites)
        samples, weights = self._get_samples_and_weights()
        super(EmpiricalMarginal, self).__init__(samples,
                                                weights,
                                                validate_args=validate_args)

    def _get_samples_and_weights(self):
        """
        Appends values collected in the samples/weights buffers to their
        corresponding tensors.
        """
        num_chains = len(self._samples_buffer)
        samples_by_chain = []
        weights_by_chain = []
        for i in range(num_chains):
            samples = torch.stack(self._samples_buffer[i], dim=0)
            samples_by_chain.append(samples)
            # Weights mirror the samples' floating dtype (float32 for
            # integer-valued samples) and live on the same device.
            weights_dtype = samples.dtype if samples.dtype.is_floating_point else torch.float32
            weights = torch.as_tensor(self._weights_buffer[i], device=samples.device, dtype=weights_dtype)
            weights_by_chain.append(weights)
        # Single chain: return the tensors directly; multiple chains: add a
        # leading chain dimension.
        if len(samples_by_chain) == 1:
            return samples_by_chain[0], weights_by_chain[0]
        else:
            return torch.stack(samples_by_chain, dim=0), torch.stack(weights_by_chain, dim=0)

    def _add_sample(self, value, log_weight=None, chain_id=0):
        """
        Adds a new data point to the sample. The values in successive calls to
        ``add`` must have the same tensor shape and size. Optionally, an
        importance weight can be specified via ``log_weight`` or ``weight``
        (default value of `1` is used if not specified).
        :param torch.Tensor value: tensor to add to the sample.
        :param torch.Tensor log_weight: log weight (optional) corresponding
            to the sample.
        :param int chain_id: chain id that generated the sample (optional).
            Note that if this argument is provided, ``chain_id`` must lie
            in ``[0, num_chains - 1]``, and there must be equal number
            of samples per chain.
        """
        # Apply default weight of 1.0.
        if log_weight is None:
            log_weight = 0.0
        if self._validate_args and not isinstance(log_weight, numbers.Number) and log_weight.dim() > 0:
            raise ValueError("``weight.dim() > 0``, but weight should be a scalar.")
        # Append to the buffer list
        self._samples_buffer[chain_id].append(value)
        self._weights_buffer[chain_id].append(log_weight)
        # Grow the chain count lazily as higher chain ids are seen.
        self._num_chains = max(self._num_chains, chain_id + 1)

    def _populate_traces(self, trace_posterior, sites):
        assert isinstance(sites, (list, str))
        for tr, log_weight, chain_id in zip(trace_posterior.exec_traces,
                                            trace_posterior.log_weights,
                                            trace_posterior.chain_ids):
            # A single site name yields its value directly; a list of sites is
            # stacked along a new leading dimension (shapes must match).
            value = tr.nodes[sites]["value"] if isinstance(sites, str) else \
                torch.stack([tr.nodes[site]["value"] for site in sites], 0)
            self._add_sample(value, log_weight=log_weight, chain_id=chain_id)
class Marginals(object):
    """
    Holds the marginal distribution over one or more sites from the ``TracePosterior``'s
    model. This is a convenience container class, which can be extended by ``TracePosterior``
    subclasses. e.g. for implementing diagnostics.
    :param TracePosterior trace_posterior: a TracePosterior instance representing
        a Monte Carlo posterior.
    :param list sites: optional list of sites for which we need to generate
        the marginal distribution.
    """

    def __init__(self, trace_posterior, sites=None, validate_args=None):
        assert isinstance(trace_posterior, TracePosterior), \
            "trace_dist must be trace posterior distribution object"
        # Normalize ``sites`` to a list of site names.
        if sites is None:
            sites = ["_RETURN"]
        elif isinstance(sites, str):
            sites = [sites]
        else:
            assert isinstance(sites, list)
        self.sites = sites
        self._marginals = OrderedDict()
        self._diagnostics = OrderedDict()
        self._trace_posterior = trace_posterior
        self._populate_traces(trace_posterior, validate_args)

    def _populate_traces(self, trace_posterior, validate):
        # Build one EmpiricalMarginal per site. Use an OrderedDict so the
        # attribute keeps the type declared in ``__init__`` and documented on
        # the ``empirical`` property (a plain dict comprehension previously
        # clobbered it).
        self._marginals = OrderedDict(
            (site, EmpiricalMarginal(trace_posterior, site, validate))
            for site in self.sites)

    def support(self, flatten=False):
        """
        Gets support of this marginal distribution.
        :param bool flatten: A flag to decide if we want to flatten `batch_shape`
            when the marginal distribution is collected from the posterior with
            ``num_chains > 1``. Defaults to False.
        :returns: a dict with keys are sites' names and values are sites' supports.
        :rtype: :class:`OrderedDict`
        """
        support = OrderedDict([(site, value.enumerate_support())
                               for site, value in self._marginals.items()])
        if self._trace_posterior.num_chains > 1 and flatten:
            # Merge the leading (chain, sample) dimensions into one.
            for site, samples in support.items():
                shape = samples.size()
                flattened_shape = torch.Size((shape[0] * shape[1],)) + shape[2:]
                support[site] = samples.reshape(flattened_shape)
        return support

    @property
    def empirical(self):
        """
        A dictionary of sites' names and their corresponding :class:`EmpiricalMarginal`
        distribution.
        :type: :class:`OrderedDict`
        """
        return self._marginals
@add_metaclass(ABCMeta)
class TracePosterior(object):
    """
    Abstract TracePosterior object from which posterior inference algorithms inherit.
    When run, collects a bag of execution traces from the approximate posterior.
    This is designed to be used by other utility classes like `EmpiricalMarginal`,
    that need access to the collected execution traces.
    """

    def __init__(self, num_chains=1):
        self.num_chains = num_chains
        self._reset()

    def _reset(self):
        # Clear all collected traces and bookkeeping between runs.
        self.log_weights = []
        self.exec_traces = []
        self.chain_ids = []  # chain id corresponding to the sample
        self._idx_by_chain = [[] for _ in range(self.num_chains)]  # indexes of samples by chain id
        # Categorical over trace indices weighted by log_weights; built in run().
        self._categorical = None

    def marginal(self, sites=None):
        """
        Generates the marginal distribution of this posterior.
        :param list sites: optional list of sites for which we need to generate
            the marginal distribution.
        :returns: A :class:`Marginals` class instance.
        :rtype: :class:`Marginals`
        """
        return Marginals(self, sites)

    @abstractmethod
    def _traces(self, *args, **kwargs):
        """
        Abstract method implemented by classes that inherit from `TracePosterior`.
        :return: Generator over ``(exec_trace, weight)`` or
            ``(exec_trace, weight, chain_id)``.
        """
        raise NotImplementedError("Inference algorithm must implement ``_traces``.")

    def __call__(self, *args, **kwargs):
        # To ensure deterministic sampling in the presence of multiple chains,
        # we get the index from ``idxs_by_chain`` instead of sampling from
        # the marginal directly.
        random_idx = self._categorical.sample().item()
        # NOTE(review): this decomposition of the flat index into
        # (chain, position) assumes an equal number of samples per chain, as
        # required by EmpiricalMarginal._add_sample's contract.
        chain_idx, sample_idx = random_idx % self.num_chains, random_idx // self.num_chains
        sample_idx = self._idx_by_chain[chain_idx][sample_idx]
        trace = self.exec_traces[sample_idx].copy()
        # Strip observed nodes so the returned trace only carries latent sites.
        for name in trace.observation_nodes:
            trace.remove_node(name)
        return trace

    def run(self, *args, **kwargs):
        """
        Calls `self._traces` to populate execution traces from a stochastic
        Pyro model.
        :param args: optional args taken by `self._traces`.
        :param kwargs: optional keywords args taken by `self._traces`.
        """
        self._reset()
        with poutine.block():
            for i, vals in enumerate(self._traces(*args, **kwargs)):
                # _traces may yield (trace, weight) or (trace, weight, chain_id);
                # default the chain id to 0 in the two-tuple case.
                if len(vals) == 2:
                    chain_id = 0
                    tr, logit = vals
                else:
                    tr, logit, chain_id = vals
                    assert chain_id < self.num_chains
                self.exec_traces.append(tr)
                self.log_weights.append(logit)
                self.chain_ids.append(chain_id)
                self._idx_by_chain[chain_id].append(i)
        self._categorical = Categorical(logits=torch.tensor(self.log_weights))
        return self

    def information_criterion(self, pointwise=False):
        """
        Computes information criterion of the model. Currently, returns only "Widely
        Applicable/Watanabe-Akaike Information Criterion" (WAIC) and the corresponding
        effective number of parameters.
        Reference:
        [1] `Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC`,
        <NAME>, <NAME>, and <NAME>
        :param bool pointwise: a flag to decide if we want to get a vectorized WAIC or not. When
            ``pointwise=False``, returns the sum.
        :returns: a dictionary containing values of WAIC and its effective number of
            parameters.
        :rtype: :class:`OrderedDict`
        """
        if not self.exec_traces:
            return {}
        obs_node = None
        log_likelihoods = []
        for trace in self.exec_traces:
            obs_nodes = trace.observation_nodes
            if len(obs_nodes) > 1:
                raise ValueError("Infomation criterion calculation only works for models "
                                 "with one observation node.")
            # Every trace must observe the same single node.
            if obs_node is None:
                obs_node = obs_nodes[0]
            elif obs_node != obs_nodes[0]:
                raise ValueError("Observation node has been changed, expected {} but got {}"
                                 .format(obs_node, obs_nodes[0]))
            log_likelihoods.append(trace.nodes[obs_node]["fn"]
                                   .log_prob(trace.nodes[obs_node]["value"]))
        # Stack per-trace log-likelihoods into shape (num_traces, ...).
        ll = torch.stack(log_likelihoods, dim=0)
        waic_value, p_waic = waic(ll, torch.tensor(self.log_weights, device=ll.device), pointwise)
        return OrderedDict([("waic", waic_value), ("p_waic", p_waic)])
class TracePredictive(TracePosterior):
    """
    Generates and holds traces from the posterior predictive distribution,
    given model execution traces from the approximate posterior. This is
    achieved by constraining latent sites to randomly sampled parameter
    values from the model execution traces and running the model forward
    to generate traces with new response ("_RETURN") sites.
    :param model: arbitrary Python callable containing Pyro primitives.
    :param TracePosterior posterior: trace posterior instance holding samples from the model's approximate posterior.
    :param int num_samples: number of samples to generate.
    :param keep_sites: The sites which should be sampled from posterior distribution (default: all)
    """

    def __init__(self, model, posterior, num_samples, keep_sites=None):
        self.model = model
        self.posterior = posterior
        self.num_samples = num_samples
        self.keep_sites = keep_sites
        super(TracePredictive, self).__init__()

    def _traces(self, *args, **kwargs):
        # Lazily run the underlying posterior if it has no traces yet.
        if not self.posterior.exec_traces:
            self.posterior.run(*args, **kwargs)
        data_trace = poutine.trace(self.model).get_trace(*args, **kwargs)
        for _ in range(self.num_samples):
            # Draw a random posterior trace, prune/adjust it, then replay the
            # model against it to obtain a predictive trace.
            model_trace = self.posterior().copy()
            self._remove_dropped_nodes(model_trace)
            self._adjust_to_data(model_trace, data_trace)
            resampled_trace = poutine.trace(poutine.replay(self.model, model_trace)).get_trace(*args, **kwargs)
            # Uniform weight (log-weight 0.) and a single chain id of 0.
            yield (resampled_trace, 0., 0)

    def _remove_dropped_nodes(self, trace):
        # No-op unless the user restricted the replayed sites via keep_sites.
        if self.keep_sites is None:
            return
        for name, site in list(trace.nodes.items()):
            if name not in self.keep_sites:
                trace.remove_node(name)
                continue

    def _adjust_to_data(self, trace, data_trace):
        for name, site in list(trace.nodes.items()):
            # Adjust subsample sites
            if site_is_subsample(site):
                site["fn"] = data_trace.nodes[name]["fn"]
                site["value"] = data_trace.nodes[name]["value"]
            # Adjust sites under conditionally independent stacks
            try:
                site["cond_indep_stack"] = data_trace.nodes[name]["cond_indep_stack"]
                site["fn"] = data_trace.nodes[name]["fn"]
                for cis in site["cond_indep_stack"]:
                    # Select random sub-indices to replay values under conditionally independent stacks.
                    # Otherwise, we assume there is an dependence of indexes between training data
                    # and prediction data.
                    batch_dim = cis.dim - site["fn"].event_dim
                    subidxs = torch.randint(0, site['value'].size(batch_dim), (cis.size,),
                                            device=site["value"].device)
                    site["value"] = site["value"].index_select(batch_dim, subidxs)
            except KeyError:
                # Site (or one of the keys above) is absent from the data
                # trace: leave the sampled site untouched.
                pass

    def marginal(self, sites=None):
        """
        Gets marginal distribution for this predictive posterior distribution.
        """
        return Marginals(self, sites)
| StarcoderdataPython |
111332 | import json
import pulumi
import pulumi_aws as aws
from infrastructure.dynamodb.table import books_dynamodb_table
config = pulumi.Config()
# Single configured name reused for the role, the policy, and the attachment.
lambda_name = config.get('lambda_name')

# IAM execution role that the AWS Lambda service is allowed to assume.
role = aws.iam.Role(
    lambda_name,
    name=lambda_name,
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com",
                }
            },
        ]
    }
    ),
)

# Policy granting access to the books DynamoDB table plus CloudWatch Logs
# permissions. The document is built inside ``.apply`` because the table ARN
# is a Pulumi Output that is only resolved during deployment.
policy = aws.iam.Policy(
    lambda_name,
    name=lambda_name,
    description="IAM policy for logging from a lambda and table access",
    policy=books_dynamodb_table.arn.apply(
        lambda arn: json.dumps(
            {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Action": [
                            "dynamodb:UpdateItem",
                            "dynamodb:PutItem",
                            "dynamodb:GetItem",
                            "dynamodb:DescribeTable"
                        ],
                        "Resource": arn,
                        "Effect": "Allow",
                    },
                    {
                        "Action": [
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents"
                        ],
                        "Resource": "arn:aws:logs:*:*:*",
                        "Effect": "Allow"
                    }
                ]
            }
        )
    )
)

# Bind the policy to the execution role.
role_policy_attachment = aws.iam.RolePolicyAttachment(
    lambda_name,
    role=role.name,
    policy_arn=policy.arn)
| StarcoderdataPython |
1612973 | <reponame>gbrault/resonance<filename>resonance/tests/test_nonlinear_systems.py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import pytest
from pandas.util.testing import assert_frame_equal
from ..nonlinear_systems import (SingleDoFNonLinearSystem,
MultiDoFNonLinearSystem)
def test_single_dof_nonlinear_system():
    """Builds a simple pendulum system, attaches chained measurements,
    simulates the free response, and wires up (without running) the
    configuration-plot and animation-update callbacks."""

    class SimplePendulum(SingleDoFNonLinearSystem):
        pass

    sys = SimplePendulum()

    sys.constants['mass'] = 1.0  # kg
    sys.constants['length'] = 1.0  # m
    sys.constants['acc_due_to_grav'] = 9.81  # m/s**2

    sys.coordinates['angle'] = np.deg2rad(15.0)  # rad
    sys.speeds['angle_vel'] = 0.0  # rad/s

    # States should list coordinates first, then speeds, in entry order.
    assert list(sys.states.keys()) == ['angle', 'angle_vel']
    assert list(sys.states.values()) == [np.deg2rad(15.0), 0.0]

    def equations_of_motion(angle, angle_vel, length, acc_due_to_grav):
        l = length
        g = acc_due_to_grav
        thetad = angle_vel
        omegad = -g / l * np.sin(angle)
        return thetad, omegad

    # function that computes the right hand side of the equations of motion in
    # first order form
    sys.diff_eq_func = equations_of_motion

    def height(angle, length):
        """The Y coordinate points in the opposite of gravity, i.e. up. The X
        coordinate points to the right."""
        return -length * np.cos(angle)

    sys.add_measurement('height', height)

    def sway(angle, length):
        """The X coordinate points to the right."""
        return length * np.sin(angle)

    sys.add_measurement('sway', sway)

    def potential_energy(length, height, mass, acc_due_to_grav):
        # Depends on the 'height' measurement defined above.
        return mass * acc_due_to_grav * (length + height)

    sys.add_measurement('potential_energy', potential_energy)

    def kinetic_energy(mass, length, angle_vel):
        return mass * (length * angle_vel)**2 / 2.0

    sys.add_measurement('kinetic_energy', kinetic_energy)

    def total_energy(kinetic_energy, potential_energy):
        # Measurements can be functions of other measurements.
        return kinetic_energy + potential_energy

    sys.add_measurement('total_energy', total_energy)

    sample_rate = 30
    sys.free_response(5.0, sample_rate=sample_rate)

    def plot_config(mass, length, sway, height, time, sway__hist, height__hist,
                    time__hist, potential_energy__hist):
        # Left axes: pendulum configuration; right axes: potential energy.
        fig, axes = plt.subplots(1, 2)
        circle_radius = mass / 2.0 / 10.0
        axes[0].set_xlim((-length - circle_radius, length + circle_radius))
        axes[0].set_ylim((-length - circle_radius, length + circle_radius))
        axes[0].set_title('Pendulum')
        axes[0].set_aspect('equal')
        xlabel = axes[0].set_xlabel('Time: {:.3f}'.format(time))
        path_lines = axes[0].plot(sway__hist, height__hist, color='red')
        rod_lines = axes[0].plot([0, sway], [0, height])
        circle = Circle((sway, height), radius=circle_radius)
        axes[0].add_patch(circle)
        axes[1].set_ylim((0, 0.4))
        axes[1].set_xlim((0, 5))
        axes[1].set_xlabel('Time [s]')
        axes[1].set_ylabel('Potential Energy [J]')
        pe_lines = axes[1].plot(time__hist, potential_energy__hist)
        plt.tight_layout()
        # Objects returned here are handed to the update function below.
        return fig, circle, rod_lines, path_lines, pe_lines, xlabel

    sys.config_plot_func = plot_config

    def update_plot(sway, height, time, time__hist, sway__hist, height__hist,
                    potential_energy__hist, circle, rod_lines, path_lines,
                    pe_lines, xlabel):
        # Move the bob, rod, trajectory trace, and energy curve to the
        # current frame's values.
        circle.center = sway, height
        rod_lines[0].set_data([0, sway], [0, height])
        path_lines[0].set_data(sway__hist, height__hist)
        pe_lines[0].set_data(time__hist, potential_energy__hist)
        xlabel.set_text('Time: {:.3f}'.format(time))
        return circle, rod_lines, path_lines, pe_lines, xlabel

    sys.config_plot_update_func = update_plot
def test_sdof_trifilar_pendulum():
    """Exercises diff_eq_func validation, vectorized RHS evaluation for
    several input shapes, free_response after a constant change, and the
    __str__ output of a trifilar pendulum system."""
    sys = SingleDoFNonLinearSystem()

    sys.constants['m'] = 1  # kg
    sys.constants['r'] = 0.3  # m
    sys.constants['l'] = 0.75  # m
    sys.constants['g'] = 9.81  # m/s**2
    sys.constants['I'] = 0.3**2  # kg m**2

    sys.coordinates['theta'] = 0.2  # rad

    # A speed must be declared via ``speeds``, not ``coordinates``.
    with pytest.raises(ValueError):
        sys.coordinates['omega'] = 0.0  # rad/s

    sys.speeds['omega'] = 0.0  # rad/s

    def eval_rhs(theta, omega, I, m, r, l, g):
        return theta

    with pytest.raises(ValueError):  # wrong num of return args in diff_eq_func
        sys.diff_eq_func = eval_rhs

    def eval_rhs(theta, omega, I, m, r, l, g):
        theta_dot = omega
        omega_dot = (-m*r**2*(2*g*(l**2 + 2*r**2*np.cos(theta) -
                                   2*r**2)**4*np.sin(theta) +
                              2*r**4*(l**2 + 2*r**2*np.cos(theta) -
                                      2*r**2)**(5/2)*omega**2*np.sin(theta)**3 +
                              r**2*(l**2 + 2*r**2*np.cos(theta) -
                                    2*r**2)**(7/2)*omega**2*np.sin(2*theta)) /
                     (2*(I*(l**2 + 2*r**2*np.cos(theta) - 2*r**2) +
                         m*r**4*np.sin(theta)**2)*(l**2 + 2*r**2*np.cos(theta) -
                                                   2*r**2)**(7/2)))
        return theta_dot, omega_dot

    sys.diff_eq_func = eval_rhs

    # should work with shape(m, 2n)
    res = sys._ode_eval_func(np.array([[0.1, 0.2],
                                       [0.1, 0.2],
                                       [0.1, 0.2]]), [0.1, 0.2, 0.3])
    assert res.shape == (3, 2)

    # reset these (evaluation appears to advance the stored state)
    sys.coordinates['theta'] = 0.2  # rad
    sys.speeds['omega'] = 0.0  # rad/s
    sys._time['t'] = 0

    # should work with shape(m, 2n, 1)
    res = sys._ode_eval_func(np.random.random(3 * 2 * 1).reshape(3, 2, 1),
                             [0.1, 0.2, 0.3])
    assert res.shape == (3, 2, 1)

    # reset these
    sys.coordinates['theta'] = 0.2  # rad
    sys.speeds['omega'] = 0.0  # rad/s
    sys._time['t'] = 0

    traj = sys.free_response(2.0)

    # NOTE : See https://github.com/moorepants/resonance/issues/128
    sys.constants['l'] = 1.0  # m
    traj2 = sys.free_response(2.0)
    # Changing a constant must change the simulated trajectory.
    assert not traj.equals(traj2)

    assert 'theta' in traj.columns
    assert 'omega' in traj.columns
    assert 'theta_acc' in traj.columns

    desc = sys.__str__()
    expected_desc = """\
System name: SingleDoFNonLinearSystem
Differential equations function defined: True
Configuration plot function defined: False
Configuration update function defined: False
Constants
=========
m = 1.00000
r = 0.30000
l = 1.00000
g = 9.81000
I = 0.09000
Coordinates
===========
theta = 0.20000
Speeds
======
omega = d(theta)/dt = 0.00000
Measurements
============
"""
    assert desc == expected_desc
def test_multi_dof_nonlinear_system():
    """Builds a two-mass spring system, checks state ordering and __str__,
    exercises the vectorized RHS evaluator for several input shapes, and
    runs free_response with two integrators."""
    sys = MultiDoFNonLinearSystem()

    sys.constants['m'] = 1.0  # kg
    sys.constants['k'] = 0.5  # N/m
    # NOTE : This could confuse the students because we couldn't do this with
    # linear systems. You had to pass these values into the response methods.
    sys.constants['Fo'] = 1.0  # N
    sys.constants['w'] = 3.0  # rad/s

    # NOTE : The order of declaration will define the order of the states.
    sys.coordinates['x2'] = 0.2  # m
    sys.coordinates['x1'] = 0.1  # m

    # should be in order of entry
    assert list(sys.coordinates.keys()) == ['x2', 'x1']

    # TODO : How do we know which speed corresponds to which coordinate
    # derivative?
    sys.speeds['v1'] = 0.01  # m/s
    sys.speeds['v2'] = 0.02  # m/s

    # should be in order of entry
    assert list(sys.speeds.keys()) == ['v1', 'v2']
    assert list(sys.states.keys()) == ['x2', 'x1', 'v1', 'v2']

    # Placeholder dynamics (state * time) used only for the shape checks
    # below; the real spring dynamics are assigned later in this test.
    def rhs(x1, x2, v1, v2, m, k, Fo, w, time):
        # two masses connected by springs in series sliding on frictionless
        # surface with one spring attached to wall with sinusoidal forcing on
        # the end spring
        x1d = x1 * time
        x2d = x2 * time
        v1d = v1 * time
        v2d = v2 * time
        # NOTE : Order of output must match sys.states!
        return x2d, x1d, v1d, v2d

    sys.diff_eq_func = rhs

    desc = sys.__str__()
    expected_desc = """\
System name: MultiDoFNonLinearSystem
Differential equations function defined: True
Configuration plot function defined: False
Configuration update function defined: False
Constants
=========
m = 1.00000
k = 0.50000
Fo = 1.00000
w = 3.00000
Coordinates
===========
x2 = 0.20000
x1 = 0.10000
Speeds
======
v1 = d(x2)/dt = 0.01000
v2 = d(x1)/dt = 0.02000
Measurements
============
"""
    assert desc == expected_desc

    # should work with shape(2n,)
    x = np.random.random(4)
    t = 0.1
    res = sys._ode_eval_func(x, t)
    assert res.shape == (4,)
    np.testing.assert_allclose(res, x * t)

    # reset the state between evaluations (evaluation appears to mutate it)
    sys.coordinates['x2'] = 0.2
    sys.coordinates['x1'] = 0.1
    sys.speeds['v1'] = 0.01
    sys.speeds['v2'] = 0.02
    sys._time['t'] = 0.0

    # should work with shape(1, 2n)
    x = np.random.random(4).reshape(1, 4)
    t = 0.1
    res = sys._ode_eval_func(x, t)
    assert res.shape == (1, 4)
    np.testing.assert_allclose(res, x * t)

    sys.coordinates['x2'] = 0.2
    sys.coordinates['x1'] = 0.1
    sys.speeds['v1'] = 0.01
    sys.speeds['v2'] = 0.02
    sys._time['t'] = 0.0

    # should work with shape(1, 2n, 1)
    x = np.random.random(4).reshape(1, 4, 1)
    t = 0.1
    res = sys._ode_eval_func(x, t)
    assert res.shape == (1, 4, 1)
    np.testing.assert_allclose(res, x * t)

    sys.coordinates['x2'] = 0.2
    sys.coordinates['x1'] = 0.1
    sys.speeds['v1'] = 0.01
    sys.speeds['v2'] = 0.02
    sys._time['t'] = 0.0

    # should work with shape(m, 2n)
    x = np.random.random(10 * 4).reshape(10, 4)
    t = np.random.random(10)
    res = sys._ode_eval_func(x, t)
    assert res.shape == (10, 4)
    np.testing.assert_allclose(res, x * t[:, np.newaxis])

    sys.coordinates['x2'] = 0.2
    sys.coordinates['x1'] = 0.1
    sys.speeds['v1'] = 0.01
    sys.speeds['v2'] = 0.02
    sys._time['t'] = 0.0

    # should work with shape(m, 2n, 1)
    x = np.random.random(10 * 4).reshape(10, 4, 1)
    t = np.random.random(10)
    res = sys._ode_eval_func(x, t)
    assert res.shape == (10, 4, 1)
    np.testing.assert_allclose(res, x * t[:, np.newaxis, np.newaxis])

    sys.coordinates['x2'] = 0.2
    sys.coordinates['x1'] = 0.1
    sys.speeds['v1'] = 0.01
    sys.speeds['v2'] = 0.02
    sys._time['t'] = 0.0

    # NOTE : Order of args does not matter here in the function signature.
    def rhs(x1, x2, v1, v2, m, k, Fo, w, time):
        # two masses connected by springs in series sliding on frictionless
        # surface with one spring attached to wall with sinusoidal forcing on
        # the end spring
        x1d = v1
        x2d = v2
        v1d = (-k * x1 + k * (x2 - x1)) / m
        v2d = (-k * (x2 - x1) + Fo * np.cos(w * time)) / m
        # NOTE : Order of output must match sys.states!
        return x2d, x1d, v1d, v2d

    sys.diff_eq_func = rhs

    traj = sys.free_response(5.0)

    for s in sys.states.keys():
        assert s in traj.columns

    # NOTE : The two integrators do not give the same answer, but they can be
    # compared to a low precision.
    traj = sys.free_response(3.0)
    traj2 = sys.free_response(3.0, integrator='lsoda')
    # TODO : This check it not working even at low precision. The results are
    # pretty different.
    #assert_frame_equal(traj, traj2, check_less_precise=1)
def test_measurements_in_diff_eq_func():
    """Regression test for issue 127: a measurement may be used as an input
    to the differential equation function.
    https://github.com/moorepants/resonance/issues/127
    """
    def build_system():
        # Identical mass-spring setup for both variants under comparison.
        system = SingleDoFNonLinearSystem()
        system.constants['m'] = 1
        system.constants['k'] = 100
        system.coordinates['x'] = 1
        system.speeds['v'] = 0
        return system

    sys_with_meas = build_system()

    def f(x, k):
        return k * x

    sys_with_meas.add_measurement('spring_force', f)

    def rhs(x, v, m, spring_force):
        # The 'spring_force' measurement is injected by parameter name.
        xdot = v
        vdot = -spring_force / m
        return xdot, vdot

    sys_with_meas.diff_eq_func = rhs
    traj_with_meas = sys_with_meas.free_response(2)
    # Drop the measurement column so the frames have matching columns.
    del traj_with_meas['spring_force']

    sys_plain = build_system()

    def rhs(x, v, m, k):
        # Same dynamics, written directly in terms of the constants.
        xdot = v
        vdot = -k * x / m
        return xdot, vdot

    sys_plain.diff_eq_func = rhs
    traj_plain = sys_plain.free_response(2)

    assert_frame_equal(traj_with_meas, traj_plain)
| StarcoderdataPython |
1762876 | <reponame>tommbendall/peakondrake
from peakondrake import *
# Label identifying this experiment's output.
code = 'test_jump'
# Ld -- presumably the spatial domain length; confirm against peakondrake's
# experiment() signature.
Ld = 40.
# Time step size.
dt = 0.001
# Final simulation time.
tmax = 0.01
experiment(code, Ld, tmax,
           # Sweep over these mesh resolutions.
           resolutions=[100, 500, 1000, 5000, 10000],
           dts=dt,
           sigmas=0.0,
           seeds=0,
           schemes='upwind',
           timesteppings='midpoint',
           ics='one_peak',
           num_Xis=0,
           Xi_family='sines',
           alphasq=1.0,
           c0=0.,
           gamma=0.,
           diagnostics=['l2_m', 'max_jump_local', 'max_du_loc', 'min_du_loc'],
           fields_to_output=['uscalar', 'du'],
           # tmax / (10 * dt) spaces the dumps so roughly 10 outputs occur
           # over the whole run.
           ndump=int(tmax / (10 * dt)),
           field_ndump=int(tmax / (10 * dt)))
| StarcoderdataPython |
4828176 | <gh_stars>10-100
#from ..functions._load_features import load_features, make_windows
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.