hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f84601803d72cfa1638fcfd7b2402d63e07ffb | 95 | py | Python | src/PythonDotNet/echo_and_sleep.py | ravikhubchandani/dynamic-web-scraper | a0ae988d52756dc43d25a0eed2752000e97a7491 | [
"MIT"
] | null | null | null | src/PythonDotNet/echo_and_sleep.py | ravikhubchandani/dynamic-web-scraper | a0ae988d52756dc43d25a0eed2752000e97a7491 | [
"MIT"
] | null | null | null | src/PythonDotNet/echo_and_sleep.py | ravikhubchandani/dynamic-web-scraper | a0ae988d52756dc43d25a0eed2752000e97a7491 | [
"MIT"
] | 1 | 2021-07-22T22:54:59.000Z | 2021-07-22T22:54:59.000Z | import sys
import time
msg = sys.argv[1]
sleep = sys.argv[2]
print(msg)
time.sleep(int(sleep)) | 13.571429 | 22 | 0.715789 | import sys
import time
msg = sys.argv[1]
sleep = sys.argv[2]
print(msg)
time.sleep(int(sleep)) | true | true |
f7f846b13ce4b89c41ba206b73951ba6c3817680 | 2,159 | py | Python | bldc/odrive/drag_profiler.py | rravenel/furuta_pendulum | b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f | [
"MIT"
] | 2 | 2021-09-23T16:29:23.000Z | 2021-09-30T19:55:44.000Z | bldc/odrive/drag_profiler.py | rravenel/furuta_pendulum | b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f | [
"MIT"
] | null | null | null | bldc/odrive/drag_profiler.py | rravenel/furuta_pendulum | b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f | [
"MIT"
] | null | null | null | import odrive
import matplotlib.pyplot as plt
import time
'''
Measure internal drag/friction. Spin it up and clock it slowing down.
1465rpm / 154 rad/s / 24.4Hz: -356 rad/s^2
2197rpm / 230 rad/s / 36.6Hz: -378 rad/s^2
2930rpm / 307 rad/s / 48.8Hz: -342 rad/s^2
3663rpm / 383 rad/s / 61.0Hz: -324 rad/s^2
'''
# Velocity setpoint in encoder counts/s (controller max is 600,000).
v_target = 500000
t_sample = 5        # max seconds to record per coast-down run
still_count = 20    # consecutive ~zero-velocity samples that end a run early
c2rad = 1303.8      # encoder counts per radian (counts/s -> rad/s divisor)
t_cut = 1.05        # only samples with 0.05s < t < t_cut feed the drag average
v_sample = 25       # every v_sample-th sample closes out one drag-map bucket
print("Connecting...")
d = odrive.find_any()
print("Connected")
x = d.axis0
x.controller.config.control_mode = 1    # NOTE(review): appears to be current-control mode -- confirm against ODrive docs
x.controller.current_setpoint = 0       # zero torque so the rotor can coast freely
def test():
    """Spin the motor up to v_target, cut torque, and record the coast-down.

    Appends (timestamp, velocity [rad/s], acceleration [rad/s^2]) samples
    to the module-level buffers buf_t / buf_v / buf_a until either
    t_sample seconds elapse or the rotor reads ~zero velocity
    still_count times in a row.
    """
    # Velocity control (mode 2) to reach the target speed.
    x.controller.config.control_mode = 2
    x.controller.vel_setpoint = v_target
    time.sleep(0.5)
    # Back to current control with zero setpoint: no torque, free coast.
    x.controller.config.control_mode = 1
    x.controller.current_setpoint = 0
    v_last = 0
    t_start = time.time()
    t_last = t_start
    now = t_start
    zero_v_count = 0
    while now - t_start < t_sample:
        if zero_v_count >= still_count:
            break
        v = x.encoder.vel_estimate
        v = v / c2rad               # counts/s -> rad/s
        now = time.time()
        dv = v - v_last
        dt = now - t_last
        a = dv/dt                   # finite-difference acceleration, rad/s^2
        buf_t.append(now)
        buf_v.append(v)
        buf_a.append(a)
        v_last = v
        t_last = now
        # Count consecutive near-zero readings; exit early once stopped.
        if 0 == int(v):
            zero_v_count += 1
        else:
            zero_v_count = 0
# Shared sample buffers filled by test(): timestamps, velocities (rad/s),
# accelerations (rad/s^2).
buf_t = []
buf_v = []
buf_a = []
count = 1
for i in range(count):
    test()
# throw out first sample from v = 0 (its dv/dt is meaningless)
buf_t = buf_t[1:]
buf_v = buf_v[1:]
buf_a = buf_a[1:]
data = []          # accelerations inside the (0.05, t_cut) window -> overall drag
drag_map_v = []    # bucket-center velocities for the drag-vs-velocity map
drag_map = []      # mean acceleration per bucket
buf_seg = []       # accelerations accumulated for the current bucket
t_start = buf_t[0]
for i in range(len(buf_t)):
    t = buf_t[i] - t_start
    v = buf_v[i]
    a = int(buf_a[i])
    print("#%d:\tt: %fs\tv: %frad/s\ta: %drad/s2" % (i, t, v, a))
    if t > 0.05 and t < t_cut:
        data.append(a)
    buf_seg.append(a)
    # Every v_sample samples, close a bucket: record its average
    # acceleration against an approximate mid-bucket velocity.
    # NOTE(review): the lookback is a fixed 10 samples even though the
    # bucket is v_sample (25) wide -- confirm this offset is intended.
    if i > 0 and 0 == i%v_sample:
        v_diff = buf_v[i-10] - v
        drag_map_v.append(v + v_diff/2)
        drag_map.append(sum(buf_seg)/len(buf_seg))
        buf_seg = []
        print("\tv: %f\td: %f" % (drag_map_v[-1], drag_map[-1]))
    # alter for rendering: re-base time, scale velocity for plotting
    buf_t[i] = t
    buf_v[i] = 25 * v
drag = sum(data) / len(data)
print("Acceleration due to drag: %frad/s2" % (drag))
# Raw-trace debug plots (left disabled):
#plt.plot(buf_t, len(buf_t) * [0])
#plt.plot(buf_t, buf_a)
#plt.plot(buf_t, buf_v)
plt.plot(drag_map_v, len(drag_map) * [0])
plt.plot(drag_map_v, drag_map)
plt.show()
| 18.29661 | 70 | 0.64428 | import odrive
import matplotlib.pyplot as plt
import time
v_target = 500000
t_sample = 5
still_count = 20
c2rad = 1303.8
t_cut = 1.05
v_sample = 25
print("Connecting...")
d = odrive.find_any()
print("Connected")
x = d.axis0
x.controller.config.control_mode = 1
x.controller.current_setpoint = 0
def test():
x.controller.config.control_mode = 2
x.controller.vel_setpoint = v_target
time.sleep(0.5)
x.controller.config.control_mode = 1
x.controller.current_setpoint = 0
v_last = 0
t_start = time.time()
t_last = t_start
now = t_start
zero_v_count = 0
while now - t_start < t_sample:
if zero_v_count >= still_count:
break
v = x.encoder.vel_estimate
v = v / c2rad
now = time.time()
dv = v - v_last
dt = now - t_last
a = dv/dt
buf_t.append(now)
buf_v.append(v)
buf_a.append(a)
v_last = v
t_last = now
if 0 == int(v):
zero_v_count += 1
else:
zero_v_count = 0
buf_t = []
buf_v = []
buf_a = []
count = 1
for i in range(count):
test()
buf_t = buf_t[1:]
buf_v = buf_v[1:]
buf_a = buf_a[1:]
data = []
drag_map_v = []
drag_map = []
buf_seg = []
t_start = buf_t[0]
for i in range(len(buf_t)):
t = buf_t[i] - t_start
v = buf_v[i]
a = int(buf_a[i])
print("#%d:\tt: %fs\tv: %frad/s\ta: %drad/s2" % (i, t, v, a))
if t > 0.05 and t < t_cut:
data.append(a)
buf_seg.append(a)
if i > 0 and 0 == i%v_sample:
v_diff = buf_v[i-10] - v
drag_map_v.append(v + v_diff/2)
drag_map.append(sum(buf_seg)/len(buf_seg))
buf_seg = []
print("\tv: %f\td: %f" % (drag_map_v[-1], drag_map[-1]))
buf_t[i] = t
buf_v[i] = 25 * v
drag = sum(data) / len(data)
print("Acceleration due to drag: %frad/s2" % (drag))
plt.plot(drag_map_v, len(drag_map) * [0])
plt.plot(drag_map_v, drag_map)
plt.show()
| true | true |
f7f847160404de669c8660bda4ba3cd69e81f082 | 5,560 | py | Python | tensorpack/train/input_data.py | arassadin/SYQ | d30e6f0053ada3ad504038698a8756425594aa22 | [
"Apache-2.0"
] | 1 | 2018-03-23T16:26:23.000Z | 2018-03-23T16:26:23.000Z | tensorpack/train/input_data.py | andrewliao11/Andrew_tensorpack | 735a2672e3d93b5b612a303b5b6d222e9b2d4280 | [
"Apache-2.0"
] | 1 | 2019-03-10T16:32:12.000Z | 2019-03-11T11:08:52.000Z | tensorpack/train/input_data.py | arassadin/SYQ | d30e6f0053ada3ad504038698a8756425594aa22 | [
"Apache-2.0"
] | 2 | 2019-01-18T15:10:51.000Z | 2019-02-20T17:04:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: input_data.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import threading
from abc import ABCMeta, abstractmethod
import six
from ..dataflow import DataFlow, RepeatedData
from ..tfutils.summary import add_moving_summary
from ..utils import logger
from ..callbacks.concurrency import StartProcOrThread
__all__ = ['QueueInput', 'FeedfreeInput', 'TensorInput',
'DummyConstantInput']
@six.add_metaclass(ABCMeta)
class InputData(object):
    """Abstract base class for trainer input sources (feed-based or feed-free)."""
    pass
class FeedInput(InputData):
    """Input source that delivers datapoints through a ``feed_dict``.

    Wraps a ``DataFlow``; after ``_setup`` has bound it to a trainer,
    each call to :meth:`next_feed` yields one feed dict mapping the
    model's input placeholders to the next datapoint.
    """

    def __init__(self, ds):
        assert isinstance(ds, DataFlow), ds
        self.ds = ds

    def size(self):
        """Return the size of the underlying DataFlow."""
        return self.ds.size()

    def _setup(self, trainer):
        self.input_vars = trainer.model.get_input_vars()
        # Repeat the dataflow indefinitely (-1) so the producer never ends.
        repeated = RepeatedData(self.ds, -1)
        repeated.reset_state()
        self.data_producer = repeated.get_data()

    def next_feed(self):
        """Return the next feed_dict for the model's input placeholders."""
        datapoint = next(self.data_producer)
        return dict(zip(self.input_vars, datapoint))
class FeedfreeInput(InputData):
    """Abstract input source that produces input tensors directly (no feed_dict)."""

    def get_input_tensors(self):
        # Public entry point; delegates to the subclass hook below.
        return self._get_input_tensors()

    @abstractmethod
    def _get_input_tensors(self):
        """
        Always create and return a list of new input tensors.
        """
class EnqueueThread(threading.Thread):
    """Daemon thread that pulls datapoints from a DataFlow and enqueues them
    into a TF queue, until the trainer's coordinator requests a stop.
    """

    def __init__(self, trainer, queue, ds, input_placehdrs):
        super(EnqueueThread, self).__init__()
        self.name = 'EnqueueThread'
        self.daemon = True

        self.dataflow = ds
        self.queue = queue

        self.sess = trainer.sess
        self.coord = trainer.coord
        self.placehdrs = input_placehdrs

        # Pre-built graph ops: enqueue one datapoint, close the queue
        # (cancelling pending enqueues on shutdown), and read the queue
        # size for the monitoring summary.
        self.op = self.queue.enqueue(self.placehdrs)
        self.close_op = self.queue.close(cancel_pending_enqueues=True)
        self.size_op = self.queue.size()
        add_moving_summary(tf.cast(
            self.size_op, tf.float32, name='input_queue_size'))

    def run(self):
        """Thread body: loop over the dataflow forever, feeding each
        datapoint into the enqueue op under the trainer's session."""
        self.dataflow.reset_state()
        with self.sess.as_default():
            try:
                while True:
                    for dp in self.dataflow.get_data():
                        if self.coord.should_stop():
                            return
                        feed = dict(zip(self.placehdrs, dp))
                        #print 'qsize:', self.sess.run([self.op, self.size_op], feed_dict=feed)[1]
                        self.op.run(feed_dict=feed)
            except tf.errors.CancelledError as e:
                # Queue was closed during shutdown -- expected; ignore.
                pass
            except Exception:
                logger.exception("Exception in EnqueueThread:")
            finally:
                # Always signal the coordinator and try to close the queue
                # so that dequeue ops on the consumer side are unblocked.
                self.coord.request_stop()
                try:
                    self.sess.run(self.close_op)
                except RuntimeError:    # session already closed
                    pass
                logger.info("Enqueue Thread Exited.")
class QueueInput(FeedfreeInput):
    """Feed-free input source that buffers datapoints from a DataFlow in a
    TF queue, filled by a background :class:`EnqueueThread`."""

    def __init__(self, ds, queue=None):
        """
        :param ds: a `DataFlow` instance
        :param queue: a `tf.QueueBase` instance to be used to buffer datapoints.
            Defaults to a FIFO queue of size 50.
        """
        assert isinstance(ds, DataFlow), ds
        self.queue = queue
        self.ds = ds

    def size(self):
        return self.ds.size()

    def _setup(self, trainer):
        self.input_placehdrs = trainer.model.get_input_vars()
        assert len(self.input_placehdrs) > 0, \
            "QueueInput can only be used with input placeholders!"
        if self.queue is None:
            self.queue = tf.FIFOQueue(
                50, [x.dtype for x in self.input_placehdrs],
                name='input_queue')
        # The enqueue thread is started by the trainer through this callback.
        self.thread = EnqueueThread(
            trainer, self.queue, self.ds, self.input_placehdrs)
        trainer.config.callbacks.append(StartProcOrThread(self.thread))

    def _get_input_tensors(self):
        ret = self.queue.dequeue(name='input_deque')
        if isinstance(ret, tf.Tensor):  # only one input
            ret = [ret]
        assert len(ret) == len(self.input_placehdrs)
        # Dequeued tensors lose their static shapes; restore them from the
        # corresponding placeholders.
        for qv, v in zip(ret, self.input_placehdrs):
            qv.set_shape(v.get_shape())

        # test the overhead of queue
        #with tf.device('/gpu:0'):
            #ret = [tf.Variable(tf.random_normal([128,224,224,3],
                #dtype=tf.float32), trainable=False),
                #tf.Variable(tf.ones([128], dtype=tf.int32), trainable=False)]
        return ret
class DummyConstantInput(QueueInput):
    """Feeds the model from constant GPU variables instead of real data;
    only for debugging performance issues."""

    def __init__(self, ds, shapes):
        super(DummyConstantInput, self).__init__(ds)
        self.shapes = shapes
        logger.warn("Using dummy input for debug!")

    def _get_input_tensors(self):
        # One zero-initialized, non-trainable variable per placeholder,
        # created on the first GPU.
        placehdrs = self.input_placehdrs
        assert len(self.shapes) == len(placehdrs)
        ret = []
        for shape, p in zip(self.shapes, placehdrs):
            with tf.device('/gpu:0'):
                var = tf.get_variable(
                    'dummy-' + p.op.name, shape=shape, dtype=p.dtype,
                    trainable=False, initializer=tf.constant_initializer())
            ret.append(var)
        return ret
class TensorInput(FeedfreeInput):
    """Feed-free input whose tensors come from a user-supplied callable."""

    def __init__(self, get_tensor_fn, size=None):
        """
        :param get_tensor_fn: a callable that returns the input tensors
        :param size: optional size of the input; ``size()`` raises if unset
        """
        self.get_tensor_fn = get_tensor_fn
        self._size = size

    def size(self):
        if self._size is not None:
            return self._size
        raise ValueError("size of TensorInput is undefined!")

    def _setup(self, trainer):
        # Nothing to prepare: tensors are produced on demand.
        pass

    def _get_input_tensors(self):
        return self.get_tensor_fn()
| 33.095238 | 98 | 0.60054 |
import tensorflow as tf
import threading
from abc import ABCMeta, abstractmethod
import six
from ..dataflow import DataFlow, RepeatedData
from ..tfutils.summary import add_moving_summary
from ..utils import logger
from ..callbacks.concurrency import StartProcOrThread
__all__ = ['QueueInput', 'FeedfreeInput', 'TensorInput',
'DummyConstantInput']
@six.add_metaclass(ABCMeta)
class InputData(object):
pass
class FeedInput(InputData):
def __init__(self, ds):
assert isinstance(ds, DataFlow), ds
self.ds = ds
def size(self):
return self.ds.size()
def _setup(self, trainer):
self.input_vars = trainer.model.get_input_vars()
rds = RepeatedData(self.ds, -1)
rds.reset_state()
self.data_producer = rds.get_data()
def next_feed(self):
data = next(self.data_producer)
feed = dict(zip(self.input_vars, data))
return feed
class FeedfreeInput(InputData):
def get_input_tensors(self):
return self._get_input_tensors()
@abstractmethod
def _get_input_tensors(self):
class EnqueueThread(threading.Thread):
def __init__(self, trainer, queue, ds, input_placehdrs):
super(EnqueueThread, self).__init__()
self.name = 'EnqueueThread'
self.daemon = True
self.dataflow = ds
self.queue = queue
self.sess = trainer.sess
self.coord = trainer.coord
self.placehdrs = input_placehdrs
self.op = self.queue.enqueue(self.placehdrs)
self.close_op = self.queue.close(cancel_pending_enqueues=True)
self.size_op = self.queue.size()
add_moving_summary(tf.cast(
self.size_op, tf.float32, name='input_queue_size'))
def run(self):
self.dataflow.reset_state()
with self.sess.as_default():
try:
while True:
for dp in self.dataflow.get_data():
if self.coord.should_stop():
return
feed = dict(zip(self.placehdrs, dp))
self.op.run(feed_dict=feed)
except tf.errors.CancelledError as e:
pass
except Exception:
logger.exception("Exception in EnqueueThread:")
finally:
self.coord.request_stop()
try:
self.sess.run(self.close_op)
except RuntimeError:
pass
logger.info("Enqueue Thread Exited.")
class QueueInput(FeedfreeInput):
def __init__(self, ds, queue=None):
assert isinstance(ds, DataFlow), ds
self.queue = queue
self.ds = ds
def size(self):
return self.ds.size()
def _setup(self, trainer):
self.input_placehdrs = trainer.model.get_input_vars()
assert len(self.input_placehdrs) > 0, \
"QueueInput can only be used with input placeholders!"
if self.queue is None:
self.queue = tf.FIFOQueue(
50, [x.dtype for x in self.input_placehdrs],
name='input_queue')
self.thread = EnqueueThread(
trainer, self.queue, self.ds, self.input_placehdrs)
trainer.config.callbacks.append(StartProcOrThread(self.thread))
def _get_input_tensors(self):
ret = self.queue.dequeue(name='input_deque')
if isinstance(ret, tf.Tensor):
ret = [ret]
assert len(ret) == len(self.input_placehdrs)
for qv, v in zip(ret, self.input_placehdrs):
qv.set_shape(v.get_shape())
return ret
class DummyConstantInput(QueueInput):
def __init__(self, ds, shapes):
super(DummyConstantInput, self).__init__(ds)
self.shapes = shapes
logger.warn("Using dummy input for debug!")
def _get_input_tensors(self):
placehdrs = self.input_placehdrs
assert len(self.shapes) == len(placehdrs)
ret = []
for idx, p in enumerate(placehdrs):
with tf.device('/gpu:0'):
ret.append(tf.get_variable('dummy-' + p.op.name,
shape=self.shapes[idx], dtype=p.dtype, trainable=False,
initializer=tf.constant_initializer()))
return ret
class TensorInput(FeedfreeInput):
def __init__(self, get_tensor_fn, size=None):
self.get_tensor_fn = get_tensor_fn
self._size = size
def size(self):
if self._size is None:
raise ValueError("size of TensorInput is undefined!")
return self._size
def _setup(self, trainer):
pass
def _get_input_tensors(self):
return self.get_tensor_fn()
| true | true |
f7f847432998927e6c55b2038310d7fb3624be7d | 23,244 | py | Python | tests/annotations/tests.py | shinshin86/django | 5cc81cd9eb69f5f7a711412c02039b435c393135 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-05-13T10:40:59.000Z | 2019-05-13T10:40:59.000Z | tests/annotations/tests.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-09-27T16:40:34.000Z | 2019-09-27T16:40:34.000Z | tests/annotations/tests.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-04T08:47:02.000Z | 2020-11-04T08:47:02.000Z | import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, NullBooleanField, Q, Sum, Value,
)
from django.db.models.functions import Length, Lower
from django.test import TestCase, skipUnlessDBFeature
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
    """
    Decorator marking a test as an expected failure when run on Oracle.

    Django/cx_Oracle mishandles strings under Python 3 (they are treated
    like Python 2 byte strings rather than unicode), which breaks several
    tests here; mark them expected failures until #23843 is fixed.
    """
    from unittest import expectedFailure
    from django.db import connection
    if connection.vendor == 'oracle':
        return expectedFailure(func)
    return func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_empty_expression_annotation(self):
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual({p.last_name for p in people}, {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
lengths = Employee.objects.annotate(
name_len=Length('first_name'),
).distinct('name_len').values_list('name_len', flat=True)
self.assertSequenceEqual(lengths, [3, 7, 8])
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_decimal_annotation(self):
salary = Decimal(10) ** -Employee._meta.get_field('salary').decimal_places
Employee.objects.create(
first_name='Max',
last_name='Paine',
store=Store.objects.first(),
age=23,
salary=salary,
)
self.assertEqual(
Employee.objects.annotate(new_salary=F('salary') / 10).get().new_salary,
salary / 10,
)
def test_filter_decimal_annotation(self):
qs = Book.objects.annotate(new_price=F('price') + 1).filter(new_price=Decimal(31)).values_list('new_price')
self.assertEqual(qs.get(), (Decimal(31),))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name'),
).filter(
name='Practical Django Projects',
).order_by('store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_values_with_pk_annotation(self):
# annotate references a field in values() with pk
publishers = Publisher.objects.values('id', 'book__rating').annotate(total=Sum('book__rating'))
for publisher in publishers.filter(pk=self.p1.pk):
self.assertEqual(publisher['book__rating'], publisher['total'])
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Columns are aligned in the correct order for resolve_columns. This test
will fail on MySQL if column ordering is out. Column fields should be
aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE'
)
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
    @cxOracle_py3_bug
    def test_custom_functions_can_ref_other_functions(self):
        """A custom Func annotation can reference the result of a previous annotation."""
        Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
        Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
        Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
        Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
        # Minimal custom Func wrapping the SQL LOWER function.
        class Lower(Func):
            function = 'LOWER'
        qs = Company.objects.annotate(
            tagline=Func(
                F('motto'),
                F('ticker_name'),
                F('description'),
                Value('No Tag'),
                function='COALESCE',
            )
        ).annotate(
            tagline_lower=Lower(F('tagline'), output_field=CharField())
        ).order_by('name')
        # LOWER function supported by:
        # oracle, postgres, mysql, sqlite, sqlserver
        self.assertQuerysetEqual(
            qs, [
                ('Apple', 'APPL'.lower()),
                ('Django Software Foundation', 'No Tag'.lower()),
                ('Google', 'Do No Evil'.lower()),
                ('Yahoo', 'Internet Company'.lower())
            ],
            lambda c: (c.name, c.tagline_lower)
        )
    def test_boolean_value_annotation(self):
        """Value() annotations with Boolean/NullBoolean output fields round-trip as bool/None."""
        books = Book.objects.annotate(
            is_book=Value(True, output_field=BooleanField()),
            is_pony=Value(False, output_field=BooleanField()),
            is_none=Value(None, output_field=BooleanField(null=True)),
            is_none_old=Value(None, output_field=NullBooleanField()),
        )
        self.assertGreater(len(books), 0)
        for book in books:
            # assertIs: the values must be the actual bool singletons, not truthy ints.
            self.assertIs(book.is_book, True)
            self.assertIs(book.is_pony, False)
            self.assertIsNone(book.is_none)
            self.assertIsNone(book.is_none_old)
    def test_annotation_in_f_grouped_by_annotation(self):
        """An F() reference to a Value() annotation can feed an aggregate grouped by values()."""
        qs = (
            Publisher.objects.annotate(multiplier=Value(3))
            # group by option => sum of value * multiplier
            .values('name')
            .annotate(multiplied_value_sum=Sum(F('multiplier') * F('num_awards')))
            .order_by()
        )
        self.assertCountEqual(
            qs, [
                {'multiplied_value_sum': 9, 'name': 'Apress'},
                {'multiplied_value_sum': 0, 'name': "Jonno's House of Books"},
                {'multiplied_value_sum': 27, 'name': 'Morgan Kaufmann'},
                {'multiplied_value_sum': 21, 'name': 'Prentice Hall'},
                {'multiplied_value_sum': 3, 'name': 'Sams'},
            ]
        )
    def test_arguments_must_be_expressions(self):
        """annotate() rejects positional or keyword arguments that are not expressions."""
        msg = 'QuerySet.annotate() received non-expression(s): %s.'
        with self.assertRaisesMessage(TypeError, msg % BooleanField()):
            Book.objects.annotate(BooleanField())
        with self.assertRaisesMessage(TypeError, msg % True):
            Book.objects.annotate(is_book=True)
        # All offending arguments are listed together in the error message.
        with self.assertRaisesMessage(TypeError, msg % ', '.join([str(BooleanField()), 'True'])):
            Book.objects.annotate(BooleanField(), Value(False), is_book=True)
| 41.507143 | 115 | 0.606307 | import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, NullBooleanField, Q, Sum, Value,
)
from django.db.models.functions import Length, Lower
from django.test import TestCase, skipUnlessDBFeature
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
    """Return *func* marked as an expected failure when the backend is Oracle.

    On every other database backend the test function is returned unchanged.
    (The name refers to a cx_Oracle/Python 3 issue — see the decorated tests.)
    """
    from unittest import expectedFailure
    from django.db import connection
    if connection.vendor == 'oracle':
        return expectedFailure(func)
    return func
class NonAggregateAnnotationTestCase(TestCase):
    """Tests for QuerySet.annotate() with non-aggregate expressions (Value, F, Func, ...)."""
    @classmethod
    def setUpTestData(cls):
        """Create the authors, publishers, books and stores shared by every test."""
        # Authors and their friendship graph.
        cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
        cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
        cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
        cls.a4 = Author.objects.create(name='James Bennett', age=29)
        cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
        cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
        cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
        cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
        cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
        cls.a1.friends.add(cls.a2, cls.a4)
        cls.a2.friends.add(cls.a1, cls.a7)
        cls.a4.friends.add(cls.a1)
        cls.a5.friends.add(cls.a6, cls.a7)
        cls.a6.friends.add(cls.a5, cls.a7)
        cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
        cls.a8.friends.add(cls.a9)
        cls.a9.friends.add(cls.a8)
        # Publishers.
        cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
        cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
        cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
        cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
        cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
        # Books and their authors.
        cls.b1 = Book.objects.create(
            isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
            pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
            pubdate=datetime.date(2007, 12, 6)
        )
        cls.b2 = Book.objects.create(
            isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
            pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
            pubdate=datetime.date(2008, 3, 3)
        )
        cls.b3 = Book.objects.create(
            isbn='159059996', name='Practical Django Projects',
            pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
            pubdate=datetime.date(2008, 6, 23)
        )
        cls.b4 = Book.objects.create(
            isbn='013235613', name='Python Web Development with Django',
            pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
            pubdate=datetime.date(2008, 11, 3)
        )
        cls.b5 = Book.objects.create(
            isbn='013790395', name='Artificial Intelligence: A Modern Approach',
            pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
            pubdate=datetime.date(1995, 1, 15)
        )
        cls.b6 = Book.objects.create(
            isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
            pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
            pubdate=datetime.date(1991, 10, 15)
        )
        cls.b1.authors.add(cls.a1, cls.a2)
        cls.b2.authors.add(cls.a3)
        cls.b3.authors.add(cls.a4)
        cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
        cls.b5.authors.add(cls.a8, cls.a9)
        cls.b6.authors.add(cls.a8)
        # Stores and their stock.
        s1 = Store.objects.create(
            name='Amazon.com',
            original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s2 = Store.objects.create(
            name='Books.com',
            original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s3 = Store.objects.create(
            name="Mamma and Pappa's Books",
            original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
            friday_night_closing=datetime.time(21, 30)
        )
        s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
        s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
        s3.books.add(cls.b3, cls.b4, cls.b6)
    def test_basic_annotation(self):
        books = Book.objects.annotate(
            is_book=Value(1, output_field=IntegerField()))
        for book in books:
            self.assertEqual(book.is_book, 1)
    def test_basic_f_annotation(self):
        books = Book.objects.annotate(another_rating=F('rating'))
        for book in books:
            self.assertEqual(book.another_rating, book.rating)
    def test_joined_annotation(self):
        books = Book.objects.select_related('publisher').annotate(
            num_awards=F('publisher__num_awards'))
        for book in books:
            self.assertEqual(book.num_awards, book.publisher.num_awards)
    def test_mixed_type_annotation_date_interval(self):
        active = datetime.datetime(2015, 3, 20, 14, 0, 0)
        duration = datetime.timedelta(hours=1)
        expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
        Ticket.objects.create(active_at=active, duration=duration)
        t = Ticket.objects.annotate(
            expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
        ).first()
        self.assertEqual(t.expires, expires)
    def test_mixed_type_annotation_numbers(self):
        test = self.b1
        b = Book.objects.annotate(
            combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
        ).get(isbn=test.isbn)
        combined = int(test.pages + test.rating)
        self.assertEqual(b.combined, combined)
    def test_empty_expression_annotation(self):
        books = Book.objects.annotate(
            selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField())
        )
        self.assertEqual(len(books), Book.objects.count())
        self.assertTrue(all(not book.selected for book in books))
        books = Book.objects.annotate(
            selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField())
        )
        self.assertEqual(len(books), Book.objects.count())
        self.assertTrue(all(not book.selected for book in books))
    def test_annotate_with_aggregation(self):
        books = Book.objects.annotate(
            is_book=Value(1, output_field=IntegerField()),
            rating_count=Count('rating'))
        for book in books:
            self.assertEqual(book.is_book, 1)
            self.assertEqual(book.rating_count, 1)
    def test_aggregate_over_annotation(self):
        agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
        other_agg = Author.objects.aggregate(age_sum=Sum('age'))
        self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_distinct_on_with_annotation(self):
        store = Store.objects.create(
            name='test store',
            original_opening=datetime.datetime.now(),
            friday_night_closing=datetime.time(21, 00, 00),
        )
        names = [
            'Theodore Roosevelt',
            'Eleanor Roosevelt',
            'Franklin Roosevelt',
            'Ned Stark',
            'Catelyn Stark',
        ]
        for name in names:
            Employee.objects.create(
                store=store,
                first_name=name.split()[0],
                last_name=name.split()[1],
                age=30, salary=2000,
            )
        people = Employee.objects.annotate(
            name_lower=Lower('last_name'),
        ).distinct('name_lower')
        self.assertEqual({p.last_name for p in people}, {'Stark', 'Roosevelt'})
        self.assertEqual(len(people), 2)
        people2 = Employee.objects.annotate(
            test_alias=F('store__name'),
        ).distinct('test_alias')
        self.assertEqual(len(people2), 1)
        lengths = Employee.objects.annotate(
            name_len=Length('first_name'),
        ).distinct('name_len').values_list('name_len', flat=True)
        self.assertSequenceEqual(lengths, [3, 7, 8])
    def test_filter_annotation(self):
        books = Book.objects.annotate(
            is_book=Value(1, output_field=IntegerField())
        ).filter(is_book=1)
        for book in books:
            self.assertEqual(book.is_book, 1)
    def test_filter_annotation_with_f(self):
        books = Book.objects.annotate(
            other_rating=F('rating')
        ).filter(other_rating=3.5)
        for book in books:
            self.assertEqual(book.other_rating, 3.5)
    def test_filter_annotation_with_double_f(self):
        books = Book.objects.annotate(
            other_rating=F('rating')
        ).filter(other_rating=F('rating'))
        for book in books:
            self.assertEqual(book.other_rating, book.rating)
    def test_filter_agg_with_double_f(self):
        books = Book.objects.annotate(
            sum_rating=Sum('rating')
        ).filter(sum_rating=F('sum_rating'))
        for book in books:
            self.assertEqual(book.sum_rating, book.rating)
    def test_filter_wrong_annotation(self):
        with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
            list(Book.objects.annotate(
                sum_rating=Sum('rating')
            ).filter(sum_rating=F('nope')))
    def test_decimal_annotation(self):
        salary = Decimal(10) ** -Employee._meta.get_field('salary').decimal_places
        Employee.objects.create(
            first_name='Max',
            last_name='Paine',
            store=Store.objects.first(),
            age=23,
            salary=salary,
        )
        self.assertEqual(
            Employee.objects.annotate(new_salary=F('salary') / 10).get().new_salary,
            salary / 10,
        )
    def test_filter_decimal_annotation(self):
        qs = Book.objects.annotate(new_price=F('price') + 1).filter(new_price=Decimal(31)).values_list('new_price')
        self.assertEqual(qs.get(), (Decimal(31),))
    def test_combined_annotation_commutative(self):
        book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
        book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
        self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
        book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
        book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
        self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
    def test_update_with_annotation(self):
        book_preupdate = Book.objects.get(pk=self.b2.pk)
        Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
        book_postupdate = Book.objects.get(pk=self.b2.pk)
        self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
    def test_annotation_with_m2m(self):
        books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
        self.assertEqual(books[0].author_age, 34)
        self.assertEqual(books[1].author_age, 35)
    def test_annotation_reverse_m2m(self):
        books = Book.objects.annotate(
            store_name=F('store__name'),
        ).filter(
            name='Practical Django Projects',
        ).order_by('store_name')
        self.assertQuerysetEqual(
            books, [
                'Amazon.com',
                'Books.com',
                'Mamma and Pappa\'s Books'
            ],
            lambda b: b.store_name
        )
    def test_values_annotation(self):
        # annotate references a field in values()
        qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
        book = qs.get(pk=self.b1.pk)
        self.assertEqual(book['rating'] - 1, book['other_rating'])
        # filter refs the annotated value
        book = qs.get(other_rating=4)
        self.assertEqual(book['other_rating'], 4)
        # can annotate an existing values with a new field
        book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
        self.assertEqual(book['other_rating'], 4)
        self.assertEqual(book['other_isbn'], '155860191')
    def test_values_with_pk_annotation(self):
        # annotate references a field in values() with pk
        publishers = Publisher.objects.values('id', 'book__rating').annotate(total=Sum('book__rating'))
        for publisher in publishers.filter(pk=self.p1.pk):
            self.assertEqual(publisher['book__rating'], publisher['total'])
    def test_defer_annotation(self):
        qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
        with self.assertNumQueries(2):
            book = qs.get(other_rating=4)
            self.assertEqual(book.rating, 5)
            self.assertEqual(book.other_rating, 4)
        with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"):
            book = qs.defer('other_rating').get(other_rating=4)
    def test_mti_annotations(self):
        d = DepartmentStore.objects.create(
            name='Angus & Robinson',
            original_opening=datetime.date(2014, 3, 8),
            friday_night_closing=datetime.time(21, 00, 00),
            chain='Westfield'
        )
        books = Book.objects.filter(rating__gt=4)
        for b in books:
            d.books.add(b)
        qs = DepartmentStore.objects.annotate(
            other_name=F('name'),
            other_chain=F('chain'),
            is_open=Value(True, BooleanField()),
            book_isbn=F('books__isbn')
        ).order_by('book_isbn').filter(chain='Westfield')
        self.assertQuerysetEqual(
            qs, [
                ('Angus & Robinson', 'Westfield', True, '155860191'),
                ('Angus & Robinson', 'Westfield', True, '159059725')
            ],
            lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
        )
    def test_null_annotation(self):
        book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
        self.assertIsNone(book.no_value)
    def test_order_by_annotation(self):
        authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
        self.assertQuerysetEqual(
            authors, [
                25, 29, 29, 34, 35, 37, 45, 46, 57,
            ],
            lambda a: a.other_age
        )
    def test_order_by_aggregate(self):
        authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
        self.assertQuerysetEqual(
            authors, [
                (25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
            ],
            lambda a: (a['age'], a['age_count'])
        )
    def test_annotate_exists(self):
        authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
        self.assertFalse(authors.exists())
    def test_column_field_ordering(self):
        store = Store.objects.first()
        Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
                                store=store, age=23, salary=Decimal(50000.00))
        Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
                                store=store, age=18, salary=Decimal(40000.00))
        qs = Employee.objects.extra(
            select={'random_value': '42'}
        ).select_related('store').annotate(
            annotated_value=Value(17, output_field=IntegerField())
        )
        rows = [
            (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
            (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
        ]
        self.assertQuerysetEqual(
            qs.order_by('id'), rows,
            lambda e: (
                e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
                e.salary, e.store.name, e.annotated_value))
    def test_column_field_ordering_with_deferred(self):
        store = Store.objects.first()
        Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
                                store=store, age=23, salary=Decimal(50000.00))
        Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
                                store=store, age=18, salary=Decimal(40000.00))
        qs = Employee.objects.extra(
            select={'random_value': '42'}
        ).select_related('store').annotate(
            annotated_value=Value(17, output_field=IntegerField())
        )
        rows = [
            (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
            (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
        ]
        # and we respect deferred columns!
        self.assertQuerysetEqual(
            qs.defer('age').order_by('id'), rows,
            lambda e: (
                e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
                e.salary, e.store.name, e.annotated_value))
    @cxOracle_py3_bug
    def test_custom_functions(self):
        Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
        Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
        Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
        Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
        qs = Company.objects.annotate(
            tagline=Func(
                F('motto'),
                F('ticker_name'),
                F('description'),
                Value('No Tag'),
                function='COALESCE'
            )
        ).order_by('name')
        self.assertQuerysetEqual(
            qs, [
                ('Apple', 'APPL'),
                ('Django Software Foundation', 'No Tag'),
                ('Google', 'Do No Evil'),
                ('Yahoo', 'Internet Company')
            ],
            lambda c: (c.name, c.tagline)
        )
    @cxOracle_py3_bug
    def test_custom_functions_can_ref_other_functions(self):
        Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
        Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
        Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
        Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
        class Lower(Func):
            function = 'LOWER'
        qs = Company.objects.annotate(
            tagline=Func(
                F('motto'),
                F('ticker_name'),
                F('description'),
                Value('No Tag'),
                function='COALESCE',
            )
        ).annotate(
            tagline_lower=Lower(F('tagline'), output_field=CharField())
        ).order_by('name')
        # LOWER function supported by:
        # oracle, postgres, mysql, sqlite, sqlserver
        self.assertQuerysetEqual(
            qs, [
                ('Apple', 'APPL'.lower()),
                ('Django Software Foundation', 'No Tag'.lower()),
                ('Google', 'Do No Evil'.lower()),
                ('Yahoo', 'Internet Company'.lower())
            ],
            lambda c: (c.name, c.tagline_lower)
        )
    def test_boolean_value_annotation(self):
        books = Book.objects.annotate(
            is_book=Value(True, output_field=BooleanField()),
            is_pony=Value(False, output_field=BooleanField()),
            is_none=Value(None, output_field=BooleanField(null=True)),
            is_none_old=Value(None, output_field=NullBooleanField()),
        )
        self.assertGreater(len(books), 0)
        for book in books:
            self.assertIs(book.is_book, True)
            self.assertIs(book.is_pony, False)
            self.assertIsNone(book.is_none)
            self.assertIsNone(book.is_none_old)
    def test_annotation_in_f_grouped_by_annotation(self):
        qs = (
            Publisher.objects.annotate(multiplier=Value(3))
            # group by option => sum of value * multiplier
            .values('name')
            .annotate(multiplied_value_sum=Sum(F('multiplier') * F('num_awards')))
            .order_by()
        )
        self.assertCountEqual(
            qs, [
                {'multiplied_value_sum': 9, 'name': 'Apress'},
                {'multiplied_value_sum': 0, 'name': "Jonno's House of Books"},
                {'multiplied_value_sum': 27, 'name': 'Morgan Kaufmann'},
                {'multiplied_value_sum': 21, 'name': 'Prentice Hall'},
                {'multiplied_value_sum': 3, 'name': 'Sams'},
            ]
        )
    def test_arguments_must_be_expressions(self):
        msg = 'QuerySet.annotate() received non-expression(s): %s.'
        with self.assertRaisesMessage(TypeError, msg % BooleanField()):
            Book.objects.annotate(BooleanField())
        with self.assertRaisesMessage(TypeError, msg % True):
            Book.objects.annotate(is_book=True)
        with self.assertRaisesMessage(TypeError, msg % ', '.join([str(BooleanField()), 'True'])):
            Book.objects.annotate(BooleanField(), Value(False), is_book=True)
| true | true |
f7f8475cb28bcec67b527f653609827444f4b7f3 | 16,077 | py | Python | swagger_to/swagger.py | koji8y/swagger-to | 8c9201a71220b183aa55b10d61ec322008633f58 | [
"MIT"
] | null | null | null | swagger_to/swagger.py | koji8y/swagger-to | 8c9201a71220b183aa55b10d61ec322008633f58 | [
"MIT"
] | null | null | null | swagger_to/swagger.py | koji8y/swagger-to | 8c9201a71220b183aa55b10d61ec322008633f58 | [
"MIT"
] | null | null | null | """Parse Swagger spec."""
import collections
from enum import Enum
import pathlib
from typing import List, Optional, MutableMapping, Any, Set, Tuple, Union # pylint: disable=unused-import
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
# pylint: disable=missing-docstring,too-many-instance-attributes,too-many-locals,too-many-ancestors,too-many-branches
class OptionKey(Enum):
    """Key names for parse options."""
    # NOTE: member names keep the historical spelling "Absense"; renaming them
    # would break callers that reference these enum members.
    # Do not report an error when a method lacks an 'operationId'.
    PermitAbsenseOfOperationId = "PermitAbsenseOfOperationId"
    # Do not require an explicit 'format' on 'number'/'integer' types.
    PermitTypeWithoutOptionalFormat = "PermitTypeWithoutOptionalFormat"
    # Do not require a tag 'name' when the spec defines no tags at all.
    PermitAbsenseOfTagNameIfNoTagsExist = "PermitAbsenseOfTagNameIfNoTagsExist"
# Active parse options: by default be lenient about a missing operationId,
# missing number/integer formats, and a missing tag name when no tags exist.
parse_options: Set[OptionKey] = {
    OptionKey.PermitAbsenseOfOperationId,
    OptionKey.PermitTypeWithoutOptionalFormat,
    OptionKey.PermitAbsenseOfTagNameIfNoTagsExist,
}
class RawDict:
    """Represent a raw dictionary from a Swagger spec file."""
    def __init__(self, adict: Optional[MutableMapping[str, Any]] = None, source: str = '',
                 lineno: int = 0) -> None:
        """
        Initialize with the given values.

        :param adict: underlying mapping; a fresh OrderedDict is created when omitted
        :param source: path of the spec file the mapping came from
        :param lineno: line in the source file where the mapping starts
        """
        # The previous default ``adict=collections.OrderedDict()`` was a shared
        # mutable default: every no-arg RawDict aliased the *same* dictionary, so
        # a write through one instance polluted all of them. Use a None sentinel.
        self.adict = collections.OrderedDict() if adict is None else adict
        self.source = source
        self.lineno = lineno
class Typedef:
    """Represent a type definition in a Swagger spec."""
    def __init__(self):
        """Initialize with defaults."""
        # value of '$ref' when the definition is a reference (empty otherwise)
        self.ref = ''
        # 'description' text, stripped of surrounding whitespace
        self.description = ''
        # JSON-schema 'type' (e.g. 'object', 'array', 'number', 'integer')
        self.type = ''
        # 'format' qualifier of the type (e.g. 'int32', 'double')
        self.format = ''
        # regex 'pattern' constraint, if any
        self.pattern = ''
        self.properties = collections.OrderedDict() # type: MutableMapping[str, Typedef]
        self.required = [] # type: List[str]
        self.items = None # type: Optional[Typedef]
        # 'additionalProperties' may be either a schema or a boolean in Swagger;
        # at most one of the two attributes below is set.
        self.additional_properties = None # type: Optional[Typedef]
        self.additional_properties_in_bool = None # type: Optional[bool]
        # line in the spec file where this definition starts
        self.__lineno__ = 0
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
class Definition:
    """Represent an identifiable data type from the Swagger spec."""
    def __init__(self):
        """Initialize with defaults."""
        # key of this definition in the spec's 'definitions' mapping
        self.identifier = ''
        # parsed schema of the definition
        self.typedef = None # type: Optional[Typedef]
        # back-reference to the spec that contains this definition
        self.swagger = None # type: Optional[Swagger]
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
class Parameter:
    """Represent a parameter of a method in Swagger spec."""
    def __init__(self):
        """Initialize with defaults."""
        # back-reference to the method that declares this parameter
        self.method = None # type: Optional[Method]
        self.name = ''
        # value of 'in' (e.g. 'query', 'path', 'body')
        self.in_what = ''
        self.description = ''
        self.required = False
        self.type = ''
        self.format = ''
        self.pattern = ''
        # parsed 'schema'; mandatory for parameters with in_what == 'body'
        self.schema = None # type: Optional[Typedef]
        # value of 'default', recorded as-is from the spec (may be any YAML value)
        self.default_value = None # type: Optional[Any]
        # value of '$ref' when the parameter is a reference
        self.ref = ''
        # line in the spec file where this parameter starts
        self.__lineno__ = 0
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
class Response:
    """Represent an endpoint response in Swagger spec."""
    def __init__(self):
        """Initialize with defaults."""
        # HTTP status code as a string (e.g. '200'); set by the caller (_parse_method)
        self.code = ''
        self.description = ''
        # parsed 'schema' of the response payload, if one was given
        self.schema = None # type: Optional[Typedef]
        self.type = ''
        self.format = ''
        self.pattern = ''
        # line in the spec file where this response starts
        self.__lineno__ = 0
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
class Method:
    """Represent an endpoint method in Swagger spec."""
    def __init__(self):
        """Initialize with defaults."""
        # HTTP verb key under the path (e.g. 'get'); set by the caller (_parse_path)
        self.identifier = ''
        # value of 'operationId' (may be empty when the parse options permit it)
        self.operation_id = ''
        self.tags = [] # type: List[str]
        self.description = ''
        self.parameters = [] # type: List[Parameter]
        # responses keyed by their stringified status code
        self.responses = collections.OrderedDict() # type: MutableMapping[str, Response]
        # back-reference to the path that contains this method
        self.path = None # type: Optional[Path]
        self.produces = [] # type: List[str]
        self.consumes = [] # type: List[str]
        # value of the 'x-swagger-to-skip' vendor extension
        self.x_swagger_to_skip = False
        # line in the spec file where this method starts
        self.__lineno__ = 0
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
class Path:
    """Represent an endpoint path in Swagger spec."""
    def __init__(self):
        """Initialize with defaults."""
        # path template, i.e. the key under 'paths'; set by the caller (parse_yaml)
        self.identifier = ''
        # only methods that parsed without errors are appended here
        self.methods = [] # type: List[Method]
        # back-reference to the spec that contains this path
        self.swagger = None # type: Optional[Swagger]
        # line in the spec file where this path starts
        self.__lineno__ = 0
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
class Swagger:
    """Represent a parsed Swagger specification."""
    def __init__(self):
        """Initialize with defaults."""
        # taken from the 'name' of the first tag in the spec (see parse_yaml)
        self.name = ""
        # value of 'basePath'
        self.base_path = ""
        # free-text description; not populated by the parsing code visible here
        self.description = ""
        self.paths = collections.OrderedDict() # type: MutableMapping[str, Path]
        self.definitions = collections.OrderedDict() # type: MutableMapping[str, Definition]
        self.parameters = collections.OrderedDict() # type: MutableMapping[str, Parameter]
        # original specification dictionary, if available; not deep-copied, do not modify
        self.raw_dict = None # type: Optional[RawDict]
def _parse_typedef(raw_dict: RawDict) -> Tuple[Typedef, List[str]]:
    """
    Parse the type definition from the raw dictionary in the Swagger spec.
    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed type definition, parsing errors if any)
    """
    adict = raw_dict.adict
    typedef = Typedef()
    typedef.ref = adict.get('$ref', '')
    typedef.description = adict.get('description', '').strip()
    typedef.type = adict.get('type', '')
    typedef.format = adict.get('format', '')
    typedef.pattern = adict.get('pattern', '')
    typedef.__lineno__ = raw_dict.lineno
    errors = [] # type: List[str]
    # recursively parse nested property schemas; prefix their errors with the property name
    for prop_name, prop_dict in adict.get('properties', RawDict()).adict.items():
        prop_typedef, prop_errors = _parse_typedef(raw_dict=prop_dict)
        errors.extend(['in property {!r}: {}'.format(prop_name, error) for error in prop_errors])
        typedef.properties[prop_name] = prop_typedef
    typedef.required = adict.get('required', [])
    # check that all the required are well-defined
    for prop_name in typedef.required:
        if prop_name not in typedef.properties:
            errors.append("required property not defined: {!r}".format(prop_name))
    # 'additionalProperties' is either a boolean or a nested schema in Swagger
    if 'additionalProperties' in adict:
        add_prop_dict = adict['additionalProperties']
        if isinstance(add_prop_dict, bool):
            typedef.additional_properties_in_bool = add_prop_dict
        else:
            add_prop_typedef, add_prop_errors = _parse_typedef(raw_dict=add_prop_dict)
            errors.extend(['in additionalProperties: {}'.format(error) for error in add_prop_errors])
            typedef.additional_properties = add_prop_typedef
    # 'items' carries the element schema of an array type
    if 'items' in adict:
        items_dict = adict['items']
        items_typedef, items_errors = _parse_typedef(raw_dict=items_dict)
        errors.extend(['in items: {}'.format(error) for error in items_errors])
        typedef.items = items_typedef
    # numeric types are expected to carry one of the known formats,
    # unless PermitTypeWithoutOptionalFormat relaxes the check
    if typedef.type == 'number':
        if typedef.format not in ['float', 'double']:
            if OptionKey.PermitTypeWithoutOptionalFormat not in parse_options:
                errors.append("Unexpected format for type 'number': {!r}".format(typedef.format))
    elif typedef.type == 'integer':
        if typedef.format not in ['int32', 'int64']:
            if OptionKey.PermitTypeWithoutOptionalFormat not in parse_options:
                errors.append("Unexpected format for type 'integer': {!r}".format(typedef.format))
    typedef.raw_dict = raw_dict
    return typedef, errors
def _parse_parameter(raw_dict: RawDict) -> Tuple[Parameter, List[str]]:
    """
    Parse a parameter from the raw dictionary of the Swagger spec.
    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed parameter, parsing errors if any)
    """
    adict = raw_dict.adict
    param = Parameter()
    param.name = adict.get('name', '')
    param.in_what = adict.get('in', '')
    param.description = adict.get('description', '').strip()
    param.required = adict.get('required', False)
    param.type = adict.get('type', '')
    param.format = adict.get('format', '')
    param.pattern = adict.get('pattern', '')
    param.ref = adict.get('$ref', '')
    param.__lineno__ = raw_dict.lineno
    errors = [] # type: List[str]
    if 'schema' in adict:
        schema_dict = adict['schema']
        typedef, schema_errors = _parse_typedef(raw_dict=schema_dict)
        param.schema = typedef
        errors.extend(['in schema: {}'.format(error) for error in schema_errors])
    param.raw_dict = raw_dict
    # Swagger requires body parameters to define their payload schema.
    if param.in_what == 'body' and param.schema is None:
        errors.append('parameter in body, but no schema')
    if 'default' in adict:
        # the default is accepted and recorded as-is; it is not validated against the type
        param.default_value = adict['default']
    return param, errors
def _parse_response(raw_dict: RawDict) -> Tuple[Response, List[str]]:
    """
    Parse an endpoint response from the raw dictionary of the Swagger spec.
    The status code is not part of ``raw_dict``; the caller sets ``Response.code``.
    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed response, parsing errors if any)
    """
    adict = raw_dict.adict
    resp = Response()
    errors = [] # type: List[str]
    resp.description = adict.get('description', '').strip()
    resp.type = adict.get('type', '')
    resp.format = adict.get('format', '')
    resp.pattern = adict.get('pattern', '')
    resp.__lineno__ = raw_dict.lineno
    if 'schema' in adict:
        schema_dict = adict['schema']
        typedef, schema_errors = _parse_typedef(raw_dict=schema_dict)
        resp.schema = typedef
        errors.extend(['in schema: {}'.format(error) for error in schema_errors])
    resp.raw_dict = raw_dict
    return resp, errors
def _parse_method(raw_dict: RawDict) -> Tuple[Method, List[str]]:
    """
    Parse an endpoint method from the raw dictionary of the Swagger spec.
    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed method, parsing errors if any)
    """
    mth = Method()
    errors = [] # type: List[str]
    adict = raw_dict.adict
    mth.operation_id = adict.get('operationId', '')
    # a missing operationId is only an error unless explicitly permitted by the options
    if mth.operation_id == '':
        if OptionKey.PermitAbsenseOfOperationId not in parse_options:
            errors.append('missing operationId')
    mth.tags = adict.get('tags', [])
    mth.description = adict.get('description', '').strip()
    mth.x_swagger_to_skip = adict.get('x-swagger-to-skip', False)
    mth.produces = adict.get('produces', [])
    mth.consumes = adict.get('consumes', [])
    mth.__lineno__ = raw_dict.lineno
    # parameters are reported by their position and name for easier debugging
    for i, param_dict in enumerate(adict.get('parameters', [])):
        param, param_errors = _parse_parameter(raw_dict=param_dict)
        errors.extend(['in parameter {} (name: {!r}): {}'.format(i, param.name, error) for error in param_errors])
        param.method = mth
        mth.parameters.append(param)
    # responses are keyed by the stringified status code (YAML may parse it as int)
    for resp_code, resp_dict in adict.get('responses', RawDict()).adict.items():
        resp, resp_errors = _parse_response(raw_dict=resp_dict)
        errors.extend(['in response {!r}: {}'.format(resp_code, error) for error in resp_errors])
        resp.code = resp_code
        mth.responses[str(resp_code)] = resp
    mth.raw_dict = raw_dict
    return mth, errors
def _parse_path(raw_dict: RawDict) -> Tuple[Path, List[str]]:
    """
    Parse an endpoint path from the dictionary.
    The path identifier itself is not part of ``raw_dict``; the caller is
    expected to set ``Path.identifier``.
    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed path, parsing errors if any)
    """
    pth = Path()
    errors = [] # type: List[str]
    for method_id, method_dict in raw_dict.adict.items():
        method, method_errors = _parse_method(raw_dict=method_dict)
        method.identifier = method_id
        method.path = pth
        errors.extend(['in method {!r}: {}'.format(method_id, error) for error in method_errors])
        # methods that failed to parse are reported but not attached to the path
        if not method_errors:
            pth.methods.append(method)
    pth.raw_dict = raw_dict
    return pth, errors
def parse_yaml(stream: Any) -> Tuple[Swagger, List[str]]:
    """
    Parse the Swagger specification from the given text.

    :param stream: YAML representation of the Swagger spec satisfying file interface
    :return: (parsed Swagger specification, parsing errors if any)
    """
    # adapted from https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
    # and https://stackoverflow.com/questions/13319067/parsing-yaml-return-with-line-number
    object_pairs_hook = collections.OrderedDict

    # Not every file-like object has a 'name' attribute (e.g. io.StringIO);
    # fall back to a placeholder so RawDict.source is always well-defined.
    source_name = getattr(stream, 'name', '<unknown>')

    class OrderedLoader(yaml.SafeLoader):
        def compose_node(self, parent, index):
            # the line number where the previous token has ended (plus empty lines)
            node = Composer.compose_node(self, parent, index)
            node.__lineno__ = self.line + 1
            return node

    def construct_mapping(loader, node, deep=False):
        loader.flatten_mapping(node)
        mapping = Constructor.construct_pairs(loader, node, deep=deep)
        ordered_hook = object_pairs_hook(mapping)
        return RawDict(adict=ordered_hook, source=source_name, lineno=node.__lineno__)

    OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
    raw_dict = yaml.load(stream, OrderedLoader)
    swagger = Swagger()
    errors = []  # type: List[str]
    adict = raw_dict.adict
    tag_exists: bool = False
    if 'tags' in adict:
        if len(adict['tags']) > 0:
            tag_exists = True
        # The last tag carrying a 'name' entry wins.
        for tag in adict['tags']:
            for key, value in tag.adict.items():
                if key == 'name':
                    swagger.name = value
    if swagger.name == '':
        if not (OptionKey.PermitAbsenseOfTagNameIfNoTagsExist in parse_options
                and not tag_exists):
            errors.append('missing tag "name" in the swagger specification')
    swagger.base_path = adict.get('basePath', '')
    for path_id, path_dict in adict.get('paths', RawDict()).adict.items():
        path, path_errors = _parse_path(raw_dict=path_dict)
        path.identifier = path_id
        path.swagger = swagger
        errors.extend(['in path {!r}: {}'.format(path_id, error) for error in path_errors])
        # Only keep cleanly parsed entries; their errors are still reported.
        if not path_errors:
            swagger.paths[path_id] = path
    for def_id, def_dict in adict.get('definitions', RawDict()).adict.items():
        typedef, def_errors = _parse_typedef(raw_dict=def_dict)
        errors.extend(['in definition {!r}: {}'.format(def_id, error) for error in def_errors])
        adef = Definition()
        adef.swagger = swagger
        adef.identifier = def_id
        adef.typedef = typedef
        if not def_errors:
            swagger.definitions[def_id] = adef
    for param_id, param_dict in adict.get('parameters', RawDict()).adict.items():
        param, param_errors = _parse_parameter(raw_dict=param_dict)
        errors.extend(['in parameter {!r}: {}'.format(param_id, error) for error in param_errors])
        if not param_errors:
            swagger.parameters[param_id] = param
    swagger.raw_dict = raw_dict
    return swagger, errors
def parse_yaml_file(path: Union[str, pathlib.Path]) -> Tuple[Swagger, List[str]]:
    """
    Parse the Swagger specification stored in a file.

    :param path: path to the .yaml file
    :return: (parsed Swagger specification, parsing errors if any)
    """
    with open(str(path), 'rt') as yaml_stream:
        return parse_yaml(stream=yaml_stream)
| 34.426124 | 119 | 0.64981 |
import collections
from enum import Enum
import pathlib
from typing import List, Optional, MutableMapping, Any, Set, Tuple, Union
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
class OptionKey(Enum):
    """Keys toggling leniency of the Swagger parser."""

    # Do not report a missing 'operationId' on a method.
    PermitAbsenseOfOperationId = "PermitAbsenseOfOperationId"
    # Do not require a 'format' for 'number'/'integer' types.
    PermitTypeWithoutOptionalFormat = "PermitTypeWithoutOptionalFormat"
    # Do not require a tag 'name' when the spec declares no tags at all.
    PermitAbsenseOfTagNameIfNoTagsExist = "PermitAbsenseOfTagNameIfNoTagsExist"
# Default set of lenient parsing options: tolerate specs that omit
# operationId, optional type formats, or tag names.
parse_options: Set[OptionKey] = set(
    (OptionKey.PermitAbsenseOfOperationId,
     OptionKey.PermitTypeWithoutOptionalFormat,
     OptionKey.PermitAbsenseOfTagNameIfNoTagsExist))
class RawDict:
    """A parsed YAML mapping together with its source location.

    :param adict: the underlying ordered mapping (a fresh empty
        ``OrderedDict`` is created when omitted)
    :param source: name of the file/stream the mapping was read from
    :param lineno: 1-based line number where the mapping starts
    """

    def __init__(self, adict: Optional[MutableMapping[str, Any]] = None, source: str = '',
                 lineno: int = 0) -> None:
        # The original default `adict=collections.OrderedDict()` was a shared
        # mutable default: every `RawDict()` aliased the *same* dict, so a
        # mutation through one instance leaked into all others. Use the
        # None-sentinel idiom to give each instance its own mapping.
        self.adict = adict if adict is not None else collections.OrderedDict()
        self.source = source
        self.lineno = lineno
class Typedef:
    """A type definition of the Swagger spec: an entry of ``definitions``
    or an inline ``schema``."""

    def __init__(self):
        self.ref = ''          # '$ref' to another definition, if any
        self.description = ''
        self.type = ''         # Swagger type (e.g. 'string', 'integer')
        self.format = ''       # optional type format (e.g. 'int32')
        self.pattern = ''      # regex constraining string values
        self.properties = collections.OrderedDict()  # name -> nested Typedef
        self.required = []     # names of required properties
        self.items = None      # item Typedef, for array types
        self.additional_properties = None          # Typedef, if a mapping
        self.additional_properties_in_bool = None  # bool form, if a boolean
        self.__lineno__ = 0    # line in the source YAML
        self.raw_dict = None   # underlying RawDict
class Definition:
    """A named entry of the top-level ``definitions`` section."""

    def __init__(self):
        self.identifier = ''  # key under 'definitions'
        self.typedef = None   # parsed Typedef
        self.swagger = None   # back-reference to the owning Swagger
        self.raw_dict = None  # underlying RawDict
class Parameter:
    """A parameter of an endpoint method, or a reusable top-level one."""

    def __init__(self):
        self.method = None         # back-reference to the owning Method
        self.name = ''
        self.in_what = ''          # location: 'query', 'path', 'body', ...
        self.description = ''
        self.required = False
        self.type = ''
        self.format = ''
        self.pattern = ''
        self.schema = None         # Typedef; mandatory for 'body' parameters
        self.default_value = None  # value of 'default', if present
        self.ref = ''              # '$ref' to a reusable parameter
        self.__lineno__ = 0        # line in the source YAML
        self.raw_dict = None       # underlying RawDict
class Response:
    """A single response (status-code entry) of an endpoint method."""

    def __init__(self):
        self.code = ''        # status code key (e.g. '200')
        self.description = ''
        self.schema = None    # Typedef of the response body, if any
        self.type = ''
        self.format = ''
        self.pattern = ''
        self.__lineno__ = 0   # line in the source YAML
        self.raw_dict = None  # underlying RawDict
class Method:
    """An HTTP operation attached to an endpoint path."""

    def __init__(self):
        self.identifier = ''    # HTTP verb key (e.g. 'get', 'post')
        self.operation_id = ''
        self.tags = []
        self.description = ''
        self.parameters = []    # list of Parameter
        self.responses = collections.OrderedDict()  # status code -> Response
        self.path = None        # back-reference to the owning Path
        self.produces = []
        self.consumes = []
        self.x_swagger_to_skip = False  # vendor flag 'x-swagger-to-skip'
        self.__lineno__ = 0     # line in the source YAML
        self.raw_dict = None    # underlying RawDict
class Path:
    """An endpoint path together with its successfully parsed methods."""

    def __init__(self):
        self.identifier = ''  # path template key from the 'paths' section
        self.methods = []     # list of Method
        self.swagger = None   # back-reference to the owning Swagger
        self.__lineno__ = 0   # line in the source YAML
        self.raw_dict = None  # underlying RawDict
class Swagger:
    """Root object of a parsed Swagger specification."""

    def __init__(self):
        self.name = ""       # taken from a tag's 'name' entry
        self.base_path = ""  # 'basePath' of the spec
        self.description = ""
        self.paths = collections.OrderedDict()        # path id -> Path
        self.definitions = collections.OrderedDict()  # def id -> Definition
        self.parameters = collections.OrderedDict()   # param id -> Parameter
        self.raw_dict = None  # underlying RawDict
def _parse_typedef(raw_dict: RawDict) -> Tuple[Typedef, List[str]]:
    """
    Parse a type definition from the dictionary.

    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed typedef, parsing errors if any)
    """
    adict = raw_dict.adict
    typedef = Typedef()
    typedef.ref = adict.get('$ref', '')
    typedef.description = adict.get('description', '').strip()
    typedef.type = adict.get('type', '')
    typedef.format = adict.get('format', '')
    typedef.pattern = adict.get('pattern', '')
    typedef.__lineno__ = raw_dict.lineno
    errors = []  # type: List[str]
    # Recursively parse the nested property schemas.
    for prop_name, prop_dict in adict.get('properties', RawDict()).adict.items():
        prop_typedef, prop_errors = _parse_typedef(raw_dict=prop_dict)
        errors.extend(['in property {!r}: {}'.format(prop_name, error) for error in prop_errors])
        typedef.properties[prop_name] = prop_typedef
    typedef.required = adict.get('required', [])
    # Every name listed in 'required' must be a defined property.
    for prop_name in typedef.required:
        if prop_name not in typedef.properties:
            errors.append("required property not defined: {!r}".format(prop_name))
    if 'additionalProperties' in adict:
        add_prop_dict = adict['additionalProperties']
        # 'additionalProperties' may be either a boolean or a schema mapping.
        if isinstance(add_prop_dict, bool):
            typedef.additional_properties_in_bool = add_prop_dict
        else:
            add_prop_typedef, add_prop_errors = _parse_typedef(raw_dict=add_prop_dict)
            errors.extend(['in additionalProperties: {}'.format(error) for error in add_prop_errors])
            typedef.additional_properties = add_prop_typedef
    if 'items' in adict:
        items_dict = adict['items']
        items_typedef, items_errors = _parse_typedef(raw_dict=items_dict)
        errors.extend(['in items: {}'.format(error) for error in items_errors])
        typedef.items = items_typedef
    # Validate the expected 'format' values unless relaxed via parse_options.
    if typedef.type == 'number':
        if typedef.format not in ['float', 'double']:
            if OptionKey.PermitTypeWithoutOptionalFormat not in parse_options:
                errors.append("Unexpected format for type 'number': {!r}".format(typedef.format))
    elif typedef.type == 'integer':
        if typedef.format not in ['int32', 'int64']:
            if OptionKey.PermitTypeWithoutOptionalFormat not in parse_options:
                errors.append("Unexpected format for type 'integer': {!r}".format(typedef.format))
    typedef.raw_dict = raw_dict
    return typedef, errors
def _parse_parameter(raw_dict: RawDict) -> Tuple[Parameter, List[str]]:
    """
    Parse a method parameter from the dictionary.

    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed parameter, parsing errors if any)
    """
    adict = raw_dict.adict
    param = Parameter()
    param.name = adict.get('name', '')
    param.in_what = adict.get('in', '')
    param.description = adict.get('description', '').strip()
    param.required = adict.get('required', False)
    param.type = adict.get('type', '')
    param.format = adict.get('format', '')
    param.pattern = adict.get('pattern', '')
    param.ref = adict.get('$ref', '')
    param.__lineno__ = raw_dict.lineno
    errors = []  # type: List[str]
    if 'schema' in adict:
        schema_dict = adict['schema']
        typedef, schema_errors = _parse_typedef(raw_dict=schema_dict)
        param.schema = typedef
        errors.extend(['in schema: {}'.format(error) for error in schema_errors])
    param.raw_dict = raw_dict
    # A 'body' parameter must carry a schema describing the payload.
    if param.in_what == 'body' and param.schema is None:
        errors.append('parameter in body, but no schema')
    if 'default' in adict:
        param.default_value = adict['default']
    return param, errors
def _parse_response(raw_dict: RawDict) -> Tuple[Response, List[str]]:
    """
    Parse a single response definition from the dictionary.

    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed response, parsing errors if any)
    """
    data = raw_dict.adict
    response = Response()
    problems = []  # type: List[str]
    response.description = data.get('description', '').strip()
    response.type = data.get('type', '')
    response.format = data.get('format', '')
    response.pattern = data.get('pattern', '')
    response.__lineno__ = raw_dict.lineno
    if 'schema' in data:
        parsed_schema, schema_problems = _parse_typedef(raw_dict=data['schema'])
        response.schema = parsed_schema
        problems.extend('in schema: {}'.format(err) for err in schema_problems)
    response.raw_dict = raw_dict
    return response, problems
def _parse_method(raw_dict: RawDict) -> Tuple[Method, List[str]]:
    """
    Parse an endpoint method from the dictionary.

    :param raw_dict: raw dictionary of the Swagger spec
    :return: (parsed method, parsing errors if any)
    """
    mth = Method()
    errors = []  # type: List[str]
    adict = raw_dict.adict
    mth.operation_id = adict.get('operationId', '')
    if mth.operation_id == '':
        if OptionKey.PermitAbsenseOfOperationId not in parse_options:
            errors.append('missing operationId')
    mth.tags = adict.get('tags', [])
    mth.description = adict.get('description', '').strip()
    mth.x_swagger_to_skip = adict.get('x-swagger-to-skip', False)
    mth.produces = adict.get('produces', [])
    mth.consumes = adict.get('consumes', [])
    mth.__lineno__ = raw_dict.lineno
    for i, param_dict in enumerate(adict.get('parameters', [])):
        param, param_errors = _parse_parameter(raw_dict=param_dict)
        errors.extend(['in parameter {} (name: {!r}): {}'.format(i, param.name, error) for error in param_errors])
        param.method = mth
        mth.parameters.append(param)
    for resp_code, resp_dict in adict.get('responses', RawDict()).adict.items():
        resp, resp_errors = _parse_response(raw_dict=resp_dict)
        errors.extend(['in response {!r}: {}'.format(resp_code, error) for error in resp_errors])
        resp.code = resp_code
        # Response keys are normalized to strings (YAML may parse them as ints).
        mth.responses[str(resp_code)] = resp
    mth.raw_dict = raw_dict
    return mth, errors
def _parse_path(raw_dict: RawDict) -> Tuple[Path, List[str]]:
    """
    Parse an endpoint path from the dictionary.

    :param raw_dict: raw dictionary of the Swagger spec, mapping method
        identifiers (e.g. 'get', 'post') to method definitions
    :return: (parsed path, parsing errors if any)
    """
    pth = Path()
    errors = []  # type: List[str]
    for method_id, method_dict in raw_dict.adict.items():
        method, method_errors = _parse_method(raw_dict=method_dict)
        method.identifier = method_id
        method.path = pth
        errors.extend(['in method {!r}: {}'.format(method_id, error) for error in method_errors])
        # Only keep methods that parsed cleanly; their errors are still reported.
        if not method_errors:
            pth.methods.append(method)
    pth.raw_dict = raw_dict
    return pth, errors
def parse_yaml(stream: Any) -> Tuple[Swagger, List[str]]:
    """
    Parse the Swagger specification from the given text.

    :param stream: YAML representation of the Swagger spec satisfying file interface
    :return: (parsed Swagger specification, parsing errors if any)
    """
    object_pairs_hook = collections.OrderedDict

    # Not every file-like object has a 'name' attribute (e.g. io.StringIO);
    # fall back to a placeholder so RawDict.source is always well-defined.
    source_name = getattr(stream, 'name', '<unknown>')

    class OrderedLoader(yaml.SafeLoader):
        def compose_node(self, parent, index):
            # Record the 1-based line number on every composed node.
            node = Composer.compose_node(self, parent, index)
            node.__lineno__ = self.line + 1
            return node

    def construct_mapping(loader, node, deep=False):
        loader.flatten_mapping(node)
        mapping = Constructor.construct_pairs(loader, node, deep=deep)
        ordered_hook = object_pairs_hook(mapping)
        return RawDict(adict=ordered_hook, source=source_name, lineno=node.__lineno__)

    OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
    raw_dict = yaml.load(stream, OrderedLoader)
    swagger = Swagger()
    errors = []  # type: List[str]
    adict = raw_dict.adict
    tag_exists: bool = False
    if 'tags' in adict:
        if len(adict['tags']) > 0:
            tag_exists = True
        # The last tag carrying a 'name' entry wins.
        for tag in adict['tags']:
            for key, value in tag.adict.items():
                if key == 'name':
                    swagger.name = value
    if swagger.name == '':
        if not (OptionKey.PermitAbsenseOfTagNameIfNoTagsExist in parse_options
                and not tag_exists):
            errors.append('missing tag "name" in the swagger specification')
    swagger.base_path = adict.get('basePath', '')
    for path_id, path_dict in adict.get('paths', RawDict()).adict.items():
        path, path_errors = _parse_path(raw_dict=path_dict)
        path.identifier = path_id
        path.swagger = swagger
        errors.extend(['in path {!r}: {}'.format(path_id, error) for error in path_errors])
        # Only keep cleanly parsed entries; their errors are still reported.
        if not path_errors:
            swagger.paths[path_id] = path
    for def_id, def_dict in adict.get('definitions', RawDict()).adict.items():
        typedef, def_errors = _parse_typedef(raw_dict=def_dict)
        errors.extend(['in definition {!r}: {}'.format(def_id, error) for error in def_errors])
        adef = Definition()
        adef.swagger = swagger
        adef.identifier = def_id
        adef.typedef = typedef
        if not def_errors:
            swagger.definitions[def_id] = adef
    for param_id, param_dict in adict.get('parameters', RawDict()).adict.items():
        param, param_errors = _parse_parameter(raw_dict=param_dict)
        errors.extend(['in parameter {!r}: {}'.format(param_id, error) for error in param_errors])
        if not param_errors:
            swagger.parameters[param_id] = param
    swagger.raw_dict = raw_dict
    return swagger, errors
def parse_yaml_file(path: Union[str, pathlib.Path]) -> Tuple[Swagger, List[str]]:
    """
    Parse the Swagger specification from the given file.

    :param path: path to the .yaml file
    :return: (parsed Swagger specification, parsing errors if any)
    """
    with open(str(path), 'rt') as fid:
        return parse_yaml(stream=fid)
| true | true |
f7f8476ba6d8932d191686e45f693ff9d7952891 | 24,350 | py | Python | nova/virt/hyperv/vmutils.py | bopopescu/nova-22 | e94e289c663b37df2a12dafc9ceaecf8d04432a8 | [
"Apache-2.0"
] | 1 | 2019-06-19T17:56:41.000Z | 2019-06-19T17:56:41.000Z | nova/virt/hyperv/vmutils.py | bopopescu/nova-22 | e94e289c663b37df2a12dafc9ceaecf8d04432a8 | [
"Apache-2.0"
] | null | null | null | nova/virt/hyperv/vmutils.py | bopopescu/nova-22 | e94e289c663b37df2a12dafc9ceaecf8d04432a8 | [
"Apache-2.0"
] | 1 | 2020-07-24T06:47:54.000Z | 2020-07-24T06:47:54.000Z | # Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
    """Base exception for all Hyper-V driver errors."""

    def __init__(self, message=None):
        super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
    """Raised when a VHD resize operation fails."""

    def __init__(self, message=None):
        # Pass this class (not the parent) to super(), so that
        # HyperVException.__init__ is not skipped in the MRO.
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
    """Raised when the account running nova-compute lacks the Hyper-V
    permissions required to create or operate virtual machines."""

    def __init__(self, message=None):
        # Pass this class (not the parent) to super(), so that
        # HyperVException.__init__ is not skipped in the MRO.
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
    """Raised when a config drive format is not supported."""

    def __init__(self, message=None):
        # Pass this class (not the parent) to super(), so that
        # HyperVException.__init__ is not skipped in the MRO.
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_IDE_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_IDE_DVD_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = dict((v, k) for k, v in
self._vm_power_states_map.iteritems())
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
vm_names = [v.ElementName for v in
self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vm_names
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
enabled_state = self._enabled_states_map[si.EnabledState]
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
    def _lookup_vm_check(self, vm_name):
        """Return the VM WMI object, raising NotFound if it does not exist.

        :param vm_name: ElementName of the VM
        :raises exception.NotFound: if no VM with that name exists
        """
        vm = self._lookup_vm(vm_name)
        if not vm:
            raise exception.NotFound(_('VM not found: %s') % vm_name)
        return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug(_('Creating VM %s'), vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug(_('Setting memory for vm %s'), vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
(job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
return self._lookup_vm_check(vm_name)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
return [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)][0].path_()
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks_count(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
return len(volumes)
def _get_new_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
return self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
res_sub_type = self._DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._DVD_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
#Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.IDE_DISK:
res_sub_type = self._IDE_DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
#Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
#Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
#Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
#Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
#Invalid state for current operation (32775) typically means that
#the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug(_("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._IDE_DISK_RES_SUB_TYPE,
self._IDE_DVD_RES_SUB_TYPE]]
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"),
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(disk_path)
if physical_disk:
self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
return physical_disk
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
| 42.055268 | 79 | 0.618973 |
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(exception.NovaException):
    """Base exception for all Hyper-V driver errors."""

    def __init__(self, message=None):
        super(HyperVException, self).__init__(message)
class VHDResizeException(HyperVException):
    """Raised when a VHD resize operation fails."""
    def __init__(self, message=None):
        # Bug fix: the original called super(HyperVException, self), which
        # skips HyperVException.__init__ in the MRO; name this class itself.
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
    """Raised when the service account lacks Hyper-V permissions."""
    def __init__(self, message=None):
        # Bug fix: the original called super(HyperVException, self), which
        # skips HyperVException.__init__ in the MRO; name this class itself.
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
    """Raised for a config drive format this driver cannot handle."""
    def __init__(self, message=None):
        # Bug fix: the original called super(HyperVException, self), which
        # skips HyperVException.__init__ in the MRO; name this class itself.
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_IDE_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_IDE_DVD_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
    def __init__(self, host='.'):
        # Reverse map: WMI EnabledState code -> constants.HYPERV_VM_STATE_*.
        self._enabled_states_map = dict((v, k) for k, v in
                                        self._vm_power_states_map.iteritems())
        # wmi is only imported on win32 (see module top), so only connect
        # there; on other platforms the object is created without connections.
        if sys.platform == 'win32':
            self._init_hyperv_wmi_conn(host)
            self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instances(self):
vm_names = [v.ElementName for v in
self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vm_names
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
enabled_state = self._enabled_states_map[si.EnabledState]
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug(_('Creating VM %s'), vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug(_('Setting memory for vm %s'), vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
(job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
return self._lookup_vm_check(vm_name)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
return [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)][0].path_()
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks_count(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
return len(volumes)
def _get_new_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
return self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
res_sub_type = self._DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._DVD_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
drive.Parent = ctrller_path
drive.Address = drive_addr
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.IDE_DISK:
res_sub_type = self._IDE_DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
res.Parent = drive_path
res.Connection = [path]
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def set_vm_state(self, vm_name, req_state):
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug(_("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._IDE_DISK_RES_SUB_TYPE,
self._IDE_DVD_RES_SUB_TYPE]]
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
    def _wait_for_job(self, job_path):
        """Poll a WMI job until it finishes; raise on any failure state.

        :param job_path: WMI path of the job object to poll
        :return: the finished job object on success
        :raises HyperVException: when the job ends in a non-COMPLETED state
        """
        job = self._get_wmi_obj(job_path)
        # Busy-wait with a short sleep; the job object must be re-fetched
        # each iteration to observe state changes.
        while job.JobState == constants.WMI_JOB_STATE_RUNNING:
            time.sleep(0.1)
            job = self._get_wmi_obj(job_path)
        if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
            job_state = job.JobState
            # Msvm_ConcreteJob exposes error details as plain properties;
            # other job classes report errors via GetError() instead.
            if job.path().Class == "Msvm_ConcreteJob":
                err_sum_desc = job.ErrorSummaryDescription
                err_desc = job.ErrorDescription
                err_code = job.ErrorCode
                raise HyperVException(_("WMI job failed with status "
                                        "%(job_state)d. Error details: "
                                        "%(err_sum_desc)s - %(err_desc)s - "
                                        "Error code: %(err_code)d") %
                                      {'job_state': job_state,
                                       'err_sum_desc': err_sum_desc,
                                       'err_desc': err_desc,
                                       'err_code': err_code})
            else:
                (error, ret_val) = job.GetError()
                if not ret_val and error:
                    raise HyperVException(_("WMI job failed with status "
                                            "%(job_state)d. Error details: "
                                            "%(error)s") %
                                          {'job_state': job_state,
                                           'error': error})
                else:
                    raise HyperVException(_("WMI job failed with status "
                                            "%d. No error "
                                            "description available") %
                                          job_state)
        desc = job.Description
        elap = job.ElapsedTime
        LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"),
                  {'desc': desc, 'elap': elap})
        return job
    def _get_wmi_obj(self, path):
        # Normalize backslashes to forward slashes before connecting --
        # presumably required by the wmi moniker syntax; confirm against
        # the wmi module documentation.
        return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(disk_path)
if physical_disk:
self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
return physical_disk
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
| true | true |
f7f8476d880ca6fb2e644208a0320ec7f6e88bd7 | 2,031 | py | Python | python_research/preprocessing/attribute_profiles/max_tree/attribute_matrix_construction.py | myychal/hypernet | 778e9c1a2f27ab1c664bb6d8ea49c65d0c7bdade | [
"MIT"
] | null | null | null | python_research/preprocessing/attribute_profiles/max_tree/attribute_matrix_construction.py | myychal/hypernet | 778e9c1a2f27ab1c664bb6d8ea49c65d0c7bdade | [
"MIT"
] | null | null | null | python_research/preprocessing/attribute_profiles/max_tree/attribute_matrix_construction.py | myychal/hypernet | 778e9c1a2f27ab1c664bb6d8ea49c65d0c7bdade | [
"MIT"
] | null | null | null | import numpy as np
from .attributes_incrementally import StandardDeviation, LengthOfDiagonal, \
FirstHuMoment, Area
from ..utils.data_types import Pixel
def construct_area_matrix(image: np.ndarray) -> np.ndarray:
    """Build a matrix of fresh Area attribute objects, one per pixel.

    :param image: input image; only its shape is used here
    :return: object array of image.shape filled with independent Area()
    """
    # np.empty avoids the original np.ones allocation whose values were
    # immediately overwritten; every cell is assigned below.
    matrix = np.empty(image.shape, dtype=Area)
    # np.ndindex replaces the manual flatten + modulo/divide index math
    # and works for any rank, not just 2D.
    for index in np.ndindex(*image.shape):
        matrix[index] = Area()
    return matrix
def construct_std_dev_matrix(image: np.ndarray) -> np.ndarray:
    """Build a matrix of StandardDeviation attributes seeded per pixel.

    :param image: image of pixel intensities
    :return: object array of image.shape; the element at each index is
        StandardDeviation(value=image[index])
    """
    std_dev_matrix = np.empty(image.shape, dtype=StandardDeviation)
    # np.ndenumerate yields (index, value) pairs directly, replacing the
    # manual flatten + modulo/divide index arithmetic of the original.
    for index, pixel_value in np.ndenumerate(image):
        std_dev_matrix[index] = StandardDeviation(value=pixel_value)
    return std_dev_matrix
def construct_length_of_diagonal_matrix(image: np.ndarray) -> np.ndarray:
    """Build a matrix of LengthOfDiagonal attributes, one per pixel.

    Each attribute is initialized with its pixel's own coordinates,
    LengthOfDiagonal(x, x, y, y), i.e. a degenerate one-pixel extent.
    """
    matrix = np.empty(image.shape, dtype=LengthOfDiagonal)
    # np.ndindex replaces the manual index arithmetic of the original.
    for y, x in np.ndindex(*image.shape):
        matrix[y, x] = LengthOfDiagonal(x, x, y, y)
    return matrix
def construct_first_hu_moment_matrix(image: np.ndarray) -> np.ndarray:
    """Build a matrix of FirstHuMoment attributes from min-max normalized
    pixel values.

    :param image: image of pixel intensities
    :return: object array; element (y, x) is FirstHuMoment(Pixel(x, y, v))
        where v is image[y, x] rescaled to [0, 1] over the image's range
    """
    max_ = float(np.amax(image))
    min_ = float(np.amin(image))
    # Hoisted loop invariant. NOTE(review): a constant image makes this
    # zero and the division below raises ZeroDivisionError, exactly as in
    # the original -- confirm whether a guard is wanted.
    value_range = max_ - min_
    matrix = np.empty(image.shape, dtype=FirstHuMoment)
    # np.ndenumerate replaces the manual flatten + index arithmetic.
    for (y, x), pixel_value in np.ndenumerate(image):
        norm_pixel_value = (float(pixel_value) - min_) / value_range
        matrix[y, x] = FirstHuMoment(Pixel(x, y, norm_pixel_value))
    return matrix
# Registry mapping each attribute name to its matrix-constructor function.
matrix_constructs = {
    'area': construct_area_matrix,
    'stddev': construct_std_dev_matrix,
    'diagonal': construct_length_of_diagonal_matrix,
    'moment': construct_first_hu_moment_matrix
}
def construct_matrix(attribute_name: str, image: np.ndarray) -> np.ndarray:
    """Dispatch to the constructor registered for attribute_name.

    :raises KeyError: if attribute_name is not a registered attribute
    """
    return matrix_constructs[attribute_name](image)
| 33.295082 | 76 | 0.6903 | import numpy as np
from .attributes_incrementally import StandardDeviation, LengthOfDiagonal, \
FirstHuMoment, Area
from ..utils.data_types import Pixel
def construct_area_matrix(image: np.ndarray) -> np.ndarray:
matrix = np.ones(image.shape, dtype=Area)
image_width = image.shape[1]
for index, _ in enumerate(image.flatten()):
x = index % image_width
y = int(index / image_width)
matrix[y, x] = Area()
return matrix
def construct_std_dev_matrix(image: np.ndarray) -> np.ndarray:
image_width = image.shape[1]
std_dev_matrix = np.zeros(image.shape, dtype=StandardDeviation)
for index, pixel_value in enumerate(image.flatten()):
x = index % image_width
y = int(index / image_width)
std_dev_matrix[y, x] = StandardDeviation(value=pixel_value)
return std_dev_matrix
def construct_length_of_diagonal_matrix(image: np.ndarray) -> np.ndarray:
width = image.shape[1]
image_size = image.size
matrix = np.zeros(image.shape, dtype=LengthOfDiagonal)
for index in range(0, image_size):
x = index % width
y = int(index / width)
matrix[y, x] = LengthOfDiagonal(x, x, y, y)
return matrix
def construct_first_hu_moment_matrix(image: np.ndarray) -> np.ndarray:
width = image.shape[1]
max_ = float(np.amax(image))
min_ = float(np.amin(image))
matrix = np.zeros(image.shape, dtype=FirstHuMoment)
for index, pixel_value in enumerate(image.flatten()):
x = index % width
y = int(index / width)
norm_pixel_value = (float(pixel_value) - min_) / (max_ - min_)
matrix[y, x] = FirstHuMoment(Pixel(x, y, norm_pixel_value))
return matrix
matrix_constructs = {
'area': construct_area_matrix,
'stddev': construct_std_dev_matrix,
'diagonal': construct_length_of_diagonal_matrix,
'moment': construct_first_hu_moment_matrix
}
def construct_matrix(attribute_name: str, image: np.ndarray) -> np.ndarray:
return matrix_constructs[attribute_name](image)
| true | true |
f7f848b326c1f102b2fdefcf278ec8a88cba3e85 | 5,682 | py | Python | cooltools/cli/logbin_expected.py | Phlya/cooltools | 4fa1d11344cd652622dedcc24c99cd0d5458029b | [
"MIT"
] | null | null | null | cooltools/cli/logbin_expected.py | Phlya/cooltools | 4fa1d11344cd652622dedcc24c99cd0d5458029b | [
"MIT"
] | null | null | null | cooltools/cli/logbin_expected.py | Phlya/cooltools | 4fa1d11344cd652622dedcc24c99cd0d5458029b | [
"MIT"
] | 1 | 2019-11-14T12:24:41.000Z | 2019-11-14T12:24:41.000Z | import pandas as pd
import numpy as np
from functools import partial
from .. import expected
from ..lib.common import read_expected
import click
from .util import validate_csv
from . import cli
@cli.command()
@click.argument(
"expected_path",
metavar="EXPECTED_PATH",
type=str,
callback=partial(validate_csv, default_column="balanced.sum"),
)
@click.argument("output_prefix", metavar="OUTPUT_PREFIX", type=str, nargs=1)
@click.option(
"--bins-per-order-magnitude",
metavar="bins_per_order_magnitude",
help="How many bins per order of magnitude. "
"Default of 10 has a ratio of neighboring bins of about 1.25",
type=int,
nargs=1,
default=10,
show_default=True,
)
@click.option(
"--bin-layout",
metavar="bin_layout",
help="'fixed' means that bins are exactly the same for different datasets, "
"and only depend on bins_per_order_magnitude "
"'longest_regio' means that the last bin will end at size of the longest region. "
"\nGOOD: the last bin will have as much data as possible. "
"\nBAD: bin edges will end up different for different datasets, "
"you can't divide them by each other",
type=click.Choice(["fixed", "longest_region"]),
nargs=1,
default="fixed",
show_default=True,
)
@click.option(
"--min-nvalid",
metavar="min_nvalid",
help="For each region, throw out bins (log-spaced) that have less than min_nvalid "
"valid pixels. This will ensure that each entree in Pc by region has at least "
"n_valid valid pixels. "
"Don't set it to zero, or it will introduce bugs. Setting it to 1 is OK, but "
"not recommended.",
type=int,
nargs=1,
default=200,
show_default=True,
)
@click.option(
"--min-count",
metavar="min_count",
help="If counts are found in the data, then for each region, throw out bins "
"(log-spaced) that have more than min_counts of counts.sum (raw Hi-C counts). "
"This will ensure that each entree in P(s) by region has at least min_count "
"raw Hi-C reads",
type=int,
nargs=1,
default=50,
show_default=True,
)
@click.option(
"--spread-funcs",
metavar="spread_funcs",
help="A way to estimate the spread of the P(s) curves between regions. "
"* 'minmax' - the minimum/maximum of by-region P(s)\n"
"* 'std' - weighted standard deviation of P(s) curves (may produce negative results)\n "
"* 'logstd' (recommended) weighted standard deviation in logspace",
type=click.Choice(["minmax", "std", "logstd"]),
default="logstd",
show_default=True,
nargs=1,
)
@click.option(
"--spread-funcs-slope",
metavar="spread_funcs_slope",
help="Same as spread-funcs, but for slope (derivative) ratehr than P(s)",
type=click.Choice(["minmax", "std", "logstd"]),
default="std",
show_default=True,
nargs=1,
)
@click.option(
"--resolution",
metavar="resolution",
help="Data resolution in bp. If provided, additonal column of separation in bp "
"(s_bp) will be added to the outputs",
type=int,
nargs=1,
)
def logbin_expected(
    expected_path,
    output_prefix,
    bins_per_order_magnitude,
    bin_layout,
    min_nvalid,
    min_count,
    spread_funcs,
    spread_funcs_slope,
    resolution,
):
    """
    Logarithmically bin expected values generated using compute_expected for cis data.

    This smoothes the data, resulting in clearer plots and more robust analysis results.
    Also calculates derivative after gaussian smoothing.

    For a very detailed description, see
    https://github.com/open2c/cooltools/blob/51b95c3bed8d00a5f1f91370fc5192d9a7face7c/cooltools/expected.py#L988

    EXPECTED_PATH : The paths to a .tsv file with output of compute_expected.
    Must include a header. Use the '::' syntax to specify a summary column name.

    OUTPUT_PREFIX: Output file name prefix to store the logbinned expected
    (prefix.log.tsv) and derivative (prefix.der.tsv) in the tsv format."
    """
    # unpack expected path and name as generated by click's callback to validate_csv:
    expected_path, exp_summary_name = expected_path
    # make sure "count.sum" is present in the expected file:
    expected_summary_cols = [exp_summary_name]
    if "count.sum" not in expected_summary_cols:
        expected_summary_cols.append("count.sum")
    cvd = read_expected(
        expected_path,
        expected_value_cols=expected_summary_cols,
    )
    # name of the column with Probability of contacts is
    # based on the name of the column with the diagonal-summary
    # stats in the input expected DataFrame:
    exp_summary_base, *_ = exp_summary_name.split(".")
    Pc_name = f"{exp_summary_base}.avg"
    # NOTE: lb_distbins (the log-spaced bin edges) is not written out here.
    lb_cvd, lb_slopes, lb_distbins = expected.logbin_expected(
        cvd,
        summary_name=exp_summary_name,
        bins_per_order_magnitude=bins_per_order_magnitude,
        bin_layout=bin_layout,
        min_nvalid=min_nvalid,
        min_count=min_count,
    )
    # combine Probabilities of contact for the regions:
    lb_cvd_agg, lb_slopes_agg = expected.combine_binned_expected(
        lb_cvd,
        Pc_name=Pc_name,
        binned_exp_slope=lb_slopes,
        spread_funcs=spread_funcs,
        spread_funcs_slope=spread_funcs_slope,
    )
    # Add genomic-separation columns (in bp) when the bin resolution is known.
    if resolution is not None:
        lb_cvd_agg["s_bp"] = lb_cvd_agg["diag.avg"] * resolution
        lb_slopes_agg["s_bp"] = lb_slopes_agg["diag.avg"] * resolution
    # Write the logbinned expected and its derivative as tab-separated files.
    lb_cvd_agg.to_csv(
        f"{output_prefix}.log.tsv",
        sep="\t",
        index=False,
        na_rep="nan",
    )
    lb_slopes_agg.to_csv(
        f"{output_prefix}.der.tsv",
        sep="\t",
        index=False,
        na_rep="nan",
    )
| 32.655172 | 112 | 0.681978 | import pandas as pd
import numpy as np
from functools import partial
from .. import expected
from ..lib.common import read_expected
import click
from .util import validate_csv
from . import cli
@cli.command()
@click.argument(
"expected_path",
metavar="EXPECTED_PATH",
type=str,
callback=partial(validate_csv, default_column="balanced.sum"),
)
@click.argument("output_prefix", metavar="OUTPUT_PREFIX", type=str, nargs=1)
@click.option(
"--bins-per-order-magnitude",
metavar="bins_per_order_magnitude",
help="How many bins per order of magnitude. "
"Default of 10 has a ratio of neighboring bins of about 1.25",
type=int,
nargs=1,
default=10,
show_default=True,
)
@click.option(
"--bin-layout",
metavar="bin_layout",
help="'fixed' means that bins are exactly the same for different datasets, "
"and only depend on bins_per_order_magnitude "
"'longest_regio' means that the last bin will end at size of the longest region. "
"\nGOOD: the last bin will have as much data as possible. "
"\nBAD: bin edges will end up different for different datasets, "
"you can't divide them by each other",
type=click.Choice(["fixed", "longest_region"]),
nargs=1,
default="fixed",
show_default=True,
)
@click.option(
"--min-nvalid",
metavar="min_nvalid",
help="For each region, throw out bins (log-spaced) that have less than min_nvalid "
"valid pixels. This will ensure that each entree in Pc by region has at least "
"n_valid valid pixels. "
"Don't set it to zero, or it will introduce bugs. Setting it to 1 is OK, but "
"not recommended.",
type=int,
nargs=1,
default=200,
show_default=True,
)
@click.option(
"--min-count",
metavar="min_count",
help="If counts are found in the data, then for each region, throw out bins "
"(log-spaced) that have more than min_counts of counts.sum (raw Hi-C counts). "
"This will ensure that each entree in P(s) by region has at least min_count "
"raw Hi-C reads",
type=int,
nargs=1,
default=50,
show_default=True,
)
@click.option(
"--spread-funcs",
metavar="spread_funcs",
help="A way to estimate the spread of the P(s) curves between regions. "
"* 'minmax' - the minimum/maximum of by-region P(s)\n"
"* 'std' - weighted standard deviation of P(s) curves (may produce negative results)\n "
"* 'logstd' (recommended) weighted standard deviation in logspace",
type=click.Choice(["minmax", "std", "logstd"]),
default="logstd",
show_default=True,
nargs=1,
)
@click.option(
"--spread-funcs-slope",
metavar="spread_funcs_slope",
help="Same as spread-funcs, but for slope (derivative) ratehr than P(s)",
type=click.Choice(["minmax", "std", "logstd"]),
default="std",
show_default=True,
nargs=1,
)
@click.option(
"--resolution",
metavar="resolution",
help="Data resolution in bp. If provided, additonal column of separation in bp "
"(s_bp) will be added to the outputs",
type=int,
nargs=1,
)
def logbin_expected(
expected_path,
output_prefix,
bins_per_order_magnitude,
bin_layout,
min_nvalid,
min_count,
spread_funcs,
spread_funcs_slope,
resolution,
):
expected_path, exp_summary_name = expected_path
# make sure "count.sum" is present in the expected file:
expected_summary_cols = [exp_summary_name]
if "count.sum" not in expected_summary_cols:
expected_summary_cols.append("count.sum")
cvd = read_expected(
expected_path,
expected_value_cols=expected_summary_cols,
)
# name of the column with Probability of contacts is
# based on the name of the column with the diagonal-summary
# stats in the input expected DataFrame:
exp_summary_base, *_ = exp_summary_name.split(".")
Pc_name = f"{exp_summary_base}.avg"
lb_cvd, lb_slopes, lb_distbins = expected.logbin_expected(
cvd,
summary_name=exp_summary_name,
bins_per_order_magnitude=bins_per_order_magnitude,
bin_layout=bin_layout,
min_nvalid=min_nvalid,
min_count=min_count,
)
# combine Probabilities of contact for the regions:
lb_cvd_agg, lb_slopes_agg = expected.combine_binned_expected(
lb_cvd,
Pc_name=Pc_name,
binned_exp_slope=lb_slopes,
spread_funcs=spread_funcs,
spread_funcs_slope=spread_funcs_slope,
)
if resolution is not None:
lb_cvd_agg["s_bp"] = lb_cvd_agg["diag.avg"] * resolution
lb_slopes_agg["s_bp"] = lb_slopes_agg["diag.avg"] * resolution
lb_cvd_agg.to_csv(
f"{output_prefix}.log.tsv",
sep="\t",
index=False,
na_rep="nan",
)
lb_slopes_agg.to_csv(
f"{output_prefix}.der.tsv",
sep="\t",
index=False,
na_rep="nan",
)
| true | true |
f7f848fff026443704348bf0031d34da4fc32000 | 374 | py | Python | gekitchensdk/erd/values/dishwasher/__init__.py | joelmoses/gekitchensdk | f3253262c7f6f2c26e5ad4143c20e788b28c57a9 | [
"MIT"
] | null | null | null | gekitchensdk/erd/values/dishwasher/__init__.py | joelmoses/gekitchensdk | f3253262c7f6f2c26e5ad4143c20e788b28c57a9 | [
"MIT"
] | null | null | null | gekitchensdk/erd/values/dishwasher/__init__.py | joelmoses/gekitchensdk | f3253262c7f6f2c26e5ad4143c20e788b28c57a9 | [
"MIT"
] | null | null | null | from .erd_cycle_state import ErdCycleStateRaw, ErdCycleState
from .erd_operating_mode import ErdOperatingMode
from .erd_rinse_agent import ErdRinseAgentRaw, ErdRinseAgent
from .operating_mode import OperatingMode
from .cycle_state_mapping import CYCLE_STATE_RAW_MAP
from .operating_mode_mapping import OPERATING_MODE_MAP
from .rinse_agent_mapping import RINSE_AGENT_RAW_MAP
| 46.75 | 60 | 0.895722 | from .erd_cycle_state import ErdCycleStateRaw, ErdCycleState
from .erd_operating_mode import ErdOperatingMode
from .erd_rinse_agent import ErdRinseAgentRaw, ErdRinseAgent
from .operating_mode import OperatingMode
from .cycle_state_mapping import CYCLE_STATE_RAW_MAP
from .operating_mode_mapping import OPERATING_MODE_MAP
from .rinse_agent_mapping import RINSE_AGENT_RAW_MAP
| true | true |
f7f8493ffa1855e0ca69e2e1385060c8e9c1ef8a | 3,606 | py | Python | luzfcb_todo/migrations/0001_initial.py | luzfcb/luzfcb_todo | 85af6ef8a4d3995a4577abc61c851317b37f2feb | [
"BSD-2-Clause"
] | null | null | null | luzfcb_todo/migrations/0001_initial.py | luzfcb/luzfcb_todo | 85af6ef8a4d3995a4577abc61c851317b37f2feb | [
"BSD-2-Clause"
] | null | null | null | luzfcb_todo/migrations/0001_initial.py | luzfcb/luzfcb_todo | 85af6ef8a4d3995a4577abc61c851317b37f2feb | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Initial migration for the ``luzfcb_todo`` app: creates the TodoList
    # and Task tables, then wires Task.todolist to TodoList.
    # NOTE(review): never edit an applied migration in place -- Django
    # tracks applied migrations by name; schema changes belong in a new
    # migration file.

    dependencies = [
        # The created_by/modified_by FKs below point at the swappable user
        # model, so this migration must run after that model's app.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                # Standard auto-increment primary key.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Audit timestamps (creation time fixed at insert, modified on save).
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                # Completion / expiry state flags and their timestamps.
                ('is_completed', models.BooleanField(default=False)),
                ('completed_date', models.DateTimeField(null=True, editable=False, blank=True)),
                ('expire_date', models.DateTimeField(null=True, blank=True)),
                ('is_expired', models.BooleanField(default=False, editable=False)),
                ('is_active', models.BooleanField(default=True)),
                ('priority', models.PositiveIntegerField()),
                ('title', models.CharField(max_length=60)),
                # Slug is derived from ``title`` elsewhere; not user editable.
                ('title_slug', models.SlugField(max_length=60, editable=False)),
                ('content', models.TextField()),
                # Audit FKs use SET_NULL so deleting a user keeps the task row.
                ('created_by', models.ForeignKey(related_name='luzfcb_todo_task_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='luzfcb_todo_task_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TodoList',
            fields=[
                # Same audit/state columns as Task, but named by ``name``.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('is_completed', models.BooleanField(default=False)),
                ('completed_date', models.DateTimeField(null=True, editable=False, blank=True)),
                ('expire_date', models.DateTimeField(null=True, blank=True)),
                ('is_expired', models.BooleanField(default=False, editable=False)),
                ('is_active', models.BooleanField(default=True)),
                ('priority', models.PositiveIntegerField()),
                ('name', models.CharField(max_length=60)),
                ('name_slug', models.SlugField(max_length=60, editable=False)),
                ('created_by', models.ForeignKey(related_name='luzfcb_todo_todolist_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='luzfcb_todo_todolist_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Added after both CreateModel operations so the target table exists
        # before the FK column is created.
        migrations.AddField(
            model_name='task',
            name='todolist',
            field=models.ForeignKey(to='luzfcb_todo.TodoList'),
        ),
    ]
| 54.636364 | 214 | 0.630893 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('is_completed', models.BooleanField(default=False)),
('completed_date', models.DateTimeField(null=True, editable=False, blank=True)),
('expire_date', models.DateTimeField(null=True, blank=True)),
('is_expired', models.BooleanField(default=False, editable=False)),
('is_active', models.BooleanField(default=True)),
('priority', models.PositiveIntegerField()),
('title', models.CharField(max_length=60)),
('title_slug', models.SlugField(max_length=60, editable=False)),
('content', models.TextField()),
('created_by', models.ForeignKey(related_name='luzfcb_todo_task_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='luzfcb_todo_task_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TodoList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('is_completed', models.BooleanField(default=False)),
('completed_date', models.DateTimeField(null=True, editable=False, blank=True)),
('expire_date', models.DateTimeField(null=True, blank=True)),
('is_expired', models.BooleanField(default=False, editable=False)),
('is_active', models.BooleanField(default=True)),
('priority', models.PositiveIntegerField()),
('name', models.CharField(max_length=60)),
('name_slug', models.SlugField(max_length=60, editable=False)),
('created_by', models.ForeignKey(related_name='luzfcb_todo_todolist_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='luzfcb_todo_todolist_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='task',
name='todolist',
field=models.ForeignKey(to='luzfcb_todo.TodoList'),
),
]
| true | true |
f7f849f98d43fda3731248f671e2104b55408e37 | 1,912 | py | Python | backend_admin/urls.py | sougannkyou/AppSimulator | 0803d557b0451b049f11e90c258ec11f7074a2ac | [
"MIT"
] | 6 | 2018-10-21T03:41:03.000Z | 2021-12-17T11:09:50.000Z | backend_admin/urls.py | sougannkyou/AppSimulator | 0803d557b0451b049f11e90c258ec11f7074a2ac | [
"MIT"
] | null | null | null | backend_admin/urls.py | sougannkyou/AppSimulator | 0803d557b0451b049f11e90c258ec11f7074a2ac | [
"MIT"
] | 4 | 2020-01-30T14:46:50.000Z | 2021-12-04T01:07:46.000Z | from django.conf.urls import url, patterns
from . import views
from django.views.generic import RedirectView
# from django.views.generic.simple import redirect_to
# NOTE: django.views.generic.simple was removed in Django 1.5; use the
# class-based RedirectView (imported above) instead.
# URL routing table for the backend-admin app.  Regex strings are matched
# in order; do not reorder without checking for overlapping patterns
# (e.g. the generic ^backend_admin/(\w+)/(\w+)/$ catch-all near the end).
urlpatterns = [
    # General pages and authentication.
    url(r'^test/$', views.test, name='test'),
    url(r'^$', views.index, name='index'),
    url(r'^login$', views.login_view, name='login'),
    url(r'^logout$', views.logout_view, name='logout'),
    url(r'^changepassword$', views.change_password, name='change_password'),
    url(r'^private$', views.private, name='private'),
    # Custom user management (overrides the stock admin views).
    url(r'^backend_admin/backend_admin/user/add/$', views.create_user, name='create_user', ),
    url(r'^backend_admin/backend_admin/user/(\d+)/password$', views.change_user_password, name='change_user_password', ),
    url(r'^backend_admin/backend_admin/user/(\d+)/$', views.change_user, name='change_user', ),
    # Group management.
    url(r'^backend_admin/auth/group/add/$', views.add_group, name='add_group', ),
    url(r'^backend_admin/auth/group/(\d+)/$', views.change_group, name='change_group', ),
    # url(r'^admin/backend_admin/user/\d/$', RedirectView.as_view(url='/changeuser'),),
    # url(r'^admin/auth/group/$', RedirectView.as_view(url='/backend_admin/auth/group', permanent=True), ),
    # AJAX duplicate-check endpoints used by the user forms.
    url(r'^duplicate_username$', views.username_duplicate_verify, name='duplicate_username'),
    url(r'^duplicate_email$', views.email_duplicate_verify, name='duplicate_email'),
    # Generic model list/delete: captures (app_label, model_name).
    url(r'^backend_admin/(\w+)/(\w+)/$', views.change_list, name='change_list'),
    url(r'^backend_admin/(\w+)/(\w+)/delete$', views.model_object_delete, name='model_object_delete'),
    # Operational / auxiliary endpoints.
    url(r'^uwsgireload/$', views.reload_uwsgi, name='reload_uwsgi'),
    url(r'^visual/login$', views.visual_login_view, name='visual_login_view'),
    url(r'^visual/verify/$', views.visual_login_verify, name='visual_login_verify'),
    url(r'^trans_users/$', views.trans_users, name='trans_users'),
]
| 56.235294 | 121 | 0.70659 | from django.conf.urls import url, patterns
from . import views
from django.views.generic import RedirectView
urlpatterns = [
url(r'^test/$', views.test, name='test'),
url(r'^$', views.index, name='index'),
url(r'^login$', views.login_view, name='login'),
url(r'^logout$', views.logout_view, name='logout'),
url(r'^changepassword$', views.change_password, name='change_password'),
url(r'^private$', views.private, name='private'),
url(r'^backend_admin/backend_admin/user/add/$', views.create_user, name='create_user', ),
url(r'^backend_admin/backend_admin/user/(\d+)/password$', views.change_user_password, name='change_user_password', ),
url(r'^backend_admin/backend_admin/user/(\d+)/$', views.change_user, name='change_user', ),
url(r'^backend_admin/auth/group/add/$', views.add_group, name='add_group', ),
url(r'^backend_admin/auth/group/(\d+)/$', views.change_group, name='change_group', ),
url(r'^duplicate_username$', views.username_duplicate_verify, name='duplicate_username'),
url(r'^duplicate_email$', views.email_duplicate_verify, name='duplicate_email'),
url(r'^backend_admin/(\w+)/(\w+)/$', views.change_list, name='change_list'),
url(r'^backend_admin/(\w+)/(\w+)/delete$', views.model_object_delete, name='model_object_delete'),
url(r'^uwsgireload/$', views.reload_uwsgi, name='reload_uwsgi'),
url(r'^visual/login$', views.visual_login_view, name='visual_login_view'),
url(r'^visual/verify/$', views.visual_login_verify, name='visual_login_verify'),
url(r'^trans_users/$', views.trans_users, name='trans_users'),
]
| true | true |
f7f84a499e54ce8fd34b532a1823beb6545c1e4b | 781 | py | Python | src/cobra/core/plugins/base/structs.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | 1 | 2015-01-27T08:56:46.000Z | 2015-01-27T08:56:46.000Z | src/cobra/core/plugins/base/structs.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | null | null | null | src/cobra/core/plugins/base/structs.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, print_function
__all__ = ('Annotation', 'Notification')
import warnings
class Annotation(object):
    """Lightweight value object describing a plugin-provided annotation.

    Attributes:
        label: human-readable text for the annotation (required).
        url: optional hyperlink target associated with the label.
        description: optional longer explanation for the annotation.
    """

    __slots__ = ['label', 'url', 'description']

    def __init__(self, label, url=None, description=None):
        # Assign each constructor argument to its matching slot.
        for slot, value in zip(self.__slots__, (label, url, description)):
            setattr(self, slot, value)
class Notification(object):
    """An event notification handed to plugins, optionally tied to rules.

    ``rules`` is the list of rule objects that triggered the notification;
    the legacy singular ``rule`` accessor is kept as a deprecated property.
    """

    # BUGFIX: ``'rule'`` must NOT appear in ``__slots__``.  A slot name that
    # collides with a class-level attribute (the ``rule`` property below)
    # raises ``ValueError: 'rule' in __slots__ conflicts with class
    # variable`` at class-creation time, making the class unusable.
    __slots__ = ['event', 'rules']

    def __init__(self, event, rule=None, rules=None):
        """Store *event* and normalize ``rule``/``rules`` into a list.

        A single ``rule`` is wrapped into a one-element ``rules`` list when
        ``rules`` is not given; with neither, ``rules`` is empty.
        """
        if rule and not rules:
            rules = [rule]

        self.event = event
        self.rules = rules or []

    @property
    def rule(self):
        # Deprecated singular accessor; returns the first rule only.
        warnings.warn('Notification.rule is deprecated. Switch to Notification.rules.',
                      DeprecationWarning)
        return self.rules[0]
| 23.666667 | 87 | 0.627401 |
from __future__ import absolute_import, print_function
__all__ = ('Annotation', 'Notification')
import warnings
class Annotation(object):
__slots__ = ['label', 'url', 'description']
def __init__(self, label, url=None, description=None):
self.label = label
self.url = url
self.description = description
class Notification(object):
__slots__ = ['event', 'rule', 'rules']
def __init__(self, event, rule=None, rules=None):
if rule and not rules:
rules = [rule]
self.event = event
self.rules = rules or []
@property
def rule(self):
warnings.warn('Notification.rule is deprecated. Switch to Notification.rules.',
DeprecationWarning)
return self.rules[0]
| true | true |
f7f84b77b52cf56b6d38f170521dfac87c0ecff2 | 7,645 | py | Python | util/canloader.py | sectioncritical/atmega_can_bootloader | a703b6902fee0f3e931adc77b0470a8238906a0b | [
"MIT"
] | null | null | null | util/canloader.py | sectioncritical/atmega_can_bootloader | a703b6902fee0f3e931adc77b0470a8238906a0b | [
"MIT"
] | 3 | 2021-12-05T22:59:08.000Z | 2021-12-13T22:03:40.000Z | util/canloader.py | sectioncritical/atmega_can_bootloader | a703b6902fee0f3e931adc77b0470a8238906a0b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# SPDX-License-Identifier: MIT
#
# MIT License
#
# Copyright (c) 2021 Joseph Kroesche
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import argparse
import can
from intelhex import IntelHex
_can_rate = 250000
# CRC16 implementation that matches the C version in the boot loader
def crc16_update(crc, val):
    """Fold one byte *val* into CRC-16 accumulator *crc* and return it.

    LSB-first (reflected) CRC with polynomial 0xA001 (CRC-16/ARC),
    matching the target's C implementation bit for bit.
    """
    crc ^= val
    for _ in range(8):
        lsb_set = crc & 1
        crc >>= 1
        if lsb_set:
            crc ^= 0xA001
    return crc & 0xFFFF
def build_arbid(boardid, cmdid):
    """Compose the extended CAN arbitration id for a boot-loader message.

    Layout: fixed base 0x1B007100, board id in bits [7:4], command id in
    bits [3:0].
    """
    base = 0x1b007100
    return base + (boardid << 4) + cmdid
# scan for any board running the CAN boot loader
def scan():
    """Ping all 16 possible board IDs and print which ones answer.

    Sends a PING (cmd 0) to each board id on can0 and waits briefly for a
    REPORT (cmd 5) reply.  Requires a working socketcan interface; a board
    is shown as "OK" only if the reply has the expected arbitration id,
    8 data bytes, and status byte data[4] == 0.
    """
    arbid = build_arbid(0, 0)  # initial arb id, cmd=0/PING, boardid=0
    bus = can.interface.Bus(bustype="socketcan", channel="can0", bitrate=_can_rate)
    msg = can.Message(arbitration_id=arbid, is_extended_id=True, data=[])
    print("Scanning for CAN boot loaders")
    for boardid in range(16):
        print(f"{boardid:02d} ... ", end="")
        msg.arbitration_id = build_arbid(boardid=boardid, cmdid=0)
        # send the ping message to the address
        bus.send(msg)
        # check for a reply
        rxmsg = bus.recv(timeout=0.1)
        if rxmsg:
            # arbid is build_arbid(0, 0), so this equals
            # build_arbid(boardid, 5), i.e. the REPORT id for this board.
            expid = arbid + (boardid << 4) + 5  # expected REPORT arbid
            # check for proper PONG response
            if ((rxmsg.arbitration_id == expid)
                and (rxmsg.dlc == 8)
                and (rxmsg.data[4] == 0)):
                print("OK")
            else:
                print("err")
                print(rxmsg)
        # no response
        else:
            print()
# send a PING to a specified boardid
# pretty print the reply information such as boot loader version
def ping(boardid):
    """Send a PING (cmd 0) to *boardid* and pretty-print the REPORT reply.

    Prints the boot loader version (payload bytes 0-2), status (byte 3,
    1 == OK), and raw payload bytes 5/6.  Prints an error line if the
    reply is not a REPORT (cmd 5) or "No reply" on timeout.
    """
    arbid = build_arbid(boardid=boardid, cmdid=0)  # PING
    bus = can.interface.Bus(bustype="socketcan", channel="can0", bitrate=_can_rate)
    msg = can.Message(arbitration_id=arbid, is_extended_id=True, data=[])
    bus.send(msg)
    rxmsg = bus.recv(timeout=0.1)
    if rxmsg:
        # Low nibble of the arbitration id carries the command code.
        rxcmd = rxmsg.arbitration_id & 0x0F
        if rxcmd != 5:
            print(f"unexpected message was not a REPORT ({rxcmd})")
            return
        payload = rxmsg.data
        verstr = f"{payload[0]}.{payload[1]}.{payload[2]}"
        statstr = "OK" if (payload[3] == 1) else "Err"
        print(f"Board ID: {boardid}")
        print(f"Version: {verstr}")
        print(f"Status: {statstr}")
        print(f"payload 5/6: {payload[5]:02X} {payload[6]:02X}")
    else:
        print("No reply")
# get CAN message and verify it is a REPORT
# if so, return the message payload
# else return None
def get_report(canbus):
    """Receive one frame from *canbus*; return its payload if it is a
    REPORT (command nibble == 5), otherwise ``None`` (also on timeout)."""
    frame = canbus.recv(timeout=0.1)
    if not frame:
        return None
    if (frame.arbitration_id & 0x0F) != 5:
        return None
    return frame.data
# upload the hex file filename, to the specified boardid
# using the CAN protocol
def load(boardid, filename):
    """Flash the Intel-hex file *filename* onto the target *boardid* over CAN.

    Protocol: START (cmd 2, payload = image length LE16) -> one DATA frame
    (cmd 3, 8 bytes) per chunk -> STOP (cmd 4, payload = CRC-16 LE16).
    After every command the target must answer with a REPORT whose status
    byte (data[4]) acknowledges the step; any missing or wrong REPORT
    aborts the load with an error message.
    """
    # load the hex file
    ih = IntelHex(filename)

    # make sure there is just one segment and it starts at 0
    segs = ih.segments()
    if len(segs) != 1:
        print("ERR: more than one segment in hex file")
        return
    seg = segs[0]
    imgaddr = seg[0]
    imglen = seg[1]
    if imgaddr != 0:
        print("ERR: image segment does not start at address 0")
        return
    print(f"original image length: {imglen}")

    # pad out to a multiple of 8 bytes (each DATA frame carries exactly 8).
    # BUGFIX: the previous computation `8 - (imglen % 8)` yielded 8 instead
    # of 0 for an already-aligned image, appending a useless extra frame.
    padlen = (8 - (imglen % 8)) % 8
    print(f"padlen: {padlen}")
    if padlen != 0:
        for idx in range(imglen, imglen + padlen):
            ih[idx] = 0

    imglen = len(ih)  # new image length (now a multiple of 8)
    print(f"new image len: {imglen}")

    # BUGFIX: keyword was misspelled `birate`, which python-can silently
    # ignored, leaving the interface at its default bitrate.
    bus = can.interface.Bus(bustype="socketcan", channel="can0", bitrate=_can_rate)

    # send start command with the total image length (little endian)
    arbid = build_arbid(boardid=boardid, cmdid=2)
    msg = can.Message(arbitration_id=arbid, is_extended_id=True,
                      data=[imglen & 0xFF, (imglen >> 8) & 0xFF])
    bus.send(msg)

    # verify READY report
    rpt = get_report(bus)
    if rpt is None or rpt[4] != 1:
        print("ERR: did not recieve READY after START")
        print("report:", rpt)
        return

    # iterate over image in 8 byte chunks; the DATA arbid is loop-invariant
    data_arbid = build_arbid(boardid=boardid, cmdid=3)
    loadcrc = 0
    for idx in range(0, imglen, 8):
        print(f"{idx:04X}: ")
        # create a DATA message and fold its bytes into the running CRC
        payload = ih.tobinarray(start=idx, size=8)
        for val in payload:
            loadcrc = crc16_update(loadcrc, val)
        msg = can.Message(arbitration_id=data_arbid, is_extended_id=True,
                          data=payload)
        bus.send(msg)

        rpt = get_report(bus)
        # the final chunk must be acknowledged with status 2, others with 1
        rptype = 2 if (imglen - idx) == 8 else 1
        if rpt is None or rpt[4] != rptype:
            print("ERR: did not recieve READY after DATA")
            print("report:", rpt)
            return

    # send STOP command with the CRC of everything sent (little endian)
    arbid = build_arbid(boardid=boardid, cmdid=4)
    msg = can.Message(arbitration_id=arbid, is_extended_id=True,
                      data=[loadcrc & 0xFF, (loadcrc >> 8) & 0xFF])
    bus.send(msg)
    rpt = get_report(bus)
    if rpt is None or rpt[4] != 3:
        print("ERR: did not recieve DONE after STOP")
        print("report:", rpt)
        return
    if rpt[5] != 1:
        print("ERR: target indicates load error")
        return

    print("Load complete with success indication from target")
    print(f"len={imglen:04X} crc={loadcrc:04X}")
# command line interface
def cli():
    """Parse command-line options and dispatch to scan/ping/load."""
    global _can_rate

    ap = argparse.ArgumentParser(description="CAN Firmware Loader")
    ap.add_argument('-v', "--verbose", action="store_true",
                    help="turn on some debug output")
    ap.add_argument('-r', "--rate", type=int, default=_can_rate,
                    help=f"CAN data rate ({_can_rate})")
    ap.add_argument('-f', "--file", help="file to upload")
    ap.add_argument('-b', "--board", type=int, help="board ID of target")
    ap.add_argument("command", help="loader command (ping, scan, load)")
    args = ap.parse_args()

    # adopt the requested CAN bit rate for all subsequent commands
    if args.rate:
        _can_rate = args.rate

    command = args.command
    if command == "scan":
        scan()
        return
    if command == "ping":
        if args.board is None:
            print("ping must specify --board")
        else:
            ping(args.board)
        return
    if command == "load":
        if args.board is None:
            print("load must specify --board")
        elif args.file is None:
            print("load must specify --file")
        else:
            load(args.board, args.file)
        return
    print("unknown command")

if __name__ == "__main__":
    cli()
| 32.394068 | 83 | 0.614388 |
import argparse
import can
from intelhex import IntelHex
_can_rate = 250000
def crc16_update(crc, val):
crc ^= val
for i in range(8):
if (crc & 1):
crc = (crc >> 1) ^ 0xA001
else:
crc = (crc >> 1)
return crc & 0xFFFF;
def build_arbid(boardid, cmdid):
return 0x1b007100 + (boardid << 4) + cmdid
def scan():
arbid = build_arbid(0, 0)
bus = can.interface.Bus(bustype="socketcan", channel="can0", bitrate=_can_rate)
msg = can.Message(arbitration_id=arbid, is_extended_id=True, data=[])
print("Scanning for CAN boot loaders")
for boardid in range(16):
print(f"{boardid:02d} ... ", end="")
msg.arbitration_id = build_arbid(boardid=boardid, cmdid=0)
bus.send(msg)
rxmsg = bus.recv(timeout=0.1)
if rxmsg:
expid = arbid + (boardid << 4) + 5
if ((rxmsg.arbitration_id == expid)
and (rxmsg.dlc == 8)
and (rxmsg.data[4] == 0)):
print("OK")
else:
print("err")
print(rxmsg)
else:
print()
def ping(boardid):
arbid = build_arbid(boardid=boardid, cmdid=0)
bus = can.interface.Bus(bustype="socketcan", channel="can0", bitrate=_can_rate)
msg = can.Message(arbitration_id=arbid, is_extended_id=True, data=[])
bus.send(msg)
rxmsg = bus.recv(timeout=0.1)
if rxmsg:
rxcmd = rxmsg.arbitration_id & 0x0F
if rxcmd != 5:
print(f"unexpected message was not a REPORT ({rxcmd})")
return
payload = rxmsg.data
verstr = f"{payload[0]}.{payload[1]}.{payload[2]}"
statstr = "OK" if (payload[3] == 1) else "Err"
print(f"Board ID: {boardid}")
print(f"Version: {verstr}")
print(f"Status: {statstr}")
print(f"payload 5/6: {payload[5]:02X} {payload[6]:02X}")
else:
print("No reply")
def get_report(canbus):
msg = canbus.recv(timeout=0.1)
if msg:
rxcmd = msg.arbitration_id & 0x0F
if rxcmd == 5:
return msg.data
return None
def load(boardid, filename):
ih = IntelHex(filename)
segs = ih.segments()
if len(segs) != 1:
print("ERR: more than one segment in hex file")
return
seg = segs[0]
imgaddr = seg[0]
imglen = seg[1]
if imgaddr != 0:
print("ERR: image segment does not start at address 0")
return
print(f"original image length: {imglen}")
padlen = 8 - (imglen % 8)
print(f"padlen: {padlen}")
if padlen != 0:
for idx in range(imglen, imglen+padlen):
ih[idx] = 0
imglen = len(ih)
print(f"new image len: {imglen}")
bus = can.interface.Bus(bustype="socketcan", channel="can0", birate=_can_rate)
arbid = build_arbid(boardid=boardid, cmdid=2)
msg = can.Message(arbitration_id=arbid, is_extended_id=True,
data=[imglen & 0xFF, (imglen >> 8) & 0xFF])
bus.send(msg)
rpt = get_report(bus)
if rpt is None or rpt[4] != 1:
print("ERR: did not recieve READY after START")
print("report:", rpt)
return
loadcrc = 0
for idx in range(0, imglen, 8):
print(f"{idx:04X}: ")
arbid = build_arbid(boardid=boardid, cmdid=3)
payload = ih.tobinarray(start=idx, size=8)
for val in payload:
loadcrc = crc16_update(loadcrc, val)
msg = can.Message(arbitration_id=arbid, is_extended_id=True,
data=payload)
bus.send(msg)
rpt = get_report(bus)
rptype = 2 if (imglen - idx) == 8 else 1
if rpt is None or rpt[4] != rptype:
print("ERR: did not recieve READY after DATA")
print("report:", rpt)
return
arbid = build_arbid(boardid=boardid, cmdid=4)
msg = can.Message(arbitration_id=arbid, is_extended_id=True,
data=[loadcrc & 0xFF, (loadcrc >> 8) & 0xFF])
bus.send(msg)
rpt = get_report(bus)
if rpt is None or rpt[4] != 3:
print("ERR: did not recieve DONE after STOP")
print("report:", rpt)
return
if rpt[5] != 1:
print("ERR: target indicates load error")
return
print("Load complete with success indication from target")
print(f"len={imglen:04X} crc={loadcrc:04X}")
def cli():
global _can_rate
parser = argparse.ArgumentParser(description="CAN Firmware Loader")
parser.add_argument('-v', "--verbose", action="store_true",
help="turn on some debug output")
parser.add_argument('-r', "--rate", type=int, default=_can_rate,
help=f"CAN data rate ({_can_rate})")
parser.add_argument('-f', "--file", help="file to upload")
parser.add_argument('-b', "--board", type=int, help="board ID of target")
parser.add_argument("command", help="loader command (ping, scan, load)")
args = parser.parse_args()
if args.rate:
_can_rate = args.rate
if args.command == "scan":
scan()
elif args.command == "ping":
if args.board is None:
print("ping must specify --board")
else:
ping(args.board)
elif args.command == "load":
if args.board is None:
print("load must specify --board")
elif args.file is None:
print("load must specify --file")
else:
load(args.board, args.file)
else:
print("unknown command")
if __name__ == "__main__":
cli()
| true | true |
f7f84be4fc0dd3bed67149225c190a3988322e84 | 345 | py | Python | pangea/core/utils.py | LongTailBio/pangea-django | 630551dded7f9e38f95eda8c36039e0de46961e7 | [
"MIT"
] | null | null | null | pangea/core/utils.py | LongTailBio/pangea-django | 630551dded7f9e38f95eda8c36039e0de46961e7 | [
"MIT"
] | 27 | 2020-03-26T02:55:12.000Z | 2022-03-12T00:55:04.000Z | pangea/core/utils.py | LongTailBio/pangea-django | 630551dded7f9e38f95eda8c36039e0de46961e7 | [
"MIT"
] | 1 | 2021-09-14T08:15:54.000Z | 2021-09-14T08:15:54.000Z |
import random
def str2bool(v):
    """Parse a boolean value from *v*.

    Any value whose lowercased string form is ``"true"``, ``"t"`` or
    ``"1"`` maps to ``True``; everything else maps to ``False``.
    """
    truthy = {"true", "t", "1"}
    return str(v).lower() in truthy
def random_replicate_name(len=12):
    """Return a random alphanumeric string of length `len`."""
    # Full alphanumeric alphabet.  BUGFIX: the previous hand-typed literal
    # accidentally omitted 's' and 'S' ("...pqrt..." instead of "...pqrst..."),
    # contradicting the "alphanumeric" contract above.
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    # NOTE(review): the parameter name ``len`` shadows the builtin; it is
    # kept for backward compatibility with keyword callers.
    return ''.join(random.choices(alphabet, k=len))
| 24.642857 | 95 | 0.689855 |
import random
def str2bool(v):
return str(v).lower() in ("true", "t", "1")
def random_replicate_name(len=12):
out = random.choices('abcdefghijklmnopqrtuvwxyzABCDEFGHIJKLMNOPQRTUVWXYZ0123456789', k=len)
return ''.join(out)
| true | true |
f7f84e60e9987fe1920493977cfaea5115d193c2 | 5,432 | py | Python | src/config/template.py | sanghyun-son/srwarp | d7cc08db5ba5ec9103f1813f76d1da825afe1a5b | [
"MIT"
] | 82 | 2021-04-22T09:22:46.000Z | 2022-03-30T03:06:47.000Z | src/config/template.py | sanghyun-son/srwarp | d7cc08db5ba5ec9103f1813f76d1da825afe1a5b | [
"MIT"
] | 3 | 2021-08-04T15:40:52.000Z | 2022-02-21T10:10:52.000Z | src/config/template.py | sanghyun-son/srwarp | d7cc08db5ba5ec9103f1813f76d1da825afe1a5b | [
"MIT"
] | 11 | 2021-05-16T14:54:33.000Z | 2022-02-18T08:25:37.000Z | def set_template(template, cfg):
if 'srwarp-all' in template:
cfg.model = 'srwarp.baseline'
cfg.residual = True
cfg.kernel_size_up = 3
cfg.kernel_net = True
cfg.kernel_net_multi = True
cfg.kernel_depthwise = True
if 'down' in template:
cfg.scale = 2
cfg.patch = 64
cfg.dtrain = ['downsampler.unpaired']
cfg.augmentation = ''
cfg.unpaired_hr = '../dataset/DIV2K/patch_gradient/crop_filtered_odd'
if 'video' in template:
cfg.unpaired_lr = '../experiment/jpeg/q20_test/patch_gradient/crop_seoul_moon_filtered'
else:
cfg.unpaired_lr = '../dataset/DIV2K/patch_gradient_gaussian/crop_lr_{}_filtered_even'.format(cfg.degradation_test)
cfg.kernel_gt = '../dataset/DIV2K/kernel_{}.mat'.format(cfg.degradation_test)
cfg.n_feats = 48
cfg.depth = 4
cfg.batch_size = 16
if 'self' in template:
cfg.loss = 'loss/ds_self.txt'
cfg.trainer = 'downsampler.self'
cfg.save = 'kernels_self'
cfg.epochs = 20
cfg.milestones = [5, 10, 15]
else:
cfg.loss = 'loss/ds_iterative.txt'
cfg.trainer = 'downsampler.iterative'
cfg.save = 'kernels_16'
cfg.epochs = 30
cfg.milestones = [10, 15, 20]
if 'jaeha' in template:
cfg.model = 'jaeha.generator'
cfg.dis = 'jaeha.discriminator'
cfg.loss = 'loss/ds_jaeha.txt'
cfg.trainer = 'jaeha.unpaired'
cfg.lr = 5e-5
cfg.save = 'kernels_jaeha'
cfg.epochs = 80
cfg.milestones = [987654321]
else:
cfg.model = 'downsampler.dnew'
cfg.dis = 'downsampler.discriminator_kgan'
cfg.depth_sub = 7
cfg.width_sub = 64
cfg.adjust_weight = 0.010
cfg.reset = True
'''
if 'ft' in template:
if 'face' in template:
cfg.scale = 8
cfg.dtrain = ['sr.celeba_mask']
cfg.dtest = ['sr.celeba_mask']
cfg.n_classes = 10
else:
cfg.scale = 4
cfg.dtrain = ['sr.mixed']
cfg.dtest = ['sr.mixed']
cfg.n_classes = 8
if 'rrdb' in template:
cfg.model = 'sr.rrdb'
if not cfg.resume:
if 'face' in template:
cfg.resume = 'dl-edsr-baseline-face-x8'
else:
if 'rrdb' in template:
cfg.resume = 'dl-rrdb-x4'
else:
cfg.resume = 'dl-edsr-baseline-x4'
if 'mixed' in template:
cfg.use_div2k = True
cfg.use_ost = True
cfg.use_flickr = False
if 'div2k' in template:
cfg.use_div2k = True
if 'ost' in template:
cfg.use_ost = True
if 'df2k' in template:
cfg.use_div2k = True
cfg.use_flickr = True
if 'all' in template:
cfg.use_div2k = True
cfg.use_flickr = True
cfg.use_ost = True
cfg.lr = 1e-4
cfg.gan_k = 0
if cfg.use_patch and cfg.use_div2k:
if cfg.use_flickr:
cfg.epochs = 28
cfg.milestones = [4, 7, 14, 21]
else:
if cfg.use_ost:
cfg.epochs = 95
cfg.milestones = [12, 24, 48, 72]
else:
cfg.epochs = 112
cfg.milestones = [14, 28, 56, 84]
else:
cfg.epochs = 200
cfg.milestones = [25, 50, 100, 150]
if 'face' in template:
cfg.epochs //= 2
cfg.milestones = [d // 2 for d in cfg.milestones]
if 'more' in template:
cfg.epochs = int(1.5 * cfg.epochs)
cfg.milestones = [int(1.5 * d) for d in cfg.milestones]
if 'madv' in template:
cfg.loss = 'loss/sr_mask.txt'
cfg.trainer = 'sr.mask'
if 'old' in template:
cfg.dis = 'mask.discriminator_old'
elif 'early' in template:
cfg.dis = 'mask.discriminator'
# Mask is applied at the end of classification layer,
# so the scale doesn't change. Use early_stop to modify the model
cfg.dis_early_fork = 1
cfg.mask_scale = 16
# Override
if 'early1' in template:
cfg.dis_early_fork = 1
cfg.mask_scale = 16
elif 'early2' in template:
cfg.dis_early_fork = 2
cfg.mask_scale = 16
elif 'seg' in template:
cfg.dis = 'mask.discriminator_seg'
cfg.dis_seg_model = 'segmentation/model.pt'
cfg.dis_seg_n_feat = 32
cfg.mask_scale = 16
# Override
if 'segd' in template:
cfg.dis = 'mask.discriminator_segdeep'
# TODO type other segmentation network arguments here
else:
# Default
cfg.dis = 'mask.discriminator'
else:
cfg.no_mask = True
cfg.loss = 'loss/sr_adversarial.txt'
cfg.dis = 'srgan.discriminator'
cfg.dpatch = 0
'''
| 34.379747 | 126 | 0.495766 | def set_template(template, cfg):
if 'srwarp-all' in template:
cfg.model = 'srwarp.baseline'
cfg.residual = True
cfg.kernel_size_up = 3
cfg.kernel_net = True
cfg.kernel_net_multi = True
cfg.kernel_depthwise = True
if 'down' in template:
cfg.scale = 2
cfg.patch = 64
cfg.dtrain = ['downsampler.unpaired']
cfg.augmentation = ''
cfg.unpaired_hr = '../dataset/DIV2K/patch_gradient/crop_filtered_odd'
if 'video' in template:
cfg.unpaired_lr = '../experiment/jpeg/q20_test/patch_gradient/crop_seoul_moon_filtered'
else:
cfg.unpaired_lr = '../dataset/DIV2K/patch_gradient_gaussian/crop_lr_{}_filtered_even'.format(cfg.degradation_test)
cfg.kernel_gt = '../dataset/DIV2K/kernel_{}.mat'.format(cfg.degradation_test)
cfg.n_feats = 48
cfg.depth = 4
cfg.batch_size = 16
if 'self' in template:
cfg.loss = 'loss/ds_self.txt'
cfg.trainer = 'downsampler.self'
cfg.save = 'kernels_self'
cfg.epochs = 20
cfg.milestones = [5, 10, 15]
else:
cfg.loss = 'loss/ds_iterative.txt'
cfg.trainer = 'downsampler.iterative'
cfg.save = 'kernels_16'
cfg.epochs = 30
cfg.milestones = [10, 15, 20]
if 'jaeha' in template:
cfg.model = 'jaeha.generator'
cfg.dis = 'jaeha.discriminator'
cfg.loss = 'loss/ds_jaeha.txt'
cfg.trainer = 'jaeha.unpaired'
cfg.lr = 5e-5
cfg.save = 'kernels_jaeha'
cfg.epochs = 80
cfg.milestones = [987654321]
else:
cfg.model = 'downsampler.dnew'
cfg.dis = 'downsampler.discriminator_kgan'
cfg.depth_sub = 7
cfg.width_sub = 64
cfg.adjust_weight = 0.010
cfg.reset = True
| true | true |
f7f84f2117caef9b1634023fc97912f62a96cbc8 | 4,729 | py | Python | src/TestBed.py | lallulli/web2help | 71e28f88463286c4a18ad056312ba1d5039b00a3 | [
"BSD-3-Clause"
] | null | null | null | src/TestBed.py | lallulli/web2help | 71e28f88463286c4a18ad056312ba1d5039b00a3 | [
"BSD-3-Clause"
] | null | null | null | src/TestBed.py | lallulli/web2help | 71e28f88463286c4a18ad056312ba1d5039b00a3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Aug 25 2009)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
from wx.stc import *
from PythonSTC import *
###########################################################################
## Class TestBed
###########################################################################
class TestBed ( wx.Dialog ):
    """wxFormBuilder-generated dialog for the web2help testbed.

    Layout: a horizontal splitter whose top pane holds a URL field with
    Load/Parse buttons above an HTML source view (StyledTextCtrl), and
    whose bottom pane holds a Python code editor (PythonSTC), the
    Title/Content radio buttons and the OK/Cancel buttons.

    Event handlers below are virtual -- override them in a derived class.
    NOTE: this file is generated; change the .fbp project and regenerate
    rather than editing the layout code by hand (see header comment).
    """

    def __init__( self, parent ):
        wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"web2help testbed", pos = wx.DefaultPosition, size = wx.Size( 433,438 ), style = wx.CAPTION|wx.CLOSE_BOX|wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX|wx.RESIZE_BORDER )

        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )

        bSizer8 = wx.BoxSizer( wx.VERTICAL )

        self.m_splitter1 = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_BORDER )
        self.m_splitter1.Bind( wx.EVT_IDLE, self.m_splitter1OnIdle )

        # Top pane: URL row + HTML source view.
        self.pannelloSuperiore = wx.Panel( self.m_splitter1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SUNKEN_BORDER|wx.TAB_TRAVERSAL )
        bSizer13 = wx.BoxSizer( wx.VERTICAL )

        bSizer11 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_staticText6 = wx.StaticText( self.pannelloSuperiore, wx.ID_ANY, u"URL", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText6.Wrap( -1 )
        bSizer11.Add( self.m_staticText6, 0, wx.ALL, 5 )

        self.url = wx.TextCtrl( self.pannelloSuperiore, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer11.Add( self.url, 1, wx.ALL, 5 )

        self.load = wx.Button( self.pannelloSuperiore, wx.ID_ANY, u"Load", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer11.Add( self.load, 0, wx.ALL, 5 )

        self.parse = wx.Button( self.pannelloSuperiore, wx.ID_ANY, u"Parse", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer11.Add( self.parse, 0, wx.ALL, 5 )

        bSizer13.Add( bSizer11, 0, wx.EXPAND, 5 )

        self.html =StyledTextCtrl(self.pannelloSuperiore, wx.ID_ANY)
        bSizer13.Add( self.html, 1, wx.ALL|wx.EXPAND, 5 )

        self.pannelloSuperiore.SetSizer( bSizer13 )
        self.pannelloSuperiore.Layout()
        bSizer13.Fit( self.pannelloSuperiore )

        # Bottom pane: code editor + radio buttons + OK/Cancel.
        self.pannelloInferiore = wx.Panel( self.m_splitter1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SUNKEN_BORDER|wx.TAB_TRAVERSAL )
        bSizer14 = wx.BoxSizer( wx.VERTICAL )

        self.code = PythonSTC(self.pannelloInferiore, wx.ID_ANY)
        bSizer14.Add( self.code, 1, wx.ALL|wx.EXPAND, 5 )

        bSizer15 = wx.BoxSizer( wx.HORIZONTAL )

        self.title = wx.RadioButton( self.pannelloInferiore, wx.ID_ANY, u"Title", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.title.SetValue( True )
        bSizer15.Add( self.title, 0, wx.ALL, 5 )

        self.content = wx.RadioButton( self.pannelloInferiore, wx.ID_ANY, u"Content", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer15.Add( self.content, 0, wx.ALL, 5 )

        bSizer14.Add( bSizer15, 0, wx.EXPAND, 5 )

        bSizer16 = wx.BoxSizer( wx.HORIZONTAL )

        m_sdbSizer1 = wx.StdDialogButtonSizer()
        self.m_sdbSizer1OK = wx.Button( self.pannelloInferiore, wx.ID_OK )
        m_sdbSizer1.AddButton( self.m_sdbSizer1OK )
        self.m_sdbSizer1Cancel = wx.Button( self.pannelloInferiore, wx.ID_CANCEL )
        m_sdbSizer1.AddButton( self.m_sdbSizer1Cancel )
        m_sdbSizer1.Realize();
        bSizer16.Add( m_sdbSizer1, 1, wx.ALL|wx.BOTTOM|wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, 5 )

        bSizer14.Add( bSizer16, 0, wx.EXPAND, 5 )

        self.pannelloInferiore.SetSizer( bSizer14 )
        self.pannelloInferiore.Layout()
        bSizer14.Fit( self.pannelloInferiore )
        self.m_splitter1.SplitHorizontally( self.pannelloSuperiore, self.pannelloInferiore, 0 )
        bSizer8.Add( self.m_splitter1, 1, wx.EXPAND, 5 )

        self.SetSizer( bSizer8 )
        self.Layout()

        # Connect Events
        self.load.Bind( wx.EVT_BUTTON, self.OnLoad )
        self.parse.Bind( wx.EVT_BUTTON, self.OnParse )
        self.title.Bind( wx.EVT_RADIOBUTTON, self.OnTitle )
        self.content.Bind( wx.EVT_RADIOBUTTON, self.OnContent )
        self.m_sdbSizer1OK.Bind( wx.EVT_BUTTON, self.OnOk )

    def __del__( self ):
        pass

    # Virtual event handlers, overide them in your derived class
    def OnLoad( self, event ):
        event.Skip()

    def OnParse( self, event ):
        event.Skip()

    def OnTitle( self, event ):
        event.Skip()

    def OnContent( self, event ):
        event.Skip()

    def OnOk( self, event ):
        event.Skip()

    def m_splitter1OnIdle( self, event ):
        # One-shot: set the sash position on first idle, then unbind so the
        # user can move the sash freely afterwards (wxFormBuilder idiom).
        self.m_splitter1.SetSashPosition( 0 )
        self.m_splitter1.Unbind( wx.EVT_IDLE )
| 36.945313 | 243 | 0.654049 | true | true | |
f7f850561c5eacba5840dc7d0b77f271aff3db51 | 6,742 | py | Python | src/champ_teleop/champ_teleop.py | chinaheyu/pydog_ws | c5ff65647845e6c96901fb925a2357507dfeecf0 | [
"MIT"
] | 5 | 2021-10-31T16:18:59.000Z | 2022-01-16T17:03:52.000Z | src/champ_teleop/champ_teleop.py | chinaheyu/pydog_ws | c5ff65647845e6c96901fb925a2357507dfeecf0 | [
"MIT"
] | 4 | 2021-09-11T06:32:48.000Z | 2021-09-16T19:24:16.000Z | src/champ_teleop/champ_teleop.py | chinaheyu/pydog_ws | c5ff65647845e6c96901fb925a2357507dfeecf0 | [
"MIT"
] | 1 | 2021-10-09T12:26:12.000Z | 2021-10-09T12:26:12.000Z | #!/usr/bin/env python
#credits to: https://github.com/ros-teleop/teleop_twist_keyboard/blob/master/teleop_twist_keyboard.py
from __future__ import print_function
import roslib; roslib.load_manifest('champ_teleop')
import rospy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from champ_msgs.msg import Pose as PoseLite
from geometry_msgs.msg import Pose as Pose
import tf
import sys, select, termios, tty
import numpy as np
class Teleop:
    """Keyboard and joystick teleoperation for a CHAMP-style quadruped.

    Publishes:
      * ``cmd_vel``        (geometry_msgs/Twist)  -- body velocity commands
      * ``body_pose/raw``  (champ_msgs/Pose)      -- lightweight pose command
      * ``body_pose``      (geometry_msgs/Pose)   -- same pose with quaternion

    Subscribes to ``joy`` (sensor_msgs/Joy).  Note that constructing the
    object blocks: __init__ ends by calling poll_keys(), which reads stdin
    in raw mode until Ctrl-C; joystick input is handled asynchronously by
    the ``joy`` callback in parallel.
    """

    def __init__(self):
        self.velocity_publisher = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
        self.pose_lite_publisher = rospy.Publisher('body_pose/raw', PoseLite, queue_size = 1)
        self.pose_publisher = rospy.Publisher('body_pose', Pose, queue_size = 1)
        self.joy_subscriber = rospy.Subscriber('joy', Joy, self.joy_callback)

        # Gait parameters are fetched but not used elsewhere in this class;
        # presumably kept for compatibility with the launch configuration.
        self.swing_height = rospy.get_param("gait/swing_height", 0)
        self.nominal_height = rospy.get_param("gait/nominal_height", 0)

        # Base linear speed (m/s) and turn rate (rad/s); rescaled at runtime
        # by the q/z, w/x, e/c keys.
        self.speed = rospy.get_param("~speed", 0.5)
        self.turn = rospy.get_param("~turn", 1.0)

        # Help banner printed by poll_keys() (and periodically re-printed).
        self.msg = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving around:
   u    i    o
   j    k    l
   m    ,    .
For Holonomic mode (strafing), hold down the shift key:
---------------------------
   U    I    O
   J    K    L
   M    <    >
t : up (+z)
b : down (-z)
anything else : stop
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
CTRL-C to quit
        """

        # key -> (x, y, z, yaw) direction multipliers applied to
        # self.speed / self.turn when building the Twist.
        self.velocityBindings = {
                'i':(1,0,0,0),
                'o':(1,0,0,-1),
                'j':(0,0,0,1),
                'l':(0,0,0,-1),
                'u':(1,0,0,1),
                ',':(-1,0,0,0),
                '.':(-1,0,0,1),
                'm':(-1,0,0,-1),
                'O':(1,-1,0,0),
                'I':(1,0,0,0),
                'J':(0,1,0,0),
                'L':(0,-1,0,0),
                'U':(1,1,0,0),
                '<':(-1,0,0,0),
                '>':(-1,-1,0,0),
                'M':(-1,1,0,0),
                'v':(0,0,1,0),
                'n':(0,0,-1,0),
            }

        # Body-pose key bindings.  NOTE(review): poll_keys() never consults
        # this table, so these keys currently have no effect in keyboard mode.
        self.poseBindings = {
                'f':(-1,0,0,0),
                'h':(1,0,0,0),
                't':(0,1,0,0),
                'b':(0,-1,0,0),
                'r':(0,0,1,0),
                'y':(0,0,-1,0),
            }

        # key -> (linear scale factor, angular scale factor).
        self.speedBindings={
                'q':(1.1,1.1),
                'z':(.9,.9),
                'w':(1.1,1),
                'x':(.9,1),
                'e':(1,1.1),
                'c':(1,.9),
            }

        # Blocks here until Ctrl-C.
        self.poll_keys()

    def joy_callback(self, data):
        """Translate a Joy message into velocity and body-pose commands.

        Assumes a gamepad layout where button 4 (L1/LB) switches the left
        stick between strafe and turn, and button 5 (R1/RB) switches the
        right stick between roll and yaw -- TODO confirm against the
        actual joy mapping in use.
        """
        twist = Twist()
        twist.linear.x = data.axes[1] * self.speed
        twist.linear.y = data.buttons[4] * data.axes[0] * self.speed
        twist.linear.z = 0.0
        twist.angular.x = 0.0
        twist.angular.y = 0.0
        twist.angular.z = (not data.buttons[4]) * data.axes[0] * self.turn
        self.velocity_publisher.publish(twist)

        body_pose_lite = PoseLite()
        body_pose_lite.x = 0
        body_pose_lite.y = 0
        # Magic constants are angle limits in radians (0.349066 = 20 deg,
        # 0.174533 = 10 deg, 0.436332 = 25 deg).
        body_pose_lite.roll = (not data.buttons[5]) *-data.axes[3] * 0.349066
        body_pose_lite.pitch = data.axes[4] * 0.174533
        body_pose_lite.yaw = data.buttons[5] * data.axes[3] * 0.436332
        # Axis 5 (analog trigger, presumably) lowers the body when pressed.
        if data.axes[5] < 0:
            body_pose_lite.z = data.axes[5] * 0.5

        self.pose_lite_publisher.publish(body_pose_lite)

        # Mirror the same pose as a full geometry_msgs/Pose with quaternion.
        body_pose = Pose()
        body_pose.position.z = body_pose_lite.z

        quaternion = tf.transformations.quaternion_from_euler(body_pose_lite.roll, body_pose_lite.pitch, body_pose_lite.yaw)
        body_pose.orientation.x = quaternion[0]
        body_pose.orientation.y = quaternion[1]
        body_pose.orientation.z = quaternion[2]
        body_pose.orientation.w = quaternion[3]

        self.pose_publisher.publish(body_pose)

    def poll_keys(self):
        """Read single keystrokes from stdin and publish Twist commands.

        Runs until rospy shutdown or Ctrl-C; on exit publishes a zero
        Twist (stop) and restores the saved terminal settings.
        """
        # Save cooked terminal state so it can be restored in `finally`.
        self.settings = termios.tcgetattr(sys.stdin)

        x = 0
        y = 0
        z = 0
        th = 0
        roll = 0
        pitch = 0
        yaw = 0
        status = 0
        cmd_attempts = 0

        try:
            print(self.msg)
            print(self.vels( self.speed, self.turn))

            while not rospy.is_shutdown():
                key = self.getKey()
                if key in self.velocityBindings.keys():
                    x = self.velocityBindings[key][0]
                    y = self.velocityBindings[key][1]
                    z = self.velocityBindings[key][2]
                    th = self.velocityBindings[key][3]

                    # Publish only after the key has repeated a couple of
                    # times; a single tap does not move the robot.
                    if cmd_attempts > 1:
                        twist = Twist()
                        twist.linear.x = x *self.speed
                        twist.linear.y = y * self.speed
                        twist.linear.z = z * self.speed
                        twist.angular.x = 0
                        twist.angular.y = 0
                        twist.angular.z = th * self.turn
                        self.velocity_publisher.publish(twist)

                    cmd_attempts += 1

                elif key in self.speedBindings.keys():
                    self.speed = self.speed * self.speedBindings[key][0]
                    self.turn = self.turn * self.speedBindings[key][1]

                    print(self.vels(self.speed, self.turn))
                    # Re-print the help banner every 15 speed changes.
                    if (status == 14):
                        print(self.msg)
                    status = (status + 1) % 15

                else:
                    # Any unbound key (including timeout '') stops motion.
                    cmd_attempts = 0
                    if (key == '\x03'):
                        break

        except Exception as e:
            print(e)

        finally:
            # Always stop the robot and restore the terminal.
            twist = Twist()
            twist.linear.x = 0
            twist.linear.y = 0
            twist.linear.z = 0
            twist.angular.x = 0
            twist.angular.y = 0
            twist.angular.z = 0
            self.velocity_publisher.publish(twist)

            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)

    def getKey(self):
        """Return one keypress, or '' if none arrives within 0.1 s."""
        tty.setraw(sys.stdin.fileno())
        rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
        if rlist:
            key = sys.stdin.read(1)
        else:
            key = ''

        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
        return key

    def vels(self, speed, turn):
        """Format the current speed/turn values for the console."""
        return "currently:\tspeed %s\tturn %s " % (speed,turn)

    def map(self, x, in_min, in_max, out_min, out_max):
        # Linear range remap (Arduino-style).  NOTE(review): unused within
        # this class; possibly kept for external callers.
        return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
if __name__ == "__main__":
    # Standalone entry point: register the ROS node, then Teleop.__init__
    # blocks inside poll_keys() until Ctrl-C.
    rospy.init_node('champ_teleop')
    teleop = Teleop()
from __future__ import print_function
import roslib; roslib.load_manifest('champ_teleop')
import rospy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from champ_msgs.msg import Pose as PoseLite
from geometry_msgs.msg import Pose as Pose
import tf
import sys, select, termios, tty
import numpy as np
class Teleop:
def __init__(self):
self.velocity_publisher = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
self.pose_lite_publisher = rospy.Publisher('body_pose/raw', PoseLite, queue_size = 1)
self.pose_publisher = rospy.Publisher('body_pose', Pose, queue_size = 1)
self.joy_subscriber = rospy.Subscriber('joy', Joy, self.joy_callback)
self.swing_height = rospy.get_param("gait/swing_height", 0)
self.nominal_height = rospy.get_param("gait/nominal_height", 0)
self.speed = rospy.get_param("~speed", 0.5)
self.turn = rospy.get_param("~turn", 1.0)
self.msg = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving around:
u i o
j k l
m , .
For Holonomic mode (strafing), hold down the shift key:
---------------------------
U I O
J K L
M < >
t : up (+z)
b : down (-z)
anything else : stop
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
CTRL-C to quit
"""
self.velocityBindings = {
'i':(1,0,0,0),
'o':(1,0,0,-1),
'j':(0,0,0,1),
'l':(0,0,0,-1),
'u':(1,0,0,1),
',':(-1,0,0,0),
'.':(-1,0,0,1),
'm':(-1,0,0,-1),
'O':(1,-1,0,0),
'I':(1,0,0,0),
'J':(0,1,0,0),
'L':(0,-1,0,0),
'U':(1,1,0,0),
'<':(-1,0,0,0),
'>':(-1,-1,0,0),
'M':(-1,1,0,0),
'v':(0,0,1,0),
'n':(0,0,-1,0),
}
self.poseBindings = {
'f':(-1,0,0,0),
'h':(1,0,0,0),
't':(0,1,0,0),
'b':(0,-1,0,0),
'r':(0,0,1,0),
'y':(0,0,-1,0),
}
self.speedBindings={
'q':(1.1,1.1),
'z':(.9,.9),
'w':(1.1,1),
'x':(.9,1),
'e':(1,1.1),
'c':(1,.9),
}
self.poll_keys()
def joy_callback(self, data):
twist = Twist()
twist.linear.x = data.axes[1] * self.speed
twist.linear.y = data.buttons[4] * data.axes[0] * self.speed
twist.linear.z = 0.0
twist.angular.x = 0.0
twist.angular.y = 0.0
twist.angular.z = (not data.buttons[4]) * data.axes[0] * self.turn
self.velocity_publisher.publish(twist)
body_pose_lite = PoseLite()
body_pose_lite.x = 0
body_pose_lite.y = 0
body_pose_lite.roll = (not data.buttons[5]) *-data.axes[3] * 0.349066
body_pose_lite.pitch = data.axes[4] * 0.174533
body_pose_lite.yaw = data.buttons[5] * data.axes[3] * 0.436332
if data.axes[5] < 0:
body_pose_lite.z = data.axes[5] * 0.5
self.pose_lite_publisher.publish(body_pose_lite)
body_pose = Pose()
body_pose.position.z = body_pose_lite.z
quaternion = tf.transformations.quaternion_from_euler(body_pose_lite.roll, body_pose_lite.pitch, body_pose_lite.yaw)
body_pose.orientation.x = quaternion[0]
body_pose.orientation.y = quaternion[1]
body_pose.orientation.z = quaternion[2]
body_pose.orientation.w = quaternion[3]
self.pose_publisher.publish(body_pose)
def poll_keys(self):
self.settings = termios.tcgetattr(sys.stdin)
x = 0
y = 0
z = 0
th = 0
roll = 0
pitch = 0
yaw = 0
status = 0
cmd_attempts = 0
try:
print(self.msg)
print(self.vels( self.speed, self.turn))
while not rospy.is_shutdown():
key = self.getKey()
if key in self.velocityBindings.keys():
x = self.velocityBindings[key][0]
y = self.velocityBindings[key][1]
z = self.velocityBindings[key][2]
th = self.velocityBindings[key][3]
if cmd_attempts > 1:
twist = Twist()
twist.linear.x = x *self.speed
twist.linear.y = y * self.speed
twist.linear.z = z * self.speed
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = th * self.turn
self.velocity_publisher.publish(twist)
cmd_attempts += 1
elif key in self.speedBindings.keys():
self.speed = self.speed * self.speedBindings[key][0]
self.turn = self.turn * self.speedBindings[key][1]
print(self.vels(self.speed, self.turn))
if (status == 14):
print(self.msg)
status = (status + 1) % 15
else:
cmd_attempts = 0
if (key == '\x03'):
break
except Exception as e:
print(e)
finally:
twist = Twist()
twist.linear.x = 0
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = 0
self.velocity_publisher.publish(twist)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
def getKey(self):
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
return key
def vels(self, speed, turn):
return "currently:\tspeed %s\tturn %s " % (speed,turn)
def map(self, x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
if __name__ == "__main__":
rospy.init_node('champ_teleop')
teleop = Teleop() | true | true |
f7f85101ff74d9e3c076f3d627ca34d67aa84a0e | 3,545 | py | Python | happy_python/happy_config.py | geekcampchina/happy-python | b421d29952bf76158375353b896dfb5eb814b948 | [
"MIT"
] | 1 | 2020-10-14T11:10:02.000Z | 2020-10-14T11:10:02.000Z | happy_python/happy_config.py | geekcampchina/happy-python | b421d29952bf76158375353b896dfb5eb814b948 | [
"MIT"
] | null | null | null | happy_python/happy_config.py | geekcampchina/happy-python | b421d29952bf76158375353b896dfb5eb814b948 | [
"MIT"
] | 1 | 2019-11-25T07:59:26.000Z | 2019-11-25T07:59:26.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
配置文件(INI)转换类
"""
import os
from abc import ABCMeta
from happy_python import HappyPyException
class HappyConfigBase(object, metaclass=ABCMeta):
    """Abstract base for INI-backed configuration objects.

    Subclasses declare instance attributes whose names match INI option
    names; ``section`` selects which INI section those options are read
    from (default ``'main'``).
    """

    _section = 'main'

    def __init__(self):
        pass

    @property
    def section(self):
        """Name of the INI section this object maps onto."""
        return self._section

    @section.setter
    def section(self, value):
        # Guard clause: reject falsy values (None / empty string) so the
        # section name always stays usable.
        if not value:
            raise ValueError("指定的 section 属性值无效。")
        self._section = value
class HappyConfigParser(object):
    """Populate a HappyConfigBase subclass instance from an INI file.

    Each instance attribute of the target object is looked up as an option
    in the object's ``section``; the INI value is converted according to
    the attribute's current type (str/int/bool/float/list).  Unknown types
    fall back to boolean parsing, matching historical behaviour.
    """

    @staticmethod
    def load(filename: str, happy_config_object: HappyConfigBase):
        """Read *filename* (UTF-8 INI) into *happy_config_object*.

        Prints an error and terminates the process on a missing file or a
        parse failure (legacy CLI-style error handling, kept as-is).
        """
        if not isinstance(happy_config_object, HappyConfigBase):
            raise HappyPyException('happy_config_object 不是 HappyConfigBase 类的子类对象。')

        try:
            if not os.path.exists(filename):
                print("[Error] 配置文件 %s 不存在" % filename)
                exit(1)

            with open(filename, 'r', encoding='UTF-8') as f:
                content = f.read()

            HappyConfigParser._loads(content, happy_config_object)
        except Exception as e:
            print("[Error] 配置文件读取错误:%s" % str(e))
            exit(1)

    @staticmethod
    def _loads(content: str, happy_config_object: HappyConfigBase):
        """Parse INI *content* and assign converted values onto the object."""
        from configparser import ConfigParser

        if not isinstance(happy_config_object, HappyConfigBase):
            raise HappyPyException('happy_config_object 不是 HappyConfigBase 类的子类对象。')

        try:
            cfg = ConfigParser()
            cfg.read_string(content)

            section = happy_config_object.section

            for name, value in happy_config_object.__dict__.items():
                # Skip the section bookkeeping attribute itself.
                if name == '_section':
                    continue

                t = type(value)

                # Convert according to the attribute's declared type.
                # Using setattr() instead of exec() fixes two defects of
                # the previous implementation: string values containing
                # quotes/newlines no longer break (or inject) the generated
                # source, and floats are no longer truncated to six decimal
                # places by the old '%f' formatting.
                if t is str:
                    v = cfg.get(section, name)
                elif t is int:
                    v = cfg.getint(section, name)
                elif t is bool:
                    v = cfg.getboolean(section, name)
                elif t is float:
                    v = cfg.getfloat(section, name)
                elif t is list:
                    # Lists are stored as comma-separated strings.
                    v = cfg.get(section, name).split(',')
                else:
                    # Historical fallback: unknown types parse as boolean.
                    v = cfg.getboolean(section, name)

                setattr(happy_config_object, name, v)
        except Exception as e:
            print("[Error] 配置文件读取错误:%s" % str(e))
            exit(1)

    @staticmethod
    def load_with_var(filename: str, var_dict: dict, happy_config_object: HappyConfigBase):
        """Like load(), but first substitutes ``${var}`` placeholders
        found in the file with the corresponding values from *var_dict*."""
        try:
            if not os.path.exists(filename):
                print("[Error] 配置文件 %s 不存在" % filename)
                exit(1)

            with open(filename, 'r', encoding='UTF-8') as f:
                content = f.read()

            for var, value in var_dict.items():
                content = content.replace('${%s}' % var, value)

            HappyConfigParser._loads(content, happy_config_object)
        except Exception as e:
            print("[Error] 配置文件读取错误:%s" % str(e))
            exit(1)
| 31.371681 | 91 | 0.53512 |
import os
from abc import ABCMeta
from happy_python import HappyPyException
class HappyConfigBase(object, metaclass=ABCMeta):
_section = 'main'
def __init__(self):
pass
@property
def section(self):
return self._section
@section.setter
def section(self, value):
if value:
self._section = value
else:
raise ValueError("指定的 section 属性值无效。")
class HappyConfigParser(object):
@staticmethod
def load(filename: str, happy_config_object: HappyConfigBase):
if not isinstance(happy_config_object, HappyConfigBase):
raise HappyPyException('happy_config_object 不是 HappyConfigBase 类的子类对象。')
try:
if not os.path.exists(filename):
print("[Error] 配置文件 %s 不存在" % filename)
exit(1)
with open(filename, 'r', encoding='UTF-8') as f:
content = f.readlines()
HappyConfigParser._loads(''.join(content), happy_config_object)
except Exception as e:
print("[Error] 配置文件读取错误:%s" % str(e))
exit(1)
@staticmethod
def _loads(content: str, happy_config_object: HappyConfigBase):
from configparser import ConfigParser
if not isinstance(happy_config_object, HappyConfigBase):
raise HappyPyException('happy_config_object 不是 HappyConfigBase 类的子类对象。')
try:
cfg = ConfigParser()
cfg.read_string(content)
class_attrs = happy_config_object.__dict__
section = happy_config_object.section
for name, value in class_attrs.items():
if name == '_section':
continue
t = type(value)
if t is str:
v = cfg.get(section, name)
exec("happy_config_object.%s='%s'" % (name, v))
elif t is int:
v = cfg.getint(section, name)
exec("happy_config_object.%s=%d" % (name, v))
elif t is bool:
v = cfg.getboolean(section, name)
exec("happy_config_object.%s=%s" % (name, v))
elif t is float:
v = cfg.getfloat(section, name)
exec("happy_config_object.%s=%f" % (name, v))
elif t is list:
v = cfg.get(section, name).split(',')
exec("happy_config_object.%s=%s" % (name, v))
else:
v = cfg.getboolean(section, name)
exec("happy_config_object.%s=%s" % (name, v))
except Exception as e:
print("[Error] 配置文件读取错误:%s" % str(e))
exit(1)
@staticmethod
def load_with_var(filename: str, var_dict: dict, happy_config_object: HappyConfigBase):
try:
if not os.path.exists(filename):
print("[Error] 配置文件 %s 不存在" % filename)
exit(1)
with open(filename, 'r', encoding='UTF-8') as f:
content = ''.join(f.readlines())
for var, value in var_dict.items():
content = content.replace('${%s}' % var, value)
HappyConfigParser._loads(content, happy_config_object)
except Exception as e:
print("[Error] 配置文件读取错误:%s" % str(e))
exit(1)
| true | true |
f7f85173a3f091da6309b3c9506444c9517430f5 | 596 | py | Python | examples/non_reliability.py | CharleeSF/brian2 | d2be1ed33a8ac51b1891f89a2544123a937c43ff | [
"BSD-2-Clause"
] | 2 | 2020-03-20T13:30:19.000Z | 2020-03-20T13:30:57.000Z | examples/non_reliability.py | CharleeSF/brian2 | d2be1ed33a8ac51b1891f89a2544123a937c43ff | [
"BSD-2-Clause"
] | null | null | null | examples/non_reliability.py | CharleeSF/brian2 | d2be1ed33a8ac51b1891f89a2544123a937c43ff | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Reliability of spike timing.
See e.g. Mainen & Sejnowski (1995) for experimental results in vitro.
Here: a constant current is injected in all trials.
'''
from brian2 import *

# N identical, unconnected noisy leaky integrate-and-fire "trials": each
# neuron gets the same constant drive (1.1) plus independent noise (xi) of
# amplitude sigma, so the spread of spike times across rows visualises
# (un)reliability of spike timing.
N = 25
tau = 20*ms
sigma = .015
eqs_neurons = '''
dx/dt = (1.1 - x) / tau + sigma * (2 / tau)**.5 * xi : 1 (unless refractory)
'''
neurons = NeuronGroup(N, model=eqs_neurons, threshold='x > 1', reset='x = 0',
                      refractory=5*ms, method='euler')
spikes = SpikeMonitor(neurons)

run(500*ms)

# Raster plot: one row per trial/neuron, one dot per spike.
plot(spikes.t/ms, spikes.i, '.k')
xlabel('Time (ms)')
ylabel('Neuron index')
show()
| 23.84 | 77 | 0.639262 |
from brian2 import *
N = 25
tau = 20*ms
sigma = .015
eqs_neurons = '''
dx/dt = (1.1 - x) / tau + sigma * (2 / tau)**.5 * xi : 1 (unless refractory)
'''
neurons = NeuronGroup(N, model=eqs_neurons, threshold='x > 1', reset='x = 0',
refractory=5*ms, method='euler')
spikes = SpikeMonitor(neurons)
run(500*ms)
plot(spikes.t/ms, spikes.i, '.k')
xlabel('Time (ms)')
ylabel('Neuron index')
show()
| true | true |
f7f8525d5a1f2e2461e06c67ac30feb7935b1125 | 385 | py | Python | RRMS/RRMS/asgi.py | starwriter34/Rehome-Rescue-Management-Software | 44361617ce35deb32bddeb980b01c463fd5e35e3 | [
"MIT"
] | null | null | null | RRMS/RRMS/asgi.py | starwriter34/Rehome-Rescue-Management-Software | 44361617ce35deb32bddeb980b01c463fd5e35e3 | [
"MIT"
] | 5 | 2021-03-19T02:38:53.000Z | 2021-06-10T19:04:18.000Z | RRMS/RRMS/asgi.py | starwriter34/Rehome-Rescue-Management-Software | 44361617ce35deb32bddeb980b01c463fd5e35e3 | [
"MIT"
] | null | null | null | """
ASGI config for RRMS project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RRMS.settings')
application = get_asgi_application()
| 22.647059 | 78 | 0.781818 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RRMS.settings')
application = get_asgi_application()
| true | true |
f7f853bd6ec6ab92099e3f5f52da08f9c786ec1c | 729 | py | Python | examples/python/example_angular.py | gony0/n2 | 824981473b3c499323a1f677c78fdc246a065a2a | [
"Apache-2.0"
] | 7 | 2019-04-17T07:21:01.000Z | 2019-10-08T07:31:22.000Z | examples/python/example_angular.py | javaCJ/HNSW- | a6f97d3cf189f2de4f1de4a88f69c4a43fb629ac | [
"Apache-2.0"
] | 2 | 2019-06-25T10:00:57.000Z | 2019-10-26T14:55:23.000Z | examples/python/example_angular.py | javaCJ/HNSW- | a6f97d3cf189f2de4f1de4a88f69c4a43fb629ac | [
"Apache-2.0"
] | 1 | 2021-11-03T14:59:27.000Z | 2021-11-03T14:59:27.000Z | from n2 import HnswIndex
import random

# Example: build a small HNSW index over 1000 random 3-d vectors, persist
# it, reload it with the angular (cosine) metric, and run both id-based
# and vector-based k-nearest-neighbour queries.
f = 3  # vector dimensionality
t = HnswIndex(f) # HnswIndex(f, "L2 or angular")
for i in range(1000):  # range(): xrange() does not exist on Python 3
    v = [random.gauss(0, 1) for z in range(f)]
    t.add_data(v)

t.build(m=5, max_m0=10, n_threads=4)
t.save('test.n2')

u = HnswIndex(f, "angular")
u.load('test.n2')

search_id = 1
k = 3  # number of neighbours to retrieve
neighbor_ids = u.search_by_id(search_id, k)
print(
    "[search_by_id]: Nearest neighborhoods of id {}: {}".format(
        search_id,
        neighbor_ids))

example_vector_query = [random.gauss(0, 1) for z in range(f)]
nns = u.search_by_vector(example_vector_query, k, include_distances=True)
print(
    "[search_by_vector]: Nearest neighborhoods of vector {}: {}".format(
        example_vector_query,
        nns))
| 24.3 | 73 | 0.668038 | from n2 import HnswIndex
import random
f = 3
t = HnswIndex(f)
for i in xrange(1000):
v = [random.gauss(0, 1) for z in xrange(f)]
t.add_data(v)
t.build(m=5, max_m0=10, n_threads=4)
t.save('test.n2')
u = HnswIndex(f, "angular")
u.load('test.n2')
search_id = 1
k = 3
neighbor_ids = u.search_by_id(search_id, k)
print(
"[search_by_id]: Nearest neighborhoods of id {}: {}".format(
search_id,
neighbor_ids))
example_vector_query = [random.gauss(0, 1) for z in xrange(f)]
nns = u.search_by_vector(example_vector_query, k, include_distances=True)
print(
"[search_by_vector]: Nearest neighborhoods of vector {}: {}".format(
example_vector_query,
nns))
| true | true |
f7f854123b33a3d1454c08b490ec092e9ab5baa9 | 3,322 | py | Python | src/sdk/pynni/tests/test_trial.py | kitstar/nni | c5c0fa2e2dede71d2797a8bafa85c90f59d311f8 | [
"MIT"
] | null | null | null | src/sdk/pynni/tests/test_trial.py | kitstar/nni | c5c0fa2e2dede71d2797a8bafa85c90f59d311f8 | [
"MIT"
] | 4 | 2022-02-10T06:23:52.000Z | 2022-03-08T23:37:29.000Z | src/sdk/pynni/tests/test_trial.py | kitstar/nni | c5c0fa2e2dede71d2797a8bafa85c90f59d311f8 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
import nni
import nni.platform.test as test_platform
import nni.trial
import numpy as np
from unittest import TestCase, main
class TrialTestCase(TestCase):
    """Tests for nni's trial-side reporting API against the test platform.

    nni.platform.test records the most recently reported metric, which
    each test inspects via test_platform.get_last_metric().
    """

    def setUp(self):
        # Inject fake hyper-parameters as if the tuner had generated them.
        self._trial_params = { 'msg': 'hi', 'x': 123, 'dict': { 'key': 'value', 'y': None } }
        nni.trial._params = { 'parameter_id': 'test_param', 'parameters': self._trial_params }

    def test_get_next_parameter(self):
        # get_next_parameter() should hand back exactly the injected params.
        self.assertEqual(nni.get_next_parameter(), self._trial_params)

    def test_report_intermediate_result(self):
        nni.report_intermediate_result(123)
        # Intermediate metrics are tagged PERIODICAL and carry the value verbatim.
        self.assertEqual(test_platform.get_last_metric(), {
            'parameter_id': 'test_param',
            'trial_job_id': 'test_trial_job_id',
            'type': 'PERIODICAL',
            'sequence': 0,
            'value': 123
        })

    def test_report_final_result_simple(self):
        self._test_report_final_result(123, 123)

    def test_report_final_result_object(self):
        # Arbitrary JSON-serialisable objects pass through unchanged.
        obj = ['obj1', {'key1': 'v1', 'k2': None}, 233, 0.456]
        self._test_report_final_result(obj, obj)

    def test_report_final_result_numpy(self):
        # numpy scalars are accepted and compare equal to the plain float.
        self._test_report_final_result(np.float32(0.25), 0.25)

    def test_report_final_result_nparray(self):
        # numpy arrays can be reported; verify element-wise content survives.
        arr = np.array([[1, 2, 3], [4, 5, 6]])
        nni.report_final_result(arr)
        out = test_platform.get_last_metric()
        self.assertEqual(len(arr), 2)
        self.assertEqual(len(arr[0]), 3)
        self.assertEqual(len(arr[1]), 3)
        self.assertEqual(arr[0][0], 1)
        self.assertEqual(arr[0][1], 2)
        self.assertEqual(arr[0][2], 3)
        self.assertEqual(arr[1][0], 4)
        self.assertEqual(arr[1][1], 5)
        self.assertEqual(arr[1][2], 6)

    def _test_report_final_result(self, in_, out):
        # Helper: report `in_` and expect the FINAL metric to carry `out`.
        nni.report_final_result(in_)
        self.assertEqual(test_platform.get_last_metric(), {
            'parameter_id': 'test_param',
            'trial_job_id': 'test_trial_job_id',
            'type': 'FINAL',
            'sequence': 0,
            'value': out
        })
if __name__ == '__main__':
    # Allow running this test module directly; unittest.main() discovers
    # the TestCase above.
    main()
| 39.082353 | 100 | 0.654124 |
import nni
import nni.platform.test as test_platform
import nni.trial
import numpy as np
from unittest import TestCase, main
class TrialTestCase(TestCase):
def setUp(self):
self._trial_params = { 'msg': 'hi', 'x': 123, 'dict': { 'key': 'value', 'y': None } }
nni.trial._params = { 'parameter_id': 'test_param', 'parameters': self._trial_params }
def test_get_next_parameter(self):
self.assertEqual(nni.get_next_parameter(), self._trial_params)
def test_report_intermediate_result(self):
nni.report_intermediate_result(123)
self.assertEqual(test_platform.get_last_metric(), {
'parameter_id': 'test_param',
'trial_job_id': 'test_trial_job_id',
'type': 'PERIODICAL',
'sequence': 0,
'value': 123
})
def test_report_final_result_simple(self):
self._test_report_final_result(123, 123)
def test_report_final_result_object(self):
obj = ['obj1', {'key1': 'v1', 'k2': None}, 233, 0.456]
self._test_report_final_result(obj, obj)
def test_report_final_result_numpy(self):
self._test_report_final_result(np.float32(0.25), 0.25)
def test_report_final_result_nparray(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
nni.report_final_result(arr)
out = test_platform.get_last_metric()
self.assertEqual(len(arr), 2)
self.assertEqual(len(arr[0]), 3)
self.assertEqual(len(arr[1]), 3)
self.assertEqual(arr[0][0], 1)
self.assertEqual(arr[0][1], 2)
self.assertEqual(arr[0][2], 3)
self.assertEqual(arr[1][0], 4)
self.assertEqual(arr[1][1], 5)
self.assertEqual(arr[1][2], 6)
def _test_report_final_result(self, in_, out):
nni.report_final_result(in_)
self.assertEqual(test_platform.get_last_metric(), {
'parameter_id': 'test_param',
'trial_job_id': 'test_trial_job_id',
'type': 'FINAL',
'sequence': 0,
'value': out
})
if __name__ == '__main__':
main()
| true | true |
f7f854423745070268712b5cedae1f26820f2ae5 | 4,927 | py | Python | Extra/maths.py | niefermar/APPIAN-PET-APPIAN | 895fa3aaad7f444d6f4e6f7c698ddace9415664c | [
"MIT"
] | 1 | 2021-04-15T01:46:36.000Z | 2021-04-15T01:46:36.000Z | Extra/maths.py | niefermar/APPIAN-PET-APPIAN | 895fa3aaad7f444d6f4e6f7c698ddace9415664c | [
"MIT"
] | null | null | null | Extra/maths.py | niefermar/APPIAN-PET-APPIAN | 895fa3aaad7f444d6f4e6f7c698ddace9415664c | [
"MIT"
] | null | null | null | import os
import numpy as np
from nipype.interfaces.base import CommandLine, CommandLineInputSpec #, Info
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,isdefined)
class MathsOutput(TraitedSpec):
    """Output spec shared by all mincmath wrappers below."""
    out_file = File( desc="image to write after calculations")
class MathsInput(CommandLineInputSpec):
in_file = File(position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on")
out_file = File(position=-1, argstr="%s", mandatory=True, desc="image to operate on")
_opmaths = ["add", "sub", "mult", "div", "pd", "eq", "ne", "ge", "gt", "and", "or", "not", "isnan", 'nisnan']
operation = traits.Enum(*_opmaths, mandatory=True, argstr="-%s", position=3, desc="math operations to perform")
operand_file = File(exists=True, argstr="%s", mandatory=True, position=4, desc="second image to perform operation with")
clobber = traits.Bool(argstr="-clobber", usedefault=True, default_value=True, desc="Overwrite output file")
verbose = traits.Bool(argstr="-verbose", usedefault=True, default_value=True, desc="Write messages indicating progress")
class MathsCommand(CommandLine):
    """nipype wrapper running ``mincmath -clob`` on two images (MathsInput)."""

    _cmd = "mincmath -clob"
    _suffix = "_maths"
    input_spec = MathsInput
    output_spec = MathsOutput

    def _parse_inputs(self, skip=None):
        # Default the output filename from the input image before nipype
        # renders the command line.
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_fname(self.inputs.in_file,
                                                   suffix=self._suffix)
        return super(MathsCommand, self)._parse_inputs(skip=skip or [])

    def _list_outputs(self):
        results = self.output_spec().get()
        results["out_file"] = self.inputs.out_file
        return results

    def _gen_filename(self, name):
        return self._list_outputs()["out_file"] if name == "out_file" else None
class ConstantMathsInput(CommandLineInputSpec):
    """Input spec for mincmath with one constant operand:
    <op> in_file -const <value> out_file."""
    in_file = File(position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on")
    out_file = File(position=-1, argstr="%s", mandatory=True, desc="image to operate on")

    _opmaths = ["add", "sub", "mult", "div"]
    operation = traits.Enum(*_opmaths, mandatory=True, argstr="-%s", position=3, desc="math operations to perform")

    # The literal "-const" flag that precedes the constant on the command line.
    opt_constant = traits.Str("-const", mandatory=True, argstr="%s", position=4, desc="-const")
    operand_value = traits.Float(exists=True, argstr="%.8f", mandatory=True, position=5, xor=["operand_value"], desc="value to perform operation with")
class ConstantMathsCommand(CommandLine):
    """Command-line interface for ``mincmath`` with one scalar constant operand."""

    _cmd = "mincmath"
    _suffix = "_maths"
    input_spec = ConstantMathsInput
    output_spec = MathsOutput

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        # Fall back to an auto-generated output name if the caller gave none.
        if not isdefined(self.inputs.out_file):
            generated = self._gen_fname(self.inputs.in_file, suffix=self._suffix)
            self.inputs.out_file = generated
        return super(ConstantMathsCommand, self)._parse_inputs(skip=skip)

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_filename(self, name):
        if name != "out_file":
            return None
        return self._list_outputs()["out_file"]
class Constant2MathsInput(CommandLineInputSpec):
    """Input specification for ``mincmath`` with two scalar constant operands
    (e.g. ``-const2 a b``)."""

    in_file = File(position=2, argstr="%s", exists=True, mandatory=True,
                    desc="image to operate on")
    out_file = File(position=-1, argstr="%s", mandatory=True,
                    desc="image to operate on")
    # Operations exposed for the two-constant variant.
    _opmaths = ["add", "sub", "mult", "div", "exp", "log"]
    operation = traits.Enum(*_opmaths, mandatory=True, argstr="-%s",
                            position=3,desc="math operations to perform")
    # Flag string (e.g. "-const2") preceding the two scalar operands; not
    # mandatory here, unlike the single-constant spec above.
    opt_constant = traits.Str(argstr="%s", position=4, desc="-const2")
    # NOTE(review): exists=True has no effect on Float traits and xor with the
    # trait's own name looks unintentional -- confirm against nipype docs.
    operand_value = traits.Float(exists=True, argstr="%.8f", mandatory=True, position=5, xor=["operand_value"],
                                desc="value to perform operation with")
    operand_value2 = traits.Float(exists=True, argstr="%.8f", mandatory=True, position=6, xor=["operand_value2"],
                                desc="2nde value to perform operation with")
class Constant2MathsCommand(CommandLine):
    """Command-line interface for ``mincmath`` with two scalar constant operands."""

    _cmd = "mincmath"
    _suffix = "_maths"
    input_spec = Constant2MathsInput
    output_spec = MathsOutput

    def _parse_inputs(self, skip=None):
        skip = skip if skip is not None else []
        if not isdefined(self.inputs.out_file):
            # No explicit output requested: derive one from the input name.
            self.inputs.out_file = self._gen_fname(self.inputs.in_file,
                                                   suffix=self._suffix)
        return super(Constant2MathsCommand, self)._parse_inputs(skip=skip)

    def _list_outputs(self):
        out = self.output_spec().get()
        out["out_file"] = self.inputs.out_file
        return out

    def _gen_filename(self, name):
        return self._list_outputs()["out_file"] if name == "out_file" else None
| 38.492188 | 151 | 0.657195 | import os
import numpy as np
from nipype.interfaces.base import CommandLine, CommandLineInputSpec
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,isdefined)
class MathsOutput(TraitedSpec):
out_file = File( desc="image to write after calculations")
class MathsInput(CommandLineInputSpec):
in_file = File(position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on")
out_file = File(position=-1, argstr="%s", mandatory=True, desc="image to operate on")
_opmaths = ["add", "sub", "mult", "div", "pd", "eq", "ne", "ge", "gt", "and", "or", "not", "isnan", 'nisnan']
operation = traits.Enum(*_opmaths, mandatory=True, argstr="-%s", position=3, desc="math operations to perform")
operand_file = File(exists=True, argstr="%s", mandatory=True, position=4, desc="second image to perform operation with")
clobber = traits.Bool(argstr="-clobber", usedefault=True, default_value=True, desc="Overwrite output file")
verbose = traits.Bool(argstr="-verbose", usedefault=True, default_value=True, desc="Write messages indicating progress")
class MathsCommand(CommandLine):
_cmd = "mincmath -clob"
_suffix = "_maths"
input_spec = MathsInput
output_spec = MathsOutput
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_fname(self.inputs.in_file, suffix=self._suffix)
return super(MathsCommand, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = self.inputs.out_file
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()["out_file"]
return None
class ConstantMathsInput(CommandLineInputSpec):
in_file = File(position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on")
out_file = File(position=-1, argstr="%s", mandatory=True, desc="image to operate on")
_opmaths = ["add", "sub", "mult", "div"]
operation = traits.Enum(*_opmaths, mandatory=True, argstr="-%s", position=3, desc="math operations to perform")
opt_constant = traits.Str("-const", mandatory=True, argstr="%s", position=4, desc="-const")
operand_value = traits.Float(exists=True, argstr="%.8f", mandatory=True, position=5, xor=["operand_value"], desc="value to perform operation with")
class ConstantMathsCommand(CommandLine):
_cmd = "mincmath"
_suffix = "_maths"
input_spec = ConstantMathsInput
output_spec = MathsOutput
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_fname(self.inputs.in_file, suffix=self._suffix)
return super(ConstantMathsCommand, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = self.inputs.out_file
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()["out_file"]
return None
class Constant2MathsInput(CommandLineInputSpec):
in_file = File(position=2, argstr="%s", exists=True, mandatory=True,
desc="image to operate on")
out_file = File(position=-1, argstr="%s", mandatory=True,
desc="image to operate on")
_opmaths = ["add", "sub", "mult", "div", "exp", "log"]
operation = traits.Enum(*_opmaths, mandatory=True, argstr="-%s",
position=3,desc="math operations to perform")
opt_constant = traits.Str(argstr="%s", position=4, desc="-const2")
operand_value = traits.Float(exists=True, argstr="%.8f", mandatory=True, position=5, xor=["operand_value"],
desc="value to perform operation with")
operand_value2 = traits.Float(exists=True, argstr="%.8f", mandatory=True, position=6, xor=["operand_value2"],
desc="2nde value to perform operation with")
class Constant2MathsCommand(CommandLine):
_cmd = "mincmath"
_suffix = "_maths"
input_spec = Constant2MathsInput
output_spec = MathsOutput
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_fname(self.inputs.in_file, suffix=self._suffix)
return super(Constant2MathsCommand, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = self.inputs.out_file
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()["out_file"]
return None
| true | true |
f7f854aa83cf581850b148374c8bf6da51fe6f82 | 4,539 | py | Python | ducktape/cluster/windows_remoteaccount.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | ducktape/cluster/windows_remoteaccount.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | ducktape/cluster/windows_remoteaccount.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import boto3
import os
import base64
import winrm
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
from botocore.exceptions import ClientError
from ducktape.cluster.remoteaccount import RemoteAccount, RemoteCommandError
class WindowsRemoteAccount(RemoteAccount):
    """
    Windows remote accounts are currently only supported in EC2. See the
    ``winrm_client`` property for how the WinRM password is fetched, which is
    currently specific to AWS.

    The Windows AMI needs to also have an SSH server running to support SSH
    commands, SCP, and rsync.
    """
    WINRM_USERNAME = "Administrator"

    def __init__(self, ssh_config, externally_routable_ip=None, logger=None):
        super(WindowsRemoteAccount, self).__init__(ssh_config, externally_routable_ip=externally_routable_ip,
                                                   logger=logger)
        self.os = RemoteAccount.WINDOWS
        self._winrm_client = None  # created lazily by the winrm_client property

    @property
    def winrm_client(self):
        """Lazily build and cache a WinRM session.

        The Administrator password is obtained by asking EC2 for this
        instance's encrypted password data and decrypting it with the SSH
        private key configured for the account.

        TODO: currently this only works in AWS EC2 provisioned by Vagrant.
        Add support for other environments.
        """
        if self._winrm_client:
            return self._winrm_client

        # First get the instance ID of this machine from Vagrant's metadata.
        ec2_instance_id_path = os.path.join(os.getcwd(), ".vagrant", "machines", self.ssh_config.host, "aws", "id")
        try:
            with open(ec2_instance_id_path, 'r') as instance_id_file:
                ec2_instance_id = instance_id_file.read().strip()
            if not ec2_instance_id:
                raise ValueError("empty instance id file")
        except Exception:
            raise Exception("Could not extract EC2 instance ID from local file: %s" % ec2_instance_id_path)

        self._log(logging.INFO, "Found EC2 instance id: %s" % ec2_instance_id)

        # Then get the encrypted password from EC2.
        client = boto3.client('ec2')
        try:
            response = client.get_password_data(InstanceId=ec2_instance_id)
        except ClientError as ce:
            # Fixed: ClientError has no .message attribute on Python 3; use the
            # structured error code from the response instead.
            if ce.response.get("Error", {}).get("Code") == "InvalidInstanceID.NotFound":
                raise Exception("The instance id '%s' couldn't be found. Is the correct AWS region configured?"
                                % ec2_instance_id)
            raise

        self._log(logging.INFO, "Fetched encrypted winrm password and will decrypt with private key: %s"
                  % self.ssh_config.identityfile)

        # Then decrypt the password using the private key.
        with open(self.ssh_config.identityfile, 'r') as key_file:
            rsa_key = RSA.importKey(key_file.read())
        cipher = PKCS1_v1_5.new(rsa_key)
        winrm_password = cipher.decrypt(base64.b64decode(response["PasswordData"]), None)
        self._winrm_client = winrm.Session(self.ssh_config.hostname, auth=(WindowsRemoteAccount.WINRM_USERNAME,
                                                                           winrm_password))
        return self._winrm_client

    def fetch_externally_routable_ip(self, is_aws):
        if not is_aws:
            # Fixed: NotImplemented is a sentinel value, not an exception class;
            # raising it raised a TypeError instead of the intended error.
            raise NotImplementedError("Windows is only supported in AWS.")
        # EC2 windows machines aren't given an externally routable IP. Use the hostname instead.
        return self.ssh_config.hostname

    def run_winrm_command(self, cmd, allow_fail=False):
        """Run *cmd* over WinRM; raise RemoteCommandError on a non-zero status
        unless *allow_fail* is True. Returns the command's status code."""
        self._log(logging.DEBUG, "Running winrm command: %s" % cmd)
        result = self.winrm_client.run_cmd(cmd)
        if not allow_fail and result.status_code != 0:
            raise RemoteCommandError(self, cmd, result.status_code, result.std_err)
        return result.status_code
| 40.891892 | 115 | 0.659617 |
import logging
import boto3
import os
import base64
import winrm
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
from botocore.exceptions import ClientError
from ducktape.cluster.remoteaccount import RemoteAccount, RemoteCommandError
class WindowsRemoteAccount(RemoteAccount):
WINRM_USERNAME = "Administrator"
def __init__(self, ssh_config, externally_routable_ip=None, logger=None):
super(WindowsRemoteAccount, self).__init__(ssh_config, externally_routable_ip=externally_routable_ip,
logger=logger)
self.os = RemoteAccount.WINDOWS
self._winrm_client = None
@property
def winrm_client(self):
if self._winrm_client:
return self._winrm_client
ec2_instance_id_path = os.path.join(os.getcwd(), ".vagrant", "machines", self.ssh_config.host, "aws", "id")
instance_id_file = None
try:
instance_id_file = open(ec2_instance_id_path, 'r')
ec2_instance_id = instance_id_file.read().strip()
if not ec2_instance_id or ec2_instance_id == "":
raise Exception
except:
raise Exception("Could not extract EC2 instance ID from local file: %s" % ec2_instance_id_path)
finally:
if instance_id_file:
instance_id_file.close()
self._log(logging.INFO, "Found EC2 instance id: %s" % ec2_instance_id)
# then get the encrypted password.
client = boto3.client('ec2')
try:
response = client.get_password_data(InstanceId=ec2_instance_id)
except ClientError as ce:
if "InvalidInstanceID.NotFound" in ce.message:
raise Exception("The instance id '%s' couldn't be found. Is the correct AWS region configured?"
% ec2_instance_id)
else:
raise ce
self._log(logging.INFO, "Fetched encrypted winrm password and will decrypt with private key: %s"
% self.ssh_config.identityfile)
key_file = None
try:
key_file = open(self.ssh_config.identityfile, 'r')
key = key_file.read()
rsa_key = RSA.importKey(key)
cipher = PKCS1_v1_5.new(rsa_key)
winrm_password = cipher.decrypt(base64.b64decode(response["PasswordData"]), None)
self._winrm_client = winrm.Session(self.ssh_config.hostname, auth=(WindowsRemoteAccount.WINRM_USERNAME,
winrm_password))
finally:
if key_file:
key_file.close()
return self._winrm_client
def fetch_externally_routable_ip(self, is_aws):
if not is_aws:
raise NotImplemented("Windows is only supported in AWS.")
return self.ssh_config.hostname
def run_winrm_command(self, cmd, allow_fail=False):
self._log(logging.DEBUG, "Running winrm command: %s" % cmd)
result = self.winrm_client.run_cmd(cmd)
if not allow_fail and result.status_code != 0:
raise RemoteCommandError(self, cmd, result.status_code, result.std_err)
return result.status_code
| true | true |
f7f85792d127e2f11e241ebd9a78001deb199f38 | 32,295 | py | Python | stochpy/modules/Analysis.py | bgoli/stochpy | ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d | [
"BSD-3-Clause"
] | 35 | 2016-02-29T22:56:07.000Z | 2022-03-06T17:21:29.000Z | stochpy/modules/Analysis.py | bgoli/stochpy | ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d | [
"BSD-3-Clause"
] | 6 | 2016-10-13T12:43:54.000Z | 2021-04-30T09:06:59.000Z | stochpy/modules/Analysis.py | bgoli/stochpy | ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d | [
"BSD-3-Clause"
] | 13 | 2016-07-07T19:49:25.000Z | 2021-05-14T20:24:17.000Z | #! /usr/bin/env python
"""
Analysis
========
This module provides functions for Stochastic Simulation Algorithms Analysis (SSA). Implemented SSAs import this module to perform their analysis. Plotting of time series (species, propensities), distributions (species, propensities, waiting times), autocorrelations, and autocovariances (species, propensities) is possible.
Written by TR Maarleveld, Amsterdam, The Netherlands
E-mail: tmd200@users.sourceforge.net
"""
from __future__ import division, print_function, absolute_import
# Fixed: 'sys' (and 'copy') must be imported before the NumPy guard below;
# previously 'import copy,sys' came after 'sys.exit()', so a missing NumPy
# installation produced a NameError instead of a clean exit.
import copy
import sys

from stochpy import _IsPlotting
if _IsPlotting:
    from stochpy import plt
    from stochpy import matplotlib
    from matplotlib import gridspec, colors as clr

from stochpy import _IsNumPy
if _IsNumPy:
    import numpy as np
else:
    # Everything in this module requires NumPy.
    sys.exit()
def getDataForTimeSimPlot(Arr_data, npoints=100000, quiet=False):
    """
    Convert raw event output into piecewise-constant (step-plot) data.

    Input:
     - *Arr_data* (numpy array) rows of [time, value_1, ..., value_n]
     - *npoints* [default = 100000] (integer) maximum number of events to plot
     - *quiet* [default = False] (boolean) suppress the down-sampling message
    """
    n_events = len(Arr_data)
    if n_events > npoints:
        # Large data set: down-sample to ~npoints events, still emitting a
        # horizontal segment (carried previous state) before each sample.
        stepped = [Arr_data[0]]
        stride = n_events // int(abs(npoints))
        for idx in range(stride, n_events, stride):
            t = Arr_data[idx][0]
            carried = copy.deepcopy(stepped[-1][1:].tolist())
            carried.insert(0, t)
            stepped.append(carried)
            stepped.append(Arr_data[idx])
        if not quiet:
            print("Info: Plotting {0:d} out of {1:d} points. Use the argument 'npoints' to alter the number of plotted events.".format(npoints,n_events) )
    else:
        # Small data set: duplicate each previous state at the next event time
        # so the trace stays constant between events.
        stepped = copy.deepcopy(Arr_data.tolist())
        insert_at = 1
        for idx in range(1, n_events):
            t = Arr_data[idx][0]
            held = copy.deepcopy(Arr_data[idx - 1])
            held[0] = t
            stepped.insert(insert_at, held)
            insert_at += 2
    return np.array(stepped)
def Count(data, edges):
    """
    Count how many values of *data* fall into each half-open interval
    [edges[i], edges[i+1]).

    Input:
     - *data* (list)
     - *edges* (list) ascending bin edges
    Output:
     - numpy array of length len(edges); the last entry is never filled and
       stays zero (kept for backward compatibility with callers).
    """
    counts = np.zeros(len(edges))
    for value in data:
        # No early break: with sorted edges the intervals are disjoint, so at
        # most one bin can match anyway.
        for i, (lo, hi) in enumerate(zip(edges[:-1], edges[1:])):
            if lo <= value < hi:
                counts[i] += 1
    return np.array(counts)
def GetSpeciesDistributions(sim_output, species):
    """
    Determine probability mass functions, means, standard deviations, and the
    raw moments (1-4) of each species from a time series.

    Probabilities are time-weighted: P(x) = (time spent at copy number x) / T.
    Mean = mu = sum(x*P(x)); Variance = sum(x^2 * p(x)) - mu**2.

    Input:
     - *sim_output* (list) rows of [time, species_1, ..., species_n]
     - *species* (list) species identifiers
    Output:
     - *L_probability_mass* (list) per species: [values, probabilities]
     - *D_means* (dict)
     - *D_stds* (dict)
     - *D_moments* (dict)
    """
    n_species = len(species)
    residence = [{} for _ in range(n_species)]   # species -> {copy number: total time}
    t_begin = sim_output[0][0]
    t_end = sim_output[-1][0]
    n_datapoints = len(sim_output)
    D_means = {}
    D_stds = {}
    D_moments = {}
    L_probability_mass = []
    if n_datapoints > 1:
        for row in range(n_datapoints - 1):
            dt = sim_output[row + 1][0] - sim_output[row][0]
            for k in range(n_species):
                amount = int(sim_output[row][k + 1])
                residence[k][amount] = residence[k].get(amount, 0) + dt
        for k, s_id in enumerate(species):
            x = np.array(sorted(residence[k]), dtype=int)
            p_x = np.array([residence[k][value] for value in x]) / float(t_end - t_begin)
            mu = (x * p_x).sum()
            mu_sq = (x ** 2 * p_x).sum()
            std = (mu_sq - mu ** 2) ** 0.5
            L_probability_mass.append([x, p_x])
            D_means[s_id] = mu
            D_stds[s_id] = std
            D_moments[s_id] = {'1': mu,
                               '2': mu_sq,
                               '3': (x ** 3 * p_x).sum(),
                               '4': (x ** 4 * p_x).sum()}
    return (L_probability_mass, D_means, D_stds, D_moments)
def GetDataDistributions(sim_output, identifiers):
    """
    Determine probability mass functions, means, standard deviations, and the
    raw moments (1-4) for arbitrary tracked quantities.

    This function differs from GetSpeciesDistributions() in that it does not
    cast the tracked values to integers.

    Probabilities are time-weighted: P(x) = (time spent at value x) / T.
    Mean = mu = sum(x*P(x)); Variance = sum(x^2 * p(x)) - mu**2.

    Input:
     - *sim_output* (list) rows of [time, value_1, ..., value_n]
     - *identifiers* (list)
    Output:
     - *L_probability_mass* (list)
     - *D_means* (dict)
     - *D_stds* (dict)
     - *D_moments* (dict)
    """
    n_identifiers = len(identifiers)
    residence = [{} for _ in range(n_identifiers)]   # identifier -> {value: total time}
    t_begin = sim_output[0][0]
    t_end = sim_output[-1][0]
    n_datapoints = len(sim_output)
    D_means = {}
    D_stds = {}
    D_moments = {}
    L_probability_mass = []
    if n_datapoints > 1:
        for row in range(n_datapoints - 1):
            dt = sim_output[row + 1][0] - sim_output[row][0]
            for k in range(n_identifiers):
                value = sim_output[row][k + 1]
                residence[k][value] = residence[k].get(value, 0) + dt
        for k, id in enumerate(identifiers):
            x = np.array(sorted(residence[k]))
            p_x = np.array([residence[k][value] for value in x]) / float(t_end - t_begin)
            mu = (x * p_x).sum()
            mu_sq = (x ** 2 * p_x).sum()
            std = (mu_sq - mu ** 2) ** 0.5
            L_probability_mass.append([x, p_x])
            D_means[id] = mu
            D_stds[id] = std
            D_moments[id] = {'1': mu,
                             '2': mu_sq,
                             '3': (x ** 3 * p_x).sum(),
                             '4': (x ** 4 * p_x).sum()}
    return (L_probability_mass, D_means, D_stds, D_moments)
def LogBin(data, factor):
    """
    Bin *data* into logarithmically spaced bins.

    Input:
     - *data* (list)
     - *factor* (float) ratio between successive bin edges (bin width factor)
    Output:
     - *L_x* (list) bin centres, or None if no bins could be created
     - *L_y* (list) normalized densities, or None if no bins could be created
     - *nbins* (integer)
    """
    lower = float(min(data))
    nbins = int(np.ceil(np.log(max(data) / lower) / np.log(factor)))
    centres = None
    densities = None
    if nbins:
        # Geometric progression of edges: each edge is `factor` x the previous.
        edges = np.zeros(nbins)
        edges[0] = lower
        for k in range(1, nbins):
            edges[k] = edges[k - 1] * factor
        centres = edges[0:(nbins - 1)] + np.diff(edges) / 2
        raw_counts = Count(data, edges)
        kept = np.array(raw_counts[0:(nbins - 1)])
        widths = np.array(np.diff(edges))
        # Normalize to a density over the (unequal) bin widths.
        densities = kept / (sum(kept) * widths)
    return (centres, densities, nbins)
def ObtainWaitingtimes(data_stochsim, reactions):
    """
    Extract the inter-arrival (waiting) times of every reaction from exact SSA
    output.

    Input:
     - *data_stochsim* (python data object) that stores all simulation data
     - *reactions* (list) reaction identifiers
    Output:
     - *D_waiting_times* (dict) reaction id -> list of waiting times

    Note: It is impossible to use this function in combination with the
    Tau-leaping method, because the Tau-Leaping results are not exact!
    """
    timepoints = data_stochsim.time.flatten()
    fired = data_stochsim.fired_reactions     # reaction index fired at each time point
    n_reactions = len(reactions)
    D_waiting_times = dict((r_id, []) for r_id in reactions)
    previous_firing = {}                      # reaction index -> last firing time
    for current_time, r_index in zip(timepoints[1:], fired[1:]):
        for k in range(1, n_reactions + 1):   # fired reactions are numbered 1..n
            if r_index == k:
                if r_index in previous_firing:
                    r_name = reactions[int(r_index - 1)]
                    D_waiting_times[r_name].append(current_time - previous_firing[r_index])
                previous_firing[r_index] = current_time
            elif r_index == -k:               # negative index marks a delayed completion
                r_name_compl = reactions[int(abs(r_index) - 1)] + '_Completion'
                if r_index in previous_firing:
                    D_waiting_times[r_name_compl].append(current_time - previous_firing[r_index])
                previous_firing[r_index] = current_time
                D_waiting_times.setdefault(r_name_compl, [])  # ensure key exists even for a single event
    return D_waiting_times
def GetAverageResults(regular_grid):
    """
    Average the output of multiple trajectories sampled on a regular grid.

    Input:
     - *regular_grid* (nested list) per output variable, the per-trajectory samples
    Output:
     - tuple of (means, stds) as transposed numpy arrays
    """
    sample_means = [np.mean(entry, 0) for entry in regular_grid]
    sample_stds = [np.std(entry, 0) for entry in regular_grid]
    return (np.array(sample_means).transpose(), np.array(sample_stds).transpose())
def RemoveBias(x, axis):
"Subtracts an estimate of the mean from signal x at axis"
padded_slice = [slice(d) for d in x.shape]
padded_slice[axis] = np.newaxis
mn = np.mean(x, axis=axis)
return x - mn[tuple(padded_slice)]
def AutoCov(s, **kwargs):
    """
    Returns the autocovariance of signal s at all lags.

    Notes:
    Adheres to the definition
    sxx[k] = E{S[n]S[n+k]} = cov{S[n],S[n+k]}
    where E{} is the expectation operator, and S is a zero mean process
    """
    # Remove the mean at most once; if we do it here, tell CrossCov not to.
    should_debias = kwargs.pop('debias', True)
    if should_debias:
        s = RemoveBias(s, kwargs.get('axis', -1))
        kwargs['debias'] = False
    return CrossCov(s, s, **kwargs)
def FFTconvolve(in1, in2, mode="full", axis=None):
""" Convolve two N-dimensional arrays using FFT. See convolve. """
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
if axis is None:
size = s1+s2-1
fslice = tuple([slice(0, int(sz)) for sz in size])
else:
equal_shapes = s1==s2
# allow equal_shapes[axis] to be False
equal_shapes[axis] = True
assert equal_shapes.all(), 'Shape mismatch on non-convolving axes'
size = s1[axis]+s2[axis]-1
fslice = [slice(l) for l in s1]
fslice[axis] = slice(0, int(size))
fslice = tuple(fslice)
# Always use 2**n-sized FFT
fsize = int(2**np.ceil(np.log2(size)))
if axis is None:
IN1 = np.fft.fftpack.fftn(in1,fsize)
IN1 *= np.fft.fftpack.fftn(in2,fsize)
ret = np.fft.fftpack.ifftn(IN1)[fslice].copy()
else:
IN1 = np.fft.fftpack.fft(in1,fsize,axis=axis)
IN1 *= np.fft.fftpack.fft(in2,fsize,axis=axis)
ret = np.fft.fftpack.ifft(IN1,axis=axis)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return _centered(ret,osize)
elif mode == "valid":
return _centered(ret,abs(s2-s1)+1)
def CrossCov(x, y, axis=-1, all_lags=False, debias=True):
    """
    Returns the crosscovariance sequence between two ndarrays.
    This is performed by calling fftconvolve on x, y[::-1]

    Input:
     - *x*: ndarray
     - *y*: ndarray
     - *axis*: time axis
     - *all_lags*: {True/False}
       whether to return all nonzero lags, or to clip the length of s_xy
       to be the length of x and y. If False, then the zero lag covariance
       is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2
     - *debias*: {True/False}
       Always removes an estimate of the mean along the axis, unless
       told not to.

    Notes:
    cross covariance is defined as
    sxy[k] := E{X[t]*Y[t+k]}, where X,Y are zero mean random processes
    """
    if x.shape[axis] != y.shape[axis]:
        raise ValueError('CrossCov() only works on same-length sequences for now')
    if debias:
        x = RemoveBias(x, axis)
        y = RemoveBias(y, axis)
    # Correlate by convolving x with the time-reversed y along `axis`.
    selector = [slice(dim) for dim in x.shape]
    selector[axis] = slice(None, None, -1)
    sxy = FFTconvolve(x, y[tuple(selector)], axis=axis, mode='full')
    N = x.shape[axis]
    sxy /= N
    if all_lags:
        return sxy
    # Keep only lags 0..N-1 so the zero-lag term sits at index 0.
    selector[axis] = slice(N - 1, 2 * N - 1)
    return sxy[tuple(selector)]
def Autocorrelation(s, **kwargs):
    """
    Returns the autocorrelation of signal s at all lags.

    Notes:
    Adheres to the definition
    rxx[k] = E{S[n]S[n+k]}/E{S*S} = cov{S[n],S[n+k]}/sigma**2
    where E{} is the expectation operator, and S is a zero mean process

    Bug fix: with ``from __future__ import division`` the zero-lag index
    ``(2*n-1)/2`` was a float; modern NumPy rejects float indices, so the
    ``all_lags`` branch failed. Integer (floor) division is used now.
    """
    # only remove the mean once, if needed
    debias = kwargs.pop('debias', True)
    axis = kwargs.get('axis', -1)
    if debias:
        s = RemoveBias(s, axis)
        kwargs['debias'] = False
    sxx = AutoCov(s, **kwargs)
    all_lags = kwargs.get('all_lags', False)
    if all_lags:
        i = (2 * s.shape[axis] - 1) // 2   # integer index of the zero-lag term
        sxx_0 = sxx[i]
    else:
        sxx_0 = sxx[0]
    if not sxx_0:
        # Zero variance at lag 0: correlation is undefined; propagate NaNs.
        sxx = [np.nan for i in range(len(sxx))]
    else:
        sxx /= sxx_0
    return sxx
class DoPlotting():
"""
This class initiates the plotting options.
Input:
- *species_labels* (list) [S1,S2, ..., Sn]
- *rate_labels* (list) [R1, R2, ..., Rm]
"""
def __init__(self,species_labels,rate_labels,plotnum=1,quiet = False):
self.species_labels = species_labels
self.rate_labels = rate_labels
self.number_of_rates = len(rate_labels)
self.plotnum = plotnum
# https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/colors.py
self.colors = ['#0000FF','#00CC00','#FF0033','#FF00CC','#6600FF','#FFFF00','#000000','#CCCCCC','#00CCFF','#99CC33','#FF6666', '#FF99CC','#CC6600','#003300','#CCFFFF','#9900FF','#CC6633','#FFD700','#C0C0C0']
self.quiet = quiet
    def ResetPlotnum(self):
        """ Reset figure numbers if trajectories > 1 """
        # Restart matplotlib figure numbering at 1 for the next plotting run.
        self.plotnum = 1
    def TimeSeries(self,data,npoints,datatype,labels,trajectory_index,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):
        """
        Tracks the propensities and/or species over time.

        Input:
         - *data* (array) raw simulation output; first column is time
         - *npoints* (integer) maximum number of events to draw
         - *datatype* (list) identifiers to plot (subset of *labels*)
         - *labels* (list) all identifiers; defines the column order in *data*
         - *trajectory_index* (integer) selects a palette color when only one identifier is plotted
         - *linestyle* (string)
         - *linewidth* (float)
         - *marker* (string)
         - *colors* (list/string/None) None selects the built-in palette
         - *title* (string)
         - *xlabel* (string)
         - *ylabel* (string)
         - *is_legend* (boolean)
         - *legend_location* (string/integer)
        """
        plt.figure(self.plotnum)
        datatype_indices = [labels.index(Id) for Id in datatype]
        # Convert the event list to step-plot data (down-sampled to npoints).
        data = getDataForTimeSimPlot(data,npoints,self.quiet)
        Arr_time = data[:,0]
        # With one identifier, color by trajectory so repeated calls differ.
        if len(datatype) == 1:
            j = trajectory_index
        else:
            j=0
        for i in datatype_indices:
            y = data[:,i+1]
            # Wrap the color index; a bare string color becomes a 1-element list.
            if colors == None:
                if j >= len(self.colors):
                    j=0
            elif isinstance(colors,list):
                if j >= len(colors):
                    j=0
            elif isinstance(colors,str):
                colors = [colors]
                j=0
            if colors == None:
                if marker == '' and linestyle == 'solid':
                    plt.plot(Arr_time,y, ls = linestyle,lw = linewidth,color = self.colors[j])
                else:
                    plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = self.colors[j])
            else:
                if clr.is_color_like(colors[j]):
                    plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = colors[j])
                else:
                    # Invalid user color: warn and fall back to the built-in palette.
                    print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
                    plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = self.colors[j])
                    colors = None
            j+=1
        if is_legend:
            plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
    def Autocorrelations(self,lags,data,datatype,labels,trajectory_index,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):
        """
        Plots precomputed autocorrelations against the lag axis.

        Input:
         - *lags* (list/array) lag values for the x-axis
         - *data* (array) per-identifier autocorrelation series
         - *datatype* (list) identifiers to plot (subset of *labels*)
         - *labels* (list) all identifiers; defines row order in *data*
         - *trajectory_index* (integer) selects a palette color when only one identifier is plotted
         - *linestyle* (string)
         - *linewidth* (float)
         - *marker* (string)
         - *colors* (list/string/None) None selects the built-in palette
         - *title* (string)
         - *xlabel* (string)
         - *ylabel* (string)
         - *is_legend* (boolean)
         - *legend_location* (string/integer)
        """
        plt.figure(self.plotnum)
        datatype_indices = [labels.index(Id) for Id in datatype]
        if len(datatype) == 1:
            j = trajectory_index
        else:
            j=0
        for i in datatype_indices:
            # Wrap the color index; a bare string color becomes a 1-element list.
            if colors == None:
                if j >= len(self.colors):
                    j=0
            elif isinstance(colors,list):
                if j >= len(colors):
                    j=0
            elif isinstance(colors,str):
                colors = [colors]
                j=0
            # Clip the series to the number of requested lags.
            y = data[i][0:len(lags)]
            if colors == None:
                plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = self.colors[j])
            else:
                if clr.is_color_like(colors[j]):
                    plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = colors[j])
                else:
                    # Invalid user color: warn and fall back to the built-in palette.
                    print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
                    plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = self.colors[j])
                    colors = None
            j+=1
        if is_legend:
            plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
def Distributions(self,distributions,datatype,labels,trajectory_index,linestyle,linewidth,colors,title,xlabel,ylabel,is_legend=True,legend_location='upper right',bin_size=1,histtype = 'step',orientation='vertical',multiplotting=False):
"""
Plots the distributions of species and/or propensities
density=False because the total probability is determined by summation not by integration.
Input:
- *distributions* (nested list)
- *datatype* (list)
- *labels* (list)
- *trajectory_index* (integer)
- *linestyle* (string)
- *linewidth* (float)
- *colors* (list)
- *title* (string)
- *xlabel* (string)
- *ylabel* (string)
- *is_legend* (boolean)
- *legend_location* [default = 'upper right'] (string/integer)
- *bin_size* (string) [default = 1]
- *histtype* (string)) [default = 'step']
- *orientation* (string) [default = 'vertical']
- *multiplotting* (boolean) [default = False]
"""
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
if len(datatype) == 1:
j = trajectory_index
else:
j=0
for i in datatype_indices:
dat_min = distributions[i][0].min()
dat_max = distributions[i][0].max()
n_bins = 1 + (dat_max-dat_min) / bin_size # Just take one trajectory as reference
L_bin_edges = np.linspace(dat_min-bin_size/2.0,dat_max+bin_size/2.0,int(n_bins+1))
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
print('#'*20)
output = plt.hist(distributions[i][0], L_bin_edges, weights = distributions[i][1], ls = linestyle, lw = linewidth, color = self.colors[j], histtype = histtype, orientation=orientation, )
print('just ran this line')
output = plt.hist(distributions[i][0], L_bin_edges, weights = distributions[i][1], ls = linestyle, lw = linewidth, color = self.colors[j], histtype = histtype, orientation=orientation, density=False)
print('just ran this line')
print('#'*20)
else:
if clr.is_color_like(colors[j]):
output = plt.hist(distributions[i][0],L_bin_edges,weights = distributions[i][1],ls = linestyle,lw = linewidth,color = colors[j],histtype = histtype,orientation=orientation,density=False)
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
output = plt.hist(distributions[i][0],L_bin_edges,weights = distributions[i][1],ls = linestyle,lw = linewidth,color = self.colors[j],histtype = histtype,orientation=orientation,density=False)
colors = None
j+=1
if is_legend:
plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)
plt.title(title)
if orientation.lower() == 'horizontal':
plt.xlabel(ylabel)
plt.ylabel(xlabel)
if multiplotting:
plt.xticks([0,max(output[0])*1.1])
else:
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if multiplotting:
plt.yticks([0,max(output[0])*1.1])
    def WaitingtimesDistributions(self,waiting_times,rates,trajectory_index,linestyle,linewidth, marker,colors,title,xlabel,ylabel,is_legend,legend_location):
        """
        Plots the waiting times for each reaction in the model.
        Makes use of ObtainWaitingtimes to derive the waiting times out of the SSA output.
        Input:
         - *waiting_times* (dict)
         - *rates* (list)
         - *trajectory_index* (integer)
         - *linestyle* (string)
         - *linewith* (float)
         - *marker* (string)
         - *colors* (list)
         - *title* (string)
         - *xlabel* (string)
         - *ylabel* (string)
         - *is_legend* (boolean)
         - *legend_location* [default = 'upper right'] (string/integer)
        """
        plt.figure(self.plotnum)
        # With a single reaction, color by trajectory so multiple trajectories
        # are distinguishable; otherwise cycle colors per reaction.
        if len(rates) == 1:
            j = trajectory_index
        else:
            j=0
        L_legend_names = []
        for r_id in rates:
            L_waiting_times = waiting_times[r_id] # get list of waiting times for a given reaction
            if len(L_waiting_times) > 1:	  # At least 2 waiting times are necessary per reaction
                (x,y,nbins) = LogBin(L_waiting_times,1.5) # Create logarithmic bins (HARDCODED 1.5)
                # LogBin returns (None, None, 0) when all waiting times fall in
                # one bin; nothing to plot in that case.
                if x is not None:
                    # Wrap the color index when it runs past the available palette;
                    # a single color string is promoted to a one-element list.
                    if colors == None:
                        if j >= len(self.colors):
                            j=0
                    elif isinstance(colors,list):
                        if j >= len(colors):
                            j=0
                    elif isinstance(colors,str):
                        colors = [colors]
                        j=0
                    if colors == None:
                        plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = self.colors[j])
                    else:
                        if clr.is_color_like(colors[j]):
                            plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = colors[j])
                        else:
                            # Invalid user color: warn, fall back to the built-in
                            # palette for the rest of the plot.
                            print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
                            plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = self.colors[j])
                            colors = None
                    L_legend_names.append(r_id)
                    j+=1
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if is_legend:
            plt.legend(L_legend_names,numpoints=1,frameon=True,loc=legend_location)
    def AverageTimeSeries(self,means,stds,time,nstd,datatype,labels,linestyle,linewidth,marker,ms,colors,title,xlabel,ylabel,is_legend,legend_location):
        """
        Plots the average and standard deviation of datatype on a regular grid.
        Input:
         - *means* (array)
         - *stds* (array)
         - *time* (array)
         - *nstd* (float)
         - *datatype* (list)
         - *labels* (list)
         - *linestyle* (string)
         - *linewidth* (float)
         - *marker* (string)
         - *ms* (float)
         - *colors* (list)
         - *title* (string)
         - *xlabel* (string)
         - *ylabel* (string)
         - *is_legend* (boolean)
         - *legend_location* [default = 'upper right'] (string/integer)
        """
        assert nstd > 0, "Error: The number of STDs must be a value larger than zero"
        plt.figure(self.plotnum)
        # Map requested identifiers to their column indices in means/stds.
        datatype_indices = [labels.index(Id) for Id in datatype]
        j=0
        for i in datatype_indices:
            # Wrap the color index when it runs past the available palette;
            # a single color string is promoted to a one-element list.
            if colors == None:
                if j >= len(self.colors):
                    j=0
            elif isinstance(colors,list):
                if j >= len(colors):
                    j=0
            elif isinstance(colors,str):
                colors = [colors]
                j=0
            # plot with y-axis error bars
            if colors == None:
                plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = self.colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])
            else:
                if clr.is_color_like(colors[j]):
                    plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])
                else:
                    # Invalid user color: warn, fall back to the built-in palette.
                    print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
                    plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = self.colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])
                    colors = None
            j+=1
        if is_legend:
            plt.legend(numpoints=1,frameon=True,loc=legend_location)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
    def AverageDistributions(self,means,stds,nstd,datatype,labels,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):
        """
        Plots the average and standard deviation.
        Input:
         - *means* (nested list)
         - *stds* (nested list)
         - *nstd* (float)
         - *labels* (list)
         - *linestyle* (string)
         - *linewidth* (float)
         - *marker* (string)
         - *colors* (list)
         - *title* (string)
         - *xlabel* (string)
         - *ylabel* (string)
         - *is_legend* (boolean)
         - *legend_location* [default = 'upper right'] (string/integer)
        """
        assert nstd > 0, "Error: The number of STDs must be a value larger than zero"
        plt.figure(self.plotnum)
        # Map requested identifiers to their indices in means/stds.
        datatype_indices = [labels.index(Id) for Id in datatype]
        j=0
        for i in datatype_indices:
            # Wrap the color index when it runs past the available palette;
            # a single color string is promoted to a one-element list.
            if colors == None:
                if j >= len(self.colors):
                    j=0
            elif isinstance(colors,list):
                if j >= len(colors):
                    j=0
            elif isinstance(colors,str):
                colors = [colors]
                j=0
            if colors == None:
                plt.errorbar(means[i][0],means[i][1],yerr = nstd * np.array(stds[i][1]),color = self.colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i]) # plot with y-axis error bars
            else:
                if clr.is_color_like(colors[j]):
                    plt.errorbar(means[i][0],means[i][1],yerr = nstd*np.array(stds[i][1]),color = colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])
                else:
                    # Invalid user color: warn, fall back to the built-in palette.
                    print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
                    plt.errorbar(means[i][0],means[i][1],yerr = nstd * np.array(stds[i][1]),color = self.colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])
                    colors = None
            j+=1
        if is_legend:
            plt.legend(numpoints=1,frameon=True,loc=legend_location)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
def AverageDistributionsCI(self,means,stds,nstd,datatype,labels,colors,title,xlabel,ylabel,is_legend,legend_location):
"""
Plots the average and standard deviation.
Input:
- *means* (nested list)
- *stds* (nested list)
- *nstd* (float)
- *labels* (list)
- *linestyle* (string)
- *linewidth* (float)
- *marker* (string)
- *colors* (list)
- *title* (string)
- *xlabel* (string)
- *ylabel* (string)
- *is_legend* (boolean)
- *legend_location* [default = 'upper right'] (string/integer)
"""
assert nstd > 0, "Error: The number of STDs must be a value larger than zero"
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
for i in datatype_indices:
L_s_amount = copy.copy(means[i][0])
L_mu = copy.copy(means[i][1])
L_sigma = copy.copy(stds[i][1])
# Add an additional value
L_s_amount.append(L_s_amount[-1]+1)
L_mu.append(L_mu[-1])
L_sigma.append(L_sigma[-1])
X_i = []
Y_i = []
L_errors = []
for j in range(len(L_s_amount)):
if (not L_s_amount[j] == L_s_amount[0]) and (not L_s_amount[j] == L_s_amount[-1]):
X_i.append(L_s_amount[j])
Y_i.append(L_mu[j-1])
L_errors.append(L_sigma[j-1])
X_i.append(L_s_amount[j])
Y_i.append(L_mu[j])
L_errors.append(L_sigma[j])
X_e = np.concatenate([X_i, X_i[::-1]])
Y_e = np.concatenate([np.array(Y_i) - nstd*np.array(L_errors) ,(np.array(Y_i) + nstd*np.array(L_errors))[::-1]])
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = self.colors[j])
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = self.colors[j])
else:
if clr.is_color_like(colors[j]):
plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = colors[j])
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = colors[j])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = self.colors[j])
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = self.colors[j])
colors = None
if is_legend:
plt.legend(numpoints=1,frameon=True,loc=legend_location)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
| 37.596042 | 323 | 0.553398 |
from __future__ import division, print_function, absolute_import

# Import stdlib modules first: sys.exit() below must not raise NameError
# when NumPy is unavailable (previously 'import copy,sys' came last).
import copy
import sys

from stochpy import _IsPlotting
if _IsPlotting:
    from stochpy import plt
    from stochpy import matplotlib
    from matplotlib import gridspec,colors as clr

from stochpy import _IsNumPy
if _IsNumPy:
    import numpy as np
else:
    sys.exit()
def getDataForTimeSimPlot(Arr_data,npoints = 100000,quiet=False):
    """Expand SSA event output into step-plot coordinates.

    Before every event a 'hold' point is inserted that carries the previous
    state at the new event time, so a plain line plot renders as a staircase.
    When there are more than *npoints* events, only every step_size-th event
    is kept (and an info message is printed unless *quiet*).
    """
    n_events = len(Arr_data)
    if n_events > npoints:
        # Subsample: start from the first event, then keep every
        # step_size-th one, each preceded by its horizontal hold point.
        sampled = [Arr_data[0]]
        step_size = n_events//int(abs(npoints))
        for idx in range(step_size, n_events, step_size):
            t = Arr_data[idx][0]
            hold = copy.deepcopy(sampled[-1][1:].tolist())
            hold.insert(0, t)
            sampled.append(hold)
            sampled.append(Arr_data[idx])
        if not quiet:
            print("Info: Plotting {0:d} out of {1:d} points. Use the argument 'npoints' to alter the number of plotted events.".format(npoints,n_events) )
    else:
        # Keep every event; weave a hold point in front of each one but the first.
        sampled = copy.deepcopy(Arr_data.tolist())
        insert_at = 1
        for idx in range(1, n_events):
            t = Arr_data[idx][0]
            hold = copy.deepcopy(Arr_data[idx-1])
            hold[0] = t
            sampled.insert(insert_at, hold)
            insert_at += 2
    return np.array(sampled)
def Count(data,edges):
    """Tally how many *data* values fall into each half-open bin [edges[i], edges[i+1]).

    The returned array has the same length as *edges*; the final slot is
    always zero (it only exists to close the last bin), and values >= the
    last edge are not counted.
    """
    n_edges = len(edges)
    tallies = np.zeros(n_edges)
    for bin_idx in range(n_edges-1):
        lo = edges[bin_idx]
        hi = edges[bin_idx+1]
        for value in data:
            if lo <= value < hi:
                tallies[bin_idx] += 1
    return np.array(tallies)
def GetSpeciesDistributions(sim_output,species):
    """Build time-weighted copy-number distributions per species from SSA output.

    Each row of *sim_output* is [t, n_species_1, n_species_2, ...]; every state
    is weighted by the time spent in it (t_next - t).  Returns
    (probability_mass, means, stds, moments) where probability_mass is a list
    of [amounts, probabilities] pairs and the dicts are keyed by species id.
    All four are empty when fewer than two data points are available.
    """
    n_species = len(species)
    L_distributions = [{} for i in range(n_species)]
    starttime = sim_output[0][0]
    endtime = sim_output[-1][0]
    n_datapoints = len(sim_output)
    D_means = {}
    D_stds = {}
    D_moments = {}
    L_probability_mass = []
    if n_datapoints > 1:
        # Accumulate, per species, the total time spent at each copy number.
        for t in range(n_datapoints-1):
            for i in range(n_species):
                try:
                    L_distributions[i][int(sim_output[t][i+1])] += sim_output[t+1][0] - sim_output[t][0]
                except KeyError:
                    # First occurrence of this copy number for species i.
                    L_distributions[i][int(sim_output[t][i+1])] = sim_output[t+1][0] - sim_output[t][0]
        for i,s_id in enumerate(species):
            # Normalize dwell times by total simulated time to get probabilities.
            x = np.array(sorted(L_distributions[i]),dtype=int)
            p_x = np.array([L_distributions[i][x_i] for x_i in x])/float(endtime-starttime)
            mu = (x*p_x).sum()
            mu_sq = (x**2*p_x).sum()
            var = mu_sq - mu**2
            std = var**0.5
            L_probability_mass.append([x,p_x])
            D_means[s_id] = mu
            D_stds[s_id] = std
            # Raw (non-central) moments 1-4, keyed by their order as a string.
            D_moments[s_id] = {}
            D_moments[s_id]['1'] = mu
            D_moments[s_id]['2'] = mu_sq
            D_moments[s_id]['3'] = (x**3*p_x).sum()
            D_moments[s_id]['4'] = (x**4*p_x).sum()
    return (L_probability_mass,D_means,D_stds,D_moments)
def GetDataDistributions(sim_output,identifiers):
    """Build time-weighted value distributions for arbitrary tracked identifiers.

    Same algorithm as GetSpeciesDistributions, but the observed values are not
    cast to int (works for propensities as well as amounts).  Returns
    (probability_mass, means, stds, moments); all empty when fewer than two
    data points are available.
    """
    n_identifiers = len(identifiers)
    L_distributions = [{} for i in range(n_identifiers)]
    starttime = sim_output[0][0]
    endtime = sim_output[-1][0]
    n_datapoints = len(sim_output)
    D_means = {}
    D_stds = {}
    D_moments = {}
    L_probability_mass = []
    if n_datapoints > 1:
        # Accumulate, per identifier, the total time spent at each value.
        for t in range(n_datapoints-1):
            for i in range(n_identifiers):
                try:
                    L_distributions[i][sim_output[t][i+1]] += sim_output[t+1][0] - sim_output[t][0]
                except KeyError:
                    # First occurrence of this value for identifier i.
                    L_distributions[i][sim_output[t][i+1]] = sim_output[t+1][0] - sim_output[t][0]
        for i,id in enumerate(identifiers):
            # Normalize dwell times by total simulated time to get probabilities.
            x = np.array(sorted(L_distributions[i]))
            p_x = np.array([L_distributions[i][x_i] for x_i in x])/float(endtime-starttime)
            mu = (x*p_x).sum()
            mu_sq = (x**2*p_x).sum()
            var = mu_sq - mu**2
            std = var**0.5
            L_probability_mass.append([x,p_x])
            D_means[id] = mu
            D_stds[id] = std
            # Raw (non-central) moments 1-4, keyed by their order as a string.
            D_moments[id] = {}
            D_moments[id]['1'] = mu
            D_moments[id]['2'] = mu_sq
            D_moments[id]['3'] = (x**3*p_x).sum()
            D_moments[id]['4'] = (x**4*p_x).sum()
    return (L_probability_mass,D_means,D_stds,D_moments)
def LogBin(data,factor):
    """Logarithmically bin *data*: successive bin edges grow by *factor*.

    Returns (centers, densities, nbins).  Densities are counts normalized by
    total count and bin width.  When all values collapse into one bin
    (nbins == 0), centers and densities are None.
    """
    smallest = float(min(data))
    nbins = int(np.ceil(np.log(max(data)/smallest)/np.log(factor)))
    centers = None
    density = None
    if nbins:
        # Build edges by repeated multiplication (same float sequence as a
        # cumulative in-place product).
        edge_list = [smallest]
        for _ in range(1,nbins):
            edge_list.append(edge_list[-1]*factor)
        L_edges = np.array(edge_list)
        centers = L_edges[0:(nbins-1)]+np.diff(L_edges)/2
        raw_counts = np.array(Count(data,L_edges)[0:(nbins-1)])
        widths = np.array(np.diff(L_edges))
        density = raw_counts/(sum(raw_counts)*widths)
    return(centers,density,nbins)
def ObtainWaitingtimes(data_stochsim,reactions):
    """Extract per-reaction waiting times from SSA trajectory data.

    A waiting time is the interval between two successive firings of the same
    reaction.  Negative reaction indices denote delayed-reaction completions;
    their waiting times are stored under '<name>_Completion'.
    Returns a dict mapping reaction name -> list of waiting times.
    """
    L_time = data_stochsim.time.flatten()
    L_fired_reactions = data_stochsim.fired_reactions
    D_waiting_times = {}
    # Last firing time per (signed) reaction index; a reaction's first firing
    # only records the time, producing no waiting time yet.
    D_last_time_fired = {}
    nreactions = len(reactions)
    for r_id in reactions:
        D_waiting_times[r_id] = []
    # Skip the first sample: it has no predecessor to measure against.
    for (current_time,r_index) in zip(L_time[1:],L_fired_reactions[1:]):
        for i in range(1,nreactions+1):
            if r_index == i:
                # Ordinary firing of reaction i (1-based index).
                if r_index in D_last_time_fired:
                    r_name = reactions[int(r_index-1)]
                    D_waiting_times[r_name].append(current_time - D_last_time_fired[r_index])
                    D_last_time_fired[r_index] = current_time
                else:
                    D_last_time_fired[r_index] = current_time
            elif r_index == -i:
                # Completion event of a delayed reaction (negative index).
                r_name_compl = reactions[ int(abs(r_index)-1) ] + '_Completion'
                if r_index in D_last_time_fired:
                    D_waiting_times[r_name_compl].append(current_time - D_last_time_fired[r_index])
                    D_last_time_fired[r_index] = current_time
                else:
                    D_last_time_fired[r_index] = current_time
                    # Ensure the completion key exists even before the first interval.
                    D_waiting_times.setdefault(r_name_compl, [])
    return D_waiting_times
def GetAverageResults(regular_grid):
    """Mean and standard deviation over trajectories for each grid output.

    *regular_grid* is a sequence of 2-D blocks (trajectories x time points);
    returns (means, stds) as arrays transposed to (time points x outputs).
    """
    per_output_means = [np.mean(block,0) for block in regular_grid]
    per_output_stds = [np.std(block,0) for block in regular_grid]
    return (np.array(per_output_means).transpose(),np.array(per_output_stds).transpose())
def RemoveBias(x, axis):
    """Subtract the mean along *axis* from *x*, broadcasting it back.

    Equivalent to centering x so its mean along *axis* becomes zero.
    """
    axis_mean = np.mean(x, axis=axis)
    # Reinsert the reduced axis so the mean broadcasts against x.
    return x - np.expand_dims(axis_mean, axis)
def AutoCov(s, **kwargs):
    """Autocovariance of *s*, computed as the cross-covariance of s with itself.

    Keyword arguments are forwarded to CrossCov (axis, all_lags, ...).
    'debias' (default True) centers s first; it is then set to False so
    CrossCov does not re-center.
    """
    debias = kwargs.pop('debias', True)
    axis = kwargs.get('axis', -1)
    if debias:
        s = RemoveBias(s, axis)
        kwargs['debias'] = False
    return CrossCov(s, s, **kwargs)
def FFTconvolve(in1, in2, mode="full", axis=None):
    """FFT-based linear convolution of two arrays.

    With axis=None the full N-D convolution is computed (in practice only 1-D
    inputs are supported on that path, since the padded FFT size must reduce
    to a single integer); with an integer *axis*, convolution runs along that
    axis and all other axes must match.  *mode* is 'full', 'same' or 'valid'
    (the latter two crop via the module-level _centered helper).

    Updated for modern NumPy: np.complex, np.product and the private
    np.fft.fftpack module were all removed; this uses np.complexfloating,
    np.prod and the public np.fft API instead.
    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    if axis is None:
        size = s1+s2-1
        fslice = tuple([slice(0, int(sz)) for sz in size])
    else:
        equal_shapes = s1==s2
        equal_shapes[axis] = True
        assert equal_shapes.all(), 'Shape mismatch on non-convolving axes'
        size = s1[axis]+s2[axis]-1
        fslice = [slice(l) for l in s1]
        fslice[axis] = slice(0, int(size))
        fslice = tuple(fslice)
    # Round the transform length up to the next power of two for speed.
    fsize = int(2**np.ceil(np.log2(size)))
    if axis is None:
        IN1 = np.fft.fftn(in1, [fsize] * in1.ndim)
        IN1 *= np.fft.fftn(in2, [fsize] * in2.ndim)
        ret = np.fft.ifftn(IN1)[fslice].copy()
    else:
        IN1 = np.fft.fft(in1, fsize, axis=axis)
        IN1 *= np.fft.fft(in2, fsize, axis=axis)
        ret = np.fft.ifft(IN1, axis=axis)[fslice].copy()
    del IN1
    if not complex_result:
        # Real inputs: discard the numerically-zero imaginary part.
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1,axis=0) > np.prod(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
def CrossCov(x, y, axis=-1, all_lags=False, debias=True):
    """Cross-covariance of equal-length sequences x and y along *axis*.

    Computed via FFT convolution of x with the reversed y, normalized by the
    sequence length N.  With all_lags=True the full 2N-1 lag range is
    returned; otherwise only the non-negative lags (length N).
    Raises ValueError when the lengths along *axis* differ.
    """
    if x.shape[axis] != y.shape[axis]:
        raise ValueError('CrossCov() only works on same-length sequences for now')
    if debias:
        x = RemoveBias(x, axis)
        y = RemoveBias(y, axis)
    # Reverse y along the correlation axis: convolution of x with reversed y
    # equals correlation of x with y.
    slicing = [slice(d) for d in x.shape]
    slicing[axis] = slice(None,None,-1)
    sxy = FFTconvolve(x, y[tuple(slicing)], axis=axis, mode='full')
    N = x.shape[axis]
    sxy /= N
    if all_lags:
        return sxy
    # Keep lags 0..N-1 (element N-1 of the full result is lag zero).
    slicing[axis] = slice(N-1,2*N-1)
    return sxy[tuple(slicing)]
def Autocorrelation(s, **kwargs):
    """Normalized autocorrelation of *s*: autocovariance divided by its lag-0 value.

    Keyword arguments are forwarded to AutoCov (axis, all_lags, ...).  When
    the lag-0 autocovariance is zero (constant signal), a list of NaNs is
    returned instead of dividing by zero.
    """
    debias = kwargs.pop('debias', True)
    axis = kwargs.get('axis', -1)
    if debias:
        s = RemoveBias(s, axis)
        kwargs['debias'] = False
    sxx = AutoCov(s, **kwargs)
    all_lags = kwargs.get('all_lags', False)
    if all_lags:
        # Bug fix: '//' keeps the index an int; under 'from __future__ import
        # division' the old '/' produced a float, which raises TypeError when
        # used as an array index on Python 3.
        i = (2*s.shape[axis]-1)//2
        sxx_0 = sxx[i]
    else:
        sxx_0 = sxx[0]
    if not sxx_0:
        # Zero variance at lag 0: correlation is undefined everywhere.
        sxx = [np.nan for i in range(len(sxx))]
    else:
        sxx /= sxx_0
    return sxx
class DoPlotting():
def __init__(self,species_labels,rate_labels,plotnum=1,quiet = False):
self.species_labels = species_labels
self.rate_labels = rate_labels
self.number_of_rates = len(rate_labels)
self.plotnum = plotnum
self.colors = ['#0000FF','#00CC00','#FF0033','#FF00CC','#6600FF','#FFFF00','#000000','#CCCCCC','#00CCFF','#99CC33','#FF6666', '#FF99CC','#CC6600','#003300','#CCFFFF','#9900FF','#CC6633','#FFD700','#C0C0C0']
self.quiet = quiet
def ResetPlotnum(self):
self.plotnum = 1
def TimeSeries(self,data,npoints,datatype,labels,trajectory_index,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
data = getDataForTimeSimPlot(data,npoints,self.quiet)
Arr_time = data[:,0]
if len(datatype) == 1:
j = trajectory_index
else:
j=0
for i in datatype_indices:
y = data[:,i+1]
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
if marker == '' and linestyle == 'solid':
plt.plot(Arr_time,y, ls = linestyle,lw = linewidth,color = self.colors[j])
else:
plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = self.colors[j])
else:
if clr.is_color_like(colors[j]):
plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = colors[j])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = self.colors[j])
colors = None
j+=1
if is_legend:
plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def Autocorrelations(self,lags,data,datatype,labels,trajectory_index,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
if len(datatype) == 1:
j = trajectory_index
else:
j=0
for i in datatype_indices:
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
y = data[i][0:len(lags)]
if colors == None:
plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = self.colors[j])
else:
if clr.is_color_like(colors[j]):
plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = colors[j])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = self.colors[j])
colors = None
j+=1
if is_legend:
plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def Distributions(self,distributions,datatype,labels,trajectory_index,linestyle,linewidth,colors,title,xlabel,ylabel,is_legend=True,legend_location='upper right',bin_size=1,histtype = 'step',orientation='vertical',multiplotting=False):
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
if len(datatype) == 1:
j = trajectory_index
else:
j=0
for i in datatype_indices:
dat_min = distributions[i][0].min()
dat_max = distributions[i][0].max()
n_bins = 1 + (dat_max-dat_min) / bin_size
L_bin_edges = np.linspace(dat_min-bin_size/2.0,dat_max+bin_size/2.0,int(n_bins+1))
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
print('#'*20)
output = plt.hist(distributions[i][0], L_bin_edges, weights = distributions[i][1], ls = linestyle, lw = linewidth, color = self.colors[j], histtype = histtype, orientation=orientation, )
print('just ran this line')
output = plt.hist(distributions[i][0], L_bin_edges, weights = distributions[i][1], ls = linestyle, lw = linewidth, color = self.colors[j], histtype = histtype, orientation=orientation, density=False)
print('just ran this line')
print('#'*20)
else:
if clr.is_color_like(colors[j]):
output = plt.hist(distributions[i][0],L_bin_edges,weights = distributions[i][1],ls = linestyle,lw = linewidth,color = colors[j],histtype = histtype,orientation=orientation,density=False)
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
output = plt.hist(distributions[i][0],L_bin_edges,weights = distributions[i][1],ls = linestyle,lw = linewidth,color = self.colors[j],histtype = histtype,orientation=orientation,density=False)
colors = None
j+=1
if is_legend:
plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)
plt.title(title)
if orientation.lower() == 'horizontal':
plt.xlabel(ylabel)
plt.ylabel(xlabel)
if multiplotting:
plt.xticks([0,max(output[0])*1.1])
else:
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if multiplotting:
plt.yticks([0,max(output[0])*1.1])
def WaitingtimesDistributions(self,waiting_times,rates,trajectory_index,linestyle,linewidth, marker,colors,title,xlabel,ylabel,is_legend,legend_location):
plt.figure(self.plotnum)
if len(rates) == 1:
j = trajectory_index
else:
j=0
L_legend_names = []
for r_id in rates:
L_waiting_times = waiting_times[r_id]
if len(L_waiting_times) > 1:
(x,y,nbins) = LogBin(L_waiting_times,1.5)
if x is not None:
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = self.colors[j])
else:
if clr.is_color_like(colors[j]):
plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = colors[j])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = self.colors[j])
colors = None
L_legend_names.append(r_id)
j+=1
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if is_legend:
plt.legend(L_legend_names,numpoints=1,frameon=True,loc=legend_location)
def AverageTimeSeries(self,means,stds,time,nstd,datatype,labels,linestyle,linewidth,marker,ms,colors,title,xlabel,ylabel,is_legend,legend_location):
assert nstd > 0, "Error: The number of STDs must be a value larger than zero"
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
j=0
for i in datatype_indices:
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = self.colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])
else:
if clr.is_color_like(colors[j]):
plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = self.colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])
colors = None
j+=1
if is_legend:
plt.legend(numpoints=1,frameon=True,loc=legend_location)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def AverageDistributions(self,means,stds,nstd,datatype,labels,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):
assert nstd > 0, "Error: The number of STDs must be a value larger than zero"
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
j=0
for i in datatype_indices:
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
plt.errorbar(means[i][0],means[i][1],yerr = nstd * np.array(stds[i][1]),color = self.colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])
else:
if clr.is_color_like(colors[j]):
plt.errorbar(means[i][0],means[i][1],yerr = nstd*np.array(stds[i][1]),color = colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.errorbar(means[i][0],means[i][1],yerr = nstd * np.array(stds[i][1]),color = self.colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])
colors = None
j+=1
if is_legend:
plt.legend(numpoints=1,frameon=True,loc=legend_location)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def AverageDistributionsCI(self,means,stds,nstd,datatype,labels,colors,title,xlabel,ylabel,is_legend,legend_location):
assert nstd > 0, "Error: The number of STDs must be a value larger than zero"
plt.figure(self.plotnum)
datatype_indices = [labels.index(Id) for Id in datatype]
for i in datatype_indices:
L_s_amount = copy.copy(means[i][0])
L_mu = copy.copy(means[i][1])
L_sigma = copy.copy(stds[i][1])
L_s_amount.append(L_s_amount[-1]+1)
L_mu.append(L_mu[-1])
L_sigma.append(L_sigma[-1])
X_i = []
Y_i = []
L_errors = []
for j in range(len(L_s_amount)):
if (not L_s_amount[j] == L_s_amount[0]) and (not L_s_amount[j] == L_s_amount[-1]):
X_i.append(L_s_amount[j])
Y_i.append(L_mu[j-1])
L_errors.append(L_sigma[j-1])
X_i.append(L_s_amount[j])
Y_i.append(L_mu[j])
L_errors.append(L_sigma[j])
X_e = np.concatenate([X_i, X_i[::-1]])
Y_e = np.concatenate([np.array(Y_i) - nstd*np.array(L_errors) ,(np.array(Y_i) + nstd*np.array(L_errors))[::-1]])
if colors == None:
if j >= len(self.colors):
j=0
elif isinstance(colors,list):
if j >= len(colors):
j=0
elif isinstance(colors,str):
colors = [colors]
j=0
if colors == None:
plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = self.colors[j])
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = self.colors[j])
else:
if clr.is_color_like(colors[j]):
plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = colors[j])
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = colors[j])
else:
print("*** WARNING ***: '{0}' is not recognized as a valid color code".format(colors[j]) )
plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = self.colors[j])
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = self.colors[j])
colors = None
if is_legend:
plt.legend(numpoints=1,frameon=True,loc=legend_location)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
| true | true |
f7f857dea1a667af831b3fbd0fa512325882af92 | 141 | py | Python | practice/4. radius/4. radius/_4._radius.py | thekaranjit/pythonpractice | c87f535f694ee259a4d9de317f5b2eacd0e3748f | [
"Unlicense"
] | 1 | 2018-06-21T23:15:18.000Z | 2018-06-21T23:15:18.000Z | practice/4. radius/4. radius/_4._radius.py | thekaranjit/pythonpractice | c87f535f694ee259a4d9de317f5b2eacd0e3748f | [
"Unlicense"
] | null | null | null | practice/4. radius/4. radius/_4._radius.py | thekaranjit/pythonpractice | c87f535f694ee259a4d9de317f5b2eacd0e3748f | [
"Unlicense"
] | null | null | null | from math import pi
r = float(input("Input the radious of circle: "))
print("Area of a circle with radius" + str(r) + "is:" + str(pi *r**2))
| 35.25 | 70 | 0.64539 | from math import pi
r = float(input("Input the radious of circle: "))
print("Area of a circle with radius" + str(r) + "is:" + str(pi *r**2))
| true | true |
f7f858c7572f16e9c6eb451dabe10db9b9789972 | 8,678 | py | Python | rigi/tableIns.py | wangjwchn/autogr | c6b5404f6165ea6374f628dbd433ef25253ab5f8 | [
"MIT"
] | null | null | null | rigi/tableIns.py | wangjwchn/autogr | c6b5404f6165ea6374f628dbd433ef25253ab5f8 | [
"MIT"
] | null | null | null | rigi/tableIns.py | wangjwchn/autogr | c6b5404f6165ea6374f628dbd433ef25253ab5f8 | [
"MIT"
] | null | null | null | from z3 import *
from .enum import *
from .table import *
from .utils import *
from .assn import *
class TableInstance(object):
def __init__(self, table):
super(TableInstance, self).__init__()
self.table = table
self.name = table.name
self.key_type = table.key_type
self.value_type = table.value_type
self.data = Array(ID('TABLE_' + table.name), self.key_type, self.value_type)
self.attr_list = list([k_v1[0] for k_v1 in sorted(table.attrs.items())])
self.axiom = self.__initAxiom()
def __initAxiom(self):
A = Assn()
# forall k1 k1' K1, (k1:K1 /\ k1':K1 /\ k1 != k1' /\ Table[k1] != Table[k1']) => (exists k2 K2, (k2:K2) => Table[k1].K2 != Table[k1'].K2)
for pkey1 in self.table.pkeys:
ins1 = self.table.keyInstance(pkey1)
ins2 = self.table.keyInstance(pkey1)
for pkey2 in self.table.pkeys:
F = False
for k in pkey2:
F = Or(F, self.get(ins1,k) != self.get(ins2,k))
A.add(ForAll(list(ins1.values()) + list(ins2.values()),Implies(Not(z3_list_eq(list(ins1.values()),list(ins2.values()))),F)))
# forall k1 k2 K1 K2, (k1:K1 /\ k2:K2 /\ K1 != K2 /\ Table[k1].K2 == k2 /\ Table[k2].K1 == k1) => (Table[k1] == Table[k2])
for i in range(len(list(self.table.pkeys))):
for j in range(i + 1,len(list(self.table.pkeys))):
pk1 = list(self.table.pkeys)[i]
pk2 = list(self.table.pkeys)[j]
if pk1 != pk2:
ins1 = self.table.keyInstance(pk1)
ins2 = self.table.keyInstance(pk2)
R = True
P = True
for k in pk1:
P = And(P, self.get(ins2,k) == ins1[k])
for k in pk2:
P = And(P, self.get(ins1,k) == ins2[k])
Q = self.get(ins1) == self.get(ins2)
A.add(ForAll(list(ins1.values())+ list(ins2.values()),And(R,Implies(P,Q))))
##### Note: The following two axioms are fixed in the add function
# forall k1 K1 K2, (k1:K1 /\ K1 != K2) => Table[Table[k1].K2] not nil
# for i in range(len(list(self.table.pkeys))):
# for j in range(i + 1,len(list(self.table.pkeys))):
# pk1 = list(self.table.pkeys)[i]
# pk2 = list(self.table.pkeys)[j]
# if pk1 != pk2:
# ins1 = self.table.keyInstance(pk1)
# ins2 = self.table.keyInstance(pk2)
# ins2by1 = dict(zip(pk2, [self.get(ins1, k2) for k2,_ in ins2.items()]))
# A.add(ForAll(list(ins1.values()) + list(ins2.values()),And(self.notNil(ins2by1))))
# forall k1 k1' K1 K2, (k1:K1 /\ k1':K1 /\ k1 != k1' /\ K1 != K2) => Table[k1].K2 != Table[k1'].K2
# for i in range(len(list(self.table.pkeys))):
# for j in range(i + 1,len(list(self.table.pkeys))):
# pk1 = list(self.table.pkeys)[i]
# pk2 = list(self.table.pkeys)[j]
# if pk1 != pk2:
# ins1 = self.table.keyInstance(pk1)
# ins1_ = self.table.keyInstance(pk1)
# ins2 = self.table.keyInstance(pk2)
# ins2by1 = dict(zip(pk2, [self.get(ins1, k2) for k2,_ in ins2.items()]))
# ins2by1_ = dict(zip(pk2, [self.get(ins1_, k2) for k2,_ in ins2.items()]))
# A.add(ForAll(list(ins1.values()) + list(ins1_.values()) + list(ins2.values()),
# Implies(list(ins1.values())[0] != list(ins1_.values())[0], list(ins2by1.values())[0] != list(ins2by1_.values())[0])))
return A.build()
def __makeKey(self,key_dict):
return self.key_type.__dict__[TUID(list(key_dict.keys()))](*list([k_v[1] for k_v in sorted(key_dict.items())]))
def __makeValue(self,key_dict,value_dict):
return self.value_type.new(*list([value_dict[attr_name] if attr_name in value_dict else self.get(key_dict,attr_name) for attr_name in self.attr_list]))
def __isKey(self,key_dict):
for pkey in list(self.table.pkeys):
if set(pkey) >= set(key_dict.keys()):
return True
return False
def notNil(self, key_dict):
return Not(self.Nil(key_dict))
def Nil(self, key_dict):
if self.__isKey(key_dict):
return self.get(key_dict) == self.value_type.nil
else:
key_list = self.__makeIncompleteKeys(key_dict)
if key_list != []:
A = Assn()
# print key_list
for key_ins in key_list:
P = Assn()
for key_name in key_dict:
if key_name in key_ins:
P.add(key_ins[key_name] == key_dict[key_name])
Q = Assn()
Q.add(self.get(key_ins) == self.value_type.nil)
A.add(ForAll(list(key_ins.values()),Implies(P.build(),Q.build())))
return A.build()
else:
return self.XyRel(key_dict, self.value_type.nil, None, RelEqual())
def delete(self, key_dict):
z3id = self.__makeKey(key_dict)
for pkey in list(self.table.pkeys):
if set(pkey) != set(key_dict.keys()):
pkey_dict = {}
for k in pkey:
pkey_dict[k] = self.get(key_dict,k)
z3id = self.__makeKey(pkey_dict)
self.data = Store(self.data, z3id, self.value_type.nil)
self.data = Store(self.data, z3id, self.value_type.nil)
def get(self, key_dict, value_name = None):
z3id = self.__makeKey(key_dict)
values = Select(self.data, z3id)
if value_name == None:
return values
else:
return self.value_type.__dict__[value_name](values)
def add(self, key_dict, value_dict):
z3id = self.__makeKey(key_dict)
value_dict.update(key_dict)
z3value = self.__makeValue(key_dict,value_dict)
self.data = Store(self.data, z3id, z3value)
for pkey in list(self.table.pkeys):
if set(pkey) != set(key_dict.keys()) \
and (set(pkey) & set(value_dict.keys())): # update the data if it is indeed updated
# print(set(pkey), value_dict)
pkey_dict = {}
for k in pkey:
pkey_dict[k] = self.get(key_dict,k)
z3id = self.__makeKey(pkey_dict)
self.data = Store(self.data, z3id, z3value)
def update(self, key_dict, value_dict):
z3id = self.__makeKey(key_dict)
if value_dict == None:
self.delete(key_dict)
else:
self.add(key_dict, value_dict)
def __makeIncompleteKey(self, key_dict):
# find a key
for pkey in list(self.table.pkeys):
if set(pkey) >= set(key_dict.keys()):
ins = self.table.keyInstance(pkey)
return ins
return None
def __makeIncompleteKeys(self, key_dict):
# find a key
key_list = []
for pkey in list(self.table.pkeys):
if set(pkey) >= set(key_dict.keys()):
key_list.append(self.table.keyInstance(pkey))
return key_list
    def XYRel(self, X_dict, Y_dict, name, R):
        """Assert relation ``R`` between attribute ``name`` of any row matching
        ``X_dict`` and attribute ``name`` of any row matching ``Y_dict``.

        Two key-instance lists are generated independently so that zip pairs
        the corresponding primary-key shapes; the ForAll then quantifies over
        both key tuples at once.
        """
        key_list_x = self.__makeIncompleteKeys({})
        key_list_y = self.__makeIncompleteKeys({})
        A = Assn()
        for key_ins_x,key_ins_y in zip(key_list_x,key_list_y):
            # P1/P2: the quantified rows satisfy the given attribute values.
            P1 = Assn()
            for X_name, X_value in list(X_dict.items()):
                P1.add(self.get(key_ins_x,X_name) == X_value)
            P2 = Assn()
            for Y_name, Y_value in list(Y_dict.items()):
                P2.add(self.get(key_ins_y,Y_name) == Y_value)
            # Q: the relation holds between the two projected attributes.
            Q = Assn()
            Q.add(R(self.get(key_ins_x,name),self.get(key_ins_y,name)))
            A.add(ForAll(list(key_ins_x.values()) + list(key_ins_y.values()),Implies(And(P1.build(),P2.build()),Q.build())))
        return A.build()
def XyRel(self, X_dict, Y, name, R):
key_list_x = self.__makeIncompleteKeys({})
A = Assn()
for key_ins_x in key_list_x:
P = Assn()
for X_name, X_value in list(X_dict.items()):
P.add(self.get(key_ins_x,X_name) == X_value)
Q = Assn()
Q.add(R(self.get(key_ins_x,name),Y))
A.add(ForAll(list(key_ins_x.values()),Implies(P.build(),Q.build())))
return A.build()
| 44.050761 | 159 | 0.532035 | from z3 import *
from .enum import *
from .table import *
from .utils import *
from .assn import *
class TableInstance(object):
    def __init__(self, table):
        """Build a Z3 array model of ``table`` plus its key-consistency axiom."""
        super(TableInstance, self).__init__()
        self.table = table
        self.name = table.name
        self.key_type = table.key_type
        self.value_type = table.value_type
        # A single Z3 array maps every (alternate) key encoding to a row value.
        self.data = Array(ID('TABLE_' + table.name), self.key_type, self.value_type)
        # Attribute names in deterministic (sorted) order; this fixes the
        # positional argument order used when constructing row values.
        self.attr_list = list([k_v1[0] for k_v1 in sorted(table.attrs.items())])
        self.axiom = self.__initAxiom()
def __initAxiom(self):
A = Assn()
for pkey1 in self.table.pkeys:
ins1 = self.table.keyInstance(pkey1)
ins2 = self.table.keyInstance(pkey1)
for pkey2 in self.table.pkeys:
F = False
for k in pkey2:
F = Or(F, self.get(ins1,k) != self.get(ins2,k))
A.add(ForAll(list(ins1.values()) + list(ins2.values()),Implies(Not(z3_list_eq(list(ins1.values()),list(ins2.values()))),F)))
# forall k1 k2 K1 K2, (k1:K1 /\ k2:K2 /\ K1 != K2 /\ Table[k1].K2 == k2 /\ Table[k2].K1 == k1) => (Table[k1] == Table[k2])
for i in range(len(list(self.table.pkeys))):
for j in range(i + 1,len(list(self.table.pkeys))):
pk1 = list(self.table.pkeys)[i]
pk2 = list(self.table.pkeys)[j]
if pk1 != pk2:
ins1 = self.table.keyInstance(pk1)
ins2 = self.table.keyInstance(pk2)
R = True
P = True
for k in pk1:
P = And(P, self.get(ins2,k) == ins1[k])
for k in pk2:
P = And(P, self.get(ins1,k) == ins2[k])
Q = self.get(ins1) == self.get(ins2)
A.add(ForAll(list(ins1.values())+ list(ins2.values()),And(R,Implies(P,Q))))
##### Note: The following two axioms are fixed in the add function
# forall k1 K1 K2, (k1:K1 /\ K1 != K2) => Table[Table[k1].K2] not nil
# for i in range(len(list(self.table.pkeys))):
# for j in range(i + 1,len(list(self.table.pkeys))):
# pk1 = list(self.table.pkeys)[i]
# pk2 = list(self.table.pkeys)[j]
# if pk1 != pk2:
# ins1 = self.table.keyInstance(pk1)
# ins2 = self.table.keyInstance(pk2)
# ins2by1 = dict(zip(pk2, [self.get(ins1, k2) for k2,_ in ins2.items()]))
# A.add(ForAll(list(ins1.values()) + list(ins2.values()),And(self.notNil(ins2by1))))
# forall k1 k1' K1 K2, (k1:K1 /\ k1':K1 /\ k1 != k1' /\ K1 != K2) => Table[k1].K2 != Table[k1'].K2
# for i in range(len(list(self.table.pkeys))):
# for j in range(i + 1,len(list(self.table.pkeys))):
# pk1 = list(self.table.pkeys)[i]
# pk2 = list(self.table.pkeys)[j]
# if pk1 != pk2:
# ins1 = self.table.keyInstance(pk1)
# ins1_ = self.table.keyInstance(pk1)
# ins2 = self.table.keyInstance(pk2)
# ins2by1 = dict(zip(pk2, [self.get(ins1, k2) for k2,_ in ins2.items()]))
# ins2by1_ = dict(zip(pk2, [self.get(ins1_, k2) for k2,_ in ins2.items()]))
# A.add(ForAll(list(ins1.values()) + list(ins1_.values()) + list(ins2.values()),
# Implies(list(ins1.values())[0] != list(ins1_.values())[0], list(ins2by1.values())[0] != list(ins2by1_.values())[0])))
return A.build()
    def __makeKey(self,key_dict):
        """Encode ``key_dict`` as a Z3 key value.

        The constructor is looked up by the (TUID of the) column names, and
        the values are passed positionally in sorted-column order.
        """
        return self.key_type.__dict__[TUID(list(key_dict.keys()))](*list([k_v[1] for k_v in sorted(key_dict.items())]))
    def __makeValue(self,key_dict,value_dict):
        """Build a complete row value: attributes present in ``value_dict`` are
        taken from it, the rest are read from the currently stored row."""
        return self.value_type.new(*list([value_dict[attr_name] if attr_name in value_dict else self.get(key_dict,attr_name) for attr_name in self.attr_list]))
def __isKey(self,key_dict):
for pkey in list(self.table.pkeys):
if set(pkey) >= set(key_dict.keys()):
return True
return False
    def notNil(self, key_dict):
        """Z3 assertion that the row for ``key_dict`` exists (negation of Nil)."""
        return Not(self.Nil(key_dict))
    def Nil(self, key_dict):
        """Return a Z3 assertion that the row addressed by ``key_dict`` is nil.

        If ``key_dict`` is covered by a primary key this is a direct lookup;
        otherwise it quantifies over every key instance that could match the
        known column values, falling back to XyRel when no primary key covers
        the given columns at all.
        """
        if self.__isKey(key_dict):
            return self.get(key_dict) == self.value_type.nil
        else:
            key_list = self.__makeIncompleteKeys(key_dict)
            if key_list != []:
                A = Assn()
                for key_ins in key_list:
                    # P: the quantified key agrees on the columns we know.
                    P = Assn()
                    for key_name in key_dict:
                        if key_name in key_ins:
                            P.add(key_ins[key_name] == key_dict[key_name])
                    # Q: the row stored under that key is nil.
                    Q = Assn()
                    Q.add(self.get(key_ins) == self.value_type.nil)
                    A.add(ForAll(list(key_ins.values()),Implies(P.build(),Q.build())))
                return A.build()
            else:
                return self.XyRel(key_dict, self.value_type.nil, None, RelEqual())
def delete(self, key_dict):
z3id = self.__makeKey(key_dict)
for pkey in list(self.table.pkeys):
if set(pkey) != set(key_dict.keys()):
pkey_dict = {}
for k in pkey:
pkey_dict[k] = self.get(key_dict,k)
z3id = self.__makeKey(pkey_dict)
self.data = Store(self.data, z3id, self.value_type.nil)
self.data = Store(self.data, z3id, self.value_type.nil)
def get(self, key_dict, value_name = None):
z3id = self.__makeKey(key_dict)
values = Select(self.data, z3id)
if value_name == None:
return values
else:
return self.value_type.__dict__[value_name](values)
def add(self, key_dict, value_dict):
z3id = self.__makeKey(key_dict)
value_dict.update(key_dict)
z3value = self.__makeValue(key_dict,value_dict)
self.data = Store(self.data, z3id, z3value)
for pkey in list(self.table.pkeys):
if set(pkey) != set(key_dict.keys()) \
and (set(pkey) & set(value_dict.keys())): # update the data if it is indeed updated
# print(set(pkey), value_dict)
pkey_dict = {}
for k in pkey:
pkey_dict[k] = self.get(key_dict,k)
z3id = self.__makeKey(pkey_dict)
self.data = Store(self.data, z3id, z3value)
def update(self, key_dict, value_dict):
z3id = self.__makeKey(key_dict)
if value_dict == None:
self.delete(key_dict)
else:
self.add(key_dict, value_dict)
def __makeIncompleteKey(self, key_dict):
# find a key
for pkey in list(self.table.pkeys):
if set(pkey) >= set(key_dict.keys()):
ins = self.table.keyInstance(pkey)
return ins
return None
def __makeIncompleteKeys(self, key_dict):
# find a key
key_list = []
for pkey in list(self.table.pkeys):
if set(pkey) >= set(key_dict.keys()):
key_list.append(self.table.keyInstance(pkey))
return key_list
def XYRel(self, X_dict, Y_dict, name, R):
key_list_x = self.__makeIncompleteKeys({})
key_list_y = self.__makeIncompleteKeys({})
A = Assn()
for key_ins_x,key_ins_y in zip(key_list_x,key_list_y):
P1 = Assn()
for X_name, X_value in list(X_dict.items()):
P1.add(self.get(key_ins_x,X_name) == X_value)
P2 = Assn()
for Y_name, Y_value in list(Y_dict.items()):
P2.add(self.get(key_ins_y,Y_name) == Y_value)
Q = Assn()
Q.add(R(self.get(key_ins_x,name),self.get(key_ins_y,name)))
A.add(ForAll(list(key_ins_x.values()) + list(key_ins_y.values()),Implies(And(P1.build(),P2.build()),Q.build())))
return A.build()
def XyRel(self, X_dict, Y, name, R):
key_list_x = self.__makeIncompleteKeys({})
A = Assn()
for key_ins_x in key_list_x:
P = Assn()
for X_name, X_value in list(X_dict.items()):
P.add(self.get(key_ins_x,X_name) == X_value)
Q = Assn()
Q.add(R(self.get(key_ins_x,name),Y))
A.add(ForAll(list(key_ins_x.values()),Implies(P.build(),Q.build())))
return A.build()
| true | true |
f7f859370dd2fa13801bb8ef415e4db2b1c97a81 | 1,124 | py | Python | app/routes/pet.py | nkthanh98/flask-seed | 353993e11a765c2b9cec8adf63a555580c223981 | [
"MIT"
] | null | null | null | app/routes/pet.py | nkthanh98/flask-seed | 353993e11a765c2b9cec8adf63a555580c223981 | [
"MIT"
] | null | null | null | app/routes/pet.py | nkthanh98/flask-seed | 353993e11a765c2b9cec8adf63a555580c223981 | [
"MIT"
] | null | null | null | # coding=utf-8
import logging
from werkzeug import exceptions as exc
from flask import g
from app.extends.flask import (
Namespace,
MethodView
)
from app import schemas
from app.services import PetService
pet_ns = Namespace('pet', __name__)
@pet_ns.route('/pet', methods=['POST'])
class PetCreator(MethodView):
    """Endpoint for registering new pets."""

    @pet_ns.expect(schemas.Pet)
    @pet_ns.marshal_with(schemas.Pet)
    def post(self):
        """Persist the pet described by the JSON request body and return it."""
        return PetService().create_pet(g.json)
@pet_ns.route('/pet/<int:pet_id>', methods=['GET', 'PATCH', 'DELETE'])
class Pet(MethodView):
    """Read, update and delete a single pet addressed by ``pet_id``."""

    @pet_ns.marshal_with(schemas.Pet)
    def get(self, pet_id):
        """Return the requested pet, or respond 404 when it is missing."""
        found = PetService().get_pet(pet_id)
        if not found:
            raise exc.NotFound('Pet not found')
        return found

    @pet_ns.expect(schemas.Pet)
    @pet_ns.marshal_with(schemas.Pet)
    def patch(self, pet_id):
        """Apply the JSON request body as a partial update."""
        return PetService().update_pet(pet_id, g.json)

    @pet_ns.marshal_with(schemas.Pet)
    def delete(self, pet_id):
        """Remove the pet."""
        return PetService().delete_pet(pet_id)
| 23.914894 | 70 | 0.66548 |
import logging
from werkzeug import exceptions as exc
from flask import g
from app.extends.flask import (
Namespace,
MethodView
)
from app import schemas
from app.services import PetService
pet_ns = Namespace('pet', __name__)
@pet_ns.route('/pet', methods=['POST'])
class PetCreator(MethodView):
    """REST endpoint that creates new pets."""
    @pet_ns.expect(schemas.Pet)
    @pet_ns.marshal_with(schemas.Pet)
    def post(self):
        """Create a pet from the JSON request body and return it."""
        service = PetService()
        return service.create_pet(g.json)
@pet_ns.route('/pet/<int:pet_id>', methods=['GET', 'PATCH', 'DELETE'])
class Pet(MethodView):
    """REST resource for a single pet addressed by ``pet_id``."""
    @pet_ns.marshal_with(schemas.Pet)
    def get(self, pet_id):
        """Return the pet or respond 404 when it does not exist."""
        service = PetService()
        pet = service.get_pet(pet_id)
        if pet:
            return pet
        raise exc.NotFound('Pet not found')
    @pet_ns.expect(schemas.Pet)
    @pet_ns.marshal_with(schemas.Pet)
    def patch(self, pet_id):
        """Partially update the pet from the JSON request body."""
        service = PetService()
        return service.update_pet(pet_id, g.json)
    @pet_ns.marshal_with(schemas.Pet)
    def delete(self, pet_id):
        """Delete the pet."""
        service = PetService()
        return service.delete_pet(pet_id)
| true | true |
f7f859b3f499c9a1e5744b75fe5a660dd4670b23 | 13,576 | py | Python | vw_plotspectra.py | mahdiqezlou/vw_spectra | 975b125a03b3f8505e01db7fd4b4c3c609271499 | [
"MIT"
] | null | null | null | vw_plotspectra.py | mahdiqezlou/vw_spectra | 975b125a03b3f8505e01db7fd4b4c3c609271499 | [
"MIT"
] | 3 | 2019-12-09T20:49:57.000Z | 2019-12-10T20:09:01.000Z | vw_plotspectra.py | mahdiqezlou/vw_spectra | 975b125a03b3f8505e01db7fd4b4c3c609271499 | [
"MIT"
] | 1 | 2019-12-03T02:05:29.000Z | 2019-12-03T02:05:29.000Z | # -*- coding: utf-8 -*-
"""Contains the plotting-specific functions specific to the velocity width analysis."""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from fake_spectra import plot_spectra as ps
from fake_spectra import haloassigned_spectra as hs
import kstest as ks
import vw_spectra as vw
# Python 2/3 compatibility: on Python 3 the lazy ``xrange`` builtin no longer
# exists, so alias it to ``range`` (which is already lazy there).
try:
    xrange(1)
except NameError:
    xrange = range
def _bootstrap_sample(vel_data, v_table, samples, error):
    """Generate a Monte Carlo error sample of the differential distribution.

    Draws ``samples`` values from ``vel_data`` with replacement, optionally
    perturbs each by Gaussian noise of width ``error``, and returns the
    histogram counts over the bin edges ``v_table``.
    """
    # np.random.random_integers is deprecated and removed from modern NumPy;
    # randint draws from the half-open range [0, high), so this samples the
    # same inclusive index range [0, size-1].
    index = np.random.randint(0, np.size(vel_data), samples)
    bootstrap = vel_data[index]
    if error > 0.:
        bootstrap += np.random.normal(0, error, size=samples)
    return np.histogram(bootstrap, v_table)[0]
class VWPlotSpectra(hs.HaloAssignedSpectra, ps.PlottingSpectra, vw.VWSpectra):
"""Extends PlottingSpectra with velocity width specific code."""
def plot_vel_width(self, elem, ion, dv=0.17, color="red", ls="-"):
"""Plot the velocity widths of this snapshot
Parameters:
elem - element to use
ion - ionisation state: 1 is neutral.
dv - bin spacing
"""
(vbin, vels) = self.vel_width_hist(elem, ion, dv)
plt.semilogx(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
def plot_cum_vel_width(self, elem, ion, norm, dv=0.1, color="red", ls="-"):
"""Plot the velocity widths of this snapshot
Parameters:
elem - element to use
ion - ionisation state: 1 is neutral.
dv - bin spacing
"""
(vbin, vels) = self.vel_width_hist(elem, ion, dv)
cvels = np.cumsum(vels)
cvels = cvels*norm/cvels[-1]
plt.semilogx(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)
def plot_cum_f_peak(self, elem, ion, norm, dv=0.01, color="red", ls="-"):
"""Plot the velocity widths of this snapshot
Parameters:
elem - element to use
ion - ionisation state: 1 is neutral.
dv - bin spacing
"""
(vbin, vels) = self.f_peak_hist(elem, ion, dv)
cvels = np.cumsum(vels)
cvels = cvels*norm/cvels[-1]
plt.plot(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{edg}$")
def plot_f_meanmedian_errors(self, elem, ion, samples, cumulative=False, nv_table = 11, color="red"):
"""Plot 68% contour for error on the fmm distribution"""
f_peak = self.vel_mean_median(elem, ion)
ind = self.get_filt(elem, ion)
f_peak = f_peak[ind]
v_table=np.linspace(0,1,nv_table)
self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)
def plot_f_peak_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color="red"):
"""Plot 68% contour for error on the fpeak distribution"""
f_peak = self.vel_peak(elem, ion)
ind = self.get_filt(elem, ion)
f_peak = f_peak[ind]
v_table=np.linspace(0,1,nv_table)
self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)
def plot_eq_width_errors(self, elem, ion, line, samples, cumulative=False, min_width = -1.6, nv_table=11, color="red"):
"""Plot 68% contour for error on the fpeak distribution"""
eq_width = self.equivalent_width(elem, ion, line)
ind = self.get_filt(elem, ion)
eq_width = eq_width[ind]
v_table = np.logspace(min_width, np.log10(np.max(eq_width)), nv_table)
self._plot_errors(np.log10(eq_width), np.log10(v_table), samples, 0.05, cumulative, False, color)
def plot_vw_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color="red"):
"""Plot 68% contour for error on the velocity width distribution"""
vel_width = self.vel_width(elem, ion)
ind = self.get_filt(elem, ion)
vel_width = vel_width[ind]
v_table=np.logspace(1,np.log10(np.max(vel_width)+10),nv_table)
self._plot_errors(vel_width, v_table, samples, 5, cumulative, True, color)
    def _plot_errors(self, vel_data, v_table, samples, error, cumulative=False, lognorm=True, color="red"):
        """Find and plot a 68% contour for a subsample of size samples, by Monte Carlo.

        vel_data - full data set to bootstrap from
        v_table - histogram bin edges
        samples - size of each bootstrap subsample
        error - Gaussian sigma added to each resampled point (0 disables noise)
        cumulative - plot a cumulative rather than differential distribution
        lognorm - normalise differential bins by their log10 width
        """
        # Bin centres used as the x axis of the filled band.
        vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
        #Get a subsample
        cdfs = np.array([_bootstrap_sample(vel_data, v_table, samples, error) for _ in xrange(10000)])
        if cumulative:
            cdfs = np.cumsum(cdfs, axis=1)
            norm = 1
        else:
            if lognorm:
                v_table = np.log10(v_table)
            norm = samples * np.array([(-v_table[i]+v_table[i+1]) for i in xrange(np.size(v_table)-1)])
        # 16th/84th percentiles over the 10000 realisations give the 68% band.
        lower = np.percentile(cdfs, 16, axis=0)/norm
        upper = np.percentile(cdfs, 84, axis=0)/norm
        plt.fill_between(vbin, lower, upper, color=color, alpha=0.3)
def plot_f_meanmedian(self, elem, ion, dv=0.06, color="red", ls="-"):
"""
Plot an f_mean_median histogram
For args see plot_vel_width
"""
(vbin, vels) = self.f_meanmedian_hist(elem, ion, dv)
plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{mm}$")
def plot_f_peak(self, elem, ion, dv=0.06, color="red", ls="-"):
"""
Plot an f_peak histogram
For args see plot_vel_width
"""
(vbin, vels) = self.f_peak_hist(elem, ion, dv)
plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{edg}$")
    def plot_sep_frac(self,elem = "Si", ion = 2, thresh = 1e-1, mindist = 15, dv = 0.2, color="blue", ls="-"):
        """
        Plots the fraction of spectra in each velocity width bin which are separated.
        Threshold is as a percentage of the maximum value.
        mindist is in km/s
        """
        sep = self.get_separated(elem, ion, thresh,mindist)
        vels = self.vel_width(elem, ion)
        ind = self.get_filt(elem, ion)
        # Log-spaced bins between 10 and 1000 km/s.
        v_table = 10**np.arange(1, 3, dv)
        vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
        hist1 = np.histogram(vels[ind], v_table)
        hist2 = np.histogram(vels[ind][sep],v_table)
        # Guard against division by zero in empty bins: the separated count
        # there is also zero, so the plotted fraction is 0, not NaN.
        hist1[0][np.where(hist1[0] == 0)] = 1
        plt.semilogx(vbin, hist2[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)
def plot_vel_width_breakdown(self, elem = "Si", ion = 2, dv = 0.1):
"""
Plots the fraction of the total velocity width histogram in a series of virial velocity bins
"""
#Find velocity width
vels = self.vel_width(elem, ion)
ii = self.get_filt(elem, ion)
self._plot_breakdown(vels,ii,(0, 60, 120), (60, 120, 900), ("< 60", "60-120", "> 120"),dv)
plt.xlabel(r"$v_\mathrm{90}$ (km s$^{-1}$)")
plt.ylim(0,1)
def plot_f_peak_breakdown(self, elem = "Si", ion = 2, dv = 0.05):
"""
Plots the fraction of the total fedge histogram in a series of virial velocity bins
"""
#Find velocity width
vels = self.vel_peak(elem, ion)
ii = self.get_filt(elem, ion)
self._plot_breakdown(vels,ii,(0, 50), (50, 900), ("< 50", "> 50"),dv, False)
plt.xlabel(r"$f_\mathrm{edg}$")
plt.ylim(0,1)
plt.xlim(0,1)
plt.legend(loc=1,ncol=2)
def plot_mult_halo_frac(self,elem = "Si", ion = 2, dv = 0.2, color="blue", ls="-"):
"""
Plots the fraction of spectra in each velocity width bin which are separated.
Threshold is as a percentage of the maximum value.
mindist is in km/s
"""
#Find velocity width
(halos, subhalos) = self.find_nearby_halos()
vels = self.vel_width(elem, ion)
ii = self.get_filt(elem, ion)
#Find virial velocity
(halo, _) = self.find_nearest_halo()
ind = np.where(halo[ii] > 0)
# virial = np.ones_like(halo, dtype=np.double)
# virial[ind] = self.virial_vel(halo[ind])
vwvir = vels[ii][ind] #/virial[ind]
#Make bins
v_table = 10**np.arange(np.min(np.log10(vwvir)),np.max(np.log10(vwvir)) , dv)
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
#Histogram of vel width / virial vel
hist1 = np.histogram(vwvir, v_table)
hist1[0][np.where(hist1[0] == 0)] = 1
#Find places with multiple halos
subhalo_parent = [list(self.sub_sub_index[ss]) for ss in subhalos]
allh = np.array([list(set(subhalo_parent[ii] + halos[ii])) for ii in xrange(self.NumLos)])
indmult = np.where([len(aa) > 1 for aa in allh[ind]])
histmult = np.histogram(vwvir[indmult],v_table)
plt.semilogx(vbin, histmult[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)
def plot_Z_vs_vel_width(self,elem="Si", ion=2, color="blue",color2="darkblue"):
"""Plot the correlation between metallicity and velocity width"""
vel = self.vel_width(elem, ion)
met = self.get_metallicity()
#Ignore objects too faint to be seen
ind2 = np.where(met > 1e-4)
met = met[ind2]
vel = vel[ind2]
self._plot_2d_contour(vel, met, 10, "Z vel sim", color, color2, fit=True)
plt.plot(vel, met, 'o', color=color)
plt.xlim(10,2e3)
plt.ylabel(r"$\mathrm{Z} / \mathrm{Z}_\odot$")
plt.xlabel(r"$v_\mathrm{90}$ (km s$^{-1}$)")
def plot_vel_vs_mass(self,elem, ion, color="blue",color2="darkblue"):
"""Plot the correlation between mass and metallicity, with a fit"""
vel = self.vel_width(elem, ion)
self._plot_xx_vs_mass(vel, "vel",color,color2)
    def kstest(self, Zdata, veldata, elem="Si", ion=2):
        """Find the 2D KS test value of the vel width and log metallicity
        with respect to an external dataset, veldata and Z data.

        Zdata, veldata - external (observed) metallicities and velocity
        widths; both samples are log10-transformed before comparison.
        Returns the result of ks.ks_2d_2samp (a two-sample 2D KS test).
        """
        met = self.get_metallicity()
        ind = self.get_filt(elem, ion)
        met = np.log10(met[ind])
        vel = np.log10(self.vel_width(elem, ion)[ind])
        # Each sample is a set of (log Z, log v90) points.
        data2 = np.array([met,vel]).T
        data = np.array([np.log10(Zdata), np.log10(veldata)]).T
        return ks.ks_2d_2samp(data,data2)
def plot_virial_vel_vs_vel_width(self,elem, ion,color="red", ls="-", label="", dm=0.1):
"""Plot a histogram of the velocity widths vs the halo virial velocity"""
(halos, _) = self.find_nearest_halo()
ind = self.get_filt(elem,ion)
f_ind = np.where(halos[ind] != -1)
vel = self.vel_width(elem, ion)[ind][f_ind]
virial = self.virial_vel(halos[ind][f_ind])+0.1
vvvir = vel/virial
m_table = 10**np.arange(np.log10(np.min(vvvir)), np.log10(np.max(vvvir)), dm)
mbin = np.array([(m_table[i]+m_table[i+1])/2. for i in range(0,np.size(m_table)-1)])
pdf = np.histogram(np.log10(vvvir),np.log10(m_table), density=True)[0]
print("median v/vir: ",np.median(vvvir))
plt.semilogx(mbin, pdf, color=color, ls=ls, label=label)
return (mbin, pdf)
    def plot_vbars(self, tau):
        """Plot the vertical bars marking the velocity widths.

        Returns (xaxis, xlims): the velocity axis centred on the absorber and
        suggested x limits padded 20 km/s beyond the v90 bounds.  The tpos/ypos
        ladders below are purely cosmetic heuristics placing the label so it
        does not overlap the bars or the spectrum.
        """
        (low, high) = self._vel_width_bound(tau)
        xaxis = np.arange(0,np.size(tau))*self.dvbin - (high+low)/2
        if high - low > 0:
            # Green/red verticals mark the lower/upper v90 bounds.
            plt.plot([xaxis[0]+low,xaxis[0]+low],[-1,20], color="green")
            plt.plot([xaxis[0]+high,xaxis[0]+high],[-1,20],color="red")
        # Horizontal label position: nudge further inside for wider profiles.
        if high - low > 30:
            tpos = xaxis[0]+low+5
        else:
            tpos = xaxis[0]+high+5
        if high - low > 60:
            tpos = xaxis[0]+low+25
        if high - low > 150:
            tpos = xaxis[0]+low+35
        # Vertical label position: just below the optical depth peak, clamped.
        ypos = np.max(tau) -0.2
        if np.max(tau) < 0.8:
            ypos = 0.7
        elif np.max(tau) > 4.:
            ypos = 3.5
        plt.text(tpos,ypos,r"$\Delta v_{90} = "+str(np.round(high-low,1))+r"$", size=14)
        xlims = (np.max((xaxis[0],xaxis[0]+low-20)),np.min((xaxis[-1],xaxis[0]+high+20)))
        return (xaxis, xlims)
def plot_spectrum(self, elem, ion, line, spec_num, flux=True, xlims=None, color="blue", ls="-", offset=None):
"""Plot an spectrum, centered on the maximum tau,
and marking the 90% velocity width.
offset: offset in km/s for the x-axis labels"""
if line == -1:
tau_no = self.get_observer_tau(elem, ion, spec_num, noise=False)
tau = self.get_observer_tau(elem, ion, spec_num, noise=True)
else:
tau_no = self.get_tau(elem, ion, line, spec_num, noise=False)
tau = self.get_tau(elem, ion, line, spec_num, noise=True)
(low, high, offset_def) = self.find_absorber_width(elem, ion)
if offset is None:
offset = offset_def
tau_l = np.roll(tau_no, offset[spec_num])[low[spec_num]:high[spec_num]]
(xaxis, xlims_def) = self.plot_vbars(tau_l)
if xlims is None:
xlims = xlims_def
tau_l = np.roll(tau, offset[spec_num])[low[spec_num]:high[spec_num]]
return self.plot_spectrum_raw(tau_l,xaxis, xlims, flux=flux, color=color, ls=ls)
    def get_filt(self, elem, ion, thresh = 100):
        """
        Get an index list to exclude spectra where the ion is too small, or velocity width < 20
        thresh - observable density threshold
        Thin wrapper: the actual cuts are implemented in vw.VWSpectra.get_filt.
        """
        return vw.VWSpectra.get_filt(self, elem, ion, thresh)
| 45.864865 | 123 | 0.606732 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from fake_spectra import plot_spectra as ps
from fake_spectra import haloassigned_spectra as hs
import kstest as ks
import vw_spectra as vw
try:
xrange(1)
except NameError:
xrange = range
def _bootstrap_sample(vel_data, v_table, samples, error):
    """Generate a Monte Carlo error sample of the differential distribution.

    Draws ``samples`` values from ``vel_data`` with replacement, optionally
    perturbs each by Gaussian noise of width ``error``, and returns the
    histogram counts over the bin edges ``v_table``.
    """
    # np.random.random_integers is deprecated and removed from modern NumPy;
    # randint samples [0, high) which matches the inclusive range [0, size-1].
    index = np.random.randint(0, np.size(vel_data), samples)
    bootstrap = vel_data[index]
    if error > 0.:
        bootstrap += np.random.normal(0, error, size=samples)
    return np.histogram(bootstrap, v_table)[0]
class VWPlotSpectra(hs.HaloAssignedSpectra, ps.PlottingSpectra, vw.VWSpectra):
def plot_vel_width(self, elem, ion, dv=0.17, color="red", ls="-"):
(vbin, vels) = self.vel_width_hist(elem, ion, dv)
plt.semilogx(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
def plot_cum_vel_width(self, elem, ion, norm, dv=0.1, color="red", ls="-"):
(vbin, vels) = self.vel_width_hist(elem, ion, dv)
cvels = np.cumsum(vels)
cvels = cvels*norm/cvels[-1]
plt.semilogx(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)
def plot_cum_f_peak(self, elem, ion, norm, dv=0.01, color="red", ls="-"):
(vbin, vels) = self.f_peak_hist(elem, ion, dv)
cvels = np.cumsum(vels)
cvels = cvels*norm/cvels[-1]
plt.plot(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{edg}$")
def plot_f_meanmedian_errors(self, elem, ion, samples, cumulative=False, nv_table = 11, color="red"):
f_peak = self.vel_mean_median(elem, ion)
ind = self.get_filt(elem, ion)
f_peak = f_peak[ind]
v_table=np.linspace(0,1,nv_table)
self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)
def plot_f_peak_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color="red"):
f_peak = self.vel_peak(elem, ion)
ind = self.get_filt(elem, ion)
f_peak = f_peak[ind]
v_table=np.linspace(0,1,nv_table)
self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)
def plot_eq_width_errors(self, elem, ion, line, samples, cumulative=False, min_width = -1.6, nv_table=11, color="red"):
eq_width = self.equivalent_width(elem, ion, line)
ind = self.get_filt(elem, ion)
eq_width = eq_width[ind]
v_table = np.logspace(min_width, np.log10(np.max(eq_width)), nv_table)
self._plot_errors(np.log10(eq_width), np.log10(v_table), samples, 0.05, cumulative, False, color)
def plot_vw_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color="red"):
vel_width = self.vel_width(elem, ion)
ind = self.get_filt(elem, ion)
vel_width = vel_width[ind]
v_table=np.logspace(1,np.log10(np.max(vel_width)+10),nv_table)
self._plot_errors(vel_width, v_table, samples, 5, cumulative, True, color)
def _plot_errors(self, vel_data, v_table, samples, error, cumulative=False, lognorm=True, color="red"):
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
cdfs = np.array([_bootstrap_sample(vel_data, v_table, samples, error) for _ in xrange(10000)])
if cumulative:
cdfs = np.cumsum(cdfs, axis=1)
norm = 1
else:
if lognorm:
v_table = np.log10(v_table)
norm = samples * np.array([(-v_table[i]+v_table[i+1]) for i in xrange(np.size(v_table)-1)])
lower = np.percentile(cdfs, 16, axis=0)/norm
upper = np.percentile(cdfs, 84, axis=0)/norm
plt.fill_between(vbin, lower, upper, color=color, alpha=0.3)
def plot_f_meanmedian(self, elem, ion, dv=0.06, color="red", ls="-"):
(vbin, vels) = self.f_meanmedian_hist(elem, ion, dv)
plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{mm}$")
def plot_f_peak(self, elem, ion, dv=0.06, color="red", ls="-"):
(vbin, vels) = self.f_peak_hist(elem, ion, dv)
plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{edg}$")
def plot_sep_frac(self,elem = "Si", ion = 2, thresh = 1e-1, mindist = 15, dv = 0.2, color="blue", ls="-"):
sep = self.get_separated(elem, ion, thresh,mindist)
vels = self.vel_width(elem, ion)
ind = self.get_filt(elem, ion)
v_table = 10**np.arange(1, 3, dv)
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
hist1 = np.histogram(vels[ind], v_table)
hist2 = np.histogram(vels[ind][sep],v_table)
hist1[0][np.where(hist1[0] == 0)] = 1
plt.semilogx(vbin, hist2[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)
def plot_vel_width_breakdown(self, elem = "Si", ion = 2, dv = 0.1):
vels = self.vel_width(elem, ion)
ii = self.get_filt(elem, ion)
self._plot_breakdown(vels,ii,(0, 60, 120), (60, 120, 900), ("< 60", "60-120", "> 120"),dv)
plt.xlabel(r"$v_\mathrm{90}$ (km s$^{-1}$)")
plt.ylim(0,1)
def plot_f_peak_breakdown(self, elem = "Si", ion = 2, dv = 0.05):
vels = self.vel_peak(elem, ion)
ii = self.get_filt(elem, ion)
self._plot_breakdown(vels,ii,(0, 50), (50, 900), ("< 50", "> 50"),dv, False)
plt.xlabel(r"$f_\mathrm{edg}$")
plt.ylim(0,1)
plt.xlim(0,1)
plt.legend(loc=1,ncol=2)
def plot_mult_halo_frac(self,elem = "Si", ion = 2, dv = 0.2, color="blue", ls="-"):
(halos, subhalos) = self.find_nearby_halos()
vels = self.vel_width(elem, ion)
ii = self.get_filt(elem, ion)
(halo, _) = self.find_nearest_halo()
ind = np.where(halo[ii] > 0)
vwvir = vels[ii][ind]
v_table = 10**np.arange(np.min(np.log10(vwvir)),np.max(np.log10(vwvir)) , dv)
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
hist1 = np.histogram(vwvir, v_table)
hist1[0][np.where(hist1[0] == 0)] = 1
subhalo_parent = [list(self.sub_sub_index[ss]) for ss in subhalos]
allh = np.array([list(set(subhalo_parent[ii] + halos[ii])) for ii in xrange(self.NumLos)])
indmult = np.where([len(aa) > 1 for aa in allh[ind]])
histmult = np.histogram(vwvir[indmult],v_table)
plt.semilogx(vbin, histmult[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)
def plot_Z_vs_vel_width(self,elem="Si", ion=2, color="blue",color2="darkblue"):
vel = self.vel_width(elem, ion)
met = self.get_metallicity()
ind2 = np.where(met > 1e-4)
met = met[ind2]
vel = vel[ind2]
self._plot_2d_contour(vel, met, 10, "Z vel sim", color, color2, fit=True)
plt.plot(vel, met, 'o', color=color)
plt.xlim(10,2e3)
plt.ylabel(r"$\mathrm{Z} / \mathrm{Z}_\odot$")
plt.xlabel(r"$v_\mathrm{90}$ (km s$^{-1}$)")
def plot_vel_vs_mass(self,elem, ion, color="blue",color2="darkblue"):
vel = self.vel_width(elem, ion)
self._plot_xx_vs_mass(vel, "vel",color,color2)
def kstest(self, Zdata, veldata, elem="Si", ion=2):
met = self.get_metallicity()
ind = self.get_filt(elem, ion)
met = np.log10(met[ind])
vel = np.log10(self.vel_width(elem, ion)[ind])
data2 = np.array([met,vel]).T
data = np.array([np.log10(Zdata), np.log10(veldata)]).T
return ks.ks_2d_2samp(data,data2)
def plot_virial_vel_vs_vel_width(self,elem, ion,color="red", ls="-", label="", dm=0.1):
(halos, _) = self.find_nearest_halo()
ind = self.get_filt(elem,ion)
f_ind = np.where(halos[ind] != -1)
vel = self.vel_width(elem, ion)[ind][f_ind]
virial = self.virial_vel(halos[ind][f_ind])+0.1
vvvir = vel/virial
m_table = 10**np.arange(np.log10(np.min(vvvir)), np.log10(np.max(vvvir)), dm)
mbin = np.array([(m_table[i]+m_table[i+1])/2. for i in range(0,np.size(m_table)-1)])
pdf = np.histogram(np.log10(vvvir),np.log10(m_table), density=True)[0]
print("median v/vir: ",np.median(vvvir))
plt.semilogx(mbin, pdf, color=color, ls=ls, label=label)
return (mbin, pdf)
def plot_vbars(self, tau):
(low, high) = self._vel_width_bound(tau)
xaxis = np.arange(0,np.size(tau))*self.dvbin - (high+low)/2
if high - low > 0:
plt.plot([xaxis[0]+low,xaxis[0]+low],[-1,20], color="green")
plt.plot([xaxis[0]+high,xaxis[0]+high],[-1,20],color="red")
if high - low > 30:
tpos = xaxis[0]+low+5
else:
tpos = xaxis[0]+high+5
if high - low > 60:
tpos = xaxis[0]+low+25
if high - low > 150:
tpos = xaxis[0]+low+35
ypos = np.max(tau) -0.2
if np.max(tau) < 0.8:
ypos = 0.7
elif np.max(tau) > 4.:
ypos = 3.5
plt.text(tpos,ypos,r"$\Delta v_{90} = "+str(np.round(high-low,1))+r"$", size=14)
xlims = (np.max((xaxis[0],xaxis[0]+low-20)),np.min((xaxis[-1],xaxis[0]+high+20)))
return (xaxis, xlims)
def plot_spectrum(self, elem, ion, line, spec_num, flux=True, xlims=None, color="blue", ls="-", offset=None):
if line == -1:
tau_no = self.get_observer_tau(elem, ion, spec_num, noise=False)
tau = self.get_observer_tau(elem, ion, spec_num, noise=True)
else:
tau_no = self.get_tau(elem, ion, line, spec_num, noise=False)
tau = self.get_tau(elem, ion, line, spec_num, noise=True)
(low, high, offset_def) = self.find_absorber_width(elem, ion)
if offset is None:
offset = offset_def
tau_l = np.roll(tau_no, offset[spec_num])[low[spec_num]:high[spec_num]]
(xaxis, xlims_def) = self.plot_vbars(tau_l)
if xlims is None:
xlims = xlims_def
tau_l = np.roll(tau, offset[spec_num])[low[spec_num]:high[spec_num]]
return self.plot_spectrum_raw(tau_l,xaxis, xlims, flux=flux, color=color, ls=ls)
def get_filt(self, elem, ion, thresh = 100):
return vw.VWSpectra.get_filt(self, elem, ion, thresh)
| true | true |
f7f85a1a125b655eb9c751ffdcea4a5de65ff8ce | 4,059 | py | Python | rmgpy/tools/data.py | pw0908/RMG-Py | 3846fcce701f2a5fd12dbfa429687e9fcd647298 | [
"MIT"
] | 1 | 2022-01-24T05:08:32.000Z | 2022-01-24T05:08:32.000Z | rmgpy/tools/data.py | pw0908/RMG-Py | 3846fcce701f2a5fd12dbfa429687e9fcd647298 | [
"MIT"
] | 72 | 2016-06-06T18:18:49.000Z | 2019-11-17T03:21:10.000Z | rmgpy/tools/data.py | pw0908/RMG-Py | 3846fcce701f2a5fd12dbfa429687e9fcd647298 | [
"MIT"
] | 3 | 2017-09-22T15:47:37.000Z | 2021-12-30T23:51:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import numpy
class GenericData(object):
    """
    A generic data class for the purpose of plotting.

    ======================= ==============================================================================================
    Attribute               Description
    ======================= ==============================================================================================
    `label`                 A string label describing the data, can be used in a plot legend or in an axis label
    `data`                  A numpy array of the data
    `uncertainty`           An uncertainty value associated with the data. Either a scalar or a numpy array with same
                            length as `data`
    `species`               Contains species associated with the data, often used with a Species object
    `reaction`              Contains reaction associated with the data, often used with a Reaction object
    `units`                 Contains a string describing the units associated with the data
    `index`                 An integer containing the index associated with the data
    ======================= ==============================================================================================
    """
    def __init__(self, label='', data=None, uncertainty=None, species=None, reaction=None, units=None, index=None):
        # An empty label is deliberately normalized to None so callers can test truthiness.
        self.label = str(label) if label else None
        if isinstance(data, list):
            self.data = numpy.array(data)
        elif isinstance(data, numpy.ndarray):
            self.data = data
        else:
            # ValueError is a subclass of Exception, so existing `except Exception` handlers still work.
            raise ValueError('Data for GenericData object must be initialized as a list or numpy.array of values.')
        self.uncertainty = uncertainty
        self.species = species
        self.reaction = reaction
        self.units = str(units) if units else None
        # Compare against None explicitly: an index of 0 is valid and must not be coerced to None.
        self.index = int(index) if index is not None else None
| 62.446154 | 122 | 0.476472 | true | true | |
f7f85a71139fd0deb9e0541460d266eb90c94000 | 8,412 | py | Python | config/settings/production.py | preludelife/nomadgram | ca2c540f7bad3df0c2037c877eeca7c23f423a15 | [
"MIT"
] | null | null | null | config/settings/production.py | preludelife/nomadgram | ca2c540f7bad3df0c2037c877eeca7c23f423a15 | [
"MIT"
] | 3 | 2020-06-05T18:22:45.000Z | 2021-06-10T20:38:11.000Z | config/settings/production.py | preludelife/nomadgram | ca2c540f7bad3df0c2037c877eeca7c23f423a15 | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['noadcoders.co'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_AUTO_CREATE_BUCKET = True
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
STATIC_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
StaticRootS3BotoStorage = lambda: S3Boto3Storage(location='static') # noqa
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media', file_overwrite=False) # noqa
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='Nomadgram <noreply@noadcoders.co>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Nomadgram]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
| 41.643564 | 96 | 0.618997 | from .base import *
from .base import env
= env('DJANGO_SECRET_KEY')
= env.list('DJANGO_ALLOWED_HOSTS', default=['noadcoders.co'])
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
': True,
}
}
}
= ('HTTP_X_FORWARDED_PROTO', 'https')
= env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
= True
= True
= True
= True
env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
= env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
= True
= 'DENY'
PS += ['storages']
SS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
ET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
_CREATE_BUCKET = True
YSTRING_AUTH = False
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
STATIC_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
StaticRootS3BotoStorage = lambda: S3Boto3Storage(location='static') # noqa
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media', file_overwrite=False) # noqa
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='Nomadgram <noreply@noadcoders.co>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Nomadgram]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
| true | true |
f7f85b4f94428c38c2ae9ab5221ad6078ffbc4c5 | 4,448 | py | Python | gocddash/analysis/parse_cctray.py | peterrosell/gocddash | c1004135dc66b0d8394d6025750b94e12650ab36 | [
"MIT"
] | 4 | 2016-01-20T19:53:06.000Z | 2019-09-16T11:49:13.000Z | gocddash/analysis/parse_cctray.py | peterrosell/gocddash | c1004135dc66b0d8394d6025750b94e12650ab36 | [
"MIT"
] | null | null | null | gocddash/analysis/parse_cctray.py | peterrosell/gocddash | c1004135dc66b0d8394d6025750b94e12650ab36 | [
"MIT"
] | 5 | 2016-07-13T10:41:20.000Z | 2018-04-10T07:41:18.000Z | from collections import defaultdict
from xml.etree import ElementTree as Et
class Projects(object):
    """Parsed cctray feed: groups <Project> entries into Pipeline objects."""

    def __init__(self, source):
        self.tree = Et.fromstring(source)
        self.pipelines = {}
        self.stages = defaultdict(list)
        self.jobs = defaultdict(list)
        self.parse()

    def parse(self):
        """Split each entry name ('pipeline :: stage[ :: job]') and file it under its pipeline."""
        for project in self.tree.findall('Project'):
            pieces = [piece.strip() for piece in project.attrib['name'].split('::')]
            project.set("pipeline_name", pieces[0])
            project.set("stage_name", pieces[1])
            project.set("job_name", pieces[2] if len(pieces) > 2 else None)
            pipeline_name = project.get("pipeline_name")
            if pipeline_name not in self.pipelines:
                self.pipelines[pipeline_name] = Pipeline()
            self.pipelines[pipeline_name].add_facts(project)

    @staticmethod
    def all_which(entity):
        """Predicate selecting every pipeline."""
        return True

    @staticmethod
    def progress_which(entity):
        """Predicate selecting pipelines that are not plainly green:
        those that failed the last build plus anything currently building."""
        return entity.status != 'Success'

    @staticmethod
    def failing_which(entity):
        """Predicate selecting only pipelines whose last build failed."""
        return 'Failure' in entity.status

    @staticmethod
    def time_key(entity):
        """Sort key: timestamp of the most recent build."""
        return entity.changed

    def select(self, which, groups=None, group_map=None):
        """Return pipelines matched by *which* ('all' | 'progress' | 'failing'),
        most recently changed first, optionally restricted to *groups*."""
        predicate = {
            'all': self.all_which,
            'progress': self.progress_which,
            'failing': self.failing_which,
        }[which]
        matched = [candidate for candidate in self.pipelines.values() if predicate(candidate)]
        # sort + reverse (rather than sorted(reverse=True)) preserves the
        # original tie-breaking order for equal timestamps.
        matched.sort(key=self.time_key)
        matched.reverse()
        if groups:
            matched = [candidate for candidate in matched if group_map[candidate.name] in groups]
        return matched
class Pipeline(object):
    """Aggregated build state of one pipeline, built up from cctray <Project> elements."""

    def __init__(self):
        self.activity_set = set()       # every 'activity' value seen (e.g. 'Building', 'Sleeping')
        self.last_build_set = set()     # every 'lastBuildStatus' value seen
        self.status = None              # summary such as 'Success' or 'Building after Failure'
        self.changed = None             # most recent lastBuildTime across all rows
        self.url = None                 # value-stream-map URL derived from the first row
        self.name = None                # pipeline name parsed from the project name
        self.label = None               # run label: last path segment of `url`
        self.stages = []                # Stage objects in the order first seen
        self.messages = defaultdict(set)  # message texts grouped by their 'kind' attribute

    def add_facts(self, project):
        """Fold one cctray <Project> element (a stage or job row) into this pipeline.

        Updates the latest change time, derives the value-stream-map URL from
        the first element seen, appends new stages/jobs, and recomputes the
        summary status string.
        """
        if self.changed:
            self.changed = max(self.changed, project.attrib['lastBuildTime'])
        else:
            self.changed = project.attrib['lastBuildTime']
        if not self.name:
            self.name = project.get("pipeline_name")
            # .../pipelines/<name>/<label>/... -> .../pipelines/value_stream_map/<name>/<label>
            self.url = project.attrib['webUrl'].rsplit('/', 2)[0].replace('pipelines/', 'pipelines/value_stream_map/')
            self.label = self.url.split('/')[-1]
        if project.get("stage_name") not in [stage.name for stage in self.stages]:
            self.stages.append(Stage(project))
        if project.get("job_name"):
            self.stages[-1].add_job(project)
        self.activity_set.add(project.attrib['activity'])
        self.last_build_set.add(project.attrib['lastBuildStatus'])
        prefix = "Building after " if "Building" in self.activity_set else ""
        suffix = "Failure" if "Failure" in self.last_build_set else "Success"
        self.status = prefix + suffix
        self.add_messages(project)

    def add_messages(self, project):
        """Collect <message> texts from the element, grouped by their 'kind' attribute."""
        for message in project.findall('messages/message'):
            self.messages[message.attrib['kind']].add(message.attrib['text'])

    def is_success(self):
        """True when the summary status is exactly 'Success' (not building, not failing)."""
        return self.status == "Success"

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.name == other.name

    def __hash__(self):
        # Defining __eq__ without __hash__ makes instances unhashable in
        # Python 3; hash on the same attribute __eq__ compares so equal
        # pipelines hash equally.
        return hash(self.name)
class Entity(object):
    """Common base for stages and jobs parsed from a cctray <Project> element."""

    def __init__(self, project):
        attributes = project.attrib
        if attributes['activity'] == "Building":
            prefix = "Building after "
        else:
            prefix = ""
        self.status = prefix + attributes['lastBuildStatus']
        self.url = attributes['webUrl']
class Stage(Entity):
    """A pipeline stage parsed from a cctray <Project> element; holds its jobs."""

    def __init__(self, project):
        super(Stage, self).__init__(project)
        self.jobs = []
        self.name = project.get("stage_name")
        # The stage counter is the final path segment of the stage's web URL.
        self.counter = self.url.rsplit('/', 1)[-1]

    def add_job(self, project):
        """Append a Job parsed from the given element to this stage."""
        self.jobs.append(Job(project))
class Job(Entity):
    """A single build job within a stage, parsed from a cctray <Project> element."""

    def __init__(self, project):
        super(Job, self).__init__(project)
        # job_name was attached to the element earlier by Projects.parse().
        self.name = project.attrib.get("job_name")
| 33.69697 | 118 | 0.606565 | from collections import defaultdict
from xml.etree import ElementTree as Et
class Projects(object):
def __init__(self, source):
self.tree = Et.fromstring(source)
self.pipelines = {}
self.stages = defaultdict(list)
self.jobs = defaultdict(list)
self.parse()
def parse(self):
for project in self.tree.findall('Project'):
name_parts = [n.strip() for n in project.attrib['name'].split('::')]
project.set("pipeline_name", name_parts[0])
project.set("stage_name", name_parts[1])
if len(name_parts) > 2:
project.set("job_name", name_parts[2])
else:
project.set("job_name", None)
if project.get("pipeline_name") not in self.pipelines:
self.pipelines[project.get("pipeline_name")] = Pipeline()
self.pipelines[project.get("pipeline_name")].add_facts(project)
@staticmethod
def all_which(entity):
return True
@staticmethod
def progress_which(entity):
return entity.status != 'Success'
@staticmethod
def failing_which(entity):
return 'Failure' in entity.status
@staticmethod
def time_key(enity):
return enity.changed
def select(self, which, groups=None, group_map=None):
selection = {
'all': self.all_which,
'progress': self.progress_which,
'failing': self.failing_which,
}[which]
pipelines = [p for p in self.pipelines.values() if selection(p)]
pipelines.sort(key=self.time_key)
pipelines.reverse()
if groups:
pipelines = [pl for pl in pipelines if group_map[pl.name] in groups]
return pipelines
class Pipeline(object):
def __init__(self):
self.activity_set = set()
self.last_build_set = set()
self.status = None
self.changed = None
self.url = None
self.name = None
self.label = None
self.stages = []
self.messages = defaultdict(set)
def add_facts(self, project):
if self.changed:
self.changed = max(self.changed, project.attrib['lastBuildTime'])
else:
self.changed = project.attrib['lastBuildTime']
if not self.name:
self.name = project.get("pipeline_name")
self.url = project.attrib['webUrl'].rsplit('/', 2)[0].replace('pipelines/', 'pipelines/value_stream_map/')
self.label = self.url.split('/')[-1]
if project.get("stage_name") not in [stage.name for stage in self.stages]:
self.stages.append(Stage(project))
if project.get("job_name"):
self.stages[-1].add_job(project)
self.activity_set.add(project.attrib['activity'])
self.last_build_set.add(project.attrib['lastBuildStatus'])
prefix = "Building after " if "Building" in self.activity_set else ""
suffix = "Failure" if "Failure" in self.last_build_set else "Success"
self.status = prefix + suffix
self.add_messages(project)
def add_messages(self, project):
for message in project.findall('messages/message'):
self.messages[message.attrib['kind']].add(message.attrib['text'])
def is_success(self):
return self.status == "Success"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.name == other.name
class Entity(object):
def __init__(self, project):
prefix = "Building after " if project.attrib['activity'] == "Building" else ""
self.status = prefix + project.attrib['lastBuildStatus']
self.url = project.attrib['webUrl']
class Stage(Entity):
def __init__(self, project):
super(Stage, self).__init__(project)
self.jobs = []
self.name = project.get("stage_name")
self.counter = self.url.split('/')[-1]
def add_job(self, project):
self.jobs.append(Job(project))
class Job(Entity):
def __init__(self, project):
super(Job, self).__init__(project)
self.name = project.get("job_name")
| true | true |
f7f85b86a091200d3cee0b7d60d04062afdfdbe5 | 2,112 | py | Python | epytope/Data/pssms/tepitopepan/mat/DRB1_0711_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/tepitopepan/mat/DRB1_0711_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/tepitopepan/mat/DRB1_0711_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | DRB1_0711_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.3981, 'D': -1.6081, 'G': -1.1184, 'F': 0.1792, 'I': 1.1014, 'H': 0.05095, 'K': -1.2978, 'M': -0.32464, 'L': -0.74298, 'N': -1.107, 'Q': -1.4664, 'P': -1.2117, 'S': 1.4198, 'R': -1.107, 'T': 1.3474, 'W': -1.0975, 'V': 0.89927, 'Y': -0.88285}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.4996, 'D': -2.4998, 'G': -0.59982, 'F': -0.80139, 'I': -0.50109, 'H': -0.80115, 'K': -1.1006, 'M': -0.80103, 'L': -0.90071, 'N': -0.60033, 'Q': -1.1005, 'P': -0.49933, 'S': 0.5987, 'R': -1.1004, 'T': -0.080654, 'W': -0.90147, 'V': 0.09895, 'Y': -1.0009}, 6: {'A': 0.0, 'E': 0.86672, 'D': -1.3112, 'G': -0.011139, 'F': 2.069, 'I': 2.3665, 'H': 0.88412, 'K': 0.48763, 'M': 1.7789, 'L': 2.1749, 'N': 1.3742, 'Q': 1.0784, 'P': -0.20788, 'S': 0.38292, 'R': 0.69107, 'T': 0.87113, 'W': 1.3762, 'V': 1.5723, 'Y': 1.6726}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.30004, 'D': -1.2, 'G': -0.6, 'F': 2.0999, 
'I': 3.3999, 'H': -0.20002, 'K': -1.1, 'M': 2.0, 'L': 3.3999, 'N': -0.50002, 'Q': -0.90002, 'P': -0.60001, 'S': -0.3, 'R': -0.8, 'T': 0.39999, 'W': 0.79995, 'V': 2.0, 'Y': 1.1}} | 2,112 | 2,112 | 0.383523 | DRB1_0711_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.3981, 'D': -1.6081, 'G': -1.1184, 'F': 0.1792, 'I': 1.1014, 'H': 0.05095, 'K': -1.2978, 'M': -0.32464, 'L': -0.74298, 'N': -1.107, 'Q': -1.4664, 'P': -1.2117, 'S': 1.4198, 'R': -1.107, 'T': 1.3474, 'W': -1.0975, 'V': 0.89927, 'Y': -0.88285}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.4996, 'D': -2.4998, 'G': -0.59982, 'F': -0.80139, 'I': -0.50109, 'H': -0.80115, 'K': -1.1006, 'M': -0.80103, 'L': -0.90071, 'N': -0.60033, 'Q': -1.1005, 'P': -0.49933, 'S': 0.5987, 'R': -1.1004, 'T': -0.080654, 'W': -0.90147, 'V': 0.09895, 'Y': -1.0009}, 6: {'A': 0.0, 'E': 0.86672, 'D': -1.3112, 'G': -0.011139, 'F': 2.069, 'I': 2.3665, 'H': 0.88412, 'K': 0.48763, 'M': 1.7789, 'L': 2.1749, 'N': 1.3742, 'Q': 1.0784, 'P': -0.20788, 'S': 0.38292, 'R': 0.69107, 'T': 0.87113, 'W': 1.3762, 'V': 1.5723, 'Y': 1.6726}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 
'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.30004, 'D': -1.2, 'G': -0.6, 'F': 2.0999, 'I': 3.3999, 'H': -0.20002, 'K': -1.1, 'M': 2.0, 'L': 3.3999, 'N': -0.50002, 'Q': -0.90002, 'P': -0.60001, 'S': -0.3, 'R': -0.8, 'T': 0.39999, 'W': 0.79995, 'V': 2.0, 'Y': 1.1}} | true | true |
f7f85ba20b6127404ab200341a88c8fff7816f0c | 1,365 | py | Python | src/users/models/componentsschemasmicrosoft_graph_teamscatalogappallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/componentsschemasmicrosoft_graph_teamscatalogappallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/componentsschemasmicrosoft_graph_teamscatalogappallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphTeamscatalogappallof1(Model):
    """teamsCatalogApp.

    Autogenerated msrest model for the Microsoft Graph ``teamsCatalogApp``
    schema component. All fields are optional and default to None.

    :param external_id: External identifier of the catalog app.
    :type external_id: str
    :param name: Name of the catalog app.
    :type name: str
    :param version: Version string of the catalog app.
    :type version: str
    :param distribution_method: Possible values include: 'store',
     'organization', 'sideloaded', 'unknownFutureValue'
    :type distribution_method: str or ~users.models.enum
    """
    # Maps each Python attribute to its wire (JSON) key and msrest type;
    # consumed by the msrest Model base class during (de)serialization.
    _attribute_map = {
        'external_id': {'key': 'externalId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'distribution_method': {'key': 'distributionMethod', 'type': 'str'},
    }
    def __init__(self, external_id=None, name=None, version=None, distribution_method=None):
        super(ComponentsschemasmicrosoftGraphTeamscatalogappallof1, self).__init__()
        self.external_id = external_id
        self.name = name
        self.version = version
        self.distribution_method = distribution_method
| 35.921053 | 92 | 0.600733 |
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphTeamscatalogappallof1(Model):
_attribute_map = {
'external_id': {'key': 'externalId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'distribution_method': {'key': 'distributionMethod', 'type': 'str'},
}
def __init__(self, external_id=None, name=None, version=None, distribution_method=None):
super(ComponentsschemasmicrosoftGraphTeamscatalogappallof1, self).__init__()
self.external_id = external_id
self.name = name
self.version = version
self.distribution_method = distribution_method
| true | true |
f7f85bd77607159e3f532d0ca917a01ba1522bf6 | 1,328 | py | Python | scripts/cli.py | Skalwalker/PlantsRecognition | 11c3812cfbfb789c0c023693e798918473304d59 | [
"MIT"
] | null | null | null | scripts/cli.py | Skalwalker/PlantsRecognition | 11c3812cfbfb789c0c023693e798918473304d59 | [
"MIT"
] | 3 | 2018-09-22T05:03:33.000Z | 2018-09-22T05:05:17.000Z | scripts/cli.py | Skalwalker/PlantsRecognition | 11c3812cfbfb789c0c023693e798918473304d59 | [
"MIT"
] | 2 | 2019-12-27T07:01:07.000Z | 2020-11-10T15:09:38.000Z | import argparse
from adapter import Adapter
# Defaults for the command-line options parsed in getAdapter().
# Note: DEFAULT_PLOT was previously assigned twice ('none', then False);
# the first assignment was dead code and has been removed.
DEFAULT_TRAIN = False          # do not train a new model unless --train is given
DEFAULT_ALGORITHM = ''         # learning-algorithm selector (currently only one choice)
DEFAULT_CROSSVAL = 10          # k for k-fold cross validation
DEFAULT_NUMBER_OF_TREES = 100  # forest size
DEFAULT_PLOT = False           # confusion-matrix plotting disabled by default
DEFAULT_BEST = False           # use all features, not just the 5 best
def getAdapter():
    """Parse the command line and return the Adapter it configures.

    NOTE(review): --algorithm and --train are parsed for the interface but
    are not forwarded to the Adapter constructor.
    """
    parser = argparse.ArgumentParser(description='A Random Forests approach for plant species recognition.')
    parser.add_argument('--algorithm', dest='algorithm', choices=[''], type=str,
                        default=DEFAULT_ALGORITHM, help='Select Learning Algorithm')
    parser.add_argument('-cv', dest='crossVal', type=int,
                        default=DEFAULT_CROSSVAL, help='Select Cross Validation Value')
    parser.add_argument('-n', dest='number_of_trees', type=int,
                        default=DEFAULT_NUMBER_OF_TREES, help='Number of trees in forest')
    parser.add_argument('--train', dest='train', default=DEFAULT_TRAIN, type=bool,
                        help='Enable or disable the training of the model')
    parser.add_argument('--plot', dest='confPlot', default=DEFAULT_PLOT, type=bool,
                        help='Change how confusion matrices are plot.')
    parser.add_argument('-b', dest='best', default=DEFAULT_BEST, type=bool,
                        help='Use only the 5 best features')
    options = parser.parse_args()
    return Adapter(options.crossVal,
                   options.confPlot,
                   options.number_of_trees,
                   options.best)
| 36.888889 | 141 | 0.708584 | import argparse
from adapter import Adapter
DEFAULT_PLOT = 'none'
DEFAULT_TRAIN = False
DEFAULT_ALGORITHM = ''
DEFAULT_CROSSVAL = 10
DEFAULT_NUMBER_OF_TREES = 100
DEFAULT_PLOT = False
DEFAULT_BEST = False
def getAdapter():
    """Parse the command line and build the configured Adapter.

    NOTE(review): `type=bool` on --train/--plot/-b makes argparse call
    bool() on the raw string, so any non-empty value (even "False")
    parses as True — confirm whether store_true flags were intended.
    NOTE(review): args.algorithm and args.train are parsed but never
    passed to the Adapter.
    """
    parser = argparse.ArgumentParser(description='A Random Forests approach for plant species recognition.')
    parser.add_argument('--algorithm', dest='algorithm', choices=[''], type=str, default=DEFAULT_ALGORITHM, help='Select Learning Algorithm')
    parser.add_argument('-cv', dest='crossVal', type=int, default=DEFAULT_CROSSVAL, help='Select Cross Validation Value')
    parser.add_argument('-n', dest='number_of_trees', type=int, default=DEFAULT_NUMBER_OF_TREES, help='Number of trees in forest')
    parser.add_argument('--train', dest='train', default=DEFAULT_TRAIN, type=bool, help='Enable or disable the training of the model')
    parser.add_argument('--plot', dest='confPlot', default=DEFAULT_PLOT ,type=bool, help='Change how confusion matrices are plot.')
    parser.add_argument('-b', dest='best', default=DEFAULT_BEST ,type=bool, help='Use only the 5 best features')
    args = parser.parse_args()
    # Only cross-validation, plot flag, tree count and best-features flag
    # are forwarded to the Adapter.
    adapter = Adapter(args.crossVal,
                      args.confPlot,
                      args.number_of_trees,
                      args.best)
    return adapter
| true | true |
f7f85c0734ac66943359aa744edff514fd2482af | 1,500 | py | Python | airflow/utils/weight_rule.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15 | 2017-04-06T09:01:50.000Z | 2021-10-02T13:54:31.000Z | airflow/utils/weight_rule.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 26 | 2019-08-05T13:44:11.000Z | 2022-03-30T10:06:18.000Z | airflow/utils/weight_rule.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 21 | 2017-08-20T03:01:05.000Z | 2021-09-07T06:47:51.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from builtins import object
from typing import Set
class WeightRule(object):
    """Registry of the valid task weight rule names."""

    DOWNSTREAM = 'downstream'
    UPSTREAM = 'upstream'
    ABSOLUTE = 'absolute'

    # Lazily populated cache of every rule value declared on this class.
    _ALL_WEIGHT_RULES = set()  # type: Set[str]

    @classmethod
    def is_valid(cls, weight_rule):
        """Return True when *weight_rule* is one of the known rules."""
        return weight_rule in cls.all_weight_rules()

    @classmethod
    def all_weight_rules(cls):
        """Return the set of all rule values, computing it on first use."""
        if not cls._ALL_WEIGHT_RULES:
            rules = set()
            # Collect every public, non-callable class attribute value.
            for attr in dir(cls):
                if attr.startswith("_"):
                    continue
                value = getattr(cls, attr)
                if not callable(value):
                    rules.add(value)
            cls._ALL_WEIGHT_RULES = rules
        return cls._ALL_WEIGHT_RULES
| 32.608696 | 80 | 0.702667 |
from __future__ import unicode_literals
from builtins import object
from typing import Set
class WeightRule(object):
DOWNSTREAM = 'downstream'
UPSTREAM = 'upstream'
ABSOLUTE = 'absolute'
_ALL_WEIGHT_RULES = set()
@classmethod
def is_valid(cls, weight_rule):
return weight_rule in cls.all_weight_rules()
@classmethod
def all_weight_rules(cls):
if not cls._ALL_WEIGHT_RULES:
cls._ALL_WEIGHT_RULES = {
getattr(cls, attr)
for attr in dir(cls)
if not attr.startswith("_") and not callable(getattr(cls, attr))
}
return cls._ALL_WEIGHT_RULES
| true | true |
f7f85c756b5af872cec62407d22349d650619b4b | 623 | py | Python | Python/035.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | Python/035.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | 1 | 2018-04-16T21:01:50.000Z | 2018-04-16T21:01:50.000Z | Python/035.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 35
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from EulerFunctions import is_prime, primelist, numDigits
def CircPerms(x):
    """Return all circular (rotational) digit permutations of x as ints.

    The first element is x itself, followed by successive left rotations,
    e.g. CircPerms(197) -> [197, 971, 719].

    Improvement: the digit count is taken from len(str(x)) directly,
    removing the dependency on the numDigits helper.
    """
    digits = str(x)
    # Rotating left by i positions moves the first i digits to the end.
    return [int(digits[i:] + digits[:i]) for i in range(len(digits))]
def run():
    """Count the circular primes below one million (Project Euler 35)."""
    limit = 1000000
    # A circular prime stays prime under every rotation of its digits.
    return sum(
        1
        for candidate in primelist(limit)
        if all(is_prime(rotation) for rotation in CircPerms(candidate))
    )
if __name__ == '__main__':
print(run())
| 18.323529 | 57 | 0.621188 |
from EulerFunctions import is_prime, primelist, numDigits
def CircPerms(x):
perms = [str(x)]
for j in range(numDigits(x) - 1):
perms.append(perms[-1][1:] + perms[-1][0])
return [int(i) for i in perms]
def run():
N = 1000000
primes = primelist(N)
circPrimes = []
for i in primes:
if all(is_prime(x) for x in CircPerms(i)):
circPrimes.append(i)
return len(circPrimes)
if __name__ == '__main__':
print(run())
| true | true |
f7f85ea776d29fad9e4f9a2c3d14fea6f628462d | 17,216 | py | Python | check.py | sophiawho/binaryen | 4dd90785cc27423e5368d5c1c3e4ffec05df631a | [
"Apache-2.0"
] | null | null | null | check.py | sophiawho/binaryen | 4dd90785cc27423e5368d5c1c3e4ffec05df631a | [
"Apache-2.0"
] | null | null | null | check.py | sophiawho/binaryen | 4dd90785cc27423e5368d5c1c3e4ffec05df631a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2015 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
import sys
import unittest
from collections import OrderedDict
from scripts.test import asm2wasm
from scripts.test import binaryenjs
from scripts.test import lld
from scripts.test import shared
from scripts.test import support
from scripts.test import wasm2js
from scripts.test import wasm_opt
if shared.options.interpreter:
print('[ using wasm interpreter at "%s" ]' % shared.options.interpreter)
assert os.path.exists(shared.options.interpreter), 'interpreter not found'
def get_changelog_version():
    """Return the most recent version number declared in CHANGELOG.md.

    Version headings are single-token lines such as "v90"; the first one
    in the file is taken as the current version.
    """
    changelog = os.path.join(shared.options.binaryen_root, 'CHANGELOG.md')
    with open(changelog) as f:
        headings = [line for line in f.readlines()
                    if len(line.split()) == 1 and line.startswith('v')]
    version = headings[0][1:]
    print("Parsed CHANGELOG.md version: %s" % version)
    return int(version)
def run_help_tests():
    """Smoke-test --help and --version for every tool binary.

    --help must print nothing on stderr, mention the program name and be
    non-trivial; --version must be a single line whose number matches the
    version parsed from CHANGELOG.md.
    """
    print('[ checking --help is useful... ]\n')
    # Data files shipped next to the binaries are not executables.
    not_executable_suffix = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest']
    bin_files = [os.path.join(shared.options.binaryen_bin, f) for f in os.listdir(shared.options.binaryen_bin)]
    executables = [f for f in bin_files if os.path.isfile(f) and not any(f.endswith(s) for s in not_executable_suffix)]
    executables = sorted(executables)
    assert len(executables)
    for e in executables:
        print('.. %s --help' % e)
        out, err = subprocess.Popen([e, '--help'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).communicate()
        out = out.decode('utf-8')
        err = err.decode('utf-8')
        assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
        # `.exe` is stripped so the check also works on Windows builds.
        assert os.path.basename(e).replace('.exe', '') in out, 'Expected help to contain program name, got:\n%s' % out
        assert len(out.split('\n')) > 8, 'Expected some help, got:\n%s' % out
    print('[ checking --version ... ]\n')
    changelog_version = get_changelog_version()
    for e in executables:
        print('.. %s --version' % e)
        out, err = subprocess.Popen([e, '--version'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).communicate()
        out = out.decode('utf-8')
        err = err.decode('utf-8')
        assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
        assert os.path.basename(e).replace('.exe', '') in out, 'Expected version to contain program name, got:\n%s' % out
        assert len(out.strip().splitlines()) == 1, 'Expected only version info, got:\n%s' % out
        # Output is expected to look like "<tool> version <N>".
        parts = out.split()
        assert parts[1] == 'version'
        version = int(parts[2])
        assert version == changelog_version
def run_wasm_dis_tests():
    """Disassemble each checked-in .wasm and compare against *.fromBinary.

    When a sibling .map file exists it is passed as a source map.  Each
    binary is additionally run through wasm-opt (all features enabled) to
    verify it validates.
    """
    print('\n[ checking wasm-dis on provided binaries... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
        print('..', os.path.basename(t))
        cmd = shared.WASM_DIS + [t]
        if os.path.isfile(t + '.map'):
            cmd += ['--source-map', t + '.map']
        actual = support.run_command(cmd)
        shared.fail_if_not_identical_to_file(actual, t + '.fromBinary')
        # also verify there are no validation errors
        def check():
            cmd = shared.WASM_OPT + [t, '-all']
            support.run_command(cmd)
        shared.with_pass_debug(check)
        shared.validate_binary(t)
def run_crash_tests():
    """Run wasm-opt on the 'crash' inputs and require a clean parse error.

    The tools must reject these tricky inputs with a reported parse
    exception and exit status 1 rather than crashing.
    """
    print("\n[ checking we don't crash on tricky inputs... ]\n")
    for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        cmd = shared.WASM_OPT + [t]
        # expect a parse error to be reported
        support.run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)
def run_dylink_tests():
    """Check that wasm-opt keeps the dylink section first in the output.

    Offset 11 is immediately after the 8-byte wasm header plus the custom
    section's leading bytes, i.e. the dylink section must come first.
    """
    print("\n[ we emit dylink sections properly... ]\n")
    pattern = os.path.join(shared.options.binaryen_test, 'dylib*.wasm')
    for test in sorted(glob.glob(pattern)):
        print('..', os.path.basename(test))
        support.run_command(shared.WASM_OPT + [test, '-o', 'a.wasm'])
        with open('a.wasm', 'rb') as output:
            offset = output.read().find(b'dylink')
        print(' ', offset)
        assert offset == 11, 'dylink section must be first, right after the magic number etc.'
def run_ctor_eval_tests():
    """Run wasm-ctor-eval on each ctor-eval test and diff against *.out.

    The ctor list for each test is read from its sibling .ctors file.
    """
    print('\n[ checking wasm-ctor-eval... ]\n')
    for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        # FIX: close the file handles deterministically instead of leaking
        # them until garbage collection.
        with open(t + '.ctors') as f:
            ctors = f.read().strip()
        cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wat', '-S', '--ctors', ctors]
        support.run_command(cmd)
        with open('a.wat') as f:
            actual = f.read()
        shared.fail_if_not_identical_to_file(actual, t + '.out')
def run_wasm_metadce_tests():
    """Run wasm-metadce with each test's graph file and check both outputs.

    The DCE'd module is compared against *.dced and the tool's stdout
    against *.dced.stdout.
    """
    print('\n[ checking wasm-metadce ]\n')
    for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        graph = t + '.graph.txt'
        cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wat', '-S', '-all']
        stdout = support.run_command(cmd)
        expected = t + '.dced'
        with open('a.wat') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)
        shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')
def run_wasm_reduce_tests():
    """Exercise wasm-reduce on fixed testcases and on a generated fuzz case.

    Skipped entirely when the shell has no timeout support, since the
    reducer relies on timing out the interestingness command.
    """
    if not shared.has_shell_timeout():
        print('\n[ skipping wasm-reduce testcases]\n')
        return
    print('\n[ checking wasm-reduce testcases]\n')
    # fixed testcases
    for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
        print('..', os.path.basename(t))
        # convert to wasm
        support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm'])
        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features ' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
        expected = t + '.txt'
        support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wat'])
        with open('a.wat') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)
    # run on a nontrivial fuzz testcase, for general coverage
    # this is very slow in ThreadSanitizer, so avoid it there
    if 'fsanitize=thread' not in str(os.environ):
        print('\n[ checking wasm-reduce fuzz testcase ]\n')
        # TODO: re-enable multivalue once it is better optimized
        support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
        before = os.stat('a.wasm').st_size
        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
        after = os.stat('c.wasm').st_size
        # This number is a custom threshold to check if we have shrunk the
        # output sufficiently
        assert after < 0.85 * before, [before, after]
def run_spec_tests():
    """Run the wasm-shell spec testcases and verify their output.

    Each .wast is executed directly and compared to its expected .log.
    Splittable files are then split into individual modules, each of
    which is re-run, optimized, and round-tripped through the binary
    format, with all outputs compared against the expected log again.
    """
    print('\n[ checking wasm-shell spec testcases... ]\n')

    for wast in shared.options.spec_tests:
        print('..', os.path.basename(wast))

        def run_spec_test(wast):
            cmd = shared.WASM_SHELL + [wast]
            output = support.run_command(cmd, stderr=subprocess.PIPE)
            # filter out binaryen interpreter logging that the spec suite
            # doesn't expect
            filtered = [line for line in output.splitlines() if not line.startswith('[trap')]
            return '\n'.join(filtered) + '\n'

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            if expected and os.path.exists(expected):
                # FIX: close the expectation file deterministically.
                with open(expected) as f:
                    expected = f.read()
                print('       (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')

        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))
        check_expected(actual, expected)

        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if 'exports.wast' in os.path.basename(wast):  # FIXME
            continue

        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        #   * comments.wast: contains characters that are not valid utf-8,
        #     so our string splitting code fails there
        # FIXME Remove reference type tests from this list after nullref is
        # implemented in V8
        if os.path.basename(wast) not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast', 'ref_func.wast', 'old_select.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                print('    testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                # add the asserts, and verify that the test still passes
                # FIX: use a context manager so the append is flushed and the
                # handle closed before the file is executed again below.
                with open(result_wast, 'a') as f:
                    f.write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def run_validator_tests():
    """Exercise wasm-as validation modes on the validator test inputs.

    The default and --validate=none modes must accept their inputs;
    --validate=web and the invalid-number case must fail with status 1.
    """
    print('\n[ running validation tests... ]\n')
    # (extra flags, test file, expected exit status) — same commands, in
    # the same order, as the original hand-unrolled sequence.
    cases = [
        ([], 'invalid_export.wast', 0),
        ([], 'invalid_import.wast', 0),
        (['--validate=web'], 'invalid_export.wast', 1),
        (['--validate=web'], 'invalid_import.wast', 1),
        (['--validate=none'], 'invalid_return.wast', 0),
        ([], 'invalid_number.wast', 1),
    ]
    for flags, wast, status in cases:
        cmd = shared.WASM_AS + flags + [os.path.join(shared.get_test_dir('validator'), wast)]
        if status:
            support.run_command(cmd, expected_status=status)
        else:
            support.run_command(cmd)
def run_gcc_tests():
    """Build and run the native C/C++ API examples against libbinaryen.

    Sources come from test/example; .txt files are first converted into a
    trace.cpp via clean_c_api_trace.py.  Each program's stdout is compared
    to its expected .txt file.
    """
    print('\n[ checking native gcc testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    for t in sorted(os.listdir(shared.get_test_dir('example'))):
        output_file = 'example'
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
        if t.endswith('.txt'):
            # check if there is a trace in the file, if so, we should build it
            out = subprocess.check_output([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(shared.get_test_dir('example'), t)])
            if len(out) == 0:
                print(' (no trace in ', t, ')')
                continue
            print(' (will check trace in ', t, ')')
            src = 'trace.cpp'
            with open(src, 'wb') as o:
                o.write(out)
            expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
        else:
            src = os.path.join(shared.get_test_dir('example'), t)
            expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
        if src.endswith(('.c', '.cpp')):
            # build the C file separately
            libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
            extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
                     '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
            if src.endswith('.cpp'):
                extra += ['-std=c++14']
            if os.environ.get('COMPILER_FLAGS'):
                for f in os.environ.get('COMPILER_FLAGS').split(' '):
                    extra.append(f)
            print('build: ', ' '.join(extra))
            subprocess.check_call(extra)
            # Link against the binaryen C library DSO, using an executable-relative rpath
            cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
        else:
            continue
        print(' ', t, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [shared.NATIVEXX, '-std=c++14'] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        shared.fail_if_not_identical_to_file(actual, expected)
def run_unittest():
    """Discover and run the Python unit tests, folding results into the
    shared failure count; aborts early when --abort-on-first-failure is set.
    """
    print('\n[ checking unit tests...]\n')
    # equivalent to `python -m unittest discover -s ./test -v`
    suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
    result = unittest.TextTestRunner(verbosity=2, failfast=shared.options.abort_on_first_failure).run(suite)
    shared.num_failures += len(result.errors) + len(result.failures)
    if shared.options.abort_on_first_failure and shared.num_failures:
        raise Exception("unittest failed")
TEST_SUITES = OrderedDict([
('help-messages', run_help_tests),
('wasm-opt', wasm_opt.test_wasm_opt),
('asm2wasm', asm2wasm.test_asm2wasm),
('asm2wasm-binary', asm2wasm.test_asm2wasm_binary),
('wasm-dis', run_wasm_dis_tests),
('crash', run_crash_tests),
('dylink', run_dylink_tests),
('ctor-eval', run_ctor_eval_tests),
('wasm-metadce', run_wasm_metadce_tests),
('wasm-reduce', run_wasm_reduce_tests),
('spec', run_spec_tests),
('lld', lld.test_wasm_emscripten_finalize),
('wasm2js', wasm2js.test_wasm2js),
('validator', run_validator_tests),
('gcc', run_gcc_tests),
('unit', run_unittest),
('binaryenjs', binaryenjs.test_binaryen_js),
('binaryenjs_wasm', binaryenjs.test_binaryen_wasm),
])
# Run all the tests
def main():
    """Run the requested test suites (all non-skipped ones by default).

    Returns a process exit code: 0 on success, 1 on invalid suite name or
    any test failures.
    """
    all_suites = TEST_SUITES.keys()
    # The JS suites need extra tooling, so they only run when requested.
    skip_by_default = ['binaryenjs', 'binaryenjs_wasm']
    if shared.options.list_suites:
        for suite in all_suites:
            print(suite)
        return 0
    for r in shared.requested:
        if r not in all_suites:
            print('invalid test suite: %s (see --list-suites)\n' % r)
            return 1
    if not shared.requested:
        shared.requested = [s for s in all_suites if s not in skip_by_default]
    for test in shared.requested:
        TEST_SUITES[test]()
    # Check/display the results
    if shared.num_failures == 0:
        print('\n[ success! ]')
    if shared.warnings:
        print('\n' + '\n'.join(shared.warnings))
    if shared.num_failures > 0:
        print('\n[ ' + str(shared.num_failures) + ' failures! ]')
        return 1
    return 0
if __name__ == '__main__':
sys.exit(main())
| 42.613861 | 183 | 0.616055 |
import glob
import os
import subprocess
import sys
import unittest
from collections import OrderedDict
from scripts.test import asm2wasm
from scripts.test import binaryenjs
from scripts.test import lld
from scripts.test import shared
from scripts.test import support
from scripts.test import wasm2js
from scripts.test import wasm_opt
if shared.options.interpreter:
print('[ using wasm interpreter at "%s" ]' % shared.options.interpreter)
assert os.path.exists(shared.options.interpreter), 'interpreter not found'
def get_changelog_version():
with open(os.path.join(shared.options.binaryen_root, 'CHANGELOG.md')) as f:
lines = f.readlines()
lines = [l for l in lines if len(l.split()) == 1]
lines = [l for l in lines if l.startswith('v')]
version = lines[0][1:]
print("Parsed CHANGELOG.md version: %s" % version)
return int(version)
def run_help_tests():
print('[ checking --help is useful... ]\n')
not_executable_suffix = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest']
bin_files = [os.path.join(shared.options.binaryen_bin, f) for f in os.listdir(shared.options.binaryen_bin)]
executables = [f for f in bin_files if os.path.isfile(f) and not any(f.endswith(s) for s in not_executable_suffix)]
executables = sorted(executables)
assert len(executables)
for e in executables:
print('.. %s --help' % e)
out, err = subprocess.Popen([e, '--help'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
assert os.path.basename(e).replace('.exe', '') in out, 'Expected help to contain program name, got:\n%s' % out
assert len(out.split('\n')) > 8, 'Expected some help, got:\n%s' % out
print('[ checking --version ... ]\n')
changelog_version = get_changelog_version()
for e in executables:
print('.. %s --version' % e)
out, err = subprocess.Popen([e, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
assert os.path.basename(e).replace('.exe', '') in out, 'Expected version to contain program name, got:\n%s' % out
assert len(out.strip().splitlines()) == 1, 'Expected only version info, got:\n%s' % out
parts = out.split()
assert parts[1] == 'version'
version = int(parts[2])
assert version == changelog_version
def run_wasm_dis_tests():
print('\n[ checking wasm-dis on provided binaries... ]\n')
for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_DIS + [t]
if os.path.isfile(t + '.map'):
cmd += ['--source-map', t + '.map']
actual = support.run_command(cmd)
shared.fail_if_not_identical_to_file(actual, t + '.fromBinary')
def check():
cmd = shared.WASM_OPT + [t, '-all']
support.run_command(cmd)
shared.with_pass_debug(check)
shared.validate_binary(t)
def run_crash_tests():
print("\n[ checking we don't crash on tricky inputs... ]\n")
for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_OPT + [t]
# expect a parse error to be reported
support.run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)
def run_dylink_tests():
print("\n[ we emit dylink sections properly... ]\n")
dylink_tests = glob.glob(os.path.join(shared.options.binaryen_test, 'dylib*.wasm'))
for t in sorted(dylink_tests):
print('..', os.path.basename(t))
cmd = shared.WASM_OPT + [t, '-o', 'a.wasm']
support.run_command(cmd)
with open('a.wasm', 'rb') as output:
index = output.read().find(b'dylink')
print(' ', index)
assert index == 11, 'dylink section must be first, right after the magic number etc.'
def run_ctor_eval_tests():
print('\n[ checking wasm-ctor-eval... ]\n')
for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
ctors = open(t + '.ctors').read().strip()
cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wat', '-S', '--ctors', ctors]
support.run_command(cmd)
actual = open('a.wat').read()
out = t + '.out'
shared.fail_if_not_identical_to_file(actual, out)
def run_wasm_metadce_tests():
print('\n[ checking wasm-metadce ]\n')
for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
graph = t + '.graph.txt'
cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wat', '-S', '-all']
stdout = support.run_command(cmd)
expected = t + '.dced'
with open('a.wat') as seen:
shared.fail_if_not_identical_to_file(seen.read(), expected)
shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')
def run_wasm_reduce_tests():
if not shared.has_shell_timeout():
print('\n[ skipping wasm-reduce testcases]\n')
return
print('\n[ checking wasm-reduce testcases]\n')
# fixed testcases
for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
print('..', os.path.basename(t))
# convert to wasm
support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm'])
support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features ' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
expected = t + '.txt'
support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wat'])
with open('a.wat') as seen:
shared.fail_if_not_identical_to_file(seen.read(), expected)
# run on a nontrivial fuzz testcase, for general coverage
# this is very slow in ThreadSanitizer, so avoid it there
if 'fsanitize=thread' not in str(os.environ):
print('\n[ checking wasm-reduce fuzz testcase ]\n')
# TODO: re-enable multivalue once it is better optimized
support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
before = os.stat('a.wasm').st_size
support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
after = os.stat('c.wasm').st_size
# This number is a custom threshold to check if we have shrunk the
# output sufficiently
assert after < 0.85 * before, [before, after]
def run_spec_tests():
print('\n[ checking wasm-shell spec testcases... ]\n')
for wast in shared.options.spec_tests:
print('..', os.path.basename(wast))
def run_spec_test(wast):
cmd = shared.WASM_SHELL + [wast]
output = support.run_command(cmd, stderr=subprocess.PIPE)
# filter out binaryen interpreter logging that the spec suite
# doesn't expect
filtered = [line for line in output.splitlines() if not line.startswith('[trap')]
return '\n'.join(filtered) + '\n'
def run_opt_test(wast):
cmd = shared.WASM_OPT + [wast, '-O', '-all']
support.run_command(cmd)
def check_expected(actual, expected):
if expected and os.path.exists(expected):
expected = open(expected).read()
print(' (using expected output)')
actual = actual.strip()
expected = expected.strip()
if actual != expected:
shared.fail(actual, expected)
expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')
try:
actual = run_spec_test(wast)
except Exception as e:
if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
print('<< test failed as expected >>')
continue
else:
shared.fail_with_error(str(e))
check_expected(actual, expected)
# skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
if 'exports.wast' in os.path.basename(wast):
continue
if os.path.basename(wast) not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast', 'ref_func.wast', 'old_select.wast']:
split_num = 0
actual = ''
for module, asserts in support.split_wast(wast):
print(' testing split module', split_num)
split_num += 1
support.write_wast('split.wast', module, asserts)
run_spec_test('split.wast')
run_opt_test('split.wast') # also that our optimizer doesn't break on it
result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
open(result_wast, 'a').write('\n' + '\n'.join(asserts))
actual += run_spec_test(result_wast)
check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
else:
run_spec_test(wast)
def run_validator_tests():
print('\n[ running validation tests... ]\n')
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast')]
support.run_command(cmd)
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast')]
support.run_command(cmd)
cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast')]
support.run_command(cmd, expected_status=1)
cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast')]
support.run_command(cmd, expected_status=1)
cmd = shared.WASM_AS + ['--validate=none', os.path.join(shared.get_test_dir('validator'), 'invalid_return.wast')]
support.run_command(cmd)
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_number.wast')]
support.run_command(cmd, expected_status=1)
def run_gcc_tests():
print('\n[ checking native gcc testcases...]\n')
if not shared.NATIVECC or not shared.NATIVEXX:
shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
return
for t in sorted(os.listdir(shared.get_test_dir('example'))):
output_file = 'example'
cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
if t.endswith('.txt'):
out = subprocess.check_output([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(shared.get_test_dir('example'), t)])
if len(out) == 0:
print(' (no trace in ', t, ')')
continue
print(' (will check trace in ', t, ')')
src = 'trace.cpp'
with open(src, 'wb') as o:
o.write(out)
expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
else:
src = os.path.join(shared.get_test_dir('example'), t)
expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
if src.endswith(('.c', '.cpp')):
libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
'-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
if src.endswith('.cpp'):
extra += ['-std=c++14']
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
extra.append(f)
print('build: ', ' '.join(extra))
subprocess.check_call(extra)
cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
else:
continue
print(' ', t, src, expected)
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
cmd.append(f)
cmd = [shared.NATIVEXX, '-std=c++14'] + cmd
print('link: ', ' '.join(cmd))
subprocess.check_call(cmd)
print('run...', output_file)
actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
os.remove(output_file)
shared.fail_if_not_identical_to_file(actual, expected)
def run_unittest():
print('\n[ checking unit tests...]\n')
suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
result = unittest.TextTestRunner(verbosity=2, failfast=shared.options.abort_on_first_failure).run(suite)
shared.num_failures += len(result.errors) + len(result.failures)
if shared.options.abort_on_first_failure and shared.num_failures:
raise Exception("unittest failed")
TEST_SUITES = OrderedDict([
('help-messages', run_help_tests),
('wasm-opt', wasm_opt.test_wasm_opt),
('asm2wasm', asm2wasm.test_asm2wasm),
('asm2wasm-binary', asm2wasm.test_asm2wasm_binary),
('wasm-dis', run_wasm_dis_tests),
('crash', run_crash_tests),
('dylink', run_dylink_tests),
('ctor-eval', run_ctor_eval_tests),
('wasm-metadce', run_wasm_metadce_tests),
('wasm-reduce', run_wasm_reduce_tests),
('spec', run_spec_tests),
('lld', lld.test_wasm_emscripten_finalize),
('wasm2js', wasm2js.test_wasm2js),
('validator', run_validator_tests),
('gcc', run_gcc_tests),
('unit', run_unittest),
('binaryenjs', binaryenjs.test_binaryen_js),
('binaryenjs_wasm', binaryenjs.test_binaryen_wasm),
])
def main():
all_suites = TEST_SUITES.keys()
skip_by_default = ['binaryenjs', 'binaryenjs_wasm']
if shared.options.list_suites:
for suite in all_suites:
print(suite)
return 0
for r in shared.requested:
if r not in all_suites:
print('invalid test suite: %s (see --list-suites)\n' % r)
return 1
if not shared.requested:
shared.requested = [s for s in all_suites if s not in skip_by_default]
for test in shared.requested:
TEST_SUITES[test]()
if shared.num_failures == 0:
print('\n[ success! ]')
if shared.warnings:
print('\n' + '\n'.join(shared.warnings))
if shared.num_failures > 0:
print('\n[ ' + str(shared.num_failures) + ' failures! ]')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| true | true |
f7f86067d041d6bd499f14e05c2b566ee158ccb8 | 60 | py | Python | python/testData/mover/multiLineSelection10_afterDown.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/mover/multiLineSelection10_afterDown.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/mover/multiLineSelection10_afterDown.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | if True:
pass
<caret><selection>a = 2
b = 3</selection>
| 12 | 23 | 0.616667 | if True:
pass
<caret><selection>a = 2
b = 3</selection>
| false | true |
f7f8609024e051a6ec66d9ca33f19229a1b73937 | 7,347 | py | Python | tests/predict_test.py | michael-weinstein/Elevation | f4e3e105a1e722284f3a32128f551d2ff466a1e0 | [
"MIT"
] | 96 | 2017-12-19T14:42:50.000Z | 2019-03-19T16:17:16.000Z | tests/predict_test.py | whensbrunch/Elevation | cd783ed7ea09d6d7c8c13dbba0c5f7daf5fa1719 | [
"MIT"
] | 4 | 2018-02-16T10:16:24.000Z | 2019-03-24T23:26:01.000Z | tests/predict_test.py | whensbrunch/Elevation | cd783ed7ea09d6d7c8c13dbba0c5f7daf5fa1719 | [
"MIT"
] | 18 | 2019-05-07T19:40:39.000Z | 2022-01-15T15:12:07.000Z | import os
import sys
import shutil
import unittest
# from mock import patch, Mock, PropertyMock, MagicMock
import pandas as pd
import numpy as np
from warnings import warn
class PredictTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(PredictTest, self).__init__(*args, **kwargs)
# @unittest.skip("ignore")
def test_predict_hmg(self):
sys.stdout = sys.__stdout__
import elevation.load_data
from elevation import settings, options
from elevation.cmds import predict
hmg = predict.Predict(init_models=False).get_hmg_data()
wildtype = list(hmg['30mer'])[:settings.pred_test_num]
offtarget = list(hmg['30mer_mut'])[:settings.pred_test_num]
predictions = predict.Predict().execute(wildtype, offtarget)
pred_df_data = {key: val.reshape(val.shape[0]) for key, val in predictions.iteritems()}
pred_df = pd.DataFrame(data=pred_df_data)
truth_df = pd.read_excel(settings.pred_default_fixture_file)[:settings.pred_test_num]
for column in pred_df:
if np.any(np.abs(pred_df[column] - truth_df[column]) > 0):
warn("predictions don't exactly match expected for %s" % column)
idx = np.abs(pred_df[column] - truth_df[column]) > 0
x = pred_df[column][idx] - truth_df[column][idx]
# for i, value in enumerate(x):
# warn("Inequality %s %s: %s" % (column, i, value))
assert np.allclose(pred_df[column], truth_df[column], atol=1e-08, rtol=0.0), "%s doesn't match" % column
# @unittest.skip("ignore")
def test_agg_nicolo(self):
import pickle
from elevation import settings
from elevation import aggregation
with open(settings.agg_nicolo_fixture_file, "r") as fh:
nicolo_results = pickle.load(fh)
model = aggregation.get_aggregation_model()
y_pred = model.predict(nicolo_results[0])
assert np.allclose(y_pred, nicolo_results[1])
@unittest.skip("ignore")
def test_predict_nicolo(self):
import pickle
from elevation import settings
from elevation.cmds.predict import Predict
preds_file = settings.pj(settings.repo_root, 'tests', 'fixtures', 'preds.lrs.hmg_v1v2.gsgr1.boxcox1.pkl')
with open(preds_file, 'r') as f:
preds = pickle.load(f)
p = Predict() # updated (new) Hauessler & GUIDE-seq
p.hmg_data = p.get_hmg_data(force_compute=True)
guides, offtargets = p.hmg_data['30mer'].values, p.hmg_data['30mer_mut'].values
hmg_preds = p.execute(guides, offtargets)['linear-raw-stacker']
assert np.allclose(preds, hmg_preds)
# @unittest.skip("ignore")
def test_agg_hauessler(self):
sys.stdout = sys.__stdout__
import pickle
from elevation import settings
from elevation import aggregation
with open(settings.agg_model_file) as fh:
final_model, other = pickle.load(fh)
inputs = pd.read_excel(settings.pred_default_fixture_file)
results = []
rs = np.random.RandomState(settings.default_random_seed)
perm = rs.permutation(inputs.shape[0])
stacker = inputs["linear-raw-stacker"].values[perm]
cfd = inputs["CFD"].values[perm]
isgenic = rs.random_sample(inputs.shape[0]) > 0.5
pos = 0
while pos < perm.shape[0]:
end = pos + rs.randint(1, 2000)
if end > perm.shape[0]:
end = perm.shape[0]
result = aggregation.get_aggregated_score(
stacker[pos:end],
cfd[pos:end],
isgenic[pos:end],
final_model)
results += list(result)
pos = end
pred_df = pd.DataFrame(data={"agg_score": results})
truth_df = pd.read_excel(settings.agg_default_fixture_file)
for column in pred_df:
if np.any(np.abs(pred_df[column] - truth_df[column]) > 0):
warn("aggregate predictions don't exactly match expected for %s" % column)
idx = np.abs(pred_df[column] - truth_df[column]) > 0
x = pred_df[column][idx] - truth_df[column][idx]
for i, value in enumerate(x):
warn("Inequality %s %s: %s" % (column, i, value))
assert np.allclose(pred_df[column], truth_df[column], atol=1e-10, rtol=0.0), "%s doesn't match" % column
# class FitTest(unittest.TestCase):
#
# def __init__(self, *args, **kwargs):
# super(FitTest, self).__init__(*args, **kwargs)
#
# def setUp(self):
# from elevation import settings
#
# self.cachedir = settings.pj(settings.repo_root, "tests", "cache")
# self.cachedir_patch = patch('elevation.settings.cachedir', self.cachedir)
# self.cachedir_patch.start()
#
# self.tmpdir = settings.pj(settings.repo_root, "tests", "tmp")
# self.tmpdir_patch = patch('elevation.settings.tmpdir', self.tmpdir)
# self.tmpdir_patch.start()
#
# print self.tmpdir
# if os.path.exists(self.cachedir):
# shutil.rmtree(self.cachedir)
# os.mkdir(self.cachedir)
#
# if os.path.exists(self.tmpdir):
# shutil.rmtree(self.tmpdir)
# os.mkdir(self.tmpdir)
#
# def tearDown(self):
# self.cachedir_patch.stop()
# self.tmpdir_patch.stop()
#
# @unittest.skip("ignore")
# def test_settings_mock(self):
# sys.stdout = sys.__stdout__
#
# from elevation import settings, prediction_pipeline, load_data
# from elevation.cmds import fit, predict
# import elevation
#
# assert self.cachedir == settings.cachedir
# assert self.cachedir == prediction_pipeline.settings.cachedir
# assert self.cachedir == load_data.settings.cachedir
# assert self.cachedir == fit.settings.cachedir
# assert self.cachedir == predict.settings.cachedir
# assert self.cachedir == elevation.settings.cachedir
# assert self.cachedir == elevation.prediction_pipeline.settings.cachedir
# assert self.cachedir == elevation.load_data.settings.cachedir
# assert self.cachedir == elevation.cmds.fit.settings.cachedir
# assert self.cachedir == elevation.cmds.predict.settings.cachedir
#
# assert self.tmpdir == settings.tmpdir
# assert self.tmpdir == prediction_pipeline.settings.tmpdir
# assert self.tmpdir == load_data.settings.tmpdir
# assert self.tmpdir == fit.settings.tmpdir
# assert self.tmpdir == predict.settings.tmpdir
# assert self.tmpdir == elevation.settings.tmpdir
# assert self.tmpdir == elevation.prediction_pipeline.settings.tmpdir
# assert self.tmpdir == elevation.load_data.settings.tmpdir
# assert self.tmpdir == elevation.cmds.fit.settings.tmpdir
# assert self.tmpdir == elevation.cmds.predict.settings.tmpdir
# @unittest.skip("ignore")
# def test_retrain_predict_hauessler(self):
# from elevation.cmds import predict, fit
#
# learn_options_override = {
# "seed": 12345
# }
#
# fit.Fit().execute(learn_options_override=learn_options_override, force_rebuild=True)
#
# @unittest.skip("ignore")
# def test_retrain_new_seed_predict_hauessler(self):
# pass
| 39.079787 | 116 | 0.6389 | import os
import sys
import shutil
import unittest
import pandas as pd
import numpy as np
from warnings import warn
class PredictTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(PredictTest, self).__init__(*args, **kwargs)
def test_predict_hmg(self):
sys.stdout = sys.__stdout__
import elevation.load_data
from elevation import settings, options
from elevation.cmds import predict
hmg = predict.Predict(init_models=False).get_hmg_data()
wildtype = list(hmg['30mer'])[:settings.pred_test_num]
offtarget = list(hmg['30mer_mut'])[:settings.pred_test_num]
predictions = predict.Predict().execute(wildtype, offtarget)
pred_df_data = {key: val.reshape(val.shape[0]) for key, val in predictions.iteritems()}
pred_df = pd.DataFrame(data=pred_df_data)
truth_df = pd.read_excel(settings.pred_default_fixture_file)[:settings.pred_test_num]
for column in pred_df:
if np.any(np.abs(pred_df[column] - truth_df[column]) > 0):
warn("predictions don't exactly match expected for %s" % column)
idx = np.abs(pred_df[column] - truth_df[column]) > 0
x = pred_df[column][idx] - truth_df[column][idx]
# for i, value in enumerate(x):
# warn("Inequality %s %s: %s" % (column, i, value))
assert np.allclose(pred_df[column], truth_df[column], atol=1e-08, rtol=0.0), "%s doesn't match" % column
def test_agg_nicolo(self):
import pickle
from elevation import settings
from elevation import aggregation
with open(settings.agg_nicolo_fixture_file, "r") as fh:
nicolo_results = pickle.load(fh)
model = aggregation.get_aggregation_model()
y_pred = model.predict(nicolo_results[0])
assert np.allclose(y_pred, nicolo_results[1])
@unittest.skip("ignore")
def test_predict_nicolo(self):
import pickle
from elevation import settings
from elevation.cmds.predict import Predict
preds_file = settings.pj(settings.repo_root, 'tests', 'fixtures', 'preds.lrs.hmg_v1v2.gsgr1.boxcox1.pkl')
with open(preds_file, 'r') as f:
preds = pickle.load(f)
p = Predict()
p.hmg_data = p.get_hmg_data(force_compute=True)
guides, offtargets = p.hmg_data['30mer'].values, p.hmg_data['30mer_mut'].values
hmg_preds = p.execute(guides, offtargets)['linear-raw-stacker']
assert np.allclose(preds, hmg_preds)
def test_agg_hauessler(self):
sys.stdout = sys.__stdout__
import pickle
from elevation import settings
from elevation import aggregation
with open(settings.agg_model_file) as fh:
final_model, other = pickle.load(fh)
inputs = pd.read_excel(settings.pred_default_fixture_file)
results = []
rs = np.random.RandomState(settings.default_random_seed)
perm = rs.permutation(inputs.shape[0])
stacker = inputs["linear-raw-stacker"].values[perm]
cfd = inputs["CFD"].values[perm]
isgenic = rs.random_sample(inputs.shape[0]) > 0.5
pos = 0
while pos < perm.shape[0]:
end = pos + rs.randint(1, 2000)
if end > perm.shape[0]:
end = perm.shape[0]
result = aggregation.get_aggregated_score(
stacker[pos:end],
cfd[pos:end],
isgenic[pos:end],
final_model)
results += list(result)
pos = end
pred_df = pd.DataFrame(data={"agg_score": results})
truth_df = pd.read_excel(settings.agg_default_fixture_file)
for column in pred_df:
if np.any(np.abs(pred_df[column] - truth_df[column]) > 0):
warn("aggregate predictions don't exactly match expected for %s" % column)
idx = np.abs(pred_df[column] - truth_df[column]) > 0
x = pred_df[column][idx] - truth_df[column][idx]
for i, value in enumerate(x):
warn("Inequality %s %s: %s" % (column, i, value))
assert np.allclose(pred_df[column], truth_df[column], atol=1e-10, rtol=0.0), "%s doesn't match" % column
| true | true |
f7f861c791647b08fca6b48aef7d6edd26f3b27b | 13,706 | py | Python | COMP1001_Assignment8&9_14010627X.py | brandonlee503/COMP1001-Assignments | 6e25dcb367537251887bf487970e4b3a8196b78d | [
"MIT"
] | null | null | null | COMP1001_Assignment8&9_14010627X.py | brandonlee503/COMP1001-Assignments | 6e25dcb367537251887bf487970e4b3a8196b78d | [
"MIT"
] | null | null | null | COMP1001_Assignment8&9_14010627X.py | brandonlee503/COMP1001-Assignments | 6e25dcb367537251887bf487970e4b3a8196b78d | [
"MIT"
] | null | null | null | """
Brandon Lee
14010627X
COMP1001 Assignment 8-9
11-14-14
"""
from random import shuffle
import os
"""
getKey() PseudoCode
function definition: def genkey(length):
check if length is less than 1: if(length<1):
if so raise value error: raise ValueError
otherwise: else:
create key: key = []
for loop from i to the length: for i in range(length):
add i value to key: key.append(i)
shuffle key list: shuffle(key)
return key: return key
"""
def genkey(length):
if(length < 1):
raise ValueError("The key length is not valid")
else:
key = []
for i in range(length):
key.append(i)
shuffle(key)
return key
#----------------------------------------------------------------------------------------------------------------------------------------
"""
printkey() Pseudocode
function definition: def printkey(key):
check if length of key is under 1: if(len(key) < 1):
if so raise value error: raise ValueError
otherwise print plain/cyphertext: else:
declare alphabet list: alphabet = ["a", ... ,"z"]
for each element in key: for i in range(len(key)):
print it: print(key[i])
for each element in alphabet: for i in range(len(alphabet)):
print it: print(alphabet[i])
"""
def printkey(key):
if(len(key) < 1):
raise ValueError("The key length is not valid")
else:
alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
print("Pos in CypherText: ", end="")
for i in range(len(key)):
## Uncomment if printkey() is used before encrypt()
## temp = key[i]
## key[i] = alphabet[temp]
print(key[i], end=" ")
print("\nPos in PlainText: ", end="")
for i in range(len(alphabet)):
print(alphabet[i], end=" ")
#----------------------------------------------------------------------------------------------------------------------------------------
"""
encrypt() Pesudocode
function definition: def encrypt(plaintext, key):
if key length not 26: if(len(key) != 26):
raise value error: raise ValueError
otherwise: else:
create list from plaintext and convert to lowercase: listPlainText = list(plaintext.lower())
declare alphabet: alphabet = ["a", ... ,"z"]
Change key from numbers to alphabet letters..
*Note: No need to do this again in printkey() if done here*
for each value in key: for i in range(len(key)):
store key value: temp = key[i]
redeclare key value with the stored key value in alphabet: key[i] = alphabet[temp]
Change each character in string to new encrypted key value..
for each value in plaintext: for i in range(len(plaintext)):
if character is in the alphabet: if(listPlainText[i] in alphabet):
get the letter position in the alphabet: letterPos = alphabet.index(letterPlainText[i])
change the text to key position of the letter: listPlainText[i] = key[letterPos]
join all list values together: plaintext = "".join(listPlainText)
return: return plaintext
"""
def encrypt(plaintext, key):
if(len(key) != 26):
raise ValueError("The key length is not 26")
else:
listPlainText = list( plaintext.lower() )
alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
# Comment if printkey() is used before encrypt()
for i in range(len(key)): #
temp = key[i] #
key[i] = alphabet[temp] #
for i in range(len(plaintext)):
if(listPlainText[i] in alphabet):
letterPos = alphabet.index(listPlainText[i])
listPlainText[i] = key[letterPos]
plaintext = "".join(listPlainText)
return plaintext
#----------------------------------------------------------------------------------------------------------------------------------------
"""
decrypt() Pseudocode
function decleration: def decrypt(ciphertext, key):
if key length is not 26 if(len(key) != 26):
raise value error raise ValueError
otherwise else:
create list from ciphertext and convert to lowercase: listCipherText = list( ciphertext.lower() )
declare alphabet alphabet = ["a",... ,"z"]
for every value in ciphertext for i in range(len(ciphertext)):
if it's a letter if(listCipherText[i] in alphabet):
get the letter position in the key letterPos = key.index(listCipherText[i])
change the key to alphabet position of the letter listCipherText[i] = alphabet[letterPos]
combine list ciphertext = "".join(listCipherText)
return return ciphertext
"""
def decrypt(ciphertext, key):
if(len(key) != 26):
raise ValueError("The key length is not 26")
else:
listCipherText = list( ciphertext.lower() )
alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for i in range(len(ciphertext)):
if(listCipherText[i] in alphabet):
letterPos = key.index(listCipherText[i])
listCipherText[i] = alphabet[letterPos]
ciphertext = "".join(listCipherText)
return ciphertext
#----------------------------------------------------------------------------------------------------------------------------------------
"""
getFile() Prototype
function definition: def getFile():
Create while loop: while(True):
Ask user for file name: fileName = input("Please input a text file: ")
if file exists: if os.path.exists(fileName):
open the file fileHandler = open(fileName, "r+")
return the file name and handler return (fileName, fileHandler)
otherwise else:
print error message and loop print("File not found. Please re-enter a file name: ")
"""
def getFile():
while(True):
fileName = input("Please input a text file: ")
if os.path.exists(fileName):
fileHandler = open(fileName, "r+")
return (fileName, fileHandler)
else:
print("File not found. Please re-enter a file name: ")
#----------------------------------------------------------------------------------------------------------------------------------------
"""
isSameFile() Pseudocode
function definition: def isSameFile(file1, file2):
if both files exist: if(os.path.exists(file1) and os.path.exists(file2)):
open both and read: file1h = open(file1, "r")
file2h = open(file2, "r")
read content: content1 = file1h.read()
content2 = file2h.read()
change content to lowercase: lower1 = content1.lower()
lower2 = content2.lower()
if both contents are equal: if(lower1 == lower2):
return true return True
otherwise false else:
return False
if file not found else:
raise ioerror raise IOError("The file(s) cannot be found")
"""
def isSameFile(file1, file2):
if(os.path.exists(file1) and os.path.exists(file2)):
file1h = open(file1, "r")
file2h = open(file2, "r")
content1 = file1h.read()
content2 = file2h.read()
lower1 = content1.lower()
lower2 = content2.lower()
if(lower1 == lower2):
return True
else:
return False
else:
raise IOError("The file(s) cannot be found")
#----------------------------------------------------------------------------------------------------------------------------------------
"""
test() Pseudocode
funciton definition: def test():
set working directory: os.chdir("C:\\")
create key: key = genkey(26)
get user input for file: fileName, fileHandler = getFile()
create text file: encryptFile = open("Encrypted_"+fileName+".txt", "w+")
begin try block: try:
read from user inputted file: encryptContent = encrypt(fileHandler.read(), key)
print randomly generated key: printkey(key)
read encrypted file and write over: encryptFile.write(encryptContent)
begin except block: except ValueError as errMsg:
print error message: print(errMsg)
end program: return
close file: encryptFile.close()
reopen file: encryptFile = open("Encrypted_"+fileName+".txt", "r+")
read encrypted content and write over: encryptContent = encryptFile.read()
reclose file: encryptFile.close()
create another text file for editing: decryptFile = open("Decrypted_"+fileName+".txt", "w+")
begin try block: try:
decrypt content and create decrypted string: decryptContent = decrypt(encryptContent, key)
write decrypted content onto decrypted content: decryptFile.write(decryptContent)
begin except block: except ValueError as errMsg:
print error: print(errMsg)
end program: return
begin try block: try:
check if encryption/decryption work with isSameFile(): isSameFile(fileName, "Decrypted_"+fileName+".txt")
begin except block: except IOError as errMsg:
print error: print(errMsg)
end program: return
print finish message: print("Congrats, all done!")
"""
def test():
os.chdir("C:\\") # set working directory
key = genkey(26) # create key
fileName, fileHandler = getFile()
encryptFile = open("Encrypted_"+fileName+".txt", "w+") # w+ is for creating/reading/writing
try:
encryptContent = encrypt(fileHandler.read(), key) # Read from main file
printkey(key) # Print key
encryptFile.write(encryptContent) # read encrypted content and write over
except ValueError as errMsg:
print(errMsg)
return
encryptFile.close() # Close and reopen file
encryptFile = open("Encrypted_"+fileName+".txt", "r+")
encryptContent = encryptFile.read() # Read file
encryptFile.close() # Reclose
decryptFile = open("Decrypted_"+fileName+".txt", "w+") # Create another file
try:
decryptContent = decrypt(encryptContent, key) # decrypt content and write string
decryptFile.write(decryptContent) # write decrypted content onto decrypted content
except ValueError as errMsg:
print(errMsg)
return
try:
isSameFile(fileName, "Decrypted_"+fileName+".txt")
except IOError as errMsg:
print(errMsg)
return
print("\nCongrats, encryption and decryption work correctly")
#----------------------------------------------------------------------------------------------------------------------------------------
#If module is run as main module itself, run test
if __name__ == "__main__":
test()
| 51.141791 | 138 | 0.445936 |
from random import shuffle
import os
def genkey(length):
if(length < 1):
raise ValueError("The key length is not valid")
else:
key = []
for i in range(length):
key.append(i)
shuffle(key)
return key
def printkey(key):
if(len(key) < 1):
raise ValueError("The key length is not valid")
else:
alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
print("Pos in CypherText: ", end="")
for i in range(len(key)):
in range(len(alphabet)):
print(alphabet[i], end=" ")
def encrypt(plaintext, key):
if(len(key) != 26):
raise ValueError("The key length is not 26")
else:
listPlainText = list( plaintext.lower() )
alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for i in range(len(key)):
temp = key[i]
key[i] = alphabet[temp]
for i in range(len(plaintext)):
if(listPlainText[i] in alphabet):
letterPos = alphabet.index(listPlainText[i])
listPlainText[i] = key[letterPos]
plaintext = "".join(listPlainText)
return plaintext
def decrypt(ciphertext, key):
if(len(key) != 26):
raise ValueError("The key length is not 26")
else:
listCipherText = list( ciphertext.lower() )
alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for i in range(len(ciphertext)):
if(listCipherText[i] in alphabet):
letterPos = key.index(listCipherText[i])
listCipherText[i] = alphabet[letterPos]
ciphertext = "".join(listCipherText)
return ciphertext
def getFile():
while(True):
fileName = input("Please input a text file: ")
if os.path.exists(fileName):
fileHandler = open(fileName, "r+")
return (fileName, fileHandler)
else:
print("File not found. Please re-enter a file name: ")
def isSameFile(file1, file2):
if(os.path.exists(file1) and os.path.exists(file2)):
file1h = open(file1, "r")
file2h = open(file2, "r")
content1 = file1h.read()
content2 = file2h.read()
lower1 = content1.lower()
lower2 = content2.lower()
if(lower1 == lower2):
return True
else:
return False
else:
raise IOError("The file(s) cannot be found")
def test():
os.chdir("C:\\")
key = genkey(26)
fileName, fileHandler = getFile()
encryptFile = open("Encrypted_"+fileName+".txt", "w+")
try:
encryptContent = encrypt(fileHandler.read(), key)
printkey(key)
encryptFile.write(encryptContent)
except ValueError as errMsg:
print(errMsg)
return
encryptFile.close()
encryptFile = open("Encrypted_"+fileName+".txt", "r+")
encryptContent = encryptFile.read()
encryptFile.close()
decryptFile = open("Decrypted_"+fileName+".txt", "w+")
try:
decryptContent = decrypt(encryptContent, key)
decryptFile.write(decryptContent)
except ValueError as errMsg:
print(errMsg)
return
try:
isSameFile(fileName, "Decrypted_"+fileName+".txt")
except IOError as errMsg:
print(errMsg)
return
print("\nCongrats, encryption and decryption work correctly")
if __name__ == "__main__":
test()
| true | true |
f7f862522c15f8f8be8369d94c8c90be5cd735fb | 29,816 | py | Python | easy_work_service_sdk/model/flow/flow_instance_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | easy_work_service_sdk/model/flow/flow_instance_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | easy_work_service_sdk/model/flow/flow_instance_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: flow_instance.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from easy_work_service_sdk.model.flow import flow_execute_step_pb2 as easy__work__service__sdk_dot_model_dot_flow_dot_flow__execute__step__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='flow_instance.proto',
package='flow',
syntax='proto3',
serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/flow'),
serialized_pb=_b('\n\x13\x66low_instance.proto\x12\x04\x66low\x1a\x38\x65\x61sy_work_service_sdk/model/flow/flow_execute_step.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xf2\n\n\x0c\x46lowInstance\x12\'\n\x08stepList\x18\x01 \x03(\x0b\x32\x15.flow.FlowExecuteStep\x12\x0e\n\x06taskId\x18\x02 \x01(\t\x12,\n\x0binstanceMap\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x07outputs\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Value\x12,\n\x0crunningSteps\x18\x05 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x12\n\nneedNotify\x18\x06 \x01(\x08\x12\x11\n\tstartTime\x18\x07 \x01(\x03\x12\x0f\n\x07\x65ndTime\x18\x08 \x01(\x03\x12\x13\n\x0b\x63urrentTime\x18\t \x01(\x03\x12\x13\n\x0btotalStatus\x18\n \x01(\t\x12\x0f\n\x07message\x18\x0b \x01(\t\x12\x13\n\x0btaskCounter\x18\x0c \x01(\x05\x12/\n\x0f\x66lowOutputsData\x18\r \x01(\x0b\x32\x16.google.protobuf.Value\x12)\n\ttableData\x18\x0e \x01(\x0b\x32\x16.google.protobuf.Value\x12/\n\x0fstandardOutputs\x18\x0f \x01(\x0b\x32\x16.google.protobuf.Value\x12)\n\tagentData\x18\x10 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x0e\n\x06\x66lowId\x18\x11 \x01(\t\x12\x0f\n\x07version\x18\x12 \x01(\x05\x12*\n\nflowInputs\x18\x13 \x01(\x0b\x32\x16.google.protobuf.Value\x12\'\n\x07\x66lowEnv\x18\x14 \x01(\x0b\x32\x16.google.protobuf.Value\x12-\n\x08metadata\x18\x15 \x01(\x0b\x32\x1b.flow.FlowInstance.Metadata\x12\x0c\n\x04name\x18\x16 \x01(\t\x12\x0b\n\x03org\x18\x17 \x01(\x05\x12\x33\n\x0b\x66lowOutputs\x18\x18 \x03(\x0b\x32\x1e.flow.FlowInstance.FlowOutputs\x12\x31\n\noutputDefs\x18\x19 \x03(\x0b\x32\x1d.flow.FlowInstance.OutputDefs\x12/\n\ttableDefs\x18\x1a \x03(\x0b\x32\x1c.flow.FlowInstance.TableDefs\x12\x0f\n\x07\x63reator\x18\x1b \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x1c \x01(\t\x12\x12\n\nupdateTime\x18\x1d \x01(\t\x12\x12\n\ncreateTime\x18\x1e \x01(\t\x1a&\n\x08Metadata\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x02 \x01(\t\x1ay\n\x0b\x46lowOutputs\x12\x37\n\x07\x63olumns\x18\x01 
\x03(\x0b\x32&.flow.FlowInstance.FlowOutputs.Columns\x1a\x31\n\x07\x43olumns\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1a\x34\n\nOutputDefs\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1a\xe6\x01\n\tTableDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12;\n\ndimensions\x18\x03 \x03(\x0b\x32\'.flow.FlowInstance.TableDefs.Dimensions\x12\x35\n\x07\x63olumns\x18\x04 \x03(\x0b\x32$.flow.FlowInstance.TableDefs.Columns\x1a&\n\nDimensions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a#\n\x07\x43olumns\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\tB@Z>go.easyops.local/contracts/protorepo-models/easyops/model/flowb\x06proto3')
,
dependencies=[easy__work__service__sdk_dot_model_dot_flow_dot_flow__execute__step__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
# Descriptor for the nested message flow.FlowInstance.Metadata.
# Two optional proto3 string fields: `type` (field 1) and `desc` (field 2).
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_METADATA = _descriptor.Descriptor(
  name='Metadata',
  full_name='flow.FlowInstance.Metadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='flow.FlowInstance.Metadata.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='desc', full_name='flow.FlowInstance.Metadata.desc', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1064,
  serialized_end=1102,
)
# Descriptor for flow.FlowInstance.FlowOutputs.Columns.
# Three optional proto3 string fields: `type` (1), `id` (2), `name` (3).
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_FLOWOUTPUTS_COLUMNS = _descriptor.Descriptor(
  name='Columns',
  full_name='flow.FlowInstance.FlowOutputs.Columns',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='flow.FlowInstance.FlowOutputs.Columns.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='id', full_name='flow.FlowInstance.FlowOutputs.Columns.id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='flow.FlowInstance.FlowOutputs.Columns.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1176,
  serialized_end=1225,
)
# Descriptor for flow.FlowInstance.FlowOutputs.
# One repeated message field `columns` (field 1); its element type is the
# nested Columns message (linked after all descriptors are declared).
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_FLOWOUTPUTS = _descriptor.Descriptor(
  name='FlowOutputs',
  full_name='flow.FlowInstance.FlowOutputs',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='columns', full_name='flow.FlowInstance.FlowOutputs.columns', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_FLOWINSTANCE_FLOWOUTPUTS_COLUMNS, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1104,
  serialized_end=1225,
)
# Descriptor for flow.FlowInstance.OutputDefs.
# Three optional proto3 string fields: `type` (1), `id` (2), `name` (3) —
# same shape as FlowOutputs.Columns but a distinct message type.
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_OUTPUTDEFS = _descriptor.Descriptor(
  name='OutputDefs',
  full_name='flow.FlowInstance.OutputDefs',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='flow.FlowInstance.OutputDefs.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='id', full_name='flow.FlowInstance.OutputDefs.id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='flow.FlowInstance.OutputDefs.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1227,
  serialized_end=1279,
)
# Descriptor for flow.FlowInstance.TableDefs.Dimensions.
# Two optional proto3 string fields: `id` (1) and `name` (2).
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_TABLEDEFS_DIMENSIONS = _descriptor.Descriptor(
  name='Dimensions',
  full_name='flow.FlowInstance.TableDefs.Dimensions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='flow.FlowInstance.TableDefs.Dimensions.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='flow.FlowInstance.TableDefs.Dimensions.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1437,
  serialized_end=1475,
)
# Descriptor for flow.FlowInstance.TableDefs.Columns.
# Two optional proto3 string fields: `id` (1) and `name` (2) — note this
# differs from FlowOutputs.Columns, which also carries a `type` field.
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_TABLEDEFS_COLUMNS = _descriptor.Descriptor(
  name='Columns',
  full_name='flow.FlowInstance.TableDefs.Columns',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='flow.FlowInstance.TableDefs.Columns.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='flow.FlowInstance.TableDefs.Columns.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1477,
  serialized_end=1512,
)
# Descriptor for flow.FlowInstance.TableDefs.
# Fields: `id` (1, string), `name` (2, string), repeated `dimensions` (3) and
# repeated `columns` (4); the message types for 3 and 4 are the nested
# Dimensions/Columns descriptors, wired up in the linking section below.
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE_TABLEDEFS = _descriptor.Descriptor(
  name='TableDefs',
  full_name='flow.FlowInstance.TableDefs',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='flow.FlowInstance.TableDefs.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='flow.FlowInstance.TableDefs.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dimensions', full_name='flow.FlowInstance.TableDefs.dimensions', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='columns', full_name='flow.FlowInstance.TableDefs.columns', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_FLOWINSTANCE_TABLEDEFS_DIMENSIONS, _FLOWINSTANCE_TABLEDEFS_COLUMNS, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1282,
  serialized_end=1512,
)
# Descriptor for the top-level message flow.FlowInstance (30 fields, numbers
# 1-30). Message-typed fields (type=11) reference FlowExecuteStep, the
# well-known Struct/Value types, or the nested messages above; those links
# are filled in after construction, in the linking section below.
# Generated by protoc — do not hand-edit; regenerate from flow_instance.proto.
_FLOWINSTANCE = _descriptor.Descriptor(
  name='FlowInstance',
  full_name='flow.FlowInstance',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='stepList', full_name='flow.FlowInstance.stepList', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='taskId', full_name='flow.FlowInstance.taskId', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instanceMap', full_name='flow.FlowInstance.instanceMap', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='outputs', full_name='flow.FlowInstance.outputs', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='runningSteps', full_name='flow.FlowInstance.runningSteps', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='needNotify', full_name='flow.FlowInstance.needNotify', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='startTime', full_name='flow.FlowInstance.startTime', index=6,
      number=7, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='endTime', full_name='flow.FlowInstance.endTime', index=7,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='currentTime', full_name='flow.FlowInstance.currentTime', index=8,
      number=9, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='totalStatus', full_name='flow.FlowInstance.totalStatus', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='message', full_name='flow.FlowInstance.message', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='taskCounter', full_name='flow.FlowInstance.taskCounter', index=11,
      number=12, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='flowOutputsData', full_name='flow.FlowInstance.flowOutputsData', index=12,
      number=13, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tableData', full_name='flow.FlowInstance.tableData', index=13,
      number=14, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='standardOutputs', full_name='flow.FlowInstance.standardOutputs', index=14,
      number=15, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='agentData', full_name='flow.FlowInstance.agentData', index=15,
      number=16, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='flowId', full_name='flow.FlowInstance.flowId', index=16,
      number=17, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='version', full_name='flow.FlowInstance.version', index=17,
      number=18, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='flowInputs', full_name='flow.FlowInstance.flowInputs', index=18,
      number=19, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='flowEnv', full_name='flow.FlowInstance.flowEnv', index=19,
      number=20, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='flow.FlowInstance.metadata', index=20,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='flow.FlowInstance.name', index=21,
      number=22, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='org', full_name='flow.FlowInstance.org', index=22,
      number=23, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='flowOutputs', full_name='flow.FlowInstance.flowOutputs', index=23,
      number=24, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='outputDefs', full_name='flow.FlowInstance.outputDefs', index=24,
      number=25, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tableDefs', full_name='flow.FlowInstance.tableDefs', index=25,
      number=26, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='creator', full_name='flow.FlowInstance.creator', index=26,
      number=27, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='category', full_name='flow.FlowInstance.category', index=27,
      number=28, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='updateTime', full_name='flow.FlowInstance.updateTime', index=28,
      number=29, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='createTime', full_name='flow.FlowInstance.createTime', index=29,
      number=30, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_FLOWINSTANCE_METADATA, _FLOWINSTANCE_FLOWOUTPUTS, _FLOWINSTANCE_OUTPUTDEFS, _FLOWINSTANCE_TABLEDEFS, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=118,
  serialized_end=1512,
)
_FLOWINSTANCE_METADATA.containing_type = _FLOWINSTANCE
_FLOWINSTANCE_FLOWOUTPUTS_COLUMNS.containing_type = _FLOWINSTANCE_FLOWOUTPUTS
_FLOWINSTANCE_FLOWOUTPUTS.fields_by_name['columns'].message_type = _FLOWINSTANCE_FLOWOUTPUTS_COLUMNS
_FLOWINSTANCE_FLOWOUTPUTS.containing_type = _FLOWINSTANCE
_FLOWINSTANCE_OUTPUTDEFS.containing_type = _FLOWINSTANCE
_FLOWINSTANCE_TABLEDEFS_DIMENSIONS.containing_type = _FLOWINSTANCE_TABLEDEFS
_FLOWINSTANCE_TABLEDEFS_COLUMNS.containing_type = _FLOWINSTANCE_TABLEDEFS
_FLOWINSTANCE_TABLEDEFS.fields_by_name['dimensions'].message_type = _FLOWINSTANCE_TABLEDEFS_DIMENSIONS
_FLOWINSTANCE_TABLEDEFS.fields_by_name['columns'].message_type = _FLOWINSTANCE_TABLEDEFS_COLUMNS
_FLOWINSTANCE_TABLEDEFS.containing_type = _FLOWINSTANCE
_FLOWINSTANCE.fields_by_name['stepList'].message_type = easy__work__service__sdk_dot_model_dot_flow_dot_flow__execute__step__pb2._FLOWEXECUTESTEP
_FLOWINSTANCE.fields_by_name['instanceMap'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FLOWINSTANCE.fields_by_name['outputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['runningSteps'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['flowOutputsData'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['tableData'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['standardOutputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['agentData'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['flowInputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['flowEnv'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['metadata'].message_type = _FLOWINSTANCE_METADATA
_FLOWINSTANCE.fields_by_name['flowOutputs'].message_type = _FLOWINSTANCE_FLOWOUTPUTS
_FLOWINSTANCE.fields_by_name['outputDefs'].message_type = _FLOWINSTANCE_OUTPUTDEFS
_FLOWINSTANCE.fields_by_name['tableDefs'].message_type = _FLOWINSTANCE_TABLEDEFS
DESCRIPTOR.message_types_by_name['FlowInstance'] = _FLOWINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete FlowInstance message class (and its nested classes)
# from the descriptors via the reflection machinery. The nested dict keys
# become attributes: FlowInstance.Metadata, FlowInstance.FlowOutputs (with
# .Columns), FlowInstance.OutputDefs, FlowInstance.TableDefs (with
# .Dimensions and .Columns).
FlowInstance = _reflection.GeneratedProtocolMessageType('FlowInstance', (_message.Message,), {

  'Metadata' : _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
    'DESCRIPTOR' : _FLOWINSTANCE_METADATA,
    '__module__' : 'flow_instance_pb2'
    # @@protoc_insertion_point(class_scope:flow.FlowInstance.Metadata)
    })
  ,

  'FlowOutputs' : _reflection.GeneratedProtocolMessageType('FlowOutputs', (_message.Message,), {

    'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
      'DESCRIPTOR' : _FLOWINSTANCE_FLOWOUTPUTS_COLUMNS,
      '__module__' : 'flow_instance_pb2'
      # @@protoc_insertion_point(class_scope:flow.FlowInstance.FlowOutputs.Columns)
      })
    ,
    'DESCRIPTOR' : _FLOWINSTANCE_FLOWOUTPUTS,
    '__module__' : 'flow_instance_pb2'
    # @@protoc_insertion_point(class_scope:flow.FlowInstance.FlowOutputs)
    })
  ,

  'OutputDefs' : _reflection.GeneratedProtocolMessageType('OutputDefs', (_message.Message,), {
    'DESCRIPTOR' : _FLOWINSTANCE_OUTPUTDEFS,
    '__module__' : 'flow_instance_pb2'
    # @@protoc_insertion_point(class_scope:flow.FlowInstance.OutputDefs)
    })
  ,

  'TableDefs' : _reflection.GeneratedProtocolMessageType('TableDefs', (_message.Message,), {

    'Dimensions' : _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), {
      'DESCRIPTOR' : _FLOWINSTANCE_TABLEDEFS_DIMENSIONS,
      '__module__' : 'flow_instance_pb2'
      # @@protoc_insertion_point(class_scope:flow.FlowInstance.TableDefs.Dimensions)
      })
    ,

    'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
      'DESCRIPTOR' : _FLOWINSTANCE_TABLEDEFS_COLUMNS,
      '__module__' : 'flow_instance_pb2'
      # @@protoc_insertion_point(class_scope:flow.FlowInstance.TableDefs.Columns)
      })
    ,
    'DESCRIPTOR' : _FLOWINSTANCE_TABLEDEFS,
    '__module__' : 'flow_instance_pb2'
    # @@protoc_insertion_point(class_scope:flow.FlowInstance.TableDefs)
    })
  ,
  'DESCRIPTOR' : _FLOWINSTANCE,
  '__module__' : 'flow_instance_pb2'
  # @@protoc_insertion_point(class_scope:flow.FlowInstance)
  })
# Register every generated class (top-level and nested) with the default
# symbol database so they can be looked up by full name, then clear the
# file-level options placeholder (the go_package option lives in the
# serialized descriptor).
_sym_db.RegisterMessage(FlowInstance)
_sym_db.RegisterMessage(FlowInstance.Metadata)
_sym_db.RegisterMessage(FlowInstance.FlowOutputs)
_sym_db.RegisterMessage(FlowInstance.FlowOutputs.Columns)
_sym_db.RegisterMessage(FlowInstance.OutputDefs)
_sym_db.RegisterMessage(FlowInstance.TableDefs)
_sym_db.RegisterMessage(FlowInstance.TableDefs.Dimensions)
_sym_db.RegisterMessage(FlowInstance.TableDefs.Columns)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.733542 | 2,770 | 0.746311 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from easy_work_service_sdk.model.flow import flow_execute_step_pb2 as easy__work__service__sdk_dot_model_dot_flow_dot_flow__execute__step__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='flow_instance.proto',
package='flow',
syntax='proto3',
serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/flow'),
serialized_pb=_b('\n\x13\x66low_instance.proto\x12\x04\x66low\x1a\x38\x65\x61sy_work_service_sdk/model/flow/flow_execute_step.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xf2\n\n\x0c\x46lowInstance\x12\'\n\x08stepList\x18\x01 \x03(\x0b\x32\x15.flow.FlowExecuteStep\x12\x0e\n\x06taskId\x18\x02 \x01(\t\x12,\n\x0binstanceMap\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x07outputs\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Value\x12,\n\x0crunningSteps\x18\x05 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x12\n\nneedNotify\x18\x06 \x01(\x08\x12\x11\n\tstartTime\x18\x07 \x01(\x03\x12\x0f\n\x07\x65ndTime\x18\x08 \x01(\x03\x12\x13\n\x0b\x63urrentTime\x18\t \x01(\x03\x12\x13\n\x0btotalStatus\x18\n \x01(\t\x12\x0f\n\x07message\x18\x0b \x01(\t\x12\x13\n\x0btaskCounter\x18\x0c \x01(\x05\x12/\n\x0f\x66lowOutputsData\x18\r \x01(\x0b\x32\x16.google.protobuf.Value\x12)\n\ttableData\x18\x0e \x01(\x0b\x32\x16.google.protobuf.Value\x12/\n\x0fstandardOutputs\x18\x0f \x01(\x0b\x32\x16.google.protobuf.Value\x12)\n\tagentData\x18\x10 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x0e\n\x06\x66lowId\x18\x11 \x01(\t\x12\x0f\n\x07version\x18\x12 \x01(\x05\x12*\n\nflowInputs\x18\x13 \x01(\x0b\x32\x16.google.protobuf.Value\x12\'\n\x07\x66lowEnv\x18\x14 \x01(\x0b\x32\x16.google.protobuf.Value\x12-\n\x08metadata\x18\x15 \x01(\x0b\x32\x1b.flow.FlowInstance.Metadata\x12\x0c\n\x04name\x18\x16 \x01(\t\x12\x0b\n\x03org\x18\x17 \x01(\x05\x12\x33\n\x0b\x66lowOutputs\x18\x18 \x03(\x0b\x32\x1e.flow.FlowInstance.FlowOutputs\x12\x31\n\noutputDefs\x18\x19 \x03(\x0b\x32\x1d.flow.FlowInstance.OutputDefs\x12/\n\ttableDefs\x18\x1a \x03(\x0b\x32\x1c.flow.FlowInstance.TableDefs\x12\x0f\n\x07\x63reator\x18\x1b \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x1c \x01(\t\x12\x12\n\nupdateTime\x18\x1d \x01(\t\x12\x12\n\ncreateTime\x18\x1e \x01(\t\x1a&\n\x08Metadata\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x02 \x01(\t\x1ay\n\x0b\x46lowOutputs\x12\x37\n\x07\x63olumns\x18\x01 
\x03(\x0b\x32&.flow.FlowInstance.FlowOutputs.Columns\x1a\x31\n\x07\x43olumns\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1a\x34\n\nOutputDefs\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1a\xe6\x01\n\tTableDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12;\n\ndimensions\x18\x03 \x03(\x0b\x32\'.flow.FlowInstance.TableDefs.Dimensions\x12\x35\n\x07\x63olumns\x18\x04 \x03(\x0b\x32$.flow.FlowInstance.TableDefs.Columns\x1a&\n\nDimensions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a#\n\x07\x43olumns\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\tB@Z>go.easyops.local/contracts/protorepo-models/easyops/model/flowb\x06proto3')
,
dependencies=[easy__work__service__sdk_dot_model_dot_flow_dot_flow__execute__step__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
# -- Machine-generated by the protocol buffer compiler (protoc); do not edit. --
# Descriptor for the nested message flow.FlowInstance.Metadata, which has
# two fields: 'type' (field 1) and 'desc' (field 2).
_FLOWINSTANCE_METADATA = _descriptor.Descriptor(
  name='Metadata',
  full_name='flow.FlowInstance.Metadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='flow.FlowInstance.Metadata.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='desc', full_name='flow.FlowInstance.Metadata.desc', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1064,
  serialized_end=1102,
)
_FLOWINSTANCE_FLOWOUTPUTS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='flow.FlowInstance.FlowOutputs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.FlowInstance.FlowOutputs.Columns.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='flow.FlowInstance.FlowOutputs.Columns.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.FlowInstance.FlowOutputs.Columns.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1176,
serialized_end=1225,
)
_FLOWINSTANCE_FLOWOUTPUTS = _descriptor.Descriptor(
name='FlowOutputs',
full_name='flow.FlowInstance.FlowOutputs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='columns', full_name='flow.FlowInstance.FlowOutputs.columns', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FLOWINSTANCE_FLOWOUTPUTS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1104,
serialized_end=1225,
)
_FLOWINSTANCE_OUTPUTDEFS = _descriptor.Descriptor(
name='OutputDefs',
full_name='flow.FlowInstance.OutputDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.FlowInstance.OutputDefs.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='flow.FlowInstance.OutputDefs.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.FlowInstance.OutputDefs.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1227,
serialized_end=1279,
)
_FLOWINSTANCE_TABLEDEFS_DIMENSIONS = _descriptor.Descriptor(
name='Dimensions',
full_name='flow.FlowInstance.TableDefs.Dimensions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.FlowInstance.TableDefs.Dimensions.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.FlowInstance.TableDefs.Dimensions.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1437,
serialized_end=1475,
)
_FLOWINSTANCE_TABLEDEFS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='flow.FlowInstance.TableDefs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.FlowInstance.TableDefs.Columns.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.FlowInstance.TableDefs.Columns.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1477,
serialized_end=1512,
)
_FLOWINSTANCE_TABLEDEFS = _descriptor.Descriptor(
name='TableDefs',
full_name='flow.FlowInstance.TableDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.FlowInstance.TableDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.FlowInstance.TableDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='flow.FlowInstance.TableDefs.dimensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='columns', full_name='flow.FlowInstance.TableDefs.columns', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FLOWINSTANCE_TABLEDEFS_DIMENSIONS, _FLOWINSTANCE_TABLEDEFS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1282,
serialized_end=1512,
)
_FLOWINSTANCE = _descriptor.Descriptor(
name='FlowInstance',
full_name='flow.FlowInstance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stepList', full_name='flow.FlowInstance.stepList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='taskId', full_name='flow.FlowInstance.taskId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceMap', full_name='flow.FlowInstance.instanceMap', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='flow.FlowInstance.outputs', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='runningSteps', full_name='flow.FlowInstance.runningSteps', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='needNotify', full_name='flow.FlowInstance.needNotify', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='flow.FlowInstance.startTime', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='flow.FlowInstance.endTime', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='currentTime', full_name='flow.FlowInstance.currentTime', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='totalStatus', full_name='flow.FlowInstance.totalStatus', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='flow.FlowInstance.message', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='taskCounter', full_name='flow.FlowInstance.taskCounter', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowOutputsData', full_name='flow.FlowInstance.flowOutputsData', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableData', full_name='flow.FlowInstance.tableData', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='standardOutputs', full_name='flow.FlowInstance.standardOutputs', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agentData', full_name='flow.FlowInstance.agentData', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowId', full_name='flow.FlowInstance.flowId', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='flow.FlowInstance.version', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowInputs', full_name='flow.FlowInstance.flowInputs', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowEnv', full_name='flow.FlowInstance.flowEnv', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='flow.FlowInstance.metadata', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.FlowInstance.name', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='flow.FlowInstance.org', index=22,
number=23, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowOutputs', full_name='flow.FlowInstance.flowOutputs', index=23,
number=24, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputDefs', full_name='flow.FlowInstance.outputDefs', index=24,
number=25, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableDefs', full_name='flow.FlowInstance.tableDefs', index=25,
number=26, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='flow.FlowInstance.creator', index=26,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='flow.FlowInstance.category', index=27,
number=28, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='flow.FlowInstance.updateTime', index=28,
number=29, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='createTime', full_name='flow.FlowInstance.createTime', index=29,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FLOWINSTANCE_METADATA, _FLOWINSTANCE_FLOWOUTPUTS, _FLOWINSTANCE_OUTPUTDEFS, _FLOWINSTANCE_TABLEDEFS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=1512,
)
# -- Machine-generated by the protocol buffer compiler (protoc); do not edit. --
# Link nested message descriptors back to their containing types, and point
# message-typed fields at the descriptor of the message they hold.
_FLOWINSTANCE_METADATA.containing_type = _FLOWINSTANCE
_FLOWINSTANCE_FLOWOUTPUTS_COLUMNS.containing_type = _FLOWINSTANCE_FLOWOUTPUTS
_FLOWINSTANCE_FLOWOUTPUTS.fields_by_name['columns'].message_type = _FLOWINSTANCE_FLOWOUTPUTS_COLUMNS
_FLOWINSTANCE_FLOWOUTPUTS.containing_type = _FLOWINSTANCE
_FLOWINSTANCE_OUTPUTDEFS.containing_type = _FLOWINSTANCE
_FLOWINSTANCE_TABLEDEFS_DIMENSIONS.containing_type = _FLOWINSTANCE_TABLEDEFS
_FLOWINSTANCE_TABLEDEFS_COLUMNS.containing_type = _FLOWINSTANCE_TABLEDEFS
_FLOWINSTANCE_TABLEDEFS.fields_by_name['dimensions'].message_type = _FLOWINSTANCE_TABLEDEFS_DIMENSIONS
_FLOWINSTANCE_TABLEDEFS.fields_by_name['columns'].message_type = _FLOWINSTANCE_TABLEDEFS_COLUMNS
_FLOWINSTANCE_TABLEDEFS.containing_type = _FLOWINSTANCE
# Message-typed fields of the top-level flow.FlowInstance message: stepList
# comes from the imported flow_execute_step proto; the rest use
# google.protobuf Struct/Value wrappers or nested FlowInstance messages.
_FLOWINSTANCE.fields_by_name['stepList'].message_type = easy__work__service__sdk_dot_model_dot_flow_dot_flow__execute__step__pb2._FLOWEXECUTESTEP
_FLOWINSTANCE.fields_by_name['instanceMap'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FLOWINSTANCE.fields_by_name['outputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['runningSteps'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['flowOutputsData'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['tableData'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['standardOutputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['agentData'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['flowInputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['flowEnv'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOWINSTANCE.fields_by_name['metadata'].message_type = _FLOWINSTANCE_METADATA
_FLOWINSTANCE.fields_by_name['flowOutputs'].message_type = _FLOWINSTANCE_FLOWOUTPUTS
_FLOWINSTANCE.fields_by_name['outputDefs'].message_type = _FLOWINSTANCE_OUTPUTDEFS
_FLOWINSTANCE.fields_by_name['tableDefs'].message_type = _FLOWINSTANCE_TABLEDEFS
# Register the top-level message with the file descriptor and symbol database.
DESCRIPTOR.message_types_by_name['FlowInstance'] = _FLOWINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FlowInstance = _reflection.GeneratedProtocolMessageType('FlowInstance', (_message.Message,), {
'Metadata' : _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'DESCRIPTOR' : _FLOWINSTANCE_METADATA,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.Metadata)
})
,
'FlowOutputs' : _reflection.GeneratedProtocolMessageType('FlowOutputs', (_message.Message,), {
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _FLOWINSTANCE_FLOWOUTPUTS_COLUMNS,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.FlowOutputs.Columns)
})
,
'DESCRIPTOR' : _FLOWINSTANCE_FLOWOUTPUTS,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.FlowOutputs)
})
,
'OutputDefs' : _reflection.GeneratedProtocolMessageType('OutputDefs', (_message.Message,), {
'DESCRIPTOR' : _FLOWINSTANCE_OUTPUTDEFS,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.OutputDefs)
})
,
'TableDefs' : _reflection.GeneratedProtocolMessageType('TableDefs', (_message.Message,), {
'Dimensions' : _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), {
'DESCRIPTOR' : _FLOWINSTANCE_TABLEDEFS_DIMENSIONS,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.TableDefs.Dimensions)
})
,
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _FLOWINSTANCE_TABLEDEFS_COLUMNS,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.TableDefs.Columns)
})
,
'DESCRIPTOR' : _FLOWINSTANCE_TABLEDEFS,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance.TableDefs)
})
,
'DESCRIPTOR' : _FLOWINSTANCE,
'__module__' : 'flow_instance_pb2'
# @@protoc_insertion_point(class_scope:flow.FlowInstance)
})
_sym_db.RegisterMessage(FlowInstance)
_sym_db.RegisterMessage(FlowInstance.Metadata)
_sym_db.RegisterMessage(FlowInstance.FlowOutputs)
_sym_db.RegisterMessage(FlowInstance.FlowOutputs.Columns)
_sym_db.RegisterMessage(FlowInstance.OutputDefs)
_sym_db.RegisterMessage(FlowInstance.TableDefs)
_sym_db.RegisterMessage(FlowInstance.TableDefs.Dimensions)
_sym_db.RegisterMessage(FlowInstance.TableDefs.Columns)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f7f862ddf3e30024d4d37b5aa4d3cfe3eb121211 | 846 | py | Python | test/test_docker.py | ORNL/curifactory | f8be235b7fa7b91cc86f61d610d7093075b89d1f | [
"BSD-3-Clause"
] | 4 | 2022-01-25T18:27:49.000Z | 2022-03-30T22:57:04.000Z | test/test_docker.py | ORNL/curifactory | f8be235b7fa7b91cc86f61d610d7093075b89d1f | [
"BSD-3-Clause"
] | 1 | 2022-03-05T19:10:42.000Z | 2022-03-07T18:00:49.000Z | test/test_docker.py | ORNL/curifactory | f8be235b7fa7b91cc86f61d610d7093075b89d1f | [
"BSD-3-Clause"
] | null | null | null | import curifactory.utils
from curifactory import docker
from pytest_mock import mocker # noqa: F401 -- flake8 doesn't see it's used as fixture
def test_build_docker(mocker):  # noqa: F811 -- mocker has to be passed in as fixture
    """Guard test: fails if the `docker build` command line ever changes,
    so any modification has to be a conscious, reviewed decision."""
    mocker.patch("curifactory.utils.run_command")

    docker.build_docker("test_name", "./test_cache_folder", "some_version_string")

    # Expected argv, spelled out so a diff against it is easy to read.
    expected_command = [
        "docker",
        "build",
        "-f",
        "docker/dockerfile",
        "--tag",
        "test_name:some_version_string",
        "--tag",
        "test_name",
        "--build-arg",
        "run_folder=./test_cache_folder",
        ".",
        "--progress",
        "tty",
    ]
    curifactory.utils.run_command.assert_called_once_with(*expected_command)
from curifactory import docker
from pytest_mock import mocker
def test_build_docker(mocker):
    """Verify build_docker invokes exactly the expected `docker build`
    command line (guards against accidental changes to the command)."""
    # Stub out the shell-out helper so no real docker process is started.
    mocker.patch("curifactory.utils.run_command")
    docker.build_docker("test_name", "./test_cache_folder", "some_version_string")
    curifactory.utils.run_command.assert_called_once_with(
        "docker",
        "build",
        "-f",
        "docker/dockerfile",
        "--tag",
        "test_name:some_version_string",
        "--tag",
        "test_name",
        "--build-arg",
        "run_folder=./test_cache_folder",
        ".",
        "--progress",
        "tty",
    )
| true | true |
f7f8642da248ec8872dc232e14a27351ddf47a39 | 56 | py | Python | helper.py | rp8jd/cs3240-labdemo | 304bb043b1314cd387f9af071051b15f8359cf1c | [
"MIT"
] | null | null | null | helper.py | rp8jd/cs3240-labdemo | 304bb043b1314cd387f9af071051b15f8359cf1c | [
"MIT"
] | null | null | null | helper.py | rp8jd/cs3240-labdemo | 304bb043b1314cd387f9af071051b15f8359cf1c | [
"MIT"
] | null | null | null | #Rhea Prahlad, rp8jd
def greeting(msg):
    """Print the given message to standard output."""
    text = msg
    print(text)
| 11.2 | 20 | 0.678571 |
def greeting(msg):
    """Print *msg* to standard output."""
    print(msg)
| true | true |
f7f864aa9b310ad1d40db644675ae355d5c072b5 | 7,267 | py | Python | blaze/objects/array.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | 1 | 2018-01-24T08:54:04.000Z | 2018-01-24T08:54:04.000Z | blaze/objects/array.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | null | null | null | blaze/objects/array.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | null | null | null | """This file defines the Concrete Array --- a leaf node in the expression graph
A concrete array is constructed from a Data Descriptor Object which handles the
indexing and basic interpretation of bytes
"""
from __future__ import absolute_import, division, print_function
import datashape
from ..compute.ops import ufuncs
from .. import compute
from ..datadescriptor import (DDesc, DeferredDescriptor, ddesc_as_py)
from ..io import _printing
class Array(object):
"""An Array contains:
DDesc
Sequence of Bytes (where are the bytes)
Index Object (how do I get to them)
Data Shape Object (what are the bytes? how do I interpret them)
axis and dimension labels
user-defined meta-data (whatever are needed --- provenance propagation)
"""
def __init__(self, data, axes=None, labels=None, user={}):
if not isinstance(data, DDesc):
raise TypeError(('Constructing a blaze array directly '
'requires a data descriptor, not type '
'%r') % (type(data)))
self.ddesc = data
self.axes = axes or [''] * (len(self.ddesc.dshape) - 1)
self.labels = labels or [None] * (len(self.ddesc.dshape) - 1)
self.user = user
self.expr = None
if isinstance(data, DeferredDescriptor):
# NOTE: we need 'expr' on the Array to perform dynamic programming:
# Two concrete arrays should have a single Op! We cannot
# store this in the data descriptor, since there are many
self.expr = data.expr # hurgh
# Inject the record attributes.
injected_props = {}
# This is a hack to help get the blaze-web server onto blaze arrays.
ds = data.dshape
ms = ds[-1] if isinstance(ds, datashape.DataShape) else ds
if isinstance(ms, datashape.Record):
for name in ms.names:
injected_props[name] = _named_property(name)
# Need to inject attributes on the Array depending on dshape
# attributes, in cases other than Record
if data.dshape in [datashape.dshape('int32'), datashape.dshape('int64')]:
def __int__(self):
# Evaluate to memory
e = compute.eval.eval(self)
return int(e.ddesc.dynd_arr())
injected_props['__int__'] = __int__
elif data.dshape in [datashape.dshape('float32'), datashape.dshape('float64')]:
def __float__(self):
# Evaluate to memory
e = compute.eval.eval(self)
return float(e.ddesc.dynd_arr())
injected_props['__float__'] = __float__
elif ms in [datashape.complex_float32, datashape.complex_float64]:
if len(data.dshape) == 1:
def __complex__(self):
# Evaluate to memory
e = compute.eval.eval(self)
return complex(e.ddesc.dynd_arr())
injected_props['__complex__'] = __complex__
injected_props['real'] = _ufunc_to_property(ufuncs.real)
injected_props['imag'] = _ufunc_to_property(ufuncs.imag)
if injected_props:
self.__class__ = type('Array', (Array,), injected_props)
    @property
    def dshape(self):
        """Datashape of the underlying data descriptor."""
        return self.ddesc.dshape
    @property
    def deferred(self):
        """True if the underlying descriptor is deferred (lazy)."""
        return self.ddesc.capabilities.deferred
def __array__(self):
import numpy as np
# TODO: Expose PEP-3118 buffer interface
if hasattr(self.ddesc, "__array__"):
return np.array(self.ddesc)
return np.array(self.ddesc.dynd_arr())
    def __iter__(self):
        """Iterate over the outermost dimension.
        1-D arrays yield plain Python values; higher-dimensional arrays
        yield blaze Arrays wrapping each sub-descriptor.
        """
        if len(self.dshape.shape) == 1:
            return iter(ddesc_as_py(self.ddesc))
        return (Array(dd) for dd in self.ddesc.__iter__())
    def __getitem__(self, key):
        """Index/slice by delegating to the data descriptor; returns an Array."""
        return Array(self.ddesc.__getitem__(key))
    def __setitem__(self, key, val):
        """Assign into the underlying data descriptor in place."""
        self.ddesc.__setitem__(key, val)
    def __len__(self):
        """Length of the outermost dimension.
        Raises IndexError for scalar (0-d) arrays, which have no length.
        """
        shape = self.dshape.shape
        if shape:
            return shape[0]
        raise IndexError('Scalar blaze arrays have no length')
    def __nonzero__(self):
        """Truth value (Python 2 protocol); defined only for 0-d arrays."""
        # For Python 2
        if len(self.dshape.shape) == 0:
            # Evaluate to memory
            e = compute.eval.eval(self)
            return bool(e.ddesc.dynd_arr())
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all()")
    def __bool__(self):
        """Truth value (Python 3 protocol); defined only for 0-d arrays."""
        # For Python 3
        if len(self.dshape.shape) == 0:
            # Evaluate to memory
            e = compute.eval.eval(self)
            return bool(e.ddesc.dynd_arr())
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all()")
def __str__(self):
if hasattr(self.ddesc, '_printer'):
return self.ddesc._printer()
return _printing.array_str(self)
    def __repr__(self):
        """Unambiguous rendering; honors a descriptor-supplied repr printer."""
        if hasattr(self.ddesc, "_printer_repr"):
            return self.ddesc._printer_repr()
        return _printing.array_repr(self)
def _named_property(name):
    """Build a read-only property extracting record field ``name``."""
    def getter(self):
        return Array(self.ddesc.getattr(name))
    return property(getter)
def _ufunc_to_property(uf):
    """Build a read-only property that applies ufunc ``uf`` to the array."""
    @property
    def getprop(self):
        return uf(self)
    return getprop
def binding(f):
    """Wrap callable ``f`` so it can be attached to a class as a method.
    The wrapper forwards the instance plus positional arguments to ``f``.
    """
    def bound(self, *args):
        return f(self, *args)
    return bound
def __rufunc__(f):
    """Return a reflected binary op: swaps the operands before calling ``f``."""
    def __rop__(self, other):
        return f(other, self)
    return __rop__
def _inject_special_binary(names):
    """Attach __op__/__rop__ method pairs to Array.
    ``names`` is an iterable of (ufunc_name, special_name) tuples; the ufunc
    is looked up on the ufuncs module.
    """
    for ufunc_name, special_name in names:
        ufunc = getattr(ufuncs, ufunc_name)
        setattr(Array, '__%s__' % special_name, binding(ufunc))
        setattr(Array, '__r%s__' % special_name, binding(__rufunc__(ufunc)))
def _inject_special(names):
    """Attach unary __op__ methods to Array from (ufunc_name, special_name)."""
    for ufunc_name, special_name in names:
        ufunc = getattr(ufuncs, ufunc_name)
        setattr(Array, '__%s__' % special_name, binding(ufunc))
# Wire the arithmetic/comparison/bitwise ufuncs onto Array as binary operators
# (both normal and reflected variants).
_inject_special_binary([
    ('add', 'add'),
    ('subtract', 'sub'),
    ('multiply', 'mul'),
    ('true_divide', 'truediv'),
    ('mod', 'mod'),
    ('floor_divide', 'floordiv'),
    ('equal', 'eq'),
    ('not_equal', 'ne'),
    ('greater', 'gt'),
    ('greater_equal', 'ge'),
    ('less_equal', 'le'),
    ('less', 'lt'),
    ('divide', 'div'),
    ('bitwise_and', 'and'),
    ('bitwise_or', 'or'),
    ('bitwise_xor', 'xor'),
    ('power', 'pow'),
])
# Unary operators.
_inject_special([
    ('bitwise_not', 'invert'),
    ('negative', 'neg'),
])
"""
These should be functions
@staticmethod
def fromfiles(list_of_files, converters):
raise NotImplementedError
@staticmethod
def fromfile(file, converter):
raise NotImplementedError
@staticmethod
def frombuffers(list_of_buffers, converters):
raise NotImplementedError
@staticmethod
def frombuffer(buffer, converter):
raise NotImplementedError
@staticmethod
def fromobjects():
raise NotImplementedError
@staticmethod
def fromiterator(buffer):
raise NotImplementedError
"""
| 30.028926 | 87 | 0.601349 |
from __future__ import absolute_import, division, print_function
import datashape
from ..compute.ops import ufuncs
from .. import compute
from ..datadescriptor import (DDesc, DeferredDescriptor, ddesc_as_py)
from ..io import _printing
class Array(object):
    """An N-dimensional array backed by a blaze data descriptor.
    Holds only metadata (axes, labels, user data) plus the descriptor; the
    constructor dynamically injects record-field properties and numeric
    conversion dunders depending on the descriptor's dshape.
    """
    def __init__(self, data, axes=None, labels=None, user={}):
        # NOTE(review): mutable default ``user={}`` is shared across instances
        # that do not pass their own dict -- confirm intended.
        if not isinstance(data, DDesc):
            raise TypeError(('Constructing a blaze array directly '
                             'requires a data descriptor, not type '
                             '%r') % (type(data)))
        self.ddesc = data
        # dshape includes the measure, hence len(dshape) - 1 dimensions.
        self.axes = axes or [''] * (len(self.ddesc.dshape) - 1)
        self.labels = labels or [None] * (len(self.ddesc.dshape) - 1)
        self.user = user
        self.expr = None
        if isinstance(data, DeferredDescriptor):
            self.expr = data.expr
        injected_props = {}
        ds = data.dshape
        # The measure is the last element of a DataShape; bare measures pass through.
        ms = ds[-1] if isinstance(ds, datashape.DataShape) else ds
        if isinstance(ms, datashape.Record):
            # Expose every record field as a property returning a sub-array.
            for name in ms.names:
                injected_props[name] = _named_property(name)
        if data.dshape in [datashape.dshape('int32'), datashape.dshape('int64')]:
            def __int__(self):
                e = compute.eval.eval(self)
                return int(e.ddesc.dynd_arr())
            injected_props['__int__'] = __int__
        elif data.dshape in [datashape.dshape('float32'), datashape.dshape('float64')]:
            def __float__(self):
                e = compute.eval.eval(self)
                return float(e.ddesc.dynd_arr())
            injected_props['__float__'] = __float__
        elif ms in [datashape.complex_float32, datashape.complex_float64]:
            if len(data.dshape) == 1:
                def __complex__(self):
                    e = compute.eval.eval(self)
                    return complex(e.ddesc.dynd_arr())
                injected_props['__complex__'] = __complex__
            injected_props['real'] = _ufunc_to_property(ufuncs.real)
            injected_props['imag'] = _ufunc_to_property(ufuncs.imag)
        if injected_props:
            # Dunders are looked up on the type, so swap in a one-off subclass.
            self.__class__ = type('Array', (Array,), injected_props)
    @property
    def dshape(self):
        """Datashape of the underlying data descriptor."""
        return self.ddesc.dshape
    @property
    def deferred(self):
        """True if the underlying descriptor is deferred (lazy)."""
        return self.ddesc.capabilities.deferred
    def __array__(self):
        """NumPy interop hook."""
        import numpy as np
        if hasattr(self.ddesc, "__array__"):
            return np.array(self.ddesc)
        return np.array(self.ddesc.dynd_arr())
    def __iter__(self):
        """Iterate the outer dimension: scalars for 1-D, Arrays otherwise."""
        if len(self.dshape.shape) == 1:
            return iter(ddesc_as_py(self.ddesc))
        return (Array(dd) for dd in self.ddesc.__iter__())
    def __getitem__(self, key):
        return Array(self.ddesc.__getitem__(key))
    def __setitem__(self, key, val):
        self.ddesc.__setitem__(key, val)
    def __len__(self):
        """Length of the outer dimension; IndexError for 0-d arrays."""
        shape = self.dshape.shape
        if shape:
            return shape[0]
        raise IndexError('Scalar blaze arrays have no length')
    def __nonzero__(self):
        # Python 2 truth protocol; only 0-d arrays have a truth value.
        if len(self.dshape.shape) == 0:
            e = compute.eval.eval(self)
            return bool(e.ddesc.dynd_arr())
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all()")
    def __bool__(self):
        # Python 3 truth protocol; only 0-d arrays have a truth value.
        if len(self.dshape.shape) == 0:
            e = compute.eval.eval(self)
            return bool(e.ddesc.dynd_arr())
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all()")
    def __str__(self):
        if hasattr(self.ddesc, '_printer'):
            return self.ddesc._printer()
        return _printing.array_str(self)
    def __repr__(self):
        if hasattr(self.ddesc, "_printer_repr"):
            return self.ddesc._printer_repr()
        return _printing.array_repr(self)
def _named_property(name):
    """Build a read-only property extracting record field ``name``."""
    @property
    def getprop(self):
        return Array(self.ddesc.getattr(name))
    return getprop
def _ufunc_to_property(uf):
    """Build a read-only property that applies ufunc ``uf`` to the array."""
    @property
    def getprop(self):
        return uf(self)
    return getprop
def binding(f):
    """Adapt callable ``f`` for attachment to a class as an instance method."""
    return lambda self, *args: f(self, *args)
def __rufunc__(f):
    """Return a reflected binary op: swaps the operands before calling ``f``."""
    def __rop__(self, other):
        return f(other, self)
    return __rop__
def _inject_special_binary(names):
    """Attach __op__/__rop__ pairs to Array from (ufunc_name, special_name)."""
    for ufunc_name, special_name in names:
        ufunc = getattr(ufuncs, ufunc_name)
        setattr(Array, '__%s__' % special_name, binding(ufunc))
        setattr(Array, '__r%s__' % special_name, binding(__rufunc__(ufunc)))
def _inject_special(names):
    """Attach unary __op__ methods to Array from (ufunc_name, special_name)."""
    for ufunc_name, special_name in names:
        ufunc = getattr(ufuncs, ufunc_name)
        setattr(Array, '__%s__' % special_name, binding(ufunc))
# Install the binary (and reflected) operators on Array.
_inject_special_binary([
    ('add', 'add'),
    ('subtract', 'sub'),
    ('multiply', 'mul'),
    ('true_divide', 'truediv'),
    ('mod', 'mod'),
    ('floor_divide', 'floordiv'),
    ('equal', 'eq'),
    ('not_equal', 'ne'),
    ('greater', 'gt'),
    ('greater_equal', 'ge'),
    ('less_equal', 'le'),
    ('less', 'lt'),
    ('divide', 'div'),
    ('bitwise_and', 'and'),
    ('bitwise_or', 'or'),
    ('bitwise_xor', 'xor'),
    ('power', 'pow'),
])
# Install the unary operators on Array.
_inject_special([
    ('bitwise_not', 'invert'),
    ('negative', 'neg'),
])
| true | true |
f7f864e3c0dcb5ab73a36652fec21d143736473d | 3,278 | py | Python | src/vggish_input.py | LaiaTarres/TransferLearningMusic | d662327d320031ea3492720b5134ccc01d17983a | [
"0BSD"
] | null | null | null | src/vggish_input.py | LaiaTarres/TransferLearningMusic | d662327d320031ea3492720b5134ccc01d17983a | [
"0BSD"
] | null | null | null | src/vggish_input.py | LaiaTarres/TransferLearningMusic | d662327d320031ea3492720b5134ccc01d17983a | [
"0BSD"
] | null | null | null | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute input examples for VGGish from audio waveform."""
import numpy as np
import resampy
from scipy.io import wavfile
from src.utils import wavefile_to_waveform
import src.mel_features as mel_features
import src.vggish_params as vggish_params
def waveform_to_examples(data, sample_rate):
  """Converts audio waveform into an array of examples for VGGish.
  Args:
    data: np.array of either one dimension (mono) or two dimensions
      (multi-channel, with the outer dimension representing channels).
      Each sample is generally expected to lie in the range [-1.0, +1.0],
      although this is not required.
    sample_rate: Sample rate of data.
  Returns:
    3-D np.array of shape [num_examples, num_frames, num_bands] which represents
    a sequence of examples, each of which contains a patch of log mel
    spectrogram, covering num_frames frames of audio and num_bands mel frequency
    bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
  """
  # Convert to mono by averaging across the channel axis.
  if len(data.shape) > 1:
    data = np.mean(data, axis=1)
  # Resample to the rate assumed by VGGish.
  if sample_rate != vggish_params.SAMPLE_RATE:
    data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
  # Compute log mel spectrogram features with VGGish's canonical parameters.
  log_mel = mel_features.log_mel_spectrogram(
      data,
      audio_sample_rate=vggish_params.SAMPLE_RATE,
      log_offset=vggish_params.LOG_OFFSET,
      window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
      hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
      num_mel_bins=vggish_params.NUM_MEL_BINS,
      lower_edge_hertz=vggish_params.MEL_MIN_HZ,
      upper_edge_hertz=vggish_params.MEL_MAX_HZ)
  # Frame features into examples (windows of spectrogram frames).
  features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
  example_window_length = int(round(
      vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
  example_hop_length = int(round(
      vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
  log_mel_examples = mel_features.frame(
      log_mel,
      window_length=example_window_length,
      hop_length=example_hop_length)
  return log_mel_examples
def wavfile_to_examples(wav_file):
  """Convenience wrapper around waveform_to_examples() for WAV input.
  Args:
    wav_file: String path to a file, or a file-like object, assumed to
      contain WAV audio data with signed 16-bit PCM samples.
  Returns:
    See waveform_to_examples.
  """
  waveform, sample_rate = wavefile_to_waveform(wav_file, 'vggish')
  return waveform_to_examples(waveform, sample_rate)
| 37.25 | 80 | 0.744966 |
import numpy as np
import resampy
from scipy.io import wavfile
from src.utils import wavefile_to_waveform
import src.mel_features as mel_features
import src.vggish_params as vggish_params
def waveform_to_examples(data, sample_rate):
  """Convert an audio waveform into a batch of VGGish log-mel examples.
  Returns a 3-D np.array [num_examples, num_frames, num_bands].
  """
  # Mix down multi-channel input to mono.
  if len(data.shape) > 1:
    data = np.mean(data, axis=1)
  # Resample to the rate VGGish expects.
  if sample_rate != vggish_params.SAMPLE_RATE:
    data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
  # Log mel spectrogram with VGGish's canonical parameters.
  log_mel = mel_features.log_mel_spectrogram(
      data,
      audio_sample_rate=vggish_params.SAMPLE_RATE,
      log_offset=vggish_params.LOG_OFFSET,
      window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
      hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
      num_mel_bins=vggish_params.NUM_MEL_BINS,
      lower_edge_hertz=vggish_params.MEL_MIN_HZ,
      upper_edge_hertz=vggish_params.MEL_MAX_HZ)
  # Frame the spectrogram into fixed-size examples.
  features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
  example_window_length = int(round(
      vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
  example_hop_length = int(round(
      vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
  log_mel_examples = mel_features.frame(
      log_mel,
      window_length=example_window_length,
      hop_length=example_hop_length)
  return log_mel_examples
def wavfile_to_examples(wav_file):
  """Read a 16-bit PCM WAV file (path or file-like) into VGGish examples."""
  samples, sr = wavefile_to_waveform(wav_file, 'vggish')
  return waveform_to_examples(samples, sr)
| true | true |
f7f866d38e7684d202b119c111405c30c1dc5cdc | 529 | py | Python | Ch4/reference-test-original/testlib/2ndcase1staction.py | SumitBisht/RobotFrameworkTestAutomation | 21d8e9feda5a5aa1a369f35ecb01706c3c8153a4 | [
"MIT"
] | 10 | 2015-04-10T07:57:27.000Z | 2020-06-18T13:47:22.000Z | Ch4/reference-test-original/testlib/2ndcase1staction.py | SumitBisht/RobotFrameworkTestAutomation | 21d8e9feda5a5aa1a369f35ecb01706c3c8153a4 | [
"MIT"
] | 1 | 2016-12-15T14:13:49.000Z | 2016-12-18T22:05:37.000Z | Ch4/reference-test-working-as-desired/testlib/2ndcase1staction.py | SumitBisht/RobotFrameworkTestAutomation | 21d8e9feda5a5aa1a369f35ecb01706c3c8153a4 | [
"MIT"
] | 12 | 2015-05-27T14:44:39.000Z | 2021-05-12T05:41:56.000Z | from __future__ import with_statement
from sikuliwrapper import *
# Register the custom image library and the global minimum-similarity
# threshold used by Sikuli image matching.
addImagePath(common.cfgImageLibrary)
Settings.MinSimilarity = common.imageMinSimilarity
class XTest(BaseLogger):
    """Sikuli-driven Robot Framework keyword library for this recorded case.
    Execute() replays a short keyboard interaction: press/release ENTER,
    then type the text "case changed" with recorded pauses between steps.
    """
    # One shared library instance per Robot Framework test suite.
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    def __init__(self):
        # No per-instance state is required.
        pass
    def Execute(self, *args):
        """Replay the recorded key presses and typing; ``args`` are ignored."""
        keyDown(Key.ENTER)
        keyUp(Key.ENTER)
        wait(1.156)
        # Sikuli's type(), not the builtin: sends keystrokes to the screen.
        type("case changed")
        wait(0.907)
| 23 | 51 | 0.625709 | from __future__ import with_statement
from sikuliwrapper import *
addImagePath(common.cfgImageLibrary)
Settings.MinSimilarity = common.imageMinSimilarity
class XTest(BaseLogger):
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
def __init__(self):
None
def Execute(self, *args):
keyDown(Key.ENTER)
keyUp(Key.ENTER)
wait(1.156)
type("case changed")
wait(0.907)
None
| true | true |
f7f866e1211984c775ed2633e807a4db3d35b86d | 7,919 | py | Python | python/cugraph/dask/common/input_utils.py | ajschmidt8/cugraph | 7ad8fc36254bbc7d1a1dc7d321a93b08a66c26ab | [
"Apache-2.0"
] | null | null | null | python/cugraph/dask/common/input_utils.py | ajschmidt8/cugraph | 7ad8fc36254bbc7d1a1dc7d321a93b08a66c26ab | [
"Apache-2.0"
] | null | null | null | python/cugraph/dask/common/input_utils.py | ajschmidt8/cugraph | 7ad8fc36254bbc7d1a1dc7d321a93b08a66c26ab | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
from collections import OrderedDict
from dask_cudf.core import DataFrame as dcDataFrame
from dask_cudf.core import Series as daskSeries
import cugraph.comms.comms as Comms
from cugraph.raft.dask.common.utils import get_client
from cugraph.dask.common.part_utils import _extract_partitions
from dask.distributed import default_client
from toolz import first
from functools import reduce
class DistributedDataHandler:
    """
    Class to centralize distributed data management. Functionalities include:
    - Data colocation
    - Worker information extraction
    - GPU futures extraction,
    Additional functionality can be added as needed. This class **does not**
    contain the actual data, just the metadata necessary to handle it,
    including common pieces of code that need to be performed to call
    Dask functions.
    The constructor is not meant to be used directly, but through the factory
    method DistributedDataHandler.create
    """
    def __init__(self, gpu_futures=None, workers=None,
                 datatype=None, multiple=False, client=None):
        self.client = get_client(client)
        self.gpu_futures = gpu_futures
        # Ordered mapping: worker address -> list of partition futures.
        self.worker_to_parts = _workers_to_parts(gpu_futures)
        self.workers = workers
        self.datatype = datatype
        self.multiple = multiple
        # The remaining attributes are filled lazily by the calculate_* methods.
        self.worker_info = None
        self.total_rows = None
        self.max_vertex_id = None
        self.ranks = None
        self.parts_to_sizes = None
        self.local_data = None
    @classmethod
    def get_client(cls, client=None):
        # Fall back to the current default Dask client when none is given.
        return default_client() if client is None else client
    """ Class methods for initalization """
    @classmethod
    def create(cls, data, client=None):
        """
        Creates a distributed data handler instance with the given
        distributed data set(s).
        Parameters
        ----------
        data : dask.array, dask.dataframe, or unbounded Sequence of
               dask.array or dask.dataframe.
        client : dask.distributedClient
        """
        client = cls.get_client(client)
        multiple = isinstance(data, Sequence)
        # Only dask-cudf DataFrames/Series are supported as graph input.
        if isinstance(first(data) if multiple else data,
                      (dcDataFrame, daskSeries)):
            datatype = 'cudf'
        else:
            raise Exception("Graph data must be dask-cudf dataframe")
        gpu_futures = client.sync(_extract_partitions, data, client)
        # Deduplicate worker addresses while preserving first-seen order.
        workers = tuple(OrderedDict.fromkeys(map(lambda x: x[0], gpu_futures)))
        return DistributedDataHandler(gpu_futures=gpu_futures, workers=workers,
                                      datatype=datatype, multiple=multiple,
                                      client=client)
    """ Methods to calculate further attributes """
    def calculate_worker_and_rank_info(self, comms):
        # Resolve each worker address to its comms rank.
        self.worker_info = comms.worker_info(comms.worker_addresses)
        self.ranks = dict()
        for w, futures in self.worker_to_parts.items():
            self.ranks[w] = self.worker_info[w]["rank"]
    def calculate_parts_to_sizes(self, comms=None, ranks=None):
        """Compute per-rank partition sizes and the overall row total."""
        if self.worker_info is None and comms is not None:
            self.calculate_worker_and_rank_info(comms)
        self.total_rows = 0
        self.parts_to_sizes = dict()
        # Submit a row-count task per worker, pinned to that worker.
        parts = [(wf[0], self.client.submit(
            _get_rows,
            wf[1],
            self.multiple,
            workers=[wf[0]],
            pure=False))
            for idx, wf in enumerate(self.worker_to_parts.items())]
        sizes = self.client.compute(parts, sync=True)
        for w, sizes_parts in sizes:
            sizes, total = sizes_parts
            self.parts_to_sizes[self.worker_info[w]["rank"]] = \
                sizes
            self.total_rows += total
    def calculate_local_data(self, comms, by):
        """Gather per-rank edge counts, vertex offsets/counts and the max id.
        ``by`` names the column each partition is keyed on (its last row is
        taken as the partition's maximum -- assumes sorted input; confirm).
        """
        if self.worker_info is None and comms is not None:
            self.calculate_worker_and_rank_info(comms)
        # rank -> future computing (edge_count, local_by_max, local_max).
        local_data = dict([(self.worker_info[wf[0]]["rank"],
                            self.client.submit(
                            _get_local_data,
                            wf[1],
                            by,
                            workers=[wf[0]]))
                           for idx, wf in enumerate(self.worker_to_parts.items()
                                                    )])
        _local_data_dict = self.client.compute(local_data, sync=True)
        local_data_dict = {'edges': [], 'offsets': [], 'verts': []}
        max_vid = 0
        for rank in range(len(_local_data_dict)):
            data = _local_data_dict[rank]
            local_data_dict['edges'].append(data[0])
            # Each rank's vertex range starts one past the previous rank's max.
            if rank == 0:
                local_offset = 0
            else:
                prev_data = _local_data_dict[rank-1]
                local_offset = prev_data[1] + 1
            local_data_dict['offsets'].append(local_offset)
            local_data_dict['verts'].append(data[1] - local_offset + 1)
            if data[2] > max_vid:
                max_vid = data[2]
        import numpy as np
        # Convert to int32 arrays as expected by the C++ layer.
        local_data_dict['edges'] = np.array(local_data_dict['edges'],
                                            dtype=np.int32)
        local_data_dict['offsets'] = np.array(local_data_dict['offsets'],
                                              dtype=np.int32)
        local_data_dict['verts'] = np.array(local_data_dict['verts'],
                                            dtype=np.int32)
        self.local_data = local_data_dict
        self.max_vertex_id = max_vid
def _get_local_data(df, by):
    """Per-partition stats: (edge count, local max of ``by``, max vertex id).
    ``df`` is a one-element sequence holding a (cudf) DataFrame with 'src'
    and 'dst' columns.  Taking .iloc[-1] as the partition maximum assumes
    the frame is sorted by ``by`` -- confirm against callers.
    """
    df = df[0]
    num_local_edges = len(df)
    local_by_max = df[by].iloc[-1]
    local_max = df[['src', 'dst']].max().max()
    return num_local_edges, local_by_max, local_max
""" Internal methods, API subject to change """
def _workers_to_parts(futures):
    """Group partition futures by owning worker.
    :param futures: list of (worker, part) tuples
    :return: OrderedDict mapping every comms worker to its list of parts;
             workers that own no parts keep the initial value ``None``.
    """
    mapping = OrderedDict.fromkeys(Comms.get_workers())
    for worker, part in futures:
        if mapping[worker] is None:
            mapping[worker] = []
        mapping[worker].append(part)
    return mapping
def _get_rows(objs, multiple):
    """Return per-part row counts and their total.
    objs     : list of partitions; each entry is the object itself or, when
               ``multiple`` is True, a sequence whose first item is the object.
    multiple : whether each entry wraps the object in a sequence.
    Returns (counts, total).  Uses ``sum`` instead of the previous
    ``reduce(lambda a, b: a + b, ...)`` so an empty partition list yields
    (``[]``, 0) instead of raising TypeError.
    """
    def get_obj(x): return x[0] if multiple else x
    counts = [get_obj(x).shape[0] for x in objs]
    return counts, sum(counts)
def get_mg_batch_data(dask_cudf_data):
    """Wrap a dask-cudf object in a DistributedDataHandler."""
    return DistributedDataHandler.create(data=dask_cudf_data)
def get_distributed_data(input_ddf):
    """Create a DistributedDataHandler for ``input_ddf``, populating
    worker/rank info when a comms instance is active."""
    comms = Comms.get_comms()
    handler = DistributedDataHandler.create(data=input_ddf)
    if handler.worker_info is None and comms is not None:
        handler.calculate_worker_and_rank_info(comms)
    return handler
def get_vertex_partition_offsets(input_graph):
    """Cumulative per-partition vertex counts, prefixed with 0.
    Counts the vertices held by each partition of the renumber map and
    returns a cudf Series of running offsets cast to the graph's vertex dtype.
    """
    import cudf
    # Number of vertices in each partition of the renumbering map.
    renumber_vertex_count = input_graph.renumber_map.implementation.ddf.\
        map_partitions(len).compute()
    renumber_vertex_cumsum = renumber_vertex_count.cumsum()
    # Match the vertex id dtype used by the edge list.
    vertex_dtype = input_graph.edgelist.edgelist_df['src'].dtype
    vertex_partition_offsets = cudf.Series([0], dtype=vertex_dtype)
    vertex_partition_offsets = vertex_partition_offsets.append(cudf.Series(
        renumber_vertex_cumsum, dtype=vertex_dtype))
    return vertex_partition_offsets
| 34.133621 | 79 | 0.63354 |
from collections.abc import Sequence
from collections import OrderedDict
from dask_cudf.core import DataFrame as dcDataFrame
from dask_cudf.core import Series as daskSeries
import cugraph.comms.comms as Comms
from cugraph.raft.dask.common.utils import get_client
from cugraph.dask.common.part_utils import _extract_partitions
from dask.distributed import default_client
from toolz import first
from functools import reduce
class DistributedDataHandler:
    """Centralizes distributed (dask-cudf) data management: data colocation,
    worker info extraction and GPU futures extraction.  Holds metadata only,
    never the data itself.  Construct via the factory method ``create``.
    """
    def __init__(self, gpu_futures=None, workers=None,
                 datatype=None, multiple=False, client=None):
        self.client = get_client(client)
        self.gpu_futures = gpu_futures
        # worker address -> list of partition futures owned by that worker.
        self.worker_to_parts = _workers_to_parts(gpu_futures)
        self.workers = workers
        self.datatype = datatype
        self.multiple = multiple
        # Filled lazily by the calculate_* methods.
        self.worker_info = None
        self.total_rows = None
        self.max_vertex_id = None
        self.ranks = None
        self.parts_to_sizes = None
        self.local_data = None
    @classmethod
    def get_client(cls, client=None):
        # Fall back to the current default Dask client when none is given.
        return default_client() if client is None else client
    @classmethod
    def create(cls, data, client=None):
        """Factory: build a handler from dask-cudf data (or a sequence of it)."""
        client = cls.get_client(client)
        multiple = isinstance(data, Sequence)
        if isinstance(first(data) if multiple else data,
                      (dcDataFrame, daskSeries)):
            datatype = 'cudf'
        else:
            raise Exception("Graph data must be dask-cudf dataframe")
        gpu_futures = client.sync(_extract_partitions, data, client)
        # Deduplicate worker addresses while preserving first-seen order.
        workers = tuple(OrderedDict.fromkeys(map(lambda x: x[0], gpu_futures)))
        return DistributedDataHandler(gpu_futures=gpu_futures, workers=workers,
                                      datatype=datatype, multiple=multiple,
                                      client=client)
    def calculate_worker_and_rank_info(self, comms):
        """Resolve each worker address to its comms rank."""
        self.worker_info = comms.worker_info(comms.worker_addresses)
        self.ranks = dict()
        for w, futures in self.worker_to_parts.items():
            self.ranks[w] = self.worker_info[w]["rank"]
    def calculate_parts_to_sizes(self, comms=None, ranks=None):
        """Compute per-rank partition sizes and the overall row total."""
        if self.worker_info is None and comms is not None:
            self.calculate_worker_and_rank_info(comms)
        self.total_rows = 0
        self.parts_to_sizes = dict()
        # One row-count task per worker, pinned to that worker.
        parts = [(wf[0], self.client.submit(
            _get_rows,
            wf[1],
            self.multiple,
            workers=[wf[0]],
            pure=False))
            for idx, wf in enumerate(self.worker_to_parts.items())]
        sizes = self.client.compute(parts, sync=True)
        for w, sizes_parts in sizes:
            sizes, total = sizes_parts
            self.parts_to_sizes[self.worker_info[w]["rank"]] = \
                sizes
            self.total_rows += total
    def calculate_local_data(self, comms, by):
        """Gather per-rank edge counts, vertex offsets/counts and the max id."""
        if self.worker_info is None and comms is not None:
            self.calculate_worker_and_rank_info(comms)
        # rank -> future computing (edge_count, local_by_max, local_max).
        local_data = dict([(self.worker_info[wf[0]]["rank"],
                            self.client.submit(
                            _get_local_data,
                            wf[1],
                            by,
                            workers=[wf[0]]))
                           for idx, wf in enumerate(self.worker_to_parts.items()
                                                    )])
        _local_data_dict = self.client.compute(local_data, sync=True)
        local_data_dict = {'edges': [], 'offsets': [], 'verts': []}
        max_vid = 0
        for rank in range(len(_local_data_dict)):
            data = _local_data_dict[rank]
            local_data_dict['edges'].append(data[0])
            # Each rank's vertex range starts one past the previous rank's max.
            if rank == 0:
                local_offset = 0
            else:
                prev_data = _local_data_dict[rank-1]
                local_offset = prev_data[1] + 1
            local_data_dict['offsets'].append(local_offset)
            local_data_dict['verts'].append(data[1] - local_offset + 1)
            if data[2] > max_vid:
                max_vid = data[2]
        import numpy as np
        # int32 arrays as expected downstream.
        local_data_dict['edges'] = np.array(local_data_dict['edges'],
                                            dtype=np.int32)
        local_data_dict['offsets'] = np.array(local_data_dict['offsets'],
                                              dtype=np.int32)
        local_data_dict['verts'] = np.array(local_data_dict['verts'],
                                            dtype=np.int32)
        self.local_data = local_data_dict
        self.max_vertex_id = max_vid
def _get_local_data(df, by):
    """Per-partition stats: (edge count, local max of ``by``, max vertex id).
    Assumes the wrapped frame is sorted by ``by`` (uses .iloc[-1]) -- confirm.
    """
    df = df[0]
    num_local_edges = len(df)
    local_by_max = df[by].iloc[-1]
    local_max = df[['src', 'dst']].max().max()
    return num_local_edges, local_by_max, local_max
def _workers_to_parts(futures):
    """Group (worker, part) futures into an OrderedDict keyed by worker.
    Workers owning no parts keep the initial value ``None``.
    """
    w_to_p_map = OrderedDict.fromkeys(Comms.get_workers())
    for w, p in futures:
        if w_to_p_map[w] is None:
            w_to_p_map[w] = []
        w_to_p_map[w].append(p)
    return w_to_p_map
def _get_rows(objs, multiple):
    """Per-part row counts plus their total.
    ``sum`` replaces the previous ``reduce(lambda a, b: a + b, ...)`` so an
    empty partition list yields (``[]``, 0) instead of raising TypeError.
    """
    def get_obj(x): return x[0] if multiple else x
    counts = [get_obj(x).shape[0] for x in objs]
    return counts, sum(counts)
def get_mg_batch_data(dask_cudf_data):
    """Wrap a dask-cudf object in a DistributedDataHandler."""
    data = DistributedDataHandler.create(data=dask_cudf_data)
    return data
def get_distributed_data(input_ddf):
    """Create a handler for ``input_ddf``; fill worker/rank info when comms exist."""
    ddf = input_ddf
    comms = Comms.get_comms()
    data = DistributedDataHandler.create(data=ddf)
    if data.worker_info is None and comms is not None:
        data.calculate_worker_and_rank_info(comms)
    return data
def get_vertex_partition_offsets(input_graph):
    """Cumulative per-partition vertex counts (prefixed with 0) as a cudf Series."""
    import cudf
    # Vertices held by each partition of the renumber map.
    renumber_vertex_count = input_graph.renumber_map.implementation.ddf.\
        map_partitions(len).compute()
    renumber_vertex_cumsum = renumber_vertex_count.cumsum()
    # Match the vertex id dtype used by the edge list.
    vertex_dtype = input_graph.edgelist.edgelist_df['src'].dtype
    vertex_partition_offsets = cudf.Series([0], dtype=vertex_dtype)
    vertex_partition_offsets = vertex_partition_offsets.append(cudf.Series(
        renumber_vertex_cumsum, dtype=vertex_dtype))
    return vertex_partition_offsets
| true | true |
f7f866ffa29448ab3f7e848c0c53552b8b4fe355 | 1,206 | py | Python | tariff_app/migrations/0002_auto_20180807_1548.py | bashmak/djing | 8cc0c670600254d288178acd47965f7b3db6856e | [
"Unlicense"
] | 23 | 2017-04-27T20:13:22.000Z | 2022-03-16T12:47:29.000Z | tariff_app/migrations/0002_auto_20180807_1548.py | bashmak/djing | 8cc0c670600254d288178acd47965f7b3db6856e | [
"Unlicense"
] | 2 | 2017-04-04T15:03:12.000Z | 2021-01-26T15:30:57.000Z | tariff_app/migrations/0002_auto_20180807_1548.py | bashmak/djing | 8cc0c670600254d288178acd47965f7b3db6856e | [
"Unlicense"
] | 13 | 2017-08-22T16:00:03.000Z | 2022-03-20T03:12:15.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-07 15:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11, 2018-08-07).
    Updates model Meta options for PeriodicPay and Tariff, widens the
    ``Tariff.calc_type`` choice set, and makes
    (speedIn, speedOut, amount, calc_type) unique together.
    """
    dependencies = [
        ('tariff_app', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='periodicpay',
            options={'ordering': ('-id',), 'permissions': (('can_view_periodic_pay', 'Can view periodic pay'),), 'verbose_name': 'Periodic pay', 'verbose_name_plural': 'Periodic pays'},
        ),
        migrations.AlterModelOptions(
            name='tariff',
            options={'ordering': ('title',), 'verbose_name': 'Service', 'verbose_name_plural': 'Services'},
        ),
        migrations.AlterField(
            model_name='tariff',
            name='calc_type',
            field=models.CharField(choices=[('Df', 'Base calculate functionality'), ('Dp', 'IS'), ('Cp', 'Private service'), ('Dl', 'IS Daily service')], default='Df', max_length=2, verbose_name='Script'),
        ),
        migrations.AlterUniqueTogether(
            name='tariff',
            unique_together={('speedIn', 'speedOut', 'amount', 'calc_type')},
        ),
    ]
| 36.545455 | 205 | 0.596186 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: model options, calc_type choices, and a
    uniqueness constraint on Tariff."""
    dependencies = [
        ('tariff_app', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='periodicpay',
            options={'ordering': ('-id',), 'permissions': (('can_view_periodic_pay', 'Can view periodic pay'),), 'verbose_name': 'Periodic pay', 'verbose_name_plural': 'Periodic pays'},
        ),
        migrations.AlterModelOptions(
            name='tariff',
            options={'ordering': ('title',), 'verbose_name': 'Service', 'verbose_name_plural': 'Services'},
        ),
        migrations.AlterField(
            model_name='tariff',
            name='calc_type',
            field=models.CharField(choices=[('Df', 'Base calculate functionality'), ('Dp', 'IS'), ('Cp', 'Private service'), ('Dl', 'IS Daily service')], default='Df', max_length=2, verbose_name='Script'),
        ),
        migrations.AlterUniqueTogether(
            name='tariff',
            unique_together={('speedIn', 'speedOut', 'amount', 'calc_type')},
        ),
    ]
| true | true |
f7f86744f4ef4e0c33ea6f94bb0a1af4489c2926 | 361 | py | Python | tests/contacts/test_check_contact.py | ECRSSS/PythonBarancevHomeworks | df190c7331f03f9d0e3c66a2ce72176d728f8cb0 | [
"Apache-2.0"
] | null | null | null | tests/contacts/test_check_contact.py | ECRSSS/PythonBarancevHomeworks | df190c7331f03f9d0e3c66a2ce72176d728f8cb0 | [
"Apache-2.0"
] | null | null | null | tests/contacts/test_check_contact.py | ECRSSS/PythonBarancevHomeworks | df190c7331f03f9d0e3c66a2ce72176d728f8cb0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contacts import Contact
def test_check_contact(app):
    """Contacts on the main page must match the ORM contacts (order-insensitive)."""
    app.navigation.to_contacts()
    db_contacts = app.orm.get_contacts_as_strings_list()
    ui_contacts = app.contacts.get_info_from_main_page_as_strings_list()
    assert sorted(db_contacts) == sorted(ui_contacts)
| 30.083333 | 79 | 0.764543 |
from model.contacts import Contact
def test_check_contact(app):
    """Main-page contact list must equal the ORM contact list, ignoring order."""
    app.navigation.to_contacts()
    orm_contacts = app.orm.get_contacts_as_strings_list()
    main_page_contacts = app.contacts.get_info_from_main_page_as_strings_list()
    # Sort both sides so the comparison is order-insensitive.
    orm_contacts.sort()
    main_page_contacts.sort()
    assert orm_contacts == main_page_contacts
| true | true |
f7f86775373b347567b3e90b759a05da5bab8779 | 1,062 | py | Python | src/transaction_output.py | PanosAntoniadis/noobcash | 47c9e7aabc010982d841e414c30b9c76cbb84b6d | [
"MIT"
] | 7 | 2020-04-11T15:21:53.000Z | 2022-03-29T21:12:15.000Z | src/transaction_output.py | PanosAntoniadis/noobcash | 47c9e7aabc010982d841e414c30b9c76cbb84b6d | [
"MIT"
] | 1 | 2021-06-10T20:32:31.000Z | 2021-06-10T20:32:31.000Z | src/transaction_output.py | PanosAntoniadis/noobcash | 47c9e7aabc010982d841e414c30b9c76cbb84b6d | [
"MIT"
] | null | null | null |
class TransactionOutput:
    """An output (UTXO) of a noobcash transaction.
    Attributes:
        transaction_id: id of the transaction that produced this output.
        recipient: recipient of the transferred amount.
        amount: number of nbc coins transferred.
        unspent: True until this output is consumed as a transaction input.
    """
    def __init__(self, transaction_id, recipient, amount):
        """Create a fresh (unspent) output."""
        self.transaction_id = transaction_id
        self.recipient = recipient
        self.amount = amount
        self.unspent = True
    @classmethod
    def fromdict(cls, output_dict):
        """Build a TransactionOutput from its dictionary form."""
        return cls(
            output_dict["transaction_id"],
            output_dict["recipient"],
            int(output_dict["amount"]),
        )
    def __str__(self):
        """Render the instance attributes as a dict string."""
        return str(self.__dict__)
| 34.258065 | 88 | 0.661017 |
class TransactionOutput:
    """A transaction output (UTXO) of a noobcash transaction."""
    def __init__(self, transaction_id, recipient, amount):
        self.transaction_id = transaction_id
        self.recipient = recipient
        self.amount = amount
        # New outputs always start unspent.
        self.unspent = True
    @classmethod
    def fromdict(cls, output_dict):
        """Build a TransactionOutput from a dict; coerces amount to int."""
        transaction_id = output_dict["transaction_id"]
        recipient = output_dict["recipient"]
        amount = int(output_dict["amount"])
        return cls(transaction_id, recipient, amount)
    def __str__(self):
        """Dict-style string of the instance attributes."""
        return str(self.__dict__)
| true | true |
f7f867c537b595a9f154fdc45487f76dc1a7bb0b | 2,916 | py | Python | ClemBot.Bot/bot/cogs/eval_cog.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 32 | 2021-07-10T18:51:29.000Z | 2022-02-27T17:07:28.000Z | ClemBot.Bot/bot/cogs/eval_cog.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 87 | 2021-06-29T05:11:35.000Z | 2022-03-27T14:37:14.000Z | ClemBot.Bot/bot/cogs/eval_cog.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 21 | 2021-06-23T23:46:17.000Z | 2022-03-19T16:16:05.000Z | import json
import logging
import typing as t
import aiohttp
import discord.ext.commands as commands
import discord.utils as utils
import bot.bot_secrets as bot_secrets
import bot.extensions as ext
log = logging.getLogger(__name__)
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
MAX_CONTENT_LENGTH = 1900
MAX_LINE_LENGTH = 15
EVAL_COMMAND_COOLDOWN = 2
class EvalCog(commands.Cog):
    """Cog exposing an ``eval`` command that runs user-supplied code on a remote REPL service."""
    def __init__(self, bot):
        self.bot = bot
    @ext.command(aliases=['e'])
    @commands.cooldown(1, EVAL_COMMAND_COOLDOWN, commands.BucketType.guild)
    @ext.long_help(
        'Allows for the evaluations of arbitrary python code directly in discord'
        'Supports all internal standard libraries like json or re'
    )
    @ext.short_help('Runs arbitrary python code in discord')
    @ext.example('eval print("hello world")')
    async def eval(self, ctx, *, code) -> None:
        """Strip code-fence markup, submit *code* to the REPL service and post the result."""
        # Remove Discord code-block markup so only the raw source is executed.
        code = code.replace('```python', '')
        code = code.replace('```py', '')
        code = code.replace('`', '')
        code = utils.escape_mentions(code)
        feedback_mes = await ctx.send('Code execution started')
        log.info('Code: {code} sent for evaluation by author: {author} in guild: {guild}',
                 code=code,
                 author=ctx.author.id,
                 guild=ctx.guild.id)
        output = await self._post_eval(code)
        stdout = output['stdout']
        stdout = stdout.strip('`')
        stdout = utils.escape_mentions(stdout)
        await feedback_mes.delete()
        # Discord messages cap at 2000 chars; refuse before hitting the API limit.
        if len(stdout) > MAX_CONTENT_LENGTH:
            await ctx.send(f'{ctx.author.mention} Attempted output length exceeds 2000 characters, Please try again')
            return
        result_emoji = ':white_check_mark:' if output['returncode'] == 0 else ':warning:'
        out = f'{ctx.author.mention} {result_emoji} Eval Completed with response code: {output["returncode"]}'
        if stdout:
            await ctx.send(f'{out}\n\n```{self._format(stdout)}```')
        else:
            await ctx.send(f'{out}\n\n```[No Output]```')
    def _format(self, resp):
        """Prefix each non-empty output line with a 3-digit line number; truncate past MAX_LINE_LENGTH lines."""
        lines = [f'{(i + 1):03d} | {line}' for i, line in enumerate(resp.split('\n')) if line]
        if len(lines) > MAX_LINE_LENGTH:
            lines = lines[:MAX_LINE_LENGTH]
            lines.append('... Output line limit exceeded, data truncated')
        return '\n'.join(lines)
    async def _post_eval(self, code) -> t.Optional[dict]:
        """POST *code* to the configured REPL endpoint; return its parsed JSON payload, or None on a non-200 response."""
        data = {
            "input": code
        }
        json_data = json.dumps(data)
        async with aiohttp.ClientSession() as s:
            async with s.post(bot_secrets.secrets.repl_url,
                              data=json_data,
                              headers=HEADERS) as resp:
                if resp.status == 200:
                    return json.loads(await resp.text())
def setup(bot):
    """discord.py extension entry point: register the cog on the bot."""
    bot.add_cog(EvalCog(bot))
| 32.043956 | 117 | 0.605281 | import json
import logging
import typing as t
import aiohttp
import discord.ext.commands as commands
import discord.utils as utils
import bot.bot_secrets as bot_secrets
import bot.extensions as ext
log = logging.getLogger(__name__)
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
MAX_CONTENT_LENGTH = 1900
MAX_LINE_LENGTH = 15
EVAL_COMMAND_COOLDOWN = 2
class EvalCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command(aliases=['e'])
@commands.cooldown(1, EVAL_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
'Allows for the evaluations of arbitrary python code directly in discord'
'Supports all internal standard libraries like json or re'
)
@ext.short_help('Runs arbitrary python code in discord')
@ext.example('eval print("hello world")')
async def eval(self, ctx, *, code) -> None:
code = code.replace('```python', '')
code = code.replace('```py', '')
code = code.replace('`', '')
code = utils.escape_mentions(code)
feedback_mes = await ctx.send('Code execution started')
log.info('Code: {code} sent for evaluation by author: {author} in guild: {guild}',
code=code,
author=ctx.author.id,
guild=ctx.guild.id)
output = await self._post_eval(code)
stdout = output['stdout']
stdout = stdout.strip('`')
stdout = utils.escape_mentions(stdout)
await feedback_mes.delete()
if len(stdout) > MAX_CONTENT_LENGTH:
await ctx.send(f'{ctx.author.mention} Attempted output length exceeds 2000 characters, Please try again')
return
result_emoji = ':white_check_mark:' if output['returncode'] == 0 else ':warning:'
out = f'{ctx.author.mention} {result_emoji} Eval Completed with response code: {output["returncode"]}'
if stdout:
await ctx.send(f'{out}\n\n```{self._format(stdout)}```')
else:
await ctx.send(f'{out}\n\n```[No Output]```')
def _format(self, resp):
lines = [f'{(i + 1):03d} | {line}' for i, line in enumerate(resp.split('\n')) if line]
if len(lines) > MAX_LINE_LENGTH:
lines = lines[:MAX_LINE_LENGTH]
lines.append('... Output line limit exceeded, data truncated')
return '\n'.join(lines)
async def _post_eval(self, code) -> t.Union[str, None]:
data = {
"input": code
}
json_data = json.dumps(data)
async with aiohttp.ClientSession() as s:
async with s.post(bot_secrets.secrets.repl_url,
data=json_data,
headers=HEADERS) as resp:
if resp.status == 200:
return json.loads(await resp.text())
def setup(bot):
bot.add_cog(EvalCog(bot))
| true | true |
f7f868afb857033e7f63e8cbbc06e5b73cc69103 | 2,163 | py | Python | emolga/layers/embeddings.py | shengwenbo/CopyNet | 013508d10ad5ed09514b233a75e7f41ce7f8fa94 | [
"MIT"
] | 193 | 2016-09-19T02:55:00.000Z | 2022-03-10T13:43:50.000Z | emolga/layers/embeddings.py | Lencof/CopyNet | 361910930337cd6a446df5dd27c539e0a17bfaf9 | [
"MIT"
] | 6 | 2016-11-25T03:38:23.000Z | 2019-03-30T06:11:31.000Z | emolga/layers/embeddings.py | Lencof/CopyNet | 361910930337cd6a446df5dd27c539e0a17bfaf9 | [
"MIT"
] | 77 | 2016-10-14T10:08:44.000Z | 2021-03-11T05:01:27.000Z | # -*- coding: utf-8 -*-
from .core import Layer
from emolga.utils.theano_utils import *
import emolga.basic.initializations as initializations
class Embedding(Layer):
    '''
    Turn positive integers (indexes) into denses vectors of fixed size.
    eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
    @input_dim: size of vocabulary (highest input integer + 1)
    @out_dim: size of dense representation
    '''
    def __init__(self, input_dim, output_dim, init='uniform', name=None):
        """Create the (input_dim x output_dim) embedding matrix W as the layer's only parameter."""
        super(Embedding, self).__init__()
        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.W = self.init((self.input_dim, self.output_dim))
        self.params = [self.W]
        if name is not None:
            self.set_name(name)
    def get_output_mask(self, X):
        """Mask that is 0 where X == 0 (index 0 treated as padding) and 1 elsewhere."""
        return T.ones_like(X) * (1 - T.eq(X, 0))
    def __call__(self, X, mask_zero=False, context=None):
        """Look up embeddings for the index tensor X.

        Without *context* this is a plain table lookup, W[X].  With *context*
        (a 3-D tensor: one extra block of embedding rows per batch element) the
        shared table W is repeated per batch row and the context rows are
        appended, so indices >= input_dim address that row's context entries.
        If mask_zero is True, a float32 zero-padding mask is returned as well.
        """
        if context is None:
            out = self.W[X]
        else:
            assert context.ndim == 3
            flag = False
            if X.ndim == 1:
                # promote to (batch, 1) so the reshape logic below is uniform
                flag = True
                X = X[:, None]
            b_size = context.shape[0]
            # per-row lookup table: shared W followed by that row's context vectors
            EMB = T.repeat(self.W[None, :, :], b_size, axis=0)
            EMB = T.concatenate([EMB, context], axis=1)
            m_size = EMB.shape[1]
            e_size = EMB.shape[2]
            maxlen = X.shape[1]
            EMB = EMB.reshape((b_size * m_size, e_size))
            # offset each row's indices into its own slice of the flattened table
            Z = (T.arange(b_size)[:, None] * m_size + X).reshape((b_size * maxlen,))
            out = EMB[Z]  # (b_size * maxlen, e_size)
            if not flag:
                out = out.reshape((b_size, maxlen, e_size))
            else:
                out = out.reshape((b_size, e_size))
        if mask_zero:
            return out, T.cast(self.get_output_mask(X), dtype='float32')
        else:
            return out
class Zero(Layer):
    """Layer mapping any input tensor to a zero tensor of identical shape."""
    def __call__(self, X):
        return T.zeros(X.shape)
class Bias(Layer):
    """Layer that flattens its input to 1-D and adds a trailing axis of size one."""
    def __call__(self, X):
        flat = X.flatten()
        return flat.dimshuffle(0, 'x')
| 27.379747 | 86 | 0.538141 |
from .core import Layer
from emolga.utils.theano_utils import *
import emolga.basic.initializations as initializations
class Embedding(Layer):
def __init__(self, input_dim, output_dim, init='uniform', name=None):
super(Embedding, self).__init__()
self.init = initializations.get(init)
self.input_dim = input_dim
self.output_dim = output_dim
self.W = self.init((self.input_dim, self.output_dim))
self.params = [self.W]
if name is not None:
self.set_name(name)
def get_output_mask(self, X):
return T.ones_like(X) * (1 - T.eq(X, 0))
def __call__(self, X, mask_zero=False, context=None):
if context is None:
out = self.W[X]
else:
assert context.ndim == 3
flag = False
if X.ndim == 1:
flag = True
X = X[:, None]
b_size = context.shape[0]
EMB = T.repeat(self.W[None, :, :], b_size, axis=0)
EMB = T.concatenate([EMB, context], axis=1)
m_size = EMB.shape[1]
e_size = EMB.shape[2]
maxlen = X.shape[1]
EMB = EMB.reshape((b_size * m_size, e_size))
Z = (T.arange(b_size)[:, None] * m_size + X).reshape((b_size * maxlen,))
out = EMB[Z]
if not flag:
out = out.reshape((b_size, maxlen, e_size))
else:
out = out.reshape((b_size, e_size))
if mask_zero:
return out, T.cast(self.get_output_mask(X), dtype='float32')
else:
return out
class Zero(Layer):
def __call__(self, X):
out = T.zeros(X.shape)
return out
class Bias(Layer):
def __call__(self, X):
tmp = X.flatten()
tmp = tmp.dimshuffle(0, 'x')
return tmp
| true | true |
f7f868d8e6d70ef9898d5cd30efbf9e8192e9a4a | 9,189 | py | Python | doxy_db/interface.py | abathur/doxy_db | fb7767dbdd0b41614d8a551609d9d6ff064b3e9c | [
"MIT"
] | null | null | null | doxy_db/interface.py | abathur/doxy_db | fb7767dbdd0b41614d8a551609d9d6ff064b3e9c | [
"MIT"
] | null | null | null | doxy_db/interface.py | abathur/doxy_db | fb7767dbdd0b41614d8a551609d9d6ff064b3e9c | [
"MIT"
] | null | null | null | """
A small interface to a doxygen database manual.
This API is intended to sit above the manual's abstraction level to minimize the knowledge a consumer needs to have of Doxygen's idioms.
It may meet your needs out of the box, but it probably won't meet everyone's. Even if it doesn't fit your needs, it should be useful for understanding how to interact with the underlying APIs to tailor something to your needs.
"""
from lxml import html
from functools import lru_cache
import json
#
class XMLTranslator(object):
    """
    A very minimal XML translator. Only attempts to strip tags and provide a tolerable plaintext experience.
    Multiple doxygen documentation fields can contain complex entities that can't be sensibly rendered in plaintext (including HTML, markdown, doxygen commands, and so on). The only sensible non-destructive action the SQLite3 generator can take is to output these as XML.
    It would be nice to accumulate a small selection of fairly generic XML translators here over time, but I think this is best driven by real-world usage.
    To extend this class:
    - add or override methods with the name of an XML node
    - change the node's text or tail inline if needed
    - return beforeText, afterText
    """
    @lru_cache(maxsize=2048)
    def __call__(self, desc):
        """Translate an XML/HTML description string to plain text; None for empty input.

        NOTE(review): lru_cache on a method caches on (self, desc) and keeps the
        instance alive for the cache's lifetime; fine if translators are few and
        long-lived (the Formatter default shares one instance) -- confirm.
        """
        if not desc or not len(desc):
            return None
        nodes = map(
            self.__outer_paragraphs__,
            # We have to use HTML; xml parser blew up on many desc fields
            html.fragment_fromstring(desc, create_parent=True).iter(),
        )
        return "\n\n".join([x for x in nodes if len(x)]).strip()
    def __outer_paragraphs__(self, node):
        """Render one top-level node and its subtree as a single paragraph string."""
        return "".join(map(self.__translate_node__, node.iter()))
    def __translate_node__(self, node):
        """Wrap a node's text/tail in the before/after strings its tag formatter returns."""
        # Iterating over nodes yields them depth-first, so we encounter the nodes in the reverse order of our mental model. Since it makes more sense to write the formatter functions in before/after order, we'll flip them here
        after, before = getattr(self, node.tag)(node)
        return "{}{}{}{}".format(
            before or "",
            node.text if node.text else "",
            after or "",
            node.tail if node.tail else "",
        )
    def _default_method(*args):
        # Fallback tag formatter: contributes no before/after text, i.e. the tag is stripped.
        return None, None  # before, after
    def __getattr__(self, name):
        # Any tag without a dedicated formatter method resolves to the no-op fallback above.
        return self._default_method
    def listitem(self, node):
        """Render <listitem> as a plaintext bullet."""
        return "* ", "\n"  # before, after
class Cast(object):
    """
    Declare the concrete Python types used for output collections.

    A little wasteful when plain builtins suffice, but the indirection lets a
    subclass swap in different container types without copying the whole
    Formatter.
    """

    @staticmethod
    def list(items):
        """Materialize *items* as the list type used for output."""
        return [item for item in items]

    @staticmethod
    def dict(**kwargs):
        """Build the mapping type used for output from keyword arguments."""
        return dict(kwargs)
class Formatter(object):
    """
    Format returning objects for the consumer.
    This may be a little tricky to keep straight, but try to think of this as tying together a few distinct jobs
    Translate
        Alter document-oriented XML in description fields. The default strips tags and adds minimal plain-text formatting.
    Cast
        Dictate which Python type/class will represent a dict or list. Enables simple re-casting of these types when needed. The default just uses dict and list.
    Format
        Reformat full recordsets before the Interface returns them to the user. This is the ideal place to reformat to JSON/XML/*. The default does 'nothing' (i.e., it allows the 'Cast' types to pass through unaltered).
    Extract
        Package a given record type for output:
        - Specify the generic data type (list or dict) wrapping the whole record.
        - Map data fields from the record to a field/position in the output.
        - Massage field data (translate XML, tokenize, etc.) as needed.
        These methods are named after the namedtuple 'types' defined in makes.py.
    """
    translate = None
    cast = None
    def __init__(self, translate=XMLTranslator(), cast=Cast):
        # NOTE(review): the default XMLTranslator() is created once at class-definition
        # time and shared by every Formatter; presumably intentional (its lru_cache is
        # then shared), but confirm before adding per-instance translator state.
        self.translate = translate
        self.cast = cast
    def __call__(self, record):
        """Extract then format *record* in one step."""
        return self.format(self.extract(record))
    def format(self, record):
        """Final output pass; identity by default (override to emit JSON/XML/etc.)."""
        return record
    def extract(self, record):
        """Dispatch to the extractor named after the record's class; unknown types pass through."""
        # call record-specific extractors by name
        return getattr(self, record.__class__.__name__, lambda x: x)(record)
    def populate(self, items):
        """Extract every item and wrap the results in the configured list type."""
        return self.cast.list(map(self.extract, items))
    # Extract methods
    def stub(self, stub):
        """Minimal search-result record: rowid, kind, name and translated summary."""
        return self.cast.dict(
            rowid=stub.rowid,
            kind=stub.kind,
            name=stub.name,
            summary=self.translate(stub.summary),
        )
    def member(self, doc):
        """Full member documentation record; all description fields are translated."""
        return self.cast.dict(
            name=doc.name,
            detaileddescription=self.translate(doc.detaileddescription),
            briefdescription=self.translate(doc.briefdescription),
            inbodydescription=self.translate(doc.inbodydescription),
            definition=doc.definition,
            type=doc.type,
            kind=doc.kind,
        )
    def compound(self, doc):
        """Full compound documentation record; description fields are translated."""
        return self.cast.dict(
            name=doc.name,
            title=doc.title,
            detaileddescription=self.translate(doc.detaileddescription),
            briefdescription=self.translate(doc.briefdescription),
            kind=doc.kind,
        )
    # No part of the interface lets the user specify relations atm, so for now there's no reason to send them along. When this changes, these need flesh
    member_rel = member
    compound_rel = compound
    def section(self, section):
        """Manual section record: summary plus its extracted children."""
        return self.cast.dict(
            summary=section.summary,
            children=self.populate(section.children),
            root=section.root,
            type=section.type,
        )
    def metadata(self, meta):
        """Project/tool metadata record, copied through field by field."""
        return self.cast.dict(
            doxygen_version=meta.doxygen_version,
            schema_version=meta.schema_version,
            generated_at=meta.generated_at,
            generated_on=meta.generated_on,
            project_name=meta.project_name,
            project_number=meta.project_number,
            project_brief=meta.project_brief,
        )
    def manual(self, manual):
        """Whole-manual record: root document, documents, sections and metadata."""
        return self.cast.dict(
            root=self.extract(manual.root),
            documents=self.populate(manual.documents),
            sections=self.populate(manual.sections),
            meta=self.extract(manual.meta),
        )
    def search(self, results):
        """Search result set: every hit extracted into the output list type."""
        return self.cast.dict(results=self.populate(results.results))
class JSONFormatter(Formatter):
    """Formatter variant whose final pass serializes the extracted record to a JSON string."""
    def format(self, record):
        """Return *record* encoded as JSON text."""
        return json.dumps(record)
class Interface(object):
    """
    High-level interface to a database manual.
    Automates conversions/translations/etc.
    Structure looks a bit like this:
    interface
        - manual
            - documents
            - sections
            - manuals
                - views
                - documents
    However, the interface should have no knowledge about this at the call level. It just knows how to use a formatter unwrap/convert the manual's return types.
    """
    manual = structure = search_tuple = _description = None
    def __init__(self, manual, formatter):
        self.manual = manual
        self._structure = manual.doc_structure()
        self.fmt = formatter
        # TODO: ideal addition to the search tuple is information about the query (and possibly information about how it was executed), which suggests this information (and the tuple) might be better generated down in the manual?
        self.search_tuple = manual.types.get("search")
        # set up LRU caches; we can't use the decorator because they'll globally cache... :(
        # (wrapping bound methods here gives each Interface its own per-instance cache)
        self.fetch = lru_cache(maxsize=512)(self.fetch)
        self.search = lru_cache(maxsize=512)(self.search)
        self.brief = lru_cache(maxsize=512)(self.brief)
        self.doc = lru_cache(maxsize=512)(self.doc)
    def _disambiguate(self, results):
        """Wrap multiple hits in the search tuple so the caller can pick one."""
        return self.search_tuple(results)
    # I don't think this actually needs a public API?
    # def disambiguate(self, results):
    #     return self.fmt(self._disambiguate(results))
    def fetch(self, rowid):
        """Fetch the full document for *rowid* (LRU-cached per instance)."""
        return self.manual.doc_fetch(rowid)
    def search(self, query):
        """Run *query* and return all hits, formatted (LRU-cached)."""
        return self.fmt(self.search_tuple(self.manual.doc_search(query)))
    def _brief(self, query):
        """Return the single matching stub, or a disambiguation set when there are several."""
        results = self.manual.doc_search(query)
        if len(results) == 1:
            return results[0]
        else:
            return self._disambiguate(results)
    def brief(self, query):
        """Formatted short answer for *query* (LRU-cached)."""
        ob = self._brief(query)
        return self.fmt(ob)
    def _doc(self, query):
        """Resolve *query* to a full document, or a disambiguation set when ambiguous."""
        results = self.manual.doc_search(query)
        stub = None
        if len(results) == 1:
            stub = results[0]
        else:
            return self._disambiguate(results)
        return self.fetch(stub.rowid)
    def doc(self, query):
        """Formatted full document for *query* (LRU-cached)."""
        ob = self._doc(query)
        return self.fmt(ob)
    def structure(self):
        """Formatted document structure captured at construction time."""
        return self.fmt(self._structure)
| 34.287313 | 271 | 0.650343 |
from lxml import html
from functools import lru_cache
import json
class XMLTranslator(object):
@lru_cache(maxsize=2048)
def __call__(self, desc):
if not desc or not len(desc):
return None
nodes = map(
self.__outer_paragraphs__,
html.fragment_fromstring(desc, create_parent=True).iter(),
)
return "\n\n".join([x for x in nodes if len(x)]).strip()
def __outer_paragraphs__(self, node):
return "".join(map(self.__translate_node__, node.iter()))
def __translate_node__(self, node):
after, before = getattr(self, node.tag)(node)
return "{}{}{}{}".format(
before or "",
node.text if node.text else "",
after or "",
node.tail if node.tail else "",
)
def _default_method(*args):
return None, None # before, after
def __getattr__(self, name):
return self._default_method
def listitem(self, node):
return "* ", "\n" # before, after
class Cast(object):
@staticmethod
def list(items):
return list(items)
@staticmethod
def dict(**kwargs):
return dict(**kwargs)
class Formatter(object):
translate = None
cast = None
def __init__(self, translate=XMLTranslator(), cast=Cast):
self.translate = translate
self.cast = cast
def __call__(self, record):
return self.format(self.extract(record))
def format(self, record):
return record
def extract(self, record):
# call record-specific extractors by name
return getattr(self, record.__class__.__name__, lambda x: x)(record)
def populate(self, items):
return self.cast.list(map(self.extract, items))
# Extract methods
def stub(self, stub):
return self.cast.dict(
rowid=stub.rowid,
kind=stub.kind,
name=stub.name,
summary=self.translate(stub.summary),
)
def member(self, doc):
return self.cast.dict(
name=doc.name,
detaileddescription=self.translate(doc.detaileddescription),
briefdescription=self.translate(doc.briefdescription),
inbodydescription=self.translate(doc.inbodydescription),
definition=doc.definition,
type=doc.type,
kind=doc.kind,
)
def compound(self, doc):
return self.cast.dict(
name=doc.name,
title=doc.title,
detaileddescription=self.translate(doc.detaileddescription),
briefdescription=self.translate(doc.briefdescription),
kind=doc.kind,
)
# No part of the interface lets the user specify relations atm, so for now there's no reason to send them along. When this changes, these need flesh
member_rel = member
compound_rel = compound
def section(self, section):
return self.cast.dict(
summary=section.summary,
children=self.populate(section.children),
root=section.root,
type=section.type,
)
def metadata(self, meta):
return self.cast.dict(
doxygen_version=meta.doxygen_version,
schema_version=meta.schema_version,
generated_at=meta.generated_at,
generated_on=meta.generated_on,
project_name=meta.project_name,
project_number=meta.project_number,
project_brief=meta.project_brief,
)
def manual(self, manual):
return self.cast.dict(
root=self.extract(manual.root),
documents=self.populate(manual.documents),
sections=self.populate(manual.sections),
meta=self.extract(manual.meta),
)
def search(self, results):
return self.cast.dict(results=self.populate(results.results))
class JSONFormatter(Formatter):
def format(self, record):
return json.dumps(record)
class Interface(object):
manual = structure = search_tuple = _description = None
def __init__(self, manual, formatter):
self.manual = manual
self._structure = manual.doc_structure()
self.fmt = formatter
self.search_tuple = manual.types.get("search")
self.fetch = lru_cache(maxsize=512)(self.fetch)
self.search = lru_cache(maxsize=512)(self.search)
self.brief = lru_cache(maxsize=512)(self.brief)
self.doc = lru_cache(maxsize=512)(self.doc)
def _disambiguate(self, results):
return self.search_tuple(results)
# def disambiguate(self, results):
# return self.fmt(self._disambiguate(results))
def fetch(self, rowid):
return self.manual.doc_fetch(rowid)
def search(self, query):
return self.fmt(self.search_tuple(self.manual.doc_search(query)))
def _brief(self, query):
results = self.manual.doc_search(query)
if len(results) == 1:
return results[0]
else:
return self._disambiguate(results)
def brief(self, query):
ob = self._brief(query)
return self.fmt(ob)
def _doc(self, query):
results = self.manual.doc_search(query)
stub = None
if len(results) == 1:
stub = results[0]
else:
return self._disambiguate(results)
return self.fetch(stub.rowid)
def doc(self, query):
ob = self._doc(query)
return self.fmt(ob)
def structure(self):
return self.fmt(self._structure)
| true | true |
f7f869df9b88fc253501fbf3a53e0156a4d3a40a | 33,447 | py | Python | code/EDAParallel.py | xijunlee/SPC-POSM | d5b831445437f93d00cb5fe7eb7ac462512feb13 | [
"MIT"
] | null | null | null | code/EDAParallel.py | xijunlee/SPC-POSM | d5b831445437f93d00cb5fe7eb7ac462512feb13 | [
"MIT"
] | null | null | null | code/EDAParallel.py | xijunlee/SPC-POSM | d5b831445437f93d00cb5fe7eb7ac462512feb13 | [
"MIT"
] | null | null | null | import sys
import math
import random
from random import randint
import copy
from sklearn.metrics import mean_squared_error
from sklearn import linear_model
import time
from sklearn import ensemble
from FileProcess import LoadDataFromText
import numpy as np
from pyspark import SparkContext, SparkConf
import pandas as pd
from sklearn.cluster import KMeans
class Chromosome:
    """One EDA individual plus its cached evaluation results."""
    def __init__(self):
        self.geneSerial = []  # per-provider index of the chosen capacity/cost level
        self.v = []  # auxiliary vector (unused in the code visible here)
        self.fitness = 0  # exp-scaled fitness from calcFitnessParallel
        self.sigmaCost = 0  # total cost of the selected levels
        self.sigmaDemand = 0  # total customer demand
        self.sigmaCapacity = 0  # total capacity of the selected levels
        self.mmd = 0  # min-max (bottleneck) assignment distance
        self.pbest = None  # personal best (not used in the visible code)
        self.cluster = None  # k-means cluster label set during surrogate evaluation
        self.calcAccurate = False  # True if fitness was computed exactly rather than copied from a cluster representative
class Customer:
    """A demand point in the plane."""
    def __init__(self):
        self.x = 0  # x coordinate
        self.y = 0  # y coordinate
        self.demand = 0  # outstanding demand (decremented as matches are assigned)
class Provider:
    """A provider instantiated at one concrete capacity level (used during fitness evaluation)."""
    def __init__(self):
        self.x = 0  # x coordinate
        self.y = 0  # y coordinate
        self.capacity = 0  # remaining capacity at the selected level
        self.cost = 0  # cost of operating at the selected level
class ProviderPlus:
    """A provider offering several selectable capacity levels with matching costs."""
    def __init__(self):
        self.x = 0  # x coordinate
        self.y = 0  # y coordinate
        self.cnt = 0  # number of capacity/cost levels available
        self.capacity = []  # capacity value per level
        self.cost = []  # cost per level (parallel to capacity)
class PO:
    """Container bundling the provider and customer lists of one problem instance."""
    def __init__(self):
        self.PROVIDERS = []
        self.CUSTOMERS = []
class Match:
    """One assignment edge: customer o receives w units from provider p over distance dis."""
    def __init__(self):
        self.o = 0  # customer index
        self.p = 0  # provider index
        self.w = 0  # assigned amount (weight)
        self.dis = 0  # Euclidean distance between customer o and provider p
class Queue:
    """BFS queue slot: a node id plus the queue index of the node that enqueued it."""
    def __init__(self):
        self.num = 0  # node id (customer index, or provider index + len(O))
        self.parent = 0  # queue index of the parent slot (-1 for the BFS root)
class SwapChainSolver:
    """
    Min-max-distance transportation solver.

    Assigns customer demand (self.O) to provider capacity (self.P), then
    repeatedly reroutes the longest ("extreme") matches through swap chains
    made of strictly shorter edges.  Solver() returns the bottleneck
    (largest) match distance of the final assignment.

    Node numbering convention used by the BFS methods: customers are
    0..len(O)-1, providers are len(O)..len(O)+len(P)-1.
    """
    def __init__(self, providers, customers):
        self.P = providers
        self.O = customers
        self.Assignment = []  # list of Match objects currently in force
    def Solver(self):
        """Run the improvement loop and return the final bottleneck distance."""
        self.initiallize_assignment()
        while True:
            extremeMatch = copy.deepcopy(self.find_d_satisfiable())
            if not extremeMatch:
                break
            else:
                self.swap(extremeMatch)
        self.Assignment = sorted(self.Assignment, key=self.returnDis)
        return self.Assignment[len(self.Assignment) - 1].dis
    def swap(self, m):
        """Remove match *m* and re-route its demand through chains of shorter edges."""
        self.sub_match(m)
        chain = []
        while True:
            chain = self.find_chain(m)
            if not chain:
                break
            else:
                # chain breaking: ws = max amount shiftable along the whole chain
                # (limited by the free capacity at the chain head, the remaining
                # demand of m.o, and every existing match on the chain)
                ws = float('inf')
                ws = min(ws, self.P[chain[0] - len(self.O)].capacity)
                ws = min(ws, self.O[chain[len(chain) - 1]].demand)
                for i in range(1, len(chain) - 1, 2):
                    tmpo = chain[i]
                    tmpp = chain[i + 1] - len(self.O)
                    for tmp in self.Assignment:
                        if tmp.o == tmpo and tmp.p == tmpp:
                            ws = min(ws, tmp.w)
                            break
                # reduce every on-chain match by ws (dropping it entirely at ws)
                for i in range(1, len(chain) - 1, 2):
                    tmpo = chain[i]
                    tmpp = chain[i + 1] - len(self.O)
                    for tmp in self.Assignment:
                        if tmp.o == tmpo and tmp.p == tmpp:
                            tmpm = copy.deepcopy(tmp)
                            self.sub_match(tmp)
                            if tmpm.w != ws:
                                tmpm.w = tmpm.w - ws
                                self.add_match(tmpm)
                            break
                # chain matching: create the new, shorter edges carrying ws
                for i in range(0, len(chain), 2):
                    tmpo = chain[i + 1]
                    tmpp = chain[i] - len(self.O)
                    tmpm = Match()
                    tmpm.o = tmpo
                    tmpm.p = tmpp
                    tmpm.w = ws
                    tmpm.dis = math.sqrt(
                        (self.O[tmpo].x - self.P[tmpp].x) ** 2 + (self.O[tmpo].y - self.P[tmpp].y) ** 2)
                    self.add_match(tmpm)
                if self.O[m.o].demand == 0:
                    break
        # post matching: any demand that could not be re-routed goes back to the
        # original provider at the original (bottleneck) distance
        if self.O[m.o].demand > 0:
            tmpm = Match()
            tmpm.o = m.o
            tmpm.p = m.p
            tmpm.w = self.O[m.o].demand
            tmpm.dis = math.sqrt((self.O[m.o].x - self.P[m.p].x) ** 2 + (self.O[m.o].y - self.P[m.p].y) ** 2)
            self.add_match(tmpm)
    def find_chain(self, m):
        """BFS from m.o over edges strictly shorter than m.dis.

        Alternates customer->provider steps (any provider closer than m.dis)
        with provider->customer steps (existing matches of a saturated
        provider).  Returns the node chain from a provider with spare
        capacity back to m.o, or False if none exists.
        """
        chain = []
        flag = False
        maxDis = m.dis
        Q = []
        hash = []
        for i in range(0, 2 * (len(self.O) + len(self.P))):
            Q.append(Queue())
            hash.append(0)
        head = 0
        tail = 0
        hash[m.o] = 1
        Q[head].num = m.o
        Q[head].parent = -1
        tail = tail + 1
        while not flag and head != tail:
            CurrentNode = Q[head].num
            if CurrentNode < len(self.O):
                for i in range(0, len(self.P)):
                    tmpDis = math.sqrt(
                        (self.O[CurrentNode].x - self.P[i].x) ** 2 + (self.O[CurrentNode].y - self.P[i].y) ** 2)
                    if tmpDis < maxDis and hash[i + len(self.O)] == 0:
                        Q[tail].num = i + len(self.O)
                        Q[tail].parent = head
                        hash[i + len(self.O)] = 1
                        tail = (tail + 1) % len(Q)
            else:
                pNode = CurrentNode - len(self.O)
                if self.P[pNode].capacity == 0:
                    for tmp in self.Assignment:
                        if tmp.p == pNode and hash[tmp.o] == 0:
                            hash[tmp.o] = 1
                            Q[tail].num = tmp.o
                            Q[tail].parent = head
                            tail = (tail + 1) % len(Q)
                else:
                    # provider with free capacity found: walk parents back to m.o
                    flag = True
                    tmp = head
                    while tmp >= 0:
                        chain.append(Q[tmp].num)
                        tmp = Q[tmp].parent
            head = (head + 1) % len(Q)
        if flag:
            return chain
        else:
            return flag
    def find_d_satisfiable(self):
        """Return a bottleneck-distance match from which free capacity is reachable
        via edges shorter than the bottleneck, or False if no such match exists."""
        hash = []
        myQueue = []
        haveFound = False
        for i in range(0, len(self.O) + len(self.P)):
            hash.append(0)
        for i in range(0, 2 * (len(self.O) + len(self.P))):
            myQueue.append(Queue())
        self.Assignment = sorted(self.Assignment, key=self.returnDis)
        maxDis = self.Assignment[len(self.Assignment) - 1].dis
        k = len(self.Assignment) - 1
        extremeMatch = False
        # NOTE: condition order matters -- Assignment[k] with k == -1 harmlessly
        # wraps to the last element, and the trailing k >= 0 then ends the loop.
        while not haveFound and self.Assignment[k].dis == maxDis and k >= 0:
            # BUG FIX: the original `for tmp in hash: tmp = 0` only rebound the
            # loop variable and never cleared the visited flags, so stale marks
            # from earlier k-iterations could wrongly prune this BFS.  Reset the
            # list in place (the myQueue reset below works because it mutates
            # the Queue objects themselves).
            for idx in range(len(hash)):
                hash[idx] = 0
            for tmp in myQueue:
                tmp.num = 0
                tmp.parent = 0
            head = 0
            tail = 0
            hash[self.Assignment[k].o] = 1
            myQueue[head].num = self.Assignment[k].o
            myQueue[head].parent = -1
            tail += 1
            # temporarily lift the candidate match so its own capacity/demand
            # do not distort the reachability search
            extremeMatch = copy.deepcopy(self.Assignment[k])
            self.sub_match(extremeMatch)
            while head != tail and not haveFound:
                CurrentNode = myQueue[head].num
                if CurrentNode < len(self.O):
                    for i in range(0, len(self.P)):
                        tmpDis = math.sqrt(
                            (self.O[CurrentNode].x - self.P[i].x) ** 2 + (self.O[CurrentNode].y - self.P[i].y) ** 2)
                        if tmpDis < maxDis and hash[i + len(self.O)] == 0:
                            myQueue[tail].num = i + len(self.O)
                            myQueue[tail].parent = head
                            hash[i + len(self.O)] = 1
                            tail = (tail + 1) % len(myQueue)
                else:
                    pNode = CurrentNode - len(self.O)
                    if self.P[pNode].capacity == 0:
                        for tmp in self.Assignment:
                            if tmp.p == pNode and hash[tmp.o] == 0:
                                hash[tmp.o] = 1
                                myQueue[tail].num = tmp.o
                                myQueue[tail].parent = head
                                tail = (tail + 1) % len(myQueue)
                    else:
                        haveFound = True
                head = (head + 1) % len(myQueue)
            self.add_match(extremeMatch)
            k = k - 1
        if haveFound:
            return extremeMatch
        else:
            return False
    def distance(self, s):
        """Sort key for the {'p', 'distance'} dicts used during initialization."""
        return s['distance']
    def returnDis(self, s):
        """Sort key: a Match's distance."""
        return s.dis
    def add_match(self, m):
        """Apply match *m*: merge into an existing edge or append, then book capacity/demand."""
        flag = False
        for tmp in self.Assignment:
            if (m.o == tmp.o and m.p == tmp.p):
                tmp.w += m.w
                flag = True
                break
        if flag == False:
            self.Assignment.append(copy.deepcopy(m))
        self.P[m.p].capacity -= m.w
        self.O[m.o].demand -= m.w
    def sub_match(self, m):
        """Undo match *m*: restore capacity/demand and shrink or drop the stored edge."""
        self.P[m.p].capacity += m.w
        self.O[m.o].demand += m.w
        for tmp in self.Assignment:
            if m.o == tmp.o and m.p == tmp.p:
                tmp.w -= m.w
                if tmp.w == 0:
                    self.Assignment.remove(tmp)
                break
    def initiallize_assignment(self):
        """Greedy start: each customer takes capacity from its nearest providers first."""
        distanceList = []
        for i in range(0, len(self.O)):
            distanceList = []
            for j in range(0, len(self.P)):
                dis = math.sqrt((self.O[i].x - self.P[j].x) ** 2 + (self.O[i].y - self.P[j].y) ** 2)
                tmp = {'p': j, 'distance': dis}
                distanceList.append(tmp)
            distanceList = sorted(distanceList, key=self.distance)
            for j in range(0, len(self.P)):
                tmp = min(self.O[i].demand, self.P[distanceList[j]['p']].capacity)
                if (tmp > 0):
                    m = Match()
                    m.o = i
                    m.p = distanceList[j]['p']
                    m.w = tmp
                    m.dis = distanceList[j]['distance']
                    self.add_match(m)
                if self.O[i].demand == 0:
                    break
        self.Assignment = sorted(self.Assignment, key=self.returnDis)
        # print for debug
        '''for i in range(0,len(self.Assignment)):
            print(self.Assignment[i].o, self.Assignment[i].p, self.Assignment[i].w, self.Assignment[i].dis)
        '''
def LoadDataFromText(txtpath):
    """
    Load a problem instance from a whitespace-separated text file.

    Expected layout:
        line 0:           number of providers P
        lines 1..P:       x y cnt cap_1..cap_cnt cost_1..cost_cnt
        line P+1:         number of customers C
        lines P+2..:      x y demand          (C such lines)

    Returns:
        (PROVIDERS, CUSTOMERS): lists of ProviderPlus and Customer objects.

    NOTE(review): this local definition shadows the LoadDataFromText imported
    from FileProcess at the top of the file -- confirm which one is intended.
    """
    # 'with' guarantees the handle is closed even if parsing below raises
    # (the original open()/close() pair leaked the handle on error).
    with open(txtpath, "r") as fp:
        arr = [line.replace("\n", "").split(" ") for line in fp]
    NumberOfProviders = int(arr[0][0])
    PROVIDERS = []
    for i in range(1, NumberOfProviders + 1):
        tmp = arr[i]
        tmpProvider = ProviderPlus()
        tmpProvider.x = float(tmp[0])
        tmpProvider.y = float(tmp[1])
        tmpProvider.cnt = int(tmp[2])
        # per-level values: cnt capacities first, then the cnt matching costs
        for j in range(0, tmpProvider.cnt):
            tmpProvider.capacity.append(float(tmp[j + 3]))
            tmpProvider.cost.append(float(tmp[j + 3 + tmpProvider.cnt]))
        PROVIDERS.append(tmpProvider)
    NumberOfCustomers = int(arr[NumberOfProviders + 1][0])
    CUSTOMERS = []
    for i in range(0, NumberOfCustomers):
        tmp = arr[i + NumberOfProviders + 2]
        tmpCustomer = Customer()
        tmpCustomer.x = float(tmp[0])
        tmpCustomer.y = float(tmp[1])
        tmpCustomer.demand = float(tmp[2])
        CUSTOMERS.append(tmpCustomer)
    return PROVIDERS, CUSTOMERS
class EDA:
    def __init__(self, populationSize, iterationMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate, updateRatio):
        """Configure the EDA run: problem instance *po*, distance bound *D*,
        population/iteration limits, fitness weights alpha/beta, surrogate
        switch and tabu-search parameters."""
        self.m_PO = po
        self.m_D = D
        self.m_PopulationSize = populationSize
        self.m_iterMax = iterationMax
        self.m_Alpha = alpha
        self.m_Beta = beta
        self.m_Population = []
        self.m_BestSolution = None
        self.m_BestFitness = -65536
        # self.m_BestCost = 0
        self.m_BlockMax = blockMax
        self.m_Block = 0
        self.m_Surrogate = 0
        self.m_SurrogateFlag = surrogateFlag
        # self.m_SparkContext = sc
        self.m_Iter = 0
        # init the EDA matrix
        # one uniform count per (provider, capacity level); assumes every
        # provider offers the same number of levels as PROVIDERS[0] -- confirm
        self.m_Matrix = [[1 for _ in range(self.m_PO.PROVIDERS[0].cnt)] for _ in range(len(self.m_PO.PROVIDERS))]
        # if surrogateFlag:
        #     n_AllSol = len(po.PROVIDERS) ** po.PROVIDERS[0].cnt
        #     self.m_Surrogate = Surrogate(int(n_AllSol * sizeRatio), po)
        #     self.m_Surrogate.trainModel()
        self.m_TabuList = []
        self.m_CandidateList = []
        self.m_TabuMaxLength = tabuMaxLength
        self.m_TabuMaxIter = tabuMaxIter
        self.m_MaxNumCandidate = maxNumCandidate
        self.m_CurrentSolution = None
        self.m_CollectGeneration = 3
        self.m_EDASearchRunTime = 0
        self.m_LocalSearchRunTime = 0
        self.m_BestCostPerGen = []
        self.m_ConverGen = 0 # mark the generation when algorithm converges
        self.m_UpdateRatio = updateRatio
    def calcFitnessParallel(self, geneSerial, data, D, idx):
        """Evaluate one chromosome (runs inside a Spark map task).

        Builds fresh Provider/Customer copies so the shared problem instance
        is never mutated, selects each provider's capacity level from
        geneSerial, and scores:
          * -20 if total capacity cannot cover total demand,
          * -10 if the bottleneck assignment distance exceeds D,
          * 20 / total cost otherwise (10 for zero cost).
        Returns (geneSerial, idx, exp(fitness), mmd, sigmaCapacity,
        sigmaCost, sigmaDemand) so the driver can write results back by idx.
        """
        # alpha and beta are weight factor
        alpha = self.m_Alpha
        beta = self.m_Beta
        customers = []
        fitness = 0
        for item in data.CUSTOMERS:
            tmp = Customer()
            tmp.x = copy.deepcopy(item.x)
            tmp.y = copy.deepcopy(item.y)
            tmp.demand = copy.deepcopy(item.demand)
            customers.append(tmp)
        providers = []
        sigmaCost = 0
        sigmaCapacity = 0
        sigmaDemand = 0
        # sentinel: far beyond any feasible distance, kept when no solve happens
        mmd = self.m_D * 1000.0
        for i in range(0, len(geneSerial)):
            tmpProvider = Provider()
            tmpProvider.x = copy.deepcopy(data.PROVIDERS[i].x)
            tmpProvider.y = copy.deepcopy(data.PROVIDERS[i].y)
            tmpProvider.capacity = copy.deepcopy(data.PROVIDERS[i].capacity[geneSerial[i]])
            tmpProvider.cost = copy.deepcopy(data.PROVIDERS[i].cost[geneSerial[i]])
            sigmaCost = sigmaCost + tmpProvider.cost
            sigmaCapacity = sigmaCapacity + tmpProvider.capacity
            providers.append(tmpProvider)
        for item in customers:
            sigmaDemand = sigmaDemand + item.demand
        if sigmaCapacity >= sigmaDemand:
            # only run the (expensive) assignment solver when feasible at all
            swapchainsolver = SwapChainSolver(providers, customers)
            mmd = swapchainsolver.Solver()
            if mmd > D:
                fitness = -10.0
            else:
                if sigmaCost != 0:
                    fitness = float(20.0 / sigmaCost)
                else:
                    fitness = 10.0
        else:
            fitness = -20.0
        # print("fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand:",fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand)
        # return math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand
        return (geneSerial, idx, math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand)
def calcPopulationFitnessWithSurrogate(self, sc):
    """Approximate population fitness via a clustering surrogate.

    The population is clustered with k-means (k = 10% of the population
    size); only one representative per cluster -- the member with the
    smallest summed distance to all centroids -- is evaluated exactly on
    Spark.  Every other member inherits the fitness of its cluster's
    representative.  The incumbent best solution and the stagnation
    counter m_Block are updated from exactly-evaluated members only.

    Fix vs. original: the representative-selection test was the
    copy-paste tautology "min_dist < 0 and min_dist < 0"; the intent is
    "no representative chosen yet".
    """
    raw_data = []
    for i in range(len(self.m_Population)):
        raw_data.append(self.m_Population[i].geneSerial)
    raw_data = np.array(raw_data)
    num_cluster = int(self.m_PopulationSize * 0.1)
    kmeans = KMeans(n_clusters=num_cluster, random_state=0).fit(raw_data)
    # NOTE(review): this is the SUM of distances to all centroids, not the
    # distance to the member's own centroid -- confirm that is intended.
    distances = kmeans.transform(raw_data).sum(axis=1)
    labels = kmeans.labels_
    for i in range(len(self.m_Population)):
        self.m_Population[i].cluster = labels[i]
    # Pick one representative per cluster (smallest summed distance).
    raw_data_1, closest_item_idx = [], []
    for clst in range(num_cluster):
        min_idx, min_dist = -1, -1
        for idx in range(len(distances)):
            if labels[idx] == clst:
                # BUG FIX: original tested "min_dist < 0 and min_dist < 0".
                if min_idx < 0:
                    min_idx = idx
                    min_dist = distances[idx]
                elif min_dist > distances[idx]:
                    min_idx = idx
                    min_dist = distances[idx]
        # NOTE(review): an empty cluster leaves min_idx == -1 and would pick
        # the last member via raw_data[-1]; behavior kept as-is -- verify.
        raw_data_1.append((raw_data[min_idx], min_idx))
        closest_item_idx.append(min_idx)
    raw_data = raw_data_1
    distPop = sc.parallelize(raw_data)
    # pair = (geneSerial, population index); indexing instead of the
    # Python-2-only tuple-parameter lambda keeps behavior identical.
    fitnessCalc = distPop.map(
        lambda pair: self.calcFitnessParallel(pair[0], copy.copy(self.m_PO), copy.copy(self.m_D), pair[1]))
    chromosomeCollect = fitnessCalc.collect()
    for (geneSerial, idx, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
        self.m_Population[idx].fitness = fitness
        self.m_Population[idx].mmd = mmd
        self.m_Population[idx].sigmaCapacity = sigmaCapacity
        self.m_Population[idx].sigmaCost = sigmaCost
        self.m_Population[idx].sigmaDemand = sigmaDemand
        self.m_Population[idx].calcAccurate = True
    # Non-representatives inherit their cluster representative's fitness.
    for i in range(self.m_PopulationSize):
        if i not in closest_item_idx:
            self.m_Population[i].fitness = self.m_Population[closest_item_idx[self.m_Population[i].cluster]].fitness
            self.m_Population[i].calcAccurate = False
    sortedPopulation = sorted(self.m_Population, key=lambda x: x.fitness, reverse=True)
    if not self.m_BestSolution:
        # First generation: the best is the top exactly-evaluated member.
        for i in range(len(sortedPopulation)):
            if sortedPopulation[i].calcAccurate:
                self.m_BestSolution = copy.deepcopy(sortedPopulation[i])
                break
    else:
        calcAccurateIdx = None
        for i in range(len(sortedPopulation)):
            if sortedPopulation[i].calcAccurate:
                calcAccurateIdx = i
                break
        if self.m_BestSolution.fitness < sortedPopulation[calcAccurateIdx].fitness:
            self.m_BestSolution = copy.deepcopy(sortedPopulation[calcAccurateIdx])
            self.m_Block = 0
        elif math.fabs(self.m_BestSolution.fitness - sortedPopulation[calcAccurateIdx].fitness) <= 0.001:
            self.m_Block += 1
def calcPopulationFitness(self, sc):
    """Exactly evaluate every chromosome's fitness on the Spark cluster.

    Each (geneSerial, index) pair is shipped to the workers, evaluated
    with calcFitnessParallel, and the results are written back into the
    population by index.  Finally the incumbent best solution and the
    stagnation counter m_Block are refreshed.
    """
    pairs = []
    position = 0
    for chrom in self.m_Population:
        pairs.append((chrom.geneSerial, position))
        position += 1
    rdd = sc.parallelize(pairs)
    collected = rdd.map(
        lambda pair: self.calcFitnessParallel(pair[0], copy.copy(self.m_PO), copy.copy(self.m_D), pair[1])).collect()
    for record in collected:
        # record = (geneSerial, idx, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand)
        target = self.m_Population[record[1]]
        target.fitness = record[2]
        target.mmd = record[3]
        target.sigmaCapacity = record[4]
        target.sigmaCost = record[5]
        target.sigmaDemand = record[6]
    ranked = sorted(self.m_Population, key=lambda c: c.fitness, reverse=True)
    champion = ranked[0]
    if not self.m_BestSolution:
        self.m_BestSolution = copy.deepcopy(champion)
    elif self.m_BestSolution.fitness < champion.fitness:
        self.m_BestSolution = copy.deepcopy(champion)
        self.m_Block = 0
    elif math.fabs(self.m_BestSolution.fitness - champion.fitness) <= 0.001:
        self.m_Block += 1
def sampleAndEvaluateParallel(self, sc):
    """Sample a fresh population from the probability matrix on Spark,
    then evaluate it (exactly, or via the clustering surrogate when
    m_SurrogateFlag is set).

    Fix vs. original: removed the dead local ``raw_data = []`` and
    renamed the index list so the lambda parameter no longer shadows it.
    """
    self.m_Population = []
    indices = [i for i in range(self.m_PopulationSize)]
    distPop = sc.parallelize(indices)
    geneSerialSample = distPop.map(lambda i: self.sampleParallel(i))
    geneSerialCollect = geneSerialSample.collect()
    for (idx, geneSerial_tmp) in geneSerialCollect:
        chromosome = Chromosome()
        chromosome.geneSerial = geneSerial_tmp
        self.m_Population.append(chromosome)
    if self.m_SurrogateFlag:
        self.calcPopulationFitnessWithSurrogate(sc)
    else:
        self.calcPopulationFitness(sc)
def update(self):
sortedPopulation = sorted(self.m_Population, key=lambda x: x.fitness, reverse=True)
if sortedPopulation[0].fitness > self.m_BestFitness:
self.m_BestFitness = sortedPopulation[0].fitness
self.m_BestSolution = copy.deepcopy(sortedPopulation[0])
self.m_Block = 0
elif math.fabs(sortedPopulation[0].fitness - self.m_BestFitness) <= 0.001:
self.m_Block += 1
# sigmaCost = 0
# for i in range(len(self.m_BestSolution)):
# sigmaCost = sigmaCost + po.PROVIDERS[i].cost[self.m_BestSolution[i]]
# print "the best individual serial, fitness, mmd, sigmaCost, sigmaCapacity, sigmaDemand ",\
# sortedPopulation[0].geneSerial, sortedPopulation[0].fitness,sortedPopulation[0].mmd, sortedPopulation[0].sigmaCost, sortedPopulation[0].sigmaCapacity, sortedPopulation[0].sigmaDemand
# for ind in sortedPopulation:
# print "the individual serial, fitness, mmd, sigmaCost, sigmaCapacity, sigmaDemand ", \
# ind.geneSerial, ind.fitness, ind.mmd, ind.sigmaCost, ind.sigmaCapacity, ind.sigmaDemand
# print sortedPopulation[0].sigmaCost
for i in range(int(self.m_PopulationSize * self.m_UpdateRatio)):
gene = sortedPopulation[i].geneSerial
for p in range(len(self.m_Matrix)):
row = self.m_Matrix[p]
row[gene[p]] += 1
def sampleParallel(self, idx):
geneSerial = []
for p in range(len(self.m_Matrix)):
# each row is for a provider, the length of row is equal to number of capacities of the provider
row = self.m_Matrix[p]
rowSum = float(sum(row))
cumulateRow = [0 for _ in range(len(row))]
cumulateRow[0] = row[0] / rowSum
for i in range(1, len(row)):
cumulateRow[i] = cumulateRow[i - 1] + row[i] / rowSum
rnd = random.random()
for i in range(len(row)):
if cumulateRow[i] >= rnd:
geneSerial.append(i)
break
return (i, geneSerial)
def EDASearch(self, sc):
    """Main EDA loop: sample, evaluate, reinforce.

    Runs until either the generation cap m_iterMax is hit or the best
    fitness has stagnated for m_BlockMax generations; the generation
    count at exit is recorded in m_ConverGen.
    """
    self.m_Iter = 0
    self.m_Block = 0
    while True:
        if self.m_Iter >= self.m_iterMax or self.m_Block >= self.m_BlockMax:
            break
        self.sampleAndEvaluateParallel(sc)
        self.update()
        # Track convergence: best cost seen so far, per generation.
        self.m_BestCostPerGen.append(self.m_BestSolution.sigmaCost)
        self.m_Iter = self.m_Iter + 1
    self.m_ConverGen = self.m_Iter
def LocalSearch(self, sc):
    """Tabu-search refinement around the EDA's best solution.

    Each iteration generates up to m_MaxNumCandidate neighbour moves:
    flag 0 swaps two gene positions, flag 1 bumps one position to its
    next capacity level (modulo the provider's level count).  Moves on
    the tabu list are skipped; the surviving candidates are evaluated
    in parallel on Spark, the best one becomes the current solution,
    and its move is pushed onto the (bounded) tabu list.

    Fixes vs. original:
      * geneSerial aliased m_CurrentSolution.geneSerial, so every
        candidate mutated the current solution in place and mutations
        accumulated across candidates -- each candidate now works on a
        fresh copy;
      * every collected candidate was tagged with the LAST iteration's
        (flag, pointA, pointB); the move is now tracked per candidate
        through the index shipped to Spark.
    """
    self.m_Iter, self.m_Block = 0, 0
    self.m_CurrentSolution = self.m_BestSolution
    while self.m_Iter < self.m_TabuMaxIter and self.m_Block < self.m_BlockMax:
        self.m_CandidateList = []
        raw_data = []
        moves = []  # moves[k] is the (flag, pointA, pointB) behind raw_data[k]
        tabuSet = set(self.m_TabuList)
        for _ in range(self.m_MaxNumCandidate):
            flag = randint(0, 1)
            # Fresh copy: do not mutate the current solution in place.
            geneSerial = list(self.m_CurrentSolution.geneSerial)
            if flag == 0:
                pointA = randint(0, len(geneSerial) - 1)
                pointB = randint(0, len(geneSerial) - 1)
                geneSerial[pointA], geneSerial[pointB] = geneSerial[pointB], geneSerial[pointA]
            else:
                pointA = -1
                pointB = randint(0, len(geneSerial) - 1)
                geneSerial[pointB] = (geneSerial[pointB] + 1) % self.m_PO.PROVIDERS[pointB].cnt
            if (flag, pointA, pointB) not in tabuSet:
                raw_data.append((geneSerial, len(moves)))
                moves.append((flag, pointA, pointB))
        # Compute candidate fitness in parallel.
        distPop = sc.parallelize(raw_data)
        fitnessCalc = distPop.map(
            lambda pair: self.calcFitnessParallel(pair[0], copy.copy(self.m_PO), copy.copy(self.m_D), pair[1]))
        chromosomeCollect = fitnessCalc.collect()
        for (geneSerial, idx, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
            chromosome = Chromosome()
            chromosome.geneSerial = geneSerial
            chromosome.fitness = fitness
            chromosome.mmd = mmd
            chromosome.sigmaCapacity = sigmaCapacity
            chromosome.sigmaCost = sigmaCost
            chromosome.sigmaDemand = sigmaDemand
            self.m_CandidateList.append((chromosome, chromosome.fitness, moves[idx]))
        nextBestChromosome, nextBestFitness, tabu = sorted(self.m_CandidateList, key=lambda x: x[1], reverse=True)[0]
        if self.m_BestSolution.fitness <= nextBestFitness:
            self.m_BestSolution = copy.deepcopy(nextBestChromosome)
            self.m_Block = 0
        elif math.fabs(self.m_BestSolution.fitness - nextBestFitness) <= 0.001:
            self.m_Block += 1
        # Bounded FIFO tabu list.
        if len(self.m_TabuList) >= self.m_TabuMaxLength:
            self.m_TabuList.pop(0)
        self.m_TabuList.append(tabu)
        self.m_CurrentSolution = nextBestChromosome
        self.m_Iter += 1
def Search(self, sc):
startTime = time.time()
self.EDASearch(sc)
endTime = time.time()
self.m_EDASearchRunTime = endTime - startTime
#startTime = time.time()
#self.LocalSearch(sc)
#endTime = time.time()
self.m_LocalSearchRunTime = 0
if __name__ == "__main__":
# ---- EDA hyper-parameters ----
popSize = 100
iterMax = 100
blockMax = 110
# alpha/beta are weight factors handed to EDA (unused by the current fitness function)
alpha = 10000000.00
beta = 0.01
# D: maximum allowed match distance (feasibility constraint)
D = 40
surrogateFlag = False
# Tabu local-search parameters (local search is disabled inside EDA.Search)
tabuMaxLength = 10
tabuMaxIter = 100
maxNumCandidate = 10
# fraction of the population used to reinforce the probability matrix
updateRatio = 0.1
# number of Spark cores is taken from the command line
core_num = int(sys.argv[1])
conf = SparkConf().setMaster("spark://noah007:7077") \
.setAppName("SPC-POSM-EDA") \
.set("spark.submit.deployMode", "client") \
.set("spark.cores.max", core_num) \
.set("spark.executor.cores", "10") \
.set("spark.executor.memory", "20g") \
.set("spark.driver.memory", "40g")
sc = SparkContext(conf=conf)
'''
experiment on different dataset
'''
'''
# instanceSet = ['nuoxi2G'] #, 'nuoxi3G', 'huawei2G', 'huawei3G']
instanceSet = [i for i in range(60)]
aveAns, aveRuntime, aveConverGen = [], [], []
for i in instanceSet:
print i, 'th instance ...'
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
times = 5
sumAns, sumRuntime, sumConverGen = 0, 0, 0
for _ in range(times):
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate, updateRatio)
eda.Search(sc)
sumAns += eda.m_BestSolution.sigmaCost
sumRuntime += (eda.m_EDASearchRunTime + eda.m_LocalSearchRunTime)
sumConverGen = eda.m_ConverGen
aveAns.append(sumAns / (times*1.0))
aveRuntime.append(sumRuntime / (times*1.0))
aveConverGen.append(sumConverGen / (times*1.0))
df = pd.DataFrame({'cost': aveAns, 'EDA runtime': aveRuntime, 'ConverGen':aveConverGen })
df.to_csv('../midResult/edaResult.csv')
'''
'''
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(59) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate)
eda.Search(sc)
df = pd.DataFrame({'cost': eda.m_BestCostPerGen})
df.to_csv('../midResult/edaResultBestCostPergen.csv')
'''
'''
experiment of convergence
'''
# Active experiment: convergence on instances 0..19; the per-generation
# best cost is summed across instances and written to CSV.
instNum = 20
instList = [i for i in range(0,instNum)]
costPerGenList = []
for i in instList:
# po holds the PROVIDERS and CUSTOMERS data for this instance
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
costPerGenList.append(eda.m_BestCostPerGen)
costPerGenNpArr = np.array(costPerGenList)
# print costPerGenList
# print costPerGenNpArr
# print type(costPerGenNpArr)
costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
print costPerGenNpArr
# costPerGenNpArr = costPerGenNpArr / float(instNum)
df = pd.DataFrame({'aveCost': costPerGenNpArr})
df.to_csv('../midResult/edaResultBestCostPerGen1.csv')
# NOTE(review): the triple-quoted literal below comments out the
# instance 20-39 and 40-59 variants of the experiment above.
'''
instNum = 40
instList = [i for i in range(20,instNum)]
costPerGenList = []
for i in instList:
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
costPerGenList.append(eda.m_BestCostPerGen)
costPerGenNpArr = np.array(costPerGenList)
# print costPerGenList
# print costPerGenNpArr
# print type(costPerGenNpArr)
costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
print costPerGenNpArr
# costPerGenNpArr = costPerGenNpArr / float(instNum)
df = pd.DataFrame({'aveCost': costPerGenNpArr})
df.to_csv('../midResult/edaResultBestCostPerGen2.csv')
instNum = 60
instList = [i for i in range(40,instNum)]
costPerGenList = []
for i in instList:
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
costPerGenList.append(eda.m_BestCostPerGen)
costPerGenNpArr = np.array(costPerGenList)
# print costPerGenList
# print costPerGenNpArr
# print type(costPerGenNpArr)
costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
print costPerGenNpArr
# costPerGenNpArr = costPerGenNpArr / float(instNum)
df = pd.DataFrame({'aveCost': costPerGenNpArr})
df.to_csv('../midResult/edaResultBestCostPerGen3.csv')
'''
'''
geneSerialList, sigmaCostList = [], []
D = [40,6,40,50,60,40]
for i in range(1,7):
print i, 'th instance ...'
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/data' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D[i-1], surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate, updataRatio)
eda.Search(sc)
geneSerialList.append(eda.m_BestSolution.geneSerial)
sigmaCostList.append(eda.m_BestSolution.sigmaCost)
df = pd.DataFrame({'sigmaCost':sigmaCostList, 'geneSerial': geneSerialList})
df.to_csv('../midResult/edaTinyDatasetResult.csv')
'''
'''
experiment on testing different update ratio
'''
'''
updateRatioList = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
instList = [1,2,3,4,5,6]
D = [40, 6, 40, 50, 60, 40]
for i in instList:
print i, 'th instance'
aveAns, aveRuntime, aveConverGen = [], [], []
for updateRatio in updateRatioList:
print updateRatio, 'update ratio'
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/data' + str(i) + '.txt')
times = 5
sumAns, sumRuntime, sumConverGen = 0, 0, 0
for _ in range(times):
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D[i-1], surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
sumAns += eda.m_BestSolution.sigmaCost
sumRuntime += (eda.m_EDASearchRunTime + eda.m_LocalSearchRunTime)
sumConverGen = eda.m_ConverGen
aveAns.append(sumAns / (times * 1.0))
aveRuntime.append(sumRuntime / (times * 1.0))
aveConverGen.append(sumConverGen / (times * 1.0))
df = pd.DataFrame({'ratio': updateRatioList, 'cost': aveAns, 'EDA runtime': aveRuntime, 'ConverGen': aveConverGen})
df.to_csv('../midResult/edaResultUpdateRatioData'+str(i)+'.csv')
'''
| 36.958011 | 196 | 0.555536 | import sys
import math
import random
from random import randint
import copy
from sklearn.metrics import mean_squared_error
from sklearn import linear_model
import time
from sklearn import ensemble
from FileProcess import LoadDataFromText
import numpy as np
from pyspark import SparkContext, SparkConf
import pandas as pd
from sklearn.cluster import KMeans
class Chromosome:
def __init__(self):
self.geneSerial = []
self.v = []
self.fitness = 0
self.sigmaCost = 0
self.sigmaDemand = 0
self.sigmaCapacity = 0
self.mmd = 0
self.pbest = None
self.cluster = None
self.calcAccurate = False
class Customer:
def __init__(self):
self.x = 0
self.y = 0
self.demand = 0
class Provider:
def __init__(self):
self.x = 0
self.y = 0
self.capacity = 0
self.cost = 0
class ProviderPlus:
def __init__(self):
self.x = 0
self.y = 0
self.cnt = 0
self.capacity = []
self.cost = []
class PO:
def __init__(self):
self.PROVIDERS = []
self.CUSTOMERS = []
class Match:
def __init__(self):
self.o = 0
self.p = 0
self.w = 0
self.dis = 0
class Queue:
def __init__(self):
self.num = 0
self.parent = 0
class SwapChainSolver:
def __init__(self, providers, customers):
self.P = providers
self.O = customers
self.Assignment = []
def Solver(self):
self.initiallize_assignment()
while True:
extremeMatch = copy.deepcopy(self.find_d_satisfiable())
if not extremeMatch:
break
else:
self.swap(extremeMatch)
self.Assignment = sorted(self.Assignment, key=self.returnDis)
return self.Assignment[len(self.Assignment) - 1].dis
def swap(self, m):
self.sub_match(m)
chain = []
while True:
chain = self.find_chain(m)
if not chain:
break
else:
ws = float('inf')
ws = min(ws, self.P[chain[0] - len(self.O)].capacity)
ws = min(ws, self.O[chain[len(chain) - 1]].demand)
for i in range(1, len(chain) - 1, 2):
tmpo = chain[i]
tmpp = chain[i + 1] - len(self.O)
for tmp in self.Assignment:
if tmp.o == tmpo and tmp.p == tmpp:
ws = min(ws, tmp.w)
break
for i in range(1, len(chain) - 1, 2):
tmpo = chain[i]
tmpp = chain[i + 1] - len(self.O)
for tmp in self.Assignment:
if tmp.o == tmpo and tmp.p == tmpp:
tmpm = copy.deepcopy(tmp)
self.sub_match(tmp)
if tmpm.w != ws:
tmpm.w = tmpm.w - ws
self.add_match(tmpm)
break
for i in range(0, len(chain), 2):
tmpo = chain[i + 1]
tmpp = chain[i] - len(self.O)
tmpm = Match()
tmpm.o = tmpo
tmpm.p = tmpp
tmpm.w = ws
tmpm.dis = math.sqrt(
(self.O[tmpo].x - self.P[tmpp].x) ** 2 + (self.O[tmpo].y - self.P[tmpp].y) ** 2)
self.add_match(tmpm)
if self.O[m.o].demand == 0:
break
if self.O[m.o].demand > 0:
tmpm = Match()
tmpm.o = m.o
tmpm.p = m.p
tmpm.w = self.O[m.o].demand
tmpm.dis = math.sqrt((self.O[m.o].x - self.P[m.p].x) ** 2 + (self.O[m.o].y - self.P[m.p].y) ** 2)
self.add_match(tmpm)
def find_chain(self, m):
chain = []
flag = False
maxDis = m.dis
Q = []
hash = []
for i in range(0, 2 * (len(self.O) + len(self.P))):
Q.append(Queue())
hash.append(0)
head = 0
tail = 0
hash[m.o] = 1
Q[head].num = m.o
Q[head].parent = -1
tail = tail + 1
while not flag and head != tail:
CurrentNode = Q[head].num
if CurrentNode < len(self.O):
for i in range(0, len(self.P)):
tmpDis = math.sqrt(
(self.O[CurrentNode].x - self.P[i].x) ** 2 + (self.O[CurrentNode].y - self.P[i].y) ** 2)
if tmpDis < maxDis and hash[i + len(self.O)] == 0:
Q[tail].num = i + len(self.O)
Q[tail].parent = head
hash[i + len(self.O)] = 1
tail = (tail + 1) % len(Q)
else:
pNode = CurrentNode - len(self.O)
if self.P[pNode].capacity == 0:
for tmp in self.Assignment:
if tmp.p == pNode and hash[tmp.o] == 0:
hash[tmp.o] = 1
Q[tail].num = tmp.o
Q[tail].parent = head
tail = (tail + 1) % len(Q)
else:
flag = True
tmp = head
while tmp >= 0:
chain.append(Q[tmp].num)
tmp = Q[tmp].parent
head = (head + 1) % len(Q)
if flag:
return chain
else:
return flag
def find_d_satisfiable(self):
hash = []
myQueue = []
haveFound = False
for i in range(0, len(self.O) + len(self.P)):
hash.append(0)
for i in range(0, 2 * (len(self.O) + len(self.P))):
myQueue.append(Queue())
self.Assignment = sorted(self.Assignment, key=self.returnDis)
maxDis = self.Assignment[len(self.Assignment) - 1].dis
k = len(self.Assignment) - 1
extremeMatch = False
while not haveFound and self.Assignment[k].dis == maxDis and k >= 0:
for tmp in hash:
tmp = 0
for tmp in myQueue:
tmp.num = 0
tmp.parent = 0
head = 0
tail = 0
hash[self.Assignment[k].o] = 1
myQueue[head].num = self.Assignment[k].o
myQueue[head].parent = -1
tail += 1
extremeMatch = copy.deepcopy(self.Assignment[k])
self.sub_match(extremeMatch)
while head != tail and not haveFound:
CurrentNode = myQueue[head].num
if CurrentNode < len(self.O):
for i in range(0, len(self.P)):
tmpDis = math.sqrt(
(self.O[CurrentNode].x - self.P[i].x) ** 2 + (self.O[CurrentNode].y - self.P[i].y) ** 2)
if tmpDis < maxDis and hash[i + len(self.O)] == 0:
myQueue[tail].num = i + len(self.O)
myQueue[tail].parent = head
hash[i + len(self.O)] = 1
tail = (tail + 1) % len(myQueue)
else:
pNode = CurrentNode - len(self.O)
if self.P[pNode].capacity == 0:
for tmp in self.Assignment:
if tmp.p == pNode and hash[tmp.o] == 0:
hash[tmp.o] = 1
myQueue[tail].num = tmp.o
myQueue[tail].parent = head
tail = (tail + 1) % len(myQueue)
else:
haveFound = True
head = (head + 1) % len(myQueue)
self.add_match(extremeMatch)
k = k - 1
if haveFound:
return extremeMatch
else:
return False
def distance(self, s):
return s['distance']
def returnDis(self, s):
return s.dis
def add_match(self, m):
flag = False
for tmp in self.Assignment:
if (m.o == tmp.o and m.p == tmp.p):
tmp.w += m.w
flag = True
break
if flag == False:
self.Assignment.append(copy.deepcopy(m))
self.P[m.p].capacity -= m.w
self.O[m.o].demand -= m.w
def sub_match(self, m):
self.P[m.p].capacity += m.w
self.O[m.o].demand += m.w
for tmp in self.Assignment:
if m.o == tmp.o and m.p == tmp.p:
tmp.w -= m.w
if tmp.w == 0:
self.Assignment.remove(tmp)
break
def initiallize_assignment(self):
distanceList = []
for i in range(0, len(self.O)):
distanceList = []
for j in range(0, len(self.P)):
dis = math.sqrt((self.O[i].x - self.P[j].x) ** 2 + (self.O[i].y - self.P[j].y) ** 2)
tmp = {'p': j, 'distance': dis}
distanceList.append(tmp)
distanceList = sorted(distanceList, key=self.distance)
for j in range(0, len(self.P)):
tmp = min(self.O[i].demand, self.P[distanceList[j]['p']].capacity)
if (tmp > 0):
m = Match()
m.o = i
m.p = distanceList[j]['p']
m.w = tmp
m.dis = distanceList[j]['distance']
self.add_match(m)
if self.O[i].demand == 0:
break
self.Assignment = sorted(self.Assignment, key=self.returnDis)
'''for i in range(0,len(self.Assignment)):
print(self.Assignment[i].o, self.Assignment[i].p, self.Assignment[i].w, self.Assignment[i].dis)
'''
def LoadDataFromText(txtpath):
"""
load data from text,return PROVIDERS,CUSTOMERS
"""
fp = open(txtpath, "r")
arr = []
for line in fp.readlines():
arr.append(line.replace("\n", "").split(" "))
fp.close()
NumberOfProviders = int(arr[0][0])
PROVIDERS = []
for i in range(1, NumberOfProviders + 1):
tmp = arr[i]
tmpProvider = ProviderPlus()
tmpProvider.x = float(tmp[0])
tmpProvider.y = float(tmp[1])
tmpProvider.cnt = int(tmp[2])
for j in range(0, tmpProvider.cnt):
tmpProvider.capacity.append(float(tmp[j + 3]))
tmpProvider.cost.append(float(tmp[j + 3 + tmpProvider.cnt]))
PROVIDERS.append(tmpProvider)
NumberOfCustomers = int(arr[NumberOfProviders + 1][0])
CUSTOMERS = []
for i in range(0, NumberOfCustomers):
tmp = arr[i + NumberOfProviders + 2]
tmpCustomer = Customer()
tmpCustomer.x = float(tmp[0])
tmpCustomer.y = float(tmp[1])
tmpCustomer.demand = float(tmp[2])
CUSTOMERS.append(tmpCustomer)
return PROVIDERS, CUSTOMERS
class EDA:
def __init__(self, populationSize, iterationMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate, updateRatio):
self.m_PO = po
self.m_D = D
self.m_PopulationSize = populationSize
self.m_iterMax = iterationMax
self.m_Alpha = alpha
self.m_Beta = beta
self.m_Population = []
self.m_BestSolution = None
self.m_BestFitness = -65536
self.m_BlockMax = blockMax
self.m_Block = 0
self.m_Surrogate = 0
self.m_SurrogateFlag = surrogateFlag
self.m_Iter = 0
self.m_Matrix = [[1 for _ in range(self.m_PO.PROVIDERS[0].cnt)] for _ in range(len(self.m_PO.PROVIDERS))]
self.m_TabuList = []
self.m_CandidateList = []
self.m_TabuMaxLength = tabuMaxLength
self.m_TabuMaxIter = tabuMaxIter
self.m_MaxNumCandidate = maxNumCandidate
self.m_CurrentSolution = None
self.m_CollectGeneration = 3
self.m_EDASearchRunTime = 0
self.m_LocalSearchRunTime = 0
self.m_BestCostPerGen = []
self.m_ConverGen = 0
self.m_UpdateRatio = updateRatio
def calcFitnessParallel(self, geneSerial, data, D, idx):
alpha = self.m_Alpha
beta = self.m_Beta
customers = []
fitness = 0
for item in data.CUSTOMERS:
tmp = Customer()
tmp.x = copy.deepcopy(item.x)
tmp.y = copy.deepcopy(item.y)
tmp.demand = copy.deepcopy(item.demand)
customers.append(tmp)
providers = []
sigmaCost = 0
sigmaCapacity = 0
sigmaDemand = 0
mmd = self.m_D * 1000.0
for i in range(0, len(geneSerial)):
tmpProvider = Provider()
tmpProvider.x = copy.deepcopy(data.PROVIDERS[i].x)
tmpProvider.y = copy.deepcopy(data.PROVIDERS[i].y)
tmpProvider.capacity = copy.deepcopy(data.PROVIDERS[i].capacity[geneSerial[i]])
tmpProvider.cost = copy.deepcopy(data.PROVIDERS[i].cost[geneSerial[i]])
sigmaCost = sigmaCost + tmpProvider.cost
sigmaCapacity = sigmaCapacity + tmpProvider.capacity
providers.append(tmpProvider)
for item in customers:
sigmaDemand = sigmaDemand + item.demand
if sigmaCapacity >= sigmaDemand:
swapchainsolver = SwapChainSolver(providers, customers)
mmd = swapchainsolver.Solver()
if mmd > D:
fitness = -10.0
else:
if sigmaCost != 0:
fitness = float(20.0 / sigmaCost)
else:
fitness = 10.0
else:
fitness = -20.0
return (geneSerial, idx, math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand)
def calcPopulationFitnessWithSurrogate(self, sc):
raw_data = []
for i in range(len(self.m_Population)):
raw_data.append(self.m_Population[i].geneSerial)
raw_data = np.array(raw_data)
num_cluster = int(self.m_PopulationSize * 0.1)
kmeans = KMeans(n_clusters=num_cluster, random_state=0).fit(raw_data)
distances = kmeans.transform(raw_data).sum(axis=1)
labels = kmeans.labels_
for i in range(len(self.m_Population)):
self.m_Population[i].cluster = labels[i]
raw_data_1, closet_item_idx = [], []
for clst in range(num_cluster):
min_idx, min_dist = -1, -1
for idx in range(len(distances)):
if labels[idx] == clst:
if min_dist < 0 and min_dist < 0:
min_idx = idx
min_dist = distances[idx]
elif min_dist > distances[idx]:
min_idx = idx
min_dist = distances[idx]
raw_data_1.append((raw_data[min_idx], min_idx))
closet_item_idx.append(min_idx)
raw_data = raw_data_1
distPop = sc.parallelize(raw_data)
fitnessCalc = distPop.map(
lambda (geneSerial, idx): self.calcFitnessParallel(geneSerial, copy.copy(self.m_PO), copy.copy(self.m_D),idx))
chromosomeCollect = fitnessCalc.collect()
for (geneSerial, idx, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
self.m_Population[idx].fitness = fitness
self.m_Population[idx].mmd = mmd
self.m_Population[idx].sigmaCapacity = sigmaCapacity
self.m_Population[idx].sigmaCost = sigmaCost
self.m_Population[idx].sigmaDemand = sigmaDemand
self.m_Population[idx].calcAccurate = True
for i in range(self.m_PopulationSize):
if i not in closet_item_idx:
self.m_Population[i].fitness = self.m_Population[closet_item_idx[self.m_Population[i].cluster]].fitness
self.m_Population[i].calcAccurate = False
sortedPopulation = sorted(self.m_Population, key=lambda x: x.fitness, reverse=True)
if not self.m_BestSolution:
for i in range(len(sortedPopulation)):
if sortedPopulation[i].calcAccurate:
self.m_BestSolution = copy.deepcopy(sortedPopulation[i])
break
else:
calcAccurateIdx = None
for i in range(len(sortedPopulation)):
if sortedPopulation[i].calcAccurate:
calcAccurateIdx = i
break
if self.m_BestSolution.fitness < sortedPopulation[calcAccurateIdx].fitness:
self.m_BestSolution = copy.deepcopy(sortedPopulation[calcAccurateIdx])
self.m_Block = 0
elif math.fabs(self.m_BestSolution.fitness - sortedPopulation[calcAccurateIdx].fitness) <= 0.001:
self.m_Block += 1
def calcPopulationFitness(self, sc):
raw_data = []
for i in range(len(self.m_Population)):
raw_data.append((self.m_Population[i].geneSerial,i))
distPop = sc.parallelize(raw_data)
fitnessCalc = distPop.map(
lambda (geneSerial, idx): self.calcFitnessParallel(geneSerial, copy.copy(self.m_PO), copy.copy(self.m_D), idx))
chromosomeCollect = fitnessCalc.collect()
for (geneSerial, idx, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
self.m_Population[idx].fitness = fitness
self.m_Population[idx].mmd = mmd
self.m_Population[idx].sigmaCapacity = sigmaCapacity
self.m_Population[idx].sigmaCost = sigmaCost
self.m_Population[idx].sigmaDemand = sigmaDemand
sortedPopulation = sorted(self.m_Population, key=lambda x: x.fitness, reverse=True)
if not self.m_BestSolution:
self.m_BestSolution = copy.deepcopy(sortedPopulation[0])
else:
if self.m_BestSolution.fitness < sortedPopulation[0].fitness:
self.m_BestSolution = copy.deepcopy(sortedPopulation[0])
self.m_Block = 0
elif math.fabs(self.m_BestSolution.fitness - sortedPopulation[0].fitness) <= 0.001:
self.m_Block += 1
def sampleAndEvaluateParallel(self, sc):
self.m_Population = []
raw_data = []
idx = [i for i in range(self.m_PopulationSize)]
distPop = sc.parallelize(idx)
geneSerialSample = distPop.map(lambda idx: self.sampleParallel(idx))
geneSerialCollect = geneSerialSample.collect()
for (idx, geneSerial_tmp) in geneSerialCollect:
chromosome = Chromosome()
chromosome.geneSerial = geneSerial_tmp
self.m_Population.append(chromosome)
if self.m_SurrogateFlag:
self.calcPopulationFitnessWithSurrogate(sc)
else:
self.calcPopulationFitness(sc)
def update(self):
sortedPopulation = sorted(self.m_Population, key=lambda x: x.fitness, reverse=True)
if sortedPopulation[0].fitness > self.m_BestFitness:
self.m_BestFitness = sortedPopulation[0].fitness
self.m_BestSolution = copy.deepcopy(sortedPopulation[0])
self.m_Block = 0
elif math.fabs(sortedPopulation[0].fitness - self.m_BestFitness) <= 0.001:
self.m_Block += 1
for i in range(int(self.m_PopulationSize * self.m_UpdateRatio)):
gene = sortedPopulation[i].geneSerial
for p in range(len(self.m_Matrix)):
row = self.m_Matrix[p]
row[gene[p]] += 1
def sampleParallel(self, idx):
geneSerial = []
for p in range(len(self.m_Matrix)):
row = self.m_Matrix[p]
rowSum = float(sum(row))
cumulateRow = [0 for _ in range(len(row))]
cumulateRow[0] = row[0] / rowSum
for i in range(1, len(row)):
cumulateRow[i] = cumulateRow[i - 1] + row[i] / rowSum
rnd = random.random()
for i in range(len(row)):
if cumulateRow[i] >= rnd:
geneSerial.append(i)
break
return (i, geneSerial)
def EDASearch(self, sc):
self.m_Iter, self.m_Block = 0, 0
while self.m_Iter < self.m_iterMax and self.m_Block < self.m_BlockMax:
self.sampleAndEvaluateParallel(sc)
self.update()
self.m_BestCostPerGen.append(self.m_BestSolution.sigmaCost)
self.m_Iter += 1
self.m_ConverGen = self.m_Iter
def LocalSearch(self, sc):
self.m_Iter, self.m_Block = 0, 0
self.m_CurrentSolution = self.m_BestSolution
while self.m_Iter < self.m_TabuMaxIter and self.m_Block < self.m_BlockMax:
self.m_CandidateList = []
raw_data = []
for _ in range(self.m_MaxNumCandidate):
flag = randint(0, 1)
geneSerial = self.m_CurrentSolution.geneSerial
if flag == 0:
pointA = randint(0, len(self.m_CurrentSolution.geneSerial) - 1)
pointB = randint(0, len(self.m_CurrentSolution.geneSerial) - 1)
tmp = geneSerial[pointA]
geneSerial[pointA] = geneSerial[pointB]
geneSerial[pointB] = tmp
else:
pointA = -1
pointB = randint(0, len(self.m_CurrentSolution.geneSerial) - 1)
geneSerial[pointB] = (geneSerial[pointB] + 1) % self.m_PO.PROVIDERS[
pointB].cnt
if (flag, pointA, pointB) not in set(self.m_TabuList):
raw_data.append((geneSerial, 0))
distPop = sc.parallelize(raw_data)
fitnessCalc = distPop.map(lambda (geneSerial, idx): self.calcFitnessParallel(geneSerial, copy.copy(self.m_PO),copy.copy(self.m_D), idx))
chromosomeCollect = fitnessCalc.collect()
for (geneSerial, idx, fitness, mmd, sigmaCapacity, sigmaCost, sigmaDemand) in chromosomeCollect:
chromosome = Chromosome()
chromosome.geneSerial = geneSerial
chromosome.fitness = fitness
chromosome.mmd = mmd
chromosome.sigmaCapacity = sigmaCapacity
chromosome.sigmaCost = sigmaCost
chromosome.sigmaDemand = sigmaDemand
self.m_CandidateList.append((chromosome, chromosome.fitness, (flag, pointA, pointB)))
nextBestChromosome, nextBestFitness, tabu = sorted(self.m_CandidateList, key=lambda x: x[1], reverse=True)[0]
if self.m_BestSolution.fitness <= nextBestFitness:
self.m_BestSolution = copy.deepcopy(nextBestChromosome)
self.m_Block = 0
elif math.fabs(self.m_BestSolution.fitness - nextBestFitness) <= 0.001:
self.m_Block += 1
if len(self.m_TabuList) >= self.m_TabuMaxLength:
self.m_TabuList.pop(0)
self.m_TabuList.append(tabu)
self.m_CurrentSolution = nextBestChromosome
self.m_Iter += 1
def Search(self, sc):
startTime = time.time()
self.EDASearch(sc)
endTime = time.time()
self.m_EDASearchRunTime = endTime - startTime
self.m_LocalSearchRunTime = 0
if __name__ == "__main__":
    # --- EDA hyper-parameters (values used by every experiment below) ---
    popSize = 100
    iterMax = 100
    blockMax = 110
    alpha = 10000000.00
    beta = 0.01
    D = 40
    surrogateFlag = False
    # --- Tabu-search parameters ---
    tabuMaxLength = 10
    tabuMaxIter = 100
    maxNumCandidate = 10
    updateRatio = 0.1
    # Spark core count is the first command-line argument
    core_num = int(sys.argv[1])
    # Spark cluster configuration (hard-coded master on node "noah007")
    conf = SparkConf().setMaster("spark://noah007:7077") \
        .setAppName("SPC-POSM-EDA") \
        .set("spark.submit.deployMode", "client") \
        .set("spark.cores.max", core_num) \
        .set("spark.executor.cores", "10") \
        .set("spark.executor.memory", "20g") \
        .set("spark.driver.memory", "40g")
    sc = SparkContext(conf=conf)
'''
experiment on different dataset
'''
'''
# instanceSet = ['nuoxi2G'] #, 'nuoxi3G', 'huawei2G', 'huawei3G']
instanceSet = [i for i in range(60)]
aveAns, aveRuntime, aveConverGen = [], [], []
for i in instanceSet:
print i, 'th instance ...'
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
times = 5
sumAns, sumRuntime, sumConverGen = 0, 0, 0
for _ in range(times):
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate, updateRatio)
eda.Search(sc)
sumAns += eda.m_BestSolution.sigmaCost
sumRuntime += (eda.m_EDASearchRunTime + eda.m_LocalSearchRunTime)
sumConverGen = eda.m_ConverGen
aveAns.append(sumAns / (times*1.0))
aveRuntime.append(sumRuntime / (times*1.0))
aveConverGen.append(sumConverGen / (times*1.0))
df = pd.DataFrame({'cost': aveAns, 'EDA runtime': aveRuntime, 'ConverGen':aveConverGen })
df.to_csv('../midResult/edaResult.csv')
'''
'''
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(59) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate)
eda.Search(sc)
df = pd.DataFrame({'cost': eda.m_BestCostPerGen})
df.to_csv('../midResult/edaResultBestCostPergen.csv')
'''
'''
experiment of convergence
'''
    # Convergence experiment: run the EDA on instances 0..19 and accumulate
    # the best-cost-per-generation curves (this file is Python 2: note the
    # print statement below)
    instNum = 20
    instList = [i for i in range(0,instNum)]
    costPerGenList = []
    for i in instList:
        # po holds the PROVIDERS and CUSTOMERS data of one instance
        po = PO()
        po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
        eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter,
                  maxNumCandidate, updateRatio)
        eda.Search(sc)
        costPerGenList.append(eda.m_BestCostPerGen)
    costPerGenNpArr = np.array(costPerGenList)
    # sum the per-generation curves across the 20 instances
    costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
    print costPerGenNpArr
    # NOTE(review): the 'aveCost' column actually stores the SUM over the
    # instances — the division by instNum appears commented out elsewhere in
    # this script; confirm whether an average was intended
    df = pd.DataFrame({'aveCost': costPerGenNpArr})
    df.to_csv('../midResult/edaResultBestCostPerGen1.csv')
'''
instNum = 40
instList = [i for i in range(20,instNum)]
costPerGenList = []
for i in instList:
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
costPerGenList.append(eda.m_BestCostPerGen)
costPerGenNpArr = np.array(costPerGenList)
# print costPerGenList
# print costPerGenNpArr
# print type(costPerGenNpArr)
costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
print costPerGenNpArr
# costPerGenNpArr = costPerGenNpArr / float(instNum)
df = pd.DataFrame({'aveCost': costPerGenNpArr})
df.to_csv('../midResult/edaResultBestCostPerGen2.csv')
instNum = 60
instList = [i for i in range(40,instNum)]
costPerGenList = []
for i in instList:
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/instance' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D, surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
costPerGenList.append(eda.m_BestCostPerGen)
costPerGenNpArr = np.array(costPerGenList)
# print costPerGenList
# print costPerGenNpArr
# print type(costPerGenNpArr)
costPerGenNpArr = np.sum(costPerGenNpArr, axis=0)
print costPerGenNpArr
# costPerGenNpArr = costPerGenNpArr / float(instNum)
df = pd.DataFrame({'aveCost': costPerGenNpArr})
df.to_csv('../midResult/edaResultBestCostPerGen3.csv')
'''
'''
geneSerialList, sigmaCostList = [], []
D = [40,6,40,50,60,40]
for i in range(1,7):
print i, 'th instance ...'
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/data' + str(i) + '.txt')
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D[i-1], surrogateFlag, tabuMaxLength, tabuMaxIter, maxNumCandidate, updataRatio)
eda.Search(sc)
geneSerialList.append(eda.m_BestSolution.geneSerial)
sigmaCostList.append(eda.m_BestSolution.sigmaCost)
df = pd.DataFrame({'sigmaCost':sigmaCostList, 'geneSerial': geneSerialList})
df.to_csv('../midResult/edaTinyDatasetResult.csv')
'''
'''
experiment on testing different update ratio
'''
'''
updateRatioList = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
instList = [1,2,3,4,5,6]
D = [40, 6, 40, 50, 60, 40]
for i in instList:
print i, 'th instance'
aveAns, aveRuntime, aveConverGen = [], [], []
for updateRatio in updateRatioList:
print updateRatio, 'update ratio'
# po is data contains informantion about PROVIDERS and CUSTOMERS
po = PO()
# read providers and customers data from text
po.PROVIDERS, po.CUSTOMERS = LoadDataFromText('../data/data' + str(i) + '.txt')
times = 5
sumAns, sumRuntime, sumConverGen = 0, 0, 0
for _ in range(times):
eda = EDA(popSize, iterMax, blockMax, po, alpha, beta, D[i-1], surrogateFlag, tabuMaxLength, tabuMaxIter,
maxNumCandidate, updateRatio)
eda.Search(sc)
sumAns += eda.m_BestSolution.sigmaCost
sumRuntime += (eda.m_EDASearchRunTime + eda.m_LocalSearchRunTime)
sumConverGen = eda.m_ConverGen
aveAns.append(sumAns / (times * 1.0))
aveRuntime.append(sumRuntime / (times * 1.0))
aveConverGen.append(sumConverGen / (times * 1.0))
df = pd.DataFrame({'ratio': updateRatioList, 'cost': aveAns, 'EDA runtime': aveRuntime, 'ConverGen': aveConverGen})
df.to_csv('../midResult/edaResultUpdateRatioData'+str(i)+'.csv')
'''
| false | true |
f7f86aba727d1eeca2750c813dc714f609b6595d | 7,810 | py | Python | docs/conf.py | yazanattar99/MLOps | 0d11a10a23cbb3144590cb29e982e2c484b94f36 | [
"MIT"
] | null | null | null | docs/conf.py | yazanattar99/MLOps | 0d11a10a23cbb3144590cb29e982e2c484b94f36 | [
"MIT"
] | null | null | null | docs/conf.py | yazanattar99/MLOps | 0d11a10a23cbb3144590cb29e982e2c484b94f36 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# churn_model documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# NOTE: os/sys are only needed by the commented-out sys.path.insert line below
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'churn_model'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'churn_modeldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'churn_model.tex',
u'churn_model Documentation',
u"Yazan_Attar", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'churn_model', u'churn_model Documentation',
[u"Yazan_Attar"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'churn_model', u'churn_model Documentation',
u"Yazan_Attar", 'churn_model',
'End to End Machine learning pipeline with MLOps tools', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.877551 | 80 | 0.708195 |
import os
import sys
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'churn_model'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'churn_modeldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'churn_model.tex',
u'churn_model Documentation',
u"Yazan_Attar", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'churn_model', u'churn_model Documentation',
[u"Yazan_Attar"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'churn_model', u'churn_model Documentation',
u"Yazan_Attar", 'churn_model',
'End to End Machine learning pipeline with MLOps tools', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true | true |
f7f86ac652d3ecf96126667f7c99c6a22c96758f | 408 | py | Python | test/travis_test_buzzer.py | mayabook/pimouse_ros | f7fb6e62561d8ebff8577e562d6f516dff0a43ea | [
"BSD-3-Clause"
] | null | null | null | test/travis_test_buzzer.py | mayabook/pimouse_ros | f7fb6e62561d8ebff8577e562d6f516dff0a43ea | [
"BSD-3-Clause"
] | null | null | null | test/travis_test_buzzer.py | mayabook/pimouse_ros | f7fb6e62561d8ebff8577e562d6f516dff0a43ea | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy, unittest, rostest
import rosnode
import time
class BuzzerTest(unittest.TestCase):
    """Integration check that the buzzer ROS node is running."""

    def test_node_exist(self):
        # The buzzer node must appear in the list of registered ROS nodes
        self.assertIn('/buzzer', rosnode.get_node_names(), "node does not exist")
if __name__ == '__main__':
    # wait 3 s before checking (presumably so the nodes under test have time
    # to start — TODO confirm)
    time.sleep(3)
    rospy.init_node('travis_test_buzzer')
    rostest.rosrun('pimouse_ros','travis_test_buzzer',BuzzerTest)
| 25.5 | 65 | 0.720588 |
import rospy, unittest, rostest
import rosnode
import time
class BuzzerTest(unittest.TestCase):
def test_node_exist(self):
nodes = rosnode.get_node_names()
self.assertIn('/buzzer',nodes, "node does not exist")
if __name__ == '__main__':
time.sleep(3)
rospy.init_node('travis_test_buzzer')
rostest.rosrun('pimouse_ros','travis_test_buzzer',BuzzerTest)
| true | true |
f7f86c5a614607a75f8761edb936740344c72f81 | 366 | py | Python | copy_logo.py | Suke-H/LearningToPaint | 444e01477ff1b632df931da67af67464c0f9cb98 | [
"MIT"
] | null | null | null | copy_logo.py | Suke-H/LearningToPaint | 444e01477ff1b632df931da67af67464c0f9cb98 | [
"MIT"
] | null | null | null | copy_logo.py | Suke-H/LearningToPaint | 444e01477ff1b632df931da67af67464c0f9cb98 | [
"MIT"
] | null | null | null | from glob import glob
import numpy as np
import shutil
import os
SOURCE_PATH = "data/LLD-logo-files/"
MV_PATH = "image/"
imgs = np.array(glob(SOURCE_PATH + "**"))
N = len(imgs)
print(N)
choice = np.random.choice(N, 10)
print(choice)
for img in imgs[choice]:
# img_name = os.path.basename(img)
# os.remove(MV_PATH + img_name)
shutil.copy(img, MV_PATH) | 19.263158 | 41 | 0.688525 | from glob import glob
import numpy as np
import shutil
import os
SOURCE_PATH = "data/LLD-logo-files/"
MV_PATH = "image/"
imgs = np.array(glob(SOURCE_PATH + "**"))
N = len(imgs)
print(N)
choice = np.random.choice(N, 10)
print(choice)
for img in imgs[choice]:
shutil.copy(img, MV_PATH) | true | true |
f7f86ceb9999dbd51c5edf24f3d3f78ecf0d45ed | 9,135 | py | Python | pyleecan/GUI/Dialog/DMachineSetup/SPreview/WMachineTable/WMachineTable.py | nnassar98/pyleecan | 3a6ffe14ab46e90dc0b2855386623833c622b95e | [
"Apache-2.0"
] | 5 | 2020-03-05T15:22:39.000Z | 2022-03-02T15:26:08.000Z | pyleecan/GUI/Dialog/DMachineSetup/SPreview/WMachineTable/WMachineTable.py | thalesmaoa/pyleecan | c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3 | [
"Apache-2.0"
] | 8 | 2020-07-09T07:43:01.000Z | 2022-03-08T12:52:06.000Z | pyleecan/GUI/Dialog/DMachineSetup/SPreview/WMachineTable/WMachineTable.py | thalesmaoa/pyleecan | c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3 | [
"Apache-2.0"
] | 4 | 2019-12-23T12:38:01.000Z | 2022-01-07T10:47:48.000Z | # -*- coding: utf-8 -*-
from logging import getLogger
from os.path import join
import matplotlib.pyplot as plt
from PySide2.QtWidgets import QFileDialog, QTableWidgetItem, QWidget, QMessageBox
from ......Classes._FEMMHandler import _FEMMHandler
from ......Classes.Output import Output
from ......Classes.Simu1 import Simu1
from ......Classes.OPdq import OPdq
from ......Classes.OPslip import OPslip
from ......definitions import config_dict
from ......loggers import GUI_LOG_NAME
from ......Functions.FEMM.update_FEMM_simulation import update_FEMM_simulation
from ......Functions.FEMM.draw_FEMM import draw_FEMM
from ......Functions.Plot.set_plot_gui_icon import set_plot_gui_icon
from ......GUI.Dialog.DMachineSetup.SPreview.WMachineTable.Ui_WMachineTable import (
Ui_WMachineTable,
)
from SciDataTool import DataLinspace
from ......Methods.Simulation.MagElmer import (
MagElmer_BP_dict,
)
try:
from ......Functions.GMSH.draw_GMSH import draw_GMSH
except Exception as e:
draw_GMSH = e
try:
from pyleecan.Functions.GMSH.gen_3D_mesh import gen_3D_mesh
except Exception as e:
gen_3D_mesh = e
class WMachineTable(Ui_WMachineTable, QWidget):
    """Table to display the main parameters of the machine"""

    def __init__(self, parent=None):
        """Initialize the GUI

        Parameters
        ----------
        self : WMachineTable
            A WMachineTable widget
        parent : QWidget
            The parent widget (None by default)
        """
        # Build the interface according to the .ui file
        QWidget.__init__(self, parent)
        self.setupUi(self)

        self.machine = None

        # Connect the widgets
        self.b_mmf.clicked.connect(self.plot_mmf)
        self.b_FEMM.clicked.connect(self.draw_FEMM)
        if isinstance(draw_GMSH, Exception):
            # GMSH import failed: disable the related buttons and expose the
            # import error to the user through the tooltip / whatsThis texts
            self.b_GMSH.setEnabled(False)
            self.b_GMSH.setWhatsThis(str(draw_GMSH))
            self.b_GMSH.setToolTip(str(draw_GMSH))
            self.b_GMSH_3D.setEnabled(False)
            self.b_GMSH_3D.setWhatsThis(str(gen_3D_mesh))
            self.b_GMSH_3D.setToolTip(str(gen_3D_mesh))
        else:
            self.b_GMSH.clicked.connect(self.draw_GMSH)
            self.b_GMSH_3D.clicked.connect(self.draw_GMSH_3D)
        self.b_plot_machine.clicked.connect(self.plot_machine)

    def update_tab(self, machine):
        """Update the table to match the machine

        Parameters
        ----------
        self : WMachineTable
            A WMachineTable object
        machine : Machine
            Machine whose description is displayed in the table
        """
        self.machine = machine
        desc_dict = self.machine.comp_desc_dict()
        self.tab_param.clear()
        # Set header
        self.tab_param.setColumnCount(2)
        item = QTableWidgetItem("Name")
        self.tab_param.setHorizontalHeaderItem(0, item)
        item = QTableWidgetItem("Value")
        self.tab_param.setHorizontalHeaderItem(1, item)
        # Set content: one row per parameter that has a defined value
        for ii, desc in enumerate(desc_dict):
            if desc["value"] is not None:
                self.tab_param.insertRow(ii)
                self.tab_param.setItem(ii, 0, QTableWidgetItem(desc["verbose"]))
                if desc["type"] is float:
                    txt = format(desc["value"], ".4g")
                else:
                    txt = str(desc["value"])
                if desc["unit"] not in ["", None]:
                    txt += " " + desc["unit"]
                self.tab_param.setItem(ii, 1, QTableWidgetItem(txt))

    def plot_mmf(self):
        """Plot the unit mmf of the stator"""
        if self.machine is not None:
            self.machine.stator.plot_mmf_unit(is_show_fig=True)
            set_plot_gui_icon()

    def plot_machine(self):
        """Plot the machine"""
        if self.machine is not None:
            self.machine.plot()
            set_plot_gui_icon()

    def draw_FEMM(self):
        """Draw the Machine in FEMM and save the resulting .fem file"""

        save_file_path = self.get_save_path(ext=".fem", file_type="FEMM (*.fem)")
        # Avoid bug due to user closing the popup without selecting a file
        # BUGFIX: the original test was "is [None, '']", an identity comparison
        # with a fresh list literal that is always False; "in" is intended
        if save_file_path in [None, ""]:
            return
        femm = _FEMMHandler()
        output = Output(simu=Simu1(machine=self.machine))
        # Periodicity
        sym, is_antiper, _, _ = self.machine.comp_periodicity()
        if is_antiper:
            sym *= 2
        # Set Current (constant J in a layer)
        S_slot = self.machine.stator.slot.comp_surface_active()
        (Nrad, Ntan) = self.machine.stator.winding.get_dim_wind()
        Ntcoil = self.machine.stator.winding.Ntcoil
        Sphase = S_slot / (Nrad * Ntan)
        J = 5e6  # current density used for the preview
        if self.machine.is_synchronous():
            output.elec.OP = OPdq(felec=60)
        else:
            output.elec.OP = OPslip(felec=60)
        output.elec.OP.set_Id_Iq(Id=J * Sphase / Ntcoil, Iq=0)
        output.elec.Time = DataLinspace(
            name="time",
            unit="s",
            initial=0,
            final=60,
            number=20,
            include_endpoint=False,
        )
        time = output.elec.Time.get_values(
            is_oneperiod=False,
            is_antiperiod=False,
        )
        Is = output.elec.comp_I_mag(time, is_stator=True)
        alpha = output.get_angle_rotor_initial()
        try:
            # Draw the machine
            FEMM_dict = draw_FEMM(
                femm,
                output,
                is_mmfr=True,
                is_mmfs=True,
                sym=sym,
                is_antiper=is_antiper,
                type_calc_leakage=0,
                path_save=None,
                is_sliding_band=True,
            )
            # Set the current
            update_FEMM_simulation(
                femm=femm,
                circuits=FEMM_dict["circuits"],
                is_sliding_band=True,
                is_internal_rotor=self.machine.rotor.is_internal,
                angle_rotor=[alpha],
                Is=Is,
                Ir=None,
                ii=0,
            )
            femm.mi_saveas(save_file_path)  # Save
        except Exception as e:
            err_msg = (
                "Error while drawing machine "
                + self.machine.name
                + " in FEMM:\n"
                + str(e)
            )
            getLogger(GUI_LOG_NAME).error(err_msg)
            QMessageBox().critical(
                self,
                self.tr("Error"),
                self.tr(err_msg),
            )
        # Always close the FEMM instance, even when drawing failed
        femm.closefemm()

    def draw_GMSH(self):
        """Draw the Machine in GMSH and save the 2D mesh"""

        save_file_path = self.get_save_path(ext=".msh", file_type="GMSH (*.msh)")
        # Avoid bug due to user closing the popup without selecting a file
        # BUGFIX: "in" instead of the always-False "is [None, '']"
        if save_file_path in [None, ""]:
            return
        # Create the Simulation
        mySimu = Simu1(name="test_gmsh_ipm", machine=self.machine)
        myResults = Output(simu=mySimu)
        sym, is_antiper, _, _ = self.machine.comp_periodicity()
        if is_antiper:
            sym *= 2
        try:
            draw_GMSH(
                output=myResults,
                sym=sym,
                boundary_prop=MagElmer_BP_dict,
                is_lam_only_S=False,
                is_lam_only_R=False,
                user_mesh_dict=None,
                is_sliding_band=True,
                is_airbox=True,
                path_save=save_file_path,
            )
        except Exception as e:
            err_msg = (
                "Error while drawing machine "
                + self.machine.name
                + " in GMSH:\n"
                + str(e)
            )
            getLogger(GUI_LOG_NAME).error(err_msg)
            QMessageBox().critical(
                self,
                self.tr("Error"),
                self.tr(err_msg),
            )

    def draw_GMSH_3D(self):
        """Generate the 3D mesh of the stator with GMSH"""

        save_file_path = self.get_save_path(ext="_stator.msh", file_type="GMSH (*.msh)")
        # Avoid bug due to user closing the popup without selecting a file
        # BUGFIX: "in" instead of the always-False "is [None, '']"
        if save_file_path in [None, ""]:
            return
        try:
            gen_3D_mesh(
                lamination=self.machine.stator,
                save_path=save_file_path,
                mesh_size=(self.machine.stator.Rext - self.machine.stator.Rint) / 20,
                Nlayer=20,
                display=False,
            )
        except Exception as e:
            err_msg = (
                "Error while drawing machine "
                + self.machine.name
                + " in GMSH:\n"
                + str(e)
            )
            getLogger(GUI_LOG_NAME).error(err_msg)
            QMessageBox().critical(
                self,
                self.tr("Error"),
                self.tr(err_msg),
            )

    def get_save_path(self, ext=".fem", file_type="FEMM (*.fem)"):
        """Open a save-file dialog and return the selected path

        Parameters
        ----------
        ext : str
            Default extension appended to the machine name for the proposal
        file_type : str
            File-type filter string of the dialog

        Returns
        -------
        save_file_path : str
            Selected path ("" when the user cancels the dialog)
        """
        machine_path = config_dict["MAIN"]["MACHINE_DIR"]
        # Ask the user to select a file to save
        if self.machine.name in ["", None]:
            return QFileDialog.getSaveFileName(
                self, self.tr("Save file"), machine_path, file_type
            )[0]
        else:
            def_path = join(machine_path, self.machine.name + ext)
            return QFileDialog.getSaveFileName(
                self, self.tr("Save file"), def_path, file_type
            )[0]
| 33.833333 | 88 | 0.555665 |
from logging import getLogger
from os.path import join
import matplotlib.pyplot as plt
from PySide2.QtWidgets import QFileDialog, QTableWidgetItem, QWidget, QMessageBox
from ......Classes._FEMMHandler import _FEMMHandler
from ......Classes.Output import Output
from ......Classes.Simu1 import Simu1
from ......Classes.OPdq import OPdq
from ......Classes.OPslip import OPslip
from ......definitions import config_dict
from ......loggers import GUI_LOG_NAME
from ......Functions.FEMM.update_FEMM_simulation import update_FEMM_simulation
from ......Functions.FEMM.draw_FEMM import draw_FEMM
from ......Functions.Plot.set_plot_gui_icon import set_plot_gui_icon
from ......GUI.Dialog.DMachineSetup.SPreview.WMachineTable.Ui_WMachineTable import (
Ui_WMachineTable,
)
from SciDataTool import DataLinspace
from ......Methods.Simulation.MagElmer import (
MagElmer_BP_dict,
)
try:
from ......Functions.GMSH.draw_GMSH import draw_GMSH
except Exception as e:
draw_GMSH = e
try:
from pyleecan.Functions.GMSH.gen_3D_mesh import gen_3D_mesh
except Exception as e:
gen_3D_mesh = e
class WMachineTable(Ui_WMachineTable, QWidget):
    """Widget showing a machine description table with plot/export buttons.

    Provides buttons to plot the machine or its MMF and to export the
    machine geometry to FEMM (.fem) or GMSH (.msh) files.
    """

    def __init__(self, parent=None):
        """Initialize the widget and connect all button signals.

        :param parent: Parent widget, defaults to None
        """
        QWidget.__init__(self, parent)
        self.setupUi(self)
        self.machine = None  # Machine currently displayed, set by update_tab
        self.b_mmf.clicked.connect(self.plot_mmf)
        self.b_FEMM.clicked.connect(self.draw_FEMM)
        if isinstance(draw_GMSH, Exception):
            # GMSH is an optional dependency: when the import failed, disable
            # the GMSH buttons and expose the import error as the tooltip.
            # NOTE(review): gen_3D_mesh can fail to import even when draw_GMSH
            # succeeds; in that case b_GMSH_3D stays enabled and draw_GMSH_3D
            # would raise at call time -- confirm this is intended.
            self.b_GMSH.setEnabled(False)
            self.b_GMSH.setWhatsThis(str(draw_GMSH))
            self.b_GMSH.setToolTip(str(draw_GMSH))
            self.b_GMSH_3D.setEnabled(False)
            self.b_GMSH_3D.setWhatsThis(str(gen_3D_mesh))
            self.b_GMSH_3D.setToolTip(str(gen_3D_mesh))
        else:
            self.b_GMSH.clicked.connect(self.draw_GMSH)
            self.b_GMSH_3D.clicked.connect(self.draw_GMSH_3D)
        self.b_plot_machine.clicked.connect(self.plot_machine)

    def update_tab(self, machine):
        """Fill the parameter table with the machine's description dict.

        :param machine: Machine whose description should be displayed.
        """
        self.machine = machine
        desc_dict = self.machine.comp_desc_dict()
        self.tab_param.clear()
        self.tab_param.setColumnCount(2)
        item = QTableWidgetItem("Name")
        self.tab_param.setHorizontalHeaderItem(0, item)
        item = QTableWidgetItem("Value")
        self.tab_param.setHorizontalHeaderItem(1, item)
        for ii, desc in enumerate(desc_dict):
            if desc["value"] is not None:
                self.tab_param.insertRow(ii)
                self.tab_param.setItem(ii, 0, QTableWidgetItem(desc["verbose"]))
                if desc["type"] is float:
                    # Compact 4-significant-digit formatting for floats
                    txt = format(desc["value"], ".4g")
                else:
                    txt = str(desc["value"])
                if desc["unit"] not in ["", None]:
                    txt += " " + desc["unit"]
                self.tab_param.setItem(ii, 1, QTableWidgetItem(txt))

    def plot_mmf(self):
        """Plot the unit MMF of the stator winding."""
        if self.machine is not None:
            self.machine.stator.plot_mmf_unit(is_show_fig=True)
            set_plot_gui_icon()

    def plot_machine(self):
        """Plot the machine geometry."""
        if self.machine is not None:
            self.machine.plot()
            set_plot_gui_icon()

    def draw_FEMM(self):
        """Draw the machine in FEMM and save it as a .fem file.

        Feeds the stator winding with a current computed from a fixed
        current density J = 5e6 (assumed A/m^2 -- confirm), then draws the
        machine with its sliding band and saves the FEMM model.
        """
        save_file_path = self.get_save_path(ext=".fem", file_type="FEMM (*.fem)")
        # BUGFIX: was "is [None, '']" which is never True ("is" compares
        # identity with a fresh list). An empty path means the dialog was
        # cancelled, so abort in that case.
        if save_file_path in [None, ""]:
            return
        femm = _FEMMHandler()
        output = Output(simu=Simu1(machine=self.machine))
        sym, is_antiper, _, _ = self.machine.comp_periodicity()
        if is_antiper:
            sym *= 2
        # Compute a phase current from the active slot surface and a fixed
        # current density.
        S_slot = self.machine.stator.slot.comp_surface_active()
        (Nrad, Ntan) = self.machine.stator.winding.get_dim_wind()
        Ntcoil = self.machine.stator.winding.Ntcoil
        Sphase = S_slot / (Nrad * Ntan)
        J = 5e6
        if self.machine.is_synchronous():
            output.elec.OP = OPdq(felec=60)
        else:
            output.elec.OP = OPslip(felec=60)
        output.elec.OP.set_Id_Iq(Id=J * Sphase / Ntcoil, Iq=0)
        output.elec.Time = DataLinspace(
            name="time",
            unit="s",
            initial=0,
            final=60,
            number=20,
            include_endpoint=False,
        )
        time = output.elec.Time.get_values(
            is_oneperiod=False,
            is_antiperiod=False,
        )
        Is = output.elec.comp_I_mag(time, is_stator=True)
        alpha = output.get_angle_rotor_initial()
        try:
            # Draw the machine geometry, then inject the computed currents
            # before saving the model.
            FEMM_dict = draw_FEMM(
                femm,
                output,
                is_mmfr=True,
                is_mmfs=True,
                sym=sym,
                is_antiper=is_antiper,
                type_calc_leakage=0,
                path_save=None,
                is_sliding_band=True,
            )
            update_FEMM_simulation(
                femm=femm,
                circuits=FEMM_dict["circuits"],
                is_sliding_band=True,
                is_internal_rotor=self.machine.rotor.is_internal,
                angle_rotor=[alpha],
                Is=Is,
                Ir=None,
                ii=0,
            )
            femm.mi_saveas(save_file_path)
        except Exception as e:
            err_msg = (
                "Error while drawing machine "
                + self.machine.name
                + " in FEMM:\n"
                + str(e)
            )
            getLogger(GUI_LOG_NAME).error(err_msg)
            QMessageBox().critical(
                self,
                self.tr("Error"),
                self.tr(err_msg),
            )
        # Always close the FEMM instance, even after a failure.
        femm.closefemm()

    def draw_GMSH(self):
        """Draw the machine in GMSH and save it as a .msh file."""
        save_file_path = self.get_save_path(ext=".msh", file_type="GMSH (*.msh)")
        # BUGFIX: was "is [None, '']" (always False); use membership test so
        # a cancelled dialog aborts the export.
        if save_file_path in [None, ""]:
            return
        mySimu = Simu1(name="test_gmsh_ipm", machine=self.machine)
        myResults = Output(simu=mySimu)
        sym, is_antiper, _, _ = self.machine.comp_periodicity()
        if is_antiper:
            sym *= 2
        try:
            draw_GMSH(
                output=myResults,
                sym=sym,
                boundary_prop=MagElmer_BP_dict,
                is_lam_only_S=False,
                is_lam_only_R=False,
                user_mesh_dict=None,
                is_sliding_band=True,
                is_airbox=True,
                path_save=save_file_path,
            )
        except Exception as e:
            err_msg = (
                "Error while drawing machine "
                + self.machine.name
                + " in GMSH:\n"
                + str(e)
            )
            getLogger(GUI_LOG_NAME).error(err_msg)
            QMessageBox().critical(
                self,
                self.tr("Error"),
                self.tr(err_msg),
            )

    def draw_GMSH_3D(self):
        """Generate a 3D mesh of the stator lamination with GMSH."""
        save_file_path = self.get_save_path(ext="_stator.msh", file_type="GMSH (*.msh)")
        # BUGFIX: was "is [None, '']" (always False); use membership test so
        # a cancelled dialog aborts the export.
        if save_file_path in [None, ""]:
            return
        try:
            gen_3D_mesh(
                lamination=self.machine.stator,
                save_path=save_file_path,
                # Mesh size scaled on the lamination radial thickness
                mesh_size=(self.machine.stator.Rext - self.machine.stator.Rint) / 20,
                Nlayer=20,
                display=False,
            )
        except Exception as e:
            err_msg = (
                "Error while drawing machine "
                + self.machine.name
                + " in GMSH:\n"
                + str(e)
            )
            getLogger(GUI_LOG_NAME).error(err_msg)
            QMessageBox().critical(
                self,
                self.tr("Error"),
                self.tr(err_msg),
            )

    def get_save_path(self, ext=".fem", file_type="FEMM (*.fem)"):
        """Open a save-file dialog and return the path chosen by the user.

        :param ext: File extension appended to the default file name.
        :type ext: str
        :param file_type: File-type filter string for the dialog.
        :type file_type: str
        :return: Selected file path (empty string if the dialog is cancelled).
        :rtype: str
        """
        machine_path = config_dict["MAIN"]["MACHINE_DIR"]
        if self.machine.name in ["", None]:
            return QFileDialog.getSaveFileName(
                self, self.tr("Save file"), machine_path, file_type
            )[0]
        else:
            # Pre-fill the file name from the machine name
            def_path = join(machine_path, self.machine.name + ext)
            return QFileDialog.getSaveFileName(
                self, self.tr("Save file"), def_path, file_type
            )[0]
| true | true |
f7f86d675097ecd0c1a803bf32493b2daf3b5786 | 253 | py | Python | frappe-bench/apps/erpnext/erpnext/setup/doctype/uom/uom.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | frappe-bench/apps/erpnext/erpnext/setup/doctype/uom/uom.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/setup/doctype/uom/uom.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class UOM(Document):
    # Controller for the "UOM" (Unit of Measure) DocType.
    # No custom behavior beyond the frappe Document base class.
    pass
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class UOM(Document):
    # Controller for the "UOM" (Unit of Measure) DocType.
    # No custom behavior beyond the frappe Document base class.
    pass
f7f86fb53925948a4a057bcb045a8d45a27bec4a | 1,312 | py | Python | app/utility/list_back_access.py | syth0le/tg_reminder_bot | 956f552c2c81732aaa41c1f006e31f4167e7cdff | [
"MIT"
] | null | null | null | app/utility/list_back_access.py | syth0le/tg_reminder_bot | 956f552c2c81732aaa41c1f006e31f4167e7cdff | [
"MIT"
] | null | null | null | app/utility/list_back_access.py | syth0le/tg_reminder_bot | 956f552c2c81732aaa41c1f006e31f4167e7cdff | [
"MIT"
] | null | null | null | from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from app.buttons.reply_btns import remindersMenu
from app.reminders import get_all_reminders, get_temporary_reminders, get_bookmarks, get_permanent_reminders
from app.utility.answer_forms import answer_forms
async def back_access(data: dict) -> tuple:
    """Build the reminder-list message text and its inline keyboard.

    Fixes the original annotations: ``data`` is a dict-like callback payload
    (``data["type"]`` is read), and a (text, keyboard) tuple is returned.

    :param data: Callback payload; ``data["type"]`` selects which reminders
        to list ("all", "perm", "temp" or "book").
    :raises KeyError: If ``data["type"]`` is not one of the known types.
    :return: Message text and the inline keyboard to attach to it.
    """
    inline_kb_to_choose = InlineKeyboardMarkup(row_width=6)
    result_string = ''
    # Map each callback type to its reminder getter and human readable label.
    getters = {
        "all": (get_all_reminders, "reminders"),
        "perm": (get_permanent_reminders, "permanent reminders"),
        "temp": (get_temporary_reminders, "temporary reminders"),
        "book": (get_bookmarks, "bookmarks"),
    }
    # Unknown types raise KeyError, matching the original explicit raise.
    getter, type_string = getters[data['type']]
    reminders = getter()
    if reminders:
        # One numbered button per reminder; reminder[0] is its identifier
        # used in the "edit_<id>" callback payload.
        for position, reminder in enumerate(reminders, start=1):
            inline_btn = InlineKeyboardButton(position, callback_data=f"edit_{reminder[0]}")
            inline_kb_to_choose.insert(inline_btn)
            result_string += answer_forms(element=reminder, position=position, adding=True)
    else:
        result_string, inline_kb_to_choose = f"No {type_string} in system.", remindersMenu
    return result_string, inline_kb_to_choose
| 36.444444 | 108 | 0.682165 | from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from app.buttons.reply_btns import remindersMenu
from app.reminders import get_all_reminders, get_temporary_reminders, get_bookmarks, get_permanent_reminders
from app.utility.answer_forms import answer_forms
async def back_access(data: dict) -> tuple:
    """Build the reminder-list message text and its inline keyboard.

    Fixes the original annotations: ``data`` is a dict-like callback payload
    (``data["type"]`` is read), and a (text, keyboard) tuple is returned.

    :param data: Callback payload; ``data["type"]`` selects which reminders
        to list ("all", "perm", "temp" or "book").
    :raises KeyError: If ``data["type"]`` is not one of the known types.
    :return: Message text and the inline keyboard to attach to it.
    """
    inline_kb_to_choose = InlineKeyboardMarkup(row_width=6)
    result_string = ''
    # Map each callback type to its reminder getter and human readable label.
    getters = {
        "all": (get_all_reminders, "reminders"),
        "perm": (get_permanent_reminders, "permanent reminders"),
        "temp": (get_temporary_reminders, "temporary reminders"),
        "book": (get_bookmarks, "bookmarks"),
    }
    # Unknown types raise KeyError, matching the original explicit raise.
    getter, type_string = getters[data['type']]
    reminders = getter()
    if reminders:
        # One numbered button per reminder; reminder[0] is its identifier
        # used in the "edit_<id>" callback payload.
        for position, reminder in enumerate(reminders, start=1):
            inline_btn = InlineKeyboardButton(position, callback_data=f"edit_{reminder[0]}")
            inline_kb_to_choose.insert(inline_btn)
            result_string += answer_forms(element=reminder, position=position, adding=True)
    else:
        result_string, inline_kb_to_choose = f"No {type_string} in system.", remindersMenu
    return result_string, inline_kb_to_choose
| true | true |
f7f86fd20ba936a7f977c7d9fb04efbed7318635 | 10,671 | py | Python | magpylib/_src/display/plotly/plotly_sensor_mesh.py | OrtnerMichael/magPyLib | 4c7e7f56f6e0b915ec0e024c172c460fa80126e5 | [
"BSD-2-Clause"
] | null | null | null | magpylib/_src/display/plotly/plotly_sensor_mesh.py | OrtnerMichael/magPyLib | 4c7e7f56f6e0b915ec0e024c172c460fa80126e5 | [
"BSD-2-Clause"
] | null | null | null | magpylib/_src/display/plotly/plotly_sensor_mesh.py | OrtnerMichael/magPyLib | 4c7e7f56f6e0b915ec0e024c172c460fa80126e5 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
def get_sensor_mesh(
    x_color="red",
    y_color="green",
    z_color="blue",
    center_color="grey",
    x_show=True,
    y_show=True,
    z_show=True,
    center_show=True,
    colorize_tails=True,
):
    """Build a plotly ``mesh3d`` trace of a 3-axis sensor glyph.

    The glyph is a unit cube at the origin plus three arrows along the x, y
    and z axes, colored ``x_color``/``y_color``/``z_color`` (red/green/blue
    by default). Each sub-mesh can be hidden via the ``*_show`` flags; with
    ``colorize_tails`` the arrow shafts take ``center_color`` instead of the
    axis color so only the arrow heads are axis-colored.

    :return: A dict with ``type``/``i``/``j``/``k``/``x``/``y``/``z`` and
        per-face ``facecolor`` entries, ready to pass to ``go.Mesh3d``.
    """
    # fmt: off
    # Hard-coded triangle indices (i, j, k) and vertex coordinates (x, y, z)
    # of the pre-built cube + 3 arrows mesh.
    trace = {
        "type": "mesh3d",
        "i": np.array([75, 64, 2, 75, 76, 65, 65, 64, 2, 0, 1, 0, 84, 86, 86, 90, 90, 92,
                       92, 91, 91, 87, 87, 85, 85, 83, 83, 82, 82, 84, 94, 86, 86, 84, 84, 82,
                       82, 83, 83, 85, 85, 87, 87, 87, 91, 91, 92, 92, 90, 90, 94, 95, 88, 78,
                       79, 81, 80, 93, 96, 89, 77, 70, 72, 72, 74, 74, 73, 73, 50, 52, 52, 44,
                       44, 32, 32, 22, 22, 14, 14, 20, 20, 30, 30, 41, 41, 50, 57, 52, 52, 50,
                       50, 41, 41, 30, 30, 20, 20, 14, 14, 14, 22, 22, 32, 32, 44, 44, 57, 11,
                       4, 12, 58, 62, 8, 7, 39, 61, 42, 51, 43, 43, 31, 31, 21, 21, 48, 54,
                       54, 47, 47, 37, 37, 25, 25, 17, 17, 18, 18, 26, 26, 38, 38, 48, 59, 54,
                       54, 48, 48, 38, 38, 26, 26, 18, 18, 17, 17, 17, 25, 25, 37, 37, 47, 47,
                       59, 27, 5, 10, 56, 60, 6, 9, 55, 63, 28, 53, 45, 45, 35, 35, 23, 23],
                      dtype="int64"),
        "j": np.array([76, 76, 3, 3, 3, 3, 1, 1, 75, 75, 3, 3, 70, 70, 72, 72, 74, 74,
                       73, 73, 71, 71, 69, 69, 67, 67, 66, 66, 68, 68, 89, 89, 81, 81, 79, 79,
                       77, 77, 78, 78, 80, 80, 88, 93, 93, 95, 95, 96, 96, 94, 97, 97, 97, 97,
                       97, 97, 97, 97, 97, 97, 97, 68, 68, 66, 66, 67, 67, 69, 51, 51, 43, 43,
                       31, 31, 21, 21, 13, 13, 19, 19, 29, 29, 40, 40, 49, 49, 61, 61, 62, 62,
                       58, 58, 42, 42, 12, 12, 8, 8, 4, 7, 7, 11, 11, 39, 39, 57, 34, 34,
                       34, 34, 34, 34, 34, 34, 34, 34, 34, 49, 49, 40, 40, 29, 29, 19, 53, 53,
                       45, 45, 35, 35, 23, 23, 15, 15, 16, 16, 24, 24, 36, 36, 46, 46, 63, 63,
                       60, 60, 56, 56, 28, 28, 10, 10, 6, 6, 5, 9, 9, 27, 27, 55, 55, 59,
                       33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 46, 46, 36, 36, 24, 24, 16],
                      dtype="int64"),
        "k": np.array([64, 65, 75, 76, 65, 1, 64, 0, 0, 64, 0, 2, 86, 72, 90, 74, 92, 73,
                       91, 71, 87, 69, 85, 67, 83, 66, 82, 68, 84, 70, 86, 81, 84, 79, 82, 77,
                       83, 78, 85, 80, 87, 88, 93, 91, 95, 92, 96, 90, 94, 86, 89, 96, 93, 80,
                       77, 79, 88, 95, 94, 81, 78, 72, 66, 74, 67, 73, 69, 71, 52, 43, 44, 31,
                       32, 21, 22, 13, 14, 19, 20, 29, 30, 40, 41, 49, 50, 51, 52, 62, 50, 58,
                       41, 42, 30, 12, 20, 8, 14, 4, 7, 22, 11, 32, 39, 44, 57, 52, 61, 39,
                       7, 8, 42, 58, 4, 11, 57, 62, 12, 43, 40, 31, 29, 21, 19, 13, 54, 45,
                       47, 35, 37, 23, 25, 15, 17, 16, 18, 24, 26, 36, 38, 46, 48, 53, 54, 60,
                       48, 56, 38, 28, 26, 10, 18, 6, 17, 5, 9, 25, 27, 37, 55, 47, 59, 54,
                       63, 55, 9, 6, 28, 56, 5, 27, 59, 60, 10, 45, 36, 35, 24, 23, 16, 15],
                      dtype="int64"),
        "x": np.array([-5.00000000e-01, -5.00000000e-01, -5.00000000e-01, -5.00000000e-01,
                       -2.99849272e-01, -2.87847906e-01, -2.87847906e-01, -2.57389992e-01,
                       -2.47108519e-01, -1.96458220e-01, -1.96458220e-01, -1.33211225e-01,
                       -1.15912557e-01, -9.99495536e-02, -9.99495536e-02, -9.39692631e-02,
                       -9.39692631e-02, -9.39692631e-02, -9.39692631e-02, -7.86073282e-02,
                       -7.86073282e-02, -7.45242685e-02, -7.45242685e-02, -5.00000007e-02,
                       -5.00000007e-02, -5.00000007e-02, -5.00000007e-02, -4.26944532e-02,
                       -4.26944532e-02, -2.04838570e-02, -2.04838570e-02, -1.42282564e-02,
                       -1.42282564e-02, -2.08166817e-16, -1.91079873e-16, 1.73648186e-02,
                       1.73648186e-02, 1.73648186e-02, 1.73648186e-02, 3.32611799e-02,
                       4.72242348e-02, 4.72242348e-02, 5.20848148e-02, 5.27253151e-02,
                       5.27253151e-02, 7.66044408e-02, 7.66044408e-02, 7.66044408e-02,
                       7.66044408e-02, 9.28355828e-02, 9.28355828e-02, 9.50081274e-02,
                       9.50081274e-02, 9.99999940e-02, 9.99999940e-02, 1.24624498e-01,
                       1.24624498e-01, 1.89173400e-01, 2.03545630e-01, 2.52376080e-01,
                       2.52376080e-01, 2.85024375e-01, 2.90382177e-01, 2.99999982e-01,
                       5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 2.00000000e+00]),
        "y": np.array([-5.00000000e-01, -5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       -9.50860139e-03, 1.48038471e+00, 1.48038471e+00, 1.54111609e-01,
                       -1.70109898e-01, 1.48038471e+00, 1.48038471e+00, 2.68802464e-01,
                       -2.76702493e-01, 3.17605096e-03, 3.17605096e-03, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, -6.18133359e-02,
                       -6.18133359e-02, 6.66793287e-02, 6.66793287e-02, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, -9.78795737e-02, -9.78795737e-02, 9.89826098e-02,
                       9.89826098e-02, 2.00000000e+00, -6.27497823e-17, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 2.98150450e-01,
                       -8.81468803e-02, -8.81468803e-02, -2.95444012e-01, 8.49708244e-02,
                       8.49708244e-02, 5.00000000e-01, 5.00000000e-01, 1.48038471e+00,
                       1.48038471e+00, -3.71692739e-02, -3.71692739e-02, 3.12002450e-02,
                       3.12002450e-02, 5.00000000e-01, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 2.32837781e-01, -2.20384151e-01, 1.48038471e+00,
                       1.48038471e+00, 9.36007351e-02, -7.53538683e-02, 1.48038471e+00,
                       -5.00000000e-01, -5.00000000e-01, -9.44050848e-02, -9.35176238e-02,
                       -5.11193462e-02, -4.88722362e-02, 1.60856955e-02, 1.86410155e-02,
                       7.57640675e-02, 7.74319321e-02, 9.99915898e-02, 5.00000000e-01,
                       5.00000000e-01, -2.99947202e-01, -2.55374074e-01, -2.49289244e-01,
                       -1.29721463e-01, -1.19483687e-01, -9.44050848e-02, -9.35176238e-02,
                       -5.11193462e-02, -4.88722362e-02, 1.60856955e-02, 1.86410155e-02,
                       3.71167921e-02, 4.82570902e-02, 7.57640675e-02, 7.74319321e-02,
                       9.99915898e-02, 1.92170724e-01, 2.00676590e-01, 2.86211818e-01,
                       2.89382666e-01, -3.23514738e-17]),
        "z": np.array([-5.00000000e-01, 5.00000000e-01, -5.00000000e-01, 5.00000000e-01,
                       1.48038471e+00, -8.45197663e-02, 8.45197663e-02, 1.48038471e+00,
                       1.48038471e+00, -2.26724878e-01, 2.26724878e-01, 1.48038471e+00,
                       1.48038471e+00, 5.00000000e-01, 1.48038471e+00, -3.42020132e-02,
                       3.42020132e-02, -3.42020132e-02, 3.42020132e-02, 5.00000000e-01,
                       1.48038471e+00, 5.00000000e-01, 1.48038471e+00, -8.66025388e-02,
                       8.66025388e-02, -8.66025388e-02, 8.66025388e-02, -2.96946436e-01,
                       2.96946436e-01, 5.00000000e-01, 1.48038471e+00, 5.00000000e-01,
                       1.48038471e+00, 0.00000000e+00, 2.00000000e+00, -9.84807760e-02,
                       9.84807760e-02, -9.84807760e-02, 9.84807760e-02, 1.48038471e+00,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 5.00000000e-01,
                       1.48038471e+00, -6.42787591e-02, 6.42787591e-02, -6.42787591e-02,
                       6.42787591e-02, 5.00000000e-01, 1.48038471e+00, 5.00000000e-01,
                       1.48038471e+00, 0.00000000e+00, 0.00000000e+00, -2.72889614e-01,
                       2.72889614e-01, 1.48038471e+00, 1.48038471e+00, -1.62192255e-01,
                       1.62192255e-01, 1.48038471e+00, 1.48038471e+00, 0.00000000e+00,
                       -5.00000000e-01, 5.00000000e-01, -3.29800062e-02, 3.54182646e-02,
                       -8.59465674e-02, 8.72439370e-02, -9.86977741e-02, 9.82472003e-02,
                       -6.52671903e-02, 6.32795095e-02, -1.29736937e-03, -5.00000000e-01,
                       5.00000000e-01, -5.62742725e-03, 1.57429606e-01, -1.66897804e-01,
                       2.70503879e-01, -2.75179297e-01, -3.29800062e-02, 3.54182646e-02,
                       -8.59465674e-02, 8.72439370e-02, -9.86977741e-02, 9.82472003e-02,
                       2.97695041e-01, -2.96093315e-01, -6.52671903e-02, 6.32795095e-02,
                       -1.29736937e-03, 2.30370149e-01, -2.22999811e-01, 8.99043754e-02,
                       -7.91054145e-02, 1.98500464e-16]),
    }
    # fmt: on
    # With colorize_tails, the arrow shafts ("tails") take the center color
    # so only the arrow heads keep the axis colors.
    x_color_tail = x_color
    y_color_tail = y_color
    z_color_tail = z_color
    if colorize_tails:
        x_color_tail = center_color
        y_color_tail = center_color
        z_color_tail = center_color
    # Face layout: 12 cube faces, then 56 faces per arrow of which the first
    # N2 = 18 belong to the tail.
    N, N2 = 56, 18
    trace["facecolor"] = np.concatenate(
        [
            [center_color] * 12,
            [x_color_tail] * (N2),
            [x_color] * (N - N2),
            [y_color_tail] * (N2),
            [y_color] * (N - N2),
            [z_color_tail] * (N2),
            [z_color] * (N - N2),
        ]
    )
    # Face index ranges of the four sub-meshes; note the arrow face order in
    # the mesh is center, x, z, y (the show tuple below matches that order).
    indices = ((0, 12), (12, 68), (68, 124), (124, 180))
    show = (center_show, x_show, z_show, y_show)
    # Keep only the faces (and their colors) of the sub-meshes that are shown.
    for k in ("i", "j", "k", "facecolor"):
        t = []
        for i, s in zip(indices, show):
            if s:
                t.extend(trace[k][i[0] : i[1]])
        trace[k] = np.array(t)
    return trace
| 66.69375 | 94 | 0.500984 | import numpy as np
def get_sensor_mesh(
    x_color="red",
    y_color="green",
    z_color="blue",
    center_color="grey",
    x_show=True,
    y_show=True,
    z_show=True,
    center_show=True,
    colorize_tails=True,
):
    """Build a plotly ``mesh3d`` trace of a 3-axis sensor glyph.

    The glyph is a unit cube at the origin plus three arrows along the x, y
    and z axes, colored ``x_color``/``y_color``/``z_color`` (red/green/blue
    by default). Each sub-mesh can be hidden via the ``*_show`` flags; with
    ``colorize_tails`` the arrow shafts take ``center_color`` instead of the
    axis color so only the arrow heads are axis-colored.

    :return: A dict with ``type``/``i``/``j``/``k``/``x``/``y``/``z`` and
        per-face ``facecolor`` entries, ready to pass to ``go.Mesh3d``.
    """
    # Hard-coded triangle indices (i, j, k) and vertex coordinates (x, y, z)
    # of the pre-built cube + 3 arrows mesh.
    trace = {
        "type": "mesh3d",
        "i": np.array([75, 64, 2, 75, 76, 65, 65, 64, 2, 0, 1, 0, 84, 86, 86, 90, 90, 92,
                       92, 91, 91, 87, 87, 85, 85, 83, 83, 82, 82, 84, 94, 86, 86, 84, 84, 82,
                       82, 83, 83, 85, 85, 87, 87, 87, 91, 91, 92, 92, 90, 90, 94, 95, 88, 78,
                       79, 81, 80, 93, 96, 89, 77, 70, 72, 72, 74, 74, 73, 73, 50, 52, 52, 44,
                       44, 32, 32, 22, 22, 14, 14, 20, 20, 30, 30, 41, 41, 50, 57, 52, 52, 50,
                       50, 41, 41, 30, 30, 20, 20, 14, 14, 14, 22, 22, 32, 32, 44, 44, 57, 11,
                       4, 12, 58, 62, 8, 7, 39, 61, 42, 51, 43, 43, 31, 31, 21, 21, 48, 54,
                       54, 47, 47, 37, 37, 25, 25, 17, 17, 18, 18, 26, 26, 38, 38, 48, 59, 54,
                       54, 48, 48, 38, 38, 26, 26, 18, 18, 17, 17, 17, 25, 25, 37, 37, 47, 47,
                       59, 27, 5, 10, 56, 60, 6, 9, 55, 63, 28, 53, 45, 45, 35, 35, 23, 23],
                      dtype="int64"),
        "j": np.array([76, 76, 3, 3, 3, 3, 1, 1, 75, 75, 3, 3, 70, 70, 72, 72, 74, 74,
                       73, 73, 71, 71, 69, 69, 67, 67, 66, 66, 68, 68, 89, 89, 81, 81, 79, 79,
                       77, 77, 78, 78, 80, 80, 88, 93, 93, 95, 95, 96, 96, 94, 97, 97, 97, 97,
                       97, 97, 97, 97, 97, 97, 97, 68, 68, 66, 66, 67, 67, 69, 51, 51, 43, 43,
                       31, 31, 21, 21, 13, 13, 19, 19, 29, 29, 40, 40, 49, 49, 61, 61, 62, 62,
                       58, 58, 42, 42, 12, 12, 8, 8, 4, 7, 7, 11, 11, 39, 39, 57, 34, 34,
                       34, 34, 34, 34, 34, 34, 34, 34, 34, 49, 49, 40, 40, 29, 29, 19, 53, 53,
                       45, 45, 35, 35, 23, 23, 15, 15, 16, 16, 24, 24, 36, 36, 46, 46, 63, 63,
                       60, 60, 56, 56, 28, 28, 10, 10, 6, 6, 5, 9, 9, 27, 27, 55, 55, 59,
                       33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 46, 46, 36, 36, 24, 24, 16],
                      dtype="int64"),
        "k": np.array([64, 65, 75, 76, 65, 1, 64, 0, 0, 64, 0, 2, 86, 72, 90, 74, 92, 73,
                       91, 71, 87, 69, 85, 67, 83, 66, 82, 68, 84, 70, 86, 81, 84, 79, 82, 77,
                       83, 78, 85, 80, 87, 88, 93, 91, 95, 92, 96, 90, 94, 86, 89, 96, 93, 80,
                       77, 79, 88, 95, 94, 81, 78, 72, 66, 74, 67, 73, 69, 71, 52, 43, 44, 31,
                       32, 21, 22, 13, 14, 19, 20, 29, 30, 40, 41, 49, 50, 51, 52, 62, 50, 58,
                       41, 42, 30, 12, 20, 8, 14, 4, 7, 22, 11, 32, 39, 44, 57, 52, 61, 39,
                       7, 8, 42, 58, 4, 11, 57, 62, 12, 43, 40, 31, 29, 21, 19, 13, 54, 45,
                       47, 35, 37, 23, 25, 15, 17, 16, 18, 24, 26, 36, 38, 46, 48, 53, 54, 60,
                       48, 56, 38, 28, 26, 10, 18, 6, 17, 5, 9, 25, 27, 37, 55, 47, 59, 54,
                       63, 55, 9, 6, 28, 56, 5, 27, 59, 60, 10, 45, 36, 35, 24, 23, 16, 15],
                      dtype="int64"),
        "x": np.array([-5.00000000e-01, -5.00000000e-01, -5.00000000e-01, -5.00000000e-01,
                       -2.99849272e-01, -2.87847906e-01, -2.87847906e-01, -2.57389992e-01,
                       -2.47108519e-01, -1.96458220e-01, -1.96458220e-01, -1.33211225e-01,
                       -1.15912557e-01, -9.99495536e-02, -9.99495536e-02, -9.39692631e-02,
                       -9.39692631e-02, -9.39692631e-02, -9.39692631e-02, -7.86073282e-02,
                       -7.86073282e-02, -7.45242685e-02, -7.45242685e-02, -5.00000007e-02,
                       -5.00000007e-02, -5.00000007e-02, -5.00000007e-02, -4.26944532e-02,
                       -4.26944532e-02, -2.04838570e-02, -2.04838570e-02, -1.42282564e-02,
                       -1.42282564e-02, -2.08166817e-16, -1.91079873e-16, 1.73648186e-02,
                       1.73648186e-02, 1.73648186e-02, 1.73648186e-02, 3.32611799e-02,
                       4.72242348e-02, 4.72242348e-02, 5.20848148e-02, 5.27253151e-02,
                       5.27253151e-02, 7.66044408e-02, 7.66044408e-02, 7.66044408e-02,
                       7.66044408e-02, 9.28355828e-02, 9.28355828e-02, 9.50081274e-02,
                       9.50081274e-02, 9.99999940e-02, 9.99999940e-02, 1.24624498e-01,
                       1.24624498e-01, 1.89173400e-01, 2.03545630e-01, 2.52376080e-01,
                       2.52376080e-01, 2.85024375e-01, 2.90382177e-01, 2.99999982e-01,
                       5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 2.00000000e+00]),
        "y": np.array([-5.00000000e-01, -5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
                       -9.50860139e-03, 1.48038471e+00, 1.48038471e+00, 1.54111609e-01,
                       -1.70109898e-01, 1.48038471e+00, 1.48038471e+00, 2.68802464e-01,
                       -2.76702493e-01, 3.17605096e-03, 3.17605096e-03, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, -6.18133359e-02,
                       -6.18133359e-02, 6.66793287e-02, 6.66793287e-02, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, -9.78795737e-02, -9.78795737e-02, 9.89826098e-02,
                       9.89826098e-02, 2.00000000e+00, -6.27497823e-17, 5.00000000e-01,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 2.98150450e-01,
                       -8.81468803e-02, -8.81468803e-02, -2.95444012e-01, 8.49708244e-02,
                       8.49708244e-02, 5.00000000e-01, 5.00000000e-01, 1.48038471e+00,
                       1.48038471e+00, -3.71692739e-02, -3.71692739e-02, 3.12002450e-02,
                       3.12002450e-02, 5.00000000e-01, 1.48038471e+00, 1.48038471e+00,
                       1.48038471e+00, 2.32837781e-01, -2.20384151e-01, 1.48038471e+00,
                       1.48038471e+00, 9.36007351e-02, -7.53538683e-02, 1.48038471e+00,
                       -5.00000000e-01, -5.00000000e-01, -9.44050848e-02, -9.35176238e-02,
                       -5.11193462e-02, -4.88722362e-02, 1.60856955e-02, 1.86410155e-02,
                       7.57640675e-02, 7.74319321e-02, 9.99915898e-02, 5.00000000e-01,
                       5.00000000e-01, -2.99947202e-01, -2.55374074e-01, -2.49289244e-01,
                       -1.29721463e-01, -1.19483687e-01, -9.44050848e-02, -9.35176238e-02,
                       -5.11193462e-02, -4.88722362e-02, 1.60856955e-02, 1.86410155e-02,
                       3.71167921e-02, 4.82570902e-02, 7.57640675e-02, 7.74319321e-02,
                       9.99915898e-02, 1.92170724e-01, 2.00676590e-01, 2.86211818e-01,
                       2.89382666e-01, -3.23514738e-17]),
        "z": np.array([-5.00000000e-01, 5.00000000e-01, -5.00000000e-01, 5.00000000e-01,
                       1.48038471e+00, -8.45197663e-02, 8.45197663e-02, 1.48038471e+00,
                       1.48038471e+00, -2.26724878e-01, 2.26724878e-01, 1.48038471e+00,
                       1.48038471e+00, 5.00000000e-01, 1.48038471e+00, -3.42020132e-02,
                       3.42020132e-02, -3.42020132e-02, 3.42020132e-02, 5.00000000e-01,
                       1.48038471e+00, 5.00000000e-01, 1.48038471e+00, -8.66025388e-02,
                       8.66025388e-02, -8.66025388e-02, 8.66025388e-02, -2.96946436e-01,
                       2.96946436e-01, 5.00000000e-01, 1.48038471e+00, 5.00000000e-01,
                       1.48038471e+00, 0.00000000e+00, 2.00000000e+00, -9.84807760e-02,
                       9.84807760e-02, -9.84807760e-02, 9.84807760e-02, 1.48038471e+00,
                       5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 5.00000000e-01,
                       1.48038471e+00, -6.42787591e-02, 6.42787591e-02, -6.42787591e-02,
                       6.42787591e-02, 5.00000000e-01, 1.48038471e+00, 5.00000000e-01,
                       1.48038471e+00, 0.00000000e+00, 0.00000000e+00, -2.72889614e-01,
                       2.72889614e-01, 1.48038471e+00, 1.48038471e+00, -1.62192255e-01,
                       1.62192255e-01, 1.48038471e+00, 1.48038471e+00, 0.00000000e+00,
                       -5.00000000e-01, 5.00000000e-01, -3.29800062e-02, 3.54182646e-02,
                       -8.59465674e-02, 8.72439370e-02, -9.86977741e-02, 9.82472003e-02,
                       -6.52671903e-02, 6.32795095e-02, -1.29736937e-03, -5.00000000e-01,
                       5.00000000e-01, -5.62742725e-03, 1.57429606e-01, -1.66897804e-01,
                       2.70503879e-01, -2.75179297e-01, -3.29800062e-02, 3.54182646e-02,
                       -8.59465674e-02, 8.72439370e-02, -9.86977741e-02, 9.82472003e-02,
                       2.97695041e-01, -2.96093315e-01, -6.52671903e-02, 6.32795095e-02,
                       -1.29736937e-03, 2.30370149e-01, -2.22999811e-01, 8.99043754e-02,
                       -7.91054145e-02, 1.98500464e-16]),
    }
    # With colorize_tails, the arrow shafts ("tails") take the center color
    # so only the arrow heads keep the axis colors.
    x_color_tail = x_color
    y_color_tail = y_color
    z_color_tail = z_color
    if colorize_tails:
        x_color_tail = center_color
        y_color_tail = center_color
        z_color_tail = center_color
    # Face layout: 12 cube faces, then 56 faces per arrow of which the first
    # N2 = 18 belong to the tail.
    N, N2 = 56, 18
    trace["facecolor"] = np.concatenate(
        [
            [center_color] * 12,
            [x_color_tail] * (N2),
            [x_color] * (N - N2),
            [y_color_tail] * (N2),
            [y_color] * (N - N2),
            [z_color_tail] * (N2),
            [z_color] * (N - N2),
        ]
    )
    # Face index ranges of the four sub-meshes; note the arrow face order in
    # the mesh is center, x, z, y (the show tuple below matches that order).
    indices = ((0, 12), (12, 68), (68, 124), (124, 180))
    show = (center_show, x_show, z_show, y_show)
    # Keep only the faces (and their colors) of the sub-meshes that are shown.
    for k in ("i", "j", "k", "facecolor"):
        t = []
        for i, s in zip(indices, show):
            if s:
                t.extend(trace[k][i[0] : i[1]])
        trace[k] = np.array(t)
    return trace
| true | true |
f7f86fedf6e276fcc79b2f2d5dc615ff903f34eb | 15,901 | py | Python | src/environment_provider/environment_provider.py | Greg4cr/etos-environment-provider | f150eb4946cb261a02d0da18713ea382379900fa | [
"Apache-2.0"
] | null | null | null | src/environment_provider/environment_provider.py | Greg4cr/etos-environment-provider | f150eb4946cb261a02d0da18713ea382379900fa | [
"Apache-2.0"
] | 15 | 2020-09-28T12:01:46.000Z | 2022-03-11T11:38:04.000Z | src/environment_provider/environment_provider.py | t-persson/etos-environment-provider | bf01dc2fe8f989bfc5c97dd543a15a15f0383540 | [
"Apache-2.0"
] | 4 | 2020-09-25T11:16:20.000Z | 2021-02-03T12:21:36.000Z | # Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETOS Environment Provider celery task module."""
import os
import uuid
import logging
import traceback
import json
from threading import Lock
from copy import deepcopy
from etos_lib.etos import ETOS
from etos_lib.logging.logger import FORMAT_CONFIG
from jsontas.jsontas import JsonTas
from environment_provider.splitter.split import Splitter
from .lib.celery import APP
from .lib.config import Config
from .lib.test_suite import TestSuite
from .lib.registry import ProviderRegistry
from .lib.json_dumps import JsonDumps
from .lib.uuid_generate import UuidGenerate
from .lib.join import Join
logging.getLogger("pika").setLevel(logging.WARNING)
class NoEventDataFound(Exception):
    """Raised when required event data could not be fetched from the event storage."""
class EnvironmentProviderNotConfigured(Exception):
    """Raised when an environment is requested before the provider was configured."""
class EnvironmentProvider:  # pylint:disable=too-many-instance-attributes
    """Environment provider celery task."""

    logger = logging.getLogger("EnvironmentProvider")
    # Provider configuration and providers, populated by configure().
    environment_provider_config = None
    iut_provider = None
    log_area_provider = None
    execution_space_provider = None
    # NOTE(review): presumably celery's task_track_started option -- confirm.
    task_track_started = True
    # Guards the deepcopy of the shared ETOS configuration in __init__.
    lock = Lock()
    def __init__(self, suite_id):
        """Initialize ETOS library, JsonTas dataset, provider registry and splitter.

        :param suite_id: Suite ID to get an environment for.
        :type suite_id: str
        """
        self.suite_id = suite_id
        # Tag all log records emitted by this task with the suite ID.
        FORMAT_CONFIG.identifier = suite_id
        self.logger.info("Initializing EnvironmentProvider task.")
        self.etos = ETOS(
            "ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
        )
        with self.lock:
            # Since celery workers can share memory between them we need to make the configuration
            # of ETOS library unique as it uses the memory sharing feature with the internal
            # configuration dictionary.
            # The impact of not doing this is that the environment provider would re-use
            # another workers configuration instead of using its own.
            self.etos.config.config = deepcopy(
                self.etos.config.config
            )  # pylint:disable=protected-access
        self.jsontas = JsonTas()
        self.dataset = self.jsontas.dataset
        # Make helper datastructures available in the JsonTas dataset.
        self.dataset.add("json_dumps", JsonDumps)
        self.dataset.add("uuid_generate", UuidGenerate)
        self.dataset.add("join", Join)
        self.registry = ProviderRegistry(self.etos, self.jsontas)
        self.splitter = Splitter(self.etos, {})
    def configure(self, suite_id):
        """Configure the environment provider and start the RabbitMQ publisher.

        Loads the registered provider configuration, reads timeout values
        from the environment, starts the event publisher and populates the
        JsonTas dataset from the triggering events.

        :param suite_id: Suite ID for this task.
        :type suite_id: str
        :raises EnvironmentProviderNotConfigured: If no configuration was
            registered for ``suite_id``.
        :raises NoEventDataFound: If any required event is missing.
        """
        self.logger.info("Configure environment provider.")
        if not self.registry.wait_for_configuration(suite_id):
            # TODO: Add link ref to docs that describe how the config is done.
            raise EnvironmentProviderNotConfigured(
                "Please do a proper configuration of "
                "EnvironmentProvider before requesting an "
                "environment."
            )
        self.logger.info("Registry is configured.")
        self.iut_provider = self.registry.iut_provider(suite_id)
        self.log_area_provider = self.registry.log_area_provider(suite_id)
        self.execution_space_provider = self.registry.execution_space_provider(suite_id)
        # Timeouts are configurable per deployment via environment variables
        # (seconds, defaulting to 10).
        self.etos.config.set(
            "EVENT_DATA_TIMEOUT", int(os.getenv("ETOS_EVENT_DATA_TIMEOUT", "10"))
        )
        self.etos.config.set(
            "WAIT_FOR_IUT_TIMEOUT", int(os.getenv("ETOS_WAIT_FOR_IUT_TIMEOUT", "10"))
        )
        self.etos.config.set(
            "WAIT_FOR_EXECUTION_SPACE_TIMEOUT",
            int(os.getenv("ETOS_WAIT_FOR_EXECUTION_SPACE_TIMEOUT", "10")),
        )
        self.etos.config.set(
            "WAIT_FOR_LOG_AREA_TIMEOUT",
            int(os.getenv("ETOS_WAIT_FOR_LOG_AREA_TIMEOUT", "10")),
        )
        self.etos.config.set("SUITE_ID", suite_id)
        self.etos.config.rabbitmq_publisher_from_environment()
        self.etos.start_publisher()
        self.environment_provider_config = Config(self.etos, suite_id)
        if not self.environment_provider_config.generated:
            # Report exactly which required events could not be collected.
            missing = [
                name
                for name, value in [
                    ("tercc", self.environment_provider_config.tercc),
                    (
                        "artifact_created",
                        self.environment_provider_config.artifact_created,
                    ),
                    (
                        "activity_triggered",
                        self.environment_provider_config.activity_triggered,
                    ),
                ]
                if value is None
            ]
            raise NoEventDataFound(f"Missing: {', '.join(missing)}")
        # Expose event and environment data to the provider rulesets via the
        # JsonTas dataset.
        self.dataset.add("environment", os.environ)
        self.dataset.add("config", self.etos.config)
        self.dataset.add("identity", self.environment_provider_config.identity)
        self.dataset.add("artifact_id", self.environment_provider_config.artifact_id)
        self.dataset.add("context", self.environment_provider_config.context)
        self.dataset.add("custom_data", self.environment_provider_config.custom_data)
        self.dataset.add("uuid", str(uuid.uuid4()))
        self.dataset.add(
            "artifact_created", self.environment_provider_config.artifact_created
        )
        self.dataset.add(
            "artifact_published", self.environment_provider_config.artifact_published
        )
        self.dataset.add("tercc", self.environment_provider_config.tercc)
        self.dataset.merge(self.registry.dataset(suite_id))
def cleanup(self):
"""Clean up by checkin in all checked out providers."""
self.logger.info("Cleanup by checking in all checked out providers.")
for provider in self.etos.config.get("PROVIDERS"):
try:
provider.checkin_all()
except: # noqa pylint:disable=bare-except
pass
@staticmethod
def get_constraint(recipe, key):
"""Get a constraint key from an ETOS recipe.
:param recipe: Recipe to get key from.
:type recipe: dict
:param key: Key to get value from, from the constraints.
:type key: str
:return: Constraint value.
:rtype: any
"""
for constraint in recipe.get("constraints", []):
if constraint.get("key") == key:
return constraint.get("value")
return None
def create_test_suite_dict(self):
"""Create a test suite dictionary based on test runners.
I.e. If there is only one test_runner the dictionary would be::
{
"test_suite_name": {
"MyTestrunner": {
"docker": "MyTestrunner",
"priority": 1,
"unsplit_recipes": [...]
}
}
}
Or two::
{
"test_suite_name": {
"MyTestrunner": {
"docker": "MyTestrunner",
"priority": 1,
"unsplit_recipes": [...]
},
"MyOtherTestrunner": {
"docker": "MyOtherTestrunner",
"priority": 1,
"unsplit_recipes": [...]
}
}
}
etc.
:return: A test suite dictionary based on test runners.
:rtype: dict
"""
self.logger.info("Create new test suite dictionary.")
test_suites = {}
for test_suite in self.environment_provider_config.test_suite:
test_runners = test_suites.setdefault(test_suite.get("name"), {})
for recipe in test_suite.get("recipes", []):
test_runner = self.get_constraint(recipe, "TEST_RUNNER")
test_runners.setdefault(
test_runner,
{
"docker": test_runner,
"priority": test_suite.get("priority"),
"unsplit_recipes": [],
},
)
test_runners[test_runner]["unsplit_recipes"].append(recipe)
return test_suites
def set_total_test_count_and_test_runners(self, test_runners):
"""Set total test count and test runners to be used by the splitter algorithm.
:param test_runners: Dictionary with test_runners as keys.
:type test_runners: dict
"""
total_test_count = 0
for _, data in test_runners.items():
total_test_count += len(data["unsplit_recipes"])
self.etos.config.set("TOTAL_TEST_COUNT", total_test_count)
self.etos.config.set("NUMBER_OF_TESTRUNNERS", len(test_runners.keys()))
def checkout_and_assign_iuts_to_test_runners(self, test_runners):
"""Checkout IUTs from the IUT provider and assign them to the test_runners dictionary.
:param test_runners: Dictionary with test_runners as keys.
:type test_runners: dict
"""
iuts = self.iut_provider.wait_for_and_checkout_iuts(
minimum_amount=self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
maximum_amount=self.etos.config.get("TOTAL_TEST_COUNT"),
)
self.etos.config.set("NUMBER_OF_IUTS", len(iuts))
unused_iuts = self.splitter.assign_iuts(test_runners, self.dataset.get("iuts"))
for iut in unused_iuts:
self.iut_provider.checkin(iut)
def checkout_log_area(self):
"""Checkout a log area.
Called for each executor so only a single log area needs to be checked out.
"""
return self.log_area_provider.wait_for_and_checkout_log_areas(
minimum_amount=1, maximum_amount=1
)
    def checkout_and_assign_executors_to_iuts(self, test_runner, iuts):
        """Checkout and assign executors to each available IUT.
        :param test_runner: Test runner which will be added to dataset in order for
                            JSONTas to get more information when running.
        :type test_runner: dict
        :param iuts: Dictionary of IUTs to assign executors to.
        :type iuts: dict
        """
        # Expose the test runner via the JSONTas dataset before checking out
        # execution spaces, so provider rules can reference it.
        self.dataset.add("test_runner", test_runner)
        executors = (
            self.execution_space_provider.wait_for_and_checkout_execution_spaces(
                minimum_amount=len(iuts),
                maximum_amount=len(iuts),
            )
        )
        for iut, suite in iuts.items():
            try:
                # Pair each IUT with the next free executor. If the provider
                # returned fewer executors than IUTs, the remaining IUTs stay
                # unassigned here (the caller checks those back in later).
                suite["executor"] = executors.pop(0)
            except IndexError:
                break
            # Publish the pairing before checking out a log area — presumably
            # so the log-area provider rules can reference executor/iut via
            # JSONTas; confirm against the provider definitions.
            self.dataset.add("executor", suite["executor"])
            self.dataset.add("iut", iut)
            # This index will always exist or 'checkout' would raise an exception.
            suite["log_area"] = self.checkout_log_area()[0]
        # Checkin the unassigned executors.
        for executor in executors:
            self.execution_space_provider.checkin(executor)
def checkin_iuts_without_executors(self, iuts):
"""Find all IUTs without an assigned executor and check them in.
:param iuts: IUTs to check for executors.
:type iuts: dict
:return: IUTs that were removed.
:rtype: list
"""
remove = []
for iut, suite in iuts.items():
if suite.get("executor") is None:
self.iut_provider.checkin(iut)
remove.append(iut)
return remove
def verify_json(self, json_data):
"""Verify that JSON data can be serialized properly.
:param json_data: JSON data to test.
:type json_data: str or dict
"""
try:
if isinstance(json_data, dict):
json_data = json.dumps(json_data)
json.loads(json_data)
except (json.decoder.JSONDecodeError, TypeError):
self.logger.error(json_data)
raise
    def run(self):
        """Run the environment provider task.
        :return: Test suite JSON with assigned IUTs, execution spaces and log areas.
        :rtype: dict
        """
        try:
            self.configure(self.suite_id)
            test_suites = self.create_test_suite_dict()
            for test_suite_name, test_runners in test_suites.items():
                self.set_total_test_count_and_test_runners(test_runners)
                self.logger.info(
                    "Total test count : %r", self.etos.config.get("TOTAL_TEST_COUNT")
                )
                self.logger.info(
                    "Total testrunners: %r",
                    self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
                )
                self.checkout_and_assign_iuts_to_test_runners(test_runners)
                for test_runner, values in test_runners.items():
                    self.checkout_and_assign_executors_to_iuts(
                        test_runner, values["iuts"]
                    )
                    # IUTs that did not receive an executor cannot run tests;
                    # check them in and drop them from this sub suite.
                    for iut in self.checkin_iuts_without_executors(values["iuts"]):
                        values["iuts"].remove(iut)
                for sub_suite in test_runners.values():
                    self.splitter.split(sub_suite)
                test_suite = TestSuite(
                    test_suite_name, test_runners, self.environment_provider_config
                )
                # This is where the resulting test suite is generated.
                # The resulting test suite will be a dictionary with test runners, IUTs
                # execution spaces and log areas with tests split up over as many as
                # possible. The resulting test suite definition is further explained in
                # :obj:`environment_provider.lib.test_suite.TestSuite`
                test_suite.generate()
                test_suite_json = test_suite.to_json()
                # Test that the test suite JSON is serializable so that the
                # exception is caught here and not by the webserver.
                # This makes sure that we can cleanup if anything breaks.
                self.verify_json(test_suite_json)
                # TODO: Handle multiple test suites.
                return test_suite_json
        except Exception as exception:  # pylint:disable=broad-except
            # Check everything that was checked out back in before reporting
            # the failure to the caller as an error dictionary.
            self.cleanup()
            traceback.print_exc()
            return {"error": str(exception), "details": traceback.format_exc()}
        finally:
            # Always stop the RabbitMQ publisher started during configure().
            if self.etos.publisher is not None:
                self.etos.publisher.stop()
@APP.task(name="EnvironmentProvider")
def get_environment(suite_id):
    """Celery task entry point: provide an environment for ETOS test executions.

    :param suite_id: Suite ID to get an environment for
    :type suite_id: str
    :return: Test suite JSON with assigned IUTs, execution spaces and log areas.
    :rtype: dict
    """
    return EnvironmentProvider(suite_id).run()
| 39.358911 | 98 | 0.609459 |
import os
import uuid
import logging
import traceback
import json
from threading import Lock
from copy import deepcopy
from etos_lib.etos import ETOS
from etos_lib.logging.logger import FORMAT_CONFIG
from jsontas.jsontas import JsonTas
from environment_provider.splitter.split import Splitter
from .lib.celery import APP
from .lib.config import Config
from .lib.test_suite import TestSuite
from .lib.registry import ProviderRegistry
from .lib.json_dumps import JsonDumps
from .lib.uuid_generate import UuidGenerate
from .lib.join import Join
logging.getLogger("pika").setLevel(logging.WARNING)
class NoEventDataFound(Exception):
class EnvironmentProviderNotConfigured(Exception):
class EnvironmentProvider:
logger = logging.getLogger("EnvironmentProvider")
environment_provider_config = None
iut_provider = None
log_area_provider = None
execution_space_provider = None
task_track_started = True
lock = Lock()
def __init__(self, suite_id):
self.suite_id = suite_id
FORMAT_CONFIG.identifier = suite_id
self.logger.info("Initializing EnvironmentProvider task.")
self.etos = ETOS(
"ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
)
with self.lock:
self.etos.config.config = deepcopy(
self.etos.config.config
)
self.jsontas = JsonTas()
self.dataset = self.jsontas.dataset
self.dataset.add("json_dumps", JsonDumps)
self.dataset.add("uuid_generate", UuidGenerate)
self.dataset.add("join", Join)
self.registry = ProviderRegistry(self.etos, self.jsontas)
self.splitter = Splitter(self.etos, {})
def configure(self, suite_id):
self.logger.info("Configure environment provider.")
if not self.registry.wait_for_configuration(suite_id):
raise EnvironmentProviderNotConfigured(
"Please do a proper configuration of "
"EnvironmentProvider before requesting an "
"environment."
)
self.logger.info("Registry is configured.")
self.iut_provider = self.registry.iut_provider(suite_id)
self.log_area_provider = self.registry.log_area_provider(suite_id)
self.execution_space_provider = self.registry.execution_space_provider(suite_id)
self.etos.config.set(
"EVENT_DATA_TIMEOUT", int(os.getenv("ETOS_EVENT_DATA_TIMEOUT", "10"))
)
self.etos.config.set(
"WAIT_FOR_IUT_TIMEOUT", int(os.getenv("ETOS_WAIT_FOR_IUT_TIMEOUT", "10"))
)
self.etos.config.set(
"WAIT_FOR_EXECUTION_SPACE_TIMEOUT",
int(os.getenv("ETOS_WAIT_FOR_EXECUTION_SPACE_TIMEOUT", "10")),
)
self.etos.config.set(
"WAIT_FOR_LOG_AREA_TIMEOUT",
int(os.getenv("ETOS_WAIT_FOR_LOG_AREA_TIMEOUT", "10")),
)
self.etos.config.set("SUITE_ID", suite_id)
self.etos.config.rabbitmq_publisher_from_environment()
self.etos.start_publisher()
self.environment_provider_config = Config(self.etos, suite_id)
if not self.environment_provider_config.generated:
missing = [
name
for name, value in [
("tercc", self.environment_provider_config.tercc),
(
"artifact_created",
self.environment_provider_config.artifact_created,
),
(
"activity_triggered",
self.environment_provider_config.activity_triggered,
),
]
if value is None
]
raise NoEventDataFound(f"Missing: {', '.join(missing)}")
self.dataset.add("environment", os.environ)
self.dataset.add("config", self.etos.config)
self.dataset.add("identity", self.environment_provider_config.identity)
self.dataset.add("artifact_id", self.environment_provider_config.artifact_id)
self.dataset.add("context", self.environment_provider_config.context)
self.dataset.add("custom_data", self.environment_provider_config.custom_data)
self.dataset.add("uuid", str(uuid.uuid4()))
self.dataset.add(
"artifact_created", self.environment_provider_config.artifact_created
)
self.dataset.add(
"artifact_published", self.environment_provider_config.artifact_published
)
self.dataset.add("tercc", self.environment_provider_config.tercc)
self.dataset.merge(self.registry.dataset(suite_id))
def cleanup(self):
self.logger.info("Cleanup by checking in all checked out providers.")
for provider in self.etos.config.get("PROVIDERS"):
try:
provider.checkin_all()
except:
pass
@staticmethod
def get_constraint(recipe, key):
for constraint in recipe.get("constraints", []):
if constraint.get("key") == key:
return constraint.get("value")
return None
def create_test_suite_dict(self):
self.logger.info("Create new test suite dictionary.")
test_suites = {}
for test_suite in self.environment_provider_config.test_suite:
test_runners = test_suites.setdefault(test_suite.get("name"), {})
for recipe in test_suite.get("recipes", []):
test_runner = self.get_constraint(recipe, "TEST_RUNNER")
test_runners.setdefault(
test_runner,
{
"docker": test_runner,
"priority": test_suite.get("priority"),
"unsplit_recipes": [],
},
)
test_runners[test_runner]["unsplit_recipes"].append(recipe)
return test_suites
def set_total_test_count_and_test_runners(self, test_runners):
total_test_count = 0
for _, data in test_runners.items():
total_test_count += len(data["unsplit_recipes"])
self.etos.config.set("TOTAL_TEST_COUNT", total_test_count)
self.etos.config.set("NUMBER_OF_TESTRUNNERS", len(test_runners.keys()))
def checkout_and_assign_iuts_to_test_runners(self, test_runners):
iuts = self.iut_provider.wait_for_and_checkout_iuts(
minimum_amount=self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
maximum_amount=self.etos.config.get("TOTAL_TEST_COUNT"),
)
self.etos.config.set("NUMBER_OF_IUTS", len(iuts))
unused_iuts = self.splitter.assign_iuts(test_runners, self.dataset.get("iuts"))
for iut in unused_iuts:
self.iut_provider.checkin(iut)
def checkout_log_area(self):
return self.log_area_provider.wait_for_and_checkout_log_areas(
minimum_amount=1, maximum_amount=1
)
def checkout_and_assign_executors_to_iuts(self, test_runner, iuts):
self.dataset.add("test_runner", test_runner)
executors = (
self.execution_space_provider.wait_for_and_checkout_execution_spaces(
minimum_amount=len(iuts),
maximum_amount=len(iuts),
)
)
for iut, suite in iuts.items():
try:
suite["executor"] = executors.pop(0)
except IndexError:
break
self.dataset.add("executor", suite["executor"])
self.dataset.add("iut", iut)
suite["log_area"] = self.checkout_log_area()[0]
for executor in executors:
self.execution_space_provider.checkin(executor)
def checkin_iuts_without_executors(self, iuts):
remove = []
for iut, suite in iuts.items():
if suite.get("executor") is None:
self.iut_provider.checkin(iut)
remove.append(iut)
return remove
def verify_json(self, json_data):
try:
if isinstance(json_data, dict):
json_data = json.dumps(json_data)
json.loads(json_data)
except (json.decoder.JSONDecodeError, TypeError):
self.logger.error(json_data)
raise
def run(self):
try:
self.configure(self.suite_id)
test_suites = self.create_test_suite_dict()
for test_suite_name, test_runners in test_suites.items():
self.set_total_test_count_and_test_runners(test_runners)
self.logger.info(
"Total test count : %r", self.etos.config.get("TOTAL_TEST_COUNT")
)
self.logger.info(
"Total testrunners: %r",
self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
)
self.checkout_and_assign_iuts_to_test_runners(test_runners)
for test_runner, values in test_runners.items():
self.checkout_and_assign_executors_to_iuts(
test_runner, values["iuts"]
)
for iut in self.checkin_iuts_without_executors(values["iuts"]):
values["iuts"].remove(iut)
for sub_suite in test_runners.values():
self.splitter.split(sub_suite)
test_suite = TestSuite(
test_suite_name, test_runners, self.environment_provider_config
)
test_suite.generate()
test_suite_json = test_suite.to_json()
self.verify_json(test_suite_json)
return test_suite_json
except Exception as exception:
self.cleanup()
traceback.print_exc()
return {"error": str(exception), "details": traceback.format_exc()}
finally:
if self.etos.publisher is not None:
self.etos.publisher.stop()
@APP.task(name="EnvironmentProvider")
def get_environment(suite_id):
environment_provider = EnvironmentProvider(suite_id)
return environment_provider.run()
| true | true |
f7f8702645ae3f954f8122f88c93b97262a2fe34 | 11,528 | py | Python | Scripts/plot_Histograms_RawLENS.py | zmlabe/ExtremeEvents | 701c274c074dd2c4ae7c7294ec20f35c64e6ea2b | [
"MIT"
] | 3 | 2021-03-08T12:15:58.000Z | 2022-02-22T00:32:29.000Z | Scripts/plot_Histograms_RawLENS.py | zmlabe/ExtremeEvents | 701c274c074dd2c4ae7c7294ec20f35c64e6ea2b | [
"MIT"
] | null | null | null | Scripts/plot_Histograms_RawLENS.py | zmlabe/ExtremeEvents | 701c274c074dd2c4ae7c7294ec20f35c64e6ea2b | [
"MIT"
] | 6 | 2021-02-12T18:09:30.000Z | 2021-09-10T20:26:54.000Z | """
Scipt plots histograms of data with mean removed over 4 time periods
Author : Zachary M. Labe
Date : 7 January 2021
"""
### Import modules
import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
import calc_Utilities as UT
import calc_dataFunctions as df
import palettable.wesanderson as ww
import calc_Stats as dSS
### Set preliminaries
# Output directory for the saved figures (hard-coded local path).
directoryfigure = '/Users/zlabe/Desktop/ExtremeEvents_v1/Composites/LENS/'
reg_name = 'Globe'        # region passed to UT.regions for the initial read
dataset = 'lens'          # CESM Large Ensemble
rm_ensemble_mean = True   # remove the ensemble-mean (forced) signal first
variq = ['T2M']           # variables to process
monthlychoice = 'annual'
# Labels for the four consecutive 45-year averaging periods plotted below.
yeartype = ['1920-1964','1965-2009','2010-2054','2055-2099']
###############################################################################
###############################################################################
###############################################################################
def read_primary_dataset(variq,dataset,lat_bounds,lon_bounds):
    """Read *variq* from *dataset* and subset it to the given lat/lon box.

    Uses the module-level ``monthlychoice`` for the temporal averaging and
    returns ``(data, lats, lons)`` for the requested region.
    """
    raw,lats,lons = df.readFiles(variq,dataset,monthlychoice)
    regional,lats,lons = df.getRegion(raw,lats,lons,lat_bounds,lon_bounds)
    print('\nOur dataset: ',dataset,' is shaped',raw.shape)
    return regional,lats,lons
###############################################################################
###############################################################################
###############################################################################
### Call functions
for i in range(len(variq)):
    ### Read in data for selected region
    lat_bounds,lon_bounds = UT.regions(reg_name)
    dataall,lats,lons = read_primary_dataset(variq[i],dataset,
                                             lat_bounds,lon_bounds)
    ### Remove ensemble mean (isolates internal variability from the forced signal)
    if rm_ensemble_mean == True:
        data= dSS.remove_ensemble_mean(dataall)
        print('*Removed ensemble mean*')
    elif rm_ensemble_mean == False:
        data = dataall
    ### Composite over selected period (x2)
    # DJF seasonal means lose the first winter, hence the shifted start year.
    if monthlychoice == 'DJF':
        years = np.arange(dataall.shape[1]) + 1921
    else:
        years = np.arange(dataall.shape[1]) + 1920
    # Average each ensemble member over consecutive 45-year periods.
    # NOTE(review): the array is sized with len(years)//40 rows, but the loop
    # steps through range(0, len(years)-45, 45); for a 180-year record that
    # fills only 3 of 4 rows, leaving the final period as uninitialized
    # np.empty memory — confirm the record length before trusting the
    # "2055-2099" row in the plots below.
    meancomp = np.empty((len(years)//40,data.shape[0],data.shape[2],data.shape[3]))
    for count,i in enumerate(range(0,len(years)-45,45)):
        meancomp[count,:,:,:,] = np.nanmean(data[:,i:i+45,:,:],axis=1)
    # meancomp = meancomp[:,:35,:,:]
### Mesh latxlon
lon2,lat2 = np.meshgrid(lons,lats)
### Set up different regions
# Globe
lat_globe = lats.copy()
lon_globe = lons.copy()
globe = meancomp.copy()
latmin_globe = -90.
latmax_globe = 90.
lonmin_globe = 0.
lonmax_globe = 360.
name_globe = 'Globe'
# Tropics
lat_trop = lats.copy()
lon_trop = lons.copy()
trop = meancomp.copy()
latmin_trop = -30.
latmax_trop = 30.
lonmin_trop = 0.
lonmax_trop = 360.
name_trop = 'Tropics'
# Northern Hemisphere
lat_nh = lats.copy()
lon_nh = lons.copy()
nh = meancomp.copy()
latmin_nh = 0.
latmax_nh = 90.
lonmin_nh = 0.
lonmax_nh = 360.
name_nh = 'Northern Hemisphere'
# Southern Hemisphere
lat_sh = lats.copy()
lon_sh = lons.copy()
sh = meancomp.copy()
latmin_sh = -90.
latmax_sh = 0.
lonmin_sh = 0.
lonmax_sh = 360.
name_sh = 'Southern Hemisphere'
# Indian Ocean
lat_io = lats.copy()
lon_io = lons.copy()
io = meancomp.copy()
latmin_io = -10.
latmax_io = 10.
lonmin_io = 50.
lonmax_io = 110.
name_io = 'Indian Ocean'
# ENSO region
lat_enso = lats.copy()
lon_enso = lons.copy()
enso = meancomp.copy()
latmin_enso = -5.
latmax_enso = 5.
lonmin_enso = 160.
lonmax_enso = 280.
name_enso = 'ENSO'
# North Atlantic
lat_na = lats.copy()
lon_na = lons.copy()
na = meancomp.copy()
latmin_na = 50.
latmax_na = 60.
lonmin_na = 315.
lonmax_na = 340.
name_na = 'North Atlantic'
# Arctic
lat_a = lats.copy()
lon_a = lons.copy()
a = meancomp.copy()
latmin_a = 67.
latmax_a = 90.
lonmin_a= 0.
lonmax_a = 360.
name_a = 'Arctic'
# Central Africa
lat_africa = lats.copy()
lon_africa = lons.copy()
africa = meancomp.copy()
latmin_africa = 0.
latmax_africa = 15.
lonmin_africa = 0.
lonmax_africa = 15.
name_africa = 'Central Africa'
# Southern Ocean
lat_so = lats.copy()
lon_so = lons.copy()
so = meancomp.copy()
latmin_so = -66.
latmax_so = 40.
lonmin_so = 5.
lonmax_so = 70.
name_so = 'Southern Ocean'
# Create lists
names = [name_globe,name_trop,name_nh,name_sh,name_io,
name_enso,name_na,name_a,name_africa,name_so]
lattall = [lat_globe,lat_trop,lat_nh,lat_sh,lat_io,
lat_enso,lat_na,lat_a,lat_africa,lat_so]
latallmin = [latmin_globe,latmin_trop,latmin_nh,latmin_sh,latmin_io,
latmin_enso,latmin_na,latmin_a,latmin_africa,latmin_so]
latallmax = [latmax_globe,latmax_trop,latmax_nh,latmax_sh,latmax_io,
latmax_enso,latmax_na,latmax_a,latmax_africa,latmax_so]
lonnall = [lon_globe,lon_trop,lon_nh,lon_sh,lon_io,
lon_enso,lon_na,lon_a,lon_africa,lon_so]
lonallmin = [lonmin_globe,lonmin_trop,lonmin_nh,lonmin_sh,lonmin_io,
lonmin_enso,lonmin_na,lonmin_a,lonmin_africa,lonmin_so]
lonallmax = [lonmax_globe,lonmax_trop,lonmax_nh,lonmax_sh,lonmax_io,
lonmax_enso,lonmax_na,lonmax_a,lonmax_africa,lonmax_so]
regionsall = [globe,trop,nh,sh,io,enso,na,a,africa,so]
### Calculate regional averages for histograms
regions_average = []
for i in range(len(regionsall)):
    # Indices of grid points inside this region's lat/lon box.
    latq = np.where((lats >= latallmin[i]) & (lats <= latallmax[i]))[0]
    lonq = np.where((lons >= lonallmin[i]) & (lons <= lonallmax[i]))[0]
    latnew = lattall[i][latq]
    lonnew = lonnall[i][lonq]
    lonnew2,latnew2 = np.meshgrid(lonnew,latnew)
    # Subset one axis at a time (simultaneous fancy indexing on both axes
    # would be interpreted as point pairs rather than a box).
    regiongrid1 = regionsall[i][:,:,latq,:]
    regiongrid = regiongrid1[:,:,:,lonq]
    # Latitude-weighted spatial mean -> array of (period, ensemble member).
    ave = UT.calc_weightedAve(regiongrid,latnew2)
    regions_average.append(ave)
### Calculate PDFs
# Fit a Gaussian to the across-ensemble spread of each 45-year period and
# evaluate its PDF on a common set of anomaly bins.
num_bins = np.arange(-0.4,0.401,0.005)
pdfregions = np.empty((len(regions_average),meancomp.shape[0],len(num_bins)))
for rrr in range(len(regions_average)):
    for hist in range(meancomp.shape[0]):
        m,s = sts.norm.fit(regions_average[rrr][hist].ravel())
        pdfregions[rrr,hist,:] = sts.norm.pdf(num_bins,m,s)
###############################################################################
###############################################################################
###############################################################################
### Create graph
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
def adjust_spines(ax, spines):
    """Show only the spines listed in *spines* on *ax*, offset 5pt outward.

    Spines not listed are hidden and the matching axis ticks are removed.
    """
    visible = set(spines)
    for location, spine in ax.spines.items():
        if location in visible:
            spine.set_position(('outward', 5))
        else:
            spine.set_color('none')

    if 'left' in visible:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])

    if 'bottom' in visible:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
### Begin each histogram set
# One figure per region: the four fitted PDFs, colored dark-to-light by period.
color=ww.Chevalier_4.mpl_colormap(np.linspace(0,1,meancomp.shape[0]))
pp = np.empty((pdfregions.shape[0]))
for rrrr in range(pdfregions.shape[0]):
    fig = plt.figure()
    ax = plt.subplot(111)
    adjust_spines(ax, ['left','bottom'])
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.spines['bottom'].set_color('dimgrey')
    ax.spines['left'].set_color('dimgrey')
    ax.spines['bottom'].set_linewidth(2)
    ax.spines['left'].set_linewidth(2)
    ax.tick_params('both',length=5.5,width=2,which='major',color='dimgrey',
                   labelsize=6)
    ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.35)
    ### Calculate statistical difference
    # NOTE(review): the KS test is applied to the *fitted PDF curves* of the
    # first and last periods, not to the underlying ensemble samples — the
    # annotated p-values should be interpreted with care.
    t,p = sts.ks_2samp(pdfregions[rrrr][0,:],pdfregions[rrrr][-1,:])
    pp[rrrr] = p
    for i,c in zip(range(pdfregions.shape[1]),color):
        data = pdfregions[rrrr,i,:]
        plt.plot(num_bins,data,color=c,linewidth=2,label=r'\textbf{%s}' % yeartype[i],
                 clip_on=False)
    plt.xticks(np.arange(-0.4,0.41,0.1),map(str,np.round(np.arange(-0.4,0.41,0.1),2)))
    plt.yticks(np.arange(0,21,2),map(str,np.arange(0,21,2)))
    plt.xlim([-0.4,0.4])
    plt.ylim([0,12])
    l = plt.legend(shadow=False,fontsize=7,loc='upper center',
               fancybox=True,frameon=False,ncol=4,bbox_to_anchor=(0.5,1.1),
               labelspacing=0.2,columnspacing=1,handletextpad=0.4)
    for text in l.get_texts():
        text.set_color('k')
    # Region name (upper left) and significance annotation (upper right).
    plt.text(-0.4,10.9,r'\textbf{%s}' % names[rrrr],fontsize=20,
             color='dimgrey',ha='left',va='center')
    if p < 0.0001:
        plt.text(0.4,10.9,r'\textbf{\textit{p} $\bf{<<}$ 0.01}',fontsize=7,
                 color='k',ha='right',va='center')
    elif p < 0.01:
        plt.text(0.4,10.9,r'\textbf{\textit{p} $\bf{<}$ 0.01}',fontsize=7,
                 color='k',ha='right',va='center')
    elif p < 0.05:
        plt.text(0.4,10.9,r'\textbf{\textit{p} $\bf{<}$ 0.05}',fontsize=7,
                 color='k',ha='right',va='center')
    plt.savefig(directoryfigure + 'PDFs_%s_PeriodsInternal.png' % names[rrrr],
                dpi=300)
###############################################################################
###############################################################################
###############################################################################
### Begin each histogram set
# One figure per region: raw histograms of the first vs. last 45-year period.
c2=ww.FantasticFox2_5.mpl_colormap
pp = np.empty((pdfregions.shape[0]))
for rrrr in range(pdfregions.shape[0]):
    fig = plt.figure()
    ax = plt.subplot(111)
    adjust_spines(ax, ['left','bottom'])
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.spines['bottom'].set_color('dimgrey')
    ax.spines['left'].set_color('dimgrey')
    ax.spines['bottom'].set_linewidth(2)
    ax.spines['left'].set_linewidth(2)
    ax.tick_params('both',length=5.5,width=2,which='major',color='dimgrey',
                   labelsize=6)
    ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.35)
    ### Calculate statistical difference
    # Per-ensemble-member averages for the first and last periods.
    # NOTE(review): the "2055-2099" row depends on the composite loop above
    # actually filling the final period — see the note where meancomp is built.
    datafirst = regions_average[rrrr][0,:]
    datalasts = regions_average[rrrr][-1,:]
    n_lensf, bins_lensf, patches_lensf = plt.hist(datafirst,
                bins=np.arange(-0.4,0.41,0.02),density=False,color=c2(0.1),
                label=r'\textbf{1920-1964}',alpha=0.75,clip_on=False)
    # Restyle the bars: reapply the facecolor and add thin white edges.
    for i in range(len(patches_lensf)):
        patches_lensf[i].set_facecolor(c2(0.1))
        patches_lensf[i].set_edgecolor('white')
        patches_lensf[i].set_linewidth(0.5)
    n_lensl, bins_lensl, patches_lensl = plt.hist(datalasts,
                bins=np.arange(-0.4,0.41,0.02),density=False,color=c2(0.6),
                label=r'\textbf{2055-2099}',alpha=0.75,clip_on=False)
    for i in range(len(patches_lensl)):
        patches_lensl[i].set_facecolor(c2(0.6))
        patches_lensl[i].set_edgecolor('white')
        patches_lensl[i].set_linewidth(0.5)
    plt.xticks(np.arange(-0.4,0.41,0.1),map(str,np.round(np.arange(-0.4,0.41,0.1),2)))
    plt.yticks(np.arange(0,21,2),map(str,np.arange(0,21,2)))
    plt.xlim([-0.4,0.4])
    plt.ylim([0,14])
    l = plt.legend(shadow=False,fontsize=7,loc='upper center',
               fancybox=True,frameon=False,ncol=4,bbox_to_anchor=(0.5,1.1),
               labelspacing=0.2,columnspacing=1,handletextpad=0.4)
    for text in l.get_texts():
        text.set_color('k')
    plt.text(-0.4,12.8,r'\textbf{%s}' % names[rrrr],fontsize=20,
             color='dimgrey',ha='left',va='center')
    plt.savefig(directoryfigure + 'Histogram_%s_PeriodsInternal.png' % names[rrrr],
                dpi=300)
| 33.511628 | 86 | 0.59169 |
stats as sts
import matplotlib.pyplot as plt
import calc_Utilities as UT
import calc_dataFunctions as df
import palettable.wesanderson as ww
import calc_Stats as dSS
p/ExtremeEvents_v1/Composites/LENS/'
reg_name = 'Globe'
dataset = 'lens'
rm_ensemble_mean = True
variq = ['T2M']
monthlychoice = 'annual'
yeartype = ['1920-1964','1965-2009','2010-2054','2055-2099']
| true | true |
f7f871dca5889b9735f57ee44e1ad10703420a0a | 600 | py | Python | tests/test_equiv.py | mvcisback/mce-spec-inference | 58432b35e35b75cab1c77cbbe2057aff94794597 | [
"MIT"
] | null | null | null | tests/test_equiv.py | mvcisback/mce-spec-inference | 58432b35e35b75cab1c77cbbe2057aff94794597 | [
"MIT"
] | null | null | null | tests/test_equiv.py | mvcisback/mce-spec-inference | 58432b35e35b75cab1c77cbbe2057aff94794597 | [
"MIT"
] | null | null | null | import aiger_bv as BV
import aiger_coins as C
import aiger_ptltl as LTL
from mce.equiv import equiv_states
X = LTL.atom('x')
Y = LTL.atom('y')
SYS = C.circ2mdp(BV.aig2aigbv((X.once() | Y.once()).aig))
def test_equiv_states_smoke():
state = SYS._aigbv.latch2init
for t in range(3):
assert equiv_states(SYS, 3, t, state1=state, state2=state)
state1 = SYS.aigbv({'x': (True,), 'y': (False, )})[1]
state2 = SYS.aigbv({'x': (False,), 'y': (True, )})[1]
assert state1 != state2
for t in range(3):
assert equiv_states(SYS, 3, t, state1=state1, state2=state2)
| 24 | 68 | 0.633333 | import aiger_bv as BV
import aiger_coins as C
import aiger_ptltl as LTL
from mce.equiv import equiv_states
X = LTL.atom('x')
Y = LTL.atom('y')
SYS = C.circ2mdp(BV.aig2aigbv((X.once() | Y.once()).aig))
def test_equiv_states_smoke():
state = SYS._aigbv.latch2init
for t in range(3):
assert equiv_states(SYS, 3, t, state1=state, state2=state)
state1 = SYS.aigbv({'x': (True,), 'y': (False, )})[1]
state2 = SYS.aigbv({'x': (False,), 'y': (True, )})[1]
assert state1 != state2
for t in range(3):
assert equiv_states(SYS, 3, t, state1=state1, state2=state2)
| true | true |
f7f87326f4cfe5fe1ae69344beefe240004d11b6 | 2,360 | py | Python | ParlAI/tests/nightly/gpu/test_self_feeding.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 163 | 2019-06-23T14:07:57.000Z | 2022-02-25T23:06:07.000Z | ParlAI/tests/nightly/gpu/test_self_feeding.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 8 | 2019-07-24T12:41:31.000Z | 2022-02-10T00:17:20.000Z | ParlAI/tests/nightly/gpu/test_self_feeding.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 31 | 2019-06-26T01:21:07.000Z | 2021-09-06T17:23:24.000Z | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.core.testing_utils as testing_utils
"""
Integration tests for the Self-Feeding chatbot project.
See projects/self_feeding.
"""
@testing_utils.skipUnlessGPU
class TestSelffeeding(unittest.TestCase):
    """Integration tests for the self-feeding chatbot project (GPU only)."""
    def test_dataset_integrity(self):
        """
        Check that the self-feeding dialogue data loads with the expected sizes.
        """
        train_output, valid_output, test_output = testing_utils.display_data(
            {'task': 'self_feeding:all'}
        )
        # check train data
        self.assertIn("i am spending time with my 4 sisters", train_output)
        self.assertIn('193777 episodes with a total of 193777 examples', train_output)
        # check valid data
        self.assertIn('3500 examples', valid_output)
        # check test data
        self.assertIn('7801 examples', test_output)
    def test_train_model(self):
        """
        Check the training script doesn't crash.
        """
        opt = {
            'model': 'projects.self_feeding.self_feeding_agent:SelfFeedingAgent',
            'task': 'self_feeding:all',
            'max_train_time': 120,
            'dia_train': 'train_hh131k_hb60k.txt',
            'n_layers': 2,
            'n_heads': 2,
            'candidates': 'batch',
            'validation_metric': 'dia_acc',
            'optimizer': 'adamax',
            'learning_rate': 0.0025,
            'ffn_size': 32,
            'batchsize': 32,
            'embeddings_scale': False,
        }
        _, _, _ = testing_utils.train_model(opt)
    def test_released_model(self):
        """
        Check the pretrained model produces correct results.
        """
        _, _, test = testing_utils.eval_model(
            {
                'model_file': 'zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model',
                'task': 'self_feeding:all',
                'batchsize': 20,
            },
            skip_valid=True,
        )
        # Reference metrics for the released checkpoint; small deltas allow
        # for nondeterminism in evaluation.
        self.assertAlmostEqual(test['dia_acc'], 0.506, delta=0.001)
        self.assertAlmostEqual(test['fee_acc'], 0.744, delta=0.001)
        self.assertAlmostEqual(test['sat_f1'], 0.8343, delta=0.0001)
self.assertAlmostEqual(test['sat_f1'], 0.8343, delta=0.0001)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 30.649351 | 86 | 0.601695 |
import unittest
import parlai.core.testing_utils as testing_utils
@testing_utils.skipUnlessGPU
class TestSelffeeding(unittest.TestCase):
def test_dataset_integrity(self):
train_output, valid_output, test_output = testing_utils.display_data(
{'task': 'self_feeding:all'}
)
self.assertIn("i am spending time with my 4 sisters", train_output)
self.assertIn('193777 episodes with a total of 193777 examples', train_output)
self.assertIn('3500 examples', valid_output)
self.assertIn('7801 examples', test_output)
def test_train_model(self):
opt = {
'model': 'projects.self_feeding.self_feeding_agent:SelfFeedingAgent',
'task': 'self_feeding:all',
'max_train_time': 120,
'dia_train': 'train_hh131k_hb60k.txt',
'n_layers': 2,
'n_heads': 2,
'candidates': 'batch',
'validation_metric': 'dia_acc',
'optimizer': 'adamax',
'learning_rate': 0.0025,
'ffn_size': 32,
'batchsize': 32,
'embeddings_scale': False,
}
_, _, _ = testing_utils.train_model(opt)
def test_released_model(self):
_, _, test = testing_utils.eval_model(
{
'model_file': 'zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model',
'task': 'self_feeding:all',
'batchsize': 20,
},
skip_valid=True,
)
self.assertAlmostEqual(test['dia_acc'], 0.506, delta=0.001)
self.assertAlmostEqual(test['fee_acc'], 0.744, delta=0.001)
self.assertAlmostEqual(test['sat_f1'], 0.8343, delta=0.0001)
if __name__ == '__main__':
unittest.main()
| true | true |
f7f873e80e0701b9a77efc459af53f65aac99660 | 1,514 | py | Python | project/dealership/appcoches/models.py | Borjados/IngenieriaWeb | 573590fe30327971affa3f6fc562dbb815e9e553 | [
"bzip2-1.0.6"
] | null | null | null | project/dealership/appcoches/models.py | Borjados/IngenieriaWeb | 573590fe30327971affa3f6fc562dbb815e9e553 | [
"bzip2-1.0.6"
] | null | null | null | project/dealership/appcoches/models.py | Borjados/IngenieriaWeb | 573590fe30327971affa3f6fc562dbb815e9e553 | [
"bzip2-1.0.6"
] | null | null | null | from django.db import models
class Categoria (models.Model):
nombre = models.CharField(max_length=75)
url_categoria = models.CharField(max_length=2000)
class Vendedor (models.Model):
nombre = models.CharField(max_length=75, null=True)
apellido = models.CharField(max_length=75, null=True)
email = models.CharField(max_length=200, null=True)
telefono = models.IntegerField(default=0, null=True)
rating = models.IntegerField(default=0, null=True)
url_vendedor = models.CharField(max_length=2000, null=True)
class Marca(models.Model):
nombre = models.CharField(max_length=75)
url_marca = models.CharField(max_length=2000)
class Coches(models.Model):
modelo = models.CharField(max_length=250)
caballos = models.IntegerField(default=0)
puertas = models.IntegerField(default=0)
color = models.CharField(max_length=50)
kilometros = models.IntegerField(default=0)
anno = models.IntegerField(default=0)
matricula = models.CharField(max_length=50)
oferta = models.BooleanField(default=False)
precio = models.IntegerField(default=0)
precio_oferta = models.IntegerField(default=0)
#porcentaje = Coches.objects.all().annotate(100 - 100*(F('precio_oferta') / F('precio')))
url_coche = models.CharField(max_length=2000)
categoria = models.ManyToManyField(Categoria)
vendedor = models.ForeignKey(Vendedor, on_delete=models.CASCADE)
marca = models.ForeignKey(Marca, on_delete=models.CASCADE)
# Create your models here.
| 42.055556 | 93 | 0.739762 | from django.db import models
class Categoria (models.Model):
nombre = models.CharField(max_length=75)
url_categoria = models.CharField(max_length=2000)
class Vendedor (models.Model):
nombre = models.CharField(max_length=75, null=True)
apellido = models.CharField(max_length=75, null=True)
email = models.CharField(max_length=200, null=True)
telefono = models.IntegerField(default=0, null=True)
rating = models.IntegerField(default=0, null=True)
url_vendedor = models.CharField(max_length=2000, null=True)
class Marca(models.Model):
nombre = models.CharField(max_length=75)
url_marca = models.CharField(max_length=2000)
class Coches(models.Model):
modelo = models.CharField(max_length=250)
caballos = models.IntegerField(default=0)
puertas = models.IntegerField(default=0)
color = models.CharField(max_length=50)
kilometros = models.IntegerField(default=0)
anno = models.IntegerField(default=0)
matricula = models.CharField(max_length=50)
oferta = models.BooleanField(default=False)
precio = models.IntegerField(default=0)
precio_oferta = models.IntegerField(default=0)
url_coche = models.CharField(max_length=2000)
categoria = models.ManyToManyField(Categoria)
vendedor = models.ForeignKey(Vendedor, on_delete=models.CASCADE)
marca = models.ForeignKey(Marca, on_delete=models.CASCADE)
| true | true |
f7f8742872f597c7338354e067573207641604cd | 8,430 | py | Python | rqalpha/examples/extend_api/HKMod/realtime_data_source.py | hadrianl/rqalpha_kairui | eb0e0de1d69f1a4d6f349d4dc011e1c9eccfe2d8 | [
"Apache-2.0"
] | 2 | 2019-03-06T00:39:45.000Z | 2019-05-26T15:34:03.000Z | rqalpha/examples/extend_api/HKMod/realtime_data_source.py | hadrianl/rqalpha_kairui | eb0e0de1d69f1a4d6f349d4dc011e1c9eccfe2d8 | [
"Apache-2.0"
] | null | null | null | rqalpha/examples/extend_api/HKMod/realtime_data_source.py | hadrianl/rqalpha_kairui | eb0e0de1d69f1a4d6f349d4dc011e1c9eccfe2d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/30 0030 9:43
# @Author : Hadrianl
# @File : realtime_data_source
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import numpy as np
from rqalpha.interface import AbstractDataSource
from rqalpha.const import MARGIN_TYPE
from rqalpha.utils.py2 import lru_cache
from rqalpha.utils.datetime_func import convert_date_to_int, convert_int_to_date
from rqalpha.utils.i18n import gettext as _
from rqalpha.data.future_info_cn import CN_FUTURE_INFO
from rqalpha.data.adjust import adjust_bars, FIELDS_REQUIRE_ADJUSTMENT
from rqalpha.data.public_fund_commission import PUBLIC_FUND_COMMISSION
from rqalpha.const import COMMISSION_TYPE
from spapi.spAPI import *
from spapi.sp_struct import *
import datetime as dt
from rqalpha.api import logger
from queue import Queue, Empty
import pymongo as pmg
from threading import Thread
from collections import deque
import pandas as pd
from rqalpha.events import EVENT
import time
from rqalpha.environment import Environment
from rqalpha.model.instrument import Instrument
from .util import _convert_from_ctype
class RealtimeDataSource(AbstractDataSource):
def __init__(self, db_info, server_info):
mongo_cli = pmg.MongoClient(db_info.host)
if db_info.user and db_info.pwd:
admin_db = mongo_cli.get_database('admin')
admin_db.authenticate(db_info.user, db_info.pwd)
self._db = mongo_cli.get_database(db_info.dbname)
self._col = self._db.get_collection('realtime_future_1min_')
self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True)
self._col.create_index([('code', pmg.ASCENDING)])
self.bar_trigger_thread = Thread(target=self.trigger_bar_from_server, args=(server_info.host, server_info.port))
self.bar_trigger_thread.setDaemon(True)
def trigger_bar_from_server(self, host, port):
import zmq
ctx = zmq.Context()
self.trigger_socket = ctx.socket(zmq.SUB)
self.trigger_socket.set_string(zmq.SUBSCRIBE, '')
self.trigger_socket.setsockopt(zmq.RCVTIMEO, 5000)
addr = f'tcp://{host}:{port}'
self.trigger_socket.connect(addr)
env = Environment.get_instance()
event_queue = env.event_source.event_queue
while True:
try:
d = self.trigger_socket.recv_pyobj()
event_queue.put((d, EVENT.BAR))
except zmq.ZMQError:
...
def get_trading_minutes_for(self, order_book_id, trading_dt):
raise NotImplementedError
def get_trading_calendar(self):
Collection = self._db.future_contract_info
trading_calendar = [pd.Timestamp(td) for td in Collection.distinct('DATE')]
trading_calendar.sort(key=lambda x: x.timestamp())
return np.array(trading_calendar)
def get_all_instruments(self):
con_col = self._db.realtime_future_contract_info
prod_col = self._db.realtime_future_product_info
code_list = con_col.distinct('CODE')
inst_list = []
for c in code_list:
con_info = con_col.find_one({'CODE': c}, sort=[('DATE', pmg.DESCENDING)])
prod_info = prod_col.find_one({'CLASS_CODE': con_info['CLASS_CODE']}, sort=[('DATE', pmg.DESCENDING)])
inst = {
# 'abbrev_symbol': 'null',
'contract_multiplier': con_info['CON_SIZE'],
'de_listed_date': con_info['DATE_TO'].strftime('%Y-%m-%d'),
'exchange': 'HKEX',
'listed_date': con_info['DATE_FROM'].strftime('%Y-%m-%d'),
'margin_rate': 0.05,
'maturity_date': con_info['EXPIRY_DATE'].strftime('%Y-%m-%d'),
'order_book_id': con_info['CODE'],
'product': 'Index',
'round_lot': 1.0,
'settlement_method': 'CashSettlementRequired',
'symbol': prod_info['PROD_NAME'],
# 'trading_unit': '5',
'type': 'Future',
'underlying_order_book_id': con_info['Filler'],
'underlying_symbol': con_info['CLASS_CODE']}
inst_list.append(Instrument(inst))
return inst_list
# INSTRUMENT_TYPE_MAP = {
# 'CS': 0,
# 'INDX': 1,
# 'Future': 2,
# 'ETF': 3,
# 'LOF': 3,
# 'FenjiA': 3,
# 'FenjiB': 3,
# 'FenjiMu': 3,
# 'PublicFund': 4
# }
def get_bar(self, instrument, dt, frequency):
if frequency in ['1m', '1min']:
frequency = '1min'
order_book_id = instrument.order_book_id
Collection = self._db.get_collection(f'realtime_future_{frequency}_')
if frequency in ['1m', '1min']:
data = Collection.find_one(
{'code': order_book_id, "datetime": dt})
else:
data = None
if data is None:
return {'code': order_book_id, 'datetime': dt.strftime('%Y-%m-%d %H:%M:%S'), 'open': np.nan, 'high': np.nan,
'low': np.nan, 'close': np.nan, 'volume': np.nan}
else:
data['datetime'] = data['datetime'].strftime('%Y-%m-%d %H:%M:%S')
return data
def get_settle_price(self, instrument, date):
order_book_id = instrument.order_book_id
Collection = self._db.realtime_future_1min_
_d = dt.datetime(date.year, date.month, date.day, 16, 29)
data = Collection.find_one({'code': order_book_id, 'datetime': {'$lte': _d}}, ['close'])
_close = data['close']
return _close
def history_bars(self, instrument, bar_count, frequency, fields, dt,
skip_suspended=True, include_now=False,
adjust_type='pre', adjust_orig=None):
order_book_id = instrument.order_book_id
Collection = self._db.get_collection(f'realtime_future_{frequency}_')
query_type = '$lte' if include_now else '$lt'
cur = Collection.find({'code': order_book_id, 'datetime':{query_type: dt}}, limit=bar_count, sort=[('datetime', pmg.DESCENDING)])
data = deque()
for c in cur:
c['datetime'] = c['datetime'].timestamp()
data.appendleft(c)
_d = pd.DataFrame(list(data))
# _d['datetime'] = _d['datetime'].apply(lambda x: x.timestamp())
fields = [field for field in fields if field in _d.columns]
return _d.loc[:, fields].T.as_matrix()
def get_yield_curve(self, start_date, end_date, tenor=None):
...
def get_risk_free_rate(self, start_date, end_date):
return 0.028
def current_snapshot(self, instrument, frequency, dt):
raise NotImplementedError
def available_data_range(self, frequency):
if frequency == '1m':
return (dt.date(2011, 1, 1), dt.date.today() + dt.timedelta(days=1))
def get_margin_info(self, instrument):
return {
'margin_type': MARGIN_TYPE.BY_MONEY,
'long_margin_ratio': instrument.margin_rate,
'short_margin_ratio': instrument.margin_rate,
}
def get_commission_info(self, instrument):
order_book_id = instrument.order_book_id
if 'HSI' in order_book_id:
commission_info = {'commission_type': COMMISSION_TYPE.BY_VOLUME, 'open_commission_ratio': 33.54, 'close_commission_ratio': 33.54, 'close_commission_today_ratio': 33.54}
elif 'MHI' in order_book_id:
commission_info = {'commission_type': COMMISSION_TYPE.BY_VOLUME, 'open_commission_ratio': 13.6,
'close_commission_ratio': 13.6, 'close_commission_today_ratio': 13.6}
else:
commission_info = super(RealtimeDataSource, self).get_commission_info(instrument)
return commission_info
def get_ticks(self, order_book_id, date):
raise NotImplementedError
| 40.724638 | 180 | 0.641756 |
import os
import six
import numpy as np
from rqalpha.interface import AbstractDataSource
from rqalpha.const import MARGIN_TYPE
from rqalpha.utils.py2 import lru_cache
from rqalpha.utils.datetime_func import convert_date_to_int, convert_int_to_date
from rqalpha.utils.i18n import gettext as _
from rqalpha.data.future_info_cn import CN_FUTURE_INFO
from rqalpha.data.adjust import adjust_bars, FIELDS_REQUIRE_ADJUSTMENT
from rqalpha.data.public_fund_commission import PUBLIC_FUND_COMMISSION
from rqalpha.const import COMMISSION_TYPE
from spapi.spAPI import *
from spapi.sp_struct import *
import datetime as dt
from rqalpha.api import logger
from queue import Queue, Empty
import pymongo as pmg
from threading import Thread
from collections import deque
import pandas as pd
from rqalpha.events import EVENT
import time
from rqalpha.environment import Environment
from rqalpha.model.instrument import Instrument
from .util import _convert_from_ctype
class RealtimeDataSource(AbstractDataSource):
def __init__(self, db_info, server_info):
mongo_cli = pmg.MongoClient(db_info.host)
if db_info.user and db_info.pwd:
admin_db = mongo_cli.get_database('admin')
admin_db.authenticate(db_info.user, db_info.pwd)
self._db = mongo_cli.get_database(db_info.dbname)
self._col = self._db.get_collection('realtime_future_1min_')
self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True)
self._col.create_index([('code', pmg.ASCENDING)])
self.bar_trigger_thread = Thread(target=self.trigger_bar_from_server, args=(server_info.host, server_info.port))
self.bar_trigger_thread.setDaemon(True)
def trigger_bar_from_server(self, host, port):
import zmq
ctx = zmq.Context()
self.trigger_socket = ctx.socket(zmq.SUB)
self.trigger_socket.set_string(zmq.SUBSCRIBE, '')
self.trigger_socket.setsockopt(zmq.RCVTIMEO, 5000)
addr = f'tcp://{host}:{port}'
self.trigger_socket.connect(addr)
env = Environment.get_instance()
event_queue = env.event_source.event_queue
while True:
try:
d = self.trigger_socket.recv_pyobj()
event_queue.put((d, EVENT.BAR))
except zmq.ZMQError:
...
def get_trading_minutes_for(self, order_book_id, trading_dt):
raise NotImplementedError
def get_trading_calendar(self):
Collection = self._db.future_contract_info
trading_calendar = [pd.Timestamp(td) for td in Collection.distinct('DATE')]
trading_calendar.sort(key=lambda x: x.timestamp())
return np.array(trading_calendar)
def get_all_instruments(self):
con_col = self._db.realtime_future_contract_info
prod_col = self._db.realtime_future_product_info
code_list = con_col.distinct('CODE')
inst_list = []
for c in code_list:
con_info = con_col.find_one({'CODE': c}, sort=[('DATE', pmg.DESCENDING)])
prod_info = prod_col.find_one({'CLASS_CODE': con_info['CLASS_CODE']}, sort=[('DATE', pmg.DESCENDING)])
inst = {
'contract_multiplier': con_info['CON_SIZE'],
'de_listed_date': con_info['DATE_TO'].strftime('%Y-%m-%d'),
'exchange': 'HKEX',
'listed_date': con_info['DATE_FROM'].strftime('%Y-%m-%d'),
'margin_rate': 0.05,
'maturity_date': con_info['EXPIRY_DATE'].strftime('%Y-%m-%d'),
'order_book_id': con_info['CODE'],
'product': 'Index',
'round_lot': 1.0,
'settlement_method': 'CashSettlementRequired',
'symbol': prod_info['PROD_NAME'],
'type': 'Future',
'underlying_order_book_id': con_info['Filler'],
'underlying_symbol': con_info['CLASS_CODE']}
inst_list.append(Instrument(inst))
return inst_list
def get_bar(self, instrument, dt, frequency):
if frequency in ['1m', '1min']:
frequency = '1min'
order_book_id = instrument.order_book_id
Collection = self._db.get_collection(f'realtime_future_{frequency}_')
if frequency in ['1m', '1min']:
data = Collection.find_one(
{'code': order_book_id, "datetime": dt})
else:
data = None
if data is None:
return {'code': order_book_id, 'datetime': dt.strftime('%Y-%m-%d %H:%M:%S'), 'open': np.nan, 'high': np.nan,
'low': np.nan, 'close': np.nan, 'volume': np.nan}
else:
data['datetime'] = data['datetime'].strftime('%Y-%m-%d %H:%M:%S')
return data
def get_settle_price(self, instrument, date):
order_book_id = instrument.order_book_id
Collection = self._db.realtime_future_1min_
_d = dt.datetime(date.year, date.month, date.day, 16, 29)
data = Collection.find_one({'code': order_book_id, 'datetime': {'$lte': _d}}, ['close'])
_close = data['close']
return _close
def history_bars(self, instrument, bar_count, frequency, fields, dt,
skip_suspended=True, include_now=False,
adjust_type='pre', adjust_orig=None):
order_book_id = instrument.order_book_id
Collection = self._db.get_collection(f'realtime_future_{frequency}_')
query_type = '$lte' if include_now else '$lt'
cur = Collection.find({'code': order_book_id, 'datetime':{query_type: dt}}, limit=bar_count, sort=[('datetime', pmg.DESCENDING)])
data = deque()
for c in cur:
c['datetime'] = c['datetime'].timestamp()
data.appendleft(c)
_d = pd.DataFrame(list(data))
fields = [field for field in fields if field in _d.columns]
return _d.loc[:, fields].T.as_matrix()
def get_yield_curve(self, start_date, end_date, tenor=None):
...
def get_risk_free_rate(self, start_date, end_date):
return 0.028
def current_snapshot(self, instrument, frequency, dt):
raise NotImplementedError
def available_data_range(self, frequency):
if frequency == '1m':
return (dt.date(2011, 1, 1), dt.date.today() + dt.timedelta(days=1))
def get_margin_info(self, instrument):
return {
'margin_type': MARGIN_TYPE.BY_MONEY,
'long_margin_ratio': instrument.margin_rate,
'short_margin_ratio': instrument.margin_rate,
}
def get_commission_info(self, instrument):
order_book_id = instrument.order_book_id
if 'HSI' in order_book_id:
commission_info = {'commission_type': COMMISSION_TYPE.BY_VOLUME, 'open_commission_ratio': 33.54, 'close_commission_ratio': 33.54, 'close_commission_today_ratio': 33.54}
elif 'MHI' in order_book_id:
commission_info = {'commission_type': COMMISSION_TYPE.BY_VOLUME, 'open_commission_ratio': 13.6,
'close_commission_ratio': 13.6, 'close_commission_today_ratio': 13.6}
else:
commission_info = super(RealtimeDataSource, self).get_commission_info(instrument)
return commission_info
def get_ticks(self, order_book_id, date):
raise NotImplementedError
| true | true |
f7f87428f3b1226ef4d4d717c6ee950613ede0b6 | 1,922 | py | Python | seqpos/lib/python2.7/site-packages/mercurial/scmwindows.py | guanjue/seqpos | ab9308ad128547ca968a1d944490710e583703bc | [
"MIT"
] | null | null | null | seqpos/lib/python2.7/site-packages/mercurial/scmwindows.py | guanjue/seqpos | ab9308ad128547ca968a1d944490710e583703bc | [
"MIT"
] | null | null | null | seqpos/lib/python2.7/site-packages/mercurial/scmwindows.py | guanjue/seqpos | ab9308ad128547ca968a1d944490710e583703bc | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os
from . import (
encoding,
pycompat,
util,
win32,
)
try:
import _winreg as winreg
winreg.CloseKey
except ImportError:
import winreg
# MS-DOS 'more' is the only pager available by default on Windows.
fallbackpager = 'more'
def systemrcpath():
'''return default os-specific hgrc search path'''
rcpath = []
filename = win32.executablepath()
# Use mercurial.ini found in directory with hg.exe
progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
rcpath.append(progrc)
# Use hgrc.d found in directory with hg.exe
progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
if os.path.isdir(progrcd):
for f, kind in util.listdir(progrcd):
if f.endswith('.rc'):
rcpath.append(os.path.join(progrcd, f))
# else look for a system rcpath in the registry
value = util.lookupreg('SOFTWARE\\Mercurial', None,
winreg.HKEY_LOCAL_MACHINE)
if not isinstance(value, str) or not value:
return rcpath
value = util.localpath(value)
for p in value.split(pycompat.ospathsep):
if p.lower().endswith('mercurial.ini'):
rcpath.append(p)
elif os.path.isdir(p):
for f, kind in util.listdir(p):
if f.endswith('.rc'):
rcpath.append(os.path.join(p, f))
return rcpath
def userrcpath():
'''return os-specific hgrc search path to the user dir'''
home = os.path.expanduser('~')
path = [os.path.join(home, 'mercurial.ini'),
os.path.join(home, '.hgrc')]
userprofile = encoding.environ.get('USERPROFILE')
if userprofile and userprofile != home:
path.append(os.path.join(userprofile, 'mercurial.ini'))
path.append(os.path.join(userprofile, '.hgrc'))
return path
def termsize(ui):
return win32.termsize()
| 31 | 69 | 0.631634 | from __future__ import absolute_import
import os
from . import (
encoding,
pycompat,
util,
win32,
)
try:
import _winreg as winreg
winreg.CloseKey
except ImportError:
import winreg
fallbackpager = 'more'
def systemrcpath():
rcpath = []
filename = win32.executablepath()
progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
rcpath.append(progrc)
progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
if os.path.isdir(progrcd):
for f, kind in util.listdir(progrcd):
if f.endswith('.rc'):
rcpath.append(os.path.join(progrcd, f))
value = util.lookupreg('SOFTWARE\\Mercurial', None,
winreg.HKEY_LOCAL_MACHINE)
if not isinstance(value, str) or not value:
return rcpath
value = util.localpath(value)
for p in value.split(pycompat.ospathsep):
if p.lower().endswith('mercurial.ini'):
rcpath.append(p)
elif os.path.isdir(p):
for f, kind in util.listdir(p):
if f.endswith('.rc'):
rcpath.append(os.path.join(p, f))
return rcpath
def userrcpath():
home = os.path.expanduser('~')
path = [os.path.join(home, 'mercurial.ini'),
os.path.join(home, '.hgrc')]
userprofile = encoding.environ.get('USERPROFILE')
if userprofile and userprofile != home:
path.append(os.path.join(userprofile, 'mercurial.ini'))
path.append(os.path.join(userprofile, '.hgrc'))
return path
def termsize(ui):
return win32.termsize()
| true | true |
f7f8751701c664915245122336c8f017860e8762 | 6,268 | py | Python | scripts/14_I.py | HalfInner/AoC2019 | 9badb1d44cd5672b4e5860c8cb7490d60d7c19e3 | [
"MIT"
] | 1 | 2019-12-13T20:02:55.000Z | 2019-12-13T20:02:55.000Z | scripts/14_I.py | HalfInner/AoC2019 | 9badb1d44cd5672b4e5860c8cb7490d60d7c19e3 | [
"MIT"
] | null | null | null | scripts/14_I.py | HalfInner/AoC2019 | 9badb1d44cd5672b4e5860c8cb7490d60d7c19e3 | [
"MIT"
] | null | null | null | '''
--- Day 14: Space Stoichiometry ---
As you approach the rings of Saturn, your ship's low fuel indicator turns on. There isn't any fuel here, but the rings have plenty of raw material. Perhaps your ship's Inter-Stellar Refinery Union brand nanofactory can turn these raw materials into fuel.
You ask the nanofactory to produce a list of the reactions it can perform that are relevant to this process (your puzzle input). Every reaction turns some quantities of specific input chemicals into some quantity of an output chemical. Almost every chemical is produced by exactly one reaction; the only exception, ORE, is the raw material input to the entire process and is not produced by a reaction.
You just need to know how much ORE you'll need to collect before you can produce one unit of FUEL.
Each reaction gives specific quantities for its inputs and output; reactions cannot be partially run, so only whole integer multiples of these quantities can be used. (It's okay to have leftover chemicals when you're done, though.) For example, the reaction 1 A, 2 B, 3 C => 2 D means that exactly 2 units of chemical D can be produced by consuming exactly 1 A, 2 B and 3 C. You can run the full reaction as many times as necessary; for example, you could produce 10 D by consuming 5 A, 10 B, and 15 C.
Suppose your nanofactory produces the following list of reactions:
10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL
The first two reactions use only ORE as inputs; they indicate that you can produce as much of chemical A as you want (in increments of 10 units, each 10 costing 10 ORE) and as much of chemical B as you want (each costing 1 ORE). To produce 1 FUEL, a total of 31 ORE is required: 1 ORE to produce 1 B, then 30 more ORE to produce the 7 + 7 + 7 + 7 = 28 A (with 2 extra A wasted) required in the reactions to convert the B into C, C into D, D into E, and finally E into FUEL. (30 A is produced because its reaction requires that it is created in increments of 10.)
Or, suppose you have the following list of reactions:
9 ORE => 2 A
8 ORE => 3 B
7 ORE => 5 C
3 A, 4 B => 1 AB
5 B, 7 C => 1 BC
4 C, 1 A => 1 CA
2 AB, 3 BC, 4 CA => 1 FUEL
The above list of reactions requires 165 ORE to produce 1 FUEL:
Consume 45 ORE to produce 10 A.
Consume 64 ORE to produce 24 B.
Consume 56 ORE to produce 40 C.
Consume 6 A, 8 B to produce 2 AB.
Consume 15 B, 21 C to produce 3 BC.
Consume 16 C, 4 A to produce 4 CA.
Consume 2 AB, 3 BC, 4 CA to produce 1 FUEL.
Here are some larger examples:
13312 ORE for 1 FUEL:
157 ORE => 5 NZVS
165 ORE => 6 DCFZ
44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
179 ORE => 7 PSHF
177 ORE => 5 HKGWZ
7 DCFZ, 7 PSHF => 2 XJWVT
165 ORE => 2 GPVTF
3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT
180697 ORE for 1 FUEL:
2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG
17 NVRVD, 3 JNWZP => 8 VPVL
53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL
22 VJHF, 37 MNCFX => 5 FWMGM
139 ORE => 4 NVRVD
144 ORE => 7 JNWZP
5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC
5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV
145 ORE => 6 MNCFX
1 NVRVD => 8 CXFTF
1 VJHF, 6 MNCFX => 4 RFSQX
176 ORE => 6 VJHF
2210736 ORE for 1 FUEL:
171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX
Given the list of reactions in your puzzle input, what is the minimum amount of ORE required to produce exactly 1 FUEL?
'''
import sys
import re
from collections import deque
from math import ceil
def produce_chemical(chemical_receipts, seek_chemical='FUEL'):
base_chemical = 'ORE'
stack = deque([seek_chemical])
chemical_ingredients = {base_chemical: 0}
ore_creators = {}
div = 1
while stack:
generating_chemical = stack.pop()
mul = chemical_ingredients[generating_chemical] if generating_chemical in chemical_ingredients.keys() else 1
chemical_list = chemical_receipts[generating_chemical][1]
for required_amount, chemical in chemical_list:
if chemical == base_chemical:
ore_creators[generating_chemical] = 0
continue
if chemical not in chemical_ingredients.keys():
chemical_ingredients[chemical] = 0
calculated_ingredients = required_amount * mul
chemical_ingredients[chemical] += calculated_ingredients
# val1 = ceil(required_amount / chemical_receipts[ore_creator][0])
# val2 = chemical_receipts[ore_creator][1][0][0]
# chemical_ingredients[base_chemical] += val1 * val2
if chemical not in stack:
stack.appendleft(chemical)
print(stack)
print(chemical_receipts)
print(chemical_ingredients)
print(chemical_ingredients[base_chemical])
print(ore_creators)
chemical_ingredients[base_chemical] = 0
for ore_creator in ore_creators:
val1 = ceil(chemical_ingredients[ore_creator] / chemical_receipts[ore_creator][0])
val2 = chemical_receipts[ore_creator][1][0][0]
chemical_ingredients[base_chemical] += val1 * val2
return chemical_ingredients[base_chemical]
def parse_file(file_path: str):
chemical_receipts = {}
with open(file_path, 'r') as f:
for line in f:
matches = [(int(amount), chemical) for (amount, chemical) in
zip(re.findall('\d+', line), re.findall('[A-Z]+', line))]
if matches:
chemical_receipts[matches[-1][1]] = [matches[-1][0], matches[:-1]]
return chemical_receipts
def main(argv):
print('{} ORES are need to get one FUEL'.format(produce_chemical(parse_file(argv[1]))))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 40.96732 | 562 | 0.689215 |
import sys
import re
from collections import deque
from math import ceil
def produce_chemical(chemical_receipts, seek_chemical='FUEL'):
base_chemical = 'ORE'
stack = deque([seek_chemical])
chemical_ingredients = {base_chemical: 0}
ore_creators = {}
div = 1
while stack:
generating_chemical = stack.pop()
mul = chemical_ingredients[generating_chemical] if generating_chemical in chemical_ingredients.keys() else 1
chemical_list = chemical_receipts[generating_chemical][1]
for required_amount, chemical in chemical_list:
if chemical == base_chemical:
ore_creators[generating_chemical] = 0
continue
if chemical not in chemical_ingredients.keys():
chemical_ingredients[chemical] = 0
calculated_ingredients = required_amount * mul
chemical_ingredients[chemical] += calculated_ingredients
if chemical not in stack:
stack.appendleft(chemical)
print(stack)
print(chemical_receipts)
print(chemical_ingredients)
print(chemical_ingredients[base_chemical])
print(ore_creators)
chemical_ingredients[base_chemical] = 0
for ore_creator in ore_creators:
val1 = ceil(chemical_ingredients[ore_creator] / chemical_receipts[ore_creator][0])
val2 = chemical_receipts[ore_creator][1][0][0]
chemical_ingredients[base_chemical] += val1 * val2
return chemical_ingredients[base_chemical]
def parse_file(file_path: str):
chemical_receipts = {}
with open(file_path, 'r') as f:
for line in f:
matches = [(int(amount), chemical) for (amount, chemical) in
zip(re.findall('\d+', line), re.findall('[A-Z]+', line))]
if matches:
chemical_receipts[matches[-1][1]] = [matches[-1][0], matches[:-1]]
return chemical_receipts
def main(argv):
print('{} ORES are need to get one FUEL'.format(produce_chemical(parse_file(argv[1]))))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| true | true |
f7f876044c0a1f8805ab465254bfe383b3380f3d | 2,420 | py | Python | Graphics/Canvas.py | TausifAnsari/PyHub | f6c949dc6a3974f57d7d146708443d0ceeb4418f | [
"MIT"
] | 1 | 2020-09-30T19:31:20.000Z | 2020-09-30T19:31:20.000Z | Graphics/Canvas.py | TanviSutar/PyHub | 6281e9f515674fb51f0d0862c26ec18020fa7d83 | [
"MIT"
] | null | null | null | Graphics/Canvas.py | TanviSutar/PyHub | 6281e9f515674fb51f0d0862c26ec18020fa7d83 | [
"MIT"
] | null | null | null | from tkinter import *
def doc():
"""
Using the Tkinter canvas :
• bd : Border Width in pixels. Default value : 2
• bg : Background Colour.
• Confine : If true, the canvas cannot be scrolled outside the scroll region.
• Cursor : The cursor is used in the canvas like an arrow, circle or a dot.
• Height : Used to set the size of the canvas in the Y-dimension.
• Highlightcolor : Changes the highlight colour.
• Relief : Type of border - Raised, ridge, groove, sharpened.
• Scrollregion : It is a tuple that defines over how large in area the canvas can be scrolled.
It takes values in the west,north,east and south(Exact order).
• Width : Used to set the size of the canvas in the X-dimension.
• xscrollincrement : If set in +ve number, then canvas can only be placed in multiples of that number.
• xscrollcommand : Canvas scrolling in the horizontal direction.
• yscrollincrement : Similar to the xscrollincrement
• yscrollcommand : Similar to the xscrollcommand but in vertical direction.
Some Standards :
• Arc
• Image
• Line
• Oval
• Polygon
"""
print(doc.__doc__)
root = Tk()
root.title("Arc")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
y = Canvas(root, width=500,height=500,bg="black")
y.pack()
coordinates = 10,50,240,210
arc = y.create_arc(coordinates, start=0, extent=150, fill="white")
mainloop()
root = Tk()
root.title("Triangle")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
x = Canvas(root, width=500,height=500,bg="red")
x.pack()
coordinates = [0,0,200,100,0,200]
poly = x.create_polygon(coordinates, outline="green", fill="yellow", width = 3)
mainloop()
root = Tk()
root.title("Images")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
z = Canvas(root, width=500,height=500)
z.pack()
photo = PhotoImage(file="assets/Random.PNG")
a = z.create_image(250,250,image=photo)
mainloop()
root = Tk()
root.title("Star")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
z = Canvas(root, width=500,height=500, bg="black")
z.pack()
coordinates = [100,140,110,110,140,100,110,90,100,60,90,90,60,100,90,110]
z.create_polygon(coordinates, outline="gold",fill="white",width=3)
mainloop() | 36.666667 | 111 | 0.652893 | from tkinter import *
def doc():
print(doc.__doc__)
root = Tk()
root.title("Arc")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
y = Canvas(root, width=500,height=500,bg="black")
y.pack()
coordinates = 10,50,240,210
arc = y.create_arc(coordinates, start=0, extent=150, fill="white")
mainloop()
root = Tk()
root.title("Triangle")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
x = Canvas(root, width=500,height=500,bg="red")
x.pack()
coordinates = [0,0,200,100,0,200]
poly = x.create_polygon(coordinates, outline="green", fill="yellow", width = 3)
mainloop()
root = Tk()
root.title("Images")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
z = Canvas(root, width=500,height=500)
z.pack()
photo = PhotoImage(file="assets/Random.PNG")
a = z.create_image(250,250,image=photo)
mainloop()
root = Tk()
root.title("Star")
root.geometry("500x500")
root.iconbitmap("assets/favicon.ico")
z = Canvas(root, width=500,height=500, bg="black")
z.pack()
coordinates = [100,140,110,110,140,100,110,90,100,60,90,90,60,100,90,110]
z.create_polygon(coordinates, outline="gold",fill="white",width=3)
mainloop() | true | true |
f7f876e7206d63a37cf68f3aa907bb9332beba14 | 5,317 | py | Python | test/module/test_template.py | PokaInc/cfn-python-lint | 2cd49b838088a1be1b1a5e5abdff556f264903b4 | [
"MIT-0"
] | null | null | null | test/module/test_template.py | PokaInc/cfn-python-lint | 2cd49b838088a1be1b1a5e5abdff556f264903b4 | [
"MIT-0"
] | null | null | null | test/module/test_template.py | PokaInc/cfn-python-lint | 2cd49b838088a1be1b1a5e5abdff556f264903b4 | [
"MIT-0"
] | null | null | null | """
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import Template # pylint: disable=E0401
from testlib.testcase import BaseTestCase
class TestTemplate(BaseTestCase):
    """Unit tests for the cfnlint ``Template`` wrapper class."""
    def setUp(self):
        """Load the known-good fixture template once per test."""
        # Individual tests may replace self.template with their own instance.
        filename = 'test/fixtures/templates/good/generic.yaml'
        template = self.load_template(filename)
        self.template = Template(filename, template)
        # Logical resource IDs expected in the fixture (a representative
        # subset is enough for the membership checks below).
        self.resource_names = [
            'IamPipeline',
            'RootInstanceProfile',
            'RolePolicies',
            'MyEC2Instance',
            'RootRole',
            'mySnsTopic',
            'MyEC2Instance1',
            'ElasticLoadBalancer'
        ]
        # Parameter names expected in the fixture.
        self.parameter_names = [
            'WebServerPort',
            'Package',
            'Package1',
            'pIops'
        ]
    def test_get_resources_success(self):
        """get_resources() returns every resource in the fixture."""
        valid_resource_count = 11
        resources = self.template.get_resources()
        assert len(resources) == valid_resource_count, 'Expected {} resources, got {}'.format(valid_resource_count, len(resources))
    def test_get_resources_bad(self):
        """Don't get resources that aren't properly configured"""
        # 'Properties'/'Type' sit directly under Resources instead of under a
        # logical resource ID, so get_resources() should drop the entry.
        template = {
            'Resources': {
                'Properties': {
                    'BucketName': "bucket_test"
                },
                'Type': "AWS::S3::Bucket"
            }
        }
        self.template = Template('test.yaml', template)
        resources = self.template.get_resources()
        assert resources == {}
    def test_get_resource_names(self):
        """ Test Resource Names"""
        resource_names = self.template.get_resource_names()
        # NOTE(review): this only checks that at least one expected name is
        # present; set(self.resource_names).issubset(...) would be stronger.
        assert bool(set(resource_names).intersection(self.resource_names))
    def test_get_parameters(self):
        """get_parameters() returns every parameter in the fixture."""
        valid_parameter_count = 7
        parameters = self.template.get_parameters()
        assert len(parameters) == valid_parameter_count, 'Expected {} parameters, got {}'.format(valid_parameter_count, len(parameters))
    def test_get_parameter_names(self):
        """Test Get Parameter Names"""
        parameters = self.template.get_parameter_names()
        # NOTE(review): same weak any-overlap assertion as resource names.
        assert bool(set(parameters).intersection(self.parameter_names))
    def test_get_valid_refs(self):
        """Count of all Ref-able names (parameters, resources, pseudo params)."""
        valid_ref_count = 26
        refs = self.template.get_valid_refs()
        assert len(refs) == valid_ref_count, 'Expected {} refs, got {}'.format(valid_ref_count, len(refs))
    def test_conditions_return_object_success(self):
        """Test condition object response and nested IFs"""
        # Fn::If payload: [condition, value_if_true, value_if_false]; the
        # nested branch resolving to AWS::NoValue must be dropped from the
        # returned values.
        template = [
            'isProd',
            {
                'Key': 'Environment1',
                'Value': 'Prod'
            },
            {
                'Fn::If': [
                    'isDev',
                    {
                        'Key': 'Environment2',
                        'Value': 'Dev'
                    },
                    {
                        "Ref": "AWS::NoValue"
                    }
                ]
            }
        ]
        results = self.template.get_condition_values(template, [])
        # 'Path' locates each value inside the original structure.
        self.assertEqual(results, [
            {'Path': [1], 'Value': {'Value': 'Prod', 'Key': 'Environment1'}},
            {'Path': [2, 'Fn::If', 1], 'Value': {'Value': 'Dev', 'Key': 'Environment2'}}
        ])
    def test_conditions_return_list_success(self):
        """Test condition list response"""
        # Branch values that are lists must be returned whole.
        template = [
            'PrimaryRegion',
            [
                'EN'
            ],
            [
                'BE',
                'LU',
                'NL'
            ]
        ]
        results = self.template.get_condition_values(template, [])
        self.assertEqual(results, [
            {'Value': ['EN'], 'Path': [1]}, {'Value': ['BE', 'LU', 'NL'], 'Path': [2]}
        ])
    def test_conditions_return_string_success(self):
        """Branch values may be dicts (e.g. Ref) or plain strings."""
        template = [
            'isProd',
            {
                'Ref': 'Sample'
            },
            'String'
        ]
        results = self.template.get_condition_values(template, [])
        self.assertEqual(results, [
            {'Path': [1], 'Value': {'Ref': 'Sample'}}, {'Path': [2], 'Value': 'String'}
        ])
| 35.925676 | 136 | 0.564228 | from cfnlint import Template
from testlib.testcase import BaseTestCase
class TestTemplate(BaseTestCase):
def setUp(self):
filename = 'test/fixtures/templates/good/generic.yaml'
template = self.load_template(filename)
self.template = Template(filename, template)
self.resource_names = [
'IamPipeline',
'RootInstanceProfile',
'RolePolicies',
'MyEC2Instance',
'RootRole',
'mySnsTopic',
'MyEC2Instance1',
'ElasticLoadBalancer'
]
self.parameter_names = [
'WebServerPort',
'Package',
'Package1',
'pIops'
]
def test_get_resources_success(self):
valid_resource_count = 11
resources = self.template.get_resources()
assert len(resources) == valid_resource_count, 'Expected {} resources, got {}'.format(valid_resource_count, len(resources))
def test_get_resources_bad(self):
template = {
'Resources': {
'Properties': {
'BucketName': "bucket_test"
},
'Type': "AWS::S3::Bucket"
}
}
self.template = Template('test.yaml', template)
resources = self.template.get_resources()
assert resources == {}
def test_get_resource_names(self):
resource_names = self.template.get_resource_names()
assert bool(set(resource_names).intersection(self.resource_names))
def test_get_parameters(self):
valid_parameter_count = 7
parameters = self.template.get_parameters()
assert len(parameters) == valid_parameter_count, 'Expected {} parameters, got {}'.format(valid_parameter_count, len(parameters))
def test_get_parameter_names(self):
parameters = self.template.get_parameter_names()
assert bool(set(parameters).intersection(self.parameter_names))
def test_get_valid_refs(self):
valid_ref_count = 26
refs = self.template.get_valid_refs()
assert len(refs) == valid_ref_count, 'Expected {} refs, got {}'.format(valid_ref_count, len(refs))
def test_conditions_return_object_success(self):
template = [
'isProd',
{
'Key': 'Environment1',
'Value': 'Prod'
},
{
'Fn::If': [
'isDev',
{
'Key': 'Environment2',
'Value': 'Dev'
},
{
"Ref": "AWS::NoValue"
}
]
}
]
results = self.template.get_condition_values(template, [])
self.assertEqual(results, [
{'Path': [1], 'Value': {'Value': 'Prod', 'Key': 'Environment1'}},
{'Path': [2, 'Fn::If', 1], 'Value': {'Value': 'Dev', 'Key': 'Environment2'}}
])
def test_conditions_return_list_success(self):
template = [
'PrimaryRegion',
[
'EN'
],
[
'BE',
'LU',
'NL'
]
]
results = self.template.get_condition_values(template, [])
self.assertEqual(results, [
{'Value': ['EN'], 'Path': [1]}, {'Value': ['BE', 'LU', 'NL'], 'Path': [2]}
])
def test_conditions_return_string_success(self):
template = [
'isProd',
{
'Ref': 'Sample'
},
'String'
]
results = self.template.get_condition_values(template, [])
self.assertEqual(results, [
{'Path': [1], 'Value': {'Ref': 'Sample'}}, {'Path': [2], 'Value': 'String'}
])
| true | true |
f7f8772bc75acbd519b7c21a5edfe35cbdaad8c5 | 3,816 | py | Python | watertap/examples/flowsheets/full_treatment_train/model_components/unit_0DRO.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/examples/flowsheets/full_treatment_train/model_components/unit_0DRO.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/examples/flowsheets/full_treatment_train/model_components/unit_0DRO.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""0D reverse osmosis examples"""
from pyomo.environ import ConcreteModel
from idaes.core import FlowsheetBlock
from idaes.core.util.scaling import calculate_scaling_factors
from watertap.examples.flowsheets.full_treatment_train.model_components import property_models
from watertap.unit_models.reverse_osmosis_0D import (ReverseOsmosis0D,
ConcentrationPolarizationType,
MassTransferCoefficient,
PressureChangeType)
from watertap.examples.flowsheets.full_treatment_train.util import solve_block, check_dof
def build_RO(m, base='TDS', level='simple', name_str='RO'):
    """
    Builds a 0DRO model at a specified level (simple or detailed).
    Requires prop_TDS property package.
    """
    if base not in ['TDS']:
        raise ValueError('Unexpected property base {base} for build_RO'
                         ''.format(base=base))
    prop = property_models.get_prop(m, base=base)
    # Assemble the unit configuration for the requested fidelity level.
    if level == 'simple':
        unit_config = {
            "property_package": prop,
            "mass_transfer_coefficient": MassTransferCoefficient.none,
            "concentration_polarization_type": ConcentrationPolarizationType.none}
    elif level == 'detailed':
        unit_config = {
            "property_package": prop,
            "has_pressure_change": True,
            "pressure_change_type": PressureChangeType.calculated,
            "mass_transfer_coefficient": MassTransferCoefficient.calculated,
            "concentration_polarization_type": ConcentrationPolarizationType.calculated}
    else:
        raise ValueError('Unexpected argument {level} for level in build_RO'
                         ''.format(level=level))
    # Build the unit and attach it to the flowsheet under name_str.
    setattr(m.fs, name_str, ReverseOsmosis0D(default=unit_config))
    blk = getattr(m.fs, name_str)
    # Membrane specifications shared by both levels.
    blk.area.fix(50)
    blk.A_comp.fix(4.2e-12)
    blk.B_comp.fix(3.5e-8)
    blk.permeate.pressure[0].fix(101325)
    if level == 'detailed':
        # Channel/hydrodynamic specs only the detailed model exposes.
        blk.channel_height.fix(1e-3)
        blk.spacer_porosity.fix(0.97)
        blk.N_Re[0, 0].fix(500)
def solve_RO(base='TDS', level='simple'):
    """Build, scale, initialize, and solve a standalone 0D RO flowsheet.

    :param base: property basis; 'TDS' is the only value build_RO accepts
    :param level: RO model fidelity, 'simple' or 'detailed'
    :return: the solved ConcreteModel
    """
    m = ConcreteModel()
    m.fs = FlowsheetBlock(default={"dynamic": False})
    # CONSISTENCY FIX: forward the caller's `base` instead of hard-coding
    # 'TDS'; build_RO below still rejects any unsupported base.
    property_models.build_prop(m, base=base)
    build_RO(m, base=base, level=level)
    # specify feed
    property_models.specify_feed(m.fs.RO.feed_side.properties_in[0], base=base)
    m.fs.RO.feed_side.properties_in[0].pressure.fix(50e5)
    # scaling
    calculate_scaling_factors(m)
    # initialize, then confirm zero degrees of freedom before solving
    m.fs.RO.initialize(optarg={'nlp_scaling_method': 'user-scaling'})
    check_dof(m)
    solve_block(m)
    m.fs.RO.report()
    return m
if __name__ == "__main__":
    # Smoke-run the RO flowsheet at both fidelity levels when executed
    # as a script.
    solve_RO(base='TDS', level='simple')
    solve_RO(base='TDS', level='detailed')
| 37.048544 | 94 | 0.632075 | true | true | |
f7f8777c4120d6e87e5b2115ca5195a0e9c8fe05 | 180 | py | Python | what_apps/presence/functions.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | what_apps/presence/functions.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | what_apps/presence/functions.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | def how_user_handles_food(user):
try:
if user.handles_food_with_vigilance:
return "vigilance"
except AttributeError:
return "probably a sucker." | 30 | 44 | 0.666667 | def how_user_handles_food(user):
try:
if user.handles_food_with_vigilance:
return "vigilance"
except AttributeError:
return "probably a sucker." | true | true |
f7f878f1aa78ce5df61fe7b3686bea64cf0b1e66 | 288 | py | Python | ai4water/postprocessing/SeqMetrics/__init__.py | csiro-hydroinformatics/AI4Water | cdb18bd4bf298f77b381f1829045a1e790146985 | [
"MIT"
] | 12 | 2020-10-13T08:23:17.000Z | 2021-01-22T04:36:21.000Z | ai4water/postprocessing/SeqMetrics/__init__.py | csiro-hydroinformatics/AI4Water | cdb18bd4bf298f77b381f1829045a1e790146985 | [
"MIT"
] | 1 | 2020-10-15T02:42:52.000Z | 2020-10-15T02:51:07.000Z | ai4water/postprocessing/SeqMetrics/__init__.py | csiro-hydroinformatics/AI4Water | cdb18bd4bf298f77b381f1829045a1e790146985 | [
"MIT"
] | 2 | 2020-11-23T04:45:38.000Z | 2020-11-26T10:12:34.000Z | """SeqMetrics, the module to calculate performance related to tabular/structured and sequential data.
The values in a sequence are not necessarily related.
"""
from ._SeqMetrics import Metrics
from ._regression import RegressionMetrics
from ._classification import ClassificationMetrics
| 36 | 101 | 0.833333 |
from ._SeqMetrics import Metrics
from ._regression import RegressionMetrics
from ._classification import ClassificationMetrics
| true | true |
f7f8795c7a600262712e370ae7d87edc408df69a | 306 | py | Python | taobao_wechat_bot/top/api/rest/BaichuanOrderurlGetRequest.py | zyphs21/myPythonPractise | 7da984c98ee93e650ab2f9da4a2502340f1220b4 | [
"MIT"
] | null | null | null | taobao_wechat_bot/top/api/rest/BaichuanOrderurlGetRequest.py | zyphs21/myPythonPractise | 7da984c98ee93e650ab2f9da4a2502340f1220b4 | [
"MIT"
] | null | null | null | taobao_wechat_bot/top/api/rest/BaichuanOrderurlGetRequest.py | zyphs21/myPythonPractise | 7da984c98ee93e650ab2f9da4a2502340f1220b4 | [
"MIT"
] | null | null | null | '''
Created by auto_sdk on 2015.06.10
'''
from top.api.base import RestApi
class BaichuanOrderurlGetRequest(RestApi):
    """TOP REST request wrapper for the ``taobao.baichuan.orderurl.get`` API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        super(BaichuanOrderurlGetRequest, self).__init__(domain, port)
        # Request parameter; populated by the caller before execution.
        self.name = None

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.baichuan.orderurl.get'
| 25.5 | 56 | 0.72549 | from top.api.base import RestApi
class BaichuanOrderurlGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.name = None
def getapiname(self):
return 'taobao.baichuan.orderurl.get'
| true | true |
f7f879694ce499eb28bc61ff25bff05417c15a11 | 1,736 | py | Python | python/clx/utils/data/utils.py | shaneding/clx | 6225c03a6bf5c9f7e1b31ace664dca12118f2706 | [
"Apache-2.0"
] | 143 | 2019-11-06T16:08:50.000Z | 2022-03-22T12:14:59.000Z | python/clx/utils/data/utils.py | shaneding/clx | 6225c03a6bf5c9f7e1b31ace664dca12118f2706 | [
"Apache-2.0"
] | 361 | 2019-11-06T20:33:24.000Z | 2022-03-31T19:59:12.000Z | python/clx/utils/data/utils.py | shaneding/clx | 6225c03a6bf5c9f7e1b31ace664dca12118f2706 | [
"Apache-2.0"
] | 82 | 2019-11-06T17:36:42.000Z | 2022-03-17T07:03:04.000Z | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import logging
log = logging.getLogger(__name__)
def str2ascii(df, col_name):
    """
    This function sorts domain name entries in desc order based on the length of domain and converts domain name to ascii characters.

    :param df: Domains which requires conversion.
    :type df: cudf.DataFrame
    :param col_name: Name of the column that needs to be transformed.
    :type col_name: str
    :return: Ascii character converted information.
    :rtype: cudf.DataFrame
    """
    df["len"] = df[col_name].str.len()
    # Longest domains first so the widest row defines the column count.
    df = df.sort_values("len", ascending=False)
    # FIX: raw string avoids invalid-escape-sequence warnings (\w, \d, ...
    # are not valid string escapes); the class matches any single character,
    # splitting each domain into one column per character.
    split_df = df[col_name].str.findall(r"[\w\W\d\D\s\S]")
    columns_cnt = len(split_df.columns)
    # Pad shorter domains with '^' so every row has a code point per column.
    split_df = split_df.fillna("^")
    temp_df = cudf.DataFrame()
    for col in range(0, columns_cnt):
        temp_df[col] = split_df[col].str.code_points()
    del split_df
    # Replace the padding character '^' (ASCII 94) with 0.
    # NOTE(review): this also zeroes any legitimate '^' in the input column —
    # confirm '^' cannot occur in valid domains for this pipeline.
    temp_df = temp_df.replace(94, 0)
    temp_df.index = df.index
    temp_df["len"] = df["len"]
    if "type" in df.columns:
        temp_df["type"] = df["type"]
    temp_df[col_name] = df[col_name]
    return temp_df
| 33.384615 | 133 | 0.6947 |
import cudf
import logging
log = logging.getLogger(__name__)
def str2ascii(df, col_name):
df["len"] = df[col_name].str.len()
df = df.sort_values("len", ascending=False)
split_df = df[col_name].str.findall("[\w\W\d\D\s\S]")
columns_cnt = len(split_df.columns)
split_df = split_df.fillna("^")
temp_df = cudf.DataFrame()
for col in range(0, columns_cnt):
temp_df[col] = split_df[col].str.code_points()
del split_df
# Replace ^ ascii value 94 with 0.
temp_df = temp_df.replace(94, 0)
temp_df.index = df.index
temp_df["len"] = df["len"]
if "type" in df.columns:
temp_df["type"] = df["type"]
temp_df[col_name] = df[col_name]
return temp_df
| true | true |
f7f879a7e195e1737735d4a49ac43b8262fb237f | 898 | py | Python | train_net/read_and_reshape_data.py | ryanmdavis/classifyHistology | 563687250f6d81a7e2596607587238354e7279e5 | [
"MIT"
] | null | null | null | train_net/read_and_reshape_data.py | ryanmdavis/classifyHistology | 563687250f6d81a7e2596607587238354e7279e5 | [
"MIT"
] | null | null | null | train_net/read_and_reshape_data.py | ryanmdavis/classifyHistology | 563687250f6d81a7e2596607587238354e7279e5 | [
"MIT"
] | null | null | null | from tensorflow.examples.tutorials.mnist import input_data
def readReshapeData(path):
    """Load MNIST from *path*, print split shapes, and return images
    reshaped to (N, 28, 28, 1) together with their one-hot labels.

    Returns (train_X, test_X, train_y, test_y).
    """
    mnist = input_data.read_data_sets(path, one_hot=True)
    # Report the raw shapes of both splits.
    for split_name, split in (("Training", mnist.train), ("Test", mnist.test)):
        print("{} set (images) shape: {shape}".format(split_name, shape=split.images.shape))
        print("{} set (labels) shape: {shape}".format(split_name, shape=split.labels.shape))
    # Reshape flat 784-pixel rows into single-channel 28x28 images.
    train_X = mnist.train.images.reshape(-1, 28, 28, 1)
    test_X = mnist.test.images.reshape(-1, 28, 28, 1)
    return train_X, test_X, mnist.train.labels, mnist.test.labels
return train_X,test_X, train_y,test_y | 37.416667 | 87 | 0.691537 | from tensorflow.examples.tutorials.mnist import input_data
def readReshapeData(path):
data = input_data.read_data_sets(path,one_hot=True)
print("Training set (images) shape: {shape}".format(shape=data.train.images.shape))
print("Training set (labels) shape: {shape}".format(shape=data.train.labels.shape))
print("Test set (images) shape: {shape}".format(shape=data.test.images.shape))
print("Test set (labels) shape: {shape}".format(shape=data.test.labels.shape))
train_X = data.train.images.reshape(-1, 28, 28, 1)
test_X = data.test.images.reshape(-1,28,28,1)
train_y = data.train.labels
test_y = data.test.labels
return train_X,test_X, train_y,test_y | true | true |
f7f87a2673453b53a81f3e74665cd1b6bd46b323 | 11,402 | py | Python | test_models.py | ty-97/SDA | 047d8e6ed238f77d8a7846cf3ac9916c4f0d8fbc | [
"MIT"
] | 2 | 2021-08-09T06:36:08.000Z | 2021-11-09T05:37:20.000Z | test_models.py | ty-97/SDA | 047d8e6ed238f77d8a7846cf3ac9916c4f0d8fbc | [
"MIT"
] | 1 | 2022-02-14T07:49:48.000Z | 2022-02-17T03:41:47.000Z | test_models.py | ty-97/SDA | 047d8e6ed238f77d8a7846cf3ac9916c4f0d8fbc | [
"MIT"
] | null | null | null | # Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu
import os
import time
import shutil
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm_
from torch.nn import functional as F
from sklearn.metrics import confusion_matrix
from ops.dataset import TSNDataSet
# from ops.models import VideoNet
from ops.models_test import VideoNet
from ops.transforms import *
from opts_test import parser
from ops import dataset_config
from ops.utils import AverageMeter, accuracy
# from ops.temporal_shift import make_temporal_pool
from tensorboardX import SummaryWriter
#os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
#os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
def eval_video(video_data, net):
    """Score one batch of clips with *net*, averaging over crops.

    video_data is an (index, inputs, labels) triple; *net* returns
    (scores, weights). Scores are averaged over the num_crop views per
    video, optionally softmax-normalized (args.softmax), and returned as
    a numpy array.

    Returns (index, scores, labels, weights).
    """
    net.eval()
    with torch.no_grad():
        idx, inputs, labels = video_data
        batch_size = labels.numel()
        # Clips per video depend on the temporal sampling strategy; each
        # clip is further multiplied by the spatial test crops.
        if args.dense_sample:
            clips = 10
        elif args.twice_sample:
            clips = 2
        else:
            clips = 1
        num_crop = clips * args.test_crops
        scores, weights = net(inputs)
        # Collapse the crop dimension by averaging.
        scores = scores.reshape(batch_size, num_crop, -1).mean(1)
        if args.softmax:
            # Normalize the output to a probability distribution.
            scores = F.softmax(scores, dim=1)
        scores = scores.data.cpu().numpy().copy()
    return idx, scores, labels, weights
def main():
    """Ensemble evaluation entry point.

    Parses CLI args, builds one VideoNet per requested (net, segments,
    checkpoint) triple, runs each over the test set, averages their
    predictions, and reports top-1/top-5 accuracy, per-class accuracy,
    and the accumulated per-layer attention weights.
    """
    global args
    args = parser.parse_args()
    num_class, args.train_list, args.val_list, args.root_path, prefix = dataset_config.return_dataset(args.dataset,
                                                                                                      args.modality)
    assert args.modality == 'RGB'
    if args.test_list:
        test_list = args.test_list
    else:
        test_list = args.val_list

    # ==== get test args ====
    test_weights_list = args.test_weights.split(',')
    test_nets_list = args.test_nets.split(',')
    test_segments_list = [int(s) for s in args.test_segments.split(',')]
    assert len(test_nets_list) == len(test_segments_list)
    # =======================

    data_iter_list = []
    net_list = []

    scale_size = 256
    crop_size = 256 if args.full_res else 224  # 224 or 256 (scale_size)
    if args.test_crops == 1:
        cropping = torchvision.transforms.Compose([
            GroupScale(scale_size),
            GroupCenterCrop(crop_size),
        ])
    elif args.test_crops == 3:  # do not flip, so only 3 crops
        cropping = torchvision.transforms.Compose([
            GroupFullResSample(crop_size, scale_size, flip=False)
        ])
    elif args.test_crops == 5:  # do not flip, so only 5 crops
        cropping = torchvision.transforms.Compose([
            GroupOverSample(crop_size, scale_size, flip=False)
        ])
    elif args.test_crops == 10:
        cropping = torchvision.transforms.Compose([
            GroupOverSample(crop_size, scale_size)
        ])
    else:
        raise ValueError("Only 1, 5, 10 crops are supported while we got {}".format(args.test_crops))

    test_log = 'test_logs_256'
    if not os.path.exists(test_log):
        os.mkdir(test_log)
    # BUG FIX: the original format string had 8 placeholders for only 6
    # arguments, which raised IndexError before evaluation even started.
    log_path = './{}/log_{}_{}_{}_seg{}_{}.txt'.format(test_log, args.arch, args.dataset,
                                                       "-".join(test_nets_list),
                                                       "-".join(str(a) for a in test_segments_list),
                                                       crop_size)

    for this_net, this_segment, this_weight in zip(test_nets_list, test_segments_list, test_weights_list):
        model = VideoNet(num_class, this_segment, args.modality,
                         backbone=args.arch, net=this_net,
                         consensus_type=args.consensus_type,
                         element_filter=args.element_filter,
                         cdiv=args.cdiv)

        print(this_weight)
        if not os.path.exists(this_weight):
            raise ValueError('the checkpoint file does not exist: %s' % this_weight)
        checkpoint = torch.load(this_weight)
        print(checkpoint['best_prec1'])
        checkpoint_sd = checkpoint['state_dict']
        # Strip the 'module.' prefix added by DataParallel, then drop the
        # profiling counters (total_params/total_ops) which are not real
        # model parameters.
        base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint_sd.items())}
        for ks in list(base_dict.keys()):
            if ks.split('.')[-1] in ['total_params', 'total_ops']:
                base_dict.pop(ks)
        model.load_state_dict(base_dict)

        input_mean = model.input_mean
        input_std = model.input_std

        # Data loading code
        if args.modality != 'RGBDiff':
            normalize = GroupNormalize(input_mean, input_std)
        else:
            normalize = IdentityTransform()

        if args.modality == 'RGB':
            data_length = 1
        elif args.modality in ['Flow', 'RGBDiff']:
            data_length = 5

        print('batch size', args.batch_size)
        test_loader = torch.utils.data.DataLoader(
            TSNDataSet(args.root_path, test_list, num_segments=this_segment,
                       new_length=data_length,
                       modality=args.modality,
                       image_tmpl=prefix,
                       test_mode=True,
                       random_shift=False,
                       transform=torchvision.transforms.Compose([
                           cropping,
                           GroupScale(224),
                           Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                           ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
                           normalize,
                       ]), dense_sample=args.dense_sample, twice_sample=args.twice_sample),
            batch_size=args.batch_size, shuffle=False,
            num_workers=args.workers, pin_memory=True)

        total_num = len(test_loader.dataset)
        print('total test number:', total_num)

        model.eval()
        net_list.append(model)
        data_gen = enumerate(test_loader)
        data_iter_list.append(data_gen)

    top1 = AverageMeter()
    top5 = AverageMeter()
    batch_times = AverageMeter()

    proc_start_time = time.time()

    output = []
    fw = open(log_path, 'w')
    # Accumulated attention weights per ground-truth class: (num_class, 4, 4).
    weights_data = np.zeros((num_class, 4, 4))
    for i, data_label_pairs in enumerate(zip(*data_iter_list)):
        with torch.no_grad():
            this_rst_list = []
            this_label = None
            weight_data = []
            for (_, (data, label)), net in zip(data_label_pairs, net_list):
                end = time.time()
                rst = eval_video((i, data, label), net)
                batch_times.update(time.time() - end, label.size(0))
                this_rst_list.append(rst[1])
                weight_data = rst[3]  # bsz, 4, num_blocks, 4
                this_label = label
            # Plain average over the ensembled networks.
            ensembled_predict = sum(this_rst_list) / len(this_rst_list)

            for p, g in zip(ensembled_predict, this_label.cpu().numpy()):
                output.append([p[None, ...], g])
            # Collapse the per-block dimension, then reorder to (bsz, 4, 4)
            # and accumulate per ground-truth class.
            for j in range(len(weight_data)):
                weight_data[j] = sum(weight_data[j]).cpu().numpy()
            weight_data = np.array(weight_data)  # 4 bsz 4
            weight_data = weight_data.transpose(1, 0, 2)  # bsz 4 4
            for weight, l in zip(weight_data, this_label.cpu().numpy()):
                weights_data[l] = weights_data[l] + weight
            cnt_time = time.time() - proc_start_time
            prec1, prec5 = accuracy(torch.from_numpy(ensembled_predict), this_label, topk=(1, 5))
            top1.update(prec1.item(), this_label.numel())
            top5.update(prec5.item(), this_label.numel())
            if i % 20 == 0:
                txt = 'video {} done, total {}/{}, average {:.3f} sec/video, moving Prec@1 {:.3f} Prec@5 {:.3f}'.format(i * args.batch_size, i * args.batch_size, total_num,
                                                                                                                        float(cnt_time) / (i + 1) / args.batch_size, top1.avg, top5.avg)
                print(txt)
                fw.write(txt + '\n')
                fw.flush()

    print('avg computing time', batch_times.avg)
    video_pred = [np.argmax(x[0]) for x in output]
    # Kept for parity with the original script; currently unused.
    video_pred_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output]
    video_labels = [x[1] for x in output]

    cf = confusion_matrix(video_labels, video_pred).astype(float)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)
    cls_acc = cls_hit / cls_cnt
    print(cls_acc * 100)
    cls_acc_avg = np.sum(cls_acc * cls_cnt) / cls_cnt.sum()
    print(cls_acc_avg)
    # Average the accumulated weights by per-class sample counts.
    weights_data = weights_data / np.expand_dims(np.expand_dims(cls_cnt, -1).repeat(4, axis=-1), -1).repeat(4, axis=-1)
    import csv
    with open(args.test_nets + '_cls_acc.csv', 'w') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(cls_acc)

    print('-----Evaluation is finished------')
    print('Class Accuracy {:.02f}%'.format(cls_acc_avg * 100))
    txt = 'Overall Prec@1 {:.02f}% Prec@5 {:.02f}%'.format(top1.avg, top5.avg)
    fw.write(txt)
    fw.close()
    print(txt)
if __name__ == '__main__':
    # CLI entry point: run the ensemble evaluation.
    main()
import os
import time
import shutil
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm_
from torch.nn import functional as F
from sklearn.metrics import confusion_matrix
from ops.dataset import TSNDataSet
from ops.models_test import VideoNet
from ops.transforms import *
from opts_test import parser
from ops import dataset_config
from ops.utils import AverageMeter, accuracy
from tensorboardX import SummaryWriter
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
def eval_video(video_data, net):
net.eval()
with torch.no_grad():
i, data, label = video_data
batch_size = label.numel()
if args.dense_sample:
num_crop = 10*args.test_crops
elif args.twice_sample:
num_crop = 2*args.test_crops
else:
num_crop = 1*args.test_crops
rst, weights = net(data)
rst = rst.reshape(batch_size, num_crop, -1).mean(1)
if args.softmax:
rst = F.softmax(rst, dim=1)
rst = rst.data.cpu().numpy().copy()
return i, rst, label, weights
def main():
global args
args = parser.parse_args()
num_class, args.train_list, args.val_list, args.root_path, prefix = dataset_config.return_dataset(args.dataset,
args.modality)
assert args.modality == 'RGB'
if args.test_list:
test_list = args.test_list
else:
test_list = args.val_list
test_weights_list = args.test_weights.split(',')
test_nets_list = args.test_nets.split(',')
test_segments_list = [int(s) for s in args.test_segments.split(',')]
assert len(test_nets_list) == len(test_segments_list)
data_iter_list = []
net_list = []
scale_size = 256
crop_size = 256 if args.full_res else 224
if args.test_crops == 1:
cropping = torchvision.transforms.Compose([
GroupScale(scale_size),
GroupCenterCrop(crop_size),
])
elif args.test_crops == 3:
cropping = torchvision.transforms.Compose([
GroupFullResSample(crop_size, scale_size, flip=False)
])
elif args.test_crops == 5:
cropping = torchvision.transforms.Compose([
GroupOverSample(crop_size, scale_size, flip=False)
])
elif args.test_crops == 10:
cropping = torchvision.transforms.Compose([
GroupOverSample(crop_size, scale_size)
])
else:
raise ValueError("Only 1, 5, 10 crops are supported while we got {}".format(args.test_crops))
test_log = 'test_logs_256'
if not os.path.exists(test_log):
os.mkdir(test_log)
log_path = './{}/log_{}_{}_{}_a{}_b{}_seg{}_{}.txt'.format(test_log, args.arch, args.dataset, "-".join(test_nets_list), \
"-".join(str(a) for a in test_segments_list), \
crop_size)
for this_net, this_segment, this_weight in zip(test_nets_list, test_segments_list, test_weights_list):
model = VideoNet(num_class, this_segment, args.modality,
backbone=args.arch, net=this_net,
consensus_type=args.consensus_type,
element_filter=args.element_filter,
cdiv=args.cdiv)
print(this_weight)
if not os.path.exists(this_weight):
raise ValueError('the checkpoint file doesnot exist: %s'%this_weight)
checkpoint = torch.load(this_weight)
print(checkpoint['best_prec1'])
checkpoint_sd = checkpoint['state_dict']
base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint_sd.items())}
for ks in list(base_dict.keys()):
if ks.split('.')[-1] in ['total_params','total_ops']:
base_dict.pop(ks)
model.load_state_dict(base_dict)
odel.input_mean
input_std = model.input_std
if args.modality != 'RGBDiff':
normalize = GroupNormalize(input_mean, input_std)
else:
normalize = IdentityTransform()
if args.modality == 'RGB':
data_length = 1
elif args.modality in ['Flow', 'RGBDiff']:
data_length = 5
print('batch size', args.batch_size)
test_loader = torch.utils.data.DataLoader(
TSNDataSet(args.root_path, test_list, num_segments=this_segment,
new_length=data_length,
modality=args.modality,
image_tmpl=prefix,
test_mode=True,
random_shift=False,
transform=torchvision.transforms.Compose([
cropping,
GroupScale(224),
Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
normalize,
]), dense_sample=args.dense_sample, twice_sample=args.twice_sample),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
total_num = len(test_loader.dataset)
print('total test number:', total_num)
model.eval()
net_list.append(model)
data_gen = enumerate(test_loader)
data_iter_list.append(data_gen)
top1 = AverageMeter()
top5 = AverageMeter()
batch_times = AverageMeter()
proc_start_time = time.time()
output = []
fw = open(log_path, 'w')
weights_data = np.zeros((num_class, 4, 4))
for i, data_label_pairs in enumerate(zip(*data_iter_list)):
with torch.no_grad():
this_rst_list = []
this_label = None
weight_data = []
for (_, (data, label)), net in zip(data_label_pairs, net_list):
end = time.time()
rst = eval_video((i, data, label), net)
batch_times.update(time.time()-end, label.size(0))
this_rst_list.append(rst[1])
weight_data = rst[3]
this_label = label
ensembled_predict = sum(this_rst_list) / len(this_rst_list)
for p, g in zip(ensembled_predict, this_label.cpu().numpy()):
output.append([p[None, ...], g])
for j in range(len(weight_data)):
weight_data[j] = sum(weight_data[j]).cpu().numpy()
weight_data = np.array(weight_data)
weight_data = weight_data.transpose(1,0,2)
for weight, l in zip(weight_data, this_label.cpu().numpy()):
weights_data[l] = weights_data[l] + weight
cnt_time = time.time() - proc_start_time
prec1, prec5 = accuracy(torch.from_numpy(ensembled_predict), this_label, topk=(1, 5))
top1.update(prec1.item(), this_label.numel())
top5.update(prec5.item(), this_label.numel())
if i % 20 == 0:
txt = 'video {} done, total {}/{}, average {:.3f} sec/video, moving Prec@1 {:.3f} Prec@5 {:.3f}'.format(i * args.batch_size, i * args.batch_size, total_num,
float(cnt_time) / (i+1) / args.batch_size, top1.avg, top5.avg)
print(txt)
fw.write(txt+'\n')
fw.flush()
print('avg computing time', batch_times.avg)
video_pred = [np.argmax(x[0]) for x in output]
video_pred_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output]
video_labels = [x[1] for x in output]
cf = confusion_matrix(video_labels, video_pred).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
print(cls_acc*100)
cls_acc_avg = np.sum(cls_acc*cls_cnt)/cls_cnt.sum()
print(cls_acc_avg)
weights_data = weights_data/np.expand_dims(np.expand_dims(cls_cnt,-1).repeat(4,axis=-1),-1).repeat(4,axis=-1)
import csv
with open(args.test_nets+'_cls_acc.csv','w') as f:
f_csv = csv.writer(f)
f_csv.writerow(cls_acc)
print('-----Evaluation is finished------')
print('Class Accuracy {:.02f}%'.format(cls_acc_avg*100))
txt = 'Overall Prec@1 {:.02f}% Prec@5 {:.02f}%'.format(top1.avg, top5.avg)
fw.write(txt)
fw.close()
print(txt)
if __name__ == '__main__':
main() | true | true |
f7f87b3c9acd71b954620d66bf8efd52255bf583 | 1,023 | py | Python | trace-viewer/tracing/build/check_gypi.py | RSB4760/apq8016_external_chromium-trace | 45b575bb05b3714142a9d67b9bd153740ef99226 | [
"BSD-3-Clause"
] | null | null | null | trace-viewer/tracing/build/check_gypi.py | RSB4760/apq8016_external_chromium-trace | 45b575bb05b3714142a9d67b9bd153740ef99226 | [
"BSD-3-Clause"
] | null | null | null | trace-viewer/tracing/build/check_gypi.py | RSB4760/apq8016_external_chromium-trace | 45b575bb05b3714142a9d67b9bd153740ef99226 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
tracing_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..'))
if tracing_path not in sys.path:
sys.path.append(tracing_path)
from tracing.build import check_common
GYPI_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'trace_viewer.gypi'))
def GypiCheck():
  """Validate the file lists declared in trace_viewer.gypi.

  Returns a (possibly empty) error string accumulating every group whose
  file list is unsorted plus whatever check_common.CheckCommon reports for
  the combined list.
  """
  # Use a context manager so the handle is closed even if parsing fails.
  with open(GYPI_FILE, 'r') as f:
    gyp = f.read()

  # gypi files are Python literals, so eval is the conventional parser here.
  # NOTE(review): eval executes arbitrary code -- acceptable only because the
  # input is a checked-in build file, never untrusted data.
  data = eval(gyp)

  listed_files = []
  error = ''
  for group in check_common.FILE_GROUPS:
    filenames = map(os.path.normpath, data['variables'][group])
    error += check_common.CheckListedFilesSorted(GYPI_FILE, group, filenames)
    listed_files.extend(filenames)

  return error + check_common.CheckCommon(GYPI_FILE, listed_files)
if __name__ == '__main__':
  # Parenthesized print works under both Python 2 and Python 3; the bare
  # print statement is a syntax error on Python 3.
  print(GypiCheck())
| 27.648649 | 77 | 0.659824 |
import sys
import os
tracing_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..'))
if tracing_path not in sys.path:
sys.path.append(tracing_path)
from tracing.build import check_common
GYPI_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'trace_viewer.gypi'))
def GypiCheck():
f = open(GYPI_FILE, 'r')
gyp = f.read()
f.close()
data = eval(gyp)
listed_files = []
error = ''
for group in check_common.FILE_GROUPS:
filenames = map(os.path.normpath, data['variables'][group])
error += check_common.CheckListedFilesSorted(GYPI_FILE, group, filenames)
listed_files.extend(filenames)
return error + check_common.CheckCommon(GYPI_FILE, listed_files)
if __name__ == '__main__':
print GypiCheck()
| false | true |
f7f87b8eb066e5b1cbf1b21db016462edda147c6 | 1,366 | py | Python | blog/migrations/0001_initial.py | wlcobb/assignment1-wlcobb | 9cf97f7c7efac5f33f5ee36559223369bfd20973 | [
"MIT"
] | null | null | null | blog/migrations/0001_initial.py | wlcobb/assignment1-wlcobb | 9cf97f7c7efac5f33f5ee36559223369bfd20973 | [
"MIT"
] | null | null | null | blog/migrations/0001_initial.py | wlcobb/assignment1-wlcobb | 9cf97f7c7efac5f33f5ee36559223369bfd20973 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2018-09-24 00:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the blog ``Post`` table."""

    initial = True

    dependencies = [
        # Post.author points at the (swappable) user model, which must exist first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                # Slug only has to be unique per publication date.
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-publish',),
            },
        ),
    ]
| 37.944444 | 147 | 0.606881 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
| true | true |
f7f87cf58e6fa31c9c8b3c812216d57c8b875297 | 2,702 | py | Python | setup.py | teitei-tk/SixIsles | 68ac3510bc35910c3822e36a309a4a7e179f60c7 | [
"MIT"
] | null | null | null | setup.py | teitei-tk/SixIsles | 68ac3510bc35910c3822e36a309a4a7e179f60c7 | [
"MIT"
] | null | null | null | setup.py | teitei-tk/SixIsles | 68ac3510bc35910c3822e36a309a4a7e179f60c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
SixIsles
========
PyMongo Based ActiveRecord Pattern O/R Mapper
--------------
Dependencies
------------
- Python2.6 or Later
- PyMongo >= 3.1.1
Installation
------------
.. code:: bash
$ pip install SixIsles
Example
-------
Add Github Repository Documents
.. code:: python
from sixIsles import Structure, Document, get_client
from sixIsles.types import ObjectId, String
class Repository(Document):
struct = Structure(
_id = ObjectId(),
name = String(),
author = String(),
url = String()
)
class Meta:
database = get_client("test_db_name", "localhost")
document = Repository()
document.name = "SixIsles"
document.author = "teitei-tk"
document.url = "https://github.com/teitei-tk/SixIsles"
document.insert()
or
document = Repository({
"name": "SixIsles",
"author": "teitei-tk",
"url": "https://github.com/teitei-tk/SixIsles"
})
document.insert()
.. code:: bash
$ mongo
.....
.....
> use test_db_name
switched to db test_db_name
> show collections;
repository
system.indexes
> db.repository.find()
{ "_id" : ObjectId("565895aacc7474890284fc8d"), "url" : "https://github.com/teitei-tk/SixIsles", "name" : "SixIsles", "author" : "teitei-tk" }
>
TODO
----
- [ ] Add TestCode
- [ ] Update README
- [ ] Register CI Tools
License
-------
- MIT
"""
# setuptools is required for packaging; bail out with a hint if it is missing.
try:
    import setuptools
    from setuptools import setup, find_packages
except ImportError:
    import sys
    print("Please install setuptools.")
    sys.exit(1)

# Project-local module holding the single version string.
import versions

# Trove classifiers describing maturity, license and supported Pythons.
classifiers = [
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: Software Development',
    'Topic :: Database',
    'Topic :: Database :: Front-Ends',
    'Topic :: Software Development :: Libraries',
]

setup(
    name='SixIsles',
    version=versions.VERSIONS,
    description='PyMongo Based ActiveRecord Pattern O/R Mapper',
    # The module docstring above doubles as the PyPI long description.
    long_description=__doc__,
    author='teitei-tk',
    author_email='teitei.tk@gmail.com',
    url='https://github.com/teitei-tk/SixIsles',
    packages=find_packages(),
    include_package_data=True,
    license='MIT',
    classifiers=classifiers,
    install_requires=open('requirements.txt').read().splitlines(),
    keywords=['orm', 'ormapper', 'o/r mapper', 'PyMongo', 'MongoDB'],
    download_url='https://github.com/teitei-tk/SixIsles/archive/master.zip'
)
| 21.444444 | 146 | 0.613249 |
try:
import setuptools
from setuptools import setup, find_packages
except ImportError:
import sys
print("Please install setuptools.")
sys.exit(1)
import versions
classifiers = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development :: Libraries',
]
setup(
name='SixIsles',
version=versions.VERSIONS,
description='PyMongo Based ActiveRecord Pattern O/R Mapper',
long_description=__doc__,
author='teitei-tk',
author_email='teitei.tk@gmail.com',
url='https://github.com/teitei-tk/SixIsles',
packages=find_packages(),
include_package_data=True,
license='MIT',
classifiers=classifiers,
install_requires=open('requirements.txt').read().splitlines(),
keywords=['orm', 'ormapper', 'o/r mapper', 'PyMongo', 'MongoDB'],
download_url='https://github.com/teitei-tk/SixIsles/archive/master.zip'
)
| true | true |
f7f87fbd5599812eb9d0729ffa5be717c649df8e | 562 | py | Python | gym_multilayerthinfilm/__init__.py | MLResearchAtOSRAM/gym-multilayerthinfilm | 3f98520577de032de52afbfe79d161b866fd6cd6 | [
"MIT"
] | 5 | 2021-09-08T13:09:52.000Z | 2022-03-17T18:49:55.000Z | gym_multilayerthinfilm/__init__.py | MLResearchAtOSRAM/gym-multilayerthinfilm | 3f98520577de032de52afbfe79d161b866fd6cd6 | [
"MIT"
] | null | null | null | gym_multilayerthinfilm/__init__.py | MLResearchAtOSRAM/gym-multilayerthinfilm | 3f98520577de032de52afbfe79d161b866fd6cd6 | [
"MIT"
] | 3 | 2021-09-08T17:55:51.000Z | 2022-01-27T07:37:53.000Z | """
gym-multilayerthinfilm - A reinforcement learning environment for designing multilayer thin-films based on
transfer-matrix method optics package, for calculating
reflection, transmission, absorption, and other relevant aspects of thin
and thick multilayer (or single-layer) films.
Written by Heribert Wankerl and Alexander Luce; based on previous work of Steven Byrnes.
Released under MIT license (Expat).
"""
from gym_multilayerthinfilm import *
from gym_multilayerthinfilm.utils import *
from gym_multilayerthinfilm.gym_class import *
| 33.058824 | 107 | 0.795374 |
from gym_multilayerthinfilm import *
from gym_multilayerthinfilm.utils import *
from gym_multilayerthinfilm.gym_class import *
| true | true |
f7f88051cccc86ea36ff52143595a5d10e923f0d | 721 | py | Python | 2015/day_05_part2.py | jorvis/AdventOfCode | 97c42037abc28c1c16cc3e48a30f2689ca950fb5 | [
"MIT"
] | null | null | null | 2015/day_05_part2.py | jorvis/AdventOfCode | 97c42037abc28c1c16cc3e48a30f2689ca950fb5 | [
"MIT"
] | null | null | null | 2015/day_05_part2.py | jorvis/AdventOfCode | 97c42037abc28c1c16cc3e48a30f2689ca950fb5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
def is_nice_string(s):
    """Return True when *s* satisfies both 'nice' rules (AoC 2015 day 5, part 2)."""
    # Rule 1: some two-letter pair occurs again later in the string; the
    # backreference after ".*" forces the second occurrence to be
    # non-overlapping with the first.
    has_repeated_pair = re.search(r"([a-zA-Z]{2}).*\1", s) is not None

    # Rule 2: some letter repeats with exactly one character between the
    # two occurrences (e.g. "xyx" or "aaa").
    has_sandwiched_letter = re.search(r"([a-zA-Z]).\1", s) is not None

    return has_repeated_pair and has_sandwiched_letter
#############################################################
# Count the nice strings in the puzzle input, one candidate per line.
nice_count = sum(
    1 for line in open('day_05.data') if is_nice_string(line.rstrip())
)

print("INFO: {0} of the strings are nice".format(nice_count))
| 24.033333 | 75 | 0.565881 |
import re
def is_nice_string(s):
matches = re.findall(r"([a-zA-Z]{2}).*\1", s)
if len(matches) == 0:
return False
m = re.search(r"([a-zA-Z]).\1", s)
if not m:
return False
return True
| true | true |
f7f88134a2d2774c5cd6a84b20a1928fbeea771a | 153 | py | Python | sanjip/__init__.py | Sanji-IO/sanjip | 5ab77263d8190f803f6f4bd063459873ac9bcabb | [
"MIT"
] | null | null | null | sanjip/__init__.py | Sanji-IO/sanjip | 5ab77263d8190f803f6f4bd063459873ac9bcabb | [
"MIT"
] | 1 | 2019-09-23T20:58:57.000Z | 2019-09-23T20:58:57.000Z | sanjip/__init__.py | Sanji-IO/sanjip | 5ab77263d8190f803f6f4bd063459873ac9bcabb | [
"MIT"
] | 1 | 2019-09-23T00:23:02.000Z | 2019-09-23T00:23:02.000Z | from __future__ import absolute_import
import sanjip.ip as ip
import sanjip.ip.addr
import sanjip.ip.route  # noqa: F401

# NOTE(review): __all__ must hold attribute *names* (strings).  The previous
# value listed the module objects themselves (ip.addr, ip.route), which makes
# ``from sanjip import *`` raise "Item in __all__ must be str" on Python 3.
__all__ = ["ip"]
| 19.125 | 38 | 0.771242 | from __future__ import absolute_import
import sanjip.ip as ip
import sanjip.ip.addr
import sanjip.ip.route
__all__ = [ip.addr, ip.route]
| true | true |
f7f88159735653287f31753857f3ce86c7a3e4b4 | 783 | py | Python | src/agender.py | bagustris/nkululeko | 87a4918b37e2a8599b81c4752c6750fc8adaa079 | [
"MIT"
] | 3 | 2021-11-08T08:26:58.000Z | 2022-02-16T15:26:06.000Z | src/agender.py | bagustris/nkululeko | 87a4918b37e2a8599b81c4752c6750fc8adaa079 | [
"MIT"
] | 12 | 2021-09-09T10:11:50.000Z | 2022-02-22T15:36:33.000Z | src/agender.py | bagustris/nkululeko | 87a4918b37e2a8599b81c4752c6750fc8adaa079 | [
"MIT"
] | 1 | 2021-11-08T23:53:48.000Z | 2021-11-08T23:53:48.000Z | # agender.py
from dataset import Dataset
import audformat
import os
import glob_conf
class Agender(Dataset):
    """Wrapper for the agender corpus (age and gender labels)."""

    # Dataset identifier; also used as the key into the DATA config section.
    name = 'agender'

    def __init__(self, config):
        """Register the dataset under its fixed name."""
        Dataset.__init__(self, self.name, config)

    def load(self):
        """Load the audformat database and absolutize its file paths."""
        data_root = self.config['DATA'][self.name]
        database = audformat.Database.load(data_root)
        database.map_files(lambda f: os.path.join(data_root, f))
        self.db = database

    def split(self):
        """Split the database into train and devel partitions."""
        self.df_test = self.db.tables['age.devel'].df
        self.df_train = self.db.tables['age.train'].df
from dataset import Dataset
import audformat
import os
import glob_conf
class Agender(Dataset):
name = 'agender'
def __init__(self, config):
Dataset.__init__(self, self.name, config)
def load(self):
root = self.config['DATA'][self.name]
db = audformat.Database.load(root)
db.map_files(lambda x: os.path.join(root, x))
self.db = db
def split(self):
self.df_test = self.db.tables['age.devel'].df
self.df_train = self.db.tables['age.train'].df | true | true |
f7f8828b4cfd7e84adf786dce8b7506703379faf | 29,044 | py | Python | tests/kafkatest/services/streams.py | smagill-test/kafka | f3a9ce4a69d17db7b8ba21134eb8118070176e48 | [
"Apache-2.0"
] | 1 | 2020-07-01T12:04:35.000Z | 2020-07-01T12:04:35.000Z | tests/kafkatest/services/streams.py | smagill-test/kafka | f3a9ce4a69d17db7b8ba21134eb8118070176e48 | [
"Apache-2.0"
] | 1 | 2021-10-09T04:39:50.000Z | 2021-10-09T04:39:50.000Z | tests/kafkatest/services/streams.py | smagill-test/kafka | f3a9ce4a69d17db7b8ba21134eb8118070176e48 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import signal
import streams_property
import consumer_property
from ducktape.cluster.remoteaccount import RemoteCommandError
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import KafkaConfig
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.version import LATEST_0_10_0, LATEST_0_10_1
STATE_DIR = "state.dir"
class StreamsTestBaseService(KafkaPathResolverMixin, JmxMixin, Service):
"""Base class for Streams Test services providing some common settings and functionality"""
PERSISTENT_ROOT = "/mnt/streams"
# The log file contains normal log4j logs written using a file appender. stdout and stderr are handled separately
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "streams.properties")
LOG_FILE = os.path.join(PERSISTENT_ROOT, "streams.log")
STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "streams.stdout")
STDERR_FILE = os.path.join(PERSISTENT_ROOT, "streams.stderr")
JMX_LOG_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.log")
JMX_ERR_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.err.log")
LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PID_FILE = os.path.join(PERSISTENT_ROOT, "streams.pid")
CLEAN_NODE_ENABLED = True
logs = {
"streams_config": {
"path": CONFIG_FILE,
"collect_default": True},
"streams_log": {
"path": LOG_FILE,
"collect_default": True},
"streams_stdout": {
"path": STDOUT_FILE,
"collect_default": True},
"streams_stderr": {
"path": STDERR_FILE,
"collect_default": True},
"streams_log.1": {
"path": LOG_FILE + ".1",
"collect_default": True},
"streams_stdout.1": {
"path": STDOUT_FILE + ".1",
"collect_default": True},
"streams_stderr.1": {
"path": STDERR_FILE + ".1",
"collect_default": True},
"streams_log.2": {
"path": LOG_FILE + ".2",
"collect_default": True},
"streams_stdout.2": {
"path": STDOUT_FILE + ".2",
"collect_default": True},
"streams_stderr.2": {
"path": STDERR_FILE + ".2",
"collect_default": True},
"streams_log.3": {
"path": LOG_FILE + ".3",
"collect_default": True},
"streams_stdout.3": {
"path": STDOUT_FILE + ".3",
"collect_default": True},
"streams_stderr.3": {
"path": STDERR_FILE + ".3",
"collect_default": True},
"streams_log.0-1": {
"path": LOG_FILE + ".0-1",
"collect_default": True},
"streams_stdout.0-1": {
"path": STDOUT_FILE + ".0-1",
"collect_default": True},
"streams_stderr.0-1": {
"path": STDERR_FILE + ".0-1",
"collect_default": True},
"streams_log.0-2": {
"path": LOG_FILE + ".0-2",
"collect_default": True},
"streams_stdout.0-2": {
"path": STDOUT_FILE + ".0-2",
"collect_default": True},
"streams_stderr.0-2": {
"path": STDERR_FILE + ".0-2",
"collect_default": True},
"streams_log.0-3": {
"path": LOG_FILE + ".0-3",
"collect_default": True},
"streams_stdout.0-3": {
"path": STDOUT_FILE + ".0-3",
"collect_default": True},
"streams_stderr.0-3": {
"path": STDERR_FILE + ".0-3",
"collect_default": True},
"streams_log.0-4": {
"path": LOG_FILE + ".0-4",
"collect_default": True},
"streams_stdout.0-4": {
"path": STDOUT_FILE + ".0-4",
"collect_default": True},
"streams_stderr.0-4": {
"path": STDERR_FILE + ".0-4",
"collect_default": True},
"streams_log.0-5": {
"path": LOG_FILE + ".0-5",
"collect_default": True},
"streams_stdout.0-5": {
"path": STDOUT_FILE + ".0-5",
"collect_default": True},
"streams_stderr.0-5": {
"path": STDERR_FILE + ".0-5",
"collect_default": True},
"streams_log.0-6": {
"path": LOG_FILE + ".0-6",
"collect_default": True},
"streams_stdout.0-6": {
"path": STDOUT_FILE + ".0-6",
"collect_default": True},
"streams_stderr.0-6": {
"path": STDERR_FILE + ".0-6",
"collect_default": True},
"streams_log.1-1": {
"path": LOG_FILE + ".1-1",
"collect_default": True},
"streams_stdout.1-1": {
"path": STDOUT_FILE + ".1-1",
"collect_default": True},
"streams_stderr.1-1": {
"path": STDERR_FILE + ".1-1",
"collect_default": True},
"streams_log.1-2": {
"path": LOG_FILE + ".1-2",
"collect_default": True},
"streams_stdout.1-2": {
"path": STDOUT_FILE + ".1-2",
"collect_default": True},
"streams_stderr.1-2": {
"path": STDERR_FILE + ".1-2",
"collect_default": True},
"streams_log.1-3": {
"path": LOG_FILE + ".1-3",
"collect_default": True},
"streams_stdout.1-3": {
"path": STDOUT_FILE + ".1-3",
"collect_default": True},
"streams_stderr.1-3": {
"path": STDERR_FILE + ".1-3",
"collect_default": True},
"streams_log.1-4": {
"path": LOG_FILE + ".1-4",
"collect_default": True},
"streams_stdout.1-4": {
"path": STDOUT_FILE + ".1-4",
"collect_default": True},
"streams_stderr.1-4": {
"path": STDERR_FILE + ".1-4",
"collect_default": True},
"streams_log.1-5": {
"path": LOG_FILE + ".1-5",
"collect_default": True},
"streams_stdout.1-5": {
"path": STDOUT_FILE + ".1-5",
"collect_default": True},
"streams_stderr.1-5": {
"path": STDERR_FILE + ".1-5",
"collect_default": True},
"streams_log.1-6": {
"path": LOG_FILE + ".1-6",
"collect_default": True},
"streams_stdout.1-6": {
"path": STDOUT_FILE + ".1-6",
"collect_default": True},
"streams_stderr.1-6": {
"path": STDERR_FILE + ".1-6",
"collect_default": True},
"jmx_log": {
"path": JMX_LOG_FILE,
"collect_default": True},
"jmx_err": {
"path": JMX_ERR_FILE,
"collect_default": True},
}
def __init__(self, test_context, kafka, streams_class_name, user_test_args1, user_test_args2=None, user_test_args3=None, user_test_args4=None):
Service.__init__(self, test_context, num_nodes=1)
self.kafka = kafka
self.args = {'streams_class_name': streams_class_name,
'user_test_args1': user_test_args1,
'user_test_args2': user_test_args2,
'user_test_args3': user_test_args3,
'user_test_args4': user_test_args4}
self.log_level = "DEBUG"
    @property
    def node(self):
        """The single node this service runs on (num_nodes is fixed at 1)."""
        return self.nodes[0]
def pids(self, node):
try:
pids = [pid for pid in node.account.ssh_capture("cat " + self.PID_FILE, callback=str)]
return [int(pid) for pid in pids]
except Exception, exception:
self.logger.debug(str(exception))
return []
def stop_nodes(self, clean_shutdown=True):
for node in self.nodes:
self.stop_node(node, clean_shutdown)
    def stop_node(self, node, clean_shutdown=True):
        """Stop the Streams process on *node*.

        With clean_shutdown the process gets SIGTERM and we wait (up to 120s)
        for it to exit; otherwise it is SIGKILLed immediately.
        """
        self.logger.info((clean_shutdown and "Cleanly" or "Forcibly") + " stopping Streams Test on " + str(node.account))
        pids = self.pids(node)
        sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL

        for pid in pids:
            node.account.signal(pid, sig, allow_fail=True)
        if clean_shutdown:
            for pid in pids:
                wait_until(lambda: not node.account.alive(pid), timeout_sec=120, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")

        # Remove the PID file so later pids() calls don't report dead processes.
        node.account.ssh("rm -f " + self.PID_FILE, allow_fail=False)
def restart(self):
# We don't want to do any clean up here, just restart the process.
for node in self.nodes:
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.stop_node(node)
self.start_node(node)
def abortThenRestart(self):
# We don't want to do any clean up here, just abort then restart the process. The running service is killed immediately.
for node in self.nodes:
self.logger.info("Aborting Kafka Streams on " + str(node.account))
self.stop_node(node, False)
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.start_node(node)
def wait(self, timeout_sec=1440):
for node in self.nodes:
self.wait_node(node, timeout_sec)
def wait_node(self, node, timeout_sec=None):
for pid in self.pids(node):
wait_until(lambda: not node.account.alive(pid), timeout_sec=timeout_sec, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
    def clean_node(self, node):
        """Force-kill any leftover process and (optionally) wipe persistent state."""
        # clean_shutdown=False: anything still running at cleanup time is killed hard.
        node.account.kill_process("streams", clean_shutdown=False, allow_fail=True)
        if self.CLEAN_NODE_ENABLED:
            node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing streams cmd: " + cmd)
return cmd
def prop_file(self):
cfg = KafkaConfig(**{streams_property.STATE_DIR: self.PERSISTENT_ROOT, streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()})
return cfg.render()
    def start_node(self, node):
        """Write config/log4j files to *node*, launch the process and wait for startup."""
        node.account.mkdirs(self.PERSISTENT_ROOT)
        prop_file = self.prop_file()
        node.account.create_file(self.CONFIG_FILE, prop_file)
        node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j.properties', log_file=self.LOG_FILE))

        self.logger.info("Starting StreamsTest process on " + str(node.account))
        with node.account.monitor_log(self.STDOUT_FILE) as monitor:
            node.account.ssh(self.start_cmd(node))
            # The test class prints this marker once its KafkaStreams instance is up.
            monitor.wait_until('StreamsTest instance started', timeout_sec=60, err_msg="Never saw message indicating StreamsTest finished startup on " + str(node.account))

        if len(self.pids(node)) == 0:
            raise RuntimeError("No process ids recorded")
class StreamsSmokeTestBaseService(StreamsTestBaseService):
    """Base class for Streams Smoke Test services providing some common settings and functionality"""

    def __init__(self, test_context, kafka, command, processing_guarantee = 'at_least_once', num_threads = 3):
        super(StreamsSmokeTestBaseService, self).__init__(test_context,
                                                          kafka,
                                                          "org.apache.kafka.streams.tests.StreamsSmokeTest",
                                                          command)
        # Streams config knobs forwarded into the properties file by prop_file().
        self.NUM_THREADS = num_threads
        self.PROCESSING_GUARANTEE = processing_guarantee

    def prop_file(self):
        # Extends the base properties with smoke-test specific settings.
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
                      streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
                      streams_property.NUM_THREADS: self.NUM_THREADS}

        cfg = KafkaConfig(**properties)
        return cfg.render()
class StreamsEosTestBaseService(StreamsTestBaseService):
    """Base class for Streams EOS Test services providing some common settings and functionality"""

    # Subclasses can flip this off to keep state across restarts.
    clean_node_enabled = True

    def __init__(self, test_context, kafka, processing_guarantee, command):
        super(StreamsEosTestBaseService, self).__init__(test_context,
                                                        kafka,
                                                        "org.apache.kafka.streams.tests.StreamsEosTest",
                                                        command)
        self.PROCESSING_GUARANTEE = processing_guarantee

    def prop_file(self):
        # Extends the base properties with the EOS processing guarantee.
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
                      streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE}

        cfg = KafkaConfig(**properties)
        return cfg.render()

    def clean_node(self, node):
        # Skip cleanup entirely when a test wants state to survive restarts.
        if self.clean_node_enabled:
            super(StreamsEosTestBaseService, self).clean_node(node)
class StreamsSmokeTestDriverService(StreamsSmokeTestBaseService):
    """Driver side of the smoke test: produces input and verifies the results."""

    def __init__(self, test_context, kafka):
        super(StreamsSmokeTestDriverService, self).__init__(test_context, kafka, "run")
        self.DISABLE_AUTO_TERMINATE = ""

    def disable_auto_terminate(self):
        # When set, the driver keeps running instead of exiting after verification.
        self.DISABLE_AUTO_TERMINATE = "disableAutoTerminate"

    def start_cmd(self, node):
        # Same shape as the base start_cmd but with the optional
        # disableAutoTerminate flag appended to the java arguments.
        args = self.args.copy()
        args['config_file'] = self.CONFIG_FILE
        args['stdout'] = self.STDOUT_FILE
        args['stderr'] = self.STDERR_FILE
        args['pidfile'] = self.PID_FILE
        args['log4j'] = self.LOG4J_CONFIG_FILE
        args['disable_auto_terminate'] = self.DISABLE_AUTO_TERMINATE
        args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)

        cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
              "INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
              " %(config_file)s %(user_test_args1)s %(disable_auto_terminate)s" \
              " & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args

        return cmd
class StreamsSmokeTestJobRunnerService(StreamsSmokeTestBaseService):
    """Runs the smoke test's "process" role: the Streams application under test."""
    def __init__(self, test_context, kafka, processing_guarantee, num_threads = 3):
        super(StreamsSmokeTestJobRunnerService, self).__init__(test_context, kafka, "process", processing_guarantee, num_threads)
class StreamsEosTestDriverService(StreamsEosTestBaseService):
    """EOS test driver ("run" mode); the processing guarantee is not used here."""
    def __init__(self, test_context, kafka):
        super(StreamsEosTestDriverService, self).__init__(test_context, kafka, "not-required", "run")
class StreamsEosTestJobRunnerService(StreamsEosTestBaseService):
    """Runs the EOS test's simple "process" topology with the given guarantee."""
    def __init__(self, test_context, kafka, processing_guarantee):
        super(StreamsEosTestJobRunnerService, self).__init__(test_context, kafka, processing_guarantee, "process")
class StreamsComplexEosTestJobRunnerService(StreamsEosTestBaseService):
    """Runs the EOS test's "process-complex" topology with the given guarantee."""
    def __init__(self, test_context, kafka, processing_guarantee):
        super(StreamsComplexEosTestJobRunnerService, self).__init__(test_context, kafka, processing_guarantee, "process-complex")
class StreamsEosTestVerifyRunnerService(StreamsEosTestBaseService):
    """Verifies the output of the simple EOS topology ("verify" mode)."""
    def __init__(self, test_context, kafka):
        super(StreamsEosTestVerifyRunnerService, self).__init__(test_context, kafka, "not-required", "verify")
class StreamsComplexEosTestVerifyRunnerService(StreamsEosTestBaseService):
    """Verifies the output of the complex EOS topology ("verify-complex" mode)."""
    def __init__(self, test_context, kafka):
        super(StreamsComplexEosTestVerifyRunnerService, self).__init__(test_context, kafka, "not-required", "verify-complex")
class StreamsSmokeTestShutdownDeadlockService(StreamsSmokeTestBaseService):
    """Runs the smoke test in "close-deadlock-test" mode to exercise shutdown paths."""
    def __init__(self, test_context, kafka):
        super(StreamsSmokeTestShutdownDeadlockService, self).__init__(test_context, kafka, "close-deadlock-test")
class StreamsBrokerCompatibilityService(StreamsTestBaseService):
    """Runs BrokerCompatibilityTest; *processingMode* is passed through as the test argument."""
    def __init__(self, test_context, kafka, processingMode):
        super(StreamsBrokerCompatibilityService, self).__init__(test_context,
                                                                kafka,
                                                                "org.apache.kafka.streams.tests.BrokerCompatibilityTest",
                                                                processingMode)
class StreamsBrokerDownResilienceService(StreamsTestBaseService):
    """Runs StreamsBrokerDownResilienceTest with the given configs string."""
    def __init__(self, test_context, kafka, configs):
        super(StreamsBrokerDownResilienceService, self).__init__(test_context,
                                                                 kafka,
                                                                 "org.apache.kafka.streams.tests.StreamsBrokerDownResilienceTest",
                                                                 configs)

    def start_cmd(self, node):
        # NOTE(review): identical to the base class start_cmd except for the
        # log message prefix; could be removed if the base is reused.
        args = self.args.copy()
        args['config_file'] = self.CONFIG_FILE
        args['stdout'] = self.STDOUT_FILE
        args['stderr'] = self.STDERR_FILE
        args['pidfile'] = self.PID_FILE
        args['log4j'] = self.LOG4J_CONFIG_FILE
        args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)

        cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
              "INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
              " %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
              " %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args

        self.logger.info("Executing: " + cmd)

        return cmd
class StreamsStandbyTaskService(StreamsTestBaseService):
    """Runs StreamsStandByReplicaTest with the given configs string."""
    def __init__(self, test_context, kafka, configs):
        super(StreamsStandbyTaskService, self).__init__(test_context,
                                                        kafka,
                                                        "org.apache.kafka.streams.tests.StreamsStandByReplicaTest",
                                                        configs)
class StreamsOptimizedUpgradeTestService(StreamsTestBaseService):
    """Runs StreamsOptimizedTest; the test toggles topology optimization via config.

    Callers set the topic attributes and OPTIMIZED_CONFIG before starting.
    """
    def __init__(self, test_context, kafka):
        super(StreamsOptimizedUpgradeTestService, self).__init__(test_context,
                                                                 kafka,
                                                                 "org.apache.kafka.streams.tests.StreamsOptimizedTest",
                                                                 "")
        # Value for the topology.optimization Streams config ('none' or 'all').
        self.OPTIMIZED_CONFIG = 'none'
        self.INPUT_TOPIC = None
        self.AGGREGATION_TOPIC = None
        self.REDUCE_TOPIC = None
        self.JOIN_TOPIC = None

    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}

        properties['topology.optimization'] = self.OPTIMIZED_CONFIG
        properties['input.topic'] = self.INPUT_TOPIC
        properties['aggregation.topic'] = self.AGGREGATION_TOPIC
        properties['reduce.topic'] = self.REDUCE_TOPIC
        properties['join.topic'] = self.JOIN_TOPIC

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
class StreamsUpgradeTestJobRunnerService(StreamsTestBaseService):
    """Runs StreamsUpgradeTest against a configurable Kafka Streams version.

    Callers choose the jar version via set_version() and drive rolling-upgrade
    scenarios via set_upgrade_from()/set_upgrade_to().
    """
    def __init__(self, test_context, kafka):
        super(StreamsUpgradeTestJobRunnerService, self).__init__(test_context,
                                                                 kafka,
                                                                 "org.apache.kafka.streams.tests.StreamsUpgradeTest",
                                                                 "")
        self.UPGRADE_FROM = None
        self.UPGRADE_TO = None
        # Extra entries merged into the properties file by prop_file().
        self.extra_properties = {}

    def set_config(self, key, value):
        """Add an arbitrary key/value to the rendered properties file."""
        self.extra_properties[key] = value

    def set_version(self, kafka_streams_version):
        """Select which Kafka Streams jar version the test runs with."""
        self.KAFKA_STREAMS_VERSION = kafka_streams_version

    def set_upgrade_from(self, upgrade_from):
        """Set the 'upgrade.from' Streams config used during rolling upgrades."""
        self.UPGRADE_FROM = upgrade_from

    def set_upgrade_to(self, upgrade_to):
        """Set the upgrade target; "future_version" enables the future-metadata probe."""
        self.UPGRADE_TO = upgrade_to

    def prop_file(self):
        properties = self.extra_properties.copy()
        properties[streams_property.STATE_DIR] = self.PERSISTENT_ROOT
        properties[streams_property.KAFKA_SERVERS] = self.kafka.bootstrap_servers()

        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM
        if self.UPGRADE_TO == "future_version":
            properties['test.future.metadata'] = "any_value"

        cfg = KafkaConfig(**properties)
        return cfg.render()

    def start_cmd(self, node):
        args = self.args.copy()

        # Streams 0.10.0/0.10.1 required a ZooKeeper connect string; newer
        # versions take only the properties file.
        if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
            args['zk'] = self.kafka.zk.connect_setting()
        else:
            args['zk'] = ""
        args['config_file'] = self.CONFIG_FILE
        args['stdout'] = self.STDOUT_FILE
        args['stderr'] = self.STDERR_FILE
        args['pidfile'] = self.PID_FILE
        args['log4j'] = self.LOG4J_CONFIG_FILE
        args['version'] = self.KAFKA_STREAMS_VERSION
        args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)

        cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
              "INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
              " %(kafka_run_class)s %(streams_class_name)s %(zk)s %(config_file)s " \
              " & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args

        self.logger.info("Executing: " + cmd)

        return cmd
class StreamsNamedRepartitionTopicService(StreamsTestBaseService):
    """Runs the StreamsNamedRepartitionTest client."""

    def __init__(self, test_context, kafka):
        """Register the named-repartition test class; topics are set later by the test."""
        super(StreamsNamedRepartitionTopicService, self).__init__(
            test_context,
            kafka,
            "org.apache.kafka.streams.tests.StreamsNamedRepartitionTest",
            "")
        # Overridden by the test before the service is started.
        self.ADD_ADDITIONAL_OPS = 'false'
        self.INPUT_TOPIC = None
        self.AGGREGATION_TOPIC = None

    def prop_file(self):
        """Render the streams.properties content for this client."""
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            'input.topic': self.INPUT_TOPIC,
            'aggregation.topic': self.AGGREGATION_TOPIC,
            'add.operations': self.ADD_ADDITIONAL_OPS,
        }
        return KafkaConfig(**properties).render()
class StaticMemberTestService(StreamsTestBaseService):
    """Runs the StaticMemberTestClient with a fixed consumer group.instance.id."""

    def __init__(self, test_context, kafka, group_instance_id, num_threads):
        """Register the static-membership test client; the input topic is set later by the test."""
        super(StaticMemberTestService, self).__init__(
            test_context,
            kafka,
            "org.apache.kafka.streams.tests.StaticMemberTestClient",
            "")
        self.INPUT_TOPIC = None  # overridden by the test before start
        self.GROUP_INSTANCE_ID = group_instance_id
        self.NUM_THREADS = num_threads

    def prop_file(self):
        """Render streams.properties including the static-membership consumer settings."""
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.NUM_THREADS: self.NUM_THREADS,
            consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
            consumer_property.SESSION_TIMEOUT_MS: 60000,
            'input.topic': self.INPUT_TOPIC,
            # TODO KIP-441: consider rewriting the test for HighAvailabilityTaskAssignor
            'internal.task.assignor.class': "org.apache.kafka.streams.processor.internals.assignment.StickyTaskAssignor",
        }
        return KafkaConfig(**properties).render()
class CooperativeRebalanceUpgradeService(StreamsTestBaseService):
    """Runs the StreamsUpgradeToCooperativeRebalanceTest client, optionally
    pinned to an older Kafka Streams version for rolling-upgrade tests."""

    def __init__(self, test_context, kafka):
        super(CooperativeRebalanceUpgradeService, self).__init__(
            test_context,
            kafka,
            "org.apache.kafka.streams.tests.StreamsUpgradeToCooperativeRebalanceTest",
            "")
        self.UPGRADE_FROM = None
        # these properties will be overridden in test
        self.SOURCE_TOPIC = None
        self.SINK_TOPIC = None
        self.TASK_DELIMITER = "#"
        self.REPORT_INTERVAL = None
        self.standby_tasks = None
        self.active_tasks = None
        self.upgrade_phase = None

    def set_tasks(self, task_string):
        """Parse a 'TASK-ASSIGNMENTS:<active>[#<standby>]' report line into the
        active_tasks / standby_tasks sets.

        Assumes *task_string* begins with the 'TASK-ASSIGNMENTS:' label; the
        active and optional standby lists are separated by TASK_DELIMITER and
        individual task ids by commas.
        """
        label = "TASK-ASSIGNMENTS:"
        task_string_substr = task_string[len(label):]
        all_tasks = task_string_substr.split(self.TASK_DELIMITER)
        self.active_tasks = set(all_tasks[0].split(","))
        if len(all_tasks) > 1:
            self.standby_tasks = set(all_tasks[1].split(","))

    def set_version(self, kafka_streams_version):
        """Select which Kafka Streams jar version the client runs with."""
        self.KAFKA_STREAMS_VERSION = kafka_streams_version

    def set_upgrade_phase(self, upgrade_phase):
        """Record the upgrade phase written to the property file."""
        self.upgrade_phase = upgrade_phase

    def start_cmd(self, node):
        """Build the shell command that launches the versioned test client on *node*."""
        args = self.args.copy()
        # NOTE(review): pre-0.10.2 clients appear to need a ZooKeeper connect
        # string on the command line, hence the version check.
        if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
            args['zk'] = self.kafka.zk.connect_setting()
        else:
            args['zk'] = ""
        args['config_file'] = self.CONFIG_FILE
        args['stdout'] = self.STDOUT_FILE
        args['stderr'] = self.STDERR_FILE
        args['pidfile'] = self.PID_FILE
        args['log4j'] = self.LOG4J_CONFIG_FILE
        args['version'] = self.KAFKA_STREAMS_VERSION
        args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)

        cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
              "INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
              " %(kafka_run_class)s %(streams_class_name)s %(zk)s %(config_file)s " \
              " & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args

        self.logger.info("Executing: " + cmd)
        return cmd

    def prop_file(self):
        """Render the streams.properties content for this client.

        Fix: the previous version attempted ``del properties['upgrade.from']``
        when UPGRADE_FROM was unset, but the freshly built dict can never
        contain that key, so the branch always raised KeyError and merely
        logged it — the dead branch is removed.
        """
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM
        if self.upgrade_phase is not None:
            properties['upgrade.phase'] = self.upgrade_phase
        properties['source.topic'] = self.SOURCE_TOPIC
        properties['sink.topic'] = self.SINK_TOPIC
        properties['task.delimiter'] = self.TASK_DELIMITER
        properties['report.interval'] = self.REPORT_INTERVAL

        cfg = KafkaConfig(**properties)
        return cfg.render()
| 43.740964 | 171 | 0.603119 |
import os.path
import signal
import streams_property
import consumer_property
from ducktape.cluster.remoteaccount import RemoteCommandError
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import KafkaConfig
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.version import LATEST_0_10_0, LATEST_0_10_1
STATE_DIR = "state.dir"
class StreamsTestBaseService(KafkaPathResolverMixin, JmxMixin, Service):
"""Base class for Streams Test services providing some common settings and functionality"""
PERSISTENT_ROOT = "/mnt/streams"
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "streams.properties")
LOG_FILE = os.path.join(PERSISTENT_ROOT, "streams.log")
STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "streams.stdout")
STDERR_FILE = os.path.join(PERSISTENT_ROOT, "streams.stderr")
JMX_LOG_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.log")
JMX_ERR_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.err.log")
LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PID_FILE = os.path.join(PERSISTENT_ROOT, "streams.pid")
CLEAN_NODE_ENABLED = True
logs = {
"streams_config": {
"path": CONFIG_FILE,
"collect_default": True},
"streams_log": {
"path": LOG_FILE,
"collect_default": True},
"streams_stdout": {
"path": STDOUT_FILE,
"collect_default": True},
"streams_stderr": {
"path": STDERR_FILE,
"collect_default": True},
"streams_log.1": {
"path": LOG_FILE + ".1",
"collect_default": True},
"streams_stdout.1": {
"path": STDOUT_FILE + ".1",
"collect_default": True},
"streams_stderr.1": {
"path": STDERR_FILE + ".1",
"collect_default": True},
"streams_log.2": {
"path": LOG_FILE + ".2",
"collect_default": True},
"streams_stdout.2": {
"path": STDOUT_FILE + ".2",
"collect_default": True},
"streams_stderr.2": {
"path": STDERR_FILE + ".2",
"collect_default": True},
"streams_log.3": {
"path": LOG_FILE + ".3",
"collect_default": True},
"streams_stdout.3": {
"path": STDOUT_FILE + ".3",
"collect_default": True},
"streams_stderr.3": {
"path": STDERR_FILE + ".3",
"collect_default": True},
"streams_log.0-1": {
"path": LOG_FILE + ".0-1",
"collect_default": True},
"streams_stdout.0-1": {
"path": STDOUT_FILE + ".0-1",
"collect_default": True},
"streams_stderr.0-1": {
"path": STDERR_FILE + ".0-1",
"collect_default": True},
"streams_log.0-2": {
"path": LOG_FILE + ".0-2",
"collect_default": True},
"streams_stdout.0-2": {
"path": STDOUT_FILE + ".0-2",
"collect_default": True},
"streams_stderr.0-2": {
"path": STDERR_FILE + ".0-2",
"collect_default": True},
"streams_log.0-3": {
"path": LOG_FILE + ".0-3",
"collect_default": True},
"streams_stdout.0-3": {
"path": STDOUT_FILE + ".0-3",
"collect_default": True},
"streams_stderr.0-3": {
"path": STDERR_FILE + ".0-3",
"collect_default": True},
"streams_log.0-4": {
"path": LOG_FILE + ".0-4",
"collect_default": True},
"streams_stdout.0-4": {
"path": STDOUT_FILE + ".0-4",
"collect_default": True},
"streams_stderr.0-4": {
"path": STDERR_FILE + ".0-4",
"collect_default": True},
"streams_log.0-5": {
"path": LOG_FILE + ".0-5",
"collect_default": True},
"streams_stdout.0-5": {
"path": STDOUT_FILE + ".0-5",
"collect_default": True},
"streams_stderr.0-5": {
"path": STDERR_FILE + ".0-5",
"collect_default": True},
"streams_log.0-6": {
"path": LOG_FILE + ".0-6",
"collect_default": True},
"streams_stdout.0-6": {
"path": STDOUT_FILE + ".0-6",
"collect_default": True},
"streams_stderr.0-6": {
"path": STDERR_FILE + ".0-6",
"collect_default": True},
"streams_log.1-1": {
"path": LOG_FILE + ".1-1",
"collect_default": True},
"streams_stdout.1-1": {
"path": STDOUT_FILE + ".1-1",
"collect_default": True},
"streams_stderr.1-1": {
"path": STDERR_FILE + ".1-1",
"collect_default": True},
"streams_log.1-2": {
"path": LOG_FILE + ".1-2",
"collect_default": True},
"streams_stdout.1-2": {
"path": STDOUT_FILE + ".1-2",
"collect_default": True},
"streams_stderr.1-2": {
"path": STDERR_FILE + ".1-2",
"collect_default": True},
"streams_log.1-3": {
"path": LOG_FILE + ".1-3",
"collect_default": True},
"streams_stdout.1-3": {
"path": STDOUT_FILE + ".1-3",
"collect_default": True},
"streams_stderr.1-3": {
"path": STDERR_FILE + ".1-3",
"collect_default": True},
"streams_log.1-4": {
"path": LOG_FILE + ".1-4",
"collect_default": True},
"streams_stdout.1-4": {
"path": STDOUT_FILE + ".1-4",
"collect_default": True},
"streams_stderr.1-4": {
"path": STDERR_FILE + ".1-4",
"collect_default": True},
"streams_log.1-5": {
"path": LOG_FILE + ".1-5",
"collect_default": True},
"streams_stdout.1-5": {
"path": STDOUT_FILE + ".1-5",
"collect_default": True},
"streams_stderr.1-5": {
"path": STDERR_FILE + ".1-5",
"collect_default": True},
"streams_log.1-6": {
"path": LOG_FILE + ".1-6",
"collect_default": True},
"streams_stdout.1-6": {
"path": STDOUT_FILE + ".1-6",
"collect_default": True},
"streams_stderr.1-6": {
"path": STDERR_FILE + ".1-6",
"collect_default": True},
"jmx_log": {
"path": JMX_LOG_FILE,
"collect_default": True},
"jmx_err": {
"path": JMX_ERR_FILE,
"collect_default": True},
}
def __init__(self, test_context, kafka, streams_class_name, user_test_args1, user_test_args2=None, user_test_args3=None, user_test_args4=None):
Service.__init__(self, test_context, num_nodes=1)
self.kafka = kafka
self.args = {'streams_class_name': streams_class_name,
'user_test_args1': user_test_args1,
'user_test_args2': user_test_args2,
'user_test_args3': user_test_args3,
'user_test_args4': user_test_args4}
self.log_level = "DEBUG"
@property
def node(self):
return self.nodes[0]
def pids(self, node):
try:
pids = [pid for pid in node.account.ssh_capture("cat " + self.PID_FILE, callback=str)]
return [int(pid) for pid in pids]
except Exception, exception:
self.logger.debug(str(exception))
return []
def stop_nodes(self, clean_shutdown=True):
for node in self.nodes:
self.stop_node(node, clean_shutdown)
def stop_node(self, node, clean_shutdown=True):
self.logger.info((clean_shutdown and "Cleanly" or "Forcibly") + " stopping Streams Test on " + str(node.account))
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=True)
if clean_shutdown:
for pid in pids:
wait_until(lambda: not node.account.alive(pid), timeout_sec=120, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
node.account.ssh("rm -f " + self.PID_FILE, allow_fail=False)
def restart(self):
for node in self.nodes:
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.stop_node(node)
self.start_node(node)
def abortThenRestart(self):
# We don't want to do any clean up here, just abort then restart the process. The running service is killed immediately.
for node in self.nodes:
self.logger.info("Aborting Kafka Streams on " + str(node.account))
self.stop_node(node, False)
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.start_node(node)
def wait(self, timeout_sec=1440):
for node in self.nodes:
self.wait_node(node, timeout_sec)
def wait_node(self, node, timeout_sec=None):
for pid in self.pids(node):
wait_until(lambda: not node.account.alive(pid), timeout_sec=timeout_sec, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
def clean_node(self, node):
node.account.kill_process("streams", clean_shutdown=False, allow_fail=True)
if self.CLEAN_NODE_ENABLED:
node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing streams cmd: " + cmd)
return cmd
def prop_file(self):
cfg = KafkaConfig(**{streams_property.STATE_DIR: self.PERSISTENT_ROOT, streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()})
return cfg.render()
def start_node(self, node):
node.account.mkdirs(self.PERSISTENT_ROOT)
prop_file = self.prop_file()
node.account.create_file(self.CONFIG_FILE, prop_file)
node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j.properties', log_file=self.LOG_FILE))
self.logger.info("Starting StreamsTest process on " + str(node.account))
with node.account.monitor_log(self.STDOUT_FILE) as monitor:
node.account.ssh(self.start_cmd(node))
monitor.wait_until('StreamsTest instance started', timeout_sec=60, err_msg="Never saw message indicating StreamsTest finished startup on " + str(node.account))
if len(self.pids(node)) == 0:
raise RuntimeError("No process ids recorded")
class StreamsSmokeTestBaseService(StreamsTestBaseService):
"""Base class for Streams Smoke Test services providing some common settings and functionality"""
def __init__(self, test_context, kafka, command, processing_guarantee = 'at_least_once', num_threads = 3):
super(StreamsSmokeTestBaseService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsSmokeTest",
command)
self.NUM_THREADS = num_threads
self.PROCESSING_GUARANTEE = processing_guarantee
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
streams_property.NUM_THREADS: self.NUM_THREADS}
cfg = KafkaConfig(**properties)
return cfg.render()
class StreamsEosTestBaseService(StreamsTestBaseService):
"""Base class for Streams EOS Test services providing some common settings and functionality"""
clean_node_enabled = True
def __init__(self, test_context, kafka, processing_guarantee, command):
super(StreamsEosTestBaseService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsEosTest",
command)
self.PROCESSING_GUARANTEE = processing_guarantee
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE}
cfg = KafkaConfig(**properties)
return cfg.render()
def clean_node(self, node):
if self.clean_node_enabled:
super(StreamsEosTestBaseService, self).clean_node(node)
class StreamsSmokeTestDriverService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestDriverService, self).__init__(test_context, kafka, "run")
self.DISABLE_AUTO_TERMINATE = ""
def disable_auto_terminate(self):
self.DISABLE_AUTO_TERMINATE = "disableAutoTerminate"
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['disable_auto_terminate'] = self.DISABLE_AUTO_TERMINATE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(disable_auto_terminate)s" \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
return cmd
class StreamsSmokeTestJobRunnerService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka, processing_guarantee, num_threads = 3):
super(StreamsSmokeTestJobRunnerService, self).__init__(test_context, kafka, "process", processing_guarantee, num_threads)
class StreamsEosTestDriverService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestDriverService, self).__init__(test_context, kafka, "not-required", "run")
class StreamsEosTestJobRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka, processing_guarantee):
super(StreamsEosTestJobRunnerService, self).__init__(test_context, kafka, processing_guarantee, "process")
class StreamsComplexEosTestJobRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka, processing_guarantee):
super(StreamsComplexEosTestJobRunnerService, self).__init__(test_context, kafka, processing_guarantee, "process-complex")
class StreamsEosTestVerifyRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestVerifyRunnerService, self).__init__(test_context, kafka, "not-required", "verify")
class StreamsComplexEosTestVerifyRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsComplexEosTestVerifyRunnerService, self).__init__(test_context, kafka, "not-required", "verify-complex")
class StreamsSmokeTestShutdownDeadlockService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestShutdownDeadlockService, self).__init__(test_context, kafka, "close-deadlock-test")
class StreamsBrokerCompatibilityService(StreamsTestBaseService):
def __init__(self, test_context, kafka, processingMode):
super(StreamsBrokerCompatibilityService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.BrokerCompatibilityTest",
processingMode)
class StreamsBrokerDownResilienceService(StreamsTestBaseService):
def __init__(self, test_context, kafka, configs):
super(StreamsBrokerDownResilienceService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsBrokerDownResilienceTest",
configs)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
class StreamsStandbyTaskService(StreamsTestBaseService):
def __init__(self, test_context, kafka, configs):
super(StreamsStandbyTaskService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsStandByReplicaTest",
configs)
class StreamsOptimizedUpgradeTestService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsOptimizedUpgradeTestService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsOptimizedTest",
"")
self.OPTIMIZED_CONFIG = 'none'
self.INPUT_TOPIC = None
self.AGGREGATION_TOPIC = None
self.REDUCE_TOPIC = None
self.JOIN_TOPIC = None
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
properties['topology.optimization'] = self.OPTIMIZED_CONFIG
properties['input.topic'] = self.INPUT_TOPIC
properties['aggregation.topic'] = self.AGGREGATION_TOPIC
properties['reduce.topic'] = self.REDUCE_TOPIC
properties['join.topic'] = self.JOIN_TOPIC
properties['acceptable.recovery.lag'] = "9223372036854775807"
cfg = KafkaConfig(**properties)
return cfg.render()
class StreamsUpgradeTestJobRunnerService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsUpgradeTestJobRunnerService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsUpgradeTest",
"")
self.UPGRADE_FROM = None
self.UPGRADE_TO = None
self.extra_properties = {}
def set_config(self, key, value):
self.extra_properties[key] = value
def set_version(self, kafka_streams_version):
self.KAFKA_STREAMS_VERSION = kafka_streams_version
def set_upgrade_from(self, upgrade_from):
self.UPGRADE_FROM = upgrade_from
def set_upgrade_to(self, upgrade_to):
self.UPGRADE_TO = upgrade_to
def prop_file(self):
properties = self.extra_properties.copy()
properties[streams_property.STATE_DIR] = self.PERSISTENT_ROOT
properties[streams_property.KAFKA_SERVERS] = self.kafka.bootstrap_servers()
if self.UPGRADE_FROM is not None:
properties['upgrade.from'] = self.UPGRADE_FROM
if self.UPGRADE_TO == "future_version":
properties['test.future.metadata'] = "any_value"
cfg = KafkaConfig(**properties)
return cfg.render()
def start_cmd(self, node):
args = self.args.copy()
if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
args['zk'] = self.kafka.zk.connect_setting()
else:
args['zk'] = ""
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['version'] = self.KAFKA_STREAMS_VERSION
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
" %(kafka_run_class)s %(streams_class_name)s %(zk)s %(config_file)s " \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
class StreamsNamedRepartitionTopicService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsNamedRepartitionTopicService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsNamedRepartitionTest",
"")
self.ADD_ADDITIONAL_OPS = 'false'
self.INPUT_TOPIC = None
self.AGGREGATION_TOPIC = None
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
properties['input.topic'] = self.INPUT_TOPIC
properties['aggregation.topic'] = self.AGGREGATION_TOPIC
properties['add.operations'] = self.ADD_ADDITIONAL_OPS
cfg = KafkaConfig(**properties)
return cfg.render()
class StaticMemberTestService(StreamsTestBaseService):
def __init__(self, test_context, kafka, group_instance_id, num_threads):
super(StaticMemberTestService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StaticMemberTestClient",
"")
self.INPUT_TOPIC = None
self.GROUP_INSTANCE_ID = group_instance_id
self.NUM_THREADS = num_threads
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
streams_property.NUM_THREADS: self.NUM_THREADS,
consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
consumer_property.SESSION_TIMEOUT_MS: 60000}
properties['input.topic'] = self.INPUT_TOPIC
properties['internal.task.assignor.class'] = "org.apache.kafka.streams.processor.internals.assignment.StickyTaskAssignor"
cfg = KafkaConfig(**properties)
return cfg.render()
class CooperativeRebalanceUpgradeService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(CooperativeRebalanceUpgradeService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsUpgradeToCooperativeRebalanceTest",
"")
self.UPGRADE_FROM = None
self.SOURCE_TOPIC = None
self.SINK_TOPIC = None
self.TASK_DELIMITER = "#"
self.REPORT_INTERVAL = None
self.standby_tasks = None
self.active_tasks = None
self.upgrade_phase = None
def set_tasks(self, task_string):
label = "TASK-ASSIGNMENTS:"
task_string_substr = task_string[len(label):]
all_tasks = task_string_substr.split(self.TASK_DELIMITER)
self.active_tasks = set(all_tasks[0].split(","))
if len(all_tasks) > 1:
self.standby_tasks = set(all_tasks[1].split(","))
def set_version(self, kafka_streams_version):
self.KAFKA_STREAMS_VERSION = kafka_streams_version
def set_upgrade_phase(self, upgrade_phase):
self.upgrade_phase = upgrade_phase
def start_cmd(self, node):
args = self.args.copy()
if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
args['zk'] = self.kafka.zk.connect_setting()
else:
args['zk'] = ""
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['version'] = self.KAFKA_STREAMS_VERSION
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
" %(kafka_run_class)s %(streams_class_name)s %(zk)s %(config_file)s " \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
if self.UPGRADE_FROM is not None:
properties['upgrade.from'] = self.UPGRADE_FROM
else:
try:
del properties['upgrade.from']
except KeyError:
self.logger.info("Key 'upgrade.from' not there, better safe than sorry")
if self.upgrade_phase is not None:
properties['upgrade.phase'] = self.upgrade_phase
properties['source.topic'] = self.SOURCE_TOPIC
properties['sink.topic'] = self.SINK_TOPIC
properties['task.delimiter'] = self.TASK_DELIMITER
properties['report.interval'] = self.REPORT_INTERVAL
cfg = KafkaConfig(**properties)
return cfg.render()
| false | true |
f7f885492edf5b61c328e48b5c3832df019289bf | 3,523 | py | Python | src/train_DBM.py | 1512474508/deep-generative-models | 67d65d63f9f67050c29ae500bdd6b4518da14f7c | [
"MIT"
] | 1 | 2020-07-28T07:03:21.000Z | 2020-07-28T07:03:21.000Z | src/train_DBM.py | 1512474508/deep-generative-models | 67d65d63f9f67050c29ae500bdd6b4518da14f7c | [
"MIT"
] | null | null | null | src/train_DBM.py | 1512474508/deep-generative-models | 67d65d63f9f67050c29ae500bdd6b4518da14f7c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pickle
import datetime
import argparse
import re
import glob
from obj.DBM import DBM
import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
from skimage.transform import resize
################################
# train DBM from input data
################################
def trainDBM(data, learning_rate, k1, k2, epochs, batch_size, dims):
    """Train a Deep Boltzmann Machine on the chosen dataset and pickle the result.

    Parameters
    ----------
    data : str
        Dataset name: 'mnist', 'fashion_mnist' or 'faces'.
    learning_rate : float
        Learning rate for the stacked RBMs.
    k1 : int
        Gibbs-sampling steps before the PCD-k algorithm.
    k2 : int
        Gibbs-sampling steps during the PCD-k algorithm.
    epochs : int
        Training passes per RBM.
    batch_size : int
        Size of training batches.
    dims : str
        Comma-separated layer sizes, e.g. "784,500,784,500".

    Raises
    ------
    NameError
        If *data* names an unknown dataset.
    """
    # import data
    print("importing training data")
    if data == "fashion_mnist":
        fashion_mnist = tf.keras.datasets.fashion_mnist
        (x_train, _), (_, _) = fashion_mnist.load_data()
    elif data == "mnist":
        mnist = tf.keras.datasets.mnist
        (x_train, _), (_, _) = mnist.load_data()
    elif data == "faces":
        x_train = [resize(mpimg.imread(file), (28, 28))
                   for file in glob.glob("data/faces/*")]
        x_train = np.asarray(x_train)
        # make images sparse for easier distinctions
        for img in x_train:
            img[img < np.mean(img) + 0.5 * np.std(img)] = 0
    else:
        raise NameError("unknown data type: %s" % data)
    if data in ("mnist", "fashion_mnist"):
        # scale raw pixel intensities to [0, 1]; 'faces' images are already
        # probabilities after resize()
        x_train = x_train / 255.0
    # flatten each 28x28 image into a (784, 1) float32 tensor
    x_train = [tf.cast(tf.reshape(x, shape=(784, 1)), "float32") for x in x_train]
    # create log directory
    out_dir = os.path.join(
        "pickles",
        getCurrentTime() + "_" + re.sub(",", "_", dims) + "_" + data + "_dbm")
    os.makedirs(out_dir)
    # parse string input into integer list
    dims = [int(el) for el in dims.split(",")]
    dbm = DBM(dims, learning_rate, k1, k2, epochs, batch_size)
    dbm.train_PCD(x_train)
    # dump dbm pickle; 'with' guarantees the file handle is closed even if
    # pickling fails (the old open()/close() pair leaked it on error)
    with open(os.path.join(out_dir, "dbm.pickle"), "wb") as f:
        pickle.dump(dbm, f, protocol=pickle.HIGHEST_PROTOCOL)
def getCurrentTime():
    """Return the current local time formatted as YYYY_MM_DD_HH_MM_SS."""
    timestamp_format = "%Y_%m_%d_%H_%M_%S"
    return datetime.datetime.now().strftime(timestamp_format)
####################################
# main command call
####################################
if __name__ == "__main__":
    # assemble the command-line interface
    cli = argparse.ArgumentParser()
    cli.add_argument("--data", type=str, default="mnist",
                     help="data source to train DBM, possibilities are 'mnist', 'fashion_mnist' and 'faces' <default: 'mnist'>")
    cli.add_argument("--learning-rate", type=float, default=0.01,
                     help="learning rate for stacked RBMs <default: 0.01>")
    cli.add_argument("--k1", type=int, default=1,
                     help="number of Gibbs-sampling steps pre-PCD-k algorithm <default: 1>")
    cli.add_argument("--k2", type=int, default=5,
                     help="number of Gibbs-sampling steps during PCD-k algorithm <default: 5>")
    cli.add_argument("--epochs", type=int, default=1,
                     help="number of overall training data passes for each RBM <default: 1>")
    cli.add_argument("--batch-size", type=int, default=5,
                     help="size of training data batches <default: 5>")
    required_group = cli.add_argument_group('required named arguments')
    required_group.add_argument('-d', '--dimensions', type=str,
                                help="consecutive enumeration of visible and hidden layers separated by a comma character, eg. 784,500,784,500",
                                required=True)
    parsed = cli.parse_args()
    # train DBM based on parameters
    trainDBM(parsed.data, parsed.learning_rate, parsed.k1, parsed.k2,
             parsed.epochs, parsed.batch_size, parsed.dimensions)
| 42.445783 | 144 | 0.62759 |
import os
import pickle
import datetime
import argparse
import re
import glob
from obj.DBM import DBM
import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
from skimage.transform import resize
time = getCurrentTime()+"_"+re.sub(",","_",dims)+"_"+data+"_dbm"
os.makedirs("pickles/"+current_time)
dims = [int(el) for el in dims.split(",")]
dbm = DBM(dims, learning_rate, k1, k2, epochs, batch_size)
dbm.train_PCD(x_train)
f = open("pickles/"+current_time+"/dbm.pickle", "wb")
pickle.dump(dbm, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
def getCurrentTime():
    """Underscore-separated timestamp string for the current moment."""
    now = datetime.datetime.today()
    return now.strftime("%Y_%m_%d_%H_%M_%S")
args = parser.parse_args()
trainDBM(args.data,args.learning_rate,args.k1,args.k2,args.epochs,args.batch_size,args.dimensions)
| true | true |
f7f8858d90fb3295fa86e40f94d4ca11fce76ac9 | 2,047 | py | Python | lpot/ux/utils/expiring_dict.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | lpot/ux/utils/expiring_dict.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | lpot/ux/utils/expiring_dict.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dict with items expiring after given time."""
import time
from collections import UserDict
from typing import Any, Optional
class ExpiringDictItem:
    """Pairs a stored value with the absolute time after which it is stale."""

    def __init__(self, value: Any, expires_at: float):
        """Remember the value and its expiry time (epoch seconds)."""
        self.value = value
        self.expires_at = expires_at

    def is_expired(self) -> bool:
        """Return True once the wall clock has passed the expiry time."""
        return self.expires_at < time.time()
class ExpiringDict(UserDict):
    """Dict whose entries become unreadable (and are purged) after ``ttl`` seconds.

    Expired entries are removed lazily on access; previously they were only
    hidden by ``__getitem__`` but stayed in ``self.data`` forever, so the
    mapping grew without bound under a changing key set.
    """

    def __init__(self, initial_value: Optional[dict] = None, ttl: int = 120) -> None:
        """Create object, seeding it from ``initial_value``.

        :param initial_value: optional mapping whose items are inserted now
            (their expiry clock starts at construction time).
        :param ttl: lifetime of each entry, in seconds.
        """
        super().__init__()
        self.ttl = ttl
        if initial_value is None:
            initial_value = {}
        for key, value in initial_value.items():
            self[key] = value

    def __setitem__(self, key: str, item: Any) -> None:
        """Store ``item``; its expiry clock starts now."""
        super().__setitem__(key, self._create_item(value=item))

    def __getitem__(self, key: str) -> Any:
        """Return the live value for ``key``.

        :raises KeyError: if the key is absent or its entry has expired.
        """
        item: ExpiringDictItem = super().__getitem__(key)
        if item.is_expired():
            # Drop the stale entry so the mapping cannot grow unboundedly.
            del self.data[key]
            raise KeyError(key)
        return item.value

    def _create_item(self, value: Any) -> ExpiringDictItem:
        """Wrap ``value`` with its absolute expiry timestamp."""
        return ExpiringDictItem(value=value, expires_at=time.time() + self.ttl)
| 33.557377 | 85 | 0.657548 |
import time
from collections import UserDict
from typing import Any, Optional
class ExpiringDictItem:
    """Value wrapper that records an absolute expiry timestamp."""
    def __init__(self, value: Any, expires_at: float):
        """Store the value and the epoch time at which it expires."""
        self.value = value
        self.expires_at = expires_at
    def is_expired(self) -> bool:
        """Return True when the current time is past ``expires_at``."""
        return time.time() > self.expires_at
class ExpiringDict(UserDict):
    """UserDict whose entries are readable only for ``ttl`` seconds."""
    def __init__(self, initial_value: Optional[dict] = None, ttl: int = 120) -> None:
        """Seed the dict from ``initial_value``; each entry lives ``ttl`` seconds."""
        super().__init__()
        self.ttl = ttl
        if initial_value is None:
            initial_value = {}
        for (key, value) in initial_value.items():
            self[key] = value
    def __setitem__(self, key: str, item: Any) -> None:
        """Wrap ``item`` with an expiry timestamp and store it."""
        super().__setitem__(key, self._create_item(value=item))
    def __getitem__(self, key: str) -> Any:
        """Return the stored value, or raise KeyError if it has expired."""
        item: ExpiringDictItem = super().__getitem__(key)
        if item.is_expired():
            # NOTE(review): the expired entry stays in ``self.data`` — it is
            # hidden but never reclaimed, so the mapping can grow unbounded.
            raise KeyError(key)
        return item.value
    def _create_item(self, value: Any) -> ExpiringDictItem:
        """Build the timestamped wrapper for ``value``."""
        return ExpiringDictItem(value=value, expires_at=time.time() + self.ttl)
| true | true |
f7f88cc7be9898b09db70273b2cfc6e67eac9843 | 451 | py | Python | apps/puzzles/migrations/0010_auto_20180201_2215.py | madjaqk/puzzle_master_v2 | e50b0f02abbf32aebe9583152bd9a5b43f5da7d7 | [
"MIT"
] | null | null | null | apps/puzzles/migrations/0010_auto_20180201_2215.py | madjaqk/puzzle_master_v2 | e50b0f02abbf32aebe9583152bd9a5b43f5da7d7 | [
"MIT"
] | 8 | 2020-02-07T04:11:07.000Z | 2022-02-10T07:04:57.000Z | apps/puzzles/migrations/0010_auto_20180201_2215.py | madjaqk/puzzle_master_v2 | e50b0f02abbf32aebe9583152bd9a5b43f5da7d7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2018-02-02 06:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Django-generated schema migration: relaxes the self-referential
    # ``metapuzzles`` M2M field on Puzzle to allow blank values.
    # Do not edit by hand — changes can desynchronize migration state
    # from the model definitions.
    dependencies = [
        ('puzzles', '0009_puzzle_description'),
    ]
    operations = [
        migrations.AlterField(
            model_name='puzzle',
            name='metapuzzles',
            field=models.ManyToManyField(blank=True, related_name='_puzzle_metapuzzles_+', to='puzzles.Puzzle'),
        ),
    ]
| 23.736842 | 112 | 0.631929 |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Generated migration altering Puzzle.metapuzzles to accept blank values;
    # depends on migration 0009 of the ``puzzles`` app.
    dependencies = [
        ('puzzles', '0009_puzzle_description'),
    ]
    operations = [
        migrations.AlterField(
            model_name='puzzle',
            name='metapuzzles',
            field=models.ManyToManyField(blank=True, related_name='_puzzle_metapuzzles_+', to='puzzles.Puzzle'),
        ),
    ]
| true | true |
f7f88cf4609bc9be85514bd2253b6d7fa8cfb737 | 862 | py | Python | password_generator/urls.py | ritssaikiran90/django-passwordgenerator | 03cebcd97be68794531b13c116d767f1d669c471 | [
"Apache-2.0"
] | null | null | null | password_generator/urls.py | ritssaikiran90/django-passwordgenerator | 03cebcd97be68794531b13c116d767f1d669c471 | [
"Apache-2.0"
] | null | null | null | password_generator/urls.py | ritssaikiran90/django-passwordgenerator | 03cebcd97be68794531b13c116d767f1d669c471 | [
"Apache-2.0"
] | null | null | null | """password_generator URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
#from django.contrib import admin
from django.urls import path
from generator import views
# Route table: both endpoints live under /home/ and are served by the
# generator app's views imported above.
urlpatterns = [
    path('home/',views.home,name="home"),
    path('home/generatepasswords/',views.passwords,name="password"),
]
| 37.478261 | 77 | 0.719258 |
from django.urls import path
from generator import views
# URL routes for the password generator: landing page and generation endpoint.
urlpatterns = [
    path('home/',views.home,name="home"),
    path('home/generatepasswords/',views.passwords,name="password"),
]
| true | true |
f7f88db69a48ca2950b283e2c40bcc0b02579ea4 | 23,322 | py | Python | src/spring-cloud/azext_spring_cloud/vendored_sdks/appplatform/operations/_custom_domains_operations.py | tilnl/azure-cli-extensions | ef9946bbcde34bb51343554a8f2a8dedd1f7d44a | [
"MIT"
] | null | null | null | src/spring-cloud/azext_spring_cloud/vendored_sdks/appplatform/operations/_custom_domains_operations.py | tilnl/azure-cli-extensions | ef9946bbcde34bb51343554a8f2a8dedd1f7d44a | [
"MIT"
] | null | null | null | src/spring-cloud/azext_spring_cloud/vendored_sdks/appplatform/operations/_custom_domains_operations.py | tilnl/azure-cli-extensions | ef9946bbcde34bb51343554a8f2a8dedd1f7d44a | [
"MIT"
] | 1 | 2020-07-28T18:01:53.000Z | 2020-07-28T18:01:53.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class CustomDomainsOperations(object):
    """CustomDomainsOperations operations.

    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.

    NOTE: this class is AutoRest-generated (see file header); hand edits are
    lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2019-05-01-preview".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed service API version appended to every request issued here.
        self.api_version = "2019-05-01-preview"
        self.config = config

    def get(
            self, resource_group_name, service_name, app_name, domain_name, custom_headers=None, raw=False, **operation_config):
        """Get the custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CustomDomainResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.appplatform.models.CustomDomainResource or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'appName': self._serialize.url("app_name", app_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Non-200 responses surface as CloudError carrying the service
        # request id for correlation.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        # Deserialize response
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('CustomDomainResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    # URL template consumed by the method body above.
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}

    def create_or_update(
            self, resource_group_name, service_name, app_name, domain_name, properties=None, custom_headers=None, raw=False, **operation_config):
        """Create or update custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :param properties: Properties of the custom domain resource.
        :type properties:
         ~azure.mgmt.appplatform.models.CustomDomainProperties
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CustomDomainResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.appplatform.models.CustomDomainResource or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the bare properties into the resource envelope expected on the wire.
        domain_resource = models.CustomDomainResource(properties=properties)

        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'appName': self._serialize.url("app_name", app_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(domain_resource, 'CustomDomainResource')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        # Deserialize response
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('CustomDomainResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}

    def delete(
            self, resource_group_name, service_name, app_name, domain_name, custom_headers=None, raw=False, **operation_config):
        """Delete the custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'appName': self._serialize.url("app_name", app_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Both 200 and 204 indicate a successful delete.
        if response.status_code not in [200, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}

    def patch(
            self, resource_group_name, service_name, app_name, domain_name, properties=None, custom_headers=None, raw=False, **operation_config):
        """Update custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :param properties: Properties of the custom domain resource.
        :type properties:
         ~azure.mgmt.appplatform.models.CustomDomainProperties
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CustomDomainResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.appplatform.models.CustomDomainResource or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the bare properties into the resource envelope expected on the wire.
        domain_resource = models.CustomDomainResource(properties=properties)

        # Construct URL
        url = self.patch.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'appName': self._serialize.url("app_name", app_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(domain_resource, 'CustomDomainResource')

        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        # Deserialize response
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('CustomDomainResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}

    def list(
            self, resource_group_name, service_name, app_name, custom_headers=None, raw=False, **operation_config):
        """List the custom domains of one lifecycle application.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of CustomDomainResource
        :rtype:
         ~azure.mgmt.appplatform.models.CustomDomainResourcePaged[~azure.mgmt.appplatform.models.CustomDomainResource]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the
            # service-provided next_link verbatim (query already embedded).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str'),
                    'appName': self._serialize.url("app_name", app_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Issues one page request on behalf of the pager and returns the
            # raw response; the Paged class extracts items and next_link.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None

        if raw:
            header_dict = {}

        deserialized = models.CustomDomainResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains'}

    def validate(
            self, resource_group_name, service_name, app_name, name, custom_headers=None, raw=False, **operation_config):
        """Check the resource name is valid as well as not in use.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param name: Name to be validated
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CustomDomainValidateResult or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.appplatform.models.CustomDomainValidateResult or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # The candidate name travels in the POST body, not in the URL path.
        validate_payload = models.CustomDomainValidatePayload(name=name)

        # Construct URL
        url = self.validate.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'appName': self._serialize.url("app_name", app_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(validate_payload, 'CustomDomainValidatePayload')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        # Deserialize response
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('CustomDomainValidateResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    validate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/validate'}
| 48.790795 | 198 | 0.671855 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class CustomDomainsOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-05-01-preview"
self.config = config
def get(
self, resource_group_name, service_name, app_name, domain_name, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CustomDomainResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}
def create_or_update(
self, resource_group_name, service_name, app_name, domain_name, properties=None, custom_headers=None, raw=False, **operation_config):
domain_resource = models.CustomDomainResource(properties=properties)
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(domain_resource, 'CustomDomainResource')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CustomDomainResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}
def delete(
        self, resource_group_name, service_name, app_name, domain_name, custom_headers=None, raw=False, **operation_config):
    """Delete the custom domain of one application.

    Issues an HTTP DELETE against the custom-domain resource and raises
    ``CloudError`` unless the service answers 200 or 204. Returns a
    ``ClientRawResponse`` when ``raw`` is true, otherwise ``None``.
    """
    # Expand the URL template with the serialized path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
        'appName': self._serialize.url("app_name", app_name, 'str'),
        'domainName': self._serialize.url("domain_name", domain_name, 'str'),
    }
    url = self._client.format_url(self.delete.metadata['url'], **path_args)

    # Query string carries only the service API version.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Assemble request headers; caller-supplied headers may override defaults.
    headers = {}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.delete(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in (200, 204):
        err = CloudError(response)
        err.request_id = response.headers.get('x-ms-request-id')
        raise err

    if raw:
        return ClientRawResponse(None, response)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}
def patch(
        self, resource_group_name, service_name, app_name, domain_name, properties=None, custom_headers=None, raw=False, **operation_config):
    """Apply a partial update to the custom domain of one application.

    Sends an HTTP PATCH whose body is a ``CustomDomainResource`` wrapping
    *properties*, and returns the deserialized resource (or the raw
    response when ``raw`` is true). Raises ``CloudError`` on any status
    other than 200.
    """
    # Wrap the caller-supplied properties in the wire model.
    body_model = models.CustomDomainResource(properties=properties)

    # Expand the URL template with the serialized path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
        'appName': self._serialize.url("app_name", app_name, 'str'),
        'domainName': self._serialize.url("domain_name", domain_name, 'str'),
    }
    url = self._client.format_url(self.patch.metadata['url'], **path_args)

    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # JSON request and response; caller-supplied headers may override.
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    body_content = self._serialize.body(body_model, 'CustomDomainResource')
    request = self._client.patch(url, query, headers, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code != 200:
        err = CloudError(response)
        err.request_id = response.headers.get('x-ms-request-id')
        raise err

    # Only a 200 reaches this point, so the body is always deserialized.
    deserialized = self._deserialize('CustomDomainResource', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}'}
def list(
        self, resource_group_name, service_name, app_name, custom_headers=None, raw=False, **operation_config):
    """Return a paged iterator over the custom domains of an application.

    The returned ``CustomDomainResourcePaged`` fetches pages lazily; each
    page request raises ``CloudError`` on a non-200 status. When ``raw``
    is true a dict is supplied so response headers are captured per page.
    """
    def build_request(next_link=None):
        # First page expands the URL template; follow-up pages use the
        # continuation link verbatim (it already embeds its query string).
        if next_link:
            url = next_link
            query = {}
        else:
            path_args = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'serviceName': self._serialize.url("service_name", service_name, 'str'),
                'appName': self._serialize.url("app_name", app_name, 'str'),
            }
            url = self._client.format_url(self.list.metadata['url'], **path_args)
            query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

        headers = {'Accept': 'application/json'}
        if self.config.generate_client_request_id:
            headers['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            headers.update(custom_headers)
        if self.config.accept_language is not None:
            headers['accept-language'] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, 'str')

        return self._client.get(url, query, headers)

    def fetch_page(next_link=None):
        response = self._client.send(build_request(next_link), stream=False, **operation_config)
        if response.status_code != 200:
            err = CloudError(response)
            err.request_id = response.headers.get('x-ms-request-id')
            raise err
        return response

    header_dict = {} if raw else None
    return models.CustomDomainResourcePaged(fetch_page, self._deserialize.dependencies, header_dict)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains'}
def validate(
        self, resource_group_name, service_name, app_name, name, custom_headers=None, raw=False, **operation_config):
    """Check whether *name* is usable as a custom domain for the application.

    POSTs a ``CustomDomainValidatePayload`` to the ``domains/validate``
    endpoint and returns the deserialized ``CustomDomainValidateResult``
    (or the raw response when ``raw`` is true). Raises ``CloudError`` on
    any status other than 200.
    """
    payload = models.CustomDomainValidatePayload(name=name)

    # Expand the URL template with the serialized path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
        'appName': self._serialize.url("app_name", app_name, 'str'),
    }
    url = self._client.format_url(self.validate.metadata['url'], **path_args)

    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # JSON request and response; caller-supplied headers may override.
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    body_content = self._serialize.body(payload, 'CustomDomainValidatePayload')
    request = self._client.post(url, query, headers, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code != 200:
        err = CloudError(response)
        err.request_id = response.headers.get('x-ms-request-id')
        raise err

    # Only a 200 reaches this point, so the body is always deserialized.
    deserialized = self._deserialize('CustomDomainValidateResult', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
validate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/validate'}
| true | true |
f7f88e0d59aa4250edde3820139163f252991316 | 1,479 | py | Python | meiduo_mall/meiduo_mall/urls.py | 00wsmart00/meiduo_project_all | 43af3685aa847705154d0a1982a9ed8a1432fc43 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/urls.py | 00wsmart00/meiduo_project_all | 43af3685aa847705154d0a1982a9ed8a1432fc43 | [
"MIT"
] | 9 | 2020-05-11T20:24:01.000Z | 2022-02-26T15:05:53.000Z | meiduo_mall/meiduo_mall/urls.py | 00wsmart00/meiduo_project_all | 43af3685aa847705154d0a1982a9ed8a1432fc43 | [
"MIT"
] | null | null | null | """meiduo_mall URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Project URL table. Most apps are included under r'^' (no shared prefix),
# so resolution falls through to the patterns inside each app's URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # users app routes
    url(r'^', include('users.urls', namespace='users')),
    # contents app routes
    url(r'^', include('contents.urls', namespace='contents')),
    # verifications app routes
    url(r'^', include('verifications.urls')),
    # oauth app routes
    url(r'^', include('oauth.urls')),
    # areas app routes
    url(r'^', include('areas.urls')),
    # goods app routes
    url(r'^', include('goods.urls', namespace='goods')),
    # Haystack search registration
    url(r'^search/', include('haystack.urls')),
    # carts app routes
    url(r'^', include('carts.urls', namespace='carts')),
    # orders app routes
    url(r'^', include('orders.urls', namespace='orders')),
    # payment app routes
    url(r'^', include('payment.urls')),
    # admin backend API routes
    url('^meiduo_admin/', include('meiduo_admin.urls')),
]
| 33.613636 | 79 | 0.644354 | from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('users.urls', namespace='users')),
url(r'^', include('contents.urls', namespace='contents')),
url(r'^', include('verifications.urls')),
url(r'^', include('oauth.urls')),
url(r'^', include('areas.urls')),
url(r'^', include('goods.urls', namespace='goods')),
url(r'^search/', include('haystack.urls')),
url(r'^', include('carts.urls', namespace='carts')),
url(r'^', include('orders.urls', namespace='orders')),
url(r'^', include('payment.urls')),
url('^meiduo_admin/', include('meiduo_admin.urls')),
]
| true | true |
f7f88e5b74de452ffb6e4c73bb7886b085b10acb | 427 | py | Python | rununi2zg.py | gold-mdy-geo/UFC1 | 308ddfd6a20420756255ffd73c71d8c8af8fcd63 | [
"MIT"
] | null | null | null | rununi2zg.py | gold-mdy-geo/UFC1 | 308ddfd6a20420756255ffd73c71d8c8af8fcd63 | [
"MIT"
] | null | null | null | rununi2zg.py | gold-mdy-geo/UFC1 | 308ddfd6a20420756255ffd73c71d8c8af8fcd63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import codecs
import uni2zg
import sys
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
input_file = codecs.open(input_file_name, encoding='utf-8')
output_file = codecs.open(output_file_name, encoding='utf-8', mode='w')
for input_line in input_file:
input_line = uni2zg.convert(input_line)
output_file.write(input_line)
output_file.flush()
input_file.close()
output_file.close()
| 20.333333 | 71 | 0.754098 |
import codecs
import uni2zg
import sys
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
input_file = codecs.open(input_file_name, encoding='utf-8')
output_file = codecs.open(output_file_name, encoding='utf-8', mode='w')
for input_line in input_file:
input_line = uni2zg.convert(input_line)
output_file.write(input_line)
output_file.flush()
input_file.close()
output_file.close()
| true | true |
f7f88e9018ecd10e3778563e14f73ca1e1520113 | 8,123 | py | Python | docs/conf.py | d-brakenhoff/timml | ca94c8f155e59e5c02db3cb5c3dd50a7d478896e | [
"MIT"
] | 24 | 2015-09-13T17:11:58.000Z | 2021-12-14T09:09:17.000Z | docs/conf.py | d-brakenhoff/timml | ca94c8f155e59e5c02db3cb5c3dd50a7d478896e | [
"MIT"
] | 42 | 2015-09-23T19:29:34.000Z | 2022-01-17T09:13:14.000Z | docs/conf.py | d-brakenhoff/timml | ca94c8f155e59e5c02db3cb5c3dd50a7d478896e | [
"MIT"
] | 26 | 2015-08-24T17:25:27.000Z | 2021-07-09T14:09:30.000Z | # -*- coding: utf-8 -*-
#
# PASTAS documentation build configuration file, created by
# sphinx-quickstart on Wed May 11 12:38:06 2016.
# Repository setup is according to:
# http://gisellezeno.com/tutorials/sphinx-for-python-documentation.html
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TimML'
copyright = u'2017, Mark Bakker'
author = u'Mark Bakker'
rst_epilog = '.. |project| replace:: %s' % project
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.0.0'
# The full version, including alpha/beta/rc tags.
release = '5.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_path = [alabaster.get_path()]
html_static_path = ['_static']
html_theme_options = {
'logo': False,
'travis_button': False,
'logo_name': False,
'github_user': 'mbakker7',
'github_repo': 'timml',
'github_banner': False,
'github_button': True,
'github_type': 'watch',
'github_count': True,
'description': "TimML is a multi-layer analytic element model",
'codecov_button': False,
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html'
]
}
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "TimML"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'timmldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TimML.tex', u'TimML Documentation',
u'M. Bakker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'timml', u'TimML Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TimML', u'TimML Documentation',
author, 'TimML', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'https://docs.python.org/3': None,
# 'http://pandas.pydata.org/pandas-docs/stable/': None,
# 'https://docs.scipy.org/doc/scipy/reference/': None,
# 'https://docs.scipy.org/doc/numpy/': None}
# Added by MB
autodoc_member_order = 'bysource'
| 30.309701 | 79 | 0.690878 |
import os
import sys
import alabaster
sys.path.insert(0, os.path.abspath('.'))
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'TimML'
copyright = u'2017, Mark Bakker'
author = u'Mark Bakker'
rst_epilog = '.. |project| replace:: %s' % project
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.0.0'
# The full version, including alpha/beta/rc tags.
release = '5.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_path = [alabaster.get_path()]
html_static_path = ['_static']
html_theme_options = {
'logo': False,
'travis_button': False,
'logo_name': False,
'github_user': 'mbakker7',
'github_repo': 'timml',
'github_banner': False,
'github_button': True,
'github_type': 'watch',
'github_count': True,
'description': "TimML is a multi-layer analytic element model",
'codecov_button': False,
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html'
]
}
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "TimML"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'timmldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TimML.tex', u'TimML Documentation',
u'M. Bakker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'timml', u'TimML Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TimML', u'TimML Documentation',
author, 'TimML', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
autodoc_member_order = 'bysource'
| true | true |
f7f88e9418b85fafd10517a63f6258ec08a8d1ff | 2,862 | py | Python | tests/conftest.py | Richie78321/airtable-python-wrapper | 033f2b85f9da511730258a61af2fba040c4b7e95 | [
"MIT"
] | null | null | null | tests/conftest.py | Richie78321/airtable-python-wrapper | 033f2b85f9da511730258a61af2fba040c4b7e95 | [
"MIT"
] | null | null | null | tests/conftest.py | Richie78321/airtable-python-wrapper | 033f2b85f9da511730258a61af2fba040c4b7e95 | [
"MIT"
] | null | null | null | import pytest
from collections import OrderedDict
from posixpath import join as urljoin
from requests import HTTPError
from urllib.parse import urlencode, quote
from mock import Mock
from airtable import Airtable
@pytest.fixture
def url_builder():
""" Builds Airtable Api Url Manually for mock testing """
def _url_builder(base_key, table_name, params=None):
urltable_name = quote(table_name, safe="")
url = urljoin(Airtable.API_URL, base_key, urltable_name)
if params:
params = OrderedDict(sorted(params.items()))
url += "?" + urlencode(params)
return url
return _url_builder
@pytest.fixture
def constants():
    """Fake credentials and identifiers shared across the test suite."""
    return {
        "API_KEY": "FakeApiKey",
        "BASE_KEY": "appJMY16gZDQrMWpA",
        "TABLE_NAME": "Table Name",
    }
@pytest.fixture()
def table(constants):
    """Airtable client wired to the fake test credentials."""
    return Airtable(
        constants["BASE_KEY"],
        constants["TABLE_NAME"],
        api_key=constants["API_KEY"],
    )
@pytest.fixture
def mock_records():
    """Three fake Airtable records sharing one creation timestamp."""
    created = "2017-06-06T18:30:57.000Z"

    def record(rec_id, same_field, value):
        return {
            "id": rec_id,
            "fields": {"SameField": same_field, "Value": value},
            "createdTime": created,
        }

    return [
        record("recH73JJvr7vv1234", 1234, "abc"),
        record("recyXhbY4uax4567", 456, "def"),
        record("recyXhbY4uax891", 789, "xyz"),
    ]
@pytest.fixture
def mock_response_single(mock_records):
    """Payload of a single-record GET: just the first mock record."""
    first_record, *_ = mock_records
    return first_record


@pytest.fixture
def mock_response_batch(mock_records):
    """Batch payload: the mock records repeated twice under ``records``."""
    return {"records": mock_records + mock_records}


@pytest.fixture
def mock_response_list(mock_records):
    """Two paginated payloads; the first page carries an ``offset`` token."""
    first_page = {"records": mock_records[:2], "offset": "recuOeLpF6TQpArJi"}
    last_page = {"records": mock_records[2:]}
    return [first_page, last_page]
@pytest.fixture
def mock_response_insert(mock_records):
    """Payload of a record-insert response.

    Bug fix: the original built this dict as a bare expression statement,
    so the fixture implicitly returned ``None``; the payload must be
    returned. (``mock_records`` is kept in the signature for parity with
    the sibling fixtures, though this payload does not use it.)
    """
    return {
        "id": "rec9MgW8WhqcbnBx4",
        "fields": {
            "Editorial": ["recdaBsWECUC2aml3"],
            "Persona": "Startup CEO",
            "Verticals": ["recpI1hFWtSrbw5XI"],
            "Content Types": ["How-to posts", "Tutorials"],
            "Notes": "Driven by high impact; looking for ways to implement data driven initiatives",
        },
        "createdTime": "2017-06-06T18:31:12.000Z",
    }
@pytest.fixture
def mock_response_iterator(mock_response_list):
    """Callable for a mock transport: yields successive pages per call."""
    responses = iter(mock_response_list)

    def _next_response(request, context):
        return next(responses)

    return _next_response
def http_error():
    """Side-effect helper for mocks: always raise ``HTTPError('Not Found')``."""
    raise HTTPError("Not Found")
@pytest.fixture
def response():
    """Mock HTTP response whose ``raise_for_status`` always raises."""
    mock_resp = Mock()
    mock_resp.raise_for_status.side_effect = http_error
    mock_resp.url = "page%20url"
    return mock_resp
| 24.672414 | 100 | 0.627184 | import pytest
from collections import OrderedDict
from posixpath import join as urljoin
from requests import HTTPError
from urllib.parse import urlencode, quote
from mock import Mock
from airtable import Airtable
@pytest.fixture
def url_builder():
def _url_builder(base_key, table_name, params=None):
urltable_name = quote(table_name, safe="")
url = urljoin(Airtable.API_URL, base_key, urltable_name)
if params:
params = OrderedDict(sorted(params.items()))
url += "?" + urlencode(params)
return url
return _url_builder
@pytest.fixture
def constants():
return dict(
API_KEY="FakeApiKey", BASE_KEY="appJMY16gZDQrMWpA", TABLE_NAME="Table Name"
)
@pytest.fixture()
def table(constants):
return Airtable(
constants["BASE_KEY"], constants["TABLE_NAME"], api_key=constants["API_KEY"]
)
@pytest.fixture
def mock_records():
return [
{
"id": "recH73JJvr7vv1234",
"fields": {"SameField": 1234, "Value": "abc"},
"createdTime": "2017-06-06T18:30:57.000Z",
},
{
"id": "recyXhbY4uax4567",
"fields": {"SameField": 456, "Value": "def"},
"createdTime": "2017-06-06T18:30:57.000Z",
},
{
"id": "recyXhbY4uax891",
"fields": {"SameField": 789, "Value": "xyz"},
"createdTime": "2017-06-06T18:30:57.000Z",
},
]
@pytest.fixture
def mock_response_single(mock_records):
return mock_records[0]
@pytest.fixture
def mock_response_batch(mock_records):
return {"records": mock_records * 2}
@pytest.fixture
def mock_response_list(mock_records):
return [
{"records": mock_records[0:2], "offset": "recuOeLpF6TQpArJi"},
{"records": [mock_records[2]]},
]
@pytest.fixture
def mock_response_insert(mock_records):
{
"id": "rec9MgW8WhqcbnBx4",
"fields": {
"Editorial": ["recdaBsWECUC2aml3"],
"Persona": "Startup CEO",
"Verticals": ["recpI1hFWtSrbw5XI"],
"Content Types": ["How-to posts", "Tutorials"],
"Notes": "Driven by high impact; looking for ways to implement data driven initiatives",
},
"createdTime": "2017-06-06T18:31:12.000Z",
}
@pytest.fixture
def mock_response_iterator(mock_response_list):
i = iter(mock_response_list)
def _response_iterator(request, context):
return next(i)
return _response_iterator
def http_error():
raise HTTPError("Not Found")
@pytest.fixture
def response():
    """A mocked HTTP response whose raise_for_status() always raises."""
    mocked = Mock()
    mocked.url = "page%20url"
    mocked.raise_for_status.side_effect = http_error
    return mocked
| true | true |
f7f88ee32d093e14d7e3353849f14ba2671cc82e | 13,520 | py | Python | python/infra/DisplayCoverage.py | uofs-simlab/ChasteOS | 04d98998e2ebad3f29086b8eaa1d89c08c6fccf6 | [
"Apache-2.0"
] | null | null | null | python/infra/DisplayCoverage.py | uofs-simlab/ChasteOS | 04d98998e2ebad3f29086b8eaa1d89c08c6fccf6 | [
"Apache-2.0"
] | null | null | null | python/infra/DisplayCoverage.py | uofs-simlab/ChasteOS | 04d98998e2ebad3f29086b8eaa1d89c08c6fccf6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Copyright (c) 2005-2016, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Script to run gcov on source files after a Coverage build has been done,
# and summarise the results.
# The script takes arguments:
# <output_dir> The directory in which to generate summary files and
# an index page.
# <build_type> The build type used; defaults to Coverage.
import itertools
import glob
import os
import sys
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path[0:0] = [parent_path]
import BuildTypes
# Arguments to gcov
# -l Create long file names for included source files.
# -p Preserve complete path information in the names of generated .gcov files.
gcov_flags = ' -lp '
# Get output dir and build type object
# Usage: DisplayCoverage.py <test output dir> [<build type> [<project> ...]]
if len(sys.argv) < 2:
    print "Syntax error."
    print "Usage:", sys.argv[0], "<test output dir> [<build type> [<project> ...]]"
    sys.exit(1)
output_dir = sys.argv[1]
if len(sys.argv) > 2:
    build_type = sys.argv[2]
    projects = sys.argv[3:]  # optional user-project folders to include
else:
    # Default to the standard Coverage build if no type was given
    build_type = 'Coverage'
    projects = []
# Build-type object describing compiler settings and build folder layout
build = BuildTypes.GetBuildType(build_type)
# Remove any old output files/test results from output_dir
for filename in os.listdir(output_dir):
    os.remove(os.path.join(output_dir, filename))
# Find .gcda files to determine which source files to run gcov on
# First, find appropriate build directories
# (each component's build dir, plus any user projects' build dirs)
build_dirs = glob.glob('*/build/' + build.build_dir)
build_dirs.extend(map(lambda p: os.path.join(p, 'build', build.build_dir), projects))
# Now find .gcda files within there
# Each entry records the containing directory and the file name separately.
gcda_files = []
for build_dir in build_dirs:
    for dirpath, dirnames, filenames in os.walk(build_dir):
        for filename in filenames:
            if filename[-5:] == '.gcda':
                gcda_files.append({'dir': dirpath, 'file': filename})
# Run gcov on all the .cpp files which have .gcda files.
for gcda_file in gcda_files:
    # For added interest, the source file to process is in different locations
    # depending on whether it is a test or not.
    if gcda_file['file'][:4] == 'Test' or \
        gcda_file['dir'][-5:] == '/test':
        #gcda_file['dir'].find('/test/') != -1:
        # .cpp file is in the same folder
        os.system('gcov -o ' + gcda_file['dir'] + gcov_flags +
                  os.path.join(gcda_file['dir'], gcda_file['file'][:-4] + 'cpp'))
    else:
        # .cpp file is contained within the Chaste source tree
        # gcda_file['dir'] should look something like mesh/build/coverage/src/reader
        # We then want to look in mesh/src/reader
        try:
            toplevel, rest = gcda_file['dir'].split('build')
        except:
            # Unexpected layout: report which file confused us, then re-raise
            print gcda_file
            raise
        # Get rid of slashes (or system equivalent)
        toplevel = os.path.dirname(toplevel)
        # Drop the '/coverage/'
        rest = rest.split(os.path.sep, 2)[-1]
        # Run gcov
        os.system('gcov -o ' + gcda_file['dir'] + gcov_flags +
                  os.path.join(toplevel, rest, gcda_file['file'][:-4] + 'cpp'))
# Now find all our source files
# (every */src folder except apps/src, plus user projects' src folders)
src_dirs = glob.glob('*/src')
src_dirs.remove('apps/src')
src_dirs.extend(map(lambda p: os.path.join(p, 'src'), projects))
src_files = []
for src_dir in src_dirs:
    for dirpath, dirnames, filenames in os.walk(src_dir):
        for filename in filenames:
            if filename[-4:] in ['.cpp', '.hpp']:
                src_files.append({'dir': dirpath, 'file': filename})
def coverage_ignore(src_file):
    """Whether to ignore the fact that a source file is not used.

    If a file contains only typedefs, for example, this is not an error.
    For .hpp files we check this by looking for the presence of either
    'template' or 'class' at the start of a line.  If neither are found,
    we assume the file contains no real code.

    This will only work if header files don't contain non-template function
    definitions, which should be the case if we're being good programmers.

    Unfortunately the boost serialization tweaking file "SerializationExportWrapper.hpp"
    has some templated definitions which are not code, for this reason we only
    scrape the file for "template" or "class" definitions that are not surrounded
    by COVERAGE_IGNORE.

    :param src_file: dict with 'dir' and 'file' keys locating the source file.
    :returns: True if lack of coverage of this file should not be an error.
    """
    if src_file['dir'].endswith('fortests'):
        # 'Source' code that is only used for tests, and hence coverage doesn't
        # matter.
        return True
    if src_file['file'] in ('triangle.cpp', 'tetgen.cpp', 'predicates.cpp'):
        # We don't try to cover other people's code
        return True
    if src_file['file'] in ('HeartRegionCodes.cpp', 'Version.hpp'):
        # Special cases
        return True
    if src_file['file'][-4:] == '.hpp':
        # Assume a header is ignorable unless we find a real 'template'/'class'
        # definition outside any COVERAGE_IGNORE block.
        # Use a context manager so the handle is closed even if reading raises
        # (the original closed the file manually and could leak on error).
        with open(os.path.join(src_file['dir'], src_file['file'])) as fp:
            code = True
            for line in fp:
                if line.find('#define COVERAGE_IGNORE') != -1:
                    code = False
                elif line.find('#undef COVERAGE_IGNORE') != -1:
                    code = True
                if code and (line.startswith('template') or line.startswith('class ')):
                    return False
        return True
    return False
# Process each source file: merge the per-test .gcov outputs for that file
# into a single annotated listing in output_dir, and classify its coverage.
for src_file in src_files:
    # Mangle the name like gcov does
    mangled_dir = src_file['dir'].replace(os.path.sep, '#')
    # Find .gcov files relating to this source file
    gcov_files = glob.glob('*' + mangled_dir + '#' + src_file['file'] + '.gcov')
    # Open all the files, and an output file
    gcov_fps = [open(gcov_file) for gcov_file in gcov_files]
    out_file_name = os.path.join(output_dir, mangled_dir + '#' + src_file['file'])
    out_file_name = out_file_name.replace('#', '-')
    out_file = open(out_file_name, 'w')
    # Now go through them line by line in lock-step,
    # aggregating line execution counts
    covered_line_count, missed_line_count, warn, ignore = 0, 0, True, False
    for lines in itertools.izip(*gcov_fps):
        aggregated_count = 0
        maybe_not_code, really_uncovered = False, False
        for line in lines:
            # Each gcov line is "count:line_no:source text"
            count, line_no, src_line = line.split(':', 2)
            count, line_no = count.strip(), line_no.strip()
            if src_line.find('#define COVERAGE_IGNORE') != -1:
                ignore = True
                out_file.write("%9s:%5s:%s" % ('ignored', line_no, src_line))
                break
            elif src_line.find('#undef COVERAGE_IGNORE') != -1:
                ignore = False
                out_file.write("%9s:%5s:%s" % ('ignored', line_no, src_line))
                break
            if line_no == 0:
                # This is a gcov header line; what it is doesn't matter
                out_file.write(line)
                break
            if count == '-':
                # This line "isn't code". This may be because it's blank, a comment, or
                # similar. Or it may be because it's within a templated method that hasn't
                # been instantiated in this particular execution, but it might be in another.
                maybe_not_code = True
            elif count == '#####' or count == '=====':
                # The line was really uncovered here, so it must be code.
                # From gcov documentation, # indicates reachable by non-exceptional paths;
                # = only by an exceptional path (e.g. catch block).
                really_uncovered = True
            else:
                aggregated_count += int(count)
        else:
            # NB: for-else — reached only if no 'break' fired above, i.e. this
            # was a real source line (not a gcov header or COVERAGE_IGNORE marker).
            if aggregated_count == 0:
                if maybe_not_code and not really_uncovered:
                    # This really wasn't a code line (or the template is *never* instantiated).
                    # Would be nice to differentiate these cases, but doing so is decidedly
                    # non-trivial.
                    aggregated_count = '-'
                else:
                    src_line_stripped = src_line.strip()
                    # gcov is buggy: it claims some non-code lines are uncovered.
                    # There are some other cases it gets wrong for better reasons too.
                    if not (ignore or src_line_stripped in ['{', '}', 'NEVER_REACHED;'] or
                            (src_line_stripped.startswith('return') and
                             src_line_stripped[6] in [';', ' ']) or
                            src_line_stripped.startswith('TERMINATE(') or
                            src_line_stripped.startswith('assert(DIM') or
                            src_line_stripped.startswith('assert(ELEM_DIM') or
                            src_line_stripped.startswith('assert(SPACE_DIM') or
                            src_line_stripped.startswith('assert(ELEMENT_DIM') or
                            src_line_stripped.startswith('EXCEPT_IF_NOT(ELEMENT_DIM') or
                            src_line_stripped.startswith('#') or
                            src_line_stripped.startswith('EXPORT_TEMPLATE') or
                            src_line_stripped.startswith('template class ') or
                            (src_line_stripped.startswith('virtual ') and src_line_stripped.endswith('(')) or
                            (src_line_stripped.startswith('catch ') and #Line is catch (...)
                             src_line_stripped[-1] == ')') or
                            src_line_stripped.startswith('class ') or
                            #Method definition (possibly). Currently overlaps with previous 'catch' ignore
                            (len(src_line_stripped) > 0 and
                             (src_line_stripped[-1] == ')' or src_line_stripped.endswith(') const')))
                            ):
                        warn = False
                        aggregated_count = '#####'
                        #print 'Full details of coverage: ', src_line_stripped,'\t',src_file,'\t',aggregated_count,'\t', line_no,'\t', src_line
                    else:
                        aggregated_count = 'ignored'
                    missed_line_count += 1
            else:
                covered_line_count += 1
            out_file.write("%9s:%5s:%s" % (aggregated_count, line_no, src_line))
    # Output a summary
    if not gcov_files:
        # No gcov files found for this source file.
        # This may not be an error, if the source file in question is an .hpp file with
        # an associated .cpp file containing all the code for the class.
        ##print src_file
        if src_file['file'][-4:] == '.hpp' and \
            os.path.exists(os.path.join(src_file['dir'], src_file['file'][:-3]+'cpp')):
            status = '' # So output file will be deleted
        else:
            out_file.write("This source file wasn't used at all!\n\nFailed 1 of 1 test\n")
            status = "1_1"
    elif not ignore and missed_line_count == 0:
        out_file.write('\nOK!\n\n')
        status = 'OK'
    else:
        counts = (missed_line_count, missed_line_count+covered_line_count)
        out_file.write('\nFailed %d of %d tests\n\n' % counts)
        status = "%d_%d" % counts
    if warn:
        status = 'warn_' + status
    if ignore:
        status = 'ignore_' + status
    if coverage_ignore(src_file):
        # All special case ignorable files (not just ones with partial coverage)
        status = ''
    # Close all files
    [fp.close() for fp in gcov_fps]
    out_file.close()
    # Alter file name to indicate summary
    # (the DisplayTests.py summariser parses the status out of the file name)
    if status:
        os.rename(out_file_name, out_file_name + '.' + status + '.0')
    else:
        os.remove(out_file_name)
# Now remove .gcov files from the Chaste root directory
for filename in os.listdir('.'):
    if filename[-5:] == '.gcov':
        os.remove(filename)
# And generate a summary page from the per-file results written above
os.system('python python/DisplayTests.py '+output_dir+' '+build_type)
| 45.986395 | 143 | 0.617456 |
"""Copyright (c) 2005-2016, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import itertools
import glob
import os
import sys
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path[0:0] = [parent_path]
import BuildTypes
gcov_flags = ' -lp '
if len(sys.argv) < 2:
print "Syntax error."
print "Usage:", sys.argv[0], "<test output dir> [<build type> [<project> ...]]"
sys.exit(1)
output_dir = sys.argv[1]
if len(sys.argv) > 2:
build_type = sys.argv[2]
projects = sys.argv[3:]
else:
build_type = 'Coverage'
projects = []
build = BuildTypes.GetBuildType(build_type)
for filename in os.listdir(output_dir):
os.remove(os.path.join(output_dir, filename))
build_dirs = glob.glob('*/build/' + build.build_dir)
build_dirs.extend(map(lambda p: os.path.join(p, 'build', build.build_dir), projects))
gcda_files = []
for build_dir in build_dirs:
for dirpath, dirnames, filenames in os.walk(build_dir):
for filename in filenames:
if filename[-5:] == '.gcda':
gcda_files.append({'dir': dirpath, 'file': filename})
for gcda_file in gcda_files:
if gcda_file['file'][:4] == 'Test' or \
gcda_file['dir'][-5:] == '/test':
os.system('gcov -o ' + gcda_file['dir'] + gcov_flags +
os.path.join(gcda_file['dir'], gcda_file['file'][:-4] + 'cpp'))
else:
try:
toplevel, rest = gcda_file['dir'].split('build')
except:
print gcda_file
raise
toplevel = os.path.dirname(toplevel)
rest = rest.split(os.path.sep, 2)[-1]
os.system('gcov -o ' + gcda_file['dir'] + gcov_flags +
os.path.join(toplevel, rest, gcda_file['file'][:-4] + 'cpp'))
src_dirs = glob.glob('*/src')
src_dirs.remove('apps/src')
src_dirs.extend(map(lambda p: os.path.join(p, 'src'), projects))
src_files = []
for src_dir in src_dirs:
for dirpath, dirnames, filenames in os.walk(src_dir):
for filename in filenames:
if filename[-4:] in ['.cpp', '.hpp']:
src_files.append({'dir': dirpath, 'file': filename})
def coverage_ignore(src_file):
"""Whether to ignore the fact that a source file is not used.
If a file contains only typedefs, for example, this is not an error.
For .hpp files we check this by looking for the presence of either
'template' or 'class' at the start of a line. If neither are found,
we assume the file contains no real code.
This will only work if header files don't contain non-template function
definitions, which should be the case if we're being good programmers.
Unfortunately the boost serialization tweaking file "SerializationExportWrapper.hpp"
has some templated definitions which are not code, for this reason we only
scrape the file for "template" or "class" definitions that are not surrounded
by COVERAGE_IGNORE.
"""
ignore = False
if src_file['dir'].endswith('fortests'):
# matter.
ignore = True
elif src_file['file'] == 'triangle.cpp' or src_file['file'] == 'tetgen.cpp' or src_file['file'] == 'predicates.cpp':
# We don't try to cover other people's code
ignore = True
elif src_file['file'] in ['HeartRegionCodes.cpp', 'Version.hpp']:
# Special cases
ignore = True
elif src_file['file'][-4:] == '.hpp':
ignore = True
fp = open(os.path.join(src_file['dir'], src_file['file']))
code = True
for line in fp:
if line.find('
code = False
elif line.find('
code = True
if code and (line.startswith('template') or line.startswith('class ')):
ignore = False
break
fp.close()
return ignore
for src_file in src_files:
# Mangle the name like gcov does
mangled_dir = src_file['dir'].replace(os.path.sep, '
# Find .gcov files relating to this source file
gcov_files = glob.glob('*' + mangled_dir + '
# Open all the files, and an output file
gcov_fps = [open(gcov_file) for gcov_file in gcov_files]
out_file_name = os.path.join(output_dir, mangled_dir + '
out_file_name = out_file_name.replace('
out_file = open(out_file_name, 'w')
# Now go through them line by line in lock-step,
# aggregating line execution counts
covered_line_count, missed_line_count, warn, ignore = 0, 0, True, False
for lines in itertools.izip(*gcov_fps):
aggregated_count = 0
maybe_not_code, really_uncovered = False, False
for line in lines:
count, line_no, src_line = line.split(':', 2)
count, line_no = count.strip(), line_no.strip()
if src_line.find('
ignore = True
out_file.write("%9s:%5s:%s" % ('ignored', line_no, src_line))
break
elif src_line.find('
ignore = False
out_file.write("%9s:%5s:%s" % ('ignored', line_no, src_line))
break
if line_no == 0:
# This is a gcov header line; what it is doesn't matter
out_file.write(line)
break
if count == '-':
maybe_not_code = True
elif count == '#####' or count == '=====':
red = True
else:
aggregated_count += int(count)
else:
if aggregated_count == 0:
if maybe_not_code and not really_uncovered:
# Would be nice to differentiate these cases, but doing so is decidedly
# non-trivial.
aggregated_count = '-'
else:
src_line_stripped = src_line.strip()
# gcov is buggy: it claims some non-code lines are uncovered.
# There are some other cases it gets wrong for better reasons too.
if not (ignore or src_line_stripped in ['{', '}', 'NEVER_REACHED;'] or
(src_line_stripped.startswith('return') and
src_line_stripped[6] in [';', ' ']) or
src_line_stripped.startswith('TERMINATE(') or
src_line_stripped.startswith('assert(DIM') or
src_line_stripped.startswith('assert(ELEM_DIM') or
src_line_stripped.startswith('assert(SPACE_DIM') or
src_line_stripped.startswith('assert(ELEMENT_DIM') or
src_line_stripped.startswith('EXCEPT_IF_NOT(ELEMENT_DIM') or
src_line_stripped.startswith('
src_line_stripped.startswith('EXPORT_TEMPLATE') or
src_line_stripped.startswith('template class ') or
(src_line_stripped.startswith('virtual ') and src_line_stripped.endswith('(')) or
(src_line_stripped.startswith('catch ') and #Line is catch (...)
src_line_stripped[-1] == ')') or
src_line_stripped.startswith('class ') or
#Method definition (possibly). Currently overlaps with previous 'catch' ignore
(len(src_line_stripped) > 0 and
(src_line_stripped[-1] == ')' or src_line_stripped.endswith(') const')))
):
warn = False
aggregated_count = ' #print 'Full details of coverage: ', src_line_stripped,'\t',src_file,'\t',aggregated_count,'\t', line_no,'\t', src_line
else:
aggregated_count = 'ignored'
missed_line_count += 1
else:
covered_line_count += 1
out_file.write("%9s:%5s:%s" % (aggregated_count, line_no, src_line))
# Output a summary
if not gcov_files:
# No gcov files found for this source file.
# This may not be an error, if the source file in question is an .hpp file with
# an associated .cpp file containing all the code for the class.
##print src_file
if src_file['file'][-4:] == '.hpp' and \
os.path.exists(os.path.join(src_file['dir'], src_file['file'][:-3]+'cpp')):
status = '' # So output file will be deleted
else:
out_file.write("This source file wasn't used at all!\n\nFailed 1 of 1 test\n")
status = "1_1"
elif not ignore and missed_line_count == 0:
out_file.write('\nOK!\n\n')
status = 'OK'
else:
counts = (missed_line_count, missed_line_count+covered_line_count)
out_file.write('\nFailed %d of %d tests\n\n' % counts)
status = "%d_%d" % counts
if warn:
status = 'warn_' + status
if ignore:
status = 'ignore_' + status
if coverage_ignore(src_file):
status = ''
[fp.close() for fp in gcov_fps]
out_file.close()
if status:
os.rename(out_file_name, out_file_name + '.' + status + '.0')
else:
os.remove(out_file_name)
for filename in os.listdir('.'):
if filename[-5:] == '.gcov':
os.remove(filename)
os.system('python python/DisplayTests.py '+output_dir+' '+build_type)
| false | true |
f7f88fa8748e9e687c730de4d73ba1cc49ab3ac0 | 317 | py | Python | scripts/exercicios/ex107.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex107.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex107.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | from utilidadesCev import moeda
preco = float(input('Digite o preço: R$ '))
print(f'A metade de R${preco} é R${moeda.metade(preco)}')
print(f'O dobro de R${preco} é R${moeda.dobro(preco)}')
print(f'Aumentando 10%, temos R${moeda.aumentar(preco)}')
print(f'Diminuindo 13%, temos R${moeda.diminuir(preco)}')
print()
| 31.7 | 58 | 0.697161 | from utilidadesCev import moeda
preco = float(input('Digite o preço: R$ '))
print(f'A metade de R${preco} é R${moeda.metade(preco)}')
print(f'O dobro de R${preco} é R${moeda.dobro(preco)}')
print(f'Aumentando 10%, temos R${moeda.aumentar(preco)}')
print(f'Diminuindo 13%, temos R${moeda.diminuir(preco)}')
print()
| true | true |
f7f891cbec2d7de7bfaa07c51a7bbc0169f3390c | 408 | py | Python | generator/modules/onnx.py | dayta-ai/deepo | fa720e39052e63adfe0f2b9dbd8444a0d69c2540 | [
"MIT"
] | 1 | 2021-11-18T18:34:29.000Z | 2021-11-18T18:34:29.000Z | generator/modules/onnx.py | dayta-ai/deepo | fa720e39052e63adfe0f2b9dbd8444a0d69c2540 | [
"MIT"
] | null | null | null | generator/modules/onnx.py | dayta-ai/deepo | fa720e39052e63adfe0f2b9dbd8444a0d69c2540 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source
from .python import Python
@dependency(Python)
@source('pip')
class Onnx(Module):
    """Deepo generator module for ONNX.

    Installs the protobuf toolchain via apt (build dependency), then the
    onnx package itself via pip.  Depends on the Python module being built.
    """
    def build(self):
        # Returns the Dockerfile fragment appended to the generated image.
        return r'''
RUN DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
    protobuf-compiler \
    libprotoc-dev \
    && \
    $PIP_INSTALL \
    onnx
'''
| 21.473684 | 57 | 0.536765 |
from .__module__ import Module, dependency, source
from .python import Python
@dependency(Python)
@source('pip')
class Onnx(Module):
def build(self):
return r'''
RUN DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
protobuf-compiler \
libprotoc-dev \
&& \
$PIP_INSTALL \
onnx
'''
| true | true |
f7f8928fd3787be3903e58e17905e46d770f2c88 | 347 | py | Python | toontown/rpc/AwardManagerUD.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2020-02-07T18:15:12.000Z | 2020-02-07T18:15:12.000Z | toontown/rpc/AwardManagerUD.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/rpc/AwardManagerUD.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
class AwardManagerUD(DistributedObjectGlobalUD):
    """UberDOG-side global object for granting awards to toons (currently a stub)."""
    # Logging category for this distributed object
    notify = DirectNotifyGlobal.directNotify.newCategory("AwardManagerUD")
    def giveAwardToToon(self, todo0, todo1, todo2, todo3, todo4, todo5):
        # Stub handler; parameter names (todo0-todo5) are auto-generated
        # placeholders — presumably from the network (DC) definition. TODO confirm.
        pass
| 34.7 | 82 | 0.818444 | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
class AwardManagerUD(DistributedObjectGlobalUD):
notify = DirectNotifyGlobal.directNotify.newCategory("AwardManagerUD")
def giveAwardToToon(self, todo0, todo1, todo2, todo3, todo4, todo5):
pass
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.