hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4826c33c1d16a74af01fb85c32290195d70209
| 2,439
|
py
|
Python
|
examples/dummynode.py
|
sq8kfh/pyh9
|
7b1f05709849c30cd6c9086c6539e33106aa5fa2
|
[
"MIT"
] | null | null | null |
examples/dummynode.py
|
sq8kfh/pyh9
|
7b1f05709849c30cd6c9086c6539e33106aa5fa2
|
[
"MIT"
] | null | null | null |
examples/dummynode.py
|
sq8kfh/pyh9
|
7b1f05709849c30cd6c9086c6539e33106aa5fa2
|
[
"MIT"
] | null | null | null |
import asyncio
import h9.asyncmsgstream
from h9.msg import H9ExecuteMethod, H9SendFrame, H9Frame
node_id = 32
dev_des=[0, 5, 0, 1] #type_h, type_l, version_major, version_minor
seqnum = -1
reg_10 = 0
def get_next_seqnum():
global seqnum
seqnum = seqnum + 1
seqnum = seqnum % 32
return seqnum
def procces_frame(conn, frame):
global reg_10
print(frame.frametype)
if frame.frametype == H9Frame.FrameType.GET_REG:
if frame.data[0] == 10:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_VALUE, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.SET_REG:
if frame.data[0] == 10:
reg_10 = frame.data[1]
reg_10 = reg_10 % 9
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_EXTERNALLY_CHANGED, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.DISCOVER:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_INFO, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=dev_des)
conn.writemsg(res)
async def run():
conn = h9.asyncmsgstream.H9msgStream("127.0.0.1", 7878)
await conn.connect()
exec_method = H9ExecuteMethod("subscribe")
exec_method.value = {'event': 'frame'}
conn.writemsg(exec_method)
frame = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_TURNED_ON, seqnum=get_next_seqnum(),
source=node_id,
destination=511, data=dev_des)
conn.writemsg(frame)
while True:
recv_msg = await conn.readmsg()
if isinstance(recv_msg, H9Frame) and (recv_msg.destination == node_id or recv_msg.destination == 511):
procces_frame(conn, recv_msg)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(run())
finally:
loop.close()
| 35.347826
| 110
| 0.609266
|
import asyncio
import h9.asyncmsgstream
from h9.msg import H9ExecuteMethod, H9SendFrame, H9Frame
node_id = 32
dev_des=[0, 5, 0, 1]
seqnum = -1
reg_10 = 0
def get_next_seqnum():
global seqnum
seqnum = seqnum + 1
seqnum = seqnum % 32
return seqnum
def procces_frame(conn, frame):
global reg_10
print(frame.frametype)
if frame.frametype == H9Frame.FrameType.GET_REG:
if frame.data[0] == 10:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_VALUE, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.SET_REG:
if frame.data[0] == 10:
reg_10 = frame.data[1]
reg_10 = reg_10 % 9
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_EXTERNALLY_CHANGED, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.DISCOVER:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_INFO, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=dev_des)
conn.writemsg(res)
async def run():
conn = h9.asyncmsgstream.H9msgStream("127.0.0.1", 7878)
await conn.connect()
exec_method = H9ExecuteMethod("subscribe")
exec_method.value = {'event': 'frame'}
conn.writemsg(exec_method)
frame = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_TURNED_ON, seqnum=get_next_seqnum(),
source=node_id,
destination=511, data=dev_des)
conn.writemsg(frame)
while True:
recv_msg = await conn.readmsg()
if isinstance(recv_msg, H9Frame) and (recv_msg.destination == node_id or recv_msg.destination == 511):
procces_frame(conn, recv_msg)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(run())
finally:
loop.close()
| true
| true
|
1c482777e51dc00263580068f3d916b2c4437bbe
| 1,574
|
py
|
Python
|
resources/property.py
|
codeforpdx/dwellinglybackend
|
92fee6d19a68ae00750927b8700eaa7195b57668
|
[
"MIT"
] | 15
|
2020-07-09T20:51:09.000Z
|
2021-11-28T21:59:02.000Z
|
resources/property.py
|
codeforpdx/dwellinglybackend
|
92fee6d19a68ae00750927b8700eaa7195b57668
|
[
"MIT"
] | 148
|
2020-03-28T22:10:30.000Z
|
2021-12-19T09:22:59.000Z
|
resources/property.py
|
codeforpdx/dwellinglybackend
|
92fee6d19a68ae00750927b8700eaa7195b57668
|
[
"MIT"
] | 30
|
2020-03-12T02:31:27.000Z
|
2021-07-29T02:40:36.000Z
|
from flask_restful import Resource
from flask import request
from utils.authorizations import admin_required
from db import db
from models.property import PropertyModel
from schemas.property import PropertySchema
class Property(Resource):
@admin_required
def get(self, id):
return PropertyModel.find(id).json(include_tenants=True)
@admin_required
def delete(self, id):
PropertyModel.delete(id)
return {"message": "Property deleted"}
@admin_required
def put(self, id):
property = PropertyModel.find(id)
return property.update(
schema=PropertySchema,
context={"name": property.name},
payload=request.json,
).json()
class Properties(Resource):
@admin_required
def get(self):
return {"properties": PropertyModel.query.json()}
@admin_required
def post(self):
return (
PropertyModel.create(schema=PropertySchema, payload=request.json).json(),
201,
)
class ArchiveProperties(Resource):
@admin_required
def patch(self):
if not ("ids" in request.json and type(request.json["ids"]) is list):
return {"message": "Property IDs missing in request"}, 400
properties = []
for id in request.json["ids"]:
property = PropertyModel.find(id)
property.archived = True
properties.append(property)
db.session.bulk_save_objects(properties)
db.session.commit()
return {"properties": PropertyModel.query.json()}
| 27.137931
| 85
| 0.644854
|
from flask_restful import Resource
from flask import request
from utils.authorizations import admin_required
from db import db
from models.property import PropertyModel
from schemas.property import PropertySchema
class Property(Resource):
@admin_required
def get(self, id):
return PropertyModel.find(id).json(include_tenants=True)
@admin_required
def delete(self, id):
PropertyModel.delete(id)
return {"message": "Property deleted"}
@admin_required
def put(self, id):
property = PropertyModel.find(id)
return property.update(
schema=PropertySchema,
context={"name": property.name},
payload=request.json,
).json()
class Properties(Resource):
@admin_required
def get(self):
return {"properties": PropertyModel.query.json()}
@admin_required
def post(self):
return (
PropertyModel.create(schema=PropertySchema, payload=request.json).json(),
201,
)
class ArchiveProperties(Resource):
@admin_required
def patch(self):
if not ("ids" in request.json and type(request.json["ids"]) is list):
return {"message": "Property IDs missing in request"}, 400
properties = []
for id in request.json["ids"]:
property = PropertyModel.find(id)
property.archived = True
properties.append(property)
db.session.bulk_save_objects(properties)
db.session.commit()
return {"properties": PropertyModel.query.json()}
| true
| true
|
1c4827c357cf7a405de0181536ad034ca79debe7
| 124
|
py
|
Python
|
scripts/secrets.py
|
Aviah/one-click-django-dev-ubuntu-14-04-trusty
|
b6f5da980185eedde8a7a99f7efe76304c6f5c40
|
[
"MIT"
] | 10
|
2016-03-22T22:14:40.000Z
|
2021-07-23T22:00:02.000Z
|
scripts/secrets.py
|
Aviah/one-click-django-dev-ubuntu-14-04-trusty
|
b6f5da980185eedde8a7a99f7efe76304c6f5c40
|
[
"MIT"
] | 1
|
2017-06-03T12:11:47.000Z
|
2017-06-03T12:11:47.000Z
|
scripts/secrets.py
|
Aviah/one-click-django-dev-osx-el-capitan
|
ea6832f57e126d30499c9bc66c5b4c77d0ef4020
|
[
"MIT"
] | 4
|
2016-04-05T05:41:15.000Z
|
2017-01-08T10:03:25.000Z
|
# Add here secrets, password etc you don't want to keep in the repository
# e.g. django SECRET_KEY, database credentials etc
| 62
| 73
| 0.782258
|
# e.g. django SECRET_KEY, database credentials etc
| true
| true
|
1c482859e6dd971e0ebdc01fe98a1798be6c2f40
| 1,548
|
py
|
Python
|
setup.py
|
pashtetbezd/miracles-server
|
131071d1a4add240151ef55fe9c4f9ff9f5261cc
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
pashtetbezd/miracles-server
|
131071d1a4add240151ef55fe9c4f9ff9f5261cc
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
pashtetbezd/miracles-server
|
131071d1a4add240151ef55fe9c4f9ff9f5261cc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: put package requirements here
'connexion[swagger-ui]',
'connexion==2.6.0',
'sqlalchemy>=1.3.13',
'SQLAlchemy-serializer',
'psycopg2>=2.8.4',
'alembic==1.4.2',
'rauth',
'pyjwt',
'flask-socketio',
'redis',
'eventlet',
'six'
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='connexion_sql_utils',
version='0.1.4',
description="Sqlalchemy, Postgres, Connexion utility",
long_description=readme + '\n\n' + history,
author="Michael Housh",
author_email='mhoush@houshhomeenergy.com',
url='https://github.com/m-housh/connexion_sql_utils',
packages=[
'connexion_sql_utils',
],
package_dir={'connexion_sql_utils':
'connexion_sql_utils'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='connexion_sql_utils',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
| 25.377049
| 58
| 0.633075
|
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'connexion[swagger-ui]',
'connexion==2.6.0',
'sqlalchemy>=1.3.13',
'SQLAlchemy-serializer',
'psycopg2>=2.8.4',
'alembic==1.4.2',
'rauth',
'pyjwt',
'flask-socketio',
'redis',
'eventlet',
'six'
]
test_requirements = [
]
setup(
name='connexion_sql_utils',
version='0.1.4',
description="Sqlalchemy, Postgres, Connexion utility",
long_description=readme + '\n\n' + history,
author="Michael Housh",
author_email='mhoush@houshhomeenergy.com',
url='https://github.com/m-housh/connexion_sql_utils',
packages=[
'connexion_sql_utils',
],
package_dir={'connexion_sql_utils':
'connexion_sql_utils'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='connexion_sql_utils',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
| true
| true
|
1c482950e64a9537a2996df66ed9403e53cf8a71
| 44,005
|
py
|
Python
|
tensorflow/contrib/tpu/python/tpu/tpu.py
|
jiefangxuanyan/tensorflow
|
f78fd433118830482dddbf6055751898a19265de
|
[
"Apache-2.0"
] | 1
|
2021-05-03T12:10:38.000Z
|
2021-05-03T12:10:38.000Z
|
tensorflow/contrib/tpu/python/tpu/tpu.py
|
jiefangxuanyan/tensorflow
|
f78fd433118830482dddbf6055751898a19265de
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/tpu/python/tpu/tpu.py
|
jiefangxuanyan/tensorflow
|
f78fd433118830482dddbf6055751898a19265de
|
[
"Apache-2.0"
] | 1
|
2018-06-12T01:58:06.000Z
|
2018-06-12T01:58:06.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Operations that indicate some error in the users graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
"Placeholder",
])
# These operations will currently fail to compile, but we should be able to
# support them eventually via CPU offload or extending our operation set.
_NOT_IMPLEMENTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
def initialize_system(embedding_config=None, job=None):
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, an `EmbeddingLayerConfiguration` proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX)
that contains the TPU devices that will be initialized. If job=None
it is assumed there is only one job in the TensorFlow flock, and an
error will be returned if this assumption does not hold.
Returns:
A serialized `TopologyProto` that describes the TPU system. Note:
the topology must be evaluated using `Session.run` before it can be used.
"""
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
def shutdown_system(job=None):
"""Shuts down a running a distributed TPU system."""
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def core(num):
"""Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to `tf.device()`.
"""
return "device:TPU_REPLICATED_CORE:{}".format(num)
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU computation.
The primary role of `TPUReplicateContext` is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a `ControlFlowContext` to perform the annotation since it
integrates with Tensorflow constructs like ResourceVariables. For example,
if a `ResourceVariable` is constructed inside a tpu.replicate() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the replicated computation.
"""
def __init__(self, name, num_replicas, pivot):
"""Builds a new TPUReplicateContext.
Args:
name: a unique name for the context, used to populate the `_tpu_replicate`
attribute.
num_replicas: an integer that gives the number of replicas for the
computation.
pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._unsupported_ops = []
self._pivot = pivot
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op, gradient_uid):
if op is not None:
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an
# outside_compilation cluster C in a forward computation we
# would like to put the ops corresponding to the gradient of
# X into a new outside_compilation cluster C'. However, if
# we take the gradient of X twice, the second one should get
# yet another new outside_compilation cluster C''.
#
# The mechanism we adopt is to use a 'root_cluster' which is
# the cluster that X was in before we took gradients, and a
# 'gradient_uid' which is different for every invocation of
# gradients, and put the gradient of X in cluster
# 'root_cluster.gradient_uid'.
#
# When taking a gradient of a gradient, some ops will be
# colocated with Op in the forward pass (e.g., cluster
# root_cluster) and some in the backward pass (e.g., cluster
# root_cluster.initial_gradient_uid). We need all of the
# grad-of-grad ops to be in the same cluster to avoid cyclic
# dependencies between clusters. We adopt a heuristic that
# puts any op clustered with root_cluster.<xxx> in
# root_cluster.gradient_uid, even if xxx was
# initial_gradient_uid.
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
# The attr was not present: do nothing.
pass
def ExitGradientColocation(self, op, gradient_uid):
if op is not None:
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
raise errors.InternalError(
op.node_def, op, "Badly nested gradient colocation, expected " +
last_op + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster=None):
class FakeOp(object):
"""A helper class to determine the current device.
Supports only the device set/get methods needed to run the
graph's _apply_device_function method.
"""
def __init__(self):
self._device = ""
@property
def device(self):
return self._device
def _set_device(self, device):
self._device = device.to_string()
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op) # pylint: disable=protected-access
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack # pylint: disable=protected-access
graph._device_function_stack = self._outer_device_function_stack # pylint: disable=protected-access
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack # pylint: disable=protected-access
def Enter(self):
if not self._outer_device_function_stack:
# Capture the device function stack at the time of first entry
# since that is the stack that will be used outside_compilation.
graph = ops.get_default_graph()
self._outer_device_function_stack = list(graph._device_function_stack) # pylint: disable=protected-access
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self):
return self._host_compute_core
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _NOT_IMPLEMENTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr(_TPU_REPLICATE_ATTR,
attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
# Prevent feeding or fetching anything that is being compiled,
# and any replicated outside_compilation Op.
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors.
control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot())
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def outside_compilation(computation, *args, **kwargs):
"""Builds part of a computation outside any current TPU replicate scope.
Args:
computation: A Python function that builds the computation to
place on the host.
*args: the positional arguments for the computation.
**kwargs: the keyword arguments for the computation.
Returns:
The Tensors returned by computation.
"""
args = [] if args is None else args
graph = ops.get_default_graph()
# If we are in a TPUReplicateContext, signal that we are now
# outside_compilation
initial_context = graph._get_control_flow_context() # pylint: disable=protected-access
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._EnterOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
retval = computation(*args, **kwargs)
# If we are in a TPUReplicateContext, signal that we are no longer
# outside_compilation
final_context = graph._get_control_flow_context() # pylint: disable=protected-access
if initial_context is not final_context:
raise NotImplementedError(
"Control-flow context cannot be different at start and end of an "
"outside_compilation scope")
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._ExitOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
return retval
def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of lists of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
"""
return split_compile_and_replicate(computation, inputs, infeed_queue,
device_assignment, name)[1]
def split_compile_and_replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
use_tpu=True):
"""Builds graph operators that runs compilation and replicated computation.
This is a lower level interface than replicate that returns a separate compile
and execute output tensor. In the generated graph the compile op feeds into
the execute op and no additional compilation is incurred when running the
compile op before the execute op. The compile op returns additional
information about the compilation but does not return the compiled program.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
backends. Currently, only supports a default placement (computation is
placed on GPU if one is available, and on CPU if not).
Returns:
A list of lists with the first list corresponding to the compile op and the
second a list of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
"""
del name
inputs = [[]] if inputs is None else inputs
metadata_kwargs = {}
if device_assignment is not None:
# Turn the Numpy array into a flattened list so we can pass it as an
# operator attribute.
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist(),
"computation_shape":
device_assignment.computation_shape.tolist()
}
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Converts inputs to Tensors.
inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
# Verifies that all replicas have matching numbers and types of inputs
input_types = [x.dtype for x in inputs[0]]
input_arity = len(input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in inputs[i]]
if types != input_types:
raise ValueError(
"Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
input_types, i, types))
arg_error = tpu_function.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
graph = ops.get_default_graph()
# Fan-in: Builds a TPUReplicatedInput node for each input.
computation_inputs = []
for i in range(0, input_arity):
replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
computation_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
context = TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
# The EncapsulateTPUComputations rewrite needs to identify the
# replicated arguments inside each computation. Adds identity operators
# tagged with an attribute _tpu_replicated_input to identify the
# replicated inputs.
# pylint: disable=protected-access
with graph._attr_scope({"_tpu_replicated_input":
attr_value_pb2.AttrValue(b=True)}):
computation_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(computation_inputs)]
# pylint: enable=protected-access
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
# If the computation returns `None`, add `no_op` here so that when user
# fetches `no_op` returned by this function, the TPUExecute node will be
# triggered.
if outputs is None:
outputs = (control_flow_ops.no_op(),)
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
output_arity = len(output_tensors)
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
new_output_tensors.append(array_ops.identity(t))
output_tensors = new_output_tensors
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
# Fan-out: Builds a TPUReplicatedOutput node for each output.
outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
name="output{}".format(i))
for i in xrange(output_arity)]
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
with ops.control_dependencies(output_operations):
if output_arity == 0:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status, [
control_flow_ops.no_op(name="shard_%d" % i)
for i in range(num_replicas)
]
]
else:
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
return [
compile_status, [[
array_ops.identity(
outputs[out][replica],
name="output_%d_shard_%d" % (out, replica))
for out in xrange(output_arity)
]
for replica in xrange(num_replicas)]
]
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty
list), each of which has a corresponding split axis (from
`input_shard_axes`). Each input is split into `num_shards` pieces
along the corresponding axis, and computation is applied to each
shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shards_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty
list). Each input tensor has a corresponding shard axes, given
by `input_shard_axes`, which must have size divisible by
`num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
outputs = replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return results
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
device_assignment=None,
name=None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty
list). Each input is split into `num_shards` pieces along the 0-th
dimension, and computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty
list). The 0-th dimension of each Tensor must have size
divisible by `num_shards`.
num_shards: The number of shards.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
Raises:
ValueError: If `num_shards <= 0`
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
def rewrite(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: A Python function that builds a computation to apply
to the input. If the function takes n inputs, 'inputs' should be
a list of n tensors. If the function returns m outputs, rewrite
will return a list of m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: (Deprecated) Does nothing.
Returns:
A list of output tensors.
"""
if inputs is not None and not isinstance(inputs, (list, tuple)):
raise TypeError("tpu.rewrite() inputs must be a list or tuple")
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[0]
# pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU inference computation.
The primary role of `TPUReplicateContext` is to sanity check operators inside
a tpu.rewrite_for_inference() computation.
"""
def __init__(self, name):
super(_TPUInferenceContext, self).__init__()
self._name = name
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"
" variables are using variable_scope." % (op.type, op.name))
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
@property
def grad_state(self):
return None
@experimental
def validate_inference_rewrite_for_variables(graph):
"""Validates whether rewrite_for_inference() 'worked' for variables.
The rewrite_for_inference() method is supposed to append
GuaranteeConstOps after ReadVariableOps, but this mechanism works only
if you are using tf.get_variable() to create and access variables in your
tpu computation. This validation method can be called immediately after
calling tpu.rewrite_for_inference() to check whether GuaranteeConstOps
where added to the graph.
Typical usages:
tpu.validate_inference_rewrite_for_variables(tf.get_default_graph())
tpu.validate_inference_rewrite_for_variables(sess.graph)
Args:
graph: The graph which needs to be validated.
Raises:
RuntimeError: if validation failed.
"""
if not any([x.type == "GuaranteeConst" for x in graph.get_operations()]):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after "
"running tpu.rewrite_for_inference(...). Please "
"check that you are using tf.get_variable() to "
"create and access variables in your tpu "
"computation.")
@experimental
def rewrite_for_inference(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for inference on a TPU system.
Other than 'rewriting' the computation to run on a TPU, if using variables
in your computation, it moves the ReadVariableOps outside the TPU
computation, and adds GuaranteeConst ops just after the ReadVariableOps.
This mechanism works only if you are using tf.get_variable() to create and
access variables in your tpu computation. You can validate whether
this worked, by calling validate_inference_rewrite_for_variables() method
immediately after this method to check whether GuaranteeConstOps where
added to the graph.
Args:
computation: A Python function that builds a computation to apply
to the input. If the function takes n inputs, 'inputs' should be
a list of n tensors. If the function returns m outputs, rewrite
will return a list of m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: The name of the operator.
Returns:
A list of output tensors.
"""
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
"""Execute computation under `_TPUInferenceContext`."""
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
# pylint: disable=undefined-variable
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# pylint: enable=undefined-variable
| 39.823529
| 112
| 0.695785
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_BLACKLISTED_OPS = set([
"Placeholder",
])
# These operations will currently fail to compile, but we should be able to
# support them eventually via CPU offload or extending our operation set.
_NOT_IMPLEMENTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
def initialize_system(embedding_config=None, job=None):
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
def shutdown_system(job=None):
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def core(num):
return "device:TPU_REPLICATED_CORE:{}".format(num)
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
def __init__(self, name, num_replicas, pivot):
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._unsupported_ops = []
self._pivot = pivot
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op, gradient_uid):
if op is not None:
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an
# outside_compilation cluster C in a forward computation we
# would like to put the ops corresponding to the gradient of
# X into a new outside_compilation cluster C'. However, if
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
pass
def ExitGradientColocation(self, op, gradient_uid):
if op is not None:
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
raise errors.InternalError(
op.node_def, op, "Badly nested gradient colocation, expected " +
last_op + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster=None):
class FakeOp(object):
def __init__(self):
self._device = ""
@property
def device(self):
return self._device
def _set_device(self, device):
self._device = device.to_string()
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op)
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack
graph._device_function_stack = self._outer_device_function_stack
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack
def Enter(self):
if not self._outer_device_function_stack:
graph = ops.get_default_graph()
self._outer_device_function_stack = list(graph._device_function_stack)
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self):
return self._host_compute_core
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _NOT_IMPLEMENTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr(_TPU_REPLICATE_ATTR,
attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
if not op.inputs:
if not control_inputs:
op._add_control_input(self.GetControlPivot())
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x)
if external_inputs:
with ops.control_dependencies(None):
self.Enter()
external_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
context._values.update(output_names)
context = context._outer_context
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
if val.name in self._values:
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
return None
@property
def back_prop(self):
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def outside_compilation(computation, *args, **kwargs):
args = [] if args is None else args
graph = ops.get_default_graph()
initial_context = graph._get_control_flow_context()
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._EnterOutsideCompilationScope()
context = context.outer_context
retval = computation(*args, **kwargs)
final_context = graph._get_control_flow_context()
if initial_context is not final_context:
raise NotImplementedError(
"Control-flow context cannot be different at start and end of an "
"outside_compilation scope")
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._ExitOutsideCompilationScope()
context = context.outer_context
return retval
def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
return split_compile_and_replicate(computation, inputs, infeed_queue,
device_assignment, name)[1]
def split_compile_and_replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
use_tpu=True):
del name
inputs = [[]] if inputs is None else inputs
metadata_kwargs = {}
if device_assignment is not None:
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist(),
"computation_shape":
device_assignment.computation_shape.tolist()
}
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
if num_replicas == 0:
return []
inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
input_types = [x.dtype for x in inputs[0]]
input_arity = len(input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in inputs[i]]
if types != input_types:
raise ValueError(
"Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
input_types, i, types))
arg_error = tpu_function.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
graph = ops.get_default_graph()
computation_inputs = []
for i in range(0, input_arity):
replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
computation_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
context = TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
with graph._attr_scope({"_tpu_replicated_input":
attr_value_pb2.AttrValue(b=True)}):
computation_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(computation_inputs)]
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
# If the computation returns `None`, add `no_op` here so that when user
# fetches `no_op` returned by this function, the TPUExecute node will be
# triggered.
if outputs is None:
outputs = (control_flow_ops.no_op(),)
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
output_arity = len(output_tensors)
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
new_output_tensors.append(array_ops.identity(t))
output_tensors = new_output_tensors
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
# Fan-out: Builds a TPUReplicatedOutput node for each output.
outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
name="output{}".format(i))
for i in xrange(output_arity)]
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
with ops.control_dependencies(output_operations):
if output_arity == 0:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status, [
control_flow_ops.no_op(name="shard_%d" % i)
for i in range(num_replicas)
]
]
else:
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
return [
compile_status, [[
array_ops.identity(
outputs[out][replica],
name="output_%d_shard_%d" % (out, replica))
for out in xrange(output_arity)
]
for replica in xrange(num_replicas)]
]
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
outputs = replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return results
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
device_assignment=None,
name=None):
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
def rewrite(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
if inputs is not None and not isinstance(inputs, (list, tuple)):
raise TypeError("tpu.rewrite() inputs must be a list or tuple")
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[0]
# pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
def __init__(self, name):
super(_TPUInferenceContext, self).__init__()
self._name = name
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
if op.type in _BLACKLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"
" variables are using variable_scope." % (op.type, op.name))
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
@property
def grad_state(self):
return None
@experimental
def validate_inference_rewrite_for_variables(graph):
if not any([x.type == "GuaranteeConst" for x in graph.get_operations()]):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after "
"running tpu.rewrite_for_inference(...). Please "
"check that you are using tf.get_variable() to "
"create and access variables in your tpu "
"computation.")
@experimental
def rewrite_for_inference(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
| true
| true
|
1c48297f4c379fead9dd0d6d37e22bd65db66e33
| 1,778
|
py
|
Python
|
bot_plugins/weather.py
|
UtopiaXC/Utopia-Bot-For-QQ
|
87281f509e20c2d5d25367614d5202f6e53cea50
|
[
"MIT"
] | 5
|
2021-03-25T15:18:18.000Z
|
2021-03-31T02:29:28.000Z
|
bot_plugins/weather.py
|
UtopiaXC/Utopia-Bot-For-QQ
|
87281f509e20c2d5d25367614d5202f6e53cea50
|
[
"MIT"
] | null | null | null |
bot_plugins/weather.py
|
UtopiaXC/Utopia-Bot-For-QQ
|
87281f509e20c2d5d25367614d5202f6e53cea50
|
[
"MIT"
] | null | null | null |
from nonebot.command import CommandSession
from services.common import ServiceException
from services.weather import get_current_weather_short, get_current_weather_desc
from nonebot.natural_language import NLPSession, IntentCommand
from nonebot.experimental.plugin import on_command, on_natural_language
from jieba import posseg
__plugin_name__ = '天气'
__plugin_usage__ = (
'用法:\n'
'对我说 “天气 香港” 获取天气简要\n'
'“天气 香港 详细” 获取当前天气的详细报告'
)
weather_permission = lambda sender: (not sender.is_privatechat) or sender.is_superuser
@on_command('weather', aliases=('气温', '天气'), permission=weather_permission)
async def _(session: CommandSession):
# 若用户对机器人说“天气”,则此变量为 `['']`
# 若用户对机器人说“天气 香港”,则此变量为 `['香港']`
# 若用户对机器人说“天气 香港 详细”,则此变量为 `['香港', '详细']`
args = session.current_arg_text.strip().split(' ', 1)
if not args[0]:
city = await session.aget(key='city', prompt='请问是什么城市呢?', at_sender=True)
else:
city = args[0]
is_detailed = (len(args) == 2 and args[1].__contains__('详')) or session.state.get('is_detailed')
try:
func = get_current_weather_desc if is_detailed else get_current_weather_short
result = await func(city)
except ServiceException as e:
result = e.message
await session.send(result)
# 只要消息包含“天气”,就执行此处理器
@on_natural_language(keywords={'天气'}, permission=weather_permission)
async def _(session: NLPSession):
# 使用 jieba 将消息句子分词
words = posseg.lcut(session.msg_text.strip())
args = {}
for word in words:
if word.flag == 'ns': # ns 表示该词为地名
args['city'] = word.word
elif word.word in ('详细', '报告', '详情'):
args['is_detailed'] = True
# 置信度为 90,意为将此会话当作 'weather' 命令处理
return IntentCommand(90, 'weather', args=args)
| 29.633333
| 100
| 0.68279
|
from nonebot.command import CommandSession
from services.common import ServiceException
from services.weather import get_current_weather_short, get_current_weather_desc
from nonebot.natural_language import NLPSession, IntentCommand
from nonebot.experimental.plugin import on_command, on_natural_language
from jieba import posseg
__plugin_name__ = '天气'
__plugin_usage__ = (
'用法:\n'
'对我说 “天气 香港” 获取天气简要\n'
'“天气 香港 详细” 获取当前天气的详细报告'
)
weather_permission = lambda sender: (not sender.is_privatechat) or sender.is_superuser
@on_command('weather', aliases=('气温', '天气'), permission=weather_permission)
async def _(session: CommandSession):
args = session.current_arg_text.strip().split(' ', 1)
if not args[0]:
city = await session.aget(key='city', prompt='请问是什么城市呢?', at_sender=True)
else:
city = args[0]
is_detailed = (len(args) == 2 and args[1].__contains__('详')) or session.state.get('is_detailed')
try:
func = get_current_weather_desc if is_detailed else get_current_weather_short
result = await func(city)
except ServiceException as e:
result = e.message
await session.send(result)
@on_natural_language(keywords={'天气'}, permission=weather_permission)
async def _(session: NLPSession):
words = posseg.lcut(session.msg_text.strip())
args = {}
for word in words:
if word.flag == 'ns':
args['city'] = word.word
elif word.word in ('详细', '报告', '详情'):
args['is_detailed'] = True
return IntentCommand(90, 'weather', args=args)
| true
| true
|
1c4829860384987e89e27fe3bc17e0a11f6813fc
| 12,746
|
py
|
Python
|
grid_search_loop/tr5000_N200/ESNtrainCV.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | 1
|
2020-11-29T12:42:30.000Z
|
2020-11-29T12:42:30.000Z
|
grid_search_loop/tr5000_N200/ESNtrainCV.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | null | null | null |
grid_search_loop/tr5000_N200/ESNtrainCV.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Config
# biased_regress = True
# normal_equations = True
dataset = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_zero = True
mm = False
std = False
numpy_load = True
nanfill = True
## ESN parameters
N_def = [200] # Neurons
scale_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # scaling
mem_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # memory
exponent_def = 1.0 # sigmoid exponent
# Script name struct for report
#script_name = 'ESNtrainCV'
#name_struct_meta = "_N_scale_mem"
#name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N_def, scale_def, mem_def)
## Imports
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
#import matplotlib.pyplot as plt
import ESNtools
import GSK
#Needed for reporting
import platform
import time
# Fix boundary nans (replicate head/tail vals)
def nan_bounds(feats):
nanidx = np.where(np.isnan(feats))[0]
pointer_left = 0
pointer_right = len(feats)-1
fix_left = pointer_left in nanidx
fix_right = pointer_right in nanidx
while fix_left:
if pointer_left in nanidx:
pointer_left += 1
# print("pointer_left:", pointer_left)
else:
val_left = feats[pointer_left]
feats[:pointer_left] = val_left*np.ones((1,pointer_left),dtype=np.float)
fix_left = False
while fix_right:
if pointer_right in nanidx:
pointer_right -= 1
# print("pointer_right:", pointer_right)
else:
val_right = feats[pointer_right]
feats[pointer_right+1:] = val_right*np.ones((1,len(feats)-pointer_right-1),dtype=np.float)
fix_right = False
# nan interpolation
def nan_interpolate(feats):
nanidx = np.where(np.isnan(feats))[0]
nan_remain = len(nanidx)
nanid = 0
while nan_remain > 0:
nanpos = nanidx[nanid]
nanval = feats[nanpos-1]
nan_remain -= 1
nandim = 1
initpos = nanpos
# Check whether it extends
while nanpos+1 in nanidx:
nanpos += 1
nanid += 1
nan_remain -= 1
nandim += 1
# Average sides
if np.isfinite(feats[nanpos+1]):
nanval = 0.5 * (nanval + feats[nanpos+1])
# Single value average
if nandim == 1:
nanval = 0.5 * (nanval + feats[nanpos+1])
feats[initpos:initpos+nandim] = nanval*np.ones((1,nandim),dtype=np.double)
nanpos += 1
nanid += 1
## Get sepsis patients
def get_sepsis_patients(sepsis_label, patient):
patient_sep = np.zeros(len(sepsis_label),dtype=np.int)
for i in range(n):
i_pat = np.where(patient==i)[0]
patient_sep[i_pat] = int(np.sum(sepsis_label[i_pat])>0)*np.ones(len(i_pat), dtype=np.int)
patient_sep_idx = np.where(patient_sep!=0)[0]
patient_healthy_idx = np.where(patient_sep==0)[0]
return patient_sep, patient_sep_idx, patient_healthy_idx
## Create the feature matrix
features = []
patient = []
sepsis_label = []
dataloaded = False
## Read data
if not numpy_load:
## Folder and files
fnames = os.listdir(path)
fnames.sort()
if 'README.md' in fnames:
fnames.remove('README.md')
print('last file: ', fnames[-1])
n = len(fnames)
print(n, ' files present')
## read data
for i in range(n):
input_file = os.path.join(path, fnames[i])
if i ==0:
data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
else:
data, sep_lab = ESNtools.read_challenge_data_label(input_file)
features.append(data)
sepsis_label.append(sep_lab)
pat = i * np.ones((sep_lab.shape), dtype=np.int)
patient.append(pat)
feature_matrix = np.concatenate(features)
del(features)
sepsis_label = np.concatenate(sepsis_label)
patient = np.concatenate(patient)
dataloaded = True
else:
npyfilename = "../npy/" + dataset + "_patient.npy"
patient = np.load(npyfilename)
print(npyfilename, " loaded")
npyfilename = "../npy/" + dataset + "_Y.npy"
sepsis_label = np.load(npyfilename)
print(npyfilename, " loaded")
#ADD nanfill tag
if nanfill:
dataset = dataset + "_nanfill"
if mm:
npyfilename = "../npy/" + dataset + "_mm.npy"
mm = False
print(npyfilename, '(mm) to be loaded')
else:
npyfilename = "../npy/" + dataset + ".npy"
print(npyfilename, '(not mm) to be loaded')
n = len(np.unique(patient))
print(n, ' files present')
dataloaded = True
feature_matrix = np.load(npyfilename)
##Flatten patient
patient = patient.flatten()
## Separate pointers
feature_phys = feature_matrix[:,:-6] ## Physiology
feature_demog = feature_matrix[:,-6:] ## Demographics
## Normalize mm(all) or std (sepsis, phys) vals, feature-based
if mm:
scaler = MinMaxScaler()
for i in range(n):
i_pat = np.where(patient==i)[0]
scaler.fit(feature_matrix[i_pat,:])
feature_matrix[i_pat,:] = scaler.transform(feature_matrix[i_pat,:])
elif std:
## (Get sepsis patients)
patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
scaler = StandardScaler()
scaler.fit(feature_phys[patient_healthy_idx,:])
feature_phys[:,:] = scaler.transform(feature_phys[:,:])
## nan to zero
if nan_to_zero:
feature_matrix[np.isnan(feature_matrix)]=0
print("Changed nan to 0")
## Septic groups stratify
patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
#healthy_patient_list = np.unique(patient[patient_healthy_idx])
#sep_patient_list = np.unique(patient[patient_sep_idx])
## Nonlinear mapping function
sigmoid_exponent = exponent_def
func = ESNtools.sigmoid
#SFK
#skf = StratifiedKFold(n_splits=kfold_split)
#skf.get_n_splits(X)
#GSKF
groups = patient
train_index, test_index = GSK.GroupStratifiedKFold(np.hstack([patient_sep.reshape(-1,1), groups.reshape(-1,1)]), 10)
def get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index):
script_name = 'ESNtrainCV'
name_struct_meta = "_N_scale_mem"
name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N, scale, mem)
## ESN Generation parameters
## Perform ESN feed
pat_shift = np.append(np.where(np.diff(patient)!=0)[0] + 1, [len(patient)])
pat_ipos = 0
print("pat_shift: ",len(pat_shift))
allocateESN = True
print('ESN: ')
if allocateESN:
ESN = np.ones((len(feature_matrix),N+1), dtype = np.float)
for i in range(len(pat_shift)):
print("Feeding ESN patient:", i)
ESN[pat_ipos:pat_shift[i],:] = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
pat_ipos = pat_shift[i]
else:
for i in range(len(pat_shift)):
if i == 0:
ESN = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
else:
ESN = np.vstack((ESN, ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)))
pat_ipos = pat_shift[i]
del feature_matrix
## Divide in sets
X = ESN
y = sepsis_label
## KFold
results = []
target = []
kk = 0
#for train_index, test_index in skf.split(X,y): #Stratified KFold
for j in range(len(train_index)): #GSKF
X_train, X_test = X[train_index[j]], X[test_index[j]] #GSKF
y_train, y_test = y[train_index[j]], y[test_index[j]] #GSKF
patients_id_train, patients_id_test = patient[train_index[j]], patient[test_index[j]]
w = ESNtools.get_weights_lu_biasedNE(X_train, y_train)
print("Start testing...", flush=True)
Y_pred = (np.matmul(X_test,w))
print(kk, ' realisation ')
print("auc: ", roc_auc_score(y_test, Y_pred))
kk +=1
target.append(y_test)
results.append(Y_pred)
## Evaluate results
results = np.concatenate(results)
target = np.concatenate(target)
auc = roc_auc_score(target,results)
print('auc: ', auc)
## Threshold study
th_i = np.min(results)
th_f = np.max(results)
## AUC-based CV
AUC_CV = True
if AUC_CV:
th_max = 0
f1 = 0
ACC = 0
Pr = 0
Re = 0
else:
th_steps = 1000
th_step = (th_f-th_i)/th_steps
thsum = 0
th = np.zeros((1000, 1), dtype = np.double)
f1 =np.zeros((1000, 1), dtype = np.double)
print("Threshold: Loop between ", th_i, th_i+th_step*th_steps)
for i, j in enumerate(np.arange(th_i, th_f, th_step)):
if j < th_steps:
th[i] = j
f1[i] = f1_score(target, results > th[i])
thsum = thsum + th[i]
if i%100 == 0:
print(i, th[i], f1[i])
if f1[i] < 0.001 and np.abs(thsum) > 0:
th = th[:i]
f1 = f1[:i]
break
## Max Threshold
th_max = th[np.argmax(f1)]
## Metrics
Pr = precision_score(target, results > th_max)
Re = recall_score(target, results > th_max)
ACC = accuracy_score(target, results > th_max)
auc = roc_auc_score(target, results)
f1 = f1_score(target, results > th_max)
user = platform.uname()[1] + '@' + platform.platform()
dir_path = os.path.dirname(os.path.realpath(__file__))
# write to report file
output_file = 'report_' + script_name + name_struct + '.txt'
with open(output_file, 'w') as f:
f.write(user + '\n')
f.write(dir_path + '\n')
f.write(__file__ + '\n')
f.write(time.strftime("%Y-%m-%d %H:%M") + '\n')
# f.write('Dataset: ' + path + '\n')
f.write('{:03d} \t N \n'.format(N))
f.write('{:1.3f} \t scale \n'.format(scale))
f.write('{:1.3f} \t mem \n'.format(mem))
f.write('%1.3f \t exp\n' % sigmoid_exponent)
f.write('(%2.4f, %2.4f, %2.4f) \t th_i, th_f, *th_sc\n' % (th_i, th_f, th_f-th_i))
f.write('%2.4f \t th\n' % th_max)
f.write('%2.4f \t Pr\n' % Pr)
f.write('%2.4f \t Re\n' % Re)
f.write('%2.4f \t F1\n' % f1)
f.write('%2.4f \t ACC\n' % ACC)
f.write('%2.4f \t AUC\n' % auc)
print(user)
print(dir_path)
print(__file__)
print(time.strftime("%Y-%m-%d %H:%M"))
print('Dataset: ' + path)
print('N: {:03d}'.format(N))
print('scale: {:1.3f}'.format(scale))
print('mem: {:1.3f}'.format(mem))
print('exp: %1.3f' % sigmoid_exponent)
print('th_i, th_f, *th_sc: (%2.4f, %2.4f, %2.4f)' % (th_i, th_f, th_f-th_i))
print('th: %2.4f' % th_max)
print('Pr: %2.4f' % Pr)
print('Re: %2.4f' % Re)
print('F1: %2.4f' % f1)
print('ACC: %2.4f' % ACC)
print('AUC: %2.4f' % auc)
## Grid_search for loop
for i_N in range(len(N_def)):
N = N_def[i_N] # Neurons
## Random seed
np.random.seed(seed=0)
## Mask parameters
M = 2*np.random.rand(np.shape(feature_matrix)[1],N)-1
Mb = 2*np.random.rand(1,N)-1
for i_scale in range(len(scale_def)):
scale = scale_def[i_scale] # scaling factor
for i_mem in range(len(mem_def)):
mem = mem_def[i_mem] # memory
try:
get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index)
except:
print("Error at ", N, scale, mem)
pass
| 31.944862
| 154
| 0.595716
|
et = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_zero = True
mm = False
std = False
numpy_load = True
nanfill = True
scale_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
mem_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
exponent_def = 1.0
umpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
import ESNtools
import GSK
import platform
import time
def nan_bounds(feats):
nanidx = np.where(np.isnan(feats))[0]
pointer_left = 0
pointer_right = len(feats)-1
fix_left = pointer_left in nanidx
fix_right = pointer_right in nanidx
while fix_left:
if pointer_left in nanidx:
pointer_left += 1
else:
val_left = feats[pointer_left]
feats[:pointer_left] = val_left*np.ones((1,pointer_left),dtype=np.float)
fix_left = False
while fix_right:
if pointer_right in nanidx:
pointer_right -= 1
else:
val_right = feats[pointer_right]
feats[pointer_right+1:] = val_right*np.ones((1,len(feats)-pointer_right-1),dtype=np.float)
fix_right = False
def nan_interpolate(feats):
nanidx = np.where(np.isnan(feats))[0]
nan_remain = len(nanidx)
nanid = 0
while nan_remain > 0:
nanpos = nanidx[nanid]
nanval = feats[nanpos-1]
nan_remain -= 1
nandim = 1
initpos = nanpos
while nanpos+1 in nanidx:
nanpos += 1
nanid += 1
nan_remain -= 1
nandim += 1
if np.isfinite(feats[nanpos+1]):
nanval = 0.5 * (nanval + feats[nanpos+1])
if nandim == 1:
nanval = 0.5 * (nanval + feats[nanpos+1])
feats[initpos:initpos+nandim] = nanval*np.ones((1,nandim),dtype=np.double)
nanpos += 1
nanid += 1
nts(sepsis_label, patient):
patient_sep = np.zeros(len(sepsis_label),dtype=np.int)
for i in range(n):
i_pat = np.where(patient==i)[0]
patient_sep[i_pat] = int(np.sum(sepsis_label[i_pat])>0)*np.ones(len(i_pat), dtype=np.int)
patient_sep_idx = np.where(patient_sep!=0)[0]
patient_healthy_idx = np.where(patient_sep==0)[0]
return patient_sep, patient_sep_idx, patient_healthy_idx
sepsis_label = []
dataloaded = False
y_load:
istdir(path)
fnames.sort()
if 'README.md' in fnames:
fnames.remove('README.md')
print('last file: ', fnames[-1])
n = len(fnames)
print(n, ' files present')
in range(n):
input_file = os.path.join(path, fnames[i])
if i ==0:
data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
else:
data, sep_lab = ESNtools.read_challenge_data_label(input_file)
features.append(data)
sepsis_label.append(sep_lab)
pat = i * np.ones((sep_lab.shape), dtype=np.int)
patient.append(pat)
feature_matrix = np.concatenate(features)
del(features)
sepsis_label = np.concatenate(sepsis_label)
patient = np.concatenate(patient)
dataloaded = True
else:
npyfilename = "../npy/" + dataset + "_patient.npy"
patient = np.load(npyfilename)
print(npyfilename, " loaded")
npyfilename = "../npy/" + dataset + "_Y.npy"
sepsis_label = np.load(npyfilename)
print(npyfilename, " loaded")
if nanfill:
dataset = dataset + "_nanfill"
if mm:
npyfilename = "../npy/" + dataset + "_mm.npy"
mm = False
print(npyfilename, '(mm) to be loaded')
else:
npyfilename = "../npy/" + dataset + ".npy"
print(npyfilename, '(not mm) to be loaded')
n = len(np.unique(patient))
print(n, ' files present')
dataloaded = True
feature_matrix = np.load(npyfilename)
nt.flatten()
ture_matrix[:,:-6] og = feature_matrix[:,-6:] = np.where(patient==i)[0]
scaler.fit(feature_matrix[i_pat,:])
feature_matrix[i_pat,:] = scaler.transform(feature_matrix[i_pat,:])
elif std:
nt_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
scaler = StandardScaler()
scaler.fit(feature_phys[patient_healthy_idx,:])
feature_phys[:,:] = scaler.transform(feature_phys[:,:])
ro:
feature_matrix[np.isnan(feature_matrix)]=0
print("Changed nan to 0")
p_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
_def
func = ESNtools.sigmoid
groups = patient
train_index, test_index = GSK.GroupStratifiedKFold(np.hstack([patient_sep.reshape(-1,1), groups.reshape(-1,1)]), 10)
def get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index):
script_name = 'ESNtrainCV'
name_struct_meta = "_N_scale_mem"
name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N, scale, mem)
(np.diff(patient)!=0)[0] + 1, [len(patient)])
pat_ipos = 0
print("pat_shift: ",len(pat_shift))
allocateESN = True
print('ESN: ')
if allocateESN:
ESN = np.ones((len(feature_matrix),N+1), dtype = np.float)
for i in range(len(pat_shift)):
print("Feeding ESN patient:", i)
ESN[pat_ipos:pat_shift[i],:] = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
pat_ipos = pat_shift[i]
else:
for i in range(len(pat_shift)):
if i == 0:
ESN = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
else:
ESN = np.vstack((ESN, ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)))
pat_ipos = pat_shift[i]
del feature_matrix
y = sepsis_label
sults = []
target = []
kk = 0
ge(len(train_index)):
X_train, X_test = X[train_index[j]], X[test_index[j]]
y_train, y_test = y[train_index[j]], y[test_index[j]]
patients_id_train, patients_id_test = patient[train_index[j]], patient[test_index[j]]
w = ESNtools.get_weights_lu_biasedNE(X_train, y_train)
print("Start testing...", flush=True)
Y_pred = (np.matmul(X_test,w))
print(kk, ' realisation ')
print("auc: ", roc_auc_score(y_test, Y_pred))
kk +=1
target.append(y_test)
results.append(Y_pred)
concatenate(results)
target = np.concatenate(target)
auc = roc_auc_score(target,results)
print('auc: ', auc)
n(results)
th_f = np.max(results)
True
if AUC_CV:
th_max = 0
f1 = 0
ACC = 0
Pr = 0
Re = 0
else:
th_steps = 1000
th_step = (th_f-th_i)/th_steps
thsum = 0
th = np.zeros((1000, 1), dtype = np.double)
f1 =np.zeros((1000, 1), dtype = np.double)
print("Threshold: Loop between ", th_i, th_i+th_step*th_steps)
for i, j in enumerate(np.arange(th_i, th_f, th_step)):
if j < th_steps:
th[i] = j
f1[i] = f1_score(target, results > th[i])
thsum = thsum + th[i]
if i%100 == 0:
print(i, th[i], f1[i])
if f1[i] < 0.001 and np.abs(thsum) > 0:
th = th[:i]
f1 = f1[:i]
break
= th[np.argmax(f1)]
Pr = precision_score(target, results > th_max)
Re = recall_score(target, results > th_max)
ACC = accuracy_score(target, results > th_max)
auc = roc_auc_score(target, results)
f1 = f1_score(target, results > th_max)
user = platform.uname()[1] + '@' + platform.platform()
dir_path = os.path.dirname(os.path.realpath(__file__))
output_file = 'report_' + script_name + name_struct + '.txt'
with open(output_file, 'w') as f:
f.write(user + '\n')
f.write(dir_path + '\n')
f.write(__file__ + '\n')
f.write(time.strftime("%Y-%m-%d %H:%M") + '\n')
f.write('{:03d} \t N \n'.format(N))
f.write('{:1.3f} \t scale \n'.format(scale))
f.write('{:1.3f} \t mem \n'.format(mem))
f.write('%1.3f \t exp\n' % sigmoid_exponent)
f.write('(%2.4f, %2.4f, %2.4f) \t th_i, th_f, *th_sc\n' % (th_i, th_f, th_f-th_i))
f.write('%2.4f \t th\n' % th_max)
f.write('%2.4f \t Pr\n' % Pr)
f.write('%2.4f \t Re\n' % Re)
f.write('%2.4f \t F1\n' % f1)
f.write('%2.4f \t ACC\n' % ACC)
f.write('%2.4f \t AUC\n' % auc)
print(user)
print(dir_path)
print(__file__)
print(time.strftime("%Y-%m-%d %H:%M"))
print('Dataset: ' + path)
print('N: {:03d}'.format(N))
print('scale: {:1.3f}'.format(scale))
print('mem: {:1.3f}'.format(mem))
print('exp: %1.3f' % sigmoid_exponent)
print('th_i, th_f, *th_sc: (%2.4f, %2.4f, %2.4f)' % (th_i, th_f, th_f-th_i))
print('th: %2.4f' % th_max)
print('Pr: %2.4f' % Pr)
print('Re: %2.4f' % Re)
print('F1: %2.4f' % f1)
print('ACC: %2.4f' % ACC)
print('AUC: %2.4f' % auc)
N_def)):
N = N_def[i_N]
m.seed(seed=0)
dom.rand(np.shape(feature_matrix)[1],N)-1
Mb = 2*np.random.rand(1,N)-1
for i_scale in range(len(scale_def)):
scale = scale_def[i_scale]
for i_mem in range(len(mem_def)):
mem = mem_def[i_mem]
try:
get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index)
except:
print("Error at ", N, scale, mem)
pass
| true
| true
|
1c482d59925e0619904da8fa23e69b884ba76a39
| 825
|
py
|
Python
|
shooter/config.py
|
codershkoder/zombie_shooter_025
|
12582915af81d641f6a654418c02792ee96ea2a8
|
[
"MIT"
] | null | null | null |
shooter/config.py
|
codershkoder/zombie_shooter_025
|
12582915af81d641f6a654418c02792ee96ea2a8
|
[
"MIT"
] | null | null | null |
shooter/config.py
|
codershkoder/zombie_shooter_025
|
12582915af81d641f6a654418c02792ee96ea2a8
|
[
"MIT"
] | null | null | null |
from pathlib import Path
# Настройки путей
_BASE_DIR = Path.cwd()
_RESOURCES_DIR = _BASE_DIR / 'resources'
_IMAGES_DIR = _RESOURCES_DIR / 'images'
_LEVELS_DIR = _RESOURCES_DIR / 'levels'
# Общие настройки
WINDOW_CAPTION = 'Зомби шутер'
FRAME_RATE = 60
BACKGROUND_COLOR = (0, 0, 0)
# Настройки для игрока
PLAYER_IMAGE = _IMAGES_DIR / 'player_min.png'
PLAYER_SPEED = 5
PLAYER_HEALTH = 100
PLAYER_IMMORTALITY_TIME = 1
# Настройки пуль
BULLET_IMAGE = _IMAGES_DIR / 'bullet.png'
BULLET_SPEED = 15
BULLET_DAMAGE = 10
# Настройки зомби
ZOMBIE_IMAGE = _IMAGES_DIR / 'zombie_min.png'
ZOMBIE_SPEED = 2
ZOMBIE_RADIUS_AGR = 70
ZOMBIE_HEALTH = 2000
ZOMBIE_DAMAGE = 40
# Список уровней
LEVEL_1 = _LEVELS_DIR / 'level.txt'
# Объекты окружения
LANDSCAPE_GROUND = _IMAGES_DIR / 'ground.png'
LANDSCAPE_STONE = _IMAGES_DIR / 'stone.png'
| 21.710526
| 45
| 0.768485
|
from pathlib import Path
_BASE_DIR = Path.cwd()
_RESOURCES_DIR = _BASE_DIR / 'resources'
_IMAGES_DIR = _RESOURCES_DIR / 'images'
_LEVELS_DIR = _RESOURCES_DIR / 'levels'
WINDOW_CAPTION = 'Зомби шутер'
FRAME_RATE = 60
BACKGROUND_COLOR = (0, 0, 0)
PLAYER_IMAGE = _IMAGES_DIR / 'player_min.png'
PLAYER_SPEED = 5
PLAYER_HEALTH = 100
PLAYER_IMMORTALITY_TIME = 1
BULLET_IMAGE = _IMAGES_DIR / 'bullet.png'
BULLET_SPEED = 15
BULLET_DAMAGE = 10
ZOMBIE_IMAGE = _IMAGES_DIR / 'zombie_min.png'
ZOMBIE_SPEED = 2
ZOMBIE_RADIUS_AGR = 70
ZOMBIE_HEALTH = 2000
ZOMBIE_DAMAGE = 40
LEVEL_1 = _LEVELS_DIR / 'level.txt'
LANDSCAPE_GROUND = _IMAGES_DIR / 'ground.png'
LANDSCAPE_STONE = _IMAGES_DIR / 'stone.png'
| true
| true
|
1c482e3d03274f06a56af75d2a96e0b689dfe117
| 887
|
py
|
Python
|
roshant/everest/everest/urls.py
|
sushant60/Python-web
|
426a89200e52e902b3db519998485a5de202fa91
|
[
"Apache-2.0"
] | null | null | null |
roshant/everest/everest/urls.py
|
sushant60/Python-web
|
426a89200e52e902b3db519998485a5de202fa91
|
[
"Apache-2.0"
] | null | null | null |
roshant/everest/everest/urls.py
|
sushant60/Python-web
|
426a89200e52e902b3db519998485a5de202fa91
|
[
"Apache-2.0"
] | null | null | null |
"""everest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('eve', views.first, name='first'),
path('science', views.second, name='second'),
]
| 35.48
| 78
| 0.680947
|
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('eve', views.first, name='first'),
path('science', views.second, name='second'),
]
| true
| true
|
1c482ebcbaa1bfb3289d76e89372b8cceb55517f
| 3,201
|
py
|
Python
|
B4860-V7/xxx/scons-3.1.1/engine/SCons/Tool/zip.py
|
miaopei/B4860
|
6f084bd485b787bb36de26d40f83ff4833098c3d
|
[
"MIT"
] | null | null | null |
B4860-V7/xxx/scons-3.1.1/engine/SCons/Tool/zip.py
|
miaopei/B4860
|
6f084bd485b787bb36de26d40f83ff4833098c3d
|
[
"MIT"
] | null | null | null |
B4860-V7/xxx/scons-3.1.1/engine/SCons/Tool/zip.py
|
miaopei/B4860
|
6f084bd485b787bb36de26d40f83ff4833098c3d
|
[
"MIT"
] | null | null | null |
"""SCons.Tool.zip
Tool-specific initialization for zip.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/zip.py 72ae09dc35ac2626f8ff711d8c4b30b6138e08e3 2019-08-08 14:50:06 bdeegan"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
import zipfile
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
compression = env.get('ZIPCOMPRESSION', 0)
zf = zipfile.ZipFile(str(target[0]), 'w', compression)
for s in source:
if s.isdir():
for dirpath, dirnames, filenames in os.walk(str(s)):
for fname in filenames:
path = os.path.join(dirpath, fname)
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
else:
zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
zf.close()
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$ZIPSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for zip to an Environment."""
try:
bld = env['BUILDERS']['Zip']
except KeyError:
bld = ZipBuilder
env['BUILDERS']['Zip'] = bld
env['ZIP'] = 'zip'
env['ZIPFLAGS'] = SCons.Util.CLVar('')
env['ZIPCOM'] = zipAction
env['ZIPCOMPRESSION'] = zipcompression
env['ZIPSUFFIX'] = '.zip'
env['ZIPROOT'] = SCons.Util.CLVar('')
def exists(env):
return True
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.419355
| 114
| 0.663855
|
__revision__ = "src/engine/SCons/Tool/zip.py 72ae09dc35ac2626f8ff711d8c4b30b6138e08e3 2019-08-08 14:50:06 bdeegan"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
import zipfile
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
compression = env.get('ZIPCOMPRESSION', 0)
zf = zipfile.ZipFile(str(target[0]), 'w', compression)
for s in source:
if s.isdir():
for dirpath, dirnames, filenames in os.walk(str(s)):
for fname in filenames:
path = os.path.join(dirpath, fname)
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
else:
zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
zf.close()
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$ZIPSUFFIX',
multi = 1)
def generate(env):
try:
bld = env['BUILDERS']['Zip']
except KeyError:
bld = ZipBuilder
env['BUILDERS']['Zip'] = bld
env['ZIP'] = 'zip'
env['ZIPFLAGS'] = SCons.Util.CLVar('')
env['ZIPCOM'] = zipAction
env['ZIPCOMPRESSION'] = zipcompression
env['ZIPSUFFIX'] = '.zip'
env['ZIPROOT'] = SCons.Util.CLVar('')
def exists(env):
return True
| true
| true
|
1c482f45ae4ff817a2e5c8c5c289bc77f9c36105
| 1,792
|
py
|
Python
|
esolang_IDE/visualisers/io_widget.py
|
Avanta8/Esolang-Interpreter-IDE
|
9a958eb26314c6c138d1921e76c52b1bb53c85ed
|
[
"MIT"
] | 3
|
2020-01-16T23:04:24.000Z
|
2020-07-21T23:55:59.000Z
|
esolang_IDE/visualisers/io_widget.py
|
Avanta8/Esolang-Interpreter-IDE
|
9a958eb26314c6c138d1921e76c52b1bb53c85ed
|
[
"MIT"
] | null | null | null |
esolang_IDE/visualisers/io_widget.py
|
Avanta8/Esolang-Interpreter-IDE
|
9a958eb26314c6c138d1921e76c52b1bb53c85ed
|
[
"MIT"
] | null | null | null |
from PyQt5 import QtCore, QtWidgets
from esolang_IDE.input_text import HighlightInputText
from esolang_IDE.output_text import OutputText
class IOWidget(QtWidgets.QWidget):
def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):
super().__init__(parent=parent, flags=flags)
self.init_widgets()
self.error_text_active = True
self.clear_error_text()
def init_widgets(self):
self._error_text_timer = QtCore.QTimer(self)
self._error_text_timer.setSingleShot(True)
self._error_text_timer.timeout.connect(self.clear_error_text)
self._input_text = HighlightInputText(self)
self._output_text = OutputText(self)
self._error_text = QtWidgets.QLineEdit(self)
self._error_text.setReadOnly(True)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QLabel('Input:'))
layout.addWidget(self._input_text)
layout.addWidget(QtWidgets.QLabel('Output:'))
layout.addWidget(self._output_text)
layout.addWidget(self._error_text)
self.setLayout(layout)
def set_error_text(self, message):
self._error_text_timer.stop()
self._error_text.setText(message)
self._error_text.show()
self.error_text_active = True
def timed_error_text(self, message, time=1000):
self.set_error_text(message)
self._error_text_timer.start(time)
def clear_error_text(self):
if not self.error_text_active:
return
self._error_text_timer.stop()
self._error_text.clear()
self._error_text.hide()
self.error_text_active = False
def get_input_text(self):
return self._input_text
def get_output_text(self):
return self._output_text
| 30.896552
| 69
| 0.689174
|
from PyQt5 import QtCore, QtWidgets
from esolang_IDE.input_text import HighlightInputText
from esolang_IDE.output_text import OutputText
class IOWidget(QtWidgets.QWidget):
def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):
super().__init__(parent=parent, flags=flags)
self.init_widgets()
self.error_text_active = True
self.clear_error_text()
def init_widgets(self):
self._error_text_timer = QtCore.QTimer(self)
self._error_text_timer.setSingleShot(True)
self._error_text_timer.timeout.connect(self.clear_error_text)
self._input_text = HighlightInputText(self)
self._output_text = OutputText(self)
self._error_text = QtWidgets.QLineEdit(self)
self._error_text.setReadOnly(True)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QLabel('Input:'))
layout.addWidget(self._input_text)
layout.addWidget(QtWidgets.QLabel('Output:'))
layout.addWidget(self._output_text)
layout.addWidget(self._error_text)
self.setLayout(layout)
def set_error_text(self, message):
self._error_text_timer.stop()
self._error_text.setText(message)
self._error_text.show()
self.error_text_active = True
def timed_error_text(self, message, time=1000):
self.set_error_text(message)
self._error_text_timer.start(time)
def clear_error_text(self):
if not self.error_text_active:
return
self._error_text_timer.stop()
self._error_text.clear()
self._error_text.hide()
self.error_text_active = False
def get_input_text(self):
return self._input_text
def get_output_text(self):
return self._output_text
| true
| true
|
1c48308b9835d4ad17cc2c255db05b765a7dd3a3
| 2,479
|
py
|
Python
|
conpaas-services/contrib/libcloud/common/hostvirtual.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 5
|
2016-02-24T14:44:03.000Z
|
2020-11-29T19:18:40.000Z
|
conpaas-services/contrib/libcloud/common/hostvirtual.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 25
|
2015-03-23T16:05:19.000Z
|
2018-02-13T17:22:22.000Z
|
conpaas-services/contrib/libcloud/common/hostvirtual.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 3
|
2018-09-14T16:54:14.000Z
|
2020-07-26T03:14:56.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.common.types import LibcloudError
API_HOST = 'www.vr.org'
class HostVirtualException(LibcloudError):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return self.__repr__()
def __repr__(self):
return '<HostVirtualException in %d: %s>' % (self.code, self.message)
class HostVirtualConnection(ConnectionKey):
host = API_HOST
def add_default_params(self, params):
params['key'] = self.key
return params
class HostVirtualResponse(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_body(self):
if not self.body:
return None
data = json.loads(self.body)
return data
def parse_error(self):
data = self.parse_body()
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError('%(code)s:%(message)s' % (data['error']))
elif self.status == httplib.PRECONDITION_FAILED:
raise HostVirtualException(
data['error']['code'], data['error']['message'])
elif self.status == httplib.NOT_FOUND:
raise HostVirtualException(
data['error']['code'], data['error']['message'])
return self.body
def success(self):
return self.status in self.valid_response_codes
| 32.618421
| 77
| 0.689794
|
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.common.types import LibcloudError
API_HOST = 'www.vr.org'
class HostVirtualException(LibcloudError):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return self.__repr__()
def __repr__(self):
return '<HostVirtualException in %d: %s>' % (self.code, self.message)
class HostVirtualConnection(ConnectionKey):
host = API_HOST
def add_default_params(self, params):
params['key'] = self.key
return params
class HostVirtualResponse(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_body(self):
if not self.body:
return None
data = json.loads(self.body)
return data
def parse_error(self):
data = self.parse_body()
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError('%(code)s:%(message)s' % (data['error']))
elif self.status == httplib.PRECONDITION_FAILED:
raise HostVirtualException(
data['error']['code'], data['error']['message'])
elif self.status == httplib.NOT_FOUND:
raise HostVirtualException(
data['error']['code'], data['error']['message'])
return self.body
def success(self):
return self.status in self.valid_response_codes
| true
| true
|
1c483094595d09d08bba4956265bb0fbca8a59fc
| 538
|
py
|
Python
|
src/myres/migrations/0003_auto_20170404_0924.py
|
tsotetsi/myres-api
|
9ca8f6762168d07a767c30a490520dfad54079d9
|
[
"MIT"
] | null | null | null |
src/myres/migrations/0003_auto_20170404_0924.py
|
tsotetsi/myres-api
|
9ca8f6762168d07a767c30a490520dfad54079d9
|
[
"MIT"
] | null | null | null |
src/myres/migrations/0003_auto_20170404_0924.py
|
tsotetsi/myres-api
|
9ca8f6762168d07a767c30a490520dfad54079d9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-04 07:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('myres', '0002_student_residence'),
]
operations = [
migrations.AlterField(
model_name='student',
name='residence',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myres.Residence'),
),
]
| 24.454545
| 103
| 0.650558
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('myres', '0002_student_residence'),
]
operations = [
migrations.AlterField(
model_name='student',
name='residence',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myres.Residence'),
),
]
| true
| true
|
1c48317e7445a689b165340a69a42ccf53b75ba5
| 15,085
|
py
|
Python
|
src/models/data_loader.py
|
tikhonovpavel/LdaSummarization
|
fbfb229e83548d9dd8f921626fd3fbf423b0305a
|
[
"MIT"
] | null | null | null |
src/models/data_loader.py
|
tikhonovpavel/LdaSummarization
|
fbfb229e83548d9dd8f921626fd3fbf423b0305a
|
[
"MIT"
] | null | null | null |
src/models/data_loader.py
|
tikhonovpavel/LdaSummarization
|
fbfb229e83548d9dd8f921626fd3fbf423b0305a
|
[
"MIT"
] | null | null | null |
import bisect
import gc
import glob
import pickle
import random
import torch
from others.logging import logger
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
np.random.seed(2018)
# import nltk
# nltk.download('wordnet')
#
# with open('../topic_modelling_data/dictionary.pkl', 'rb') as f:
# tm_dictionary = pickle.load(f)
#
# with open('../topic_modelling_data/lda_model.pkl', 'rb') as f:
# lda_model = pickle.load(f)
#
# stemmer = SnowballStemmer('english')
#
# def lemmatize_stemming(text):
# return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
#
# def preprocess(text):
# result = []
# for token in gensim.utils.simple_preprocess(text):
# if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
# result.append(lemmatize_stemming(token))
# return result
class Batch(object):
def _pad(self, data, pad_id, width=-1):
if (width == -1):
width = max(len(d) for d in data)
rtn_data = [d + [pad_id] * (width - len(d)) for d in data]
return rtn_data
def __init__(self, data=None, device=None, is_test=False):
"""Create a Batch from a list of examples."""
if data is not None:
self.batch_size = len(data)
pre_src = [x[0] for x in data]
pre_tgt = [x[1] for x in data]
pre_segs = [x[2] for x in data]
pre_clss = [x[3] for x in data]
pre_src_sent_labels = [x[4] for x in data]
src = torch.tensor(self._pad(pre_src, 0))
tgt = torch.tensor(self._pad(pre_tgt, 0))
segs = torch.tensor(self._pad(pre_segs, 0))
try:
mask_src = 1 - (src == 0)
mask_tgt = 1 - (tgt == 0)
except RuntimeError as err:
if 'Subtraction, the `-` operator, with a bool tensor is not supported' not in str(err):
raise err
mask_src = ~(src == 0)
mask_tgt = ~(tgt == 0)
clss = torch.tensor(self._pad(pre_clss, -1))
src_sent_labels = torch.tensor(self._pad(pre_src_sent_labels, 0))
try:
mask_cls = 1 - (clss == -1)
except RuntimeError as err:
if 'Subtraction, the `-` operator, with a bool tensor is not supported' not in str(err):
raise err
mask_cls = ~(clss == -1)
clss[clss == -1] = 0
setattr(self, 'clss', clss.to(device))
setattr(self, 'mask_cls', mask_cls.to(device))
setattr(self, 'src_sent_labels', src_sent_labels.to(device))
setattr(self, 'src', src.to(device))
setattr(self, 'tgt', tgt.to(device))
setattr(self, 'segs', segs.to(device))
setattr(self, 'mask_src', mask_src.to(device))
setattr(self, 'mask_tgt', mask_tgt.to(device))
# setattr(self, 'topics', topics.to(device))
if (is_test) or True:
src_str = [x[-3] for x in data]
setattr(self, 'src_str', src_str)
tgt_str = [x[-2] for x in data]
setattr(self, 'tgt_str', tgt_str)
topics = [x[-1] for x in data]
setattr(self, 'topics', topics)
def __len__(self):
return self.batch_size
def load_dataset(args, corpus_type, shuffle):
"""
Dataset generator. Don't do extra stuff here, like printing,
because they will be postponed to the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of dataset, the dataset(s) are lazily loaded.
"""
assert corpus_type in ["train", "valid", "test"]
def _lazy_dataset_loader(pt_file, corpus_type, use_topic_modelling):
dataset = torch.load(pt_file)
# if use_topic_modelling:
# for article in dataset:
# # unseen_document = 'How a Pentagon deal became an identity crisis for Google'
# bow_vector = tm_dictionary.doc2bow(preprocess(' '.join(article['src_txt'])))
#
# article_topic = sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1])[0]
# article_topic = article_topic[0]
# DICTIONARY_SIZE = 30_000
# article_topic = DICTIONARY_SIZE + article_topic
#
# article['src'] = [article_topic] + article['src']
#
# # for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1]):
# # print("Score: {}\t Topic: {}".format(score, lda_model.print_topic(index, 5)))
logger.info('Loading %s dataset from %s, number of examples: %d' %
(corpus_type, pt_file, len(dataset)))
return dataset
# Sort the glob output by file name (by increasing indexes).
pts = sorted(glob.glob(args.bert_data_path + '.' + corpus_type + '.[0-9]*.pt'))
if pts:
if (shuffle):
random.shuffle(pts)
for pt in pts:
yield _lazy_dataset_loader(pt, corpus_type, args.use_topic_modelling)
else:
# Only one inputters.*Dataset, simple!
pt = args.bert_data_path + '.' + corpus_type + '.pt'
yield _lazy_dataset_loader(pt, corpus_type, args.use_topic_modelling)
def abs_batch_size_fn(new, count):
src, tgt = new[0], new[1]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents=0
max_n_tokens=0
max_n_sents = max(max_n_sents, len(tgt))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
if (count > 6):
return src_elements + 1e3
return src_elements
def ext_batch_size_fn(new, count):
if (len(new) == 4):
pass
src, labels = new[0], new[4]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents = 0
max_n_tokens = 0
max_n_sents = max(max_n_sents, len(src))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
return src_elements
class Dataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.datasets = datasets
self.batch_size = batch_size
self.device = device
self.shuffle = shuffle
self.is_test = is_test
self.use_topic_modelling = args.use_topic_modelling
self.cur_iter = self._next_dataset_iterator(datasets)
assert self.cur_iter is not None
def __iter__(self):
dataset_iter = (d for d in self.datasets)
while self.cur_iter is not None:
for batch in self.cur_iter:
yield batch
self.cur_iter = self._next_dataset_iterator(dataset_iter)
def _next_dataset_iterator(self, dataset_iter):
try:
# Drop the current dataset for decreasing memory
if hasattr(self, "cur_dataset"):
self.cur_dataset = None
gc.collect()
del self.cur_dataset
gc.collect()
self.cur_dataset = next(dataset_iter)
except StopIteration:
return None
return DataIterator(args = self.args,
dataset=self.cur_dataset, batch_size=self.batch_size,
device=self.device, shuffle=self.shuffle, is_test=self.is_test)
class DataIterator(object):
def __init__(self, args, dataset, batch_size, device=None, is_test=False,
shuffle=True):
self.args = args
self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset
self.iterations = 0
self.device = device
self.shuffle = shuffle
self.sort_key = lambda x: len(x[1])
self._iterations_this_epoch = 0
if (self.args.task == 'abs'):
self.batch_size_fn = abs_batch_size_fn
else:
self.batch_size_fn = ext_batch_size_fn
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1]+[2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if(not self.args.use_interval):
segs=[0]*len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
try:
topics = ex['topics']
except KeyError:
print('Warning: topics are not presented!')
topics = None
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
# src_txt = src_txt[:max_sent_id]
if(is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
else:
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if(len(ex['src'])==0):
continue
ex = self.preprocess(ex, self.is_test)
if(ex is None):
continue
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def batch(self, data, batch_size):
"""Yield elements from data in chunks of batch_size."""
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = self.batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if(len(b)==0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
class TextDataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.batch_size = batch_size
self.device = device
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1] + [2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if (not self.args.use_interval):
segs = [0] * len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
topics = ex['topics']
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
# src_txt = src_txt[:max_sent_id]
if (is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
else:
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if (len(ex['src']) == 0):
continue
ex = self.preprocess(ex, self.is_test)
if (ex is None):
continue
minibatch.append(ex)
size_so_far = simple_batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = batch(p_batch, self.batch_size)
p_batch = batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if (len(b) == 0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
| 33.522222
| 104
| 0.562678
|
import bisect
import gc
import glob
import pickle
import random
import torch
from others.logging import logger
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
np.random.seed(2018)
class Batch(object):
def _pad(self, data, pad_id, width=-1):
if (width == -1):
width = max(len(d) for d in data)
rtn_data = [d + [pad_id] * (width - len(d)) for d in data]
return rtn_data
def __init__(self, data=None, device=None, is_test=False):
if data is not None:
self.batch_size = len(data)
pre_src = [x[0] for x in data]
pre_tgt = [x[1] for x in data]
pre_segs = [x[2] for x in data]
pre_clss = [x[3] for x in data]
pre_src_sent_labels = [x[4] for x in data]
src = torch.tensor(self._pad(pre_src, 0))
tgt = torch.tensor(self._pad(pre_tgt, 0))
segs = torch.tensor(self._pad(pre_segs, 0))
try:
mask_src = 1 - (src == 0)
mask_tgt = 1 - (tgt == 0)
except RuntimeError as err:
if 'Subtraction, the `-` operator, with a bool tensor is not supported' not in str(err):
raise err
mask_src = ~(src == 0)
mask_tgt = ~(tgt == 0)
clss = torch.tensor(self._pad(pre_clss, -1))
src_sent_labels = torch.tensor(self._pad(pre_src_sent_labels, 0))
try:
mask_cls = 1 - (clss == -1)
except RuntimeError as err:
if 'Subtraction, the `-` operator, with a bool tensor is not supported' not in str(err):
raise err
mask_cls = ~(clss == -1)
clss[clss == -1] = 0
setattr(self, 'clss', clss.to(device))
setattr(self, 'mask_cls', mask_cls.to(device))
setattr(self, 'src_sent_labels', src_sent_labels.to(device))
setattr(self, 'src', src.to(device))
setattr(self, 'tgt', tgt.to(device))
setattr(self, 'segs', segs.to(device))
setattr(self, 'mask_src', mask_src.to(device))
setattr(self, 'mask_tgt', mask_tgt.to(device))
if (is_test) or True:
src_str = [x[-3] for x in data]
setattr(self, 'src_str', src_str)
tgt_str = [x[-2] for x in data]
setattr(self, 'tgt_str', tgt_str)
topics = [x[-1] for x in data]
setattr(self, 'topics', topics)
def __len__(self):
return self.batch_size
def load_dataset(args, corpus_type, shuffle):
assert corpus_type in ["train", "valid", "test"]
def _lazy_dataset_loader(pt_file, corpus_type, use_topic_modelling):
dataset = torch.load(pt_file)
et
pts = sorted(glob.glob(args.bert_data_path + '.' + corpus_type + '.[0-9]*.pt'))
if pts:
if (shuffle):
random.shuffle(pts)
for pt in pts:
yield _lazy_dataset_loader(pt, corpus_type, args.use_topic_modelling)
else:
pt = args.bert_data_path + '.' + corpus_type + '.pt'
yield _lazy_dataset_loader(pt, corpus_type, args.use_topic_modelling)
def abs_batch_size_fn(new, count):
src, tgt = new[0], new[1]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents=0
max_n_tokens=0
max_n_sents = max(max_n_sents, len(tgt))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
if (count > 6):
return src_elements + 1e3
return src_elements
def ext_batch_size_fn(new, count):
if (len(new) == 4):
pass
src, labels = new[0], new[4]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents = 0
max_n_tokens = 0
max_n_sents = max(max_n_sents, len(src))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
return src_elements
class Dataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.datasets = datasets
self.batch_size = batch_size
self.device = device
self.shuffle = shuffle
self.is_test = is_test
self.use_topic_modelling = args.use_topic_modelling
self.cur_iter = self._next_dataset_iterator(datasets)
assert self.cur_iter is not None
def __iter__(self):
dataset_iter = (d for d in self.datasets)
while self.cur_iter is not None:
for batch in self.cur_iter:
yield batch
self.cur_iter = self._next_dataset_iterator(dataset_iter)
def _next_dataset_iterator(self, dataset_iter):
try:
if hasattr(self, "cur_dataset"):
self.cur_dataset = None
gc.collect()
del self.cur_dataset
gc.collect()
self.cur_dataset = next(dataset_iter)
except StopIteration:
return None
return DataIterator(args = self.args,
dataset=self.cur_dataset, batch_size=self.batch_size,
device=self.device, shuffle=self.shuffle, is_test=self.is_test)
class DataIterator(object):
def __init__(self, args, dataset, batch_size, device=None, is_test=False,
shuffle=True):
self.args = args
self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset
self.iterations = 0
self.device = device
self.shuffle = shuffle
self.sort_key = lambda x: len(x[1])
self._iterations_this_epoch = 0
if (self.args.task == 'abs'):
self.batch_size_fn = abs_batch_size_fn
else:
self.batch_size_fn = ext_batch_size_fn
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1]+[2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if(not self.args.use_interval):
segs=[0]*len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
try:
topics = ex['topics']
except KeyError:
print('Warning: topics are not presented!')
topics = None
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
if(is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
else:
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if(len(ex['src'])==0):
continue
ex = self.preprocess(ex, self.is_test)
if(ex is None):
continue
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def batch(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = self.batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if(len(b)==0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
class TextDataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.batch_size = batch_size
self.device = device
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1] + [2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if (not self.args.use_interval):
segs = [0] * len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
topics = ex['topics']
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
if (is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
else:
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if (len(ex['src']) == 0):
continue
ex = self.preprocess(ex, self.is_test)
if (ex is None):
continue
minibatch.append(ex)
size_so_far = simple_batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = batch(p_batch, self.batch_size)
p_batch = batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if (len(b) == 0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
| true
| true
|
1c4831c92bce75f0f171e01cc007b17f4ff0e01b
| 291
|
py
|
Python
|
tests/urls.py
|
movermeyer/django-umanage
|
9327772efbf1f13c05b22afcbccaebb2c8595850
|
[
"MIT"
] | 4
|
2015-04-21T01:01:23.000Z
|
2016-01-15T08:41:56.000Z
|
tests/urls.py
|
movermeyer/django-umanage
|
9327772efbf1f13c05b22afcbccaebb2c8595850
|
[
"MIT"
] | 1
|
2018-03-04T20:46:41.000Z
|
2018-03-04T20:46:41.000Z
|
tests/urls.py
|
movermeyer/django-umanage
|
9327772efbf1f13c05b22afcbccaebb2c8595850
|
[
"MIT"
] | 3
|
2017-08-14T01:53:44.000Z
|
2019-06-06T17:47:49.000Z
|
from django.conf.urls import include
from django.conf.urls import url
urlpatterns = [
url(r'', include('umanage.auth.urls')),
url(r'', include('umanage.forgot_username.urls')),
url(r'', include('umanage.forgot_password.urls')),
url(r'^account', include('umanage.urls')),
]
| 26.454545
| 54
| 0.680412
|
from django.conf.urls import include
from django.conf.urls import url
urlpatterns = [
url(r'', include('umanage.auth.urls')),
url(r'', include('umanage.forgot_username.urls')),
url(r'', include('umanage.forgot_password.urls')),
url(r'^account', include('umanage.urls')),
]
| true
| true
|
1c4833f8843bf3183d8117ca678adcf0a5a840f2
| 4,689
|
py
|
Python
|
kubernetes_spawner/swagger_client/models/unversioned_list_meta.py
|
AdrianGPrado/k8s-jupyterhub-spawner
|
f3d28adf1d70102bc60ba57f5737a7ec864537d9
|
[
"Apache-2.0"
] | 16
|
2016-09-18T21:20:49.000Z
|
2020-02-15T06:28:03.000Z
|
kubernetes_spawner/swagger_client/models/unversioned_list_meta.py
|
AdrianGPrado/k8s-jupyterhub-spawner
|
f3d28adf1d70102bc60ba57f5737a7ec864537d9
|
[
"Apache-2.0"
] | 2
|
2016-11-10T17:51:55.000Z
|
2018-03-18T05:38:22.000Z
|
kubernetes_spawner/swagger_client/models/unversioned_list_meta.py
|
AdrianGPrado/k8s-jupyterhub-spawner
|
f3d28adf1d70102bc60ba57f5737a7ec864537d9
|
[
"Apache-2.0"
] | 12
|
2016-09-28T20:48:56.000Z
|
2020-01-17T04:50:59.000Z
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class UnversionedListMeta(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
UnversionedListMeta - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'self_link': 'str',
'resource_version': 'str'
}
self.attribute_map = {
'self_link': 'selfLink',
'resource_version': 'resourceVersion'
}
self._self_link = None
self._resource_version = None
@property
def self_link(self):
"""
Gets the self_link of this UnversionedListMeta.
SelfLink is a URL representing this object. Populated by the system. Read-only.
:return: The self_link of this UnversionedListMeta.
:rtype: str
"""
return self._self_link
@self_link.setter
def self_link(self, self_link):
"""
Sets the self_link of this UnversionedListMeta.
SelfLink is a URL representing this object. Populated by the system. Read-only.
:param self_link: The self_link of this UnversionedListMeta.
:type: str
"""
self._self_link = self_link
@property
def resource_version(self):
"""
Gets the resource_version of this UnversionedListMeta.
String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
:return: The resource_version of this UnversionedListMeta.
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""
Sets the resource_version of this UnversionedListMeta.
String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
:param resource_version: The resource_version of this UnversionedListMeta.
:type: str
"""
self._resource_version = resource_version
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 33.492857
| 369
| 0.629132
|
from pprint import pformat
from six import iteritems
class UnversionedListMeta(object):
def __init__(self):
self.swagger_types = {
'self_link': 'str',
'resource_version': 'str'
}
self.attribute_map = {
'self_link': 'selfLink',
'resource_version': 'resourceVersion'
}
self._self_link = None
self._resource_version = None
@property
def self_link(self):
return self._self_link
@self_link.setter
def self_link(self, self_link):
self._self_link = self_link
@property
def resource_version(self):
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
self._resource_version = resource_version
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c48346c9bf89ec65e95fe3f36582e8e59a98ca7
| 2,281
|
py
|
Python
|
account/forms.py
|
mijiFernandes/pa4
|
3850c5bf6af1f89cd3876b08c9a9fa319d583fae
|
[
"Unlicense"
] | null | null | null |
account/forms.py
|
mijiFernandes/pa4
|
3850c5bf6af1f89cd3876b08c9a9fa319d583fae
|
[
"Unlicense"
] | null | null | null |
account/forms.py
|
mijiFernandes/pa4
|
3850c5bf6af1f89cd3876b08c9a9fa319d583fae
|
[
"Unlicense"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext_lazy as _
from account.models import User, UserManager
class UserCreationForm(forms.ModelForm):
# 사용자 생성 폼
username = forms.CharField(
label=_('Username'),
required=True,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': _('Username'),
'required': 'True',
}
)
)
password1 = forms.CharField(
label=_('Password'),
widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': _('Password'),
'required': 'True',
}
)
)
password2 = forms.CharField(
label=_('Password confirmation'),
widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': _('Password confirmation'),
'required': 'True',
}
)
)
class Meta:
model = User
fields = ('username',)
def clean_password2(self):
# 두 비밀번호 입력 일치 확인
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
# 비밀번호 변경 폼
password = ReadOnlyPasswordHashField(
label=_('Password')
)
class Meta:
model = User
fields = ('username', 'password', 'is_active', 'is_superuser')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
| 29.24359
| 73
| 0.577378
|
from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext_lazy as _
from account.models import User, UserManager
class UserCreationForm(forms.ModelForm):
username = forms.CharField(
label=_('Username'),
required=True,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': _('Username'),
'required': 'True',
}
)
)
password1 = forms.CharField(
label=_('Password'),
widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': _('Password'),
'required': 'True',
}
)
)
password2 = forms.CharField(
label=_('Password confirmation'),
widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'placeholder': _('Password confirmation'),
'required': 'True',
}
)
)
class Meta:
model = User
fields = ('username',)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
# 비밀번호 변경 폼
password = ReadOnlyPasswordHashField(
label=_('Password')
)
class Meta:
model = User
fields = ('username', 'password', 'is_active', 'is_superuser')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
| true
| true
|
1c4834ad7231269805601500145db87c940a6876
| 2,080
|
py
|
Python
|
src/aiosdnotify/__init__.py
|
vivienm/python-aiosdnotify
|
b0fe62bccf55041b00f65d395bea96c0964de9a4
|
[
"MIT"
] | null | null | null |
src/aiosdnotify/__init__.py
|
vivienm/python-aiosdnotify
|
b0fe62bccf55041b00f65d395bea96c0964de9a4
|
[
"MIT"
] | null | null | null |
src/aiosdnotify/__init__.py
|
vivienm/python-aiosdnotify
|
b0fe62bccf55041b00f65d395bea96c0964de9a4
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import os
import socket
from abc import ABC, abstractmethod
from asyncio.events import AbstractEventLoop
from typing import Optional, Union
logger = logging.getLogger(__name__)
__version__ = "0.1.0"
class AbstractNotifier(ABC):
@abstractmethod
async def connect(self) -> None:
pass
@abstractmethod
async def close(self) -> None:
pass
@abstractmethod
async def notify(self, state: Union[str, bytes]) -> None:
pass
async def __aenter__(self):
await self.connect()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
class SystemdNotifier(AbstractNotifier):
__slots__ = (
"addr",
"sock",
"loop",
)
def __init__(
self,
addr: Optional[str] = None,
*,
loop: Optional[AbstractEventLoop] = None,
) -> None:
self.addr = addr or os.environ["NOTIFY_SOCKET"]
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.loop = loop or asyncio.get_event_loop()
async def connect(self) -> None:
await self.loop.sock_connect(self.sock, self.addr)
async def close(self) -> None:
self.sock.close()
async def notify(self, state: Union[str, bytes]) -> None:
if isinstance(state, str):
state = state.encode()
await self.loop.sock_sendall(self.sock, state)
class DummyNotifier(AbstractNotifier):
async def connect(self) -> None:
pass
async def close(self) -> None:
pass
async def notify(self, state: Union[str, bytes]) -> None:
pass
def notifier(
addr: Optional[str] = None,
*,
loop: Optional[AbstractEventLoop] = None,
) -> AbstractNotifier:
if addr or "NOTIFY_SOCKET" in os.environ:
return SystemdNotifier(addr=addr, loop=loop)
else:
logger.warning(
"Could not determine systemd socket address, "
"systemd notifications are disabled",
)
return DummyNotifier()
| 23.111111
| 68
| 0.625962
|
import asyncio
import logging
import os
import socket
from abc import ABC, abstractmethod
from asyncio.events import AbstractEventLoop
from typing import Optional, Union
logger = logging.getLogger(__name__)
__version__ = "0.1.0"
class AbstractNotifier(ABC):
@abstractmethod
async def connect(self) -> None:
pass
@abstractmethod
async def close(self) -> None:
pass
@abstractmethod
async def notify(self, state: Union[str, bytes]) -> None:
pass
async def __aenter__(self):
await self.connect()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
class SystemdNotifier(AbstractNotifier):
__slots__ = (
"addr",
"sock",
"loop",
)
def __init__(
self,
addr: Optional[str] = None,
*,
loop: Optional[AbstractEventLoop] = None,
) -> None:
self.addr = addr or os.environ["NOTIFY_SOCKET"]
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.loop = loop or asyncio.get_event_loop()
async def connect(self) -> None:
await self.loop.sock_connect(self.sock, self.addr)
async def close(self) -> None:
self.sock.close()
async def notify(self, state: Union[str, bytes]) -> None:
if isinstance(state, str):
state = state.encode()
await self.loop.sock_sendall(self.sock, state)
class DummyNotifier(AbstractNotifier):
async def connect(self) -> None:
pass
async def close(self) -> None:
pass
async def notify(self, state: Union[str, bytes]) -> None:
pass
def notifier(
addr: Optional[str] = None,
*,
loop: Optional[AbstractEventLoop] = None,
) -> AbstractNotifier:
if addr or "NOTIFY_SOCKET" in os.environ:
return SystemdNotifier(addr=addr, loop=loop)
else:
logger.warning(
"Could not determine systemd socket address, "
"systemd notifications are disabled",
)
return DummyNotifier()
| true
| true
|
1c483537f12b82976dc803943a2780c94717e1c9
| 2,119
|
py
|
Python
|
flaskapp/utils.py
|
crockmitnic/question-paper-generator
|
3f5339226aedd4332c562913945a08cdb45983b0
|
[
"MIT"
] | 6
|
2020-08-02T20:58:34.000Z
|
2022-03-23T20:33:20.000Z
|
flaskapp/utils.py
|
crockmitnic/question-paper-generator
|
3f5339226aedd4332c562913945a08cdb45983b0
|
[
"MIT"
] | 209
|
2020-02-12T17:09:15.000Z
|
2021-06-03T20:34:35.000Z
|
flaskapp/utils.py
|
crockmitnic/question-paper-generator
|
3f5339226aedd4332c562913945a08cdb45983b0
|
[
"MIT"
] | 54
|
2020-02-18T14:54:35.000Z
|
2021-09-05T06:31:12.000Z
|
import os
from enum import Enum
from flask import url_for
from flask_login import current_user
from itsdangerous import URLSafeSerializer
json_url = URLSafeSerializer(os.environ.get("SECRET_KEY", "secret_key"))
class AbstractEnum(Enum):
@classmethod
def from_string(cls, value):
return cls.__members__[value]
class CognitiveEnum(AbstractEnum):
Knowledge = 1
Comprehension = 2
Application = 3
class DifficultyEnum(AbstractEnum):
Easy = 1
Medium = 2
Hard = 3
class QuestionTypeEnum(AbstractEnum):
sub = 1
mcq = 2
def profile_path():
"""get the profile path of user
Returns:
URL : if user is authentic then return url of user
"""
if current_user.is_authenticated:
return url_for("static",
filename="profile_pics/" + current_user.image_file)
return ""
default_instructions = [
"Write your name and student number in the space provided",
"Make sure your mobile phone is switched off and place it at the front together with\
any bags, books, coats etc. Then find your seat.",
"Remember that talking is not allowed at any time in the exam hall.",
"Listen carefully to instructions. Students are required to comply with\
the instructions of invigilators at all times.",
"You are not permitted to share stationery, \
calculators or any other materials during the examination.",
"If you have a question or need more papers, raise your hand and a teacher\
will come to you. Teachers will not give hints or answers, so please do not ask for them.",
"Stop writing immediately when the teacher says it is the end of the exam.",
"Leave the exam hall quickly and quietly. Remember to take all your belongings with you.\
(Remember to collect all your belongings from holding rooms.)\
You must remain silent until after you have exited the building.",
"Remember! Any form of cheating is not allowed and action will be taken.",
]
| 33.634921
| 116
| 0.668712
|
import os
from enum import Enum
from flask import url_for
from flask_login import current_user
from itsdangerous import URLSafeSerializer
json_url = URLSafeSerializer(os.environ.get("SECRET_KEY", "secret_key"))
class AbstractEnum(Enum):
@classmethod
def from_string(cls, value):
return cls.__members__[value]
class CognitiveEnum(AbstractEnum):
Knowledge = 1
Comprehension = 2
Application = 3
class DifficultyEnum(AbstractEnum):
Easy = 1
Medium = 2
Hard = 3
class QuestionTypeEnum(AbstractEnum):
sub = 1
mcq = 2
def profile_path():
if current_user.is_authenticated:
return url_for("static",
filename="profile_pics/" + current_user.image_file)
return ""
default_instructions = [
"Write your name and student number in the space provided",
"Make sure your mobile phone is switched off and place it at the front together with\
any bags, books, coats etc. Then find your seat.",
"Remember that talking is not allowed at any time in the exam hall.",
"Listen carefully to instructions. Students are required to comply with\
the instructions of invigilators at all times.",
"You are not permitted to share stationery, \
calculators or any other materials during the examination.",
"If you have a question or need more papers, raise your hand and a teacher\
will come to you. Teachers will not give hints or answers, so please do not ask for them.",
"Stop writing immediately when the teacher says it is the end of the exam.",
"Leave the exam hall quickly and quietly. Remember to take all your belongings with you.\
(Remember to collect all your belongings from holding rooms.)\
You must remain silent until after you have exited the building.",
"Remember! Any form of cheating is not allowed and action will be taken.",
]
| true
| true
|
1c48357c46224c703897df7c26b65ca54e06f218
| 546
|
py
|
Python
|
layout/serializers/sidebarSubsectionSerializer.py
|
rankrh/soli
|
8f19945a175106064591d09a53d07fcbfa26b7da
|
[
"MIT"
] | null | null | null |
layout/serializers/sidebarSubsectionSerializer.py
|
rankrh/soli
|
8f19945a175106064591d09a53d07fcbfa26b7da
|
[
"MIT"
] | null | null | null |
layout/serializers/sidebarSubsectionSerializer.py
|
rankrh/soli
|
8f19945a175106064591d09a53d07fcbfa26b7da
|
[
"MIT"
] | 2
|
2019-09-07T15:10:14.000Z
|
2020-09-04T01:51:19.000Z
|
from rest_framework import serializers
class SidebarSubsectionSerializer(serializers.Serializer):
name = serializers.CharField(read_only=True, max_length=32)
id = serializers.CharField(read_only=True, max_length=16)
icon = serializers.CharField(read_only=True, max_length=32)
url = serializers.CharField(read_only=True, max_length=64)
url_params = serializers.ListField(
allow_empty=True, read_only=True, required=False, max_length=64
)
subsection_name = serializers.CharField(read_only=True, max_length=32)
| 42
| 74
| 0.776557
|
from rest_framework import serializers
class SidebarSubsectionSerializer(serializers.Serializer):
name = serializers.CharField(read_only=True, max_length=32)
id = serializers.CharField(read_only=True, max_length=16)
icon = serializers.CharField(read_only=True, max_length=32)
url = serializers.CharField(read_only=True, max_length=64)
url_params = serializers.ListField(
allow_empty=True, read_only=True, required=False, max_length=64
)
subsection_name = serializers.CharField(read_only=True, max_length=32)
| true
| true
|
1c4836932fda8321a41ab0f347753291a7915da9
| 2,595
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/types/input_photo.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_photo.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_photo.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputPhoto(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.InputPhoto`.
Details:
- Layer: ``117``
- ID: ``0x3bb3b94a``
Parameters:
id: ``int`` ``64-bit``
access_hash: ``int`` ``64-bit``
file_reference: ``bytes``
"""
__slots__: List[str] = ["id", "access_hash", "file_reference"]
ID = 0x3bb3b94a
QUALNAME = "types.InputPhoto"
def __init__(self, *, id: int, access_hash: int, file_reference: bytes) -> None:
self.id = id # long
self.access_hash = access_hash # long
self.file_reference = file_reference # bytes
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputPhoto":
# No flags
id = Long.read(data)
access_hash = Long.read(data)
file_reference = Bytes.read(data)
return InputPhoto(id=id, access_hash=access_hash, file_reference=file_reference)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Long(self.id))
data.write(Long(self.access_hash))
data.write(Bytes(self.file_reference))
return data.getvalue()
| 32.037037
| 103
| 0.614644
|
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
| true
| true
|
1c4836e8d6c54e7d96ea2a3dadcfa5b15943f85b
| 238
|
py
|
Python
|
ex010.py
|
Vhassan/Python-Cheatsheet
|
526f5fcfc8e93d0aca139ca6d8d4f20851ab16f5
|
[
"MIT"
] | null | null | null |
ex010.py
|
Vhassan/Python-Cheatsheet
|
526f5fcfc8e93d0aca139ca6d8d4f20851ab16f5
|
[
"MIT"
] | null | null | null |
ex010.py
|
Vhassan/Python-Cheatsheet
|
526f5fcfc8e93d0aca139ca6d8d4f20851ab16f5
|
[
"MIT"
] | null | null | null |
#Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dólares ela pode comprar.
real = float(input('quanto vc tem na carteira: R$'))
print('A conversão para moeda dolar é : US${:.2f}'.format((real/3.27)))
| 59.5
| 112
| 0.735294
|
real = float(input('quanto vc tem na carteira: R$'))
print('A conversão para moeda dolar é : US${:.2f}'.format((real/3.27)))
| true
| true
|
1c4837316def2f2de591b95262f04bd0a307b76b
| 3,392
|
py
|
Python
|
ExtendedAIModule/rhombus_services/arg_parser.py
|
Bricktheworld/rhombus-api-examples-python
|
b4778c3a635786070ee10a3131b1a1f7f6ebac36
|
[
"MIT"
] | null | null | null |
ExtendedAIModule/rhombus_services/arg_parser.py
|
Bricktheworld/rhombus-api-examples-python
|
b4778c3a635786070ee10a3131b1a1f7f6ebac36
|
[
"MIT"
] | 20
|
2021-06-08T22:29:20.000Z
|
2022-01-15T19:51:46.000Z
|
ExtendedAIModule/rhombus_services/arg_parser.py
|
Bricktheworld/rhombus-api-examples-python
|
b4778c3a635786070ee10a3131b1a1f7f6ebac36
|
[
"MIT"
] | 9
|
2021-06-08T22:15:35.000Z
|
2022-03-03T05:19:58.000Z
|
###################################################################################
# Copyright (c) 2021 Rhombus Systems #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
###################################################################################
# Import type hints
from typing import List
# Import argparse to parse our arguments for us easily
import argparse
def parse_arguments(argv: List[str]) -> argparse.Namespace:
"""Parse the command line args.
:param argv: The Commandline arguments from the user, which can be retrieved via sys.argv[1:]
"""
# Create our parser
parser = argparse.ArgumentParser(description='Pulls footage from a camera on LAN and stores it to the filesystem.')
# The --api_key or -a param will hold our API key
parser.add_argument('--api_key', '-a', type=str, required=True, help='Rhombus API key')
# The --camera_uuid or -c param will hold the UUID of the camera which will be processed
parser.add_argument('--camera_uuid', '-c', type=str, required=True, help='Device Id to pull footage from')
# The --interval or -i param will hold how often to poll the camera for new footage in seconds, by default 10 seconds
parser.add_argument('--interval', '-i', type=int, required=False,
help='How often to poll the camera for new footage in seconds, by default 10 seconds',
default=10)
# The --connection_type or -t param will hold the ConnectionType to the camera. It is not recommended to run in WAN mode unless this python server is running on a separate network from the camera
parser.add_argument('--connection_type', '-t', type=str, required=False,
help='The connection type to the camera, either LAN or WAN (default LAN)', default="LAN")
# Return all of our arguments
return parser.parse_args(argv)
| 60.571429
| 199
| 0.583137
| true
| true
|
|
1c48374373ae16db6dbcfd16316661e717dab9fc
| 5,230
|
py
|
Python
|
tests/input/pdf/test_pdf.py
|
asweeney86/preview-generator
|
354cbac1c131ebbb81cd9cfd9b4bc0c184d10103
|
[
"MIT"
] | null | null | null |
tests/input/pdf/test_pdf.py
|
asweeney86/preview-generator
|
354cbac1c131ebbb81cd9cfd9b4bc0c184d10103
|
[
"MIT"
] | null | null | null |
tests/input/pdf/test_pdf.py
|
asweeney86/preview-generator
|
354cbac1c131ebbb81cd9cfd9b4bc0c184d10103
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import re
import shutil
import typing
from PIL import Image
from PyPDF2 import PdfFileReader
import PyPDF2.utils
import pytest
from preview_generator.exception import UnavailablePreviewType
from preview_generator.manager import PreviewManager
from tests import test_utils
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CACHE_DIR = "/tmp/preview-generator-tests/cache"
PDF_FILE_PATH = os.path.join(CURRENT_DIR, "the_pdf.pdf")
PDF_FILE_PATH__ENCRYPTED = os.path.join(CURRENT_DIR, "the_pdf.encrypted.pdf")
PDF_FILE_PATH__A4 = os.path.join(CURRENT_DIR, "qpdfconvert.pdf")
def setup_function(function: typing.Callable) -> None:
shutil.rmtree(CACHE_DIR, ignore_errors=True)
def test_to_jpeg() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=PDF_FILE_PATH, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height in range(453, 455)
assert jpeg.width == 321
def test_to_jpeg__encrypted_pdf() -> None:
with pytest.raises(PyPDF2.utils.PdfReadError): # ensure file is encrpyted
pdf = PdfFileReader(PDF_FILE_PATH__ENCRYPTED)
pdf.getPage(0)
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=PDF_FILE_PATH__ENCRYPTED, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height in range(453, 455)
assert jpeg.width == 321
def test_to_jpeg_no_size() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(file_path=PDF_FILE_PATH, force=True)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width in range(180, 182)
def test_to_text() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_text_preview(file_path=PDF_FILE_PATH) is False
with pytest.raises(UnavailablePreviewType):
manager.get_text_preview(file_path=PDF_FILE_PATH, force=True)
def test_to_json() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_json_preview(file_path=PDF_FILE_PATH) is True
manager.get_json_preview(file_path=PDF_FILE_PATH, force=True)
# TODO - G.M - 2018-11-06 - To be completed
def test_to_pdf() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=PDF_FILE_PATH) is True
manager.get_pdf_preview(file_path=PDF_FILE_PATH, force=True)
# TODO - G.M - 2018-11-06 - To be completed
def test_to_pdf_one_page() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=PDF_FILE_PATH) is True
path_0 = manager.get_pdf_preview(file_path=PDF_FILE_PATH, page=0, force=True)
assert os.path.exists(path_0) is True
assert os.path.getsize(path_0) > 1000 # verify if the size of the pdf refer to a normal content
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN_WITH_PAGE__PDF, path_0)
pdf = PdfFileReader(open(path_0, "rb"))
assert pdf.getNumPages() == 1
path_1 = manager.get_pdf_preview(file_path=PDF_FILE_PATH, page=1, force=True)
assert os.path.exists(path_1) is True
assert os.path.getsize(path_1) > 1000 # verify if the size of the pdf refer to a normal content
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN_WITH_PAGE__PDF, path_1)
pdf = PdfFileReader(open(path_1, "rb"))
assert pdf.getNumPages() == 1
def test_algorithm4() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH__A4) is True
path_to_file = manager.get_jpeg_preview(file_path=PDF_FILE_PATH__A4, force=True)
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width in range(180, 182)
def test_get_nb_page() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH)
assert nb_page == 2
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH__ENCRYPTED)
assert nb_page == 2
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH__A4)
assert nb_page == 2
| 39.621212
| 100
| 0.759656
|
import os
import re
import shutil
import typing
from PIL import Image
from PyPDF2 import PdfFileReader
import PyPDF2.utils
import pytest
from preview_generator.exception import UnavailablePreviewType
from preview_generator.manager import PreviewManager
from tests import test_utils
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CACHE_DIR = "/tmp/preview-generator-tests/cache"
PDF_FILE_PATH = os.path.join(CURRENT_DIR, "the_pdf.pdf")
PDF_FILE_PATH__ENCRYPTED = os.path.join(CURRENT_DIR, "the_pdf.encrypted.pdf")
PDF_FILE_PATH__A4 = os.path.join(CURRENT_DIR, "qpdfconvert.pdf")
def setup_function(function: typing.Callable) -> None:
shutil.rmtree(CACHE_DIR, ignore_errors=True)
def test_to_jpeg() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=PDF_FILE_PATH, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height in range(453, 455)
assert jpeg.width == 321
def test_to_jpeg__encrypted_pdf() -> None:
with pytest.raises(PyPDF2.utils.PdfReadError):
pdf = PdfFileReader(PDF_FILE_PATH__ENCRYPTED)
pdf.getPage(0)
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=PDF_FILE_PATH__ENCRYPTED, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height in range(453, 455)
assert jpeg.width == 321
def test_to_jpeg_no_size() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(file_path=PDF_FILE_PATH, force=True)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width in range(180, 182)
def test_to_text() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_text_preview(file_path=PDF_FILE_PATH) is False
with pytest.raises(UnavailablePreviewType):
manager.get_text_preview(file_path=PDF_FILE_PATH, force=True)
def test_to_json() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_json_preview(file_path=PDF_FILE_PATH) is True
manager.get_json_preview(file_path=PDF_FILE_PATH, force=True)
def test_to_pdf() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=PDF_FILE_PATH) is True
manager.get_pdf_preview(file_path=PDF_FILE_PATH, force=True)
def test_to_pdf_one_page() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=PDF_FILE_PATH) is True
path_0 = manager.get_pdf_preview(file_path=PDF_FILE_PATH, page=0, force=True)
assert os.path.exists(path_0) is True
assert os.path.getsize(path_0) > 1000
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN_WITH_PAGE__PDF, path_0)
pdf = PdfFileReader(open(path_0, "rb"))
assert pdf.getNumPages() == 1
path_1 = manager.get_pdf_preview(file_path=PDF_FILE_PATH, page=1, force=True)
assert os.path.exists(path_1) is True
assert os.path.getsize(path_1) > 1000
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN_WITH_PAGE__PDF, path_1)
pdf = PdfFileReader(open(path_1, "rb"))
assert pdf.getNumPages() == 1
def test_algorithm4() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH__A4) is True
path_to_file = manager.get_jpeg_preview(file_path=PDF_FILE_PATH__A4, force=True)
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width in range(180, 182)
def test_get_nb_page() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH)
assert nb_page == 2
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH__ENCRYPTED)
assert nb_page == 2
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH__A4)
assert nb_page == 2
| true
| true
|
1c48380c9cbf94328974481e6bfb12901edaac59
| 4,307
|
py
|
Python
|
tests/app/service/test_sender.py
|
cds-snc/notification-api
|
b1c1064f291eb860b494c3fa65ac256ad70bf47c
|
[
"MIT"
] | 41
|
2019-11-28T16:58:41.000Z
|
2022-01-28T21:11:16.000Z
|
tests/app/service/test_sender.py
|
cds-snc/notification-api
|
b1c1064f291eb860b494c3fa65ac256ad70bf47c
|
[
"MIT"
] | 1,083
|
2019-07-08T12:57:24.000Z
|
2022-03-08T18:53:40.000Z
|
tests/app/service/test_sender.py
|
cds-snc/notification-api
|
b1c1064f291eb860b494c3fa65ac256ad70bf47c
|
[
"MIT"
] | 9
|
2020-01-24T19:56:43.000Z
|
2022-01-27T21:36:53.000Z
|
import pytest
from flask import current_app
from app.dao.services_dao import dao_add_user_to_service
from app.models import EMAIL_TYPE, SMS_TYPE, Notification
from app.service.sender import send_notification_to_service_users
from tests.app.conftest import notify_service as create_notify_service
from tests.app.conftest import sample_service as create_sample_service
from tests.app.db import create_template, create_user
@pytest.mark.parametrize("notification_type", [EMAIL_TYPE, SMS_TYPE])
def test_send_notification_to_service_users_persists_notifications_correctly(
notify_db, notify_db_session, notification_type, sample_user, mocker
):
mocker.patch("app.service.sender.send_notification_to_queue")
notify_service, user = create_notify_service(notify_db, notify_db_session)
service = create_sample_service(notify_db, notify_db_session, user=sample_user)
template = create_template(service, template_type=notification_type)
send_notification_to_service_users(service_id=service.id, template_id=template.id)
to = sample_user.email_address if notification_type == EMAIL_TYPE else sample_user.mobile_number
notification = Notification.query.one()
assert Notification.query.count() == 1
assert notification.to == to
assert str(notification.service_id) == current_app.config["NOTIFY_SERVICE_ID"]
assert notification.template.id == template.id
assert notification.template.template_type == notification_type
assert notification.notification_type == notification_type
assert notification.reply_to_text == notify_service.get_default_reply_to_email_address()
def test_send_notification_to_service_users_sends_to_queue(notify_db, notify_db_session, sample_user, mocker):
send_mock = mocker.patch("app.service.sender.send_notification_to_queue")
create_notify_service(notify_db, notify_db_session)
service = create_sample_service(notify_db, notify_db_session, user=sample_user)
template = create_template(service, template_type=EMAIL_TYPE)
send_notification_to_service_users(service_id=service.id, template_id=template.id)
assert send_mock.called
assert send_mock.call_count == 1
def test_send_notification_to_service_users_includes_user_fields_in_personalisation(
notify_db, notify_db_session, sample_user, mocker
):
persist_mock = mocker.patch("app.service.sender.persist_notification")
mocker.patch("app.service.sender.send_notification_to_queue")
create_notify_service(notify_db, notify_db_session)
service = create_sample_service(notify_db, notify_db_session, user=sample_user)
template = create_template(service, template_type=EMAIL_TYPE)
send_notification_to_service_users(
service_id=service.id,
template_id=template.id,
include_user_fields=["name", "email_address", "state"],
)
persist_call = persist_mock.call_args_list[0][1]
assert len(persist_mock.call_args_list) == 1
assert persist_call["personalisation"] == {
"name": sample_user.name,
"email_address": sample_user.email_address,
"state": sample_user.state,
}
def test_send_notification_to_service_users_sends_to_active_users_only(notify_db, notify_db_session, mocker):
mocker.patch("app.service.sender.send_notification_to_queue")
create_notify_service(notify_db, notify_db_session)
first_active_user = create_user(email="foo@bar.com", state="active")
second_active_user = create_user(email="foo1@bar.com", state="active")
pending_user = create_user(email="foo2@bar.com", state="pending")
service = create_sample_service(notify_db, notify_db_session, user=first_active_user)
dao_add_user_to_service(service, second_active_user)
dao_add_user_to_service(service, pending_user)
template = create_template(service, template_type=EMAIL_TYPE)
send_notification_to_service_users(service_id=service.id, template_id=template.id)
notifications = Notification.query.all()
notifications_recipients = [notification.to for notification in notifications]
assert Notification.query.count() == 2
assert pending_user.email_address not in notifications_recipients
assert first_active_user.email_address in notifications_recipients
assert second_active_user.email_address in notifications_recipients
| 46.311828
| 110
| 0.80404
|
import pytest
from flask import current_app
from app.dao.services_dao import dao_add_user_to_service
from app.models import EMAIL_TYPE, SMS_TYPE, Notification
from app.service.sender import send_notification_to_service_users
from tests.app.conftest import notify_service as create_notify_service
from tests.app.conftest import sample_service as create_sample_service
from tests.app.db import create_template, create_user
@pytest.mark.parametrize("notification_type", [EMAIL_TYPE, SMS_TYPE])
def test_send_notification_to_service_users_persists_notifications_correctly(
notify_db, notify_db_session, notification_type, sample_user, mocker
):
mocker.patch("app.service.sender.send_notification_to_queue")
notify_service, user = create_notify_service(notify_db, notify_db_session)
service = create_sample_service(notify_db, notify_db_session, user=sample_user)
template = create_template(service, template_type=notification_type)
send_notification_to_service_users(service_id=service.id, template_id=template.id)
to = sample_user.email_address if notification_type == EMAIL_TYPE else sample_user.mobile_number
notification = Notification.query.one()
assert Notification.query.count() == 1
assert notification.to == to
assert str(notification.service_id) == current_app.config["NOTIFY_SERVICE_ID"]
assert notification.template.id == template.id
assert notification.template.template_type == notification_type
assert notification.notification_type == notification_type
assert notification.reply_to_text == notify_service.get_default_reply_to_email_address()
def test_send_notification_to_service_users_sends_to_queue(notify_db, notify_db_session, sample_user, mocker):
send_mock = mocker.patch("app.service.sender.send_notification_to_queue")
create_notify_service(notify_db, notify_db_session)
service = create_sample_service(notify_db, notify_db_session, user=sample_user)
template = create_template(service, template_type=EMAIL_TYPE)
send_notification_to_service_users(service_id=service.id, template_id=template.id)
assert send_mock.called
assert send_mock.call_count == 1
def test_send_notification_to_service_users_includes_user_fields_in_personalisation(
notify_db, notify_db_session, sample_user, mocker
):
persist_mock = mocker.patch("app.service.sender.persist_notification")
mocker.patch("app.service.sender.send_notification_to_queue")
create_notify_service(notify_db, notify_db_session)
service = create_sample_service(notify_db, notify_db_session, user=sample_user)
template = create_template(service, template_type=EMAIL_TYPE)
send_notification_to_service_users(
service_id=service.id,
template_id=template.id,
include_user_fields=["name", "email_address", "state"],
)
persist_call = persist_mock.call_args_list[0][1]
assert len(persist_mock.call_args_list) == 1
assert persist_call["personalisation"] == {
"name": sample_user.name,
"email_address": sample_user.email_address,
"state": sample_user.state,
}
def test_send_notification_to_service_users_sends_to_active_users_only(notify_db, notify_db_session, mocker):
mocker.patch("app.service.sender.send_notification_to_queue")
create_notify_service(notify_db, notify_db_session)
first_active_user = create_user(email="foo@bar.com", state="active")
second_active_user = create_user(email="foo1@bar.com", state="active")
pending_user = create_user(email="foo2@bar.com", state="pending")
service = create_sample_service(notify_db, notify_db_session, user=first_active_user)
dao_add_user_to_service(service, second_active_user)
dao_add_user_to_service(service, pending_user)
template = create_template(service, template_type=EMAIL_TYPE)
send_notification_to_service_users(service_id=service.id, template_id=template.id)
notifications = Notification.query.all()
notifications_recipients = [notification.to for notification in notifications]
assert Notification.query.count() == 2
assert pending_user.email_address not in notifications_recipients
assert first_active_user.email_address in notifications_recipients
assert second_active_user.email_address in notifications_recipients
| true
| true
|
1c48388e6a7603d89677a9f9449e120b5b428b22
| 15,266
|
py
|
Python
|
Original_Codes/GDL_code-master/models/WGANGP.py
|
TeaKatz/Generative_Deep_Learning
|
1f499e482f78b3d1146b24213e5d558226b8fc6e
|
[
"MIT"
] | 2
|
2021-07-09T16:45:51.000Z
|
2021-10-30T18:00:06.000Z
|
models/WGANGP.py
|
steveive8/Study-Generative-Deep-Learning
|
f62b9150a5e18240dd22816918f2ce6abf807d58
|
[
"MIT"
] | 10
|
2020-09-26T01:22:18.000Z
|
2022-03-12T00:42:42.000Z
|
models/WGANGP.py
|
steveive8/Study-Generative-Deep-Learning
|
f62b9150a5e18240dd22816918f2ce6abf807d58
|
[
"MIT"
] | null | null | null |
from keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, BatchNormalization, LeakyReLU, Dropout, ZeroPadding2D, UpSampling2D
from keras.layers.merge import _Merge
from keras.models import Model, Sequential
from keras import backend as K
from keras.optimizers import Adam, RMSprop
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
from keras.initializers import RandomNormal
from functools import partial
import numpy as np
import json
import os
import pickle
import matplotlib.pyplot as plt
class RandomWeightedAverage(_Merge):
def __init__(self, batch_size):
super().__init__()
self.batch_size = batch_size
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((self.batch_size, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
def __init__(self
, input_dim
, critic_conv_filters
, critic_conv_kernel_size
, critic_conv_strides
, critic_batch_norm_momentum
, critic_activation
, critic_dropout_rate
, critic_learning_rate
, generator_initial_dense_layer_size
, generator_upsample
, generator_conv_filters
, generator_conv_kernel_size
, generator_conv_strides
, generator_batch_norm_momentum
, generator_activation
, generator_dropout_rate
, generator_learning_rate
, optimiser
, grad_weight
, z_dim
, batch_size
):
self.name = 'gan'
self.input_dim = input_dim
self.critic_conv_filters = critic_conv_filters
self.critic_conv_kernel_size = critic_conv_kernel_size
self.critic_conv_strides = critic_conv_strides
self.critic_batch_norm_momentum = critic_batch_norm_momentum
self.critic_activation = critic_activation
self.critic_dropout_rate = critic_dropout_rate
self.critic_learning_rate = critic_learning_rate
self.generator_initial_dense_layer_size = generator_initial_dense_layer_size
self.generator_upsample = generator_upsample
self.generator_conv_filters = generator_conv_filters
self.generator_conv_kernel_size = generator_conv_kernel_size
self.generator_conv_strides = generator_conv_strides
self.generator_batch_norm_momentum = generator_batch_norm_momentum
self.generator_activation = generator_activation
self.generator_dropout_rate = generator_dropout_rate
self.generator_learning_rate = generator_learning_rate
self.optimiser = optimiser
self.z_dim = z_dim
self.n_layers_critic = len(critic_conv_filters)
self.n_layers_generator = len(generator_conv_filters)
self.weight_init = RandomNormal(mean=0., stddev=0.02) # 'he_normal' #RandomNormal(mean=0., stddev=0.02)
self.grad_weight = grad_weight
self.batch_size = batch_size
self.d_losses = []
self.g_losses = []
self.epoch = 0
self._build_critic()
self._build_generator()
self._build_adversarial()
def gradient_penalty_loss(self, y_true, y_pred, interpolated_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, interpolated_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def wasserstein(self, y_true, y_pred):
return -K.mean(y_true * y_pred)
def get_activation(self, activation):
if activation == 'leaky_relu':
layer = LeakyReLU(alpha = 0.2)
else:
layer = Activation(activation)
return layer
def _build_critic(self):
### THE critic
critic_input = Input(shape=self.input_dim, name='critic_input')
x = critic_input
for i in range(self.n_layers_critic):
x = Conv2D(
filters = self.critic_conv_filters[i]
, kernel_size = self.critic_conv_kernel_size[i]
, strides = self.critic_conv_strides[i]
, padding = 'same'
, name = 'critic_conv_' + str(i)
, kernel_initializer = self.weight_init
)(x)
if self.critic_batch_norm_momentum and i > 0:
x = BatchNormalization(momentum = self.critic_batch_norm_momentum)(x)
x = self.get_activation(self.critic_activation)(x)
if self.critic_dropout_rate:
x = Dropout(rate = self.critic_dropout_rate)(x)
x = Flatten()(x)
# x = Dense(512, kernel_initializer = self.weight_init)(x)
# x = self.get_activation(self.critic_activation)(x)
critic_output = Dense(1, activation=None
, kernel_initializer = self.weight_init
)(x)
self.critic = Model(critic_input, critic_output)
def _build_generator(self):
### THE generator
generator_input = Input(shape=(self.z_dim,), name='generator_input')
x = generator_input
x = Dense(np.prod(self.generator_initial_dense_layer_size), kernel_initializer = self.weight_init)(x)
if self.generator_batch_norm_momentum:
x = BatchNormalization(momentum = self.generator_batch_norm_momentum)(x)
x = self.get_activation(self.generator_activation)(x)
x = Reshape(self.generator_initial_dense_layer_size)(x)
if self.generator_dropout_rate:
x = Dropout(rate = self.generator_dropout_rate)(x)
for i in range(self.n_layers_generator):
if self.generator_upsample[i] == 2:
x = UpSampling2D()(x)
x = Conv2D(
filters = self.generator_conv_filters[i]
, kernel_size = self.generator_conv_kernel_size[i]
, padding = 'same'
, name = 'generator_conv_' + str(i)
, kernel_initializer = self.weight_init
)(x)
else:
x = Conv2DTranspose(
filters = self.generator_conv_filters[i]
, kernel_size = self.generator_conv_kernel_size[i]
, padding = 'same'
, strides = self.generator_conv_strides[i]
, name = 'generator_conv_' + str(i)
, kernel_initializer = self.weight_init
)(x)
if i < self.n_layers_generator - 1:
if self.generator_batch_norm_momentum:
x = BatchNormalization(momentum = self.generator_batch_norm_momentum)(x)
x = self.get_activation(self.generator_activation)(x)
else:
x = Activation('tanh')(x)
generator_output = x
self.generator = Model(generator_input, generator_output)
def get_opti(self, lr):
if self.optimiser == 'adam':
opti = Adam(lr=lr, beta_1=0.5)
elif self.optimiser == 'rmsprop':
opti = RMSprop(lr=lr)
else:
opti = Adam(lr=lr)
return opti
def set_trainable(self, m, val):
m.trainable = val
for l in m.layers:
l.trainable = val
def _build_adversarial(self):
#-------------------------------
# Construct Computational Graph
# for the Critic
#-------------------------------
# Freeze generator's layers while training critic
self.set_trainable(self.generator, False)
# Image input (real sample)
real_img = Input(shape=self.input_dim)
# Fake image
z_disc = Input(shape=(self.z_dim,))
fake_img = self.generator(z_disc)
# critic determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage(self.batch_size)([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'interpolated_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
interpolated_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(
loss=[self.wasserstein,self.wasserstein, partial_gp_loss]
,optimizer=self.get_opti(self.critic_learning_rate)
,loss_weights=[1, 1, self.grad_weight]
)
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.set_trainable(self.critic, False)
self.set_trainable(self.generator, True)
# Sampled noise for input to generator
model_input = Input(shape=(self.z_dim,))
# Generate images based of noise
img = self.generator(model_input)
# Discriminator determines validity
model_output = self.critic(img)
# Defines generator model
self.model = Model(model_input, model_output)
self.model.compile(optimizer=self.get_opti(self.generator_learning_rate)
, loss=self.wasserstein
)
self.set_trainable(self.critic, True)
def train_critic(self, x_train, batch_size, using_generator):
valid = np.ones((batch_size,1), dtype=np.float32)
fake = -np.ones((batch_size,1), dtype=np.float32)
dummy = np.zeros((batch_size, 1), dtype=np.float32) # Dummy gt for gradient penalty
if using_generator:
true_imgs = next(x_train)[0]
if true_imgs.shape[0] != batch_size:
true_imgs = next(x_train)[0]
else:
idx = np.random.randint(0, x_train.shape[0], batch_size)
true_imgs = x_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.z_dim))
d_loss = self.critic_model.train_on_batch([true_imgs, noise], [valid, fake, dummy])
return d_loss
def train_generator(self, batch_size):
valid = np.ones((batch_size,1), dtype=np.float32)
noise = np.random.normal(0, 1, (batch_size, self.z_dim))
return self.model.train_on_batch(noise, valid)
def train(self, x_train, batch_size, epochs, run_folder, print_every_n_batches = 10
, n_critic = 5
, using_generator = False):
for epoch in range(self.epoch, self.epoch + epochs):
if epoch % 100 == 0:
critic_loops = 5
else:
critic_loops = n_critic
for _ in range(critic_loops):
d_loss = self.train_critic(x_train, batch_size, using_generator)
g_loss = self.train_generator(batch_size)
print ("%d (%d, %d) [D loss: (%.1f)(R %.1f, F %.1f, G %.1f)] [G loss: %.1f]" % (epoch, critic_loops, 1, d_loss[0], d_loss[1],d_loss[2],d_loss[3],g_loss))
self.d_losses.append(d_loss)
self.g_losses.append(g_loss)
# If at save interval => save generated image samples
if epoch % print_every_n_batches == 0:
self.sample_images(run_folder)
self.model.save_weights(os.path.join(run_folder, 'weights/weights-%d.h5' % (epoch)))
self.model.save_weights(os.path.join(run_folder, 'weights/weights.h5'))
self.save_model(run_folder)
self.epoch+=1
def sample_images(self, run_folder):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.z_dim))
gen_imgs = self.generator.predict(noise)
#Rescale images 0 - 1
gen_imgs = 0.5 * (gen_imgs + 1)
gen_imgs = np.clip(gen_imgs, 0, 1)
fig, axs = plt.subplots(r, c, figsize=(15,15))
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(np.squeeze(gen_imgs[cnt, :,:,:]), cmap = 'gray_r')
axs[i,j].axis('off')
cnt += 1
fig.savefig(os.path.join(run_folder, "images/sample_%d.png" % self.epoch))
plt.close()
def plot_model(self, run_folder):
plot_model(self.model, to_file=os.path.join(run_folder ,'viz/model.png'), show_shapes = True, show_layer_names = True)
plot_model(self.critic, to_file=os.path.join(run_folder ,'viz/critic.png'), show_shapes = True, show_layer_names = True)
plot_model(self.generator, to_file=os.path.join(run_folder ,'viz/generator.png'), show_shapes = True, show_layer_names = True)
def save(self, folder):
with open(os.path.join(folder, 'params.pkl'), 'wb') as f:
pickle.dump([
self.input_dim
, self.critic_conv_filters
, self.critic_conv_kernel_size
, self.critic_conv_strides
, self.critic_batch_norm_momentum
, self.critic_activation
, self.critic_dropout_rate
, self.critic_learning_rate
, self.generator_initial_dense_layer_size
, self.generator_upsample
, self.generator_conv_filters
, self.generator_conv_kernel_size
, self.generator_conv_strides
, self.generator_batch_norm_momentum
, self.generator_activation
, self.generator_dropout_rate
, self.generator_learning_rate
, self.optimiser
, self.grad_weight
, self.z_dim
, self.batch_size
], f)
self.plot_model(folder)
def save_model(self, run_folder):
self.model.save(os.path.join(run_folder, 'model.h5'))
self.critic.save(os.path.join(run_folder, 'critic.h5'))
self.generator.save(os.path.join(run_folder, 'generator.h5'))
pickle.dump(self, open( os.path.join(run_folder, "obj.pkl"), "wb" ))
def load_weights(self, filepath):
self.model.load_weights(filepath)
| 35.502326
| 169
| 0.60435
|
from keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, BatchNormalization, LeakyReLU, Dropout, ZeroPadding2D, UpSampling2D
from keras.layers.merge import _Merge
from keras.models import Model, Sequential
from keras import backend as K
from keras.optimizers import Adam, RMSprop
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
from keras.initializers import RandomNormal
from functools import partial
import numpy as np
import json
import os
import pickle
import matplotlib.pyplot as plt
class RandomWeightedAverage(_Merge):
def __init__(self, batch_size):
super().__init__()
self.batch_size = batch_size
def _merge_function(self, inputs):
alpha = K.random_uniform((self.batch_size, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
def __init__(self
, input_dim
, critic_conv_filters
, critic_conv_kernel_size
, critic_conv_strides
, critic_batch_norm_momentum
, critic_activation
, critic_dropout_rate
, critic_learning_rate
, generator_initial_dense_layer_size
, generator_upsample
, generator_conv_filters
, generator_conv_kernel_size
, generator_conv_strides
, generator_batch_norm_momentum
, generator_activation
, generator_dropout_rate
, generator_learning_rate
, optimiser
, grad_weight
, z_dim
, batch_size
):
self.name = 'gan'
self.input_dim = input_dim
self.critic_conv_filters = critic_conv_filters
self.critic_conv_kernel_size = critic_conv_kernel_size
self.critic_conv_strides = critic_conv_strides
self.critic_batch_norm_momentum = critic_batch_norm_momentum
self.critic_activation = critic_activation
self.critic_dropout_rate = critic_dropout_rate
self.critic_learning_rate = critic_learning_rate
self.generator_initial_dense_layer_size = generator_initial_dense_layer_size
self.generator_upsample = generator_upsample
self.generator_conv_filters = generator_conv_filters
self.generator_conv_kernel_size = generator_conv_kernel_size
self.generator_conv_strides = generator_conv_strides
self.generator_batch_norm_momentum = generator_batch_norm_momentum
self.generator_activation = generator_activation
self.generator_dropout_rate = generator_dropout_rate
self.generator_learning_rate = generator_learning_rate
self.optimiser = optimiser
self.z_dim = z_dim
self.n_layers_critic = len(critic_conv_filters)
self.n_layers_generator = len(generator_conv_filters)
self.weight_init = RandomNormal(mean=0., stddev=0.02) ight
self.batch_size = batch_size
self.d_losses = []
self.g_losses = []
self.epoch = 0
self._build_critic()
self._build_generator()
self._build_adversarial()
def gradient_penalty_loss(self, y_true, y_pred, interpolated_samples):
gradients = K.gradients(y_pred, interpolated_samples)[0]
gradients_sqr = K.square(gradients)
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
gradient_penalty = K.square(1 - gradient_l2_norm)
return K.mean(gradient_penalty)
def wasserstein(self, y_true, y_pred):
return -K.mean(y_true * y_pred)
def get_activation(self, activation):
if activation == 'leaky_relu':
layer = LeakyReLU(alpha = 0.2)
else:
layer = Activation(activation)
return layer
def _build_critic(self):
nput(shape=self.input_dim, name='critic_input')
x = critic_input
for i in range(self.n_layers_critic):
x = Conv2D(
filters = self.critic_conv_filters[i]
, kernel_size = self.critic_conv_kernel_size[i]
, strides = self.critic_conv_strides[i]
, padding = 'same'
, name = 'critic_conv_' + str(i)
, kernel_initializer = self.weight_init
)(x)
if self.critic_batch_norm_momentum and i > 0:
x = BatchNormalization(momentum = self.critic_batch_norm_momentum)(x)
x = self.get_activation(self.critic_activation)(x)
if self.critic_dropout_rate:
x = Dropout(rate = self.critic_dropout_rate)(x)
x = Flatten()(x)
critic_output = Dense(1, activation=None
, kernel_initializer = self.weight_init
)(x)
self.critic = Model(critic_input, critic_output)
def _build_generator(self):
ut(shape=(self.z_dim,), name='generator_input')
x = generator_input
x = Dense(np.prod(self.generator_initial_dense_layer_size), kernel_initializer = self.weight_init)(x)
if self.generator_batch_norm_momentum:
x = BatchNormalization(momentum = self.generator_batch_norm_momentum)(x)
x = self.get_activation(self.generator_activation)(x)
x = Reshape(self.generator_initial_dense_layer_size)(x)
if self.generator_dropout_rate:
x = Dropout(rate = self.generator_dropout_rate)(x)
for i in range(self.n_layers_generator):
if self.generator_upsample[i] == 2:
x = UpSampling2D()(x)
x = Conv2D(
filters = self.generator_conv_filters[i]
, kernel_size = self.generator_conv_kernel_size[i]
, padding = 'same'
, name = 'generator_conv_' + str(i)
, kernel_initializer = self.weight_init
)(x)
else:
x = Conv2DTranspose(
filters = self.generator_conv_filters[i]
, kernel_size = self.generator_conv_kernel_size[i]
, padding = 'same'
, strides = self.generator_conv_strides[i]
, name = 'generator_conv_' + str(i)
, kernel_initializer = self.weight_init
)(x)
if i < self.n_layers_generator - 1:
if self.generator_batch_norm_momentum:
x = BatchNormalization(momentum = self.generator_batch_norm_momentum)(x)
x = self.get_activation(self.generator_activation)(x)
else:
x = Activation('tanh')(x)
generator_output = x
self.generator = Model(generator_input, generator_output)
def get_opti(self, lr):
if self.optimiser == 'adam':
opti = Adam(lr=lr, beta_1=0.5)
elif self.optimiser == 'rmsprop':
opti = RMSprop(lr=lr)
else:
opti = Adam(lr=lr)
return opti
def set_trainable(self, m, val):
m.trainable = val
for l in m.layers:
l.trainable = val
def _build_adversarial(self):
self.set_trainable(self.generator, False)
# Image input (real sample)
real_img = Input(shape=self.input_dim)
# Fake image
z_disc = Input(shape=(self.z_dim,))
fake_img = self.generator(z_disc)
# critic determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage(self.batch_size)([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'interpolated_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
interpolated_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(
loss=[self.wasserstein,self.wasserstein, partial_gp_loss]
,optimizer=self.get_opti(self.critic_learning_rate)
,loss_weights=[1, 1, self.grad_weight]
)
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.set_trainable(self.critic, False)
self.set_trainable(self.generator, True)
model_input = Input(shape=(self.z_dim,))
img = self.generator(model_input)
model_output = self.critic(img)
self.model = Model(model_input, model_output)
self.model.compile(optimizer=self.get_opti(self.generator_learning_rate)
, loss=self.wasserstein
)
self.set_trainable(self.critic, True)
def train_critic(self, x_train, batch_size, using_generator):
valid = np.ones((batch_size,1), dtype=np.float32)
fake = -np.ones((batch_size,1), dtype=np.float32)
dummy = np.zeros((batch_size, 1), dtype=np.float32)
if using_generator:
true_imgs = next(x_train)[0]
if true_imgs.shape[0] != batch_size:
true_imgs = next(x_train)[0]
else:
idx = np.random.randint(0, x_train.shape[0], batch_size)
true_imgs = x_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.z_dim))
d_loss = self.critic_model.train_on_batch([true_imgs, noise], [valid, fake, dummy])
return d_loss
def train_generator(self, batch_size):
valid = np.ones((batch_size,1), dtype=np.float32)
noise = np.random.normal(0, 1, (batch_size, self.z_dim))
return self.model.train_on_batch(noise, valid)
def train(self, x_train, batch_size, epochs, run_folder, print_every_n_batches = 10
, n_critic = 5
, using_generator = False):
for epoch in range(self.epoch, self.epoch + epochs):
if epoch % 100 == 0:
critic_loops = 5
else:
critic_loops = n_critic
for _ in range(critic_loops):
d_loss = self.train_critic(x_train, batch_size, using_generator)
g_loss = self.train_generator(batch_size)
print ("%d (%d, %d) [D loss: (%.1f)(R %.1f, F %.1f, G %.1f)] [G loss: %.1f]" % (epoch, critic_loops, 1, d_loss[0], d_loss[1],d_loss[2],d_loss[3],g_loss))
self.d_losses.append(d_loss)
self.g_losses.append(g_loss)
if epoch % print_every_n_batches == 0:
self.sample_images(run_folder)
self.model.save_weights(os.path.join(run_folder, 'weights/weights-%d.h5' % (epoch)))
self.model.save_weights(os.path.join(run_folder, 'weights/weights.h5'))
self.save_model(run_folder)
self.epoch+=1
def sample_images(self, run_folder):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.z_dim))
gen_imgs = self.generator.predict(noise)
gen_imgs = 0.5 * (gen_imgs + 1)
gen_imgs = np.clip(gen_imgs, 0, 1)
fig, axs = plt.subplots(r, c, figsize=(15,15))
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(np.squeeze(gen_imgs[cnt, :,:,:]), cmap = 'gray_r')
axs[i,j].axis('off')
cnt += 1
fig.savefig(os.path.join(run_folder, "images/sample_%d.png" % self.epoch))
plt.close()
def plot_model(self, run_folder):
plot_model(self.model, to_file=os.path.join(run_folder ,'viz/model.png'), show_shapes = True, show_layer_names = True)
plot_model(self.critic, to_file=os.path.join(run_folder ,'viz/critic.png'), show_shapes = True, show_layer_names = True)
plot_model(self.generator, to_file=os.path.join(run_folder ,'viz/generator.png'), show_shapes = True, show_layer_names = True)
def save(self, folder):
with open(os.path.join(folder, 'params.pkl'), 'wb') as f:
pickle.dump([
self.input_dim
, self.critic_conv_filters
, self.critic_conv_kernel_size
, self.critic_conv_strides
, self.critic_batch_norm_momentum
, self.critic_activation
, self.critic_dropout_rate
, self.critic_learning_rate
, self.generator_initial_dense_layer_size
, self.generator_upsample
, self.generator_conv_filters
, self.generator_conv_kernel_size
, self.generator_conv_strides
, self.generator_batch_norm_momentum
, self.generator_activation
, self.generator_dropout_rate
, self.generator_learning_rate
, self.optimiser
, self.grad_weight
, self.z_dim
, self.batch_size
], f)
self.plot_model(folder)
def save_model(self, run_folder):
self.model.save(os.path.join(run_folder, 'model.h5'))
self.critic.save(os.path.join(run_folder, 'critic.h5'))
self.generator.save(os.path.join(run_folder, 'generator.h5'))
pickle.dump(self, open( os.path.join(run_folder, "obj.pkl"), "wb" ))
def load_weights(self, filepath):
self.model.load_weights(filepath)
| true
| true
|
1c4839afde50eb8dd507b972de44d105bb02aea1
| 1,077
|
py
|
Python
|
tests/application/cms/test_filters.py
|
AlexKouzy/ethnicity-facts-and-figures-publisher
|
18ab2495a8633f585e18e607c7f75daa564a053d
|
[
"MIT"
] | 1
|
2021-10-06T13:48:36.000Z
|
2021-10-06T13:48:36.000Z
|
tests/application/cms/test_filters.py
|
AlexKouzy/ethnicity-facts-and-figures-publisher
|
18ab2495a8633f585e18e607c7f75daa564a053d
|
[
"MIT"
] | 116
|
2018-11-02T17:20:47.000Z
|
2022-02-09T11:06:22.000Z
|
tests/application/cms/test_filters.py
|
racedisparityaudit/rd_cms
|
a12f0e3f5461cc41eed0077ed02e11efafc5dd76
|
[
"MIT"
] | 2
|
2018-11-09T16:47:35.000Z
|
2020-04-09T13:06:48.000Z
|
import pytest
from application.cms.filters import index_of_last_initial_zero, yesno
class TestYesNo:
@pytest.mark.parametrize(
"input_value, expected_output",
((True, "yes"), (False, "no"), (1, 1), (0, 0), ("true", "true"), ("false", "false"), ("abc", "abc")),
)
def test_yesno_converts_boolean_true_and_false_only(self, input_value, expected_output):
assert yesno(input_value) == expected_output
class TestIndexOfLastInitialZero:
def test_when_only_one_zero(self):
assert index_of_last_initial_zero([0, 10, 20]) == 0
def test_when_many_zeros(self):
assert index_of_last_initial_zero([0, 0, 0, 0, 1, 2]) == 3
def test_when_later_zeros_are_present(self):
assert index_of_last_initial_zero([0, 0, 1, 2, 1, 0]) == 1
def test_when_no_zeros_are_present(self):
with pytest.raises(ValueError):
index_of_last_initial_zero([1, 2, 3, 4])
def test_when_array_contains_strings(self):
with pytest.raises(ValueError):
index_of_last_initial_zero(["0", "1", "2"])
| 33.65625
| 109
| 0.673166
|
import pytest
from application.cms.filters import index_of_last_initial_zero, yesno
class TestYesNo:
@pytest.mark.parametrize(
"input_value, expected_output",
((True, "yes"), (False, "no"), (1, 1), (0, 0), ("true", "true"), ("false", "false"), ("abc", "abc")),
)
def test_yesno_converts_boolean_true_and_false_only(self, input_value, expected_output):
assert yesno(input_value) == expected_output
class TestIndexOfLastInitialZero:
def test_when_only_one_zero(self):
assert index_of_last_initial_zero([0, 10, 20]) == 0
def test_when_many_zeros(self):
assert index_of_last_initial_zero([0, 0, 0, 0, 1, 2]) == 3
def test_when_later_zeros_are_present(self):
assert index_of_last_initial_zero([0, 0, 1, 2, 1, 0]) == 1
def test_when_no_zeros_are_present(self):
with pytest.raises(ValueError):
index_of_last_initial_zero([1, 2, 3, 4])
def test_when_array_contains_strings(self):
with pytest.raises(ValueError):
index_of_last_initial_zero(["0", "1", "2"])
| true
| true
|
1c483aecbbbdbbb994f33b24f66067faffd38da9
| 17,014
|
py
|
Python
|
modules/templates/CCC/menus.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 205
|
2015-01-20T08:26:09.000Z
|
2022-03-27T19:59:33.000Z
|
modules/templates/CCC/menus.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 249
|
2015-02-10T09:56:35.000Z
|
2022-03-23T19:54:36.000Z
|
modules/templates/CCC/menus.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 231
|
2015-02-10T09:33:17.000Z
|
2022-02-18T19:56:05.000Z
|
# -*- coding: utf-8 -*-
from gluon import current, URL
#from s3 import IS_ISO639_2_LANGUAGE_CODE
from s3layouts import M, MM
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
# Modules menus
main_menu = MM()(
cls.menu_modules(),
)
# Additional menus
current.menu.personal = cls.menu_personal()
#current.menu.lang = cls.menu_lang()
current.menu.about = cls.menu_about()
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
auth = current.auth
if not auth.is_logged_in():
menu = [MM("Volunteer Your Time", c="default", f="index", args="volunteer"),
#MM("Donate Items", c="default", f="index", args="donate"),
]
return menu
has_role = auth.s3_has_role
if has_role("ADMIN"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("All Documents", c="doc", f="document", m="datalist"),
MM("Affected People", c="br", f="person")(
MM("Import", m="import"),
),
MM("Donors", c="pr", f="person", vars={"donors": 1})(
MM("Donations", c="supply", f="person_item"),
MM("Edit General Information", c="cms", f="post", vars={"~.name": "Donor"}, m="update"),
),
MM("Organisations", c="org", f="organisation", m="summary")(
MM("Import", m="import"),
#MM("Message", c="org", f="organisation", args="message"),
),
MM("Volunteers", c="hrm", f="human_resource")(
MM("Reserves", c="pr", f="person", vars={"reserves": 1}),
MM("Reserve Groups", c="pr", f="group"),
MM("Inactives", c="pr", f="person", vars={"inactive": 1}),
),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Messages", c="project", f="task"),
]
elif has_role("ORG_ADMIN"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisation Documents", c="doc", f="document", m="datalist"),
MM("Donors", c="pr", f="person", vars={"donors": 1})(
MM("Donations", c="supply", f="person_item"),
),
MM("Organisations", c="org", f="organisation", m="summary")(
#MM("Message", c="org", f="organisation", args="message"),
),
MM("Volunteers", c="hrm", f="human_resource")(
MM("Reserves", c="pr", f="person", vars={"reserves": 1}),
#MM("Reserve Groups", c="pr", f="group"),
),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Messages", c="project", f="task"),
]
elif has_role("AGENCY"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Documents", c="doc", f="document", m="datalist"),
MM("Affected People", c="br", f="person")(
MM("Import", c="br", f="person", m="import"),
),
MM("Donors", c="pr", f="person", vars={"donors": 1})(
MM("Donations", c="supply", f="person_item"),
),
MM("Organisations", c="org", f="organisation", m="summary")(
#MM("Message", c="org", f="organisation", args="message"),
),
MM("Volunteers", c="hrm", f="human_resource")(
MM("Reserves", c="pr", f="person", vars={"reserves": 1}),
MM("Reserve Groups", c="pr", f="group"),
),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Messages", c="project", f="task")(
MM("Contact Organisation Admins", c="project", f="task", m="create"),
),
]
elif has_role("VOLUNTEER"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisation Documents", c="doc", f="document", m="datalist"),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Contact Organisation Admins", c="project", f="task", m="create"),
]
elif has_role("GROUP_ADMIN"):
menu = [#MM("Volunteer Your Time", c="default", f="index", args="volunteer"),
#MM("Donate Items", c="default", f="index", args="donate"),
MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Group", c="pr", f="group", m="update"),
]
elif has_role("DONOR"):
menu = [#MM("Volunteer Your Time", c="default", f="index", args="volunteer"),
#MM("Donate Items", c="default", f="index", args="donate"),
MM("General Information", c="default", f="index", m="donor"),
MM("Messages", c="project", f="task"),
]
elif has_role("RESERVE"):
# Reserve Volunteer
menu = [#MM("Volunteer Your Time", c="default", f="index", args="volunteer"),
#MM("Donate Items", c="default", f="index", args="donate"),
MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisations", c="org", f="organisation", m="summary"),
MM("Events", c="hrm", f="training_event"), # They can only see ones they're invited to
MM("Opportunities", c="req", f="need"), # They can only see ones they're invited to
]
else:
# Inactive Volunteer
menu = [#MM("Volunteer Your Time", c="default", f="index", args="volunteer"),
#MM("Donate Items", c="default", f="index", args="donate"),
#MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisations", c="org", f="organisation", m="summary"),
#MM("Events", c="hrm", f="training_event"), # They can only see ones they're invited to
#MM("Opportunities", c="req", f="need"), # They can only see ones they're invited to
]
return menu
# -------------------------------------------------------------------------
#@classmethod
#def menu_lang(cls):
# """ Language Selector """
# languages = current.deployment_settings.get_L10n_languages()
# represent_local = IS_ISO639_2_LANGUAGE_CODE.represent_local
# menu_lang = ML("Language", right=True)
# for code in languages:
# # Show each language name in its own language
# lang_name = represent_local(code)
# menu_lang(ML(lang_name,
# translate = False,
# lang_code = code,
# lang_name = lang_name,
# )
# )
# return menu_lang
# -------------------------------------------------------------------------
@classmethod
def menu_personal(cls):
""" Personal Menu """
auth = current.auth
#s3 = current.response.s3
#settings = current.deployment_settings
if not auth.is_logged_in():
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
#self_registration = settings.get_security_self_registration()
menu_personal = MP()(
#MP("Register", c="default", f="user",
# m = "register",
# check = self_registration,
# ),
MP("Login", c="default", f="user",
m = "login",
vars = {"_next": login_next},
),
)
#if settings.get_auth_password_retrieval():
# menu_personal(MP("Lost Password", c="default", f="user",
# m = "retrieve_password",
# ),
# )
else:
ADMIN = current.auth.get_system_roles().ADMIN
s3_has_role = auth.s3_has_role
is_org_admin = lambda i: not s3_has_role(ADMIN) and \
s3_has_role("ORG_ADMIN")
menu_personal = MP()(
MP("Administration", c="admin", f="index",
restrict = ADMIN,
),
MP("Administration", c="admin", f="user",
check = is_org_admin,
),
MP("Profile", c="default", f="person"),
MP("Change Password", c="default", f="user",
m = "change_password",
),
MP("Logout", c="default", f="user",
m = "logout",
),
)
return menu_personal
# -------------------------------------------------------------------------
@classmethod
def menu_about(cls):
#ADMIN = current.auth.get_system_roles().ADMIN
menu_about = MA(c="default")(
MA("Help", f="help"),
MA("Contact Us", f="contact"),
#MA("Version", f="about", restrict = ADMIN),
)
return menu_about
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Controller Menus """
# -------------------------------------------------------------------------
def admin(self):
""" ADMIN menu """
if not current.auth.s3_has_role("ADMIN"):
# OrgAdmin: No Side-menu
return None
#settings_messaging = self.settings_messaging()
#settings = current.deployment_settings
#consent_tracking = lambda i: settings.get_auth_consent_tracking()
#is_data_repository = lambda i: settings.get_sync_data_repository()
#translate = settings.has_module("translate")
# NB: Do not specify a controller for the main menu to allow
# re-use of this menu by other controllers
return M()(
#M("Setup", c="setup", f="deployment")(
# #M("Create", m="create"),
# #M("Servers", f="server")(
# #),
# #M("Instances", f="instance")(
# #),
#),
#M("Settings", c="admin", f="setting")(
# settings_messaging,
#),
M("User Management", c="admin", f="user")(
M("Create User", m="create"),
M("List All Users"),
M("Import Users", m="import"),
M("List All Roles", f="role"),
#M("List All Organization Approvers & Whitelists", f="organisation"),
#M("Roles", f="group"),
#M("Membership", f="membership"),
),
#M("Consent Tracking", c="admin", link=False, check=consent_tracking)(
M("Consent Tracking", c="admin", link=False)(
M("Processing Types", f="processing_type"),
M("Consent Options", f="consent_option"),
),
#M("Goods / Services", c="supply", f="item")(),
#M("Qualifications", c="hrm", f="certificate")(),
M("Organizations", c="org", f="organisation")(
M("Types", f="organisation_type"),
M("Job Titles", c="hrm", f="job_title"),
),
#M("Time Slots", c="pr", f="slot")(),
#M("Volunteer Offers", c="hrm", f="skill")(),
#M("CMS", c="cms", f="post")(
#),
M("Database", c="appadmin", f="index")(
M("Raw Database access", c="appadmin", f="index")
),
M("Error Tickets", c="admin", f="errors"),
#M("Monitoring", c="setup", f="server")(
# M("Checks", f="monitor_check"),
# M("Servers", f="server"),
# M("Tasks", f="monitor_task"),
# M("Logs", f="monitor_run"),
#),
M("Scheduler", c="admin", f="task"),
#M("Synchronization", c="sync", f="index")(
# M("Settings", f="config", args=[1], m="update"),
# M("Repositories", f="repository"),
# M("Public Data Sets", f="dataset", check=is_data_repository),
# M("Log", f="log"),
#),
#M("Edit Application", a="admin", c="default", f="design",
#args=[request.application]),
#M("Translation", c="admin", f="translate", check=translate)(
# M("Select Modules for translation", c="admin", f="translate",
# m="create", vars=dict(opt="1")),
# M("Upload translated files", c="admin", f="translate",
# m="create", vars=dict(opt="2")),
# M("View Translation Percentage", c="admin", f="translate",
# m="create", vars=dict(opt="3")),
# M("Add strings manually", c="admin", f="translate",
# m="create", vars=dict(opt="4"))
#),
#M("View Test Result Reports", c="admin", f="result"),
#M("Portable App", c="admin", f="portable")
)
# -------------------------------------------------------------------------
@staticmethod
def br():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def cms():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def doc():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def hrm():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def org():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def pr():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def project():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def req():
""" No Side Menu """
return None
# -------------------------------------------------------------------------
@staticmethod
def supply():
""" No Side Menu """
return None
# END =========================================================================
| 43.514066
| 111
| 0.401846
|
from gluon import current, URL
from s3layouts import M, MM
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
@classmethod
def menu(cls):
main_menu = MM()(
cls.menu_modules(),
)
current.menu.personal = cls.menu_personal()
current.menu.about = cls.menu_about()
return main_menu
@classmethod
def menu_modules(cls):
auth = current.auth
if not auth.is_logged_in():
menu = [MM("Volunteer Your Time", c="default", f="index", args="volunteer"),
]
return menu
has_role = auth.s3_has_role
if has_role("ADMIN"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("All Documents", c="doc", f="document", m="datalist"),
MM("Affected People", c="br", f="person")(
MM("Import", m="import"),
),
MM("Donors", c="pr", f="person", vars={"donors": 1})(
MM("Donations", c="supply", f="person_item"),
MM("Edit General Information", c="cms", f="post", vars={"~.name": "Donor"}, m="update"),
),
MM("Organisations", c="org", f="organisation", m="summary")(
MM("Import", m="import"),
),
MM("Volunteers", c="hrm", f="human_resource")(
MM("Reserves", c="pr", f="person", vars={"reserves": 1}),
MM("Reserve Groups", c="pr", f="group"),
MM("Inactives", c="pr", f="person", vars={"inactive": 1}),
),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Messages", c="project", f="task"),
]
elif has_role("ORG_ADMIN"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisation Documents", c="doc", f="document", m="datalist"),
MM("Donors", c="pr", f="person", vars={"donors": 1})(
MM("Donations", c="supply", f="person_item"),
),
MM("Organisations", c="org", f="organisation", m="summary")(
),
MM("Volunteers", c="hrm", f="human_resource")(
MM("Reserves", c="pr", f="person", vars={"reserves": 1}),
),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Messages", c="project", f="task"),
]
elif has_role("AGENCY"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Documents", c="doc", f="document", m="datalist"),
MM("Affected People", c="br", f="person")(
MM("Import", c="br", f="person", m="import"),
),
MM("Donors", c="pr", f="person", vars={"donors": 1})(
MM("Donations", c="supply", f="person_item"),
),
MM("Organisations", c="org", f="organisation", m="summary")(
),
MM("Volunteers", c="hrm", f="human_resource")(
MM("Reserves", c="pr", f="person", vars={"reserves": 1}),
MM("Reserve Groups", c="pr", f="group"),
),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Messages", c="project", f="task")(
MM("Contact Organisation Admins", c="project", f="task", m="create"),
),
]
elif has_role("VOLUNTEER"):
menu = [MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisation Documents", c="doc", f="document", m="datalist"),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"),
MM("Contact Organisation Admins", c="project", f="task", m="create"),
]
elif has_role("GROUP_ADMIN"):
menu = [
MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Group", c="pr", f="group", m="update"),
]
elif has_role("DONOR"):
menu = [
MM("General Information", c="default", f="index", m="donor"),
MM("Messages", c="project", f="task"),
]
elif has_role("RESERVE"):
menu = [
MM("General Information and Advice", c="cms", f="post", m="datalist"),
MM("Organisations", c="org", f="organisation", m="summary"),
MM("Events", c="hrm", f="training_event"),
MM("Opportunities", c="req", f="need"), # They can only see ones they're invited to
]
else:
menu = [
MM("Organisations", c="org", f="organisation", m="summary"),
="req", f="need"), # They can only see ones they're invited to
]
return menu
@classmethod
def menu_personal(cls):
auth = current.auth
if not auth.is_logged_in():
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
menu_personal = MP()(
MP("Login", c="default", f="user",
m = "login",
vars = {"_next": login_next},
),
)
else:
ADMIN = current.auth.get_system_roles().ADMIN
s3_has_role = auth.s3_has_role
is_org_admin = lambda i: not s3_has_role(ADMIN) and \
s3_has_role("ORG_ADMIN")
menu_personal = MP()(
MP("Administration", c="admin", f="index",
restrict = ADMIN,
),
MP("Administration", c="admin", f="user",
check = is_org_admin,
),
MP("Profile", c="default", f="person"),
MP("Change Password", c="default", f="user",
m = "change_password",
),
MP("Logout", c="default", f="user",
m = "logout",
),
)
return menu_personal
@classmethod
def menu_about(cls):
menu_about = MA(c="default")(
MA("Help", f="help"),
MA("Contact Us", f="contact"),
)
return menu_about
class S3OptionsMenu(default.S3OptionsMenu):
def admin(self):
if not current.auth.s3_has_role("ADMIN"):
return None
return M()(
M("User Management", c="admin", f="user")(
M("Create User", m="create"),
M("List All Users"),
M("Import Users", m="import"),
M("List All Roles", f="role"),
),
M("Consent Tracking", c="admin", link=False)(
M("Processing Types", f="processing_type"),
M("Consent Options", f="consent_option"),
),
M("Organizations", c="org", f="organisation")(
M("Types", f="organisation_type"),
M("Job Titles", c="hrm", f="job_title"),
),
M("Database", c="appadmin", f="index")(
M("Raw Database access", c="appadmin", f="index")
),
M("Error Tickets", c="admin", f="errors"),
M("Scheduler", c="admin", f="task"),
)
@staticmethod
def br():
return None
@staticmethod
def cms():
return None
@staticmethod
def doc():
return None
@staticmethod
def hrm():
return None
@staticmethod
def org():
return None
@staticmethod
def pr():
return None
@staticmethod
def project():
return None
@staticmethod
def req():
return None
@staticmethod
def supply():
return None
| true
| true
|
1c483ccf406e95ea8d666dbe860d047dfb31581a
| 3,024
|
py
|
Python
|
flask/lib/python3.6/site-packages/stem/interpreter/autocomplete.py
|
JOFLIX/grapevines
|
34576e01184570d79cc140b42ffb71d322132da6
|
[
"MIT",
"Unlicense"
] | null | null | null |
flask/lib/python3.6/site-packages/stem/interpreter/autocomplete.py
|
JOFLIX/grapevines
|
34576e01184570d79cc140b42ffb71d322132da6
|
[
"MIT",
"Unlicense"
] | 3
|
2019-07-29T09:47:34.000Z
|
2019-07-29T09:47:35.000Z
|
flask/lib/python3.6/site-packages/stem/interpreter/autocomplete.py
|
JOFLIX/grapevines
|
34576e01184570d79cc140b42ffb71d322132da6
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright 2014-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Tab completion for our interpreter prompt.
"""
from stem.interpreter import uses_settings
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
@uses_settings
def _get_commands(controller, config):
"""
Provides commands recognized by tor.
"""
commands = config.get('autocomplete', [])
if controller is None:
return commands
# GETINFO commands. Lines are of the form '[option] -- [description]'. This
# strips '*' from options that accept values.
results = controller.get_info('info/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0].rstrip('*')
commands.append('GETINFO %s' % option)
else:
commands.append('GETINFO ')
# GETCONF, SETCONF, and RESETCONF commands. Lines are of the form
# '[option] [type]'.
results = controller.get_info('config/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0]
commands.append('GETCONF %s' % option)
commands.append('SETCONF %s' % option)
commands.append('RESETCONF %s' % option)
else:
commands += ['GETCONF ', 'SETCONF ', 'RESETCONF ']
# SETEVENT, USEFEATURE, and SIGNAL commands. For each of these the GETINFO
# results are simply a space separated lists of the values they can have.
options = (
('SETEVENTS ', 'events/names'),
('USEFEATURE ', 'features/names'),
('SIGNAL ', 'signal/names'),
)
for prefix, getinfo_cmd in options:
results = controller.get_info(getinfo_cmd, None)
if results:
commands += [prefix + value for value in results.split()]
else:
commands.append(prefix)
# Adds /help commands.
usage_info = config.get('help.usage', {})
for cmd in usage_info.keys():
commands.append('/help ' + cmd)
return commands
class Autocompleter(object):
def __init__(self, controller):
self._commands = _get_commands(controller)
@lru_cache()
def matches(self, text):
"""
Provides autocompletion matches for the given text.
:param str text: text to check for autocompletion matches with
:returns: **list** with possible matches
"""
lowercase_text = text.lower()
return [cmd for cmd in self._commands if cmd.lower().startswith(lowercase_text)]
def complete(self, text, state):
"""
Provides case insensetive autocompletion options, acting as a functor for
the readlines set_completer function.
:param str text: text to check for autocompletion matches with
:param int state: index of result to be provided, readline fetches matches
until this function provides None
:returns: **str** with the autocompletion match, **None** if eithe none
exists or state is higher than our number of matches
"""
try:
return self.matches(text)[state]
except IndexError:
return None
| 26.068966
| 84
| 0.680886
|
from stem.interpreter import uses_settings
try:
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
@uses_settings
def _get_commands(controller, config):
commands = config.get('autocomplete', [])
if controller is None:
return commands
results = controller.get_info('info/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0].rstrip('*')
commands.append('GETINFO %s' % option)
else:
commands.append('GETINFO ')
results = controller.get_info('config/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0]
commands.append('GETCONF %s' % option)
commands.append('SETCONF %s' % option)
commands.append('RESETCONF %s' % option)
else:
commands += ['GETCONF ', 'SETCONF ', 'RESETCONF ']
options = (
('SETEVENTS ', 'events/names'),
('USEFEATURE ', 'features/names'),
('SIGNAL ', 'signal/names'),
)
for prefix, getinfo_cmd in options:
results = controller.get_info(getinfo_cmd, None)
if results:
commands += [prefix + value for value in results.split()]
else:
commands.append(prefix)
usage_info = config.get('help.usage', {})
for cmd in usage_info.keys():
commands.append('/help ' + cmd)
return commands
class Autocompleter(object):
def __init__(self, controller):
self._commands = _get_commands(controller)
@lru_cache()
def matches(self, text):
lowercase_text = text.lower()
return [cmd for cmd in self._commands if cmd.lower().startswith(lowercase_text)]
def complete(self, text, state):
try:
return self.matches(text)[state]
except IndexError:
return None
| true
| true
|
1c483ce4f303e0026de5ff70630090340b35fd96
| 836
|
py
|
Python
|
backend/tasks.py
|
ioxio-nexus/mycompany-consent-demo
|
aefa69375c14dfb345e81aad203db223cec6afe8
|
[
"BSD-3-Clause"
] | null | null | null |
backend/tasks.py
|
ioxio-nexus/mycompany-consent-demo
|
aefa69375c14dfb345e81aad203db223cec6afe8
|
[
"BSD-3-Clause"
] | null | null | null |
backend/tasks.py
|
ioxio-nexus/mycompany-consent-demo
|
aefa69375c14dfb345e81aad203db223cec6afe8
|
[
"BSD-3-Clause"
] | null | null | null |
from os import environ
import uvicorn
from invoke import task
from uvicorn.supervisors import ChangeReload
DEV_ENV = {"FIRESTORE_EMULATOR_HOST": "127.0.0.1:8686"}
@task
def dev(ctx):
environ.update(DEV_ENV)
port = environ.get("PORT", 8000)
host = "0.0.0.0" # nosec, it's not a mistake
config = uvicorn.Config(app="main:app", host=host, port=int(port), debug=True)
server = uvicorn.Server(config)
from app.log import logger # noqa, must be imported before running supervisor
supervisor = ChangeReload(config, target=server.run, sockets=[config.bind_socket()])
supervisor.run()
@task
def serve(ctx):
server = uvicorn.Server(
uvicorn.Config(
app="main:app",
uds="/run/nginx/uvicorn.sock",
forwarded_allow_ips="*",
),
)
server.run()
| 23.885714
| 88
| 0.65311
|
from os import environ
import uvicorn
from invoke import task
from uvicorn.supervisors import ChangeReload
DEV_ENV = {"FIRESTORE_EMULATOR_HOST": "127.0.0.1:8686"}
@task
def dev(ctx):
environ.update(DEV_ENV)
port = environ.get("PORT", 8000)
host = "0.0.0.0"
config = uvicorn.Config(app="main:app", host=host, port=int(port), debug=True)
server = uvicorn.Server(config)
from app.log import logger # noqa, must be imported before running supervisor
supervisor = ChangeReload(config, target=server.run, sockets=[config.bind_socket()])
supervisor.run()
@task
def serve(ctx):
server = uvicorn.Server(
uvicorn.Config(
app="main:app",
uds="/run/nginx/uvicorn.sock",
forwarded_allow_ips="*",
),
)
server.run()
| true
| true
|
1c483d9e661792fda761dec9b82a4f18dbf7a9aa
| 1,126
|
py
|
Python
|
tools/trainercard/trainercard.py
|
stoiandan/OpenPokemonRed
|
3ce2483d4620255c7fe182012f2821be3121c375
|
[
"MIT"
] | 204
|
2020-11-04T07:32:28.000Z
|
2022-01-16T20:39:22.000Z
|
tools/trainercard/trainercard.py
|
stoiandan/OpenPokemonRed
|
3ce2483d4620255c7fe182012f2821be3121c375
|
[
"MIT"
] | 11
|
2020-10-26T07:53:24.000Z
|
2021-01-07T19:03:09.000Z
|
tools/trainercard/trainercard.py
|
stoiandan/OpenPokemonRed
|
3ce2483d4620255c7fe182012f2821be3121c375
|
[
"MIT"
] | 14
|
2020-11-21T22:02:28.000Z
|
2022-02-15T15:26:55.000Z
|
import cv2
import os
import shutil
if os.path.exists("result"):
shutil.rmtree("result")
os.mkdir("result")
# https://www.spriters-resource.com/fullview/8733/
img = cv2.imread("trainercard.png")
leader = [
"brock",
"misty",
"lt_surge",
"erika",
"koga",
"sabrina",
"blaine",
"giovanni"
]
width = 16
height = 16
face = [
[31, 103],
[31+32, 103],
[31+32+32, 103],
[31+32+32+32, 103],
[31, 127],
[31+32, 127],
[31+32+32, 127],
[31+32+32+32, 127],
]
badge = [
[31, 168],
[31+32, 168],
[31+32+32, 168],
[31+32+32+32, 168],
[31, 192],
[31+32, 192],
[31+32+32, 192],
[31+32+32+32, 192],
]
# face
for i in range(8):
name = leader[i]
x0 = face[i][0]
y0 = face[i][1]
x1 = x0 + width
y1 = y0 + height
tile = img[y0:y1, x0:x1]
cv2.imwrite("./result/{}_face.png".format(name), tile)
# badge
for i in range(8):
name = leader[i]
x0 = badge[i][0]
y0 = badge[i][1]
x1 = x0 + width
y1 = y0 + height
tile = img[y0:y1, x0:x1]
cv2.imwrite("./result/{}_badge.png".format(name), tile)
| 16.318841
| 59
| 0.519538
|
import cv2
import os
import shutil
if os.path.exists("result"):
shutil.rmtree("result")
os.mkdir("result")
img = cv2.imread("trainercard.png")
leader = [
"brock",
"misty",
"lt_surge",
"erika",
"koga",
"sabrina",
"blaine",
"giovanni"
]
width = 16
height = 16
face = [
[31, 103],
[31+32, 103],
[31+32+32, 103],
[31+32+32+32, 103],
[31, 127],
[31+32, 127],
[31+32+32, 127],
[31+32+32+32, 127],
]
badge = [
[31, 168],
[31+32, 168],
[31+32+32, 168],
[31+32+32+32, 168],
[31, 192],
[31+32, 192],
[31+32+32, 192],
[31+32+32+32, 192],
]
for i in range(8):
name = leader[i]
x0 = face[i][0]
y0 = face[i][1]
x1 = x0 + width
y1 = y0 + height
tile = img[y0:y1, x0:x1]
cv2.imwrite("./result/{}_face.png".format(name), tile)
for i in range(8):
name = leader[i]
x0 = badge[i][0]
y0 = badge[i][1]
x1 = x0 + width
y1 = y0 + height
tile = img[y0:y1, x0:x1]
cv2.imwrite("./result/{}_badge.png".format(name), tile)
| true
| true
|
1c483dcfb05352b30e44f9812d4f220140dfff77
| 23,987
|
py
|
Python
|
AgentRun.py
|
zhangtjtongxue/DL_RL_Zoo
|
fe8393a941a8c22205b9dc5534f399cf7860f409
|
[
"Apache-2.0"
] | 1
|
2021-06-08T08:20:31.000Z
|
2021-06-08T08:20:31.000Z
|
AgentRun.py
|
zhangtjtongxue/DL_RL_Zoo
|
fe8393a941a8c22205b9dc5534f399cf7860f409
|
[
"Apache-2.0"
] | null | null | null |
AgentRun.py
|
zhangtjtongxue/DL_RL_Zoo
|
fe8393a941a8c22205b9dc5534f399cf7860f409
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import gym
import torch
import numpy as np
from AgentZoo import Recorder
from AgentZoo import BufferArray, initial_exploration
"""
2019-07-01 Zen4Jia1Hao2, GitHub: YonV1943 DL_RL_Zoo RL
2019-11-11 Issay-0.0 [Essay Consciousness]
2020-02-02 Issay-0.1 Deep Learning Techniques (spectral norm, DenseNet, etc.)
2020-04-04 Issay-0.1 [An Essay of Consciousness by YonV1943], IntelAC
2020-04-20 Issay-0.2 SN_AC, IntelAC_UnitedLoss
2020-04-22 Issay-0.2 [Essay, LongDear's Cerebellum (Little Brain)]
2020-06-06 Issay-0.3 check PPO, SAC. Plan to add discrete SAC, EBM(soft-q-learning)
I consider that Reinforcement Learning Algorithms before 2020 have not consciousness
They feel more like a Cerebellum (Little Brain) for Machines.
In my opinion, before 2020, the policy gradient algorithm agent didn't learn s policy.
Actually, they "learn game feel" or "get a soft touch". In Chinese "shou3 gan3".
Learn more about policy gradient algorithms in:
https://lilianweng.github.io/lil-log/2018/04/08/policy-gradient-algorithms.html
2020-04-28 Add Discrete Env CartPole, Pendulum
"""
class Arguments: # default working setting and hyper-parameter
def __init__(self, class_agent):
self.class_agent = class_agent
self.net_dim = 2 ** 7 # the network width
self.max_step = 2 ** 10 # max steps in one epoch
self.max_memo = 2 ** 17 # memories capacity (memories: replay buffer)
self.max_epoch = 2 ** 10 # max times of train_epoch
self.batch_size = 2 ** 7 # num of transitions sampled from replay buffer.
self.repeat_times = 1 # Two-time Update Rule (TTUR)
self.reward_scale = 2 ** 0 # an approximate target reward usually be closed to 256
self.gamma = 0.99 # discount factor of future rewards
self.gpu_id = 0
self.random_seed = 19430
self.is_remove = True # remove the pre-training data? (True, False, None:ask me)
self.env_name = "LunarLanderContinuous-v2"
self.cwd = 'AC_Methods_LL' # current work directory
self.show_gap = 2 ** 7 # show the Reward and Loss of actor and critic per show_gap seconds
def init_for_training(self): # remove cwd, choose GPU, set random seed, set CPU threads
print('GPU: {} | CWD: {}'.format(self.gpu_id, self.cwd))
whether_remove_history(self.cwd, self.is_remove)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
# env.seed() # env has random seed too.
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.set_default_dtype(torch.float32)
torch.set_num_threads(8)
def train_agent__off_policy(
class_agent, net_dim, batch_size, repeat_times, gamma, reward_scale, cwd,
env_name, max_step, max_memo, max_epoch, **_kwargs): # 2020-06-01
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=False)
assert not is_discrete
'''init'''
agent = class_agent(state_dim, action_dim, net_dim) # training agent
agent.state = env.reset()
buffer = BufferArray(max_memo, state_dim, action_dim) # experiment replay buffer
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs) # unnecessary
'''loop'''
with torch.no_grad(): # update replay buffer
# rewards, steps = agent.update_buffer(env, buffer, max_step, max_action, reward_scale, gamma)
rewards, steps = initial_exploration(env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
recorder.show_reward(rewards, steps, loss_a=0, loss_c=0)
try:
for epoch in range(max_epoch):
# update replay buffer by interact with environment
with torch.no_grad(): # for saving the GPU buffer
rewards, steps = agent.update_buffer(
env, buffer, max_step, max_action, reward_scale, gamma)
# update network parameters by random sampling buffer for gradient descent
buffer.init_before_sample()
loss_a, loss_c = agent.update_parameters(
buffer, max_step, batch_size, repeat_times)
# show/check the reward, save the max reward actor
with torch.no_grad(): # for saving the GPU buffer
# NOTICE! Recorder saves the agent with max reward automatically.
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("| raise KeyboardInterrupt and break training loop")
# except AssertionError: # for BipedWalker BUG 2020-03-03
# print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
train_time = recorder.print_and_save_npy(env_name, cwd)
if is_solved:
agent.save_or_load_model(cwd, is_save=True)
draw_plot_with_npy(cwd, train_time)
def train_agent__on_policy(
class_agent, net_dim, batch_size, repeat_times, gamma, reward_scale, cwd,
env_name, max_step, max_memo, max_epoch, **_kwargs): # 2020-0430
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
agent = class_agent(state_dim, action_dim, net_dim)
agent.save_or_load_model(cwd, is_save=False)
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
try:
for epoch in range(max_epoch):
with torch.no_grad(): # just the GPU memory
rewards, steps, buffer = agent.update_buffer_online(
env, max_step, max_memo, max_action, reward_scale, gamma)
loss_a, loss_c = agent.update_parameters_online(
buffer, batch_size, repeat_times)
with torch.no_grad(): # just the GPU memory
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("raise KeyboardInterrupt while training.")
except AssertionError: # for BipedWalker BUG 2020-03-03
print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
return False
train_time = recorder.print_and_save_npy(env_name, cwd)
draw_plot_with_npy(cwd, train_time)
return True
def train_agent_discrete(
class_agent, net_dim, batch_size, repeat_times, gamma, reward_scale, cwd,
env_name, max_step, max_memo, max_epoch, **_kwargs): # 2020-05-20
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
assert is_discrete
'''init'''
agent = class_agent(state_dim, action_dim, net_dim) # training agent
agent.state = env.reset()
buffer = BufferArray(max_memo, state_dim, action_dim=1) # experiment replay buffer
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
'''loop'''
with torch.no_grad(): # update replay buffer
rewards, steps = initial_exploration(
env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
recorder.show_reward(rewards, steps, loss_a=0, loss_c=0)
try:
for epoch in range(max_epoch):
# update replay buffer by interact with environment
with torch.no_grad(): # for saving the GPU buffer
rewards, steps = agent.update_buffer(
env, buffer, max_step, max_action, reward_scale, gamma)
# update network parameters by random sampling buffer for gradient descent
buffer.init_before_sample()
loss_a, loss_c = agent.update_parameters(buffer, max_step, batch_size, repeat_times)
# show/check the reward, save the max reward actor
with torch.no_grad(): # for saving the GPU buffer
# NOTICE! Recorder saves the agent with max reward automatically.
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("| raise KeyboardInterrupt and break training loop")
# except AssertionError: # for BipedWalker BUG 2020-03-03
# print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
train_time = recorder.print_and_save_npy(env_name, cwd)
if is_solved:
agent.save_or_load_model(cwd, is_save=True)
draw_plot_with_npy(cwd, train_time)
"""utils"""
def get_env_info(env, is_print): # 2020-06-06
state_dim = env.observation_space.shape[0]
try:
is_discrete = isinstance(env.action_space, gym.spaces.Discrete)
if is_discrete: # discrete
action_dim = env.action_space.n
action_max = int(1)
elif isinstance(env.action_space, gym.spaces.Box): # make sure it is continuous action space
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
else:
raise AttributeError
except AttributeError:
print("| Could you assign these value manually? \n"
"| I need: state_dim, action_dim, action_max, target_reward, is_discrete")
raise AttributeError
target_reward = env.spec.reward_threshold
if target_reward is None:
print("| Could you assign these value manually? \n"
"| I need: target_reward")
raise ValueError
if is_print:
print("| env_name: {}, action space: {}".format(repr(env)[10:-1], 'Discrete' if is_discrete else 'Continuous'))
print("| state_dim: {}, action_dim: {}, action_max: {}, target_reward: {}".format(
state_dim, action_dim, action_max, target_reward))
return state_dim, action_dim, action_max, target_reward, is_discrete
def draw_plot_with_npy(mod_dir, train_time): # 2020-04-40
record_epoch = np.load('%s/record_epoch.npy' % mod_dir) # , allow_pickle=True)
# record_epoch.append((epoch_reward, actor_loss, critic_loss, iter_num))
record_eval = np.load('%s/record_eval.npy' % mod_dir) # , allow_pickle=True)
# record_eval.append((epoch, eval_reward, eval_std))
# print(';record_epoch:', record_epoch.shape)
# print(';record_eval:', record_eval.shape)
# print(record_epoch)
# # print(record_eval)
# exit()
if len(record_eval.shape) == 1:
record_eval = np.array([[0., 0., 0.]])
train_time = int(train_time)
iter_num = int(sum(record_epoch[:, -1]))
epoch_num = int(record_eval[-1, 0])
save_title = "plot_{:04}E_{}T_{}s".format(epoch_num, iter_num, train_time)
save_path = "{}/{}.png".format(mod_dir, save_title)
"""plot"""
import matplotlib as mpl # draw figure in Terminal
mpl.use('Agg')
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
fig, axs = plt.subplots(2)
plt.title(save_title, y=2.3)
ax13 = axs[0].twinx()
ax13.fill_between(np.arange(record_epoch.shape[0]), record_epoch[:, 3],
facecolor='grey', alpha=0.1, )
ax11 = axs[0]
ax11_color = 'royalblue'
ax11_label = 'Epo R'
ax11.set_ylabel(ylabel=ax11_label, color=ax11_color)
ax11.tick_params(axis='y', labelcolor=ax11_color)
ax11.plot(record_epoch[:, 0], label=ax11_label, color=ax11_color)
ax12 = axs[0]
ax12_color = 'lightcoral'
ax12_label = 'Epoch R'
ax12.set_ylabel(ylabel=ax12_label, color=ax12_color)
ax12.tick_params(axis='y', labelcolor=ax12_color)
xs = record_eval[:, 0]
r_avg = record_eval[:, 1]
r_std = record_eval[:, 2]
ax12.plot(xs, r_avg, label=ax12_label, color=ax12_color)
ax12.fill_between(xs, r_avg - r_std, r_avg + r_std, facecolor=ax12_color, alpha=0.3, )
ax21 = axs[1]
ax21_color = 'darkcyan'
ax21_label = '- loss A'
ax21.set_ylabel(ax21_label, color=ax21_color)
ax21.plot(-record_epoch[:, 1], label=ax21_label, color=ax21_color) # negative loss A
ax21.tick_params(axis='y', labelcolor=ax21_color)
ax22 = axs[1].twinx()
ax22_color = 'darkcyan'
ax22_label = 'loss C'
ax22.set_ylabel(ax22_label, color=ax22_color)
ax22.fill_between(np.arange(record_epoch.shape[0]), record_epoch[:, 2], facecolor=ax22_color, alpha=0.2, )
ax22.tick_params(axis='y', labelcolor=ax22_color)
plt.savefig(save_path)
# plt.show()
# plt.ion()
# plt.pause(4)
def whether_remove_history(cwd, is_remove=None): # 2020-03-04
import shutil
if is_remove is None:
is_remove = bool(input("PRESS 'y' to REMOVE: {}? ".format(cwd)) == 'y')
if is_remove:
shutil.rmtree(cwd, ignore_errors=True)
print("| Remove")
os.makedirs(cwd, exist_ok=True)
# shutil.copy(sys.argv[-1], "{}/AgentRun-py-backup".format(cwd)) # copy *.py to cwd
# shutil.copy('AgentZoo.py', "{}/AgentZoo-py-backup".format(cwd)) # copy *.py to cwd
# shutil.copy('AgentNet.py', "{}/AgentNetwork-py-backup".format(cwd)) # copy *.py to cwd
del shutil
"""demo"""
def run__demo(gpu_id, cwd='AC_BasicAC'):
from AgentZoo import AgentSNAC as AgentClass
args = Arguments(AgentClass)
args.gpu_id = gpu_id
args.env_name = "LunarLanderContinuous-v2"
args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
args.env_name = "BipedalWalker-v3"
args.cwd = './{}/BW_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
def run__zoo(gpu_id, cwd='AC_Zoo'):
import AgentZoo as Zoo
class_agent = Zoo.AgentDeepSAC
assert class_agent in {
Zoo.AgentDDPG, Zoo.AgentTD3, Zoo.ActorSAC, Zoo.AgentDeepSAC,
Zoo.AgentBasicAC, Zoo.AgentSNAC, Zoo.AgentInterAC, Zoo.AgentInterSAC,
} # you can't run PPO here. goto run__ppo(). PPO need its hyper-parameters
args = Arguments(class_agent)
args.gpu_id = gpu_id
args.env_name = "LunarLanderContinuous-v2"
args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
args.env_name = "BipedalWalker-v3"
args.cwd = './{}/BW_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
# args.env_name = "BipedalWalkerHardcore-v3"
# args.cwd = './{}/BWHC_{}'.format(cwd, gpu_id)
# args.net_dim = int(2 ** 8.5)
# args.max_memo = int(2 ** 20)
# args.batch_size = int(2 ** 9)
# args.max_epoch = 2 ** 14
# args.reward_scale = int(2 ** 6.5)
# args.is_remove = None
# args.init_for_training()
# while not train_agent(**vars(args)):
# args.random_seed += 42
# import pybullet_envs # for python-bullet-gym
# dir(pybullet_envs)
# args.env_name = "MinitaurBulletEnv-v0"
# args.cwd = './{}/Minitaur_{}'.format(cwd, args.gpu_id)
# args.max_epoch = 2 ** 13
# args.max_memo = 2 ** 20
# args.net_dim = 2 ** 9
# args.max_step = 2 ** 12
# args.batch_size = 2 ** 8
# args.reward_scale = 2 ** 3
# args.is_remove = True
# args.eva_size = 2 ** 5 # for Recorder
# args.show_gap = 2 ** 8 # for Recorder
# args.init_for_training()
# while not train_agent(**vars(args)):
# args.random_seed += 42
# import pybullet_envs # for python-bullet-gym
# dir(pybullet_envs)
# args.env_name = "AntBulletEnv-v0"
# args.cwd = './{}/Ant_{}'.format(cwd, args.gpu_id)
# args.max_epoch = 2 ** 13
# args.max_memo = 2 ** 20
# args.max_step = 2 ** 10
# args.net_dim = 2 ** 8
# args.batch_size = 2 ** 8
# args.reward_scale = 2 ** -3
# args.is_remove = True
# args.eva_size = 2 ** 5 # for Recorder
# args.show_gap = 2 ** 8 # for Recorder
# args.init_for_training()
# while not train_agent(**vars(args)):
# args.random_seed += 42
def run__ppo(gpu_id, cwd):
import AgentZoo as Zoo
class_agent = Zoo.AgentGAE
assert class_agent in {Zoo.AgentPPO, Zoo.AgentGAE}
args = Arguments(class_agent)
args.gpu_id = gpu_id
args.max_memo = 2 ** 12
args.batch_size = 2 ** 9
args.repeat_times = 2 ** 4
args.net_dim = 2 ** 8
args.gamma = 0.99
args.env_name = "LunarLanderContinuous-v2"
args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
args.init_for_training()
while not train_agent__on_policy(**vars(args)):
args.random_seed += 42
args.env_name = "BipedalWalker-v3"
args.cwd = './{}/BW_{}'.format(cwd, gpu_id)
args.init_for_training()
while not train_agent__on_policy(**vars(args)):
args.random_seed += 42
def run__dqn(gpu_id, cwd='RL_DQN'):
from AgentZoo import AgentDQN
# from AgentZoo import AgentNoisyDQN
# from AgentZoo import AgentDoubleDQN
args = Arguments(AgentDQN)
args.gpu_id = gpu_id
args.show_gap = 2 ** 5
args.env_name = "CartPole-v0"
args.cwd = '{}/{}'.format(cwd, args.env_name)
args.init_for_training()
train_agent_discrete(**vars(args))
args.env_name = "LunarLander-v2"
args.cwd = '{}/{}'.format(cwd, args.env_name)
args.init_for_training()
train_agent_discrete(**vars(args))
def run__multi_process(target_func, gpu_tuple=(0, 1), cwd='RL_MP'):
os.makedirs(cwd, exist_ok=True) # all the files save in here
'''run in multiprocessing'''
import multiprocessing as mp
processes = [mp.Process(target=target_func, args=(gpu_id, cwd)) for gpu_id in gpu_tuple]
[process.start() for process in processes]
[process.join() for process in processes]
def process__buffer(q_aggr, qs_dist, args, **_kwargs):
max_memo = args.max_memo
env_name = args.env_name
max_step = args.max_step
batch_size = args.batch_size
repeat_times = 2
# reward_scale = args.reward_scale
# gamma = args.gamma
'''init'''
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=False)
buffer = BufferArray(max_memo, state_dim, action_dim) # experiment replay buffer
workers_num = len(qs_dist)
'''loop'''
is_training = True
while is_training:
for i in range(workers_num):
memo_array, is_solved = q_aggr.get()
buffer.extend_memo(memo_array)
if is_solved:
is_training = False
buffer.init_before_sample()
for i in range(max_step * repeat_times):
# batch_arrays = buffer.random_sample(batch_size, device=None) # faster but worse
for q_dist in qs_dist:
batch_arrays = buffer.random_sample(batch_size, device=None) # slower but better
q_dist.put(batch_arrays)
print('|| Exit: process__buffer')
def process__workers(gpu_id, root_cwd, q_aggr, q_dist, args, **_kwargs):
class_agent = args.class_agent
env_name = args.env_name
cwd = args.cwd
net_dim = args.net_dim
max_step = args.max_step
# max_memo = args.max_memo
max_epoch = args.max_epoch
batch_size = args.batch_size * 1.5
gamma = args.gamma
update_gap = args.update_gap
reward_scale = args.reward_scale
cwd = '{}/{}_{}'.format(root_cwd, cwd, gpu_id)
os.makedirs(cwd, exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
random_seed = 42 + gpu_id
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.set_default_dtype(torch.float32)
torch.set_num_threads(8)
env = gym.make(env_name)
is_solved = False
class BufferArrayMP(BufferArray):
def init_before_sample(self):
q_aggr.put((self.memories, is_solved))
# self.now_len = self.max_len if self.is_full else self.next_idx
def random_sample(self, _batch_size, device=None):
batch_arrays = q_dist.get()
'''convert array into torch.tensor'''
tensors = [torch.tensor(ary, device=device) for ary in batch_arrays]
return tensors
'''init'''
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
agent = class_agent(env, state_dim, action_dim, net_dim) # training agent
buffer = BufferArrayMP(max_step, state_dim, action_dim) # experiment replay buffer
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
'''loop'''
# with torch.no_grad(): # update replay buffer
# # rewards, steps = agent.update_buffer(
# # env, buffer, max_step, max_action, reward_scale, gamma)
# rewards, steps = initial_exploration(
# env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
# recorder.show_reward(rewards, steps, 0, 0)
try:
for epoch in range(max_epoch):
'''update replay buffer by interact with environment'''
with torch.no_grad(): # for saving the GPU buffer
rewards, steps = agent.update_buffer(env, buffer, max_step, max_action, reward_scale, gamma)
'''update network parameters by random sampling buffer for stochastic gradient descent'''
loss_a, loss_c = agent.update_parameters(buffer, max_step, batch_size, update_gap)
'''show/check the reward, save the max reward actor'''
with torch.no_grad(): # for saving the GPU buffer
'''NOTICE! Recorder saves the agent with max reward automatically. '''
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("raise KeyboardInterrupt while training.")
# except AssertionError: # for BipedWalker BUG 2020-03-03
# print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
# return False
train_time = recorder.print_and_save_npy(env_name, cwd)
# agent.save_or_load_model(cwd, is_save=True) # save max reward agent in Recorder
# buffer.save_or_load_memo(cwd, is_save=True)
draw_plot_with_npy(cwd, train_time)
return True
def run__multi_workers(gpu_tuple=(0, 1), root_cwd='RL_MP'):
print('GPU: {} | CWD: {}'.format(gpu_tuple, root_cwd))
whether_remove_history(root_cwd, is_remove=True)
from AgentZoo import AgentSAC
args = Arguments(AgentSAC)
args.env_name = "BipedalWalker-v3"
# args.env_name = "LunarLanderContinuous-v2"
args.show_gap = 2 ** 8 # for Recorder
'''run in multiprocessing'''
import multiprocessing as mp
workers_num = len(gpu_tuple)
queue_aggr = mp.Queue(maxsize=workers_num) # queue of aggregation
queues_dist = [mp.Queue(maxsize=args.max_step) for _ in range(workers_num)] # queue of distribution
processes = [mp.Process(target=process__buffer, args=(queue_aggr, queues_dist, args))]
processes.extend([mp.Process(target=process__workers, args=(gpu_id, root_cwd, queue_aggr, queue_dist, args))
for gpu_id, queue_dist in zip(gpu_tuple, queues_dist)])
[process.start() for process in processes]
# [process.join() for process in processes]
[process.close() for process in processes]
if __name__ == '__main__':
# run__demo(gpu_id=0, cwd='AC_BasicAC')
run__zoo(gpu_id=0, cwd='AC_SAC')
# run__ppo(gpu_id=1, cwd='AC_PPO')
# run__multi_process(run__zoo, gpu_tuple=(0, 1, 2, 3), cwd='AC_ZooMP')
# run__multi_process(run__ppo, gpu_tuple=(2, 3), cwd='AC_PPO')
# run__multi_workers(gpu_tuple=(2, 3), root_cwd='AC_SAC_MP')
# '''Discrete action space'''
# run__dqn(gpu_id=sys.argv[-1][-4], cwd='RL_DQN')
# '''multi worker'''
# run__multi_workers(gpu_tuple=(2, 3), root_cwd='AC_SAC_MP')
print('Finish:', sys.argv[-1])
| 38.074603
| 119
| 0.6614
|
import os
import sys
import gym
import torch
import numpy as np
from AgentZoo import Recorder
from AgentZoo import BufferArray, initial_exploration
class Arguments:
def __init__(self, class_agent):
self.class_agent = class_agent
self.net_dim = 2 ** 7
self.max_step = 2 ** 10
self.max_memo = 2 ** 17
self.max_epoch = 2 ** 10
self.batch_size = 2 ** 7
self.repeat_times = 1
self.reward_scale = 2 ** 0
self.gamma = 0.99
self.gpu_id = 0
self.random_seed = 19430
self.is_remove = True
self.env_name = "LunarLanderContinuous-v2"
self.cwd = 'AC_Methods_LL'
self.show_gap = 2 ** 7
def init_for_training(self):
print('GPU: {} | CWD: {}'.format(self.gpu_id, self.cwd))
whether_remove_history(self.cwd, self.is_remove)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
lf.random_seed)
torch.manual_seed(self.random_seed)
torch.set_default_dtype(torch.float32)
torch.set_num_threads(8)
def train_agent__off_policy(
class_agent, net_dim, batch_size, repeat_times, gamma, reward_scale, cwd,
env_name, max_step, max_memo, max_epoch, **_kwargs):
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=False)
assert not is_discrete
agent = class_agent(state_dim, action_dim, net_dim)
agent.state = env.reset()
buffer = BufferArray(max_memo, state_dim, action_dim)
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
with torch.no_grad():
rewards, steps = initial_exploration(env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
recorder.show_reward(rewards, steps, loss_a=0, loss_c=0)
try:
for epoch in range(max_epoch):
with torch.no_grad():
rewards, steps = agent.update_buffer(
env, buffer, max_step, max_action, reward_scale, gamma)
buffer.init_before_sample()
loss_a, loss_c = agent.update_parameters(
buffer, max_step, batch_size, repeat_times)
with torch.no_grad():
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("| raise KeyboardInterrupt and break training loop")
.print_and_save_npy(env_name, cwd)
if is_solved:
agent.save_or_load_model(cwd, is_save=True)
draw_plot_with_npy(cwd, train_time)
def train_agent__on_policy(
class_agent, net_dim, batch_size, repeat_times, gamma, reward_scale, cwd,
env_name, max_step, max_memo, max_epoch, **_kwargs):
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
agent = class_agent(state_dim, action_dim, net_dim)
agent.save_or_load_model(cwd, is_save=False)
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
try:
for epoch in range(max_epoch):
with torch.no_grad():
rewards, steps, buffer = agent.update_buffer_online(
env, max_step, max_memo, max_action, reward_scale, gamma)
loss_a, loss_c = agent.update_parameters_online(
buffer, batch_size, repeat_times)
with torch.no_grad():
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("raise KeyboardInterrupt while training.")
except AssertionError:
print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
return False
train_time = recorder.print_and_save_npy(env_name, cwd)
draw_plot_with_npy(cwd, train_time)
return True
def train_agent_discrete(
class_agent, net_dim, batch_size, repeat_times, gamma, reward_scale, cwd,
env_name, max_step, max_memo, max_epoch, **_kwargs):
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
assert is_discrete
agent = class_agent(state_dim, action_dim, net_dim)
agent.state = env.reset()
buffer = BufferArray(max_memo, state_dim, action_dim=1)
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
with torch.no_grad():
rewards, steps = initial_exploration(
env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
recorder.show_reward(rewards, steps, loss_a=0, loss_c=0)
try:
for epoch in range(max_epoch):
with torch.no_grad():
rewards, steps = agent.update_buffer(
env, buffer, max_step, max_action, reward_scale, gamma)
buffer.init_before_sample()
loss_a, loss_c = agent.update_parameters(buffer, max_step, batch_size, repeat_times)
with torch.no_grad():
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("| raise KeyboardInterrupt and break training loop")
.print_and_save_npy(env_name, cwd)
if is_solved:
agent.save_or_load_model(cwd, is_save=True)
draw_plot_with_npy(cwd, train_time)
def get_env_info(env, is_print):
state_dim = env.observation_space.shape[0]
try:
is_discrete = isinstance(env.action_space, gym.spaces.Discrete)
if is_discrete:
action_dim = env.action_space.n
action_max = int(1)
elif isinstance(env.action_space, gym.spaces.Box):
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
else:
raise AttributeError
except AttributeError:
print("| Could you assign these value manually? \n"
"| I need: state_dim, action_dim, action_max, target_reward, is_discrete")
raise AttributeError
target_reward = env.spec.reward_threshold
if target_reward is None:
print("| Could you assign these value manually? \n"
"| I need: target_reward")
raise ValueError
if is_print:
print("| env_name: {}, action space: {}".format(repr(env)[10:-1], 'Discrete' if is_discrete else 'Continuous'))
print("| state_dim: {}, action_dim: {}, action_max: {}, target_reward: {}".format(
state_dim, action_dim, action_max, target_reward))
return state_dim, action_dim, action_max, target_reward, is_discrete
def draw_plot_with_npy(mod_dir, train_time):
record_epoch = np.load('%s/record_epoch.npy' % mod_dir)
record_eval = np.load('%s/record_eval.npy' % mod_dir)
cord_eval.shape) == 1:
record_eval = np.array([[0., 0., 0.]])
train_time = int(train_time)
iter_num = int(sum(record_epoch[:, -1]))
epoch_num = int(record_eval[-1, 0])
save_title = "plot_{:04}E_{}T_{}s".format(epoch_num, iter_num, train_time)
save_path = "{}/{}.png".format(mod_dir, save_title)
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2)
plt.title(save_title, y=2.3)
ax13 = axs[0].twinx()
ax13.fill_between(np.arange(record_epoch.shape[0]), record_epoch[:, 3],
facecolor='grey', alpha=0.1, )
ax11 = axs[0]
ax11_color = 'royalblue'
ax11_label = 'Epo R'
ax11.set_ylabel(ylabel=ax11_label, color=ax11_color)
ax11.tick_params(axis='y', labelcolor=ax11_color)
ax11.plot(record_epoch[:, 0], label=ax11_label, color=ax11_color)
ax12 = axs[0]
ax12_color = 'lightcoral'
ax12_label = 'Epoch R'
ax12.set_ylabel(ylabel=ax12_label, color=ax12_color)
ax12.tick_params(axis='y', labelcolor=ax12_color)
xs = record_eval[:, 0]
r_avg = record_eval[:, 1]
r_std = record_eval[:, 2]
ax12.plot(xs, r_avg, label=ax12_label, color=ax12_color)
ax12.fill_between(xs, r_avg - r_std, r_avg + r_std, facecolor=ax12_color, alpha=0.3, )
ax21 = axs[1]
ax21_color = 'darkcyan'
ax21_label = '- loss A'
ax21.set_ylabel(ax21_label, color=ax21_color)
ax21.plot(-record_epoch[:, 1], label=ax21_label, color=ax21_color)
ax21.tick_params(axis='y', labelcolor=ax21_color)
ax22 = axs[1].twinx()
ax22_color = 'darkcyan'
ax22_label = 'loss C'
ax22.set_ylabel(ax22_label, color=ax22_color)
ax22.fill_between(np.arange(record_epoch.shape[0]), record_epoch[:, 2], facecolor=ax22_color, alpha=0.2, )
ax22.tick_params(axis='y', labelcolor=ax22_color)
plt.savefig(save_path)
def whether_remove_history(cwd, is_remove=None):
import shutil
if is_remove is None:
is_remove = bool(input("PRESS 'y' to REMOVE: {}? ".format(cwd)) == 'y')
if is_remove:
shutil.rmtree(cwd, ignore_errors=True)
print("| Remove")
os.makedirs(cwd, exist_ok=True)
d='AC_BasicAC'):
from AgentZoo import AgentSNAC as AgentClass
args = Arguments(AgentClass)
args.gpu_id = gpu_id
args.env_name = "LunarLanderContinuous-v2"
args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
args.env_name = "BipedalWalker-v3"
args.cwd = './{}/BW_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
def run__zoo(gpu_id, cwd='AC_Zoo'):
import AgentZoo as Zoo
class_agent = Zoo.AgentDeepSAC
assert class_agent in {
Zoo.AgentDDPG, Zoo.AgentTD3, Zoo.ActorSAC, Zoo.AgentDeepSAC,
Zoo.AgentBasicAC, Zoo.AgentSNAC, Zoo.AgentInterAC, Zoo.AgentInterSAC,
}
args = Arguments(class_agent)
args.gpu_id = gpu_id
args.env_name = "LunarLanderContinuous-v2"
args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
args.env_name = "BipedalWalker-v3"
args.cwd = './{}/BW_{}'.format(cwd, gpu_id)
args.init_for_training()
train_agent__off_policy(**vars(args))
# args.env_name = "BipedalWalkerHardcore-v3"
# args.cwd = './{}/BWHC_{}'.format(cwd, gpu_id)
# args.net_dim = int(2 ** 8.5)
# args.max_memo = int(2 ** 20)
# args.batch_size = int(2 ** 9)
# args.max_epoch = 2 ** 14
# args.reward_scale = int(2 ** 6.5)
# args.is_remove = None
# args.init_for_training()
# while not train_agent(**vars(args)):
# args.random_seed += 42
# import pybullet_envs # for python-bullet-gym
# dir(pybullet_envs)
# args.env_name = "MinitaurBulletEnv-v0"
# args.cwd = './{}/Minitaur_{}'.format(cwd, args.gpu_id)
# args.max_epoch = 2 ** 13
# args.max_memo = 2 ** 20
# args.net_dim = 2 ** 9
# args.max_step = 2 ** 12
# args.batch_size = 2 ** 8
# args.reward_scale = 2 ** 3
# args.is_remove = True
# args.eva_size = 2 ** 5 # for Recorder
# args.show_gap = 2 ** 8 # for Recorder
# args.init_for_training()
# while not train_agent(**vars(args)):
# args.random_seed += 42
# import pybullet_envs # for python-bullet-gym
# dir(pybullet_envs)
# args.env_name = "AntBulletEnv-v0"
# args.cwd = './{}/Ant_{}'.format(cwd, args.gpu_id)
# args.max_epoch = 2 ** 13
# args.max_memo = 2 ** 20
# args.max_step = 2 ** 10
# args.net_dim = 2 ** 8
# args.batch_size = 2 ** 8
# args.reward_scale = 2 ** -3
# args.is_remove = True
# args.eva_size = 2 ** 5 # for Recorder
# args.show_gap = 2 ** 8 # for Recorder
# args.init_for_training()
# while not train_agent(**vars(args)):
# args.random_seed += 42
def run__ppo(gpu_id, cwd):
import AgentZoo as Zoo
class_agent = Zoo.AgentGAE
assert class_agent in {Zoo.AgentPPO, Zoo.AgentGAE}
args = Arguments(class_agent)
args.gpu_id = gpu_id
args.max_memo = 2 ** 12
args.batch_size = 2 ** 9
args.repeat_times = 2 ** 4
args.net_dim = 2 ** 8
args.gamma = 0.99
args.env_name = "LunarLanderContinuous-v2"
args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
args.init_for_training()
while not train_agent__on_policy(**vars(args)):
args.random_seed += 42
args.env_name = "BipedalWalker-v3"
args.cwd = './{}/BW_{}'.format(cwd, gpu_id)
args.init_for_training()
while not train_agent__on_policy(**vars(args)):
args.random_seed += 42
def run__dqn(gpu_id, cwd='RL_DQN'):
from AgentZoo import AgentDQN
# from AgentZoo import AgentNoisyDQN
# from AgentZoo import AgentDoubleDQN
args = Arguments(AgentDQN)
args.gpu_id = gpu_id
args.show_gap = 2 ** 5
args.env_name = "CartPole-v0"
args.cwd = '{}/{}'.format(cwd, args.env_name)
args.init_for_training()
train_agent_discrete(**vars(args))
args.env_name = "LunarLander-v2"
args.cwd = '{}/{}'.format(cwd, args.env_name)
args.init_for_training()
train_agent_discrete(**vars(args))
def run__multi_process(target_func, gpu_tuple=(0, 1), cwd='RL_MP'):
os.makedirs(cwd, exist_ok=True) # all the files save in here
import multiprocessing as mp
processes = [mp.Process(target=target_func, args=(gpu_id, cwd)) for gpu_id in gpu_tuple]
[process.start() for process in processes]
[process.join() for process in processes]
def process__buffer(q_aggr, qs_dist, args, **_kwargs):
max_memo = args.max_memo
env_name = args.env_name
max_step = args.max_step
batch_size = args.batch_size
repeat_times = 2
# reward_scale = args.reward_scale
# gamma = args.gamma
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=False)
buffer = BufferArray(max_memo, state_dim, action_dim) # experiment replay buffer
workers_num = len(qs_dist)
is_training = True
while is_training:
for i in range(workers_num):
memo_array, is_solved = q_aggr.get()
buffer.extend_memo(memo_array)
if is_solved:
is_training = False
buffer.init_before_sample()
for i in range(max_step * repeat_times):
# batch_arrays = buffer.random_sample(batch_size, device=None) # faster but worse
for q_dist in qs_dist:
batch_arrays = buffer.random_sample(batch_size, device=None) # slower but better
q_dist.put(batch_arrays)
print('|| Exit: process__buffer')
def process__workers(gpu_id, root_cwd, q_aggr, q_dist, args, **_kwargs):
class_agent = args.class_agent
env_name = args.env_name
cwd = args.cwd
net_dim = args.net_dim
max_step = args.max_step
# max_memo = args.max_memo
max_epoch = args.max_epoch
batch_size = args.batch_size * 1.5
gamma = args.gamma
update_gap = args.update_gap
reward_scale = args.reward_scale
cwd = '{}/{}_{}'.format(root_cwd, cwd, gpu_id)
os.makedirs(cwd, exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
random_seed = 42 + gpu_id
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.set_default_dtype(torch.float32)
torch.set_num_threads(8)
env = gym.make(env_name)
is_solved = False
class BufferArrayMP(BufferArray):
def init_before_sample(self):
q_aggr.put((self.memories, is_solved))
# self.now_len = self.max_len if self.is_full else self.next_idx
def random_sample(self, _batch_size, device=None):
batch_arrays = q_dist.get()
tensors = [torch.tensor(ary, device=device) for ary in batch_arrays]
return tensors
state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
agent = class_agent(env, state_dim, action_dim, net_dim) # training agent
buffer = BufferArrayMP(max_step, state_dim, action_dim) # experiment replay buffer
recorder = Recorder(agent, max_step, max_action, target_reward, env_name, **_kwargs)
# with torch.no_grad(): # update replay buffer
# # rewards, steps = agent.update_buffer(
# # env, buffer, max_step, max_action, reward_scale, gamma)
# rewards, steps = initial_exploration(
# env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
# recorder.show_reward(rewards, steps, 0, 0)
try:
for epoch in range(max_epoch):
with torch.no_grad(): # for saving the GPU buffer
rewards, steps = agent.update_buffer(env, buffer, max_step, max_action, reward_scale, gamma)
loss_a, loss_c = agent.update_parameters(buffer, max_step, batch_size, update_gap)
with torch.no_grad(): # for saving the GPU buffer
recorder.show_reward(rewards, steps, loss_a, loss_c)
is_solved = recorder.check_reward(cwd, loss_a, loss_c)
if is_solved:
break
except KeyboardInterrupt:
print("raise KeyboardInterrupt while training.")
# except AssertionError: # for BipedWalker BUG 2020-03-03
# print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
# return False
train_time = recorder.print_and_save_npy(env_name, cwd)
# agent.save_or_load_model(cwd, is_save=True) # save max reward agent in Recorder
# buffer.save_or_load_memo(cwd, is_save=True)
draw_plot_with_npy(cwd, train_time)
return True
def run__multi_workers(gpu_tuple=(0, 1), root_cwd='RL_MP'):
print('GPU: {} | CWD: {}'.format(gpu_tuple, root_cwd))
whether_remove_history(root_cwd, is_remove=True)
from AgentZoo import AgentSAC
args = Arguments(AgentSAC)
args.env_name = "BipedalWalker-v3"
# args.env_name = "LunarLanderContinuous-v2"
args.show_gap = 2 ** 8 # for Recorder
import multiprocessing as mp
workers_num = len(gpu_tuple)
queue_aggr = mp.Queue(maxsize=workers_num) # queue of aggregation
queues_dist = [mp.Queue(maxsize=args.max_step) for _ in range(workers_num)] # queue of distribution
processes = [mp.Process(target=process__buffer, args=(queue_aggr, queues_dist, args))]
processes.extend([mp.Process(target=process__workers, args=(gpu_id, root_cwd, queue_aggr, queue_dist, args))
for gpu_id, queue_dist in zip(gpu_tuple, queues_dist)])
[process.start() for process in processes]
# [process.join() for process in processes]
[process.close() for process in processes]
if __name__ == '__main__':
# run__demo(gpu_id=0, cwd='AC_BasicAC')
run__zoo(gpu_id=0, cwd='AC_SAC')
# run__ppo(gpu_id=1, cwd='AC_PPO')
# run__multi_process(run__zoo, gpu_tuple=(0, 1, 2, 3), cwd='AC_ZooMP')
# run__multi_process(run__ppo, gpu_tuple=(2, 3), cwd='AC_PPO')
# run__multi_workers(gpu_tuple=(2, 3), root_cwd='AC_SAC_MP')
# '''Discrete action space'''
# run__dqn(gpu_id=sys.argv[-1][-4], cwd='RL_DQN')
# '''multi worker'''
# run__multi_workers(gpu_tuple=(2, 3), root_cwd='AC_SAC_MP')
print('Finish:', sys.argv[-1])
| true
| true
|
1c483e974ca788f3d309d21acc49121f28db829a
| 911
|
py
|
Python
|
searches/double_linear_search_recursion.py
|
jenia90/Python
|
696fb4a681ad9e4d84e0d2b894daf449a3e30b24
|
[
"MIT"
] | 145,614
|
2016-07-21T05:40:05.000Z
|
2022-03-31T22:17:22.000Z
|
searches/double_linear_search_recursion.py
|
Agha-Muqarib/Python
|
04f156a8973d6156a4357e0717d9eb0aa264d086
|
[
"MIT"
] | 3,987
|
2016-07-28T17:31:25.000Z
|
2022-03-30T23:07:46.000Z
|
searches/double_linear_search_recursion.py
|
Agha-Muqarib/Python
|
04f156a8973d6156a4357e0717d9eb0aa264d086
|
[
"MIT"
] | 40,014
|
2016-07-26T15:14:41.000Z
|
2022-03-31T22:23:03.000Z
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
"""
Iterate through the array to find the index of key using recursion.
:param list_data: the list to be searched
:param key: the key to be searched
:param left: the index of first element
:param right: the index of last element
:return: the index of key value if found, -1 otherwise.
>>> search(list(range(0, 11)), 5)
5
>>> search([1, 2, 4, 5, 3], 4)
2
>>> search([1, 2, 4, 5, 3], 6)
-1
>>> search([5], 5)
0
>>> search([], 1)
-1
"""
right = right or len(list_data) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25.305556
| 76
| 0.567508
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
right = right or len(list_data) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| true
| true
|
1c483eda5ea59fd0903c853ecf78214873dd9e96
| 652
|
py
|
Python
|
qiskit/providers/aer/backends/__init__.py
|
derivation/qiskit-aer
|
d8d77270c745e4c31129ce7f816a93e1efc2e743
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/aer/backends/__init__.py
|
derivation/qiskit-aer
|
d8d77270c745e4c31129ce7f816a93e1efc2e743
|
[
"Apache-2.0"
] | 29
|
2018-12-19T10:11:00.000Z
|
2018-12-19T10:16:18.000Z
|
qiskit/providers/aer/backends/__init__.py
|
atilag/qiskit-aer
|
d964795b0a24b1d3287ba2ba2dda45d1dfed4a5d
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Aer Backends."""
from .qasm_simulator import QasmSimulator
from .statevector_simulator import StatevectorSimulator
from .unitary_simulator import UnitarySimulator
| 34.315789
| 77
| 0.779141
|
from .qasm_simulator import QasmSimulator
from .statevector_simulator import StatevectorSimulator
from .unitary_simulator import UnitarySimulator
| true
| true
|
1c483f25e50a0b953ab2d539931be326a66b2eb4
| 18,592
|
py
|
Python
|
src/pyphocorehelpers/gui/Qt/GlobalConnectionManager.py
|
CommanderPho/pyPhoCoreHelpers
|
1872cc9779d3ec936077be1df867fc13bc7f177a
|
[
"MIT"
] | null | null | null |
src/pyphocorehelpers/gui/Qt/GlobalConnectionManager.py
|
CommanderPho/pyPhoCoreHelpers
|
1872cc9779d3ec936077be1df867fc13bc7f177a
|
[
"MIT"
] | null | null | null |
src/pyphocorehelpers/gui/Qt/GlobalConnectionManager.py
|
CommanderPho/pyPhoCoreHelpers
|
1872cc9779d3ec936077be1df867fc13bc7f177a
|
[
"MIT"
] | null | null | null |
# GlobalConnectionManager
from indexed import IndexedOrderedDict
from qtpy import QtCore, QtWidgets, QtGui
"""
Requires
https://github.com/jazzycamel/PyQt5Singleton.git
pip install PyQt5Singleton
"""
from PyQt5Singleton import Singleton
class GlobalConnectionManager(QtCore.QObject, metaclass=Singleton):
""" A singleton owned by the QApplication instance that owns connections between widgets/windows and includes tools for discovering widgets to control/be controlled by. """
_currentInstance = None
def __init__(self, owning_application: QtWidgets.QApplication, parent=None, **kwargs):
super(GlobalConnectionManager, self).__init__(parent, **kwargs)
if owning_application is None or not isinstance(owning_application, QtWidgets.QApplication):
# app was never constructed is already deleted or is an
# QCoreApplication/QGuiApplication and not a full QApplication
raise NotImplementedError
# Setup member variables:
self._registered_available_drivers = IndexedOrderedDict({})
self._registered_available_drivables = IndexedOrderedDict({})
self._active_connections = IndexedOrderedDict({})
# Setup internal connections:
# owning_application.aboutToQuit.connect(self.on_application_quit)
@property
def registered_available_drivers(self):
""" an IndexedOrderedDict of widget/objects that can drive a certain property (currently limited to time or time windows) """
return self._registered_available_drivers
@property
def registered_available_drivables(self):
""" an IndexedOrderedDict of widgets/objects that can be driven by a driver."""
return self._registered_available_drivables
@property
def active_connections(self):
""" an IndexedOrderedDict of widgets/objects that can be driven by a driver."""
return self._active_connections
#### ================ Registration Methods:
def register_driver(self, driver, driver_identifier=None):
"""Registers a new driver object/widget """
return GlobalConnectionManager.register_control_object(self._registered_available_drivers, driver, driver_identifier) # return the new identifier
def register_drivable(self, drivable, drivable_identifier=None):
return GlobalConnectionManager.register_control_object(self._registered_available_drivables, drivable, drivable_identifier) # return the new identifier
def unregister_object(self, control_object, debug_print=True):
# unregisters object from both drivers and drivables
# For Driver list:
found_driver_key, found_object = GlobalConnectionManager._unregister_object(self._registered_available_drivers, control_object=control_object)
if found_driver_key is not None:
print(f'removed object with key {found_driver_key} from drivers list.')
# For Drivable List:
found_drivable_key, found_object = GlobalConnectionManager._unregister_object(self._registered_available_drivables, control_object=control_object)
if found_drivable_key is not None:
print(f'removed object with key {found_drivable_key} from drivers list.')
return found_driver_key, found_drivable_key
def connect_drivable_to_driver(self, drivable, driver, custom_connect_function=None):
""" attempts to connect the drivable to the driver.
drivable/driver can either be a key for a drivable/driver already registered or the drivable/driver itself.
Inputs:
custom_connect_function: is an optional Callable that takes the driver, drivable as input and returns a connection.
"""
# Get key for drivable:
if isinstance(drivable, str):
drivable_key = drivable
drivable = self.registered_available_drivables[drivable_key]
else:
# already have the object, just find the key:
drivable_key = GlobalConnectionManager._try_find_object_key(self.registered_available_drivables, control_object=drivable)
# Get Key for driver:
if isinstance(driver, str):
driver_key = driver
driver = self.registered_available_drivers[driver_key]
else:
# already have the object, just find the key:
driver_key = GlobalConnectionManager._try_find_object_key(self.registered_available_drivers, control_object=driver)
## Make sure the connection doesn't already exist:
extant_connection = self.active_connections.get(drivable, None)
if extant_connection is None:
## Make the connection:
if custom_connect_function is not None:
# Perform the custom connection function:
new_connection_obj = custom_connect_function(driver, drivable)
else:
# Otherwise perform the default:
new_connection_obj = GlobalConnectionManager.connect_additional_controlled_plotter(driver, controlled_plt=drivable)
self.active_connections[drivable] = new_connection_obj # add the connection object to the self.active_connections array
return self.active_connections[drivable]
else:
print(f'connection already existed!')
return extant_connection
## Make the connection:
## Sync ipspikesDataExplorer to raster window:
# extra_interactive_spike_behavior_browser_sync_connection = spike_raster_window.connect_additional_controlled_plotter(controlled_plt=ipspikesDataExplorer)
# extra_interactive_spike_behavior_browser_sync_connection = _connect_additional_controlled_plotter(spike_raster_window.spike_raster_plt_2d, ipspikesDataExplorer)
def disconnect_drivable(self, drivable):
""" disconnects the drivable from any drivers. """
self.unregister_object(drivable)
#### ================ Access Methods:
def get_available_drivers(self):
""" gets a list of the available widgets that could be used to drive a time widget. """
return self.registered_available_drivers
def get_available_drivables(self):
""" gets a list of the available widgets that could be driven via a time widget. """
return self.registered_available_drivables
#### ================ Utility Methods:
def _disambiguate_driver_name(self, extant_name):
""" attempts to create a unique name for the driver that doesn't already exist in the dict and return it """
return GlobalConnectionManager.disambiguate_registered_name(self._registered_available_drivers, extant_name)
def _disambiguate_drivable_name(self, extant_name):
""" attempts to create a unique name for the drivable that doesn't already exist in the dict and return it """
return GlobalConnectionManager.disambiguate_registered_name(self._registered_available_drivables, extant_name)
#### ================ Slots Methods:
# @QtCore.Slot()
# def on_application_quit(self):
# print(f'GlobalConnectionManager.on_application_quit')
# GlobalConnectionManager._currentInstance = None
#### ================ Static Methods:
@classmethod
def disambiguate_registered_name(cls, registraction_dict, extant_name):
""" attempts to create a unique name for the driver/drivee that doesn't already exist in the dict and return it """
matching_names_with_prefix = list(filter(lambda x: x.startswith(extant_name), list(registraction_dict.keys())))
itr_index = len(matching_names_with_prefix) # get the next number after the previous matching names to build a string like # "RasterPlot2D_1"
proposed_driver_identifier = f'{extant_name}_{itr_index}'
# Proposed name shouldn't exist:
extant_driver_with_identifier = registraction_dict.get(proposed_driver_identifier, None)
assert extant_driver_with_identifier is None, f"Driver with new name {extant_driver_with_identifier} already exists too!"
# return the new name
return proposed_driver_identifier
@classmethod
def register_control_object(cls, registraction_dict, control_object, control_identifier=None):
"""Registers a new driver or driven object/widget
Args:
control_object (_type_): _description_
control_identifier (_type_, optional): _description_. Defaults to None.
Returns:
_type_: _description_
"""
if control_identifier is None:
control_identifier = control_object.windowName # 'Spike3DRasterWindow'
try:
extant_driver_index = list(registraction_dict.values()).index(control_object)
# Driver already exists somewhere in the registered drivers:
return registraction_dict.keys()[extant_driver_index] # return its key
except ValueError as e:
# driver doesn't exist anywhere in the registered drivers:
pass
extant_driver_with_identifier = registraction_dict.get(control_identifier, None)
if extant_driver_with_identifier is not None:
# driver already exists with this identifier:
# check and see if it's the same object
if extant_driver_with_identifier == control_object:
# driver with this key already exists, but it's the same driver, so it's just attempting to be re-registered for some reason. No problem.
return
else:
print(f'driver with key {control_identifier} already exists and is a different object. Disambiguating name...')
# control_identifier = self.disambiguate_driver_name(control_identifier)
control_identifier = GlobalConnectionManager.disambiguate_registered_name(registraction_dict, control_identifier)
print(f'\t proposed_driver_name is now {control_identifier}')
# now has a unique driver identifier
# register the driver provided:
registraction_dict[control_identifier] = control_object
return control_identifier # return the new identifier
@classmethod
def _try_find_object_key(cls, registraction_dict, control_object):
# tries to find the key of the object in the provided registration_dict
found_key = None
try:
extant_item_index = list(registraction_dict.values()).index(control_object)
found_key = registraction_dict.keys()[extant_item_index]
return found_key
except ValueError as e:
pass
except KeyError as e:
pass
return found_key
@classmethod
def _unregister_object(cls, registraction_dict, control_object):
# unregisters object from both drivers and drivables
found_key = cls._try_find_object_key(registraction_dict, control_object=control_object)
found_object = None
if found_key is not None:
found_object = registraction_dict.pop(found_key) # pop the key
## TODO: tear down any connections that use it.
return found_key, found_object
#### ================ Static Methods factored out of SyncedTimelineWindowLink.py on 2022-05-25
@classmethod
def connect_additional_controlled_plotter(cls, source_spike_raster_plt, controlled_plt):
""" allow the window to control InteractivePlaceCellDataExplorer (ipspikesDataExplorer) objects;
source_spike_raster_plt: the spike raster plotter to connect to as the source
controlled_plt: should be a InteractivePlaceCellDataExplorer object (ipspikesDataExplorer), but can be any function with a valid update_window_start_end @QtCore.Slot(float, float) slot.
Requirements:
source_spike_raster_plt:
.spikes_window.active_time_window
.window_scrolled
controlled_plt:
.disable_ui_window_updating_controls()
.update_window_start_end(float, float)
Usage:
from pyphoplacecellanalysis.GUI.Qt.SpikeRasterWindows.Spike3DRasterWindowWidget import Spike3DRasterWindowWidget
# Build the controlled ipspikesDataExplorer:
display_output = dict()
pActiveSpikesBehaviorPlotter = None
display_output = display_output | curr_active_pipeline.display(DefaultDisplayFunctions._display_3d_interactive_spike_and_behavior_browser, active_config_name, extant_plotter=display_output.get('pActiveSpikesBehaviorPlotter', None)) # Works now!
ipspikesDataExplorer = display_output['ipspikesDataExplorer']
display_output['pActiveSpikesBehaviorPlotter'] = display_output.pop('plotter') # rename the key from the generic "plotter" to "pActiveSpikesBehaviorPlotter" to avoid collisions with others
pActiveSpikesBehaviorPlotter = display_output['pActiveSpikesBehaviorPlotter']
# Build the contolling raster window:
spike_raster_window = Spike3DRasterWindowWidget(curr_spikes_df)
# Call this function to connect them:
extra_interactive_spike_behavior_browser_sync_connection = connect_additional_controlled_plotter(spike_raster_window.spike_raster_plt_2d, ipspikesDataExplorer)
"""
# Perform Initial (one-time) update from source -> controlled:
controlled_plt.disable_ui_window_updating_controls() # disable the GUI for manual updates.
controlled_plt.update_window_start_end(source_spike_raster_plt.spikes_window.active_time_window[0], source_spike_raster_plt.spikes_window.active_time_window[1])
# Connect to update self when video window playback position changes
sync_connection = source_spike_raster_plt.window_scrolled.connect(controlled_plt.update_window_start_end)
return sync_connection
@classmethod
def connect_controlled_time_synchornized_plotter(cls, source_spike_raster_plt, controlled_plt):
"""
source_spike_raster_plt: TimeSynchronizedPlotterBase
Identical to the connect_additional_controlled_plotter(...) but uses on_window_changed(...) instead of update_window_start_end(...)
"""
controlled_plt.on_window_changed(source_spike_raster_plt.spikes_window.active_time_window[0], source_spike_raster_plt.spikes_window.active_time_window[1])
sync_connection = source_spike_raster_plt.window_scrolled.connect(controlled_plt.on_window_changed) # connect the window_scrolled event to the _on_window_updated function
return sync_connection
# @classmethod
# def connect_additional_controlled_spike_raster_plotter(cls, spike_raster_plt_2d, controlled_spike_raster_plt):
# """ Connect an additional plotter to a source that's driving the update of the data-window:
# Requirements:
# source_spike_raster_plt:
# .spikes_window.active_time_window
# .window_scrolled
# controlled_spike_raster_plt:
# .spikes_window.update_window_start_end(float, float)
# Usage:
# spike_raster_plt_3d, spike_raster_plt_2d, spike_3d_to_2d_window_connection = build_spike_3d_raster_with_2d_controls(curr_spikes_df)
# spike_raster_plt_3d_vedo = Spike3DRaster_Vedo(curr_spikes_df, window_duration=15.0, window_start_time=30.0, neuron_colors=None, neuron_sort_order=None)
# extra_vedo_sync_connection = connect_additional_controlled_spike_raster_plotter(spike_raster_plt_2d, spike_raster_plt_3d_vedo)
# """
# controlled_spike_raster_plt.spikes_window.update_window_start_end(spike_raster_plt_2d.spikes_window.active_time_window[0], spike_raster_plt_2d.spikes_window.active_time_window[1])
# # Connect to update self when video window playback position changes
# sync_connection = spike_raster_plt_2d.window_scrolled.connect(controlled_spike_raster_plt.spikes_window.update_window_start_end)
# return sync_connection
### Usesful Examples:
### Checking if application instance exists yet:
# if QtGui.QApplication.instance() is None:
# return
### Checking if an object is still alive/extant:
# from ...Qt import isQObjectAlive
# for k in ViewBox.AllViews:
# if isQObjectAlive(k) and getConfigOption('crashWarning'):
# sys.stderr.write('Warning: ViewBox should be closed before application exit.\n')
# try:
# k.destroyed.disconnect()
# except RuntimeError: ## signal is already disconnected.
# pass
# except TypeError: ## view has already been deleted (?)
# pass
# except AttributeError: # PySide has deleted signal
# pass
class GlobalConnectionManagerAccessingMixin:
""" Implementor owns a connection manager instance which it usually uses to register itself or its children as drivers/drivable
Required Properties:
._connection_man
"""
@property
def connection_man(self):
"""The connection_man property."""
return self._connection_man
def GlobalConnectionManagerAccessingMixin_on_init(self, owning_application=None):
if owning_application is None:
owning_application = QtWidgets.QApplication.instance() # <PyQt5.QtWidgets.QApplication at 0x1d44a4891f0>
if owning_application is None:
print(f'could not get valid QApplication instance!')
raise NotImplementedError
# Set self._connection_man:
self._connection_man = GlobalConnectionManager(owning_application=owning_application)
########################################################
## For GlobalConnectionManagerAccessingMixin conformance:
########################################################
# @QtCore.pyqtSlot()
def GlobalConnectionManagerAccessingMixin_on_setup(self):
""" perfrom registration of drivers/drivables:"""
## TODO: register children
pass
# @QtCore.pyqtSlot()
def GlobalConnectionManagerAccessingMixin_on_destroy(self):
""" perfrom teardown/destruction of anything that needs to be manually removed or released """
## TODO: unregister children
pass
| 49.978495
| 256
| 0.701592
|
from indexed import IndexedOrderedDict
from qtpy import QtCore, QtWidgets, QtGui
from PyQt5Singleton import Singleton
class GlobalConnectionManager(QtCore.QObject, metaclass=Singleton):
_currentInstance = None
def __init__(self, owning_application: QtWidgets.QApplication, parent=None, **kwargs):
super(GlobalConnectionManager, self).__init__(parent, **kwargs)
if owning_application is None or not isinstance(owning_application, QtWidgets.QApplication):
raise NotImplementedError
self._registered_available_drivers = IndexedOrderedDict({})
self._registered_available_drivables = IndexedOrderedDict({})
self._active_connections = IndexedOrderedDict({})
@property
def registered_available_drivers(self):
return self._registered_available_drivers
@property
def registered_available_drivables(self):
return self._registered_available_drivables
@property
def active_connections(self):
return self._active_connections
ect(self._registered_available_drivers, driver, driver_identifier)
def register_drivable(self, drivable, drivable_identifier=None):
return GlobalConnectionManager.register_control_object(self._registered_available_drivables, drivable, drivable_identifier)
def unregister_object(self, control_object, debug_print=True):
found_driver_key, found_object = GlobalConnectionManager._unregister_object(self._registered_available_drivers, control_object=control_object)
if found_driver_key is not None:
print(f'removed object with key {found_driver_key} from drivers list.')
found_drivable_key, found_object = GlobalConnectionManager._unregister_object(self._registered_available_drivables, control_object=control_object)
if found_drivable_key is not None:
print(f'removed object with key {found_drivable_key} from drivers list.')
return found_driver_key, found_drivable_key
def connect_drivable_to_driver(self, drivable, driver, custom_connect_function=None):
if isinstance(drivable, str):
drivable_key = drivable
drivable = self.registered_available_drivables[drivable_key]
else:
drivable_key = GlobalConnectionManager._try_find_object_key(self.registered_available_drivables, control_object=drivable)
if isinstance(driver, str):
driver_key = driver
driver = self.registered_available_drivers[driver_key]
else:
driver_key = GlobalConnectionManager._try_find_object_key(self.registered_available_drivers, control_object=driver)
ons.get(drivable, None)
if extant_connection is None:
## Make the connection:
if custom_connect_function is not None:
# Perform the custom connection function:
new_connection_obj = custom_connect_function(driver, drivable)
else:
# Otherwise perform the default:
new_connection_obj = GlobalConnectionManager.connect_additional_controlled_plotter(driver, controlled_plt=drivable)
self.active_connections[drivable] = new_connection_obj # add the connection object to the self.active_connections array
return self.active_connections[drivable]
else:
print(f'connection already existed!')
return extant_connection
## Make the connection:
## Sync ipspikesDataExplorer to raster window:
# extra_interactive_spike_behavior_browser_sync_connection = spike_raster_window.connect_additional_controlled_plotter(controlled_plt=ipspikesDataExplorer)
# extra_interactive_spike_behavior_browser_sync_connection = _connect_additional_controlled_plotter(spike_raster_window.spike_raster_plt_2d, ipspikesDataExplorer)
def disconnect_drivable(self, drivable):
self.unregister_object(drivable)
#### ================ Access Methods:
def get_available_drivers(self):
return self.registered_available_drivers
def get_available_drivables(self):
return self.registered_available_drivables
#### ================ Utility Methods:
def _disambiguate_driver_name(self, extant_name):
return GlobalConnectionManager.disambiguate_registered_name(self._registered_available_drivers, extant_name)
def _disambiguate_drivable_name(self, extant_name):
return GlobalConnectionManager.disambiguate_registered_name(self._registered_available_drivables, extant_name)
#### ================ Slots Methods:
# @QtCore.Slot()
# def on_application_quit(self):
# print(f'GlobalConnectionManager.on_application_quit')
# GlobalConnectionManager._currentInstance = None
#### ================ Static Methods:
@classmethod
def disambiguate_registered_name(cls, registraction_dict, extant_name):
matching_names_with_prefix = list(filter(lambda x: x.startswith(extant_name), list(registraction_dict.keys())))
itr_index = len(matching_names_with_prefix) # get the next number after the previous matching names to build a string like # "RasterPlot2D_1"
proposed_driver_identifier = f'{extant_name}_{itr_index}'
# Proposed name shouldn't exist:
extant_driver_with_identifier = registraction_dict.get(proposed_driver_identifier, None)
assert extant_driver_with_identifier is None, f"Driver with new name {extant_driver_with_identifier} already exists too!"
return proposed_driver_identifier
@classmethod
def register_control_object(cls, registraction_dict, control_object, control_identifier=None):
if control_identifier is None:
control_identifier = control_object.windowName
try:
extant_driver_index = list(registraction_dict.values()).index(control_object)
return registraction_dict.keys()[extant_driver_index]
except ValueError as e:
pass
extant_driver_with_identifier = registraction_dict.get(control_identifier, None)
if extant_driver_with_identifier is not None:
# driver already exists with this identifier:
# check and see if it's the same object
if extant_driver_with_identifier == control_object:
return
else:
print(f'driver with key {control_identifier} already exists and is a different object. Disambiguating name...')
control_identifier = GlobalConnectionManager.disambiguate_registered_name(registraction_dict, control_identifier)
print(f'\t proposed_driver_name is now {control_identifier}')
registraction_dict[control_identifier] = control_object
return control_identifier
@classmethod
def _try_find_object_key(cls, registraction_dict, control_object):
found_key = None
try:
extant_item_index = list(registraction_dict.values()).index(control_object)
found_key = registraction_dict.keys()[extant_item_index]
return found_key
except ValueError as e:
pass
except KeyError as e:
pass
return found_key
@classmethod
def _unregister_object(cls, registraction_dict, control_object):
found_key = cls._try_find_object_key(registraction_dict, control_object=control_object)
found_object = None
if found_key is not None:
found_object = registraction_dict.pop(found_key)
me_window[0], source_spike_raster_plt.spikes_window.active_time_window[1])
sync_connection = source_spike_raster_plt.window_scrolled.connect(controlled_plt.update_window_start_end)
return sync_connection
@classmethod
def connect_controlled_time_synchornized_plotter(cls, source_spike_raster_plt, controlled_plt):
controlled_plt.on_window_changed(source_spike_raster_plt.spikes_window.active_time_window[0], source_spike_raster_plt.spikes_window.active_time_window[1])
sync_connection = source_spike_raster_plt.window_scrolled.connect(controlled_plt.on_window_changed)
return sync_connection
# Requirements:
# source_spike_raster_plt:
# .spikes_window.active_time_window
# .window_scrolled
# controlled_spike_raster_plt:
# .spikes_window.update_window_start_end(float, float)
# Usage:
# spike_raster_plt_3d, spike_raster_plt_2d, spike_3d_to_2d_window_connection = build_spike_3d_raster_with_2d_controls(curr_spikes_df)
# spike_raster_plt_3d_vedo = Spike3DRaster_Vedo(curr_spikes_df, window_duration=15.0, window_start_time=30.0, neuron_colors=None, neuron_sort_order=None)
# extra_vedo_sync_connection = connect_additional_controlled_spike_raster_plotter(spike_raster_plt_2d, spike_raster_plt_3d_vedo)
# """
# controlled_spike_raster_plt.spikes_window.update_window_start_end(spike_raster_plt_2d.spikes_window.active_time_window[0], spike_raster_plt_2d.spikes_window.active_time_window[1])
# # Connect to update self when video window playback position changes
# sync_connection = spike_raster_plt_2d.window_scrolled.connect(controlled_spike_raster_plt.spikes_window.update_window_start_end)
# return sync_connection
### Usesful Examples:
### Checking if application instance exists yet:
# if QtGui.QApplication.instance() is None:
# return
### Checking if an object is still alive/extant:
# from ...Qt import isQObjectAlive
# for k in ViewBox.AllViews:
# if isQObjectAlive(k) and getConfigOption('crashWarning'):
# sys.stderr.write('Warning: ViewBox should be closed before application exit.\n')
# try:
# k.destroyed.disconnect()
# except RuntimeError: ## signal is already disconnected.
# pass
# except TypeError: ## view has already been deleted (?)
# pass
# except AttributeError: # PySide has deleted signal
# pass
class GlobalConnectionManagerAccessingMixin:
@property
def connection_man(self):
return self._connection_man
def GlobalConnectionManagerAccessingMixin_on_init(self, owning_application=None):
if owning_application is None:
owning_application = QtWidgets.QApplication.instance() # <PyQt5.QtWidgets.QApplication at 0x1d44a4891f0>
if owning_application is None:
print(f'could not get valid QApplication instance!')
raise NotImplementedError
# Set self._connection_man:
self._connection_man = GlobalConnectionManager(owning_application=owning_application)
########################################################
## For GlobalConnectionManagerAccessingMixin conformance:
########################################################
# @QtCore.pyqtSlot()
def GlobalConnectionManagerAccessingMixin_on_setup(self):
## TODO: register children
pass
# @QtCore.pyqtSlot()
def GlobalConnectionManagerAccessingMixin_on_destroy(self):
## TODO: unregister children
pass
| true
| true
|
1c484057952e765042ba5f556beae1700c93a132
| 2,106
|
py
|
Python
|
services/users/project/api/utils/response.py
|
shwetha-manvinkurke/dx-automator
|
ec01e51d80c8be8f5dea4669baa25d38256b1052
|
[
"MIT"
] | 14
|
2018-01-04T22:33:54.000Z
|
2020-03-04T18:38:34.000Z
|
services/users/project/api/utils/response.py
|
shwetha-manvinkurke/dx-automator
|
ec01e51d80c8be8f5dea4669baa25d38256b1052
|
[
"MIT"
] | 87
|
2018-01-04T22:15:16.000Z
|
2022-01-06T14:49:07.000Z
|
services/users/project/api/utils/response.py
|
shwetha-manvinkurke/dx-automator
|
ec01e51d80c8be8f5dea4669baa25d38256b1052
|
[
"MIT"
] | 17
|
2018-01-04T23:33:48.000Z
|
2021-11-08T18:39:04.000Z
|
def response_json_ok(json):
"""Creates a tuple representing the HTTP package to
respond the requisition with the given JSON on its body
and status code 200
:param json: object to be sent on HTTP body
:return response: tuple representing the HTTP response package
"""
return _make_json_response(json, 200)
def response_json_created(json):
"""Creates a tuple representing the HTTP package to
respond the requisition with the given JSON on its body
and status code 201
:param json: object to be sent on HTTP body
:return response: tuple representing the HTTP response package
"""
return _make_json_response(json, 201)
def response_json_bad_request(json):
"""Creates a tuple representing the HTTP package to
respond the requisition with the given JSON on its body
and status code 400
:param json: object to be sent on HTTP body
:return response: tuple representing the HTTP response package
"""
return _make_json_response(json, 400)
def response_json_unauthorized(json):
"""Creates a tuple representing the HTTP package to
respond the requisition with the given JSON on its body
and status code 401
:param json: object to be sent on HTTP body
:return response: tuple representing the HTTP response package
"""
return _make_json_response(json, 401)
def response_json_not_found(json):
"""Creates a tuple representing the HTTP package to
respond the requisition with the given JSON on its body
and status code 404
:param json: object to be sent on HTTP body
:return response: tuple representing the HTTP response package
"""
return _make_json_response(json, 404)
def _make_json_response(json, status):
"""Creates a tuple representing the HTTP package to
respond the requisition with the given JSON on its body
and the given status code.
:param json: object to be sent on HTTP body
:param status: status code
:return response: tuple representing the HTTP response package
"""
return json, status, {'Content-Type': 'application/json'}
| 36.310345
| 66
| 0.731244
|
def response_json_ok(json):
return _make_json_response(json, 200)
def response_json_created(json):
return _make_json_response(json, 201)
def response_json_bad_request(json):
return _make_json_response(json, 400)
def response_json_unauthorized(json):
return _make_json_response(json, 401)
def response_json_not_found(json):
return _make_json_response(json, 404)
def _make_json_response(json, status):
return json, status, {'Content-Type': 'application/json'}
| true
| true
|
1c48412fd41a287281f75f9338ade4cb7bd2bfd1
| 12,087
|
py
|
Python
|
src/relstorage/cache/tests/test_lru_cffiring.py
|
enfold/relstorage
|
9fcd526b537cb6537cc2ae33154b63096550f210
|
[
"ZPL-2.1"
] | 40
|
2015-10-08T05:35:13.000Z
|
2022-03-28T23:50:06.000Z
|
src/relstorage/cache/tests/test_lru_cffiring.py
|
enfold/relstorage
|
9fcd526b537cb6537cc2ae33154b63096550f210
|
[
"ZPL-2.1"
] | 364
|
2015-03-23T15:25:42.000Z
|
2022-03-17T08:41:34.000Z
|
src/relstorage/cache/tests/test_lru_cffiring.py
|
enfold/relstorage
|
9fcd526b537cb6537cc2ae33154b63096550f210
|
[
"ZPL-2.1"
] | 33
|
2015-06-08T23:03:22.000Z
|
2022-03-21T08:25:53.000Z
|
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
# The overhead of cache values, and thus how much fits in a ring or the
# cache, depends on 32 or 64 bit, whether or not we copy
# bytes into native code std::string, and how the compiler lays out
# the objects. We only copy strings on PyPy, but we have little control
# over the object layout, especially with the various MSVC compilers
# we have to deal with. So that explains the tests that have a range of sizes.
from relstorage.tests import TestCase
from relstorage.cache import interfaces
from . import Cache
class GenerationTests(TestCase):
def _makeCache(self, limit):
from . import Cache as BaseCache
return BaseCache(limit)
def _makeOne(self, limit):
return self._makeCache(limit).eden
def test_bool(self):
cache = self._makeCache(100)
lru = cache.eden
self.assertFalse(lru)
cache[1] = (b'', 0)
self.assertTrue(lru)
self.assertEqual(1, len(lru))
del cache[1]
self.assertFalse(lru)
class EdenTests(TestCase):
def _makeOne(self, limit):
from . import Cache as BaseCache
return BaseCache(limit)
def test_add_MRUs_empty(self):
lru = self._makeOne(100)
self.assertEqual((), lru.add_MRUs([]))
def test_add_MRUs_too_many(self):
lru = self._makeOne(1000)
too_many = [(i, (b'a' * i, 0, 0, 1)) for i in range(50)]
# They just exceed the limit
added = lru.add_MRUs(too_many)
# Much less got added
self.assertGreaterEqual(len(added), 7)
self.assertLessEqual(len(added), 9)
class NoOverheadSizeCache(Cache):
def __init__(self, byte_limit):
Cache.__init__(self, byte_limit)
self.base_size = self.weight
self[0] = (b'', 0)
self.entry_size = self[0].weight
del self[0]
@property
def weight(self):
weight = super(NoOverheadSizeCache, self).size
weight -= self.base_size
weight -= self.entry_size * len(self)
return weight
@property
def size(self):
return self.weight
def __getitem__(self, oid):
entry = Cache.__getitem__(self, oid)
if entry is not None:
return self.get_item_with_tid(oid, entry.tid)
class GenericLRUCacheTests(TestCase):
"""
Generic LRU caching tests that can be applied to any
LRU implementation, using the kind of keys and
values that we actually use: ``(oid_int, tid_int)`` and
``(state_bytes, tid_int)``.
"""
def _getClass(self):
return NoOverheadSizeCache
def _makeOne(self, limit, kind=None):
kind = kind or self._getClass()
return kind(limit)
def _getIface(self):
return interfaces.ILRUCache
def test_implements(self):
cache = self._makeOne(100)
assert_that(cache,
validly_provides(self._getIface()))
return cache
def test_eden_implements(self):
cache = self._makeOne(100)
assert_that(cache.eden,
validly_provides(interfaces.IGeneration))
def test_item_implements(self):
cache = self._makeOne(20)
cache[1] = (b'', 0)
entrya = cache[1]
assert_that(entrya, validly_provides(interfaces.ILRUEntry))
def test_add_too_many(self):
class _Cache(self._getClass()):
pass
cache = _Cache(20 + _Cache(20).base_size + (_Cache(20).entry_size * 2))
entry_count = 10
entries = cache.add_MRUs(list(reversed([
# oid, state, frozen, frequency
(x, (b'abcde', 0, False, x))
for x
in range(1, entry_count)
])))
self.assertEqual(
[5 + cache.entry_size] * len(entries),
[e.weight for e in entries])
self.assertLessEqual(
cache.weight,
cache.limit
)
self.assertEqual(
[e.key for e in entries],
[e.frequency for e in entries])
self.assertEqual(
[8, 7, 6, 5],
[e.key for e in entries]
)
self.assertEqual(4, len(cache))
return cache
def test_age(self):
cache = self._makeOne(100)
base_size = cache.base_size
entry_size = cache.entry_size
cache = self._getClass()(100 + base_size + entry_size)
entries = cache.add_MRUs([
(1, (b'abcde', 0, False, 1)),
(2, (b'abcde', 0, False, 1)),
(3, (b'abcde', 0, False, 1)),
(0, (b'abcde', 0, False, 1)),
])
self.assertIn(
[e.key for e in entries],
([1, 2, 3, 0], [2, 3, 0])
)
for _ in range(4):
for e in entries:
_ = cache[e.key]
freqs = [e.frequency for e in cache.values()]
self.assertEqual([5] * len(entries), freqs)
# By half each time
cache.age_frequencies()
freqs = [e.frequency for e in cache.values()]
self.assertEqual([2] * len(entries), freqs)
return cache
def test_delete(self):
cache = self._makeOne(20)
cache[1] = (b'abc', 0)
self.assertIn(1, cache)
self.assertEqual(1, len(cache))
self.assertEqual(3, cache.size)
self.assertEqual(cache[1], (b'abc', 0))
self.assertEqual(list(cache), [(1, 0)])
del cache[1]
self.assertNotIn(1, cache)
self.assertEqual(0, len(cache))
self.assertEqual(0, cache.size)
self.assertIsNone(cache[1])
self.assertEqual(list(cache), [])
def test_entries(self):
cache = self._makeOne(20)
cache[1] = (b'abc', 0)
entries = list(cache.values())
self.assertEqual(1, len(entries))
entry = entries[0]
assert_that(entry, validly_provides(interfaces.ILRUEntry))
self.assertEqual(1, entry.key)
self.assertEqual(b'abc', entry.value)
self.assertEqual(1, entry.frequency)
# Getting it again updates its frequency, not
# necessarily on the same object though.
self.assertIsNotNone(cache[1])
entries = list(cache.values())
self.assertEqual(1, len(entries))
entry = entries[0]
self.assertEqual(1, entry.key)
self.assertEqual(b'abc', entry.value)
self.assertEqual(2, entry.frequency)
def test_add_too_many_MRUs_works_aronud_big_entry(self):
cache = self._getClass()(20)
base_size = cache.base_size
entry_size = cache.entry_size
cache = self._getClass()(40 + base_size + entry_size)
entries = cache.add_MRUs([
(1, (b'abc', 0, False, 1)),
# This entry itself will fit nowhere
(2, (b'12345678901234567890' * 20, 0, False, 1)),
(3, (b'bcd', 0, False, 1)),
(4, (b'cde', 0, False, 1)),
(5, (b'dehi', 0, False, 1)),
(6, (b'edghijkl', 0, False, 1)),
])
self.assertGreaterEqual(len(cache), 3)
self.assertLessEqual(len(cache), 4)
self.assertIn(
[e.key for e in entries],
([1, 3, 4, 5], [3, 4, 5]))
return cache
class GenericGenerationalLRUCacheTests(GenericLRUCacheTests):
"""
Tests for any generational LRU cache.
"""
def test_implements(self):
cache = super(GenericGenerationalLRUCacheTests, self).test_implements()
assert_that(cache.eden,
validly_provides(interfaces.IGeneration))
assert_that(cache.protected,
validly_provides(interfaces.IGeneration))
assert_that(cache.probation,
validly_provides(interfaces.IGeneration))
def test_bad_generation_index_attribute_error(self):
cache = self._makeOne(20)
# Check proper init
getattr(cache.generations[1], 'limit')
getattr(cache.generations[2], 'limit')
getattr(cache.generations[3], 'limit')
# Gen 0 should be missing
with self.assertRaisesRegex(AttributeError,
"Generation 0 has no attribute 'on_hit'"):
cache.generations[0].on_hit()
def test_add_MRUs_reject_sets_sentinel_values(self):
# When we find an item that completely fills the cache,
# all the rest of the items are marked as rejected.
cache = self._getClass()(20)
base_size = cache.base_size
entry_size = cache.entry_size
cache = self._getClass()(20 + base_size + entry_size)
added_entries = cache.add_MRUs([
# over fill eden with item of size 15
(1, (b'012345678901234' * 20, 0, False, 1)),
# 1 goes to protected, filling it. eden is also over full with 2. probation is empty
(2, (b'012', 0, False, 1)),
# 3 fills eden, bumping 2 to probation. But probation is actually overfull now
# so we'd like to spill something if we could (but we can't.)
(3, (b'0', 0, False, 1)),
# 4 should never be added because it won't fit anywhere.
(4, (b'ee', 0, False, 1)),
])
def keys(x):
return [e.key for e in x]
self.assertEqual(keys(cache.protected), [3, 2])
self.assertEqual(keys(cache.probation), [])
self.assertEqual(keys(cache.eden), [4])
self.assertEqual(
[2, 3, 4],
[e.key for e in added_entries])
self.assertEqual(3, len(added_entries))
self.assertEqual(3, len(cache))
self.assertEqual(3, len(list(cache)))
class CFFICacheTests(TestCase):
"""
Tests that are specific to the CFFI implementation
of the cache.
These can use arbitrary keys and values.
"""
def _getClass(self):
return NoOverheadSizeCache
def _makeOne(self, limit, kind=None):
self.skipTest("Weights not supported")
kind = kind or self._getClass()
return kind(limit,
key_weight=self.key_weight,
value_weight=self.value_weight)
def key_weight(self, k):
return len(k)
def value_weight(self, v):
return len(v)
def test_free_reuse(self):
cache = self._makeOne(20)
lru = cache.protected
self.assertEqual(lru.limit, 16)
entrya = lru.add_MRU('a', b'')
entryb = lru.add_MRU('b', b'')
entryc = lru.add_MRU('c', b'1')
entryd = lru.add_MRU('d', b'1')
evicted = lru.update_MRU(entryb, b'1234567890')
self.assertEqual(evicted, ())
# Not changing the size is just a hit, it doesnt't
# evict anything.
evicted = lru.update_MRU(entryb, b'1234567890')
self.assertEqual(evicted, ())
evicted = lru.update_MRU(entryc, b'1234567890')
# a and d were evicted and placed on the freelist
self.assertEqual(entrya.key, None)
self.assertEqual(entrya.value, None)
self.assertEqual(entryd.key, None)
self.assertEqual(entryd.key, None)
self.assertEqual(evicted,
[('a', b''),
('d', b'1')])
self.assertEqual(2, len(lru.node_free_list))
lru.add_MRU('c', b'1')
self.assertEqual(1, len(lru.node_free_list))
| 32.579515
| 96
| 0.587822
|
[
(1, (b'abc', 0, False, 1)),
(2, (b'12345678901234567890' * 20, 0, False, 1)),
(3, (b'bcd', 0, False, 1)),
(4, (b'cde', 0, False, 1)),
(5, (b'dehi', 0, False, 1)),
(6, (b'edghijkl', 0, False, 1)),
])
self.assertGreaterEqual(len(cache), 3)
self.assertLessEqual(len(cache), 4)
self.assertIn(
[e.key for e in entries],
([1, 3, 4, 5], [3, 4, 5]))
return cache
class GenericGenerationalLRUCacheTests(GenericLRUCacheTests):
def test_implements(self):
cache = super(GenericGenerationalLRUCacheTests, self).test_implements()
assert_that(cache.eden,
validly_provides(interfaces.IGeneration))
assert_that(cache.protected,
validly_provides(interfaces.IGeneration))
assert_that(cache.probation,
validly_provides(interfaces.IGeneration))
def test_bad_generation_index_attribute_error(self):
cache = self._makeOne(20)
getattr(cache.generations[1], 'limit')
getattr(cache.generations[2], 'limit')
getattr(cache.generations[3], 'limit')
with self.assertRaisesRegex(AttributeError,
"Generation 0 has no attribute 'on_hit'"):
cache.generations[0].on_hit()
def test_add_MRUs_reject_sets_sentinel_values(self):
cache = self._getClass()(20)
base_size = cache.base_size
entry_size = cache.entry_size
cache = self._getClass()(20 + base_size + entry_size)
added_entries = cache.add_MRUs([
(1, (b'012345678901234' * 20, 0, False, 1)),
(2, (b'012', 0, False, 1)),
(3, (b'0', 0, False, 1)),
(4, (b'ee', 0, False, 1)),
])
def keys(x):
return [e.key for e in x]
self.assertEqual(keys(cache.protected), [3, 2])
self.assertEqual(keys(cache.probation), [])
self.assertEqual(keys(cache.eden), [4])
self.assertEqual(
[2, 3, 4],
[e.key for e in added_entries])
self.assertEqual(3, len(added_entries))
self.assertEqual(3, len(cache))
self.assertEqual(3, len(list(cache)))
class CFFICacheTests(TestCase):
def _getClass(self):
return NoOverheadSizeCache
def _makeOne(self, limit, kind=None):
self.skipTest("Weights not supported")
kind = kind or self._getClass()
return kind(limit,
key_weight=self.key_weight,
value_weight=self.value_weight)
def key_weight(self, k):
return len(k)
def value_weight(self, v):
return len(v)
def test_free_reuse(self):
cache = self._makeOne(20)
lru = cache.protected
self.assertEqual(lru.limit, 16)
entrya = lru.add_MRU('a', b'')
entryb = lru.add_MRU('b', b'')
entryc = lru.add_MRU('c', b'1')
entryd = lru.add_MRU('d', b'1')
evicted = lru.update_MRU(entryb, b'1234567890')
self.assertEqual(evicted, ())
# Not changing the size is just a hit, it doesnt't
evicted = lru.update_MRU(entryb, b'1234567890')
self.assertEqual(evicted, ())
evicted = lru.update_MRU(entryc, b'1234567890')
self.assertEqual(entrya.key, None)
self.assertEqual(entrya.value, None)
self.assertEqual(entryd.key, None)
self.assertEqual(entryd.key, None)
self.assertEqual(evicted,
[('a', b''),
('d', b'1')])
self.assertEqual(2, len(lru.node_free_list))
lru.add_MRU('c', b'1')
self.assertEqual(1, len(lru.node_free_list))
| true
| true
|
1c48417f4536995b8d781890b1514b8e62adaaf0
| 607
|
py
|
Python
|
apps/static_pages/tests/test_urls.py
|
ilyukevich/tasks
|
ba0c8202cfe61d26975c35f388155d36e1c2b856
|
[
"MIT"
] | null | null | null |
apps/static_pages/tests/test_urls.py
|
ilyukevich/tasks
|
ba0c8202cfe61d26975c35f388155d36e1c2b856
|
[
"MIT"
] | null | null | null |
apps/static_pages/tests/test_urls.py
|
ilyukevich/tasks
|
ba0c8202cfe61d26975c35f388155d36e1c2b856
|
[
"MIT"
] | null | null | null |
from django.test import Client, TestCase
class StaticURLTests(TestCase):
def setUp(self):
self.guest_client = Client()
def test_about_url_exists_at_desired_location(self):
"""Проверка доступности адреса /page/about/."""
response = self.guest_client.get('/page/about/')
self.assertEqual(response.status_code, 200)
def test_about_url_uses_correct_template(self):
"""Проверка шаблона для адреса /page/about/."""
response = self.guest_client.get('/page/about/')
self.assertTemplateUsed(response, 'static_pages/about.html')
| 35.705882
| 69
| 0.680395
|
from django.test import Client, TestCase
class StaticURLTests(TestCase):
def setUp(self):
self.guest_client = Client()
def test_about_url_exists_at_desired_location(self):
response = self.guest_client.get('/page/about/')
self.assertEqual(response.status_code, 200)
def test_about_url_uses_correct_template(self):
response = self.guest_client.get('/page/about/')
self.assertTemplateUsed(response, 'static_pages/about.html')
| true
| true
|
1c4841829620980ad574c246291fe78ff2d81173
| 5,779
|
py
|
Python
|
projects/mammography_project/integrate_final_result.py
|
lanhsincheng/detectron2
|
45ec85c3bde2a39ed4e870b76442021e8da26ede
|
[
"Apache-2.0"
] | null | null | null |
projects/mammography_project/integrate_final_result.py
|
lanhsincheng/detectron2
|
45ec85c3bde2a39ed4e870b76442021e8da26ede
|
[
"Apache-2.0"
] | null | null | null |
projects/mammography_project/integrate_final_result.py
|
lanhsincheng/detectron2
|
45ec85c3bde2a39ed4e870b76442021e8da26ede
|
[
"Apache-2.0"
] | null | null | null |
from detectron2.utils.visualizer import ColorMode
import cv2
import random
from detectron2.utils.visualizer import Visualizer
from projects.mammography_project.mammo_dataset import *
import operator
import xlsxwriter
import csv
wb_name = 'mammo0824_model_0059999'
def mammo_integrate(test_dirname, predictor, dataset_metadata, test_data_csv_path, output_dir):
dataset_dicts = get_mammo_dicts(test_dirname,test_data_csv_path)
answer_sheet_list = []
for_ensemble_confidence_list = []
score_class_list = []
big_list = []
for d in dataset_dicts:
im = cv2.imread(d["file_name"])
outputs = predictor(im)
num_instances = len(outputs['instances'])
scores_class_dict = {}
for_ensemble_dict = {}
per_class_dict = {}
for s in range(num_instances):
scores = outputs['instances']._fields['scores'].T[s].item()
pred_classes_num = outputs['instances']._fields['pred_classes'].T[s].item()
per_class_dict.update({pred_classes_num : scores})
# if pred_classes_num == 0 or pred_classes_num == 2 or pred_classes_num == 4 or pred_classes_num == 6:
# pred_classes = 'benign'
# elif pred_classes_num == 1 or pred_classes_num == 3 or pred_classes_num == 5 or pred_classes_num == 7:
# pred_classes = 'malignant'
if pred_classes_num == 0 :
pred_classes = 'benign'
elif pred_classes_num == 1 :
pred_classes = 'malignant'
# if pred_classes_num == 1:
# pred_classes = 'benign'
# elif pred_classes_num == 0:
# pred_classes = 'malignant'
scores_class_dict.update( {scores : pred_classes} )
if (pred_classes not in for_ensemble_dict) or (pred_classes in for_ensemble_dict and for_ensemble_dict[pred_classes] < scores):
for_ensemble_dict.update( {pred_classes : scores} )
if ('benign' not in for_ensemble_dict):
for_ensemble_dict.update({'benign': 0})
if ('malignant' not in for_ensemble_dict):
for_ensemble_dict.update({'malignant': 0})
per_class_dict = sorted(per_class_dict.items(), key= lambda per_class_dict: per_class_dict[0])
big_list.append(per_class_dict)
if ( not bool(scores_class_dict)==True ):
print(scores_class_dict)
scores_class_dict.update({0: 'benign', 0: 'malignant'})
print(scores_class_dict)
score_class_list.append(scores_class_dict)
final_for_ensemble = sorted(for_ensemble_dict.items(), key=lambda for_ensemble_dict: for_ensemble_dict[0])
final_class = max(scores_class_dict.items(), key=lambda scores_class_dict: scores_class_dict[0])[1]
answer_sheet_list.append(final_class)
for i in range(len(final_for_ensemble)):
for_ensemble_confidence_list.append(final_for_ensemble[i][1])
# load golden answer and evaluate accuracy
golden = []
golden_sheet = r'D:\Mammograph\golden/balance_golden_v1_852.csv'
with open(golden_sheet, newline='') as csvFile:
T = 0
F = 0
mm = 0
bb = 0
bm = 0
mb = 0
rows = csv.reader(csvFile)
for row in rows:
golden.append(row[0])
for i, j in zip(golden, answer_sheet_list):
if i == j:
if(i=='benign'):
bb += 1
else:
mm += 1
T += 1
else:
if (i == 'benign'):
bm += 1
else:
mb += 1
F += 1
accuracy = T / (T+F)
accuracy_malignant = (mm + bb)/(mm + bb + bm + mb)
accuracy_benign = (bb + mm)/(bb + mm + mb + bm)
print('T: ', T, ' F: ', F, ' accuracy: ',accuracy, 'malignant to benign : ', mb, 'benign to malignant : ', bm)
# write class and confidence per bounding box to the xlsfile(4 classes)
big_name = 'big_' + wb_name + '.xlsx'
workbook = xlsxwriter.Workbook(big_name)
worksheet = workbook.add_worksheet()
row = 0
column = 0
for list_ele in big_list:
for item in list_ele:
worksheet.write(row, column, item[0])
column += 1
worksheet.write(row, column, item[1])
column += 1
column = 0
row += 1
workbook.close()
# write class and confidence per bounding box to the xlsfile
dict_name = 'dict_' + wb_name + '.xlsx'
# workbook = xlsxwriter.Workbook(r'dict.xlsx')
workbook = xlsxwriter.Workbook(dict_name)
worksheet = workbook.add_worksheet()
row = 0
column = 0
for dict_ele in score_class_list:
item_list = []
for key, value in dict_ele.items():
item_list.append(key)
item_list.append(value)
for item in item_list:
worksheet.write(row, column, item)
column += 1
column = 0
row += 1
workbook.close()
# write final_class to the xlsfile
ans_name = 'ans_' + wb_name + '.xlsx'
workbook = xlsxwriter.Workbook(ans_name)
worksheet = workbook.add_worksheet()
row = 0
column = 0
# write down answer sheet
for item in answer_sheet_list:
# write operation perform
worksheet.write(row, column, item)
row += 1
workbook.close()
# write down confidence for every class
integrate_name = 'integrate_' + wb_name + '.csv'
with open(integrate_name, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
# 2 items a row
for i in range(0, len(for_ensemble_confidence_list), 2): # step by threes.
writer.writerow(for_ensemble_confidence_list[i:i + 2])
return T, F, accuracy, mb, bm
| 38.526667
| 139
| 0.607372
|
from detectron2.utils.visualizer import ColorMode
import cv2
import random
from detectron2.utils.visualizer import Visualizer
from projects.mammography_project.mammo_dataset import *
import operator
import xlsxwriter
import csv
wb_name = 'mammo0824_model_0059999'
def mammo_integrate(test_dirname, predictor, dataset_metadata, test_data_csv_path, output_dir):
dataset_dicts = get_mammo_dicts(test_dirname,test_data_csv_path)
answer_sheet_list = []
for_ensemble_confidence_list = []
score_class_list = []
big_list = []
for d in dataset_dicts:
im = cv2.imread(d["file_name"])
outputs = predictor(im)
num_instances = len(outputs['instances'])
scores_class_dict = {}
for_ensemble_dict = {}
per_class_dict = {}
for s in range(num_instances):
scores = outputs['instances']._fields['scores'].T[s].item()
pred_classes_num = outputs['instances']._fields['pred_classes'].T[s].item()
per_class_dict.update({pred_classes_num : scores})
if pred_classes_num == 0 :
pred_classes = 'benign'
elif pred_classes_num == 1 :
pred_classes = 'malignant'
scores_class_dict.update( {scores : pred_classes} )
if (pred_classes not in for_ensemble_dict) or (pred_classes in for_ensemble_dict and for_ensemble_dict[pred_classes] < scores):
for_ensemble_dict.update( {pred_classes : scores} )
if ('benign' not in for_ensemble_dict):
for_ensemble_dict.update({'benign': 0})
if ('malignant' not in for_ensemble_dict):
for_ensemble_dict.update({'malignant': 0})
per_class_dict = sorted(per_class_dict.items(), key= lambda per_class_dict: per_class_dict[0])
big_list.append(per_class_dict)
if ( not bool(scores_class_dict)==True ):
print(scores_class_dict)
scores_class_dict.update({0: 'benign', 0: 'malignant'})
print(scores_class_dict)
score_class_list.append(scores_class_dict)
final_for_ensemble = sorted(for_ensemble_dict.items(), key=lambda for_ensemble_dict: for_ensemble_dict[0])
final_class = max(scores_class_dict.items(), key=lambda scores_class_dict: scores_class_dict[0])[1]
answer_sheet_list.append(final_class)
for i in range(len(final_for_ensemble)):
for_ensemble_confidence_list.append(final_for_ensemble[i][1])
golden = []
golden_sheet = r'D:\Mammograph\golden/balance_golden_v1_852.csv'
with open(golden_sheet, newline='') as csvFile:
T = 0
F = 0
mm = 0
bb = 0
bm = 0
mb = 0
rows = csv.reader(csvFile)
for row in rows:
golden.append(row[0])
for i, j in zip(golden, answer_sheet_list):
if i == j:
if(i=='benign'):
bb += 1
else:
mm += 1
T += 1
else:
if (i == 'benign'):
bm += 1
else:
mb += 1
F += 1
accuracy = T / (T+F)
accuracy_malignant = (mm + bb)/(mm + bb + bm + mb)
accuracy_benign = (bb + mm)/(bb + mm + mb + bm)
print('T: ', T, ' F: ', F, ' accuracy: ',accuracy, 'malignant to benign : ', mb, 'benign to malignant : ', bm)
big_name = 'big_' + wb_name + '.xlsx'
workbook = xlsxwriter.Workbook(big_name)
worksheet = workbook.add_worksheet()
row = 0
column = 0
for list_ele in big_list:
for item in list_ele:
worksheet.write(row, column, item[0])
column += 1
worksheet.write(row, column, item[1])
column += 1
column = 0
row += 1
workbook.close()
dict_name = 'dict_' + wb_name + '.xlsx'
workbook = xlsxwriter.Workbook(dict_name)
worksheet = workbook.add_worksheet()
row = 0
column = 0
for dict_ele in score_class_list:
item_list = []
for key, value in dict_ele.items():
item_list.append(key)
item_list.append(value)
for item in item_list:
worksheet.write(row, column, item)
column += 1
column = 0
row += 1
workbook.close()
ans_name = 'ans_' + wb_name + '.xlsx'
workbook = xlsxwriter.Workbook(ans_name)
worksheet = workbook.add_worksheet()
row = 0
column = 0
for item in answer_sheet_list:
worksheet.write(row, column, item)
row += 1
workbook.close()
integrate_name = 'integrate_' + wb_name + '.csv'
with open(integrate_name, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for i in range(0, len(for_ensemble_confidence_list), 2):
writer.writerow(for_ensemble_confidence_list[i:i + 2])
return T, F, accuracy, mb, bm
| true
| true
|
1c4842ccf80b14547a6aafc0838a19e7f6e672cc
| 4,340
|
py
|
Python
|
src/models/hg_3d.py
|
DNALuo/3Dposes
|
c5e2ed5fea612318d7715e239176571f593ccf83
|
[
"MIT"
] | null | null | null |
src/models/hg_3d.py
|
DNALuo/3Dposes
|
c5e2ed5fea612318d7715e239176571f593ccf83
|
[
"MIT"
] | null | null | null |
src/models/hg_3d.py
|
DNALuo/3Dposes
|
c5e2ed5fea612318d7715e239176571f593ccf83
|
[
"MIT"
] | null | null | null |
from .layers.Residual import Residual
import torch.nn as nn
import math
import ref
class Hourglass(nn.Module):
def __init__(self, n, nModules, nFeats):
super(Hourglass, self).__init__()
self.n = n
self.nModules = nModules
self.nFeats = nFeats
_up1_, _low1_, _low2_, _low3_ = [], [], [], []
for j in range(self.nModules):
_up1_.append(Residual(self.nFeats, self.nFeats))
self.low1 = nn.MaxPool2d(kernel_size = 2, stride = 2)
for j in range(self.nModules):
_low1_.append(Residual(self.nFeats, self.nFeats))
if self.n > 1:
self.low2 = Hourglass(n - 1, self.nModules, self.nFeats)
else:
for j in range(self.nModules):
_low2_.append(Residual(self.nFeats, self.nFeats))
self.low2_ = nn.ModuleList(_low2_)
for j in range(self.nModules):
_low3_.append(Residual(self.nFeats, self.nFeats))
self.up1_ = nn.ModuleList(_up1_)
self.low1_ = nn.ModuleList(_low1_)
self.low3_ = nn.ModuleList(_low3_)
self.up2 = nn.Upsample(scale_factor = 2)
def forward(self, x):
up1 = x
for j in range(self.nModules):
up1 = self.up1_[j](up1)
low1 = self.low1(x)
for j in range(self.nModules):
low1 = self.low1_[j](low1)
if self.n > 1:
low2 = self.low2(low1)
else:
low2 = low1
for j in range(self.nModules):
low2 = self.low2_[j](low2)
low3 = low2
for j in range(self.nModules):
low3 = self.low3_[j](low3)
up2 = self.up2(low3)
return up1 + up2
class HourglassNet3D(nn.Module):
def __init__(self, nStack, nModules, nFeats, nRegModules):
super(HourglassNet3D, self).__init__()
self.nStack = nStack
self.nModules = nModules
self.nFeats = nFeats
self.nRegModules = nRegModules
self.conv1_ = nn.Conv2d(3, 64, bias = True, kernel_size = 7, stride = 2, padding = 3)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace = True)
self.r1 = Residual(64, 128)
self.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2)
self.r4 = Residual(128, 128)
self.r5 = Residual(128, self.nFeats)
_hourglass, _Residual, _lin_, _tmpOut, _ll_, _tmpOut_, _reg_ = [], [], [], [], [], [], []
for i in range(self.nStack):
_hourglass.append(Hourglass(4, self.nModules, self.nFeats))
for j in range(self.nModules):
_Residual.append(Residual(self.nFeats, self.nFeats))
lin = nn.Sequential(nn.Conv2d(self.nFeats, self.nFeats, bias = True, kernel_size = 1, stride = 1),
nn.BatchNorm2d(self.nFeats), self.relu)
_lin_.append(lin)
_tmpOut.append(nn.Conv2d(self.nFeats, ref.nJoints, bias = True, kernel_size = 1, stride = 1))
_ll_.append(nn.Conv2d(self.nFeats, self.nFeats, bias = True, kernel_size = 1, stride = 1))
_tmpOut_.append(nn.Conv2d(ref.nJoints, self.nFeats, bias = True, kernel_size = 1, stride = 1))
for i in range(4):
for j in range(self.nRegModules):
_reg_.append(Residual(self.nFeats, self.nFeats))
self.hourglass = nn.ModuleList(_hourglass)
self.Residual = nn.ModuleList(_Residual)
self.lin_ = nn.ModuleList(_lin_)
self.tmpOut = nn.ModuleList(_tmpOut)
self.ll_ = nn.ModuleList(_ll_)
self.tmpOut_ = nn.ModuleList(_tmpOut_)
self.reg_ = nn.ModuleList(_reg_)
self.reg = nn.Linear(4 * 4 * self.nFeats, ref.nJoints)
def forward(self, x):
x = self.conv1_(x)
x = self.bn1(x)
x = self.relu(x)
x = self.r1(x)
x = self.maxpool(x)
x = self.r4(x)
x = self.r5(x)
out = []
for i in range(self.nStack):
hg = self.hourglass[i](x)
ll = hg
for j in range(self.nModules):
ll = self.Residual[i * self.nModules + j](ll)
ll = self.lin_[i](ll)
tmpOut = self.tmpOut[i](ll)
out.append(tmpOut)
ll_ = self.ll_[i](ll)
tmpOut_ = self.tmpOut_[i](tmpOut)
x = x + ll_ + tmpOut_
for i in range(4):
for j in range(self.nRegModules):
x = self.reg_[i * self.nRegModules + j](x)
x = self.maxpool(x)
x = x.view(x.size(0), -1)
reg = self.reg(x)
out.append(reg)
return out
| 32.148148
| 106
| 0.589862
|
from .layers.Residual import Residual
import torch.nn as nn
import math
import ref
class Hourglass(nn.Module):
def __init__(self, n, nModules, nFeats):
super(Hourglass, self).__init__()
self.n = n
self.nModules = nModules
self.nFeats = nFeats
_up1_, _low1_, _low2_, _low3_ = [], [], [], []
for j in range(self.nModules):
_up1_.append(Residual(self.nFeats, self.nFeats))
self.low1 = nn.MaxPool2d(kernel_size = 2, stride = 2)
for j in range(self.nModules):
_low1_.append(Residual(self.nFeats, self.nFeats))
if self.n > 1:
self.low2 = Hourglass(n - 1, self.nModules, self.nFeats)
else:
for j in range(self.nModules):
_low2_.append(Residual(self.nFeats, self.nFeats))
self.low2_ = nn.ModuleList(_low2_)
for j in range(self.nModules):
_low3_.append(Residual(self.nFeats, self.nFeats))
self.up1_ = nn.ModuleList(_up1_)
self.low1_ = nn.ModuleList(_low1_)
self.low3_ = nn.ModuleList(_low3_)
self.up2 = nn.Upsample(scale_factor = 2)
def forward(self, x):
up1 = x
for j in range(self.nModules):
up1 = self.up1_[j](up1)
low1 = self.low1(x)
for j in range(self.nModules):
low1 = self.low1_[j](low1)
if self.n > 1:
low2 = self.low2(low1)
else:
low2 = low1
for j in range(self.nModules):
low2 = self.low2_[j](low2)
low3 = low2
for j in range(self.nModules):
low3 = self.low3_[j](low3)
up2 = self.up2(low3)
return up1 + up2
class HourglassNet3D(nn.Module):
def __init__(self, nStack, nModules, nFeats, nRegModules):
super(HourglassNet3D, self).__init__()
self.nStack = nStack
self.nModules = nModules
self.nFeats = nFeats
self.nRegModules = nRegModules
self.conv1_ = nn.Conv2d(3, 64, bias = True, kernel_size = 7, stride = 2, padding = 3)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace = True)
self.r1 = Residual(64, 128)
self.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2)
self.r4 = Residual(128, 128)
self.r5 = Residual(128, self.nFeats)
_hourglass, _Residual, _lin_, _tmpOut, _ll_, _tmpOut_, _reg_ = [], [], [], [], [], [], []
for i in range(self.nStack):
_hourglass.append(Hourglass(4, self.nModules, self.nFeats))
for j in range(self.nModules):
_Residual.append(Residual(self.nFeats, self.nFeats))
lin = nn.Sequential(nn.Conv2d(self.nFeats, self.nFeats, bias = True, kernel_size = 1, stride = 1),
nn.BatchNorm2d(self.nFeats), self.relu)
_lin_.append(lin)
_tmpOut.append(nn.Conv2d(self.nFeats, ref.nJoints, bias = True, kernel_size = 1, stride = 1))
_ll_.append(nn.Conv2d(self.nFeats, self.nFeats, bias = True, kernel_size = 1, stride = 1))
_tmpOut_.append(nn.Conv2d(ref.nJoints, self.nFeats, bias = True, kernel_size = 1, stride = 1))
for i in range(4):
for j in range(self.nRegModules):
_reg_.append(Residual(self.nFeats, self.nFeats))
self.hourglass = nn.ModuleList(_hourglass)
self.Residual = nn.ModuleList(_Residual)
self.lin_ = nn.ModuleList(_lin_)
self.tmpOut = nn.ModuleList(_tmpOut)
self.ll_ = nn.ModuleList(_ll_)
self.tmpOut_ = nn.ModuleList(_tmpOut_)
self.reg_ = nn.ModuleList(_reg_)
self.reg = nn.Linear(4 * 4 * self.nFeats, ref.nJoints)
def forward(self, x):
x = self.conv1_(x)
x = self.bn1(x)
x = self.relu(x)
x = self.r1(x)
x = self.maxpool(x)
x = self.r4(x)
x = self.r5(x)
out = []
for i in range(self.nStack):
hg = self.hourglass[i](x)
ll = hg
for j in range(self.nModules):
ll = self.Residual[i * self.nModules + j](ll)
ll = self.lin_[i](ll)
tmpOut = self.tmpOut[i](ll)
out.append(tmpOut)
ll_ = self.ll_[i](ll)
tmpOut_ = self.tmpOut_[i](tmpOut)
x = x + ll_ + tmpOut_
for i in range(4):
for j in range(self.nRegModules):
x = self.reg_[i * self.nRegModules + j](x)
x = self.maxpool(x)
x = x.view(x.size(0), -1)
reg = self.reg(x)
out.append(reg)
return out
| true
| true
|
1c484385a54e0922af6c19242e523fb1932ef401
| 698
|
py
|
Python
|
src/sadie/airr/airrtable/airrseries.py
|
jwillis0720/pybody
|
2d7c68650ac1ef5f3003ccb67171898eac1f63eb
|
[
"MIT"
] | null | null | null |
src/sadie/airr/airrtable/airrseries.py
|
jwillis0720/pybody
|
2d7c68650ac1ef5f3003ccb67171898eac1f63eb
|
[
"MIT"
] | null | null | null |
src/sadie/airr/airrtable/airrseries.py
|
jwillis0720/pybody
|
2d7c68650ac1ef5f3003ccb67171898eac1f63eb
|
[
"MIT"
] | null | null | null |
from typing import Any
import pandas as pd
from sadie.airr.models import AirrSeriesModel
class AirrSeries(pd.Series):
_metadata = ["meta"] # add custom namespaces here
def __init__(self, data: Any, copy: bool = False):
super(AirrSeries, self).__init__(data=data, copy=copy)
if not isinstance(data, pd.core.internals.managers.SingleBlockManager):
if isinstance(data, pd.core.series.Series):
self._verify()
@property
def _constructor(self) -> "AirrSeries":
return AirrSeries # type: ignore[return-value]
def _verify(self) -> None:
data = AirrSeriesModel(**self).dict() # type: ignore
self.update(data)
| 29.083333
| 79
| 0.659026
|
from typing import Any
import pandas as pd
from sadie.airr.models import AirrSeriesModel
class AirrSeries(pd.Series):
_metadata = ["meta"]
def __init__(self, data: Any, copy: bool = False):
super(AirrSeries, self).__init__(data=data, copy=copy)
if not isinstance(data, pd.core.internals.managers.SingleBlockManager):
if isinstance(data, pd.core.series.Series):
self._verify()
@property
def _constructor(self) -> "AirrSeries":
return AirrSeries
def _verify(self) -> None:
data = AirrSeriesModel(**self).dict()
self.update(data)
| true
| true
|
1c48461a6c4205aa58ec966c45311939425186de
| 11,551
|
py
|
Python
|
my_app/blog/models.py
|
Faisal-Sey/official1
|
49af7a9fd60c980bd5d4ef7075a4c1f27ecc9642
|
[
"MIT"
] | 1
|
2021-06-19T00:17:02.000Z
|
2021-06-19T00:17:02.000Z
|
my_app/blog/models.py
|
Faisal-Sey/official1
|
49af7a9fd60c980bd5d4ef7075a4c1f27ecc9642
|
[
"MIT"
] | null | null | null |
my_app/blog/models.py
|
Faisal-Sey/official1
|
49af7a9fd60c980bd5d4ef7075a4c1f27ecc9642
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.shortcuts import reverse, get_object_or_404
from django.conf import settings
from django_countries.fields import CountryField
# Create your models here.
class UserDb(models.Model):
Name = models.CharField(max_length=30)
Email = models.EmailField(max_length=200)
Message = models.CharField(max_length=500)
def __str__(self):
return '{}'.format(self.Message)
class item(models.Model):
items_name = models.CharField(max_length=300)
price = models.IntegerField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
class LatestProduct(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class LatestProductOne(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class TopProduct(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class TopProductOne(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class ReviewProduct(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class ReviewProductOne(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
PRODUCT_CHOICE = {
'Jordan': 'Jordan',
'nike': 'Nike',
'man': 'Man',
}
class ShopMain(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
class Shoes(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Shoes'
class Watches(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Watches'
class Slippers(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Slippers'
class Shorts(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Shorts'
class Pants(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Pants'
class Shirts(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Shirts'
class OrderItem(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
ordered = models.BooleanField(default=False)
item = models.ForeignKey(Shoes, on_delete=models.CASCADE)
quantity = models.FloatField(default=1)
objects = models.Manager()
def __str__(self):
return f"{self.quantity} of {self.item.title}"
def get_total_item_price(self):
return self.quantity * self.item.price
class Order(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
items = models.ManyToManyField(OrderItem)
start_date = models.DateTimeField(auto_now=True)
ordered_date = models.DateTimeField()
ordered = models.BooleanField(default=False)
objects = models.Manager()
billing_address = models.ForeignKey('BillingAddress', on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.user.username
def get_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_total_item_price()
return total
def get_sub_total(self):
subtotal = 0
for order_item in self.items.all():
subtotal += order_item.item.price
return subtotal
class BillingAddress(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
First_name = models.CharField(max_length=100)
Last_name = models.CharField(max_length=100)
Email = models.EmailField()
Country = CountryField(multiple=True)
street_address = models.CharField(max_length=100)
Apartment_address = models.CharField(max_length=100)
Town_or_City = models.CharField(max_length=100)
Zip = models.CharField(max_length=100)
Phone = models.IntegerField()
def __str__(self):
return self.user.username
class Post(models.Model):
title = models.TextField(max_length=800)
slug = models.SlugField(blank=True)
class PostForm(models.Model):
title = models.TextField(max_length=800)
answers = models.TextField(max_length=800)
slug = models.SlugField(blank=True)
| 27.372038
| 107
| 0.65293
|
from django.db import models
from django.shortcuts import reverse, get_object_or_404
from django.conf import settings
from django_countries.fields import CountryField
class UserDb(models.Model):
Name = models.CharField(max_length=30)
Email = models.EmailField(max_length=200)
Message = models.CharField(max_length=500)
def __str__(self):
return '{}'.format(self.Message)
class item(models.Model):
items_name = models.CharField(max_length=300)
price = models.IntegerField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
class LatestProduct(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class LatestProductOne(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class TopProduct(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class TopProductOne(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class ReviewProduct(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
class ReviewProductOne(models.Model):
items_name = models.CharField(max_length=300)
price = models.FloatField()
Description = models.TextField(max_length=600, blank=True)
Image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.items_name)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
PRODUCT_CHOICE = {
'Jordan': 'Jordan',
'nike': 'Nike',
'man': 'Man',
}
class ShopMain(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
class Shoes(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Shoes'
class Watches(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Watches'
class Slippers(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Slippers'
class Shorts(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Shorts'
class Pants(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Pants'
class Shirts(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
image = models.ImageField()
slug = models.SlugField()
description = models.TextField(max_length=3000)
image1 = models.ImageField(blank=True)
image2 = models.ImageField(blank=True)
image3 = models.ImageField(blank=True)
image4 = models.ImageField(blank=True)
objects = models.Manager()
def __str__(self):
return '{}'.format(self.title)
def get_absolute_url(self):
return reverse("shop-details", kwargs={
'slug': self.slug
})
def get_add_to_cart(self):
return reverse("add_to_cart", kwargs={
'slug': self.slug
})
def get_remove_from_cart(self):
return reverse("remove_from_cart", kwargs={
'slug': self.slug
})
class Meta:
verbose_name_plural = 'Shirts'
class OrderItem(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
ordered = models.BooleanField(default=False)
item = models.ForeignKey(Shoes, on_delete=models.CASCADE)
quantity = models.FloatField(default=1)
objects = models.Manager()
def __str__(self):
return f"{self.quantity} of {self.item.title}"
def get_total_item_price(self):
return self.quantity * self.item.price
class Order(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
items = models.ManyToManyField(OrderItem)
start_date = models.DateTimeField(auto_now=True)
ordered_date = models.DateTimeField()
ordered = models.BooleanField(default=False)
objects = models.Manager()
billing_address = models.ForeignKey('BillingAddress', on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.user.username
def get_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_total_item_price()
return total
def get_sub_total(self):
subtotal = 0
for order_item in self.items.all():
subtotal += order_item.item.price
return subtotal
class BillingAddress(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
First_name = models.CharField(max_length=100)
Last_name = models.CharField(max_length=100)
Email = models.EmailField()
Country = CountryField(multiple=True)
street_address = models.CharField(max_length=100)
Apartment_address = models.CharField(max_length=100)
Town_or_City = models.CharField(max_length=100)
Zip = models.CharField(max_length=100)
Phone = models.IntegerField()
def __str__(self):
return self.user.username
class Post(models.Model):
title = models.TextField(max_length=800)
slug = models.SlugField(blank=True)
class PostForm(models.Model):
title = models.TextField(max_length=800)
answers = models.TextField(max_length=800)
slug = models.SlugField(blank=True)
| true
| true
|
1c4846dac0af0ccb197b9ac571341196be451a99
| 619
|
py
|
Python
|
molecule/tang/tests/test_creation.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 3
|
2020-04-14T19:57:25.000Z
|
2021-01-11T09:09:16.000Z
|
molecule/tang/tests/test_creation.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 4
|
2020-08-12T10:24:25.000Z
|
2022-01-17T17:48:28.000Z
|
molecule/tang/tests/test_creation.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 2
|
2021-06-17T21:57:42.000Z
|
2022-02-20T08:02:43.000Z
|
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_crypto_devices(host):
f = host.file('/dev/mapper/cryptotest')
assert f.exists
def test_key_files_exist(host):
f = host.file('/etc/luks-keys/dev-vdb')
assert not f.exists
@pytest.mark.parametrize('file, content', [
("/etc/crypttab", "cryptotest /dev/vdb none _netdev"),
])
def test_crypttab(host, file, content):
file = host.file(file)
assert file.exists
assert file.contains(content)
| 22.925926
| 63
| 0.723748
|
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_crypto_devices(host):
f = host.file('/dev/mapper/cryptotest')
assert f.exists
def test_key_files_exist(host):
f = host.file('/etc/luks-keys/dev-vdb')
assert not f.exists
@pytest.mark.parametrize('file, content', [
("/etc/crypttab", "cryptotest /dev/vdb none _netdev"),
])
def test_crypttab(host, file, content):
file = host.file(file)
assert file.exists
assert file.contains(content)
| true
| true
|
1c48470b6af2b5dacfff0ceab70a8a5b3cef97d3
| 23,230
|
py
|
Python
|
tools/imagenet-tfrecords-builder/build_dataset.py
|
isabella232/heldout-influence-estimation
|
634527bf7ca6630e6fe66867347747e2e04bc780
|
[
"Apache-2.0"
] | 43
|
2020-09-11T23:40:16.000Z
|
2022-03-10T02:14:32.000Z
|
tools/imagenet-tfrecords-builder/build_dataset.py
|
google-research/heldout-influence-estimation
|
634527bf7ca6630e6fe66867347747e2e04bc780
|
[
"Apache-2.0"
] | 1
|
2022-01-16T13:01:16.000Z
|
2022-01-16T13:01:16.000Z
|
tools/imagenet-tfrecords-builder/build_dataset.py
|
isabella232/heldout-influence-estimation
|
634527bf7ca6630e6fe66867347747e2e04bc780
|
[
"Apache-2.0"
] | 5
|
2020-11-16T10:34:08.000Z
|
2022-03-20T04:42:39.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Adapted from https://github.com/kmonachopoulos/ImageNet-to-TFrecord
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFREcord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import absl
absl.flags.DEFINE_string('train_directory', None,
'Training data directory')
absl.flags.DEFINE_string('validation_directory', None,
'Validation data directory')
absl.flags.DEFINE_string('output_directory', None,
'Output data directory')
absl.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
absl.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
absl.flags.DEFINE_integer('num_threads', 1,
'Number of threads to preprocess the images.')
# The labels file contains a list of valid labels are held in this file.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
absl.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
absl.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# ImageNet Index used in https://pluskid.github.io/influence-memorization/
absl.flags.DEFINE_string('imagenet_index_file',
'imagenet_index.npz',
'ImageNet Example Index.')
FLAGS = absl.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, index, synset, human,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
index: integer, example index
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = b'RGB'
channels = 3
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'index': _int64_feature(index),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(bytes(synset,'utf-8')),
'image/class/text': _bytes_feature(bytes(human,'utf-8')),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(bytes(os.path.basename(filename),'utf-8')),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
# def __init__(self):
# # Create a single Session to run all image coding calls.
# self._sess = tf.Session()
# # Initializes function that converts PNG to JPEG data.
# self._png_data = tf.placeholder(dtype=tf.string)
# image = tf.image.decode_png(self._png_data, channels=3)
# self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# # Initializes function that converts CMYK JPEG data to RGB JPEG data.
# self._cmyk_data = tf.placeholder(dtype=tf.string)
# image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
# self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# # Initializes function that decodes RGB JPEG data.
# self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
# self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
image = tf.image.decode_png(image_data, channels=3)
return tf.image.encode_jpeg(image, format='rgb', quality=100).numpy()
# return self._sess.run(self._png_to_jpeg,
# feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
image = tf.image.decode_jpeg(image_data, channels=0)
return tf.image.encode_jpeg(image, format='rgb', quality=100).numpy()
# return self._sess.run(self._cmyk_to_rgb,
# feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = tf.image.decode_jpeg(image_data, channels=3).numpy()
# image = self._sess.run(self._decode_jpeg,
# feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.io.gfile.GFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, idxs, humans, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
idxs: list of integer; each integre identifies the example index
humans: list of strings; each string is a human-readable label
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) # HERE
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
idx = idxs[i]
synset = synsets[i]
human = humans[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label, idx, synset, human, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, idxs, humans, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
idxs: list of integer; each integer identifies the index of the example
humans: list of strings; each string is a human-readable label
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(idxs)
assert len(filenames) == len(humans)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, idxs, humans, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file, index_lookup):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
index_lookup: a dict maps from filename to (index, label) pair.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.io.gfile.GFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
idxs = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.io.gfile.glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(list(range(len(shuffled_index))))
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
lookup_results = [index_lookup[os.path.basename(fn)] for fn in filenames]
for i in range(len(lookup_results)):
# +1 because the exported index file use 0-999 labels instead of 1-1001
assert labels[i] == lookup_results[i][1] + 1
idxs = [x[0] for x in lookup_results]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels, idxs
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _process_dataset(name, directory, num_shards, synset_to_human, index_lookup):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
index_lookup: dict of filename to (index, label) pair.
"""
filenames, synsets, labels, idxs = _find_image_files(
directory, FLAGS.labels_file, index_lookup)
humans = _find_human_readable_labels(synsets, synset_to_human)
_process_image_files(name, filenames, synsets, labels, idxs,
humans, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.io.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_index_lookup(imagenet_index_file):
"""Build lookup table from filename to example index."""
index_data = np.load(imagenet_index_file, allow_pickle=True)
lookups = {}
for split, name in [('tr', 'train'), ('tt', 'validation')]:
lookups[name] = {}
for idx, (fn, label) in enumerate(zip(index_data['{}_filenames'.format(split)],
index_data['{}_labels'.format(split)])):
lookups[name][fn.decode('utf-8')] = (idx, label)
return lookups
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
index_lookups = _build_index_lookup(FLAGS.imagenet_index_file)
if(FLAGS.train_directory != None):
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, index_lookups['train'])
if(FLAGS.validation_directory != None):
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, index_lookups['validation'])
if __name__ == '__main__':
absl.app.run(main)
| 39.845626
| 101
| 0.69957
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange
import tensorflow as tf
import absl
absl.flags.DEFINE_string('train_directory', None,
'Training data directory')
absl.flags.DEFINE_string('validation_directory', None,
'Validation data directory')
absl.flags.DEFINE_string('output_directory', None,
'Output data directory')
absl.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
absl.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
absl.flags.DEFINE_integer('num_threads', 1,
'Number of threads to preprocess the images.')
absl.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
absl.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
absl.flags.DEFINE_string('imagenet_index_file',
'imagenet_index.npz',
'ImageNet Example Index.')
FLAGS = absl.flags.FLAGS
def _int64_feature(value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, index, synset, human,
height, width):
colorspace = b'RGB'
channels = 3
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'index': _int64_feature(index),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(bytes(synset,'utf-8')),
'image/class/text': _bytes_feature(bytes(human,'utf-8')),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(bytes(os.path.basename(filename),'utf-8')),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
()
def cmyk_to_rgb(self, image_data):
image = tf.image.decode_jpeg(image_data, channels=0)
return tf.image.encode_jpeg(image, format='rgb', quality=100).numpy()
def decode_jpeg(self, image_data):
image = tf.image.decode_jpeg(image_data, channels=3).numpy()
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
JPEG' in filename
def _is_cmyk(filename):
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
image_data = tf.io.gfile.GFile(filename, 'rb').read()
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
image = coder.decode_jpeg(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, idxs, humans, num_shards):
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
idx = idxs[i]
synset = synsets[i]
human = humans[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label, idx, synset, human, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, idxs, humans, num_shards):
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(idxs)
assert len(filenames) == len(humans)
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
coord = tf.train.Coordinator()
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, idxs, humans, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file, index_lookup):
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.io.gfile.GFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
idxs = []
label_index = 1
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.io.gfile.glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(list(range(len(shuffled_index))))
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
lookup_results = [index_lookup[os.path.basename(fn)] for fn in filenames]
for i in range(len(lookup_results)):
assert labels[i] == lookup_results[i][1] + 1
idxs = [x[0] for x in lookup_results]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels, idxs
def _find_human_readable_labels(synsets, synset_to_human):
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _process_dataset(name, directory, num_shards, synset_to_human, index_lookup):
filenames, synsets, labels, idxs = _find_image_files(
directory, FLAGS.labels_file, index_lookup)
humans = _find_human_readable_labels(synsets, synset_to_human)
_process_image_files(name, filenames, synsets, labels, idxs,
humans, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
lines = tf.io.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_index_lookup(imagenet_index_file):
index_data = np.load(imagenet_index_file, allow_pickle=True)
lookups = {}
for split, name in [('tr', 'train'), ('tt', 'validation')]:
lookups[name] = {}
for idx, (fn, label) in enumerate(zip(index_data['{}_filenames'.format(split)],
index_data['{}_labels'.format(split)])):
lookups[name][fn.decode('utf-8')] = (idx, label)
return lookups
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
index_lookups = _build_index_lookup(FLAGS.imagenet_index_file)
if(FLAGS.train_directory != None):
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, index_lookups['train'])
if(FLAGS.validation_directory != None):
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, index_lookups['validation'])
if __name__ == '__main__':
absl.app.run(main)
| true
| true
|
1c484798429c47a0c48e129b17f74d46202aa650
| 625
|
py
|
Python
|
ssr-top/ssr_monitor.py
|
BooAA/SSR
|
6f976dc30a975544cd111806ed6ffc5a760d2836
|
[
"BSD-3-Clause"
] | 1
|
2021-10-03T11:56:32.000Z
|
2021-10-03T11:56:32.000Z
|
ssr-top/ssr_monitor.py
|
BooAA/SSR
|
6f976dc30a975544cd111806ed6ffc5a760d2836
|
[
"BSD-3-Clause"
] | null | null | null |
ssr-top/ssr_monitor.py
|
BooAA/SSR
|
6f976dc30a975544cd111806ed6ffc5a760d2836
|
[
"BSD-3-Clause"
] | null | null | null |
import os
class ssr_monitor:
ssr_path = ""
def __init__(self, path = '/sys/kernel/rdma_rxe') -> None:
self.ssr_path = path
def get_qp_list(self) -> list:
return os.listdir(self.ssr_path)
def get_qp_counters(self, qpn) -> dict:
qp_dir_path = os.path.join(self.ssr_path, str(qpn))
ret = {}
try:
for counter in os.listdir(qp_dir_path):
with open(os.path.join(qp_dir_path, counter)) as file:
for line in file:
ret[counter] = int(line)
except:
pass
return ret
| 25
| 70
| 0.5344
|
import os
class ssr_monitor:
ssr_path = ""
def __init__(self, path = '/sys/kernel/rdma_rxe') -> None:
self.ssr_path = path
def get_qp_list(self) -> list:
return os.listdir(self.ssr_path)
def get_qp_counters(self, qpn) -> dict:
qp_dir_path = os.path.join(self.ssr_path, str(qpn))
ret = {}
try:
for counter in os.listdir(qp_dir_path):
with open(os.path.join(qp_dir_path, counter)) as file:
for line in file:
ret[counter] = int(line)
except:
pass
return ret
| true
| true
|
1c484836cd45e3a21df1132cbfa29ac7fe759213
| 16,530
|
py
|
Python
|
intersight/model/appliance_data_export_policy.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/appliance_data_export_policy.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/appliance_data_export_policy.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.appliance_data_export_policy_all_of import ApplianceDataExportPolicyAllOf
from intersight.model.appliance_data_export_policy_relationship import ApplianceDataExportPolicyRelationship
from intersight.model.display_names import DisplayNames
from intersight.model.iam_account_relationship import IamAccountRelationship
from intersight.model.mo_base_mo import MoBaseMo
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
globals()['ApplianceDataExportPolicyAllOf'] = ApplianceDataExportPolicyAllOf
globals()['ApplianceDataExportPolicyRelationship'] = ApplianceDataExportPolicyRelationship
globals()['DisplayNames'] = DisplayNames
globals()['IamAccountRelationship'] = IamAccountRelationship
globals()['MoBaseMo'] = MoBaseMo
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
class ApplianceDataExportPolicy(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'APPLIANCE.DATAEXPORTPOLICY': "appliance.DataExportPolicy",
},
('object_type',): {
'APPLIANCE.DATAEXPORTPOLICY': "appliance.DataExportPolicy",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'enable': (bool,), # noqa: E501
'name': (str,), # noqa: E501
'account': (IamAccountRelationship,), # noqa: E501
'parent_config': (ApplianceDataExportPolicyRelationship,), # noqa: E501
'sub_configs': ([ApplianceDataExportPolicyRelationship], none_type,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'enable': 'Enable', # noqa: E501
'name': 'Name', # noqa: E501
'account': 'Account', # noqa: E501
'parent_config': 'ParentConfig', # noqa: E501
'sub_configs': 'SubConfigs', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ApplianceDataExportPolicy - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "appliance.DataExportPolicy", must be one of ["appliance.DataExportPolicy", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "appliance.DataExportPolicy", must be one of ["appliance.DataExportPolicy", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
enable (bool): Status of the data collection mode. If the value is 'true', then data collection is enabled.. [optional] # noqa: E501
name (str): Name of the Data Export Policy.. [optional] # noqa: E501
account (IamAccountRelationship): [optional] # noqa: E501
parent_config (ApplianceDataExportPolicyRelationship): [optional] # noqa: E501
sub_configs ([ApplianceDataExportPolicyRelationship], none_type): An array of relationships to applianceDataExportPolicy resources.. [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
moid (str): The unique identifier of this Managed Object instance.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "appliance.DataExportPolicy")
object_type = kwargs.get('object_type', "appliance.DataExportPolicy")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
ApplianceDataExportPolicyAllOf,
MoBaseMo,
],
'oneOf': [
],
}
| 53.495146
| 1,678
| 0.642105
|
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.appliance_data_export_policy_all_of import ApplianceDataExportPolicyAllOf
from intersight.model.appliance_data_export_policy_relationship import ApplianceDataExportPolicyRelationship
from intersight.model.display_names import DisplayNames
from intersight.model.iam_account_relationship import IamAccountRelationship
from intersight.model.mo_base_mo import MoBaseMo
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
globals()['ApplianceDataExportPolicyAllOf'] = ApplianceDataExportPolicyAllOf
globals()['ApplianceDataExportPolicyRelationship'] = ApplianceDataExportPolicyRelationship
globals()['DisplayNames'] = DisplayNames
globals()['IamAccountRelationship'] = IamAccountRelationship
globals()['MoBaseMo'] = MoBaseMo
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
class ApplianceDataExportPolicy(ModelComposed):
allowed_values = {
('class_id',): {
'APPLIANCE.DATAEXPORTPOLICY': "appliance.DataExportPolicy",
},
('object_type',): {
'APPLIANCE.DATAEXPORTPOLICY': "appliance.DataExportPolicy",
},
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'class_id': (str,),
'object_type': (str,),
'enable': (bool,),
'name': (str,),
'account': (IamAccountRelationship,),
'parent_config': (ApplianceDataExportPolicyRelationship,),
'sub_configs': ([ApplianceDataExportPolicyRelationship], none_type,),
'account_moid': (str,),
'create_time': (datetime,),
'domain_group_moid': (str,),
'mod_time': (datetime,),
'moid': (str,),
'owners': ([str], none_type,),
'shared_scope': (str,),
'tags': ([MoTag], none_type,),
'version_context': (MoVersionContext,),
'ancestors': ([MoBaseMoRelationship], none_type,),
'parent': (MoBaseMoRelationship,),
'permission_resources': ([MoBaseMoRelationship], none_type,),
'display_names': (DisplayNames,),
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId',
'object_type': 'ObjectType',
'enable': 'Enable',
'name': 'Name',
'account': 'Account',
'parent_config': 'ParentConfig',
'sub_configs': 'SubConfigs',
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'display_names': 'DisplayNames',
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
class_id = kwargs.get('class_id', "appliance.DataExportPolicy")
object_type = kwargs.get('object_type', "appliance.DataExportPolicy")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
ApplianceDataExportPolicyAllOf,
MoBaseMo,
],
'oneOf': [
],
}
| true
| true
|
1c4848c1f29b4b4d5d33da873e06fe8c0aa82152
| 2,834
|
py
|
Python
|
src/python/pants/backend/codegen/wire/java/java_wire_library.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:24.000Z
|
2021-11-11T14:04:24.000Z
|
src/python/pants/backend/codegen/wire/java/java_wire_library.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/wire/java/java_wire_library.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:12.000Z
|
2021-11-11T14:04:12.000Z
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.base.validation import assert_list
logger = logging.getLogger(__name__)
class JavaWireLibrary(ExportableJvmLibrary):
"""A Java library generated from Wire IDL files.
Supports Wire 1.x only.
For an example Wire 2.x interface that generates service stubs see:
https://github.com/ericzundel/mvn2pants/tree/master/src/python/squarepants/plugins/sake_wire_codegen
But note this requires you to write a custom wire code generator with a command line interface.
:API: public
"""
def __init__(self,
payload=None,
service_writer=None,
service_writer_options=None,
roots=None,
registry_class=None,
enum_options=None,
no_options=None,
**kwargs):
"""
:param string service_writer: the name of the class to pass as the --service_writer option to
the Wire compiler (For wire 1.0 only)
:param list service_writer_options: A list of options to pass to the service writer (For
wire 1.x only)
:param list roots: passed through to the --roots option of the Wire compiler
:param string registry_class: fully qualified class name of RegistryClass to create. If in
doubt, specify com.squareup.wire.SimpleServiceWriter
:param list enum_options: list of enums to pass to as the --enum-enum_options option, # optional
:param boolean no_options: boolean that determines if --no_options flag is passed
"""
if not service_writer and service_writer_options:
raise TargetDefinitionException(self,
'service_writer_options requires setting service_writer')
payload = payload or Payload()
payload.add_fields({
'service_writer': PrimitiveField(service_writer or None),
'service_writer_options': PrimitiveField(
assert_list(service_writer_options, key_arg='service_writer_options',
raise_type=TargetDefinitionException)),
'roots': PrimitiveField(roots or []),
'registry_class': PrimitiveField(registry_class or None),
'enum_options': PrimitiveField(enum_options or []),
'no_options': PrimitiveField(no_options or False),
})
super(JavaWireLibrary, self).__init__(payload=payload, **kwargs)
| 39.915493
| 102
| 0.717713
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.base.validation import assert_list
logger = logging.getLogger(__name__)
class JavaWireLibrary(ExportableJvmLibrary):
def __init__(self,
payload=None,
service_writer=None,
service_writer_options=None,
roots=None,
registry_class=None,
enum_options=None,
no_options=None,
**kwargs):
if not service_writer and service_writer_options:
raise TargetDefinitionException(self,
'service_writer_options requires setting service_writer')
payload = payload or Payload()
payload.add_fields({
'service_writer': PrimitiveField(service_writer or None),
'service_writer_options': PrimitiveField(
assert_list(service_writer_options, key_arg='service_writer_options',
raise_type=TargetDefinitionException)),
'roots': PrimitiveField(roots or []),
'registry_class': PrimitiveField(registry_class or None),
'enum_options': PrimitiveField(enum_options or []),
'no_options': PrimitiveField(no_options or False),
})
super(JavaWireLibrary, self).__init__(payload=payload, **kwargs)
| true
| true
|
1c4848ec3d5e9913f64fb0a43e2ba992978ad91b
| 5,150
|
py
|
Python
|
tools/error.py
|
Alex-Au1/Haku_Bot
|
87cc1813546797eec0f4760fafa76ce65a387a1d
|
[
"MIT"
] | null | null | null |
tools/error.py
|
Alex-Au1/Haku_Bot
|
87cc1813546797eec0f4760fafa76ce65a387a1d
|
[
"MIT"
] | null | null | null |
tools/error.py
|
Alex-Au1/Haku_Bot
|
87cc1813546797eec0f4760fafa76ce65a387a1d
|
[
"MIT"
] | null | null | null |
import discord
import copy
from tools.embed import Embed, EmbededMessage
from tools.string import StringTools
import tools.members as Members
from typing import Optional
# Err_Embed: A template for embeding errors and warnings
class Err_Embed():
def __init__(self, title, description):
self.title = title
self.description = description
errors = {1: Err_Embed("Unable to Find Error", "{bot_nickname} is unable to find the error by the code `{err_code}`"),
2: Err_Embed("Unable to Warning", "{bot_nickname} is unable to find the warning by the code `{warn_code}`"),
3: Err_Embed("Unable to Find Selected Guild and Channel", "{bot_nickname} is unable to find the guild by the {guild_search_type} `{search_guild}` and the channel by the {channel_search_type} `{search_channel}`"),
4: Err_Embed("Unable to Find Selected Guild", "{bot_nickname} is unable to find the guild by the {guild_search_type} `{search_guild}`"),
5: Err_Embed("Unable to Find Selected Channel", "{bot_nickname} is unable to find the channel by the {channel_search_type} `{search_channel}`"),
6: Err_Embed("Please Enter {type_article} {correct_type} Parameter", "Please enter {type_article} **{correct_type}** for the parameter `{parameter}`"),
7: Err_Embed("Please Enter {type_article} {correct_type} Greater or Equal to {value}", "Please enter {type_article} **{correct_type} greater or equal to {value}** for the parameter `{parameter}`"),
8: Err_Embed("Please Enter {type_article} {correct_type} Lesser or Equal to {value}", "Please enter {type_article} **{correct_type} lesser or equal to {value}** for the parameter `{parameter}`"),
9: Err_Embed("Cannot Perform Action in the Channel: {channel}", "Cannot {action} to the channel, `{channel}`, in the guild, `{guild}`"),
10: Err_Embed("{element} is not part of {group}", "The input `{element}` for the parameter `{parameter}` is not an element of `{group}`"),
11: Err_Embed("Please Enter {type_article} {correct_type} Greater than {value}", "Please enter {type_article} **{correct_type} greater than {value}** for the parameter `{parameter}`"),
12: Err_Embed("Please Enter {type_article} {correct_type} Lesser than {value}", "Please enter {type_article} **{correct_type} lesser than {value}** for the parameter `{parameter}`"),
13: Err_Embed("Please Enter a Valid Subcommand", "Please enter a valid subcommand for the command `{command}`"),
14: Err_Embed("Please Enter a Valid Url for {type_article} {correct_type}", "Please enter a valid url for **{type_article} {correct_type}** in the parameter `{parameter}`"),
15: Err_Embed("Please Enter a Valid {correct_type}", "Please enter a valid **{correct_type}** for the parameter `{parameter}`"),
18: Err_Embed("Unable to Find Selected {member}", "{bot_nickname} is unable to find the {member} by the {member_search_type} `{search_member}`"),
19: Err_Embed("{action} Failed", "{bot_nickname} is unable to {action}"),
20: Err_Embed("Please Enter {type_article} {correct_type} {scope} in between {left} and {right}", "Please enter {type_article} **{correct_type} {scope} in between __{left}__ and __{right}__** for the parameter `{parameter}`")}
warnings = {}
# display_error(client, code, type, choice, **kwargs) Displays an error/ warning embeded message
# depending on 'code'
# requires: 0 <= code
# 0 <= choice
# 'type' is either "error" or "warning"
def display_error(client: discord.Client, code: int, type: str = "error", choice: int = 0, **kwargs) -> Optional[EmbededMessage]:
embed = Embed(client)
description = ""
title = ""
if (type == "error"):
title = "ERROR "
elif (type == "warning"):
title = "Warning "
kw_keys = list(kwargs.keys())
for k in kw_keys:
new_key = "{" + k + "}"
kwargs[new_key] = kwargs.pop(k)
if (type == "error"):
if (code <= len(errors) and code > 0):
err_title = errors[code].title
err_description = errors[code].description
else:
err_title = errors[1].title
err_description = errors[1].description
kwargs = {"{err_code}": f"{code}"}
code = 1
elif (type == "warning"):
if (code <= len(warnings) and code > 0):
err_title = warnings[code].title
err_description = warnings[code].description
else:
err_title = warnings[2].title
err_description = warnings[2].description
kwargs = {"{warn_code}": f"{code}"}
code = 2
kwargs.update({"{bot_nickname}": Members.DEFAULT_BOT_NAME})
title += f"{code}: {err_title}"
title = StringTools.word_replace(title, kwargs, capitalize = True)
description = StringTools.word_replace(err_description, kwargs)
if (type == "error"):
embeded_message = embed.error_embed(description, title)
elif (type == "warning"):
embeded_message = embed.warning_embed(description, title, choice = choice)
return embeded_message
| 57.222222
| 236
| 0.65767
|
import discord
import copy
from tools.embed import Embed, EmbededMessage
from tools.string import StringTools
import tools.members as Members
from typing import Optional
class Err_Embed():
def __init__(self, title, description):
self.title = title
self.description = description
errors = {1: Err_Embed("Unable to Find Error", "{bot_nickname} is unable to find the error by the code `{err_code}`"),
2: Err_Embed("Unable to Warning", "{bot_nickname} is unable to find the warning by the code `{warn_code}`"),
3: Err_Embed("Unable to Find Selected Guild and Channel", "{bot_nickname} is unable to find the guild by the {guild_search_type} `{search_guild}` and the channel by the {channel_search_type} `{search_channel}`"),
4: Err_Embed("Unable to Find Selected Guild", "{bot_nickname} is unable to find the guild by the {guild_search_type} `{search_guild}`"),
5: Err_Embed("Unable to Find Selected Channel", "{bot_nickname} is unable to find the channel by the {channel_search_type} `{search_channel}`"),
6: Err_Embed("Please Enter {type_article} {correct_type} Parameter", "Please enter {type_article} **{correct_type}** for the parameter `{parameter}`"),
7: Err_Embed("Please Enter {type_article} {correct_type} Greater or Equal to {value}", "Please enter {type_article} **{correct_type} greater or equal to {value}** for the parameter `{parameter}`"),
8: Err_Embed("Please Enter {type_article} {correct_type} Lesser or Equal to {value}", "Please enter {type_article} **{correct_type} lesser or equal to {value}** for the parameter `{parameter}`"),
9: Err_Embed("Cannot Perform Action in the Channel: {channel}", "Cannot {action} to the channel, `{channel}`, in the guild, `{guild}`"),
10: Err_Embed("{element} is not part of {group}", "The input `{element}` for the parameter `{parameter}` is not an element of `{group}`"),
11: Err_Embed("Please Enter {type_article} {correct_type} Greater than {value}", "Please enter {type_article} **{correct_type} greater than {value}** for the parameter `{parameter}`"),
12: Err_Embed("Please Enter {type_article} {correct_type} Lesser than {value}", "Please enter {type_article} **{correct_type} lesser than {value}** for the parameter `{parameter}`"),
13: Err_Embed("Please Enter a Valid Subcommand", "Please enter a valid subcommand for the command `{command}`"),
14: Err_Embed("Please Enter a Valid Url for {type_article} {correct_type}", "Please enter a valid url for **{type_article} {correct_type}** in the parameter `{parameter}`"),
15: Err_Embed("Please Enter a Valid {correct_type}", "Please enter a valid **{correct_type}** for the parameter `{parameter}`"),
18: Err_Embed("Unable to Find Selected {member}", "{bot_nickname} is unable to find the {member} by the {member_search_type} `{search_member}`"),
19: Err_Embed("{action} Failed", "{bot_nickname} is unable to {action}"),
20: Err_Embed("Please Enter {type_article} {correct_type} {scope} in between {left} and {right}", "Please enter {type_article} **{correct_type} {scope} in between __{left}__ and __{right}__** for the parameter `{parameter}`")}
warnings = {}
def display_error(client: discord.Client, code: int, type: str = "error", choice: int = 0, **kwargs) -> Optional[EmbededMessage]:
embed = Embed(client)
description = ""
title = ""
if (type == "error"):
title = "ERROR "
elif (type == "warning"):
title = "Warning "
kw_keys = list(kwargs.keys())
for k in kw_keys:
new_key = "{" + k + "}"
kwargs[new_key] = kwargs.pop(k)
if (type == "error"):
if (code <= len(errors) and code > 0):
err_title = errors[code].title
err_description = errors[code].description
else:
err_title = errors[1].title
err_description = errors[1].description
kwargs = {"{err_code}": f"{code}"}
code = 1
elif (type == "warning"):
if (code <= len(warnings) and code > 0):
err_title = warnings[code].title
err_description = warnings[code].description
else:
err_title = warnings[2].title
err_description = warnings[2].description
kwargs = {"{warn_code}": f"{code}"}
code = 2
kwargs.update({"{bot_nickname}": Members.DEFAULT_BOT_NAME})
title += f"{code}: {err_title}"
title = StringTools.word_replace(title, kwargs, capitalize = True)
description = StringTools.word_replace(err_description, kwargs)
if (type == "error"):
embeded_message = embed.error_embed(description, title)
elif (type == "warning"):
embeded_message = embed.warning_embed(description, title, choice = choice)
return embeded_message
| true
| true
|
1c484939ae60c601405bced057c4dedd90dff5c0
| 772
|
py
|
Python
|
lessons/best-practices/boulder_dem.py
|
csdms/ivy
|
862fc8bafa665864ceae25c4ead9e376ffe175cb
|
[
"CC-BY-4.0"
] | null | null | null |
lessons/best-practices/boulder_dem.py
|
csdms/ivy
|
862fc8bafa665864ceae25c4ead9e376ffe175cb
|
[
"CC-BY-4.0"
] | 1
|
2022-03-30T18:18:50.000Z
|
2022-03-30T18:18:50.000Z
|
lessons/best-practices/boulder_dem.py
|
csdms/ivy
|
862fc8bafa665864ceae25c4ead9e376ffe175cb
|
[
"CC-BY-4.0"
] | null | null | null |
"""An example of reading topographical data from a file and displaying it."""
import pandas as pd
import matplotlib.pyplot as plt
topo_file = "../../data/topo.asc"
def read():
try:
topo = pd.read_csv(topo_file, header=None)
except IOError:
print("IOError: file '{}' cannot be read".format(topo_file))
else:
return topo
def display(data, show=False, outfile="boulder_dem.png"):
fig, ax = plt.subplots()
elev = ax.imshow(data, cmap="jet")
fig.colorbar(elev, label="Elevation (m)")
plt.title("Boulder Topography")
if show is True:
plt.show()
else:
plt.savefig(outfile, dpi=96)
plt.close()
if __name__ == "__main__":
topo = read()
if topo is not None:
display(topo)
| 21.444444
| 77
| 0.619171
|
import pandas as pd
import matplotlib.pyplot as plt
topo_file = "../../data/topo.asc"
def read():
try:
topo = pd.read_csv(topo_file, header=None)
except IOError:
print("IOError: file '{}' cannot be read".format(topo_file))
else:
return topo
def display(data, show=False, outfile="boulder_dem.png"):
fig, ax = plt.subplots()
elev = ax.imshow(data, cmap="jet")
fig.colorbar(elev, label="Elevation (m)")
plt.title("Boulder Topography")
if show is True:
plt.show()
else:
plt.savefig(outfile, dpi=96)
plt.close()
if __name__ == "__main__":
topo = read()
if topo is not None:
display(topo)
| true
| true
|
1c48496a2fcea3d1c774c8ee9daba45438f2e15a
| 166
|
py
|
Python
|
logging_middleware/checks.py
|
fearsd/django-logging-middleware
|
6eb95774c1bcb1829aa1a94223d9e2c39217d8f9
|
[
"MIT"
] | 4
|
2021-04-08T14:14:04.000Z
|
2021-09-08T07:57:38.000Z
|
logging_middleware/checks.py
|
fearsd/django-logging-middleware
|
6eb95774c1bcb1829aa1a94223d9e2c39217d8f9
|
[
"MIT"
] | null | null | null |
logging_middleware/checks.py
|
fearsd/django-logging-middleware
|
6eb95774c1bcb1829aa1a94223d9e2c39217d8f9
|
[
"MIT"
] | null | null | null |
# from django.conf import settings
from django.core import checks
@checks.register
def check_settings(app_configs, **kwargs):
# temporary solution
return []
| 20.75
| 42
| 0.753012
|
from django.core import checks
@checks.register
def check_settings(app_configs, **kwargs):
return []
| true
| true
|
1c484a7d6852dd81d7c8ee92a960b16a3012a4e2
| 1,341
|
py
|
Python
|
backend/src/ml_models/context.py
|
lukemiloszewski/ml-models
|
826ab6c0adebe851e73b9e883af8abccfaebdacb
|
[
"MIT"
] | null | null | null |
backend/src/ml_models/context.py
|
lukemiloszewski/ml-models
|
826ab6c0adebe851e73b9e883af8abccfaebdacb
|
[
"MIT"
] | 16
|
2022-02-21T19:27:42.000Z
|
2022-03-31T01:47:33.000Z
|
backend/src/ml_models/context.py
|
lukemiloszewski/ml-models
|
826ab6c0adebe851e73b9e883af8abccfaebdacb
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, Optional
from ml_models.clients.mnist_client import MNISTClient
_CONTEXT: Optional[Context] = None
class Attributes:
def __init__(self, attributes_dict: Dict[str, Any]) -> None:
self._attributes_dict = attributes_dict
def get(self, attribute_id: str) -> Any:
rv = self._get_attribute(attribute_id=attribute_id)
return rv
def _get_attribute(self, attribute_id: str) -> Any:
attribute = self._attributes_dict.get(attribute_id, None)
if attribute is None:
err_msg = f"Invalid attribute: {attribute_id}, available attributes: {list(self._attributes_dict.keys())}"
raise AttributeError(err_msg)
return attribute
class Context:
def __init__(self, clients: Attributes) -> None:
self.clients = clients
def configure_context(root_path: Path, mnist_onnx_path: Path):
global _CONTEXT
client_dict = {
"mnist": MNISTClient(str(root_path / mnist_onnx_path)),
}
client_attributes = Attributes(attributes_dict=client_dict)
context = Context(clients=client_attributes)
_CONTEXT = context
def get_context() -> Context:
if _CONTEXT is None:
raise ValueError("Context has not been initialised")
return _CONTEXT
| 27.367347
| 118
| 0.706189
|
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, Optional
from ml_models.clients.mnist_client import MNISTClient
_CONTEXT: Optional[Context] = None
class Attributes:
def __init__(self, attributes_dict: Dict[str, Any]) -> None:
self._attributes_dict = attributes_dict
def get(self, attribute_id: str) -> Any:
rv = self._get_attribute(attribute_id=attribute_id)
return rv
def _get_attribute(self, attribute_id: str) -> Any:
attribute = self._attributes_dict.get(attribute_id, None)
if attribute is None:
err_msg = f"Invalid attribute: {attribute_id}, available attributes: {list(self._attributes_dict.keys())}"
raise AttributeError(err_msg)
return attribute
class Context:
def __init__(self, clients: Attributes) -> None:
self.clients = clients
def configure_context(root_path: Path, mnist_onnx_path: Path):
global _CONTEXT
client_dict = {
"mnist": MNISTClient(str(root_path / mnist_onnx_path)),
}
client_attributes = Attributes(attributes_dict=client_dict)
context = Context(clients=client_attributes)
_CONTEXT = context
def get_context() -> Context:
if _CONTEXT is None:
raise ValueError("Context has not been initialised")
return _CONTEXT
| true
| true
|
1c484c826f55f41d4e0d2c5b6336352e83d80519
| 2,571
|
py
|
Python
|
preprocessors/rcv1v2_data.py
|
laddie132/LW-PT
|
28b469ba68a5d4fba68b992cff5372e63ec2ed42
|
[
"MIT"
] | 9
|
2020-08-20T18:15:43.000Z
|
2022-02-10T02:54:30.000Z
|
preprocessors/rcv1v2_data.py
|
laddie132/LW-PT
|
28b469ba68a5d4fba68b992cff5372e63ec2ed42
|
[
"MIT"
] | 1
|
2021-11-19T01:29:47.000Z
|
2021-11-19T09:58:38.000Z
|
preprocessors/rcv1v2_data.py
|
laddie132/LW-PT
|
28b469ba68a5d4fba68b992cff5372e63ec2ed42
|
[
"MIT"
] | 3
|
2021-05-29T02:11:34.000Z
|
2021-12-14T15:43:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Han"
__email__ = "liuhan132@foxmail.com"
import os
import logging
from .base import BaseDataset
logger = logging.getLogger(__name__)
class RCV1V2(BaseDataset):
"""
RCV1-V2 dataset
"""
def __init__(self, data_path, random_seed):
super(RCV1V2, self).__init__(h5_path='data/rcv1v2.h5',
save_data_path='data/rcv1v2.pkl',
save_meta_data_path='data/rcv1v2.meta.json',
w2v_path='data/rcv1v2_word2vec.model',
load_emb=False,
emb_dim=256,
max_vocab_size=None,
max_sent_num=15,
max_sent_len=50,
max_doc_len=500,
hier=False,
random_seed=random_seed)
self.data_path = 'data/rcv1-v2/sgm' if data_path is '' else data_path
def load_all_data(self):
train_text_labels = self.load_data(os.path.join(self.data_path, 'train.src.id'),
os.path.join(self.data_path, 'train.tgt.id'))
val_text_labels = self.load_data(os.path.join(self.data_path, 'valid.src.id'),
os.path.join(self.data_path, 'valid.tgt.id'))
test_text_labels = self.load_data(os.path.join(self.data_path, 'test.src.id'),
os.path.join(self.data_path, 'test.tgt.id'))
return train_text_labels, val_text_labels, test_text_labels
def load_data(self, text_path, label_path):
texts_labels = []
with open(text_path, 'r') as tf, open(label_path, 'r') as lf:
for text, label in zip(tf, lf):
if text != '' and label != '':
text = list(map(lambda x: int(x), text.strip().split()))
label = list(map(lambda x: int(x), label.strip().split()))
label = label[1:-1]
# text = text.strip().split()
# label = label.strip().split()
self.texts_labels_sum += len(label)
self.texts_words_sum += len(text)
self.all_texts.append(text)
self.all_labels.extend(label)
texts_labels.append((text, label))
return texts_labels
| 38.373134
| 88
| 0.492804
|
__author__ = "Han"
__email__ = "liuhan132@foxmail.com"
import os
import logging
from .base import BaseDataset
logger = logging.getLogger(__name__)
class RCV1V2(BaseDataset):
def __init__(self, data_path, random_seed):
super(RCV1V2, self).__init__(h5_path='data/rcv1v2.h5',
save_data_path='data/rcv1v2.pkl',
save_meta_data_path='data/rcv1v2.meta.json',
w2v_path='data/rcv1v2_word2vec.model',
load_emb=False,
emb_dim=256,
max_vocab_size=None,
max_sent_num=15,
max_sent_len=50,
max_doc_len=500,
hier=False,
random_seed=random_seed)
self.data_path = 'data/rcv1-v2/sgm' if data_path is '' else data_path
def load_all_data(self):
train_text_labels = self.load_data(os.path.join(self.data_path, 'train.src.id'),
os.path.join(self.data_path, 'train.tgt.id'))
val_text_labels = self.load_data(os.path.join(self.data_path, 'valid.src.id'),
os.path.join(self.data_path, 'valid.tgt.id'))
test_text_labels = self.load_data(os.path.join(self.data_path, 'test.src.id'),
os.path.join(self.data_path, 'test.tgt.id'))
return train_text_labels, val_text_labels, test_text_labels
def load_data(self, text_path, label_path):
texts_labels = []
with open(text_path, 'r') as tf, open(label_path, 'r') as lf:
for text, label in zip(tf, lf):
if text != '' and label != '':
text = list(map(lambda x: int(x), text.strip().split()))
label = list(map(lambda x: int(x), label.strip().split()))
label = label[1:-1]
self.texts_labels_sum += len(label)
self.texts_words_sum += len(text)
self.all_texts.append(text)
self.all_labels.extend(label)
texts_labels.append((text, label))
return texts_labels
| true
| true
|
1c484d2322cae0bb96e69cc698013819ab7ee299
| 3,346
|
py
|
Python
|
PyWidget3/shape/__init__.py
|
galaxyjim/PyWidget3
|
eb3d269e4e7d8a68ca957d32bc704e31eca20015
|
[
"BSD-3-Clause"
] | null | null | null |
PyWidget3/shape/__init__.py
|
galaxyjim/PyWidget3
|
eb3d269e4e7d8a68ca957d32bc704e31eca20015
|
[
"BSD-3-Clause"
] | 23
|
2015-03-14T00:03:11.000Z
|
2015-04-10T23:24:21.000Z
|
PyWidget3/shape/__init__.py
|
galaxyjim/PyWidget3
|
eb3d269e4e7d8a68ca957d32bc704e31eca20015
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2009 Nicolas Rougier
# Copyright (c) 2015 James Gaston
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
'''Defines a set of basic 2D shapes.
All shapes have:
----------------
- a position in 2D space
- a dimension in 2D space
- a x alignment ('left', 'center' or 'right')
- a y alignment ('top', 'center' or 'bottom')
- background color(s)
- background texture
- foreground color(s) (for the one pixel border)
Display Model:
--------------
Any shape is defined by the x, y, width and height attributes. Borders are
drawn on the inside of the shape as a single pixel line in the specified border
color(s). Foreground or background color can be specified as a single tuple of 4
floats for uniform color, 2 tuples of 4 floats for radial color patterns (going
from inner to outer) or 4 tuples of 4 floats for an interpolated pattern between
the four corners. Note that the radial pattern does not work for triangle or
rectangle.
Available shapes:
-----------------
- Rectangle (with round corners or not)
- Ellipse (circle if width == height)
- Triangle
- Cross (with any number of branches)
- Star (with any number of branches)
Example usage:
--------------
rectangle = Rectangle(x=100,y=100,width=100,height=100,radius=10)
@window.event
def on_draw():
window.clear()
rectangle.draw()
@window.event
def on_mouse_press(x,y,button,modifiers):
if rectangle.hit_test(x,y):
print 'Hit'
:requires: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '1.0'
from .rectangle import Rectangle
from .triangle import Triangle
from .ellipse import Ellipse
from .cross import Cross
from .star import Star
| 34.854167
| 80
| 0.698745
|
__docformat__ = 'restructuredtext'
__version__ = '1.0'
from .rectangle import Rectangle
from .triangle import Triangle
from .ellipse import Ellipse
from .cross import Cross
from .star import Star
| true
| true
|
1c484dccfdcbf952d46374d4d53d3daed255caa8
| 48
|
py
|
Python
|
samcli/__init__.py
|
rawhideron/mav_0122
|
3f8b92347087f94ec76667dbb2f647937725660d
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2021-07-10T14:19:00.000Z
|
2021-07-10T14:19:00.000Z
|
samcli/__init__.py
|
QPC-database/aws-sam-cli
|
59c85768356089edb265c2ea7f53bce2412f9e19
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
samcli/__init__.py
|
QPC-database/aws-sam-cli
|
59c85768356089edb265c2ea7f53bce2412f9e19
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
"""
SAM CLI version
"""
__version__ = "1.26.0"
| 8
| 22
| 0.583333
|
__version__ = "1.26.0"
| true
| true
|
1c484f70ffaacf4cb7ed13a8f996d67a217a2f85
| 1,558
|
py
|
Python
|
user_main.py
|
s-jun/OSS_Term_Project
|
47747a92944f7f94f1393c9072f7ee9034de090a
|
[
"MIT"
] | null | null | null |
user_main.py
|
s-jun/OSS_Term_Project
|
47747a92944f7f94f1393c9072f7ee9034de090a
|
[
"MIT"
] | null | null | null |
user_main.py
|
s-jun/OSS_Term_Project
|
47747a92944f7f94f1393c9072f7ee9034de090a
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import *
from PyQt5 import uic
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import chart
from data_manager import read_predict
form_class = uic.loadUiType("user.ui")[0]
class WindowClass(QMainWindow, form_class):
def __init__(self):
super().__init__()
self.setupUi(self)
self.initUI()
self.comboBox.activated[str].connect(self.clicked)
def initUI(self):
self.fig = plt.Figure()
combo = self.comboBox.currentText()
self.canvas = FigureCanvas(self.fig)
self.chart.addWidget(self.canvas)
chart.draw_chart(self, combo)
self.canvas.draw()
self.prediction.append(read_predict(combo))
def clicked(self, text):
self.clear()
self.prediction.append(read_predict(text))
self.fig = plt.Figure()
self.canvas = FigureCanvas(self.fig)
self.chart.addWidget(self.canvas)
chart.draw_chart(self, text)
self.canvas.draw()
def clear(self):
self.prediction.clear()
tmp = self.chart
if tmp is not None:
while tmp.count():
item = tmp.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearvbox(item.layout())
if __name__== "__main__" :
app = QApplication(sys.argv)
myWindow = WindowClass()
myWindow.show()
app.exec_()
| 27.333333
| 80
| 0.616175
|
import sys
from PyQt5.QtWidgets import *
from PyQt5 import uic
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import chart
from data_manager import read_predict
form_class = uic.loadUiType("user.ui")[0]
class WindowClass(QMainWindow, form_class):
def __init__(self):
super().__init__()
self.setupUi(self)
self.initUI()
self.comboBox.activated[str].connect(self.clicked)
def initUI(self):
self.fig = plt.Figure()
combo = self.comboBox.currentText()
self.canvas = FigureCanvas(self.fig)
self.chart.addWidget(self.canvas)
chart.draw_chart(self, combo)
self.canvas.draw()
self.prediction.append(read_predict(combo))
def clicked(self, text):
self.clear()
self.prediction.append(read_predict(text))
self.fig = plt.Figure()
self.canvas = FigureCanvas(self.fig)
self.chart.addWidget(self.canvas)
chart.draw_chart(self, text)
self.canvas.draw()
def clear(self):
self.prediction.clear()
tmp = self.chart
if tmp is not None:
while tmp.count():
item = tmp.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearvbox(item.layout())
if __name__== "__main__" :
app = QApplication(sys.argv)
myWindow = WindowClass()
myWindow.show()
app.exec_()
| true
| true
|
1c484f734e415132256c335e128e6abcf0544a59
| 13,032
|
py
|
Python
|
tests/generator/test_compression.py
|
randywessels/tad-blockchain
|
08a5f9565aa27f211350717d5e8cda14b46359e4
|
[
"Apache-2.0"
] | null | null | null |
tests/generator/test_compression.py
|
randywessels/tad-blockchain
|
08a5f9565aa27f211350717d5e8cda14b46359e4
|
[
"Apache-2.0"
] | null | null | null |
tests/generator/test_compression.py
|
randywessels/tad-blockchain
|
08a5f9565aa27f211350717d5e8cda14b46359e4
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa: F501
from dataclasses import dataclass
from typing import List, Any
from unittest import TestCase
from tad.full_node.bundle_tools import (
bundle_suitable_for_compression,
compressed_coin_solution_entry_list,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
simple_solution_generator,
spend_bundle_to_serialized_coin_solution_entry_list,
)
from tad.full_node.generator import run_generator, create_generator_args
from tad.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from tad.types.generator_types import BlockGenerator, CompressorArg, GeneratorArg
from tad.types.spend_bundle import SpendBundle
from tad.util.byte_types import hexstr_to_bytes
from tad.util.ints import uint32
from tad.wallet.puzzles.load_clvm import load_clvm
from tests.core.make_block_generator import make_spend_bundle
from clvm import SExp
import io
from clvm.serialize import sexp_from_stream
from clvm_tools import binutils
TEST_GEN_DESERIALIZE = load_clvm("test_generator_deserialize.clvm", package_or_requirement="tad.wallet.puzzles")
DESERIALIZE_MOD = load_clvm("chialisp_deserialisation.clvm", package_or_requirement="tad.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="tad.wallet.puzzles")
DECOMPRESS_CSE = load_clvm("decompress_coin_solution_entry.clvm", package_or_requirement="tad.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_solution_entry_with_prefix.clvm", package_or_requirement="tad.wallet.puzzles"
)
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="tad.wallet.puzzles")
TEST_MULTIPLE = load_clvm("test_multiple_generator_input_arguments.clvm", package_or_requirement="tad.wallet.puzzles")
Nil = Program.from_bytes(b"\x80")
original_generator = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080"
) # noqa
gen1 = b"aaaaaaaaaa" + original_generator
gen2 = b"bb" + original_generator
FAKE_BLOCK_HEIGHT1 = uint32(100)
FAKE_BLOCK_HEIGHT2 = uint32(200)
@dataclass(frozen=True)
class MultipleCompressorArg:
arg: List[CompressorArg]
split_offset: int
def create_multiple_ref_generator(args: MultipleCompressorArg, spend_bundle: SpendBundle) -> BlockGenerator:
"""
Decompress a transaction by referencing bytes from multiple input generator references
"""
compressed_cse_list = compressed_coin_solution_entry_list(spend_bundle)
program = TEST_MULTIPLE.curry(
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
args.arg[0].start,
args.arg[0].end - args.split_offset,
args.arg[1].end - args.split_offset,
args.arg[1].end,
compressed_cse_list,
)
# TODO aqk: Improve ergonomics of CompressorArg -> GeneratorArg conversion
generator_args = [
GeneratorArg(FAKE_BLOCK_HEIGHT1, args.arg[0].generator),
GeneratorArg(FAKE_BLOCK_HEIGHT2, args.arg[1].generator),
]
return BlockGenerator(program, generator_args)
def spend_bundle_to_coin_solution_entry_list(bundle: SpendBundle) -> List[Any]:
r = []
for coin_solution in bundle.coin_solutions:
entry = [
coin_solution.coin.parent_coin_info,
sexp_from_stream(io.BytesIO(bytes(coin_solution.puzzle_reveal)), SExp.to),
coin_solution.coin.amount,
sexp_from_stream(io.BytesIO(bytes(coin_solution.solution)), SExp.to),
]
r.append(entry)
return r
class TestCompression(TestCase):
def test_spend_bundle_suitable(self):
sb: SpendBundle = make_spend_bundle(1)
assert bundle_suitable_for_compression(sb)
def test_compress_spend_bundle(self):
pass
def test_multiple_input_gen_refs(self):
start1, end1 = match_standard_transaction_at_any_index(gen1)
start2, end2 = match_standard_transaction_at_any_index(gen2)
ca1 = CompressorArg(FAKE_BLOCK_HEIGHT1, SerializedProgram.from_bytes(gen1), start1, end1)
ca2 = CompressorArg(FAKE_BLOCK_HEIGHT2, SerializedProgram.from_bytes(gen2), start2, end2)
prefix_len1 = end1 - start1
prefix_len2 = end2 - start2
assert prefix_len1 == prefix_len2
prefix_len = prefix_len1
results = []
for split_offset in range(prefix_len):
gen_args = MultipleCompressorArg([ca1, ca2], split_offset)
spend_bundle: SpendBundle = make_spend_bundle(1)
multi_gen = create_multiple_ref_generator(gen_args, spend_bundle)
cost, result = run_generator(multi_gen, INFINITE_COST)
results.append(result)
assert result is not None
assert cost > 0
assert all(r == results[0] for r in results)
def test_compressed_block_results(self):
sb: SpendBundle = make_spend_bundle(1)
start, end = match_standard_transaction_at_any_index(original_generator)
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
s = simple_solution_generator(sb)
assert c != s
cost_c, result_c = run_generator(c, INFINITE_COST)
cost_s, result_s = run_generator(s, INFINITE_COST)
print(result_c)
assert result_c is not None
assert result_s is not None
assert result_c == result_s
def test_spend_byndle_coin_solution(self):
for i in range(0, 10):
sb: SpendBundle = make_spend_bundle(i)
cs1 = SExp.to(spend_bundle_to_coin_solution_entry_list(sb)).as_bin()
cs2 = spend_bundle_to_serialized_coin_solution_entry_list(sb)
assert cs1 == cs2
class TestDecompression(TestCase):
def __init__(self, *args, **kwargs):
super(TestDecompression, self).__init__(*args, **kwargs)
self.maxDiff = None
def test_deserialization(self):
self.maxDiff = None
cost, out = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [bytes(Program.to("hello"))])
assert out == Program.to("hello")
def test_deserialization_as_argument(self):
self.maxDiff = None
cost, out = TEST_GEN_DESERIALIZE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, Nil, bytes(Program.to("hello"))]
)
print(bytes(Program.to("hello")))
print()
print(out)
assert out == Program.to("hello")
def test_decompress_puzzle(self):
cost, out = DECOMPRESS_PUZZLE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, b"\xff", bytes(Program.to("pubkey")), b"\x80"]
)
print()
print(out)
# An empty CSE is invalid. (An empty CSE list may be okay)
# def test_decompress_empty_cse(self):
# cse0 = binutils.assemble("()")
# cost, out = DECOMPRESS_CSE.run_with_cost(INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0])
# print()
# print(out)
def test_decompress_cse(self):
"""Decompress a single CSE / CoinSolutionEntry"""
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
cost, out = DECOMPRESS_CSE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0]
)
print()
print(out)
def test_decompress_cse_with_prefix(self):
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
start = 2 + 44
end = start + 238
prefix = original_generator[start:end]
# (deserialize decompress_puzzle puzzle_prefix cse)
cost, out = DECOMPRESS_CSE_WITH_PREFIX.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, prefix, cse0]
)
print()
print(out)
def test_block_program_zero(self):
"Decompress a list of CSEs"
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_solution_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
cost, out = DECOMPRESS_BLOCK.run_with_cost(
INFINITE_COST,
[
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
start,
Program.to(end),
cse2,
DESERIALIZE_MOD,
[bytes(original_generator)],
],
)
print()
print(out)
def test_block_program_zero_with_curry(self):
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_solution_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
p = DECOMPRESS_BLOCK.curry(DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end))
cost, out = p.run_with_cost(INFINITE_COST, [cse2, DESERIALIZE_MOD, [bytes(original_generator)]])
print()
print(p)
print(out)
p_with_cses = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end), cse2, DESERIALIZE_MOD
)
generator_args = create_generator_args([SerializedProgram.from_bytes(original_generator)])
cost, out = p_with_cses.run_with_cost(INFINITE_COST, generator_args)
print()
print(p_with_cses)
print(out)
| 44.176271
| 792
| 0.73872
|
from dataclasses import dataclass
from typing import List, Any
from unittest import TestCase
from tad.full_node.bundle_tools import (
bundle_suitable_for_compression,
compressed_coin_solution_entry_list,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
simple_solution_generator,
spend_bundle_to_serialized_coin_solution_entry_list,
)
from tad.full_node.generator import run_generator, create_generator_args
from tad.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from tad.types.generator_types import BlockGenerator, CompressorArg, GeneratorArg
from tad.types.spend_bundle import SpendBundle
from tad.util.byte_types import hexstr_to_bytes
from tad.util.ints import uint32
from tad.wallet.puzzles.load_clvm import load_clvm
from tests.core.make_block_generator import make_spend_bundle
from clvm import SExp
import io
from clvm.serialize import sexp_from_stream
from clvm_tools import binutils
TEST_GEN_DESERIALIZE = load_clvm("test_generator_deserialize.clvm", package_or_requirement="tad.wallet.puzzles")
DESERIALIZE_MOD = load_clvm("chialisp_deserialisation.clvm", package_or_requirement="tad.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="tad.wallet.puzzles")
DECOMPRESS_CSE = load_clvm("decompress_coin_solution_entry.clvm", package_or_requirement="tad.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_solution_entry_with_prefix.clvm", package_or_requirement="tad.wallet.puzzles"
)
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="tad.wallet.puzzles")
TEST_MULTIPLE = load_clvm("test_multiple_generator_input_arguments.clvm", package_or_requirement="tad.wallet.puzzles")
Nil = Program.from_bytes(b"\x80")
original_generator = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080"
)
gen1 = b"aaaaaaaaaa" + original_generator
gen2 = b"bb" + original_generator
FAKE_BLOCK_HEIGHT1 = uint32(100)
FAKE_BLOCK_HEIGHT2 = uint32(200)
@dataclass(frozen=True)
class MultipleCompressorArg:
arg: List[CompressorArg]
split_offset: int
def create_multiple_ref_generator(args: MultipleCompressorArg, spend_bundle: SpendBundle) -> BlockGenerator:
compressed_cse_list = compressed_coin_solution_entry_list(spend_bundle)
program = TEST_MULTIPLE.curry(
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
args.arg[0].start,
args.arg[0].end - args.split_offset,
args.arg[1].end - args.split_offset,
args.arg[1].end,
compressed_cse_list,
)
generator_args = [
GeneratorArg(FAKE_BLOCK_HEIGHT1, args.arg[0].generator),
GeneratorArg(FAKE_BLOCK_HEIGHT2, args.arg[1].generator),
]
return BlockGenerator(program, generator_args)
def spend_bundle_to_coin_solution_entry_list(bundle: SpendBundle) -> List[Any]:
r = []
for coin_solution in bundle.coin_solutions:
entry = [
coin_solution.coin.parent_coin_info,
sexp_from_stream(io.BytesIO(bytes(coin_solution.puzzle_reveal)), SExp.to),
coin_solution.coin.amount,
sexp_from_stream(io.BytesIO(bytes(coin_solution.solution)), SExp.to),
]
r.append(entry)
return r
class TestCompression(TestCase):
def test_spend_bundle_suitable(self):
sb: SpendBundle = make_spend_bundle(1)
assert bundle_suitable_for_compression(sb)
def test_compress_spend_bundle(self):
pass
def test_multiple_input_gen_refs(self):
start1, end1 = match_standard_transaction_at_any_index(gen1)
start2, end2 = match_standard_transaction_at_any_index(gen2)
ca1 = CompressorArg(FAKE_BLOCK_HEIGHT1, SerializedProgram.from_bytes(gen1), start1, end1)
ca2 = CompressorArg(FAKE_BLOCK_HEIGHT2, SerializedProgram.from_bytes(gen2), start2, end2)
prefix_len1 = end1 - start1
prefix_len2 = end2 - start2
assert prefix_len1 == prefix_len2
prefix_len = prefix_len1
results = []
for split_offset in range(prefix_len):
gen_args = MultipleCompressorArg([ca1, ca2], split_offset)
spend_bundle: SpendBundle = make_spend_bundle(1)
multi_gen = create_multiple_ref_generator(gen_args, spend_bundle)
cost, result = run_generator(multi_gen, INFINITE_COST)
results.append(result)
assert result is not None
assert cost > 0
assert all(r == results[0] for r in results)
def test_compressed_block_results(self):
sb: SpendBundle = make_spend_bundle(1)
start, end = match_standard_transaction_at_any_index(original_generator)
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
s = simple_solution_generator(sb)
assert c != s
cost_c, result_c = run_generator(c, INFINITE_COST)
cost_s, result_s = run_generator(s, INFINITE_COST)
print(result_c)
assert result_c is not None
assert result_s is not None
assert result_c == result_s
def test_spend_byndle_coin_solution(self):
for i in range(0, 10):
sb: SpendBundle = make_spend_bundle(i)
cs1 = SExp.to(spend_bundle_to_coin_solution_entry_list(sb)).as_bin()
cs2 = spend_bundle_to_serialized_coin_solution_entry_list(sb)
assert cs1 == cs2
class TestDecompression(TestCase):
def __init__(self, *args, **kwargs):
super(TestDecompression, self).__init__(*args, **kwargs)
self.maxDiff = None
def test_deserialization(self):
self.maxDiff = None
cost, out = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [bytes(Program.to("hello"))])
assert out == Program.to("hello")
def test_deserialization_as_argument(self):
self.maxDiff = None
cost, out = TEST_GEN_DESERIALIZE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, Nil, bytes(Program.to("hello"))]
)
print(bytes(Program.to("hello")))
print()
print(out)
assert out == Program.to("hello")
def test_decompress_puzzle(self):
cost, out = DECOMPRESS_PUZZLE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, b"\xff", bytes(Program.to("pubkey")), b"\x80"]
)
print()
print(out)
def test_decompress_cse(self):
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
)
cost, out = DECOMPRESS_CSE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0]
)
print()
print(out)
def test_decompress_cse_with_prefix(self):
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
)
start = 2 + 44
end = start + 238
prefix = original_generator[start:end]
cost, out = DECOMPRESS_CSE_WITH_PREFIX.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, prefix, cse0]
)
print()
print(out)
def test_block_program_zero(self):
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
)
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
)
start = 2 + 44
end = start + 238
cost, out = DECOMPRESS_BLOCK.run_with_cost(
INFINITE_COST,
[
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
start,
Program.to(end),
cse2,
DESERIALIZE_MOD,
[bytes(original_generator)],
],
)
print()
print(out)
def test_block_program_zero_with_curry(self):
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
)
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
)
start = 2 + 44
end = start + 238
p = DECOMPRESS_BLOCK.curry(DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end))
cost, out = p.run_with_cost(INFINITE_COST, [cse2, DESERIALIZE_MOD, [bytes(original_generator)]])
print()
print(p)
print(out)
p_with_cses = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end), cse2, DESERIALIZE_MOD
)
generator_args = create_generator_args([SerializedProgram.from_bytes(original_generator)])
cost, out = p_with_cses.run_with_cost(INFINITE_COST, generator_args)
print()
print(p_with_cses)
print(out)
| true
| true
|
1c484fa08fc49e2469b08d94e3f89720e8e00a3f
| 4,022
|
py
|
Python
|
utils/extra/common.py
|
ekojs/mitra
|
9c2458b7bf83af4a7e56b0e227f07454d99298e1
|
[
"MIT"
] | null | null | null |
utils/extra/common.py
|
ekojs/mitra
|
9c2458b7bf83af4a7e56b0e227f07454d99298e1
|
[
"MIT"
] | null | null | null |
utils/extra/common.py
|
ekojs/mitra
|
9c2458b7bf83af4a7e56b0e227f07454d99298e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# common functions
# Ange Albertini 2020
import random
import re
from string import punctuation, digits, ascii_letters
def randblock(l):
return bytes([random.randrange(255) for i in range(l)])
# Cosmetic functions ###########################################################
ASCII = (punctuation + digits + ascii_letters + " ").encode()
def hexii(c):
#replace 00 by empty char
if c == b"\0":
return b" "
#replace printable char by .<char>
if c in ASCII:
return b" " + bytes([c])
if c == 0x0a:
return b"\n"
if c == b"\r":
return b"\\r"
#otherwise, return hex
return b"%02X" % c
def hexiis(s):
return repr(b" ".join([hexii(c) for c in s]))[2:-1]
def showsplit(d, i):
WIDTH = 8
return "%s | %s" % (hexiis(d[i-WIDTH:i]), hexiis(d[i:i+WIDTH]))
# 'GCM' functions ##############################################################
def cut3(data, a):
# skip 0:a[0] -- not needed ?
return data[a[0]:a[1]], data[a[1]:a[2]], data[a[2]:]
def mixfiles(d1, d2, cuts):
"""mixing data with exclusive parts of each data"""
assert len(d1) == len(d2)
d = b""
start = 0
keep = d1
skip = d2
for end in cuts:
d += keep[start:end]
start = end
keep, skip = skip, keep
d += keep[start:]
return d
def splitfile(data, cuts):
p1 = b""
p2 = b""
start = 0
count = 0
for end in cuts:
count += 1
p1 += data[start:end]
p2 += randblock(end-start)
start = end
p1, p2 = p2, p1
p1 += data[end:]
p2 += randblock(len(data)-end)
assert len(p1) == len(p2)
if count % 2 == 1:
p1, p2 = p2, p1
return p1, p2
# PDF functions ################################################################
def EnclosedStringS(d, starts, ends):
off = d.find(starts)
return d[off:d.find(ends, off + len(starts))]
def EnclosedString(d, starts, ends):
off = d.find(starts) + len(starts)
return d[off:d.find(ends, off)]
def getCount(d):
s = EnclosedString(d, b"/Count ", b"/")
count = int(s)
return count
def getObjDecl(d, s):
val = EnclosedString(d, s, b"0 R")
val = val.strip()
if val.decode().isnumeric():
return b"%s %s 0 R" % (s, val)
else:
return b""
def getValDecl(d, s):
"""locates declaration such as '/PageMode /UseOutlines' """
off = d.find(s) + len(s)
if off == -1:
return b""
match = re.match(b" *\/[A-Za-z0-9]*", d[off:])
if match is None:
return b""
else:
return b"%s %s" % (s, match[0])
def adjustToC(toc):
"""increasing page numbers of each ToC entry"""
for entry in toc:
d = entry[3]
if d["kind"] == 1:
d["page"] += 1
entry[2] += 1
return toc
def adjustPDF(contents):
startSig = contents.find(b"%PDF") # relative to file start
startXREF = contents.find(b"\nxref\n0 ") + 1
endXREF = contents.find(b" \n\n", startXREF) + 1
origXref = contents[startXREF:endXREF]
objCount = int(origXref.splitlines()[1].split(b" ")[1])
xrefLines = [
b"xref",
b"0 %i" % objCount,
# mutool declare its first xref like this
b"0000000000 00001 f "
]
i = 1
while i < objCount:
# only very standard object declarations
off = contents.find(b"\n%i 0 obj\n" % i) + 1
xrefLines.append(b"%010i 00000 n " % (off - startSig))
i += 1
xref = b"\n".join(xrefLines)
# XREF length should be unchanged
try:
assert len(xref) == len(origXref)
except AssertionError:
print("<:", repr(origXref))
print(">:", repr(xref))
contents = contents[:startXREF] + xref + contents[endXREF:]
startStartXref = contents.find(b"\nstartxref\n", endXREF) + len(b"\nstartxref\n")
endStartXref = contents.find(b"\n%%EOF", startStartXref)
contents = contents[:startStartXref] + b"%08i" % (startXREF - startSig) + contents[endStartXref:]
return contents
template = b"""%%PDF-1.3
%%\xC2\xB5\xC2\xB6
1 0 obj
<</Length 2 0 R>>
stream
%(payload)s
endstream
endobj
2 0 obj
%(payload_l)i
endobj
3 0 obj
<<
/Type /Catalog
/Pages 4 0 R
/Payload 1 0 R %% to prevent garbage collection
%(extra)s %% optional: Names + OpenAction + Outlines + PageMode
>>
endobj
4 0 obj
<</Type/Pages/Count %(count)i/Kids[%(kids)s]>>
endobj
"""
| 19.812808
| 98
| 0.598707
|
import random
import re
from string import punctuation, digits, ascii_letters
def randblock(l):
return bytes([random.randrange(255) for i in range(l)])
| true
| true
|
1c484fc0efb0612c2fdd4194aa295cc755d295d9
| 6,922
|
py
|
Python
|
pyrsss/signal/spectrum.py
|
grawe/pyrsss
|
31fd88734b00f814e7aaa5829c4ac49c7bf53563
|
[
"MIT"
] | null | null | null |
pyrsss/signal/spectrum.py
|
grawe/pyrsss
|
31fd88734b00f814e7aaa5829c4ac49c7bf53563
|
[
"MIT"
] | null | null | null |
pyrsss/signal/spectrum.py
|
grawe/pyrsss
|
31fd88734b00f814e7aaa5829c4ac49c7bf53563
|
[
"MIT"
] | null | null | null |
from __future__ import division
import math
import numpy as NP
import scipy.signal
def rect(t, a):
"""
Return a vector of the same length as $t$ that is equal to 1 for
absolute values of $t$ less than $a$/2 and 0 otherwise.
"""
x = NP.zeros_like(t)
x[NP.abs(t) < a/2] = 1
x[NP.abs(t) == a/2] = 1/2
return x
def nextpow2(N):
"""
Return the power of 2 greater than or equal to *N*.
"""
return 2**int(math.ceil(math.log(N, 2)))
def spectrum(x,
n0=0,
T_s=1,
oversample=1,
only_positive=True):
"""
Return the spectrum for the signal *x* calculated via FFT and the
associated frequencies as a tuple. The *n0* parameter gives the
index in *x* for time index 0 (*n0* = 0 means that `x[0]` is at
time 0). The number of spectral samples returned is the next power
of 2 greater than the length of *x* multiplied by *oversample*. If
*only_positive*, return the spectrum only for positive frequencies
(assuming *x* is real).
"""
assert oversample >= 1 and isinstance(oversample, int)
N = nextpow2(len(x)) * 2**(oversample - 1)
if only_positive:
X = NP.fft.rfft(x, n=N) * T_s
f = NP.fft.rfftfreq(N, d=T_s)
else:
X = NP.fft.fft(x, n=N) * T_s
f = NP.fft.fftfreq(N, d=T_s)
X = NP.fft.fftshift(X)
f = NP.fft.fftshift(f)
if n0 != 0:
X *= NP.exp(-1j * 2 * math.pi * NP.arange(N) * n0 / N)
return f, X
def blackman_tukey(x,
M,
L,
y=None,
window='boxcar',
d=1,
full=False):
"""
Compute the Blackman-Tukey cross power spectral density (PSD)
estimate between the time-domain signals *x* and *y* (must be the
same length as *x*). If *y* is not given, compute the power
spectral density estimate of *x*. Use the spectral window with
identifier *window* (see the options in
:func:scipy.`signal.get_window`, e.g., a tuple can be used to pass
arguments to the window function) and length *M* (i.e., the
maximum auto-correlation lag to include in the estimate). Compute
the estimate at *L* uniformly spaced frequency samples where *d*
is the time domain sample interval. If not *full*, return the
tuple containing the length *L* PSD estimate and length *L*
corresponding frequencies. If *full*, also return the estimated
cross correlation and window function (i.e., a tuple with four
elements).
"""
N = len(x)
assert M <= N
if y is None:
y = x
else:
assert len(y) == N
Rxy = scipy.signal.correlate(x, y) / N
Rxy_window = Rxy[(N - 1) - M:(N - 1) + M + 1]
window = scipy.signal.get_window(window, 2*M + 1, fftbins=False)
k_range = NP.arange(0, L)
shift = NP.exp(2j * NP.pi * k_range * M / L)
Sxy = NP.fft.fft(window * Rxy_window, n=L) * shift
f = NP.fft.fftfreq(L, d=d)
if full:
return (Sxy, f, Rxy, window)
else:
return (Sxy, f)
def periodogram(x,
L,
y=None,
d=1,
full=False):
"""
Compute the periodogram of the cross power spectral density of *x*
and *y*. The implementation is based on :func:`blackman-tukey`,
following the same input and output conventions.
"""
return blackman_tukey(x, len(x) - 1, L, y=y, d=d, full=full)
def etfe(x,
y,
M,
L,
d=1,
window='parzen'):
"""
Compute the empirical transfer function estimate (ETFE) relating
the input time series *x* to the output time series *y*. Compute
the response at *L* equally spaced frequency samples (where the
sampling period is *D*). Limit the correlations to a lag of *M*
(and *M* <= len(*x*) - 1) and use the window function *window*
(see :func:`scipy.signal.get_window`). Return the tuple containing
the ETFE and the frequency sample points.
See Section 6.3 of Ljung, System Identification Theory for the
User, 2nd Edition.
"""
Phi_yu, f = blackman_tukey(y, M, L, y=x, d=d, window=window)
Phi_u, _ = blackman_tukey(x, M, L, d=d, window=window)
return Phi_yu / Phi_u, f
if __name__ == '__main__':
import pylab as PL
# Reproduction of Oppenheim and Schafer (O&S), 3rd edition,
# Example 10.4. The example considers the effects of windowing and
# frequency sampling of a two sinusoid superposition example.
N = 64 # number of samples (window length)
fs = 10e3 # sampling frequency (Hz)
T = 1 / fs # sample period (s)
def W_r(w, L):
"""
DTFT of rectangular window of length *L* evaluated an angular
frequencies *w*. See O&S (10.11).
"""
return NP.exp(-1j * w * (L - 1) / 2) * NP.sin(w * L / 2) / NP.sin(w / 2)
K = 2048 # number of frequency samples
w = NP.linspace(-math.pi, math.pi, K)
W = W_r(w, N)
n = NP.arange(N)
v = NP.cos(2*math.pi/14 * n) + 0.75 * NP.cos(4*math.pi/15 * n)
def V(w, A0, w0, A1, w1, L):
"""
DTFT of the superposition of two sinusoids with amplitudes *A0*
and *A1*, angular frequencies *w0* and *w1*, and a recangular
window length *L* and angular frequencies *w*.
"""
V1 = A0 / 2 * W_r(w - w0, L)
V2 = A0 / 2 * W_r(w + w0, L)
V3 = A1 / 2 * W_r(w - w1, L)
V4 = A1 / 2 * W_r(w + w1, L)
return V1 + V2 + V3 + V4
A0 = 1
A1 = 0.75
f2 = NP.linspace(-fs/2, fs/2, K)
w2 = f2 / fs * 2 * math.pi
V_dtft = V(w2, A0, 2*math.pi/14, A1, 4*math.pi/15, N)
V_spectrum, f_spectrum = spectrum(v,
T_s=T,
only_positive=False)
V_spectrum2, f_spectrum2 = spectrum(v,
T_s=T,
oversample=2,
only_positive=False)
V_spectrum3, f_spectrum3 = spectrum(v,
T_s=T)
PL.figure(figsize=(10, 4))
PL.plot(f2,
NP.abs(V_dtft) * T,
c='C0',
label='DTFT (scaled)')
PL.scatter(f_spectrum,
NP.abs(V_spectrum),
c='C1',
s=10,
label='spectrum')
PL.scatter(f_spectrum2,
NP.abs(V_spectrum2),
c='C2',
zorder=-1,
s=20,
label='spectrum (oversample=2)')
PL.scatter(f_spectrum3,
NP.abs(V_spectrum3),
c='C3',
zorder=-2,
s=40,
label='spectrum (positive-only)')
PL.legend()
PL.xlabel('Frequency (Hz)')
PL.ylabel('Amplitude')
PL.title('Comparison of pyrsss spectrum and scaled DTFT')
PL.show()
| 31.463636
| 80
| 0.545652
|
from __future__ import division
import math
import numpy as NP
import scipy.signal
def rect(t, a):
x = NP.zeros_like(t)
x[NP.abs(t) < a/2] = 1
x[NP.abs(t) == a/2] = 1/2
return x
def nextpow2(N):
return 2**int(math.ceil(math.log(N, 2)))
def spectrum(x,
n0=0,
T_s=1,
oversample=1,
only_positive=True):
assert oversample >= 1 and isinstance(oversample, int)
N = nextpow2(len(x)) * 2**(oversample - 1)
if only_positive:
X = NP.fft.rfft(x, n=N) * T_s
f = NP.fft.rfftfreq(N, d=T_s)
else:
X = NP.fft.fft(x, n=N) * T_s
f = NP.fft.fftfreq(N, d=T_s)
X = NP.fft.fftshift(X)
f = NP.fft.fftshift(f)
if n0 != 0:
X *= NP.exp(-1j * 2 * math.pi * NP.arange(N) * n0 / N)
return f, X
def blackman_tukey(x,
M,
L,
y=None,
window='boxcar',
d=1,
full=False):
N = len(x)
assert M <= N
if y is None:
y = x
else:
assert len(y) == N
Rxy = scipy.signal.correlate(x, y) / N
Rxy_window = Rxy[(N - 1) - M:(N - 1) + M + 1]
window = scipy.signal.get_window(window, 2*M + 1, fftbins=False)
k_range = NP.arange(0, L)
shift = NP.exp(2j * NP.pi * k_range * M / L)
Sxy = NP.fft.fft(window * Rxy_window, n=L) * shift
f = NP.fft.fftfreq(L, d=d)
if full:
return (Sxy, f, Rxy, window)
else:
return (Sxy, f)
def periodogram(x,
L,
y=None,
d=1,
full=False):
return blackman_tukey(x, len(x) - 1, L, y=y, d=d, full=full)
def etfe(x,
y,
M,
L,
d=1,
window='parzen'):
Phi_yu, f = blackman_tukey(y, M, L, y=x, d=d, window=window)
Phi_u, _ = blackman_tukey(x, M, L, d=d, window=window)
return Phi_yu / Phi_u, f
if __name__ == '__main__':
import pylab as PL
N = 64
fs = 10e3
T = 1 / fs
def W_r(w, L):
return NP.exp(-1j * w * (L - 1) / 2) * NP.sin(w * L / 2) / NP.sin(w / 2)
K = 2048
w = NP.linspace(-math.pi, math.pi, K)
W = W_r(w, N)
n = NP.arange(N)
v = NP.cos(2*math.pi/14 * n) + 0.75 * NP.cos(4*math.pi/15 * n)
def V(w, A0, w0, A1, w1, L):
V1 = A0 / 2 * W_r(w - w0, L)
V2 = A0 / 2 * W_r(w + w0, L)
V3 = A1 / 2 * W_r(w - w1, L)
V4 = A1 / 2 * W_r(w + w1, L)
return V1 + V2 + V3 + V4
A0 = 1
A1 = 0.75
f2 = NP.linspace(-fs/2, fs/2, K)
w2 = f2 / fs * 2 * math.pi
V_dtft = V(w2, A0, 2*math.pi/14, A1, 4*math.pi/15, N)
V_spectrum, f_spectrum = spectrum(v,
T_s=T,
only_positive=False)
V_spectrum2, f_spectrum2 = spectrum(v,
T_s=T,
oversample=2,
only_positive=False)
V_spectrum3, f_spectrum3 = spectrum(v,
T_s=T)
PL.figure(figsize=(10, 4))
PL.plot(f2,
NP.abs(V_dtft) * T,
c='C0',
label='DTFT (scaled)')
PL.scatter(f_spectrum,
NP.abs(V_spectrum),
c='C1',
s=10,
label='spectrum')
PL.scatter(f_spectrum2,
NP.abs(V_spectrum2),
c='C2',
zorder=-1,
s=20,
label='spectrum (oversample=2)')
PL.scatter(f_spectrum3,
NP.abs(V_spectrum3),
c='C3',
zorder=-2,
s=40,
label='spectrum (positive-only)')
PL.legend()
PL.xlabel('Frequency (Hz)')
PL.ylabel('Amplitude')
PL.title('Comparison of pyrsss spectrum and scaled DTFT')
PL.show()
| true
| true
|
1c485046ce096457e354bad1db4cbc7a66d76bb5
| 2,862
|
py
|
Python
|
awardsApp/api.py
|
MutuaFranklin/App-Awards
|
020c85db144156ec02f12815cd675245d4ad9db3
|
[
"MIT"
] | null | null | null |
awardsApp/api.py
|
MutuaFranklin/App-Awards
|
020c85db144156ec02f12815cd675245d4ad9db3
|
[
"MIT"
] | null | null | null |
awardsApp/api.py
|
MutuaFranklin/App-Awards
|
020c85db144156ec02f12815cd675245d4ad9db3
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse
from django.http import HttpResponse, Http404,HttpResponseRedirect
from .permissions import IsAdminOrReadOnly
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from profiles.models import Profile
from .models import Project
from .serializer import ProfileSerializer, ProjectSerializer
from rest_framework import viewsets
from rest_framework import status
# class ProfileViewSet(viewsets.ModelViewSet):
# queryset = Profile.objects.all()
# serializer_class = ProfileSerializer
# class ProjectViewSet(viewsets.ModelViewSet):
# queryset = Project.objects.all()
# serializer_class = ProjectSerializer
#LMS
class ProfileList(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
profiles = Profile.objects.all()
serializers = ProfileSerializer(profiles, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = Profile(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectList(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, format=None):
projects = Project.objects.all()
serializers = ProjectSerializer(projects, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = Project(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectDescription(APIView):
permission_classes = (IsAuthenticated,)
def get_project(self, pk):
try:
return Project.objects.get(pk=pk)
except Project.DoesNotExist:
return Http404
def get(self, request, pk, format=None):
project = self.get_project(pk)
serializers = ProjectSerializer(project)
return Response(serializers.data)
def put(self, request, pk, format=None):
project = self.get_project()(pk)
serializers = ProjectSerializer(project, request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data)
else:
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
project = self.get_project(pk)
project.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 30.774194
| 83
| 0.710342
|
from django.http import JsonResponse
from django.http import HttpResponse, Http404,HttpResponseRedirect
from .permissions import IsAdminOrReadOnly
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from profiles.models import Profile
from .models import Project
from .serializer import ProfileSerializer, ProjectSerializer
from rest_framework import viewsets
from rest_framework import status
class ProfileList(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
profiles = Profile.objects.all()
serializers = ProfileSerializer(profiles, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = Profile(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectList(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, format=None):
projects = Project.objects.all()
serializers = ProjectSerializer(projects, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = Project(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectDescription(APIView):
permission_classes = (IsAuthenticated,)
def get_project(self, pk):
try:
return Project.objects.get(pk=pk)
except Project.DoesNotExist:
return Http404
def get(self, request, pk, format=None):
project = self.get_project(pk)
serializers = ProjectSerializer(project)
return Response(serializers.data)
def put(self, request, pk, format=None):
project = self.get_project()(pk)
serializers = ProjectSerializer(project, request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data)
else:
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
project = self.get_project(pk)
project.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| true
| true
|
1c48504c2a6e00bf9546d691cb3602dd96353db6
| 193
|
py
|
Python
|
language/equivalence/formation.py
|
jedhsu/language
|
3772a4a0ff287e1fc5ebefc716b8d91928d04c72
|
[
"MIT"
] | null | null | null |
language/equivalence/formation.py
|
jedhsu/language
|
3772a4a0ff287e1fc5ebefc716b8d91928d04c72
|
[
"MIT"
] | null | null | null |
language/equivalence/formation.py
|
jedhsu/language
|
3772a4a0ff287e1fc5ebefc716b8d91928d04c72
|
[
"MIT"
] | null | null | null |
"""
*Product Formation* A, B: Type _proves_ A x B: Type
"The prevalence of products of types concides with the
prevalence of cartesian products in categories." (Corfield 40)
"""
| 21.444444
| 64
| 0.689119
| true
| true
|
|
1c48511bdf2bd0df09a51e286757ce2441bb1185
| 441
|
py
|
Python
|
pkgs/ops-pkg/src/genie/libs/ops/ospf/iosxe/yang/ospf.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/ospf/iosxe/yang/ospf.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/ospf/iosxe/yang/ospf.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
from genie.ops.base import Context
from genie.libs.ops.ospf.iosxe.ospf import Ospf as b_ospf
from genie.libs.parser.iosxe import show_ospf
class Ospf(b_ospf):
'''Ospf Ops Object'''
# To keep short names
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.context_manager[show_ospf.ShowIpOspf] = Context.yang
# Rest use cli as their info cannot be retrieve via yang at the moment
| 33.923077
| 78
| 0.705215
|
from genie.ops.base import Context
from genie.libs.ops.ospf.iosxe.ospf import Ospf as b_ospf
from genie.libs.parser.iosxe import show_ospf
class Ospf(b_ospf):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.context_manager[show_ospf.ShowIpOspf] = Context.yang
| true
| true
|
1c48511d9b3288b699faa35bf674bb9a5336cf68
| 1,995
|
py
|
Python
|
Code/test-PSO.py
|
gitFloyd/AAI-Project-2
|
c6bb4d389248c3385e58a0c399343322a6dd887f
|
[
"MIT"
] | null | null | null |
Code/test-PSO.py
|
gitFloyd/AAI-Project-2
|
c6bb4d389248c3385e58a0c399343322a6dd887f
|
[
"MIT"
] | null | null | null |
Code/test-PSO.py
|
gitFloyd/AAI-Project-2
|
c6bb4d389248c3385e58a0c399343322a6dd887f
|
[
"MIT"
] | null | null | null |
import random
#random.seed(111)
import PSO
import Model
import Dataset as DS
from Log import Log
Dataset = DS.Dataset
Pistachio = DS.Pistachio
Layer = Model.Layer
DenseLayer = Model.DenseLayer
SparseLayer = Model.SparseLayer
FMLayer = Model.FuzzyMembershipLayer
InputLayer = Model.InputLayer
# particles, weights
# X = [1,2,3,4,5]
# y = [1,0,0]
pistachios = Pistachio(Dataset.LINUX_NL)
pistachios.Load()
pistachios.Shuffle()
offset = 0
data = pistachios.Fetch('Area', 'Solidity', 'Roundness', 'Compactness', 'Shapefactor_1', 'Class', limit = 100, offset = offset)
X = [row[0:-1] for row in data]
y = [row[-1] for row in data]
val_data = pistachios.Fetch('Area', 'Solidity', 'Roundness', 'Compactness', 'Shapefactor_1', 'Class', limit = 10, offset = offset + 100)
val_X = [row[0:-1] for row in val_data]
val_y = [row[-1] for row in val_data]
#randConnections = SparseLayer.RandConnections(len(X[0]), len(X[0])*2, len(X[0]))
myModel = Model.Model([
InputLayer(len(X[0])),
#SparseLayer(randConnections, Layer.RELU),
FMLayer(len(X[0]), len(X[0]) * 2, Layer.RELU),
DenseLayer(128, Layer.RELU),
DenseLayer(16, Layer.RELU),
DenseLayer(2, Layer.SOFTMAX),
])
numInputs = 20
numParticles = 20
numWeights = myModel.NumWeights()
psoTest = PSO.PSO(myModel, (numParticles, myModel.NumWeights()))
records = psoTest.ExecuteMany(X[0:numInputs], y[0:numInputs], iterations=20)
# bestWeights = records[-1][1]
bestWeights = records.pop()[1]
for i in range(10):
# print(records[i][1][0:10])
pass
# Log.Print()
for i in range(len(val_X)):
#print(val_X[i])
print('Predict: {}; Actual: {}; val_X: {}'.format(myModel.Execute(val_X[i], bestWeights), val_y[i], val_X[i]))
print('---------------------')
for i in range(20):
#print(val_X[i])
print('Predict: {}'.format(myModel.Execute([random.randint(-1,0) + random.random() for _ in range(5)], bestWeights)))
Log.Add('bestWeights', '{}'.format(bestWeights))
Log.SaveAll()
| 27.328767
| 137
| 0.663659
|
import random
import PSO
import Model
import Dataset as DS
from Log import Log
Dataset = DS.Dataset
Pistachio = DS.Pistachio
Layer = Model.Layer
DenseLayer = Model.DenseLayer
SparseLayer = Model.SparseLayer
FMLayer = Model.FuzzyMembershipLayer
InputLayer = Model.InputLayer
pistachios = Pistachio(Dataset.LINUX_NL)
pistachios.Load()
pistachios.Shuffle()
offset = 0
data = pistachios.Fetch('Area', 'Solidity', 'Roundness', 'Compactness', 'Shapefactor_1', 'Class', limit = 100, offset = offset)
X = [row[0:-1] for row in data]
y = [row[-1] for row in data]
val_data = pistachios.Fetch('Area', 'Solidity', 'Roundness', 'Compactness', 'Shapefactor_1', 'Class', limit = 10, offset = offset + 100)
val_X = [row[0:-1] for row in val_data]
val_y = [row[-1] for row in val_data]
myModel = Model.Model([
InputLayer(len(X[0])),
FMLayer(len(X[0]), len(X[0]) * 2, Layer.RELU),
DenseLayer(128, Layer.RELU),
DenseLayer(16, Layer.RELU),
DenseLayer(2, Layer.SOFTMAX),
])
numInputs = 20
numParticles = 20
numWeights = myModel.NumWeights()
psoTest = PSO.PSO(myModel, (numParticles, myModel.NumWeights()))
records = psoTest.ExecuteMany(X[0:numInputs], y[0:numInputs], iterations=20)
bestWeights = records.pop()[1]
for i in range(10):
pass
for i in range(len(val_X)):
print('Predict: {}; Actual: {}; val_X: {}'.format(myModel.Execute(val_X[i], bestWeights), val_y[i], val_X[i]))
print('---------------------')
for i in range(20):
print('Predict: {}'.format(myModel.Execute([random.randint(-1,0) + random.random() for _ in range(5)], bestWeights)))
Log.Add('bestWeights', '{}'.format(bestWeights))
Log.SaveAll()
| true
| true
|
1c4852d27622febf8afb7626b16291be56d91b72
| 2,894
|
py
|
Python
|
vimcc.py
|
joas77/vim-winccoa
|
46c84244e5bf6679e7ef00aaf814bcfdedb596b6
|
[
"MIT"
] | 2
|
2021-03-12T04:48:48.000Z
|
2021-09-27T15:09:33.000Z
|
vimcc.py
|
joas77/vim-winccoa
|
46c84244e5bf6679e7ef00aaf814bcfdedb596b6
|
[
"MIT"
] | null | null | null |
vimcc.py
|
joas77/vim-winccoa
|
46c84244e5bf6679e7ef00aaf814bcfdedb596b6
|
[
"MIT"
] | 1
|
2021-04-15T18:13:50.000Z
|
2021-04-15T18:13:50.000Z
|
#!/usr/bin/env python3
import sys
import os
import configparser
import shutil
import subprocess
def find_project_config_file(directory, max_levels = 5):
if max_levels == 0 or directory == "/" or directory == "":
return None #Config file not found
fileName = os.path.join(directory, "config/config")
if os.path.isfile(fileName):
return fileName
else:
return find_project_config_file(os.path.dirname(directory), max_levels-1)
#Returns true: success, false: no success
def copy_file_from_subprojects(file_name):
abs_file_name = os.path.abspath(file_name)
#Find project config file
cfgFile = find_project_config_file(os.path.dirname(abs_file_name))
if not cfgFile:
print("Could not find the config file of the WinCC project for '{}'!"
"".format(file_name))
return False
config = configparser.RawConfigParser(dict_type=MultiOrderedDict, strict=False)
if not config.read(cfgFile):
print("Could not parse config file '{}'!".format(cfgFile))
return False
projDirs = config.get("general", "proj_path", fallback=None)
if not projDirs:
print("Could not parse general:proj_path from '{}'!".format(cfgFile))
return False
projDirs = [ directory.replace('"', '') for directory in projDirs]
#Main and sub project directories
mainDir = projDirs.pop(-1)
subDirs = projDirs
rel_file_name = abs_file_name.replace(mainDir+"/", "")
for subDir in subDirs[::-1]: #subDirs are listed from least to highest priority
new_file_name = os.path.join(subDir, rel_file_name)
if os.path.isfile(new_file_name):
destDir = os.path.dirname(file_name)
if destDir != "":
os.makedirs(os.path.dirname(file_name), exist_ok=True) #Create dir if needed
shutil.copy2(new_file_name, file_name)
print("Copied '{}' to '{}'!".format(new_file_name, file_name))
return True
print("File '{}' not found in subprojects.".format(file_name))
return False
#Necessary to load duplicate keys/options from config file
from collections import OrderedDict
class MultiOrderedDict(OrderedDict):
def __setitem__(self, key, value):
if isinstance(value, list) and key in self:
self[key].extend(value)
else:
super(OrderedDict, self).__setitem__(key, value)
def keys(self):
return super(OrderedDict, self).keys()
if __name__ == "__main__":
allFiles = sys.argv[1:] #All provided files
#Existing and missing files in current directory
existingFiles = []
missingFiles = []
for file in allFiles:
if os.path.isfile(file) or copy_file_from_subprojects(file):
existingFiles.append(file)
else:
missingFiles.append(file)
subprocess.run(["vim"]+existingFiles+missingFiles)
| 32.516854
| 93
| 0.666551
|
import sys
import os
import configparser
import shutil
import subprocess
def find_project_config_file(directory, max_levels = 5):
if max_levels == 0 or directory == "/" or directory == "":
return None
fileName = os.path.join(directory, "config/config")
if os.path.isfile(fileName):
return fileName
else:
return find_project_config_file(os.path.dirname(directory), max_levels-1)
def copy_file_from_subprojects(file_name):
abs_file_name = os.path.abspath(file_name)
cfgFile = find_project_config_file(os.path.dirname(abs_file_name))
if not cfgFile:
print("Could not find the config file of the WinCC project for '{}'!"
"".format(file_name))
return False
config = configparser.RawConfigParser(dict_type=MultiOrderedDict, strict=False)
if not config.read(cfgFile):
print("Could not parse config file '{}'!".format(cfgFile))
return False
projDirs = config.get("general", "proj_path", fallback=None)
if not projDirs:
print("Could not parse general:proj_path from '{}'!".format(cfgFile))
return False
projDirs = [ directory.replace('"', '') for directory in projDirs]
#Main and sub project directories
mainDir = projDirs.pop(-1)
subDirs = projDirs
rel_file_name = abs_file_name.replace(mainDir+"/", "")
for subDir in subDirs[::-1]: #subDirs are listed from least to highest priority
new_file_name = os.path.join(subDir, rel_file_name)
if os.path.isfile(new_file_name):
destDir = os.path.dirname(file_name)
if destDir != "":
os.makedirs(os.path.dirname(file_name), exist_ok=True) #Create dir if needed
shutil.copy2(new_file_name, file_name)
print("Copied '{}' to '{}'!".format(new_file_name, file_name))
return True
print("File '{}' not found in subprojects.".format(file_name))
return False
#Necessary to load duplicate keys/options from config file
from collections import OrderedDict
class MultiOrderedDict(OrderedDict):
def __setitem__(self, key, value):
if isinstance(value, list) and key in self:
self[key].extend(value)
else:
super(OrderedDict, self).__setitem__(key, value)
def keys(self):
return super(OrderedDict, self).keys()
if __name__ == "__main__":
allFiles = sys.argv[1:] #All provided files
#Existing and missing files in current directory
existingFiles = []
missingFiles = []
for file in allFiles:
if os.path.isfile(file) or copy_file_from_subprojects(file):
existingFiles.append(file)
else:
missingFiles.append(file)
subprocess.run(["vim"]+existingFiles+missingFiles)
| true
| true
|
1c48532d9b777c913fbc3fc8cf4210092c8650ef
| 101
|
py
|
Python
|
other/exawizards2019_b.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
other/exawizards2019_b.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
other/exawizards2019_b.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
n = int(input())
s = input()
if s.count('R') > s.count('B'):
print('Yes')
else:
print('No')
| 12.625
| 31
| 0.49505
|
n = int(input())
s = input()
if s.count('R') > s.count('B'):
print('Yes')
else:
print('No')
| true
| true
|
1c48542a1c9ecd55c6e00af6037038147a68539f
| 59,704
|
py
|
Python
|
sklearn/decomposition/_dict_learning.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T18:06:44.000Z
|
2021-05-25T18:06:44.000Z
|
sklearn/decomposition/_dict_learning.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/decomposition/_dict_learning.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | null | null | null |
""" Dictionary learning.
"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
import warnings
from math import ceil
import numpy as np
from scipy import linalg
from joblib import Parallel, effective_n_jobs
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches)
from ..utils.extmath import randomized_svd, row_norms, svd_flip
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _check_positive_coding(method, positive):
if positive and method in ["omp", "lars"]:
raise ValueError(
"Positive constraint not supported for '{}' "
"coding method.".format(method)
)
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0,
positive=False):
"""Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : ndarray of shape (n_components, n_components) or None
Precomputed Gram matrix, `dictionary * dictionary'`
gram can be `None` if method is 'threshold'.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary * X'`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
regularization : int or float, default=None
The regularization parameter. It corresponds to alpha when
algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
Otherwise it corresponds to `n_nonzero_coefs`.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
check_input : bool, default=True
If `False`, the input arrays `X` and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive: bool, default=False
Whether to enforce a positivity constraint on the sparse code.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_components, n_features)
The sparse codes.
See Also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if dictionary.shape[1] != X.shape[1]:
raise ValueError("Dictionary and X have different numbers of features:"
"dictionary.shape: {} X.shape{}".format(
dictionary.shape, X.shape))
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
_check_positive_coding(algorithm, positive)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False,
positive=positive, max_iter=max_iter)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True,
positive=positive)
if init is not None:
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
if positive:
np.clip(new_code, 0, None, out=new_code)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
if new_code.ndim != 2:
return new_code.reshape(n_samples, n_components)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, *, gram=None, cov=None,
algorithm='lasso_lars', n_nonzero_coefs=None, alpha=None,
copy_cov=True, init=None, max_iter=1000, n_jobs=None,
check_input=True, verbose=0, positive=False):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : ndarray of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary' * X`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`n_nonzero_coefs=int(n_features / 10)`.
alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
check_input : bool, default=True
If `False`, the input arrays X and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive : bool, default=False
Whether to enforce positivity when finding the encoding.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse codes
See Also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if effective_n_jobs(n_jobs) == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive)
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(dictionary, Y, code, A=None, B=None, verbose=False,
random_state=None, positive=False):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_samples, n_features)
Data matrix.
code : ndarray of shape (n_samples, n_components)
Sparse coding of the data against which to optimize the dictionary.
A : ndarray of shape (n_components, n_components), default=None
Together with `B`, sufficient stats of the online model to update the
dictionary.
B : ndarray of shape (n_features, n_components), default=None
Together with `A`, sufficient stats of the online model to update the
dictionary.
verbose: bool, default=False
Degree of output the procedure will print.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
"""
n_samples, n_components = code.shape
random_state = check_random_state(random_state)
if A is None:
A = code.T @ code
if B is None:
B = Y.T @ code
n_unused = 0
for k in range(n_components):
if A[k, k] > 1e-6:
# 1e-6 is arbitrary but consistent with the spams implementation
dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
else:
# kth atom is almost never used -> sample a new one from the data
newd = Y[random_state.choice(n_samples)]
# add small noise to avoid making the sparse coding ill conditioned
noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if positive:
np.clip(dictionary[k], 0, None, out=dictionary[k])
# Projection on the constraint set ||V_k|| == 1
dictionary[k] /= linalg.norm(dictionary[k])
if verbose and n_unused > 0:
print(f"{n_unused} unused atoms resampled.")
def dict_learning(X, n_components, *, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=None, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False, positive_dict=False,
positive_code=False, method_max_iter=1000):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
n_components : int
Number of dictionary atoms to extract.
alpha : int
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
The method used:
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the sparse code for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
callback : callable, default=None
Callable that gets invoked every five iterations
verbose : bool, default=False
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : ndarray of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See Also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
_check_positive_coding(method, positive_code)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
# flip the initial code's sign to enforce deterministic output
code, dictionary = svd_flip(code, dictionary)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict better suited for the sparse coding which is the
# bottleneck of this algorithm.
dictionary = np.asfortranarray(dictionary)
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs, positive=positive_code,
max_iter=method_max_iter, verbose=verbose)
# Update dictionary in place
_update_dict(dictionary, X, code, verbose=verbose,
random_state=random_state, positive=positive_dict)
# Cost function
current_cost = (0.5 * np.sum((X - code @ dictionary)**2)
+ alpha * np.sum(np.abs(code)))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, *, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True,
n_jobs=None, method='lars', iter_offset=0,
random_state=None, return_inner_stats=False,
inner_stats=None, return_n_iter=False,
positive_dict=False, positive_code=False,
method_max_iter=1000):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
n_components : int, default=2
Number of dictionary atoms to extract.
alpha : float, default=1
Sparsity controlling parameter.
n_iter : int, default=100
Number of mini-batch iterations to perform.
return_code : bool, default=True
Whether to also return the code U or just the dictionary `V`.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios.
callback : callable, default=None
callable that gets invoked every five iterations.
batch_size : int, default=3
The number of samples to take in each batch.
verbose : bool, default=False
To control the verbosity of the procedure.
shuffle : bool, default=True
Whether to shuffle the data before splitting it in batches.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default=0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
return_inner_stats : bool, default=False
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If `return_inner_stats` is `True`, `return_code` is
ignored.
inner_stats : tuple of (A, B) ndarrays, default=None
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
`A` `(n_components, n_components)` is the dictionary covariance matrix.
`B` `(n_features, n_components)` is the data approximation matrix.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform when solving the lasso problem.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components),
The sparse code (only returned if `return_code=True`).
dictionary : ndarray of shape (n_components, n_features),
The solutions to the dictionary learning problem.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See Also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
_check_positive_coding(method, positive_code)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
# Fortran-order dict better suited for the sparse coding which is the
# bottleneck of this algorithm.
dictionary = check_array(dictionary, order='F', dtype=np.float64,
copy=False)
dictionary = np.require(dictionary, requirements='W')
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary, algorithm=method,
alpha=alpha, n_jobs=n_jobs,
check_input=False,
positive=positive_code,
max_iter=method_max_iter, verbose=verbose)
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code.T, this_code)
B *= beta
B += np.dot(this_X.T, this_code)
# Update dictionary in place
_update_dict(dictionary, this_X, this_code, A, B, verbose=verbose,
random_state=random_state, positive=positive_dict)
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary, (A, B), ii - iter_offset + 1
else:
return dictionary, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False,
positive=positive_code, max_iter=method_max_iter,
verbose=verbose)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary, ii - iter_offset + 1
else:
return code, dictionary
if return_n_iter:
return dictionary, ii - iter_offset + 1
else:
return dictionary
class _BaseSparseCoding(TransformerMixin):
"""Base class from SparseCoder and DictionaryLearning algorithms."""
def __init__(self, transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter):
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.transform_max_iter = transform_max_iter
self.split_sign = split_sign
self.n_jobs = n_jobs
self.positive_code = positive_code
def _transform(self, X, dictionary):
"""Private method allowing to accomodate both DictionaryLearning and
SparseCoder."""
X = self._validate_data(X, reset=False)
# transform_alpha has to be changed in _transform
# this is done for consistency with the value of alpha
if (hasattr(self, "alpha") and self.alpha != 1. and
self.transform_alpha is None):
warnings.warn("By default transform_alpha will be equal to"
"alpha instead of 1.0 starting from version 1.2",
FutureWarning)
transform_alpha = 1. # TODO change to self.alpha in 1.2
else:
transform_alpha = self.transform_alpha
code = sparse_encode(
X, dictionary, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=transform_alpha, max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, positive=self.positive_code)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
def transform(self, X):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
return self._transform(X, self.components_)
class SparseCoder(_BaseSparseCoding, BaseEstimator):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution;
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
the estimated components are sparse;
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`lasso_lars`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The unchanged dictionary atoms.
.. deprecated:: 0.24
This attribute is deprecated in 0.24 and will be removed in
1.1 (renaming of 0.26). Use `dictionary` instead.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import SparseCoder
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
... [[0, 1, 0],
... [-1, -1, 2],
... [1, 1, 1],
... [0, 1, 1],
... [0, 2, 1]],
... dtype=np.float64
... )
>>> coder = SparseCoder(
... dictionary=dictionary, transform_algorithm='lasso_lars',
... transform_alpha=1e-10,
... )
>>> coder.transform(X)
array([[ 0., 0., -1., 0., 0.],
[ 0., 1., 1., 0., 0.]])
See Also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
_required_parameters = ["dictionary"]
def __init__(self, dictionary, *, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=None, positive_code=False,
transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter
)
self.dictionary = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : Ignored
y : Ignored
Returns
-------
self : object
"""
return self
@deprecated("The attribute 'components_' is deprecated " # type: ignore
"in 0.24 and will be removed in 1.1 (renaming of 0.26). Use "
"the 'dictionary' instead.")
@property
def components_(self):
return self.dictionary
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
y : Ignored
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
return super()._transform(X, self.dictionary)
def _more_tags(self):
return {"requires_fit": False}
@property
def n_components_(self):
return self.dictionary.shape[0]
@property
def n_features_in_(self):
return self.dictionary.shape[1]
class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int, default=n_features
Number of dictionary elements to extract.
alpha : float, default=1.0
Sparsity controlling parameter.
max_iter : int, default=1000
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for numerical error.
fit_algorithm : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (:func:`~sklearn.linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be
faster if the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(:func:`~sklearn.linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution.
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`
will be faster if the estimated components are sparse.
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution.
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and
`algorithm='omp'`. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `None`, defaults to `alpha`.
n_jobs : int or None, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the code, for warm restart. Only used if `code_init`
and `dict_init` are not None.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial values for the dictionary, for warm restart. Only used if
`code_init` and `dict_init` are not None.
verbose : bool, default=False
To control the verbosity of the procedure.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import DictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> dict_learner = DictionaryLearning(
... n_components=15, transform_algorithm='lasso_lars', random_state=42,
... )
>>> X_transformed = dict_learner.fit_transform(X)
We can check the level of sparsity of `X_transformed`:
>>> np.mean(X_transformed == 0)
0.87...
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
0.08...
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See Also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, *, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=None, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None, positive_code=False,
positive_dict=False, transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter
)
self.n_components = n_components
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` in the number of samples
and `n_features` is the number of features.
y : Ignored
Returns
-------
self : object
Returns the object itself.
"""
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, alpha=self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int, default=None
Number of dictionary elements to extract.
alpha : float, default=1
Sparsity controlling parameter.
n_iter : int, default=1000
Total number of iterations to perform.
fit_algorithm : {'lars', 'cd'}, default='lars'
The algorithm used:
- `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`)
- `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
batch_size : int, default=3
Number of samples in each mini-batch.
shuffle : bool, default=True
Whether to shuffle the samples before forming batches.
dict_init : ndarray of shape (n_components, n_features), default=None
initial value of the dictionary for warm restart scenarios
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution.
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
if the estimated components are sparse.
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution.
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and
`algorithm='omp'`. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `None`, defaults to `alpha`.
verbose : bool, default=False
To control the verbosity of the procedure.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Components extracted from the data.
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
`A` `(n_components, n_components)` is the dictionary covariance matrix.
`B` `(n_features, n_components)` is the data approximation matrix.
n_iter_ : int
Number of iterations run.
iter_offset_ : int
The number of iteration on data batches that has been
performed before.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
number generattor or by `np.random`.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import MiniBatchDictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42)
>>> dict_learner = MiniBatchDictionaryLearning(
... n_components=15, transform_algorithm='lasso_lars', random_state=42,
... )
>>> X_transformed = dict_learner.fit_transform(X)
We can check the level of sparsity of `X_transformed`:
>>> np.mean(X_transformed == 0)
0.86...
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
0.07...
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See Also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, *, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=None, batch_size=3, shuffle=True,
dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None,
positive_code=False, positive_dict=False,
transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs, transform_alpha,
split_sign, n_jobs, positive_code, transform_max_iter
)
self.n_components = n_components
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
self.random_state_ = random_state
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
y : Ignored
iter_offset : int, default=None
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
X = self._validate_data(X, reset=(iter_offset == 0))
U, (A, B) = dict_learning_online(
X, self.n_components, alpha=self.alpha,
n_iter=1, method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + 1
return self
| 36.228155
| 79
| 0.62222
|
import time
import sys
import itertools
import warnings
from math import ceil
import numpy as np
from scipy import linalg
from joblib import Parallel, effective_n_jobs
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches)
from ..utils.extmath import randomized_svd, row_norms, svd_flip
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _check_positive_coding(method, positive):
if positive and method in ["omp", "lars"]:
raise ValueError(
"Positive constraint not supported for '{}' "
"coding method.".format(method)
)
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0,
positive=False):
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if dictionary.shape[1] != X.shape[1]:
raise ValueError("Dictionary and X have different numbers of features:"
"dictionary.shape: {} X.shape{}".format(
dictionary.shape, X.shape))
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
_check_positive_coding(algorithm, positive)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False,
positive=positive, max_iter=max_iter)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True,
positive=positive)
if init is not None:
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
if positive:
np.clip(new_code, 0, None, out=new_code)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
if new_code.ndim != 2:
return new_code.reshape(n_samples, n_components)
return new_code
def sparse_encode(X, dictionary, *, gram=None, cov=None,
algorithm='lasso_lars', n_nonzero_coefs=None, alpha=None,
copy_cov=True, init=None, max_iter=1000, n_jobs=None,
check_input=True, verbose=0, positive=False):
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if effective_n_jobs(n_jobs) == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive)
return code
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(dictionary, Y, code, A=None, B=None, verbose=False,
random_state=None, positive=False):
n_samples, n_components = code.shape
random_state = check_random_state(random_state)
if A is None:
A = code.T @ code
if B is None:
B = Y.T @ code
n_unused = 0
for k in range(n_components):
if A[k, k] > 1e-6:
dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
else:
newd = Y[random_state.choice(n_samples)]
noise_level = 0.01 * (newd.std() or 1)
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if positive:
np.clip(dictionary[k], 0, None, out=dictionary[k])
dictionary[k] /= linalg.norm(dictionary[k])
if verbose and n_unused > 0:
print(f"{n_unused} unused atoms resampled.")
def dict_learning(X, n_components, *, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=None, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False, positive_dict=False,
positive_code=False, method_max_iter=1000):
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
_check_positive_coding(method, positive_code)
method = 'lasso_' + method
t0 = time.time()
alpha = float(alpha)
random_state = check_random_state(random_state)
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
# flip the initial code's sign to enforce deterministic output
code, dictionary = svd_flip(code, dictionary)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.asfortranarray(dictionary)
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs, positive=positive_code,
max_iter=method_max_iter, verbose=verbose)
_update_dict(dictionary, X, code, verbose=verbose,
random_state=random_state, positive=positive_dict)
current_cost = (0.5 * np.sum((X - code @ dictionary)**2)
+ alpha * np.sum(np.abs(code)))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
if dE < tol * errors[-1]:
if verbose == 1:
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, *, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True,
n_jobs=None, method='lars', iter_offset=0,
random_state=None, return_inner_stats=False,
inner_stats=None, return_n_iter=False,
positive_dict=False, positive_code=False,
method_max_iter=1000):
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
_check_positive_coding(method, positive_code)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
alpha = float(alpha)
random_state = check_random_state(random_state)
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary, order='F', dtype=np.float64,
copy=False)
dictionary = np.require(dictionary, requirements='W')
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
if inner_stats is None:
A = np.zeros((n_components, n_components))
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary, algorithm=method,
alpha=alpha, n_jobs=n_jobs,
check_input=False,
positive=positive_code,
max_iter=method_max_iter, verbose=verbose)
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code.T, this_code)
B *= beta
B += np.dot(this_X.T, this_code)
_update_dict(dictionary, this_X, this_code, A, B, verbose=verbose,
random_state=random_state, positive=positive_dict)
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary, (A, B), ii - iter_offset + 1
else:
return dictionary, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False,
positive=positive_code, max_iter=method_max_iter,
verbose=verbose)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary, ii - iter_offset + 1
else:
return code, dictionary
if return_n_iter:
return dictionary, ii - iter_offset + 1
else:
return dictionary
class _BaseSparseCoding(TransformerMixin):
def __init__(self, transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter):
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.transform_max_iter = transform_max_iter
self.split_sign = split_sign
self.n_jobs = n_jobs
self.positive_code = positive_code
def _transform(self, X, dictionary):
X = self._validate_data(X, reset=False)
if (hasattr(self, "alpha") and self.alpha != 1. and
self.transform_alpha is None):
warnings.warn("By default transform_alpha will be equal to"
"alpha instead of 1.0 starting from version 1.2",
FutureWarning)
transform_alpha = 1.
else:
transform_alpha = self.transform_alpha
code = sparse_encode(
X, dictionary, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=transform_alpha, max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, positive=self.positive_code)
if self.split_sign:
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
def transform(self, X):
check_is_fitted(self)
return self._transform(X, self.components_)
class SparseCoder(_BaseSparseCoding, BaseEstimator):
_required_parameters = ["dictionary"]
def __init__(self, dictionary, *, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=None, positive_code=False,
transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter
)
self.dictionary = dictionary
def fit(self, X, y=None):
return self
@deprecated("The attribute 'components_' is deprecated "
"in 0.24 and will be removed in 1.1 (renaming of 0.26). Use "
"the 'dictionary' instead.")
@property
def components_(self):
return self.dictionary
def transform(self, X, y=None):
return super()._transform(X, self.dictionary)
def _more_tags(self):
return {"requires_fit": False}
@property
def n_components_(self):
return self.dictionary.shape[0]
@property
def n_features_in_(self):
return self.dictionary.shape[1]
class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
def __init__(self, n_components=None, *, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=None, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None, positive_code=False,
positive_dict=False, transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs, positive_code,
transform_max_iter
)
self.n_components = n_components
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, alpha=self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
self.error_ = E
return self
class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
def __init__(self, n_components=None, *, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=None, batch_size=3, shuffle=True,
dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None,
positive_code=False, positive_dict=False,
transform_max_iter=1000):
super().__init__(
transform_algorithm, transform_n_nonzero_coefs, transform_alpha,
split_sign, n_jobs, positive_code, transform_max_iter
)
self.n_components = n_components
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
self.random_state_ = random_state
return self
def partial_fit(self, X, y=None, iter_offset=None):
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
X = self._validate_data(X, reset=(iter_offset == 0))
U, (A, B) = dict_learning_online(
X, self.n_components, alpha=self.alpha,
n_iter=1, method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats,
positive_dict=self.positive_dict,
positive_code=self.positive_code)
self.components_ = U
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + 1
return self
| true
| true
|
1c485451846b20e51102909630e63adf8b06d286
| 366
|
py
|
Python
|
UchuujinPatcher/patch_eboot.py
|
colebob9/UchuujinPatcher
|
1f5880240e7b3da329d4c8334fc23df92eece402
|
[
"MIT"
] | null | null | null |
UchuujinPatcher/patch_eboot.py
|
colebob9/UchuujinPatcher
|
1f5880240e7b3da329d4c8334fc23df92eece402
|
[
"MIT"
] | null | null | null |
UchuujinPatcher/patch_eboot.py
|
colebob9/UchuujinPatcher
|
1f5880240e7b3da329d4c8334fc23df92eece402
|
[
"MIT"
] | null | null | null |
# https://pypi.org/project/bsdiff4/
# Use bsdiff and eboot patch in main repo
# Needs wheel and VC++ 2015 v140 toolset (choco install vcbuildtools)
# on Windows, look into Linux / Docker
import bsdiff4
bsdiff4.file_patch("isofiles/EBOOT.BIN",
"isofiles/EBOOT_patched.BIN",
"main_repo/EBOOT.BIN.patch"
)
| 26.142857
| 69
| 0.63388
|
import bsdiff4
bsdiff4.file_patch("isofiles/EBOOT.BIN",
"isofiles/EBOOT_patched.BIN",
"main_repo/EBOOT.BIN.patch"
)
| true
| true
|
1c485455fea34089e7998b97dab5da52c8ae328a
| 493
|
py
|
Python
|
queries/q5.py
|
csruiliu/tpch-pyspark
|
ec707ddd8a5e917b08e0ee1ce320b826fa6aa977
|
[
"MIT"
] | null | null | null |
queries/q5.py
|
csruiliu/tpch-pyspark
|
ec707ddd8a5e917b08e0ee1ce320b826fa6aa977
|
[
"MIT"
] | null | null | null |
queries/q5.py
|
csruiliu/tpch-pyspark
|
ec707ddd8a5e917b08e0ee1ce320b826fa6aa977
|
[
"MIT"
] | null | null | null |
query = """
SELECT N_NAME,
sum(L_EXTENDEDPRICE * (1 - L_DISCOUNT)) AS REVENUE
FROM customer,
orders,
lineitem,
supplier,
nation,
region
WHERE C_CUSTKEY = O_CUSTKEY
AND L_ORDERKEY = O_ORDERKEY
AND L_SUPPKEY = S_SUPPKEY
AND C_NATIONKEY = S_NATIONKEY
AND S_NATIONKEY = N_NATIONKEY
AND N_REGIONKEY = R_REGIONKEY
AND R_NAME = 'ASIA'
AND O_ORDERDATE >= '1994-01-01'
AND O_ORDERDATE < '1995-01-01'
GROUP BY N_NAME
ORDER BY REVENUE desc
"""
| 22.409091
| 59
| 0.6714
|
query = """
SELECT N_NAME,
sum(L_EXTENDEDPRICE * (1 - L_DISCOUNT)) AS REVENUE
FROM customer,
orders,
lineitem,
supplier,
nation,
region
WHERE C_CUSTKEY = O_CUSTKEY
AND L_ORDERKEY = O_ORDERKEY
AND L_SUPPKEY = S_SUPPKEY
AND C_NATIONKEY = S_NATIONKEY
AND S_NATIONKEY = N_NATIONKEY
AND N_REGIONKEY = R_REGIONKEY
AND R_NAME = 'ASIA'
AND O_ORDERDATE >= '1994-01-01'
AND O_ORDERDATE < '1995-01-01'
GROUP BY N_NAME
ORDER BY REVENUE desc
"""
| true
| true
|
1c48545cf2834159a424d70b7811f00db3a47e6c
| 3,089
|
py
|
Python
|
bassl/pretrain/utils/metric.py
|
kakaobrain/bassl
|
551fe94343debf60a64c787be6752284153a0f7a
|
[
"Apache-2.0"
] | 55
|
2022-01-17T02:18:40.000Z
|
2022-03-25T08:24:28.000Z
|
bassl/pretrain/utils/metric.py
|
kakaobrain/bassl
|
551fe94343debf60a64c787be6752284153a0f7a
|
[
"Apache-2.0"
] | 5
|
2022-01-18T01:59:49.000Z
|
2022-03-24T00:20:35.000Z
|
bassl/pretrain/utils/metric.py
|
kakaobrain/bassl
|
551fe94343debf60a64c787be6752284153a0f7a
|
[
"Apache-2.0"
] | 1
|
2022-01-23T10:50:15.000Z
|
2022-01-23T10:50:15.000Z
|
"""
- kNN Precision
"""
from collections import defaultdict
import torch
import torchmetrics
class KnnPrecisionMetric(torchmetrics.Metric):
def __init__(self, top_k_list):
super().__init__(compute_on_step=False, dist_sync_on_step=True)
self.add_state("feat_data", default=[], dist_reduce_fx=None)
self.add_state("vids_data", default=[], dist_reduce_fx=None)
self.add_state("scene_data", default=[], dist_reduce_fx=None)
self.top_k_list = set(top_k_list)
self.max_k = max(self.top_k_list)
def update(self, vid, invideo_scene_id, feat):
assert isinstance(invideo_scene_id, torch.Tensor)
assert isinstance(vid, torch.Tensor)
assert isinstance(feat, torch.Tensor)
self.feat_data.append(feat)
self.vids_data.append(vid)
self.scene_data.append(invideo_scene_id)
def compute(self) -> torch.Tensor:
score = defaultdict(dict)
pool_feats = defaultdict(list)
pool_invideo_scene_id = defaultdict(list)
pool_gts = defaultdict(dict)
num_data = 0
for vid, invideo_scene_id, gathered_feat in zip(
self.vids_data, self.scene_data, self.feat_data
):
vid = vid.item()
invideo_scene_id = invideo_scene_id.item()
if invideo_scene_id not in pool_gts[vid]:
pool_gts[vid][invideo_scene_id] = set()
pool_gts[vid][invideo_scene_id].add(len(pool_feats[vid]))
pool_invideo_scene_id[vid].append(invideo_scene_id)
pool_feats[vid].append(gathered_feat)
num_data += 1
for top_k in self.top_k_list:
score[top_k] = {"correct": 0, "total": 0}
for vid, gt in pool_gts.items():
X = torch.stack(pool_feats[vid])
sim = torch.matmul(X, X.t())
sim = sim - 999 * torch.eye(sim.shape[0]).type_as(sim) # exclude self
indices = torch.argsort(sim, descending=True)
assert indices.shape[1] >= self.max_k, f"{indices.shape[1]} >= {self.max_k}"
indices = indices[:, : self.max_k]
for j in range(indices.shape[0]):
_cache = {"correct": 0, "total": 0}
_query_scene_id = pool_invideo_scene_id[vid][j]
for k in range(self.max_k):
if _query_scene_id in gt:
if indices[j][k].item() in gt[_query_scene_id]:
_cache["correct"] += 1
_cache["total"] += 1
if k + 1 in self.top_k_list and len(gt[_query_scene_id]) > k:
score[k + 1]["correct"] += _cache["correct"]
score[k + 1]["total"] += _cache["total"]
for top_k in self.top_k_list:
assert score[top_k]["total"] > 0
score[top_k]["precision"] = (
100.0 * score[top_k]["correct"] / score[top_k]["total"]
)
del X, sim, indices, pool_feats, pool_invideo_scene_id, pool_gts
torch.cuda.empty_cache()
return score
| 39.602564
| 88
| 0.583684
|
from collections import defaultdict
import torch
import torchmetrics
class KnnPrecisionMetric(torchmetrics.Metric):
def __init__(self, top_k_list):
super().__init__(compute_on_step=False, dist_sync_on_step=True)
self.add_state("feat_data", default=[], dist_reduce_fx=None)
self.add_state("vids_data", default=[], dist_reduce_fx=None)
self.add_state("scene_data", default=[], dist_reduce_fx=None)
self.top_k_list = set(top_k_list)
self.max_k = max(self.top_k_list)
def update(self, vid, invideo_scene_id, feat):
assert isinstance(invideo_scene_id, torch.Tensor)
assert isinstance(vid, torch.Tensor)
assert isinstance(feat, torch.Tensor)
self.feat_data.append(feat)
self.vids_data.append(vid)
self.scene_data.append(invideo_scene_id)
def compute(self) -> torch.Tensor:
score = defaultdict(dict)
pool_feats = defaultdict(list)
pool_invideo_scene_id = defaultdict(list)
pool_gts = defaultdict(dict)
num_data = 0
for vid, invideo_scene_id, gathered_feat in zip(
self.vids_data, self.scene_data, self.feat_data
):
vid = vid.item()
invideo_scene_id = invideo_scene_id.item()
if invideo_scene_id not in pool_gts[vid]:
pool_gts[vid][invideo_scene_id] = set()
pool_gts[vid][invideo_scene_id].add(len(pool_feats[vid]))
pool_invideo_scene_id[vid].append(invideo_scene_id)
pool_feats[vid].append(gathered_feat)
num_data += 1
for top_k in self.top_k_list:
score[top_k] = {"correct": 0, "total": 0}
for vid, gt in pool_gts.items():
X = torch.stack(pool_feats[vid])
sim = torch.matmul(X, X.t())
sim = sim - 999 * torch.eye(sim.shape[0]).type_as(sim)
indices = torch.argsort(sim, descending=True)
assert indices.shape[1] >= self.max_k, f"{indices.shape[1]} >= {self.max_k}"
indices = indices[:, : self.max_k]
for j in range(indices.shape[0]):
_cache = {"correct": 0, "total": 0}
_query_scene_id = pool_invideo_scene_id[vid][j]
for k in range(self.max_k):
if _query_scene_id in gt:
if indices[j][k].item() in gt[_query_scene_id]:
_cache["correct"] += 1
_cache["total"] += 1
if k + 1 in self.top_k_list and len(gt[_query_scene_id]) > k:
score[k + 1]["correct"] += _cache["correct"]
score[k + 1]["total"] += _cache["total"]
for top_k in self.top_k_list:
assert score[top_k]["total"] > 0
score[top_k]["precision"] = (
100.0 * score[top_k]["correct"] / score[top_k]["total"]
)
del X, sim, indices, pool_feats, pool_invideo_scene_id, pool_gts
torch.cuda.empty_cache()
return score
| true
| true
|
1c48545f0baaba0b798a29f73f40c45f2e843e2d
| 29,175
|
py
|
Python
|
venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_divider.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_divider.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_divider.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 11
|
2019-01-21T17:51:48.000Z
|
2021-08-10T07:04:33.000Z
|
"""
The axes_divider module provides helper classes to adjust the positions of
multiple axes at drawing time.
Divider: this is the class that is used to calculate the axes
position. It divides the given rectangular area into several sub
rectangles. You initialize the divider by setting the horizontal
and vertical lists of sizes that the division will be based on. You
then use the new_locator method, whose return value is a callable
object that can be used to set the axes_locator of the axes.
"""
from matplotlib import cbook
from matplotlib.axes import SubplotBase
from matplotlib.gridspec import SubplotSpec, GridSpec
import matplotlib.transforms as mtransforms
from . import axes_size as Size
class Divider:
"""
This class calculates the axes position. It
divides the given rectangular area into several
sub-rectangles. You initialize the divider by setting the
horizontal and vertical lists of sizes
(:mod:`mpl_toolkits.axes_grid.axes_size`) that the division will
be based on. You then use the new_locator method to create a
callable object that can be used as the axes_locator of the
axes.
"""
def __init__(self, fig, pos, horizontal, vertical,
aspect=None, anchor="C"):
"""
Parameters
----------
fig : Figure
pos : tuple of 4 floats
position of the rectangle that will be divided
horizontal : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for horizontal division
vertical : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for vertical division
aspect : bool
if True, the overall rectangular area is reduced
so that the relative part of the horizontal and
vertical scales have the same scale.
anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
placement of the reduced rectangle when *aspect* is True
"""
self._fig = fig
self._pos = pos
self._horizontal = horizontal
self._vertical = vertical
self._anchor = anchor
self._aspect = aspect
self._xrefindex = 0
self._yrefindex = 0
self._locator = None
def get_horizontal_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_horizontal()]
def get_vertical_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_vertical()]
def get_vsize_hsize(self):
from .axes_size import AddList
vsize = AddList(self.get_vertical())
hsize = AddList(self.get_horizontal())
return vsize, hsize
@staticmethod
def _calc_k(l, total_size):
rs_sum, as_sum = 0., 0.
for _rs, _as in l:
rs_sum += _rs
as_sum += _as
if rs_sum != 0.:
k = (total_size - as_sum) / rs_sum
return k
else:
return 0.
@staticmethod
def _calc_offsets(l, k):
offsets = [0.]
for _rs, _as in l:
offsets.append(offsets[-1] + _rs*k + _as)
return offsets
def set_position(self, pos):
"""
set the position of the rectangle.
Parameters
----------
pos : tuple of 4 floats
position of the rectangle that will be divided
"""
self._pos = pos
def get_position(self):
"return the position of the rectangle."
return self._pos
def set_anchor(self, anchor):
"""
Parameters
----------
anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
anchor position
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if len(anchor) != 2:
cbook._check_in_list(mtransforms.Bbox.coefs, anchor=anchor)
self._anchor = anchor
def get_anchor(self):
"return the anchor"
return self._anchor
def set_horizontal(self, h):
"""
Parameters
----------
h : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for horizontal division
"""
self._horizontal = h
def get_horizontal(self):
"return horizontal sizes"
return self._horizontal
def set_vertical(self, v):
"""
Parameters
----------
v : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for vertical division
"""
self._vertical = v
def get_vertical(self):
"return vertical sizes"
return self._vertical
def set_aspect(self, aspect=False):
"""
Parameters
----------
aspect : bool
"""
self._aspect = aspect
def get_aspect(self):
"return aspect"
return self._aspect
def set_locator(self, _locator):
self._locator = _locator
def get_locator(self):
return self._locator
def get_position_runtime(self, ax, renderer):
if self._locator is None:
return self.get_position()
else:
return self._locator(ax, renderer).bounds
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
axes
renderer
"""
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
hsizes = self.get_horizontal_sizes(renderer)
vsizes = self.get_vertical_sizes(renderer)
k_h = self._calc_k(hsizes, figW*w)
k_v = self._calc_k(vsizes, figH*h)
if self.get_aspect():
k = min(k_h, k_v)
ox = self._calc_offsets(hsizes, k)
oy = self._calc_offsets(vsizes, k)
ww = (ox[-1] - ox[0])/figW
hh = (oy[-1] - oy[0])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
else:
ox = self._calc_offsets(hsizes, k_h)
oy = self._calc_offsets(vsizes, k_v)
x0, y0 = x, y
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
def new_locator(self, nx, ny, nx1=None, ny1=None):
"""
Returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
"""
return AxesLocator(self, nx, ny, nx1, ny1)
def append_size(self, position, size):
if position == "left":
self._horizontal.insert(0, size)
self._xrefindex += 1
elif position == "right":
self._horizontal.append(size)
elif position == "bottom":
self._vertical.insert(0, size)
self._yrefindex += 1
elif position == "top":
self._vertical.append(size)
else:
cbook._check_in_list(["left", "right", "bottom", "top"],
position=position)
def add_auto_adjustable_area(self,
use_axes, pad=0.1,
adjust_dirs=None,
):
if adjust_dirs is None:
adjust_dirs = ["left", "right", "bottom", "top"]
from .axes_size import Padded, SizeFromFunc, GetExtentHelper
for d in adjust_dirs:
helper = GetExtentHelper(use_axes, d)
size = SizeFromFunc(helper)
padded_size = Padded(size, pad) # pad in inch
self.append_size(d, padded_size)
class AxesLocator:
"""
A simple callable object, initialized with AxesDivider class,
returns the position and size of the given cell.
"""
def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
"""
Parameters
----------
axes_divider : AxesDivider
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
"""
self._axes_divider = axes_divider
_xrefindex = axes_divider._xrefindex
_yrefindex = axes_divider._yrefindex
self._nx, self._ny = nx - _xrefindex, ny - _yrefindex
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
self._nx1 = nx1 - _xrefindex
self._ny1 = ny1 - _yrefindex
def __call__(self, axes, renderer):
_xrefindex = self._axes_divider._xrefindex
_yrefindex = self._axes_divider._yrefindex
return self._axes_divider.locate(self._nx + _xrefindex,
self._ny + _yrefindex,
self._nx1 + _xrefindex,
self._ny1 + _yrefindex,
axes,
renderer)
def get_subplotspec(self):
if hasattr(self._axes_divider, "get_subplotspec"):
return self._axes_divider.get_subplotspec()
else:
return None
class SubplotDivider(Divider):
"""
The Divider class whose rectangle area is specified as a subplot geometry.
"""
def __init__(self, fig, *args, horizontal=None, vertical=None,
aspect=None, anchor='C'):
"""
Parameters
----------
fig : `matplotlib.figure.Figure`
*args : tuple (*nrows*, *ncols*, *index*) or int
The array of subplots in the figure has dimensions ``(nrows,
ncols)``, and *index* is the index of the subplot being created.
*index* starts at 1 in the upper left corner and increases to the
right.
If *nrows*, *ncols*, and *index* are all single digit numbers, then
*args* can be passed as a single 3-digit number (e.g. 234 for
(2, 3, 4)).
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = map(int, s)
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit integer')
self._subplotspec = GridSpec(rows, cols)[num-1]
# num - 1 for converting from MATLAB to python indexing
elif len(args) == 3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
else:
self._subplotspec = GridSpec(rows, cols)[int(num)-1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError(f'Illegal argument(s) to subplot: {args}')
# total = rows*cols
# num -= 1 # convert from matlab to python indexing
# # i.e., num in range(0, total)
# if num >= total:
# raise ValueError( 'Subplot number exceeds total subplots')
# self._rows = rows
# self._cols = cols
# self._num = num
# self.update_params()
# sets self.fixbox
self.update_params()
pos = self.figbox.bounds
Divider.__init__(self, fig, pos, horizontal or [], vertical or [],
aspect=aspect, anchor=anchor)
def get_position(self):
"return the bounds of the subplot box"
self.update_params() # update self.figbox
return self.figbox.bounds
# def update_params(self):
# 'update the subplot position from fig.subplotpars'
# rows = self._rows
# cols = self._cols
# num = self._num
# pars = self.figure.subplotpars
# left = pars.left
# right = pars.right
# bottom = pars.bottom
# top = pars.top
# wspace = pars.wspace
# hspace = pars.hspace
# totWidth = right-left
# totHeight = top-bottom
# figH = totHeight/(rows + hspace*(rows-1))
# sepH = hspace*figH
# figW = totWidth/(cols + wspace*(cols-1))
# sepW = wspace*figW
# rowNum, colNum = divmod(num, cols)
# figBottom = top - (rowNum+1)*figH - rowNum*sepH
# figLeft = left + colNum*(figW + sepW)
# self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
# figW, figH)
def update_params(self):
"""Update the subplot position from fig.subplotpars."""
self.figbox = self.get_subplotspec().get_position(self.figure)
def get_geometry(self):
"""Get the subplot geometry, e.g., (2, 2, 3)."""
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1+1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
"""Change subplot geometry, e.g., from (1, 1, 1) to (2, 2, 3)."""
self._subplotspec = GridSpec(numrows, numcols)[num-1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
"""Get the SubplotSpec instance."""
return self._subplotspec
def set_subplotspec(self, subplotspec):
"""Set the SubplotSpec instance."""
self._subplotspec = subplotspec
class AxesDivider(Divider):
"""
Divider based on the pre-existing axes.
"""
def __init__(self, axes, xref=None, yref=None):
"""
Parameters
----------
axes : :class:`~matplotlib.axes.Axes`
xref
yref
"""
self._axes = axes
if xref is None:
self._xref = Size.AxesX(axes)
else:
self._xref = xref
if yref is None:
self._yref = Size.AxesY(axes)
else:
self._yref = yref
Divider.__init__(self, fig=axes.get_figure(), pos=None,
horizontal=[self._xref], vertical=[self._yref],
aspect=None, anchor="C")
def _get_new_axes(self, *, axes_class=None, **kwargs):
axes = self._axes
if axes_class is None:
if isinstance(axes, SubplotBase):
axes_class = axes._axes_class
else:
axes_class = type(axes)
return axes_class(axes.get_figure(), axes.get_position(original=True),
**kwargs)
def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
"""
Add a new axes on the right (or left) side of the main axes.
Parameters
----------
size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str
A width of the axes. If float or string is given, *from_any*
function is used to create the size, with *ref_size* set to AxesX
instance of the current axes.
pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str
Pad between the axes. It takes same argument as *size*.
pack_start : bool
If False, the new axes is appended at the end
of the list, i.e., it became the right-most axes. If True, it is
inserted at the start of the list, and becomes the left-most axes.
**kwargs
All extra keywords arguments are passed to the created axes.
If *axes_class* is given, the new axes will be created as an
instance of the given class. Otherwise, the same class of the
main axes will be used.
"""
if pad is None:
cbook.warn_deprecated(
"3.2", message="In a future version, 'pad' will default to "
"rcParams['figure.subplot.wspace']. Set pad=0 to keep the "
"old behavior.")
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad, fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, pad)
self._xrefindex += 1
else:
self._horizontal.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size, fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, size)
self._xrefindex += 1
locator = self.new_locator(nx=0, ny=self._yrefindex)
else:
self._horizontal.append(size)
locator = self.new_locator(
nx=len(self._horizontal) - 1, ny=self._yrefindex)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
"""
Add a new axes on the top (or bottom) side of the main axes.
Parameters
----------
size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str
A height of the axes. If float or string is given, *from_any*
function is used to create the size, with *ref_size* set to AxesX
instance of the current axes.
pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str
Pad between the axes. It takes same argument as *size*.
pack_start : bool
If False, the new axes is appended at the end
of the list, i.e., it became the right-most axes. If True, it is
inserted at the start of the list, and becomes the left-most axes.
**kwargs
All extra keywords arguments are passed to the created axes.
If *axes_class* is given, the new axes will be created as an
instance of the given class. Otherwise, the same class of the
main axes will be used.
"""
if pad is None:
cbook.warn_deprecated(
"3.2", message="In a future version, 'pad' will default to "
"rcParams['figure.subplot.hspace']. Set pad=0 to keep the "
"old behavior.")
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad, fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, pad)
self._yrefindex += 1
else:
self._vertical.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size, fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, size)
self._yrefindex += 1
locator = self.new_locator(nx=self._xrefindex, ny=0)
else:
self._vertical.append(size)
locator = self.new_locator(
nx=self._xrefindex, ny=len(self._vertical)-1)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def append_axes(self, position, size, pad=None, add_to_figure=True,
**kwargs):
"""
Create an axes at the given *position* with the same height
(or width) of the main axes.
*position*
["left"|"right"|"bottom"|"top"]
*size* and *pad* should be axes_grid.axes_size compatible.
"""
if position == "left":
ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)
elif position == "right":
ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)
elif position == "bottom":
ax = self.new_vertical(size, pad, pack_start=True, **kwargs)
elif position == "top":
ax = self.new_vertical(size, pad, pack_start=False, **kwargs)
else:
cbook._check_in_list(["left", "right", "bottom", "top"],
position=position)
if add_to_figure:
self._fig.add_axes(ax)
return ax
def get_aspect(self):
if self._aspect is None:
aspect = self._axes.get_aspect()
if aspect == "auto":
return False
else:
return True
else:
return self._aspect
def get_position(self):
if self._pos is None:
bbox = self._axes.get_position(original=True)
return bbox.bounds
else:
return self._pos
def get_anchor(self):
if self._anchor is None:
return self._axes.get_anchor()
else:
return self._anchor
def get_subplotspec(self):
if hasattr(self._axes, "get_subplotspec"):
return self._axes.get_subplotspec()
else:
return None
class HBoxDivider(SubplotDivider):
def __init__(self, fig, *args, **kwargs):
SubplotDivider.__init__(self, fig, *args, **kwargs)
@staticmethod
def _determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size):
n = len(equivalent_sizes)
import numpy as np
A = np.mat(np.zeros((n+1, n+1), dtype="d"))
B = np.zeros((n+1), dtype="d")
# AxK = B
# populated A
for i, (r, a) in enumerate(equivalent_sizes):
A[i, i] = r
A[i, -1] = -1
B[i] = -a
A[-1, :-1] = [r for r, a in appended_sizes]
B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])
karray_H = (A.I*np.mat(B).T).A1
karray = karray_H[:-1]
H = karray_H[-1]
if H > max_equivalent_size:
karray = ((max_equivalent_size -
np.array([a for r, a in equivalent_sizes]))
/ np.array([r for r, a in equivalent_sizes]))
return karray
@staticmethod
def _calc_offsets(appended_sizes, karray):
offsets = [0.]
for (r, a), k in zip(appended_sizes, karray):
offsets.append(offsets[-1] + r*k + a)
return offsets
def new_locator(self, nx, nx1=None):
"""
Create a new `~mpl_toolkits.axes_grid.axes_divider.AxesLocator` for
the specified cell.
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
"""
return AxesLocator(self, nx, 0, nx1, None)
def _locate(self, x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH):
"""
Parameters
----------
x
y
w
h
y_equivalent_sizes
x_appended_sizes
figW
figH
"""
equivalent_sizes = y_equivalent_sizes
appended_sizes = x_appended_sizes
max_equivalent_size = figH*h
total_appended_size = figW*w
karray = self._determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size)
ox = self._calc_offsets(appended_sizes, karray)
ww = (ox[-1] - ox[0])/figW
ref_h = equivalent_sizes[0]
hh = (karray[0]*ref_h[0] + ref_h[1])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
return x0, y0, ox, hh
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
Parameters
----------
axes_divider : AxesDivider
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
axes
renderer
"""
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
y_equivalent_sizes = self.get_vertical_sizes(renderer)
x_appended_sizes = self.get_horizontal_sizes(renderer)
x0, y0, ox, hh = self._locate(x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH)
if nx1 is None:
nx1 = nx+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0, hh
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class VBoxDivider(HBoxDivider):
"""
The Divider class whose rectangle area is specified as a subplot geometry.
"""
def new_locator(self, ny, ny1=None):
"""
Create a new `~mpl_toolkits.axes_grid.axes_divider.AxesLocator` for
the specified cell.
Parameters
----------
ny, ny1 : int
Integers specifying the row-position of the
cell. When *ny1* is None, a single *ny*-th row is
specified. Otherwise location of rows spanning between *ny*
to *ny1* (but excluding *ny1*-th row) is specified.
"""
return AxesLocator(self, 0, ny, None, ny1)
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
Parameters
----------
axes_divider : AxesDivider
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
axes
renderer
"""
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
x_equivalent_sizes = self.get_horizontal_sizes(renderer)
y_appended_sizes = self.get_vertical_sizes(renderer)
y0, x0, oy, ww = self._locate(y, x, h, w,
x_equivalent_sizes, y_appended_sizes,
figH, figW)
if ny1 is None:
ny1 = ny+1
x1, w1 = x0, ww
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
def make_axes_locatable(axes):
divider = AxesDivider(axes)
locator = divider.new_locator(nx=0, ny=0)
axes.set_axes_locator(locator)
return divider
def make_axes_area_auto_adjustable(ax,
use_axes=None, pad=0.1,
adjust_dirs=None):
if adjust_dirs is None:
adjust_dirs = ["left", "right", "bottom", "top"]
divider = make_axes_locatable(ax)
if use_axes is None:
use_axes = ax
divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,
adjust_dirs=adjust_dirs)
| 33.650519
| 79
| 0.55078
|
from matplotlib import cbook
from matplotlib.axes import SubplotBase
from matplotlib.gridspec import SubplotSpec, GridSpec
import matplotlib.transforms as mtransforms
from . import axes_size as Size
class Divider:
def __init__(self, fig, pos, horizontal, vertical,
aspect=None, anchor="C"):
self._fig = fig
self._pos = pos
self._horizontal = horizontal
self._vertical = vertical
self._anchor = anchor
self._aspect = aspect
self._xrefindex = 0
self._yrefindex = 0
self._locator = None
def get_horizontal_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_horizontal()]
def get_vertical_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_vertical()]
def get_vsize_hsize(self):
from .axes_size import AddList
vsize = AddList(self.get_vertical())
hsize = AddList(self.get_horizontal())
return vsize, hsize
@staticmethod
def _calc_k(l, total_size):
rs_sum, as_sum = 0., 0.
for _rs, _as in l:
rs_sum += _rs
as_sum += _as
if rs_sum != 0.:
k = (total_size - as_sum) / rs_sum
return k
else:
return 0.
@staticmethod
def _calc_offsets(l, k):
offsets = [0.]
for _rs, _as in l:
offsets.append(offsets[-1] + _rs*k + _as)
return offsets
def set_position(self, pos):
self._pos = pos
def get_position(self):
return self._pos
def set_anchor(self, anchor):
if len(anchor) != 2:
cbook._check_in_list(mtransforms.Bbox.coefs, anchor=anchor)
self._anchor = anchor
def get_anchor(self):
return self._anchor
def set_horizontal(self, h):
self._horizontal = h
def get_horizontal(self):
return self._horizontal
def set_vertical(self, v):
self._vertical = v
def get_vertical(self):
return self._vertical
def set_aspect(self, aspect=False):
self._aspect = aspect
def get_aspect(self):
return self._aspect
def set_locator(self, _locator):
self._locator = _locator
def get_locator(self):
return self._locator
def get_position_runtime(self, ax, renderer):
if self._locator is None:
return self.get_position()
else:
return self._locator(ax, renderer).bounds
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
hsizes = self.get_horizontal_sizes(renderer)
vsizes = self.get_vertical_sizes(renderer)
k_h = self._calc_k(hsizes, figW*w)
k_v = self._calc_k(vsizes, figH*h)
if self.get_aspect():
k = min(k_h, k_v)
ox = self._calc_offsets(hsizes, k)
oy = self._calc_offsets(vsizes, k)
ww = (ox[-1] - ox[0])/figW
hh = (oy[-1] - oy[0])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
else:
ox = self._calc_offsets(hsizes, k_h)
oy = self._calc_offsets(vsizes, k_v)
x0, y0 = x, y
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
def new_locator(self, nx, ny, nx1=None, ny1=None):
return AxesLocator(self, nx, ny, nx1, ny1)
def append_size(self, position, size):
if position == "left":
self._horizontal.insert(0, size)
self._xrefindex += 1
elif position == "right":
self._horizontal.append(size)
elif position == "bottom":
self._vertical.insert(0, size)
self._yrefindex += 1
elif position == "top":
self._vertical.append(size)
else:
cbook._check_in_list(["left", "right", "bottom", "top"],
position=position)
def add_auto_adjustable_area(self,
use_axes, pad=0.1,
adjust_dirs=None,
):
if adjust_dirs is None:
adjust_dirs = ["left", "right", "bottom", "top"]
from .axes_size import Padded, SizeFromFunc, GetExtentHelper
for d in adjust_dirs:
helper = GetExtentHelper(use_axes, d)
size = SizeFromFunc(helper)
padded_size = Padded(size, pad)
self.append_size(d, padded_size)
class AxesLocator:
def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
self._axes_divider = axes_divider
_xrefindex = axes_divider._xrefindex
_yrefindex = axes_divider._yrefindex
self._nx, self._ny = nx - _xrefindex, ny - _yrefindex
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
self._nx1 = nx1 - _xrefindex
self._ny1 = ny1 - _yrefindex
def __call__(self, axes, renderer):
_xrefindex = self._axes_divider._xrefindex
_yrefindex = self._axes_divider._yrefindex
return self._axes_divider.locate(self._nx + _xrefindex,
self._ny + _yrefindex,
self._nx1 + _xrefindex,
self._ny1 + _yrefindex,
axes,
renderer)
def get_subplotspec(self):
if hasattr(self._axes_divider, "get_subplotspec"):
return self._axes_divider.get_subplotspec()
else:
return None
class SubplotDivider(Divider):
def __init__(self, fig, *args, horizontal=None, vertical=None,
aspect=None, anchor='C'):
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = map(int, s)
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit integer')
self._subplotspec = GridSpec(rows, cols)[num-1]
elif len(args) == 3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
else:
self._subplotspec = GridSpec(rows, cols)[int(num)-1]
else:
raise ValueError(f'Illegal argument(s) to subplot: {args}')
self.update_params()
pos = self.figbox.bounds
Divider.__init__(self, fig, pos, horizontal or [], vertical or [],
aspect=aspect, anchor=anchor)
def get_position(self):
self.update_params()
return self.figbox.bounds
def update_params(self):
self.figbox = self.get_subplotspec().get_position(self.figure)
def get_geometry(self):
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1+1
def change_geometry(self, numrows, numcols, num):
self._subplotspec = GridSpec(numrows, numcols)[num-1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
return self._subplotspec
def set_subplotspec(self, subplotspec):
self._subplotspec = subplotspec
class AxesDivider(Divider):
def __init__(self, axes, xref=None, yref=None):
self._axes = axes
if xref is None:
self._xref = Size.AxesX(axes)
else:
self._xref = xref
if yref is None:
self._yref = Size.AxesY(axes)
else:
self._yref = yref
Divider.__init__(self, fig=axes.get_figure(), pos=None,
horizontal=[self._xref], vertical=[self._yref],
aspect=None, anchor="C")
def _get_new_axes(self, *, axes_class=None, **kwargs):
axes = self._axes
if axes_class is None:
if isinstance(axes, SubplotBase):
axes_class = axes._axes_class
else:
axes_class = type(axes)
return axes_class(axes.get_figure(), axes.get_position(original=True),
**kwargs)
def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
if pad is None:
cbook.warn_deprecated(
"3.2", message="In a future version, 'pad' will default to "
"rcParams['figure.subplot.wspace']. Set pad=0 to keep the "
"old behavior.")
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad, fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, pad)
self._xrefindex += 1
else:
self._horizontal.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size, fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, size)
self._xrefindex += 1
locator = self.new_locator(nx=0, ny=self._yrefindex)
else:
self._horizontal.append(size)
locator = self.new_locator(
nx=len(self._horizontal) - 1, ny=self._yrefindex)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
if pad is None:
cbook.warn_deprecated(
"3.2", message="In a future version, 'pad' will default to "
"rcParams['figure.subplot.hspace']. Set pad=0 to keep the "
"old behavior.")
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad, fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, pad)
self._yrefindex += 1
else:
self._vertical.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size, fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, size)
self._yrefindex += 1
locator = self.new_locator(nx=self._xrefindex, ny=0)
else:
self._vertical.append(size)
locator = self.new_locator(
nx=self._xrefindex, ny=len(self._vertical)-1)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def append_axes(self, position, size, pad=None, add_to_figure=True,
**kwargs):
if position == "left":
ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)
elif position == "right":
ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)
elif position == "bottom":
ax = self.new_vertical(size, pad, pack_start=True, **kwargs)
elif position == "top":
ax = self.new_vertical(size, pad, pack_start=False, **kwargs)
else:
cbook._check_in_list(["left", "right", "bottom", "top"],
position=position)
if add_to_figure:
self._fig.add_axes(ax)
return ax
def get_aspect(self):
if self._aspect is None:
aspect = self._axes.get_aspect()
if aspect == "auto":
return False
else:
return True
else:
return self._aspect
def get_position(self):
if self._pos is None:
bbox = self._axes.get_position(original=True)
return bbox.bounds
else:
return self._pos
def get_anchor(self):
if self._anchor is None:
return self._axes.get_anchor()
else:
return self._anchor
def get_subplotspec(self):
if hasattr(self._axes, "get_subplotspec"):
return self._axes.get_subplotspec()
else:
return None
class HBoxDivider(SubplotDivider):
def __init__(self, fig, *args, **kwargs):
SubplotDivider.__init__(self, fig, *args, **kwargs)
@staticmethod
def _determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size):
n = len(equivalent_sizes)
import numpy as np
A = np.mat(np.zeros((n+1, n+1), dtype="d"))
B = np.zeros((n+1), dtype="d")
for i, (r, a) in enumerate(equivalent_sizes):
A[i, i] = r
A[i, -1] = -1
B[i] = -a
A[-1, :-1] = [r for r, a in appended_sizes]
B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])
karray_H = (A.I*np.mat(B).T).A1
karray = karray_H[:-1]
H = karray_H[-1]
if H > max_equivalent_size:
karray = ((max_equivalent_size -
np.array([a for r, a in equivalent_sizes]))
/ np.array([r for r, a in equivalent_sizes]))
return karray
@staticmethod
def _calc_offsets(appended_sizes, karray):
offsets = [0.]
for (r, a), k in zip(appended_sizes, karray):
offsets.append(offsets[-1] + r*k + a)
return offsets
def new_locator(self, nx, nx1=None):
return AxesLocator(self, nx, 0, nx1, None)
def _locate(self, x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH):
equivalent_sizes = y_equivalent_sizes
appended_sizes = x_appended_sizes
max_equivalent_size = figH*h
total_appended_size = figW*w
karray = self._determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size)
ox = self._calc_offsets(appended_sizes, karray)
ww = (ox[-1] - ox[0])/figW
ref_h = equivalent_sizes[0]
hh = (karray[0]*ref_h[0] + ref_h[1])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
return x0, y0, ox, hh
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
y_equivalent_sizes = self.get_vertical_sizes(renderer)
x_appended_sizes = self.get_horizontal_sizes(renderer)
x0, y0, ox, hh = self._locate(x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH)
if nx1 is None:
nx1 = nx+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0, hh
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class VBoxDivider(HBoxDivider):
def new_locator(self, ny, ny1=None):
return AxesLocator(self, 0, ny, None, ny1)
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
x_equivalent_sizes = self.get_horizontal_sizes(renderer)
y_appended_sizes = self.get_vertical_sizes(renderer)
y0, x0, oy, ww = self._locate(y, x, h, w,
x_equivalent_sizes, y_appended_sizes,
figH, figW)
if ny1 is None:
ny1 = ny+1
x1, w1 = x0, ww
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
def make_axes_locatable(axes):
divider = AxesDivider(axes)
locator = divider.new_locator(nx=0, ny=0)
axes.set_axes_locator(locator)
return divider
def make_axes_area_auto_adjustable(ax,
use_axes=None, pad=0.1,
adjust_dirs=None):
if adjust_dirs is None:
adjust_dirs = ["left", "right", "bottom", "top"]
divider = make_axes_locatable(ax)
if use_axes is None:
use_axes = ax
divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,
adjust_dirs=adjust_dirs)
| true
| true
|
1c4854884ba21127c83dd2876aef95bd6d1e9f13
| 1,785
|
py
|
Python
|
data/p4VQE/R4/benchmark/startCirq672.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startCirq672.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startCirq672.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=9
c.append(cirq.Z.on(input_qubit[3])) # number=10
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.Z.on(input_qubit[1])) # number=8
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=6
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq672.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 28.333333
| 77
| 0.696359
|
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
from cirq.contrib.svg import SVGCircuit
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))
c.append(cirq.Z.on(input_qubit[3]))
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))
c.append(cirq.Z.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0]))
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq672.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| true
| true
|
1c4856012f6f123905c3fbdfe922f2752e1467ef
| 3,974
|
py
|
Python
|
ProjectEuler/Problems_051_100/P070_TotientPermutation.py
|
mqq-marek/ProjectEuler
|
3a865b32a655c5ba39bf58a4cb96cef0ffeccbbd
|
[
"MIT"
] | null | null | null |
ProjectEuler/Problems_051_100/P070_TotientPermutation.py
|
mqq-marek/ProjectEuler
|
3a865b32a655c5ba39bf58a4cb96cef0ffeccbbd
|
[
"MIT"
] | null | null | null |
ProjectEuler/Problems_051_100/P070_TotientPermutation.py
|
mqq-marek/ProjectEuler
|
3a865b32a655c5ba39bf58a4cb96cef0ffeccbbd
|
[
"MIT"
] | null | null | null |
"""
Euler's Totient function, phi(n), is used to determine the number of positive
numbers less than or equal to n which are relatively prime to n.
For example, as 1, 2, 4, 5, 7, and 8, are all less than nine and relatively prime to nine, phi(9)=6.
The number 1 is considered to be relatively prime to every positive number, so phi(1)=1.
Interestingly, phi(87109)=79180, and it can be seen that 87109 is a permutation of 79180.
Find the value of n, 1 < n < 10**7, for which phi(n) is a permutation of n and the ratio n/phi(n) produces a minimum.
"""
import time
from bisect import bisect_right
from fractions import Fraction
import math
from collections import Counter
from typing import Iterator
def eratosthenes_sieve(n):
"""Return primes <= n."""
def add_prime(k):
"""Add founded prime."""
p = k + k + 3
primes.append(p)
pos = k + p
while pos <= n:
numbers[pos] = 1
pos += p
numbers = [0] * (n + 1)
primes = [2]
for i in range(n):
if not numbers[i]:
add_prime(i)
return primes
primes = eratosthenes_sieve(10 ** 4)
def prime_divisors(num: int) -> Iterator[int]:
"""
Get all num prime divisors.
:param num: number for which we yields prime divisors
:yields: num prime divisors
"""
assert num > 0
start_num = num
sqrt_num = int(math.sqrt(num)) + 1
counter = 0
for p in primes:
while num % p == 0:
yield p
counter += 1
num //= p
if num == 1 or counter > 3:
return
if p > sqrt_num:
yield num
return
raise Exception(f"Primes too short for {start_num} -> {num}, Primes{len(primes)}/{primes[-1]}")
def totient(n):
""" Compute totient.
totient(prime**k) = p**k - p**(k-1)
totient(n*m) = totient(n) * totient(m) if n,m coprime.
"""
result = list(prime_divisors(n))
prime_power = Counter(result)
res = 1
for p, cnt in prime_power.items():
if cnt == 1:
res *= (p - 1)
else:
res = pow(p, cnt - 1) * (p - 1)
return res, result
def totient_loop_naive(n):
index = 0
ratio = Fraction(2, 1)
start = time.perf_counter()
cnt = 0
for i in range(17, n):
tt = totient(i)
t = tt[0]
if sorted(str(i)) == sorted(str(t)):
new_ratio = Fraction(i, t)
if new_ratio < ratio:
# print(cnt, i, i - index, t, tt[1], round(float(new_ratio),3), round(time.perf_counter() - start))
cnt += 1
start = time.perf_counter()
ratio = new_ratio
index = i
return index
def totient_guesser(n):
ratio = n
index = 0
n_sqrt = int(math.sqrt(n)) + 1
start = primes.index(149)
end = bisect_right(primes, n_sqrt) + 1
for i1 in range(start, end):
p1 = primes[i1]
for i2 in range(i1, len(primes)):
p2 = primes[i2]
p12 = p1 * p2
if p12 >= n:
break
phi = (p1-1) * (p2 - 1)
new_ratio = p12 / phi
if new_ratio < ratio and sorted(str(p12)) == sorted(str(phi)):
# print(p1, p2, new_ratio, p12)
ratio = new_ratio
index = p12
for i3 in range(i2, len(primes)):
p3 = primes[i3]
p123 = p12 * p3
if p123 >= n:
break
phi *= (p3 - 1)
new_ratio = p123 / phi
if new_ratio < ratio and sorted(str(p123)) == sorted(str(phi)):
# print(p1, p2, new_ratio, p12)
ratio = new_ratio
index = p123
return index
def totient_solver(n):
if n <= 76000:
return totient_loop_naive(n)
return totient_guesser(n)
print(primes)
#n = 10**7
n = int(input())
print(totient_solver(n))
| 26.671141
| 117
| 0.530448
|
import time
from bisect import bisect_right
from fractions import Fraction
import math
from collections import Counter
from typing import Iterator
def eratosthenes_sieve(n):
def add_prime(k):
p = k + k + 3
primes.append(p)
pos = k + p
while pos <= n:
numbers[pos] = 1
pos += p
numbers = [0] * (n + 1)
primes = [2]
for i in range(n):
if not numbers[i]:
add_prime(i)
return primes
primes = eratosthenes_sieve(10 ** 4)
def prime_divisors(num: int) -> Iterator[int]:
assert num > 0
start_num = num
sqrt_num = int(math.sqrt(num)) + 1
counter = 0
for p in primes:
while num % p == 0:
yield p
counter += 1
num //= p
if num == 1 or counter > 3:
return
if p > sqrt_num:
yield num
return
raise Exception(f"Primes too short for {start_num} -> {num}, Primes{len(primes)}/{primes[-1]}")
def totient(n):
result = list(prime_divisors(n))
prime_power = Counter(result)
res = 1
for p, cnt in prime_power.items():
if cnt == 1:
res *= (p - 1)
else:
res = pow(p, cnt - 1) * (p - 1)
return res, result
def totient_loop_naive(n):
index = 0
ratio = Fraction(2, 1)
start = time.perf_counter()
cnt = 0
for i in range(17, n):
tt = totient(i)
t = tt[0]
if sorted(str(i)) == sorted(str(t)):
new_ratio = Fraction(i, t)
if new_ratio < ratio:
cnt += 1
start = time.perf_counter()
ratio = new_ratio
index = i
return index
def totient_guesser(n):
ratio = n
index = 0
n_sqrt = int(math.sqrt(n)) + 1
start = primes.index(149)
end = bisect_right(primes, n_sqrt) + 1
for i1 in range(start, end):
p1 = primes[i1]
for i2 in range(i1, len(primes)):
p2 = primes[i2]
p12 = p1 * p2
if p12 >= n:
break
phi = (p1-1) * (p2 - 1)
new_ratio = p12 / phi
if new_ratio < ratio and sorted(str(p12)) == sorted(str(phi)):
ratio = new_ratio
index = p12
for i3 in range(i2, len(primes)):
p3 = primes[i3]
p123 = p12 * p3
if p123 >= n:
break
phi *= (p3 - 1)
new_ratio = p123 / phi
if new_ratio < ratio and sorted(str(p123)) == sorted(str(phi)):
ratio = new_ratio
index = p123
return index
def totient_solver(n):
if n <= 76000:
return totient_loop_naive(n)
return totient_guesser(n)
print(primes)
n = int(input())
print(totient_solver(n))
| true
| true
|
1c4857b3876c7f9d4b85021c0fe07b0789cd8808
| 842
|
py
|
Python
|
sls_api/scripts/reset_user_projects.py
|
slsfi/sls_gde_api
|
68c6342cc3af95d9cf5b87cf096fc03b7fd5e67d
|
[
"Apache-2.0"
] | null | null | null |
sls_api/scripts/reset_user_projects.py
|
slsfi/sls_gde_api
|
68c6342cc3af95d9cf5b87cf096fc03b7fd5e67d
|
[
"Apache-2.0"
] | null | null | null |
sls_api/scripts/reset_user_projects.py
|
slsfi/sls_gde_api
|
68c6342cc3af95d9cf5b87cf096fc03b7fd5e67d
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from sls_api.models import User
from sls_api import app
if __name__ == "__main__":
with app.app_context():
parser = argparse.ArgumentParser(description="Helper script to reset a Users projects")
parser.add_argument("email", help="User email address")
parser.add_argument("projects", help="User projects")
args = parser.parse_args()
success = User.reset_projects(args.email, args.projects)
if success is None:
print("Error during projects reset! Check API backend logs.")
sys.exit(1)
elif success:
print(f"Projects for user {args.email} successfully changed to {args.projects}!")
sys.exit(0)
else:
print(f"No user with the email {args.email} could be found!")
sys.exit(1)
| 33.68
| 95
| 0.640143
|
import argparse
import sys
from sls_api.models import User
from sls_api import app
if __name__ == "__main__":
with app.app_context():
parser = argparse.ArgumentParser(description="Helper script to reset a Users projects")
parser.add_argument("email", help="User email address")
parser.add_argument("projects", help="User projects")
args = parser.parse_args()
success = User.reset_projects(args.email, args.projects)
if success is None:
print("Error during projects reset! Check API backend logs.")
sys.exit(1)
elif success:
print(f"Projects for user {args.email} successfully changed to {args.projects}!")
sys.exit(0)
else:
print(f"No user with the email {args.email} could be found!")
sys.exit(1)
| true
| true
|
1c48585dd10eb78791eb0b974d23a9c5313b1493
| 3,340
|
py
|
Python
|
producerapril19/boto3/__init__.py
|
drwitt/AWS-lambda-NLP-project-4
|
a1cdcaee5cb8679bb86a25811e8323abd40fffcf
|
[
"Apache-2.0"
] | null | null | null |
producerapril19/boto3/__init__.py
|
drwitt/AWS-lambda-NLP-project-4
|
a1cdcaee5cb8679bb86a25811e8323abd40fffcf
|
[
"Apache-2.0"
] | 9
|
2021-03-19T03:06:53.000Z
|
2022-03-12T00:37:04.000Z
|
myvenv/lib/python3.6/site-packages/boto3/__init__.py
|
yog240597/saleor
|
b75a23827a4ec2ce91637f0afe6808c9d09da00a
|
[
"CC-BY-4.0"
] | 1
|
2021-04-06T15:08:09.000Z
|
2021-04-06T15:08:09.000Z
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.12.42'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
"""
Set up a default session, passing through any parameters to the session
constructor. There is no need to call this unless you wish to pass custom
parameters, because a default session will be created for you.
"""
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
"""
Add a stream handler for the given name and level to the logging module.
By default, this logs all boto3 messages to ``stdout``.
>>> import boto3
>>> boto3.set_stream_logger('boto3.resources', logging.INFO)
For debugging purposes a good choice is to set the stream logger to ``''``
which is equivalent to saying "log everything".
.. WARNING::
Be aware that when logging anything from ``'botocore'`` the full wire
trace will appear in your logs. If your payloads contain sensitive data
this should not be used in production.
:type name: string
:param name: Log name
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type format_string: str
:param format_string: Log message format
"""
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_default_session():
"""
Get the default session, creating one if needed.
:rtype: :py:class:`~boto3.session.Session`
:return: The default session
"""
if DEFAULT_SESSION is None:
setup_default_session()
return DEFAULT_SESSION
def client(*args, **kwargs):
"""
Create a low-level service client by name using the default session.
See :py:meth:`boto3.session.Session.client`.
"""
return _get_default_session().client(*args, **kwargs)
def resource(*args, **kwargs):
"""
Create a resource service client by name using the default session.
See :py:meth:`boto3.session.Session.resource`.
"""
return _get_default_session().resource(*args, **kwargs)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('boto3').addHandler(NullHandler())
| 30.09009
| 81
| 0.703593
|
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.12.42'
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_default_session():
if DEFAULT_SESSION is None:
setup_default_session()
return DEFAULT_SESSION
def client(*args, **kwargs):
return _get_default_session().client(*args, **kwargs)
def resource(*args, **kwargs):
return _get_default_session().resource(*args, **kwargs)
):
def emit(self, record):
pass
logging.getLogger('boto3').addHandler(NullHandler())
| true
| true
|
1c48586d5ac96b2e1374dcfec7c9d9e588473c51
| 3,235
|
py
|
Python
|
shcomplete/shell_scraper/settings.py
|
gy741/shell-complete
|
20ad82eb45015a79afc734f4cce2201b5fba3785
|
[
"Apache-2.0"
] | null | null | null |
shcomplete/shell_scraper/settings.py
|
gy741/shell-complete
|
20ad82eb45015a79afc734f4cce2201b5fba3785
|
[
"Apache-2.0"
] | null | null | null |
shcomplete/shell_scraper/settings.py
|
gy741/shell-complete
|
20ad82eb45015a79afc734f4cce2201b5fba3785
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for shell_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'shell_scraper'
SPIDER_MODULES = ['shell_scraper.spiders']
NEWSPIDER_MODULE = 'shell_scraper.spiders'
# Crawl responsibly by identifying yourself
# (and your website) on the user-agent
# USER_AGENT = 'shell_scraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest
# /topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,
# application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'shell_scraper.middlewares.ShellScraperSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'shell_scraper.middlewares.MyCustomDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'shell_scraper.pipelines.ShellScraperPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics
# /downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.052632
| 79
| 0.772179
|
BOT_NAME = 'shell_scraper'
SPIDER_MODULES = ['shell_scraper.spiders']
NEWSPIDER_MODULE = 'shell_scraper.spiders'
ROBOTSTXT_OBEY = False
# application/xml;q=0.9,*/*;q=0.8',
| true
| true
|
1c485b47804f4492c2b8ffc80ef6f14d8497cc7c
| 1,004
|
py
|
Python
|
tests/test_config_load.py
|
voidpp/magrathea-python-tools
|
0fc7460c827b02d8914411cedddadc23ccb3cc73
|
[
"MIT"
] | null | null | null |
tests/test_config_load.py
|
voidpp/magrathea-python-tools
|
0fc7460c827b02d8914411cedddadc23ccb3cc73
|
[
"MIT"
] | null | null | null |
tests/test_config_load.py
|
voidpp/magrathea-python-tools
|
0fc7460c827b02d8914411cedddadc23ccb3cc73
|
[
"MIT"
] | null | null | null |
import pytest
from voidpp_tools.mocks.file_system import mockfs
from voidpp_tools.json_config import JSONConfigLoader
from voidpp_tools.config_loader import ConfigFileNotFoundException
@mockfs(dict(etc = {'app1.json': u'{"the_answer": 42}'}))
def test_load_config_from_etc():
# Arrange
loader = JSONConfigLoader('')
# Act
data = loader.load("app1.json")
# Assert
assert data == dict(the_answer = 42)
@mockfs()
def test_load_config_file_not_found():
# Arrange
loader = JSONConfigLoader('')
# Act & Assert
with pytest.raises(ConfigFileNotFoundException):
loader.load("app1.json")
@mockfs(dict(
etc = {'app1.json': u'{"the_answer": 42}'},
home = dict(douglas = {'app1.json': u'{"the_question": "6*7"}'})
), user = 'douglas')
def test_load_config_nested():
# Arrange
loader = JSONConfigLoader('', nested = True)
# Act
data = loader.load("app1.json")
# Assert
assert data == dict(the_answer = 42, the_question = "6*7")
| 24.487805
| 68
| 0.670319
|
import pytest
from voidpp_tools.mocks.file_system import mockfs
from voidpp_tools.json_config import JSONConfigLoader
from voidpp_tools.config_loader import ConfigFileNotFoundException
@mockfs(dict(etc = {'app1.json': u'{"the_answer": 42}'}))
def test_load_config_from_etc():
loader = JSONConfigLoader('')
data = loader.load("app1.json")
assert data == dict(the_answer = 42)
@mockfs()
def test_load_config_file_not_found():
loader = JSONConfigLoader('')
with pytest.raises(ConfigFileNotFoundException):
loader.load("app1.json")
@mockfs(dict(
etc = {'app1.json': u'{"the_answer": 42}'},
home = dict(douglas = {'app1.json': u'{"the_question": "6*7"}'})
), user = 'douglas')
def test_load_config_nested():
loader = JSONConfigLoader('', nested = True)
data = loader.load("app1.json")
assert data == dict(the_answer = 42, the_question = "6*7")
| true
| true
|
1c485b6e79e47b54e18602864ca41cb848d6dcf1
| 477
|
py
|
Python
|
Python/Algorithms/1002.py
|
DimitrisJim/leetcode_solutions
|
765ea578748f8c9b21243dec9dc8a16163e85c0c
|
[
"Unlicense"
] | 2
|
2021-01-15T17:22:54.000Z
|
2021-05-16T19:58:02.000Z
|
Python/Algorithms/1002.py
|
DimitrisJim/leetcode_solutions
|
765ea578748f8c9b21243dec9dc8a16163e85c0c
|
[
"Unlicense"
] | null | null | null |
Python/Algorithms/1002.py
|
DimitrisJim/leetcode_solutions
|
765ea578748f8c9b21243dec9dc8a16163e85c0c
|
[
"Unlicense"
] | null | null | null |
from collections import Counter
class Solution:
# 40 - 92.66, 14.3 - 54.19
def commonChars(self, A):
# Build counter of characters
Counter_ = Counter
commons = Counter_(A[0])
for i in range(1, len(A)):
# In-place intersection of minimum of elements.
commons &= Counter_(A[i])
# bail whenever it becomes empty.
if not commons:
return []
return commons.elements()
| 28.058824
| 60
| 0.555556
|
from collections import Counter
class Solution:
def commonChars(self, A):
Counter_ = Counter
commons = Counter_(A[0])
for i in range(1, len(A)):
commons &= Counter_(A[i])
if not commons:
return []
return commons.elements()
| true
| true
|
1c485cf9254aebb6da185f4c1cd864b675899f50
| 3,250
|
py
|
Python
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/aio/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/aio/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/aio/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class ApplicationInsightsManagementClientConfiguration(Configuration):
"""Configuration for ApplicationInsightsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2015-05-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 48.507463
| 134
| 0.701846
|
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class ApplicationInsightsManagementClientConfiguration(Configuration):
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2015-05-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| true
| true
|
1c485d1431e0cce094796e78eee222184717bdb4
| 2,473
|
py
|
Python
|
userbot/utils/tools.py
|
ronaldyganteng/WeebProject
|
d630cda9f79fafd83453650e414aa59ae136303e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-05-29T05:31:53.000Z
|
2021-05-29T05:31:53.000Z
|
userbot/utils/tools.py
|
ronaldyganteng/WeebProject
|
d630cda9f79fafd83453650e414aa59ae136303e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/utils/tools.py
|
ronaldyganteng/WeebProject
|
d630cda9f79fafd83453650e414aa59ae136303e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 21
|
2021-02-01T14:01:42.000Z
|
2021-08-22T01:13:28.000Z
|
# Copyright (C) 2020 Adek Maulana
#
# SPDX-License-Identifier: GPL-3.0-or-later
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import re
import hashlib
from typing import List
async def md5(fname: str) -> str:
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def humanbytes(size: int) -> str:
if size is None or isinstance(size, str):
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(seconds: int) -> str:
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "") +
((str(hours) + " hour(s), ") if hours else "") +
((str(minutes) + " minute(s), ") if minutes else "") +
((str(seconds) + " second(s), ") if seconds else "")
)
return tmp[:-2]
def human_to_bytes(size: str) -> int:
units = {
"M": 2**20, "MB": 2**20,
"G": 2**30, "GB": 2**30,
"T": 2**40, "TB": 2**40
}
size = size.upper()
if not re.match(r' ', size):
size = re.sub(r'([KMGT])', r' \1', size)
number, unit = [string.strip() for string in size.split()]
return int(float(number) * units[unit])
async def run_cmd(cmd: List) -> (str, str):
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
out, err = await process.communicate()
t_resp = out.strip()
e_resp = err.strip()
return t_resp, e_resp
| 30.158537
| 72
| 0.615851
|
import asyncio
import re
import hashlib
from typing import List
async def md5(fname: str) -> str:
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def humanbytes(size: int) -> str:
if size is None or isinstance(size, str):
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(seconds: int) -> str:
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "") +
((str(hours) + " hour(s), ") if hours else "") +
((str(minutes) + " minute(s), ") if minutes else "") +
((str(seconds) + " second(s), ") if seconds else "")
)
return tmp[:-2]
def human_to_bytes(size: str) -> int:
units = {
"M": 2**20, "MB": 2**20,
"G": 2**30, "GB": 2**30,
"T": 2**40, "TB": 2**40
}
size = size.upper()
if not re.match(r' ', size):
size = re.sub(r'([KMGT])', r' \1', size)
number, unit = [string.strip() for string in size.split()]
return int(float(number) * units[unit])
async def run_cmd(cmd: List) -> (str, str):
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
out, err = await process.communicate()
t_resp = out.strip()
e_resp = err.strip()
return t_resp, e_resp
| true
| true
|
1c485e14fe7d5c3bbc2cb0ef4066486b1c2f9fc0
| 2,579
|
py
|
Python
|
pyad/pyadexceptions.py
|
sukhjinderpalsingh/pyad
|
d95ff67745065cafca4f2653aab1fbce2df91fb9
|
[
"Apache-2.0"
] | 145
|
2015-01-14T21:53:35.000Z
|
2022-03-10T19:40:32.000Z
|
pyad/pyadexceptions.py
|
zakird/pyad
|
d95ff67745065cafca4f2653aab1fbce2df91fb9
|
[
"Apache-2.0"
] | 104
|
2015-01-12T21:14:10.000Z
|
2022-03-02T12:38:41.000Z
|
pyad/pyadexceptions.py
|
sukhjinderpalsingh/pyad
|
d95ff67745065cafca4f2653aab1fbce2df91fb9
|
[
"Apache-2.0"
] | 73
|
2015-03-27T07:36:47.000Z
|
2022-03-04T18:59:18.000Z
|
from __future__ import print_function
from builtins import str
class comException(Exception):
def __init__(self, error_info, additional_info={}):
self.error_info = error_info
self.additional_info = additional_info
def __str__(self):
print("Error Constant: %s" % self.error_info['error_constant'])
print("Error Code: %s" % str(self.error_info['error_code']))
#print "Error Message: %s" % self.error_info['error_message']
#print "type is ", self.error_info['error_message'].__class__
#return "%s (%s): %s" % (str(self.error_info['error_constant']), str(self.error_info['error_code']), str(self.error_info['error_message']))
class genericADSIException(comException):
def __init__(self, error_info, additional_info={}):
comException.__init__(error_info, additional_info)
def __str__(self):
return "%s (%s): %s" % (self.error_info['error_constant'], self.error_info['error_code'], self.error_info['error_message'])
class win32Exception(comException):
def __init__(self, error_info, additional_info={}):
comException.__init__(self, error_info, additional_info)
def __str__(self):
return "%s: %s" % (self.error_info['error_code'], self.error_info['message'])
class invalidOwnerException(Exception):
def __str__(self):
return "The submitted object is not eligible to own another object."
class noObjectFoundException(Exception):
def __str__(self):
return "The requested object does not exist."
class InvalidObjectException(noObjectFoundException, win32Exception):
def __init__(self, error_info, additional_info):
win32Exception.__init__(self, error_info, additional_info)
class InvalidAttribute(AttributeError):
def __init__(self, obj, attribute):
self.obj, self.attribute = obj, attribute
def __str__(self):
return 'The attribute "%s" is not permitted by the schema definition of the object "%s" (the requested attribute does not exist).' % (self.attribute, self.obj)
class noExecutedQuery(Exception):
def __str__(self):
return 'No query has been executed. Therefore there are no results to return. Execute a query before requesting results.'
class invalidResults(Exception):
def __init__(self, numberResults):
self.__numberResults = numberResults
def __str__(self):
return 'The specified query returned %i results. getSingleResults only functions with a single result.' % self.__numberResults
| 39.676923
| 168
| 0.697945
|
from __future__ import print_function
from builtins import str
class comException(Exception):
def __init__(self, error_info, additional_info={}):
self.error_info = error_info
self.additional_info = additional_info
def __str__(self):
print("Error Constant: %s" % self.error_info['error_constant'])
print("Error Code: %s" % str(self.error_info['error_code']))
class genericADSIException(comException):
def __init__(self, error_info, additional_info={}):
comException.__init__(error_info, additional_info)
def __str__(self):
return "%s (%s): %s" % (self.error_info['error_constant'], self.error_info['error_code'], self.error_info['error_message'])
class win32Exception(comException):
def __init__(self, error_info, additional_info={}):
comException.__init__(self, error_info, additional_info)
def __str__(self):
return "%s: %s" % (self.error_info['error_code'], self.error_info['message'])
class invalidOwnerException(Exception):
def __str__(self):
return "The submitted object is not eligible to own another object."
class noObjectFoundException(Exception):
def __str__(self):
return "The requested object does not exist."
class InvalidObjectException(noObjectFoundException, win32Exception):
def __init__(self, error_info, additional_info):
win32Exception.__init__(self, error_info, additional_info)
class InvalidAttribute(AttributeError):
def __init__(self, obj, attribute):
self.obj, self.attribute = obj, attribute
def __str__(self):
return 'The attribute "%s" is not permitted by the schema definition of the object "%s" (the requested attribute does not exist).' % (self.attribute, self.obj)
class noExecutedQuery(Exception):
def __str__(self):
return 'No query has been executed. Therefore there are no results to return. Execute a query before requesting results.'
class invalidResults(Exception):
def __init__(self, numberResults):
self.__numberResults = numberResults
def __str__(self):
return 'The specified query returned %i results. getSingleResults only functions with a single result.' % self.__numberResults
| true
| true
|
1c485ee284a25efec48426c3601ced0b86cf1c38
| 1,167
|
py
|
Python
|
axitom/phantoms.py
|
PolymerGuy/AXITOM
|
7682be5b21fa933b9bea4082fe9a830076431feb
|
[
"MIT"
] | 4
|
2019-09-06T16:31:11.000Z
|
2022-02-04T12:18:47.000Z
|
axitom/phantoms.py
|
PolymerGuy/AXITOM
|
7682be5b21fa933b9bea4082fe9a830076431feb
|
[
"MIT"
] | 1
|
2019-08-08T12:30:33.000Z
|
2019-08-08T12:34:55.000Z
|
axitom/phantoms.py
|
PolymerGuy/AXITOM
|
7682be5b21fa933b9bea4082fe9a830076431feb
|
[
"MIT"
] | 7
|
2019-08-21T20:51:12.000Z
|
2020-02-04T14:20:42.000Z
|
import numpy as np
""" Phantoms
This module contains the phantoms that can be used for forward projection and virtual experiments
"""
def barrel(domain_size=128, outer_rad_fraction=0.7,center_val=None):
""" Barrel shaped phantom with a linear density gradient
The domain size is cubic with dimension "domain_size" along each axis
Parameters
----------
domain_size : int
The length of the sides of the domain
outer_rad_fraction : float
The diameter of the barrel given as a the fraction of the side length
center_val : float
The density value in the center of the barrel
Returns
-------
ndarray
The phantom
"""
center = domain_size / 2.
domain = np.zeros((domain_size, domain_size, domain_size), dtype=np.float64)
xs, ys = np.meshgrid(np.arange(domain_size), np.arange(domain_size))
xs = xs - center
ys = ys - center
r = np.sqrt(xs ** 2. + ys ** 2.)
domain[r < outer_rad_fraction * center, :] = 1.
if center_val is not None:
domain = domain * (center_val + (r / (outer_rad_fraction * center)) ** 2. * 0.5)[:, :, np.newaxis]
return domain
| 28.463415
| 106
| 0.652099
|
import numpy as np
def barrel(domain_size=128, outer_rad_fraction=0.7,center_val=None):
center = domain_size / 2.
domain = np.zeros((domain_size, domain_size, domain_size), dtype=np.float64)
xs, ys = np.meshgrid(np.arange(domain_size), np.arange(domain_size))
xs = xs - center
ys = ys - center
r = np.sqrt(xs ** 2. + ys ** 2.)
domain[r < outer_rad_fraction * center, :] = 1.
if center_val is not None:
domain = domain * (center_val + (r / (outer_rad_fraction * center)) ** 2. * 0.5)[:, :, np.newaxis]
return domain
| true
| true
|
1c485f4180c62d34716606f40f2b0dda9ebcd895
| 10,087
|
py
|
Python
|
tests/integration/standard/test_cluster.py
|
josh-mckenzie/python-driver
|
472675c61664afa99d2c9eb6c32424c846c1a367
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/standard/test_cluster.py
|
josh-mckenzie/python-driver
|
472675c61664afa99d2c9eb6c32424c846c1a367
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/standard/test_cluster.py
|
josh-mckenzie/python-driver
|
472675c61664afa99d2c9eb6c32424c846c1a367
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.integration import PROTOCOL_VERSION
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import cassandra
from cassandra.query import SimpleStatement, TraceUnavailable
from cassandra.policies import RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance
from cassandra.cluster import Cluster, NoHostAvailable
class ClusterTests(unittest.TestCase):
def test_basic(self):
"""
Test basic connection and usage
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
result = session.execute(
"""
CREATE KEYSPACE clustertests
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
self.assertEqual(None, result)
result = session.execute(
"""
CREATE TABLE clustertests.cf0 (
a text,
b text,
c text,
PRIMARY KEY (a, b)
)
""")
self.assertEqual(None, result)
result = session.execute(
"""
INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c')
""")
self.assertEqual(None, result)
result = session.execute("SELECT * FROM clustertests.cf0")
self.assertEqual([('a', 'b', 'c')], result)
cluster.shutdown()
def test_connect_on_keyspace(self):
"""
Ensure clusters that connect on a keyspace, do
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
result = session.execute(
"""
INSERT INTO test3rf.test (k, v) VALUES (8889, 8889)
""")
self.assertEqual(None, result)
result = session.execute("SELECT * FROM test3rf.test")
self.assertEqual([(8889, 8889)], result)
# test_connect_on_keyspace
session2 = cluster.connect('test3rf')
result2 = session2.execute("SELECT * FROM test")
self.assertEqual(result, result2)
def test_set_keyspace_twice(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.execute("USE system")
session.execute("USE system")
def test_default_connections(self):
"""
Ensure errors are not thrown when using non-default policies
"""
Cluster(
load_balancing_policy=RoundRobinPolicy(),
reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0),
default_retry_policy=RetryPolicy(),
conviction_policy_factory=SimpleConvictionPolicy,
protocol_version=PROTOCOL_VERSION
)
def test_connect_to_already_shutdown_cluster(self):
"""
Ensure you cannot connect to a cluster that's been shutdown
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.shutdown()
self.assertRaises(Exception, cluster.connect)
def test_auth_provider_is_callable(self):
"""
Ensure that auth_providers are always callable
"""
self.assertRaises(TypeError, Cluster, auth_provider=1, protocol_version=1)
c = Cluster(protocol_version=1)
self.assertRaises(TypeError, setattr, c, 'auth_provider', 1)
def test_v2_auth_provider(self):
"""
Check for v2 auth_provider compliance
"""
bad_auth_provider = lambda x: {'username': 'foo', 'password': 'bar'}
self.assertRaises(TypeError, Cluster, auth_provider=bad_auth_provider, protocol_version=2)
c = Cluster(protocol_version=2)
self.assertRaises(TypeError, setattr, c, 'auth_provider', bad_auth_provider)
def test_conviction_policy_factory_is_callable(self):
"""
Ensure that conviction_policy_factory are always callable
"""
self.assertRaises(ValueError, Cluster, conviction_policy_factory=1)
def test_connect_to_bad_hosts(self):
"""
Ensure that a NoHostAvailable Exception is thrown
when a cluster cannot connect to given hosts
"""
cluster = Cluster(['127.1.2.9', '127.1.2.10'],
protocol_version=PROTOCOL_VERSION)
self.assertRaises(NoHostAvailable, cluster.connect)
def test_cluster_settings(self):
"""
Test connection setting getters and setters
"""
if PROTOCOL_VERSION >= 3:
raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol")
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
min_requests_per_connection = cluster.get_min_requests_per_connection(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MIN_REQUESTS, min_requests_per_connection)
cluster.set_min_requests_per_connection(HostDistance.LOCAL, min_requests_per_connection + 1)
self.assertEqual(cluster.get_min_requests_per_connection(HostDistance.LOCAL), min_requests_per_connection + 1)
max_requests_per_connection = cluster.get_max_requests_per_connection(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MAX_REQUESTS, max_requests_per_connection)
cluster.set_max_requests_per_connection(HostDistance.LOCAL, max_requests_per_connection + 1)
self.assertEqual(cluster.get_max_requests_per_connection(HostDistance.LOCAL), max_requests_per_connection + 1)
core_connections_per_host = cluster.get_core_connections_per_host(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, core_connections_per_host)
cluster.set_core_connections_per_host(HostDistance.LOCAL, core_connections_per_host + 1)
self.assertEqual(cluster.get_core_connections_per_host(HostDistance.LOCAL), core_connections_per_host + 1)
max_connections_per_host = cluster.get_max_connections_per_host(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, max_connections_per_host)
cluster.set_max_connections_per_host(HostDistance.LOCAL, max_connections_per_host + 1)
self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1)
def test_submit_schema_refresh(self):
"""
Ensure new new schema is refreshed after submit_schema_refresh()
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
self.assertNotIn("newkeyspace", cluster.metadata.keyspaces)
other_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = other_cluster.connect()
session.execute(
"""
CREATE KEYSPACE newkeyspace
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
future = cluster.submit_schema_refresh()
future.result()
self.assertIn("newkeyspace", cluster.metadata.keyspaces)
def test_trace(self):
"""
Ensure trace can be requested for async and non-async queries
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
self.assertRaises(TypeError, session.execute, "SELECT * FROM system.local", trace=True)
def check_trace(trace):
self.assertIsNot(None, trace.request_type)
self.assertIsNot(None, trace.duration)
self.assertIsNot(None, trace.started_at)
self.assertIsNot(None, trace.coordinator)
self.assertIsNot(None, trace.events)
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
session.execute(statement, trace=True)
check_trace(statement.trace)
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
session.execute(statement)
self.assertEqual(None, statement.trace)
statement2 = SimpleStatement(query)
future = session.execute_async(statement2, trace=True)
future.result()
check_trace(future.get_query_trace())
statement2 = SimpleStatement(query)
future = session.execute_async(statement2)
future.result()
self.assertEqual(None, future.get_query_trace())
prepared = session.prepare("SELECT * FROM system.local")
future = session.execute_async(prepared, parameters=(), trace=True)
future.result()
check_trace(future.get_query_trace())
def test_trace_timeout(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
future = session.execute_async(statement, trace=True)
future.result()
self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0)
def test_string_coverage(self):
"""
Ensure str(future) returns without error
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
future = session.execute_async(statement)
self.assertIn(query, str(future))
future.result()
self.assertIn(query, str(future))
self.assertIn('result', str(future))
| 37.359259
| 129
| 0.674234
|
from tests.integration import PROTOCOL_VERSION
try:
import unittest2 as unittest
except ImportError:
import unittest
import cassandra
from cassandra.query import SimpleStatement, TraceUnavailable
from cassandra.policies import RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance
from cassandra.cluster import Cluster, NoHostAvailable
class ClusterTests(unittest.TestCase):
def test_basic(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
result = session.execute(
"""
CREATE KEYSPACE clustertests
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
self.assertEqual(None, result)
result = session.execute(
"""
CREATE TABLE clustertests.cf0 (
a text,
b text,
c text,
PRIMARY KEY (a, b)
)
""")
self.assertEqual(None, result)
result = session.execute(
"""
INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c')
""")
self.assertEqual(None, result)
result = session.execute("SELECT * FROM clustertests.cf0")
self.assertEqual([('a', 'b', 'c')], result)
cluster.shutdown()
def test_connect_on_keyspace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
result = session.execute(
"""
INSERT INTO test3rf.test (k, v) VALUES (8889, 8889)
""")
self.assertEqual(None, result)
result = session.execute("SELECT * FROM test3rf.test")
self.assertEqual([(8889, 8889)], result)
session2 = cluster.connect('test3rf')
result2 = session2.execute("SELECT * FROM test")
self.assertEqual(result, result2)
def test_set_keyspace_twice(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.execute("USE system")
session.execute("USE system")
def test_default_connections(self):
Cluster(
load_balancing_policy=RoundRobinPolicy(),
reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0),
default_retry_policy=RetryPolicy(),
conviction_policy_factory=SimpleConvictionPolicy,
protocol_version=PROTOCOL_VERSION
)
def test_connect_to_already_shutdown_cluster(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.shutdown()
self.assertRaises(Exception, cluster.connect)
def test_auth_provider_is_callable(self):
self.assertRaises(TypeError, Cluster, auth_provider=1, protocol_version=1)
c = Cluster(protocol_version=1)
self.assertRaises(TypeError, setattr, c, 'auth_provider', 1)
def test_v2_auth_provider(self):
bad_auth_provider = lambda x: {'username': 'foo', 'password': 'bar'}
self.assertRaises(TypeError, Cluster, auth_provider=bad_auth_provider, protocol_version=2)
c = Cluster(protocol_version=2)
self.assertRaises(TypeError, setattr, c, 'auth_provider', bad_auth_provider)
def test_conviction_policy_factory_is_callable(self):
self.assertRaises(ValueError, Cluster, conviction_policy_factory=1)
def test_connect_to_bad_hosts(self):
cluster = Cluster(['127.1.2.9', '127.1.2.10'],
protocol_version=PROTOCOL_VERSION)
self.assertRaises(NoHostAvailable, cluster.connect)
def test_cluster_settings(self):
if PROTOCOL_VERSION >= 3:
raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol")
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
min_requests_per_connection = cluster.get_min_requests_per_connection(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MIN_REQUESTS, min_requests_per_connection)
cluster.set_min_requests_per_connection(HostDistance.LOCAL, min_requests_per_connection + 1)
self.assertEqual(cluster.get_min_requests_per_connection(HostDistance.LOCAL), min_requests_per_connection + 1)
max_requests_per_connection = cluster.get_max_requests_per_connection(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MAX_REQUESTS, max_requests_per_connection)
cluster.set_max_requests_per_connection(HostDistance.LOCAL, max_requests_per_connection + 1)
self.assertEqual(cluster.get_max_requests_per_connection(HostDistance.LOCAL), max_requests_per_connection + 1)
core_connections_per_host = cluster.get_core_connections_per_host(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, core_connections_per_host)
cluster.set_core_connections_per_host(HostDistance.LOCAL, core_connections_per_host + 1)
self.assertEqual(cluster.get_core_connections_per_host(HostDistance.LOCAL), core_connections_per_host + 1)
max_connections_per_host = cluster.get_max_connections_per_host(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, max_connections_per_host)
cluster.set_max_connections_per_host(HostDistance.LOCAL, max_connections_per_host + 1)
self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1)
def test_submit_schema_refresh(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
self.assertNotIn("newkeyspace", cluster.metadata.keyspaces)
other_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = other_cluster.connect()
session.execute(
"""
CREATE KEYSPACE newkeyspace
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
future = cluster.submit_schema_refresh()
future.result()
self.assertIn("newkeyspace", cluster.metadata.keyspaces)
def test_trace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
self.assertRaises(TypeError, session.execute, "SELECT * FROM system.local", trace=True)
def check_trace(trace):
self.assertIsNot(None, trace.request_type)
self.assertIsNot(None, trace.duration)
self.assertIsNot(None, trace.started_at)
self.assertIsNot(None, trace.coordinator)
self.assertIsNot(None, trace.events)
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
session.execute(statement, trace=True)
check_trace(statement.trace)
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
session.execute(statement)
self.assertEqual(None, statement.trace)
statement2 = SimpleStatement(query)
future = session.execute_async(statement2, trace=True)
future.result()
check_trace(future.get_query_trace())
statement2 = SimpleStatement(query)
future = session.execute_async(statement2)
future.result()
self.assertEqual(None, future.get_query_trace())
prepared = session.prepare("SELECT * FROM system.local")
future = session.execute_async(prepared, parameters=(), trace=True)
future.result()
check_trace(future.get_query_trace())
def test_trace_timeout(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
future = session.execute_async(statement, trace=True)
future.result()
self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0)
def test_string_coverage(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
future = session.execute_async(statement)
self.assertIn(query, str(future))
future.result()
self.assertIn(query, str(future))
self.assertIn('result', str(future))
| true
| true
|
1c486091f1cf476ba15dd0ac7d22a9dfbee4c1fc
| 7,319
|
py
|
Python
|
code/src/d00_utils/feat_utils.py
|
edugm94/temporal-feat-emotion-prediction
|
6548bbf5f5d8969de97c076ebc9b5462d7b8bdd4
|
[
"MIT"
] | null | null | null |
code/src/d00_utils/feat_utils.py
|
edugm94/temporal-feat-emotion-prediction
|
6548bbf5f5d8969de97c076ebc9b5462d7b8bdd4
|
[
"MIT"
] | null | null | null |
code/src/d00_utils/feat_utils.py
|
edugm94/temporal-feat-emotion-prediction
|
6548bbf5f5d8969de97c076ebc9b5462d7b8bdd4
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Author: Eduardo Gutierrez Maestro
# Date: 2021.12.14
# email: eduardo.gutierrez-maestro@oru.se
#
# Center for Applied Autonomous Sensor Systems (AASS), Cognitive Robotic Systems Labs
# University of Orebro, Sweden
import numpy as np
import pandas as pd
from scipy.fft import fft, ifft
from csaps import csaps
def clean_dataset(labels, discard=0.1):
# Obtain an accounting of hbiw many vectors there is for each emotion
unique, counts = np.unique(labels, return_counts=True)
counting = dict(zip(unique, counts))
# Get the total amount of vectors and the threshold to filter dictionary
tot = sum(counting.values())
threshold = tot * discard
# Get a dictionary with the emotions that should be cleaned from the initial variables
# It is kept a dictionary to check the lenght of the cleaned values at the end
emo_del_dict = dict(filter(lambda elem: elem[1] < threshold, counting.items()))
# Array that contains the value of the emotions to be cleaned in the "labels" variable
emo_del_arr = np.array(list(emo_del_dict.keys()))
# Array containing the index that should be deleted from "data" and "label"
indx_del_arr = np.where(labels == emo_del_arr)[0]
assert indx_del_arr.shape[0] == sum(
emo_del_dict.values()), "The amount of vectors to delete does not match! Check it."
return indx_del_arr
def filter_nrows(feature_, lab_):
# 1 Step: Find min number of rows within signals
min_nrow = np.inf
for arr in feature_:
nrow_ = arr.shape[0]
min_nrow = nrow_ if nrow_ < min_nrow else min_nrow
# 2 Step: Modify each array in the list in case
feat_signals_filter = []
lab_signals_filter = []
for arr, lab in zip(feature_, lab_):
arr_ = arr[0:min_nrow, :]
lab_ = lab[0:min_nrow,]
feat_signals_filter.append(arr_)
lab_signals_filter.append(lab_)
return feat_signals_filter, lab_signals_filter[0]
#def extract_ts_features(df, emotion, weda, patient, day, signal):
def extract_ts_features(df, signal):
#DATASET = "/home/eduardo/phd/projects/opt-physio-feat-extractor/2-emotion-to-vector/out/filter/"
#DATASET = "/home/eduardo/phd/projects/physio-feat-extractor/physio-feat-extractor/2-emotion-to-vector/out/filter/"
signal2Freq = {
"HR": 1,
"ACC": 32,
"EDA": 4,
"TEMP": 4
}
FREQ = signal2Freq[signal]
Ts = 1 / FREQ
WINDOW = 60 # sliding window size
OVERLAP = 0.1
#datapath = DATASET + "{}/{}/{}/{}/{}.csv".format(emotion, weda, patient, day, signal)
#df = pd.read_csv(datapath, sep='\t')
df = df.reset_index()
if signal == 'EDA':
eda = df['eda'].to_numpy()
x = np.arange(0, len(eda), 1)
scl = csaps(x, eda, x, smooth=0.5)
scr = np.real_if_close(ifft(fft(eda) / fft(scl)))
df['scr'] = scr
df['scl'] = scl
if df.shape[0] == 1:
# You should create an Dataframe with -1 to indicate that there is no data available
print("Empty DataFrame. Exiting program...")
return -1, -1
init_id = df['index'].iloc[0]
end_id = df['index'].iloc[-1]
# Boundaries to control cases where EMA is rigth at beginning and end of Dataframe
init_bound_ind = int(init_id + WINDOW * FREQ / 2)
end_bound_ind = int(end_id - WINDOW * FREQ / 2)
# Get id that correspond to a label; Filtering: checking boundaries
idx = df.index[(df['label'] != -1) & (init_bound_ind < df['index']) & (df['index'] < end_bound_ind)]
idxs = np.asarray(idx)
idxs_aux = idxs[0:len(idxs) - 1]
init_ema_aux = (idxs[1:len(idxs)] - idxs_aux).reshape(-1, 1)
init_ema_id = np.where(np.any(init_ema_aux > 1, axis=1))[0] + 1
init_ema_indices = idxs[init_ema_id].tolist()
counter = 1
w_central = idxs[0] # It is chosen first element: filtered list
id_ = 0
offset = (1 - OVERLAP) * WINDOW * FREQ
ts_df = pd.DataFrame(columns=['id', 'time', 'kind', 'value'])
ts_df_label = pd.DataFrame(columns=['id', 'label'])
while id_ <= len(idxs):
w_left = w_central - (WINDOW * FREQ / 2 - 1)
w_right = w_central + (WINDOW * FREQ / 2 - 1)
df_window = df.loc[df['index'].between(w_left, w_right)]
label_ = int(df['label'][df['index'].iloc[w_central]])
if signal == 'ACC':
x_ = df_window['x'].to_numpy()
y_ = df_window['y'].to_numpy()
z_ = df_window['z'].to_numpy()
n_ = df_window['n'].to_numpy()
ts_ = np.arange(len(x_))
data_x = {'id': counter, 'time': ts_, 'kind': 'acc_x', 'value': x_}
data_y = {'id': counter, 'time': ts_, 'kind': 'acc_y', 'value': y_}
data_z = {'id': counter, 'time': ts_, 'kind': 'acc_z', 'value': z_}
data_n = {'id': counter, 'time': ts_, 'kind': 'acc_n', 'value': n_}
aux_x = pd.DataFrame(data=data_x)
aux_y = pd.DataFrame(data=data_y)
aux_z = pd.DataFrame(data=data_z)
aux_n = pd.DataFrame(data=data_n)
aux_ = pd.concat([aux_x, aux_y, aux_z, aux_n], axis=0)
elif signal == "EDA":
eda_ = df_window[signal.lower()].to_numpy()
ts_ = np.arange(len(eda_))
data_eda = {'id': counter, 'time': ts_, 'kind': "eda", 'value': eda_}
aux_eda = pd.DataFrame(data=data_eda)
scl_ = df_window['scl'].to_numpy()
ts_ = np.arange(len(scl_))
data_scl = {'id': counter, 'time': ts_, 'kind': "scl", 'value': scl_}
aux_scl = pd.DataFrame(data=data_scl)
scr_ = df_window['scr'].to_numpy()
ts_ = np.arange(len(scr_))
data_scr = {'id': counter, 'time': ts_, 'kind': "scr", 'value': scr_}
aux_scr = pd.DataFrame(data=data_scr)
aux_ = pd.concat([aux_eda, aux_scl, aux_scr], axis=0)
elif signal == "TEMP":
data_ = df_window[signal.lower()].to_numpy()
ts_ = np.arange(len(data_))
data = {'id': counter, 'time': ts_, 'kind': "temp", 'value': data_}
aux_ = pd.DataFrame(data=data)
else:
data_ = df_window[signal.lower()].to_numpy()
ts_ = np.arange(len(data_))
data = {'id': counter, 'time': ts_, 'kind': "hr", 'value': data_}
aux_ = pd.DataFrame(data=data)
data_label = {'id': [counter], "label": [label_]}
aux_label = pd.DataFrame(data=data_label)
ts_df = pd.concat([ts_df, aux_], axis=0)
ts_df_label = pd.concat([ts_df_label, aux_label], axis=0)
# Logic code to move sliding window
w_central_aux = int(w_central + offset)
if w_central_aux not in idxs:
if not init_ema_indices: # if this list is empty it means that you arrived to the end
break
# Skip to next starting EMA indicated by init_ema_indices
w_central = init_ema_indices.pop(0)
# modify variable id_
id_ = np.where(idxs == w_central)[0][0]
else:
# id_ = indice que ocupa w_central_aux en la lista idxs
w_central = w_central_aux
id_ = np.where(idxs == w_central_aux)[0][0]
counter += 1
return ts_df, ts_df_label
| 38.521053
| 119
| 0.599399
|
import numpy as np
import pandas as pd
from scipy.fft import fft, ifft
from csaps import csaps
def clean_dataset(labels, discard=0.1):
unique, counts = np.unique(labels, return_counts=True)
counting = dict(zip(unique, counts))
tot = sum(counting.values())
threshold = tot * discard
emo_del_dict = dict(filter(lambda elem: elem[1] < threshold, counting.items()))
emo_del_arr = np.array(list(emo_del_dict.keys()))
indx_del_arr = np.where(labels == emo_del_arr)[0]
assert indx_del_arr.shape[0] == sum(
emo_del_dict.values()), "The amount of vectors to delete does not match! Check it."
return indx_del_arr
def filter_nrows(feature_, lab_):
min_nrow = np.inf
for arr in feature_:
nrow_ = arr.shape[0]
min_nrow = nrow_ if nrow_ < min_nrow else min_nrow
feat_signals_filter = []
lab_signals_filter = []
for arr, lab in zip(feature_, lab_):
arr_ = arr[0:min_nrow, :]
lab_ = lab[0:min_nrow,]
feat_signals_filter.append(arr_)
lab_signals_filter.append(lab_)
return feat_signals_filter, lab_signals_filter[0]
def extract_ts_features(df, signal):
signal2Freq = {
"HR": 1,
"ACC": 32,
"EDA": 4,
"TEMP": 4
}
FREQ = signal2Freq[signal]
Ts = 1 / FREQ
WINDOW = 60
OVERLAP = 0.1
df = df.reset_index()
if signal == 'EDA':
eda = df['eda'].to_numpy()
x = np.arange(0, len(eda), 1)
scl = csaps(x, eda, x, smooth=0.5)
scr = np.real_if_close(ifft(fft(eda) / fft(scl)))
df['scr'] = scr
df['scl'] = scl
if df.shape[0] == 1:
print("Empty DataFrame. Exiting program...")
return -1, -1
init_id = df['index'].iloc[0]
end_id = df['index'].iloc[-1]
init_bound_ind = int(init_id + WINDOW * FREQ / 2)
end_bound_ind = int(end_id - WINDOW * FREQ / 2)
idx = df.index[(df['label'] != -1) & (init_bound_ind < df['index']) & (df['index'] < end_bound_ind)]
idxs = np.asarray(idx)
idxs_aux = idxs[0:len(idxs) - 1]
init_ema_aux = (idxs[1:len(idxs)] - idxs_aux).reshape(-1, 1)
init_ema_id = np.where(np.any(init_ema_aux > 1, axis=1))[0] + 1
init_ema_indices = idxs[init_ema_id].tolist()
counter = 1
w_central = idxs[0]
id_ = 0
offset = (1 - OVERLAP) * WINDOW * FREQ
ts_df = pd.DataFrame(columns=['id', 'time', 'kind', 'value'])
ts_df_label = pd.DataFrame(columns=['id', 'label'])
while id_ <= len(idxs):
w_left = w_central - (WINDOW * FREQ / 2 - 1)
w_right = w_central + (WINDOW * FREQ / 2 - 1)
df_window = df.loc[df['index'].between(w_left, w_right)]
label_ = int(df['label'][df['index'].iloc[w_central]])
if signal == 'ACC':
x_ = df_window['x'].to_numpy()
y_ = df_window['y'].to_numpy()
z_ = df_window['z'].to_numpy()
n_ = df_window['n'].to_numpy()
ts_ = np.arange(len(x_))
data_x = {'id': counter, 'time': ts_, 'kind': 'acc_x', 'value': x_}
data_y = {'id': counter, 'time': ts_, 'kind': 'acc_y', 'value': y_}
data_z = {'id': counter, 'time': ts_, 'kind': 'acc_z', 'value': z_}
data_n = {'id': counter, 'time': ts_, 'kind': 'acc_n', 'value': n_}
aux_x = pd.DataFrame(data=data_x)
aux_y = pd.DataFrame(data=data_y)
aux_z = pd.DataFrame(data=data_z)
aux_n = pd.DataFrame(data=data_n)
aux_ = pd.concat([aux_x, aux_y, aux_z, aux_n], axis=0)
elif signal == "EDA":
eda_ = df_window[signal.lower()].to_numpy()
ts_ = np.arange(len(eda_))
data_eda = {'id': counter, 'time': ts_, 'kind': "eda", 'value': eda_}
aux_eda = pd.DataFrame(data=data_eda)
scl_ = df_window['scl'].to_numpy()
ts_ = np.arange(len(scl_))
data_scl = {'id': counter, 'time': ts_, 'kind': "scl", 'value': scl_}
aux_scl = pd.DataFrame(data=data_scl)
scr_ = df_window['scr'].to_numpy()
ts_ = np.arange(len(scr_))
data_scr = {'id': counter, 'time': ts_, 'kind': "scr", 'value': scr_}
aux_scr = pd.DataFrame(data=data_scr)
aux_ = pd.concat([aux_eda, aux_scl, aux_scr], axis=0)
elif signal == "TEMP":
data_ = df_window[signal.lower()].to_numpy()
ts_ = np.arange(len(data_))
data = {'id': counter, 'time': ts_, 'kind': "temp", 'value': data_}
aux_ = pd.DataFrame(data=data)
else:
data_ = df_window[signal.lower()].to_numpy()
ts_ = np.arange(len(data_))
data = {'id': counter, 'time': ts_, 'kind': "hr", 'value': data_}
aux_ = pd.DataFrame(data=data)
data_label = {'id': [counter], "label": [label_]}
aux_label = pd.DataFrame(data=data_label)
ts_df = pd.concat([ts_df, aux_], axis=0)
ts_df_label = pd.concat([ts_df_label, aux_label], axis=0)
w_central_aux = int(w_central + offset)
if w_central_aux not in idxs:
if not init_ema_indices:
break
w_central = init_ema_indices.pop(0)
id_ = np.where(idxs == w_central)[0][0]
else:
w_central = w_central_aux
id_ = np.where(idxs == w_central_aux)[0][0]
counter += 1
return ts_df, ts_df_label
| true
| true
|
1c4860b88312afac4669bab44eca6d6d09937ccf
| 1,704
|
py
|
Python
|
python/open3d/ml/torch/pipelines.py
|
Dudulle/Open3D
|
ffed2d1bee6d45b6acc4b7ae7133752e50d6ecab
|
[
"MIT"
] | 28
|
2021-03-02T09:51:12.000Z
|
2022-03-17T09:27:46.000Z
|
python/open3d/ml/torch/pipelines.py
|
Dudulle/Open3D
|
ffed2d1bee6d45b6acc4b7ae7133752e50d6ecab
|
[
"MIT"
] | 27
|
2021-03-08T06:56:35.000Z
|
2022-03-25T14:00:32.000Z
|
python/open3d/ml/torch/pipelines.py
|
Dudulle/Open3D
|
ffed2d1bee6d45b6acc4b7ae7133752e50d6ecab
|
[
"MIT"
] | 7
|
2021-08-24T02:20:13.000Z
|
2021-12-31T09:45:02.000Z
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2020 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""3D ML pipelines for PyTorch."""
import os as _os
from open3d import _build_config
if _build_config['BUNDLE_OPEN3D_ML']:
if 'OPEN3D_ML_ROOT' in _os.environ:
from ml3d.torch.pipelines import *
else:
from open3d._ml3d.torch.pipelines import *
| 47.333333
| 79
| 0.632042
|
import os as _os
from open3d import _build_config
if _build_config['BUNDLE_OPEN3D_ML']:
if 'OPEN3D_ML_ROOT' in _os.environ:
from ml3d.torch.pipelines import *
else:
from open3d._ml3d.torch.pipelines import *
| true
| true
|
1c4861abce1abcc4cf3861647e4c50dd84b20861
| 5,951
|
py
|
Python
|
util.py
|
seanliu96/R-Net
|
8462330451079a2ff67cd431fe30a57a6ca3d802
|
[
"MIT"
] | null | null | null |
util.py
|
seanliu96/R-Net
|
8462330451079a2ff67cd431fe30a57a6ca3d802
|
[
"MIT"
] | null | null | null |
util.py
|
seanliu96/R-Net
|
8462330451079a2ff67cd431fe30a57a6ca3d802
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import re
from collections import Counter
import string
def get_record_parser(config, is_test=False):
def parse(example):
para_limit = config.test_para_limit if is_test else config.para_limit
ques_limit = config.test_ques_limit if is_test else config.ques_limit
char_limit = config.char_limit
features = tf.parse_single_example(example,
features={
"context_idxs": tf.FixedLenFeature([], tf.string),
"ques_idxs": tf.FixedLenFeature([], tf.string),
"context_char_idxs": tf.FixedLenFeature([], tf.string),
"ques_char_idxs": tf.FixedLenFeature([], tf.string),
"y1": tf.FixedLenFeature([], tf.string),
"y2": tf.FixedLenFeature([], tf.string),
"id": tf.FixedLenFeature([], tf.int64)
})
context_idxs = tf.reshape(tf.decode_raw(
features["context_idxs"], tf.int32), [para_limit])
ques_idxs = tf.reshape(tf.decode_raw(
features["ques_idxs"], tf.int32), [ques_limit])
context_char_idxs = tf.reshape(tf.decode_raw(
features["context_char_idxs"], tf.int32), [para_limit, char_limit])
ques_char_idxs = tf.reshape(tf.decode_raw(
features["ques_char_idxs"], tf.int32), [ques_limit, char_limit])
y1 = tf.reshape(tf.decode_raw(
features["y1"], tf.float32), [para_limit])
y2 = tf.reshape(tf.decode_raw(
features["y2"], tf.float32), [para_limit])
qa_id = features["id"]
return context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id
return parse
def get_batch_dataset(record_file, parser, config):
"""
Read a file and construct batches
"""
num_threads = tf.constant(config.num_threads, dtype=tf.int32)
dataset = tf.data.TFRecordDataset(record_file).map(
parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
if config.is_bucket:
buckets = [tf.constant(num) for num in range(*config.bucket_range)]
def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
c_len = tf.reduce_sum(
tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
buckets_min = [np.iinfo(np.int32).min] + buckets
buckets_max = buckets + [np.iinfo(np.int32).max]
conditions_c = tf.logical_and(
tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
def reduce_func(key, elements):
return elements.batch(config.batch_size)
dataset = dataset.apply(tf.contrib.data.group_by_window(
key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
else:
dataset = dataset.batch(config.batch_size)
return dataset
def get_dataset(record_file, parser, config):
num_threads = tf.constant(config.num_threads, dtype=tf.int32)
dataset = tf.data.TFRecordDataset(record_file).map(
parser, num_parallel_calls=num_threads).repeat().batch(config.batch_size)
return dataset
def convert_tokens(eval_file, qa_id, pp1, pp2):
answer_dict = {}
remapped_dict = {}
for qid, p1, p2 in zip(qa_id, pp1, pp2):
context = eval_file[str(qid)]["context"]
spans = eval_file[str(qid)]["spans"]
uuid = eval_file[str(qid)]["uuid"]
start_idx = spans[p1][0]
end_idx = spans[p2][1]
answer_dict[str(qid)] = context[start_idx: end_idx]
remapped_dict[uuid] = context[start_idx: end_idx]
return answer_dict, remapped_dict
def evaluate(eval_file, answer_dict):
f1 = exact_match = total = 0
for key, value in answer_dict.items():
total += 1
ground_truths = eval_file[key]["answers"]
prediction = value
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score,
prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
| 40.482993
| 102
| 0.623593
|
import tensorflow as tf
import numpy as np
import re
from collections import Counter
import string
def get_record_parser(config, is_test=False):
def parse(example):
para_limit = config.test_para_limit if is_test else config.para_limit
ques_limit = config.test_ques_limit if is_test else config.ques_limit
char_limit = config.char_limit
features = tf.parse_single_example(example,
features={
"context_idxs": tf.FixedLenFeature([], tf.string),
"ques_idxs": tf.FixedLenFeature([], tf.string),
"context_char_idxs": tf.FixedLenFeature([], tf.string),
"ques_char_idxs": tf.FixedLenFeature([], tf.string),
"y1": tf.FixedLenFeature([], tf.string),
"y2": tf.FixedLenFeature([], tf.string),
"id": tf.FixedLenFeature([], tf.int64)
})
context_idxs = tf.reshape(tf.decode_raw(
features["context_idxs"], tf.int32), [para_limit])
ques_idxs = tf.reshape(tf.decode_raw(
features["ques_idxs"], tf.int32), [ques_limit])
context_char_idxs = tf.reshape(tf.decode_raw(
features["context_char_idxs"], tf.int32), [para_limit, char_limit])
ques_char_idxs = tf.reshape(tf.decode_raw(
features["ques_char_idxs"], tf.int32), [ques_limit, char_limit])
y1 = tf.reshape(tf.decode_raw(
features["y1"], tf.float32), [para_limit])
y2 = tf.reshape(tf.decode_raw(
features["y2"], tf.float32), [para_limit])
qa_id = features["id"]
return context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id
return parse
def get_batch_dataset(record_file, parser, config):
num_threads = tf.constant(config.num_threads, dtype=tf.int32)
dataset = tf.data.TFRecordDataset(record_file).map(
parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
if config.is_bucket:
buckets = [tf.constant(num) for num in range(*config.bucket_range)]
def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
c_len = tf.reduce_sum(
tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
buckets_min = [np.iinfo(np.int32).min] + buckets
buckets_max = buckets + [np.iinfo(np.int32).max]
conditions_c = tf.logical_and(
tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
def reduce_func(key, elements):
return elements.batch(config.batch_size)
dataset = dataset.apply(tf.contrib.data.group_by_window(
key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
else:
dataset = dataset.batch(config.batch_size)
return dataset
def get_dataset(record_file, parser, config):
num_threads = tf.constant(config.num_threads, dtype=tf.int32)
dataset = tf.data.TFRecordDataset(record_file).map(
parser, num_parallel_calls=num_threads).repeat().batch(config.batch_size)
return dataset
def convert_tokens(eval_file, qa_id, pp1, pp2):
answer_dict = {}
remapped_dict = {}
for qid, p1, p2 in zip(qa_id, pp1, pp2):
context = eval_file[str(qid)]["context"]
spans = eval_file[str(qid)]["spans"]
uuid = eval_file[str(qid)]["uuid"]
start_idx = spans[p1][0]
end_idx = spans[p2][1]
answer_dict[str(qid)] = context[start_idx: end_idx]
remapped_dict[uuid] = context[start_idx: end_idx]
return answer_dict, remapped_dict
def evaluate(eval_file, answer_dict):
f1 = exact_match = total = 0
for key, value in answer_dict.items():
total += 1
ground_truths = eval_file[key]["answers"]
prediction = value
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score,
prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
| true
| true
|
1c48620c64fa51850f5fd4bc16ab11b4b2f1dfac
| 4,735
|
py
|
Python
|
example.py
|
lbenassi/InstagramBot
|
49f8bad5de8d5df719f102c66acb6779b677bc5f
|
[
"MIT"
] | 1
|
2019-08-05T23:02:58.000Z
|
2019-08-05T23:02:58.000Z
|
example.py
|
lbenassi/InstagramBot
|
49f8bad5de8d5df719f102c66acb6779b677bc5f
|
[
"MIT"
] | null | null | null |
example.py
|
lbenassi/InstagramBot
|
49f8bad5de8d5df719f102c66acb6779b677bc5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
from src import InstaBot
from src.check_status import check_status
from src.feed_scanner import feed_scanner
from src.follow_protocol import follow_protocol
from src.unfollow_protocol import unfollow_protocol
bot = InstaBot(
login="",
password="",
like_per_day=600,
comments_per_day=200,
tag_list=['follow4follow', 'f4f', 'cute', 'l:212999109','party','time','luxary','happy','birthday','robatkarim','eshq','dostdashtani','boro','dawshi','nab','love','party','smile','style','eslamshahr','parand','like','dawshi','lovely','instagramers','socialenvy','اینستاگرام','ایران','فالوور','لایک','لاکچری','بام تهران','کرج','محرم','تاسوعا','عاشورا','مشهد','عکس','عکاسی','ویدیو','فالوبک','photographer','Desine','karaj','teh','tehran','mashhad','robatkarim','parand','پرند','park','خمینی','esfahan','arak','parkmoalem','پارک معلم','قلعه حسن خان','تجریش','دربند'],
tag_blacklist=['rain', 'thunderstorm'],
user_blacklist={},
max_like_for_one_tag=50,
follow_per_day=700,
follow_time=1 * 12,
unfollow_per_day=300,
unfollow_break_min=15,
unfollow_break_max=30,
log_mod=0,
proxy='',
# List of list of words, each of which will be used to generate comment
# For example: "This shot feels wow!"
comment_list=[["this", "the", "your"],
["photo", "picture", "pic", "shot", "snapshot"],
["is", "looks", "feels", "is really"],
["great", "super", "good", "very good", "good", "wow",
"WOW", "cool", "GREAT","magnificent", "magical",
"very cool", "stylish", "beautiful", "so beautiful",
"so stylish", "so professional", "lovely",
"so lovely", "very lovely", "glorious","so glorious",
"very glorious", "jazab", "excellent", "amazing"],
[".", "..", "...", "!", "!!", "!!!"]],
# Use unwanted_username_list to block usernames containing a string
## Will do partial matches; i.e. 'mozart' will block 'legend_mozart'
### 'free_followers' will be blocked because it contains 'free'
unwanted_username_list=[
'second', 'stuff', 'art', 'project', 'love', 'life', 'food', 'blog',
'free', 'keren', 'photo', 'graphy', 'indo', 'travel', 'art', 'shop',
'store', 'sex', 'toko', 'jual', 'online', 'murah', 'jam', 'kaos',
'case', 'baju', 'fashion', 'corp', 'tas', 'butik', 'grosir', 'karpet',
'sosis', 'salon', 'skin', 'care', 'cloth', 'tech', 'rental', 'kamera',
'beauty', 'express', 'kredit', 'collection', 'impor', 'preloved',
'follow', 'follower', 'gain', '.id', '_id', 'bags'
],
unfollow_whitelist=['example_user_1', 'example_user_2'])
while True:
#print("# MODE 0 = ORIGINAL MODE BY LEVPASHA")
#print("## MODE 1 = MODIFIED MODE BY KEMONG")
#print("### MODE 2 = ORIGINAL MODE + UNFOLLOW WHO DON'T FOLLOW BACK")
#print("#### MODE 3 = MODIFIED MODE : UNFOLLOW USERS WHO DON'T FOLLOW YOU BASED ON RECENT FEED")
#print("##### MODE 4 = MODIFIED MODE : FOLLOW USERS BASED ON RECENT FEED ONLY")
#print("###### MODE 5 = MODIFIED MODE : JUST UNFOLLOW EVERYBODY, EITHER YOUR FOLLOWER OR NOT")
################################
## WARNING ###
################################
# DON'T USE MODE 5 FOR A LONG PERIOD. YOU RISK YOUR ACCOUNT FROM GETTING BANNED
## USE MODE 5 IN BURST MODE, USE IT TO UNFOLLOW PEOPLE AS MANY AS YOU WANT IN SHORT TIME PERIOD
mode = 0
#print("You choose mode : %i" %(mode))
#print("CTRL + C to cancel this operation or wait 30 seconds to start")
#time.sleep(30)
if mode == 0:
bot.new_auto_mod()
elif mode == 1:
check_status(bot)
while bot.self_following - bot.self_follower > 200:
unfollow_protocol(bot)
time.sleep(10 * 60)
check_status(bot)
while bot.self_following - bot.self_follower < 400:
while len(bot.user_info_list) < 50:
feed_scanner(bot)
time.sleep(5 * 60)
follow_protocol(bot)
time.sleep(10 * 60)
check_status(bot)
elif mode == 2:
bot.bot_mode = 1
bot.new_auto_mod()
elif mode == 3:
unfollow_protocol(bot)
time.sleep(10 * 60)
elif mode == 4:
feed_scanner(bot)
time.sleep(60)
follow_protocol(bot)
time.sleep(10 * 60)
elif mode == 5:
bot.bot_mode = 2
unfollow_protocol(bot)
else:
print("Wrong mode!")
| 42.276786
| 569
| 0.563675
|
import os
import time
from src import InstaBot
from src.check_status import check_status
from src.feed_scanner import feed_scanner
from src.follow_protocol import follow_protocol
from src.unfollow_protocol import unfollow_protocol
bot = InstaBot(
login="",
password="",
like_per_day=600,
comments_per_day=200,
tag_list=['follow4follow', 'f4f', 'cute', 'l:212999109','party','time','luxary','happy','birthday','robatkarim','eshq','dostdashtani','boro','dawshi','nab','love','party','smile','style','eslamshahr','parand','like','dawshi','lovely','instagramers','socialenvy','اینستاگرام','ایران','فالوور','لایک','لاکچری','بام تهران','کرج','محرم','تاسوعا','عاشورا','مشهد','عکس','عکاسی','ویدیو','فالوبک','photographer','Desine','karaj','teh','tehran','mashhad','robatkarim','parand','پرند','park','خمینی','esfahan','arak','parkmoalem','پارک معلم','قلعه حسن خان','تجریش','دربند'],
tag_blacklist=['rain', 'thunderstorm'],
user_blacklist={},
max_like_for_one_tag=50,
follow_per_day=700,
follow_time=1 * 12,
unfollow_per_day=300,
unfollow_break_min=15,
unfollow_break_max=30,
log_mod=0,
proxy='',
comment_list=[["this", "the", "your"],
["photo", "picture", "pic", "shot", "snapshot"],
["is", "looks", "feels", "is really"],
["great", "super", "good", "very good", "good", "wow",
"WOW", "cool", "GREAT","magnificent", "magical",
"very cool", "stylish", "beautiful", "so beautiful",
"so stylish", "so professional", "lovely",
"so lovely", "very lovely", "glorious","so glorious",
"very glorious", "jazab", "excellent", "amazing"],
[".", "..", "...", "!", "!!", "!!!"]],
'store', 'sex', 'toko', 'jual', 'online', 'murah', 'jam', 'kaos',
'case', 'baju', 'fashion', 'corp', 'tas', 'butik', 'grosir', 'karpet',
'sosis', 'salon', 'skin', 'care', 'cloth', 'tech', 'rental', 'kamera',
'beauty', 'express', 'kredit', 'collection', 'impor', 'preloved',
'follow', 'follower', 'gain', '.id', '_id', 'bags'
],
unfollow_whitelist=['example_user_1', 'example_user_2'])
while True:
#print("#### MODE 3 = MODIFIED MODE : UNFOLLOW USERS WHO DON'T FOLLOW YOU BASED ON RECENT FEED")
protocol(bot)
time.sleep(10 * 60)
elif mode == 5:
bot.bot_mode = 2
unfollow_protocol(bot)
else:
print("Wrong mode!")
| true
| true
|
1c4862fb07b2c9db29a9081ca5087a83b0ba2309
| 1,396
|
py
|
Python
|
python2_guiding_test.py
|
ammumaddy/dhivya-railway
|
152a64e16ba07d62aa9aa159f503ed0b1a09d5b6
|
[
"MIT"
] | 97
|
2015-01-02T10:58:05.000Z
|
2022-03-11T14:00:52.000Z
|
python2_guiding_test.py
|
ammumaddy/dhivya-railway
|
152a64e16ba07d62aa9aa159f503ed0b1a09d5b6
|
[
"MIT"
] | 3
|
2020-02-14T15:55:21.000Z
|
2020-02-19T17:33:05.000Z
|
python2_guiding_test.py
|
ammumaddy/dhivya-railway
|
152a64e16ba07d62aa9aa159f503ed0b1a09d5b6
|
[
"MIT"
] | 58
|
2015-05-28T02:09:51.000Z
|
2022-03-20T16:37:40.000Z
|
"""
Equivalent of 'guiding_test.py' except for Python2.x, which comes as standard on many systems.
Run it with:
python python2_guiding_test.py
"""
import json
import subprocess
import unittest
import os
import urllib2, urllib
url = "http://127.0.0.1:8083"
interpreter = "python"
reservation_script = os.path.join("python", "reserve.py")
class TrainReservationTest(unittest.TestCase):
def test_reserve_seats_via_POST(self):
form_data = {"train_id": "express_2000", "seat_count": 4}
data = urllib.urlencode(form_data)
response = urllib2.urlopen(url + "/reserve", data=data).read()
reservation = json.loads(response)
assert "express_2000" == reservation["train_id"]
assert 4 == len(reservation["seats"])
assert "1A" == reservation["seats"][0]
assert "75bcd15" == reservation["booking_reference"]
def test_reserve_seats_via_cmd(self):
response = subprocess.check_output([interpreter, reservation_script, "express2000", "4"], stderr=subprocess.STDOUT, universal_newlines = True)
reservation = json.loads(response)
assert "express_2000" == reservation["train_id"]
assert 4 == len(reservation["seats"])
assert "1A" == reservation["seats"][0]
assert "75bcd15" == reservation["booking_reference"]
if __name__ == "__main__":
unittest.main()
| 30.347826
| 150
| 0.670487
|
import json
import subprocess
import unittest
import os
import urllib2, urllib
url = "http://127.0.0.1:8083"
interpreter = "python"
reservation_script = os.path.join("python", "reserve.py")
class TrainReservationTest(unittest.TestCase):
def test_reserve_seats_via_POST(self):
form_data = {"train_id": "express_2000", "seat_count": 4}
data = urllib.urlencode(form_data)
response = urllib2.urlopen(url + "/reserve", data=data).read()
reservation = json.loads(response)
assert "express_2000" == reservation["train_id"]
assert 4 == len(reservation["seats"])
assert "1A" == reservation["seats"][0]
assert "75bcd15" == reservation["booking_reference"]
def test_reserve_seats_via_cmd(self):
response = subprocess.check_output([interpreter, reservation_script, "express2000", "4"], stderr=subprocess.STDOUT, universal_newlines = True)
reservation = json.loads(response)
assert "express_2000" == reservation["train_id"]
assert 4 == len(reservation["seats"])
assert "1A" == reservation["seats"][0]
assert "75bcd15" == reservation["booking_reference"]
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c4863108b4acd15cabb6c18697226bdc4fae51a
| 7,174
|
py
|
Python
|
causallib/contrib/tests/test_shared_sparsity_selection.py
|
liranszlak/causallib
|
2636149f6b1e307672aff638a53f8eaf2be56bc9
|
[
"Apache-2.0"
] | 350
|
2019-06-19T15:56:19.000Z
|
2022-03-28T23:47:46.000Z
|
causallib/contrib/tests/test_shared_sparsity_selection.py
|
liranszlak/causallib
|
2636149f6b1e307672aff638a53f8eaf2be56bc9
|
[
"Apache-2.0"
] | 13
|
2019-08-14T22:04:21.000Z
|
2022-03-14T07:44:12.000Z
|
causallib/contrib/tests/test_shared_sparsity_selection.py
|
liranszlak/causallib
|
2636149f6b1e307672aff638a53f8eaf2be56bc9
|
[
"Apache-2.0"
] | 48
|
2019-11-02T16:40:56.000Z
|
2022-02-09T12:55:12.000Z
|
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import ConvergenceWarning
from causallib.contrib.shared_sparsity_selection import SharedSparsityConfounderSelection
from causallib.tests.test_confounder_selection import _TestConfounderSelection
class TestSharedSparsitySelection(_TestConfounderSelection):
def make_xay(self, n_confounders_a, n_max_confounders_y, n_samples, xay_cols=10, seed=None):
# rng = np.random.default_rng(seed)
if seed:
np.random.seed(seed)
X, a = make_classification(
n_samples=n_samples,
n_features=xay_cols + 1,
n_informative=int(min(n_confounders_a, xay_cols)),
n_redundant=0, n_repeated=0, class_sep=10.0,
n_clusters_per_class=1,
shuffle=False, # random_state=seed
)
y_confounder_indicator = np.zeros(X.shape[1], dtype=bool)
y_confounder_indicator[:int(min(n_max_confounders_y, xay_cols))] = 1
np.random.shuffle(y_confounder_indicator)
y = X[:, y_confounder_indicator] @ np.random.normal(size=y_confounder_indicator.sum())
X = StandardScaler().fit_transform(X)
X = pd.DataFrame(X, columns=["x_" + str(i) for i in range(X.shape[1])])
a = pd.Series(a)
y = pd.Series(y)
return X, a, y
def test_covariate_subset(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
true_subset_confounders = ['x_0', 'x_2'] # Matches random seed: 6
covariates_subset = ['x_0', 'x_2', f'x_{X.shape[1] - 1}', f'x_{X.shape[1] - 3}']
sss = SharedSparsityConfounderSelection(covariates=covariates_subset)
sss = self.ensure_covariate_subset(sss, X, a, y, true_subset_confounders)
np.testing.assert_array_equal(covariates_subset, sss.covariates)
self.assertEqual(len(covariates_subset), sss.selector_.theta_.shape[0])
self.assertEqual(2, sss.selector_.theta_.shape[1]) # Two treatments
self.assertEqual(len(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 0]) > 0))
self.assertEqual(len(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 1]) > 0))
def test_covariate_subset_binary(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
true_subset_confounders = ['x_0', 'x_2'] # Matches random seed: 6
covariates_subset = ['x_0', 'x_2', f'x_{X.shape[1] - 1}', f'x_{X.shape[1] - 3}']
# Convert to binary:
true_subset_confounders = X.columns.isin(true_subset_confounders)
covariates_subset = X.columns.isin(covariates_subset)
sss = SharedSparsityConfounderSelection(covariates=covariates_subset)
sss = self.ensure_covariate_subset_binary(sss, X, a, y, true_subset_confounders)
np.testing.assert_array_equal(covariates_subset, sss.covariates)
self.assertEqual(covariates_subset.sum(), sss.selector_.theta_.shape[0])
self.assertEqual(2, sss.selector_.theta_.shape[1]) # Two treatments
self.assertEqual(sum(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 0]) > 0))
self.assertEqual(sum(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 1]) > 0))
def test_alphas(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
alphas = [0, 1]
for alpha in alphas:
sss = SharedSparsityConfounderSelection(mcp_alpha=alpha)
sss.fit(X, a, y)
Xt = sss.transform(X)
self.assertSetEqual(set(Xt.columns), {'x_0', 'x_2'})
with self.assertRaises(AssertionError):
sss = SharedSparsityConfounderSelection(mcp_alpha=-1)
sss.fit(X, a, y)
with self.subTest("shrinkage"):
strong = SharedSparsityConfounderSelection(mcp_alpha=0.1).fit(X, a, y).selector_.theta_
weak = SharedSparsityConfounderSelection(mcp_alpha=100).fit(X, a, y).selector_.theta_
self.assertLess(np.linalg.norm(strong), np.linalg.norm(weak))
def test_lambdas(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
with self.subTest("Automatic (default) lambda"):
sss = SharedSparsityConfounderSelection(mcp_lambda="auto")
sss.fit(X, a, y)
expected = 0.2 * np.sqrt(2 * np.log(X.shape[1]) / (X.shape[0] / 2))
self.assertAlmostEqual(sss.selector_.lmda_, expected)
with self.subTest("Pre-specified lambda"):
lmda = 2.1
sss = SharedSparsityConfounderSelection(mcp_lambda=lmda)
sss.fit(X, a, y)
self.assertEqual(sss.selector_.lmda_, lmda)
with self.subTest("Illegal lambda"):
with self.assertRaises(AssertionError):
sss = SharedSparsityConfounderSelection(mcp_lambda=-1)
sss.fit(X, a, y)
with self.subTest("shrinkage"):
weak = SharedSparsityConfounderSelection(mcp_lambda=0.1).fit(X, a, y).selector_.theta_
strong = SharedSparsityConfounderSelection(mcp_lambda=1).fit(X, a, y).selector_.theta_
self.assertLess(np.linalg.norm(strong), np.linalg.norm(weak))
def test_max_iter(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
with self.subTest("Force convergence warning"):
sss = SharedSparsityConfounderSelection(max_iter=2)
with self.assertWarns(ConvergenceWarning):
sss.fit(X, a, y)
# with self.subTest("Convergence happens in less than max_iter"):
# import timeit
# n_repeats = 50
# times = []
# for max_iter in [10000, 100000]:
# # Algorithm will converge long before exceeding `max_iter` and so time should remain similar
# sss = SharedSparsityConfounderSelection(max_iter=max_iter)
# avg_time = timeit.timeit(lambda: sss.fit(X, a, y), number=n_repeats)
# times.append(avg_time)
# self.assertAlmostEqual(times[0], times[1], places=1)
def test_final_selection(self):
"""Test against current implementation to allow for refactoring"""
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
sss = SharedSparsityConfounderSelection()
sss.fit(X, a, y)
Xt = sss.transform(X)
self.assertSetEqual(set(Xt.columns), {'x_0', 'x_2'})
def test_importance_getter(self):
from causallib.preprocessing.confounder_selection import _get_feature_importances
X, a, y = self.make_xay(2, 2, xay_cols=2, n_samples=100, seed=1)
sss = SharedSparsityConfounderSelection()
sss.fit(X, a, y)
importance = _get_feature_importances(sss, sss.importance_getter)
expected = np.array([[0.0, 0.0],
[5.86299046, 5.94375083],
[0.0, 0.0]
])
np.testing.assert_array_almost_equal(expected.transpose(), importance)
np.testing.assert_array_almost_equal(sss.selector_.theta_.transpose(), importance)
| 47.197368
| 110
| 0.64678
|
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import ConvergenceWarning
from causallib.contrib.shared_sparsity_selection import SharedSparsityConfounderSelection
from causallib.tests.test_confounder_selection import _TestConfounderSelection
class TestSharedSparsitySelection(_TestConfounderSelection):
def make_xay(self, n_confounders_a, n_max_confounders_y, n_samples, xay_cols=10, seed=None):
if seed:
np.random.seed(seed)
X, a = make_classification(
n_samples=n_samples,
n_features=xay_cols + 1,
n_informative=int(min(n_confounders_a, xay_cols)),
n_redundant=0, n_repeated=0, class_sep=10.0,
n_clusters_per_class=1,
shuffle=False,
)
y_confounder_indicator = np.zeros(X.shape[1], dtype=bool)
y_confounder_indicator[:int(min(n_max_confounders_y, xay_cols))] = 1
np.random.shuffle(y_confounder_indicator)
y = X[:, y_confounder_indicator] @ np.random.normal(size=y_confounder_indicator.sum())
X = StandardScaler().fit_transform(X)
X = pd.DataFrame(X, columns=["x_" + str(i) for i in range(X.shape[1])])
a = pd.Series(a)
y = pd.Series(y)
return X, a, y
def test_covariate_subset(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
true_subset_confounders = ['x_0', 'x_2']
covariates_subset = ['x_0', 'x_2', f'x_{X.shape[1] - 1}', f'x_{X.shape[1] - 3}']
sss = SharedSparsityConfounderSelection(covariates=covariates_subset)
sss = self.ensure_covariate_subset(sss, X, a, y, true_subset_confounders)
np.testing.assert_array_equal(covariates_subset, sss.covariates)
self.assertEqual(len(covariates_subset), sss.selector_.theta_.shape[0])
self.assertEqual(2, sss.selector_.theta_.shape[1])
self.assertEqual(len(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 0]) > 0))
self.assertEqual(len(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 1]) > 0))
def test_covariate_subset_binary(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
true_subset_confounders = ['x_0', 'x_2']
covariates_subset = ['x_0', 'x_2', f'x_{X.shape[1] - 1}', f'x_{X.shape[1] - 3}']
true_subset_confounders = X.columns.isin(true_subset_confounders)
covariates_subset = X.columns.isin(covariates_subset)
sss = SharedSparsityConfounderSelection(covariates=covariates_subset)
sss = self.ensure_covariate_subset_binary(sss, X, a, y, true_subset_confounders)
np.testing.assert_array_equal(covariates_subset, sss.covariates)
self.assertEqual(covariates_subset.sum(), sss.selector_.theta_.shape[0])
self.assertEqual(2, sss.selector_.theta_.shape[1])
self.assertEqual(sum(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 0]) > 0))
self.assertEqual(sum(true_subset_confounders), np.sum(np.abs(sss.selector_.theta_[:, 1]) > 0))
def test_alphas(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
alphas = [0, 1]
for alpha in alphas:
sss = SharedSparsityConfounderSelection(mcp_alpha=alpha)
sss.fit(X, a, y)
Xt = sss.transform(X)
self.assertSetEqual(set(Xt.columns), {'x_0', 'x_2'})
with self.assertRaises(AssertionError):
sss = SharedSparsityConfounderSelection(mcp_alpha=-1)
sss.fit(X, a, y)
with self.subTest("shrinkage"):
strong = SharedSparsityConfounderSelection(mcp_alpha=0.1).fit(X, a, y).selector_.theta_
weak = SharedSparsityConfounderSelection(mcp_alpha=100).fit(X, a, y).selector_.theta_
self.assertLess(np.linalg.norm(strong), np.linalg.norm(weak))
def test_lambdas(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
with self.subTest("Automatic (default) lambda"):
sss = SharedSparsityConfounderSelection(mcp_lambda="auto")
sss.fit(X, a, y)
expected = 0.2 * np.sqrt(2 * np.log(X.shape[1]) / (X.shape[0] / 2))
self.assertAlmostEqual(sss.selector_.lmda_, expected)
with self.subTest("Pre-specified lambda"):
lmda = 2.1
sss = SharedSparsityConfounderSelection(mcp_lambda=lmda)
sss.fit(X, a, y)
self.assertEqual(sss.selector_.lmda_, lmda)
with self.subTest("Illegal lambda"):
with self.assertRaises(AssertionError):
sss = SharedSparsityConfounderSelection(mcp_lambda=-1)
sss.fit(X, a, y)
with self.subTest("shrinkage"):
weak = SharedSparsityConfounderSelection(mcp_lambda=0.1).fit(X, a, y).selector_.theta_
strong = SharedSparsityConfounderSelection(mcp_lambda=1).fit(X, a, y).selector_.theta_
self.assertLess(np.linalg.norm(strong), np.linalg.norm(weak))
def test_max_iter(self):
X, a, y = self.make_xay(6, 4, n_samples=100, seed=1)
with self.subTest("Force convergence warning"):
sss = SharedSparsityConfounderSelection(max_iter=2)
with self.assertWarns(ConvergenceWarning):
sss.fit(X, a, y)
self.make_xay(6, 4, n_samples=100, seed=1)
sss = SharedSparsityConfounderSelection()
sss.fit(X, a, y)
Xt = sss.transform(X)
self.assertSetEqual(set(Xt.columns), {'x_0', 'x_2'})
def test_importance_getter(self):
from causallib.preprocessing.confounder_selection import _get_feature_importances
X, a, y = self.make_xay(2, 2, xay_cols=2, n_samples=100, seed=1)
sss = SharedSparsityConfounderSelection()
sss.fit(X, a, y)
importance = _get_feature_importances(sss, sss.importance_getter)
expected = np.array([[0.0, 0.0],
[5.86299046, 5.94375083],
[0.0, 0.0]
])
np.testing.assert_array_almost_equal(expected.transpose(), importance)
np.testing.assert_array_almost_equal(sss.selector_.theta_.transpose(), importance)
| true
| true
|
1c4864c7568edd42683a2109677a37b005cc8076
| 48
|
py
|
Python
|
sortedm2m/__init__.py
|
Freston5/daysiweb
|
95751b467f0e76c3cb60bb09693c59af9c74ded2
|
[
"MIT"
] | null | null | null |
sortedm2m/__init__.py
|
Freston5/daysiweb
|
95751b467f0e76c3cb60bb09693c59af9c74ded2
|
[
"MIT"
] | null | null | null |
sortedm2m/__init__.py
|
Freston5/daysiweb
|
95751b467f0e76c3cb60bb09693c59af9c74ded2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__version__ = '1.3.3'
| 9.6
| 23
| 0.5
|
__version__ = '1.3.3'
| true
| true
|
1c4865911cd82746699b01afdfe934853aeba6b9
| 3,337
|
py
|
Python
|
selenium_pipeline/hyatt_hotels_fetch_addresses.py
|
Praneethvvs/CircleCi_FastApi
|
0aec14fcffcfe7053cf7db688728347feea26f70
|
[
"MIT"
] | null | null | null |
selenium_pipeline/hyatt_hotels_fetch_addresses.py
|
Praneethvvs/CircleCi_FastApi
|
0aec14fcffcfe7053cf7db688728347feea26f70
|
[
"MIT"
] | null | null | null |
selenium_pipeline/hyatt_hotels_fetch_addresses.py
|
Praneethvvs/CircleCi_FastApi
|
0aec14fcffcfe7053cf7db688728347feea26f70
|
[
"MIT"
] | null | null | null |
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import traceback
import itertools
from selenium.webdriver.common.keys import Keys
DRIVER_PATH = r"C:\Program Files (x86)\chromedriver.exe"
class Address_Scraping():
def __init__(self):
self.chrome_driver = webdriver.Chrome(DRIVER_PATH)
def get_hyperlinks(self):
self.chrome_driver.get("https://www.hyatt.com/explore-hotels")
try:
# The WebDriverWait method waits until it locates the presence of the element"
WebDriverWait(self.chrome_driver, 20).until(
EC.presence_of_element_located((By.CLASS_NAME, "countries.b-ph0")))
us_add = self.chrome_driver.find_element_by_xpath(
"//ul[@class='countries b-ph0']//li[@data-js-country='United States']")
links = us_add.find_elements_by_tag_name('a')
hyperlinks = [link_field.get_attribute("href") for link_field in links]
return hyperlinks
except:
print("error")
traceback.print_exc()
time.sleep(2)
# chrome_driver.quit()
def fetch_addresses_to_df(self):
links_list = self.get_hyperlinks()
# assert links_list != []
results_list = []
error_links_list = []
for index, link in enumerate(links_list, start=1):
if index == 5:
break
try:
print("passing through link ------------>", link)
self.chrome_driver.get(link)
address_div = self.chrome_driver.find_elements_by_xpath(
"//div[@class='site-info-container b-mt2 b-mb2 b-mt0@sm b-mb0@sm']//a[@class='site-info-address b-d-inline-block b-d-flex@lg b-d-inline-block@xl b-mb2@sm b-mb1@md b-mr2']//span[@class='b-d-inline-block']")
phone_num_div = self.chrome_driver.find_element_by_xpath(
"//div[@class='site-info-container b-mt2 b-mb2 b-mt0@sm b-mb0@sm']//a[@class='site-info-phone b-d-inline-block b-d-block@lg b-mb1@sm b-mr2']//span[@class='hover-border b-d-none b-d-inline@lg']")
address = "".join(map(lambda x: x.text, address_div))
phone_number = ", " + phone_num_div.text
# self.chrome_driver.find_element_by_partial_link_text("Hoover, Alabama, United States, 35244").click()
# time.sleep(3)
# self.chrome_driver.close()
# get_url = self.chrome_driver.current_url
# print(get_url)
# exit()
combined_output = "".join([address, phone_number])
results_list.append(combined_output.split(","))
except:
traceback.print_exc()
error_links_list.append(link)
final_df = pd.DataFrame(results_list, columns=["street", "city", "state", "country", "zip", "phone_number"],
index=None)
final_df.to_excel("hyatt_hotels.xlsx", index=False)
if __name__ == "__main__":
Address_Scraping().fetch_addresses_to_df()
| 40.695122
| 225
| 0.620617
|
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import traceback
import itertools
from selenium.webdriver.common.keys import Keys
DRIVER_PATH = r"C:\Program Files (x86)\chromedriver.exe"
class Address_Scraping():
def __init__(self):
self.chrome_driver = webdriver.Chrome(DRIVER_PATH)
def get_hyperlinks(self):
self.chrome_driver.get("https://www.hyatt.com/explore-hotels")
try:
WebDriverWait(self.chrome_driver, 20).until(
EC.presence_of_element_located((By.CLASS_NAME, "countries.b-ph0")))
us_add = self.chrome_driver.find_element_by_xpath(
"//ul[@class='countries b-ph0']//li[@data-js-country='United States']")
links = us_add.find_elements_by_tag_name('a')
hyperlinks = [link_field.get_attribute("href") for link_field in links]
return hyperlinks
except:
print("error")
traceback.print_exc()
time.sleep(2)
# chrome_driver.quit()
def fetch_addresses_to_df(self):
links_list = self.get_hyperlinks()
# assert links_list != []
results_list = []
error_links_list = []
for index, link in enumerate(links_list, start=1):
if index == 5:
break
try:
print("passing through link ------------>", link)
self.chrome_driver.get(link)
address_div = self.chrome_driver.find_elements_by_xpath(
"//div[@class='site-info-container b-mt2 b-mb2 b-mt0@sm b-mb0@sm']//a[@class='site-info-address b-d-inline-block b-d-flex@lg b-d-inline-block@xl b-mb2@sm b-mb1@md b-mr2']//span[@class='b-d-inline-block']")
phone_num_div = self.chrome_driver.find_element_by_xpath(
"//div[@class='site-info-container b-mt2 b-mb2 b-mt0@sm b-mb0@sm']//a[@class='site-info-phone b-d-inline-block b-d-block@lg b-mb1@sm b-mr2']//span[@class='hover-border b-d-none b-d-inline@lg']")
address = "".join(map(lambda x: x.text, address_div))
phone_number = ", " + phone_num_div.text
# self.chrome_driver.find_element_by_partial_link_text("Hoover, Alabama, United States, 35244").click()
# time.sleep(3)
# self.chrome_driver.close()
# get_url = self.chrome_driver.current_url
# print(get_url)
# exit()
combined_output = "".join([address, phone_number])
results_list.append(combined_output.split(","))
except:
traceback.print_exc()
error_links_list.append(link)
final_df = pd.DataFrame(results_list, columns=["street", "city", "state", "country", "zip", "phone_number"],
index=None)
final_df.to_excel("hyatt_hotels.xlsx", index=False)
if __name__ == "__main__":
Address_Scraping().fetch_addresses_to_df()
| true
| true
|
1c4866945a2ab251b49fcecdc203276c56d51585
| 379
|
py
|
Python
|
Day28/Solution.py
|
MarceloKabbalah/30-Days-Of-Code
|
094de037347b00105c5385add9de7bf605277e16
|
[
"MIT"
] | null | null | null |
Day28/Solution.py
|
MarceloKabbalah/30-Days-Of-Code
|
094de037347b00105c5385add9de7bf605277e16
|
[
"MIT"
] | null | null | null |
Day28/Solution.py
|
MarceloKabbalah/30-Days-Of-Code
|
094de037347b00105c5385add9de7bf605277e16
|
[
"MIT"
] | null | null | null |
#!/bin/python
# compatible with python3
import sys
import re
N = int(input().strip())
names = []
for a0 in range(N):
firstName,emailID = input().strip().split(' ')
firstName,emailID = [str(firstName),str(emailID)]
match = re.search(r'[\w\.-]+@gmail.com', emailID)
if match:
names.append(firstName)
names.sort()
for name in names:
print( name )
| 19.947368
| 53
| 0.62533
|
import sys
import re
N = int(input().strip())
names = []
for a0 in range(N):
firstName,emailID = input().strip().split(' ')
firstName,emailID = [str(firstName),str(emailID)]
match = re.search(r'[\w\.-]+@gmail.com', emailID)
if match:
names.append(firstName)
names.sort()
for name in names:
print( name )
| true
| true
|
1c486728773d0fecc3487bf43c095f48ffa913bc
| 1,645
|
py
|
Python
|
detailsScrape/oilymoistd/oilymoistd17.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | null | null | null |
detailsScrape/oilymoistd/oilymoistd17.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | 2
|
2021-02-03T01:55:13.000Z
|
2021-04-30T12:46:33.000Z
|
detailsScrape/oilymoistd/oilymoistd17.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | null | null | null |
import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO oilymoistd (about, rate, top, comment, dari) VALUES (%s, %s, %s, %s, %s)"""
def crawl_url(pageUrl, moistoilyd_arr):
url = 'https://www.skinstore.com/high-expectations-cannabis-facial-oil-32-cannabis-sativa-seed-oil-1-oz-30ml/12289881.html'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
#######################################################for product 1############################################################################
moist = soup.find_all('div', class_='primary-wrap column-row')
try:
for moistd in moist :
about = moistd.find("div",{"class":"productDescription_synopsisContent"}).get_text().strip()
rate = moistd.find("span",{"class":"visually-hidden productReviews_aggregateRating_hiddenLabel"}).get_text().strip()
top = moistd.find("h2",{"class":"productReviews_topReviewsTitle"}).get_text().strip()
comment = moistd.find("p",{"class":"productReviews_topReviewsExcerpt"}).get_text().strip()
dari = moistd.find("div",{"class":"productReviews_footerDateAndName"}).get_text().strip()
moistoilyd_arr.append((about, rate, top, comment, dari))
finally:
return moistoilyd_arr
moistoilyd_arr = crawl_url("", [])
print(len(moistoilyd_arr))
cursor.executemany(sql, moistoilyd_arr)
conn.commit()
cursor.close()
conn.close()
| 36.555556
| 148
| 0.630395
|
import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO oilymoistd (about, rate, top, comment, dari) VALUES (%s, %s, %s, %s, %s)"""
def crawl_url(pageUrl, moistoilyd_arr):
url = 'https://www.skinstore.com/high-expectations-cannabis-facial-oil-32-cannabis-sativa-seed-oil-1-oz-30ml/12289881.html'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
| true
| true
|
1c486733754ce24861ac7025b7b44eb64a9b0479
| 742
|
py
|
Python
|
api/endpoints/fruit/get.py
|
DarkbordermanTemplate/fastapi-redis-sqlalchemy
|
80fbdc419b19592b08bc2227c9d7c2925b7b91e2
|
[
"BSD-2-Clause"
] | 5
|
2021-02-08T06:37:48.000Z
|
2021-09-12T14:55:34.000Z
|
api/endpoints/fruit/get.py
|
DarkbordermanTemplate/fastapi-redis-sqlalchemy
|
80fbdc419b19592b08bc2227c9d7c2925b7b91e2
|
[
"BSD-2-Clause"
] | null | null | null |
api/endpoints/fruit/get.py
|
DarkbordermanTemplate/fastapi-redis-sqlalchemy
|
80fbdc419b19592b08bc2227c9d7c2925b7b91e2
|
[
"BSD-2-Clause"
] | null | null | null |
from cache import REDIS
from common.enums import EnumResponse
from fastapi.responses import JSONResponse
from loguru import logger
DOC = {
200: {
"description": "API response successfully",
"content": {"application/json": {"example": {"name": "apple"}}},
},
400: EnumResponse.BAD_REQUEST.value.doc,
500: EnumResponse.INTERNAL_SERVER_ERROR.value.doc,
}
def get(name: str):
try:
if REDIS.get(name) is None:
return EnumResponse.BAD_REQUEST.value.response
return JSONResponse({"name": name, "count": int(REDIS.get(name).decode())}, 200) # type: ignore
except Exception as error:
logger.warning(error)
return EnumResponse.INTERNAL_SERVER_ERROR.value.response
| 30.916667
| 104
| 0.679245
|
from cache import REDIS
from common.enums import EnumResponse
from fastapi.responses import JSONResponse
from loguru import logger
DOC = {
200: {
"description": "API response successfully",
"content": {"application/json": {"example": {"name": "apple"}}},
},
400: EnumResponse.BAD_REQUEST.value.doc,
500: EnumResponse.INTERNAL_SERVER_ERROR.value.doc,
}
def get(name: str):
try:
if REDIS.get(name) is None:
return EnumResponse.BAD_REQUEST.value.response
return JSONResponse({"name": name, "count": int(REDIS.get(name).decode())}, 200)
except Exception as error:
logger.warning(error)
return EnumResponse.INTERNAL_SERVER_ERROR.value.response
| true
| true
|
1c48680130184ca429dd07e0772847c963db0ed3
| 368
|
py
|
Python
|
LVM-Tool/function.py
|
Shashwatsingh22/Linux-Automated-Tools
|
2e9c0f064ac70571a1a59e30f69e24d8ae05616a
|
[
"MIT"
] | null | null | null |
LVM-Tool/function.py
|
Shashwatsingh22/Linux-Automated-Tools
|
2e9c0f064ac70571a1a59e30f69e24d8ae05616a
|
[
"MIT"
] | null | null | null |
LVM-Tool/function.py
|
Shashwatsingh22/Linux-Automated-Tools
|
2e9c0f064ac70571a1a59e30f69e24d8ae05616a
|
[
"MIT"
] | null | null | null |
from pyfiglet import Figlet
def render(text,style,num):
f=Figlet(font=style)
print('\n')
print(f.renderText(text))
def sh_menu():
print("""\t\t\t
Press 1: Create.
Press 2: Complete Detail.
Press 3: Specific.
Press 4: Exit.
\n
Enter the Choice: """,end=" ")
| 24.533333
| 44
| 0.486413
|
from pyfiglet import Figlet
def render(text,style,num):
f=Figlet(font=style)
print('\n')
print(f.renderText(text))
def sh_menu():
print("""\t\t\t
Press 1: Create.
Press 2: Complete Detail.
Press 3: Specific.
Press 4: Exit.
\n
Enter the Choice: """,end=" ")
| true
| true
|
1c4868035b91b83cc30e54e14d221dc6f5c6ac0e
| 2,474
|
py
|
Python
|
src/appier/test/exceptions.py
|
veryprofessionaldodo/appier
|
1a0c146753428a3d1a8c484467766ee871047757
|
[
"Apache-2.0"
] | null | null | null |
src/appier/test/exceptions.py
|
veryprofessionaldodo/appier
|
1a0c146753428a3d1a8c484467766ee871047757
|
[
"Apache-2.0"
] | null | null | null |
src/appier/test/exceptions.py
|
veryprofessionaldodo/appier
|
1a0c146753428a3d1a8c484467766ee871047757
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import unittest
import appier
class ExceptionsTest(unittest.TestCase):
def test_encoding(self):
exception = appier.AppierException(message = "Olá Mundo")
self.assertEqual(str(exception), "Olá Mundo")
message_u = appier.legacy.u("Olá Mundo")
exception = appier.AppierException(message = message_u)
self.assertEqual(str(exception), "Olá Mundo")
self.assertEqual(appier.legacy.UNICODE(exception), appier.legacy.u("Olá Mundo"))
def test_validation(self):
errors = dict(name = ["is empty"])
error = appier.ValidationError(errors, object)
errors_s = error.errors_s()
self.assertEqual(errors_s, "name => is empty")
errors = dict(name = ["Olá Mundo"])
error = appier.ValidationError(errors, object)
errors_s = error.errors_s()
self.assertEqual(errors_s, appier.legacy.u("name => Olá Mundo"))
errors = dict(name = [appier.legacy.u("Olá Mundo")])
error = appier.ValidationError(errors, object)
errors_s = error.errors_s()
self.assertEqual(errors_s, appier.legacy.u("name => Olá Mundo"))
| 33.890411
| 89
| 0.67017
|
__author__ = "João Magalhães <joamag@hive.pt>"
__version__ = "1.0.0"
__revision__ = "$LastChangedRevision$"
__date__ = "$LastChangedDate$"
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
__license__ = "Apache License, Version 2.0"
import unittest
import appier
class ExceptionsTest(unittest.TestCase):
def test_encoding(self):
exception = appier.AppierException(message = "Olá Mundo")
self.assertEqual(str(exception), "Olá Mundo")
message_u = appier.legacy.u("Olá Mundo")
exception = appier.AppierException(message = message_u)
self.assertEqual(str(exception), "Olá Mundo")
self.assertEqual(appier.legacy.UNICODE(exception), appier.legacy.u("Olá Mundo"))
def test_validation(self):
errors = dict(name = ["is empty"])
error = appier.ValidationError(errors, object)
errors_s = error.errors_s()
self.assertEqual(errors_s, "name => is empty")
errors = dict(name = ["Olá Mundo"])
error = appier.ValidationError(errors, object)
errors_s = error.errors_s()
self.assertEqual(errors_s, appier.legacy.u("name => Olá Mundo"))
errors = dict(name = [appier.legacy.u("Olá Mundo")])
error = appier.ValidationError(errors, object)
errors_s = error.errors_s()
self.assertEqual(errors_s, appier.legacy.u("name => Olá Mundo"))
| true
| true
|
1c48683aa3013e98712b3e7bf3aafb554f2f1671
| 955
|
py
|
Python
|
cybox/objects/uri_object.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | null | null | null |
cybox/objects/uri_object.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | null | null | null |
cybox/objects/uri_object.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | 1
|
2019-04-16T18:37:32.000Z
|
2019-04-16T18:37:32.000Z
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.uri_object as uri_binding
from cybox.common import ObjectProperties, AnyURI
class URI(ObjectProperties):
_binding = uri_binding
_binding_class = uri_binding.URIObjectType
_namespace = 'http://cybox.mitre.org/objects#URIObject-2'
_XSI_NS = 'URIObj'
_XSI_TYPE = "URIObjectType"
TYPE_URL = "URL"
TYPE_GENERAL = "General URN"
TYPE_DOMAIN = "Domain Name"
TYPES = (TYPE_URL, TYPE_GENERAL, TYPE_DOMAIN)
value = cybox.TypedField("Value", AnyURI)
type_ = cybox.TypedField("type_", key_name="type")
def __init__(self, value=None, type_=None):
super(URI, self).__init__()
self.value = value
self.type_ = type_
def __str__(self):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
return unicode(self.value)
| 26.527778
| 65
| 0.690052
|
import cybox
import cybox.bindings.uri_object as uri_binding
from cybox.common import ObjectProperties, AnyURI
class URI(ObjectProperties):
_binding = uri_binding
_binding_class = uri_binding.URIObjectType
_namespace = 'http://cybox.mitre.org/objects#URIObject-2'
_XSI_NS = 'URIObj'
_XSI_TYPE = "URIObjectType"
TYPE_URL = "URL"
TYPE_GENERAL = "General URN"
TYPE_DOMAIN = "Domain Name"
TYPES = (TYPE_URL, TYPE_GENERAL, TYPE_DOMAIN)
value = cybox.TypedField("Value", AnyURI)
type_ = cybox.TypedField("type_", key_name="type")
def __init__(self, value=None, type_=None):
super(URI, self).__init__()
self.value = value
self.type_ = type_
def __str__(self):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
return unicode(self.value)
| true
| true
|
1c486ab1f4c57339efdceb4e5602b8c3f5c54e15
| 1,317
|
py
|
Python
|
FastAPI/app/main.py
|
bing9/raspberrypi_projects
|
5ca1b8101517f856af3f86a49518a89c1d8e29f9
|
[
"MIT"
] | null | null | null |
FastAPI/app/main.py
|
bing9/raspberrypi_projects
|
5ca1b8101517f856af3f86a49518a89c1d8e29f9
|
[
"MIT"
] | null | null | null |
FastAPI/app/main.py
|
bing9/raspberrypi_projects
|
5ca1b8101517f856af3f86a49518a89c1d8e29f9
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI, UploadFile
# from typing import Optional
# from pydantic import BaseModel
from subprocess import Popen #check_output
# from starlette.responses import
# from dotenv import load_dotenv
import os
app = FastAPI()
@app.get('/')
def index():
return 'My Personal Server'
# @app.get('/apple-touch-icon-120x120-precomposed.png')
# def image_png():
# with open('./FastAPI/app/apple-touch-icon-120x120-precomposed.png', 'r') as file:
# img = file.read()
# return UploadFile(filename="apple-touch-icon-120x120-precomposed.png", file=img, content_type="image/png")
@app.get('/start_kodi')
def start_kodi():
# load_dotenv()
# return check_output(['sshpass', '-p', os.environ['SSHPASS'], 'ssh', '-p', '1990', 'jetson@192.168.0.170', 'kd'])
try:
Popen(["startx", "kodi"])
return {'successfully launched kodi'}
except:
return {'failed to launch kodi'}
@app.get('/start_chrome')
def start_chrome():
# load_dotenv()
# return check_output(['sshpass', '-p', os.environ['SSHPASS'], 'ssh', '-p', '1990', 'jetson@192.168.0.170', 'kd'])
try:
Popen(["startx", "chromium-browser"])
return {'successfully launched chrome'}
except:
return {'failed to launch chrome'}
| 31.357143
| 119
| 0.631739
|
from fastapi import FastAPI, UploadFile
from subprocess import Popen
import os
app = FastAPI()
@app.get('/')
def index():
return 'My Personal Server'
@app.get('/start_kodi')
def start_kodi():
try:
Popen(["startx", "kodi"])
return {'successfully launched kodi'}
except:
return {'failed to launch kodi'}
@app.get('/start_chrome')
def start_chrome():
try:
Popen(["startx", "chromium-browser"])
return {'successfully launched chrome'}
except:
return {'failed to launch chrome'}
| true
| true
|
1c486bab8c2b70b4200b22d717bec1830d4b9e0e
| 1,297
|
py
|
Python
|
solum/objects/assembly.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
solum/objects/assembly.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
solum/objects/assembly.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 - Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solum.objects import base
class Assembly(base.CrudMixin):
# Version 1.0: Initial version
VERSION = '1.0'
class AssemblyList(list, base.CrudListMixin):
"""List of Assemblies."""
class States(object):
QUEUED = 'QUEUED'
UNIT_TESTING = 'UNIT_TESTING'
UNIT_TESTING_FAILED = 'UNIT_TESTING_FAILED'
UNIT_TESTING_PASSED = 'UNIT_TESTING_PASSED'
BUILDING = 'BUILDING'
BUILT = 'BUILT'
DEPLOYING = 'DEPLOYING'
ERROR = 'ERROR'
READY = 'READY'
DELETING = 'DELETING'
ERROR_STACK_DELETE_FAILED = 'ERROR_STACK_DELETE_FAILED'
ERROR_STACK_CREATE_FAILED = 'ERROR_STACK_CREATE_FAILED'
ERROR_CODE_DEPLOYMENT = 'ERROR_CODE_DEPLOYMENT'
STARTING_APP = 'STARTING_APP'
| 30.880952
| 75
| 0.733231
|
from solum.objects import base
class Assembly(base.CrudMixin):
VERSION = '1.0'
class AssemblyList(list, base.CrudListMixin):
class States(object):
QUEUED = 'QUEUED'
UNIT_TESTING = 'UNIT_TESTING'
UNIT_TESTING_FAILED = 'UNIT_TESTING_FAILED'
UNIT_TESTING_PASSED = 'UNIT_TESTING_PASSED'
BUILDING = 'BUILDING'
BUILT = 'BUILT'
DEPLOYING = 'DEPLOYING'
ERROR = 'ERROR'
READY = 'READY'
DELETING = 'DELETING'
ERROR_STACK_DELETE_FAILED = 'ERROR_STACK_DELETE_FAILED'
ERROR_STACK_CREATE_FAILED = 'ERROR_STACK_CREATE_FAILED'
ERROR_CODE_DEPLOYMENT = 'ERROR_CODE_DEPLOYMENT'
STARTING_APP = 'STARTING_APP'
| true
| true
|
1c486cbfbc9842418bee088aeb5300aed2824063
| 29,399
|
py
|
Python
|
core/domain/story_services.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 2
|
2021-04-08T01:06:08.000Z
|
2021-06-02T08:20:13.000Z
|
core/domain/story_services.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | null | null | null |
core/domain/story_services.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 1
|
2020-12-11T06:56:31.000Z
|
2020-12-11T06:56:31.000Z
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on stories.
All functions here should be agnostic of how StoryModel objects are
stored in the database. In particular, the various query methods should
delegate to the Story model class. This will enable the story
storage model to be changed without affecting this module and others above it.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from constants import constants
from core.domain import android_validation_constants
from core.domain import caching_services
from core.domain import exp_fetchers
from core.domain import opportunity_services
from core.domain import rights_manager
from core.domain import story_domain
from core.domain import story_fetchers
from core.domain import suggestion_services
from core.domain import topic_fetchers
from core.platform import models
import feconf
import utils
(exp_models, story_models, user_models,) = models.Registry.import_models(
[models.NAMES.exploration, models.NAMES.story, models.NAMES.user])
def get_new_story_id():
"""Returns a new story id.
Returns:
str. A new story id.
"""
return story_models.StoryModel.get_new_id('')
def _create_story(committer_id, story, commit_message, commit_cmds):
"""Creates a new story.
Args:
committer_id: str. ID of the committer.
story: Story. The story domain object.
commit_message: str. A description of changes made to the story.
commit_cmds: list(StoryChange). A list of change commands made to the
given story.
"""
story.validate()
model = story_models.StoryModel(
id=story.id,
description=story.description,
title=story.title,
thumbnail_bg_color=story.thumbnail_bg_color,
thumbnail_filename=story.thumbnail_filename,
language_code=story.language_code,
story_contents_schema_version=story.story_contents_schema_version,
notes=story.notes,
story_contents=story.story_contents.to_dict(),
corresponding_topic_id=story.corresponding_topic_id,
url_fragment=story.url_fragment,
meta_tag_content=story.meta_tag_content
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
story.version += 1
create_story_summary(story.id)
def save_new_story(committer_id, story):
"""Saves a new story.
Args:
committer_id: str. ID of the committer.
story: Story. Story to be saved.
"""
commit_message = (
'New story created with title \'%s\'.' % story.title)
_create_story(
committer_id, story, commit_message, [story_domain.StoryChange({
'cmd': story_domain.CMD_CREATE_NEW,
'title': story.title
})])
# Repository SAVE and DELETE methods.
def apply_change_list(story_id, change_list):
"""Applies a changelist to a story and returns the result.
Args:
story_id: str. ID of the given story.
change_list: list(StoryChange). A change list to be applied to the given
story.
Returns:
Story, list(str), list(str). The resulting story domain object, the
exploration IDs removed from story and the exploration IDs added to
the story.
"""
story = story_fetchers.get_story_by_id(story_id)
exp_ids_in_old_story = story.story_contents.get_all_linked_exp_ids()
try:
for change in change_list:
if not isinstance(change, story_domain.StoryChange):
raise Exception('Expected change to be of type StoryChange')
if change.cmd == story_domain.CMD_ADD_STORY_NODE:
story.add_node(change.node_id, change.title)
elif change.cmd == story_domain.CMD_DELETE_STORY_NODE:
story.delete_node(change.node_id)
elif (change.cmd ==
story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS):
if change.new_value:
story.mark_node_outline_as_finalized(change.node_id)
else:
story.mark_node_outline_as_unfinalized(change.node_id)
elif change.cmd == story_domain.CMD_UPDATE_STORY_NODE_PROPERTY:
if (change.property_name ==
story_domain.STORY_NODE_PROPERTY_OUTLINE):
story.update_node_outline(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_TITLE):
story.update_node_title(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESCRIPTION):
story.update_node_description(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME):
story.update_node_thumbnail_filename(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_node_thumbnail_bg_color(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS):
story.update_node_acquired_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS):
story.update_node_prerequisite_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS):
story.update_node_destination_node_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID):
story.update_node_exploration_id(
change.node_id, change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_PROPERTY:
if (change.property_name ==
story_domain.STORY_PROPERTY_TITLE):
story.update_title(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_FILENAME):
story.update_thumbnail_filename(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_thumbnail_bg_color(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_DESCRIPTION):
story.update_description(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_NOTES):
story.update_notes(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_LANGUAGE_CODE):
story.update_language_code(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_URL_FRAGMENT):
story.update_url_fragment(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_META_TAG_CONTENT):
story.update_meta_tag_content(change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY:
if (change.property_name ==
story_domain.INITIAL_NODE_ID):
story.update_initial_node(change.new_value)
if change.property_name == story_domain.NODE:
story.rearrange_node_in_story(
change.old_value, change.new_value)
elif (
change.cmd ==
story_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
# Loading the story model from the datastore into a
# Story domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# story is sufficient to apply the schema migration.
continue
exp_ids_in_modified_story = (
story.story_contents.get_all_linked_exp_ids())
exp_ids_removed_from_story = list(
set(exp_ids_in_old_story).difference(exp_ids_in_modified_story))
exp_ids_added_to_story = list(
set(exp_ids_in_modified_story).difference(exp_ids_in_old_story))
return story, exp_ids_removed_from_story, exp_ids_added_to_story
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, story_id, change_list)
)
raise
def does_story_exist_with_url_fragment(url_fragment):
"""Checks if the url fragment for the story exists.
Args:
url_fragment: str. The url_fragment of the story.
Returns:
bool. Whether the the url fragment for the story exists or not.
"""
story = story_fetchers.get_story_by_url_fragment(url_fragment)
return story is not None
def validate_explorations_for_story(exp_ids, raise_error):
"""Validates the explorations in the given story and checks whether they
are compatible with the mobile app and ready for publishing.
Args:
exp_ids: list(str). The exp IDs to validate.
raise_error: bool. Whether to raise an Exception when a validation error
is encountered. If not, a list of the error messages are
returned. raise_error should be True when this is called before
saving the story and False when this function is called from the
frontend.
Returns:
list(str). The various validation error messages (if raise_error is
False).
Raises:
ValidationError. Expected story to only reference valid explorations.
ValidationError. Exploration with ID is not public. Please publish
explorations before adding them to a story.
ValidationError. All explorations in a story should be of the same
category.
ValidationError. Invalid language found for exploration.
ValidationError. Expected no exploration to have parameter values in it.
ValidationError. Invalid interaction in exploration.
ValidationError. RTE content in state of exploration with ID is not
supported on mobile.
"""
validation_error_messages = []
# Strict = False, since the existence of explorations is checked below.
exps_dict = (
exp_fetchers.get_multiple_explorations_by_id(exp_ids, strict=False))
exp_rights = (
rights_manager.get_multiple_exploration_rights_by_ids(exp_ids))
exp_rights_dict = {}
for rights in exp_rights:
if rights is not None:
exp_rights_dict[rights.id] = rights.status
for exp_id in exp_ids:
if exp_id not in exps_dict:
error_string = (
'Expected story to only reference valid explorations, but found'
' a reference to an invalid exploration with ID: %s'
% exp_id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
else:
if exp_rights_dict[exp_id] != constants.ACTIVITY_STATUS_PUBLIC:
error_string = (
'Exploration with ID %s is not public. Please publish '
'explorations before adding them to a story.'
% exp_id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if exps_dict:
for exp_id in exp_ids:
if exp_id in exps_dict:
sample_exp_id = exp_id
break
common_exp_category = exps_dict[sample_exp_id].category
for exp_id in exps_dict:
exp = exps_dict[exp_id]
if exp.category != common_exp_category:
error_string = (
'All explorations in a story should be of the '
'same category. The explorations with ID %s and %s have'
' different categories.' % (sample_exp_id, exp_id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if (
exp.language_code not in
android_validation_constants.SUPPORTED_LANGUAGES):
error_string = (
'Invalid language %s found for exploration '
'with ID %s.' % (exp.language_code, exp_id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if exp.param_specs or exp.param_changes:
error_string = (
'Expected no exploration to have parameter '
'values in it. Invalid exploration: %s' % exp.id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if not exp.correctness_feedback_enabled:
error_string = (
'Expected all explorations to have correctness feedback '
'enabled. Invalid exploration: %s' % exp.id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
for state_name in exp.states:
state = exp.states[state_name]
if not state.interaction.is_supported_on_android_app():
error_string = (
'Invalid interaction %s in exploration '
'with ID: %s.' % (state.interaction.id, exp.id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if not state.is_rte_content_supported_on_android():
error_string = (
'RTE content in state %s of exploration '
'with ID %s is not supported on mobile.'
% (state_name, exp.id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if state.interaction.id == 'EndExploration':
recommended_exploration_ids = (
state.interaction.customization_args[
'recommendedExplorationIds'].value)
if len(recommended_exploration_ids) != 0:
error_string = (
'Exploration with ID: %s contains exploration '
'recommendations in its EndExploration interaction.'
% (exp.id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
return validation_error_messages
def _save_story(
committer_id, story, commit_message, change_list, story_is_published):
"""Validates a story and commits it to persistent storage. If
successful, increments the version number of the incoming story domain
object by 1.
Args:
committer_id: str. ID of the given committer.
story: Story. The story domain object to be saved.
commit_message: str. The commit message.
change_list: list(StoryChange). List of changes applied to a story.
story_is_published: bool. Whether the supplied story is published.
Raises:
ValidationError. An invalid exploration was referenced in the
story.
Exception. The story model and the incoming story domain
object have different version numbers.
"""
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
'save story %s: %s' % (story.id, change_list))
story.validate()
if story_is_published:
exp_ids = []
for node in story.story_contents.nodes:
if not node.exploration_id:
raise Exception(
'Story node with id %s does not contain an '
'exploration id.' % node.id)
exp_ids.append(node.exploration_id)
validate_explorations_for_story(exp_ids, True)
# Story model cannot be None as story is passed as parameter here and that
# is only possible if a story model with that story id exists. Also this is
# a private function and so it cannot be called independently with any
# story object.
story_model = story_models.StoryModel.get(story.id)
if story.version > story_model.version:
raise Exception(
'Unexpected error: trying to update version %s of story '
'from version %s. Please reload the page and try again.'
% (story_model.version, story.version))
elif story.version < story_model.version:
raise Exception(
'Trying to update version %s of story from version %s, '
'which is too old. Please reload the page and try again.'
% (story_model.version, story.version))
story_model.description = story.description
story_model.title = story.title
story_model.thumbnail_bg_color = story.thumbnail_bg_color
story_model.thumbnail_filename = story.thumbnail_filename
story_model.notes = story.notes
story_model.language_code = story.language_code
story_model.story_contents_schema_version = (
story.story_contents_schema_version)
story_model.story_contents = story.story_contents.to_dict()
story_model.corresponding_topic_id = story.corresponding_topic_id
story_model.version = story.version
story_model.url_fragment = story.url_fragment
story_model.meta_tag_content = story.meta_tag_content
change_dicts = [change.to_dict() for change in change_list]
story_model.commit(committer_id, commit_message, change_dicts)
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_STORY, None, [story.id])
story.version += 1
def _is_story_published_and_present_in_topic(story):
"""Returns whether a story is published. Raises an exception if the story
is not present in the corresponding topic's story references.
Args:
story: Story. The story domain object.
Returns:
bool. Whether the supplied story is published.
"""
topic = topic_fetchers.get_topic_by_id(
story.corresponding_topic_id, strict=False)
if topic is None:
raise utils.ValidationError(
'Expected story to only belong to a valid topic, but found no '
'topic with ID: %s' % story.corresponding_topic_id)
story_is_published = False
story_is_present_in_topic = False
for story_reference in topic.get_all_story_references():
if story_reference.story_id == story.id:
story_is_present_in_topic = True
story_is_published = story_reference.story_is_published
if not story_is_present_in_topic:
raise Exception(
'Expected story to belong to the topic %s, but it is '
'neither a part of the canonical stories or the additional '
'stories of the topic.' % story.corresponding_topic_id)
return story_is_published
def update_story(
committer_id, story_id, change_list, commit_message):
"""Updates a story. Commits changes.
Args:
committer_id: str. The id of the user who is performing the update
action.
story_id: str. The story id.
change_list: list(StoryChange). These changes are applied in sequence to
produce the resulting story.
commit_message: str or None. A description of changes made to the
story.
Raises:
ValidationError. Exploration is already linked to a different story.
"""
if not commit_message:
raise ValueError('Expected a commit message but received none.')
old_story = story_fetchers.get_story_by_id(story_id)
new_story, exp_ids_removed_from_story, exp_ids_added_to_story = (
apply_change_list(story_id, change_list))
story_is_published = _is_story_published_and_present_in_topic(new_story)
if (
old_story.url_fragment != new_story.url_fragment and
does_story_exist_with_url_fragment(new_story.url_fragment)):
raise utils.ValidationError(
'Story Url Fragment is not unique across the site.')
_save_story(
committer_id, new_story, commit_message, change_list,
story_is_published)
create_story_summary(new_story.id)
if story_is_published and _is_topic_published(new_story):
opportunity_services.update_exploration_opportunities(
old_story, new_story)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids_removed_from_story)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_removed_from_story))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
exploration_context_models_collisions_list = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_added_to_story))
for context_model in exploration_context_models_collisions_list:
if context_model is not None and context_model.story_id != story_id:
raise utils.ValidationError(
'The exploration with ID %s is already linked to story '
'with ID %s' % (context_model.id, context_model.story_id))
new_exploration_context_models = [exp_models.ExplorationContextModel(
id=exp_id,
story_id=story_id
) for exp_id in exp_ids_added_to_story]
exp_models.ExplorationContextModel.update_timestamps_multi(
new_exploration_context_models)
exp_models.ExplorationContextModel.put_multi(new_exploration_context_models)
def _is_topic_published(story):
"""Returns whether the story's corresponding topic is published.
Args:
story: Story. The story domain object.
Returns:
bool. Whether the the story's corresponding topic is published.
"""
topic_rights = topic_fetchers.get_topic_rights(story.corresponding_topic_id)
return topic_rights.topic_is_published
def delete_story(committer_id, story_id, force_deletion=False):
"""Deletes the story with the given story_id.
Args:
committer_id: str. ID of the committer.
story_id: str. ID of the story to be deleted.
force_deletion: bool. If true, the story and its history are fully
deleted and are unrecoverable. Otherwise, the story and all
its history are marked as deleted, but the corresponding models are
still retained in the datastore. This last option is the preferred
one.
"""
story_model = story_models.StoryModel.get(story_id)
story = story_fetchers.get_story_from_model(story_model)
exp_ids = story.story_contents.get_all_linked_exp_ids()
story_model.delete(
committer_id, feconf.COMMIT_MESSAGE_STORY_DELETED,
force_deletion=force_deletion)
exp_ids_to_be_removed = []
for node in story.story_contents.nodes:
exp_ids_to_be_removed.append(node.exploration_id)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_to_be_removed))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
# This must come after the story is retrieved. Otherwise the memcache
# key will be reinstated.
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_STORY, None, [story_id])
# Delete the summary of the story (regardless of whether
# force_deletion is True or not).
delete_story_summary(story_id)
# Delete the opportunities available and reject the suggestions related to
# the exploration used in the story.
opportunity_services.delete_exploration_opportunities(exp_ids)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids)
def delete_story_summary(story_id):
"""Delete a story summary model.
Args:
story_id: str. ID of the story whose story summary is to
be deleted.
"""
story_models.StorySummaryModel.get(story_id).delete()
def compute_summary_of_story(story):
"""Create a StorySummary domain object for a given Story domain
object and return it.
Args:
story: Story. The story object, for which the summary is to be computed.
Returns:
StorySummary. The computed summary for the given story.
"""
story_model_node_titles = [
node.title for node in story.story_contents.nodes]
story_summary = story_domain.StorySummary(
story.id, story.title, story.description, story.language_code,
story.version, story_model_node_titles, story.thumbnail_bg_color,
story.thumbnail_filename, story.url_fragment, story.created_on,
story.last_updated
)
return story_summary
def create_story_summary(story_id):
"""Creates and stores a summary of the given story.
Args:
story_id: str. ID of the story.
"""
story = story_fetchers.get_story_by_id(story_id)
story_summary = compute_summary_of_story(story)
save_story_summary(story_summary)
def save_story_summary(story_summary):
"""Save a story summary domain object as a StorySummaryModel
entity in the datastore.
Args:
story_summary: StorySummary. The story summary object to be saved in the
datastore.
"""
story_summary_dict = {
'title': story_summary.title,
'description': story_summary.description,
'language_code': story_summary.language_code,
'version': story_summary.version,
'node_titles': story_summary.node_titles,
'thumbnail_bg_color': story_summary.thumbnail_bg_color,
'thumbnail_filename': story_summary.thumbnail_filename,
'url_fragment': story_summary.url_fragment,
'story_model_last_updated': (
story_summary.story_model_last_updated),
'story_model_created_on': (
story_summary.story_model_created_on)
}
story_summary_model = (
story_models.StorySummaryModel.get_by_id(story_summary.id))
if story_summary_model is not None:
story_summary_model.populate(**story_summary_dict)
story_summary_model.update_timestamps()
story_summary_model.put()
else:
story_summary_dict['id'] = story_summary.id
model = story_models.StorySummaryModel(**story_summary_dict)
model.update_timestamps()
model.put()
def record_completed_node_in_story_context(user_id, story_id, node_id):
"""Records a node by a given user in a given story
context as having been played.
Args:
user_id: str. ID of the given user.
story_id: str. ID of the given story.
node_id: str. ID of the given node.
"""
progress_model = user_models.StoryProgressModel.get_or_create(
user_id, story_id)
if node_id not in progress_model.completed_node_ids:
progress_model.completed_node_ids.append(node_id)
progress_model.update_timestamps()
progress_model.put()
| 41.759943
| 80
| 0.661689
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.domain import android_validation_constants
from core.domain import caching_services
from core.domain import exp_fetchers
from core.domain import opportunity_services
from core.domain import rights_manager
from core.domain import story_domain
from core.domain import story_fetchers
from core.domain import suggestion_services
from core.domain import topic_fetchers
from core.platform import models
import feconf
import utils
(exp_models, story_models, user_models,) = models.Registry.import_models(
[models.NAMES.exploration, models.NAMES.story, models.NAMES.user])
def get_new_story_id():
return story_models.StoryModel.get_new_id('')
def _create_story(committer_id, story, commit_message, commit_cmds):
story.validate()
model = story_models.StoryModel(
id=story.id,
description=story.description,
title=story.title,
thumbnail_bg_color=story.thumbnail_bg_color,
thumbnail_filename=story.thumbnail_filename,
language_code=story.language_code,
story_contents_schema_version=story.story_contents_schema_version,
notes=story.notes,
story_contents=story.story_contents.to_dict(),
corresponding_topic_id=story.corresponding_topic_id,
url_fragment=story.url_fragment,
meta_tag_content=story.meta_tag_content
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
story.version += 1
create_story_summary(story.id)
def save_new_story(committer_id, story):
commit_message = (
'New story created with title \'%s\'.' % story.title)
_create_story(
committer_id, story, commit_message, [story_domain.StoryChange({
'cmd': story_domain.CMD_CREATE_NEW,
'title': story.title
})])
def apply_change_list(story_id, change_list):
story = story_fetchers.get_story_by_id(story_id)
exp_ids_in_old_story = story.story_contents.get_all_linked_exp_ids()
try:
for change in change_list:
if not isinstance(change, story_domain.StoryChange):
raise Exception('Expected change to be of type StoryChange')
if change.cmd == story_domain.CMD_ADD_STORY_NODE:
story.add_node(change.node_id, change.title)
elif change.cmd == story_domain.CMD_DELETE_STORY_NODE:
story.delete_node(change.node_id)
elif (change.cmd ==
story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS):
if change.new_value:
story.mark_node_outline_as_finalized(change.node_id)
else:
story.mark_node_outline_as_unfinalized(change.node_id)
elif change.cmd == story_domain.CMD_UPDATE_STORY_NODE_PROPERTY:
if (change.property_name ==
story_domain.STORY_NODE_PROPERTY_OUTLINE):
story.update_node_outline(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_TITLE):
story.update_node_title(change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESCRIPTION):
story.update_node_description(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME):
story.update_node_thumbnail_filename(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_node_thumbnail_bg_color(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS):
story.update_node_acquired_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS):
story.update_node_prerequisite_skill_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS):
story.update_node_destination_node_ids(
change.node_id, change.new_value)
elif (change.property_name ==
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID):
story.update_node_exploration_id(
change.node_id, change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_PROPERTY:
if (change.property_name ==
story_domain.STORY_PROPERTY_TITLE):
story.update_title(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_FILENAME):
story.update_thumbnail_filename(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_THUMBNAIL_BG_COLOR):
story.update_thumbnail_bg_color(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_DESCRIPTION):
story.update_description(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_NOTES):
story.update_notes(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_LANGUAGE_CODE):
story.update_language_code(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_URL_FRAGMENT):
story.update_url_fragment(change.new_value)
elif (change.property_name ==
story_domain.STORY_PROPERTY_META_TAG_CONTENT):
story.update_meta_tag_content(change.new_value)
elif change.cmd == story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY:
if (change.property_name ==
story_domain.INITIAL_NODE_ID):
story.update_initial_node(change.new_value)
if change.property_name == story_domain.NODE:
story.rearrange_node_in_story(
change.old_value, change.new_value)
elif (
change.cmd ==
story_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
continue
exp_ids_in_modified_story = (
story.story_contents.get_all_linked_exp_ids())
exp_ids_removed_from_story = list(
set(exp_ids_in_old_story).difference(exp_ids_in_modified_story))
exp_ids_added_to_story = list(
set(exp_ids_in_modified_story).difference(exp_ids_in_old_story))
return story, exp_ids_removed_from_story, exp_ids_added_to_story
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, story_id, change_list)
)
raise
def does_story_exist_with_url_fragment(url_fragment):
story = story_fetchers.get_story_by_url_fragment(url_fragment)
return story is not None
def validate_explorations_for_story(exp_ids, raise_error):
validation_error_messages = []
exps_dict = (
exp_fetchers.get_multiple_explorations_by_id(exp_ids, strict=False))
exp_rights = (
rights_manager.get_multiple_exploration_rights_by_ids(exp_ids))
exp_rights_dict = {}
for rights in exp_rights:
if rights is not None:
exp_rights_dict[rights.id] = rights.status
for exp_id in exp_ids:
if exp_id not in exps_dict:
error_string = (
'Expected story to only reference valid explorations, but found'
' a reference to an invalid exploration with ID: %s'
% exp_id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
else:
if exp_rights_dict[exp_id] != constants.ACTIVITY_STATUS_PUBLIC:
error_string = (
'Exploration with ID %s is not public. Please publish '
'explorations before adding them to a story.'
% exp_id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if exps_dict:
for exp_id in exp_ids:
if exp_id in exps_dict:
sample_exp_id = exp_id
break
common_exp_category = exps_dict[sample_exp_id].category
for exp_id in exps_dict:
exp = exps_dict[exp_id]
if exp.category != common_exp_category:
error_string = (
'All explorations in a story should be of the '
'same category. The explorations with ID %s and %s have'
' different categories.' % (sample_exp_id, exp_id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if (
exp.language_code not in
android_validation_constants.SUPPORTED_LANGUAGES):
error_string = (
'Invalid language %s found for exploration '
'with ID %s.' % (exp.language_code, exp_id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if exp.param_specs or exp.param_changes:
error_string = (
'Expected no exploration to have parameter '
'values in it. Invalid exploration: %s' % exp.id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if not exp.correctness_feedback_enabled:
error_string = (
'Expected all explorations to have correctness feedback '
'enabled. Invalid exploration: %s' % exp.id)
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
for state_name in exp.states:
state = exp.states[state_name]
if not state.interaction.is_supported_on_android_app():
error_string = (
'Invalid interaction %s in exploration '
'with ID: %s.' % (state.interaction.id, exp.id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if not state.is_rte_content_supported_on_android():
error_string = (
'RTE content in state %s of exploration '
'with ID %s is not supported on mobile.'
% (state_name, exp.id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
if state.interaction.id == 'EndExploration':
recommended_exploration_ids = (
state.interaction.customization_args[
'recommendedExplorationIds'].value)
if len(recommended_exploration_ids) != 0:
error_string = (
'Exploration with ID: %s contains exploration '
'recommendations in its EndExploration interaction.'
% (exp.id))
if raise_error:
raise utils.ValidationError(error_string)
validation_error_messages.append(error_string)
return validation_error_messages
def _save_story(
committer_id, story, commit_message, change_list, story_is_published):
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
'save story %s: %s' % (story.id, change_list))
story.validate()
if story_is_published:
exp_ids = []
for node in story.story_contents.nodes:
if not node.exploration_id:
raise Exception(
'Story node with id %s does not contain an '
'exploration id.' % node.id)
exp_ids.append(node.exploration_id)
validate_explorations_for_story(exp_ids, True)
story_model = story_models.StoryModel.get(story.id)
if story.version > story_model.version:
raise Exception(
'Unexpected error: trying to update version %s of story '
'from version %s. Please reload the page and try again.'
% (story_model.version, story.version))
elif story.version < story_model.version:
raise Exception(
'Trying to update version %s of story from version %s, '
'which is too old. Please reload the page and try again.'
% (story_model.version, story.version))
story_model.description = story.description
story_model.title = story.title
story_model.thumbnail_bg_color = story.thumbnail_bg_color
story_model.thumbnail_filename = story.thumbnail_filename
story_model.notes = story.notes
story_model.language_code = story.language_code
story_model.story_contents_schema_version = (
story.story_contents_schema_version)
story_model.story_contents = story.story_contents.to_dict()
story_model.corresponding_topic_id = story.corresponding_topic_id
story_model.version = story.version
story_model.url_fragment = story.url_fragment
story_model.meta_tag_content = story.meta_tag_content
change_dicts = [change.to_dict() for change in change_list]
story_model.commit(committer_id, commit_message, change_dicts)
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_STORY, None, [story.id])
story.version += 1
def _is_story_published_and_present_in_topic(story):
topic = topic_fetchers.get_topic_by_id(
story.corresponding_topic_id, strict=False)
if topic is None:
raise utils.ValidationError(
'Expected story to only belong to a valid topic, but found no '
'topic with ID: %s' % story.corresponding_topic_id)
story_is_published = False
story_is_present_in_topic = False
for story_reference in topic.get_all_story_references():
if story_reference.story_id == story.id:
story_is_present_in_topic = True
story_is_published = story_reference.story_is_published
if not story_is_present_in_topic:
raise Exception(
'Expected story to belong to the topic %s, but it is '
'neither a part of the canonical stories or the additional '
'stories of the topic.' % story.corresponding_topic_id)
return story_is_published
def update_story(
committer_id, story_id, change_list, commit_message):
if not commit_message:
raise ValueError('Expected a commit message but received none.')
old_story = story_fetchers.get_story_by_id(story_id)
new_story, exp_ids_removed_from_story, exp_ids_added_to_story = (
apply_change_list(story_id, change_list))
story_is_published = _is_story_published_and_present_in_topic(new_story)
if (
old_story.url_fragment != new_story.url_fragment and
does_story_exist_with_url_fragment(new_story.url_fragment)):
raise utils.ValidationError(
'Story Url Fragment is not unique across the site.')
_save_story(
committer_id, new_story, commit_message, change_list,
story_is_published)
create_story_summary(new_story.id)
if story_is_published and _is_topic_published(new_story):
opportunity_services.update_exploration_opportunities(
old_story, new_story)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids_removed_from_story)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_removed_from_story))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
exploration_context_models_collisions_list = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_added_to_story))
for context_model in exploration_context_models_collisions_list:
if context_model is not None and context_model.story_id != story_id:
raise utils.ValidationError(
'The exploration with ID %s is already linked to story '
'with ID %s' % (context_model.id, context_model.story_id))
new_exploration_context_models = [exp_models.ExplorationContextModel(
id=exp_id,
story_id=story_id
) for exp_id in exp_ids_added_to_story]
exp_models.ExplorationContextModel.update_timestamps_multi(
new_exploration_context_models)
exp_models.ExplorationContextModel.put_multi(new_exploration_context_models)
def _is_topic_published(story):
topic_rights = topic_fetchers.get_topic_rights(story.corresponding_topic_id)
return topic_rights.topic_is_published
def delete_story(committer_id, story_id, force_deletion=False):
story_model = story_models.StoryModel.get(story_id)
story = story_fetchers.get_story_from_model(story_model)
exp_ids = story.story_contents.get_all_linked_exp_ids()
story_model.delete(
committer_id, feconf.COMMIT_MESSAGE_STORY_DELETED,
force_deletion=force_deletion)
exp_ids_to_be_removed = []
for node in story.story_contents.nodes:
exp_ids_to_be_removed.append(node.exploration_id)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_to_be_removed))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_STORY, None, [story_id])
delete_story_summary(story_id)
opportunity_services.delete_exploration_opportunities(exp_ids)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids)
def delete_story_summary(story_id):
story_models.StorySummaryModel.get(story_id).delete()
def compute_summary_of_story(story):
story_model_node_titles = [
node.title for node in story.story_contents.nodes]
story_summary = story_domain.StorySummary(
story.id, story.title, story.description, story.language_code,
story.version, story_model_node_titles, story.thumbnail_bg_color,
story.thumbnail_filename, story.url_fragment, story.created_on,
story.last_updated
)
return story_summary
def create_story_summary(story_id):
story = story_fetchers.get_story_by_id(story_id)
story_summary = compute_summary_of_story(story)
save_story_summary(story_summary)
def save_story_summary(story_summary):
story_summary_dict = {
'title': story_summary.title,
'description': story_summary.description,
'language_code': story_summary.language_code,
'version': story_summary.version,
'node_titles': story_summary.node_titles,
'thumbnail_bg_color': story_summary.thumbnail_bg_color,
'thumbnail_filename': story_summary.thumbnail_filename,
'url_fragment': story_summary.url_fragment,
'story_model_last_updated': (
story_summary.story_model_last_updated),
'story_model_created_on': (
story_summary.story_model_created_on)
}
story_summary_model = (
story_models.StorySummaryModel.get_by_id(story_summary.id))
if story_summary_model is not None:
story_summary_model.populate(**story_summary_dict)
story_summary_model.update_timestamps()
story_summary_model.put()
else:
story_summary_dict['id'] = story_summary.id
model = story_models.StorySummaryModel(**story_summary_dict)
model.update_timestamps()
model.put()
def record_completed_node_in_story_context(user_id, story_id, node_id):
progress_model = user_models.StoryProgressModel.get_or_create(
user_id, story_id)
if node_id not in progress_model.completed_node_ids:
progress_model.completed_node_ids.append(node_id)
progress_model.update_timestamps()
progress_model.put()
| true
| true
|
1c486d9dbfb62cdd738c2ff2e418532e0d2734b8
| 6,437
|
py
|
Python
|
data/p3BR/R2/benchmark/startQiskit_noisy239.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_noisy239.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_noisy239.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=45
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.cx(input_qubit[0],input_qubit[2]) # number=35
prog.x(input_qubit[2]) # number=36
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.cx(input_qubit[0],input_qubit[2]) # number=33
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy239.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 30.799043
| 140
| 0.634457
|
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
zero = np.binary_repr(0, n)
b = f(zero)
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
prog.x(input_qubit[n])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[1])
prog.rx(0.17592918860102857,input_qubit[2])
prog.rx(-0.3989822670059037,input_qubit[1])
prog.h(input_qubit[1])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[1])
prog.h(input_qubit[1])
prog.y(input_qubit[1])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[1])
prog.h(input_qubit[1])
prog.z(input_qubit[2])
prog.z(input_qubit[1])
prog.x(input_qubit[1])
prog.y(input_qubit[2])
prog.x(input_qubit[2])
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
quantum_state = get_statevector(prog)
backend = Aer.get_backend(backend_str)
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy239.csv", "w")
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| true
| true
|
1c486da49bd95ed0382213bd70102161815cc3de
| 3,912
|
py
|
Python
|
google/cloud/pubsublite/v1/pubsublite-v1-py/google/cloud/pubsublite_v1/types/publisher.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/pubsublite/v1/pubsublite-v1-py/google/cloud/pubsublite_v1/types/publisher.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/pubsublite/v1/pubsublite-v1-py/google/cloud/pubsublite_v1/types/publisher.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.pubsublite_v1.types import common
__protobuf__ = proto.module(
package='google.cloud.pubsublite.v1',
manifest={
'InitialPublishRequest',
'InitialPublishResponse',
'MessagePublishRequest',
'MessagePublishResponse',
'PublishRequest',
'PublishResponse',
},
)
class InitialPublishRequest(proto.Message):
r"""The first request that must be sent on a newly-opened stream.
Attributes:
topic (str):
The topic to which messages will be written.
partition (int):
The partition within the topic to which messages will be
written. Partitions are zero indexed, so ``partition`` must
be in the range [0, topic.num_partitions).
"""
topic = proto.Field(
proto.STRING,
number=1,
)
partition = proto.Field(
proto.INT64,
number=2,
)
class InitialPublishResponse(proto.Message):
r"""Response to an InitialPublishRequest.
"""
class MessagePublishRequest(proto.Message):
r"""Request to publish messages to the topic.
Attributes:
messages (Sequence[google.cloud.pubsublite_v1.types.PubSubMessage]):
The messages to publish.
"""
messages = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=common.PubSubMessage,
)
class MessagePublishResponse(proto.Message):
r"""Response to a MessagePublishRequest.
Attributes:
start_cursor (google.cloud.pubsublite_v1.types.Cursor):
The cursor of the first published message in
the batch. The cursors for any remaining
messages in the batch are guaranteed to be
sequential.
"""
start_cursor = proto.Field(
proto.MESSAGE,
number=1,
message=common.Cursor,
)
class PublishRequest(proto.Message):
r"""Request sent from the client to the server on a stream.
Attributes:
initial_request (google.cloud.pubsublite_v1.types.InitialPublishRequest):
Initial request on the stream.
message_publish_request (google.cloud.pubsublite_v1.types.MessagePublishRequest):
Request to publish messages.
"""
initial_request = proto.Field(
proto.MESSAGE,
number=1,
oneof='request_type',
message='InitialPublishRequest',
)
message_publish_request = proto.Field(
proto.MESSAGE,
number=2,
oneof='request_type',
message='MessagePublishRequest',
)
class PublishResponse(proto.Message):
r"""Response to a PublishRequest.
Attributes:
initial_response (google.cloud.pubsublite_v1.types.InitialPublishResponse):
Initial response on the stream.
message_response (google.cloud.pubsublite_v1.types.MessagePublishResponse):
Response to publishing messages.
"""
initial_response = proto.Field(
proto.MESSAGE,
number=1,
oneof='response_type',
message='InitialPublishResponse',
)
message_response = proto.Field(
proto.MESSAGE,
number=2,
oneof='response_type',
message='MessagePublishResponse',
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 27.356643
| 89
| 0.660532
|
import proto
from google.cloud.pubsublite_v1.types import common
__protobuf__ = proto.module(
package='google.cloud.pubsublite.v1',
manifest={
'InitialPublishRequest',
'InitialPublishResponse',
'MessagePublishRequest',
'MessagePublishResponse',
'PublishRequest',
'PublishResponse',
},
)
class InitialPublishRequest(proto.Message):
topic = proto.Field(
proto.STRING,
number=1,
)
partition = proto.Field(
proto.INT64,
number=2,
)
class InitialPublishResponse(proto.Message):
class MessagePublishRequest(proto.Message):
messages = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=common.PubSubMessage,
)
class MessagePublishResponse(proto.Message):
start_cursor = proto.Field(
proto.MESSAGE,
number=1,
message=common.Cursor,
)
class PublishRequest(proto.Message):
initial_request = proto.Field(
proto.MESSAGE,
number=1,
oneof='request_type',
message='InitialPublishRequest',
)
message_publish_request = proto.Field(
proto.MESSAGE,
number=2,
oneof='request_type',
message='MessagePublishRequest',
)
class PublishResponse(proto.Message):
initial_response = proto.Field(
proto.MESSAGE,
number=1,
oneof='response_type',
message='InitialPublishResponse',
)
message_response = proto.Field(
proto.MESSAGE,
number=2,
oneof='response_type',
message='MessagePublishResponse',
)
__all__ = tuple(sorted(__protobuf__.manifest))
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.