hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b5817ba438460ef7d6dca985889536c4db85761 | 977 | py | Python | run_spider.py | jen-soft/easy_web_crawler | 3c2c4ed612b4ef52b1306a948dd9adaa5ac80c4e | [
"MIT"
] | 1 | 2018-03-04T00:08:11.000Z | 2018-03-04T00:08:11.000Z | run_spider.py | jen-soft/easy_web_crawler | 3c2c4ed612b4ef52b1306a948dd9adaa5ac80c4e | [
"MIT"
] | 8 | 2021-02-08T20:19:31.000Z | 2022-03-11T23:17:07.000Z | run_spider.py | jen-soft/easy_web_crawler | 3c2c4ed612b4ef52b1306a948dd9adaa5ac80c4e | [
"MIT"
] | null | null | null | import os
from scrapy.crawler import CrawlerProcess
from scrapy.cmdline import execute, get_project_settings
from my_crawler.spiders.yts__am import YtsAmSpider
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
def main():
    """Run the YTS spider once, exporting results to data/debug_data.json.

    Any JSON file left over from a previous debug run is removed first so
    the feed exporter starts from a clean slate; the crawl blocks until
    the spider finishes.
    """
    # Destination of the JSON feed for this debug run.
    output_file = os.path.join(BASE_DIR, 'data/debug_data.json')
    if os.path.exists(output_file):
        os.remove(output_file)

    # Project settings with debug-friendly overrides applied.
    settings = get_project_settings()
    overrides = {
        'LOG_FILE': None,  # default stdout
        # 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'FEED_URI': output_file,
        'FEED_FORMAT': 'json',
    }
    settings.update(overrides)

    process = CrawlerProcess(settings)
    spider = YtsAmSpider()
    process.crawl(spider)
    process.start()  # blocks until the crawl is finished
    process.stop()
    spider.log('--------------------------------------------------------------')
    spider.log('file saved at {file_path}'.format(file_path=output_file))
if __name__ == "__main__":
main()
| 28.735294 | 80 | 0.639713 |
8a77969ce682f1699557944f973430668f5f0898 | 11,535 | py | Python | parallel_misc/mnist_replica.py | Animadversio/FloodFillNetwork-Notes | c4d207e53db1c2befc79fbc0ef0451d6f877c868 | [
"Apache-2.0"
] | 2 | 2019-03-09T22:07:10.000Z | 2019-07-30T11:55:59.000Z | parallel_misc/mnist_replica.py | Animadversio/FloodFillNetwork-Notes | c4d207e53db1c2befc79fbc0ef0451d6f877c868 | [
"Apache-2.0"
] | 1 | 2019-07-30T12:07:47.000Z | 2019-07-30T12:07:47.000Z | parallel_misc/mnist_replica.py | Animadversio/FloodFillNetwork-Notes | c4d207e53db1c2befc79fbc0ef0451d6f877c868 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed MNIST training and validation, with model replicas.
A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on one parameter server (ps), while the ops
are executed on two worker nodes by default. The TF sessions also run on the
worker node.
Multiple invocations of this script can be done in parallel, with different
values for --task_index. There should be exactly one invocation with
--task_index, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.
The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker is visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
"Directory for storing mnist data")
flags.DEFINE_boolean("download_only", False,
"Only perform downloading of data; Do not proceed to "
"session preparation, model definition or training")
flags.DEFINE_integer("task_index", None,
"Worker task index, should be >= 0. task_index=0 is "
"the master worker task the performs the variable "
"initialization ")
flags.DEFINE_integer("num_gpus", 1, "Total number of gpus for each machine."
"If you don't use GPU, please set it to '0'")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update "
"is applied (For sync_replicas mode only; default: "
"num_workers)")
flags.DEFINE_integer("hidden_units", 100,
"Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200,
"Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_boolean(
"sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
flags.DEFINE_boolean(
"existing_servers", False, "Whether servers already exists. If True, "
"will use the worker hosts via their GRPC URLs (one client process "
"per worker host). Otherwise, will create an in-process TensorFlow "
"server.")
flags.DEFINE_string("ps_hosts", "localhost:2222",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("job_name", None, "job name: worker or ps")
FLAGS = flags.FLAGS
IMAGE_PIXELS = 28
def main(unused_argv):
  """Build the MNIST softmax model and run distributed (between-graph) training.

  Each invocation of this script runs one task of the cluster described by
  FLAGS.ps_hosts / FLAGS.worker_hosts:
    * "ps" tasks start a server and block in server.join(), serving variables;
    * "worker" tasks build the replicated graph; the chief (task_index == 0)
      initializes shared state while the other workers wait for it.
  Training stops once the *global* step reaches FLAGS.train_steps, after
  which the validation cross-entropy is printed.
  """
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  if FLAGS.download_only:
    sys.exit(0)
  # Fail fast on missing cluster-role flags; both are required.
  if FLAGS.job_name is None or FLAGS.job_name == "":
    raise ValueError("Must specify an explicit `job_name`")
  if FLAGS.task_index is None or FLAGS.task_index == "":
    raise ValueError("Must specify an explicit `task_index`")
  print("job name = %s" % FLAGS.job_name)
  print("task index = %d" % FLAGS.task_index)
  # Construct the cluster and start the server
  ps_spec = FLAGS.ps_hosts.split(",")
  worker_spec = FLAGS.worker_hosts.split(",")
  # Get the number of workers.
  num_workers = len(worker_spec)
  cluster = tf.train.ClusterSpec({"ps": ps_spec, "worker": worker_spec})
  if not FLAGS.existing_servers:
    # Not using existing servers. Create an in-process server.
    server = tf.train.Server(
        cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
    if FLAGS.job_name == "ps":
      # Parameter-server tasks never return from join(); everything below
      # only runs for worker tasks.
      server.join()
  is_chief = (FLAGS.task_index == 0)
  # NOTE(review): a negative num_gpus leaves worker_device unset, and the
  # tf.device() call below would raise NameError — flag is assumed >= 0.
  if FLAGS.num_gpus > 0:
    # Avoid gpu allocation conflict: now allocate task_num -> #gpu
    # for each worker in the corresponding machine
    gpu = (FLAGS.task_index % FLAGS.num_gpus)
    worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
  elif FLAGS.num_gpus == 0:
    # Just allocate the CPU to worker server
    cpu = 0
    worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
  # The device setter will automatically place Variables ops on separate
  # parameter servers (ps). The non-Variable ops will be placed on the workers.
  # The ps use CPU and workers use corresponding GPU
  with tf.device(
      tf.train.replica_device_setter(
          worker_device=worker_device,
          ps_device="/job:ps/cpu:0",
          cluster=cluster)):
    # Shared step counter lives on the ps so all workers see the same value.
    global_step = tf.Variable(0, name="global_step", trainable=False)
    # Variables of the hidden layer
    hid_w = tf.Variable(
        tf.truncated_normal(
            [IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
            stddev=1.0 / IMAGE_PIXELS),
        name="hid_w")
    hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
    # Variables of the softmax layer
    sm_w = tf.Variable(
        tf.truncated_normal(
            [FLAGS.hidden_units, 10],
            stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
        name="sm_w")
    sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
    # Ops: located on the worker specified with FLAGS.task_index
    x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
    y_ = tf.placeholder(tf.float32, [None, 10])
    hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
    hid = tf.nn.relu(hid_lin)
    y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
    # Clip predictions away from zero so tf.log cannot produce -inf/NaN.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
    opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
    if FLAGS.sync_replicas:
      if FLAGS.replicas_to_aggregate is None:
        replicas_to_aggregate = num_workers
      else:
        replicas_to_aggregate = FLAGS.replicas_to_aggregate
      # Wrap the optimizer so gradients from several replicas are averaged
      # before a single update is applied (avoids stale gradients).
      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=replicas_to_aggregate,
          total_num_replicas=num_workers,
          name="mnist_sync_replicas")
    train_step = opt.minimize(cross_entropy, global_step=global_step)
    if FLAGS.sync_replicas:
      local_init_op = opt.local_step_init_op
      if is_chief:
        local_init_op = opt.chief_init_op
      ready_for_local_init_op = opt.ready_for_local_init_op
      # Initial token and chief queue runners required by the sync_replicas mode
      chief_queue_runner = opt.get_chief_queue_runner()
      sync_init_op = opt.get_init_tokens_op()
    init_op = tf.global_variables_initializer()
    train_dir = tempfile.mkdtemp()
    if FLAGS.sync_replicas:
      sv = tf.train.Supervisor(
          is_chief=is_chief,
          logdir=train_dir,
          init_op=init_op,
          local_init_op=local_init_op,
          ready_for_local_init_op=ready_for_local_init_op,
          recovery_wait_secs=1,
          global_step=global_step)
    else:
      sv = tf.train.Supervisor(
          is_chief=is_chief,
          logdir=train_dir,
          init_op=init_op,
          recovery_wait_secs=1,
          global_step=global_step)
    # Cap the per-process GPU memory fraction so several workers can share
    # one physical device; device_filters keeps this session from touching
    # other workers' devices.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        device_filters=["/job:ps",
                        "/job:worker/task:%d" % FLAGS.task_index],
        gpu_options=gpu_options)
    sess_config.gpu_options.allow_growth = True
    # The chief worker (task_index==0) session will prepare the session,
    # while the remaining workers will wait for the preparation to complete.
    if is_chief:
      print("Worker %d: Initializing session..." % FLAGS.task_index)
    else:
      print("Worker %d: Waiting for session to be initialized..." %
            FLAGS.task_index)
    if FLAGS.existing_servers:
      server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index]
      print("Using existing server at: %s" % server_grpc_url)
      sess = sv.prepare_or_wait_for_session(server_grpc_url, config=sess_config)
    else:
      sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
    print("Worker %d: Session initialization complete." % FLAGS.task_index)
    if FLAGS.sync_replicas and is_chief:
      # Chief worker will start the chief queue runner and call the init op.
      sess.run(sync_init_op)
      sv.start_queue_runners(sess, [chief_queue_runner])
    # Perform training
    time_begin = time.time()
    print("Training begins @ %f" % time_begin)
    local_step = 0
    while True:
      # Training feed
      batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
      train_feed = {x: batch_xs, y_: batch_ys}
      _, step = sess.run([train_step, global_step], feed_dict=train_feed)
      local_step += 1
      now = time.time()
      print("%f: Worker %d: training step %d done (global step: %d)" %
            (now, FLAGS.task_index, local_step, step))
      # Stop on the shared global step, not the local one, so all workers
      # together perform FLAGS.train_steps updates.
      if step >= FLAGS.train_steps:
        break
    time_end = time.time()
    print("Training ends @ %f" % time_end)
    training_time = time_end - time_begin
    print("Training elapsed time: %f s" % training_time)
    # Validation feed
    val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
    val_xent = sess.run(cross_entropy, feed_dict=val_feed)
    print("After %d training step(s), validation cross entropy = %g" %
          (FLAGS.train_steps, val_xent))
if __name__ == "__main__":
tf.app.run()
| 42.252747 | 99 | 0.641179 |
85f0008e600bee5a564459a66b9b0a316c267c5a | 8,357 | py | Python | PythonVirtEnv/Lib/site-packages/openpyxl/styles/stylesheet.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 6 | 2021-09-18T07:19:54.000Z | 2021-09-18T07:20:07.000Z | PythonVirtEnv/Lib/site-packages/openpyxl/styles/stylesheet.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 5 | 2021-08-06T09:41:32.000Z | 2021-08-17T08:37:47.000Z | PythonVirtEnv/Lib/site-packages/openpyxl/styles/stylesheet.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 3 | 2021-10-24T01:01:01.000Z | 2021-11-29T23:13:02.000Z | # Copyright (c) 2010-2021 openpyxl
from warnings import warn
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
)
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.constants import ARC_STYLE, SHEET_MAIN_NS
from openpyxl.xml.functions import fromstring
from .builtins import styles
from .colors import ColorList, COLOR_INDEX
from .differential import DifferentialStyle
from .table import TableStyleList
from .borders import Border
from .fills import Fill
from .fonts import Font
from .numbers import (
NumberFormatList,
BUILTIN_FORMATS,
BUILTIN_FORMATS_MAX_SIZE,
BUILTIN_FORMATS_REVERSE,
is_date_format,
is_timedelta_format,
builtin_format_code
)
from .named_styles import (
_NamedCellStyleList
)
from .cell_style import CellStyle, CellStyleList
class Stylesheet(Serialisable):
    """In-memory model of the styles.xml part of an xlsx package.

    The class-level descriptors mirror the <styleSheet> XML schema and are
    (de)serialised by the Serialisable machinery.  __init__ additionally
    derives loader-facing views (cell_styles, alignments, protections,
    number_formats, named_styles, date_formats, timedelta_formats) used when
    parsing worksheet cells.
    """
    tagname = "styleSheet"
    # Schema-declared children; order of serialisation is __elements__.
    numFmts = Typed(expected_type=NumberFormatList)
    fonts = NestedSequence(expected_type=Font, count=True)
    fills = NestedSequence(expected_type=Fill, count=True)
    borders = NestedSequence(expected_type=Border, count=True)
    cellStyleXfs = Typed(expected_type=CellStyleList)
    cellXfs = Typed(expected_type=CellStyleList)
    cellStyles = Typed(expected_type=_NamedCellStyleList)
    dxfs = NestedSequence(expected_type=DifferentialStyle, count=True)
    tableStyles = Typed(expected_type=TableStyleList, allow_none=True)
    colors = Typed(expected_type=ColorList, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)
    __elements__ = ('numFmts', 'fonts', 'fills', 'borders', 'cellStyleXfs',
                    'cellXfs', 'cellStyles', 'dxfs', 'tableStyles', 'colors')
    def __init__(self,
                 numFmts=None,
                 fonts=(),
                 fills=(),
                 borders=(),
                 cellStyleXfs=None,
                 cellXfs=None,
                 cellStyles=None,
                 dxfs=(),
                 tableStyles=None,
                 colors=None,
                 extLst=None,
                 ):
        if numFmts is None:
            numFmts = NumberFormatList()
        self.numFmts = numFmts
        self.number_formats = IndexedList()
        self.fonts = fonts
        self.fills = fills
        self.borders = borders
        if cellStyleXfs is None:
            cellStyleXfs = CellStyleList()
        self.cellStyleXfs = cellStyleXfs
        if cellXfs is None:
            cellXfs = CellStyleList()
        self.cellXfs = cellXfs
        if cellStyles is None:
            cellStyles = _NamedCellStyleList()
        self.cellStyles = cellStyles
        self.dxfs = dxfs
        self.tableStyles = tableStyles
        self.colors = colors
        # Derived views of the raw XML collections, computed once up front.
        self.cell_styles = self.cellXfs._to_array()
        self.alignments = self.cellXfs.alignments
        self.protections = self.cellXfs.prots
        self._normalise_numbers()
        self.named_styles = self._merge_named_styles()
    @classmethod
    def from_tree(cls, node):
        # strip all attribs
        attrs = dict(node.attrib)
        for k in attrs:
            del node.attrib[k]
        return super(Stylesheet, cls).from_tree(node)
    def _merge_named_styles(self):
        """
        Merge named style names "cellStyles" with their associated styles
        "cellStyleXfs"
        """
        named_styles = self.cellStyles.names
        for style in named_styles:
            self._expand_named_style(style)
        return named_styles
    def _expand_named_style(self, named_style):
        """
        Bind format definitions for a named style from the associated style
        record
        """
        xf = self.cellStyleXfs[named_style.xfId]
        named_style.font = self.fonts[xf.fontId]
        named_style.fill = self.fills[xf.fillId]
        named_style.border = self.borders[xf.borderId]
        # Builtin number formats live below BUILTIN_FORMATS_MAX_SIZE; ids at
        # or above it refer to formats declared in this stylesheet.
        if xf.numFmtId < BUILTIN_FORMATS_MAX_SIZE:
            formats = BUILTIN_FORMATS
        else:
            formats = self.custom_formats
        if xf.numFmtId in formats:
            named_style.number_format = formats[xf.numFmtId]
        if xf.alignment:
            named_style.alignment = xf.alignment
        if xf.protection:
            named_style.protection = xf.protection
    def _split_named_styles(self, wb):
        """
        Convert NamedStyle into separate CellStyle and Xf objects
        """
        for style in wb._named_styles:
            self.cellStyles.cellStyle.append(style.as_name())
            self.cellStyleXfs.xf.append(style.as_xf())
    @property
    def custom_formats(self):
        # Mapping of numFmtId -> format code for formats declared in this
        # stylesheet (as opposed to the spec's builtin formats).
        return dict([(n.numFmtId, n.formatCode) for n in self.numFmts.numFmt])
    def _normalise_numbers(self):
        """
        Rebase custom numFmtIds with a floor of 164 when reading stylesheet
        And index datetime formats
        """
        date_formats = set()
        timedelta_formats = set()
        custom = self.custom_formats
        formats = self.number_formats
        for idx, style in enumerate(self.cell_styles):
            if style.numFmtId in custom:
                fmt = custom[style.numFmtId]
                if fmt in BUILTIN_FORMATS_REVERSE: # remove builtins
                    style.numFmtId = BUILTIN_FORMATS_REVERSE[fmt]
                else:
                    style.numFmtId = formats.add(fmt) + BUILTIN_FORMATS_MAX_SIZE
            else:
                fmt = builtin_format_code(style.numFmtId)
            if is_date_format(fmt):
                # Create an index of which styles refer to datetimes
                date_formats.add(idx)
            if is_timedelta_format(fmt):
                # Create an index of which styles refer to timedeltas
                timedelta_formats.add(idx)
        self.date_formats = date_formats
        self.timedelta_formats = timedelta_formats
    def to_tree(self, tagname=None, idx=None, namespace=None):
        # Serialise and stamp the SpreadsheetML namespace on the root.
        tree = super(Stylesheet, self).to_tree(tagname, idx, namespace)
        tree.set("xmlns", SHEET_MAIN_NS)
        return tree
def apply_stylesheet(archive, wb):
    """Read styles.xml from *archive*, if present, and install its contents
    onto the workbook's internal style registries.
    """
    try:
        xml_source = archive.read(ARC_STYLE)
    except KeyError:
        # Package ships without a stylesheet part; nothing to apply.
        return wb
    root = fromstring(xml_source)
    sheet = Stylesheet.from_tree(root)

    # Indexed collections shared by every cell in the workbook.
    wb._fonts = IndexedList(sheet.fonts)
    wb._fills = IndexedList(sheet.fills)
    wb._borders = IndexedList(sheet.borders)
    wb._differential_styles.styles = sheet.dxfs
    wb._number_formats = sheet.number_formats
    wb._protections = sheet.protections
    wb._alignments = sheet.alignments
    wb._table_styles = sheet.tableStyles

    # need to overwrite openpyxl defaults in case workbook has different ones
    wb._cell_styles = sheet.cell_styles
    wb._named_styles = sheet.named_styles
    wb._date_formats = sheet.date_formats
    wb._timedelta_formats = sheet.timedelta_formats

    for named_style in wb._named_styles:
        named_style.bind(wb)

    if not wb._named_styles:
        default_style = styles['Normal']
        wb.add_named_style(default_style)
        warn("Workbook contains no default style, apply openpyxl's default")

    if sheet.colors is not None:
        wb._colors = sheet.colors.index
def write_stylesheet(wb):
    """Serialise the workbook's style collections into a <styleSheet> tree."""
    from .numbers import NumberFormat

    sheet = Stylesheet()
    sheet.fonts = wb._fonts
    sheet.fills = wb._fills
    sheet.borders = wb._borders
    sheet.dxfs = wb._differential_styles.styles
    sheet.colors = ColorList(indexedColors=wb._colors)

    # Custom number formats are numbered starting above the builtin id range.
    sheet.numFmts.numFmt = [
        NumberFormat(fmt_id, fmt_code)
        for fmt_id, fmt_code in enumerate(wb._number_formats,
                                          BUILTIN_FORMATS_MAX_SIZE)
    ]

    # Rehydrate full CellStyle records from the packed style arrays,
    # re-attaching any alignment/protection the cell refers to.
    cell_xfs = []
    for array_style in wb._cell_styles:
        record = CellStyle.from_array(array_style)
        if array_style.alignmentId:
            record.alignment = wb._alignments[array_style.alignmentId]
        if array_style.protectionId:
            record.protection = wb._protections[array_style.protectionId]
        cell_xfs.append(record)
    sheet.cellXfs = CellStyleList(xf=cell_xfs)

    sheet._split_named_styles(wb)
    sheet.tableStyles = wb._table_styles
    return sheet.to_tree()
| 32.266409 | 80 | 0.658969 |
d11864d0e9ce8b34a08668d6a870cd9245569fcd | 115 | py | Python | commlib/transports/__init__.py | robotics-4-all/commlib-py | 9d56e0a2e13410feac0e10d9866a1c4a60ade2c7 | [
"MIT"
] | 1 | 2021-06-09T09:32:53.000Z | 2021-06-09T09:32:53.000Z | commlib/transports/__init__.py | robotics-4-all/commlib-py | 9d56e0a2e13410feac0e10d9866a1c4a60ade2c7 | [
"MIT"
] | 7 | 2022-03-10T23:57:25.000Z | 2022-03-13T19:12:54.000Z | commlib/transports/__init__.py | robotics-4-all/commlib-py | 9d56e0a2e13410feac0e10d9866a1c4a60ade2c7 | [
"MIT"
] | 1 | 2021-06-07T16:25:05.000Z | 2021-06-07T16:25:05.000Z | """Protocol Transports sub-package"""
__author__ = """Konstantinos Panayiotou"""
__email__ = 'klpanagi@gmail.com'
| 23 | 42 | 0.73913 |
24d15732a7fa222d1d3879eb0e633fe4e21b3fd7 | 3,702 | py | Python | shards/mishards/grpc_utils/grpc_args_parser.py | JinHai-CN/milvus | b9ae5983776aef497ef91e994a3a9ae120a98868 | [
"Apache-2.0"
] | 3 | 2020-01-20T02:35:25.000Z | 2020-04-29T03:54:21.000Z | shards/mishards/grpc_utils/grpc_args_parser.py | liangwlw/milvus | 7e7f626b9c7288c1c82f5dafed87d33897f4b64e | [
"Apache-2.0"
] | null | null | null | shards/mishards/grpc_utils/grpc_args_parser.py | liangwlw/milvus | 7e7f626b9c7288c1c82f5dafed87d33897f4b64e | [
"Apache-2.0"
] | 2 | 2021-03-15T11:47:38.000Z | 2021-03-15T12:27:16.000Z | import ujson
from milvus import Status
from functools import wraps
def error_status(func):
    """Decorator pairing *func*'s result with a milvus ``Status``.

    On success the wrapped call returns ``(Status(code=0, ...), result)``;
    if *func* raises, the exception is swallowed and
    ``(Status(UNEXPECTED_ERROR, str(exc)), None)`` is returned instead.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            payload = func(*args, **kwargs)
        except Exception as exc:
            failure = Status(code=Status.UNEXPECTED_ERROR, message=str(exc))
            return failure, None
        return Status(code=0, message="Success"), payload
    return wrapper
class GrpcArgsParser(object):
    """Class-method helpers that unpack milvus gRPC protobuf messages into
    plain Python values.

    Most parsers are wrapped with ``@error_status`` and therefore return a
    ``(Status, payload)`` tuple instead of raising on malformed input.
    """
    @classmethod
    @error_status
    def parse_proto_CollectionSchema(cls, param):
        """Unpack a CollectionSchema message into a plain dict."""
        _collection_schema = {
            'collection_name': param.collection_name,
            'dimension': param.dimension,
            'index_file_size': param.index_file_size,
            'metric_type': param.metric_type
        }
        return param.status, _collection_schema
    @classmethod
    @error_status
    def parse_proto_CollectionName(cls, param):
        """Extract the bare collection name."""
        return param.collection_name
    @classmethod
    @error_status
    def parse_proto_PreloadCollectionParam(cls, param):
        """Return (collection_name, partition_tags) from a preload request."""
        return param.collection_name, list(param.partition_tag_array)
    @classmethod
    @error_status
    def parse_proto_FlushParam(cls, param):
        """Return the list of collection names to flush."""
        return list(param.collection_name_array)
    @classmethod
    @error_status
    def parse_proto_Index(cls, param):
        """Return index type plus its raw params string.

        Assumes the first extra_params entry carries the params; an empty
        extra_params raises IndexError, which @error_status converts into
        an error Status.
        """
        _index = {
            'index_type': param.index_type,
            'params': param.extra_params[0].value
        }
        return _index
    @classmethod
    @error_status
    def parse_proto_IndexParam(cls, param):
        """Return (collection_name, index_type, params-dict).

        Only the extra_params entry keyed 'params' is decoded (JSON); if
        several are present the last one wins.
        """
        _collection_name = param.collection_name
        _index_type = param.index_type
        _index_param = {}
        for params in param.extra_params:
            if params.key == 'params':
                _index_param = ujson.loads(str(params.value))
        return _collection_name, _index_type, _index_param
    @classmethod
    @error_status
    def parse_proto_Command(cls, param):
        """Extract the command string from a Command message."""
        _cmd = param.cmd
        return _cmd
    @classmethod
    @error_status
    def parse_proto_RowRecord(cls, param):
        """Return a row record's float vector as a plain list."""
        return list(param.vector_data)
    @classmethod
    def parse_proto_PartitionParam(cls, param):
        # NOTE(review): unlike its siblings this parser is NOT wrapped with
        # @error_status, so it returns a bare (name, tag) tuple and lets
        # exceptions propagate — confirm callers expect that asymmetry.
        _collection_name = param.collection_name
        _tag = param.tag
        return _collection_name, _tag
    @classmethod
    @error_status
    def parse_proto_SearchParam(cls, param):
        """Unpack a SearchParam into (name, query_records, topk, params).

        Query records may be float vectors or binary vectors; each is
        converted to a list of floats or a bytes object respectively.
        """
        _collection_name = param.collection_name
        _topk = param.topk
        if len(param.extra_params) == 0:
            raise Exception("Search param loss")
        _params = ujson.loads(str(param.extra_params[0].value))
        _query_record_array = []
        if param.query_record_array:
            for record in param.query_record_array:
                if record.float_data:
                    _query_record_array.append(list(record.float_data))
                else:
                    _query_record_array.append(bytes(record.binary_data))
        else:
            raise Exception("Search argument parse error: record array is empty")
        return _collection_name, _query_record_array, _topk, _params
    @classmethod
    @error_status
    def parse_proto_DeleteByIDParam(cls, param):
        """Return (collection_name, ids-to-delete)."""
        _collection_name = param.collection_name
        _id_array = list(param.id_array)
        return _collection_name, _id_array
    @classmethod
    @error_status
    def parse_proto_VectorIdentity(cls, param):
        """Return (collection_name, vector ids) from a VectorIdentity."""
        _collection_name = param.collection_name
        _ids = list(param.id_array)
        return _collection_name, _ids
    @classmethod
    @error_status
    def parse_proto_GetVectorIDsParam(cls, param):
        """Return (collection_name, segment_name).

        NOTE(review): the local `_collection__name` has a doubled underscore;
        harmless (purely local) but worth normalising in a later cleanup.
        """
        _collection__name = param.collection_name
        _segment_name = param.segment_name
        return _collection__name, _segment_name
| 27.422222 | 81 | 0.657482 |
9841eaaed0ec90a0beb6803bc78cdb3248bac4a0 | 22,534 | py | Python | scripts/commands/open.py | tartakynov/enso | 3f5f7a3dcd15253889d40dc6414a24ad690ce74c | [
"BSD-3-Clause"
] | 6 | 2016-01-26T23:21:42.000Z | 2022-01-30T04:20:04.000Z | scripts/commands/open.py | tartakynov/enso | 3f5f7a3dcd15253889d40dc6414a24ad690ce74c | [
"BSD-3-Clause"
] | null | null | null | scripts/commands/open.py | tartakynov/enso | 3f5f7a3dcd15253889d40dc6414a24ad690ce74c | [
"BSD-3-Clause"
] | 4 | 2017-04-28T04:51:57.000Z | 2020-04-01T14:14:33.000Z | from win32com.shell import shell, shellcon
import os
import glob
import operator
import re
import time
import win32api
import win32con
import win32process
import pythoncom
import logging
unlearn_open_undo = []
my_documents_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
LEARN_AS_DIR = os.path.join(my_documents_dir, u"Enso's Learn As Open Commands")
# Check if Learn-as dir exist and create it if not
if (not os.path.isdir(LEARN_AS_DIR)):
os.makedirs(LEARN_AS_DIR)
SHORTCUT_TYPE_EXECUTABLE = 'x'
SHORTCUT_TYPE_FOLDER = 'f'
SHORTCUT_TYPE_URL = 'u'
SHORTCUT_TYPE_DOCUMENT = 'd'
SHORTCUT_TYPE_CONTROL_PANEL = 'c'
def _cpl_exists(cpl_name):
return (
os.path.isfile(
os.path.expandvars("${WINDIR}\\%s.cpl") % cpl_name)
or os.path.isfile(
os.path.expandvars("${WINDIR}\\system32\\%s.cpl") % cpl_name)
)
control_panel_applets = [i[:3] for i in (
(SHORTCUT_TYPE_CONTROL_PANEL,
u"control panel",
"rundll32.exe shell32.dll,Control_RunDLL"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"accessibility options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL access.cpl"),
#accessibility options (Keyboard):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,1
#accessibility options (Sound):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,2
#accessibility options (Display):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,3
#accessibility options (Mouse):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,4
#accessibility options (General):
# rundll32.exe shell32.dll,Control_RunDLL access.cpl,,5
(SHORTCUT_TYPE_CONTROL_PANEL,
u"add or remove programs (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl"),
#add or remove programs (Install/Uninstall):
# rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl,,1
#add or remove programs (Windows Setup):
# rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl,,2
#add or remove programs (Startup Disk):
# rundll32.exe shell32.dll,Control_RunDLL appwiz.cpl,,3
(SHORTCUT_TYPE_CONTROL_PANEL,
u"display properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL desk.cpl"),
#Display Properties (Background):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,0
#Display Properties (Screen Saver):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,1
#Display Properties (Appearance):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,2
#Display Properties (Settings):
# rundll32.exe shell32.dll,Control_RunDLL desk.cpl,,3
(SHORTCUT_TYPE_CONTROL_PANEL,
u"regional and language options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL intl.cpl"),
#Regional Settings Properties (Regional Settings):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,0
#Regional Settings Properties (Number):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,1
#Regional Settings Properties (Currency):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,2
#Regional Settings Properties (Time):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,3
#Regional Settings Properties (Date):
# rundll32.exe shell32.dll,Control_RunDLL intl.cpl,,4
(SHORTCUT_TYPE_CONTROL_PANEL,
u"game controllers (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL joy.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"mouse properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL main.cpl @0"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"keyboard properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL main.cpl @1"),
# DOES NOT WORK
#Printers:
# rundll32.exe shell32.dll,Control_RunDLL main.cpl @2
# DOES NOT WORK
#Fonts:
# rundll32.exe shell32.dll,Control_RunDLL main.cpl @3
(SHORTCUT_TYPE_CONTROL_PANEL,
u"microsoft exchange profiles (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL mlcfg32.cpl",
_cpl_exists("mlcfg32")),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"sounds and audio devices (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl"),
#Multimedia Properties (Audio):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,0
#Multimedia Properties (Video):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,1
#Multimedia Properties (MIDI):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,2
#Multimedia Properties (CD Music):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,3
#Multimedia Properties (Advanced):
# rundll32.exe shell32.dll,Control_RunDLL mmsys.cpl,,4
(SHORTCUT_TYPE_CONTROL_PANEL,
u"modem properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL modem.cpl",
_cpl_exists("modem")),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"network connections (control panel)",
"RUNDLL32.exe SHELL32.DLL,Control_RunDLL NCPA.CPL"),
#Password Properties (Change Passwords):
# rundll32.exe shell32.dll,Control_RunDLL password.cpl
(SHORTCUT_TYPE_CONTROL_PANEL,
u"system properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,0"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"device manager (control panel)",
#"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,1"
"devmgmt.msc"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"disk management (control panel)",
"diskmgmt.msc"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"scanners and cameras (control panel)",
"control.exe sticpl.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"removable storage (control panel)",
"ntmsmgr.msc"),
#dfrg.msc Disk defrag
#eventvwr.msc Event viewer
#eventvwr.exe \\computername View the Event Log at a remote computer
#fsmgmt.msc Shared folders
#gpedit.msc Group policies
#lusrmgr.msc Local users and groups
#perfmon.msc Performance monitor
#rsop.msc Resultant set of policies
#secpol.msc Local security settings
#services.msc Various Services
(SHORTCUT_TYPE_CONTROL_PANEL,
u"hardware profiles (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,2"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"advanced system properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl,,3"),
#Add New Hardware Wizard:
# rundll32.exe shell32.dll,Control_RunDLL sysdm.cpl @1
(SHORTCUT_TYPE_CONTROL_PANEL,
u"date and time (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL timedate.cpl"),
#Microsoft Workgroup Postoffice Admin:
# rundll32.exe shell32.dll,Control_RunDLL wgpocpl.cpl
#Open With (File Associations):
# rundll32.exe shell32.dll,OpenAs_RunDLL d:\path\filename.ext
#Run Diskcopy Dialog:
# rundll32 diskcopy.dll,DiskCopyRunDll
#Create New Shortcut Wizard:
# 'puts the new shortcut in the location specified by %1
# rundll32.exe AppWiz.Cpl,NewLinkHere %1
(SHORTCUT_TYPE_CONTROL_PANEL,
u"add new hardware wizard (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL hdwwiz.cpl @1"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"add printer wizard (control panel)",
"rundll32.exe shell32.dll,SHHelpShortcuts_RunDLL AddPrinter"),
#(SHORTCUT_TYPE_CONTROL_PANEL,
# u"dialup networking wizard (cp)",
# "rundll32.exe rnaui.dll,RnaWizard"),
#Open a Scrap Document:
# rundll32.exe shscrap.dll,OpenScrap_RunDLL /r /x %1
#Create a Briefcase:
# rundll32.exe syncui.dll,Briefcase_Create
(SHORTCUT_TYPE_CONTROL_PANEL,
u"printers and faxes (control panel)",
"rundll32.exe shell32.dll,SHHelpShortcuts_RunDLL PrintersFolder"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"fonts (control panel)",
"rundll32.exe shell32.dll,SHHelpShortcuts_RunDLL FontsFolder"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"windows firewall (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL firewall.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"speech properties (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL \"${COMMONPROGRAMFILES}\\Microsoft Shared\\Speech\\sapi.cpl\"",
os.path.isfile(os.path.expandvars("${COMMONPROGRAMFILES}\\Microsoft Shared\\Speech\\sapi.cpl"))),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"internet options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL inetcpl.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"odbc data source administrator (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL odbccp32.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"power options (control panel)",
"rundll32.exe shell32.dll,Control_RunDLL powercfg.cpl"),
(SHORTCUT_TYPE_CONTROL_PANEL,
u"bluetooth properties (control panel)",
"control.exe bhtprops.cpl",
_cpl_exists("bhtprops")),
#Pick a Time Zone Dialog:
# rundll32.exe shell32.dll,Control_RunDLL timedate.cpl,,/f
) if len(i) < 4 or i[3]]
#print control_panel_applets
class _PyShortcut():
    """Base wrapper around a COM shortcut object (shell link or internet
    shortcut) adding load/save helpers and shortcut-type detection.

    Unknown attributes are delegated to the wrapped COM object, so the
    underlying IShellLink / IUniformResourceLocator API stays reachable.
    """
    def __init__( self, base ):
        # base: the COM object implementing the shortcut interfaces.
        self._base = base
        self._base_loaded = False
        self._shortcut_type = None
    def load( self, filename = None):
        """Load shortcut data from ``filename`` (or the last filename used)."""
        if filename:
            self._filename = filename
        try:
            self._base.QueryInterface( pythoncom.IID_IPersistFile ).Load( self._filename )
        except Exception:
            # Was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception (error is logged).
            logging.error("Error loading shell-link for file %s" % self._filename)
        # Marked loaded even on failure so get_type() does not raise later;
        # the failure has already been logged above.
        self._base_loaded = True
    def save( self, filename = None):
        """Persist shortcut data to ``filename`` (or the last filename used)."""
        if filename:
            self._filename = filename
        self._base.QueryInterface( pythoncom.IID_IPersistFile ).Save( self._filename, 0 )
    def get_filename(self):
        """Return the last filename passed to load()/save()."""
        return self._filename
    def get_type(self):
        """Classify the loaded shortcut as folder/executable/document/URL.

        Returns one of the module-level SHORTCUT_TYPE_* constants; requires
        that load() has been called first.
        """
        if not self._base_loaded:
            raise Exception("Shortcut data has not been loaded yet. Use load(filename) before using get_type()")
        name, ext = os.path.splitext(self._filename)
        if ext.lower() == '.lnk':
            # GetPath() returns (path, find_data); an empty path means the
            # link does not point at a filesystem object.
            file_path = self._base.GetPath(0)
            if file_path and file_path[0]:
                if os.path.isdir(file_path[0]):
                    self._shortcut_type = SHORTCUT_TYPE_FOLDER
                elif (os.path.splitext(file_path[0])[1].lower()
                    in ('.exe', '.com', '.cmd', '.bat')):
                    self._shortcut_type = SHORTCUT_TYPE_EXECUTABLE
                else:
                    self._shortcut_type = SHORTCUT_TYPE_DOCUMENT
            else:
                self._shortcut_type = SHORTCUT_TYPE_DOCUMENT
        elif ext.lower() == '.url':
            self._shortcut_type = SHORTCUT_TYPE_URL
        else:
            self._shortcut_type = SHORTCUT_TYPE_DOCUMENT
        return self._shortcut_type
    def __getattr__( self, name ):
        """Delegate unknown attribute access to the wrapped COM object."""
        if name != "_base":
            return getattr( self._base, name )
        # A missing ``_base`` must raise instead of silently returning None
        # (the implicit result of the original fall-through), and must not
        # recurse back through this method.
        raise AttributeError(name)
class PyShellLink(_PyShortcut):
    """Shortcut wrapper backed by the shell's IShellLink COM class."""
    def __init__(self):
        clsid = shell.CLSID_ShellLink
        iid = shell.IID_IShellLink
        link = pythoncom.CoCreateInstance(
            clsid, None, pythoncom.CLSCTX_INPROC_SERVER, iid)
        _PyShortcut.__init__(self, link)
class PyInternetShortcut(_PyShortcut):
    """Shortcut wrapper backed by the shell's InternetShortcut COM class."""
    def __init__(self):
        clsid = shell.CLSID_InternetShortcut
        iid = shell.IID_IUniformResourceLocator
        link = pythoncom.CoCreateInstance(
            clsid, None, pythoncom.CLSCTX_INPROC_SERVER, iid)
        _PyShortcut.__init__(self, link)
def expand_path_variables(file_path):
    """Expand Windows-style %VAR% references (and OS-native variables).

    Unknown %VAR% names expand to the literal string 'UNKNOWN'; the result
    is then passed through os.path.expandvars for native expansion.
    """
    import re
    def _lookup(match):
        # Strip the surrounding percent signs before the environment lookup.
        return os.environ.get(match.group()[1:-1], 'UNKNOWN')
    expanded = re.sub(r'%\w+%', _lookup, file_path)
    return os.path.expandvars(expanded)
def displayMessage(msg):
    """Show ``msg`` to the user as an Enso paragraph message."""
    from enso import messages
    messages.displayMessage("<p>%s</p>" % msg)
# Shortcut base-names that must never become "open" commands
# (uninstallers, readme/FAQ/help entries).
ignored = re.compile("(uninstall|read ?me|faq|f.a.q|help)", re.IGNORECASE)
"""
def get_control_panel_applets():
import _winreg as reg
reghandle = None
cpl_applets = []
try:
regkey = None
try:
reghandle = reg.ConnectRegistry(None, reg.HKEY_LOCAL_MACHINE)
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Control Panel\\Cpls"
regkey = reg.OpenKey(reghandle, key)
index = 0
try:
while True:
regval = reg.EnumValue(regkey, index)
cpl_applets.append((
SHORTCUT_TYPE_CONTROL_PANEL,
regval[0].lower().replace("/"," ") + " (control panel)",
regval[1]))
index += 1
except Exception, e:
pass
except Exception, e:
print e
finally:
if regkey:
reg.CloseKey(regkey)
regkey = None
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ControlPanel\\Namespace"
regkey = reg.OpenKey(reghandle, key)
index = 0
try:
while True:
cplkey = reg.EnumKey(regkey, index)
regkey1 = None
try:
regkey1 = reg.OpenKey(reghandle, key + "\\" + cplkey)
cpl_applets.append((
SHORTCUT_TYPE_CONTROL_PANEL,
reg.QueryValueEx(regkey1, "Name")[0].lower().replace("/"," ") + " (control panel)",
reg.QueryValueEx(regkey1, "Module")[0]))
except:
pass
finally:
if regkey1:
reg.CloseKey(regkey1)
index += 1
except Exception, e:
pass
except Exception, e:
print e
finally:
if regkey:
reg.CloseKey(regkey)
finally:
if reghandle:
reg.CloseKey(reghandle)
return cpl_applets
print get_control_panel_applets()
"""
def get_shortcuts(directory):
    """Recursively collect .lnk/.url shortcuts under ``directory``.

    Returns a list of (shortcut_type, lowercased-name, full-path) tuples,
    skipping anything matched by the module-level ``ignored`` pattern.
    """
    shortcuts = []
    # One shell-link object is reused for every .lnk file scanned.
    sl = PyShellLink()
    for dirpath, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if ignored.search(filename):
                continue
            name, ext = os.path.splitext(filename)
            if not ext.lower() in (".lnk", ".url"):
                continue
            #print name, ext
            shortcut_type = SHORTCUT_TYPE_DOCUMENT
            if ext.lower() == ".lnk":
                # Loading the link lets us classify it (folder/exe/document).
                sl.load(os.path.join(dirpath, filename))
                shortcut_type = sl.get_type()
            elif ext.lower() == ".url":
                shortcut_type = SHORTCUT_TYPE_URL
            shortcuts.append((shortcut_type, name.lower(), os.path.join(dirpath, filename)))
    return shortcuts
def reload_shortcuts_map():
    """Rescan all shortcut locations and rebuild the name -> entry map.

    Scans the learn-as dir, desktop, Quick Launch and both start menus,
    then appends the static control-panel applet list.  Later entries with
    the same lowercased name overwrite earlier ones.
    """
    desktop_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_DESKTOPDIRECTORY, 0, 0)
    quick_launch_dir = os.path.join(
        shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0),
        "Microsoft",
        "Internet Explorer",
        "Quick Launch")
    start_menu_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_STARTMENU, 0, 0)
    common_start_menu_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_STARTMENU, 0, 0)
    #control_panel = shell.SHGetFolderPath(0, shellcon.CSIDL_CONTROLS, 0, 0)
    shortcuts = get_shortcuts(LEARN_AS_DIR) + \
        get_shortcuts(desktop_dir) + \
        get_shortcuts(quick_launch_dir) + \
        get_shortcuts(start_menu_dir) + \
        get_shortcuts(common_start_menu_dir) + \
        control_panel_applets
    return dict((s[1], s) for s in shortcuts)
# Module-level cache of learned/open-able shortcuts, rebuilt on learn/unlearn.
shortcuts_map = reload_shortcuts_map()
def cmd_open(ensoapi, target):
    """ Continue typing to open an application or document """
    # (The docstring above is user-visible Enso help text; not reworded.)
    displayMessage(u"Opening <command>%s</command>..." % target)
    try:
        global shortcuts_map
        # A missing target raises KeyError, caught by the outer handler,
        # so an unknown name just reports failure (returns False).
        shortcut_type, shortuct_id, file_path = shortcuts_map[target]
        file_path = os.path.normpath(expand_path_variables(file_path))
        logging.info("Executing '%s'" % file_path)
        if shortcut_type == SHORTCUT_TYPE_CONTROL_PANEL:
            # Control-panel entries can be "executable arguments"; split on
            # the first space so ShellExecute gets the parts separately.
            if " " in file_path:
                executable = file_path[0:file_path.index(' ')]
                params = file_path[file_path.index(' ')+1:]
            else:
                executable = file_path
                params = None
            try:
                rcode = win32api.ShellExecute(
                    0,
                    'open',
                    executable,
                    params,
                    None,
                    win32con.SW_SHOWDEFAULT)
            except Exception, e:
                logging.error(e)
        else:
            # Ordinary shortcuts/documents: let the shell pick the handler.
            os.startfile(file_path)
        return True
    except Exception, e:
        logging.error(e)
        return False
# Enso uses valid_args for command-name completion of "open ...".
cmd_open.valid_args = [s[1] for s in shortcuts_map.values()]
def cmd_open_with(ensoapi, application):
    """ Opens your currently selected file(s) or folder with the specified application """
    # (The docstring above is user-visible Enso help text; not reworded.)
    seldict = ensoapi.get_selection()
    if seldict.get('files'):
        file = seldict['files'][0]
    elif seldict.get('text'):
        file = seldict['text'].strip()
    else:
        file = None
    if not (file and (os.path.isfile(file) or os.path.isdir(file))):
        ensoapi.display_message(u"No file or folder is selected")
        return
    displayMessage(u"Opening <command>%s</command>..." % application)
    #print file, application
    global shortcuts_map
    try:
        print shortcuts_map[application][2]
        print shortcuts_map[application]
        executable = expand_path_variables(shortcuts_map[application][2])
    except:
        # NOTE(review): on lookup failure this bare except only prints
        # diagnostics and leaves ``executable`` unbound, so the next block
        # raises NameError (then logged below) - worth fixing upstream.
        print application
        print shortcuts_map.keys()
        print shortcuts_map.values()
    try:
        rcode = win32api.ShellExecute(
            0,
            'open',
            executable,
            '"%s"' % file,
            os.path.dirname(file),
            win32con.SW_SHOWDEFAULT)
    except Exception, e:
        logging.error(e)
# Only executables are sensible "open with" targets.
cmd_open_with.valid_args = [s[1] for s in shortcuts_map.values() if s[0] == SHORTCUT_TYPE_EXECUTABLE]
def is_url(text):
    """Return True if ``text`` looks like a URL, IP/host, path or e-mail.

    Bug fix: the second positional argument of a compiled pattern's
    ``.search()`` is the *start position*, not a flags value.  The original
    code called ``urltest.search(text, re.I)``, which (since re.I == 2)
    silently skipped the first two characters of ``text`` and never made
    matching case-insensitive.  The flag now goes into re.compile(),
    where it belongs.
    """
    urlfinders = [
        re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?/[-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]*[^]'\\.}>\\),\\\"]", re.IGNORECASE),
        re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?", re.IGNORECASE),
        re.compile("(~/|/|\\./)([-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]|\\\\)+", re.IGNORECASE),
        re.compile("'\\<((mailto:)|)[-A-Za-z0-9\\.]+@[-A-Za-z0-9\\.]+", re.IGNORECASE),
    ]
    for urltest in urlfinders:
        if urltest.search(text):
            return True
    return False
def cmd_learn_as_open(ensoapi, name):
    """ Learn to open a document or application as {name} """
    # (The docstring above is user-visible Enso help text; not reworded.)
    if name is None:
        displayMessage(u"You must provide name")
        return
    seldict = ensoapi.get_selection()
    if seldict.get('files'):
        file = seldict['files'][0]
    elif seldict.get('text'):
        file = seldict['text'].strip()
    else:
        ensoapi.display_message(u"No file is selected")
        return
    if not os.path.isfile(file) and not os.path.isdir(file) and not is_url(file):
        displayMessage(
            u"Selection represents no existing file, folder or URL.")
        return
    # Strip characters that are invalid in Windows filenames.
    file_name = name.replace(":", "").replace("?", "").replace("\\", "")
    file_path = os.path.join(LEARN_AS_DIR, file_name)
    if os.path.isfile(file_path + ".url") or os.path.isfile(file_path + ".lnk"):
        displayMessage(
            u"<command>open %s</command> already exists. Please choose another name."
            % name)
        return
    # URLs become .url internet shortcuts; everything else a .lnk shell link.
    if is_url(file):
        shortcut = PyInternetShortcut()
        shortcut.SetURL(file)
        shortcut.QueryInterface( pythoncom.IID_IPersistFile ).Save(
            file_path + ".url", 0 )
    else:
        shortcut = PyShellLink()
        shortcut.SetPath(file)
        shortcut.SetWorkingDirectory(os.path.dirname(file))
        shortcut.SetIconLocation(file, 0)
        shortcut.QueryInterface( pythoncom.IID_IPersistFile ).Save(
            file_path + ".lnk", 0 )
    #time.sleep(0.5)
    # Refresh the cached map and the completion lists of all open commands.
    global shortcuts_map
    shortcuts_map = reload_shortcuts_map()
    cmd_open.valid_args = [s[1] for s in shortcuts_map.values()]
    cmd_open_with.valid_args = [s[1] for s in shortcuts_map.values() if s[0] == SHORTCUT_TYPE_EXECUTABLE]
    cmd_unlearn_open.valid_args = [s[1] for s in shortcuts_map.values()]
    displayMessage(u"<command>open %s</command> is now a command" % name)
def cmd_unlearn_open(ensoapi, name):
    u""" Unlearn \u201copen {name}\u201d command """
    # (The docstring above is user-visible Enso help text; not reworded.)
    file_path = os.path.join(LEARN_AS_DIR, name)
    # Load the shortcut before deleting it so "undo unlearn" can re-save it.
    if os.path.isfile(file_path + ".lnk"):
        sl = PyShellLink()
        sl.load(file_path + ".lnk")
        unlearn_open_undo.append([name, sl])
        os.remove(file_path + ".lnk")
    elif os.path.isfile(file_path + ".url"):
        sl = PyInternetShortcut()
        sl.load(file_path + ".url")
        unlearn_open_undo.append([name, sl])
        os.remove(file_path + ".url")
    # Refresh the cached map and the completion lists of all open commands.
    global shortcuts_map
    shortcuts_map = reload_shortcuts_map()
    cmd_open.valid_args = [s[1] for s in shortcuts_map.values()]
    cmd_open_with.valid_args = [s[1] for s in shortcuts_map.values() if s[0] == SHORTCUT_TYPE_EXECUTABLE]
    cmd_unlearn_open.valid_args = [s[1] for s in shortcuts_map.values()]
    displayMessage(u"Unlearned <command>open %s</command>" % name)
cmd_unlearn_open.valid_args = [s[1] for s in shortcuts_map.values()]
def cmd_undo_unlearn(ensoapi):
    u""" Undoes your last \u201cunlearn open\u201d command """
    # Nothing recorded by "unlearn open" -> nothing to restore.
    if not unlearn_open_undo:
        ensoapi.display_message(u"There is nothing to undo")
        return
    name, shortcut = unlearn_open_undo.pop()
    # Re-save the cached shortcut object back to its original location.
    shortcut.save()
    displayMessage(u"Undo successful. <command>open %s</command> is now a command" % name)
if __name__ == "__main__":
    # Running this module directly just exercises its doctests.
    import doctest
    doctest.testmod()
# vim:set ff=unix tabstop=4 shiftwidth=4 expandtab:
| 35.542587 | 243 | 0.62581 |
d7e27d56dc3a7d8c931a6319b9c2fbe0ba5e9ce0 | 2,865 | py | Python | visu_regression_nn.py | NiteshBharadwaj/JDOT | 86412332f809f67584fda5b51b1b1e90097ac6b5 | [
"MIT"
] | 75 | 2017-10-31T08:44:41.000Z | 2022-03-04T13:18:46.000Z | visu_regression_nn.py | NiteshBharadwaj/JDOT | 86412332f809f67584fda5b51b1b1e90097ac6b5 | [
"MIT"
] | null | null | null | visu_regression_nn.py | NiteshBharadwaj/JDOT | 86412332f809f67584fda5b51b1b1e90097ac6b5 | [
"MIT"
] | 29 | 2017-10-18T06:21:46.000Z | 2022-02-15T09:22:08.000Z | # -*- coding: utf-8 -*-
"""
Neural network regression example for JDOT
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
# Nicolas Courty <ncourty@irisa.fr>
#
# License: MIT License
import numpy as np
import pylab as pl
import jdot
#from sklearn import datasets
from scipy.spatial.distance import cdist
import ot
import keras
#%% data generation
# Reproducible 1-D toy domain-adaptation problem: a bimodal source
# distribution and a shifted/rescaled target distribution, both labelled
# with a noisy sine.
seed=1985
np.random.seed(seed)
n = 200
# NOTE(review): ntest, nz and theta are defined but never used below.
ntest=200
nz=.3
theta=0.8
n2=int(n/2)
sigma=0.05
# Source samples: two Gaussian clusters around -2 and +2.
xs=np.random.randn(n,1)+2
xs[:n2,:]-=4
ys=sigma*np.random.randn(n,1)+np.sin(xs/2)
fs_s = lambda x: np.sin(x/2)
# Target samples: first cluster squeezed and shifted, then everything +2.
xt=np.random.randn(n,1)+2
xt[:n2,:]/=2
xt[:n2,:]-=3
# NOTE(review): a true Gaussian density needs exp(-(x-m)^2/(2 s^2)); the
# minus sign is missing here.  mus_x/mut_x are never used below, so the
# bug is currently harmless - confirm before reusing them.
gauss = lambda x,s,m: np.exp((x-m)**2/(2*s*s))/(s*np.sqrt(2*np.pi))
mus_x = lambda x: gauss(x,1,2)/2+gauss(x,1,-2)/2
yt=sigma*np.random.randn(n,1)+np.sin(xt/2)
xt+=2
fs_t = lambda x: np.sin((x-2)/2)
mut_x = lambda x: gauss(x,1,2)/2+gauss(x,1./2,-4)/2
# Dense grid used for plotting the true/estimated regression curves.
xvisu=np.linspace(-4,6.5,100)
# Plot the raw samples together with the true source/target models.
pl.figure(1)
pl.clf()
pl.subplot()
pl.scatter(xs,ys,label='Source samples',edgecolors='k')
pl.scatter(xt,yt,label='Target samples',edgecolors='k')
pl.plot(xvisu,fs_s(xvisu),'b',label='Source model')
pl.plot(xvisu,fs_t(xvisu),'g',label='Target model')
pl.xlabel('x')
pl.ylabel('y')
pl.legend()
pl.title('Toy regression example')
#pl.savefig('imgs/visu_data_reg.eps')
#%% learn on source
def get_model():
    """Build and compile a small fully-connected network for 1-D regression."""
    layers = [
        keras.layers.Dense(100, activation='tanh', input_shape=(1,)),
        keras.layers.Dense(50, activation='tanh'),
        keras.layers.Dense(1, activation='linear'),
    ]
    model = keras.models.Sequential(layers)
    model.compile('sgd', 'mse')
    return model
# Fit a baseline model on source data only, then plot its prediction curve
# against the true source/target models.
model=get_model()
fit_params={'epochs':60}
model.fit(xs,ys,**fit_params)
ypred=model.predict(xvisu)
pl.figure(2)
pl.clf()
pl.scatter(xs,ys,label='Source samples',edgecolors='k')
pl.scatter(xt,yt,label='Target samples',edgecolors='k')
pl.plot(xvisu,fs_s(xvisu),'b',label='Source model')
pl.plot(xvisu,fs_t(xvisu),'g',label='Target model')
pl.plot(xvisu,ypred,'r',label='Source estimated model')
pl.xlabel('x')
pl.ylabel('y')
pl.legend()
pl.title('Toy regression example')
#%% TLOT
# Joint distribution optimal transport (JDOT) adaptation: the ground cost
# mixes a (median-normalised) feature distance and a label distance.
itermax=5
alpha=1
C0=cdist(xs,xt,metric='sqeuclidean')
#print np.max(C0)
C0=C0/np.median(C0)
fcost = cdist(ys,yt,metric='sqeuclidean')
C=alpha*C0+fcost
# NOTE(review): G0 (the initial OT plan) is computed but never used below.
G0=ot.emd(ot.unif(n),ot.unif(n),C)
fit_params={'epochs':100}
# Alternating (BCD) optimisation of the transport plan and the network.
model,loss = jdot.jdot_nn_l2(get_model,xs,ys,xt,ytest=yt,fit_params=fit_params,numIterBCD = itermax, alpha=alpha)
ypred=model.predict(xvisu.reshape((-1,1)))
# Overlay the adapted model on the same figure layout as before.
pl.figure(2)
pl.clf()
pl.scatter(xs,ys,label='Source samples',edgecolors='k')
pl.scatter(xt,yt,label='Target samples',edgecolors='k')
pl.plot(xvisu,fs_s(xvisu),'b',label='Source model')
pl.plot(xvisu,fs_t(xvisu),'g',label='Target model')
pl.plot(xvisu,ypred,'r',label='JDOT model')
pl.xlabel('x')
pl.ylabel('y')
pl.legend()
pl.title('Toy regression example')
| 20.611511 | 113 | 0.682723 |
5fa0b6e1640307175a1935491c382843cd1faf72 | 1,754 | py | Python | invenio_previewer/api.py | max-moser/invenio-previewer | c2f86b9284f5c57a4b881e97735f5a9fbbc710b6 | [
"MIT"
] | 3 | 2015-08-19T12:50:16.000Z | 2020-12-14T04:06:04.000Z | invenio_previewer/api.py | max-moser/invenio-previewer | c2f86b9284f5c57a4b881e97735f5a9fbbc710b6 | [
"MIT"
] | 99 | 2015-09-13T17:59:28.000Z | 2022-03-08T17:21:34.000Z | invenio_previewer/api.py | max-moser/invenio-previewer | c2f86b9284f5c57a4b881e97735f5a9fbbc710b6 | [
"MIT"
] | 52 | 2015-08-13T13:42:26.000Z | 2022-03-28T07:54:17.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""File reader utility."""
from __future__ import absolute_import, print_function
from os.path import basename, splitext
from flask import url_for
class PreviewFile(object):
    """Default wrapper exposing a record's file to previewer plugins."""

    def __init__(self, pid, record, fileobj):
        """Store the persistent identifier, record and file object.

        :param fileobj: ObjectVersion instance from Invenio-Files-REST.
        """
        self.pid = pid
        self.record = record
        self.file = fileobj

    @property
    def size(self):
        """Size of the file."""
        return self.file['size']

    @property
    def filename(self):
        """Base name of the file (directories stripped)."""
        return basename(self.file.key)

    @property
    def bucket(self):
        """Identifier of the bucket holding the file."""
        return self.file.bucket_id

    @property
    def uri(self):
        """Download link for the file.

        .. note::

            The URI generation assumes that you can download the file
            using the view ``invenio_records_ui.<pid_type>_files``.
        """
        return url_for(
            '.{0}_files'.format(self.pid.pid_type),
            pid_value=self.pid.pid_value,
            filename=self.file.key)

    def is_local(self):
        """Files handled by this class are always local."""
        return True

    def has_extensions(self, *exts):
        """Return whether the file's (lowercased) extension is in ``exts``."""
        _, extension = splitext(self.filename)
        return extension.lower() in exts

    def open(self):
        """Open and return the underlying storage stream."""
        return self.file.file.storage().open()
| 24.704225 | 79 | 0.604333 |
212c2375bc1b80a3c18f8d94801c25b3d5a503fc | 15,408 | py | Python | venv/Lib/site-packages/win32comext/axscript/client/pyscript.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 150 | 2021-11-02T05:31:51.000Z | 2022-03-24T06:22:22.000Z | venv/Lib/site-packages/win32comext/axscript/client/pyscript.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 4 | 2021-12-01T11:55:58.000Z | 2022-02-24T16:14:37.000Z | venv/Lib/site-packages/win32comext/axscript/client/pyscript.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 33 | 2021-11-03T00:29:41.000Z | 2022-03-15T13:15:56.000Z | """Python ActiveX Scripting Implementation
This module implements the Python ActiveX Scripting client.
To register the implementation, simply "run" this Python program - ie
either double-click on it, or run "python.exe pyscript.py" from the
command line.
"""
import winerror
import win32com
import win32api
import pythoncom
import sys
import traceback
import re
import win32com.client.dynamic
from win32com.axscript.client import framework, scriptdispatch
from win32com.axscript import axscript
import win32com.server.register
from win32com.axscript.client.framework import (
RaiseAssert,
trace,
Exception,
SCRIPTTEXT_FORCEEXECUTION,
SCRIPTTEXT_ISEXPRESSION,
SCRIPTTEXT_ISPERSISTENT,
)
# COM class id under which the Python scripting engine is registered.
PyScript_CLSID = "{DF630910-1C1D-11d0-AE36-8C0F5E000000}"

# Set non-zero to trace attribute lookups made by script namespaces.
debugging_attr = 0


def debug_attr_print(*args):
    """Forward *args to trace(), but only when attribute debugging is on."""
    if not debugging_attr:
        return
    trace(*args)
def ExpandTabs(text):
    """Replace every TAB character in *text* with a single space.

    (Despite the name, tabs are not expanded to tab stops - each tab
    becomes exactly one space.)
    """
    return text.replace("\t", " ")
def AddCR(text):
    """Convert LF line endings in *text* to CRLF."""
    return text.replace("\n", "\r\n")
class AXScriptCodeBlock(framework.AXScriptCodeBlock):
    # Code-block subclass whose display name identifies the PyScript engine
    # (the name shows up in debugger and error UI).
    def GetDisplayName(self):
        """Return the base display name prefixed with "PyScript - "."""
        return "PyScript - " + framework.AXScriptCodeBlock.GetDisplayName(self)
# There is only ever _one_ ax object - it exists in the global namespace
# for all script items.
# It performs a search from all global/visible objects
# down.
# This means that if 2 sub-objects of the same name are used
# then only one is ever reachable using the ax shortcut.
class AXScriptAttribute:
    """The single ``ax`` object in a script's global namespace.

    Attribute access performs a recursive search through all named items
    (and their sub-items) registered with the engine, falling back to the
    script's global namespace module.
    """

    def __init__(self, engine):
        # Bypass __setattr__-style machinery; store the engine directly.
        self.__dict__["_scriptEngine_"] = engine

    def __getattr__(self, attr):
        # Refuse dunder lookups (__deepcopy__, __reduce__, ...) up front so
        # protocol probes fail fast instead of walking the item tree.
        # Bug fix: the original test ``attr[1] == "_" and attr[:-1] == "_"``
        # only ever matched the literal name "__" (and raised IndexError on
        # one-character names); startswith/endswith is the evident intent.
        if attr.startswith("__") and attr.endswith("__"):
            raise AttributeError(attr)
        rc = self._FindAttribute_(attr)
        if rc is None:
            raise AttributeError(attr)
        return rc

    def _Close_(self):
        """Break the reference to the engine when the engine shuts down."""
        self.__dict__["_scriptEngine_"] = None

    def _DoFindAttribute_(self, obj, attr):
        """Depth-first search for ``attr`` in ``obj`` and its sub-items."""
        try:
            return obj.subItems[attr.lower()].attributeObject
        except KeyError:
            pass
        # Check out the sub-items
        for item in obj.subItems.values():
            try:
                return self._DoFindAttribute_(item, attr)
            except AttributeError:
                pass
        raise AttributeError(attr)

    def _FindAttribute_(self, attr):
        """Search every top-level named item, then the global namespace."""
        for item in self._scriptEngine_.subItems.values():
            try:
                return self._DoFindAttribute_(item, attr)
            except AttributeError:
                pass
        # All else fails, see if it is a global
        # (mainly b/w compat)
        return getattr(self._scriptEngine_.globalNameSpaceModule, attr)
class NamedScriptAttribute:
    "An explicitly named object in an object's namespace"

    # Each named object holds a reference to one of these.
    # Whenever a sub-item appears in a namespace, it is really one of these
    # objects.  Has a circular reference back to the item itself, which is
    # closed via _Close_()
    def __init__(self, scriptItem):
        # Stored via __dict__ to sidestep the custom __setattr__ below.
        self.__dict__["_scriptItem_"] = scriptItem

    def __repr__(self):
        return "<NamedItemAttribute" + repr(self._scriptItem_) + ">"

    def __getattr__(self, attr):
        # If a known subitem, return it.
        try:
            return self._scriptItem_.subItems[attr.lower()].attributeObject
        except KeyError:
            # Otherwise see if the dispatch can give it to us
            if self._scriptItem_.dispatchContainer:
                return getattr(self._scriptItem_.dispatchContainer, attr)
        raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # XXX - todo - if a known item, then should call its default
        # dispatch method.
        # All attribute writes are forwarded to the COM dispatch container;
        # nothing is ever stored on this wrapper itself.
        attr = attr.lower()
        if self._scriptItem_.dispatchContainer:
            try:
                return setattr(self._scriptItem_.dispatchContainer, attr, value)
            except AttributeError:
                pass
        raise AttributeError(attr)

    def _Close_(self):
        # Break the circular reference back to the script item.
        self.__dict__["_scriptItem_"] = None
class ScriptItem(framework.ScriptItem):
    """Named item (e.g. a host object) exposed to Python scripts.

    Adds per-item event-handler storage (``scriptlets``) and the
    NamedScriptAttribute wrapper that scripts actually see.
    """
    def __init__(self, parentItem, name, dispatch, flags):
        framework.ScriptItem.__init__(self, parentItem, name, dispatch, flags)
        # funcName -> code block or compiled function (see engine event code).
        self.scriptlets = {}
        self.attributeObject = None

    def Reset(self):
        """Reset item state and break the attribute wrapper's back-reference."""
        framework.ScriptItem.Reset(self)
        if self.attributeObject:
            self.attributeObject._Close_()
        self.attributeObject = None

    def Close(self):
        framework.ScriptItem.Close(self)  # calls reset.
        self.dispatchContainer = None
        self.scriptlets = {}

    def Register(self):
        """Register the item and build its dynamic dispatch container."""
        framework.ScriptItem.Register(self)
        self.attributeObject = NamedScriptAttribute(self)
        if self.dispatch:
            # Need to avoid the new Python "lazy" dispatch behaviour.
            # Reuse a cached OLE type description for this CLSID when the
            # engine has seen it before.
            try:
                engine = self.GetEngine()
                olerepr = clsid = None
                typeinfo = self.dispatch.GetTypeInfo()
                clsid = typeinfo.GetTypeAttr()[0]
                try:
                    olerepr = engine.mapKnownCOMTypes[clsid]
                except KeyError:
                    pass
            except pythoncom.com_error:
                typeinfo = None
            if olerepr is None:
                olerepr = win32com.client.dynamic.MakeOleRepr(
                    self.dispatch, typeinfo, None
                )
                if clsid is not None:
                    engine.mapKnownCOMTypes[clsid] = olerepr
            self.dispatchContainer = win32com.client.dynamic.CDispatch(
                self.dispatch, olerepr, self.name
            )

    # self.dispatchContainer = win32com.client.dynamic.Dispatch(self.dispatch, userName = self.name)
    # self.dispatchContainer = win32com.client.dynamic.DumbDispatch(self.dispatch, userName = self.name)
    # def Connect(self):
    #    framework.ScriptItem.Connect(self)
    # def Disconnect(self):
    #    framework.ScriptItem.Disconnect(self)
class PyScript(framework.COMScript):
    """The Python ActiveX scripting engine COM object."""

    # Setup the auto-registration stuff...
    _reg_verprogid_ = "Python.AXScript.2"
    _reg_progid_ = "Python"
    # _reg_policy_spec_ = default
    _reg_catids_ = [axscript.CATID_ActiveScript, axscript.CATID_ActiveScriptParse]
    _reg_desc_ = "Python ActiveX Scripting Engine"
    _reg_clsid_ = PyScript_CLSID
    _reg_class_spec_ = "win32com.axscript.client.pyscript.PyScript"
    _reg_remove_keys_ = [(".pys",), ("pysFile",)]
    _reg_threading_ = "both"

    def __init__(self):
        framework.COMScript.__init__(self)
        self.globalNameSpaceModule = None
        self.codeBlocks = []
        self.scriptDispatch = None

    def InitNew(self):
        """Create a fresh, empty script namespace and code-block state."""
        framework.COMScript.InitNew(self)
        import imp

        self.scriptDispatch = None
        # All script code runs in this synthetic module's namespace.
        self.globalNameSpaceModule = imp.new_module("__ax_main__")
        self.globalNameSpaceModule.__dict__["ax"] = AXScriptAttribute(self)
        self.codeBlocks = []
        self.persistedCodeBlocks = []
        self.mapKnownCOMTypes = {}  # Map of known CLSID to typereprs
        self.codeBlockCounter = 0

    def Stop(self):
        # Flag every pending script as already done
        for b in self.codeBlocks:
            b.beenExecuted = 1
        return framework.COMScript.Stop(self)

    def Reset(self):
        # Reset all code-blocks that are persistent, and discard the rest
        oldCodeBlocks = self.codeBlocks[:]
        self.codeBlocks = []
        for b in oldCodeBlocks:
            if b.flags & SCRIPTTEXT_ISPERSISTENT:
                b.beenExecuted = 0
                self.codeBlocks.append(b)
        return framework.COMScript.Reset(self)

    def _GetNextCodeBlockNumber(self):
        # Monotonic counter used to give parsed blocks unique display names.
        self.codeBlockCounter = self.codeBlockCounter + 1
        return self.codeBlockCounter

    def RegisterNamedItem(self, item):
        """Expose a newly registered named item inside the script namespace."""
        wasReg = item.isRegistered
        framework.COMScript.RegisterNamedItem(self, item)
        if not wasReg:
            # Insert into our namespace.
            # Add every item by name
            if item.IsVisible():
                self.globalNameSpaceModule.__dict__[item.name] = item.attributeObject
            if item.IsGlobal():
                # Global items means sub-items are also added...
                for subitem in item.subItems.values():
                    self.globalNameSpaceModule.__dict__[
                        subitem.name
                    ] = subitem.attributeObject
                # Also add all methods
                for name, entry in item.dispatchContainer._olerepr_.mapFuncs.items():
                    if not entry.hidden:
                        self.globalNameSpaceModule.__dict__[name] = getattr(
                            item.dispatchContainer, name
                        )

    def DoExecutePendingScripts(self):
        """Compile and run every code block not yet executed."""
        try:
            globs = self.globalNameSpaceModule.__dict__
            for codeBlock in self.codeBlocks:
                if not codeBlock.beenExecuted:
                    if self.CompileInScriptedSection(codeBlock, "exec"):
                        self.ExecInScriptedSection(codeBlock, globs)
        finally:
            pass

    def DoRun(self):
        # Nothing extra to do when the host moves us to the "running" state.
        pass

    def Close(self):
        self.ResetNamespace()
        self.globalNameSpaceModule = None
        self.codeBlocks = []
        self.scriptDispatch = None
        framework.COMScript.Close(self)

    def GetScriptDispatch(self, name):
        """Return (lazily creating) the IDispatch view of the script globals."""
        # trace("GetScriptDispatch with", name)
        # if name is not None: return None
        if self.scriptDispatch is None:
            self.scriptDispatch = scriptdispatch.MakeScriptDispatch(
                self, self.globalNameSpaceModule
            )
        return self.scriptDispatch

    def MakeEventMethodName(self, subItemName, eventName):
        """Build the handler name "SubItem_EventName" (initial caps on both)."""
        return (
            subItemName[0].upper()
            + subItemName[1:]
            + "_"
            + eventName[0].upper()
            + eventName[1:]
        )

    def DoAddScriptlet(
        self,
        defaultName,
        code,
        itemName,
        subItemName,
        eventName,
        delimiter,
        sourceContextCookie,
        startLineNumber,
    ):
        """Record an event-handler scriptlet for later (lazy) compilation."""
        # Just store the code away - compile when called. (JIT :-)
        item = self.GetNamedItem(itemName)
        if (
            itemName == subItemName
        ):  # Explicit handlers - eg <SCRIPT LANGUAGE="Python" for="TestForm" Event="onSubmit">
            subItem = item
        else:
            subItem = item.GetCreateSubItem(item, subItemName, None, None)
        funcName = self.MakeEventMethodName(subItemName, eventName)
        codeBlock = AXScriptCodeBlock(
            "Script Event %s" % funcName, code, sourceContextCookie, startLineNumber, 0
        )
        self._AddScriptCodeBlock(codeBlock)
        subItem.scriptlets[funcName] = codeBlock

    def DoProcessScriptItemEvent(self, item, event, lcid, wFlags, args):
        """Locate (compiling on first use) and invoke the handler for an event."""
        # trace("ScriptItemEvent", self, item, event, event.name, lcid, wFlags, args)
        funcName = self.MakeEventMethodName(item.name, event.name)
        codeBlock = function = None
        try:
            function = item.scriptlets[funcName]
            if type(function) == type(self):  # ie, is a CodeBlock instance
                codeBlock = function
                function = None
        except KeyError:
            pass
        if codeBlock is not None:
            # First call: wrap the scriptlet body in a def and compile it.
            realCode = "def %s():\n" % funcName
            for line in framework.RemoveCR(codeBlock.codeText).split("\n"):
                realCode = realCode + "\t" + line + "\n"
            realCode = realCode + "\n"
            if not self.CompileInScriptedSection(codeBlock, "exec", realCode):
                return
            dict = {}
            self.ExecInScriptedSection(
                codeBlock, self.globalNameSpaceModule.__dict__, dict
            )
            function = dict[funcName]
            # cache back in scriptlets as a function.
            item.scriptlets[funcName] = function
        if function is None:
            # still no function - see if in the global namespace.
            try:
                function = self.globalNameSpaceModule.__dict__[funcName]
            except KeyError:
                # Not there _exactly_ - do case ins search.
                funcNameLook = funcName.lower()
                for attr in self.globalNameSpaceModule.__dict__.keys():
                    if funcNameLook == attr.lower():
                        function = self.globalNameSpaceModule.__dict__[attr]
                # cache back in scriptlets, to avoid this overhead next time
                item.scriptlets[funcName] = function
        if function is None:
            raise Exception(scode=winerror.DISP_E_MEMBERNOTFOUND)
        return self.ApplyInScriptedSection(codeBlock, function, args)

    def DoParseScriptText(
        self, code, sourceContextCookie, startLineNumber, bWantResult, flags
    ):
        """Parse script text from the host, executing or queuing per flags."""
        code = framework.RemoveCR(code) + "\n"
        if flags & SCRIPTTEXT_ISEXPRESSION:
            name = "Script Expression"
            exec_type = "eval"
        else:
            name = "Script Block"
            exec_type = "exec"
        num = self._GetNextCodeBlockNumber()
        if num == 1:
            num = ""
        name = "%s %s" % (name, num)
        codeBlock = AXScriptCodeBlock(
            name, code, sourceContextCookie, startLineNumber, flags
        )
        self._AddScriptCodeBlock(codeBlock)
        globs = self.globalNameSpaceModule.__dict__
        if bWantResult:  # always immediate.
            if self.CompileInScriptedSection(codeBlock, exec_type):
                if flags & SCRIPTTEXT_ISEXPRESSION:
                    return self.EvalInScriptedSection(codeBlock, globs)
                else:
                    return self.ExecInScriptedSection(codeBlock, globs)
            # else compile failed, but user chose to keep running...
        else:
            if flags & SCRIPTTEXT_FORCEEXECUTION:
                if self.CompileInScriptedSection(codeBlock, exec_type):
                    self.ExecInScriptedSection(codeBlock, globs)
            else:
                # Deferred blocks run later via DoExecutePendingScripts().
                self.codeBlocks.append(codeBlock)

    def GetNamedItemClass(self):
        return ScriptItem

    def ResetNamespace(self):
        if self.globalNameSpaceModule is not None:
            try:
                self.globalNameSpaceModule.ax._Reset_()
            except AttributeError:
                pass  # ???
            # NOTE(review): this assigns a *local* variable, not
            # self.globalNameSpaceModule - the namespace is not actually
            # dropped here (Close() does clear the attribute).  Left as-is.
            globalNameSpaceModule = None
def DllRegisterServer():
    """Add the extra registry entries for WSH (.pys) support.

    Called as the finalize step of COM registration: creates the OLEScript
    marker key and associates the .pys extension (including drop-handler
    and property-sheet shell extensions) with this engine.
    """
    klass = PyScript
    win32com.server.register._set_subkeys(
        klass._reg_progid_ + "\\OLEScript", {}
    )  # Just a CreateKey
    # Basic Registration for wsh.
    win32com.server.register._set_string(".pys", "pysFile")
    win32com.server.register._set_string("pysFile\\ScriptEngine", klass._reg_progid_)
    guid_wsh_shellex = "{60254CA5-953B-11CF-8C96-00AA00B8708C}"
    win32com.server.register._set_string(
        "pysFile\\ShellEx\\DropHandler", guid_wsh_shellex
    )
    win32com.server.register._set_string(
        "pysFile\\ShellEx\\PropertySheetHandlers\\WSHProps", guid_wsh_shellex
    )
def Register(klass=PyScript):
    """Register *klass* as the Python ActiveX scripting engine.

    Drives win32com's command-line registration and applies the extra
    registry entries via DllRegisterServer.  Returns UseCommandLine's
    result.  (A function-local ``import sys`` that was never used has
    been removed.)
    """
    return win32com.server.register.UseCommandLine(
        klass, finalize_register=DllRegisterServer
    )
if __name__ == "__main__":
    # Running the module directly performs COM registration of the engine.
    Register()
| 34.469799 | 103 | 0.617796 |
d7d01df82a3ddcae975b4282dd7b1800f7bf5a09 | 2,231 | py | Python | setup.py | suzil/pycodestyle | a238201edd2883fcc0400df4a11cbb6fd5f6e5d7 | [
"MIT"
] | 1 | 2021-05-08T07:32:20.000Z | 2021-05-08T07:32:20.000Z | vimfiles/bundle/vim-python/submodules/pycodestyle/setup.py | OrangeGzY/vimrc | ddcaedce2effbbd1014eddbceebeb8c621cd9f95 | [
"MIT"
] | null | null | null | vimfiles/bundle/vim-python/submodules/pycodestyle/setup.py | OrangeGzY/vimrc | ddcaedce2effbbd1014eddbceebeb8c621cd9f95 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import with_statement
from setuptools import setup
def get_version():
with open('pycodestyle.py') as f:
for line in f:
if line.startswith('__version__'):
return eval(line.split('=')[-1])
def get_long_description():
    """Concatenate README.rst and CHANGES.txt for the long description."""
    def read(fname):
        with open(fname) as f:
            return f.read()
    return '\n\n'.join(read(name) for name in ('README.rst', 'CHANGES.txt'))
# Package metadata; version and long description are read from the files
# in this directory by the helpers defined above.
setup(
    name='pycodestyle',
    version=get_version(),
    description="Python style guide checker",
    long_description=get_long_description(),
    keywords='pycodestyle, pep8, PEP 8, PEP-8, PEP8',
    author='Johann C. Rocholl',
    author_email='johann@rocholl.net',
    maintainer='Ian Lee',
    maintainer_email='IanLee1521@gmail.com',
    url='https://pycodestyle.readthedocs.io/',
    license='Expat license',
    py_modules=['pycodestyle'],
    namespace_packages=[],
    include_package_data=True,
    zip_safe=False,
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
    install_requires=[
        # Broken with Python 3: https://github.com/pypa/pip/issues/650
        # 'setuptools',
    ],
    # Installs the ``pycodestyle`` console command.
    entry_points={
        'console_scripts': [
            'pycodestyle = pycodestyle:_main',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    test_suite='testsuite.test_all.suite',
)
| 32.808824 | 71 | 0.601076 |
b86b8dba2acecc88ac204e659fcc0ed45c4f7143 | 2,771 | py | Python | vergoldemich/strategies/adx_rsi.py | FMeirinhos/VergoldeMich | 69fd9f629bccfd2de8b74c9939755ece8135417a | [
"Unlicense"
] | 1 | 2020-02-13T16:53:36.000Z | 2020-02-13T16:53:36.000Z | vergoldemich/strategies/adx_rsi.py | FMeirinhos/VergoldeMich | 69fd9f629bccfd2de8b74c9939755ece8135417a | [
"Unlicense"
] | null | null | null | vergoldemich/strategies/adx_rsi.py | FMeirinhos/VergoldeMich | 69fd9f629bccfd2de8b74c9939755ece8135417a | [
"Unlicense"
] | null | null | null | from .signal import *
from .strategy import *
import talib
import numpy as np
class ADX_RSI(Strategy):
"""
Basic strategy based on RSI, BBs and ADX.
If RSI is satisfied, a timewindow of of trade is opened.
If the cross is satisfied within the timeframe, the trading is
"""
params = dict(
RSI_timeperiod=14,
RSI_oversold=33,
RSI_overbought=70,
ADX_timeperiod=3,
threshold=60,
candlesize='5T',
trading_window=7,
long_trigger=3,
short_trigger=2,
)
def __init__(self, **kwargs):
super(ADX_RSI, self).__init__(**kwargs)
def check(self):
pass
def eval(self, market, context, data):
try:
prices = data.history(
market,
fields=['low', 'high', 'close', 'price'],
bar_count=50,
frequency=self.p.candlesize
)
except Exception as e:
self.logger.warn("Historical data not available {}".format(e))
return
return self.signal(prices)
def signal(self, prices):
rsi = talib.RSI(prices['price'].values,
timeperiod=self.p.RSI_timeperiod)
d_p = talib.PLUS_DI(prices['high'].values, prices['low'].values, prices[
'close'].values, timeperiod=self.p.ADX_timeperiod)
d_m = talib.MINUS_DI(prices['high'].values, prices['low'].values, prices[
'close'].values, timeperiod=self.p.ADX_timeperiod)
crosscall = d_p[-self.p.ADX_timeperiod:] > d_m[-self.p.ADX_timeperiod:]
# if rsi[-1] <= self.p.RSI_oversold:
# if crosscall[-1]:
# return SIGNAL_LONG, 'Crosscall and RSI'.format(rsi[-1])
# elif rsi[-1] >= self.p.RSI_overbought:
# if not crosscall[-1]:
# return SIGNAL_SHORT, 'Crossput and RSI'.format(rsi[-1])
# if crosscall[-1]:
# if np.any(rsi[-self.p.trading_window::] <= self.p.RSI_oversold):
# return SIGNAL_LONG, 'Crosscall and RSI'.format(rsi[-1])
# else:
# if np.any(rsi[-self.p.trading_window::] >= self.p.RSI_overbought):
# return SIGNAL_SHORT, 'Crossput and RSI'.format(rsi[-1])
if np.sum(crosscall) >= self.p.long_trigger:
if np.any(rsi[-self.p.trading_window:] <= self.p.RSI_oversold):
return SIGNAL_LONG, 'Crosscall and RSI {}'.format(rsi[-1])
# elif not crosscall[-1]:
if self.p.trading_window - np.sum(crosscall) >= self.p.short_trigger:
if np.any(rsi[-self.p.trading_window:] >= self.p.RSI_overbought):
return SIGNAL_SHORT, 'Crossput and RSI {}'.format(rsi[-1])
return SIGNAL_NONE, ''
| 31.850575 | 81 | 0.570913 |
b60a415b4419b55f4fd740aa53a8e4f44aaf6455 | 32,042 | py | Python | sklearn/calibration.py | myazann/scikit-learn | d30a419fd5589495e5d9fafb6968bb305bc0ae18 | [
"BSD-3-Clause"
] | 1 | 2021-06-20T01:00:38.000Z | 2021-06-20T01:00:38.000Z | sklearn/calibration.py | myazann/scikit-learn | d30a419fd5589495e5d9fafb6968bb305bc0ae18 | [
"BSD-3-Clause"
] | null | null | null | sklearn/calibration.py | myazann/scikit-learn | d30a419fd5589495e5d9fafb6968bb305bc0ae18 | [
"BSD-3-Clause"
] | 1 | 2021-03-17T03:23:50.000Z | 2021-03-17T03:23:50.000Z | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
import warnings
from inspect import signature
from contextlib import suppress
from functools import partial
from math import log
import numpy as np
from joblib import Parallel
from scipy.special import expit
from scipy.special import xlogy
from scipy.optimize import fmin_bfgs
from .base import (BaseEstimator, ClassifierMixin, RegressorMixin, clone,
MetaEstimatorMixin)
from .preprocessing import label_binarize, LabelEncoder
from .utils import (
column_or_1d,
deprecated,
indexable,
)
from .utils.multiclass import check_classification_targets
from .utils.fixes import delayed
from .utils.validation import check_is_fitted, check_consistent_length
from .utils.validation import _check_sample_weight, _num_samples
from .pipeline import Pipeline
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv, cross_val_predict
from .utils.validation import _deprecate_positional_args
class CalibratedClassifierCV(ClassifierMixin,
MetaEstimatorMixin,
BaseEstimator):
"""Probability calibration with isotonic regression or logistic regression.
This class uses cross-validation to both estimate the parameters of a
classifier and subsequently calibrate a classifier. With default
`ensemble=True`, for each cv split it
fits a copy of the base estimator to the training subset, and calibrates it
using the testing subset. For prediction, predicted probabilities are
averaged across these individual calibrated classifiers. When
`ensemble=False`, cross-validation is used to obtain unbiased predictions,
via :func:`~sklearn.model_selection.cross_val_predict`, which are then
used for calibration. For prediction, the base estimator, trained using all
the data, is used. This is the method implemented when `probabilities=True`
for :mod:`sklearn.svm` estimators.
Already fitted classifiers can be calibrated via the parameter
`cv="prefit"`. In this case, no cross-validation is used and all provided
data is used for calibration. The user has to take care manually that data
for model fitting and calibration are disjoint.
The calibration is based on the :term:`decision_function` method of the
`base_estimator` if it exists, else on :term:`predict_proba`.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : estimator instance, default=None
The classifier whose output need to be calibrated to provide more
accurate `predict_proba` outputs. The default classifier is
a :class:`~sklearn.svm.LinearSVC`.
method : {'sigmoid', 'isotonic'}, default='sigmoid'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method (i.e. a logistic regression model) or
'isotonic' which is a non-parametric approach. It is not advised to
use isotonic calibration with too few calibration samples
``(<<1000)`` since it tends to overfit.
cv : int, cross-validation generator, iterable or "prefit", \
default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that `base_estimator` has been
fitted already and all data is used for calibration.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
Base estimator clones are fitted in parallel across cross-validation
iterations. Therefore parallelism happens only when `cv != "prefit"`.
See :term:`Glossary <n_jobs>` for more details.
.. versionadded:: 0.24
ensemble : bool, default=True
Determines how the calibrator is fitted when `cv` is not `'prefit'`.
Ignored if `cv='prefit'`.
If `True`, the `base_estimator` is fitted using training data and
calibrated using testing data, for each `cv` fold. The final estimator
is an ensemble of `n_cv` fitted classifer and calibrator pairs, where
`n_cv` is the number of cross-validation folds. The output is the
average predicted probabilities of all pairs.
If `False`, `cv` is used to compute unbiased predictions, via
:func:`~sklearn.model_selection.cross_val_predict`, which are then
used for calibration. At prediction time, the classifier used is the
`base_estimator` trained on all the data.
Note that this method is also internally implemented in
:mod:`sklearn.svm` estimators with the `probabilities=True` parameter.
.. versionadded:: 0.24
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels.
calibrated_classifiers_ : list (len() equal to cv or 1 if `cv="prefit"` \
or `ensemble=False`)
The list of classifier and calibrator pairs.
- When `cv="prefit"`, the fitted `base_estimator` and fitted
calibrator.
- When `cv` is not "prefit" and `ensemble=True`, `n_cv` fitted
`base_estimator` and calibrator pairs. `n_cv` is the number of
cross-validation folds.
- When `cv` is not "prefit" and `ensemble=False`, the `base_estimator`,
fitted on all the data, and fitted calibrator.
.. versionchanged:: 0.24
Single calibrated classifier case when `ensemble=False`.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.calibration import CalibratedClassifierCV
>>> X, y = make_classification(n_samples=100, n_features=2,
... n_redundant=0, random_state=42)
>>> base_clf = GaussianNB()
>>> calibrated_clf = CalibratedClassifierCV(base_estimator=base_clf, cv=3)
>>> calibrated_clf.fit(X, y)
CalibratedClassifierCV(base_estimator=GaussianNB(), cv=3)
>>> len(calibrated_clf.calibrated_classifiers_)
3
>>> calibrated_clf.predict_proba(X)[:5, :]
array([[0.110..., 0.889...],
[0.072..., 0.927...],
[0.928..., 0.071...],
[0.928..., 0.071...],
[0.071..., 0.928...]])
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_classification(n_samples=100, n_features=2,
... n_redundant=0, random_state=42)
>>> X_train, X_calib, y_train, y_calib = train_test_split(
... X, y, random_state=42
... )
>>> base_clf = GaussianNB()
>>> base_clf.fit(X_train, y_train)
GaussianNB()
>>> calibrated_clf = CalibratedClassifierCV(
... base_estimator=base_clf,
... cv="prefit"
... )
>>> calibrated_clf.fit(X_calib, y_calib)
CalibratedClassifierCV(base_estimator=GaussianNB(), cv='prefit')
>>> len(calibrated_clf.calibrated_classifiers_)
1
>>> calibrated_clf.predict_proba([[-0.5, 0.5]])
array([[0.936..., 0.063...]])
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
@_deprecate_positional_args
def __init__(self, base_estimator=None, *, method='sigmoid',
cv=None, n_jobs=None, ensemble=True):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
self.n_jobs = n_jobs
self.ensemble = ensemble
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
check_classification_targets(y)
X, y = indexable(X, y)
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
self.calibrated_classifiers_ = []
if self.cv == "prefit":
# `classes_` and `n_features_in_` should be consistent with that
# of base_estimator
if isinstance(self.base_estimator, Pipeline):
check_is_fitted(self.base_estimator[-1])
else:
check_is_fitted(self.base_estimator)
with suppress(AttributeError):
self.n_features_in_ = base_estimator.n_features_in_
self.classes_ = self.base_estimator.classes_
pred_method = _get_prediction_method(base_estimator)
n_classes = len(self.classes_)
predictions = _compute_predictions(pred_method, X, n_classes)
calibrated_classifier = _fit_calibrator(
base_estimator, predictions, y, self.classes_, self.method,
sample_weight
)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
X, y = self._validate_data(
X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False, allow_nd=True
)
# Set `classes_` using all `y`
label_encoder_ = LabelEncoder().fit(y)
self.classes_ = label_encoder_.classes_
n_classes = len(self.classes_)
# sample_weight checks
fit_parameters = signature(base_estimator.fit).parameters
supports_sw = "sample_weight" in fit_parameters
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if not supports_sw:
estimator_name = type(base_estimator).__name__
warnings.warn(f"Since {estimator_name} does not support "
"sample_weights, sample weights will only be"
" used for the calibration itself.")
# Check that each cross-validation fold can have at least one
# example per class
if isinstance(self.cv, int):
n_folds = self.cv
elif hasattr(self.cv, "n_splits"):
n_folds = self.cv.n_splits
else:
n_folds = None
if n_folds and np.any([np.sum(y == class_) < n_folds
for class_ in self.classes_]):
raise ValueError(f"Requesting {n_folds}-fold "
"cross-validation but provided less than "
f"{n_folds} examples for at least one class.")
cv = check_cv(self.cv, y, classifier=True)
if self.ensemble:
parallel = Parallel(n_jobs=self.n_jobs)
self.calibrated_classifiers_ = parallel(
delayed(_fit_classifier_calibrator_pair)(
clone(base_estimator), X, y, train=train, test=test,
method=self.method, classes=self.classes_,
supports_sw=supports_sw, sample_weight=sample_weight)
for train, test in cv.split(X, y)
)
else:
this_estimator = clone(base_estimator)
method_name = _get_prediction_method(this_estimator).__name__
pred_method = partial(
cross_val_predict, estimator=this_estimator, X=X, y=y,
cv=cv, method=method_name, n_jobs=self.n_jobs
)
predictions = _compute_predictions(pred_method, X, n_classes)
if sample_weight is not None and supports_sw:
this_estimator.fit(X, y, sample_weight)
else:
this_estimator.fit(X, y)
calibrated_classifier = _fit_calibrator(
this_estimator, predictions, y, self.classes_, self.method,
sample_weight
)
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Calibrated probabilities of classification.
This function returns calibrated probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : The samples, as accepted by base_estimator.predict_proba
Returns
-------
C : ndarray of shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. The predicted class is the
class that has the highest probability, and can thus be different
from the prediction of the uncalibrated classifier.
Parameters
----------
X : The samples, as accepted by base_estimator.predict
Returns
-------
C : ndarray of shape (n_samples,)
The predicted class.
"""
check_is_fitted(self)
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
def _fit_classifier_calibrator_pair(estimator, X, y, train, test, supports_sw,
method, classes, sample_weight=None):
"""Fit a classifier/calibration pair on a given train/test split.
Fit the classifier on the train set, compute its predictions on the test
set and use the predictions as input to fit the calibrator along with the
test labels.
Parameters
----------
estimator : estimator instance
Cloned base estimator.
X : array-like, shape (n_samples, n_features)
Sample data.
y : array-like, shape (n_samples,)
Targets.
train : ndarray, shape (n_train_indicies,)
Indices of the training subset.
test : ndarray, shape (n_test_indicies,)
Indices of the testing subset.
supports_sw : bool
Whether or not the `estimator` supports sample weights.
method : {'sigmoid', 'isotonic'}
Method to use for calibration.
classes : ndarray, shape (n_classes,)
The target classes.
sample_weight : array-like, default=None
Sample weights for `X`.
Returns
-------
calibrated_classifier : _CalibratedClassifier instance
"""
if sample_weight is not None and supports_sw:
estimator.fit(X[train], y[train],
sample_weight=sample_weight[train])
else:
estimator.fit(X[train], y[train])
n_classes = len(classes)
pred_method = _get_prediction_method(estimator)
predictions = _compute_predictions(pred_method, X[test], n_classes)
sw = None if sample_weight is None else sample_weight[test]
calibrated_classifier = _fit_calibrator(
estimator, predictions, y[test], classes, method, sample_weight=sw
)
return calibrated_classifier
def _get_prediction_method(clf):
"""Return prediction method.
`decision_function` method of `clf` returned, if it
exists, otherwise `predict_proba` method returned.
Parameters
----------
clf : Estimator instance
Fitted classifier to obtain the prediction method from.
Returns
-------
prediction_method : callable
The prediction method.
"""
if hasattr(clf, 'decision_function'):
method = getattr(clf, 'decision_function')
elif hasattr(clf, 'predict_proba'):
method = getattr(clf, 'predict_proba')
else:
raise RuntimeError("'base_estimator' has no 'decision_function' or "
"'predict_proba' method.")
return method
def _compute_predictions(pred_method, X, n_classes):
"""Return predictions for `X` and reshape binary outputs to shape
(n_samples, 1).
Parameters
----------
pred_method : callable
Prediction method.
X : array-like or None
Data used to obtain predictions.
n_classes : int
Number of classes present.
Returns
-------
predictions : array-like, shape (X.shape[0], len(clf.classes_))
The predictions. Note if there are 2 classes, array is of shape
(X.shape[0], 1).
"""
predictions = pred_method(X=X)
if hasattr(pred_method, '__name__'):
method_name = pred_method.__name__
else:
method_name = signature(pred_method).parameters['method'].default
if method_name == 'decision_function':
if predictions.ndim == 1:
predictions = predictions[:, np.newaxis]
elif method_name == 'predict_proba':
if n_classes == 2:
predictions = predictions[:, 1:]
else: # pragma: no cover
# this branch should be unreachable.
raise ValueError(f"Invalid prediction method: {method_name}")
return predictions
def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):
"""Fit calibrator(s) and return a `_CalibratedClassifier`
instance.
`n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
However, if `n_classes` equals 2, one calibrator is fitted.
Parameters
----------
clf : estimator instance
Fitted classifier.
predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \
when binary.
Raw predictions returned by the un-calibrated base classifier.
y : array-like, shape (n_samples,)
The targets.
classes : ndarray, shape (n_classes,)
All the prediction classes.
method : {'sigmoid', 'isotonic'}
The method to use for calibration.
sample_weight : ndarray, shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
pipeline : _CalibratedClassifier instance
"""
Y = label_binarize(y, classes=classes)
label_encoder = LabelEncoder().fit(classes)
pos_class_indices = label_encoder.transform(clf.classes_)
calibrators = []
for class_idx, this_pred in zip(pos_class_indices, predictions.T):
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError("'method' should be one of: 'sigmoid' or "
f"'isotonic'. Got {method}.")
calibrator.fit(this_pred, Y[:, class_idx], sample_weight)
calibrators.append(calibrator)
pipeline = _CalibratedClassifier(
clf, calibrators, method=method, classes=classes
)
return pipeline
class _CalibratedClassifier:
"""Pipeline-like chaining a fitted classifier and its fitted calibrators.
Parameters
----------
base_estimator : estimator instance
Fitted classifier.
calibrators : list of fitted estimator instances
List of fitted calibrators (either 'IsotonicRegression' or
'_SigmoidCalibration'). The number of calibrators equals the number of
classes. However, if there are 2 classes, the list contains only one
fitted calibrator.
classes : array-like of shape (n_classes,)
All the prediction classes.
method : {'sigmoid', 'isotonic'}, default='sigmoid'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
Attributes
----------
calibrators_ : list of fitted estimator instances
Same as `calibrators`. Exposed for backward-compatibility. Use
`calibrators` instead.
.. deprecated:: 0.24
`calibrators_` is deprecated from 0.24 and will be removed in
1.1 (renaming of 0.26). Use `calibrators` instead.
"""
def __init__(self, base_estimator, calibrators, *, classes,
method='sigmoid'):
self.base_estimator = base_estimator
self.calibrators = calibrators
self.classes = classes
self.method = method
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"calibrators_ is deprecated in 0.24 and will be removed in 1.1"
"(renaming of 0.26). Use calibrators instead."
)
@property
def calibrators_(self):
return self.calibrators
def predict_proba(self, X):
"""Calculate calibrated probabilities.
Calculates classification calibrated probabilities
for each class, in a one-vs-all manner, for `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The sample data.
Returns
-------
proba : array, shape (n_samples, n_classes)
The predicted probabilities. Can be exact zeros.
"""
n_classes = len(self.classes)
pred_method = _get_prediction_method(self.base_estimator)
predictions = _compute_predictions(pred_method, X, n_classes)
label_encoder = LabelEncoder().fit(self.classes)
pos_class_indices = label_encoder.transform(
self.base_estimator.classes_
)
proba = np.zeros((_num_samples(X), n_classes))
for class_idx, this_pred, calibrator in \
zip(pos_class_indices, predictions.T, self.calibrators):
if n_classes == 2:
# When binary, `predictions` consists only of predictions for
# clf.classes_[1] but `pos_class_indices` = 0
class_idx += 1
proba[:, class_idx] = calibrator.predict(this_pred)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
denominator = np.sum(proba, axis=1)[:, np.newaxis]
# In the edge case where for each class calibrator returns a null
# probability for a given sample, use the uniform distribution
# instead.
uniform_proba = np.full_like(proba, 1 / n_classes)
proba = np.divide(proba, denominator, out=uniform_proba,
where=denominator != 0)
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(predictions, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
predictions = column_or_1d(predictions)
y = column_or_1d(y)
F = predictions # F follows Platt's notations
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
P = expit(-(AB[0] * F + AB[1]))
loss = -(xlogy(T, P) + xlogy(T1, 1. - P))
if sample_weight is not None:
return (sample_weight * loss).sum()
else:
return loss.sum()
def grad(AB):
# gradient of the objective function
P = expit(-(AB[0] * F + AB[1]))
TEP_minus_T1P = T - P
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(RegressorMixin, BaseEstimator):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,)
Data to predict from.
Returns
-------
T_ : ndarray of shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return expit(-(self.a_ * T + self.b_))
@_deprecate_positional_args
def calibration_curve(y_true, y_prob, *, normalize=False, n_bins=5,
strategy='uniform'):
"""Compute true and predicted probabilities for a calibration curve.
The method assumes the inputs come from a binary classifier, and
discretize the [0, 1] interval into bins.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True targets.
y_prob : array-like of shape (n_samples,)
Probabilities of the positive class.
normalize : bool, default=False
Whether y_prob needs to be normalized into the [0, 1] interval, i.e.
is not a proper probability. If True, the smallest value in y_prob
is linearly mapped onto 0 and the largest one onto 1.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval. A bigger number
requires more data. Bins with no samples (i.e. without
corresponding values in `y_prob`) will not be returned, thus the
returned arrays may have less than `n_bins` values.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
uniform
The bins have identical widths.
quantile
The bins have the same number of samples and depend on `y_prob`.
Returns
-------
prob_true : ndarray of shape (n_bins,) or smaller
The proportion of samples whose class is the positive class, in each
bin (fraction of positives).
prob_pred : ndarray of shape (n_bins,) or smaller
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
Examples
--------
>>> import numpy as np
>>> from sklearn.calibration import calibration_curve
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
>>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
>>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
>>> prob_true
array([0. , 0.5, 1. ])
>>> prob_pred
array([0.2 , 0.525, 0.85 ])
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
check_consistent_length(y_true, y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
y_true = label_binarize(y_true, classes=labels)[:, 0]
if strategy == 'quantile': # Determine bin edges by distribution of data
quantiles = np.linspace(0, 1, n_bins + 1)
bins = np.percentile(y_prob, quantiles * 100)
bins[-1] = bins[-1] + 1e-8
elif strategy == 'uniform':
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
else:
raise ValueError("Invalid entry to 'strategy' input. Strategy "
"must be either 'quantile' or 'uniform'.")
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = bin_true[nonzero] / bin_total[nonzero]
prob_pred = bin_sums[nonzero] / bin_total[nonzero]
return prob_true, prob_pred
| 35.961841 | 79 | 0.62468 |
63c8b1286a4c1b87deea9092cfd57d7e410da590 | 1,282 | py | Python | setup.py | snoop2head/kss | e607c572ab5678b3e3f40af95d4aa7c61e4dacb6 | [
"BSD-3-Clause"
] | null | null | null | setup.py | snoop2head/kss | e607c572ab5678b3e3f40af95d4aa7c61e4dacb6 | [
"BSD-3-Clause"
] | null | null | null | setup.py | snoop2head/kss | e607c572ab5678b3e3f40af95d4aa7c61e4dacb6 | [
"BSD-3-Clause"
] | null | null | null | import codecs
from setuptools import setup, find_packages
required = [
"emoji",
]
def read_file(filename, cb):
with codecs.open(filename, "r", "utf8") as f:
return cb(f)
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="kss",
version="3.3.1.1",
author="Hyunwoong Ko",
author_email="kevin.ko@tunib.ai",
url="https://github.com/hyunwoongko/kss",
license='BSD 3-Clause "New" or "Revised" License',
description="A Toolkit for Korean sentence segmentation",
long_description_content_type="text/markdown",
platforms=["any"],
install_requires=required,
long_description=long_description,
packages=find_packages(exclude=["tests"]),
python_requires=">=3",
zip_safe=False,
package_data={"": ["kss/pynori/resources/*"]},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 28.488889 | 61 | 0.630265 |
386f74e1506c0da2ecdca073106e93bdc9b063cb | 1,497 | py | Python | instagram/migrations/0006_auto_20190522_0937.py | badruu/insta | d09d82b123beb3b682f6047ac4ba4c254acf2cdd | [
"MIT"
] | 2 | 2019-12-23T03:00:47.000Z | 2022-02-28T17:25:34.000Z | instagram/migrations/0006_auto_20190522_0937.py | badruu/insta | d09d82b123beb3b682f6047ac4ba4c254acf2cdd | [
"MIT"
] | 5 | 2020-06-05T20:51:05.000Z | 2022-01-13T01:14:41.000Z | instagram/migrations/0006_auto_20190522_0937.py | badruu/insta | d09d82b123beb3b682f6047ac4ba4c254acf2cdd | [
"MIT"
] | 1 | 2022-02-28T17:19:50.000Z | 2022-02-28T17:19:50.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-05-22 06:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: dismantle the per-user Image_Vote voting
    scheme and replace it with a poll-style Choice model linked to Image."""
    dependencies = [
        ('instagram', '0005_auto_20190521_1559'),
    ]
    operations = [
        # New Choice model: a poll option with a vote counter.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        # Drop the old voting model's constraints and fields before deleting it.
        migrations.AlterUniqueTogether(
            name='image_vote',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='image_vote',
            name='voted',
        ),
        migrations.RemoveField(
            model_name='image_vote',
            name='voter',
        ),
        migrations.RemoveField(
            model_name='image',
            name='down_vote',
        ),
        migrations.RemoveField(
            model_name='image',
            name='up_vote',
        ),
        migrations.DeleteModel(
            name='Image_Vote',
        ),
        # Attach each Choice to its parent Image.
        migrations.AddField(
            model_name='choice',
            name='image',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.Image'),
        ),
    ]
| 28.245283 | 114 | 0.550434 |
5d02bff33614c34530898671cde9f8d3e43b26e9 | 11,715 | py | Python | eva_submission/xlsx/xlsx_parser.py | sundarvenkata-EBI/eva-submission-1 | abc06a1a102ea1ad50e6308ba898d3dd726be70d | [
"Apache-2.0"
] | null | null | null | eva_submission/xlsx/xlsx_parser.py | sundarvenkata-EBI/eva-submission-1 | abc06a1a102ea1ad50e6308ba898d3dd726be70d | [
"Apache-2.0"
] | 14 | 2021-01-21T17:35:19.000Z | 2022-03-02T16:22:08.000Z | eva_submission/xlsx/xlsx_parser.py | sundarvenkata-EBI/eva-submission-1 | abc06a1a102ea1ad50e6308ba898d3dd726be70d | [
"Apache-2.0"
] | 8 | 2021-01-03T10:57:32.000Z | 2021-12-13T10:21:06.000Z | # Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module was borrowed and modified from
https://github.com/EBIvariation/amp-t2d-submissions/blob/master/xls2xml/xls2xml/XLSReader.py
This module reads an Excel file and allows the user to get all the valid worksheet names,
get the 1st line in a worksheet and iterate over the rest of the worksheet row by row
(next_row). The returned row is a hash which contains only the keys that are defined in
a configuration file.
This module depends on openpyxl and pyyaml.
"""
import yaml
from ebi_eva_common_pyutils.logger import AppLogger
from openpyxl import load_workbook
# Keys recognised in the YAML configuration file that drives parsing.
WORKSHEETS_KEY_NAME = 'worksheets'  # top-level: list of worksheets to process
REQUIRED_HEADERS_KEY_NAME = 'required'  # per-worksheet: headers that must be present
OPTIONAL_HEADERS_KEY_NAME = 'optional'  # per-worksheet: headers that may be present
HEADERS_KEY_ROW = 'header_row'  # per-worksheet: 1-based row holding the headers
CAST_KEY_NAME = 'cast'  # per-worksheet: header -> type name used by cast_value
class XlsxBaseParser(AppLogger):
    """
    Base parser for Excel file for the fields from worksheets defined in a configuration file.
    It implements the base functionality allowing to open and validate the spreadsheet.
    """
    def __init__(self, xls_filename, conf_filename, read_only=True):
        """
        Constructor
        :param xls_filename: Excel file path
        :type xls_filename: basestring
        :param conf_filename: configuration file path
        :type conf_filename: basestring
        """
        # The YAML config describes which worksheets/headers to read (see the
        # *_KEY_NAME constants above).
        with open(conf_filename, 'r') as conf_file:
            self.xls_conf = yaml.full_load(conf_file)
        try:
            self.workbook = load_workbook(xls_filename, read_only=read_only)
        except Exception as e:
            # Log the failing file name, then let the caller see the original error.
            self.error('Error loading %s', xls_filename)
            raise e
        # Lazily-computed list of worksheets that pass header validation.
        self.worksheets = None
        self._active_worksheet = None
        # Per-worksheet cursor of the last row processed (1-based).
        self.row_offset = {}
        # Per-worksheet list of (stripped) header cell values.
        self.headers = {}
        # Tri-state: None = not checked yet, then True/False after validation.
        self.valid = None
    @property
    def active_worksheet(self):
        return self._active_worksheet
    @active_worksheet.setter
    def active_worksheet(self, worksheet):
        # Validate worksheets on first use so setting an invalid name fails fast.
        if self.worksheets is None:
            self.valid_worksheets()
        if worksheet not in self.worksheets:
            raise ValueError('Worksheet ' + worksheet + ' is not valid!')
        self._active_worksheet = worksheet
    def valid_worksheets(self):
        """
        Get the list of the names of worksheets which have all the configured required headers
        :return: list of valid worksheet names in the Excel file
        :rtype: list
        """
        # Cached after the first computation.
        if self.worksheets is not None:
            return self.worksheets
        self.worksheets = []
        sheet_titles = self.workbook.sheetnames
        for title in self.xls_conf[WORKSHEETS_KEY_NAME]:
            # Check worksheet exists
            if title not in sheet_titles:
                continue
            # Check number of rows (need at least one data row below the header)
            worksheet = self.workbook[title]
            header_row = self.xls_conf[title].get(HEADERS_KEY_ROW, 1)
            if worksheet.max_row < header_row + 1:
                continue
            # Check required headers are present (whitespace-stripped)
            self.headers[title] = [cell.value if cell.value is None else cell.value.strip()
                                   for cell in worksheet[header_row]]
            required_headers = self.xls_conf[title].get(REQUIRED_HEADERS_KEY_NAME, [])
            if set(required_headers) <= set(self.headers[title]):  # issubset
                self.worksheets.append(title)
            else:
                # A configured sheet with missing required headers marks the
                # whole workbook as invalid.
                self.warning('Worksheet '+title+' does not have all the required headers!')
                self.valid = False
        return self.worksheets
    def get_valid_conf_keys(self):
        """
        :return: the list of valid worksheet names
        :rtype: list
        """
        return self.valid_worksheets()
    def is_valid(self):
        """
        Check that is all the worksheets contain required headers
        :return: True if all the worksheets contain required headers. False otherwise
        :rtype: bool
        """
        if self.valid is None:
            # Assume valid; valid_worksheets() flips the flag on any failure.
            self.valid = True
            self.valid_worksheets()
        return self.valid
    @staticmethod
    def cast_value(value, type_name):
        """Cast *value* according to the configured *type_name* ('string' only for now)."""
        # Do not cast None values
        if type_name and value is not None:
            if type_name == 'string':
                return str(value)
        return value
class XlsxReader(XlsxBaseParser):
    """
    Reader for Excel file for the fields from worksheets defined in a configuration file.

    Supports both direct calls to :meth:`next` / :meth:`get_rows` and the
    Python iterator protocol (``for row in reader``).
    """

    def __init__(self, xls_filename, conf_filename):
        """
        Constructor
        :param xls_filename: Excel file path
        :type xls_filename: basestring
        :param conf_filename: configuration file path
        :type conf_filename: basestring
        """
        super().__init__(xls_filename, conf_filename, read_only=True)

    def __iter__(self):
        return self

    def base_row_offset(self, worksheet):
        """Return the 1-based index of the header row for *worksheet*."""
        return self.xls_conf[worksheet].get(HEADERS_KEY_ROW, 1)

    def _parse_row(self, worksheet, row, required_headers, optional_headers):
        """
        Convert one worksheet row into a dict keyed by the configured headers.

        Headers missing from the sheet (or beyond the row length) map to None.
        Returns None when every configured cell in the row is empty.
        """
        num_cells = len(row)
        data = {}
        has_notnull = False
        for header in required_headers + optional_headers:
            header_index = num_cells
            if header in self.headers[worksheet]:
                header_index = self.headers[worksheet].index(header)
            if header_index >= num_cells:
                # Header column is missing from (or beyond) this row.
                data[header] = None
                continue
            cell = row[header_index]
            if cell.value is not None:
                has_notnull = True
            data[header] = self.cast_value(
                cell.value,
                self.xls_conf[worksheet].get(CAST_KEY_NAME, {}).get(header))
        if has_notnull:
            # row_offset currently points at this row (1-based).
            data['row_num'] = self.row_offset[worksheet]
            return data
        return None

    def next(self):
        """
        Retrieve next data row
        :return: A hash containing all the REQUIRED and OPTIONAL fields as keys
        and the corresponding data as values
        :rtype: dict
        """
        worksheet = self.active_worksheet
        if worksheet is None:
            self.warning('No worksheet is specified!')
            raise StopIteration
        if worksheet not in self.row_offset:
            self.row_offset[worksheet] = self.base_row_offset(worksheet)
        self.row_offset[worksheet] += 1
        required_headers = self.xls_conf[worksheet].get(REQUIRED_HEADERS_KEY_NAME, [])
        optional_headers = self.xls_conf[worksheet].get(OPTIONAL_HEADERS_KEY_NAME, [])
        for row in self.workbook[worksheet].iter_rows(min_row=self.row_offset[worksheet]):
            data = self._parse_row(worksheet, row, required_headers, optional_headers)
            if data is not None:
                return data
            # no data on this row, continue to next
            self.row_offset[worksheet] += 1
        raise StopIteration

    # Bug fix: __iter__ returns self, but Python 3's iterator protocol calls
    # __next__ (not next); without this alias `for row in reader` raised
    # TypeError. Direct calls to .next() keep working unchanged.
    __next__ = next

    def get_rows(self):
        """
        Retrieve all the data rows.
        :return: list of hash containing all the REQUIRED and OPTIONAL fields as keys
        and the corresponding data as values
        :rtype: list
        """
        worksheet = self.active_worksheet
        if worksheet is None:
            self.warning('No worksheet is specified!')
            return None
        if worksheet not in self.row_offset:
            self.row_offset[worksheet] = self.base_row_offset(worksheet)
        self.row_offset[worksheet] += 1
        required_headers = self.xls_conf[worksheet].get(REQUIRED_HEADERS_KEY_NAME, [])
        optional_headers = self.xls_conf[worksheet].get(OPTIONAL_HEADERS_KEY_NAME, [])
        rows = []
        for row in self.workbook[worksheet].iter_rows(min_row=self.row_offset[worksheet]):
            data = self._parse_row(worksheet, row, required_headers, optional_headers)
            if data is not None:
                rows.append(data)
            self.row_offset[worksheet] += 1
        return rows
class XlsxWriter(XlsxBaseParser):
    """
    Writer for Excel file for the fields from worksheets defined in a configuration file
    """
    def __init__(self, xls_filename, conf_filename):
        """
        Constructor
        :param xls_filename: Excel file path
        :type xls_filename: basestring
        :param conf_filename: configuration file path
        :type conf_filename: basestring
        """
        super().__init__(xls_filename, conf_filename, read_only=False)
    def edit_row(self, row_data: dict, remove_when_missing_values=True):
        """Write one row (located by row_data['row_num']) back to the active
        worksheet; optional headers absent from row_data are blanked unless
        remove_when_missing_values is False. Raises on missing required headers."""
        worksheet = self.active_worksheet
        if worksheet is None:
            raise ValueError('No worksheet is specified!')
        if 'row_num' not in row_data:
            raise KeyError('No row specified in dict ' + str(row_data))
        row_num = row_data['row_num']
        required_headers = self.xls_conf[worksheet].get(REQUIRED_HEADERS_KEY_NAME, [])
        optional_headers = self.xls_conf[worksheet].get(OPTIONAL_HEADERS_KEY_NAME, [])
        for header in required_headers:
            header_index = self.headers[worksheet].index(header)
            if header not in row_data:
                raise ValueError('Header {0} is required but is not provided in row {1}'.format(header, row_num))
            # openpyxl cell coordinates are 1-based, hence the +1.
            self.workbook[worksheet].cell(column=header_index+1, row=row_num, value=row_data[header])
        for header in optional_headers:
            if header in self.headers[worksheet]:
                header_index = self.headers[worksheet].index(header)
                if header not in row_data and remove_when_missing_values:
                    # When data is missing remove the value from the cell
                    self.workbook[worksheet].cell(column=header_index+1, row=row_num, value='')
                elif header in row_data:
                    self.workbook[worksheet].cell(column=header_index+1, row=row_num, value=row_data[header])
    def set_rows(self, rows, empty_remaining_rows=False):
        """
        Write a set of rows from the top of the spreadsheet.
        """
        worksheet = self.active_worksheet
        if worksheet is None:
            raise ValueError('No worksheet is specified!')
        # First data row sits directly under the configured header row.
        first_row = self.xls_conf[worksheet].get(HEADERS_KEY_ROW, 1) + 1
        for i, row in enumerate(rows):
            row['row_num'] = first_row + i
            self.edit_row(row)
        if empty_remaining_rows:
            # NOTE(review): the amount looks off — `max_row - first_row + len(rows)`
            # deletes more rows than remain; presumably it should be
            # `max_row - (first_row + len(rows)) + 1`. Confirm before changing.
            self.workbook[worksheet].delete_rows(first_row + len(rows), amount=self.workbook[worksheet].max_row - first_row + len(rows))
    def save(self, filename):
        """Persist the (possibly modified) workbook to *filename*."""
        self.workbook.save(filename)
| 36.839623 | 136 | 0.627913 |
aa9d9d9b494e67c8cb073e7c651de6a9a03f20b7 | 2,007 | py | Python | keystoneclient/v2_0/ec2.py | dreamhost/python-keystoneclient | aa9761433065a260dc2b571deb10957347a76d4e | [
"Apache-1.1"
] | null | null | null | keystoneclient/v2_0/ec2.py | dreamhost/python-keystoneclient | aa9761433065a260dc2b571deb10957347a76d4e | [
"Apache-1.1"
] | null | null | null | keystoneclient/v2_0/ec2.py | dreamhost/python-keystoneclient | aa9761433065a260dc2b571deb10957347a76d4e | [
"Apache-1.1"
] | null | null | null | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
class EC2(base.Resource):
    """A single EC2 access/secret credential pair."""

    def __repr__(self):
        return "<EC2 {}>".format(self._info)

    def delete(self):
        """Remove this credential pair via its manager."""
        return self.manager.delete(self)
class CredentialsManager(base.ManagerWithFind):
    """Manager for EC2 access/secret credential pairs."""

    resource_class = EC2

    def create(self, user_id, tenant_id):
        """
        Create a new access/secret pair for the user/tenant pair

        :rtype: object of type :class:`EC2`
        """
        body = {'tenant_id': tenant_id}
        url = '/users/{}/credentials/OS-EC2'.format(user_id)
        return self._create(url, body, "credential")

    def list(self, user_id):
        """
        Get a list of access/secret pairs for a user_id

        :rtype: list of :class:`EC2`
        """
        url = '/users/{}/credentials/OS-EC2'.format(user_id)
        return self._list(url, "credentials")

    def get(self, user_id, access):
        """
        Get the access/secret pair for a given access key

        :rtype: object of type :class:`EC2`
        """
        url = '/users/{}/credentials/OS-EC2/{}'.format(user_id, base.getid(access))
        return self._get(url, "credential")

    def delete(self, user_id, access):
        """
        Delete an access/secret pair for a user
        """
        url = '/users/{}/credentials/OS-EC2/{}'.format(user_id, base.getid(access))
        return self._delete(url)
| 31.857143 | 78 | 0.613852 |
438b1169c321488abf8a47fca4cb10baee795b17 | 649 | py | Python | jacquard/storage/tests/test_dummy.py | peteowlett/jacquard | 772fd633e521501688e0933482cba45f48c23ef9 | [
"MIT"
] | null | null | null | jacquard/storage/tests/test_dummy.py | peteowlett/jacquard | 772fd633e521501688e0933482cba45f48c23ef9 | [
"MIT"
] | null | null | null | jacquard/storage/tests/test_dummy.py | peteowlett/jacquard | 772fd633e521501688e0933482cba45f48c23ef9 | [
"MIT"
] | null | null | null | import unittest
import pytest
from jacquard.storage.dummy import DummyStore
from jacquard.storage.testing_utils import StorageGauntlet
class DummyGauntletTest(StorageGauntlet, unittest.TestCase):
    """Run the shared storage gauntlet against the in-memory DummyStore."""
    def open_storage(self):
        # Gauntlet hook: DummyStore takes a connection-string argument (empty here).
        return DummyStore('')
    def test_transaction_raises_error_for_bad_commit(self):
        # A read-only transaction must reject writes at commit time.
        store = self.open_storage()
        transaction = store.transaction(read_only=True)
        # Drive the context manager by hand so the commit error raised from
        # __exit__ can be asserted directly.
        transaction_map = transaction.__enter__()
        transaction_map['new_key'] = 'new_value'
        with pytest.raises(RuntimeError):
            transaction.__exit__(None, None, None)
        # The rejected write must not leak into the backing store.
        assert 'new_key' not in store.data
| 27.041667 | 60 | 0.725732 |
43033bc1a58edf8e9acea7107eb1eeac4e33e6ed | 12,585 | py | Python | shark/shark/report/bom_rm_status/bom_rm_status.py | umaepoch/Shark | 2ebf715efba796f96c2d9807bbe930e354606492 | [
"MIT"
] | null | null | null | shark/shark/report/bom_rm_status/bom_rm_status.py | umaepoch/Shark | 2ebf715efba796f96c2d9807bbe930e354606492 | [
"MIT"
] | null | null | null | shark/shark/report/bom_rm_status/bom_rm_status.py | umaepoch/Shark | 2ebf715efba796f96c2d9807bbe930e354606492 | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Epoch and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from erpnext.stock.stock_balance import get_balance_qty_from_sle
import datetime
import time
import math
import json
import ast
def execute(filters=None):
    """Entry point for the 'BOM RM Status' script report.

    Builds one row per raw-material item, comparing the BOM-required quantity
    (scaled to qty_to_make) against stock in the production and reserve
    warehouses. Fills module-level globals summ_data/company so that
    get_report_data() and make_issue() can reuse them later.
    """
    global summ_data
    global company
    summ_data = []
    bom_items = []
    whse_items = []
    whse_qty = 0
    bom_qty = 0
    reserved_whse_qty = 0
    sum_qty = 0
    delta1_qty = 0
    whse_stock_entry_qty = 0
    reserved_whse_stock_entry_qty = 0
    # Report filters supplied by the UI.
    company = filters.get("company")
    bom = filters.get("bom")
    quantity_to_make = filters.get("qty_to_make")
    include_exploded_items = filters.get("include_exploded_items")
    warehouse = filters.get("warehouse")
    reserve_warehouse = filters.get("reserve_warehouse")
    project_start_date = filters.get("start_date")
    columns = get_columns()
    if warehouse is not None:
        #print "warehouse-----------", warehouse
        whse_items = get_items_from_warehouse(warehouse)
        if whse_items is None:
            whse_items = []
    #print "whse_items-----------", whse_items
    if bom is not None:
        bom_items = get_bom_items(filters)
        if bom_items is None:
            bom_items = []
    #print "bom_items-----------", bom_items
    # One merged record per item_code; warehouse-only items get zero BOM qty.
    items_data = merger_items_list(whse_items,bom_items)
    #print "items_data-----------", items_data
    for item in sorted(items_data):
        dict_items = items_data[item]
        item_code = str(dict_items.item_code)
        bom_qty = dict_items.bom_qty
        bom_item_qty = dict_items.bi_qty
        # Scale the per-BOM item quantity to the requested make quantity.
        # NOTE(review): float(quantity_to_make) raises if the qty_to_make
        # filter is empty — confirm the filter is mandatory in the report.
        if bom_qty!=0:
            required_qty = (bom_item_qty/bom_qty) * (float(quantity_to_make))
        else:
            required_qty = 0
        if warehouse is not None and warehouse !="":
            whse_qty = get_warehouse_qty(warehouse,item_code)
            whse_stock_entry_qty = get_stock_entry_quantities(warehouse,item_code,project_start_date)
        else:
            whse_qty = 0
            whse_stock_entry_qty = 0
        # Add quantities already consumed by Manufacture stock entries —
        # presumably so stock issued since the project start still counts as
        # "issued for production"; verify against the report's intent.
        if whse_stock_entry_qty:
            whse_qty = whse_qty + whse_stock_entry_qty
        if reserve_warehouse is not None and reserve_warehouse!="":
            reserved_whse_qty = get_warehouse_qty(reserve_warehouse,item_code)
            reserved_whse_stock_entry_qty = get_stock_entry_quantities(reserve_warehouse,item_code,project_start_date)
        else:
            reserved_whse_qty = 0
            reserved_whse_stock_entry_qty = 0
        if reserved_whse_stock_entry_qty:
            reserved_whse_qty = reserved_whse_qty + reserved_whse_stock_entry_qty
        #delta_qty = whse_qty - bom_qty
        delta_qty = whse_qty - required_qty
        sum_qty = whse_qty + reserved_whse_qty
        #delta1_qty = sum_qty - bom_qty
        delta1_qty = sum_qty - required_qty
        item_group=get_item_group(item_code)
        stock_uom=get_stock_uom(item_code)
        # Row layout must stay in sync with get_columns().
        summ_data.append([str(item_code),str(item_group),str(stock_uom), str(bom_qty), str(bom_item_qty), str(required_qty), str(whse_qty), str(delta_qty),
            str(reserved_whse_qty), str(sum_qty), str(delta1_qty)])
    print ("summ_data-----------", summ_data)
    return columns, summ_data
def get_item_group(item_code):
    """Return the Item Group of *item_code* from the Item master."""
    rows = frappe.db.sql("""select item_group from `tabItem` where item_code=%s""", (item_code), as_dict=1)
    return rows[0]['item_group']
def get_stock_uom(item_code):
    """Return the stock UOM of *item_code* from the Item master."""
    rows = frappe.db.sql("""select stock_uom from `tabItem` where item_code=%s""", (item_code), as_dict=1)
    return rows[0]['stock_uom']
def get_stock_entry_quantities(warehouse, item_code, project_start_date):
    """Total quantity of *item_code* consumed from *warehouse* by submitted
    'Manufacture' Stock Entries modified between *project_start_date* and now.

    :return: sum of the matching Stock Entry Detail quantities (0 when none).
    """
    total_qty = 0
    current_date = str(datetime.datetime.now())
    # Security/robustness fix: the date bounds are bound as query parameters
    # instead of being concatenated into the SQL string, which was fragile
    # and injection-prone.
    details = frappe.db.sql(
        """select sed.item_code, sed.qty, se.purpose
           from `tabStock Entry Detail` sed, `tabStock Entry` se
           where sed.item_code=%s and sed.s_warehouse=%s
           and se.purpose='Manufacture'
           and sed.modified >= %s and sed.modified <= %s
           and sed.parent=se.name and se.docstatus=1""",
        (item_code, warehouse, str(project_start_date), current_date), as_dict=1)
    for se_qty in details:
        # A NULL qty counts as zero.
        qty = 0 if se_qty['qty'] is None else float(se_qty['qty'])
        total_qty = total_qty + qty
    return total_qty
def merger_items_list(whse_items, bom_items):
    """Merge BOM line rows and warehouse stock rows into one map keyed by
    item_code. BOM rows win; warehouse-only items get zero BOM quantities."""
    merged = {}
    for bom_row in (bom_items or []):
        code = bom_row['item_code']
        if code not in merged:
            merged[code] = frappe._dict({
                "item_code": code,
                "bi_qty": float(bom_row['bi_qty']),
                "bom_qty": bom_row['bo_qty'],
            })
    for whse_row in (whse_items or []):
        code = whse_row['item_code']
        if code not in merged:
            merged[code] = frappe._dict({"item_code": code, "bi_qty": 0.0, "bom_qty": 0.0})
    return merged
def get_warehouse_qty(warehouse, item_code):
    """Actual quantity of *item_code* in *warehouse* (0 when there is no
    positive Bin row)."""
    rows = frappe.db.sql("""select actual_qty from `tabBin` where warehouse=%s and item_code=%s and actual_qty > 0 """, (warehouse, item_code), as_dict=1)
    if rows and rows[0]['actual_qty'] is not None:
        return rows[0]['actual_qty']
    return 0
def get_bom_qty(bom, item_code):
    """Quantity of *item_code* on BOM *bom* (0 when absent or NULL)."""
    rows = frappe.db.sql("""select qty from `tabBOM Item` where parent=%s and item_code=%s""", (bom, item_code), as_dict=1)
    if rows and rows[0]['qty'] is not None:
        return rows[0]['qty']
    return 0
def get_items_from_warehouse(warehouse):
    """All items with positive stock in *warehouse*, or None when it is empty."""
    rows = frappe.db.sql("""select item_code,actual_qty from `tabBin` where warehouse = %s and actual_qty > 0 group by item_code""", warehouse, as_dict=1)
    return rows if rows else None
def get_bom_items(filters):
    # Fetch active, submitted BOM line items for the filtered company/BOM.
    # With include_exploded_items == "Y" the fully exploded item list is used
    # instead of the direct BOM Item rows.
    # `conditions` is interpolated into the query text; its values are escaped
    # by get_conditions, so this stays safe only as long as that remains true.
    conditions = get_conditions(filters)
    #print "---------conditions::", conditions
    if filters.get("include_exploded_items") == "Y":
        #print "in------------Y"
        return frappe.db.sql("""select bo.name as bom_name, bo.company, bo.item as bo_item, bo.quantity as bo_qty, bo.project, bi.item_code, bi.stock_qty as bi_qty from `tabBOM` bo, `tabBOM Explosion Item` bi where bo.name = bi.parent and bo.is_active=1 and bo.docstatus = "1" %s order by bo.name, bi.item_code""" % conditions, as_dict=1)
    else:
        return frappe.db.sql("""select bo.name as bom_name, bo.company, bo.item as bo_item, bo.quantity as bo_qty, bo.project, bi.item_code, bi.stock_qty as bi_qty from `tabBOM` bo, `tabBOM Item` bi where bo.name = bi.parent and bo.is_active=1 and bo.docstatus = "1" %s order by bo.name, bi.item_code""" % conditions, as_dict=1)
def get_conditions(filters):
    """Build the extra SQL WHERE fragment appended by get_bom_items.

    Values are escaped with frappe.db.escape. Bug fix: each fragment now
    starts with a leading space; previously two concatenated fragments fused
    with the preceding token (e.g. ``'Company'and bi.parent = ...``), relying
    on the SQL lexer to split them.
    """
    conditions = ""
    if filters.get("company"):
        conditions += ' and bo.company = %s' % frappe.db.escape(filters.get("company"), percent=False)
    if filters.get("bom"):
        conditions += ' and bi.parent = %s' % frappe.db.escape(filters.get("bom"), percent=False)
    return conditions
@frappe.whitelist()
def fetch_project_details(project):
    # Client-callable: return the planning-related fields of the given Project
    # (used by the report UI to pre-fill its filters).
    details = frappe.db.sql("""select start_date,project_warehouse,reserve_warehouse,master_bom,core_team_coordinator,planner from `tabProject` where name=%s""", project, as_dict=1)
    return details
@frappe.whitelist()
def get_report_data():
    # Client-callable: reshape the module-global summ_data (built by execute)
    # into a list of dicts for the client script.
    # NOTE(review): the indexes below look out of step with the row layout in
    # execute()/get_columns(): rows[1] is item_group (not a quantity), rows[4]
    # is the BOM item qty, rows[6] is qty issued for production, rows[7] is
    # delta qty and rows[8] is the reserved-warehouse qty. Confirm the
    # intended field mapping before relying on these values.
    report_data = []
    details = {}
    for rows in summ_data:
        item_code = rows[0]
        bom_qty = rows[1]
        qty = rows[8]
        qty_in_reserved_whse = rows[6]
        qty_in_production_whse = rows[4]
        total_qty = rows[7]
        details = {"item_code":item_code,"bom_qty":bom_qty,"qty":qty,"qty_in_reserved_whse":qty_in_reserved_whse,"qty_in_production_whse":qty_in_production_whse,"total_qty":total_qty}
        report_data.append(details)
        print("details",details)
    return report_data
def get_columns():
    """Return the report column definitions ("label:fieldtype:width" strings)."""
    return [
        _("Item Code") + ":Link/BOM:100",
        _("Item Group") + ":Link/Item Group:100",
        _("Stock UOM") + ":Link/UOM:100",
        _("BOM Qty") + "::100",
        _("BOM Item Qty") + "::100",
        _("Required Qty") + "::100",
        _("Qty issued for Production") + "::140",
        _("Delta Qty") + "::100",
        _("Reserved Warehouse Qty") + "::150",
        _("Sum Qty") + ":Link/UOM:90",
        _("Sum Qty - Required Qty") + "::100",
    ]
@frappe.whitelist()
def check_for_whole_number(bomno):
    # Client-callable: return must_be_whole_number for the UOM of BOM *bomno*.
    # NOTE(review): the values argument is `(bomno)` — a bare string, not a
    # 1-tuple; frappe.db.sql accepts that, but `(bomno,)` would be clearer.
    return (frappe.db.sql("""select must_be_whole_number from `tabUOM` where name IN (select uom from `tabBOM` where name = %s) """,(bomno)))
@frappe.whitelist()
#def make_issue(item_code,project,qty,planner,core_team_coordinator):
def make_issue(issue_items):
    """Create one 'Planning/Production' Issue per entry in *issue_items* and
    assign a ToDo to the item's planner and core-team coordinator.

    :param issue_items: JSON-encoded list of dicts with keys item_code,
        bom_qty, project, planner, qty_in_reserved_whse,
        qty_in_production_whse, total_qty, core_team_coordinator.
    :return: the doctype name ("Issue") once at least one issue was created.
    """
    return_doc = ""
    issue_list = json.loads(issue_items)
    issue_list = json.dumps(issue_list)
    issue_list = ast.literal_eval(issue_list)
    for item in issue_list:
        issue_assign = []
        # Bug fix: read each field from the current `item`; the old code
        # always read issue_list[0], so multiple submitted items all produced
        # duplicate issues for the first item only.
        item_code = item['item_code']
        bom_qty = item['bom_qty']
        project = item['project']
        planner = item['planner']
        qty_in_reserved_whse = item['qty_in_reserved_whse']
        qty_in_production_whse = item['qty_in_production_whse']
        total_qty = item['total_qty']
        core_team_coordinator = item['core_team_coordinator']
        # Missing quantities default to zero.
        if bom_qty is None:
            bom_qty = 0
        if qty_in_reserved_whse is None:
            qty_in_reserved_whse = 0
        if qty_in_production_whse is None:
            qty_in_production_whse = 0
        if total_qty is None:
            total_qty = 0
        # Collect assignees (the client may send the literal string "null").
        if core_team_coordinator != "null" and core_team_coordinator is not None:
            issue_assign.append(core_team_coordinator)
        if planner != "null" and planner is not None:
            issue_assign.append(planner)
        # Build the HTML table shown in the Issue's Description field.
        # NOTE(review): "backgroundcolor" is not a valid HTML attribute
        # (kept as-is since it is inert either way) — confirm intended styling.
        description = "<table border=2>"
        description = description + "<tr backgroundcolor=green>"
        description = description + "<td>Raw Material: </td>" + "<td> " + item_code +"</td>"
        description = description + "</tr>"
        description = description + "<tr backgroundcolor=green>"
        description = description + "<td>Required Quantity: </td>" + "<td> " + str(bom_qty) +"</td>"
        description = description + "</tr>"
        description = description + "<tr backgroundcolor=green>"
        description = description + "<td>Qty in Reserve Warehouse: </td>" + "<td> " + str(qty_in_reserved_whse) +"</td>"
        description = description + "</tr>"
        description = description + "<tr backgroundcolor=green>"
        description = description + "<td>Qty in Production Warehouse: </td>" + "<td> " + str(qty_in_production_whse) +"</td>"
        description = description + "</tr>"
        description = description + "<tr backgroundcolor=green>"
        description = description + "<td>Total Quantity: </td>" + "<td> " + str(total_qty) +"</td>"
        description = description + "</tr>"
        description = description + "</table>"
        bottom_description = "<table>"
        bottom_description = bottom_description + "<tr>"
        bottom_description = bottom_description + "<td>As you can see the Quantity of raw materials issued for this project is more than the quantity required.</td>"
        bottom_description = bottom_description + "</tr>"
        bottom_description = bottom_description + "<tr>"
        # Typo fix in the user-facing message: "Thiscould" -> "This could".
        bottom_description = bottom_description + "<td>This could be because:</td>"
        bottom_description = bottom_description + "</tr>"
        bottom_description = bottom_description + "<tr>"
        bottom_description = bottom_description + "<td>1.The BOM is not updated - Please update the BOM, and/or</td>"
        bottom_description = bottom_description + "</tr>"
        bottom_description = bottom_description + "<tr>"
        bottom_description = bottom_description + "<td>2.Errors have been made while transferring quantities - Please correct the errors.</td>"
        bottom_description = bottom_description + "</tr>"
        description = description + "<br>" + "<br>" + bottom_description
        # Create the Issue document (`company` is the module-global set by execute).
        OuterJson_Transfer = {
            "company": company,
            "doctype": "Issue",
            "issue_type": "Planning/Production",
            "subject": "More Raw Material than Required for Project " + str(project),
            "description" : description
        }
        doc = frappe.new_doc("Issue")
        doc.update(OuterJson_Transfer)
        doc.save()
        return_doc = doc.doctype
        docName = doc.name
        # One ToDo per assignee, linked back to the newly created Issue.
        for assined_user in issue_assign:
            to_do_object = {
                "description": "More Raw Material than Required for Project " + str(project),
                "company": company,
                "doctype": "ToDo",
                "owner": assined_user,
                "reference_type": "Issue",
                "reference_name": docName
            }
            to_do_doc = frappe.new_doc("ToDo")
            to_do_doc.update(to_do_object)
            to_do_doc.save()
    if return_doc:
        return return_doc
| 38.723077 | 332 | 0.717362 |
417e3728e138f7964791aa73ec1b8c7736edb2bf | 3,186 | py | Python | paper/scripts/qaoa_treewidth_times.py | TheoryInPractice/ConSequences | acf9dcfd931137cbca71251cd0c09c5009aee99d | [
"BSD-3-Clause"
] | 7 | 2018-10-23T21:59:50.000Z | 2021-06-18T23:49:12.000Z | paper/scripts/qaoa_treewidth_times.py | TheoryInPractice/ConSequences | acf9dcfd931137cbca71251cd0c09c5009aee99d | [
"BSD-3-Clause"
] | null | null | null | paper/scripts/qaoa_treewidth_times.py | TheoryInPractice/ConSequences | acf9dcfd931137cbca71251cd0c09c5009aee99d | [
"BSD-3-Clause"
] | 2 | 2019-11-04T06:06:10.000Z | 2020-03-03T06:41:35.000Z | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import sys
import warnings
from macros import colors
def algorithm_font(algorithm):
    """Wrap *algorithm* in a LaTeX sans-serif (\\textsf) command for plot labels."""
    return rf'\textsf{{{algorithm}}}'
def plot_treewidth_time_comparison(data_filename, plot_filename, verbose):
    """Plot per-solver tree-decomposition run times (strip plot, one facet per
    graph regularity) from the CSV at *data_filename*, and save the figure as
    *plot_filename*.pdf and .png."""
    # Use latex font
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    # sns.set_context("paper", rc={"font.size": 80,
    #                              "axes.titlesize": 80,
    #                              "axes.labelsize": 50})
    # Set up Seaborn style
    sns.set(style="darkgrid")
    # Import the dataframe, keeping only the vertex counts we plot.
    dataframe = pd.read_csv(data_filename)
    dataframe = dataframe.loc[dataframe['vertices'].isin([10, 14, 18,
                                                          22, 26, 30])]
    # Render algorithm names in LaTeX sans-serif for the legend/hue labels.
    dataframe['algorithm'] =\
        np.vectorize(algorithm_font)(dataframe['algorithm'])
    if verbose:
        print(dataframe)
    # Compute the plot
    facet_kws = dict()
    warnings.simplefilter(action='ignore', category=FutureWarning)
    plot = sns.factorplot(data=dataframe,
                          row="regularity",
                          x="vertices",
                          y="tree-decomp-time",
                          hue="algorithm",
                          palette=[colors[x] for x in ['freetdi',
                                                       'meiji',
                                                       'quickbb']],
                          facet_kws=facet_kws,
                          kind="strip",
                          dodge=True,
                          jitter=True,
                          alpha=0.7,
                          linewidth=0.1,
                          aspect=1.7,
                          size=2.5,
                          hue_order=['\\textsf{freetdi}',
                                     '\\textsf{meiji-e}',
                                     '\\textsf{quickbb}'],
                          legend=False)
    # Manually add dashed lines to facets
    for axis in plot.fig.get_axes():
        for i in range(len(dataframe["vertices"]) - 1):
            axis.axvline(x=i+.5, c="white", dashes=(2, 1))
        # NOTE(review): y=900 presumably marks a 900-second solver timeout —
        # confirm against the experiment setup.
        axis.axhline(y=900, c='black', dashes=(3, 3))
    # Set axis lengths and format
    plot.set(ylim=(.0001, 100000000), yscale='log')
    # Set axis labels
    plot.fig.get_axes()[-1].set(xlabel="Vertices")
    for axis in plot.fig.get_axes():
        axis.set(ylabel="Run Time (sec)")
    # Set axis labels
    plot.set_titles(row_template="{row_name}-Regular")
    # Add legend
    plot.fig.get_axes()[0].legend(loc="upper left")
    # Save figure
    for extension in ['.pdf', '.png']:
        plot.savefig(plot_filename + extension)
if __name__ == '__main__':
    # CLI: <data_csv> <plot_basename>; writes <plot_basename>.pdf and .png.
    data_filename = sys.argv[1]
    plot_filename = sys.argv[2]
    plot_treewidth_time_comparison(data_filename, plot_filename, False)
| 35.010989 | 75 | 0.47081 |
245ec3867e852f8453815580668a88d6cae3eff0 | 410 | py | Python | products/migrations/0003_product_detail.py | annakovesdi/eco-era-store | 0b83b168bb09bdd382dc5cee3f6d161dd90faa7b | [
"ADSL"
] | null | null | null | products/migrations/0003_product_detail.py | annakovesdi/eco-era-store | 0b83b168bb09bdd382dc5cee3f6d161dd90faa7b | [
"ADSL"
] | null | null | null | products/migrations/0003_product_detail.py | annakovesdi/eco-era-store | 0b83b168bb09bdd382dc5cee3f6d161dd90faa7b | [
"ADSL"
] | 1 | 2021-08-30T16:04:54.000Z | 2021-08-30T16:04:54.000Z | # Generated by Django 3.2.4 on 2021-07-24 09:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional free-text `detail` field to
    the Product model."""
    dependencies = [
        ('products', '0002_auto_20210710_1053'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='detail',
            field=models.TextField(blank=True, max_length=600, null=True),
        ),
    ]
| 21.578947 | 74 | 0.607317 |
dc5032299031a2b9d5c651f94c5cf51ee2f6f967 | 730 | py | Python | setup.py | DipanshuGolan/feature_analysis | 78361c3aeb433e005291a9b0ef118c5c8ca77362 | [
"MIT"
] | null | null | null | setup.py | DipanshuGolan/feature_analysis | 78361c3aeb433e005291a9b0ef118c5c8ca77362 | [
"MIT"
] | null | null | null | setup.py | DipanshuGolan/feature_analysis | 78361c3aeb433e005291a9b0ef118c5c8ca77362 | [
"MIT"
] | null | null | null | import setuptools
# The PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="Feature_Analysis",
    version="0.0.1",
    author="Dipanshu Golan",
    author_email="dipanshugolan96@gmail.com",
    description="It performs feature analysis for data preprocessing or usage of data in Machine Learning.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/DipanshuGolan/feature_analysis",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
5c2a04dc979ef9c654034d2fb12e41ea9b4c5a60 | 3,463 | py | Python | asm/ops/abc.py | martmists-gh/pyasm | 038765f389b0403d37f3e0080da52d07f11827f9 | [
"MIT"
] | 1 | 2021-11-17T12:56:53.000Z | 2021-11-17T12:56:53.000Z | asm/ops/abc.py | martmists-gh/pyasm | 038765f389b0403d37f3e0080da52d07f11827f9 | [
"MIT"
] | null | null | null | asm/ops/abc.py | martmists-gh/pyasm | 038765f389b0403d37f3e0080da52d07f11827f9 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from struct import pack
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from asm.serializer import Label, Serializer
@dataclass(eq=False)
class Opcode:
    """A single bytecode instruction: opcode *id* plus a one-byte argument."""
    id: int
    arg: object = 0

    def serialize(self, ctx: 'Serializer') -> bytes:
        """Return the two-byte (opcode, argument) encoding of this instruction."""
        return self.int_arg(self.arg)

    def int_arg(self, x: int) -> bytes:
        """Encode the opcode id plus *x* truncated to a single byte.

        Negative values are emitted as their two's-complement byte. Bug fix:
        the previous implementation selected the signed "Bb" struct format
        for x < 0 but still passed ``x & 0xFF`` (which is >= 128 for any
        negative x), so struct.pack raised struct.error for every negative
        argument. Packing the masked value as an unsigned byte ("BB") yields
        the intended encoding for all inputs.
        """
        return pack("BB", self.id, x & 0xFF)
class JumpOp(Opcode):
    # Base class for jump instructions whose argument is a Label.
    def __init__(self, id: int, arg: 'Label'):
        super().__init__(id, arg)
        # Register with the label so the serializer can find the ops that
        # reference it.
        arg.parents.append(self)
        # Presumably filled in by the serializer once the label's bytecode
        # offset is known — confirm against asm.serializer.
        self.target_index = None
class ConstOp(Opcode):
    """Opcode whose argument is a constant; emitted as its co_consts index."""
    def __init__(self, id: int, arg: Any):
        super().__init__(id, arg)

    def serialize(self, ctx: 'Serializer') -> bytes:
        from asm.serializer import code_replace
        # Intern the constant into the code object's pool on first use.
        pool = ctx.code.co_consts
        if self.arg not in pool:
            ctx.code = code_replace(ctx.code, co_consts=pool + (self.arg,))
        return self.int_arg(ctx.code.co_consts.index(self.arg))
class NameOp(Opcode):
    """Opcode whose argument is a global/attribute name; emitted as its co_names index."""
    def __init__(self, id: int, arg: str):
        super().__init__(id, arg)

    def serialize(self, ctx: 'Serializer') -> bytes:
        from asm.serializer import code_replace
        # Intern the name into the code object's name tuple on first use.
        names = ctx.code.co_names
        if self.arg not in names:
            ctx.code = code_replace(ctx.code, co_names=names + (self.arg,))
        return self.int_arg(ctx.code.co_names.index(self.arg))
class VarOp(Opcode):
    """Opcode whose argument is a local variable name; emitted as its co_varnames index."""
    def __init__(self, id: int, arg: str):
        super().__init__(id, arg)

    def serialize(self, ctx: 'Serializer') -> bytes:
        from asm.serializer import code_replace
        # Intern the variable name into the code object's varnames on first use.
        varnames = ctx.code.co_varnames
        if self.arg not in varnames:
            ctx.code = code_replace(ctx.code, co_varnames=varnames + (self.arg,))
        return self.int_arg(ctx.code.co_varnames.index(self.arg))
class CellOp(Opcode):
    """Opcode whose argument is a closure cell name.

    Free variables are numbered after cell variables, matching CPython's
    combined cell+free index space; unknown names are an error (the pools
    are fixed here, unlike ConstOp/NameOp/VarOp).
    """
    def __init__(self, id: int, arg: str):
        super().__init__(id, arg)

    def serialize(self, ctx: 'Serializer') -> bytes:
        free = ctx.code.co_freevars
        cells = ctx.code.co_cellvars
        if self.arg in free:
            return self.int_arg(free.index(self.arg) + len(cells))
        if self.arg in cells:
            return self.int_arg(cells.index(self.arg))
        raise ValueError("Could not find {0} in freevars or cellvars!".format(self.arg))
class AbsJumpOp(JumpOp):
    # Jump with an absolute bytecode target.
    def __init__(self, id: int, arg: 'Label'):
        super().__init__(id, arg)
    def serialize(self, ctx: 'Serializer') -> bytes:
        # NOTE(review): int_arg expects an int, so by serialization time
        # ``self.arg`` is presumably no longer the Label but its resolved
        # absolute index — confirm against asm.serializer's patching logic.
        return self.int_arg(self.arg)
class RelJumpOp(JumpOp):
    # Jump encoded relative to the current instruction index.
    def __init__(self, id: int, arg: 'Label'):
        super().__init__(id, arg)
    def serialize(self, ctx: 'Serializer') -> bytes:
        from_index = ctx.current_index
        # NOTE(review): ``self.arg`` is used as an int here, so it is
        # presumably resolved from the Label to a target index before
        # serialization — confirm against asm.serializer.
        to_index = self.arg
        return self.int_arg(to_index - from_index)
# Used only in 3.11+
class MultiOp(Opcode):
    """Opcode whose argument is a pair; each half is encoded by a subclass hook.

    ``serialize`` concatenates the bytes produced by ``serialize_left`` for
    ``arg[0]`` and ``serialize_right`` for ``arg[1]``; both hooks must be
    overridden by concrete subclasses.
    """
    def __init__(self, id: int, arg: tuple):
        super().__init__(id, arg)

    def serialize(self, ctx: 'Serializer') -> bytes:
        left = self.serialize_left(ctx, self.arg[0])
        right = self.serialize_right(ctx, self.arg[1])
        return left + right

    def serialize_left(self, ctx: 'Serializer', arg) -> bytes:
        raise NotImplementedError()

    def serialize_right(self, ctx: 'Serializer', arg) -> bytes:
        raise NotImplementedError()
| 28.858333 | 96 | 0.616517 |
e224941ef001fda86a8d3a935291a495ee35d90e | 5,832 | py | Python | Amplo/Utils/io.py | Amplo-GmbH/AutoML | eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606 | [
"MIT"
] | 5 | 2022-01-07T13:34:37.000Z | 2022-03-17T06:40:28.000Z | Amplo/Utils/io.py | Amplo-GmbH/AutoML | eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606 | [
"MIT"
] | 5 | 2022-03-22T13:42:22.000Z | 2022-03-31T16:20:44.000Z | Amplo/Utils/io.py | Amplo-GmbH/AutoML | eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606 | [
"MIT"
] | 1 | 2021-12-17T22:41:11.000Z | 2021-12-17T22:41:11.000Z | import os
import json
import warnings
from typing import Union, Tuple
from pathlib import Path
import pandas as pd
# Public API of this module.
__all__ = ['boolean_input', 'parse_json', 'read_pandas', 'merge_logs', 'get_log_metadata']
# Map of supported file extensions to the pandas reader used for them;
# consulted by read_pandas (to load) and get_log_metadata (to filter).
FILE_READERS = {'.csv': pd.read_csv, '.json': pd.read_json, '.xml': pd.read_xml,
                '.feather': pd.read_feather, '.parquet': pd.read_parquet,
                '.stata': pd.read_stata, '.pickle': pd.read_pickle}
def boolean_input(question: str) -> bool:
    """Ask a yes/no question on stdin until a recognizable answer is given.

    Accepts (case-insensitively) 'y'/'yes' for True and 'n'/'no' for False;
    anything else re-prompts.
    """
    while True:
        answer = input(question + ' [y / n]').lower()
        if answer in ('n', 'no'):
            return False
        if answer in ('y', 'yes'):
            return True
        print('Sorry, I did not understand. Please answer with "n" or "y"')
def parse_json(json_string: Union[str, dict]) -> Union[str, dict]:
    """Best-effort parse of a Python-literal-flavored JSON string.

    Dicts pass through unchanged.  Strings are normalized (single quotes,
    True/False/None, nan) towards strict JSON before loading; if parsing
    still fails, the problem is logged and the original string is returned.
    """
    if isinstance(json_string, dict):
        return json_string
    normalized = (json_string
                  .replace("'", '"')
                  .replace("True", "true")
                  .replace("False", "false")
                  .replace("nan", "NaN")
                  .replace("None", "null"))
    try:
        return json.loads(normalized)
    except json.decoder.JSONDecodeError:
        print('[AutoML] Cannot validate, impassable JSON.')
        print(json_string)
        return json_string
def read_pandas(path: Union[str, Path]) -> pd.DataFrame:
    """
    Wrapper for various read functions

    Dispatches on the file extension via FILE_READERS; unsupported
    extensions raise NotImplementedError.

    Returns
    -------
    pd.DataFrame
    """
    suffix = Path(path).suffix
    if suffix not in FILE_READERS:
        raise NotImplementedError(f'File format {suffix} not supported.')
    return FILE_READERS[suffix](path)
def merge_logs(path_to_folder, target='labels'):
    r"""
    Combine log files from given directory into a multi-indexed dataframe
    Notes
    -----
    Make sure that each protocol is located in a sub folder whose name represents the respective label.
    A directory structure example:
    | ``path_to_folder``
    | ``├─ Label_1``
    | ``│   ├─ Log_1.*``
    | ``│   └─ Log_2.*``
    | ``├─ Label_2``
    | ``│   └─ Log_3.*``
    | ``└─ ...``
    Parameters
    ----------
    path_to_folder : str or Path
        Parent directory
    target : str
        Column name for target
    Returns
    -------
    data : pd.DataFrame
        All logs concatenated into one multi-indexed dataframe.
        Multi-index names are ``log`` and ``index``.
        Target column depicts the folder name.
    metadata : dict
        File metadata
    Raises
    ------
    FileNotFoundError
        If ``path_to_folder`` does not exist.
    ValueError
        If ``path_to_folder`` is not a directory or ``target`` is not a non-empty string.
    """
    # Tests.  BUGFIX: existence is checked *before* is_dir().  Path.is_dir()
    # returns False for a missing path, so with the original order the
    # FileNotFoundError branch was unreachable and a missing path was
    # misreported as "no directory".
    if not Path(path_to_folder).exists():
        raise FileNotFoundError(f'Directory does not exist: {path_to_folder}')
    if not Path(path_to_folder).is_dir():
        raise ValueError(f'The provided path is no directory: {path_to_folder}')
    if not isinstance(target, str) or target == '':
        raise ValueError('Target name must be a non-empty string.')
    # Result init
    data = []
    # Get file names
    metadata = get_log_metadata(path_to_folder)
    # Loop through file paths in metadata
    for file_id in metadata:
        # Read data
        datum = read_pandas(metadata[file_id]['full_path'])
        # Set labels: the sub folder name is the label value
        datum[target] = metadata[file_id]['folder']
        # Set index to (log id, original row index)
        datum.set_index(pd.MultiIndex.from_product([[file_id], datum.index.values], names=['log', 'index']),
                        inplace=True)
        # Add to list
        data.append(datum)
    if len(data) == 1:
        # Omit concatenation when only one item
        return data[0], metadata
    else:
        # Concatenate dataframes
        return pd.concat(data), metadata
def get_log_metadata(path_to_folder):
    """Get metadata of log files
    Parameters
    ----------
    path_to_folder : str or Path
        Parent directory holding one sub folder per label.
    Notes
    -----
    Make sure that each protocol is located in a sub folder whose name represents the respective label.
    A directory structure example:
    | ``path_to_folder``
    | ``├─ Label_1``
    | ``│   ├─ Log_1.*``
    | ``│   └─ Log_2.*``
    | ``├─ Label_2``
    | ``│   └─ Log_3.*``
    | ``└─ ...``
    Returns
    -------
    metadata : dict
        Dictionary whose keys depict the file id (integer) and each value contains a dictionary with
        ``folder``, ``file``, ``full_path`` and ``last_modified`` key.
    Raises
    ------
    FileNotFoundError
        If the path does not exist or no readable file was found.
    ValueError
        If the path exists but is not a directory.
    """
    # Checks.  BUGFIX: existence is checked *before* is_dir().  Path.is_dir()
    # returns False for a missing path, so with the original order the
    # FileNotFoundError branch was unreachable and a missing path was
    # misreported as "no directory".
    if not Path(path_to_folder).exists():
        raise FileNotFoundError(f'Directory does not exist: {path_to_folder}')
    if not Path(path_to_folder).is_dir():
        raise ValueError(f'The provided path is no directory: {path_to_folder}')
    # Init
    metadata = dict()
    file_id = 0
    # Loop through folders; sorted() keeps file_id assignment deterministic
    for folder in sorted(Path(path_to_folder).iterdir()):
        # Loop through files (ignore hidden files)
        for file in sorted(folder.glob('[!.]*.*')):
            # Check file: skip formats read_pandas cannot handle and empty files
            if file.suffix not in FILE_READERS:
                warnings.warn(f'[AutoML] Skipped unsupported file format: {file}')
                continue
            elif file.stat().st_size == 0:
                warnings.warn(f'[AutoML] Skipped empty file: {file}')
                continue
            # Add to metadata
            metadata[file_id] = {
                'folder': str(folder.name),
                'file': str(file.name),
                'full_path': str(file.resolve()),
                'last_modified': os.path.getmtime(str(file)),
            }
            # Increment
            file_id += 1
    if file_id == 0:
        raise FileNotFoundError('Directory seems to be empty. Check whether you specified the correct path.')
    return metadata
| 29.755102 | 109 | 0.567387 |
db74cee452d6a55a32bd429c4f178bed1e1f956d | 4,699 | py | Python | glartifacts/gitaly/proto/blob_pb2_grpc.py | TimothySprague/glartifacts | 1d2400593fa7fa261b15c1c7f3494daf009586f8 | [
"MIT"
] | null | null | null | glartifacts/gitaly/proto/blob_pb2_grpc.py | TimothySprague/glartifacts | 1d2400593fa7fa261b15c1c7f3494daf009586f8 | [
"MIT"
] | null | null | null | glartifacts/gitaly/proto/blob_pb2_grpc.py | TimothySprague/glartifacts | 1d2400593fa7fa261b15c1c7f3494daf009586f8 | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import blob_pb2 as blob__pb2
class BlobServiceStub(object):
  # Generated gRPC client stub for gitaly.BlobService — DO NOT EDIT by hand,
  # regenerate from the .proto definition instead.  All five RPCs are
  # server-streaming: one request in, an iterator of responses out.
  # missing associated documentation comment in .proto file
  pass
  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    self.GetBlob = channel.unary_stream(
        '/gitaly.BlobService/GetBlob',
        request_serializer=blob__pb2.GetBlobRequest.SerializeToString,
        response_deserializer=blob__pb2.GetBlobResponse.FromString,
        )
    self.GetBlobs = channel.unary_stream(
        '/gitaly.BlobService/GetBlobs',
        request_serializer=blob__pb2.GetBlobsRequest.SerializeToString,
        response_deserializer=blob__pb2.GetBlobsResponse.FromString,
        )
    self.GetLFSPointers = channel.unary_stream(
        '/gitaly.BlobService/GetLFSPointers',
        request_serializer=blob__pb2.GetLFSPointersRequest.SerializeToString,
        response_deserializer=blob__pb2.GetLFSPointersResponse.FromString,
        )
    self.GetNewLFSPointers = channel.unary_stream(
        '/gitaly.BlobService/GetNewLFSPointers',
        request_serializer=blob__pb2.GetNewLFSPointersRequest.SerializeToString,
        response_deserializer=blob__pb2.GetNewLFSPointersResponse.FromString,
        )
    self.GetAllLFSPointers = channel.unary_stream(
        '/gitaly.BlobService/GetAllLFSPointers',
        request_serializer=blob__pb2.GetAllLFSPointersRequest.SerializeToString,
        response_deserializer=blob__pb2.GetAllLFSPointersResponse.FromString,
        )
class BlobServiceServicer(object):
  # Generated gRPC service base class for gitaly.BlobService — DO NOT EDIT by
  # hand, regenerate from the .proto definition.  Subclass and override the
  # methods below; each default implementation answers UNIMPLEMENTED.
  # missing associated documentation comment in .proto file
  pass
  def GetBlob(self, request, context):
    """GetBlob returns the contents of a blob object referenced by its object
    ID. We use a stream to return a chunked arbitrarily large binary
    response
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetBlobs(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetLFSPointers(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetNewLFSPointers(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetAllLFSPointers(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_BlobServiceServicer_to_server(servicer, server):
  # Registers *servicer*'s RPC handlers with *server* under the
  # 'gitaly.BlobService' service name (generated code — DO NOT EDIT by hand).
  rpc_method_handlers = {
      'GetBlob': grpc.unary_stream_rpc_method_handler(
          servicer.GetBlob,
          request_deserializer=blob__pb2.GetBlobRequest.FromString,
          response_serializer=blob__pb2.GetBlobResponse.SerializeToString,
      ),
      'GetBlobs': grpc.unary_stream_rpc_method_handler(
          servicer.GetBlobs,
          request_deserializer=blob__pb2.GetBlobsRequest.FromString,
          response_serializer=blob__pb2.GetBlobsResponse.SerializeToString,
      ),
      'GetLFSPointers': grpc.unary_stream_rpc_method_handler(
          servicer.GetLFSPointers,
          request_deserializer=blob__pb2.GetLFSPointersRequest.FromString,
          response_serializer=blob__pb2.GetLFSPointersResponse.SerializeToString,
      ),
      'GetNewLFSPointers': grpc.unary_stream_rpc_method_handler(
          servicer.GetNewLFSPointers,
          request_deserializer=blob__pb2.GetNewLFSPointersRequest.FromString,
          response_serializer=blob__pb2.GetNewLFSPointersResponse.SerializeToString,
      ),
      'GetAllLFSPointers': grpc.unary_stream_rpc_method_handler(
          servicer.GetAllLFSPointers,
          request_deserializer=blob__pb2.GetAllLFSPointersRequest.FromString,
          response_serializer=blob__pb2.GetAllLFSPointersResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'gitaly.BlobService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| 40.162393 | 84 | 0.752075 |
9962a8e8c1669afd2952248afe664e1ed0c33cf3 | 6,227 | py | Python | meido_mall/meido_mall/apps/goods/models.py | zhangbin9597/py22_django | 6b3ee1f4ade6b16d55496d8d695acfe5981ddd18 | [
"MIT"
] | null | null | null | meido_mall/meido_mall/apps/goods/models.py | zhangbin9597/py22_django | 6b3ee1f4ade6b16d55496d8d695acfe5981ddd18 | [
"MIT"
] | null | null | null | meido_mall/meido_mall/apps/goods/models.py | zhangbin9597/py22_django | 6b3ee1f4ade6b16d55496d8d695acfe5981ddd18 | [
"MIT"
] | null | null | null | from django.db import models
from meido_mall.utils.models import BaseModel
# Create your models here.
class GoodsCategory(BaseModel):
    """Goods category; a self-referencing tree (top-level rows have parent=None)."""
    name = models.CharField(max_length=10, verbose_name='名称')
    parent = models.ForeignKey('self', related_name='subs', null=True, blank=True, on_delete=models.CASCADE,
                               verbose_name='父类别')
    class Meta:
        db_table = 'tb_goods_category'
        verbose_name = '商品类别'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class GoodsChannelGroup(BaseModel):
    """Group of goods channels; referenced by GoodsChannel.group."""
    name = models.CharField(max_length=20, verbose_name='频道组名')
    class Meta:
        db_table = 'tb_channel_group'
        verbose_name = '商品频道组'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class GoodsChannel(BaseModel):
    """Goods channel: links a top-level category into a channel group with a URL."""
    # NOTE(review): no on_delete on this ForeignKey — only valid on
    # Django < 2.0; confirm the project's Django version.
    group = models.ForeignKey(GoodsChannelGroup, verbose_name='频道组名')
    category = models.ForeignKey(GoodsCategory, on_delete=models.CASCADE, verbose_name='顶级商品类别')
    url = models.CharField(max_length=50, verbose_name='频道页面链接')
    # Ordering of this channel within its group.
    sequence = models.IntegerField(verbose_name='组内顺序')
    class Meta:
        db_table = 'tb_goods_channel'
        verbose_name = '商品频道'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.category.name
class Brand(BaseModel):
    """Brand: name, logo image and first letter (for alphabetic lookup)."""
    name = models.CharField(max_length=20, verbose_name='名称')
    logo = models.ImageField(verbose_name='Logo图片')
    first_letter = models.CharField(max_length=1, verbose_name='品牌首字母')
    class Meta:
        db_table = 'tb_brand'
        verbose_name = '品牌'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class SPU(BaseModel):
    """Product SPU (standard product unit): shared catalog data referenced by SKU."""
    name = models.CharField(max_length=50, verbose_name='名称')
    brand = models.ForeignKey(Brand, on_delete=models.PROTECT, verbose_name='品牌')
    # Denormalized first/second/third-level category links.
    category1 = models.ForeignKey(GoodsCategory, on_delete=models.PROTECT, related_name='cat1_spu', verbose_name='一级类别')
    category2 = models.ForeignKey(GoodsCategory, on_delete=models.PROTECT, related_name='cat2_spu', verbose_name='二级类别')
    category3 = models.ForeignKey(GoodsCategory, on_delete=models.PROTECT, related_name='cat3_spu', verbose_name='三级类别')
    sales = models.IntegerField(default=0, verbose_name='销量')
    comments = models.IntegerField(default=0, verbose_name='评价数')
    # Free-text description sections: details, packaging, after-sales service.
    desc_detail = models.TextField(default='', verbose_name='详细介绍')
    desc_pack = models.TextField(default='', verbose_name='包装信息')
    desc_service = models.TextField(default='', verbose_name='售后服务')
    class Meta:
        db_table = 'tb_spu'
        verbose_name = '商品SPU'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class SKU(BaseModel):
    """Product SKU (stock keeping unit): sellable item with its own price and stock."""
    name = models.CharField(max_length=50, verbose_name='名称')
    caption = models.CharField(max_length=100, verbose_name='副标题')
    spu = models.ForeignKey(SPU, on_delete=models.CASCADE, verbose_name='商品')
    category = models.ForeignKey(GoodsCategory, on_delete=models.PROTECT, verbose_name='从属类别')
    # Selling price, purchase (cost) price and list/market price.
    price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='单价')
    cost_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='进价')
    market_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='市场价')
    stock = models.IntegerField(default=0, verbose_name='库存')
    sales = models.IntegerField(default=0, verbose_name='销量')
    comments = models.IntegerField(default=0, verbose_name='评价数')
    is_launched = models.BooleanField(default=True, verbose_name='是否上架销售')
    default_image = models.ImageField(max_length=200, default='', null=True, blank=True, verbose_name='默认图片')
    class Meta:
        db_table = 'tb_sku'
        verbose_name = '商品SKU'
        verbose_name_plural = verbose_name
    def __str__(self):
        return '%s: %s' % (self.id, self.name)
class SKUImage(BaseModel):
    """Additional image attached to an SKU."""
    sku = models.ForeignKey(SKU, on_delete=models.CASCADE, verbose_name='sku')
    image = models.ImageField(verbose_name='图片')
    class Meta:
        db_table = 'tb_sku_image'
        verbose_name = 'SKU图片'
        verbose_name_plural = verbose_name
    def __str__(self):
        return '%s %s' % (self.sku.name, self.id)
class SPUSpecification(BaseModel):
    """Specification axis of an SPU (e.g. one named dimension of variation)."""
    spu = models.ForeignKey(SPU, on_delete=models.CASCADE, related_name='specs', verbose_name='商品SPU')
    name = models.CharField(max_length=20, verbose_name='规格名称')
    class Meta:
        db_table = 'tb_spu_specification'
        verbose_name = '商品SPU规格'
        verbose_name_plural = verbose_name
    def __str__(self):
        return '%s: %s' % (self.spu.name, self.name)
class SpecificationOption(BaseModel):
    """One selectable value of an SPU specification."""
    spec = models.ForeignKey(SPUSpecification, related_name='options', on_delete=models.CASCADE, verbose_name='规格')
    value = models.CharField(max_length=20, verbose_name='选项值')
    class Meta:
        db_table = 'tb_specification_option'
        verbose_name = '规格选项'
        verbose_name_plural = verbose_name
    def __str__(self):
        return '%s - %s' % (self.spec, self.value)
class SKUSpecification(BaseModel):
    """Concrete (specification, option) pair chosen for one SKU."""
    sku = models.ForeignKey(SKU, related_name='specs', on_delete=models.CASCADE, verbose_name='sku')
    spec = models.ForeignKey(SPUSpecification, on_delete=models.PROTECT, verbose_name='规格名称')
    option = models.ForeignKey(SpecificationOption, on_delete=models.PROTECT, verbose_name='规格值')
    class Meta:
        db_table = 'tb_sku_specification'
        verbose_name = 'SKU规格'
        verbose_name_plural = verbose_name
    def __str__(self):
        return '%s: %s - %s' % (self.sku, self.spec.name, self.option.value)
class GoodsVisitCount(BaseModel):
    """Per-category visit counter; one row per category and date."""
    category = models.ForeignKey(GoodsCategory, on_delete=models.CASCADE, verbose_name='商品分类')
    count = models.IntegerField(verbose_name='访问量', default=0)
    # Stamped automatically on creation (auto_now_add).
    date = models.DateField(auto_now_add=True, verbose_name='统计日期')
    class Meta:
        db_table = 'tb_goods_visit'
        verbose_name = '统计分类商品访问量'
        verbose_name_plural = verbose_name
| 35.180791 | 120 | 0.691987 |
3230a72c9482df9462d6bed80f699e2b059489c0 | 56 | py | Python | diggrtoolbox/unified_api/__init__.py | diggr/diggrtoolbox | 25f5cf73d5bc4d3a54096aa8417a42025693f0b6 | [
"MIT"
] | 2 | 2018-08-31T22:02:04.000Z | 2019-01-25T15:51:04.000Z | diggrtoolbox/unified_api/__init__.py | diggr/diggrtoolbox | 25f5cf73d5bc4d3a54096aa8417a42025693f0b6 | [
"MIT"
] | null | null | null | diggrtoolbox/unified_api/__init__.py | diggr/diggrtoolbox | 25f5cf73d5bc4d3a54096aa8417a42025693f0b6 | [
"MIT"
] | null | null | null | from .diggr_api import DiggrAPI
__all__ = ['DiggrAPI']
| 14 | 31 | 0.75 |
4a1de71dcd740ea68c0b556dce82fc3b88c4a6f5 | 662 | py | Python | third_party/cyw30739_sdk/btp_reader.py | carol-apple/connectedhomeip | b1d40eb423ba5c2f4bbe15ff42a2b5d1b78ba2ce | [
"Apache-2.0"
] | 1 | 2022-02-22T02:02:10.000Z | 2022-02-22T02:02:10.000Z | third_party/cyw30739_sdk/btp_reader.py | carol-apple/connectedhomeip | b1d40eb423ba5c2f4bbe15ff42a2b5d1b78ba2ce | [
"Apache-2.0"
] | null | null | null | third_party/cyw30739_sdk/btp_reader.py | carol-apple/connectedhomeip | b1d40eb423ba5c2f4bbe15ff42a2b5d1b78ba2ce | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import json
import sys
def main():
    """Parse ``key = value`` lines from the .btp file named in argv[1],
    derive the XIP region offset/length, and print everything as JSON.

    Lines without exactly one '=' are ignored.  Returns 0 on success.
    """
    btp_file = sys.argv[1]
    items = {}
    with open(btp_file) as btp:
        for line in btp:
            parts = line.strip().split("=")
            if len(parts) == 2:
                items[parts[0].strip()] = parts[1].strip()
    # Fixed offset of the XIP data section inside the DS area.
    items["XIP_DS_OFFSET"] = "0x0001e000"
    xip_len = (
        int(items["ConfigDS2Location"], 16)
        - int(items["ConfigDSLocation"], 16)
        - int(items["XIP_DS_OFFSET"], 16)
    )
    items["XIP_LEN"] = "0x{:08x}".format(xip_len)
    print(json.dumps(items))
    return 0
if __name__ == "__main__":
sys.exit(main())
| 20.6875 | 44 | 0.522659 |
63d5fe140552e8dc49e81763afe2f6d594753819 | 3,373 | py | Python | pii_generator/constants.py | AvinashRajPurohit/pii-data-generator | 92482575de2c093f2f49282fe3377924d6a6370d | [
"MIT"
] | 1 | 2021-12-14T23:19:12.000Z | 2021-12-14T23:19:12.000Z | pii_generator/constants.py | AvinashRajPurohit/pii-data-generator | 92482575de2c093f2f49282fe3377924d6a6370d | [
"MIT"
] | null | null | null | pii_generator/constants.py | AvinashRajPurohit/pii-data-generator | 92482575de2c093f2f49282fe3377924d6a6370d | [
"MIT"
] | null | null | null | from datetime import datetime
from google.cloud import bigquery
# IIN digit prefixes used when generating fake card numbers.
# (Spelling "PREFIXS" kept — it is part of the module's public API.)
MASTER_CARD_PREFIXS = [
    ['5', '1'],
    ['5', '2'],
    ['5', '3'],
    ['5', '4'],
    ['5', '5']
]
VISA_PREFIXS = [
    ['4', '5', '3', '9'],
    ['4', '5', '5', '6'],
    ['4', '9', '1', '6'],
    ['4', '5', '3', '2'],
    ['4', '9', '2', '9'],
    ['4', '0', '2', '4', '0', '0', '7', '1'],
    ['4', '4', '8', '6'],
    ['4', '7', '1', '6'],
    ['4']]
VISA = 'visa'
MASTER_CARD = 'master card'
BLOOD_GROUP = ['A+', 'A-', 'B+', 'B-', "O+", "O-", "AB+", "AB-"]
# Expiry-date choices: months 1-12, years from this year to +13.
MONTHS = list(range(1, 13))
YEARS = list(range(datetime.today().year, datetime.today().year + 14))
CARDS_LIST = [VISA, MASTER_CARD]
# Short database-engine identifiers used as dispatch keys elsewhere.
# NOTE(review): MYSQL is 'msql' while the excluded-DB lists use the literal
# 'mysql' — looks intentional (key vs. schema name) but worth confirming.
MARIADB = 'maria'
MYSQL = 'msql'
POSTGRES = 'psql'
SQL = 'sql'
# System schemas that should never be treated as user databases.
MARIADB_EXCLUDED_DBS = ['information_schema', 'mysql', 'performance_schema']
PSQL_EXCLUDED_DBS = ["information_schema", "mysql", "performance_schema", "sys", "postgres", "azure_maintenance", "azure_sys"]
PII_TABLE_SQL_QUERY = ( "CREATE TABLE if not exists PII ("
"first_name VARCHAR(255),"
"last_name VARCHAR(255),"
"address VARCHAR(255),"
"email VARCHAR(255),"
"country VARCHAR(255),"
"dob VARCHAR(255),"
"credit_card VARCHAR(255),"
"card_type VARCHAR(255),"
"cvv INTEGER,"
"height REAL,"
"weight INTEGER,"
"blood_group VARCHAR(255),"
"expiry_date VARCHAR(255))")
INSERT_QUERY_MARIADB = """INSERT INTO PII (
first_name, last_name, address, email, country, dob, credit_card, card_type, cvv, expiry_date, height, weight, blood_group)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
"""
AZURE_DATABASES = ['maria', 'sql', 'mysql', 'psql']
GCP_DATABASES = ["sql", "mysql", "psql"]
# BigQuery schema for the PII table; mirrors PII_TABLE_SQL_QUERY / SQL_QUERY.
BIG_QUERY_SCHEMA = [
    bigquery.SchemaField("first_name", "STRING"),
    bigquery.SchemaField("last_name", "STRING"),
    bigquery.SchemaField("email", "STRING"),
    bigquery.SchemaField("dob", "STRING"),
    bigquery.SchemaField("address", "STRING"),
    bigquery.SchemaField("country", "STRING"),
    bigquery.SchemaField("blood_group", "STRING"),
    bigquery.SchemaField("height", "FLOAT"),
    bigquery.SchemaField("weight", "INTEGER"),
    bigquery.SchemaField("credit_card", "STRING"),
    bigquery.SchemaField("card_type", "STRING"),
    # BUGFIX: this slot duplicated "last_name" and the schema lacked cvv,
    # while both SQL DDL constants in this module define a cvv INTEGER column.
    bigquery.SchemaField("cvv", "INTEGER"),
    bigquery.SchemaField("expiry_date", "STRING"),
]
# Non-idempotent variant of the PII DDL (no IF NOT EXISTS); same 13 columns.
SQL_QUERY = ( "CREATE TABLE PII ("
                "first_name VARCHAR(255),"
                "last_name VARCHAR(255),"
                "address VARCHAR(255),"
                "email VARCHAR(255),"
                "country VARCHAR(255),"
                "dob VARCHAR(255),"
                "credit_card VARCHAR(255),"
                "card_type VARCHAR(255),"
                "cvv INTEGER,"
                "height REAL,"
                "weight INTEGER,"
                "blood_group VARCHAR(255),"
                "expiry_date VARCHAR(255))")
| 31.231481 | 126 | 0.480878 |
14f01607389e1c35676e61339aad79b9b958e2e7 | 6,738 | py | Python | CarRentalAgencyManagement/print_helper.py | mantoomine/Amir_Projects | 1386a0234b279884e743185b4072c0c3e01ebe87 | [
"MIT"
] | null | null | null | CarRentalAgencyManagement/print_helper.py | mantoomine/Amir_Projects | 1386a0234b279884e743185b4072c0c3e01ebe87 | [
"MIT"
] | null | null | null | CarRentalAgencyManagement/print_helper.py | mantoomine/Amir_Projects | 1386a0234b279884e743185b4072c0c3e01ebe87 | [
"MIT"
] | null | null | null | import pyinputplus as pyip
def auto_company_menu():
    """Display the main menu and return the selected option number (1-6)."""
    print('Welcome to the lnu auto company')
    print('Menu')
    print('1: Manage customer')
    print('2: Manage cars')
    print('3: Manage leasing')
    print('4: The most popular car')
    print('5: The most high rated customer')
    print('6: exit')
    return pyip.inputInt('Please enter one of the above options: ')
def car_menu():
    """Display the car-management submenu and return the selected option (1-4)."""
    print('1: Add a car')
    print('2: Remove a car')
    print('3: Change the car information')
    print('4: back to the previous menu')
    return pyip.inputInt('Please enter one of the above options: ')
def customer_menu():
    """Display the customer-management submenu and return the selected option (1-5)."""
    print('1: Add a customer')
    print('2: Remove a customer membership')
    print('3: Change the customers information')
    print('4: Get the information of the car based on the specific customer')
    print('5: Back to the previous menu')
    return pyip.inputInt('Please enter one of the above options: ')
def leasing_menu():
    """Display the leasing submenu and return the selected option (1-4)."""
    print('1: Lease a car')
    print('2: Return the leased car')
    print('3: Get the customers information based on the return date')
    print('4: Back to the previous menu')
    return pyip.inputInt('Please enter one of the above options: ')
def register_car():
    """Interactively collect a new car's data.

    Returns a 5-tuple: (model, manufacture, production_year, category, quantity).
    """
    print('to register a new car , fill out the following information ')
    car_model = input('Car model: ')
    manufacture = input('Car manufacture: ')
    product_year = pyip.inputInt('Car production year: ')
    c_category = car_category()
    quantity = pyip.inputInt('Quantity: ')
    # int() is redundant (inputInt already returns an int) but kept as-is.
    car = (car_model, manufacture, product_year, c_category, int(quantity))
    return car
def car_category():
    """Prompt for one of ten car categories and return the category name.

    Any number outside 1-9 falls back to 'Other', matching the menu's
    tenth option (and the original else branch).
    """
    # 'one of' fixes the original 'on of' typo in the user-facing prompt.
    print('Choose one of the below car categories that you are interested in:')
    print('1. Sedan \n2. Coupe \n3. Sports \n4. Station wagon \n5. Hatchback \n6. Convertible \n7. SUV \n8. Minivan '
          '\n9. Pickup truck \n10. Other')
    categories = pyip.inputInt('Enter : ')
    # Dispatch table replaces the ten-branch if/elif chain.
    names = {
        1: 'Sedan', 2: 'Coupe', 3: 'Sports', 4: 'Station wagon',
        5: 'Hatchback', 6: 'Convertible', 7: 'SUV', 8: 'Minivan',
        9: 'Pickup truck',
    }
    return names.get(categories, 'Other')
def register_customer():
    """Interactively collect a new customer's data.

    Returns a 5-tuple: (first_name, surname, gender, address, social_security_number).
    """
    print('to register a new customer , fill out the following information ')
    # NOTE(review): the prompt says 'First model' but this collects the first name.
    first_name = input('First model: ')
    surname = input('Surname: ')
    gender_specification = gender_menu()
    customer_address = input('Customer address: ')
    social_security_number = pyip.inputNum('Social security number (YYMMDDXXXX): ')
    customer = (first_name, surname, gender_specification, customer_address, social_security_number)
    return customer
def gender_menu():
    """Prompt for a gender selection and return it as a string.

    1 -> 'Male', 2 -> 'Female', anything else -> 'Other'.
    """
    selection = pyip.inputInt('1: Male \n2: Female \n3: Other \nEnter: ')
    return {1: 'Male', 2: 'Female'}.get(selection, 'Other')
def remove_customer():
    """Ask for and return the social security number identifying the customer to remove."""
    social_security_number = pyip.inputInt('Social security number (YYMMDDXXXX): ')
    return social_security_number
def remove_car():
    """Ask for and return (car_model, production_year) identifying the car to remove."""
    car_model = input('Car model: ')
    product_year = pyip.inputInt('Car production year: ')
    return car_model, product_year
def invalid_input():
    # Generic complaint used by the menu loops.
    print('Invalid input')
def return_leased_car():
    """Ask for a return date and give it back as a (year, month, day) string tuple.

    Raises ValueError if the entry does not contain exactly two dashes.
    """
    raw = input('Enter the return date (YYYY-MM-DD): ')
    year, month, day = raw.split('-')
    return year, month, day
def display_invalid_alternatives(alternatives):
    """Print the error message matching *alternatives*; unknown keys print nothing."""
    messages = {
        'customer': 'Customer has not been found',
        'register': 'Registration has not been found',
        'leasing': 'No information about the entered leasing has been found',
        'return': 'No information about the entered return date has been found',
        'social_security_number': 'Wrong social security number format (the correct format = YYMMDDXXXX)',
    }
    message = messages.get(alternatives)
    if message is not None:
        print(message)
# ---------------------------------------------------------------------------
# Message helpers: each function below only prints a fixed (or %-formatted)
# status line for the console UI; none of them returns a value.
# ---------------------------------------------------------------------------
def exit_program():
    print('Exit the Program, See You!')
def social_number():
    print('social_security_number')
def entering_leasing_info():
    print('Fill out the following information to do a new leasing')
def entering_returning_car_info():
    print('Fill out the following information to return the leased car')
def terminating_system():
    print("Program terminated manually")
def database_connection_error():
    print('Database connection failed')
def print_high_rated_customer():
    print('The most high rated customers (customers with the most leased cars) are:')
def customer_rate(count, customer):
    # customer: sequence of (name, total_leased)
    print('Number %d is Name: %s with the total leased of: %s' % (count, customer[0], customer[1]))
def print_high_rated_car():
    print('The most popular cars (cars which has the highest number of leased) are:')
def car_rate(car, count):
    # car: sequence of (model, manufacture, category, product_year, lease_count)
    print('Number %d is The car model: %s with the total number of leased: %s\n\tManufacture: %s Category: %s product '
          'year: %s' % (
        count, car[0], car[4], car[1], car[2], car[3]))
def customers_name_on_return_date(customer):
    # customer: sequence of (first_name, surname)
    print('Customer Name is: %s %s' % (customer[0], customer[1]))
def customers_on_return_date():
    print('The following customers has reached to the return date of their leasing time')
def add_customer_to_database():
    print('A new customer added to the database')
def add_quantity_to_database():
    print('The car quantity data added to the database')
def add_leasing_info_to_database():
    print('The leasing info added to the database')
def drop_customer_from_database():
    print('The entered customer removed from the database')
def drop_car_from_database():
    print('The entered car quantity removed from the database')
def drop_leasing_info_from_database():
    print('The entered leasing information removed from the database')
def update_customer_in_database():
    print('The entered customer information has been updated')
def update_car_in_database():
    print('The entered car information has been updated')
def print_cars_per_customer():
    print('The following cars have been leased by the following customer')
def print_car_models(car, count):
    # car: sequence where [1]=model, [2]=manufacture, [3]=product year
    print('%d car model: %s \n\tManufacture: %s Product Year: %s' % (count, car[1], car[2], car[3]))
def car_out_of_quantity():
    print('The info is not available due to depletion of inventory')
def print_exception(exception):
    print('The exception is: ', exception)
| 29.168831 | 119 | 0.685367 |
00571b378ab2efe033a97ef545d1c4d82717e1ae | 1,078 | py | Python | send_email.py | Smendowski/hidden-communication-using-covert-channels | 47c04822925cedbac9a356e49705eae81db3419f | [
"MIT"
] | 4 | 2021-12-08T08:32:52.000Z | 2021-12-14T10:54:27.000Z | send_email.py | Smendowski/hidden-communication-using-covert-channels | 47c04822925cedbac9a356e49705eae81db3419f | [
"MIT"
] | null | null | null | send_email.py | Smendowski/hidden-communication-using-covert-channels | 47c04822925cedbac9a356e49705eae81db3419f | [
"MIT"
] | null | null | null | from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib, ssl
import lorem
import os
sender_email = os.environ.get("SCS_SENDER_EMAIL")
receiver_email = os.environ.get("SCS_RECEIVER_EMAIL")
password = os.environ.get("SCS_EMAIL_PASSWORD")
if None in [password, sender_email, receiver_email]:
print("Set the necessary environment variables.")
raise SystemExit
message = MIMEMultipart("alternative")
message["Subject"] = "SCS2021"
message["From"] = sender_email
message["To"] = receiver_email
hidden_information = ("".join([lorem.text() for _ in range(5)])).replace("\n", "")
message.add_header('SCS2021', "%s" % hidden_information)
content = """\
Hi,
Are You able to find me?
"""
mail_content = MIMEText(content, "plain")
message.attach(mail_content)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(
sender_email, receiver_email, message.as_string()
)
| 26.95 | 82 | 0.749536 |
8cfe0f5a58769543d05d013976434a5403ecd5f9 | 361 | py | Python | day7/d7p2.py | flowgrow/adventofcode2021 | b6e316a388726031ff500ce77d1ec5d5b7da0658 | [
"MIT"
] | null | null | null | day7/d7p2.py | flowgrow/adventofcode2021 | b6e316a388726031ff500ce77d1ec5d5b7da0658 | [
"MIT"
] | null | null | null | day7/d7p2.py | flowgrow/adventofcode2021 | b6e316a388726031ff500ce77d1ec5d5b7da0658 | [
"MIT"
] | null | null | null | input = open('input.txt', 'r')
data = list(map(int, input.readline().strip().split(',')))
data.sort()
min_data = min(data)
max_data = max(data)
spread = max_data - min_data
summed_fuel = [0] * spread
for key in data:
for i in range(min_data, max_data):
diff = abs(i - key)
summed_fuel[i] += (diff * (diff+1)) / 2
print(min(summed_fuel))
| 21.235294 | 58 | 0.620499 |
dac95bf184fa5089258b31851a1a105a90e75040 | 1,155 | py | Python | src/PySurprised/svms_preprocessing.py | FDUJiaG/PyML-Course | f606b5eab8dca928532263a9fe9c169549971d42 | [
"MIT"
] | 1 | 2020-05-21T08:23:08.000Z | 2020-05-21T08:23:08.000Z | src/PySurprised/svms_preprocessing.py | FDUJiaG/PyML-Course | f606b5eab8dca928532263a9fe9c169549971d42 | [
"MIT"
] | null | null | null | src/PySurprised/svms_preprocessing.py | FDUJiaG/PyML-Course | f606b5eab8dca928532263a9fe9c169549971d42 | [
"MIT"
] | 1 | 2020-12-07T07:04:34.000Z | 2020-12-07T07:04:34.000Z | from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
# Demo: manual min-max scaling before fitting an SVC on the breast-cancer set.
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, random_state=0)
# Per-feature minimum over the training set.
min_on_training = X_train.min(axis=0)
# Per-feature range (max - min) over the training set.
range_on_training = (X_train-min_on_training).max(axis=0)
# Subtract the minimum, then divide by the range, so that every feature
# ends up with min=0 and max=1.
X_train_scaled = (X_train - min_on_training) / range_on_training
print("Minimum for each feature\n{}".format(X_train_scaled.min(axis=0)))
print("Maximum for each feature\n{}".format(X_train_scaled.max(axis=0)))
# Apply the SAME training-set minimum and range to the test set
# (the scaler must never be refit on test data).
X_test_scaled = (X_test-min_on_training)/range_on_training
# Default SVC first ...
svc = SVC()
svc.fit(X_train_scaled, y_train)
print("Accuracy on training set:{:.3f}".format(svc.score(X_train_scaled, y_train)))
print("Accuracy on test set:{:.3f}".format(svc.score(X_test_scaled, y_test)))
# ... then with a much larger C (weaker regularization, fits harder).
svc = SVC(C=1000)
svc.fit(X_train_scaled, y_train)
print("Accuracy on training set:{:.3f}".format(svc.score(X_train_scaled, y_train)))
print("Accuracy on test set:{:.3f}".format(svc.score(X_test_scaled, y_test)))
8d02ea67b6d2debc8bb1b368166ccb773abde142 | 3,576 | py | Python | lib/requests/structures.py | ihdavids/sublime-text-trello | ee54bfad5ba7128e560cf515db57d8c2ed586a42 | [
"MIT"
] | 303 | 2015-01-03T23:47:42.000Z | 2022-02-07T11:50:12.000Z | lib/requests/structures.py | ihdavids/sublime-text-trello | ee54bfad5ba7128e560cf515db57d8c2ed586a42 | [
"MIT"
] | 94 | 2015-01-01T00:26:48.000Z | 2021-08-24T10:40:25.000Z | vendor/packages/requests/requests/structures.py | willingc/new-mini-tasks | 9699f6b99f5dd2d4613b188bda58d8da6f44861e | [
"Apache-2.0"
] | 162 | 2015-01-01T00:21:16.000Z | 2022-02-23T02:36:04.000Z | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
import collections.abc
import os
from itertools import islice
class IteratorProxy(object):
    """Thin wrapper around an iterator, adding best-effort length and read()."""

    def __init__(self, i):
        self.i = i

    def __iter__(self):
        # Hand back the wrapped object itself; callers are expected to pass
        # an actual iterator in.
        return self.i

    def __len__(self):
        wrapped = self.i
        if hasattr(wrapped, '__len__'):
            return len(wrapped)
        if hasattr(wrapped, 'len'):
            return wrapped.len
        if hasattr(wrapped, 'fileno'):
            # File-like object: size on disk stands in for the length.
            return os.fstat(wrapped.fileno()).st_size
        # Falls through to None when no size source exists (as before).

    def read(self, n):
        # Consume and concatenate up to n items from the iterator.
        return "".join(islice(self.i, n))
class CaseInsensitiveDict(collections.abc.MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``collections.abc.MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive:

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.

    FIX: the base class used to be ``collections.MutableMapping`` (and
    ``__eq__`` checked ``collections.Mapping``); those aliases were
    deprecated in Python 3.3 and removed in 3.10, so this now uses
    ``collections.abc`` explicitly.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (original-cased key, value).
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, collections.abc.Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
class LookupDict(dict):
    """dict subclass whose entries live in the instance's attribute namespace.

    Missing keys resolve to None instead of raising KeyError.
    """

    def __init__(self, name=None):
        super(LookupDict, self).__init__()
        self.name = name

    def __repr__(self):
        return "<lookup '%s'>" % (self.name,)

    def __getitem__(self, key):
        # The backing store is __dict__, not the dict payload itself;
        # absent keys deliberately fall through to None.
        return self.__dict__.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| 27.72093 | 75 | 0.608781 |
42071b842c972d845c50b57f1258dd14dc0f9e43 | 26,075 | py | Python | foundation/place.py | futursolo/furtherland | 33ead7d4e651ed3154c8047e3bdc4bb2871e4468 | [
"Apache-2.0"
] | null | null | null | foundation/place.py | futursolo/furtherland | 33ead7d4e651ed3154c8047e3bdc4bb2871e4468 | [
"Apache-2.0"
] | null | null | null | foundation/place.py | futursolo/furtherland | 33ead7d4e651ed3154c8047e3bdc4bb2871e4468 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Futur Solo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tornado.web import *
from tornado.gen import *
from tornado.escape import *
import tornado.httpclient
from collections import OrderedDict
import json
import os
import re
import markdown
import hashlib
import random
import string
import functools
import time
import datetime
import feedgen.feed
def decorator_with_args(decorator_to_enhance):
    """Turn ``decorator(func, *args, **kwargs)`` into a decorator factory.

    The result is used as ``@decorated(*args, **kwargs)``: the factory call
    captures the arguments, and the returned callable applies the enhanced
    decorator to the target function.
    """
    def factory(*args, **kwargs):
        return lambda func: decorator_to_enhance(func, *args, **kwargs)
    return factory
@decorator_with_args
def slug_validation(func, *args, **kwargs):
    """Validate a handler's positional URL args against a list of types.

    Used as ``@slug_validation(["slug", "number"])``: each positional
    argument is passed through ``self.value_validation`` with the matching
    type name; any failure aborts the request with a 404.
    """
    @functools.wraps(func)
    def wrapper(self, *func_args, **func_kwargs):
        validators = args[0]
        cleaned = []
        for index, arg_type in enumerate(validators):
            checked = self.value_validation(arg_type, func_args[index])
            if checked is False:
                raise HTTPError(404)
            cleaned.append(checked)
        return func(self, *cleaned, **func_kwargs)
    return wrapper
def visitor_only(func):
    """Restrict a handler method to anonymous visitors.

    A signed-in user is redirected to ``self.next_url`` instead of
    reaching the wrapped method.
    """
    @functools.wraps(func)
    def guard(self, *args, **kwargs):
        if not self.current_user:
            return func(self, *args, **kwargs)
        self.redirect(self.next_url)
        return None
    return guard
class PlacesOfInterest(RequestHandler):
    """Base handler shared by every route.

    Provides per-request setup, cookie-backed authentication, input
    validation/conversion, database query helpers and themed rendering.
    """

    # Filled in by prepare(); None means an anonymous visitor.
    current_user = None

    @coroutine
    def prepare(self):
        """Per-request setup: timing, database handle, user and config."""
        self.start_time = time.time()
        self.furtherland = self.settings["further_land"]
        self.render_list = {}
        self.memories = self.settings["historial_records"]
        self.memories.initialize()
        self.current_user = yield self.get_current_user()
        self.config = yield self.get_config()
        self.next_url = self.get_arg("next", arg_type="link", default="/")
        # Prefer proxy headers so the real client address survives a
        # reverse proxy in front of the application.
        self.remote_ip = self.request.headers.get(
            "X-Forwarded-For", self.request.headers.get(
                "X-Real-Ip", self.request.remote_ip))
        self.using_ssl = (self.request.headers.get(
            "X-Scheme", "http") == "https")
        self.safe_land = self.settings["safe_land"]
        if self.safe_land:
            # HSTS for 39420000 seconds (roughly 15 months).
            self.set_header("strict-transport-security",
                            "max-age=39420000")

    @coroutine
    def get_config(self):
        """Load every Configs row into a flat {_id: value} dict, cached."""
        if not hasattr(self, "_config"):
            book = self.memories.select("Configs")
            book.find().length(0)
            yield book.do()
            result = book.result()
            self._config = {}
            for value in result.values():
                self._config[value["_id"]] = value["value"]
        return self._config

    @coroutine
    def get_current_user(self):
        """Resolve the signed-in master from signed cookies, else None."""
        if not hasattr(self, "_current_user"):
            user_id = self.get_scookie("user_id", arg_type="number")
            device_id = self.get_scookie("device_id", arg_type="hash")
            agent_auth = self.get_scookie("agent_auth", arg_type="hash")
            if not (user_id and device_id and agent_auth):
                self._current_user = None
            else:
                user = yield self.get_user(_id=user_id)
                # The auth cookie must equal sha256(device_id + stored
                # password hash); anything else counts as anonymous.
                if self.hash((device_id + user["password"]),
                             "sha256") != agent_auth:
                    self._current_user = None
                else:
                    self._current_user = user
        return (self._current_user)

    def get_arg(self, arg, default=None, arg_type="origin"):
        """Return a request argument validated as ``arg_type``.

        Missing/empty values (including the literal string "None") fall
        back to ``default``.
        """
        result = RequestHandler.get_argument(self, arg, None)
        if isinstance(result, bytes):
            result = str(result.decode())
        else:
            result = str(result)
        if (not result) or (result == "None"):
            return default
        return self.value_validation(arg_type, result)

    def get_scookie(self, arg, default=None, arg_type="origin"):
        """Like get_arg, but reads a signed cookie (max age 181 days)."""
        result = RequestHandler.get_secure_cookie(
            self, arg, None, max_age_days=181)
        if isinstance(result, bytes):
            result = str(result.decode())
        else:
            result = str(result)
        if (not result) or (result == "None"):
            return default
        return self.value_validation(arg_type, result)

    def set_scookie(self, arg, value="", expires_days=30, httponly=False):
        """Set a signed cookie; marked Secure when the site runs over TLS."""
        if not isinstance(value, str):
            value = str(value)
        if self.safe_land:
            secure = True
        else:
            secure = False
        RequestHandler.set_secure_cookie(
            self, arg, value, expires_days,
            httponly=httponly, secure=secure)

    def value_validation(self, arg_type, value):
        """Validate/convert ``value`` for ``arg_type``.

        Returns the (possibly converted) value, or False when it does not
        match the expected shape. ``origin`` passes anything through;
        unknown arg_types implicitly return None.
        """
        if arg_type == "origin":
            return value
        elif arg_type == "mail_address":
            mail_address = str(value)
            if re.match(
                r"^([\._+\-a-zA-Z0-9]+)@{1}([a-zA-Z0-9\-]+)\.([a-zA-Z0-9\-]+)$",
                    mail_address) is None:
                return False
            else:
                return mail_address
        elif arg_type == "hash":
            hash_value = str(value)
            if re.match(r"^([a-zA-Z0-9]+)$", hash_value) is None:
                return False
            else:
                return hash_value
        elif arg_type == "slug":
            hash_value = str(value)
            if re.match(r"^([\-a-zA-Z0-9]+)$", hash_value) is None:
                return False
            else:
                return hash_value
        elif arg_type == "number":
            number = str(value)
            if re.match(r"^([\-\+0-9]+)$", number) is None:
                return False
            else:
                return int(number)
        elif arg_type == "boolean":
            boo = str(value).lower()
            # Anything other than "1"/"true"/"on" is treated as False.
            if boo in ("1", "true", "on"):
                return True
            else:
                return False
        elif arg_type == "username":
            # Renamed local: it used to shadow the imported ``string`` module.
            text = str(value)
            if re.match(r"^([ a-zA-Z]+)$", text) is None:
                return False
            else:
                return text
        elif arg_type == "link":
            link = str(value)
            # NOTE(review): this pattern matches everything, so "link" is
            # effectively unvalidated -- confirm intent before tightening.
            if re.match(r"^(.*)$", link) is None:
                return False
            else:
                return link

    def hash(self, target, method):
        """Hex digest of ``target`` using md5 / sha1 / sha256.

        Returns None for an unknown method name.
        """
        if not isinstance(target, bytes):
            target = target.encode(encoding="utf-8")
        if method == "sha1":
            return hashlib.sha1(target).hexdigest()
        elif method == "sha256":
            return hashlib.sha256(target).hexdigest()
        elif method == "md5":
            return hashlib.md5(target).hexdigest()

    @coroutine
    def get_user(self, with_privacy=True, **kwargs):
        """Look up a master by exactly one keyword condition, memoized.

        With ``with_privacy=False`` the password/otp/email fields are
        stripped from the returned copy.

        NOTE(review): the ``user_list`` branch skips cache population, so
        reaching the final lookup with ``condition == "user_list"`` would
        fail; that path looks unused -- verify before relying on it.
        """
        condition = list(kwargs.keys())[0]
        value = kwargs[condition]
        if condition != "user_list":
            if not hasattr(self, "_master_list"):
                self._master_list = {}
            if condition not in self._master_list.keys():
                self._master_list[condition] = {}
            if value not in self._master_list[condition].keys():
                book = self.memories.select("Masters")
                book.find({condition: value}).length(1)
                yield book.do()
                self._master_list[condition][value] = book.result()
        user = {}
        user.update(self._master_list[condition][value])
        if not with_privacy:
            del user["password"]
            del user["otp_key"]
            del user["email"]
        return user

    def get_random(self, length):
        """Random token of unique letters/digits (length must be <= 62)."""
        return "".join(random.sample(string.ascii_letters + string.digits,
                                     length))

    @coroutine
    def get_class(self):
        # Placeholder: category/class lookup is not implemented yet.
        pass

    @coroutine
    def get_writing(self, only_published=True, **kwargs):
        """Query writings by class_id, an id list, a slug, or an id."""
        book = self.memories.select("Writings")
        find_condition = {}
        if only_published is True:
            find_condition["publish"] = True
        if "class_id" in kwargs.keys():
            # class_id == 0 means "all classes".
            if kwargs["class_id"] != 0:
                find_condition["class_id"] = kwargs["class_id"]
            book.find(find_condition)
            book.sort([["time", False]])
            book.length(0, force_dict=True)
        elif "writing_list" in kwargs.keys():
            find_condition["_id"] = {"$in": kwargs["writing_list"]}
            book.find(find_condition, ["content"])
            book.sort([["time", False]])
            book.length(0, force_dict=True)
        elif "slug" in kwargs.keys():
            find_condition["slug"] = kwargs["slug"]
            book.find(find_condition)
        elif "id" in kwargs.keys():
            find_condition["_id"] = kwargs["id"]
            book.find(find_condition)
        yield book.do()
        return book.result()

    @coroutine
    def get_page(self, only_published=True, **kwargs):
        """Query static pages by class_id, a slug, or an id."""
        book = self.memories.select("Pages")
        find_condition = {}
        if only_published is True:
            find_condition["publish"] = True
        if "class_id" in kwargs.keys():
            if kwargs["class_id"] != 0:
                find_condition["class_id"] = kwargs["class_id"]
            book.find(find_condition)
            book.sort([["time", False]])
            book.length(0, force_dict=True)
        elif "slug" in kwargs.keys():
            find_condition["slug"] = kwargs["slug"]
            book.find(find_condition)
        elif "id" in kwargs.keys():
            find_condition["_id"] = kwargs["id"]
            book.find(find_condition)
        yield book.do()
        return book.result()

    @coroutine
    def get_reply(self, only_permitted=True, with_privacy=False, **kwargs):
        """Query replies for one writing, or a single reply by id.

        Unless ``with_privacy`` is set, email and ip are excluded.
        """
        book = self.memories.select("Replies")
        ignore = None
        if not with_privacy:
            ignore = ["email", "ip"]
        find_condition = {}
        if only_permitted is True:
            find_condition["permit"] = True
        if "writing_id" in kwargs.keys():
            if kwargs["writing_id"] != 0:
                find_condition["writing_id"] = kwargs["writing_id"]
            book.find(find_condition, ignore)
            book.sort([["time", True]])
            book.length(0, force_dict=True)
        elif "id" in kwargs.keys():
            find_condition["_id"] = kwargs["id"]
            book.find(find_condition, ignore)
        yield book.do()
        return book.result()

    @coroutine
    def issue_id(self, working_type):
        """Atomically take the next sequential id for a collection."""
        book = self.memories.select("Counts")
        book.find_modify({"_id": working_type}, ["number"])
        yield book.do()
        return int(book.result()["number"])

    def make_md(self, content, more=True):
        """Render markdown (GFM); more=False truncates at <!--more-->."""
        if not more:
            content = content.split("<!--more-->")[0]
        return markdown.markdown(content, extensions=["gfm"])

    def static_url(self, path, include_host=None, nutrition=True, **kwargs):
        """static_url that prefixes the active theme ("nutrition") path."""
        if nutrition:
            path = "nutrition/" + self.config["nutrition_type"] + "/" + path
        return RequestHandler.static_url(
            self, path, include_host=include_host, **kwargs)

    def render(self, page, nutrition=True):
        """Render a theme template with the accumulated render_list."""
        # Derive the <title> from origin_title unless already set.
        if ("page_title" not in self.render_list.keys() and
                "origin_title" in self.render_list.keys()):
            self.render_list["page_title"] = (
                self.render_list["origin_title"] +
                " - " + self.config["site_name"])
        if not self.render_list.pop("__without_database", False):
            self.render_list["config"] = self.config
        self.render_list["FurtherLand"] = self.furtherland
        self.set_header("Furtherland-Used-Time",
                        int((time.time() - self.start_time) * 1000))
        self.xsrf_form_html()
        if nutrition:
            page = "nutrition/" + self.config["nutrition_type"] + "/" + page
        RequestHandler.render(self, page, **self.render_list)

    @coroutine
    def get_count(self):
        """Content counters (totals and pending items) for the dashboard."""
        result = {}

        book = self.memories.select("Writings").count()
        yield book.do()
        result["writings"] = book.result()

        book.count(do_find=True, condition={"publish": False})
        yield book.do()
        result["writings_draft"] = book.result()

        book = self.memories.select("Pages").count()
        yield book.do()
        result["pages"] = book.result()

        book.count(do_find=True, condition={"publish": False})
        yield book.do()
        result["pages_draft"] = book.result()

        book = self.memories.select("Replies").count()
        yield book.do()
        result["replies"] = book.result()

        book.count(do_find=True, condition={"permit": False})
        yield book.do()
        result["replies_waiting_permit"] = book.result()

        return result

    def escape(self, item, item_type="html"):
        """Escape ``item`` for HTML or URL embedding."""
        if item_type == "html":
            return xhtml_escape(item)
        elif item_type == "url":
            return url_escape(item)
        else:
            raise HTTPError(500)

    def write_error(self, status_code, **kwargs):
        """Render a themed 404 page, or the generic error template."""
        # FIX: imported locally because this module never imports
        # ``traceback`` itself -- previously the name only resolved if it
        # leaked in through ``from tornado.web import *``.
        import traceback

        if status_code == 404:
            self.render_list["origin_title"] = "出错了!"
            self.render_list["slug"] = "not-found"
            self.render_list["sub_slug"] = ""
            self.render_list["current_content_id"] = 0
            self.render("model.htm")
            return
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # Debug mode: emit the full traceback as plain text.
            self.set_header("Content-Type", "text/plain")
            for line in traceback.format_exception(*kwargs["exc_info"]):
                self.write(line)
            self.finish()
        else:
            self.render_list["status_code"] = status_code
            self.render_list["error_message"] = self._reason
            self.finish(
                self.render_string(
                    "management/error.htm",
                    __without_database=True,
                    **self.render_list))
class CentralSquare(PlacesOfInterest):
    """Front page: renders every published writing as a teaser list."""

    @coroutine
    def get(self):
        writings = yield self.get_writing(class_id=0)
        for entry in writings.values():
            entry["author"] = yield self.get_user(
                _id=entry["author"], with_privacy=False)
            # Teaser only: cut the body at the <!--more--> marker.
            entry["content"] = self.make_md(entry["content"], more=False)
        self.render_list.update({
            "contents": writings,
            "origin_title": "首页",
            "slug": "index",
            "sub_slug": "",
            "current_content_id": 0,
        })
        self.render("model.htm")
class ConferenceHall(PlacesOfInterest):
    """Single-writing view, addressed by URL slug."""

    @coroutine
    @slug_validation(["slug"])
    def get(self, writing_slug):
        writing = yield self.get_writing(slug=writing_slug)
        if not writing:
            raise HTTPError(404)
        writing["author"] = yield self.get_user(
            _id=writing["author"], with_privacy=False)
        writing["content"] = self.make_md(writing["content"])
        self.render_list.update({
            "content": writing,
            "origin_title": writing["title"],
            "slug": "writing",
            "sub_slug": writing["slug"],
            "current_content_id": writing["_id"],
        })
        self.render("model.htm")
class MemorialWall(PlacesOfInterest):
    """Single static-page view, addressed by URL slug."""

    @coroutine
    @slug_validation(["slug"])
    def get(self, page_slug):
        page = yield self.get_page(slug=page_slug)
        if not page:
            raise HTTPError(404)
        page["author"] = yield self.get_user(
            _id=page["author"], with_privacy=False)
        page["content"] = self.make_md(page["content"])
        self.render_list.update({
            "content": page,
            "origin_title": page["title"],
            "slug": "page",
            "sub_slug": page["slug"],
            "current_content_id": page["_id"],
        })
        self.render("model.htm")
class NewsAnnouncement(PlacesOfInterest):
    """Atom feed built from every published writing."""

    @coroutine
    def get(self):
        self.set_header("Content-Type", "application/xml; charset=\"utf-8\"")
        writings = yield self.get_writing(class_id=0)
        owner = yield self.get_user(_id=1)

        site_url = self.config["site_url"]
        feed = feedgen.feed.FeedGenerator()
        feed.id(site_url)
        feed.title(self.config["site_name"])
        feed.author({"name": owner["username"], "email": owner["email"]})
        feed.link(href=site_url, rel="alternate")
        feed.link(href=site_url + "/feed.xml", rel="self")
        feed.language("zh-CN")
        feed.logo(site_url + "/spirit/favicon.jpg")

        latest = 0
        for writing in writings.values():
            entry_url = (site_url + "/writings/{0}.htm").format(
                writing["slug"])
            entry = feed.add_entry()
            entry.id(entry_url)
            entry.link(href=entry_url)
            entry.title(writing["title"])
            entry.content(self.make_md(writing["content"]))
            # Track the newest entry time for the feed-level timestamp.
            latest = max(latest, writing["time"])
            entry.updated(
                datetime.datetime.fromtimestamp(writing["time"]).replace(
                    tzinfo=datetime.timezone.utc))
        feed.updated(datetime.datetime.fromtimestamp(latest).replace(
            tzinfo=datetime.timezone.utc))

        self.write(feed.atom_str(pretty=True))
class HistoryLibrary(PlacesOfInterest):
    # Placeholder route handler; the archive view is not implemented yet.
    pass
class TerminalService(PlacesOfInterest):
    """AJAX endpoint: the frontend posts an ``action`` name and this handler
    dispatches it to the coroutine method of the same name."""

    @coroutine
    def post(self):
        """Dispatch the requested action.

        SECURITY NOTE(review): ``action`` is client-controlled, and
        ``hasattr``/``getattr`` will resolve *any* attribute on the handler,
        including inherited helpers. A whitelist of action names would be
        safer -- flagged only, to preserve existing behavior.
        """
        action = self.get_arg("action", default=None, arg_type="link")
        if hasattr(self, action):
            yield getattr(self, action)()
        else:
            raise HTTPError(500)

    @coroutine
    def load_index(self):
        """Return all published writings (teaser text only) as JSON."""
        contents = yield self.get_writing(class_id=0)
        for key in contents:
            contents[key]["author"] = yield self.get_user(
                _id=contents[key]["author"], with_privacy=False)
            # Keep only the text before the <!--more--> teaser marker.
            contents[key]["content"] = contents[key]["content"].split(
                "<!--more-->")[0]
        self.finish(json.dumps(list(contents.values())))

    @coroutine
    def load_writing(self):
        """Return one writing, looked up by slug, as JSON."""
        writing_slug = self.get_arg("slug", arg_type="slug")
        writing = yield self.get_writing(slug=writing_slug)
        if not writing:
            self.finish(json.dumps({
                "success": False,
                "reason": "notfound"
            }))
            return
        writing["author"] = yield self.get_user(_id=writing["author"],
                                                with_privacy=False)
        writing["success"] = True
        self.finish(json.dumps(writing))

    @coroutine
    def load_page(self):
        """Return one static page, looked up by slug, as JSON."""
        page_slug = self.get_arg("slug", arg_type="slug")
        page = yield self.get_page(slug=page_slug)
        if not page:
            self.finish(json.dumps({
                "success": False,
                "reason": "notfound"
            }))
            return
        page["author"] = yield self.get_user(_id=page["author"],
                                             with_privacy=False)
        page["success"] = True
        self.finish(json.dumps(page))

    @coroutine
    def load_reply(self):
        """Return replies: all for one writing ("list") or one ("single")."""
        writing_id = self.get_arg("writing", arg_type="number")
        reply_id = self.get_arg("reply", arg_type="number")
        method = self.get_arg("method", arg_type="hash")
        if method == "list" and writing_id:
            result = yield self.get_reply(writing_id=writing_id)
        elif method == "single" and reply_id:
            result = yield self.get_reply(id=reply_id)
        else:
            raise HTTPError(500)
        self.finish(json.dumps(result))

    @coroutine
    def new_reply(self):
        """Create a reply; anonymous replies are held for moderation."""
        writing_id = self.get_arg("writing", arg_type="number")
        reply = OrderedDict()
        reply["writing_id"] = writing_id
        if not self.current_user:
            reply["master"] = False
            reply["name"] = self.get_arg("name", arg_type="origin")
            reply["email"] = self.get_arg("email", arg_type="mail_address")
            reply["homepage"] = self.get_arg("homepage", arg_type="link")
            if not (reply["name"] and reply["email"]):
                result = {
                    "success": False,
                    "reason": "incomplation"  # sic -- kept: clients may match it
                }
                self.finish(json.dumps(result))
                return
            reply["name"] = self.escape(reply["name"], item_type="html")
            reply["permit"] = False
        else:
            # The signed-in master replies with profile data, pre-approved.
            reply["master"] = True
            reply["name"] = self.current_user["username"]
            reply["email"] = self.current_user["email"]
            reply["homepage"] = self.current_user["homepage"]
            reply["permit"] = True
        reply["ip"] = self.remote_ip
        reply["time"] = int(time.time())
        # Gravatar key: md5 of the lowercased address.
        reply["emailmd5"] = self.hash(reply["email"].lower(),
                                      "md5")
        content = self.escape(self.get_arg("content", arg_type="origin"),
                              item_type="html")
        # NOTE(review): these substitutions replace "data:"/"javascript:"
        # with the same lowercased text, so they only normalize case and do
        # NOT neutralize the URI schemes. Looks like an ineffective XSS
        # guard -- confirm the original intent before changing the output.
        content = re.sub(
            re.compile(r"(data:)", re.IGNORECASE), "data:", content)
        content = re.sub(
            re.compile(
                r"(javascript:)", re.IGNORECASE), "javascript:", content)
        reply["content"] = content
        reply["_id"] = yield self.issue_id("Replies")
        book = self.memories.select("Replies")
        book.add(reply)
        result = {}
        try:
            yield book.do()
            result["success"] = reply["master"]
            result["id"] = reply["_id"]
            if not reply["master"]:
                result["reason"] = "waitforcheck"
            if result["success"]:
                result.update(reply)
        except Exception:  # was a bare ``except:``; don't swallow SystemExit
            result["success"] = False
            result["reason"] = "unkonwn"  # sic -- kept for client compatibility
        self.finish(json.dumps(result))
class IllustratePlace(PlacesOfInterest):
    # Gravatar proxy with a 15-day on-disk cache keyed by email md5 and size.
    @coroutine
    @slug_validation(["hash"])
    def get(self, slug):
        # ``slug`` is the email md5; ``s`` is pixel size, ``d`` the
        # Gravatar fallback parameter.
        size = self.get_arg("s", default=80, arg_type="number")
        default = self.get_arg("d", default=404, arg_type="hash")
        current_time = int(time.time())
        path = self.settings["static_path"] + "/public/avatar/" + slug
        if not os.path.exists(path):
            os.makedirs(path)
        # One cached file per requested size.
        file_path = path + "/" + str(size)
        if os.path.exists(file_path):
            book = self.memories.select("Publics")
            book.find(
                {"filename": str(size), "email_md5": slug, "type": "avatar"})
            yield book.do()
            avatar_info = book.result()
            if not avatar_info:
                # Orphaned file with no DB record: drop it and re-fetch.
                os.remove(file_path)
                book.erase(
                    {
                        "filename": str(size),
                        "email_md5": slug,
                        "type": "avatar"
                    }
                )
                yield book.do()
            elif (current_time - avatar_info["time"]) <= (15 * 24 * 60 * 60):
                # Fresh cache hit (younger than 15 days): serve from disk.
                self.set_header(
                    "content-type", avatar_info["content_type"])
                with open(file_path, "rb") as f:
                    self.finish(f.read())
                return
            else:
                # Stale cache entry: purge file and record, then re-fetch.
                os.remove(file_path)
                book.erase(
                    {
                        "filename": str(size),
                        "email_md5": slug,
                        "type": "avatar"
                    }
                )
                yield book.do()
        # Cache miss: fetch the avatar from Gravatar.
        client = tornado.httpclient.AsyncHTTPClient()
        link = (
            "https://secure.gravatar.com/avatar/" + slug + "?s=" +
            str(size) + "&d=" + str(default))
        response = yield client.fetch(link)
        # NOTE(review): fetch() raises on HTTP errors by default, so this
        # branch may be unreachable -- confirm before relying on it.
        if response.error:
            raise HTTPError(response.code)
        avatar = response.body
        content_type = response.headers.get("content-type")
        avatar_info = OrderedDict()
        avatar_info["time"] = current_time
        avatar_info["type"] = "avatar"
        avatar_info["content_type"] = content_type
        avatar_info["filename"] = str(size)
        avatar_info["filepath"] = file_path
        avatar_info["fileurl"] = None
        avatar_info["email_md5"] = slug
        avatar_info["_id"] = yield self.issue_id("Publics")
        # Persist the image to disk first, then its metadata record.
        with open(file_path, "wb") as f:
            f.write(avatar)
        book = self.memories.select("Publics")
        book.find(
            {"filename": str(size), "email_md5": slug, "type": "avatar"})
        yield book.do()
        if book.result():
            # Remove any stale record before inserting the new one.
            book.erase(
                {
                    "filename": str(size),
                    "email_md5": slug,
                    "type": "avatar"
                }
            )
            yield book.do()
        book.add(avatar_info)
        yield book.do()
        self.set_header("content-type", content_type)
        self.finish(avatar)
class LostAndFoundPlace(PlacesOfInterest):
    """Catch-all route: every request, GET or POST, ends in a 404."""

    def get(self, *args, **kwargs):
        raise HTTPError(404)

    def post(self, *args, **kwargs):
        raise HTTPError(404)
| 36.015193 | 78 | 0.548878 |
008ff549bcba37cdd266c0ed206db02f0bc12448 | 1,421 | py | Python | convert_image.py | MajFontana/pil-picture-mosaic | 336beb1f8d2e7066df3fe029b5260deb167ff866 | [
"MIT"
] | null | null | null | convert_image.py | MajFontana/pil-picture-mosaic | 336beb1f8d2e7066df3fe029b5260deb167ff866 | [
"MIT"
] | null | null | null | convert_image.py | MajFontana/pil-picture-mosaic | 336beb1f8d2e7066df3fe029b5260deb167ff866 | [
"MIT"
] | null | null | null | import os
import pickle
import random
from PIL import Image
IMAGE = "flock/HIRES/KittyHawk_E3_withLogo_001-1.jpg"
PALETTE = "PALETTE/"
MINRES = 35
RESAMP = Image.LANCZOS
PREVIEW_SCALE = 20
BLUEPRINT = "blueprint.png"
PREVIEW = "preview.png"
COOLDOWN = 1
RANDOMIZE_NOISE = True
path = os.path.join(PALETTE, "palette.pickle")
with open(path, "rb") as f:
palette = pickle.load(f)
colors = list(palette.keys())
print("Processing image ...")
img = Image.open(IMAGE)
side = min(img.size)
newres = [int(img.size[i] / side * MINRES) for i in range(2)]
img = img.resize(newres, RESAMP)
if COOLDOWN:
hist = [None] * COOLDOWN
bp = Image.new("RGB", newres)
for y in range(newres[1]):
for x in range(newres[0]):
pixel = img.getpixel((x, y))
deltas = [sum([abs(pixel[i] - color[i]) for i in range(3)]) / 3 for color in colors]
while True:
idx = deltas.index(min(deltas))
mapped = colors[idx]
if COOLDOWN and mapped in hist:
deltas[idx] = float("inf")
else:
break
bp.putpixel((x, y), mapped)
if COOLDOWN:
if not RANDOMIZE_NOISE or random.randint(0, 1):
hist.pop(0)
hist.append(mapped)
bp.save(BLUEPRINT)
prevsize = [newres[i] * PREVIEW_SCALE for i in range(2)]
bp = bp.resize(prevsize, Image.NEAREST)
bp.save(PREVIEW)
print("Finished")
| 25.836364 | 92 | 0.611541 |
ab7a7ff86caa0abbb98ea192464297fcf49874ae | 121 | py | Python | tests/urls.py | htpeter/P | 050601f51efa21c6a10596c8e191fee3cbd4f533 | [
"MIT"
] | 1 | 2022-02-14T19:01:44.000Z | 2022-02-14T19:01:44.000Z | tests/urls.py | htpeter/P | 050601f51efa21c6a10596c8e191fee3cbd4f533 | [
"MIT"
] | null | null | null | tests/urls.py | htpeter/P | 050601f51efa21c6a10596c8e191fee3cbd4f533 | [
"MIT"
] | 1 | 2021-05-11T17:18:00.000Z | 2021-05-11T17:18:00.000Z | from django.urls import include, path
urlpatterns = [path("", include("project-name.urls", namespace="project-name"))]
| 24.2 | 80 | 0.727273 |
70451092c377cb7ffed33fa6e3d5499dff491729 | 4,396 | py | Python | src/k8s-configuration/azext_k8s_configuration/commands.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-01-25T07:33:18.000Z | 2022-01-25T07:33:18.000Z | src/k8s-configuration/azext_k8s_configuration/commands.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/k8s-configuration/azext_k8s_configuration/commands.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_k8s_configuration._client_factory import (
k8s_configuration_fluxconfig_client,
k8s_configuration_sourcecontrol_client,
)
from .format import (
fluxconfig_deployed_object_list_table_format,
fluxconfig_deployed_object_show_table_format,
fluxconfig_list_table_format,
fluxconfig_show_table_format,
fluxconfig_kustomization_list_table_format,
fluxconfig_kustomization_show_table_format,
sourcecontrol_list_table_format,
sourcecontrol_show_table_format,
)
def load_command_table(self, _):
flux_configuration_custom_type = CliCommandType(
operations_tmpl="azext_k8s_configuration.providers.FluxConfigurationProvider#{}",
client_factory=k8s_configuration_fluxconfig_client,
)
source_control_configuration_custom_type = CliCommandType(
operations_tmpl="azext_k8s_configuration.providers.SourceControlConfigurationProvider#{}",
client_factory=k8s_configuration_sourcecontrol_client,
)
with self.command_group(
"k8s-configuration flux",
k8s_configuration_fluxconfig_client,
custom_command_type=flux_configuration_custom_type,
) as g:
g.custom_command("create", "create_config", supports_no_wait=True)
g.custom_command("update", "update_config", supports_no_wait=True)
g.custom_command(
"list", "list_configs", table_transformer=fluxconfig_list_table_format
)
g.custom_show_command(
"show", "show_config", table_transformer=fluxconfig_show_table_format
)
g.custom_command("delete", "delete_config", supports_no_wait=True)
with self.command_group(
"k8s-configuration flux kustomization",
k8s_configuration_fluxconfig_client,
custom_command_type=flux_configuration_custom_type,
) as g:
g.custom_command("create", "create_kustomization", supports_no_wait=True)
g.custom_command("update", "update_kustomization", supports_no_wait=True)
g.custom_command("delete", "delete_kustomization", supports_no_wait=True)
g.custom_command(
"list",
"list_kustomization",
table_transformer=fluxconfig_kustomization_list_table_format,
)
g.custom_show_command(
"show",
"show_kustomization",
table_transformer=fluxconfig_kustomization_show_table_format,
)
with self.command_group(
"k8s-configuration flux deployed-object",
k8s_configuration_fluxconfig_client,
custom_command_type=flux_configuration_custom_type,
) as g:
g.custom_command(
"list",
"list_deployed_object",
table_transformer=fluxconfig_deployed_object_list_table_format,
)
g.custom_show_command(
"show",
"show_deployed_object",
table_transformer=fluxconfig_deployed_object_show_table_format,
)
with self.command_group(
"k8s-configuration",
k8s_configuration_sourcecontrol_client,
custom_command_type=source_control_configuration_custom_type,
) as g:
g.custom_command(
"create",
"create_config",
deprecate_info=self.deprecate(redirect="k8s-configuration flux create"),
)
g.custom_command(
"list",
"list_configs",
table_transformer=sourcecontrol_list_table_format,
deprecate_info=self.deprecate(redirect="k8s-configuration flux list"),
)
g.custom_show_command(
"show",
"show_config",
table_transformer=sourcecontrol_show_table_format,
deprecate_info=self.deprecate(redirect="k8s-configuration flux show"),
)
g.custom_command(
"delete",
"delete_config",
confirmation=True,
deprecate_info=self.deprecate(redirect="k8s-configuration flux delete"),
)
| 38.902655 | 98 | 0.667197 |
44d71e1253eff59196ab4545f912e5f31937bd79 | 390 | py | Python | project/templatetags/app_filters.py | abhishekm47/restaurant-django | 56993f2269e27c9b932b5f172cdf4db1e95292aa | [
"MIT"
] | null | null | null | project/templatetags/app_filters.py | abhishekm47/restaurant-django | 56993f2269e27c9b932b5f172cdf4db1e95292aa | [
"MIT"
] | null | null | null | project/templatetags/app_filters.py | abhishekm47/restaurant-django | 56993f2269e27c9b932b5f172cdf4db1e95292aa | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.filter(name='previous')
def previous(some_list, current_index):
    """
    Returns the previous element of the list using the current index if it exists.
    Otherwise returns an empty string.
    """
    try:
        index = int(current_index) - 1
        if index < 0:
            # Fix: Python would wrap a negative index around to the *end* of
            # the list, which is not "the previous element" -- treat it as
            # missing instead.
            return ''
        return some_list[index]  # access the previous element
    except (IndexError, TypeError, ValueError):
        # Fix: narrowed from a bare `except:` so that only "no such element"
        # and "not an integer index" cases fall back to the empty string.
        return ''
769685e4f82ba1ee1e3e3a8b2c2ea0d6dfc12023 | 1,204 | py | Python | portscanner/classportscanner.py | Twiths/Ethical-hacking | 9d06326ca9890abfb396386fd00bc3d2eab49981 | [
"MIT"
] | null | null | null | portscanner/classportscanner.py | Twiths/Ethical-hacking | 9d06326ca9890abfb396386fd00bc3d2eab49981 | [
"MIT"
] | null | null | null | portscanner/classportscanner.py | Twiths/Ethical-hacking | 9d06326ca9890abfb396386fd00bc3d2eab49981 | [
"MIT"
] | null | null | null | import socket
from IPy import IP
from termcolor import colored
def scan(target):
    """Resolve `target` to an IP address and probe TCP ports 1-99."""
    resolved = check_ip(target)
    print('\n[- 0 scanning Target]: ' + str(target))
    for port_number in range(1, 100):
        scan_port(resolved, port_number)
#convert target name into ip address
def check_ip(ip):
    """Return `ip` unchanged when it is already a valid IP address;
    otherwise treat it as a hostname and resolve it through DNS."""
    try:
        IP(ip)
    except ValueError:
        return socket.gethostbyname(ip)
    return ip
def get_banner(s):
    """Read up to 1024 bytes from the connected socket and return them."""
    banner = s.recv(1024)
    return banner
#Try connection
def scan_port(ip_address, port):
    """Attempt a TCP connection to (ip_address, port) and report open ports.

    Best-effort probe: closed or filtered ports are silently skipped.
    """
    try:
        sock = socket.socket()
        try:
            sock.settimeout(0.5)
            sock.connect((ip_address, port))
            try:
                banner = get_banner(sock)
                print('[+] Open Port ' + str(port) + ' : ' + str(banner.decode().strip('\n')))
            except (OSError, UnicodeDecodeError):
                # Port is open but the service sent no (or undecodable) banner.
                print('[+] Open Port ' + str(port))
        finally:
            sock.close()  # fix: the socket used to leak on every probe
    except OSError:
        # Connection refused / timed out: port closed or filtered.
        # Fix: narrowed from bare `except:` (which also swallowed Ctrl-C).
        pass
if __name__ == '__main__':
    # Prompt for one target or a comma-separated list of targets, then scan
    # each one.  Port range is fixed at 1-99 inside scan().
    targets = input('[+] Enter Target/s to scan(Split multiple targets with a comma ,): ')
    # port_num = input('[+] Enter the number of ports you want to scan: ')
    if ',' in targets:
        for ip_add in targets.split(','):
            scan(ip_add.strip(' '))
    else:
        scan(targets)
| 25.083333 | 90 | 0.586379 |
d1abb706e5769ee8c5f6b42efd680ab5b9fa11bc | 2,467 | py | Python | dsa/challenges/queue_with_stacks/queue_with_stacks.py | joseph-zabaleta/data-structures-and-algorithms | b11b5ef50f52e3d505474fe5fffe4357933da251 | [
"MIT"
] | null | null | null | dsa/challenges/queue_with_stacks/queue_with_stacks.py | joseph-zabaleta/data-structures-and-algorithms | b11b5ef50f52e3d505474fe5fffe4357933da251 | [
"MIT"
] | null | null | null | dsa/challenges/queue_with_stacks/queue_with_stacks.py | joseph-zabaleta/data-structures-and-algorithms | b11b5ef50f52e3d505474fe5fffe4357933da251 | [
"MIT"
class Node:
    """A single link in a singly linked list: a value plus an optional successor."""

    def __init__(self, value, next_=None):
        self.value = value
        self.next = next_
        # Only another Node (or None) may follow this one.
        if next_ != None and not isinstance(next_, Node):
            raise TypeError("Next must be a Node")

    def __repr__(self):
        return "{} : {}".format(self.value, self.next)
class Stack:
    """LIFO stack backed by a singly linked chain of Node objects."""

    def __init__(self, item=None):
        self.top = item

    def __str__(self):
        rendered = ""
        node = self.top
        while node:
            rendered += str([node.value]) + " -> "
            node = node.next
        return rendered + "NULL"

    def push(self, item):
        """Add a new node holding `item` on top of the stack; O(1)."""
        new_top = Node(item)
        new_top.next = self.top
        self.top = new_top

    def pop(self):
        """Remove the top node and return its value; raise if the stack is empty."""
        if not self.top:
            raise AttributeError("Can't pop item from an empty stack")
        removed = self.top
        self.top = removed.next
        removed.next = None
        return removed.value

    def peek(self):
        """Return the value on top without removing it; raise if empty."""
        if not self.top:
            raise AttributeError("Can't peek top from an empty stack")
        return self.top.value

    def is_empty(self):
        """Return True when the stack holds no nodes."""
        return self.top is None
class PseudoQueue:
    """FIFO queue implemented on top of two LIFO stacks."""

    def __init__(self):
        self.storage1 = Stack()  # holds the queue contents, newest on top
        self.storage2 = Stack()  # scratch space used while dequeuing

    def enqueue(self, item):
        """Takes any value as an argument and adds a new node with that value to the back of the queue."""
        self.storage1.push(item)

    def dequeue(self):
        """Remove the node at the front of the queue and return its value.

        Fix: the original popped the front element but discarded it; a
        queue's dequeue must hand the removed value back to the caller.
        Raises AttributeError (via Stack.pop) when the queue is empty,
        matching the original behavior.
        """
        # Reverse storage1 into storage2 so the oldest element ends on top.
        while not self.storage1.is_empty():
            self.storage2.push(self.storage1.pop())
        front = self.storage2.pop()
        # Re-reverse the remaining elements back into storage1, restoring
        # the newest-on-top invariant.
        while not self.storage2.is_empty():
            self.storage1.push(self.storage2.pop())
        return front
| 30.8375 | 138 | 0.591407 |
2b768ba843050b4077cac69e222ea05b37e0d41e | 31 | py | Python | lms/schoology/__init__.py | djpohly/lms | b092a8d1ae0aa3a7c7cfbb3e57ab82de65a3a7b2 | [
"MIT"
] | 2 | 2019-12-17T02:18:26.000Z | 2020-12-01T22:05:51.000Z | lms/schoology/__init__.py | djpohly/lms | b092a8d1ae0aa3a7c7cfbb3e57ab82de65a3a7b2 | [
"MIT"
] | null | null | null | lms/schoology/__init__.py | djpohly/lms | b092a8d1ae0aa3a7c7cfbb3e57ab82de65a3a7b2 | [
"MIT"
] | null | null | null | from .backend import Schoology
| 15.5 | 30 | 0.83871 |
a4b11d0196022f56e70d5a50c18851dbfa53a82f | 1,395 | py | Python | drivers/moveMotor.py | MFOSSociety/Automate-AI-Chess | 614d6bc587978ce083394169eb1d792f01ffc34b | [
"MIT"
] | 4 | 2019-06-26T10:09:50.000Z | 2020-11-21T07:55:59.000Z | drivers/moveMotor.py | MFOSSociety/Automate-AI-Chess | 614d6bc587978ce083394169eb1d792f01ffc34b | [
"MIT"
] | 2 | 2019-06-27T22:52:43.000Z | 2019-10-02T17:48:45.000Z | drivers/moveMotor.py | MFOSSociety/8-8 | 614d6bc587978ce083394169eb1d792f01ffc34b | [
"MIT"
] | 3 | 2019-06-30T18:40:22.000Z | 2019-10-16T09:42:56.000Z | from .motor1 import rotatemotor as rotateY
from .motor2 import rotatemotor as rotateX
from .ElectroMagnet import toggleMagnet
import time
# Last known head position -- never updated in this module; presumably
# reserved for future position tracking (TODO confirm).
motorcurentx = 0
motorcurenty = 0
# Scale factors applied to square deltas before commanding each motor
# (units depend on the rotatemotor drivers -- TODO confirm).
xscale = 5
yscale = 5
# Offset of half a board square, used when picking up / dropping a piece.
half_square = 2
def moveMotor(move):
    # Entry point: translate a chess move object into motor motions.
    convertMove(move)
def trackInitial(x2,y2):
    # Moves Y then X using deltas (y2 -> 0) and (x2 -> 0) -- presumably
    # returning the head from (x2, y2) to the origin; TODO confirm the
    # direction convention against the motor drivers.
    moveY(y2,0)
    moveX(x2,0)
def moveX(x1, x2):
    """Rotate the X-axis motor by the scaled distance from x1 to x2."""
    delta = x2 - x1
    rotateX(delta * xscale)
def moveY(y1, y2):
    """Rotate the Y-axis motor by the scaled distance from y1 to y2 (inverted axis)."""
    delta = y2 - y1
    rotateY(-1 * delta * yscale)
def moveToOldPos(x1, y1):
    # Drive from the origin to the piece's current square (x1, y1), engage
    # the electromagnet, then nudge by half a square -- presumably to carry
    # the piece along square boundaries (TODO confirm).
    moveY(0,y1)
    moveX(0,x1)
    print("current position "+str(x1) +" "+str(y1))
    print("picking the peice up")
    toggleMagnet(True)
    rotateX(-1*half_square)
    rotateY(1*half_square)
def moveToNewPos(x1,y1,x2,y2):
    # Carry the piece from (x1, y1) to (x2, y2), undo the half-square nudge,
    # release the magnet, then send the head back to the origin.
    moveY(y1,y2)
    moveX(x1,x2)
    print("current position "+str(x2) +" "+str(y2))
    print("dropping the piece moving towards zero")
    rotateX(1*half_square)
    rotateY(-1*half_square)
    toggleMagnet(False)
    moveMotortozero(x2,y2)
def moveMotortozero(x,y):
    # Return the head from (x, y) to the origin, Y axis first then X.
    moveY(y,0)
    moveX(x,0)
    print("finally at zero")
def convertMove(move):
    """Mirror the board coordinates of `move` and run the pick-and-place sequence."""
    print("inside the motor")
    # Mirror both squares across the board (index 7 - n on each axis).
    x1 = abs(7 - move.oldPos[0])
    y1 = abs(7 - move.oldPos[1])
    x2 = abs(7 - move.newPos[0])
    y2 = abs(7 - move.newPos[1])
    print("("+str(x1)+","+str(y1)+") ("+str(x2)+","+str(y2)+")")
    print("current position 0,0")
    moveToOldPos(x1, y1)
    moveToNewPos(x1, y1, x2, y2)
| 19.928571 | 64 | 0.632258 |
c743c2653607bffc4779588f22f3a4c8918b54d6 | 2,192 | py | Python | cfgov/v1/tests/models/test_snippets.py | Colin-Seifer/consumerfinance.gov | a1a943f7170b498707d642d6be97b9a97a2b52e3 | [
"CC0-1.0"
] | 156 | 2015-01-16T15:16:46.000Z | 2020-08-04T04:48:01.000Z | cfgov/v1/tests/models/test_snippets.py | Colin-Seifer/consumerfinance.gov | a1a943f7170b498707d642d6be97b9a97a2b52e3 | [
"CC0-1.0"
] | 3,604 | 2015-01-05T22:09:12.000Z | 2020-08-14T17:09:19.000Z | cfgov/v1/tests/models/test_snippets.py | Colin-Seifer/consumerfinance.gov | a1a943f7170b498707d642d6be97b9a97a2b52e3 | [
"CC0-1.0"
] | 102 | 2015-01-28T14:51:18.000Z | 2020-08-10T00:00:39.000Z | # -*- coding: utf-8 -*-
from django.test import TestCase
from wagtail.core.models import Site
from wagtail.tests.testapp.models import SimplePage
from v1.blocks import ReusableTextChooserBlock
from v1.models.snippets import Contact, RelatedResource, ReusableText
class TestUnicodeCompatibility(TestCase):
    """Checks that snippet models round-trip non-ASCII text through str()."""

    def test_unicode_contact_heading_unicode(self):
        # A heading containing a non-ASCII character must survive str().
        contact = Contact(heading="Unicod\xeb")
        self.assertEqual(str(contact), "Unicod\xeb")
        self.assertIsInstance(str(contact), str)
class TestTranslations(TestCase):
    """Checks RelatedResource's language-aware accessors."""

    def test_related_resource_translations(self):
        test_resource = RelatedResource(
            title="English title",
            title_es="Spanish title",
            text="English text.",
            text_es="Spanish text.",
        )
        # Default (no language argument) falls back to the English fields.
        self.assertEqual(str(test_resource), test_resource.title)
        self.assertEqual(test_resource.trans_title(), test_resource.title)
        self.assertEqual(test_resource.trans_text(), test_resource.text)
        # Passing "es" selects the *_es fields.
        self.assertEqual(
            test_resource.trans_title("es"), test_resource.title_es
        )
        self.assertEqual(test_resource.trans_text("es"), test_resource.text_es)
class TestModelStrings(TestCase):
    """Checks the human-readable representation of snippet models."""

    def test_reusable_text_string(self):
        # str(ReusableText) should be its title, not the body text.
        test_snippet = ReusableText(
            title="Snippet title",
            sidefoot_heading="Sidefoot heading",
            text="Snippet text",
        )
        self.assertEqual(str(test_snippet), test_snippet.title)
class TestReusableTextRendering(TestCase):
    """Checks that ReusableTextChooserBlock expands Wagtail page links on render."""

    def test_links_get_expanded(self):
        # Create a real page under the default site's root so the linktype
        # reference can resolve to a URL.
        page = SimplePage(title="foo", slug="foo", content="content")
        default_site = Site.objects.get(is_default_site=True)
        default_site.root_page.add_child(instance=page)
        html = '<a linktype="page" id="{}">Link</a>'.format(page.pk)
        block = ReusableTextChooserBlock(ReusableText)
        self.assertIn('<a href="/foo/">', block.render({"text": html}))

    def test_nonexistent_links_return_empty_link(self):
        # A linktype reference to a missing page renders an href-less anchor.
        html = '<a linktype="page" id="12345">Link</a>'
        block = ReusableTextChooserBlock(ReusableText)
        self.assertIn("<a>", block.render({"text": html}))
| 36.533333 | 79 | 0.685219 |
0c697f23039fcc00712fd82b21c9a90a3c8267f5 | 460 | py | Python | tests.py | oeg-upm/tada-gam | 99f56d0cdfe39fc78f58480d274a22bb63698745 | [
"Apache-2.0"
] | 1 | 2019-05-23T13:52:13.000Z | 2019-05-23T13:52:13.000Z | tests.py | oeg-upm/tada-gam | 99f56d0cdfe39fc78f58480d274a22bb63698745 | [
"Apache-2.0"
] | null | null | null | tests.py | oeg-upm/tada-gam | 99f56d0cdfe39fc78f58480d274a22bb63698745 | [
"Apache-2.0"
] | null | null | null | import unittest
import captain
import subprocess
# def start_services():
# pass
class TestGam(unittest.TestCase):
    """Integration test: brings docker-compose services up and counts them."""

    def test_start_services(self):
        # Start from a clean slate before scaling score=3 and combine=2.
        subprocess.call(["docker-compose", "down"])
        captain.parse_args(["up", "--services", "score=3", "combine=2"])
        output = subprocess.check_output(["docker-compose", "ps"])
        # Fix: check_output returns bytes on Python 3, and bytes.split('\n')
        # raises TypeError -- decode first, then count lines.  Expects 7
        # lines total (presumably header rows plus the scaled containers --
        # TODO confirm against the compose file).
        self.assertEqual(7, len(output.decode().strip().split('\n')))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 21.904762 | 72 | 0.65 |
86cd351cd3133bb2d0790423f47b6319ecbbfaf1 | 1,186 | py | Python | Machinelearningalgorithms/svmwithlibrary.py | Lonakshi/Data-Analysis | 47f44bc0afd5e8891828651d0ba0a145d2f8b66b | [
"Apache-2.0"
] | null | null | null | Machinelearningalgorithms/svmwithlibrary.py | Lonakshi/Data-Analysis | 47f44bc0afd5e8891828651d0ba0a145d2f8b66b | [
"Apache-2.0"
] | null | null | null | Machinelearningalgorithms/svmwithlibrary.py | Lonakshi/Data-Analysis | 47f44bc0afd5e8891828651d0ba0a145d2f8b66b | [
"Apache-2.0"
] | null | null | null | import numpy as np
from sklearn import preprocessing,neighbors,svm
from sklearn.model_selection import train_test_split
import pandas as pd
# Train a linear SVM on the breast-cancer dataset and classify two samples.
# Assumes a CSV file named 'breastcancer' with 'id' and 'class' columns in
# the working directory -- TODO confirm the expected schema.
df= pd.read_csv('breastcancer')
# The dataset marks missing values with '?'; replace them with a large
# sentinel so they behave as outliers instead of breaking the fit.
df.replace('?', -99999, inplace=True)
# Drop the 'id' column: it carries no signal about benign vs. malignant,
# and keeping it degrades accuracy drastically.
# NOTE(review): positional axis argument (`,1,`) is deprecated in newer
# pandas -- should be axis=1 or columns=['id'].
df.drop(['id'],1, inplace=True) #if we will include this id column then the accuracy
#will be almost 50% that is too worse for predicting a cancer
X = np.array(df.drop(['class'],1)) # features: everything except the label column
y = np.array(df['class']) # label column only
# Hold out 20% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.2)
# Define and fit a linear-kernel support vector classifier.
clf= svm.SVC(kernel="linear")
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)
# Predict on two hand-made samples (same 9 feature columns, no id/class).
example_measures = np.array([[4,2,1,1,1,2,3,2,1], [4,2,1,2,2,2,3,2,1]])
example_measures = example_measures.reshape(len(example_measures),-1)
# The sample values are arbitrary; make sure they do not appear in the
# training data.
prediction = clf.predict(example_measures)
print(prediction)
| 21.962963 | 84 | 0.743676 |
22493603bc9ce0b9ecad617452c93da74a8eea2e | 538 | bzl | Python | js/packages/go-bridge/config.bzl | aeddi/berty-1 | 616f6419b19ca74bcef7f35b96bf249a5f55e70c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | js/packages/go-bridge/config.bzl | aeddi/berty-1 | 616f6419b19ca74bcef7f35b96bf249a5f55e70c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | js/packages/go-bridge/config.bzl | aeddi/berty-1 | 616f6419b19ca74bcef7f35b96bf249a5f55e70c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-07-10T06:28:24.000Z | 2020-07-10T06:28:24.000Z | load("@berty//go:config.bzl", "berty_go_config")
load("@co_znly_rules_gomobile//:repositories.bzl", "gomobile_repositories")
load("@build_bazel_apple_support//lib:repositories.bzl", "apple_support_dependencies")
load("@build_bazel_rules_swift//swift:repositories.bzl", "swift_rules_dependencies")
def berty_bridge_config():
    """Configure the repository dependencies needed to build the Berty go-bridge:
    Berty's Go deps, gomobile, and the iOS toolchain (Apple support + Swift rules)."""
    # fetch and config berty go dependencies
    berty_go_config()
    # config gomobile repositories
    gomobile_repositories()
    # config ios
    apple_support_dependencies()
    swift_rules_dependencies()
| 33.625 | 86 | 0.775093 |
753b6b221007d31942fc0d51f936a0c83dfae390 | 2,758 | py | Python | scrips/search_eval/run_selfcenter_eval_search_all_structs.py | lonelu/Metalprot | e51bee472c975aa171bdb6ee426a07ca69f110ee | [
"MIT"
] | null | null | null | scrips/search_eval/run_selfcenter_eval_search_all_structs.py | lonelu/Metalprot | e51bee472c975aa171bdb6ee426a07ca69f110ee | [
"MIT"
] | null | null | null | scrips/search_eval/run_selfcenter_eval_search_all_structs.py | lonelu/Metalprot | e51bee472c975aa171bdb6ee426a07ca69f110ee | [
"MIT"
] | null | null | null | import os
import sys
import prody as pr
import numpy as np
#You can either add the python package path.
#sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot.search import search, search_eval
from metalprot.basic import filter
import pickle
import multiprocessing as mp
'''
python /mnt/e/GitHub_Design/Metalprot/scrips/search_eval/run_eval_search_all_structs.py
'''
# Load the pre-computed vdM query libraries from pickle files.  The three
# objects are consumed by run_search() below; their exact types come from
# the metalprot pipeline that produced the pickles.
query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/20211013_selfcenter/pickle_noCYS_alignBB/'
with open(query_dir + 'all_metal_vdm.pkl', 'rb') as f:
    query_all_metal = pickle.load(f)
with open(query_dir + 'AAMetalPhiPsi.pkl', 'rb') as f:
    all_querys = pickle.load(f)
with open(query_dir + 'cluster_centroid_dict.pkl', 'rb') as f:
    cluster_centroid_dict = pickle.load(f)
print(len(all_querys))
# run Search_struct
def run_search(workdir, target_file, query_all_metal, all_querys, cluster_centroid_dict):
    """Run the self-center evaluation search on one target PDB file.

    Returns a tuple (label, win_search) where label is the target file name
    (suffixed with ' Error' on failure) and win_search is the set of first
    elements of the keys of ss.neighbor_comb_dict -- presumably the matched
    window identifiers (TODO confirm against metalprot's docs).
    """
    outdir = workdir + 'output_eval_' + target_file + '/'
    target_path = workdir + target_file
    print(target_path)
    # Fixed search parameters for this evaluation run.
    rmsd_cuts = 0.45
    num_iters = [3]
    win_filter = []
    _filter = filter.Search_filter(filter_abple = False, filter_phipsi = True, max_phipsi_val = 25,
        filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20,
        after_search_filter = True, pair_angle_range = [92, 116], pair_aa_aa_dist_range = [3.0, 3.5], pair_metal_aa_dist_range = None,
        filter_qt_clash = True, write_filtered_result = True, selfcenter_filter_member_phipsi = True)
    ss = search_eval.Search_eval(target_path, outdir, all_querys, cluster_centroid_dict,
        query_all_metal, num_iters, rmsd_cuts,
        win_filter, validateOriginStruct = True, search_filter= _filter, parallel = False)
    win_search = set()
    try:
        ss.run_eval_selfcenter_search()
    # NOTE(review): bare except deliberately turns any failure into an
    # 'Error' record so one bad structure does not kill the batch run.
    except:
        return (target_file + ' Error', win_search)
    for k in ss.neighbor_comb_dict.keys():
        win_search.add(k[0])
    return (target_file, win_search)
# Fan run_search() out across all .pdb files in workdir using a process
# pool (all cores but one), then write a tab-separated summary file.
num_cores = int(mp.cpu_count() - 1)
pool = mp.Pool(num_cores)
workdir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/_Seq_core_date_3contact/'
target_files = []
for target_file in os.listdir(workdir):
    if target_file.endswith('.pdb'):
        target_files.append(target_file)
results = [pool.apply_async(run_search, args=(workdir, target_file, query_all_metal, all_querys, cluster_centroid_dict)) for target_file in target_files]
# Block until every worker finishes and collect the (label, wins) tuples.
results = [p.get() for p in results]
with open(workdir + '_summary.txt', 'w') as f:
    f.write('target_file\twin_extract\n')
    for r in results:
        try:
            f.write(r[0] + '\t')
            f.write(str(r[1]) + '\t')
        # NOTE(review): best-effort -- an unwritable result still records
        # its file name on its own line.
        except:
            f.write(r[0] + '\t\n')
| 30.988764 | 153 | 0.71066 |
18fe20b7932aad3e76a850204267bef6a51a75f6 | 1,661 | py | Python | main.py | arikarim/Rock_Paper_scissors_PYTHON | b95e80b473c2fb3276249f17879bcec2236ce9f2 | [
"MIT"
] | null | null | null | main.py | arikarim/Rock_Paper_scissors_PYTHON | b95e80b473c2fb3276249f17879bcec2236ce9f2 | [
"MIT"
] | null | null | null | main.py | arikarim/Rock_Paper_scissors_PYTHON | b95e80b473c2fb3276249f17879bcec2236ce9f2 | [
"MIT"
] | null | null | null | import random
import math
def play():
    """Prompt the user for a move, draw the computer's move, and score one round.

    Returns (outcome, user_move, computer_move) where outcome is
    0 for a tie, 1 for a user win and -1 for a loss.
    """
    print('\n')
    choice = input("Enter your choice, 'R' for Rock, 'P' for Paper, 'S' for Scissors \n").lower()
    # Keep re-prompting until we get one of the three legal letters.
    while choice not in ('r', 'p', 's'):
        print("Please enter a valid letter")
        choice = input("Enter your choice, 'R' for Rock, 'P' for Paper, 'S' for Scissors \n").lower()
    print('\n')
    computer = random.choice(['r', 'p', 's'])
    if choice == computer:
        return (0, choice, computer)
    if is_win(choice, computer):
        return (1, choice, computer)
    return (-1, choice, computer)
def is_win(player, opponent):
    """Return True when `player`'s move beats `opponent`'s move."""
    beats = {'r': 's', 's': 'p', 'p': 'r'}
    return beats.get(player) == opponent
def play_best_of(n):
    """Play rounds until one side reaches ceil(n/2) wins, then announce the winner.

    n -- total number of games in the best-of-n series.
    """
    player_wins = 0
    computer_wins = 0
    wins_necessary = math.ceil(n/2)
    while player_wins < wins_necessary and computer_wins < wins_necessary:
        result, user, computer = play()
        if result == 0:
            print(" You chose {}, and Computer chose {}. its a tie".format(user,computer))
        elif result == 1:
            player_wins += 1
            print("You chose {}, and Computer chose {}, You won!".format(user,computer))
        else:
            computer_wins += 1
            # Fix: the losing branch printed the raw "{}" template because the
            # .format(...) call was missing.
            print("You chose {}, and Computer chose {}, You lost!".format(user, computer))
    if player_wins > computer_wins:
        print("You have won best of {} games".format(n))
    else:
        # Fix: corrected the "Copmuter" typo in the user-facing message.
        print("You lost, Computer has won the best of {} games".format(n))
# if _name_ == '_main_':
def start():
    # Greet the player and run a best-of-three series.
    print("Welcome, Let's Play a Game \n")
    play_best_of(3)
# Kick off the game when the module is executed.
# Fix: this line carried fused dataset-extraction residue ("| 29.14...")
# that made it a syntax error; only the start() call is real code.
start()
03dd871638c3469f2588798aa9779a1cfbb79485 | 58,465 | py | Python | google_images_download/google_images_download.py | lnowakow/google-images-download | 1655831e6760713544cb390883312f5acd05cfc2 | [
"MIT"
] | null | null | null | google_images_download/google_images_download.py | lnowakow/google-images-download | 1655831e6760713544cb390883312f5acd05cfc2 | [
"MIT"
] | null | null | null | google_images_download/google_images_download.py | lnowakow/google-images-download | 1655831e6760713544cb390883312f5acd05cfc2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# In[ ]:
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
# Import Libraries
import sys
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
import urllib.request
from urllib.request import Request, urlopen
from urllib.request import URLError, HTTPError
from urllib.parse import quote
import http.client
from http.client import IncompleteRead, BadStatusLine
http.client._MAXHEADERS = 1000
else: # If the Current Version of Python is 2.x
import urllib2
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from urllib import quote
import httplib
from httplib import IncompleteRead, BadStatusLine
httplib._MAXHEADERS = 1000
import time # Importing the time library to check the time of code execution
import os
import argparse
import ssl
import datetime
import json
import re
import codecs
import socket
# Every argument name accepted both on the command line and in a JSON
# config file; user_input() seeds each record with None for all of these
# before overlaying the provided values.
args_list = ["keywords", "keywords_from_file", "prefix_keywords", "suffix_keywords",
             "limit", "format", "color", "color_type", "usage_rights", "size",
             "exact_size", "aspect_ratio", "type", "time", "time_range", "delay", "url", "single_image",
             "output_directory", "image_directory", "no_directory", "proxy", "similar_images", "specific_site",
             "print_urls", "print_size", "print_paths", "metadata", "extract_metadata", "socket_timeout",
             "thumbnail", "thumbnail_only", "language", "prefix", "chromedriver", "related_images", "safe_search",
             "no_numbering",
             "offset", "no_download", "save_source", "silent_mode", "ignore_urls"]
def user_input():
    """Collect download parameters from a JSON config file or the command line.

    Returns a list of "records": one dict mapping argument name -> value per
    download job.  With --config_file, one record is built per entry of the
    file's 'Records' array; otherwise a single record is parsed from argv.
    """
    config = argparse.ArgumentParser()
    config.add_argument('-cf', '--config_file', help='config file name', default='', type=str, required=False)
    config_file_check = config.parse_known_args()
    object_check = vars(config_file_check[0])
    if object_check['config_file'] != '':
        # Config-file mode: seed every known argument with None, then overlay
        # the values supplied by each record in the JSON file.
        records = []
        json_file = json.load(open(config_file_check[0].config_file))
        for record in range(0, len(json_file['Records'])):
            arguments = {}
            for i in args_list:
                arguments[i] = None
            for key, value in json_file['Records'][record].items():
                arguments[key] = value
            records.append(arguments)
        records_count = len(records)  # NOTE(review): computed but never used
    else:
        # Taking command line arguments from users
        parser = argparse.ArgumentParser()
        parser.add_argument('-k', '--keywords', help='delimited list input', type=str, required=False)
        parser.add_argument('-kf', '--keywords_from_file', help='extract list of keywords from a text file', type=str,
                            required=False)
        parser.add_argument('-sk', '--suffix_keywords',
                            help='comma separated additional words added after to main keyword', type=str,
                            required=False)
        parser.add_argument('-pk', '--prefix_keywords',
                            help='comma separated additional words added before main keyword', type=str, required=False)
        parser.add_argument('-l', '--limit', help='delimited list input', type=str, required=False)
        parser.add_argument('-f', '--format', help='download images with specific format', type=str, required=False,
                            choices=['jpg', 'gif', 'png', 'bmp', 'svg', 'webp', 'ico'])
        parser.add_argument('-u', '--url', help='search with google image URL', type=str, required=False)
        parser.add_argument('-x', '--single_image', help='downloading a single image from URL', type=str,
                            required=False)
        parser.add_argument('-o', '--output_directory', help='download images in a specific main directory', type=str,
                            required=False)
        parser.add_argument('-i', '--image_directory', help='download images in a specific sub-directory', type=str,
                            required=False)
        parser.add_argument('-n', '--no_directory', default=False,
                            help='download images in the main directory but no sub-directory', action="store_true")
        parser.add_argument('-d', '--delay', help='delay in seconds to wait between downloading two images', type=int,
                            required=False)
        parser.add_argument('-co', '--color', help='filter on color', type=str, required=False,
                            choices=['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white',
                                     'gray', 'black', 'brown'])
        parser.add_argument('-ct', '--color_type', help='filter on color', type=str, required=False,
                            choices=['full-color', 'black-and-white', 'transparent'])
        parser.add_argument('-r', '--usage_rights', help='usage rights', type=str, required=False,
                            choices=['labeled-for-reuse-with-modifications', 'labeled-for-reuse',
                                     'labeled-for-noncommercial-reuse-with-modification',
                                     'labeled-for-nocommercial-reuse'])
        parser.add_argument('-s', '--size', help='image size', type=str, required=False,
                            choices=['large', 'medium', 'icon', '>400*300', '>640*480', '>800*600', '>1024*768', '>2MP',
                                     '>4MP', '>6MP', '>8MP', '>10MP', '>12MP', '>15MP', '>20MP', '>40MP', '>70MP'])
        parser.add_argument('-es', '--exact_size', help='exact image resolution "WIDTH,HEIGHT"', type=str,
                            required=False)
        parser.add_argument('-t', '--type', help='image type', type=str, required=False,
                            choices=['face', 'photo', 'clipart', 'line-drawing', 'animated'])
        parser.add_argument('-w', '--time', help='image age', type=str, required=False,
                            choices=['past-24-hours', 'past-7-days', 'past-month', 'past-year'])
        parser.add_argument('-wr', '--time_range',
                            help='time range for the age of the image. should be in the format {"time_min":"YYYY-MM-DD","time_max":"YYYY-MM-DD"}',
                            type=str, required=False)
        parser.add_argument('-a', '--aspect_ratio', help='comma separated additional words added to keywords', type=str,
                            required=False,
                            choices=['tall', 'square', 'wide', 'panoramic'])
        parser.add_argument('-si', '--similar_images',
                            help='downloads images very similar to the image URL you provide', type=str, required=False)
        parser.add_argument('-ss', '--specific_site', help='downloads images that are indexed from a specific website',
                            type=str, required=False)
        parser.add_argument('-p', '--print_urls', default=False, help="Print the URLs of the images",
                            action="store_true")
        parser.add_argument('-ps', '--print_size', default=False, help="Print the size of the images on disk",
                            action="store_true")
        parser.add_argument('-pp', '--print_paths', default=False,
                            help="Prints the list of absolute paths of the images", action="store_true")
        parser.add_argument('-m', '--metadata', default=False, help="Print the metadata of the image",
                            action="store_true")
        parser.add_argument('-e', '--extract_metadata', default=False, help="Dumps all the logs into a text file",
                            action="store_true")
        parser.add_argument('-st', '--socket_timeout', default=False,
                            help="Connection timeout waiting for the image to download", type=float)
        parser.add_argument('-th', '--thumbnail', default=False,
                            help="Downloads image thumbnail along with the actual image", action="store_true")
        parser.add_argument('-tho', '--thumbnail_only', default=False,
                            help="Downloads only thumbnail without downloading actual images", action="store_true")
        parser.add_argument('-la', '--language', default=False,
                            help="Defines the language filter. The search results are authomatically returned in that language",
                            type=str, required=False,
                            choices=['Arabic', 'Chinese (Simplified)', 'Chinese (Traditional)', 'Czech', 'Danish',
                                     'Dutch', 'English', 'Estonian', 'Finnish', 'French', 'German', 'Greek', 'Hebrew',
                                     'Hungarian', 'Icelandic', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Lithuanian',
                                     'Norwegian', 'Portuguese', 'Polish', 'Romanian', 'Russian', 'Spanish', 'Swedish',
                                     'Turkish'])
        parser.add_argument('-pr', '--prefix', default=False,
                            help="A word that you would want to prefix in front of each image name", type=str,
                            required=False)
        parser.add_argument('-px', '--proxy', help='specify a proxy address and port', type=str, required=False)
        parser.add_argument('-cd', '--chromedriver',
                            help='specify the path to chromedriver executable in your local machine', type=str,
                            required=False)
        parser.add_argument('-ri', '--related_images', default=False,
                            help="Downloads images that are similar to the keyword provided", action="store_true")
        parser.add_argument('-sa', '--safe_search', default=False,
                            help="Turns on the safe search filter while searching for images", action="store_true")
        parser.add_argument('-nn', '--no_numbering', default=False,
                            help="Allows you to exclude the default numbering of images", action="store_true")
        parser.add_argument('-of', '--offset', help="Where to start in the fetched links", type=str, required=False)
        parser.add_argument('-nd', '--no_download', default=False,
                            help="Prints the URLs of the images and/or thumbnails without downloading them",
                            action="store_true")
        parser.add_argument('-iu', '--ignore_urls', default=False,
                            help="delimited list input of image urls/keywords to ignore", type=str)
        parser.add_argument('-sil', '--silent_mode', default=False,
                            help="Remains silent. Does not print notification messages on the terminal",
                            action="store_true")
        parser.add_argument('-is', '--save_source',
                            help="creates a text file containing a list of downloaded images along with source page url",
                            type=str, required=False)

        args = parser.parse_args()

        # CLI mode yields exactly one record.
        arguments = vars(args)
        records = []
        records.append(arguments)
    return records
class googleimagesdownload:
    def __init__(self):
        # No per-instance state is needed; every public method receives its
        # configuration per call.
        pass
def _extract_data_pack(self, page):
start_line = page.find("AF_initDataCallback({key: \\'ds:1\\'") - 10
start_object = page.find('[', start_line + 1)
end_object = page.rfind(']',0,page.find('</script>', start_object + 1))+1
object_raw = str(page[start_object:end_object])
return bytes(object_raw, "utf-8").decode("unicode_escape")
def _extract_data_pack_extended(self, page):
start_line = page.find("AF_initDataCallback({key: 'ds:1'") - 10
start_object = page.find('[', start_line + 1)
end_object = page.rfind(']',0,page.find('</script>', start_object + 1)) + 1
return str(page[start_object:end_object])
def _extract_data_pack_ajax(self, data):
lines = data.split('\n')
return json.loads(lines[3])[0][2]
def _image_objects_from_pack(self, data):
image_objects = json.loads(data)[31][-1][12][2]
image_objects = [x for x in image_objects if x[0] == 1]
return image_objects
# Downloading entire Web Document (Raw Page Content)
    def download_page(self, url):
        """Fetch a Google Images results page and return (image_objects, tabs).

        Exits the whole process (sys.exit) when the page cannot be fetched
        or its embedded data pack cannot be parsed.
        """
        version = (3, 0)
        cur_version = sys.version_info
        headers = {}
        # Spoof a desktop Chrome user agent; Google serves a different page
        # layout to unknown clients.
        headers[
            'User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
        if cur_version >= version:  # If the Current Version of Python is 3.0 or above
            try:
                req = urllib.request.Request(url, headers=headers)
                resp = urllib.request.urlopen(req)
                respData = str(resp.read())
            except:
                print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                      "If you are using proxy, make sure your proxy settings is configured correctly")
                sys.exit()
        else:  # If the Current Version of Python is 2.x
            try:
                req = urllib2.Request(url, headers=headers)
                try:
                    response = urllib2.urlopen(req)
                except URLError:  # Handling SSL certificate failed
                    context = ssl._create_unverified_context()
                    response = urlopen(req, context=context)
                respData = response.read()
            except:
                print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                      "If you are using proxy, make sure your proxy settings is configured correctly")
                sys.exit()
            # NOTE(review): this return fires even after a *successful*
            # Python 2 fetch, skipping the parsing below -- looks like
            # dead/legacy code on modern interpreters.
            return "Page Not found"
        try:
            return self._image_objects_from_pack(self._extract_data_pack(respData)), self.get_all_tabs(respData)
        except Exception as e:
            print(e)
            print('Image objects data unpacking failed. Please leave a comment with the above error at https://github.com/hardikvasa/google-images-download/pull/298')
            sys.exit()
# Download Page for more than 100 images
def download_extended_page(self, url, chromedriver="/usr/lib/chromium-browser/chromedriver"):
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
chrome_service = Service(chromedriver)
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf8')
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument("--headless")
options.add_argument("--disable-dev-shm-usage")
try:
browser = webdriver.Chrome(service=chromedriver, options=options)
except Exception as e:
print("Looks like we cannot locate the path the 'chromedriver' (use the '--chromedriver' "
"argument to specify the path to the executable.) or google chrome browser is not "
"installed on your machine (exception: %s)" % e)
sys.exit()
browser.set_window_size(1024, 768)
# Open the link
browser.get(url)
browser.execute_script("""
(function(XHR){
"use strict";
var open = XHR.prototype.open;
var send = XHR.prototype.send;
var data = [];
XHR.prototype.open = function(method, url, async, user, pass) {
this._url = url;
open.call(this, method, url, async, user, pass);
}
XHR.prototype.send = function(data) {
var self = this;
var url = this._url;
function stateChanged() {
if (self.readyState == 4) {
console.log("data available for: " + url)
XHR.prototype._data.push(self.response);
}
}
if (url.includes("/batchexecute?")) {
this.addEventListener("readystatechange", stateChanged, false);
}
send.call(this, data);
};
XHR.prototype._data = [];
})(XMLHttpRequest);
""")
time.sleep(1)
print("Getting you a lot of images. This may take a few moments...")
element = browser.find_element(By.TAG_NAME, "body")
# Scroll down
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3)
try:
browser.find_element(By.XPATH, '//input[@value="Show more results"]').click()
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
except:
for i in range(10):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
print("Reached end of Page.")
time.sleep(0.5)
source = browser.page_source # page source
images = self._image_objects_from_pack(self._extract_data_pack_extended(source))
ajax_data = browser.execute_script("return XMLHttpRequest.prototype._data")
for chunk in ajax_data:
images += self._image_objects_from_pack(self._extract_data_pack_ajax(chunk))
# close the browser
browser.close()
return images, self.get_all_tabs(source)
# Correcting the escape characters for python2
def replace_with_byte(self, match):
return chr(int(match.group(0)[1:], 8))
def repair(self, brokenjson):
invalid_escape = re.compile(r'\\[0-7]{1,3}') # up to 3 digits for byte values up to FF
return invalid_escape.sub(self.replace_with_byte, brokenjson)
# Finding 'Next Image' from the given raw page
def get_next_tab(self, s):
start_line = s.find('class="dtviD"')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_tabs"
return link, '', end_quote
else:
start_line = s.find('class="dtviD"')
start_content = s.find('href="', start_line + 1)
end_content = s.find('">', start_content + 1)
url_item = "https://www.google.com" + str(s[start_content + 6:end_content])
url_item = url_item.replace('&', '&')
start_line_2 = s.find('class="dtviD"')
s = s.replace('&', '&')
start_content_2 = s.find(':', start_line_2 + 1)
end_content_2 = s.find('&usg=', start_content_2 + 1)
url_item_name = str(s[start_content_2 + 1:end_content_2])
chars = url_item_name.find(',g_1:')
chars_end = url_item_name.find(":", chars + 6)
if chars_end == -1:
updated_item_name = (url_item_name[chars + 5:]).replace("+", " ")
else:
updated_item_name = (url_item_name[chars + 5:chars_end]).replace("+", " ")
return url_item, updated_item_name, end_content
# Getting all links with the help of '_images_get_next_image'
def get_all_tabs(self, page):
tabs = {}
while True:
item, item_name, end_content = self.get_next_tab(page)
if item == "no_tabs":
break
else:
if len(item_name) > 100 or item_name == "background-color":
break
else:
tabs[item_name] = item # Append all the links in the list named 'Links'
time.sleep(0.1) # Timer could be used to slow down the request for image downloads
page = page[end_content:]
return tabs
# Format the object in readable format
def format_object(self, object):
data = object[1]
main = data[3]
info = data[9]
if info is None:
info = data[11]
formatted_object = {}
try:
formatted_object['image_height'] = main[2]
formatted_object['image_width'] = main[1]
formatted_object['image_link'] = main[0]
formatted_object['image_format'] = main[0][-1 * (len(main[0]) - main[0].rfind(".") - 1):]
formatted_object['image_description'] = info['2003'][3]
formatted_object['image_host'] = info['2003'][17]
formatted_object['image_source'] = info['2003'][2]
formatted_object['image_thumbnail_url'] = data[2][0]
except Exception as e:
print(e)
return None
return formatted_object
# function to download single image
def single_image(self, image_url):
main_directory = "downloads"
extensions = (".jpg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
url = image_url
try:
os.makedirs(main_directory)
except OSError as e:
if e.errno != 17:
raise
pass
req = Request(url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
response = urlopen(req, None, 10)
data = response.read()
response.close()
image_name = str(url[(url.rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
# if ".jpg" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name:
if any(map(lambda extension: extension in image_name, extensions)):
file_name = main_directory + "/" + image_name
else:
file_name = main_directory + "/" + image_name + ".jpg"
image_name = image_name + ".jpg"
try:
output_file = open(file_name, 'wb')
output_file.write(data)
output_file.close()
except IOError as e:
raise e
except OSError as e:
raise e
print("completed ====> " + image_name.encode('raw_unicode_escape').decode('utf-8'))
return
def similar_images(self, similar_images):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req1 = urllib.request.Request(searchUrl, headers=headers)
resp1 = urllib.request.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib.request.Request(newurl, headers=headers)
resp2 = urllib.request.urlopen(req2)
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return urll2
except:
return "Cloud not connect to Google Images endpoint"
else: # If the Current Version of Python is 2.x
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req1 = urllib2.Request(searchUrl, headers=headers)
resp1 = urllib2.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib2.Request(newurl, headers=headers)
resp2 = urllib2.urlopen(req2)
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return (urll2)
except:
return "Cloud not connect to Google Images endpoint"
# Building URL parameters
def build_url_parameters(self, arguments):
if arguments['language']:
lang = "&lr="
lang_param = {"Arabic": "lang_ar", "Chinese (Simplified)": "lang_zh-CN",
"Chinese (Traditional)": "lang_zh-TW", "Czech": "lang_cs", "Danish": "lang_da",
"Dutch": "lang_nl", "English": "lang_en", "Estonian": "lang_et", "Finnish": "lang_fi",
"French": "lang_fr", "German": "lang_de", "Greek": "lang_el", "Hebrew": "lang_iw ",
"Hungarian": "lang_hu", "Icelandic": "lang_is", "Italian": "lang_it", "Japanese": "lang_ja",
"Korean": "lang_ko", "Latvian": "lang_lv", "Lithuanian": "lang_lt", "Norwegian": "lang_no",
"Portuguese": "lang_pt", "Polish": "lang_pl", "Romanian": "lang_ro", "Russian": "lang_ru",
"Spanish": "lang_es", "Swedish": "lang_sv", "Turkish": "lang_tr"}
lang_url = lang + lang_param[arguments['language']]
else:
lang_url = ''
if arguments['exact_size']:
size_array = [x.strip() for x in arguments['exact_size'].split(',')]
exact_size = ",isz:ex,iszw:" + str(size_array[0]) + ",iszh:" + str(size_array[1])
else:
exact_size = ''
built_url = "&tbs="
counter = 0
params = {'color': [arguments['color'], {'red': 'ic:specific,isc:red', 'orange': 'ic:specific,isc:orange',
'yellow': 'ic:specific,isc:yellow', 'green': 'ic:specific,isc:green',
'teal': 'ic:specific,isc:teel', 'blue': 'ic:specific,isc:blue',
'purple': 'ic:specific,isc:purple', 'pink': 'ic:specific,isc:pink',
'white': 'ic:specific,isc:white', 'gray': 'ic:specific,isc:gray',
'black': 'ic:specific,isc:black', 'brown': 'ic:specific,isc:brown'}],
'color_type': [arguments['color_type'],
{'full-color': 'ic:color', 'black-and-white': 'ic:gray', 'transparent': 'ic:trans'}],
'usage_rights': [arguments['usage_rights'],
{'labeled-for-reuse-with-modifications': 'sur:fmc', 'labeled-for-reuse': 'sur:fc',
'labeled-for-noncommercial-reuse-with-modification': 'sur:fm',
'labeled-for-nocommercial-reuse': 'sur:f'}],
'size': [arguments['size'],
{'large': 'isz:l', 'medium': 'isz:m', 'icon': 'isz:i', '>400*300': 'isz:lt,islt:qsvga',
'>640*480': 'isz:lt,islt:vga', '>800*600': 'isz:lt,islt:svga',
'>1024*768': 'visz:lt,islt:xga', '>2MP': 'isz:lt,islt:2mp', '>4MP': 'isz:lt,islt:4mp',
'>6MP': 'isz:lt,islt:6mp', '>8MP': 'isz:lt,islt:8mp', '>10MP': 'isz:lt,islt:10mp',
'>12MP': 'isz:lt,islt:12mp', '>15MP': 'isz:lt,islt:15mp', '>20MP': 'isz:lt,islt:20mp',
'>40MP': 'isz:lt,islt:40mp', '>70MP': 'isz:lt,islt:70mp'}],
'type': [arguments['type'], {'face': 'itp:face', 'photo': 'itp:photo', 'clipart': 'itp:clipart',
'line-drawing': 'itp:lineart', 'animated': 'itp:animated'}],
'time': [arguments['time'], {'past-24-hours': 'qdr:d', 'past-7-days': 'qdr:w', 'past-month': 'qdr:m',
'past-year': 'qdr:y'}],
'aspect_ratio': [arguments['aspect_ratio'],
{'tall': 'iar:t', 'square': 'iar:s', 'wide': 'iar:w', 'panoramic': 'iar:xw'}],
'format': [arguments['format'],
{'jpg': 'ift:jpg', 'gif': 'ift:gif', 'png': 'ift:png', 'bmp': 'ift:bmp', 'svg': 'ift:svg',
'webp': 'webp', 'ico': 'ift:ico', 'raw': 'ift:craw'}]}
for key, value in params.items():
if value[0] is not None:
ext_param = value[1][value[0]]
# counter will tell if it is first param added or not
if counter == 0:
# add it to the built url
built_url = built_url + ext_param
counter += 1
else:
built_url = built_url + ',' + ext_param
counter += 1
built_url = lang_url + built_url + exact_size
return built_url
# building main search URL
def build_search_url(self, search_term, params, url, similar_images, specific_site, safe_search):
# check safe_search
safe_search_string = "&safe=active"
# check the args and choose the URL
if url:
url = url
elif similar_images:
print(similar_images)
keywordem = self.similar_images(similar_images)
url = 'https://www.google.com/search?q=' + keywordem + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
elif specific_site:
url = 'https://www.google.com/search?q=' + quote(
search_term.encode(
'utf-8')) + '&as_sitesearch=' + specific_site + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
else:
url = 'https://www.google.com/search?q=' + quote(
search_term.encode(
'utf-8')) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
# safe search check
if safe_search:
url = url + safe_search_string
return url
# measures the file size
def file_size(self, file_path):
if os.path.isfile(file_path):
file_info = os.stat(file_path)
size = file_info.st_size
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
return size
# keywords from file
def keywords_from_file(self, file_name):
search_keyword = []
with codecs.open(file_name, 'r', encoding='utf-8-sig') as f:
if '.csv' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
elif '.txt' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
else:
print("Invalid file type: Valid file types are either .txt or .csv \n"
"exiting...")
sys.exit()
return search_keyword
# make directories
def create_directories(self, main_directory, dir_name, thumbnail, thumbnail_only):
dir_name_thumbnail = dir_name + " - thumbnail"
# make a search keyword directory
try:
if not os.path.exists(main_directory):
os.makedirs(main_directory)
time.sleep(0.15)
path = (dir_name)
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail or thumbnail_only:
sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
else:
path = (dir_name)
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail or thumbnail_only:
sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
except OSError as e:
if e.errno != 17:
raise
pass
return
# Download Image thumbnails
def download_image_thumbnail(self, image_url, main_directory, dir_name, return_image_name, print_urls,
socket_timeout, print_size, no_download, save_source, img_src, ignore_urls):
if print_urls or no_download:
print("Image URL: " + image_url)
if no_download:
return "success", "Printed url without downloading"
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
path = main_directory + "/" + dir_name + " - thumbnail" + "/" + return_image_name
try:
output_file = open(path, 'wb')
output_file.write(data)
output_file.close()
if save_source:
list_path = main_directory + "/" + save_source + ".txt"
list_file = open(list_path, 'a')
list_file.write(path + '\t' + img_src + '\n')
list_file.close()
except OSError as e:
download_status = 'fail'
download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
except IOError as e:
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
download_status = 'success'
download_message = "Completed Image Thumbnail ====> " + return_image_name
# image size parameter
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return download_status, download_message
# Download Images
def download_image(self, image_url, image_format, main_directory, dir_name, count, print_urls, socket_timeout,
prefix, print_size, no_numbering, no_download, save_source, img_src, silent_mode, thumbnail_only,
format, ignore_urls):
if not silent_mode:
if print_urls or no_download:
print("Image URL: " + image_url)
if ignore_urls:
if any(url in image_url for url in ignore_urls.split(',')):
return "fail", "Image ignored due to 'ignore url' parameter", None, image_url
if thumbnail_only:
return "success", "Skipping image download...", str(image_url[(image_url.rfind('/')) + 1:]), image_url
if no_download:
return "success", "Printed url without downloading", None, image_url
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
info = response.info()
response.close()
qmark = image_url.rfind('?')
if qmark == -1:
qmark = len(image_url)
slash = image_url.rfind('/', 0, qmark) + 1
image_name = str(image_url[slash:qmark]).lower()
type = info.get_content_type()
if type == "image/jpeg" or type == "image/jpg":
if not image_name.endswith(".jpg") and not image_name.endswith(".jpeg"):
image_name += ".jpg"
elif type == "image/png":
if not image_name.endswith(".png"):
image_name += ".png"
elif type == "image/webp":
if not image_name.endswith(".webp"):
image_name += ".webp"
elif type == "image/gif":
if not image_name.endswith(".gif"):
image_name += ".gif"
elif type == "image/bmp" or type == "image/x-windows-bmp":
if not image_name.endswith(".bmp"):
image_name += ".bmp"
elif type == "image/x-icon" or type == "image/vnd.microsoft.icon":
if not image_name.endswith(".ico"):
image_name += ".ico"
elif type == "image/svg+xml":
if not image_name.endswith(".svg"):
image_name += ".svg"
else:
download_status = 'fail'
download_message = "Invalid image format '" + type + "'. Skipping..."
return_image_name = ''
absolute_path = ''
return download_status, download_message, return_image_name, absolute_path
# prefix name in image
if prefix:
prefix = prefix + " "
else:
prefix = ''
if no_numbering:
path = main_directory + "/" + dir_name + "/" + prefix + image_name
else:
path = main_directory + "/" + dir_name + "/" + prefix + str(count) + "." + image_name
try:
output_file = open(path, 'wb')
output_file.write(data)
output_file.close()
if save_source:
list_path = main_directory + "/" + save_source + ".txt"
list_file = open(list_path, 'a')
list_file.write(path + '\t' + img_src + '\n')
list_file.close()
absolute_path = os.path.abspath(path)
except OSError as e:
download_status = 'fail'
download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
# return image name back to calling method to use it for thumbnail downloads
download_status = 'success'
download_message = "Completed Image ====> " + prefix + str(count) + "." + image_name
return_image_name = prefix + str(count) + "." + image_name
# image size parameter
if not silent_mode:
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except BadStatusLine as e:
download_status = 'fail'
download_message = "BadStatusLine on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IncompleteRead as e:
download_status = 'fail'
download_message = "IncompleteReadError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
return download_status, download_message, return_image_name, absolute_path
    def _get_all_items(self, image_objects, main_directory, dir_name, limit, arguments):
        """Format and download every image object, up to *limit* images.

        Returns a tuple (items, errorCount, abs_path) where *items* holds the
        formatted metadata dicts of the successfully handled images,
        *errorCount* the number of failed downloads and *abs_path* the
        absolute paths of the files written.  Honors the 'offset', 'metadata',
        'thumbnail', 'thumbnail_only' and 'delay' entries of *arguments*.
        """
        items = []
        abs_path = []
        errorCount = 0
        # i indexes image_objects; count is the 1-based tally that drives
        # both the limit and the offset handling below
        i = 0
        count = 1
        while count < limit + 1 and i < len(image_objects):
            # NOTE(review): this branch looks unreachable — the loop condition
            # already guarantees i < len(image_objects), hence a non-empty list.
            if len(image_objects) == 0:
                print("no_links")
                break
            #code added here to attempt to implement offset correctly
            #was "count < int(arguments['offset'])" in hardikvasa code, this seems
            # to be contrary to the implementation details.
            elif arguments['offset'] and count <= int(arguments['offset']):
                count += 1
                #page = page[end_content:]
            else:
                # format the item for readability
                object = self.format_object(image_objects[i])
                if arguments['metadata']:
                    if not arguments["silent_mode"]:
                        print("\nImage Metadata: " + str(object))
                # download the images
                download_status, download_message, return_image_name, absolute_path = self.download_image(
                    object['image_link'], object['image_format'], main_directory, dir_name, count,
                    arguments['print_urls'], arguments['socket_timeout'], arguments['prefix'], arguments['print_size'],
                    arguments['no_numbering'], arguments['no_download'], arguments['save_source'],
                    object['image_source'], arguments["silent_mode"], arguments["thumbnail_only"], arguments['format'],
                    arguments['ignore_urls'])
                if not arguments["silent_mode"]:
                    print(download_message)
                if download_status == "success":
                    # download image_thumbnails
                    if arguments['thumbnail'] or arguments["thumbnail_only"]:
                        download_status, download_message_thumbnail = self.download_image_thumbnail(
                            object['image_thumbnail_url'], main_directory, dir_name, return_image_name,
                            arguments['print_urls'], arguments['socket_timeout'], arguments['print_size'],
                            arguments['no_download'], arguments['save_source'], object['image_source'],
                            arguments['ignore_urls'])
                        if not arguments["silent_mode"]:
                            print(download_message_thumbnail)
                    count += 1
                    object['image_filename'] = return_image_name
                    items.append(object)  # Append all the links in the list named 'Links'
                    abs_path.append(absolute_path)
                else:
                    errorCount += 1
                # delay param
                if arguments['delay']:
                    time.sleep(int(arguments['delay']))
            i += 1
        if count < limit:
            print("\n\nUnfortunately all " + str(
                limit) + " could not be downloaded because some images were not downloadable. " + str(
                count - 1) + " is all we got for this search filter!")
        return items, errorCount, abs_path
# Bulk Download
def download(self, arguments):
paths_agg = {}
# for input coming from other python files
if __name__ != "__main__":
# if the calling file contains config_file param
if 'config_file' in arguments:
records = []
json_file = json.load(open(arguments['config_file']))
for record in range(0, len(json_file['Records'])):
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in json_file['Records'][record].items():
arguments[key] = value
records.append(arguments)
total_errors = 0
for rec in records:
paths, errors = self.download_executor(rec)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
print(paths.encode('raw_unicode_escape').decode('utf-8'))
total_errors = total_errors + errors
return paths_agg, total_errors
# if the calling file contains params directly
else:
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
print(paths.encode('raw_unicode_escape').decode('utf-8'))
return paths_agg, errors
# for input coming from CLI
else:
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
print(paths.encode('raw_unicode_escape').decode('utf-8'))
return paths_agg, errors
    def download_executor(self, arguments):
        """Run one download job described by *arguments* (a dict of CLI args).

        Validates the argument combination, expands prefix/suffix keywords,
        builds the search URL(s), scrapes the result page(s) and downloads
        the images (plus related-keyword images and metadata logs when
        requested).  Returns (paths, errors) where *paths* maps each search
        term to the list of absolute file paths downloaded for it.
        """
        paths = {}
        errorCount = None
        # fill in every known argument with None so lookups below never KeyError
        for arg in args_list:
            if arg not in arguments:
                arguments[arg] = None
        ######Initialization and Validation of user arguments
        if arguments['keywords']:
            search_keyword = [str(item) for item in arguments['keywords'].split(',')]
        if arguments['keywords_from_file']:
            search_keyword = self.keywords_from_file(arguments['keywords_from_file'])
        # both time and time range should not be allowed in the same query
        if arguments['time'] and arguments['time_range']:
            raise ValueError(
                'Either time or time range should be used in a query. Both cannot be used at the same time.')
        # both size and exact size should not be allowed in the same query
        if arguments['size'] and arguments['exact_size']:
            raise ValueError(
                'Either "size" or "exact_size" should be used in a query. Both cannot be used at the same time.')
        # both image directory and no image directory should not be allowed in the same query
        if arguments['image_directory'] and arguments['no_directory']:
            raise ValueError('You can either specify image directory or specify no image directory, not both!')
        # Additional words added to keywords
        if arguments['suffix_keywords']:
            suffix_keywords = [" " + str(sk) for sk in arguments['suffix_keywords'].split(',')]
        else:
            suffix_keywords = ['']
        # Additional words added to keywords
        if arguments['prefix_keywords']:
            prefix_keywords = [str(sk) + " " for sk in arguments['prefix_keywords'].split(',')]
        else:
            prefix_keywords = ['']
        # Setting limit on number of images to be downloaded
        if arguments['limit']:
            limit = int(arguments['limit'])
        else:
            limit = 100
        # direct-URL / similar-images jobs have no keyword: use a timestamp
        # as the folder name instead
        if arguments['url']:
            current_time = str(datetime.datetime.now()).split('.')[0]
            search_keyword = [current_time.replace(":", "_")]
        if arguments['similar_images']:
            current_time = str(datetime.datetime.now()).split('.')[0]
            search_keyword = [current_time.replace(":", "_")]
        # If single_image or url argument not present then keywords is mandatory argument
        if arguments['single_image'] is None and arguments['url'] is None and arguments['similar_images'] is None and \
                arguments['keywords'] is None and arguments['keywords_from_file'] is None:
            print('-------------------------------\n'
                  'Uh oh! Keywords is a required argument \n\n'
                  'Please refer to the documentation on guide to writing queries \n'
                  'https://github.com/hardikvasa/google-images-download#examples'
                  '\n\nexiting!\n'
                  '-------------------------------')
            sys.exit()
        # If this argument is present, set the custom output directory
        if arguments['output_directory']:
            main_directory = arguments['output_directory']
        else:
            main_directory = "downloads"
        # Proxy settings
        if arguments['proxy']:
            os.environ["http_proxy"] = arguments['proxy']
            os.environ["https_proxy"] = arguments['proxy']
        # Add time range to keywords if asked
        time_range = ''
        if arguments['time_range']:
            json_acceptable_string = arguments['time_range'].replace("'", "\"")
            d = json.loads(json_acceptable_string)
            time_range = ' after:' + d['time_min'] + ' before:' + d['time_max']
        ######Initialization Complete
        total_errors = 0
        for pky in prefix_keywords:  # 1.for every prefix keywords
            for sky in suffix_keywords:  # 2.for every suffix keywords
                i = 0
                while i < len(search_keyword):  # 3.for every main keyword
                    iteration = "\n" + "Item no.: " + str(i + 1) + " -->" + " Item name = " + (pky) + (
                        search_keyword[i]) + (sky)
                    if not arguments["silent_mode"]:
                        print(iteration.encode('raw_unicode_escape').decode('utf-8'))
                        print("Evaluating...")
                    else:
                        print("Downloading images for: " + (pky) + (search_keyword[i]) + (sky) + " ...")
                    search_term = pky + search_keyword[i] + sky
                    if arguments['image_directory']:
                        dir_name = arguments['image_directory']
                    elif arguments['no_directory']:
                        dir_name = ''
                    else:
                        dir_name = search_term + (
                            '-' + arguments['color'] if arguments['color'] else '')  # sub-directory
                    if not arguments["no_download"]:
                        self.create_directories(main_directory, dir_name, arguments['thumbnail'],
                                                arguments['thumbnail_only'])  # create directories in OS
                    params = self.build_url_parameters(arguments)  # building URL with params
                    search_term += time_range
                    url = self.build_search_url(search_term, params, arguments['url'], arguments['similar_images'],
                                                arguments['specific_site'],
                                                arguments['safe_search'])  # building main search url
                    # up to ~100 images fit on the plain result page; beyond
                    # that selenium scrolling (download_extended_page) is needed
                    if limit < 101:
                        images, tabs = self.download_page(url)  # download page
                    else:
                        images, tabs = self.download_extended_page(url, arguments['chromedriver'])
                    if not arguments["silent_mode"]:
                        if arguments['no_download']:
                            print("Getting URLs without downloading images...")
                        else:
                            print("Starting Download...")
                    items, errorCount, abs_path = self._get_all_items(images, main_directory, dir_name, limit,
                                                                      arguments)  # get all image items and download images
                    paths[pky + search_keyword[i] + sky] = abs_path
                    # dumps into a json file
                    if arguments['extract_metadata']:
                        try:
                            if not os.path.exists("logs"):
                                os.makedirs("logs")
                        except OSError as e:
                            print(e)
                        json_file = open("logs/" + search_keyword[i] + ".json", "w")
                        json.dump(items, json_file, indent=4, sort_keys=True)
                        json_file.close()
                    # Related images
                    if arguments['related_images']:
                        print("\nGetting list of related keywords...this may take a few moments")
                        for key, value in tabs.items():
                            final_search_term = (search_term + " - " + key)
                            print("\nNow Downloading - " + final_search_term)
                            if limit < 101:
                                images, _ = self.download_page(value)  # download page
                            else:
                                images, _ = self.download_extended_page(value, arguments['chromedriver'])
                            self.create_directories(main_directory, final_search_term, arguments['thumbnail'],
                                                    arguments['thumbnail_only'])
                            self._get_all_items(images, main_directory, search_term + " - " + key, limit, arguments)
                    i += 1
                    total_errors = total_errors + errorCount
                    if not arguments["silent_mode"]:
                        print("\nErrors: " + str(errorCount) + "\n")
        return paths, total_errors
# ------------- Main Program -------------#
def main():
    """Command-line entry point: run every record returned by user_input()."""
    records = user_input()
    total_errors = 0
    t0 = time.time()  # start the timer
    for arguments in records:
        if arguments['single_image']:  # Download Single Image using a URL
            googleimagesdownload().single_image(arguments['single_image'])
        else:  # or download multiple images based on keywords/keyphrase search
            paths, errors = googleimagesdownload().download(arguments)
            total_errors = total_errors + errors
    t1 = time.time()  # stop the timer
    total_time = t1 - t0  # total crawl/find/download wall-clock time
    if not arguments["silent_mode"]:
        print("\nEverything downloaded!")
        print("Total errors: " + str(total_errors))
        print("Total time taken: " + str(total_time) + " Seconds")
# Entry-point guard: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 50.314114 | 184 | 0.535927 |
e7ed4c2dbf01570225fb13554d97f130c296fe9d | 505 | py | Python | cride/circles/urls.py | DavidBarcenas/django-advance-course | 9cff5750d92d5039e84541c0799f72dbd361ac43 | [
"MIT"
] | null | null | null | cride/circles/urls.py | DavidBarcenas/django-advance-course | 9cff5750d92d5039e84541c0799f72dbd361ac43 | [
"MIT"
] | null | null | null | cride/circles/urls.py | DavidBarcenas/django-advance-course | 9cff5750d92d5039e84541c0799f72dbd361ac43 | [
"MIT"
] | null | null | null | from django.urls import path
from django.urls.conf import include
from rest_framework.routers import DefaultRouter
from .views import circles as circle_views
from .views import memberships as membership_views
# DRF router: generates the standard list/retrieve/create/... URL patterns
# for each registered viewset.
router = DefaultRouter()
router.register(r'circles', circle_views.CircleViewSet, basename='circle')
# Nested resource: memberships live under /circles/<slug_name>/members/.
router.register(
    r'circles/(?P<slug_name>[a-zA-Z0-9_]+)/members',
    membership_views.MembershipViewSet,
    basename='membership'
)

# Expose every router-generated route at the URLconf root.
urlpatterns = [
    path('', include(router.urls))
]
| 25.25 | 74 | 0.768317 |
1874e1e47be6a911609fc38d242a7806cad93db8 | 3,508 | py | Python | ncovt_sample.py | Prateek23n/COVID19 | f0099fa1584586ea567c17df979fc6234f71f203 | [
"MIT"
] | null | null | null | ncovt_sample.py | Prateek23n/COVID19 | f0099fa1584586ea567c17df979fc6234f71f203 | [
"MIT"
] | null | null | null | ncovt_sample.py | Prateek23n/COVID19 | f0099fa1584586ea567c17df979fc6234f71f203 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import datetime
import time
import sqlite3
conn = sqlite3.connect('ncov.db')
c = conn.cursor()
#c.execute("INSERT INTO nCOV_test VALUES(976363)")
now = datetime.datetime.now()
print ("Data as on - " + str(now.strftime("%d/%m/%Y %H:%M:%S")))
URL = 'https://www.icmr.gov.in/'
headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
page = requests.get(URL,headers=headers).text
soup = BeautifulSoup(page, 'html.parser')
print(soup)
print("Finding Tests in India...")
time.sleep(1)
tests = soup.find('span', class_='counter').text
print(tests)
#print(soup)
#print(tests.content)
print("Tests Conducted:"+ str(tests[:7])+"\n")
#c.execute('CREATE TABLE IF NOT EXISTS nCOV_test(test INT)')
#c.execute("INSERT INTO nCOV_test VALUES(1107233)")
#c.execute("DELETE FROM nCOV_test WHERE test='1107233'")
#c.execute("INSERT INTO nCOV_test VALUES(1046450)")
conn.commit()
c.execute("SELECT * FROM nCOV_test")
month=""
for i in c.fetchall():
print(i[0])
if(now.month==1):
month="January"
elif(now.month==2):
month="February"
elif(now.month==3):
month="March"
elif(now.month==4):
month="April"
elif(now.month==5):
month="May"
elif(now.month==6):
month="June"
elif(now.month==7):
month="July"
elif(now.month==8):
month="August"
elif(now.month==9):
month="September"
elif(now.month==10):
month="October"
elif(now.month==11):
month="November"
elif(now.month==12):
month="December"
time.sleep(2)
day=0
m=0
if(now.day==1 and (now.month==4 or now.month==6 or now.month==9 or now.month==11)):
day=31
m=now.month-1
elif(now.day==1 and (now.month==1 or now.month==3 or now.month==5 or now.month==7 or now.month==8 or now.month==10 or now.month==12)):
day=30
m=now.month-1
else:
day=now.day-1
m=now.month
#Equivalent number for month
str_m=""
if(m==1):
str_m="January"
elif(m==2):
str_m="February"
elif(m==3):
str_m="March"
elif(m==4):
str_m="April"
elif(m==5):
str_m="May"
elif(m==6):
str_m="June"
elif(m==7):
str_m="July"
elif(m==8):
str_m="August"
elif(m==9):
str_m="September"
elif(m==10):
str_m="October"
elif(m==11):
str_m="November"
elif(m==12):
str_m="December"
num_test=int(tests[:7])
test_day=num_test-i[0]
print("Tests done on "+ str(day)+" "+str(str_m)+":"+str(test_day))
c.execute("SELECT * FROM nCOVt")
indexing_t=[]
x_test=[]
for i in c.fetchall():
indexing_t.append(i[2])
x_test.append(i[1])
print(x_test)
choice=input("Enter y or n:")
check=test_day not in x_test
print("Result:",x_test)
if(choice=='y' and test_day not in x_test):
print("Data to be added:",str(day)+" "+str(str_m[0]),test_day,len(indexing_t))
c.execute("INSERT INTO nCOVt(dt,test,ind) VALUES(?,?,?)",(str(day)+str(str_m[0]),test_day,len(indexing_t)))
conn.commit()
#query="UPDATE nCOV_test SET test='%d' WHERE test='%d'"
#data=[int(tests.text[:7]),i[0]]
#print(type(data))
c.execute("INSERT INTO nCOV_test(test) VALUES(?)",(num_test,))
print("Values Updated...")
time.sleep(2)
conn.commit()
c.execute("SELECT * FROM nCOV_test")
for i in c.fetchall():
print("Number of tests:",i[0])
#conn.commit()
c.close()
conn.close()
| 27.193798 | 135 | 0.6126 |
73f65b720a969f8426e34b506bbb408763dd5ced | 804 | py | Python | examples/configs/sac_config.py | patrickhart/jaxdl | 032df55292410c2976703213e67fff7bcafaedbe | [
"MIT"
] | 1 | 2022-02-09T09:19:40.000Z | 2022-02-09T09:19:40.000Z | examples/configs/sac_config.py | patrickhart/jaxdl | 032df55292410c2976703213e67fff7bcafaedbe | [
"MIT"
] | null | null | null | examples/configs/sac_config.py | patrickhart/jaxdl | 032df55292410c2976703213e67fff7bcafaedbe | [
"MIT"
] | null | null | null | import ml_collections
from jaxdl.rl.networks.actor_nets import create_normal_dist_policy_fn
from jaxdl.rl.networks.critic_nets import create_double_critic_network_fn
from jaxdl.rl.networks.temperature_nets import create_temperature_network_fn
def get_config():
  """Build the default SAC hyperparameter configuration.

  Returns:
    An ml_collections.ConfigDict holding network factories, learning rates,
    and SAC-specific hyperparameters.
  """
  cfg = ml_collections.ConfigDict()
  cfg.algorithm = 'SAC'
  # Network factories (two 256-unit hidden layers each).
  cfg.critic_net_fn = create_double_critic_network_fn(hidden_dims=[256, 256])
  cfg.actor_net_fn = create_normal_dist_policy_fn(hidden_dims=[256, 256])
  cfg.temperature_net_fn = create_temperature_network_fn()
  # Optimizer learning rates.
  cfg.actor_lr = 3e-4
  cfg.critic_lr = 3e-4
  cfg.temperature_lr = 3e-4
  # Soft target-network update coefficient and discount factor.
  cfg.tau = 0.005
  cfg.discount = 0.99
  cfg.target_update_period = 1
  cfg.target_entropy = None
  cfg.replay_buffer_size = 10000
  return cfg
6bb6286f53cd919699c47b88ed20c921f608077b | 806 | py | Python | modules/app_settings.py | Nstalgic/Artemis | fb9ab745b08281268737a929273f631db5eb27b3 | [
"MIT"
] | 7 | 2021-06-17T14:51:14.000Z | 2021-09-20T05:54:05.000Z | modules/app_settings.py | Nstalgic/Artemis | fb9ab745b08281268737a929273f631db5eb27b3 | [
"MIT"
] | 2 | 2021-06-17T14:07:28.000Z | 2022-02-25T10:03:16.000Z | modules/app_settings.py | Nstalgic/Artemis | fb9ab745b08281268737a929273f631db5eb27b3 | [
"MIT"
] | 1 | 2021-11-10T13:23:12.000Z | 2021-11-10T13:23:12.000Z | """
///////////////////////////////////////////////////////////////
APPLICATION BY: MICHAEL HUFF
Artemis created with: Qt Designer and PySide6
JUNE 10, 2021
V: 1.0.0
///////////////////////////////////////////////////////////////
"""
class Settings:
    """Static UI configuration constants for the application window."""

    # Use the app's own custom title bar instead of the OS-native one.
    ENABLE_CUSTOM_TITLE_BAR = True
    # Widths (px) of the side menu and the collapsible left/right panels.
    MENU_WIDTH = 240
    LEFT_BOX_WIDTH = 240
    RIGHT_BOX_WIDTH = 240
    # Duration (ms) of panel open/close animations.
    TIME_ANIMATION = 500
    # BTNS LEFT AND RIGHT BOX COLORS (Qt stylesheet fragments)
    BTN_LEFT_BOX_COLOR = "background-color: rgb(44, 49, 58);"
    BTN_RIGHT_BOX_COLOR = "background-color: rgb(44, 49, 58);"
    # MENU SELECTED STYLESHEET: highlight applied to the active menu entry.
    MENU_SELECTED_STYLESHEET = """
    border-left: 22px solid qlineargradient(spread:pad, x1:0.034, y1:0, x2:0.216, y2:0, stop:0.499 rgba(44, 49, 58), stop:0.5 rgba(44, 49, 58));
    background-color: rgb(44, 49, 58);
    """
| 29.851852 | 144 | 0.565757 |
3a6fb7b9b7c9577ec500e2a9d97b1d4be382154e | 285 | py | Python | crawler/crawler/utils/get_links_from_txt.py | tienduy-nguyen/ecommerce | fede82c80325670e5706efbc0877509cbe6d4fea | [
"MIT"
] | 5 | 2021-07-29T19:10:45.000Z | 2022-03-16T22:30:01.000Z | crawler/crawler/utils/get_links_from_txt.py | tienduy-nguyen/ecommerce | fede82c80325670e5706efbc0877509cbe6d4fea | [
"MIT"
] | null | null | null | crawler/crawler/utils/get_links_from_txt.py | tienduy-nguyen/ecommerce | fede82c80325670e5706efbc0877509cbe6d4fea | [
"MIT"
] | 2 | 2022-01-23T04:19:58.000Z | 2022-03-18T14:02:15.000Z | def get_links_from_txt(file):
links = []
with open(file, "r") as f:
try:
lines = f.readlines()
for line in lines:
if line.rstrip():
links.append(line.strip())
except:
pass
return links
| 23.75 | 46 | 0.459649 |
72909c64998a7b1b34eeef870c312ae1f1e5be41 | 210 | py | Python | twitter/cache.py | BattleWoLFz99/Twitchain | 6f12aa932488063619482f0ffc1658064151e091 | [
"MIT"
] | null | null | null | twitter/cache.py | BattleWoLFz99/Twitchain | 6f12aa932488063619482f0ffc1658064151e091 | [
"MIT"
] | 1 | 2021-05-27T11:03:53.000Z | 2021-05-27T11:03:53.000Z | twitter/cache.py | BattleWoLFz99/Twitchain | 6f12aa932488063619482f0ffc1658064151e091 | [
"MIT"
] | null | null | null | # memcached
FOLLOWINGS_PATTERN = 'followings:{user_id}'
USER_PROFILE_PATTERN = 'userprofile:{user_id}'
# redis
USER_TWEETS_PATTERN = 'user_tweets:{user_id}'
USER_NEWSFEEDS_PATTERN = 'user_newsfeeds:{user_id}'
| 26.25 | 51 | 0.795238 |
0156097955c3092bb422a51aee9e2c93064ff0b3 | 21,056 | py | Python | Packs/Elasticsearch/Integrations/Elasticsearch_v2/Elasticsearch_v2_test.py | SergeBakharev/content | d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf | [
"MIT"
] | 1 | 2022-03-05T02:23:32.000Z | 2022-03-05T02:23:32.000Z | Packs/Elasticsearch/Integrations/Elasticsearch_v2/Elasticsearch_v2_test.py | SergeBakharev/content | d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf | [
"MIT"
] | 42 | 2022-03-11T10:52:26.000Z | 2022-03-31T01:50:42.000Z | Packs/Elasticsearch/Integrations/Elasticsearch_v2/Elasticsearch_v2_test.py | SergeBakharev/content | d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf | [
"MIT"
] | 2 | 2021-12-13T13:07:21.000Z | 2022-03-05T02:23:34.000Z | from datetime import datetime
from unittest.mock import patch
from dateutil.parser import parse
import requests
import unittest
from unittest import mock
"""MOCKED RESPONSES"""
ES_V6_RESPONSE = {
'took': 1,
'timed_out': False,
'_shards': {
'total': 5,
'successful': 5,
'skipped': 0,
'failed': 0
},
'hits': {
'total': 17,
'max_score': 1.3862944,
'hits': [
{
'_index': 'users',
'_type': '_doc',
'_id': '123',
'_score': 1.3862944,
'_source': {
'Date': '2019-08-29T14:45:00Z'
}
}, {
'_index': 'users',
'_type': '_doc',
'_id': '456',
'_score': 0.9517491,
'_source': {
'Date': '2019-08-29T14:46:00Z'
}
}
]
}
}
ES_V7_RESPONSE = {
'took': 1,
'timed_out': False,
'_shards': {
'total': 1,
'successful': 1,
'skipped': 0,
'failed': 0
},
'hits': {
'total': {
'value': 9,
'relation': 'eq'
},
'max_score': 0.6814878,
'hits': [
{
'_index': 'customer',
'_type': 'doc',
'_id': '123',
'_score': 0.6814878,
'_source': {
'Date': '2019-08-27T18:00:00Z'
}
}, {
'_index': 'customer',
'_type': 'doc',
'_id': '456',
'_score': 0.6814878,
'_source': {
'Date': '2019-08-27T18:01:00Z'
}
}
]
}
}
MOCK_ES7_SEARCH_CONTEXT = str({
'Server': '',
'Index': 'customer',
'Query': 'check',
'Page': 0,
'Size': 2,
'total': {
'value': 9,
'relation': 'eq'
},
'max_score': 0.6814878,
'took': 1,
'timed_out': False,
'Results': [
{
'_index': 'customer',
'_type': 'doc',
'_id': '123',
'_score': 0.6814878,
'_source': {'Date': '2019-08-27T18:00:00Z'}
},
{
'_index': 'customer',
'_type': 'doc',
'_id': '456',
'_score': 0.6814878,
'_source': {'Date': '2019-08-27T18:01:00Z'}
}
]
})
MOCK_ES7_HIT_CONTEXT = str([
{
'_index': 'customer',
'_id': '123',
'_type': 'doc',
'_score': 0.6814878,
'Date': '2019-08-27T18:00:00Z'
},
{
'_index': 'customer',
'_id': '456',
'_type': 'doc',
'_score': 0.6814878,
'Date': '2019-08-27T18:01:00Z'
}
])
MOCK_ES6_SEARCH_CONTEXT = str({
'Server': '',
'Index': 'users',
'Query': 'incident',
'Page': 0,
'Size': 2,
'total': {
'value': 17
},
'max_score': 1.3862944,
'took': 1,
'timed_out': False,
'Results': [
{
'_index': 'users',
'_type': '_doc',
'_id': '123',
'_score': 1.3862944,
'_source': {'Date': '2019-08-29T14:45:00Z'}
},
{
'_index': 'users',
'_type': '_doc',
'_id': '456',
'_score': 0.9517491,
'_source': {'Date': '2019-08-29T14:46:00Z'}
}
]
})
MOCK_ES6_HIT_CONTEXT = str([
{
'_index': 'users',
'_id': '123',
'_type': '_doc',
'_score': 1.3862944,
'Date': '2019-08-29T14:45:00Z'
},
{
'_index': 'users',
'_id': '456',
'_type': '_doc',
'_score': 0.9517491,
'Date': '2019-08-29T14:46:00Z'
}
])
MOCK_ES7_INCIDENTS = str([
{
'name': 'Elasticsearch: Index: customer, ID: 123',
'rawJSON': '{'
'"_index": "customer", '
'"_type": "doc", '
'"_id": "123", '
'"_score": 0.6814878, '
'"_source": {"Date": "2019-08-27T18:00:00Z"}'
'}',
'labels': [
{
'type': 'Date',
'value': '2019-08-27T18:00:00Z'
}
],
'occurred': '2019-08-27T18:00:00Z'
}, {
'name': 'Elasticsearch: Index: customer, ID: 456',
'rawJSON': '{'
'"_index": "customer", '
'"_type": "doc", '
'"_id": "456", '
'"_score": 0.6814878, '
'"_source": {"Date": "2019-08-27T18:01:00Z"}'
'}',
'labels': [
{
'type': 'Date',
'value': '2019-08-27T18:01:00Z'
}
],
'occurred': '2019-08-27T18:01:00Z'
}
])
MOCK_ES6_INCIDETNS = str([
{
'name': 'Elasticsearch: Index: users, ID: 123',
'rawJSON': '{'
'"_index": "users", '
'"_type": "_doc", '
'"_id": "123", '
'"_score": 1.3862944, '
'"_source": {"Date": "2019-08-29T14:45:00Z"}'
'}',
'labels':
[
{
'type': 'Date',
'value': '2019-08-29T14:45:00Z'
}
],
'occurred': '2019-08-29T14:45:00Z'
}, {
'name': 'Elasticsearch: Index: users, ID: 456',
'rawJSON': '{'
'"_index": "users", '
'"_type": "_doc", '
'"_id": "456", '
'"_score": 0.9517491, '
'"_source": {"Date": "2019-08-29T14:46:00Z"}'
'}',
'labels':
[
{
'type': 'Date',
'value': '2019-08-29T14:46:00Z'
}
],
'occurred': '2019-08-29T14:46:00Z'
}
])
ES_V7_RESPONSE_WITH_TIMESTAMP = {
'took': 1,
'timed_out': False,
'_shards': {
'total': 1,
'successful': 1,
'skipped': 0,
'failed': 0
},
'hits': {
'total': {
'value': 9,
'relation': 'eq'
},
'max_score': 0.6814878,
'hits': [
{
'_index': 'customer',
'_type': 'doc',
'_id': '123',
'_score': 0.6814878,
'_source': {
'Date': '1572502634'
}
}, {
'_index': 'customer',
'_type': 'doc',
'_id': '456',
'_score': 0.6814878,
'_source': {
'Date': '1572502640'
}
}
]
}
}
MOCK_ES7_INCIDENTS_FROM_TIMESTAMP = str([
{
'name': 'Elasticsearch: Index: customer, ID: 123',
'rawJSON': '{'
'"_index": "customer", '
'"_type": "doc", '
'"_id": "123", '
'"_score": 0.6814878, '
'"_source": {"Date": "1572502634"}'
'}',
'labels': [
{
'type': 'Date',
'value': '1572502634'
}
],
'occurred': '2019-10-31T06:17:14Z'
}, {
'name': 'Elasticsearch: Index: customer, ID: 456',
'rawJSON': '{'
'"_index": "customer", '
'"_type": "doc", '
'"_id": "456", '
'"_score": 0.6814878, '
'"_source": {"Date": "1572502640"}'
'}',
'labels': [
{
'type': 'Date',
'value': '1572502640'
}
],
'occurred': '2019-10-31T06:17:20Z'
}
])
MOCK_ES7_SCHEMA_INPUT = {
"bytes": {
"type": "long"
},
"clientip": {
"type": "ip"
}
}
MOCK_ES7_SCHEMA_OUTPUT = {
"bytes": "type: long",
"clientip": "type: ip"
}
MOC_ES7_SERVER_RESPONSE = {
"kibana_sample_data_logs": {
"mappings": {
"properties": {
"@timestamp": {
"type": "alias",
"path": "timestamp"
},
"agent": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"bytes": {
"type": "long"
},
"clientip": {
"type": "ip"
},
"event": {
"properties": {
"dataset": {
"type": "keyword"
}
}
},
"extension": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"geo": {
"properties": {
"coordinates": {
"type": "geo_point"
},
"dest": {
"type": "keyword"
},
"src": {
"type": "keyword"
},
"srcdest": {
"type": "keyword"
}
}
},
"host": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"index": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"ip": {
"type": "ip"
},
"machine": {
"properties": {
"os": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"ram": {
"type": "long"
}
}
},
"memory": {
"type": "double"
},
"message": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"phpmemory": {
"type": "long"
},
"referer": {
"type": "keyword"
},
"request": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"response": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"tags": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"timestamp": {
"type": "date"
},
"url": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"utc_time": {
"type": "date"
}
}
}
}
}
def test_context_creation_es7():
    """ES7 responses report `total` as a dict; context output must match."""
    from Elasticsearch_v2 import results_to_context, get_total_results

    total_dict, _ = get_total_results(ES_V7_RESPONSE)
    search_context, meta_headers, hit_tables, hit_headers = results_to_context(
        'customer', 'check', 0, 2, total_dict, ES_V7_RESPONSE
    )

    assert str(search_context) == MOCK_ES7_SEARCH_CONTEXT
    assert str(meta_headers) == "['Query', 'took', 'timed_out', 'total', 'max_score', 'Server', 'Page', 'Size']"
    assert str(hit_tables) == MOCK_ES7_HIT_CONTEXT
    assert str(hit_headers) == "['_id', '_index', '_type', '_score', 'Date']"
def test_context_creation_es6():
    """ES6 responses report `total` as a bare int; context output must match."""
    from Elasticsearch_v2 import results_to_context, get_total_results

    total_dict, _ = get_total_results(ES_V6_RESPONSE)
    search_context, meta_headers, hit_tables, hit_headers = results_to_context(
        'users', 'incident', 0, 2, total_dict, ES_V6_RESPONSE
    )

    assert str(search_context) == MOCK_ES6_SEARCH_CONTEXT
    assert str(meta_headers) == "['Query', 'took', 'timed_out', 'total', 'max_score', 'Server', 'Page', 'Size']"
    assert str(hit_tables) == MOCK_ES6_HIT_CONTEXT
    assert str(hit_headers) == "['_id', '_index', '_type', '_score', 'Date']"
@patch("Elasticsearch_v2.TIME_METHOD", 'Simple-Date')
@patch("Elasticsearch_v2.TIME_FIELD", 'Date')
@patch("Elasticsearch_v2.FETCH_INDEX", "users")
def test_incident_creation_e6():
    """Datetime-based fetch converts both ES6 hits and advances last_fetch."""
    from Elasticsearch_v2 import results_to_incidents_datetime

    incidents, new_last_fetch = results_to_incidents_datetime(
        ES_V6_RESPONSE, parse('2019-08-29T14:44:00Z')
    )

    assert str(new_last_fetch) == '2019-08-29T14:46:00Z'
    assert str(incidents) == MOCK_ES6_INCIDETNS
@patch("Elasticsearch_v2.TIME_METHOD", 'Simple-Date')
@patch("Elasticsearch_v2.TIME_FIELD", 'Date')
@patch("Elasticsearch_v2.FETCH_INDEX", "customer")
def test_incident_creation_e7():
    """Datetime-based fetch converts both ES7 hits and advances last_fetch."""
    from Elasticsearch_v2 import results_to_incidents_datetime

    incidents, new_last_fetch = results_to_incidents_datetime(
        ES_V7_RESPONSE, parse('2019-08-27T17:59:00')
    )

    assert str(new_last_fetch) == '2019-08-27T18:01:00Z'
    assert str(incidents) == MOCK_ES7_INCIDENTS
@patch("Elasticsearch_v2.TIME_METHOD", 'Timestamp-Seconds')
def test_timestamp_to_date_converter_seconds():
    """A seconds-precision epoch string converts to the expected datetime."""
    from Elasticsearch_v2 import timestamp_to_date

    assert str(timestamp_to_date("1572164838")) == "2019-10-27 08:27:18"
@patch("Elasticsearch_v2.TIME_METHOD", 'Timestamp-Milliseconds')
def test_timestamp_to_date_converter_milliseconds():
    """A milliseconds-precision epoch string keeps its sub-second component."""
    from Elasticsearch_v2 import timestamp_to_date

    assert str(timestamp_to_date("1572164838123")) == "2019-10-27 08:27:18.123000"
@patch("Elasticsearch_v2.TIME_METHOD", 'Timestamp-Seconds')
@patch("Elasticsearch_v2.TIME_FIELD", 'Date')
@patch("Elasticsearch_v2.FETCH_INDEX", "customer")
def test_incident_creation_with_timestamp_e7():
    """Timestamp-based fetch returns an epoch-second last_fetch and ISO `occurred`."""
    from Elasticsearch_v2 import results_to_incidents_timestamp

    last_fetch = int(datetime.strptime('2019-08-27T17:59:00Z', '%Y-%m-%dT%H:%M:%SZ').timestamp())
    incidents, new_last_fetch = results_to_incidents_timestamp(
        ES_V7_RESPONSE_WITH_TIMESTAMP, last_fetch
    )

    assert new_last_fetch == 1572502640
    assert str(incidents) == MOCK_ES7_INCIDENTS_FROM_TIMESTAMP
def test_format_to_iso():
    """All offset variants normalize to the trailing-'Z' form; the target form is idempotent."""
    from Elasticsearch_v2 import format_to_iso

    expected = "2020-02-03T10:00:00Z"
    for date_string in (
        "2020-02-03T10:00:00",
        "2020-02-03T10:00:00+02:00",
        "2020-02-03T10:00:00-02:00",
        expected,
    ):
        assert format_to_iso(date_string) == expected
@patch("Elasticsearch_v2.USERNAME", "mock")
@patch("Elasticsearch_v2.PASSWORD", "demisto")
@patch("Elasticsearch_v2.FETCH_INDEX", "customer")
def test_elasticsearch_builder_called_with_username_password(mocker):
    """With USERNAME/PASSWORD set, the client is built with http_auth and no api_key."""
    from elasticsearch import Elasticsearch
    from Elasticsearch_v2 import elasticsearch_builder

    es_mock = mocker.patch.object(Elasticsearch, '__init__', return_value=None)
    elasticsearch_builder(None)

    _, kwargs = es_mock.call_args
    assert kwargs.get('http_auth') == ('mock', 'demisto')
    assert kwargs.get('api_key') is None
def test_elasticsearch_builder_called_with_no_creds(mocker):
    """Without credentials configured, neither http_auth nor api_key is passed."""
    from elasticsearch import Elasticsearch
    from Elasticsearch_v2 import elasticsearch_builder

    es_mock = mocker.patch.object(Elasticsearch, '__init__', return_value=None)
    elasticsearch_builder(None)

    _, kwargs = es_mock.call_args
    assert kwargs.get('http_auth') is None
    assert kwargs.get('api_key') is None
def test_elasticsearch_parse_subtree():
    """parse_subtree flattens a mapping subtree into "key: value" strings."""
    from Elasticsearch_v2 import parse_subtree

    assert str(parse_subtree(MOCK_ES7_SCHEMA_INPUT)) == str(MOCK_ES7_SCHEMA_OUTPUT)
# This is the class we want to test
'''
The get-mapping-fields command perform a GET /<index name>/_mapping http command
for e.g http://elasticserver.com/customers/_mapping the output is then formatted and arranged by the parse-tree function
The test created a mock response.
'''
class GetMapping:
    """Thin helper that GETs a URL and returns the decoded JSON body."""

    def fetch_json(self, url):
        return requests.get(url).json()
# This method will be used by the mock to replace requests.get
@patch("Elasticsearch_v2.FETCH_INDEX", "customer")
def mocked_requests_get(*args, **kwargs):
    """Stand-in for requests.get: canned mapping JSON for the expected URL, 404 otherwise."""

    class MockResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return self.json_data

    if args[0] == 'http://someurl.com/' + 'index' + '/_mapping':
        return MockResponse(MOC_ES7_SERVER_RESPONSE, 200)
    return MockResponse(None, 404)
# Our test case class
class GetMappingFields(unittest.TestCase):
    """Exercise GetMapping.fetch_json against a patched requests.get."""

    @mock.patch('requests.get', side_effect=mocked_requests_get)
    def test_fetch(self, mock_get):
        # The mocked transport returns the canned mapping for this exact URL.
        fetched = GetMapping().fetch_json('http://someurl.com/' + 'index' + '/_mapping')
        self.assertEqual(fetched, MOC_ES7_SERVER_RESPONSE)
class TestIncidentLabelMaker(unittest.TestCase):
    """incident_label_maker should map each source field to a {type, value} label."""

    def test_sanity(self):
        from Elasticsearch_v2 import incident_label_maker

        sources = {
            'first_name': 'John',
            'sur_name': 'Snow',
        }

        labels = incident_label_maker(sources)

        self.assertEqual(
            labels,
            [
                {'type': 'first_name', 'value': 'John'},
                {'type': 'sur_name', 'value': 'Snow'},
            ],
        )

    def test_complex_value(self):
        from Elasticsearch_v2 import incident_label_maker

        sources = {
            'name': 'Ash',
            'action': 'catch',
            'targets': ['Pikachu', 'Charmander', 'Squirtle', 'Bulbasaur'],
        }

        labels = incident_label_maker(sources)

        # Non-string values are serialized to their JSON representation.
        self.assertEqual(
            labels,
            [
                {'type': 'name', 'value': 'Ash'},
                {'type': 'action', 'value': 'catch'},
                {'type': 'targets', 'value': '["Pikachu", "Charmander", "Squirtle", "Bulbasaur"]'},
            ],
        )
| 29.782178 | 120 | 0.437358 |
91008bce97f368970f929047b1a8c9688332e8fd | 43,878 | py | Python | arrow/arrow.py | uburuntu/arrow | ada176d510a678b4d4654ec2120726e6bb97248b | [
"Apache-2.0"
] | null | null | null | arrow/arrow.py | uburuntu/arrow | ada176d510a678b4d4654ec2120726e6bb97248b | [
"Apache-2.0"
] | null | null | null | arrow/arrow.py | uburuntu/arrow | ada176d510a678b4d4654ec2120726e6bb97248b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Provides the :class:`Arrow <arrow.arrow.Arrow>` class, an enhanced ``datetime``
replacement.
"""
from __future__ import absolute_import
import calendar
import sys
import warnings
from datetime import datetime, timedelta, tzinfo
from math import trunc
from dateutil import tz as dateutil_tz
from dateutil.relativedelta import relativedelta
from arrow import formatter, locales, parser, util
from arrow.util import Constants
class Arrow(object):
"""An :class:`Arrow <arrow.arrow.Arrow>` object.
Implements the ``datetime`` interface, behaving as an aware ``datetime`` while implementing
additional functionality.
:param year: the calendar year.
:param month: the calendar month.
:param day: the calendar day.
:param hour: (optional) the hour. Defaults to 0.
:param minute: (optional) the minute, Defaults to 0.
:param second: (optional) the second, Defaults to 0.
:param microsecond: (optional) the microsecond. Defaults 0.
:param tzinfo: (optional) A timezone expression. Defaults to UTC.
.. _tz-expr:
Recognized timezone expressions:
- A ``tzinfo`` object.
- A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
- A ``str`` in ISO-8601 style, as in '+07:00'.
- A ``str``, one of the following: 'local', 'utc', 'UTC'.
Usage::
>>> import arrow
>>> arrow.Arrow(2013, 5, 5, 12, 30, 45)
<Arrow [2013-05-05T12:30:45+00:00]>
"""
resolution = datetime.resolution
_ATTRS = ["year", "month", "day", "hour", "minute", "second", "microsecond"]
_ATTRS_PLURAL = ["{}s".format(a) for a in _ATTRS]
_MONTHS_PER_QUARTER = 3
def __init__(
self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None
):
if util.isstr(tzinfo):
tzinfo = parser.TzinfoParser.parse(tzinfo)
tzinfo = tzinfo if tzinfo is not None else dateutil_tz.tzutc()
self._datetime = datetime(
year, month, day, hour, minute, second, microsecond, tzinfo
)
# factories: single object, both original and from datetime.
@classmethod
def now(cls, tzinfo=None):
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in the given
timezone.
:param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
Usage::
>>> arrow.now('Asia/Baku')
<Arrow [2019-01-24T20:26:31.146412+04:00]>
"""
tzinfo = tzinfo if tzinfo is not None else dateutil_tz.tzlocal()
dt = datetime.now(tzinfo)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
@classmethod
def utcnow(cls):
""" Constructs an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in UTC
time.
Usage::
>>> arrow.utcnow()
<Arrow [2019-01-24T16:31:40.651108+00:00]>
"""
dt = datetime.now(dateutil_tz.tzutc())
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
@classmethod
def fromtimestamp(cls, timestamp, tzinfo=None):
""" Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, converted to
the given timezone.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
:param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
Timestamps should always be UTC. If you have a non-UTC timestamp::
>>> arrow.Arrow.utcfromtimestamp(1367900664).replace(tzinfo='US/Pacific')
<Arrow [2013-05-07T04:24:24-07:00]>
"""
tzinfo = tzinfo if tzinfo is not None else dateutil_tz.tzlocal()
timestamp = cls._get_timestamp_from_input(timestamp)
dt = datetime.fromtimestamp(timestamp, tzinfo)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
@classmethod
def utcfromtimestamp(cls, timestamp):
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, in UTC time.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
"""
timestamp = cls._get_timestamp_from_input(timestamp)
dt = datetime.utcfromtimestamp(timestamp)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dateutil_tz.tzutc(),
)
@classmethod
def fromdatetime(cls, dt, tzinfo=None):
""" Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a ``datetime`` and
optional replacement timezone.
:param dt: the ``datetime``
:param tzinfo: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to ``dt``'s
timezone, or UTC if naive.
If you only want to replace the timezone of naive datetimes::
>>> dt
datetime.datetime(2013, 5, 5, 0, 0, tzinfo=tzutc())
>>> arrow.Arrow.fromdatetime(dt, dt.tzinfo or 'US/Pacific')
<Arrow [2013-05-05T00:00:00+00:00]>
"""
if tzinfo is None:
if dt.tzinfo is None:
tzinfo = dateutil_tz.tzutc()
else:
tzinfo = dt.tzinfo
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo,
)
@classmethod
def fromdate(cls, date, tzinfo=None):
""" Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a ``date`` and optional
replacement timezone. Time values are set to 0.
:param date: the ``date``
:param tzinfo: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to UTC.
"""
tzinfo = tzinfo if tzinfo is not None else dateutil_tz.tzutc()
return cls(date.year, date.month, date.day, tzinfo=tzinfo)
@classmethod
def strptime(cls, date_str, fmt, tzinfo=None):
""" Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a date string and format,
in the style of ``datetime.strptime``. Optionally replaces the parsed timezone.
:param date_str: the date string.
:param fmt: the format string.
:param tzinfo: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to the parsed
timezone if ``fmt`` contains a timezone directive, otherwise UTC.
Usage::
>>> arrow.Arrow.strptime('20-01-2019 15:49:10', '%d-%m-%Y %H:%M:%S')
<Arrow [2019-01-20T15:49:10+00:00]>
"""
dt = datetime.strptime(date_str, fmt)
tzinfo = tzinfo if tzinfo is not None else dt.tzinfo
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo,
)
# factories: ranges and spans
    @classmethod
    def range(cls, frame, start, end=None, tz=None, limit=None):
        """ Returns an iterator of :class:`Arrow <arrow.arrow.Arrow>` objects, representing
        points in time between two inputs.

        :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
        :param start: A datetime expression, the start of the range.
        :param end: (optional) A datetime expression, the end of the range.
        :param tz: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to
            ``start``'s timezone, or UTC if ``start`` is naive.
        :param limit: (optional) A maximum number of tuples to return.

        **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to
        return the entire range. Call with ``limit`` alone to return a maximum # of results from
        the start. Call with both to cap a range at a maximum # of results.

        **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before
        iterating. As such, either call with naive objects and ``tz``, or aware objects from the
        same timezone and no ``tz``.

        Supported frame values: year, quarter, month, week, day, hour, minute, second.

        Recognized datetime expressions:

            - An :class:`Arrow <arrow.arrow.Arrow>` object.
            - A ``datetime`` object.

        **NOTE**: Unlike Python's ``range``, ``end`` *may* be included in the returned iterator::

            >>> start = datetime(2013, 5, 5, 12, 30)
            >>> end = datetime(2013, 5, 5, 13, 30)
            >>> for r in arrow.Arrow.range('hour', start, end):
            ...     print(repr(r))
            ...
            <Arrow [2013-05-05T12:30:00+00:00]>
            <Arrow [2013-05-05T13:30:00+00:00]>

        """
        # frame maps to a relativedelta keyword plus a step count (e.g. 'week'
        # becomes weeks=1); _get_frames resolves that mapping.
        _, frame_relative, relative_steps = cls._get_frames(frame)
        tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)
        # Both endpoints are coerced to datetimes and forced onto the same tz.
        start = cls._get_datetime(start).replace(tzinfo=tzinfo)
        end, limit = cls._get_iteration_params(end, limit)
        end = cls._get_datetime(end).replace(tzinfo=tzinfo)
        current = cls.fromdatetime(start)
        i = 0
        while current <= end and i < limit:
            i += 1
            yield current
            # Rebuild from calendar components and step with relativedelta so
            # month/year arithmetic follows calendar rules, not fixed deltas.
            values = [getattr(current, f) for f in cls._ATTRS]
            current = cls(*values, tzinfo=tzinfo) + relativedelta(
                **{frame_relative: relative_steps}
            )
    @classmethod
    def span_range(cls, frame, start, end, tz=None, limit=None):
        """ Returns an iterator of tuples, each :class:`Arrow <arrow.arrow.Arrow>` objects,
        representing a series of timespans between two inputs.

        :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
        :param start: A datetime expression, the start of the range.
        :param end: (optional) A datetime expression, the end of the range.
        :param tz: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to
            ``start``'s timezone, or UTC if ``start`` is naive.
        :param limit: (optional) A maximum number of tuples to return.

        **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to
        return the entire range. Call with ``limit`` alone to return a maximum # of results from
        the start. Call with both to cap a range at a maximum # of results.

        **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before
        iterating. As such, either call with naive objects and ``tz``, or aware objects from the
        same timezone and no ``tz``.

        Supported frame values: year, quarter, month, week, day, hour, minute, second.

        Recognized datetime expressions:

            - An :class:`Arrow <arrow.arrow.Arrow>` object.
            - A ``datetime`` object.

        **NOTE**: Unlike Python's ``range``, ``end`` will *always* be included in the returned
        iterator of timespans.

        Usage:

            >>> start = datetime(2013, 5, 5, 12, 30)
            >>> end = datetime(2013, 5, 5, 17, 15)
            >>> for r in arrow.Arrow.span_range('hour', start, end):
            ...     print(r)
            ...
            (<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T12:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T13:00:00+00:00]>, <Arrow [2013-05-05T13:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T14:00:00+00:00]>, <Arrow [2013-05-05T14:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T15:00:00+00:00]>, <Arrow [2013-05-05T15:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T16:00:00+00:00]>, <Arrow [2013-05-05T16:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T17:00:00+00:00]>, <Arrow [2013-05-05T17:59:59.999999+00:00]>)

        """
        tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)
        # Snap the start to the beginning of its frame so each span is aligned.
        start = cls.fromdatetime(start, tzinfo).span(frame)[0]
        _range = cls.range(frame, start, end, tz, limit)
        # Lazily expand each range point into its (span_start, span_end) tuple.
        return (r.span(frame) for r in _range)
    @classmethod
    def interval(cls, frame, start, end, interval=1, tz=None):
        """ Returns an iterator of tuples, each :class:`Arrow <arrow.arrow.Arrow>` objects,
        representing a series of intervals between two inputs.

        :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
        :param start: A datetime expression, the start of the range.
        :param end: (optional) A datetime expression, the end of the range.
        :param interval: (optional) Time interval for the given time frame; must be >= 1.
        :param tz: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to UTC.

        Supported frame values: year, quarter, month, week, day, hour, minute, second

        Recognized datetime expressions:

            - An :class:`Arrow <arrow.arrow.Arrow>` object.
            - A ``datetime`` object.

        Usage:

            >>> start = datetime(2013, 5, 5, 12, 30)
            >>> end = datetime(2013, 5, 5, 17, 15)
            >>> for r in arrow.Arrow.interval('hour', start, end, 2):
            ...     print r
            ...
            (<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T13:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T14:00:00+00:00]>, <Arrow [2013-05-05T15:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T16:00:00+00:00]>, <Arrow [2013-05-05T17:59:59.999999+00:0]>)

        """
        if interval < 1:
            raise ValueError("interval has to be a positive integer")
        spanRange = iter(cls.span_range(frame, start, end, tz))
        while True:
            try:
                # Each yielded interval merges `interval` consecutive spans:
                # keep the first span's start, advance the end across the rest.
                intvlStart, intvlEnd = next(spanRange)
                for _ in range(interval - 1):
                    _, intvlEnd = next(spanRange)
                yield intvlStart, intvlEnd
            except StopIteration:
                # Underlying spans exhausted; a partial trailing group (fewer
                # than `interval` spans) is dropped here.
                return
    # representations
    def __repr__(self):
        # e.g. "<Arrow [2013-05-09T03:49:12+00:00]>"
        return "<{} [{}]>".format(self.__class__.__name__, self.__str__())
    def __str__(self):
        # ISO-8601 form of the wrapped datetime
        return self._datetime.isoformat()
    def __format__(self, formatstr):
        # supports format(arw, 'YYYY-MM-DD'); an empty spec falls back to str()
        if len(formatstr) > 0:
            return self.format(formatstr)
        return str(self)
    def __hash__(self):
        # delegate so equal instants hash equally (consistent with __eq__)
        return self._datetime.__hash__()
    # attributes & properties
    def __getattr__(self, name):
        """Provide derived attributes (week, quarter) and delegate the rest
        of the public datetime attributes (year, month, day, ...) to the
        wrapped ``datetime`` instance."""
        # derived attribute: ISO week number
        if name == "week":
            return self.isocalendar()[1]
        # derived attribute: quarter of the year (1-4)
        if name == "quarter":
            return int((self.month - 1) / self._MONTHS_PER_QUARTER) + 1
        # delegate public attributes to the wrapped datetime when present
        if not name.startswith("_"):
            value = getattr(self._datetime, name, None)
            if value is not None:
                return value
        # fall through to normal lookup (raises AttributeError as usual)
        return object.__getattribute__(self, name)
    @property
    def tzinfo(self):
        """ Gets the ``tzinfo`` of the :class:`Arrow <arrow.arrow.Arrow>` object.
        Usage::
            >>> arw=arrow.utcnow()
            >>> arw.tzinfo
            tzutc()
        """
        return self._datetime.tzinfo
    @tzinfo.setter
    def tzinfo(self, tzinfo):
        """ Sets the ``tzinfo`` of the :class:`Arrow <arrow.arrow.Arrow>` object. """
        # replaces the timezone without converting the wall-clock time
        self._datetime = self._datetime.replace(tzinfo=tzinfo)
    @property
    def datetime(self):
        """ Returns a datetime representation of the :class:`Arrow <arrow.arrow.Arrow>` object.
        Usage::
            >>> arw=arrow.utcnow()
            >>> arw.datetime
            datetime.datetime(2019, 1, 24, 16, 35, 27, 276649, tzinfo=tzutc())
        """
        return self._datetime
    @property
    def naive(self):
        """ Returns a naive datetime representation of the :class:`Arrow <arrow.arrow.Arrow>`
        object.
        Usage::
            >>> nairobi = arrow.now('Africa/Nairobi')
            >>> nairobi
            <Arrow [2019-01-23T19:27:12.297999+03:00]>
            >>> nairobi.naive
            datetime.datetime(2019, 1, 23, 19, 27, 12, 297999)
        """
        # drops tzinfo; the wall-clock fields are unchanged (no conversion)
        return self._datetime.replace(tzinfo=None)
    @property
    def timestamp(self):
        """ Returns a timestamp representation of the :class:`Arrow <arrow.arrow.Arrow>` object, in
        UTC time.
        Usage::
            >>> arrow.utcnow().timestamp
            1548260567
        """
        # whole seconds only; sub-second precision is dropped here
        return calendar.timegm(self._datetime.utctimetuple())
    @property
    def float_timestamp(self):
        """ Returns a floating-point representation of the :class:`Arrow <arrow.arrow.Arrow>`
        object, in UTC time.
        Usage::
            >>> arrow.utcnow().float_timestamp
            1548260516.830896
        """
        # integer seconds plus the microsecond remainder as a fraction
        return self.timestamp + float(self.microsecond) / 1000000
    # mutation and duplication.
    def clone(self):
        """ Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, cloned from the current one.
        Usage:
            >>> arw = arrow.utcnow()
            >>> cloned = arw.clone()
        """
        # datetime objects are immutable, so sharing the instance is safe
        return self.fromdatetime(self._datetime)
    def replace(self, **kwargs):
        """ Returns a new :class:`Arrow <arrow.arrow.Arrow>` object with attributes updated
        according to inputs.
        Use property names to set their value absolutely::
            >>> import arrow
            >>> arw = arrow.utcnow()
            >>> arw
            <Arrow [2013-05-11T22:27:34.787885+00:00]>
            >>> arw.replace(year=2014, month=6)
            <Arrow [2014-06-11T22:27:34.787885+00:00]>
        You can also replace the timezone without conversion, using a
        :ref:`timezone expression <tz-expr>`::
            >>> arw.replace(tzinfo=tz.tzlocal())
            <Arrow [2013-05-11T22:27:34.787885-07:00]>
        :raises AttributeError: for unknown keywords, or for absolute
            'week'/'quarter' which cannot be set directly.
        """
        absolute_kwargs = {}
        relative_kwargs = {} # TODO: DEPRECATED; remove in next release
        # split keywords into absolute sets and (deprecated) relative shifts
        for key, value in kwargs.items():
            if key in self._ATTRS:
                absolute_kwargs[key] = value
            elif key in self._ATTRS_PLURAL or key in ["weeks", "quarters"]:
                # TODO: DEPRECATED -- plural keywords used to shift; use shift()
                warnings.warn(
                    "replace() with plural property to shift value "
                    "is deprecated, use shift() instead",
                    DeprecationWarning,
                )
                relative_kwargs[key] = value
            elif key in ["week", "quarter"]:
                raise AttributeError("setting absolute {} is not supported".format(key))
            elif key != "tzinfo":
                raise AttributeError('unknown attribute: "{}"'.format(key))
        # core datetime does not support quarters, translate to months.
        relative_kwargs.setdefault("months", 0)
        relative_kwargs["months"] += (
            relative_kwargs.pop("quarters", 0) * self._MONTHS_PER_QUARTER
        )
        current = self._datetime.replace(**absolute_kwargs)
        current += relativedelta(**relative_kwargs) # TODO: DEPRECATED
        # tzinfo is handled last: swapped in without datetime conversion
        tzinfo = kwargs.get("tzinfo")
        if tzinfo is not None:
            tzinfo = self._get_tzinfo(tzinfo)
            current = current.replace(tzinfo=tzinfo)
        return self.fromdatetime(current)
def shift(self, **kwargs):
""" Returns a new :class:`Arrow <arrow.arrow.Arrow>` object with attributes updated
according to inputs.
Use pluralized property names to shift their current value relatively:
>>> import arrow
>>> arw = arrow.utcnow()
>>> arw
<Arrow [2013-05-11T22:27:34.787885+00:00]>
>>> arw.shift(years=1, months=-1)
<Arrow [2014-04-11T22:27:34.787885+00:00]>
Day-of-the-week relative shifting can use either Python's weekday numbers
(Monday = 0, Tuesday = 1 .. Sunday = 6) or using dateutil.relativedelta's
day instances (MO, TU .. SU). When using weekday numbers, the returned
date will always be greater than or equal to the starting date.
Using the above code (which is a Saturday) and asking it to shift to Saturday:
>>> arw.shift(weekday=5)
<Arrow [2013-05-11T22:27:34.787885+00:00]>
While asking for a Monday:
>>> arw.shift(weekday=0)
<Arrow [2013-05-13T22:27:34.787885+00:00]>
"""
relative_kwargs = {}
for key, value in kwargs.items():
if key in self._ATTRS_PLURAL or key in ["weeks", "quarters", "weekday"]:
relative_kwargs[key] = value
else:
raise AttributeError()
# core datetime does not support quarters, translate to months.
relative_kwargs.setdefault("months", 0)
relative_kwargs["months"] += (
relative_kwargs.pop("quarters", 0) * self._MONTHS_PER_QUARTER
)
current = self._datetime + relativedelta(**relative_kwargs)
return self.fromdatetime(current)
    def to(self, tz):
        """ Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, converted
        to the target timezone.
        :param tz: A :ref:`timezone expression <tz-expr>`.
        Usage::
            >>> utc = arrow.utcnow()
            >>> utc
            <Arrow [2013-05-09T03:49:12.311072+00:00]>
            >>> utc.to('US/Pacific')
            <Arrow [2013-05-08T20:49:12.311072-07:00]>
            >>> utc.to(tz.tzlocal())
            <Arrow [2013-05-08T20:49:12.311072-07:00]>
            >>> utc.to('-07:00')
            <Arrow [2013-05-08T20:49:12.311072-07:00]>
            >>> utc.to('local')
            <Arrow [2013-05-08T20:49:12.311072-07:00]>
            >>> utc.to('local').to('utc')
            <Arrow [2013-05-09T03:49:12.311072+00:00]>
        """
        # strings/offsets are parsed into a tzinfo first
        if not isinstance(tz, tzinfo):
            tz = parser.TzinfoParser.parse(tz)
        dt = self._datetime.astimezone(tz)
        # rebuild field-by-field so subclasses keep their own type
        return self.__class__(
            dt.year,
            dt.month,
            dt.day,
            dt.hour,
            dt.minute,
            dt.second,
            dt.microsecond,
            dt.tzinfo,
        )
    def span(self, frame, count=1):
        """ Returns two new :class:`Arrow <arrow.arrow.Arrow>` objects, representing the timespan
        of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
        :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
        :param count: (optional) the number of frames to span.
        Supported frame values: year, quarter, month, week, day, hour, minute, second.
        Usage::
            >>> arrow.utcnow()
            <Arrow [2013-05-09T03:32:36.186203+00:00]>
            >>> arrow.utcnow().span('hour')
            (<Arrow [2013-05-09T03:00:00+00:00]>, <Arrow [2013-05-09T03:59:59.999999+00:00]>)
            >>> arrow.utcnow().span('day')
            (<Arrow [2013-05-09T00:00:00+00:00]>, <Arrow [2013-05-09T23:59:59.999999+00:00]>)
            >>> arrow.utcnow().span('day', count=2)
            (<Arrow [2013-05-09T00:00:00+00:00]>, <Arrow [2013-05-10T23:59:59.999999+00:00]>)
        """
        frame_absolute, frame_relative, relative_steps = self._get_frames(frame)
        # week/quarter have no direct datetime attribute; floor on day/month first
        if frame_absolute == "week":
            attr = "day"
        elif frame_absolute == "quarter":
            attr = "month"
        else:
            attr = frame_absolute
        # keep attributes down to `attr`, discarding anything finer
        index = self._ATTRS.index(attr)
        frames = self._ATTRS[: index + 1]
        values = [getattr(self, f) for f in frames]
        # pad to at least (year, month, day): missing positions default to 1
        for _ in range(3 - len(values)):
            values.append(1)
        floor = self.__class__(*values, tzinfo=self.tzinfo)
        # snap back to the start of the ISO week / calendar quarter
        if frame_absolute == "week":
            floor = floor + relativedelta(days=-(self.isoweekday() - 1))
        elif frame_absolute == "quarter":
            floor = floor + relativedelta(months=-((self.month - 1) % 3))
        # ceiling is one microsecond before the start of the next span
        ceil = (
            floor
            + relativedelta(**{frame_relative: count * relative_steps})
            + relativedelta(microseconds=-1)
        )
        return floor, ceil
    def floor(self, frame):
        """ Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, representing the "floor"
        of the timespan of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
        Equivalent to the first element in the 2-tuple returned by
        :func:`span <arrow.arrow.Arrow.span>`.
        :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
        Usage::
            >>> arrow.utcnow().floor('hour')
            <Arrow [2013-05-09T03:00:00+00:00]>
        """
        return self.span(frame)[0]
    def ceil(self, frame):
        """ Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, representing the "ceiling"
        of the timespan of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
        Equivalent to the second element in the 2-tuple returned by
        :func:`span <arrow.arrow.Arrow.span>`.
        :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
        Usage::
            >>> arrow.utcnow().ceil('hour')
            <Arrow [2013-05-09T03:59:59.999999+00:00]>
        """
        return self.span(frame)[1]
    # string output and formatting.
    def format(self, fmt="YYYY-MM-DD HH:mm:ssZZ", locale="en_us"):
        """ Returns a string representation of the :class:`Arrow <arrow.arrow.Arrow>` object,
        formatted according to a format string.
        :param fmt: the format string.
        :param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'.
        Usage::
            >>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ')
            '2013-05-09 03:56:47 -00:00'
            >>> arrow.utcnow().format('X')
            '1368071882'
            >>> arrow.utcnow().format('MMMM DD, YYYY')
            'May 09, 2013'
            >>> arrow.utcnow().format()
            '2013-05-09 03:56:47 -00:00'
        """
        return formatter.DateTimeFormatter(locale).format(self._datetime, fmt)
def humanize(
self, other=None, locale="en_us", only_distance=False, granularity="auto"
):
""" Returns a localized, humanized representation of a relative difference in time.
:param other: (optional) an :class:`Arrow <arrow.arrow.Arrow>` or ``datetime`` object.
Defaults to now in the current :class:`Arrow <arrow.arrow.Arrow>` object's timezone.
:param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'.
:param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part.
:param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute', 'hour', 'day', 'month' or 'year'.
Usage::
>>> earlier = arrow.utcnow().shift(hours=-2)
>>> earlier.humanize()
'2 hours ago'
>>> later = earlier.shift(hours=4)
>>> later.humanize(earlier)
'in 4 hours'
"""
locale = locales.get_locale(locale)
if other is None:
utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
dt = utc.astimezone(self._datetime.tzinfo)
elif isinstance(other, Arrow):
dt = other._datetime
elif isinstance(other, datetime):
if other.tzinfo is None:
dt = other.replace(tzinfo=self._datetime.tzinfo)
else:
dt = other.astimezone(self._datetime.tzinfo)
else:
raise TypeError()
delta = int(round(util.total_seconds(self._datetime - dt)))
sign = -1 if delta < 0 else 1
diff = abs(delta)
delta = diff
if granularity == "auto":
if diff < 10:
return locale.describe("now", only_distance=only_distance)
if diff < 45:
seconds = sign * delta
return locale.describe("seconds", seconds, only_distance=only_distance)
elif diff < 90:
return locale.describe("minute", sign, only_distance=only_distance)
elif diff < 2700:
minutes = sign * int(max(delta / 60, 2))
return locale.describe("minutes", minutes, only_distance=only_distance)
elif diff < 5400:
return locale.describe("hour", sign, only_distance=only_distance)
elif diff < 79200:
hours = sign * int(max(delta / 3600, 2))
return locale.describe("hours", hours, only_distance=only_distance)
elif diff < 129600:
return locale.describe("day", sign, only_distance=only_distance)
elif diff < 2160000:
days = sign * int(max(delta / 86400, 2))
return locale.describe("days", days, only_distance=only_distance)
elif diff < 3888000:
return locale.describe("month", sign, only_distance=only_distance)
elif diff < 29808000:
self_months = self._datetime.year * 12 + self._datetime.month
other_months = dt.year * 12 + dt.month
months = sign * int(max(abs(other_months - self_months), 2))
return locale.describe("months", months, only_distance=only_distance)
elif diff < 47260800:
return locale.describe("year", sign, only_distance=only_distance)
else:
years = sign * int(max(delta / 31536000, 2))
return locale.describe("years", years, only_distance=only_distance)
else:
if granularity == "second":
delta = sign * delta
if abs(delta) < 2:
return locale.describe("now", only_distance=only_distance)
elif granularity == "minute":
delta = sign * delta / float(60)
elif granularity == "hour":
delta = sign * delta / float(60 * 60)
elif granularity == "day":
delta = sign * delta / float(60 * 60 * 24)
elif granularity == "month":
delta = sign * delta / float(60 * 60 * 24 * 30.5)
elif granularity == "year":
delta = sign * delta / float(60 * 60 * 24 * 365.25)
else:
raise AttributeError(
'Error. Could not understand your level of granularity. Please select between \
"second", "minute", "hour", "day", "week", "month" or "year"'
)
if trunc(abs(delta)) != 1:
granularity += "s"
return locale.describe(granularity, delta, only_distance=only_distance)
# query functions
def is_between(self, start, end, bounds="()"):
""" Returns a boolean denoting whether the specified date and time is between
the start and end dates and times.
:param start: an :class:`Arrow <arrow.arrow.Arrow>` object.
:param end: an :class:`Arrow <arrow.arrow.Arrow>` object.
:param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
whether to include or exclude the start and end values in the range. '(' excludes
the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
If the bounds are not specified, the default bound '()' is used.
Usage::
>>> start = arrow.get(datetime(2013, 5, 5, 12, 30, 10))
>>> end = arrow.get(datetime(2013, 5, 5, 12, 30, 36))
>>> arrow.get(datetime(2013, 5, 5, 12, 30, 27)).is_between(start, end)
True
>>> start = arrow.get(datetime(2013, 5, 5))
>>> end = arrow.get(datetime(2013, 5, 8))
>>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[]')
True
>>> start = arrow.get(datetime(2013, 5, 5))
>>> end = arrow.get(datetime(2013, 5, 8))
>>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[)')
False
"""
if bounds != "()" and bounds != "(]" and bounds != "[)" and bounds != "[]":
raise AttributeError(
'Error. Could not understand the specified bounds. Please select between \
"()", "(]", "[)", or "[]"'
)
if not isinstance(start, Arrow):
raise TypeError(
"Can't parse start date argument type of '{}'".format(type(start))
)
if not isinstance(end, Arrow):
raise TypeError(
"Can't parse end date argument type of '{}'".format(type(end))
)
include_start = bounds[0] == "["
include_end = bounds[1] == "]"
target_timestamp = self.float_timestamp
start_timestamp = start.float_timestamp
end_timestamp = end.float_timestamp
if include_start and include_end:
return (
target_timestamp >= start_timestamp
and target_timestamp <= end_timestamp
)
elif include_start and not include_end:
return (
target_timestamp >= start_timestamp and target_timestamp < end_timestamp
)
elif not include_start and include_end:
return (
target_timestamp > start_timestamp and target_timestamp <= end_timestamp
)
else:
return (
target_timestamp > start_timestamp and target_timestamp < end_timestamp
)
    # math
    def __add__(self, other):
        # Arrow + timedelta/relativedelta -> shifted Arrow (same tzinfo)
        if isinstance(other, (timedelta, relativedelta)):
            return self.fromdatetime(self._datetime + other, self._datetime.tzinfo)
        return NotImplemented
    def __radd__(self, other):
        # delta + Arrow behaves like Arrow + delta
        return self.__add__(other)
    def __sub__(self, other):
        # Arrow - delta -> Arrow; Arrow - datetime/Arrow -> timedelta
        if isinstance(other, (timedelta, relativedelta)):
            return self.fromdatetime(self._datetime - other, self._datetime.tzinfo)
        elif isinstance(other, datetime):
            return self._datetime - other
        elif isinstance(other, Arrow):
            return self._datetime - other._datetime
        return NotImplemented
    def __rsub__(self, other):
        # datetime - Arrow -> timedelta
        if isinstance(other, datetime):
            return other - self._datetime
        return NotImplemented
    # comparisons
    def __eq__(self, other):
        # non-Arrow/datetime operands are simply unequal (no TypeError)
        if not isinstance(other, (Arrow, datetime)):
            return False
        return self._datetime == self._get_datetime(other)
    def __ne__(self, other):
        if not isinstance(other, (Arrow, datetime)):
            return True
        return not self.__eq__(other)
    def __gt__(self, other):
        # ordering against anything else defers to the other operand
        if not isinstance(other, (Arrow, datetime)):
            return NotImplemented
        return self._datetime > self._get_datetime(other)
    def __ge__(self, other):
        if not isinstance(other, (Arrow, datetime)):
            return NotImplemented
        return self._datetime >= self._get_datetime(other)
    def __lt__(self, other):
        if not isinstance(other, (Arrow, datetime)):
            return NotImplemented
        return self._datetime < self._get_datetime(other)
    def __le__(self, other):
        if not isinstance(other, (Arrow, datetime)):
            return NotImplemented
        return self._datetime <= self._get_datetime(other)
    def __cmp__(self, other):
        # Python 2 legacy hook: validates the operand type there only;
        # on Python 3 this is never used and implicitly returns None.
        if sys.version_info[0] < 3: # pragma: no cover
            if not isinstance(other, (Arrow, datetime)):
                raise TypeError(
                    "can't compare '{}' to '{}'".format(type(self), type(other))
                )
    # datetime methods (thin delegations to the wrapped datetime instance)
    def date(self):
        """ Returns a ``date`` object with the same year, month and day.
        Usage::
            >>> arrow.utcnow().date()
            datetime.date(2019, 1, 23)
        """
        return self._datetime.date()
    def time(self):
        """ Returns a ``time`` object with the same hour, minute, second, microsecond.
        Usage::
            >>> arrow.utcnow().time()
            datetime.time(12, 15, 34, 68352)
        """
        return self._datetime.time()
    def timetz(self):
        """ Returns a ``time`` object with the same hour, minute, second, microsecond and
        tzinfo.
        Usage::
            >>> arrow.utcnow().timetz()
            datetime.time(12, 5, 18, 298893, tzinfo=tzutc())
        """
        return self._datetime.timetz()
    def astimezone(self, tz):
        """ Returns a ``datetime`` object, converted to the specified timezone.
        :param tz: a ``tzinfo`` object.
        Usage::
            >>> pacific=arrow.now('US/Pacific')
            >>> nyc=arrow.now('America/New_York').tzinfo
            >>> pacific.astimezone(nyc)
            datetime.datetime(2019, 1, 20, 10, 24, 22, 328172, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
        """
        return self._datetime.astimezone(tz)
    def utcoffset(self):
        """ Returns a ``timedelta`` object representing the whole number of minutes difference from
        UTC time.
        Usage::
            >>> arrow.now('US/Pacific').utcoffset()
            datetime.timedelta(-1, 57600)
        """
        return self._datetime.utcoffset()
    def dst(self):
        """ Returns the daylight savings time adjustment.
        Usage::
            >>> arrow.utcnow().dst()
            datetime.timedelta(0)
        """
        return self._datetime.dst()
    def timetuple(self):
        """ Returns a ``time.struct_time``, in the current timezone.
        Usage::
            >>> arrow.utcnow().timetuple()
            time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0)
        """
        return self._datetime.timetuple()
    def utctimetuple(self):
        """ Returns a ``time.struct_time``, in UTC time.
        Usage::
            >>> arrow.utcnow().utctimetuple()
            time.struct_time(tm_year=2019, tm_mon=1, tm_mday=19, tm_hour=21, tm_min=41, tm_sec=7, tm_wday=5, tm_yday=19, tm_isdst=0)
        """
        return self._datetime.utctimetuple()
    def toordinal(self):
        """ Returns the proleptic Gregorian ordinal of the date.
        Usage::
            >>> arrow.utcnow().toordinal()
            737078
        """
        return self._datetime.toordinal()
    def weekday(self):
        """ Returns the day of the week as an integer (0-6).
        Usage::
            >>> arrow.utcnow().weekday()
            5
        """
        return self._datetime.weekday()
    def isoweekday(self):
        """ Returns the ISO day of the week as an integer (1-7).
        Usage::
            >>> arrow.utcnow().isoweekday()
            6
        """
        return self._datetime.isoweekday()
    def isocalendar(self):
        """ Returns a 3-tuple, (ISO year, ISO week number, ISO weekday).
        Usage::
            >>> arrow.utcnow().isocalendar()
            (2019, 3, 6)
        """
        return self._datetime.isocalendar()
    def isoformat(self, sep="T"):
        """Returns an ISO 8601 formatted representation of the date and time.
        Usage::
            >>> arrow.utcnow().isoformat()
            '2019-01-19T18:30:52.442118+00:00'
        """
        return self._datetime.isoformat(sep)
    def ctime(self):
        """ Returns a ctime formatted representation of the date and time.
        Usage::
            >>> arrow.utcnow().ctime()
            'Sat Jan 19 18:26:50 2019'
        """
        return self._datetime.ctime()
    def strftime(self, format):
        """ Formats in the style of ``datetime.strftime``.
        :param format: the format string.
        Usage::
            >>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S')
            '23-01-2019 12:28:17'
        """
        return self._datetime.strftime(format)
    def for_json(self):
        """Serializes for the ``for_json`` protocol of simplejson.
        Usage::
            >>> arrow.utcnow().for_json()
            '2019-01-19T18:25:36.760079+00:00'
        """
        return self.isoformat()
# internal tools.
@staticmethod
def _get_tzinfo(tz_expr):
if tz_expr is None:
return dateutil_tz.tzutc()
if isinstance(tz_expr, tzinfo):
return tz_expr
else:
try:
return parser.TzinfoParser.parse(tz_expr)
except parser.ParserError:
raise ValueError("'{}' not recognized as a timezone".format(tz_expr))
    @classmethod
    def _get_datetime(cls, expr):
        """Coerce an Arrow, datetime, or timestamp-like value into a datetime."""
        if isinstance(expr, Arrow):
            return expr.datetime
        if isinstance(expr, datetime):
            return expr
        try:
            # anything float()-able is treated as a UTC timestamp
            expr = float(expr)
            return cls.utcfromtimestamp(expr).datetime
        except Exception:
            raise ValueError(
                "'{}' not recognized as a timestamp or datetime".format(expr)
            )
    @classmethod
    def _get_frames(cls, name):
        """Map a frame name to (absolute frame, relative plural frame, step count)."""
        if name in cls._ATTRS:
            return name, "{}s".format(name), 1
        elif name in ["week", "weeks"]:
            return "week", "weeks", 1
        elif name in ["quarter", "quarters"]:
            # quarters are expressed as 3-month relative steps
            return "quarter", "months", 3
        supported = ", ".join(cls._ATTRS + ["week", "weeks"] + ["quarter", "quarters"])
        raise AttributeError(
            "range/span over frame {} not supported. Supported frames: {}".format(
                name, supported
            )
        )
@classmethod
def _get_iteration_params(cls, end, limit):
if end is None:
if limit is None:
raise Exception("one of 'end' or 'limit' is required")
return cls.max, limit
else:
if limit is None:
return end, sys.maxsize
return end, limit
    @staticmethod
    def _get_timestamp_from_input(timestamp):
        """Normalize a numeric timestamp to seconds.

        Accepts seconds, milliseconds or microseconds and disambiguates by
        magnitude (thresholds from ``Constants.MAX_TIMESTAMP*``).
        """
        try:
            timestamp = float(timestamp)
        except Exception:
            raise ValueError("cannot parse '{}' as a timestamp".format(timestamp))
        # already in seconds
        if timestamp < Constants.MAX_TIMESTAMP:
            return timestamp
        # milliseconds
        if timestamp < Constants.MAX_TIMESTAMP_MS:
            return timestamp / 1000.0
        # microseconds
        if timestamp < Constants.MAX_TIMESTAMP_US:
            return timestamp / 1000000.0
        raise ValueError(
            "specified timestamp '{}' too big, use seconds, milliseconds (ms) or microseconds (us)".format(
                timestamp
            )
        )
# Smallest/largest representable instants, mirroring datetime.min/.max.
Arrow.min = Arrow.fromdatetime(datetime.min)
Arrow.max = Arrow.fromdatetime(datetime.max)
| 31.589633 | 147 | 0.561511 |
e6caf4a50c5a107d849f3eec331039a6ec258128 | 158 | py | Python | sidekick/passwords/apps.py | ffroggy/sidekick | 8a1e461669991b853b331df052151332f499358b | [
"Apache-2.0"
] | 7 | 2018-01-19T23:19:01.000Z | 2018-10-26T23:42:18.000Z | sidekick/passwords/apps.py | ffroggy/sidekick | 8a1e461669991b853b331df052151332f499358b | [
"Apache-2.0"
] | 27 | 2018-01-20T23:35:09.000Z | 2018-10-01T23:16:01.000Z | sidekick/passwords/apps.py | ffroggy/sidekick | 8a1e461669991b853b331df052151332f499358b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class PasswordsConfig(AppConfig):
name = 'passwords'
| 17.555556 | 39 | 0.740506 |
f06c3006fa9095ce90f0d9bcd44f7f740a5a9823 | 15,882 | py | Python | code/wrappers.py | mmcaulif/F1Tenth-RL | dfa6b380440649f81309293164cec9c944c5422e | [
"MIT"
] | null | null | null | code/wrappers.py | mmcaulif/F1Tenth-RL | dfa6b380440649f81309293164cec9c944c5422e | [
"MIT"
] | null | null | null | code/wrappers.py | mmcaulif/F1Tenth-RL | dfa6b380440649f81309293164cec9c944c5422e | [
"MIT"
] | 1 | 2021-06-11T10:41:24.000Z | 2021-06-11T10:41:24.000Z | # MIT License
# Copyright (c) 2020 FT Autonomous Team One
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gym
import numpy as np
from gym import spaces
from pathlib import Path
from code.random_trackgen import create_track, convert_track
# Names of the available F1Tenth race tracks (directories under ./f1tenth_racetracks).
mapno = ["Austin","BrandsHatch","Budapest","Catalunya","Hockenheim","IMS","Melbourne","MexicoCity","Montreal","Monza","MoscowRaceway",
"Nuerburgring","Oschersleben","Sakhir","SaoPaulo","Sepang","Shanghai","Silverstone","Sochi","Spa","Spielberg","YasMarina","Zandvoort"]
# Track selected at import time; RandomF1TenthMap re-randomizes per map switch.
randmap = mapno[0]
# Centerline waypoints of the active map, read by F110_Wrapped's progress reward.
# NOTE(review): rows appear to be (x, y, ...) with position in the first two
# columns -- confirm the CSV schema against the track files.
globwaypoints = np.genfromtxt(f"./f1tenth_racetracks/{randmap}/{randmap}_centerline.csv", delimiter=',')
def convert_range(value, input_range, output_range):
    """Linearly map ``value`` (scalar or array) from ``input_range`` to ``output_range``.

    Both ranges are ``[min, max]`` pairs; the mapping is a plain affine
    rescale, so values outside ``input_range`` extrapolate accordingly.
    """
    (in_min, in_max), (out_min, out_max) = input_range, output_range
    span_in = in_max - in_min
    span_out = out_max - out_min
    offset = value - in_min
    return ((offset * span_out) / span_in) + out_min
class F110_Wrapped(gym.Wrapper):
    """
    Single-agent wrapper for the F1Tenth Gym environment.

    Normalises the steer/speed action space and the 1080-beam lidar
    observations to [-1, 1], shapes the reward around forward speed,
    centerline progress and collisions, and supports randomised respawn
    poses via ``reset``. Multi-agent scenarios are not handled yet.
    """

    def __init__(self, env):
        super().__init__(env)
        # normalised action space, steer and speed
        # BUG FIX: np.float was removed in NumPy 1.24; np.float64 is the
        # dtype the old alias resolved to, so behaviour is unchanged.
        self.action_space = spaces.Box(low=np.array(
            [-1.0, -1.0]), high=np.array([1.0, 1.0]), dtype=np.float64)
        # normalised observations, just take the lidar scans
        self.observation_space = spaces.Box(
            low=-1.0, high=1.0, shape=(1080,), dtype=np.float64)
        # store allowed steering/speed/lidar ranges for normalisation
        self.s_min = self.env.params['s_min']
        self.s_max = self.env.params['s_max']
        self.v_min = self.env.params['v_min']
        self.v_max = self.env.params['v_max']
        self.lidar_min = 0
        self.lidar_max = 30  # see ScanSimulator2D max_range
        # store car dimensions and some track info
        self.car_length = self.env.params['length']
        self.car_width = self.env.params['width']
        self.track_width = 3.2  # ~= track width, see random_trackgen.py
        # radius of circle where car can start on track, relative to a centerpoint
        self.start_radius = (self.track_width / 2) - \
            ((self.car_length + self.car_width) / 2)  # just extra wiggle room
        self.step_count = 0
        # set threshold for maximum angle of car, to prevent spinning
        self.max_theta = 100
        # index of the next centerline waypoint used for the progress reward
        self.count = 0

    def step(self, action):
        """Step the simulator with a normalised action and shape the reward.

        Reward = forward speed magnitude, plus a bonus proportional to the
        fraction of centerline waypoints passed; -1 on collision.
        """
        # convert normalised actions (from RL algorithms) back to actual actions for simulator
        action_convert = self.un_normalise_actions(action)
        observation, _, done, info = self.env.step(np.array([action_convert]))
        self.step_count += 1

        # base reward: forward speed magnitude
        vel_magnitude = np.linalg.norm(
            [observation['linear_vels_x'][0], observation['linear_vels_y'][0]])
        reward = vel_magnitude

        # progress bonus: reward proportional to percent of lap completed
        if self.count < len(globwaypoints):
            wx, wy = globwaypoints[self.count][:2]
            X, Y = observation['poses_x'][0], observation['poses_y'][0]
            dist = np.sqrt(np.power((X - wx), 2) + np.power((Y - wy), 2))
            # NOTE(review): dist > 2 is treated as "waypoint passed" -- confirm
            # this threshold suits the waypoint spacing of every track.
            if dist > 2:
                self.count += 1
                complete = (self.count / len(globwaypoints)) * 0.5
                reward += complete
        else:
            # all waypoints consumed: start counting a fresh lap
            self.count = 0

        # if collisions is true, then the car has crashed
        if observation['collisions'][0]:
            self.count = 0
            reward = -1

        # end episode if car is spinning
        if abs(observation['poses_theta'][0]) > self.max_theta:
            done = True

        return self.normalise_observations(observation['scans'][0]), reward, bool(done), info

    def reset(self, start_xy=None, direction=None):
        """Reset the car at a random pose near ``start_xy``.

        The car is positioned anywhere along the line from wall to wall and
        never faces backwards (it may face forwards at an angle).
        """
        # start from origin if no pose input
        if start_xy is None:
            start_xy = np.zeros(2)
        # start in random direction if no direction input
        if direction is None:
            direction = np.random.uniform(0, 2 * np.pi)
        # get slope perpendicular to track direction
        slope = np.tan(direction + np.pi / 2)
        # get magintude of slope to normalise parametric line
        magnitude = np.sqrt(1 + np.power(slope, 2))
        # get random point along line of width track
        rand_offset = np.random.uniform(-1, 1)
        rand_offset_scaled = rand_offset * self.start_radius
        # convert position along line to position between walls at current point
        x, y = start_xy + rand_offset_scaled * np.array([1, slope]) / magnitude
        # point car in random forward direction, not aiming at walls
        t = -np.random.uniform(max(-rand_offset * np.pi / 2, 0) - np.pi / 2,
                               min(-rand_offset * np.pi / 2, 0) + np.pi / 2) + direction
        # reset car with chosen pose
        observation, _, _, _ = self.env.reset(np.array([[x, y, t]]))
        # reward, done, info can't be included in the Gym format
        return self.normalise_observations(observation['scans'][0])

    def un_normalise_actions(self, actions):
        """Convert actions from [-1, 1] back to the simulator's steer/speed ranges."""
        steer = convert_range(actions[0], [-1, 1], [self.s_min, self.s_max])
        speed = convert_range(actions[1], [-1, 1], [self.v_min, self.v_max])
        # np.float -> np.float64 (alias removed in NumPy 1.24)
        return np.array([steer, speed], dtype=np.float64)

    def normalise_observations(self, observations):
        """Convert lidar distances to the range [-1, 1]."""
        return convert_range(observations, [self.lidar_min, self.lidar_max], [-1, 1])

    def update_map(self, map_name, map_extension, update_render=True):
        """Load a new map into the simulator, closing any stale renderer."""
        self.env.map_name = map_name
        self.env.map_ext = map_extension
        self.env.update_map(f"{map_name}.yaml", map_extension)
        if update_render and self.env.renderer:
            self.env.renderer.close()
            self.env.renderer = None

    def seed(self, seed):
        """Seed numpy's global RNG (used for respawn poses and map selection)."""
        self.current_seed = seed
        np.random.seed(self.current_seed)
        print(f"Seed -> {self.current_seed}")
class RandomMap(gym.Wrapper):
    """
    Generates random maps at chosen intervals, when resetting the car,
    and positions the car at a random point around the new track.
    """

    # stop function from trying to generate map after multiple failures
    MAX_CREATE_ATTEMPTS = 20

    def __init__(self, env, step_interval=5000):
        """Wrap ``env``; regenerate a random track every ``step_interval`` steps."""
        super().__init__(env)
        # initialise step counters
        self.step_interval = step_interval
        self.step_count = 0

    def reset(self):
        """Reset the car, regenerating a random map when the interval elapses."""
        # check map update interval
        if self.step_count % self.step_interval == 0:
            # create map (the random generator fails occasionally, so retry)
            for _ in range(self.MAX_CREATE_ATTEMPTS):
                try:
                    track, track_int, track_ext = create_track()
                    convert_track(track,
                                  track_int,
                                  track_ext,
                                  self.current_seed)
                    break
                except Exception:
                    print(
                        f"Random generator [{self.current_seed}] failed, trying again...")
            # update map
            self.update_map(f"./maps/map{self.current_seed}", ".png")
            # store waypoints
            self.waypoints = np.genfromtxt(f"centerline/map{self.current_seed}.csv",
                                           delimiter=',')
        # get random starting position from centerline
        # (leftover debug print of start_xy removed)
        random_index = np.random.randint(len(self.waypoints))
        start_xy = self.waypoints[random_index]
        next_xy = self.waypoints[(random_index + 1) % len(self.waypoints)]
        # get forward direction by pointing at next point
        direction = np.arctan2(next_xy[1] - start_xy[1],
                               next_xy[0] - start_xy[0])
        # reset environment
        return self.env.reset(start_xy=start_xy, direction=direction)

    def step(self, action):
        """Step the wrapped environment, counting steps for the map interval."""
        self.step_count += 1
        return self.env.step(action)

    def seed(self, seed):
        """Seed the wrapped env and prune old generated maps/centerlines.

        Keeps files whose numeric suffix is within +/-100 of ``seed``.
        """
        self.env.seed(seed)
        # delete old maps and centerlines
        for f in Path('centerline').glob('*'):
            if not ((seed - 100) < int(''.join(filter(str.isdigit, str(f)))) < (seed + 100)):
                try:
                    f.unlink()
                except OSError:  # was a bare except; only swallow filesystem errors
                    pass
        for f in Path('maps').glob('*'):
            if not ((seed - 100) < int(''.join(filter(str.isdigit, str(f)))) < (seed + 100)):
                try:
                    f.unlink()
                except OSError:
                    pass
class RandomF1TenthMap(gym.Wrapper):
    """
    Places the car in a random map from F1Tenth
    """

    # stop function from trying to generate map after multiple failures
    MAX_CREATE_ATTEMPTS = 20

    def __init__(self, env, step_interval=5000):
        """
        :param env: wrapped racing environment
        :param step_interval: switch to a new random track every this many steps
        """
        super().__init__(env)
        # initialise step counters
        self.step_interval = step_interval
        self.step_count = 0

    def reset(self):
        """Reset the env, switching to a random F1Tenth track at the interval."""
        # check map update interval
        if self.step_count % self.step_interval == 0:
            # pick a random named track from the module-level `mapno` table
            # NOTE(review): `high=22` is hard-coded -- keep in sync with len(mapno)
            randmap = mapno[np.random.randint(low=0, high=22)]
            # update map
            self.update_map(f"./f1tenth_racetracks/{randmap}/{randmap}_map", ".png")
            # store waypoints
            # (the original also bound a dead local `globwaypoints` here; removed --
            # a `global` declaration was presumably intended)
            self.waypoints = np.genfromtxt(
                f"./f1tenth_racetracks/{randmap}/{randmap}_centerline.csv",
                delimiter=',')
        # get random starting position from centerline
        random_index = np.random.randint(len(self.waypoints))
        # centerline rows carry 4 columns; only x, y are needed for the pose
        start_xy = self.waypoints[random_index][:2]
        next_xy = self.waypoints[(random_index + 1) % len(self.waypoints)]
        # get forward direction by pointing at next point
        direction = np.arctan2(next_xy[1] - start_xy[1],
                               next_xy[0] - start_xy[0])
        # reset environment
        return self.env.reset(start_xy=start_xy, direction=direction)

    def step(self, action):
        """Step the wrapped env, counting steps for the map-switch interval."""
        # increment class step counter
        self.step_count += 1
        # step environment
        return self.env.step(action)

    def seed(self, seed):
        """Seed the wrapped env and prune generated files from distant seeds."""
        # seed class
        self.env.seed(seed)
        # delete old maps and centerlines (deduplicated: the original repeated
        # this loop verbatim for both directories)
        self._prune_generated_files('centerline', seed)
        self._prune_generated_files('maps', seed)

    def _prune_generated_files(self, directory, seed):
        """Delete files in *directory* whose embedded number is far from *seed*."""
        for f in Path(directory).glob('*'):
            digits = ''.join(filter(str.isdigit, str(f)))
            # skip files without an embedded number: the original crashed here
            # with ValueError on int('')
            if not digits:
                continue
            if not ((seed - 100) < int(digits) < (seed + 100)):
                try:
                    f.unlink()
                except OSError:
                    # cleanup is best-effort
                    pass
class ThrottleMaxSpeedReward(gym.RewardWrapper):
    """
    Slowly increase maximum reward for going fast, so that car learns
    to drive well before trying to improve speed
    """

    def __init__(self, env, start_step, end_step, start_max_reward, end_max_reward=None):
        super().__init__(env)
        # boundaries of the ramp-up window, in env steps
        self.end_step = end_step
        self.start_step = start_step
        self.start_max_reward = start_max_reward
        # the cap finishes at the maximum possible speed unless told otherwise
        if end_max_reward is None:
            self.end_max_reward = self.v_max
        else:
            self.end_max_reward = end_max_reward
        # linear rate at which the reward cap grows per step inside the window
        reward_span = self.end_max_reward - self.start_max_reward
        step_span = self.end_step - self.start_step
        self.reward_slope = reward_span / step_span

    def reward(self, reward):
        """Clamp *reward* to a cap that ramps linearly between the step bounds."""
        if self.step_count < self.start_step:
            # before the ramp: starting cap applies
            cap = self.start_max_reward
        elif self.step_count > self.end_step:
            # after the ramp: final cap applies
            cap = self.end_max_reward
        else:
            # inside the ramp: interpolate the cap linearly
            steps_in = self.step_count - self.start_step
            cap = self.start_max_reward + steps_in * self.reward_slope
        return min(reward, cap)
| 42.239362 | 144 | 0.59835 |
24a8b4a8c83eda7a7a89a48d89b024a9def27783 | 524 | py | Python | pyutilib/ply/__init__.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 24 | 2016-04-02T10:00:02.000Z | 2021-03-02T16:40:18.000Z | pyutilib/ply/__init__.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 105 | 2015-10-29T03:29:58.000Z | 2021-12-30T22:00:45.000Z | pyutilib/ply/__init__.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 22 | 2016-01-21T15:35:25.000Z | 2021-05-15T20:17:44.000Z | # _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
from pyutilib.ply.ply import ply_init, t_newline, t_ignore, t_COMMENT, _find_column, p_error
| 47.636364 | 92 | 0.828244 |
3d4ad754cfd114b62fcc55101b7622fabcb1b5ee | 154 | py | Python | CMSLogic/apps.py | AbhijithGanesh/Student-Portal-CMS | 2b29a4b5ee52c3e821de3e440bfbb02d12564ba4 | [
"BSD-3-Clause"
] | null | null | null | CMSLogic/apps.py | AbhijithGanesh/Student-Portal-CMS | 2b29a4b5ee52c3e821de3e440bfbb02d12564ba4 | [
"BSD-3-Clause"
] | 1 | 2021-10-11T04:36:54.000Z | 2021-10-11T04:36:54.000Z | CMSLogic/apps.py | AbhijithGanesh/Student-Portal-CMS | 2b29a4b5ee52c3e821de3e440bfbb02d12564ba4 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class CmslogicConfig(AppConfig):
    """Django AppConfig for the CMSLogic application."""

    # use 64-bit auto-incrementing primary keys by default
    default_auto_field = "django.db.models.BigAutoField"
    name = "CMSLogic"
| 22 | 57 | 0.733766 |
b7f7332ef1a2a611cdd58fe4cbc3aeb6b9151fb3 | 666 | py | Python | apps/profiles/forms.py | dxp/django-lms | 4d984f9e97f7d2695e3f3495f83d1b15ed8e88ba | [
"BSD-3-Clause"
] | null | null | null | apps/profiles/forms.py | dxp/django-lms | 4d984f9e97f7d2695e3f3495f83d1b15ed8e88ba | [
"BSD-3-Clause"
] | null | null | null | apps/profiles/forms.py | dxp/django-lms | 4d984f9e97f7d2695e3f3495f83d1b15ed8e88ba | [
"BSD-3-Clause"
] | 1 | 2021-05-12T02:53:01.000Z | 2021-05-12T02:53:01.000Z | from django import forms
from tinymce.widgets import TinyMCE
from libs.widgets import ShortNameClearableFileInput
class ProfileForm(forms.Form):
    """Form for editing a user profile: image, resume and rich-text biography."""

    mugshot = forms.FileField(label = 'Profile image', required = False, widget=ShortNameClearableFileInput)
    resume = forms.FileField(label = 'Resume', required = False, widget=ShortNameClearableFileInput)
    biography = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))

    def save(self, profile):
        """Copy cleaned form data onto *profile* and persist it.

        NOTE(review): assumes the form was validated (``is_valid()``) so that
        ``cleaned_data`` is populated -- confirm at call sites.
        """
        profile.mugshot = self.cleaned_data['mugshot']
        profile.resume = self.cleaned_data['resume']
        # the biography lives inside the profile's free-form ``data`` mapping
        profile.data['biography'] = self.cleaned_data['biography']
        profile.save()
| 39.176471 | 108 | 0.728228 |
f9e12d4a63caf37fbf36c0c69ba63a5ef64441a8 | 3,323 | py | Python | examples/pybullet/examples/saveRestoreState.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/examples/saveRestoreState.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/examples/saveRestoreState.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | import pybullet as p
import math, time
import difflib, sys
import pybullet_data
# simulation lengths for the save phase and each post-snapshot phase
numSteps = 500
numSteps2 = 30
p.connect(p.GUI, options="--width=1024 --height=768")
numObjects = 50
verbose = 0
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# profile timings for the whole save/restore comparison run
logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "saveRestoreTimings.log")
def setupWorld():
  """Reset the sim and build a fixed scene: plane, free KUKA arm, cube stack."""
  p.resetSimulation()
  # deterministic broadphase pair ordering so saved and restored runs match exactly
  p.setPhysicsEngineParameter(deterministicOverlappingPairs=1)
  p.loadURDF("planeMesh.urdf")
  kukaId = p.loadURDF("kuka_iiwa/model_free_base.urdf", [0, 0, 10])
  # disable motors so the arm joints are free
  for i in range(p.getNumJoints(kukaId)):
    p.setJointMotorControl2(kukaId, i, p.POSITION_CONTROL, force=0)
  # a slightly offset column of small cubes that will topple under gravity
  for i in range(numObjects):
    cube = p.loadURDF("cube_small.urdf", [0, i * 0.02, (i + 1) * 0.2])
    #p.changeDynamics(cube,-1,mass=100)
  p.stepSimulation()
  p.setGravity(0, 0, -10)
def dumpStateToFile(file):
  """Write base pose and velocity of every body to *file*, one field per line."""
  for body in range(p.getNumBodies()):
    pos, orn = p.getBasePositionAndOrientation(body)
    linVel, angVel = p.getBaseVelocity(body)
    file.write(f"pos={pos}\n")
    file.write(f"orn={orn}\n")
    file.write(f"linVel{linVel}\n")
    file.write(f"angVel{angVel}\n")
def compareFiles(file1, file2):
  """Print a unified diff of two open text files and report whether they match."""
  delta = list(difflib.unified_diff(
      file1.readlines(),
      file2.readlines(),
      fromfile='saveFile.txt',
      tofile='restoreFile.txt',
  ))
  for line in delta:
    sys.stdout.write(line)
  if delta:
    print("Error:", len(delta), " lines are different between files.")
  else:
    print("OK, files are identical")
# --- save phase: build the scene, run, snapshot, then dump its state ---
setupWorld()
for i in range(numSteps):
  p.stepSimulation()
# snapshot the full physics state to disk
p.saveBullet("state.bullet")
if verbose:
  p.setInternalSimFlags(1)
p.stepSimulation()
if verbose:
  p.setInternalSimFlags(0)
  print("contact points=")
  for q in p.getContactPoints():
    print(q)
for i in range(numSteps2):
  p.stepSimulation()
file = open("saveFile.txt", "w")
dumpStateToFile(file)
file.close()
#################################
# --- restore phase: rebuild scene, restore the snapshot, dump again ---
setupWorld()
#both restore from file or from in-memory state should work
p.restoreState(fileName="state.bullet")
stateId = p.saveState()
print("stateId=", stateId)
# exercise removeState, then save again to get a fresh in-memory state id
p.removeState(stateId)
stateId = p.saveState()
print("stateId=", stateId)
if verbose:
  p.setInternalSimFlags(1)
p.stepSimulation()
if verbose:
  p.setInternalSimFlags(0)
  print("contact points restored=")
  for q in p.getContactPoints():
    print(q)
for i in range(numSteps2):
  p.stepSimulation()
file = open("restoreFile.txt", "w")
dumpStateToFile(file)
file.close()
# restore once more, this time from the in-memory state id
p.restoreState(stateId)
if verbose:
  p.setInternalSimFlags(1)
p.stepSimulation()
if verbose:
  p.setInternalSimFlags(0)
  print("contact points restored=")
  for q in p.getContactPoints():
    print(q)
for i in range(numSteps2):
  p.stepSimulation()
file = open("restoreFile2.txt", "w")
dumpStateToFile(file)
file.close()
# a deterministic engine must produce identical dumps for the saved run and
# both restored runs
file1 = open("saveFile.txt", "r")
file2 = open("restoreFile.txt", "r")
compareFiles(file1, file2)
file1.close()
file2.close()
file1 = open("saveFile.txt", "r")
file2 = open("restoreFile2.txt", "r")
compareFiles(file1, file2)
file1.close()
file2.close()
p.stopStateLogging(logId)
#while (p.getConnectionInfo()["isConnected"]):
#  time.sleep(1)
| 23.735714 | 86 | 0.695456 |
f36580b1fa4da7800769fdc06adf22ca86bd86f8 | 3,146 | py | Python | web/settings.py | slop3n/bachelors_degree | fdc32520419836dc0e29bb7ef0dbc30dcd99995f | [
"Apache-2.0"
] | null | null | null | web/settings.py | slop3n/bachelors_degree | fdc32520419836dc0e29bb7ef0dbc30dcd99995f | [
"Apache-2.0"
] | null | null | null | web/settings.py | slop3n/bachelors_degree | fdc32520419836dc0e29bb7ef0dbc30dcd99995f | [
"Apache-2.0"
] | null | null | null | """
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^8do)ybrj+qcmg!72x@vwh4dsjh9q6cb$2z&ljn(x6txx1odz@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'tensorflow_api.apps.TensorflowApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Sofia'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.786885 | 91 | 0.698029 |
c732d3a683d142ca556ae695413ad0919b3b985a | 131 | py | Python | server/proctor/examinees/urls.py | jphacks/B_2011 | 3888ab8b99274734cb6ba04a54ce2ede03fcb446 | [
"MIT"
] | 1 | 2021-12-18T22:34:46.000Z | 2021-12-18T22:34:46.000Z | server/proctor/examinees/urls.py | jphacks/B_2011 | 3888ab8b99274734cb6ba04a54ce2ede03fcb446 | [
"MIT"
] | 44 | 2020-11-02T06:28:36.000Z | 2020-11-27T23:33:17.000Z | server/proctor/examinees/urls.py | jphacks/B_2011 | 3888ab8b99274734cb6ba04a54ce2ede03fcb446 | [
"MIT"
] | null | null | null | from django.urls import path
from examinees import views
urlpatterns = [
path('list', views.ExamineeListAPIView.as_view()),
]
| 18.714286 | 54 | 0.748092 |
6426d8fbdc6aefcd50f0e08679eb3183104c61e7 | 510 | py | Python | python/setup.py | DylanAnderson/life | d331c29f2d07a2f880ce8ae2448e0520ffc7c554 | [
"MIT"
] | null | null | null | python/setup.py | DylanAnderson/life | d331c29f2d07a2f880ce8ae2448e0520ffc7c554 | [
"MIT"
] | null | null | null | python/setup.py | DylanAnderson/life | d331c29f2d07a2f880ce8ae2448e0520ffc7c554 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name="life",
version="1.0.0",
description="Conway's game of life.",
author="Dylan Anderson",
url="https://github.com/DylanAnderson/life",
py_modules=["life"],
python_requires=">=3.6.0",
install_requires=["numpy"],
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Environment :: Console :: Curses"
]
)
| 25.5 | 49 | 0.6 |
98c2e5ea346ea45ccc3c98ff6ee4b073bf33d407 | 82 | py | Python | simple/lambdas/greet/lambda_function.py | tlinhart/aws-step-functions-demo | ef435e842213e5965555a8a3ad00f0a32fa06cc9 | [
"MIT"
] | null | null | null | simple/lambdas/greet/lambda_function.py | tlinhart/aws-step-functions-demo | ef435e842213e5965555a8a3ad00f0a32fa06cc9 | [
"MIT"
] | null | null | null | simple/lambdas/greet/lambda_function.py | tlinhart/aws-step-functions-demo | ef435e842213e5965555a8a3ad00f0a32fa06cc9 | [
"MIT"
] | null | null | null | def lambda_handler(event, context):
print(f'Hi, my name is {event["name"]}.')
| 27.333333 | 45 | 0.658537 |
1579a375d387d5e9c620b05a5ac35ce647feaf8e | 687 | py | Python | examples/basic/manyspheres.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 836 | 2020-06-14T02:38:12.000Z | 2022-03-31T15:39:50.000Z | examples/basic/manyspheres.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 418 | 2020-06-14T10:51:32.000Z | 2022-03-31T23:23:14.000Z | examples/basic/manyspheres.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 136 | 2020-06-14T02:26:41.000Z | 2022-03-31T12:47:18.000Z | """Example that shows how to draw very large number
of spheres (same for Points, Lines) with different
colors or different radii, N="""
from vedo import show, Spheres
from random import gauss
N = 50000
cols = range(N) # color numbers
pts = [(gauss(0, 1), gauss(0, 2), gauss(0, 1)) for i in cols]
rads = [abs(pts[i][1]) / 10 for i in cols] # radius=0 for y=0
# all have same radius but different colors:
s0 = Spheres(pts, c=cols, r=0.1, res=5) # res= theta-phi resolution
show(s0, __doc__+str(N), at=0, N=2, axes=1, viewup=(-0.7, 0.7, 0))
# all have same color but different radius along y:
s1 = Spheres(pts, r=rads, c="lb", res=8)
show(s1, at=1, axes=2, interactive=True).close()
| 34.35 | 68 | 0.673945 |
1bbec0769e3373f8624681a71275313cf67008cc | 1,734 | py | Python | sandbox_mediator/embed_string_to_rdata.py | SafeBreach-Labs/blog-snippets | 8ce37b5636c2bba55eaf0cb38aa55dec0235facc | [
"BSD-3-Clause"
] | 4 | 2018-03-19T08:21:01.000Z | 2021-03-09T04:29:25.000Z | sandbox_mediator/embed_string_to_rdata.py | SafeBreach-Labs/blog-snippets | 8ce37b5636c2bba55eaf0cb38aa55dec0235facc | [
"BSD-3-Clause"
] | null | null | null | sandbox_mediator/embed_string_to_rdata.py | SafeBreach-Labs/blog-snippets | 8ce37b5636c2bba55eaf0cb38aa55dec0235facc | [
"BSD-3-Clause"
] | 3 | 2017-07-31T08:07:14.000Z | 2018-04-13T01:59:21.000Z | #!/usr/bin/env python
import sys
import pefile
import logging
# PE header section names are NUL-padded to 8 bytes, hence the trailing NULs
RDATA_SEC_NAME = ".rdata\x00\x00"
LOGGING_FORMAT = "[*] %(levelname)s - %(message)s"
OUT_FILE_SUFFIX = "_embeddedstring"
# Initialize logging
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
def get_file_data(file):
    """Read *file* in binary mode and return its raw contents."""
    with open(file, "rb") as handle:
        contents = handle.read()
    logging.info("Got %s bytes from file %s" % (len(contents), file))
    return contents
def output_to_file(original_file, data):
    """Write *data* next to *original_file* under the embedded-string suffix."""
    destination = original_file + OUT_FILE_SUFFIX
    with open(destination, "wb") as handle:
        handle.write(data)
    logging.info("%s bytes written to file %s" % (len(data), destination))
def embed_string_to_rdata_sec(str_data, file):
    """Embed *str_data* into the free (NUL) tail of *file*'s .rdata section.

    Writes the patched image to ``file + OUT_FILE_SUFFIX``; section sizes are
    left untouched so the PE headers stay valid.
    """
    pe = pefile.PE(file)
    # take the first section whose (NUL-padded) name is ".rdata"
    rdata_sec = [sec for sec in pe.sections if sec.Name == RDATA_SEC_NAME][0]
    logging.debug("RDATA section in PE found")
    file_data = get_file_data(file)
    # calculate how much free (null) space is in the rdata section
    # (`Misc` is the header union field holding VirtualSize, i.e. used length)
    # NOTE(review): file offsets are derived from VirtualAddress; for PEs where
    # section RVAs differ from raw offsets this should likely be
    # PointerToRawData -- confirm against target binaries.
    window_length = len(file_data[rdata_sec.VirtualAddress+rdata_sec.Misc:
                        rdata_sec.VirtualAddress+rdata_sec.SizeOfRawData])
    logging.debug("%s free (null) bytes available at end of section" % window_length)
    if window_length < len(str_data):
        logging.error("Insufficient space for string in rdata section")
        exit()
    # embed desired data string into the rdata section, keeping the section size the same
    new_data = file_data[:rdata_sec.VirtualAddress+rdata_sec.Misc] + \
        str_data + chr(0)*(window_length - len(str_data)) + \
        file_data[rdata_sec.VirtualAddress+rdata_sec.SizeOfRawData:]
    logging.debug("Original file length: %s, New file length: %s" % (len(file_data), len(new_data)))
    output_to_file(file, new_data)
if __name__ == '__main__':
    # CLI: embed_string_to_rdata.py <string-to-embed> <pe-file>
    embed_string_to_rdata_sec(*sys.argv[1:])
d81d9865f5bf2eb38b521262beece6b991add2e7 | 5,151 | py | Python | bootstrap.py | gturco/find_cns | 63e08d17d9c81e250ef2637216fbf947cc295823 | [
"MIT"
] | 4 | 2016-03-21T19:19:24.000Z | 2019-10-23T09:20:13.000Z | bootstrap.py | hengbingao/find_cns | 63e08d17d9c81e250ef2637216fbf947cc295823 | [
"MIT"
] | 10 | 2016-03-21T16:55:29.000Z | 2022-03-22T07:26:03.000Z | bootstrap.py | hengbingao/find_cns | 63e08d17d9c81e250ef2637216fbf947cc295823 | [
"MIT"
] | 5 | 2016-03-02T16:20:05.000Z | 2021-07-28T02:31:23.000Z | import os
import platform
import subprocess
import sys
def create_env(dir_name):
    """creates virtenv with pip and python 2.7"""
    # NOTE: Python 2 print-to-stderr syntax; this whole script targets Python 2
    print >>sys.stderr, 'Make sure all requirements of INSTALL file are downloaded before running!!!'
    # NOTE(review): this local shadows the function name and the return code is
    # never checked (see the commented-out assert below)
    create_env = subprocess.call(['virtualenv','--distribute', dir_name,'--python=python2.7'])
    #assert: create_env == 0
def pip_install(dir_name):
    """pip install packages to virenv dir"""
    numpy = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'numpy'])
    if numpy != 0: raise Exception('can not download numpy READ REQUIREMENTS and TRY again!')
    processing = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'processing'])
    if processing != 0: raise Exception('can not download processing READ REQUIREMENTS and TRY again!')
    shapely = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'shapely==1.2.19'])
    if shapely != 0: raise Exception('can not download shapely READ REQUIREMENTS and TRY again!')
    pyfasta = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install','-Iv', 'pyfasta==0.4.5'])
    if pyfasta != 0: raise Exception('can not download pyfasta READ REQUIREMENTS and TRY again!')
    scipy = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'scipy'])
    if scipy != 0: raise Exception('can not download scipy READ REQUIREMENTS and TRY again!')
    Cython = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'Cython'])
    if Cython != 0: raise Exception('can not download Cython READ REQUIREMENTS and TRY again!')
    # NOTE(review): unlike every other package, the Pyrex return code is never
    # checked -- confirm whether Pyrex is genuinely optional
    pyrex = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'Pyrex'])
    biopython = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'biopython'])
    if biopython != 0: raise Exception('can not download biopython READ REQUIREMENTS and TRY again!')
    pandas = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'pandas'])
    if pandas != 0: raise Exception('can not download pandas READ REQUIREMENTS and TRY again!')
def git_install(dir_name):
    """downloads git scripts to virenv bin"""
    print >>sys.stderr, 'Be patient, takes a long time to download'
    # NOTE(review): the error message below contains a typo ("contiune") --
    # left as-is since it is a runtime string
    flatfeature = subprocess.call(['{0}/bin/pip'.format(dir_name), 'install', 'git+https://github.com/brentp/flatfeature.git'])
    if flatfeature != 0:
        raise Exception("Download git to contiune")
    # clone the two repos straight into the virtualenv's bin directory;
    # their exit codes are captured but not checked
    quota = subprocess.Popen(['git', 'clone','https://github.com/tanghaibao/quota-alignment.git'],cwd=r'{0}/bin/'.format(dir_name)).wait()
    bcbb = subprocess.Popen(['git', 'clone', 'https://github.com/chapmanb/bcbb.git'],cwd=r'{0}/bin/'.format(dir_name)).wait()
def setup_install(dir_name):
    """installs setup install files to virenv directory"""
    # each setup.py is run with the virtualenv's python, reached via a relative
    # path from the package's own directory (hence the differing ../ depths)
    subprocess.Popen(['../../python2.7','setup.py','install'],cwd=r'{0}/bin/bcbb/gff/'.format(dir_name)).wait()
    subprocess.Popen(['../../../../{0}/bin/python2.7'.format(dir_name),'setup.py','install'],cwd=r'pipeline/coann/brents_bpbio/biostuff/').wait()
    subprocess.Popen(['../../../../../{0}/bin/python2.7'.format(dir_name),'setup.py','install'],cwd=r'pipeline/coann/brents_bpbio/blasttools/blast_misc/').wait()
    subprocess.Popen(['../../../../../{0}/bin/python2.7'.format(dir_name),'setup.py','install'],cwd=r'pipeline/coann/brents_bpbio/scripts/bblast/').wait()
    co_anno = subprocess.Popen(['../../../../{0}/bin/python2.7'.format(dir_name),'setup.py','install'],cwd=r'pipeline/coann/brents_bpbio/co-anno/').wait()
def install_blast(dir_name):
    """Download and unpack the matching NCBI BLAST 2.2.26 build into <dir_name>/bin."""
    opersys = platform.system()
    if opersys == 'Darwin':
        link = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/release/LATEST/blast-2.2.26-universal-macosx.tar.gz'
    elif opersys == 'Linux':
        # NOTE(review): platform.version() rarely contains '_32'; consider
        # platform.machine()/platform.architecture() for bitness detection
        if '_32' in platform.version():
            link = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/release/LATEST/blast-2.2.26-ia32-linux.tar.gz'
        else:
            link = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/release/LATEST/blast-2.2.26-x64-linux.tar.gz'
    else:
        # previously any other OS fell through to a NameError on `link`
        raise Exception('unsupported platform for BLAST install: %s' % opersys)
    subprocess.Popen(['wget','-O','blast.tar.gz',link],cwd=r'{0}/bin/'.format(dir_name)).wait()
    subprocess.Popen(['tar', '-xvzf','blast.tar.gz'],cwd=r'{0}/bin/'.format(dir_name)).wait()
def install_lastz(dir_name):
    """Download, build and install lastz into the pipeline's bin directory."""
    # NOTE(review): the tarball is extracted under {dir_name}/bin, but this
    # check looks at the top of {dir_name} -- confirm the intended layout
    if "lastz-distrib" not in os.listdir(dir_name):
        link = 'http://www.bx.psu.edu/~rsharris/lastz/newer/lastz-1.03.02.tar.gz'
        subprocess.Popen(['wget','-O','lastz.tar.gz',link],cwd=r'{0}/bin/'.format(dir_name)).wait()
        subprocess.Popen(['tar', '-xvzf','lastz.tar.gz'],cwd=r'{0}/bin/'.format(dir_name)).wait()
    passed = subprocess.Popen(['make'],cwd=r'{0}/bin/lastz-distrib-1.03.02/'.format(dir_name)).wait()
    # a known failure mode: the -Werror flag in the shipped Makefile breaks the build
    if passed != 0: print >>sys.stderr, 'Delete the word -Werror at the line 31 in cns_pipeline/bin/lastz-distrib-1.03.02/src/Makefile and re-run bootstrap.py!!!!!!'
    cwd = r'{0}/bin/lastz-distrib-1.03.02/'.format(dir_name)
    # install target honours LASTZ_INSTALL, passed via the shell environment
    opts = 'LASTZ_INSTALL={0}/cns_pipeline/bin/'.format(os.getcwd())
    subprocess.Popen([opts,'make','install'],cwd=cwd, shell=True).wait()
# build the full 'cns_pipeline' environment step by step
create_env('cns_pipeline')
pip_install('cns_pipeline')
git_install('cns_pipeline')
setup_install('cns_pipeline')
install_blast('cns_pipeline')
install_lastz('cns_pipeline')
| 60.6 | 165 | 0.677732 |
ed11a0d7dc723198b6df12b2a68f5ca124c6d527 | 3,030 | py | Python | src/models/debug_fetch_data.py | voreille/plc_seg | e2238bc3342d54531fcab74ac84d24a056f16645 | [
"MIT"
] | null | null | null | src/models/debug_fetch_data.py | voreille/plc_seg | e2238bc3342d54531fcab74ac84d24a056f16645 | [
"MIT"
] | null | null | null | src/models/debug_fetch_data.py | voreille/plc_seg | e2238bc3342d54531fcab74ac84d24a056f16645 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from random import shuffle
import datetime
import dotenv
import h5py
import pandas as pd
from src.models.fetch_data_from_hdf5 import get_tf_data
# repo root is two levels above this file; .env there supplies the data paths
project_dir = Path(__file__).resolve().parents[2]
dotenv_path = project_dir / ".env"
dotenv.load_dotenv(str(dotenv_path))
log_dir = project_dir / ("logs/fit/" +
                         datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
path_data_nii = Path(os.environ["NII_PATH"])
path_mask_lung_nii = Path(os.environ["NII_LUNG_PATH"])
path_clinical_info = Path(os.environ["CLINIC_INFO_PATH"])
# model input size and batch size
image_size = (256, 256)
bs = 2
def get_trainval_patient_list(df, patient_list):
    """Split *patient_list* into train/val lists of "PatientLC_<id>" names.

    Validation holds out 2 PLC-negative and 4 PLC-positive CHUV patients,
    chosen at random (uses the global `random` state via `shuffle`).
    """
    ids = [int(name.split('_')[1]) for name in patient_list]
    df = df.loc[ids, :]
    chuv = df["is_chuv"] == 1
    neg_ids = list(df[chuv & (df["plc_status"] == 0)].index)
    pos_ids = list(df[chuv & (df["plc_status"] == 1)].index)
    shuffle(neg_ids)
    shuffle(pos_ids)
    # after shuffling, the list heads become the validation hold-out
    val_ids = neg_ids[:2] + pos_ids[:4]
    train_ids = neg_ids[2:] + pos_ids[4:]
    patient_list_train = [f"PatientLC_{i}" for i in train_ids]
    patient_list_val = [f"PatientLC_{i}" for i in val_ids]
    return patient_list_train, patient_list_val
def main():
    """Load the training HDF5, split patients, build the tf datasets and
    print the shapes of the validation batches (debug utility)."""
    file_train = h5py.File(
        "/home/val/python_wkspce/plc_seg/data/processed/2d_pet_normalized/train.hdf5",
        "r")
    clinical_df = pd.read_csv(path_clinical_info).set_index("patient_id")
    patient_list = list(file_train.keys())
    # PatientLC_63 is explicitly excluded from all splits
    patient_list = [p for p in patient_list if p not in ["PatientLC_63"]]
    patient_list_train, patient_list_val = get_trainval_patient_list(
        clinical_df, patient_list)
    # deterministic, GTVT-centered slices for validation
    data_val = get_tf_data(
        file_train,
        clinical_df,
        output_shape_image=(256, 256),
        random_slice=False,
        centered_on_gtvt=True,
        patient_list_copy=patient_list_val,
    ).cache().batch(2)
    # augmented training pipeline
    # NOTE(review): data_train is built but never iterated in this debug script
    data_train = get_tf_data(file_train,
                             clinical_df,
                             output_shape_image=(256, 256),
                             random_slice=True,
                             random_shift=20,
                             n_repeat=10,
                             num_parallel_calls='auto',
                             oversample_plc_neg=True,
                             patient_list_copy=patient_list_train).batch(bs)
    # print every validation batch's shapes and labels
    for x, y, plc_status in data_val.as_numpy_iterator():
        print(
            f"voici, voilé le x {x.shape}, le y {y.shape} et le plc_status {plc_status}"
        )
    file_train.close()
if __name__ == '__main__':
    main()
| 35.647059 | 88 | 0.644554 |
84751a636768bca7ce2d48644205ff3e02c6748d | 2,470 | py | Python | vendor/bundle/ruby/2.6.0/gems/nokogiri-1.10.4/ext/nokogiri/tmp/x86_64-apple-darwin19/ports/libxslt/1.1.33/libxslt-1.1.33/win32/runtests.py | Burton-David/Jalpc | 5f22fbc245b0bb18220d3a61c3a5d20557e64288 | [
"MIT"
] | null | null | null | vendor/bundle/ruby/2.6.0/gems/nokogiri-1.10.4/ext/nokogiri/tmp/x86_64-apple-darwin19/ports/libxslt/1.1.33/libxslt-1.1.33/win32/runtests.py | Burton-David/Jalpc | 5f22fbc245b0bb18220d3a61c3a5d20557e64288 | [
"MIT"
] | null | null | null | vendor/bundle/ruby/2.6.0/gems/nokogiri-1.10.4/ext/nokogiri/tmp/x86_64-apple-darwin19/ports/libxslt/1.1.33/libxslt-1.1.33/win32/runtests.py | Burton-David/Jalpc | 5f22fbc245b0bb18220d3a61c3a5d20557e64288 | [
"MIT"
] | 1 | 2019-03-27T16:24:43.000Z | 2019-03-27T16:24:43.000Z | import difflib
import io
import os
import sys
from os import path
from subprocess import Popen, PIPE
# locate the xsltproc binary: prefer the MSVC build, fall back to MinGW
xsltproc = path.join(os.getcwd(), "win32", "bin.msvc", "xsltproc.exe")
if not path.isfile(xsltproc):
    xsltproc = path.join(os.getcwd(), "win32", "bin.mingw", "xsltproc.exe")
if not path.isfile(xsltproc):
    raise FileNotFoundError(xsltproc)
def runtests(xsl_dir, xml_dir="."):
old_dir = os.getcwd()
os.chdir(xsl_dir)
for xsl_file in os.listdir():
if not xsl_file.endswith(".xsl"):
continue
xsl_path = "./" + xsl_file
name = path.splitext(xsl_file)[0]
xml_path = path.join(xml_dir + "/" + name + ".xml")
if not path.isfile(xml_path):
continue
args = [ xsltproc, "--maxdepth", "200", xsl_path, xml_path ]
p = Popen(args, stdout=PIPE, stderr=PIPE)
out_path = path.join(xml_dir, name + ".out")
err_path = path.join(xml_dir, name + ".err")
out_diff = diff(p.stdout, "<stdout>", name + ".out")
err_diff = diff(p.stderr, "<stderr>", name + ".err")
if (len(out_diff) or len(err_diff)):
sys.stdout.writelines(out_diff)
sys.stdout.writelines(err_diff)
print()
os.chdir(old_dir)
def diff(got_stream, got_name, expected_path):
    """Compare *got_stream* (latin-1 bytes) against the file at *expected_path*.

    Returns the unified diff as a list of lines; an empty list means the
    outputs match (a missing expected file is treated as empty).
    """
    text_stream = io.TextIOWrapper(got_stream, encoding="latin_1")
    got_lines = text_stream.readlines()
    expected_lines = []
    if path.isfile(expected_path):
        # fix: the original opened this file and never closed it (handle leak)
        with open(expected_path, "r", encoding="latin_1") as expected_file:
            expected_lines = expected_file.readlines()
    delta = difflib.unified_diff(expected_lines, got_lines,
                                 fromfile=expected_path,
                                 tofile=got_name)
    return list(delta)
print("## Running REC tests")
runtests("tests/REC")
print("## Running general tests")
runtests("tests/general", "./../docs")
print("## Running exslt common tests")
runtests("tests/exslt/common")
print("## Running exslt functions tests")
runtests("tests/exslt/functions")
print("## Running exslt math tests")
runtests("tests/exslt/math")
print("## Running exslt saxon tests")
runtests("tests/exslt/saxon")
print("## Running exslt sets tests")
runtests("tests/exslt/sets")
print("## Running exslt strings tests")
runtests("tests/exslt/strings")
print("## Running exslt dynamic tests")
runtests("tests/exslt/dynamic")
print("## Running exslt date tests")
runtests("tests/exslt/date")
| 28.390805 | 75 | 0.636437 |
5a95af4e4a2411970e6b7abff06dbfdb188cc523 | 2,553 | py | Python | betteranvils.py | Syriiin/mc-betteranvils | 23e818dfd06d8e8647bfffbe1e80fcb25a305444 | [
"MIT"
] | null | null | null | betteranvils.py | Syriiin/mc-betteranvils | 23e818dfd06d8e8647bfffbe1e80fcb25a305444 | [
"MIT"
] | null | null | null | betteranvils.py | Syriiin/mc-betteranvils | 23e818dfd06d8e8647bfffbe1e80fcb25a305444 | [
"MIT"
] | null | null | null | import os
import sys
from tqdm import tqdm
from nbt import nbt, region
world_path = sys.argv[1] or "./world/"
if not os.path.exists(world_path):
print("World folder not found. Specify path or place in directory with world folder.")
exit()
playerdata_path = os.path.join(world_path, "playerdata/")
region_path = os.path.join(world_path, "region/")
player_files = [os.path.join(playerdata_path, filename) for filename in os.listdir(playerdata_path) if filename.endswith(".dat")]
region_files = [os.path.join(region_path, filename) for filename in os.listdir(region_path) if filename.endswith(".mca")]
def reset_repair_cost(item_tag):
    """Zero out a positive RepairCost on an item's NBT tag.

    Returns True when a positive cost was found and reset to 0, False
    otherwise -- including when the item has no "tag"/"RepairCost" entry
    at all (the missing-key case is swallowed deliberately).
    """
    try:
        cost_tag = item_tag["tag"]["RepairCost"]
    except KeyError:
        return False
    if cost_tag.value <= 0:
        return False
    cost_tag.value = 0
    return True
print("Searching players")
# Pass 1: per-player NBT files.  Reset repair costs on every item in each
# player's ender chest and inventory; rewrite the file only when at least
# one item actually changed.
for path in tqdm(player_files):
    reset_count = 0
    nbtfile = nbt.NBTFile(path)
    # search ender chest items and inventory
    for item_tag in nbtfile["EnderItems"].tags + nbtfile["Inventory"].tags:
        if reset_repair_cost(item_tag):
            reset_count += 1
    if reset_count > 0:
        nbtfile.write_file(path)
        tqdm.write(f"{reset_count} items reset")
        reset_count = 0

print("Searching map")
# Pass 2: every chunk of every region file -- world entities and tile
# entities (placed blocks with storage).
for path in tqdm(region_files):
    regionfile = region.RegionFile(path)
    for chunk in tqdm(regionfile.iter_chunks(), total=regionfile.chunk_count()):
        reset_count = 0
        # search dropped items and chest minecarts
        for entity in chunk["Level"]["Entities"]:
            try:
                items = entity["Items"]
                for item in items:
                    if reset_repair_cost(item):
                        reset_count += 1
            except KeyError:
                # No "Items" list: the entity may itself carry an item tag
                # (e.g. a dropped item), so try it directly.
                try:
                    if reset_repair_cost(entity):
                        reset_count += 1
                except KeyError:
                    # NOTE(review): reset_repair_cost already swallows
                    # KeyError, so this inner guard looks redundant; kept
                    # as-is to preserve behavior.
                    pass
        # search blocks with storage
        for entity in chunk["Level"]["TileEntities"]:
            try:
                items = entity["Items"]
                for item in items:
                    if reset_repair_cost(item):
                        reset_count += 1
            except KeyError:
                pass
        # Write the chunk back only when something changed.
        if reset_count > 0:
            regionfile.write_chunk(chunk.loc.x, chunk.loc.z, chunk)
            tqdm.write(f"{reset_count} items reset")
            reset_count = 0

print("\r\nWorld successfully rid of anvil repair cost yay")
| 31.9125 | 129 | 0.595378 |
8d44a94a8110fe3f1599709a23d4c8478bc350fd | 1,999 | py | Python | docs/source/conf.py | SvenMarcus/hpc-rocket | b28917e7afe6e2e839d1ae58f2e21fba6e3eb61c | [
"MIT"
] | 7 | 2022-01-03T13:52:40.000Z | 2022-03-10T16:26:04.000Z | docs/source/conf.py | SvenMarcus/hpclaunch | 1a0459167bf5d7b26b1d7e46a1b1d073a4a55650 | [
"MIT"
] | 10 | 2021-09-16T15:25:04.000Z | 2021-11-10T13:22:07.000Z | docs/source/conf.py | SvenMarcus/hpclaunch | 1a0459167bf5d7b26b1d7e46a1b1d073a4a55650 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
# Sphinx build configuration.  Only the commonly used options are set here;
# the full list lives at
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information ------------------------------------------------------

project = "HPC Rocket"
author = "Sven Marcus"
# NOTE: `copyright` intentionally shadows the builtin -- Sphinx reads this
# exact module-level name.
copyright = "2022, Sven Marcus"

# Full version string, including alpha/beta/rc tags.
release = "0.2.0"

# -- General configuration ----------------------------------------------------

# Extension modules to load (myst_parser adds Markdown source support).
extensions = ["myst_parser"]

# Directories searched for HTML templates, relative to this file.
templates_path = ["_templates"]

# Glob patterns ignored when collecting source files.  This also affects
# html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output ---------------------------------------------------

# Theme used for HTML and HTML Help pages (builtin or installed).
html_theme = "furo"

# Extra static files (style sheets, images).  Copied after Sphinx's builtin
# static files, so e.g. a custom "default.css" overrides the builtin one.
html_static_path = ["_static"]

# Map source-file suffixes to the parser that handles them.
source_suffix = {
    ".rst": "restructuredtext",
    ".txt": "markdown",
    ".md": "markdown",
}
| 32.770492 | 79 | 0.656328 |
354f6d922b92b37254a8bc4b4f073216fa62c186 | 1,063 | py | Python | backend/test/formacao_tests/formacao_home_tests.py | erikabarros/naguil | fcc1592b1ac4235b5c35615a1f3a13a1d0a081e9 | [
"MIT"
] | null | null | null | backend/test/formacao_tests/formacao_home_tests.py | erikabarros/naguil | fcc1592b1ac4235b5c35615a1f3a13a1d0a081e9 | [
"MIT"
] | null | null | null | backend/test/formacao_tests/formacao_home_tests.py | erikabarros/naguil | fcc1592b1ac4235b5c35615a1f3a13a1d0a081e9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from formacao_app.formacao_model import Formacao
from routes.formacaos.home import index, delete
from gaebusiness.business import CommandExecutionException
from gaegraph.model import Node
from mommygae import mommy
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
    """Smoke test for the Formacao index page."""

    def test_success(self):
        # One saved entity is enough to exercise the listing template.
        mommy.save_one(Formacao)
        self.assert_can_render(index())
class DeleteTests(GAETestCase):
    """Tests for the Formacao delete endpoint."""

    def test_success(self):
        # Deleting an existing Formacao redirects and removes the entity.
        saved = mommy.save_one(Formacao)
        response = delete(saved.key.id())
        self.assertIsInstance(response, RedirectResponse)
        self.assertIsNone(saved.key.get())

    def test_non_formacao_deletion(self):
        # Deleting a node of another kind must fail and leave it intact.
        other_node = mommy.save_one(Node)
        self.assertRaises(CommandExecutionException, delete, other_node.key.id())
        self.assertIsNotNone(other_node.key.get())
| 34.290323 | 83 | 0.761994 |
5289fbe5c9c466f0e0cdfa0a2e6a8607fc87876c | 1,083 | py | Python | setup.py | ihgazni2/pynoz | 828e84ff6d1496b1cea0cc488ef5558b772c39a6 | [
"MIT"
] | null | null | null | setup.py | ihgazni2/pynoz | 828e84ff6d1496b1cea0cc488ef5558b772c39a6 | [
"MIT"
] | null | null | null | setup.py | ihgazni2/pynoz | 828e84ff6d1496b1cea0cc488ef5558b772c39a6 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
requirements = []

# Trove classifiers describing the intended audience and environment.
CLASSIFIERS = [
    'Environment :: Console',
    'Environment :: Web Environment',
    'Intended Audience :: End Users/Desktop',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
]

setup(
    name="pynoz",
    version="0.0.9", #@version@#
    description="simple tools for time date timezone",
    long_description="refer to .md files in https://github.com/ihgazni2/pynoz",
    author="ihgazni2",
    author_email='',
    url="https://github.com/ihgazni2/pynoz",
    license="MIT",
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    py_modules=['pynoz'],
    entry_points={
        'console_scripts': [
            'pynoz=pynoz.bin:main'
        ]
    },
    package_data={
        'resources': ['RESOURCES/*']
    },
    include_package_data=True,
    install_requires=requirements,
)

# Build commands:
#   python3 setup.py bdist --formats=tar
#   python3 setup.py sdist
| 22.5625 | 83 | 0.580794 |
1289b2df3f309065a954656bf7e686c09d3e6700 | 6,538 | py | Python | scripts/1_exp_runners/4_run_get_multi_get.py | gokul-uf/asl-fall-2017 | 83e882d9d4c52bdd279b4e3eed8cd7ac768e88d7 | [
"MIT"
] | 1 | 2018-06-13T16:57:59.000Z | 2018-06-13T16:57:59.000Z | scripts/1_exp_runners/4_run_get_multi_get.py | gokul-uf/asl-fall-2017 | 83e882d9d4c52bdd279b4e3eed8cd7ac768e88d7 | [
"MIT"
] | null | null | null | scripts/1_exp_runners/4_run_get_multi_get.py | gokul-uf/asl-fall-2017 | 83e882d9d4c52bdd279b4e3eed8cd7ac768e88d7 | [
"MIT"
] | null | null | null | from __future__ import print_function
from os import system
from time import sleep

# Stand-ins for dry runs: uncommenting these makes every shell command and
# sleep print instead of execute (presumably for debugging -- confirm).
# system = print
# sleep = print

'''
3 MT, 2 MW, 3 MC
'''

# Sweep parameters for the multi-get experiment:
#   num_reps    -- repetitions per configuration
#   num_thread  -- middleware worker-thread count
#   shard_modes -- middleware multi-get sharding on/off
#   multi_gets  -- multi-get sizes (also used as the SET:GET ratio divisor)
num_reps = 3
num_thread = 64
shard_modes = [True, False]
multi_gets = [1, 3, 6, 9]
def start_vms():
    """Boot all eight Azure VMs used by the experiment via the az CLI."""
    print("starting VMs")
    for vm_id in range(1, 9):
        print("Starting foraslvms{}".format(vm_id))
        cmd = "az vm start --name foraslvms{} --resource-group ASL".format(vm_id)
        system(cmd)
    print("All VMs started")
def stop_vms():
    """Deallocate all eight Azure VMs used by the experiment."""
    print("stopping VMs")
    for vm_id in range(1, 9):
        print("Stopping foraslvms{}".format(vm_id))
        cmd = "az vm deallocate --name foraslvms{} --resource-group ASL".format(vm_id)
        system(cmd)
    print("All VMs shutdown")
def start_mc():
    """Launch one single-threaded memcached (port 6969) on VMs 6 through 8."""
    print("Starting an MC instance on the VM")
    for vm_id in range(6, 9):
        print("Starting on foraslvms{}".format(vm_id))
        remote_cmd = 'ssh sgokula@sgokulaforaslvms{}.westeurope.cloudapp.azure.com "memcached -t 1 -p 6969 >/dev/null </dev/null 2>&1 & "'.format(vm_id)
        system(remote_cmd)
    print("MC Instances running")
def make_dirs():
    """Create the per-experiment log directory on every memtier (MT) host."""
    mt_ips = "sgokula@sgokulaforaslvms1.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms2.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms3.westeurope.cloudapp.azure.com"
    print("creating dirs in MT")
    mkdir_cmd = "parallel-ssh -H '{}' 'mkdir /home/sgokula/gets_exp_log'".format(mt_ips)
    system(mkdir_cmd)
def ping_tests():
    # Latency sanity check: from every memtier (MT) host, ping each memcached
    # (MC) and middleware (MW) private IP five times and store the output
    # under ~/gets_exp_log on the MT machines (one file per target per host).
    print("Runing ping tests")
    mt_ips = "sgokula@sgokulaforaslvms1.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms2.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms3.westeurope.cloudapp.azure.com"
    mc_ips = [
        "10.0.0.6",
        "10.0.0.11",
        "10.0.0.7"
    ]
    mw_ips = [
        "10.0.0.4",
        "10.0.0.8"
    ]
    print("pinging MC instances")
    for i, mc_ip in enumerate(mc_ips):
        system("parallel-ssh -t 0 -H '{}' 'ping -c 5 {} > /home/sgokula/gets_exp_log/ping_mc_{}_$HOSTNAME.txt'".format(mt_ips, mc_ip, i))
    print("pinging MW instances")
    for i, mw_ip in enumerate(mw_ips):
        system("parallel-ssh -t 0 -H '{}' 'ping -c 5 {} > /home/sgokula/gets_exp_log/ping_mw_{}_$HOSTNAME.txt'".format(mt_ips, mw_ip, i))
    print("ping tests completed")
def populate_all_mc():
    # Pre-load every memcached server with keys 0..10000 (1 KiB values) using
    # a SET-only memtier run (--ratio 1:0, sequential key pattern).  Each MT
    # host populates exactly one MC server (pairwise zip below).
    print("Populating MCs with keys")
    mt_ips = [
        "sgokula@sgokulaforaslvms1.westeurope.cloudapp.azure.com",
        "sgokula@sgokulaforaslvms2.westeurope.cloudapp.azure.com",
        "sgokula@sgokulaforaslvms3.westeurope.cloudapp.azure.com"
    ]
    mc_ips = [
        "10.0.0.6",
        "10.0.0.11",
        "10.0.0.7"
    ]
    for mt_ip, mc_ip in zip(mt_ips, mc_ips):
        print("populating {} with {}".format(mc_ip, mt_ip))
        command = "ssh {} 'memtier_benchmark -s {} -p 6969 -P memcache_text -n allkeys --ratio 1:0".format(mt_ip, mc_ip) \
            + " --key-maximum 10000 --hide-histogram --expiry-range 99999-100000 -d 1024 --key-pattern=S:S> /dev/null 2>&1'"
        system(command)
    print("MC population completed!")
def run_config(shard_mode, num_multi_get, rep):
    # Run one experiment configuration end-to-end: start both middlewares,
    # drive load from all three memtier hosts, then tear down and archive
    # the middleware logs.
    #
    # Args:
    #   shard_mode: True/False -- middleware multi-get sharding flag (-s).
    #   num_multi_get: keys per multi-get; also used as the 1:{} SET:GET ratio.
    #   rep: repetition index, used only in output file/directory names.
    mt_ips = "sgokula@sgokulaforaslvms1.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms2.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms3.westeurope.cloudapp.azure.com"
    mw_ips = "sgokula@sgokulaforaslvms4.westeurope.cloudapp.azure.com sgokula@sgokulaforaslvms5.westeurope.cloudapp.azure.com"
    # start MWs (backgrounded on VMs 4 and 5, each pointed at all three MCs)
    print("starting the middleware")
    system("ssh sgokula@sgokulaforaslvms4.westeurope.cloudapp.azure.com 'java -jar /home/sgokula/middleware-sgokula.jar -l 10.0.0.4 -p 6969 -t {} -s {} -m 10.0.0.6:6969 10.0.0.7:6969 10.0.0.11:6969 >/dev/null </dev/null 2>&1 &'".format(num_thread, shard_mode))
    system("ssh sgokula@sgokulaforaslvms5.westeurope.cloudapp.azure.com 'java -jar /home/sgokula/middleware-sgokula.jar -l 10.0.0.8 -p 6969 -t {} -s {} -m 10.0.0.6:6969 10.0.0.7:6969 10.0.0.11:6969 >/dev/null </dev/null 2>&1 &'".format(num_thread, shard_mode))
    # sleep to let the MW establish connections
    print("sleep for 20secs before running MT")
    sleep(20)
    # start dstat (CPU/memory/network sampling every 5s, 20 samples)
    print("starting dstat")
    system("parallel-ssh -t 0 -H '{}' 'dstat -cmn --nocolor ".format(mt_ips) \
        + "--output /home/sgokula/gets_exp_log/dstat_s-{}_g-{}_rep-{}_$HOSTNAME.csv ".format(shard_mode, num_multi_get, rep) \
        + "5 20 >/dev/null </dev/null 2>&1 &'")
    # run the config, MT
    print("running config") # ratio=1:8 --multi-key-get=8
    # NOTE(review): the first memtier call (against MW1) is backgrounded
    # (trailing '&'); the second (against MW2) is not, so this function
    # blocks here until the 80 s test finishes.
    system("parallel-ssh -t 0 -H '{}' 'memtier_benchmark -s 10.0.0.4 -p 6969 -P memcache_text -t 1 -c 2 --ratio 1:{} --multi-key-get={} ".format(mt_ips, num_multi_get, num_multi_get) \
        + " --test-time 80 --key-maximum 10000 -d 1024 --json-out-file /home/sgokula/gets_exp_log/mt_s-{}_g-{}_rep-{}_mw-1_$HOSTNAME.json </dev/null >/dev/null 2>&1 &'".format(shard_mode, num_multi_get, rep))
    system("parallel-ssh -t 0 -H '{}' 'memtier_benchmark -s 10.0.0.8 -p 6969 -P memcache_text -t 1 -c 2 --ratio 1:{} --multi-key-get={} ".format(mt_ips, num_multi_get, num_multi_get) \
        + " --test-time 80 --key-maximum 10000 -d 1024 --json-out-file /home/sgokula/gets_exp_log/mt_s-{}_g-{}_rep-{}_mw-2_$HOSTNAME.json </dev/null >/dev/null 2>&1 '".format(shard_mode, num_multi_get, rep))
    # kill middleware, RIP
    print("killing middleware")
    system("parallel-ssh -t 0 -H '{}' 'killall java' ".format(mw_ips))
    # Move / rename the log files in MC to some place else, because it doesn't
    # keep track of reps, or VC
    print("moving MW logs")
    system("ssh sgokula@sgokulaforaslvms4.westeurope.cloudapp.azure.com 'mv /home/sgokula/logs /home/sgokula/gets-exp_s-{}_g-{}_rep-{}_mw-1_log'".format(shard_mode, num_multi_get, rep))
    system("ssh sgokula@sgokulaforaslvms5.westeurope.cloudapp.azure.com 'mv /home/sgokula/logs /home/sgokula/gets-exp_s-{}_g-{}_rep-{}_mw-2_log'".format(shard_mode, num_multi_get, rep))
def run_exp():
    """Sweep every (shard mode, multi-get size, repetition) combination.

    2 shard modes x 4 multi-get sizes x 3 repetitions = 24 configurations.
    """
    config_number = 1
    for shard in shard_modes:
        for gets in multi_gets:
            for repetition in range(1, num_reps + 1):
                print("runing config {} / 24, shard: {}, multi_get_size {}, rep: {}".format(config_number, shard, gets, repetition))
                run_config(shard, gets, repetition)
                config_number += 1
                # Short pause between configurations.
                print("sleeping for 5 secs before next config")
                sleep(5)
if __name__ == "__main__":
    # Full experiment pipeline: boot VMs, wait for them to come up, start
    # memcached, pre-populate keys, prepare log dirs, record ping baselines,
    # then run the sweep.
    start_vms()
    print("sleeping for 2 minutes before starting experiments")
    sleep(120)
    start_mc()
    populate_all_mc()
    make_dirs()
    ping_tests()
    run_exp()
stop_vms() | 45.089655 | 260 | 0.65647 |
1a5c6e8aec6b4c3f483a7683df7e9f62d5e77ba8 | 37,166 | py | Python | tensorflow/contrib/eager/python/network.py | master-hzz/tensorflow | 4b4b51cdd9e8c3c748b76dd8649bcd5556e84d76 | [
"Apache-2.0"
] | 2 | 2021-07-07T13:55:09.000Z | 2021-12-04T22:51:46.000Z | tensorflow/contrib/eager/python/network.py | Yeesn/tensorflow | 31b79e42b9e1643b3bcdc9df992eb3ce216804c5 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/eager/python/network.py | Yeesn/tensorflow | 31b79e42b9e1643b3bcdc9df992eb3ce216804c5 | [
"Apache-2.0"
] | 1 | 2019-01-10T08:34:08.000Z | 2019-01-10T08:34:08.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Network is a composition of Layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import weakref
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import ops
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
# pylint: disable=protected-access
# Explanation for protected-access disable: Network has lots of same-class and
# parent-class references across different objects, and some to private
# functions in base.py which should be reused.
# Bookkeeping for a single Network.restore call whose variables have not all
# been created yet; entries are consumed lazily by the deferred-restoration
# custom getter as the variables come into existence.
_DeferredRestoration = collections.namedtuple(
    "_DeferredRestoration",
    [
        # The map_func to use (either user-specified or the default).
        "map_func",
        # Boolean, True if the user specified an explicit map_func, for error
        # messages.
        "map_func_is_user",
        # A mapping from checkpoint names to initial values of not-yet-created
        # variables which should be restored. These values come from parsing a
        # checkpoint.
        "checkpointed_variables_to_restore",
        # A mapping from checkpoint name to variable objects of variables which
        # have already been restored, for error checking.
        "restored_variables",
        # The session to restore with (if in graph mode).
        "session",
        # Names of the Network where the restore was requested, for error
        # messages.
        "network_name",
        "network_scope_name"
    ])
def _default_naming_conflict_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The default checkpoint variable name mapping strategy for Network "
"'%s' resulted in a naming conflict. We attempted to strip off the "
"variable prefix for the Network ('%s'), but this resulted in two "
"variables named '%s' (originally '%s' and '%s'). This should only "
"happen when using variable sharing (i.e. the Network contains Networks "
"or Layers which were first added to another Network, and therefore "
"have that Network's variable prefix). One solution is to pass "
"`map_func=lambda n: n` to Network.save and Network.restore to use "
"fully qualified variable names in the checkpoint, although this will "
"require that the variable prefix of the Network being restored into "
"is also '%s'. You may alternatively write an arbitrary mapping.")
% (
network_name, network_scope_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name, network_scope_name
))
def _restore_custom_map_func_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The map_func passed to Network.restore for the Network '%s' "
"resulted in two variables named '%s' (originally '%s' and '%s'). Since "
"this is also an error on Network.save, this Network was "
"probably not saved with this map_func. Note that map_func "
"always maps from full variable names to checkpoint names; "
"there is no need to specify an inverse mapping.\n\n"
"Try stripping less from the variable names, or renaming parts "
"of the Network. For reference, variables created by sub-Layers "
"of this Network are prefixed with '%s', but if they are "
"re-used after being added to another Network they will have "
"that Network's full variable prefix instead.") % (
network_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name,
network_scope_name))
def _make_custom_getter_for_deferred_restorations():
  """Returns a custom getter which searches `deferred_restorations`.

  Returns: A tuple of (_custom_getter, deferred_restorations)
    _custom_getter: The getter which should be added to variable_scopes where
       variables will be created.
    deferred_restorations: A list for _DeferredRestoration objects. Typically
      empty when the getter is set, and expanded as deferred restorations are
      requested. All new deferred restorations should be appended to the end of
      the list, where they will have priority over older deferred restorations.
  """
  # The getter closes over this list; callers mutate it in place after this
  # function returns.
  deferred_restorations = []

  def _custom_getter(getter, name, shape=None, dtype=None,
                     initializer=None,
                     *args, **kwargs):
    """A custom getter which processes deferred restorations."""
    # Iterate over restorations, newest first (newer restorations will take
    # precedence over older restorations, just like with immediate restorations
    # into existing variables).
    delayed_restoration = None
    found_value = False
    value_to_restore = None
    for delayed_restoration in reversed(
        deferred_restorations):
      checkpoint_name = delayed_restoration.map_func(name)
      if (checkpoint_name
          in delayed_restoration.checkpointed_variables_to_restore):
        found_value = True
        value_to_restore = (
            delayed_restoration.checkpointed_variables_to_restore[
                checkpoint_name])
      if found_value:
        break
    # value_to_restore may be False because this variable is not in any
    # checkpoint we are restoring, or None because we have explicitly set it to
    # None when it was previously fetched. In either case, we don't need to
    # set an initializer.
    if found_value and value_to_restore is not None:
      # Initialize the new variable directly from the checkpointed value;
      # shape is cleared because the initializer value already carries it.
      initializer = value_to_restore
      shape = None
    variable = getter(name, shape=shape, dtype=dtype, initializer=initializer,
                      *args, **kwargs)
    if found_value and value_to_restore is not None:
      # Mark as already restored from this checkpoint.
      delayed_restoration.checkpointed_variables_to_restore[
          checkpoint_name] = None
      if context.in_graph_mode():
        delayed_restoration.session.run(variable.initializer)
    if found_value:
      # Error checking should run even if we've already restored a value.
      if delayed_restoration.restored_variables.setdefault(
          checkpoint_name, variable) is not variable:
        # Naming conflict. We've tried to initialize two variables with the
        # same value from the checkpoint.
        if delayed_restoration.map_func_is_user:
          raise ValueError(
              _restore_custom_map_func_error_message(
                  mapped_name=checkpoint_name,
                  first_variable=delayed_restoration.restored_variables[
                      checkpoint_name],
                  second_variable=variable,
                  network_name=delayed_restoration.network_name,
                  network_scope_name=delayed_restoration.network_scope_name))
        else:
          raise ValueError(
              _default_naming_conflict_error_message(
                  mapped_name=checkpoint_name,
                  first_variable=delayed_restoration.restored_variables[
                      checkpoint_name],
                  second_variable=variable,
                  network_name=delayed_restoration.network_name,
                  network_scope_name=delayed_restoration.network_scope_name))
    return variable
  return _custom_getter, deferred_restorations
def _make_prefix_stripping_map_fn(scope_name):
"""Closure for stripping the scope name of a Network.
Implemented as a closure rather than a member function to avoid reference
cycles in deferred restorations (this function should not have a reference to
the Network which created it).
Args:
scope_name: The Network.scope_name to strip from variables.
Returns:
A scope_name-stripping default `map_fn` for the Network.
"""
def _strip_variable_prefix(original_variable_name):
"""The default map_func for saving or restoring variables.
Strips the variable prefix for the Network on which save/restore was called,
and leaves other variable names fully qualified in the checkpoint.
Args:
original_variable_name: The _shared_name of the variable (no :0
suffix) to map.
Returns:
The checkpoint name of the variable.
"""
scope_name_with_slash = scope_name + "/"
if original_variable_name.startswith(scope_name_with_slash):
return original_variable_name[len(scope_name_with_slash):]
else:
return original_variable_name
return _strip_variable_prefix
class Network(base.Layer):
"""Represents the composition of a set of Layers.
TODO(josh11b,ashankar):
- Should "trainable" be changeable on the Network object?
- Do we allow add_variable in Network?
- Detect layers used in __call__ that weren't registered with track_layer.
- Convert inputs to __call__ to tensors.
- Prevent variables from being created after the first __call__?
(Think about restoring from a checkpoint).
"""
def __init__(self, name=None):
if isinstance(name, variable_scope.VariableScope):
raise ValueError("VariableScopes are not valid Network names.")
if name is not None and "/" in name:
raise ValueError(
"Forward slashes ('/') are not allowed in Network names.")
super(Network, self).__init__(name=name)
self._layers = []
self._sub_layer_name_uids = collections.defaultdict(int)
# Initially None, but set to False for networks which are first built as
# top-level.
self._first_parent = None # A weak reference to our first parent.
self._non_network_sublayers = []
self._owned_layers = {}
# The scope to use if we end up without a parent.
self._default_parent_variable_scope = variable_scope.get_variable_scope()
# Hold on to the variable scope counts from init to check whether a scope
# with the name we want was ever created in our parent scope. Without this
# check we might have name collisions if the parent scope on init gets
# closed before build is called.
self._variable_scope_counts_on_init = (
variable_scope._get_default_variable_store().variable_scopes_count)
self._custom_getter, self._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
def _init_set_name(self, name):
# Anonymous Networks (name=None) defer setting a final name until they are
# (1) added to another Network, or (2) built/called (where (2) is only used
# for a "top level" network).
#
# However, if we were provided an explicit name (name is not None), that
# will always be the final name of the Network; if it turns out not to be
# unique or if variable names can't be prefixed by it we will throw an
# error.
self._name = name
self._base_name = None
def _finalize_name(self, parent_network):
if not self._name:
# Were were not passed a name explicitly (or it was blank), so this is an
# anonymous Network. We make up a unique name.
if parent_network:
avoid_names = parent_network._owned_layers
name_uid_map = parent_network._sub_layer_name_uids
else:
name_uid_map = base._get_default_graph_uid_map()
# Figure out which names we have to avoid based on which variable scope
# we're nested in.
strip_name = self._default_parent_variable_scope.name
if strip_name:
strip_name += "/"
def _strip_on_init_scope(name):
if name.startswith(strip_name):
return name[len(strip_name):]
else:
return None
avoid_names = set(
_strip_on_init_scope(name)
for name in self._variable_scope_counts_on_init.keys() if name)
self._name, self._base_name = self._make_unique_name(
name_uid_map=name_uid_map, avoid_names=avoid_names,
namespace=self._default_parent_variable_scope.name)
if self._first_parent is None or (self._first_parent # False = no parent
and self._first_parent() is None):
# Save a pointer to the parent Network so that we can later check that the
# scope name we get is correct.
if not parent_network:
self._first_parent = parent_network
else:
self._first_parent = weakref.ref(parent_network)
def _set_scope(self, scope=None):
if self._scope is None:
if not self._first_parent:
first_parent = self._first_parent
else:
first_parent = self._first_parent()
if first_parent is None:
# If we were never added to another Network, or that Network has beed
# garbage collected before being called, then we're a top-level Network.
self._finalize_name(
# Use False to make sure the value sticks and we don't inherit a
# parent if we're added to a network later.
parent_network=False)
if scope is not None:
raise ValueError("Networks may not be created with explicit scopes.")
if first_parent:
first_parent._set_scope()
parent_scope = first_parent._scope
else:
parent_scope = self._default_parent_variable_scope
with variable_scope.variable_scope(parent_scope) as parent_vs:
expected_scope_name = parent_vs.name + "/" + self._name
if expected_scope_name in self._variable_scope_counts_on_init:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
# Make sure variables with this prefix will be unique.
with variable_scope.variable_scope(
None, use_resource=True, default_name=self._name) as scope:
self._scope = scope
scope_name = scope.name
suffix_start = scope_name.rfind("/") + 1
# rfind is -1 if there is no slash in the string, in which case the
# suffix starts at the beginning of the string (there is no prefix).
scope_suffix = scope_name[suffix_start:]
scope_prefix = scope_name[:suffix_start]
if scope_suffix != self._name:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
if (first_parent
and scope_prefix[:-1] != first_parent.scope_name):
raise ValueError(
("Network variable names must match a nesting of sub-Network "
"names. Expected prefix '%s' from parent network, but got "
"'%s' when attempting to create a variable_scope for Network "
"'%s'. Likely an explicit variable_scope was inserted into "
"the nesting.") % (
first_parent.scope_name,
scope_prefix[:-1],
self._name))
elif not first_parent and scope_prefix:
# For the case when this Network is not nested inside any other
# Network, but is in a variable_scope. This Network's name takes on
# the full variable scope prefix.
self._name = scope_name
for non_network_sublayer in self._non_network_sublayers:
self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
if sublayer._scope is None:
if sublayer._first_parent is None:
constituent_first_parent = None
else:
constituent_first_parent = sublayer._first_parent()
if constituent_first_parent:
constituent_first_parent._set_scope()
parent_scope = constituent_first_parent._scope
else:
self._finalize_name(False)
raise ValueError(
("The parent of a Layer added to Network %s was garbage collected "
"before the Layer was built. If this limitation bothers you "
"please file a feature request.") %
(self.name,))
with variable_scope.variable_scope(parent_scope):
# Horrid hack to make Layer variable names which are direct
# sub-layers of Networks conform to the Network variable naming
# conventions.
with variable_scope.variable_scope(
None, use_resource=True,
default_name=sublayer.name) as sub_scope:
sublayer._scope = sub_scope
@base.Layer.name.getter
def name(self):
if self._name is None:
raise ValueError(
"The network does not yet have a final name, but a name was "
"requested for it. Networks get a name when they are added to "
"another Network via track_layer, or when they are first "
"called/built.")
return self._name
def track_layer(self, layer):
"""Track a Layer in this Network.
`Network` requires that all `Layer`s used in `call()` be tracked so that the
`Network` can export a complete list of variables.
Args:
layer: A `tf.layers.Layer` object.
Returns:
The passed in `layer`.
Raises:
RuntimeError: If __init__ has not been called.
TypeError: If `layer` is the wrong type.
ValueError: If a `Layer` with the same name has already been added.
"""
if not hasattr(self, "_layers"):
raise RuntimeError("Need to call Network.__init__ before adding layers")
if not isinstance(layer, base.Layer):
raise TypeError(
"Network.track_layer() passed type %s, not a tf.layers.Layer" %
(type(layer),))
if isinstance(layer, Network):
layer._finalize_name(parent_network=self)
else:
# `layer` is a non-Network, so it hasn't been named to follow Network
# conventions for contained Layers (i.e. the same conventions as for
# sub-Networks). This renaming is necessary to isolate Network variable
# naming from Layers constructed outside the Network and never added to it
# (because Layers are named globally).
if not layer.built:
if not hasattr(layer, "_first_parent"):
dereferenced_layer_first_parent = None
else:
dereferenced_layer_first_parent = layer._first_parent()
if dereferenced_layer_first_parent is None:
if layer._name != layer._base_name:
# If name and base_name do not match, then this Layer used anonymous
# naming and we have to rename it. Otherwise there's an explicit
# name, and we should respect it (subject to error checking).
layer._name, layer._base_name = layer._make_unique_name(
name_uid_map=self._sub_layer_name_uids,
avoid_names=self._owned_layers
# No namespace required, since we've specified our own UID map.
)
layer._first_parent = weakref.ref(self)
self._non_network_sublayers.append(layer)
if (not layer.built
and layer._first_parent
and self is layer._first_parent()):
if layer.name in self._owned_layers:
if self._owned_layers[layer.name] is layer:
return layer
raise ValueError(
"Attempt to add two Layers with the name '%s' to the same Network."
% (layer.name))
self._owned_layers[layer.name] = layer
self._layers.append(layer)
return layer
def get_layer(self, name=None, index=None):
"""Get a contained `tf.layers.Layer` either by name or index.
Args:
name: String matching one of the names of a contained `Layer`. Note that
the names of `Layer`s added to `Network`s may not be unique when doing
layer sharing (i.e. adding a `Layer` to this `Network` which was already
added to another `Network`). The lowest index `Layer` with a matching
name will be returned.
index: Integer in [0, number of layers). Layers are assigned an index
by the order they are added.
Returns:
A `tf.layers.Layer` object.
Raises:
ValueError: If neither or both of 'index' or 'name' is specified, or the
lookup failed.
"""
if index is not None:
if name is not None:
raise ValueError("Exactly one of 'index' or 'name' must be provided")
if len(self._layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self._layers)) +
" layers.")
else:
return self._layers[index]
else:
if not name:
raise ValueError("Provide either a layer name or layer index.")
for layer in self._layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name)
# The following methods are for implementing the Layer interface.
@property
def weights(self):
# TODO(josh11b): Should this return a set or perform de-duplication of
# variables in the case of shared layers/variables that appear in
# multiple places in the Network?
weights = []
for layer in self._layers:
weights += layer.weights
return weights
@property
def trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.non_trainable_weights
return weights
@property
def trainable(self):
return True
@trainable.setter
def trainable(self, value):
if not value:
# We believe it better to decide which layers & networks are trainable
# at the Trainer level than here. Otherwise you can run into trouble if a
# layer/network is shared between two models, but is trainable in one
# but not the other (like with adversarial networks).
raise AttributeError("cannot mark Network as not trainable")
  @property
  def layers(self):
    # Flat list of the Layers tracked by this Network, in insertion order.
    return self._layers
  def add_variable(self, name, shape, dtype=None, initializer=None,
                   regularizer=None, trainable=True, constraint=None):
    """Unsupported on Network; always raises RuntimeError.

    Networks track variables only through their sub-Layers; direct variable
    creation on a Network is not implemented.
    """
    raise RuntimeError(
        "add_variable not supported in Network class yet. Please file an issue "
        "at https://github.com/tensorflow/tensorflow/issues/new if this is "
        "important to you")
  def save(self, save_path, global_step=None, map_func=None):
    """Save variables from the Network to a checkpoint.

    Args:
      save_path: Either a checkpoint prefix or the name of a directory to save
        the checkpoint in (in which case the checkpoint will be named based on
        the Network name).
      global_step: The global step to use when naming the checkpoint. If None
        (default), we will first try to get the default global step. If that
        fails because no default global step exists, then the checkpoint is
        created without a global step suffix.
      map_func: A function mapping fully qualified variable names
        (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
        default (if `map_func=None`), the variable prefix for the network being
        restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
        and all other variable names (shared with other Networks) are left
        unchanged.

    Returns:
      The checkpoint prefix for the saved checkpoint, which may be passed to
      `Network.restore`.

    Raises:
      ValueError: If the Network has not yet been called, or if map_func results
        in a name collision.
    """
    if not self.built:
      raise ValueError(
          "Attempt to save the Network before it was first called. This means "
          "variables have not yet been created, so there is nothing to save.")
    self._set_scope()  # scope_name should be available to map_funcs
    if global_step is None:
      global_step = training_util.get_global_step()
    if os.path.isdir(save_path):
      # If we were passed a directory, default to naming based on the Network
      # name.
      save_path = os.path.join(save_path, self.name.replace("/", "_"))
    user_map_func = map_func
    if map_func is None:
      map_func = _make_prefix_stripping_map_fn(self.scope_name)
    # Build a checkpoint-name -> variable map, failing fast if map_func sends
    # two distinct variables to the same checkpoint name.
    variable_map = {}
    for variable in self.variables:
      mapped_name = map_func(variable._shared_name)
      if variable_map.setdefault(mapped_name, variable) is not variable:
        if user_map_func is None:
          # Instead of erroring out, we could just re-try and silently use the
          # full variable names in the checkpoint. This could be odd for deeply
          # nested sub-Networks (since the full prefix from the nesting would
          # get added), so for now we'll let the user deal with this case.
          raise ValueError(_default_naming_conflict_error_message(
              mapped_name=mapped_name,
              first_variable=variable_map[mapped_name],
              second_variable=variable,
              network_name=self.name,
              network_scope_name=self.scope_name))
        else:
          # The user passed their own problematic map_func.
          raise ValueError(
              ("The map_func passed to Network.save for the Network '%s' "
               "resulted in two variables named '%s' ('%s' and '%s'). Try "
               "stripping less from the variable names, or renaming parts of "
               "the Network. For reference, variables created by sub-Layers of "
               "this Network are prefixed with '%s', but if they are re-used "
               "after being added to another Network, they will have that "
               "Network's full variable prefix instead.") % (
                  self.name, mapped_name,
                  variable_map[mapped_name]._shared_name,
                  variable._shared_name,
                  self.scope_name))
    # In eager mode there is no session; the Saver writes values directly.
    if context.in_eager_mode():
      sess = None
    else:
      sess = ops.get_default_session()
    return saver_lib.Saver(variable_map).save(
        sess=sess, save_path=save_path, write_meta_graph=False,
        global_step=global_step)
  def _restore_existing_variables(self, save_path, map_func, user_map_func):
    """Use a standard Saver to restore existing variables from a checkpoint.

    Args:
      save_path: The checkpoint prefix or directory to read from.
      map_func: The function to use when mapping from variable names to
        checkpoint names.
      user_map_func: The original map_func passed by the user, for error
        checking.

    Returns:
      A dictionary mapping from checkpoint names to variable objects which have
      been restored (for bookkeeping to avoid deferred restorations on these
      variables).

    Raises:
      ValueError: If there is a name collision.
    """
    existing_variables_by_checkpoint_name = {}
    for variable in self.variables:
      checkpoint_name = map_func(variable._shared_name)
      # setdefault returns the previously stored variable when the mapped name
      # collides, which is how we detect a bad map_func.
      if existing_variables_by_checkpoint_name.setdefault(
          checkpoint_name, variable) is not variable:
        if user_map_func is None:
          raise ValueError(_default_naming_conflict_error_message(
              mapped_name=checkpoint_name,
              first_variable=existing_variables_by_checkpoint_name[
                  checkpoint_name],
              second_variable=variable,
              network_name=self.name,
              network_scope_name=self.scope_name))
        else:
          raise ValueError(_restore_custom_map_func_error_message(
              mapped_name=checkpoint_name,
              first_variable=existing_variables_by_checkpoint_name[
                  checkpoint_name],
              second_variable=variable,
              network_name=self.name,
              network_scope_name=self.scope_name))
    # Only build a Saver if there is anything to restore right now.
    if existing_variables_by_checkpoint_name:
      if context.in_eager_mode():
        sess = None
      else:
        sess = ops.get_default_session()
      saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(
          sess=sess, save_path=save_path)
    return existing_variables_by_checkpoint_name
  def _set_restore_on_create(self, save_path, map_func, user_map_func,
                             existing_variables_by_checkpoint_name):
    """If necessary, request deferred restorations of variables.

    Checkpoint entries not claimed by already-existing variables are stashed
    in a _DeferredRestoration so variables created later (through the custom
    getter) pick up their checkpointed values.

    Args:
      save_path: The checkpoint prefix or directory to read from.
      map_func: The function mapping variable names to checkpoint names.
      user_map_func: The original map_func passed by the user (or None).
      existing_variables_by_checkpoint_name: Checkpoint names already claimed
        by created-and-restored variables; these are skipped here.
    """
    checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)
    checkpointed_variables_to_restore = {}
    for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):
      if checkpoint_name in existing_variables_by_checkpoint_name:
        # This variable was already created and restored.
        continue
      # Save the variable for later restoration in a custom getter.
      checkpointed_variables_to_restore[checkpoint_name] = (
          checkpoint_reader.get_tensor(checkpoint_name))
    # Only set a deferred restoration if there are checkpoint variables which
    # have not been assigned to existing variables. Note that this loses out on
    # some opportunity for error checking, but avoids creating
    # _DeferredRestoration objects once a Network has been built (so that
    # restoring in a loop does not take increasing amounts of memory).
    if checkpointed_variables_to_restore:
      if context.in_eager_mode():
        sess = None
      else:
        sess = ops.get_default_session()
      # We need a name for error messages. If we haven't been added to another
      # Network yet, we're top-level.
      self._finalize_name(False)
      self._set_scope()
      # Save a record of this restoration for use in the custom getter.
      deferred_restoration = _DeferredRestoration(
          map_func=map_func,
          map_func_is_user=(user_map_func is not None),
          checkpointed_variables_to_restore=checkpointed_variables_to_restore,
          restored_variables={},
          session=sess,
          network_name=self.name,
          network_scope_name=self.scope_name)
      self._deferred_restorations.append(deferred_restoration)
      # Add the deferred registration to non-Network children, and request that
      # Networks propagate the request to their children.
      self._add_deferred_restoration(deferred_restoration)
  def _add_deferred_restoration(self, deferred_restoration):
    """Add a deferred restoration to this Network and all children.

    Restorations which are requested later have higher priority, and the highest
    priority matching restoration is applied to a variable when it is created.

    Args:
      deferred_restoration: A _DeferredRestoration object.
    """
    # Networks don't create variables at the moment, so this append isn't
    # strictly necessary. We could get by with only adding deferred restorations
    # to non-Network Layers.
    self._set_scope()
    # We use set_custom_getter because it avoids recursively calling up the
    # variable_scope tree. We've done the tree traversal ourselves and have
    # added the request to each Layer which needs it.
    self._scope.set_custom_getter(self._custom_getter)
    self._deferred_restorations.append(deferred_restoration)
    for layer in self.layers:
      if isinstance(layer, Network):
        # For Networks, request that they propagate this deferred restoration
        # to all of their children recursively.
        layer._add_deferred_restoration(deferred_restoration)
      else:
        # For non-Network Layers, make sure they have a deferred restoration
        # queue and a custom getter, then add our request to it.
        if not hasattr(layer, "_custom_getter"):
          # A Layer either has both the getter and the queue or neither;
          # they are created together below.
          assert not hasattr(layer, "_deferred_restorations")
          layer._custom_getter, layer._deferred_restorations = (
              _make_custom_getter_for_deferred_restorations())
        self._set_scope_for_nonnetwork_sublayer(layer)
        layer._scope.set_custom_getter(layer._custom_getter)
        layer._deferred_restorations.append(deferred_restoration)
  def restore(self, save_path, map_func=None):
    """Restore the Network from a checkpoint.

    If variables have already been created (typically when some or all of the
    `Network` is built), they are assigned values from the checkpoint
    immediately, overwriting any existing values (in graph mode the default
    session is used for the assignments).

    If there are checkpoint entries which do not correspond to any existing
    variables in the `Network`, these values are saved for deferred restoration;
    their initial values will be the checkpointed values once they are
    created. Requests for multiple deferred restorations behave the same way as
    immediate restorations, in that later requests will take priority over
    earlier requests relevant to the same variable.

    If this `Network` shares `Layer`s with another network, those `Layer`s will
    also have their variables restored from the checkpoint.

    Args:
      save_path: The return value of `Network.save`, or a directory to search
        for a checkpoint.
      map_func: A function mapping fully qualified variable names
        (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
        default (if `map_func=None`), the variable prefix for the network being
        restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
        and all other variable names (shared with other Networks) are left
        unchanged. Note that this is the _same_ map_func as `Network.save`, not
        an inverse mapping.
    """
    self._finalize_name(parent_network=False)
    self._set_scope()  # scope_name should be available to map_funcs
    if os.path.isdir(save_path):
      # If we were passed a directory, look for the checkpoint named after
      # this Network (mirrors the naming used by Network.save).
      save_path = os.path.join(save_path, self.name.replace("/", "_"))
    user_map_func = map_func
    if map_func is None:
      map_func = _make_prefix_stripping_map_fn(self.scope_name)
    # Step one is to restore any existing variables from the checkpoint.
    existing_variables_by_checkpoint_name = self._restore_existing_variables(
        save_path=save_path,
        map_func=map_func,
        user_map_func=user_map_func)
    # Step two is to set a custom getter which restores variables on creation,
    # for those variables which have not been added to sub-Layers yet.
    self._set_restore_on_create(
        save_path=save_path,
        map_func=map_func,
        user_map_func=user_map_func,
        existing_variables_by_checkpoint_name=(
            existing_variables_by_checkpoint_name))
# TODO(josh11b): Support other Layer methods needed for graph mode, such as for
# losses and updates
class Sequential(Network):
  """Represents a linear sequence of Layers or functions.

  The output of each layer/function is provided as the input to the next.
  The inputs passed to `__call__` are passed to the inputs of the first
  Layer, and it returns the outputs of the last Layer.

  Args:
    layers_funcs: An optional sequence where each element is either a
      tf.layers.Layer object or a callable.
    name: An optional string name to use for this Network.
  """

  def __init__(self, layers_funcs=None, name=None):
    super(Sequential, self).__init__(name=name)
    # Each entry is (accepts_training_kwarg, layer_or_callable).
    self._layers_funcs = []
    for element in (layers_funcs or ()):
      self.add(element)

  def add(self, layer_func):
    """Append a Layer or callable to the sequence."""
    if isinstance(layer_func, base.Layer):
      arg_names = estimator_util.fn_args(layer_func.call)
      self.track_layer(layer_func)
    elif callable(layer_func):
      arg_names = estimator_util.fn_args(layer_func)
    else:
      raise TypeError(
          "Sequential.add() takes only tf.layers.Layer objects or callables; "
          "not '%s' of type '%s'." % (layer_func, type(layer_func)))
    # Remember whether the stage can receive a `training` argument.
    self._layers_funcs.append((("training" in arg_names), layer_func))

  def call(self, inputs, training=None):
    """Call each Layer in the order they were added."""
    # TODO(josh11b): Support "mode" and maybe other arguments
    outputs = inputs
    for has_training_arg, stage in self._layers_funcs:
      if training is not None and has_training_arg:
        outputs = stage(outputs, training)
      else:
        outputs = stage(outputs)
    return outputs
| 44.192628 | 81 | 0.683958 |
57b4fbd7a86518318928d00196e1a8136c6f36db | 8,769 | py | Python | story/utils.py | mershant/AIDONE | f4fec23ad208446e91a91f76e97d3a707b368cc0 | [
"MIT"
] | null | null | null | story/utils.py | mershant/AIDONE | f4fec23ad208446e91a91f76e97d3a707b368cc0 | [
"MIT"
] | 5 | 2020-09-26T00:38:56.000Z | 2022-02-10T01:15:09.000Z | story/utils.py | mershant/AIDONE | f4fec23ad208446e91a91f76e97d3a707b368cc0 | [
"MIT"
] | null | null | null | # coding: utf-8
import re
from difflib import SequenceMatcher
import yaml
from profanityfilter import ProfanityFilter
from func_timeout import func_timeout, FunctionTimedOut
# Location of the story configuration data (read by other story modules).
YAML_FILE = "story/story_data.yaml"
# Build the profanity filter once at import time from the custom word list;
# each line of the file is one censored word.
with open("story/censored_words.txt", "r") as f:
    censored_words = [l.replace("\n", "") for l in f.readlines()]
pf = ProfanityFilter(custom_censor_list=censored_words)
def console_print(text, width=75):
    """Print `text`, breaking the line at the first space after `width` chars.

    Existing newlines reset the line-length counter; the break is inserted
    immediately before the space that overflows the width.
    """
    wrapped = []
    chars_since_break = 0
    for ch in text:
        if ch == "\n":
            wrapped.append(ch)
            chars_since_break = 0
        elif chars_since_break > width and ch == " ":
            # Break just before this space (the space itself starts the new
            # line and counts as its first character, matching the original
            # in-place insertion behavior).
            wrapped.append("\n")
            wrapped.append(ch)
            chars_since_break = 1
        else:
            wrapped.append(ch)
            chars_since_break += 1
    print("".join(wrapped))
def get_similarity(a, b):
    """Return the difflib similarity ratio between two strings (0.0 to 1.0)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
def get_num_options(num):
    """Prompt repeatedly until the user enters an integer in [0, num)."""
    while True:
        raw = input("Enter the number of your choice: ")
        try:
            choice = int(raw)
        except ValueError:
            print("Error invalid choice. ")
            continue
        if 0 <= choice < num:
            return choice
        print("Error invalid choice. ")
def player_died(text):
    """
    TODO: Add in more sophisticated NLP, maybe a custom classifier
    trained on hand-labelled data that classifies second-person
    statements as resulting in death or not.
    """
    lowered = text.lower()
    death_patterns = (
        "you('re| are) (dead|killed|slain|no more|nonexistent)",
        "you (die|pass away|perish|suffocate|drown|bleed out)",
        "you('ve| have) (died|perished|suffocated|drowned|been (killed|slain))",
        "you (\w* )?(yourself )?to death",
        "you (\w* )*(collapse|bleed out|chok(e|ed|ing)|drown|dissolve) (\w* )*and (die(|d)|pass away|cease to exist|(\w* )+killed)",
    )
    for pattern in death_patterns:
        if re.search(pattern, lowered):
            return True
    return False
def player_won(text):
    """Heuristic: True when the text matches a known victory phrasing."""
    lowered = text.lower()
    victory_patterns = (
        "you ((\w* )*and |)live happily ever after",
        "you ((\w* )*and |)live (forever|eternally|for eternity)",
        "you ((\w* )*and |)(are|become|turn into) ((a|now) )?(deity|god|immortal)",
        "you ((\w* )*and |)((go|get) (in)?to|arrive (at|in)) (heaven|paradise)",
        "you ((\w* )*and |)celebrate your (victory|triumph)",
        "you ((\w* )*and |)retire",
    )
    for pattern in victory_patterns:
        if re.search(pattern, lowered):
            return True
    return False
def remove_profanity(text):
    """Censor words from censored_words.txt using the module-level filter."""
    return pf.censor(text)
def cut_trailing_quotes(text):
    """If `text` has an odd number of double quotes, drop the unmatched tail."""
    if text.count('"') % 2 == 0:
        return text
    return text[:text.rfind('"')]
def fix_trailing_quotes(text):
    """Append a closing double quote when the quote count is odd."""
    return text if text.count('"') % 2 == 0 else text + '"'
def split_first_sentence(text):
    """Split `text` after its first sentence terminator ('.' or '!').

    Returns a (first_sentence, remainder) pair. If no terminator is found
    past position 0, falls back to splitting after the first 20 characters
    (or the whole text if shorter).

    Fixes two bugs in the original implementation:
    - a '!' with no '.' present was never detected, because str.find returns
      -1 for the missing '.', and `first_exclamation < -1` is always False;
    - the fallback assigned the string slice text[0:20] to `split_point`,
      which crashed on the subsequent `text[0:split_point]`.
    """
    first_period = text.find(".")
    first_exclamation = text.find("!")
    if first_exclamation > 0 and (first_period <= 0
                                  or first_exclamation < first_period):
        split_point = first_exclamation + 1
    elif first_period > 0:
        split_point = first_period + 1
    else:
        # No usable terminator: keep (up to) the first 20 characters.
        split_point = min(20, len(text))
    return text[0:split_point], text[split_point:]
def cut_trailing_action(text):
    """Remove a trailing 'you say'/'you ask' sentence from multi-line text."""
    lines = text.rstrip().split("\n")
    # Split the final line into sentences; punctuation immediately followed
    # by a double quote is not treated as a sentence boundary.
    sentences = re.findall(r".+?(?:\.{1,3}|[!\?]|$)(?!\")", lines[-1])
    if not sentences:
        return ""
    tail = sentences[-1].rstrip().lower()
    if ("you ask" in tail or "you say" in tail) and len(lines) > 1:
        if len(sentences) > 1:
            # Keep the final line but drop its last sentence.
            lines[-1] = " ".join(sentences[:-1])
        else:
            # The whole final line was the speech action; drop the line.
            lines = lines[:-1]
    return "\n".join(lines)
def cut_trailing_sentence(text):
    """Trim `text` back to its last sentence boundary, also cutting before
    any '<' or '>' token, then repair quotes and trailing speech actions."""
    text = standardize_punctuation(text)
    last_punc = max(text.rfind("."), text.rfind("!"), text.rfind("?"))
    if last_punc <= 0:
        last_punc = len(text) - 1
    # Cut before any '<' / '>' token so generated markers are dropped.
    for token in ("<", ">"):
        token_pos = text.find(token)
        if token_pos > 0:
            last_punc = min(last_punc, token_pos - 1)
        elif token_pos == 0:
            last_punc = min(last_punc, token_pos)
    text = text[:last_punc + 1]
    text = fix_trailing_quotes(text)
    return cut_trailing_action(text)
def replace_outside_quotes(text, current_word, repl_word):
    """Replace `current_word` with `repl_word` wherever it occurs outside of
    double-quoted spans (after normalizing curly quotes)."""
    normalized = standardize_punctuation(text)
    # The lookahead only matches when an even number of quotes follows,
    # i.e. the match position is outside any quoted span.
    pattern = re.compile(current_word + '(?=([^"]*"[^"]*")*[^"]*$)')
    return pattern.sub(repl_word, normalized)
def is_first_person(text):
    """Heuristic: True if more than three first-person pronoun variants
    appear outside quoted dialogue."""
    hits = 0
    for pair in first_to_second_mappings:
        for variation in mapping_variation_pairs(pair):
            pattern = re.compile(variation[0] + '(?=([^"]*"[^"]*")*[^"]*$)')
            hits += len(pattern.findall(text))
    return hits > 3
def is_second_person(text):
    """Heuristic: True if more than three second-person pronoun variants
    appear outside quoted dialogue."""
    hits = 0
    for pair in second_to_first_mappings:
        for variation in mapping_variation_pairs(pair):
            pattern = re.compile(variation[0] + '(?=([^"]*"[^"]*")*[^"]*$)')
            hits += len(pattern.findall(text))
    return hits > 3
def capitalize(word):
    """Upper-case the first character of `word` (safe on empty strings).

    Fixed: the original indexed word[0], which raised IndexError on "".
    """
    return word[:1].upper() + word[1:]


def mapping_variation_pairs(mapping):
    """Expand a (source, target) pronoun pair into the spacing,
    capitalization, and punctuation variants used for outside-quote
    replacement.

    Returns, in order: the space-delimited pair, its capitalized form, then
    comma / '?' / '!' / '.' adjacent forms.
    """
    mapping_list = []
    mapping_list.append((" " + mapping[0] + " ", " " + mapping[1] + " "))
    mapping_list.append(
        (" " + capitalize(mapping[0]) + " ", " " + capitalize(mapping[1]) + " ")
    )
    # Change you it's before a punctuation (object position uses "me").
    # Fixed: the original compared with `is`, which relies on CPython string
    # interning and raises SyntaxWarning on modern Python; use `==`.
    if mapping[0] == "you":
        mapping = ("you", "me")
    mapping_list.append((" " + mapping[0] + ",", " " + mapping[1] + ","))
    mapping_list.append((" " + mapping[0] + "\?", " " + mapping[1] + "\?"))
    mapping_list.append((" " + mapping[0] + "\!", " " + mapping[1] + "\!"))
    mapping_list.append((" " + mapping[0] + "\.", " " + mapping[1] + "."))
    return mapping_list
# Ordered (first-person, second-person) replacement pairs applied
# sequentially by first_to_second_person(). Order matters: longer phrases
# must come before the single-word pairs they contain.
# NOTE(review): the run from ("was I", ...) through ("I've", ...) appears
# twice; the second pass is a no-op after the first, but the duplicates
# could be removed.
first_to_second_mappings = [
    ("I'm", "you're"),
    ("Im", "you're"),
    ("Ive", "you've"),
    ("I am", "you are"),
    ("was I", "were you"),
    ("am I", "are you"),
    ("wasn't I", "weren't you"),
    ("I", "you"),
    ("I'd", "you'd"),
    ("i", "you"),
    ("I've", "you've"),
    ("was I", "were you"),
    ("am I", "are you"),
    ("wasn't I", "weren't you"),
    ("I", "you"),
    ("I'd", "you'd"),
    ("i", "you"),
    ("I've", "you've"),
    ("I was", "you were"),
    ("my", "your"),
    ("we", "you"),
    ("we're", "you're"),
    ("mine", "yours"),
    ("me", "you"),
    ("us", "you"),
    ("our", "your"),
    ("I'll", "you'll"),
    ("myself", "yourself"),
]
# Ordered (second-person, first-person) replacement pairs applied
# sequentially by second_to_first_person(). ("you", "I") is applied before
# ("you", "me"), so the subject form wins for space-delimited occurrences.
second_to_first_mappings = [
    ("you're", "I'm"),
    ("your", "my"),
    ("you are", "I am"),
    ("you were", "I was"),
    ("are you", "am I"),
    ("you", "I"),
    ("you", "me"),
    ("you'll", "I'll"),
    ("yourself", "myself"),
    ("you've", "I've"),
]
def capitalize_helper(string):
    """Upper-case the first character of `string` (safe on empty strings).

    Fixed: the original converted to a list and indexed element 0, which
    raised IndexError on "".
    """
    return string[:1].upper() + string[1:]


def capitalize_first_letters(text):
    """Capitalize the first word of `text` and the first word after each
    sentence terminator ('.', '?' or '!') followed by whitespace."""
    first_letters_regex = re.compile(r"((?<=[\.\?!]\s)(\w+)|(^\w+))")

    def cap(match):
        return capitalize_helper(match.group())

    return first_letters_regex.sub(cap, text)
def standardize_punctuation(text):
    """Normalize curly quotes and backticks to plain ASCII quote characters."""
    # Single-pass character translation instead of chained str.replace calls.
    return text.translate(str.maketrans({"’": "'", "`": "'", "“": '"', "”": '"'}))
def first_to_second_person(text):
    """Convert first-person narration to second person, leaving quoted
    dialogue untouched."""
    # Leading space lets the space-delimited patterns match at position 0.
    converted = " " + standardize_punctuation(text)
    for pair in first_to_second_mappings:
        for variation in mapping_variation_pairs(pair):
            converted = replace_outside_quotes(converted, variation[0], variation[1])
    return capitalize_first_letters(converted[1:])
def second_to_first_person(text):
    """Convert second-person narration to first person, leaving quoted
    dialogue untouched."""
    # Leading space lets the space-delimited patterns match at position 0.
    converted = " " + standardize_punctuation(text)
    for pair in second_to_first_mappings:
        for variation in mapping_variation_pairs(pair):
            converted = replace_outside_quotes(converted, variation[0], variation[1])
    return capitalize_first_letters(converted[1:])
| 28.287097 | 133 | 0.550576 |
9f6ba9efb1ffa4a64483301d8a9ca68cbaa8139b | 1,638 | py | Python | homu/effects.py | Penguinum/Python-Homulib | 113bcceb813d749bedaf183c4516564f4a6b2bee | [
"MIT"
] | null | null | null | homu/effects.py | Penguinum/Python-Homulib | 113bcceb813d749bedaf183c4516564f4a6b2bee | [
"MIT"
] | null | null | null | homu/effects.py | Penguinum/Python-Homulib | 113bcceb813d749bedaf183c4516564f4a6b2bee | [
"MIT"
] | null | null | null | from ctypes import *
from homu import homulib
homulib.Delay_Create.restype = c_void_p
homulib.Delay_Start.argtypes = [c_void_p]
homulib.Delay_SetSize.argtypes = [c_void_p, c_double]
homulib.Delay_SetDecay.argtypes = [c_void_p, c_double]
homulib.Delay_NextSample.argtypes = [c_void_p, c_double]
homulib.Delay_NextSample.restype = c_double
homulib.Delay_Destroy.argtypes = [c_void_p]
homulib.Distortion_Create.restype = c_void_p
homulib.Distortion_Start.argtypes = [c_void_p]
homulib.Distortion_SetLevel.argtypes = [c_void_p, c_double]
homulib.Distortion_NextSample.argtypes = [c_void_p, c_double]
homulib.Distortion_NextSample.restype = c_double
homulib.Distortion_Destroy.argtypes = [c_void_p]
class Delay:
    """Python wrapper around the native homulib delay effect.

    Holds an opaque native handle in `self.gen`; the handle is released
    when the wrapper is garbage collected.
    """

    def __init__(self):
        # Opaque pointer to the native Delay instance.
        self.gen = homulib.Delay_Create()

    def __del__(self):
        # Fixed: the original assigned Delay_Destroy's return value back to
        # self.gen (leaving a bogus non-handle) and raised AttributeError if
        # __init__ had failed before setting self.gen. Release at most once.
        gen = getattr(self, "gen", None)
        if gen is not None:
            homulib.Delay_Destroy(gen)
            self.gen = None

    def set_size(self, s):
        """Set the delay size (interpretation defined by homulib)."""
        homulib.Delay_SetSize(self.gen, s)

    def set_decay(self, value):
        """Set the delay decay value (interpretation defined by homulib)."""
        homulib.Delay_SetDecay(self.gen, value)

    def start(self):
        """Invoke homulib's Delay_Start on the native handle."""
        homulib.Delay_Start(self.gen)

    def next_sample(self, s):
        """Feed one input sample `s` and return the processed sample."""
        return homulib.Delay_NextSample(self.gen, s)
class Distortion:
    """Python wrapper around the native homulib distortion effect.

    Holds an opaque native handle in `self.gen`; the handle is released
    when the wrapper is garbage collected.
    """

    def __init__(self):
        # Opaque pointer to the native Distortion instance.
        self.gen = homulib.Distortion_Create()

    def __del__(self):
        # Fixed: the original assigned Distortion_Destroy's return value back
        # to self.gen and raised AttributeError if __init__ had failed before
        # setting self.gen. Release at most once.
        gen = getattr(self, "gen", None)
        if gen is not None:
            homulib.Distortion_Destroy(gen)
            self.gen = None

    def set_level(self, s):
        """Set the distortion level (interpretation defined by homulib)."""
        homulib.Distortion_SetLevel(self.gen, s)

    def start(self):
        """Invoke homulib's Distortion_Start on the native handle."""
        homulib.Distortion_Start(self.gen)

    def next_sample(self, s):
        """Feed one input sample `s` and return the processed sample."""
        return homulib.Distortion_NextSample(self.gen, s)
f7254bdd4bb068fb20e4ad809d0645054278cee9 | 5,620 | py | Python | SimpleLoggingServerToCsvFile.py | II43/SimpleLoggingServerToCsvFile | d3d50778041a5995e58b6a8f623519e3cb41a5ce | [
"MIT"
] | null | null | null | SimpleLoggingServerToCsvFile.py | II43/SimpleLoggingServerToCsvFile | d3d50778041a5995e58b6a8f623519e3cb41a5ce | [
"MIT"
] | null | null | null | SimpleLoggingServerToCsvFile.py | II43/SimpleLoggingServerToCsvFile | d3d50778041a5995e58b6a8f623519e3cb41a5ce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Simple HTTP server in Python for logging events to CSV file
Motivation: Use this CSV file later for data agregation and plotting
Inspired by: Very simple HTTP server in Python for logging requests
https://gist.github.com/mdonkers/63e115cc0c79b4f6b8b3a6b797e485c7
Usage::
./SimpleLoggingServerToCsvFile.py [<port>]
"""
#----------------------------------------------------------------------#
# Import #
#----------------------------------------------------------------------#
import csv
import hmac
import logging
from datetime import datetime
from http.server import BaseHTTPRequestHandler, HTTPServer
from os import curdir, sep, path
from shutil import copyfile
#----------------------------------------------------------------------#
# Configuration #
#----------------------------------------------------------------------#
# CSV file that logged events are written to.
LOG_FILE = r'events.log'
# Master key: a request carrying this key rotates (truncates) the log file.
# SECURITY NOTE(review): keys are hard-coded in source; consider loading them
# from the environment or a config file kept out of version control.
MASTER_KEY = "jQw5xZVq9Kp4fm7hiZko"
# All the allowed keys
KEYS = ["q67idhrJ56oQj7IElukH",
        MASTER_KEY]
#----------------------------------------------------------------------#
# Classes #
#----------------------------------------------------------------------#
class S(BaseHTTPRequestHandler):
    """HTTP handler: serves whitelisted static files and logs keyed events.

    GET paths ending in .html/.csv/.log/.js/.css are served from the current
    directory; any other GET path is treated as an access key. A valid key
    appends a (timestamp, key) row to LOG_FILE; the master key rotates it.
    """

    def prepare_for_html_response(self):
        """Send a 200 response with an HTML content-type header."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        """Dispatch GET requests to static-file serving or event logging."""
        # datetime object containing current date and time
        now = datetime.now()
        print("now =", now)
        # dd/mm/YY H:M:S
        time_stamp = now.strftime("%d/%m/%Y %H:%M:%S")
        print("date and time =", time_stamp)
        logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
        self.prepare_for_html_response()
        # Return HTML, CSV or LOG file if requested
        if self.path.endswith((".html", ".csv", ".log", ".js", ".css")):
            self._serve_static_file()
            return
        # Otherwise try to log the event for the given key
        self._log_event(time_stamp)

    def _serve_static_file(self):
        """Serve a file from the working directory, refusing path traversal."""
        base_dir = path.realpath(curdir)
        f_path = path.realpath(curdir + sep + self.path)
        # Security fix: the original joined the raw request path, so a request
        # containing ".." could read arbitrary files. Reject anything that
        # resolves outside the server's working directory.
        if not (f_path == base_dir or f_path.startswith(base_dir + sep)):
            self.wfile.write("Request file does not exist!".encode('utf-8'))
            return
        if not path.exists(f_path):
            # Requested file doesn't exist
            self.wfile.write("Request file does not exist!".encode('utf-8'))
            return
        # `with` closes the handle even if the send fails (the original
        # leaked the handle on a mid-send exception).
        with open(f_path, 'rb') as f:
            self.wfile.write(f.read())

    def _log_event(self, time_stamp):
        """Append a (timestamp, key) row to LOG_FILE if the key is valid."""
        received_key = str(self.path)[1:]
        # hmac.compare_digest is constant-time, so response timing cannot
        # leak how many leading characters of a key were correct.
        if not any(hmac.compare_digest(key, received_key) for key in KEYS):
            # No valid key has been received
            self.wfile.write("Unknown key! Nothing to do!".encode('utf-8'))
            return
        self.wfile.write("Valid key! Logging event to a output file!".encode('utf-8'))
        if hmac.compare_digest(received_key, MASTER_KEY):
            # Master key starts a fresh log; keep a backup of the old one.
            method_to_log = 'w'
            copyfile(LOG_FILE, LOG_FILE + ".backup")
        else:
            method_to_log = 'a'
        # Logging an event to CSV
        with open(LOG_FILE, method_to_log, newline='\n') as f:
            writer = csv.writer(f)
            if method_to_log == 'w':
                writer.writerow(["Timestamp", "Key"])
            writer.writerow([time_stamp, received_key])

    def do_POST(self):
        """Log the POST body and echo the request path back to the client."""
        content_length = int(self.headers['Content-Length'])  # <--- Gets the size of data
        post_data = self.rfile.read(content_length)  # <--- Gets the data itself
        logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
                     str(self.path), str(self.headers), post_data.decode('utf-8'))
        self.prepare_for_html_response()
        self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))
#----------------------------------------------------------------------#
# Functions #
#----------------------------------------------------------------------#
def run(server_class=HTTPServer, handler_class=S, port=8080):
    """Configure logging and serve HTTP on `port` until interrupted."""
    logging.basicConfig(level=logging.INFO)
    httpd = server_class(('', port), handler_class)
    logging.info('Starting httpd...\n')
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server; fall through to close.
        pass
    httpd.server_close()
    logging.info('Stopping httpd...\n')
#----------------------------------------------------------------------#
# Main #
#----------------------------------------------------------------------#
if __name__ == '__main__':
    import sys
    # An optional single CLI argument selects the listening port.
    if len(sys.argv) == 2:
        run(port=int(sys.argv[1]))
    else:
        run()
530b33c70a164edb1ac37cce97a40edf0d370f7e | 4,231 | py | Python | jobs/pipeline.py | soyelherein/pyspark-tdd-template | 077746d9696efa5ec57c29000fbd17007cfbc107 | [
"Apache-2.0"
] | 1 | 2020-08-17T21:56:49.000Z | 2020-08-17T21:56:49.000Z | jobs/pipeline.py | soyelherein/pyspark-tdd-template | 077746d9696efa5ec57c29000fbd17007cfbc107 | [
"Apache-2.0"
] | null | null | null | jobs/pipeline.py | soyelherein/pyspark-tdd-template | 077746d9696efa5ec57c29000fbd17007cfbc107 | [
"Apache-2.0"
] | 3 | 2020-09-23T15:41:05.000Z | 2022-01-05T03:14:58.000Z | # Copyright 2020 soyel.alam@ucdconnect.ie
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col, concat_ws, lit, coalesce
from typing import Dict, Tuple
from ddl import schema
def extract(spark: SparkSession, config: Dict, logger) -> Tuple[DataFrame, DataFrame]:
    """Read the incremental page-view CSV and the historical table.

    :param spark: Spark session object.
    :type spark: SparkSession
    :param config: job configuration (expects 'page_views' and
        'user_pageviews' keys)
    :type config: Dict
    :param logger: Py4j Logger
    :rtype logger: Py4j.Logger
    :return: (incremental, historical) pair of Spark DataFrames.
    :rtype: DataFrame
    """
    incremental: DataFrame = spark.read.load(
        path=config['page_views'], format='csv', header=True,
        schema=schema.page_views)
    historical: DataFrame = spark.read.table(tableName=config['user_pageviews'])
    return incremental, historical
def transform(inc_df: DataFrame, prev_df: DataFrame, config: Dict, logger) -> DataFrame:
    """Transform the data for final loading.

    Aggregates the incremental frame to one row per email (view count plus
    the batch's process_date as last_active), then full-outer-joins it with
    the historical frame, summing counts and carrying activity dates forward.

    :param inc_df: Incremental DataFrame.
    :type inc_df: DataFrame
    :param prev_df: Final DataFrame.
    :type prev_df: DataFrame
    :param config: job configuration (expects 'process_date')
    :type config: Dict
    :param logger: Py4j Logger
    :rtype logger: Py4j.Logger
    :return: Transformed DataFrame.
    :rtype: DataFrame
    """
    # calculating the metrics: one row per email with its incremental view
    # count, stamped with this batch's process_date as last_active
    inc_df: DataFrame = (inc_df.groupBy('email').count().
                         select(['email',
                                 col('count').alias('page_view'),
                                 lit(config['process_date']).alias('last_active')
                                 ])
                         )
    # merging the data with historical records: the full outer join keeps
    # emails seen on either side; coalesce treats a missing side's count as 0,
    # keeps the earliest created_date, and prefers the new last_active
    df_transformed: DataFrame = (inc_df.join(prev_df, inc_df.email == prev_df.email, 'full').
                                 select([coalesce(prev_df.email, inc_df.email).alias('email'),
                                         (coalesce(prev_df.page_view, lit(0)) + coalesce(inc_df.page_view,
                                                                                         lit(0))).alias('page_view'),
                                         coalesce(prev_df.created_date, inc_df.last_active).cast('date').alias(
                                             'created_date'),
                                         coalesce(inc_df.last_active, prev_df.last_active).cast('date').alias(
                                             'last_active')
                                         ])
                                 )
    return df_transformed
def load(df: DataFrame, config: Dict, logger) -> bool:
    """Write data in final destination.

    :param df: DataFrame to save.
    :type df: DataFrame
    :param config: job configuration (expects 'output_path')
    :type config: Dict
    :param logger: Py4j Logger
    :rtype logger: Py4j.Logger
    :return: True
    :rtype: bool
    """
    df.write.mode('overwrite').save(path=config['output_path'])
    return True
def run(spark: SparkSession, config: Dict, logger) -> bool:
    """Entry point to the pipeline: extract, transform, then load.

    :param spark: SparkSession object
    :type spark: SparkSession
    :param config: job configurations and command lines
    :type config: Dict
    :param logger: Log4j Logger
    :type logger: Log4j.Logger
    :return: True
    :rtype: bool
    """
    logger.warn('pipeline is starting')
    raw_inc, raw_prev = extract(spark=spark, config=config, logger=logger)
    result = transform(inc_df=raw_inc, prev_df=raw_prev, config=config,
                       logger=logger)
    load(df=result, config=config, logger=logger)
    logger.warn('pipeline is complete')
    return True
| 36.791304 | 119 | 0.620421 |
4f5f14932f615712666d24be6e16d19b9f92b292 | 70 | py | Python | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic unit of 2nd hyperpolarizability.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic unit of 2nd hyperpolarizability.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic unit of 2nd hyperpolarizability.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | constants.physical_constants["atomic unit of 2nd hyperpolarizability"] | 70 | 70 | 0.871429 |
76c564e4266327921bfd7eddf3a9a363ddc18815 | 14,504 | py | Python | transform_binary_payload/src-payload-decoders/python/sentrius_rs1xx.py | arnd/aws-iot-core-lorawan | 945b7ceea07a17525cfdf15420a573a250fe1149 | [
"MIT-0"
] | 54 | 2020-12-15T21:57:58.000Z | 2022-03-27T14:05:14.000Z | transform_binary_payload/src-payload-decoders/python/sentrius_rs1xx.py | arnd/aws-iot-core-lorawan | 945b7ceea07a17525cfdf15420a573a250fe1149 | [
"MIT-0"
] | 20 | 2020-12-16T19:09:02.000Z | 2022-03-05T13:28:51.000Z | transform_binary_payload/src-payload-decoders/python/sentrius_rs1xx.py | arnd/aws-iot-core-lorawan | 945b7ceea07a17525cfdf15420a573a250fe1149 | [
"MIT-0"
] | 25 | 2020-12-16T01:18:22.000Z | 2022-03-04T12:05:24.000Z | # Copyright IoT Systems GmbH (www.iot-systems.at). All Rights Reserved.
# Affiliate of KaWa commerce GmbH, AWS Consulting Partner (www.kawa-commerce.com)
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Payload definition can be found here
# https://www.lairdconnect.com/documentation/application-note-rs1xx-lora-protocol
import base64
import json
import helpers
# DEBUG MODE
DEBUG_OUTPUT = False
# Send Temp RH Data Notification
# | byte | bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 |
# |------|--------|--------|------|------|------|------|------|------|
# | 0 | MsgType - uint8_t |
# |------|-----------------------------------------------------------|
# | 1 | Options - bitfield |
# |------|-----------------------------------------------------------|
# | 2 | Humidity Fractional - uint8_t |
# |------|-----------------------------------------------------------|
# | 3 | Humidity Integer - uint8_t |
# |------|-----------------------------------------------------------|
# | 4 | Temp Fractional - int8_t |
# |------|-----------------------------------------------------------|
# | 5 | Temp Integer - int8_t |
# |------|-----------------------------------------------------------|
# | 6 | Battery Capacity - uint8_t |
# |------|-----------------------------------------------------------|
# | 7 | AlarmMsg Count - uint16_t |
# | 8 | |
# |------|-----------------------------------------------------------|
# | 9 | BacklogMsg Count - uint8_t |
# | 10 | |
# Send FW Version Notification
# | byte | bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 |
# |------|--------|--------|------|------|------|------|------|------|
# | 0 | MsgType - uint8_t |
# |------|-----------------------------------------------------------|
# | 1 | Options - bitfield |
# |------|-----------------------------------------------------------|
# | 2 | Year - 0x00 |
# |------|-----------------------------------------------------------|
# | 3 | Month - 0x00 |
# |------|-----------------------------------------------------------|
# | 4 | Day - 0x00 |
# |------|-----------------------------------------------------------|
# | 5 | Version Major - 0x00 |
# |------|-----------------------------------------------------------|
# | 6 | Version Minor - 0x00 |
# |------|-----------------------------------------------------------|
# | 7 | Part Number - uint32_t |
# | 8 | |
# | 9 | |
# | 10 | |
# Send Battery Voltage
# | byte | bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 |
# |------|--------|--------|------|------|------|------|------|------|
# | 0 | MsgType - uint8_t |
# |------|-----------------------------------------------------------|
# | 1 | Options - enum |
# |------|-----------------------------------------------------------|
# | 2 | Voltage Fractional - int8_t |
# |------|-----------------------------------------------------------|
# | 3 | Voltage Integer - int8_t |
# Send RTD Data Notification
# | byte | bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 |
# |------|--------|--------|------|------|------|------|------|------|
# | 0 | MsgType - uint8_t |
# |------|-----------------------------------------------------------|
# | 1 | Options - bitfield |
# |------|-----------------------------------------------------------|
# | 2 | Temp Fractional - int16_t |
# | 3 | |
# |------|-----------------------------------------------------------|
# | 4 | Temp Integer - int16_t |
# | 5 | |
# |------|-----------------------------------------------------------|
# | 6 | Battery Capacity - uint8_t |
# |------|-----------------------------------------------------------|
# | 7 | AlarmMsg Count - uint16_t |
# | 8 | |
# |------|-----------------------------------------------------------|
# | 9 | BacklogMsg Count - uint16_t |
# | 10 | |
def dict_from_payload(base64_input: str, fport: int = None):
    """ Decodes a base64-encoded binary payload into JSON.
        Parameters
        ----------
        base64_input : str
            Base64-encoded binary payload
        fport: int
            FPort as provided in the metadata. Please note the fport is optional and can have value "None", if not
            provided by the LNS or invoking function.
            If fport is None and binary decoder can not proceed because of that, it should should raise an exception.
        Returns
        -------
        JSON object with key/value pairs of decoded attributes
        Raises
        ------
        Exception
            If the payload is empty or the message type is not implemented.
    """
    decoded = base64.b64decode(base64_input)

    # Fail fast on an empty payload. Previously an empty payload fell
    # through every branch and hit an UnboundLocalError on `return result`.
    if not decoded:
        raise Exception("Empty payload, nothing to decode")

    # Printing the debug output
    if DEBUG_OUTPUT:
        print(f"Input: {decoded.hex().upper()}")

    # The first payload byte identifies the message type; dispatch on it.
    msg_type = decoded[0]
    if msg_type == 0x01:
        result = decode_temp_rh_data(decoded)
    elif msg_type == 0x07:
        result = decode_fw_version(decoded)
    elif msg_type == 0x0A:
        result = decode_battery_voltage(decoded)
    elif msg_type == 0x0B:
        result = decode_rtd_data(decoded)
    else:
        raise Exception(f"Message type {msg_type} not implemented")

    if DEBUG_OUTPUT:
        print(f"Output: {json.dumps(result, indent=2)}")
    return result
def decode_temp_rh_data(decoded):
    """Decode a 'SendTempRHData' (0x01) uplink into a result dict."""
    payload = {
        "msg_type": "SendTempRHData",
        "options": opt_sens2serv(decoded[1]),
        "humidity": helpers.bytes_to_float(decoded, 2, 2),
        "temperature": helpers.bytes_to_float(decoded, 4, 2),
        "battery_capacity": battery_capacity(decoded[6]),
        # Number of backlog alarm messages in sensor FLASH (big-endian u16)
        "alarm_msg_count": (decoded[7] << 8) | decoded[8],
        # Number of backlog non-alarm messages in sensor FLASH (big-endian u16)
        "backlog_msg_count": (decoded[9] << 8) | decoded[10],
    }
    if DEBUG_OUTPUT:
        print(f"Output: {json.dumps(payload, indent=2)}")
    return payload
def decode_fw_version(decoded):
    """Decode a 'SendFWVersion' (0x07) uplink into a result dict."""
    # Part number is a big-endian uint32 spanning bytes 7..10.
    part_number = ((decoded[7] << 24) | (decoded[8] << 16) |
                   (decoded[9] << 8) | decoded[10])
    payload = {
        "msg_type": "SendFWVersion",
        "options": opt_sens2serv(decoded[1]),
        "year": decoded[2],
        "month": decoded[3],
        "day": decoded[4],
        "version_major": decoded[5],
        "version_minor": decoded[6],
        "part_number": part_number,
    }
    if DEBUG_OUTPUT:
        print(f"Output: {json.dumps(payload, indent=2)}")
    return payload
def decode_battery_voltage(decoded):
    """Decode a 'SendBatteryVoltage' (0x0A) uplink into a result dict."""
    payload = {
        "msg_type": "SendBatteryVoltage",
        "options": opt_sens2serv(decoded[1]),
        # Voltage arrives as fractional byte followed by integer byte.
        "voltage": helpers.bytes_to_float(decoded, 2, 2),
    }
    if DEBUG_OUTPUT:
        print(f"Output: {json.dumps(payload, indent=2)}")
    return payload
# SendRTDData
def decode_rtd_data(decoded):
    """Decode a 'SendRTDData' (0x0B) uplink into a result dict."""
    payload = {
        "msg_type": "SendRTDData",
        "options": opt_sens2serv(decoded[1]),
        # RTD temperature uses 16-bit fractional and integer parts.
        "temperature": helpers.bytes_to_float(decoded, 2, 4),
        "battery_capacity": battery_capacity(decoded[6]),
        # Number of backlog alarm messages in sensor FLASH (big-endian u16)
        "alarm_msg_count": (decoded[7] << 8) | decoded[8],
        # Number of backlog non-alarm messages in sensor FLASH (big-endian u16)
        "backlog_msg_count": (decoded[9] << 8) | decoded[10],
    }
    if DEBUG_OUTPUT:
        print(f"Output: {json.dumps(payload, indent=2)}")
    return payload
# Returns battery capacity as int
def battery_capacity(bat_byte):
    """Map the reported capacity index to the lower bound of its
    remaining-capacity percentage band; 999 flags an unsupported index."""
    capacity_floor = {
        0: 0,    # 0-5%
        1: 5,    # 5-20%
        2: 20,   # 20-40%
        3: 40,   # 40-60%
        4: 60,   # 60-80%
        5: 80,   # 80-100%
    }
    return capacity_floor.get(bat_byte, 999)
# results option flag
def opt_sens2serv(opt_byte):
    """Translate the sensor-to-server options bitfield into a description.

    Exactly one bit must be set; anything else is 'Undefined option'."""
    if not helpers.is_single_bit_set(opt_byte):
        return "Undefined option"
    # Since exactly one bit is set, the whole byte equals the bit mask.
    flag_names = {
        0b00000001: "Sensor request for server time",
        0b00000010: "Sensor configuration error",
        0b00000100: "Sensor alarm flag",
        0b00001000: "Sensor reset flag",
        0b00010000: "Sensor fault flag",
    }
    return flag_names.get(opt_byte, "Undefined option")
# Tests
if __name__ == "__main__":
    # Self-test vectors: each entry pairs a raw uplink (hex or base64
    # encoded) with the decoded key/value pairs the decoder must produce.
    test_definition = [
        {
            "input_encoding": "hex",
            "input_value": "01001E0141190200000000",
            "output": {
                "msg_type": "SendTempRHData",
                "options": "Undefined option",
                "humidity": 1.3,
                "temperature": 25.65,
                "battery_capacity": 20,
                "alarm_msg_count": 0,
                "backlog_msg_count": 0
            }
        },
        {
            "input_encoding": "base64",
            "input_value": "BwkUAxoGAABJPnI=",
            "output": {
                "msg_type": "SendFWVersion",
                "options": "Undefined option",
                "year": 20,
                "month": 3,
                "day": 26,
                "version_major": 6,
                "version_minor": 0,
                "part_number": 4800114
            }
        },
        {
            "input_encoding": "hex",
            "input_value": "0A000A03",
            "output": {
                "msg_type": "SendBatteryVoltage",
                "options": "Undefined option",
                "voltage": 3.1
            }
        },
        {
            "input_encoding": "base64",
            "input_value": "CxEAAAAABAAAAAA=",
            "output": {
                "msg_type": "SendRTDData",
                "options": "Undefined option",
                "temperature": 0.0,
                "battery_capacity": 60,
                "alarm_msg_count": 0,
                "backlog_msg_count": 0
            }
        },
        {
            "input_encoding": "hex",
            "input_value": "07 00 00 01 01 00 00 00 49 3E 6F",
            "output": {
                "msg_type": "SendFWVersion",
                "options": "Undefined option",
                "year": 0,
                "month": 1,
                "day": 1,
                "version_major": 0,
                "version_minor": 0,
                "part_number": 4800111
            }
        },
        {
            "input_encoding": "hex",
            "input_value": "0B 01 00 00 00 10 02 00 00 00 00",
            "output": {
                "msg_type": "SendRTDData",
                "options": "Sensor request for server time",
                "temperature": 16.0,
                "battery_capacity": 20,
                "alarm_msg_count": 0,
                "backlog_msg_count": 0
            }
        },
    ]
    # Normalize each input to base64 (hex vectors are converted first),
    # run the decoder, and compare every expected key against the output.
    for testcase in test_definition:
        base64_input = None
        if testcase.get("input_encoding") == "base64":
            base64_input = testcase.get("input_value")
        elif testcase.get("input_encoding") == "hex":
            base64_input = base64.b64encode(
                bytearray.fromhex(testcase.get("input_value"))).decode("utf-8")
        output = dict_from_payload(base64_input)
        for key in testcase.get("output"):
            if testcase.get("output").get(key) != output.get(key):
                raise Exception(
                    f'Assertion failed for input {testcase.get("input_value")}, key {key}, expected {testcase.get("output").get(key)}, got {output.get(key)}')
            else:
                print(
                    f'"{testcase.get("input_value")}" : Successful test for key "{key}", value "{testcase.get("output").get(key)}"')
| 40.971751 | 158 | 0.420091 |
2a37225b1ff1ac3d4951af1db0bbb18091eaf7a8 | 2,694 | py | Python | intent_classification/seed_transfer.py | YilunZhou/optimal-active-learning | 9ddd822377c42acb345681fcd638f912e8123c53 | [
"MIT"
] | 10 | 2021-01-30T21:48:34.000Z | 2022-03-18T10:39:47.000Z | intent_classification/seed_transfer.py | YilunZhou/optimal-active-learning | 9ddd822377c42acb345681fcd638f912e8123c53 | [
"MIT"
] | null | null | null | intent_classification/seed_transfer.py | YilunZhou/optimal-active-learning | 9ddd822377c42acb345681fcd638f912e8123c53 | [
"MIT"
] | 1 | 2022-02-12T11:48:00.000Z | 2022-02-12T11:48:00.000Z |
import shelve, argparse, os
import numpy as np
import matplotlib.pyplot as plt
import help_text as ht
def main(model='lstm', model_seeds=(0, 1, 2, 3, 4), domain='alarm', data_seed=0,
         batchsize=20, max_epoch=100, patience=20, tot_acq=160):
    """Plot the seed-transfer quality matrix for intent classification.

    Reads per-spec performance curves from ``statistics/perf_curves.shv``
    and saves a heatmap of quality gaps (transfer minus native) annotated
    with absolute transfer qualities.

    Note: the default for ``model_seeds`` is now a tuple — the previous
    list default was a mutable default argument.
    """
    n_seeds = len(model_seeds)
    qualities = np.zeros((n_seeds, n_seeds))
    quality_gaps = np.zeros((n_seeds, n_seeds))
    with shelve.open('statistics/perf_curves.shv') as curves:
        for i, m_to in enumerate(model_seeds):
            for j, m_from in enumerate(model_seeds):
                # Keys are space-joined job specs; 'native' uses the target
                # seed as its own source.
                spec_trans = (f'{model} {m_from} {m_to} {domain} {data_seed} '
                              f'{batchsize} {max_epoch} {patience} {tot_acq}')
                spec_native = (f'{model} {m_to} {m_to} {domain} {data_seed} '
                               f'{batchsize} {max_epoch} {patience} {tot_acq}')
                qualities[i, j] = np.mean(curves[spec_trans]['test'])
                quality_gaps[i, j] = (np.mean(curves[spec_trans]['test']) -
                                      np.mean(curves[spec_native]['test']))
    # Symmetric color range centered at zero so red/blue mean worse/better.
    plt.imshow(quality_gaps, cmap='coolwarm',
               vmin=-abs(quality_gaps).max(), vmax=abs(quality_gaps).max())
    for i in range(n_seeds):
        for j in range(n_seeds):
            plt.annotate(f'{qualities[i, j]:0.3f}', xy=(j, i), ha='center',
                         va='center', fontsize=13)
            rect = plt.Rectangle((i - 0.5, j - 0.5), 1, 1, fill=False, linewidth=1)
            plt.gca().add_patch(rect)
    # Seed -1 denotes the random baseline.
    seed_labels = [str(s) if s != -1 else 'rand' for s in model_seeds]
    plt.xticks(range(n_seeds), seed_labels, fontsize=13)
    plt.yticks(range(n_seeds), seed_labels, fontsize=13)
    plt.xlabel('Source Seed')
    plt.ylabel('Target Seed')
    plt.title('Intent Classification', fontsize=13)
    plt.savefig('../figures/intent_classification/seed_transfer.pdf', bbox_inches='tight')
def main_cli():
    """Build the CLI, echo the parsed arguments, then invoke main()."""
    parser = argparse.ArgumentParser(
        description='Plot seed transfer quality matrix. ')
    # (flag, add_argument keyword options) pairs, in display order.
    option_specs = [
        ('--model', dict(type=str, default='lstm', help=ht.model)),
        ('--model-seeds', dict(type=int, nargs='+', default=[0, 1, 2, 3, 4],
                               help=ht.model_seeds)),
        ('--domain', dict(type=str, default='alarm', help=ht.domain)),
        ('--data-seed', dict(type=int, default=0, help=ht.data_seed)),
        ('--tot-acq', dict(type=int, default=160, help=ht.tot_acq)),
        ('--batchsize', dict(type=int, default=20, help=ht.batchsize)),
        ('--max-epoch', dict(type=int, default=100, help=ht.max_epoch)),
        ('--patience', dict(type=int, default=20, help=ht.patience)),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    print(args)
    main(**vars(args))
# Script entry point: delegate to the CLI wrapper.
if __name__ == '__main__':
    main_cli()
| 54.979592 | 122 | 0.659614 |
ee76ba7d6ef78d57b3a0b84f8840795756b7af8e | 95 | py | Python | DeepML/classifiers/layers.py | deepraj1729/DeepML | 5ecd57d370cac3252b000fc9e9ff2e6f0c965a1b | [
"MIT"
] | 9 | 2020-06-30T12:51:50.000Z | 2021-09-22T16:39:49.000Z | DeepML/classifiers/layers.py | deepraj1729/DeepML | 5ecd57d370cac3252b000fc9e9ff2e6f0c965a1b | [
"MIT"
] | null | null | null | DeepML/classifiers/layers.py | deepraj1729/DeepML | 5ecd57d370cac3252b000fc9e9ff2e6f0c965a1b | [
"MIT"
] | null | null | null | def Dense(input_layers =4):
pass
def Flatten():
    # Stub layer constructor — flattening logic not implemented yet.
    pass
def Input(input_shape):
pass | 11.875 | 27 | 0.652632 |
2694eb6b4d4e3895368180fa607af5502e6fe0ce | 250 | py | Python | Django/DjangoT1.11_LTS/28_Latihan Blog + Name + Namespace/mywebsite/urls.py | Akhadafi/WEB-Framework | 4547a682ac1f007aa6f97512baf76b92ef1c9b9a | [
"MIT"
] | null | null | null | Django/DjangoT1.11_LTS/28_Latihan Blog + Name + Namespace/mywebsite/urls.py | Akhadafi/WEB-Framework | 4547a682ac1f007aa6f97512baf76b92ef1c9b9a | [
"MIT"
] | null | null | null | Django/DjangoT1.11_LTS/28_Latihan Blog + Name + Namespace/mywebsite/urls.py | Akhadafi/WEB-Framework | 4547a682ac1f007aa6f97512baf76b92ef1c9b9a | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.index, name = "index"),
url(r'^blog/',include('blog.urls', namespace="blog")),
url(r'^admin/',admin.site.urls),
]
| 22.727273 | 55 | 0.676 |
bbc4dabff550d360b182965292b575886a0fb7fd | 19,922 | py | Python | ansible/environments/prod/gce.py | Otus-DevOps-2018-05/donasktello_infra | d448ff79a1d161f34273ceb319ee02abbef7eeae | [
"MIT"
] | null | null | null | ansible/environments/prod/gce.py | Otus-DevOps-2018-05/donasktello_infra | d448ff79a1d161f34273ceb319ee02abbef7eeae | [
"MIT"
] | 3 | 2018-07-03T15:15:22.000Z | 2018-10-08T23:03:38.000Z | ansible/old/gce.py | Otus-DevOps-2018-05/lushndm_infra | c481830c051d3a3205f6ab7e070b8ba354af7b75 | [
"MIT"
] | 2 | 2018-12-06T12:41:53.000Z | 2018-12-06T12:55:44.000Z | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
''''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
John Roach <johnroach1985@gmail.com>
Version: 0.0.4
'''
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
    """Small JSON-on-disk cache with mtime-based expiry."""

    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        # Expand '~' and make sure the cache directory exists before use.
        directory = os.path.expanduser(cache_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        self.cache_path_cache = os.path.join(directory, cache_name)
        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        ''' Determines if the cache files have expired, or if it is still valid '''
        if max_age is None:
            max_age = self.cache_max_age
        if not os.path.isfile(self.cache_path_cache):
            return False
        # Fresh as long as mtime + max_age is still in the future.
        modified = os.path.getmtime(self.cache_path_cache)
        return (modified + max_age) > time()

    def get_all_data_from_cache(self, filename=''):
        ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
        target = filename or self.cache_path_cache
        with open(target, 'r') as handle:
            raw = handle.read()
        return json.loads(raw)

    def write_to_cache(self, data, filename=''):
        ''' Writes data to file as JSON. Returns True. '''
        target = filename or self.cache_path_cache
        with open(target, 'w') as handle:
            handle.write(json.dumps(data))
        return True
class GceInventory(object):
def __init__(self):
# Cache object
self.cache = None
# dictionary containing inventory read from disk
self.inventory = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.config = self.get_config()
self.drivers = self.get_gce_drivers()
self.ip_type = self.get_inventory_options()
if self.ip_type:
self.ip_type = self.ip_type.lower()
# Cache management
start_inventory_time = time()
cache_used = False
if self.args.refresh_cache or not self.cache.is_valid():
self.do_api_calls_update_cache()
else:
self.load_inventory_from_cache()
cache_used = True
self.inventory['_meta']['stats'] = {'use_cache': True}
self.inventory['_meta']['stats'] = {
'inventory_load_time': time() - start_inventory_time,
'cache_used': cache_used
}
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(
self.inventory['_meta']['hostvars'][self.args.host],
pretty=self.args.pretty))
else:
# Otherwise, assume user wants all instances grouped
zones = self.parse_env_zones()
print(self.json_format_dict(self.inventory,
pretty=self.args.pretty))
sys.exit(0)
    def get_config(self):
        """
        Reads the settings from the gce.ini file.
        Populates a SafeConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the current
        working directory.

        Side effects: sets self.instance_states, self.instance_tags and
        self.cache. Returns the populated config parser.
        """
        # Default ini location: gce.ini next to this script, unless
        # GCE_INI_PATH points elsewhere.
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = configparser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'gce_zone': '',
            'libcloud_secrets': '',
            'instance_tags': '',
            'inventory_ip_type': '',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': '300'
        })
        # Guarantee the sections exist even when the ini file is absent,
        # so the config.get(...) calls below never raise NoSectionError.
        if 'gce' not in config.sections():
            config.add_section('gce')
        if 'inventory' not in config.sections():
            config.add_section('inventory')
        if 'cache' not in config.sections():
            config.add_section('cache')
        config.read(gce_ini_path)
        #########
        # Section added for processing ini settings
        #########
        # Set the instance_states filter based on config file options
        self.instance_states = []
        if config.has_option('gce', 'instance_states'):
            states = config.get('gce', 'instance_states')
            # Ignore if instance_states is an empty string.
            if states:
                self.instance_states = states.split(',')
        # Set the instance_tags filter, env var overrides config from file
        # and cli param overrides all
        if self.args.instance_tags:
            self.instance_tags = self.args.instance_tags
        else:
            self.instance_tags = os.environ.get(
                'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
        if self.instance_tags:
            self.instance_tags = self.instance_tags.split(',')
        # Caching
        cache_path = config.get('cache', 'cache_path')
        cache_max_age = config.getint('cache', 'cache_max_age')
        # TOOD(supertom): support project-specific caches
        cache_name = 'ansible-gce.cache'
        self.cache = CloudInventoryCache(cache_path=cache_path,
                                         cache_max_age=cache_max_age,
                                         cache_name=cache_name)
        return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
    def get_gce_drivers(self):
        """Determine the GCE authorization settings and return a list of
        libcloud drivers.

        Credential resolution order: an importable ``secrets`` module, the
        ``libcloud_secrets`` ini path, then individual ini settings — with
        environment variables overriding all of them. One driver is built
        per comma-separated project in the resolved project setting.
        """
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = self.config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            import secrets
            args = list(secrets.GCE_PARAMS)
            kwargs = secrets.GCE_KEYWORD_PARAMS
            secrets_found = True
        except:
            # NOTE(review): deliberately best-effort — a missing/partial
            # secrets module just falls through to the next credential source.
            pass
        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                sys.exit(err)
            # Make the configured secrets.py importable by module name.
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            # Fall back to the individual ini settings.
            args = [
                self.config.get('gce', 'gce_service_account_email_address'),
                self.config.get('gce', 'gce_service_account_pem_file_path')
            ]
            kwargs = {'project': self.config.get('gce', 'gce_project_id'),
                      'datacenter': self.config.get('gce', 'gce_zone')}
        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
        kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
        # One libcloud driver per project, all sharing the same credentials.
        gce_drivers = []
        projects = kwargs['project'].split(',')
        for project in projects:
            kwargs['project'] = project
            gce = get_driver(Provider.GCE)(*args, **kwargs)
            gce.connection.user_agent_append(
                '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
            )
            gce_drivers.append(gce)
        return gce_drivers
def parse_env_zones(self):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]]
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--instance-tags', action='store',
help='Only include instances with this tags, separated by comma')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
if inst is None:
return {}
if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
subnet = None
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
# default to exernal IP unless user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
else:
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
'gce_zone': inst.extra['zone'].name,
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
'gce_subnetwork': subnet,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ssh_host
}
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise
def do_api_calls_update_cache(self):
''' Do API calls and save data in cache. '''
zones = self.parse_env_zones()
data = self.group_instances(zones)
self.cache.write_to_cache(data)
self.inventory = data
def list_nodes(self):
all_nodes = []
params, more_results = {'maxResults': 500}, True
while more_results:
for driver in self.drivers:
driver.connection.gce_params = params
all_nodes.extend(driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
def group_instances(self, zones=None):
    """Group all instances into an Ansible inventory dict.

    Nodes are filtered by ``self.instance_states`` / ``self.instance_tags``
    (an empty list means "no filtering"), then added to groups keyed by
    zone, tag, network, machine type, image, status and IP address.
    The special '_meta' key carries per-host variables.
    """
    groups = {}
    hostvars = {}

    def push(key, host):
        # Append host to groups[key], creating the group on first use.
        groups.setdefault(key, []).append(host)

    for node in self.list_nodes():
        # If instance_states is populated, keep only nodes whose current
        # status appears in it; an empty list admits every state.
        if self.instance_states and node.extra['status'] not in self.instance_states:
            continue
        # If instance_tags is populated (config option, GCE_INSTANCE_TAGS
        # env var, or --instance-tags), keep nodes sharing at least one
        # tag with it; an empty list admits every node.
        if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
            continue

        name = node.name
        # NOTE: hostvars are recorded before the zone filter below, so
        # hosts outside the requested zones still appear in '_meta'
        # (mirrors the original ordering).
        hostvars[name] = self.node_to_dict(node)

        zone = node.extra['zone'].name
        # We list all nodes once (cheaper than one request per zone) and
        # filter the results here.
        if zones and zone not in zones:
            continue
        push(zone, name)

        # 'group-<x>' tags become group '<x>'; other tags get a 'tag_' prefix.
        for t in node.extra['tags']:
            push(t[6:] if t.startswith('group-') else 'tag_%s' % t, name)

        network = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
        push('network_%s' % network, name)
        push(node.size, name)
        # Image-less (persistent-disk-booted) nodes share one bucket.
        push(node.image or 'persistent_disk', name)
        push('status_%s' % node.extra['status'].lower(), name)

        # IP groups map one address to exactly one host (overwrite, not append).
        for private_ip in node.private_ips:
            groups[private_ip] = [name]
        for public_ip in node.public_ips:
            groups[public_ip] = [name]

    groups["_meta"] = {"hostvars": hostvars}
    return groups
def json_format_dict(self, data, pretty=False):
    """Serialize *data* to a JSON string.

    When *pretty* is true the output is key-sorted and indented by two
    spaces; otherwise a compact single-line dump is returned.
    """
    dump_args = {'sort_keys': True, 'indent': 2} if pretty else {}
    return json.dumps(data, **dump_args)
# Run the script
if __name__ == '__main__':
    # Constructing GceInventory drives the entire run from its __init__
    # (argument parsing, API/cache lookup, JSON output); the instance is
    # created purely for those side effects.
    GceInventory()
| 36.892593 | 100 | 0.604206 |
ddb506ffffe5aff9bc92a0e5e3a7922306dd48e2 | 428 | py | Python | dojo/tests.py | bsomhz/tiesheet | d05fbcdbf43d20b05416b2f433f8c65a3d15118b | [
"BSD-3-Clause"
] | null | null | null | dojo/tests.py | bsomhz/tiesheet | d05fbcdbf43d20b05416b2f433f8c65a3d15118b | [
"BSD-3-Clause"
] | null | null | null | dojo/tests.py | bsomhz/tiesheet | d05fbcdbf43d20b05416b2f433f8c65a3d15118b | [
"BSD-3-Clause"
] | null | null | null | from dojo.models import Operator
from django.test import TestCase
# Create your tests here.
class UnitTestCase(TestCase):
    """Smoke tests for the dojo app: home-page rendering and the
    Operator model's basic save path."""

    def test_home_page_template(self):
        """The root URL must render the dojo/dojo.html template."""
        resp = self.client.get('/')
        self.assertTemplateUsed(resp, 'dojo/dojo.html')

    def test_operator(self):
        """An Operator can be populated field-by-field and saved."""
        op = Operator()
        op.first_name = 'Bikash'
        op.last_name = 'Maharjan'
        op.save()
| 23.777778 | 59 | 0.67757 |
c11bb7c3b8dcd2a58f82538520d805f9f7bcaa90 | 2,412 | py | Python | a10sdk/core/gslb/gslb_protocol_limit.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/gslb/gslb_protocol_limit.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/gslb/gslb_protocol_limit.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class Limit(A10BaseClass):
    """Class Description::
    Specify limit for GSLB Message Protocol.
    Class limit supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.

    :param ardt_response: {"description": "Response Messages of Active RDT, default is 1000 (Number)", "format": "number", "default": 1000, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param conn_response: {"description": "Response Messages of Connection Load, default is no limit (Number)", "format": "number", "default": 0, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
    :param ardt_session: {"description": "Sessions of Active RDT, default is 32768 (Number)", "format": "number", "default": 32768, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
    :param ardt_query: {"description": "Query Messages of Active RDT, default is 200 (Number)", "format": "number", "default": 200, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
    :param message: {"description": "Amount of Messages, default is 10000 (Number)", "format": "number", "default": 10000, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
    :param response: {"description": "Amount of Response Messages, default is 3600 (Number)", "format": "number", "default": 3600, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/gslb/protocol/limit`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "limit"
        self.a10_url = "/axapi/v3/gslb/protocol/limit"
        # Every payload attribute starts as the empty string until set.
        for attr in ('DeviceProxy', 'ardt_response', 'uuid', 'conn_response',
                     'ardt_session', 'ardt_query', 'message', 'response'):
            setattr(self, attr, "")
        # Any keyword argument overrides the attribute of the same name.
        for key, value in kwargs.items():
            setattr(self, key, value)
| 50.25 | 215 | 0.643035 |
f4aafd55f7f64f9a453575cb8b6222b1cedf7db0 | 5,046 | py | Python | sciwx/plugins/histogram.py | dada1437903138/imagepy | 65d9ce088894eef587054e04018f9d34ff65084f | [
"BSD-4-Clause"
] | null | null | null | sciwx/plugins/histogram.py | dada1437903138/imagepy | 65d9ce088894eef587054e04018f9d34ff65084f | [
"BSD-4-Clause"
] | null | null | null | sciwx/plugins/histogram.py | dada1437903138/imagepy | 65d9ce088894eef587054e04018f9d34ff65084f | [
"BSD-4-Clause"
] | null | null | null | from ..widgets import HistPanel, CMapPanel, FloatSlider, CMapSelCtrl
import wx, numpy as np
from sciapp import Source
class Histogram( wx.Panel ):
    """Side panel showing the histogram of the application's active image.

    Provides two sliders for the display range (contrast window), quick
    range presets (0-255 / min-max), histogram refresh for the current
    slice or the whole stack, and colormap selection/editing.

    ``app`` must expose ``get_img()`` returning the active image (or None);
    every handler is a no-op when there is no image.
    """
    # Display name used by the hosting application.
    title = 'Histogram Widget'

    def __init__( self, parent, app):
        """Build the widget layout and wire up all event handlers."""
        wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 255,0 ), style = wx.TAB_TRAVERSAL )
        self.app = app
        bSizer1 = wx.BoxSizer( wx.VERTICAL )
        # Histogram plot at the top of the panel.
        self.histpan = HistPanel(self)
        bSizer1.Add(self.histpan, 0, wx.ALL|wx.EXPAND, 5 )
        # Upper and lower display-range sliders, initialised to 0-255.
        self.sli_high = FloatSlider(self, (0,255), 0, '')
        self.sli_high.SetValue(255)
        bSizer1.Add( self.sli_high, 0, wx.ALL|wx.EXPAND, 0 )
        self.sli_low = FloatSlider(self, (0,255), 0, '')
        self.sli_low.SetValue(0)
        bSizer1.Add( self.sli_low, 0, wx.ALL|wx.EXPAND, 0 )
        # Row of preset / refresh buttons.
        bSizer2 = wx.BoxSizer( wx.HORIZONTAL )
        self.btn_8bit = wx.Button( self, wx.ID_ANY, u"0-255", wx.DefaultPosition, wx.Size( -1,-1 ), wx.BU_EXACTFIT )
        self.btn_8bit.SetMaxSize( wx.Size( -1,40 ) )
        bSizer2.Add( self.btn_8bit, 0, wx.ALIGN_CENTER|wx.ALL, 0 )
        bSizer2.AddStretchSpacer(prop=1)
        self.btn_minmax = wx.Button( self, wx.ID_ANY, u"min-max", wx.DefaultPosition, wx.Size( -1,-1 ), wx.BU_EXACTFIT )
        self.btn_minmax.SetMaxSize( wx.Size( -1,40 ) )
        bSizer2.Add( self.btn_minmax, 0, wx.ALIGN_CENTER|wx.ALL, 0 )
        bSizer2.AddStretchSpacer(prop=1)
        self.btn_slice = wx.Button( self, wx.ID_ANY, u"slice", wx.DefaultPosition, wx.Size( -1,-1 ), wx.BU_EXACTFIT )
        self.btn_slice.SetMaxSize( wx.Size( -1,40 ) )
        bSizer2.Add( self.btn_slice, 0, wx.ALIGN_CENTER|wx.ALL, 0 )
        bSizer2.AddStretchSpacer(prop=1)
        self.btn_stack = wx.Button( self, wx.ID_ANY, u"stack", wx.DefaultPosition, wx.Size( -1,-1 ), wx.BU_EXACTFIT )
        self.btn_stack.SetMaxSize( wx.Size( -1,40 ) )
        bSizer2.Add( self.btn_stack, 0, wx.ALIGN_CENTER|wx.ALL, 0 )
        bSizer1.Add( bSizer2, 0, wx.EXPAND |wx.ALL, 5 )
        # Colormap chooser and colormap editor panel.
        self.cmapsel = CMapSelCtrl(self)
        bSizer1.Add(self.cmapsel, 0, wx.ALL|wx.EXPAND, 5 )
        self.cmap = CMapPanel(self)
        bSizer1.Add(self.cmap, 0, wx.ALL|wx.EXPAND, 5 )
        self.SetSizer( bSizer1 )
        self.Layout()
        # Connect events.
        # NOTE(review): the high slider is bound to on_low and the low
        # slider to on_high -- this looks swapped, but both handlers read
        # both sliders and apply the same range, so behaviour is
        # symmetric; confirm the naming intent.
        self.sli_high.Bind( wx.EVT_SCROLL, self.on_low )
        self.sli_low.Bind( wx.EVT_SCROLL, self.on_high )
        self.btn_8bit.Bind( wx.EVT_BUTTON, self.on_8bit )
        self.btn_minmax.Bind( wx.EVT_BUTTON, self.on_minmax )
        self.btn_slice.Bind( wx.EVT_BUTTON, self.on_slice )
        self.btn_stack.Bind( wx.EVT_BUTTON, self.on_stack )
        self.cmap.set_handle(self.on_cmap)
        self.cmapsel.Bind(wx.EVT_COMBOBOX, self.on_cmapsel)
        # Full value range currently mapped onto the histogram's 0-255 axis.
        self.range = (0, 255)

    def on_cmap(self):
        """Apply the colormap edited in the CMapPanel to the active image."""
        ips = self.app.get_img()
        if ips is None: return
        cmap = CMapPanel.linear_color(self.cmap.GetValue())
        ips.lut = cmap
        ips.update()

    def on_cmapsel(self, event):
        """Apply the colormap picked from the selector combo box."""
        ips = self.app.get_img()
        if ips is None: return
        key = self.cmapsel.GetSelection()
        ips.lut = self.cmapsel.vs[key]
        ips.update()

    # Slider scroll handlers: each clamps the sliders so low <= high,
    # pushes the new (low, high) display range onto the image, and moves
    # the limit markers on the histogram plot.
    def on_low( self, event ):
        """Handle a change of the slider bound as 'low'."""
        ips = self.app.get_img()
        if ips is None: return
        # Keep the pair ordered: drag high up to low if it fell below it.
        if self.sli_high.GetValue()<self.sli_low.GetValue():
            self.sli_high.SetValue(self.sli_low.GetValue())
        ips.range = (self.sli_low.GetValue(), self.sli_high.GetValue())
        ips.chan_rg = ips.range
        # Normalise slider positions against self.range, then scale to the
        # histogram's fixed 0-255 axis for the overlay limit lines.
        lim1 = 1.0 * (self.sli_low.GetValue() - self.range[0])/(self.range[1]-self.range[0])
        lim2 = 1.0 * (self.sli_high.GetValue() - self.range[0])/(self.range[1]-self.range[0])
        self.histpan.set_lim(lim1*255, lim2*255)
        ips.update()

    def on_high( self, event ):
        """Handle a change of the slider bound as 'high'."""
        ips = self.app.get_img()
        if ips is None: return
        # Keep the pair ordered: drag low down to high if it rose above it.
        if self.sli_low.GetValue()>self.sli_high.GetValue():
            self.sli_low.SetValue(self.sli_high.GetValue())
        ips.range = (self.sli_low.GetValue(), self.sli_high.GetValue())
        ips.chan_rg = ips.range
        lim1 = 1.0 * (self.sli_low.GetValue() - self.range[0])/(self.range[1]-self.range[0])
        lim2 = 1.0 * (self.sli_high.GetValue() - self.range[0])/(self.range[1]-self.range[0])
        self.histpan.set_lim(lim1*255, lim2*255)
        ips.update()

    def on_8bit( self, event ):
        """Preset: reset the display range and sliders to 0-255."""
        ips = self.app.get_img()
        if ips is None: return
        self.range = ips.range = (0,255)
        hist = ips.histogram()
        self.histpan.SetValue(hist)
        self.sli_low.set_para((0,255), 0)
        self.sli_high.set_para((0,255), 0)
        self.sli_low.SetValue(0)
        self.sli_high.SetValue(255)
        self.histpan.set_lim(0,255)
        ips.update()

    def on_minmax( self, event ):
        """Preset: stretch the display range to the image's min/max values."""
        ips = self.app.get_img()
        if ips is None: return
        # get_updown()[0] is taken as the (min, max) pair of the image data
        # -- TODO confirm against the image class.
        minv, maxv = ips.get_updown()[0]
        self.range = ips.range = (minv, maxv)
        hist = ips.histogram()
        self.histpan.SetValue(hist)
        self.sli_low.set_para(self.range, 10)
        self.sli_high.set_para(self.range, 10)
        self.sli_low.SetValue(minv)
        self.sli_high.SetValue(maxv)
        self.histpan.set_lim(0,255)
        ips.update()

    def on_slice( self, event ):
        """Refresh the histogram from the current slice only."""
        ips = self.app.get_img()
        if ips is None: return
        hist = ips.histogram()
        self.histpan.SetValue(hist)

    def on_stack( self, event ):
        """Refresh the histogram from every slice and channel of the stack
        (subsampled via step=512 to keep it cheap)."""
        ips = self.app.get_img()
        if ips is None: return
        hists = ips.histogram(slices='all', chans='all', step=512)
        self.histpan.SetValue(hists)
dadcb418fc94e8a4444e8d4af2a70ad3756237f1 | 30,080 | py | Python | patient/migrations/0019_auto__del_field_obstetrichistory_list_previous_obstetric_history__del_.py | aazhbd/medical_info01 | b08961089e6a7cdc567f879ab794e45067171418 | [
"BSD-3-Clause"
] | 1 | 2017-03-25T05:19:42.000Z | 2017-03-25T05:19:42.000Z | patient/migrations/0019_auto__del_field_obstetrichistory_list_previous_obstetric_history__del_.py | aazhbd/medical_info01 | b08961089e6a7cdc567f879ab794e45067171418 | [
"BSD-3-Clause"
] | null | null | null | patient/migrations/0019_auto__del_field_obstetrichistory_list_previous_obstetric_history__del_.py | aazhbd/medical_info01 | b08961089e6a7cdc567f879ab794e45067171418 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Drop the obsolete FK columns removed in this schema revision.

    Deletes field 'ObstetricHistory.list_previous_obstetric_history'
    and field 'GynaecologicalHistory.previous_surgery'.
    """
    for table, column in (
            (u'patient_obstetrichistory', 'list_previous_obstetric_history_id'),
            (u'patient_gynaecologicalhistory', 'previous_surgery_id')):
        db.delete_column(table, column)
def backwards(self, orm):
    """Re-create the FK columns dropped by forwards() (reverse migration)."""
    fk_field = 'django.db.models.fields.related.ForeignKey'
    # Adding field 'ObstetricHistory.list_previous_obstetric_history'
    db.add_column(u'patient_obstetrichistory', 'list_previous_obstetric_history',
                  self.gf(fk_field)(default=1, to=orm['patient.PreviousObstetricHistory']),
                  keep_default=False)
    # Adding field 'GynaecologicalHistory.previous_surgery'
    db.add_column(u'patient_gynaecologicalhistory', 'previous_surgery',
                  self.gf(fk_field)(default=1, to=orm['patient.PreviousSurgery']),
                  keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_injection': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient'] | 82.865014 | 187 | 0.581117 |
fcdfca03f9200fc10b3b7c401cea796a4eae0907 | 6,597 | py | Python | scripts/src/demo_mockchain.py | ltfschoen/pyethfinality | 6a1f3c73fd3f0cab1e4b6d1f2a902100931a17d1 | [
"BSD-3-Clause"
] | 1 | 2018-07-01T03:52:20.000Z | 2018-07-01T03:52:20.000Z | scripts/src/demo_mockchain.py | ltfschoen/pyethfinality | 6a1f3c73fd3f0cab1e4b6d1f2a902100931a17d1 | [
"BSD-3-Clause"
] | 8 | 2017-11-28T08:24:22.000Z | 2017-12-01T20:46:45.000Z | scripts/src/demo_mockchain.py | ltfschoen/pyethfinality | 6a1f3c73fd3f0cab1e4b6d1f2a902100931a17d1 | [
"BSD-3-Clause"
] | null | null | null | # Reference: https://gist.github.com/heikoheiko/a84b05c78d2971c26f2d3e3c49ec8d83
# Converted to Python 3 by Luke Schoen
import collections
import random
import json
import hashlib
import math
from datetime import datetime
def hexhash(x):
    """Return a short pseudo-address ('0x' + 6 hex chars) derived from *x*.

    The address is the first six hex digits of the SHA-224 digest of str(x).

    :param x: any value convertible with str()
    :return: address string such as '0x1a2b3c'
    """
    # Reference: https://docs.python.org/3/library/hashlib.html
    digest = hashlib.sha224(str(x).encode('utf-8')).hexdigest()
    return '0x' + digest[:6]
def current_time():
    """Return the current UTC time as 'YYYY-MM-DD HH:MM:SS.fffff'.

    The trailing microsecond digit is dropped, leaving five fractional digits.
    """
    stamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
    return stamp[:-1]
TransferEvent = collections.namedtuple('TransferEvent', 'sender, receiver, amount, created_at')
class Accounts(object):
    """
    Maps account address -> integer balance and enforces supply conservation.

    Instances are built either from scratch (``num_accounts`` random balances)
    or as a snapshot copy of another Accounts object (``copy_from``).
    """
    # Class-level default; each instance overwrites it in __init__.
    initial_supply = 0

    def __init__(self, num_accounts=0, copy_from=None):
        self.balances = dict()
        if copy_from:
            # Snapshot semantics: copy so mutations don't leak back to the source.
            self.balances = copy_from.balances.copy()
        else:
            self._gen_accounts(num_accounts)
        self.initial_supply = self.supply

    def _gen_accounts(self, num):
        # Create `num` accounts with random balances in [1, 100].
        for i in range(num):
            k = hexhash(i)
            v = random.randint(1, 100)
            self.balances[k] = v

    @property
    def supply(self):
        """Total amount of currency across all accounts."""
        return sum(self.balances.values())

    def median(self):
        """
        Median of the list of values stored in balances hash
        Reference: https://docs.python.org/3/howto/sorting.html
        i.e. balances = [1,5,3,7,6,7] # => [1, 3, 5, 6, 7, 7]
        sorted(balances)[len(balances) // 2] # => 6
        """
        # BUG FIX: `len(...) / 2` is a float in Python 3 and floats are not
        # valid list indices (TypeError); use integer floor division.
        return sorted(self.balances.values())[len(self.balances) // 2]

    def transfer(self, sender, receiver, amount):
        # Transfer an amount from a sender account to a receiver account.
        # Individual balances may go negative; only total supply is checked.
        self.balances[sender] -= amount
        self.balances[receiver] += amount
        assert self.supply == self.initial_supply

    def random_transfer(self):
        """
        Generate a valid random transfer by:
        - Choosing a random sender and receiver.
        - Sending a random proportion of the senders balance to the receiver.
        """
        # NOTE(review): loops forever when fewer than two accounts exist or
        # every balance is zero — acceptable for this demo, but confirm.
        while True:
            sender = random.choice(list(self.balances.keys()))
            if not self.balances[sender]:
                continue
            receiver = random.choice(list(self.balances.keys()))
            if sender == receiver:
                continue
            amount = random.randint(1, self.balances[sender])
            created_at = current_time()
            self.transfer(sender, receiver, amount)
            return TransferEvent(sender, receiver, amount, created_at)
class Block(object):
    """A block in the mock chain: an Accounts snapshot plus transfer events."""

    def __init__(self, prevblock=None, num_accounts=0):
        if not prevblock:  # genesis block
            self.accounts = Accounts(num_accounts=num_accounts)
            self.prevhash = hexhash(-1)
            self.number = 0
        else:
            # Child block: start from the parent's balances snapshot.
            self.accounts = Accounts(copy_from=prevblock.accounts)
            self.number = prevblock.number + 1
            self.prevhash = prevblock.hash
        self.transfers = []
        self.prevblock = prevblock
        self.created_at = current_time()

    def copy_transfers(self, other, fraction=0.5):
        """Re-apply the first `fraction` of another block's transfers here."""
        assert isinstance(other, Block)
        for t in other.transfers[:int(len(other.transfers) * fraction)]:
            self.transfers.append(t)
            # BUG FIX: Accounts.transfer() takes (sender, receiver, amount);
            # the extra t.created_at argument raised TypeError at runtime
            # whenever a sibling block's transfers were copied.
            self.accounts.transfer(t.sender, t.receiver, t.amount)

    @property
    def hash(self):
        # Hash over the full instance state; including prevhash chains blocks.
        return hexhash(repr(self.__dict__))

    def random_transfers(self, num):
        """Append `num` random transfer events produced by the accounts model."""
        for i in range(num):
            self.transfers.append(self.accounts.random_transfer())

    def serialize(self, include_balances=False):
        """Return a JSON-serializable dict; balances always included for genesis."""
        s = dict(number=self.number,
                 hash=self.hash,
                 prevhash=self.prevhash,
                 created_at=self.created_at,
                 transfers=[dict(x._asdict()) for x in self.transfers]
                 )
        if include_balances or self.number == 0:
            s['balances'] = self.accounts.balances
        return s
def gen_chain(max_height, p_revert, num_accounts, max_transfers):
    """Simulate chain growth with occasional reorgs.

    With probability `p_revert` the head steps back to its parent (the old
    head is re-appended, so the returned list may contain duplicates);
    otherwise a new child block is mined. When the new child has a sibling at
    the same height, half of the sibling's transfers are re-included.

    :return: list of Block objects in the order the head visited them.
    """
    tip = Block(num_accounts=num_accounts)
    blocks = [tip]
    while tip.number < max_height:
        if tip.number > 0 and random.random() < p_revert:
            tip = tip.prevblock
        else:
            tip = Block(prevblock=tip)
            # Sibling exists when the block two entries back sits at the same height.
            if len(blocks) > 2 and blocks[-2].number == tip.number:
                sibling = blocks[-2]
                # Re-include part of the competing block's transactions.
                tip.copy_transfers(sibling, 0.5)
                tip.random_transfers(random.randint(0, math.ceil(max_transfers / 2)))
            else:
                tip.random_transfers(random.randint(0, max_transfers))
        blocks.append(tip)
    return blocks
def longest_revert(chain):
    """Return the deepest reorg seen: the largest drop from the highest
    block number reached so far to a later (lower) head number."""
    best_height = 0
    deepest = 0
    for block in chain:
        if block.number > best_height:
            best_height = block.number
        depth = best_height - block.number
        if depth > deepest:
            deepest = depth
    return deepest
def run():
    """Build a small demo blockchain and print it as JSON plus summary stats.

    gen_chain parameters:
    - max_height: maximum number of blocks
    - p_revert: probability that the head reverts to its parent block
    - num_accounts: number of Accounts generated in the genesis block
    - max_transfers: maximum transfers per block
    """
    random.seed(43)  # deterministic demo output
    # Larger run: gen_chain(max_height=10, p_revert=0.5, num_accounts=100, max_transfers=10)
    chain = gen_chain(max_height=3, p_revert=0.6, num_accounts=2, max_transfers=1)
    serialized_blocks = [b.serialize(include_balances=True) for b in chain]
    print('json dumps: {}'.format(json.dumps(serialized_blocks, indent=4, sort_keys=True)))
    print('blocks: {} max reverted:{}'.format(len(chain), longest_revert(chain)))
    # The chain may contain duplicate Block objects after reverts; dedupe first.
    txs = [t for block in set(chain) for t in block.transfers]
    print('total transfers:{} unique transfers:{}'.format(len(txs), len(set(txs))))
if __name__ == '__main__':
run() | 34.005155 | 95 | 0.629225 |
79bab41a2997a17f76d28de61fe290a3271a9f85 | 1,892 | py | Python | azure-batch/azure/batch/models/job_schedule_patch_parameter.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | azure-batch/azure/batch/models/job_schedule_patch_parameter.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | azure-batch/azure/batch/models/job_schedule_patch_parameter.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobSchedulePatchParameter(Model):
    """The set of changes to be made to a job schedule.

    :param schedule: The schedule according to which jobs will be created. If
     you do not specify this element, the existing schedule is left unchanged.
    :type schedule: :class:`Schedule <azure.batch.models.Schedule>`
    :param job_specification: The details of the jobs to be created on this
     schedule. Updates affect only jobs that are started after the update has
     taken place. Any currently active job continues with the older
     specification.
    :type job_specification: :class:`JobSpecification
     <azure.batch.models.JobSpecification>`
    :param metadata: A list of name-value pairs associated with the job
     schedule as metadata. If you do not specify this element, existing
     metadata is left unchanged.
    :type metadata: list of :class:`MetadataItem
     <azure.batch.models.MetadataItem>`
    """

    # Maps each Python attribute to its REST wire name and msrest type string;
    # consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, schedule=None, job_specification=None, metadata=None):
        # NOTE(review): Model.__init__ is not invoked here — confirm the msrest
        # base class needs no initialization for (de)serialization to work.
        self.schedule = schedule
        self.job_specification = job_specification
        self.metadata = metadata
| 43 | 85 | 0.658034 |
f206e8141dcaedeb450ade911e1539fb9a4518bc | 200 | py | Python | coding/learn_pyspark/pyspark_demo.py | yatao91/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | 3 | 2021-05-25T16:58:52.000Z | 2022-02-05T09:37:17.000Z | coding/learn_pyspark/pyspark_demo.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | coding/learn_pyspark/pyspark_demo.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("My App")
sc = SparkContext(conf=conf)
if __name__ == '__main__':
print(sc)
| 20 | 58 | 0.685 |
110bd348a76066fb9b8f7f75040d56ccb412dfe4 | 2,160 | py | Python | src/api/auth/models/others.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/auth/models/others.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/auth/models/others.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.db import models
class OperationStatus:
    """String constants for the lifecycle states of an operation switch
    (used as the ``status`` value of :class:`OperationConfig`)."""

    DISABLED = "disabled"
    ACTIVE = "active"
    INVISIBLE = "invisible"
class OperationConfig(models.Model):
    """
    Feature switch (功能开关): per-operation enable/visibility configuration.
    """

    operation_id = models.CharField(primary_key=True, max_length=255)
    operation_name = models.CharField(max_length=255)
    operation_alias = models.CharField(max_length=255)
    # One of the OperationStatus constants; defaults to disabled.
    status = models.CharField(max_length=255, default=OperationStatus.DISABLED)
    description = models.TextField()
    # Comma-separated usernames; NULL means no users configured.
    users = models.TextField(null=True)

    @property
    def user_list(self):
        """Return ``users`` split on commas with whitespace stripped
        (empty list when ``users`` is None)."""
        if self.users is None:
            return []
        return [u.strip() for u in self.users.split(",")]

    class Meta:
        managed = False  # table schema is maintained outside Django migrations
        app_label = "auth"
        db_table = "operation_config"
| 40 | 111 | 0.718981 |
bdb86e7834a64aeb99eff7e7d74cc6334daf2703 | 3,552 | py | Python | pybn_exec.py | Nogarx/PyBN | 46493186a65c796f15312560e5d43a90b090a62d | [
"MIT"
] | 1 | 2021-04-23T15:40:10.000Z | 2021-04-23T15:40:10.000Z | pybn_exec.py | Nogarx/PyBN | 46493186a65c796f15312560e5d43a90b090a62d | [
"MIT"
] | null | null | null | pybn_exec.py | Nogarx/PyBN | 46493186a65c796f15312560e5d43a90b090a62d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pybn.execution as execution
from pybn.networks import FuzzyBooleanNetwork
from pybn.graphs import uniform_graph
from pybn.observers import EntropyObserver, TransitionsObserver
import numpy as np
import argparse
import re
#----------------------------------------------------------------------------
def run(networks, initial_states, num_nodes, steps, transient, base_values):
    """Configure and dispatch a PyBN experiment over fuzzy Boolean networks.

    :param networks: number of networks generated per configuration tuple.
    :param initial_states: number of initial states sampled per network.
    :param num_nodes: number of nodes per network.
    :param steps: simulation steps per network run.
    :param transient: warm-up steps discarded before observation.
    :param base_values: list of bases iterated over by the experiment.
    """
    # Configure experiment: sweep connectivity k from 1.0 to 5.0 in 0.1 steps.
    connectivity_values = np.arange(1.0,5.01,0.1)
    observers = [EntropyObserver, TransitionsObserver]
    storage_path = '/storage/gershenson_g/mendez/pybn/'
    # NOTE(review): the keys below form the schema consumed by
    # pybn.execution.run_experiment — confirm against that API before changing.
    configuration = {
        'network': {'class': FuzzyBooleanNetwork},
        'graph': {'function': uniform_graph},
        # min/max/complement realize the fuzzy logic operators.
        'fuzzy': {'conjunction': lambda x,y : min(x,y), 'disjunction': lambda x,y : max(x,y), 'negation': lambda x : 1 - x},
        'parameters': {'nodes': num_nodes, 'steps': steps, 'transient': transient},
        'summary':{'per_node': True, 'precision': 6},
        'execution': {'networks': networks, 'samples': initial_states},
        'observers': observers,
        'storage_path' : storage_path
    }
    # Initialize iterator over the (base, k) configuration grid.
    iterator = execution.ExecutionIterator()
    iterator.register_variable('base', base_values)
    iterator.register_variable('k', connectivity_values)
    # Dispatch the experiment.
    execution.run_experiment(configuration, iterator)
#----------------------------------------------------------------------------
def _parse_num_range(s):
    """Parse either a comma-separated list 'a,b,c' or an inclusive range 'a-c'
    into a list of ints."""
    match = re.match(r'^(\d+)-(\d+)$', s)
    if match is not None:
        lo, hi = int(match.group(1)), int(match.group(2))
        return list(range(lo, hi + 1))
    return [int(part) for part in s.split(',')]
#----------------------------------------------------------------------------
_cmdline_help_epilog = '''Example:
# Example of experiment.
python %(prog)s ---networks=1000 --initial_states=1000 --num_nodes=40 --steps=500 --transient=250 --base_values=2,3,4
'''
#----------------------------------------------------------------------------
def main():
    """Parse command-line arguments and dispatch the PyBN experiment."""
    parser = argparse.ArgumentParser(
        description='Run a PyBN experiment.',
        epilog=_cmdline_help_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    group = parser.add_argument_group('General configuration')
    group.add_argument('--networks', dest='networks', type=int, help='Number of networks per configuration tuple', default=1000)
    group.add_argument('--initial_states', dest='initial_states', type=int, help='Number of initial states per network', default=1000)
    group.add_argument('--num_nodes', dest='num_nodes', type=int, help='Number of nodes per network', default=40)
    group.add_argument('--steps', dest='steps', type=int, help='Number of steps per network run', default=500)
    group.add_argument('--transient', dest='transient', type=int, help='Number of pre-warm steps per network run', default=250)
    group.add_argument('--base_values', dest='base_values', type=_parse_num_range, help='List of bases for the experiment', default=[2])
    args = parser.parse_args()
    try:
        run(**vars(args))
    except TypeError:
        # BUG FIX: the previous bare `except:` also swallowed SystemExit (so
        # `--help` printed "Error: Arguments mismatch." and exited 1) and
        # KeyboardInterrupt. Only a signature mismatch raises TypeError here.
        print('Error: Arguments mismatch.')
        raise SystemExit(1)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 41.302326 | 136 | 0.588682 |
35b7a9f37db695d8601da7acb4cafce29b1e1fba | 121 | py | Python | Modulo 2/Lista06/6.py | BelfortJoao/Programacao-1 | 2d463744379ad3e4b0f5882ad923aae7ff80197a | [
"MIT"
] | 2 | 2021-08-17T14:02:13.000Z | 2021-08-19T02:37:28.000Z | Modulo 2/Lista06/6.py | BelfortJoao/Programacao-1 | 2d463744379ad3e4b0f5882ad923aae7ff80197a | [
"MIT"
] | null | null | null | Modulo 2/Lista06/6.py | BelfortJoao/Programacao-1 | 2d463744379ad3e4b0f5882ad923aae7ff80197a | [
"MIT"
] | 1 | 2021-09-05T20:18:45.000Z | 2021-09-05T20:18:45.000Z | n = int(input("Qual o tamanho do vetor?"))
x = [int(input()) for x in range(n)]
for i in range(0, n, 2):
print(x[i])
| 24.2 | 42 | 0.578512 |
827cab8ec2a93b797b537df3c99370d7b343d751 | 2,984 | py | Python | src/oci/data_integration/models/error_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_integration/models/error_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_integration/models/error_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ErrorDetails(object):
    """
    The details of an error that occured.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new ErrorDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param code:
            The value to assign to the code property of this ErrorDetails.
        :type code: str

        :param message:
            The value to assign to the message property of this ErrorDetails.
        :type message: str

        """
        # swagger_types / attribute_map drive the OCI SDK's generic
        # (de)serialization; keys must match the REST field names exactly.
        self.swagger_types = {
            'code': 'str',
            'message': 'str'
        }

        self.attribute_map = {
            'code': 'code',
            'message': 'message'
        }

        self._code = None
        self._message = None

    @property
    def code(self):
        """
        **[Required]** Gets the code of this ErrorDetails.
        A short error code that defines the error, meant for programmatic parsing. See
        `API Errors`__.

        __ https://docs.cloud.oracle.com/Content/API/References/apierrors.htm


        :return: The code of this ErrorDetails.
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """
        Sets the code of this ErrorDetails.
        A short error code that defines the error, meant for programmatic parsing. See
        `API Errors`__.

        __ https://docs.cloud.oracle.com/Content/API/References/apierrors.htm


        :param code: The code of this ErrorDetails.
        :type: str
        """
        self._code = code

    @property
    def message(self):
        """
        **[Required]** Gets the message of this ErrorDetails.
        A user-friendly error message.


        :return: The message of this ErrorDetails.
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """
        Sets the message of this ErrorDetails.
        A user-friendly error message.


        :param message: The message of this ErrorDetails.
        :type: str
        """
        self._message = message

    def __repr__(self):
        # Human-readable dump of the model's fields (SDK helper).
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 27.62963 | 245 | 0.617627 |
a24f9c3c9f9b32a8f6873c216950b28fc92d2712 | 4,725 | py | Python | RPG/bot_classes/locations/spaceship/computer.py | Flashweb14/TelegramRPG | 42619f7bb3059fd927a9eba5442c07d7e9430366 | [
"MIT"
] | 1 | 2020-04-15T11:52:19.000Z | 2020-04-15T11:52:19.000Z | RPG/bot_classes/locations/spaceship/computer.py | Flashweb14/TelegramRPG | 42619f7bb3059fd927a9eba5442c07d7e9430366 | [
"MIT"
] | null | null | null | RPG/bot_classes/locations/spaceship/computer.py | Flashweb14/TelegramRPG | 42619f7bb3059fd927a9eba5442c07d7e9430366 | [
"MIT"
] | 1 | 2020-05-07T13:16:22.000Z | 2020-05-07T13:16:22.000Z | from time import sleep
from datetime import datetime
from RPG.consts.game_states import COMPUTER
from RPG.bot_classes.base_handler import BaseHandler
class Computer(BaseHandler):
    """In-game spaceship onboard computer: a pseudo-terminal the player
    drives through chat messages (Telegram bot)."""

    def __init__(self, game, spaceship):
        # Register this handler under the COMPUTER game state.
        super().__init__(game, COMPUTER)
        self.spaceship = spaceship

    def show(self, message):
        """Play the console boot-up sequence in chat (sleeps pace the animation)."""
        self.game.bot.send_message(message.chat.id, "Ты подходишь к бортовому компьютеру и запускаешь его")
        sleep(1)
        self.game.bot.send_message(message.chat.id, "_Spaceship Minisoft console: starting._",
                                   parse_mode='Markdown')
        sleep(1)
        self.game.bot.send_message(message.chat.id, "_Loading..._",
                                   parse_mode='Markdown')
        sleep(2)
        # Banner includes the real current timestamp, truncated to seconds.
        self.game.bot.send_message(message.chat.id,
                                   f"_Spaceship Minisoft console 3.8.2 _ {str(datetime.today())[:-7]}",
                                   parse_mode='Markdown')
        self.game.bot.send_message(message.chat.id, '_Введите "help", чтобы получить список основынх команд_',
                                   parse_mode='Markdown')

    def handle(self, message):
        """Dispatch one console command typed by the player.

        Commands: help, srp <planet>, sps inf eqp, cpi <planet>, pln, plo, q.
        Unrecognized input falls through to an error reply.
        """
        if message.text == 'help':
            self.game.bot.send_message(message.chat.id,
                                       '*srp <ИМЯ ПЛАНЕТЫ>* _- установить маршрут на выбранную планету_ \n'
                                       '*sps inf eqp* _- посмотреть информацию о корабле и его снаряжении_ \n'
                                       '*cpi <ИМЯ ПЛАНЕТЫ>* _- посмотреть информацию о планете_ \n'
                                       '*pln* _- вывести список ближайших планет_ \n'
                                       '*plo* _- вывести спиок открытых планет_ \n'
                                       '*q* _- закрыть консоль Spaceship Minisoft_',
                                       parse_mode='Markdown')
        elif message.text.startswith('srp'):
            # "srp <name>": set route to the named planet.
            planet_name = message.text[4:].strip().capitalize()
            for planet in self.game.planets:
                if planet.name == planet_name:
                    self.game.current_planet = planet
                    self.game.bot.send_message(message.chat.id, f'Вы успешно прибыли на планету {planet_name}')
                    if planet not in self.game.opened_planets:
                        self.game.opened_planets.append(planet)
            else:
                # NOTE(review): for/else without a `break` — this else branch
                # runs even after a successful match. Looks like a missing
                # `break` in the loop above; confirm intended behavior.
                self.game.bot.send_message(message.chat.id, 'Невозможно проложить маршрут к данной планете. '
                                                            'Причина: планета не найдена')
        elif message.text.strip() == 'sps inf eqp':
            # Ship info and equipment summary.
            self.game.bot.send_message(message.chat.id, self.spaceship.get_info(), parse_mode='Markdown')
        elif message.text.startswith('cpi'):
            # "cpi <name>": print info about the named planet.
            planet_name = message.text[4:].strip().capitalize()
            for planet in self.game.planets:
                if planet.name == planet_name:
                    self.game.bot.send_message(message.chat.id, planet.get_info(),
                                               parse_mode='Markdown')
            else:
                # NOTE(review): same for/else-without-break pattern as in srp.
                self.game.bot.send_message(message.chat.id, 'Не удалось найти сведений о данной планете.')
        elif message.text.strip() == 'pln':  # TODO other planets
            # Nearby planets: currently a hard-coded list shown only pre-departure.
            if not self.game.current_planet:
                self.game.bot.send_message(message.chat.id, '🌎*Ближайшие планеты*\n'
                                                            ' - Эстрад',
                                           parse_mode='Markdown')
        elif message.text.strip() == 'plo':
            # List planets the player has already discovered.
            if self.game.opened_planets:
                opened_planets = ' -' + '\n - '.join([str(planet) for planet in self.game.opened_planets])
                self.game.bot.send_message(message.chat.id, f'🌎Открытые планеты\n'
                                                            f'{opened_planets}')
            else:
                self.game.bot.send_message(message.chat.id, 'Вы пока не открыли ни одной планеты.',
                                           parse_mode='Markdown')
        elif message.text == 'q':
            # Quit: play the shutdown animation, then hand control back to the bridge.
            self.game.bot.send_message(message.chat.id, '_Closing terminal..._',
                                       parse_mode='Markdown')
            sleep(1)
            self.game.bot.send_message(message.chat.id, '_Process finished with exit code -1_',
                                       parse_mode='Markdown')
            sleep(1)
            self.spaceship.captain_bridge.start(message)
        else:
            self.game.bot.send_message(message.chat.id, 'Введена неизвестная команда. Попробуйте ещё раз.')
| 58.333333 | 116 | 0.528677 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.