hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf2619c94c5df04dc2f760b5aa0d93866b4ebe7 | 2,976 | py | Python | python/ray/experimental/client/logsclient.py | lavanyashukla/ray | 9c1a75b6ff82a842131e6beb3c260188befc21df | [
"Apache-2.0"
] | 1 | 2020-10-21T22:24:27.000Z | 2020-10-21T22:24:27.000Z | python/ray/experimental/client/logsclient.py | mfitton/ray | fece8db70d703da1aad192178bd50923e83cc99a | [
"Apache-2.0"
] | null | null | null | python/ray/experimental/client/logsclient.py | mfitton/ray | fece8db70d703da1aad192178bd50923e83cc99a | [
"Apache-2.0"
] | null | null | null | """This file implements a threaded stream controller to return logs back from
the ray clientserver.
"""
import sys
import logging
import queue
import threading
import grpc
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
logger = logging.getLogger(__name__)
# TODO(barakmich): Running a logger in a logger causes loopback.
# The client logger need its own root -- possibly this one.
# For the moment, let's just not propogate beyond this point.
logger.propagate = False
class LogstreamClient:
    """Threaded client for the Ray client server's log stream.

    A daemon thread owns the bidirectional gRPC ``Logstream`` call: outgoing
    settings requests are drained from ``request_queue`` and every incoming
    record is dispatched to ``stdstream`` (stdout/stderr passthrough) or
    ``log`` (regular log records).
    """

    def __init__(self, channel: "grpc._channel.Channel"):
        """Initializes a thread-safe log stream over a Ray Client gRPC channel.

        Args:
            channel: connected gRPC channel
        """
        self.channel = channel
        self.request_queue = queue.Queue()
        self.log_thread = self._start_logthread()
        self.log_thread.start()

    def _start_logthread(self) -> threading.Thread:
        # Daemon thread so a wedged stream can never block interpreter exit.
        return threading.Thread(target=self._log_main, args=(), daemon=True)

    def _log_main(self) -> None:
        """Thread body: pump the gRPC log stream until closed or cancelled."""
        stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.channel)
        # iter(queue.get, None) yields queued requests until close() puts None,
        # which terminates the request iterator and thus the stream.
        log_stream = stub.Logstream(iter(self.request_queue.get, None))
        try:
            for record in log_stream:
                if record.level < 0:
                    # Negative levels encode raw stdout (-1) / stderr (-2)
                    # passthrough. Route these only to the std streams; do
                    # not also hand an invalid negative level to the logger.
                    self.stdstream(level=record.level, msg=record.msg)
                else:
                    self.log(level=record.level, msg=record.msg)
        except grpc.RpcError as e:
            if grpc.StatusCode.CANCELLED != e.code():
                # Not just shutting down normally
                logger.error(
                    f"Got Error from logger channel -- shutting down: {e}")
                raise e

    def log(self, level: int, msg: str):
        """Log the message from the log stream.
        By default, calls logger.log but this can be overridden.

        Args:
            level: The loglevel of the received log message
            msg: The content of the message
        """
        logger.log(level=level, msg=msg)

    def stdstream(self, level: int, msg: str):
        """Log the stdout/stderr entry from the log stream.
        By default, calls print but this can be overridden.

        Args:
            level: The loglevel of the received log message
            msg: The content of the message
        """
        # -2 marks remote stderr; anything else in the negative range goes
        # to stdout.
        print_file = sys.stderr if level == -2 else sys.stdout
        print(msg, file=print_file)

    def set_logstream_level(self, level: int):
        """Set the minimum level for both the local logger and the server."""
        logger.setLevel(level)
        req = ray_client_pb2.LogSettingsRequest()
        req.enabled = True
        req.loglevel = level
        self.request_queue.put(req)

    def close(self) -> None:
        """Terminate the request iterator and wait for the stream thread."""
        self.request_queue.put(None)
        if self.log_thread is not None:
            self.log_thread.join()

    def disable_logs(self) -> None:
        """Ask the server to stop sending log records."""
        req = ray_client_pb2.LogSettingsRequest()
        req.enabled = False
        self.request_queue.put(req)
| 34.206897 | 79 | 0.640793 |
acf26264d52658db74e8cd31d0daa210dbcf563f | 1,100 | py | Python | dynamic_profile/migrations/0014_auto_20190915_1826.py | ebsuku/wazimap-dynamic-profile | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | [
"MIT"
] | 1 | 2020-02-04T05:03:54.000Z | 2020-02-04T05:03:54.000Z | dynamic_profile/migrations/0014_auto_20190915_1826.py | ebsuku/wazimap-dynamic-profile | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | [
"MIT"
] | null | null | null | dynamic_profile/migrations/0014_auto_20190915_1826.py | ebsuku/wazimap-dynamic-profile | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | [
"MIT"
] | 1 | 2020-01-03T20:30:43.000Z | 2020-01-03T20:30:43.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-09-15 18:26
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.hstore
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax three IndicatorProfile fields to be nullable.

    ``exclude`` and ``key_order`` become optional Postgres arrays of short
    strings; ``recode`` becomes an optional HStore mapping.
    """

    dependencies = [
        ('dynamic_profile', '0013_auto_20190913_1036'),
    ]

    operations = [
        # Optional array of up-to-20-char strings (unbounded length).
        migrations.AlterField(
            model_name='indicatorprofile',
            name='exclude',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=20), blank=True, null=True, size=None),
        ),
        # Same shape as `exclude`: optional ordering of keys.
        migrations.AlterField(
            model_name='indicatorprofile',
            name='key_order',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=20), blank=True, null=True, size=None),
        ),
        # Optional key/value recode mapping stored as Postgres hstore.
        migrations.AlterField(
            model_name='indicatorprofile',
            name='recode',
            field=django.contrib.postgres.fields.hstore.HStoreField(blank=True, null=True),
        ),
    ]
| 33.333333 | 138 | 0.660909 |
acf262c82c146c4b302d250af0b993f7003bc078 | 1,423 | py | Python | sample.py | royfxy/blivedm | a541a2ce990937688a3498608fb33f1a9ba34e00 | [
"MIT"
] | 421 | 2018-05-22T09:14:22.000Z | 2022-03-30T16:06:39.000Z | sample.py | royfxy/blivedm | a541a2ce990937688a3498608fb33f1a9ba34e00 | [
"MIT"
] | 21 | 2018-06-01T09:46:58.000Z | 2022-03-28T08:17:00.000Z | sample.py | royfxy/blivedm | a541a2ce990937688a3498608fb33f1a9ba34e00 | [
"MIT"
] | 102 | 2018-06-13T05:43:58.000Z | 2022-03-31T04:06:50.000Z | # -*- coding: utf-8 -*-
import asyncio
import blivedm
class MyBLiveClient(blivedm.BLiveClient):
    """Example client that prints events received from a Bilibili live room."""

    # Demonstrates how to register a custom command handler: copy the base
    # class table so the patch stays local to this subclass.
    _COMMAND_HANDLERS = blivedm.BLiveClient._COMMAND_HANDLERS.copy()

    async def __on_vip_enter(self, command):
        # Print the raw command payload as-is.
        print(command)

    _COMMAND_HANDLERS['WELCOME'] = __on_vip_enter  # VIP entering the room

    async def _on_receive_popularity(self, popularity: int):
        # Current popularity value of the room.
        print(f'当前人气值:{popularity}')

    async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
        # Chat message: "<user>:<text>".
        print(f'{danmaku.uname}:{danmaku.msg}')

    async def _on_receive_gift(self, gift: blivedm.GiftMessage):
        # Gift: sender, gift name x count, and coin cost.
        print(f'{gift.uname} 赠送{gift.gift_name}x{gift.num} ({gift.coin_type}币x{gift.total_coin})')

    async def _on_buy_guard(self, message: blivedm.GuardBuyMessage):
        # Guard (paid membership) purchase.
        print(f'{message.username} 购买{message.gift_name}')

    async def _on_super_chat(self, message: blivedm.SuperChatMessage):
        # Paid "super chat" message with its price.
        print(f'醒目留言 ¥{message.price} {message.uname}:{message.message}')
async def main():
    """Connect to one live room and relay its events until the stream ends."""
    # The first argument is the live-room ID; pass ssl=False if certificate
    # verification fails in your environment.
    client = MyBLiveClient(14917277, ssl=True)
    run_task = client.start()
    try:
        # For a quick smoke test you could stop after a few seconds:
        #   await asyncio.sleep(5)
        #   run_task = client.stop()
        # or simply:
        #   run_task.cancel()
        await run_task
    finally:
        await client.close()
if __name__ == '__main__':
    # Drive the async entry point to completion on the default event loop.
    asyncio.get_event_loop().run_until_complete(main())
| 27.365385 | 98 | 0.673928 |
acf263d5b38e58dc04e0f125d804254e2c28ceb3 | 1,682 | py | Python | setup.py | XanaduAI/PennyLane-qsharp | 331c0933c7fe6b883cdb6d6219038c9b2f15831d | [
"Apache-2.0"
] | 8 | 2019-05-02T19:54:52.000Z | 2020-07-15T05:27:25.000Z | setup.py | XanaduAI/PennyLane-qsharp | 331c0933c7fe6b883cdb6d6219038c9b2f15831d | [
"Apache-2.0"
] | 2 | 2019-07-29T15:52:57.000Z | 2019-11-26T16:09:19.000Z | setup.py | XanaduAI/PennyLane-qsharp | 331c0933c7fe6b883cdb6d6219038c9b2f15831d | [
"Apache-2.0"
] | 2 | 2020-07-12T17:51:14.000Z | 2020-07-15T05:31:33.000Z | #!/usr/bin/env python3
import sys
import os
from setuptools import setup

# The last line of _version.py is expected to look like: __version__ = "x.y.z"
with open("pennylane_qsharp/_version.py") as f:
    version = f.readlines()[-1].split()[-1].strip("\"'")

requirements = [
    "qsharp",
    "pennylane>=0.11"
]

# Read the long description eagerly so the file handle is closed promptly;
# a bare open(...).read() inside the dict literal leaks the handle.
with open('README.rst') as readme:
    long_description = readme.read()

info = {
    'name': 'PennyLane-qsharp',
    'version': version,
    'maintainer': 'Xanadu Inc.',
    'maintainer_email': 'josh@xanadu.ai',
    'url': 'https://github.com/PennyLaneAI/pennylane-qsharp',
    'license': 'Apache License 2.0',
    'packages': [
        'pennylane_qsharp'
    ],
    # Registers the device with PennyLane's plugin discovery mechanism.
    'entry_points': {
        'pennylane.plugins': [
            'microsoft.QuantumSimulator = pennylane_qsharp:QuantumSimulatorDevice'
        ],
    },
    'description': 'Microsoft Quantum Development Kit backend for PennyLane',
    'long_description': long_description,
    'provides': ["pennylane_qsharp"],
    'install_requires': requirements
}

classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Natural Language :: English",
    "Operating System :: POSIX",
    "Operating System :: MacOS :: MacOS X",
    "Operating System :: POSIX :: Linux",
    "Operating System :: Microsoft :: Windows",
    "Programming Language :: Python",
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3 :: Only',
    "Topic :: Scientific/Engineering :: Physics"
]

setup(classifiers=classifiers, **info)
| 29 | 83 | 0.598692 |
acf264e79fd4d6a7bbc649fb6ebe4aac31ab41cc | 1,020 | py | Python | kubernetes/test/test_v1_resource_field_selector.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:44:11.000Z | 2021-06-10T23:44:11.000Z | kubernetes/test/test_v1_resource_field_selector.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_resource_field_selector.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2018-11-06T16:33:43.000Z | 2018-11-06T16:33:43.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_resource_field_selector import V1ResourceFieldSelector
class TestV1ResourceFieldSelector(unittest.TestCase):
    """ V1ResourceFieldSelector unit test stubs """

    def setUp(self):
        # No shared fixtures required for this generated stub.
        pass

    def tearDown(self):
        pass

    def testV1ResourceFieldSelector(self):
        """
        Test V1ResourceFieldSelector

        Currently only a construction placeholder generated by
        swagger-codegen; the model instantiation below is left disabled.
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_resource_field_selector.V1ResourceFieldSelector()
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| 22.666667 | 105 | 0.72549 |
acf265d28ba10f85b909884301d7e7b4ce6fdfb5 | 725 | py | Python | training/fl_client_libs.py | theboxahaan/Oort | 6f2ddaaaad53b1e770e3b6d46f7d8a0a86331358 | [
"Apache-2.0"
] | 43 | 2021-05-27T09:20:30.000Z | 2022-03-11T03:55:37.000Z | training/fl_client_libs.py | zyxum/Oort | 05a3aa1677a10f8e621055b1626ef82e73d09759 | [
"Apache-2.0"
] | 3 | 2021-06-25T11:54:08.000Z | 2021-08-08T23:03:16.000Z | training/fl_client_libs.py | zyxum/Oort | 05a3aa1677a10f8e621055b1626ef82e73d09759 | [
"Apache-2.0"
] | 14 | 2021-05-30T14:24:30.000Z | 2022-02-23T23:14:50.000Z | # package for client
from flLibs import *
logDir = os.path.join(args.log_path, 'logs', args.job_name, args.time_stamp, 'worker')
logFile = os.path.join(logDir, 'log_'+str(args.this_rank))
def init_logging():
    """Configure root logging to write to the per-worker log file and stderr.

    Ensures the worker log directory exists before attaching the file
    handler; the file handler appends so repeated runs keep history.
    """
    # exist_ok=True already makes directory creation idempotent, so the
    # previous isdir() pre-check was redundant (and a check-then-create race).
    os.makedirs(logDir, exist_ok=True)

    logging.basicConfig(
        format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
        datefmt='%Y-%m-%d:%H:%M:%S',
        level=logging.INFO,
        handlers=[
            logging.FileHandler(logFile, mode='a'),
            logging.StreamHandler()
        ])
def initiate_client_setting():
    """Client-side bootstrap hook; currently only wires up logging."""
    init_logging()
| 32.954545 | 105 | 0.554483 |
acf266bc4d4f14d3e96c2b0b57f33f67cba89503 | 1,227 | py | Python | oled.py | casterbn/open-rtk | 21b8899792b5c63cde3801806cbeee78ebd503ae | [
"MIT"
] | 11 | 2019-09-14T15:12:08.000Z | 2021-04-26T08:46:11.000Z | oled.py | VimDrones/open-rtk | 35e579d3269a6c8dfa1e8160e0f447e620ea961c | [
"MIT"
] | null | null | null | oled.py | VimDrones/open-rtk | 35e579d3269a6c8dfa1e8160e0f447e620ea961c | [
"MIT"
] | 2 | 2020-01-09T02:18:31.000Z | 2020-04-24T03:43:27.000Z | import time
import sys
import struct
HEADER = 0xAA
END = 0x55
# gnss_count = 14
# ip = [192,168,1,100]
# acc = 2
# survey_in = False
# cpu_usage = 80
# memory_usage = 60
# empty = 0
class Oled(object):
    """Pushes RTK status frames to an external OLED display over SPI.

    With ``dev=True`` no SPI hardware is touched; frames are only packed
    and printed, so the class can run on a development machine without
    the ``spidev`` package.
    """

    def __init__(self, dev=False):
        self.dev = dev
        if not self.dev:
            # Imported lazily so dev machines do not need spidev installed.
            import spidev
            self.spi = spidev.SpiDev()
            self.spi.open(0,0)
            # SPI mode 3 (CPOL=1, CPHA=1) at 2 MHz.
            self.spi.mode = 0b11
            self.spi.max_speed_hz = 125000 * 16

    def refresh(self, gnss_count, ip, acc, survey_in, cpu_usage, memory_usage, empty1=0, empty2=0):
        """Pack one little-endian status frame and send it to the display.

        Frame layout: header | gnss_count | 4-byte IP | uint32 acc |
        survey_in | cpu | mem | empty1 | empty2 | end marker.

        Args:
            gnss_count: satellites in view (0-255).
            ip: host IPv4 address as a sequence of four ints.
            acc: survey-in accuracy; clamped to the uint32 maximum so
                struct's 'I' field cannot overflow.
            survey_in: truthy once survey-in has completed.
            cpu_usage: CPU usage percentage.
            memory_usage: memory usage percentage.
            empty1: reserved padding byte.
            empty2: reserved padding byte.
        """
        if acc > 4294967295:
            acc = 4294967295
        data = struct.pack('<B B BBBB I B B B B B B', HEADER, gnss_count, *ip, acc, survey_in, cpu_usage, memory_usage, empty1, empty2, END)
        if not self.dev:
            self.spi.xfer(data)
        # Echo the decoded frame for visibility while debugging.
        print(struct.unpack('<B B BBBB I B B B B B B', data))
        if False:  # extra debug dump, disabled
            print("ublox.gps_count", gnss_count)
            print("ublox.is_survey_in_success", survey_in)
            print("ublox.survey_in_acc", acc)
            print("host_ip", ip)
            print("cpu_usage", cpu_usage)
            print("memory_usage", memory_usage)
            print(len(data))
| 27.886364 | 140 | 0.571312 |
acf266d09fbf7c9533e8e06f9e9400182dc95334 | 169 | py | Python | L-A-3/permutation.py | AsifHasanChowdhury/Airtificial-Intelligence-CSE422-BRACU- | 03acedf4694111eddde3c1ccce9d009571a7f546 | [
"MIT"
] | null | null | null | L-A-3/permutation.py | AsifHasanChowdhury/Airtificial-Intelligence-CSE422-BRACU- | 03acedf4694111eddde3c1ccce9d009571a7f546 | [
"MIT"
] | null | null | null | L-A-3/permutation.py | AsifHasanChowdhury/Airtificial-Intelligence-CSE422-BRACU- | 03acedf4694111eddde3c1ccce9d009571a7f546 | [
"MIT"
] | null | null | null | x=input()
# Read one line of space-separated integers.
lst = [int(token) for token in input().split()]
n = len(lst)
# Gauss formula for 1 + 2 + ... + n, in pure integer arithmetic
# (the previous int((n*(n+1))/2) went through float division, and the
# name `sum` shadowed the builtin).
expected_total = n * (n + 1) // 2
print(expected_total)
# Actual total of the provided values.
actual_total = sum(lst)
print(actual_total)
| 14.083333 | 38 | 0.597633 |
acf267650c1646a0a2560c1976f9b552d7b167f0 | 16,475 | py | Python | magenta/models/onsets_frames_transcription/model.py | cristianmtr/magenta | ac2d8ae455fdd07f4b46dec82aedab22fcb6bbbd | [
"Apache-2.0"
] | null | null | null | magenta/models/onsets_frames_transcription/model.py | cristianmtr/magenta | ac2d8ae455fdd07f4b46dec82aedab22fcb6bbbd | [
"Apache-2.0"
] | null | null | null | magenta/models/onsets_frames_transcription/model.py | cristianmtr/magenta | ac2d8ae455fdd07f4b46dec82aedab22fcb6bbbd | [
"Apache-2.0"
] | 1 | 2019-11-26T06:30:52.000Z | 2019-11-26T06:30:52.000Z | # Copyright 2018 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Onset-focused model for piano transcription."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.common import flatten_maybe_padded_sequences
from magenta.common import tf_utils
from magenta.models.onsets_frames_transcription import constants
import tensorflow as tf
import tensorflow.contrib.slim as slim
def conv_net(inputs, hparams):
  """Builds the ConvNet from Kelz 2016.

  Args:
    inputs: 4-D spectrogram tensor; dims 0/1 (batch, time) are preserved
      and dims 2/3 (freq, channels) are convolved then flattened.
    hparams: hyperparameters describing the conv stack (temporal_sizes,
      freq_sizes, num_filters, pool_sizes, dropout_keep_amts, fc_size,
      fc_dropout_keep_amt).

  Returns:
    A [batch, time, fc_size] tensor of per-frame features.
  """
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      activation_fn=tf.nn.relu,
      weights_initializer=tf.contrib.layers.variance_scaling_initializer(
          factor=2.0, mode='FAN_AVG', uniform=True)):
    net = inputs
    i = 0
    # Each zipped tuple configures one conv layer: kernel (time x freq),
    # filter count, frequency-axis pooling, and dropout keep probability.
    for (conv_temporal_size, conv_freq_size,
         num_filters, freq_pool_size, dropout_amt) in zip(
             hparams.temporal_sizes, hparams.freq_sizes, hparams.num_filters,
             hparams.pool_sizes, hparams.dropout_keep_amts):
      net = slim.conv2d(
          net,
          num_filters, [conv_temporal_size, conv_freq_size],
          scope='conv' + str(i),
          normalizer_fn=slim.batch_norm)
      if freq_pool_size > 1:
        # Pool only along the frequency axis so time resolution is kept.
        net = slim.max_pool2d(
            net, [1, freq_pool_size],
            stride=[1, freq_pool_size],
            scope='pool' + str(i))
      if dropout_amt < 1:
        net = slim.dropout(net, dropout_amt, scope='dropout' + str(i))
      i += 1

    # Flatten while preserving batch and time dimensions.
    dims = tf.shape(net)
    net = tf.reshape(
        net, (dims[0], dims[1], net.shape[2].value * net.shape[3].value),
        'flatten_end')

    net = slim.fully_connected(net, hparams.fc_size, scope='fc_end')
    net = slim.dropout(net, hparams.fc_dropout_keep_amt, scope='dropout_end')

    return net
def cudnn_lstm_layer(inputs,
                     batch_size,
                     num_units,
                     lengths=None,
                     stack_size=1,
                     rnn_dropout_drop_amt=0,
                     is_training=True,
                     bidirectional=True):
  """Create a LSTM layer that uses cudnn.

  Two code paths: when `lengths` is given, each stack level is built from
  two unidirectional CudnnLSTMs (the backward one runs over a
  length-masked reversed sequence); otherwise a single multi-layer
  CudnnLSTM handles the whole (fixed-length) batch.

  Args:
    inputs: batch-major [batch, time, units] tensor.
    batch_size: static batch size used to build the zero initial states.
    num_units: LSTM cell size.
    lengths: optional per-example sequence lengths.
    stack_size: number of stacked LSTM levels.
    rnn_dropout_drop_amt: cudnn inter-layer dropout probability.
    is_training: passed through to cudnn so dropout is disabled at eval.
    bidirectional: whether to also run a backward pass per level.

  Returns:
    Batch-major [batch, time, units] outputs of the top stack level.
  """
  # Cudnn expects time-major input.
  inputs_t = tf.transpose(inputs, [1, 0, 2])
  if lengths is not None:
    all_outputs = [inputs_t]
    for i in range(stack_size):
      with tf.variable_scope('stack_' + str(i)):
        with tf.variable_scope('forward'):
          lstm_fw = tf.contrib.cudnn_rnn.CudnnLSTM(
              num_layers=1,
              num_units=num_units,
              direction='unidirectional',
              dropout=rnn_dropout_drop_amt,
              kernel_initializer=tf.contrib.layers.variance_scaling_initializer(
              ),
              bias_initializer=tf.zeros_initializer(),
          )
          # Zero initial cell/hidden state per stack level.
          c_fw = tf.zeros([1, batch_size, num_units], tf.float32)
          h_fw = tf.zeros([1, batch_size, num_units], tf.float32)
          outputs_fw, _ = lstm_fw(
              all_outputs[-1], (h_fw, c_fw), training=is_training)
        combined_outputs = outputs_fw
        if bidirectional:
          with tf.variable_scope('backward'):
            lstm_bw = tf.contrib.cudnn_rnn.CudnnLSTM(
                num_layers=1,
                num_units=num_units,
                direction='unidirectional',
                dropout=rnn_dropout_drop_amt,
                kernel_initializer=tf.contrib.layers
                .variance_scaling_initializer(),
                bias_initializer=tf.zeros_initializer(),
            )
            c_bw = tf.zeros([1, batch_size, num_units], tf.float32)
            h_bw = tf.zeros([1, batch_size, num_units], tf.float32)
            # Reverse only the valid (length-limited) part of each example
            # so padding never leaks into the backward pass.
            inputs_reversed = tf.reverse_sequence(
                all_outputs[-1], lengths, seq_axis=0, batch_axis=1)
            outputs_bw, _ = lstm_bw(
                inputs_reversed, (h_bw, c_bw), training=is_training)
            outputs_bw = tf.reverse_sequence(
                outputs_bw, lengths, seq_axis=0, batch_axis=1)
          combined_outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
        all_outputs.append(combined_outputs)

    # for consistency with cudnn, here we just return the top of the stack,
    # although this can easily be altered to do other things, including be
    # more resnet like
    return tf.transpose(all_outputs[-1], [1, 0, 2])
  else:
    lstm = tf.contrib.cudnn_rnn.CudnnLSTM(
        num_layers=stack_size,
        num_units=num_units,
        direction='bidirectional' if bidirectional else 'unidirectional',
        dropout=rnn_dropout_drop_amt,
        kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
        bias_initializer=tf.zeros_initializer(),
    )
    # One state slice per layer (and per direction when bidirectional).
    stack_multiplier = 2 if bidirectional else 1
    c = tf.zeros([stack_multiplier * stack_size, batch_size, num_units],
                 tf.float32)
    h = tf.zeros([stack_multiplier * stack_size, batch_size, num_units],
                 tf.float32)
    outputs, _ = lstm(inputs_t, (h, c), training=is_training)
    # Back to batch-major.
    outputs = tf.transpose(outputs, [1, 0, 2])

    return outputs
def lstm_layer(inputs,
               batch_size,
               num_units,
               lengths=None,
               stack_size=1,
               use_cudnn=False,
               rnn_dropout_drop_amt=0,
               is_training=True,
               bidirectional=True):
  """Create a LSTM layer using the specified backend.

  Dispatches to the cudnn implementation when `use_cudnn` is set;
  otherwise builds a cudnn-compatible (checkpoint-interchangeable)
  bidirectional stack with tf.contrib.rnn.

  NOTE(review): the non-cudnn fallback is always bidirectional and does
  not consult `bidirectional` or `is_training` -- confirm whether the
  fallback is only ever used with bidirectional=True.
  """
  if use_cudnn:
    return cudnn_lstm_layer(inputs, batch_size, num_units, lengths, stack_size,
                            rnn_dropout_drop_amt, is_training, bidirectional)
  else:
    # Cudnn dropout is not reproducible here, so it must be disabled.
    assert rnn_dropout_drop_amt == 0
    cells_fw = [
        tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_units)
        for _ in range(stack_size)
    ]
    cells_bw = [
        tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_units)
        for _ in range(stack_size)
    ]
    with tf.variable_scope('cudnn_lstm'):
      (outputs, unused_state_f,
       unused_state_b) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
           cells_fw,
           cells_bw,
           inputs,
           dtype=tf.float32,
           sequence_length=lengths,
           parallel_iterations=1)

    return outputs
def acoustic_model(inputs, hparams, lstm_units, lengths, is_training=True):
  """Acoustic model that handles all specs for a sequence in one window.

  Runs the Kelz-style ConvNet over the spectrogram and, when `lstm_units`
  is non-zero, follows it with an LSTM stack; otherwise returns the conv
  features directly.
  """
  conv_output = conv_net(inputs, hparams)

  if lstm_units:
    return lstm_layer(
        conv_output,
        hparams.batch_size,
        lstm_units,
        # Length masking is opt-in via the use_lengths hyperparameter.
        lengths=lengths if hparams.use_lengths else None,
        stack_size=hparams.acoustic_rnn_stack_size,
        use_cudnn=hparams.use_cudnn,
        is_training=is_training,
        bidirectional=hparams.bidirectional)
  else:
    return conv_output
def get_model(transcription_data, hparams, is_training=True):
  """Builds the acoustic model.

  Constructs four prediction heads -- onsets, offsets, velocities, and
  frames -- each on its own acoustic stack, registers their losses with
  tf.losses, and builds pianoroll summary images.

  Args:
    transcription_data: object providing spec, onsets/offsets/velocities,
      labels, label_weights, and lengths tensors.
    hparams: model hyperparameters (see get_default_hparams).
    is_training: controls batch norm / dropout mode.

  Returns:
    Tuple of (total loss, dict of per-head loss tensors, flattened frame
    labels, flattened frame predictions, dict of summary image tensors).

  Raises:
    ValueError: if stop_activation_gradient is set without activation_loss.
  """
  onset_labels = transcription_data.onsets
  offset_labels = transcription_data.offsets
  velocity_labels = transcription_data.velocities
  frame_labels = transcription_data.labels
  frame_label_weights = transcription_data.label_weights
  lengths = transcription_data.lengths
  spec = transcription_data.spec

  if hparams.stop_activation_gradient and not hparams.activation_loss:
    raise ValueError(
        'If stop_activation_gradient is true, activation_loss must be true.')

  losses = {}
  with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
    # --- Onset head: per-pitch onset probabilities and log loss. ---
    with tf.variable_scope('onsets'):
      onset_outputs = acoustic_model(
          spec,
          hparams,
          lstm_units=hparams.onset_lstm_units,
          lengths=lengths,
          is_training=is_training)
      onset_probs = slim.fully_connected(
          onset_outputs,
          constants.MIDI_PITCHES,
          activation_fn=tf.sigmoid,
          scope='onset_probs')

      # onset_probs_flat is used during inference.
      onset_probs_flat = flatten_maybe_padded_sequences(onset_probs, lengths)
      onset_labels_flat = flatten_maybe_padded_sequences(onset_labels, lengths)
      # Named identity ops so inference code can fetch these by name.
      tf.identity(onset_probs_flat, name='onset_probs_flat')
      tf.identity(onset_labels_flat, name='onset_labels_flat')
      tf.identity(
          tf.cast(tf.greater_equal(onset_probs_flat, .5), tf.float32),
          name='onset_predictions_flat')

      onset_losses = tf_utils.log_loss(onset_labels_flat, onset_probs_flat)
      tf.losses.add_loss(tf.reduce_mean(onset_losses))
      losses['onset'] = onset_losses

    # --- Offset head: mirrors the onset head. ---
    with tf.variable_scope('offsets'):
      offset_outputs = acoustic_model(
          spec,
          hparams,
          lstm_units=hparams.offset_lstm_units,
          lengths=lengths,
          is_training=is_training)
      offset_probs = slim.fully_connected(
          offset_outputs,
          constants.MIDI_PITCHES,
          activation_fn=tf.sigmoid,
          scope='offset_probs')

      # offset_probs_flat is used during inference.
      offset_probs_flat = flatten_maybe_padded_sequences(offset_probs, lengths)
      offset_labels_flat = flatten_maybe_padded_sequences(
          offset_labels, lengths)
      tf.identity(offset_probs_flat, name='offset_probs_flat')
      tf.identity(offset_labels_flat, name='offset_labels_flat')
      tf.identity(
          tf.cast(tf.greater_equal(offset_probs_flat, .5), tf.float32),
          name='offset_predictions_flat')

      offset_losses = tf_utils.log_loss(offset_labels_flat, offset_probs_flat)
      tf.losses.add_loss(tf.reduce_mean(offset_losses))
      losses['offset'] = offset_losses

    # --- Velocity head: regression, trained only where onsets are active. ---
    with tf.variable_scope('velocity'):
      velocity_outputs = acoustic_model(
          spec,
          hparams,
          lstm_units=hparams.velocity_lstm_units,
          lengths=lengths,
          is_training=is_training)
      velocity_values = slim.fully_connected(
          velocity_outputs,
          constants.MIDI_PITCHES,
          activation_fn=None,
          scope='onset_velocities')

      velocity_values_flat = flatten_maybe_padded_sequences(
          velocity_values, lengths)
      tf.identity(velocity_values_flat, name='velocity_values_flat')
      velocity_labels_flat = flatten_maybe_padded_sequences(
          velocity_labels, lengths)
      # Squared error masked by the onset labels, so only frames with a
      # true onset contribute to the velocity loss.
      velocity_loss = tf.reduce_sum(
          onset_labels_flat *
          tf.square(velocity_labels_flat - velocity_values_flat),
          axis=1)
      tf.losses.add_loss(tf.reduce_mean(velocity_loss))
      losses['velocity'] = velocity_loss

    # --- Frame head: combines onset/activation/offset probabilities. ---
    with tf.variable_scope('frame'):
      if not hparams.share_conv_features:
        # TODO(eriche): this is broken when hparams.frame_lstm_units > 0
        activation_outputs = acoustic_model(
            spec,
            hparams,
            lstm_units=hparams.frame_lstm_units,
            lengths=lengths,
            is_training=is_training)
        activation_probs = slim.fully_connected(
            activation_outputs,
            constants.MIDI_PITCHES,
            activation_fn=tf.sigmoid,
            scope='activation_probs')
      else:
        activation_probs = slim.fully_connected(
            onset_outputs,
            constants.MIDI_PITCHES,
            activation_fn=tf.sigmoid,
            scope='activation_probs')

      # Optionally stop gradients so the frame head cannot backprop into
      # the other heads' stacks.
      probs = []
      if hparams.stop_onset_gradient:
        probs.append(tf.stop_gradient(onset_probs))
      else:
        probs.append(onset_probs)

      if hparams.stop_activation_gradient:
        probs.append(tf.stop_gradient(activation_probs))
      else:
        probs.append(activation_probs)

      if hparams.stop_offset_gradient:
        probs.append(tf.stop_gradient(offset_probs))
      else:
        probs.append(offset_probs)

      combined_probs = tf.concat(probs, 2)

      if hparams.combined_lstm_units > 0:
        outputs = lstm_layer(
            combined_probs,
            hparams.batch_size,
            hparams.combined_lstm_units,
            lengths=lengths if hparams.use_lengths else None,
            stack_size=hparams.combined_rnn_stack_size,
            use_cudnn=hparams.use_cudnn,
            is_training=is_training,
            bidirectional=hparams.bidirectional)
      else:
        outputs = combined_probs

      frame_probs = slim.fully_connected(
          outputs,
          constants.MIDI_PITCHES,
          activation_fn=tf.sigmoid,
          scope='frame_probs')

    frame_labels_flat = flatten_maybe_padded_sequences(frame_labels, lengths)
    frame_probs_flat = flatten_maybe_padded_sequences(frame_probs, lengths)
    tf.identity(frame_probs_flat, name='frame_probs_flat')
    frame_label_weights_flat = flatten_maybe_padded_sequences(
        frame_label_weights, lengths)
    if hparams.weight_frame_and_activation_loss:
      frame_loss_weights = frame_label_weights_flat
    else:
      frame_loss_weights = None
    frame_losses = tf_utils.log_loss(
        frame_labels_flat,
        frame_probs_flat,
        weights=frame_loss_weights)
    tf.losses.add_loss(tf.reduce_mean(frame_losses))
    losses['frame'] = frame_losses

    # Optional auxiliary loss on the activation probabilities themselves.
    if hparams.activation_loss:
      if hparams.weight_frame_and_activation_loss:
        activation_loss_weights = frame_label_weights
      else:
        activation_loss_weights = None
      activation_losses = tf_utils.log_loss(
          frame_labels_flat,
          flatten_maybe_padded_sequences(activation_probs, lengths),
          weights=activation_loss_weights)
      tf.losses.add_loss(tf.reduce_mean(activation_losses))
      losses['activation'] = activation_losses

  predictions_flat = tf.cast(tf.greater_equal(frame_probs_flat, .5), tf.float32)

  # Creates a pianoroll labels in red and probs in green [minibatch, 88]
  images = {}
  onset_pianorolls = tf.concat(
      [
          onset_labels[:, :, :, tf.newaxis], onset_probs[:, :, :, tf.newaxis],
          tf.zeros(tf.shape(onset_labels))[:, :, :, tf.newaxis]
      ],
      axis=3)
  images['OnsetPianorolls'] = onset_pianorolls
  offset_pianorolls = tf.concat([
      offset_labels[:, :, :, tf.newaxis], offset_probs[:, :, :, tf.newaxis],
      tf.zeros(tf.shape(offset_labels))[:, :, :, tf.newaxis]
  ],
                                axis=3)
  images['OffsetPianorolls'] = offset_pianorolls
  activation_pianorolls = tf.concat(
      [
          frame_labels[:, :, :, tf.newaxis], frame_probs[:, :, :, tf.newaxis],
          tf.zeros(tf.shape(frame_labels))[:, :, :, tf.newaxis]
      ],
      axis=3)
  images['ActivationPianorolls'] = activation_pianorolls

  return (tf.losses.get_total_loss(), losses, frame_labels_flat,
          predictions_flat, images)
def get_default_hparams():
  """Returns the default hyperparameters.

  Returns:
    A tf.contrib.training.HParams object representing the default
    hyperparameters for the model.
  """
  return tf.contrib.training.HParams(
      # Input pipeline / spectrogram settings.
      batch_size=8,
      spec_fmin=30.0,
      spec_n_bins=229,
      spec_type='mel',
      spec_mel_htk=True,
      spec_log_amplitude=True,
      transform_audio=True,
      # Optimization.
      learning_rate=0.0006,
      clip_norm=3,
      truncated_length=1500,  # 48 seconds
      # Per-head LSTM sizes; 0 disables the RNN for that head.
      onset_lstm_units=256,
      offset_lstm_units=256,
      velocity_lstm_units=0,
      frame_lstm_units=0,
      combined_lstm_units=256,
      onset_mode='length_ms',
      acoustic_rnn_stack_size=1,
      combined_rnn_stack_size=1,
      # using this will result in output not aligning with audio.
      backward_shift_amount_ms=0,
      # Loss wiring (see get_model for how these interact).
      activation_loss=False,
      stop_activation_gradient=False,
      onset_length=32,
      offset_length=32,
      decay_steps=10000,
      decay_rate=0.98,
      stop_onset_gradient=True,
      stop_offset_gradient=True,
      weight_frame_and_activation_loss=False,
      share_conv_features=False,
      # Convolutional stack configuration (consumed by conv_net).
      temporal_sizes=[3, 3, 3],
      freq_sizes=[3, 3, 3],
      num_filters=[48, 48, 96],
      pool_sizes=[1, 2, 2],
      dropout_keep_amts=[1.0, 0.25, 0.25],
      fc_size=768,
      fc_dropout_keep_amt=0.5,
      # RNN backend options.
      use_lengths=False,
      use_cudnn=True,
      rnn_dropout_drop_amt=0.0,
      bidirectional=True,
      onset_overlap=True,
  )
| 35.278373 | 80 | 0.661002 |
acf2680ad79aacc8c0109a4f3e10282c4d27ac17 | 526 | py | Python | src/generate.py | tianhuil/number-generator | 2178f1e59af39bf21fd099cdf20d1f28c21f6248 | [
"Apache-2.0"
] | null | null | null | src/generate.py | tianhuil/number-generator | 2178f1e59af39bf21fd099cdf20d1f28c21f6248 | [
"Apache-2.0"
] | null | null | null | src/generate.py | tianhuil/number-generator | 2178f1e59af39bf21fd099cdf20d1f28c21f6248 | [
"Apache-2.0"
] | null | null | null | from num2words import num2words
import os
import gzip
from tqdm import tqdm
# Absolute directory containing this script; data/ sits beside its parent.
dir_path = os.path.dirname(os.path.realpath(__file__))


def data_path(file_name: str) -> str:
    """Return the (non-normalized) path of *file_name* under ../data."""
    segments = (dir_path, "..", "data", file_name)
    return os.path.join(*segments)
MAX_INT = int(1e6)  # exclusive upper bound: spell out 0 .. 999,999


def _write_spellings(lang):
    """Write the spelled-out form of every number below MAX_INT, one per
    line, to a gzip-compressed data/<lang>.txt.gz file."""
    with gzip.open(data_path(lang + ".txt.gz"), "wt") as fh:
        for i in tqdm(range(MAX_INT)):
            fh.write(num2words(i, lang=lang) + "\n")


# The two per-language loops were identical copies; iterate instead.
for language in ("en", "fr"):
    _write_spellings(language)
| 23.909091 | 58 | 0.653992 |
acf269f59e2f436d02dd6eb304bd83353e5b4c4c | 471 | py | Python | taurex/chemistry.py | rychallener/TauREx3_public | eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677 | [
"BSD-3-Clause"
] | null | null | null | taurex/chemistry.py | rychallener/TauREx3_public | eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677 | [
"BSD-3-Clause"
] | null | null | null | taurex/chemistry.py | rychallener/TauREx3_public | eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677 | [
"BSD-3-Clause"
] | null | null | null |
from .data.profiles.chemistry import ChemistryFile
from .data.profiles.chemistry.chemistry import Chemistry
from .data.profiles.chemistry import TaurexChemistry
try:
from .data.profiles.chemistry.acechemistry import ACEChemistry
except ImportError:
print('Ace library not found. ACEChemistry could not be loaded')
from .data.profiles.chemistry.gas.gas import Gas
from .data.profiles.chemistry import ConstantGas
from .data.profiles.chemistry import TwoLayerGas
| 36.230769 | 68 | 0.825902 |
acf26a516dd05e4d00efbc8ff345416a74fe0ebe | 219 | py | Python | main/api/serializers.py | h0diush/welbex_tests | 68214188936e2dc432812eab17192ad70fedb1cc | [
"BSD-2-Clause"
] | null | null | null | main/api/serializers.py | h0diush/welbex_tests | 68214188936e2dc432812eab17192ad70fedb1cc | [
"BSD-2-Clause"
] | null | null | null | main/api/serializers.py | h0diush/welbex_tests | 68214188936e2dc432812eab17192ad70fedb1cc | [
"BSD-2-Clause"
] | null | null | null | from rest_framework import serializers
from main.models import BaseModel
class BaseModelSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the name, quantity and distance of a BaseModel."""

    class Meta:
        model = BaseModel
        # Read/write fields surfaced by the API.
        fields = ('name', 'qty', 'distance')
| 21.9 | 55 | 0.721461 |
acf26ae1d381eb4e09ba3a592c1771a1acb9d2e4 | 2,916 | py | Python | wandb/sdk/data_types/plotly.py | soumik12345/client | 31e4c2b143e6c219ea005fe4477e294f383f6888 | [
"MIT"
] | null | null | null | wandb/sdk/data_types/plotly.py | soumik12345/client | 31e4c2b143e6c219ea005fe4477e294f383f6888 | [
"MIT"
] | null | null | null | wandb/sdk/data_types/plotly.py | soumik12345/client | 31e4c2b143e6c219ea005fe4477e294f383f6888 | [
"MIT"
] | null | null | null | import codecs
import os
from typing import Sequence, Type, TYPE_CHECKING, Union
from wandb import util
from ._private import MEDIA_TMP
from .base_types.media import _numpy_arrays_to_lists, Media
from .base_types.wb_value import WBValue
from .image import Image
if TYPE_CHECKING: # pragma: no cover
import matplotlib # type: ignore
import pandas as pd # type: ignore
import plotly # type: ignore
from ..wandb_artifacts import Artifact as LocalArtifact
from ..wandb_run import Run as LocalRun
ValToJsonType = Union[
dict,
"WBValue",
Sequence["WBValue"],
"plotly.Figure",
"matplotlib.artist.Artist",
"pd.DataFrame",
object,
]
class Plotly(Media):
    """
    Wandb media type that stores a Plotly figure as a `.plotly.json` file.

    Matplotlib figures are converted to Plotly on construction; matplotlib
    figures that contain images cannot be converted and raise ValueError
    (use `make_plot_media`, which falls back to a static `Image` for them).

    Arguments:
        val: matplotlib or plotly figure
    """
    _log_type = "plotly-file"
    @classmethod
    def make_plot_media(
        cls: Type["Plotly"], val: Union["plotly.Figure", "matplotlib.artist.Artist"]
    ) -> Union[Image, "Plotly"]:
        # Matplotlib figures containing images cannot be represented as
        # Plotly JSON, so log those as a rasterized Image instead.
        if util.is_matplotlib_typename(util.get_full_typename(val)):
            if util.matplotlib_contains_images(val):
                return Image(val)
            val = util.matplotlib_to_plotly(val)
        return cls(val)
    def __init__(self, val: Union["plotly.Figure", "matplotlib.artist.Artist"]):
        super().__init__()
        # First, check to see if the incoming `val` object is a plotly figure
        if not util.is_plotly_figure_typename(util.get_full_typename(val)):
            # If it is not, but it is a matplotlib figure, then attempt to convert it to plotly
            if util.is_matplotlib_typename(util.get_full_typename(val)):
                if util.matplotlib_contains_images(val):
                    raise ValueError(
                        "Plotly does not currently support converting matplotlib figures containing images. \
                            You can convert the plot to a static image with `wandb.Image(plt)` "
                    )
                val = util.matplotlib_to_plotly(val)
            else:
                raise ValueError(
                    "Logged plots must be plotly figures, or matplotlib plots convertible to plotly via mpl_to_plotly"
                )
        # Serialize the figure to a temp JSON file; numpy arrays inside the
        # figure dict are converted to plain lists so the payload is JSON-safe.
        tmp_path = os.path.join(MEDIA_TMP.name, util.generate_id() + ".plotly.json")
        val = _numpy_arrays_to_lists(val.to_plotly_json())
        with codecs.open(tmp_path, "w", encoding="utf-8") as fp:
            util.json_dump_safer(val, fp)
        self._set_file(tmp_path, is_tmp=True, extension=".plotly.json")
    @classmethod
    def get_media_subdir(cls: Type["Plotly"]) -> str:
        # Directory (relative to the media root) where plotly files are stored.
        return os.path.join("media", "plotly")
    def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
        # Delegate to Media.to_json, then tag the payload with this media type.
        json_dict = super().to_json(run_or_artifact)
        json_dict["_type"] = self._log_type
        return json_dict
| 35.560976 | 118 | 0.636145 |
acf26d2777059109d7b5aca2ec71f00731d5ee19 | 1,259 | py | Python | 05_numpy.py | rriquelme/python_3_tutorial | 8392d1996cfcca02da1a5975a59652e687b15b21 | [
"MIT"
] | null | null | null | 05_numpy.py | rriquelme/python_3_tutorial | 8392d1996cfcca02da1a5975a59652e687b15b21 | [
"MIT"
] | null | null | null | 05_numpy.py | rriquelme/python_3_tutorial | 8392d1996cfcca02da1a5975a59652e687b15b21 | [
"MIT"
] | null | null | null | # What is numpy? Numerical Python.
# What does it have? ndarray, efficient multidimensional array, mathematical functions.
# How to use it? the common way is:
import numpy as np
# Generate random data: randn(3, 4) draws a 3x4 array of samples from the
# standard normal distribution (a fresh draw every call, hence two prints):
random_ndarray = np.random.randn(3,4)
print(random_ndarray)
random_ndarray = np.random.randn(3,4)
print(random_ndarray)
# Convert to int (truncates toward zero):
print(random_ndarray.astype(np.int32))
# A Python list can be converted to an ndarray:
L = [ 1,2,3,4,5,6,7,8,9]
print(L)
print(np.array(L))
# ...or multiple lists into a multidimensional ndarray:
print(np.array([L,L,L]))
# Arithmetic on an ndarray is applied element-wise:
nd_array = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(nd_array)
print(nd_array*2)
print(nd_array+2)
print(nd_array-2)
print(nd_array/2)
print(nd_array>5) # Comparison yields a boolean ndarray, usable as a mask:
print(nd_array[nd_array>5])
# Slicing -> "same" as lists, but with one index per axis
print(nd_array[1,2]) # 6
print(nd_array[0,2]) # 3
print(nd_array[:,2]) # third column
print(nd_array[1,:]) # second row
print(nd_array[1,-1]) # last element of the second row (scalar)
print(nd_array[1,-1:]) # same element, kept as a 1-element array
# Reshape (total element count must stay the same):
print(nd_array.reshape(9,1))
print(nd_array.reshape(1,9))
# Transpose:
print(nd_array.T)
# Matrix product:
print(np.dot(nd_array,nd_array))
## Could be improved adding more functions... max min sum mean... etc
acf26dd21d5446718e67c1d6b9ba623bd845fb87 | 6,623 | py | Python | tests/test_tabular_dataset_properties.py | floscha/tabular-dataset | e1df3da2dc3197a24ddca893fa44712c337c20b9 | [
"MIT"
] | null | null | null | tests/test_tabular_dataset_properties.py | floscha/tabular-dataset | e1df3da2dc3197a24ddca893fa44712c337c20b9 | [
"MIT"
] | 40 | 2019-06-26T16:45:11.000Z | 2019-10-03T05:57:57.000Z | tests/test_tabular_dataset_properties.py | floscha/tabular-dataset | e1df3da2dc3197a24ddca893fa44712c337c20b9 | [
"MIT"
] | 1 | 2019-07-28T06:52:03.000Z | 2019-07-28T06:52:03.000Z | import datetime
import unittest
from typing import Iterator
import numpy as np
import pandas as pd
import pytest
from tabular_dataset import TabularDataset
from tabular_dataset.columns import (BinaryColumns, CategoricalColumns,
NumericalColumns)
def get_test_df():
    """Build the 4-row fixture frame shared by most tests in this module."""
    data = {
        'A': [1, 2, 3, np.nan],
        'B': [0, 1, 0, np.nan],
        'C': list('abba'),
        'target': list('xyzx'),
    }
    return pd.DataFrame(data)
def test_setting_both_target_column_and_target_columns_raises_exception():
    """Passing both the singular and plural target kwargs is ambiguous."""
    frame = get_test_df()
    with pytest.raises(ValueError):
        TabularDataset(frame,
                       target_column='target',
                       target_columns=['target'])
def test_infer_columns_types():
    """Column groups are inferred from dtypes/values when not specified."""
    frame = pd.DataFrame({
        'boolean_bin': [False, False, True, True],
        'numeric_bin': [0, 0, 1, 1],
        'cat': list('abcd'),
        'num': [1, 2, 3, np.nan],
        'dt': [datetime.datetime(2018, 1, 1)] * 4
    })
    dataset = TabularDataset(frame, infer_column_types=True)
    assert dataset.bin.column_names == ['boolean_bin', 'numeric_bin']
    assert dataset.cat.column_names == ['cat']
    assert dataset.num.column_names == ['num']
    assert dataset.dt.column_names == ['dt']
def test_infer_columns_types_with_some_column_specified():
    """A manually declared column must win over inference: 'numeric_bin_2'
    is declared numerical, so only 'numeric_bin_1' is inferred as binary."""
    frame = pd.DataFrame({
        'numeric_bin_1': [0, 0, 1, 1],
        'numeric_bin_2': [0, 0, 1, 1]
    })
    dataset = TabularDataset(frame, infer_column_types=True,
                             numerical_columns=['numeric_bin_2'])
    assert dataset.bin.column_names == ['numeric_bin_1']
    assert dataset.num.column_names == ['numeric_bin_2']
def test_repr():
    """repr() lists the row count and every configured column group."""
    dataset = TabularDataset(get_test_df(),
                             numerical_columns=['A'], binary_columns=['B'],
                             categorical_columns=['C'], datetime_columns=['D'],
                             target_column='target')
    expected = "\n".join([
        "TabularDataset (4 rows)",
        "\tNumerical Columns: ['A']",
        "\tBinary Columns: ['B']",
        "\tCategorical Columns: ['C']",
        "\tDatetime Columns: ['D']",
        "\tTarget Column: 'target'",
    ])
    assert repr(dataset) == expected
def test_repr_with_multiple_target_columns():
    """With target_columns (plural), repr() uses the plural label."""
    dataset = TabularDataset(get_test_df(), target_columns=['A', 'B'])
    expected = "TabularDataset (4 rows)\n\tTarget Columns: ['A', 'B']"
    assert repr(dataset) == expected
def test_x():
    """`x` exposes the feature columns as a numpy array."""
    frame = get_test_df()
    dataset = TabularDataset(frame, numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert repr(dataset.x) == repr(frame[['A', 'B', 'C']].values)
def test_y():
    """`y` exposes the target column as a numpy array."""
    frame = get_test_df()
    dataset = TabularDataset(frame, numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert repr(dataset.y) == repr(frame[['target']].values)
def test_x_train():
    """Without separate test data, x_train equals the full feature matrix."""
    frame = get_test_df()
    dataset = TabularDataset(frame, numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert repr(dataset.x_train) == repr(frame[['A', 'B', 'C']].values)
def test_y_train():
    """Without separate test data, y_train equals the full target column."""
    frame = get_test_df()
    dataset = TabularDataset(frame, numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert repr(dataset.y_train) == repr(frame[['target']].values)
def test_x_test():
    """`x_test` pulls the features from the separately supplied test frame."""
    frame = get_test_df()
    holdout = frame.iloc[-2:]
    dataset = TabularDataset(frame, test_data=holdout,
                             numerical_columns=['A'], binary_columns=['B'],
                             categorical_columns=['C'], target_column='target')
    assert repr(dataset.x_test) == repr(holdout[['A', 'B', 'C']].values)
def test_y_test():
    """`y_test` pulls the target from the separately supplied test frame."""
    frame = get_test_df()
    holdout = frame.iloc[-2:]
    dataset = TabularDataset(frame, test_data=holdout,
                             numerical_columns=['A'], binary_columns=['B'],
                             categorical_columns=['C'], target_column='target')
    assert repr(dataset.y_test) == repr(holdout[['target']].values)
def test_getting_test_data_raises_exception_without_specified_test_data():
    """Accessing x_test without having supplied test_data must raise."""
    dataset = TabularDataset(get_test_df(), numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    with pytest.raises(ValueError):
        # TODO Assert error message as well
        _ = dataset.x_test
def test_num_abbreviation():
    """`num` is a shorthand accessor for the numerical column group."""
    dataset = TabularDataset(get_test_df(), numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert isinstance(dataset.num, NumericalColumns)
def test_bin_abbreviation():
    """`bin` is a shorthand accessor for the binary column group."""
    dataset = TabularDataset(get_test_df(), numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert isinstance(dataset.bin, BinaryColumns)
def test_cat_abbreviation():
    """`cat` is a shorthand accessor for the categorical column group."""
    dataset = TabularDataset(get_test_df(), numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    assert isinstance(dataset.cat, CategoricalColumns)
def test_train_test_split():
    """An unshuffled 75/25 split of the 4-row fixture yields 3/1 rows,
    with four one-hot feature columns and a single target column."""
    dataset = TabularDataset(get_test_df(), categorical_columns=['A'],
                             target_column='target')
    dataset.categorical.impute()
    dataset.categorical.encode(add_unk_category=True)
    dataset.categorical.one_hot()
    x_train, x_test, y_train, y_test = dataset.train_test_split(
        test_size=0.25, shuffle=False)
    assert x_train.shape == (3, 4)
    assert x_test.shape == (1, 4)
    assert y_train.shape == (3, 1)
    assert y_test.shape == (1, 1)
def test_k_fold_cross_validation():
    """split() returns a lazy iterator of n_splits folds; each fold of the
    4-row fixture holds 3 train rows and 1 test row."""
    dataset = TabularDataset(get_test_df(), numerical_columns=['A'],
                             binary_columns=['B'], categorical_columns=['C'],
                             target_column='target')
    folds = dataset.split(n_splits=4)
    assert isinstance(folds, Iterator)
    materialized = list(folds)
    assert len(materialized) == 4
    for x_train, x_test, y_train, y_test in materialized:
        assert len(x_train) == 3
        assert len(x_test) == 1
        assert len(y_train) == 3
        assert len(y_test) == 1
if __name__ == '__main__':
    # Allow executing this test module directly.
    unittest.main()
| 29.699552 | 79 | 0.597765 |
acf26f17e88f2f68d26cff236add9d07e570737b | 1,734 | py | Python | test/test_get_transaction_details_by_transaction_id_from_callback_ribsd_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | test/test_get_transaction_details_by_transaction_id_from_callback_ribsd_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | test/test_get_transaction_details_by_transaction_id_from_callback_ribsd_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_transaction_details_by_transaction_idribsd2_script_sig import GetTransactionDetailsByTransactionIDRIBSD2ScriptSig
globals()['GetTransactionDetailsByTransactionIDRIBSD2ScriptSig'] = GetTransactionDetailsByTransactionIDRIBSD2ScriptSig
from cryptoapis.model.get_transaction_details_by_transaction_id_from_callback_ribsd_vin import GetTransactionDetailsByTransactionIDFromCallbackRIBSDVin
class TestGetTransactionDetailsByTransactionIDFromCallbackRIBSDVin(unittest.TestCase):
    """GetTransactionDetailsByTransactionIDFromCallbackRIBSDVin unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures to construct yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testGetTransactionDetailsByTransactionIDFromCallbackRIBSDVin(self):
        """Test GetTransactionDetailsByTransactionIDFromCallbackRIBSDVin"""
        # FIXME: construct object with mandatory attributes with example values
        # model = GetTransactionDetailsByTransactionIDFromCallbackRIBSDVin() # noqa: E501
        pass
if __name__ == '__main__':
    # Allow executing this test module directly.
    unittest.main()
| 44.461538 | 484 | 0.813149 |
acf26f7cc4cff0bede7b22e94fb867de600bfc59 | 318 | py | Python | generator/components/text/text.py | dbzkunalss/pyDoodle2Web | 0c2a25468ae322b7fd6d37d718bde42e8f0c3fc3 | [
"MIT"
] | 6 | 2020-03-21T16:57:10.000Z | 2020-03-30T09:39:47.000Z | generator/components/text/text.py | dbzkunalss/pyDoodle2Web | 0c2a25468ae322b7fd6d37d718bde42e8f0c3fc3 | [
"MIT"
] | 7 | 2020-03-21T21:02:06.000Z | 2020-04-21T01:28:16.000Z | generator/components/text/text.py | dbzkunalss/pyDoodle2Web | 0c2a25468ae322b7fd6d37d718bde42e8f0c3fc3 | [
"MIT"
] | 2 | 2020-03-23T11:12:45.000Z | 2020-03-24T16:15:29.000Z | from bs4 import BeautifulSoup
import os
class Text:
def __init__(self):
self.name = 'card'
self.isParentLike = False
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'template.html')) as f:
soup = BeautifulSoup(f, 'html.parser')
self.template = soup | 28.909091 | 99 | 0.638365 |
acf26f80db144f0ce790ce09512174015d28f985 | 2,411 | py | Python | custom_components/powercalc/strategy/fixed.py | nepozs/homeassistant-powercalc | 7019414b63f340d04549439d308eda916685ffb4 | [
"MIT"
] | 128 | 2021-03-04T21:54:04.000Z | 2022-03-17T22:53:20.000Z | custom_components/powercalc/strategy/fixed.py | nepozs/homeassistant-powercalc | 7019414b63f340d04549439d308eda916685ffb4 | [
"MIT"
] | 4 | 2021-03-07T21:18:12.000Z | 2021-09-24T13:09:39.000Z | custom_components/powercalc/strategy/fixed.py | nepozs/homeassistant-powercalc | 7019414b63f340d04549439d308eda916685ffb4 | [
"MIT"
] | 15 | 2021-03-05T07:29:31.000Z | 2022-03-31T10:07:06.000Z | from __future__ import annotations
from typing import Optional, Union
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components import climate, vacuum
from homeassistant.core import State
from homeassistant.helpers.template import Template
from custom_components.powercalc.common import SourceEntity
from custom_components.powercalc.const import CONF_POWER, CONF_STATES_POWER
from custom_components.powercalc.errors import StrategyConfigurationError
from custom_components.powercalc.helpers import evaluate_power
from .strategy_interface import PowerCalculationStrategyInterface
# Voluptuous schema for this strategy's options: an optional fixed `power`
# (static number or template) and/or an optional per-state mapping under
# `states_power` (state -> number or template).
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_POWER): vol.Any(vol.Coerce(float), cv.template),
        vol.Optional(CONF_STATES_POWER): vol.Schema(
            {cv.string: vol.Any(vol.Coerce(float), cv.template)}
        ),
    }
)
# Domains whose entities must be configured with `states_power`;
# enforced by FixedStrategy.validate_config.
STATE_BASED_ENTITY_DOMAINS = [
    climate.DOMAIN,
    vacuum.DOMAIN,
]
class FixedStrategy(PowerCalculationStrategyInterface):
    """Power calculation strategy that returns a fixed power value.

    The value comes either from a single `power` (static number or
    template) or from `per_state_power`, a mapping keyed by entity state
    which also supports "attribute|value" keys matched against the
    entity's state attributes.
    """

    def __init__(
        self,
        power: Optional[Union[Template, float]],
        per_state_power: Optional[dict[str, float]],
    ) -> None:
        self._power = power
        self._per_state_power = per_state_power

    async def calculate(self, entity_state: State) -> Optional[float]:
        """Return the power for `entity_state`.

        Resolution order: exact state match in `per_state_power`, then
        "attribute|value" keys matched against the entity's attributes,
        then the plain `power` fallback.
        """
        if self._per_state_power is not None:
            # Lookup by state (single dict access instead of `in` + `.get`)
            if entity_state.state in self._per_state_power:
                return await evaluate_power(
                    self._per_state_power[entity_state.state]
                )
            # Lookup by state attribute (attribute|value)
            for state_key, power in self._per_state_power.items():
                if "|" in state_key:
                    # maxsplit=1: only the first "|" separates attribute
                    # from value, so a "|" inside the value can no longer
                    # make the 2-tuple unpack raise ValueError (the
                    # original used maxsplit=2).
                    attribute, value = state_key.split("|", 1)
                    if entity_state.attributes.get(attribute) == value:
                        return await evaluate_power(power)
        return await evaluate_power(self._power)

    async def validate_config(self, source_entity: SourceEntity):
        """Validate correct setup of the strategy.

        Raises:
            StrategyConfigurationError: when the entity belongs to a
                state-based domain (climate, vacuum) but no
                `states_power` mapping was configured.
        """
        if (
            source_entity.domain in STATE_BASED_ENTITY_DOMAINS
            and self._per_state_power is None
        ):
            raise StrategyConfigurationError(
                "This entity can only work with 'states_power' not 'power'"
            )
| 34.942029 | 75 | 0.670676 |
acf272a6032968c144bcd92dac6b858f6ddb1971 | 1,539 | py | Python | nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | [
"Apache-2.0"
] | 2 | 2019-01-25T18:20:51.000Z | 2019-07-30T20:51:51.000Z | nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | [
"Apache-2.0"
] | 2 | 2018-01-25T19:48:17.000Z | 2019-01-25T18:20:52.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..preprocess import MNIBiasCorrection
def test_MNIBiasCorrection_inputs():
input_map = dict(args=dict(argstr='%s',
),
distance=dict(argstr='--distance %d',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='--i %s',
mandatory=True,
),
iterations=dict(argstr='--n %d',
),
mask=dict(argstr='--mask %s',
),
no_rescale=dict(argstr='--no-rescale',
),
out_file=dict(argstr='--o %s',
hash_files=False,
keep_extension=True,
name_source=['in_file'],
name_template='%s_output',
),
protocol_iterations=dict(argstr='--proto-iters %d',
),
shrink=dict(argstr='--shrink %d',
),
stop=dict(argstr='--stop %f',
),
subjects_dir=dict(),
terminal_output=dict(nohash=True,
),
transform=dict(argstr='--uchar %s',
),
)
inputs = MNIBiasCorrection.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_MNIBiasCorrection_outputs():
output_map = dict(out_file=dict(),
)
outputs = MNIBiasCorrection.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 26.084746 | 67 | 0.630929 |
acf274059275c3f98e94de651e6dfcdabe29711f | 227 | py | Python | utils/__init__.py | snuffle-PX/2048-api | a43c74c3cfcf47e3f79ab631705b46ddbe3add1e | [
"Apache-2.0"
] | null | null | null | utils/__init__.py | snuffle-PX/2048-api | a43c74c3cfcf47e3f79ab631705b46ddbe3add1e | [
"Apache-2.0"
] | null | null | null | utils/__init__.py | snuffle-PX/2048-api | a43c74c3cfcf47e3f79ab631705b46ddbe3add1e | [
"Apache-2.0"
] | null | null | null | from .move import try_to_move
from .rot_invariance import get_train_data, get_train_data_12
from .onehot import conv_to_onehot, conv_log_to_onehot, flatten_onehot, conv_to_onehot_12
from .memory import ReplayMemory, Transition
| 45.4 | 89 | 0.867841 |
acf27438245ac82d5145891978637a604c0b517d | 281 | py | Python | alembic/versions/4097cd576be_mandate_candidate_pa.py | wenbs/mptracker | e011ab11954bbf785ae11fea7ed977440df2284a | [
"MIT"
] | 4 | 2015-01-20T15:03:15.000Z | 2017-03-15T09:56:07.000Z | alembic/versions/4097cd576be_mandate_candidate_pa.py | wenbs/mptracker | e011ab11954bbf785ae11fea7ed977440df2284a | [
"MIT"
] | 3 | 2021-03-31T18:53:12.000Z | 2022-03-21T22:16:35.000Z | alembic/versions/4097cd576be_mandate_candidate_pa.py | wenbs/mptracker | e011ab11954bbf785ae11fea7ed977440df2284a | [
"MIT"
] | 6 | 2015-12-13T08:56:49.000Z | 2021-08-07T20:36:29.000Z | revision = '4097cd576be'
down_revision = '22cfd89dfd7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable `candidate_party` text column to `mandate`."""
    op.add_column('mandate',
        sa.Column('candidate_party', sa.Text(), nullable=True))
def downgrade():
    """Drop the `candidate_party` column added by upgrade()."""
    op.drop_column('mandate', 'candidate_party')
| 18.733333 | 63 | 0.708185 |
acf274517c4ecde00c60750e63c1602c99597c0d | 5,334 | py | Python | ferdinand_gui.py | scmanjarrez/Seguidores-de-Rozemyne-Telegram-Bot | 67ca4a0f69c57f005633f3f346d1320230599aac | [
"MIT"
] | null | null | null | ferdinand_gui.py | scmanjarrez/Seguidores-de-Rozemyne-Telegram-Bot | 67ca4a0f69c57f005633f3f346d1320230599aac | [
"MIT"
] | null | null | null | ferdinand_gui.py | scmanjarrez/Seguidores-de-Rozemyne-Telegram-Bot | 67ca4a0f69c57f005633f3f346d1320230599aac | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
# Copyright (c) 2021-2022 scmanjarrez. All rights reserved.
# This work is licensed under the terms of the MIT license.
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
import database as db
import utils as ut
def button(buttons):
    """Build one keyboard row of callback buttons from (label, data) pairs."""
    row = []
    for label, data in buttons:
        row.append(InlineKeyboardButton(label, callback_data=data))
    return row
def button_url(buttons):
    """Build one keyboard row of URL buttons from (label, url) pairs."""
    row = []
    for label, url in buttons:
        row.append(InlineKeyboardButton(label, url))
    return row
def button_redirect(bot, command):
    # Single-button keyboard that deep-links into a private chat with the
    # bot, carrying `command` as the /start payload (built by ut.deeplink).
    return InlineKeyboardMarkup(
        [button_url([("➡ Pulsa para usar herramienta inhibidora de sonido ⬅",
                      ut.deeplink(bot, command))])])
def menu(update, context):
    """Entry command: show the main menu, or the not-started hint for
    chats that are not yet in the database cache."""
    uid = ut.uid(update)
    if db.cached(uid):
        main_menu(update)
    else:
        ut.not_started(update)
def main_menu(update):
    """Render the top-level menu; edits the existing message in place when
    the update came from a button press (callback query), otherwise sends
    a new message."""
    kb = [button([("🏛 Biblioteca 🏛", 'library_menu')]),
          button([("📚 Anuario 📚", 'yearbook_menu')]),
          button([("🙏 Altares a los Dioses 🙏", 'shrines_menu')]),
          button([("📆 Libros Semanales 📆", 'weekly_menu')]),
          button([("🕊 Ordonnanz 🕊", 'notifications_menu')])]
    resp = ut.send
    if update.callback_query is not None:
        resp = ut.edit
    resp(update, "Templo", reply_markup=InlineKeyboardMarkup(kb))
def library_menu(update):
    """List every published part as a button row, back button last."""
    kb = [button([(f"Parte {part}: {title}", f'part_{part}')])
          for part, title in db.total_parts()]
    kb.append(button([("« Volver al Templo", 'main_menu')]))
    ut.edit(update, "Biblioteca", InlineKeyboardMarkup(kb))
def part_menu(update, part):
    """List the volumes of `part`, navigation buttons last."""
    kb = [button([(f"Volúmen {volume}", f'volume_{part}_{volume}')])
          for (volume,) in db.total_volumes(part)]
    kb.append(button([("« Volver a la Biblioteca", 'library_menu'),
                      ("« Volver al Templo", 'main_menu')]))
    ut.edit(update, f"Parte {part}: {db.name_part(part)}",
            InlineKeyboardMarkup(kb))
def volume_menu(update, part, volume):
    """List chapter links for one volume, navigation buttons last."""
    kb = [button_url([(f"{ch_title}", ch_url)])
          for ch_title, ch_url in db.chapters(part, volume)]
    kb.append(button([(f"« Volver a la Parte {part}", f'part_{part}'),
                      ("« Volver al Templo", 'main_menu')]))
    ut.edit(update, f"Parte {part}: {db.name_part(part)}, volúmen {volume}",
            InlineKeyboardMarkup(kb))
def yearbook_menu(update):
    """List the parts that have PDF compilations, back button last."""
    kb = [button([(f"Parte {part}: {title}", f'ybook_{part}')])
          for part, title in db.total_pdfs()]
    kb.append(button([("« Volver al Templo", 'main_menu')]))
    ut.edit(update, "Anuario", InlineKeyboardMarkup(kb))
def ybook_menu(update, part):
    """List direct PDF links for every volume of `part`."""
    kb = [button_url([(f"Volúmen {volume}", db.pdf_url(part, volume))])
          for (volume,) in db.total_pdf_volumes(part)]
    kb.append(button([("« Volver al Anuario", 'yearbook_menu'),
                      ("« Volver al Templo", 'main_menu')]))
    ut.edit(update, f"Parte {part}: {db.name_part(part)}",
            InlineKeyboardMarkup(kb))
def shrines_menu(update):
    """Static menu of community links; URLs come from the bot config."""
    kb = [button_url([("👥 Seguidores de Rozemyne 👥",
                       ut.config('group'))]),
          button_url([("👥 Salón de Eruditos (Spoilers) 👥",
                       ut.config('spoilers'))]),
          button_url([("📢 Biblioteca de Mestionora 📢",
                       ut.config('channel'))]),
          button_url([("🎧 Los Gutenbergs de Rozemyne (Youtube) 🎧",
                       ut.config('youtube'))]),
          button_url([("🗣 Fans de Ascendance of a Bookworm (Discord) 🗣",
                       ut.config('discord'))]),
          button_url([("👥 Honzuki no Gekokujou (Facebook) 👥",
                       ut.config('facebook'))]),
          button([("« Volver al Templo", 'main_menu')])]
    ut.edit(update, "Altares de los Dioses", InlineKeyboardMarkup(kb))
def weekly_menu(update):
    """List this week's chapters: titles prefixed with '+' become inert
    header buttons, the rest link to the announcements channel."""
    rows = []
    for ch_title in db.mestionora_chapters():
        if ch_title.startswith('+'):
            rows.append(button([(f"📖 {ch_title[1:]} 📖", 'nop')]))
        else:
            rows.append(button_url([(f"{ch_title}",
                                     ut.config('channel'))]))
    rows.append(button([("« Volver al Templo", 'main_menu')]))
    ut.edit(update, "Libros Semanales", InlineKeyboardMarkup(rows))
def notifications_menu(update, context):
    """Show the notifications toggle.

    In private chats everyone sees the toggle; in groups only admins do,
    others get an explanatory title instead.
    """
    uid = ut.uid(update)
    kb = [button([("« Volver al Templo", 'main_menu')])]
    tit = "Ordonnanz"
    if not ut.is_group(uid) or (ut.is_group(uid) and
                                ut.is_admin(update, context,
                                            callback=True)):
        # Bell on when notifications are enabled (flag stored as 1/0).
        notification_icon = '🔔' if db.notifications(uid) == 1 else '🔕'
        kb.insert(0,
                  button([(f"Recibir Ordonnanz: {notification_icon}",
                           'notification_toggle')]))
    else:
        tit = "Sólo disponible para la facción del Aub."
    ut.edit(update, tit, InlineKeyboardMarkup(kb))
def notification_toggle(update, context):
    """Flip the notification flag for this chat and refresh the menu."""
    chat_id = ut.uid(update)
    db.toggle_notifications(chat_id)
    notifications_menu(update, context)
| 36.534247 | 79 | 0.582115 |
acf2748da57a03fe414cac55897a727c8665a786 | 4,382 | py | Python | ui/page_elements/page_controller/PageControllerUI.py | ArcherLuo233/election-s-prediction | 9da72cb855f6d61f9cdec6e15f7ca832629ba51a | [
"MIT"
] | null | null | null | ui/page_elements/page_controller/PageControllerUI.py | ArcherLuo233/election-s-prediction | 9da72cb855f6d61f9cdec6e15f7ca832629ba51a | [
"MIT"
] | 1 | 2022-01-26T01:23:26.000Z | 2022-01-26T01:23:34.000Z | ui/page_elements/page_controller/PageControllerUI.py | ArcherLuo233/election-s-prediction | 9da72cb855f6d61f9cdec6e15f7ca832629ba51a | [
"MIT"
] | 1 | 2021-11-08T10:58:23.000Z | 2021-11-08T10:58:23.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'page_controller.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(769, 42)
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(12)
Form.setFont(font)
self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.button_prev = QtWidgets.QPushButton(Form)
self.button_prev.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_prev.setObjectName("button_prev")
self.horizontalLayout.addWidget(self.button_prev)
self.button_left = QtWidgets.QPushButton(Form)
self.button_left.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_left.setCheckable(True)
self.button_left.setChecked(True)
self.button_left.setObjectName("button_left")
self.buttonGroup = QtWidgets.QButtonGroup(Form)
self.buttonGroup.setObjectName("buttonGroup")
self.buttonGroup.addButton(self.button_left)
self.horizontalLayout.addWidget(self.button_left)
self.label_leftdot = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_leftdot.setFont(font)
self.label_leftdot.setObjectName("label_leftdot")
self.horizontalLayout.addWidget(self.label_leftdot)
self.layout_middle = QtWidgets.QHBoxLayout()
self.layout_middle.setObjectName("layout_middle")
self.horizontalLayout.addLayout(self.layout_middle)
self.label_rightdot = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_rightdot.setFont(font)
self.label_rightdot.setObjectName("label_rightdot")
self.horizontalLayout.addWidget(self.label_rightdot)
self.button_right = QtWidgets.QPushButton(Form)
self.button_right.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_right.setCheckable(True)
self.button_right.setObjectName("button_right")
self.buttonGroup.addButton(self.button_right)
self.horizontalLayout.addWidget(self.button_right)
self.button_next = QtWidgets.QPushButton(Form)
self.button_next.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_next.setObjectName("button_next")
self.horizontalLayout.addWidget(self.button_next)
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.spinBox = QtWidgets.QSpinBox(Form)
self.spinBox.setObjectName("spinBox")
self.horizontalLayout.addWidget(self.spinBox)
self.button_goto = QtWidgets.QPushButton(Form)
self.button_goto.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_goto.setObjectName("button_goto")
self.horizontalLayout.addWidget(self.button_goto)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label.setText(_translate("Form", "共 %d 页"))
self.button_prev.setText(_translate("Form", "<"))
self.button_left.setText(_translate("Form", "1"))
self.label_leftdot.setText(_translate("Form", "..."))
self.label_rightdot.setText(_translate("Form", "..."))
self.button_right.setText(_translate("Form", "%d"))
self.button_next.setText(_translate("Form", ">"))
self.label_2.setText(_translate("Form", "跳转到:"))
self.button_goto.setText(_translate("Form", "跳转"))
| 45.645833 | 114 | 0.698083 |
acf276a23245bf6144b996ecb66310dde1d665cf | 6,664 | py | Python | src/ircthread.py | ETJwallet/ETJwallet | 4ce17630ab5f82f73bab99a7375e9c49c99e1100 | [
"MIT"
] | null | null | null | src/ircthread.py | ETJwallet/ETJwallet | 4ce17630ab5f82f73bab99a7375e9c49c99e1100 | [
"MIT"
] | null | null | null | src/ircthread.py | ETJwallet/ETJwallet | 4ce17630ab5f82f73bab99a7375e9c49c99e1100 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
    def __init__(self, processor, config):
        """Read IRC options from the [server] section of `config`.

        The `report_*` options, when set, override the local host/ports so
        the endpoint advertised on IRC can differ from the bind address.
        The nick defaults to a short hash of the host, prefixed "E_".
        (Python 2 code: Queue module, str.encode("hex").)
        """
        threading.Thread.__init__(self)
        self.processor = processor
        # Daemon thread: does not block interpreter shutdown.
        self.daemon = True
        options = dict(config.items('server'))
        self.stratum_tcp_port = options.get('stratum_tcp_port')
        self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
        self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
        self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
        self.irc_bind_ip = options.get('irc_bind_ip')
        self.host = options.get('host')
        self.report_host = options.get('report_host')
        self.nick = options.get('irc_nick')
        # Advertised values take precedence over the locally bound ones.
        if self.report_stratum_tcp_port:
            self.stratum_tcp_port = self.report_stratum_tcp_port
        if self.report_stratum_tcp_ssl_port:
            self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
        if self.report_host:
            self.host = self.report_host
        if not self.nick:
            # Derive a stable 10-hex-char nick from the host name.
            self.nick = Hash(self.host)[:5].encode("hex")
        self.pruning = True
        self.pruning_limit = config.get('leveldb', 'pruning_limit')
        self.nick = 'E_' + self.nick
        self.password = None
        # Pending WHO lookups, drained by who_thread().
        self.who_queue = Queue.Queue()
    def getname(self):
        """Build the capabilities string advertised on IRC.

        Format: "v<version> [p<pruning_limit>] [t[port]] [s[port]]" where a
        bare "t"/"s" means the default port (t=50001, s=50002).
        """
        s = 'v' + VERSION + ' '
        if self.pruning:
            s += 'p' + self.pruning_limit + ' '
        def add_port(letter, number):
            # Empty when the port is unset; bare letter for default ports.
            DEFAULT_PORTS = {'t':'50001', 's':'50002'}
            if not number: return ''
            if DEFAULT_PORTS[letter] == number:
                return letter + ' '
            else:
                return letter + number + ' '
        s += add_port('t',self.stratum_tcp_port)
        s += add_port('s',self.stratum_tcp_ssl_port)
        return s
    def start(self, queue):
        """Start the thread; ('join'/'quit', ...) events are put on `queue`."""
        self.queue = queue
        threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#verocoin")
def on_join(self, connection, event):
m = re.match("(E_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(E_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(E_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("E_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
| 37.22905 | 147 | 0.62425 |
acf276ce79313003f06bd7169d2fdedc9ca2c89f | 633 | py | Python | permissions_qa_scripts/originals/UPLOADS/tools/bin/rst2pseudoxml.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | 9 | 2016-08-23T15:59:12.000Z | 2021-07-16T00:54:54.000Z | permissions_qa_scripts/originals/UPLOADS/tools/bin/rst2pseudoxml.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | 12 | 2016-11-18T18:56:42.000Z | 2021-03-11T20:25:14.000Z | permissions_qa_scripts/originals/UPLOADS/tools/bin/rst2pseudoxml.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | 14 | 2016-02-17T04:24:07.000Z | 2020-02-28T21:36:19.000Z | #!/Users/aditi/pyencoded-tools/tools/bin/python3.5
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
import locale

try:
    # Best-effort: pick up the user's locale for docutils output.
    # Narrowed from a bare `except:` — a bare except also swallows
    # SystemExit/KeyboardInterrupt; only unsupported locales are expected here.
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass

from docutils.core import publish_cmdline, default_description

# Command-line description shown in --help output.
description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

publish_cmdline(description=description)
acf277dd2c32ef3d7ffd4a85df9ec05b171bf8ee | 22,504 | py | Python | ci/build.py | Tugraph/CD-SGD | 86b0be6dba8bd5b2d74e8e11ab882326f0ecd654 | [
"Apache-2.0"
] | 33 | 2017-05-31T15:14:08.000Z | 2020-12-23T08:52:34.000Z | ci/build.py | Tugraph/CD-SGD | 86b0be6dba8bd5b2d74e8e11ab882326f0ecd654 | [
"Apache-2.0"
] | 5 | 2018-01-16T04:36:34.000Z | 2021-01-05T06:46:37.000Z | ci/build.py | Tugraph/CD-SGD | 86b0be6dba8bd5b2d74e8e11ab882326f0ecd654 | [
"Apache-2.0"
] | 13 | 2017-11-09T15:31:02.000Z | 2020-04-28T07:09:21.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Multi arch dockerized build tool.
"""
__author__ = 'Marco de Abreu, Kellen Sunderland, Anton Chernov, Pedro Larroy'
__version__ = '0.3'
import argparse
import glob
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from itertools import chain
from subprocess import check_call, check_output
from typing import *
from util import *
import docker
import docker.models
import docker.errors
import signal
import atexit
import pprint
class Cleanup:
    """A class to cleanup containers"""

    def __init__(self):
        self.containers = set()
        self.docker_stop_timeout = 3

    def add_container(self, container: docker.models.containers.Container):
        """Register a container so it gets stopped/removed on cleanup."""
        assert isinstance(container, docker.models.containers.Container)
        self.containers.add(container)

    def remove_container(self, container: docker.models.containers.Container):
        """Unregister a container that was already cleaned up elsewhere."""
        assert isinstance(container, docker.models.containers.Container)
        self.containers.remove(container)

    def _resolve_stop_timeout(self):
        """Seconds to wait for docker stop; DOCKER_STOP_TIMEOUT overrides."""
        # noinspection PyBroadException
        try:
            return int(os.environ.get("DOCKER_STOP_TIMEOUT", self.docker_stop_timeout))
        except Exception:
            return 3

    def _cleanup_containers(self):
        if not self.containers:
            return
        logging.warning("Cleaning up containers")
        stop_timeout = self._resolve_stop_timeout()
        for container in self.containers:
            try:
                container.stop(timeout=stop_timeout)
                logging.info("☠: stopped container %s", trim_container_id(container.id))
                container.remove()
                logging.info("🚽: removed container %s", trim_container_id(container.id))
            except Exception as e:
                # Best effort: keep cleaning the remaining containers.
                logging.exception(e)
        self.containers.clear()
        logging.info("Cleaning up containers finished.")

    def __call__(self):
        """Perform cleanup"""
        self._cleanup_containers()
def get_dockerfiles_path():
    """Directory (relative to the ci/ folder) holding the build Dockerfiles."""
    return "docker"
def get_platforms(path: str = get_dockerfiles_path()) -> List[str]:
    """Get a list of architectures given our dockerfiles"""
    # Ignore editor backup files ending in '~'.
    candidates = [f for f in glob.glob(os.path.join(path, "Dockerfile.build.*"))
                  if f[-1] != '~']
    # Strip the "Dockerfile.build." prefix, keeping the directory part for sorting.
    stripped = [re.sub(r"Dockerfile.build.(.*)", r"\1", f) for f in candidates]
    return [os.path.split(f)[1] for f in sorted(stripped)]
def get_docker_tag(platform: str, registry: str) -> str:
    """:return: docker tag to be used for the container"""
    # Fall back to a local pseudo-registry when none is configured.
    effective_registry = registry if registry else "mxnet_local"
    return "{0}/build.{1}".format(effective_registry, platform)
def get_dockerfile(platform: str, path=get_dockerfiles_path()) -> str:
    """Path of the Dockerfile used to build the given platform."""
    filename = "Dockerfile.build.{0}".format(platform)
    return os.path.join(path, filename)
def get_docker_binary(use_nvidia_docker: bool) -> str:
    """Pick the docker CLI: 'nvidia-docker' when GPU support is requested."""
    if use_nvidia_docker:
        return "nvidia-docker"
    return "docker"
def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int, no_cache: bool) -> str:
    """
    Build a container for the given platform
    :param platform: Platform
    :param docker_binary: docker binary to use (docker/nvidia-docker)
    :param registry: Dockerhub registry name
    :param num_retries: Number of retries to build the docker image
    :param no_cache: pass no-cache to docker to rebuild the images
    :return: Id of the top level image
    :raises FileNotFoundError: if the built image id cannot be found afterwards
    """
    tag = get_docker_tag(platform=platform, registry=registry)
    logging.info("Building docker container tagged '%s' with %s", tag, docker_binary)
    #
    # We add a user with the same group as the executing non-root user so files created in the
    # container match permissions of the local user. Same for the group.
    #
    # These variables are used in the docker files to create user and group with these ids.
    # see: docker/install/ubuntu_adduser.sh
    #
    # cache-from is needed so we use the cached images tagged from the remote via
    # docker pull see: docker_cache.load_docker_cache
    #
    # This also prevents using local layers for caching: https://github.com/moby/moby/issues/33002
    # So to use local caching, we should omit the cache-from by using --no-dockerhub-cache argument to this
    # script.
    #
    # This doesn't work with multi head docker files.
    #
    cmd = [docker_binary, "build",
           "-f", get_dockerfile(platform),
           "--build-arg", "USER_ID={}".format(os.getuid()),
           "--build-arg", "GROUP_ID={}".format(os.getgid())]
    # --no-cache and --cache-from are mutually exclusive here.
    if no_cache:
        cmd.append("--no-cache")
    elif registry:
        cmd.extend(["--cache-from", tag])
    cmd.extend(["-t", tag, get_dockerfiles_path()])

    # Retry transient build failures up to num_retries times (see util.retry).
    @retry(subprocess.CalledProcessError, tries=num_retries)
    def run_cmd():
        logging.info("Running command: '%s'", ' '.join(cmd))
        check_call(cmd)

    run_cmd()
    # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. Otherwise, the
    # check_call would have failed
    image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag)
    if not image_id:
        raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag))
    return image_id
def _get_local_image_id(docker_binary, docker_tag):
    """
    Get the image id of the local docker layer with the passed tag
    :param docker_binary: docker CLI to invoke (docker/nvidia-docker)
    :param docker_tag: docker tag
    :return: Image id as a non-empty string
    :raises RuntimeError: if no local image matches the tag
        (note: this never returns None, contrary to an older comment)
    """
    cmd = [docker_binary, "images", "-q", docker_tag]
    image_id_b = check_output(cmd)
    image_id = image_id_b.decode('utf-8').strip()
    if not image_id:
        raise RuntimeError('Unable to find docker image id matching with tag {}'.format(docker_tag))
    return image_id
def buildir() -> str:
    """Absolute path of the build/ output directory under the MXNet root."""
    return os.path.join(get_mxnet_root(), "build")
def default_ccache_dir() -> str:
    """:return: ccache directory for the current platform"""
    # Honour an explicit CCACHE_DIR so the cache is shared across containers.
    env_dir = os.environ.get('CCACHE_DIR')
    if env_dir is not None:
        cache_path = os.path.realpath(env_dir)
        try:
            os.makedirs(cache_path, exist_ok=True)
            return cache_path
        except PermissionError:
            logging.info('Unable to make dirs at %s, falling back to local temp dir', cache_path)
    import platform
    # In osx tmpdir is not mountable by default
    if platform.system() == 'Darwin':
        cache_path = "/tmp/_mxnet_ccache"
        os.makedirs(cache_path, exist_ok=True)
        return cache_path
    return os.path.join(tempfile.gettempdir(), "ci_ccache")
def trim_container_id(cid):
    """:return: trimmed container id"""
    # Docker's short id convention is the first 12 characters.
    short_length = 12
    return cid[:short_length]
def container_run(platform: str,
                  nvidia_runtime: bool,
                  docker_registry: str,
                  shared_memory_size: str,
                  local_ccache_dir: str,
                  command: List[str],
                  cleanup: Cleanup,
                  dry_run: bool = False) -> int:
    """Run command in a container.

    Mounts the MXNet root, build/ and the ccache dir into the container,
    streams the container's output to stdout, waits for it to finish and
    stops/removes it afterwards.

    :param cleanup: registry used so signal handlers can stop the container
    :param dry_run: only print the equivalent docker command, run nothing
    :return: exit status of the command inside the container (0 on success;
        150/151/152 for wait/stop/remove failures on our side)
    """
    container_wait_s = 600
    #
    # Environment setup
    #
    environment = {
        'CCACHE_MAXSIZE': '500G',
        'CCACHE_TEMPDIR': '/tmp/ccache',  # temp dir should be local and not shared
        'CCACHE_DIR': '/work/ccache',  # this path is inside the container as /work/ccache is
                                       # mounted
        'CCACHE_LOGFILE': '/tmp/ccache.log',  # a container-scoped log, useful for ccache
                                              # verification.
    }
    # These variables are passed to the container to the process tree killer can find runaway
    # process inside the container
    # https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller
    # https://github.com/jenkinsci/jenkins/blob/578d6bacb33a5e99f149de504c80275796f0b231/core/src/main/java/hudson/model/Run.java#L2393
    #
    jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG']
    environment.update({k: os.environ[k] for k in jenkins_env_vars if k in os.environ})
    environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if k in os.environ})
    tag = get_docker_tag(platform=platform, registry=docker_registry)
    mx_root = get_mxnet_root()
    local_build_folder = buildir()
    # We need to create it first, otherwise it will be created by the docker daemon with root only permissions
    os.makedirs(local_build_folder, exist_ok=True)
    os.makedirs(local_ccache_dir, exist_ok=True)
    logging.info("Using ccache directory: %s", local_ccache_dir)
    docker_client = docker.from_env()
    # Equivalent command, printed only for manual reproduction/debugging.
    docker_cmd_list = [
        get_docker_binary(nvidia_runtime),
        'run',
        "--cap-add",
        "SYS_PTRACE",  # Required by ASAN
        '--rm',
        '--shm-size={}'.format(shared_memory_size),
        # mount mxnet root
        '-v', "{}:/work/mxnet".format(mx_root),
        # mount mxnet/build for storing build
        '-v', "{}:/work/build".format(local_build_folder),
        '-v', "{}:/work/ccache".format(local_ccache_dir),
        '-u', '{}:{}'.format(os.getuid(), os.getgid()),
        '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']),
        # temp dir should be local and not shared
        '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']),
        # this path is inside the container as /work/ccache is mounted
        '-e', "CCACHE_DIR={}".format(environment['CCACHE_DIR']),
        # a container-scoped log, useful for ccache verification.
        '-e', "CCACHE_LOGFILE={}".format(environment['CCACHE_LOGFILE']),
        '-ti',
        tag]
    docker_cmd_list.extend(command)
    docker_cmd = ' \\\n\t'.join(docker_cmd_list)
    logging.info("Running %s in container %s", command, tag)
    logging.info("Executing the equivalent of:\n%s\n", docker_cmd)
    # return code of the command inside docker
    ret = 0
    if not dry_run:
        #############################
        #
        # Block signals while starting the container so a Ctrl-C cannot hit
        # between "container started" and "container registered for cleanup".
        signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
        # noinspection PyShadowingNames
        runtime = None
        if nvidia_runtime:
            # noinspection PyShadowingNames
            # runc is default (docker info | grep -i runtime)
            runtime = 'nvidia'
        container = docker_client.containers.run(
            tag,
            runtime=runtime,
            detach=True,
            command=command,
            shm_size=shared_memory_size,
            user='{}:{}'.format(os.getuid(), os.getgid()),
            cap_add='SYS_PTRACE',
            volumes={
                mx_root:
                    {'bind': '/work/mxnet', 'mode': 'rw'},
                local_build_folder:
                    {'bind': '/work/build', 'mode': 'rw'},
                local_ccache_dir:
                    {'bind': '/work/ccache', 'mode': 'rw'},
            },
            environment=environment)
        try:
            logging.info("Started container: %s", trim_container_id(container.id))
            # Race condition:
            # If the previous call is interrupted then it's possible that the container is not cleaned up
            # We avoid by masking the signals temporarily
            cleanup.add_container(container)
            signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
            #
            #############################
            # Stream combined stdout/stderr of the container to our stdout.
            stream = container.logs(stream=True, stdout=True, stderr=True)
            sys.stdout.flush()
            for chunk in stream:
                sys.stdout.buffer.write(chunk)
                sys.stdout.buffer.flush()
            sys.stdout.flush()
            stream.close()
            try:
                logging.info("Waiting for status of container %s for %d s.",
                             trim_container_id(container.id),
                             container_wait_s)
                wait_result = container.wait(timeout=container_wait_s)
                logging.info("Container exit status: %s", wait_result)
                # 200 is an arbitrary "status missing" fallback, not a real docker code.
                ret = wait_result.get('StatusCode', 200)
                if ret != 0:
                    logging.error("Container exited with an error 😞")
                else:
                    logging.info("Container exited with success 👍")
            except Exception as e:
                logging.exception(e)
                ret = 150
            # Stop
            try:
                logging.info("Stopping container: %s", trim_container_id(container.id))
                container.stop()
            except Exception as e:
                logging.exception(e)
                ret = 151
            # Remove
            try:
                logging.info("Removing container: %s", trim_container_id(container.id))
                container.remove()
            except Exception as e:
                logging.exception(e)
                ret = 152
            cleanup.remove_container(container)
            containers = docker_client.containers.list()
            if containers:
                logging.info("Other running containers: %s", [trim_container_id(x.id) for x in containers])
        except docker.errors.NotFound as e:
            logging.info("Container was stopped before cleanup started: %s", e)
    return ret
def list_platforms() -> str:
    """Human-readable list of the supported build platforms."""
    platform_lines = '\n'.join(get_platforms())
    return "\nSupported platforms:\n{}".format(platform_lines)
def load_docker_cache(tag, docker_registry) -> None:
    """Imports tagged container from the given docker registry"""
    if not docker_registry:
        logging.info('Distributed docker cache disabled')
        return
    # noinspection PyBroadException
    try:
        import docker_cache
        logging.info('Docker cache download is enabled from registry %s', docker_registry)
        docker_cache.load_docker_cache(registry=docker_registry, docker_tag=tag)
    except Exception:
        # Cache download is best-effort only; a cold build still works.
        logging.exception('Unable to retrieve Docker cache. Continue without...')
def log_environment():
    """Log the EC2 instance id (when available) and the build environment."""
    instance_id = ec2_instance_id_hostname()
    if instance_id:
        logging.info("EC2 Instance id: %s", instance_id)
    printer = pprint.PrettyPrinter(indent=4)
    logging.debug("Build environment: %s", printer.pformat(dict(os.environ)))
def script_name() -> str:
    """:returns: script name with leading paths removed"""
    _, basename = os.path.split(sys.argv[0])
    return basename
def config_logging():
    """Configure root logging: INFO level, UTC timestamps, quiet requests."""
    import time
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    logging.getLogger("requests").setLevel(logging.WARNING)
    log_format = '{}: %(asctime)sZ %(levelname)s %(message)s'.format(script_name())
    logging.basicConfig(format=log_format)
    # Emit timestamps in UTC (hence the literal 'Z' suffix in the format).
    logging.Formatter.converter = time.gmtime
def main() -> int:
    """CLI entry point: parse arguments, build containers, run commands.

    Modes (mutually exclusive, checked in order):
      --list          print supported platforms
      --platform P    build container for P and run the given command in it
      --all           build every platform and run its default build function
    :return: process exit status (non-zero on container command failure)
    """
    config_logging()
    logging.info("MXNet container based build tool.")
    log_environment()
    chdir_to_script_directory()

    parser = argparse.ArgumentParser(description="""Utility for building and testing MXNet on docker
    containers""", epilog="")
    parser.add_argument("-p", "--platform",
                        help="platform",
                        type=str)
    parser.add_argument("-b", "--build-only",
                        help="Only build the container, don't build the project",
                        action='store_true')
    parser.add_argument("-a", "--all",
                        help="build for all platforms",
                        action='store_true')
    parser.add_argument("-n", "--nvidiadocker",
                        help="Use nvidia docker",
                        action='store_true')
    parser.add_argument("--shm-size",
                        help="Size of the shared memory /dev/shm allocated in the container (e.g '1g')",
                        default='500m',
                        dest="shared_memory_size")
    parser.add_argument("-l", "--list",
                        help="List platforms",
                        action='store_true')
    parser.add_argument("--print-docker-run",
                        help="print docker run command for manual inspection",
                        action='store_true')
    parser.add_argument("-d", "--docker-registry",
                        help="Dockerhub registry name to retrieve cache from.",
                        default='mxnetci',
                        type=str)
    parser.add_argument("-r", "--docker-build-retries",
                        help="Number of times to retry building the docker image. Default is 1",
                        default=1,
                        type=int)
    parser.add_argument("--no-cache", action="store_true",
                        help="passes --no-cache to docker build")
    parser.add_argument("command",
                        help="command to run in the container",
                        nargs='*', action='append', type=str)
    parser.add_argument("--ccache-dir",
                        default=default_ccache_dir(),
                        help="ccache directory",
                        type=str)
    args = parser.parse_args()

    # argparse collects the positional command as a list of lists; flatten it.
    command = list(chain(*args.command))
    docker_binary = get_docker_binary(args.nvidiadocker)

    # Cleanup on signals and exit
    cleanup = Cleanup()

    def signal_handler(signum, _):
        signal.pthread_sigmask(signal.SIG_BLOCK, {signum})
        logging.warning("Signal %d received, cleaning up...", signum)
        cleanup()
        logging.warning("done. Exiting with error.")
        sys.exit(1)

    atexit.register(cleanup)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    if args.list:
        print(list_platforms())
    elif args.platform:
        platform = args.platform
        tag = get_docker_tag(platform=platform, registry=args.docker_registry)
        if args.docker_registry:
            load_docker_cache(tag=tag, docker_registry=args.docker_registry)
        build_docker(platform=platform, docker_binary=docker_binary, registry=args.docker_registry,
                     num_retries=args.docker_build_retries, no_cache=args.no_cache)
        if args.build_only:
            logging.warning("Container was just built. Exiting due to build-only.")
            return 0

        # noinspection PyUnusedLocal
        ret = 0
        if command:
            # Explicit command given on the CLI: run it inside the container.
            ret = container_run(
                platform=platform, nvidia_runtime=args.nvidiadocker,
                shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
                local_ccache_dir=args.ccache_dir, cleanup=cleanup)
        elif args.print_docker_run:
            command = []
            ret = container_run(
                platform=platform, nvidia_runtime=args.nvidiadocker,
                shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
                local_ccache_dir=args.ccache_dir, dry_run=True, cleanup=cleanup)
        else:
            # With no commands, execute a build function for the target platform
            command = ["/work/mxnet/ci/docker/runtime_functions.sh", "build_{}".format(platform)]
            logging.info("No command specified, trying default build: %s", ' '.join(command))
            ret = container_run(
                platform=platform, nvidia_runtime=args.nvidiadocker,
                shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
                local_ccache_dir=args.ccache_dir, cleanup=cleanup)

        if ret != 0:
            logging.critical("Execution of %s failed with status: %d", command, ret)
            return ret

    elif args.all:
        platforms = get_platforms()
        logging.info("Building for all architectures: %s", platforms)
        logging.info("Artifacts will be produced in the build/ directory.")
        for platform in platforms:
            tag = get_docker_tag(platform=platform, registry=args.docker_registry)
            load_docker_cache(tag=tag, docker_registry=args.docker_registry)
            build_docker(platform, docker_binary=docker_binary, registry=args.docker_registry,
                         num_retries=args.docker_build_retries, no_cache=args.no_cache)
            if args.build_only:
                continue
            # Start from a clean build dir, then archive the result per platform.
            shutil.rmtree(buildir(), ignore_errors=True)
            build_platform = "build_{}".format(platform)
            plat_buildir = os.path.abspath(os.path.join(get_mxnet_root(), '..',
                                                        "mxnet_{}".format(build_platform)))
            if os.path.exists(plat_buildir):
                logging.warning("%s already exists, skipping", plat_buildir)
                continue
            command = ["/work/mxnet/ci/docker/runtime_functions.sh", build_platform]
            container_run(
                platform=platform, nvidia_runtime=args.nvidiadocker,
                shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
                local_ccache_dir=args.ccache_dir, cleanup=cleanup)
            shutil.move(buildir(), plat_buildir)
            logging.info("Built files left in: %s", plat_buildir)

    else:
        # No mode selected: show help plus usage examples.
        parser.print_help()
        list_platforms()
        print("""
Examples:

./build.py -p armv7

    Will build a docker container with cross compilation tools and build MXNet for armv7 by
    running: ci/docker/runtime_functions.sh build_armv7 inside the container.

./build.py -p armv7 ls

    Will execute the given command inside the armv7 container

./build.py -p armv7 --print-docker-run

    Will print a docker run command to get inside the container in a shell

./build.py -a

    Builds for all platforms and leaves artifacts in build_<platform>

    """)

    return 0
if __name__ == '__main__':
    # Propagate the build/container exit status to the shell.
    sys.exit(main())
| 39.342657 | 135 | 0.625578 |
acf27926ff8a9d3d18818f104b66313e230ad45f | 5,670 | py | Python | src/graphbin/__init__.py | Vini2/GraphBin_0.1 | b52a3fadc8999ab93ef340d1ef8b0b9ee8478469 | [
"BSD-3-Clause"
] | null | null | null | src/graphbin/__init__.py | Vini2/GraphBin_0.1 | b52a3fadc8999ab93ef340d1ef8b0b9ee8478469 | [
"BSD-3-Clause"
] | null | null | null | src/graphbin/__init__.py | Vini2/GraphBin_0.1 | b52a3fadc8999ab93ef340d1ef8b0b9ee8478469 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""graphbin: Refined binning of metagenomic contigs using assembly graphs."""
import os
import sys
from graphbin.utils import (
graphbin_Canu,
graphbin_Flye,
graphbin_MEGAHIT,
graphbin_Miniasm,
graphbin_Options,
graphbin_SGA,
graphbin_SPAdes,
)
__author__ = "Vijini Mallawaarachchi"
__copyright__ = "Copyright 2019-2022, GraphBin Project"
__credits__ = ["Vijini Mallawaarachchi", "Anuradha Wickramarachchi", "Yu Lin"]
__license__ = "BSD-3"
__version__ = "1.6.0"
__maintainer__ = "Vijini Mallawaarachchi"
__email__ = "vijini.mallawaarachchi@anu.edu.au"
__status__ = "Production"
def run(args):
    """Dispatch to the runner for the assembler named in args.assembler."""
    runners = {
        "canu": graphbin_Canu.run,
        "flye": graphbin_Flye.run,
        "megahit": graphbin_MEGAHIT.run,
        "miniasm": graphbin_Miniasm.run,
        "sga": graphbin_SGA.run,
        "spades": graphbin_SPAdes.run,
    }
    runner = runners[args.assembler]
    runner(args)
def main():
    """GraphBin CLI entry point: parse and validate arguments, then run.

    Exits with status 1 on any invalid/missing input; status 0 after
    printing the version. All filesystem checks happen up front so the
    per-assembler runner can assume valid inputs.
    """
    parser = graphbin_Options.PARSER
    parser.add_argument(
        "--assembler",
        type=str,
        help="name of the assembler used (SPAdes, SGA or MEGAHIT). GraphBin supports Flye, Canu and Miniasm long-read assemblies as well.",
        default="",
    )
    parser.add_argument(
        "--paths",
        default=None,
        required=False,
        help="path to the contigs.paths file, only needed for SPAdes",
    )
    parser.add_argument(
        "--contigs", default=None, help="path to the contigs.fa file.",
    )
    parser.add_argument(
        "--delimiter",
        required=False,
        type=str,
        default=",",
        help="delimiter for input/output results. Supports a comma (,), a semicolon (;), a tab ($'\\t'), a space (\" \") and a pipe (|) [default: , (comma)]",
    )
    args = parser.parse_args()

    if args.version:
        print("GraphBin version %s" % __version__)
        sys.exit(0)

    # Validation of inputs
    # ---------------------------------------------------
    # Check assembler type
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    args.assembler = args.assembler.lower()
    if not (
        args.assembler.lower() == "spades"
        or args.assembler.lower() == "sga"
        or args.assembler.lower() == "megahit"
        or args.assembler.lower() == "flye"
        or args.assembler.lower() == "canu"
        or args.assembler.lower() == "miniasm"
    ):
        print(
            "\nPlease make sure to provide the correct assembler type (SPAdes, SGA or MEGAHIT). GraphBin supports Flye, Canu and Miniasm long-read assemblies as well."
        )
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Check assembly graph file
    if not os.path.exists(args.graph):
        print("\nFailed to open the assembly graph file.")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Check if paths files is provided when the assembler type is SPAdes
    if args.assembler.lower() == "spades" and args.paths is None:
        print("\nPlease make sure to provide the path to the contigs.paths file.")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Check contigs.paths file for SPAdes
    if args.assembler.lower() == "spades" and not os.path.exists(args.paths):
        print("\nFailed to open the contigs.paths file.")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Check if contigs.fa files is provided
    if args.contigs is None:
        print("\nPlease make sure to provide the path to the contigs file.")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Check contigs file
    if not os.path.exists(args.contigs):
        print("\nFailed to open the contigs file.")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Check the file with the initial binning output
    if not os.path.exists(args.binned):
        print("\nFailed to open the file with the initial binning output.")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Handle for missing trailing forwardslash in output folder path
    if args.output[-1:] != "/":
        args.output = args.output + "/"

    # Create output folder if it does not exist
    os.makedirs(args.output, exist_ok=True)

    # Validate prefix: a non-empty prefix always ends with "_".
    if args.prefix != "":
        if not args.prefix.endswith("_"):
            args.prefix = args.prefix + "_"

    # Validate delimiter
    delimiters = [",", ";", " ", "\t", "|"]
    if args.delimiter not in delimiters:
        print("\nPlease enter a valid delimiter")
        print("Exiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Validate max_iteration
    if args.max_iteration <= 0:
        print("\nPlease enter a valid number for max_iteration")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Validate diff_threshold
    if args.diff_threshold < 0:
        print("\nPlease enter a valid number for diff_threshold")
        print("\nExiting GraphBin...\nBye...!\n")
        sys.exit(1)

    # Remove previous files if they exist
    if os.path.exists(args.output + args.prefix + "graphbin.log"):
        os.remove(args.output + args.prefix + "graphbin.log")
    if os.path.exists(args.output + args.prefix + "graphbin_output.csv"):
        os.remove(args.output + args.prefix + "graphbin_output.csv")
    if os.path.exists(args.output + args.prefix + "graphbin_unbinned.csv"):
        os.remove(args.output + args.prefix + "graphbin_unbinned.csv")

    # Run GraphBin
    # ---------------------------------------------------
    run(args)
if __name__ == "__main__":
    # Allow running the package module directly as a script.
    main()
| 30.320856 | 167 | 0.60582 |
acf2792f2acc7429e5ee3ca0fa6d08ac805bd0f8 | 14,825 | py | Python | test/test_modelzoo.py | pluradj/onnx-tensorflow | 9a5801b68ea9dd4d92dcedce1643c0fdaad7f33a | [
"Apache-2.0"
] | null | null | null | test/test_modelzoo.py | pluradj/onnx-tensorflow | 9a5801b68ea9dd4d92dcedce1643c0fdaad7f33a | [
"Apache-2.0"
] | null | null | null | test/test_modelzoo.py | pluradj/onnx-tensorflow | 9a5801b68ea9dd4d92dcedce1643c0fdaad7f33a | [
"Apache-2.0"
] | null | null | null | """Generates a testing report for ONNX-TF with the ONNX ModelZoo models.
ONNX models found in the ModelZoo directory will be pulled down from
GitHub via `git lfs` (if necessary). The ONNX model will be validated
and converted to a TensorFlow model using ONNX-TensorFlow. A summary
of the conversion will be concatenated into a Markdown-formatted report.
Functions
---------
modelzoo_report(models_dir='models', output_dir=tempfile.gettempdir(),
include=None, verbose=False, dry_run=False)
"""
import argparse
import datetime
import math
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import onnx
import tensorflow as tf
import onnx_tf
# Reference matrix on ONNX version, File format version, Opset versions
# https://github.com/onnx/onnx/blob/master/docs/Versioning.md#released-versions
# Module-wide configuration dict (verbose, dry_run, models_dir, include,
# report_filename, ...); populated by the CLI entry point and read throughout.
_CFG = {}
class Results:
    """Tracks the detailed status and counts for the report."""

    def __init__(self):
        self.details = []
        # Per-outcome counters shown in the summary table.
        self.model_count = 0
        self.total_count = 0
        self.pass_count = 0
        self.warn_count = 0
        self.fail_count = 0
        self.skip_count = 0

    def append_detail(self, line):
        """Append a line of detailed status."""
        self.details.append(line)

    @classmethod
    def _report(cls, line):
        """Echo a line when verbose and append it to the report file."""
        if _CFG['verbose']:
            print(line)
        if not _CFG['dry_run']:
            with open(_CFG['report_filename'], 'a') as report_file:
                report_file.write(line + '\n')

    def generate_report(self):
        """Generate the report file."""
        dry_suffix = ' (dry_run)' if _CFG['dry_run'] else ''
        if _CFG['verbose']:
            print('Writing {}{}\n'.format(_CFG['report_filename'], dry_suffix))
        timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        self._report('*Report generated at {}{}.*'.format(
            timestamp, _CFG['github_actions_md']))
        report_lines = [
            '\n## Environment',
            'Package | Version',
            '---- | -----',
            'Platform | {}'.format(platform.platform()),
            'Python | {}'.format(sys.version.replace('\n', ' ')),
            'onnx | {}'.format(onnx.__version__),
            'onnx-tf | {}'.format(_CFG['onnx_tf_version_md']),
            'tensorflow | {}'.format(tf.__version__),
            '\n## Summary',
            'Value | Count',
            '---- | -----',
            'Models | {}'.format(self.model_count),
            'Total | {}'.format(self.total_count),
            ':heavy_check_mark: Passed | {}'.format(self.pass_count),
            ':warning: Limitation | {}'.format(self.warn_count),
            ':x: Failed | {}'.format(self.fail_count),
            ':heavy_minus_sign: Skipped | {}'.format(self.skip_count),
            '\n## Details',
            '\n'.join(self.details),
            '',
        ]
        for line in report_lines:
            self._report(line)

    def summary(self):
        """Return the report summary (counts, report location) as a string."""
        dry_suffix = ' (dry_run)' if _CFG['dry_run'] else ''
        template = ('Total: {}, Passed: {}, Limitation: {}, Failed: {}, '
                    'Skipped: {}\nReport: {}{}')
        return template.format(self.total_count, self.pass_count,
                               self.warn_count, self.fail_count,
                               self.skip_count, _CFG['report_filename'],
                               dry_suffix)
def _pull_model_file(file_path):
    """Use Git LFS to pull down a large file.

    - If the model file is around ~130B, it's just a file pointer.
      We'll download the file to test, then delete it afterwards
      to minimize disk utilization (important in CI environment).
    - If you previously downloaded the file, the file will remain
      in place after processing. In your local environment, make
      sure to pull the models you test often to avoid repetitive
      downloads.

    :param file_path: model path relative to ``_CFG['models_dir']``
    :returns: tuple ``(file_size, pulled)`` — final size in bytes, and
        True when this call downloaded the file on demand.
    """
    model_path = os.path.join(_CFG['models_dir'], file_path)
    file_size = os.stat(model_path).st_size
    pulled = False
    if file_size <= 150:
        # A file this small is assumed to be an LFS pointer, not model data;
        # need to pull the model file on-demand using git lfs.
        if _CFG['verbose']:
            print('Pulling {}{}'.format(file_path,
                                        ' (dry_run)' if _CFG['dry_run'] else ''))
        if not _CFG['dry_run']:
            # -I includes just this file; -X "" clears any exclude pattern.
            cmd_args = 'git lfs pull -I {} -X ""'.format(file_path)
            subprocess.run(cmd_args,
                           cwd=_CFG['models_dir'],
                           shell=True,
                           check=True,
                           stdout=subprocess.DEVNULL)
            # A size change means the pointer was replaced with real content.
            new_size = os.stat(model_path).st_size
            pulled = new_size != file_size
            file_size = new_size
    return (file_size, pulled)
def _revert_model_pointer(file_path):
    """Remove downloaded model, revert to pointer, remove cached file.

    Deletes the working copy, restores the LFS pointer via git checkout,
    then deletes the blob cached under .git/lfs by matching the pointer's oid.
    """
    cmd_args = ('rm -f {0} && '
                'git checkout {0} && '
                'rm -f $(find . | grep $(grep oid {0} | cut -d ":" -f 2))'
                ).format(file_path)
    subprocess.run(cmd_args,
                   cwd=_CFG['models_dir'],
                   shell=True,
                   check=True,
                   stdout=subprocess.DEVNULL)
def _include_model(file_path):
    """Return True when ``file_path`` passes the include filter (or none is set).

    A filter entry matches as a path prefix, as a bare model name
    (``<entry>.onnx`` suffix), or as a ``/<entry>/model/`` path component.
    """
    include = _CFG['include']
    if include is None:
        return True
    return any(
        file_path.startswith(entry)
        or file_path.endswith(entry + '.onnx')
        or '/{}/model/'.format(entry) in file_path
        for entry in include
    )
def _has_models(dir_path):
    """Return True if ``dir_path``'s ``model`` subdir holds an included .onnx file."""
    model_dir = os.path.join(_CFG['models_dir'], dir_path, 'model')
    return any(
        entry.endswith('.onnx')
        and _include_model(os.path.join(dir_path, 'model', entry))
        for entry in os.listdir(model_dir)
    )
def _del_location(loc):
    """Delete a file or directory tree at ``loc``; no-op in dry_run or if absent."""
    if _CFG['dry_run'] or not os.path.exists(loc):
        return
    if os.path.isdir(loc):
        shutil.rmtree(loc)
    else:
        os.remove(loc)
def _size_with_units(size):
if size < 1024:
units = '{}B'.format(size)
elif size < math.pow(1024, 2):
units = '{}K'.format(round(size / 1024))
elif size < math.pow(1024, 3):
units = '{}M'.format(round(size / math.pow(1024, 2)))
else:
units = '{}G'.format(round(size / math.pow(1024, 3)))
return units
def _report_check_model(model):
    """Use ONNX checker to test if model is valid; return '' or an error string."""
    try:
        onnx.checker.check_model(model)
    except Exception as ex:
        # Keep only the first line of the (possibly multi-line) message.
        first_line = str(ex).strip().split('\n')[0].strip()
        return '{}: {}'.format(type(ex).__name__, first_line)
    return ''
def _report_convert_model(model):
    """Test ONNX-TF conversion; return '' on success or an error summary string."""
    try:
        tf_rep = onnx_tf.backend.prepare(model)
        tf_rep.export_graph(_CFG['output_filename'])
        # Converted model served its purpose; free the disk space immediately.
        _del_location(_CFG['output_filename'])
        return ''
    except Exception as ex:
        _del_location(_CFG['output_filename'])
        strack_trace = str(ex).strip().split('\n')
        if len(strack_trace) > 1:
            # Multi-line message: report only the final (most specific) line.
            err_msg = strack_trace[-1].strip()
            # OpUnsupportedException gets raised as a RuntimeError
            if 'OP_UNSUPPORTED_EXCEPT' in str(ex):
                err_msg = err_msg.replace(type(ex).__name__, 'OpUnsupportedException')
            return err_msg
        return '{}: {}'.format(type(ex).__name__, strack_trace[0].strip())
def _report_model(file_path, results=None, onnx_model_count=1):
    """Generate a report status for a single model, and append it to results.

    :param file_path: model path relative to ``_CFG['models_dir']``
    :param results: Results accumulator; a fresh one is created when omitted.
        (Fix: the previous mutable default ``results=Results()`` was created
        once at definition time and shared across calls, so counts leaked
        between invocations that relied on the default.)
    :param onnx_model_count: 1-based index of this model within its table
    """
    if results is None:
        results = Results()
    size_pulled = _pull_model_file(file_path)
    if _CFG['dry_run']:
        # Dry run: record the row as skipped without validating/converting.
        ir_version = ''
        opset_version = ''
        check_err = ''
        convert_err = ''
        emoji_validated = ''
        emoji_converted = ''
        emoji_overall = ':heavy_minus_sign:'
        results.skip_count += 1
    else:
        if _CFG['verbose']:
            print('Testing', file_path)
        model = onnx.load(os.path.join(_CFG['models_dir'], file_path))
        ir_version = model.ir_version
        opset_version = model.opset_import[0].version
        check_err = _report_check_model(model)
        # Only attempt conversion when validation succeeded.
        convert_err = '' if check_err else _report_convert_model(model)
        if not check_err and not convert_err:
            # https://github-emoji-list.herokuapp.com/
            # validation and conversion passed
            emoji_validated = ':ok:'
            emoji_converted = ':ok:'
            emoji_overall = ':heavy_check_mark:'
            results.pass_count += 1
        elif not check_err:
            # validation passed, but conversion did not
            emoji_validated = ':ok:'
            emoji_converted = convert_err
            if ('BackendIsNotSupposedToImplementIt' in convert_err or
                    'OpUnsupportedException' in convert_err):
                # known limitations
                # - BackendIsNotSupposedToImplementIt: Op not implemented
                # - OpUnsupportedException: TensorFlow limitation
                emoji_overall = ':warning:'
                results.warn_count += 1
            else:
                # conversion failed
                emoji_overall = ':x:'
                results.fail_count += 1
        else:
            # validation failed
            emoji_validated = check_err
            emoji_converted = ':heavy_minus_sign:'
            emoji_overall = ':x:'
            results.fail_count += 1
    results.append_detail('{} | {}. {} | {} | {} | {} | {} | {}'.format(
        emoji_overall, onnx_model_count, file_path[file_path.rindex('/') + 1:],
        _size_with_units(size_pulled[0]), ir_version, opset_version,
        emoji_validated, emoji_converted))
    if size_pulled[1]:
        # only remove model if it was pulled above on-demand
        _revert_model_pointer(file_path)
def _configure(models_dir='models',
               output_dir=tempfile.gettempdir(),
               include=None,
               verbose=False,
               dry_run=False):
    """Validate the configuration and populate the module-level ``_CFG`` dict.

    :raises NotADirectoryError: when models_dir or output_dir does not exist
    :raises subprocess.CalledProcessError: when the git-lfs extension is absent
    """
    if not os.path.isdir(models_dir):
        raise NotADirectoryError(models_dir)
    if not os.path.isdir(output_dir):
        raise NotADirectoryError(output_dir)
    # Probe for git-lfs up front; check=True fails fast if it is missing.
    subprocess.run('git lfs', shell=True, check=True, stdout=subprocess.DEVNULL)
    _CFG['models_dir'] = os.path.normpath(models_dir)
    # `include` may arrive as a comma-separated string or an already-split list.
    _CFG['include'] = include.split(',') \
        if isinstance(include, str) else include
    _CFG['verbose'] = verbose
    _CFG['dry_run'] = dry_run
    _configure_env()
    norm_output_dir = os.path.normpath(output_dir)
    _CFG['output_filename'] = os.path.join(norm_output_dir, 'tmp_model.pb')
    # _configure_env() chose the report's base name; anchor it in output_dir.
    _CFG['report_filename'] = os.path.join(norm_output_dir,
                                           _CFG['report_filename'])
def _configure_env():
    """Set additional configuration based on (GitHub Actions) environment variables."""
    # Quiet TensorFlow's C++ logging (level 3 = errors only).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    ref = os.getenv('GITHUB_REF')
    repo = os.getenv('GITHUB_REPOSITORY')
    sha = os.getenv('GITHUB_SHA')
    run_id = os.getenv('GITHUB_RUN_ID')
    if ref and '/' in ref:
        # refs/heads/<branch> or refs/tags/<tag>: encode both in the file name.
        ref_type = 'tag' if '/tags/' in ref else 'branch'
        ref_name = ref[str(ref).rindex('/') + 1:]
        report_md = 'ModelZoo-Status-({}={}).md'.format(ref_type, ref_name)
    else:
        report_md = 'ModelZoo-Status.md'
    _CFG['report_filename'] = report_md
    if repo:
        # actions ([run_id](url))
        actions_url = 'https://github.com/{}/actions'.format(repo)
        _CFG['github_actions_md'] = ' via [GitHub Actions]({})'.format(actions_url)
        if run_id:
            run_link = ' ([{0}]({1}/runs/{0}))'.format(run_id, actions_url)
            _CFG['github_actions_md'] += run_link
    else:
        _CFG['github_actions_md'] = ''
    _CFG['onnx_tf_version_md'] = onnx_tf.version.version
    if sha and repo:
        # version ([sha](url))
        commit_url = 'https://github.com/{}/commit/{}'.format(repo, sha)
        _CFG['onnx_tf_version_md'] += ' ([{}]({}))'.format(sha[0:7], commit_url)
def modelzoo_report(models_dir='models',
                    output_dir=tempfile.gettempdir(),
                    include=None,
                    verbose=False,
                    dry_run=False):
    """Generate a testing report for the models found in the given directory.

    ONNX models found in the ModelZoo directory will be pulled down from
    GitHub via `git lfs` (if necessary). The ONNX model will be validated
    and converted to a TensorFlow model using ONNX-TensorFlow. A summary
    of the conversion will be concatenated into a Markdown-formatted report.

    Args:
      models_dir: directory that contains ONNX models
      output_dir: directory for the generated report and converted model
      include: comma-separated list of models or paths to include
      verbose: verbose output
      dry_run: process directory without doing conversion

    Returns:
      Results object containing detailed status and counts for the report.
    """
    _configure(models_dir, output_dir, include, verbose, dry_run)
    # Start from a clean slate: remove any stale report/converted-model files.
    _del_location(_CFG['report_filename'])
    _del_location(_CFG['output_filename'])
    # run tests first, but append to report after summary
    results = Results()
    for root, subdir, files in os.walk(_CFG['models_dir']):
        subdir.sort()  # in-place sort gives a deterministic traversal order
        if 'model' in subdir:
            # `root` is a ModelZoo entry whose 'model' subdir holds .onnx files:
            # emit the per-model section header before its files are visited.
            dir_path = os.path.relpath(root, _CFG['models_dir'])
            if _has_models(dir_path):
                results.model_count += 1
                results.append_detail('')
                results.append_detail('### {}. {}'.format(results.model_count,
                                                          os.path.basename(root)))
                results.append_detail(dir_path)
                results.append_detail('')
                results.append_detail(
                    'Status | Model | Size | IR | Opset | ONNX Checker | '
                    'ONNX-TF Converted')
                results.append_detail(
                    '------ | ----- | ---- | -- | ----- | ------------ | '
                    '---------')
                onnx_model_count = 0
        # Walking top-down, the .onnx files themselves show up when the
        # 'model' directory is visited (after its parent set the header).
        for item in sorted(files):
            if item.endswith('.onnx'):
                file_path = os.path.relpath(os.path.join(root, item),
                                            _CFG['models_dir'])
                if _include_model(file_path):
                    onnx_model_count += 1
                    results.total_count += 1
                    _report_model(file_path, results, onnx_model_count)
    return results
if __name__ == '__main__':
    # Command-line entry point: parse options, run the report, print a summary.
    tempdir = tempfile.gettempdir()
    parser = argparse.ArgumentParser(
        description=('Test converting ONNX ModelZoo models to TensorFlow. '
                     'Prerequisite: `git lfs`'))
    parser.add_argument('-m',
                        '--models',
                        default='models',
                        help=('ONNX ModelZoo directory (default: models)'))
    parser.add_argument('-o',
                        '--output',
                        default=tempdir,
                        help=('output directory (default: {})'.format(tempdir)))
    parser.add_argument(
        '-i',
        '--include',
        help=('comma-separated list of models or paths to include. '
              'Use `git lfs pull` to cache frequently tested models.'))
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help=('verbose output'))
    parser.add_argument('--dry-run',
                        action='store_true',
                        help=('process directory without doing conversion'))
    args = parser.parse_args()
    report = modelzoo_report(args.models, args.output, args.include, args.verbose,
                             args.dry_run)
    # Write the Markdown report, then echo the one-line summary to stdout.
    report.generate_report()
    print(report.summary())
| 35.213777 | 80 | 0.628668 |
acf27a345f0b3837f155e3b919a5861698d4c4b2 | 128 | py | Python | src/peregrinus/world/celestial/moon.py | tom65536/sabio | 406ef2f680c2c7b2b075250d060e223e6b3c55a9 | [
"Apache-2.0"
] | null | null | null | src/peregrinus/world/celestial/moon.py | tom65536/sabio | 406ef2f680c2c7b2b075250d060e223e6b3c55a9 | [
"Apache-2.0"
] | null | null | null | src/peregrinus/world/celestial/moon.py | tom65536/sabio | 406ef2f680c2c7b2b075250d060e223e6b3c55a9 | [
"Apache-2.0"
] | null | null | null | """Data model of a moon."""
from . import base
# Specializes the generic ColdCelestialBody base with its own type parameter;
# all behavior is inherited from the base class.
class Moon(base.ColdCelestialBody['Moon']):
    """Class describing a moon."""
| 18.285714 | 43 | 0.65625 |
acf27a3a294830104aecc59f6789508f66bcdcf7 | 10,076 | py | Python | vendor-local/lib/python/autoslug/tests.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | vendor-local/lib/python/autoslug/tests.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | vendor-local/lib/python/autoslug/tests.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008—2012 Andy Mikhailenko
#
# This file is part of django-autoslug.
#
# django-autoslug is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README for copying conditions.
#
# python
import datetime
# django
from django.db.models import Model, CharField, DateField, ForeignKey, Manager
# this app
from autoslug.settings import slugify as default_slugify
from autoslug import AutoSlugField
# Baseline case: AutoSlugField with no arguments (doctest shows the resulting slug).
class SimpleModel(Model):
    """
    >>> a = SimpleModel(name='test')
    >>> a.save()
    >>> a.slug
    'simplemodel'
    """
    name = CharField(max_length=200)
    slug = AutoSlugField()
# unique=True resolves collisions by appending -2, -3, ... (see doctest).
class ModelWithUniqueSlug(Model):
    """
    >>> greeting = 'Hello world!'
    >>> a = ModelWithUniqueSlug(name=greeting)
    >>> a.save()
    >>> a.slug
    u'hello-world'
    >>> b = ModelWithUniqueSlug(name=greeting)
    >>> b.save()
    >>> b.slug
    u'hello-world-2'
    """
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from='name', unique=True)
# Uniqueness scoped by a field reachable through a FK (simple_model__name):
# slugs may repeat across different related names, and re-saving after the
# related name changes re-resolves the collision (see doctest).
class ModelWithUniqueSlugFK(Model):
    """
    >>> sm1 = SimpleModel.objects.create(name='test')
    >>> sm2 = SimpleModel.objects.create(name='test')
    >>> sm3 = SimpleModel.objects.create(name='test2')
    >>> greeting = 'Hello world!'
    >>> a = ModelWithUniqueSlugFK.objects.create(name=greeting, simple_model=sm1)
    >>> a.slug
    u'hello-world'
    >>> b = ModelWithUniqueSlugFK.objects.create(name=greeting, simple_model=sm2)
    >>> b.slug
    u'hello-world-2'
    >>> c = ModelWithUniqueSlugFK.objects.create(name=greeting, simple_model=sm3)
    >>> c.slug
    u'hello-world'
    >>> d = ModelWithUniqueSlugFK.objects.create(name=greeting, simple_model=sm1)
    >>> d.slug
    u'hello-world-3'
    >>> sm3.name = 'test'
    >>> sm3.save()
    >>> c.slug
    u'hello-world'
    >>> c.save()
    >>> c.slug
    u'hello-world-4'
    """
    name = CharField(max_length=200)
    simple_model = ForeignKey(SimpleModel)
    slug = AutoSlugField(populate_from='name', unique_with='simple_model__name')
# unique_with a plain DateField: slugs only collide within the same date.
class ModelWithUniqueSlugDate(Model):
    """
    >>> a = ModelWithUniqueSlugDate(slug='test', date=datetime.date(2009,9,9))
    >>> b = ModelWithUniqueSlugDate(slug='test', date=datetime.date(2009,9,9))
    >>> c = ModelWithUniqueSlugDate(slug='test', date=datetime.date(2009,9,10))
    >>> for m in a,b,c:
    ...     m.save()
    >>> a.slug
    u'test'
    >>> b.slug
    u'test-2'
    >>> c.slug
    u'test'
    """
    date = DateField()
    slug = AutoSlugField(unique_with='date')
# Explicit date__day lookup; equivalent to the plain 'date' form above.
class ModelWithUniqueSlugDay(Model):  # same as ...Date, just more explicit
    """
    >>> a = ModelWithUniqueSlugDay(slug='test', date=datetime.date(2009, 9, 9))
    >>> b = ModelWithUniqueSlugDay(slug='test', date=datetime.date(2009, 9, 9))
    >>> c = ModelWithUniqueSlugDay(slug='test', date=datetime.date(2009, 9, 10))
    >>> for m in a,b,c:
    ...     m.save()
    >>> a.slug
    u'test'
    >>> b.slug
    u'test-2'
    >>> c.slug
    u'test'
    """
    date = DateField()
    slug = AutoSlugField(unique_with='date__day')
# Month granularity: same month collides (b), a different month does not (c).
class ModelWithUniqueSlugMonth(Model):
    """
    >>> a = ModelWithUniqueSlugMonth(slug='test', date=datetime.date(2009, 9, 9))
    >>> b = ModelWithUniqueSlugMonth(slug='test', date=datetime.date(2009, 9, 10))
    >>> c = ModelWithUniqueSlugMonth(slug='test', date=datetime.date(2009, 10, 9))
    >>> for m in a,b,c:
    ...     m.save()
    >>> a.slug
    u'test'
    >>> b.slug
    u'test-2'
    >>> c.slug
    u'test'
    """
    date = DateField()
    slug = AutoSlugField(unique_with='date__month')
# Year granularity: same year collides (b), a different year does not (c).
class ModelWithUniqueSlugYear(Model):
    """
    >>> a = ModelWithUniqueSlugYear(slug='test', date=datetime.date(2009, 9, 9))
    >>> b = ModelWithUniqueSlugYear(slug='test', date=datetime.date(2009, 10, 9))
    >>> c = ModelWithUniqueSlugYear(slug='test', date=datetime.date(2010, 9, 9))
    >>> for m in a,b,c:
    ...     m.save()
    >>> a.slug
    u'test'
    >>> b.slug
    u'test-2'
    >>> c.slug
    u'test'
    """
    date = DateField()
    slug = AutoSlugField(unique_with='date__year')
# Slugs are truncated to the field's max_length (AutoSlugField default: 50).
class ModelWithLongName(Model):
    """
    >>> long_name = 'x' * 250
    >>> a = ModelWithLongName(name=long_name)
    >>> a.save()
    >>> len(a.slug)  # original slug is cropped by field length
    50
    """
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from='name')
# Truncation and uniqueness combined: the -2 suffix still fits in 50 chars.
class ModelWithLongNameUnique(Model):
    """
    >>> long_name = 'x' * 250
    >>> a = ModelWithLongNameUnique(name=long_name)
    >>> a.save()
    >>> len(a.slug)  # original slug is cropped by field length
    50
    >>> b = ModelWithLongNameUnique(name=long_name)
    >>> b.save()
    >>> b.slug[-3:]  # uniqueness is forced
    u'x-2'
    >>> len(b.slug)  # slug is cropped
    50
    """
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from='name', unique=True)
# populate_from may be a callable taking the instance and returning the raw text.
class ModelWithCallable(Model):
    """
    >>> a = ModelWithCallable.objects.create(name='larch')
    >>> a.slug
    u'the-larch'
    """
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from=lambda instance: u'the %s' % instance.name)
# populate_from may also name a model method (resolved by attribute lookup).
class ModelWithCallableAttr(Model):
    """
    >>> a = ModelWithCallableAttr.objects.create(name='albatross')
    >>> a.slug
    u'spam-albatross-and-spam'
    """
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from='get_name')

    def get_name(self):
        # Source text for the slug; slugification strips the punctuation.
        return u'spam, %s and spam' % self.name
# A non-default primary key must not break slug generation or uniqueness checks.
class ModelWithCustomPrimaryKey(Model):
    """
    # just check if models are created without exceptions
    >>> a = ModelWithCustomPrimaryKey.objects.create(custom_primary_key='a',
    ...     name='name used in slug')
    >>> b = ModelWithCustomPrimaryKey.objects.create(custom_primary_key='b',
    ...     name='name used in slug')
    >>> a.slug
    u'name-used-in-slug'
    """
    custom_primary_key = CharField(primary_key=True, max_length=1)
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from='name', unique=True)
custom_slugify = lambda value: default_slugify(value).replace('-', '_')
# A custom slugify callable is applied before the uniqueness suffix is added.
class ModelWithCustomSlugifier(Model):
    """
    >>> a = ModelWithCustomSlugifier.objects.create(slug='hello world!')
    >>> b = ModelWithCustomSlugifier.objects.create(slug='hello world!')
    >>> b.slug
    u'hello_world-2'
    """
    slug = AutoSlugField(unique=True, slugify=custom_slugify)
# sep controls only the separator before the uniqueness counter ('_2' here).
class ModelWithCustomSeparator(Model):
    """
    >>> a = ModelWithCustomSeparator.objects.create(slug='hello world!')
    >>> b = ModelWithCustomSeparator.objects.create(slug='hello world!')
    >>> b.slug
    u'hello-world_2'
    """
    slug = AutoSlugField(unique=True, sep='_')
# Misconfiguration test: unique_with pointing at the slug field itself
# must raise ValueError on save (doctest checks the exact message).
class ModelWithReferenceToItself(Model):
    """
    >>> a = ModelWithReferenceToItself(slug='test')
    >>> a.save()
    Traceback (most recent call last):
    ...
    ValueError: Attribute ModelWithReferenceToItself.slug references itself \
in `unique_with`. Please use "unique=True" for this case.
    """
    slug = AutoSlugField(unique_with='slug')
# Misconfiguration test: unique_with naming a non-existent field raises ValueError.
class ModelWithWrongReferencedField(Model):
    """
    >>> a = ModelWithWrongReferencedField(slug='test')
    >>> a.save()
    Traceback (most recent call last):
    ...
    ValueError: Could not find attribute ModelWithWrongReferencedField.wrong_field \
referenced by ModelWithWrongReferencedField.slug (see constraint `unique_with`)
    """
    slug = AutoSlugField(unique_with='wrong_field')
# Misconfiguration test: an unresolvable lookup suffix (name__foo) raises ValueError.
class ModelWithWrongLookupInUniqueWith(Model):
    """
    >>> a = ModelWithWrongLookupInUniqueWith(name='test', slug='test')
    >>> a.save()
    Traceback (most recent call last):
    ...
    ValueError: Could not resolve lookup "name__foo" in `unique_with` of \
ModelWithWrongLookupInUniqueWith.slug
    """
    slug = AutoSlugField(unique_with='name__foo')
    name = CharField(max_length=10)
# Misconfiguration test: the slug field is declared before the field it is
# unique_with, so the dependency is still empty at save time -> ValueError.
class ModelWithWrongFieldOrder(Model):
    """
    >>> a = ModelWithWrongFieldOrder(slug='test')
    >>> a.save()
    Traceback (most recent call last):
    ...
    ValueError: Could not check uniqueness of ModelWithWrongFieldOrder.slug with \
respect to ModelWithWrongFieldOrder.date because the latter is empty. Please \
ensure that "slug" is declared *after* all fields listed in unique_with.
    """
    slug = AutoSlugField(unique_with='date')
    date = DateField(blank=False, null=False)
# A nullable unique_with dependency is acceptable: empty dates still get
# collision suffixes (contrast with ModelWithWrongFieldOrder above).
class ModelWithAcceptableEmptyDependency(Model):
    """
    >>> model = ModelWithAcceptableEmptyDependency
    >>> instances = [model.objects.create(slug='hello') for x in range(0,2)]
    >>> [x.slug for x in model.objects.all()]
    [u'hello', u'hello-2']
    """
    date = DateField(blank=True, null=True)
    slug = AutoSlugField(unique_with='date')
# always_update=True regenerates the slug on every save when the source changes.
class ModelWithAutoUpdateEnabled(Model):
    """
    >>> a = ModelWithAutoUpdateEnabled(name='My name')
    >>> a.save()
    >>> a.slug
    u'my-name'
    >>> a.name = 'My new name'
    >>> a.save()
    >>> a.slug
    u'my-new-name'
    """
    name = CharField(max_length=200)
    slug = AutoSlugField(populate_from='name', always_update=True)
# Subclassing without a shared manager: each class checks uniqueness only
# against its own rows, so the shared column violates the DB constraint.
class ModelWithSlugSpaceSharedIntegrityError(ModelWithUniqueSlug):
    """
    >>> a = ModelWithUniqueSlug(name='My name')
    >>> a.save()
    >>> b = ModelWithSlugSpaceSharedIntegrityError(name='My name')
    >>> b.save()
    Traceback (most recent call last):
    ...
    IntegrityError: column slug is not unique
    """
# Base model that pins the uniqueness check to its own manager, so subclass
# rows are considered too (contrast with the IntegrityError case above).
class SharedSlugSpace(Model):
    objects = Manager()
    name = CharField(max_length=200)
    # ensure that any subclasses use the base model's manager for testing
    # slug uniqueness
    slug = AutoSlugField(populate_from='name', unique=True, manager=objects)
# With the manager shared via the base class, the subclass gets a -2 suffix
# instead of an IntegrityError.
class ModelWithSlugSpaceShared(SharedSlugSpace):
    """
    >>> a = SharedSlugSpace(name='My name')
    >>> a.save()
    >>> a.slug
    u'my-name'
    >>> b = ModelWithSlugSpaceShared(name='My name')
    >>> b.save()
    >>> b.slug
    u'my-name-2'
    """
| 28.788571 | 84 | 0.633882 |
acf27a9842726c9e092a16d45d14b3a4285ec06b | 1,453 | py | Python | scripts/shelf/horizonLine.py | kohyuk91/hkTools | 0125c486b32375fb1dd30465f892e6bd23c07114 | [
"BSD-3-Clause"
] | 10 | 2020-04-30T21:48:07.000Z | 2022-03-07T04:02:50.000Z | scripts/shelf/horizonLine.py | kohyuk91/hkTools | 0125c486b32375fb1dd30465f892e6bd23c07114 | [
"BSD-3-Clause"
] | 2 | 2020-04-27T01:55:31.000Z | 2021-01-28T06:30:29.000Z | scripts/shelf/horizonLine.py | kohyuk91/mayaMatchmoveTools | 0125c486b32375fb1dd30465f892e6bd23c07114 | [
"BSD-3-Clause"
] | 1 | 2020-11-20T06:53:35.000Z | 2020-11-20T06:53:35.000Z | # Author : HYUK KO | kohyuk91@gmail.com | github.com/kohyuk91
import maya.cmds as mc
import maya.OpenMaya as om
import maya.OpenMayaUI as omui
def getActive3dViewCam():
active3dView = omui.M3dView.active3dView()
active3dViewCamDagPath = om.MDagPath()
active3dView.getCamera(active3dViewCamDagPath)
active3dViewCamShape = active3dViewCamDagPath.fullPathName()
active3dViewCamTrans = mc.listRelatives(active3dViewCamShape, parent=True, fullPath=True)[0]
return active3dViewCamShape, active3dViewCamTrans
def main():
if mc.objExists("*horizonLine*"):
mc.delete("*horizonLine*") # Delete existing "horizonLine"
return
active3dViewCamShape, active3dViewCamTrans = getActive3dViewCam()
horizonLineTrans = mc.circle(name='horizonLine', radius=2, normal=(0,1,0), sections=32)[0]
horizonLineShape = mc.listRelatives(horizonLineTrans, shapes=True, fullPath=True)[0]
mc.expression(s="""
{0}.sx = {1}.nearClipPlane;
{0}.sy = {1}.nearClipPlane;
{0}.sz = {1}.nearClipPlane;
""".format(horizonLineTrans, active3dViewCamShape), object=horizonLineTrans)
mc.setAttr(horizonLineShape + '.overrideEnabled', 1)
mc.setAttr(horizonLineShape + '.overrideColor', 14)
mc.pointConstraint(active3dViewCamTrans, horizonLineTrans, maintainOffset=False)
mc.select(clear=True)
if __name__ == "__main__":
main()
| 33.022727 | 96 | 0.698555 |
acf27a98b8ab184a2c9f58bdcb1adea4cb06dcbc | 2,698 | py | Python | app/models.py | bre-nda/blog-app | 7244e74e7ac67ed9076e37a0153aac9487eb9c00 | [
"MIT"
] | null | null | null | app/models.py | bre-nda/blog-app | 7244e74e7ac67ed9076e37a0153aac9487eb9c00 | [
"MIT"
] | null | null | null | app/models.py | bre-nda/blog-app | 7244e74e7ac67ed9076e37a0153aac9487eb9c00 | [
"MIT"
] | null | null | null | from . import db
from datetime import datetime
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the session-stored id."""
    return User.query.get(int(user_id))
class Quote:
    """
    Quote class to define Quote objects (not DB-backed)."""

    def __init__(self,id,author,quote):
        # Identifier, author name, and quote text, as supplied by the caller.
        self.id = id
        self.author = author
        self.quote = quote
class User(UserMixin,db.Model):
    """Registered user with login support and relationships to blogs/comments."""
    __tablename__="users"
    id = db.Column(db.Integer,primary_key=True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255))
    bio = db.Column(db.String(255))
    image_path = db.Column(db.String(255))
    # Hashed password only; the plain password is never stored.
    pass_secure = db.Column(db.String(255))
    blog = db.relationship("Blog",backref="users",lazy="dynamic")
    comment = db.relationship("Comment",backref="users",lazy="dynamic")

    def save_user(self):
        """Persist this user to the database."""
        db.session.add(self)
        db.session.commit()

    def delete_user(self):
        """Remove this user from the database."""
        db.session.delete(self)
        db.session.commit()

    @property
    def password(self):
        # Write-only attribute: reading the password is deliberately forbidden.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Store only a salted hash of the supplied password.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self,password):
        """Return True when ``password`` matches the stored hash."""
        return check_password_hash(self.pass_secure,password)

    def __repr__(self):
        return f'{self.username}'
class Blog(db.Model):
    """Blog post authored by a user."""
    __tablename__="blogs"
    id = db.Column(db.Integer,primary_key=True)
    title = db.Column(db.String(255))
    blog = db.Column(db.String(255))
    # Creation timestamp; default is evaluated at insert time.
    posted = db.Column(db.DateTime,default=datetime.utcnow)
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))

    def save_blog(self):
        """Persist this blog post."""
        db.session.add(self)
        db.session.commit()

    def delete_blog(self):
        """Remove this blog post."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return f'{self.title}'
class Comment(db.Model):
    """Comment left by a user on a blog post."""
    __tablename__ = "comments"
    id = db.Column(db.Integer,primary_key=True)
    comment = db.Column(db.String(255))
    posted = db.Column(db.DateTime,default=datetime.utcnow)
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
    blog_id = db.Column(db.Integer,db.ForeignKey("blogs.id"))

    def save_comment(self):
        """Persist this comment."""
        db.session.add(self)
        db.session.commit()

    def delete_comment(self):
        """Remove this comment."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_comments(cls,id):
        """Return all comments attached to the blog post with the given id."""
        comments = Comment.query.filter_by(blog_id=id).all()
        return comments

    def __repr__(self):
        return f'Comment {self.comment}'
acf27b9becff8cca1f6ace6e6cbcaf7ce2588278 | 722 | py | Python | strings/caesar_cipher_encryptor.py | maanavshah/coding-interview | 4c842cdbc6870da79684635f379966d1caec2162 | [
"MIT"
] | null | null | null | strings/caesar_cipher_encryptor.py | maanavshah/coding-interview | 4c842cdbc6870da79684635f379966d1caec2162 | [
"MIT"
] | null | null | null | strings/caesar_cipher_encryptor.py | maanavshah/coding-interview | 4c842cdbc6870da79684635f379966d1caec2162 | [
"MIT"
] | null | null | null | # O(n^2) time | O(n) space
# O(n^2) time | O(n) space
#
# string += 'c' <- has time complexity O(n^2)
#
def caesarCipherEncryptor(string, key):
    """Shift each lowercase letter in ``string`` forward by ``key`` positions."""
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    encrypted = ''
    for ch in string:
        shifted_index = (alphabet.index(ch) + key) % 26
        encrypted += alphabet[shifted_index]
    return encrypted
# O(n) time | O(n) space
def caesarCipherEncryptor(string, key):
    """Shift letters via character codes; assemble the result with join."""
    shift = key % 26
    encoded = [chr((ord(ch) - 97 + shift) % 26 + 97) for ch in string]
    return ''.join(encoded)
# O(n) time | O(n) space
def caesarCipherEncryptor(string, key):
    """Shift letters by indexing into the alphabet; join once at the end."""
    alphabet = list('abcdefghijklmnopqrstuvwxyz')
    return ''.join(alphabet[(alphabet.index(ch) + key) % 26] for ch in string)
acf27ce77352422cf8ce777a654e73a8510e2c7e | 16,008 | py | Python | stingray/workbook/numbers_09.py | slott56/Stingray-Reader | 6be63d1656eba3005dd7c08eb9d30eb8c3766d70 | [
"MIT"
] | 5 | 2019-06-22T01:05:51.000Z | 2021-08-30T20:02:35.000Z | stingray/workbook/numbers_09.py | slott56/Stingray-Reader | 6be63d1656eba3005dd7c08eb9d30eb8c3766d70 | [
"MIT"
] | 4 | 2020-01-11T00:46:49.000Z | 2021-09-20T20:21:14.000Z | stingray/workbook/numbers_09.py | slott56/Stingray-Reader | 6be63d1656eba3005dd7c08eb9d30eb8c3766d70 | [
"MIT"
] | 2 | 2020-02-13T22:34:01.000Z | 2021-11-15T14:20:55.000Z | #!/usr/bin/env python3
# .. _`workbook_number09`:
#
#
# Apple iWorks Numbers '09 Workbook
# -----------------------------------
#
# The Stingray model of sheet/row/cell structure does not
# easily fit the Numbers sheet/table/row/cell structure.
# How can we handle the extra layer of names introduced by
# Numbers?
#
# Option 1: navigation hierarchy.
#
# Workbook ➞ new layer (Numbers "Workspace") ➞ Sheet (Numbers "Table") ➞ Row ➞ Cell
#
# Option 2: navigation hierarchy.
#
# Combine (Workspace,Table) into a 2-tuple, and call this a "sheet" name when working
# with Numbers documents.
#
# This will fit with Stingray acceptably.
#
# The imports required to process this kind of file.
#
# ::
import logging
import pprint
import xml.etree.cElementTree as dom
import zipfile
import datetime
import decimal
from stingray.workbook.base import Workbook
import stingray.sheet
import stingray.cell
# .. py:module:: workbook.numbers09
#
# The iWork Numbers 09 format is a Zip file with an XML document inside it.
# There may be slight variations between native Numbers '09 and Numbers '13 doing
# a "save as" in Numbers '09 format. It's not clear; we haven't done
# exhaustive checking.
#
# Numbers '13 is entirely different. See :ref:`workbook_number13`.
#
# .. py:class:: Numbers09_Workbook
#
# Extract sheets, rows and cells from a Numbers '09 format file.
#
# The ``.numbers`` "file" is a ZIP file.
#
# The :file:`index.xml` element the interesting part of the archive.
#
# In addition to the superclass attributes, some additional unique
# attributes are introduced here.
#
# .. py:attribute:: zip_archive
#
# A zip archive for this file.
#
# .. py:attribute:: workspace
#
# The "workspaces": pages with tables inside them.
#
# ::
class Numbers09_Workbook( Workbook ):
"""Mac OS X Numbers Workbook for iWork 09.
"""
NUMBERS_NS = {
"ls":"http://developer.apple.com/namespaces/ls",
"sf":"http://developer.apple.com/namespaces/sf",
"sfa":"http://developer.apple.com/namespaces/sfa",
}
row_debug= False
def __init__( self, name, file_object=None ):
    """Prepare the workbook for reading.

    :param name: File name
    :param file_object: Optional file-like object. Ignored for v3.2 numbers files.
    """
    super().__init__( name, file_object )
    # A .numbers document is a ZIP archive; index.xml inside it holds the data.
    self.zip_archive= zipfile.ZipFile( file_object or name, "r" )
    self._prepare()
# As preparation for reading these files, we locate all the sheet names
# and all the number styles.
#
# ::
def _prepare( self ):
    """Locate sheets/tables and styles."""
    # Parse index.xml once; both discovery passes share the same DOM root.
    root= dom.parse( self.zip_archive.open('index.xml') ).getroot()
    self._locate_sheets(root)
    self._get_styles(root)
# Locating all the sheets is a matter of doing an XPath search for
# :samp:`workspace-array/workspace` and getting the ``workspace-name`` attribute
# from the :samp:`<table name="{name}">` tags.
#
# Within each workspace we have to find :samp:`page-info/tabular-info/tabular-model` to
# get the tables within the workspaces.
#
# ::
def _locate_sheets( self, root ):
    """Create the ``workspace`` map: workspace name -> (workspace node, tables).

    ``tables`` is itself a map from table name to its ``tabular-model`` node,
    reflecting Numbers' extra workspace/table layer above rows and cells.
    """
    self.workspace= dict()
    ws_name_attr= dom.QName( self.NUMBERS_NS["ls"], 'workspace-name' )
    name_attr= dom.QName( self.NUMBERS_NS["sf"], 'name' )
    workspace_array= root.find("ls:workspace-array", namespaces=self.NUMBERS_NS )
    for workspace in workspace_array.findall('.//ls:workspace', namespaces=self.NUMBERS_NS ):
        # Populate tables within this workspace.
        tables= dict()
        page_info = workspace.find('ls:page-info', namespaces=self.NUMBERS_NS)
        for tabular_info in page_info.findall('.//sf:tabular-info', namespaces=self.NUMBERS_NS):
            tabular_model = tabular_info.find( 'sf:tabular-model', namespaces=self.NUMBERS_NS)
            tables[ tabular_model.get(name_attr) ] = tabular_model
        self.workspace[ workspace.get(ws_name_attr) ]= workspace, tables
# Locate a "data source" within the XML document. Create ``Cell`` instances.
#
# ::
def _datasource( self, grid ):
    """The data source for cell values within a grid.

    This yields each individual cell value, transformed into
    string, Decimal, datetime (see :meth:`cell`).
    """
    datasource = grid.find('.//sf:datasource', namespaces=self.NUMBERS_NS)
    for cell_doc in datasource:
        yield self.cell( cell_doc )
    # or return map( self.cell, datasource )
# .. py:method:: Numbers09_Workbook.cell( cell )
#
# Create a ``Cell`` instance from the decoded data.
#
# ::
def cell( self, cell ):
    """Build a stingray Cell from one Numbers ``<sf:...>`` cell element.

    Dispatches on the element tag: date, formula, general/empty, number,
    text, boolean, span and popup-menu cells are recognized; anything
    else raises ``Exception``.
    """
    logging.debug( dom.tostring(cell) )
    # Qualified tag/attribute names this dispatcher inspects.
    date_tag= dom.QName( self.NUMBERS_NS["sf"], 'd' )
    date_attr= dom.QName( self.NUMBERS_NS["sf"], 'cell-date' )
    formula_tag= dom.QName( self.NUMBERS_NS["sf"], 'f' )
    s_attr= dom.QName( self.NUMBERS_NS["sf"], 's' )
    v_attr= dom.QName( self.NUMBERS_NS["sf"], 'v' )
    general_tag= dom.QName( self.NUMBERS_NS["sf"], 'g' )
    number_tag= dom.QName( self.NUMBERS_NS["sf"], 'n' )
    text_tag= dom.QName( self.NUMBERS_NS["sf"], 't' )
    o_tag= dom.QName( self.NUMBERS_NS["sf"], 'o' )
    span_tag= dom.QName( self.NUMBERS_NS["sf"], 's' )
    bool_tag= dom.QName( self.NUMBERS_NS["sf"], 'b' )
    popup_menu_tag= dom.QName( self.NUMBERS_NS["sf"], 'pm' )
    IDREF_attr= dom.QName( self.NUMBERS_NS["sfa"], 'IDREF' )
    ID_attr= dom.QName( self.NUMBERS_NS["sfa"], 'ID' )
    fs_attr= dom.QName( self.NUMBERS_NS["sf"],"fs")
    if cell.tag == date_tag:
        # Dates are stored as a seconds offset from the Apple epoch 2001-01-01.
        seconds= int(cell.attrib[date_attr])
        epoch= datetime.datetime(2001, 1, 1)
        delta= datetime.timedelta( seconds=seconds )
        theDate= epoch + delta
        return stingray.cell.DateCell( theDate, self )
    elif cell.tag == formula_tag: # formula or error.
        s= cell.get(s_attr)
        fo= cell.find('sf:fo', namespaces=self.NUMBERS_NS)
        # Numeric Result? What about non-numeric results?
        r= cell.find('sf:r', namespaces=self.NUMBERS_NS)
        if r:
            # Result:
            rn= r.find('sf:rn', namespaces=self.NUMBERS_NS)
            try:
                value_txt= rn.attrib[v_attr]
                value= self._to_decimal( value_txt, s )
            except KeyError as ex:
                # Formula evaluated but produced no value; treat as zero.
                #self._cell_warning("Formula with no value", cell)
                value= self._to_decimal( '0', s )
            return stingray.cell.NumberCell( value, self )
        else:
            # Error: no result node; report the formula text instead.
            #self._cell_warning("Formula error", cell)
            value= "#Error in {0}".format(fo.get(fs_attr))
            return stingray.cell.ErrorCell( value, self )
    elif cell.tag == general_tag: # General?
        return stingray.cell.EmptyCell( '', self )
    elif cell.tag == number_tag: # Number
        value= self._decode_number( cell )
        return stingray.cell.NumberCell( value, self )
    elif cell.tag == o_tag: #??
        self._cell_warning("Unknown cell type", cell)
        return stingray.cell.EmptyCell( '', self )
    elif cell.tag == span_tag: # Span?
        self._cell_warning("Unknown cell type", cell)
        return stingray.cell.EmptyCell( '', self )
    elif cell.tag == text_tag: # Text
        value= self._decode_text( cell )
        return stingray.cell.TextCell( value, self )
    elif cell.tag == bool_tag: # Boolean
        value= self._decode_number( cell )
        return stingray.cell.BooleanCell( value, self )
    elif cell.tag == popup_menu_tag: # popup menu
        # TODO:: Better Xpath query: ``menu-choices/*[@ID='name']``
        value= None # In case we can't find anything.
        selected= cell.find('sf:proxied-cell-ref', namespaces=self.NUMBERS_NS)
        name= selected.get(IDREF_attr)
        mc= cell.find('sf:menu-choices', namespaces=self.NUMBERS_NS)
        for t in mc:
            if t.get(ID_attr) == name:
                # t's tag could end in "t" (text) or "n" (number).
                if t.tag.endswith('t'): # Text
                    value= self._decode_text( t )
                    return stingray.cell.TextCell( value, self )
                elif t.tag.endswith('n'): # Number
                    value= self._decode_number( t )
                    return stingray.cell.NumberCell( value, self )
        else:
            # NOTE(review): indentation reconstructed from a flattened source;
            # this reads as a for/else (no matching menu choice) — confirm
            # against the upstream stingray source.
            raise Exception( "Unknown popup menu {0}".format(dom.tostring(cell)))
    else:
        raise Exception( "Unknown cell {0}".format( dom.tostring(cell) ) )
# Some lower-level conversions.
#
# ::
def _to_decimal( self, value_txt, style_id ):
    """Convert numeric text to :class:`decimal.Decimal`, honoring the style.

    :param value_txt: the numeric text from the cell's ``sf:v`` attribute.
    :param style_id: the cell's style reference (``sf:s``), used to look up
        format-string / decimal-places hints in ``self.cell_style``.
    :returns: a ``Decimal`` value.

    TODO: From the style, get the number of decimal places, use that to
    build a string version of the float value.

    NOTE(review): both style lookups below are hard-coded to ``None``
    ("Doesn't seem correct"), so only the final plain-Decimal branch is
    currently reachable; the two formatting branches are dead code until
    the style lookup is fixed.
    """
    fdp_attr= dom.QName( self.NUMBERS_NS["sf"], 'format-decimal-places' )
    fs_attr= dom.QName( self.NUMBERS_NS["sf"], 'format-string' )
    cell_style= self.cell_style.get(style_id)
    #print( "TO_DECIMAL", value_txt, style_id, "=", cell_style )
    fs= None # cell_style.get(fs_attr) # Doesn't seem correct
    fdp= None # cell_style.get(fdp_attr) # Doesn't seem correct
    # Transform fs into proper Python format, otherwise, use the number of
    # decimal places.
    if fs is not None:
        # Dead until fs lookup is restored: round-trip through a float format.
        fmt= self._rewrite_fmt( fs )
        #print( "Decimal: {{0:{0}}}.format({1}) = ".format( fmt, value_txt ), end="" )
        value= decimal.Decimal( "{:{fmt}}".format(float(value_txt), fmt=fmt) )
        #print( value )
        return value
    elif fdp is not None:
        # Dead until fdp lookup is restored: fixed number of decimal places.
        #fmt= "{{0:.{0}f}}".format(fdp)
        value= decimal.Decimal( "{:.{fdp}f}".format(float(value_txt), fdp=fdp) )
        #print( "Decimal: {0}.format({1}) = {2!r}".format( fmt, value_txt, value ) )
        return value
    else:
        # Default: exact Decimal from the source text, no float round-trip.
        value= decimal.Decimal( value_txt )
        #print( "Decimal: {0} = {1!r}".format( value_txt, value ) )
        return value
def _decode_text(self, cell):
    """Return the text content of a ``<t>`` cell.

    Prefers the ``sfa:s`` attribute of the embedded ``<sf:ct>`` element;
    falls back to joining every text node of the cell with newlines.
    """
    shared_string_attr = dom.QName(self.NUMBERS_NS["sfa"], 's')
    content = cell.find('sf:ct', namespaces=self.NUMBERS_NS)
    text = content.get(shared_string_attr)
    return text if text is not None else "\n".join(cell.itertext())
def _decode_number(self, cell):
    """Decode an ``<n>`` (number) cell via :meth:`_to_decimal`.

    Reads the ``sf:v`` value attribute and the ``sf:s`` style reference.
    A number cell without a value attribute decodes as zero.

    (Fix: the original also computed an unused ``cell_style`` local;
    :meth:`_to_decimal` performs that lookup itself.)
    """
    s_attr = dom.QName(self.NUMBERS_NS["sf"], 's')
    v_attr = dom.QName(self.NUMBERS_NS["sf"], 'v')
    style_id = cell.get(s_attr)
    try:
        value_txt = cell.attrib[v_attr]
    except KeyError:
        # Missing sf:v attribute: treat the cell as zero rather than failing.
        value_txt = '0'
    return self._to_decimal(value_txt, style_id)
# The styles are also important because we can use them to parse the numbers more
# precisely.
#
# ::
def _get_styles( self, root ):
    """Populate ``self.cell_style`` from every ``sf:cell-style`` under *root*.

    Each style is keyed by its ``sfa:ID`` (and, when present, its ``sf:ident``).
    Styles without their own number-format inherit the entry of their
    ``sf:parent-ident``.

    NOTE(review): inheritance assumes the parent style was seen earlier in
    document order — a forward reference would raise ``KeyError``; a style
    with neither a number-format nor a parent gets no entry at all. Confirm
    against real Numbers '09 documents.
    """
    ID_attr= dom.QName( self.NUMBERS_NS["sfa"], 'ID' )
    ident_attr= dom.QName( self.NUMBERS_NS["sf"], 'ident' )
    parent_ident_attr= dom.QName( self.NUMBERS_NS["sf"], 'parent-ident' )
    self.cell_style= {}
    for cs in root.findall('.//sf:cell-style', namespaces=self.NUMBERS_NS):
        #print( "STYLE", dom.tostring(cs) )
        ID= cs.get(ID_attr)
        ident= cs.get(ident_attr)
        parent_ident= cs.get(parent_ident_attr)
        property_number_format= cs.find('.//sf:SFTCellStylePropertyNumberFormat', namespaces=self.NUMBERS_NS)
        if property_number_format is None:
            # No format of its own: inherit the parent's entry, if any.
            if parent_ident is not None:
                self.cell_style[ID]= self.cell_style[parent_ident]
        else:
            number_format= property_number_format.find('sf:number-format', namespaces=self.NUMBERS_NS)
            if number_format is None:
                # Property wrapper present but empty: fall back to the parent.
                if parent_ident is not None:
                    self.cell_style[ID]= self.cell_style[parent_ident]
            else:
                # Record the raw attribute dict under the ID (and ident alias).
                self.cell_style[ID]= number_format.attrib
                if ident is not None:
                    self.cell_style[ident]= number_format.attrib
        #print( ID, self.cell_style.get(ID,None) )
# Rewrite a number format from Numbers to Python
#
# ::
def _rewrite_fmt(self, format_string):
    """Translate a Numbers number-format pattern into a Python format spec.

    ``'#,##0.###;-#,##0.###'`` becomes ``'10,.3f'`` (usable as
    ``"{:10,.3f}"``).  When a negative pattern follows the ``;`` it is the
    one measured; the total pattern length becomes the field width and the
    digits after ``.`` become the precision.
    """
    positive_part, _, negative_part = format_string.partition(";")
    pattern = negative_part if negative_part else positive_part
    width = len(pattern)
    grouping = "," if "," in pattern else ""
    _, _, fraction = pattern.partition(".")
    return "{0}{1}.{2}f".format(width, grouping, len(fraction))
# .. py:method:: Numbers09_Workbook.sheets( )
#
# Return a list of "sheets" (actually underlying tables.)
#
# The "sheets" are ``[ (`` *workspace*\ `,` *table* ``), ... ]`` pairs.
#
# Picking a sheet involves matching a two-part name: (workspace, table).
#
# ::
def sheets(self):
    """Return the ``(workspace, table)`` name pairs for every table."""
    return [
        (workspace_name, table_name)
        for workspace_name, (_, tables) in self.workspace.items()
        for table_name in tables
    ]
# .. py:method:: Numbers09_Workbook.rows_of( sheet )
#
# Iterator through all rows of a sheet.
#
# ::
def rows_of( self, sheet ):
    """Iterator over rows.

    Two parallel traversals:
    the internal iterator over grid/datasource/* (d, t, n, pm, g, o and s
    elements) yields individual cell values, while the iterator over
    grid/rows/grid-row consumes ``nc`` cells per row (``nc`` defaults to
    the grid's ``numcols``).  Rows may be variable length but are padded
    to the number of columns specified in the grid.

    :param sheet: a Sheet object to retrieve rows from.
    """
    self.log.debug( "rows of {0}: {1}".format(sheet, sheet.name) )
    # sheet.name is the (workspace, table) pair produced by sheets().
    ws_name, t_name = sheet.name
    ws, tables= self.workspace[ws_name]
    tabular_model= tables[t_name]
    grid= tabular_model.find( 'sf:grid', namespaces=self.NUMBERS_NS )
    numrows_attr= dom.QName( self.NUMBERS_NS["sf"], 'numrows' )
    numcols_attr= dom.QName( self.NUMBERS_NS["sf"], 'numcols' )
    numrows = int(grid.attrib[numrows_attr])
    numcols = int(grid.attrib[numcols_attr])
    nc_attr= dom.QName( self.NUMBERS_NS["sf"], 'nc' )
    datasource= iter( self._datasource(grid) )
    rows = grid.find('sf:rows', namespaces=self.NUMBERS_NS)
    for n, r in enumerate(rows.findall( 'sf:grid-row', namespaces=self.NUMBERS_NS )):
        #print( "ROW", dom.tostring(r) )
        # debug_row / debug_col expose the current position for error reporting.
        self.debug_row= n
        # Is this really relevant for Numbers '09?
        nc= int(r.get(nc_attr,numcols))
        try:
            # The comprehension target deliberately assigns self.debug_col.
            row= [ next(datasource) for self.debug_col in range(nc) ]
        except StopIteration as e:
            pass # Last row will exhaust the datasource.
        # NOTE(review): if StopIteration fires, `row` is unbound on the first
        # iteration (NameError) or left holding the *previous* row, which is
        # then re-yielded.  Also, padding uses (numcols - nc), not the actual
        # shortfall of `row`.  Both look like latent bugs — confirm intent.
        if len(row) == numcols:
            yield row
        else:
            yield row + (numcols-nc)*[None]
| 39.331695 | 113 | 0.588581 |
acf27d4575eddc921ff2f1e28d89155f697d0c01 | 2,374 | py | Python | amime/modules/anime/MOVIE/movie_trend/movie_trend6.py | Myudi422/ccgnime_req | a0f7596ba101204539b4120dffa08912b6560efe | [
"MIT"
] | null | null | null | amime/modules/anime/MOVIE/movie_trend/movie_trend6.py | Myudi422/ccgnime_req | a0f7596ba101204539b4120dffa08912b6560efe | [
"MIT"
] | null | null | null | amime/modules/anime/MOVIE/movie_trend/movie_trend6.py | Myudi422/ccgnime_req | a0f7596ba101204539b4120dffa08912b6560efe | [
"MIT"
] | null | null | null | import httpx
from anilist.types import Anime
from pyrogram import filters
from pyrogram.types import CallbackQuery
from pyromod.helpers import ikb
from pyromod.nav import Pagination
from amime.amime import Amime
@Amime.on_callback_query(filters.regex(r"^trending_movie6 anime (?P<page>\d+)"))
async def anime_suggestions(bot: Amime, callback: CallbackQuery):
page = int(callback.matches[0]["page"])
message = callback.message
lang = callback._lang
keyboard = []
async with httpx.AsyncClient(http2=True) as client:
response = await client.post(
url="https://graphql.anilist.co",
json=dict(
query="""
query($per_page: Int) {
Page(page: 7, perPage: $per_page) {
media(type: ANIME, format: MOVIE, sort: TRENDING_DESC, status: FINISHED) {
id
title {
romaji
english
native
}
siteUrl
}
}
}
""",
variables=dict(
perPage=100,
),
),
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
)
data = response.json()
await client.aclose()
if data["data"]:
items = data["data"]["Page"]["media"]
suggestions = [
Anime(id=item["id"], title=item["title"], url=item["siteUrl"])
for item in items
]
layout = Pagination(
suggestions,
item_data=lambda i, pg: f"menu {i.id}",
item_title=lambda i, pg: i.title.romaji,
page_data=lambda pg: f"trending_movie6 anime {pg}",
)
lines = layout.create(page, lines=8)
if len(lines) > 0:
keyboard += lines
keyboard.append([(lang.Prev, "trending_movie5 anime 1"), (lang.Next, "trending_movie7 anime 1")])
keyboard.append([(lang.back_button, "movie-menu")])
await message.edit_text(
lang.movietrend_text,
reply_markup=ikb(keyboard),
)
| 32.081081 | 101 | 0.487363 |
acf27ef85f12aa053c6a64b0b3bbc434ed33aa43 | 17,364 | py | Python | tests/test_implementations/api_test/test_delete_one_api.py | fossabot/FastAPIQuickCRUD | 69226ec9959dfed41fdfe69f59d8c622bd3726fb | [
"MIT"
] | null | null | null | tests/test_implementations/api_test/test_delete_one_api.py | fossabot/FastAPIQuickCRUD | 69226ec9959dfed41fdfe69f59d8c622bd3726fb | [
"MIT"
] | null | null | null | tests/test_implementations/api_test/test_delete_one_api.py | fossabot/FastAPIQuickCRUD | 69226ec9959dfed41fdfe69f59d8c622bd3726fb | [
"MIT"
] | null | null | null | import json
from collections import OrderedDict
from starlette.testclient import TestClient
from src.fastapi_quickcrud.crud_router import crud_router_builder
from src.fastapi_quickcrud.crud_router import CrudService
from src.fastapi_quickcrud.misc.type import CrudMethods
from src.fastapi_quickcrud import sqlalchemy_to_pydantic
from tests.test_implementations.api_test import get_transaction_session, app, UntitledTable256
# CRUD service wrapping the fixture table, plus pydantic models for the
# single-row upsert (UPSERT_ONE) route.
UntitledTable256_service = CrudService(model=UntitledTable256)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.UPSERT_ONE
                                               ],
                                               # NOTE(review): 'box_valaue' presumably mirrors a misspelled
                                               # column name in the fixture model — verify.
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# create_one_model = api_model[CrudMethods.UPSERT_ONE].__dict__
# assert create_one_model['requestModel'] or create_one_model['responseModel']
# create_one_request_model = deepcopy(create_one_model['requestModel'].__dict__['__fields__'])
# create_one_response_model = deepcopy(create_one_model['responseModel'].__dict__['__fields__'])
# Request Test
# assert create_one_request_model.pop('on_conflict', False)
# for k, v in create_one_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Test
# for k, v in create_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Router for the single-row upsert model, mounted at /test_creation_one.
test_create_one = crud_router_builder(db_session=get_transaction_session,
                                      crud_service=UntitledTable256_service,
                                      crud_models=UntitledTable256Model,
                                      prefix="/test_creation_one",
                                      tags=["test"]
                                      )
# Rebuild the pydantic models, now for the bulk upsert (UPSERT_MANY) route.
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.UPSERT_MANY,
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# create_many_model = api_model[CrudMethods.UPSERT_MANY].__dict__
# assert create_many_model['requestModel'] or create_many_model['responseModel']
# create_many_request_model = deepcopy(create_many_model['requestModel'].__dict__['__fields__'])
# create_many_response_model = deepcopy(create_many_model['responseModel'].__dict__['__fields__'])
#
# # Request Model Test
# assert create_many_request_model.pop('on_conflict', None)
# insert_many_model = create_many_request_model['insert'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in insert_many_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
#
# # Response Model Test
# for k, v in create_many_response_model.items():
# create_many_response_model_item = v.type_.__dict__['__fields__']
# for k, v in create_many_response_model_item.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Router for the bulk upsert model, mounted at /test_creation_many.
test_create_many = crud_router_builder(db_session=get_transaction_session,
                                       crud_service=UntitledTable256_service,
                                       crud_models=UntitledTable256Model,
                                       prefix="/test_creation_many",
                                       tags=["test"]
                                       )
# Response Mode Test
# response_many = create_many_response_model['__root__'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in response_many.items():
#     assert not v.required
# Rebuild the pydantic models for the POST-redirect-GET route.
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.POST_REDIRECT_GET
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# post_redirect_get_model = api_model[CrudMethods.POST_REDIRECT_GET].__dict__
# assert post_redirect_get_model['requestModel'] or post_redirect_get_model['responseModel']
# post_redirect_get_request_model = deepcopy(post_redirect_get_model['requestModel'].__dict__['__fields__'])
# post_redirect_get_response_model = deepcopy(post_redirect_get_model['responseModel'].__dict__['__fields__'])
# Request Model Test
# for k, v in post_redirect_get_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Model Test
# for k, v in post_redirect_get_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# for k, v in post_redirect_get_response_model.items():
# assert v.required
# Router for POST-redirect-GET, mounted at /test_post_direct_get.
test_post_and_redirect_get = crud_router_builder(db_session=get_transaction_session,
                                                 crud_service=UntitledTable256_service,
                                                 crud_models=UntitledTable256Model,
                                                 prefix="/test_post_direct_get",
                                                 tags=["test"]
                                                 )
# Rebuild the pydantic models for the FIND_ONE (read) route.
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.FIND_ONE
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Router for FIND_ONE, mounted at /test.
test_get_data = crud_router_builder(db_session=get_transaction_session,
                                    crud_service=UntitledTable256_service,
                                    crud_models=UntitledTable256Model,
                                    prefix="/test",
                                    tags=["test"]
                                    )
# Rebuild the pydantic models for the DELETE_ONE route.
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.DELETE_ONE
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Router for DELETE_ONE, mounted at /test_delete_one.
test_delete_data = crud_router_builder(db_session=get_transaction_session,
                                       crud_service=UntitledTable256_service,
                                       crud_models=UntitledTable256Model,
                                       prefix="/test_delete_one",
                                       tags=["test"]
                                       )
# Mount every router on the shared app and build a synchronous test client.
[app.include_router(i) for i in
 [test_post_and_redirect_get, test_delete_data, test_create_one, test_create_many, test_get_data]]
client = TestClient(app)
# Column metadata consumed by the test function below.
primary_key_name = UntitledTable256.primary_key_of_table
unique_fields = UntitledTable256.unique_fields
def test_create_one_and_delete_one():
    """Insert one row via the bulk-upsert endpoint, then delete it.

    The DELETE request carries every supported query filter; all of them
    match the inserted row, so exactly one record is removed (asserted via
    the x-total-count response header).

    (Fix: removed the unused ``update_data`` local from the original.)
    """
    headers = {
        'accept': 'application/json',
        'Content-Type': 'application/json',
    }
    data = {"insert": [
        {"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
         "float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
         "json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
         "timestamp_value": "2021-07-24T02:54:53.285Z",
         "timestamptz_value": "2021-07-24T02:54:53.285Z",
         "uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
         "array_value": [0], "array_str__value": ["string"], "time_value": "18:18:18",
         "timetz_value": "18:18:18+00:00"},
    ]}
    response = client.post('/test_creation_many', headers=headers, data=json.dumps(data))
    assert response.status_code == 201
    insert_response_data = response.json()
    # Exactly one row was inserted; unpack its primary key.
    primary_key, = [i[primary_key_name] for i in insert_response_data]

    # NOTE(review): the second block of "time_value____*" keys duplicates the
    # first and, being later in the literal, wins ('+00:00' variants); they
    # were probably meant to be "timetz_value____*".  Kept as-is to preserve
    # the request the server actually receives.
    params = {"bool_value____list": True,
              "char_value____str": 'string%',
              "char_value____str_____matching_pattern": 'case_sensitive',
              "date_value____from": "2021-07-22",
              "date_value____to": "2021-07-25",
              "float4_value____from": -1,
              "float4_value____to": 2,
              "float4_value____list": 0,
              "float8_value____from": -1,
              "float8_value____to": 2,
              "float8_value____list": 0,
              "int2_value____from": -1,
              "int2_value____to": 9,
              "int2_value____list": 0,
              "int4_value____from": -1,
              "int4_value____to": 9,
              "int4_value____list": 0,
              "int8_value____from": -1,
              "int8_value____to": 9,
              "int8_value____list": 0,
              "interval_value____from": -1,
              "interval_value____to": 9,
              "interval_value____list": 0,
              "numeric_value____from": -1,
              "numeric_value____to": 9,
              "numeric_value____list": 0,
              "text_value____list": "string",
              "time_value____from": '18:18:18',
              "time_value____to": '18:18:18',
              "time_value____list": '18:18:18',
              "timestamp_value_value____from": "2021-07-24T02:54:53.285",
              "timestamp_value_value____to": "2021-07-24T02:54:53.285",
              "timestamp_value_value____list": "2021-07-24T02:54:53.285",
              "timestamptz_value_value____from": "2021-07-24T02:54:53.285Z",
              "timestamptz_value_value____to": "2021-07-24T02:54:53.285Z",
              "timestamptz_value_value____list": "2021-07-24T02:54:53.285Z",
              "uuid_value_value____list": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
              "time_value____from": '18:18:18+00:00',
              "time_value____to": '18:18:18+00:00',
              "time_value____list": '18:18:18+00:00',
              "varchar_value____str": 'string',
              "varchar_value____str_____matching_pattern": 'case_sensitive',
              "varchar_value____list": 'string',
              }
    from urllib.parse import urlencode
    query_string = urlencode(OrderedDict(**params))
    response = client.delete(f'/test_delete_one/{primary_key}?{query_string}')
    response_data = response.json()
    assert response.status_code == 200
    assert response.headers['x-total-count'] == '1'
acf27f9673c9b22135be3b8bfdae3609e7a76b00 | 782 | py | Python | src/Exceptions/SampleSheetError.py | Public-Health-Bioinformatics/sequdas-upload | a22f090f9cd3b5ecfe0bae487016622b9b80651d | [
"MIT"
] | 9 | 2015-11-24T21:51:42.000Z | 2020-10-21T20:16:24.000Z | src/Exceptions/SampleSheetError.py | Public-Health-Bioinformatics/sequdas-upload | a22f090f9cd3b5ecfe0bae487016622b9b80651d | [
"MIT"
] | 6 | 2016-09-13T20:38:57.000Z | 2019-02-21T18:31:22.000Z | src/Exceptions/SampleSheetError.py | Public-Health-Bioinformatics/sequdas-upload | a22f090f9cd3b5ecfe0bae487016622b9b80651d | [
"MIT"
] | 1 | 2018-10-07T00:55:43.000Z | 2018-10-07T00:55:43.000Z | class SampleSheetError(Exception):
"""An exception raised when errors are encountered with a sample sheet.
Examples include when a sample sheet can't be parsed because it's garbled, or
if IRIDA rejects the creation of a run because fields are missing or invalid
from the sample sheet.
"""
def __init__(self, message, errors):
"""Initalize a SampleSheetError.
Args:
message: a summary message that's causing the error.
errors: a more detailed list of errors.
"""
self._message = message
self._errors = errors
@property
def message(self):
return self._message
@property
def errors(self):
return self._errors
def __str__(self):
return self.message
| 27.928571 | 81 | 0.647059 |
acf280500050b4cc2fd8c9146153b6b2abd1c40c | 329 | py | Python | tests/test_octodns_provider_ultra.py | slandry90/octodns | a7506f487c63164f85e89cafe72ec519989ed531 | [
"MIT"
] | 1,865 | 2017-04-06T18:03:10.000Z | 2020-12-07T21:53:31.000Z | tests/test_octodns_provider_ultra.py | slandry90/octodns | a7506f487c63164f85e89cafe72ec519989ed531 | [
"MIT"
] | 401 | 2017-04-08T22:58:06.000Z | 2020-12-08T15:52:29.000Z | tests/test_octodns_provider_ultra.py | slandry90/octodns | a7506f487c63164f85e89cafe72ec519989ed531 | [
"MIT"
] | 366 | 2017-04-10T15:40:02.000Z | 2020-12-08T01:37:29.000Z | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
class TestUltraShim(TestCase):
    """Verify that the legacy Ultra provider shim has been removed."""

    def test_missing(self):
        # The provider now lives in a separate package, so importing the
        # old path must raise ModuleNotFoundError.
        with self.assertRaises(ModuleNotFoundError):
            from octodns.provider.ultra import UltraProvider
            UltraProvider
| 19.352941 | 67 | 0.726444 |
acf280997bc48bb3895e00e7996824a708b7071e | 3,036 | py | Python | test/federated_government/test_federated_clustering.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
] | 1 | 2021-03-18T07:31:36.000Z | 2021-03-18T07:31:36.000Z | test/federated_government/test_federated_clustering.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
] | null | null | null | test/federated_government/test_federated_clustering.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
] | null | null | null | from shfl.federated_government.federated_clustering import FederatedClustering, ClusteringDataBases
from shfl.federated_aggregator.cluster_fedavg_aggregator import ClusterFedAvgAggregator
from shfl.model.kmeans_model import KMeansModel
from unittest.mock import Mock, patch
import numpy as np
def test_FederatedClustering():
database = 'IRIS'
cfg = FederatedClustering(database, iid=True, num_nodes=3, percent=20)
module = ClusteringDataBases.__members__[database].value
data_base = module()
train_data, train_labels, test_data, test_labels = data_base.load_data()
assert cfg._test_data is not None
assert cfg._test_labels is not None
assert cfg._num_clusters == len(np.unique(train_labels))
assert cfg._num_features == train_data.shape[1]
assert isinstance(cfg._aggregator, ClusterFedAvgAggregator)
assert isinstance(cfg._model, KMeansModel)
assert cfg._federated_data is not None
cfg = FederatedClustering(database, iid=False, num_nodes=3, percent=20)
assert cfg._test_data is not None
assert cfg._test_labels is not None
assert cfg._num_clusters == len(np.unique(train_labels))
assert cfg._num_features == train_data.shape[1]
assert isinstance(cfg._aggregator, ClusterFedAvgAggregator)
assert isinstance(cfg._model, KMeansModel)
assert cfg._federated_data is not None
def test_FederatedClustering_wrong_database():
    """A non-clustering database name leaves the government without test data."""
    government = FederatedClustering('MNIST', iid=True, num_nodes=3, percent=20)
    assert government._test_data is None
def test_run_rounds():
    """run_rounds must drive one full train/evaluate/aggregate cycle."""
    cfg = FederatedClustering('IRIS', iid=True, num_nodes=3, percent=20)
    stages = ("deploy_central_model", "train_all_clients", "evaluate_clients",
              "aggregate_weights", "evaluate_global_model")
    for stage in stages:
        setattr(cfg, stage, Mock())
    cfg.run_rounds(1)
    cfg.deploy_central_model.assert_called_once()
    cfg.train_all_clients.assert_called_once()
    cfg.evaluate_clients.assert_called_once_with(cfg._test_data, cfg._test_labels)
    cfg.aggregate_weights.assert_called_once()
    cfg.evaluate_global_model.assert_called_once_with(cfg._test_data, cfg._test_labels)
def test_run_rounds_wrong_database():
    """With an unsupported database, run_rounds must perform no stage at all."""
    cfg = FederatedClustering('EMNIST', iid=True, num_nodes=3, percent=20)
    stages = ("deploy_central_model", "train_all_clients", "evaluate_clients",
              "aggregate_weights", "evaluate_global_model")
    for stage in stages:
        setattr(cfg, stage, Mock())
    cfg.run_rounds(1)
    for stage in stages:
        getattr(cfg, stage).assert_not_called()
@patch('shfl.federated_government.federated_clustering.KMeansModel')
def test_model_builder(mock_kmeans):
    """model_builder must construct a KMeansModel with the government's dimensions."""
    cfg = FederatedClustering('IRIS', iid=True, num_nodes=3, percent=20)
    model = cfg.model_builder()
    # The patched constructor returns a Mock instance.
    assert isinstance(model, Mock)
    mock_kmeans.assert_called_with(n_clusters=cfg._num_clusters, n_features=cfg._num_features)
acf28118d1fd305f4b99efb98d6edad4e57dd344 | 1,863 | py | Python | algorithms/TimSort.py | rohithaug/sorting-visualizer | b30e21ee5f135a1a7be887499da65667ab6d081a | [
"MIT"
] | 2 | 2020-10-19T12:10:27.000Z | 2022-03-25T04:17:12.000Z | algorithms/TimSort.py | rohithaug/sorting-visualizer | b30e21ee5f135a1a7be887499da65667ab6d081a | [
"MIT"
] | null | null | null | algorithms/TimSort.py | rohithaug/sorting-visualizer | b30e21ee5f135a1a7be887499da65667ab6d081a | [
"MIT"
] | 1 | 2021-06-08T03:44:12.000Z | 2021-06-08T03:44:12.000Z | '''
Tim Sort
Time Complexity: O(N*log(N))
Space Complexity: O(N)
'''
from algorithms.Algorithm import Algorithm
class TimSort(Algorithm):
def __init__(self):
super().__init__("Tim Sort")
def algorithm(self):
RUN = 32
n = len(self.array)
for i in range(0, n, RUN):
self.InsertionSort(i, min(i+32, n))
size = RUN
while size < n:
for left in range(0, n, 2*size):
mid = left + size
right = min(mid+size, n)
self.Merge(left, mid, right)
size *= 2
def InsertionSort(self, left, right):
for i in range(left, right):
key = self.array[i]
j = i-1
while j >= left and self.array[j] > key:
self.array[j+1] = self.array[j]
j = j-1
self.array[j+1] = key
#visualise the sorting
self.update(j+1)
def Merge(self, l, m, r):
left = self.array[l:m]
right = self.array[m:r]
#i - index of left array, j - index of right array, k - index of self.array
i = j = 0
k = l
while i < len(left) and j < len(right):
if left[i] < right[j]:
self.array[k] = left[i]
i += 1
else:
self.array[k] = right[j]
j += 1
#visualise the sorting
self.update(k)
k += 1
while i < len(left):
self.array[k] = left[i]
#visualise the sorting
self.update(k)
i += 1
k += 1
while j < len(right):
self.array[k] = right[j]
#visualise the sorting
self.update(k)
j += 1
k += 1
| 27 | 84 | 0.430488 |
acf281824d84546a5bb82c9546c8811922475f5b | 392 | py | Python | 25_capitalize.py | b-husein/HackerRank_Python_Challenges | 655b38e7e9fb23864eb56b72f647fee585159af6 | [
"MIT"
] | null | null | null | 25_capitalize.py | b-husein/HackerRank_Python_Challenges | 655b38e7e9fb23864eb56b72f647fee585159af6 | [
"MIT"
] | null | null | null | 25_capitalize.py | b-husein/HackerRank_Python_Challenges | 655b38e7e9fb23864eb56b72f647fee585159af6 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(s):
    """Capitalize the first character of every space-separated word.

    Splitting on single spaces preserves runs of spaces (empty tokens),
    and — matching the reference implementation — the result carries one
    trailing space.
    """
    return "".join(word.capitalize() + " " for word in s.split(" "))
if __name__ == '__main__':
    # HackerRank harness: read one line from stdin and write solve()'s
    # result to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = solve(s)
    fptr.write(result + '\n')
    fptr.close()
acf281aacae9b49cca96f8609f1ca51d0fbcc5b1 | 2,573 | py | Python | aiomultiprocess/scheduler.py | dferens/aiomultiprocess | 94b45794d221ad7c9adf0bf1469880d38866ab55 | [
"MIT"
] | 783 | 2018-05-11T15:11:38.000Z | 2020-04-29T08:40:36.000Z | aiomultiprocess/scheduler.py | dferens/aiomultiprocess | 94b45794d221ad7c9adf0bf1469880d38866ab55 | [
"MIT"
] | 80 | 2020-05-09T05:18:27.000Z | 2022-02-05T05:27:39.000Z | aiomultiprocess/scheduler.py | m4ta1l/aiomultiprocess | a0cd7d0e2d92fec2ceb4e54fb3067fd26e48237f | [
"MIT"
] | 62 | 2018-05-11T15:12:01.000Z | 2020-04-19T11:10:29.000Z | # Copyright 2019 John Reese
# Licensed under the MIT license
import itertools
from abc import ABC, abstractmethod
from typing import Any, Awaitable, Callable, Dict, Iterator, List, Sequence
from .types import Queue, QueueID, R, TaskID
class Scheduler(ABC):
    """Strategy interface a Pool uses to map submitted tasks onto worker queues."""

    @abstractmethod
    def register_queue(self, tx: Queue) -> QueueID:
        """
        Notify the scheduler when the pool creates a new transmit queue.
        Returns the ID the scheduler will use to refer to this queue.
        """

    @abstractmethod
    def register_process(self, qid: QueueID) -> None:
        """
        Notify the scheduler when a process is assigned to a queue.
        This should be used for determining weights for the scheduler.
        It will only be called during initial process mapping.
        """

    @abstractmethod
    def schedule_task(
        self,
        task_id: TaskID,
        func: Callable[..., Awaitable[R]],
        args: Sequence[Any],
        kwargs: Dict[str, Any],
    ) -> QueueID:
        """
        Given a task, return a queue ID that it should be sent to.
        ``func``, ``args`` and ``kwargs`` are the exact same arguments that
        ``queue_work`` takes; not every scheduler will benefit from them.
        A highly customized scheduler may, for example, weight its choice
        by the function or its arguments.
        """

    @abstractmethod
    def complete_task(self, task_id: TaskID) -> None:
        """
        Notify the scheduler that a task has been completed.
        """
class RoundRobin(Scheduler):
    """
    The default scheduling algorithm that assigns tasks to queues in round robin order.
    When multiple processes are assigned to the same queue, this will weight tasks
    accordingly. For example, 12 processes over 8 queues should result in four queues
    receiving double the number tasks compared to the other eight.
    """

    def __init__(self) -> None:
        super().__init__()
        # qids holds one entry per registered *process*, so a queue serving
        # two processes appears twice in the cycle and gets a double share.
        self.qids: List[QueueID] = []
        self.next_id = itertools.count()
        self.cycler: Iterator[QueueID] = itertools.cycle([])

    def register_queue(self, tx: Queue) -> QueueID:
        # Queue IDs are handed out sequentially; the queue object itself is
        # not needed for round-robin bookkeeping.
        return QueueID(next(self.next_id))

    def register_process(self, qid: QueueID) -> None:
        self.qids.append(qid)
        # Rebuild the cycle so the new process weighting takes effect.
        self.cycler = itertools.cycle(self.qids)

    def schedule_task(
        self,
        _task_id: TaskID,
        _func: Callable[..., Awaitable[R]],
        _args: Sequence[Any],
        _kwargs: Dict[str, Any],
    ) -> QueueID:
        # Task details are ignored; scheduling is purely positional.
        return next(self.cycler)

    def complete_task(self, _task_id: TaskID) -> None:
        pass
| 30.630952 | 87 | 0.643218 |
acf282d532e99d587d56a545567c6ea578735ba6 | 10,597 | py | Python | test/python/test_structure/test_tree.py | Karamaz0V1/Higra | 216d9e47641171b5a6f8b7e2b42c269b8dc34abd | [
"CECILL-B"
] | 64 | 2019-08-18T19:23:23.000Z | 2022-03-21T04:15:04.000Z | test/python/test_structure/test_tree.py | higra/Higra | e6d5984a585f652c87d303a6a6bec19f0eb7432e | [
"CECILL-B"
] | 120 | 2019-08-16T09:10:35.000Z | 2022-03-17T09:42:58.000Z | test/python/test_structure/test_tree.py | Karamaz0V1/Higra | 216d9e47641171b5a6f8b7e2b42c269b8dc34abd | [
"CECILL-B"
] | 12 | 2019-10-04T07:35:55.000Z | 2021-01-10T19:59:11.000Z | ############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : Benjamin Perret #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import unittest
import numpy as np
import higra as hg
class TestTree(unittest.TestCase):
@staticmethod
def get_tree():
parent_relation = np.asarray((5, 5, 6, 6, 6, 7, 7, 7), dtype=np.uint64)
return hg.Tree(parent_relation)
def test_size_tree(self):
t = TestTree.get_tree()
self.assertTrue(t.category() == hg.TreeCategory.PartitionTree)
self.assertTrue(t.root() == 7)
self.assertTrue(t.num_vertices() == 8)
self.assertTrue(t.num_edges() == 7)
self.assertTrue(t.num_leaves() == 5)
self.assertTrue(t.is_leaf(0))
self.assertTrue(not t.is_leaf(5))
self.assertTrue(np.all(t.is_leaf((0, 5, 2, 3, 7)) == (True, False, True, True, False)))
self.assertTrue(t.num_children(6) == 3)
self.assertTrue(np.all(t.num_children((5, 7, 6)) == (2, 2, 3)))
self.assertTrue(np.all(t.num_children() == (2, 3, 2)))
self.assertTrue(t.parent(4) == 6)
self.assertTrue(np.all(t.parent((0, 5, 2, 3, 7)) == (5, 7, 6, 6, 7)))
def test_dynamic_attributes(self):
t = TestTree.get_tree()
t.new_attribute = 42
self.assertTrue(t.new_attribute == 42)
def test_vertex_iterator(self):
t = TestTree.get_tree()
ref = [0, 1, 2, 3, 4, 5, 6, 7];
res = []
for v in t.vertices():
res.append(v)
self.assertTrue(res == ref)
def test_tree_degree(self):
t = TestTree.get_tree()
ref = [1, 1, 1, 1, 1, 3, 4, 2]
for v in t.vertices():
self.assertTrue(t.degree(v) == ref[v])
self.assertTrue(t.in_degree(v) == ref[v])
self.assertTrue(t.out_degree(v) == ref[v])
def test_ctr_fail(self):
with self.assertRaises(RuntimeError):
hg.Tree((5, 0, 6, 6, 6, 7, 7, 7))
with self.assertRaises(RuntimeError):
hg.Tree((5, 1, 6, 6, 6, 7, 7, 7))
with self.assertRaises(RuntimeError):
hg.Tree((5, 1, 6, 6, 6, 7, 7, 2))
with self.assertRaises(RuntimeError):
hg.Tree((2, 2, 4, 4, 4))
def test_edge_iterator(self):
t = TestTree.get_tree()
ref = [(0, 5),
(1, 5),
(2, 6),
(3, 6),
(4, 6),
(5, 7),
(6, 7)]
res = []
for e in t.edges():
res.append((t.source(e), t.target(e)))
self.assertTrue(res == ref)
def test_adjacent_vertex_iterator(self):
t = TestTree.get_tree()
ref = [[5],
[5],
[6],
[6],
[6],
[7, 0, 1],
[7, 2, 3, 4],
[5, 6]]
for v in t.vertices():
res = []
for a in t.adjacent_vertices(v):
res.append(a)
self.assertTrue(res == ref[v])
def test_out_edge_iterator(self):
t = TestTree.get_tree()
ref = [[(0, 5)],
[(1, 5)],
[(2, 6)],
[(3, 6)],
[(4, 6)],
[(5, 7), (5, 0), (5, 1)],
[(6, 7), (6, 2), (6, 3), (6, 4)],
[(7, 5), (7, 6)]];
for v in t.vertices():
res = []
for e in t.out_edges(v):
res.append((e[0], e[1]))
self.assertTrue(res == ref[v])
def test_in_edge_iterator(self):
t = TestTree.get_tree()
ref = [[(5, 0)],
[(5, 1)],
[(6, 2)],
[(6, 3)],
[(6, 4)],
[(7, 5), (0, 5), (1, 5)],
[(7, 6), (2, 6), (3, 6), (4, 6)],
[(5, 7), (6, 7)]];
for v in t.vertices():
res = []
for e in t.in_edges(v):
res.append((e[0], e[1]))
self.assertTrue(res == ref[v])
def test_edge_index_iterator(self):
t = TestTree.get_tree()
ref = [0, 1, 2, 3, 4, 5, 6]
res = []
for e in t.edges():
res.append(t.index(e))
self.assertTrue(res == ref)
def test_out_edge_index_iterator(self):
t = TestTree.get_tree()
ref = [[0],
[1],
[2],
[3],
[4],
[5, 0, 1],
[6, 2, 3, 4],
[5, 6]]
for v in t.vertices():
res = []
for e in t.out_edges(v):
res.append(e[2])
self.assertTrue(res == ref[v])
def test_in_edge_index_iterator(self):
t = TestTree.get_tree()
ref = [[0],
[1],
[2],
[3],
[4],
[5, 0, 1],
[6, 2, 3, 4],
[5, 6]]
for v in t.vertices():
res = []
for e in t.in_edges(v):
res.append(e[2])
self.assertTrue(res == ref[v])
def test_edge_list(self):
g = TestTree.get_tree()
ref_sources = (0, 1, 2, 3, 4, 5, 6)
ref_targets = (5, 5, 6, 6, 6, 7, 7)
sources = g.sources()
self.assertTrue(np.all(ref_sources == sources))
targets = g.targets()
self.assertTrue(np.all(ref_targets == targets))
sources, targets = g.edge_list()
self.assertTrue(np.all(ref_sources == sources))
self.assertTrue(np.all(ref_targets == targets))
def test_num_children(self):
t = TestTree.get_tree()
ref = [0, 0, 0, 0, 0, 2, 3, 2]
res = []
for v in t.vertices():
res.append(t.num_children(v))
self.assertTrue(res == ref)
def test_children_iterator(self):
t = TestTree.get_tree()
ref = [[],
[],
[],
[],
[],
[0, 1],
[2, 3, 4],
[5, 6]]
for v in t.vertices():
res = []
for c in t.children(v):
res.append(c)
self.assertTrue(res == ref[v])
self.assertTrue(t.child(1, 5) == 1)
self.assertTrue(np.all(t.child(0, (5, 7, 6)) == (0, 5, 2)))
self.assertTrue(np.all(t.child(1, (5, 7, 6)) == (1, 6, 3)))
def test_leaves_iterator(self):
t = TestTree.get_tree()
ref = [0, 1, 2, 3, 4]
self.assertTrue(ref == [l for l in t.leaves()])
def test_ancestors_iterator(self):
t = TestTree.get_tree()
self.assertTrue(np.all([1, 5, 7] == t.ancestors(1)))
self.assertTrue(np.all([6, 7] == t.ancestors(6)))
self.assertTrue(np.all([7] == t.ancestors(7)))
def test_find_region(self):
tree = hg.Tree((8, 8, 9, 7, 7, 11, 11, 9, 10, 10, 12, 12, 12))
altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 2, 2, 3), dtype=np.int32)
vertices = np.asarray((0, 0, 0, 2, 2, 9, 9, 12), dtype=np.int64)
lambdas = np.asarray((2, 3, 4, 1, 2, 2, 3, 3), dtype=np.float64)
expected_results = np.asarray((0, 10, 12, 2, 9, 9, 10, 12), dtype=np.int64)
for i in range(vertices.size):
self.assertTrue(tree.find_region(vertices[i], lambdas[i], altitudes) == expected_results[i])
self.assertTrue(np.all(tree.find_region(vertices, lambdas, altitudes) == expected_results))
def test_lowest_common_ancestor_scalar(self):
t = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
self.assertTrue(t.lowest_common_ancestor(0, 0) == 0)
self.assertTrue(t.lowest_common_ancestor(3, 3) == 3)
self.assertTrue(t.lowest_common_ancestor(5, 5) == 5)
self.assertTrue(t.lowest_common_ancestor(7, 7) == 7)
self.assertTrue(t.lowest_common_ancestor(0, 1) == 5)
self.assertTrue(t.lowest_common_ancestor(1, 0) == 5)
self.assertTrue(t.lowest_common_ancestor(2, 3) == 6)
self.assertTrue(t.lowest_common_ancestor(2, 4) == 6)
self.assertTrue(t.lowest_common_ancestor(3, 4) == 6)
self.assertTrue(t.lowest_common_ancestor(5, 6) == 7)
self.assertTrue(t.lowest_common_ancestor(0, 2) == 7)
self.assertTrue(t.lowest_common_ancestor(1, 4) == 7)
self.assertTrue(t.lowest_common_ancestor(2, 6) == 6)
def test_lowest_common_ancestor_vectorial(self):
t = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
v1 = np.asarray((0, 0, 1, 3), dtype=np.int64)
v2 = np.asarray((0, 3, 0, 0), dtype=np.int64)
res = t.lowest_common_ancestor(v1, v2)
ref = np.asarray((0, 7, 5, 7), dtype=np.int64)
self.assertTrue(np.all(res == ref))
def test_pickle(self):
import pickle
t = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
hg.set_attribute(t, "test", (1, 2, 3))
hg.add_tag(t, "foo")
data = pickle.dumps(t)
t2 = pickle.loads(data)
self.assertTrue(np.all(t.parents() == t2.parents()))
for v in t.vertices():
self.assertTrue(np.all(t.children(v) == t2.children(v)))
self.assertTrue(hg.get_attribute(t, "test") == hg.get_attribute(t2, "test"))
self.assertTrue(t.test == t2.test)
self.assertTrue(hg.has_tag(t2, "foo"))
def test_sub_tree(self):
tree = hg.Tree(np.asarray((8, 8, 9, 9, 10, 10, 11, 13, 12, 12, 11, 13, 14, 14, 14)))
# full tree
sub_tree, node_map = tree.sub_tree(14)
self.assertTrue(np.all(tree.parents() == sub_tree.parents()))
self.assertTrue(np.all(np.arange(tree.num_vertices()) == node_map))
# normal
sub_tree, node_map = tree.sub_tree(13)
self.assertTrue(np.all(sub_tree.parents() == (4, 4, 5, 6, 5, 6, 6)))
self.assertTrue(np.all(node_map == (4, 5, 6, 7, 10, 11, 13)))
# leaf
sub_tree, node_map = tree.sub_tree(3)
self.assertTrue(np.all(sub_tree.parents() == (0,)))
self.assertTrue(np.all(node_map == (3,)))
if __name__ == '__main__':
unittest.main()
| 31.727545 | 104 | 0.470605 |
acf2832d78dff64ada540f15500717cdbc2ee61b | 3,778 | py | Python | cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 1 | 2019-02-08T05:24:58.000Z | 2019-02-08T05:24:58.000Z | cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks
class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver):
    """Test cases for ``ScaleIODriver.create_volume_from_snapshot()``"""

    def setUp(self):
        """Setup a test case environment.
        Creates fake volume and snapshot objects and sets up the required
        API responses.
        """
        super(TestCreateVolumeFromSnapShot, self).setUp()
        ctx = context.RequestContext('fake', 'fake', auth_token=True)
        self.snapshot = fake_snapshot.fake_snapshot_obj(ctx)
        # ScaleIO names are base64-encoded then URL-quoted twice, mirroring
        # what the driver sends on the wire.
        # NOTE(review): urllib.quote exists only on Python 2; a Python 3 port
        # would need urllib.parse.quote -- confirm target interpreter.
        self.snapshot_name_2x_enc = urllib.quote(
            urllib.quote(self.driver._id_to_base64(self.snapshot.id))
        )
        self.volume = fake_volume.fake_volume_obj(ctx)
        self.volume_name_2x_enc = urllib.quote(
            urllib.quote(self.driver._id_to_base64(self.volume.id))
        )
        self.snapshot_reply = json.dumps(
            {
                'volumeIdList': [self.volume.id],
                'snapshotGroupId': 'snap_group'
            }
        )
        # Canned HTTPS responses, keyed by response mode then by URL suffix.
        self.HTTPS_MOCK_RESPONSES = {
            self.RESPONSE_MODE.Valid: {
                'types/Volume/instances/getByName::' +
                self.snapshot_name_2x_enc: self.snapshot.id,
                'instances/System/action/snapshotVolumes':
                    self.snapshot_reply,
            },
            self.RESPONSE_MODE.BadStatus: {
                'instances/System/action/snapshotVolumes':
                    self.BAD_STATUS_RESPONSE,
                'types/Volume/instances/getByName::' +
                self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
            },
            self.RESPONSE_MODE.Invalid: {
                'instances/System/action/snapshotVolumes':
                    mocks.MockHTTPSResponse(
                        {
                            'errorCode': self.VOLUME_NOT_FOUND_ERROR,
                            'message': 'BadStatus Volume Test',
                        }, 400
                    ),
                'types/Volume/instances/getByName::' +
                self.snapshot_name_2x_enc: None,
            },
        }

    def test_bad_login(self):
        # REST layer returns an HTTP error status: driver must surface it.
        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume_from_snapshot,
            self.volume,
            self.snapshot
        )

    def test_invalid_snapshot(self):
        # Backend reports VOLUME_NOT_FOUND: driver must raise, not swallow.
        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume_from_snapshot,
            self.volume,
            self.snapshot
        )

    def test_create_volume_from_snapshot(self):
        # Happy path: valid responses, call must complete without raising.
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
| 37.78 | 78 | 0.630492 |
acf2837466c23279607a287ec28a85f363de50d7 | 648 | py | Python | api/skills_matcher_db/experts/migrations/0003_auto_20220323_1840.py | WHOIGit/avast-skills-matcher-db | 3bb23b585c9e0a13b5e6ecaae7d1a8fdc346cb77 | [
"MIT"
] | null | null | null | api/skills_matcher_db/experts/migrations/0003_auto_20220323_1840.py | WHOIGit/avast-skills-matcher-db | 3bb23b585c9e0a13b5e6ecaae7d1a8fdc346cb77 | [
"MIT"
] | 2 | 2022-01-21T15:52:43.000Z | 2022-02-17T22:58:08.000Z | api/skills_matcher_db/experts/migrations/0003_auto_20220323_1840.py | WHOIGit/avast-skills-matcher-db | 3bb23b585c9e0a13b5e6ecaae7d1a8fdc346cb77 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.13 on 2022-03-23 18:40
from django.db import migrations, models
import skills_matcher_db.utils.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates the choice set of
    # ExpertProfile.availability (a ChoiceArrayField of CharField choices).

    dependencies = [
        ('experts', '0002_auto_20220203_1633'),
    ]

    operations = [
        migrations.AlterField(
            model_name='expertprofile',
            name='availability',
            field=skills_matcher_db.utils.fields.ChoiceArrayField(base_field=models.CharField(blank=True, choices=[('WEEKS', 'Weeks to months'), ('DAYS', 'Days to weeks'), ('INCIDENTAL', 'Incidental advice')], max_length=25), blank=True, null=True, size=None),
        ),
    ]
| 32.4 | 260 | 0.66821 |
acf284b080d18619e5ba172184ef6ebf1054cf47 | 664 | py | Python | manage.py | coronel08/flashcard_quiz | 4bc2ad36c0aca69d3ed295a64c767b70d11ac747 | [
"MIT"
] | null | null | null | manage.py | coronel08/flashcard_quiz | 4bc2ad36c0aca69d3ed295a64c767b70d11ac747 | [
"MIT"
] | null | null | null | manage.py | coronel08/flashcard_quiz | 4bc2ad36c0aca69d3ed295a64c767b70d11ac747 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'quiz_api.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; chain the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Entry point when invoked as `python manage.py <command>`.
    main()
| 28.869565 | 73 | 0.679217 |
acf284cdff4ea509c02e58b68a5bf7f061e176e3 | 725 | py | Python | web/books/admin/inlines.py | hdknr/django-books | b4bf6d144240edc4bcfc94180377adaadc9e533c | [
"MIT"
] | null | null | null | web/books/admin/inlines.py | hdknr/django-books | b4bf6d144240edc4bcfc94180377adaadc9e533c | [
"MIT"
] | null | null | null | web/books/admin/inlines.py | hdknr/django-books | b4bf6d144240edc4bcfc94180377adaadc9e533c | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from django.utils.translation import ugettext_lazy as _
from books import models
class ContactInline(admin.TabularInline):
    # Tabular inline editor for Contact rows on the parent admin page.
    # created_at is set automatically, so it is hidden from the form.
    model = models.Contact
    exclude = ['created_at']
    readonly_fields = ['updated_at']
    extra = 0
class FisicalYearInline(admin.TabularInline):
    # Inline editor for fiscal-year rows. NOTE: "Fisical" is a typo
    # inherited from the model name and cannot be fixed here alone.
    model = models.FisicalYear
    exclude = ['created_at']
    readonly_fields = ['updated_at']
    extra = 0
class BankInline(GenericTabularInline):
    # Generic-FK inline: Bank rows attach to any owner model through the
    # (owner_content_type, owner_object_id) pair declared below.
    model = models.Bank
    exclude = ['created_at', 'organization']
    readonly_fields = ['updated_at']
    extra = 0
    ct_field = 'owner_content_type'
    ct_fk_field = 'owner_object_id'
acf284e4017a1b5bdcebf2c50659ceccc64831e9 | 4,829 | py | Python | onlinepayments/sdk/domain/customer_device.py | wl-online-payments-direct/sdk-python3 | 99fca127334520cde4ffa3a34cbea3b3a0d3fbff | [
"Apache-2.0"
] | null | null | null | onlinepayments/sdk/domain/customer_device.py | wl-online-payments-direct/sdk-python3 | 99fca127334520cde4ffa3a34cbea3b3a0d3fbff | [
"Apache-2.0"
] | null | null | null | onlinepayments/sdk/domain/customer_device.py | wl-online-payments-direct/sdk-python3 | 99fca127334520cde4ffa3a34cbea3b3a0d3fbff | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This class was auto-generated.
#
from onlinepayments.sdk.data_object import DataObject
from onlinepayments.sdk.domain.browser_data import BrowserData
class CustomerDevice(DataObject):
"""
| Object containing information on the device and browser of the customer
"""
__accept_header = None
__browser_data = None
__ip_address = None
__locale = None
__timezone_offset_utc_minutes = None
__user_agent = None
@property
def accept_header(self) -> str:
"""
| The accept-header of the customer client from the HTTP Headers.
Type: str
"""
return self.__accept_header
@accept_header.setter
def accept_header(self, value: str):
self.__accept_header = value
@property
def browser_data(self) -> BrowserData:
"""
| Object containing information regarding the browser of the customer
Type: :class:`onlinepayments.sdk.domain.browser_data.BrowserData`
"""
return self.__browser_data
@browser_data.setter
def browser_data(self, value: BrowserData):
self.__browser_data = value
@property
def ip_address(self) -> str:
"""
| The IP address of the customer client from the HTTP Headers.
Type: str
"""
return self.__ip_address
@ip_address.setter
def ip_address(self, value: str):
self.__ip_address = value
@property
def locale(self) -> str:
"""
| Locale of the client device/browser. Returned in the browser from the navigator.language property.
| If you use the latest version of our JavaScript Client SDK, we will collect this data and include it in the encryptedCustomerInput property. We will then automatically populate this data if available.
Type: str
"""
return self.__locale
@locale.setter
def locale(self, value: str):
self.__locale = value
@property
def timezone_offset_utc_minutes(self) -> str:
"""
| Offset in minutes of timezone of the client versus the UTC. Value is returned by the JavaScript getTimezoneOffset() Method.
| If you use the latest version of our JavaScript Client SDK, we will collect this data and include it in the encryptedCustomerInput property. We will then automatically populate this data if available.
Type: str
"""
return self.__timezone_offset_utc_minutes
@timezone_offset_utc_minutes.setter
def timezone_offset_utc_minutes(self, value: str):
self.__timezone_offset_utc_minutes = value
@property
def user_agent(self) -> str:
"""
| User-Agent of the client device/browser from the HTTP Headers.
| As a fall-back we will use the userAgent that might be included in the encryptedCustomerInput, but this is captured client side using JavaScript and might be different.
Type: str
"""
return self.__user_agent
@user_agent.setter
def user_agent(self, value: str):
self.__user_agent = value
def to_dictionary(self):
dictionary = super(CustomerDevice, self).to_dictionary()
if self.accept_header is not None:
dictionary['acceptHeader'] = self.accept_header
if self.browser_data is not None:
dictionary['browserData'] = self.browser_data.to_dictionary()
if self.ip_address is not None:
dictionary['ipAddress'] = self.ip_address
if self.locale is not None:
dictionary['locale'] = self.locale
if self.timezone_offset_utc_minutes is not None:
dictionary['timezoneOffsetUtcMinutes'] = self.timezone_offset_utc_minutes
if self.user_agent is not None:
dictionary['userAgent'] = self.user_agent
return dictionary
def from_dictionary(self, dictionary):
super(CustomerDevice, self).from_dictionary(dictionary)
if 'acceptHeader' in dictionary:
self.accept_header = dictionary['acceptHeader']
if 'browserData' in dictionary:
if not isinstance(dictionary['browserData'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['browserData']))
value = BrowserData()
self.browser_data = value.from_dictionary(dictionary['browserData'])
if 'ipAddress' in dictionary:
self.ip_address = dictionary['ipAddress']
if 'locale' in dictionary:
self.locale = dictionary['locale']
if 'timezoneOffsetUtcMinutes' in dictionary:
self.timezone_offset_utc_minutes = dictionary['timezoneOffsetUtcMinutes']
if 'userAgent' in dictionary:
self.user_agent = dictionary['userAgent']
return self
| 34.741007 | 210 | 0.660178 |
acf28679e92aba85dbecb09b0422b04d65081c80 | 13,103 | py | Python | circularGame.py | chm15/pygamegenetic | 7945ce72d08b4a7c1ee1a46c822b31d8875a4696 | [
"MIT"
] | null | null | null | circularGame.py | chm15/pygamegenetic | 7945ce72d08b4a7c1ee1a46c822b31d8875a4696 | [
"MIT"
] | null | null | null | circularGame.py | chm15/pygamegenetic | 7945ce72d08b4a7c1ee1a46c822b31d8875a4696 | [
"MIT"
] | null | null | null | import math, pygame, time, sys, random, numpy as np
from pprint import pprint
pygame.init()
random.seed()
randTime = int(time.time() // 1)
np.random.seed(randTime)
metersToPixels = 20 #20
class Game:
def __init__(self, players):
"""
Contains the main game. Call startGame() to begin.
:players = [Player()]
"""
self.maxTime = 3
self.players = players
self.loopPause = 10000000 # 1/x
self.backgroundColor = (30, 40, 50)
self.screenSize = self.screenWidth, self.screenHeight = 900, 700
self.screen = pygame.display.set_mode(self.screenSize)
self.groundHeight = 10 # Pixels
self.groundPosition = self.screenHeight - self.groundHeight
self.groundColor = (70, 80, 110)
self.furthestxStartPoint = players[0].startingX * metersToPixels
self.furthestx = self.furthestxStartPoint
self.textFont = pygame.font.SysFont('Arial', 50)
def startGame(self):
startTime = time.time()
self.initializePlayers(startTime)
gameOver = False
currentRelease = 0
while not gameOver:
currentTime = time.time()
timeSinceStart = currentTime - startTime
self.screen.fill(self.backgroundColor)
# Check for pygame events.
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.players[currentRelease].releaseFromRope()
if currentRelease < len(self.players) - 1: currentRelease += 1
# Iterate through all players and run physics, neural network, and then draw to screen.
gameOver = True
for player in self.players:
# Check for gameOver Conditions.
if (not player.dead and not player.isSpinning) or (timeSinceStart < self.maxTime and not player.dead):
gameOver = False
# Run through player processes.
if not player.dead:
# Physics for all objects in scene.
player.runPhysics(currentTime)
if player.x > self.furthestx:
self.furthestx = player.x
# Check for collisions.
if player.y + player.mapToScreen(player.playerRadius) > self.groundPosition:
player.dead = True
# Network prediction for each player.
if timeSinceStart < self.maxTime:
if player.network.predict(player.currentAngle%(2*math.pi)) > 0.5:
player.releaseFromRope()
# Draw all objects to screen.
player.draw(self.screen)
# Draw static objects to screen.
pygame.draw.rect(self.screen, self.groundColor,(0, self.screenHeight - self.groundHeight, self.screenWidth, self.screenHeight))
pygame.draw.rect(self.screen, (220, 80, 80),(self.furthestx, 0, 3, self.screenHeight))
currentScore = self.textFont.render(str(round((self.furthestx - self.furthestxStartPoint) / metersToPixels, 1)) + " m", False, (250, 250, 250))
self.screen.blit(currentScore, (self.furthestx + 5, self.screenHeight - self.groundHeight - 40))
rotationx = (players[0].startingX - players[0].radiusOfOrbit) * metersToPixels
rotationy = players[0].startingY * metersToPixels
pygame.draw.circle(self.screen, (110, 120, 135), (rotationx, rotationy), 5)
# Update Screen.
pygame.display.flip()
# Check for gameOver conditions.
if gameOver:
time.sleep(0.25)
# Pause loop for set time.
time.sleep(1/self.loopPause)
# ================= End of game loop. =====================
return
def initializePlayers(self, startTime):
"""
:startTime = The starting time to synchronize all players to.
"""
for player in self.players:
player.startTime = startTime
player.resetValues()
class Player:
    def __init__(self, playerId):
        """
        Player with all physics calculations built in.
        :startingCoords = (x, y) of starting coordinates in meters.
        The startingX and startingY values will be stored for use when the resetValues() function is called.
        """
        startingCoords = (7, 30)
        self.playerId = playerId
        self.startTime = None
        self.startingX = startingCoords[0] # in meters.
        self.startingY = startingCoords[1] # in meters.
        self.velocity = 17 # m/s, must be mapped to pixels.
        self.radiusOfOrbit = 3 # in meters.
        self.rotationOrigin = (self.startingX - self.radiusOfOrbit, self.startingY)
        self.playerRadius = 0.5 # Radius of the player.
        self.acceleration = 9.81
        self.color = (random.randint(20, 255), random.randint(20, 255), random.randint(20, 255))
        # One-input network that decides, from the current angle, when to let go.
        self.network = Network((1,3,1), (Sigmoid, Sigmoid))
        self.resetValues()
    def releaseFromRope(self):
        # NOTE(review): relies on self.currentTime / self.currentAngle being set
        # by runPhysics(); calling this before the first physics tick would raise.
        if self.isSpinning == True:
            self.isSpinning = False
            self.coordsAtRelease = (self.x, self.y)
            self.timeOfRelease = self.currentTime
            # Launch direction is tangential: 90 degrees ahead of the radius.
            self.angleOfRelease = self.currentAngle + (math.pi / 2)
            Vx = self.velocity * math.cos(self.angleOfRelease)
            Vy = self.velocity * math.sin(self.angleOfRelease)
            self.velocitiesAtRelease = (Vx, Vy)
    def resetValues(self):
        # Restore all per-round state so the same Player can be reused each round.
        self.x = self.startingX
        self.y = self.startingY
        self.isSpinning = True
        self.dead = False
        self.timeOfRelease = None
        self.coordsAtRelease = None
        self.angleOfRelease = None
        self.currentAngle = 0
        self.velocitiesAtRelease = None # The x velocity parallel to the ground. Set upon release.
        self.score = 0
    def runPhysics(self, currentTime):
        # Update the player's x and y values, update the player's local time..
        self.currentTime = currentTime
        if not self.dead:
            if self.isSpinning:
                # Circular motion around the rotation origin.
                dx, dy = self.originOfOrbit(self.rotationOrigin, self.velocity, self.radiusOfOrbit)
                self.x = self.mapToScreen(self.startingX + dx)
                self.y = self.mapToScreen(self.startingY + dy)
            else:
                # Ballistic flight: constant vx, gravity on vy (screen y grows downward).
                dtSinceRelease = currentTime - self.timeOfRelease
                self.x = (self.mapToScreen(dtSinceRelease * self.velocitiesAtRelease[0])) + self.coordsAtRelease[0]
                self.y = self.mapToScreen((self.velocitiesAtRelease[1] * dtSinceRelease) + (0.5 * self.acceleration * (dtSinceRelease**2))) + self.coordsAtRelease[1]
        self.getScore() # Updates the player's score
    def originOfOrbit(self, coords, velocity, radius):
        """
        Function returns (x, y) in units of meters of a point at (currentTime - startTime).
        :coords = (x, y) Coordinates to orbit around (NOTE: currently unused).
        :velocity = Centripetal velocity of the object.
        :radius of circle.
        """
        dt = self.currentTime - self.startTime
        period = (2 * math.pi * radius) / velocity
        angle = (2 * math.pi * dt) / period
        self.currentAngle = angle
        x = radius * math.cos(angle) - radius #coordinate system translated left so that at dt = 0, x = 0.
        y = radius * math.sin(angle)
        return (x, y)
    def mapToScreen(self, meters):
        # Convert a distance in meters to whole screen pixels.
        return int(meters * metersToPixels)
    def draw(self, screen):
        center = (int(self.x), int(self.y))
        radiusInPixels = self.mapToScreen(self.playerRadius)
        pygame.draw.circle(screen, self.color, center, radiusInPixels)
    def getScore(self):
        # Horizontal distance travelled (in pixels) past the starting point.
        score = self.x - self.mapToScreen(self.startingX)
        if self.isSpinning:
            self.score = 0
        elif score < -5:
            # NOTE(review): far-backward flights get a flat score of 5;
            # looks deliberate (small consolation) but worth confirming.
            self.score = 5
        else:
            self.score = self.x - self.mapToScreen(self.startingX)
class Network:
    """Minimal feed-forward neural network used as a player's controller.

    Weights and biases live in dicts keyed by layer index: for dimensions
    (1, 3, 1) there are w[1], w[2] and b[1], b[2], with activations[2] and
    activations[3] applied after each affine step.
    """
    def __init__(self, dimensions, activations):
        """
        :param dimensions: (tpl/ list) Dimensions of the neural net. (input, hidden layer, output) ex. [2, 3, 1], len(ex.) = 3
        :param activations: (tpl/ list) Activation classes, one per non-input layer,
            each exposing a static ``activation(z)`` method.
        """
        self.totalLayers = len(dimensions)
        self.loss = None
        self.learningRate = None
        # Weights and biases are initiated by index. For a one hidden layer net you will have a w[1] and w[2].
        self.w = {}
        self.b = {}
        # Activations are indexed by the layer they feed: activations[2] .. activations[n].
        self.activations = {}
        for i in range(len(dimensions) - 1):
            # Scale by 1/sqrt(fan_in) so early activations stay in a sane range.
            self.w[i + 1] = np.random.rand(dimensions[i], dimensions[i + 1]) / np.sqrt(dimensions[i])
            self.b[i + 1] = np.zeros(dimensions[i + 1])
            self.activations[i + 2] = activations[i]

    def feedForward(self, x):
        """
        Execute a forward feed through the network.

        :param x: (array) Batch of input data vectors.
        :return: (tpl) Pre-activations ``z`` and activations ``a`` per layer;
            dict keys match layer numbers and ``a[1]`` is the raw input.
        """
        z = {}
        a = {1: x}  # Layer 1 has no activation function; the input is used as-is.
        for i in range(1, self.totalLayers):
            z[i + 1] = np.dot(a[i], self.w[i]) + self.b[i]
            a[i + 1] = self.activations[i + 1].activation(z[i + 1])
        return z, a

    def predict(self, x):
        """
        Run a forward pass and return the output layer's activations.

        :param x: (array) Containing parameters
        :return: (array) A 2D array of shape (n_cases, n_classes).
        """
        _, a = self.feedForward(x)
        return a[self.totalLayers]

    def randomize(self, rate):
        """
        Randomly mutate weights and biases in place.

        Each parameter mutates with probability 1/(5*rate + 1) by a uniform
        offset in [-0.3, 0.3); a larger ``rate`` means fewer mutations.
        (The old debug ``print`` reported /2000 while /1000 was applied;
        that misleading output has been removed.)
        """
        for i in range(self.totalLayers - 1):
            for j in range(len(self.w[i + 1])):
                for k in range(len(self.w[i + 1][j])):
                    if random.randint(0, 5 * rate) == 0:
                        self.w[i + 1][j][k] += (random.randint(0, 600) - 300) / 1000
        for i in range(len(self.b)):
            for j in range(len(self.b[i + 1])):
                if random.randint(0, 5 * rate) == 0:
                    self.b[i + 1][j] += (random.randint(0, 600) - 300) / 1000
class Relu:
    """Rectified linear unit activation."""

    @staticmethod
    def activation(z):
        """Return element-wise max(z, 0) without mutating the input.

        The previous implementation zeroed negative entries in place
        (``z[z < 0] = 0``), silently corrupting the caller's array.
        """
        return np.maximum(z, 0)
class Sigmoid:
    """Logistic sigmoid activation: maps any real input into (0, 1)."""

    @staticmethod
    def activation(z):
        # Identical arithmetic to 1 / (1 + np.exp(-z)), spelled out stepwise.
        exp_neg = np.exp(-z)
        return 1 / (1 + exp_neg)
def sortPlayersByScore(players):
    """Return a new list of players ordered by score, highest first.

    Replaces a hand-rolled O(n^2) insertion sort (with a convoluted
    three-way tie branch) by the standard library's Timsort, O(n log n).
    Relative order of equal scores is now stable instead of reversed,
    which no caller depends on -- the genetic algorithm only needs a
    best-first ranking.
    """
    return sorted(players, key=lambda player: player.score, reverse=True)
def geneticAlgorithm(unsortedPlayers, generation):
    """Rank the population, clone the elite into the rest, and mutate.

    The top 10 players are kept as-is; every remaining player receives a
    deep copy of one elite player's network (round-robin), and players
    outside the top quarter are additionally mutated.

    :param unsortedPlayers: population to evolve.
    :param generation: (int) current generation, used as the mutation rate.
    :return: (list) players sorted by descending score, networks updated.
    """
    players = sortPlayersByScore(unsortedPlayers)
    eliteCount = 10
    mutationStart = len(players) // 4
    for idx in range(eliteCount, len(players)):
        parent = players[idx % eliteCount]
        parentWeights = parent.network.w
        parentBiases = parent.network.b
        # Copy layer by layer (keys start at 1) so the clone owns its own
        # weight/bias containers instead of aliasing the parent's.
        for layer in range(len(parentWeights)):
            players[idx].network.w[layer + 1] = parentWeights[layer + 1].copy()
        for layer in range(len(parentBiases)):
            players[idx].network.b[layer + 1] = parentBiases[layer + 1].copy()
        if idx > mutationStart:
            players[idx].network.randomize(generation)
    return players
# --- Simulation entry point -------------------------------------------------
# Evolve a population of players with the genetic algorithm for a fixed
# number of generations.
totalPlayers = 100
totalGenerations = 100

players = [Player(i) for i in range(totalPlayers)]
game = Game(players)

for generation in range(totalGenerations):
    game.startGame()
    # Re-rank the pool, clone the elite into the rest and mutate it.
    game.players = geneticAlgorithm(game.players, generation)
    for player in players:
        player.resetValues()
| 37.330484 | 165 | 0.572999 |
acf287ac08fbe4217c0598f9785fd23dae3861b7 | 1,766 | py | Python | Tools/unicode/genwincodec.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | 1 | 2018-06-21T18:21:24.000Z | 2018-06-21T18:21:24.000Z | Tools/unicode/genwincodec.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | Tools/unicode/genwincodec.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | """This script generates a Python codec module z a Windows Code Page.
It uses the function MultiByteToWideChar to generate a decoding table.
"""
zaimportuj ctypes
z ctypes zaimportuj wintypes
z gencodec zaimportuj codegen
zaimportuj unicodedata
def genwinmap(codepage):
    """Build a byte -> (unicode ordinal, character name) map for *codepage*.

    Windows-only: decodes every byte 0-255 through the Win32
    MultiByteToWideChar API.  NOTE: this file is written in a translated
    "Polish Python" dialect (``dla``/``w`` = for/in, ``spróbuj`` = try,
    ``wyjąwszy`` = except, ``zwróć`` = return); the keywords are kept
    verbatim, only comments are added here.
    """
    # Resolve the Win32 decoder and declare its C signature so ctypes
    # marshals the arguments correctly.
    MultiByteToWideChar = ctypes.windll.kernel32.MultiByteToWideChar
    MultiByteToWideChar.argtypes = [wintypes.UINT, wintypes.DWORD,
                                    wintypes.LPCSTR, ctypes.c_int,
                                    wintypes.LPWSTR, ctypes.c_int]
    MultiByteToWideChar.restype = ctypes.c_int
    enc2uni = {}
    # C0 control bytes and DEL map to themselves with a fixed name.
    dla i w list(range(32)) + [127]:
        enc2uni[i] = (i, 'CONTROL CHARACTER')
    dla i w range(256):
        # Decode exactly one byte; the result must be a single wide char.
        buf = ctypes.create_unicode_buffer(2)
        ret = MultiByteToWideChar(
            codepage, 0,
            bytes([i]), 1,
            buf, 2)
        assert ret == 1, "invalid code page"
        assert buf[1] == '\x00'
        spróbuj:
            name = unicodedata.name(buf[0])
        wyjąwszy ValueError:
            # Unnamed character: keep the control-character label if any.
            spróbuj:
                name = enc2uni[i][1]
            wyjąwszy KeyError:
                name = ''
        enc2uni[i] = (ord(buf[0]), name)
    zwróć enc2uni
def genwincodec(codepage):
    """Generate and print a Python codec module for a Windows code page.

    Delegates the table generation to ``gencodec.codegen`` and swaps in a
    docstring recording how the module was produced.
    """
    zaimportuj platform
    map = genwinmap(codepage)
    encodingname = 'cp%d' % codepage
    code = codegen("", map, encodingname)
    # Replace the first lines with our own docstring (the '"""#"' marker
    # delimits the generated module's original docstring).
    code = '''\
"""Python Character Mapping Codec %s generated on Windows:
%s przy the command:
python Tools/unicode/genwincodec.py %s
"""#"
''' % (encodingname, ' '.join(platform.win32_ver()), codepage
       ) + code.split('"""#"', 1)[1]
    print(code)
# Dialect spelling of "if __name__ == '__main__':" — generate the codec
# module for the code page number given on the command line.
jeżeli __name__ == '__main__':
    zaimportuj sys
    genwincodec(int(sys.argv[1]))
| 28.483871 | 70 | 0.607588 |
acf288fd439c3c063767e999b7b0f9f8087fb72b | 1,287 | py | Python | App/services/upload_service.py | CKVB/Pdf-Tiff-Converter | 6a3ab9a3bf6e376941c40278c759badd8d13412d | [
"MIT"
] | null | null | null | App/services/upload_service.py | CKVB/Pdf-Tiff-Converter | 6a3ab9a3bf6e376941c40278c759badd8d13412d | [
"MIT"
] | null | null | null | App/services/upload_service.py | CKVB/Pdf-Tiff-Converter | 6a3ab9a3bf6e376941c40278c759badd8d13412d | [
"MIT"
] | null | null | null | from fastapi import status
from fastapi.responses import JSONResponse
from .clear_static_service import clear_static_service
from .. import constants as cs
from .. import config as cg
import magic
async def upload_service(file):
    """Persist an uploaded file into the static PDF/TIFF directory.

    The target directory is cleared first, the payload is written to disk,
    and the file's MIME type (detected from its content, not its name) is
    checked against the supported types.  Returns a JSONResponse describing
    success (201), an unsupported type (422), or a write failure (500).
    """
    payload = await file.read()
    detector = magic.Magic(mime=True)
    destination = f"{cs.PDF_TIFF_DIR}/{file.filename}"
    # Only one converted document is kept at a time.
    clear_static_service(cs.PDF_TIFF_DIR)
    try:
        with open(destination, "wb") as handle:
            handle.write(payload)
    except Exception as exc:
        return JSONResponse(
            content={"message": f"Error occured : {exc}"},
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    detected_type = detector.from_file(destination)
    if detected_type in cg.SUPPORTED_FILE_TYPES.values():
        return JSONResponse(
            {"message": "File Uploaded"},
            status_code=status.HTTP_201_CREATED,
        )
    return JSONResponse(
        {"message": f"file type {detected_type} not supported."},
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
    )
| 32.175 | 74 | 0.554002 |
acf28b16c9f48970363e7253027552b4238a90ff | 2,245 | py | Python | AI_Challenger/Evaluation/caption_eval/test.py | hdy007007/show_attend_and_tell | f4990b113b0c7fa61e01a7d2ad2537d43270fd28 | [
"MIT"
] | 2 | 2018-05-12T08:45:54.000Z | 2018-06-09T13:10:20.000Z | AI_Challenger/Evaluation/caption_eval/test.py | hdy007007/show_attend_and_tell | f4990b113b0c7fa61e01a7d2ad2537d43270fd28 | [
"MIT"
] | null | null | null | AI_Challenger/Evaluation/caption_eval/test.py | hdy007007/show_attend_and_tell | f4990b113b0c7fa61e01a7d2ad2537d43270fd28 | [
"MIT"
] | 1 | 2019-11-18T06:43:52.000Z | 2019-11-18T06:43:52.000Z |
# encoding: utf-8
# Copyright 2017 challenger.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unittest for image Chinese captioning evaluation."""
# __author__ = 'ZhengHe'
# python2.7
# python run_evaluations.py
# Python 2 only: re-expose sys.setdefaultencoding (hidden after interpreter
# start-up) so Chinese caption strings decode as UTF-8 throughout.
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from run_evaluations import compute_m1
import json
# Score candidate captions against the references with the challenge's
# combined "m1" metric.
m1_score = compute_m1(json_predictions_file="./data/val_cadidate_captions_json.json",
                      reference_file="./data/val_references_json.json")
# NOTE(review): the block below appears to be leftover scratch code for
# truncating the validation set while debugging; kept commented out as found.
#with open('/home/houdanyang/tensorflow/show-attend-and-tell/AI_Challenger/Evaluation/caption_eval/data/val_captions_json.json') as f:
#    val_captions = json.load(f)
#
#with open('/home/houdanyang/tensorflow/show-attend-and-tell/AI_Challenger/Evaluation/caption_eval/data/val_references_json.json') as f:
#    val_references = json.load(f)
#
#with open('/home/houdanyang/tensorflow/show-attend-and-tell/AI_Challenger/Evaluation/caption_eval/data/id_to_words.json') as f:
#    id_to_words = json.load(f)
#
#with open('/home/houdanyang/tensorflow/show-attend-and-tell/AI_Challenger/Evaluation/caption_eval/data/id_to_test_caption.json') as f:
#    id_to_test_words = json.load(f)
#
#val_captions = val_captions[:3]
#val_references['annotations'] = val_references['annotations'][:14]
#val_references['images'] = val_references['images'][:14]
###
#with open('/home/houdanyang/tensorflow/show-attend-and-tell/AI_Challenger/Evaluation/caption_eval/data/val_captions_json1.json','w') as f:
#    json.dump(val_captions,f,ensure_ascii=False)
#
#with open('/home/houdanyang/tensorflow/show-attend-and-tell/AI_Challenger/Evaluation/caption_eval/data/val_references_json1.json','w') as f:
#    json.dump(val_references,f,ensure_ascii=False)
acf28bf3186def8dddffec28371890e12aaf2672 | 257 | py | Python | pygunshot/__init__.py | dizcza/pygunshot | 2fafe75083246b5f6b4e71b9b4cd3ca7be170443 | [
"MIT"
] | null | null | null | pygunshot/__init__.py | dizcza/pygunshot | 2fafe75083246b5f6b4e71b9b4cd3ca7be170443 | [
"MIT"
] | null | null | null | pygunshot/__init__.py | dizcza/pygunshot | 2fafe75083246b5f6b4e71b9b4cd3ca7be170443 | [
"MIT"
] | null | null | null | """pygunshot is a set of functions to generate
gunshot sounds given the scene geometry and
ballistic parameters. Note that the module only
provides anechoic samples and appropriate
reverberation effects need to be added."""
__version__ = '0.1.0.dev1' | 36.714286 | 48 | 0.782101 |
acf28c277c2df146a5976da091f6c45c214f39e4 | 6,012 | py | Python | im2mesh/vnn_onet/training.py | supriya-gdptl/vnn-neural-implicits | 34118fac8ccc530c539693381120dbfedf2bc0f8 | [
"MIT"
] | 27 | 2021-07-24T17:45:31.000Z | 2022-03-16T01:33:45.000Z | im2mesh/vnn_onet/training.py | supriya-gdptl/vnn-neural-implicits | 34118fac8ccc530c539693381120dbfedf2bc0f8 | [
"MIT"
] | 3 | 2021-08-23T20:08:03.000Z | 2022-02-15T12:17:13.000Z | im2mesh/vnn_onet/training.py | supriya-gdptl/vnn-neural-implicits | 34118fac8ccc530c539693381120dbfedf2bc0f8 | [
"MIT"
] | 8 | 2021-07-24T20:40:13.000Z | 2022-02-15T11:01:43.000Z | import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from im2mesh.common import (
compute_iou, make_3d_grid
)
from im2mesh.utils import visualize as vis
from im2mesh.training import BaseTrainer
class Trainer(BaseTrainer):
    ''' Trainer object for the Occupancy Network.

    Wraps one model/optimizer pair and provides the per-batch train,
    evaluation and visualization steps used by the training loop.

    Args:
        model (nn.Module): Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        device (device): pytorch device
        input_type (str): input type
        vis_dir (str): visualization directory
        threshold (float): threshold value
        eval_sample (bool): whether to evaluate samples
        latent_reg (str): latent-space regularizer name
            ('invariant_reg' enables the frame-alignment penalty)
        latent_reg_scale (float): weight of the latent regularization term
    '''

    def __init__(self, model, optimizer, device=None, input_type='img',
                 vis_dir=None, threshold=0.5, eval_sample=False,
                 latent_reg=None, latent_reg_scale=1):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.input_type = input_type
        self.vis_dir = vis_dir
        self.threshold = threshold
        self.eval_sample = eval_sample
        self.latent_reg = latent_reg
        self.latent_reg_scale = latent_reg_scale
        if vis_dir is not None and not os.path.exists(vis_dir):
            os.makedirs(vis_dir)

    def train_step(self, data):
        ''' Performs a training step.

        Args:
            data (dict): data dictionary

        Returns:
            float: scalar loss value of this step.
        '''
        self.model.train()
        self.optimizer.zero_grad()
        loss = self.compute_loss(data)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def eval_step(self, data):
        ''' Performs an evaluation step.

        Computes the negative ELBO, reconstruction error, KL term and IoU
        (plus a voxel-grid IoU when voxels are present) without gradients.

        Args:
            data (dict): data dictionary

        Returns:
            dict: metric name -> scalar value.
        '''
        self.model.eval()
        device = self.device
        threshold = self.threshold
        eval_dict = {}
        # Compute elbo
        points = data.get('points').to(device)
        occ = data.get('points.occ').to(device)
        # 'inputs' is optional; an empty tensor stands in for "no input".
        inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
        voxels_occ = data.get('voxels')
        points_iou = data.get('points_iou').to(device)
        occ_iou = data.get('points_iou.occ').to(device)
        kwargs = {}
        with torch.no_grad():
            elbo, rec_error, kl = self.model.compute_elbo(
                points, occ, inputs, **kwargs)
        eval_dict['loss'] = -elbo.mean().item()
        eval_dict['rec_error'] = rec_error.mean().item()
        eval_dict['kl'] = kl.mean().item()
        # Compute iou
        batch_size = points.size(0)
        with torch.no_grad():
            p_out = self.model(points_iou, inputs,
                               sample=self.eval_sample, **kwargs)
        occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
        occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
        eval_dict['iou'] = iou
        # Estimate voxel iou
        if voxels_occ is not None:
            voxels_occ = voxels_occ.to(device)
            # Sample the predictor on a 32^3 grid of voxel centers.
            points_voxels = make_3d_grid(
                (-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, (32,) * 3)
            points_voxels = points_voxels.expand(
                batch_size, *points_voxels.size())
            points_voxels = points_voxels.to(device)
            with torch.no_grad():
                p_out = self.model(points_voxels, inputs,
                                   sample=self.eval_sample, **kwargs)
            voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
            occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
            iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
            eval_dict['iou_voxels'] = iou_voxels
        return eval_dict

    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Renders the predicted occupancy on a 32^3 grid and writes one
        input image and one voxel image per batch element into vis_dir.

        Args:
            data (dict): data dictionary
        '''
        device = self.device
        batch_size = data['points'].size(0)
        inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)
        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())
        kwargs = {}
        with torch.no_grad():
            p_r = self.model(p, inputs, sample=self.eval_sample, **kwargs)
        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(
                inputs[i].cpu(), self.input_type, input_img_path)
            vis.visualize_voxels(
                voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))

    def compute_loss(self, data):
        ''' Computes the loss.

        Loss = mean KL(q(z|x) || p0(z)) + mean summed BCE on the sampled
        points, plus an optional latent-frame regularizer.

        Args:
            data (dict): data dictionary

        Returns:
            torch.Tensor: scalar loss with gradients attached.
        '''
        device = self.device
        p = data.get('points').to(device)
        occ = data.get('points.occ').to(device)
        inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device)
        kwargs = {}
        c = self.model.encode_inputs(inputs)
        # Some encoders also return metadata (e.g. an estimated frame).
        if isinstance(c, tuple):
            c, c_meta = c
        q_z = self.model.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        # KL-divergence
        kl = dist.kl_divergence(q_z, self.model.p0_z).sum(dim=-1)
        loss = kl.mean()
        # General points
        logits = self.model.decode(p, z, c, **kwargs).logits
        loss_i = F.binary_cross_entropy_with_logits(
            logits, occ, reduction='none')
        loss = loss + loss_i.sum(-1).mean()
        # Latent space regularization: pull the first 3 rows of the
        # encoder metadata towards the identity frame.
        if self.latent_reg == 'invariant_reg':
            std_frame = torch.eye(3, device=self.device).unsqueeze(0).repeat(c_meta.shape[0],1,1)
            loss_reg = F.mse_loss(c_meta[:, :3], std_frame)
            loss_reg *= self.latent_reg_scale
            loss = loss + loss_reg
        return loss
acf28c893a220ef5f00d644ae8c9609a6ab39e1b | 2,131 | py | Python | tests/openbb_terminal/cryptocurrency/overview/test_pycoingecko_view.py | tehcoderer/GamestonkTerminal | 54a1b6f545a0016c576e9e00eef5c003d229dacf | [
"MIT"
] | 255 | 2022-03-29T16:43:51.000Z | 2022-03-31T23:57:08.000Z | tests/openbb_terminal/cryptocurrency/overview/test_pycoingecko_view.py | tehcoderer/GamestonkTerminal | 54a1b6f545a0016c576e9e00eef5c003d229dacf | [
"MIT"
] | 14 | 2022-03-29T14:20:33.000Z | 2022-03-31T23:39:20.000Z | tests/openbb_terminal/cryptocurrency/overview/test_pycoingecko_view.py | tehcoderer/GamestonkTerminal | 54a1b6f545a0016c576e9e00eef5c003d229dacf | [
"MIT"
] | 24 | 2022-03-29T15:28:56.000Z | 2022-03-31T23:54:15.000Z | from unittest import TestCase
import pytest
from openbb_terminal.cryptocurrency.overview import (
pycoingecko_view as ov_pycoingecko_view,
)
# pylint: disable=unused-import
# pylint: disable=R0904
class TestCoinGeckoAPI(TestCase):
    """Smoke tests for the CoinGecko overview views.

    Each test calls a display function and relies on pytest-recording
    (vcr) cassettes for HTTP responses and on record_stdout snapshots for
    the rendered output.  Two tests are currently skipped.
    """

    @pytest.mark.skip
    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_holdings_overview(self):
        ov_pycoingecko_view.display_holdings_overview(
            coin="bitcoin", show_bar=False, export="", top=20
        )

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_categories(self):
        ov_pycoingecko_view.display_categories(
            top=15, export="", pie=False, sortby="market_cap"
        )

    @pytest.mark.skip
    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_stablecoins(self):
        ov_pycoingecko_view.display_stablecoins(
            top=15, export="", sortby="market_cap", pie=False, descend=False
        )

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_exchanges(self):
        ov_pycoingecko_view.display_exchanges(
            top=15, sortby="Rank", descend=True, links=False, export=""
        )

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_indexes(self):
        ov_pycoingecko_view.display_indexes(
            top=15, sortby="Rank", descend=True, export=""
        )

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_derivatives(self):
        ov_pycoingecko_view.display_derivatives(
            top=15, sortby="Rank", descend=True, export=""
        )

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_exchange_rates(self):
        ov_pycoingecko_view.display_exchange_rates(
            top=15, sortby="Index", descend=True, export=""
        )

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_global_market_info(self):
        ov_pycoingecko_view.display_global_market_info(export="", pie=False)

    @pytest.mark.record_stdout
    @pytest.mark.vcr()
    def test_coin_global_defi_info(self):
        ov_pycoingecko_view.display_global_defi_info(export="")
| 29.191781 | 76 | 0.677616 |
acf28ce7421e2c9f9e8d6f174da66acdd923533b | 2,439 | py | Python | scoutandrove/apps/account/models.py | ninapavlich/scout-and-rove | 4f40b36f219ac4ab2bac1b5ca6130459138550c9 | [
"MIT"
] | null | null | null | scoutandrove/apps/account/models.py | ninapavlich/scout-and-rove | 4f40b36f219ac4ab2bac1b5ca6130459138550c9 | [
"MIT"
] | null | null | null | scoutandrove/apps/account/models.py | ninapavlich/scout-and-rove | 4f40b36f219ac4ab2bac1b5ca6130459138550c9 | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from scoutandrove.utils.models import BaseModel, BaseTitleModel
from .manager import UserManager
class User(BaseModel, AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates by e-mail address."""

    objects = UserManager()

    email = models.EmailField(_('email address'), unique=True, blank=True)
    first_name = models.CharField(_('First name'), max_length=30, blank=True)
    last_name = models.CharField(_('Last name'), max_length=30, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    # The e-mail address is the login identifier.
    USERNAME_FIELD = 'email'

    @staticmethod
    def autocomplete_search_fields():
        # Fields searched by admin autocomplete widgets.
        return ("email__icontains", "first_name__icontains", "last_name__icontains")

    def get_short_name(self):
        """Return the first name, falling back to the e-mail address."""
        if self.first_name:
            return self.first_name
        return self.email

    def get_public_name(self):
        """Return a display name that never exposes the e-mail address."""
        if self.first_name:
            return self.first_name
        return 'Anonymous User'

    def get_full_name(self):
        """Return the most complete human-readable name available."""
        if self.first_name and self.last_name:
            return u"%s %s" % (self.first_name, self.last_name)
        elif self.first_name:
            return u"%s (%s)" % (self.first_name, self.email)
        elif self.last_name:
            # BUG FIX: this branch previously interpolated the (empty)
            # first_name; use the last name that is actually set.
            return u"%s (%s)" % (self.last_name, self.email)
        else:
            return self.email

    def __unicode__(self):
        return self.get_full_name()
class UserGroupMember(BaseModel):
    """Membership row linking a user to a UserGroup."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL,
        blank=True, null=True)
    # Position of the member within the group listing (ascending).
    order = models.IntegerField(default=0)
    group = models.ForeignKey('account.UserGroup',
        blank=True, null=True)

    class Meta:
        ordering = ['order']
class UserGroup(BaseTitleModel):
    """Named group of users, ordered via UserGroupMember rows."""

    # Membership model used by get_members(); subclasses may override it
    # to use a different through-model.
    member_class = UserGroupMember

    def get_members(self):
        """Return this group's memberships ordered by their `order` field."""
        return self.member_class.objects.filter(group=self).order_by('order')
acf28d02154b030fc97c755d4915f6a9e34f75fe | 23,352 | py | Python | pypy/module/cpyext/test/test_bytesobject.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2021-06-02T23:02:09.000Z | 2021-06-02T23:02:09.000Z | pypy/module/cpyext/test/test_bytesobject.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2021-03-30T18:08:41.000Z | 2021-03-30T18:08:41.000Z | pypy/module/cpyext/test/test_bytesobject.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2022-03-30T11:42:37.000Z | 2022-03-30T11:42:37.000Z | # encoding: utf-8
import pytest
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.error import OperationError
from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.bytesobject import (
new_empty_str, PyBytesObject, _PyBytes_Resize, PyBytes_Concat,
_PyBytes_Eq, PyBytes_ConcatAndDel, _PyBytes_Join)
from pypy.module.cpyext.api import (PyObjectP, PyObject, Py_ssize_tP,
Py_buffer, Py_bufferP, generic_cpy_call)
from pypy.module.cpyext.pyobject import decref, from_ref, make_ref
from pypy.module.cpyext.buffer import PyObject_AsCharBuffer
from pypy.module.cpyext.unicodeobject import (PyUnicode_AsEncodedObject,
PyUnicode_InternFromString, PyUnicode_Format)
class AppTestBytesObject(AppTestCpythonExtensionBase):
    """Application-level tests: each test compiles a small C extension via
    self.import_extension and exercises the PyBytes_* C API from Python."""

    def test_bytesobject(self):
        # Basic constructors, size query and type check.
        module = self.import_extension('foo', [
            ("get_hello1", "METH_NOARGS",
             """
                 return PyBytes_FromStringAndSize(
                     "Hello world<should not be included>", 11);
             """),
            ("get_hello2", "METH_NOARGS",
             """
                 return PyBytes_FromString("Hello world");
             """),
            ("test_Size", "METH_NOARGS",
             """
                 PyObject* s = PyBytes_FromString("Hello world");
                 int result = PyBytes_Size(s);
                 Py_DECREF(s);
                 return PyLong_FromLong(result);
             """),
            ("test_Size_exception", "METH_NOARGS",
             """
                 PyObject* f = PyFloat_FromDouble(1.0);
                 PyBytes_Size(f);
                 Py_DECREF(f);
                 return NULL;
             """),
            ("test_is_bytes", "METH_VARARGS",
             """
                 return PyBool_FromLong(PyBytes_Check(PyTuple_GetItem(args, 0)));
             """)], prologue='#include <stdlib.h>')
        assert module.get_hello1() == b'Hello world'
        assert module.get_hello2() == b'Hello world'
        assert module.test_Size()
        raises(TypeError, module.test_Size_exception)
        assert module.test_is_bytes(b"")
        assert not module.test_is_bytes(())

    def test_bytes_buffer_init(self):
        # FromStringAndSize(NULL, n) allocates an uninitialized, writable
        # buffer that may be filled before the object is exposed.
        module = self.import_extension('foo', [
            ("getbytes", "METH_NOARGS",
             """
                 PyObject *s, *t;
                 char* c;
                 s = PyBytes_FromStringAndSize(NULL, 4);
                 if (s == NULL)
                     return NULL;
                 t = PyBytes_FromStringAndSize(NULL, 3);
                 if (t == NULL)
                     return NULL;
                 Py_DECREF(t);
                 c = PyBytes_AS_STRING(s);
                 c[0] = 'a';
                 c[1] = 'b';
                 c[2] = 0;
                 c[3] = 'c';
                 return s;
             """),
            ])
        s = module.getbytes()
        assert len(s) == 4
        assert s == b'ab\x00c'

    def test_bytes_tp_alloc(self):
        # tp_alloc with an item count must produce a bytes object of that
        # size (itemsize 1), zero-filled.
        module = self.import_extension('foo', [
            ("tpalloc", "METH_NOARGS",
             """
                PyObject *base;
                PyTypeObject * type;
                PyObject *obj;
                base = PyBytes_FromString("test");
                if (PyBytes_GET_SIZE(base) != 4)
                    return PyLong_FromLong(-PyBytes_GET_SIZE(base));
                type = base->ob_type;
                if (type->tp_itemsize != 1)
                    return PyLong_FromLong(type->tp_itemsize);
                obj = type->tp_alloc(type, 10);
                if (PyBytes_GET_SIZE(obj) != 10)
                    return PyLong_FromLong(PyBytes_GET_SIZE(obj));
                /* cannot work, there is only RO access
                memcpy(PyBytes_AS_STRING(obj), "works", 6); */
                Py_INCREF(obj);
                return obj;
             """),
            ('alloc_rw', "METH_NOARGS",
             '''
                PyObject *obj = (PyObject*)_PyObject_NewVar(&PyBytes_Type, 10);
                memcpy(PyBytes_AS_STRING(obj), "works", 6);
                return (PyObject*)obj;
             '''),
            ])
        s = module.alloc_rw()
        assert s[:6] == b'works\0'  # s[6:10] contains random garbage
        s = module.tpalloc()
        assert s == b'\x00' * 10

    def test_AsString(self):
        module = self.import_extension('foo', [
            ("getbytes", "METH_NOARGS",
             """
                 char *c;
                 PyObject* s2, *s1 = PyBytes_FromStringAndSize("test", 4);
                 c = PyBytes_AsString(s1);
                 s2 = PyBytes_FromStringAndSize(c, 4);
                 Py_DECREF(s1);
                 return s2;
             """),
            ])
        s = module.getbytes()
        assert s == b'test'

    def test_manipulations(self):
        # PyBytes_Concat steals the reference passed through the pointer.
        module = self.import_extension('foo', [
            ("bytes_as_string", "METH_VARARGS",
             '''
             return PyBytes_FromStringAndSize(PyBytes_AsString(
                 PyTuple_GetItem(args, 0)), 4);
             '''
             ),
            ("concat", "METH_VARARGS",
             """
                PyObject ** v;
                PyObject * left = PyTuple_GetItem(args, 0);
                Py_INCREF(left);    /* the reference will be stolen! */
                v = &left;
                PyBytes_Concat(v, PyTuple_GetItem(args, 1));
                return *v;
             """)])
        assert module.bytes_as_string(b"huheduwe") == b"huhe"
        ret = module.concat(b'abc', b'def')
        assert ret == b'abcdef'

    def test_py_bytes_as_string_None(self):
        # AsString on a non-bytes object must set TypeError.
        module = self.import_extension('foo', [
            ("string_None", "METH_VARARGS",
             '''
             if (PyBytes_AsString(Py_None)) {
                 Py_RETURN_NONE;
             }
             return NULL;
             '''
             )])
        raises(TypeError, module.string_None)

    def test_AsStringAndSize(self):
        module = self.import_extension('foo', [
            ("getbytes", "METH_NOARGS",
             """
                 PyObject* s1 = PyBytes_FromStringAndSize("te\\0st", 5);
                 char *buf;
                 Py_ssize_t len;
                 if (PyBytes_AsStringAndSize(s1, &buf, &len) < 0)
                     return NULL;
                 if (len != 5) {
                     PyErr_SetString(PyExc_AssertionError, "Bad Length");
                     return NULL;
                 }
                 if (PyBytes_AsStringAndSize(s1, &buf, NULL) >= 0) {
                     PyErr_SetString(PyExc_AssertionError, "Should Have failed");
                     return NULL;
                 }
                 PyErr_Clear();
                 Py_DECREF(s1);
                 Py_INCREF(Py_None);
                 return Py_None;
             """),
            ("c_only", "METH_NOARGS",
             """
                int ret;
                char * buf2;
                PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024);
                if (!obj)
                    return NULL;
                buf2 = PyBytes_AsString(obj);
                if (!buf2)
                    return NULL;
                /* buf should not have been forced, issue #2395 */
                ret = _PyBytes_Resize(&obj, 512);
                if (ret < 0)
                    return NULL;
                Py_DECREF(obj);
                Py_INCREF(Py_None);
                return Py_None;
             """),
            ])
        module.getbytes()
        module.c_only()

    def test_FromFormat(self):
        module = self.import_extension('foo', [
            ("fmt", "METH_VARARGS",
             """
                PyObject* fmt = PyTuple_GetItem(args, 0);
                int n = PyLong_AsLong(PyTuple_GetItem(args, 1));
                PyObject* result = PyBytes_FromFormat(PyBytes_AsString(fmt), n);
                return result;
             """),
            ])
        print(module.fmt(b'd:%d', 10))
        assert module.fmt(b'd:%d', 10) == b'd:10'

    def test_suboffsets(self):
        # bytes objects export a buffer with no suboffsets.
        module = self.import_extension('foo', [
            ("check_suboffsets", "METH_O",
             """
                Py_buffer view;
                PyObject_GetBuffer(args, &view, 0);
                return PyLong_FromLong(view.suboffsets == NULL);
             """)])
        assert module.check_suboffsets(b'1234') == 1
class TestBytes(BaseApiTest):
    """Interpreter-level tests driving the cpyext bytes API directly through
    rffi/lltype pointers.

    NOTE(review): a second ``class TestBytes(BaseApiTest)`` appears later in
    this module and shadows this one at import time — confirm which copy is
    intended to run.  test_hash_and_state and test_subclass also use
    ``self.import_extension`` and the Python-2 ``PyString_*`` API, which
    looks like they belong in an AppTest class — verify.
    """

    def test_bytes_resize(self, space):
        # Shrink then regrow a raw PyBytesObject; the NUL terminator must
        # follow the new size in both directions.
        py_str = new_empty_str(space, 10)
        ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
        py_str.c_ob_sval[0] = 'a'
        py_str.c_ob_sval[1] = 'b'
        py_str.c_ob_sval[2] = 'c'
        ar[0] = rffi.cast(PyObject, py_str)
        _PyBytes_Resize(space, ar, 3)
        py_str = rffi.cast(PyBytesObject, ar[0])
        assert py_str.c_ob_size == 3
        assert py_str.c_ob_sval[1] == 'b'
        assert py_str.c_ob_sval[3] == '\x00'
        # the same for growing
        ar[0] = rffi.cast(PyObject, py_str)
        _PyBytes_Resize(space, ar, 10)
        py_str = rffi.cast(PyBytesObject, ar[0])
        assert py_str.c_ob_size == 10
        assert py_str.c_ob_sval[1] == 'b'
        assert py_str.c_ob_sval[10] == '\x00'
        decref(space, ar[0])
        lltype.free(ar, flavor='raw')

    def test_Concat(self, space):
        # Concat steals the reference in ptr[0] and must clear it on error.
        ref = make_ref(space, space.newbytes('abc'))
        ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
        ptr[0] = ref
        prev_refcnt = ref.c_ob_refcnt
        PyBytes_Concat(space, ptr, space.newbytes('def'))
        assert ref.c_ob_refcnt == prev_refcnt - 1
        assert space.bytes_w(from_ref(space, ptr[0])) == 'abcdef'
        with raises_w(space, TypeError):
            PyBytes_Concat(space, ptr, space.w_None)
        assert not ptr[0]
        ptr[0] = lltype.nullptr(PyObject.TO)
        PyBytes_Concat(space, ptr, space.newbytes('def'))  # should not crash
        lltype.free(ptr, flavor='raw')

    def test_ConcatAndDel1(self, space):
        # XXX remove this or test_ConcatAndDel2
        ref1 = make_ref(space, space.newbytes('abc'))
        ref2 = make_ref(space, space.newbytes('def'))
        ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
        ptr[0] = ref1
        prev_refcnf = ref2.c_ob_refcnt
        PyBytes_ConcatAndDel(space, ptr, ref2)
        assert space.bytes_w(from_ref(space, ptr[0])) == 'abcdef'
        assert ref2.c_ob_refcnt == prev_refcnf - 1
        decref(space, ptr[0])
        ptr[0] = lltype.nullptr(PyObject.TO)
        ref2 = make_ref(space, space.newbytes('foo'))
        prev_refcnf = ref2.c_ob_refcnt
        PyBytes_ConcatAndDel(space, ptr, ref2)  # should not crash
        assert ref2.c_ob_refcnt == prev_refcnf - 1
        lltype.free(ptr, flavor='raw')

    def test_asbuffer(self, space):
        # AsCharBuffer borrows: the refcount must not change.
        bufp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
        lenp = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
        w_text = space.newbytes("text")
        ref = make_ref(space, w_text)
        prev_refcnt = ref.c_ob_refcnt
        assert PyObject_AsCharBuffer(space, ref, bufp, lenp) == 0
        assert ref.c_ob_refcnt == prev_refcnt
        assert lenp[0] == 4
        assert rffi.charp2str(bufp[0]) == 'text'
        lltype.free(bufp, flavor='raw')
        lltype.free(lenp, flavor='raw')
        decref(space, ref)

    def test_eq(self, space):
        assert 1 == _PyBytes_Eq(space, space.newbytes("hello"), space.newbytes("hello"))
        assert 0 == _PyBytes_Eq(space, space.newbytes("hello"), space.newbytes("world"))

    def test_join(self, space):
        w_sep = space.newbytes('<sep>')
        w_seq = space.newtuple([space.newbytes('a'), space.newbytes('b')])
        w_joined = _PyBytes_Join(space, w_sep, w_seq)
        assert space.bytes_w(w_joined) == 'a<sep>b'

    def test_FromObject(self, space, api):
        # Accepts bytes and bytearray; rejects unicode with TypeError.
        w_obj = space.newbytes("test")
        assert space.eq_w(w_obj, api.PyBytes_FromObject(w_obj))
        w_obj = space.call_function(space.w_bytearray, w_obj)
        assert space.eq_w(w_obj, api.PyBytes_FromObject(w_obj))
        w_obj = space.wrap(u"test")
        with raises_w(space, TypeError):
            api.PyBytes_FromObject(w_obj)

    def test_hash_and_state(self):
        # The cached ob_shash field must agree with the app-level hash().
        module = self.import_extension('foo', [
            ("test_hash", "METH_VARARGS",
             '''
                PyObject* obj = (PyTuple_GetItem(args, 0));
                long hash = ((PyBytesObject*)obj)->ob_shash;
                return PyLong_FromLong(hash);
             '''
             ),
            ("test_sstate", "METH_NOARGS",
             '''
                PyObject *s = PyString_FromString("xyz");
                /*int sstate = ((PyBytesObject*)s)->ob_sstate;
                printf("sstate now %d\\n", sstate);*/
                PyString_InternInPlace(&s);
                /*sstate = ((PyBytesObject*)s)->ob_sstate;
                printf("sstate now %d\\n", sstate);*/
                Py_DECREF(s);
                return PyBool_FromLong(1);
             '''),
            ], prologue='#include <stdlib.h>')
        res = module.test_hash("xyz")
        assert res == hash('xyz')
        # doesn't really test, but if printf is enabled will prove sstate
        assert module.test_sstate()

    def test_subclass(self):
        # taken from PyStringArrType_Type in numpy's scalartypes.c.src
        module = self.import_extension('bar', [
            ("newsubstr", "METH_O",
             """
                PyObject * obj;
                char * data;
                int len;
                data = PyString_AS_STRING(args);
                len = PyString_GET_SIZE(args);
                if (data == NULL)
                    Py_RETURN_NONE;
                obj = PyArray_Scalar(data, len);
                return obj;
             """),
            ("get_len", "METH_O",
             """
                return PyLong_FromLong(PyObject_Size(args));
             """),
            ('has_nb_add', "METH_O",
             '''
                if (args->ob_type->tp_as_number == NULL) {
                    Py_RETURN_FALSE;
                }
                if (args->ob_type->tp_as_number->nb_add == NULL) {
                    Py_RETURN_FALSE;
                }
                Py_RETURN_TRUE;
             '''),
            ], prologue="""
                #include <Python.h>
                PyTypeObject PyStringArrType_Type = {
                    PyObject_HEAD_INIT(NULL)
                    0,                            /* ob_size */
                    "bar.string_",                /* tp_name*/
                    sizeof(PyBytesObject),        /* tp_basicsize*/
                    0                             /* tp_itemsize */
                };

                static PyObject *
                stringtype_repr(PyObject *self)
                {
                    const char *dptr, *ip;
                    int len;
                    PyObject *new;
                    ip = dptr = PyString_AS_STRING(self);
                    len = PyString_GET_SIZE(self);
                    dptr += len-1;
                    while(len > 0 && *dptr-- == 0) {
                        len--;
                    }
                    new = PyString_FromStringAndSize(ip, len);
                    if (new == NULL) {
                        return PyString_FromString("");
                    }
                    return new;
                }

                static PyObject *
                stringtype_str(PyObject *self)
                {
                    const char *dptr, *ip;
                    int len;
                    PyObject *new;
                    ip = dptr = PyString_AS_STRING(self);
                    len = PyString_GET_SIZE(self);
                    dptr += len-1;
                    while(len > 0 && *dptr-- == 0) {
                        len--;
                    }
                    new = PyString_FromStringAndSize(ip, len);
                    if (new == NULL) {
                        return PyString_FromString("");
                    }
                    return new;
                }

                PyObject *
                PyArray_Scalar(char *data, int n)
                {
                    PyTypeObject *type = &PyStringArrType_Type;
                    PyObject *obj;
                    void *destptr;
                    int itemsize = n;
                    obj = type->tp_alloc(type, itemsize);
                    if (obj == NULL) {
                        return NULL;
                    }
                    destptr = PyString_AS_STRING(obj);
                    ((PyBytesObject *)obj)->ob_shash = -1;
                    memcpy(destptr, data, itemsize);
                    return obj;
                }
            """, more_init = '''
                PyStringArrType_Type.tp_alloc = NULL;
                PyStringArrType_Type.tp_free = NULL;

                PyStringArrType_Type.tp_repr = stringtype_repr;
                PyStringArrType_Type.tp_str = stringtype_str;
                PyStringArrType_Type.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE;
                PyStringArrType_Type.tp_itemsize = sizeof(char);
                PyStringArrType_Type.tp_base = &PyString_Type;
                PyStringArrType_Type.tp_hash = PyString_Type.tp_hash;
                if (PyType_Ready(&PyStringArrType_Type) < 0) INITERROR;
            ''')
        a = module.newsubstr('abc')
        assert module.has_nb_add('a') is False
        assert module.has_nb_add(a) is False
        assert type(a).__name__ == 'string_'
        assert a == 'abc'
        assert 3 == module.get_len(a)
        b = module.newsubstr('')
        assert 0 == module.get_len(b)
class TestBytes(BaseApiTest):
    # Untranslated tests of the PyBytes*/PyUnicode* C-API emulation layer.
    # NOTE(review): raw lltype buffers and refcounts are managed by hand here,
    # so the malloc -> use -> decref/free ordering in each test is load-bearing.

    def test_bytes_resize(self, space):
        # _PyBytes_Resize replaces the object referenced through ``ar`` in
        # place, both for shrinking and for growing the buffer.
        py_str = new_empty_str(space, 10)
        ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
        py_str.c_ob_sval[0] = 'a'
        py_str.c_ob_sval[1] = 'b'
        py_str.c_ob_sval[2] = 'c'
        ar[0] = rffi.cast(PyObject, py_str)
        _PyBytes_Resize(space, ar, 3)
        py_str = rffi.cast(PyBytesObject, ar[0])
        assert py_str.c_ob_size == 3
        assert py_str.c_ob_sval[1] == 'b'
        # buffer must stay NUL-terminated at index c_ob_size
        assert py_str.c_ob_sval[3] == '\x00'
        # the same for growing
        ar[0] = rffi.cast(PyObject, py_str)
        _PyBytes_Resize(space, ar, 10)
        py_str = rffi.cast(PyBytesObject, ar[0])
        assert py_str.c_ob_size == 10
        assert py_str.c_ob_sval[1] == 'b'
        assert py_str.c_ob_sval[10] == '\x00'
        decref(space, ar[0])
        lltype.free(ar, flavor='raw')

    def test_string_buffer(self, space):
        # The bytes type must expose tp_as_buffer with a working
        # bf_getbuffer slot (returns 0 on success).
        py_str = new_empty_str(space, 10)
        c_buf = py_str.c_ob_type.c_tp_as_buffer
        assert c_buf
        py_obj = rffi.cast(PyObject, py_str)
        size = rffi.sizeof(Py_buffer)
        ref = lltype.malloc(rffi.VOIDP.TO, size, flavor='raw', zero=True)
        ref = rffi.cast(Py_bufferP, ref)
        assert generic_cpy_call(space, c_buf.c_bf_getbuffer,
                                py_obj, ref, rffi.cast(rffi.INT_real, 0)) == 0
        lltype.free(ref, flavor='raw')
        decref(space, py_obj)

    def test_Concat(self, space):
        ref = make_ref(space, space.newbytes('abc'))
        ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
        ptr[0] = ref
        prev_refcnt = ref.c_ob_refcnt
        PyBytes_Concat(space, ptr, space.newbytes('def'))
        # PyBytes_Concat consumes the reference that was stored in *ptr
        assert ref.c_ob_refcnt == prev_refcnt - 1
        assert space.utf8_w(from_ref(space, ptr[0])) == 'abcdef'
        with pytest.raises(OperationError):
            PyBytes_Concat(space, ptr, space.w_None)
        # on error *ptr is cleared to NULL
        assert not ptr[0]
        ptr[0] = lltype.nullptr(PyObject.TO)
        # concatenating onto a NULL slot is a no-op
        PyBytes_Concat(space, ptr, space.wrap('def'))  # should not crash
        lltype.free(ptr, flavor='raw')

    def test_ConcatAndDel2(self, space):
        # XXX remove this or test_ConcatAndDel1
        ref1 = make_ref(space, space.newbytes('abc'))
        ref2 = make_ref(space, space.newbytes('def'))
        ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
        ptr[0] = ref1
        prev_refcnf = ref2.c_ob_refcnt
        # ConcatAndDel consumes (decrefs) its second argument as well
        PyBytes_ConcatAndDel(space, ptr, ref2)
        assert space.utf8_w(from_ref(space, ptr[0])) == 'abcdef'
        assert ref2.c_ob_refcnt == prev_refcnf - 1
        decref(space, ptr[0])
        ptr[0] = lltype.nullptr(PyObject.TO)
        ref2 = make_ref(space, space.wrap('foo'))
        prev_refcnf = ref2.c_ob_refcnt
        # the second argument is decref'd even when *ptr is NULL
        PyBytes_ConcatAndDel(space, ptr, ref2)  # should not crash
        assert ref2.c_ob_refcnt == prev_refcnf - 1
        lltype.free(ptr, flavor='raw')

    def test_format(self, space):
        # XXX move to test_unicodeobject
        assert "1 2" == space.unwrap(
            PyUnicode_Format(space, space.wrap('%s %d'), space.wrap((1, 2))))

    def test_asbuffer(self, space):
        # PyObject_AsCharBuffer must not change the refcount of its argument.
        bufp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
        lenp = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
        w_bytes = space.newbytes("text")
        ref = make_ref(space, w_bytes)
        prev_refcnt = ref.c_ob_refcnt
        assert PyObject_AsCharBuffer(space, ref, bufp, lenp) == 0
        assert ref.c_ob_refcnt == prev_refcnt
        assert lenp[0] == 4
        assert rffi.charp2str(bufp[0]) == 'text'
        lltype.free(bufp, flavor='raw')
        lltype.free(lenp, flavor='raw')
        decref(space, ref)

    def test_intern(self, space):
        # XXX move to test_unicodeobject
        # interning the same C string twice must yield the same wrapped object
        buf = rffi.str2charp("test")
        w_s1 = PyUnicode_InternFromString(space, buf)
        w_s2 = PyUnicode_InternFromString(space, buf)
        rffi.free_charp(buf)
        assert w_s1 is w_s2

    def test_AsEncodedObject(self, space):
        # XXX move to test_unicodeobject
        ptr = space.wrap('abc')
        errors = rffi.str2charp("strict")
        encoding = rffi.str2charp("ascii")
        res = PyUnicode_AsEncodedObject(space, ptr, encoding, errors)
        assert space.unwrap(res) == "abc"
        # NULL errors means the default ("strict") error handler
        res = PyUnicode_AsEncodedObject(space,
            ptr, encoding, lltype.nullptr(rffi.CCHARP.TO))
        assert space.unwrap(res) == "abc"
        rffi.free_charp(encoding)
        # an unknown codec name raises LookupError
        encoding = rffi.str2charp("unknown_encoding")
        with raises_w(space, LookupError):
            PyUnicode_AsEncodedObject(space, ptr, encoding, errors)
        rffi.free_charp(encoding)
        rffi.free_charp(errors)
        # NULL encoding means the default encoding
        NULL = lltype.nullptr(rffi.CCHARP.TO)
        res = PyUnicode_AsEncodedObject(space, ptr, NULL, NULL)
        assert space.unwrap(res) == "abc"
        # non-string arguments are rejected
        with raises_w(space, TypeError):
            PyUnicode_AsEncodedObject(space, space.wrap(2), NULL, NULL)

    def test_eq(self, space):
        assert 1 == _PyBytes_Eq(
            space, space.wrap("hello"), space.wrap("hello"))
        assert 0 == _PyBytes_Eq(
            space, space.wrap("hello"), space.wrap("world"))

    def test_join(self, space):
        w_sep = space.wrap('<sep>')
        w_seq = space.wrap(['a', 'b'])
        w_joined = _PyBytes_Join(space, w_sep, w_seq)
        assert space.unwrap(w_joined) == 'a<sep>b'
| 38.790698 | 88 | 0.518243 |
acf28d1430ef3d40e069a60a9d02fe6ea39d518f | 6,133 | py | Python | model.py | ishmamt/Hierarchical-Co-attention-VQA | 94bd51e7c369bd9fa6d51eaceb1f621cb91fef62 | [
"MIT"
] | null | null | null | model.py | ishmamt/Hierarchical-Co-attention-VQA | 94bd51e7c369bd9fa6d51eaceb1f621cb91fef62 | [
"MIT"
] | null | null | null | model.py | ishmamt/Hierarchical-Co-attention-VQA | 94bd51e7c369bd9fa6d51eaceb1f621cb91fef62 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as fn
import torch.nn.utils.rnn as rnn
class CoattentionNet(nn.Module):
    ''' Model class for Hierarchical Co-Attention Net.

    Implements the parallel co-attention variant of "Hierarchical
    Question-Image Co-Attention for Visual Question Answering"
    (Lu et al., 2017): word-, phrase- and sentence-level question features
    are each co-attended with the image features, then combined
    hierarchically for classification.
    '''
    def __init__(self, vocabulary_size, num_classes, embedding_dimension=512, k=30):
        ''' Constructor for Hierarchical Co-Attention Net model. Predicts an answer to a question about
        an image using the Hierarchical Question-Image Co-Attention for Visual Question Answering (Lu et al, 2017) paper.
        Parameters:
            vocabulary_size: int; Number of words in the vocabulary.
            num_classes: int; Number of output classes.
            embedding_dimension: int; Embedding dimension.
            k: int; Projection size of the co-attention space (rows of W_v / W_q).
        '''
        super().__init__()

        self.embed = nn.Embedding(vocabulary_size, embedding_dimension)  # embedding for each word in the vocabulary. Each tensor being of size 512

        # question convolutions: 1/2/3-gram views of the word embeddings,
        # max-pooled across the three n-gram sizes to form phrase features
        self.unigram_conv = nn.Conv1d(embedding_dimension, embedding_dimension, 1, stride=1, padding=0)
        self.bigram_conv = nn.Conv1d(embedding_dimension, embedding_dimension, 2, stride=1, padding=1, dilation=2)
        self.trigram_conv = nn.Conv1d(embedding_dimension, embedding_dimension, 3, stride=1, padding=2, dilation=2)
        self.max_pool = nn.MaxPool2d((3, 1))
        self.lstm = nn.LSTM(input_size=embedding_dimension, hidden_size=embedding_dimension, num_layers=3, dropout=0.4)
        self.tanh = nn.Tanh()

        # weights for feature extraction and co-attention
        self.W_b = nn.Parameter(torch.randn(embedding_dimension, embedding_dimension))
        self.W_v = nn.Parameter(torch.randn(k, embedding_dimension))
        self.W_q = nn.Parameter(torch.randn(k, embedding_dimension))
        self.W_hv = nn.Parameter(torch.randn(k, 1))
        self.W_hq = nn.Parameter(torch.randn(k, 1))

        # weights for conjugation (hierarchical combination of the three levels)
        self.W_w = nn.Linear(embedding_dimension, embedding_dimension)
        self.W_p = nn.Linear(embedding_dimension * 2, embedding_dimension)
        self.W_s = nn.Linear(embedding_dimension * 2, embedding_dimension)

        # weights for classification
        self.fc = nn.Linear(embedding_dimension, num_classes)

    def forward(self, image_tensor, question_tensor):
        ''' Forward propagation.
        Parameters:
            image_tensor: pytorch tensor; The image features.
                NOTE(review): assumed shape batch_size x 512 x 196 (see comment
                below) -- confirm against the image encoder.
            question_tensor: packed pytorch sequence; The question token ids.
        Returns:
            output: pytorch tensor; Class scores for the final answer
                (batch_size x num_classes).
        '''
        # Image: batch_size x 512 x 196 from the image encoder.
        question, lens = rnn.pad_packed_sequence(question_tensor)  # pads multiple sequences of differing lengths
        question = question.permute(1, 0)  # Question: batch_size x len_of_question
        words = self.embed(question).permute(0, 2, 1)  # Words: batch_size x 512 x len_of_question

        # n-gram convolutions over the word embeddings
        unigrams = torch.unsqueeze(self.tanh(self.unigram_conv(words)), 2)  # batch_size x 512 x len_of_question
        bigrams = torch.unsqueeze(self.tanh(self.bigram_conv(words)), 2)  # batch_size x 512 x len_of_question
        trigrams = torch.unsqueeze(self.tanh(self.trigram_conv(words)), 2)  # batch_size x 512 x len_of_question
        words = words.permute(0, 2, 1)  # Words: batch_size x len_of_question x 512

        # max over the three n-gram responses gives the phrase-level features
        phrase = torch.squeeze(self.max_pool(torch.cat((unigrams, bigrams, trigrams), 2)))
        phrase = phrase.permute(0, 2, 1)  # Phrase: batch_size x len_of_question x 512

        # pass the question through an LSTM to get sentence-level features
        hidden_input = None  # hidden_input is None for the first time.
        phrase_packed = nn.utils.rnn.pack_padded_sequence(torch.transpose(phrase, 0, 1), lens)  # packs multiple padded sequences with the given lengths.
        sentence_packed, hidden_input = self.lstm(phrase_packed, hidden_input)
        sentence, _ = rnn.pad_packed_sequence(sentence_packed)
        sentence = torch.transpose(sentence, 0, 1)  # Sentence: batch_size x len_of_question x 512

        # Feature extraction: co-attend image and question at every level
        v_word, q_word = self.parallel_co_attention(image_tensor, words)  # word-based image-text co-attention
        v_phrase, q_phrase = self.parallel_co_attention(image_tensor, phrase)  # phrase-based image-text co-attention
        v_sentence, q_sentence = self.parallel_co_attention(image_tensor, sentence)  # sentence-based image-text co-attention

        # Classification: combine the three levels hierarchically
        h_w = self.tanh(self.W_w(q_word + v_word))
        h_p = self.tanh(self.W_p(torch.cat(((q_phrase + v_phrase), h_w), dim=1)))
        h_s = self.tanh(self.W_s(torch.cat(((q_sentence + v_sentence), h_p), dim=1)))

        output = self.fc(h_s)

        return output

    def parallel_co_attention(self, V, Q):
        ''' Parallel Co-Attention of Image and text.
        Parameters:
            V: pytorch tensor; Extracted image features (batch_size x 512 x 196).
            Q: pytorch tensor; Extracted question features
                (batch_size x length_of_question x 512).
        Returns:
            v: pytorch tensor; Attention-weighted image feature (batch_size x 512).
            q: pytorch tensor; Attention-weighted question feature (batch_size x 512).
        '''
        # V: batch_size x 512 x 196, Q: batch_size x length_of_question x 512
        # affinity matrix between every question position and image location
        C = self.tanh(torch.matmul(Q, torch.matmul(self.W_b, V)))  # batch_size x length_of_question x 196

        H_v = self.tanh(torch.matmul(self.W_v, V) + torch.matmul(torch.matmul(self.W_q, Q.permute(0, 2, 1)), C))  # batch_size x k x 196
        H_q = self.tanh(torch.matmul(self.W_q, Q.permute(0, 2, 1)) + torch.matmul(torch.matmul(self.W_v, V), C.permute(0, 2, 1)))  # batch_size x k x length_of_question

        # attention distributions over image locations / question positions
        a_v = fn.softmax(torch.matmul(torch.t(self.W_hv), H_v), dim=2)  # batch_size x 1 x 196
        a_q = fn.softmax(torch.matmul(torch.t(self.W_hq), H_q), dim=2)  # batch_size x 1 x length_of_question

        # NOTE(review): torch.squeeze without dim also drops the batch axis
        # when batch_size == 1 -- confirm callers never use batch size 1.
        v = torch.squeeze(torch.matmul(a_v, V.permute(0, 2, 1)))  # batch_size x 512
        q = torch.squeeze(torch.matmul(a_q, Q))  # batch_size x 512

        return v, q
if __name__ == "__main__":
    # Smoke check: build a model with a 10k-word vocabulary and 1000 answer
    # classes, then dump its layer summary.
    demo = CoattentionNet(vocabulary_size=10000, num_classes=1000)
    print(demo)
acf28d21fa8012db16ce26b9b3f1f3f093f6ed29 | 2,901 | py | Python | stellar_sdk/xdr/create_claimable_balance_result.py | kaotisk-hund/py-stellar-base | 30dbe1139d8f0c03c4c20ea3c9a45a19285bedb8 | [
"Apache-2.0"
] | 341 | 2015-10-06T20:56:19.000Z | 2022-03-23T15:58:54.000Z | stellar_sdk/xdr/create_claimable_balance_result.py | kaotisk-hund/py-stellar-base | 30dbe1139d8f0c03c4c20ea3c9a45a19285bedb8 | [
"Apache-2.0"
] | 479 | 2015-11-09T18:39:40.000Z | 2022-03-16T06:46:58.000Z | stellar_sdk/xdr/create_claimable_balance_result.py | kaotisk-hund/py-stellar-base | 30dbe1139d8f0c03c4c20ea3c9a45a19285bedb8 | [
"Apache-2.0"
] | 181 | 2015-10-01T23:00:59.000Z | 2022-03-05T13:42:19.000Z | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..exceptions import ValueError
from .claimable_balance_id import ClaimableBalanceID
from .create_claimable_balance_result_code import CreateClaimableBalanceResultCode
__all__ = ["CreateClaimableBalanceResult"]
class CreateClaimableBalanceResult:
    """
    Result union of the CreateClaimableBalance operation.

    XDR Source Code
    ----------------------------------------------------------------
    union CreateClaimableBalanceResult switch (
        CreateClaimableBalanceResultCode code)
    {
    case CREATE_CLAIMABLE_BALANCE_SUCCESS:
        ClaimableBalanceID balanceID;
    default:
        void;
    };
    ----------------------------------------------------------------
    """

    def __init__(
        self,
        code: CreateClaimableBalanceResultCode,
        balance_id: ClaimableBalanceID = None,
    ) -> None:
        self.code = code
        self.balance_id = balance_id

    def pack(self, packer: Packer) -> None:
        """Serialize into ``packer``: discriminant first, then the union arm."""
        self.code.pack(packer)
        if (
            self.code
            == CreateClaimableBalanceResultCode.CREATE_CLAIMABLE_BALANCE_SUCCESS
        ):
            # The SUCCESS arm carries a balance id; any other code carries void.
            if self.balance_id is None:
                raise ValueError("balance_id should not be None.")
            self.balance_id.pack(packer)
            return

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "CreateClaimableBalanceResult":
        """Deserialize an instance from ``unpacker`` (inverse of :meth:`pack`)."""
        code = CreateClaimableBalanceResultCode.unpack(unpacker)
        if code == CreateClaimableBalanceResultCode.CREATE_CLAIMABLE_BALANCE_SUCCESS:
            balance_id = ClaimableBalanceID.unpack(unpacker)
            if balance_id is None:
                raise ValueError("balance_id should not be None.")
            return cls(code, balance_id=balance_id)
        return cls(code)

    def to_xdr_bytes(self) -> bytes:
        """Return the raw XDR byte serialization of this union."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "CreateClaimableBalanceResult":
        """Build an instance from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Return the base64-encoded XDR serialization of this union."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "CreateClaimableBalanceResult":
        """Build an instance from a base64-encoded XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.code == other.code and self.balance_id == other.balance_id

    def __str__(self):
        out = []
        out.append(f"code={self.code}")
        if self.balance_id is not None:
            out.append(f"balance_id={self.balance_id}")
        # Bug fix: the generated code interpolated ``[', '.join(out)]`` -- a
        # one-element *list literal* -- into the f-string, which rendered e.g.
        # "<CreateClaimableBalanceResult ["code=..."]>" (Python list repr).
        # The brackets are meant to be literal text around the joined fields.
        return f"<CreateClaimableBalanceResult [{', '.join(out)}]>"
| 32.965909 | 85 | 0.630472 |
acf28d98ff915995a0f9f288123acca172142192 | 6,479 | py | Python | tests/test_models.py | leondgarse/keras_efficientnet_v2 | f12af95751e6816b88f7fab8413cd8b9bd4a9494 | [
"Apache-2.0"
] | 44 | 2021-08-11T13:50:24.000Z | 2022-03-25T02:43:41.000Z | tests/test_models.py | leondgarse/keras_efficientnet_v2 | f12af95751e6816b88f7fab8413cd8b9bd4a9494 | [
"Apache-2.0"
] | 7 | 2021-08-20T00:35:17.000Z | 2021-12-24T08:01:21.000Z | tests/test_models.py | leondgarse/Keras_efficientnet_v2_test | 6268a8ff1e0df31ebe19f7bb28837c2ba1f8edf0 | [
"Apache-2.0"
] | 9 | 2021-08-19T03:39:40.000Z | 2022-02-16T10:24:18.000Z | import pytest
import tensorflow as tf
from tensorflow import keras
from skimage.data import chelsea
import sys
sys.path.append(".")
import keras_efficientnet_v2
def test_model_predict_b0_imagenet():
    """EfficientNetV2B0 (imagenet weights) should label Chelsea as Egyptian_cat."""
    model = keras_efficientnet_v2.EfficientNetV2B0(pretrained="imagenet")
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    probs = model(tf.expand_dims(cat / 128 - 1, 0)).numpy()
    top1 = keras.applications.imagenet_utils.decode_predictions(probs)[0][0]
    assert top1[1] == "Egyptian_cat"
    assert abs(top1[2] - 0.76896363) <= 1e-5
def test_model_predict_b1_imagenet_preprocessing():
    """EfficientNetV2B1 with built-in preprocessing classifies Chelsea correctly."""
    model = keras_efficientnet_v2.EfficientNetV2B1(pretrained="imagenet", include_preprocessing=True)
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # raw-range input, no manual rescale
    probs = model(tf.expand_dims(cat, 0)).numpy()
    top1 = keras.applications.imagenet_utils.decode_predictions(probs)[0][0]
    assert top1[1] == "Egyptian_cat"
    assert abs(top1[2] - 0.76861376) <= 1e-5
def test_model_predict_b2_imagenet21k_ft1k():
    """EfficientNetV2B2 (imagenet21k-ft1k weights) classifies Chelsea correctly."""
    model = keras_efficientnet_v2.EfficientNetV2B2(pretrained="imagenet21k-ft1k")
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    probs = model(tf.expand_dims(cat / 128 - 1, 0)).numpy()
    top1 = keras.applications.imagenet_utils.decode_predictions(probs)[0][0]
    assert top1[1] == "Egyptian_cat"
    assert abs(top1[2] - 0.58329606) <= 1e-5
def test_model_predict_s_imagenet_preprocessing():
    """EfficientNetV2S with built-in preprocessing classifies Chelsea correctly."""
    model = keras_efficientnet_v2.EfficientNetV2S(pretrained="imagenet", include_preprocessing=True)
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # raw-range input, no manual rescale
    probs = model(tf.expand_dims(cat, 0)).numpy()
    top1 = keras.applications.imagenet_utils.decode_predictions(probs)[0][0]
    assert top1[1] == "Egyptian_cat"
    assert abs(top1[2] - 0.8642885) <= 1e-5
def test_model_predict_t_imagenet():
    """ Run a single forward pass with EfficientNetV2T on imagenet """
    model = keras_efficientnet_v2.EfficientNetV2T(pretrained="imagenet")
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    probs = model(tf.expand_dims(cat / 128 - 1, 0)).numpy()
    top1 = keras.applications.imagenet_utils.decode_predictions(probs)[0][0]
    assert top1[1] == "Egyptian_cat"
    assert abs(top1[2] - 0.8502904) <= 1e-5
def test_model_predict_s_imagenet21k():
    """ Run a single forward pass with EfficientNetV2S on imagenet21k """
    model = keras_efficientnet_v2.EfficientNetV2S(num_classes=21843, pretrained="imagenet21k")
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    probs = model(tf.expand_dims(cat / 128 - 1, 0)).numpy()
    # 21k label space: check the raw argmax index and its confidence instead
    # of decode_predictions (which only covers the 1000-class set).
    assert probs.argmax() == 2389
    assert abs(probs.max() - 0.15546332) <= 1e-5
def test_model_m_defination():
    """Headless EfficientNetV2M outputs a 15x15x1280 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV2M(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 15, 15, 1280)
def test_model_l_defination():
    """Headless EfficientNetV2L outputs a 15x15x1280 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV2L(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 15, 15, 1280)
def test_model_xl_defination():
    """Headless EfficientNetV2XL outputs a 16x16x1280 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV2XL(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 16, 16, 1280)
def test_model_predict_v1_b0_imagenet():
    """ Run a single forward pass with EfficientNetV1B0 on imagenet """
    # (docstring fixed: it previously named EfficientNetV1B2)
    model = keras_efficientnet_v2.EfficientNetV1B0(pretrained="imagenet")
    imm = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    pred = model(tf.expand_dims(imm / 128 - 1, 0)).numpy()
    out = keras.applications.imagenet_utils.decode_predictions(pred)[0][0]
    assert out[1] == "Egyptian_cat"
    assert abs(out[2] - 0.64605427) <= 1e-5
def test_model_predict_v1_b1_noisy_student():
    """ Run a single forward pass with EfficientNetV1B1 on noisy_student """
    # (docstring fixed: it previously named EfficientNetV1B2 on imagenet)
    model = keras_efficientnet_v2.EfficientNetV1B1(pretrained="noisy_student")
    imm = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    pred = model(tf.expand_dims(imm / 128 - 1, 0)).numpy()
    out = keras.applications.imagenet_utils.decode_predictions(pred)[0][0]
    assert out[1] == "Egyptian_cat"
    assert abs(out[2] - 0.8223327) <= 1e-5
def test_model_predict_v1_b2_imagenet():
    """ Run a single forward pass with EfficientNetV1B2 on imagenet """
    model = keras_efficientnet_v2.EfficientNetV1B2(pretrained="imagenet")
    cat = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    probs = model(tf.expand_dims(cat / 128 - 1, 0)).numpy()
    top1 = keras.applications.imagenet_utils.decode_predictions(probs)[0][0]
    assert top1[1] == "Egyptian_cat"
    assert abs(top1[2] - 0.5294576) <= 1e-5
def test_model_predict_v1_b3_noisy_student_preprocessing():
    """ Run a single forward pass with EfficientNetV1B3 (built-in preprocessing) on noisy_student """
    # (docstring fixed: it previously named EfficientNetV1B6)
    model = keras_efficientnet_v2.EfficientNetV1B3(pretrained="noisy_student", include_preprocessing=True)
    imm = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    pred = model(tf.expand_dims(imm, 0)).numpy()
    out = keras.applications.imagenet_utils.decode_predictions(pred)[0][0]
    assert out[1] == "Egyptian_cat"
    assert abs(out[2] - 0.8770545) <= 1e-5
def test_model_predict_v1_b4_noisy_student():
    """ Run a single forward pass with EfficientNetV1B4 on noisy_student """
    # (docstring fixed: it previously named EfficientNetV1B6)
    model = keras_efficientnet_v2.EfficientNetV1B4(pretrained="noisy_student")
    imm = tf.image.resize(chelsea(), model.input_shape[1:3])  # Chelsea the cat
    pred = model(tf.expand_dims(imm / 128 - 1, 0)).numpy()
    out = keras.applications.imagenet_utils.decode_predictions(pred)[0][0]
    assert out[1] == "Egyptian_cat"
    assert abs(out[2] - 0.67979187) <= 1e-5
def test_model_v1_b5_defination():
    """Headless EfficientNetV1B5 outputs a 15x15x2048 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV1B5(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 15, 15, 2048)
def test_model_v1_b6_defination():
    """Headless EfficientNetV1B6 outputs a 17x17x2304 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV1B6(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 17, 17, 2304)
def test_model_v1_b7_defination():
    """Headless EfficientNetV1B7 outputs a 19x19x2560 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV1B7(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 19, 19, 2560)
def test_model_v1_l2_defination():
    """Headless EfficientNetV1L2 outputs a 25x25x5504 feature map."""
    headless = keras_efficientnet_v2.EfficientNetV1L2(num_classes=0, pretrained=None)
    assert headless.output_shape == (None, 25, 25, 5504)
| 40.242236 | 106 | 0.724803 |
acf28dbcbb1b610f8f92226047fe7de11de1d1d6 | 1,250 | py | Python | python3/hackerrank_leetcode/course_schedule/main.py | seLain/codesnippets | ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289 | [
"MIT"
] | null | null | null | python3/hackerrank_leetcode/course_schedule/main.py | seLain/codesnippets | ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289 | [
"MIT"
] | null | null | null | python3/hackerrank_leetcode/course_schedule/main.py | seLain/codesnippets | ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289 | [
"MIT"
] | null | null | null | from collections import defaultdict
class Solution(object):
    """LeetCode 207 (Course Schedule), solved with Kahn's topological sort."""

    def canFinish(self, numCourses, prerequisites):
        """Return True if every course can be taken, i.e. the prerequisite
        graph contains no cycle.

        Parameters:
            numCourses: int; courses are labelled 0 .. numCourses - 1.
            prerequisites: list of [course, prerequisite] pairs.

        Returns:
            bool; True when a valid course ordering exists.
        """
        graph = defaultdict(list)
        indegree = [0] * numCourses
        # Deduplicate pairs so repeated edges do not inflate the counts.
        for course, prereq in set(tuple(pair) for pair in prerequisites):
            graph[course].append(prereq)
            indegree[prereq] += 1
        # Seed with every node that nothing depends on.  ``order`` grows
        # while we iterate, acting as the BFS queue of Kahn's algorithm.
        # This is O(V + E); the previous version scanned the list with an
        # ``in`` membership test per edge (O(V * E) worst case) on a branch
        # that was in fact unreachable.
        order = [i for i in range(numCourses) if indegree[i] == 0]
        for node in order:
            for nxt in graph[node]:
                indegree[nxt] -= 1
                # Once every dependency on ``nxt`` is satisfied it can no
                # longer be part of a cycle, so schedule it.
                if indegree[nxt] == 0:
                    order.append(nxt)
        # All courses were scheduled iff the graph was acyclic.
        return len(order) == numCourses
acf28eba86a5f666a2385e61ca78748bd5856a40 | 269 | py | Python | tests/artificial/transf_Difference/trend_MovingMedian/cycle_0/ar_12/test_artificial_1024_Difference_MovingMedian_0_12_0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Difference/trend_MovingMedian/cycle_0/ar_12/test_artificial_1024_Difference_MovingMedian_0_12_0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Difference/trend_MovingMedian/cycle_0/ar_12/test_artificial_1024_Difference_MovingMedian_0_12_0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and benchmark one artificial series: 1024 daily points, a
# MovingMedian trend, no cycle, Difference transform, zero noise, no exogenous
# variables, AR order 12.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 12);
acf28f28b277596ebe55f51a970c3c960fb1dec0 | 2,064 | py | Python | backend/pyrogram/methods/utilities/stop.py | appheap/social-media-analyzer | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | [
"Apache-2.0"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/pyrogram/methods/utilities/stop.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/pyrogram/methods/utilities/stop.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from pyrogram.scaffold import Scaffold
class Stop(Scaffold):
    async def stop(self, block: bool = True):
        """Stop the Client.

        Disconnects the client from Telegram and tears down the underlying
        tasks.

        Parameters:
            block (``bool``, *optional*):
                When True (the default) the coroutine returns only once the
                client is fully stopped.  Pass ``block=False`` to schedule
                the shutdown on the event loop instead -- useful when
                stopping the client from inside one of its own handlers,
                where waiting would deadlock.

        Returns:
            :obj:`~pyrogram.Client`: The stopped client itself.

        Raises:
            ConnectionError: In case you try to stop an already stopped client.

        Example:
            .. code-block:: python
                :emphasize-lines: 8

                from pyrogram import Client

                app = Client("my_account")
                app.start()

                ...  # Call API methods

                app.stop()
        """

        async def shutdown():
            # Terminate first, then drop the connection.
            await self.terminate()
            await self.disconnect()

        if block:
            await shutdown()
        else:
            self.loop.create_task(shutdown())

        return self
| 31.272727 | 118 | 0.626453 |
acf28f705232f674507ea89f621df3066c988d4d | 1,086 | py | Python | controle_gastos/urls.py | NunesAlexandre/django2.0 | ef151d9d7dd3e3f4cf7e401f2aecfd237ed69b7f | [
"MIT"
] | null | null | null | controle_gastos/urls.py | NunesAlexandre/django2.0 | ef151d9d7dd3e3f4cf7e401f2aecfd237ed69b7f | [
"MIT"
] | null | null | null | controle_gastos/urls.py | NunesAlexandre/django2.0 | ef151d9d7dd3e3f4cf7e401f2aecfd237ed69b7f | [
"MIT"
] | null | null | null | """controle_gastos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from contas.views import index, cadastro, contato, sobre, novasenha, base
# URL table for the project: admin site plus the contas app views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', index),
    path('cadastro/', cadastro, name='cadastrar'),
    path('contato/', contato, name='contato'),
    path('sobre/', sobre, name='sobre'),
    path('novasenha/', novasenha, name='novasenha'),
    # NOTE(review): this '' route repeats the one mapped to ``index`` above.
    # Django resolves patterns top-down, so ``base`` is never served for the
    # root URL (it remains reachable only by reverse('base')) -- confirm which
    # view the root URL is meant to use.
    path('', base, name='base')
]
| 36.2 | 77 | 0.684162 |
acf290226efa697808a2dc42c59de5a6fef5f849 | 22,475 | py | Python | tests/test_models/test_heads.py | wangbingo/mmsegmentation | a327b97d3ee0350a004864d1c3ce3aff37cc83e9 | [
"Apache-2.0"
] | 28 | 2021-12-15T04:00:10.000Z | 2022-03-07T07:57:01.000Z | tests/test_models/test_heads.py | wangbingo/mmsegmentation | a327b97d3ee0350a004864d1c3ce3aff37cc83e9 | [
"Apache-2.0"
] | 7 | 2021-09-09T07:46:49.000Z | 2022-02-11T03:04:19.000Z | tests/test_models/test_heads.py | wangbingo/mmsegmentation | a327b97d3ee0350a004864d1c3ce3aff37cc83e9 | [
"Apache-2.0"
] | 3 | 2021-12-14T03:11:36.000Z | 2022-03-28T19:20:29.000Z | from unittest.mock import patch
import pytest
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.utils import ConfigDict
from mmcv.utils.parrots_wrapper import SyncBatchNorm
from mmseg.models.decode_heads import (ANNHead, ASPPHead, CCHead, DAHead,
DepthwiseSeparableASPPHead,
DepthwiseSeparableFCNHead, DNLHead,
EMAHead, EncHead, FCNHead, GCHead,
NLHead, OCRHead, PointHead, PSAHead,
PSPHead, UPerHead)
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
def _conv_has_norm(module, sync_bn):
    """Return True iff every ConvModule inside ``module`` has a norm layer,
    and -- when ``sync_bn`` is set -- that norm layer is SyncBatchNorm."""
    for sub in module.modules():
        if not isinstance(sub, ConvModule):
            continue
        if not sub.with_norm:
            return False
        if sync_bn and not isinstance(sub.bn, SyncBatchNorm):
            return False
    return True
def to_cuda(module, data):
    """Move ``module`` and its input tensor(s) onto the default CUDA device.

    ``data`` may be a single tensor or a list of tensors; a list is updated
    in place and the same list object is returned.
    """
    module = module.cuda()
    if isinstance(data, list):
        data[:] = [tensor.cuda() for tensor in data]
    return module, data
@patch.multiple(BaseDecodeHead, __abstractmethods__=set())
def test_decode_head():
    # ``BaseDecodeHead`` is abstract; clearing its abstract-method set lets it
    # be instantiated directly for these constructor / input-transform checks.
    with pytest.raises(AssertionError):
        # default input_transform doesn't accept multiple inputs
        BaseDecodeHead([32, 16], 16, num_classes=19)

    with pytest.raises(AssertionError):
        # default input_transform doesn't accept multiple inputs
        BaseDecodeHead(32, 16, num_classes=19, in_index=[-1, -2])

    with pytest.raises(AssertionError):
        # supported mode is resize_concat only
        BaseDecodeHead(32, 16, num_classes=19, input_transform='concat')

    with pytest.raises(AssertionError):
        # in_channels should be list|tuple
        BaseDecodeHead(32, 16, num_classes=19, input_transform='resize_concat')

    with pytest.raises(AssertionError):
        # in_index should be list|tuple
        BaseDecodeHead([32],
                       16,
                       in_index=-1,
                       num_classes=19,
                       input_transform='resize_concat')

    with pytest.raises(AssertionError):
        # len(in_index) should equal len(in_channels)
        BaseDecodeHead([32, 16],
                       16,
                       num_classes=19,
                       in_index=[-1],
                       input_transform='resize_concat')

    # test default dropout
    head = BaseDecodeHead(32, 16, num_classes=19)
    assert hasattr(head, 'dropout') and head.dropout.p == 0.1

    # test set dropout
    head = BaseDecodeHead(32, 16, num_classes=19, dropout_ratio=0.2)
    assert hasattr(head, 'dropout') and head.dropout.p == 0.2

    # test no input_transform: the single input passes through unchanged
    inputs = [torch.randn(1, 32, 45, 45)]
    head = BaseDecodeHead(32, 16, num_classes=19)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert head.in_channels == 32
    assert head.input_transform is None
    transformed_inputs = head._transform_inputs(inputs)
    assert transformed_inputs.shape == (1, 32, 45, 45)

    # test input_transform = resize_concat: the second input is resized to
    # the first's spatial size, then both are concatenated along channels.
    inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
    head = BaseDecodeHead([32, 16],
                          16,
                          num_classes=19,
                          in_index=[0, 1],
                          input_transform='resize_concat')
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert head.in_channels == 48
    assert head.input_transform == 'resize_concat'
    transformed_inputs = head._transform_inputs(inputs)
    assert transformed_inputs.shape == (1, 48, 45, 45)
def test_fcn_head():
    # FCNHead: constructor validation, norm plumbing, concat_input,
    # kernel_size and num_convs variants, plus forward output shape.
    with pytest.raises(AssertionError):
        # num_convs must be not less than 0
        FCNHead(num_classes=19, num_convs=-1)

    # test no norm_cfg
    head = FCNHead(in_channels=32, channels=16, num_classes=19)
    for m in head.modules():
        if isinstance(m, ConvModule):
            assert not m.with_norm

    # test with norm_cfg
    head = FCNHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        norm_cfg=dict(type='SyncBN'))
    for m in head.modules():
        if isinstance(m, ConvModule):
            assert m.with_norm and isinstance(m.bn, SyncBatchNorm)

    # test concat_input=False: no conv_cat fusion layer is built
    inputs = [torch.randn(1, 32, 45, 45)]
    head = FCNHead(
        in_channels=32, channels=16, num_classes=19, concat_input=False)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert len(head.convs) == 2
    assert not head.concat_input and not hasattr(head, 'conv_cat')
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)

    # test concat_input=True: conv_cat fuses input (32) + conv output (16)
    inputs = [torch.randn(1, 32, 45, 45)]
    head = FCNHead(
        in_channels=32, channels=16, num_classes=19, concat_input=True)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert len(head.convs) == 2
    assert head.concat_input
    assert head.conv_cat.in_channels == 48
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)

    # test kernel_size=3 (the default)
    inputs = [torch.randn(1, 32, 45, 45)]
    head = FCNHead(in_channels=32, channels=16, num_classes=19)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    for i in range(len(head.convs)):
        assert head.convs[i].kernel_size == (3, 3)
        assert head.convs[i].padding == 1
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)

    # test kernel_size=1 (padding shrinks to 0 accordingly)
    inputs = [torch.randn(1, 32, 45, 45)]
    head = FCNHead(in_channels=32, channels=16, num_classes=19, kernel_size=1)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    for i in range(len(head.convs)):
        assert head.convs[i].kernel_size == (1, 1)
        assert head.convs[i].padding == 0
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)

    # test num_conv
    inputs = [torch.randn(1, 32, 45, 45)]
    head = FCNHead(in_channels=32, channels=16, num_classes=19, num_convs=1)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert len(head.convs) == 1
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)

    # test num_conv = 0: the conv stack degenerates to an Identity, so
    # channels must equal in_channels and concat_input must be off
    inputs = [torch.randn(1, 32, 45, 45)]
    head = FCNHead(
        in_channels=32,
        channels=32,
        num_classes=19,
        num_convs=0,
        concat_input=False)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert isinstance(head.convs, torch.nn.Identity)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_psp_head():
    """PSPHead: pool_scales validation, norm_cfg plumbing and forward shape."""
    # pool_scales must be a list/tuple, not a scalar.
    with pytest.raises(AssertionError):
        PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)

    # Without norm_cfg no ConvModule carries a norm layer.
    plain_head = PSPHead(in_channels=32, channels=16, num_classes=19)
    assert not _conv_has_norm(plain_head, sync_bn=False)

    # With SyncBN requested, every ConvModule must use it.
    sync_head = PSPHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        norm_cfg=dict(type='SyncBN'))
    assert _conv_has_norm(sync_head, sync_bn=True)

    # Each pooling branch adopts the configured output size; the forward
    # pass preserves the input's spatial resolution.
    inputs = [torch.randn(1, 32, 45, 45)]
    head = PSPHead(
        in_channels=32, channels=16, num_classes=19, pool_scales=(1, 2, 3))
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    for branch_idx, pool_size in enumerate((1, 2, 3)):
        assert head.psp_modules[branch_idx][0].output_size == pool_size
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_aspp_head():
    """ASPPHead: dilations validation, norm_cfg plumbing, and output shape."""
    with pytest.raises(AssertionError):
        # pool_scales must be list|tuple
        ASPPHead(in_channels=32, channels=16, num_classes=19, dilations=1)
    # test no norm_cfg
    head = ASPPHead(in_channels=32, channels=16, num_classes=19)
    assert not _conv_has_norm(head, sync_bn=False)
    # test with norm_cfg
    head = ASPPHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        norm_cfg=dict(type='SyncBN'))
    assert _conv_has_norm(head, sync_bn=True)
    inputs = [torch.randn(1, 32, 45, 45)]
    head = ASPPHead(
        in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24))
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    # branch convolutions must carry the configured dilation rates
    assert head.aspp_modules[0].conv.dilation == (1, 1)
    assert head.aspp_modules[1].conv.dilation == (12, 12)
    assert head.aspp_modules[2].conv.dilation == (24, 24)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_psa_head():
    """PSAHead: psa_type validation plus a forward pass for every supported
    psa_type / shrink_factor / softmax / compact combination."""
    with pytest.raises(AssertionError):
        # psa_type must be in 'bi-direction', 'collect', 'distribute'
        PSAHead(
            in_channels=32,
            channels=16,
            num_classes=19,
            mask_size=(39, 39),
            psa_type='gather')
    # test no norm_cfg
    head = PSAHead(
        in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
    assert not _conv_has_norm(head, sync_bn=False)
    # test with norm_cfg
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        norm_cfg=dict(type='SyncBN'))
    assert _conv_has_norm(head, sync_bn=True)
    # test 'bi-direction' psa_type
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
    # test 'bi-direction' psa_type, shrink_factor=1
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        shrink_factor=1)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
    # test 'bi-direction' psa_type with soft_max
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        psa_softmax=True)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
    # test 'collect' psa_type
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        psa_type='collect')
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
    # test 'collect' psa_type, shrink_factor=1
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        shrink_factor=1,
        psa_type='collect')
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
    # test 'collect' psa_type, shrink_factor=1, compact=True
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        psa_type='collect',
        shrink_factor=1,
        compact=True)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
    # test 'distribute' psa_type
    inputs = [torch.randn(1, 32, 39, 39)]
    head = PSAHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        mask_size=(39, 39),
        psa_type='distribute')
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 39, 39)
def test_gc_head():
    """GCHead: structure (two convs + gc_block) and forward output shape."""
    head = GCHead(in_channels=32, channels=16, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'gc_block')
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_nl_head():
    """NLHead: structure (two convs + nl_block) and forward output shape."""
    head = NLHead(in_channels=32, channels=16, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'nl_block')
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_cc_head():
    """CCHead: structure and forward shape; the CCA op is CUDA-only."""
    head = CCHead(in_channels=32, channels=16, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'cca')
    if not torch.cuda.is_available():
        pytest.skip('CCHead requires CUDA')
    inputs = [torch.randn(1, 32, 45, 45)]
    head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_uper_head():
    """UPerHead: in_channels validation, norm_cfg plumbing, multi-level input."""
    with pytest.raises(AssertionError):
        # fpn_in_channels must be list|tuple
        UPerHead(in_channels=32, channels=16, num_classes=19)
    # test no norm_cfg
    head = UPerHead(
        in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1])
    assert not _conv_has_norm(head, sync_bn=False)
    # test with norm_cfg
    head = UPerHead(
        in_channels=[32, 16],
        channels=16,
        num_classes=19,
        norm_cfg=dict(type='SyncBN'),
        in_index=[-2, -1])
    assert _conv_has_norm(head, sync_bn=True)
    # output is produced at the resolution of the highest-resolution input
    inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
    head = UPerHead(
        in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1])
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_ann_head():
    """ANNHead: two-level input forward pass; output at low-level resolution."""
    inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
    head = ANNHead(
        in_channels=[16, 32],
        channels=16,
        num_classes=19,
        in_index=[-2, -1],
        project_channels=8)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 21, 21)
def test_da_head():
    """DAHead: training forward returns a 3-tuple; forward_test returns one map."""
    inputs = [torch.randn(1, 32, 45, 45)]
    head = DAHead(in_channels=32, channels=16, num_classes=19, pam_channels=8)
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert isinstance(outputs, tuple) and len(outputs) == 3
    for output in outputs:
        assert output.shape == (1, head.num_classes, 45, 45)
    test_output = head.forward_test(inputs, None, None)
    assert test_output.shape == (1, head.num_classes, 45, 45)
def test_ocr_head():
    """OCRHead: cascaded forward using an FCNHead's output as prev_output."""
    inputs = [torch.randn(1, 32, 45, 45)]
    ocr_head = OCRHead(
        in_channels=32, channels=16, num_classes=19, ocr_channels=8)
    fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
    if torch.cuda.is_available():
        # NOTE(review): ``head`` is assigned twice and never used afterwards;
        # this presumably relies on to_cuda moving the modules in place —
        # confirm against the to_cuda helper.
        head, inputs = to_cuda(ocr_head, inputs)
        head, inputs = to_cuda(fcn_head, inputs)
    prev_output = fcn_head(inputs)
    output = ocr_head(inputs, prev_output)
    assert output.shape == (1, ocr_head.num_classes, 45, 45)
def test_enc_head():
    """EncHead: se_loss/lateral combinations; se_loss adds a second output."""
    # with se_loss, w.o. lateral
    inputs = [torch.randn(1, 32, 21, 21)]
    head = EncHead(
        in_channels=[32], channels=16, num_classes=19, in_index=[-1])
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert isinstance(outputs, tuple) and len(outputs) == 2
    assert outputs[0].shape == (1, head.num_classes, 21, 21)
    assert outputs[1].shape == (1, head.num_classes)
    # w.o se_loss, w.o. lateral
    inputs = [torch.randn(1, 32, 21, 21)]
    head = EncHead(
        in_channels=[32],
        channels=16,
        use_se_loss=False,
        num_classes=19,
        in_index=[-1])
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 21, 21)
    # with se_loss, with lateral
    inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
    head = EncHead(
        in_channels=[16, 32],
        channels=16,
        add_lateral=True,
        num_classes=19,
        in_index=[-2, -1])
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert isinstance(outputs, tuple) and len(outputs) == 2
    assert outputs[0].shape == (1, head.num_classes, 21, 21)
    assert outputs[1].shape == (1, head.num_classes)
    # inference path drops the se_loss output
    test_output = head.forward_test(inputs, None, None)
    assert test_output.shape == (1, head.num_classes, 21, 21)
def test_dw_aspp_head():
    """DepthwiseSeparableASPPHead: with and without the c1 low-level branch."""
    # test w.o. c1
    inputs = [torch.randn(1, 32, 45, 45)]
    head = DepthwiseSeparableASPPHead(
        c1_in_channels=0,
        c1_channels=0,
        in_channels=32,
        channels=16,
        num_classes=19,
        dilations=(1, 12, 24))
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    # c1_in_channels=0 disables the low-level bottleneck entirely
    assert head.c1_bottleneck is None
    assert head.aspp_modules[0].conv.dilation == (1, 1)
    assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
    assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
    # test with c1
    inputs = [torch.randn(1, 8, 45, 45), torch.randn(1, 32, 21, 21)]
    head = DepthwiseSeparableASPPHead(
        c1_in_channels=8,
        c1_channels=4,
        in_channels=32,
        channels=16,
        num_classes=19,
        dilations=(1, 12, 24))
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    assert head.c1_bottleneck.in_channels == 8
    assert head.c1_bottleneck.out_channels == 4
    assert head.aspp_modules[0].conv.dilation == (1, 1)
    assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
    assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_sep_fcn_head():
    """DepthwiseSeparableFCNHead: concat_input on/off, conv types, output shape."""
    # test sep_fcn_head with concat_input=False
    head = DepthwiseSeparableFCNHead(
        in_channels=128,
        channels=128,
        concat_input=False,
        num_classes=19,
        in_index=-1,
        norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
    x = [torch.rand(2, 128, 32, 32)]
    output = head(x)
    assert output.shape == (2, head.num_classes, 32, 32)
    assert not head.concat_input
    assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
    assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
    assert head.conv_seg.kernel_size == (1, 1)
    head = DepthwiseSeparableFCNHead(
        in_channels=64,
        channels=64,
        concat_input=True,
        num_classes=19,
        in_index=-1,
        norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
    x = [torch.rand(3, 64, 32, 32)]
    output = head(x)
    assert output.shape == (3, head.num_classes, 32, 32)
    assert head.concat_input
    assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
    assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
def test_dnl_head():
    """DNLHead: forward pass in every supported non-local mode."""
    # DNL with 'embedded_gaussian' mode
    head = DNLHead(in_channels=32, channels=16, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'dnl_block')
    assert head.dnl_block.temperature == 0.05
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
    # NonLocal2d with 'dot_product' mode
    head = DNLHead(
        in_channels=32, channels=16, num_classes=19, mode='dot_product')
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
    # NonLocal2d with 'gaussian' mode
    head = DNLHead(
        in_channels=32, channels=16, num_classes=19, mode='gaussian')
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
    # NonLocal2d with 'concatenation' mode
    head = DNLHead(
        in_channels=32, channels=16, num_classes=19, mode='concatenation')
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_emanet_head():
    """EMAHead: frozen ema_mid_conv parameters, ema_module, output shape."""
    head = EMAHead(
        in_channels=32,
        ema_channels=24,
        channels=16,
        num_stages=3,
        num_bases=16,
        num_classes=19)
    # the middle conv of the EMA unit is expected to be frozen
    for param in head.ema_mid_conv.parameters():
        assert not param.requires_grad
    assert hasattr(head, 'ema_module')
    inputs = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def test_point_head():
    """PointHead: forward_test refines an FCNHead prediction; the 45x45 input
    is upscaled by scale_factor=2 twice (subdivision_steps) -> 180x180."""
    inputs = [torch.randn(1, 32, 45, 45)]
    point_head = PointHead(
        in_channels=[32], in_index=[0], channels=16, num_classes=19)
    assert len(point_head.fcs) == 3
    fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
    if torch.cuda.is_available():
        # NOTE(review): ``head`` is assigned and unused; presumably to_cuda
        # moves modules in place — confirm against the helper.
        head, inputs = to_cuda(point_head, inputs)
        head, inputs = to_cuda(fcn_head, inputs)
    prev_output = fcn_head(inputs)
    test_cfg = ConfigDict(
        subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
    output = point_head.forward_test(inputs, prev_output, None, test_cfg)
    assert output.shape == (1, point_head.num_classes, 180, 180)
| 33.746246 | 79 | 0.629143 |
acf293a50c4749b6e3b46d4f8320dcda26b49edc | 612 | py | Python | sandbox/sql/numpy_example.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 5 | 2016-05-28T14:12:28.000Z | 2021-04-22T10:23:12.000Z | sandbox/sql/numpy_example.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | null | null | null | sandbox/sql/numpy_example.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 2 | 2015-07-13T10:04:10.000Z | 2021-04-22T10:23:23.000Z | import sqlite3
import numpy as N
import pickle
# Demo: round-trip a numpy array through an sqlite BLOB column.
# Fixes vs the original Python-2 version:
#   * ``cPickle`` no longer exists -> use ``pickle``
#   * the pickled payload is stored with sqlite3.Binary and loaded directly;
#     the old code re-encoded the fetched text as UTF-8, which only works for
#     protocol-0 ASCII pickles and corrupts binary pickle protocols
#   * ``print`` is a function on Python 3
# data
data = N.ones((31, 31))
# pickled binary
bd = pickle.dumps(data)
# DB connection (in-memory database, nothing persisted)
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
# create a table
cursor.execute('create table PSFs (id integer primary key, image BLOB)')
# insert data as a real BLOB so it comes back as bytes
cursor.execute("insert into PSFs (image) values(?)", (sqlite3.Binary(bd),))
# read stuff
cursor.execute("SELECT image from PSFs where id = 1")
for PSF, in cursor:
    data_out = pickle.loads(PSF)
    print(type(data_out), N.shape(data_out))
| 22.666667 | 76 | 0.714052 |
acf29660f22773ab757486b9928bfd7a26b08aa6 | 2,785 | py | Python | tiny_yolo_video.py | takeshikondo/keras-yolo3 | 20862b8fafa5ea533617171f8645bfd1179e6c50 | [
"MIT"
] | null | null | null | tiny_yolo_video.py | takeshikondo/keras-yolo3 | 20862b8fafa5ea533617171f8645bfd1179e6c50 | [
"MIT"
] | null | null | null | tiny_yolo_video.py | takeshikondo/keras-yolo3 | 20862b8fafa5ea533617171f8645bfd1179e6c50 | [
"MIT"
] | null | null | null | import sys
import argparse
#from yolo import YOLO, detect_video
from tiny_yolo import YOLO, detect_video
from PIL import Image
def detect_img(yolo):
    """Interactively prompt for image paths and run YOLO detection on each.

    Loops forever reading a filename from stdin; unreadable paths are
    reported and the prompt is shown again.
    """
    while True:
        img = input('Input image filename:')
        try:
            image = Image.open(img)
        except OSError:
            # Narrowed from a bare ``except``: OSError covers missing files
            # (FileNotFoundError) and undecodable images (PIL's
            # UnidentifiedImageError subclasses OSError), without swallowing
            # KeyboardInterrupt/SystemExit like the old bare clause did.
            print('Open Error! Try again!')
            continue
        else:
            r_image = yolo.detect_image(image)
            r_image.show()
    # unreachable with the infinite loop above; kept for parity with callers
    # that might break out in a future revision
    yolo.close_session()
def detect_img_2019(yolo, image):
    """Run YOLO detection on an already-opened PIL image.

    Unlike ``detect_img`` this neither prompts on stdin nor closes the
    YOLO session, so it can be called repeatedly by an external driver.
    Returns the annotated image produced by ``yolo.detect_image``.
    """
    return yolo.detect_image(image)
# Parsed command-line namespace; populated only when run as a script.
FLAGS = None
if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path")
    )
    parser.add_argument(
        '--anchors', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
    )
    parser.add_argument(
        '--classes', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
    )
    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
    )
    parser.add_argument(
        '--image', default=False, action="store_true",
        help='Image detection mode, will ignore all positional arguments'
    )
    '''
    Command line positional arguments -- for video detection mode
    '''
    parser.add_argument(
        "--input", nargs='?', type=str,required=False,default='./path2your_video',
        help = "Video input path"
    )
    parser.add_argument(
        "--output", nargs='?', type=str, default="",
        help = "[Optional] Video output path"
    )
    FLAGS = parser.parse_args()
    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        # ``"input" in FLAGS`` tests attribute presence on the argparse
        # Namespace (Namespace implements __contains__); with
        # argument_default=SUPPRESS, unset options are simply absent.
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print("Must specify at least video_input_path. See usage with --help.")
| 27.85 | 100 | 0.598564 |
acf2971d74f3f9bd7891418063660642b97176e7 | 7,152 | py | Python | src/sardana/tango/pool/test/base.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | src/sardana/tango/pool/test/base.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | src/sardana/tango/pool/test/base.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""Base classes for the controller tests"""
__all__ = ['BasePoolTestCase', 'ControllerLoadsTestCase',
'ControllerCreationTestCase', 'ElementCreationTestCase']
import PyTango
from taurus.external import unittest
from taurus.core.tango.starter import ProcessStarter
from sardana import sardanacustomsettings
from sardana.tango.core.util import (get_free_server, get_free_device,
get_free_alias)
from taurus.core.util import whichexecutable
class BasePoolTestCase(object):
    """Abstract class for pool DS testing.
    """
    # Server and device names come from sardanacustomsettings so test
    # deployments can override them without editing this module.
    pool_ds_name = getattr(sardanacustomsettings, 'UNITTEST_POOL_DS_NAME')
    pool_name = getattr(sardanacustomsettings, 'UNITTEST_POOL_NAME')
    def setUp(self):
        """Start Pool DS and register extensions.
        """
        # Discover the Pool launcher script
        poolExec = whichexecutable.whichfile("Pool")
        # register Pool server
        pool_ds_name = "Pool/" + self.pool_ds_name
        pool_free_ds_name = get_free_server(PyTango.Database(),
                                            pool_ds_name)
        self._starter = ProcessStarter(poolExec, pool_free_ds_name)
        # register Pool device
        dev_name_parts = self.pool_name.split('/')
        prefix = '/'.join(dev_name_parts[0:2])
        start_from = int(dev_name_parts[2])
        self.pool_name = get_free_device(
            PyTango.Database(), prefix, start_from)
        self._starter.addNewDevice(self.pool_name, klass='Pool')
        # start Pool server
        self._starter.startDs()
        # register extensions so the test methods can use them
        self.pool = PyTango.DeviceProxy(self.pool_name)
    def tearDown(self):
        """Remove the Pool instance.
        """
        # force=True wipes the server and device entries from the Tango DB
        self._starter.cleanDb(force=True)
        self._starter = None
        self.pool = None
        self.pool_name = None
# TODO: Currently test inputs are implemented as class members, it would be
# more aesthetic to implement them as decorators.
class ControllerLoadsTestCase(BasePoolTestCase):
    """Class for loading an arbitrary Sardana controller library and class.
    """
    # Mapping {controller library name: iterable of controller class names}.
    # Must be a dict, not a list: ``test_controller_loads`` iterates it with
    # ``.items()``, so the previous empty-list default raised AttributeError
    # whenever a subclass forgot to override it.
    controller_classes = {}
    def test_controller_loads(self):
        """Test that the controller library and class can be loaded.
        """
        libraries = self.pool.getElementsOfType('ControllerLibrary').values()
        libraries_names = [lib.getName() for lib in libraries]
        classes = self.pool.getElementsOfType('ControllerClass').values()
        classes_names = [cls.getName() for cls in classes]
        for test_lib, test_classes in self.controller_classes.items():
            msg = 'ControllerLibrary %s was not correctly loaded.' % test_lib
            self.assertIn(test_lib, libraries_names, msg)
            msg = 'ControllerClass %s was not correctly loaded.'
            for test_class in test_classes:
                self.assertIn(test_class, classes_names, msg % test_class)
# TODO: Currently test inputs are implemented as class members, it would be
# more aesthetic to implement them as decorators.
class ControllerCreationTestCase(BasePoolTestCase):
    """Class for creating a controller and testing the correct creation.
    """
    # List of (controller class name, instance name, properties tuple) inputs.
    controller_infos = []
    def test_controller_creation(self):
        """Test that the controller has been created with the correct name.
        """
        for cls, name, props in self.controller_infos:
            ctrl = self.pool.createController(cls, name, *props)
            msg = 'Controller %s was not correctly created.' % name
            self.assertEqual(ctrl.getName(), name, msg)
            # clean up so a later iteration/run can reuse the same name
            ctrl = self.pool.deleteElement(ctrl.getName())
# TODO: Currently test inputs are implemented as class members, it would be
# more aesthetic to implement them as decorators.
class ElementCreationTestCase(BasePoolTestCase):
    """Class used for creating a Sardana controller and Sardana elements.
    """
    # List of (controller class, name, properties, element infos) inputs,
    # where each element info is indexed by NAME/AXIS below.
    controller_infos = []
    NAME = 0
    AXIS = 1
    def test_element_creation(self):
        """Test that controller and elements have been correctly created.
        """
        for cls, name, props, elements in self.controller_infos:
            ctrl = self.pool.createController(cls, name, *props)
            msg = 'Controller %s was not correctly created.' % name
            self.assertEqual(ctrl.getName(), name, msg)
            for element_info in elements:
                test_name = element_info[self.NAME]
                test_axis = element_info[self.AXIS]
                elem = self.pool.createElement(test_name, ctrl, test_axis)
                msg = 'Element %s was not correctly created.' % test_name
                self.assertIsNotNone(elem, msg)
                name = elem.getName()
                msg = 'Element name: %s does not correspond to: %s.' % \
                      (name, test_name)
                self.assertEqual(name, test_name, msg)
                # delete the element before the controller can be removed
                elem = self.pool.deleteElement(test_name)
                msg = 'Element %s was not correctly deleted.' % test_name
                self.assertIsNotNone(elem, msg)
            ctrl = self.pool.deleteElement(ctrl.getName())
if __name__ == '__main__':
    # Smoke-test setup using the built-in DummyMotorController; the concrete
    # test classes mix the abstract cases above with unittest.TestCase.
    class BuiltinControllerLoadsTest(ControllerLoadsTestCase,
                                    unittest.TestCase):
        controller_classes = {
            'DummyMotorController': ('DummyMotorController',)
        }
    class BuiltinControllerCreationTest(ControllerCreationTestCase,
                                        unittest.TestCase):
        controller_infos = [('DummyMotorController', 'unittest', ())
                            ]
    class BuiltinElementCreationTest(ElementCreationTestCase,
                                    unittest.TestCase):
        # pick a motor alias not yet present in the Tango database
        alias = get_free_alias(PyTango.Database(), "mot_test")
        controller_infos = [('DummyMotorController',
                             'unittest',
                             (),
                             [(alias, 1)])
                            ]
    # only the element-creation suite is actually executed here
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(
        BuiltinElementCreationTest)
    unittest.TextTestRunner(descriptions=True, verbosity=2).run(suite)
| 40.636364 | 78 | 0.633669 |
acf297dddc7cf23a8beec96f8230ecd4b7e6a6f5 | 1,446 | py | Python | Python/Day 21/snake.py | Aswinpkrishnan94/Fabulous-Python | bafba6d5b3889008299c012625b4a9e1b63b1d44 | [
"MIT"
] | null | null | null | Python/Day 21/snake.py | Aswinpkrishnan94/Fabulous-Python | bafba6d5b3889008299c012625b4a9e1b63b1d44 | [
"MIT"
] | null | null | null | Python/Day 21/snake.py | Aswinpkrishnan94/Fabulous-Python | bafba6d5b3889008299c012625b4a9e1b63b1d44 | [
"MIT"
] | null | null | null | from turtle import Turtle
# Constants: geometry and heading values shared by the Snake class.
START_POS = [(0, 0), (-20, 0), (-40,0)]  # initial segment positions, head first
DIST = 20  # pixels the head advances per move; equals the segment spacing
UP = 90  # turtle headings, in degrees
DOWN = 270
LEFT = 180
RIGHT = 0
class Snake:
    """The player-controlled snake: a chain of white square turtle segments."""
    def __init__(self):
        self.segment = []
        self.create_snake()
        # the first segment acts as the snake's head
        self.head = self.segment[0]
    def create_snake(self):
        """Create the initial three-segment snake at START_POS."""
        # reuse add() instead of duplicating the segment-construction code
        for pos in START_POS:
            self.add(pos)
    def add(self, position):
        """Append one new segment at *position*."""
        new_seg = Turtle(shape="square")
        new_seg.color("white")
        new_seg.penup()
        new_seg.goto(position)
        self.segment.append(new_seg)
    def extend(self):
        """Grow the snake by a single segment at the current tail position.

        Bug fix: the previous version looped over START_POS (ignoring the
        loop variable), so each call grew the snake by three segments; one
        piece of food should add exactly one segment.
        """
        self.add(self.segment[-1].position())
    def move(self):
        """Advance one step: each segment moves to its predecessor's spot,
        then the head moves forward by DIST."""
        for seg in range(len(self.segment) - 1, 0, -1):
            new_x = self.segment[seg - 1].xcor()
            new_y = self.segment[seg - 1].ycor()
            self.segment[seg].goto(new_x, new_y)
        self.head.fd(DIST)
    def up(self):
        # a snake may never reverse directly onto itself
        if self.head.heading() != DOWN:
            self.head.setheading(UP)
    def down(self):
        if self.head.heading() != UP:
            self.head.setheading(DOWN)
    def left(self):
        if self.head.heading() != RIGHT:
            self.head.setheading(LEFT)
    def right(self):
        if self.head.heading() != LEFT:
            self.head.setheading(RIGHT)
| 24.1 | 55 | 0.550484 |
acf298385a4338e5dc61221b499371d03c9ca099 | 166 | py | Python | tests/basics/set_symmetric_difference.py | bygreencn/micropython | 3f759b71c63f5e01df18a6e204c50f78d1b6a20b | [
"MIT"
] | 1 | 2019-05-07T15:01:19.000Z | 2019-05-07T15:01:19.000Z | tests/basics/set_symmetric_difference.py | bygreencn/micropython | 3f759b71c63f5e01df18a6e204c50f78d1b6a20b | [
"MIT"
] | null | null | null | tests/basics/set_symmetric_difference.py | bygreencn/micropython | 3f759b71c63f5e01df18a6e204c50f78d1b6a20b | [
"MIT"
] | null | null | null | print({1,2}.symmetric_difference({2,3}))
# symmetric_difference also accepts an arbitrary iterable argument
print({1, 2}.symmetric_difference([2, 3]))
# the in-place variant mutates the set and returns None
s = {1, 2}
print(s.symmetric_difference_update({2, 3}))
l = sorted(s)
print(l)
| 20.75 | 43 | 0.680723 |
acf29927a000f129607c4308e7a8e8b07a612ec0 | 2,701 | py | Python | human/chmpd/make-explicit.py | alancleary/sv-genotyping-paper | caac97831ea26a2c4d9dc860ddbac328f6e57c09 | [
"MIT"
] | 22 | 2019-06-01T14:30:18.000Z | 2021-11-07T14:41:20.000Z | human/chmpd/make-explicit.py | alancleary/sv-genotyping-paper | caac97831ea26a2c4d9dc860ddbac328f6e57c09 | [
"MIT"
] | 5 | 2019-04-30T09:26:08.000Z | 2022-03-21T12:16:16.000Z | human/chmpd/make-explicit.py | alancleary/sv-genotyping-paper | caac97831ea26a2c4d9dc860ddbac328f6e57c09 | [
"MIT"
] | 2 | 2019-04-24T17:52:21.000Z | 2022-03-03T04:07:37.000Z | #!/usr/bin/env python2.7
"""
Assume:
POS coordinates are correct
REF entries are wrong
SEQ elements are correct (but don't contain match reference base at beginning) but shifted for deletions
-> pull REF from Fasta
-> deleteion: REF = REF + SEQ, ALT = REF
-> insertion: REF = REF, ALT = REF + SEQ
don't touch inversions.
throw error if more than one alt
(hacked together from vcf-add-bed-seqs.py)
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools, math
import vcf, collections, gzip, re
import pysam
from Bio.Seq import Seq
def parse_args(args):
    """Parse command-line options.

    *args* is expected to be sys.argv, i.e. it still contains the program
    name in position 0, which is stripped before parsing.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "vcf", type=str,
        help="VCF whose SV sequences we want to fill out")
    parser.add_argument(
        "--fasta",
        default='ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/'
                'GRCh38_full_analysis_set_plus_decoy_hla.fa',
        help="Fasta file for reference. Needs .fai as well")
    # drop the program name before handing the list to argparse
    return parser.parse_args(args[1:])
def open_input(file_path):
    """Open *file_path* for reading, transparently handling .gz files."""
    if file_path.endswith('.gz'):
        return gzip.open(file_path, 'r')
    return open(file_path, 'r')
def main(args):
    """Rewrite the VCF so REF/ALT are explicit sequences, printing to stdout.

    Header lines pass through untouched.  For each record, POS is taken as
    correct: the reference base at POS is fetched from the FASTA and combined
    with the record's SEQ INFO field to build explicit REF/ALT alleles for
    INS/DEL records (other SVTYPEs, e.g. INV, are left as-is per the module
    docstring).  Requires the FASTA to be indexed (.fai).
    """
    options = parse_args(args)
    # fasta index needed for reference bases
    faidx = pysam.FastaFile(options.fasta)
    # print the edited vcf
    with open_input(options.vcf) as vcf_file:
        add_span_info = True
        for line in vcf_file:
            if line.startswith('#'):
                sys.stdout.write(line)
            elif line:
                vcf_toks = line.split('\t')
                vcf_chrom = vcf_toks[0]
                vcf_pos = int(vcf_toks[1])
                # ALT is symbolic, e.g. "<INS>" -> strip the angle brackets
                vcf_sv_type = vcf_toks[4][1:-1]
                # pull the SEQ= entry out of the semicolon-separated INFO column
                sv_seq = [i for i in vcf_toks[7].split(';') if i.startswith('SEQ=')][0][4:]
                # single reference base at POS (VCF positions are 1-based)
                ref_seq = faidx.fetch(vcf_chrom, vcf_pos - 1, vcf_pos)
                if vcf_sv_type == 'INS':
                    vcf_toks[3] = ref_seq;
                    vcf_toks[4] = ref_seq + sv_seq
                elif vcf_sv_type == 'DEL':
                    vcf_toks[3] = ref_seq + sv_seq
                    # it looks like the vcf_seqs are shifted. we assume POS is gospel an reload from Fasta
                    ref_del_seq = faidx.fetch(vcf_chrom, vcf_pos - 1, vcf_pos - 1 + len(vcf_toks[3]))
                    vcf_toks[3] = ref_del_seq
                    vcf_toks[4] = ref_seq
                sys.stdout.write('\t'.join(vcf_toks))
# Entry point: the process exit status is main()'s return value (None -> 0).
if __name__ == "__main__" :
    sys.exit(main(sys.argv))
| 33.345679 | 119 | 0.597186 |
acf299f45b6796f1f9fcf288810b117ee43e20ea | 520 | py | Python | Dataset/Leetcode/train/78/47.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/78/47.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/78/47.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, nums: List[int]) -> List[List[int]]:
ans, path = list(), list()
n = len(nums)
def dfs(nums, cur):
if cur == n:
ans.append(path.copy())
return
for i in range(2):
if i == 0:
dfs(nums, cur + 1)
else:
path.append(nums[cur])
dfs(nums, cur + 1)
path.pop()
dfs(nums, 0)
return ans
| 24.761905 | 54 | 0.371154 |
acf29a03a394f98b931d3b0bb96faa202242e299 | 5,890 | py | Python | wagtailstreamforms/models/form.py | axoplasm/wagtailstreamforms | 2a8535c5b5cf12fcb06899cd10f3c1cd244e20df | [
"MIT"
] | null | null | null | wagtailstreamforms/models/form.py | axoplasm/wagtailstreamforms | 2a8535c5b5cf12fcb06899cd10f3c1cd244e20df | [
"MIT"
] | null | null | null | wagtailstreamforms/models/form.py | axoplasm/wagtailstreamforms | 2a8535c5b5cf12fcb06899cd10f3c1cd244e20df | [
"MIT"
] | null | null | null | import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail import VERSION as WAGTAIL_VERSION
from wagtail.admin.edit_handlers import (
FieldPanel,
MultiFieldPanel,
ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface,
)
from wagtail.core.models import Site
from wagtailstreamforms import hooks
from wagtailstreamforms.conf import get_setting
from wagtailstreamforms.fields import HookSelectField
from wagtailstreamforms.forms import FormBuilder
from wagtailstreamforms.streamfield import FormFieldsStreamField
from wagtailstreamforms.utils.general import get_slug_from_string
from wagtailstreamforms.utils.loading import get_advanced_settings_model
from .submission import FormSubmission
class FormQuerySet(models.QuerySet):
    # Custom queryset so ``Form.objects.for_site(site)`` reads naturally.
    def for_site(self, site):
        """Return all forms for a specific site."""
        return self.filter(site=site)
class AbstractForm(models.Model):
    # Owning site; nullable so a form can survive site deletion (SET_NULL).
    site = models.ForeignKey(Site, on_delete=models.SET_NULL, null=True, blank=True)
    title = models.CharField(_("Title"), max_length=255)
    # Unique handle used by template tags to look the form up.
    slug = models.SlugField(
        _("Slug"),
        allow_unicode=True,
        max_length=255,
        unique=True,
        help_text=_("Used to identify the form in template tags"),
    )
    # Render template, restricted to the choices configured in settings.
    template_name = models.CharField(
        _("Template"), max_length=255, choices=get_setting("FORM_TEMPLATES")
    )
    # StreamField holding the form field definitions.
    fields = FormFieldsStreamField([], verbose_name=_("Fields"))
    submit_button_text = models.CharField(
        _("Submit button text"), max_length=100, default="Submit"
    )
    success_message = models.CharField(
        _("Success message"),
        blank=True,
        max_length=255,
        help_text=_(
            "An optional success message to show when the form has been successfully submitted"
        ),
    )
    error_message = models.CharField(
        _("Error message"),
        blank=True,
        max_length=255,
        help_text=_(
            "An optional error message to show when the form has validation errors"
        ),
    )
    post_redirect_page = models.ForeignKey(
        "wagtailcore.Page",
        verbose_name=_("Post redirect page"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="+",
        help_text=_("The page to redirect to after a successful submission"),
    )
    # Which registered submission hooks run for this form (multi-select).
    process_form_submission_hooks = HookSelectField(
        verbose_name=_("Submission hooks"), blank=True
    )
    objects = FormQuerySet.as_manager()
    # Wagtail admin layout: a "General" settings tab and a "Fields" tab.
    settings_panels = [
        FieldPanel("title", classname="full"),
        FieldPanel("slug"),
        FieldPanel("template_name"),
        FieldPanel("submit_button_text"),
        MultiFieldPanel(
            [FieldPanel("success_message"), FieldPanel("error_message")], _("Messages")
        ),
        FieldPanel("process_form_submission_hooks", classname="choice_field"),
        PageChooserPanel("post_redirect_page"),
    ]
    field_panels = [StreamFieldPanel("fields")]
    edit_handler = TabbedInterface(
        [
            ObjectList(settings_panels, heading=_("General")),
            ObjectList(field_panels, heading=_("Fields")),
        ]
    )
def __str__(self):
return self.title
class Meta:
abstract = True
ordering = ["title"]
verbose_name = _("Form")
verbose_name_plural = _("Forms")
def copy(self):
"""Copy this form and its fields."""
form_copy = Form(
site=self.site,
title=self.title,
slug=uuid.uuid4(),
template_name=self.template_name,
fields=self.fields,
submit_button_text=self.submit_button_text,
success_message=self.success_message,
error_message=self.error_message,
post_redirect_page=self.post_redirect_page,
process_form_submission_hooks=self.process_form_submission_hooks,
)
form_copy.save()
# additionally copy the advanced settings if they exist
SettingsModel = get_advanced_settings_model()
if SettingsModel:
try:
advanced = SettingsModel.objects.get(form=self)
advanced.pk = None
advanced.form = form_copy
advanced.save()
except SettingsModel.DoesNotExist:
pass
return form_copy
copy.alters_data = True
def get_data_fields(self):
"""Returns a list of tuples with (field_name, field_label)."""
data_fields = [("submit_time", _("Submission date"))]
data_fields += [
(get_slug_from_string(field["value"]["label"]), field["value"]["label"])
for field in self.get_form_fields()
]
return data_fields
def get_form(self, *args, **kwargs):
"""Returns the form."""
form_class = self.get_form_class()
return form_class(*args, **kwargs)
def get_form_class(self):
"""Returns the form class."""
return FormBuilder(self.get_form_fields()).get_form_class()
def get_form_fields(self):
"""Returns the form field's stream data."""
if WAGTAIL_VERSION >= (2, 12):
form_fields = self.fields.raw_data
else:
form_fields = self.fields.stream_data
for fn in hooks.get_hooks("construct_submission_form_fields"):
form_fields = fn(form_fields)
return form_fields
def get_submission_class(self):
"""Returns submission class."""
return FormSubmission
def process_form_submission(self, form):
"""Runs each hook if selected in the form."""
for fn in hooks.get_hooks("process_form_submission"):
if fn.__name__ in self.process_form_submission_hooks:
fn(self, form)
class Form(AbstractForm):
    """Concrete form model; all behavior is inherited from AbstractForm."""
    pass
| 30.837696 | 95 | 0.643973 |
acf29a7b415e719813b2a6278413999a33c7c04d | 1,144 | py | Python | app/views.py | jakebrinkmann/2015-PUG-flask-data-vis | f444e637d758c26eba9d08235c39bf83185d2369 | [
"MIT"
] | null | null | null | app/views.py | jakebrinkmann/2015-PUG-flask-data-vis | f444e637d758c26eba9d08235c39bf83185d2369 | [
"MIT"
] | null | null | null | app/views.py | jakebrinkmann/2015-PUG-flask-data-vis | f444e637d758c26eba9d08235c39bf83185d2369 | [
"MIT"
] | null | null | null | from flask import render_template, request, make_response
from flask_wtf import Form
from wtforms.fields.html5 import DecimalRangeField
from wtforms import RadioField
from bokeh.embed import components
from bokeh.plotting import figure
import random
from app import app
class MyForm(Form):
    # Slider whose value becomes the `magnitude` passed to draw_plot().
    my_slider = DecimalRangeField('Mag')
    # Radio buttons selecting the line color (value is the hex color, label is shown).
    my_radio = RadioField('Color', choices=[('#c51b8a','Pink'),('#5ab4ac','Teal')])
def draw_plot(mag, color):
    """Build a Bokeh figure plotting a noisy quadratic of the given magnitude/color."""
    x_values = list(range(100))
    # Each y is (mag + x)^2 plus a small random jitter.
    y_values = [(mag + x) ** 2 + random.randint(1, 50) for x in x_values]
    plot = figure(title="Polynomial", plot_width=500, plot_height=400)
    plot.line(x_values, y_values, color=color, line_width=2)
    return plot
@app.route('/', methods=('GET', 'POST'))
def index():
    """Render the home page with an interactive Bokeh plot."""
    form = MyForm()
    # Defaults used for the initial GET request.
    magnitude, color = 50, '#67a9cf'
    if request.method == 'POST':
        # Pull the user's slider/radio selections from the submitted form.
        magnitude = float(request.form['my_slider'])
        color = request.form['my_radio']
    script, div = components(draw_plot(magnitude, color))
    return render_template(
        'index.html', title='Home', form=form, div=div, script=script
    )
| 32.685714 | 83 | 0.638986 |
acf29afa64dd56ba3c67d57633b4d50ae6283294 | 1,466 | py | Python | PythonOCC/add_function_box_example.py | leon-thomm/Pythonocc-nodes-for-Ryven | 554531f35003aeccc66927d6759d9b0d70948c12 | [
"MIT"
] | 19 | 2021-08-31T10:00:35.000Z | 2022-03-31T05:51:32.000Z | PythonOCC/add_function_box_example.py | leon-thomm/Pythonocc-nodes-for-Ryven | 554531f35003aeccc66927d6759d9b0d70948c12 | [
"MIT"
] | 8 | 2021-08-31T09:23:06.000Z | 2022-02-05T04:29:48.000Z | PythonOCC/add_function_box_example.py | leon-thomm/Pythonocc-nodes-for-Ryven | 554531f35003aeccc66927d6759d9b0d70948c12 | [
"MIT"
] | 2 | 2021-08-31T11:54:03.000Z | 2022-03-04T06:12:57.000Z | class BrepPrimAPINodeBase(PythonOCCNodeBase): # The parent class of the box
color = '#aabb44' # The color attributed to the parent class
class Box_Node(BrepPrimAPINodeBase):  # node classes follow the pattern <Name>_Node(<FamilyBase>)
    # NOTE: this docstring is displayed as the node's tooltip in Ryven; the
    # o_... lines document the expected inputs (width, length, height).
    """
    Generates box_________-
    o_Width_______________-
    o_Length______________-
    o_Height______________-
    """
    init_inputs = [
        NodeInputBP(dtype=dtypes.Data(size='s')), # one input per value the function needs
        NodeInputBP(dtype=dtypes.Data(size='s')),
        NodeInputBP(dtype=dtypes.Data(size='s')),
    ]
    init_outputs = [
        NodeOutputBP(), # single output carrying the generated shape
    ]
    title = 'box' # title shown on the node in Ryven
    def apply_op(self, elements: list):
        # Unpack the node's inputs in declaration order.
        width = elements[0]
        length = elements[1]
        height = elements[2]
        from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox # import of the method
        from OCC.Core.gp import gp_Pnt
        box = BRepPrimAPI_MakeBox(gp_Pnt(), width, length, height).Shape() # build the box shape
        return box # the output of the node
BRepPrimAPI_nodes = [ # registry of BRepPrimAPI nodes; append new node classes of this family here
    Box_Node,
]
export_nodes(
    *BRepPrimAPI_nodes, # export the family's nodes so they become available in Ryven
)
| 34.093023 | 107 | 0.665075 |
acf29b56855b7545789fa83dea66c7e5fda5c80b | 936 | py | Python | tests/test_get_major.py | rtlee9/SIC-list | bb4b535f421320b1dfa57bc58163e2a17f9b6a4c | [
"Apache-2.0"
] | 7 | 2017-11-30T18:01:02.000Z | 2022-03-07T01:44:32.000Z | tests/test_get_major.py | rtlee9/SIC-list | bb4b535f421320b1dfa57bc58163e2a17f9b6a4c | [
"Apache-2.0"
] | 1 | 2016-08-27T16:52:13.000Z | 2016-08-27T16:52:13.000Z | tests/test_get_major.py | rtlee9/SIC-list | bb4b535f421320b1dfa57bc58163e2a17f9b6a4c | [
"Apache-2.0"
] | 4 | 2017-01-10T17:12:15.000Z | 2020-03-30T07:41:43.000Z | # Test get_major() function
from .context import scrape_sic_osha as scrape
class TestClass:
    # Page fetched once at class-definition time and shared by all tests.
    url = 'sic_manual.display?id=1&tab=group'
    major = scrape.get_major(url)
    def test_len(self):
        # The scraped page should contain more than one entry.
        assert len(self.major) > 1
    def test_grapes(self):
        entry = self.major[16]
        assert entry.full_desc == 'SIC4 0172: Grapes'
        assert entry.parent_desc == 'Industry Group 017: Fruits And Tree Nuts'
    def test_first(self):
        entry = self.major[0]
        assert entry.full_desc == 'Industry Group 011: Cash Grains'
        assert entry.parent_desc == 'Major Group 01: Agricultural Production Crops'
    def test_last(self):
        entry = self.major[-1]
        assert entry.full_desc == 'SIC4 0191: General Farms, Primarily Crop'
        assert entry.parent_desc == 'Industry Group 019: General Farms, Primarily Crop'
acf29bf5e99d7c373ee30e1f4a43389e5931c8bc | 14,358 | py | Python | saleor/graphql/core/types/common.py | siyoola/saleor | 4e52b8655a5570a8ce0a3b1484b4d8b46fbd0ad0 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/core/types/common.py | siyoola/saleor | 4e52b8655a5570a8ce0a3b1484b4d8b46fbd0ad0 | [
"CC-BY-4.0"
] | 86 | 2021-11-01T04:51:55.000Z | 2022-03-30T16:30:16.000Z | saleor/graphql/core/types/common.py | siyoola/saleor | 4e52b8655a5570a8ce0a3b1484b4d8b46fbd0ad0 | [
"CC-BY-4.0"
] | null | null | null | from urllib.parse import urljoin
import graphene
from django.conf import settings
from ....core.tracing import traced_resolver
from ....product.product_images import get_thumbnail
from ...account.enums import AddressTypeEnum
from ..enums import (
AccountErrorCode,
AppErrorCode,
AttributeErrorCode,
ChannelErrorCode,
CheckoutErrorCode,
CollectionErrorCode,
DiscountErrorCode,
ExportErrorCode,
ExternalNotificationTriggerErrorCode,
GiftCardErrorCode,
GiftCardSettingsErrorCode,
InvoiceErrorCode,
JobStatusEnum,
LanguageCodeEnum,
MenuErrorCode,
MetadataErrorCode,
OrderErrorCode,
OrderSettingsErrorCode,
PageErrorCode,
PaymentErrorCode,
PermissionEnum,
PermissionGroupErrorCode,
PluginErrorCode,
ProductErrorCode,
ShippingErrorCode,
ShopErrorCode,
StockErrorCode,
TimePeriodTypeEnum,
TranslationErrorCode,
UploadErrorCode,
WarehouseErrorCode,
WebhookErrorCode,
WeightUnitsEnum,
)
from ..scalars import PositiveDecimal
from .money import VAT
class NonNullList(graphene.List):
    """A list type that automatically adds non-null constraint on contained items."""
    def __init__(self, of_type, *args, **kwargs):
        # Wrap the item type in NonNull so the schema renders [T!] instead of [T].
        of_type = graphene.NonNull(of_type)
        # Zero-argument super() (Python 3 idiom) replaces super(NonNullList, self).
        super().__init__(of_type, *args, **kwargs)
class CountryDisplay(graphene.ObjectType):
    # Country code/name pair plus an optional VAT record.
    code = graphene.String(description="Country code.", required=True)
    country = graphene.String(description="Country name.", required=True)
    vat = graphene.Field(VAT, description="Country tax.")
class LanguageDisplay(graphene.ObjectType):
    # Language code (enum) and its human-readable name.
    code = LanguageCodeEnum(
        description="ISO 639 representation of the language name.", required=True
    )
    language = graphene.String(description="Full name of the language.", required=True)
class Permission(graphene.ObjectType):
    # Machine-readable permission code plus a human-readable description.
    code = PermissionEnum(description="Internal code for permission.", required=True)
    name = graphene.String(
        description="Describe action(s) allowed to do by permission.", required=True
    )
    class Meta:
        description = "Represents a permission object in a friendly form."
class Error(graphene.ObjectType):
    # Base GraphQL error type; subclasses below add a domain-specific `code` field.
    field = graphene.String(
        description=(
            "Name of a field that caused the error. A value of `null` indicates that "
            "the error isn't associated with a particular field."
        ),
        required=False,
    )
    message = graphene.String(description="The error message.")
    class Meta:
        description = "Represents an error in the input of a mutation."
class AccountError(Error):
    # Account mutation error; optionally points at the address type involved.
    code = AccountErrorCode(description="The error code.", required=True)
    address_type = AddressTypeEnum(
        description="A type of address that causes the error.", required=False
    )
class AppError(Error):
    # App mutation error; may carry the offending permissions.
    code = AppErrorCode(description="The error code.", required=True)
    permissions = NonNullList(
        PermissionEnum,
        description="List of permissions which causes the error.",
        required=False,
    )
class AttributeError(Error):
    # NOTE: this class shadows the builtin AttributeError within this module;
    # it is a GraphQL error type, not the exception.
    code = AttributeErrorCode(description="The error code.", required=True)
class StaffError(AccountError):
    # Staff-management error; extends AccountError with the permissions,
    # groups and users involved.
    permissions = NonNullList(
        PermissionEnum,
        description="List of permissions which causes the error.",
        required=False,
    )
    groups = NonNullList(
        graphene.ID,
        description="List of permission group IDs which cause the error.",
        required=False,
    )
    users = NonNullList(
        graphene.ID,
        description="List of user IDs which causes the error.",
        required=False,
    )
class ChannelError(Error):
    # Channel mutation error; may carry the shipping zones involved.
    code = ChannelErrorCode(description="The error code.", required=True)
    shipping_zones = NonNullList(
        graphene.ID,
        description="List of shipping zone IDs which causes the error.",
        required=False,
    )
class CheckoutError(Error):
    """Checkout mutation error; may point at variants, lines or an address type."""
    code = CheckoutErrorCode(description="The error code.", required=True)
    variants = NonNullList(
        graphene.ID,
        # Typo fix: "varint IDs" -> "variant IDs" in the schema description.
        description="List of variant IDs which causes the error.",
        required=False,
    )
    lines = NonNullList(
        graphene.ID,
        description="List of line Ids which cause the error.",
        required=False,
    )
    address_type = AddressTypeEnum(
        description="A type of address that causes the error.", required=False
    )
class ProductWithoutVariantError(Error):
    # Error carrying the products involved; base for DiscountError/CollectionError.
    products = NonNullList(
        graphene.ID,
        description="List of products IDs which causes the error.",
    )
class DiscountError(ProductWithoutVariantError):
    # Discount mutation error; adds a code and the channels involved.
    code = DiscountErrorCode(description="The error code.", required=True)
    channels = NonNullList(
        graphene.ID,
        description="List of channels IDs which causes the error.",
        required=False,
    )
class ExportError(Error):
    # Error type carrying an ExportErrorCode.
    code = ExportErrorCode(description="The error code.", required=True)
class ExternalNotificationError(Error):
    # Error type carrying an ExternalNotificationTriggerErrorCode.
    code = ExternalNotificationTriggerErrorCode(
        description="The error code.", required=True
    )
class MenuError(Error):
    # Error type carrying a MenuErrorCode.
    code = MenuErrorCode(description="The error code.", required=True)
class OrderSettingsError(Error):
    # Error type carrying an OrderSettingsErrorCode.
    code = OrderSettingsErrorCode(description="The error code.", required=True)
class GiftCardSettingsError(Error):
    # Error type carrying a GiftCardSettingsErrorCode.
    code = GiftCardSettingsErrorCode(description="The error code.", required=True)
class MetadataError(Error):
    # Error type carrying a MetadataErrorCode.
    code = MetadataErrorCode(description="The error code.", required=True)
class OrderError(Error):
    # Order mutation error; may point at a warehouse, order lines, variants
    # or an address type.
    code = OrderErrorCode(description="The error code.", required=True)
    warehouse = graphene.ID(
        description="Warehouse ID which causes the error.",
        required=False,
    )
    order_lines = NonNullList(
        graphene.ID,
        description="List of order line IDs that cause the error.",
        required=False,
    )
    variants = NonNullList(
        graphene.ID,
        description="List of product variants that are associated with the error",
        required=False,
    )
    address_type = AddressTypeEnum(
        description="A type of address that causes the error.", required=False
    )
class InvoiceError(Error):
    # Error type carrying an InvoiceErrorCode.
    code = InvoiceErrorCode(description="The error code.", required=True)
class PermissionGroupError(Error):
    # Permission-group mutation error; may carry the permissions/users involved.
    code = PermissionGroupErrorCode(description="The error code.", required=True)
    permissions = NonNullList(
        PermissionEnum,
        description="List of permissions which causes the error.",
        required=False,
    )
    users = NonNullList(
        graphene.ID,
        description="List of user IDs which causes the error.",
        required=False,
    )
class ProductError(Error):
    # Product mutation error; may carry the attributes and attribute values involved.
    code = ProductErrorCode(description="The error code.", required=True)
    attributes = NonNullList(
        graphene.ID,
        description="List of attributes IDs which causes the error.",
        required=False,
    )
    values = NonNullList(
        graphene.ID,
        description="List of attribute values IDs which causes the error.",
        required=False,
    )
class CollectionError(ProductWithoutVariantError):
    # Collection mutation error; inherits the `products` field from its base.
    code = CollectionErrorCode(description="The error code.", required=True)
class ProductChannelListingError(ProductError):
    # Product channel-listing error; adds the channels and variants involved.
    channels = NonNullList(
        graphene.ID,
        description="List of channels IDs which causes the error.",
        required=False,
    )
    variants = NonNullList(
        graphene.ID,
        description="List of variants IDs which causes the error.",
        required=False,
    )
class CollectionChannelListingError(ProductError):
    # Collection channel-listing error; adds the channels involved.
    channels = NonNullList(
        graphene.ID,
        description="List of channels IDs which causes the error.",
        required=False,
    )
class BulkProductError(ProductError):
    # Bulk product mutation error; `index` locates the failing input item.
    index = graphene.Int(
        description="Index of an input list item that caused the error."
    )
    warehouses = NonNullList(
        graphene.ID,
        description="List of warehouse IDs which causes the error.",
        required=False,
    )
    channels = NonNullList(
        graphene.ID,
        description="List of channel IDs which causes the error.",
        required=False,
    )
class ShopError(Error):
    # Error type carrying a ShopErrorCode.
    code = ShopErrorCode(description="The error code.", required=True)
class ShippingError(Error):
    # Shipping mutation error; may carry the warehouses/channels involved.
    code = ShippingErrorCode(description="The error code.", required=True)
    warehouses = NonNullList(
        graphene.ID,
        description="List of warehouse IDs which causes the error.",
        required=False,
    )
    channels = NonNullList(
        graphene.ID,
        description="List of channels IDs which causes the error.",
        required=False,
    )
class PageError(Error):
    # Page mutation error; may carry the attributes and attribute values involved.
    code = PageErrorCode(description="The error code.", required=True)
    attributes = NonNullList(
        graphene.ID,
        description="List of attributes IDs which causes the error.",
        required=False,
    )
    values = NonNullList(
        graphene.ID,
        description="List of attribute values IDs which causes the error.",
        required=False,
    )
class PaymentError(Error):
    """Payment mutation error; may point at the variants involved."""
    code = PaymentErrorCode(description="The error code.", required=True)
    variants = NonNullList(
        graphene.ID,
        # Typo fix: "varint IDs" -> "variant IDs" in the schema description.
        description="List of variant IDs which causes the error.",
        required=False,
    )
class GiftCardError(Error):
    # Gift-card mutation error; may carry the offending tag values.
    code = GiftCardErrorCode(description="The error code.", required=True)
    tags = NonNullList(
        graphene.String,
        description="List of tag values that cause the error.",
        required=False,
    )
class PluginError(Error):
    # Error type carrying a PluginErrorCode.
    code = PluginErrorCode(description="The error code.", required=True)
class StockError(Error):
    # Error type carrying a StockErrorCode.
    code = StockErrorCode(description="The error code.", required=True)
class BulkStockError(ProductError):
    # Bulk stock mutation error; `index` locates the failing input item.
    index = graphene.Int(
        description="Index of an input list item that caused the error."
    )
class UploadError(Error):
    # Error type carrying an UploadErrorCode.
    code = UploadErrorCode(description="The error code.", required=True)
class WarehouseError(Error):
    # Error type carrying a WarehouseErrorCode.
    code = WarehouseErrorCode(description="The error code.", required=True)
class WebhookError(Error):
    # Error type carrying a WebhookErrorCode.
    code = WebhookErrorCode(description="The error code.", required=True)
class TranslationError(Error):
    # Error type carrying a TranslationErrorCode.
    code = TranslationErrorCode(description="The error code.", required=True)
class SeoInput(graphene.InputObjectType):
    # Optional SEO title/description pair accepted by mutations.
    title = graphene.String(description="SEO title.")
    description = graphene.String(description="SEO description.")
class Weight(graphene.ObjectType):
    # Unit + numeric value pair.
    unit = WeightUnitsEnum(description="Weight unit.", required=True)
    value = graphene.Float(description="Weight value.", required=True)
    class Meta:
        description = "Represents weight value in a specific weight unit."
class Image(graphene.ObjectType):
    url = graphene.String(required=True, description="The URL of the image.")
    alt = graphene.String(description="Alt text for an image.")
    class Meta:
        description = "Represents an image."
    @staticmethod
    def get_adjusted(image, alt, size, rendition_key_set, info):
        """Return Image adjusted with given size."""
        # When a size is requested, build a thumbnail URL; otherwise use the
        # original file's URL.
        if size:
            url = get_thumbnail(
                image_file=image,
                size=size,
                method="thumbnail",
                rendition_key_set=rendition_key_set,
            )
        else:
            url = image.url
        # Always return an absolute URI based on the current request.
        url = info.context.build_absolute_uri(url)
        return Image(url, alt)
class File(graphene.ObjectType):
    url = graphene.String(required=True, description="The URL of the file.")
    content_type = graphene.String(
        required=False, description="Content type of the file."
    )
    @staticmethod
    def resolve_url(root, info):
        # Join the stored relative path onto MEDIA_URL and absolutize it
        # against the current request.
        return info.context.build_absolute_uri(urljoin(settings.MEDIA_URL, root.url))
class PriceInput(graphene.InputObjectType):
    # Currency code + positive amount pair.
    currency = graphene.String(description="Currency code.", required=True)
    amount = PositiveDecimal(description="Amount of money.", required=True)
class PriceRangeInput(graphene.InputObjectType):
    # Inclusive price range (both bounds optional).
    gte = graphene.Float(description="Price greater than or equal to.", required=False)
    lte = graphene.Float(description="Price less than or equal to.", required=False)
class DateRangeInput(graphene.InputObjectType):
    # Inclusive date range (both bounds optional).
    gte = graphene.Date(description="Start date.", required=False)
    lte = graphene.Date(description="End date.", required=False)
class DateTimeRangeInput(graphene.InputObjectType):
    # Inclusive datetime range (both bounds optional).
    gte = graphene.DateTime(description="Start date.", required=False)
    lte = graphene.DateTime(description="End date.", required=False)
class IntRangeInput(graphene.InputObjectType):
    # Inclusive integer range (both bounds optional).
    gte = graphene.Int(description="Value greater than or equal to.", required=False)
    lte = graphene.Int(description="Value less than or equal to.", required=False)
class TimePeriodInputType(graphene.InputObjectType):
    # Input counterpart of the TimePeriod output type below (same fields).
    amount = graphene.Int(description="The length of the period.", required=True)
    type = TimePeriodTypeEnum(description="The type of the period.", required=True)
class TaxType(graphene.ObjectType):
    """Representation of tax types fetched from tax gateway."""
    description = graphene.String(description="Description of the tax type.")
    tax_code = graphene.String(
        description="External tax code used to identify given tax group."
    )
class Job(graphene.Interface):
    # Interface shared by asynchronous job types: status + timestamps + message.
    status = JobStatusEnum(description="Job status.", required=True)
    created_at = graphene.DateTime(
        description="Created date time of job in ISO 8601 format.", required=True
    )
    updated_at = graphene.DateTime(
        description="Date time of job last update in ISO 8601 format.", required=True
    )
    message = graphene.String(description="Job message.")
    @classmethod
    @traced_resolver
    def resolve_type(cls, instance, _info):
        """Map a data object to a Graphene type."""
        # NOTE: the mapping is currently empty, so this resolves to None
        # until model -> type entries are added.
        MODEL_TO_TYPE_MAP = {
            # <DjangoModel>: <GrapheneType>
        }
        return MODEL_TO_TYPE_MAP.get(type(instance))
class TimePeriod(graphene.ObjectType):
    # Output counterpart of TimePeriodInputType above (same fields).
    amount = graphene.Int(description="The length of the period.", required=True)
    type = TimePeriodTypeEnum(description="The type of the period.", required=True)
| 29.788382 | 87 | 0.691392 |
acf29d39d0ad31072f084b98cbb67e5ab1798a27 | 4,196 | py | Python | borrow table.py | DonnC/RabbitGUI | 3f5cc2620ae581d8666a7b0c934dfc1aabf1b727 | [
"MIT"
] | 7 | 2020-01-29T05:01:32.000Z | 2021-05-19T13:49:43.000Z | borrow table.py | DonnC/RabbitGUI | 3f5cc2620ae581d8666a7b0c934dfc1aabf1b727 | [
"MIT"
] | 1 | 2020-02-05T18:15:55.000Z | 2020-09-13T16:18:42.000Z | borrow table.py | DonnC/RabbitGUI | 3f5cc2620ae581d8666a7b0c934dfc1aabf1b727 | [
"MIT"
] | null | null | null | # # table of available rabbit data in the database
from PySimpleGUI import *
import pyperclip
from pprint import pprint
from settings import *
matrix = [
["John", "Male", "grey-white pathces", "new zealand white", "1", "zimbabwe", "20 June 2019", "28 June 2019", "Healthy rabbit ready to mate"],
["VaMatema", "Female", "grey", "new zealand brown", "2", "russia", "01 June 2019", "18 June 2019", "Healthy rabbit"],
["Murambinda", "Male", "white pathces", "Germany", "3", "london", "18 June 2019", "20 June 2019", "need thorough inspection"],
["Farai", "Male", "brown", "new zealand", "2", "zimbabwe", "20 June 2019", "27 June 2019", "borrowed rabbit"],
["Mr Kudai", "Female", "white", "new zealand black", "1", "zimbabwe", "02 June 2019", "17 June 2019", "Healthy"],
["Mutambandiro", "Male", "black", "new zealand white", "2", "russia", "09 June 2019", "12 June 2019", "Healthy, need recheck"],
["Jangano Kufa", "Female", "black pathces", "new zealand white", "1", "london", "20 June 2019", "23 June 2019", "Healthy rabbit"],
["Lloyd Guru", "Female", "black-white pathces", "new zealand white", "1", "zimbabwe", "21 June 2019", "30 June 2019", "need to inspect"]]
# Column headers; order must match the columns of each row in `matrix`.
head = ['Owner', 'Sex', 'Color', 'Breed', 'Quantity', 'Location', 'Borrowed', 'Return', 'Notes']
#pprint(matrix)
# Right-click menu attached to the table; entries become loop events below.
table_right_click_opt = [
    '&Right',
    [
        'Copy',
        'Delete',
        'Undo'
    ]
]
img = "donn.png"
# Frame showing the rabbit identification picture.
image_frame_layout = [
    [Text("Rabbit image")],
    [Image(filename=DEFAULT_RABBIT_PIC, size=(300, 300), tooltip="rabbit identification image", key="_RABBIT_IMAGE_")]
]
# Frame wrapping the data table; `matrix`/`head` supply its contents.
table_frame = [
    [
        Table(values=matrix,
              headings=head,
              num_rows=10,
              display_row_numbers=True,
              enable_events=True,
              font=("Berlin Sans FB", 11),
              alternating_row_color='lightblue',
              key='_BORROW_TABLE_',
              size=(700, 100),
              vertical_scroll_only=False,
              right_click_menu=table_right_click_opt)
    ]
]
layout = [
    [Text("\t\t\t\t"), Frame("", layout=image_frame_layout, size=(200, 200), key="_RABBIT_IMAGE_FRAME_")],
    [Frame('Borrowed Rabbits Data', table_frame, title_color='grey', font=("Elephant", 15), size=(800, 200))]
]
#layout = [[Column(layout1)]]
window = Window('Borrowed Rabbits', layout,
                font=('Helvetica', 15),
                resizable=True,
                ).Finalize()
# The Event Loop: react to table right-click menu actions until the window closes.
while True:
    event, values = window.Read()
    if event is None or event == "Exit":
        break
    if event == 'Delete':
        # Remove the selected row from the data and refresh the table.
        del_index = values.get("_BORROW_TABLE_")
        if len(del_index) > 0:
            del_index = del_index[0]
            # Remember the removed row so a later 'Undo' can restore it.
            # (The original used a module-level `global` statement here, which
            # is a no-op at top level and has been removed.)
            deleted_row = matrix.pop(del_index)
            deleted_owner = deleted_row[0].title()
            print("delete: ", del_index)
            window.Element("_BORROW_TABLE_").Update(values=matrix)
            PopupAutoClose(f"{deleted_owner} deleted!")
    if event == "Copy":
        copy_index = values.get("_BORROW_TABLE_")
        if len(copy_index) > 0:
            copy_index = copy_index[0]
            # BUG FIX: read the row in place. The original code used
            # matrix.pop(copy_index), which silently deleted the copied row
            # from the underlying data while the table still displayed it.
            row_list = matrix[copy_index]
            # One field per line, trailing newline after the last field
            # (same format as the original += loop).
            copy_string = "\n".join(row_list) + "\n"
            pyperclip.copy(copy_string)
            print("copied: ", copy_string)
            PopupAutoClose("Data copied to Clipboard!")
    if event == "Undo":
        # Re-insert the most recently deleted entry at the end of the table.
        try:
            if deleted_row:
                matrix.append(deleted_row)
                window.Element("_BORROW_TABLE_").Update(values=matrix)
                PopupAutoClose("Delete Operation revoked!")
                # Clear the slot so the same row cannot be re-added twice.
                deleted_row = None
            else:
                PopupQuickMessage("No action to 'UNDO'", font=("Calibri", 12))
        except NameError:
            # 'Undo' chosen before any 'Delete' -- deleted_row was never bound.
            PopupQuickMessage("No action to 'UNDO'", font=("Calibri", 12))
    print(event, values)
acf29d9ebc55704b24492582387c6ecc2480a12b | 350 | py | Python | scaffolder/templates/django/forms.py | javidgon/wizard | a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43 | [
"MIT"
] | null | null | null | scaffolder/templates/django/forms.py | javidgon/wizard | a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43 | [
"MIT"
] | null | null | null | scaffolder/templates/django/forms.py | javidgon/wizard | a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.forms import ModelForm
from .models import {% for model in app.models %}{{ model.name }}{% if not loop.last %}, {% endif %}{% endfor %}{% for model in app.models %}
class {{ model.name }}Form(ModelForm):
    # Auto-generated ModelForm exposing every field of {{ model.name }}.
    class Meta:
        model = {{ model.name }}
        fields = '__all__'
{% endfor %}
| 25 | 141 | 0.634286 |
acf29e40964cf5b3fa4d845900efe073c7db1a47 | 84 | py | Python | playgrounds/keras_models/features/multi_dector/workers/yolov2.py | enohoxha/AxonPy | 2c89200cdc1818cdaa4dc9b0fbec68036cb11a4b | [
"Apache-2.0"
] | 1 | 2019-04-03T07:42:43.000Z | 2019-04-03T07:42:43.000Z | playgrounds/keras_models/features/multi_dector/workers/yolov2.py | enohoxha/Axonpy | 2c89200cdc1818cdaa4dc9b0fbec68036cb11a4b | [
"Apache-2.0"
] | null | null | null | playgrounds/keras_models/features/multi_dector/workers/yolov2.py | enohoxha/Axonpy | 2c89200cdc1818cdaa4dc9b0fbec68036cb11a4b | [
"Apache-2.0"
] | null | null | null | from playgrounds.core.workers import Worker
class YOLOV2Worker(Worker):
    """Placeholder YOLOv2 worker for the multi-detector feature; no behavior yet."""
    pass
| 12 | 43 | 0.77381 |
acf29ea0527fe5209e8509841b8811d6992301da | 12,711 | py | Python | doc/examples/resting_state_fmri.py | Eric89GXL/nitime | 34adc1ddd6b93255764160057c1ea653426b36b8 | [
"BSD-3-Clause"
] | 1 | 2022-03-23T21:37:39.000Z | 2022-03-23T21:37:39.000Z | doc/examples/resting_state_fmri.py | mluessi/nitime | 0415a68b092962d06b43986ca1931090b2787e61 | [
"BSD-3-Clause"
] | null | null | null | doc/examples/resting_state_fmri.py | mluessi/nitime | 0415a68b092962d06b43986ca1931090b2787e61 | [
"BSD-3-Clause"
] | null | null | null | """
.. _resting-state:
===============================
Coherency analysis of fMRI data
===============================
The fMRI data-set analyzed in the following examples was contributed by Beth
Mormino. The data is taken from a single subject in a "resting-state" scan, in
which subjects are fixating on a cross and maintaining alert wakefulness, but
not performing any other behavioral task.
The data was pre-processed and time-series of BOLD responses were extracted
from different regions of interest (ROIs) in the brain. The data is organized
in csv file, where each column corresponds to an ROI and each row corresponds
to a sampling point.
In the following, we will demonstrate some simple time-series analysis and
visualization techniques which can be applied to this kind of data.
We start by importing the necessary modules/functions, defining the
sampling_interval of the data (TR, or repetition time) and the frequency band
of interest:
"""
import os
#Import from other libraries:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import csv2rec
import nitime
#Import the time-series objects:
from nitime.timeseries import TimeSeries
#Import the analysis objects:
from nitime.analysis import CorrelationAnalyzer, CoherenceAnalyzer
#Import utility functions:
from nitime.utils import percent_change
from nitime.viz import drawmatrix_channels, drawgraph_channels, plot_xcorr
#This information (the sampling interval) has to be known in advance:
TR = 1.89  # repetition time (sampling interval) in seconds
f_lb = 0.02  # lower bound of the frequency band of interest, in Hz
f_ub = 0.15  # upper bound of the frequency band of interest, in Hz
"""
We use csv2rec to read the data in from file to a recarray:
"""
data_path = os.path.join(nitime.__path__[0], 'data')
data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
"""
This data structure contains in its dtype a field 'names', which contains the
first row in each column. In this case, that is the labels of the ROIs from
which the data in each column was extracted. The data from the recarray is
extracted into a 'standard' array and, for each ROI, it is normalized to
percent signal change, using the utils.percent_change function.
"""
#Extract information:
# The recarray's dtype names are the ROI labels (first CSV row).
roi_names = np.array(data_rec.dtype.names)
n_samples = data_rec.shape[0]
#Make an empty container for the data
data = np.zeros((len(roi_names), n_samples))
# Copy each ROI's column into a row of the (n_rois, n_samples) array.
for n_idx, roi in enumerate(roi_names):
    data[n_idx] = data_rec[roi]
#Normalize the data:
data = percent_change(data)
"""
We initialize a TimeSeries object from the normalized data:
"""
T = TimeSeries(data, sampling_interval=TR)
# Attach the ROI labels so downstream visualizations can label channels.
T.metadata['roi'] = roi_names
"""
First, we examine the correlations between the time-series extracted from
different parts of the brain. The following script extracts the data (using the
draw_matrix function, displaying the correlation matrix with the ROIs labeled.
"""
#Initialize the correlation analyzer
C = CorrelationAnalyzer(T)
#Display the correlation matrix
# color_anchor=0 centers the colormap at zero (positive=red, negative=blue).
fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.], color_anchor=0)
"""
.. image:: fig/resting_state_fmri_01.png
Notice that setting the color_anchor input to this function to 0 makes sure
that the center of the color map (here a blue => white => red) is at 0. In this
case, positive values will be displayed as red and negative values in blue.
We notice that the left caudate nucleus (labeled 'lcau') has an interesting
pattern of correlations. It has a high correlation with both the left putamen
('lput', which is located nearby) and also with the right caudate nucleus
('rcau'), which is the homologous region in the other hemisphere. Are these two
correlation values related to each other? The right caudate and left putamen
seem to have a moderately low correlation value. One way to examine this
question is by looking at the temporal structure of the cross-correlation
functions. In order to do that, from the CorrelationAnalyzer object, we extract
the normalized cross-correlation function. This results in another TimeSeries`
object, which contains the full time-series of the cross-correlation between
any combination of time-series from the different channels in the time-series
object. We can pass the resulting object, together with a list of indices to
the viz.plot_xcorr function, which visualizes the chosen combinations of
series:
"""
xc = C.xcorr_norm  # normalized cross-correlation for every channel pair
# Row indices of the four ROIs of interest (assumes each label occurs once
# in roi_names -- TODO confirm against the CSV header):
idx_lcau = np.where(roi_names == 'lcau')[0]
idx_rcau = np.where(roi_names == 'rcau')[0]
idx_lput = np.where(roi_names == 'lput')[0]
idx_rput = np.where(roi_names == 'rput')[0]
# Plot the lcau/rcau and lcau/lput cross-correlation traces.
fig02 = plot_xcorr(xc,
                   ((idx_lcau, idx_rcau),
                    (idx_lcau, idx_lput)),
                   line_labels=['rcau', 'lput'])
"""
.. image:: fig/resting_state_fmri_02.png
Note that the correlation is normalized, so that the value of the
cross-correlation functions at the zero-lag point (time = 0 sec) is equal to
the Pearson correlation between the two time-series. We observe that there are
correlations larger than the zero-lag correlation occurring at other
time-points preceding and following the zero-lag. This could arise because of a
more complex interplay of activity between two areas, which is not captured by
the correlation and can also arise because of differences in the
characteristics of the HRF in the two ROIs. One method of analysis which can
mitigate these issues is analysis of coherency between time-series
[Sun2005]_. This analysis computes an equivalent of the correlation in the
frequency domain:
.. math::
R_{xy} (\lambda) = \frac{f_{xy}(\lambda)}
{\sqrt{f_{xx} (\lambda) \cdot f_{yy}(\lambda)}}
Because this is a complex number, this computation results in two
quantities. First, the magnitude of this number, also referred to as
"coherence":
.. math::
Coh_{xy}(\lambda) = |{R_{xy}(\lambda)}|^2 =
\frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}
This is a measure of the pairwise coupling between the two time-series. It can
vary between 0 and 1, with 0 being complete independence and 1 being complete
coupling. A time-series would have a coherence of 1 with itself, but not only:
since this measure is independent of the relative phase of the two time-series,
the coherence between a time-series and any phase-shifted version of itself
will also be equal to 1.
However, the relative phase is another quantity which can be derived from this
computation:
.. math::
\phi(\lambda) = arg [R_{xy} (\lambda)] = arg [f_{xy} (\lambda)]
This value can be used in order to infer which area is leading and which area
is lagging (according to the sign of the relative phase) and, can be used to
compute the temporal delay between activity in one ROI and the other.
First, let's look at the pair-wise coherence between all our ROIs. This can be
done by creating a CoherenceAnalyzer object.
"""
C = CoherenceAnalyzer(T)  # note: rebinds C, replacing the CorrelationAnalyzer
"""
Once this object is initialized with the TimeSeries object, the mid-frequency
of the frequency bands represented in the spectral decomposition of the
time-series can be accessed in the 'frequencies' attribute of the object. The
spectral resolution of this representation is the same one used in the
computation of the coherence.
Since the fMRI BOLD data contains data in frequencies which are not
physiologically relevant (presumably due to machine noise and fluctuations in
physiological measures unrelated to neural activity), we focus our analysis on
a band of frequencies between 0.02 and 0.15 Hz. This is easily achieved by
determining the values of the indices in :attr:`C.frequencies` and using those
indices in accessing the data in :attr:`C.coherence`. The coherence is then
averaged across all these frequency bands.
"""
# Indices of the frequency bins that fall strictly inside [f_lb, f_ub]:
freq_idx = np.where((C.frequencies > f_lb) * (C.frequencies < f_ub))[0]
"""
The C.coherence attribute is an ndarray of dimensions $n_{ROI}$ by $n_{ROI}$ by
$n_{frequencies}$.
We extract the coherence in that frequency band, average across the frequency
bands of interest and pass that to the visualization function:
"""
coh = np.mean(C.coherence[:, :, freq_idx], -1)  # Averaging on the last dimension
fig03 = drawmatrix_channels(coh, roi_names, size=[10., 10.], color_anchor=0)
"""
.. image:: fig/resting_state_fmri_03.png
We can also focus in on the ROIs we were interested in. This requires a little
bit more manipulation of the indices into the coherence matrix:
"""
idx = np.hstack([idx_lcau, idx_rcau, idx_lput, idx_rput])
idx1 = np.vstack([[idx[i]] * 4 for i in range(4)]).ravel()
idx2 = np.hstack(4 * [idx])
coh = C.coherence[idx1, idx2].reshape(4, 4, C.frequencies.shape[0])
"""
Extract the coherence and average across the same frequency bands as before:
"""
coh = np.mean(coh[:, :, freq_idx], -1) # Averaging on the last dimension
"""
Finally, in this case, we visualize the adjacency matrix, by creating a network
graph of these ROIs (this is done by using the function drawgraph_channels
which relies on `networkx <http://networkx.lanl.gov>`_):
"""
fig04 = drawgraph_channels(coh, roi_names[idx])
"""
.. image:: fig/resting_state_fmri_04.png
This shows us that there is a stronger connectivity between the left putamen and
the left caudate than between the homologous regions in the other
hemisphere. In particular, in contrast to the relatively high correlation
between the right caudate and the left caudate, there is a rather low coherence
between the time-series in these two regions, in this frequency range.
Note that the connectivity described by coherency (and other measures of
functional connectivity) could arise because of neural connectivity between the
two regions, but also due to a common blood supply, or common fluctuations in
other physiological measures which affect the BOLD signal measured in both
regions. In order to be able to differentiate these two options, we would have
to conduct a comparison between two different behavioral states that affect the
neural activity in the two regions, without affecting these common
physiological factors, such as common blood supply (for an in-depth discussion
of these issues, see [Silver2010]_). In this case, we will simply assume that
the connectivity matrix presented represents the actual neural connectivity
between these two brain regions.
We notice that there is indeed a stronger coherence between left putamen and the
left caudate than between the left caudate and the right caudate. Next, we
might ask whether the moderate coherence between the left putamen and the right
caudate can be accounted for by the coherence these two time-series share with
the time-series derived from the left caudate. This kind of question can be
answered using an analysis of partial coherency. For the time series $x$ and
$y$, the partial coherence, given a third time-series $r$, is defined as:
.. math::
Coh_{xy|r} = \frac{|{R_{xy}(\lambda) - R_{xr}(\lambda)
R_{ry}(\lambda)}|^2}{(1-|{R_{xr}}|^2)(1-|{R_{ry}}|^2)}
In this case, we extract the partial coherence between the three regions,
excluding common effects of the left caudate. In order to do that, we generate
the partial-coherence attribute of the :class:`CoherenceAnalyzer` object, while
indexing on the additional dimension which this object had (the coherence
between time-series $x$ and time-series $y$, *given* time series $r$):
"""
# For each of the 16 (row, column) pairs, condition on the left caudate:
idx3 = np.hstack(16 * [idx_lcau])
coh = C.coherence_partial[idx1, idx2, idx3].reshape(4, 4, C.frequencies.shape[0])
coh = np.mean(coh[:, :, freq_idx], -1)  # average over the band of interest
"""
Again, we visualize the result, using both the :func:`viz.drawgraph_channels`
and the :func:`drawmatrix_channels` functions:
"""
fig05 = drawgraph_channels(coh, roi_names[idx])
fig06 = drawmatrix_channels(coh, roi_names[idx], color_anchor=0)
"""
.. image:: fig/resting_state_fmri_05.png
.. image:: fig/resting_state_fmri_06.png
As can be seen, the resulting partial coherence between left putamen and right
caudate, given the activity in the left caudate is smaller than the coherence
between these two areas, suggesting that part of this coherence can be
explained by their common connection to the left caudate.
XXX Add description of calculation of temporal delay here.
We call plt.show() in order to display the figures:
"""
plt.show()  # render all of the figures created above
"""
.. [Sun2005] F.T. Sun and L.M. Miller and M. D'Esposito(2005). Measuring
temporal dynamics of functional networks using phase spectrum of
fMRI data. Neuroimage, 28: 227-37.
.. [Silver2010] M.A Silver, AN Landau, TZ Lauritzen, W Prinzmetal, LC
Robertson(2010) Isolating human brain functional connectivity associated
with a specific cognitive process, in Human Vision and Electronic Imaging
XV, edited by B.E. Rogowitz and T.N. Pappas, Proceedings of SPIE, Volume
7527, pp. 75270B-1 to 75270B-9
"""
| 34.634877 | 83 | 0.756667 |
acf29ed8eb2298df68e79d6bb19248e90918b3ed | 136 | py | Python | toolbox/plotting/__init__.py | jstemmler/toolbox | e6cc0ce544d38ac9acc4975da66ac53f6fd1bf8d | [
"MIT"
] | 1 | 2017-02-11T11:17:14.000Z | 2017-02-11T11:17:14.000Z | toolbox/plotting/__init__.py | jstemmler/toolbox | e6cc0ce544d38ac9acc4975da66ac53f6fd1bf8d | [
"MIT"
] | null | null | null | toolbox/plotting/__init__.py | jstemmler/toolbox | e6cc0ce544d38ac9acc4975da66ac53f6fd1bf8d | [
"MIT"
] | null | null | null | __author__ = 'Jayson Stemmler'
__created__ = "5/13/15 12:41 PM"
import boxplots
import maps
import series
from windrose import windrose | 19.428571 | 32 | 0.794118 |
acf29ed920fc3afb149cf9dbe1b3781e5529fd7d | 29,767 | py | Python | amplify/backend/function/iamxawswrangler/lib/python/pg8000/dbapi.py | cristian-popa/s3-object-lambda-workshop | 6be64f7bbe99521cef4797044260d1c9881385ae | [
"MIT-0"
] | 2 | 2021-10-24T01:01:08.000Z | 2022-01-12T13:23:44.000Z | amplify/backend/function/iamxawswrangler/lib/python/pg8000/dbapi.py | cristian-popa/s3-object-lambda-workshop | 6be64f7bbe99521cef4797044260d1c9881385ae | [
"MIT-0"
] | null | null | null | amplify/backend/function/iamxawswrangler/lib/python/pg8000/dbapi.py | cristian-popa/s3-object-lambda-workshop | 6be64f7bbe99521cef4797044260d1c9881385ae | [
"MIT-0"
] | 3 | 2021-10-24T01:01:01.000Z | 2021-11-29T23:13:02.000Z | from datetime import date as Date, datetime as Datetime, time as Time
from itertools import count, islice
from time import localtime
from warnings import warn
import pg8000
from pg8000.converters import (
BIGINT,
BOOLEAN,
BOOLEAN_ARRAY,
BYTES,
CHAR,
CHAR_ARRAY,
DATE,
FLOAT,
FLOAT_ARRAY,
INET,
INT2VECTOR,
INTEGER,
INTEGER_ARRAY,
INTERVAL,
JSON,
JSONB,
MACADDR,
NAME,
NAME_ARRAY,
NULLTYPE,
NUMERIC,
NUMERIC_ARRAY,
OID,
PGInterval,
STRING,
TEXT,
TEXT_ARRAY,
TIME,
TIMESTAMP,
TIMESTAMPTZ,
UNKNOWN,
UUID_TYPE,
VARCHAR,
VARCHAR_ARRAY,
XID,
)
from pg8000.core import CoreConnection
from pg8000.exceptions import DatabaseError, Error, InterfaceError
from ._version import get_versions
# Resolve the package version from the generated _version module, then drop
# the helper so it is not exported as part of this module's namespace.
__version__ = get_versions()["version"]
del get_versions
# Copyright (c) 2007-2009, Mathieu Fenniak
# Copyright (c) The Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
ROWID = OID
apilevel = "2.0"
"""The DBAPI level supported, currently "2.0".
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
threadsafety = 1
"""Integer constant stating the level of thread safety the DBAPI interface
supports. This DBAPI module supports sharing of the module only. Connections
and cursors my not be shared between threads. This gives pg8000 a threadsafety
value of 1.
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
paramstyle = "format"
BINARY = bytes
def PgDate(year, month, day):
    """Construct an object holding a date value.

    This function is part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.

    :param year: four-digit year
    :param month: month (1-12)
    :param day: day of the month (1-31)
    :rtype: :class:`datetime.date`
    """
    return Date(year, month, day)
def PgTime(hour, minute, second):
    """Return a :class:`datetime.time` holding the given time of day.

    Part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.

    :rtype: :class:`datetime.time`
    """
    return Time(hour=hour, minute=minute, second=second)
def Timestamp(year, month, day, hour, minute, second):
    """Return a :class:`datetime.datetime` for the given date and time.

    Part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.

    :rtype: :class:`datetime.datetime`
    """
    moment = Datetime(year, month, day, hour, minute, second)
    return moment
def DateFromTicks(ticks):
    """Return a :class:`datetime.date` for *ticks* seconds since the epoch,
    interpreted in local time.

    Part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.

    :rtype: :class:`datetime.date`
    """
    tm = localtime(ticks)
    return Date(tm.tm_year, tm.tm_mon, tm.tm_mday)
def TimeFromTicks(ticks):
    """Construct an object holding a time value from the given ticks value
    (number of seconds since the epoch), interpreted in local time.
    This function is part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.
    :rtype: :class:`datetime.time`
    """
    return Time(*localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
    """Return a :class:`datetime.datetime` built from the first six fields of
    the local ``struct_time`` for *ticks* (seconds since the epoch).

    Part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.

    :rtype: :class:`datetime.datetime`
    """
    year, month, day, hour, minute, second = localtime(ticks)[:6]
    return Timestamp(year, month, day, hour, minute, second)
def Binary(value):
    """DB-API constructor for an object holding binary data.

    pg8000 transmits ``bytes`` values directly, so the *value* is simply
    returned unchanged.

    Part of the `DBAPI 2.0 specification
    <http://www.python.org/dev/peps/pep-0249/>`_.
    """
    return value
def connect(
    user,
    host="localhost",
    database=None,
    port=5432,
    password=None,
    source_address=None,
    unix_sock=None,
    ssl_context=None,
    timeout=None,
    tcp_keepalive=True,
    application_name=None,
    replication=None,
):
    """Create a DB-API 2.0 :class:`Connection` to a PostgreSQL server.

    Every argument is forwarded unchanged to the :class:`Connection`
    constructor; see that class for the parameter semantics.
    """
    options = dict(
        host=host,
        database=database,
        port=port,
        password=password,
        source_address=source_address,
        unix_sock=unix_sock,
        ssl_context=ssl_context,
        timeout=timeout,
        tcp_keepalive=tcp_keepalive,
        application_name=application_name,
        replication=replication,
    )
    return Connection(user, **options)
apilevel = "2.0"
"""The DBAPI level supported, currently "2.0".
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
threadsafety = 1
"""Integer constant stating the level of thread safety the DBAPI interface
supports. This DBAPI module supports sharing of the module only. Connections
and cursors my not be shared between threads. This gives pg8000 a threadsafety
value of 1.
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
paramstyle = "format"
def convert_paramstyle(style, query, args):
    """Rewrite *query* from the given DB-API paramstyle into PostgreSQL's
    native numeric ``$n`` placeholders.

    :param style: one of ``qmark``, ``numeric``, ``named``, ``format`` or
        ``pyformat``.
    :param query: the SQL text containing placeholders in that style.
    :param args: a sequence (positional styles) or mapping (named styles) of
        parameter values.
    :returns: a ``(query, vals)`` tuple: the rewritten query and the values
        as a positional sequence matching the ``$n`` placeholders.
    :raises InterfaceError: for an unsupported ``%`` escape in format-style
        queries.
    """
    # I don't see any way to avoid scanning the query string char by char,
    # so we might as well take that careful approach and create a
    # state-based scanner. We'll use int variables for the state.
    OUTSIDE = 0  # outside quoted string
    INSIDE_SQ = 1  # inside single-quote string '...'
    INSIDE_QI = 2  # inside quoted identifier "..."
    INSIDE_ES = 3  # inside escaped single-quote string, E'...'
    INSIDE_PN = 4  # inside parameter name eg. :name
    INSIDE_CO = 5  # inside inline comment eg. --
    in_quote_escape = False
    in_param_escape = False
    placeholders = []
    output_query = []
    param_idx = map(lambda x: "$" + str(x), count(1))
    state = OUTSIDE
    prev_c = None
    for i, c in enumerate(query):
        # One character of lookahead (None at end of string).
        if i + 1 < len(query):
            next_c = query[i + 1]
        else:
            next_c = None
        if state == OUTSIDE:
            if c == "'":
                output_query.append(c)
                if prev_c == "E":
                    state = INSIDE_ES
                else:
                    state = INSIDE_SQ
            elif c == '"':
                output_query.append(c)
                state = INSIDE_QI
            elif c == "-":
                output_query.append(c)
                if prev_c == "-":
                    state = INSIDE_CO
            elif style == "qmark" and c == "?":
                output_query.append(next(param_idx))
            elif (
                style == "numeric" and c == ":" and next_c not in ":=" and prev_c != ":"
            ):
                # Treat : as beginning of parameter name if and only
                # if it's the only : around
                # Needed to properly process type conversions
                # i.e. sum(x)::float
                output_query.append("$")
            elif style == "named" and c == ":" and next_c not in ":=" and prev_c != ":":
                # Same logic for : as in numeric parameters
                state = INSIDE_PN
                placeholders.append("")
            elif style == "pyformat" and c == "%" and next_c == "(":
                state = INSIDE_PN
                placeholders.append("")
            elif style in ("format", "pyformat") and c == "%":
                # A bare %s in a pyformat query downgrades the style to
                # positional "format" handling for the remainder of the scan.
                style = "format"
                if in_param_escape:
                    in_param_escape = False
                    output_query.append(c)
                else:
                    if next_c == "%":
                        in_param_escape = True
                    elif next_c == "s":
                        state = INSIDE_PN
                        output_query.append(next(param_idx))
                    else:
                        raise InterfaceError(
                            "Only %s and %% are supported in the query."
                        )
            else:
                output_query.append(c)
        elif state == INSIDE_SQ:
            if c == "'":
                if in_quote_escape:
                    in_quote_escape = False
                else:
                    if next_c == "'":
                        in_quote_escape = True
                    else:
                        state = OUTSIDE
            output_query.append(c)
        elif state == INSIDE_QI:
            if c == '"':
                state = OUTSIDE
            output_query.append(c)
        elif state == INSIDE_ES:
            if c == "'" and prev_c != "\\":
                # check for escaped single-quote
                state = OUTSIDE
            output_query.append(c)
        elif state == INSIDE_PN:
            if style == "named":
                placeholders[-1] += c
                if next_c is None or (not next_c.isalnum() and next_c != "_"):
                    state = OUTSIDE
                    # Re-use the same $n for a repeated parameter name.
                    try:
                        pidx = placeholders.index(placeholders[-1], 0, -1)
                        output_query.append("$" + str(pidx + 1))
                        del placeholders[-1]
                    except ValueError:
                        output_query.append("$" + str(len(placeholders)))
            elif style == "pyformat":
                if prev_c == ")" and c == "s":
                    state = OUTSIDE
                    # Re-use the same $n for a repeated parameter name.
                    try:
                        pidx = placeholders.index(placeholders[-1], 0, -1)
                        output_query.append("$" + str(pidx + 1))
                        del placeholders[-1]
                    except ValueError:
                        output_query.append("$" + str(len(placeholders)))
                elif c in "()":
                    pass
                else:
                    placeholders[-1] += c
            elif style == "format":
                state = OUTSIDE
        elif state == INSIDE_CO:
            output_query.append(c)
            if c == "\n":
                state = OUTSIDE
        prev_c = c
    if style in ("numeric", "qmark", "format"):
        vals = args
    else:
        # Named styles: order the values to match the emitted $n placeholders.
        vals = tuple(args[p] for p in placeholders)
    return "".join(output_query), vals
class Cursor:
    """A DB-API 2.0 cursor, created via :meth:`Connection.cursor`.

    Holds the result context of the most recently executed statement and
    iterates over its rows.
    """
    def __init__(self, connection):
        self._c = connection  # owning Connection; set to None by close()
        self.arraysize = 1  # default number of rows for fetchmany()
        self._context = None  # result context of the last statement
        self._row_iter = None  # iterator over the current result rows
        self._input_oids = None  # parameter OIDs set via setinputsizes()
    @property
    def connection(self):
        # DB-API extension: expose the owning connection (warns per PEP 249).
        warn("DB-API extension cursor.connection used", stacklevel=3)
        return self._c
    @property
    def rowcount(self):
        # Rows produced/affected by the last statement, or -1 before any
        # statement has been executed.
        context = self._context
        if context is None:
            return -1
        return context.row_count
    @property
    def description(self):
        # Sequence of 7-item column descriptions (name, type_code, then five
        # None placeholders), or None when there is no result set.
        context = self._context
        if context is None:
            return None
        row_desc = context.columns
        if row_desc is None:
            return None
        if len(row_desc) == 0:
            return None
        columns = []
        for col in row_desc:
            columns.append((col["name"], col["type_oid"], None, None, None, None, None))
        return columns
    ##
    # Executes a database operation. Parameters may be provided as a sequence
    # or mapping and will be bound to variables in the operation.
    # <p>
    # Stability: Part of the DBAPI 2.0 specification.
    def execute(self, operation, args=(), stream=None):
        """Executes a database operation. Parameters may be provided as a
        sequence, or as a mapping, depending upon the value of
        :data:`pg8000.paramstyle`.
        This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        :param operation:
            The SQL statement to execute.
        :param args:
            If :data:`paramstyle` is ``qmark``, ``numeric``, or ``format``,
            this argument should be an array of parameters to bind into the
            statement. If :data:`paramstyle` is ``named``, the argument should
            be a dict mapping of parameters. If the :data:`paramstyle` is
            ``pyformat``, the argument value may be either an array or a
            mapping.
        :param stream: This is a pg8000 extension for use with the PostgreSQL
            `COPY
            <http://www.postgresql.org/docs/current/static/sql-copy.html>`_
            command. For a COPY FROM the parameter must be a readable file-like
            object, and for COPY TO it must be writable.
        .. versionadded:: 1.9.11
        """
        try:
            # Implicitly open a transaction unless autocommit is enabled.
            if not self._c.in_transaction and not self._c.autocommit:
                self._c.execute_unnamed("begin transaction")
            statement, vals = convert_paramstyle(paramstyle, operation, args)
            self._context = self._c.execute_unnamed(
                statement, vals=vals, input_oids=self._input_oids, stream=stream
            )
            self._row_iter = iter(self._context.rows)
            # setinputsizes() applies to a single execute only.
            self._input_oids = None
        except AttributeError as e:
            # Distinguish a closed cursor/connection from a genuine bug.
            if self._c is None:
                raise InterfaceError("Cursor closed")
            elif self._c._sock is None:
                raise InterfaceError("connection is closed")
            else:
                raise e
        # NOTE(review): input_types is not read anywhere else in this class --
        # confirm whether external code relies on it.
        self.input_types = []
    def executemany(self, operation, param_sets):
        """Prepare a database operation, and then execute it against all
        parameter sequences or mappings provided.
        This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        :param operation:
            The SQL statement to execute
        :param parameter_sets:
            A sequence of parameters to execute the statement with. The values
            in the sequence should be sequences or mappings of parameters, the
            same as the args argument of the :meth:`execute` method.
        """
        rowcounts = []
        input_oids = self._input_oids
        for parameters in param_sets:
            # Re-apply the input OIDs for every execution, since execute()
            # clears them after each statement.
            self._input_oids = input_oids
            self.execute(operation, parameters)
            rowcounts.append(self._context.row_count)
        # NOTE(review): assumes param_sets is non-empty; an empty sequence
        # would leave self._context as None here -- confirm callers.
        self._context.row_count = -1 if -1 in rowcounts else sum(rowcounts)
    def callproc(self, procname, parameters=None):
        """Call the stored procedure *procname* with the given *parameters*
        sequence, via a generated ``CALL`` statement.

        DB-API 2.0 optional extension.
        """
        args = [] if parameters is None else parameters
        operation = "CALL " + procname + "(" + ", ".join(["%s" for _ in args]) + ")"
        try:
            statement, vals = convert_paramstyle("format", operation, args)
            self._context = self._c.execute_unnamed(statement, vals=vals)
            self._row_iter = iter(self._context.rows)
        except AttributeError as e:
            if self._c is None:
                raise InterfaceError("Cursor closed")
            elif self._c._sock is None:
                raise InterfaceError("connection is closed")
            else:
                raise e
    def fetchone(self):
        """Fetch the next row of a query result set.
        This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        :returns:
            A row as a sequence of field values, or ``None`` if no more rows
            are available.
        """
        try:
            return next(self)
        except StopIteration:
            return None
        except TypeError:
            raise ProgrammingError("attempting to use unexecuted cursor")
    def __iter__(self):
        """A cursor object is iterable to retrieve the rows from a query.
        This is a DBAPI 2.0 extension.
        """
        return self
    def __next__(self):
        # Advance the row iterator, translating internal failure modes into
        # DB-API ProgrammingError where appropriate.
        try:
            return next(self._row_iter)
        except AttributeError:
            # _row_iter is still None: nothing has been executed yet.
            if self._context is None:
                raise ProgrammingError("A query hasn't been issued.")
            else:
                raise
        except StopIteration as e:
            if self._context is None:
                raise ProgrammingError("A query hasn't been issued.")
            elif len(self._context.columns) == 0:
                raise ProgrammingError("no result set")
            else:
                raise e
    def fetchmany(self, num=None):
        """Fetches the next set of rows of a query result.
        This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        :param size:
            The number of rows to fetch when called. If not provided, the
            :attr:`arraysize` attribute value is used instead.
        :returns:
            A sequence, each entry of which is a sequence of field values
            making up a row. If no more rows are available, an empty sequence
            will be returned.
        """
        try:
            return tuple(islice(self, self.arraysize if num is None else num))
        except TypeError:
            raise ProgrammingError("attempting to use unexecuted cursor")
    def fetchall(self):
        """Fetches all remaining rows of a query result.
        This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        :returns:
            A sequence, each entry of which is a sequence of field values
            making up a row.
        """
        try:
            return tuple(self)
        except TypeError:
            raise ProgrammingError("attempting to use unexecuted cursor")
    def close(self):
        """Closes the cursor.
        This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        # Dropping the connection reference is what later raises
        # InterfaceError("Cursor closed") from execute().
        self._c = None
    def setinputsizes(self, *sizes):
        """This method is part of the `DBAPI 2.0 specification"""
        # Each size may be an OID (int) or a Python type; unknown types map
        # to the UNKNOWN OID. The OIDs apply to the next execute() only.
        oids = []
        for size in sizes:
            if isinstance(size, int):
                oid = size
            else:
                try:
                    oid, _ = self._c.py_types[size]
                except KeyError:
                    oid = pg8000.converters.UNKNOWN
            oids.append(oid)
        self._input_oids = oids
    def setoutputsize(self, size, column=None):
        """This method is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_, however, it is not
        implemented by pg8000.
        """
        pass
class Connection(CoreConnection):
    """DB-API 2.0 connection, extending :class:`pg8000.core.CoreConnection`
    with cursors, transaction control and two-phase-commit (TPC) support.
    """
    # DBAPI Extension: supply exceptions as attributes on the connection
    Warning = property(lambda self: self._getError(Warning))
    Error = property(lambda self: self._getError(Error))
    InterfaceError = property(lambda self: self._getError(InterfaceError))
    DatabaseError = property(lambda self: self._getError(DatabaseError))
    OperationalError = property(lambda self: self._getError(OperationalError))
    IntegrityError = property(lambda self: self._getError(IntegrityError))
    InternalError = property(lambda self: self._getError(InternalError))
    ProgrammingError = property(lambda self: self._getError(ProgrammingError))
    NotSupportedError = property(lambda self: self._getError(NotSupportedError))
    def _getError(self, error):
        # Emit the PEP 249 "DB-API extension" warning before returning the
        # exception class accessed through the connection attribute.
        warn("DB-API extension connection.%s used" % error.__name__, stacklevel=3)
        return error
    def cursor(self):
        """Creates a :class:`Cursor` object bound to this
        connection.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        return Cursor(self)
    def commit(self):
        """Commits the current database transaction.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        self.execute_unnamed("commit")
    def rollback(self):
        """Rolls back the current database transaction.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        # No-op when there is nothing to roll back.
        if not self.in_transaction:
            return
        self.execute_unnamed("rollback")
    def xid(self, format_id, global_transaction_id, branch_qualifier):
        """Create a Transaction IDs (only global_transaction_id is used in pg)
        format_id and branch_qualifier are not used in postgres
        global_transaction_id may be any string identifier supported by
        postgres returns a tuple
        (format_id, global_transaction_id, branch_qualifier)"""
        return (format_id, global_transaction_id, branch_qualifier)
    def tpc_begin(self, xid):
        """Begins a TPC transaction with the given transaction ID xid.
        This method should be called outside of a transaction (i.e. nothing may
        have executed since the last .commit() or .rollback()).
        Furthermore, it is an error to call .commit() or .rollback() within the
        TPC transaction. A ProgrammingError is raised, if the application calls
        .commit() or .rollback() during an active TPC transaction.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        self._xid = xid  # remember the active TPC transaction id
        if self.autocommit:
            self.execute_unnamed("begin transaction")
    def tpc_prepare(self):
        """Performs the first phase of a transaction started with .tpc_begin().
        A ProgrammingError is be raised if this method is called outside of a
        TPC transaction.
        After calling .tpc_prepare(), no statements can be executed until
        .tpc_commit() or .tpc_rollback() have been called.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        self.execute_unnamed("PREPARE TRANSACTION '%s';" % (self._xid[1],))
    def tpc_commit(self, xid=None):
        """When called with no arguments, .tpc_commit() commits a TPC
        transaction previously prepared with .tpc_prepare().
        If .tpc_commit() is called prior to .tpc_prepare(), a single phase
        commit is performed. A transaction manager may choose to do this if
        only a single resource is participating in the global transaction.
        When called with a transaction ID xid, the database commits the given
        transaction. If an invalid transaction ID is provided, a
        ProgrammingError will be raised. This form should be called outside of
        a transaction, and is intended for use in recovery.
        On return, the TPC transaction is ended.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        if xid is None:
            xid = self._xid
        if xid is None:
            raise ProgrammingError("Cannot tpc_commit() without a TPC transaction!")
        try:
            # tpc_recover()/commit() must run outside a transaction, so force
            # autocommit for the duration and restore it afterwards.
            previous_autocommit_mode = self.autocommit
            self.autocommit = True
            if xid in self.tpc_recover():
                self.execute_unnamed("COMMIT PREPARED '%s';" % (xid[1],))
            else:
                # a single-phase commit
                self.commit()
        finally:
            self.autocommit = previous_autocommit_mode
            self._xid = None
    def tpc_rollback(self, xid=None):
        """When called with no arguments, .tpc_rollback() rolls back a TPC
        transaction. It may be called before or after .tpc_prepare().
        When called with a transaction ID xid, it rolls back the given
        transaction. If an invalid transaction ID is provided, a
        ProgrammingError is raised. This form should be called outside of a
        transaction, and is intended for use in recovery.
        On return, the TPC transaction is ended.
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        if xid is None:
            xid = self._xid
        if xid is None:
            raise ProgrammingError(
                "Cannot tpc_rollback() without a TPC prepared transaction!"
            )
        try:
            # As in tpc_commit(), run outside a transaction and restore the
            # previous autocommit setting afterwards.
            previous_autocommit_mode = self.autocommit
            self.autocommit = True
            if xid in self.tpc_recover():
                # a two-phase rollback
                self.execute_unnamed("ROLLBACK PREPARED '%s';" % (xid[1],))
            else:
                # a single-phase rollback
                self.rollback()
        finally:
            self.autocommit = previous_autocommit_mode
            self._xid = None
    def tpc_recover(self):
        """Returns a list of pending transaction IDs suitable for use with
        .tpc_commit(xid) or .tpc_rollback(xid).
        This function is part of the `DBAPI 2.0 specification
        <http://www.python.org/dev/peps/pep-0249/>`_.
        """
        try:
            previous_autocommit_mode = self.autocommit
            self.autocommit = True
            curs = self.cursor()
            # pg_prepared_xacts lists all currently prepared transactions.
            curs.execute("select gid FROM pg_prepared_xacts")
            return [self.xid(0, row[0], "") for row in curs.fetchall()]
        finally:
            self.autocommit = previous_autocommit_mode
class Warning(Exception):
    """Raised for important database warnings such as data truncation.

    pg8000 does not currently raise this; it exists for DB-API 2.0
    compliance (`PEP 249 <http://www.python.org/dev/peps/pep-0249/>`_).
    """
class DataError(DatabaseError):
"""Generic exception raised for errors that are due to problems with the
processed data. This exception is not currently raised by pg8000.
This exception is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
pass
class OperationalError(DatabaseError):
"""
Generic exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer. This
exception is currently never raised by pg8000.
This exception is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
pass
class IntegrityError(DatabaseError):
"""
Generic exception raised when the relational integrity of the database is
affected. This exception is not currently raised by pg8000.
This exception is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
pass
class InternalError(DatabaseError):
"""Generic exception raised when the database encounters an internal error.
This is currently only raised when unexpected state occurs in the pg8000
interface itself, and is typically the result of a interface bug.
This exception is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
pass
class ProgrammingError(DatabaseError):
"""Generic exception raised for programming errors. For example, this
exception is raised if more parameter fields are in a query string than
there are available parameters.
This exception is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
pass
class NotSupportedError(DatabaseError):
"""Generic exception raised in case a method or database API was used which
is not supported by the database.
This exception is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
pass
class ArrayContentNotSupportedError(NotSupportedError):
"""
Raised when attempting to transmit an array where the base type is not
supported for binary data transfer by the interface.
"""
pass
__all__ = [
"BIGINT",
"BINARY",
"BOOLEAN",
"BOOLEAN_ARRAY",
"BYTES",
"Binary",
"CHAR",
"CHAR_ARRAY",
"Connection",
"Cursor",
"DATE",
"DataError",
"DatabaseError",
"Date",
"DateFromTicks",
"Error",
"FLOAT",
"FLOAT_ARRAY",
"INET",
"INT2VECTOR",
"INTEGER",
"INTEGER_ARRAY",
"INTERVAL",
"IntegrityError",
"InterfaceError",
"InternalError",
"JSON",
"JSONB",
"MACADDR",
"NAME",
"NAME_ARRAY",
"NULLTYPE",
"NUMERIC",
"NUMERIC_ARRAY",
"NotSupportedError",
"OID",
"OperationalError",
"PGInterval",
"ProgrammingError",
"ROWID",
"STRING",
"TEXT",
"TEXT_ARRAY",
"TIME",
"TIMESTAMP",
"TIMESTAMPTZ",
"Time",
"TimeFromTicks",
"Timestamp",
"TimestampFromTicks",
"UNKNOWN",
"UUID_TYPE",
"VARCHAR",
"VARCHAR_ARRAY",
"Warning",
"XID",
"connect",
]
| 31.667021 | 88 | 0.606611 |
acf29fa892a6d7c7f8c0decaf4d216f4409333aa | 4,414 | py | Python | sphinx/source/tutorial/solutions/periodic.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/tutorial/solutions/periodic.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/tutorial/solutions/periodic.py | minrk/bokeh | ae4366e508355afc06b5fc62f1ee399635ab909d | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from bokeh.plotting import *
from bokeh.sampledata import periodic_table
from bokeh.objects import HoverTool, ColumnDataSource
from collections import OrderedDict
# categories need to be strings
elements = periodic_table.elements[periodic_table.elements['group'] != "-"]
elements['group'] = [str(x) for x in elements['group']]
elements['period'] = [str(x) for x in elements['period']]
# The categorical ranges need to be strings, so convert the groups and periods
group_range = [str(x) for x in range(1,19)]
period_range = [str(x) for x in reversed(sorted(set(elements['period'])))]
# Output static HTML file
output_file("periodic.html")
# I like this colormap OK, but feel free to change it up
colormap = {
'alkali metal' : "#a6cee3",
'alkaline earth metal' : "#1f78b4",
'halogen' : "#fdbf6f",
'metal' : "#b2df8a",
'metalloid' : "#33a02c",
'noble gas' : "#bbbb88",
'nonmetal' : "#baa2a6",
'transition metal' : "#e08e79",
}
# There are lots of things about each element we might want a hover tool
# to be able to display, so put them all in a ColumnDataSource
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements['group']],
period=[str(y) for y in elements['period']],
symx=[str(x)+":0.1" for x in elements['group']],
numbery=[str(x)+":0.8" for x in elements['period']],
massy=[str(x)+":0.15" for x in elements['period']],
namey=[str(x)+":0.3" for x in elements['period']],
sym=elements['symbol'],
name=elements['name'],
cpk=elements['CPK'],
atomic_number=elements['atomic number'],
electronic=elements['electronic configuration'],
mass=elements['atomic mass'],
type=elements['metal'],
type_color=[colormap[x] for x in elements['metal']],
)
)
hold()
# EXERCISE: add a `rect` renderer to display a rectangle at each group and column
# Use group_range for x_range and period_range for y_range. Rememeber to add a
# 'hover' to the tools and make your plot fairly wide.
rect("group", "period", 0.9, 0.9, source=source,
x_range=group_range, y_range=period_range,
fill_alpha=0.6, color="type_color",
tools="resize,hover", title="Periodic Table",
plot_width=1200)
# EXERCISE: we will be setting several of the same properties on the text renderers
# below. Add to this dictionary to set the text alignment to 'left' and the text
# baseline to 'middle'
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
# Since text can be interpreted as a data source field name in general, we have
# to specify the text a little more verbosely with a dictionary, as below
text(x=dict(field="symx", units="data"),
y=dict(field="period", units="data"),
text=dict(field="sym", units="data"),
text_font_style="bold", text_font_size="15pt", **text_props)
# EXERCISE: add text that displays the atomic number in each square with 9pt font.
# Use 'numbery' for the y position.
text(x=dict(field="symx", units="data"),
y=dict(field="numbery", units="data"),
text=dict(field="atomic_number", units="data"),
text_font_size="9pt", **text_props)
# EXERCISE: add text that displays the full name in each square with 6pt font
# Use 'namey' for the y position.
text(x=dict(field="symx", units="data"),
y=dict(field="namey", units="data"),
text=dict(field="name", units="data"),
text_font_size="6pt", **text_props)
# EXERCISE: add text that displays the atomic mass each square in 5pt font
# Use 'massy' for the y position.
text(x=dict(field="symx", units="data"),
y=dict(field="massy", units="data"),
text=dict(field="mass", units="data"),
text_font_size="5pt", **text_props)
# turn off the grid lines
grid().grid_line_color = None
# EXERCISE: configure a hover tool that displays the following:
# * name
# * atomic number
# * type
# * atomic mass
# * CPK color
# * electronic configuration
hover = [t for t in curplot().tools if isinstance(t, HoverTool)][0]
hover.tooltips = OrderedDict([
("name", "@name"),
("atomic number", "@atomic_number"),
("type", "@type"),
("atomic mass", "@mass"),
("CPK color", "$color[hex, swatch]:cpk"),
("electronic configuration", "@electronic"),
])
show()
| 35.886179 | 83 | 0.654735 |
acf29fdc59b89dd41dabdeb9a7b8ec63e9cf1b43 | 33,095 | py | Python | src/services/proto/response_pb2.py | equals2-ll/plus | 863fbdf41e09375c474b6bec08600d2678cb262e | [
"MIT"
] | 7 | 2021-08-10T03:38:58.000Z | 2022-03-10T18:53:28.000Z | src/services/proto/response_pb2.py | equals2-ll/plus | 863fbdf41e09375c474b6bec08600d2678cb262e | [
"MIT"
] | null | null | null | src/services/proto/response_pb2.py | equals2-ll/plus | 863fbdf41e09375c474b6bec08600d2678cb262e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: response.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='response.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0eresponse.proto\"\x8a\x02\n\x05Manga\x12\x10\n\x08manga_id\x18\x01 \x01(\r\x12\x12\n\nmanga_name\x18\x02 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\x12\x1a\n\x12portrait_image_url\x18\x04 \x01(\t\x12\x1b\n\x13landscape_image_url\x18\x05 \x01(\t\x12\x12\n\nview_count\x18\x06 \x01(\r\x12!\n\x08language\x18\x07 \x01(\x0e\x32\x0f.Manga.Language\"[\n\x08Language\x12\x0b\n\x07\x45NGLISH\x10\x00\x12\x0b\n\x07SPANISH\x10\x01\x12\x0e\n\nINDONESIAN\x10\x03\x12\x0e\n\nPORTUGUESE\x10\x04\x12\x0b\n\x07RUSSIAN\x10\x05\x12\x08\n\x04THAI\x10\x06\"\xd6\x01\n\x07\x43hapter\x12\x10\n\x08manga_id\x18\x01 \x01(\r\x12\x12\n\nchapter_id\x18\x02 \x01(\r\x12\x16\n\x0e\x63hapter_number\x18\x03 \x01(\t\x12\x14\n\x0c\x63hapter_name\x18\x04 \x01(\t\x12\x15\n\rthumbnail_url\x18\x05 \x01(\t\x12\x17\n\x0fstart_timestamp\x18\x06 \x01(\r\x12\x15\n\rend_timestamp\x18\x07 \x01(\r\x12\x16\n\x0e\x61lready_viewed\x18\x08 \x01(\x08\x12\x18\n\x10is_vertical_only\x18\t \x01(\x08\"\xc5\x02\n\x0bMangaDetail\x12\x15\n\x05manga\x18\x01 \x01(\x0b\x32\x06.Manga\x12\x17\n\x0fmanga_image_url\x18\x02 \x01(\t\x12\x10\n\x08overview\x18\x03 \x01(\t\x12\x1c\n\x14\x62\x61\x63kground_image_url\x18\x04 \x01(\t\x12\x16\n\x0enext_timestamp\x18\x05 \x01(\r\x12\x15\n\rupdate_timing\x18\x06 \x01(\r\x12\"\n\x1aviewing_period_description\x18\x07 \x01(\t\x12\x1b\n\x13non_appearance_info\x18\x08 \x01(\t\x12$\n\x12\x66irst_chapter_list\x18\t \x03(\x0b\x32\x08.Chapter\x12#\n\x11last_chapter_list\x18\n \x03(\x0b\x32\x08.Chapter\x12\x1b\n\x13\x63hapters_descending\x18\x11 \x01(\x08\"\x98\x02\n\x0cUpdatedManga\x12\x10\n\x08manga_id\x18\x01 \x01(\r\x12\x12\n\nmanga_name\x18\x02 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\x12\x1a\n\x12portrait_image_url\x18\x04 \x01(\t\x12\x1b\n\x13landscape_image_url\x18\x05 \x01(\t\x12\x12\n\nview_count\x18\x06 \x01(\r\x12(\n\x08language\x18\x07 
\x01(\x0e\x32\x16.UpdatedManga.Language\"[\n\x08Language\x12\x0b\n\x07\x45NGLISH\x10\x00\x12\x0b\n\x07SPANISH\x10\x01\x12\x0e\n\nINDONESIAN\x10\x03\x12\x0e\n\nPORTUGUESE\x10\x04\x12\x0b\n\x07RUSSIAN\x10\x05\x12\x08\n\x04THAI\x10\x06\"T\n\x12UpdatedMangaDetail\x12$\n\rupdated_manga\x18\x01 \x01(\x0b\x32\r.UpdatedManga\x12\x18\n\x10upload_timestamp\x18\x02 \x01(\t\"<\n\x07Updated\x12\x31\n\x14updated_manga_detail\x18\x01 \x03(\x0b\x32\x13.UpdatedMangaDetail\"H\n\x08Response\x12\x1f\n\x07success\x18\x01 \x01(\x0b\x32\x0e.SuccessResult\x12\x1b\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0c.ErrorResult\"\x93\x01\n\x0b\x45rrorResult\x12#\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x13.ErrorResult.Action\x12\x11\n\tdebugInfo\x18\x04 \x01(\t\"L\n\x06\x41\x63tion\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x10\n\x0cUNAUTHORIZED\x10\x01\x12\x0f\n\x0bMAINTENANCE\x10\x02\x12\x12\n\x0eGEOIP_BLOCKING\x10\x03\"N\n\rSuccessResult\x12\"\n\x0cmanga_detail\x18\x08 \x01(\x0b\x32\x0c.MangaDetail\x12\x19\n\x07updated\x18\x14 \x01(\x0b\x32\x08.Updatedb\x06proto3'
)
_MANGA_LANGUAGE = _descriptor.EnumDescriptor(
name='Language',
full_name='Manga.Language',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ENGLISH', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPANISH', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INDONESIAN', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PORTUGUESE', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUSSIAN', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='THAI', index=5, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=194,
serialized_end=285,
)
_sym_db.RegisterEnumDescriptor(_MANGA_LANGUAGE)
_UPDATEDMANGA_LANGUAGE = _descriptor.EnumDescriptor(
name='Language',
full_name='UpdatedManga.Language',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ENGLISH', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPANISH', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INDONESIAN', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PORTUGUESE', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUSSIAN', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='THAI', index=5, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=194,
serialized_end=285,
)
_sym_db.RegisterEnumDescriptor(_UPDATEDMANGA_LANGUAGE)
_ERRORRESULT_ACTION = _descriptor.EnumDescriptor(
name='Action',
full_name='ErrorResult.Action',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNAUTHORIZED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MAINTENANCE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GEOIP_BLOCKING', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1409,
serialized_end=1485,
)
_sym_db.RegisterEnumDescriptor(_ERRORRESULT_ACTION)
_MANGA = _descriptor.Descriptor(
name='Manga',
full_name='Manga',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manga_id', full_name='Manga.manga_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='manga_name', full_name='Manga.manga_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='author', full_name='Manga.author', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='portrait_image_url', full_name='Manga.portrait_image_url', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='landscape_image_url', full_name='Manga.landscape_image_url', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='view_count', full_name='Manga.view_count', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='language', full_name='Manga.language', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_MANGA_LANGUAGE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=285,
)
_CHAPTER = _descriptor.Descriptor(
name='Chapter',
full_name='Chapter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manga_id', full_name='Chapter.manga_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='chapter_id', full_name='Chapter.chapter_id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='chapter_number', full_name='Chapter.chapter_number', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='chapter_name', full_name='Chapter.chapter_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='thumbnail_url', full_name='Chapter.thumbnail_url', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_timestamp', full_name='Chapter.start_timestamp', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_timestamp', full_name='Chapter.end_timestamp', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='already_viewed', full_name='Chapter.already_viewed', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_vertical_only', full_name='Chapter.is_vertical_only', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=288,
serialized_end=502,
)
_MANGADETAIL = _descriptor.Descriptor(
name='MangaDetail',
full_name='MangaDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manga', full_name='MangaDetail.manga', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='manga_image_url', full_name='MangaDetail.manga_image_url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='overview', full_name='MangaDetail.overview', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='background_image_url', full_name='MangaDetail.background_image_url', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_timestamp', full_name='MangaDetail.next_timestamp', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_timing', full_name='MangaDetail.update_timing', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='viewing_period_description', full_name='MangaDetail.viewing_period_description', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='non_appearance_info', full_name='MangaDetail.non_appearance_info', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='first_chapter_list', full_name='MangaDetail.first_chapter_list', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_chapter_list', full_name='MangaDetail.last_chapter_list', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='chapters_descending', full_name='MangaDetail.chapters_descending', index=10,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=505,
serialized_end=830,
)
_UPDATEDMANGA = _descriptor.Descriptor(
name='UpdatedManga',
full_name='UpdatedManga',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manga_id', full_name='UpdatedManga.manga_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='manga_name', full_name='UpdatedManga.manga_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='author', full_name='UpdatedManga.author', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='portrait_image_url', full_name='UpdatedManga.portrait_image_url', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='landscape_image_url', full_name='UpdatedManga.landscape_image_url', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='view_count', full_name='UpdatedManga.view_count', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='language', full_name='UpdatedManga.language', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_UPDATEDMANGA_LANGUAGE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=833,
serialized_end=1113,
)
_UPDATEDMANGADETAIL = _descriptor.Descriptor(
name='UpdatedMangaDetail',
full_name='UpdatedMangaDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='updated_manga', full_name='UpdatedMangaDetail.updated_manga', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='upload_timestamp', full_name='UpdatedMangaDetail.upload_timestamp', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1115,
serialized_end=1199,
)
# ---------------------------------------------------------------------------
# Machine-generated protobuf message descriptors (protoc output for the
# response_pb2 module). Do not edit by hand; regenerate from the .proto file.
# ---------------------------------------------------------------------------

# Descriptor for `Updated`: wraps a repeated list of UpdatedMangaDetail.
_UPDATED = _descriptor.Descriptor(
  name='Updated',
  full_name='Updated',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='updated_manga_detail', full_name='Updated.updated_manga_detail', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1201,
  serialized_end=1261,
)

# Descriptor for `Response`: either a `success` or an `error` payload.
_RESPONSE = _descriptor.Descriptor(
  name='Response',
  full_name='Response',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='success', full_name='Response.success', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='error', full_name='Response.error', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1263,
  serialized_end=1335,
)

# Descriptor for `ErrorResult`: an action enum plus free-form debug text.
_ERRORRESULT = _descriptor.Descriptor(
  name='ErrorResult',
  full_name='ErrorResult',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='action', full_name='ErrorResult.action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='debugInfo', full_name='ErrorResult.debugInfo', index=1,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _ERRORRESULT_ACTION,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1338,
  serialized_end=1485,
)

# Descriptor for `SuccessResult`: a manga detail page or an update listing.
_SUCCESSRESULT = _descriptor.Descriptor(
  name='SuccessResult',
  full_name='SuccessResult',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='manga_detail', full_name='SuccessResult.manga_detail', index=0,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='updated', full_name='SuccessResult.updated', index=1,
      number=20, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1487,
  serialized_end=1565,
)
# ---------------------------------------------------------------------------
# Machine-generated wiring (protoc output): link field descriptors to their
# message/enum types, register everything with the symbol database, and build
# the concrete message classes via the reflection API. Do not edit by hand.
# ---------------------------------------------------------------------------
_MANGA.fields_by_name['language'].enum_type = _MANGA_LANGUAGE
_MANGA_LANGUAGE.containing_type = _MANGA
_MANGADETAIL.fields_by_name['manga'].message_type = _MANGA
_MANGADETAIL.fields_by_name['first_chapter_list'].message_type = _CHAPTER
_MANGADETAIL.fields_by_name['last_chapter_list'].message_type = _CHAPTER
_UPDATEDMANGA.fields_by_name['language'].enum_type = _UPDATEDMANGA_LANGUAGE
_UPDATEDMANGA_LANGUAGE.containing_type = _UPDATEDMANGA
_UPDATEDMANGADETAIL.fields_by_name['updated_manga'].message_type = _UPDATEDMANGA
_UPDATED.fields_by_name['updated_manga_detail'].message_type = _UPDATEDMANGADETAIL
_RESPONSE.fields_by_name['success'].message_type = _SUCCESSRESULT
_RESPONSE.fields_by_name['error'].message_type = _ERRORRESULT
_ERRORRESULT.fields_by_name['action'].enum_type = _ERRORRESULT_ACTION
_ERRORRESULT_ACTION.containing_type = _ERRORRESULT
_SUCCESSRESULT.fields_by_name['manga_detail'].message_type = _MANGADETAIL
_SUCCESSRESULT.fields_by_name['updated'].message_type = _UPDATED
DESCRIPTOR.message_types_by_name['Manga'] = _MANGA
DESCRIPTOR.message_types_by_name['Chapter'] = _CHAPTER
DESCRIPTOR.message_types_by_name['MangaDetail'] = _MANGADETAIL
DESCRIPTOR.message_types_by_name['UpdatedManga'] = _UPDATEDMANGA
DESCRIPTOR.message_types_by_name['UpdatedMangaDetail'] = _UPDATEDMANGADETAIL
DESCRIPTOR.message_types_by_name['Updated'] = _UPDATED
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
DESCRIPTOR.message_types_by_name['ErrorResult'] = _ERRORRESULT
DESCRIPTOR.message_types_by_name['SuccessResult'] = _SUCCESSRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
Manga = _reflection.GeneratedProtocolMessageType('Manga', (_message.Message,), {
  'DESCRIPTOR' : _MANGA,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:Manga)
  })
_sym_db.RegisterMessage(Manga)

Chapter = _reflection.GeneratedProtocolMessageType('Chapter', (_message.Message,), {
  'DESCRIPTOR' : _CHAPTER,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:Chapter)
  })
_sym_db.RegisterMessage(Chapter)

MangaDetail = _reflection.GeneratedProtocolMessageType('MangaDetail', (_message.Message,), {
  'DESCRIPTOR' : _MANGADETAIL,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:MangaDetail)
  })
_sym_db.RegisterMessage(MangaDetail)

UpdatedManga = _reflection.GeneratedProtocolMessageType('UpdatedManga', (_message.Message,), {
  'DESCRIPTOR' : _UPDATEDMANGA,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:UpdatedManga)
  })
_sym_db.RegisterMessage(UpdatedManga)

UpdatedMangaDetail = _reflection.GeneratedProtocolMessageType('UpdatedMangaDetail', (_message.Message,), {
  'DESCRIPTOR' : _UPDATEDMANGADETAIL,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:UpdatedMangaDetail)
  })
_sym_db.RegisterMessage(UpdatedMangaDetail)

Updated = _reflection.GeneratedProtocolMessageType('Updated', (_message.Message,), {
  'DESCRIPTOR' : _UPDATED,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:Updated)
  })
_sym_db.RegisterMessage(Updated)

Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
  'DESCRIPTOR' : _RESPONSE,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:Response)
  })
_sym_db.RegisterMessage(Response)

ErrorResult = _reflection.GeneratedProtocolMessageType('ErrorResult', (_message.Message,), {
  'DESCRIPTOR' : _ERRORRESULT,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:ErrorResult)
  })
_sym_db.RegisterMessage(ErrorResult)

SuccessResult = _reflection.GeneratedProtocolMessageType('SuccessResult', (_message.Message,), {
  'DESCRIPTOR' : _SUCCESSRESULT,
  '__module__' : 'response_pb2'
  # @@protoc_insertion_point(class_scope:SuccessResult)
  })
_sym_db.RegisterMessage(SuccessResult)

# @@protoc_insertion_point(module_scope)
| 42.869171 | 2,916 | 0.749569 |
acf2a08b2b3d6bd5358e8d00b8d96adc4f057502 | 3,913 | py | Python | libs/mqttclient.py | lianwutech/plugin_xxx_yyy | 8339ef56d2a6d4565860a002ef5e8e0e78f97745 | [
"Apache-2.0"
] | null | null | null | libs/mqttclient.py | lianwutech/plugin_xxx_yyy | 8339ef56d2a6d4565860a002ef5e8e0e78f97745 | [
"Apache-2.0"
] | null | null | null | libs/mqttclient.py | lianwutech/plugin_xxx_yyy | 8339ef56d2a6d4565860a002ef5e8e0e78f97745 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
mqttclient类
"""
import json
import logging
import threading
import paho.mqtt.client as mqtt
logger = logging.getLogger('plugin')
class MQTTClient(object):
    """MQTT transport for the plugin.

    Wraps a paho ``mqtt.Client``: subscribes to ``<network_name>/#``,
    forwards JSON device commands to the attached channel object, and
    publishes device data to ``gateway_topic``. The network loop runs on a
    background thread started via :meth:`start`.

    Fixes over the original: the Python-2-only ``except Exception, e``
    syntax (a SyntaxError on Python 3) is replaced by ``except ... as e``,
    and the deprecated ``Thread.isAlive()`` call (removed in Python 3.9)
    is replaced by ``is_alive()``. Both forms also work on Python 2.6+.
    """

    def __init__(self, mqtt_config, network_name):
        # mqtt_config is expected to carry: server, port, client_id,
        # gateway_topic (see check_config()).
        self.channel = None
        self.mqtt_config = mqtt_config
        self.server_addr = mqtt_config.get("server")
        self.server_port = mqtt_config.get("port")
        self.client_id = mqtt_config.get("client_id")
        self.gateway_topic = mqtt_config.get("gateway_topic")
        self.thread = None
        self.network_name = network_name

        # Called when the client receives a CONNACK response from the server.
        # NOTE(review): paho-mqtt >= 1.0 invokes on_connect with
        # (client, userdata, flags, rc); this 3-argument form matches the
        # pre-1.0 API -- confirm the pinned paho version before upgrading.
        def on_connect(client, userdata, rc):
            logger.info("Connected with result code " + str(rc))
            # Subscribing in on_connect() means that if we lose the connection
            # and reconnect then subscriptions will be renewed.
            client.subscribe("%s/#" % self.network_name)

        # Called for every PUBLISH received; the payload must be a JSON
        # command object addressed to a single device.
        def on_message(client, userdata, msg):
            logger.info("收到数据消息" + msg.topic + " " + str(msg.payload))
            try:
                cmd_msg = json.loads(msg.payload)
            except Exception:
                logger.error("消息内容错误,%r" % msg.payload)
                return
            # every command must carry the full device address block
            required = ("device_id", "device_addr", "device_port", "device_type")
            if any(key not in cmd_msg for key in required):
                logger.error("消息格式错误。")
                return
            if cmd_msg["device_id"] != msg.topic:
                logger.error("device_id(%s)和topic(%s)不一致." % (cmd_msg["device_id"], msg.topic))
                return
            # hand the command to the channel, if one has been attached
            if self.channel is not None:
                self.channel.process_cmd(cmd_msg)
            else:
                logger.info("channel为空,不处理.")
            return

        self.mqtt_client = mqtt.Client(client_id=self.client_id)
        self.mqtt_client.on_connect = on_connect
        self.mqtt_client.on_message = on_message

    @staticmethod
    def check_config(mqtt_params):
        """Return True when *mqtt_params* carries every required key."""
        required = ("server", "port", "client_id", "gateway_topic")
        return all(key in mqtt_params for key in required)

    def set_channel(self, channel):
        """Attach the channel object that will handle incoming commands."""
        self.channel = channel

    def connect(self):
        """Connect to the broker; return True on success, False on failure."""
        try:
            self.mqtt_client.connect(host=self.server_addr, port=self.server_port, keepalive=60)
            return True
        except Exception as e:
            logger.error("MQTT链接失败,错误内容:%r" % e)
            return False

    def publish_data(self, device_data_msg):
        """Publish *device_data_msg* (JSON-serialisable) to the gateway topic."""
        if self.mqtt_client is None:
            # can happen during plugin start-up: the channel is already up
            # but the MQTT client has not been built/connected yet
            logger.debug("mqtt对象未初始化")
        else:
            self.mqtt_client.reconnect()
            self.mqtt_client.publish(topic=self.gateway_topic, payload=json.dumps(device_data_msg))
            logger.info("向Topic(%s)发布消息:%r" % (self.gateway_topic, device_data_msg))

    def run(self):
        """Blocking network loop; used as the worker thread's target."""
        try:
            self.mqtt_client.loop_forever()
        except Exception as e:
            logger.error("MQTT链接失败,错误内容:%r" % e)
            self.mqtt_client.disconnect()

    def start(self):
        """(Re)start the network loop on a fresh background thread."""
        if self.thread is not None:
            # give a previously started thread up to 1s to wind down
            self.thread.join(1)
        self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def isAlive(self):
        """Whether the worker thread is running (camelCase kept for callers)."""
        if self.thread is not None:
            # Thread.isAlive() was removed in Python 3.9; is_alive() exists
            # on both Python 2.6+ and every Python 3 release.
            return self.thread.is_alive()
        return False
acf2a1663d2c92f5243a14448a5efb04e25c8343 | 1,089 | py | Python | 909_Snakes_and_Ladders.py | yuqingchen/Leetcode | 6cbcb36e66a10a226ddb0966701e61ce4c2434d4 | [
"MIT"
] | 1 | 2019-12-12T20:16:08.000Z | 2019-12-12T20:16:08.000Z | 909_Snakes_and_Ladders.py | yuqingchen/Leetcode | 6cbcb36e66a10a226ddb0966701e61ce4c2434d4 | [
"MIT"
] | null | null | null | 909_Snakes_and_Ladders.py | yuqingchen/Leetcode | 6cbcb36e66a10a226ddb0966701e61ce4c2434d4 | [
"MIT"
] | null | null | null | from collections import deque
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
if not board :
return -1
res = 0
n = len(board)
visited = set()
visited.add(1)
queue = deque([1])
while queue :
for _ in range(len(queue)) :
node = queue.popleft()
if node == n*n :
return res
for d in [1, 2, 3, 4, 5, 6] :
if node+d <= n*n and node+d not in visited :
visited.add(node+d)
x = n - ((node+d -1)//n) -1
if ((node+d -1)//n)%2 == 1 :
y = n - ((node+d -1)%n) -1
else :
y = (node + d -1) % n
if board[x][y] == -1 :
newnode = node + d
else :
newnode = board[x][y]
queue.append(newnode)
res += 1
return -1 | 36.3 | 64 | 0.342516 |
acf2a363b5838dd78b11017482a67c254c019952 | 7,108 | py | Python | bassl/transform/random_color_jitter.py | kakaobrain/bassl | 551fe94343debf60a64c787be6752284153a0f7a | [
"Apache-2.0"
] | 55 | 2022-01-17T02:18:40.000Z | 2022-03-25T08:24:28.000Z | bassl/transform/random_color_jitter.py | kakaobrain/bassl | 551fe94343debf60a64c787be6752284153a0f7a | [
"Apache-2.0"
] | 5 | 2022-01-18T01:59:49.000Z | 2022-03-24T00:20:35.000Z | bassl/transform/random_color_jitter.py | kakaobrain/bassl | 551fe94343debf60a64c787be6752284153a0f7a | [
"Apache-2.0"
] | 1 | 2022-01-23T10:50:15.000Z | 2022-01-23T10:50:15.000Z | import numbers
import random
from typing import Any, Dict
import torchvision
import torchvision.transforms.functional as F
from classy_vision.dataset.transforms import register_transform
from classy_vision.dataset.transforms.classy_transform import ClassyTransform
@register_transform("VideoRandomColorJitter")
class VideoRandomColorJitter(ClassyTransform):
    """Randomly change the brightness, contrast and saturation of an image.

    Operates on a *list* of frames. With probability ``p`` a colour jitter is
    applied; otherwise the frames pass through untouched. ``consistent``
    chooses one jitter for the whole clip, while ``seq_len`` re-draws the
    jitter every ``seq_len`` frames (0 = a fresh jitter for every frame).

    Args:
        brightness (float or tuple of float (min, max)): How much to jitter brightness.
            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast (float or tuple of float (min, max)): How much to jitter contrast.
            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation (float or tuple of float (min, max)): How much to jitter saturation.
            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue (float or tuple of float (min, max)): How much to jitter hue.
            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
    """

    def __init__(
        self,
        brightness=0,
        contrast=0,
        saturation=0,
        hue=0,
        consistent=True,
        p=1.0,
        seq_len=0,
    ):
        # _check_input normalises each scalar/pair into a [lo, hi] range,
        # or None when the range is degenerate (=> no-op for that channel).
        self.brightness = self._check_input(brightness, "brightness")
        self.contrast = self._check_input(contrast, "contrast")
        self.saturation = self._check_input(saturation, "saturation")
        self.hue = self._check_input(
            hue, "hue", center=0, bound=(-0.5, 0.5), clip_first_on_zero=False
        )
        self.consistent = consistent
        # probability of applying the jitter at all (see __call__)
        self.threshold = p
        self.seq_len = seq_len

    def _check_input(
        self, value, name, center=1, bound=(0, float("inf")), clip_first_on_zero=True
    ):
        # Validate a jitter parameter and normalise it to a [lo, hi] list;
        # a scalar v becomes [center - v, center + v] (lower end clipped at
        # zero for brightness/contrast/saturation).
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError(
                    "If {} is a single number, it must be non negative.".format(name)
                )
            value = [center - value, center + value]
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError(
                "{} should be a single number or a list/tuple with lenght 2.".format(
                    name
                )
            )

        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__ (already normalised to
        [lo, hi] ranges or None).

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []

        # one Lambda per enabled channel; each closes over a freshly drawn
        # factor, so the whole Compose represents a single random jitter
        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(
                torchvision.transforms.Lambda(
                    lambda img: F.adjust_brightness(img, brightness_factor)
                )
            )

        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(
                torchvision.transforms.Lambda(
                    lambda img: F.adjust_contrast(img, contrast_factor)
                )
            )

        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(
                torchvision.transforms.Lambda(
                    lambda img: F.adjust_saturation(img, saturation_factor)
                )
            )

        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(
                torchvision.transforms.Lambda(lambda img: F.adjust_hue(img, hue_factor))
            )

        # apply the four adjustments in random order
        random.shuffle(transforms)
        transform = torchvision.transforms.Compose(transforms)

        return transform

    def __call__(self, imgmap):
        # imgmap: list of frames (PIL images or tensors accepted by
        # torchvision.transforms.functional -- not verifiable from here).
        assert isinstance(imgmap, list)
        if random.random() < self.threshold:  # do ColorJitter
            if self.consistent:
                # one jitter shared by every frame in the clip
                transform = self.get_params(
                    self.brightness, self.contrast, self.saturation, self.hue
                )
                return [transform(i) for i in imgmap]
            else:
                if self.seq_len == 0:
                    # an independent jitter for every single frame
                    return [
                        self.get_params(
                            self.brightness, self.contrast, self.saturation, self.hue
                        )(img)
                        for img in imgmap
                    ]
                else:
                    # re-draw the jitter at the start of each seq_len-frame
                    # sub-sequence and reuse it within the sub-sequence
                    result = []
                    for idx, img in enumerate(imgmap):
                        if idx % self.seq_len == 0:
                            transform = self.get_params(
                                self.brightness,
                                self.contrast,
                                self.saturation,
                                self.hue,
                            )
                        result.append(transform(img))
                    return result
        else:  # don't do ColorJitter, do nothing
            return imgmap

    def __repr__(self):
        # e.g. VideoRandomColorJitter(brightness=[0.2, 1.8], ..., hue=[-0.2, 0.2])
        format_string = self.__class__.__name__ + "("
        format_string += "brightness={0}".format(self.brightness)
        format_string += ", contrast={0}".format(self.contrast)
        format_string += ", saturation={0}".format(self.saturation)
        format_string += ", hue={0})".format(self.hue)
        return format_string

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "VideoRandomColorJitter":
        # Build from a Classy Vision config dict.
        # NOTE(review): `consistent` and `seq_len` are NOT read from the
        # config, so configured instances always use the constructor
        # defaults (consistent=True, seq_len=0) -- confirm this is intended.
        brightness = config.get("brightness", 0.8)
        contrast = config.get("contrast", 0.8)
        saturation = config.get("saturation", 0.8)
        hue = config.get("hue", 0.2)
        p = config.get("p", 0.8)
        return cls(
            brightness=brightness,
            contrast=contrast,
            saturation=saturation,
            hue=hue,
            p=p,
        )
| 39.270718 | 95 | 0.546005 |
acf2a4aa262af65a73e4c59fc6fa23949418ab6e | 1,157 | py | Python | extract.py | jeffin07/Dehaze-GAN | 1905a3cb75e0f7688fc090757627ce6c1e427cf8 | [
"MIT"
] | 5 | 2019-11-27T13:37:07.000Z | 2021-11-24T07:04:07.000Z | extract.py | raven-dehaze-work/Dehaze-GAN | d63a850110fb6e388c2a0a01788e3330bfc3e4bc | [
"MIT"
] | null | null | null | extract.py | raven-dehaze-work/Dehaze-GAN | d63a850110fb6e388c2a0a01788e3330bfc3e4bc | [
"MIT"
] | null | null | null | import os
import cv2
import h5py
import numpy as np
from skimage.transform import resize
if __name__ == '__main__':
if not os.path.exists('A'):
os.mkdir('A')
if not os.path.exists('B'):
os.mkdir('B')
with h5py.File('data.mat', 'r') as f:
images = np.array(f['images'])
depths = np.array(f['depths'])
images = images.transpose(0, 1, 3, 2)
depths = depths.transpose(2, 1, 0)
depths = (depths - np.min(depths, axis = (0, 1))) / np.max(depths, axis = (0, 1))
depths = ((1 - depths) * np.random.uniform(0.2, 0.4, size = (1449, ))).transpose(2, 0, 1)
for i in range(len(images)):
fog = (images[i] * depths[i]) + (1 - depths[i]) * np.ones_like(depths[i]) * 255
fog = resize(fog.transpose(1, 2, 0), (256, 256, 3), mode = 'reflect')
img = resize(images[i].transpose(1, 2, 0), (256, 256, 3), mode = 'reflect')
img = (img * 255).astype(np.uint8)
cv2.imwrite(os.path.join('A', str(i).zfill(4) + '.png'), fog)
cv2.imwrite(os.path.join('B', str(i).zfill(4) + '.png'), img)
print('Extracting image:', i, end = '\r')
print('Done.')
| 32.138889 | 93 | 0.551426 |
acf2a4b9ec5e2090321b20c7ba887db67decd7b7 | 3,647 | py | Python | h/cli/commands/user.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | [
"MIT"
] | 1 | 2018-03-09T02:15:16.000Z | 2018-03-09T02:15:16.000Z | h/cli/commands/user.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | [
"MIT"
] | 16 | 2018-03-14T21:23:46.000Z | 2019-04-29T18:55:28.000Z | h/cli/commands/user.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | [
"MIT"
] | 1 | 2021-03-12T09:45:04.000Z | 2021-03-12T09:45:04.000Z | # -*- coding: utf-8 -*-
import click
import sqlalchemy
from h import models
from h.views.admin_users import delete_user
# Root Click command group; the subcommands below (add/admin/password/delete)
# attach themselves via @user.command().
@click.group()
def user():
    """Manage users."""
@user.command()
@click.option('--username', prompt=True)
@click.option('--email', prompt=True)
@click.option('--authority')
@click.password_option()
@click.pass_context
def add(ctx, username, email, password, authority):
    """
    Create a new user.

    Signs the user up without requiring email activation; if a database
    integrity constraint rejects the new row (e.g. duplicate username),
    the upstream error text is surfaced as a CLI error.
    """
    request = ctx.obj['bootstrap']()
    signup_service = request.find_service(name='user_signup')

    signup_kwargs = {
        'username': username,
        'email': email,
        'password': password,
        'require_activation': False,
    }
    if authority:
        signup_kwargs['authority'] = authority

    signup_service.signup(**signup_kwargs)

    try:
        request.tm.commit()
    except sqlalchemy.exc.IntegrityError as err:
        # BUG FIX: exceptions have no `.message` attribute on Python 3;
        # str(err) yields the same text on both Python 2 and 3.
        upstream_error = '\n'.join(' ' + line
                                   for line in str(err).split('\n'))
        message = ('could not create user due to integrity constraint.\n\n{}'
                   .format(upstream_error))
        raise click.ClickException(message)

    click.echo("{username} created".format(username=username), err=True)
@user.command()
@click.argument('username')
@click.option('--authority')
@click.option('--on/--off', default=True)
@click.pass_context
def admin(ctx, username, authority, on):
    """
    Grant or revoke administrative privileges.

    Pass --on (the default) to make the named user an administrator, or
    --off to strip admin rights.
    """
    request = ctx.obj['bootstrap']()
    authority = authority or request.authority

    account = models.User.get_by_username(request.db, username, authority)
    if account is None:
        raise click.ClickException(
            'no user with username "{}" and authority "{}"'.format(username, authority))

    account.admin = on
    request.tm.commit()

    status = '' if on else 'NOT '
    click.echo("{username} is now {status}an administrator"
               .format(username=username, status=status),
               err=True)
@user.command()
@click.argument('username')
@click.option('--authority')
@click.password_option()
@click.pass_context
def password(ctx, username, authority, password):
    """
    Set a new password for a user.

    The positional argument names the user whose password is replaced.
    """
    request = ctx.obj['bootstrap']()
    password_service = request.find_service(name='user_password')
    authority = authority or request.authority

    account = models.User.get_by_username(request.db, username, authority)
    if account is None:
        raise click.ClickException(
            'no user with username "{}" and authority "{}"'.format(username, authority))

    password_service.update_password(account, password)
    request.tm.commit()

    click.echo("Password changed for {}".format(username), err=True)
@user.command()
@click.argument('username')
@click.option('--authority')
@click.pass_context
def delete(ctx, username, authority):
    """
    Remove a user along with their group memberships and annotations.
    """
    request = ctx.obj['bootstrap']()
    authority = authority or request.authority

    account = models.User.get_by_username(request.db, username, authority)
    if account is None:
        raise click.ClickException(
            'no user with username "{}" and authority "{}"'.format(username, authority))

    delete_user(request, account)
    request.tm.commit()

    click.echo("User {} deleted.".format(username), err=True)
acf2a4fa3c038df75be2d1c4696d2f8ea5c2d611 | 11,759 | py | Python | code/03-scrape.py | sdoerstling/medical_crowdfunding_methods | 4d424a396fc5141d2dee1279acdea03a1338321e | [
"MIT"
] | null | null | null | code/03-scrape.py | sdoerstling/medical_crowdfunding_methods | 4d424a396fc5141d2dee1279acdea03a1338321e | [
"MIT"
] | null | null | null | code/03-scrape.py | sdoerstling/medical_crowdfunding_methods | 4d424a396fc5141d2dee1279acdea03a1338321e | [
"MIT"
] | null | null | null | #import libraries
import asyncio
import aiohttp
from itertools import islice
import json
import re
import datetime
import time
from random import sample
import pandas as pd
import async_timeout
import sys
# Proxy URL forwarded with every aiohttp request below; left blank here
# (presumably stripped for publication) -- supply one before running at scale.
PROXY = ''
# Which sitemap shard to scrape; taken from the first CLI argument.
input_index = int(sys.argv[1])
#define helper functions
def findkeys(node, kv):
    """Depth-first generator over every value stored under key *kv*.

    Walks arbitrarily nested dict/list structures; for a dict, the value at
    *kv* (if present) is yielded before descending into the dict's values.
    Non-container leaves yield nothing.
    """
    if isinstance(node, dict):
        if kv in node:
            yield node[kv]
        for child in node.values():
            yield from findkeys(child, kv)
    elif isinstance(node, list):
        for child in node:
            yield from findkeys(child, kv)
#get updates
async def getUpdates(cur_camp, session):
    """Page through a campaign's update feed (3 updates per request).

    cur_camp is the campaign's URL slug appended to the gateway endpoint;
    session is the caller's open aiohttp ClientSession. Returns a dict with
    'update_list' (raw update objects), 'update_resp_status' (see note
    below) and 'update_data_error' (1 when parsing failed).
    """
    print("Starting updates for %s" % cur_camp)
    update_base_begin = "https://gateway.gofundme.com/web-gateway/v1/feed/"
    update_base_end = "/updates?limit=3&offset="
    update_list = []
    update_resp_status = []
    update_data = {
        "update_list" : update_list,
        "update_resp_status" : update_resp_status,
        "update_data_error" : 0
    }
    offset = 0
    while True:
        # guard against the site serving duplicate pages forever: assume a
        # campaign never has more than ~50 updates
        if(offset > 50):
            return update_data
        url = update_base_begin + cur_camp + update_base_end + str(offset)
        async with session.get(url, proxy = PROXY) as resp:
            resp_status = resp.status
            # NOTE(review): this OVERWRITES the status list with a scalar,
            # unlike getComments/getDonors which append to theirs -- confirm
            # downstream analysis code expects a scalar here.
            update_data['update_resp_status'] = resp_status
            if resp_status != 200:
                break
            else:
                try:
                    update_content = await resp.text()
                    update_json = json.loads(update_content)
                    update_list += update_json['references']['updates']
                    # advance while the API reports more pages
                    if(update_json['meta']['has_next']):
                        offset += 3
                    else:
                        break
                except:
                    # any parse/shape error: flag it and stop paging
                    update_data['update_data_error'] = 1
                    break
    print("Response updates for %s" % cur_camp)
    return update_data
#get comments
async def getComments(cur_camp, session):
    """Page through a campaign's comment feed (20 comments per request).

    cur_camp is the campaign's URL slug; session is the caller's open
    aiohttp ClientSession. Returns a dict with 'comment_list' (raw comment
    objects), 'comment_ids' (ids seen so far), 'comment_resp_status' (every
    HTTP status observed) and 'comment_data_error' (1 when parsing failed).
    """
    print("Starting comments for %s" % cur_camp)
    comment_base_begin = "https://gateway.gofundme.com/web-gateway/v1/feed/"
    comment_base_end = "/comments?limit=20&offset="
    comment_list = []
    comment_ids = []
    comment_resp_status = []
    comment_data = {
        "comment_list" : comment_list,
        "comment_ids" : comment_ids,
        "comment_resp_status" : comment_resp_status,
        "comment_data_error" : 0
    }
    offset = 0
    while True:
        url = comment_base_begin + cur_camp + comment_base_end + str(offset)
        async with session.get(url, proxy = PROXY) as resp:
            resp_status = resp.status
            comment_data["comment_resp_status"].append(resp_status)
            if resp_status != 200:
                break
            try:
                comment_content = await resp.text()
                comment_json = json.loads(comment_content)
                curr_comment_ids = list(findkeys(comment_json, "comment_id"))
                # The site sometimes serves pages that repeat earlier
                # comments indefinitely. If the first id on this page was
                # already seen, keep only the not-yet-seen items and stop
                # paging entirely ("only get unique and return").
                # BUG FIX: the original guard's `else` placement let the
                # dedup pass fall through and re-append the whole page (or
                # return before collecting anything, depending on reading);
                # the author's comment states the intent implemented here.
                # Also guard the empty-page case before indexing [0].
                if curr_comment_ids and curr_comment_ids[0] in comment_ids:
                    for item in comment_json['references']['contents']:
                        if item['comment']['comment_id'] not in comment_ids:
                            comment_list.append(item.copy())
                    return comment_data
                comment_ids += curr_comment_ids
                comment_list += comment_json['references']['contents']
                # advance while the API reports more pages
                if comment_json['meta']['has_next']:
                    offset += 20
                else:
                    break
            except Exception:
                # any parse/shape error: flag it and stop paging
                comment_data['comment_data_error'] = 1
                break
    print("Response comments for %s" % cur_camp)
    return comment_data
#get donations
async def getDonors(cur_camp, session):
    """Page through a campaign's donation feed (100 donations per request,
    capped at 1,000 donations in total).

    Returns a dict with the raw donation objects ('donor_list'), every HTTP
    status seen ('donor_resp_status'), whether the 1,000-donation cap was
    hit ('donor_reached_max') and a parse-error flag ('donor_data_error').
    """
    print("Starting donors for %s" % cur_camp)
    donor_base_begin = "https://gateway.gofundme.com/web-gateway/v1/feed/"
    donor_base_end = "/donations?limit=100&offset="
    donor_list = []
    donor_resp_status = []
    donor_data = {
        "donor_list" : donor_list,
        "donor_resp_status" : donor_resp_status,
        "donor_reached_max" : 0,
        "donor_data_error" : 0
    }
    offset = 0
    while True:
        # stop once 1,000 donations (10 pages) have been collected
        if offset >= 1000:
            donor_data['donor_reached_max'] = 1
            break
        url = donor_base_begin + cur_camp + donor_base_end + str(offset) + "&sort=recent"
        async with session.get(url, proxy = PROXY) as resp:
            resp_status = resp.status
            donor_data['donor_resp_status'].append(resp.status)
            if resp_status != 200:
                break
            else:
                try:
                    donor_content = await resp.text()
                    donor_json = json.loads(donor_content)
                    donor_list += donor_json['references']['donations']
                    # advance while the API reports more pages
                    if(donor_json['meta']['has_next']):
                        offset += 100
                    else:
                        break
                except:
                    # any parse/shape error: flag it and stop paging
                    donor_data['donor_data_error'] = 1
                    break
    print("Response donors for %s" % cur_camp)
    return donor_data
#function to
async def getURL(url):
    """Scrape one campaign page and, if it is an active US medical campaign
    (category 11), also pull its donations, comments and updates.

    Returns a dict with a 'scrape' status block plus 'feed'/'donor'/
    'comment'/'update' sections (None for filtered-out campaigns).
    """
    print("Starting %s" % url)
    # politeness delay between requests
    await asyncio.sleep(1)
    camp_data = {
        "scrape" : {
            "url" : url,
            "resp_status" : None,
            "date_scrape" : None,
            "cat" : None,
            "target_cat" : None,
            "activity_status" : None,
            "country" : None
        },
        "feed" : None,
        "donor" : None,
        "comment" : None,
        "update" : None
    }
    #------------------
    # Change so the same session is used for all simultaneous tasks
    #https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
    #------------------
    connector=aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(connector = connector) as session:
        async with session.get(url, proxy = PROXY) as resp:
            print("Response %s" % url)
            resp_status = resp.status
            resp_headers = resp.headers
            # prefer the server's Date header; fall back to local time
            try:
                date_scrape = resp_headers['Date']
            except:
                date_scrape = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + 'EST'
            if resp_status != 200:
                camp_data['scrape']['resp_status'] = resp_status
                camp_data['scrape']['date_scrape'] = date_scrape
                return camp_data
            else:
                camp_data['scrape']['resp_status'] = resp_status
                camp_data['scrape']['date_scrape'] = date_scrape
                content = await resp.text()
                # the campaign state is embedded in the page as a JS literal
                camp_json = json.loads(re.findall(r'window\.initialState = ({.*?});', content)[0])
                feed = camp_json['feed']
                camp_data['scrape']['cat'] = feed['campaign']['category_id']
                camp_data['scrape']['activity_status'] = feed['campaign']['state']
                camp_data['scrape']['country'] = feed['campaign']['location']['country']
                # category 11 appears to be the medical category here --
                # non-medical campaigns are tagged target_cat=0 and dropped
                if feed['campaign']['category_id'] not in [11]:
                    camp_data['scrape']['target_cat'] = 0
                    return camp_data
                # NOTE(review): inactive and non-US campaigns still get
                # target_cat=1 ("right category") before being returned
                # without feed data -- confirm downstream code relies on
                # activity_status/country rather than target_cat alone.
                if feed['campaign']['state'] != "active":
                    camp_data['scrape']['target_cat'] = 1
                    return camp_data
                if feed['campaign']['location']['country'] != "US":
                    camp_data['scrape']['target_cat'] = 1
                    return camp_data
                else:
                    camp_data['scrape']['target_cat'] = 1
                    camp_data['feed'] = feed
                    cur_camp = feed['campaign']['url']
                    # donations
                    camp_data['donor'] = await getDonors(cur_camp, session)
                    # comments
                    camp_data['comment'] = await getComments(cur_camp, session)
                    # updates
                    camp_data['update'] = await getUpdates(cur_camp, session)
                    return camp_data
#function to limit number of simultaneous tasks
def limited_as_completed(coros, limit):
    """
    Run the coroutines (or futures) supplied in the
    iterable coros, ensuring that there are at most
    limit coroutines running at any time.
    Return an iterator whose values, when waited for,
    are Future instances containing the results of
    the coroutines.
    Results may be provided in any order, as they
    become available.
    Courtesy of: https://github.com/andybalaam/asyncioplus
    """
    # prime the pool with the first `limit` coroutines
    futures = [
        asyncio.ensure_future(c)
        for c in islice(coros, 0, limit)
    ]

    async def first_to_finish():
        # Poll (yielding control to the loop via sleep(0)) until any pooled
        # future is done; then top the pool back up from `coros` and return
        # that future's result.
        while True:
            await asyncio.sleep(0)
            for f in futures:
                if f.done():
                    futures.remove(f)
                    try:
                        newf = next(coros)
                        futures.append(
                            asyncio.ensure_future(newf))
                    except StopIteration as e:
                        # generator exhausted: let the pool drain
                        pass
                    return f.result()

    # each yielded awaitable resolves to the next completed result
    while len(futures) > 0:
        yield first_to_finish()
#function to await tasks and add to output data when complete
async def save_when_done(tasks, data):
    """Drain the coroutine generator with at most 100 requests in flight,
    appending every result to *data*; abort via a bare Exception once
    100,000 campaigns with feed data have been collected (caught by the
    caller's except block around run_until_complete).
    """
    for res in limited_as_completed(tasks, 100):
        r = await res
        data.append(r)
        # NOTE(review): rebuilding this DataFrame after every result makes
        # the loop O(n^2) overall; a running counter of non-None feeds would
        # be equivalent and far cheaper.
        feed = pd.DataFrame(i['feed']['campaign'] for i in data if i['feed'] is not None)
        if len(feed) >= 100000:
            print("--- Hit sample size ---")
            raise Exception
# load the sitemap shard selected by the CLI argument
print("Loading campaigns")
data_filename = "../data/sitemaps/sitemaps_csv/sitemaps_" + str(input_index) + ".csv"
campaigns = pd.read_csv(data_filename)
# the second column of the shard holds the campaign URLs
urls = campaigns.iloc[:,1].to_list()
# accumulator filled by save_when_done
data = []
# lazy generator: getURL coroutines are only created as the pool drains
coros = (getURL(url) for url in urls)
# create and run the event loop
print("Starting loop")
start_time = time.time()
loop = asyncio.get_event_loop()
try:
    task = loop.create_task(save_when_done(coros, data))
    loop.run_until_complete(task)
    loop.stop()
    loop.close()
except Exception:
    # save_when_done raises a bare Exception once the target sample size is
    # hit; cancel whatever is still in flight before shutting down.
    # NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9 --
    # this script assumes an older interpreter; verify before upgrading.
    pending = asyncio.Task.all_tasks()
    [task.cancel() for task in pending]
    loop.stop()
finally:
    # loop.close() is idempotent, so the double close on the success path
    # is harmless
    loop.close()
# persist everything that was collected
filename = '../data/scraping/gfm_data_' + str(input_index) + '.json'
with open(filename, 'w', encoding='utf-8') as f:
    json.dump(data, f, ensure_ascii=False, indent=4)
print("done")
time_end = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("--- %s seconds ---" % (time.time() - start_time))
| 32.128415 | 96 | 0.558296 |
acf2a54987947e5170fd4b56220c871df6b1761a | 419 | py | Python | Part_3_advanced/m14_metaclass/register_cls/example_1/example_system/bike.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m14_metaclass/register_cls/example_1/example_system/bike.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m14_metaclass/register_cls/example_1/example_system/bike.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from example_system.serializable import Serializable
from example_system.serializable_registry import SerializableRegistry
class Bike(Serializable):
    """A serializable bike, registered with the SerializableRegistry below."""

    def __init__(self, brand: str, model: str) -> None:
        # Hand the constructor arguments to the Serializable base first.
        super().__init__(brand, model)
        self.model = model
        self.brand = brand

    def __str__(self) -> str:
        return f"Bike: {self.brand} {self.model}"


# Expose Bike to the serialization machinery.
SerializableRegistry.register(Bike)
| 26.1875 | 69 | 0.711217 |
acf2a5977fbde6fa8b12b9f6e237741f61030720 | 1,407 | py | Python | ejercicio_fichero/ejercicio_fichero2/fichero2.py | Ironwilly/python | f6d42c685b4026b018089edb4ae8cc0ca9614e86 | [
"CC0-1.0"
] | null | null | null | ejercicio_fichero/ejercicio_fichero2/fichero2.py | Ironwilly/python | f6d42c685b4026b018089edb4ae8cc0ca9614e86 | [
"CC0-1.0"
] | null | null | null | ejercicio_fichero/ejercicio_fichero2/fichero2.py | Ironwilly/python | f6d42c685b4026b018089edb4ae8cc0ca9614e86 | [
"CC0-1.0"
] | null | null | null | #1. Realizar una aplicación que sea capaz de transformar un fichero
# CSV en un fichero .sql con sentencias insert into.
#- El nombre de la tabla sql será el nombre del fichero sin extensión.
#- Las columnas de la tabla vendrán determinadas por la fila de encabezado.
#- Como entrada, el programa recibirá un fichero csv con varias líneas, y como
# salida, sea obtendrá un fichero con extensión .sql con tantos
# INSERT INTO.... como línea tuviera el CSV.
#2. Realiza la herramienta inversa a la anterior, que reciba un
# fichero con una serie de sentencias INSERT INTO y lo transforme en un fichero .csv
import csv
import sqlite3
# Sample rows to export: (nombre, precio, cantidad).
productos = [
    ("Cocacola", "1.10", "20"),
    ("Fanta", "1.05", "30"),
    ("Aquarius", "1.15", "15")
]
# Write the sample rows to a CSV file with a header row.
with open("productos.csv", "w", newline="\n") as csvfile:
    campos = ["nombre", "precio", "cantidad"]
    writer = csv.DictWriter(csvfile, fieldnames=campos)
    writer.writeheader()
    for nombre, precio, cantidad in productos:
        writer.writerow({
            "nombre": nombre, "precio": precio, "cantidad": cantidad
        })
# Read the CSV back and mirror it into a SQLite table.
con = sqlite3.connect('mydatabase.db')
with open('productos.csv', 'r') as f:
    reader = csv.reader(f)
    columns = next(reader)  # header row: nombre, precio, cantidad
    cursor = con.cursor()
    # BUG FIX: the original executed CREATE TABLE once per data row, the SQL
    # was missing its closing parenthesis, and no rows were ever inserted.
    # Create the table a single time, then insert each row.
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS productos "
        "(nombre VARCHAR(255), precio REAL, cantidad INT)"
    )
    for data in reader:
        # Parameterized INSERT avoids manual quoting / SQL injection issues.
        cursor.execute("INSERT INTO productos VALUES (?, ?, ?)", data)
    # BUG FIX: commit() is a Connection method, not a Cursor method.
    con.commit()
con.close()
| 29.93617 | 99 | 0.67022 |
acf2a6846e3784568323e65330a1bb6279123adf | 854 | py | Python | chainer/functions/math/sign.py | yuhonghong66/chainer | 15d475f54fc39587abd7264808c5e4b33782df9e | [
"MIT"
] | null | null | null | chainer/functions/math/sign.py | yuhonghong66/chainer | 15d475f54fc39587abd7264808c5e4b33782df9e | [
"MIT"
] | 2 | 2019-05-14T15:45:01.000Z | 2019-05-15T07:12:49.000Z | chainer/functions/math/sign.py | yuhonghong66/chainer | 15d475f54fc39587abd7264808c5e4b33782df9e | [
"MIT"
] | null | null | null | import chainer
from chainer import backend
from chainer import utils
def sign(x):
    """Elementwise sign function.

    For a given input :math:`x`, this function returns :math:`sgn(x)`
    defined as

    .. math::
        sgn(x) = \\left \\{ \\begin{array}{cc}
        -1 & {\\rm if~x < 0} \\\\
        0 & {\\rm if~x = 0} \\\\
        1 & {\\rm if~x > 0} \\\\
        \\end{array} \\right.

    .. note::
        The gradient of this function is ``None`` everywhere and therefore
        unchains the computational graph.

    Args:
        x (~chainer.Variable): Input variable for which the sign is computed.

    Returns:
        ~chainer.Variable: Output variable.
    """
    # Unwrap a Variable down to its raw array; plain arrays pass through.
    data = x.array if isinstance(x, chainer.variable.Variable) else x
    xp = backend.get_array_module(data)
    return chainer.as_variable(utils.force_array(xp.sign(data)))
| 23.722222 | 77 | 0.584309 |
acf2a6cd059354c8eb540bc079fb55adcf42980b | 650 | py | Python | phonenumbers/shortdata/region_PW.py | igushev/fase_lib | 182c626193193b196041b18b9974b5b2cbf15c67 | [
"MIT"
] | 7 | 2019-05-20T09:57:02.000Z | 2020-01-10T05:30:48.000Z | phonenumbers/shortdata/region_PW.py | igushev/fase_lib | 182c626193193b196041b18b9974b5b2cbf15c67 | [
"MIT"
] | null | null | null | phonenumbers/shortdata/region_PW.py | igushev/fase_lib | 182c626193193b196041b18b9974b5b2cbf15c67 | [
"MIT"
] | null | null | null | """Auto-generated file, do not edit by hand. PW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-number metadata for Palau (PW): short codes are 3-digit numbers
# starting with 9, and '911' doubles as the emergency number.
PHONE_METADATA_PW = PhoneMetadata(id='PW', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='9\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)),
    emergency=PhoneNumberDesc(national_number_pattern='911', possible_number_pattern='\\d{3}', example_number='911', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='911', possible_number_pattern='\\d{3}', example_number='911', possible_length=(3,)),
    short_data=True)
| 72.222222 | 140 | 0.775385 |
acf2a74433b890728b82433b64ef68126fa44707 | 1,540 | py | Python | maya-listen.py | meetar/nunchuck-to-maya | 227381d1a8e94f9452942b03b53ee94109a443e6 | [
"MIT"
] | null | null | null | maya-listen.py | meetar/nunchuck-to-maya | 227381d1a8e94f9452942b03b53ee94109a443e6 | [
"MIT"
] | null | null | null | maya-listen.py | meetar/nunchuck-to-maya | 227381d1a8e94f9452942b03b53ee94109a443e6 | [
"MIT"
] | null | null | null | ### Wii Nunchuk to Maya proof of concept
### http://zoomy.net/2010/04/11/wii-nunchuk-to-maya/
###
sys.path.append( "C:\Program Files\Common Files\Python\Python25\Lib\site-packages\win32")
sys.path.append( "C:\Program Files\Common Files\Python\Python25\Lib\site-packages\win32\lib")
import time, sys, serial, win32file, win32con, re
import maya.cmds as cmds
import maya.mel as mel
try: ser
except: 1
else: ser.close()
# open serial connection - adjust settings for your input device
ser = serial.Serial(
port='COM3',
baudrate=19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
sphere1 = polySphere(n="sphere1")[0]
cmds.setAttr(sphere1+".scaleY", 2)
flare=nonLinear(type='flare')[0]
cmds.setAttr(flare+".startFlareX",1.5)
cmds.setAttr(flare+".startFlareZ",1.5)
cmds.setAttr(flare+"Handle.visibility",0)
# progress bar, enabling "Esc"
gMainProgressBar = mel.eval('$tmp = $gMainProgressBar');
cmds.progressBar( gMainProgressBar,
edit=True,
beginProgress=True,
isInterruptable=True,
status='Reading serial data...' )
while 1:
data = ser.readline()
data = data.split(';')
data[1] = data[1][:-2]
print data
cmds.setAttr(sphere1+".rotateZ", (float(data[0])*-1)-45)
cmds.setAttr(sphere1+".rotateX", float(data[1])+90)
refresh() # update the viewscreen
if cmds.progressBar(gMainProgressBar, query=True, isCancelled=True ) :
delete(whichObj)
break
ser.close()
cmds.progressBar(gMainProgressBar, edit=True, endProgress=True)
### | 28 | 93 | 0.70974 |
acf2a7464c07df9e9556a585b1548a1bca7b76a5 | 8,893 | py | Python | inference.py | LightTwist/RobustVideoMatting | 79eb143fef3a4c58b4857c1a5a927a318f528093 | [
"Apache-2.0"
] | 11 | 2021-08-31T06:20:17.000Z | 2021-11-08T13:14:29.000Z | inference.py | umit-ml/RobustVideoMatting | 03096f23de1831b8181dadd5e165561c2759f9eb | [
"Apache-2.0"
] | 1 | 2021-09-15T10:45:48.000Z | 2021-09-15T10:45:48.000Z | inference.py | umit-ml/RobustVideoMatting | 03096f23de1831b8181dadd5e165561c2759f9eb | [
"Apache-2.0"
] | 21 | 2021-08-31T00:55:25.000Z | 2021-09-16T09:17:31.000Z | """
python inference.py \
--variant mobilenetv3 \
--checkpoint "CHECKPOINT" \
--device cuda \
--input-source "input.mp4" \
--output-type video \
--output-composition "composition.mp4" \
--output-alpha "alpha.mp4" \
--output-foreground "foreground.mp4" \
--output-video-mbps 4 \
--seq-chunk 1
"""
import torch
import os
from torch.utils.data import DataLoader
from torchvision import transforms
from typing import Optional, Tuple
from tqdm.auto import tqdm
from inference_utils import VideoReader, VideoWriter, ImageSequenceReader, ImageSequenceWriter
def convert_video(model,
                  input_source: str,
                  input_resize: Optional[Tuple[int, int]] = None,
                  downsample_ratio: Optional[float] = None,
                  output_type: str = 'video',
                  output_composition: Optional[str] = None,
                  output_alpha: Optional[str] = None,
                  output_foreground: Optional[str] = None,
                  output_video_mbps: Optional[float] = None,
                  seq_chunk: int = 1,
                  num_workers: int = 0,
                  progress: bool = True,
                  device: Optional[str] = None,
                  dtype: Optional[torch.dtype] = None):
    """
    Args:
        input_source:A video file, or an image sequence directory. Images must be sorted in accending order, support png and jpg.
        input_resize: If provided, the input are first resized to (w, h).
        downsample_ratio: The model's downsample_ratio hyperparameter. If not provided, model automatically set one.
        output_type: Options: ["video", "png_sequence"].
        output_composition:
            The composition output path. File path if output_type == 'video'. Directory path if output_type == 'png_sequence'.
            If output_type == 'video', the composition has green screen background.
            If output_type == 'png_sequence'. the composition is RGBA png images.
        output_alpha: The alpha output from the model.
        output_foreground: The foreground output from the model.
        seq_chunk: Number of frames to process at once. Increase it for better parallelism.
        num_workers: PyTorch's DataLoader workers. Only use >0 for image input.
        progress: Show progress bar.
        device: Only need to manually provide if model is a TorchScript freezed model.
        dtype: Only need to manually provide if model is a TorchScript freezed model.
    """
    assert downsample_ratio is None or (downsample_ratio > 0 and downsample_ratio <= 1), 'Downsample ratio must be between 0 (exclusive) and 1 (inclusive).'
    assert any([output_composition, output_alpha, output_foreground]), 'Must provide at least one output.'
    assert output_type in ['video', 'png_sequence'], 'Only support "video" and "png_sequence" output modes.'
    assert seq_chunk >= 1, 'Sequence chunk must be >= 1'
    assert num_workers >= 0, 'Number of workers must be >= 0'
    assert output_video_mbps == None or output_type == 'video', 'Mbps is not available for png_sequence output.'

    # Initialize transform. Resize expects (h, w), hence the [::-1] flip.
    if input_resize is not None:
        transform = transforms.Compose([
            transforms.Resize(input_resize[::-1]),
            transforms.ToTensor()
        ])
    else:
        transform = transforms.ToTensor()

    # Initialize reader: a file path means video input, otherwise an image dir.
    if os.path.isfile(input_source):
        source = VideoReader(input_source, transform)
    else:
        source = ImageSequenceReader(input_source, transform)
    reader = DataLoader(source, batch_size=seq_chunk, pin_memory=True, num_workers=num_workers)

    # Initialize writers
    if output_type == 'video':
        frame_rate = source.frame_rate if isinstance(source, VideoReader) else 30
        output_video_mbps = 1 if output_video_mbps is None else output_video_mbps
        if output_composition is not None:
            writer_com = VideoWriter(
                path=output_composition,
                frame_rate=frame_rate,
                bit_rate=int(output_video_mbps * 1000000))
        if output_alpha is not None:
            writer_pha = VideoWriter(
                path=output_alpha,
                frame_rate=frame_rate,
                bit_rate=int(output_video_mbps * 1000000))
        if output_foreground is not None:
            writer_fgr = VideoWriter(
                path=output_foreground,
                frame_rate=frame_rate,
                bit_rate=int(output_video_mbps * 1000000))
    else:
        if output_composition is not None:
            writer_com = ImageSequenceWriter(output_composition, 'png')
        if output_alpha is not None:
            # BUG FIX: these two previously constructed VideoWriter(path, 'png'),
            # passing 'png' where VideoWriter expects a frame rate. png_sequence
            # output must use ImageSequenceWriter, like the composition above.
            writer_pha = ImageSequenceWriter(output_alpha, 'png')
        if output_foreground is not None:
            writer_fgr = ImageSequenceWriter(output_foreground, 'png')

    # Inference
    model = model.eval()
    if device is None or dtype is None:
        # Infer device/dtype from the model weights (not possible for frozen
        # TorchScript models, hence the explicit parameters).
        param = next(model.parameters())
        dtype = param.dtype
        device = param.device

    if (output_composition is not None) and (output_type == 'video'):
        # Green-screen background, shaped for [B, T, C, H, W] broadcasting.
        bgr = torch.tensor([120, 255, 155], device=device, dtype=dtype).div(255).view(1, 1, 3, 1, 1)

    try:
        with torch.no_grad():
            bar = tqdm(total=len(source), disable=not progress, dynamic_ncols=True)
            rec = [None] * 4  # recurrent states, carried across chunks
            for src in reader:
                if downsample_ratio is None:
                    downsample_ratio = auto_downsample_ratio(*src.shape[2:])
                src = src.to(device, dtype, non_blocking=True).unsqueeze(0) # [B, T, C, H, W]
                fgr, pha, *rec = model(src, *rec, downsample_ratio)
                if output_foreground is not None:
                    writer_fgr.write(fgr[0])
                if output_alpha is not None:
                    writer_pha.write(pha[0])
                if output_composition is not None:
                    if output_type == 'video':
                        com = fgr * pha + bgr * (1 - pha)
                    else:
                        # RGBA output: zero out foreground where alpha is zero.
                        fgr = fgr * pha.gt(0)
                        com = torch.cat([fgr, pha], dim=-3)
                    writer_com.write(com[0])
                bar.update(src.size(1))
    finally:
        # Clean up: always release the writers, even on error/interrupt.
        if output_composition is not None:
            writer_com.close()
        if output_alpha is not None:
            writer_pha.close()
        if output_foreground is not None:
            writer_fgr.close()
def auto_downsample_ratio(h, w):
    """Pick a downsample ratio that caps the longer image side at 512 px.

    Images already at or below 512 px on their longest side keep full
    resolution (ratio 1).
    """
    longest_side = max(h, w)
    return min(512 / longest_side, 1)
class Converter:
    """Load a matting checkpoint, TorchScript-freeze it, and expose convert().

    NOTE(review): ``MattingNetwork`` is only imported inside the
    ``if __name__ == '__main__'`` block below, so instantiating Converter
    from an importing module would raise NameError — confirm intended.
    """
    def __init__(self, variant: str, checkpoint: str, device: str):
        self.model = MattingNetwork(variant).eval().to(device)
        self.model.load_state_dict(torch.load(checkpoint, map_location=device))
        # Script + freeze the model for faster, parameter-less inference.
        self.model = torch.jit.script(self.model)
        self.model = torch.jit.freeze(self.model)
        self.device = device
    def convert(self, *args, **kwargs):
        # Frozen TorchScript models expose no parameters, so device/dtype
        # must be passed to convert_video explicitly.
        convert_video(self.model, device=self.device, dtype=torch.float32, *args, **kwargs)
if __name__ == '__main__':
    # Command-line entry point: parse options, build a Converter, run it.
    import argparse
    from model import MattingNetwork
    parser = argparse.ArgumentParser()
    # Model selection / placement.
    parser.add_argument('--variant', type=str, required=True, choices=['mobilenetv3', 'resnet50'])
    parser.add_argument('--checkpoint', type=str, required=True)
    parser.add_argument('--device', type=str, required=True)
    # Input options.
    parser.add_argument('--input-source', type=str, required=True)
    parser.add_argument('--input-resize', type=int, default=None, nargs=2)
    parser.add_argument('--downsample-ratio', type=float)
    # Output options.
    parser.add_argument('--output-composition', type=str)
    parser.add_argument('--output-alpha', type=str)
    parser.add_argument('--output-foreground', type=str)
    parser.add_argument('--output-type', type=str, required=True, choices=['video', 'png_sequence'])
    parser.add_argument('--output-video-mbps', type=int, default=1)
    # Performance / UX options.
    parser.add_argument('--seq-chunk', type=int, default=1)
    parser.add_argument('--num-workers', type=int, default=0)
    parser.add_argument('--disable-progress', action='store_true')
    args = parser.parse_args()
    converter = Converter(args.variant, args.checkpoint, args.device)
    converter.convert(
        input_source=args.input_source,
        input_resize=args.input_resize,
        downsample_ratio=args.downsample_ratio,
        output_type=args.output_type,
        output_composition=args.output_composition,
        output_alpha=args.output_alpha,
        output_foreground=args.output_foreground,
        output_video_mbps=args.output_video_mbps,
        seq_chunk=args.seq_chunk,
        num_workers=args.num_workers,
        progress=not args.disable_progress
    )
| 42.754808 | 156 | 0.63252 |
acf2a858098d4ea2a6765f27b5804d3733a903e7 | 2,433 | py | Python | gas_mileage/test_listTrips.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | 1 | 2021-03-22T20:45:06.000Z | 2021-03-22T20:45:06.000Z | gas_mileage/test_listTrips.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | gas_mileage/test_listTrips.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | """
Do Not Edit this file. You may and are encouraged to look at it for reference.
"""
import unittest
import re
import gas_mileage
class TestListTrips(unittest.TestCase):
    """Unit tests for the student-implemented gas_mileage.listTrips function."""
    def verifyLines(self, notebook, mpg):
        """Assert listTrips(notebook) yields one line per trip containing the
        date, miles, gallons and the expected MPG (case-insensitive match)."""
        from gas_mileage import listTrips
        trips = listTrips(notebook)
        self.assertTrue(type(trips) is list, 'listTrips did not return a list')
        self.assertTrue(len(trips) == len(notebook))
        for i in range(len(trips)):
            nb = notebook[i]
            matchdate = nb['date']
            matchmiles = str(nb['miles']) + ' miles'
            matchgallons = str(nb['gallons']) + ' gallons'
            matchmpg = str(mpg[i]) + ' mpg'
            trip = trips[i]
            self.assertTrue(matchdate.lower() in trip.lower(), '"' + nb['date'] + '" not found in "' + trip + '"')
            self.assertTrue(matchmiles.lower() in trip.lower(),
                            '"' + str(nb['miles']) + ' miles" not found in "' + trip + '"')
            self.assertTrue(matchgallons.lower() in trip.lower(),
                            '"' + str(nb['gallons']) + ' gallons" not found in "' + trip + '"')
            self.assertTrue(matchmpg.lower() in trip.lower(), '"' + str(mpg[i]) + ' MPG" not found in "' + trip + '"')
    def test001_listTripsExists(self):
        """listTrips must exist in the gas_mileage module."""
        self.assertTrue('listTrips' in dir(gas_mileage),
                        'Function "listTrips" is not defined, check your spelling')
        return
    def test002_listTripsEmptyNotebook(self):
        """An empty notebook must produce an empty list of lines."""
        from gas_mileage import listTrips
        notebook = []
        lines = listTrips(notebook)
        self.assertTrue(type(lines) is list, 'listTrips did not return a list')
        self.assertTrue(len(lines) == 0, 'There were no trips but you returned lines')
    def test003_listTrips(self):
        """Three trips with whole/fractional gallons; expected MPG precomputed."""
        notebook = [
            {'date': '01/01/2017', 'miles': 300.0, 'gallons': 10.0},
            {'date': '01/05/2017', 'miles': 182.0, 'gallons': 7.0},
            {'date': '01/15/2017', 'miles': 240.0, 'gallons': 9.6}
        ]
        mpg = [30.0, 26.0, 25.0]
        self.verifyLines(notebook, mpg)
    def test004_listTrips(self):
        """Date strings need not be numeric; matching is purely textual."""
        notebook = [
            {'date': 'Jan 01', 'miles': 45.0, 'gallons': 1.5},
            {'date': 'Jan 05', 'miles': 405.0, 'gallons': 15.0}
        ]
        mpg = [30.0, 27.0]
        self.verifyLines(notebook, mpg)
if __name__ == '__main__':
    unittest.main()
| 34.267606 | 118 | 0.55076 |
acf2a858eb38b91b74b8f9e8e93d85e945733fb8 | 36,125 | py | Python | haystack/backends/whoosh_backend.py | puzzlet/django-haystack | 137e2b95334861aed8ecf41758b4c825144b9adf | [
"BSD-3-Clause"
] | 1 | 2021-05-07T11:34:52.000Z | 2021-05-07T11:34:52.000Z | haystack/backends/whoosh_backend.py | puzzlet/django-haystack | 137e2b95334861aed8ecf41758b4c825144b9adf | [
"BSD-3-Clause"
] | null | null | null | haystack/backends/whoosh_backend.py | puzzlet/django-haystack | 137e2b95334861aed8ecf41758b4c825144b9adf | [
"BSD-3-Clause"
] | null | null | null | import json
import os
import re
import shutil
import threading
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datetime_safe import datetime
from django.utils.encoding import force_str
from haystack.backends import (
BaseEngine,
BaseSearchBackend,
BaseSearchQuery,
EmptyResults,
log_query,
)
from haystack.constants import (
DJANGO_CT,
DJANGO_ID,
FUZZY_WHOOSH_MAX_EDITS,
FUZZY_WHOOSH_MIN_PREFIX,
ID,
)
from haystack.exceptions import MissingDependency, SearchBackendError, SkipDocument
from haystack.inputs import Clean, Exact, PythonData, Raw
from haystack.models import SearchResult
from haystack.utils import get_identifier, get_model_ct
from haystack.utils import log as logging
from haystack.utils.app_loading import haystack_get_model
try:
import whoosh
except ImportError:
raise MissingDependency(
"The 'whoosh' backend requires the installation of 'Whoosh'. Please refer to the documentation."
)
# Handle minimum requirement.
if not hasattr(whoosh, "__version__") or whoosh.__version__ < (2, 5, 0):
raise MissingDependency("The 'whoosh' backend requires version 2.5.0 or greater.")
# Bubble up the correct error.
from whoosh import index
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import BOOLEAN, DATETIME
from whoosh.fields import ID as WHOOSH_ID
from whoosh.fields import IDLIST, KEYWORD, NGRAM, NGRAMWORDS, NUMERIC, TEXT, Schema
from whoosh.filedb.filestore import FileStorage, RamStorage
from whoosh.highlight import ContextFragmenter, HtmlFormatter
from whoosh.highlight import highlight as whoosh_highlight
from whoosh.qparser import FuzzyTermPlugin, QueryParser
from whoosh.searching import ResultsPage
from whoosh.writing import AsyncWriter
DATETIME_REGEX = re.compile(
r"^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d{3,6}Z?)?$"
)
LOCALS = threading.local()
LOCALS.RAM_STORE = None
class WhooshHtmlFormatter(HtmlFormatter):
    """
    This is a HtmlFormatter simpler than the whoosh.HtmlFormatter.
    We use it to have consistent results across backends. Specifically,
    Solr, Xapian and Elasticsearch are using this formatting.
    """
    # Wrap each highlighted token in a bare tag (e.g. <em>term</em>) with no
    # class attribute, unlike Whoosh's default highlight markup.
    template = "<%(tag)s>%(t)s</%(tag)s>"
class WhooshSearchBackend(BaseSearchBackend):
    """Haystack search backend implemented on top of the Whoosh library."""
    # Word reserved by Whoosh for special use.
    RESERVED_WORDS = ("AND", "NOT", "OR", "TO")
    # Characters reserved by Whoosh for special use.
    # The '\\' must come first, so as not to overwrite the other slash replacements.
    RESERVED_CHARACTERS = (
        "\\",
        "+",
        "-",
        "&&",
        "||",
        "!",
        "(",
        ")",
        "{",
        "}",
        "[",
        "]",
        "^",
        '"',
        "~",
        "*",
        "?",
        ":",
        ".",
    )
def __init__(self, connection_alias, **connection_options):
super().__init__(connection_alias, **connection_options)
self.setup_complete = False
self.use_file_storage = True
self.post_limit = getattr(connection_options, "POST_LIMIT", 128 * 1024 * 1024)
self.path = connection_options.get("PATH")
if connection_options.get("STORAGE", "file") != "file":
self.use_file_storage = False
if self.use_file_storage and not self.path:
raise ImproperlyConfigured(
"You must specify a 'PATH' in your settings for connection '%s'."
% connection_alias
)
self.log = logging.getLogger("haystack")
    def setup(self):
        """
        Defers loading until needed.

        Creates the index directory/storage on first use, builds the schema
        from the unified index, configures the query parser, and opens (or
        creates) the Whoosh index.
        """
        from haystack import connections
        new_index = False
        # Make sure the index is there.
        if self.use_file_storage and not os.path.exists(self.path):
            os.makedirs(self.path)
            new_index = True
        if self.use_file_storage and not os.access(self.path, os.W_OK):
            raise IOError(
                "The path to your Whoosh index '%s' is not writable for the current user/group."
                % self.path
            )
        if self.use_file_storage:
            self.storage = FileStorage(self.path)
        else:
            # RAM storage is shared via the module-level LOCALS so repeated
            # backend instances reuse the same in-memory index.
            global LOCALS
            if getattr(LOCALS, "RAM_STORE", None) is None:
                LOCALS.RAM_STORE = RamStorage()
            self.storage = LOCALS.RAM_STORE
        self.content_field_name, self.schema = self.build_schema(
            connections[self.connection_alias].get_unified_index().all_searchfields()
        )
        self.parser = QueryParser(self.content_field_name, schema=self.schema)
        # Enables the `term~` fuzzy-match syntax in parsed queries.
        self.parser.add_plugins([FuzzyTermPlugin])
        if new_index is True:
            self.index = self.storage.create_index(self.schema)
        else:
            try:
                self.index = self.storage.open_index(schema=self.schema)
            except index.EmptyIndexError:
                self.index = self.storage.create_index(self.schema)
        self.setup_complete = True
    def build_schema(self, fields):
        """
        Map Haystack field classes to Whoosh schema field types.

        Returns a (content_field_name, whoosh.fields.Schema) tuple, where
        content_field_name is the document field used for full-text search.
        Raises SearchBackendError if no user-defined fields were found.
        """
        # Every document carries Haystack's identifier fields.
        schema_fields = {
            ID: WHOOSH_ID(stored=True, unique=True),
            DJANGO_CT: WHOOSH_ID(stored=True),
            DJANGO_ID: WHOOSH_ID(stored=True),
        }
        # Grab the number of keys that are hard-coded into Haystack.
        # We'll use this to (possibly) fail slightly more gracefully later.
        initial_key_count = len(schema_fields)
        content_field_name = ""
        for _, field_class in fields.items():
            if field_class.is_multivalued:
                # Unindexed multivalue fields are stored-only ID lists;
                # indexed ones become searchable comma-separated keywords.
                if field_class.indexed is False:
                    schema_fields[field_class.index_fieldname] = IDLIST(
                        stored=True, field_boost=field_class.boost
                    )
                else:
                    schema_fields[field_class.index_fieldname] = KEYWORD(
                        stored=True,
                        commas=True,
                        scorable=True,
                        field_boost=field_class.boost,
                    )
            elif field_class.field_type in ["date", "datetime"]:
                schema_fields[field_class.index_fieldname] = DATETIME(
                    stored=field_class.stored, sortable=True
                )
            elif field_class.field_type == "integer":
                schema_fields[field_class.index_fieldname] = NUMERIC(
                    stored=field_class.stored,
                    numtype=int,
                    field_boost=field_class.boost,
                )
            elif field_class.field_type == "float":
                schema_fields[field_class.index_fieldname] = NUMERIC(
                    stored=field_class.stored,
                    numtype=float,
                    field_boost=field_class.boost,
                )
            elif field_class.field_type == "boolean":
                # Field boost isn't supported on BOOLEAN as of 1.8.2.
                schema_fields[field_class.index_fieldname] = BOOLEAN(
                    stored=field_class.stored
                )
            elif field_class.field_type == "ngram":
                schema_fields[field_class.index_fieldname] = NGRAM(
                    minsize=3,
                    maxsize=15,
                    stored=field_class.stored,
                    field_boost=field_class.boost,
                )
            elif field_class.field_type == "edge_ngram":
                schema_fields[field_class.index_fieldname] = NGRAMWORDS(
                    minsize=2,
                    maxsize=15,
                    at="start",
                    stored=field_class.stored,
                    field_boost=field_class.boost,
                )
            else:
                # Default: stemmed, sortable full-text field.
                schema_fields[field_class.index_fieldname] = TEXT(
                    stored=True,
                    analyzer=StemmingAnalyzer(),
                    field_boost=field_class.boost,
                    sortable=True,
                )
            if field_class.document is True:
                # The document field also powers spelling suggestions.
                content_field_name = field_class.index_fieldname
                schema_fields[field_class.index_fieldname].spelling = True
        # Fail more gracefully than relying on the backend to die if no fields
        # are found.
        if len(schema_fields) <= initial_key_count:
            raise SearchBackendError(
                "No fields were found in any search_indexes. Please correct this before attempting to search."
            )
        return (content_field_name, Schema(**schema_fields))
    def update(self, index, iterable, commit=True):
        """
        Index (or re-index) every object in `iterable`, prepared via `index`.

        NOTE(review): the `commit` argument is accepted for API parity but
        ignored — the writer always commits (see comment at the bottom).
        NOTE(review): `len(iterable)` below assumes a sized collection; a
        generator would raise TypeError — confirm callers pass lists/querysets.
        """
        if not self.setup_complete:
            self.setup()
        self.index = self.index.refresh()
        writer = AsyncWriter(self.index)
        for obj in iterable:
            try:
                doc = index.full_prepare(obj)
            except SkipDocument:
                self.log.debug("Indexing for object `%s` skipped", obj)
            else:
                # Really make sure it's unicode, because Whoosh won't have it any
                # other way.
                for key in doc:
                    doc[key] = self._from_python(doc[key])
                # Document boosts aren't supported in Whoosh 2.5.0+.
                if "boost" in doc:
                    del doc["boost"]
                try:
                    writer.update_document(**doc)
                except Exception as e:
                    if not self.silently_fail:
                        raise
                    # We'll log the object identifier but won't include the actual object
                    # to avoid the possibility of that generating encoding errors while
                    # processing the log message:
                    self.log.error(
                        "%s while preparing object for update" % e.__class__.__name__,
                        exc_info=True,
                        extra={"data": {"index": index, "object": get_identifier(obj)}},
                    )
        if len(iterable) > 0:
            # For now, commit no matter what, as we run into locking issues otherwise.
            writer.commit()
    def remove(self, obj_or_string, commit=True):
        """
        Delete the single document identified by `obj_or_string` (a model
        instance or "app.model.pk" string) from the index.

        NOTE(review): the `commit` argument is accepted for API parity but
        never used here.
        """
        if not self.setup_complete:
            self.setup()
        self.index = self.index.refresh()
        whoosh_id = get_identifier(obj_or_string)
        try:
            # Delete by an exact-match query on the unique ID field.
            self.index.delete_by_query(q=self.parser.parse('%s:"%s"' % (ID, whoosh_id)))
        except Exception as e:
            if not self.silently_fail:
                raise
            self.log.error(
                "Failed to remove document '%s' from Whoosh: %s",
                whoosh_id,
                e,
                exc_info=True,
            )
    def clear(self, models=None, commit=True):
        """
        Clear the index: everything when `models` is None, otherwise only
        documents belonging to the given model classes.
        """
        if not self.setup_complete:
            self.setup()
        self.index = self.index.refresh()
        if models is not None:
            assert isinstance(models, (list, tuple))
        try:
            if models is None:
                # Nuking the whole index is cheaper than deleting documents.
                self.delete_index()
            else:
                # Build an OR query over the django content-type markers.
                models_to_delete = []
                for model in models:
                    models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
                self.index.delete_by_query(
                    q=self.parser.parse(" OR ".join(models_to_delete))
                )
        except Exception as e:
            if not self.silently_fail:
                raise
            if models is not None:
                self.log.error(
                    "Failed to clear Whoosh index of models '%s': %s",
                    ",".join(models_to_delete),
                    e,
                    exc_info=True,
                )
            else:
                self.log.error("Failed to clear Whoosh index: %s", e, exc_info=True)
def delete_index(self):
# Per the Whoosh mailing list, if wiping out everything from the index,
# it's much more efficient to simply delete the index files.
if self.use_file_storage and os.path.exists(self.path):
shutil.rmtree(self.path)
elif not self.use_file_storage:
self.storage.clean()
# Recreate everything.
self.setup()
    def optimize(self):
        """Ask Whoosh to run an optimization (segment merge) pass on the index."""
        if not self.setup_complete:
            self.setup()
        self.index = self.index.refresh()
        self.index.optimize()
def calculate_page(self, start_offset=0, end_offset=None):
# Prevent against Whoosh throwing an error. Requires an end_offset
# greater than 0.
if end_offset is not None and end_offset <= 0:
end_offset = 1
# Determine the page.
page_num = 0
if end_offset is None:
end_offset = 1000000
if start_offset is None:
start_offset = 0
page_length = end_offset - start_offset
if page_length and page_length > 0:
page_num = int(start_offset / page_length)
# Increment because Whoosh uses 1-based page numbers.
page_num += 1
return page_num, page_length
    @log_query
    def search(
        self,
        query_string,
        sort_by=None,
        start_offset=0,
        end_offset=None,
        fields="",
        highlight=False,
        facets=None,
        date_facets=None,
        query_facets=None,
        narrow_queries=None,
        spelling_query=None,
        within=None,
        dwithin=None,
        distance_point=None,
        models=None,
        limit_to_registered_models=None,
        result_class=None,
        **kwargs
    ):
        """
        Run `query_string` against the Whoosh index and return a dict with
        `results`, `hits` and (when spelling is enabled) `spelling_suggestion`.

        Faceting parameters are accepted for API compatibility but Whoosh
        does not support them (warnings are emitted). The spatial parameters
        (within/dwithin/distance_point) are accepted but unused here.
        """
        if not self.setup_complete:
            self.setup()
        # A zero length query should return no results.
        if len(query_string) == 0:
            return {"results": [], "hits": 0}
        query_string = force_str(query_string)
        # A one-character query (non-wildcard) gets nabbed by a stopwords
        # filter and should yield zero results.
        if len(query_string) <= 1 and query_string != "*":
            return {"results": [], "hits": 0}
        reverse = False
        if sort_by is not None:
            # Determine if we need to reverse the results and if Whoosh can
            # handle what it's being asked to sort by. Reversing is an
            # all-or-nothing action, unfortunately.
            sort_by_list = []
            reverse_counter = 0
            for order_by in sort_by:
                if order_by.startswith("-"):
                    reverse_counter += 1
            if reverse_counter and reverse_counter != len(sort_by):
                raise SearchBackendError(
                    "Whoosh requires all order_by fields"
                    " to use the same sort direction"
                )
            for order_by in sort_by:
                if order_by.startswith("-"):
                    sort_by_list.append(order_by[1:])
                    if len(sort_by_list) == 1:
                        reverse = True
                else:
                    sort_by_list.append(order_by)
                    if len(sort_by_list) == 1:
                        reverse = False
            sort_by = sort_by_list
        if facets is not None:
            warnings.warn("Whoosh does not handle faceting.", Warning, stacklevel=2)
        if date_facets is not None:
            warnings.warn(
                "Whoosh does not handle date faceting.", Warning, stacklevel=2
            )
        if query_facets is not None:
            warnings.warn(
                "Whoosh does not handle query faceting.", Warning, stacklevel=2
            )
        narrowed_results = None
        self.index = self.index.refresh()
        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(
                settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
            )
        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            # Using narrow queries, limit the results to only models handled
            # with the current routers.
            model_choices = self.build_models_list()
        else:
            model_choices = []
        if len(model_choices) > 0:
            if narrow_queries is None:
                narrow_queries = set()
            narrow_queries.add(
                " OR ".join(["%s:%s" % (DJANGO_CT, rm) for rm in model_choices])
            )
        narrow_searcher = None
        if narrow_queries is not None:
            # Potentially expensive? I don't see another way to do it in Whoosh...
            narrow_searcher = self.index.searcher()
            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(
                    self.parser.parse(force_str(nq)), limit=None
                )
                if len(recent_narrowed_results) <= 0:
                    return {"results": [], "hits": 0}
                if narrowed_results:
                    # Intersect with the previously narrowed result set.
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results
        self.index = self.index.refresh()
        if self.index.doc_count():
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query_string)
            # In the event of an invalid/stopworded query, recover gracefully.
            if parsed_query is None:
                return {"results": [], "hits": 0}
            page_num, page_length = self.calculate_page(start_offset, end_offset)
            search_kwargs = {
                "pagelen": page_length,
                "sortedby": sort_by,
                "reverse": reverse,
            }
            # Handle the case where the results have been narrowed.
            if narrowed_results is not None:
                search_kwargs["filter"] = narrowed_results
            try:
                raw_page = searcher.search_page(parsed_query, page_num, **search_kwargs)
            except ValueError:
                if not self.silently_fail:
                    raise
                return {"results": [], "hits": 0, "spelling_suggestion": None}
            # Because as of Whoosh 2.5.1, it will return the wrong page of
            # results if you request something too high. :(
            if raw_page.pagenum < page_num:
                return {"results": [], "hits": 0, "spelling_suggestion": None}
            results = self._process_results(
                raw_page,
                highlight=highlight,
                query_string=query_string,
                spelling_query=spelling_query,
                result_class=result_class,
            )
            searcher.close()
            if hasattr(narrow_searcher, "close"):
                narrow_searcher.close()
            return results
        else:
            # Empty index: still honor spelling suggestions when enabled.
            if self.include_spelling:
                if spelling_query:
                    spelling_suggestion = self.create_spelling_suggestion(
                        spelling_query
                    )
                else:
                    spelling_suggestion = self.create_spelling_suggestion(query_string)
            else:
                spelling_suggestion = None
            return {
                "results": [],
                "hits": 0,
                "spelling_suggestion": spelling_suggestion,
            }
    def more_like_this(
        self,
        model_instance,
        additional_query_string=None,
        start_offset=0,
        end_offset=None,
        models=None,
        limit_to_registered_models=None,
        result_class=None,
        **kwargs
    ):
        """
        Return documents similar to ``model_instance``.

        Looks up the indexed document for ``model_instance`` by its identifier
        and asks Whoosh for "more like this" matches on the main content
        field, optionally narrowed by model choices and an extra query string.

        :param model_instance: model object to find similar documents for.
        :param additional_query_string: extra query used to narrow results.
        :param start_offset: zero-based offset of the first result returned.
        :param end_offset: zero-based offset marking the end of the page.
        :param models: optional iterable of model classes to restrict to.
        :param limit_to_registered_models: when ``None``, falls back to the
            ``HAYSTACK_LIMIT_TO_REGISTERED_MODELS`` setting (default ``True``).
        :param result_class: class used to wrap each hit; passed through to
            ``_process_results``.
        :returns: dict from ``_process_results`` (``results``/``hits`` keys),
            or an empty-results dict when narrowing/paging yields nothing.
        """
        if not self.setup_complete:
            self.setup()
        # Field used as the "document body" for similarity matching.
        field_name = self.content_field_name
        narrow_queries = set()
        narrowed_results = None
        self.index = self.index.refresh()
        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(
                settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
            )
        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            # Using narrow queries, limit the results to only models handled
            # with the current routers.
            model_choices = self.build_models_list()
        else:
            model_choices = []
        if len(model_choices) > 0:
            # NOTE(review): ``narrow_queries`` is initialised to ``set()``
            # above, so this guard can never fire; kept for parity.
            if narrow_queries is None:
                narrow_queries = set()
            narrow_queries.add(
                " OR ".join(["%s:%s" % (DJANGO_CT, rm) for rm in model_choices])
            )
        if additional_query_string and additional_query_string != "*":
            narrow_queries.add(additional_query_string)
        narrow_searcher = None
        if narrow_queries is not None:
            # Potentially expensive? I don't see another way to do it in Whoosh...
            narrow_searcher = self.index.searcher()
            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(
                    self.parser.parse(force_str(nq)), limit=None
                )
                if len(recent_narrowed_results) <= 0:
                    # A narrowing query with zero matches means no results at all.
                    return {"results": [], "hits": 0}
                if narrowed_results:
                    # Intersect with the narrowing accumulated so far.
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results
        page_num, page_length = self.calculate_page(start_offset, end_offset)
        self.index = self.index.refresh()
        raw_results = EmptyResults()
        searcher = None
        if self.index.doc_count():
            # Locate the indexed document corresponding to the model instance.
            query = "%s:%s" % (ID, get_identifier(model_instance))
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query)
            results = searcher.search(parsed_query)
            if len(results):
                raw_results = results[0].more_like_this(field_name, top=end_offset)
            # Handle the case where the results have been narrowed.
            if narrowed_results is not None and hasattr(raw_results, "filter"):
                raw_results.filter(narrowed_results)
        try:
            raw_page = ResultsPage(raw_results, page_num, page_length)
        except ValueError:
            if not self.silently_fail:
                raise
            return {"results": [], "hits": 0, "spelling_suggestion": None}
        # Because as of Whoosh 2.5.1, it will return the wrong page of
        # results if you request something too high. :(
        if raw_page.pagenum < page_num:
            return {"results": [], "hits": 0, "spelling_suggestion": None}
        results = self._process_results(raw_page, result_class=result_class)
        if searcher:
            searcher.close()
        if hasattr(narrow_searcher, "close"):
            narrow_searcher.close()
        return results
    def _process_results(
        self,
        raw_page,
        highlight=False,
        query_string="",
        spelling_query=None,
        result_class=None,
    ):
        """
        Convert a Whoosh results page into Haystack's standard result dict.

        :param raw_page: Whoosh ``ResultsPage`` (or page-like) of raw hits.
        :param highlight: when ``True``, attach highlighted snippets for the
            content field to each result under ``highlighted``.
        :param query_string: original query, used for highlighting and as the
            fallback input for spelling suggestions.
        :param spelling_query: optional alternate string for spelling
            suggestions; takes precedence over ``query_string``.
        :param result_class: class used to wrap each hit; defaults to
            ``SearchResult``.
        :returns: dict with ``results``, ``hits``, ``facets`` and
            ``spelling_suggestion`` keys.
        """
        from haystack import connections

        results = []
        # It's important to grab the hits first before slicing. Otherwise, this
        # can cause pagination failures.
        hits = len(raw_page)
        if result_class is None:
            result_class = SearchResult
        # Whoosh has no faceting support, so this stays empty.
        facets = {}
        spelling_suggestion = None
        unified_index = connections[self.connection_alias].get_unified_index()
        indexed_models = unified_index.get_indexed_models()
        for doc_offset, raw_result in enumerate(raw_page):
            score = raw_page.score(doc_offset) or 0
            app_label, model_name = raw_result[DJANGO_CT].split(".")
            additional_fields = {}
            model = haystack_get_model(app_label, model_name)
            if model and model in indexed_models:
                # Convert each stored value back into a Python value, using
                # the index field's own converter when one exists.
                for key, value in raw_result.items():
                    index = unified_index.get_index(model)
                    string_key = str(key)
                    if string_key in index.fields and hasattr(
                        index.fields[string_key], "convert"
                    ):
                        # Special-cased due to the nature of KEYWORD fields.
                        if index.fields[string_key].is_multivalued:
                            if value is None or len(value) == 0:
                                additional_fields[string_key] = []
                            else:
                                additional_fields[string_key] = value.split(",")
                        else:
                            additional_fields[string_key] = index.fields[
                                string_key
                            ].convert(value)
                    else:
                        additional_fields[string_key] = self._to_python(value)
                # Internal bookkeeping fields are not exposed on the result.
                del additional_fields[DJANGO_CT]
                del additional_fields[DJANGO_ID]
                if highlight:
                    sa = StemmingAnalyzer()
                    formatter = WhooshHtmlFormatter("em")
                    terms = [token.text for token in sa(query_string)]
                    whoosh_result = whoosh_highlight(
                        additional_fields.get(self.content_field_name),
                        terms,
                        sa,
                        ContextFragmenter(),
                        formatter,
                    )
                    additional_fields["highlighted"] = {
                        self.content_field_name: [whoosh_result]
                    }
                result = result_class(
                    app_label,
                    model_name,
                    raw_result[DJANGO_ID],
                    score,
                    **additional_fields
                )
                results.append(result)
            else:
                # Hit references a model that is no longer registered/indexed:
                # drop it and adjust the reported count.
                hits -= 1
        if self.include_spelling:
            if spelling_query:
                spelling_suggestion = self.create_spelling_suggestion(spelling_query)
            else:
                spelling_suggestion = self.create_spelling_suggestion(query_string)
        return {
            "results": results,
            "hits": hits,
            "facets": facets,
            "spelling_suggestion": spelling_suggestion,
        }
def create_spelling_suggestion(self, query_string):
spelling_suggestion = None
reader = self.index.reader()
corrector = reader.corrector(self.content_field_name)
cleaned_query = force_str(query_string)
if not query_string:
return spelling_suggestion
# Clean the string.
for rev_word in self.RESERVED_WORDS:
cleaned_query = cleaned_query.replace(rev_word, "")
for rev_char in self.RESERVED_CHARACTERS:
cleaned_query = cleaned_query.replace(rev_char, "")
# Break it down.
query_words = cleaned_query.split()
suggested_words = []
for word in query_words:
suggestions = corrector.suggest(word, limit=1)
if len(suggestions) > 0:
suggested_words.append(suggestions[0])
spelling_suggestion = " ".join(suggested_words)
return spelling_suggestion
def _from_python(self, value):
"""
Converts Python values to a string for Whoosh.
Code courtesy of pysolr.
"""
if hasattr(value, "strftime"):
if not hasattr(value, "hour"):
value = datetime(value.year, value.month, value.day, 0, 0, 0)
elif isinstance(value, bool):
if value:
value = "true"
else:
value = "false"
elif isinstance(value, (list, tuple)):
value = ",".join([force_str(v) for v in value])
elif isinstance(value, (int, float)):
# Leave it alone.
pass
else:
value = force_str(value)
return value
def _to_python(self, value):
"""
Converts values from Whoosh to native Python values.
A port of the same method in pysolr, as they deal with data the same way.
"""
if value == "true":
return True
elif value == "false":
return False
if value and isinstance(value, str):
possible_datetime = DATETIME_REGEX.search(value)
if possible_datetime:
date_values = possible_datetime.groupdict()
for dk, dv in date_values.items():
date_values[dk] = int(dv)
return datetime(
date_values["year"],
date_values["month"],
date_values["day"],
date_values["hour"],
date_values["minute"],
date_values["second"],
)
try:
# Attempt to use json to load the values.
converted_value = json.loads(value)
# Try to handle most built-in types.
if isinstance(
converted_value,
(list, tuple, set, dict, int, float, complex),
):
return converted_value
except Exception:
# If it fails (SyntaxError or its ilk) or we don't trust it,
# continue on.
pass
return value
class WhooshSearchQuery(BaseSearchQuery):
    """Translate Haystack ``SearchQuery`` operations into Whoosh query syntax."""

    def _convert_datetime(self, date):
        # Whoosh DATETIME fields are queried as ``YYYYMMDDHHMMSS`` strings;
        # date-only values get a midnight time component.
        if hasattr(date, "hour"):
            return force_str(date.strftime("%Y%m%d%H%M%S"))
        else:
            return force_str(date.strftime("%Y%m%d000000"))

    def clean(self, query_fragment):
        """
        Provides a mechanism for sanitizing user input before presenting the
        value to the backend.

        Whoosh 1.X differs here in that you can no longer use a backslash
        to escape reserved characters. Instead, the whole word should be
        quoted.
        """
        words = query_fragment.split()
        cleaned_words = []
        for word in words:
            if word in self.backend.RESERVED_WORDS:
                # NOTE(review): equivalent to ``word = word.lower()``.
                word = word.replace(word, word.lower())
            for char in self.backend.RESERVED_CHARACTERS:
                if char in word:
                    # Quote the whole word so Whoosh treats it literally.
                    word = "'%s'" % word
                    break
            cleaned_words.append(word)
        return " ".join(cleaned_words)

    def build_query_fragment(self, field, filter_type, value):
        """
        Build one ``field:expression`` fragment of the Whoosh query string.

        :param field: Haystack field name; ``"content"`` means "no field
            prefix" (search the default field).
        :param filter_type: lookup type (``content``, ``contains``, ``exact``,
            ``gt``/``gte``/``lt``/``lte``, ``in``, ``range``, ``fuzzy``, ...).
        :param value: raw Python value or a Haystack ``InputType``.
        :returns: the query fragment as a string.
        """
        from haystack import connections

        query_frag = ""
        is_datetime = False
        if not hasattr(value, "input_type_name"):
            # Handle when we've got a ``ValuesListQuerySet``...
            if hasattr(value, "values_list"):
                value = list(value)
            if hasattr(value, "strftime"):
                is_datetime = True
            if isinstance(value, str) and value != " ":
                # It's not an ``InputType``. Assume ``Clean``.
                value = Clean(value)
            else:
                value = PythonData(value)
        # Prepare the query using the InputType.
        prepared_value = value.prepare(self)
        if not isinstance(prepared_value, (set, list, tuple)):
            # Then convert whatever we get back to what pysolr wants if needed.
            prepared_value = self.backend._from_python(prepared_value)
        # 'content' is a special reserved word, much like 'pk' in
        # Django's ORM layer. It indicates 'no special field'.
        if field == "content":
            index_fieldname = ""
        else:
            index_fieldname = "%s:" % connections[
                self._using
            ].get_unified_index().get_index_fieldname(field)
        # Whoosh syntax template per filter type. For ``fuzzy``, ``.format``
        # bakes in the max edit distance; ``%s``/``%d`` are filled per term.
        filter_types = {
            "content": "%s",
            "contains": "*%s*",
            "endswith": "*%s",
            "startswith": "%s*",
            "exact": "%s",
            "gt": "{%s to}",
            "gte": "[%s to]",
            "lt": "{to %s}",
            "lte": "[to %s]",
            "fuzzy": "%s~{}/%d".format(FUZZY_WHOOSH_MAX_EDITS),
        }
        if value.post_process is False:
            query_frag = prepared_value
        else:
            if filter_type in [
                "content",
                "contains",
                "startswith",
                "endswith",
                "fuzzy",
            ]:
                if value.input_type_name == "exact":
                    query_frag = prepared_value
                else:
                    # Iterate over terms & incorportate the converted form of each into the query.
                    terms = []
                    if isinstance(prepared_value, str):
                        possible_values = prepared_value.split(" ")
                    else:
                        if is_datetime is True:
                            prepared_value = self._convert_datetime(prepared_value)
                        possible_values = [prepared_value]
                    for possible_value in possible_values:
                        possible_value_str = self.backend._from_python(possible_value)
                        if filter_type == "fuzzy":
                            # Prefix length is capped at the term's own length.
                            terms.append(
                                filter_types[filter_type]
                                % (
                                    possible_value_str,
                                    min(
                                        FUZZY_WHOOSH_MIN_PREFIX, len(possible_value_str)
                                    ),
                                )
                            )
                        else:
                            terms.append(filter_types[filter_type] % possible_value_str)
                    if len(terms) == 1:
                        query_frag = terms[0]
                    else:
                        # Multiple terms must all match.
                        query_frag = "(%s)" % " AND ".join(terms)
            elif filter_type == "in":
                in_options = []
                for possible_value in prepared_value:
                    is_datetime = False
                    if hasattr(possible_value, "strftime"):
                        is_datetime = True
                    pv = self.backend._from_python(possible_value)
                    if is_datetime is True:
                        pv = self._convert_datetime(pv)
                    if isinstance(pv, str) and not is_datetime:
                        # Quote plain strings so embedded spaces survive.
                        in_options.append('"%s"' % pv)
                    else:
                        in_options.append("%s" % pv)
                query_frag = "(%s)" % " OR ".join(in_options)
            elif filter_type == "range":
                start = self.backend._from_python(prepared_value[0])
                end = self.backend._from_python(prepared_value[1])
                if hasattr(prepared_value[0], "strftime"):
                    start = self._convert_datetime(start)
                if hasattr(prepared_value[1], "strftime"):
                    end = self._convert_datetime(end)
                query_frag = "[%s to %s]" % (start, end)
            elif filter_type == "exact":
                if value.input_type_name == "exact":
                    query_frag = prepared_value
                else:
                    prepared_value = Exact(prepared_value).prepare(self)
                    query_frag = filter_types[filter_type] % prepared_value
            else:
                # Remaining comparison lookups (gt/gte/lt/lte, ...).
                if is_datetime is True:
                    prepared_value = self._convert_datetime(prepared_value)
                query_frag = filter_types[filter_type] % prepared_value
        if len(query_frag) and not isinstance(value, Raw):
            # Parenthesise so the fragment composes safely with AND/OR.
            if not query_frag.startswith("(") and not query_frag.endswith(")"):
                query_frag = "(%s)" % query_frag
        return "%s%s" % (index_fieldname, query_frag)
class WhooshEngine(BaseEngine):
    """Haystack engine wiring the Whoosh backend and query classes together."""

    backend = WhooshSearchBackend
    query = WhooshSearchQuery
| 34.372027 | 122 | 0.542948 |
acf2a96278708bac75d3a0be10415fd85ef44fce | 7,551 | py | Python | bitcoin/tests/test_bip38.py | zimage/python-bitcoinlib | 049bb10f9a12415054c63c87d0f53ee37644beeb | [
"MIT"
] | 1 | 2015-12-02T23:26:56.000Z | 2015-12-02T23:26:56.000Z | bitcoin/tests/test_bip38.py | zimage/python-bitcoinlib | 049bb10f9a12415054c63c87d0f53ee37644beeb | [
"MIT"
] | null | null | null | bitcoin/tests/test_bip38.py | zimage/python-bitcoinlib | 049bb10f9a12415054c63c87d0f53ee37644beeb | [
"MIT"
] | 1 | 2021-01-02T14:48:17.000Z | 2021-01-02T14:48:17.000Z | # -*- coding: utf-8 -*-
import unittest
from binascii import unhexlify
from bitcoin.bip38 import Bip38
from bitcoin.key import CKey, CKeyForm
class TestBip38(unittest.TestCase):
    """Round-trip tests for BIP-0038 encrypted private keys.

    Each test iterates over vectors (passphrase, encrypted key, WIF/hex
    secret, and — for EC-multiply mode — salt, seedb, lot and sequence
    numbers) and checks ``Bip38`` encryption and decryption against them.
    """

    def no_ec_multiply(self, v, compressed=False):
        """Round-trip one non-EC-multiply vector ``v``.

        Verifies the key's secret accessors, the encrypted form produced by
        ``encrypt_no_ec_multiply`` and the plaintext recovered by
        ``Bip38.decrypt``.
        """
        k = CKey()
        k.generate(unhexlify(v['unencrypted_hex']))
        k.set_compressed(compressed)
        # Test get_secret()
        self.assertEqual(unhexlify(v['unencrypted_hex']), k.get_secret())
        self.assertEqual(v['unencrypted_wif'], k.get_secret(form=CKeyForm.BASE58))
        # Test encryption
        b = Bip38(k, v['passphrase'])
        self.assertEqual(v['encrypted'], b.encrypt_no_ec_multiply())
        # Test decryption
        self.assertEqual(unhexlify(v['unencrypted_hex']), Bip38.decrypt(v['encrypted'], v['passphrase']))

    def test_no_compression_no_ec_multiply(self):
        """Uncompressed keys, no EC multiplication."""
        vec = [ {'passphrase': 'TestingOneTwoThree',
                 'encrypted': '6PRVWUbkzzsbcVac2qwfssoUJAN1Xhrg6bNk8J7Nzm5H7kxEbn2Nh2ZoGg',
                 'unencrypted_wif': '5KN7MzqK5wt2TP1fQCYyHBtDrXdJuXbUzm4A9rKAteGu3Qi5CVR',
                 'unencrypted_hex': 'CBF4B9F70470856BB4F40F80B87EDB90865997FFEE6DF315AB166D713AF433A5',
                },
                {'passphrase': 'Satoshi',
                 'encrypted': '6PRNFFkZc2NZ6dJqFfhRoFNMR9Lnyj7dYGrzdgXXVMXcxoKTePPX1dWByq',
                 'unencrypted_wif': '5HtasZ6ofTHP6HCwTqTkLDuLQisYPah7aUnSKfC7h4hMUVw2gi5',
                 'unencrypted_hex': '09C2686880095B1A4C249EE3AC4EEA8A014F11E6F986D0B5025AC1F39AFBD9AE'
                } ]
        for v in vec:
            self.no_ec_multiply(v)

    def test_compression_no_ec_multiply(self):
        """Compressed keys, no EC multiplication."""
        vec = [ {'passphrase': 'TestingOneTwoThree',
                 'encrypted': '6PYNKZ1EAgYgmQfmNVamxyXVWHzK5s6DGhwP4J5o44cvXdoY7sRzhtpUeo',
                 'unencrypted_wif': 'L44B5gGEpqEDRS9vVPz7QT35jcBG2r3CZwSwQ4fCewXAhAhqGVpP',
                 'unencrypted_hex': 'CBF4B9F70470856BB4F40F80B87EDB90865997FFEE6DF315AB166D713AF433A5',
                },
                {'passphrase': 'Satoshi',
                 'encrypted': '6PYLtMnXvfG3oJde97zRyLYFZCYizPU5T3LwgdYJz1fRhh16bU7u6PPmY7',
                 'unencrypted_wif': 'KwYgW8gcxj1JWJXhPSu4Fqwzfhp5Yfi42mdYmMa4XqK7NJxXUSK7',
                 'unencrypted_hex': '09C2686880095B1A4C249EE3AC4EEA8A014F11E6F986D0B5025AC1F39AFBD9AE'
                } ]
        for v in vec:
            self.no_ec_multiply(v, compressed = True)

    def test_no_compression_ec_multiply_no_lot_sequence_numbers(self):
        """EC multiplication without lot/sequence numbers."""
        # Fixed key typo in first vector: 'bitboin_address' -> 'bitcoin_address'
        # (key is informational only; test code never reads it).
        vec = [ {'passphrase': 'TestingOneTwoThree',
                 'passphrase_code': 'passphrasepxFy57B9v8HtUsszJYKReoNDV6VHjUSGt8EVJmux9n1J3Ltf1gRxyDGXqnf9qm',
                 'encrypted': '6PfQu77ygVyJLZjfvMLyhLMQbYnu5uguoJJ4kMCLqWwPEdfpwANVS76gTX',
                 'salt': '\xa5\x0d\xba\x67\x72\xcb\x93\x83',
                 'seedb': '\x99\x24\x1d\x58\x24\x5c\x88\x38\x96\xf8\x08\x43\xd2\x84\x66\x72\xd7\x31\x2e\x61\x95\xca\x1a\x6c',
                 'bitcoin_address': '1PE6TQi6HTVNz5DLwB1LcpMBALubfuN2z2',
                 'unencrypted_wif': '5K4caxezwjGCGfnoPTZ8tMcJBLB7Jvyjv4xxeacadhq8nLisLR2',
                 'unencrypted_hex': 'A43A940577F4E97F5C4D39EB14FF083A98187C64EA7C99EF7CE460833959A519',
                },
                {'passphrase': 'Satoshi',
                 'passphrase_code': 'passphraseoRDGAXTWzbp72eVbtUDdn1rwpgPUGjNZEc6CGBo8i5EC1FPW8wcnLdq4ThKzAS',
                 'encrypted': '6PfLGnQs6VZnrNpmVKfjotbnQuaJK4KZoPFrAjx1JMJUa1Ft8gnf5WxfKd',
                 'salt': '\x67\x01\x0a\x95\x73\x41\x89\x06',
                 'seedb': '\x49\x11\x1e\x30\x1d\x94\xea\xb3\x39\xff\x9f\x68\x22\xee\x99\xd9\xf4\x96\x06\xdb\x3b\x47\xa4\x97',
                 'bitcoin_address': '1CqzrtZC6mXSAhoxtFwVjz8LtwLJjDYU3V',
                 'unencrypted_wif': '5KJ51SgxWaAYR13zd9ReMhJpwrcX47xTJh2D3fGPG9CM8vkv5sH',
                 'unencrypted_hex': 'C2C8036DF268F498099350718C4A3EF3984D2BE84618C2650F5171DCC5EB660A',
                } ]
        for v in vec:
            k = CKey()
            k.generate(unhexlify(v['unencrypted_hex']))
            k.set_compressed(False)
            # Test get_secret()
            self.assertEqual(unhexlify(v['unencrypted_hex']), k.get_secret())
            self.assertEqual(v['unencrypted_wif'], k.get_secret(form=CKeyForm.BASE58))
            # Test get_intermediate
            b = Bip38(k, v['passphrase'], ec_multiply = True)
            self.assertEqual(v['passphrase_code'], b.get_intermediate(salt = v['salt']))
            # Test encryption
            self.assertEqual(v['encrypted'], Bip38.encrypt_ec_multiply(v['passphrase_code'], seedb=v['seedb']))
            # Test decryption
            self.assertEqual(unhexlify(v['unencrypted_hex']), Bip38.decrypt(v['encrypted'], v['passphrase']))

    def test_no_compression_ec_multiply_lot_sequence_numbers(self):
        """EC multiplication with lot/sequence numbers (incl. non-ASCII passphrase)."""
        vec = [ {'passphrase': 'MOLON LABE',
                 'passphrase_code': 'passphraseaB8feaLQDENqCgr4gKZpmf4VoaT6qdjJNJiv7fsKvjqavcJxvuR1hy25aTu5sX',
                 'encrypted': '6PgNBNNzDkKdhkT6uJntUXwwzQV8Rr2tZcbkDcuC9DZRsS6AtHts4Ypo1j',
                 'salt': '\x4f\xca\x5a\x97',
                 'seedb': '\x87\xa1\x3b\x07\x85\x8f\xa7\x53\xcd\x3a\xb3\xf1\xc5\xea\xfb\x5f\x12\x57\x9b\x6c\x33\xc9\xa5\x3f',
                 'bitcoin_address': '1Jscj8ALrYu2y9TD8NrpvDBugPedmbj4Yh',
                 'unencrypted_wif': '5JLdxTtcTHcfYcmJsNVy1v2PMDx432JPoYcBTVVRHpPaxUrdtf8',
                 'unencrypted_hex': '44EA95AFBF138356A05EA32110DFD627232D0F2991AD221187BE356F19FA8190',
                 'confirmation_code': 'cfrm38V8aXBn7JWA1ESmFMUn6erxeBGZGAxJPY4e36S9QWkzZKtaVqLNMgnifETYw7BPwWC9aPD',
                 'lot': 263183,
                 'sequence': 1,
                },
                {'passphrase': 'ΜΟΛΩΝ ΛΑΒΕ',
                 'passphrase_code': 'passphrased3z9rQJHSyBkNBwTRPkUGNVEVrUAcfAXDyRU1V28ie6hNFbqDwbFBvsTK7yWVK',
                 'encrypted': '6PgGWtx25kUg8QWvwuJAgorN6k9FbE25rv5dMRwu5SKMnfpfVe5mar2ngH',
                 'salt': '\xc4\x0e\xa7\x6f',
                 'seedb': '\x03\xb0\x6a\x1e\xa7\xf9\x21\x9a\xe3\x64\x56\x0d\x7b\x98\x5a\xb1\xfa\x27\x02\x5a\xaa\x7e\x42\x7a',
                 'bitcoin_address': '1Lurmih3KruL4xDB5FmHof38yawNtP9oGf',
                 'unencrypted_wif': '5KMKKuUmAkiNbA3DazMQiLfDq47qs8MAEThm4yL8R2PhV1ov33D',
                 'unencrypted_hex': 'CA2759AA4ADB0F96C414F36ABEB8DB59342985BE9FA50FAAC228C8E7D90E3006',
                 'confirmation_code': 'cfrm38V8G4qq2ywYEFfWLD5Cc6msj9UwsG2Mj4Z6QdGJAFQpdatZLavkgRd1i4iBMdRngDqDs51',
                 'lot': 806938,
                 'sequence': 1,
                } ]
        for v in vec:
            k = CKey()
            k.generate(unhexlify(v['unencrypted_hex']))
            k.set_compressed(False)
            # Test get_secret()
            self.assertEqual(unhexlify(v['unencrypted_hex']), k.get_secret())
            self.assertEqual(v['unencrypted_wif'], k.get_secret(form=CKeyForm.BASE58))
            # Test get_intermediate
            b = Bip38(k, v['passphrase'], ec_multiply = True, ls_numbers = True)
            self.assertEqual(v['passphrase_code'], b.get_intermediate(salt = v['salt'], lot=v['lot'], sequence=v['sequence']))
            # Test encryption
            self.assertEqual(v['encrypted'], b.encrypt_ec_multiply(v['passphrase_code'], seedb=v['seedb']))
            # Test decryption
            self.assertEqual(unhexlify(v['unencrypted_hex']), Bip38.decrypt(v['encrypted'], v['passphrase']))
| 54.323741 | 126 | 0.640975 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.