content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#!/usr/bin/env python
"""Application controller for FastTree
designed for FastTree v1.1.0 . Also functions with v2.0.1, v2.1.0, and v2.1.3
though only with basic functionality"""
from cogent.app.parameters import ValuedParameter, FlagParameter, \
MixedParameter
from cogent.app.util import CommandLineApplication, FilePath, system, \
CommandLineAppResult, ResultPath, remove, ApplicationError
from cogent.core.tree import PhyloNode
from cogent.parse.tree import DndParser
from cogent.core.moltype import DNA, RNA, PROTEIN
from cogent.core.alignment import SequenceCollection
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Daniel McDonald", "Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
__status__ = "Development"
class FastTree(CommandLineApplication):
    """FastTree application Controller.

    Wraps the FastTree executable as a cogent CommandLineApplication.
    FastTree writes the Newick tree to stdout, so __call__ redirects
    stdout to a temporary file and exposes it as the 'Tree' result.

    NOTE(review): Python 2 code (uses ``raise Exc, msg`` syntax).
    """

    _command = 'FastTree'
    # Input is written to a temporary file as one multi-line string.
    _input_handler = '_input_as_multiline_string'
    # Supported FastTree command-line flags and valued options.
    _parameters = {
        '-quiet':FlagParameter('-',Name='quiet'),
        '-boot':ValuedParameter('-',Delimiter=' ',Name='boot'),
        '-seed':ValuedParameter('-',Delimiter=' ',Name='seed'),
        '-nni':ValuedParameter('-',Delimiter=' ',Name='nni'),
        '-slow':FlagParameter('-',Name='slow'),
        '-fastest':FlagParameter('-',Name='fastest'),
        '-top':FlagParameter('-',Name='top'),
        '-notop':FlagParameter('-',Name='notop'),
        '-topm':ValuedParameter('-',Delimiter=' ',Name='topm'),
        '-close':ValuedParameter('-',Delimiter=' ',Name='close'),
        '-refresh':ValuedParameter('-',Delimiter=' ',Name='refresh'),
        '-matrix':ValuedParameter('-',Delimiter=' ',Name='matrix'),
        '-nomatrix':FlagParameter('-',Name='nomatrix'),
        '-nj':FlagParameter('-',Name='nj'),
        '-bionj':FlagParameter('-',Name='bionj'),
        '-nt':FlagParameter('-',Name='nt'),
        '-n':ValuedParameter('-',Delimiter=' ',Name='n'),
        '-pseudo':MixedParameter('-',Delimiter=' ', Name='pseudo'),
        '-intree':ValuedParameter('-',Delimiter=' ',Name='intree'),
        '-spr':ValuedParameter('-',Delimiter=' ',Name='spr'),
        '-constraints':ValuedParameter('-',Delimiter=' ',\
            Name='constraints'),
        '-constraintWeight':ValuedParameter('-',Delimiter=' ',\
            Name='constraintWeight'),\
        '-makematrix':ValuedParameter('-',Delimiter=' ',Name='makematrix')}

    def __call__(self,data=None, remove_tmp=True):
        """Run the application with the specified kwargs on data

        data: anything that can be cast into a string or written out to
            a file. Usually either a list of things or a single string or
            number. input_handler will be called on this data before it
            is passed as part of the command-line argument, so by creating
            your own input handlers you can customize what kind of data
            you want your application to accept
        remove_tmp: if True, removes tmp files

        NOTE: Override of the base class to handle redirected output
        """
        input_handler = self.InputHandler
        suppress_stderr = self.SuppressStderr
        # stdout (the tree) is captured to a tmp file; remember its path
        # so _get_result_paths can expose it as the 'Tree' result.
        outfile = self.getTmpFilename(self.TmpDir)
        self._outfile = outfile
        if suppress_stderr:
            errfile = FilePath('/dev/null')
        else:
            errfile = FilePath(self.getTmpFilename(self.TmpDir))
        if data is None:
            input_arg = ''
        else:
            input_arg = getattr(self,input_handler)(data)

        # Build up the command, consisting of a BaseCommand followed by
        # input and output (file) specifications
        command = self._command_delimiter.join(filter(None,\
            [self.BaseCommand,str(input_arg),'>',str(outfile),'2>',\
            str(errfile)]))
        if self.HaltExec:
            raise AssertionError, "Halted exec with command:\n" + command
        # The return value of system is a 16-bit number containing the signal
        # number that killed the process, and then the exit status.
        # We only want to keep the exit status so do a right bitwise shift to
        # get rid of the signal number byte
        exit_status = system(command) >> 8

        # Determine if error should be raised due to exit status of
        # application
        if not self._accept_exit_status(exit_status):
            raise ApplicationError, \
                'Unacceptable application exit status: %s, command: %s'\
                % (str(exit_status),command)

        out = open(outfile,"r")
        err = None
        if not suppress_stderr:
            err = open(errfile,"r")
        result = CommandLineAppResult(out,err,exit_status,\
            result_paths=self._get_result_paths(data))

        # Clean up the input file if one was created
        if remove_tmp:
            if self._input_filename:
                remove(self._input_filename)
                self._input_filename = None
        return result

    def _get_result_paths(self, data):
        # The redirected stdout captured in __call__ holds the Newick tree.
        result = {}
        result['Tree'] = ResultPath(Path=self._outfile)
        return result
def build_tree_from_alignment(aln, moltype, best_tree=False, params=None):
    """Returns a tree from alignment

    Will check MolType of aln object

    aln       : the alignment; must support getIntMap() and toFasta()
    moltype   : DNA, RNA or PROTEIN; DNA/RNA set FastTree's -nt flag
    best_tree : if True, run FastTree with -slow for a more thorough search
    params    : optional dict of additional FastTree parameters

    Raises ValueError for any other moltype.
    """
    if params is None:
        params = {}

    if moltype == DNA or moltype == RNA:
        params['-nt'] = True
    elif moltype == PROTEIN:
        params['-nt'] = False
    else:
        raise ValueError, \
            "FastTree does not support moltype: %s" % moltype.label

    if best_tree:
        params['-slow'] = True

    # Create mapping between abbreviated IDs and full IDs, so FastTree
    # never sees names it might mangle.
    int_map, int_keys = aln.getIntMap()
    # Create SequenceCollection from int_map.
    int_map = SequenceCollection(int_map,MolType=moltype)

    app = FastTree(params=params)
    result = app(int_map.toFasta())
    tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
    # remap tip names back to the original full IDs
    for tip in tree.tips():
        tip.Name = int_keys[tip.Name]
    return tree
|
nilq/baby-python
|
python
|
from service_runner.service_runner.answeb.ansible_api import AnsibleApi
from service_runner.service_runner.asset.models import Host
from django.conf import settings
# Absolute path to the bundled Ansible playbooks directory.
DEFAULT_PLAYBOOKS_PATH = settings.BASE_DIR + '/service_runner/answeb/playbooks/'
def format_uptime_result(host, result):
    """Translate an Ansible uptime run into a callback dict.

    Side effect: sets host.status (1 on success, 2 on failure or
    unreachable) and persists it via host.save().
    Returns {'message': ..., 'data': {'uptime': ..., 'status': ...}};
    both fields stay empty when result matches no known outcome.
    """
    callback = {'message': '', 'data': {}}
    succeeded = result.get('success')
    if succeeded:
        host.status = 1
        callback['message'] = 'success'
        callback['data']['uptime'] = succeeded.get(host.ip).get('stdout')
        callback['data']['status'] = host.get_status_display()
    elif result.get('failed'):
        host.status = 2
        callback['message'] = 'failed'
        callback['data']['uptime'] = result['failed'].get(host.ip)
        callback['data']['status'] = host.get_status_display()
    elif result.get('unreachable'):
        host.status = 2
        callback['message'] = 'unreachable'
        callback['data']['uptime'] = result['unreachable'].get(host.ip)
        callback['data']['status'] = host.get_status_display()
    host.save()
    return callback
def format_result(host, result):
    """Translate a playbook run result into a callback dict.

    On success, 'data' holds the host's 'out.stdout_lines' entry; on
    failure/unreachable it holds the raw per-host error payload.
    Returns an empty dict when result matches no known outcome.
    """
    callback = {}
    success_payload = result.get('success')
    if success_payload:
        host_data = success_payload.get(host.ip)
        callback['message'] = 'success'
        callback['data'] = host_data.get('out.stdout_lines')
    elif result.get('failed'):
        callback['message'] = 'failed'
        callback['data'] = result['failed'].get(host.ip)
    elif result.get('unreachable'):
        callback['message'] = 'unreachable'
        callback['data'] = result['unreachable'].get(host.ip)
    return callback
def get_host_uptime(host_id):
    """Run the 'uptime' shell command on one host via Ansible.

    Looks the host up by primary key, authenticates with its SSH key
    when one is configured (password otherwise), and returns the
    formatted callback dict from format_uptime_result.
    """
    host = Host.objects.get(id=host_id)
    api = AnsibleApi(host.ip + ',')
    if host.ssh_key:
        api.options = api.create_options(
            remote_user=host.ssh_user,
            private_key_file=host.ssh_key.ssh_key.path)
    else:
        api.options = api.create_options(remote_user=host.ssh_user)
    api.passwords = {'sshpass': host.ssh_passwd}
    api.initializeData()
    api.run(host.ip, 'shell', 'uptime')
    return format_uptime_result(host, api.get_result())
def get_host_info(host_id):
    """Collect memory/CPU/disk/uptime facts for one host via a playbook.

    Same connection setup as get_host_uptime, but runs the bundled
    memory_cpu_diskspace_uptime.yml playbook and formats the result
    with format_result.
    """
    host = Host.objects.get(id=host_id)
    api = AnsibleApi(host.ip + ',')
    if host.ssh_key:
        api.options = api.create_options(
            remote_user=host.ssh_user,
            private_key_file=host.ssh_key.ssh_key.path)
    else:
        api.options = api.create_options(remote_user=host.ssh_user)
    api.passwords = {'sshpass': host.ssh_passwd}
    api.initializeData()
    playbook = DEFAULT_PLAYBOOKS_PATH + 'memory_cpu_diskspace_uptime.yml'
    api.run_playbook(host.ip, [playbook])
    return format_result(host, api.get_result())
|
nilq/baby-python
|
python
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq import graph_utils
from seq2seq import decoders
from seq2seq.data import vocab
from seq2seq.models.basic_seq2seq import BasicSeq2Seq
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
from seq2seq.encoders.encoder import Encoder, EncoderOutput
class BasicBiSeq2Seq(BasicSeq2Seq):
    """Basic Sequence2Sequence model with a unidirectional encoder and decoder.

    The last encoder state is used to initialize the decoder and thus both
    must share the same type of RNN cell.

    This variant additionally encodes a second "candidate" source sequence
    with its own vocabulary and embedding, then merges the two encoder
    outputs element-wise before decoding.

    Args:
      source_vocab_info: An instance of `VocabInfo`
        for the source vocabulary
      target_vocab_info: An instance of `VocabInfo`
        for the target vocabulary
      params: A dictionary of hyperparameters
    """

    def __init__(self, params, mode, name="basic_biseq2seq"):
        super(BasicBiSeq2Seq, self).__init__(params, mode, name)
        # Candidate-answer vocabulary is optional; only resolved when the
        # "vocab_source_candidate" param is present and non-empty.
        self.source_candidate_vocab_info = None
        if "vocab_source_candidate" in self.params and self.params["vocab_source_candidate"]:
            self.source_candidate_vocab_info = vocab.get_vocab_info(self.params["vocab_source_candidate"])
        # Encoder/decoder classes are resolved from dotted paths at runtime.
        self.encoder_class = locate(self.params["encoder.class"])
        self.decoder_class = locate(self.params["decoder.class"])

    @staticmethod
    def default_params():
        # Extends the base model's defaults with candidate-source settings.
        params = BasicSeq2Seq.default_params().copy()
        params.update({
            "bridge.class": "seq2seq.models.bridges.InitialStateBridge",
            "bridge.params": {},
            "encoder.class": "seq2seq.encoders.UnidirectionalRNNEncoder",
            "encoder.params": {},  # Arbitrary parameters for the encoder
            "decoder.class": "seq2seq.decoders.BasicDecoder",
            "decoder.params": {},  # Arbitrary parameters for the decoder
            "source_candidate.max_seq_len": 20,
            "source_candidate.reverse": True,
            "vocab_source_candidate": None
        })
        return params

    def _create_bridge(self, encoder_outputs, decoder_state_size):
        """Creates the bridge to be used between encoder and decoder"""
        # Accepts either a fully-qualified class path or a name from the
        # bridges module.
        bridge_class = locate(self.params["bridge.class"]) or \
            getattr(bridges, self.params["bridge.class"])
        return bridge_class(
            encoder_outputs=encoder_outputs,
            decoder_state_size=decoder_state_size,
            params=self.params["bridge.params"],
            mode=self.mode)

    def _create_decoder(self, _encoder_output, _features, _labels):
        """Creates a decoder instance based on the passed parameters."""
        return self.decoder_class(
            params=self.params["decoder.params"],
            mode=self.mode,
            vocab_size=self.target_vocab_info.total_size)

    def _decode_train(self, decoder, bridge, _encoder_output, _features, labels):
        """Runs decoding in training mode"""
        # Teacher forcing: feed the embedded gold targets, dropping the
        # final token and shortening lengths by one to match.
        target_embedded = tf.nn.embedding_lookup(self.target_embedding,
                                                 labels["target_ids"])
        helper_train = tf_decode_helper.TrainingHelper(
            inputs=target_embedded[:, :-1],
            sequence_length=labels["target_len"] - 1)
        decoder_initial_state = bridge()
        print("basic_seq2seq decoder_initial_state:{}".format(decoder_initial_state))
        return decoder(decoder_initial_state, helper_train)

    def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
        """Runs decoding in inference mode"""
        batch_size = self.batch_size(features, labels)
        # Under beam search, one beam per hypothesis replaces the batch.
        if self.use_beam_search:
            batch_size = self.params["inference.beam_search.beam_width"]
        target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
        helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
            embedding=self.target_embedding,
            start_tokens=tf.fill([batch_size], target_start_id),
            end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
        decoder_initial_state = bridge()
        return decoder(decoder_initial_state, helper_infer)

    @templatemethod("encode")
    def encode(self, features, labels):
        """Encodes the query and candidate sources and merges the results.

        NOTE(review): the merge is element-wise addition of outputs,
        final states and attention values, which assumes both encoders
        produce identically shaped tensors -- confirm. Adding the two
        attention_values_length tensors (rather than e.g. taking a max)
        also looks questionable -- confirm intent.
        """
        # 1. query source encoder sequence output
        query_embedded = tf.nn.embedding_lookup(self.source_embedding,
                                                features["source_ids"])
        query_encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
        query_output = query_encoder_fn(query_embedded, features["source_len"])
        # 2. candidate source encoder sequence output
        candidate_embedded = tf.nn.embedding_lookup(self.source_candidate_embedding,
                                                    features["source_candidate_ids"])
        candidate_encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
        candidate_output = candidate_encoder_fn(candidate_embedded, features["source_candidate_len"])
        print("query_output:{}".format(query_output))
        print("candidate_output:{}".format(candidate_output))
        # 3. merge the two encoder outputs element-wise
        outputs = query_output.outputs + candidate_output.outputs
        final_state = (query_output.final_state[0] + candidate_output.final_state[0],
                       query_output.final_state[1] + candidate_output.final_state[1])
        attention_values = query_output.attention_values + candidate_output.attention_values
        att_v_len = query_output.attention_values_length + candidate_output.attention_values_length
        encoderOutput = EncoderOutput(outputs=outputs,
                                      final_state=final_state,
                                      attention_values=attention_values,
                                      attention_values_length=att_v_len)
        print("encoderOutput:{}".format(encoderOutput))
        return encoderOutput

    @templatemethod("decode")
    def decode(self, encoder_output, features, labels):
        """Builds the decoder (optionally wrapped for beam search) and runs it."""
        decoder = self._create_decoder(encoder_output, features, labels)
        if self.use_beam_search:
            decoder = self._get_beam_search_decoder(decoder)
        bridge = self._create_bridge(
            encoder_outputs=encoder_output,
            decoder_state_size=decoder.cell.state_size)
        if self.mode == tf.contrib.learn.ModeKeys.INFER:
            return self._decode_infer(decoder, bridge, encoder_output, features,
                                      labels)
        else:
            return self._decode_train(decoder, bridge, encoder_output, features,
                                      labels)

    @property
    @templatemethod("source_candidate_embedding")
    def source_candidate_embedding(self):
        """Returns the embedding used for the candidate source sequence."""
        return tf.get_variable(
            name="W_candidate",
            shape=[self.source_candidate_vocab_info.total_size, self.params["embedding.dim"]],
            initializer=tf.random_uniform_initializer(
                -self.params["embedding.init_scale"],
                self.params["embedding.init_scale"]))

    def _preprocess(self, features, labels):
        """Model-specific preprocessing for features and labels:
        - Creates vocabulary lookup tables for source, candidate and target vocab
        - Converts tokens into vocabulary ids
        """
        # Create vocabulary lookup for source
        source_vocab_to_id, source_id_to_vocab, source_word_to_count, _ = \
            vocab.create_vocabulary_lookup_table(self.source_vocab_info.path)
        # Create vocabulary lookup for the candidate source
        source_candidate_vocab_to_id, source_candidate_id_to_vocab, source_candidate_word_to_count, _ = \
            vocab.create_vocabulary_lookup_table(self.source_candidate_vocab_info.path)
        # Create vocabulary lookup for target
        target_vocab_to_id, target_id_to_vocab, target_word_to_count, _ = \
            vocab.create_vocabulary_lookup_table(self.target_vocab_info.path)

        # Add vocab tables to graph collection so that we can access them in
        # other places.
        graph_utils.add_dict_to_collection({
            "source_vocab_to_id": source_vocab_to_id,
            "source_id_to_vocab": source_id_to_vocab,
            "source_word_to_count": source_word_to_count,
            "source_candidate_vocab_to_id": source_candidate_vocab_to_id,
            "source_candidate_id_to_vocab": source_candidate_id_to_vocab,
            "source_candidate_word_to_count": source_candidate_word_to_count,
            "target_vocab_to_id": target_vocab_to_id,
            "target_id_to_vocab": target_id_to_vocab,
            "target_word_to_count": target_word_to_count
        }, "vocab_tables")

        # Slice source to max_len
        if self.params["source.max_seq_len"] is not None:
            features["source_tokens"] = features["source_tokens"][:, :self.params[
                "source.max_seq_len"]]
            features["source_len"] = tf.minimum(features["source_len"],
                                                self.params["source.max_seq_len"])

        # Slice source_candidate to max_len
        if self.params["source_candidate.max_seq_len"] is not None:
            features["source_candidate_tokens"] = features["source_candidate_tokens"][:, :self.params[
                "source_candidate.max_seq_len"]]
            features["source_candidate_len"] = tf.minimum(features["source_candidate_len"],
                                                          self.params["source_candidate.max_seq_len"])

        # Look up the source ids in the vocabulary
        features["source_ids"] = source_vocab_to_id.lookup(features[
            "source_tokens"])
        features["source_candidate_ids"] = source_candidate_vocab_to_id.lookup(features[
            "source_candidate_tokens"])

        # Maybe reverse the source
        # NOTE(review): both source and candidate are reversed under the
        # single "source.reverse" flag; the "source_candidate.reverse"
        # default declared in default_params() is never consulted --
        # confirm intent.
        if self.params["source.reverse"] is True:
            features["source_ids"] = tf.reverse_sequence(
                input=features["source_ids"],
                seq_lengths=features["source_len"],
                seq_dim=1,
                batch_dim=0,
                name=None)
            features["source_candidate_ids"] = tf.reverse_sequence(
                input=features["source_candidate_ids"],
                seq_lengths=features["source_candidate_len"],
                seq_dim=1,
                batch_dim=0,
                name=None)

        features["source_len"] = tf.to_int32(features["source_len"])
        tf.summary.histogram("source_len", tf.to_float(features["source_len"]))
        features["source_candidate_len"] = tf.to_int32(features["source_candidate_len"])
        tf.summary.histogram("source_candidate_len", tf.to_float(features["source_candidate_len"]))

        if labels is None:
            return features, None

        labels = labels.copy()

        # Slices targets to max length
        if self.params["target.max_seq_len"] is not None:
            labels["target_tokens"] = labels["target_tokens"][:, :self.params[
                "target.max_seq_len"]]
            labels["target_len"] = tf.minimum(labels["target_len"],
                                              self.params["target.max_seq_len"])

        # Look up the target ids in the vocabulary
        labels["target_ids"] = target_vocab_to_id.lookup(labels["target_tokens"])

        labels["target_len"] = tf.to_int32(labels["target_len"])
        tf.summary.histogram("target_len", tf.to_float(labels["target_len"]))

        # Keep track of the number of processed tokens
        num_tokens = tf.reduce_sum(labels["target_len"])
        num_tokens += tf.reduce_sum(features["source_len"])
        num_tokens += tf.reduce_sum(features["source_candidate_len"])
        # NOTE(review): tf.Variable's second positional argument is
        # `trainable`, not `name`; passing "tokens_counter" here likely
        # does not name the variable -- confirm.
        token_counter_var = tf.Variable(0, "tokens_counter")
        total_tokens = tf.assign_add(token_counter_var, num_tokens)
        tf.summary.scalar("num_tokens", total_tokens)

        # Tie the token counter update into the graph via a control
        # dependency on the token tensors.
        with tf.control_dependencies([total_tokens]):
            features["source_tokens"] = tf.identity(features["source_tokens"])
            features["source_candidate_tokens"] = tf.identity(features["source_candidate_tokens"])

        # Add to graph collection for later use
        graph_utils.add_dict_to_collection(features, "features")
        if labels:
            graph_utils.add_dict_to_collection(labels, "labels")
        print("attention_biseqseq features:{} labels:{}".format(features, labels))
        return features, labels
|
nilq/baby-python
|
python
|
import random
import sys
def main(amount=1000000):
    """Estimate the probability that random integer vectors are set-wise coprime.

    For each fixed vector length from 2 to 6, draws `amount` random vectors
    of integers in [0, sys.maxsize] and prints the percentage whose overall
    gcd is 1.

    amount: number of random vectors to sample per length (default 1000000;
        generalized from the original hard-coded constant).
    """
    # Renamed from `min`/`max` to avoid shadowing the builtins.
    lo = 0
    hi = sys.maxsize  # To get some big integer
    # Fixed length from 2 to 6
    for length in range(2, 7):
        coprime_count = 0
        # Generate `amount` vectors with the fixed length above
        for _ in range(amount):
            vector = [random.randint(lo, hi) for _ in range(length)]
            # Count set-wise coprime vectors (overall gcd == 1)
            if gcd(vector) == 1:
                coprime_count += 1
        percentage = round(coprime_count / amount * 100, 2)
        # Range printed as inclusive [lo,hi]: random.randint includes both
        # endpoints (the original message claimed a half-open range).
        print(f"The percentage of set-wise coprime vectors among {amount} vectors with fixed length = {length} and range = [{lo},{hi}] is equal to {percentage}%")
def gcd(array):
    """Return the greatest common divisor of all integers in `array`.

    Folds math.gcd over the sequence. Generalized from the original
    (which required at least two elements and shadowed its own name with
    a local variable) to accept any non-empty sequence; a one-element
    sequence returns that element.
    """
    from functools import reduce
    from math import gcd as _gcd
    return reduce(_gcd, array)
def find_gcd(a, b):
    """Greatest common divisor of a and b via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
if __name__ == '__main__':
    # Run the simulation only when executed as a script.
    main()
|
nilq/baby-python
|
python
|
# some changes
|
nilq/baby-python
|
python
|
from scapy.all import *

# NOTE(review): Python 2 script (print statement syntax).
# Wireless interface, expected to be in monitor mode, to sniff on.
interface = 'mon0'
# BSSIDs already reported, so each access point is printed only once.
ap_list = []

def info(fm):
    """sniff() callback: print SSID and BSSID for each new beacon frame."""
    if fm.haslayer(Dot11):
        # type 0 / subtype 8 identifies an 802.11 management beacon frame
        if ((fm.type == 0) & (fm.subtype==8)):
            if fm.addr2 not in ap_list:
                ap_list.append(fm.addr2)
                print "SSID--> ",fm.info,"-- BSSID --> ",fm.addr2

# Sniff indefinitely, feeding every captured frame to info().
sniff(iface=interface,prn=info)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/17 13:48
# @File : db.py
# @Role : ORM
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, UniqueConstraint, DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import class_mapper
from libs.common import get_factor_num_with_xlarge
Base = declarative_base()
def model_to_dict(model, time_strf=None):
    """Serialize a SQLAlchemy model instance into a plain dict.

    datetime values are rendered with `time_strf` (default
    "%Y-%m-%d %H:%M:%S"); all other column values pass through as-is.
    Keys are the underlying column names.
    """
    fmt = time_strf or "%Y-%m-%d %H:%M:%S"

    def _render(value):
        # Format datetimes; leave everything else untouched.
        return value.strftime(fmt) if isinstance(value, datetime) else value

    columns = class_mapper(model.__class__).c.items()
    return {column.name: _render(getattr(model, key, None)) for key, column in columns}
class ResourceUsage(Base):
    """Per-instance resource usage sample, unique per (instance, month)."""
    __tablename__ = 'resource_usage'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    ec2_id = Column('ec2_id', String(128), nullable=False)  # instance ID
    cpu_usage = Column('cpu_usage', Integer(), nullable=True)  # CPU usage percentage
    mem_usage = Column('mem_usage', Integer(), nullable=True)  # memory usage percentage
    disk_usage = Column('disk_usage', Integer(), nullable=True)  # disk usage percentage
    date = Column('date', DateTime(), nullable=False)  # month
    __table_args__ = (
        UniqueConstraint('ec2_id', 'date', name='uix_ec2_date'),
    )
class UsageReport(Base):
    """Monthly per-instance usage report with a right-sizing suggestion."""
    __tablename__ = 'usage_report'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    ec2_id = Column('ec2_id', String(128), nullable=False)  # instance ID
    host_name = Column('host_name', String(128), nullable=False)  # instance name
    project_name = Column('project_name', String(128), nullable=False)  # project name
    cpu_avg_usage = Column('cpu_avg_usage', Integer(), nullable=False)  # average CPU usage for the month
    mem_avg_usage = Column('mem_avg_usage', Integer(), nullable=False)  # average memory usage for the month
    disk_avg_usage = Column('disk_avg_usage', Integer(), nullable=False)  # average disk usage for the month
    curr_inst_type = Column('curr_inst_type', String(128), nullable=False)  # current instance type
    suggest_inst_type = Column('suggest_inst_type', String(128), nullable=False)  # suggested instance type
    cost_gap = Column('cost_gap', DECIMAL(10, 5), nullable=True)  # cost difference
    month = Column('month', DateTime(), nullable=False)  # month
    __table_args__ = (
        UniqueConstraint('ec2_id', 'month', name='uix_ec2_month'),
    )
class AWSRiUsageReport(Base):
    """Monthly AWS Reserved Instance usage/coverage report row."""
    __tablename__ = 'aws_ri_usage_report'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    family = Column('family', String(128), nullable=True)  # instance family
    size = Column('size', String(128), nullable=True)  # instance size; Linux rows are normalized to xlarge
    platform = Column('platform', String(128), nullable=True)  # platform
    # available_zone = Column('available_zone', String(128), nullable=True)
    total_running = Column('total_running', DECIMAL(10, 5), nullable=True)  # currently running count
    total_ri = Column('total_ri', DECIMAL(10, 5), nullable=True)  # purchased RI count
    coverage_rate = Column('coverage_rate', DECIMAL(10, 5), nullable=True)  # RI coverage rate
    date = Column('date', DateTime(), nullable=True)  # month
    __table_args__ = (
        UniqueConstraint('family', 'size', 'platform', 'date', name='uix_date'),
    )

    def merge(self, AWSRiUsageReport):
        """Fold another report row into this one when family and platform match.

        UNIX/Linux rows are converted to xlarge-equivalent units via
        get_factor_num_with_xlarge; other platforms merge only on an exact
        size match. Returns True when merged, False when sizes differ on a
        non-Linux platform, None when family/platform do not match.
        """
        if self.family == AWSRiUsageReport.family \
                and self.platform == AWSRiUsageReport.platform:
            if self.platform == "UNIX/Linux":
                factor = get_factor_num_with_xlarge(AWSRiUsageReport.size)
                total_running = factor * AWSRiUsageReport.total_running
                # BUG FIX: previously computed from total_running, so the
                # merged RI count accumulated the running count instead of
                # the purchased RI count.
                total_ri = factor * AWSRiUsageReport.total_ri
            elif self.size == AWSRiUsageReport.size:
                total_running = AWSRiUsageReport.total_running
                total_ri = AWSRiUsageReport.total_ri
            else:
                return False
            self.total_running += total_running
            self.total_ri += total_ri
            return True
class AwsTaskQueue(Base):
    """Queue of pending AWS reporting tasks."""
    __tablename__ = 'aws_task_queue'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    date = Column('date', DateTime(), nullable=True)  # task time
    task_name = Column('task_name', String(128), nullable=True)  # task type
    status = Column('status', Integer, nullable=True)  # status: 0 = waiting, 1 = failed, 2 = done
class AwsProjectBillReport(Base):
    """Per-project AWS bill breakdown for one billing date."""
    __tablename__ = 'aws_project_bill_report'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    userproject = Column('userproject', String(32), nullable=True)  # project
    ec2_cost = Column('ec2_cost', DECIMAL(10, 5), nullable=True)  # EC2
    ebs_cost = Column('ebs_cost', DECIMAL(10, 5), nullable=True)  # EBS
    snapshot_cost = Column('snapshot_cost', DECIMAL(10, 5), nullable=True)  # snapshot
    s3_cost = Column('s3_cost', DECIMAL(10, 5), nullable=True)  # S3
    rds_cost = Column('rds_cost', DECIMAL(10, 5), nullable=True)  # RDS
    elasticache_cost = Column('elasticache_cost', DECIMAL(10, 5), nullable=True)  # ElastiCache
    credit = Column('credit', DECIMAL(10, 5), nullable=True)  # Credit
    no_reserved_ri_cost = Column('no_reserved_ri_cost', DECIMAL(10, 5), nullable=True)  # non-reserved (on-demand) cost
    support_cost = Column('support_cost', DECIMAL(10, 5), nullable=True)  # Support cost
    t_a_x = Column('t_a_x', DECIMAL(10, 5), nullable=True)  # tax
    aws_total_cost = Column('aws_total_cost', DECIMAL(10, 5), nullable=True)  # total cost
    bill_date = Column('bill_date', DateTime(), nullable=True)  # billing date
    __table_args__ = (
        UniqueConstraint('userproject', 'bill_date', name='unix'),
    )
class AwsServiceBillReport(Base):
    """Per-resource/per-service AWS bill line for one billing date."""
    __tablename__ = 'aws_service_bill_report'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    resource_id = Column('resource_id', String(64), nullable=True)  # resource id
    Tag_AppName = Column('Tag_AppName', String(64), nullable=True)  # application name tag
    Tag_Center = Column('Tag_Center', String(64), nullable=True)  # data-center name tag
    Tag_Team = Column('Tag_Team', String(64), nullable=True)  # team / org-unit name tag
    Traffic_charges = Column('Traffic_charges', DECIMAL(10, 5), nullable=True)  # traffic cost
    service_name = Column('service_name', String(64), nullable=True)  # service name
    userproject = Column('userproject', String(32), nullable=True)  # project name
    total_cost = Column('total_cost', DECIMAL(10, 5), nullable=True)  # cost
    bill_date = Column('bill_date', DateTime(), nullable=True)  # billing date
    __table_args__ = (
        UniqueConstraint('userproject', 'service_name', 'resource_id', 'bill_date', name='unix'),
    )
class AWSRiDateDB(Base):
    """Purchased Reserved Instance counts with their expiry dates."""
    __tablename__ = 'aws_ri_date_num'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    family = Column('family', String(128), nullable=True)  # instance family
    size = Column('size', String(128), nullable=True)  # instance size; Linux defaults to xlarge
    platform = Column('platform', String(128), nullable=True)  # platform
    total_ri = Column('total_ri', DECIMAL(10, 5), nullable=True)  # purchased RI count
    end = Column('end', String(128), nullable=True)  # expiry time
class AwsProjectBudgetControl(Base):
    """Per-project budget vs. actual AWS spend for one billing date."""
    __tablename__ = 'aws_project_budget'
    id = Column('id', Integer, primary_key=True, autoincrement=True)
    userproject = Column('userproject', String(32), nullable=True)  # project
    aws_total_cost = Column('aws_total_cost', DECIMAL(10, 5), nullable=True)  # total cost
    aws_budget_cost = Column('aws_budget_cost', DECIMAL(10, 5), nullable=True)  # budgeted cost
    aws_alert_percentage = Column('aws_alert_percentage', DECIMAL(10, 5), default=1.2)  # alert threshold ratio
    aws_percentage = Column('aws_percentage', DECIMAL(10, 5), nullable=True)  # spend percentage
    bill_date = Column('bill_date', DateTime(), nullable=True)  # billing date
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GlobalClientArgs', 'GlobalClient']
@pulumi.input_type
class GlobalClientArgs:
def __init__(__self__, *,
addons: Optional[pulumi.Input['GlobalClientAddonsArgs']] = None,
allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
app_type: Optional[pulumi.Input[str]] = None,
callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
cross_origin_auth: Optional[pulumi.Input[bool]] = None,
cross_origin_loc: Optional[pulumi.Input[str]] = None,
custom_login_page: Optional[pulumi.Input[str]] = None,
custom_login_page_on: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
form_template: Optional[pulumi.Input[str]] = None,
grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
initiate_login_uri: Optional[pulumi.Input[str]] = None,
is_first_party: Optional[pulumi.Input[bool]] = None,
is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
jwt_configuration: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
mobile: Optional[pulumi.Input['GlobalClientMobileArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
oidc_conformant: Optional[pulumi.Input[bool]] = None,
refresh_token: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']] = None,
sso: Optional[pulumi.Input[bool]] = None,
sso_disabled: Optional[pulumi.Input[bool]] = None,
token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a GlobalClient resource.
"""
if addons is not None:
pulumi.set(__self__, "addons", addons)
if allowed_logout_urls is not None:
pulumi.set(__self__, "allowed_logout_urls", allowed_logout_urls)
if allowed_origins is not None:
pulumi.set(__self__, "allowed_origins", allowed_origins)
if app_type is not None:
pulumi.set(__self__, "app_type", app_type)
if callbacks is not None:
pulumi.set(__self__, "callbacks", callbacks)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_metadata is not None:
pulumi.set(__self__, "client_metadata", client_metadata)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_secret_rotation_trigger is not None:
pulumi.set(__self__, "client_secret_rotation_trigger", client_secret_rotation_trigger)
if cross_origin_auth is not None:
pulumi.set(__self__, "cross_origin_auth", cross_origin_auth)
if cross_origin_loc is not None:
pulumi.set(__self__, "cross_origin_loc", cross_origin_loc)
if custom_login_page is not None:
pulumi.set(__self__, "custom_login_page", custom_login_page)
if custom_login_page_on is not None:
pulumi.set(__self__, "custom_login_page_on", custom_login_page_on)
if description is not None:
pulumi.set(__self__, "description", description)
if encryption_key is not None:
pulumi.set(__self__, "encryption_key", encryption_key)
if form_template is not None:
pulumi.set(__self__, "form_template", form_template)
if grant_types is not None:
pulumi.set(__self__, "grant_types", grant_types)
if initiate_login_uri is not None:
pulumi.set(__self__, "initiate_login_uri", initiate_login_uri)
if is_first_party is not None:
pulumi.set(__self__, "is_first_party", is_first_party)
if is_token_endpoint_ip_header_trusted is not None:
pulumi.set(__self__, "is_token_endpoint_ip_header_trusted", is_token_endpoint_ip_header_trusted)
if jwt_configuration is not None:
pulumi.set(__self__, "jwt_configuration", jwt_configuration)
if logo_uri is not None:
pulumi.set(__self__, "logo_uri", logo_uri)
if mobile is not None:
pulumi.set(__self__, "mobile", mobile)
if name is not None:
pulumi.set(__self__, "name", name)
if oidc_conformant is not None:
pulumi.set(__self__, "oidc_conformant", oidc_conformant)
if refresh_token is not None:
pulumi.set(__self__, "refresh_token", refresh_token)
if sso is not None:
pulumi.set(__self__, "sso", sso)
if sso_disabled is not None:
pulumi.set(__self__, "sso_disabled", sso_disabled)
if token_endpoint_auth_method is not None:
pulumi.set(__self__, "token_endpoint_auth_method", token_endpoint_auth_method)
if web_origins is not None:
pulumi.set(__self__, "web_origins", web_origins)
    @property
    @pulumi.getter
    def addons(self) -> Optional[pulumi.Input['GlobalClientAddonsArgs']]:
        """``addons`` input property."""
        return pulumi.get(self, "addons")
    @addons.setter
    def addons(self, value: Optional[pulumi.Input['GlobalClientAddonsArgs']]):
        pulumi.set(self, "addons", value)
    @property
    @pulumi.getter(name="allowedLogoutUrls")
    def allowed_logout_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """``allowed_logout_urls`` input property."""
        return pulumi.get(self, "allowed_logout_urls")
    @allowed_logout_urls.setter
    def allowed_logout_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_logout_urls", value)
    @property
    @pulumi.getter(name="allowedOrigins")
    def allowed_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """``allowed_origins`` input property."""
        return pulumi.get(self, "allowed_origins")
    @allowed_origins.setter
    def allowed_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_origins", value)
    @property
    @pulumi.getter(name="appType")
    def app_type(self) -> Optional[pulumi.Input[str]]:
        """``app_type`` input property."""
        return pulumi.get(self, "app_type")
    @app_type.setter
    def app_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "app_type", value)
    @property
    @pulumi.getter
    def callbacks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """``callbacks`` input property."""
        return pulumi.get(self, "callbacks")
    @callbacks.setter
    def callbacks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "callbacks", value)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """``client_id`` input property."""
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientMetadata")
    def client_metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """``client_metadata`` input property."""
        return pulumi.get(self, "client_metadata")
    @client_metadata.setter
    def client_metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "client_metadata", value)
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[pulumi.Input[str]]:
        """``client_secret`` input property (presumably sensitive — avoid logging; confirm against provider docs)."""
        return pulumi.get(self, "client_secret")
    @client_secret.setter
    def client_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_secret", value)
    @property
    @pulumi.getter(name="clientSecretRotationTrigger")
    def client_secret_rotation_trigger(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """``client_secret_rotation_trigger`` input property."""
        return pulumi.get(self, "client_secret_rotation_trigger")
    @client_secret_rotation_trigger.setter
    def client_secret_rotation_trigger(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "client_secret_rotation_trigger", value)
    @property
    @pulumi.getter(name="crossOriginAuth")
    def cross_origin_auth(self) -> Optional[pulumi.Input[bool]]:
        """``cross_origin_auth`` input property."""
        return pulumi.get(self, "cross_origin_auth")
    @cross_origin_auth.setter
    def cross_origin_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cross_origin_auth", value)
    @property
    @pulumi.getter(name="crossOriginLoc")
    def cross_origin_loc(self) -> Optional[pulumi.Input[str]]:
        """``cross_origin_loc`` input property."""
        return pulumi.get(self, "cross_origin_loc")
    @cross_origin_loc.setter
    def cross_origin_loc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cross_origin_loc", value)
    @property
    @pulumi.getter(name="customLoginPage")
    def custom_login_page(self) -> Optional[pulumi.Input[str]]:
        """``custom_login_page`` input property."""
        return pulumi.get(self, "custom_login_page")
    @custom_login_page.setter
    def custom_login_page(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_login_page", value)
    @property
    @pulumi.getter(name="customLoginPageOn")
    def custom_login_page_on(self) -> Optional[pulumi.Input[bool]]:
        """``custom_login_page_on`` input property."""
        return pulumi.get(self, "custom_login_page_on")
    @custom_login_page_on.setter
    def custom_login_page_on(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "custom_login_page_on", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """``description`` input property."""
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="encryptionKey")
    def encryption_key(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """``encryption_key`` input property."""
        return pulumi.get(self, "encryption_key")
    @encryption_key.setter
    def encryption_key(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "encryption_key", value)
    @property
    @pulumi.getter(name="formTemplate")
    def form_template(self) -> Optional[pulumi.Input[str]]:
        """``form_template`` input property."""
        return pulumi.get(self, "form_template")
    @form_template.setter
    def form_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "form_template", value)
    @property
    @pulumi.getter(name="grantTypes")
    def grant_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """``grant_types`` input property."""
        return pulumi.get(self, "grant_types")
    @grant_types.setter
    def grant_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "grant_types", value)
    @property
    @pulumi.getter(name="initiateLoginUri")
    def initiate_login_uri(self) -> Optional[pulumi.Input[str]]:
        """``initiate_login_uri`` input property."""
        return pulumi.get(self, "initiate_login_uri")
    @initiate_login_uri.setter
    def initiate_login_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "initiate_login_uri", value)
    @property
    @pulumi.getter(name="isFirstParty")
    def is_first_party(self) -> Optional[pulumi.Input[bool]]:
        """``is_first_party`` input property."""
        return pulumi.get(self, "is_first_party")
    @is_first_party.setter
    def is_first_party(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_first_party", value)
    @property
    @pulumi.getter(name="isTokenEndpointIpHeaderTrusted")
    def is_token_endpoint_ip_header_trusted(self) -> Optional[pulumi.Input[bool]]:
        """``is_token_endpoint_ip_header_trusted`` input property."""
        return pulumi.get(self, "is_token_endpoint_ip_header_trusted")
    @is_token_endpoint_ip_header_trusted.setter
    def is_token_endpoint_ip_header_trusted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_token_endpoint_ip_header_trusted", value)
    @property
    @pulumi.getter(name="jwtConfiguration")
    def jwt_configuration(self) -> Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]:
        """``jwt_configuration`` input property."""
        return pulumi.get(self, "jwt_configuration")
    @jwt_configuration.setter
    def jwt_configuration(self, value: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]):
        pulumi.set(self, "jwt_configuration", value)
    @property
    @pulumi.getter(name="logoUri")
    def logo_uri(self) -> Optional[pulumi.Input[str]]:
        """``logo_uri`` input property."""
        return pulumi.get(self, "logo_uri")
    @logo_uri.setter
    def logo_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo_uri", value)
    @property
    @pulumi.getter
    def mobile(self) -> Optional[pulumi.Input['GlobalClientMobileArgs']]:
        """``mobile`` input property."""
        return pulumi.get(self, "mobile")
    @mobile.setter
    def mobile(self, value: Optional[pulumi.Input['GlobalClientMobileArgs']]):
        pulumi.set(self, "mobile", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """``name`` input property."""
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="oidcConformant")
    def oidc_conformant(self) -> Optional[pulumi.Input[bool]]:
        """``oidc_conformant`` input property."""
        return pulumi.get(self, "oidc_conformant")
    @oidc_conformant.setter
    def oidc_conformant(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "oidc_conformant", value)
    @property
    @pulumi.getter(name="refreshToken")
    def refresh_token(self) -> Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]:
        """``refresh_token`` input property."""
        return pulumi.get(self, "refresh_token")
    @refresh_token.setter
    def refresh_token(self, value: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]):
        pulumi.set(self, "refresh_token", value)
    @property
    @pulumi.getter
    def sso(self) -> Optional[pulumi.Input[bool]]:
        """``sso`` input property."""
        return pulumi.get(self, "sso")
    @sso.setter
    def sso(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sso", value)
    @property
    @pulumi.getter(name="ssoDisabled")
    def sso_disabled(self) -> Optional[pulumi.Input[bool]]:
        """``sso_disabled`` input property."""
        return pulumi.get(self, "sso_disabled")
    @sso_disabled.setter
    def sso_disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sso_disabled", value)
    @property
    @pulumi.getter(name="tokenEndpointAuthMethod")
    def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:
        """``token_endpoint_auth_method`` input property."""
        return pulumi.get(self, "token_endpoint_auth_method")
    @token_endpoint_auth_method.setter
    def token_endpoint_auth_method(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "token_endpoint_auth_method", value)
    @property
    @pulumi.getter(name="webOrigins")
    def web_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """``web_origins`` input property."""
        return pulumi.get(self, "web_origins")
    @web_origins.setter
    def web_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "web_origins", value)
@pulumi.input_type
class _GlobalClientState:
    """State-input bag used to look up / filter existing ``GlobalClient`` resources.
    Every field is optional; values are stored via ``pulumi.set`` and exposed
    through matching getter/setter property pairs below. This class is
    machine-generated — do not edit by hand.
    """
    def __init__(__self__, *,
                 addons: Optional[pulumi.Input['GlobalClientAddonsArgs']] = None,
                 allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 app_type: Optional[pulumi.Input[str]] = None,
                 callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 cross_origin_auth: Optional[pulumi.Input[bool]] = None,
                 cross_origin_loc: Optional[pulumi.Input[str]] = None,
                 custom_login_page: Optional[pulumi.Input[str]] = None,
                 custom_login_page_on: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 form_template: Optional[pulumi.Input[str]] = None,
                 grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 initiate_login_uri: Optional[pulumi.Input[str]] = None,
                 is_first_party: Optional[pulumi.Input[bool]] = None,
                 is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
                 jwt_configuration: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 mobile: Optional[pulumi.Input['GlobalClientMobileArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 oidc_conformant: Optional[pulumi.Input[bool]] = None,
                 refresh_token: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']] = None,
                 sso: Optional[pulumi.Input[bool]] = None,
                 sso_disabled: Optional[pulumi.Input[bool]] = None,
                 token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
                 web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering GlobalClient resources.
        """
        # Record only the fields that were explicitly supplied (non-None);
        # fields left at None are never written into the bag.
        if addons is not None:
            pulumi.set(__self__, "addons", addons)
        if allowed_logout_urls is not None:
            pulumi.set(__self__, "allowed_logout_urls", allowed_logout_urls)
        if allowed_origins is not None:
            pulumi.set(__self__, "allowed_origins", allowed_origins)
        if app_type is not None:
            pulumi.set(__self__, "app_type", app_type)
        if callbacks is not None:
            pulumi.set(__self__, "callbacks", callbacks)
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_metadata is not None:
            pulumi.set(__self__, "client_metadata", client_metadata)
        if client_secret is not None:
            pulumi.set(__self__, "client_secret", client_secret)
        if client_secret_rotation_trigger is not None:
            pulumi.set(__self__, "client_secret_rotation_trigger", client_secret_rotation_trigger)
        if cross_origin_auth is not None:
            pulumi.set(__self__, "cross_origin_auth", cross_origin_auth)
        if cross_origin_loc is not None:
            pulumi.set(__self__, "cross_origin_loc", cross_origin_loc)
        if custom_login_page is not None:
            pulumi.set(__self__, "custom_login_page", custom_login_page)
        if custom_login_page_on is not None:
            pulumi.set(__self__, "custom_login_page_on", custom_login_page_on)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if encryption_key is not None:
            pulumi.set(__self__, "encryption_key", encryption_key)
        if form_template is not None:
            pulumi.set(__self__, "form_template", form_template)
        if grant_types is not None:
            pulumi.set(__self__, "grant_types", grant_types)
        if initiate_login_uri is not None:
            pulumi.set(__self__, "initiate_login_uri", initiate_login_uri)
        if is_first_party is not None:
            pulumi.set(__self__, "is_first_party", is_first_party)
        if is_token_endpoint_ip_header_trusted is not None:
            pulumi.set(__self__, "is_token_endpoint_ip_header_trusted", is_token_endpoint_ip_header_trusted)
        if jwt_configuration is not None:
            pulumi.set(__self__, "jwt_configuration", jwt_configuration)
        if logo_uri is not None:
            pulumi.set(__self__, "logo_uri", logo_uri)
        if mobile is not None:
            pulumi.set(__self__, "mobile", mobile)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if oidc_conformant is not None:
            pulumi.set(__self__, "oidc_conformant", oidc_conformant)
        if refresh_token is not None:
            pulumi.set(__self__, "refresh_token", refresh_token)
        if sso is not None:
            pulumi.set(__self__, "sso", sso)
        if sso_disabled is not None:
            pulumi.set(__self__, "sso_disabled", sso_disabled)
        if token_endpoint_auth_method is not None:
            pulumi.set(__self__, "token_endpoint_auth_method", token_endpoint_auth_method)
        if web_origins is not None:
            pulumi.set(__self__, "web_origins", web_origins)
    # Getter/setter pairs below are uniform pass-throughs to pulumi.get/pulumi.set;
    # @pulumi.getter(name=...) maps the snake_case Python name to the provider's camelCase key.
    @property
    @pulumi.getter
    def addons(self) -> Optional[pulumi.Input['GlobalClientAddonsArgs']]:
        return pulumi.get(self, "addons")
    @addons.setter
    def addons(self, value: Optional[pulumi.Input['GlobalClientAddonsArgs']]):
        pulumi.set(self, "addons", value)
    @property
    @pulumi.getter(name="allowedLogoutUrls")
    def allowed_logout_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "allowed_logout_urls")
    @allowed_logout_urls.setter
    def allowed_logout_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_logout_urls", value)
    @property
    @pulumi.getter(name="allowedOrigins")
    def allowed_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "allowed_origins")
    @allowed_origins.setter
    def allowed_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_origins", value)
    @property
    @pulumi.getter(name="appType")
    def app_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "app_type")
    @app_type.setter
    def app_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "app_type", value)
    @property
    @pulumi.getter
    def callbacks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "callbacks")
    @callbacks.setter
    def callbacks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "callbacks", value)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientMetadata")
    def client_metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "client_metadata")
    @client_metadata.setter
    def client_metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "client_metadata", value)
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_secret")
    @client_secret.setter
    def client_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_secret", value)
    @property
    @pulumi.getter(name="clientSecretRotationTrigger")
    def client_secret_rotation_trigger(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "client_secret_rotation_trigger")
    @client_secret_rotation_trigger.setter
    def client_secret_rotation_trigger(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "client_secret_rotation_trigger", value)
    @property
    @pulumi.getter(name="crossOriginAuth")
    def cross_origin_auth(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "cross_origin_auth")
    @cross_origin_auth.setter
    def cross_origin_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cross_origin_auth", value)
    @property
    @pulumi.getter(name="crossOriginLoc")
    def cross_origin_loc(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cross_origin_loc")
    @cross_origin_loc.setter
    def cross_origin_loc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cross_origin_loc", value)
    @property
    @pulumi.getter(name="customLoginPage")
    def custom_login_page(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "custom_login_page")
    @custom_login_page.setter
    def custom_login_page(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_login_page", value)
    @property
    @pulumi.getter(name="customLoginPageOn")
    def custom_login_page_on(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "custom_login_page_on")
    @custom_login_page_on.setter
    def custom_login_page_on(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "custom_login_page_on", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="encryptionKey")
    def encryption_key(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "encryption_key")
    @encryption_key.setter
    def encryption_key(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "encryption_key", value)
    @property
    @pulumi.getter(name="formTemplate")
    def form_template(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "form_template")
    @form_template.setter
    def form_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "form_template", value)
    @property
    @pulumi.getter(name="grantTypes")
    def grant_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "grant_types")
    @grant_types.setter
    def grant_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "grant_types", value)
    @property
    @pulumi.getter(name="initiateLoginUri")
    def initiate_login_uri(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "initiate_login_uri")
    @initiate_login_uri.setter
    def initiate_login_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "initiate_login_uri", value)
    @property
    @pulumi.getter(name="isFirstParty")
    def is_first_party(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "is_first_party")
    @is_first_party.setter
    def is_first_party(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_first_party", value)
    @property
    @pulumi.getter(name="isTokenEndpointIpHeaderTrusted")
    def is_token_endpoint_ip_header_trusted(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "is_token_endpoint_ip_header_trusted")
    @is_token_endpoint_ip_header_trusted.setter
    def is_token_endpoint_ip_header_trusted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_token_endpoint_ip_header_trusted", value)
    @property
    @pulumi.getter(name="jwtConfiguration")
    def jwt_configuration(self) -> Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]:
        return pulumi.get(self, "jwt_configuration")
    @jwt_configuration.setter
    def jwt_configuration(self, value: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]):
        pulumi.set(self, "jwt_configuration", value)
    @property
    @pulumi.getter(name="logoUri")
    def logo_uri(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "logo_uri")
    @logo_uri.setter
    def logo_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo_uri", value)
    @property
    @pulumi.getter
    def mobile(self) -> Optional[pulumi.Input['GlobalClientMobileArgs']]:
        return pulumi.get(self, "mobile")
    @mobile.setter
    def mobile(self, value: Optional[pulumi.Input['GlobalClientMobileArgs']]):
        pulumi.set(self, "mobile", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="oidcConformant")
    def oidc_conformant(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "oidc_conformant")
    @oidc_conformant.setter
    def oidc_conformant(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "oidc_conformant", value)
    @property
    @pulumi.getter(name="refreshToken")
    def refresh_token(self) -> Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]:
        return pulumi.get(self, "refresh_token")
    @refresh_token.setter
    def refresh_token(self, value: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]):
        pulumi.set(self, "refresh_token", value)
    @property
    @pulumi.getter
    def sso(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "sso")
    @sso.setter
    def sso(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sso", value)
    @property
    @pulumi.getter(name="ssoDisabled")
    def sso_disabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "sso_disabled")
    @sso_disabled.setter
    def sso_disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sso_disabled", value)
    @property
    @pulumi.getter(name="tokenEndpointAuthMethod")
    def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "token_endpoint_auth_method")
    @token_endpoint_auth_method.setter
    def token_endpoint_auth_method(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "token_endpoint_auth_method", value)
    @property
    @pulumi.getter(name="webOrigins")
    def web_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "web_origins")
    @web_origins.setter
    def web_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "web_origins", value)
class GlobalClient(pulumi.CustomResource):
    # Overload: construct from individual keyword arguments (one per input property).
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 addons: Optional[pulumi.Input[pulumi.InputType['GlobalClientAddonsArgs']]] = None,
                 allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 app_type: Optional[pulumi.Input[str]] = None,
                 callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 cross_origin_auth: Optional[pulumi.Input[bool]] = None,
                 cross_origin_loc: Optional[pulumi.Input[str]] = None,
                 custom_login_page: Optional[pulumi.Input[str]] = None,
                 custom_login_page_on: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 form_template: Optional[pulumi.Input[str]] = None,
                 grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 initiate_login_uri: Optional[pulumi.Input[str]] = None,
                 is_first_party: Optional[pulumi.Input[bool]] = None,
                 is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
                 jwt_configuration: Optional[pulumi.Input[pulumi.InputType['GlobalClientJwtConfigurationArgs']]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 mobile: Optional[pulumi.Input[pulumi.InputType['GlobalClientMobileArgs']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 oidc_conformant: Optional[pulumi.Input[bool]] = None,
                 refresh_token: Optional[pulumi.Input[pulumi.InputType['GlobalClientRefreshTokenArgs']]] = None,
                 sso: Optional[pulumi.Input[bool]] = None,
                 sso_disabled: Optional[pulumi.Input[bool]] = None,
                 token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
                 web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Create a GlobalClient resource with the given unique name, props, and options.
        The remaining keyword arguments mirror the GlobalClientArgs input properties.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Overload: construct from a prebuilt GlobalClientArgs bundle.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[GlobalClientArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a GlobalClient resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param GlobalClientArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """Dispatch to ``_internal_init``, accepting either of the two overload shapes above."""
        resource_args, opts = _utilities.get_resource_args_opts(GlobalClientArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand the bundle's fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword-argument form: forward everything unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 addons: Optional[pulumi.Input[pulumi.InputType['GlobalClientAddonsArgs']]] = None,
                 allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 app_type: Optional[pulumi.Input[str]] = None,
                 callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 cross_origin_auth: Optional[pulumi.Input[bool]] = None,
                 cross_origin_loc: Optional[pulumi.Input[str]] = None,
                 custom_login_page: Optional[pulumi.Input[str]] = None,
                 custom_login_page_on: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 form_template: Optional[pulumi.Input[str]] = None,
                 grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 initiate_login_uri: Optional[pulumi.Input[str]] = None,
                 is_first_party: Optional[pulumi.Input[bool]] = None,
                 is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
                 jwt_configuration: Optional[pulumi.Input[pulumi.InputType['GlobalClientJwtConfigurationArgs']]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 mobile: Optional[pulumi.Input[pulumi.InputType['GlobalClientMobileArgs']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 oidc_conformant: Optional[pulumi.Input[bool]] = None,
                 refresh_token: Optional[pulumi.Input[pulumi.InputType['GlobalClientRefreshTokenArgs']]] = None,
                 sso: Optional[pulumi.Input[bool]] = None,
                 sso_disabled: Optional[pulumi.Input[bool]] = None,
                 token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
                 web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """Shared constructor body: validate options, build the property bag, and register the resource."""
        # Normalize resource options and default the plugin version from the SDK.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id means we are creating a new resource: build the
            # property bag from the keyword arguments. __props__ may only be
            # supplied (by GlobalClient.get) together with an opts.id.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = GlobalClientArgs.__new__(GlobalClientArgs)
            __props__.__dict__["addons"] = addons
            __props__.__dict__["allowed_logout_urls"] = allowed_logout_urls
            __props__.__dict__["allowed_origins"] = allowed_origins
            __props__.__dict__["app_type"] = app_type
            __props__.__dict__["callbacks"] = callbacks
            __props__.__dict__["client_id"] = client_id
            __props__.__dict__["client_metadata"] = client_metadata
            __props__.__dict__["client_secret"] = client_secret
            __props__.__dict__["client_secret_rotation_trigger"] = client_secret_rotation_trigger
            __props__.__dict__["cross_origin_auth"] = cross_origin_auth
            __props__.__dict__["cross_origin_loc"] = cross_origin_loc
            __props__.__dict__["custom_login_page"] = custom_login_page
            __props__.__dict__["custom_login_page_on"] = custom_login_page_on
            __props__.__dict__["description"] = description
            __props__.__dict__["encryption_key"] = encryption_key
            __props__.__dict__["form_template"] = form_template
            __props__.__dict__["grant_types"] = grant_types
            __props__.__dict__["initiate_login_uri"] = initiate_login_uri
            __props__.__dict__["is_first_party"] = is_first_party
            __props__.__dict__["is_token_endpoint_ip_header_trusted"] = is_token_endpoint_ip_header_trusted
            __props__.__dict__["jwt_configuration"] = jwt_configuration
            __props__.__dict__["logo_uri"] = logo_uri
            __props__.__dict__["mobile"] = mobile
            __props__.__dict__["name"] = name
            __props__.__dict__["oidc_conformant"] = oidc_conformant
            __props__.__dict__["refresh_token"] = refresh_token
            __props__.__dict__["sso"] = sso
            __props__.__dict__["sso_disabled"] = sso_disabled
            __props__.__dict__["token_endpoint_auth_method"] = token_endpoint_auth_method
            __props__.__dict__["web_origins"] = web_origins
        # Register with the engine under the provider's resource token.
        super(GlobalClient, __self__).__init__(
            'auth0:index/globalClient:GlobalClient',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            addons: Optional[pulumi.Input[pulumi.InputType['GlobalClientAddonsArgs']]] = None,
            allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            app_type: Optional[pulumi.Input[str]] = None,
            callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            client_id: Optional[pulumi.Input[str]] = None,
            client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            client_secret: Optional[pulumi.Input[str]] = None,
            client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            cross_origin_auth: Optional[pulumi.Input[bool]] = None,
            cross_origin_loc: Optional[pulumi.Input[str]] = None,
            custom_login_page: Optional[pulumi.Input[str]] = None,
            custom_login_page_on: Optional[pulumi.Input[bool]] = None,
            description: Optional[pulumi.Input[str]] = None,
            encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            form_template: Optional[pulumi.Input[str]] = None,
            grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            initiate_login_uri: Optional[pulumi.Input[str]] = None,
            is_first_party: Optional[pulumi.Input[bool]] = None,
            is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
            jwt_configuration: Optional[pulumi.Input[pulumi.InputType['GlobalClientJwtConfigurationArgs']]] = None,
            logo_uri: Optional[pulumi.Input[str]] = None,
            mobile: Optional[pulumi.Input[pulumi.InputType['GlobalClientMobileArgs']]] = None,
            name: Optional[pulumi.Input[str]] = None,
            oidc_conformant: Optional[pulumi.Input[bool]] = None,
            refresh_token: Optional[pulumi.Input[pulumi.InputType['GlobalClientRefreshTokenArgs']]] = None,
            sso: Optional[pulumi.Input[bool]] = None,
            sso_disabled: Optional[pulumi.Input[bool]] = None,
            token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
            web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'GlobalClient':
        """
        Get an existing GlobalClient resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Attach the provider ID to the options so the constructor performs a lookup.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Seed the state bag with any caller-supplied property values.
        __props__ = _GlobalClientState.__new__(_GlobalClientState)
        __props__.__dict__["addons"] = addons
        __props__.__dict__["allowed_logout_urls"] = allowed_logout_urls
        __props__.__dict__["allowed_origins"] = allowed_origins
        __props__.__dict__["app_type"] = app_type
        __props__.__dict__["callbacks"] = callbacks
        __props__.__dict__["client_id"] = client_id
        __props__.__dict__["client_metadata"] = client_metadata
        __props__.__dict__["client_secret"] = client_secret
        __props__.__dict__["client_secret_rotation_trigger"] = client_secret_rotation_trigger
        __props__.__dict__["cross_origin_auth"] = cross_origin_auth
        __props__.__dict__["cross_origin_loc"] = cross_origin_loc
        __props__.__dict__["custom_login_page"] = custom_login_page
        __props__.__dict__["custom_login_page_on"] = custom_login_page_on
        __props__.__dict__["description"] = description
        __props__.__dict__["encryption_key"] = encryption_key
        __props__.__dict__["form_template"] = form_template
        __props__.__dict__["grant_types"] = grant_types
        __props__.__dict__["initiate_login_uri"] = initiate_login_uri
        __props__.__dict__["is_first_party"] = is_first_party
        __props__.__dict__["is_token_endpoint_ip_header_trusted"] = is_token_endpoint_ip_header_trusted
        __props__.__dict__["jwt_configuration"] = jwt_configuration
        __props__.__dict__["logo_uri"] = logo_uri
        __props__.__dict__["mobile"] = mobile
        __props__.__dict__["name"] = name
        __props__.__dict__["oidc_conformant"] = oidc_conformant
        __props__.__dict__["refresh_token"] = refresh_token
        __props__.__dict__["sso"] = sso
        __props__.__dict__["sso_disabled"] = sso_disabled
        __props__.__dict__["token_endpoint_auth_method"] = token_endpoint_auth_method
        __props__.__dict__["web_origins"] = web_origins
        return GlobalClient(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def addons(self) -> pulumi.Output['outputs.GlobalClientAddons']:
    """Value of the ``addons`` output property of this resource."""
    return pulumi.get(self, "addons")
@property
@pulumi.getter(name="allowedLogoutUrls")
def allowed_logout_urls(self) -> pulumi.Output[Sequence[str]]:
    """Value of the ``allowedLogoutUrls`` output property of this resource."""
    return pulumi.get(self, "allowed_logout_urls")
@property
@pulumi.getter(name="allowedOrigins")
def allowed_origins(self) -> pulumi.Output[Sequence[str]]:
    """Value of the ``allowedOrigins`` output property of this resource."""
    return pulumi.get(self, "allowed_origins")
@property
@pulumi.getter(name="appType")
def app_type(self) -> pulumi.Output[str]:
    """Value of the ``appType`` output property of this resource."""
    return pulumi.get(self, "app_type")
@property
@pulumi.getter
def callbacks(self) -> pulumi.Output[Sequence[str]]:
    """Value of the ``callbacks`` output property of this resource."""
    return pulumi.get(self, "callbacks")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[str]:
    """Value of the ``clientId`` output property of this resource."""
    return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientMetadata")
def client_metadata(self) -> pulumi.Output[Mapping[str, Any]]:
    """Value of the ``clientMetadata`` output property of this resource."""
    return pulumi.get(self, "client_metadata")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[str]:
    """Value of the ``clientSecret`` output property of this resource."""
    return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientSecretRotationTrigger")
def client_secret_rotation_trigger(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
    """Value of the ``clientSecretRotationTrigger`` output property (may be absent)."""
    return pulumi.get(self, "client_secret_rotation_trigger")
@property
@pulumi.getter(name="crossOriginAuth")
def cross_origin_auth(self) -> pulumi.Output[bool]:
    """Value of the ``crossOriginAuth`` output property of this resource."""
    return pulumi.get(self, "cross_origin_auth")
@property
@pulumi.getter(name="crossOriginLoc")
def cross_origin_loc(self) -> pulumi.Output[str]:
    """Value of the ``crossOriginLoc`` output property of this resource."""
    return pulumi.get(self, "cross_origin_loc")
@property
@pulumi.getter(name="customLoginPage")
def custom_login_page(self) -> pulumi.Output[str]:
    """Value of the ``customLoginPage`` output property of this resource."""
    return pulumi.get(self, "custom_login_page")
@property
@pulumi.getter(name="customLoginPageOn")
def custom_login_page_on(self) -> pulumi.Output[bool]:
    """Value of the ``customLoginPageOn`` output property of this resource."""
    return pulumi.get(self, "custom_login_page_on")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
    """Value of the ``description`` output property of this resource."""
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> pulumi.Output[Mapping[str, str]]:
    """Value of the ``encryptionKey`` output property of this resource."""
    return pulumi.get(self, "encryption_key")
@property
@pulumi.getter(name="formTemplate")
def form_template(self) -> pulumi.Output[str]:
    """Value of the ``formTemplate`` output property of this resource."""
    return pulumi.get(self, "form_template")
@property
@pulumi.getter(name="grantTypes")
def grant_types(self) -> pulumi.Output[Sequence[str]]:
    """Value of the ``grantTypes`` output property of this resource."""
    return pulumi.get(self, "grant_types")
@property
@pulumi.getter(name="initiateLoginUri")
def initiate_login_uri(self) -> pulumi.Output[str]:
    """Value of the ``initiateLoginUri`` output property of this resource."""
    return pulumi.get(self, "initiate_login_uri")
@property
@pulumi.getter(name="isFirstParty")
def is_first_party(self) -> pulumi.Output[bool]:
    """Value of the ``isFirstParty`` output property of this resource."""
    return pulumi.get(self, "is_first_party")
@property
@pulumi.getter(name="isTokenEndpointIpHeaderTrusted")
def is_token_endpoint_ip_header_trusted(self) -> pulumi.Output[bool]:
    """Value of the ``isTokenEndpointIpHeaderTrusted`` output property."""
    return pulumi.get(self, "is_token_endpoint_ip_header_trusted")
@property
@pulumi.getter(name="jwtConfiguration")
def jwt_configuration(self) -> pulumi.Output['outputs.GlobalClientJwtConfiguration']:
    """Value of the ``jwtConfiguration`` output property of this resource."""
    return pulumi.get(self, "jwt_configuration")
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> pulumi.Output[str]:
    """Value of the ``logoUri`` output property of this resource."""
    return pulumi.get(self, "logo_uri")
@property
@pulumi.getter
def mobile(self) -> pulumi.Output['outputs.GlobalClientMobile']:
    """Value of the ``mobile`` output property of this resource."""
    return pulumi.get(self, "mobile")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Value of the ``name`` output property of this resource."""
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="oidcConformant")
def oidc_conformant(self) -> pulumi.Output[bool]:
    """Value of the ``oidcConformant`` output property of this resource."""
    return pulumi.get(self, "oidc_conformant")
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> pulumi.Output['outputs.GlobalClientRefreshToken']:
    """Value of the ``refreshToken`` output property of this resource."""
    return pulumi.get(self, "refresh_token")
@property
@pulumi.getter
def sso(self) -> pulumi.Output[bool]:
    """Value of the ``sso`` output property of this resource."""
    return pulumi.get(self, "sso")
@property
@pulumi.getter(name="ssoDisabled")
def sso_disabled(self) -> pulumi.Output[bool]:
    """Value of the ``ssoDisabled`` output property of this resource."""
    return pulumi.get(self, "sso_disabled")
@property
@pulumi.getter(name="tokenEndpointAuthMethod")
def token_endpoint_auth_method(self) -> pulumi.Output[str]:
    """Value of the ``tokenEndpointAuthMethod`` output property of this resource."""
    return pulumi.get(self, "token_endpoint_auth_method")
@property
@pulumi.getter(name="webOrigins")
def web_origins(self) -> pulumi.Output[Sequence[str]]:
    """Value of the ``webOrigins`` output property of this resource."""
    return pulumi.get(self, "web_origins")
|
nilq/baby-python
|
python
|
import os
import unittest
from django.test import TestCase
from utils import description_for_objective, ellipsis, objectives_for_course
# Known-good course/objective identifiers used by the provider-backed tests below.
valid_course = 'MG4'
valid_objective = 'MG4-FACTMULT'
class BasicTests(TestCase):
    """Unit tests for the ``utils`` helpers.

    ``test_ellipsis`` runs offline; the two lookup tests are skipped unless the
    ``TEST_PROVIDER`` environment variable points at a live API provider.
    """

    def test_ellipsis(self):
        """ellipsis() truncates with '...' only when the string exceeds the limit."""
        long_str = 'yadayadayada'
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(ellipsis(long_str, 5), 'ya...')
        self.assertEqual(ellipsis(long_str, len(long_str) - 1), long_str[:-4] + '...')
        self.assertEqual(ellipsis(long_str, len(long_str) + 1), long_str)
        self.assertEqual(ellipsis(long_str, 100), long_str)

    @unittest.skipIf('TEST_PROVIDER' not in os.environ,
                     "Test case can't work without TEST_PROVIDER pointing to API provider")
    def test_objective_lookup(self):
        desc = description_for_objective(valid_objective, os.environ['TEST_PROVIDER'])
        self.assertTrue('factors and multiples' in desc)

    @unittest.skipIf('TEST_PROVIDER' not in os.environ,
                     "Test case can't work without TEST_PROVIDER pointing to API provider")
    def test_course_lookup(self):
        res = objectives_for_course(valid_course, os.environ['TEST_PROVIDER'])
        self.assertTrue(valid_objective in [y for (y, z) in res])
|
nilq/baby-python
|
python
|
# (c) 2016 James Turner <turnerjsm@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Plugin documentation consumed by ansible-doc; fixed the 'CODEbUILD' typo in
# the documented service options (the lookup itself upper-cases input anyway).
DOCUMENTATION = '''
lookup: aws_service_ip_ranges
author:
  - James Turner <turnerjsm@gmail.com>
requirements:
  - must have public internet connectivity
short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
description:
  - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
  - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
options:
  service:
    description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
  region:
    description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
'''

EXAMPLES = """
vars:
  ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
tasks:

- name: "use list return option and iterate as a loop"
  debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "

- name: "Pull S3 IP ranges, and print the default return style"
  debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
"""

RETURN = """
_raw:
  description: comma-separated list of CIDR ranges
"""
import json
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
class LookupModule(LookupBase):
    """Fetch AWS's published ip-ranges.json and return matching CIDR prefixes."""

    def run(self, terms, variables, **kwargs):
        """Return the list of ``ip_prefix`` strings from AWS's published ranges,
        optionally filtered by the ``region`` and/or ``service`` kwargs.

        :raises AnsibleError: on download, TLS, connection or JSON-decode failure.
        """
        try:
            resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
            amazon_response = json.load(resp)['prefixes']
        except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
            # on Python 3+, json.decoder.JSONDecodeError is raised for bad
            # JSON. On 2.x it's a ValueError
            raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
        except HTTPError as e:
            raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
        except SSLValidationError as e:
            raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
        except URLError as e:
            raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
        except ConnectionError as e:
            raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
        if 'region' in kwargs:
            region = kwargs['region']
            # lazy filter; the final list comprehension drains the generator chain
            amazon_response = (item for item in amazon_response if item['region'] == region)
        if 'service' in kwargs:
            # idiom fix: call .upper() on the value rather than unbound str.upper()
            service = kwargs['service'].upper()
            amazon_response = (item for item in amazon_response if item['service'] == service)
        return [item['ip_prefix'] for item in amazon_response]
|
nilq/baby-python
|
python
|
from deque import Deque
def isPalindrome(string: str) -> bool:
    """Return True if *string* reads the same forwards and backwards,
    checked by peeling characters off both ends of a double-ended queue."""
    chars = Deque()
    for character in string:
        chars.addRear(character)
    # Any front/rear mismatch settles the question immediately.
    while chars.size() > 1:
        if chars.removeFront() != chars.removeRear():
            return False
    return True
# Demo: "radar" should print True, "radr" should print False.
print(isPalindrome("radar"))
print(isPalindrome("radr"))
|
nilq/baby-python
|
python
|
from django.contrib.auth import views as auth_views
from django.urls import path
from accounts import views
from accounts.views import (
dealerSignupView, adminSignupView,
customerSignupView)
# URL namespace for reversing, e.g. reverse("accounts:login").
app_name = "accounts"
urlpatterns = [
    # Authentication (stock Django auth views with app-local templates)
    path('login/', auth_views.LoginView.as_view(
        template_name="accounts/login.html"), name="login"),
    path('logout/', auth_views.LogoutView.as_view(
        template_name="accounts/logout.html"), name="logout"),
    # Signup flows for the three user roles
    path("signup/", dealerSignupView.as_view(), name="dealerSignup"),
    path("customer/signup/", customerSignupView.as_view(),
         name="customerSignup"),
    path("administrator/signup/", adminSignupView.as_view(),
         name="adminSignup"),
    # Password Reset
    path('password_reset/',
         auth_views.PasswordResetView.as_view(
             template_name="accounts/password_reset.html",
             success_url='/accounts/password_reset/done/',
             email_template_name="accounts/password_reset_email.html",
             subject_template_name="accounts/password_reset_subject.txt"),
         name="password_reset"),
    path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(
        template_name="accounts/password_reset_done.html",),
        name="password_reset_done"),
    path('reset/<uidb64>/<token>/',
         auth_views.PasswordResetConfirmView.as_view(
             template_name="accounts/password_reset_confirm.html",
             # NOTE(review): '/reset/done/' is an absolute URL; if this urlconf is
             # mounted under /accounts/ it may not resolve to the route below -- confirm.
             success_url='/reset/done/'
         ),
         name="password_reset_confirm"
         ),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(
        template_name="accounts/password_reset_complete.html"),
        name="password_reset_complete"),
    # Profile
    path("farmer/profile/", views.Farmer_Profile_View,
         name="farmerProfile"),
    path('customer/profile/', views.Customer_Profile_View,
         name="customerProfile"),
    path('administrator/profile/', views.administrator_Profile_View,
         name="administratorProfile"),
]
|
nilq/baby-python
|
python
|
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from knox.auth import TokenAuthentication
class RootView(APIView):
    """Minimal authenticated API root; GET responds with a static string."""
    # knox token auth plus a logged-in requirement on every request
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        return Response("api root")
|
nilq/baby-python
|
python
|
import sys
from collections import deque

# numpy is referenced as `np` in HamiltonianAgent.predict but was never imported
# explicitly (it may previously have leaked in via the star import below).
import numpy as np

from gym_snake.envs import *
from gym_snake.base.pos import Pos
from gym_snake.base.direc import Direc
class _HamiltonTableCell:
    """One board square of the Hamiltonian-cycle table: the square's cycle
    index and the direction to leave it along the cycle."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Unassigned state: no cycle index, no outgoing direction.
        self.idx = None
        self.direc = Direc.NONE

    def __str__(self):
        return "{ idx: %d direc: %s }" % (self.idx, self.direc)

    __repr__ = __str__
class _BFSTableCell:
def __init__(self):
self.parent = None
self.dist = sys.maxsize
self.reset()
def __str__(self):
return "{ dist: %d parent: %s }" % \
(self.dist, str(self.parent))
__repr__ = __str__
def reset(self):
self.parent = None
self.dist = sys.maxsize
class HamiltonianAgent(object):
    """Snake agent that follows a precomputed Hamiltonian cycle over the board,
    taking BFS shortcuts toward the food only while doing so provably cannot
    make the snake collide with its own body. The observation is re-parsed on
    every step, so the agent carries no state between episodes.
    """

    def __init__(self, screen_height, screen_width):
        """Precompute the Hamiltonian cycle and allocate the BFS table.

        :raises ValueError: if both dimensions are odd -- a Hamiltonian cycle
            on a grid graph requires at least one even side.
        """
        if screen_height % 2 == 1 and screen_width % 2 == 1:
            raise ValueError("Either height or width of screen must be an even number for Hamiltonian cycle to exist")
        self.width = screen_width
        self.height = screen_height
        self.snake = []          # positions currently occupied by the snake (head included)
        self.snake_head = None
        self.food = None
        self.map_capacity = self.height * self.width
        self._hamilton_table = [[_HamiltonTableCell() for _ in range(self.height)]
                                for _ in range(self.width)]
        self._bfs_table = [[_BFSTableCell() for _ in range(self.height)]
                           for _ in range(self.width)]
        self._generate_route()

    def __call__(self, obs):
        """Predict an action for every observation in a batch."""
        return [self.predict(obs[i]) for i in range(obs.shape[0])]

    def predict(self, obs):
        """Return the next action for a single observation."""
        # If flat observations are being used, transform into grid observations
        if obs.ndim == 1:
            obs = np.reshape(obs, (1, self.width, self.height))
        elif obs.ndim != 3:
            # BUG FIX: the exception was previously constructed but never raised.
            raise ValueError("Invalid observation shape")
        self._parse_obs(obs)
        head = self.snake_head
        # Default action: keep following the Hamiltonian cycle.
        action = self._hamilton_table[head.x][head.y].direc
        path = self._shortest_path_to_food()
        # Only consider shortcuts while the board is mostly empty.
        if len(self.snake) < 0.75 * self.map_capacity:
            if path:
                for nxt in path:
                    head_idx = self._hamilton_table[head.x][head.y].idx
                    food_idx = self._hamilton_table[self.food.x][self.food.y].idx
                    nxt_idx = self._hamilton_table[nxt.x][nxt.y].idx
                    # Default to BFS path if it is physically impossible to collide with body
                    if len(self.snake) <= 2:
                        action = head.direc_to(nxt)
                    else:
                        # Since we don't know which block is the tail, check all snake body blocks
                        choose_shortest = True
                        for body in self.snake:
                            body_idx = self._hamilton_table[body.x][body.y].idx
                            head_idx_rel = self._relative_dist(body_idx, head_idx)
                            nxt_idx_rel = self._relative_dist(body_idx, nxt_idx)
                            food_idx_rel = self._relative_dist(body_idx, food_idx)
                            if not (head_idx_rel < nxt_idx_rel <= food_idx_rel):
                                choose_shortest = False
                                break
                        if choose_shortest:
                            action = head.direc_to(nxt)
        # If we ended up about to take a bad action, attempt to find a safe space.
        if not self._is_valid(head.adj(action)):
            if path:
                action = head.direc_to(path[0])
            else:
                # If BFS does not yield a safe route, look for any adjacent safe space
                # (no break: the last valid neighbour wins, as in the original logic).
                for pos in head.all_adj():
                    if self._is_valid(pos):
                        action = head.direc_to(pos)
        return action

    def _parse_obs(self, obs):
        """Extract head, body and food positions from a grid observation."""
        self.snake = []
        for x in range(self.width):
            for y in range(self.height):
                if obs[0][x][y] == SnakeEnv.HEAD_BLOCK:
                    self.snake_head = Pos(x, y)
                    self.snake.append(self.snake_head)
                elif obs[0][x][y] == SnakeEnv.SNAKE_BLOCK:
                    self.snake.append(Pos(x, y))
                elif obs[0][x][y] == SnakeEnv.FOOD_BLOCK:
                    self.food = Pos(x, y)

    def _generate_route(self):
        # Generate a predetermined hamiltonian cycle so that it will be the same
        # no matter what observation is received.
        cnt = 0
        if self.height % 2 == 0:
            # Even height: run up column 0, then boustrophedon back through the rest.
            for y in range(self.height):
                self._hamilton_table[0][y].idx = cnt
                self._hamilton_table[0][y].direc = Direc.UP
                if y == self.height - 1:
                    self._hamilton_table[0][y].direc = Direc.RIGHT
                cnt += 1
            for y in range(self.height - 1, -1, -1):
                if y % 2 == 1:
                    path = range(1, self.width)
                    direction = Direc.RIGHT
                else:
                    path = range(self.width - 1, 0, -1)
                    direction = Direc.LEFT
                for idx, x in enumerate(path):
                    self._hamilton_table[x][y].idx = cnt
                    self._hamilton_table[x][y].direc = direction
                    if idx == self.width - 2 and y != 0:
                        self._hamilton_table[x][y].direc = Direc.DOWN
                    cnt += 1
        else:
            # Even width: mirror of the above, sweeping rows instead of columns.
            for x in range(self.width):
                self._hamilton_table[x][0].idx = cnt
                self._hamilton_table[x][0].direc = Direc.RIGHT
                if x == self.width - 1:
                    self._hamilton_table[x][0].direc = Direc.UP
                cnt += 1
            for x in range(self.width - 1, -1, -1):
                if x % 2 == 1:
                    path = range(1, self.height)
                    direction = Direc.UP
                else:
                    path = range(self.height - 1, 0, -1)
                    direction = Direc.DOWN
                for idx, y in enumerate(path):
                    self._hamilton_table[x][y].idx = cnt
                    self._hamilton_table[x][y].direc = direction
                    if idx == self.height - 2 and x != 0:
                        self._hamilton_table[x][y].direc = Direc.LEFT
                    cnt += 1

    def _shortest_path_to_food(self):
        """BFS from the food to the head; return every head-adjacent square that
        begins a shortest path to the food (empty list if unreachable)."""
        self._reset_bfs_table()
        # Run BFS from food to head so we can rank the head's neighbours by
        # their distance to the food when several candidates exist.
        start = self.food
        dest = self.snake_head
        self._bfs_table[start.x][start.y].dist = 0
        queue = deque()
        queue.append(start)
        path_found = False
        while queue:
            cur = queue.popleft()
            if cur == dest:
                path_found = True
            # Traverse adjacent positions
            for pos in cur.all_adj():
                if self._is_valid(pos):
                    adj_cell = self._bfs_table[pos.x][pos.y]
                    if adj_cell.dist == sys.maxsize:
                        adj_cell.parent = cur
                        adj_cell.dist = self._bfs_table[cur.x][cur.y].dist + 1
                        queue.append(pos)
        # Return all possible next steps which could lead to shortest route to food.
        next_steps = []
        min_path_len = sys.maxsize
        if path_found:
            for pos in dest.all_adj():
                if self._is_valid(pos):
                    adj_cell = self._bfs_table[pos.x][pos.y]
                    if adj_cell.dist < min_path_len:
                        next_steps = [pos]
                        min_path_len = adj_cell.dist
                    elif adj_cell.dist == min_path_len:
                        next_steps.append(pos)
        return next_steps

    def _is_valid(self, pos):
        """True if ``pos`` is on the board and not occupied by the snake body."""
        if pos in self.snake and pos != self.snake_head:
            return False
        return not self._out_of_bounds(pos)

    def _out_of_bounds(self, pos):
        """True if ``pos`` lies outside the board rectangle."""
        return pos.x < 0 or pos.y < 0 or pos.x >= self.width or pos.y >= self.height

    def _relative_dist(self, ori, x):
        """Forward distance along the cycle from index ``ori`` to ``x``, wrapping."""
        if ori > x:
            x += self.map_capacity
        return x - ori

    def _reset_bfs_table(self):
        """Reset every BFS cell to the unvisited state before a new search."""
        for row in self._bfs_table:
            for col in row:
                col.reset()
if __name__ == "__main__":
    # Smoke test: drive the flat-observation env with the Hamiltonian agent.
    # SnakeEnvFlatObsSparseReward comes from the `gym_snake.envs` star import.
    env = SnakeEnvFlatObsSparseReward(screen_width=8, screen_height=8)
    agent = HamiltonianAgent(screen_width=8, screen_height=8)
    n_timesteps = 100000
    observation = env.reset()
    for _ in range(n_timesteps):
        action = agent.predict(observation)
        observation, reward, done, infos = env.step(action)
        env.render("human")
        if done:
            observation = env.reset()
            # A reward of -100.0 is treated here as "the snake died", which the
            # cycle-following agent should never allow.
            if reward == -100.0:
                assert False, "Snake died unexpectedly"
|
nilq/baby-python
|
python
|
#==================================================================================
# PROGRAM: "boat_sat.py"
# LOCATION: beluga>examples>Mansell
# Author: Justin Mansell (2016)
#
# Description: simple path optimization for a boat with bounded control used
# to demonstrate graph search continuation. Uses saturation fcns.
#==================================================================================
#Import Necessary Modules
import numpy as np
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
from math import *
import functools
def sat_func(u, cu=1, cl=-1, s0=1):
    """Smooth (logistic) saturation of ``u`` between two asymptotes.

    Generalized from the original hard-coded constants; the defaults reproduce
    the previous behavior exactly (cu=1, cl=-1, s0=1, i.e. tanh-like in [-1, 1]).

    :param u: input value or numpy array.
    :param cu: upper asymptote (value approached as u -> +inf).
    :param cl: lower asymptote (value approached as u -> -inf).
    :param s0: smoothing factor; the slope at u=0 is s0.
    :return: saturated value with the same shape as ``u``.
    """
    return cu - (cu - cl) / (1 + np.exp(4 * s0 / (cu - cl) * u))
def get_problem():
    """A simple example of graph search continuation"""
    # Rename this and/or move to optim package?
    problem = beluga.optim.Problem('boat_sat')
    # Define independent variables
    problem.independent('t', 's')
    # Define equations of motion; heading rate is the saturated control.
    problem.state('x','V*cos(hdg)','m') \
           .state('y','V*sin(hdg)','m') \
           .state('hdg','k*(1-2/(1+exp(2*u)))','rad')
    # Define controls
    problem.control('u',1)
    # problem.control('hdgdot','rad/s')
    problem.mode = 'dae'
    # Define Cost Functional (minimum time plus a small control penalty)
    problem.cost['path'] = Expression('1+eps*u^2', 's')
    # Define constraints: fixed initial state, fixed terminal position.
    problem.constraints().initial('x-x_0','m') \
                         .initial('y-y_0','m') \
                         .initial('hdg-hdg_0','rad') \
                         .terminal('x-x_f','m') \
                         .terminal('y-y_f','m')
    # Define constants
    problem.constant('cmax',1.0,'m/s^2') # Maximum allowed centripetal acceleration
    problem.constant('V',1,'m/s') # Velocity
    problem.constant('k',1,'rad/s')
    problem.constant('eps',1,1) # Error constant
    # Problem scaling (all units left at 1: problem already nondimensional)
    problem.scale.unit('m',1) \
                 .unit('s',1) \
                 .unit('rad',1)
    # Configure solver
    problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False,number_arcs=16)
    # problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=100, verbose = True, cached = False)
    # Initial Guess....................x0..y0..hdg0
    problem.guess.setup('auto',start=[0.0,0.0,0.0])
    # Add Continuation Steps: walk the terminal point out to (3, 0), then (3, 3).
    # problem.steps.add_step().num_cases(2) \
    #     .terminal('x', 3) \
    #     .terminal('y', 0.1)
    problem.steps.add_step().num_cases(30) \
                            .terminal('x', 3.0) \
                            .terminal('y', 0)
    problem.steps.add_step().num_cases(30) \
                            .terminal('x', 3.0) \
                            .terminal('y', 3.0)
    return problem
if __name__ == '__main__':
    # Build the boat problem and hand it to the Beluga runner.
    import beluga.Beluga as Beluga
    problem = get_problem()
    sol = Beluga.run(problem)
|
nilq/baby-python
|
python
|
import namespace_override as override
def _(methods, address, class_name):
    """Assemble the opening of a C# source file (usings, namespace, class
    header) from the ``methods`` mapping. Consumes the 'using.cs' and '_.cs'
    entries from ``methods`` as a side effect. Returns ``(header_text,
    post_text)`` where ``post_text`` holds the lines after the '---;' marker.
    """
    header = []
    trailer = []
    if 'using.cs' in methods:
        # Lines before the '---;' marker go to the header, the rest to trailer.
        target = header
        for line in methods.pop('using.cs'):
            if '---;' in line:
                target = trailer
            else:
                target.append(line)
        header.append('')
    # Map every namespace component through the override table.
    mapped = [override._(part) for part in address]
    header.append('namespace ' + '.'.join(mapped))
    header.append('{')
    if 'test' in class_name:
        header.extend(['    using NUnit.Framework;', '', '    [TestFixture]'])
    if '_.cs' in methods:
        start = list(methods.pop('_.cs'))
        # Interfaces are not marked serializable.
        if 'interface' not in start[0]:
            header.append('    [System.Serializable]')
        header.extend('    ' + line for line in start)
    else:
        header.append('    [System.Serializable]')
        header.append('    public class ' + class_name)
    if '}' not in header[-1]:
        header.extend(['    {', '    }'])
    # Note: the final line is intentionally dropped before joining.
    rendered = '\n' + '\n'.join(header[:-1]) + '\n'
    return rendered, '\n'.join(trailer)
|
nilq/baby-python
|
python
|
from abc import abstractmethod
from typing import Union, Optional, Any, List, Coroutine
import inspect
from .typings import TProtocol
from .interact import InteractiveObject, IOManager
from .component import MetadataComponent
from .behavior import BaseBehavior
from .utilles import IOStatus
class MonoMetaComponent(MetadataComponent):
    """Metadata component bound to a :class:`Monomer`."""
    io: "Monomer"
    name: str
    alias: str
    # Attribute names this component treats as its own core fields.
    __limit__ = ["name", "alias"]

    def __getitem__(self, item: str) -> Union[Any, Coroutine[Any, Any, Any]]:
        # NOTE(review): `or` makes falsy stored values (0, "", []) fall through
        # to the protocol lookup -- confirm that is intended.
        res = self.additions.get(item, None) or self.__dict__.get(item, None)
        if res is None:
            # Unknown key: delegate to the protocol (may return a coroutine).
            return self.protocol.put_metadata(item, self.io)
        return res
class BaseMonoBehavior(BaseBehavior):
    """Behavior component template for a Monomer; subclasses override the hooks."""
    io: "Monomer"

    @abstractmethod
    def activate(self):
        ...

    @abstractmethod
    async def change_metadata(
            self,
            meta: str,
            value: Any,
            target: Optional["Monomer"] = None,
            **addition
    ):
        # NOTE(review): this default body performs the protocol call and THEN
        # raises NotImplementedError, forcing subclasses to override -- confirm
        # the post-await raise is intentional.
        await self.io.protocol.set_metadata(meta, value, target or self.io, **addition)
        raise NotImplementedError

    async def update(self):
        # NOTE(review): `pass` is dead code; the raise below always fires.
        pass
        raise NotImplementedError
class Monomer(InteractiveObject):
    """Interactive entity identified as ``<identifier>@<protocol identifier>``,
    carrying a metadata component and a behavior component."""
    prefab_metadata = MonoMetaComponent
    prefab_behavior = BaseMonoBehavior
    metadata: MonoMetaComponent
    behavior: BaseMonoBehavior

    def __init__(
            self,
            protocol: TProtocol,
            name: str,
            identifier: Union[int, str],
            alias: Optional[str] = None,
    ):
        data = self.prefab_metadata(self)
        data.protocol = protocol
        # Globally unique id: "<identifier>@<protocol identifier>".
        data.identifier = f"{identifier}@{protocol.identifier}"
        data.name = name
        data.alias = alias or ""
        super(Monomer, self).__init__(data)
        self.metadata.state = IOStatus.ESTABLISHED

    def __getitem__(self, item: str):
        parts = item.split(".")
        if len(parts) == 1:
            return self.metadata.__getitem__(item)
        tag, attr = parts[0], parts[1]
        if self.compare(tag):
            # NOTE(review): passes the full "tag.attr" string (not just `attr`)
            # to the metadata lookup, and returns None when the tag does not
            # match -- confirm both are intended.
            return self.metadata.__getitem__(item)

    def __setstate__(self, state):
        # HACK: walks two frames up the call stack to grab the caller's `self`
        # (expected to carry a `protocol`) during unpickling -- fragile, it
        # depends entirely on the unpickling call site.
        f = inspect.currentframe()
        lcs = f.f_back.f_back.f_locals
        self.__init__(
            lcs['self'].protocol,
            state['metadata']['name'],
            state['metadata']['identifier'].split("@")[0],
            state['metadata']['alias']
        )
        self.add_tags(*state['metadata']['tags'])
class _EntitySelect:
    """Slice-syntax query helper: ``at_mono["tag":"x", "type":"Y"]`` returns all
    Monomers matching every given condition."""

    def __getitem__(self, item) -> List["Monomer"]:
        monomers: List["Monomer"] = IOManager.filter(Monomer)
        conditions = []
        # A single slice is one condition; a tuple of slices means several.
        slices = list(item) if not isinstance(item, slice) else [item]
        for sl in slices:
            # slice.start is the field key, slice.stop its required value.
            key, value = sl.start, str(sl.stop)
            if key in ("id", "uid", "identifier"):
                def _(monomer: "Monomer", _value=value):
                    return monomer.metadata.pure_id == _value
            elif key == "tag":
                def _(monomer: "Monomer", _value=value):
                    return monomer.prime_tag == _value
            elif key == "type":
                def _(monomer: "Monomer", _value=value):
                    return monomer.__class__.__name__ == _value
            else:
                # Fallback: match any metadata attribute by name.
                def _(monomer: "Monomer", _key=key, _value=value):
                    return getattr(monomer.metadata, _key, None) == _value
            # Default arguments bind key/value now, avoiding the late-binding
            # closure trap.
            conditions.append(_)
        return list(filter(lambda x: all([condition(x) for condition in conditions]), monomers))
# Module-level query object, e.g. at_mono["tag":"friend"].
at_mono = _EntitySelect()
|
nilq/baby-python
|
python
|
try:
from . import generic as g
except BaseException:
import generic as g
class SectionTest(g.unittest.TestCase):

    def test_section(self):
        """Cross-section a mesh at many Z heights and check that the three
        section APIs (section, section_multiplane, mesh_multiplane) agree."""
        mesh = g.get_mesh('featuretype.STL')
        # this hits many edge cases
        step = .125
        z_levels = g.np.arange(start=mesh.bounds[0][2],
                               stop=mesh.bounds[1][2] + 2 * step,
                               step=step)
        sections = [None] * len(z_levels)
        for index, z in enumerate(z_levels):
            plane_origin = [0, 0, z]
            plane_normal = [0, 0, 1]
            section = mesh.section(plane_origin=plane_origin,
                                   plane_normal=plane_normal)
            if section is None:
                # section will return None if the plane doesn't
                # intersect the mesh; that should only happen above the mesh top
                assert z > (mesh.bounds[1][2] -
                            g.trimesh.constants.tol.merge)
                continue
            planar, to_3D = section.to_planar()
            assert planar.is_closed
            assert (len(planar.polygons_full) > 0)
            sections[index] = planar
        # try getting the sections as Path2D through
        # the multiplane method
        paths = mesh.section_multiplane(plane_origin=[0, 0, 0],
                                        plane_normal=[0, 0, 1],
                                        heights=z_levels)
        # call the multiplane method directly
        lines, faces, T = g.trimesh.intersections.mesh_multiplane(
            mesh=mesh,
            plane_origin=[0, 0, 0],
            plane_normal=[0, 0, 1],
            heights=z_levels)
        # make sure various methods return the same results
        for index in range(len(z_levels)):
            if sections[index] is None:
                assert len(lines[index]) == 0
                continue
            rc = g.trimesh.load_path(lines[index])
            # cross-section areas must agree across all three code paths
            assert g.np.isclose(rc.area, sections[index].area)
            assert g.np.isclose(rc.area, paths[index].area)
class PlaneLine(g.unittest.TestCase):

    def test_planes(self):
        """Intersect random lines from the origin with stacked horizontal planes
        and check every hit is valid and lands at the plane's Z height."""
        count = 10
        z = g.np.linspace(-1, 1, count)
        plane_origins = g.np.column_stack((
            g.np.random.random((count, 2)), z))
        plane_normals = g.np.tile([0, 0, -1], (count, 1))
        line_origins = g.np.tile([0, 0, 0], (count, 1))
        line_directions = g.np.random.random((count, 3))
        i, valid = g.trimesh.intersections.planes_lines(
            plane_origins=plane_origins,
            plane_normals=plane_normals,
            line_origins=line_origins,
            line_directions=line_directions)
        assert valid.all()
        # each intersection's Z coordinate must match its plane height
        assert (g.np.abs(i[:, 2] - z) < g.tol.merge).all()
if __name__ == '__main__':
    # run the tests with trimesh's log handler attached
    g.trimesh.util.attach_to_log()
    g.unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-27 10:58
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): adds nullable description fields and
    # updates a datetime default. Applied migrations should not be edited.

    dependencies = [
        ('aggregator', '0039_merge_20190316_2108'),
    ]

    operations = [
        migrations.AddField(
            model_name='dimension',
            name='description',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='variable',
            name='description',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='datasetaccessrequest',
            name='creation_date',
            # NOTE(review): this default is the fixed moment the migration was
            # generated, not "now" at row-creation time -- confirm the model
            # itself uses a callable (e.g. timezone.now) so this frozen value
            # is only a historical snapshot.
            field=models.DateTimeField(default=datetime.datetime(2019, 3, 27, 12, 58, 43, 390000)),
        ),
    ]
|
nilq/baby-python
|
python
|
from django.test import TestCase
from rodan.models.job import Job
# from model_mommy import mommy
from rodan.test.helpers import RodanTestTearDownMixin, RodanTestSetUpMixin
class JobTestCase(RodanTestTearDownMixin, TestCase, RodanTestSetUpMixin):
    """Save/delete round-trip tests for the Job model."""

    def setUp(self):
        self.setUp_rodan()

    def test_save(self):
        created = Job(name="test job")
        created.save()
        fetched = Job.objects.get(name="test job")
        self.assertEqual(fetched.name, created.name)

    def test_delete(self):
        created = Job(name="test job")
        created.save()
        Job.objects.get(name="test job").delete()
        remaining = Job.objects.filter(name="test job")
        self.assertFalse(remaining.exists())
|
nilq/baby-python
|
python
|
import sys
import base64
import logging
import marshal
import importlib.util
from os import sep as path_sep
from paker.exceptions import PakerImportError
# Prefer the C-accelerated _memimporter when present; otherwise fall back to
# paker's pure-Python temp-file based importer under the same name.
_MEMIMPORTER = False
try:
    import _memimporter
    _MEMIMPORTER = True
except ImportError:
    from paker.importers import _tempimporter as _memimporter
# The module type (equivalent to types.ModuleType), taken from a live module.
_module_type = type(sys)
class jsonimporter:
def __init__(self, jsonmod):
super(jsonimporter, self).__init__()
self.jsonmod: dict = jsonmod
self.module_cache = {}
self.logger = logging.getLogger(self.__class__.__name__)
sys.meta_path.append(self)
# Check whether we can satisfy the import of the module named by
# 'fullname', or whether it could be a portion of a namespace
# package. Return self if we can load it, a string containing the
# full path if it's a possible namespace portion, None if we
# can't load it.
def find_loader(self, fullname: str, path=None):
"""find_loader(fullname, path=None) -> self, str or None.
Search for a module specified by 'fullname'. 'fullname' must be the
fully qualified (dotted) module name. It returns the zipimporter
instance itself if the module was found, a string containing the
full path name if it's possibly a portion of a namespace package,
or None otherwise. The optional 'path' argument is ignored -- it's
there for compatibility with the importer protocol.
"""
path = fullname.split(".")
try:
jsonmod = self.jsonmod[path[0]]
for submod in path[1:]:
jsonmod = jsonmod["modules"][submod]
return self, []
except KeyError:
return None, []
def find_module(self, fullname, path=None):
"""find_module(fullname, path=None) -> self or None.
Search for a module specified by 'fullname'. 'fullname' must be the
fully qualified (dotted) module name. It returns the zipimporter
instance itself if the module was found, or None if it wasn't.
The optional 'path' argument is ignored -- it's there for compatibility
with the importer protocol.
"""
self.logger.debug("searching for {}".format(fullname))
return self.find_loader(fullname, path)[0]
def get_data(self, fullname):
"""Get module data by name in following format:
- package\\module.extension
This method is called by _memimporter to get source code of
.pyd and .dll modules.
"""
path = fullname.split(".")[0].split("\\")
try:
jsonmod = self.jsonmod[path[0]]
for submod in path[1:]:
jsonmod = jsonmod["modules"][submod]
return base64.b64decode(jsonmod["code"])
except Exception as e:
return None
# Load and return the module named by 'fullname'.
def load_module(self, fullname):
"""load_module(fullname) -> module.
Load the module specified by 'fullname'. 'fullname' must be the
fully qualified (dotted) module name. It returns the imported
module, or raises PakerImportError if it wasn't found.
"""
mod = sys.modules.get(fullname)
if isinstance(mod, _module_type):
return mod
if fullname in self.module_cache:
self.logger.info("loading previously imported module {}".format(fullname))
return self.module_cache[fullname]
try:
path = fullname.split(".")
jsonmod = self.jsonmod[path[0]]
for submod in path[1:]:
jsonmod = jsonmod["modules"][submod]
except KeyError:
raise PakerImportError("could not find {} module".format(fullname))
extension = jsonmod["extension"]
if extension == "py":
mod = _module_type(fullname)
mod.__loader__ = self
if jsonmod["type"] == "package":
mod.__path__ = ["paker://" + fullname.replace(".", path_sep)]
if not hasattr(mod, '__builtins__'):
mod.__builtins__ = __builtins__
sys.modules[fullname] = mod
exec(jsonmod["code"], mod.__dict__)
elif extension == "pyc":
mod = _module_type(fullname)
mod.__loader__ = self
if jsonmod["type"] == "package":
mod.__path__ = ["paker://" + fullname.replace(".", path_sep)]
if not hasattr(mod, '__builtins__'):
mod.__builtins__ = __builtins__
sys.modules[fullname] = mod
exec(marshal.loads(base64.b64decode(jsonmod["code"])), mod.__dict__)
elif extension in ("dll", "pyd", "so"):
# initname = "init" + fullname.rsplit(".", 1)[-1]
initname = "PyInit_" + fullname.split(".")[-1]
path = fullname.replace(".", "\\") + "." + extension
spec = importlib.util.find_spec(fullname, path)
self.logger.info("using {} to load '.{}' file".format("_memimporter" if _MEMIMPORTER else "_tempimporter",
extension))
mod = _memimporter.import_module(fullname, path, initname, self.get_data, spec)
mod.__name__ = fullname
sys.modules[fullname] = mod
else:
raise PakerImportError("module extension must be .py, .pyc, .dll, .pyd or .so (got {})".format(extension))
try:
mod = sys.modules[fullname]
except KeyError:
raise PakerImportError("module {} not found in sys.modules".format(fullname))
self.logger.info("{} has been imported successfully".format(mod.__name__))
self.module_cache[fullname] = mod
return mod
def add_module(self, module_name: str, module: dict):
"""Add new module to jsonimporter object."""
if not isinstance(module, dict):
raise PakerImportError("module must be a dict (got {})".format(type(module)))
self.jsonmod[module_name] = module
self.logger.info("{} has been added successfully".format(module_name))
def unload_module(self, module):
"""Unload single module from sys.modules and remove its serialized source code from memory."""
if isinstance(module, _module_type):
module = module.__name__
if module in self.jsonmod:
del self.jsonmod[module]
if module in self.module_cache:
del self.module_cache[module]
if module in sys.modules:
del sys.modules[module]
self.logger.info("{} has been unloaded successfully".format(module))
def unload(self):
"""Unload all imported modules and remove jsonimporter from meta path."""
for module_name in list(self.jsonmod.keys()):
del self.jsonmod[module_name]
if self in sys.meta_path:
sys.meta_path.remove(self)
for module_name in list(self.module_cache.keys()):
del self.module_cache[module_name]
if module_name in sys.modules:
del sys.modules[module_name]
self.logger.info("unloaded all modules")
    def __enter__(self):
        # Context-manager entry: the importer itself is the managed resource.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: always tear down all imported modules,
        # even when the body raised (exception info is ignored).
        self.unload()
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
from typing import Optional, List
@dataclass
class Attack:
    """A single card attack record.

    NOTE(review): field names mirror the Pokemon TCG API attack schema
    (cost / convertedEnergyCost / damage / text) — confirm against the caller.
    """
    # Display name of the attack.
    name: str
    # Energy symbols required to use the attack.
    cost: List[str]
    # Total number of energy units required.
    convertedEnergyCost: int
    # Damage string; None when the attack deals no direct damage.
    damage: Optional[str]
    # Rules text; None when absent.
    text: Optional[str]
|
nilq/baby-python
|
python
|
from django import template
from django.utils import timezone
from schedules.services import get_times_from_day
from schedules.models import TimeOfDay
register = template.Library()
@register.inclusion_tag('templatetags/calendar_month.html')
def calendar_month():
    """Render the monthly calendar inclusion tag.

    Currently a stub: passes a placeholder ``variable`` (always None) to
    the template context.
    """
    # BUG FIX: removed a leftover debug print that wrote ">>>>>>" to
    # stdout on every template render.
    variable = None
    return {'variable': variable}
|
nilq/baby-python
|
python
|
from mock import patch
from nerve_tools.envoy import get_envoy_ingress_listeners
def test_get_envoy_ingress_listeners_success():
    """A listener reported by the Envoy admin endpoint is mapped to
    ((service, port) -> listener port)."""
    admin_response = {
        'listener_statuses': [
            {
                'name': 'test_service.main.1234.ingress_listener',
                'local_address': {
                    'socket_address': {
                        'address': '0.0.0.0',
                        'port_value': 54321,
                    },
                },
            },
        ],
    }
    with patch(
        'nerve_tools.envoy._get_envoy_listeners_from_admin',
        return_value=admin_response,
    ):
        assert get_envoy_ingress_listeners(123) == {('test_service.main', 1234): 54321}
def test_get_envoy_ingress_listeners_failure():
    """Any error while querying the Envoy admin endpoint yields an empty mapping."""
    failing_get = patch(
        'nerve_tools.envoy.requests.get',
        side_effect=Exception,
    )
    with failing_get:
        result = get_envoy_ingress_listeners(123)
    assert result == {}
|
nilq/baby-python
|
python
|
import datetime
def convert_timestamp(ts: float) -> str:
    """Convert a Unix timestamp to a local-time ``%m-%d-%Y`` string.

    Args:
        ts: seconds since the epoch (int or float). The previous
            annotation said ``str``, but ``datetime.fromtimestamp``
            requires a number — a string raises TypeError.

    Returns:
        The date formatted as ``MM-DD-YYYY`` in the local timezone.
    """
    datetime_obj = datetime.datetime.fromtimestamp(ts)
    return datetime_obj.strftime("%m-%d-%Y")
def extract_attributes_from_subreddit(subreddit) -> dict:
    '''Helper function that extracts and stores attributes from Subreddit object.

    Returns a dict keyed by the output field name; note that
    "videos_allowed"/"images_allowed" come from the subreddit's
    allow_videos/allow_images attributes.
    '''
    # Output key -> attribute name on the subreddit object.
    field_to_attr = {
        "active_user_count": "active_user_count",
        "url": "url",
        "title": "title",
        "subscribers": "subscribers",
        "subreddit_type": "subreddit_type",
        "spoilers_enabled": "spoilers_enabled",
        "public_description": "public_description",
        "over18": "over18",
        "created": "created",
        "created_utc": "created_utc",
        "lang": "lang",
        "videos_allowed": "allow_videos",
        "images_allowed": "allow_images",
    }
    return {key: getattr(subreddit, attr) for key, attr in field_to_attr.items()}
|
nilq/baby-python
|
python
|
# Exercise 76 - Date and Time Generator
from datetime import datetime

# Print today's date in long form, e.g. "Today is Monday, January 01, 2024".
current_moment = datetime.now()
print(current_moment.strftime('Today is %A, %B %d, %Y'))
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class EvaluationConfig(AppConfig):
    """Django app configuration for the grandchallenge.evaluation app."""
    name = "grandchallenge.evaluation"
    def ready(self):
        # Imported for side effects only: registers the app's signal
        # handlers once the app registry is fully populated.
        # noinspection PyUnresolvedReferences
        import grandchallenge.evaluation.signals
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from Startup import db
from sqlalchemy import Column, Integer, DateTime, Text
class History_Data(db.Model):
    """ORM model for the 'history_data' table (history of sent messages)."""
    __tablename__ = 'history_data'
    # Auto-incrementing primary key ("編號" = serial number).
    ID = Column(Integer, primary_key=True, autoincrement=True , comment='編號')
    # Message body ("歷史訊息" = historical message).
    msg = Column(Text(), nullable=False, comment='歷史訊息')
    # Timestamp the message was sent ("發送日期" = send date).
    InertDate = Column(DateTime(), nullable=False, comment='發送日期')
|
nilq/baby-python
|
python
|
# Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from typing import Any
from zipfile import BadZipFile, ZipFile
from monai.deploy.utils.importutil import optional_import
from .model import Model
torch, _ = optional_import("torch")
class TorchScriptModel(Model):
    """Represents TorchScript model.
    TorchScript serialization format (TorchScript model file) is created by torch.jit.save() method and
    the serialized model (which usually has .pt or .pth extension) is a ZIP archive containing many files.
    (https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/serialization.md)
    We consider that the model is a torchscript model if its unzipped archive contains files named 'data.pkl' and
    'constants.pkl', and folders named 'code' and 'data'.
    When predictor property is accessed or the object is called (__call__), the model is loaded in `evaluation mode`
    from the serialized model file (if it is not loaded yet) and the model is ready to be used.
    """
    model_type: str = "torch_script"
    @property
    def predictor(self) -> "torch.nn.Module":  # type: ignore
        """Get the model's predictor (torch.nn.Module)
        If the predictor is not loaded, load it from the model file in evaluation mode.
        (https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=eval#torch.jit.ScriptModule.eval)
        Returns:
            torch.nn.Module: the model's predictor
        """
        if self._predictor is None:
            # Use a device to dynamically remap, depending on the GPU availability.
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            self._predictor = torch.jit.load(self.path, map_location=device).eval()
        return self._predictor
    @predictor.setter
    def predictor(self, predictor: Any):
        """Set the predictor directly, bypassing lazy loading."""
        self._predictor = predictor
    def eval(self) -> "TorchScriptModel":
        """Set the model in evaluation model.
        This is a proxy method for torch.jit.ScriptModule.eval().
        See https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=eval#torch.jit.ScriptModule.eval
        Returns:
            self
        """
        self.predictor.eval()
        return self
    def train(self, mode: bool = True) -> "TorchScriptModel":
        """Set the model in training mode.
        This is a proxy method for torch.jit.ScriptModule.train().
        See https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=train#torch.jit.ScriptModule.train
        Args:
            mode (bool): whether the model is in training mode
        Returns:
            self
        """
        self.predictor.train(mode)
        return self
    @classmethod
    def accept(cls, path: str):
        """Check whether *path* looks like a TorchScript ZIP archive.

        Returns (True, model_type) when the archive contains a 'code/'
        folder, a 'data/' folder, 'constants.pkl' and 'data.pkl' under
        its top-level directory; otherwise (False, None).
        """
        prefix_code = False
        prefix_data = False
        prefix_constants_pkl = False
        prefix_data_pkl = False
        if not os.path.isfile(path):
            return False, None
        try:
            # BUG FIX: close the archive deterministically with a context
            # manager instead of leaking the open file handle.
            with ZipFile(path) as zip_file:
                for data in zip_file.filelist:
                    file_name = data.filename
                    # Strip the archive's top-level folder before matching.
                    pivot = file_name.find("/")
                    if pivot == -1:
                        continue
                    tail = file_name[pivot:]
                    # The four markers are mutually exclusive per entry,
                    # so an elif chain is equivalent to four independent ifs.
                    if tail.startswith("/code/"):
                        prefix_code = True
                    elif tail.startswith("/data/"):
                        prefix_data = True
                    elif tail == "/constants.pkl":
                        prefix_constants_pkl = True
                    elif tail == "/data.pkl":
                        prefix_data_pkl = True
        except BadZipFile:
            return False, None
        if prefix_code and prefix_data and prefix_constants_pkl and prefix_data_pkl:
            return True, cls.model_type
        return False, None
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import argparse
import logging
from log_collectors.training_data_service_client import match_log_file
from log_collectors.training_data_service_client import push_log_line
from log_collectors.training_data_service_client import scan_log_dirs
def main():
    """Scan the configured log directory and push matching log lines to the
    training data service."""
    logging.basicConfig(format='%(filename)s %(funcName)s %(lineno)d: %(message)s', level=logging.INFO)
    # LOG_DIR must be set in the environment; it is only the default and
    # can be overridden with --log_dir.
    default_log_dir = os.environ["LOG_DIR"]
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dir', type=str, default=default_log_dir,
                        help='Log directory')
    args, _ = parser.parse_known_args()
    scanner = scan_log_dirs.LogScanner(should_connect=True)
    scanner.scan(
        log_dir=args.log_dir,
        is_log=match_log_file.is_log_file,
        push_function=push_log_line.push)
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import os
import pickle
import multiprocessing as mp
from collections import defaultdict
from nltk import pos_tag, sent_tokenize, wordpunct_tokenize
class Preprocessor(object):
    """Preprocesses a corpus by POS-tagging each document and pickling the
    tagged paragraphs to a target directory that mirrors the corpus layout.
    """
    def __init__(self, corpus, target=None, **kwargs):
        """
        corpus: corpus reader exposing resolve(), fileids(), paras(),
                abspath() and root
        target: root directory that receives the pickled documents
        """
        self.corpus = corpus
        self.target = target
        # Results collected via the multiprocessing callbacks.
        # BUG FIX: this was a local variable ("results = []"), so
        # on_result() raised AttributeError and transform() always
        # returned an empty list.
        self.results = []
    def fileids(self, fileids=None, categories=None):
        """Resolve fileids/categories against the corpus; default to all files."""
        fileids = self.corpus.resolve(fileids, categories)
        if fileids:
            return fileids
        return self.corpus.fileids()
    def on_error(self, error_msg):
        # Error callback for the multiprocessing pool.
        print(error_msg)
    def on_result(self, result):
        # Success callback for the multiprocessing pool.
        self.results.append(result)
    def abspath(self, fileid):
        """Return the path of the output pickle for *fileid* under self.target."""
        # Find the directory, relative to the corpus root.
        parent = os.path.relpath(
            os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root
        )
        # Compute the name parts to reconstruct
        basename = os.path.basename(fileid)
        name, ext = os.path.splitext(basename)
        # Create the pickle file extension
        basename = name + '.pickle'
        # Return the path to the file relative to the target.
        return os.path.normpath(os.path.join(self.target, parent, basename))
    def process(self, fileid):
        """For a single file, checks the location on disk to ensure no errors,
        uses +tokenize()+ to perform the preprocessing, and writes transformed
        document as a pickle to target location.
        """
        # Compute the outpath to write the file to.
        target = self.abspath(fileid)
        parent = os.path.dirname(target)
        # Make sure the directory exists
        if not os.path.exists(parent):
            os.makedirs(parent)
        # Make sure that the parent is a directory and not a file
        if not os.path.isdir(parent):
            raise ValueError(
                "Please supply a directory to write preprocessed data to."
            )
        # Create a data structure for the pickle
        document = list(self.tokenize(fileid))
        # Open and serialize the pickle to disk
        with open(target, 'wb') as f:
            pickle.dump(document, f, pickle.HIGHEST_PROTOCOL)
        # Clean up the document
        del document
        return target
    def transform(self, fileids=None, categories=None, tasks=None):
        """Preprocess the selected files in parallel.

        Returns the list of written pickle paths (in completion order).
        """
        # Reset the results of any previous run.
        # BUG FIX: a local "results" list was returned here while
        # on_result() appended to self.results; the two now agree.
        self.results = []
        # Make the target directory if it doesn't already exist
        if not os.path.exists(self.target):
            os.makedirs(self.target)
        # Create a multiprocessing pool
        tasks = tasks or mp.cpu_count()
        pool = mp.Pool(processes=tasks)
        # Enqueue tasks on the multiprocessing pool and join
        for fileid in self.fileids():
            pool.apply_async(self.process, (fileid,), callback=self.on_result,
                             error_callback=self.on_error)
        # Close the pool and join
        pool.close()
        pool.join()
        return self.results
    def tokenize(self, fileid):
        """Yield each paragraph of *fileid* as a list of POS-tagged sentences."""
        for paragraph in self.corpus.paras(fileids=fileid):
            yield [
                pos_tag(sent)
                for sent in paragraph
            ]
|
nilq/baby-python
|
python
|
import doctest
import io
from contextlib import redirect_stderr, redirect_stdout
from textwrap import dedent
from scoraptor.result import TestResult
class SingleDocTest:
    """
    A single DocTest based test.
    Instances of this class are callable. When called, it takes
    a global_environment dict, and returns a TestResult object.
    We only take a global_environment, *not* a local_environment.
    This makes tests not useful inside functions, methods or
    other scopes with local variables. This is a limitation of
    doctest, so we roll with it.
    """
    def __init__(self, name, doctest_string):
        self.name = name
        self.doctest_string = doctest_string
        # Parsed items: doctest.Example objects interleaved with the
        # intervening prose strings.
        self.examples = doctest.DocTestParser().parse(
            doctest_string,
            name
        )
    PLAIN_TEXT_FAILURE_SUMMARY_TEMPLATE = dedent(r"""
    Test {name} failed!
    Test code:
    {doctest_string}
    Test result:
    {runresults}
    """).strip()
    def __call__(self, global_environment):
        """
        Run test with given global_environment.

        Returns a TestResult whose score is the fraction of passing
        examples (1.0 for an empty test) and whose summary is either a
        pass message or the captured doctest output.
        """
        test = doctest.DocTest(
            [e for e in self.examples if type(e) is doctest.Example],
            global_environment,
            self.name,
            None,
            None,
            self.doctest_string
        )
        doctestrunner = doctest.DocTestRunner(verbose=True)
        runresults = io.StringIO()
        with redirect_stdout(runresults), redirect_stderr(runresults):
            doctestrunner.run(test, clear_globs=False)
        # Discard the summarize() output; we only need its counts.
        # Writing into an in-memory buffer (instead of opening /dev/null)
        # is portable to Windows and cannot leak a file handle.
        discard = io.StringIO()
        with redirect_stderr(discard), redirect_stdout(discard):
            result = doctestrunner.summarize(verbose=True)
        # BUG FIX: an empty doctest string has result.attempted == 0 and
        # previously raised ZeroDivisionError; treat it as a pass.
        if result.attempted == 0:
            score = 1.0
        else:
            score = 1.0 - (result.failed / result.attempted)
        if score == 1.0:
            summary = 'Test {} passed!'.format(self.name)
        else:
            summary = self.PLAIN_TEXT_FAILURE_SUMMARY_TEMPLATE.format(
                name=self.name,
                doctest_string=dedent(self.doctest_string),
                runresults=runresults.getvalue()
            )
        return TestResult(score, summary)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 11:06:16 2018
Test for the function of the chi2 script of the omnitool package.
@author: misiak
"""
import sys
from os import path
import numpy as np
import matplotlib.pyplot as plt
import mcmc_red as mcr
plt.close('all')
# Sampling frequency (Hz); also passed to mcr.psd below.
fs = 1e3
# One second of samples at fs.
t_range = np.arange(0, 1, fs**-1)
# FUNCTION
# Pulse template: zero before onset time t, then a * (exp(t_range - t) - 1).
funk = lambda t,a: np.heaviside(t_range-t, 1.) * a * (np.exp(t_range-t)-1)
# DATA
# Gaussian noise level added to the synthetic data.
sig = 0.02
data = funk(.5, 1.) + np.random.normal(0, sig, t_range.shape)
# MODEL
# Three candidate models: onset times tmod with amplitudes xmod.
tmod = (.4,.5,.6)
xmod = (0.5, 1., 1.5)
labmod = ('mod1', 'mod2', 'mod3')
darray = {l: funk(t,a) for l,t,a in zip(labmod, tmod, xmod)}
# TEMPORAL Chi2
d_sx2 = {l: mcr.chi2_simple(data, darray[l], err=sig) for l in labmod}
# FREQ Chi2 with fft, psd, etc...
dfft = {l: np.fft.fft(darray[l]) for l in labmod}
fftdata = np.fft.fft(data)
freq, dpsd = mcr.psd(fftdata, fs)
# Estimate the noise PSD by averaging 100 independent noise realizations.
noise_list = list()
for k in range(100):
    freq, noi = mcr.psd(np.fft.fft(np.random.normal(0, sig, t_range.shape)), fs)
    noise_list.append(noi)
npsd = np.mean(noise_list, axis=0)
d_fx2 = {l: mcr.chi2_freq(fftdata, dfft[l], npsd, fs) for l in labmod}
# OPT Chi2 with free parameter
# Optimize the onset time of a fixed-amplitude (1.3) template over bounds.
opt_funk = lambda t: funk(t, 1.3)
bounds = (0., 1)
opt_x2, opt_t = mcr.opt_chi2_freq(fftdata, opt_funk, npsd, fs, bounds, debug=True)
opt_mod = opt_funk(opt_t)
########## PLOT #############
### TEMPORAL PLOT
plt.figure()
plt.title('1000 pts _ Temporal Chi2')
plt.plot(
    t_range, data, lw=1.,
    label='data, $\chi^2=${:.2f}'.format(mcr.chi2_simple(data, data, err=sig))
)
for l in labmod:
    plt.plot(t_range, darray[l], label=l + ' $\chi^2=${:.2f}'.format(d_sx2[l]))
plt.plot(t_range, opt_mod, ls='--', color='red', label='OPT')
# NOTE(review): plt.grid's `b=` keyword is deprecated in newer Matplotlib
# releases — confirm the pinned version.
plt.grid(b=True)
plt.legend()
# FREQUENCY PLOT
plt.figure()
plt.title('500 freqs _ Frequency Chi2')
plt.grid(b=True)
plt.loglog(
    freq, dpsd,
    label='data $\chi^2=${:.2f}'.format(mcr.chi2_freq(fftdata, fftdata, npsd, fs))
)
for l in labmod:
    freq, PSD = mcr.psd(dfft[l], fs)
    plt.loglog(freq, PSD, label=l+' $\chi^2=${:.2f}'.format(d_fx2[l]))
plt.loglog(freq, npsd, label='noise')
plt.legend()
|
nilq/baby-python
|
python
|
from crudbuilder.abstract import BaseCrudBuilder
from .models import Person
from crudbuilder.abstract import BaseCrudBuilder
from crudbuilder.formset import BaseInlineFormset
class PersonCrud(BaseCrudBuilder):
    """django-crudbuilder configuration generating CRUD views for Person."""
    model = Person
    # Fields searchable from the list view.
    search_fields = ['name']
    # Columns shown in the django-tables2 list table.
    tables2_fields = ('name', 'email')
    tables2_css_class = "table table-bordered table-condensed"
    tables2_pagination = 20 # default is 10
    # Audit fields are filled automatically, so keep them out of the form.
    modelform_excludes = ['created_by', 'updated_by']
    login_required=True
    permission_required=True
    # permissions = {
    # 	'list': 'example.person_list',
    # 	'create': 'example.person_create'
    # }
|
nilq/baby-python
|
python
|
class Pos:
    """A 3-D position carrying a velocity (dx, dy, dz)."""

    def __init__(self, x = 0, y = 0, z = 0):
        # Current position components.
        self.x, self.y, self.z = x, y, z
        # Per-step velocity components, initially at rest.
        self.dx = self.dy = self.dz = 0

    def move(self):
        """Advance the position by one velocity step."""
        self.x, self.y, self.z = (
            self.x + self.dx,
            self.y + self.dy,
            self.z + self.dz,
        )
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import authentication.models
class Migration(migrations.Migration):
    """Initial messaging migration: creates Mail, MailFile and MailReply.

    NOTE(review): auto-generated file; `null=True` on ManyToManyField has
    no database effect (Django warns about it) — left as generated.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Mail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('subject', models.CharField(max_length=250, null=True, blank=True)),
                ('body', models.TextField(null=True, blank=True)),
                ('mail_draft', models.BooleanField(default=False)),
                ('mail_read', models.BooleanField(default=False)),
                ('mail_read_date', models.DateTimeField(null=True, blank=True)),
                ('mail_created', models.DateTimeField(auto_now=True)),
                ('mail_created_by', models.ForeignKey(related_name='mail_created_by_user', to=settings.AUTH_USER_MODEL)),
                ('mail_read_by', models.ManyToManyField(related_name='mail_read_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
                ('mail_to', models.ManyToManyField(related_name='mail_sent_to', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
                ('trash', models.ManyToManyField(related_name='mail_trash_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MailFile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('file_created', models.DateTimeField(auto_now=True)),
                ('mail_file', models.FileField(null=True, upload_to=authentication.models.get_upload_file_name, blank=True)),
                ('base_mail', models.ForeignKey(related_name='base_files', blank=True, to='messaging.Mail', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MailReply',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('subject', models.CharField(max_length=250, null=True, blank=True)),
                ('body', models.TextField(null=True, blank=True)),
                ('reply_draft', models.BooleanField(default=False)),
                ('reply_read_date', models.DateTimeField(null=True, blank=True)),
                ('reply_created', models.DateTimeField(auto_now=True)),
                ('mail_to', models.ManyToManyField(related_name='reply_sent_to', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
                ('orig_mail', models.ForeignKey(related_name='reply_mail', to='messaging.Mail')),
                ('reply_created_by', models.ForeignKey(related_name='reply_created_by_user', to=settings.AUTH_USER_MODEL)),
                ('reply_read_by', models.ManyToManyField(related_name='reply_read_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
                ('trash', models.ManyToManyField(related_name='reply_trash_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='mailfile',
            name='reply_mail',
            field=models.ForeignKey(related_name='reply_files', blank=True, to='messaging.MailReply', null=True),
            preserve_default=True,
        ),
    ]
|
nilq/baby-python
|
python
|
from tracardi.domain.import_config import ImportConfig, ImportConfigRecord
from tracardi.domain.storage_result import StorageResult
from tracardi.service.storage.factory import storage_manager
from typing import Optional
async def load(id: str) -> Optional[ImportConfig]:
    """Load one import configuration by id; None when it does not exist."""
    record = await storage_manager("import").load(id)
    if record is None:
        return None
    return ImportConfig.decode(ImportConfigRecord(**record))
async def save(batch: ImportConfig):
    """Encode *batch* and upsert it into the "import" storage index."""
    record = batch.encode()
    return await storage_manager("import").upsert(record.dict())
async def delete(id: str):
    """Delete the import configuration with the given id from storage."""
    # NOTE: `id` shadows the builtin; kept for interface compatibility.
    return await storage_manager("import").delete(id)
async def load_all(limit: int = 100, query: str = None):
    """Return up to *limit* import configurations.

    When *query* is given, results are filtered by a wildcard match of
    the name field; otherwise all entries are listed.
    """
    manager = storage_manager("import")
    if query is None:
        result = await manager.load_all(limit=limit)
    else:
        wildcard_body = {
            "query": {
                "wildcard": {
                    "name": f"*{query}*"
                }
            },
            "size": limit
        }
        result = StorageResult(await manager.query(wildcard_body))
    return list(result)
async def refresh():
    """Refresh the "import" storage index so recent writes become searchable."""
    return await storage_manager("import").refresh()
|
nilq/baby-python
|
python
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApiVersionResp:
    """Response model describing one published version of an API.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'version_id': 'str',
        'version_no': 'str',
        'api_id': 'str',
        'env_id': 'str',
        'env_name': 'str',
        'remark': 'str',
        'publish_time': 'datetime',
        'status': 'int'
    }
    attribute_map = {
        'version_id': 'version_id',
        'version_no': 'version_no',
        'api_id': 'api_id',
        'env_id': 'env_id',
        'env_name': 'env_name',
        'remark': 'remark',
        'publish_time': 'publish_time',
        'status': 'status'
    }
    def __init__(self, version_id=None, version_no=None, api_id=None, env_id=None, env_name=None, remark=None, publish_time=None, status=None):
        """ApiVersionResp - a model defined in huaweicloud sdk"""
        self._version_id = None
        self._version_no = None
        self._api_id = None
        self._env_id = None
        self._env_name = None
        self._remark = None
        self._publish_time = None
        self._status = None
        self.discriminator = None
        if version_id is not None:
            self.version_id = version_id
        if version_no is not None:
            self.version_no = version_no
        if api_id is not None:
            self.api_id = api_id
        if env_id is not None:
            self.env_id = env_id
        if env_name is not None:
            self.env_name = env_name
        if remark is not None:
            self.remark = remark
        if publish_time is not None:
            self.publish_time = publish_time
        if status is not None:
            self.status = status
    @property
    def version_id(self):
        """Gets the version_id of this ApiVersionResp.
        ID of this historical API version.
        :return: The version_id of this ApiVersionResp.
        :rtype: str
        """
        return self._version_id
    @version_id.setter
    def version_id(self, version_id):
        """Sets the version_id of this ApiVersionResp.
        ID of this historical API version.
        :param version_id: The version_id of this ApiVersionResp.
        :type: str
        """
        self._version_id = version_id
    @property
    def version_no(self):
        """Gets the version_no of this ApiVersionResp.
        Version number of the API.
        :return: The version_no of this ApiVersionResp.
        :rtype: str
        """
        return self._version_no
    @version_no.setter
    def version_no(self, version_no):
        """Sets the version_no of this ApiVersionResp.
        Version number of the API.
        :param version_no: The version_no of this ApiVersionResp.
        :type: str
        """
        self._version_no = version_no
    @property
    def api_id(self):
        """Gets the api_id of this ApiVersionResp.
        ID of the API.
        :return: The api_id of this ApiVersionResp.
        :rtype: str
        """
        return self._api_id
    @api_id.setter
    def api_id(self, api_id):
        """Sets the api_id of this ApiVersionResp.
        ID of the API.
        :param api_id: The api_id of this ApiVersionResp.
        :type: str
        """
        self._api_id = api_id
    @property
    def env_id(self):
        """Gets the env_id of this ApiVersionResp.
        ID of the environment the version was published to.
        :return: The env_id of this ApiVersionResp.
        :rtype: str
        """
        return self._env_id
    @env_id.setter
    def env_id(self, env_id):
        """Sets the env_id of this ApiVersionResp.
        ID of the environment the version was published to.
        :param env_id: The env_id of this ApiVersionResp.
        :type: str
        """
        self._env_id = env_id
    @property
    def env_name(self):
        """Gets the env_name of this ApiVersionResp.
        Name of the environment the version was published to.
        :return: The env_name of this ApiVersionResp.
        :rtype: str
        """
        return self._env_name
    @env_name.setter
    def env_name(self, env_name):
        """Sets the env_name of this ApiVersionResp.
        Name of the environment the version was published to.
        :param env_name: The env_name of this ApiVersionResp.
        :type: str
        """
        self._env_name = env_name
    @property
    def remark(self):
        """Gets the remark of this ApiVersionResp.
        Description of the publication.
        :return: The remark of this ApiVersionResp.
        :rtype: str
        """
        return self._remark
    @remark.setter
    def remark(self, remark):
        """Sets the remark of this ApiVersionResp.
        Description of the publication.
        :param remark: The remark of this ApiVersionResp.
        :type: str
        """
        self._remark = remark
    @property
    def publish_time(self):
        """Gets the publish_time of this ApiVersionResp.
        Publication time.
        :return: The publish_time of this ApiVersionResp.
        :rtype: datetime
        """
        return self._publish_time
    @publish_time.setter
    def publish_time(self, publish_time):
        """Sets the publish_time of this ApiVersionResp.
        Publication time.
        :param publish_time: The publish_time of this ApiVersionResp.
        :type: datetime
        """
        self._publish_time = publish_time
    @property
    def status(self):
        """Gets the status of this ApiVersionResp.
        Version status - 1: the currently effective version - 2: a version not in effect.
        :return: The status of this ApiVersionResp.
        :rtype: int
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this ApiVersionResp.
        Version status - 1: the currently effective version - 2: a version not in effect.
        :param status: The status of this ApiVersionResp.
        :type: int
        """
        self._status = status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Redact attributes marked sensitive (none for this model).
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding for serialization.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ApiVersionResp):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
nilq/baby-python
|
python
|
import time
import re
from collections import Counter
import operator
start_time = time.time()  # wall-clock reference for the elapsed-time print at the end
def getManhattanDistance(src, dest):
    """Return the Manhattan (taxicab) distance between two 2-D points."""
    dx = abs(src[0] - dest[0])
    dy = abs(src[1] - dest[1])
    return dx + dy
with open("input") as f:
    coords = f.readlines()
# Each input line holds one "x, y" pair; keep only the integers.
coords = [list(map(int,re.findall(r"\d+",x.strip()))) for x in coords]
# === Part One and Two ===
# Area id -1 marks tied/unclaimed cells and is never counted.
ignoredAreas = [-1]
# NOTE(review): the grid bound is the point with the maximum y value and
# its x is used as the x-bound too (maxCoords[0]) — confirm this is intended.
maxCoords = max(coords,key=lambda item:item[1])
coordMap = {}
# Part Two threshold on the summed distance to all points.
maxDistance = 10000
inMaxDistanceCount = 0
for i in range(maxCoords[0]+1):
    for j in range(0,maxCoords[1]+1):
        distances = {}
        coordMap[str(i)+":"+str(j)] = -1
        totalDistance = 0
        for key, coord in enumerate(coords):
            distance = getManhattanDistance((i,j), coord)
            distances[key] = distance
            totalDistance += distance
            if(i == coord[0] and j == coord[1]):
                coordMap[str(i)+":"+str(j)] = key
        counts = Counter(distances.values())
        best = min(distances.items(), key=operator.itemgetter(1))
        # A tie for the nearest point means the cell belongs to no area.
        if(counts[best[1]] > 1):
            coordMap[str(i)+":"+str(j)] = -1
        else:
            coordMap[str(i)+":"+str(j)] = best[0]
            # Areas touching the grid border extend to infinity -> ignore.
            if( best[0] not in ignoredAreas and (i==0 or j==0 or i==maxCoords[0] or j==maxCoords[1])):
                ignoredAreas.append(best[0])
        if(totalDistance < maxDistance) : inMaxDistanceCount+=1
finiteAreasCount = Counter([x for x in coordMap.values() if x not in ignoredAreas])
largestFiniteArea = max(finiteAreasCount.items(), key=operator.itemgetter(1))
print("Largest finite area is", largestFiniteArea[0],"with a total area of",largestFiniteArea[1])
print("Largest area with total distance to all points below",maxDistance,":",inMaxDistanceCount)
print("Time elapsed: ", time.time() - start_time)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import calendar
from datetime import *
import fileinput
import html
import importlib
import math
import os
from os import path
import pkgutil
import re
import subprocess
import sys
import textwrap
from jinja2 import Environment, FileSystemLoader, select_autoescape
import diags
# START configuration
# Special statistics are only shown for vips
# (insert your and your friends names here)
# E. g. vips = ["MyName", "friend42"]
vips = []
# Merge or rename users by name or database id
# E.g. merges = [["MyName", "MyNameLaptop", 20], ...]
merges = []
# Maximum users to show in diagrams
maxUsers = 50
# Statistics about the TS3AudioBot
botStats = False
# Input folder for server logs
inputFolder = "Logs"
# Input folder for TS3AudioBot logs
inputFolderBot = "BotLogs"
# Output folder
outputFolder = "Result"
# Folder for temporary files
tempFolder = "temp"
# Length of one time slot (used to bucket activity within a day)
slotLength = timedelta(minutes = 10)
# END configuration
# Load configuration from Settings.py
# NOTE(review): Settings.py is executed as trusted code and can override
# any of the names above — only run with a Settings.py you wrote yourself.
if path.isfile("Settings.py"):
    exec(compile(open("Settings.py").read(), "Settings.py", "exec"))
# Number of time slots in one day
slotsPerDay = int(math.ceil(timedelta(days = 1) / slotLength))
# Enum for connections
CONNECTION_START = 1
CONNECTION_END = 2
def openTempfile(name):
    """Open <tempFolder>/<name>.tmp for writing; the caller must close it."""
    return open(path.join(tempFolder, name + ".tmp"), "w")
def parseName(name):
    """Decode HTML entities in a client name taken from the server log."""
    decoded = html.unescape(name)
    return decoded
def timeToString(t):
    """Format a timedelta as '[Y years ][D days ][HH:]MM:SS'.

    Larger units only appear once the duration reaches them; a year is
    counted as 365 days.
    """
    total_seconds = t.total_seconds()
    seconds = int(total_seconds % 60)
    minutes = int(total_seconds / 60) % 60
    total_hours = int(total_seconds / 3600)
    hours = total_hours % 24
    days = int(total_hours / 24) % 365
    years = int(total_hours / (24 * 365))
    pieces = []
    if years > 0:
        pieces.append("{0} years ".format(years))
    if years > 0 or days > 0:
        pieces.append("{0} days ".format(days))
    if years > 0 or days > 0 or hours > 0:
        pieces.append("{0:02}:".format(hours))
    pieces.append("{0:02}:{1:02}".format(minutes, seconds))
    return "".join(pieces)
def timestampToString(timestamp):
    """Format a Unix timestamp as a local-time YYYY-MM-DD string."""
    return "{0:%Y-%m-%d}".format(datetime.fromtimestamp(timestamp))
def to_slot_index(t, slotLength = slotLength):
    """Map a datetime's time-of-day to its slot index (slots of slotLength)."""
    time_of_day = timedelta(hours = t.hour, minutes = t.minute, seconds = t.second)
    return time_of_day // slotLength
gnuplotEscapes = ['\\', '^', '_', '@', '&', '~', '{']

def gnuplotEscape(text):
    """Backslash-escape gnuplot enhanced-text control characters.

    The result is escaped a second time (every backslash doubled)
    because the string passes through another round of interpretation.
    """
    for control_char in gnuplotEscapes:
        text = text.replace(control_char, '\\' + control_char)
    return text.replace('\\', '\\\\')
class DiagramCreator:
    def __init__(self):
        # Jinja2 environment rooted at the working directory; autoescaping
        # is enabled for HTML/XML templates and for plain strings.
        self.env = Environment(loader = FileSystemLoader("."),
            autoescape = select_autoescape(["html", "xml"],
                default_for_string = True),
            trim_blocks = True,
            lstrip_blocks = True)
        # Templates for the gnuplot scripts and the result HTML page.
        self.diagramTemplate = self.env.get_template("template.gp")
        self.htmlTemplate = self.env.get_template("template.html")
    def load_meta(self):
        """Create the output/temp folders and discover the diagram modules."""
        os.makedirs(outputFolder, mode = 0o755, exist_ok = True)
        os.makedirs(tempFolder, mode = 0o755, exist_ok = True)
        self.args2diags()
def args2diags(self):
"""Parse the arguments and fill the diags list"""
# Get all diagrams
self.diags = []
package = diags
prefix = package.__name__ + "."
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
module = importlib.import_module(modname)
self.diags.append(module)
    def load_data(self):
        """Parse server (and optionally bot) logs and set up the result tabs."""
        self.users = {}
        # Disconnects labelled "connection lost" that were not actually
        # preceded by a timeout message (counted in parse_server).
        self.fakeTimeouts = 0
        self.parse_server()
        self.merge()
        if botStats:
            self.parse_bot()
        # Number of days covered by the logs (inclusive of both ends).
        self.dayCount = (self.endDay - self.startDay).days + 1
        if botStats:
            self.dayCountBot = (self.endDayBot - self.startDayBot).days + 1
        self.generalTab = Tab("General")
        self.vipTab = Tab("VIPs")
        self.tabs = [self.generalTab, self.vipTab]
        if botStats:
            self.botTab = Tab("Bot")
            self.tabs.append(self.botTab)
def parse_server(self):
"""Open the log files and store the users and connections"""
self.startDay = date.today()
self.endDay = date.today()
inputFiles = sorted([path.join(inputFolder, f) for f in os.listdir(inputFolder)
if path.isfile(path.join(inputFolder, f))])
linePattern = re.compile(r"^(?P<Date>\d{4}-\d{2}-\d{2})\s+(?P<Time>\d{2}:\d{2}:\d{2}.\d{6})\|\s*(?P<LogLevel>\w+)\s*\|\s*(?P<Initiator>\w+)\s*\|\s*(?P<VServ>\w+)\s*\|\s*client (?P<Action>(?:dis)?connected) '(?P<Name>.*)'\(id:(?P<DbId>\d+)\) (?:reason 'reasonmsg=?(?P<Reason>.*)')?.*\n?$")
# Read connections from log
for file in inputFiles:
with open(file) as f:
# Previous line
prevline = ""
for line in f:
# Remove the bom
if line.startswith("\ufeff"):
line = line[1:]
match = linePattern.fullmatch(line)
if match:
connected = match.group("Action") == "connected"
# Get time
t = datetime.strptime(line[:19], "%Y-%m-%d %H:%M:%S").replace(tzinfo = timezone.utc).astimezone()
if t.date() < self.startDay:
self.startDay = t.date()
userId = int(match.group("DbId"))
# Find or create the user
if userId not in self.users:
u = User(parseName(match.group("Name")))
self.users[userId] = u
else:
u = self.users[userId]
# True if the line is a connect
if connected:
u.lastConnected.append(t)
elif u.lastConnected:
# Ignore clients that didn't connect
timeout = False
if match.group("Reason") == "connection lost":
# Check if it is a timeout
if "ping timeout" in prevline or "resend timeout" in prevline or t < datetime(2017, 3, 1, tzinfo = timezone.utc):
timeout = True
else:
self.fakeTimeouts += 1
con = Connection(u.lastConnected[0], t, timeout)
u.connections.append(con)
del u.lastConnected[0]
prevline = line
# Disconnect all connected clients because the log is finished.
# Use the last known timestamp for the end
for u in self.users.values():
for start in u.lastConnected:
con = Connection(start, t)
u.connections.append(con)
u.lastConnected = []
def parse_bot(self):
"""Open the bot log files and store the users and plays"""
self.startDayBot = date.today()
self.endDayBot = date.today()
inputFiles = sorted([path.join(inputFolderBot, f) for f in os.listdir(inputFolderBot)
if path.isfile(path.join(inputFolderBot, f))])
linePattern = re.compile(r"^\[(?P<Time>\d{2}:\d{2}:\d{2})\]\s*Debug: MB Got message from (?P<Name>[^:]*): !(?P<Command>.*)\n?$")
datePattern = re.compile(r"^\[(?P<Time>\d{2}:\d{2}:\d{2})\]\s*Info: \[=== (?:Date:.*,\s*(?P<mday0>\d+) (?P<month0>\w+) (?P<year0>\d+).*|Date:.*,\s*(?P<month1>\w+) (?P<mday1>\d+), (?P<year1>\d+).*|Date/Time:.*,\s*(?P<month2>\w+) (?P<mday2>\d+), (?P<year2>\d+).*)\n?$")
timePattern = re.compile(r"^\[(?P<Time>\d{2}:\d{2}:\d{2})\].*\n?$")
newLinePattern = re.compile(r"^(?P<Time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d{4}\|.*Got message from (?P<Name>[^:]*): !(?P<Command>.*)$")
newLinePattern2 = re.compile(r"^(?P<Time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d{4}\|.*User (?P<Name>.*) requested: !(?P<Command>.*)$")
# Line formats:
# [04:44:57] Info: [=== Date: Monday, 27 March 2017 ===]
# [19:31:50] Info: [=== Date/Time: Friday, August 18, 2017 7:31:50 PM
# [17:13:54] Debug: MB Got message from Hengo: !pl [URL]https://www.youtube.com/watch?v=d_RjwMZItZo[/URL]
#
# 2018-01-11 00:30:09.2136|DEBUG|Bot.TextCallback Got message from Splamy: !ver
# 2018-06-27 15:59:49.9569| INFO|Bot.TextCallback User Splamy requested: !sett get connect.name
# Read connections from log
for file in inputFiles:
with open(file) as f:
curDate = None
for line in f:
# Remove the bom
if line.startswith("\ufeff"):
line = line[1:]
match = datePattern.fullmatch(line)
if match:
for i in range(3):
if match.group("year" + str(i)) != None:
curDate = datetime.strptime("{}-{}-{} {}".format(match.group("year" + str(i)), match.group("month" + str(i)), match.group("mday" + str(i)), match.group("Time")), "%Y-%B-%d %H:%M:%S")
break
match = timePattern.fullmatch(line)
if match:
curTime = datetime.strptime(match.group("Time"), "%H:%M:%S").time()
if type(curDate) is datetime:
if curTime < curDate.time():
curDate += timedelta(days = 1)
curDate = datetime.combine(curDate.date(), curTime)
else:
curDate = curTime
match = linePattern.fullmatch(line)
if match:
if type(curDate) is datetime and curDate.date() < self.startDayBot:
self.startDayBot = curDate.date()
self.parseAddEvent(curDate, match.group("Name").strip(), match.group("Command"))
match = newLinePattern.fullmatch(line.strip())
if not match:
match = newLinePattern2.fullmatch(line.strip())
if match:
curDate = datetime.strptime(match.group("Time"), "%Y-%m-%d %H:%M:%S")
if curDate.date() < self.startDayBot:
self.startDayBot = curDate.date()
self.parseAddEvent(curDate, match.group("Name").strip(), match.group("Command"))
def parseAddEvent(self, curDate, name, command):
playCmd = command.startswith("pl") or command.startswith("py") or command.startswith("ad")
# Find or create the user
for m in merges:
if name in m:
name = m[0]
break
user = None
for u in self.users:
if u.name == name:
user = u
break
if user == None:
user = User(name)
self.users.append(user)
if playCmd:
user.botPlays.append((curDate, command))
else:
user.botCommands.append((curDate, command))
def merge(self):
# Merge users
for id, u in self.users.items():
for m in merges:
# print(f"Id {id} {type(id)} searched in {m}")
# if id in m:
# print(f"Id {id} merged in {m[0]}")
if u.name in m or id in m:
u.name = m[0]
break
# Aggregate users with the same name
self.users = list(self.users.values())
i = 0
while i < len(self.users):
j = i + 1
while j < len(self.users):
if self.users[i].name == self.users[j].name:
# Merge users
self.users[i].connections += self.users[j].connections
del self.users[j]
j -= 1
j += 1
i += 1
print("User count: {}".format(len(self.users)), file = sys.stderr)
# Select vip users
self.vip = [u for u in self.users if u.name in vips]
def create_diagrams(self):
for diag in self.diags:
diag.create_diag(self)
# Render the html
with open(path.join(outputFolder, "index.html"), "w") as f:
f.write(self.htmlTemplate.render(tabs = self.tabs,
date = datetime.now().strftime("%d.%m.%Y %H:%M")))
# Link the static data
if not path.exists(path.join(outputFolder, "static")):
os.symlink("../static", path.join(outputFolder, "static"))
def fun_per_connected_slot(self, users, slotFun, slotLength = timedelta(days = 1), floorFun = None):
"""Calls f for each day a certain connection lasts.
userStart/End are called before and after the connections of a user are worked on.
slotType is a bit field of CONNECTION_START/END
start and end are relative to the slotStart
f(user, connection, slotStart, slotType, start, end)"""
if floorFun == None:
# Round down to the nearest multiple of the slotLength time
floorFun = lambda t: t - timedelta(hours = t.hour, minutes = t.minute, seconds = t.second) % slotLength
for u in users:
for c in u.connections:
# The start of the first slot
slotStart = floorFun(c.start)
slotEnd = slotStart + slotLength
# First slot
relStart = c.start - slotStart
if c.end > slotEnd:
slotFun(u, c, slotStart, CONNECTION_START, relStart, slotLength)
else:
# Only one slot
slotFun(u, c, slotStart, CONNECTION_START | CONNECTION_END, relStart, c.end - slotStart)
continue
slotStart = slotEnd
slotEnd += slotLength
# For each slot
while c.end > slotEnd:
slotFun(u, c, slotStart, 0, timedelta(), slotLength)
slotStart = slotEnd
slotEnd += slotLength
# Last slot
slotFun(u, c, slotStart, CONNECTION_END, timedelta(), c.end - slotStart)
def write_slots_per_day(self, f, slots, name = None):
if name != None:
f.write("# {0}\n".format(name))
for i, data in enumerate(slots):
minutes = int((i * slotLength).total_seconds()) // 60
f.write("{0:02}:{1:02}\t{2}\n".format(minutes // 60, minutes % 60, data))
f.write("24:00\t{0}\n\n\n".format(slots[0]))
def write_days(self, f, days, name = None, cumulative = False, start = None):
if name != None:
f.write("# {0}\n".format(name))
if start == None:
start = self.startDay
day = start
if cumulative:
sum = 0
for data in days:
if cumulative:
sum += data
f.write("{0:%d.%m.%Y}\t{1}\n".format(day, sum))
else:
f.write("{0:%d.%m.%Y}\t{1}\n".format(day, data))
day += timedelta(days = 1)
f.write("\n\n")
class Connection:
    """A single client session, from connect to disconnect."""
    def __init__(self, start, end, timeout = False):
        # Unix timestamp
        # connect time
        self.start = start
        # disconnect time
        self.end = end
        # True if the disconnect was caused by a ping/resend timeout
        self.timeout = timeout
    def duration(self):
        """Length of the session (end - start)."""
        return self.end - self.start
class User:
    """A chat/server user with their connection and bot-usage history."""
    def __init__(self, name):
        self.name = name
        # Didn't connect so far
        # (open connect timestamps waiting for their matching disconnect)
        self.lastConnected = []
        # List of connections
        self.connections = []
        # (timestamp, command) tuples of play-like bot commands
        self.botPlays = []
        # (timestamp, command) tuples of all other bot commands
        self.botCommands = []
class Diagram:
    """Description of a single gnuplot diagram and its render bookkeeping.

    dict(diagram) (via __iter__) produces the context for the Jinja2 gnuplot
    template; render() writes the script and launches gnuplot on it.
    """
    # Class-wide registry of all diagrams rendered so far.
    diagrams = []
    def __init__(self, filename, title = "Title", width = 1920, height = 600, shortname = None):
        self.filename = filename
        if shortname == None:
            shortname = filename
        self.shortname = shortname
        self.title = title
        self.width = width
        self.height = height
        self.xlabel = "x"
        self.ylabel = "y"
        self.appendText = ""
        self.legend = "left"
        # plots can be set to none to disable them
        self.plots = []
        self.subtitle = None
    def __iter__(self):
        """Yield (key, value) pairs so dict(self) builds the template context."""
        yield "color", "#bbbbbb"
        yield "filename", self.filename
        yield "outputfile", path.join(outputFolder, self.filename)
        yield "shortname", self.shortname
        yield "title", self.title
        if self.subtitle != None:
            yield "subtitle", self.subtitle
        yield "width", self.width
        yield "height", self.height
        yield "xlabel", self.xlabel
        yield "ylabel", self.ylabel
        yield "legend", self.legend
        yield "appendText", textwrap.dedent(self.appendText)
        if self.plots:
            # Every plot expression reads from this diagram's temp data file.
            dataFile = "'{0}.tmp' ".format(path.join(tempFolder, self.filename))
            yield "plots", "plot " + ", \\\n\t".join([dataFile + p for p in self.plots])
    def render(self, diagramTemplate):
        """Write the gnuplot script for this diagram and launch gnuplot on it."""
        # Create the gnuplot script
        # (The previous version also read "template.gp" into an unused local
        # variable here -- dead file I/O; the template comes from the
        # diagramTemplate parameter.)
        tempName = path.join(tempFolder, self.filename + ".gp.tmp")
        with open(tempName, "w") as f:
            f.write(diagramTemplate.render(dict(self)))
        # Fire-and-forget: gnuplot runs in the background.
        subprocess.Popen(["gnuplot", tempName])
        if self.subtitle:
            print(self.subtitle, file = sys.stderr)
        Diagram.diagrams.append(self)
class Tab:
    """A named group of diagrams shown as one tab in the HTML report."""
    def __init__(self, name, shortname = None):
        self.name = name
        # Fall back to the full name when no short name is given.
        self.shortname = name if shortname == None else shortname
        self.diagrams = []
    def addDiagram(self, diag):
        """Register a diagram to be displayed on this tab."""
        self.diagrams.append(diag)
    def __iter__(self):
        """Yield (key, value) pairs so dict(tab) builds the template context."""
        yield "name", self.name
        yield "shortname", self.shortname
        yield "diagrams", [dict(d) for d in self.diagrams]
def main():
    """Entry point: set up folders/plug-ins, parse the logs, render all diagrams."""
    dc = DiagramCreator()
    dc.load_meta()
    dc.load_data()
    dc.create_diagrams()
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Author: Konstantin Schuckmann
# @Date: 2021-10-28 14:36:06
# @Last Modified by: Konstantin Schuckmann
# @Last Modified time: 2021-10-29 09:30:48
import pandas as pd
import numpy as np
# Needed for generating data from an existing dataset
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from sklearn.impute import SimpleImputer
def create_dummy_variables_from_original():
    """Generate an anonymized dummy dataset from ./data/input/input.csv.

    Imputes missing GDP/coordinate columns with their column mean, fits a
    kernel density estimate (bandwidth chosen via grid search) on the distance
    and rate columns, samples one synthetic row per original row, and writes
    the combined frame to ./data/input/dummy_input.csv.
    """
    data = pd.read_csv('./data/input/input.csv', sep=';')
    # Columns modelled by the KDE (distance plus all weight-band rates).
    column_names = ['Distance','Rate 0 - 49 kg', 'Rate 50 - 99 kg', 'Rate 99 - 149 kg',
        'Rate 149 - 199 kg', 'Rate 199 - 299 kg', 'Rate 299 - 399 kg',
        'Rate 399 - 499 kg', 'Rate 499 - 599 kg', 'Rate 599 - 699 kg',
        'Rate 699 - 799 kg', 'Rate 799 - 899 kg', 'Rate 899 - 999 kg',
        'Rate 999 - 1099 kg', 'Rate 1099 - 1199 kg', 'Rate 1199 - 1299 kg',
        'Rate 1299 - 1399 kg', 'Rate 1399 - 1499 kg', 'Rate 1499 - 1749 kg',
        'Rate 1749 - 1999 kg', 'Rate 1999 - 2249 kg', 'Rate 2249 - 2499 kg',
        'Rate 2499 - 2999 kg', 'Rate 2999 - 3499 kg', 'Rate 3499 - 3999 kg',
        'Rate 3999 - 4499 kg', 'Rate 4499 - 4999 kg', 'Rate 4999 - 5999 kg',
        'Rate 5999 - 6999 kg', 'Rate 6999 - 7999 kg', 'Rate 7999 - 8999 kg',
        'Rate 8999 - 9999 kg', 'Rate 9999 - 10999 kg', 'Rate 10999 - 11999 kg',
        'Rate 11999 - 12999 kg', 'Rate 12999 - 13999 kg',
        'Rate 13999 - 14999 kg', 'Rate 14999 - 15999 kg',
        'Rate 15999 - 16999 kg', 'Rate 16999 - 17999 kg',
        'Rate 17999 - 18999 kg', 'Rate 18999 - 19999 kg', 'Rate 20000-21499 kg',
        'Rate 21500-22999 kg', 'Rate 23000-24499 kg']
    # Replace missing numeric metadata with the per-column mean.
    imp = SimpleImputer(missing_values=np.nan, strategy='mean')
    imp.fit(data[['GDP WB origin', 'GDP WB Destination',
        'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']])
    data[['GDP WB origin', 'GDP WB Destination',
        'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']] = imp.transform(data[['GDP WB origin', 'GDP WB Destination',
        'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']])
    rand_state = 11
    # Fetch the dataset and store in X
    X = data[column_names]
    # Fit a kernel density model using GridSearchCV to determine the best parameter for bandwidth
    bandwidth_params = {'bandwidth': np.arange(0.01,1,0.05)}
    grid_search = GridSearchCV(KernelDensity(), bandwidth_params)
    grid_search.fit(X)
    kde = grid_search.best_estimator_
    # Generate/sample one synthetic row per original row.
    # (The previous `sorted_data = data.sort_values(by=['Distance'])` was dead
    # code: only its row count was used, which equals len(data).)
    new_data = kde.sample(data.shape[0], random_state=rand_state)
    new_data = pd.DataFrame(new_data, columns = column_names)
    # Keep the real (imputed) metadata columns next to the synthetic rates.
    final_df = pd.concat([data[['Country Relation', 'Country Relation Vice Versa', 'Origin Country',
        'Destination Country', 'GDP WB origin', 'GDP WB Destination',
        'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']], new_data], axis = 1)
    final_df.to_csv('./data/input/dummy_input.csv', sep=';', index=False)
def main():
    """Entry point: build the dummy dataset from the original input CSV."""
    create_dummy_variables_from_original()
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'quartiles' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def _median_of_slice(arr, lo, hi):
    """Integer median of the sorted slice arr[lo:hi] (floor division for even lengths)."""
    mid = (lo + hi) // 2
    if (hi - lo) % 2 != 0:
        return arr[mid]
    return (arr[mid - 1] + arr[mid]) // 2
def quartiles(arr):
    """Return (Q1, Q2, Q3) of an integer array (expects len(arr) >= 4).

    Uses the median-of-halves method: Q2 is the median of the whole sorted
    array; Q1/Q3 are the medians of the lower/upper halves, excluding the
    middle element when the length is odd. Integer division matches the
    original contract of returning integers.

    Bug fixed: the previous parity test `first_Number % second_Number != 0`
    never detected odd-length halves (e.g. n = 6 or 7 produced wrong Q1/Q3)
    and raised ZeroDivisionError for n = 3.
    """
    arr = sorted(arr)
    n = len(arr)
    half = n // 2
    q2 = _median_of_slice(arr, 0, n)
    # Lower half: arr[0:half]; upper half starts after the middle element
    # when n is odd, directly at half when n is even.
    q1 = _median_of_slice(arr, 0, half)
    q3 = _median_of_slice(arr, half + (n % 2), n)
    return q1, q2, q3
if __name__ == '__main__':
    # HackerRank harness: write the three quartiles (one per line) to OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # n is the declared element count; the values follow on the next line.
    n = int(input().strip())
    data = list(map(int, input().rstrip().split()))
    res = quartiles(data)
    fptr.write('\n'.join(map(str, res)))
    fptr.write('\n')
    fptr.close()
|
nilq/baby-python
|
python
|
from model.client import Client
import string
import random
import os.path
import jsonpickle
import getopt
import sys
# Parse command line options: -n <number of clients>, -f <output file>.
try:
    opts, args = getopt.getopt(sys.argv[1:],"n:f:", ["number of clients", "file"])
except getopt.GetoptError as err:
    # Bug fixed: the getopt module has no usage() function -- the previous
    # `getopt.usage()` raised AttributeError instead of reporting the error.
    print(err, file=sys.stderr)
    print("usage: %s [-n number_of_clients] [-f output_file]" % sys.argv[0], file=sys.stderr)
    sys.exit(2)
# Defaults when the options are not given.
n=5
f= "data/clients.json"
for o, a in opts:
    if o =="-n":
        n=int(a)
    elif o == "-f":
        f=a
def random_string(prefix, maxlen):
    """Return *prefix* followed by 0..maxlen-1 random ASCII letters."""
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(string.ascii_letters) for _ in range(length))
def random_only_digital(prefix, maxlen):
    """Return *prefix* followed by 0..maxlen-1 random decimal digits."""
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(string.digits) for _ in range(length))
# Test data: one all-empty client plus n randomly generated ones.
testdata = [Client (name="", middlename="", lastname="", nickname="", address="", home="", mobile="", work="",
                    email="", email2="", email3="", phone2="") ] + [
    Client(name=random_string("name",5), middlename=random_string("middlename", 10),
           lastname=random_string("lastname",20), nickname=random_string("nickname",20),
           address=random_string("address",20), home=random_only_digital("home", 11),
           mobile=random_only_digital("mobile",11), work=random_only_digital("work",11),
           email="email@mail.ru", email2="email2@mail.ru", email3="email3@mail.ru",
           phone2=random_only_digital("phone2",11)
           )
    for i in range(n)
]
# Resolve the output path relative to the project root and dump the list as JSON.
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
|
nilq/baby-python
|
python
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""This module contains the slice-compare intent.
The slice-compare intent can give the result so that user can easily
compare the data according to the way user want.
Also it supports some operations like cropping based on date range,
slicing(removing rows that do not follow the conditions), group by.
Some of the operations are optional.
"""
from util import aspects
def slice_compare(table, metric, dimensions, slices, slice_compare_column,
                  summary_operator, **kwargs):
    """This function will implement the slice-compare intent

    Also removes the tuples that do not lie in the given date range.
    The arguments 'table, metric,dimension,slices, slices_compare_column,
    summary_operator' are not optional, so they are passed as it is,
    'date_range' will be passed in kwargs.
    If some the optional args are None(not passed),
    it is assumed that we don't have to apply them.

    Args:
        table: Type-pandas.dataframe
            It has the contents of the csv file
        metric: Type-string
            It is the name of the column according to which we have group to be done,
            summary operator is applied on metric. Metric could a column
            containing strings, if we are applying count operator on it.
        dimensions: Type-list of str
            It is the name of column we want.
            In query:'top 5 batsman according to runs', dimension is 'batsman'.
            When summary_operator is not None, we group by dimensions.
        date_range: Type-tuple
            Tuple of start_date and end_date
        date_column_name: Type-str
            It is the name of column which contains date
        date_format: Type-str
            It is required by datetime.strp_time to parse the date in the format
            Format Codes
            https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
        slices: Type-dictionary (will be changed)
            contains the key as column name and value as instance we want
            to slice
        slice_compare_column: Type-list of string
            first element denotes the column name by which we will do comparision.
            rest elements will the value belongs to that column by which we
            will compare the slices.
        summary_operator: Type-summary_operators enum members
            It denotes the summary operator, after grouping by dimensions.
            ex. SummaryOperators.MAX, SummaryOperators.SUM
            Note-summary_operator is always applied on metric column passed,
            and only when grouping is done

    Returns:
        The function will return the `table(a pandas dataframe object)`
        after applying the intent on the
        given `table(a pandas dataframe object)``
    """
    date_column_name = kwargs.get('date_column_name', 'date')
    date_range = kwargs.get('date_range', None)
    date_format = kwargs.get('date_format', 'yyyy-mm-dd')

    table = aspects.apply_date_range(table, date_range,
                                     date_column_name, date_format)

    table = aspects.slice_table(table, slices)

    # collecting the colums not to be removed
    required_columns = []
    if dimensions is not None:
        required_columns = dimensions.copy()
    required_columns.append(metric)

    table = aspects.crop_other_columns(table, required_columns)

    # Bug fixed: the reorder below used to crash with AttributeError when
    # dimensions is None (despite the None-check above) and mutated the
    # caller's list in place; work on a copy and guard instead.
    if dimensions is not None:
        dimensions = dimensions.copy()
        # slice_compare_column should be the last element of the group
        # so that groupby will show them together for every grouping
        if slice_compare_column[0] in dimensions:
            dimensions.remove(slice_compare_column[0])
        dimensions.append(slice_compare_column[0])

    table = aspects.group_by(table, dimensions, summary_operator)

    return table
|
nilq/baby-python
|
python
|
import uuid
from django.db import models
from django.urls import reverse
class Masternode(models.Model):
    """A masternode, identified by the transaction funding its collateral.

    Saving an instance automatically records a MasternodeHistory entry
    whenever the status or version differs from the stored row.
    """
    # every masternode is bound to one transaction that shows the
    # spend 1 500 001 bbp
    txid = models.CharField(max_length=100, primary_key=True, editable=False)
    # the address related to this masternode. The masternode reward is paid to this address
    address = models.CharField(max_length=64)
    # time when the masternode was first seen
    inserted_at = models.DateTimeField(auto_now_add=True)
    # the last time the masternode was seen (with any status)
    last_seen_at = models.DateTimeField(auto_now_add=True)
    # the status of the masternode known in the blockchain
    # One of: ENABLED, NEW_START_REQUIRED, WATCHDOG_EXPIRED, PRE_ENABLED, UPDATE_REQUIRED, EXPIRED
    status = models.CharField(max_length=30)
    # version of the watchdog (?)
    version = models.IntegerField()
    def get_absolute_url(self):
        """URL of the masternode list view (used after create/update)."""
        return reverse('masternodes_masternodes')
    def __str__(self):
        return self.txid
    def save(self, *args, **kwargs):
        """Save the row; add a MasternodeHistory entry when status/version changed."""
        # we check if the current database entry is different from this entry
        # If yes, we create a history entry
        create_history = False
        try:
            db_mn = Masternode.objects.get(txid=self.txid)
            if db_mn.status != self.status or str(db_mn.version) != str(self.version):
                create_history = True
        except Masternode.DoesNotExist:
            # First time this masternode is saved: always record history.
            # (A redundant `pass` after this assignment was removed.)
            create_history = True
        super(Masternode, self).save(*args, **kwargs)
        if create_history:
            history = MasternodeHistory()
            history.masternode = self
            history.status = self.status
            history.version = self.version
            history.save()
class MasternodeHistory(models.Model):
    """One status/version snapshot of a masternode (written by Masternode.save)."""
    masternode = models.ForeignKey(Masternode, on_delete=models.CASCADE,)
    # time of when his history entry was added
    inserted_at = models.DateTimeField(auto_now_add=True)
    # status/version at the time of the snapshot
    status = models.CharField(max_length=30)
    version = models.IntegerField()
|
nilq/baby-python
|
python
|
'''
FastAPI Demo
SQLAlchemy ORM Models
'''
# Standard Imports
# PyPi Imports
from sqlalchemy import (
Boolean,
Column,
Integer,
String
)
# Local Imports
from database.setup import Base
###############################################################################
class User(Base):
    '''ORM Models - users'''
    __tablename__ = "users"

    user_id = Column(Integer, primary_key=True, index=True)
    username = Column(String, unique=True)
    # Only the salted hash is stored, never the plain password.
    salted_password_hash = Column(String)
    first_name = Column(String)
    last_name = Column(String)
    email = Column(String, unique=True)
    # Soft-delete / account-enabled flag.
    active_boolean = Column(Boolean, nullable=False, default=True)
    # Grants administrative privileges when True.
    admin_boolean = Column(Boolean, nullable=False, default=False)
|
nilq/baby-python
|
python
|
"""
Classe for reading/writing SpikeTrains in a text file.
It is the simple case where different spiketrains are written line by line.
Supported : Read/Write
Author: sgarcia
"""
import os
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, SpikeTrain
class AsciiSpikeTrainIO(BaseIO):
    """
    Class for reading/writing SpikeTrains in a text file.
    Each Spiketrain is a line.
    Usage:
        >>> from neo import io
        >>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
        >>> seg = r.read_segment()
        >>> print(seg.spiketrains) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<SpikeTrain(array([ 3.89981604, 4.73258781, 0.608428 , 4.60246277, 1.23805797,
        ...
    """
    is_readable = True
    is_writable = True

    supported_objects = [Segment, SpikeTrain]
    readable_objects = [Segment]
    writeable_objects = [Segment]

    has_header = False
    is_streameable = False

    read_params = {
        Segment: [
            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
            ('t_start', {'value': 0., }),
        ]
    }
    write_params = {
        Segment: [
            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
        ]
    }

    name = None
    extensions = ['txt']

    mode = 'file'

    def __init__(self, filename=None):
        """
        This class read/write SpikeTrains in a text file.
        Each row is a spiketrain.
        **Arguments**
        filename : the filename to read/write
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self,
                     lazy=False,
                     delimiter='\t',
                     t_start=0. * pq.s,
                     unit=pq.s,
                     ):
        """
        Arguments:
            delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        assert not lazy, 'Do not support lazy'

        unit = pq.Quantity(1, unit)
        seg = Segment(file_origin=os.path.basename(self.filename))
        # Bug fixed: the file was opened with mode 'Ur'; the 'U' (universal
        # newlines) flag was removed in Python 3.11 and raises ValueError.
        # Text mode already performs newline translation. Using a context
        # manager also guarantees the handle is closed on error.
        with open(self.filename, 'r') as f:
            for i, line in enumerate(f):
                alldata = line[:-1].split(delimiter)
                # Drop empty fields produced by trailing/leading delimiters.
                if alldata[-1] == '':
                    alldata = alldata[:-1]
                if alldata[0] == '':
                    alldata = alldata[1:]
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max() * unit
                sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
                sptr.annotate(channel_index=i)
                seg.spiketrains.append(sptr)
        seg.create_many_to_one_relationship()
        return seg

    def write_segment(self, segment,
                      delimiter='\t',
                      ):
        """
        Write SpikeTrain of a Segment in a txt file.
        Each row is a spiketrain.
        Arguments:
            segment : the segment to write. Only analog signals will be written.
            delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
            information of t_start is lost
        """
        with open(self.filename, 'w') as f:
            # One line per spiketrain, one delimited value per spike time.
            for sptr in segment.spiketrains:
                for ts in sptr:
                    f.write('{:f}{}'.format(ts, delimiter))
                f.write('\n')
|
nilq/baby-python
|
python
|
import unittest
from main import Min_Heap
class MinHeapTestCase(unittest.TestCase):
    """Unit tests for Min_Heap: insert, peek_min, extract_min, __len__ and
    the internal _items layout after each operation."""
    def test_min_heap_returns_None_if_peek_is_called_with_no_items(self):
        heap = Min_Heap()
        self.assertEqual(heap.peek_min(), None)
    def test_min_heap_returns_None_if_extract_is_called_with_no_items(self):
        heap = Min_Heap()
        self.assertEqual(heap.extract_min(), None)
    def test_min_heap_has_the_correct_root_when_an_item_is_added(self):
        heap = Min_Heap()
        heap.insert(20)
        self.assertEqual(heap.peek_min(), 20)
    def test_min_heap_has_the_correct_items_as_they_are_added(self):
        heap = Min_Heap()
        heap.insert(5)
        heap.insert(20)
        self.assertEqual(heap._items[0], 5)
        self.assertEqual(heap._items[1], 20)
    def test_min_heap_changes_the_root_if_smaller_item_is_added(self):
        heap = Min_Heap()
        heap.insert(20)
        heap.insert(5)
        self.assertEqual(heap.peek_min(), 5)
    def test_min_heap_does_not_rebalance_when_level_as_room_for_smaller_item(self):
        heap = Min_Heap()
        heap.insert(20)
        heap.insert(5)
        heap.insert(15)
        # 15 may sit beside 20 on the same level; no bubbling needed.
        self.assertEqual(heap._items[0], 5)
        self.assertEqual(heap._items[1], 20)
        self.assertEqual(heap._items[2], 15)
    def test_min_heap_works_as_expected_as_more_levels_are_added(self):
        heap = Min_Heap()
        heap.insert(20)
        heap.insert(5)
        heap.insert(15)
        heap.insert(22)
        heap.insert(40)
        self.assertEqual(heap._items[0], 5)
        self.assertEqual(heap._items[1], 20)
        self.assertEqual(heap._items[2], 15)
        self.assertEqual(heap._items[3], 22)
        self.assertEqual(heap._items[4], 40)
    def test_min_heap_rebalances_and_bubbles_up_when_smaller_item_is_added(self):
        heap = Min_Heap()
        heap.insert(5)
        heap.insert(20)
        heap.insert(15)
        heap.insert(22)
        heap.insert(40)
        heap.insert(6)
        # 6 bubbles above 15 (swapped into index 2), 15 moves to the new leaf.
        self.assertEqual(heap._items[0], 5)
        self.assertEqual(heap._items[1], 20)
        self.assertEqual(heap._items[2], 6)
        self.assertEqual(heap._items[3], 22)
        self.assertEqual(heap._items[4], 40)
        self.assertEqual(heap._items[5], 15)
    def test_min_heap_extracts_all_items_in_the_correct_order(self):
        mins = []
        heap = Min_Heap()
        heap.insert(5)
        heap.insert(20)
        heap.insert(15)
        heap.insert(3)
        heap.insert(22)
        heap.insert(40)
        heap.insert(6)
        # Draining the heap must yield a sorted sequence.
        while heap:
            mins.append(heap.extract_min())
        self.assertEqual(mins, [3, 5, 6, 15, 20, 22, 40])
    def test_min_heap_returns_the_correct_length(self):
        heap = Min_Heap()
        self.assertEqual(len(heap), 0)
        heap.insert(5)
        self.assertEqual(len(heap), 1)
        heap.insert(6)
        self.assertEqual(len(heap), 2)
        heap.extract_min()
        self.assertEqual(len(heap), 1)
        heap.extract_min()
        self.assertEqual(len(heap), 0)
        # Extracting from an empty heap must not go negative.
        heap.extract_min()
        self.assertEqual(len(heap), 0)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
nilq/baby-python
|
python
|
from typing import Tuple, Callable
from thinc.api import Model, to_numpy
from thinc.types import Ragged, Ints1d
from ..util import registry
@registry.layers("spacy.extract_spans.v1")
def extract_spans() -> Model[Tuple[Ragged, Ragged], Ragged]:
    """Extract spans from a sequence of source arrays, as specified by an array
    of (start, end) indices. The output is a ragged array of the
    extracted spans.
    """
    # Stateless layer: all work happens in `forward`; `init` is a no-op.
    return Model(
        "extract_spans", forward, layers=[], refs={}, attrs={}, dims={}, init=init
    )
def init(model, X=None, Y=None):
    """No-op initializer: the extract_spans layer has no weights to set up."""
    pass
def forward(
    model: Model, source_spans: Tuple[Ragged, Ragged], is_train: bool
) -> Tuple[Ragged, Callable]:
    """Get subsequences from source vectors.

    Returns a ragged of the gathered span rows and a backprop callback that
    scatter-adds span gradients back onto the source positions.
    """
    ops = model.ops
    X, spans = source_spans
    assert spans.dataXd.ndim == 2
    indices = _get_span_indices(ops, spans, X.lengths)
    if len(indices) > 0:
        # Gather the source rows covered by the spans; the ragged lengths
        # are the span widths (end - start).
        Y = Ragged(X.dataXd[indices], spans.dataXd[:, 1] - spans.dataXd[:, 0])  # type: ignore[arg-type, index]
    else:
        # No spans: emit zeroed data with zero length per sequence.
        Y = Ragged(
            ops.xp.zeros(X.dataXd.shape, dtype=X.dataXd.dtype),
            ops.xp.zeros((len(X.lengths),), dtype="i"),
        )
    # Capture shape/lengths for the gradient allocation in the closure.
    x_shape = X.dataXd.shape
    x_lengths = X.lengths
    def backprop_windows(dY: Ragged) -> Tuple[Ragged, Ragged]:
        # Scatter the span gradients back to their source rows.
        dX = Ragged(ops.alloc2f(*x_shape), x_lengths)
        ops.scatter_add(dX.dataXd, indices, dY.dataXd)  # type: ignore[arg-type]
        return (dX, spans)
    return Y, backprop_windows
def _get_span_indices(ops, spans: Ragged, lengths: Ints1d) -> Ints1d:
    """Construct a flat array that has the indices we want to extract from the
    source data. For instance, if we want the spans (5, 9), (8, 10) the
    indices will be [5, 6, 7, 8, 8, 9].
    """
    # Work on CPU copies so slicing and arange below use numpy semantics.
    spans, lengths = _ensure_cpu(spans, lengths)
    indices = []
    offset = 0
    for i, length in enumerate(lengths):
        # Span boundaries are relative to each sequence; shift them by the
        # running offset into the flat source array.
        spans_i = spans[i].dataXd + offset
        for j in range(spans_i.shape[0]):
            indices.append(ops.xp.arange(spans_i[j, 0], spans_i[j, 1]))  # type: ignore[call-overload, index]
        offset += length
    return ops.flatten(indices, dtype="i", ndim_if_empty=1)
def _ensure_cpu(spans: Ragged, lengths: Ints1d) -> Tuple[Ragged, Ints1d]:
    """Copy `spans` and `lengths` to host (numpy) memory."""
    return Ragged(to_numpy(spans.dataXd), to_numpy(spans.lengths)), to_numpy(lengths)
|
nilq/baby-python
|
python
|
"""Initial revision
Revision ID: df419851a830
Revises:
Create Date: 2020-11-11 18:00:45.523670
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# This is the initial (root) migration: no parent revision.
revision = 'df419851a830'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial `user` and `subscription` tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('created', sa.TIMESTAMP(), server_default=sa.text('now()'), nullable=False),
    sa.Column('username', sa.UnicodeText(), nullable=False),
    sa.Column('email', sa.UnicodeText(), nullable=False),
    sa.Column('password', sa.UnicodeText(), nullable=False),
    sa.Column('salt', sa.UnicodeText(), nullable=False),
    sa.Column('active', sa.Boolean(), nullable=False),
    sa.Column('verificationCode', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # Each row subscribes one user to one mutation; rows follow user changes/deletes.
    op.create_table('subscription',
    sa.Column('user_id', postgresql.UUID(), nullable=False),
    sa.Column('mutation', sa.UnicodeText(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('user_id', 'mutation')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade() (children first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('subscription')
    op.drop_table('user')
    # ### end Alembic commands ###
|
nilq/baby-python
|
python
|
# Copyright (C) 2021-2022 Modin authors
#
# SPDX-License-Identifier: Apache-2.0
"""``DaskRunner`` class functionality."""
import os
import warnings
from unidist.cli.base.runner import BackendRunner
from unidist.cli.base.utils import Defaults, validate_num_cpus
from unidist.core.base.common import BackendName
class DaskRunner(BackendRunner):
    """
    An implementation of unidist ``BackendRunner`` for Dask backend.

    Parameters
    ----------
    **cli_kwargs : dict
        Keyword arguments supported by unidist CLI.
    """

    def __init__(self, **cli_kwargs):
        self.backend = BackendName.DASK
        self.hosts = cli_kwargs.get("hosts", Defaults.HOSTS)
        super().__init__(**cli_kwargs)

    def check_kwargs_support(self, **kwargs):
        """Check support for `kwargs` combination for Dask backend."""
        hosts = kwargs.get("hosts", self.hosts)
        num_cpus = kwargs.get("num_cpus", self.num_cpus)
        if hosts == Defaults.HOSTS:
            # Default hosts: start a local cluster, so no scheduler address.
            self.hosts = None
            if (
                num_cpus == Defaults.NUM_CPUS
                or isinstance(num_cpus, list)
                and len(num_cpus) == 1
            ):
                self.num_cpus = validate_num_cpus(num_cpus)[0]
            else:
                raise RuntimeError(
                    f"`num_cpus` must have a single value for {self.backend} backend."
                )
        elif isinstance(hosts, list) and len(hosts) == 1:
            # Exactly one host: connect to an existing Dask cluster at that address.
            self.hosts = hosts[0]
            if isinstance(num_cpus, list):
                # The existing cluster already defines its own worker resources.
                warnings.warn(
                    f"`num_cpus` isn't supported for existing {self.backend} cluster.",
                    RuntimeWarning,
                )
                self.num_cpus = None
        else:
            raise RuntimeError(
                f"`hosts` must have a single value with existing cluster address for {self.backend} backend."
            )
        if (
            kwargs.get("redis_password", Defaults.REDIS_PASSWORD)
            != Defaults.REDIS_PASSWORD
        ):
            # redis_password is a Ray-specific option.
            warnings.warn(
                f"`redis_password` isn't supported for {self.backend} backend.",
                RuntimeWarning,
            )

    def prepare_env(self):
        """Setup unidist environment variables for Dask backend."""
        super().prepare_env()
        if self.hosts is not None:
            os.environ["UNIDIST_DASK_CLUSTER"] = "True"
            os.environ["UNIDIST_DASK_SCHEDULER_ADDRESS"] = self.hosts
        else:
            # NOTE(review): os.environ values must be str -- confirm that
            # validate_num_cpus() returns strings, not ints.
            os.environ["UNIDIST_CPUS"] = self.num_cpus
|
nilq/baby-python
|
python
|
import argparse
import codecs
from typing import Dict, Union
import os
import yaml
import optuna
from .model.constant import *
from .train import main as train
DB_URL = 'mysql+pymysql://pmod:pmod@{host}:13306/optuna_pmod?charset=utf8'  # '{host}' placeholder is substituted at runtime
RESULT_DIR = os.path.join(DIR_OPTUNA, DIR_RESULTS)  # subdirectory (under workdir) for best-trial YAML dumps
def parse_args() -> Dict[str, str]:
    """Parse CLI arguments for the Optuna hyper-parameter search.

    Builds the single-objective parser (default, ``func=single_main``) plus a
    ``multi`` sub-command (``func=multi_main``), loads the optimizer-parameter
    YAML file in place, and returns everything as a plain dict.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    # --- Optuna-level options -------------------------------------------
    parser_optuna = parser.add_argument_group('Optuna')
    parser_optuna.add_argument(
        f'--{arg_hyphen(ARG_SEED)}',
        type=int, default=1,
        help='Seed for random number generator.'
    )
    parser_optuna.add_argument(
        '-n', f'--{arg_hyphen(ARG_N_TRIALS)}',
        type=int, default=200,
        help='Number of trials.'
    )
    parser_optuna.add_argument(
        '-t', f'--{arg_hyphen(ARG_TAG)}',
        type=str, required=True,
        help='Optuna training tag.'
    )
    parser_optuna.add_argument(
        '-H', f'--{arg_hyphen(ARG_HOST)}',
        type=str, default=None,
        help='When using a MySQL server, specify the hostname.'
    )
    parser_optuna.add_argument(
        '-s', f'--{arg_hyphen(ARG_SAMPLER)}',
        choices=SINGLE_SAMPLERS, default=SINGLE_SAMPLER_TPE,
        help='Optuna sampler.'
    )
    # Without a sub-command the single-objective entry point is used.
    parser.set_defaults(func=single_main)
    # --- Training options ------------------------------------------------
    parser_train = parser.add_argument_group('Training')
    parser_train.add_argument(
        '-tdc', f'--{arg_hyphen(ARG_TRAIN_DL_CONFIG)}',
        type=str, metavar='PATH', required=True,
        help='PATH of JSON file of dataloader config for training.'
    )
    parser_train.add_argument(
        '-vdc', f'--{arg_hyphen(ARG_VAL_DL_CONFIG)}',
        type=str, metavar='PATH', default=None,
        help=f'PATH of JSON file of dataloader config for validation. If not specified, the same file as "--{arg_hyphen(ARG_TRAIN_DL_CONFIG)}" will be used.'
    )
    parser_train.add_argument(
        '-bs', f'--{arg_hyphen(ARG_BLOCK_SIZE)}',
        type=int, default=0,
        help='Block size of dataset.'
    )
    parser_train.add_argument(
        '-td', f'--{arg_hyphen(ARG_TRAIN_DATA)}',
        type=str, metavar='PATH', nargs='+', required=True,
        help='PATH of training HDF5 datasets.'
    )
    parser_train.add_argument(
        '-vd', f'--{arg_hyphen(ARG_VAL_DATA)}',
        type=str, metavar='PATH', nargs='*', default=[],
        help=f'PATH of validation HDF5 datasets. If not specified, the same files as "--{arg_hyphen(ARG_TRAIN_DATA)}" will be used.'
    )
    parser_train.add_argument(
        f'--{arg_hyphen(ARG_EPOCHS)}',
        type=int, default=50,
        help='Epochs'
    )
    parser_train.add_argument(
        f'--{arg_hyphen(ARG_EPOCH_START_COUNT)}',
        type=int, default=1,
        help='The starting epoch count'
    )
    parser_train.add_argument(
        f'--{arg_hyphen(ARG_STEPS_PER_EPOCH)}',
        type=int, default=10000,
        help='Number of steps per epoch. If it is greater than the total number of datasets, then the total number of datasets is used.'
    )
    # NOTE(review): store_false means *passing* -ppa disables the augmentation.
    parser_train.add_argument(
        '-ppa', f'--{arg_hyphen(ARG_PROJECTED_POSITION_AUGMENTATION)}',
        action='store_false', help='Unuse Projected Positiion Augmentation'
    )
    # NOTE(review): default is a 3-element list but type=float with no nargs,
    # so a user-supplied value parses as a single float — confirm intended.
    parser_train.add_argument(
        f'--{arg_hyphen(ARG_TR_ERROR_RANGE)}',
        type=float, default=[0.6, 1.3, 0.7],
        help='Translation Error Range [m].'
    )
    parser_train.add_argument(
        f'--{arg_hyphen(ARG_ROT_ERROR_RANGE)}',
        type=float, default=3.0,
        help='Rotation Error Range [deg].'
    )
    # --- Network options --------------------------------------------------
    parser_net = parser.add_argument_group('Network')
    parser_net.add_argument(
        '-b', f'--{arg_hyphen(ARG_BATCH_SIZE)}',
        type=int, default=2,
        help='Batch Size'
    )
    parser_net.add_argument(
        f'--{arg_hyphen(ARG_RESUME)}',
        type=str, metavar='PATH', default=None,
        help='PATH of checkpoint(.pth).'
    )
    parser_net.add_argument(
        f'-amp', f'--{arg_hyphen(ARG_AMP)}',
        action='store_true',
        help='Use AMP.'
    )
    # --- Optimizer options ------------------------------------------------
    parser_optim = parser.add_argument_group('Optimizer')
    parser_optim.add_argument(
        f'--{arg_hyphen(ARG_CLIP_MAX_NORM)}',
        type=float, default=1.0,
        help='max_norm for clip_grad_norm.'
    )
    parser_optim.add_argument(
        '-op', f'--{arg_hyphen(ARG_OPTIM_PARAMS)}',
        type=str, metavar='PATH', default='./config/optim-params-default.yaml', help='PATH of YAML file of optimizer params.'
    )
    parser_optim.add_argument(
        '-o', f'--{arg_hyphen(ARG_OPTIMIZER)}',
        type=str, default=None, choices=OPTIM_TYPES,
        help='Optimizer'
    )
    parser_optim.add_argument(
        '-lp', f'--{arg_hyphen(ARG_LR_POLICY)}',
        type=str, default=LR_POLICY_PLATEAU, choices=LR_POLICIES,
        help='Learning rate policy.'
    )
    # --- Loss weights (None means "let Optuna suggest") -------------------
    parser_loss = parser.add_argument_group('Loss')
    parser_loss.add_argument(
        f'--{arg_hyphen(ARG_L1)}',
        type=float, default=None,
        help='Weight of L1 loss.'
    )
    parser_loss.add_argument(
        f'--{arg_hyphen(ARG_SEG_CE)}',
        type=float, default=None,
        help='Weight of Segmentation CrossEntropy Loss.'
    )
    parser_loss.add_argument(
        f'--{arg_hyphen(ARG_SEG_CE_AUX1)}',
        type=float, default=None,
        help='Weight of Segmentation Aux1 CrosEntropy Loss.'
    )
    parser_loss.add_argument(
        f'--{arg_hyphen(ARG_SEG_CE_AUX2)}',
        type=float, default=None,
        help='Weight of Segmentation Aux2 CrosEntropy Loss.'
    )
    # --- "multi" sub-command: multi-objective study -----------------------
    parser_multi = subparsers.add_parser(
        'multi', help='Multi Objective Trial'
    )
    parser_multi.add_argument(
        '-s', f'--{arg_hyphen(ARG_SAMPLER)}',
        choices=MULTI_SAMPLERS, default=MULTI_SAMPLER_MOTPE,
        help='Optuna sampler.'
    )
    parser_multi.set_defaults(func=multi_main)
    args = vars(parser.parse_args())
    # Replace the optimizer-params PATH with the parsed YAML content in place.
    if os.path.isfile(args[ARG_OPTIM_PARAMS]) is False:
        raise FileNotFoundError(f'"{args[ARG_OPTIM_PARAMS]}"')
    with open(args[ARG_OPTIM_PARAMS]) as f:
        optim_params:dict = yaml.safe_load(f)
    args[ARG_OPTIM_PARAMS] = optim_params
    # Fixed settings not exposed on the CLI.
    args[ARG_EVAL_DATA] = []
    args[ARG_DETECT_ANOMALY] = False
    return args
def objective_with_args(args: Dict[str, str], workdir: str):
    """Build an Optuna objective closed over the parsed CLI ``args``.

    Any hyper-parameter left as ``None`` on the CLI (optimizer type,
    learning rates, betas, scheduler settings, loss weights) is suggested
    by the trial; explicitly given values are passed through unchanged.
    """
    def objective(trial: optuna.Trial):
        # Work on a copy so suggestions never mutate the shared CLI args.
        train_args: Dict[str, str] = args.copy()
        train_args.pop('func', None)
        # NOTE(review): raises KeyError if neither HOST_NAME nor HOSTNAME is
        # set in the environment — confirm that's acceptable.
        trial.set_user_attr('hostname', os.environ.get('HOST_NAME', os.environ['HOSTNAME']))
        train_args[ARG_TAG] = f'optuna-trial{trial.number:06d}-{args[ARG_TAG]}'
        # Optimizer
        train_args[ARG_OPTIMIZER] = trial.suggest_categorical(ARG_OPTIMIZER, OPTIM_TYPES) if args[ARG_OPTIMIZER] is None else args[ARG_OPTIMIZER]
        # Fill every unset optimizer/scheduler value per network module.
        optim_params: Dict[str, Dict[str, Dict[str, Dict[str, Union[int, float]]]]] = train_args[ARG_OPTIM_PARAMS]
        for module_key, module_dict in optim_params.items():
            optim_dict: Dict[str, Union[float, Dict[str, float]]] = module_dict[CONFIG_OPTIMIZER]
            if optim_dict.get(CONFIG_LR) is None:
                optim_dict[CONFIG_LR] = trial.suggest_loguniform(CONFIG_LR, 1e-9, 1e-1)
            if train_args[ARG_OPTIMIZER] in [OPTIM_TYPE_ADAM, OPTIM_TYPE_ADABELIEF]:
                optim_param_dict: Dict[str, float] = optim_dict[train_args[ARG_OPTIMIZER]]
                if optim_param_dict.get(CONFIG_BETA1) is None:
                    optim_param_dict[CONFIG_BETA1] = trial.suggest_uniform(CONFIG_BETA1, 0.0, 1.0)
                if optim_param_dict.get(CONFIG_BETA2) is None:
                    optim_param_dict[CONFIG_BETA2] = trial.suggest_uniform(CONFIG_BETA2, 0.0, 1.0)
            elif train_args[ARG_OPTIMIZER] in [OPTIM_TYPE_SGD]:
                optim_param_dict: Dict[str, float] = optim_dict[train_args[ARG_OPTIMIZER]]
                if optim_param_dict.get(CONFIG_MOMENTUM) is None:
                    optim_param_dict[CONFIG_MOMENTUM] = trial.suggest_float(CONFIG_MOMENTUM, 0.0, 1.0)
            else:
                raise NotImplementedError(train_args[ARG_OPTIMIZER])
            # Scheduler settings depend on the chosen LR policy.
            scheduler_dict: Dict[str, Dict[str, Union[int, float]]] = module_dict[CONFIG_SCHEDULER]
            if train_args[ARG_LR_POLICY] in [LR_POLICY_LAMBDA]:
                scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
                if scheduler_param_dict.get(CONFIG_EPOCH_COUNT) is None:
                    scheduler_param_dict[CONFIG_EPOCH_COUNT] = args[ARG_EPOCH_START_COUNT]
                if scheduler_param_dict.get(CONFIG_NITER) is None:
                    scheduler_param_dict[CONFIG_NITER] = trial.suggest_int(CONFIG_NITER, 1, args[ARG_STEPS_PER_EPOCH])
                if scheduler_param_dict.get(CONFIG_NITER_DECAY) is None:
                    scheduler_param_dict[CONFIG_NITER_DECAY] = trial.suggest_int(CONFIG_NITER_DECAY, 1, args[ARG_STEPS_PER_EPOCH])
            elif train_args[ARG_LR_POLICY] in [LR_POLICY_STEP]:
                scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
                if scheduler_param_dict.get(CONFIG_NITER_DECAY) is None:
                    scheduler_param_dict[CONFIG_NITER_DECAY] = trial.suggest_int(CONFIG_NITER_DECAY, low=1)
                if scheduler_param_dict.get(CONFIG_GAMMA) is None:
                    scheduler_param_dict[CONFIG_GAMMA] = trial.suggest_uniform(CONFIG_GAMMA, 1e-3, 9e-1)
            elif train_args[ARG_LR_POLICY] in [LR_POLICY_PLATEAU]:
                scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
                if scheduler_param_dict.get(CONFIG_PATIENCE) is None:
                    scheduler_param_dict[CONFIG_PATIENCE] = trial.suggest_int(CONFIG_PATIENCE, 1, 100)
                if scheduler_param_dict.get(CONFIG_FACTOR) is None:
                    scheduler_param_dict[CONFIG_FACTOR] = trial.suggest_uniform(CONFIG_FACTOR, 1e-3, 9e-1)
                if scheduler_param_dict.get(CONFIG_THRESHOLD) is None:
                    scheduler_param_dict[CONFIG_THRESHOLD] = trial.suggest_uniform(CONFIG_THRESHOLD, 1e-9, 1e-1)
            elif train_args[ARG_LR_POLICY] in [LR_POLICY_COS]:
                scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
                if scheduler_param_dict.get(CONFIG_NITER) is None:
                    scheduler_param_dict[CONFIG_NITER] = trial.suggest_int(CONFIG_NITER, 1, args[ARG_STEPS_PER_EPOCH])
                if scheduler_param_dict.get(CONFIG_ETA_MIN) is None:
                    scheduler_param_dict[CONFIG_ETA_MIN] = trial.suggest_uniform(CONFIG_ETA_MIN, 1e-9, 1e-1)
            elif train_args[ARG_LR_POLICY] in [LR_POLICY_CLR]:
                scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
                if scheduler_param_dict.get(CONFIG_BASE_LR) is None:
                    scheduler_param_dict[CONFIG_BASE_LR] = trial.suggest_uniform(CONFIG_BASE_LR, 1e-9, 1e-1)
                if scheduler_param_dict.get(CONFIG_MAX_LR) is None:
                    scheduler_param_dict[CONFIG_MAX_LR] = trial.suggest_uniform(CONFIG_MAX_LR, 1e-9, 1e-1)
            else:
                raise NotImplementedError(train_args[ARG_LR_POLICY])
        # Loss
        train_args[ARG_L1] = trial.suggest_uniform(ARG_L1, 0.0, 1.0) if args[ARG_L1] is None else args[ARG_L1]
        train_args[ARG_SEG_CE] = trial.suggest_uniform(ARG_SEG_CE, 0.0, 1.0) if args[ARG_SEG_CE] is None else args[ARG_SEG_CE]
        train_args[ARG_SEG_CE_AUX1] = trial.suggest_uniform(ARG_SEG_CE_AUX1, 0.0, 1.0) if args[ARG_SEG_CE_AUX1] is None else args[ARG_SEG_CE_AUX1]
        train_args[ARG_SEG_CE_AUX2] = trial.suggest_uniform(ARG_SEG_CE_AUX2, 0.0, 1.0) if args[ARG_SEG_CE_AUX2] is None else args[ARG_SEG_CE_AUX2]
        print(f'{"Trial":11s}: {trial.number:14d}')
        result: Dict[str, float] = train(train_args, workdir, trial)
        # Multi-objective studies return (mIoU, MAPE); single-objective
        # studies minimize the combined score (1 - mIoU) + MAPE.
        if len(trial.study.directions) > 1:
            return result[METRIC_IOU], result[METRIC_MAPE]
        else:
            return 1.0 - result[METRIC_IOU] + result[METRIC_MAPE]
    return objective
def optimize(study: optuna.Study, args: Dict[str, Union[int, str]], workdir: str, storage: str):
    """Run the study and dump each best trial's parameters to a YAML file.

    Parameters
    ----------
    study : optuna.Study
        Study to optimize (single- or multi-objective).
    args : dict
        Parsed CLI arguments (tag, number of trials, ...).
    workdir : str
        Base working directory; results go under ``workdir/RESULT_DIR``.
    storage : str
        Storage URL (or None for in-memory), printed for the user.
    """
    print(f'{"Optuna":11s}: {"Tag":14s}: {args[ARG_TAG]}')
    print(f'{"":11s}: {"Study":14s}: {study.study_name}')
    print(f'{"":11s}: {"Num Trials":14s}: {args[ARG_N_TRIALS]}')
    print(f'{"":11s}: {"Storage":14s}: {storage}')
    try:
        study.optimize(objective_with_args(args, workdir), n_trials=args[ARG_N_TRIALS])
    except BaseException:
        # Deliberately swallow everything (including Ctrl-C) so the best
        # trials found so far are still written out below.
        pass
    finally:
        result_dir: str = os.path.join(workdir, RESULT_DIR)
        os.makedirs(result_dir, exist_ok=True)
        for best_trial in study.best_trials:
            # Bug fix: build each file path from result_dir; the original
            # reused the previous iteration's file path as the base, nesting
            # paths when a study had more than one best trial.
            save_path: str = os.path.join(
                result_dir,
                f'{best_trial.datetime_start.strftime("%Y%m%dT%H%M%S")}-{args[ARG_TAG]}.yaml'
            )
            with codecs.open(save_path, mode='w', encoding='utf-8') as f:
                yaml.dump(best_trial.params, f, encoding='utf-8', allow_unicode=True)
def single_main(args: Dict[str, str], workdir: str):
    """Entry point for the single-objective study minimizing (1 - mIoU) + MAPE."""
    if args[ARG_SAMPLER] == SINGLE_SAMPLER_GRID:
        # NOTE(review): optuna's GridSampler requires a ``search_space``
        # argument; calling it with none raises TypeError — confirm this
        # branch is ever exercised.
        sampler = optuna.samplers.GridSampler()
    elif args[ARG_SAMPLER] == SINGLE_SAMPLER_RANDOM:
        sampler = optuna.samplers.RandomSampler(seed=args[ARG_SEED])
    elif args[ARG_SAMPLER] == SINGLE_SAMPLER_CMAES:
        sampler = optuna.samplers.CmaEsSampler(seed=args[ARG_SEED])
    else:
        sampler = optuna.samplers.TPESampler(seed=args[ARG_SEED])
    # Use the MySQL storage only when a host was supplied; otherwise in-memory.
    storage: str = DB_URL.replace('{host}', args[ARG_HOST]) if isinstance(args[ARG_HOST], str) else None
    study_name: str = f'pmod-{args[ARG_TAG]}'
    study: optuna.Study = optuna.create_study(
        storage=storage,
        sampler=sampler,
        direction='minimize',
        study_name=study_name,
        load_if_exists=True,
    )
    if 'Objective' not in study.user_attrs.keys():
        study.set_user_attr('Objective', '(1 - mIoU) + MAPE')
    optimize(study, args, workdir, storage)
def multi_main(args: Dict[str, str], workdir: str):
    """Entry point for the multi-objective study (maximize mIoU, minimize MAPE)."""
    sampler_cls = (
        optuna.samplers.NSGAIISampler
        if args[ARG_SAMPLER] == MULTI_SAMPLER_NSGA2
        else optuna.samplers.MOTPESampler
    )
    sampler = sampler_cls(seed=args[ARG_SEED])
    # Use the MySQL storage only when a host was supplied; otherwise in-memory.
    host = args[ARG_HOST]
    storage: str = DB_URL.replace('{host}', host) if isinstance(host, str) else None
    study_name: str = f'pmod-{args[ARG_TAG]}'
    study: optuna.Study = optuna.create_study(
        storage=storage,
        directions=['maximize', 'minimize'],
        sampler=sampler,
        study_name=study_name,
        load_if_exists=True,
    )
    if 'Objectives' not in study.user_attrs:
        study.set_user_attr('Objectives', ['Best mIoU', 'Best MAPE'])
    optimize(study, args, workdir, storage)
|
nilq/baby-python
|
python
|
'''
Prints data about general statistics by region
'''
def runStat(dashes):
    """Print aggregate pay / hours / delivery statistics for each region.

    ``dashes.dashes`` is iterated once; each dash contributes its pay
    (``total``, in cents), its duration (``end - start``) and its delivery
    count to its region's bucket, then one summary is printed per region.
    """
    regions = {}
    for dash in dashes.dashes:
        delta = dash.end - dash.start
        # Bug fix: use total_seconds() instead of .seconds so a dash that
        # spans a day boundary is not truncated to the sub-day remainder.
        hours = delta.total_seconds() / 3600.
        if dash.region in regions:
            stats = regions[dash.region]
            stats["pay"] += dash.total
            stats["num"] += 1
            stats["hours"] += hours
            stats["deliveries"] += len(dash.deliveries)
        else:
            regions[dash.region] = {"pay": dash.total, "num": 1, "hours": hours, "deliveries": len(dash.deliveries)}
    for region in regions:
        print("Region: " + region)
        print("Dashes done: " + str(regions[region]["num"]))
        print("Deliveries done: " + str(regions[region]["deliveries"]))
        # Pay is stored in cents; divide by 100 for dollars.
        print("Money made: $" + str(regions[region]["pay"] / 100.))
        print("Hours worked: " + str(int(regions[region]["hours"])))
        print("Average hourly rate: $" + str(int(regions[region]["pay"] / regions[region]["hours"]) / 100.))
        print("Average deliveries per hour: " + ("%.2f" % (regions[region]["deliveries"] / regions[region]["hours"])))
        print()
def getName():
    """Return the human-readable name of this report module."""
    label = "Stats per region"
    return label
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# CGI page: renders the haproxy-wi "Metrics" view.
import funct
import sql
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates/'), autoescape=True)
template = env.get_template('metrics.html')

print('Content-type: text/html\n')
funct.check_login()

# Bug fix: pre-set defaults so the template can still render if anything in
# the try-block fails — previously a silenced exception left these names
# unbound and the render below raised NameError.
user = user_id = role = token = None
servers = ''
service_ver = ['']
try:
    user, user_id, role, token, servers = funct.get_users_params()
    # Extract the installed metrics-service version from the rpm package name.
    cmd = "rpm --query haproxy-wi-metrics-* |awk -F\"metrics\" '{print $2}' |awk -F\".noa\" '{print $1}' |sed 's/-//1' |sed 's/-/./'"
    service_ver, stderr = funct.subprocess_execute(cmd)
    if service_ver == '* is not installed':
        servers = ''
    else:
        servers = sql.select_servers_metrics(user_id.value)
except Exception:
    # Best-effort page: fall back to the defaults above on any failure.
    pass
template = template.render(h2=1, title="Metrics",
                           autorefresh=1,
                           role=role,
                           user=user,
                           servers=servers,
                           versions=funct.versions(),
                           services=service_ver[0],
                           token=token)
print(template)
|
nilq/baby-python
|
python
|
import sys
# Demo script comparing memory footprints of a dict comprehension vs. a list
# of tuples (sizes are shallow — sys.getsizeof does not follow references).
lista = [
    ('chave1', 'valor1'),
    ('chave2', 'valor2'),
    ('chave1', 'valor1'),
    ('chave2', 'valor2'),
    ('chave1', 'valor1'),
]
#d1 = {x.upper(): y.upper() for x, y in lista} # uppercases every key and value
#d1 = {x for x in range(5)}
d1 = {f'chave_{x}': 'a' for x in range(5)}
print(d1)
print(sys.getsizeof(d1))
print(sys.getsizeof(lista))
|
nilq/baby-python
|
python
|
from githubpy import *

def main():
    """Trigger a GitHub Actions workflow run via the dispatch API."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--token")
    parser.add_argument("-o", "--owner")
    parser.add_argument("-r", "--repo")
    parser.add_argument("-w", "--workflow", action='append', default=[])
    parser.add_argument("-b", "--branch", help="Branch or tag")
    options = parser.parse_args()
    ghc = GitHubClient(token=options.token)
    # Only the first -w value is dispatched.
    result = ghc.ActionsCreateWorkflowDispatch(options.owner,
                                               options.repo,
                                               options.workflow[0],
                                               options.branch, inputs={})
    # NOTE(review): a successful dispatch returns HTTP 204; this condition
    # only fires for non-HttpResponse results — confirm whether the
    # `not isinstance` check should be inverted.
    if not isinstance(result, HttpResponse) and result.status_code != 204:
        # Bug fix: was a plain string, so "{result.message}" printed literally.
        print(f"ERROR: {result.message}")
        return

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from PyQt5.QtWidgets import *
import sys
class Window(QWidget):
    """Top-level widget showing a QToolBox with one (empty) page per brand."""
    def __init__(self):
        QWidget.__init__(self)
        grid = QGridLayout()
        self.setLayout(grid)
        toolbox = QToolBox()
        grid.addWidget(toolbox, 0, 0)
        # One empty QLabel page per brand, in the original insertion order.
        for brand in ("Honda", "Toyota", "Mercedes"):
            toolbox.addItem(QLabel(), brand)
app = QApplication(sys.argv)  # Qt requires exactly one QApplication per process
screen = Window()
screen.show()
sys.exit(app.exec_())  # enter the Qt event loop; its return value is the exit code
|
nilq/baby-python
|
python
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# mkv.py - Matroska Streaming Video Files
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer, Jason Tackaberry
#
# Maintainer: Jason Tackaberry <tack@urandom.ca>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
from struct import unpack
import logging
import re
from datetime import datetime
# import kaa.metadata.video core
from . import core
# get logging object
log = logging.getLogger('metadata')
# Main IDs for the Matroska streams
MATROSKA_VIDEO_TRACK = 0x01
MATROSKA_AUDIO_TRACK = 0x02
MATROSKA_SUBTITLES_TRACK = 0x11
MATROSKA_HEADER_ID = 0x1A45DFA3
MATROSKA_TRACKS_ID = 0x1654AE6B
MATROSKA_CUES_ID = 0x1C53BB6B
MATROSKA_SEGMENT_ID = 0x18538067
MATROSKA_SEGMENT_INFO_ID = 0x1549A966
MATROSKA_CLUSTER_ID = 0x1F43B675
MATROSKA_VOID_ID = 0xEC
MATROSKA_CRC_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_DURATION_ID = 0x4489
MATROSKA_CRC32_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_MUXING_APP_ID = 0x4D80
MATROSKA_WRITING_APP_ID = 0x5741
MATROSKA_CODEC_ID = 0x86
MATROSKA_CODEC_PRIVATE_ID = 0x63A2
MATROSKA_FRAME_DURATION_ID = 0x23E383
MATROSKA_VIDEO_SETTINGS_ID = 0xE0
MATROSKA_VIDEO_WIDTH_ID = 0xB0
MATROSKA_VIDEO_HEIGHT_ID = 0xBA
MATROSKA_VIDEO_INTERLACED_ID = 0x9A
MATROSKA_VIDEO_DISPLAY_WIDTH_ID = 0x54B0
MATROSKA_VIDEO_DISPLAY_HEIGHT_ID = 0x54BA
MATROSKA_VIDEO_STEREO = 0x53B8
MATROSKA_AUDIO_SETTINGS_ID = 0xE1
MATROSKA_AUDIO_SAMPLERATE_ID = 0xB5
MATROSKA_AUDIO_CHANNELS_ID = 0x9F
MATROSKA_TRACK_UID_ID = 0x73C5
MATROSKA_TRACK_NUMBER_ID = 0xD7
MATROSKA_TRACK_TYPE_ID = 0x83
MATROSKA_TRACK_LANGUAGE_ID = 0x22B59C
MATROSKA_TRACK_OFFSET = 0x537F
MATROSKA_TRACK_FLAG_DEFAULT_ID = 0x88
MATROSKA_TRACK_FLAG_ENABLED_ID = 0xB9
MATROSKA_TITLE_ID = 0x7BA9
MATROSKA_DATE_UTC_ID = 0x4461
MATROSKA_NAME_ID = 0x536E
MATROSKA_CHAPTERS_ID = 0x1043A770
MATROSKA_CHAPTER_UID_ID = 0x73C4
MATROSKA_EDITION_ENTRY_ID = 0x45B9
MATROSKA_CHAPTER_ATOM_ID = 0xB6
MATROSKA_CHAPTER_TIME_START_ID = 0x91
MATROSKA_CHAPTER_TIME_END_ID = 0x92
MATROSKA_CHAPTER_FLAG_ENABLED_ID = 0x4598
MATROSKA_CHAPTER_DISPLAY_ID = 0x80
MATROSKA_CHAPTER_LANGUAGE_ID = 0x437C
MATROSKA_CHAPTER_STRING_ID = 0x85
MATROSKA_ATTACHMENTS_ID = 0x1941A469
MATROSKA_ATTACHED_FILE_ID = 0x61A7
MATROSKA_FILE_DESC_ID = 0x467E
MATROSKA_FILE_NAME_ID = 0x466E
MATROSKA_FILE_MIME_TYPE_ID = 0x4660
MATROSKA_FILE_DATA_ID = 0x465C
MATROSKA_SEEKHEAD_ID = 0x114D9B74
MATROSKA_SEEK_ID = 0x4DBB
MATROSKA_SEEKID_ID = 0x53AB
MATROSKA_SEEK_POSITION_ID = 0x53AC
MATROSKA_TAGS_ID = 0x1254C367
MATROSKA_TAG_ID = 0x7373
MATROSKA_TARGETS_ID = 0x63C0
MATROSKA_TARGET_TYPE_VALUE_ID = 0x68CA
MATROSKA_TARGET_TYPE_ID = 0x63CA
MATRSOKA_TAGS_TRACK_UID_ID = 0x63C5
MATRSOKA_TAGS_EDITION_UID_ID = 0x63C9
MATRSOKA_TAGS_CHAPTER_UID_ID = 0x63C4
MATRSOKA_TAGS_ATTACHMENT_UID_ID = 0x63C6
MATROSKA_SIMPLE_TAG_ID = 0x67C8
MATROSKA_TAG_NAME_ID = 0x45A3
MATROSKA_TAG_LANGUAGE_ID = 0x447A
MATROSKA_TAG_STRING_ID = 0x4487
MATROSKA_TAG_BINARY_ID = 0x4485
# See mkv spec for details:
# http://www.matroska.org/technical/specs/index.html
# Map to convert to well known codes
# http://haali.cs.msu.ru/mkv/codecs.pdf
FOURCCMap = {
'V_THEORA': 'THEO',
'V_SNOW': 'SNOW',
'V_MPEG4/ISO/ASP': 'MP4V',
'V_MPEG4/ISO/AVC': 'AVC1',
'A_AC3': 0x2000,
'A_MPEG/L3': 0x0055,
'A_MPEG/L2': 0x0050,
'A_MPEG/L1': 0x0050,
'A_DTS': 0x2001,
'A_PCM/INT/LIT': 0x0001,
'A_PCM/FLOAT/IEEE': 0x003,
'A_TTA1': 0x77a1,
'A_WAVPACK4': 0x5756,
'A_VORBIS': 0x6750,
'A_FLAC': 0xF1AC,
'A_AAC': 0x00ff,
'A_AAC/': 0x00ff
}
stereo_map = {
1: 'side by side (left eye is first)',
2: 'top-bottom (right eye is first)',
3: 'top-bottom (left eye is first)',
4: 'checkboard (right is first)',
5: 'checkboard (left is first)',
6: 'row interleaved (right is first)',
7: 'row interleaved (left is first)',
8: 'column interleaved (right is first)',
9: 'column interleaved (left is first)',
10: 'anaglyph (cyan/red)',
11: 'side by side (right eye is first)',
12: 'anaglyph (green/magenta)',
13: 'both eyes laced in one Block (left eye is first)',
14: 'both eyes laced in one Block (right eye is first)'
}
def matroska_date_to_datetime(date):
    """
    Convert a date in Matroska's date format to a python datetime object.
    Returns the given date string unchanged if no precision level matches.
    """
    # Matroska allows truncating the date from the right ("2004",
    # "2003-05-01", "2003-05-01 12:00", ...), so retry strptime with
    # progressively shorter formats, dropping one field plus its
    # separator each round.
    parts = re.split(r'([-:. ])', '%Y-%m-%d %H:%M:%S.%f')
    for end in range(len(parts), 0, -2):
        try:
            return datetime.strptime(date, ''.join(parts[:end]))
        except ValueError:
            continue
    return date
def matroska_bps_to_bitrate(bps):
    """
    Tries to convert a free-form bps string into a bitrate (bits per second).
    Returns None when the string cannot be interpreted at all.
    """
    match = re.search(r'([\d.]+)\s*(\D.*)', bps)
    if match:
        # Rebind bps to the numeric portion so an unrecognized unit still
        # falls through to the bare-number heuristic below.
        bps, unit = match.groups()
        if 'kbit' in unit:
            return float(bps) * 1024
        if 'kbyte' in unit:
            return float(bps) * 1024 * 8
        if 'byte' in unit:
            return float(bps) * 8
        if 'bps' in unit or 'bit' in unit:
            return float(bps)
    if bps.replace('.', '').isdigit():
        value = float(bps)
        # Small bare numbers are assumed to be kilobits.
        return value * 1024 if value < 30000 else value
    # Implicit None: not parseable.
# Used to convert the official matroska tag names (and some unofficial ones) to core
# attributes. tag name -> attr, filter
TAGS_MAP = {
# From Media core
'album': ('album', None),
'title': ('title', None),
'subtitle': ('caption', None),
'comment': ('comment', None),
'comments': ('comment', None),
'url': ('url', None),
'artist': ('artist', None),
'keywords': ('keywords', lambda s: [word.strip() for word in s.split(',')]),
'composer_nationality': ('country', None),
'date_released': ('datetime', None),
'date_recorded': ('datetime', None),
'date_written': ('datetime', None),
# From Video core
'encoder': ('encoder', None),
'bps': ('bitrate', matroska_bps_to_bitrate),
'part_number': ('trackno', int),
'total_parts': ('trackof', int),
'copyright': ('copyright', None),
'genre': ('genre', None),
'actor': ('actors', None),
'written_by': ('writer', None),
'producer': ('producer', None),
'production_studio': ('studio', None),
'law_rating': ('rating', None),
'summary': ('summary', None),
'synopsis': ('synopsis', None),
}
class EbmlEntity:
    """
    This is class that is responsible to handle one Ebml entity as described in
    the Matroska/Ebml spec
    """
    def __init__(self, inbuf):
        """Parse one EBML element (id + size + payload) from *inbuf*."""
        # Compute the EBML id
        # Set the CRC len to zero
        self.crc_len = 0
        # Now loop until we find an entity without CRC
        try:
            self.build_entity(inbuf)
        except IndexError:
            # Truncated buffer: not enough bytes for the element.
            raise core.ParseError()
        while self.get_id() == MATROSKA_CRC32_ID:
            # Skip leading CRC elements, accounting their size in crc_len.
            self.crc_len += self.get_total_len()
            inbuf = inbuf[self.get_total_len():]
            self.build_entity(inbuf)
    def build_entity(self, inbuf):
        """Decode id, length and payload of the element at the buffer start."""
        self.compute_id(inbuf)
        if self.id_len == 0:
            log.debug("EBML entity not found, bad file format")
            raise core.ParseError()
        self.entity_len, self.len_size = self.compute_len(inbuf[self.id_len:])
        self.entity_data = inbuf[self.get_header_len() : self.get_total_len()]
        # ebml_length keeps the declared size; entity_len is clamped to the
        # bytes actually available in the buffer (may be fewer).
        self.ebml_length = self.entity_len
        self.entity_len = min(len(self.entity_data), self.entity_len)
        # if the data size is 8 or less, it could be a numeric value
        self.value = 0
        if self.entity_len <= 8:
            # Interpret the payload as a big-endian unsigned integer.
            for pos, shift in zip(list(range(self.entity_len)), list(range((self.entity_len-1)*8, -1, -8))):
                self.value |= self.entity_data[pos] << shift
    def add_data(self, data):
        """Append more payload bytes, up to the declared (ebml) length."""
        maxlen = self.ebml_length - len(self.entity_data)
        if maxlen <= 0:
            return
        self.entity_data += data[:maxlen]
        self.entity_len = len(self.entity_data)
    def compute_id(self, inbuf):
        """Decode the variable-length (1-4 byte) element id; sets id_len=0 on failure."""
        self.id_len = 0
        if len(inbuf) < 1:
            return 0
        first = inbuf[0]
        # The number of leading zero bits of the first byte encodes the
        # id length (0x80 -> 1 byte, 0x40 -> 2, 0x20 -> 3, 0x10 -> 4).
        if first & 0x80:
            self.id_len = 1
            self.entity_id = first
        elif first & 0x40:
            if len(inbuf) < 2:
                return 0
            self.id_len = 2
            self.entity_id = inbuf[0]<<8 | inbuf[1]
        elif first & 0x20:
            if len(inbuf) < 3:
                return 0
            self.id_len = 3
            self.entity_id = (inbuf[0]<<16) | (inbuf[1]<<8) | inbuf[2]
        elif first & 0x10:
            if len(inbuf) < 4:
                return 0
            self.id_len = 4
            self.entity_id = (inbuf[0]<<24) | (inbuf[1]<<16) | (inbuf[2]<<8) | inbuf[3]
        self.entity_str = inbuf[0:self.id_len]
    def compute_len(self, inbuf):
        """Decode the EBML variable-length size field.

        Returns (length, bytes consumed); an all-ones length means
        "unknown size" and is returned as 0.
        """
        if not inbuf:
            return 0, 0
        i = num_ffs = 0
        len_mask = 0x80
        len = inbuf[0]
        # Position of the first set bit gives the size-field width (i+1 bytes).
        while not len & len_mask:
            i += 1
            len_mask >>= 1
            if i >= 8:
                return 0, 0
        # Strip the length-marker bit, then pull in the remaining bytes.
        len &= len_mask - 1
        if len == len_mask - 1:
            num_ffs += 1
        for p in range(i):
            len = (len << 8) | inbuf[p + 1]
            if len & 0xff == 0xff:
                num_ffs += 1
        if num_ffs == i + 1:
            len = 0
        return len, i + 1
    def get_crc_len(self):
        # Bytes consumed by CRC elements preceding this one.
        return self.crc_len
    def get_value(self):
        # Payload decoded as a big-endian unsigned integer (0 if > 8 bytes).
        return self.value
    def get_float_value(self):
        """Return the payload as an IEEE float (4 or 8 bytes), else 0.0."""
        if len(self.entity_data) == 4:
            return unpack('!f', self.entity_data)[0]
        elif len(self.entity_data) == 8:
            return unpack('!d', self.entity_data)[0]
        return 0.0
    def get_data(self):
        # Raw payload bytes.
        return self.entity_data
    def get_utf8(self):
        # EBML RFC says "A string MAY be zero padded at the end."
        return str(self.entity_data.rstrip(b'\x00'), 'utf-8', 'replace')
    def get_str(self):
        # ASCII variant of get_utf8.
        return str(self.entity_data.rstrip(b'\x00'), 'ascii', 'replace')
    def get_id(self):
        # Numeric element id (one of the MATROSKA_*_ID constants).
        return self.entity_id
    def get_str_id(self):
        # Raw id bytes as sliced from the input buffer.
        return self.entity_str
    def get_len(self):
        # Available payload length (clamped to the buffer).
        return self.entity_len
    def get_total_len(self):
        # Full element size: payload + id field + size field.
        return self.entity_len + self.id_len + self.len_size
    def get_header_len(self):
        # Size of the id field plus the size field.
        return self.id_len + self.len_size
class Matroska(core.AVContainer):
    """
    Matroska video and audio parser. If at least one video stream is
    detected it will set the type to MEDIA_AV.
    """
    # Default media type (audio-only) until a video track is detected.
    media = core.MEDIA_AUDIO
    def __init__(self, file):
        """Parse the EBML header and segment of the open *file* object."""
        core.AVContainer.__init__(self)
        self.samplerate = 1
        self.file = file
        # Read enough that we're likely to get the full seekhead and elements after
        # the seekhead (but before clusters) in case the file is truncated.
        # (FIXME: kludge.)
        buffer = file.read(100000)
        if not buffer:
            # Regular File end
            raise core.ParseError()
        # Check the Matroska header
        header = EbmlEntity(buffer)
        if header.get_id() != MATROSKA_HEADER_ID:
            raise core.ParseError()
        log.debug("HEADER ID found %08X" % header.get_id() )
        self.mime = 'application/mkv'
        self.type = 'Matroska'
        self.has_idx = False
        # Maps track/edition/chapter/attachment UIDs to their parsed objects.
        self.objects_by_uid = {}
        # Guards against recursive seekhead processing.
        self._in_seekhead = False
        # Now get the segment
        self.segment = segment = EbmlEntity(buffer[header.get_total_len():])
        # Record file offset of segment data for seekheads
        self.segment.offset = header.get_total_len() + segment.get_header_len()
        if segment.get_id() != MATROSKA_SEGMENT_ID:
            log.debug("SEGMENT ID not found %08X" % segment.get_id())
            return
        log.debug("SEGMENT ID found %08X" % segment.get_id())
        # The parsing strategy for mkv is to first process the seekhead (which is
        # at the top of the file), which points to all top-level elements we're
        # interested in parsing. Seekhead parsing is more robust as it seeks
        # across the file as needed and reads all data. If this succeeds, then
        # we stop processing everything else in the segment as we're done.
        #
        # If the seekhead parsing fails, this is usually because the file is
        # incomplete/corrupt. In this case, we clear out anything that might
        # have been processed from the seekhead and continue on with the
        # other elements in the segment that might be in our pre-read buffer.
        try:
            for elem in self.process_one_level(segment):
                log.debug("Segment level id: %x", elem.get_id())
                try:
                    self.process_elem(elem)
                    if elem.get_id() == MATROSKA_SEEKHEAD_ID:
                        # Seekhead was successfully processed so we're done.
                        break
                except core.ParseError:
                    if elem.get_id() == MATROSKA_SEEKHEAD_ID:
                        # We couldn't finish processing the seekhead. Clear
                        # out all metadata and keep processing the segment.
                        log.debug("Failed to process seekhead, continuing with segment")
                        del self.audio[:]
                        del self.video[:]
                        del self.subtitles[:]
                        del self.chapters[:]
                        self.objects_by_uid.clear()
                        continue
                    else:
                        # Some other error, stop processing.
                        break
        except core.ParseError:
            pass
        if not self.has_idx:
            # No Cues element was seen; treat the file as possibly corrupt.
            log.debug('WARNING: file has no index')
            self._set('corrupt', True)
    def process_elem(self, elem):
        """Dispatch one top-level segment element to its specific handler."""
        elem_id = elem.get_id()
        log.debug('BEGIN: process element %x size %d', elem_id, elem.entity_len)
        if elem_id == MATROSKA_SEGMENT_INFO_ID:
            duration = 0
            # Default timecode scale per the Matroska spec: 1,000,000 ns.
            scalecode = 1000000.0
            for ielem in self.process_one_level(elem):
                ielem_id = ielem.get_id()
                if ielem_id == MATROSKA_TIMECODESCALE_ID:
                    scalecode = ielem.get_value()
                elif ielem_id == MATROSKA_DURATION_ID:
                    duration = ielem.get_float_value()
                elif ielem_id == MATROSKA_TITLE_ID:
                    self.title = ielem.get_utf8()
                elif ielem_id == MATROSKA_DATE_UTC_ID:
                    # Stored as nanoseconds since the Matroska epoch.
                    timestamp = unpack('!q', ielem.get_data())[0] / 10.0**9
                    # Date is offset 2001-01-01 00:00:00 (timestamp 978307200.0)
                    self.timestamp = int(timestamp + 978307200)
            # Duration in seconds = ticks * scale (ns/tick) / 1e9.
            self.length = duration * scalecode / 1000000000.0
        elif elem_id == MATROSKA_TRACKS_ID:
            self.process_tracks(elem)
        elif elem_id == MATROSKA_CHAPTERS_ID:
            self.process_chapters(elem)
        elif elem_id == MATROSKA_ATTACHMENTS_ID:
            self.process_attachments(elem)
        elif elem_id == MATROSKA_SEEKHEAD_ID:
            self.process_seekhead(elem)
        elif elem_id == MATROSKA_TAGS_ID:
            self.process_tags(elem)
        elif elem_id == MATROSKA_CUES_ID:
            # Presence of a Cues element means the file has a seek index.
            self.has_idx = True
        log.debug('END: process element %x', elem_id)
        return True
    def process_seekhead(self, elem):
        """Follow every seek entry in the seekhead and parse its target element."""
        if self._in_seekhead:
            # A seekhead may point at another seekhead; avoid infinite recursion.
            return log.debug('skipping recursive seekhead processing')
        self._in_seekhead = True
        for seek_elem in self.process_one_level(elem):
            if seek_elem.get_id() != MATROSKA_SEEK_ID:
                continue
            for sub_elem in self.process_one_level(seek_elem):
                if sub_elem.get_id() == MATROSKA_SEEK_POSITION_ID:
                    # Seek positions are relative to the segment data start.
                    self.file.seek(self.segment.offset + sub_elem.get_value())
                    buffer = self.file.read(100)
                    elem = EbmlEntity(buffer)
                    # Fetch all data necessary for this element.
                    if elem.ebml_length > 100:
                        elem.add_data(self.file.read(elem.ebml_length - 100))
                    self.process_elem(elem)
        self._in_seekhead = False
def process_tracks(self, tracks):
tracksbuf = tracks.get_data()
index = 0
while index < tracks.get_len():
trackelem = EbmlEntity(tracksbuf[index:])
self.process_track(trackelem)
index += trackelem.get_total_len() + trackelem.get_crc_len()
def process_one_level(self, item):
buf = item.get_data()
index = 0
while index < item.get_len():
if len(buf[index:]) == 0:
break
elem = EbmlEntity(buf[index:])
yield elem
index += elem.get_total_len() + elem.get_crc_len()
    def process_track(self, track):
        """Parse one TrackEntry element and register it by its track type."""
        # Collapse generator into a list since we need to iterate over it
        # twice.
        elements = [ x for x in self.process_one_level(track) ]
        track_type = [ x.get_value() for x in elements if x.get_id() == MATROSKA_TRACK_TYPE_ID ]
        if not track_type:
            log.debug('Bad track: no type id found')
            return
        track_type = track_type[0]
        track = None
        if track_type == MATROSKA_VIDEO_TRACK:
            log.debug("Video track found")
            track = self.process_video_track(elements)
        elif track_type == MATROSKA_AUDIO_TRACK:
            log.debug("Audio track found")
            track = self.process_audio_track(elements)
        elif track_type == MATROSKA_SUBTITLES_TRACK:
            log.debug("Subtitle track found")
            track = core.Subtitle()
            track.id = len(self.subtitles)
            self.subtitles.append(track)
            # Video/audio handlers apply the common fields themselves;
            # subtitles only get the generic per-track fields.
            for elem in elements:
                self.process_track_common(elem, track)
    def process_track_common(self, elem, track):
        """Apply a TrackEntry child element common to all track types."""
        elem_id = elem.get_id()
        if elem_id == MATROSKA_TRACK_LANGUAGE_ID:
            track.language = elem.get_str()
            log.debug("Track language found: %s" % track.language)
        elif elem_id == MATROSKA_NAME_ID:
            track.title = elem.get_utf8()
        elif elem_id == MATROSKA_TRACK_NUMBER_ID:
            track.trackno = elem.get_value()
        elif elem_id == MATROSKA_TRACK_FLAG_ENABLED_ID:
            track.enabled = bool(elem.get_value())
        elif elem_id == MATROSKA_TRACK_FLAG_DEFAULT_ID:
            track.default = bool(elem.get_value())
        elif elem_id == MATROSKA_CODEC_ID:
            track.codec = elem.get_str()
        elif elem_id == MATROSKA_CODEC_PRIVATE_ID:
            track.codec_private = elem.get_data()
        elif elem_id == MATROSKA_TRACK_UID_ID:
            # Register the track by uid so Tags targets can resolve to it.
            self.objects_by_uid[elem.get_value()] = track
    def process_video_track(self, elements):
        """Build a core.VideoStream from a TrackEntry's child elements.

        Reads codec, frame duration and the Video settings sub-element
        (dimensions, display aspect, interlacing, stereo mode), normalizes
        the codec id, then registers the stream on ``self.video``.
        """
        track = core.VideoStream()
        # Defaults
        track.codec = 'Unknown'
        track.fps = 0
        for elem in elements:
            elem_id = elem.get_id()
            if elem_id == MATROSKA_CODEC_ID:
                track.codec = elem.get_str()
            elif elem_id == MATROSKA_FRAME_DURATION_ID:
                try:
                    # Frame duration is in nanoseconds; fps is its reciprocal.
                    track.fps = 1 / (pow(10, -9) * (elem.get_value()))
                except ZeroDivisionError:
                    pass
            elif elem_id == MATROSKA_VIDEO_SETTINGS_ID:
                d_width = d_height = None
                for settings_elem in self.process_one_level(elem):
                    settings_elem_id = settings_elem.get_id()
                    if settings_elem_id == MATROSKA_VIDEO_WIDTH_ID:
                        track.width = settings_elem.get_value()
                    elif settings_elem_id == MATROSKA_VIDEO_HEIGHT_ID:
                        track.height = settings_elem.get_value()
                    elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_WIDTH_ID:
                        d_width = settings_elem.get_value()
                    elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_HEIGHT_ID:
                        d_height = settings_elem.get_value()
                    elif settings_elem_id == MATROSKA_VIDEO_INTERLACED_ID:
                        value = int(settings_elem.get_value())
                        self._set('interlaced', value)
                    elif settings_elem_id == MATROSKA_VIDEO_STEREO:
                        value = stereo_map.get(int(settings_elem.get_value()), None)
                        if value:
                            self._set('stereo', value)
                # Aspect only when both display dimensions were present.
                if None not in (d_width, d_height):
                    track.aspect = float(d_width) / d_height
            else:
                self.process_track_common(elem, track)
        # convert codec information
        # http://haali.cs.msu.ru/mkv/codecs.pdf
        if track.codec in FOURCCMap:
            track.codec = FOURCCMap[track.codec]
        elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
            # Match on the codec family prefix, e.g. 'V_MPEG4/'.
            track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
        elif track.codec.endswith('FOURCC') and len(track.codec_private or '') == 40:
            # Presumably a 40-byte BITMAPINFOHEADER with the FOURCC at
            # offset 16 (V_MS/VFW/FOURCC case) — TODO confirm.
            track.codec = track.codec_private[16:20]
        elif track.codec.startswith('V_REAL/'):
            track.codec = track.codec[7:]
        elif track.codec.startswith('V_'):
            # FIXME: add more video codecs here
            track.codec = track.codec[2:]
        self.media = core.MEDIA_AV
        track.id = len(self.video)
        self.video.append(track)
        return track
    def process_audio_track(self, elements):
        """Build a core.AudioStream from a TrackEntry's child elements."""
        track = core.AudioStream()
        track.codec = 'Unknown'
        for elem in elements:
            elem_id = elem.get_id()
            if elem_id == MATROSKA_CODEC_ID:
                track.codec = elem.get_str()
            elif elem_id == MATROSKA_AUDIO_SETTINGS_ID:
                for settings_elem in self.process_one_level(elem):
                    settings_elem_id = settings_elem.get_id()
                    if settings_elem_id == MATROSKA_AUDIO_SAMPLERATE_ID:
                        track.samplerate = settings_elem.get_float_value()
                    elif settings_elem_id == MATROSKA_AUDIO_CHANNELS_ID:
                        track.channels = settings_elem.get_value()
            else:
                self.process_track_common(elem, track)
        # Map the Matroska codec id to a friendlier name.
        if track.codec in FOURCCMap:
            track.codec = FOURCCMap[track.codec]
        elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
            track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
        elif track.codec.startswith('A_'):
            track.codec = track.codec[2:]
        track.id = len(self.audio)
        self.audio.append(track)
        return track
def process_chapters(self, chapters):
elements = self.process_one_level(chapters)
for elem in elements:
if elem.get_id() == MATROSKA_EDITION_ENTRY_ID:
buf = elem.get_data()
index = 0
while index < elem.get_len():
sub_elem = EbmlEntity(buf[index:])
if sub_elem.get_id() == MATROSKA_CHAPTER_ATOM_ID:
self.process_chapter_atom(sub_elem)
index += sub_elem.get_total_len() + sub_elem.get_crc_len()
    def process_chapter_atom(self, atom):
        """Parse one ChapterAtom into a core.Chapter and register it."""
        elements = self.process_one_level(atom)
        chap = core.Chapter()
        for elem in elements:
            elem_id = elem.get_id()
            if elem_id == MATROSKA_CHAPTER_TIME_START_ID:
                # Scale timecode to seconds (float)
                chap.pos = elem.get_value() / 1000000 / 1000.0
            elif elem_id == MATROSKA_CHAPTER_FLAG_ENABLED_ID:
                chap.enabled = elem.get_value()
            elif elem_id == MATROSKA_CHAPTER_DISPLAY_ID:
                # Matroska supports multiple (chapter name, language) pairs for
                # each chapter, so chapter names can be internationalized. This
                # logic will only take the last one in the list.
                for display_elem in self.process_one_level(elem):
                    if display_elem.get_id() == MATROSKA_CHAPTER_STRING_ID:
                        chap.name = display_elem.get_utf8()
            elif elem_id == MATROSKA_CHAPTER_UID_ID:
                # Register by uid so Tags targeting this chapter can find it.
                self.objects_by_uid[elem.get_value()] = chap
        log.debug('Chapter "%s" found', chap.name)
        chap.id = len(self.chapters)
        self.chapters.append(chap)
def process_attachments(self, attachments):
buf = attachments.get_data()
index = 0
while index < attachments.get_len():
elem = EbmlEntity(buf[index:])
if elem.get_id() == MATROSKA_ATTACHED_FILE_ID:
self.process_attachment(elem)
index += elem.get_total_len() + elem.get_crc_len()
    def process_attachment(self, attachment):
        """Parse one AttachedFile; keep image attachments named/described
        as a "cover" in ``self.thumbnail``."""
        elements = self.process_one_level(attachment)
        name = desc = ''
        # Mime type arrives as raw bytes, unlike name/desc which are UTF-8.
        mimetype = b''
        data = None
        for elem in elements:
            elem_id = elem.get_id()
            if elem_id == MATROSKA_FILE_NAME_ID:
                name = elem.get_utf8()
            elif elem_id == MATROSKA_FILE_DESC_ID:
                desc = elem.get_utf8()
            elif elem_id == MATROSKA_FILE_MIME_TYPE_ID:
                mimetype = elem.get_data()
            elif elem_id == MATROSKA_FILE_DATA_ID:
                data = elem.get_data()
        # Right now we only support attachments that could be cover images.
        # Make a guess to see if this attachment is a cover image.
        if mimetype.startswith(b"image/") and "cover" in (name+desc).lower() and data:
            self.thumbnail = data
        log.debug('Attachment "%s" found' % name)
    def process_tags(self, tags):
        """Parse the Tags element, attaching tag dicts to their targets.

        Each Tag child yields a core.Tags dict of its SimpleTags; the
        Targets child decides whether the dict applies to specific uids
        (tracks, chapters, ...) via ``objects_by_uid`` or globally to this
        Media object.
        """
        # Tags spec: http://www.matroska.org/technical/specs/tagging/index.html
        # Iterate over Tags children. Tags element children is a
        # Tag element (whose children are SimpleTags) and a Targets element
        # whose children specific what objects the tags apply to.
        for tag_elem in self.process_one_level(tags):
            # Start a new dict to hold all SimpleTag elements.
            tags_dict = core.Tags()
            # A list of target uids this tags dict applies too. If empty,
            # tags are global.
            targets = []
            for sub_elem in self.process_one_level(tag_elem):
                if sub_elem.get_id() == MATROSKA_SIMPLE_TAG_ID:
                    self.process_simple_tag(sub_elem, tags_dict)
                elif sub_elem.get_id() == MATROSKA_TARGETS_ID:
                    # Targets element: if there is no uid child (track uid,
                    # chapter uid, etc.) then the tags dict applies to the
                    # whole file (top-level Media object).
                    for target_elem in self.process_one_level(sub_elem):
                        target_elem_id = target_elem.get_id()
                        # NOTE(review): "MATRSOKA_" spelling matches how these
                        # constants are presumably declared elsewhere — verify.
                        if target_elem_id in (MATRSOKA_TAGS_TRACK_UID_ID, MATRSOKA_TAGS_EDITION_UID_ID,
                                MATRSOKA_TAGS_CHAPTER_UID_ID, MATRSOKA_TAGS_ATTACHMENT_UID_ID):
                            targets.append(target_elem.get_value())
                        elif target_elem_id == MATROSKA_TARGET_TYPE_VALUE_ID:
                            # Target types not supported for now. (Unclear how this
                            # would fit with kaa.metadata.)
                            pass
            if targets:
                # Assign tags to all listed uids
                for target in targets:
                    try:
                        self.objects_by_uid[target].tags.update(tags_dict)
                        self.tags_to_attributes(self.objects_by_uid[target], tags_dict)
                    except KeyError:
                        log.warning('Tags assigned to unknown/unsupported target uid %d', target)
            else:
                self.tags.update(tags_dict)
                self.tags_to_attributes(self, tags_dict)
    def process_simple_tag(self, simple_tag_elem, tags_dict):
        """
        Parse one SimpleTag element into ``tags_dict`` (mutated in place).

        Nested SimpleTags are collected into a core.Tags object whose
        ``value``/``langcode`` carry the parent's own value; repeated names
        accumulate into a list under the same key.
        """
        name = lang = value = children = None
        binary = False
        for elem in self.process_one_level(simple_tag_elem):
            elem_id = elem.get_id()
            if elem_id == MATROSKA_TAG_NAME_ID:
                name = elem.get_utf8().lower()
            elif elem_id == MATROSKA_TAG_STRING_ID:
                value = elem.get_utf8()
            elif elem_id == MATROSKA_TAG_BINARY_ID:
                value = elem.get_data()
                binary = True
            elif elem_id == MATROSKA_TAG_LANGUAGE_ID:
                lang = elem.get_utf8()
            elif elem_id == MATROSKA_SIMPLE_TAG_ID:
                # Nested SimpleTag: recurse into a child Tags dict.
                if children is None:
                    children = core.Tags()
                self.process_simple_tag(elem, children)
        if children:
            # Convert ourselves to a Tags object.
            children.value = value
            children.langcode = lang
            value = children
        else:
            # XXX: Python datetime objects have no way to express partial dates
            # (e.g. only year), which the Matroska spec allows. Therefore datetime
            # is not suitable for this. Until we figure out a proper way to express
            # dates, just pass the tag value directly.
            #if name.startswith('date_'):
            #    # Try to convert date to a datetime object.
            #    value = matroska_date_to_datetime(value)
            value = core.Tag(value, lang, binary)
        if name in tags_dict:
            # Multiple items of this tag name.
            if not isinstance(tags_dict[name], list):
                # Convert to a list
                tags_dict[name] = [tags_dict[name]]
            # Append to list
            tags_dict[name].append(value)
        else:
            tags_dict[name] = value
    def tags_to_attributes(self, obj, tags):
        """Map recognized tag names onto core attributes of *obj* or self.

        TAGS_MAP entries are (attribute, filter) pairs.  The value is set
        on *obj* when the attribute exists there, otherwise on the
        top-level Media object (self).
        """
        # Convert tags to core attributes.
        for name, tag in list(tags.items()):
            if isinstance(tag, dict):
                # Nested tags dict, recurse.
                self.tags_to_attributes(obj, tag)
                continue
            elif name not in TAGS_MAP:
                continue
            # NOTE(review): ``filter`` shadows the builtin of the same name.
            attr, filter = TAGS_MAP[name]
            if attr not in obj._keys and attr not in self._keys:
                # Tag is not in any core attribute for this object or global,
                # so skip.
                continue
            # Pull value out of Tag object or list of Tag objects. We expect scalar values
            # so in the case of lists (because there was more than one tag of the same name)
            # just pick the first.
            value = tag[0].value if isinstance(tag, list) else tag.value
            if filter:
                try:
                    value = filter(value)
                except Exception as e:
                    # Conversion failed; the raw value is still assigned below.
                    log.warning('Failed to convert tag to core attribute: %s', e)
            # Special handling for tv series recordings. The 'title' tag
            # can be used for both the series and the episode name. The
            # same is true for trackno which may refer to the season
            # and the episode number. Therefore, if we find these
            # attributes already set we try some guessing.
            if attr == 'trackno' and getattr(self, attr) is not None:
                # delete trackno and save season and episode
                self.season = self.trackno
                self.episode = value
                self.trackno = None
                continue
            if attr == 'title' and getattr(self, attr) is not None:
                # store current value of title as series and use current
                # value of title as title
                self.series = self.title
            if attr in obj._keys:
                setattr(obj, attr, value)
            else:
                setattr(self, attr, value)
# Module-level alias exposing the Matroska parser under the generic name.
Parser = Matroska
|
nilq/baby-python
|
python
|
#Author:D4Vinci
def ip2long(ip):
    """Convert a dotted-quad IPv4 address to its decimal form, as a string.

    Anything after a '/' (CIDR suffix) or ':' (port) is ignored.
    """
    ip = ip.split("/")[0].split(":")[0]
    first, second, third, fourth = (int(part) for part in ip.split("."))
    value = ((first * 256 + second) * 256 + third) * 256 + fourth
    return str(value)
def ip2hex(ip):
    """Return six hex-flavoured disguises of a dotted-quad IPv4 address.

    Indices 0-3 mix per-octet hex and decimal notation, 4 zero-pads each
    hex octet, and 5 is the whole address as a single hex number.
    """
    ip = ip.split("/")[0].split(":")[0]
    octets = [int(part) for part in ip.split(".")]
    hexed = [hex(octet) for octet in octets]
    plain = [str(octet) for octet in octets]
    padded = ["0x" + "0" * width + h.replace("0x", "")
              for width, h in zip((8, 6, 4, 2), hexed)]
    forms = [".".join(hexed[:keep] + plain[keep:]) for keep in (4, 3, 2, 1)]
    forms.append(".".join(padded))
    forms.append(hex(int(ip2long(ip))).replace("L", ""))
    return forms
def ip2Octal(ip):
    """Convert a dotted-quad IPv4 address to zero-padded octal notation."""
    parts = []
    for octet in ip.split('.'):
        parts.append(format(int(octet), '04o'))
    return '.'.join(parts)
def ip_as_urlencoded(ip):
    """Percent-encode the digits, dots and colons of an address.

    A CIDR suffix after '/' is dropped first; any other character is
    silently skipped, matching the original behaviour.
    """
    ip = ip.split("/")[0]
    encoded = []
    for ch in ip:
        if ch.isdigit():
            # Digits 0-9 are ASCII 0x30-0x39, i.e. "%3" + the digit itself.
            encoded.append("%3" + ch)
        elif ch == ".":
            encoded.append("%2E")
        elif ch == ":":
            encoded.append("%3A")
    return "".join(encoded)
def ip_as_url(ip):
    """Return six URL disguises that hide the address behind fake hosts."""
    long_form = ip2long(ip)
    hex_form = ip2hex(ip)[-1]
    encoded = ip_as_urlencoded(ip)
    octal_form = ip2Octal(ip)
    return [
        "http://howsecureismypassword.net@" + str(ip),
        "http://google.com@" + str(long_form),
        "http://facebook.com@" + str(hex_form),
        "http://" + str(encoded),
        "https://www.google.com@search@" + str(encoded),
        "http://anywebsite@" + str(octal_form),
    ]
# Interactive entry point (Python 2): read an ip/host, print disguised forms.
print "\n Cuteit - Make a malicious ip a bit cuter :D"
print " Note:don't type a long url because it's encode the ip only.!"
ip = raw_input(" ip > ")
# Strip a leading scheme so the converters see a bare address.
ip=ip.replace("http://","")
print "\n"
# Hex variants occupy indices 0-5, URL disguises 6-11.
for n,i in enumerate( ip2hex(ip) + ip_as_url(ip) ):
    if "http" not in i:
        print " ["+str(n)+"] "+"http://"+i
    else:
        print " ["+str(n)+"] "+i
print " [12] http://" + ip2Octal(ip)
print " [13] http://" + ip2long(ip)
|
nilq/baby-python
|
python
|
import os.path
activate_this = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.pyvenv/bin/activate_this.py')
exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this))
import syslog
from dotenv import dotenv_values
from keycloak import KeycloakOpenID
from keycloak.exceptions import KeycloakError
# Fallback account used when PAM supplies no user name.
DEFAULT_USER = "nobody"
# Tag log entries with the PID under the auth facility.
syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_AUTH)
# Module-level option store filled by parse_options().
options = {}
def parse_options(pamh, argv):
    """Parse PAM module arguments into the global ``options`` dict.

    Arguments look like ``key=value`` (stored as strings) or bare flags
    (stored as True).  If a ``config`` option names a dotenv file, its
    values are merged in; a relative path is resolved against this
    module's directory.  Returns ``pamh.PAM_SYSTEM_ERR`` on a config read
    failure (note: current callers ignore the return value).
    """
    global options
    for arg in argv[1:]:
        # Bug fix: split on the FIRST '=' only, so values may themselves
        # contain '=' (the original split('=') dropped everything after a
        # second '=').
        key, sep, value = arg.partition('=')
        if sep:
            options[key] = value
        else:
            options[key] = True
    try:
        config_file = options.get('config')
        if config_file:
            if not os.path.isabs(config_file):
                config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), config_file)
            options.update(dotenv_values(config_file))
    except Exception as e:
        pam_syslog(syslog.LOG_CRIT, pamh, "auth", "failed to read configuration: %s" % e)
        return pamh.PAM_SYSTEM_ERR
def pam_syslog(prio, pamh, choice, message):
    """Log *message* tagged with the PAM service name and module choice."""
    #print("pam_keycloak(%s:%s): %s" % (pamh.service, choice, message))
    syslog.syslog(prio, "pam_keycloak(%s:%s): %s" % (pamh.service, choice, message))
def pam_sm_authenticate(pamh, flags, argv):
    """PAM 'auth' entry point: validate the password against Keycloak.

    Flow: resolve the user, build the Keycloak client from ``options``,
    prompt for the password if PAM did not supply one, then request a
    token.  Token success means the credentials are valid.
    """
    parse_options(pamh, argv)
    try:
        user = pamh.get_user(None)
    except pamh.exception, e:
        return e.pam_result
    if user is None:
        pamh.user = DEFAULT_USER
    try:
        # Configure client
        keycloak_openid = KeycloakOpenID(server_url=options['server_url'],
                                         realm_name=options['realm_name'],
                                         client_id=options['client_id'],
                                         client_secret_key=options['client_secret_key'],
                                         verify=True)
        # Get WellKnow
        # NOTE(review): also serves as an early reachability check of the
        # server — a failure here maps to PAM_AUTHINFO_UNAVAIL.
        config_well_know = keycloak_openid.well_know()
    except KeycloakError, e:
        pam_syslog(syslog.LOG_NOTICE, pamh, "auth", "unable to authenticate for %s: %d %s" % (user, e.response_code, e.error_message))
        return pamh.PAM_AUTHINFO_UNAVAIL
    if pamh.authtok is None:
        # No cached authtok: ask the user via the PAM conversation.
        passmsg = pamh.Message(pamh.PAM_PROMPT_ECHO_OFF,
                               "Password: ")
        res = pamh.conversation(passmsg)
        pamh.authtok = res.resp
    try:
        token = keycloak_openid.token(user, pamh.authtok)
        # Potentially fetch the user info and check for specific claims here:
        # userinfo = keycloak_openid.userinfo(token['access_token'])
        return pamh.PAM_SUCCESS
    except KeycloakError as e:
        pam_syslog(syslog.LOG_NOTICE, pamh, "auth", "authentication failure for %s: %d %s" % (user, e.response_code, e.error_message))
        if e.response_code == 401:
            return pamh.PAM_AUTH_ERR
        return pamh.PAM_AUTHINFO_UNAVAIL
    # NOTE(review): unreachable — both branches above return.
    return pamh.PAM_AUTH_ERR
# The remaining PAM entry points are no-ops: this module only performs
# authentication, so credential, account, session and password management
# all report success unconditionally.
def pam_sm_setcred(pamh, flags, argv):
    return pamh.PAM_SUCCESS
def pam_sm_acct_mgmt(pamh, flags, argv):
    return pamh.PAM_SUCCESS
def pam_sm_open_session(pamh, flags, argv):
    return pamh.PAM_SUCCESS
def pam_sm_close_session(pamh, flags, argv):
    return pamh.PAM_SUCCESS
def pam_sm_chauthtok(pamh, flags, argv):
    return pamh.PAM_SUCCESS
|
nilq/baby-python
|
python
|
from dagster import execute_pipeline
from docs_snippets.concepts.configuration.config_mapping import example_pipeline
def test_config_mapping():
    """The example pipeline greets the default name, then an override."""
    # Default run config: the mapped config supplies "Sam".
    result = execute_pipeline(example_pipeline)
    assert result.success
    assert result.result_for_solid("hello_external").output_value() == "Hello, Sam!"
    # Explicit run config overrides the greeted name.
    run_config = {"solids": {"hello_external": {"config": {"name": "Bob"}}}}
    result = execute_pipeline(example_pipeline, run_config=run_config)
    assert result.success
    assert result.result_for_solid("hello_external").output_value() == "Hello, Bob!"
|
nilq/baby-python
|
python
|
import sys
# Python 2-only hack: reload() restores setdefaultencoding so the scraped
# UTF-8 text can be handled without explicit decoding everywhere.
reload(sys)
sys.setdefaultencoding('utf-8')
__author__ = 'but0n'
from multiprocessing import Pool, Manager
from bs4 import BeautifulSoup
import time, random, requests, sqlite3, os
# Manager-backed dict shared between pool workers and the terminal display.
server = Manager()
host = 'http://www.80s.tw'
screen = server.dict({'label' : 'NONE', 'url' : 'http://baidu.com', 'title':'none', 'IMG':'none', 'detail':'none', 'link':'none', 'index':0, 'total':10})
def mLog(opt):
    """Redraw the full-screen status display from the shared state dict.

    ``opt`` is the manager dict (see ``screen``); requires the keys label,
    url, title, IMG, detail, link, index and total.
    """
    os.system('clear')
    print('\033[41;30m MESSAGE: %s\033[m' % opt['label'])
    print('\033[46;30m PATH: %10s\033[m\n' % opt['url'])
    print('\033[0;35m TITLE\033[m:\t%s' % opt['title'])
    # %-formatting binds tighter than +, so these print the truncated field
    # followed by an appended ellipsis.
    print('\033[0;35m IMG\033[m:\t%s' % opt['IMG'][:30]+'...')
    print('\033[0;34m DETAIL\033[m:%s' % opt['detail'][:60]+'...')
    print('\033[0;36m LINK\033[m:\t%s' % opt['link'][:60]+'...')
    # NOTE(review): relies on Python 2 integer division; under Python 3 these
    # become floats and '>'*bar_status would raise.
    bar_status = opt['index']*40/opt['total']
    status = opt['index']*100/opt['total']
    print('\n[%-40s]%s(%d/%d)' % ('>'*bar_status, str(status)+'%', opt['index'], opt['total']))
class domPa(object):
    """Scraper for one 80s.tw movie listing page.

    Fetches ``host + path``, parses it, and run() fans each listed movie
    out to botTask() on a process pool, inserting the collected rows into
    the module-level sqlite connection ``db``.
    """
    def __init__(self, path, section = 'a', title = '.title', img = '.img', detail = '.detail'):
        self.path = path
        self.page = requests.get(host+path)
        self.status = self.page.status_code
        # CSS selectors; current callers only use the defaults.
        self.section = section
        self.img = img
        self.title = title
        self.detail = detail
        self.dom = BeautifulSoup(self.page.text, 'html.parser')
        # Pool of 5 worker processes for per-movie detail fetches.
        self.p = Pool(5)
    def run(self):
        """Scrape every list item on the page and persist the results."""
        screen['url'] = self.path
        screen['label'] = self.status
        screen['total'] = len(self.dom.select('.me1.clearfix')[0].select('li'))
        mLog(screen)
        result = []
        for e in self.dom.select('.me1.clearfix')[0].select('li'):
            result.append(self.p.apply_async(botTask, (e,)))
            # self.botTask(i,e)
        self.p.close()
        self.p.join()
        # NOTE(review): the loop variable and the caught exception both use
        # the name ``e`` below — confusing, though harmless as written.
        for res in result:
            for e in res.get():
                dat = (e[0],e[1],e[2],e[3])
                try:
                    db.execute('INSERT INTO movies VALUES(?,?,?,?)',dat)
                except Exception as e:
                    # Primary-key clash: this link is already cached.
                    screen['label'] = '*************SAME LINK!************'
                    mLog(screen)
        db.commit()
def botTask(e):
    """Pool worker: extract one movie's metadata and its download links.

    Updates the shared ``screen`` dict for the progress display and
    returns a list of [name, link, detail, img] rows for insertion.
    """
    dom_title_path_img = e.select('a')[0]
    movieName = dom_title_path_img.get('title')
    screen['title'] = movieName
    # '_src' is the lazy-load image attribute; [2:] drops the '//' prefix.
    movieImg = dom_title_path_img.select('img')[0].get('_src')[2:]
    screen['IMG'] = movieImg
    movieDetail = e.select('.tip')[0].get_text().strip()
    screen['detail'] = movieDetail[:50]+'...'
    urll = host + dom_title_path_img.get('href')
    pagee = requests.get(urll)
    dom = BeautifulSoup(pagee.text, 'html.parser')
    datas = []
    # One row per download link found on the movie's detail page.
    for ee in dom.select('span.xunlei')[0].select('a'):
        movieLink = ee.get('href')
        screen['link'] = movieLink
        mLog(screen)
        # robLog(i, 'Got it ! [%s]@ %s' % (movieName, movieLink))
        datas.append([movieName,movieLink, movieDetail,movieImg])
    # end = time.time()
    # robLog(i, 'Task done! Cost %0.2fs' % (end-start), '\033[0;36m')
    screen['index'] += 1
    return (datas)
# Entry point: open the local sqlite cache and crawl listing pages until
# one fails to load.
screen['label'] = 'Connect Database...'
db = sqlite3.connect('mv.db')
if db:
    try:
        # First run: create the cache table. On later runs this raises
        # (table already exists) and we fall straight through to the crawl.
        db.execute('CREATE TABLE movies(name text, link text primary key, detail text, img text)')
        screen['label'] = 'CREATE TABLE...'
        # BUG FIX: was mLog() with no argument, which raised TypeError.
        mLog(screen)
    finally:
        i = 1
        while i:
            bug = domPa('/movie/list/-----p' + str(i))
            if bug.status == 200:
                screen['index'] = 0
                screen['label'] = 'HTTP Connect Succeed! To [p' + str(i) + ']'
                mLog(screen)
                i += 1
                bug.run()
            else:
                screen['label'] = 'Checkout your network!'
                mLog(screen)
                i = 0
        db.close()
        # BUG FIX: was mLog('DONE') — mLog() indexes its argument with
        # string keys, so passing a plain string raised TypeError.
        screen['label'] = 'DONE'
        mLog(screen)
|
nilq/baby-python
|
python
|
import os
from setuptools import setup, find_packages
# Package metadata shared by the setup() call below.
DESCRIPTION = (
    "Graphical interface to manage Flatpak, Snap, AppImage and AUR packages"
)
AUTHOR = "Vinicius Moreira"
AUTHOR_EMAIL = "vinicius_fmoreira@hotmail.com"
NAME = 'bauh'
URL = "https://github.com/vinifmor/" + NAME
file_dir = os.path.dirname(os.path.abspath(__file__))
# Runtime dependencies are listed one per line in requirements.txt.
with open(file_dir + '/requirements.txt', 'r') as f:
    requirements = [line.strip() for line in f.readlines() if line]
# Execute only the first line of bauh/__init__.py, expected to be the
# `__version__ = "..."` assignment — avoids importing the package (and
# its dependencies) just to read the version.
with open(file_dir + '/{}/__init__.py'.format(NAME), 'r') as f:
    exec(f.readlines()[0])
setup(
    name=NAME,
    version=eval('__version__'),  # injected into globals by the exec above
    description=DESCRIPTION,
    long_description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    python_requires=">=3.5",
    url=URL,
    packages=find_packages(exclude=["tests.*", "tests"]),
    # Ship locale files and images for the core view and for every gem.
    package_data={NAME: ["view/resources/locale/*", "view/resources/img/*", "gems/*/resources/img/*", "gems/*/resources/locale/*"]},
    install_requires=requirements,
    test_suite="tests",
    entry_points={
        "console_scripts": [
            "{name}={name}.app:main".format(name=NAME)
        ]
    },
    include_package_data=True,
    license="zlib/libpng",
    classifiers=[
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ]
)
|
nilq/baby-python
|
python
|
import pygame as pg
class Snake(object):
    """A snake that follows a precomputed tile path at constant speed.

    ``tiles`` maps tile coordinates to pixel rectangles, ``path`` is the
    list of tile coordinates to traverse (the first entry is the starting
    position), ``length`` caps the body length and ``speed`` is in tiles
    per unit of the dt passed to update().  ``message`` optionally draws
    one letter per body segment.
    """
    def __init__(self, speed, tiles, path, length, message=None):
        self.tiles = tiles
        self.speed = speed
        self.length = length
        self.message = message
        self.body_color = pg.Color("red")
        self.head_color = pg.Color("blue")
        # Start occupying the first path tile; the rest of the path is
        # consumed one tile per move().
        self.body_coordinates = [path[0]]
        self.path = path
        self.path.pop(0)
        # How far the snakes is on the last block
        self.progress = 0
        self.last_dt = 0
    def update(self, dt):
        """Advance progress by dt*speed; step one tile when it exceeds 1."""
        self.progress = self.progress + (dt * self.speed)
        if self.progress > 1:
            self.move()
            self.progress = self.progress - 1
    def move(self):
        """Step the head onto the next path tile and trim the tail."""
        if self.path:
            self.body_coordinates.append(self.path[0])
            self.path.pop(0)
        if len(self.body_coordinates) > self.length:
            self.body_coordinates.pop(0)
    def draw(self, screen):
        """Render the body segments (with optional letters) and the head."""
        # draw body parts
        for i, position in enumerate(self.body_coordinates[:-1]):
            rectangle = self.tiles.get_rectangle(position)
            pg.draw.rect(screen, self.body_color, self.tiles.get_rectangle(position))
            if self.message:
                # NOTE(review): assumes message has at least as many letters
                # as there are body segments — confirm at call sites.
                font = pg.font.SysFont("arial", rectangle.height)
                letter_surface = font.render(self.message[i], True, pg.Color("Black"))
                screen.blit(letter_surface, rectangle.topleft)
        # draw head
        head_rectangle = self.tiles.get_rectangle(self.body_coordinates[-1])
        pg.draw.rect(screen, self.head_color, head_rectangle)
    def clicked_snake(self, position):
        """Return True if the pixel position hits any tile of the snake."""
        for coordinate in self.body_coordinates:
            if self.tiles.get_rectangle(coordinate).collidepoint(position):
                return True
        return False
|
nilq/baby-python
|
python
|
#-----------------------------------------------------------
# Download videos from YouTube.
#
# Program copied for testing, seen on Instagram: @pycodebr
#-----------------------------------------------------------
import os
from termcolor import colored
from pytube import YouTube
# Clear the screen on startup.
os.system('clear') or None
# Prompt for the video link and the folder where it should be saved.
print(colored('1: Copie e cole o endereço do vídeo do Youtbe', 'red'))
print(colored('2: Informe o caminho correto da pasta onde o vídeo será baixado ou digite enter para baixar o vídeo na pasta atual\n', 'red'))
print(colored('3: O formato baixado é MP4\n', 'blue'))
link = input('Digite o link do vídeo que irá baixar: ')
path = input('Digite o diretório onde irá salvar o video OU pressione enter: ')
yt = YouTube(link)
# Print the video details.
print("Título: ", yt.title)
print('Número de views: ', yt.views)
print('Tamanho do Vídeo: ', yt.length)
print('Avaliação do vídeo: ', yt.rating)
# Use the highest available resolution.
ys = yt.streams.get_highest_resolution()
# Start the download.
print('Baixando o vídeo na pasta informada......', path)
ys.download(path)
print(colored('Download finalizado...', 'green'))
|
nilq/baby-python
|
python
|
import convert
# Relative tolerance for two frequencies to count as the same key point.
keypointSimilarity = .80
# Fraction of key points that must match for an attempt to be accepted.
passwordSimilarity = .50
# Last computed match ratio (set by compare_data); readable by callers.
correctness = None
def compare_data(password, attempt):
    """Compare a stored audio password against an attempted recording.

    Both arguments are sequences of (frequency, ...) key points.  Silence
    is stripped from both ends, the matching key points are counted, and
    the module-level ``correctness`` ratio is updated.  Returns True when
    the ratio reaches ``passwordSimilarity``.
    """
    global correctness
    pass_array = stripOutZeros(password)
    attempt_array = stripOutZeros(attempt)
    longest = max(len(pass_array), len(attempt_array))
    if longest == 0:
        # Both recordings were pure silence; nothing to compare
        # (the original raised ZeroDivisionError here).
        correctness = 0.0
        return False
    num_matches = longest_common_substring(pass_array, attempt_array)
    correctness = float(num_matches) / longest
    return correctness >= passwordSimilarity
def longest_common_substring(password, attempt):
    """Length of the longest common *subsequence* of two key-point arrays.

    Despite the name, this is the classic bottom-up longest-common-
    subsequence DP (the else-branch takes the max of skipping either
    side); two entries match when their frequencies are closeEnough().
    """
    rows = len(password) + 1
    cols = len(attempt) + 1
    matrix = [[0] * cols for _ in range(rows)]
    for i in range(len(password)):
        for j in range(len(attempt)):
            if closeEnough(password[i][0], attempt[j][0]):
                matrix[i + 1][j + 1] = 1 + matrix[i][j]
            else:
                matrix[i + 1][j + 1] = max(matrix[i + 1][j], matrix[i][j + 1])
    return matrix[-1][-1]
def stripOutZeros(array):
    """Strip leading and trailing silence entries (frequency 0).

    ``array`` is a sequence of (frequency, ...) tuples; returns the slice
    between the first and last non-zero frequencies.

    Bug fix: the original trailing-strip loop indexed ``array[-h]`` with
    ``h`` starting at 0, i.e. ``array[0]`` — so any *leading* silence
    corrupted the trailing count and real data was cut off.  It also
    raised NameError on an empty input.
    """
    start = 0
    while start < len(array) and array[start][0] == 0:
        start += 1
    end = len(array)
    while end > start and array[end - 1][0] == 0:
        end -= 1
    return array[start:end]
def closeEnough(A, B, similarity=None):
    """Return True if frequencies A and B are within the allowed tolerance.

    The band is derived from the smaller value C: both A and B must lie
    strictly inside (C * similarity, C * (2 - similarity)).  ``similarity``
    defaults to the module-level ``keypointSimilarity`` (backward
    compatible); passing it explicitly generalizes the tolerance.
    """
    if similarity is None:
        similarity = keypointSimilarity
    C = min(A, B)
    lowerBound = C * similarity
    upperBound = C * (2 - similarity)
    return lowerBound < A < upperBound and lowerBound < B < upperBound
# password1 = convert.process_audio("Test_Files\\lowHigh.wav")
# attempt1 = convert.process_audio("Test_Files\\lowHigh1.wav")
# attempt2 = convert.process_audio("Test_Files\\high.wav")
# attempt3 = convert.process_audio("Test_Files\\highLow.wav")
#
#
# print(compare_data(password1, password1))
# print correctness
# print(compare_data(password1, attempt1))
# print correctness
# print(compare_data(password1, attempt2))
# print correctness
# print(compare_data(password1, attempt3))
# print correctness
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Pocket PiAP
# ......................................................................
# Copyright (c) 2017-2020, Kendrick Walls
# ......................................................................
# Licensed under MIT (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# ......................................................................
# http://www.github.com/reactive-firewall/PiAP-python-tools/LICENSE.rst
# ......................................................................
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ......................................................................
# Import the shared test context, trying a plain import first and falling
# back to a package-relative one; any failure aborts test collection with
# an explicit error.
try:
	try:
		import context
	except Exception as ImportErr:  # pragma: no branch
		ImportErr = None
		del ImportErr
		from . import context
	if context.__name__ is None:
		raise ImportError("[CWE-758] Failed to import context")
	else:
		from context import unittest as unittest
except Exception:
	raise ImportError("[CWE-758] Failed to import test context")
class MoreClientChecksTestSuite(unittest.TestCase):
"""More Unit test cases for piaplib.lint.check_clients_status."""
	def test_piaplib_lint_import_syntax(self):
		"""Test case importing code."""
		theResult = False
		try:
			# Importing every module under test must succeed without error.
			from .context import piaplib
			from piaplib import pocket
			from piaplib import lint as lint
			from piaplib import pku as pku
			from pku import interfaces as interfaces
			from lint import check as check
			from lint import clients_check_status as clients_check_status
			for depends in [piaplib, pocket, pku, interfaces, lint, check, clients_check_status]:
				if depends.__name__ is None:
					theResult = False
			theResult = True
		except Exception as impErr:
			print(str(type(impErr)))
			print(str(impErr))
			theResult = False
		assert theResult
	def test_clients_check_status_aa(self):
		"""Test case for piaplib.lint.clients_check_status
		with show_client("1.2.3.4", False, False, "eth0")"""
		theResult = False
		from .context import piaplib
		from piaplib import lint as lint
		from piaplib import pku as pku
		from pku import interfaces as interfaces
		from lint import clients_check_status as clients_check_status
		# Sanity-check that every dependency actually imported.
		for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
			if depends.__name__ is None:
				theResult = False
		try:
			# show_client must return a string for a plain (no-html, no-mac)
			# query on the first known interface.
			temp_val = clients_check_status.show_client(
				"1.2.3.4",
				False,
				False,
				interfaces.INTERFACE_CHOICES[0]
			)
			self.assertIsNotNone(temp_val)
			self.assertIsInstance(temp_val, str, "Test output is NOT a string")
			theResult = isinstance(temp_val, str)
		except Exception as err:
			# Dump full failure details before failing the test.
			print(str(""))
			print(str(type(err)))
			print(str(err))
			print(str((err.args)))
			print(str(""))
			err = None
			del err
			theResult = False
		assert theResult
	def test_clients_check_status_ab(self):
		"""Test case for piaplib.lint.clients_check_status
		with show_client("1.2.3.4", True, False, "eth0")"""
		theResult = False
		from .context import piaplib
		from piaplib import lint as lint
		from piaplib import pku as pku
		from pku import interfaces as interfaces
		from lint import clients_check_status as clients_check_status
		# Sanity-check that every dependency actually imported.
		for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
			if depends.__name__ is None:
				theResult = False
		try:
			# Same query as test _aa but with the second flag enabled.
			temp = clients_check_status.show_client(
				"1.2.3.4",
				True,
				False,
				interfaces.INTERFACE_CHOICES[0]
			)
			self.assertIsNotNone(temp)
			self.assertIsInstance(temp, str, "Test output is Not a string")
			theResult = isinstance(temp, str)
		except Exception as err:
			# Dump full failure details before failing the test.
			print(str(""))
			print(str(type(err)))
			print(str(err))
			print(str((err.args)))
			print(str(""))
			err = None
			del err
			theResult = False
		assert theResult
	def test_clients_check_status_ac(self):
		"""Test case for piaplib.lint.clients_check_status
		with show_client("1.2.3.4", True, True, "eth0")"""
		theResult = False
		from .context import piaplib
		from piaplib import lint as lint
		from piaplib import pku as pku
		from pku import interfaces as interfaces
		from lint import clients_check_status as clients_check_status
		# Sanity-check that every dependency actually imported.
		for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
			if depends.__name__ is None:
				theResult = False
		try:
			# Both optional flags enabled; output must still be a string.
			temp = clients_check_status.show_client(
				"1.2.3.4",
				True,
				True,
				interfaces.INTERFACE_CHOICES[0]
			)
			self.assertIsNotNone(temp)
			self.assertIsInstance(temp, str, "Test output is Not a string")
			theResult = isinstance(temp, str)
		except Exception as err:
			# Dump full failure details before failing the test.
			print(str(""))
			print(str(type(err)))
			print(str(err))
			print(str((err.args)))
			print(str(""))
			err = None
			del err
			theResult = False
		assert theResult
	def test_clients_check_status_ad(self):
		"""Test case for piaplib.lint.clients_check_status
		with show_client("1.2.3.4", False, True, "eth0")"""
		theResult = False
		from .context import piaplib
		from piaplib import lint as lint
		from piaplib import pku as pku
		from pku import interfaces as interfaces
		from lint import clients_check_status as clients_check_status
		# Sanity-check that every dependency actually imported.
		for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
			if depends.__name__ is None:
				theResult = False
		try:
			# Only the third flag enabled; output must still be a string.
			temp = clients_check_status.show_client(
				"1.2.3.4",
				False,
				True,
				interfaces.INTERFACE_CHOICES[0]
			)
			self.assertIsNotNone(temp)
			self.assertIsInstance(temp, str, "Test output is NOT a string")
			theResult = isinstance(temp, str)
		except Exception as err:
			# Dump full failure details before failing the test.
			print(str(""))
			print(str(type(err)))
			print(str(err))
			print(str((err.args)))
			print(str(""))
			err = None
			del err
			theResult = False
		assert theResult
def test_clients_check_status_ae(self):
    """show_client("1.2.3.4", *, *, "JUNK") must still return a str."""
    theResult = False
    from .context import piaplib
    from piaplib import lint as lint
    from lint import clients_check_status as clients_check_status
    # Sanity check that every dependency module resolved with a real name.
    for module in (piaplib, lint, clients_check_status):
        if module.__name__ is None:
            theResult = False
    try:
        # A bogus interface name must not make show_client return None.
        for flag_a, flag_b in ((False, False), (False, True), (True, False)):
            self.assertIsNotNone(
                clients_check_status.show_client("1.2.3.4", flag_a, flag_b, "JUNK")
            )
        output = clients_check_status.show_client("1.2.3.4", True, True, "JUNK")
        self.assertIsNotNone(output)
        self.assertIsInstance(output, str, "Test output is NOT a string")
        theResult = isinstance(output, str)
    except Exception as err:
        # Dump the failure details, then mark the test as failed.
        for detail in ("", type(err), err, err.args, ""):
            print(str(detail))
        del err
        theResult = False
    assert theResult
def test_clients_check_status_ba(self):
    """get_client_name(None, ...) must return None."""
    theResult = False
    from .context import piaplib
    from piaplib import lint as lint
    from piaplib import pku as pku
    from pku import interfaces as interfaces
    from lint import clients_check_status as clients_check_status
    # Sanity check that every dependency module resolved with a real name.
    for module in (piaplib, pku, interfaces, lint, clients_check_status):
        if module.__name__ is None:
            theResult = False
    try:
        output = clients_check_status.get_client_name(
            None, False, interfaces.INTERFACE_CHOICES[0]
        )
        self.assertIsNone(output)
        theResult = True
    except Exception as err:
        # Dump the failure details, then mark the test as failed.
        for detail in ("", type(err), err, err.args, ""):
            print(str(detail))
        del err
        theResult = False
    assert theResult
def test_clients_check_status_bb(self):
    """get_client_name with a None interface must return "UNKNOWN"."""
    theResult = False
    from .context import piaplib
    from piaplib import lint as lint
    from piaplib import pku as pku
    from pku import interfaces as interfaces
    from lint import clients_check_status as clients_check_status
    # Sanity check that every dependency module resolved with a real name.
    for module in (piaplib, pku, interfaces, lint, clients_check_status):
        if module.__name__ is None:
            theResult = False
    try:
        output = clients_check_status.get_client_name("1.2.3.4", False, None)
        self.assertEqual(output, str("UNKNOWN"))
        theResult = True
    except Exception as err:
        # Dump the failure details, then mark the test as failed.
        for detail in ("", type(err), err, err.args, ""):
            print(str(detail))
        del err
        theResult = False
    assert theResult
def test_clients_check_status_raw_cc(self):
    """get_client_sta_status_raw() must return a non-None value."""
    theResult = False
    from .context import piaplib
    from piaplib import lint as lint
    from piaplib import pku as pku
    from pku import interfaces as interfaces
    from lint import clients_check_status as clients_check_status
    # Sanity check that every dependency module resolved with a real name.
    for module in (piaplib, pku, interfaces, lint, clients_check_status):
        if module.__name__ is None:
            theResult = False
    try:
        self.assertIsNotNone(clients_check_status.get_client_sta_status_raw())
        theResult = True
    except Exception as err:
        # Dump the failure details, then mark the test as failed.
        for detail in ("", type(err), err, err.args, ""):
            print(str(detail))
        del err
        theResult = False
    assert theResult
def test_clients_check_status_raw_ce(self):
    """get_client_lease_status_raw() must return a non-None value."""
    theResult = False
    from .context import piaplib
    from piaplib import lint as lint
    from piaplib import pku as pku
    from pku import interfaces as interfaces
    from lint import clients_check_status as clients_check_status
    # Sanity check that every dependency module resolved with a real name.
    for module in (piaplib, pku, interfaces, lint, clients_check_status):
        if module.__name__ is None:
            theResult = False
    try:
        self.assertIsNotNone(clients_check_status.get_client_lease_status_raw())
        theResult = True
    except Exception as err:
        # Dump the failure details, then mark the test as failed.
        for detail in ("", type(err), err, err.args, ""):
            print(str(detail))
        del err
        theResult = False
    assert theResult
def test_client_insane_or_no_mac_handled(self):
    """Tests the impossible state for client mac given bad values."""
    theResult = True
    try:
        from lint import clients_check_status as clients_check_status
    except Exception:
        import lint.clients_check_status as clients_check_status
    if clients_check_status.__name__ is None:
        theResult = False
    else:
        try:
            # Both a syntactically valid MAC and None must be handled.
            for candidate in (str("11:AA:22:33:44:55"), None):
                self.assertIsNotNone(
                    clients_check_status.get_client_lease_status(candidate)
                )
        except Exception as err:
            # Dump the failure details, then mark the test as failed.
            for detail in ("", type(err), err, err.args, ""):
                print(str(detail))
            del err
            theResult = False
    assert theResult
# Allow running this test module directly.
# NOTE(review): assumes `unittest` was imported earlier in the file — confirm.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
import tkinter as tk
import os, sys
browser = None
def login(user, passw, check_browser):
    """Log into wfg.xcelsolutions.com and open the course content.

    Parameters:
        user, passw: credentials typed into the login form.
        check_browser: 1 to show the Chrome window, 0 to run headless.

    Raises:
        NameError: when login fails (the course button never becomes clickable).
    """
    global browser

    def resource_path(relative_path):
        """Resolve a bundled resource (PyInstaller _MEIPASS or source dir)."""
        try:
            base_path = sys._MEIPASS
        except AttributeError:  # narrowed from bare Exception: only the missing attr is expected
            base_path = os.path.dirname(__file__)
        return os.path.join(base_path, relative_path)

    chrome_driver = resource_path('./driver/chromedriver.exe')
    if check_browser == 1:
        browser = webdriver.Chrome(chrome_driver)
    elif check_browser == 0:
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--window-size=1920x1080")
        browser = webdriver.Chrome(options=chrome_options, executable_path=chrome_driver)
    # Login
    browser.get('https://wfg.xcelsolutions.com')
    username = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="reduxFormInputField1"]')))
    password = browser.find_element_by_xpath('//*[@id="reduxFormInputField3"]')
    username.send_keys(user)
    password.send_keys(passw)
    password.send_keys(Keys.RETURN)
    # Navigate to course directory
    # TODO universal navigation for different courses
    # Check for bad login: the course button only appears after a successful login.
    try:
        course_button = WebDriverWait(browser, 10).until\
            (EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div[3]/div/div[2]/div/div[2]/div/div/div[2]')))
        course_button.click()
    except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        raise NameError('Bad Login')
    choose_course = WebDriverWait(browser, 10).until\
        (EC.element_to_be_clickable((By.XPATH, '//*[@id="root"]/div/div[3]/div/div[2]/div/div[3]/div[2]/div[1]/div/div/div/div/div[2]/div/div/div[1]/div/button')))
    choose_course.click()
    # Close dialog box if it exists and open course
    try:
        choose_course_content = WebDriverWait(browser, 10).until\
            (EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div[3]/div/div[2]/div/div[4]/div[1]/div[2]/div/div[4]/div[1]/div[2]/div/div/div/div')))
        choose_course_content.click()
    except Exception:  # content not clickable yet: dismiss the dialog first
        dialog_box = WebDriverWait(browser, 5).until \
            (EC.element_to_be_clickable((By.XPATH, '//*[@id="root"]/div/div[4]/div[1]/div[2]/button')))
        dialog_box.click()
        choose_course_content = WebDriverWait(browser, 10).until \
            (EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div[3]/div/div[2]/div/div[4]/div[1]/div[2]/div/div[4]/div[1]/div[2]/div/div/div/div')))
        choose_course_content.click()
    get_hours()
# Navigates bot to course page, begin idle
def get_hours():
    """Open the course, click through the identity check, and schedule the
    save/re-enter cycle.

    NOTE(review): save() is scheduled at ~9m56s and get_hours() again at
    ~10m05s — presumably the site credits time in <=10-minute blocks;
    confirm against the site's behavior.
    """
    global after_id
    global after_id2
    # print('Farm start')
    complete_course = WebDriverWait(browser, 10).until\
        (EC.element_to_be_clickable((By.XPATH, '//*[@id="root"]/div/div[3]/div/div[2]/div/div[4]/div[1]/div[2]/div/div[3]/div[2]/div[2]/div/div[1]/div/div[2]/div/button')))
    complete_course.click()
    # Click past identification check (lives inside an iframe)
    frame = WebDriverWait(browser, 10).until \
        (EC.presence_of_element_located((By.XPATH, '//*[@id="root"]/div/div[3]/iframe')))
    browser.switch_to.frame(frame)
    identify_button = WebDriverWait(browser, 10).until\
        (EC.element_to_be_clickable((By.XPATH, '//*[@id="identify_student_submit"]')))
    identify_button.click()
    # Schedule the save and the next course entry via the Tk event loop.
    after_id = window.after(596000, save)
    after_id2 = window.after(605000, get_hours)
# Save and exit from course
def save():
    """Click the course's exit button so the accumulated time is saved."""
    global browser
    exit_button = WebDriverWait(browser, 10).until(
        EC.element_to_be_clickable((By.XPATH, '//*[@id="exitButton"]'))
    )
    exit_button.click()
    # print('Farmed 10 minutes')
# Start/Stop web bot after button event
def run():
    """Toggle the idle bot when the Run/Stop button is pressed.

    Run: lock the credential widgets and start the bot.
    Stop: save progress, quit the browser and re-enable the widgets.
    """
    global browser
    global btn
    global ent_user
    global ent_pass
    global C2
    global user
    global passw
    global check_browser
    global after_id
    global after_id2
    after_id = None
    after_id2 = None
    user1 = user.get()
    passw1 = passw.get()
    check_browser1 = check_browser.get()

    def _cancel_timers():
        # BUG FIX: both ids are guarded — after_cancel(None) raises in tkinter
        # and the old code called it on possibly-unset ids.
        if after_id is not None:
            window.after_cancel(after_id)
        if after_id2 is not None:
            window.after_cancel(after_id2)

    def _enable_inputs():
        # Re-enable the credential widgets after the bot stops.
        ent_user.configure(state='normal')
        ent_pass.configure(state='normal')
        C2.configure(state='normal')

    if btn.get() == 'Run':
        btn.set('Stop')
        ent_user.configure(state='disabled')
        ent_pass.configure(state='disabled')
        C2.configure(state='disabled')
        # Run bot, if login error exit, if arbitrary error reset and rerun bot
        # If bot is not working, GUI will stay unresponsive
        # If wrong login, bot quits and prompts user to try to run again
        try:
            login(user1, passw1, check_browser1)
        except NameError:
            # Bad credentials: reset the UI and let the user retry.
            browser.quit()
            btn.set('Run')
            _enable_inputs()
        except Exception:
            # Arbitrary failure: reset timers and restart the bot.
            browser.quit()
            _cancel_timers()
            btn.set('Run')
            run()
    elif btn.get() == 'Stop':
        btn.set('Run')
        _enable_inputs()
        try:
            save()
            time.sleep(3)
            browser.quit()
            _cancel_timers()
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            browser.quit()
            _cancel_timers()
# Create a new window (fixed size, not resizable)
window = tk.Tk()
window.title("Web Idler")
window.resizable(False, False)
# Create a new frame for data entries and checkboxes
frm = tk.Frame(relief=tk.SUNKEN, borderwidth=3)
frm.pack()
# Username row: label + entry bound to the `user` StringVar
user = tk.StringVar()
lbl_user = tk.Label(master=frm, text="Username:")
ent_user = tk.Entry(master=frm, width=50, textvariable=user)
lbl_user.grid(row=0, column=0, sticky="e")
ent_user.grid(row=0, column=1)
# Password row: masked entry bound to the `passw` StringVar
passw = tk.StringVar()
lbl_pass = tk.Label(master=frm, text="Password:")
ent_pass = tk.Entry(master=frm, width=50, textvariable=passw, show="*")
lbl_pass.grid(row=1, column=0, sticky="e")
ent_pass.grid(row=1, column=1)
# Toggle show password
def showPass():
    """Mirror the 'Show password' checkbox into the entry's mask character."""
    state = check_pass.get()
    if state == 1:
        ent_pass.configure(show="")
    elif state == 0:
        ent_pass.configure(show="*")
# Checkboxes
check_pass = tk.IntVar()     # 1 => reveal password characters
check_browser = tk.IntVar()  # 1 => show the Chrome window, 0 => headless
C1 = tk.Checkbutton(frm, text="Show password", variable=check_pass, onvalue=1, offvalue=0, command=showPass)
C2 = tk.Checkbutton(frm, text="Display browser", variable=check_browser, onvalue=1, offvalue=0)
C1.grid(row=2, column=1, sticky="w")
C2.grid(row=3, column=1, sticky="w")
# Create a new frame for Run/Stop button
frm_buttons = tk.Frame()
frm_buttons.pack(fill=tk.X, ipadx=5, ipady=0)
# Create the 'Run/Stop' button; its label doubles as the bot's state
btn = tk.StringVar()
btn.set('Run')
btn_run = tk.Button(master=frm_buttons, textvariable=btn, command=run)
btn_run.pack(side=tk.TOP, ipadx=10, pady=2.5)
# Start the application (blocks until the window is closed)
window.mainloop()
|
nilq/baby-python
|
python
|
import collections
import importlib
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from unet3d.utils import get_logger
logger = get_logger('HDF5Dataset')
class HDF5Dataset(Dataset):
    """Dataset of HDF5 volumes, one file per sample.

    Each file must contain a 'raw' dataset (image volume) and a 'label'
    dataset (segmentation volume); both are reshaped to (1, 16, 256, 256)
    before being returned.
    """

    def __init__(self, file_path, phase):
        """file_path: directory holding the .h5 samples (e.g. datasets/train_data/).
        phase: one of 'train', 'val' or 'test' (informational only)."""
        assert phase in ['train', 'val', 'test']
        # Use the module logger instead of print() so output respects the
        # configured logging setup.
        logger.info('Phase now: %s', phase)
        self.file_path = file_path  # datasets/train_data/
        # NOTE(review): assumes file_path ends with a path separator — confirm.
        self.data_dir_list = glob.glob(self.file_path + '*')

    def __getitem__(self, idx):
        raws, labels = self._loader(self.data_dir_list[idx])
        # Normalize the image only; labels are kept as-is.
        img_data = self._normalization(raws)
        seg_data = labels
        # Add the leading channel axis expected downstream.
        img_data = img_data.reshape(1, 16, 256, 256)
        seg_data = seg_data.reshape(1, 16, 256, 256)
        return img_data, seg_data

    def __len__(self):
        return len(self.data_dir_list)

    @staticmethod
    def _loader(path):
        """Read the 'raw' and 'label' arrays from one HDF5 file."""
        with h5py.File(path, 'r') as input_file:
            raws = input_file['raw'][()]
            labels = input_file['label'][()]
        raws = np.array(raws)
        labels = np.array(labels)
        return raws, labels

    @staticmethod
    def _normalization(img_data):
        """Zero-mean/unit-std normalization over the non-zero voxels,
        then pin the minimum value(s) back to zero."""
        img_nonzero = img_data[np.nonzero(img_data)]
        img = (img_data - np.mean(img_nonzero)) / np.std(img_nonzero)
        img[img == img.min()] = 0
        return img

    @staticmethod
    def crop_img(img_data):
        """Crop to the centered region of interest; result shape (4, 128, 160, 160)."""
        img_data = img_data[:, 13:141, 40:200, 40:200]
        return img_data
def get_train_loaders(config):
    """Build the train/val DataLoaders described by config['loaders']."""
    assert 'loaders' in config, 'Could not find data loaders configuration'
    loaders_config = config['loaders']
    logger.info('Creating training and validation set loaders...')
    num_workers = loaders_config.get('num_workers', 1)
    logger.info(f'Number of workers for train/val dataloader: {num_workers}')
    batch_size = loaders_config.get('batch_size', 1)
    logger.info(f'Batch size for train/val loader: {batch_size}')
    # when training with volumetric data use batch_size of 1 due to GPU memory constraints
    datasets = {
        'train': HDF5Dataset(loaders_config['train_path'], phase='train'),
        'val': HDF5Dataset(loaders_config['val_path'], phase='val'),
    }
    return {
        split: DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        for split, ds in datasets.items()
    }
def get_test_loaders(config):
    """Build the test DataLoader described by config['datasets']."""
    assert 'datasets' in config, 'Could not find data sets configuration'
    datasets_config = config['datasets']
    num_workers = datasets_config.get('num_workers', 1)
    logger.info(f'Number of workers for the dataloader: {num_workers}')
    batch_size = datasets_config.get('batch_size', 1)
    logger.info(f'Batch size for dataloader: {batch_size}')
    # construct datasets lazily; no shuffling for evaluation
    test_dataset = HDF5Dataset(datasets_config['test_path'], phase='test')
    loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return {'test': loader}
|
nilq/baby-python
|
python
|
from .aggregate_representation import AggregateRepresentationTransformation
from .aggregate_representation_softmax import AggregateRepresentationTransformationSoftmax
from .edge_state_update import EdgeStateUpdateTransformation
from .input_sequence_direct import InputSequenceDirectTransformation
from .new_nodes_vote import NewNodesVoteTransformation
from .new_nodes_inform import NewNodesInformTransformation
from .node_state_update import NodeStateUpdateTransformation
from .direct_reference_update import DirectReferenceUpdateTransformation
from .output_category import OutputCategoryTransformation
from .output_sequence import OutputSequenceTransformation
from .output_set import OutputSetTransformation
from .propagation import PropagationTransformation
from .sequence_aggregate_summary import SequenceAggregateSummaryTransformation
|
nilq/baby-python
|
python
|
import appuifw as ui
import globalui
from pytriloquist import Const
from pytriloquist.btclient import BluetoothError
from pytriloquist.gui import Dialog
from pytriloquist.gui.settings import SettingsDialog
from pytriloquist.gui.app import ApplicationsDialog
from pytriloquist.gui.input import InputDialog
class IntroDialog(Dialog):
    """
    Application starting point.

    Shows a two-item list (Connect / Settings) plus an options menu.
    NOTE(review): `_(...)` is assumed to be a gettext-style translation
    helper installed globally elsewhere in the package — confirm.
    """
    def __init__(self, app):
        Dialog.__init__(self, app)
    def get_title(self):
        """Returns the dialog title.
        """
        return Const.APP_TITLE
    def init_ui(self):
        """Initializes the user interface.
        """
        self.main_dialog = MainDialog(self.app, self)
        self.settings_dialog = SettingsDialog(self.app, self)
        # Options menu; "Open" activates the currently selected list row.
        self.menu = [
            (_(u"Open") , self.opt_list_observe),
            (_(u"About"), self.about),
            (_(u"Exit") , self.app.exit),
        ]
        # (id, label, callback) triples backing the main list below.
        self.options = [
            (1, _("Connect") , self.connect),
            (2, _("Settings"), self.settings)
        ]
        self.opt_list = ui.Listbox([opt[1] for opt in self.options], self.opt_list_observe)
    def display(self):
        """Displays the dialog on the device.
        """
        ui.app.screen = "normal"
        ui.app.set_tabs([], None)
        ui.app.menu = self.menu
        ui.app.body = self.opt_list
        ui.app.exit_key_handler = self.app.exit
    def opt_list_observe(self):
        """Function called when a mode is selected from the list.
        """
        selected = self.options[self.opt_list.current()]
        # Invoke the selected option's callback.
        selected[2]()
    def connect(self):
        """Connects to the server.
        """
        try:
            self.app.btclient.connect()
        except BluetoothError, e:
            # Python 2 (PyS60) except syntax; show the localized error text.
            ui.note(_(e.msg), "error")
        else:
            self.main_dialog.execute()
    def settings(self):
        """Opens the Settings dialog.
        """
        self.settings_dialog.execute()
    def about(self):
        """Opens the About dialog.
        """
        data = {
            "title"  : Const.APP_TITLE,
            "version": Const.APP_VERSION,
            "year"   : Const.APP_YEAR,
            "url"    : Const.APP_URL,
            "author" : Const.APP_AUTHOR,
            "lauthor": _(u"Authors:"),
        }
        text = u"%(title)s v%(version)s (c) %(year)s\n" \
                "%(url)s\n\n" \
                "%(lauthor)s\n" \
                "%(author)s" % data
        globalui.global_msg_query(text, _(u"About"), 0)
class MainDialog(Dialog):
    """
    This dialog displays the list of applications and input methods.
    """
    def __init__(self, app, parent):
        Dialog.__init__(self, app, parent)
    def get_title(self):
        """Returns the dialog title.
        """
        return Const.APP_TITLE
    def init_ui(self):
        """Initializes the user interface.
        """
        # (label, callback) pairs shown as tabs.
        self.tabs = [
            (_(u"Apps"), self.open_apps),
        ]
        self.menu = [
            (_(u"Orientation"), (
                (_(u"Automatic"), self.set_orientation("automatic")),
                (_(u"Landscape"), self.set_orientation("landscape")),
                (_(u"Portrait") , self.set_orientation("portrait")),
            )),
            (_(u"Disconnect"), self.back)
        ]
        # Dialogs
        self.apps_dialog = ApplicationsDialog(self.app, self)
        if ui.touch_enabled():
            # Only works with touch-enabled devices
            self.input_dialog = InputDialog(self.app, self)
            self.tabs.append((_(u"Input"), self.open_input))
    def set_orientation(self, orientation):
        """Returns a function that changes the display orientation.
        """
        # Closure used as a menu callback (menu items take no arguments).
        def fn():
            ui.app.orientation = orientation
        return fn
    def display(self):
        """Displays the dialog on the device.
        """
        ui.app.set_tabs([t[0] for t in self.tabs], self.tab_handler)
        ui.app.exit_key_handler = self.app.exit
        # Select the first tab by default.
        self.tab_handler(0)
    def back(self):
        """Executes the parent dialog.
        """
        Dialog.back(self)
        self.disconnect()
    def disconnect(self):
        """Disconnects from the server.
        """
        try:
            self.app.btclient.close()
        except BluetoothError, e:
            # Python 2 (PyS60) except syntax; show the localized error text.
            ui.note(_(e.msg), "error")
    def open_apps(self):
        """Opens the applications dialog.
        """
        self.apps_dialog.execute()
    def open_input(self):
        """Opens the input dialog.
        """
        if ui.touch_enabled():
            self.input_dialog.execute()
        else:
            ui.note(_(u"Touch not enabled."), "error")
    def tab_handler(self, index):
        """Handles tab events.
        """
        # Dispatch to the callback registered for the selected tab.
        [t[1] for t in self.tabs][index]()
|
nilq/baby-python
|
python
|
import tensorflow as tf
def nalu(input_layer, num_outputs, epsilon=1e-6):
    """Apply a Neural Arithmetic Logic Unit (NALU) to `input_layer`.

    Arguments:
        input_layer - input tensor; its last dimension is the input size.
        num_outputs - dimension of the output vector.
        epsilon - small shift to prevent log(0).

    Returns:
        y - tensor of shape (input_layer.shape.dims[0], num_outputs).
    """
    input_dim = input_layer.shape.dims[-1].value
    shape = (input_dim, num_outputs)
    with tf.name_scope("NALU"):
        W_hat = tf.Variable(tf.truncated_normal(shape, stddev=5), name="W_hat")
        M_hat = tf.Variable(tf.truncated_normal(shape, stddev=5), name="M_hat")
        G = tf.Variable(tf.truncated_normal(shape, stddev=0.02), name="G")
        # Weight matrix biased towards values in {-1, 0, 1}.
        W = tf.multiply(tf.tanh(W_hat), tf.sigmoid(M_hat))
        # Multiplicative path (exp of a sum of logs) and additive path.
        mul_path = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + epsilon), W))
        add_path = tf.matmul(input_layer, W)
        # Learned gate blends the two paths.
        gate = tf.sigmoid(tf.matmul(input_layer, G))
        return tf.multiply(gate, add_path) + tf.multiply(1 - gate, mul_path)
|
nilq/baby-python
|
python
|
from unifuncnet.fetchers.compound_fetchers.compound_fetcher import *
from unifuncnet.utils.rhea_sqlite_connector import RheaSqliteConnector
class CompoundFetcherRhea(CompoundFetcher, RheaSqliteConnector):
    """Fetch a ChEBI compound from Rhea and link it to its reactions."""

    def __init__(self, compound_id, memory_storage=None):
        CompoundFetcher.__init__(self, compound_id=compound_id, memory_storage=memory_storage)
        self.db = 'rhea'
        self.set_convergence_args()
        self.compound = self.get_compound_rhea()
        self.add_compound()

    def set_convergence_args(self):
        # Reaction ids discovered for this compound, consumed during convergence.
        self.convergence_args['reactions'] = set()

    def get_compound_rhea(self):
        """Build the Compound instance and record its Rhea reaction ids."""
        result = Compound({'chebi': self.compound_id})
        self.convergence_args['reactions'] = self.fetch_reactions_rhea_from_chebi(self.compound_id)
        return result

    def converge_compound_global(self):
        self.converge_compound_to_reaction()

    def converge_compound_to_reaction(self):
        """Link every recorded reaction id back to this compound."""
        reaction_ids = self.convergence_args['reactions']
        if not reaction_ids:
            return
        for reaction_id in reaction_ids:
            print(f'Linking from compound {self.compound_id} in {self.db} to reaction {reaction_id}')
            self.find_reaction(query_id=reaction_id)
# Ad-hoc manual smoke test: fetch ChEBI compound 7580 and dump its info.
if __name__ == '__main__':
    search = CompoundFetcherRhea('7580')
    search.compound.get_all_info()
|
nilq/baby-python
|
python
|
import json
from unittest import TestCase
from django.test.client import Client
from mock import patch
from regcore_write.views.notice import *
class ViewsNoticeTest(TestCase):
    """Tests for the regcore_write notice PUT endpoint."""

    def test_add_not_json(self):
        """A body that is not valid JSON must yield HTTP 400."""
        url = '/notice/docdoc'
        response = Client().put(url, content_type='application/json',
                                data='{Invalid}')
        self.assertEqual(400, response.status_code)

    @patch('regcore_write.views.notice.db')
    def test_add_label_success(self, db):
        """Valid bodies are stored and cfr_part/cfr_parts are normalized to a list."""
        url = '/notice/docdoc'
        # No cfr information: stored with an empty cfr_parts list.
        # (Unused `response` locals removed; only the mock's call args matter.)
        Client().put(url, content_type='application/json',
                     data=json.dumps({'some': 'struct'}))
        self.assertTrue(db.Notices.return_value.put.called)
        args = db.Notices.return_value.put.call_args[0]
        self.assertEqual('docdoc', args[0])
        self.assertEqual({'some': 'struct', 'cfr_parts': []}, args[1])
        # Singular cfr_part: wrapped into a one-element cfr_parts list.
        Client().put(
            url, content_type='application/json',
            data=json.dumps({'some': 'struct', 'cfr_part': '1111'}))
        self.assertTrue(db.Notices.return_value.put.called)
        args = db.Notices.return_value.put.call_args[0]
        self.assertEqual('docdoc', args[0])
        self.assertEqual({'some': 'struct', 'cfr_parts': ['1111']}, args[1])
        # Plural cfr_parts: stored verbatim.
        Client().put(
            url, content_type='application/json',
            data=json.dumps({'some': 'struct', 'cfr_parts': ['111', '222']}))
        self.assertTrue(db.Notices.return_value.put.called)
        args = db.Notices.return_value.put.call_args[0]
        self.assertEqual('docdoc', args[0])
        self.assertEqual({'some': 'struct', 'cfr_parts': ['111', '222']},
                         args[1])
|
nilq/baby-python
|
python
|
# Write a greeting message to a text file.
# A context manager guarantees the file is closed even if write() fails;
# the unused `text` variable holding write()'s return value was dropped.
message = "Hi all! Welcome from CEEO Innovations!"
with open("output.txt", "w") as f:
    f.write(message)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse
import os
cgroup = '/sys/fs/cgroup'
class Containers(object):
    """Collection of all LXC containers found under the devices cgroup."""

    def __init__(self, glob_dir: str = 'devices/lxc') -> None:
        """Discover container names by listing the cgroup directory."""
        self.containers = []
        for name in filter(lambda d: os.path.isdir(os.path.join(cgroup, glob_dir, d)),
                           os.listdir(os.path.join(cgroup, glob_dir))):
            self.containers.append(Container(name))

    def cpu_usage(self) -> float:
        """Get sum of all containers cpu usage (seconds)."""
        return sum(map(lambda c: c.get_cpu, self.containers))

    def print_stats(self, args: object) -> None:
        """Print container usage statistics, sorted per args.sort."""
        cpu_usage = self.cpu_usage()

        def sort_by(method: str) -> callable:
            # BUG FIX: sorting by 'percent' previously returned the bound
            # get_percent *method object* (it takes the total as an argument),
            # and comparing method objects raises TypeError in sorted().
            # Compute the percentage against the total instead.
            if method == 'percent':
                return lambda c: c.get_percent(cpu_usage)
            if method in ('name', 'cpu', 'memory', 'procs'):
                return lambda c: getattr(c, 'get_{0}'.format(method))
            return lambda c: c.get_cpu

        print('{0:26} {1:18} {2:5} {3} {4}'.format('name ', 'memory', 'cpu', 'cpu%', 'procs'))
        print('-' * 62)
        template = '{0.get_name:20} {0.get_memory:10.2f} M {0.get_cpu:15.2f} {1:6.2f} {0.get_procs}'
        sort = getattr(args, 'sort')
        for container in sorted(self.containers, key=sort_by(sort), reverse=(sort != 'name')):
            print(template.format(container, container.get_percent(cpu_usage)))
class Container(object):
    """Define a container object with its lazily-read cgroup metrics."""

    def __init__(self, name: str) -> None:
        """Class constructor"""
        self.name = name
        # Metric values read from the cgroup fs, cached by metric name.
        self._cache = {}

    def _read_value(self, controller: str, filename: str) -> str:
        """Read one value file from this container's cgroup hierarchy."""
        path = os.path.join(cgroup, controller, self.name, filename)
        with open(path, 'r') as fh:
            return fh.read().strip()

    @property
    def get_name(self) -> str:
        return self.name

    @property
    def get_memory(self) -> float:
        """Return memory usage, converted to MiB and rounded."""
        if 'memory' not in self._cache:
            raw = self._read_value('memory/lxc', 'memory.usage_in_bytes')
            self._cache['memory'] = round(int(raw) / 1024 / 1024, 2)
        return self._cache.get('memory')

    @property
    def get_cpu(self) -> float:
        """Return cpu usage in seconds"""
        if 'cpu' not in self._cache:
            raw = self._read_value('cpu,cpuacct/lxc', 'cpuacct.usage')
            self._cache['cpu'] = round(int(raw) / 10 ** 9, 2)
        return self._cache.get('cpu')

    def get_percent(self, total: float = 0.0) -> float:
        """Get cpu usage in percent of *total* (cached after the first call)."""
        if 'percent' not in self._cache:
            self._cache['percent'] = round(self.get_cpu * 100 / total, 2)
        return self._cache.get('percent')

    @property
    def get_procs(self) -> int:
        """Get number of processes"""
        if 'procs' not in self._cache:
            self._cache['procs'] = int(self._read_value('pids/lxc', 'pids.current'))
        return self._cache.get('procs')
# CLI entry point.
# NOTE(review): print_stats also accepts --sort percent, which the help
# text does not mention.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='LXC 2.0 Statistics utility')
    parser.add_argument('--sort', type=str, default='cpu', help='Sort column (could be name, cpu, memory or procs)')
    Containers().print_stats(parser.parse_args())
|
nilq/baby-python
|
python
|
# encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QPaintEngine(__Shiboken.Object):
    """Generator-produced stub of PySide.QtGui.QPaintEngine.

    All method bodies are placeholders (the real implementation lives in
    the compiled QtGui.pyd); signatures are unknown to the generator.
    Do not edit by hand beyond documentation.
    """
    def begin(self, *args, **kwargs): # real signature unknown
        pass
    def clearDirty(self, *args, **kwargs): # real signature unknown
        pass
    def coordinateOffset(self, *args, **kwargs): # real signature unknown
        pass
    def drawEllipse(self, *args, **kwargs): # real signature unknown
        pass
    def drawImage(self, *args, **kwargs): # real signature unknown
        pass
    def drawLines(self, *args, **kwargs): # real signature unknown
        pass
    def drawPath(self, *args, **kwargs): # real signature unknown
        pass
    def drawPixmap(self, *args, **kwargs): # real signature unknown
        pass
    def drawPoints(self, *args, **kwargs): # real signature unknown
        pass
    def drawPolygon(self, *args, **kwargs): # real signature unknown
        pass
    def drawRects(self, *args, **kwargs): # real signature unknown
        pass
    def drawTextItem(self, *args, **kwargs): # real signature unknown
        pass
    def drawTiledPixmap(self, *args, **kwargs): # real signature unknown
        pass
    def end(self, *args, **kwargs): # real signature unknown
        pass
    def hasFeature(self, *args, **kwargs): # real signature unknown
        pass
    def isActive(self, *args, **kwargs): # real signature unknown
        pass
    def isExtended(self, *args, **kwargs): # real signature unknown
        pass
    def paintDevice(self, *args, **kwargs): # real signature unknown
        pass
    def painter(self, *args, **kwargs): # real signature unknown
        pass
    def setActive(self, *args, **kwargs): # real signature unknown
        pass
    def setDirty(self, *args, **kwargs): # real signature unknown
        pass
    def setSystemClip(self, *args, **kwargs): # real signature unknown
        pass
    def setSystemRect(self, *args, **kwargs): # real signature unknown
        pass
    def syncState(self, *args, **kwargs): # real signature unknown
        pass
    def systemClip(self, *args, **kwargs): # real signature unknown
        pass
    def systemRect(self, *args, **kwargs): # real signature unknown
        pass
    def testDirty(self, *args, **kwargs): # real signature unknown
        pass
    def type(self, *args, **kwargs): # real signature unknown
        pass
    def updateState(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Placeholder property descriptors emitted by the stub generator.
    active = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    extended = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    gccaps = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    selfDestruct = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    state = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    # Enum value aliases re-exported at class level by the generator.
    AllDirty = PySide.QtGui.QPaintEngine.DirtyFlag.AllDirty
    AllFeatures = PySide.QtGui.QPaintEngine.PaintEngineFeature.AllFeatures
    AlphaBlend = PySide.QtGui.QPaintEngine.PaintEngineFeature.AlphaBlend
    Antialiasing = PySide.QtGui.QPaintEngine.PaintEngineFeature.Antialiasing
    BlendModes = PySide.QtGui.QPaintEngine.PaintEngineFeature.BlendModes
    Blitter = PySide.QtGui.QPaintEngine.Type.Blitter
    BrushStroke = PySide.QtGui.QPaintEngine.PaintEngineFeature.BrushStroke
    ConicalGradientFill = PySide.QtGui.QPaintEngine.PaintEngineFeature.ConicalGradientFill
    ConstantOpacity = PySide.QtGui.QPaintEngine.PaintEngineFeature.ConstantOpacity
    ConvexMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.ConvexMode
    CoreGraphics = PySide.QtGui.QPaintEngine.Type.CoreGraphics
    Direct3D = PySide.QtGui.QPaintEngine.Type.Direct3D
    DirtyBackground = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBackground
    DirtyBackgroundMode = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBackgroundMode
    DirtyBrush = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBrush
    DirtyBrushOrigin = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBrushOrigin
    DirtyClipEnabled = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyClipEnabled
    DirtyClipPath = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyClipPath
    DirtyClipRegion = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyClipRegion
    DirtyCompositionMode = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyCompositionMode
    DirtyFlag = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.DirtyFlag'>"
    DirtyFlags = None # (!) real value is "<type 'DirtyFlags'>"
    DirtyFont = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyFont
    DirtyHints = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyHints
    DirtyOpacity = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyOpacity
    DirtyPen = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyPen
    DirtyTransform = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyTransform
    LinearGradientFill = PySide.QtGui.QPaintEngine.PaintEngineFeature.LinearGradientFill
    MacPrinter = PySide.QtGui.QPaintEngine.Type.MacPrinter
    MaskedBrush = PySide.QtGui.QPaintEngine.PaintEngineFeature.MaskedBrush
    MaxUser = PySide.QtGui.QPaintEngine.Type.MaxUser
    ObjectBoundingModeGradients = PySide.QtGui.QPaintEngine.PaintEngineFeature.ObjectBoundingModeGradients
    OddEvenMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.OddEvenMode
    OpenGL = PySide.QtGui.QPaintEngine.Type.OpenGL
    OpenGL2 = PySide.QtGui.QPaintEngine.Type.OpenGL2
    OpenVG = PySide.QtGui.QPaintEngine.Type.OpenVG
    PaintBuffer = PySide.QtGui.QPaintEngine.Type.PaintBuffer
    PaintEngineFeature = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.PaintEngineFeature'>"
    PaintEngineFeatures = None # (!) real value is "<type 'PaintEngineFeatures'>"
    PainterPaths = PySide.QtGui.QPaintEngine.PaintEngineFeature.PainterPaths
    PaintOutsidePaintEvent = PySide.QtGui.QPaintEngine.PaintEngineFeature.PaintOutsidePaintEvent
    PatternBrush = PySide.QtGui.QPaintEngine.PaintEngineFeature.PatternBrush
    PatternTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PatternTransform
    Pdf = PySide.QtGui.QPaintEngine.Type.Pdf
    PerspectiveTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PerspectiveTransform
    Picture = PySide.QtGui.QPaintEngine.Type.Picture
    PixmapTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PixmapTransform
    PolygonDrawMode = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.PolygonDrawMode'>"
    PolylineMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.PolylineMode
    PorterDuff = PySide.QtGui.QPaintEngine.PaintEngineFeature.PorterDuff
    PostScript = PySide.QtGui.QPaintEngine.Type.PostScript
    PrimitiveTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PrimitiveTransform
    QuickDraw = PySide.QtGui.QPaintEngine.Type.QuickDraw
    QWindowSystem = PySide.QtGui.QPaintEngine.Type.QWindowSystem
    RadialGradientFill = PySide.QtGui.QPaintEngine.PaintEngineFeature.RadialGradientFill
    Raster = PySide.QtGui.QPaintEngine.Type.Raster
    RasterOpModes = PySide.QtGui.QPaintEngine.PaintEngineFeature.RasterOpModes
    SVG = PySide.QtGui.QPaintEngine.Type.SVG
    Type = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.Type'>"
    User = PySide.QtGui.QPaintEngine.Type.User
    WindingMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.WindingMode
    Windows = PySide.QtGui.QPaintEngine.Type.Windows
    X11 = PySide.QtGui.QPaintEngine.Type.X11
|
nilq/baby-python
|
python
|
import asyncio
import aioredis
import jinja2
import peewee_async
import aiohttp_jinja2
import aiohttp_debugtoolbar
from aiohttp import web
from aiohttp_session import session_middleware
from aiohttp_session.redis_storage import RedisStorage
import settings
from settings import logger
from helpers.middlewares import request_user_middleware
from helpers.template_tags import tags
from helpers.models import database
async def create_app(loop):
    """Build the aiohttp application and a server coroutine.

    Wires up Redis-backed sessions, jinja2 templates, the peewee-async
    database manager, the URL routes and (in DEBUG mode) the debug
    toolbar.

    Returns:
        (server_coroutine, handler, app) — the caller must run the
        server coroutine on the loop to actually start listening.
    """
    redis_pool = await aioredis.create_pool(settings.REDIS_CON, loop=loop)
    # Session middleware first, then the user middleware (which presumably
    # reads the session to resolve the request user — confirm in helpers).
    middlewares = [session_middleware(RedisStorage(redis_pool)), request_user_middleware]
    if settings.DEBUG:
        middlewares.append(aiohttp_debugtoolbar.middleware)
    # init application
    app = web.Application(loop=loop, middlewares=middlewares)
    app.redis_pool = redis_pool
    app.wslist = {}  # websocket peers grouped by room; drained in shutdown()
    jinja_env = aiohttp_jinja2.setup(
        app, loader=jinja2.FileSystemLoader(settings.TEMPLATE_DIR),
        context_processors=[aiohttp_jinja2.request_processor], )
    # Expose the custom template tags as jinja globals.
    jinja_env.globals.update(tags)
    if settings.DEBUG:
        aiohttp_debugtoolbar.setup(app, intercept_redirects=False)
    # db conn
    database.init(**settings.DATABASE)
    app.database = database
    # Force all DB access through the async manager — no sync queries.
    app.database.set_allow_sync(False)
    app.objects = peewee_async.Manager(app.database)
    # make routes
    # NOTE(review): imported here rather than at module top — presumably to
    # avoid a circular import with the view modules; confirm before moving.
    from urls import routes
    for route in routes:
        app.router.add_route(**route)
    app.router.add_static('/static', settings.STATIC_DIR, name='static')
    app.logger = logger
    handler = app.make_handler(access_log=logger)
    serv_generator = loop.create_server(handler, settings.HOST, settings.PORT)
    return serv_generator, handler, app
async def shutdown(server, app, handler):
    """Gracefully stop the server.

    Order matters: notify websocket peers, stop accepting connections,
    close the Redis pool and DB manager, then let in-flight handlers
    finish before final cleanup.
    """
    # Tell every connected websocket peer that we are going down.
    for room in app.wslist.values():
        for peer in room.values():
            # NOTE(review): in recent aiohttp versions send_json is a
            # coroutine and would need an await — confirm against the
            # pinned aiohttp version.
            peer.send_json({'text': 'Server shutdown'})
    server.close()
    await server.wait_closed()
    app.redis_pool.close()
    await app.redis_pool.wait_closed()
    await app.objects.close()
    await app.shutdown()
    # Give in-flight requests up to 10 seconds to complete.
    await handler.finish_connections(10.0)
    await app.cleanup()
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # create_app returns the server coroutine; starting it is a second step.
    serv_generator, handler, app = loop.run_until_complete(create_app(loop))
    server = loop.run_until_complete(serv_generator)
    logger.debug(f'Start server {server.sockets[0].getsockname()}')
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        logger.debug('Keyboard Interrupt ^C')
    finally:
        # Always run the full shutdown sequence, even on Ctrl-C.
        logger.debug('Stop server begin')
        loop.run_until_complete(shutdown(server, app, handler))
        loop.close()
        logger.debug('Stop server end')
|
nilq/baby-python
|
python
|
import os
from elasticsearch import Elasticsearch
from elasticsearch import helpers
class Pes:
    """Minimal wrapper around an Elasticsearch cluster for index and bulk
    operations.

    Connection settings come from the ES_GATEWAY / ES_PORT environment
    variables; the port defaults to 9200 when ES_PORT is unset.
    """

    def __init__(self):
        self.client = Elasticsearch([
            {"host": os.getenv("ES_GATEWAY"),
             "port": os.getenv("ES_PORT") or 9200}
        ])

    def create_index(self, index_name: str):
        # Create the index with a single shard, no replicas, and a
        # text-typed 'subnet' field.
        self.client.indices.create(index=index_name, body={
            'settings': {
                'index': {
                    'number_of_shards': 1,
                    'number_of_replicas': 0,
                }
            },
            'mappings': {
                'properties': {
                    'subnet': {'type': 'text'}
                }
            }
        })

    def drop_index(self, index_name: str):
        # Delete the index; 400/404 (e.g. index missing) are ignored so
        # dropping a non-existent index is a no-op.
        self.client.indices.delete(index=index_name, ignore=[400, 404])

    def get_index_info(self, index_name: str):
        # Fetch the index metadata (settings + mappings).
        index_info = self.client.indices.get(index=index_name)
        return index_info

    def do_bulk(self, index_name: str, op_type: str):
        """Run a one-document bulk request of the given op type, then
        close the connection.

        Bug fix: the original called ``client.close()`` on an undefined
        bare name (NameError at runtime) — the client lives on
        ``self.client``.
        """
        action = [{
            '_op_type': op_type,
            '_index': index_name,
            '_id': 'test',
            '_source': {
                'subnets': '218038272',
                'mask': '16',
            }
        }]
        helpers.bulk(self.client, action)
        # Close the underlying connection once the bulk request completes.
        self.client.close()
|
nilq/baby-python
|
python
|
import csv
class AverageMeter(object):
    """Tracks the most recent value plus a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class Logger(object):
    """Tab-separated CSV logger: writes a header row on creation, then one
    row per ``log`` call."""

    def __init__(self, path, header):
        # The header is written immediately so the file is valid even if
        # no data rows are ever logged.
        self.log_file = open(path, 'w')
        self.logger = csv.writer(self.log_file, delimiter='\t')
        self.logger.writerow(header)
        self.header = header

    def __del__(self):
        # Bug fix: the original defined ``__del(self)`` (name-mangled to
        # _Logger__del), so the finalizer never ran and the file handle
        # leaked.  ``__del__`` is the actual finalizer hook.
        self.log_file.close()

    def log(self, values):
        """Write one row; *values* must contain every header column."""
        write_values = []
        for col in self.header:
            assert col in values
            write_values.append(values[col])
        self.logger.writerow(write_values)
        # Flush so progress is visible even if the run crashes mid-way.
        self.log_file.flush()
def load_value_file(file_path):
    """Read a single float value stored in the file at *file_path*."""
    with open(file_path, 'r') as f:
        return float(f.read().rstrip('\n\r'))
def calculate_accuracy(outputs, targets):
    """Top-1 accuracy: fraction of rows of *outputs* whose highest-scoring
    class index equals the corresponding entry of *targets*."""
    n_samples = targets.size(0)
    _, top_pred = outputs.topk(1, 1, True)
    hits = top_pred.t().eq(targets.view(1, -1))
    return hits.float().sum().item() / n_samples
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        list of floats — one precision@k per requested k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Bug fix: ``correct`` derives from transposed/expanded tensors and
        # can be non-contiguous, making ``.view(-1)`` raise in modern
        # PyTorch; ``reshape`` copies only when required and is otherwise
        # identical.
        correct_k = correct[:k].reshape(-1).float().sum(0).item()
        res.append(correct_k / batch_size)
    return res
import os
from itertools import combinations, chain, product
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
def save_gif(frames, file_path, vmax=255, vmin=0, interval=3000/25):
    """Save a sequence of single-channel frames as an animated GIF.

    Args:
        frames: iterable of arrays whose last axis is a singleton channel
            (each frame is reshaped to drop it before display).
        file_path: output path; rendered with the imagemagick writer.
        vmax, vmin: grayscale display range.
        interval: delay between frames in milliseconds.
    """
    fig = plt.figure(figsize=(10, 10))
    # Remove all margins so each frame fills the whole figure.
    fig.subplots_adjust(
        left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    ims = []
    plt.xticks([])
    plt.yticks([])
    plt.grid(True)
    for frame in frames:
        # Drop the trailing channel axis and render as 8-bit grayscale.
        m = plt.imshow(
            (frame).reshape(*frame.shape[:-1]).astype(np.uint8),
            cmap=plt.cm.gray, vmax=vmax, vmin=vmin)
        plt.axis('off')
        ims.append([m])
    ani = animation.ArtistAnimation(fig, ims, interval=interval, repeat=False)
    ani.save(file_path, writer="imagemagick")
    plt.close()
def gen_hama_photo(h, w):
    """Build an 8x8x1 binary mask with rows *h* and columns *w* set to 1.

    *h* and *w* are numpy index arrays; an empty array selects nothing.
    """
    mask = np.zeros((8, 8, 1))
    if h.size:
        mask[h, :] = 1
    if w.size:
        mask[:, w] = 1
    return mask
def gen_hama_photo_patterns():
    """Enumerate every distinct row/column-union mask on an 8x8 grid.

    All subsets of rows are crossed with all subsets of columns; duplicate
    masks are removed, and the all-zero / all-one extremes are dropped.
    """
    subsets = [combo for r in range(9) for combo in combinations(range(8), r)]
    masks = np.array([
        gen_hama_photo(np.array(rows), np.array(cols))
        for rows, cols in product(subsets, repeat=2)
    ])
    # assert masks.shape[0] == 2**16
    unique_masks = np.unique(masks, axis=0)
    # Slice off the first (all-zero) and last (all-one) patterns.
    return unique_masks[1:-1]
def fit_hama_photo(raw, mask=None):
    """Snap each of the 16 frames in *raw* to its nearest hama-photo mask
    (minimum mean squared error).

    Args:
        raw: array indexable as raw[i] for i in range(16), each an
            (8, 8, 1) frame.
        mask: candidate pattern stack; defaults to
            ``gen_hama_photo_patterns()``, computed lazily on first use.
            Bug fix: the original evaluated the expensive pattern
            enumeration as a default argument, i.e. at *import* time,
            even when the function was never called.
    """
    if mask is None:
        mask = getattr(fit_hama_photo, "_patterns", None)
        if mask is None:
            # Cache on the function so the default set is built only once,
            # matching the original once-per-process behavior.
            mask = fit_hama_photo._patterns = gen_hama_photo_patterns()
    res = np.array([
        mask[np.argmin(np.mean(np.square(mask - raw[i]), axis=(1, 2, 3)))]
        for i in range(16)])
    return res
def fit_hitomi(raw):
    """One-hot masks selecting, per pixel, which of the 16 frames of *raw*
    holds the maximum value (ties go to the lowest frame index)."""
    winners = raw.argmax(axis=0)
    template = np.ones((8, 8, 1))
    res = np.array([template * k == winners for k in range(16)]).astype(np.uint8)
    return res
def fit_rand(raw, th=0):
    """Binarize *raw*: 1 where the value is >= *th* (inclusive), else 0."""
    return (raw >= th).astype(np.uint8)
# def fit_hama_photo_tensor(raw, mask=gen_hama_photo_patterns()):
# from keras import backend as K
# import tensorflow as tf
# mask_tensor = tf.reshape(
# tf.convert_to_tensor(mask, dtype=tf.float32), (-1, 8, 8, 1))
# n = 16
# i1 = tf.constant(1)
# res0 = tf.reshape(mask_tensor[K.argmin(K.mean(K.square(
# mask_tensor - raw[0]), axis=(1, 2, 3)))],
# (1, 8, 8, 1))
# c = lambda i, res: i < n
# b = lambda i, res: (
# i+1,
# tf.concat([res, tf.reshape(mask_tensor[K.argmin(K.mean(K.square(
# mask_tensor - raw[i]), axis=(1, 2, 3)))],
# (1, 8, 8, 1))], axis=0))
# _, res = tf.while_loop(
# c, b, loop_vars=[i1, res0],
# shape_invariants=[i1.get_shape(), tf.TensorShape((None, 8, 8, 1))])
# return res
#
#
# def fit_hitomi_tensor(raw):
# from keras import backend as K
# import tensorflow as tf
# i1 = tf.constant(1, dtype=tf.int64)
# agmx = K.argmax(raw, axis=0)
# res0 = tf.reshape(
# K.cast(K.equal(agmx, 0), dtype=K.floatx()), (1, 8, 8, 1))
# c = lambda i, res: i < 16
# b = lambda i, res: (
# i+1,
# tf.concat([
# res,
# tf.reshape(
# K.cast(K.equal(agmx, i), dtype=K.floatx()), (1, 8, 8, 1))
# ], axis=0)
# )
# _, res = tf.while_loop(
# c, b, loop_vars=[i1, res0],
# shape_invariants=[i1.get_shape(), tf.TensorShape((None, 8, 8, 1))])
# return res
#
#
# def fit_rand_tensor(raw, th=0):
# from keras import backend as K
# res = K.cast(K.greater_equal(raw, th), K.floatx())
# return res
|
nilq/baby-python
|
python
|
import requests
from configparser import ConfigParser
import os
import json
import pandas as pd
# Base endpoint for the CoWIN "find centres by latitude/longitude" API.
# NOTE(review): this constant appears unused — get_nearby_centres inlines
# the same URL; consider referencing it there.
lat_long_request_url = 'https://cdn-api.co-vin.in/api/v2/appointment/centers/public/findByLatLong?'
class DetailsAssigner:
    """Extracts the user's dose / age / vaccine preferences from a parsed
    ConfigParser object."""

    def __init__(self, *args) -> None:
        self.config_obj = args[0]
        # Dose 1 vs dose 2, from the first entry of the [dose_type] section.
        dose_number = int(self.config_obj.items('dose_type')[0][1])
        if dose_number == 1:
            self.dose_type = 'available_capacity_dose1'
        else:
            self.dose_type = 'available_capacity_dose2'
        self.age_details = self.config_obj.items('age_details')[0][1]
        # A vaccine is "preferred" only when exactly one of the two flags
        # is non-zero; otherwise fall back to 'any'.
        covishield = int(self.config_obj.get('vaccine_name', 'covishield'))
        covaxin = int(self.config_obj.get('vaccine_name', 'covaxin'))
        if covishield != 0 and covaxin == 0:
            self.vaccine_name = 'COVISHIELD'
        elif covaxin != 0 and covishield == 0:
            self.vaccine_name = 'COVAXIN'
        else:
            self.vaccine_name = 'any'
        print('checking for age {} and dose number {}'.format(self.age_details, self.dose_type))
class ObjectModifier:
    """Loads an INI configuration file into a ConfigParser instance."""

    def __init__(self, *args) -> None:
        self.config_file = args[0]

    def get_config(self):
        """Parse the config file into ``self.config``.

        Bug fix: ``optionxform`` must be assigned *before* ``read()`` —
        the original set it afterwards, by which point every option key
        had already been lower-cased, so the case-preserving transform
        had no effect (and mixed-case lookups failed).
        """
        self.config = ConfigParser()
        # Preserve option-key case instead of the default lower-casing.
        self.config.optionxform = str
        self.config.read(self.config_file)
class VaccineGenerator(DetailsAssigner, ObjectModifier):
    """Finds nearby CoWIN vaccination centres and alerts when open slots
    match the configured preferences.

    All lookups go through the public CoWIN and geojs.io HTTP APIs;
    preference parsing is inherited from DetailsAssigner.
    """

    def __init__(self, *args) -> None:
        super(VaccineGenerator, self).__init__(*args)

    def get_latitude_longitude(self):
        """Geolocate this machine via its public IP using geojs.io.

        Returns:
            dict with 'lat' and 'long' keys, ready to use as query
            params for the CoWIN findByLatLong endpoint.
        """
        lat_long_request_param = {}
        my_ip_resp = requests.get('https://get.geojs.io/v1/ip.json').json()['ip']
        lat_long_url = 'https://get.geojs.io/v1/ip/geo/' + my_ip_resp + '.json'
        address_response = requests.get(lat_long_url).json()
        lat_long_request_param['lat'] = address_response['latitude']
        lat_long_request_param['long'] = address_response['longitude']
        return lat_long_request_param

    def get_nearby_centres(self):
        """Populate ``self.centre_ids`` with ids of centres near this machine."""
        self.centre_ids = set()
        lat_long_request_param = self.get_latitude_longitude()
        resp = requests.get('https://cdn-api.co-vin.in/api/v2/appointment/centers/public/findByLatLong?', params=lat_long_request_param)
        centre_dict = json.loads(resp.text)
        for dict_elem in centre_dict['centers']:
            self.centre_ids.add(dict_elem['center_id'])

    def filter_specific_vaccine(self, vaccine_available_dict):
        """Keep only centres whose first slot offers the preferred vaccine.

        If the preference is 'any', the input dict is returned unchanged.
        NOTE(review): only the first slot (val[0]) per centre is inspected
        and kept — later matching slots at the same centre are dropped;
        confirm this is intended.
        """
        new_dict = {}
        if self.vaccine_name != 'any':
            for key, val in vaccine_available_dict.items():
                # val[0][5] is the vaccine name in the slot row built by
                # parse_json (4 centre fields + date precede it).
                if val[0][5] == self.vaccine_name:
                    new_dict[key] = val[0]
            return new_dict
        return vaccine_available_dict

    def parse_json(self, vaccine_available_centre_detailed_list):
        """Flatten centre/session JSON into {centre_id: [slot rows]}.

        Each slot row is [name, address, block_name, pincode, date,
        vaccine, dose_type, capacity, min_age_limit, slots].  Only
        sessions with capacity for the requested dose and a minimum age
        below the configured age are kept.
        NOTE(review): the age check uses '<' — a user whose age exactly
        equals min_age_limit is excluded; confirm '<=' was not intended.
        """
        dose_type = self.dose_type
        age = int(self.age_details)
        vaccine_available_dict = {}
        for centre_idx in range(len(vaccine_available_centre_detailed_list)):
            centre_id = vaccine_available_centre_detailed_list[centre_idx]['center_id']
            centre_details = [vaccine_available_centre_detailed_list[centre_idx]['name'], vaccine_available_centre_detailed_list[centre_idx]['address'], vaccine_available_centre_detailed_list[centre_idx]['block_name'], vaccine_available_centre_detailed_list[centre_idx]['pincode']]
            for session_idx in range(len(vaccine_available_centre_detailed_list[centre_idx]['sessions'])):
                # check part for age and dose type
                if vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx][dose_type] > 0 and vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx]['min_age_limit'] < age:
                    slot_details_value = [vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx]['date'], vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx]['vaccine'], dose_type, vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx][dose_type], vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx]['min_age_limit'], vaccine_available_centre_detailed_list[centre_idx]['sessions'][session_idx]['slots']]
                    slot_val = []
                    slot_val.extend(centre_details)
                    slot_val.extend(slot_details_value)
                    if centre_id not in vaccine_available_dict:
                        vaccine_available_dict[centre_id] = list()
                    vaccine_available_dict[centre_id].append(slot_val)
        return vaccine_available_dict

    def check_availability(self):
        """Poll each nearby centre's calendar and alert if matching slots exist."""
        availability_details_dict = {}
        centre_id_api_params = {}
        vaccine_available_centre_detailed_list = []
        test_list = []
        # NOTE(review): pd.to_datetime('now') is UTC-based in pandas; near
        # midnight the date sent to the API may differ from local — confirm.
        cur_date = pd.to_datetime('now').strftime('%d-%m-%Y')
        for centre_id in self.centre_ids:
            centre_id_api_params['center_id'] = centre_id
            centre_id_api_params['date'] = cur_date
            response = requests.get('https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByCenter?', params=centre_id_api_params)
            availability_details_dict = json.loads(response.text)
            if len(availability_details_dict) > 0:
                vaccine_available_centre_detailed_list.append(availability_details_dict['centers'])
                test_list.append(availability_details_dict)
        vaccine_available_dict = self.parse_json(vaccine_available_centre_detailed_list)
        if len(vaccine_available_dict) >= 1:
            specific_vaccine_available_dict = self.filter_specific_vaccine(vaccine_available_dict)
            if len(specific_vaccine_available_dict) >= 1:
                print(specific_vaccine_available_dict)
                # macOS-only audio alert.
                os.system("afplay " + 'vaccine_alert.WAV')
if __name__ == '__main__':
    # Resolve the config file next to this script, regardless of CWD.
    config_file_abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'config_details.ini'))
    obj_modify = ObjectModifier(config_file_abs_path)
    obj_modify.get_config()
    # VaccineGenerator's MRO runs DetailsAssigner.__init__ on the parsed config.
    vaccine_generator = VaccineGenerator(obj_modify.config)
    vaccine_generator.get_nearby_centres()
    vaccine_generator.check_availability()
|
nilq/baby-python
|
python
|
from ftis.analyser.descriptor import Chroma
from ftis.analyser.audio import CollapseAudio
from ftis.world import World
from ftis.corpus import Corpus
import argparse
# CLI: where to read audio from and where to write the chroma analysis.
parser = argparse.ArgumentParser(description="Process input and output location")
parser.add_argument(
    "-i",
    "--input",
    default="~/corpus-folder/corpus1",
    type=str,
    help="Folder for input. This should contain some audio files.",
)
parser.add_argument(
    "-o",
    "--output",
    default="~/corpus-folder/chroma",
    type=str,
    help="Folder for output. This will be made if it doesnt exist.",
)
args = parser.parse_args()

# ftis pipeline: collapse each source file to mono, then extract chroma.
src = Corpus(args.input)
out = args.output
process = World(source=src, sink=out)
process.add(CollapseAudio(), Chroma())

if __name__ == "__main__":
    process.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2018 Daniel Koguciuk <daniel.koguciuk@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
'''
@author: Daniel Koguciuk <daniel.koguciuk@gmail.com>
@note: Created on 04.07.2018
'''
import os
import sys
import cv2
import time
import tqdm
import shutil
import argparse
import numpy as np
from utils import data_generator as gen
# Output layout: features/hog under this script's own directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FEAT_DIR = os.path.join(BASE_DIR, 'features')
HOGF_DIR = os.path.join(FEAT_DIR, 'hog')
if not os.path.exists(FEAT_DIR):
    os.mkdir(FEAT_DIR)
# Start from a clean HOG directory on every run.
if os.path.exists(HOGF_DIR):
    shutil.rmtree(HOGF_DIR)
os.mkdir(HOGF_DIR)
def __HOG(img, cell_size=(8, 8), nbins=9):
    """Hand-rolled HOG descriptor for one grayscale image.

    Gradient magnitudes are binned by orientation per cell, concatenated,
    and Hellinger-normalized.
    """
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    # Quantize each angle into one of `nbins` orientation bins.
    bin_idx = np.int32(nbins * ang / (2 * np.pi))
    cellx, celly = cell_size
    n_rows = int(img.shape[0] / celly)
    n_cols = int(img.shape[1] / cellx)
    hists = []
    for i in range(n_rows):
        for j in range(n_cols):
            b = bin_idx[i * celly:(i + 1) * celly, j * cellx:(j + 1) * cellx]
            m = mag[i * celly:(i + 1) * celly, j * cellx:(j + 1) * cellx]
            # Magnitude-weighted orientation histogram for this cell.
            hists.append(np.bincount(b.ravel(), m.ravel(), nbins))
    hist = np.hstack(hists)
    # transform to Hellinger kernel
    eps = 1e-7
    hist /= hist.sum() + eps
    hist = np.sqrt(hist)
    hist /= cv2.norm(hist) + eps
    return hist
def _HOG(images, image_size):
    """
    Calc HOG features for grayscale images.
    Args:
        images (ndarray of size [images, some_size, some_size]): Grayscale images.
    Returns:
        (ndarray of size [images, features_no]): HOG features for each image.
    """
    NBINS = 9
    # 16x16 grid of cells regardless of the image size.
    cell = int(image_size / 16)
    feats = [__HOG(img, cell_size=(cell, cell), nbins=NBINS) for img in images]
    return np.stack(feats, axis=0)
def extract_HOG(generator, category='species', image_size=256, train=True, verbose=True):
    """
    Extract HOG features for specified dataset (train/test).
    Args:
        generator (Generator class object): Generator class object.
        train (bool): Am I working with train or test data?
        verbose (bool): Should I print some additional info?
        category (str): What category do you want: species or breeds?
    Returns:
        (ndarray of size [images, features_no], ndarray of size [images]) Features and labels.
    """
    all_featrs = []
    all_labels = []
    batch_size = 64
    start_time = time.time()
    # Bug fix: the original used float division here, handing tqdm a float
    # `total` and ignoring the partial final batch; ceil integer division
    # gives the correct integer batch count.
    images_count = generator.images_count(train=train)
    batches = (images_count + batch_size - 1) // batch_size
    print("Calculating HOG featues..")
    for images, labels in tqdm.tqdm(generator.generate_batch(train=train, batch_size=batch_size, category=category, image_size=image_size), total=batches):
        all_featrs.append(_HOG(images, image_size))
        all_labels.append(labels)
    all_featrs = np.concatenate(all_featrs, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)
    hog_time = time.time()
    if verbose:
        print("Features calculated in ", hog_time - start_time, " seconds")
    return all_featrs, all_labels
def main(argv):
    """Extract HOG features for the Oxford-IIIT Pets dataset and save
    train/test feature + label arrays as .npy files under features/hog.

    NOTE(review): *argv* is accepted but ignored — parse_args() reads
    sys.argv directly; pass argv to parse_args to honor the parameter.
    """
    # Parser ==================================================================
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--image_size", help="images size (defaults: 256)", type=int, default=256)
    args = vars(parser.parse_args())
    # Extract features ========================================================
    generator = gen.OxfordIIITPets(colorspace='GRAY', train_size=0.8)
    features_train, labels_train = extract_HOG(generator, category='species', image_size=args['image_size'], train=True, verbose=True)
    features_test, labels_test = extract_HOG(generator, category='species', image_size=args['image_size'], train=False, verbose=True)
    # Save ====================================================================
    np.save(os.path.join(HOGF_DIR, "features_train.npy"), features_train)
    np.save(os.path.join(HOGF_DIR, "labels_train.npy"), labels_train)
    np.save(os.path.join(HOGF_DIR, "features_test.npy"), features_test)
    np.save(os.path.join(HOGF_DIR, "labels_test.npy"), labels_test)


if __name__ == "__main__":
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import sys

# Make the janus package importable under mod_wsgi, which does not put
# the project directory on sys.path.
sys.path.insert(0, "/var/www/janus/")
# WSGI servers look for a module-level callable named ``application``.
from janus import app as application
|
nilq/baby-python
|
python
|
import argparse

# Symmetric XOR "encryption": applying the same key twice restores the
# original file, so the one code path both encodes and decodes.
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="file to encode/decode using the provided key")
parser.add_argument("output_file", help="name under which the processed file should be saved")
parser.add_argument("key", help="cryptographic key to process file with")
args = parser.parse_args()

with open(args.input_file, 'rb') as in_file:
    with open(args.output_file, 'wb') as out_file:
        key = args.key.encode('utf-8')
        key_length = len(key)
        in_data = in_file.read()
        out_data = bytearray(b'')
        # XOR each byte with the key byte at the cycling position; the
        # modulo replaces the original manual wrap-around bookkeeping.
        for i, byte in enumerate(in_data):
            out_data.append(byte ^ key[i % key_length])
        out_file.write(out_data)
print("Operation completed successfully!")
|
nilq/baby-python
|
python
|
# coding: utf-8
from fabkit import filer, sudo, env
from fablib.base import SimpleBase
# from fablib import git
from fablib.python import Python
from oslo_config import cfg
CONF = cfg.CONF
class FabClient(SimpleBase):
    """Fabkit client deployment: renders the client config, syncs the
    fabfile tree to the host, and manages the fabagent services."""

    def __init__(self):
        # Key under which this component's data lives in the fabkit store.
        self.data_key = 'fabkit_tools'
        self.data = {
            'user': 'nobody',
            'group': 'nobody',
            'prefix': '/opt/fabkit',
            'task_patterns': 'local.*,check.*',
        }
        self.services = [
            'fabagent',
            'fabagent-central',
        ]

    def init_before(self):
        # Python environment rooted at the install prefix.
        self.python = Python(self.data['prefix'])

    def init_after(self):
        # Derived values: "user:group" ownership string and current host.
        self.data['owner'] = '{0}:{1}'.format(self.data['user'], self.data['group'])
        self.data['host'] = env.host

    def setup(self):
        """Deploy config and fabfile to the host, then (re)start services.

        The DB sync runs only on the first host of the roster —
        presumably so the schema migration happens once per cluster.
        """
        data = self.init()
        var_dir = CONF.client.package_var_dir
        common_repo = '{0}/fabkit-repo-common'.format(var_dir)
        client_repo = '{0}/fabkit-repo-client'.format(var_dir)
        filer.template('{0}/fabfile.ini'.format(client_repo), data=data)
        # Replace the client's fabfile tree with the common one, then fix
        # ownership so the agent user can read it.
        sudo('rm -rf {0}/fabfile && '
             'cp -r {1}/fabfile {0}/fabfile && '
             'chown -R {2}:{3} {0}/fabfile'.format(
                 client_repo, common_repo, data['user'], data['group']))
        if env.host == env.hosts[0]:
            sudo('/opt/fabkit/bin/fabclient sync_db')
        self.start_services().enable_services()
        self.restart_services()
        # Smoke test: list the available tasks.
        sudo('/opt/fabkit/bin/fabclient -l')
|
nilq/baby-python
|
python
|
class LinearElasticMaterialModel:
    """Isotropic linear-elastic material defined by Young's modulus and
    Poisson's ratio."""

    def __init__(self, youngs_modulus, poissons_ratio):
        # NOTE: the attribute is spelled ``young_modulus`` (no trailing
        # 's'); subclasses rely on this exact name.
        self.poissons_ratio = poissons_ratio
        self.young_modulus = youngs_modulus
class LinearElasticPlaneMaterialModel(LinearElasticMaterialModel):
    """Adds a thickness to the base linear-elastic material, for 2D
    (plane stress / plane strain) formulations."""

    def __init__(self, youngs_modulus, poissons_ratio, thickness):
        super().__init__(youngs_modulus, poissons_ratio)
        self.thickness = thickness
class LinearElasticPlaneStressMaterialModel(LinearElasticPlaneMaterialModel):
    """Plane-stress constitutive law (zero out-of-plane stress)."""

    def compute_stresses(self, ex, ey, gamma_xy):
        """Return (sx, sy, tau_xy) for the given in-plane strains."""
        nu = self.poissons_ratio
        scale = self.young_modulus * self.thickness / (1 - nu ** 2)
        sx = scale * (ex + nu * ey)
        sy = scale * (nu * ex + ey)
        tau_xy = scale * (1 - nu) * gamma_xy / 2
        return sx, sy, tau_xy
class LinearElasticPlaneStrainMaterialModel(LinearElasticPlaneMaterialModel):
    """Plane-strain constitutive law (zero out-of-plane strain)."""

    def compute_stresses(self, ex, ey, gamma_xy):
        """Return (sx, sy, tau_xy) for the given in-plane strains."""
        nu = self.poissons_ratio
        scale = self.young_modulus * self.thickness / ((1 + nu) * (1 - 2 * nu))
        sx = scale * ((1 - nu) * ex + nu * ey)
        sy = scale * (nu * ex + (1 - nu) * ey)
        tau_xy = scale * (1 - 2 * nu) * gamma_xy / 2
        return sx, sy, tau_xy
|
nilq/baby-python
|
python
|
"""
The purpose of this script is to train an AI agent to play the custom-built
Kuiper Escape game using the A2C reinforcement learning algorithm.
"""
# 3rd party imports
import gym
import gym_kuiper_escape
# from code.evaluation import evaluate_policy
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.callbacks import EvalCallback
from stable_baselines.common import make_vec_env
from stable_baselines import A2C
# Create environment instance(s)
# One plain env for periodic evaluation; training runs on 4 vectorized copies.
env_eval = gym.make('kuiper-escape-base-v0')
env = make_vec_env('kuiper-escape-base-v0', n_envs=4)

# Define callback function: evaluate every 10k steps, keep the best model.
eval_callback = EvalCallback(
    env_eval,
    best_model_save_path='./logs/agent_best',
    log_path='./logs/',
    eval_freq=10000,
    deterministic=True,
    render=False
)

# Create agent model
agent = A2C('MlpPolicy', env, verbose=1, tensorboard_log='./tensorboard/')

# Train agent for 5M timesteps, then save the final weights alongside the
# best-so-far snapshot kept by the callback.
agent.learn(5000000,
            reset_num_timesteps=False,
            callback=eval_callback
            )
agent.save("agent")
|
nilq/baby-python
|
python
|
from copy import deepcopy
from unyt import dimensions
from mosdef_cassandra.utils.units import validate_unit, validate_unit_list
import parmed
import warnings
import unyt as u
class MoveSet(object):
def __init__(self, ensemble, species_topologies):
    """A class to contain all the move probabilities and related
    values required to perform a simulation in ``Cassandra``.

    A MoveSet contains the move probabilities and other related
    quantities (e.g., max translation/rotation) that are required to
    run Cassandra. When the MoveSet is created the specified
    ``ensemble`` and ``species_topologies`` are used to generate
    initial guesses for all required values. Depending upon the
    specifics of your system, these guesses may be very reasonable
    or downright terrible. Use the same ``species_topologies`` for
    your call to ``mosdef_cassandra.System()`` and
    ``mosdef_cassandra.MoveSet()``.

    Parameters
    ----------
    ensemble : str
        string describing the desired ensembled. Supported
        values include ``'nvt'``, ``'npt'``, ``'gcmc'``,
        ``'gemc'``, ``'gemc_npt'``
    species_topologies : list
        list of ``parmed.Structures``, with one species per element

    Returns
    -------
    ``mosdef_cassandra.MoveSet``
    """
    if not isinstance(species_topologies, list):
        raise TypeError(
            "species_topologies should be a " "list of species"
        )
    for species in species_topologies:
        if not isinstance(species, parmed.Structure):
            raise TypeError("each species should be a " "parmed.Structure")
    # Extract self._n_species
    self._n_species = len(species_topologies)
    # Set the ensemble (write-once; validated by the ensemble property setter)
    self.ensemble = ensemble
    # Infer the number of boxes
    if (
        self.ensemble == "nvt"
        or self.ensemble == "npt"
        or self.ensemble == "gcmc"
    ):
        self._n_boxes = 1
    else:
        self._n_boxes = 2
    # Set '_restricted_typed' and '_restricted_value'
    self._restricted_type = None
    self._restricted_value = None
    # Define default probabilities
    # Most are ensemble-dependent
    self.prob_angle = 0.0
    self.prob_dihedral = 0.0
    if self.ensemble == "nvt":
        self.prob_translate = 0.33
        self.prob_rotate = 0.33
        self.prob_regrow = 0.34
        self.prob_volume = 0.0
        self.prob_insert = 0.0
        self.prob_swap = 0.0
    elif self.ensemble == "npt":
        self.prob_translate = 0.33
        self.prob_rotate = 0.33
        self.prob_regrow = 0.335
        self.prob_volume = 0.005
        self.prob_insert = 0.0
        self.prob_swap = 0.0
    # GCMC sums to 0.9 b/c symmetric prob_delete
    elif self.ensemble == "gcmc":
        self.prob_translate = 0.25
        self.prob_rotate = 0.25
        self.prob_regrow = 0.30
        self.prob_volume = 0.0
        self.prob_insert = 0.1
        self.prob_swap = 0.0
    elif self.ensemble == "gemc":
        self.prob_translate = 0.30
        self.prob_rotate = 0.30
        self.prob_regrow = 0.295
        self.prob_volume = 0.005
        self.prob_insert = 0.0
        self.prob_swap = 0.1
    elif self.ensemble == "gemc_npt":
        self.prob_translate = 0.30
        self.prob_rotate = 0.30
        self.prob_regrow = 0.295
        self.prob_volume = 0.005
        self.prob_insert = 0.0
        self.prob_swap = 0.1
    else:
        # Unreachable: the ensemble setter already rejected invalid values.
        raise ValueError("Uh oh, how did we end up here?")
    # Max translation and rotations specified per-species-per-box
    # NOTE(review): the outer ``* self._n_boxes`` replicates references to
    # the SAME inner list; if the max_translate/max_rotate property setters
    # (defined elsewhere) do not copy, the per-box lists are aliased.
    self.max_translate = [
        [2.00 * u.angstrom] * self._n_species
    ] * self._n_boxes
    self.max_rotate = [[30.0 * u.degree] * self._n_species] * self._n_boxes
    # Prob swap and max vol are per-box
    self.prob_swap_from_box = [1.0 / self._n_boxes] * self._n_boxes
    # Default max deltas for volume moves
    if self.ensemble == "npt" or self.ensemble == "gemc":
        self.max_volume = [500.0 * (u.angstrom ** 3)]
    elif self.ensemble == "gemc_npt":
        self.max_volume = [
            500.0 * (u.angstrom ** 3),
            5000.0 * (u.angstrom ** 3),
        ]
    else:
        self.max_volume = [0.0 * (u.angstrom ** 3)]
    # Set the default CBMC options
    self.cbmc_n_insert = 10
    self.cbmc_n_dihed = 10
    self.cbmc_rcut = 6.0 * u.angstrom
    # Remaining options are per-species
    self.max_dihedral = [0.0 * u.degree] * self._n_species
    self.prob_regrow_species = [1.0] * self._n_species
    if self.ensemble in ["gcmc", "gemc", "gemc_npt"]:
        self.insertable = [True] * self._n_species
    else:
        self.insertable = [False] * self._n_species
    if self.ensemble in ["gemc", "gemc_npt"]:
        self.prob_swap_species = [1.0] * self._n_species
    else:
        self.prob_swap_species = [0.0] * self._n_species
    # Here we handle species-wise exceptions
    for ispec, species in enumerate(species_topologies):
        # Single atom: rotation and regrowth moves are meaningless.
        if len(species.atoms) == 1:
            for ibox in range(self._n_boxes):
                self.max_rotate[ibox][ispec] = 0.0 * u.degree
            self.prob_regrow_species[ispec] = 0.0
        # Multi-atom but bond-less: treat as a fixed, non-insertable body.
        elif len(species.bonds) == 0:
            print(
                "Treating {} as a non-insertable rigid species "
                "since it has no bonds".format(species)
            )
            for ibox in range(self._n_boxes):
                self.max_translate[ibox][ispec] = 0.0 * u.angstrom
                self.max_rotate[ibox][ispec] = 0.0 * u.degree
            self.prob_regrow_species[ispec] = 0.0
            self.insertable[ispec] = False
            self.prob_swap_species[ispec] = 0.0
    # Correct species_prob_regrow: renormalize over regrowable species.
    if sum(self.prob_regrow_species) > 0:
        sp_regrowth_prob = 1.0 / sum(self.prob_regrow_species)
        for i, prob in enumerate(self.prob_regrow_species):
            if prob > 0.0:
                self.prob_regrow_species[i] = sp_regrowth_prob
    if sum(self.prob_swap_species) > 0:
        # Correct species_prob_swap: renormalize over insertable species.
        prob_swap_species = 1.0 / sum(self.prob_swap_species)
        for idx, insert in enumerate(self.insertable):
            if insert:
                self.prob_swap_species[idx] = prob_swap_species
    # If all species have no prob regrowth, set prob_regrow to
    # zero and redistribute prob to translate/rotate
    if sum(self.prob_regrow_species) == 0.0:
        self.prob_translate += self.prob_regrow / 2.0
        self.prob_rotate += self.prob_regrow / 2.0
        self.prob_regrow = 0.0
    # If all species are not rotatable change prob rotation
    # move to zero. Redistribute prob to translate
    if self.ensemble == "gemc" or self.ensemble == "gemc_npt":
        if (
            sum(self.max_rotate[0]).to_value()
            + sum(self.max_rotate[1]).to_value()
            == 0.0
        ):
            self.prob_translate += self.prob_rotate
            self.prob_rotate = 0.0
    else:
        if sum(self.max_rotate[0]).to_value() == 0.0:
            self.prob_translate += self.prob_rotate
            self.prob_rotate = 0.0
def add_restricted_insertions(
    self, species_topologies, restricted_type, restricted_value
):
    """Add restricted insertions for specific species and boxes.

    Parameters
    ----------
    species_topologies : list
        list of ``parmed.Structures`` containing one list per box of species
    restricted_type : list
        list of restricted insertion types containing one list per box of species
    restricted_value : list
        list of restricted insertion values (unyt arrays) containing one list per box of species

    Raises
    ------
    ValueError
        if the ensemble does not support restricted insertions, or the
        list shapes are inconsistent with the box/species counts
    TypeError
        if the type/value arguments are not lists of per-box lists
    """
    if self._restricted_type and self._restricted_value:
        warnings.warn(
            "Restricted insertion has been previously"
            " added and will be replaced."
        )
    if self.ensemble not in ["gcmc", "gemc", "gemc_npt"]:
        raise ValueError(
            "Restricted insertions are only valid for"
            " 'gcmc', 'gemc', and 'gemc_npt' ensembles."
        )
    if len(restricted_type) != len(restricted_value):
        raise ValueError(
            "Length of 'restricted_type' and "
            " 'restricted_value' must match."
        )
    # Shape checks: one entry per box, each holding one entry per species.
    for box in restricted_type:
        if isinstance(box, (str, int, float)):
            raise TypeError(
                "Restricted type must be passed as a list"
                " of lists corresponding to each box."
            )
        if len(box) != len(species_topologies):
            raise ValueError(
                "Length of 'species' and "
                " length of box list in 'restricted_type'"
                " must match. `species` has a length of {}"
                " and the box list in 'restricted_type' has a "
                " length of {}".format(len(species_topologies), len(box))
            )
    for box in restricted_value:
        if isinstance(box, (str, int, float)):
            raise TypeError(
                "Restricted value must be passed as a list"
                " of lists corresponding to each box."
            )
        if len(box) != len(species_topologies):
            raise ValueError(
                "Length of 'species' and "
                " length of species list in 'restricted_value'"
                " must match. `species` has a length of {}"
                " and the box list in 'restricted_value' has a "
                " length of {}".format(len(species_topologies), len(box))
            )
    if self.ensemble == "gcmc" and len(restricted_type) != 1:
        raise ValueError(
            "GCMC ensemble contains 1 box but"
            " `restricted_type` of length {}"
            " was passed.".format(len(restricted_type))
        )
    if self.ensemble in ["gemc", "gemc_npt"] and len(restricted_type) != 2:
        raise ValueError(
            "GEMC ensembles contain 2 boxes but"
            " `restricted_type` of length {}"
            " was passed.".format(len(restricted_type))
        )
    # Per-species consistency: a type and a value must be given together.
    # NOTE(review): truthiness tests below treat a value of 0 (or an empty
    # array) the same as None — confirm that is intended.
    for types, values in zip(restricted_type, restricted_value):
        for typ, val in zip(types, values):
            if not typ and not val:
                pass
            elif typ and not val:
                raise ValueError(
                    "`restricted_type` {} was passed"
                    " but `restricted_value` is None.".format(typ, val)
                )
            elif val and not typ:
                raise ValueError(
                    "`restricted_value` {} was passed"
                    " but `restricted_type` is None.".format(val, typ)
                )
            else:
                _check_restriction_type(typ, val)
                # Check units of restricted value
                if typ == "interface":
                    # Interface restrictions take a list of lengths.
                    [validate_unit(i, dimensions.length) for i in val]
                else:
                    validate_unit(val, dimensions.length)
    self._restricted_type = restricted_type
    self._restricted_value = restricted_value
@property
def ensemble(self):
    """The simulation ensemble; write-once after construction."""
    return self._ensemble

@ensemble.setter
def ensemble(self, ensemble):
    # The ensemble fixes the box count and default probabilities, so it
    # may only be assigned once (during __init__).
    if hasattr(self, "_ensemble"):
        raise AttributeError(
            "Ensemble cannot be changed. Please create a new MoveSet instead."
        )
    allowed = ["nvt", "npt", "gcmc", "gemc", "gemc_npt"]
    if ensemble not in allowed:
        raise ValueError(
            'Invalid ensemble "{}" Supported '
            "ensembles include {}".format(ensemble, allowed)
        )
    self._ensemble = ensemble
@property
def prob_translate(self):
    """Probability of selecting a translation move."""
    return self._prob_translate

@prob_translate.setter
def prob_translate(self, prob_translate):
    # Delegate type/range checking (coerces int to float, enforces [0, 1]).
    self._prob_translate = self._validate_probability(
        prob_translate, "prob_translate"
    )
@property
def prob_rotate(self):
    """Probability of selecting a rotation move."""
    return self._prob_rotate

@prob_rotate.setter
def prob_rotate(self, prob_rotate):
    # Delegate type/range checking (coerces int to float, enforces [0, 1]).
    self._prob_rotate = self._validate_probability(
        prob_rotate, "prob_rotate"
    )
@property
def prob_angle(self):
    """Probability of selecting an angle move."""
    return self._prob_angle

@prob_angle.setter
def prob_angle(self, prob_angle):
    # Delegate type/range checking (coerces int to float, enforces [0, 1]).
    self._prob_angle = self._validate_probability(
        prob_angle, "prob_angle"
    )
@property
def prob_dihedral(self):
    """Probability of selecting a dihedral move."""
    return self._prob_dihedral

@prob_dihedral.setter
def prob_dihedral(self, prob_dihedral):
    # Delegate type/range checking (coerces int to float, enforces [0, 1]).
    self._prob_dihedral = self._validate_probability(
        prob_dihedral, "prob_dihedral"
    )
@property
def prob_regrow(self):
    """Probability of selecting a regrowth move."""
    return self._prob_regrow

@prob_regrow.setter
def prob_regrow(self, prob_regrow):
    # Delegate type/range checking (coerces int to float, enforces [0, 1]).
    self._prob_regrow = self._validate_probability(
        prob_regrow, "prob_regrow"
    )
@property
def prob_volume(self):
    """Probability of selecting a volume move."""
    return self._prob_volume

@prob_volume.setter
def prob_volume(self, prob_volume):
    prob_volume = self._validate_probability(prob_volume, "prob_volume")
    # Volume moves make no sense in constant-volume ensembles, and are
    # required in the constant-pressure ones.  The validated probability is
    # a float in [0, 1], so "> 0.0" and "== 0.0" are exhaustive here.
    if prob_volume > 0.0 and self.ensemble in ("nvt", "gcmc"):
        raise ValueError(
            "Ensemble is {}. prob_volume cannot be "
            "non-zero in the {} ensemble".format(
                self._ensemble, self.ensemble
            )
        )
    if prob_volume == 0.0 and self.ensemble in ("npt", "gemc", "gemc_npt"):
        raise ValueError(
            "Ensemble is {}. prob_volume must be "
            "> 0.0 in this ensemble".format(self.ensemble)
        )
    # Passed all checks; record the value.
    self._prob_volume = prob_volume
@property
def prob_insert(self):
    """Probability of selecting an insertion move."""
    return self._prob_insert

@prob_insert.setter
def prob_insert(self, prob_insert):
    prob_insert = self._validate_probability(prob_insert, "prob_insert")
    # Insertions exist only in the grand canonical (gcmc) ensemble, where
    # they are mandatory.
    if self.ensemble == "gcmc":
        if prob_insert == 0.0:
            raise ValueError(
                "Ensemble is {}. Insertion probability "
                "must be > 0.0".format(self.ensemble)
            )
    elif prob_insert != 0.0:
        raise ValueError(
            "Ensemble is {}. Insertion probability "
            "must be = 0.0".format(self.ensemble)
        )
    self._prob_insert = prob_insert
@property
def prob_swap(self):
    """Probability of selecting a box-swap move."""
    return self._prob_swap

@prob_swap.setter
def prob_swap(self, prob_swap):
    prob_swap = self._validate_probability(prob_swap, "prob_swap")
    # Swaps between boxes exist only in the Gibbs ensembles, where they
    # are mandatory.
    in_gibbs = self.ensemble in ("gemc", "gemc_npt")
    if not in_gibbs and prob_swap != 0.0:
        raise ValueError(
            "Ensemble is {}. Swapping probability "
            "must be = 0.0".format(self.ensemble)
        )
    if in_gibbs and prob_swap == 0.0:
        raise ValueError(
            "Ensemble is {}. Swapping probability "
            "must be > 0.0".format(self.ensemble)
        )
    self._prob_swap = prob_swap
@property
def max_translate(self):
    """Maximum translation distance, one value per (box, species) pair."""
    return self._max_translate

@max_translate.setter
def max_translate(self, max_translate):
    # Validate shape and attach/verify length units.
    max_translate = validate_unit_list(
        max_translate,
        (self._n_boxes, self._n_species),
        dimensions.length,
        "max_translate",
    )
    if any(value.to_value() < 0.0 for value in max_translate.flatten()):
        raise ValueError(
            "Max translation values cannot be less than zero"
        )
    self._max_translate = max_translate
@property
def max_rotate(self):
    """Maximum rotation angle, one value per (box, species) pair."""
    return self._max_rotate

@max_rotate.setter
def max_rotate(self, max_rotate):
    # Validate shape and attach/verify angle units.
    max_rotate = validate_unit_list(
        max_rotate,
        (self._n_boxes, self._n_species),
        dimensions.angle,
        "max_rotate",
    )
    for value in max_rotate.flatten():
        if not 0.0 <= value.to_value("degree") <= 360.0:
            raise ValueError(
                "Max rotation values must be between 0.0 and 360.0 degrees."
            )
    self._max_rotate = max_rotate
@property
def max_dihedral(self):
    """Maximum dihedral rotation angle, one value per species."""
    return self._max_dihedral

@max_dihedral.setter
def max_dihedral(self, max_dihedral):
    # Validate shape (one entry per species) and angle units.
    max_dihedral = validate_unit_list(
        max_dihedral,
        (self._n_species,),
        dimensions.angle,
        "max_dihedral",
    )
    for value in max_dihedral:
        if not 0.0 <= value.to_value("degree") <= 360.0:
            raise ValueError(
                "Max dihedral rotation values must be between 0.0 and 360.0 degrees."
            )
    self._max_dihedral = max_dihedral
@property
def prob_swap_from_box(self):
    """Per-box probabilities of selecting that box as the swap source."""
    return self._prob_swap_from_box

@prob_swap_from_box.setter
def prob_swap_from_box(self, prob_swap_from_box):
    if (
        not isinstance(prob_swap_from_box, list)
        or len(prob_swap_from_box) != self._n_boxes
    ):
        raise TypeError(
            "prob_swap_from_box must be a list with length "
            "(number of boxes)"
        )
    # Validate each entry; validation coerces ints and enforces [0, 1].
    self._prob_swap_from_box = [
        self._validate_probability(prob, "prob_swap_from_box")
        for prob in prob_swap_from_box
    ]
@property
def max_volume(self):
    """Maximum volume-move size (per box in gemc_npt, otherwise one value)."""
    return self._max_volume

@max_volume.setter
def max_volume(self, max_volume):
    # gemc_npt allows an independent maximum per box; every other ensemble
    # uses a single value.
    n_values = self._n_boxes if self.ensemble == "gemc_npt" else 1
    if type(max_volume) not in (list, u.unyt_array):
        # A bare scalar applies to every slot.
        max_volume = [max_volume] * n_values
    max_volume = validate_unit_list(
        max_volume,
        (n_values,),
        dimensions.length ** 3,
        "max_volume",
    )
    if any(value < 0.0 for value in max_volume.flatten()):
        raise ValueError("max_volume cannot be less than zero.")
    self._max_volume = max_volume
@property
def insertable(self):
    """Per-species booleans marking which species may be inserted."""
    return self._insertable

@insertable.setter
def insertable(self, insertable):
    if (
        not isinstance(insertable, list)
        or len(insertable) != self._n_species
    ):
        raise TypeError(
            "insertable must be a list with length " "(number of species)"
        )
    # Each entry must be an actual bool; truthy ints are rejected.
    for flag in insertable:
        if not isinstance(flag, bool):
            raise TypeError(
                "The insertability of each species "
                "must be provided as a boolean type."
            )
    self._insertable = insertable
@property
def prob_swap_species(self):
    """Per-species probabilities of selecting that species for a swap."""
    return self._prob_swap_species

@prob_swap_species.setter
def prob_swap_species(self, prob_swap_species):
    if (
        not isinstance(prob_swap_species, list)
        or len(prob_swap_species) != self._n_species
    ):
        raise TypeError(
            "prob_swap_species must be a list with length "
            "(number of species)"
        )
    # Validate each entry; validation coerces ints and enforces [0, 1].
    self._prob_swap_species = [
        self._validate_probability(prob, "prob_swap_species")
        for prob in prob_swap_species
    ]
@property
def prob_regrow_species(self):
    """Per-species probabilities of selecting that species for regrowth."""
    return self._prob_regrow_species

@prob_regrow_species.setter
def prob_regrow_species(self, prob_regrow_species):
    if (
        not isinstance(prob_regrow_species, list)
        or len(prob_regrow_species) != self._n_species
    ):
        raise TypeError(
            "prob_regrow_species must be a list with length "
            "(number of species)"
        )
    # NOTE(review): validation errors report the name "prob_regrow" (not
    # "prob_regrow_species"), matching the original behavior.
    self._prob_regrow_species = [
        self._validate_probability(prob, "prob_regrow")
        for prob in prob_regrow_species
    ]
@property
def cbmc_n_insert(self):
    """Number of CBMC trial insertion positions."""
    return self._cbmc_n_insert

@cbmc_n_insert.setter
def cbmc_n_insert(self, cbmc_n_insert):
    # Strict type check (not isinstance), so bool is rejected as well.
    if type(cbmc_n_insert) is not int:
        raise TypeError("cbmc_n_insert must be of type int")
    if cbmc_n_insert <= 0:
        raise ValueError("cbmc_n_insert must be greater than zero")
    self._cbmc_n_insert = cbmc_n_insert
@property
def cbmc_n_dihed(self):
    """Number of CBMC trial dihedral angles."""
    return self._cbmc_n_dihed

@cbmc_n_dihed.setter
def cbmc_n_dihed(self, cbmc_n_dihed):
    # Strict type check (not isinstance), so bool is rejected as well.
    if type(cbmc_n_dihed) is not int:
        raise TypeError("cbmc_n_dihed must be of type int")
    if cbmc_n_dihed <= 0:
        raise ValueError("cbmc_n_dihed must be greater than zero")
    self._cbmc_n_dihed = cbmc_n_dihed
@property
def cbmc_rcut(self):
    """CBMC cutoff distance(s), one per box."""
    return self._cbmc_rcut

@cbmc_rcut.setter
def cbmc_rcut(self, cbmc_rcut):
    if type(cbmc_rcut) not in (list, u.unyt_array):
        # A single scalar cutoff applies to every box.
        cbmc_rcut = [cbmc_rcut] * self._n_boxes
    cbmc_rcut = validate_unit_list(
        cbmc_rcut,
        (self._n_boxes,),
        dimensions.length,
        "cbmc_rcut",
    )
    if any(rcut.to_value() < 0.0 for rcut in cbmc_rcut.flatten()):
        raise ValueError("cbmc_rcut cannot be less than zero.")
    self._cbmc_rcut = cbmc_rcut
def print(self):
    """Print the current contents of the MoveSet"""
    # Header: ensemble name and the per-move selection probabilities.
    contents = """
Ensemble: {ensemble}
Probability of selecting each move type:
Translate: {prob_translate}
Rotate: {prob_rotate}
Regrow: {prob_regrow}
Volume: {prob_volume}
Insert: {prob_insert}
Delete: {prob_delete}
Swap: {prob_swap}
Angle: {prob_angle}
Dihedral: {prob_dihedral}
""".format(
        ensemble=self.ensemble,
        prob_translate=self.prob_translate,
        prob_rotate=self.prob_rotate,
        prob_regrow=self.prob_regrow,
        prob_volume=self.prob_volume,
        prob_insert=self.prob_insert,
        # NOTE(review): deletion probability is reported as prob_insert —
        # presumably insert/delete are attempted with equal probability;
        # confirm this is intentional.
        prob_delete=self.prob_insert,
        prob_swap=self.prob_swap,
        prob_angle=self.prob_angle,
        prob_dihedral=self.prob_dihedral,
    )
    # CBMC settings and per-box cutoffs.
    contents += """
CBMC selections:
Number of trial positions: {n_insert}
Number of trial dihedral angles: {n_dihed}
CBMC cutoff(s):
""".format(
        n_insert=self.cbmc_n_insert,
        n_dihed=self.cbmc_n_dihed,
    )
    for idx, value in enumerate(self.cbmc_rcut):
        contents += " Box {}: {}\n".format(idx + 1, value)
    # Per-species table: one column per species, one row per quantity.
    contents += "\n\nPer species quantities:\n\n"
    contents += " "
    for idx in range(self._n_species):
        contents += "species{idx} ".format(idx=idx + 1)
    contents += "\n"
    contents += " "
    for idx in range(self._n_species):
        # The ``idx`` kwarg is unused; the underline is fixed-width.
        contents += "======== ".format(idx=idx + 1)
    contents += "\n"
    contents += " Max translate (Ang): "
    # One row per box; continuation rows are indented so columns align.
    for (box, max_translate_box) in enumerate(self.max_translate):
        if box > 0:
            contents += " "
        for (idx, max_translate) in enumerate(max_translate_box):
            contents += "{max_trans:4.2f} ".format(
                max_trans=max_translate
            )
        contents += "(Box {box})".format(box=box + 1)
        contents += "\n"
    contents += " Max rotate (deg): "
    for (box, max_rotate_box) in enumerate(self.max_rotate):
        if box > 0:
            contents += " "
        for (idx, max_rotate) in enumerate(max_rotate_box):
            contents += "{max_rot:4.2f} ".format(
                max_rot=max_rotate
            )
        contents += "(Box {box})".format(box=box + 1)
        contents += "\n"
    contents += " Insertable: "
    for (idx, insert) in enumerate(self.insertable):
        contents += "{insert} ".format(insert=insert)
    contents += "\n"
    contents += " Max dihedral: "
    for (idx, max_dih) in enumerate(self.max_dihedral):
        contents += "{max_dih:4.2f} ".format(max_dih=max_dih)
    contents += "\n"
    contents += " Prob swap: "
    for (idx, prob_swap) in enumerate(self.prob_swap_species):
        contents += "{prob_swap:4.2f} ".format(
            prob_swap=prob_swap
        )
    contents += "\n"
    contents += " Prob regrow: "
    for (idx, prob_regrow) in enumerate(self.prob_regrow_species):
        contents += "{regrow:4.2f} ".format(regrow=prob_regrow)
    contents += "\n"
    contents += "\n\nMax volume (Ang^3):\n"
    for (box, max_vol) in enumerate(self.max_volume):
        contents += " Box {box}: {max_vol}\n".format(
            box=box + 1, max_vol=max_vol
        )
    # Restricted-insertion details, only when configured.
    if self._restricted_type != None:
        contents += "\nRestricted Insertions (Ang):\n"
        for box in range(self._n_boxes):
            for species, (typ, value) in enumerate(
                zip(
                    self._restricted_type[box], self._restricted_value[box]
                )
            ):
                if typ == "sphere":
                    contents += "Box {box}, Species {species}: sphere, R = {r_value}\n".format(
                        box=box + 1, species=species + 1, r_value=value
                    )
                elif typ == "cylinder":
                    contents += "Box {box}, Species {species}: cylinder, R = {r_value}\n".format(
                        box=box + 1, species=species + 1, r_value=value
                    )
                elif typ == "slitpore":
                    contents += "Box {box}, Species {species}: slitpore, z_max = {z_max}\n".format(
                        box=box + 1, species=species + 1, z_max=value
                    )
                elif typ == "interface":
                    # Interface restrictions carry a (z_min, z_max) pair.
                    contents += "Box {box}, Species {species}: interface, z_min = {z_min}, z_max = {z_max}\n".format(
                        box=box + 1,
                        species=species + 1,
                        z_min=value[0],
                        z_max=value[1],
                    )
                else:
                    contents += (
                        "Box {box}, Species {species}: None\n".format(
                            box=box + 1, species=species + 1
                        )
                    )
    print(contents)
def _validate_probability(self, probability, name):
if type(probability) not in (float, int):
raise TypeError(f"{name} must be of type float")
else:
probability = float(probability)
if probability < 0.0 or probability > 1.0:
raise ValueError(f"{name} must be between 0.0 and 1.0.")
return probability
def _check_restriction_type(restriction_type, restriction_value):
valid_restrict_types = ["sphere", "cylinder", "slitpore", "interface"]
# Check restriction insertion type
if restriction_type not in valid_restrict_types:
raise ValueError(
'Invalid restriction type "{}". Supported '
"restriction types include {}".format(
restriction_type, valid_restrict_types
)
)
# Check if correct number of arguments passed
if restriction_type == "interface":
if len(restriction_value) != 2:
raise ValueError(
"Invalid number of arguments passed."
"{} arguments for restriction type {}"
"were passed. 2 are required".format(
len(restriction_value), restriction_type
)
)
else:
if not isinstance(restriction_value, u.unyt_array):
raise TypeError(
"Invalid type for `restriction_value` passed. A"
" single argument of type `unyt_array"
" should be passed".format(restriction_type)
)
|
nilq/baby-python
|
python
|
import unittest
import unittest.mock
import uuid
from g1.asyncs import kernels
from g1.asyncs.bases import tasks
from g1.messaging import reqrep
from g1.messaging.reqrep import clients
from g1.messaging.reqrep import servers
from g1.messaging.wiredata import jsons
class InvalidRequestError(Exception):
    """Error sent back when a request cannot be decoded or dispatched."""
class InternalServerError(Exception):
    """Error sent back when the server fails while handling a request."""
@reqrep.raising(InvalidRequestError, InternalServerError)
class TestInterface:
    """RPC interface under test; methods declare the surface, not behavior."""

    @reqrep.raising(ValueError)
    def greet(self, name: str) -> str:
        raise NotImplementedError

    def f(self):
        raise NotImplementedError

    def g(self):
        raise NotImplementedError

    def h(self):
        raise NotImplementedError
@reqrep.raising(InternalServerError)
class TestOnlyOneError:
    """Interface declaring a single error type, to test error-name mapping."""

    def f(self):
        raise NotImplementedError
# Don't inherit from ``TestInterface`` because we intentionally leave
# out ``f`` unimplemented.
class TestApplication:
    """Concrete server application used by the tests in this module."""

    async def greet(self, name):
        return f'Hello, {name}'

    async def g(self):
        # Return something the wire format cannot serialize.
        return object()

    async def h(self):
        # Test error that is not declared in the interface.
        raise RuntimeError
# Concrete request/response types generated from the interface declaration.
Request, Response = reqrep.generate_interface_types(TestInterface, 'Test')
# JSON codec used to (de)serialize messages on the wire.
WIRE_DATA = jsons.JsonWireData()
class ServerTest(unittest.TestCase):
    """Exercise ``servers.Server`` request handling and end-to-end flow."""

    def test_only_one_error(self):
        """A single declared error is mapped to its snake_case wire name."""
        request_type, response_type = \
            reqrep.generate_interface_types(TestOnlyOneError)
        server = servers.Server(
            TestOnlyOneError(),
            request_type,
            response_type,
            WIRE_DATA,
        )
        self.assertEqual(
            server._declared_error_types,
            {InternalServerError: 'internal_server_error'},
        )

    @kernels.with_kernel
    def test_serve(self):
        """``_serve`` handles valid, invalid, and failing requests."""
        server = servers.Server(
            TestApplication(),
            Request,
            Response,
            WIRE_DATA,
            invalid_request_error=InvalidRequestError(),
            internal_server_error=InternalServerError(),
        )
        # Happy path: a well-formed greet request round-trips.
        wire_request = WIRE_DATA.to_lower(
            Request(args=Request.m.greet(name='world'))
        )
        self.assertEqual(
            WIRE_DATA.to_upper(
                Response,
                kernels.run(server._serve(wire_request)),
            ),
            Response(result=Response.Result(greet='Hello, world')),
        )
        # Undecodable request bytes -> invalid-request error on the wire.
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(b'')),
                server._invalid_request_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'to_upper error: ')
        # Method declared in the interface but absent on the application.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.f()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._invalid_request_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'unknown method: f: ')
        # Unserializable result -> internal-server error on the wire.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.g()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._internal_server_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'to_lower error: ')
        # Undeclared exception from the handler -> internal-server error.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.h()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._internal_server_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'server error: ')

    @kernels.with_kernel
    def test_end_to_end(self):
        """Client and server communicate over an in-process socket."""

        def do_test(client, server, server_serve):
            url = 'inproc://%s' % uuid.uuid4()
            server.socket.listen(url)
            client.socket.dial(url)
            server_task = tasks.spawn(server_serve)
            client_task = tasks.spawn(client.m.greet(name='world'))
            # The client finishes quickly; the server keeps serving until
            # its socket is closed, so the kernel run times out first.
            with self.assertRaises(kernels.KernelTimeout):
                kernels.run(timeout=0.005)
            self.assertTrue(client_task.is_completed())
            self.assertEqual(
                client_task.get_result_nonblocking(), 'Hello, world'
            )
            self.assertFalse(server_task.is_completed())
            server.socket.close()
            kernels.run(timeout=1)
            self.assertTrue(server_task.is_completed())
            self.assertIsNone(server_task.get_result_nonblocking())

        app = TestApplication()
        with servers.Server(app, Request, Response, WIRE_DATA) as server:
            with clients.Client(Request, Response, WIRE_DATA) as client:
                do_test(client, server, server.serve)
        # Same flow, but entering the server context after the client's.
        app = TestApplication()
        server = servers.Server(app, Request, Response, WIRE_DATA)
        with clients.Client(Request, Response, WIRE_DATA) as client:
            with server:
                do_test(client, server, server.serve)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
# ADT XML fixture: a successful package read for $IAMTHEKING, including
# attribute/transport metadata and three sub-package references.
GET_PACKAGE_ADT_XML='''<?xml version="1.0" encoding="utf-8"?>
<pak:package xmlns:pak="http://www.sap.com/adt/packages" xmlns:adtcore="http://www.sap.com/adt/core" adtcore:masterLanguage="EN" adtcore:name="$IAMTHEKING" adtcore:type="DEVC/K" adtcore:changedAt="2019-01-29T23:00:00Z" adtcore:version="active" adtcore:createdAt="2019-01-29T23:00:00Z" adtcore:changedBy="DEVELOPER" adtcore:description="This is a package" adtcore:descriptionTextLimit="60" adtcore:language="EN">
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/vit/wb/object_type/devck/object_name/%24IAMTHEKING" rel="self" type="application/vnd.sap.sapgui" title="Representation in SAP Gui"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/applicationcomponents" rel="applicationcomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Application Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/softwarecomponents" rel="softwarecomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Software Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/transportlayers" rel="transportlayers" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Layers Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/translationrelevances" rel="translationrelevances" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Relevances Value Help"/>
<pak:attributes pak:packageType="development" pak:isPackageTypeEditable="false" pak:isAddingObjectsAllowed="false" pak:isAddingObjectsAllowedEditable="true" pak:isEncapsulated="false" pak:isEncapsulationEditable="false" pak:recordChanges="false" pak:isRecordChangesEditable="false" pak:isSwitchVisible="false"/>
<pak:superPackage/>
<pak:applicationComponent pak:name="-" pak:description="No application component assigned" pak:isVisible="true" pak:isEditable="false"/>
<pak:transport>
<pak:softwareComponent pak:name="LOCAL" pak:description="" pak:isVisible="true" pak:isEditable="false"/>
<pak:transportLayer pak:name="" pak:description="" pak:isVisible="false" pak:isEditable="false"/>
</pak:transport>
<pak:useAccesses pak:isVisible="false"/>
<pak:packageInterfaces pak:isVisible="false"/>
<pak:subPackages>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_doc" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_DOC" adtcore:description="Documentation stuff"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_src" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_SRC" adtcore:description="Production source codes"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_tests" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_TESTS" adtcore:description="Package with Tests"/>
</pak:subPackages>
</pak:package>
'''
# ADT XML fixture: "package not found" exception payload.  Newlines are
# stripped to mimic the server's single-line error response.
GET_PACKAGE_ADT_XML_NOT_FOUND='''<?xml version="1.0" encoding="utf-8"?>
<exc:exception xmlns:exc="http://www.sap.com/abapxml/types/communicationframework">
<namespace id="com.sap.adt"/>
<type id="ExceptionResourceNotFound"/>
<message lang="EN">Error while importing object PKG_NAME from the database.</message>
<localizedMessage lang="EN">Error while importing object PKG_NAME from the database.</localizedMessage>
<properties/>
</exc:exception>
'''.replace('\n', '').replace('\r', '')
|
nilq/baby-python
|
python
|
#
# 13. Roman to Integer
#
# Roman numerals are represented by seven different symbols: I, V, X, L, C, D, M
#
# Symbols Value
#
# I 1
# V 5
# X 10
# L 50
# C 100
# D 500
# M 1000
#
# For example, two is written as II in Roman numerals: just two ones added
# together.
# Twelve is written as, XII, which is simply X + II. The number twenty seven is
# written as XXVII, which is XX + V + II.
#
# Roman numerals are usually written largest to smallest from left to right.
# However, the numeral for four is not IIII. Instead, number four is written as
# IV. Because the one is before the five we subtract it making four. The same
# principle applies to the number nine, which is written as IX. There are six
# instances where subtraction is used:
#
# - I can be placed before V and X to make 4 and 9.
# - X can be placed before L and C to make 40 and 90.
# - C can be placed before D and M to make 400 and 900.
#
# Given a roman numeral, convert it to an integer. Input is guaranteed to be
# within the range from 1 to 3999.
#
# Example 1:
# Input: "III"
# Output: 3
#
# Example 2:
# Input: "IV"
# Output: 4
#
# Example 3:
# Input: "IX"
# Output: 9
#
# Example 4:
# Input: "LVIII"
# Output: 58
#
# Example 5:
# Input: "MCMXCIV"
# Output: 1994
#
class Solution(object):
    """Convert Roman numerals in the range 1..3999 to integers."""

    def romanToInt(self, s):
        """Single pass: add every symbol's value; when a symbol exceeds its
        predecessor, the predecessor was subtractive, so undo it twice."""
        values = {
            'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000
        }
        total = 0
        prev = values[s[0]] if s else 0
        for ch in s:
            cur = values[ch]
            total += cur
            if prev < cur:
                total -= 2 * prev
            prev = cur
        return total

    def romanToInt2(self, s):
        """Greedy scan with a two-character lookahead for the six
        subtractive pairs; fall back to single-symbol values."""
        pairs = {'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
        singles = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        total = 0
        i = 0
        n = len(s)
        while i < n:
            # At the last index the slice is one character and never a pair.
            pair = s[i:i + 2]
            if pair in pairs:
                total += pairs[pair]
                i += 2
            else:
                total += singles[s[i]]
                i += 1
        return total
|
nilq/baby-python
|
python
|
from factory import DjangoModelFactory, Sequence, SubFactory
from movie_planet.movies.models import Comment, Movie
class MovieFactory(DjangoModelFactory):
    """Factory producing Movie rows with unique sequential titles."""

    # "Title 000", "Title 001", ... — unique per created instance.
    title = Sequence(lambda n: "Title %03d" % n)

    class Meta:
        model = Movie
class CommentFactory(DjangoModelFactory):
    """Factory producing Comment rows attached to a (new) Movie by default."""

    body = "test body"
    movie = SubFactory(MovieFactory)

    class Meta:
        model = Comment
def create_movies_with_rank():
    """Create three movies with 1, 2, and 2 comments respectively and
    return them as a tuple (useful for comment-count ranking tests)."""
    movies = [MovieFactory() for _ in range(3)]
    for movie, n_comments in zip(movies, (1, 2, 2)):
        for _ in range(n_comments):
            CommentFactory(movie=movie)
    return tuple(movies)
|
nilq/baby-python
|
python
|
import uuid
import time
import pickle
from redis import Redis
class AcquireTimeoutError(Exception):
    """
    Raised when the lock could not be acquired within the allotted time.
    """
class RedisLock:
    """
    Redis-based distributed lock.

    Each acquisition stores a per-instance UUID as the lock value so a
    holder only ever deletes its own lock — never one that expired and was
    subsequently re-acquired by another client.
    """

    @classmethod
    def register_redis(cls, redis: Redis):
        """Install the shared Redis client used by every lock instance."""
        cls.redis = redis

    def __init__(self, lock_key, acquire_time=10, lock_timeout=60):
        """
        :param lock_key: lock name (stored under the ``lock:`` namespace)
        :param acquire_time: seconds to keep retrying before timing out
        :param lock_timeout: expiry (seconds) set on the lock key
        """
        self.lock_key = 'lock:' + lock_key
        self.acquire_time = acquire_time
        self.lock_timeout = lock_timeout
        self.identifier = ''

    def acquire(self, blocking=True):
        """
        Try to take the lock.

        :param blocking: when True, retry until ``acquire_time`` elapses and
            raise :class:`AcquireTimeoutError` on timeout; when False, make
            a single attempt and return False on failure.
        :return: True if the lock was acquired
        """
        identifier = str(uuid.uuid4())
        end = time.time() + self.acquire_time
        while time.time() < end:
            # SET NX EX is atomic: exactly one client can create the key.
            if self.redis.set(self.lock_key, identifier, ex=self.lock_timeout, nx=True):
                self.identifier = identifier
                return True
            if blocking:
                time.sleep(0.01)
            else:
                return False
        raise AcquireTimeoutError()

    def release(self):
        """
        Release the lock if this instance still owns it.
        """
        if self.identifier == '':
            return
        pipe = self.redis.pipeline(True)
        pipe.watch(self.lock_key)
        value = pipe.get(self.lock_key)
        # Bug fix: the key may have expired (and possibly been re-acquired
        # by another client).  The original called .decode() on None here,
        # raising AttributeError; now we only delete when we still own it.
        if value is not None and value.decode(encoding='utf-8') == self.identifier:
            pipe.multi()
            pipe.delete(self.lock_key)
            pipe.execute()
        else:
            # Drop the WATCH and return the connection to the pool.
            pipe.reset()
        self.identifier = ''

    @staticmethod
    def lock(lockname, acquire_time=10, lock_timeout=60, blocking=True):
        """
        Decorator form:
            @RedisLock.lock('test', acquire_time=1, lock_timeout=60, blocking=False)
            def test():
                pass
        :param lockname: lock name
        :param acquire_time: seconds to block while acquiring the lock
        :param lock_timeout: lock expiry in seconds
        :param blocking: with ``blocking=False`` the wrapped function is
            silently skipped (wrapper returns None) when the lock is busy
        """
        def decorator(func):
            def wrapper(*args, **kwargs):
                lock = RedisLock(lockname, acquire_time=acquire_time, lock_timeout=lock_timeout)
                if lock.acquire(blocking=blocking):
                    try:
                        return func(*args, **kwargs)
                    finally:
                        lock.release()
            return wrapper
        return decorator
class RedisCache:
    """
    Redis-backed distributed cache decorator with a two-level fallback.
    """

    @classmethod
    def register_redis(cls, redis: Redis):
        """Install the shared Redis client used by the cache."""
        cls.redis = redis

    @staticmethod
    def __get_one_name(name):
        """
        Return the primary (level-1) cache key.
        """
        return "cache:%s" % name

    @staticmethod
    def __get_two_name(name):
        """
        Return the secondary (level-2, longer-lived) cache key.
        """
        return "cache2:%s" % name

    @classmethod
    def cache(cls, cachename, timeout=60):
        """
        Usage:
            @RedisCache.cache('test', timeout=60)
            def test():
                pass
        :param cachename: cache entry name
        :param timeout: level-1 expiry in seconds (level-2 lives 10x longer)
        :return:
        """
        def decorator(func):
            def wrapper(*args, **kwargs):
                val = cls.redis.get(cls.__get_one_name(cachename))
                if val is not None:
                    # Level-1 hit: return the cached value.
                    return pickle.loads(val)
                # Cache-stampede mitigation: only one process rebuilds the
                # value from the source at a time.
                lock = RedisLock('cachelock.' + cachename, lock_timeout=timeout)
                # NOTE(review): acquire() with default blocking=True raises
                # AcquireTimeoutError on timeout rather than returning
                # False, so this fallback branch looks unreachable as
                # written — confirm intended behavior.
                if lock.acquire() is False:
                    # Lock unavailable: serve from the level-2 cache.
                    # NOTE(review): pickle.loads(None) raises if the
                    # level-2 entry has also expired — confirm.
                    val = cls.redis.get(cls.__get_two_name(cachename))
                    ret = pickle.loads(val)
                else:
                    # Lock held: rebuild from the source and refresh both
                    # cache levels.
                    ret = func(*args, **kwargs)
                    val = pickle.dumps(ret)
                    cls.redis.set(cls.__get_one_name(cachename), val, ex=timeout)
                    cls.redis.set(cls.__get_two_name(cachename), val, ex=timeout * 10)
                    lock.release()
                return ret
            return wrapper
        return decorator
|
nilq/baby-python
|
python
|
from django.db import models
class MyPublicModel(models.Model):
    # Minimal named model — presumably exercises "public" visibility in
    # the surrounding test suite; confirm with its consumers.
    name = models.CharField(max_length=32)
class MyPrivateModel(models.Model):
    # Minimal named model — presumably exercises "private" visibility in
    # the surrounding test suite; confirm with its consumers.
    name = models.CharField(max_length=32)
class MyPresenceModel(models.Model):
    # Minimal named model — presumably exercises presence tracking in
    # the surrounding test suite; confirm with its consumers.
    name = models.CharField(max_length=32)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt

## Linear model of a Boeing 747
# Level flight at 40,000 ft elevation
# Velocity at 774 ft/sec (0.80 Mach)

# States
# u - uw (ft/sec) - horizontal velocity - horizontal wind
# w - ww (ft/sec) - vertical velocity - vertical wind
# q (crad/sec) - angular velocity
# theta (crad) - angle from horizontal
# note: crad = 0.01 rad

# Inputs
# e - elevator
# t - throttle

# Outputs
# u - uw (ft/sec) - horizontal airspeed
# hdot = -w + u0 * theta with u0 = 774 ft/sec

# State-space matrices of the linearized longitudinal dynamics.
A = np.array([[-.003, 0.039, 0, -0.322],
              [-0.065, -0.319, 7.74, 0],
              [0.020, -0.101, -0.429, 0],
              [0, 0, 1, 0]])
B = np.array([[0.01, 1],
              [-0.18, -0.04],
              [-1.16, 0.598],
              [0, 0]])
C = np.array([[1, 0, 0, 0],
              [0, -1, 0, 7.74]])

#%% Build model
m = GEKKO()
x, y, u = m.state_space(A, B, C)

# Uniform 0.1 s grid over a 10 s horizon.  (A coarser non-uniform grid
# that was immediately overwritten here has been removed.)
m.time = np.linspace(0, 10, 101)
m.options.imode = 6   # MPC (simultaneous dynamic control)
m.options.NODES = 2

## MV tuning: bound both manipulated variables, penalize movement,
## and enable them for the optimizer.
for i in range(len(u)):
    u[i].lower = -5
    u[i].upper = 5
    u[i].dcost = 1
    u[i].status = 1

## CV tuning
# tau = first order time constant for trajectories
y[0].tau = 3
y[1].tau = 5
# tr_init = 2 (first order traj, re-center with each cycle)
y[0].tr_init = 2
y[1].tr_init = 2
# targets (dead-band needs upper and lower values)
y[0].sphi = -8.5
y[0].splo = -9.5
y[1].sphi = 5.4
y[1].splo = 4.6
y[0].status = 1
y[1].status = 1

m.solve()

#%% plot results
plt.figure(1)
plt.subplot(311)
plt.plot(m.time, u[0], 'r-', linewidth=2.0)
plt.plot(m.time, u[1], 'k:', linewidth=2.0)
plt.legend(['Elevator', 'Thrust'])
plt.ylabel('MV Action')
plt.subplot(312)
plt.plot(m.time, y[0], 'b:', linewidth=2.0)
# Fixed: legend labels must be a sequence; a bare string is iterated
# character-by-character by matplotlib.
plt.legend(['Air Speed'])
plt.subplot(313)
plt.plot(m.time, y[1], 'g--', linewidth=2.0)
plt.legend(['Climb Rate'])
plt.show()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3.7
from modules.commands.helpers.textutil import add as quote_add
# Usage string and description surfaced by the bot's help command.
# Fixed: the description was missing its closing parenthesis.
HELP_TEXT = ["!addquote <quote>", "Add the selected text for review (broadcasters adding bypass review)."]
def call(salty_inst, c_msg, **kwargs):
    """Submit the message text as a "quote" for review.

    Returns the helper's (success, response) pair unchanged.
    """
    return quote_add(salty_inst, c_msg, "quote", **kwargs)
def test(salty_inst, c_msg, **kwargs):
    # Placeholder self-test hook expected by the command loader.
    assert True
|
nilq/baby-python
|
python
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import time
import luigi
from servicecatalog_factory import constants
from servicecatalog_factory.workflow.portfolios.get_bucket_task import GetBucketTask
from servicecatalog_factory.workflow.tasks import FactoryTask, logger
class CreateProductTask(FactoryTask):
    """Ensure a Service Catalog product exists (and is up to date) in a region.

    Looks the product up by name and updates any drifted metadata in place,
    or creates it with a placeholder provisioning artifact when missing.
    Writes the resulting ProductViewSummary (plus ``uid``) as JSON output.
    """

    uid = luigi.Parameter()
    region = luigi.Parameter()
    name = luigi.Parameter()
    # significant=False: changes to these fields do not alter task identity.
    owner = luigi.Parameter(significant=False)
    description = luigi.Parameter(significant=False)
    distributor = luigi.Parameter(significant=False)
    support_description = luigi.Parameter(significant=False)
    support_email = luigi.Parameter(significant=False)
    support_url = luigi.Parameter(significant=False)
    tags = luigi.ListParameter(default=[], significant=False)

    def params_for_results_display(self):
        """Key parameters shown in run-result summaries."""
        return {
            "region": self.region,
            "uid": self.uid,
            "name": self.name,
        }

    def requires(self):
        """Depend on the bucket that hosts the placeholder template."""
        return {"s3_bucket_url": GetBucketTask()}

    def output(self):
        """Local JSON target recording the product's ProductViewSummary."""
        return luigi.LocalTarget(
            f"output/CreateProductTask/{self.region}-{self.name}.json"
        )

    def run(self):
        """Find-or-create the product, then persist its summary."""
        logger_prefix = f"{self.region}-{self.name}"
        with self.regional_client("servicecatalog") as service_catalog:
            # Full-text search can return near-matches; exact-name match below.
            search_products_as_admin_response = service_catalog.search_products_as_admin_single_page(
                Filters={"FullTextSearch": [self.name]}
            )
            found = False
            product_view_summary = None
            for product_view_details in search_products_as_admin_response.get(
                "ProductViewDetails"
            ):
                product_view_summary = product_view_details.get("ProductViewSummary")
                if product_view_summary.get("Name") == self.name:
                    found = True
                    logger.info(f"Found product: {self.name}: {product_view_summary}")
                    # Collect only the metadata fields that have drifted.
                    things_to_change = dict()
                    if product_view_summary.get("Owner") != self.owner:
                        things_to_change["Owner"] = self.owner
                    if product_view_summary.get("ShortDescription") != self.description:
                        things_to_change["Description"] = self.description
                    if product_view_summary.get("Distributor") != self.distributor:
                        things_to_change["Distributor"] = self.distributor
                    if (
                        product_view_summary.get("SupportDescription")
                        != self.support_description
                    ):
                        things_to_change[
                            "SupportDescription"
                        ] = self.support_description
                    if product_view_summary.get("SupportEmail") != self.support_email:
                        things_to_change["SupportEmail"] = self.support_email
                    if product_view_summary.get("SupportUrl") != self.support_url:
                        things_to_change["SupportUrl"] = self.support_url
                    if len(things_to_change.keys()) > 0:
                        service_catalog.update_product(
                            Id=product_view_summary.get("ProductId"), **things_to_change
                        )
                    break
            if not found:
                logger.info(f"Not found product: {self.name}, creating")
                # Factory marker tag plus the user-supplied tags.
                tags = [{"Key": "ServiceCatalogFactory:Actor", "Value": "Product"}] + [
                    {"Key": t.get("Key"), "Value": t.get("Value"),} for t in self.tags
                ]
                create_product_args = {
                    "ProductType": "CLOUD_FORMATION_TEMPLATE",
                    # Placeholder artifact pointing at an empty template in S3;
                    # real versions are added by later tasks.
                    "ProvisioningArtifactParameters": {
                        "Name": "-",
                        "Type": "CLOUD_FORMATION_TEMPLATE",
                        "Description": "Placeholder version, do not provision",
                        "Info": {
                            "LoadTemplateFromURL": "https://{}.s3.{}.amazonaws.com/{}".format(
                                self.load_from_input("s3_bucket_url").get(
                                    "s3_bucket_url"
                                ),
                                constants.HOME_REGION,
                                "empty.template.yaml",
                            )
                        },
                    },
                    "Name": self.name,
                    "Owner": self.owner,
                    "Description": self.description,
                    "Distributor": self.distributor,
                    "SupportDescription": self.support_description,
                    "SupportEmail": self.support_email,
                    "SupportUrl": self.support_url,
                    "Tags": tags,
                }
                product_view_summary = (
                    service_catalog.create_product(**create_product_args)
                    .get("ProductViewDetail")
                    .get("ProductViewSummary")
                )
                product_id = product_view_summary.get("ProductId")
                logger.info(f"Created product {self.name}, waiting for completion")
                # Poll until the new product is visible to admin searches.
                # NOTE(review): there is no retry limit — this loops forever
                # if the product never appears; confirm acceptable.
                while True:
                    time.sleep(2)
                    search_products_as_admin_response = (
                        service_catalog.search_products_as_admin_single_page()
                    )
                    products_ids = [
                        product_view_detail.get("ProductViewSummary").get("ProductId")
                        for product_view_detail in search_products_as_admin_response.get(
                            "ProductViewDetails"
                        )
                    ]
                    logger.info(f"Looking for {product_id} in {products_ids}")
                    if product_id in products_ids:
                        logger.info(f"Found {product_id} ")
                        break
        if product_view_summary is None:
            raise Exception(f"{logger_prefix}: did not find or create a product")
        product_view_summary["uid"] = self.uid
        with self.output().open("w") as f:
            logger.info(f"{logger_prefix}: about to write! {product_view_summary}")
            f.write(json.dumps(product_view_summary, indent=4, default=str,))
|
nilq/baby-python
|
python
|
"""Delta-v estimation for propulsive landing."""
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import fsolve
# Speed of sound in air at 290 K [units: meter second**-1].
a = 342
# Graviational acceleration [units: meter second**-2].
g_0 = 9.81
# Atmosphere scale height [units: meter].
# At 290 K, near surface
H_0 = 8500.
# Atmosphere sea level density [units: kilogram meter**-3].
# At 290 K
rho_0 = 1.20
def drag(M):
    """Drag coefficient of a cylinder in transonic flight.

    Reference: S. F. Hoerner, "Fluid-Dynamic Drag" Ch 16.3

    Arguments:
        M (scalar): Mach number [units: dimensionless].

    Returns:
        Drag coefficient C_D [units: dimensionless].
    """
    # Hoerner gives K_fore = 0.9; the 0.8 subsonic value is a hack to
    # make the curve match Hoerner ch 16 figure 14.
    if M > 1:
        forebody_factor = 0.9
    else:
        forebody_factor = 0.8
    if M < 1:
        # Stagnation pressure increment / dynamic pressure (Eqn 15.4).
        pressure_ratio = 1 + M**2/4 + M**4/10
    else:
        # M >= 1: include pressure loss due to a normal shock (Eqn 16.4).
        pressure_ratio = 1.84 - 0.76/M**2 + 0.166/M**4 + 0.035/M**6
    return forebody_factor * pressure_ratio
def terminal_velocity(m_A, H):
    """Terminal velocity of a falling cylinder.

    Arguments:
        m_A (scalar): mass/area ratio [units: kilogram meter**-2].
        H (scalar): altitude [units: meter].

    Returns:
        Terminal velocity [units: meter second**-1].
    """
    def _residual(v):
        # Difference between the trial speed and the speed at which drag
        # balances weight in an exponential atmosphere at altitude H.
        mach = v / a
        balance_speed = ((2 * m_A * g_0 / (drag(mach) * rho_0))**0.5
                         * np.exp(H / (2 * H_0)))
        return v - balance_speed
    # Solve the implicit equation numerically; 300 m/s is the initial guess.
    return fsolve(_residual, 300.)[0]
def landing_dv(m_A, accel):
    """Delta-v required for a propulsive landing burn.

    The vehicle falls at terminal velocity until it starts a
    constant-acceleration braking burn that reaches zero velocity at the
    surface. The burn-start altitude and burn-start velocity are solved
    for self-consistently.

    Arguments:
        m_A (scalar): mass/area ratio [units: kilogram meter**-2].
        accel (scalar): landing acceleration [units: meter second**-2].

    Returns:
        Landing delta-v, including gravity losses
        [units: meter second**-1].
    """
    def root_fun(v):
        M = v / a
        # Burn duration and the altitude at which the burn must start.
        t_b = v / accel
        H = 0.5 * accel * t_b**2
        # Terminal velocity at the burn-start altitude must equal the
        # burn-start velocity v.
        v_t = (2 * m_A * g_0 / (drag(M) * rho_0))**0.5 * np.exp(H / (2 * H_0))
        return v - v_t
    v_t = fsolve(root_fun, 300.)[0]
    # Gravity loss: the burn lasts t_b = v_t / accel, adding g_0 * t_b of dv.
    return v_t * (1 + g_0 / accel)
def main():
    """Plot the drag model and landing delta-v versus mass/area ratio."""
    # Plot the drag model
    M = np.linspace(0, 4)
    C_D = [drag(M_) for M_ in M]
    plt.plot(M, C_D)
    plt.grid(True)
    plt.xlabel('$M$')
    plt.ylabel('$C_D$')

    # Range of mass/area ratios to consider
    m_A = np.linspace(300, 4000)

    # Compute and plot delta-v for landing
    plt.figure()
    accels = [2 * g_0, 3 * g_0, 4 * g_0]
    colors = ['C0', 'C1', 'C2']
    for accel, color in zip(accels, colors):
        dv_land = np.array([landing_dv(m_A_, accel) for m_A_ in m_A])
        # Recover terminal velocity by undoing the gravity-loss factor
        # applied in landing_dv.
        v_t = dv_land / (1 + g_0 / accel)
        # Labels are raw strings: '\D' is an invalid escape sequence in a
        # non-raw literal (DeprecationWarning in modern Python).
        plt.plot(m_A, dv_land,
                 label=r'$\Delta v_{{land}}, a={:.0f} g_0$'.format(accel / g_0),
                 color=color, linestyle='-')
        plt.plot(m_A, v_t,
                 label=r'$v_t, a={:.0f} g_0$'.format(accel / g_0),
                 color=color, linestyle='--')
    plt.axhline(y=a, color='grey', label='sonic')
    plt.xlabel('Mass / frontal area ratio [kg/m^2]')
    plt.ylabel('Velocity [m/s]')
    plt.legend()
    plt.savefig('landing_dv.png')
    plt.show()
# Script entry point: generate and save/display the plots.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""Auto-generated file, do not edit by hand. EG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_EG = PhoneMetadata(id='EG', country_code=20, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:[189]\\d?|[24-6])\\d{8}|[13]\\d{7}', possible_length=(8, 9, 10), possible_length_local_only=(6, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1(?:3[23]|5\\d?)|(?:2[2-4]|3)\\d|4(?:0[2-5]|[578][23]|64)|5(?:0[2-7]|5\\d|7[23])|6[24-689]3|8(?:2[2-57]|4[26]|6[237]|8[2-4])|9(?:2[27]|3[24]|52|6[2356]|7[2-4]))\\d{6}', example_number='234567890', possible_length=(8, 9), possible_length_local_only=(6, 7)),
mobile=PhoneNumberDesc(national_number_pattern='1[0-25]\\d{8}', example_number='1001234567', possible_length=(10,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', example_number='8001234567', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{7}', example_number='9001234567', possible_length=(10,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d)(\\d{7,8})', format='\\1 \\2', leading_digits_pattern=['[23]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{6,7})', format='\\1 \\2', leading_digits_pattern=['1[35]|[4-6]|8[2468]|9[235-7]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[189]'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.