content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
class ControlConfiguration(object):
    """Horizon settings for a controller plus a geometrically decaying diff threshold.

    The threshold starts at ``max_diff`` and is shrunk by ``apply_decay``
    until it would drop at or below ``min_diff``.
    """

    def __init__(self, control_horizon, prediction_horizon, min_diff=1e-3, max_diff=0.15, diff_decay=0.8):
        self.control_horizon = control_horizon
        self.prediction_horizon = prediction_horizon
        self.min_diff = min_diff
        self.max_diff = max_diff
        self.diff_decay = diff_decay
        # Decay always begins from the loosest (largest) threshold.
        self.current_diff = max_diff

    def apply_decay(self):
        """Multiply current_diff by the decay factor while it exceeds min_diff."""
        if self.current_diff > self.min_diff:
            self.current_diff = self.current_diff * self.diff_decay
|
nilq/baby-python
|
python
|
from flexitext.utils import listify
class Parser:
    """Cursor over a token stream with lookahead/consume primitives.

    Tokens are expected to expose a ``kind`` attribute; the stream is
    terminated by a token whose kind is ``"EOF"``.
    """

    def __init__(self, tokens):
        self.tokens = tokens
        self.current = 0  # index of the next token to consume

    def at_end(self):
        """True once the cursor sits on the EOF sentinel."""
        return self.peek().kind == "EOF"

    def advance(self):
        """Consume and return the token under the cursor."""
        token = self.tokens[self.current]
        self.current += 1
        return token

    def peek(self):
        """Return the token under the cursor without consuming it."""
        return self.tokens[self.current]

    def previous(self):
        """Return the most recently consumed token."""
        return self.tokens[self.current - 1]

    def check(self, types):
        """True when the next token's kind is among *types* (scalar or list)."""
        return (not self.at_end()) and self.peek().kind in listify(types)

    def match(self, types):
        """Consume the next token and return True when its kind matches *types*."""
        matched = self.check(types)
        if matched:
            self.advance()
        return matched
|
nilq/baby-python
|
python
|
import argparse
import polyscope as ps
import pickle
import os
import numpy as np
from mmcv import Config, DictAction
from mmdet3d.datasets import build_dataloader, build_dataset
def parse_args():
    """Parse command-line arguments for the result-visualization script.

    Also mirrors --local_rank into the LOCAL_RANK environment variable and
    folds the deprecated --options into --eval-options.

    Returns:
        argparse.Namespace with all parsed options.
    Raises:
        ValueError: if both --options and --eval-options are given.
    """
    # BUG FIX: `warnings` was used below but never imported anywhere in the
    # file, so the deprecation branch raised NameError instead of warning.
    import warnings

    parser = argparse.ArgumentParser(
        description='Visualize Results')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('result', help='saved result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--logfile', type=str,
        help='place to holder evaluation results')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where results will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Distributed launchers expect LOCAL_RANK in the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated spelling of --eval-options; accept only one.
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both specified, '
            '--options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Pair each dataset sample with its pickled prediction and browse the
    misclassified ones in an interactive polyscope viewer."""
    args = parse_args()
    ps.init()
    cfg = Config.fromfile(args.config)
    dataset = build_dataset(cfg.data.test)
    print(dataset.CLASSES)
    with open(args.result, 'rb') as fin:
        results = pickle.load(fin)
    # Rotation into the viewer frame: swaps the Y/Z axes and negates the new
    # Z component.
    R = np.array([[1, 0, 0], [0, 0, 1], [0,-1,0]])
    for i, data in enumerate(dataset):
        # Start each sample with an empty scene.
        ps.remove_all_structures()
        scene = dataset.scenes[i]
        name=f'sample-{i}'
        # Skip scenes that contain at most one entry.
        if len(scene.keys()) <= 1:
            continue
        for k, val in scene.items():
            name += f'-{k}-{val}'
            # NOTE(review): this expression has no effect (its result is
            # discarded); possibly a leftover existence check -- confirm
            # before removing.
            dataset.samples[k][val]
            points = dataset.load_points(k, val).tensor.cpu().numpy()
            points = R.dot(points.T).T
            # Offset each category's cloud so the clouds don't overlap.
            ptr = ps.register_point_cloud(f'{k}-{val}', points+np.array([10, 0, 10*dataset.cat2id[k]]))
            # NOTE(review): assumes dataset index = cat2id * 100 + sample id
            # -- confirm against the dataset implementation.
            idx = dataset.cat2id[k] * 100 + val
            gt_labels = dataset[idx]['gt_labels'].data.cpu().numpy()
            pred = results[idx]['pred'].cpu().numpy()
            # Per-point accuracy: 1.0 where prediction matches ground truth.
            acc = (gt_labels == pred).astype(np.float32)
            ptr.add_scalar_quantity('acc', acc, enabled=True)
        points = data['points'].data.cpu().numpy()
        points = R.dot(points.T).T
        gt_labels = data['gt_labels'].data.cpu().numpy()
        pred = results[i]['pred'].cpu().numpy()
        acc = (gt_labels == pred).astype(np.float32)
        # Only show samples with at least one misclassified point.
        if acc.mean() > 1 - 1e-6:
            continue
        ptr = ps.register_point_cloud(f'sample-{i}', points)
        ptr.add_scalar_quantity('gt', gt_labels, enabled=True)
        ptr.add_scalar_quantity('acc', acc, enabled=True)
        # Blocks until the viewer window is closed, then continues the loop.
        ps.show()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from flask.ext.mail import Mail, Message
from run import mail
def sendUserEmail(to, message):
    """Forward a user-to-user contact message under the standard subject line."""
    subject = 'Allez Viens User Contacted You'
    sendEmail([to], subject, message)
def sendValidationEmail(to, url):
    """Send the account-validation email containing the edit link *url*.

    to: single recipient address
    url: validation/edit link embedded in the HTML body
    """
    header = 'Allez Viens Validation'
    # BUG FIX: the original used "</br>", which is not a valid HTML tag;
    # the self-closing "<br/>" renders a line break reliably.
    body = ("Please click <a href='" + url + "'>this link</a> to validate and "
            "edit your route.<br/> If you did not request this, please "
            "disregard this email.")
    sendEmail([to], header, body)
def sendEmail(to, header, body):
    """Build and send a Flask-Mail message.

    to: list of recipient addresses
    header: subject line
    body: message content, used as both the HTML part and the plain-text
          fallback
    """
    msg = Message(
        header,
        recipients=to
    )
    # BUG FIX: the plain-text part was set to the literal string "body",
    # so text-only mail clients received a meaningless message.
    msg.body = body
    msg.html = body
    mail.send(msg)
|
nilq/baby-python
|
python
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: data/exercise_route2.proto
# NOTE(review): protoc-generated module -- regenerate from the .proto file
# rather than editing by hand.
import sys
# Identity on py3, latin1-encode on py2, so serialized_pb is bytes on both.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import types_pb2 as types__pb2
# File descriptor; serialized_pb is the compiled FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='data/exercise_route2.proto',
  package='data',
  syntax='proto2',
  serialized_pb=_b('\n\x1a\x64\x61ta/exercise_route2.proto\x12\x04\x64\x61ta\x1a\x0btypes.proto\":\n\x13PbLocationSyncPoint\x12\x10\n\x08latitude\x18\x01 \x02(\x01\x12\x11\n\tlongitude\x18\x02 \x02(\x01\"\x83\x02\n\x18PbExerciseRouteSyncPoint\x12\r\n\x05index\x18\x01 \x02(\r\x12+\n\x08location\x18\x02 \x01(\x0b\x32\x19.data.PbLocationSyncPoint\x12(\n\rgps_date_time\x18\x03 \x01(\x0b\x32\x11.PbSystemDateTime\x12\x13\n\x08\x61ltitude\x18\x04 \x01(\x11:\x01\x30\x12#\n\x16\x63oordinate_granularity\x18\x05 \x01(\r:\x03\x31\x30\x30\x12#\n\x15timestamp_granularity\x18\x06 \x01(\r:\x04\x31\x30\x30\x30\x12\"\n\x14\x61ltitude_granularity\x18\x07 \x01(\r:\x04\x31\x30\x30\x30\"\xb1\x01\n\x17PbExerciseRouteSamples2\x12\x32\n\nsync_point\x18\x01 \x03(\x0b\x32\x1e.data.PbExerciseRouteSyncPoint\x12\x18\n\x10satellite_amount\x18\x02 \x03(\r\x12\x10\n\x08latitude\x18\x03 \x03(\x12\x12\x11\n\tlongitude\x18\x04 \x03(\x12\x12\x11\n\ttimestamp\x18\x05 \x03(\x11\x12\x10\n\x08\x61ltitude\x18\x06 \x03(\x12')
  ,
  dependencies=[types__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for message data.PbLocationSyncPoint.
_PBLOCATIONSYNCPOINT = _descriptor.Descriptor(
  name='PbLocationSyncPoint',
  full_name='data.PbLocationSyncPoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='latitude', full_name='data.PbLocationSyncPoint.latitude', index=0,
      number=1, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='longitude', full_name='data.PbLocationSyncPoint.longitude', index=1,
      number=2, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=49,
  serialized_end=107,
)
# Descriptor for message data.PbExerciseRouteSyncPoint.
_PBEXERCISEROUTESYNCPOINT = _descriptor.Descriptor(
  name='PbExerciseRouteSyncPoint',
  full_name='data.PbExerciseRouteSyncPoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='index', full_name='data.PbExerciseRouteSyncPoint.index', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='location', full_name='data.PbExerciseRouteSyncPoint.location', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='gps_date_time', full_name='data.PbExerciseRouteSyncPoint.gps_date_time', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='altitude', full_name='data.PbExerciseRouteSyncPoint.altitude', index=3,
      number=4, type=17, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='coordinate_granularity', full_name='data.PbExerciseRouteSyncPoint.coordinate_granularity', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=100,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='timestamp_granularity', full_name='data.PbExerciseRouteSyncPoint.timestamp_granularity', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=1000,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='altitude_granularity', full_name='data.PbExerciseRouteSyncPoint.altitude_granularity', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=1000,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=110,
  serialized_end=369,
)
# Descriptor for message data.PbExerciseRouteSamples2.
_PBEXERCISEROUTESAMPLES2 = _descriptor.Descriptor(
  name='PbExerciseRouteSamples2',
  full_name='data.PbExerciseRouteSamples2',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='sync_point', full_name='data.PbExerciseRouteSamples2.sync_point', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='satellite_amount', full_name='data.PbExerciseRouteSamples2.satellite_amount', index=1,
      number=2, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='latitude', full_name='data.PbExerciseRouteSamples2.latitude', index=2,
      number=3, type=18, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='longitude', full_name='data.PbExerciseRouteSamples2.longitude', index=3,
      number=4, type=18, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='data.PbExerciseRouteSamples2.timestamp', index=4,
      number=5, type=17, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='altitude', full_name='data.PbExerciseRouteSamples2.altitude', index=5,
      number=6, type=18, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=372,
  serialized_end=549,
)
# Resolve message-typed fields now that all descriptors exist.
_PBEXERCISEROUTESYNCPOINT.fields_by_name['location'].message_type = _PBLOCATIONSYNCPOINT
_PBEXERCISEROUTESYNCPOINT.fields_by_name['gps_date_time'].message_type = types__pb2._PBSYSTEMDATETIME
_PBEXERCISEROUTESAMPLES2.fields_by_name['sync_point'].message_type = _PBEXERCISEROUTESYNCPOINT
DESCRIPTOR.message_types_by_name['PbLocationSyncPoint'] = _PBLOCATIONSYNCPOINT
DESCRIPTOR.message_types_by_name['PbExerciseRouteSyncPoint'] = _PBEXERCISEROUTESYNCPOINT
DESCRIPTOR.message_types_by_name['PbExerciseRouteSamples2'] = _PBEXERCISEROUTESAMPLES2
# Create the concrete message classes from their descriptors and register
# them with the default symbol database.
PbLocationSyncPoint = _reflection.GeneratedProtocolMessageType('PbLocationSyncPoint', (_message.Message,), dict(
  DESCRIPTOR = _PBLOCATIONSYNCPOINT,
  __module__ = 'data.exercise_route2_pb2'
  # @@protoc_insertion_point(class_scope:data.PbLocationSyncPoint)
  ))
_sym_db.RegisterMessage(PbLocationSyncPoint)
PbExerciseRouteSyncPoint = _reflection.GeneratedProtocolMessageType('PbExerciseRouteSyncPoint', (_message.Message,), dict(
  DESCRIPTOR = _PBEXERCISEROUTESYNCPOINT,
  __module__ = 'data.exercise_route2_pb2'
  # @@protoc_insertion_point(class_scope:data.PbExerciseRouteSyncPoint)
  ))
_sym_db.RegisterMessage(PbExerciseRouteSyncPoint)
PbExerciseRouteSamples2 = _reflection.GeneratedProtocolMessageType('PbExerciseRouteSamples2', (_message.Message,), dict(
  DESCRIPTOR = _PBEXERCISEROUTESAMPLES2,
  __module__ = 'data.exercise_route2_pb2'
  # @@protoc_insertion_point(class_scope:data.PbExerciseRouteSamples2)
  ))
_sym_db.RegisterMessage(PbExerciseRouteSamples2)
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
# Created by MechAviv
# Quest ID :: 32707
# [FriendStory] Student From Another World
# NOTE(review): `sm` is the script manager injected by the quest engine; its
# API is not visible from this file. 1530000 is presumably the speaker
# template id and speaker type 3 the phone-call presentation -- confirm
# against the engine's script docs.
sm.setIntroBoxChat(1530000)
sm.setSpeakerType(3)
sm.sendNext("Hello? Hello?\r\n\r\nOkay, the magician guy said he teleported the phone to someone who can help. So, um, hi? Can you help me, maybe?")
sm.setIntroBoxChat(1530000)
sm.setSpeakerType(3)
sm.sendSay("...What?\r\n\r\nYes, I'm talking to-\r\n\r\nDude, relax! I'll just ask-")
sm.setIntroBoxChat(1530000)
sm.setSpeakerType(3)
# Player decides whether to accept the teleport offer.
if sm.sendAskYesNo("Argh, this guy is being a total spaz.\r\nHey, he wants to know if he can teleport you here. That cool?\r\n#b(You will be moved to the Closet House in Henesys.)#k"):
    sm.setIntroBoxChat(1530000)
    sm.setSpeakerType(3)
    sm.sendNext("Great. ...Hey, magic dude! Do your magicky-thing!\r\n\r\n#e#b(You can also use the Dimensional Mirror to get here.)#n#k")
    # Warp to map 330002040 (the Closet House), spawn point 0.
    sm.warp(330002040, 0)
else:
    sm.setIntroBoxChat(1530000)
    sm.setSpeakerType(3)
    sm.sendNext("...I understand. They wouldn't be interested in me, even with things turned around like they are.")
|
nilq/baby-python
|
python
|
# original: https://github.com/yukuku/telebot
# modified by: Bak Yeon O @ http://bakyeono.net
# description: http://bakyeono.net/post/2015-08-24-using-telegram-bot-api.html
# github: https://github.com/bakyeono/using-telegram-bot-api
#
# 구글 앱 엔진 라이브러리 로드
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# URL, JSON, 로그, 정규표현식 관련 라이브러리 로드
import urllib
import urllib2
import json
import logging
import re
# 봇 토큰, 봇 API 주소
TOKEN = '238857665:AAGE2Axe3w8ygb4J8i3V4xOLjQ5hTITh9oY'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# 봇이 응답할 명령어
CMD_START = '/start'
CMD_STOP = '/stop'
CMD_HELP = '/help'
CMD_BROADCAST = '/broadcast'
# 봇 사용법 & 메시지
USAGE = u"""[사용법] 아래 명령어를 메시지로 보내거나 버튼을 누르시면 됩니다.
/start - (봇 활성화)
/stop - (봇 비활성화)
/help - (이 도움말 보여주기)
"""
MSG_START = u'봇을 시작합니다.'
MSG_STOP = u'봇을 정지합니다.'
# 커스텀 키보드
CUSTOM_KEYBOARD = [
[CMD_START],
[CMD_STOP],
[CMD_HELP],
]
# Per-chat bot enabled state.
# Stored in and read from Google App Engine's Datastore (NDB).
# /start from a user enables the bot for that chat.
# /stop from a user disables it.
class EnableStatus(ndb.Model):
    # Per-chat on/off flag for the bot, keyed by the chat id (as a string).
    # Persisted in Google App Engine Datastore via NDB.
    enabled = ndb.BooleanProperty(required=True, indexed=True, default=False,)
def set_enabled(chat_id, enabled):
    """Persist the bot's enabled/disabled state for one chat.

    chat_id: (integer) chat whose state to change
    enabled: (boolean) new state to store
    """
    status = EnableStatus.get_or_insert(str(chat_id))
    status.enabled = enabled
    status.put()
def get_enabled(chat_id):
    """Return True when the bot has been enabled for *chat_id*.

    Chats with no stored record default to disabled.
    """
    status = EnableStatus.get_by_id(str(chat_id))
    return status.enabled if status else False
def get_enabled_chats():
    """Return every EnableStatus entity whose bot is currently enabled."""
    return EnableStatus.query(EnableStatus.enabled == True).fetch()
# Message-sending helpers
def send_msg(chat_id, text, reply_to=None, no_preview=True, keyboard=None):
    """Send one message through the Telegram bot API (sendMessage).

    chat_id: (integer) destination chat
    text: (string) message body
    reply_to: (integer) message id this message replies to, if any
    no_preview: (boolean) disable automatic URL previews when True
    keyboard: (list) custom keyboard layout, rows of button labels
    """
    params = {
        'chat_id': str(chat_id),
        'text': text.encode('utf-8'),
    }
    if reply_to:
        params['reply_to_message_id'] = reply_to
    if no_preview:
        params['disable_web_page_preview'] = no_preview
    if keyboard:
        # Telegram expects the reply markup as a JSON-encoded string.
        params['reply_markup'] = json.dumps({
            'keyboard': keyboard,
            'resize_keyboard': True,
            'one_time_keyboard': False,
            'selective': (reply_to != None),
        })
    try:
        urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode(params)).read()
    except Exception as exc:
        # Delivery failures are logged but never bubble up to the handler.
        logging.exception(exc)
def broadcast(text):
    """Send *text* to every chat where the bot is currently enabled."""
    for status in get_enabled_chats():
        send_msg(status.key.string_id(), text)
# Bot command handlers
def cmd_start(chat_id):
    """Enable the bot for this chat and confirm with the start message."""
    set_enabled(chat_id, True)
    send_msg(chat_id, MSG_START, keyboard=CUSTOM_KEYBOARD)
def cmd_stop(chat_id):
    """Disable the bot for this chat and confirm with the stop message."""
    set_enabled(chat_id, False)
    send_msg(chat_id, MSG_STOP)
def cmd_help(chat_id):
    """Send the usage text together with the custom keyboard."""
    send_msg(chat_id, USAGE, keyboard=CUSTOM_KEYBOARD)
def cmd_broadcast(chat_id, text):
    """Acknowledge to the requester, then broadcast *text* to all enabled chats."""
    send_msg(chat_id, u'메시지를 방송합니다.', keyboard=CUSTOM_KEYBOARD)
    broadcast(text)
def cmd_echo(chat_id, text, reply_to):
    """Echo the user's own message back as a reply to it."""
    send_msg(chat_id, text, reply_to=reply_to)
def process_cmds(msg):
    """Dispatch one incoming Telegram message to the matching bot command.

    Ordering matters: /start must work even while the bot is disabled for
    the chat; every other command requires the chat to be enabled first.
    Non-command text is echoed back as a reply.
    """
    msg_id = msg['message_id']
    chat_id = msg['chat']['id']
    text = msg.get('text')
    if not text:
        return
    if text == CMD_START:
        cmd_start(chat_id)
        return
    if not get_enabled(chat_id):
        return
    if text == CMD_STOP:
        cmd_stop(chat_id)
    elif text == CMD_HELP:
        cmd_help(chat_id)
    else:
        broadcast_match = re.match('^' + CMD_BROADCAST + ' (.*)', text)
        if broadcast_match:
            cmd_broadcast(chat_id, broadcast_match.group(1))
        else:
            cmd_echo(chat_id, text, reply_to=msg_id)
# Web request handlers.
# GET /me: proxy Telegram's getMe (bot identity) back to the caller.
class MeHandler(webapp2.RequestHandler):
    def get(self):
        # Outbound API calls can be slow; raise GAE's fetch deadline to 60s.
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
# GET /updates: fetch pending updates from Telegram and echo them back.
class GetUpdatesHandler(webapp2.RequestHandler):
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
# GET /set-webhook: register the webhook URL passed as ?url=... with Telegram.
class SetWebhookHandler(webapp2.RequestHandler):
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        url = self.request.get('url')
        # Silently does nothing when no url parameter is supplied.
        if url:
            self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
# POST /webhook: the endpoint Telegram calls with each new update.
class WebhookHandler(webapp2.RequestHandler):
    def post(self):
        urlfetch.set_default_fetch_deadline(60)
        body = json.loads(self.request.body)
        self.response.write(json.dumps(body))
        # NOTE(review): assumes every update carries a 'message' key; other
        # update kinds (edited messages etc.) would raise KeyError -- confirm.
        process_cmds(body['message'])
# Route table mapping URL paths to the handlers above (App Engine WSGI app).
app = webapp2.WSGIApplication([
    ('/me', MeHandler),
    ('/updates', GetUpdatesHandler),
    ('/set-webhook', SetWebhookHandler),
    ('/webhook', WebhookHandler),
], debug=True)
|
nilq/baby-python
|
python
|
from . import overworld
from . import dungeon1
from . import dungeon2
from . import dungeon3
from . import dungeon4
from . import dungeon5
from . import dungeon6
from . import dungeon7
from . import dungeon8
from . import dungeonColor
from .requirements import AND, OR, COUNT, FOUND
from .location import Location
from locations.items import *
class Logic:
    """World-graph model for the randomizer.

    Builds the overworld plus the nine dungeons, connects each dungeon
    entrance (optionally shuffled) to its overworld location with the item
    requirements needed to enter, then walks the graph once to collect every
    reachable location and item slot.

    configuration_options: project options object (reads .dungeonshuffle)
    rnd: random.Random-like source used for entrance shuffling
    """

    def __init__(self, configuration_options, rnd):
        world = overworld.World(configuration_options)
        dungeons = [
            dungeon1.Dungeon1(configuration_options),
            dungeon2.Dungeon2(configuration_options),
            dungeon3.Dungeon3(configuration_options),
            dungeon4.Dungeon4(configuration_options),
            dungeon5.Dungeon5(configuration_options),
            dungeon6.Dungeon6(configuration_options),
            dungeon7.Dungeon7(configuration_options),
            dungeon8.Dungeon8(configuration_options),
            dungeonColor.DungeonColor(configuration_options)
        ]
        # entranceMapping[i] is the dungeon placed behind entrance i;
        # identity unless dungeon shuffle is on.  The attribute stays None
        # for the vanilla layout so callers can tell "not shuffled" apart
        # from "shuffled into identity".
        entranceMapping = list(range(9))
        if configuration_options.dungeonshuffle:
            rnd.shuffle(entranceMapping)
            self.entranceMapping = entranceMapping
        else:
            self.entranceMapping = None
        # Item requirements to reach/open each dungeon entrance.
        dungeons[entranceMapping[0]].entrance.connect(world.start, TAIL_KEY)
        dungeons[entranceMapping[1]].entrance.connect(world.swamp, OR(BOWWOW, MAGIC_ROD, HOOKSHOT))
        dungeons[entranceMapping[2]].entrance.connect(world.center_area, AND(SLIME_KEY, OR(FLIPPERS, FEATHER)))
        dungeons[entranceMapping[3]].entrance.connect(world.center_area, AND(ANGLER_KEY, OR(FLIPPERS, AND(POWER_BRACELET, PEGASUS_BOOTS))))
        dungeons[entranceMapping[4]].entrance.connect(world.center_area, FLIPPERS)
        dungeons[entranceMapping[5]].entrance.connect(world.dungeon6_entrance, FACE_KEY)
        dungeons[entranceMapping[6]].entrance.connect(world.right_mountains_3, BIRD_KEY)
        dungeons[entranceMapping[7]].entrance.connect(world.left_side_mountain, AND(COUNT(SHIELD, 2), OCARINA, SWORD))  # TODO: Requires song3
        dungeons[entranceMapping[8]].entrance.connect(world.graveyard, POWER_BRACELET)
        self.start = world.start
        self.location_list = []
        self.iteminfo_list = []
        # Temporary visited-set for the graph walk; removed afterwards.
        self.__location_set = set()
        self.__recursiveFindAll(self.start)
        del self.__location_set
        for ii in self.iteminfo_list:
            ii.configure(configuration_options)

    def dumpFlatRequirements(self):
        """Debug helper: flatten requirement sets over the graph and print them.

        Propagates flattened requirements through both simple and gated
        connections until a fixed point, then prints each item with its
        requirement alternatives.
        """
        # BUG FIX: only names (AND, OR, COUNT, FOUND) were imported from
        # .requirements at module scope, so the `requirements.*` references
        # below raised NameError; import the module itself here.
        from . import requirements

        def __rec(location, req):
            if hasattr(location, "flat_requirements"):
                new_flat_requirements = requirements.mergeFlat(location.flat_requirements, requirements.flatten(req))
                # Fixed point reached for this node: nothing new to propagate.
                if new_flat_requirements == location.flat_requirements:
                    return
                location.flat_requirements = new_flat_requirements
            else:
                location.flat_requirements = requirements.flatten(req)
            for connection, requirement in location.simple_connections:
                __rec(connection, AND(req, requirement) if req else requirement)
            for connection, requirement in location.gated_connections:
                __rec(connection, AND(req, requirement) if req else requirement)
        __rec(self.start, None)
        for ii in self.iteminfo_list:
            print(ii)
            for fr in ii._location.flat_requirements:
                print(" " + ", ".join(sorted(map(str, fr))))

    def __recursiveFindAll(self, location):
        # Depth-first walk that records every location and item slot once.
        if location in self.__location_set:
            return
        self.location_list.append(location)
        self.__location_set.add(location)
        for ii in location.items:
            self.iteminfo_list.append(ii)
        for connection, requirement in location.simple_connections:
            self.__recursiveFindAll(connection)
        for connection, requirement in location.gated_connections:
            self.__recursiveFindAll(connection)
class MultiworldLogic:
    """Combines two independent Logic worlds into one shared graph.

    Each world's item names and requirements get a "W<n>_" prefix so the
    solver can tell the worlds apart; all worlds hang off one synthetic
    start location.
    """

    def __init__(self, configuration_options, rnd):
        self.worlds = []
        self.start = Location()
        self.location_list = [self.start]
        self.iteminfo_list = []
        # NOTE(review): the number of worlds is hard-coded to 2 (it must
        # match MultiworldItemInfoWrapper.getOptions).
        for n in range(2):
            world = Logic(configuration_options, rnd)
            for ii in world.iteminfo_list:
                ii.world = n
            for loc in world.location_list:
                # Rewrite every requirement so it refers to world-n's items,
                # and wrap item slots so they carry their world id.
                loc.simple_connections = [(target, addWorldIdToRequirements(n, req)) for target, req in loc.simple_connections]
                loc.gated_connections = [(target, addWorldIdToRequirements(n, req)) for target, req in loc.gated_connections]
                loc.items = [MultiworldItemInfoWrapper(n, ii) for ii in loc.items]
                self.iteminfo_list += loc.items
            self.worlds.append(world)
            # Graft this world's start onto the shared start node and move
            # its items across so they are not counted twice.
            self.start.simple_connections += world.start.simple_connections
            self.start.gated_connections += world.start.gated_connections
            self.start.items += world.start.items
            world.start.items.clear()
            self.location_list += world.location_list
        self.entranceMapping = None
class MultiworldItemInfoWrapper:
    """Decorates an item-info object with the id of the world it belongs to.

    Item option strings are namespaced as "W<world>_<option>" so a single
    solver run can place items across worlds.
    """

    def __init__(self, world, target):
        self.world = world
        self.target = target
        # Lazily built cache of namespaced option strings (see getOptions).
        self.OPTIONS = None

    def read(self, rom):
        # Prefix the wrapped item's value with this wrapper's world id.
        return "W%d_%s" % (self.world, self.target.read(rom))

    def getOptions(self):
        """Return the namespaced option list, computing it on first use."""
        if self.OPTIONS is None:
            options = self.target.getOptions()
            if self.target.MULTIWORLD and len(options) > 1:
                # Item may travel between worlds: offer every option for
                # every world.  NOTE(review): world count hard-coded to 2,
                # matching MultiworldLogic.
                self.OPTIONS = []
                for n in range(2):
                    self.OPTIONS += ["W%d_%s" % (n, t) for t in options]
            else:
                # Item is pinned to its own world.
                self.OPTIONS = ["W%d_%s" % (self.world, t) for t in options]
        return self.OPTIONS

    def patch(self, rom, option):
        # option looks like "W<digit>_<name>": option[1] is the world digit
        # and option[3:] the original option string (assumes < 10 worlds,
        # which holds while the world count is 2).
        if self.world != int(option[1]):
            # Item belongs to the other world: mark the room as cross-world.
            rom.banks[0x3E][0x3300 + self.target.room] = 0x01
            self.target.patch(rom, option[3:], cross_world=True)
        else:
            self.target.patch(rom, option[3:])

    def __repr__(self):
        return "W%d:%s" % (self.world, repr(self.target))
def addWorldIdToRequirements(n, req):
    """Return a copy of requirement *req* with every item name prefixed "W<n>_".

    Recurses through COUNT/AND/OR nodes; bare strings are item names.
    Raises RuntimeError for any other requirement type.
    """
    if isinstance(req, str):
        return "W%d_%s" % (n, req)
    if isinstance(req, COUNT):
        return COUNT(addWorldIdToRequirements(n, req.item), req.amount)
    if isinstance(req, AND):
        return AND(*[addWorldIdToRequirements(n, sub) for sub in req])
    if isinstance(req, OR):
        return OR(*[addWorldIdToRequirements(n, sub) for sub in req])
    raise RuntimeError("Unknown requirement type: %s" % (req))
|
nilq/baby-python
|
python
|
'''
This object reads PCL files and prepare the microarray data as training set to DAs.
The input training vector can either be a gene's expression value over all sampels, or one
microarray sample with all genes' expression value. To feed into DAs, the
standard input dataset is a two-dimensional array with each row as a training sample.
'''
import numpy
class PCLfile(object):
    """In-memory representation of a PCL microarray file.

    Rows are genes, columns are samples.  Provides per-row and per-column
    normalizations (all in place), helpers to expose the matrix with either
    genes or samples as training rows, and a writer back to PCL format.
    """

    def __init__(self, dataset, skip_col=2):
        '''
        type dataset: string
        param dataset: path to the pcl file
        type skip_col: int
        param skip_col: the number of columns to skip between the first gene ID column and the first
        experimental column.
        '''
        try:
            dataset_fh = open(dataset, 'r')
        except IOError:
            # NOTE(review): execution continues after this message and then
            # fails below with a NameError on dataset_fh; consider re-raising.
            print("Error, file not found.")
        self.data_matrix = []  # gene-by-sample expression values
        self.id_list = []      # gene IDs, one per row of data_matrix
        line_count = 0
        for line in dataset_fh:
            if line_count == 0:
                # Header row: sample names start after the ID + skipped columns.
                self.sample_list = line.rstrip().split('\t')[(skip_col + 1):]
                line_count += 1
                continue
            line_new = line.strip().split('\t')
            self.data_matrix.append(line_new[(skip_col + 1):])  # expression values for this gene
            self.id_list.append(line_new[0])  # this gene's ID
        # Convert to a float64 array (rows = genes, columns = samples).
        self.data_matrix = numpy.array(self.data_matrix, dtype=numpy.float64)

    def zero_one_normalization(self):
        """Linearly rescale each gene row so its min is 0 and max is 1.

        Modifies self.data_matrix in place.  A constant row divides by zero.
        """
        for i in range(self.data_matrix.shape[0]):
            row_minimum = self.data_matrix[i, :].min()
            row_maximum = self.data_matrix[i, :].max()
            row_range = row_maximum - row_minimum
            self.data_matrix[i, :] = (self.data_matrix[i, :] - row_minimum) / row_range

    def zero_one_normalization_sample(self):
        """Linearly rescale each sample column so its min is 0 and max is 1."""
        for i in range(self.data_matrix.shape[1]):
            col_minimum = self.data_matrix[:, i].min()
            col_maximum = self.data_matrix[:, i].max()
            col_range = col_maximum - col_minimum
            self.data_matrix[:, i] = (self.data_matrix[:, i] - col_minimum) / col_range

    def z_normalization(self):
        """Convert each gene row to row-relative Z scores, in place."""
        for i in range(self.data_matrix.shape[0]):
            mean = numpy.mean(self.data_matrix[i, :])
            standev = numpy.std(self.data_matrix[i, :])
            # BUG FIX: was `self.data.matrix`, which raised AttributeError.
            self.data_matrix[i, :] = (self.data_matrix[i, :] - mean) / standev

    def z_normalization_sample(self):
        """Convert each sample column to column-relative Z scores, in place."""
        for i in range(self.data_matrix.shape[1]):
            mean = numpy.mean(self.data_matrix[:, i])
            standev = numpy.std(self.data_matrix[:, i])
            # BUG FIX: was `self.data.matrix`, which raised AttributeError.
            self.data_matrix[:, i] = (self.data_matrix[:, i] - mean) / standev

    def logistic_normalization(self):
        """Squash each gene row through the logistic function, in place."""
        for i in range(self.data_matrix.shape[0]):
            self.data_matrix[i, :] = 1.0 / (1.0 + numpy.exp(-self.data_matrix[i, :]))

    def get_gene(self):
        """Return the matrix with each gene as a row."""
        return self.data_matrix

    def get_sample(self):
        """Return the matrix with each sample as a row."""
        return self.data_matrix.T

    def get_permuted_sample(self, seed=123):
        """Return (samples-as-rows matrix, sample names), permuted together.

        seed == 0 disables the permutation.  Reseeding before each shuffle
        keeps the matrix rows and the name list in the same order.
        """
        transposed = self.data_matrix.T  # rows are samples after transpose
        if seed == 0:
            return transposed, self.sample_list
        else:
            numpy.random.seed(seed)
            numpy.random.shuffle(transposed)  # shuffles rows (samples) only
            numpy.random.seed(seed)
            numpy.random.shuffle(self.sample_list)
            return transposed, self.sample_list

    def permuted_gene_order(self, seed=123):
        """Shuffle gene rows and their IDs with the same seed, in place."""
        numpy.random.seed(seed)
        numpy.random.shuffle(self.data_matrix)
        numpy.random.seed(seed)
        numpy.random.shuffle(self.id_list)

    def write_pcl(self, outputPath):
        """Write the object back out as a tab-separated PCL file.

        Returns True on success, False if the output file cannot be opened.
        """
        try:
            outputFileHandle = open(outputPath, 'w')
        except IOError:
            print("Was not able to open the output file")
            return False
        # Header row: ID column label followed by the sample names.
        outputFileHandle.write('Gene_symbol\t')
        header = '\t'.join(map(str, self.sample_list))
        outputFileHandle.write(header)
        outputFileHandle.write('\n')
        # One row per gene: ID, then its expression values.
        for i in range(self.data_matrix.shape[0]):
            geneID = self.id_list[i]
            geneValue = self.data_matrix[i, :]
            outputFileHandle.write(geneID + '\t' + '\t'.join(map(str, geneValue)))
            outputFileHandle.write('\n')
        outputFileHandle.close()
        return True
|
nilq/baby-python
|
python
|
from setuptools import setup
# Minimal packaging config: installs the `budget` package and exposes a
# `budget` console command that invokes budget.__main__:main.
setup(
    name='budget',
    version='1.2.2',
    packages=['budget'],
    entry_points={
        'console_scripts': [
            'budget=budget.__main__:main'
        ]
    })
|
nilq/baby-python
|
python
|
from Jumpscale import j
def main(self):
    """
    to run:

    kosmos 'j.data.types.test(name="iprange")'

    Smoke-test for the `iprange` Jumpscale type: IPv4 CIDR validation
    against a configured default, then IPv6 validation.
    """
    ipv4 = j.data.types.get("iprange", default="192.168.0.0/28")
    assert ipv4.default_get() == "192.168.0.0/28"
    assert ipv4.check("192.168.23.255/28") == True
    # .300 exceeds the octet range, so the whole CIDR must be rejected.
    assert ipv4.check("192.168.23.300/28") == False
    assert ipv4.check("192.168.23.255/32") == True
    # NOTE(review): no default given here and default_get() yields "::",
    # presumably the type's built-in IPv6 default -- confirm.
    ipv6 = j.data.types.get("iprange")
    assert ipv6.default_get() == "::"
    assert ipv6.check("2001:db00::0/24") == True
    assert ipv6.check("2001:db00::1/24") == True
    # A netmask-style suffix is not a valid IPv6 prefix length.
    assert ipv6.check("2001:db00::0/ffff:ff00::") == False
    self._log_info("TEST DONE LIST")
    return "OK"
|
nilq/baby-python
|
python
|
from typing import Optional
from pydantic import BaseModel, Field
class CodePayload(BaseModel):
    """Pydantic payload carrying a code snippet with a short identifier."""

    id: str = Field(..., max_length=8)  # required; validated to at most 8 chars
    code: str
    description: Optional[str] = None   # free-text note, omitted when absent
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------
# BASE: Simple script to score points for the facebook CTF
# -------------------------------------------------------------
#
# Written by Javier (@javutin)
import time
import json
import hashlib
import logging
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
HOST = ''                          # bind on all interfaces
PORT = 12345
INFINITE_LOOP = 1
TEAM_FILE = "/tmp/SCORE_POINTS"    # file where the team writes its name
TEAM_NAME = "team"                 # JSON key for the team name
TEAM_MD5 = "check"                 # JSON key for the md5 of the name
LOG_FILE = "score_http.log"
LOG = 1                            # 1: log to LOG_FILE, 0: print to stdout
DEFAULT_VALUE = "facebookCTF"      # fallback name when TEAM_FILE is unreadable
CHAR_LIMIT = 32                    # max characters kept from a team name
LINES_LIMIT = 1                    # max lines read from TEAM_FILE
if LOG == 1:
    # Dedicated file logger with timestamped lines.
    logger = logging.getLogger(__name__)
    logfile = logging.FileHandler(LOG_FILE)
    formatter = logging.Formatter('[ %(asctime)s ] - %(message)s')
    logfile.setFormatter(formatter)
    logger.addHandler(logfile)
    logger.setLevel(logging.INFO)
class customHTTPServer(BaseHTTPRequestHandler):
def log_request(self, code): pass
def do_GET(self):
list_teams = []
json_list = []
try:
if LOG == 1:
logger.info('%s %s' % (self.client_address, self.command))
else:
print "[ %s ] %s %s" % (time.asctime(), self.client_address, self.command)
self.send_response(200)
self.end_headers()
f = open(TEAM_FILE, 'r')
teams = f.readlines()[:LINES_LIMIT]
for t in teams:
list_teams.append(t.strip()[:CHAR_LIMIT])
f.close()
except Exception, e:
if LOG == 1:
logger.info('Oops! Something happened: %s' % (e.strerror))
else:
print "[ %s ] Oops! Something happened: %s" % (time.asctime(), e.strerror)
team_name = DEFAULT_VALUE
list_teams.append(team_name)
#list_teams = list(set(list_teams))
for l_t in list_teams:
team_md5 = hashlib.md5(l_t).hexdigest()
team_list = { TEAM_NAME : l_t, TEAM_MD5 : team_md5 }
json_list.append(team_list)
teams_json_list = json.dumps(json_list);
self.wfile.write(teams_json_list)
self.wfile.write('\n')
if LOG == 1:
logger.info('Sent %s' % (teams_json_list))
else:
print "[ %s ] Sent %s" % (time.asctime(), teams_json_list)
return
def main():
    """Run the scoring HTTP server until interrupted with CTRL-C."""
    try:
        server = HTTPServer((HOST, PORT), customHTTPServer)
        if LOG == 1:
            logger.info('CTF Scorer Starts - %s:%s' % (HOST, PORT))
        else:
            print "[ %s ] CTF Scorer Starts - %s:%s" % (time.asctime(), HOST, PORT)
        server.serve_forever()
    except KeyboardInterrupt:
        # Graceful shutdown on CTRL-C: log and release the listening socket.
        if LOG == 1:
            logger.info('CTF Scorer Stopped')
        else:
            print "[ %s ] CTF Scorer Stopped" % (time.asctime())
        server.socket.close()

if __name__ == '__main__':
    main()
# EOF
|
nilq/baby-python
|
python
|
from collections import defaultdict
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse
from symposion.proposals.kinds import get_kind_slugs, get_proposal_model
from email_log.models import Email
from pycon.models import PyConProposal
from pycon.finaid.models import FinancialAidApplication, APPLICATION_TYPE_SPEAKER
from pycon.finaid.utils import has_application, send_email_message
# All registered proposal-kind slugs (valid values for --kind).
SLUGS = get_kind_slugs()
# NOTE(review): evaluated at import time — requires a configured Site/DB.
DOMAIN = Site.objects.get_current().domain
class Command(BaseCommand):
    """Notify accepted, aid-requesting speakers for a given proposal kind.

    Speakers who already have a financial aid application have it promoted
    to a speaker grant and receive a confirmation email; speakers without
    one receive an invitation to apply. An email is skipped when an
    identical one already appears in the email log.
    """

    def add_arguments(self, parser):
        parser.add_argument('--kind', action='store', dest='kind', required=True,
                            help='Proposal Kind to notify: {}'.format(', '.join(SLUGS)))

    def handle(self, *args, **options):
        kind = options['kind']
        if kind not in SLUGS:
            print('ERROR: Unknown Proposal Kind: {}\n Must be one of: {}'.format(kind, ', '.join(SLUGS)))
            return False
        to_apply = defaultdict(list)    # email -> proposals, no finaid application yet
        to_confirm = defaultdict(list)  # email -> proposals, application already on file
        accepted = get_proposal_model(kind).objects.filter(overall_status=PyConProposal.STATUS_ACCEPTED)
        for proposal in accepted:
            speaker = proposal.speaker
            if not speaker.financial_support:
                continue
            if has_application(speaker.user):
                # Promote the existing application to a speaker grant.
                application = FinancialAidApplication.objects.get(user=speaker.user)
                application.application_type = APPLICATION_TYPE_SPEAKER
                application.presenting = True
                application.save()
                to_confirm[speaker.email].append(proposal)
            else:
                to_apply[speaker.email].append(proposal)
        # The original computed reverse() URLs here but never used them.
        self._send_notifications(to_apply, 'speaker_grant_apply', kind)
        self._send_notifications(to_confirm, 'speaker_grant_confirm', kind)

    def _send_notifications(self, grouped, template, kind):
        """Send `template` to each speaker email, skipping already-notified ones."""
        for email, proposals in grouped.items():
            notified = Email.objects.filter(
                recipients='; '.join(['pycon-aid@python.org', email]),
                subject='Speaker assistance for your {}.'.format(kind.title())
            ).exists()
            if notified:
                continue
            send_email_message(
                template,
                from_='pycon-aid@python.org',
                to=['pycon-aid@python.org', email],
                context={
                    'proposal_kind': kind,
                    'user': proposals[0].speaker.user,
                    'domain': DOMAIN,
                    'proposal': proposals[0],
                },
            )
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
import fnmatch
import logging
import re
import subprocess
import uuid
from threading import Thread
from .decorators import side_effecting
from .utils import create_sha1sum_file
class Action:
    """Abstract base class for a backup action.

    Attributes:
        name: Unique action name (string).
        src_path: Path to the source files.
        dest_path: Path to the output files.
        basename: String; must not be a prefix of another basename.
            Used to link paired actions (e.g. the tar action and the
            cleaner action for the same file set).
        use_re_in_patterns: If True, patterns are regular expressions,
            otherwise shell-style globs.
        continue_on_error: Whether execution should continue after this
            action fails.
        dry: Enables dry-run (no side effects) mode.
    """
    DRYRUN_POSTFIX = 'DRYRUN'

    def __init__(self, name):
        self.name = name
        self.logger = logging.getLogger(name)
        self.source = None
        self.basename = _generate_random_basename()
        self.scheme = None
        self.src_path = '.'
        self.dest_path = '.'
        self.use_re_in_patterns = False
        self.continue_on_error = False
        self._dry = False

    @property
    def dry(self):
        return self._dry

    @dry.setter
    def dry(self, dry):
        # Dry mode is one-way: once enabled it stays on, and the logger
        # name is tagged so dry-run output is recognizable.
        if not self._dry and dry:
            self._dry = dry
            self.logger.name = '{0}_{1}'.format(
                self.logger.name,
                self.DRYRUN_POSTFIX,
            )

    def prepare_pattern(self, pattern):
        """Normalize a raw pattern.

        Args:
            pattern: A string. When use_re_in_patterns is False the
                pattern is treated as a shell glob and translated to a
                regular expression.

        Returns:
            A pattern string ready for use/compilation.
        """
        pattern = pattern.strip()
        if self.use_re_in_patterns:
            return pattern
        translated = fnmatch.translate(pattern)
        # Strip the trailing flags/anchor fnmatch.translate appends (it
        # differs between Python versions) and re-anchor explicitly.
        if translated.endswith('(?ms)'):
            translated = translated[:-5]
        if translated.endswith('\\Z'):
            translated = translated[:-2]
        return r'\A{0}\Z'.format(translated)

    @staticmethod
    def stream_watcher_filtered(
        stream,
        logger,
        filters=None,
        remove_header=False,
        default_level=None,
    ):
        """Watch a data stream and log each line at a filtered level.

        Args:
            stream: readable stream with data
            filters (dict): mapping of the form
                { log_level (int): [pat1 (str), pat2 (str)], ... }
            remove_header (bool): drop the first word of each line
            default_level (int): log level used when no pattern matches.
        """
        default_level = default_level or logging.NOTSET
        filters = filters or {}
        # Flatten {level: [patterns]} into (pattern, level) pairs.
        filter_tuples = [
            (pattern, status) for status, patterns in filters.items()
            for pattern in patterns
        ]
        try:
            for line in stream:
                line = line.strip()
                if remove_header and ' ' in line:
                    # drop the first word of the line if it is not the last one
                    words = line.split()[1:]
                    line = ' '.join(words)
                if not line:
                    break
                for pattern, log_level in filter_tuples:
                    if re.match(pattern, line):
                        logger.log(level=log_level, msg=line)
                        break
                else:
                    # no filter matched: fall back to the default level
                    logger.log(level=default_level, msg=line)
        except UnicodeDecodeError:
            logger.exception('Encoding in the output is corrupted :(')
        if not stream.closed:
            stream.close()

    def execute_cmdline(
        self,
        cmdline,
        return_stdout=False,
        stdout_params=None,
        stderr_params=None,
    ):
        """Execute cmdline in a shell.

        Args:
            cmdline: Command line string to run.
            return_stdout: If True, skip the stream watchers and return the
                captured stdout after the process finishes.
            stdout_params: Dict of stream_watcher_filtered kwargs for stdout:
                {'default_level': logging.<LEVEL>, 'remove_header': <bool>, 'filters': <dict>}
            stderr_params: Same shape as stdout_params, for stderr.
                The filters format is described in stream_watcher_filtered.
        """
        process = subprocess.Popen(
            cmdline,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            shell=True,
        )
        if return_stdout:
            process.wait()
            value = process.stdout.read()
            process.stdout.close()
            process.stderr.close()
            return value
        # Watch stdout and stderr on separate threads so neither pipe
        # blocks the other while the child process runs.
        stdout_params['stream'] = process.stdout
        stdo = Thread(
            target=self.stream_watcher_filtered,
            name='stdout-watcher',
            kwargs=stdout_params,
        )
        stderr_params['stream'] = process.stderr
        stde = Thread(
            target=self.stream_watcher_filtered,
            name='stderr-watcher',
            kwargs=stderr_params,
        )
        stdo.start()
        stde.start()
        process.wait()
        stdo.join()
        stde.join()
        return None

    # Unsafe (always-executing) variant kept before the side_effecting
    # wrapper below turns execute_cmdline into a dry-run-aware method.
    unsafe_execute_cmdline = classmethod(execute_cmdline)
    """Unsafe version of execute_cmdline."""

    execute_cmdline = side_effecting(execute_cmdline)

    def create_checksum_file(self, src_file, dest_file):
        """Create a checksum file.

        This wrapper only adds logging; the real work happens in
        _create_checksum_file.
        """
        try:
            hash_value = self._create_checksum_file(
                src_file,
                dest_file,
            )
        except PermissionError as exc:
            self.logger.warning(
                'Невозможно создать файл с хэшсуммой: %s',
                exc,
            )
        else:
            if self.dry:
                hash_value = '(dryrun, хэшсумма не подсчитывается)'
            self.logger.info(
                'Создан файл %s с хэшсуммой %s',
                dest_file,
                hash_value,
            )

    @side_effecting
    def _create_checksum_file(self, src_file, dest_file):
        """Write the SHA-1 of src_file into dest_file (skipped in dry-run)."""
        return create_sha1sum_file(
            src_file,
            dest_file,
        )

    def __repr__(self):
        name = self.__class__.__name__
        attrs = self.__dict__.copy()
        # Avoid recursing into the linked source action: show only its
        # class and name instead of its full repr.
        if attrs.get('source'):
            attrs['source'] = '<{cls} \'{name}\'>'.format(
                cls=attrs.get('source').__class__.__name__,
                name=attrs.get('source').name,
            )
        return '{name}: {attrs}'.format(
            name=name,
            attrs=attrs,
        )

    def start(self):
        """Abstract method that runs the action.

        Returns:
            False if the processing chain must be interrupted.
        """
        raise NotImplementedError('Should have implemented this')
def _generate_random_basename():
"""Генерирует случайный basename."""
return uuid.uuid4().hex.upper()[0:6]
|
nilq/baby-python
|
python
|
"""Update invoice check constraint
Revision ID: 49e1c8c65f59
Revises: c45d12536d7e
Create Date: 2017-03-31 02:09:55.488859
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '49e1c8c65f59'        # this migration
down_revision = 'c45d12536d7e'   # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Replace the invoices afa/gwg check constraint with the pooling-aware one.

    Exactly one of pooling / afa / gwg may be set (or none of them).
    """
    op.drop_constraint('invoices_afa_gwg_check', 'invoices')
    op.create_check_constraint(
        'invoices_afa_gwg_check', 'invoices',
        'pooling is TRUE and gwg is FALSE and afa is NULL or '
        'pooling is FALSE and afa is not NULL and gwg is FALSE or '
        'pooling is FALSE and afa is NULL and gwg is TRUE or '
        'pooling is FALSE and afa is NULL and gwg is FALSE')
    # dead trailing `pass` removed (it followed real statements)
def downgrade():
    # Intentionally a no-op: the previous constraint definition is not
    # restored, so downgrading past this revision requires manual fix-up.
    pass
|
nilq/baby-python
|
python
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import imghdr
import os
import re
import sys
import semantic_version
from murano.common.helpers import path
from murano.common.i18n import _
from murano.common import utils
from murano.packages import exceptions
from murano.packages import package
class PackageBase(package.Package):
    """Common base for murano package implementations.

    Parses and validates the manifest fields shared by all package formats
    (FullName, Version, Type, Name, Description, Author, Supplier, Logo,
    Tags) and exposes them as read-only properties. Format-specific
    subclasses must implement the abstract members (requirements, classes,
    get_class, ui).
    """

    def __init__(self, format_name, runtime_version,
                 source_directory, manifest):
        """Validate the manifest and cache its fields.

        :param format_name: package format identifier
        :param runtime_version: runtime version of the package format
        :param source_directory: directory the package was loaded from
        :param manifest: parsed manifest mapping
        :raises exceptions.PackageFormatError: if FullName is missing or
            malformed, or the package Type is unknown
        """
        super(PackageBase, self).__init__(
            format_name, runtime_version, source_directory)
        self._full_name = manifest.get('FullName')
        if not self._full_name:
            raise exceptions.PackageFormatError('FullName is not specified')
        self._check_full_name(self._full_name)
        # coerce() accepts partial versions such as "1.2"
        self._version = semantic_version.Version.coerce(str(manifest.get(
            'Version', '0.0.0')))
        self._package_type = manifest.get('Type')
        if self._package_type not in package.PackageType.ALL:
            raise exceptions.PackageFormatError(
                'Invalid package Type {0}'.format(self._package_type))
        self._display_name = manifest.get('Name', self._full_name)
        self._description = manifest.get('Description')
        self._author = manifest.get('Author')
        self._supplier = manifest.get('Supplier') or {}
        self._logo = manifest.get('Logo')
        self._tags = manifest.get('Tags', [])
        self._logo_cache = None
        self._supplier_logo_cache = None
        self._source_directory = source_directory

    @abc.abstractproperty
    def requirements(self):
        raise NotImplementedError()

    @abc.abstractproperty
    def classes(self):
        raise NotImplementedError()

    @abc.abstractmethod
    def get_class(self, name):
        raise NotImplementedError()

    @abc.abstractproperty
    def ui(self):
        raise NotImplementedError()

    @property
    def full_name(self):
        return self._full_name

    @property
    def source_directory(self):
        return self._source_directory

    @property
    def version(self):
        return self._version

    @property
    def package_type(self):
        return self._package_type

    @property
    def display_name(self):
        return self._display_name

    @property
    def description(self):
        return self._description

    @property
    def author(self):
        return self._author

    @property
    def supplier(self):
        return self._supplier

    @property
    def tags(self):
        # defensive copy so callers cannot mutate the manifest tags
        return list(self._tags)

    @property
    def logo(self):
        return self._load_image(self._logo, 'logo.png', 'logo')

    @property
    def meta(self):
        return None

    @property
    def supplier_logo(self):
        return self._load_image(
            self._supplier.get('Logo'), 'supplier_logo.png', 'supplier logo')

    def get_resource(self, name):
        """Return the path of resource `name`, creating Resources/ if needed."""
        resources_dir = path.secure_join(self._source_directory, 'Resources')
        if not os.path.exists(resources_dir):
            os.makedirs(resources_dir)
        return path.secure_join(resources_dir, name)

    def _load_image(self, file_name, default_name, what_image):
        """Load and validate an image file; returns its bytes or None.

        :param file_name: manifest-supplied file name (may be falsy)
        :param default_name: fallback file name to probe for
        :param what_image: human-readable label used in error messages
        :raises exceptions.PackageLoadError: on unsupported format,
            oversized file, or any read failure
        """
        full_path = path.secure_join(
            self._source_directory, file_name or default_name)
        # An absent *default* image is fine; an explicitly named one that
        # is missing will fail the checks below.
        if not os.path.isfile(full_path) and not file_name:
            return
        allowed_ftype = ('png', 'jpeg', 'gif')
        allowed_size = 500 * 1024
        try:
            if imghdr.what(full_path) not in allowed_ftype:
                msg = _('{0}: Unsupported Format. Only {1} allowed').format(
                    what_image, ', '.join(allowed_ftype))
                raise exceptions.PackageLoadError(msg)
            fsize = os.stat(full_path).st_size
            if fsize > allowed_size:
                msg = _('{0}: Uploaded image size {1} is too large. '
                        'Max allowed size is {2}').format(
                    what_image, fsize, allowed_size)
                raise exceptions.PackageLoadError(msg)
            with open(full_path, 'rb') as stream:
                return stream.read()
        except Exception as ex:
            # re-raise anything (including our own PackageLoadError) as a
            # PackageLoadError while preserving the original traceback
            trace = sys.exc_info()[2]
            utils.reraise(
                exceptions.PackageLoadError,
                exceptions.PackageLoadError(
                    'Unable to load {0}: {1}'.format(what_image, ex)),
                trace)

    @staticmethod
    def _check_full_name(full_name):
        """Reject names that are not dot-separated identifier segments."""
        error = exceptions.PackageFormatError('Invalid FullName ' + full_name)
        if re.match(r'^[\w\.]+$', full_name):
            # no leading/trailing dot and no empty segment
            if full_name.startswith('.') or full_name.endswith('.'):
                raise error
            if '..' in full_name:
                raise error
        else:
            raise error
|
nilq/baby-python
|
python
|
from converter import Parser, Converter
# Conversion queries exercised by the demo loop below.
test_cases = [
    # LENGTH
    "100 kmeter nfeet",
    "1 nauticalmile meter",
    "1 microleague millifoot",
    "13 dlea Mft",
    "1 ft fur",
    "100 kilometer dafeet",
    "1 ft m",
    "17 kilord Gin",  # bug fix: missing comma silently merged this with the next entry
    # DATA
    "10 kbyte Mb",
    "8 bit byte",
    "100 GiB Mibit",
    "100 Gibit MiB"
]
if __name__ == "__main__":
    # Parse each query string and print its converted result, one per line.
    for item in test_cases:
        data = Parser().parse(item)
        print(Converter().result(data))
|
nilq/baby-python
|
python
|
#
# The script providing implementation of structures and functions used in
# the Novelty Search method.
#
from functools import total_ordering
# number of nearest neighbors used when averaging distances into a novelty score
KNN = 15
# hard cap on the archive size; beyond it, items must displace the least novel one
MAXNoveltyArchiveSize = 1000
@total_ordering
class NoveltyItem:
    """
    Record of one genome's novelty evaluation: the novelty score, the
    generation it was produced in, and the behavioral data points used by
    the novelty metric. Items are ordered by novelty score and stored in a
    NoveltyArchive.
    """
    def __init__(self, generation=-1, genomeId=-1, novelty=-1):
        """
        Create a new item.
        Arguments:
            generation: The evolution generation when this item was created
            genomeId: The ID of genome associated with it
            novelty: The novelty score of genome
        """
        self.generation = generation
        self.genomeId = genomeId
        self.novelty = novelty
        # becomes True once the item has been placed into the archive
        self.in_archive = False
        # behavioral data points; pairwise distances between these drive
        # the novelty estimate
        self.data = []

    def __str__(self):
        """Human-readable form used when dumping the archive to a file."""
        return "{0}: id: {1:d}, at generation: {2:d}, novelty: {3:f}\tdata: {4}".format(
            self.__class__.__name__, self.genomeId, self.generation,
            self.novelty, self.data)

    def _is_valid_operand(self, other):
        return hasattr(other, "novelty")

    def __lt__(self, other):
        """Order items by ascending novelty score."""
        if not self._is_valid_operand(other):
            return NotImplemented
        return self.novelty < other.novelty
class NoveltyArchive:
    """
    The novelty archive contains all of the novel items we have encountered
    thus far, kept sorted in descending order of novelty.
    """
    def __init__(self, metric):
        """
        Creates new instance with the function defining the novelty metric.
        Arguments:
            metric: The function to calculate the novelty score of specific genome.
        """
        self.novelty_metric = metric
        # all novel items found so far (descending novelty order)
        self.novel_items = []

    def size(self):
        """Returns the number of items currently stored in this archive."""
        return len(self.novel_items)

    def evaluate_novelty_score(self, item, n_items_list):
        """
        Evaluate the novelty score of `item` as the average distance to its
        KNN nearest neighbors among archive items and current population items.
        Arguments:
            item: The novelty item to evaluate
            n_items_list: The list with novelty items for current population
        """
        # distances to archived novelty items
        distances = []
        for n in self.novel_items:
            if n.genomeId != item.genomeId:
                distances.append(self.novelty_metric(n, item))
            else:
                print("Novelty Item is already in archive: %d" % n.genomeId)
        # distances to the novelty items in the population
        for p_item in n_items_list:
            if p_item.genomeId != item.genomeId:
                distances.append(self.novelty_metric(p_item, item))
        # average of the KNN smallest distances
        # NOTE: divides by KNN even when fewer neighbors exist (keeps the
        # original scaling behavior)
        distances = sorted(distances)
        item.novelty = sum(distances[:KNN]) / KNN
        self._add_novelty_item(item)
        return item.novelty

    def write_to_file(self, path):
        """
        Write all NoveltyItems stored in this archive to the file at `path`.
        """
        with open(path, 'w') as file:
            for ni in self.novel_items:
                file.write("%s\n" % ni)

    def _add_novelty_item(self, item):
        """
        Add the given NoveltyItem to this archive, evicting the least novel
        archived item when the archive is full.

        Bug fix: the original unconditionally set item.in_archive = True even
        when the candidate was discarded (archive full, not novel enough) and
        never cleared the flag on evicted items.
        """
        if len(self.novel_items) >= MAXNoveltyArchiveSize:
            # archive full: keep only if more novel than the current minimum
            if item > self.novel_items[-1]:
                self.novel_items[-1].in_archive = False
                self.novel_items[-1] = item
                item.in_archive = True
            # otherwise the candidate is discarded and stays out of the archive
        else:
            self.novel_items.append(item)
            item.in_archive = True
        # keep descending order by novelty score
        self.novel_items.sort(reverse=True)
|
nilq/baby-python
|
python
|
from timeit import default_timer as timer
# from datahelpers.data_helper_ml_mulmol6_OnTheFly import DataHelperMulMol6
from datahelpers.data_helper_ml_normal import DataHelperMLNormal
from datahelpers.data_helper_ml_2chan import DataHelperML2CH
from datahelpers.data_helper_ml_mulmol6_OnTheFly import DataHelperMLFly
from datahelpers.data_helper_pan11 import DataHelperPan11
from trainer import TrainTask as tr
from trainer import TrainTaskLite as ttl
from evaluators import eval_ml_mulmol_d as evaler
from evaluators import eval_ml_origin as evaler_one
from evaluators import eval_pan11 as evaler_pan
from utils.ArchiveManager import ArchiveManager
from datahelpers.Data import LoadMethod
import logging
def get_exp_logger(am):
    """Route log records to the experiment's log file and to the console.

    The file handler is installed through basicConfig; a StreamHandler is
    added to the root logger so messages also appear on screen.
    """
    log_path = am.get_exp_log_path()
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M',
        filename=log_path,
        filemode='w+',
    )
    console = logging.StreamHandler()
    logging.getLogger('').addHandler(console)
    logging.info("log created: " + log_path)
if __name__ == "__main__":
    ###############################################
    # exp_names you can choose from at this point:
    #
    # Input Components:
    #
    # * ML_One
    # * ML_2CH
    # * ML_Six
    # * ML_One_DocLevel
    # * PAN11
    # * PAN11_2CH
    #
    # Middle Components:
    #
    # * NParallelConvOnePoolNFC
    # * NConvDocConvNFC
    # * ParallelJoinedConv
    # * NCrossSizeParallelConvNFC
    # * InceptionLike
    # * PureRNN
    ################################################
    # Experiment selection: input pipeline, network body, and label file.
    input_component = "ML_2CH"
    middle_component = "NCrossSizeParallelConvNFC"
    truth_file = "17_papers.csv"
    am = ArchiveManager(input_component, middle_component, truth_file=truth_file)
    get_exp_logger(am)
    logging.warning('===================================================')
    logging.debug("Loading data...")
    # Each branch builds the matching data helper and evaluator pair.
    if input_component == "ML_One":
        dater = DataHelperMLNormal(doc_level=LoadMethod.SENT, embed_type="glove",
                                   embed_dim=300, target_sent_len=50, target_doc_len=None, train_csv_file=truth_file,
                                   total_fold=5, t_fold_index=0)
        ev = evaler_one.Evaluator()
    elif input_component == "ML_FLY":
        dater = DataHelperMLFly(doc_level=LoadMethod.SENT, embed_type="glove",
                                embed_dim=300, target_sent_len=50, target_doc_len=None, train_csv_file=truth_file,
                                total_fold=5, t_fold_index=0)
        ev = evaler_one.Evaluator()
    elif input_component == "ML_2CH":
        dater = DataHelperML2CH(doc_level=LoadMethod.SENT, embed_type="both",
                                embed_dim=300, target_sent_len=50, target_doc_len=None, train_csv_file=truth_file,
                                total_fold=5, t_fold_index=0)
        ev = evaler_one.Evaluator()
    elif input_component == "ML_Six":
        # NOTE(review): DataHelperMulMol6 is only imported in a commented-out
        # line at the top of the file; this branch would raise NameError —
        # confirm before selecting ML_Six.
        dater = DataHelperMulMol6(doc_level="sent", num_fold=5, fold_index=4, embed_type="glove",
                                  embed_dim=300, target_sent_len=50, target_doc_len=400)
        ev = evaler.evaler()
    elif input_component == "ML_One_DocLevel":
        dater = DataHelperMLNormal(doc_level="doc", train_holdout=0.80, embed_type="glove",
                                   embed_dim=300, target_sent_len=128, target_doc_len=128)
        ev = evaler_one.Evaluator()
    elif input_component == "PAN11_ONE":
        dater = DataHelperPan11(embed_type="glove", embed_dim=300, target_sent_len=100, prob_code=1)
        ev = evaler_pan.Evaluator()
    elif input_component == "PAN11_2CH":
        dater = DataHelperPan11(embed_type="both", embed_dim=300, target_sent_len=100, prob_code=0)
        ev = evaler_pan.Evaluator()
    else:
        raise NotImplementedError
    # Lite trainer for the original Kim CNN; full trainer otherwise.
    if middle_component == "ORIGIN_KIM":
        tt = ttl.TrainTask(data_helper=dater, am=am, input_component=input_component, exp_name=middle_component,
                           batch_size=64, evaluate_every=100, checkpoint_every=500, max_to_keep=8)
    else:
        tt = tr.TrainTask(data_helper=dater, am=am, input_component=input_component, exp_name=middle_component,
                          batch_size=64, evaluate_every=1000, checkpoint_every=2000, max_to_keep=6,
                          restore_path=None)
    start = timer()
    # n_fc variable controls how many fc layers you got at the end, n_conv does that for conv layers
    tt.training(filter_sizes=[[1, 2, 3, 4, 5]], num_filters=80, dropout_keep_prob=0.5, n_steps=15000, l2_lambda=0,
                dropout=True, batch_normalize=True, elu=True, fc=[128])
    end = timer()
    # wall-clock training time in seconds
    print((end - start))
    ev.load(dater)
    ev.evaluate(am.get_exp_dir(), None, doc_acc=True, do_is_training=True)
|
nilq/baby-python
|
python
|
#! /usr/bin/python
# transaction_csv_cleanup.py
# for Python 3
# Searches specified folder or default download folder for exported
# bank transaction file (.csv format) & adjusts format for YNAB import
# CHANGELOG
# 2017-09-29
# ~ Merged in parameters from https://www.reddit.com/user/FinibusBonorum
# ~ Auto folder finder disabled if folder path specified
# ~ Moved winreg import into Windows-specific section to avoid Linux conflict
# ~ Refined winreg import
# ~ Realised that Windows has no default shebang support so just used Linux shebang line!
# ~ Added fix_row function that handles missing input headers better than previously
# ~ Renamed find_downloads() to find_directory()
# ~ Added header_swap function
# 2017-10-04
# ~ Added g_hasheaders variable for if data is missing column headers
# ~ Actually implemented csv delimiter in csv function!
# OPERATIONS
# ~ Find & open TransactionExport.csv for processing
# ~ Change columns from
# Date, Details, Debit, Credit, Balance to
# Date, Payee, Category, Memo, Outflow, Inflow & delete Balance column
# ~ Create blank Category column
# ~ Copy data from Payee column into Memo column
# ~ Write new data to [g_filepath]+[g_filename]+[g_suffix] = fixed_TransactionExport.csv
# edit the following section based on bank format
g_filename = "TransactionExport"   # file-name stem the bank export uses
g_input_columns = ["Date", "Payee", "Outflow", "Inflow", "Running Balance"]   # bank's column order
g_output_columns = ["Date", "Payee", "Category", "Memo", "Outflow", "Inflow"]  # YNAB import layout
g_filepath = ""                    # leave empty to auto-detect the Downloads folder
g_suffix = ".csv"
g_fixed_prefix = "fixed_"          # prefix for cleaned files (also the "already done" marker)
g_delimiter = ","
g_hasheaders = True                # False if the export has no header row
#
# don't edit below here unless you know what you're doing!
import csv, os
def get_files():
    """Return candidate transaction files in the export folder.

    Side effect: changes the process working directory to that folder.
    """
    os.chdir(find_directory())
    suffix, stem, done_prefix = g_suffix, g_filename, g_fixed_prefix
    return [
        f for f in os.listdir(".")
        if f.endswith(suffix) and stem in f and done_prefix not in f
    ]
def clean_data(file):
    """Read one exported CSV and return its rows reshaped to the output layout."""
    with open(file) as transaction_file:
        rows = list(csv.reader(transaction_file, delimiter=g_delimiter))
    output_data = [fix_row(row) for row in rows]
    # Replace the existing header row, or insert one if the export had none.
    if g_hasheaders:
        output_data[0] = g_output_columns
    else:
        output_data.insert(0, g_output_columns)
    return output_data
def fix_row(row):
    """Map one input row onto the output column layout."""
    output = []
    for column in g_output_columns:
        source_column = header_swap(column)
        try:
            # copy the matching input cell when the column exists
            output.append(row[g_input_columns.index(source_column)])
        except ValueError:
            # column missing from the input: emit a blank cell
            output.append("")
    return output
def header_swap(header):
    """Return the input column to read for a given output column.

    The Memo output column is populated from the input Payee column.
    """
    # Bug fix: the original compared with `is`, which tests object identity
    # and only worked by accident of CPython string interning.
    if header == "Memo":
        header = "Payee"
    return header
def write_data(filename, data):
    """Write the cleaned rows to a new CSV prefixed with g_fixed_prefix."""
    with open(g_fixed_prefix + filename, "w", newline="") as out_file:
        csv.writer(out_file).writerows(data)
    return
def find_directory():
    """Return the folder to scan: g_filepath if set, else the user's Downloads.

    Bug fix: the original compared strings with `is` (identity), e.g.
    `g_filepath is ""` and `os.name is "nt"`, which only works by accident
    of CPython interning; equality (`==`) is the correct test.
    """
    if g_filepath != "":
        return g_filepath
    if os.name == "nt":
        # Windows: read the Downloads folder GUID from the registry.
        from winreg import OpenKey, QueryValueEx, HKEY_CURRENT_USER  # Windows-only import
        shell_path = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
        dl_key = "{374DE290-123F-4565-9164-39C4925E467B}"
        with OpenKey(HKEY_CURRENT_USER, shell_path) as key:
            return QueryValueEx(key, dl_key)[0]
    # Linux/macOS: assume ~/Downloads
    userhome = os.path.expanduser('~')
    return os.path.join(userhome, "Downloads")
def main():
    """Clean every matching export file and delete the original."""
    # find all applicable files
    files = get_files()
    for file in files:
        # create cleaned csv for each file
        output = clean_data(file)
        write_data(file, output)
        # delete original csv file; only the fixed_ copy remains
        os.remove(file)
    return

# NOTE(review): runs on import as well as direct execution; consider an
# `if __name__ == "__main__":` guard.
main()
|
nilq/baby-python
|
python
|
# ***************************************************
# SERVO test for duty cycle range
#
# ProtoStax Air Quality Monitor.
# using Raspberry Pi A+, Micro Servo SG92R, RGB LED and ProtoStax Enclosure for Raspberry Pi
# --> https://www.protostax.com/products/protostax-for-raspberry-pi-a
# You can also use
# --> https://www.protostax.com/products/protostax-for-raspberry-pi-b
# --> https://www.protostax.com/products/protostax-for-raspberry-pi-zero
#
# Use this program to test the range of movement from your micro servo
# and set the appropriate MIN, MAX and CENTER duty cycles for your given
# servo in the aqi_monitor.py program
#
# At the way the servo is oriented on the ProtoStax Kit for Micro Servo,
# the highest duty cycle will position the servo arm to the left and
# reducing the duty cycle will cause a clockwise rotation to the max
# value. So note that MAX_DUTY cycle will correspond to a ZERO reading
# and MIN_DUTY cycle will correspond to a maximum reading (of around 250)
# This inversion of logic is handled in the main aqi_monitor.py program - you just
# need to plug in the values for MIN_DUTY and MAX_DUTY and CENTER_DUTY.
#
# Start off with the highest duty cycle where the servo moves without jittering.
# Then place the gauge indicator and position it so that it is closest to zero reading (or less than zero).
# Then adjust the duty cycle (by reducing the duty cycle so that you get a ZERO reading) - this will be your MAX_DUTY
#
# Find out the MIN_DUTY cycle where the servo will safely get to the max
# rotation. If the servo is grinding or whining, back off. This was
# around 3 in my case. If I set it to 1, for example, the servo goes
# crazy and goes into continuous rotation. Avoid this if you don't want to
# risk damaging your servo! If I set it to 2, the needle started drifting - you
# don't want this either!
#
# If this happens, you will need to repeat the zeroing procedure above, and the
# MIN_DUTY to where it doesn't jitter or drift. After that, proceed with the next step:
#
# Adjust the duty cycle so that the gauge needle points to 150, the center value
# Note this CENTER_DUTY cycle value. It was around 5.9 to 6.0 in my case.
#
# Written by Sridhar Rajagopal for ProtoStax.
#
#
# BSD license. All text above must be included in any redistribution
import RPi.GPIO as GPIO
import time
# Configure the Pi to use pin names (i.e. BOARD) and allocate I/O
# We are utilizing the BOARD pin numbering, which means
# connect the servo to physical pin number 7 on your Raspberry Pi
# (Or change the SERVO_PIN below to correspond to the physical pin number you
# are using)
GPIO.setmode(GPIO.BOARD)   # physical (BOARD) pin numbering
SERVO_PIN = 7              # physical pin carrying the servo signal wire
# Set SERVO_PIN for output
GPIO.setup(SERVO_PIN, GPIO.OUT)
# 50 Hz PWM is the standard frame rate for hobby servos (see the SG92R datasheet)
pwm_servo = GPIO.PWM(SERVO_PIN, 50)
pwm_servo.start(0)
try:
    print("Test different duty cycles to find out the: ")
    print("* MIN DUTY CYCLE (this will correspond to HIGHEST indicator reading, usually around 250), ")
    print("* MAX DUTY CYCLE (this will correpond to the 0 indicator")
    print("* CENTER DUTY CYCLE - find out which duty cycle gets you to a reading of 150")
    print("Note these values for use in the aqi_monitor.py program")
    # Interactive loop: apply each entered duty cycle and hold it briefly.
    while True:
        duty_cycle = float(input("Enter Duty Cycle (usually between 2 and 12 for SG92R, but the exact limits vary):"))
        pwm_servo.ChangeDutyCycle(duty_cycle)
        time.sleep(0.5)
except KeyboardInterrupt:
    print("CTRL-C: Terminating program.")
finally:
    # Always stop PWM and release the pins, even on error/interrupt.
    print("Cleaning up GPIO...")
    pwm_servo.stop()
    GPIO.cleanup()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore, Qt
class QRoundProgressBar(QtGui.QWidget):
    """Circular progress indicator (PyQt4 port of QRoundProgressBar).

    Renders the current value as a donut, pie, or line arc, with an
    optional gradient fill and a formatted text readout in the center.
    """

    # Bar rendering styles.
    StyleDonut = 1
    StylePie = 2
    StyleLine = 3
    # Null (start) positions of the arc, in degrees.
    PositionLeft = 180
    PositionTop = 90
    PositionRight = 0
    PositionBottom = -90
    # Bitmask flags describing which placeholders appear in self.format.
    UF_VALUE = 1
    UF_PERCENT = 2
    UF_MAX = 4
    def __init__(self):
        """Initialize with range 0..100, value 25, donut style, top null position."""
        super(QRoundProgressBar, self).__init__()
        self.min = 0
        self.max = 100
        self.value = 25
        self.nullPosition = self.PositionTop
        self.barStyle = self.StyleDonut
        self.outlinePenWidth =1
        self.dataPenWidth = 1
        # When True, the highlight brush is regenerated on the next paint.
        self.rebuildBrush = False
        self.format = "%p%"
        self.decimals = 1
        self.updateFlags = self.UF_PERCENT
        # List of (position, QColor) gradient stops; empty means palette highlight.
        self.gradientData = []
        # Inner-hole diameter as a fraction of the outer diameter (donut style).
        self.donutThicknessRatio = 0.75
    def setRange(self, min, max):
        """Set the value range, swapping bounds if reversed and clamping value."""
        self.min = min
        self.max = max
        if self.max < self.min:
            self.max, self.min = self.min, self.max
        if self.value < self.min:
            self.value = self.min
        elif self.value > self.max:
            self.value = self.max
        # NOTE(review): brush rebuild is requested only when NO gradient stops
        # are set — confirm this inversion is intended.
        if not self.gradientData:
            self.rebuildBrush = True
        self.update()
    def setMinimun(self, min):
        """Set the lower bound. NOTE(review): name keeps the original misspelling."""
        self.setRange(min, self.max)
    def setMaximun(self, max):
        """Set the upper bound. NOTE(review): name keeps the original misspelling."""
        self.setRange(self.min, max)
    def setValue(self, val):
        """Set the current value, clamped to [min, max], and repaint."""
        if self.value != val:
            if val < self.min:
                self.value = self.min
            elif val > self.max:
                self.value = self.max
            else:
                self.value = val
            self.update()
    def setNullPosition(self, position):
        """Set the arc start angle in degrees (see Position* constants)."""
        if position != self.nullPosition:
            self.nullPosition = position
            if not self.gradientData:
                self.rebuildBrush = True
            self.update()
    def setBarStyle(self, style):
        """Select one of StyleDonut / StylePie / StyleLine."""
        if style != self.barStyle:
            self.barStyle = style
            self.update()
    def setOutlinePenWidth(self, penWidth):
        """Set the pen width used for the outer circle outline."""
        if penWidth != self.outlinePenWidth:
            self.outlinePenWidth = penWidth
            self.update()
    def setDataPenWidth(self, penWidth):
        """Set the pen width used for the value arc/segment."""
        if penWidth != self.dataPenWidth:
            self.dataPenWidth = penWidth
            self.update()
    def setDataColors(self, stopPoints):
        """Set gradient stops as a list of (position 0..1, QColor) pairs."""
        if stopPoints != self.gradientData:
            self.gradientData = stopPoints
            self.rebuildBrush = True
            self.update()
    def setFormat(self, format):
        """Set the text format; %v = value, %p = percent, %m = max."""
        if format != self.format:
            self.format = format
            self.valueFormatChanged()
    def resetFormat(self):
        """Clear the format string (disables the center text)."""
        self.format = ''
        self.valueFormatChanged()
    def setDecimals(self, count):
        """Set the number of decimal places shown in the center text."""
        if count >= 0 and count != self.decimals:
            self.decimals = count
            self.valueFormatChanged()
    def setDonutThicknessRatio(self, val):
        """Set the donut hole ratio, clamped to [0, 1]."""
        self.donutThicknessRatio = max(0., min(val, 1.))
        self.update()
    def paintEvent(self, event):
        """Render the widget into an off-screen image, then blit it."""
        outerRadius = min(self.width(), self.height())
        baseRect = QtCore.QRectF(1, 1, outerRadius-2, outerRadius-2)
        buffer = QtGui.QImage(outerRadius, outerRadius, QtGui.QImage.Format_ARGB32)
        buffer.fill(0)
        p = QtGui.QPainter(buffer)
        p.setRenderHint(QtGui.QPainter.Antialiasing)
        # data brush
        self.rebuildDataBrushIfNeeded()
        # background
        self.drawBackground(p, buffer.rect())
        # base circle
        self.drawBase(p, baseRect)
        # data circle
        # NOTE(review): arc length uses the raw value rather than
        # (value - min); this is only correct when min == 0 — confirm.
        arcStep = 360.0 / (self.max - self.min) * self.value
        self.drawValue(p, baseRect, self.value, arcStep)
        # center circle
        innerRect, innerRadius = self.calculateInnerRect(baseRect, outerRadius)
        self.drawInnerBackground(p, innerRect)
        # text
        self.drawText(p, innerRect, innerRadius, self.value)
        # finally draw the bar
        p.end()
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, buffer)
    def drawBackground(self, p, baseRect):
        """Fill the whole widget area with the palette background."""
        p.fillRect(baseRect, self.palette().background())
    def drawBase(self, p, baseRect):
        """Draw the empty gauge circle according to the current bar style."""
        bs = self.barStyle
        if bs == self.StyleDonut:
            p.setPen(QtGui.QPen(self.palette().shadow().color(), self.outlinePenWidth))
            p.setBrush(self.palette().base())
            p.drawEllipse(baseRect)
        elif bs == self.StylePie:
            p.setPen(QtGui.QPen(self.palette().base().color(), self.outlinePenWidth))
            p.setBrush(self.palette().base())
            p.drawEllipse(baseRect)
        elif bs == self.StyleLine:
            p.setPen(QtGui.QPen(self.palette().base().color(), self.outlinePenWidth))
            p.setBrush(Qt.Qt.NoBrush)
            p.drawEllipse(baseRect.adjusted(self.outlinePenWidth/2, self.outlinePenWidth/2, -self.outlinePenWidth/2, -self.outlinePenWidth/2))
    def drawValue(self, p, baseRect, value, arcLength):
        """Draw the filled value arc/segment; arcLength is in degrees."""
        # nothing to draw
        if value == self.min:
            return
        # for Line style
        if self.barStyle == self.StyleLine:
            p.setPen(QtGui.QPen(self.palette().highlight().color(), self.dataPenWidth))
            p.setBrush(Qt.Qt.NoBrush)
            # QPainter.drawArc expects angles in 1/16th of a degree.
            p.drawArc(baseRect.adjusted(self.outlinePenWidth/2, self.outlinePenWidth/2, -self.outlinePenWidth/2, -self.outlinePenWidth/2),
                      self.nullPosition * 16,
                      -arcLength * 16)
            return
        # for Pie and Donut styles
        dataPath = QtGui.QPainterPath()
        dataPath.setFillRule(Qt.Qt.WindingFill)
        # pie segment outer
        dataPath.moveTo(baseRect.center())
        dataPath.arcTo(baseRect, self.nullPosition, -arcLength)
        dataPath.lineTo(baseRect.center())
        p.setBrush(self.palette().highlight())
        p.setPen(QtGui.QPen(self.palette().shadow().color(), self.dataPenWidth))
        p.drawPath(dataPath)
    def calculateInnerRect(self, baseRect, outerRadius):
        """Return (rect, radius) of the inner hole / text area."""
        # for Line style
        if self.barStyle == self.StyleLine:
            innerRadius = outerRadius - self.outlinePenWidth
        else:    # for Pie and Donut styles
            innerRadius = outerRadius * self.donutThicknessRatio
        delta = (outerRadius - innerRadius) / 2.
        innerRect = QtCore.QRectF(delta, delta, innerRadius, innerRadius)
        return innerRect, innerRadius
    def drawInnerBackground(self, p, innerRect):
        """Punch out the donut hole using Source composition (overwrites alpha)."""
        if self.barStyle == self.StyleDonut:
            p.setBrush(self.palette().alternateBase())
            cmod = p.compositionMode()
            p.setCompositionMode(QtGui.QPainter.CompositionMode_Source)
            p.drawEllipse(innerRect)
            p.setCompositionMode(cmod)
    def drawText(self, p, innerRect, innerRadius, value):
        """Draw the formatted value text centered in the inner area."""
        if not self.format:
            return
        text = self.valueToText(value)
        # !!! to revise
        f = self.font()
        # f.setPixelSize(innerRadius * max(0.05, (0.35 - self.decimals * 0.08)))
        # Font size scales inversely with text length to keep it inside the hole.
        f.setPixelSize(innerRadius * 1.8 / len(text))
        p.setFont(f)
        textRect = innerRect
        p.setPen(self.palette().text().color())
        p.drawText(textRect, Qt.Qt.AlignCenter, text)
    def valueToText(self, value):
        """Expand %v / %p / %m placeholders in the format string."""
        textToDraw = self.format
        format_string = '{' + ':.{}f'.format(self.decimals) + '}'
        if self.updateFlags & self.UF_VALUE:
            textToDraw = textToDraw.replace("%v", format_string.format(value))
        if self.updateFlags & self.UF_PERCENT:
            percent = (value - self.min) / (self.max - self.min) * 100.0
            textToDraw = textToDraw.replace("%p", format_string.format(percent))
        if self.updateFlags & self.UF_MAX:
            m = self.max - self.min + 1
            textToDraw = textToDraw.replace("%m", format_string.format(m))
        return textToDraw
    def valueFormatChanged(self):
        """Recompute the placeholder bitmask after the format string changed."""
        self.updateFlags = 0;
        if "%v" in self.format:
            self.updateFlags |= self.UF_VALUE
        if "%p" in self.format:
            self.updateFlags |= self.UF_PERCENT
        if "%m" in self.format:
            self.updateFlags |= self.UF_MAX
        self.update()
    def rebuildDataBrushIfNeeded(self):
        """Regenerate the conical gradient highlight brush if flagged dirty."""
        if self.rebuildBrush:
            self.rebuildBrush = False
            dataBrush = QtGui.QConicalGradient()
            dataBrush.setCenter(0.5,0.5)
            dataBrush.setCoordinateMode(QtGui.QGradient.StretchToDeviceMode)
            # Stops are mirrored (1 - pos) because the arc sweeps clockwise.
            for pos, color in self.gradientData:
                dataBrush.setColorAt(1.0 - pos, color)
            # angle
            dataBrush.setAngle(self.nullPosition)
            p = self.palette()
            p.setBrush(QtGui.QPalette.Highlight, dataBrush)
            self.setPalette(p)
version = 1.0  # module version marker
class BattaryWidget(QtCore.QObject): #MoveSimpleWidget
    """Battery status panel: a round charge gauge plus current/voltage/power
    labels and work/forecast time readouts.

    NOTE(review): the class name keeps the original 'BattaryWidget' spelling
    because external code may import it by that name.
    """

    def __init__(self):
        # BUG FIX: QtCore.QObject.__init__ was never called. Without it,
        # QObject machinery (e.g. self.connect(...) used in getWidget) fails
        # at runtime with "underlying C/C++ object ... __init__ not called".
        super(BattaryWidget, self).__init__()
        #self.sock = sock
        self.Name = 'Move Simple'
        # Voltage limits used to scale the charge reading
        # (presumably the battery pack's empty/full voltages — confirm).
        self.minVolt = 10.60
        self.maxVolt = 18

    def getWidget(self):
        """Build and return the QWidget holding the gauge, labels, and layout."""
        #elf.webCam = QtWebKit.QWebView()
        #self.webCam.setUrl(QtCore.QUrl('http://195.235.198.107:3344/axis-cgi/mjpg/video.cgi?resolution=320x240'))
        # Instant readings: amps, volts, watts.
        self.ampere = QtGui.QLabel('0.0A')
        self.volt = QtGui.QLabel('0.0V')
        self.power = QtGui.QLabel('0.0W')
        # Elapsed and forecast run times.
        self.workTimeText = QtGui.QLabel('Work:')
        self.workTime = QtGui.QLabel('0h:0m:0s') #1h:23m:11s
        self.forecastTimeText = QtGui.QLabel('Forecast:')
        self.forecastTime = QtGui.QLabel('0h:0m:0s')
        # Round donut gauge showing charge, red->yellow->green gradient.
        self.bar = QRoundProgressBar()
        self.bar.setFixedSize(50, 50)
        self.bar.setDataPenWidth(1)
        self.bar.setOutlinePenWidth(1)
        self.bar.setDonutThicknessRatio(0.5)
        self.bar.setDecimals(1)
        self.bar.setFormat('%v')
        # self.bar.resetFormat()
        self.bar.setNullPosition(90)
        self.bar.setBarStyle(QRoundProgressBar.StyleDonut)
        self.bar.setDataColors([(0., QtGui.QColor.fromRgb(255,0,0)), (0.5, QtGui.QColor.fromRgb(255,255,0)), (1., QtGui.QColor.fromRgb(0,255,0))])
        self.bar.setRange(0, 100)
        self.bar.setValue(0)
        # Test slider that drives the gauge through setV (old-style signal).
        self.speed = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.speed.setMaximum(100)
        self.connect(self.speed, QtCore.SIGNAL('valueChanged(int)'),
                     self.setV)
        # Arrange everything in a compact grid.
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setSpacing(1)
        self.gridLayout.addWidget(self.bar, 1, 1)
        self.gridLayout.addWidget(self.ampere, 2, 0, QtCore.Qt.AlignCenter)
        self.gridLayout.addWidget(self.volt, 2, 1, QtCore.Qt.AlignCenter)
        self.gridLayout.addWidget(self.power, 2, 2, QtCore.Qt.AlignCenter)
        self.gridLayout.addWidget(self.workTimeText, 3, 0)
        self.gridLayout.addWidget(self.workTime, 3, 2)
        self.gridLayout.addWidget(self.forecastTimeText, 4, 0, 4, 1)
        self.gridLayout.addWidget(self.forecastTime, 4, 2)
        widget = QtGui.QWidget()
        widget.setLayout(self.gridLayout)
        return widget

    def setV(self, value):
        """Slot: forward a new charge value (0..100) to the gauge."""
        self.bar.setValue(value)

    def getName(self):
        """Return the widget's display name."""
        return self.Name
|
nilq/baby-python
|
python
|
from .parser import MopacParser
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
from bann.b_container.states.framework.pytorch.lr_scheduler_param import LrSchAlgWr, \
create_lr_sch_json_param_output
from bann.b_container.functions.pytorch.init_framework_fun import InitNetArgs
from bann.b_container.states.framework.pytorch.optim_param import OptimAlgWr, \
create_optim_json_param_output
def _create_output_lr_sch(scheduler_wr: LrSchAlgWr, /) -> str:
    """Render the LR-scheduler wrapper as a quoted name/value fragment,
    followed by its JSON parameter dump when one exists."""
    names = ",".join(scheduler_wr.lr_sch_type_name)
    pieces = [f"\"{scheduler_wr.param_name()}\":\t\"{names}\""]
    params = create_lr_sch_json_param_output(scheduler_wr)
    if params:
        pieces.append(f",\n\t{params}")
    return "".join(pieces)
def _create_output_optim(optim_wr: OptimAlgWr, /) -> str:
    """Render the optimizer wrapper as a quoted name/value fragment,
    followed by its JSON parameter dump when one exists."""
    names = ",".join(optim_wr.optim_type_name(False))
    pieces = [f"\"{optim_wr.param_name()}\":\t\"{names}\""]
    params = create_optim_json_param_output(optim_wr)
    if params:
        pieces.append(f",\n\t{params}")
    return "".join(pieces)
def _check_if_empty(output: str, /) -> str:
if not output:
return ""
return f"\n\t{output},"
def create_hyper_param_str(network_name: str, init_args: InitNetArgs, /) -> str:
    """Build a human-readable summary of all hyper-parameter wrappers held in
    *init_args* for the network *network_name*.

    Each section contributes a quoted "name":\t"type" line plus an optional
    kwargs dump; a trailing comma is stripped before the final newline.
    """
    # net state
    output_string = f"Net {network_name} arguments:"
    output_string += _check_if_empty(init_args.net_state.get_kwargs_repr())
    # initializer
    output_string += _check_if_empty(init_args.initializer_wr.init_state.get_kwargs_repr())
    # hyper (optional: only present when a hyper-optimizer is configured)
    if init_args.hyper_optim_wr is not None:
        state_str = f"\"{init_args.hyper_optim_wr.param_name()}\":\t"
        state_str += f"\"{init_args.hyper_optim_wr.hyper_type_name}\""
        if init_args.hyper_optim_wr.hyper_state.get_kwargs_repr():
            state_str += f",\n\t{init_args.hyper_optim_wr.hyper_state.get_kwargs_repr()}"
        output_string += f"\n\t{state_str},"
    # prepare
    state_str = f"\"{init_args.prepare_wr.param_name()}\":\t"
    state_str += f"\"{init_args.prepare_wr.pr_type_name}\""
    if init_args.prepare_wr.pr_state.get_kwargs_repr():
        state_str += f",\n\t{init_args.prepare_wr.pr_state.get_kwargs_repr()}"
    output_string += f"\n\t{state_str},"
    # trainer
    state_str = f"\"{init_args.trainer_wr.param_name()}\":\t"
    state_str += f"\"{init_args.trainer_wr.trainer_type_name}\""
    if init_args.trainer_wr.train_state.get_kwargs_repr():
        state_str += f",\n\t{init_args.trainer_wr.train_state.get_kwargs_repr()}"
    output_string += f"\n\t{state_str},"
    # tester
    state_str = f"\"{init_args.tester_wr.param_name()}\":\t"
    state_str += f"\"{init_args.tester_wr.tester_type_name}\""
    if init_args.tester_wr.test_state.get_kwargs_repr():
        state_str += f",\n\t{init_args.tester_wr.test_state.get_kwargs_repr()}"
    output_string += f"\n\t{state_str},"
    # optim (optional)
    if init_args.optimizer_wr is not None:
        output_string += f"\n\t{_create_output_optim(init_args.optimizer_wr)},"
    # scheduler (optional)
    if init_args.scheduler_wr is not None:
        output_string += f"\n\t{_create_output_lr_sch(init_args.scheduler_wr)},"
    # criterion (optional)
    if init_args.criterion_wr is not None:
        state_str = f"\"{init_args.criterion_wr.param_name()}\":\t"
        state_str += f"\"{init_args.criterion_wr.criterion_type_name}\""
        if init_args.criterion_wr.criterion_state.get_kwargs_repr():
            state_str += f",\n\t{init_args.criterion_wr.criterion_state.get_kwargs_repr()}"
        output_string += f"\n\t{state_str},"
    # Drop the final trailing comma and end with a newline.
    return output_string.rstrip(',') + "\n"
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
# Package metadata for pyfdam: impedance-data fitting and discharge simulation.
setup(name='pyfdam',
      version='0.1',
      description='Code for fitting impedance data and simulating discharge experiments',
      url='http://github.com/muhammadhasyim/pyfdam',
      author='Muhammad R. Hasyim',
      author_email='muhammad_hasyim@berkeley.edu',
      license='MIT',
      packages=find_packages(),
      # Runtime dependencies: numerical stack plus lmfit for curve fitting.
      install_requires=['numpy','scipy','lmfit'],
      zip_safe=False)
|
nilq/baby-python
|
python
|
# from __future__ import print_function, division
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout, UpSampling2D, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, LeakyReLU, MaxPooling2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras.mixed_precision import experimental as mixed_precision
import tensorflow_io as tfio
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import tensorflow_datasets as tfds
from utils.datasets import Dataset
from model.model_builder import base_model
from model.model import conv_module
BATCH_SIZE = 8  # images per training batch
EPOCHS = 50  # total training epochs
DATASET_DIR = './datasets/'  # tfds data_dir root
IMAGE_SIZE = (512, 512)  # target (height, width) fed to the networks
num_classes = 2  # generator output channels (Cb and Cr chroma planes)
def l1(y_true, y_pred):
    """Mean absolute error (L1 loss) between prediction and target tensors."""
    abs_error = K.abs(y_pred - y_true)
    return K.mean(abs_error)
class GAN():
    """Colorization GAN: a generator maps a grayscale Y plane (512x512x1) to
    CbCr chroma planes (512x512x2); a discriminator judges chroma realism.
    Data comes from the 'CustomCelebahq' TFDS dataset.
    """

    def __init__(self):
        # Image geometry: 512x512 with 2 channels (Cb, Cr).
        self.img_rows = 512
        self.img_cols = 512
        self.channels = 2
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        optimizer = Adam(0.0002, 0.5)
        # Mixed-precision loss scaling (pre-tf2.4.1 experimental API).
        optimizer = mixed_precision.LossScaleOptimizer(optimizer, loss_scale='dynamic')
        # self.options = tf.data.Options()
        # self.options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        # self.train_dataset_config = Dataset(DATASET_DIR, IMAGE_SIZE, BATCH_SIZE, mode='train',
        #                                     dataset='CustomCelebahq')
        # self.train_data = self.train_dataset_config.gan_trainData(self.train_dataset_config.train_data)
        # Load the first 25% of the training split.
        self.train_data = tfds.load('CustomCelebahq',
                               data_dir=DATASET_DIR, split='train[:25%]')
        # Count samples by reducing over the dataset (forces one full pass).
        self.number_train = self.train_data.reduce(0, lambda x, _: x + 1).numpy()
        print("학습 데이터 개수", self.number_train)
        self.train_data = self.train_data.shuffle(1024)
        self.train_data = self.train_data.batch(BATCH_SIZE)
        # self.train_data = self.train_data.prefetch(tf.data.experimental.AUTOTUNE)
        # self.train_data = self.train_data.repeat()
        # self.train_data = self.train_data.with_options(self.options)
        # self.train_data = mirrored_strategy.experimental_distribute_dataset(self.train_data)
        # options = tf.data.Options()
        # options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
        # self.train_data = self.train_data.with_options(options)
        self.steps_per_epoch = self.number_train // BATCH_SIZE
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise as input and generates imgs
        z = Input(shape=(512, 512, 1))
        img = self.generator(z)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The discriminator takes generated images as input and determines validity
        validity = self.discriminator(img)
        # The combined model  (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, validity)
        self.combined.compile(loss='mse', optimizer=optimizer)

    def build_generator(self):
        """Build the Y -> CbCr generator from the shared base model."""
        model_input, model_output = base_model(image_size=(512, 512, 1), num_classes=2)
        model = tf.keras.Model(model_input, model_output)
        return model

    def build_discriminator(self):
        """Build a plain conv/pool binary classifier over 512x512x2 chroma input."""
        inputs = Input(shape=(512, 512, 2))
        x = conv_module(inputs, channel=64, rate=1, activation='relu')
        x = MaxPooling2D()(x)
        x = conv_module(x, channel=128, rate=1, activation='relu')
        x = MaxPooling2D()(x)
        x = conv_module(x, channel=256, rate=1, activation='relu')
        x = MaxPooling2D()(x)
        x = conv_module(x, channel=512, rate=1, activation='relu')
        x = MaxPooling2D()(x)
        x = conv_module(x, channel=512, rate=1, activation='relu')
        x = Flatten()(x)
        x = Dense(1, activation='sigmoid')(x)
        model = Model(inputs=inputs, outputs=x, name='discriminator')
        return model

    def train(self, epochs, batch_size=128, sample_interval=50):
        """Alternate discriminator/generator updates over the dataset.

        Saves weights via sample_images every sample_interval epochs.
        """
        pbar = tqdm(self.train_data, total=self.steps_per_epoch, desc = 'Batch', leave = True, disable=False)
        for epoch in range(epochs):
            # for features in tqdm(self.train_data, total=self.steps_per_epoch):
            for features in pbar:
                # for features in self.train_data:
                # ---------------------
                #  Train Discriminator
                # ---------------------
                img = tf.cast(features['image'], tf.uint8)
                shape = img.shape
                # Adversarial ground truths
                valid = np.ones((shape[0], 1))
                fake = np.zeros((shape[0], 1))
                img = tf.image.resize(img, (512, 512), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                # gray_img = tfio.experimental.color.rgb_to_grayscale(img)
                # gray_img = tf.image.rgb_to_grayscale(img)
                #
                # Gray_3channel = tf.concat([gray_img, gray_img, gray_img], axis=-1)
                # gray_ycbcr = tfio.experimental.color.rgb_to_ycbcr(Gray_3channel)
                # gray_Y = gray_ycbcr[:, :, 0]
                # gray_Y = tf.cast(gray_Y, tf.float32)
                # gray_Y = (gray_Y / 127.5) - 1.0
                # gray_Y = tf.expand_dims(gray_Y, axis=-1)
                # Split YCbCr and normalize each plane to [-1, 1].
                img_YCbCr = tfio.experimental.color.rgb_to_ycbcr(img)
                gray_Y = img_YCbCr[:, :, :, 0]
                gray_Y = tf.cast(gray_Y, tf.float32)
                gray_Y = (gray_Y / 127.5) - 1.0
                # gray_Y /= 255.
                gray_Y = tf.expand_dims(gray_Y, axis=-1)
                Cb = img_YCbCr[:, :, :, 1]
                Cb = tf.cast(Cb, tf.float32)
                Cb = (Cb / 127.5) - 1.0
                # Cb /= 255.
                Cb = tf.expand_dims(Cb, axis=-1)
                Cr = img_YCbCr[:, :, :, 2]
                Cr = tf.cast(Cr, tf.float32)
                Cr = (Cr / 127.5) - 1.0
                # Cr /= 255.
                Cr = tf.expand_dims(Cr, axis=-1)
                CbCr = tf.concat([Cb, Cr], axis=-1)
                # Generate a batch of new images
                noise = tf.random.uniform(shape=[batch_size, 512, 512, 1], maxval=1.0)
                gen_imgs = self.generator.predict(gray_Y)
                # Train the discriminator
                d_loss_real = self.discriminator.train_on_batch(CbCr, valid)
                d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                # ---------------------
                #  Train Generator
                # ---------------------
                # noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
                # Train the generator (to have the discriminator label samples as valid)
                # NOTE(review): the combined model is fed random noise here,
                # not gray_Y as in the discriminator step — confirm intended.
                noise = tf.random.uniform(shape=[batch_size, 512, 512, 1], maxval=1.0)
                g_loss = self.combined.train_on_batch(noise, valid)
                # Plot the progress
                # t.set_description("text", refresh=True)
                # print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (
                # epoch, self.d_loss[0], 100 * self.d_loss[1], self.g_loss))
                pbar.set_description("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (
                    epoch, d_loss[0], 100 * d_loss[1], g_loss))
            # self.train_data = self.train_data.repeat()
            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)

    def sample_images(self, epoch):
        """Checkpoint hook: currently only saves combined-model weights
        (image-grid plotting is commented out)."""
        # r, c = 5, 5
        # noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        # gen_imgs = self.generator.predict(noise)
        #
        # # Rescale images 0 - 1
        # gen_imgs = 0.5 * gen_imgs + 0.5
        #
        # fig, axs = plt.subplots(r, c)
        # cnt = 0
        # for i in range(r):
        #     for j in range(c):
        #         axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
        #         axs[i,j].axis('off')
        #         cnt += 1
        # fig.savefig("images/%d.png" % epoch)
        # plt.close()
        self.combined.save_weights('test_model.h5')
        # self.combined.s
if __name__ == '__main__':
    # mirrored_strategy = tf.distribute.MirroredStrategy()
    # with mirrored_strategy.scope():
    # Build the GAN (loads and counts the dataset) and run training.
    gan = GAN()
    gan.train(epochs=EPOCHS, batch_size=BATCH_SIZE, sample_interval=1)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.1 on 2020-11-06 15:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the 'devices' app.

    Reorders choice lists on existing fields and adds four per-type
    historical value tables (string/int/float/boolean), each one-to-one
    with a Tag. Do not edit by hand: field definitions must stay in sync
    with the recorded migration state.
    """

    dependencies = [
        ('devices', '0006_auto_20201105_1843'),
    ]

    operations = [
        migrations.AlterField(
            model_name='modbusdeviceparameters',
            name='boudrate',
            field=models.PositiveSmallIntegerField(choices=[(9600, '9600'), (28800, '28800'), (115200, '115200'), (57600, '57600'), (600, '600'), (19200, '19200'), (2400, '2400'), (4800, '4800'), (300, '300'), (1200, '1200'), (38400, '38400'), (14400, '14400')], default=9600, verbose_name='Скорость обмена данными'),
        ),
        migrations.AlterField(
            model_name='modbusdeviceparameters',
            name='parity',
            field=models.CharField(choices=[('even', 'even'), ('none', 'none'), ('odd', 'odd')], default='none', max_length=4, verbose_name='Контроль четности'),
        ),
        migrations.AlterField(
            model_name='modbustagparameters',
            name='data_type',
            field=models.CharField(choices=[('WORD', 'WORD'), ('BOOL', 'BOOL'), ('STRING', 'STRING'), ('FLOAT', 'FLOAT'), ('UINT', 'UINT'), ('INT', 'INT')], max_length=50),
        ),
        migrations.AlterField(
            model_name='tag',
            name='data_type',
            field=models.CharField(choices=[('BOOL', 'BOOL'), ('STRING', 'STRING'), ('INT', 'INT'), ('FLOAT', 'FLOAT')], max_length=20, verbose_name='Тип данных'),
        ),
        migrations.CreateModel(
            name='HistoricalStringValue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(blank=True, max_length=200, verbose_name='STRING-значение')),
                ('quality', models.CharField(choices=[('GOOD', 'GOOD'), ('BAD', 'BAD')], default='BAD', max_length=4, verbose_name='Качество')),
                ('add_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата и время')),
                ('tag', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='devices.tag')),
            ],
            options={
                'verbose_name': 'Архивное STRING-значение тега',
                'verbose_name_plural': 'Архивные STRING-значения тега',
            },
        ),
        migrations.CreateModel(
            name='HistoricalIntValue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.IntegerField(blank=True, verbose_name='INTEGER-значение')),
                ('quality', models.CharField(choices=[('GOOD', 'GOOD'), ('BAD', 'BAD')], default='BAD', max_length=4, verbose_name='Качество')),
                ('add_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата и время')),
                ('tag', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='devices.tag')),
            ],
            options={
                'verbose_name': 'Архивное INTEGER-значение тега',
                'verbose_name_plural': 'Архивные INTEGER-значения тега',
            },
        ),
        migrations.CreateModel(
            name='HistoricalFloatValue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.FloatField(blank=True, verbose_name='FLOAT-значение')),
                ('quality', models.CharField(choices=[('GOOD', 'GOOD'), ('BAD', 'BAD')], default='BAD', max_length=4, verbose_name='Качество')),
                ('add_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата и время')),
                ('tag', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='devices.tag')),
            ],
            options={
                'verbose_name': 'Архивное FLOAT-значение тега',
                'verbose_name_plural': 'Архивные FLOAT-значения тега',
            },
        ),
        migrations.CreateModel(
            name='HistoricalBooleanValue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.BooleanField(blank=True, verbose_name='BOOLEAN-значение')),
                ('quality', models.CharField(choices=[('GOOD', 'GOOD'), ('BAD', 'BAD')], default='BAD', max_length=4, verbose_name='Качество')),
                ('add_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата и время')),
                ('tag', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='devices.tag')),
            ],
            options={
                'verbose_name': 'Архивное BOOLEAN-значение тега',
                'verbose_name_plural': 'Архивные BOOLEAN-значения тега',
            },
        ),
    ]
|
nilq/baby-python
|
python
|
''' sample dream '''
from dreamer import Dream
if __name__ == "__main__":
    # Build a dream generator and print ten sampled dreams.
    dream = Dream(dream_type='sex')
    for _ in range(10):
        # BUG FIX: Python 2 'print' statement is a syntax error under
        # Python 3; the parenthesized call is valid in both versions.
        print(dream.dream())
|
nilq/baby-python
|
python
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
This source code is released under the New BSD License, (the "License").
******************************************************************************/
"""
import argparse
import json
import sys
# Some globals:
targetName = 'Q-Chem'
debug = False
def getOptions():
    """Describe the user-configurable options for the Q-Chem input generator.

    Returns a dict of the form {'userOptions': {...}} that Avogadro renders
    as a dialog; each entry carries a type, default, and (for lists) values.
    """
    userOptions = {
        'Title': {
            'type': 'string',
            'default': '',
        },
        'Calculation Type': {
            'type': "stringList",
            'default': 1,
            'values': ['Single Point', 'Equilibrium Geometry', 'Frequencies'],
        },
        'Theory': {
            'type': "stringList",
            'default': 2,
            'values': ['HF', 'MP2', 'B3LYP', 'B3LYP5', 'EDF1', 'M062X', 'CCSD'],
        },
        'Basis': {
            'type': "stringList",
            'default': 2,
            'values': ['STO-3G', '3-21G', '6-31G(d)', '6-31G(d,p)', '6-31+G(d)',
                       '6-311G(d)', 'cc-pVDZ', 'cc-pVTZ', 'LANL2DZ', 'LACVP'],
        },
        'Filename Base': {
            'type': 'string',
            'default': 'job',
        },
        'Charge': {
            'type': "integer",
            'default': 0,
            'minimum': -9,
            'maximum': 9,
        },
        'Multiplicity': {
            'type': "integer",
            'default': 1,
            'minimum': 1,
            'maximum': 6,
        },
    }
    # TODO Coordinate format (need zmatrix)
    return {'userOptions': userOptions}
def generateInputFile(opts):
    """Translate the selected options into Q-Chem input-file text.

    Raises Exception when the calculation type, theory, or basis value
    is not one this generator knows how to emit.
    """
    title = opts['Title']
    calculate = opts['Calculation Type']
    theory = opts['Theory']
    basis = opts['Basis']
    charge = opts['Charge']
    multiplicity = opts['Multiplicity']

    # Map GUI labels onto Q-Chem JOBTYPE keywords.
    jobTypes = {
        'Single Point': 'SP',
        'Equilibrium Geometry': 'Opt',
        'Frequencies': 'Freq',
    }
    if calculate not in jobTypes:
        raise Exception('Unhandled calculation type: %s' % calculate)
    calcStr = jobTypes[calculate]

    # METHOD keyword is spelled exactly like the GUI label.
    knownTheories = ('HF', 'B3LYP', 'B3LYP5', 'EDF1', 'M062X', 'MP2', 'CCSD')
    if theory not in knownTheories:
        raise Exception('Unhandled theory type: %s' % theory)
    theoryStr = theory

    # Gaussian basis sets use the BASIS keyword; ECP sets use ECP.
    gaussianBases = ('STO-3G', '3-21G', '6-31G(d)', '6-31G(d,p)', '6-31+G(d)',
                     '6-311G(d)', 'cc-pVDZ', 'cc-pVTZ')
    if basis in gaussianBases:
        basisStr = 'BASIS %s' % basis
    elif basis in ('LANL2DZ', 'LACVP'):
        basisStr = 'ECP %s' % basis
    else:
        raise Exception('Unhandled basis type: %s' % basis)

    # Assemble the $rem / $comment / $molecule sections; the
    # $$coords:___Sxyz$$ marker is substituted by Avogadro later.
    lines = [
        '$rem\n',
        ' JOBTYPE %s\n' % calcStr,
        ' METHOD %s\n' % theoryStr,
        ' %s\n' % basisStr,
        ' GUI 2\n',
        '$end\n\n',
        '$comment\n %s\n$end\n\n' % title,
        '$molecule\n',
        ' %s %s\n' % (charge, multiplicity),
        '$$coords:___Sxyz$$\n',
        '$end\n',
    ]
    return ''.join(lines)
def generateInput():
    """Read generator options as JSON from stdin and return the result
    payload: the list of files to write plus the main input-file name."""
    raw = sys.stdin.read()
    opts = json.loads(raw)

    # Render the Q-Chem input text and derive the output file name.
    inp = generateInputFile(opts['options'])
    baseName = opts['options']['Filename Base']
    mainFileName = '%s.qcin' % baseName

    # Files appear in the GUI in list order.
    files = [{'filename': mainFileName, 'contents': inp}]
    if debug:
        files.append({'filename': 'debug_info', 'contents': raw})

    # 'mainFile' tells MoleQueue which file backs the $$inputFileName$$
    # and $$inputFileBaseName$$ keywords.
    return {'files': files, 'mainFile': mainFileName}
if __name__ == "__main__":
    # Command-line protocol used by Avogadro's input-generator framework.
    parser = argparse.ArgumentParser('Generate a %s input file.' % targetName)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--print-options', action='store_true')
    parser.add_argument('--generate-input', action='store_true')
    parser.add_argument('--display-name', action='store_true')
    parser.add_argument('--lang', nargs='?', default='en')
    args = vars(parser.parse_args())
    # Module-level flag consumed by generateInput().
    debug = args['debug']
    if args['display_name']:
        print(targetName)
    # --print-options and --generate-input are mutually exclusive modes.
    if args['print_options']:
        print(json.dumps(getOptions()))
    elif args['generate_input']:
        print(json.dumps(generateInput()))
|
nilq/baby-python
|
python
|
import os
import pickle
if __name__ == '__main__':
    # Build, for each split, a map from image filename -> line index of the
    # source file, and pickle it next to the data.
    # FIX: renamed the dataset tag so the loop variable no longer shadows it
    # (the original reused 'data_tag' for both purposes).
    dataset_tag = 'tw_mm_s4'  # 'tw_mm_s1' || 'tw_mm_imagenet_s2' || 'tw_mm_daily_s2'
    data_dir = '../data/{}'.format(dataset_tag)

    for data_tag in ['train', 'valid', 'test']:
        print('\nComputing url map for %s' % data_tag)
        src_fn = os.path.join(data_dir, '{}_src.txt'.format(data_tag))
        trg_fn = os.path.join(data_dir, '{}_url_map.pt'.format(data_tag))

        url_map = {}
        with open(src_fn, 'r', encoding='utf-8') as fr:
            for idx, line in enumerate(fr):
                # The image filename is the last path component after <sep>.
                img_fn = line.split('<sep>')[-1].split('/')[-1].strip()
                # FIX: idiomatic membership test (was 'not in url_map.keys()').
                if img_fn not in url_map:
                    url_map[img_fn] = idx
                else:
                    print('Error, there are duplicate img filenames: %s' % img_fn)

        with open(trg_fn, 'wb') as fw:
            pickle.dump(url_map, fw)
        print('Dump %d items of a dict into %s' % (len(url_map), trg_fn))
|
nilq/baby-python
|
python
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# Harvesting step for PF-jet trigger monitoring: DQMGenericClient divides each
# <name>_numerator histogram by its <name>_denominator to produce efficiency
# plots. Each spec string is: "<output name> '<title; x-label; y-label>' <num> <den>".
pfjetEfficiency = DQMEDHarvester("DQMGenericClient",
    subDirs = cms.untracked.vstring("HLT/JME/*"),
    verbose = cms.untracked.uint32(0), # Set to 2 for all messages
    resolution = cms.vstring(),
    efficiency = cms.vstring(
        "effic_pfjetpT 'Jet pT turnON; PFJet(pT) [GeV]; efficiency' pfjetpT_numerator pfjetpT_denominator",
        "effic_pfjetpT_pTThresh 'Jet pT turnON; PFJet(pT) [GeV]; efficiency' pfjetpT_pTThresh_numerator pfjetpT_pTThresh_denominator",
        "effic_pfjetphi 'Jet efficiency vs #phi; PF Jet #phi [rad]; efficiency' pfjetphi_numerator pfjetphi_denominator",
        "effic_pfjeteta 'Jet efficiency vs #eta; PF Jet #eta; efficiency' pfjeteta_numerator pfjeteta_denominator",
        ## HB
        "effic_pfjetpT_HB 'Jet pT turnON (HB); PFJet(pT) [GeV]; efficiency' pfjetpT_HB_numerator pfjetpT_HB_denominator",
        "effic_pfjetpT_HB_pTThresh 'Jet pT turnON (HB); PFJet(pT) [GeV]; efficiency' pfjetpT_pTThresh_HB_numerator pfjetpT_pTThresh_HB_denominator",
        "effic_pfjetphi_HB 'Jet efficiency vs #phi (HB); PF Jet #phi [rad]; efficiency' pfjetphi_HB_numerator pfjetphi_HB_denominator",
        "effic_pfjeteta_HB 'Jet efficiency vs #eta (HB); PF Jet #eta; efficiency' pfjeteta_HB_numerator pfjeteta_HB_denominator",
        ## HE
        "effic_pfjetpT_HE 'Jet pT turnON (HE); PFJet(pT) [GeV]; efficiency' pfjetpT_HE_numerator pfjetpT_HE_denominator",
        "effic_pfjetpT_HE_pTThresh 'Jet pT turnON (HE); PFJet(pT) [GeV]; efficiency' pfjetpT_pTThresh_HE_numerator pfjetpT_pTThresh_HE_denominator",
        "effic_pfjetphi_HE 'Jet efficiency vs #phi (HE); PF Jet #phi [rad]; efficiency' pfjetphi_HE_numerator pfjetphi_HE_denominator",
        "effic_pfjeteta_HE 'Jet efficiency vs #eta (HE); PF Jet #eta; efficiency' pfjeteta_HE_numerator pfjeteta_HE_denominator",
        ## HE_p
        "effic_pfjetpT_HE_p 'Jet pT turnON (HEP); PFJet(pT) [GeV]; efficiency' pfjetpT_HE_p_numerator pfjetpT_HE_p_denominator",
        "effic_pfjetpT_HE_p_pTThresh 'Jet pT turnON (HEP); PFJet(pT) [GeV]; efficiency' pfjetpT_pTThresh_HE_p_numerator pfjetpT_pTThresh_HE_p_denominator",
        "effic_pfjetphi_HE_p 'Jet efficiency vs #phi (HEP); PF Jet #phi [rad]; efficiency' pfjetphi_HE_p_numerator pfjetphi_HE_p_denominator",
        "effic_pfjeteta_HE_p 'Jet efficiency vs #eta (HEP); PF Jet #eta; efficiency' pfjeteta_HE_p_numerator pfjeteta_HE_p_denominator",
        ## HE_m
        "effic_pfjetpT_HE_m 'Jet pT turnON (HEM); PFJet(pT) [GeV]; efficiency' pfjetpT_HE_m_numerator pfjetpT_HE_m_denominator",
        "effic_pfjetpT_HE_m_pTThresh 'Jet pT turnON (HEM); PFJet(pT) [GeV]; efficiency' pfjetpT_pTThresh_HE_m_numerator pfjetpT_pTThresh_HE_m_denominator",
        "effic_pfjetphi_HE_m 'Jet efficiency vs #phi (HEM); PF Jet #phi [rad]; efficiency' pfjetphi_HE_m_numerator pfjetphi_HE_m_denominator",
        "effic_pfjeteta_HE_m 'Jet efficiency vs #eta (HEM); PF Jet #eta; efficiency' pfjeteta_HE_m_numerator pfjeteta_HE_m_denominator",
        ## HF
        "effic_pfjetpT_HF 'Jet pT turnON (HF); PFJet(pT) [GeV]; efficiency' pfjetpT_HF_numerator pfjetpT_HF_denominator",
        "effic_pfjetpT_HF_pTThresh 'Jet pT turnON (HF); PFJet(pT) [GeV]; efficiency' pfjetpT_pTThresh_HF_numerator pfjetpT_pTThresh_HF_denominator",
        "effic_pfjetphi_HF 'Jet efficiency vs #phi (HF); PF Jet #phi [rad]; efficiency' pfjetphi_HF_numerator pfjetphi_HF_denominator",
        "effic_pfjeteta_HF 'Jet efficiency vs #eta (HF); PF Jet #eta; efficiency' pfjeteta_HF_numerator pfjeteta_HF_denominator",
        ## 2D Eff
        "effic_pfjetEtaVsPhi 'Jet efficiency vs #eta and #phi; PF Jet #eta; #phi' pfjetEtaVsPhi_numerator pfjetEtaVsPhi_denominator",
        "effic_pfjetEtaVsPhi_HB 'Jet efficiency vs #eta and #phi(HB); PF Jet #eta; #phi' pfjetEtaVsPhi_HB_numerator pfjetEtaVsPhi_HB_denominator",
        "effic_pfjetEtaVsPhi_HE 'Jet efficiency vs #eta and #phi(HE); PF Jet #eta; #phi' pfjetEtaVsPhi_HE_numerator pfjetEtaVsPhi_HE_denominator",
        "effic_pfjetEtaVsPhi_HF 'Jet efficiency vs #eta and #phi(HF); PF Jet #eta; #phi' pfjetEtaVsPhi_HF_numerator pfjetEtaVsPhi_HF_denominator",
        "effic_pfjetEtaVsPhi_HE_p 'Jet efficiency vs #eta and #phi(HE_p); PF Jet #eta; #phi' pfjetEtaVsPhi_HE_p_numerator pfjetEtaVsPhi_HE_p_denominator",
        "effic_pfjetEtaVsPhi_HE_m 'Jet efficiency vs #eta and #phi(HE_m); PF Jet #eta; #phi' pfjetEtaVsPhi_HE_m_numerator pfjetEtaVsPhi_HE_m_denominator",
        "effic_pfjetEtaVspT 'Jet efficiency #eta vs Pt; PF Jet #eta; Pt' pfjetEtaVspT_numerator pfjetEtaVspT_denominator",
        "effic_pfjetEtaVspT_HB 'Jet efficiency #eta vs Pt(HB); PF Jet #eta; Pt' pfjetEtaVspT_HB_numerator pfjetEtaVspT_HB_denominator",
        "effic_pfjetEtaVspT_HE 'Jet efficiency #eta vs Pt(HE); PF Jet #eta; Pt' pfjetEtaVspT_HE_numerator pfjetEtaVspT_HE_denominator",
        "effic_pfjetEtaVspT_HF 'Jet efficiency #eta vs Pt(HF); PF Jet #eta; Pt' pfjetEtaVspT_HF_numerator pfjetEtaVspT_HF_denominator",
        "effic_pfjetEtaVspT_HE_p 'Jet efficiency #eta vs Pt(HE_p); PF Jet #eta; Pt' pfjetEtaVspT_HE_p_numerator pfjetEtaVspT_HE_p_denominator",
        "effic_pfjetEtaVspT_HE_m 'Jet efficiency #eta vs Pt(HE_m); PF Jet #eta; Pt' pfjetEtaVspT_HE_m_numerator pfjetEtaVspT_HE_m_denominator"
    ),
    # Efficiency-vs-lumisection profiles (per-region variants kept disabled).
    efficiencyProfile = cms.untracked.vstring(
        "effic_pfjetpT_vs_LS 'JET efficiency vs LS; LS; PF JET efficiency' pfjetpTVsLS_numerator pfjetpTVsLS_denominator",
        # "effic_pfjetpT_HBvs_LS 'JET efficiency vs LS; LS; PF JET efficiency' pfjetpTVsLS_HB_numerator pfjetpTVsLS_HB_denominator",
        # "effic_pfjetpT_HEvs_LS 'JET efficiency vs LS; LS; PF JET efficiency' pfjetpTVsLS_HE_numerator pfjetpTVsLS_HE_denominator",
        # "effic_pfjetpT_HFvs_LS 'JET efficiency vs LS; LS; PF JET efficiency' pfjetpTVsLS_HF_numerator pfjetpTVsLS_HF_denominator",
        # "effic_pfjetpT_HE_mvs_LS 'JET efficiency vs LS; LS; PF JET efficiency' pfjetpTVsLS_HE_m_numerator pfjetpTVsLS_HE_m_denominator",
        # "effic_pfjetpT_HE_pvs_LS 'JET efficiency vs LS; LS; PF JET efficiency' pfjetpTVsLS_HE_p_numerator pfjetpTVsLS_HE_p_denominator",
    ),
)
# Harvesting step for calo-jet trigger monitoring; see the PF-jet harvester
# above for the efficiency-spec string format.
calojetEfficiency = DQMEDHarvester("DQMGenericClient",
    subDirs = cms.untracked.vstring("HLT/JME/*"),
    verbose = cms.untracked.uint32(0), # Set to 2 for all messages
    resolution = cms.vstring(),
    efficiency = cms.vstring(
        "effic_calojetpT 'Jet pT turnON; CaloJet(pT) [GeV]; efficiency' calojetpT_numerator calojetpT_denominator",
        "effic_calojetpT_pTThresh 'Jet pT turnON; CaloJet(pT) [GeV]; efficiency' calojetpT_pTThresh_numerator calojetpT_pTThresh_denominator",
        "effic_calojetphi 'Jet efficiency vs #phi; Calo Jet #phi [rad]; efficiency' calojetphi_numerator calojetphi_denominator",
        "effic_calojeteta 'Jet efficiency vs #eta; Calo Jet #eta; efficiency' calojeteta_numerator calojeteta_denominator",
        "effic_calojetpT_HB 'Jet pT turnON (HB); CaloJet(pT) [GeV]; efficiency' calojetpT_HB_numerator calojetpT_HB_denominator",
        "effic_calojetpT_HB_pTThresh 'Jet pT turnON (HB); CaloJet(pT) [GeV]; efficiency' calojetpT_pTThresh_HB_numerator calojetpT_pTThresh_HB_denominator",
        "effic_calojetphi_HB 'Jet efficiency vs #phi (HB); Calo Jet #phi [rad]; efficiency' calojetphi_HB_numerator calojetphi_HB_denominator",
        "effic_calojeteta_HB 'Jet efficiency vs #eta (HB); Calo Jet #eta; efficiency' calojeteta_HB_numerator calojeteta_HB_denominator",
        "effic_calojetpT_HE 'Jet pT turnON (HE); CaloJet(pT) [GeV]; efficiency' calojetpT_HE_numerator calojetpT_HE_denominator",
        "effic_calojetpT_HE_pTThresh 'Jet pT turnON (HE); CaloJet(pT) [GeV]; efficiency' calojetpT_pTThresh_HE_numerator calojetpT_pTThresh_HE_denominator",
        "effic_calojetphi_HE 'Jet efficiency vs #phi (HE); Calo Jet #phi [rad]; efficiency' calojetphi_HE_numerator calojetphi_HE_denominator",
        "effic_calojeteta_HE 'Jet efficiency vs #eta (HE); Calo Jet #eta; efficiency' calojeteta_HE_numerator calojeteta_HE_denominator",
        "effic_calojetpT_HE_p 'Jet pT turnON (HEP); CaloJet(pT) [GeV]; efficiency' calojetpT_HE_p_numerator calojetpT_HE_p_denominator",
        # Fixed histogram name below: the denominator was spelled
        # "..._HE_P_denominator" (capital P), inconsistent with its own
        # numerator and with every other HE_p entry in this file.
        "effic_calojetpT_HE_p_pTThresh 'Jet pT turnON (HEP); CaloJet(pT) [GeV]; efficiency' calojetpT_pTThresh_HE_p_numerator calojetpT_pTThresh_HE_p_denominator",
        "effic_calojetphi_HE_p 'Jet efficiency vs #phi (HEP); Calo Jet #phi [rad]; efficiency' calojetphi_HE_p_numerator calojetphi_HE_p_denominator",
        "effic_calojeteta_HE_p 'Jet efficiency vs #eta (HEP); Calo Jet #eta; efficiency' calojeteta_HE_p_numerator calojeteta_HE_p_denominator",
        "effic_calojetpT_HE_m 'Jet pT turnON (HEM); CaloJet(pT) [GeV]; efficiency' calojetpT_HE_m_numerator calojetpT_HE_m_denominator",
        "effic_calojetpT_HE_m_pTThresh 'Jet pT turnON (HEM); CaloJet(pT) [GeV]; efficiency' calojetpT_pTThresh_HE_m_numerator calojetpT_pTThresh_HE_m_denominator",
        "effic_calojetphi_HE_m 'Jet efficiency vs #phi (HEM); Calo Jet #phi [rad]; efficiency' calojetphi_HE_m_numerator calojetphi_HE_m_denominator",
        "effic_calojeteta_HE_m 'Jet efficiency vs #eta (HEM); Calo Jet #eta; efficiency' calojeteta_HE_m_numerator calojeteta_HE_m_denominator",
        "effic_calojetpT_HF 'Jet pT turnON; CaloJet(pT) [GeV]; efficiency' calojetpT_HF_numerator calojetpT_HF_denominator",
        "effic_calojetpT_HF_pTThresh 'Jet pT turnON; CaloJet(pT) [GeV]; efficiency' calojetpT_pTThresh_HF_numerator calojetpT_pTThresh_HF_denominator",
        "effic_calojetphi_HF 'Jet efficiency vs #phi; Calo Jet #phi [rad]; efficiency' calojetphi_HF_numerator calojetphi_HF_denominator",
        "effic_calojeteta_HF 'Jet efficiency vs #eta; Calo Jet #eta; efficiency' calojeteta_HF_numerator calojeteta_HF_denominator",
        ## 2D Eff
        "effic_calojetEtaVsPhi 'Jet efficiency vs #eta and #phi; Calo Jet #eta; #phi' calojetEtaVsPhi_numerator calojetEtaVsPhi_denominator",
        "effic_calojetEtaVsPhi_HB 'Jet efficiency vs #eta and #phi(HB); Calo Jet #eta; #phi' calojetEtaVsPhi_HB_numerator calojetEtaVsPhi_HB_denominator",
        "effic_calojetEtaVsPhi_HE 'Jet efficiency vs #eta and #phi(HE); Calo Jet #eta; #phi' calojetEtaVsPhi_HE_numerator calojetEtaVsPhi_HE_denominator",
        "effic_calojetEtaVsPhi_HF 'Jet efficiency vs #eta and #phi(HF); Calo Jet #eta; #phi' calojetEtaVsPhi_HF_numerator calojetEtaVsPhi_HF_denominator",
        "effic_calojetEtaVsPhi_HE_p 'Jet efficiency vs #eta and #phi(HE_p); Calo Jet #eta; #phi' calojetEtaVsPhi_HE_p_numerator calojetEtaVsPhi_HE_p_denominator",
        "effic_calojetEtaVsPhi_HE_m 'Jet efficiency vs #eta and #phi(HE_m); Calo Jet #eta; #phi' calojetEtaVsPhi_HE_m_numerator calojetEtaVsPhi_HE_m_denominator",
        "effic_calojetEtaVspT 'Jet efficiency #eta vs Pt; Calo Jet #eta; Pt' calojetEtaVspT_numerator calojetEtaVspT_denominator",
        "effic_calojetEtaVspT_HB 'Jet efficiency #eta vs Pt(HB); Calo Jet #eta; Pt' calojetEtaVspT_HB_numerator calojetEtaVspT_HB_denominator",
        "effic_calojetEtaVspT_HE 'Jet efficiency #eta vs Pt(HE); Calo Jet #eta; Pt' calojetEtaVspT_HE_numerator calojetEtaVspT_HE_denominator",
        "effic_calojetEtaVspT_HF 'Jet efficiency #eta vs Pt(HF); Calo Jet #eta; Pt' calojetEtaVspT_HF_numerator calojetEtaVspT_HF_denominator",
        "effic_calojetEtaVspT_HE_p 'Jet efficiency #eta vs Pt(HE_p); Calo Jet #eta; Pt' calojetEtaVspT_HE_p_numerator calojetEtaVspT_HE_p_denominator",
        "effic_calojetEtaVspT_HE_m 'Jet efficiency #eta vs Pt(HE_m); Calo Jet #eta; Pt' calojetEtaVspT_HE_m_numerator calojetEtaVspT_HE_m_denominator"
    ),
    # Efficiency-vs-lumisection profiles (per-region variants kept disabled).
    efficiencyProfile = cms.untracked.vstring(
        "effic_calojetpT_vs_LS 'JET efficiency vs LS; LS; Calo Jet efficiency' calojetpTVsLS_numerator calojetpTVsLS_denominator",
        # "effic_calojetpT_vs_LS 'JET efficiency vs LS; LS; Calo JET efficiency' calojetpTVsLS_numerator calojetpTVsLS_denominator",
        # "effic_calojetpT_HBvs_LS 'JET efficiency vs LS; LS; Calo JET efficiency' calojetpTVsLS_HB_numerator calojetpTVsLS_HB_denominator",
        # "effic_calojetpT_HEvs_LS 'JET efficiency vs LS; LS; Calo JET efficiency' calojetpTVsLS_HE_numerator calojetpTVsLS_HE_denominator",
        # "effic_calojetpT_HFvs_LS 'JET efficiency vs LS; LS; Calo JET efficiency' calojetpTVsLS_HF_numerator calojetpTVsLS_HF_denominator",
        # "effic_calojetpT_HE_mvs_LS 'JET efficiency vs LS; LS; Calo JET efficiency' calojetpTVsLS_HE_m_numerator calojetpTVsLS_HE_m_denominator",
        # "effic_calojetpT_HE_pvs_LS 'JET efficiency vs LS; LS; Calo JET efficiency' calojetpTVsLS_HE_p_numerator calojetpTVsLS_HE_p_denominator",
    ),
)
# Harvesting sequence scheduled by the DQM configuration: runs the PF-jet
# and calo-jet efficiency harvesters defined above.
JetMetPromClient = cms.Sequence(
    pfjetEfficiency
    *calojetEfficiency
)
|
nilq/baby-python
|
python
|
class solution:
    def capital(self, word):
        """Return True if *word* uses capital letters correctly.

        Usage is correct when the word is all uppercase ("USA"), all
        lowercase ("leetcode"), or only the first letter is uppercase
        ("Google"). The empty string counts as correct.

        The original implementation compared characters with an inverted
        bound (``> chr(91)`` instead of ``< chr(91)``), so no character
        was ever counted as a capital, and the title-case branch tested
        ``word[i]`` (the stale loop index) instead of the first letter.
        """
        # Count uppercase ASCII letters ('A'..'Z').
        count = 0
        for ch in word:
            if 'A' <= ch <= 'Z':
                count += 1
        if count == len(word):  # all caps (also covers the empty word)
            return True
        if count == 0:          # no caps at all
            return True
        # Exactly one capital is fine only when it is the first letter.
        return count == 1 and 'A' <= word[0] <= 'Z'
|
nilq/baby-python
|
python
|
import numpy as np
from Bio.Seq import Seq
from .biotables import COMPLEMENTS, CODONS_SEQUENCES
def complement(dna_sequence):
    """Return the complement of the DNA sequence.

    For instance ``complement("ATGCCG")`` returns ``"TACGGC"``.

    Uses Biopython for speed.
    """
    if hasattr(dna_sequence, "complement"):
        return dna_sequence.complement()
    if len(dna_sequence) > 30:
        # Biopython has call overhead but is much faster on long sequences.
        return str(Seq(dna_sequence).complement())
    # Short sequences: a plain table lookup beats the Biopython overhead.
    return "".join(COMPLEMENTS[nucleotide] for nucleotide in dna_sequence)
def reverse_complement(dna_sequence):
    """Return the reverse-complement of the DNA sequence.

    For instance ``reverse_complement("ATGCCG")`` returns ``"CGGCAT"``.

    Uses Biopython for speed.
    """
    if hasattr(dna_sequence, "reverse_complement"):
        return dna_sequence.reverse_complement()
    complemented = complement(dna_sequence)
    return complemented[::-1]
def reverse_translate(protein_sequence, randomize_codons=False):
    """Return a DNA sequence which translates to the provided protein sequence.

    Note: at the moment, the first valid codon found is used for each
    amino-acid (so it is deterministic but no codon-optimization is done).
    """
    if not randomize_codons:
        return "".join(CODONS_SEQUENCES[aa][0] for aa in protein_sequence)
    # Pick a pseudo-random synonymous codon for every amino-acid.
    random_indices = np.random.randint(0, 1000, len(protein_sequence))
    codons = []
    for amino_acid, random_index in zip(protein_sequence, random_indices):
        choices = CODONS_SEQUENCES[amino_acid]
        codons.append(choices[random_index % len(choices)])
    return "".join(codons)
def translate(dna_sequence, translation_table="Bacterial"):
    """Translate the DNA sequence into an amino-acid sequence "MLKYQT...".

    If ``translation_table`` is the name or number of a NCBI genetic table,
    Biopython will be used. See here for options:

    http://biopython.org/DIST/docs/tutorial/Tutorial.html#sec26

    ``translation_table`` can also be a dictionary of the form
    ``{"ATT": "M", "CTC": "X", etc.}`` for more exotic translation tables.
    """
    if not isinstance(translation_table, dict):
        return str(Seq(dna_sequence).translate(table=translation_table))
    # Custom table: translate codon by codon with a plain dict lookup.
    codons = (dna_sequence[i:i + 3] for i in range(0, len(dna_sequence), 3))
    return "".join(translation_table[codon] for codon in codons)
|
nilq/baby-python
|
python
|
#
# Generated with WinchBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.namedobject import NamedObjectBlueprint
class WinchBlueprint(NamedObjectBlueprint):
    """Blueprint (metadata schema) for the SIMA ``Winch`` type.

    Auto-generated; declares the attributes a Winch instance carries.
    """

    def __init__(self, name="Winch", package_path="sima/simo", description=""):
        super().__init__(name,package_path,description)
        # Basic identification attributes.
        self.attributes.append(Attribute("name","string","",default=""))
        self.attributes.append(Attribute("description","string","",default=""))
        self.attributes.append(Attribute("_id","string","",default=""))
        self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
        # Winch-specific configuration.
        self.attributes.append(EnumAttribute("controlType","sima/simo/WinchControl","Type of coupling winch control"))
        self.attributes.append(Attribute("maximumSpeed","number","Max. run velocity for winch",default=0.0))
        self.attributes.append(Attribute("acceleration","number","Max. run acceleration for winch",default=0.0))
        self.attributes.append(Attribute("maximumLength","number","Max. wire length that can be added to drum",default=0.0))
        self.attributes.append(Attribute("drumLength","number","Initial wire length at drum",default=0.0))
        self.attributes.append(BlueprintAttribute("intervals","sima/simo/WinchRunInterval","",True,Dimension("*")))
|
nilq/baby-python
|
python
|
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import torchvision
from torchvision import transforms
def process_image(image_path):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array

        The image is resized to 256x256, center-cropped to 224x224,
        scaled to [0, 1], normalized with the ImageNet mean/std, and
        transposed to channel-first (C, H, W) order.
    '''
    # Open the image
    img = Image.open(image_path)
    # Resize the image
    img = img.resize((256, 256))
    # Center-crop a 224x224 region. (The original cropped the top-left
    # corner with (0, 0, 224, 224), discarding the image centre that the
    # ImageNet normalization statistics and downstream models expect.)
    left = (256 - 224) // 2
    top = (256 - 224) // 2
    img = img.crop((left, top, left + 224, top + 224))
    # Scale the color channels to [0, 1]
    img = np.array(img) / 255
    # Normalize with the ImageNet channel statistics
    means = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    img = (img - means) / std
    # Transpose to the channel-first order PyTorch expects
    img = img.transpose((2, 0, 1))
    return np.array(img)
def imshow(image, ax=None, title=None):
    """Imshow for Tensor."""
    if ax is None:
        _, ax = plt.subplots()

    # PyTorch tensors are channel-first; matplotlib wants channel-last.
    image = image.transpose((1, 2, 0))

    # Undo the ImageNet normalization applied during preprocessing, then
    # clip to [0, 1] so the result does not display as noise.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = np.clip(std * image + mean, 0, 1)

    ax.imshow(image)
    return ax
|
nilq/baby-python
|
python
|
from urllib.parse import urlparse
from wal_e.blobstore import wabs
from wal_e.operator.backup import Backup
class WABSBackup(Backup):
    """Backup operator that stores PostgreSQL WAL files and base backups
    in the Windows Azure Blob Service (WABS).
    """

    def __init__(self, layout, creds, gpg_key_id):
        super(WABSBackup, self).__init__(layout, creds, gpg_key_id)
        # The container name is the network-location part of the prefix URL.
        container_name = urlparse(layout.prefix).netloc
        self.cinfo = wabs.calling_format.from_store_name(container_name)

        # Imported lazily so worker dependencies are only pulled in when a
        # WABS backup object is actually constructed.
        from wal_e.worker.wabs import wabs_worker
        self.worker = wabs_worker
|
nilq/baby-python
|
python
|
# ------------------------------------------------------------------------------------
# Comparison Operators in Python
# ------------------------------------------------------------------------------------
# There are 6 comparison operators in Python; let us learn them one by one.
# Note: whenever you compare two values, the result of the comparison is always either True or False.
# 1. "==" (Equal) -> we use this operator to see if two variables are equal.
a = 5
b = 5
print(a == b)  # a and b are equal, so this prints True.

# 2. "!=" (Not Equal) -> returns True if the two values are not equal, else False.
a = 5
b = 5
print(a != b)  # a and b are equal, so this prints False.

# 3. ">" (Greater than) -> True if the left value is greater than the right.
a = 9
b = 5
print(a > b)  # 9 > 5, so this prints True.

# 4. ">=" (Greater than or equal to) -> True if the left value is greater than or equal to the right.
a = 5
b = 5
print(a >= b)  # a and b are equal, so this prints True.

# 5. "<" (Less than) -> True if the left value is less than the right.
a = 9
b = 5
print(a < b)  # 9 is not less than 5, so this prints False.

# 6. "<=" (Less than or equal to) -> True if the left value is less than or equal to the right.
a = 3
b = 5
print(a <= b)  # 3 < 5, so this prints True.

# ------------------------------------------------------------------------------------
# Challenge for you :
# ------------------------------------------------------------------------------------
# Given an array of numbers, traverse it and report whether the number at each
# position is less than, greater than, or equal to 10.
nums = [1, 3, 10, -7, 8]
for num in nums:
    if num < 10:
        print(num, "is less than 10")
    elif num > 10:
        print(num, "is greater than 10")
    else:
        print(num, "is equal to 10")
|
nilq/baby-python
|
python
|
from game_parser import read_lines
from grid import grid_to_string
from player import Player
from cells import Start, End, Air, Fire, Water, Teleport, Wall
class Game:
    """Maze game state: the grid, the player, and the history of moves."""

    def __init__(self, filename):
        self.grid = read_lines(filename)
        self.position = self.start_position(self.grid)
        self.player = Player(self.position[0], self.position[1])
        self.move_made = []
        self.prev_row = 0
        self.prev_col = 0

    def game_move(self, move):
        """Apply one input character; return a (message, game_over) tuple."""
        if move not in set('asdweqASDWEQ'):
            return 'Please enter a valid move (w, a, s, d, e, q).' + '\n', 0
        if move in ('q', 'Q'):
            return '\n' + 'Bye!', 1
        if move in ('e', 'E'):
            # Wait in place: record the move and step on the current cell.
            self.move_made.append(move)
            return self.grid[self.player.row][self.player.col].step(self)
        # Directional move: remember where we came from, then move.
        self.prev_row = self.player.row
        self.prev_col = self.player.col
        self.player.move(move)
        self.move_made.append(move)
        out_of_bounds = (self.player.row in (-1, len(self.grid))
                         or self.player.col in (-1, len(self.grid[0])))
        if out_of_bounds:
            # Undo the move: stepping off the grid counts as hitting a wall.
            self.move_made.pop()
            self.player.row = self.prev_row
            self.player.col = self.prev_col
            return 'You walked into a wall. Oof!' + '\n', 0
        return self.grid[self.player.row][self.player.col].step(self)

    def start_position(self, grid):
        """Return [row, col] of the Start cell(s) found in the grid."""
        pos = []
        for row_index, row in enumerate(self.grid):
            for col_index, cell in enumerate(row):
                if type(cell) == Start:
                    pos.append(row_index)
                    pos.append(col_index)
        return pos

    def print_step(self, ls):
        """Return the moves joined as a comma-separated lowercase string."""
        return ', '.join(move.lower() for move in ls)
|
nilq/baby-python
|
python
|
# import the necessary packages
import cv2
import math
import numpy as np
# canny edge method, not currently used
def find_edges(image):
    """Canny edge detection with pre/post denoising (not currently used)."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to greyscale
    gray = cv2.GaussianBlur(gray, (17, 17), 0)      # blur
    # Edge-preserving smoothing before edge detection.
    gray = cv2.bilateralFilter(gray, 9, 75, 75)
    edges = cv2.Canny(gray, 100, 200, apertureSize=5)
    # Blur, erode, dilate to remove noise from the edge map.
    edges = cv2.GaussianBlur(edges, (17, 17), 0)
    kernel = np.ones((3, 3), np.uint8)
    edges = cv2.erode(edges, kernel, iterations=1)
    edges = cv2.dilate(edges, kernel, iterations=1)
    return edges
def get_keypoints_and_descriptors(img, orb, n=3):
    """Detect ORB keypoints/descriptors over an n x n grid of image tiles.

    Computing each tile separately forces features to be found in every
    part of the image instead of clustering in the busiest region.
    """
    keypoints = []
    descriptor_chunks = []
    height, width = img.shape[:2]
    for col in range(n):
        for row in range(n):
            # Mask out everything except the current tile.
            tile_mask = np.zeros((height, width), dtype='uint8')
            cv2.rectangle(tile_mask,
                          (col * width // n, row * height // n),
                          ((col + 1) * width // n, (row + 1) * height // n),
                          255, cv2.FILLED)
            kp, des = orb.detectAndCompute(img, tile_mask)
            if kp is not None:
                keypoints.extend(kp)
            # Descriptors must end up in a single numpy array.
            if des is not None:
                descriptor_chunks.append(des)
    descriptors = np.concatenate(descriptor_chunks) if descriptor_chunks else None
    return keypoints, descriptors
def get_orb(n=3):
    """Create an ORB detector; the feature budget is split over n*n tiles."""
    total_features = 12000
    return cv2.ORB_create(
        nfeatures=total_features // (n * n),  # many features per tile
        patchSize=31,                         # granularity
        edgeThreshold=1,                      # edge margin to ignore
    )
def get_flann_index_params():
    """FLANN index parameters for binary (ORB/LSH) descriptors."""
    FLANN_INDEX_LSH = 6  # cv2 constant selecting the LSH index algorithm
    return {
        'algorithm': FLANN_INDEX_LSH,
        'table_number': 6,       # 6-12
        'key_size': 12,          # 12-20
        'multi_probe_level': 1,  # 1-2
    }
def reshape(img, shape):
    """Convert/resize *img* toward shape = (h, w, c); falsy axes are kept."""
    target_h, target_w, target_c = shape
    if len(img.shape) == 2 and target_c == 3:
        # Promote greyscale to three channels when colour is requested.
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    if target_h or target_w:
        new_h = target_h or img.shape[0]
        new_w = target_w or img.shape[1]
        img = cv2.resize(img, (new_w, new_h))
    return img
def resize(img, length):
    """Resize so the longest side equals *length* (aspect ratio preserved).

    A falsy *length* returns the image unchanged.
    """
    if not length:
        return img
    h, w = img.shape[:2]
    if h > w:
        new_h, new_w = length, int((length / h) * w)
    else:
        new_h, new_w = int((length / w) * h), length
    return cv2.resize(img, (new_w, new_h))
def display(*imgs):
    """Show each image (scaled to 650 px) in a window; a key press advances."""
    for img in imgs:
        cv2.imshow('', resize(img, 650))
        cv2.waitKey(0)
    cv2.destroyAllWindows()
def get_matching_points(des_template, des_photo):
    """kNN-match descriptors and keep matches passing Lowe's ratio test."""
    flann_search_params = dict(checks=50)  # 50-100
    matcher = cv2.FlannBasedMatcher(get_flann_index_params(), flann_search_params)
    matches = matcher.knnMatch(des_template, des_photo, k=2)
    if matches is None:
        return matches
    # Lowe's ratio test: drop matches whose best candidate is not clearly
    # better than the runner-up (these are likely incorrect).
    lowe_ratio = 0.7  # 0.65-0.8 trades false negatives vs false positives
    good = []
    for candidates in matches:
        if len(candidates) == 1:
            good.append(candidates[0])
        elif len(candidates) >= 2 and candidates[0].distance < lowe_ratio * candidates[1].distance:
            good.append(candidates[0])
    return good
def get_distance(x, y):
    """Euclidean distance between 2D points *x* and *y*."""
    dx = x[0] - y[0]
    dy = x[1] - y[1]
    return math.hypot(dx, dy)
def find_transform_and_mask(kp_template, kp_photo, matches):
    """Estimate the template->photo homography from matched keypoints."""
    src_pts = np.float32([kp_template[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp_photo[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    # RANSAC discounts suspect matches while fitting; returns (transform, mask).
    return cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
def reverse_transformation(photo, transform, original_shape):
    """Warp *photo* back through the inverse of *transform*.

    Returns the un-warped image with the height/width taken from
    ``original_shape``, or None when the transform is singular.
    """
    h, w = original_shape[:2]
    try:
        inverse = np.linalg.inv(transform)
    except np.linalg.LinAlgError:
        # The previously-used ``np.linalg.linalg.LinAlgError`` is a private,
        # deprecated alias (removed in NumPy 2.0); the public exception
        # lives directly on ``np.linalg``.
        return None
    return cv2.warpPerspective(photo, inverse, (w, h))
def pad_lrud(lrud, padding):
    """Grow a (left, right, up, down) box outward by *padding* pixels."""
    left, right, up, down = lrud
    return left - padding, right + padding, up - padding, down + padding
def crop_section(image, lrud):
    """Slice the (left, right, up, down) box out of *image* (H x W array)."""
    left, right, up, down = lrud
    # Negative offsets would wrap around in numpy slicing; clamp to 0.
    left = max(0, left)
    up = max(0, up)
    return image[up:down, left:right]
def crop_sections(image, df_with_lrud):
    """Return a copy of the dataframe with a 'crop' column of image slices.

    Each row's 'lrud' box is cut out of *image* via ``crop_section``.
    """
    result = df_with_lrud.copy()
    result['crop'] = result['lrud'].apply(lambda box: crop_section(image, box))
    return result
def low_pass_filter(img):
    """Sharpen by adding back the detail removed by a 5x5 Gaussian blur."""
    blurred = cv2.GaussianBlur(img, (5, 5), 0)
    return img + (img - blurred)
def high_pass_filter(img):
    """Sharpen via a 3x3 kernel approximating the Laplacian (high-pass)."""
    sharpen_kernel = np.array([[-1, -1, -1],
                               [-1, 9, -1],
                               [-1, -1, -1]])
    return cv2.filter2D(img, -1, sharpen_kernel)
def low_and_high_pass_filter(img):
    """Apply the low-pass-based sharpen, then the high-pass sharpen."""
    sharpened = low_pass_filter(img)
    return high_pass_filter(sharpened)
|
nilq/baby-python
|
python
|
from ...doc import *
# Declarations of the (deprecated) cudaThread* runtime API entry points,
# expressed in the project's func_decl/parm_def DSL so wrappers can be
# generated for each call. parm_def arguments are: name, storage/kind,
# direction, and an optional options dict (e.g. a C type override).
cudaRT_thread = [
    # 4.2. Thread Management [DEPRECATED]
    func_decl( [ "cudaThreadExit" ] ),
    func_decl( [ "cudaThreadGetCacheConfig" ],
               [ parm_def('pCacheConfig', [MEMORY_HOST, SCALAR], INOUT_OUT ) ] ),
    func_decl( [ "cudaThreadGetLimit" ],
               [ parm_def('pValue', [MEMORY_HOST, SCALAR], INOUT_OUT, { "typepatch" : "size_t *" } ),
                 parm_def('limit', PASSBYVALUE, INOUT_IN ) ] ),
    func_decl( [ "cudaThreadSetCacheConfig" ],
               [ parm_def('cacheConfig', PASSBYVALUE, INOUT_IN ) ] ),
    func_decl( [ "cudaThreadSetLimit" ],
               [ parm_def('limit', PASSBYVALUE, INOUT_IN ),
                 parm_def('value', PASSBYVALUE, INOUT_IN, { "typepatch" : "size_t" } ) ] ),
    func_decl( [ "cudaThreadSynchronize" ] ),
]
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import numpy as np
import itertools
import logging
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_sim.envs.utils.state import ObservableState, FullState
def mlp(input_dim, mlp_dims, last_relu=False):
    """Build a fully-connected network input_dim -> mlp_dims[0] -> ...

    A ReLU follows every Linear layer except the final one, unless
    ``last_relu`` is True.
    """
    dims = [input_dim] + mlp_dims
    last_layer_index = len(dims) - 2
    layers = []
    for index, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
        layers.append(nn.Linear(in_dim, out_dim))
        if last_relu or index != last_layer_index:
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)
class ValueNetwork(nn.Module):
    """MLP mapping a joint-state tensor to a scalar state value."""

    def __init__(self, input_dim, mlp_dims):
        super().__init__()
        self.value_network = mlp(input_dim, mlp_dims)

    def forward(self, state):
        """Return the value estimate for *state*."""
        return self.value_network(state)
class CADRL(Policy):
    def __init__(self):
        """Create the CADRL policy with all hyper-parameters unset.

        Every None attribute below is populated later via ``configure``
        and the ``set_*`` methods before the policy is used.
        """
        super().__init__()
        self.name = 'CADRL'
        self.trainable = True
        self.multiagent_training = None
        self.kinematics = None  # 'holonomic' or 'unicycle' (from config)
        self.epsilon = None  # epsilon-greedy exploration rate (training only)
        self.gamma = None  # RL discount factor
        self.sampling = None
        self.speed_samples = None
        self.rotation_samples = None
        self.query_env = None
        self.action_space = None  # built lazily by build_action_space
        self.speeds = None
        self.rotations = None
        self.action_values = None  # per-action values from the last predict()
        self.with_om = None  # occupancy-map settings (unused by plain CADRL)
        self.cell_num = None
        self.cell_size = None
        self.om_channel_size = None
        # NOTE(review): these appear to be the feature dims of the rotated
        # joint state produced by rotate() — confirm against that method.
        self.self_state_dim = 6
        self.human_state_dim = 7
        self.joint_state_dim = self.self_state_dim + self.human_state_dim
    def configure(self, config):
        """Read hyper-parameters from *config* and build the value network.

        ``config`` is a configparser-style object (get/getint/getfloat/
        getboolean).
        """
        self.set_common_parameters(config)
        # 'mlp_dims' is a comma-space separated list, e.g. "150, 100, 100, 1".
        mlp_dims = [int(x) for x in config.get('cadrl', 'mlp_dims').split(', ')]
        self.model = ValueNetwork(self.joint_state_dim, mlp_dims)
        self.multiagent_training = config.getboolean('cadrl', 'multiagent_training')
        logging.info('Policy: CADRL without occupancy map')
    def set_common_parameters(self, config):
        """Load the config options shared by all CADRL-family policies."""
        self.gamma = config.getfloat('rl', 'gamma')
        self.kinematics = config.get('action_space', 'kinematics')
        self.sampling = config.get('action_space', 'sampling')
        self.speed_samples = config.getint('action_space', 'speed_samples')
        self.rotation_samples = config.getint('action_space', 'rotation_samples')
        self.query_env = config.getboolean('action_space', 'query_env')
        # Occupancy-map options (read even though plain CADRL does not use one).
        self.cell_num = config.getint('om', 'cell_num')
        self.cell_size = config.getfloat('om', 'cell_size')
        self.om_channel_size = config.getint('om', 'om_channel_size')
    def set_device(self, device):
        """Remember *device* and move the value network onto it."""
        self.device = device
        self.model.to(device)
    def set_epsilon(self, epsilon):
        """Set the epsilon-greedy exploration rate used during training."""
        self.epsilon = epsilon
def build_action_space(self, v_pref):
"""
Action space consists of 25 uniformly sampled actions in permitted range and 25 randomly sampled actions.
"""
holonomic = True if self.kinematics == 'holonomic' else False
speeds = [(np.exp((i + 1) / self.speed_samples) - 1) / (np.e - 1) * v_pref for i in range(self.speed_samples)]
if holonomic:
rotations = np.linspace(0, 2 * np.pi, self.rotation_samples, endpoint=False)
else:
rotations = np.linspace(-np.pi / 4, np.pi / 4, self.rotation_samples)
action_space = [ActionXY(0, 0) if holonomic else ActionRot(0, 0)]
for rotation, speed in itertools.product(rotations, speeds):
if holonomic:
action_space.append(ActionXY(speed * np.cos(rotation), speed * np.sin(rotation)))
else:
action_space.append(ActionRot(speed, rotation))
self.speeds = speeds
self.rotations = rotations
self.action_space = action_space
    def propagate(self, state, action):
        """Predict *state* one time step ahead under *action*.

        An ObservableState (a human) is advanced with an ActionXY velocity;
        a FullState (the robot) is advanced according to the configured
        kinematics. Raises ValueError for any other state type.
        """
        if isinstance(state, ObservableState):
            # propagate state of humans
            next_px = state.px + action.vx * self.time_step
            next_py = state.py + action.vy * self.time_step
            next_state = ObservableState(next_px, next_py, action.vx, action.vy, state.radius)
        elif isinstance(state, FullState):
            # propagate state of current agent
            # perform action without rotation
            if self.kinematics == 'holonomic':
                next_px = state.px + action.vx * self.time_step
                next_py = state.py + action.vy * self.time_step
                next_state = FullState(next_px, next_py, action.vx, action.vy, state.radius,
                                       state.gx, state.gy, state.v_pref, state.theta)
            else:
                # Unicycle kinematics: rotate first, then move along the
                # new heading with speed action.v.
                next_theta = state.theta + action.r
                next_vx = action.v * np.cos(next_theta)
                next_vy = action.v * np.sin(next_theta)
                next_px = state.px + next_vx * self.time_step
                next_py = state.py + next_vy * self.time_step
                next_state = FullState(next_px, next_py, next_vx, next_vy, state.radius, state.gx, state.gy,
                                       state.v_pref, next_theta)
        else:
            raise ValueError('Type error')

        return next_state
    def predict(self, state):
        """
        Input state is the joint state of robot concatenated by the observable state of other agents

        To predict the best action, agent samples actions and propagates one step to see how good the next state is
        thus the reward function is needed

        Returns the chosen action (ActionXY or ActionRot). During training,
        with probability epsilon a random action is returned instead of the
        greedy one. Side effects: may build the action space on first use,
        fills self.action_values, and (in training) stores self.last_state.
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')

        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            self.build_action_space(state.self_state.v_pref)

        # Epsilon-greedy exploration (training only).
        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            # Greedy: evaluate every candidate action by one-step lookahead.
            self.action_values = list()
            max_min_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                ob, reward, done, info = self.env.onestep_lookahead(action)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
                                              for next_human_state in ob], dim=0)
                # VALUE UPDATE
                # Score each action by its worst-case (minimum over humans)
                # discounted next-state value plus the immediate reward.
                outputs = self.model(self.rotate(batch_next_states))
                min_output, min_index = torch.min(outputs, 0)
                min_value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * min_output.data.item()
                self.action_values.append(min_value)
                if min_value > max_min_value:
                    max_min_value = min_value
                    max_action = action

        if self.phase == 'train':
            self.last_state = self.transform(state)

        return max_action
def transform(self, state):
    """Flatten a joint state (self + exactly one human) into a rotated tensor.

    :param state: joint state with ``self_state`` and one ``human_states`` entry
    :return: tensor of shape (len(state), )
    """
    assert len(state.human_states) == 1
    joint = state.self_state + state.human_states[0]
    tensor_state = torch.Tensor(joint).to(self.device)
    return self.rotate(tensor_state.unsqueeze(0)).squeeze(dim=0)
def rotate(self, state):
    """
    Transform the coordinate to agent-centric. x axis: position -> goal
    Input state tensor is of size (batch_size, state_length)
    """
    # 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
    #  0     1      2     3      4        5     6      7         8       9      10     11     12       13
    batch = state.shape[0]
    # Goal vector from the agent; rot is its heading angle.
    dx = (state[:, 5] - state[:, 0]).reshape((batch, -1))  # -1 means calculated automatically
    dy = (state[:, 6] - state[:, 1]).reshape((batch, -1))
    rot = torch.atan2(state[:, 6] - state[:, 1], state[:, 5] - state[:, 0])

    # Distance to goal (invariant under the rotation).
    dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)
    v_pref = state[:, 7].reshape((batch, -1))
    # Own velocity rotated into the goal-aligned frame.
    vx = (state[:, 2] * torch.cos(rot) + state[:, 3] * torch.sin(rot)).reshape((batch, -1))
    vy = (state[:, 3] * torch.cos(rot) - state[:, 2] * torch.sin(rot)).reshape((batch, -1))
    radius = state[:, 4].reshape((batch, -1))
    if self.kinematics == 'unicycle':
        # Heading becomes relative to the goal direction.
        theta = (state[:, 8] - rot).reshape((batch, -1))
    else:
        # set theta to be zero since it's not used
        theta = torch.zeros_like(v_pref)
    # Other agent's velocity and position, rotated and made relative.
    vx1 = (state[:, 11] * torch.cos(rot) + state[:, 12] * torch.sin(rot)).reshape((batch, -1))
    vy1 = (state[:, 12] * torch.cos(rot) - state[:, 11] * torch.sin(rot)).reshape((batch, -1))
    px1 = (state[:, 9] - state[:, 0]) * torch.cos(rot) + (state[:, 10] - state[:, 1]) * torch.sin(rot)
    px1 = px1.reshape((batch, -1))
    py1 = (state[:, 10] - state[:, 1]) * torch.cos(rot) - (state[:, 9] - state[:, 0]) * torch.sin(rot)
    py1 = py1.reshape((batch, -1))
    radius1 = state[:, 13].reshape((batch, -1))
    radius_sum = radius + radius1
    # Euclidean distance between the two agents.
    da = torch.norm(torch.cat([(state[:, 0] - state[:, 9]).reshape((batch, -1)), (state[:, 1] - state[:, 10]).
                               reshape((batch, -1))], dim=1), 2, dim=1, keepdim=True)
    new_state = torch.cat([dg, v_pref, theta, radius, vx, vy, px1, py1, vx1, vy1, radius1, da, radius_sum], dim=1)
    return new_state
|
nilq/baby-python
|
python
|
from snovault import upgrade_step
@upgrade_step('award', '', '2')
def award_0_2(value, system):
    """Upgrade award schema '' -> '2'.

    http://redmine.encodedcc.org/issues/1295
    http://redmine.encodedcc.org/issues/1307
    """
    disabled_rfas = ('ENCODE2', 'ENCODE2-Mouse')
    value['status'] = 'disabled' if value['rfa'] in disabled_rfas else 'current'
    # http://encode.stanford.edu/issues/1022
    if value.get('url') == '':
        del value['url']
@upgrade_step('award', '2', '3')
def award_2_3(value, system):
    """Upgrade award schema '2' -> '3' (http://redmine.encodedcc.org/issues/4743)."""
    current_group = value['viewing_group']
    if current_group == 'ENCODE':
        value['viewing_group'] = 'ENCODE3'
@upgrade_step('award', '5', '6')
def award_5_6(value, system):
    """Upgrade award schema '5' -> '6': default title from name.

    https://encodedcc.atlassian.net/browse/ENCD-4682
    """
    has_title = 'title' in value
    if not has_title:
        value['title'] = value['name']
@upgrade_step('award', '6', '7')
def award_6_7(value, system):
    """Upgrade award schema '6' -> '7': rename a milestone assay term.

    https://encodedcc.atlassian.net/browse/ENCD-4711
    """
    for entry in value.get('milestones', []):
        if entry.get('assay_term_name', '') == 'single-nuclei ATAC-seq':
            entry['assay_term_name'] = 'single-nucleus ATAC-seq'
|
nilq/baby-python
|
python
|
# example based on code from numpy library:
# https://github.com/numpy/numpy/blob/master/numpy/matlib.py
# https://github.com/numpy/numpy/blob/master/numpy/fft/fftpack.c
def ones(shape, dtype=None, order='C'):
# ...
static void radb3(int ido, int l1, const Treal cc[], Treal ch[],
const Treal wa1[], const Treal wa2[])
{
// ...
}
'''
The crucial difference is that the first is public and the second is private.
The public API is for people who may not know how to use the library,
or who are new to numerics. The function has a specific purpose,
return value, and argument types, following a certain convention.
This can and should be explained briefly.
Then an illustrative example can be given.
Private implementation details of the FFT are addressed to a handful
of developers - experts who can work on them accurately and performantly.
These details can change at any time without users noticing.
To explain the second one, part of a numerical algorithms textbook
would need to be put into the source code. But that wouldn't serve any purpose:
people working with such problems can associate the name "radb3" with a
mixed-radix FFT of factor 3 and find a relevant explanation in a more suitable
place than the source code.
'''
def ones(shape, dtype=None, order='C'):
"""
Matrix of ones.
Return a matrix of given shape and type, filled with ones.
Parameters
----------
shape : {sequence of ints, int}
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is np.float64.
order : {'C', 'F'}, optional
Whether to store matrix in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Matrix of ones of given shape, dtype, and order.
See Also
--------
ones : Array of ones.
matlib.zeros : Zero matrix.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> np.matlib.ones((2,3))
matrix([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> np.matlib.ones(2)
matrix([[ 1., 1.]])
"""
# ...
// no documentation at all!
static void radb3(int ido, int l1, const Treal cc[], Treal ch[],
const Treal wa1[], const Treal wa2[])
{
// ...
}
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
import datetime
from collections import defaultdict
import pytz
from celery.task import periodic_task, task
from celery.schedules import crontab
from apps.api.modules.bkdata_databus import BkDataDatabusApi
from apps.log_databus.models import CollectorConfig
from apps.log_databus.handlers.collector import CollectorHandler
from apps.api import TransferApi, BkLogApi
from apps.log_databus.constants import (
STORAGE_CLUSTER_TYPE,
REGISTERED_SYSTEM_DEFAULT,
CollectItsmStatus,
)
from apps.feature_toggle.plugins.constants import FEATURE_BKDATA_DATAID
from apps.log_measure.handlers.elastic import ElasticHandle
from apps.utils.log import logger
from apps.log_databus.models import StorageUsed
from apps.feature_toggle.handlers.toggle import FeatureToggleObject
@task(ignore_result=True)
def shutdown_collector_warm_storage_config(cluster_id):
    """Asynchronously disable warm-phase storage for collectors on a cluster.

    For every collector whose result table lives on *cluster_id*, reset the
    Elasticsearch ``warm_phase_days`` to 0 via the Transfer API.
    """
    # Fetch collectors once (the original scanned objects.all() twice).
    collectors = [c for c in CollectorConfig.objects.all() if c.table_id]
    result_table_list = [c.table_id for c in collectors]
    if not result_table_list:
        return
    cluster_infos = CollectorHandler.bulk_cluster_infos(result_table_list=result_table_list)
    for collector in collectors:
        try:
            cluster_info = cluster_infos.get(collector.table_id)
            if not cluster_info:
                continue
            # Only touch collectors that are stored on the target cluster.
            if cluster_info["cluster_config"]["cluster_id"] != cluster_id:
                continue
            TransferApi.modify_result_table(
                {
                    "table_id": collector.table_id,
                    "default_storage": "elasticsearch",
                    "default_storage_config": {
                        "warm_phase_days": 0,
                    },
                }
            )
        except Exception as e:
            # BUG fix: logger.error("msg", e) supplied the exception as an
            # unused %-format argument; log with a placeholder and traceback.
            logger.exception("refresh collector storage config error: %s", e)
            continue
@periodic_task(run_every=crontab(minute="0", hour="1"))
def collector_status():
    """
    Check collectors: automatically stop any not stored within 24 hours.
    :return:
    """
    # Select active collectors created more than 24h ago that still have no
    # result table, excluding those whose ITSM ticket is still pending.
    day_ago = datetime.datetime.now(pytz.timezone("UTC")) - datetime.timedelta(days=1)
    collector_configs = CollectorConfig.objects.filter(table_id=None, is_active=True, created_at__lt=day_ago).exclude(
        itsm_ticket_status=CollectItsmStatus.APPLYING
    )
    # Stop each collector — unless the BKDATA feature is enabled and the
    # collector already has cleans configured for its bkdata data id.
    for _collector in collector_configs:
        if (
            FeatureToggleObject.switch(FEATURE_BKDATA_DATAID)
            and _collector.bkdata_data_id
            and BkDataDatabusApi.get_cleans(params={"raw_data_id": _collector.bkdata_data_id})
        ):
            continue
        CollectorHandler(collector_config_id=_collector.collector_config_id).stop()
@periodic_task(run_every=crontab(minute="0"))
def sync_storage_capacity():
    """
    Hourly sync of per-business used storage capacity across all clusters.
    :return:
    """
    # 1. Businesses that have at least one collector config.
    business_list = CollectorConfig.objects.all().values("bk_biz_id").distinct()
    # 2. All storage clusters of the configured type.
    params = {"cluster_type": STORAGE_CLUSTER_TYPE}
    cluster_obj = TransferApi.get_cluster_info(params)
    # Local import — presumably to avoid a circular import; confirm.
    from apps.log_search.models import LogIndexSet
    # cluster id -> project id -> number of index sets.
    cluster_biz_cnt_map = defaultdict(lambda: defaultdict(int))
    for index_set in LogIndexSet.objects.all():
        cluster_biz_cnt_map[index_set.storage_cluster_id][index_set.project_id] += 1
    for _cluster in cluster_obj:
        try:
            usage, total = get_storage_usage_and_all(_cluster["cluster_config"]["cluster_id"])
            index_count = LogIndexSet.objects.filter(
                storage_cluster_id=_cluster["cluster_config"]["cluster_id"]
            ).count()
            # Cluster-wide summary row (bk_biz_id=0).
            StorageUsed.objects.update_or_create(
                bk_biz_id=0,
                storage_cluster_id=_cluster["cluster_config"]["cluster_id"],
                defaults={
                    "storage_used": 0,
                    "storage_total": total,
                    "storage_usage": usage,
                    "index_count": index_count,
                    "biz_count": len(cluster_biz_cnt_map.get(_cluster["cluster_config"]["cluster_id"], {}).keys()),
                },
            )
            # 2-1 Shared (default-registered) cluster: query every business.
            if _cluster["cluster_config"].get("registered_system") == REGISTERED_SYSTEM_DEFAULT:
                for _business in business_list:
                    storage_used = get_biz_storage_capacity(_business["bk_biz_id"], _cluster)
                    StorageUsed.objects.update_or_create(
                        bk_biz_id=_business["bk_biz_id"],
                        storage_cluster_id=_cluster["cluster_config"]["cluster_id"],
                        defaults={"storage_used": storage_used},
                    )
            # 2-2 Third-party cluster: only its dedicated business is queried.
            else:
                bk_biz_id = _cluster["cluster_config"].get("custom_option", {}).get("bk_biz_id")
                if not bk_biz_id:
                    continue
                storage_used = get_biz_storage_capacity(bk_biz_id, _cluster)
                StorageUsed.objects.update_or_create(
                    bk_biz_id=bk_biz_id,
                    storage_cluster_id=_cluster["cluster_config"]["cluster_id"],
                    defaults={"storage_used": storage_used},
                )
        except Exception as e:
            logger.exception("sync_storage_info error: %s" % e)
def query(cluster_id):
    """Return a getter bound to *cluster_id* that proxies ES URLs via BkLogApi.

    The returned callable performs a best-effort request: on any failure it
    logs the exception and returns None.
    """
    def get(url):
        params = {
            "scenario_id": "es",
            "storage_cluster_id": cluster_id,
            "url": url,
        }
        try:
            return BkLogApi.es_route(params)
        except Exception as e:  # pylint:disable=broad-except
            logger.exception(f"request es info error {e}")
            return None

    return get
def get_storage_usage_and_all(cluster_id):
    """Return ``(usage_percent, total_bytes)`` for a cluster via _cat/allocation.

    Returns (0, 0) when the cluster cannot be queried or reports no capacity.
    """
    storage_config = query(cluster_id)("_cat/allocation?bytes=b")
    usage = 0
    total = 0
    if not storage_config:
        return usage, total
    for _storage in storage_config:
        total += int(_storage.get("disk.total") or 0)
        usage += int(_storage.get("disk.used") or 0)
    if not total:
        # BUG fix: every node reporting 0 total previously raised
        # ZeroDivisionError in the percentage computation below.
        return 0, 0
    return int((usage / total) * 100), total
def get_biz_storage_capacity(bk_biz_id, cluster):
    """Return the total open-index size (GB, 2 decimals) a business uses on a cluster.

    Returns 0 when the index cat info cannot be fetched.
    """
    # Cluster connection info.
    cluster_config = cluster["cluster_config"]
    domain_name = cluster_config["domain_name"]
    port = cluster_config["port"]
    auth_info = cluster.get("auth_info", {})
    username = auth_info.get("username")
    password = auth_info.get("password")
    # Business log indices follow the "<biz>_bklog_*" naming pattern.
    index_format = f"{bk_biz_id}_bklog_*"
    # Index information from _cat/indices, sizes in MB.
    try:
        indices_info = ElasticHandle(domain_name, port, username, password).get_indices_cat(
            index=index_format, bytes="mb", column=["index", "store.size", "status"]
        )
    except Exception as e:  # pylint: disable=broad-except
        logger.exception(f"集群[{domain_name}] 索引cat信息获取失败,错误信息:{e}")
        return 0
    # Sum sizes of open indices only; convert MB -> GB.
    total_size = 0
    for _info in indices_info:
        if _info["status"] == "close":
            continue
        total_size += int(_info["store.size"])
    return round(total_size / 1024, 2)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
class Solution(object):
    def longest_common_subsequence(self, s):
        """Return the longest palindromic substring of *s*.

        NOTE: despite the method name (kept for backward compatibility),
        this is the classic interval-DP longest-palindromic-substring
        algorithm: dp[i][j] is True when s[i..j] is a palindrome.

        Fixes vs. the original:
        - the length loop now reaches len(s) (full-string palindromes like
          'abba' were never tested),
        - the start-index loop includes the final window ('xaa' missed 'aa'),
        - the returned slice uses the end index, not the length
          ('zaba' returned 'ab' instead of 'aba').
        """
        lens = len(s)
        if lens <= 1:
            return s
        sleft, sright = 0, 0
        dp = [[False] * lens for _ in range(lens)]
        for i in range(lens):
            dp[i][i] = True          # single characters are palindromes
        for i in range(1, lens):
            dp[i][i - 1] = True      # empty "center" for even-length windows
        for length in range(2, lens + 1):            # was range(2, lens)
            for j in range(0, lens - length + 1):    # was range(0, lens - length)
                if s[j] == s[j + length - 1] and dp[j + 1][j + length - 2]:
                    dp[j][j + length - 1] = True
                    if sright - sleft + 1 < length:
                        sleft = j
                        sright = j + length - 1
        return s[sleft:sright + 1]                   # was s[sleft:sright - sleft + 1]
# Demo only; guarded so importing this module has no side effects.
if __name__ == '__main__':
    a = Solution()
    print(a.longest_common_subsequence('abccbaaa'))
|
nilq/baby-python
|
python
|
#!/bin/python3
import sys
def isBalanced(s):
    """Return "YES" if every bracket in *s* is matched and properly nested.

    Any string that is empty, of odd length, or shorter than two characters
    is immediately unbalanced.
    """
    n = len(s)
    if n < 2 or n % 2 != 0:
        return "NO"
    closers = {')': '(', ']': '[', '}': '{'}
    openers = {'(', '[', '{'}
    pending = []
    for ch in s:
        if ch in openers:
            pending.append(ch)
            continue
        # Closing bracket: the stack must hold its matching opener on top.
        if not pending or pending.pop() != closers[ch]:
            return "NO"
    return "YES" if not pending else "NO"
if __name__ == "__main__":
    # Read the number of test cases, then one bracket string per line,
    # printing "YES"/"NO" for each.
    t = int(input().strip())
    for a0 in range(t):
        s = input().strip()
        result = isBalanced(s)
        print(result)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 13 16:31:11 2014
@author: jc3e13
This module contains functions for investigating internal gravity waves.
All functions take angular frequency and wavenumber. Angular wavenumber is 2 pi
divided by the wavelength and angular frequency is 2 pi divided by the period.
"""
import numpy as np
def omega(N, k, m, l=0., f=0.):
    """Dispersion relation for internal gravity waves in a continuously
    stratified fluid (Gill 1980). All inputs are angular units.

    Parameters
    ----------
    N : ndarray
        Buoyancy frequency [rad s-1]
    k, l : ndarray
        Horizontal wavenumbers (x, y) [rad m-1]; l defaults to 0.
    m : ndarray
        Vertical wavenumber (z) [rad m-1]
    f : ndarray, optional
        Coriolis parameter [rad s-1]

    Returns
    -------
    ndarray
        Wave frequency [rad s-1].
    """
    kh2 = k**2 + l**2          # squared horizontal wavenumber magnitude
    return np.sqrt((f**2 * m**2 + N**2 * kh2) / (kh2 + m**2))
def U_0(phi_0, k, l, om, f):
    """Complex zonal velocity amplitude (angular units)."""
    factor = (k * om + 1j * l * f) / (om**2 - f**2)
    return factor * phi_0
def V_0(phi_0, k, l, om, f):
    """Complex meridional velocity amplitude (angular units)."""
    factor = (l * om - 1j * k * f) / (om**2 - f**2)
    return factor * phi_0
def W_0(phi_0, m, om, N):
    """Vertical velocity amplitude (angular units)."""
    denom = N**2 - om**2
    return -phi_0 * om * m / denom
def B_0(phi_0, m, om, N):
    """Complex buoyancy perturbation amplitude (angular units)."""
    denom = N**2 - om**2
    return phi_0 * 1j * m * N**2 / denom
def ETA_0(phi_0, m, om, N):
    """Complex isopycnal displacement amplitude (angular units)."""
    denom = N**2 - om**2
    return 1j * m * phi_0 / denom
def RHO_0(phi_0, m, om, N, g=-9.81, rho_0=1000.):
    """Density perturbation amplitude (angular units).

    Derived from the buoyancy amplitude via rho' = -b * rho_0 / g.
    """
    # Inline of B_0(phi_0, m, om, N).
    b_amp = phi_0 * 1j * m * N**2 / (N**2 - om**2)
    return -b_amp * rho_0 / g
def wave_phase(x, y, z, t, k, l, m, om, U=0., V=0., W=0., phase_0=0.):
    """Complex phase i*(k.x - (om + k.U) t + phase_0).

    k.x is the spatial part, k.U the Doppler shift from the mean flow
    (U, V, W), and phase_0 an arbitrary offset.
    """
    spatial = k * x + l * y + m * z
    doppler_om = om + k * U + l * V + m * W
    return 1j * (spatial - doppler_om * t + phase_0)
def phi(x, y, z, t, phi_0, k, l, m, om, U=0., V=0., W=0., phase_0=0.):
    """Pressure perturbation field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(phi_0 * np.exp(ph))
def u(x, y, z, t, phi_0, k, l, m, om, f=0., U=0., V=0., W=0., phase_0=0.):
    """Zonal velocity perturbation field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(U_0(phi_0, k, l, om, f) * np.exp(ph))
def v(x, y, z, t, phi_0, k, l, m, om, f=0., U=0., V=0., W=0., phase_0=0.):
    """Meridional velocity perturbation field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(V_0(phi_0, k, l, om, f) * np.exp(ph))
def w(x, y, z, t, phi_0, k, l, m, om, N, U=0., V=0., W=0., phase_0=0.):
    """Vertical velocity perturbation field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(W_0(phi_0, m, om, N) * np.exp(ph))
def b(x, y, z, t, phi_0, k, l, m, om, N, U=0., V=0., W=0., phase_0=0.):
    """Buoyancy perturbation field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(B_0(phi_0, m, om, N) * np.exp(ph))
def rho(x, y, z, t, phi_0, k, l, m, om, N, U=0., V=0., W=0., phase_0=0.,
        g=-9.81, rho_0=1000.):
    """Density perturbation field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(RHO_0(phi_0, m, om, N, g, rho_0) * np.exp(ph))
def eta(x, y, z, t, phi_0, k, l, m, om, N, U=0., V=0., W=0., phase_0=0.):
    """Vertical isopycnal displacement field."""
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(ETA_0(phi_0, m, om, N) * np.exp(ph))
def wave_vel(r, t, phi_0, N, f, k, l, m, om, U=0., V=0., W=0., phase_0=0.):
    """Wave velocity: position stack ``r[..., 0:3]`` -> (n, 3) velocity stack."""
    x, y, z = r[..., 0], r[..., 1], r[..., 2]
    osc = np.exp(wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W,
                            phase_0=phase_0))
    amplitudes = (U_0(phi_0, k, l, om, f),
                  V_0(phi_0, k, l, om, f),
                  W_0(phi_0, m, om, N))
    components = [np.real(amp * osc) for amp in amplitudes]
    return np.vstack(components).T
def buoy(r, t, phi_0, N, k, l, m, om, U=0., V=0., W=0., phase_0=0.):
    """Wave buoyancy: position stack ``r[..., 0:3]`` -> buoyancy array."""
    x, y, z = r[..., 0], r[..., 1], r[..., 2]
    ph = wave_phase(x, y, z, t, k, l, m, om, U=U, V=V, W=W, phase_0=phase_0)
    return np.real(B_0(phi_0, m, om, N) * np.exp(ph))
def cgz(k, m, N, l=0., f=0.):
"""Vertical component of group velocity."""
num = -m*(N**2 - f**2)*(k**2 + l**2)
den = (k**2 + l**2 + m**2)**1.5 * (f**2*m**2 + N**2*(k**2 + l**2))**0.5
return num/den
def cgvec(k, l, m, N, f):
    """Group velocity vector (renamed locals to avoid shadowing ``dir``/``phi``)."""
    om = omega(N, k, m, l, f)
    tilt = phip(k, m, l)
    azim = lambdap(k, l)
    mag = np.cos(tilt) * np.sin(tilt) * (N**2 - f**2) / (om * np.sqrt(k**2 + l**2 + m**2))
    unit = np.array([np.sin(tilt) * np.cos(azim),
                     np.sin(tilt) * np.sin(azim),
                     -np.cos(tilt)])
    return mag * unit
def phip(k, m, l=0.):
    """Angle between the wavevector and the horizontal plane."""
    frac = m**2 / (k**2 + l**2 + m**2)
    return np.arcsin(np.sqrt(frac))
def lambdap(k, l):
    """Azimuthal angle of the wavevector in the horizontal plane."""
    azimuth = np.arctan2(l, k)
    return azimuth
def alpha(k, m, l=0.):
    """Wavenumber aspect ratio sqrt((k**2 + l**2) / m**2).

    Note: this is horizontal over vertical magnitude (the original docstring
    described the inverse).
    """
    kh2 = k**2 + l**2
    return np.sqrt(kh2 / m**2)
def Edens(w_0, k, m, l=0., rho_0=1025.):
    """Wave energy density from the vertical velocity amplitude *w_0*."""
    tilt = phip(k, m, l=l)
    speed_amp = w_0 / np.cos(tilt)
    return 0.5 * rho_0 * speed_amp**2
def Efluxz(w_0, k, m, N, l=0., f=0., rho_0=1025.):
    """Vertical energy flux in the frame of reference moving with the wave."""
    density = Edens(w_0, k, m, l=l, rho_0=rho_0)
    return density * cgz(k, m, N, l=l, f=f)
def Mfluxz(phi_0, k, l, m, om, N, f=0., rho_0=1025.):
    """Absolute vertical flux of horizontal momentum.

    Warning: I believe this is incorrect for small aspect ratio.
    """
    w_amp = np.abs(W_0(phi_0, m, om, N))
    uw = np.abs(U_0(phi_0, k, l, om, f)) * w_amp
    vw = np.abs(V_0(phi_0, k, l, om, f)) * w_amp
    return 0.5 * rho_0 * np.sqrt(uw**2 + vw**2)
|
nilq/baby-python
|
python
|
import csv
import io
import json
import random
from copy import deepcopy
from datetime import datetime

from flask import Markup, Response, current_app, flash, redirect, url_for
from flask_admin.actions import action

from quokka.utils.text import slugify
class PublishAction(object):
    """Admin action mixin that toggles the ``published`` flag on items."""

    @action(
        'toggle_publish',
        'Publish/Unpublish',
        'Publish/Unpublish?'
    )
    def action_toggle_publish(self, ids):
        """Flip ``published`` for every selected id, firing model hooks."""
        for item_id in ids:
            item = current_app.db.get_with_content(_id=item_id)
            item['published'] = not item['published']
            # fires the versioning and hooks
            self._on_model_change(None, item, False)
            pk = self.get_pk_value(item)
            self.coll.update({'_id': pk}, item)
            # more hooks
            self.after_model_change(None, item, False)
        flash(
            f'{len(ids)} items were successfully published/Unpublished.',
            'success'
        )
class CloneAction(object):
    """Admin action mixin that duplicates a single selected item."""

    @action(
        'clone_item',
        'Create a copy',
        'Are you sure you want a copy?'
    )
    def action_clone_item(self, ids):
        """Deep-copy one item with a randomized slug and open it for editing."""
        if len(ids) > 1:
            flash(
                "You can select only one item for this action",
                'error'
            )
            return
        original = current_app.db.get_with_content(_id=ids[0])
        clone = deepcopy(original)
        del clone['_id']
        # Random suffix keeps the slug unique among copies.
        clone['slug'] = f'{clone["slug"]}-{random.getrandbits(32)}'
        clone['_isclone'] = True
        self._on_model_change(None, clone, True)
        self.coll.insert(clone)
        self.after_model_change(None, clone, True)
        return redirect(url_for('.edit_view', id=clone['_id']))
class UserProfileBlockAction(object):
    """Admin action mixin that creates a profile block for selected users."""

    @action(
        'create_userprofile',
        'Create user profile block',
        'Are you sure you want to create user profile block?'
    )
    def action_create_userprofile(self, ids):
        """For each selected user, create (or link to) a profile block.

        Ensures the user has a ``fullname``, then either reports the existing
        block with that slug or inserts a new block document pre-filled with
        placeholder custom_vars for social links.
        """
        for _id in ids:
            user = current_app.db.users.find_one({'_id': _id})
            # Fall back to the username when no fullname was ever set.
            if not user.get('fullname'):
                user['fullname'] = user['username']
                current_app.db.users.update_one(
                    {'_id': user['_id']}, {'fullname': user['fullname']}
                )
                # This update looks like having a cache
                # self.coll.update_one(
                #     {'_id': _id}, {'fullname': user['fullname']}
                # )
            fullslug = slugify(user['fullname'])
            # A block with this slug may already exist; just link to it.
            existing_block = current_app.db.get(
                'index', {'content_type': 'block', 'slug': fullslug}
            )
            if existing_block:
                blocklink = url_for(
                    'quokka.core.content.admin.blockview.edit_view',
                    id=existing_block['_id']
                )
                flash(Markup(
                    f'Profile block for {user["username"]} '
                    f'already exists at: '
                    f'<a href="{blocklink}">{existing_block["_id"]}</a>'
                ))
            else:
                # TODO: move creation logic to a model like obj
                new_data = {
                    'title': user['fullname'],
                    'slug': fullslug,
                    'summary': f'Profile page for {user["username"]}',
                    'published': True,
                    'comments': False,
                    'content_type': 'block',
                    'version': 0,
                    'date': datetime.now(),
                    'modified': datetime.now(),
                    'language': 'en',
                    'content_format': 'markdown',
                    'created_by': 'admin',
                    'modified_by': 'admin',
                    'category': '',
                    'category_slug': '',
                    # Placeholder custom vars the user is expected to edit.
                    'custom_vars': [
                        {'key': 'profile_title',
                         'value': f'@note change this field to customize html page title'},  # noqa
                        {'key': 'twitter',
                         'value': f'@note Fill this field with user twitter profile e.g: http://twitter.com/{user["username"]}'},  # noqa
                        {'key': 'facebook',
                         'value': f'@note Fill this field with user facebook profile e.g: http://facebook.com/{user["username"]}'},  # noqa
                        {'key': 'pinterest',
                         'value': f'@note Fill this field with user pinterest profile e.g: http://pinterest.com/{user["username"]}'},  # noqa
                        {'key': 'github',
                         'value': f'@note Fill this field with user github profile e.g http://github.com/{user["username"]}'},  # noqa
                        {'key': 'aboutme',
                         'value': f'@note Fill this field with user about.me profile e.g: http://aboutme.com/{user["username"]}'},  # noqa
                        {'key': 'instagram',
                         'value': f'@note Fill this field with user instagram profile e.g: http://instagram.com/{user["username"]}'},  # noqa
                        {'key': 'site',
                         'value': '@note Fill this field with user website link'},  # noqa
                        {'key': 'banner_color', 'value': '@note Fill this field with a color code or name e.g: #ffcc00 or yellow'},  # noqa
                        {'key': 'banner_image', 'value': '@note Fill this field witha banner image url e.g: http://site.com/image.jpg'},  # noqa
                        {'key': 'gravatar_email', 'value': '@note Fill this field with gravatar registered email e.g: user@site.com'},  # noqa
                        {'key': 'author_avatar', 'value': '@note Fill this field with an absolute url to a profile image e.g: http://site.com/image.png'},  # noqa
                    ],
                    'quokka_create_form_class': 'FormMeta',
                    'quokka_create_form_module': 'quokka.core.content.formats',
                    'quokka_format_class': 'MarkdownFormat',
                    'quokka_format_module': 'quokka.core.content.formats',
                    'quokka_module': 'quokka.core.content.admin',
                    'tags_slug': None,
                    'block_items': [],
                    'authors_slug': None,
                }
                new = current_app.db.insert('index', new_data)
                new_data['_id'] = new.inserted_id
                current_app.db.push_content(new_data)
                newlink = url_for(
                    'quokka.core.content.admin.blockview.edit_view',
                    id=new.inserted_id
                )
                flash(Markup(
                    f'Profile block for {user["username"]} '
                    f'Created at: '
                    f'<a href="{newlink}">{new.inserted_id}</a>'
                ))
# TODO: Serialize and activate this action
class ExportAction(object):
    """Admin action mixin exporting selected items as JSON or CSV."""

    @action('export_to_json', 'Export as json')
    def export_to_json(self, ids):
        """Stream the selected documents as a JSON attachment."""
        qs = self.model.objects(id__in=ids)
        return Response(
            qs.to_json(),
            mimetype="text/json",
            headers={
                "Content-Disposition":
                "attachment;filename=%s.json" % self.model.__name__.lower()
            }
        )

    @action('export_to_csv', 'Export as csv')
    def export_to_csv(self, ids):
        """Stream the selected documents as a CSV attachment.

        Fixes vs. original: values are aligned to the header columns (the
        old code joined each item's values positionally, so documents with
        missing/extra keys produced shifted rows) and fields are escaped
        with the csv module (commas/quotes inside values no longer corrupt
        the output).
        """
        qs = json.loads(self.model.objects(id__in=ids).to_json())

        def generate():
            # The widest document defines the header columns.
            header = list(max(qs, key=lambda x: len(x)).keys())
            buf = io.StringIO()
            writer = csv.writer(buf)

            def emit(row):
                writer.writerow(row)
                data = buf.getvalue()
                buf.seek(0)
                buf.truncate(0)
                return data

            yield emit(header)
            for item in qs:
                # Missing keys become empty fields instead of shifting the row.
                yield emit([str(item.get(col, '')) for col in header])

        return Response(
            generate(),
            mimetype="text/csv",
            headers={
                "Content-Disposition":
                "attachment;filename=%s.csv" % self.model.__name__.lower()
            }
        )
|
nilq/baby-python
|
python
|
import json
from dmscreen.data.data_loader import get_class_id
def ParseData():
    """Rewrite allSpells.json with class ids and sequential spell ids.

    Reads dmscreen/data/allSpells.json, replaces each spell's comma-separated
    ``classes`` string with a list of class ids (via ``get_class_id``) and
    assigns an incrementing ``id``, then writes the result to
    dmscreen/data/allSpells-new.json.
    """
    # Context managers close both files even on error; the original
    # leaked the output file handle (f2 was never closed).
    with open('dmscreen/data/allSpells.json') as src:
        data = json.load(src)
    for spell_id, spell in enumerate(data["allSpells"]):
        spell["classes"] = [get_class_id(name)
                            for name in spell["classes"].split(", ")]
        spell["id"] = spell_id
    with open('dmscreen/data/allSpells-new.json', 'w') as dst:
        json.dump(data, dst, indent=4)
|
nilq/baby-python
|
python
|
# BOT TOKEN
# Authentication token for the bot API; keep it secret and out of version
# control. NOTE(review): left empty here — presumably filled at deploy time.
TOKEN = ""
|
nilq/baby-python
|
python
|
import urllib.request
import zipfile
from os import remove, rename, listdir, path
from shutil import rmtree
import re
# Matches a '-' and everything after it (used to strip version suffixes).
INVALID = re.compile(r'-.*', re.MULTILINE)


def download_deps(deps, libFolder):
    """Download and unpack dependency zip archives, stripping version suffixes.

    Each URL in *deps* is fetched to a temporary 'temp.zip', extracted into
    *libFolder*, and every extracted directory is renamed by dropping
    everything from the first '-' (e.g. 'lib-1.2.3' -> 'lib'). An existing
    directory with the target name is replaced.
    """
    for zip_url in deps:
        print('downloading from ' + zip_url)
        urllib.request.urlretrieve(zip_url, 'temp.zip')
        # Context manager closes the archive even if extraction fails
        # (the original leaked the handle on error).
        with zipfile.ZipFile('temp.zip') as archive:
            archive.extractall(libFolder)
    try:
        remove('temp.zip')
    except OSError:
        # Narrowed from a bare except: 'temp.zip' is simply absent when no
        # archives were downloaded.
        print('no dependencies to download')
    for folder in listdir(libFolder):
        folder_path = path.join(libFolder, folder)
        if not path.isdir(folder_path):
            continue
        new_name = re.sub(INVALID, '', folder)
        target = path.join(libFolder, new_name)
        try:
            rename(folder_path, target)
        except OSError:
            # Narrowed from a bare except: target already exists, replace it.
            print('duplicate found, deleting old one')
            rmtree(target)
            rename(folder_path, target)
        print('renamed ' + folder + ' to ' + new_name)
    return
|
nilq/baby-python
|
python
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.common.services.loadbalancer import octavia
from rally_openstack.task import scenario
class OctaviaBase(scenario.OpenStackScenario):
    """Base class for Octavia scenarios with basic atomic actions."""

    def __init__(self, context=None, admin_clients=None, clients=None):
        super(OctaviaBase, self).__init__(context, admin_clients, clients)
        # Build an Octavia service helper for whichever client sets exist
        # (admin first, then regular), mirroring the two original branches.
        for clients_attr, service_attr in (("_admin_clients", "admin_octavia"),
                                           ("_clients", "octavia")):
            if hasattr(self, clients_attr):
                setattr(self, service_attr, octavia.Octavia(
                    getattr(self, clients_attr),
                    name_generator=self.generate_random_name,
                    atomic_inst=self.atomic_actions()))
nilq/baby-python
|
python
|
from django.views.generic.list import ListView
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect, render
from oppgavegen.models import Set, Chapter, Level
from oppgavegen.views.mixins import LoginRequiredMixin
from oppgavegen.view_logic.current_work import set_current_set, set_current_chapter, set_current_level
from django.contrib.sites.models import Site
class UserSetListView(LoginRequiredMixin, ListView):
    """List the sets edited (owned) by the logged-in user."""
    template_name = 'sets/user_set_list.html'

    def get_queryset(self):
        current_user = self.request.user
        return Set.objects.filter(editor=current_user)
class SetChapterListView(LoginRequiredMixin, ListView):
    """List chapters in a set, following the set's comma-separated pk order."""
    template_name = 'sets/set_chapter_list.html'

    def get_queryset(self):
        self.set = get_object_or_404(Set, id=self.args[0])
        chapters = list(self.set.chapters.all())
        if not chapters:
            return []
        # Index chapters by pk once instead of rescanning the whole list for
        # every entry in the order string (was O(n*m)).
        by_pk = {chapter.pk: chapter for chapter in chapters}
        order = self.set.order
        return [by_pk[int(x)] for x in order.split(',') if int(x) in by_pk]

    def get_context_data(self, **kwargs):
        context = super(SetChapterListView, self).get_context_data(**kwargs)
        context['set'] = self.set
        context['site'] = Site.objects.last()
        # Remember the set the user is currently working in.
        set_current_set(self.request.user, self.set)
        return context
class ChapterLevelsListView(LoginRequiredMixin, ListView):
    """List levels in a chapter, following its comma-separated pk order."""
    template_name = 'sets/chapter_level_list.html'

    def get_queryset(self):
        self.chapter = get_object_or_404(Chapter, id=self.args[0])
        levels = list(self.chapter.levels.all())
        if not levels:
            return []
        # Index levels by pk once instead of rescanning the whole list for
        # every entry in the order string (was O(n*m)).
        by_pk = {level.pk: level for level in levels}
        order = self.chapter.order
        return [by_pk[int(x)] for x in order.split(',') if int(x) in by_pk]

    def get_context_data(self, **kwargs):
        context = super(ChapterLevelsListView, self).get_context_data(**kwargs)
        context['chapter'] = self.chapter
        # Remember the chapter the user is currently working in.
        set_current_chapter(self.request.user, self.chapter)
        return context
@login_required
def set_public(request, set_id):
    """ Set a private or new set to be public (listed on the front page) """
    problem_set = Set.objects.get(pk=set_id)
    # Only the set's editor may change its visibility.
    if problem_set.editor != request.user:
        return redirect('index')
    problem_set.is_public = True
    problem_set.save()
    return redirect('chapters_by_set', set_id)
@login_required
def set_private(request, set_id):
    """ Set a public set to be private (not listed on the front page) """
    problem_set = Set.objects.get(pk=set_id)
    # Only the set's editor may change its visibility.
    if problem_set.editor != request.user:
        return redirect('index')
    problem_set.is_public = False
    problem_set.save()
    return redirect('chapters_by_set', set_id)
class LevelsTemplatesListView(LoginRequiredMixin, ListView):
    """List the templates that belong to a level."""
    template_name = 'sets/level_template_list.html'

    def get_queryset(self):
        self.level = get_object_or_404(Level, id=self.args[0])
        return self.level.templates.all()

    def get_context_data(self, **kwargs):
        context = super(LevelsTemplatesListView, self).get_context_data(**kwargs)
        # Remember the level the user is currently working in.
        set_current_level(self.request.user, self.level)
        context.update({
            'level': self.level,
            'k_factor': self.level.k_factor,
            'k_factor_template': self.level.k_factor_template,
        })
        return context
@login_required
def set_students_admin(request, set_id):
    """Render an editable list of students in a requirement set (owner only)."""
    requested_set = Set.objects.get(id=set_id)
    if requested_set.editor == request.user and requested_set.is_requirement:
        students = requested_set.users.all().order_by('last_name')
        return render(request, 'sets/set_students_admin.html',
                      {'set': requested_set, 'students': students})
    return redirect('index')
|
nilq/baby-python
|
python
|
import argparse
import fsspec
import fv3config
def _parse_write_run_directory_args():
parser = argparse.ArgumentParser("write_run_directory")
parser.add_argument(
"config", help="URI to fv3config yaml file. Supports any path used by fsspec."
)
parser.add_argument(
"rundir", help="Desired output directory. Must be a local directory"
)
return parser.parse_args()
def _parse_enable_restart_args():
parser = argparse.ArgumentParser("enable_restart")
parser.add_argument(
"config",
help="URI to fv3config yaml file. Supports any path used by fsspec. "
"File will be modified in place.",
)
parser.add_argument(
"initial_conditions", help="Path to restart initial conditions.",
)
return parser.parse_args()
def _parse_enable_nudging_args():
parser = argparse.ArgumentParser("enable_nudging")
parser.add_argument(
"config",
help="URI to fv3config yaml file. Supports any path used by fsspec. "
"File will be modified in place.",
)
return parser.parse_args()
def write_run_directory():
    """CLI entry point: materialize a run directory from an fv3config file."""
    args = _parse_write_run_directory_args()
    with fsspec.open(args.config) as stream:
        cfg = fv3config.load(stream)
    fv3config.write_run_directory(cfg, args.rundir)
def enable_restart():
    """CLI entry point: switch a config to restart mode, rewriting it in place."""
    args = _parse_enable_restart_args()
    with fsspec.open(args.config) as stream:
        cfg = fv3config.load(stream)
    restarted = fv3config.enable_restart(cfg, args.initial_conditions)
    with fsspec.open(args.config, mode="w") as stream:
        fv3config.dump(restarted, stream)
def enable_nudging():
    """CLI entry point: turn on nudging in a config file, in place.

    The file is rewritten only when the namelist actually requests nudging
    (``fv_core_nml.nudge`` is truthy); otherwise it is left untouched.
    """
    args = _parse_enable_nudging_args()
    with fsspec.open(args.config) as stream:
        cfg = fv3config.load(stream)
    if cfg["namelist"]["fv_core_nml"].get("nudge", False):
        nudged = fv3config.enable_nudging(cfg)
        with fsspec.open(args.config, mode="w") as stream:
            fv3config.dump(nudged, stream)
|
nilq/baby-python
|
python
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FormList(ListResource):
    """List resource for Verify Forms.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version):
        """Initialize the FormList.

        :param Version version: Version that contains the resource
        :rtype: twilio.rest.verify.v2.form.FormList
        """
        super(FormList, self).__init__(version)
        # No path parameters at the list level.
        self._solution = {}

    def get(self, form_type):
        """Construct a FormContext for the given form type.

        :param form_type: The Type of this Form
        :rtype: twilio.rest.verify.v2.form.FormContext
        """
        return FormContext(self._version, form_type=form_type, )

    def __call__(self, form_type):
        """Alias for :meth:`get`, enabling ``form_list(form_type)`` syntax.

        :param form_type: The Type of this Form
        :rtype: twilio.rest.verify.v2.form.FormContext
        """
        return FormContext(self._version, form_type=form_type, )

    def __repr__(self):
        """Machine friendly representation.

        :rtype: str
        """
        return '<Twilio.Verify.V2.FormList>'
class FormPage(Page):
    """A single page of Form results.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version, response, solution):
        """Initialize the FormPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :rtype: twilio.rest.verify.v2.form.FormPage
        """
        super(FormPage, self).__init__(version, response)
        self._solution = solution

    def get_instance(self, payload):
        """Build a FormInstance from an API payload dict.

        :param dict payload: Payload response from the API
        :rtype: twilio.rest.verify.v2.form.FormInstance
        """
        return FormInstance(self._version, payload, )

    def __repr__(self):
        """Machine friendly representation.

        :rtype: str
        """
        return '<Twilio.Verify.V2.FormPage>'
class FormContext(InstanceContext):
    """Context for fetching a single Form by its type.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version, form_type):
        """Initialize the FormContext.

        :param Version version: Version that contains the resource
        :param form_type: The Type of this Form
        :rtype: twilio.rest.verify.v2.form.FormContext
        """
        super(FormContext, self).__init__(version)
        self._solution = {'form_type': form_type, }
        self._uri = '/Forms/{form_type}'.format(**self._solution)

    def fetch(self):
        """Fetch the FormInstance from the API.

        :returns: The fetched FormInstance
        :rtype: twilio.rest.verify.v2.form.FormInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )
        return FormInstance(self._version, payload, form_type=self._solution['form_type'], )

    def __repr__(self):
        """Machine friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Verify.V2.FormContext {}>'.format(details)
class FormInstance(InstanceResource):
    """A single Verify Form resource.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    class FormTypes(object):
        # Known form types.
        FORM_PUSH = "form-push"

    def __init__(self, version, payload, form_type=None):
        """Initialize the FormInstance from an API payload.

        :param form_type: explicit form type; falls back to the payload value
        :rtype: twilio.rest.verify.v2.form.FormInstance
        """
        super(FormInstance, self).__init__(version)
        # Marshaled properties straight from the API payload.
        self._properties = {
            'form_type': payload.get('form_type'),
            'forms': payload.get('forms'),
            'form_meta': payload.get('form_meta'),
            'url': payload.get('url'),
        }
        # Context is built lazily by _proxy.
        self._context = None
        self._solution = {'form_type': form_type or self._properties['form_type'], }

    @property
    def _proxy(self):
        """Lazily create and cache the FormContext used to perform actions.

        :rtype: twilio.rest.verify.v2.form.FormContext
        """
        if self._context is None:
            self._context = FormContext(self._version, form_type=self._solution['form_type'], )
        return self._context

    @property
    def form_type(self):
        """:returns: The Type of this Form
        :rtype: FormInstance.FormTypes
        """
        return self._properties['form_type']

    @property
    def forms(self):
        """:returns: Object that contains the available forms for this type.
        :rtype: dict
        """
        return self._properties['forms']

    @property
    def form_meta(self):
        """:returns: Additional information for the available forms for this type.
        :rtype: dict
        """
        return self._properties['form_meta']

    @property
    def url(self):
        """:returns: The URL to access the forms for this type.
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """Fetch the latest FormInstance from the API.

        :rtype: twilio.rest.verify.v2.form.FormInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """Machine friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Verify.V2.FormInstance {}>'.format(details)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import rospy
# from vanttec_uuv.msg import ThrustControl
from geometry_msgs.msg import Twist
# Global publisher: forwards the remapped simulator velocity to the
# vehicle's desired-velocity topic.
thrust_pub = rospy.Publisher("/uuv_desired_velocity", Twist, queue_size=1000)
def remap_vel(src_vel):
    """Republish a simulator velocity with y, z and yaw sign-flipped.

    Roll and pitch rates are zeroed; the result goes out on
    /uuv_desired_velocity via the module-level publisher.
    """
    out = Twist()
    out.linear.x = src_vel.linear.x
    out.linear.y = -src_vel.linear.y
    out.linear.z = -src_vel.linear.z
    out.angular.x = 0.0
    out.angular.y = 0.0
    out.angular.z = -src_vel.angular.z
    thrust_pub.publish(out)
def main():
    """Start the remap node and forward simulator velocities until shutdown."""
    rospy.init_node("remap_thrust", anonymous=0)
    # Every message on the simulator velocity topic is republished by remap_vel.
    rospy.Subscriber("/uuv_simulation/dynamic_model/vel", Twist, remap_vel)
    rospy.spin()
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import sys
import os
import math
"""
Notes
- The trick here is to split the right hand side (RHS) of the equation into two
fractions i.e. x = -b/2/a +/- math.sqrt(discriminant)/2/a
- The discriminant is >= 0 for real roots and < 0 for imaginary roots
- For real roots: x1,2 = -b/2/a +/- math.sqrt(discriminant)/2/a as usual
- For complex roots:
x1 = complex(-b/2/a, math.sqrt(-discriminant)/2/a)
x2 = complex(-b/2/a, -math.sqrt(-discriminant)/2/a)
I'll leave it to you to experience the joy of solving it
A complex number is made up of two parts: real and imaginary. Python allows us
to represent complex numbers by passing the real and imaginary parts as arguments
to the complex() class. For example,
complex(1, 2)
returns the complex number (1+2j): 1 is real and 2j is imaginary.
Now, when we are trying to solve (I will only handle the case with +; the one with - is the same)
x = (-b + math.sqrt(b**2 - 4*a*c)/(2*a)
or
-b + math.sqrt(b**2 - 4*a*c)
x = ----------------------------.
2*a
We can write this as
⎡ b ⎤ ⎡math.sqrt(b**2 - 4*a*c)⎤
x = ⎢- ---⎢+⎢-----------------------⎢
⎣ 2*a⎦ ⎣ 2*a ⎦
Now the right has two values: the one first one is always real but the second one
becomes imaginary when the discriminant is negative. Please see the slide titled
Complex Math in week3 slides.
This will give you enough to work on to solve the problem. I'd like you to experience
the joy of solving it for yourself.
"""
def calculate(a, b, c):
    """Solve the quadratic equation a*x**2 + b*x + c = 0 and return both roots.

    :param float a: coefficient of x^2 (must be non-zero)
    :param float b: coefficient of x
    :param float c: constant term
    :return: both roots; floats when the discriminant is >= 0, otherwise a
        conjugate pair of complex numbers
    :rtype: tuple

    Fixes: the previous version unconditionally evaluated
    ``math.sqrt(-discriminant)``, which raised ValueError whenever the
    discriminant was positive (the common case), and it returned
    ``(None, None)`` instead of the complex roots described in the module
    notes when the discriminant was negative.
    """
    discriminant = b ** 2 - 4 * a * c
    if discriminant >= 0:
        # Two real roots (equal when the discriminant is zero):
        # x = -b/(2a) +/- sqrt(discriminant)/(2a)
        offset = math.sqrt(discriminant) / (2 * a)
        return -b / (2 * a) + offset, -b / (2 * a) - offset
    # Negative discriminant: split the RHS into real and imaginary parts,
    # as the module notes describe: x = -b/(2a) +/- i*sqrt(-discriminant)/(2a)
    real = -b / (2 * a)
    imag = math.sqrt(-discriminant) / (2 * a)
    return complex(real, imag), complex(real, -imag)
def main():
    """Prompt for the three coefficients, solve, and print both roots."""
    coefficients = [float(input(prompt)) for prompt in ("a: ", "b: ", "c: ")]
    x1, x2 = calculate(*coefficients)
    print(f"x1={x1}, x2={x2}")
    return os.EX_OK


if __name__ == "__main__":
    sys.exit(main())
|
nilq/baby-python
|
python
|
from a10sdk.common.A10BaseClass import A10BaseClass
class HostList(A10BaseClass):
    """DNS remote-host entry; does not support CRUD operations, use parent.

    :param dns_host: DNS remote host (string, 1-31 chars)
    :param ipv4_mask: IPV4 mask (ipv4-netmask format)
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "host-list"
        self.DeviceProxy = ""
        self.dns_host = ""
        self.ipv4_mask = ""
        # Override any of the defaults above with caller-supplied values.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Ipv4List(A10BaseClass):
    """IPv4 remote-host entry; does not support CRUD operations, use parent.

    :param ipv4_host: IPV4 remote host (ipv4-address format)
    :param ipv4_mask: IPV4 mask (ipv4-netmask format)
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "ipv4-list"
        self.DeviceProxy = ""
        self.ipv4_host = ""
        self.ipv4_mask = ""
        # Override any of the defaults above with caller-supplied values.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Ipv6List(A10BaseClass):
    """IPv6 remote-host entry; does not support CRUD operations, use parent.

    :param ipv6_host: IPV6 remote host (ipv6-address format)
    :param ipv6_mask: IPV6 mask (number, 1-128)
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "ipv6-list"
        self.DeviceProxy = ""
        self.ipv6_host = ""
        self.ipv6_mask = ""
        # Override any of the defaults above with caller-supplied values.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Remote(A10BaseClass):
    """Container for remote-host restrictions; no CRUD support, use parent.

    :param host_list: list of DNS host entries (see HostList)
    :param ipv4_list: list of IPv4 host entries (see Ipv4List)
    :param ipv6_list: list of IPv6 host entries (see Ipv6List)
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "remote"
        self.DeviceProxy = ""
        self.host_list = []
        self.ipv4_list = []
        self.ipv6_list = []
        # Override any of the defaults above with caller-supplied values.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Read(A10BaseClass):
    """Define a read-only SNMP community string.

    Class read supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.

    :param oid_list: list of OID entries (each maps to
        /axapi/v3/snmp-server/community/read/{user}/oid/{oid-val})
    :param user: SNMPv1/v2c community string (string, 1-31 chars, required)
    :param uuid: uuid of the object (string, 1-64 chars, not modifiable)
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/snmp-server/community/read/{user}`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["user"]
        self.b_key = "read"
        self.a10_url = "/axapi/v3/snmp-server/community/read/{user}"
        self.DeviceProxy = ""
        self.oid_list = []
        self.remote = {}
        self.user = ""
        self.uuid = ""
        # Override any of the defaults above with caller-supplied values.
        for name, value in kwargs.items():
            setattr(self, name, value)
|
nilq/baby-python
|
python
|
from ddb.__main__ import register_default_caches, clear_caches
from ddb.command import LifecycleCommand
from ddb.config import config
from ddb.event import bus
from ddb.phase import DefaultPhase, phases
def test_lifecycle():
    """Executing a LifecycleCommand fires its phases, in order, on every run."""
    register_default_caches()
    phases.register(DefaultPhase("step1"))
    phases.register(DefaultPhase("step2"))
    config.args.clear_cache = False

    seen = []
    bus.on(None, lambda event: seen.append(event))
    command = LifecycleCommand("test", "TestCommand", "step1", "step2", DefaultPhase("step3"))

    command.execute()
    assert seen == ["phase:step1", "phase:step2", "phase:step3"]

    # No run-once phases here, so a second execution replays everything.
    seen = []
    command.execute()
    assert seen == ["phase:step1", "phase:step2", "phase:step3"]
def test_lifecycle_run_once():
    """A run_once phase fires only on the first execution of the command."""
    register_default_caches()
    clear_caches()
    config.args.clear_cache = False
    phases.register(DefaultPhase("step1", run_once=True))
    phases.register(DefaultPhase("step2"))

    seen = []
    bus.on(None, lambda event, *args, **kwargs: seen.append(event))
    command = LifecycleCommand("test", "TestCommand", "step1", "step2", "step1", DefaultPhase("step3"))

    # step1 appears twice in the command but run_once deduplicates it.
    command.execute()
    assert seen == ["phase:step1", "phase:step2", "phase:step3"]

    # On a second execution step1 is skipped entirely.
    seen = []
    command.execute()
    assert seen == ["phase:step2", "phase:step3"]
|
nilq/baby-python
|
python
|
from pyserialization.serializable import Serializable
from pyserialization.serialint import SerialU32
from pyserialization.seriallist import serial_list
from pyserialization.serialstring import SerialAsciiString
from operator import mul
import functools
import numpy as np
class _IntList(serial_list(SerialU32)):
    """List of U32 values used to serialize an ndarray's shape."""
    pass
class SerialNdArray(Serializable):
    """
    Type for serializing a numpy.ndarray

    Wire format, in order: dtype name (ascii string), flattened element
    count (U32), raw array bytes, then the shape as a list of U32s.
    """
    def __init__(self, value=None):
        """
        Initializes the array with an empty ndarray or an existing ndarray

        Args:
            value: optional ndarray to wrap; when omitted a 0-d zero array
                is stored.
        """
        Serializable.__init__(self)
        if value is not None:
            self.set(value)
        else:
            self._array = np.zeros([])
    def __str__(self):
        """Returns the __str__ representation of the stored ndarray"""
        return self._array.__str__()
    def get(self):
        """Returns the stored ndarray"""
        return self._array
    def set(self, value):
        """
        Sets the SerialNdArray with an existing ndarray

        Args:
            value: The new ndarray to track

        Raises:
            ValueError: if value is not a numpy.ndarray
        """
        if not isinstance(value, np.ndarray):
            raise ValueError('Value must be of type ndarray, not {}'.format(type(value)))
        self._array = value
    def load_in_place(self, data, index=0):
        """
        Deserializes the ndarray

        Type is serialized by first saving the data type as a string, the number of elements in the flattened array, the
        data in the ndarray, and then a list of U32s giving the shape of the array.

        Returns:
            The index of the first byte after the consumed data.
        """
        data_type, index = SerialAsciiString.from_bytes(data, index)
        array_size, index = SerialU32.from_bytes(data, index)
        # NOTE(review): np.frombuffer returns a read-only view into ``data``;
        # callers that need to mutate the array afterwards must copy it.
        self._array = np.frombuffer(data, data_type.get(), array_size.get(), index)
        index += self._array.nbytes
        size_array, index = _IntList.from_bytes(data, index)
        self._array = np.reshape(self._array, [value.get() for value in size_array])
        return index
    def to_bytes(self):
        """
        Serializes the ndarray

        Type is serialized by first saving the data type as a string, the number of elements in the flattened array, the
        data in the ndarray, and then a list of U32s giving the shape of the array.
        """
        data = SerialAsciiString(str(self._array.dtype)).to_bytes()
        # Element count is the product of the shape dimensions (1 for 0-d).
        data += SerialU32(functools.reduce(mul, self._array.shape, 1)).to_bytes()
        data += self._array.tobytes()
        size_array = _IntList()
        for value in self._array.shape:
            size_array.append(value)
        data += size_array.to_bytes()
        return data
|
nilq/baby-python
|
python
|
from django.db import models
import a2s
# Display names for the single-byte server-type / platform codes returned
# by the a2s query.  The integers appear to be ASCII codes ('D', 'd', 'l',
# 'p', 'w', ...) -- TODO confirm against the a2s library documentation.
SERVER_TYPES = (
    (68, 'Dedicated'),
    (100, 'Dedicated'),
    (108, 'Non-dedicated'),
    (112, 'SourceTV'),
)
PLATFORMS = (
    (76, 'Linux'),
    (108, 'Linux'),
    (109, 'Mac OS X'),
    (111, 'Mac OS X'),
    (119, 'Windows')
)
class Server(models.Model):
    """A game server polled via the Source A2S query protocol.

    The ``update_*`` methods query the live server with the ``a2s`` library
    and replace the related Info/Player/Rule rows.  Each returns True on
    success and False (after marking the server down) on any query failure.
    """
    title = models.CharField(max_length=200)
    host = models.CharField(max_length=200)
    port = models.IntegerField()
    # Whether we should run a2s_info queries.
    get_info = models.BooleanField(default=True)
    # Whether we should get a list of players.
    get_players = models.BooleanField(default=True)
    # Whether we should get the servers rules (cvars).
    get_rules = models.BooleanField(default=True)
    # True if the server is contactable, otherwise False.
    up = models.BooleanField(default=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True)

    def update_info(self):
        """Refresh this server's Info snapshot from an a2s_info query.

        Fixed: removed two debug prints that concatenated str with the
        integer player_count, raising TypeError on every successful query.
        """
        try:
            info = dict(a2s.info((self.host, self.port)))
            self.up = True
            self.save()
        except Exception:
            # Deliberately broad best-effort: any failure (timeout, DNS,
            # malformed reply) just marks the server as down.
            self.up = False
            self.save()
            return False
        info_model = Info(server=self)
        info_model.server_name = info['server_name']
        info_model.map = info['map_name']
        info_model.folder = info['folder']
        info_model.game = info['game']
        info_model.app_id = info['app_id']
        info_model.player_count = info['player_count']
        info_model.max_players = info['max_players']
        info_model.bot_count = info['bot_count']
        info_model.server_type = info['server_type']
        info_model.platform = info['platform']
        info_model.password_protected = info['password_protected']
        info_model.vac_enabled = info['vac_enabled']
        info_model.version = info['version']
        # Replace the previous snapshot: delete, then save the new row.
        # NOTE(review): Info.server is a OneToOneField, whose reverse
        # accessor is normally ``self.info`` rather than ``self.info_set``
        # -- confirm this works on the Django version in use.
        self.info_set.all().delete()
        info_model.save()
        return True

    def update_players(self):
        """Replace this server's Player rows from an a2s_player query."""
        try:
            players = a2s.players((self.host, self.port))
            self.up = True
            self.save()
        except Exception:
            self.up = False
            self.save()
            return False
        player_models = []
        for player in players:
            player = dict(player)
            player_models.append(Player(
                server=self,
                name=player['name'],
                score=player['score'],
                duration=player['duration']
            ))
        self.player_set.all().delete()
        Player.objects.bulk_create(player_models)
        return True

    def update_rules(self):
        """Replace this server's Rule rows from an a2s_rules query."""
        try:
            rules = a2s.rules((self.host, self.port))
            self.up = True
            self.save()
        except Exception:
            self.up = False
            self.save()
            return False
        rule_models = []
        for cvar, value in rules.items():
            rule_models.append(Rule(
                server=self,
                cvar=cvar,
                value=str(value)
            ))
        self.rule_set.all().delete()
        Rule.objects.bulk_create(rule_models)
        return True

    def __unicode__(self):
        return self.title
class Info(models.Model):
    """
    Stores a game servers response to a2s_info, this contains
    General information about the server, such as player count and
    the current map.
    """
    # One Info row per server; Server.update_info() deletes and recreates it.
    server = models.OneToOneField(
        Server,
        on_delete=models.CASCADE,
        primary_key=True,
    )
    # The name of the server as returned by a2s_info
    server_name = models.CharField(max_length=256)
    # The name of the map the server is currently running
    map = models.CharField(max_length=64)
    # The gamedir of the mod being ran by the server, E.g "tf" or "csgo"
    folder = models.CharField(max_length=64)
    # A string identifying the game being ran by the server
    game = models.CharField(max_length=64)
    # The numeric application ID of the game ran by the server, note that the
    # ID of the client is returned, not the server. For example TF2 is 440
    # instead of 232250 which is the server software
    app_id = models.IntegerField()
    player_count = models.IntegerField()
    max_players = models.IntegerField()
    bot_count = models.IntegerField()
    # Raw single-byte codes from the query; display names come from the
    # SERVER_TYPES / PLATFORMS choices tables.
    server_type = models.IntegerField(choices=SERVER_TYPES)
    platform = models.IntegerField(choices=PLATFORMS)
    password_protected = models.BooleanField()
    vac_enabled = models.BooleanField()
    version = models.CharField(max_length=4000)
class Player(models.Model):
    """
    Stores a game servers response to a2s_player, this contains
    a list of who is playing, their score, etc.
    """
    # on_delete made explicit (CASCADE was the implicit default); the
    # argument is mandatory on Django >= 2.0.
    server = models.ForeignKey(Server, on_delete=models.CASCADE)
    name = models.CharField(max_length=64)
    score = models.IntegerField()
    duration = models.FloatField()

    def __unicode__(self):
        return self.name
class Rule(models.Model):
    """
    Stores a subset of a server's console variables (often referred to as
    'cvars',) specifically those which have the ``FCVAR_NOTIFY`` flag set
    on them. These cvars are used to indicate game mode's configuration,
    such as the gravity setting for the map or whether friendly fire is
    enabled or not. You could also use this to transmit data from the
    server to django by having your plugin create a cvar.
    """
    # on_delete made explicit (CASCADE was the implicit default); the
    # argument is mandatory on Django >= 2.0.
    server = models.ForeignKey(Server, on_delete=models.CASCADE)
    cvar = models.CharField(max_length=640)
    value = models.CharField(max_length=640)

    def __unicode__(self):
        return self.cvar
|
nilq/baby-python
|
python
|
from canoser import Struct, Uint8, bytes_to_int_list, hex_to_int_list
from libra.transaction.transaction_argument import TransactionArgument, normalize_public_key
from libra.bytecode import bytecodes
from libra.account_address import Address
class Script(Struct):
    """A transaction script: compiled bytecode plus its argument list."""
    _fields = [
        ('code', [Uint8]),
        ('args', [TransactionArgument])
    ]

    @classmethod
    def gen_transfer_script(cls, receiver_address, micro_libra):
        """Build a peer-to-peer transfer script.

        Consistency fix: address normalization now goes through
        Address.normalize_to_int_list like the other generators, instead of
        a hand-rolled bytes/hex-string conversion.
        """
        receiver_address = Address.normalize_to_int_list(receiver_address)
        code = bytecodes["peer_to_peer_transfer"]
        args = [
            TransactionArgument('Address', receiver_address),
            TransactionArgument('U64', micro_libra)
        ]
        return Script(code, args)

    @classmethod
    def gen_mint_script(cls, receiver_address, micro_libra):
        """Build a mint script paying ``micro_libra`` to ``receiver_address``."""
        receiver_address = Address.normalize_to_int_list(receiver_address)
        code = bytecodes["mint"]
        args = [
            TransactionArgument('Address', receiver_address),
            TransactionArgument('U64', micro_libra)
        ]
        return Script(code, args)

    @classmethod
    def gen_create_account_script(cls, fresh_address):
        """Build a script creating an account at ``fresh_address`` with
        an initial balance of 0."""
        fresh_address = Address.normalize_to_int_list(fresh_address)
        code = bytecodes["create_account"]
        args = [
            TransactionArgument('Address', fresh_address),
            TransactionArgument('U64', 0)
        ]
        return Script(code, args)

    @classmethod
    def gen_rotate_auth_key_script(cls, public_key):
        """Build a script rotating the sender's authentication key to
        ``public_key`` (normalized via normalize_public_key)."""
        key = normalize_public_key(public_key)
        code = bytecodes["rotate_authentication_key"]
        args = [
            TransactionArgument('ByteArray', key)
        ]
        return Script(code, args)

    @staticmethod
    def get_script_bytecode(script_name):
        """Return the compiled bytecode registered under ``script_name``."""
        return bytecodes[script_name]
|
nilq/baby-python
|
python
|
'''
@brief Encoder for Packet data
This encoder takes in PktData objects, serializes them, and sends the results
to all registered senders.
Serialized Packet format:
+--------------------------------+ -
| Header = "A5A5 " | |
| (5 byte string) | |
+--------------------------------+ Added by
| Destination = "GUI " or "FSW " | Sender
| (4 byte string) | |
+--------------------------------+ -
| Length of descriptor, ID, |
| and channel data |
| (variable bytes, check config) |
+--------------------------------+
| Descriptor type = 4 |
| (4 bytes) |
+--------------------------------+
| ID |
| (2 bytes) |
+--------------------------------+
| Channel 1 value |
+--------------------------------+
| Channel 2 value |
+--------------------------------+
| ... |
+--------------------------------+
| Channel n value |
+--------------------------------+
@date Created August 9, 2018
@author R. Joseph Paetz
@bug No known bugs
'''
from __future__ import print_function
from __future__ import absolute_import
from .encoder import Encoder
from fprime_gds.common.data_types.pkt_data import PktData
from fprime_gds.common.utils.config_manager import ConfigManager
from fprime_gds.common.utils.data_desc_type import DataDescType
# For Unit Test
import sys
from fprime_gds.common.templates.ch_template import ChTemplate
from fprime_gds.common.templates.pkt_template import PktTemplate
from fprime_gds.common.data_types.ch_data import ChData
from fprime.common.models.serialize.time_type import TimeType
from fprime.common.models.serialize.u8_type import U8Type
from fprime.common.models.serialize.u16_type import U16Type
from fprime.common.models.serialize.u32_type import U32Type
class PktEncoder(Encoder):
    """Encoder for packetized telemetry (PktData) objects.

    Serializes each packet as: length, descriptor
    (FW_PACKET_PACKETIZED_TLM), packet id, packet time, then each channel
    value in order, and forwards the result to all registered senders.
    """

    def __init__(self, dest="GUI", config=None):
        """
        Constructor

        Args:
            dest (string, "FSW" or "GUI", default="GUI"): Destination for
                binary data produced by encoder.
            config (ConfigManager, default=None): Object with configuration
                data for the sizes of fields in the binary data. If None
                passed, defaults are used.

        Returns:
            An initialized PktEncoder object
        """
        super(PktEncoder, self).__init__(dest, config)
        # Reusable serializable field objects whose concrete types
        # (and therefore sizes) come from the configuration.
        self.len_obj = self.config.get_type("msg_len")
        self.desc_obj = self.config.get_type("msg_desc")
        self.id_obj = self.config.get_type("pkt_id")

    def data_callback(self, data):
        """
        Encode ``data`` and send the result to all registered senders.

        Args:
            data (PktData obj): object to encode into binary data.
        """
        self.send_to_all(self.encode_api(data))

    def encode_api(self, data):
        """
        Encodes the given PktData object as binary data and returns the result.

        Args:
            data (PktData obj): object to encode

        Returns:
            Encoded version of the data argument as binary data
        """
        pkt_temp = data.get_template()

        self.desc_obj.val = DataDescType["FW_PACKET_PACKETIZED_TLM"].value
        desc_bin = self.desc_obj.serialize()

        self.id_obj.val = pkt_temp.get_id()
        id_bin = self.id_obj.serialize()

        time_bin = data.get_time().serialize()

        # Fixed: accumulate into a bytes object, not str.  On Python 3,
        # serialize() yields bytes and ``"" + bytes`` raises TypeError;
        # on Python 2, b"" is str, so behavior is unchanged.
        ch_bin = b""
        for ch in data.get_chs():
            ch_bin += ch.get_val_obj().serialize()

        # Length covers descriptor + id + time + channel data.
        len_val = len(desc_bin) + len(id_bin) + len(time_bin) + len(ch_bin)
        self.len_obj.val = len_val
        len_bin = self.len_obj.serialize()

        return len_bin + desc_bin + id_bin + time_bin + ch_bin
if __name__ == "__main__":
# Unit Tests
config = ConfigManager()
config.set('types', 'msg_len', 'U16')
enc = PktEncoder()
enc_config = PktEncoder("GUI", config)
ch_temp_1 = ChTemplate(101, "test_ch", "test_comp", U32Type())
ch_temp_2 = ChTemplate(102, "test_ch2", "test_comp2", U8Type())
ch_temp_3 = ChTemplate(103, "test_ch3", "test_comp3", U16Type())
pkt_temp = PktTemplate(64, "test_pkt", [ch_temp_1, ch_temp_2, ch_temp_3])
time_obj = TimeType(2, 0, 1533758629, 123456)
ch_obj_1 = ChData(U32Type(1356), time_obj, ch_temp_1)
ch_obj_2 = ChData(U8Type(143), time_obj, ch_temp_2)
ch_obj_3 = ChData(U16Type(1509), time_obj, ch_temp_3)
pkt_obj = PktData([ch_obj_1, ch_obj_2, ch_obj_3], time_obj, pkt_temp)
desc_bin = "\x00\x00\x00\x04"
id_bin = "\x00\x40"
time_bin = "\x00\x02\x00\x5b\x6b\x4c\xa5\x00\x01\xe2\x40"
ch_bin = "\x00\x00\x05\x4c\x8F\x05\xe5"
long_len_bin = "\x00\x00\x00\x18"
short_len_bin = "\x00\x18"
reg_expected = (long_len_bin + desc_bin + id_bin + time_bin + ch_bin)
config_expected = (short_len_bin + desc_bin + id_bin + time_bin + ch_bin)
reg_output = enc.encode_api(pkt_obj)
if (reg_output != reg_expected):
print ("FAIL: expected regular output to be %s, but found %s"%
(list(reg_expected), list(reg_output)))
sys.exit(-1)
else:
print("PASSED test 1")
config_output = enc_config.encode_api(pkt_obj)
if (config_output != config_expected):
print("FAIL: expected configured output to be %s, but found %s"%
(list(config_expected), list(config_output)))
sys.exit(-1)
else:
print("PASSED test 2")
print("ALL TESTS PASSED!")
|
nilq/baby-python
|
python
|
import asyncio
import logging
from kubernetes import client, config, watch
#def main():
# Module-level setup: logger plus Kubernetes API clients built from the
# local kubeconfig (all of this runs at import time).
logger = logging.getLogger('k8s_events')
logger.setLevel(logging.DEBUG)
config.load_kube_config()
v1 = client.CoreV1Api()
v1ext = client.ExtensionsV1beta1Api()
async def pods():
    """Log every pod event from all namespaces.

    NOTE(review): watch.Watch().stream() is a blocking generator; the
    ``await asyncio.sleep(0)`` only yields between events, so other tasks
    can starve while waiting for the next event -- confirm acceptable.
    """
    watcher = watch.Watch()
    for ev in watcher.stream(v1.list_pod_for_all_namespaces):
        logger.info("Event: %s %s %s" % (ev['type'], ev['object'].kind, ev['object'].metadata.name))
        await asyncio.sleep(0)
async def deployments():
w = watch.Watch()
for event in w.stream(v1ext.list_deployment_for_all_namespaces):
logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name))
await asyncio.sleep(0)
ioloop = asyncio.get_event_loop()
ioloop.create_task(pods())
ioloop.create_task(deployments())
ioloop.run_forever()
|
nilq/baby-python
|
python
|
from utils import *
import torch.optim as optim
# Use the first GPU when available; all tensors/modules are moved here.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# read_txt returns per-sentence lengths and the raw training sentences.
Seq_len, X_train = read_txt('./data/data.txt')
# Pretrained word2vec embedding matrix aligned with the word->index map w2i.
Embedding_matrix = pretrained_embedding_layer(w2v_m, w2i)
vocab_size = len(w2i)
n_hidden = 16  # LSTM hidden size per direction
embedding_dim = 300  # word-vector dimensionality
class LM_Dataset(Dataset):
    """Dataset of raw sentences; each item is an (indices, target) pair.

    Relies on the module-level word->index map `w2i` and the helper
    `sentences_to_indices` imported from utils.
    """
    def __init__(self, x, seq_len):
        super().__init__()
        self.x = x  # array-like of sentences
        self.seq_len = seq_len  # per-sentence lengths, parallel to x
    def __getitem__(self, index):
        # Encode the sentence into index form; y is the LM target sequence.
        x, y = sentences_to_indices(self.x[index], w2i, self.seq_len[index])
        #y = convert_to_one_hot(y, len(w2i))
        return x, y
    def __len__(self):
        return self.x.shape[0]
# Build the training dataset/dataloader (one sentence per batch).
# BUG FIX: the original passed the undefined lowercase name `seq_len`
# (a NameError at import time); the lengths returned by read_txt() are
# bound to `Seq_len` above.
Train_DS = LM_Dataset(X_train, Seq_len)
Train_DL = DataLoader(Train_DS, batch_size=1, shuffle=True)
class BiLSTM(nn.Module):
    """Bidirectional LSTM language model over frozen pretrained embeddings."""
    def __init__(self, vocab_size, n_hidden, embedding_dim):
        super(BiLSTM, self).__init__()
        self.n_hidden = n_hidden
        self.embedding_dim = embedding_dim
        # Frozen pretrained vectors (module-level Embedding_matrix).
        self.word_embeddings = nn.Embedding.from_pretrained(Embedding_matrix, freeze=True)
        self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=n_hidden, bidirectional=True)
        # Projects concatenated forward/backward states onto the vocabulary.
        self.softmax = nn.Linear(n_hidden * 2, vocab_size)
        self.apply(self._init_weights)
    def forward(self, X):
        input = self.word_embeddings(X) # input : [batch_size, len_seq, embedding_dim]
        input = input.to(torch.float32)
        input = input.permute(1, 0, 2) # input : [len_seq, batch_size, embedding_dim]
        # Fresh zero h/c states every call (no state carried across batches).
        hidden_state = torch.zeros(1*2, len(X), self.n_hidden).to(device)   # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
        cell_state = torch.zeros(1*2, len(X), self.n_hidden).to(device)     # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
        # output : [len_seq, batch_size, num_directions(=2)*n_hidden]
        outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
        # NOTE(review): uses the module-level `n_hidden` global rather than
        # self.n_hidden — works only because they happen to match.
        outputs = outputs.view(-1, n_hidden * 2)
        outputs = self.softmax(outputs)
        # NOTE(review): .view() reinterprets memory instead of transposing;
        # for CrossEntropyLoss's expected [N, C, d] layout a permute seems
        # intended here — confirm before relying on the reported loss.
        outputs = outputs.view(1, outputs.shape[1], outputs.shape[0])
        return outputs
    def _init_weights(self, layer) -> None:
        # Xavier-init only the Linear projection; LSTM keeps its defaults.
        if isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
# Model, criterion and optimizer; CrossEntropyLoss expects raw scores.
model = BiLSTM(vocab_size, n_hidden, embedding_dim).to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-2)
def train(model, loss_fn, optimizer, epochs):
    """Per-sentence SGD training loop with gradient clipping.

    Reads the module-level `Train_DL` dataloader and `device`; every 10
    epochs prints the loss of the last processed batch.
    """
    print("Train Start")
    for e in range(1, epochs + 1):
        for line_num, (x, y) in enumerate(Train_DL):
            model.train()
            loss = 0
            optimizer.zero_grad()
            x, y = x.to(device), y.to(device)
            z_pred = model(x)
            loss += loss_fn(z_pred, y)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5) # gradient clipping
            optimizer.step()
        if e % 10 == 0:
            print(f'{"-" * 20} Epoch {e} {"-" * 20}')
            # NOTE(review): prints only the final batch's loss tensor,
            # not an epoch average.
            print("loss is :", loss)
train(model, loss_fn, optimizer, epochs=10000)
|
nilq/baby-python
|
python
|
import io
import random
import picamera
def motion_detected():
    # Randomly return True (like a fake motion detection routine)
    return random.randint(0, 10) == 0
# Python 2 script: keep the last 20 s of H.264 video in an in-memory
# ring buffer; on "motion", record 10 s more, then dump the buffer.
camera = picamera.PiCamera()
stream = picamera.PiCameraCircularIO(camera, seconds=20)
camera.start_recording(stream, format='h264')
try:
    while True:
        print "Waiting..."
        camera.wait_recording(1)
        if motion_detected():
            print "Recording..."
            # Keep recording for 10 seconds and only then write the
            # stream to disk
            camera.wait_recording(10)
            stream.copy_to('motion.h264')
finally:
    # Always release the camera, even on Ctrl-C.
    camera.stop_recording()
|
nilq/baby-python
|
python
|
import argparse
import webbrowser
import json
import traceback
import socket
import threading
import signal
import os
from pathlib import Path
from lyrebird import log
from lyrebird import application
from lyrebird.config import Rescource, ConfigManager
from lyrebird.mock.mock_server import LyrebirdMockServer
from lyrebird.proxy.proxy_server import LyrebirdProxyServer
from lyrebird.event import EventServer
from lyrebird.task import BackgroundTaskServer
logger = log.get_logger()
def main():
    """
    Command line main entry
    Start lyrebird
    * start in default config
    ```
    lyrebird
    ```
    * start with verbose mode
    ```
    lyrebird -v
    ```
    * start without open a web browser
    ```
    lyrebird -b
    ```
    * start with a specified config file
    ```
    lyrebird -c /path/to/your/config/file
    ```
    * start with multipart args
    ```
    lyrebird -v --mock 8080 -c /path/to/your/config/file
    ```
    """
    parser = argparse.ArgumentParser(prog='lyrebird')
    parser.add_argument('-v', dest='verbose', action='store_true', help='Show verbose log')
    parser.add_argument('--mock', dest='mock', type=int, help='Set mock server port, default port is 4272')
    parser.add_argument('--proxy', dest='proxy', type=int, help='Set proxy server port, default port is 9090')
    parser.add_argument('--data', dest='data', help='Set data dir, default is "./data/"')
    parser.add_argument('-b', '--no_browser', dest='no_browser', action='store_true', help='Start without open a browser')
    parser.add_argument('-c', '--config', dest='config', help='Start with a config file. Default is "~/.lyrebird/conf.json"')
    parser.add_argument('--log', dest='log', help='Set output log file path')
    subparser = parser.add_subparsers(dest='sub_command')
    src_parser = subparser.add_parser('src')
    src_parser.add_argument('uri')
    subparser.add_parser('plugin')
    args = parser.parse_args()
    # Load the user-supplied config file, or fall back to the default.
    if args.config:
        application._cm = ConfigManager(conf_path=args.config)
    else:
        application._cm = ConfigManager()
    application._src = Rescource()
    # set current ip to config
    try:
        application._cm.config['ip'] = _get_ip()
    except socket.gaierror as e:
        logger.error('Failed to get local IP address, error occurs on %s' % e)
    if args.verbose:
        application._cm.config['verbose'] = True
    # init file logger after config init
    log.init(args.log)
    # CLI flags take precedence over values from the config file.
    if args.mock:
        application._cm.config['mock.port'] = args.mock
    if args.proxy:
        application._cm.config['proxy.port'] = args.proxy
    if args.data:
        application._cm.config['mock.data'] = args.data
    logger.debug(f'Read args: {args}')
    # Dispatch on sub-command; bare `lyrebird` starts the servers.
    if args.sub_command == 'src':
        logger.debug('EXEC SUBCMD:SRC')
        src(args)
    elif args.sub_command == 'plugin':
        logger.debug('EXEC SUBCMD:PLUGIN')
        plugin(args)
    else:
        logger.debug('EXEC LYREBIRD START')
        run(args)
def run(args:argparse.Namespace):
    """Start the event/task/proxy/mock servers and install signal handlers."""
    # Check mock data group version. Update if is older than 1.x
    from . import mock_data_formater
    data_path = application._cm.config['mock.data']
    data_dir = Path(data_path)
    mock_data_formater.check_data_dir(data_dir)
    # show current config contents
    config_str = json.dumps(application._cm.config, ensure_ascii=False, indent=4)
    logger.warning(f'Lyrebird start with config:\n{config_str}')
    # Register the core servers, then start them all at once.
    application.server['event'] = EventServer()
    application.server['task'] = BackgroundTaskServer()
    application.server['proxy'] = LyrebirdProxyServer()
    application.server['mock'] = LyrebirdMockServer()
    application.start_server()
    # auto open web browser
    if not args.no_browser:
        # NOTE(review): reads `application.config` here but `application._cm.config`
        # above — confirm both resolve to the same mapping.
        webbrowser.open(f'http://localhost:{application.config["mock.port"]}')
    # stop event handler
    def signal_handler(signum, frame):
        application.stop_server()
        # NOTE(review): sets a freshly-created Event that nothing waits on —
        # appears to be dead code; confirm before removing.
        threading.Event().set()
        logger.warning('!!!Ctrl-C pressed. Lyrebird stop!!!')
        os._exit(1)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def debug():
    # use lyrebird.debug to start plugin in debug mode
    # can pass args by sys.args
    main()
    # main thread loop
    # Keep the main thread alive after main() returns so the background
    # servers continue running.
    import asyncio
    loop = asyncio.get_event_loop()
    loop.run_forever()
def plugin(args:argparse.Namespace):
    # Placeholder for the `plugin` sub-command; not implemented yet.
    pass
def src(args: argparse.Namespace):
    """Handle the `src` sub-command: fetch the given URI in the background."""
    from threading import Thread
    # Download on a worker thread so the CLI call returns immediately.
    Thread(target=application._src.download, args=(args.uri,)).start()
def _get_ip():
    """
    Get local ip from socket connection
    :return: IP Addr string
    """
    # A UDP connect() sends no packets; it only selects the local interface
    # that would route to the probe host, then we read our own address.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('meituan.com', 80))
        return s.getsockname()[0]
    finally:
        # BUG FIX: the original never closed the socket, leaking one file
        # descriptor per call.
        s.close()
|
nilq/baby-python
|
python
|
""" Decorator module.
Contains various decorators for hook callbacks.
"""
class BaseDecorator:
    """ Base class for decorators in Eris. """
    # The interface for hooks means that events will always be the first argument, anything else
    # will be passed as payloads for the events.
    # Offset (in the callback's positional args) where event payloads begin.
    _EVENT_OFFSET: int = 1
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
import enum
class MealEnum(enum.Enum):
    """Canonical display names for the five daily meal slots."""
    BREAKFAST = "Breakfast"
    MORNING_SNACK = "Morning snack"
    LUNCH = "Lunch"
    AFTERNOON_SNACK = "Afternoon snack"
    DINNER = "Dinner"
|
nilq/baby-python
|
python
|
from gutenberg.query import get_etexts
from gutenberg.query import get_metadata
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
from gutenberg.query import list_supported_metadatas
from gutenberg.acquire import set_metadata_cache
from gutenberg.acquire.metadata import SqliteMetadataCache
if __name__ == '__main__':
    # load cache from db
    #cache = SqliteMetadataCache('metadata2.sqlite')
    #set_metadata_cache(cache)
    #print(cache.is_open)
    #cache.open()
    #md_attrs = list_supported_metadatas()
    # Metadata fields dumped for every e-text.
    features = ["author", "formaturi", "language", "rights", "subject", "title",]
    last_ebook_id = 61041
    # BUG FIX: the original `while i <= last_ebook_id` loop never
    # incremented `i`, so it fetched e-text 1 forever; iterate the id
    # range directly instead.  Each iteration hits the metadata store and
    # downloads the full text (network + disk heavy).
    for i in range(1, last_ebook_id + 1):
        if i % 100 == 0:
            print(f'on {i}')
        for feature_name in features:
            data = get_metadata(feature_name, i)
            print(feature_name, data)
        # strip_headers removes the Project Gutenberg boilerplate.
        text = strip_headers(load_etext(i)).strip().replace('\r','')
        print(text[:10])
        print('\n\n')
        #print(text) # prints 'MOBY DICK; OR THE WHALE\n\nBy Herman Melville ...'
        #print(type(cache))
        #print(cache.is_open)
        #print(len(cache.graph))
        #for s,p,o in cache.graph:
        #    print(s,p,o)
        #    break
|
nilq/baby-python
|
python
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver import FirefoxOptions
import json, config, traceback, threading, os, logging
import time as timer
from datetime import datetime
from bs4 import BeautifulSoup
from viberbot import Api
from viberbot.api.bot_configuration import BotConfiguration
from viberbot.api.messages.text_message import TextMessage
# Append INFO-level logs with timestamps to ./log.log (UTF-8).
logging.basicConfig(
    handlers=[logging.FileHandler("./log.log", "a", "utf-8")],
    level=logging.INFO,
    format="%(asctime)s %(message)s",
    datefmt="%m/%d/%Y %I:%M:%S %p",
)
# Viber bot client, credentials supplied by the local config module.
viber = Api(BotConfiguration(
    name=config.NAME,
    avatar=config.AVATAR_URL,
    auth_token=config.TOKEN
))
def process(recipient_id:str):
    """Open a Firefox session for one user and run the rig-monitoring task."""
    text = ('preparing task for user '+recipient_id)
    logging.info(text)
    options = FirefoxOptions()
    options.add_argument("--headless")
    try:
        # NOTE(review): the headless options are only applied in the final
        # fallback below; the first two attempts open a visible browser —
        # confirm whether that is intended.
        driver = webdriver.Firefox(executable_path='./geckodriver')
    except OSError:
        # Windows fallback: the driver binary carries an .exe suffix.
        driver = webdriver.Firefox(executable_path='./geckodriver.exe')
    except:
        driver = webdriver.Firefox(options=options, executable_path='./geckodriver')
    driver.get("https://vhttos.com/login")
    try:
        driver, recipient_id, usr, pwd = login(driver, recipient_id)
        task(driver, recipient_id, usr, pwd)
    except:
        # login() returns None on failure, which makes the unpack above
        # raise TypeError; every error is swallowed here.
        pass
def login(driver, recipient_id):
    """Log this user's saved credentials into vhttos.com.

    Returns (driver, recipient_id, usr, pwd) on success, or None on any
    failure (the caller treats the resulting unpack error as a failed login).
    """
    # get forms
    forms = driver.find_elements_by_class_name('input-form-reg')
    # Per-user credentials live in the local users.json file.
    with open('./users.json', encoding="utf-8") as j:
        data = json.load(j)[recipient_id]
    usr = data['username']
    pwd = data['password']
    try:
        # enter username
        username = forms[0]
        username.clear()
        username.send_keys(usr)
        # enter password
        password = forms[1]
        password.clear()
        password.send_keys(pwd)
        timer.sleep(10)
        # click login
        driver.find_element_by_css_selector('.btn-vhttech').click()
        timer.sleep(5)
        # Wait until the rig dashboard widgets are rendered before returning.
        WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.foreach-temp')))
        WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.vht-badge-chip')))
        WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.riglist .row-implement')))
        return driver, recipient_id, usr, pwd
    except KeyboardInterrupt:
        driver.close()
        return
    except:
        # Any other failure is reported to the user as a bad login.
        send(recipient_id, 'Đăng nhập bị lỗi (sai email hoặc mật khẩu), vui lòng đăng ký lại')
        return
def task(driver, recipient_id, usr, pwd):
    """Poll the rig dashboard every 60 s for this user.

    Sends a Viber alert when a rig row looks offline (background color
    differs from the known "healthy" RGB values) or when GPU temperatures
    exceed the user's configured limit. Exits when the stored credentials
    change or cannot be read; refreshes the page every 5 minutes.
    """
    text = (recipient_id+ ' up')
    logging.info(text)
    # Reused as the log message whenever the loop is abandoned.
    text = ('broke task for user '+recipient_id)
    try:
        refresh_count = 0
        while True:
            # Reload the dashboard every ~300 s of accumulated sleep.
            if refresh_count == 300:
                driver.refresh()
                timer.sleep(10)
                WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.foreach-temp')))
                WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.vht-badge-chip')))
                WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.riglist .row-implement')))
                refresh_count = 0
            # Re-read credentials + temperature limit; stop if they changed.
            try:
                with open('./users.json', encoding="utf-8") as j:
                    data = json.load(j)[recipient_id]
                    max_temp = data['temp']
                if data['username'] != usr:
                    logging.info(text)
                    break
                elif data['password'] != pwd:
                    logging.info(text)
                    break
            except:
                traceback.print_exc()
                logging.info(text)
                break
            try:
                miner = None
                elements = driver.find_elements_by_css_selector('.riglist .row-implement')
                for element in elements:
                    # Decode the row's background color into channel strings.
                    css = element.value_of_css_property('background-color')
                    if css.startswith('rgba'):
                        rgb = css[5:][:-1].replace(',', '').split()
                    else:
                        rgb = css[4:][:-1].replace(',', '').split()
                    # Anything other than the two known "healthy" colors
                    # is treated as an offline rig.
                    down = False
                    if len(rgb) == 3:
                        if rgb[0] != '41' or rgb[1] != '40' or rgb[2] != '40':
                            if rgb[0] != '34' or rgb[1] != '34' or rgb[2] != '34':
                                down = True
                    elif len(rgb) == 4:
                        if rgb[0] != '62' or rgb[1] != '99' or rgb[2] != '114' or rgb[3] != '0.3':
                            down = True
                    if down == True:
                        # Extract the rig name (NVIDIA or AMD row styles).
                        ele_source = element.get_attribute('innerHTML')
                        miner_ = BeautifulSoup(str(ele_source), 'html.parser').find('span', {'class': 'vht-badge-chip rig-name nv-row'})
                        if miner_ is None:
                            miner_ = BeautifulSoup(str(ele_source), 'html.parser').find('span', {'class': 'vht-badge-chip rig-name rx-row'})
                        miner = miner_.get_text()
                        time = get_current_time(datetime.now())
                        message = f'Máy đào {miner} của bạn đang rớt mạng vào lúc {time}! https://vhttos.com/rig-list'
                        text = (recipient_id+': '+message)
                        logging.info(text)
                        send(recipient_id, message)
                # Second pass: scrape per-GPU temperatures from the page.
                html = driver.page_source
                riglist = BeautifulSoup(html, 'html.parser').find_all('div', {'class': 'container-fluid riglist'})[4]
                rigs = BeautifulSoup(str(riglist), 'html.parser').find_all('div', {'class': 'row row-implement'})
                for rig in rigs:
                    status = BeautifulSoup(str(rig), 'html.parser').find('div', {'class': 'col-lg-4 col-md-5 col-sm-5 col-12 nomarpad'})
                    temps_ = BeautifulSoup(str(status), 'html.parser').find_all('div', {'class': 'foreach-temp'})
                    temps = []
                    for temp_ in temps_:
                        temps.append((temp_.get_text().replace('\n', '')).replace(' ', ''))
                    overheat = []
                    for temp in temps:
                        if int(temp) >= int(max_temp):
                            overheat.append(temp)
                    if overheat != []:
                        miner__ = BeautifulSoup(str(rig), 'html.parser').find('span', {'class': 'vht-badge-chip rig-name nv-row'})
                        if miner__ is None:
                            miner__ = BeautifulSoup(str(rig), 'html.parser').find('span', {'class': 'vht-badge-chip rig-name rx-row'})
                        miner_ = miner__.get_text()
                        # Skip the rig already reported as offline above.
                        if miner_ == miner:
                            continue
                        time = get_current_time(datetime.now())
                        message = f'Máy đào {miner_} của bạn có {len(overheat)} card đang quá {max_temp} độ vào lúc {time}! https://vhttos.com/rig-list'
                        text = (recipient_id+': '+message)
                        logging.info(text)
                        send(recipient_id, message)
            except:
                # Scrape errors are logged and the next cycle tries again.
                traceback.print_exc()
                pass
            timer.sleep(60)
            refresh_count += 60
        driver.close()
    except KeyboardInterrupt:
        driver.close()
def get_current_time(datetime_now):
    """Format *datetime_now* as 'HH:MM dd/mm/YYYY'.

    BUG FIX: the original ignored its argument and always re-read
    datetime.now(). Honoring the parameter keeps all existing call
    sites (which pass datetime.now()) behaving the same while making
    the function deterministic and testable.
    """
    return datetime_now.strftime("%H:%M %d/%m/%Y")
def send(id, text):
    """Push a plain-text Viber message to the given user id."""
    viber.send_messages(id, [TextMessage(text=text)])
def main():
    """Spawn one watcher thread per registered user.

    A user is skipped when a live thread named after their id already
    exists, so repeated calls never double-monitor anyone.
    """
    with open('./users.json', encoding="utf-8") as j:
        users = json.load(j)
    for user in users:
        already_running = any(t.name == user for t in threading.enumerate())
        if not already_running:
            watcher = threading.Thread(target=process, args=(user,))
            watcher.name = user
            watcher.start()
main()
|
nilq/baby-python
|
python
|
# Copyright (c) 2012, Johan Rydberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Example that builds a ring of actors and then send a message
through the ring.
"""
from guild import actor
import gevent
def forward(receive, address):
    # Relay node: wait for one message and forward its payload onward.
    pat, data = receive()
    address | data
def build(receive, n):
    """Build a ring of n forwarding actors, send a message around it,
    and return the message once it arrives back here."""
    ring = []
    for i in range(n):
        if not ring:
            # The first node forwards back to this builder actor,
            # closing the ring.
            node = actor.spawn(forward, actor.curaddr())
        else:
            node = actor.spawn(forward, ring[-1])
        ring.append(node)
        gevent.sleep()
    ring[-1] | {'text': 'hello around the ring'}
    pat, data = receive()
    return data
# Python 2 script: spawn the builder on a local node and print the result.
mesh = actor.Mesh()
node = actor.Node(mesh, 'cookie@localhost.local:3232')
addr = node.spawn(build, 10000)
print node.wait(addr)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#3. Use Pexpect to retrieve the output of 'show ip int brief' from pynet-rtr2.
import pexpect, getpass, sys, time, re
def main():
    """SSH to the lab router with pexpect and print 'show ip int brief'."""
    ip_addr = '184.105.247.71'
    username = 'pyclass'
    password = getpass.getpass()
    cmd = 'show ip int brief'
    ssh_conn = pexpect.spawn('ssh {}@{}'.format(username, ip_addr))
    #ssh_conn.logfile = sys.stdout
    ssh_conn.timeout = 5
    ssh_conn.expect('ssword:')
    ssh_conn.sendline(password)
    ssh_conn.expect('#')
    # Text before '#' is the device name; rebuild the full prompt string.
    router_name = ssh_conn.before.strip()
    prompt = router_name + ssh_conn.after.strip()
    # Disable paging so the whole command output arrives in one read.
    ssh_conn.sendline('terminal length 0')
    ssh_conn.expect(prompt)
    ssh_conn.sendline(cmd)
    ssh_conn.expect(cmd + '.*' + prompt)
    print ssh_conn.after
##Only run if not called by another file/program
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
"""
********************************************************************************
post_processing
********************************************************************************
Polyline simplification
=======================
.. autosummary::
:toctree: generated/
:nosignatures:
simplify_paths_rdp
Sorting
=======
.. autosummary::
:toctree: generated/
:nosignatures:
seams_align
seams_smooth
sort_into_vertical_layers
reorder_vertical_layers
sort_paths_minimum_travel_time
zig_zag_open_paths
Orienting
=========
.. autosummary::
    :toctree: generated/
    :nosignatures:
    unify_paths_orientation
Additional
==========
.. autosummary::
    :toctree: generated/
    :nosignatures:
    generate_brim
    generate_raft
    spiralize_contours
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Polyline simplification
from .simplify_paths_rdp import * # noqa: F401 E402 F403
# Sorting
from .seams_align import * # noqa: F401 E402 F403
from .seams_smooth import * # noqa: F401 E402 F403
from .sort_into_vertical_layers import * # noqa: F401 E402 F403
from .reorder_vertical_layers import * # noqa: F401 E402 F403
from .sort_paths_minimum_travel_time import * # noqa: F401 E402 F403
# Orienting
from .unify_paths_orientation import * # noqa: F401 E402 F403
# Additional
from .generate_brim import * # noqa: F401 E402 F403
from .generate_raft import * # noqa: F401 E402 F403
from .spiralize_contours import * # noqa: F401 E402 F403
from .zig_zag_open_paths import * # noqa: F401 E402 F403
__all__ = [name for name in dir() if not name.startswith('_')]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import logging
import os
import pprint
import sys
import time
LOG = logging.getLogger()
LOG.setLevel(logging.DEBUG) # For now, let Handlers control the levels.
def system(call, checked=False):
    """Run *call* through the shell, logging it before and after.

    Returns the raw os.system() status code; raises when *checked* is
    true and the status is non-zero.
    """
    LOG.info(call)
    status = os.system(call)
    summary = '{} <- {!r}'.format(status, call)
    LOG.debug(summary)
    if checked and status:
        raise Exception(summary)
    return status
def touch(fname):
    """Create *fname* if missing and bump its timestamps (like `touch`)."""
    open(fname, 'a').close()
    os.utime(fname, None)
def foo(srtc):
    """Write outputs[0] as a JSON list of 'FOO<i>' strings, one per unit of work."""
    LOG.info('In foo')
    options = srtc['options']
    import pprint
    print('options:{}'.format(pprint.pformat(options)))
    n_uows = options['snafu.task_options.uows']
    payload = json.dumps(['FOO{}'.format(i) for i in range(n_uows)], indent=2)
    with open(srtc['output_files'][0], 'w') as stream:
        stream.write(payload)
def bar(srtc):
    """Create/refresh the first output file; writes no content."""
    LOG.info('In bar')
    touch(srtc['output_files'][0])
def fubar(srtc):
    """Copy the JSON document from inputs[0] to outputs[0] (re-serialized)."""
    LOG.info('In fubar')
    src_fn = srtc['input_files'][0]
    dst_fn = srtc['output_files'][0]
    with open(src_fn) as stream:
        payload = json.load(stream)
    with open(dst_fn, 'w') as stream:
        json.dump(payload, stream)
def scatter_fubar(srtc):
    """Split the JSON-list input into at most max_nchunks chunk files."""
    LOG.info('In scatter_fubar')
    inputs = srtc['input_files']
    outputs = srtc['output_files']
    max_nchunks = srtc['max_nchunks']
    #chunk_keys = srtc['chunk_keys']
    from . import scatter_json_list as mod
    mod.run('scatter_fubar', max_nchunks, inputs[0], outputs[0])
def scatter_json_list_plus_txt(srtc):
    """Scatter a JSON-list input plus a txt sidecar into chunk files.

    The chunk basename is derived from the last dotted component of the
    tool contract id.
    """
    LOG.info('In scatter_json_list_plus_txt: {}'.format(repr(srtc)))
    inputs = srtc['input_files']
    outputs = srtc['output_files']
    max_nchunks = srtc['max_nchunks']
    tcid = srtc['tool_contract_id']
    basename = os.path.splitext(tcid)[1][1:]
    #chunk_keys = srtc['chunk_keys']
    from . import scatter_json_list_plus_txt as mod
    mod.run(basename, max_nchunks, inputs[0], inputs[1], outputs[0])
def gather_json_list(srtc):
    """Merge scattered chunk outputs (selected by chunk_key) into one file."""
    LOG.info('In gather_json')
    inputs = srtc['input_files']
    outputs = srtc['output_files']
    chunk_key = srtc['chunk_key']
    chunk_input_json_fn = inputs[0]
    output_fn = outputs[0]
    from . import gather_json_list as mod
    mod.run(chunk_key, chunk_input_json_fn, output_fn)
def run_rtc(args):
    """Load the resolved tool contract and run the task it names.

    The concrete handler is picked from the table below using the last
    dotted component of `tool_contract_id`; many task ids share the
    generic scatter/gather implementations.
    """
    setup_logging(args)
    LOG.info('sys.executable={!r}'.format(sys.executable))
    LOG.info('Parsed args (after logging setup): {!r}'.format(vars(args)))
    LOG.info('rtc_path: {!r}'.format(args.rtc_path))
    rtc_path = args.rtc_path
    rtc = json.load(open(args.rtc_path))
    LOG.info('rtc: {!s}'.format(pprint.pformat(rtc)))
    srtc = rtc['resolved_tool_contract']
    tcid = srtc['tool_contract_id']
    options = srtc['options']
    log_level = srtc['log_level']
    input_files = srtc['input_files']
    output_files = srtc['output_files']
    nproc = srtc['nproc']
    #resources = srtc['resources']
    task_func = {
        'foo': foo,
        'bar': bar,
        'task_run_fubar_jobs': fubar,
        'scatter_fubar': scatter_fubar,
        'gather_fubar': gather_json_list,
        'task_falcon0_dazzler_tan_apply_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon0_dazzler_daligner_apply_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon0_dazzler_lamerge_apply_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon0_run_daligner_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon0_run_las_merge_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon0_run_cns_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon1_run_daligner_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon1_run_las_merge_jobs_scatter': scatter_json_list_plus_txt,
        'task_falcon0_dazzler_tan_apply_jobs_gather': gather_json_list,
        'task_falcon0_dazzler_daligner_apply_jobs_gather': gather_json_list,
        'task_falcon0_dazzler_lamerge_apply_jobs_gather': gather_json_list,
        'task_falcon0_run_daligner_jobs_gather': gather_json_list,
        'task_falcon0_run_las_merge_jobs_gather': gather_json_list,
        'task_falcon0_run_cns_jobs_gather': gather_json_list,
        'task_falcon1_run_daligner_jobs_gather': gather_json_list,
        'task_falcon1_run_las_merge_jobs_gather': gather_json_list,
    }
    # tcid looks dotted; splitext()[1][1:] keeps the final component.
    func_name = os.path.splitext(tcid)[1][1:]
    func = task_func[func_name]
    func(srtc)
def emit_one(args):
    # Placeholder: emitting a single tool contract is not implemented.
    pass
def emit_all(args):
    # Placeholder: emitting all tool contracts is not implemented.
    pass
def setup_logging(args):
    """Attach a handler built from the CLI args, then strip those args.

    If the optional logging_tree package is installed, dump the logger
    hierarchy for debugging.
    """
    handler = get_logging_handler(args)
    LOG.addHandler(handler)
    try:
        import logging_tree
        print('logging_tree:')
        logging_tree.printout()
    except ImportError:
        pass
    del_logging_flags(args)
def get_logging_handler(args):
    """Return new logging Handler.

    Also, remove related flags from argparse args.

    Level precedence: --debug > --quiet > --verbose > --log-level.
    Writes to --log-file when given, otherwise to stdout; --debug also
    switches to a timestamped format rendered in UTC.
    """
    fmt = '[%(levelname)s]%(message)s'
    log_level = args.log_level
    if args.verbose:
        log_level = 'INFO'
    if args.quiet:
        log_level = 'CRITICAL'
    if args.debug:
        log_level = 'DEBUG'
        fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(fmt=fmt)
    logging.Formatter.converter = time.gmtime
    if args.log_file:
        # BUG FIX: the original read `args._log_file`, an attribute that is
        # never set (the flag is stored as `log_file`), so every use of
        # --log-file raised AttributeError.
        handler = logging.FileHandler(args.log_file, mode='a')
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    handler.setLevel(log_level)
    return handler
def add_logging_flags(parser):
    """Register the shared logging CLI flags on *parser*.

    --log-file LOG_FILE   Write the log to file (default: stdout).
    --log-level LEVEL     DEBUG/INFO/WARNING/ERROR/CRITICAL (default INFO).
    -v/--verbose, --quiet, --debug
                          Boolean level aliases consumed by
                          get_logging_handler().
    """
    parser.add_argument('--log-file',
                        help='Write the log to file. By default, write to stdout.')
    parser.add_argument('--log-level',
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO',
                        help='Set log level.')
    # BUG FIX: these three are used as booleans (get_logging_handler tests
    # their truthiness), but the original registered them without
    # action='store_true', so `-v`/`--quiet`/`--debug` consumed the next
    # CLI token as their value.
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Set the verbosity level. (Only partially supported for now.)')
    parser.add_argument('--quiet', action='store_true',
                        help='Alias for setting log level to CRITICAL')
    parser.add_argument('--debug', action='store_true',
                        help='Alias for setting log level to DEBUG')
def del_logging_flags(args):
    """Strip the shared logging attributes from *args* once consumed."""
    for name in ('log_file', 'log_level', 'verbose', 'quiet', 'debug'):
        delattr(args, name)
class HelpF(argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
    """Help formatter: keep raw text layout and show argument defaults."""
    pass
def main(argv=sys.argv):
    """CLI entry: run-rtc / emit-tool-contract / emit-tool-contracts."""
    description = 'Multi-quick-tool-runner for pbsmrtpipe tasks'
    epilog = 'Real tool should be inferred from the resolved_tool_contract->tool_contract_id field.'
    parser = argparse.ArgumentParser(
        description=description, epilog=epilog,
        formatter_class=HelpF,
    )
    parser.add_argument('--version',
        action='version', version='0.0.0',
        help="show program's version number and exit"
    )
    subparsers = parser.add_subparsers(
        help='sub-command help')
    parser_run = subparsers.add_parser('run-rtc',
        formatter_class=HelpF)
    parser_emit_one = subparsers.add_parser('emit-tool-contract',
        formatter_class=HelpF)
    parser_emit_all = subparsers.add_parser('emit-tool-contracts',
        formatter_class=HelpF)
    # Each sub-parser dispatches to its handler via args.func.
    parser_run.set_defaults(func=run_rtc)
    parser_emit_one.set_defaults(func=emit_one)
    parser_emit_all.set_defaults(func=emit_all)
    parser_run.add_argument('rtc_path',
        help='Path to resolved tool contract')
    parser_emit_one.add_argument('tc_id',
        help='Tool Contract Id')
    parser_emit_all.add_argument('--output-dir', '-o',
        default=os.getcwd(),
        help='Emit all Tool Contracts to output directory')
    # Only run-rtc takes the shared logging flags.
    add_logging_flags(parser_run)
    args = parser.parse_args(argv[1:])
    args.func(args)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import json
import asyncio
from os import environ
from functools import partial
from aiohttp import ClientSession, ClientConnectionError
from pyee import AsyncIOEventEmitter
from aiohttp_sse_client.client import EventSource
DEFAULT_STREAM_URL = 'https://stream.flowdock.com/flows'
__all__ = ["EventStream"]
class EventStream(AsyncIOEventEmitter):
    """Server-sent-events client for the Flowdock streaming API.

    Emits: 'connected', 'error' (from the SSE layer), 'rawdata' (raw SSE
    event), 'message' (JSON-decoded payload), 'disconnected' (connection
    dropped) and 'clientError' (any other reader failure).
    """
    def __init__(self, auth, flows, url=None, session=None, params=None, loop=None):
        super().__init__(loop or asyncio.get_event_loop())
        self._evt = None        # active EventSource; None when not connected
        self.auth = auth        # value for the Authorization header
        self.flows = flows      # flow ids, joined into the `filter` query param
        self.params = params or dict()
        self.session = session or ClientSession()
        self.url = url or environ.get("FLOWDOCK_STREAM_URL", DEFAULT_STREAM_URL)
    async def connect(self, retry=3):
        """Open the SSE connection (idempotent) and start the reader task."""
        if self._evt is not None:
            return
        # timeout=-1 disables the read timeout for the long-lived stream.
        self._evt = EventSource(self.url, session=self.session,
                                timeout=-1,
                                on_open=partial(self.emit, 'connected'),
                                on_error=partial(self.emit, 'error'),
                                **self._options())
        # Negative retry counts are treated as "no retries".
        retry = 0 if retry < 0 else retry
        await self._evt.connect(retry)
        async def _process_data(event_source, emit, loop):
            # Reader loop: JSON-decode each event off-loop and re-emit it.
            try:
                async for evt in event_source:
                    emit("rawdata", evt)
                    msg = await loop.run_in_executor(None, json.loads, evt.data)
                    emit("message", msg)
            except ClientConnectionError as e:
                emit("disconnected", e)
            except Exception as e:
                emit("clientError", e)
        # NOTE(review): self._loop comes from AsyncIOEventEmitter — confirm it
        # remains a stable attribute across pyee versions.
        coro = _process_data(self._evt, self.emit, self._loop)
        self._loop.create_task(coro)
    async def end(self):
        """Close the SSE connection, if one is open."""
        if self._evt is not None:
            await self._evt.close()
            self._evt = None
    def _options(self):
        # Build the query string (flow filter + extra params) and auth header.
        qs = dict(filter=",".join(self.flows))
        qs.update(self.params)
        options = {
            "params": qs,
            "headers": {
                "Authorization": self.auth
            }
        }
        return options
|
nilq/baby-python
|
python
|
"""
File: My_drawing.py
Name:Elsa
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect,GPolygon,GLabel
from campy.graphics.gwindow import GWindow
def main():
    """
    TODO:
    This figure uses campy module to demonstrate personality.
    A lot of faiths hold by people, just like the shape of circles or triangles,
    while eventually others can only see the polygon.
    """
    window = GWindow(600, 600)

    def filled(shape, color):
        # Fill helper: every shape in this drawing is solid-filled.
        shape.filled = True
        shape.fill_color = color
        return shape

    # color of background
    window.add(filled(GRect(800, 800), 'lightgrey'))

    # green beam running diagonally behind everything else
    beam = GPolygon()
    for vertex in ((550, 590), (570, 360), (100, 60)):
        beam.add_vertex(vertex)
    window.add(filled(beam, 'greenyellow'))

    # concentric squares, added largest-first so smaller ones sit on top
    for size, x, y, color in (
            (440, 85, 100, 'peachpuff'),
            (400, 105, 120, 'purple'),
            (370, 120, 135, 'magenta'),
            (335, 135, 150, 'sage')):
        window.add(filled(GRect(size, size, x=x, y=y), color))

    # outer rings of circles, largest-first
    for size, x, y, color in (
            (325, 140, 155, 'powderblue'),
            (305, 150, 165, 'midnightblue'),
            (285, 160, 175, 'tan'),
            (265, 170, 185, 'lightsage')):
        window.add(filled(GOval(size, size, x=x, y=y), color))

    # stacked triangles, largest-first
    for vertices, color in (
            (((300, 185), (180, 370), (420, 370)), 'yellow'),
            (((300, 200), (195, 360), (405, 360)), 'linen'),
            (((300, 215), (210, 350), (390, 350)), 'lightgrey'),
            (((300, 230), (225, 340), (375, 340)), 'pink')):
        triangle = GPolygon()
        for vertex in vertices:
            triangle.add_vertex(vertex)
        window.add(filled(triangle, color))

    # central bull's-eye circles, largest-first
    for size, x, y, color in (
            (80, 260, 260, 'blueviolet'),
            (60, 270, 270, 'darkblue'),
            (40, 280, 280, 'aqua'),
            (20, 290, 290, 'aquamarine')):
        window.add(filled(GOval(size, size, x=x, y=y), color))

    # foreground polygon covering the lower-left of the scene
    blob = GPolygon()
    for vertex in ((100, 60), (50, 100), (40, 180), (20, 400), (30, 550),
                   (180, 580), (400, 550), (550, 590)):
        blob.add_vertex(vertex)
    window.add(filled(blob, 'salmon'))

    # logo
    sc101 = GLabel('SC101-2020.Nov')
    sc101.font = 'Courier-15-bold-italic'
    window.add(sc101, 0, window.height - sc101.height + 20)
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
from lib import xmltodict
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SCRAMBLED = u'да'
class ReferenceBase(object):
    """Reference channel table fetched from a remote JSON-ish endpoint.

    ``self.data`` maps multicast address -> {service id -> {"pids": [...],
    "crypt": bool}} and is used by check() to validate live probe data.
    NOTE(review): this module appears to target Python 2 (request() calls
    str methods with str arguments on an encoded payload) -- confirm the
    intended interpreter before running it on Python 3.
    """

    def __init__(self, url):
        # URL of the reference service; one HTTP session is reused for requests.
        self.url = url
        self.data = {}
        self.session = requests.session()
        raw_data = self.request(self.url)
        for c in raw_data:
            # Field names are transliterated Russian:
            # SCR_VYHODNAYA_GRUPPA = output (multicast) group,
            # SHIFROVANIE = scrambling flag (SCRAMBLED == u'да' means "yes").
            mcast, sid, pids, crypt = c["SCR_VYHODNAYA_GRUPPA"], c["SID_TRSC"], c["REQUIRED_PIDS"], c["SHIFROVANIE"]
            if mcast not in self.data:
                self.data[mcast] = {sid: {"pids": pids.split(",") if pids else [],
                    "crypt": crypt.strip() == SCRAMBLED}}
            else:
                if sid not in self.data[mcast]:
                    self.data[mcast].update({sid: {"pids": pids.split(",") if pids else [],
                        "crypt": crypt.strip() == SCRAMBLED}})

    def request(self, url):
        """Fetch *url* and split its '{...},{...}' payload into a list of dicts.

        The endpoint apparently returns concatenated JSON objects wrapped in
        one pair of brackets; the wrapper is stripped and the objects are
        parsed one by one.
        """
        request = self.session.get(url, verify=False)  # NOTE(review): TLS verification disabled
        data = request.text.encode("utf-8")
        data = data[1:-1]  # drop the surrounding wrapper characters
        result = []
        for _ in range(data.count('}')):
            index = data.find('}')
            if index == -1:
                break
            part = data[:index+1]
            result += [json.loads(part)]
            data = data[index+2:]  # skip past the '},' separator
        return result

    def check(self, sencore_tss):
        """Annotate *sencore_tss* in place with pid/scrambling verdicts.

        For every service of every transport stream, compares the pids and
        scrambling state reported by the probe against ``self.data`` and
        writes 'pids_ok' / 'scrambled_ok' fields back into the dict.
        """
        for ts_name in sencore_tss:
            ts_data = sencore_tss[ts_name]
            try:
                ts_mcast = ts_data["dst_addr"].split(":")[0]
            except Exception as why:
                logging.exception(why)
                continue
            for sid in ts_data["services"]:
                if sid == "count":  # aggregate entry, not a real service id
                    continue
                reference = self.data.get(
                    ts_mcast, {}
                ).get(
                    str(sid), {}
                )
                if reference:
                    reference_sid_s = set(reference["pids"])
                    sencore_sid_s = set(map(lambda o: str(o), ts_data["services"][sid]["pids"].keys()))
                    # pids required by the reference but missing from the probe
                    diff = list(reference_sid_s.difference(sencore_sid_s))
                    sencore_tss[ts_name]["services"][sid]["pids_ok"] = ",".join(diff) if diff else "OK"
                    crypt_ok = reference["crypt"] == sencore_tss[ts_name]["services"][sid]["scrambled"]
                    # 0 = scrambling state matches the reference, 1 = mismatch
                    if crypt_ok:
                        sencore_tss[ts_name]["services"][sid]["scrambled_ok"] = 0
                    else:
                        sencore_tss[ts_name]["services"][sid]["scrambled_ok"] = 1
                else:
                    sencore_tss[ts_name]["services"][sid]["pids_ok"] = "REFERENCE_DOES_NOT_EXIST"
                    sencore_tss[ts_name]["services"][sid]["scrambled_ok"] = "REFERENCE_DOES_NOT_EXIST"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__name__ = "Phoniebox"
import configparser # needed only for the exception types ?!
from ConfigParserExtended import ConfigParserExtended
import codecs
import subprocess # needed for aplay call
import os,sys
from time import sleep
from mpd import MPDClient
# get absolute path of this script
dir_path = os.path.dirname(os.path.realpath(__file__))
defaultconfigFilePath = os.path.join(dir_path,'./phoniebox.conf')
# TODO: externalize helper functions for the package. How?
def is_int(s):
    """Return True when *s* parses as an integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def str2bool(s):
    """Interpret the common truthy strings ("yes", "true", "t", "1") as True."""
    truthy_values = ("yes", "true", "t", "1")
    return s.lower() in truthy_values
def str2num(s):
    """Convert *s* to int when possible, otherwise to float.

    Raises ValueError when *s* is neither.
    """
    try:
        return int(s)
    except ValueError:
        pass
    return float(s)
def find_modified_files(path, since):
    """Recursively list files under *path* modified after timestamp *since*.

    Args:
        path: root directory of the tree to scan.
        since: POSIX timestamp; files with a strictly newer mtime are returned.

    Returns:
        List of file paths, each joined from the directory that actually
        contains the file.
    """
    modified_files = []
    for root, dirs, files in os.walk(path):
        for basename in files:
            # Bug fix: was os.path.join(path, basename), which produced a
            # wrong path (and a crashing os.stat) for files in subdirectories.
            filename = os.path.join(root, basename)
            status = os.stat(filename)
            if status.st_mtime > since:
                modified_files.append(filename)
    return modified_files
def file_modified(filename, since):
    """Return True when *filename*'s mtime is newer than timestamp *since*."""
    mtime = os.stat(filename).st_mtime
    return True if mtime > since else False
class Phoniebox(object):
    """RFID jukebox controller: maps card swipes to MPD playback actions.

    Main settings come from a ConfigParser-style config file; per-card
    settings (uri, playmode, resume position, ...) come from a separate
    card-assignments file addressed by numeric card id sections.
    """

    def __init__(self, configFilePath=defaultconfigFilePath):
        print("Using configuration file {}".format(configFilePath))
        self.read_config(configFilePath)
        # read cardAssignments from the configured card assignments file
        self.cardAssignments = self.read_cardAssignments()
        if self.get_setting("phoniebox", "translate_legacy_cardassignments", "bool") == True:
            self.log("Translating legacy cardAssignment config from folder.conf files.", 3)
            legacy_cardAssignments = self.translate_legacy_cardAssignments()
            self.update_cardAssignments(legacy_cardAssignments)

    def log(self, msg, level=3):
        """ level based logging to stdout """
        log_level_map = {0: None, 1: "error", 2: "warning", 3: "info", 4: "extended", 5: "debug"}
        log_level = int(self.get_setting("phoniebox", "log_level"))
        # -1 means the setting is missing: stay silent rather than look it up.
        if log_level >= level and log_level != -1:
            print("{}: {}".format(log_level_map[level].upper(), msg))

    def mpd_init_connection(self):
        """ connect to mpd """
        host = self.get_setting("mpd", "host")
        if host == -1:
            host = "localhost"
        port = self.get_setting("mpd", "port")
        if port == -1:
            port = 6600
        timeout = self.get_setting("mpd", "timeout")
        if timeout == -1:
            timeout = 3
        self.client = MPDClient()
        self.client.host = host
        self.client.port = port
        self.client.timeout = timeout
        if self.mpd_connect_timeout() != 0:
            sys.exit()
        else:
            self.log("connected to MPD with settings host = {}, port = {}, timeout = {}".format(host, port, timeout), 3)

    def mpd_connect_timeout(self):
        """ establishes the connection to MPD when disconnected

        Returns 0 on success, 1 when the timeout elapsed without connecting.
        """
        success = False
        runtime = 0
        try:
            self.client.disconnect()
        except Exception:
            # Already disconnected -- nothing to do. (Was a bare `except:`.)
            pass
        while success != True and runtime <= self.client.timeout:
            try:
                self.client.connect(self.client.host, self.client.port)
                success = True
                self.log("Connected to MPD at {} on port {}.".format(self.client.host, self.client.port), 5)
                return 0
            except Exception:
                self.log("Could not connect to MPD, retrying.", 5)
                sleep(0.2)
                runtime += 0.2
        if runtime >= self.client.timeout:
            self.log("Could not connect to MPD for {}s, giving up.".format(self.client.timeout), 2)
            return 1

    def do_second_swipe(self):
        """ react to the second swipe of the same card according to settings"""
        second_swipe_map = {'default': self.do_restart_playlist,
                            'restart': self.do_restart_playlist,
                            'restart_track': self.do_restart_track,
                            'stop': self.do_stop,
                            'pause': self.do_toggle,
                            'noaudioplay': self.do_pass,
                            'skipnext': self.do_next,
                            }
        setting_key = "second_swipe"
        map_key = self.config.get("phoniebox", setting_key)
        try:
            second_swipe_map[map_key]()
        except KeyError:
            self.log("Unknown setting \"{} = {}\", using \"{} = default\".".format(setting_key, map_key, setting_key), 5)
            second_swipe_map['default']()

    def do_restart_playlist(self):
        """ restart the same playlist from the beginning """
        self.mpd_connect_timeout()
        self.set_mpd_playmode(self.lastplayedID)
        self.play_mpd(self.get_cardsetting(self.lastplayedID, "uri"))

    def do_restart_track(self):
        """ restart currently playing track """
        self.mpd_connect_timeout()
        mpd_status = self.client.status()
        self.set_mpd_playmode(self.lastplayedID)
        # restart current track
        self.client.play(mpd_status['song'])

    def do_start_playlist(self, cardid):
        """ restart the same playlist, eventually resume """
        # NOTE(review): assumes self.lastplayedID was set by a previous swipe;
        # confirm the first swipe after startup cannot reach this path.
        if self.get_cardsetting(self.lastplayedID, "resume"):
            self.resume(self.lastplayedID, "save")
        self.mpd_connect_timeout()
        self.set_mpd_playmode(cardid)
        self.play_mpd(self.get_cardsetting(cardid, "uri"))
        if self.get_cardsetting(cardid, "resume"):
            self.resume(cardid, "resume")
        self.lastplayedID = cardid

    def do_toggle(self):
        """ toggle play/pause """
        self.mpd_connect_timeout()
        status = self.client.status()
        if status['state'] == "play":
            self.client.pause()
        else:
            self.client.play()

    def do_pass(self):
        """ do nothing (on second swipe with noaudioplay) """
        pass

    def do_next(self):
        """ skip to next track or restart playlist if stopped (on second swipe with noaudioplay) """
        self.mpd_connect_timeout()
        status = self.client.status()
        # start playlist if in stop state or there is only one song in the playlist (virtually loop)
        if (status["state"] == "stop") or (status["playlistlength"] == "1"):
            self.do_restart_playlist()
        else:
            self.client.next()

    def do_stop(self):
        """ stop playback (docstring was wrongly copied from do_pass) """
        self.mpd_connect_timeout()
        self.client.stop()

    def play_alsa(self, audiofile):
        """ pause mpd and play file on alsa player """
        self.mpd_connect_timeout()
        self.client.pause()
        # TODO: use the standard audio device or set them via phoniebox.conf
        # NOTE(review): both aplay calls run unconditionally, so the file may
        # play twice when the first device exists -- confirm intent.
        subprocess.call(["aplay -q -Dsysdefault:CARD=sndrpijustboomd " + audiofile], shell=True)
        subprocess.call(["aplay -q -Dsysdefault " + audiofile], shell=True)

    def play_mpd(self, uri):
        """ play uri in mpd """
        self.mpd_connect_timeout()
        self.client.clear()
        self.client.add(uri)
        self.client.play()
        self.log("phoniebox: playing {}".format(uri.encode('utf-8')), 3)

    def get_setting(self, section, key, opt_type="string"):
        """ get a setting from configFile file or cardAssignmentsFile
            if not present, return -1
        """
        try:
            # numeric section names are card ids -> look in cardAssignments;
            # only the ValueError matters here, the value itself is unused
            str2num(section)
            parser = self.cardAssignments
        except ValueError:
            parser = self.config
        try:
            opt = parser.get(section, key)
        except configparser.NoOptionError:
            print("No option {} in section {}".format(key, section))
            return -1
        except configparser.NoSectionError:
            print("No section {}".format(section))
            return -1
        if "bool" in opt_type.lower():
            return str2bool(opt)
        else:
            try:
                return str2num(opt)
            except ValueError:
                return opt

    def get_cardsetting(self, cardid, key, opt_type="string"):
        """ per-card convenience wrapper around get_setting (catches Errors) """
        return self.get_setting(cardid, key, opt_type)

    def mpd_init_settings(self):
        """ set initial mpd state:
            max_volume
            initial_volume """
        mpd_status = self.client.status()
        max_volume = self.get_setting("phoniebox", "max_volume")
        init_volume = self.get_setting("phoniebox", "init_volume")
        if max_volume == -1:
            max_volume = 100  # the absolute max_volume is 100%
        if init_volume == -1:
            init_volume = 0  # to be able to compare
        if max_volume < init_volume:
            self.log("init_volume cannot exceed max_volume.", 2)
            init_volume = max_volume  # do not exceed max_volume
        # bug fix: MPD reports status values as strings; compare numerically
        if int(mpd_status["volume"]) > max_volume:
            self.client.setvol(init_volume)

    def set_mpd_playmode(self, cardid):
        """ set playmode in mpd according to card settings """
        playmode_defaults_map = {"repeat": 0, "random": 0, "single": 0, "consume": 0}
        set_playmode_map = {"repeat": self.client.repeat,
                            "random": self.client.random,
                            "single": self.client.single,
                            "consume": self.client.consume}
        for key in set_playmode_map.keys():
            # option is set if config file contains "option = 1" or just "option" without value.
            playmode_setting = self.get_cardsetting(cardid, key)
            if playmode_setting == -1 or playmode_setting == 1:
                playmode_setting = 1
            else:
                playmode_setting = playmode_defaults_map[key]
            # set value
            set_playmode_map[key](playmode_setting)
            self.log("setting mpd {} = {}".format(key, playmode_setting), 5)

    def resume(self, cardid, action="resume"):
        """ seek to saved position if resume is activated, or save the
            current position ("save"/"store") into cardAssignments """
        self.mpd_connect_timeout()
        mpd_status = self.client.status()
        print(mpd_status)  # debug output
        if action in ["resume", "restore"]:
            opt_resume = self.get_cardsetting(cardid, "resume")
            if opt_resume == -1 or opt_resume == 1:
                resume_elapsed = self.get_cardsetting(cardid, "resume_elapsed")
                resume_song = self.get_cardsetting(cardid, "resume_song")
                if resume_song == -1:
                    resume_song = 0
                if resume_elapsed != -1 and resume_elapsed != 0:
                    self.log("{}: resume song {} at time {}s".format(cardid,
                             self.get_cardsetting(cardid, "resume_song"),
                             self.get_cardsetting(cardid, "resume_elapsed")), 5)
                    self.client.seek(resume_song, resume_elapsed)
        elif action in ["save", "store"]:
            try:
                self.log("{}: save state, song {} at time {}s".format(cardid,
                         mpd_status["song"], mpd_status["elapsed"]), 5)
                self.cardAssignments.set(cardid, "resume_elapsed",
                                         mpd_status["elapsed"])
                self.cardAssignments.set(cardid, "resume_song",
                                         mpd_status["song"])
            except KeyError as e:
                print("KeyError: {}".format(e))
            except ValueError as e:
                print("ValueError: {}".format(e))

    def read_cardAssignments(self):
        """ load the card assignments file into a fresh parser """
        card_assignments_file = self.config.get("phoniebox", "card_assignments_file")
        parser = ConfigParserExtended(allow_no_value=True)
        dataset = parser.read(card_assignments_file)
        if len(dataset) != 1:
            raise ValueError("Config file {} not found!".format(card_assignments_file))
        return parser

    def update_cardAssignments(self, static_cardAssignments):
        """ merge *static_cardAssignments* into memory, preserving the
            runtime resume position of cards present in both. """
        self.log("Updating changes in cardAssignments from disk.", 3)
        keep_cardsettings = ["resume_song", "resume_elapsed"]
        common_sections = list(set(static_cardAssignments.sections()).intersection(self.cardAssignments.sections()))
        for section in common_sections:
            for option in keep_cardsettings:
                if self.cardAssignments.has_option(section, option):
                    value = self.cardAssignments.get(section, option)
                    static_cardAssignments.set(section, option, value)
                    self.log("Updating cardid {} with \"{} = {}\".".format(section, option, value), 5)
        # finally assign new values
        self.cardAssignments = static_cardAssignments

    def read_config(self, configFilePath=defaultconfigFilePath):
        """ read config variables from file """
        configParser = ConfigParserExtended(allow_no_value=True, interpolation=configparser.BasicInterpolation())
        dataset = configParser.read(configFilePath)
        if len(dataset) != 1:
            raise ValueError("Config file {} not found!".format(configFilePath))
        self.config = configParser

    def translate_legacy_cardAssignments(self, last_translate_legacy_cardAssignments=0):
        """ reads the card settings data from the old scheme an translates them """
        shortcuts_path = self.get_setting("phoniebox", "shortcuts_path")
        audiofolders_path = self.get_setting("phoniebox", "audiofolders_path")
        # create the parser up front so an unset shortcuts_path returns an
        # empty parser instead of raising NameError at the return below
        configParser = ConfigParserExtended()
        if shortcuts_path != -1:
            shortcut_files = [f for f in os.listdir(shortcuts_path)
                              if os.path.isfile(os.path.join(shortcuts_path, f)) and is_int(f)]
            # filename is the cardid
            for filename in shortcut_files:
                with open(os.path.join(shortcuts_path, filename)) as f:
                    # bug fix: text-mode reads already yield str on Python 3;
                    # the old .decode('utf-8') raised AttributeError
                    uri = f.readline().strip()
                # add default settings
                if not filename in configParser.sections():
                    self.log("Adding section {} to cardAssignments".format(filename), 5)
                    configParser.add_section(filename)
                    configParser[filename] = self.config["default_cardsettings"]
                    configParser.set(filename, "cardid", filename)
                    configParser.set(filename, "uri", uri)
                # translate and add folder.conf settings if they contradict default_cardsettings
                cardsettings_map = {"CURRENTFILENAME": None,
                                    "ELAPSED": "resume_elapsed",
                                    "PLAYSTATUS": None,
                                    "RESUME": "resume",
                                    "SHUFFLE": "random",
                                    "LOOP": "repeat"}
                folderconf = os.path.join(audiofolders_path, uri, "folder.conf")
                if os.path.isfile(folderconf) and file_modified(folderconf, last_translate_legacy_cardAssignments):
                    with open(folderconf) as f:
                        lines = f.readlines()
                    cardsettings_old = dict([l.strip().replace('"', '').split("=") for l in lines])
                    for key in cardsettings_old.keys():
                        # .get() guards against unknown keys in folder.conf
                        if cardsettings_map.get(key) != None:
                            # ignore 0 and OFF values, drop settings that have None in cardsettings_map
                            if key != "ELAPSED":
                                if cardsettings_old[key] != "0" and cardsettings_old[key] != "OFF":
                                    configParser.set(filename, cardsettings_map[key], "1")
                                else:
                                    configParser.set(filename, cardsettings_map[key], "0")
                            else:
                                try:
                                    elapsed_val = float(cardsettings_old[key])
                                except ValueError:
                                    # bug fix: was misspelled 'elaped_val',
                                    # leaving elapsed_val unbound (NameError)
                                    elapsed_val = 0
                                configParser.set(filename, cardsettings_map[key], str(elapsed_val))
        return configParser

    def write_new_cardAssignments(self):
        """ persist the in-memory card assignments back to disk """
        card_assignments_file = self.config.get("phoniebox", "card_assignments_file")
        self.log("Write new card assignments to file {}.".format(card_assignments_file), 3)
        with codecs.open(card_assignments_file, 'w', 'utf-8') as f:
            self.cardAssignments.write(f)

    def print_to_file(self, filename, string):
        """ simple function to write a string to a file """
        with codecs.open(filename, 'w', 'utf-8') as f:
            f.write(string)
if __name__ == "__main__":
    # NOTE(review): the module assigns __name__ = "Phoniebox" at the top of
    # the file, so this branch is unreachable -- even direct execution takes
    # the `else` branch. Confirm whether that shadowing is intentional.
    print("This module is not to be run! Use \"from Phoniebox import Phoniebox\" instead!")
else:
    print("Phoniebox imported. Use \"box = Phoniebox(configFile)\" to get it working.")
|
nilq/baby-python
|
python
|
from datetime import timedelta
from django.db import models
from django.utils import timezone
import time
from .config import YEKPAY_SIMULATION
class TransactionManager(models.Manager):
    """ Manager for :class:`Transaction` """

    def create_transaction(self, transaction_data):
        """Create a transaction in PENDING state and stamp it with a unique
        order number (saved in a second step so the number can be generated
        after the row exists)."""
        transaction_data["status"] = "PENDING"
        transaction_data["simulation"] = YEKPAY_SIMULATION
        transaction = self.create(**transaction_data)
        transaction.order_number = self.generate_uniq_order_number()
        transaction.save(update_fields=["order_number"])
        return transaction

    def generate_uniq_order_number(self):
        """Bump a time-based candidate until no existing row uses it."""
        candidate = self._generate_order_number()
        while self.filter(order_number=candidate).exists():
            candidate += 1
        return candidate

    def _generate_order_number(self):
        # Seconds since the epoch, rounded to the nearest whole second.
        return int(round(time.time()))

    def get_old_pending_transactions(self):
        """Transactions still PENDING more than 30 minutes after creation."""
        cutoff = timezone.now() - timedelta(minutes=30)
        return self.filter(created_at__lt=cutoff, status="PENDING")
|
nilq/baby-python
|
python
|
from Utility.Types.Reconstruction import Reconstruction
class Background_Reconstruction(Reconstruction):
    """Sparse reconstruction of the scene background, optionally carrying a
    ground mesh attached after construction."""

    def __init__(self, cams, points, image_folder_path, sparse_reconstruction_type):
        super(Background_Reconstruction, self).__init__(
            cams, points, image_folder_path, sparse_reconstruction_type)
        # No ground mesh until one is attached via add_ground_mesh().
        self.ground_mesh = None

    def add_ground_mesh(self, mesh):
        """Attach *mesh* as the ground surface of this reconstruction."""
        self.ground_mesh = mesh

    def get_ground_mesh(self):
        """Return the attached ground mesh, or None when none was set."""
        return self.ground_mesh
|
nilq/baby-python
|
python
|
from .approach import Approach
from .challenge import Challenge
from .review_history import ReviewHistory
from .submission import Submission
from .task import Task
from .team import Team
from .team_invitation import TeamInvitation
__all__ = ['Approach', 'Challenge', 'ReviewHistory', 'Submission', 'Task', 'Team', 'TeamInvitation']
|
nilq/baby-python
|
python
|
# Telegram
# TELEGRAM
import telegram
from telegram import ReplyKeyboardMarkup
from telegram.error import NetworkError, Unauthorized
# ACCESO A DATOS EN SERVIDORES (usado por telegram)
import json
import requests
import config
import emailUtil
import Datos
# mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
# TELEGRAM FUNCTIONS
# mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
# Base URL of the TELEGRAM Bot API for this bot (token comes from config).
URL = "https://api.telegram.org/bot{}/".format(config.TOKEN)
chat_id = 0  # id of the chat currently being served (0 = none seen yet)
update_id = None  # offset of the next update to fetch from Telegram
# Custom reply keyboard offered to users.
user_keyboard = [['/info','/fig'],['/email', '/txt'],['/save','/ayuda'],['/deleteOld','/deleteNew']]
user_keyboard_markup = ReplyKeyboardMarkup(user_keyboard, one_time_keyboard=True)
""" poner en marcha el bot """
# Start the bot instance.
telegram_bot_experimento_bio = telegram.Bot(config.TOKEN)
# Commands shown when '/ayuda' (help) is requested.
listaComandos = ["/ayuda - Mostrar esta Ayuda", \
    "/email - envia datos completos por email",\
    "/info - Mostrar datos actuales", \
    "/txt - envia datos completos a telegram", \
    "/fig - Grafico de Evolucion",\
    "/deleteOld - Borra los 15 primeros datos",\
    "/deleteNew - Borra los 15 ultimos datos",\
    "/save - Realiza una copia de seguridad","\n"]
FLAG_enviar_PNG = False  # controls sending the chart image to the user
FLAG_enviar_TXT = False  # controls sending the data file to the user
FLAG_delete_old = False  # controls deletion of the first (oldest) data points
FLAG_delete_new = False  # controls deletion of the last (newest) data points
FLAG_pruebas = False  # for telegram experiments (unused)
FLAG_enviar_INFO = False
FLAG_save_DATA = False
FLAG_send_DATA = False
# Build the help text by chaining every help command -- this is the message
# sent over telegram when '/ayuda' is requested.
listaComandosTxt = ""
for comando in listaComandos:
    listaComandosTxt += comando+"\n"
def get_url(url):
    """Fetch *url* over HTTP and return the response body decoded as UTF-8.

    Support helper for collecting telegrams from the Telegram API.
    """
    response = requests.get(url)
    return response.content.decode("utf8")
def send_picture(picture):
    """Upload the image file at path *picture* to the current chat.

    Uses the Bot API ``sendPhoto`` endpoint with the module-level chat_id.
    """
    url = URL+"sendPhoto"
    # `with` closes the file handle; the previous code leaked it every call.
    with open(picture, 'rb') as photo_fh:
        files = {'photo': photo_fh}
        data = {'chat_id' : chat_id}
        requests.post(url, files=files, data=data)
def send_document(doc):
    """Upload the file at path *doc* to the current chat.

    Uses the Bot API ``sendDocument`` endpoint with the module-level chat_id.
    """
    url = URL+"sendDocument"
    # `with` closes the file handle; the previous code leaked it every call.
    with open(doc, 'rb') as doc_fh:
        files = {'document': doc_fh}
        data = {'chat_id' : chat_id}
        requests.post(url, files=files, data=data)
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def send_message(text):
    """Send *text* to the current chat through the Bot API.

    Failures are logged and swallowed so a failed send never kills the
    polling loop.
    NOTE(review): *text* is not URL-encoded; characters such as '&' or '#'
    will corrupt the query string -- consider urllib.parse.quote.
    """
    try:
        url = URL + "sendMessage?text={}&chat_id={}".format(text, chat_id)
        get_url(url)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print("ERROR de envio")
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def atenderTelegramas():
    '''
    Main telegram-handling routine.
    Fetches pending updates and processes them, directly executing the ones
    that are plain orders. Commands that need parameters or long work are
    delegated by raising module-level FLAG_* variables that the main loop
    polls.
    '''
    global text, chat_id, chat_time, comando, chat_user_name
    global FLAG_enviar_PNG, FLAG_pruebas, FLAG_enviar_TXT, FLAG_delete_old, FLAG_delete_new, FLAG_enviar_INFO,FLAG_save_DATA,FLAG_send_DATA
    global update_id
    try:
        # Request updates after the last update_id
        for update in telegram_bot_experimento_bio.get_updates(offset=update_id, timeout=0):  # timeout=5 if a slow connection causes trouble
            update_id = update.update_id +1
            if update.message:  # updates may arrive without a message...
                comando = update.message.text  # received command text
                chat_time = update.message.date
                user = update.message.from_user  # full user object
                chat_id = int(update.message.from_user.id)
                chat_user_name = user.first_name  # user's real/display name
                usuario = chat_user_name
                try:
                    # DEBUG: print every incoming message
                    print (str(chat_time) + " >>> " + str(chat_id) +": " + usuario + " --> " + comando)
                    if update.message.entities[0].type == "bot_command" and update.message.text == "/start":
                        update.message.reply_text("Bienvenido a Experimento Bio v1.1", reply_markup=user_keyboard_markup)
                    # =============== interpret incoming commands and act on them ===============
                    if comando == "/send" and (chat_id == config.ADMIN_USER or config.ADMIN_USER == None):  # decide who may send e-mails
                        FLAG_send_DATA = True
                        return
                    if comando == "/save" and (chat_id == config.ADMIN_USER or config.ADMIN_USER == None):  # only the admin may force an unscheduled backup
                        FLAG_save_DATA = True
                        return
                    # Commands available to basic users (clients)
                    if comando == "/ayuda":
                        send_message (listaComandosTxt)
                        return
                    if comando == "/info":
                        FLAG_enviar_INFO = True
                        return
                    if comando == "/fig":
                        FLAG_enviar_PNG = True
                        return
                    if comando == "/txt":
                        FLAG_enviar_TXT = True
                        return
                    if comando == "/deleteOld" and (chat_id == config.ADMIN_USER or config.ADMIN_USER == None):
                        FLAG_delete_old = True
                        return
                    if comando == "/deleteNew" and (chat_id == config.ADMIN_USER or config.ADMIN_USER == None):
                        FLAG_delete_new = True
                        return
                except:
                    print ("----- ERROR ATENDIENDO TELEGRAMAS ----------------------")
                if chat_id != 0:
                    # reply 'OK' to any unknown command, to mislead anyone probing the bot
                    send_message ("OK")
    except:
        pass
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
# Andrew Riker
# CS1400 - LW2 XL
# Assignment #04
import math


def polygon_area(side_length, num_sides):
    """Area of a regular polygon with *num_sides* sides of length *side_length*.

    Uses A = n * s^2 / (4 * tan(pi / n)).
    """
    return (num_sides * math.pow(side_length, 2)) / (4 * (math.tan(math.pi / num_sides)))


def main():
    """Prompt for the polygon dimensions and print its area."""
    # float()/int() replace the original eval(): eval executes arbitrary
    # user input and is unsafe; typed conversion also validates the input.
    length = float(input("Enter length of the polygon sides: "))
    num_of_sides = int(input("Enter the number of sides the polygon has: "))
    area = polygon_area(length, num_of_sides)
    # print the area of polygon
    print("The area of the polygon is:", str(round(area, 5)))


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# flake8: noqa
import geonomics as gnx
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
# define number of individuals to plot tracks for, and number of timesteps for
# tracks
n_individs = 20
n_timesteps = 5000
# make figure: two side-by-side panels (movement surface | movement tracks)
fig = plt.figure(figsize=(9.25, 4.5))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1.065])
# make model -- NOTE(review): the parameters file lives at a hard-coded
# local path, so this script only runs on the author's machine as-is
mod = gnx.make_model(gnx.read_parameters_file(('/home/drew/Desktop/stuff/berk/'
                                               'research/projects/sim/'
                                               'methods_paper/make_movesurf_img/'
                                               'movesurf_img_params.py')))
# plot the movement_surface (left panel)
ax1 = plt.subplot(gs[0])
mod.plot_movement_surface(0, 'chist', ticks=False)
ax1.set_title('mixture histograms', fontsize=20)
# plot tracks (right panel) over the landscape raster
ax2 = plt.subplot(gs[1])
im = plt.pcolormesh(np.linspace(0, 7, 8), np.linspace(0, 7, 8),
                    mod.land[0].rast, cmap='plasma')
#gnx.help.plot_movement(mod.comm[0], mod.land, n_timesteps,
#                       0, mod.params, subset_spp=n_individs-1,
#                       ticks=False, color='gray', color_by_individ=False,
#                       increasing_linewidth=False, alpha=0.5,
#                       include_start_points=False)
gnx.help.plot_movement(mod.comm[0], mod.land, n_timesteps,
                       0, mod.params, subset_spp=1, ticks=False,
                       increasing_linewidth=False, alpha=0.7, color='black',
                       include_start_points=False)
# attach a conductance colorbar to the right panel
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax)
cbar.set_label('conductance', rotation=270, labelpad=25, y=0.5, fontsize=18)
cbar.ax.tick_params(labelsize=15)
#ax2.set_title('Sample movement tracks\nfor %i individuals' % n_individs)
ax2.set_title('movement tracks', fontsize=20)
fig.tight_layout()
plt.show()
#fig.savefig(('/home/drew/Desktop/stuff/berk/research/projects/sim/'
#             'methods_paper/img/final/move_surf.pdf'),
#            format='pdf', dpi=1000)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import paramiko
import sys
# Target host comes from the command line: ./script.py <hostname>
hostname = sys.argv[1]
port = 22
# NOTE(review): hard-coded credentials -- replace with key-based auth or
# values from the environment before using outside a lab setting.
usr = 'user'
pwd = 'pass'
try:
    client = paramiko.SSHClient()
    # Trust host keys already present in the system/user known_hosts files.
    client.load_system_host_keys()
    # Unknown hosts only produce a warning instead of failing the connection.
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    client.connect(hostname, port=port, username=usr, password=pwd)
except paramiko.SSHException as e:
    # NOTE(review): `raise` here just re-raises unchanged -- the handler is
    # currently a no-op placeholder.
    raise
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Imports
import prometheus_client
import traceback
import speedtest
import threading
import argparse
import time
# Arguments -- command-line interface of the exporter.
parser = argparse.ArgumentParser(description='Prometheus exporter where it reports speedtest statistics based on user\'s preference.')
parser.add_argument('--web.listen-address', action='store', dest='listen_addr', help='Specify host and port for Prometheus to use to display metrics for scraping.')
parser.add_argument('--servers', action='store', dest='servers', help='Specific a or list of server ID(s) by comma to perform speedtests with.')
parser.add_argument('--source', action='store', dest='source', help='Specify source IP for speedtest to use to perform test.')
parser.add_argument('--interval', action='store', dest='interval', help='How often in seconds the tests should be performed.')
# Attributes -- Prometheus gauges; each carries the test server's identity
# (name, country, id) as labels.
metrics = {
    'speedtest_ping': prometheus_client.Gauge('speedtest_ping', 'Ping time in milliseconds.', ['server_name', 'server_loc', 'server_id']),
    'speedtest_download': prometheus_client.Gauge('speedtest_download', 'Network download speed in Mbps.', ['server_name', 'server_loc', 'server_id']),
    'speedtest_upload': prometheus_client.Gauge('speedtest_upload', 'Network upload speed in Mbps.', ['server_name', 'server_loc', 'server_id'])
}
# Classes
class UpdateMetrics(threading.Thread):
    """Background thread that runs a speedtest every *interval* seconds and
    publishes the results through the module-level Prometheus gauges."""

    def __init__(self, _servers, _source, _interval):
        threading.Thread.__init__(self)
        self.servers = _servers
        # Bug fix: _source was accepted but never stored, so --source had no
        # effect; keep it and hand it to Speedtest below.
        self.source = _source
        self.interval = _interval

    def run(self):
        while True:
            try:
                print('INFO: Updating metrics...', flush=True)
                # Perform test; source_address pins the outgoing IP when set
                tester = speedtest.Speedtest(source_address=self.source)
                tester.get_servers(self.servers)
                tester.get_best_server()
                tester.download()
                tester.upload()
                result = tester.results.dict()
                # Convert bits/s to Mbps
                download_speed = result['download'] / 1000000.0
                upload_speed = result['upload'] / 1000000.0
                # Update metrics
                metrics['speedtest_ping'].labels(server_name=result['server']['name'], server_loc=result['server']['country'], server_id=result['server']['id']).set(result['ping'])
                metrics['speedtest_download'].labels(server_name=result['server']['name'], server_loc=result['server']['country'], server_id=result['server']['id']).set(download_speed)
                metrics['speedtest_upload'].labels(server_name=result['server']['name'], server_loc=result['server']['country'], server_id=result['server']['id']).set(upload_speed)
                print('INFO: Metrics updated!', flush=True)
            except Exception:
                # Set metrics to -1 so scrapers can tell the test failed
                metrics['speedtest_ping'].labels(server_name='', server_loc='', server_id=0).set(-1)
                metrics['speedtest_download'].labels(server_name='', server_loc='', server_id=0).set(-1)
                metrics['speedtest_upload'].labels(server_name='', server_loc='', server_id=0).set(-1)
                print('ERROR: Unable to update metrics! Reason:\n{}'.format(traceback.print_exc()))
            # Wait until the next measurement
            time.sleep(self.interval)
# Main
if __name__ == '__main__':
    print('INFO: Loading exporter...')
    options = parser.parse_args()
    # Defaults, overridden by command-line options below.
    host = '0.0.0.0'
    port = 9100
    servers = []
    source = None
    interval = 900
    try:
        if options.listen_addr:
            host = options.listen_addr.split(':')[0]
            port = int(options.listen_addr.split(':')[-1])
        if options.servers:
            # Bug fix: a single server id (no comma) used to be silently
            # ignored because the split only ran when ',' was present.
            for server in options.servers.split(','):
                servers.append(int(server))
        if options.source:
            source = options.source
        if options.interval:
            interval = int(options.interval)
    except Exception:
        print('ERROR: Invalid argument input! Reason:\n{}'.format(traceback.print_exc()))
    print('INFO: Exporter ready!')
    UpdateMetrics(_servers=servers, _source=source, _interval=interval).start()
    prometheus_client.start_http_server(port, host)
|
nilq/baby-python
|
python
|
import logging
import os
import signal
import socket
import time
from contextlib import contextmanager
from subprocess import Popen
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
class UserObject:
    """Minimal Seldon model stub: echoes its input back unchanged."""

    def predict(self, X, features_names):
        """Log the invocation and return *X* as the prediction."""
        logging.info("Predict called")
        return X
class MicroserviceWrapper:
    """Context manager that launches a seldon-core microservice subprocess.

    The child's environment is assembled from the current process env, the
    app's ``.s2i/environment`` file and any extra *envs*; on exit the whole
    process group is killed so no stray children survive the test.
    """

    def __init__(self, app_location, envs={}, tracing=False):
        # NOTE(review): mutable default for *envs* -- harmless here because
        # it is only read, but worth confirming it is never mutated.
        self.app_location = app_location
        self.env_vars = self._env_vars(envs)
        self.cmd = self._get_cmd(tracing)

    def _env_vars(self, envs):
        """Build the child env: os.environ + .s2i file + *envs* + fixed ports."""
        env_vars = dict(os.environ)
        s2i_env_file = os.path.join(self.app_location, ".s2i", "environment")
        with open(s2i_env_file) as fh:
            for line in fh.readlines():
                line = line.strip()
                if line:
                    # KEY=VALUE pairs; split on the first '=' only
                    key, value = line.split("=", 1)
                    key, value = key.strip(), value.strip()
                    if key and value:
                        env_vars[key] = value
        env_vars.update(envs)
        # Fixed test wiring: localhost bind plus well-known service ports.
        env_vars.update(
            {
                "PYTHONUNBUFFERED": "x",
                "PYTHONPATH": self.app_location,
                "APP_HOST": "127.0.0.1",
                "PREDICTIVE_UNIT_HTTP_SERVICE_PORT": "9000",
                "PREDICTIVE_UNIT_GRPC_SERVICE_PORT": "5000",
                "PREDICTIVE_UNIT_METRICS_SERVICE_PORT": "6005",
                "PREDICTIVE_UNIT_METRICS_ENDPOINT": "/metrics-endpoint",
            }
        )
        return env_vars

    def _get_cmd(self, tracing):
        """Assemble the seldon-core-microservice command line from the env vars."""
        cmd = (
            "seldon-core-microservice",
            self.env_vars["MODEL_NAME"],
            "--service-type",
            self.env_vars["SERVICE_TYPE"],
        )
        if "PERSISTENCE" in self.env_vars:
            cmd += ("--persistence", self.env_vars["PERSISTENCE"])
        if tracing:
            cmd += ("--tracing",)
        return cmd

    def __enter__(self):
        """Start the microservice and block until its ports accept connections."""
        try:
            logging.info(f"starting: {' '.join(self.cmd)}")
            # os.setsid puts the child in its own process group so __exit__
            # can kill it together with anything it spawns.
            self.p = Popen(
                self.cmd, cwd=self.app_location, env=self.env_vars, preexec_fn=os.setsid
            )
            time.sleep(1)
            self._wait_until_ready()
            return self.p
        except Exception:
            logging.error("microservice failed to start")
            raise RuntimeError("Server did not bind to 127.0.0.1:5000")

    @retry(wait=wait_fixed(4), stop=stop_after_attempt(10))
    def _wait_until_ready(self):
        """Probe HTTP (9000), metrics (6005) and gRPC (5000); retried by tenacity."""
        logging.debug("=== trying again")
        s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r1 = s1.connect_ex(("127.0.0.1", 9000))
        s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r2 = s2.connect_ex(("127.0.0.1", 6005))
        s3 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r3 = s3.connect_ex(("127.0.0.1", 5000))
        if r1 != 0 or r2 != 0 or r3 != 0:
            raise EOFError("Server not ready yet")
        logging.info("microservice ready")

    def _get_return_code(self):
        """Poll the child process and return its exit code (None while running)."""
        self.p.poll()
        return self.p.returncode

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.p:
            group_id = os.getpgid(self.p.pid)
            # Kill the entire process groups (including subprocesses of self.p)
            os.killpg(group_id, signal.SIGKILL)
|
nilq/baby-python
|
python
|
__author__ = 'alex'
import os
import subprocess
import logging
from mountn.utils import lsblk, SubprocessException
from mountn.gui import gui
from locale import gettext as _
class TcplayDevice(object):
    """Mount-helper plugin for tcplay (TrueCrypt-compatible) volumes.

    Enumerates block devices via lsblk and lets the user map/unmap
    encrypted partitions through ``tcplay``/``dmsetup``, elevating
    privileges with ``pkexec``.
    """
    class Item(object):
        """One selectable device: either an active device-mapper mapping
        (``tc_*``) or a plain unmounted partition that could be mapped."""
        def __init__(self, plugin, **kwargs):
            # Owning plugin; gives access to the persistent settings dict.
            self.plugin = plugin
            # True when the volume is currently mapped via device-mapper.
            self.active = kwargs.get("active", False)
            # Block-device path, e.g. /dev/sdb1.
            self.device = kwargs.get("device", None)
            # device-mapper mapping name (carries the "tc_" prefix).
            self.name = kwargs.get("name", None)
            # Identifier used for the favourites list and saved passwords.
            self.uuid = kwargs.get("uuid", "")
        def __str__(self):
            return os.path.basename(self.device)
        @property
        def saved(self):
            # A device is a favourite when its uuid appears in settings["items"].
            conf = self.plugin.settings.setdefault("items",[])
            return self.uuid in conf
        @property
        def actions(self):
            """Return the (callable, label) pairs offered for this item."""
            actions = []
            if self.active:
                actions.append((self.deactivate, _("Unmount")))
            else:
                actions.append((self.activate, _("Mount")))
            if self.saved:
                actions.append((self.unsave, _("Remove favourite")))
            else:
                actions.append((self.save, _("Add favourite")))
            return actions
        def activate(self):
            """Map the volume with tcplay after prompting for a password.

            Raises SubprocessException when tcplay exits with a non-zero status.
            """
            cmd = [TcplayDevice.PKEXEC_BIN, TcplayDevice.TCPLAY_BIN, "--map="+self.name, "--device="+self.device]
            password = gui.get_password(None, _("Enter password for %s:") % self.name, save_id="tcplay:%s" % self.uuid)
            proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # NOTE(review): communicate() is given a str — Python-2-era code;
            # under Python 3 a binary pipe expects bytes. Confirm target interpreter.
            stdout, stderr = proc.communicate(password+"\r")
            if proc.returncode != 0:
                logging.error(stderr)
                raise SubprocessException("Process terminated with status %d" % proc.returncode, command=" ".join(cmd), retcode=proc.returncode, errout=stderr, stdout=stdout)
            self.active = True
            return True
        def deactivate(self):
            """Remove the device-mapper mapping via dmsetup.

            Raises SubprocessException when dmsetup exits with a non-zero status.
            """
            cmd = [TcplayDevice.PKEXEC_BIN, TcplayDevice.DMSETUP_BIN, "remove", self.name]
            proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                logging.error(stderr)
                raise SubprocessException("Process terminated with status %d" % proc.returncode, command=" ".join(cmd), retcode=proc.returncode, errout=stderr, stdout=stdout)
            self.active = False
            return True
        def save(self):
            # Add this uuid to the favourites list (idempotent).
            conf = self.plugin.settings.setdefault("items",[])
            if self.uuid not in conf:
                conf.append(self.uuid)
        def unsave(self):
            # Remove this uuid from favourites.
            # NOTE(review): list.remove raises ValueError if the uuid is absent;
            # presumably callers only offer this action when `saved` is True.
            conf = self.plugin.settings.setdefault("items",[])
            conf.remove(self.uuid)
    # External binaries; pkexec provides the privilege escalation.
    PKEXEC_BIN = "pkexec"
    TCPLAY_BIN = "tcplay"
    DMSETUP_BIN = "dmsetup"
    # Display name of the plugin.
    name = "TCPlay-Devices"
    def __init__(self, settings):
        # Persistent plugin settings (holds the favourites list under "items").
        self.settings = settings
    @property
    def items(self):
        """Scan lsblk output and build the current list of Items.

        Active ``tc_*`` crypt mappings and unmounted plain partitions are
        both offered; the dict is keyed by uuid so duplicates collapse.
        """
        items = {}
        for device in lsblk():
            fname = os.path.basename(device["NAME"])
            uuid = self._get_uuid(device)
            if device["TYPE"] == "crypt" and fname.startswith("tc_"):
                items[uuid] = TcplayDevice.Item(self, device=device["NAME"], name=os.path.basename(fname), uuid=uuid, active=True)
            elif device["TYPE"] == "part" and device["MOUNTPOINT"] == "":
                items[uuid] = TcplayDevice.Item(self, device=device["NAME"], name="tc_%s"%fname, uuid=uuid, active=False)
        return items.values()
    def _get_uuid(self, device):
        """Best-effort stable id: PARTUUID, then WSN, then the parent's id (or None)."""
        ATTRS = ("PARTUUID", "WSN")
        uuid = ""
        for attr in ATTRS:
            uuid = device.get(attr)
            if uuid:
                return uuid
        if "PARENT" in device:
            return self._get_uuid(device["PARENT"])
        else:
            return None
|
nilq/baby-python
|
python
|
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from numpy import pi
# 3-qubit / 3-bit registers: q0 carries the state of interest, q1/q2 are the
# helper pair (the gate sequence matches the textbook quantum-teleportation
# circuit — presumably that is the intent).
qreg_q = QuantumRegister(3, 'q')
creg_c = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qreg_q, creg_c)
# Entangle q1 and q2 (H followed by CX is the standard Bell-pair construction).
circuit.h(qreg_q[1])
circuit.cx(qreg_q[1], qreg_q[2])
circuit.barrier(qreg_q[1], qreg_q[2], qreg_q[0])
# Bell-basis rotation on the sender's qubits q0 and q1.
circuit.cx(qreg_q[0], qreg_q[1])
circuit.h(qreg_q[0])
circuit.barrier(qreg_q[2], qreg_q[0], qreg_q[1])
# Measure the sender's qubits into the first two classical bits.
circuit.measure(qreg_q[0], creg_c[0])
circuit.measure(qreg_q[1], creg_c[1])
circuit.barrier(qreg_q[2], qreg_q[0], qreg_q[1])
# Correction gates on q2, controlled by the (already measured, hence
# collapsed) qubits q1 and q0.
circuit.cx(qreg_q[1], qreg_q[2])
circuit.cz(qreg_q[0], qreg_q[2])
|
nilq/baby-python
|
python
|
from mmdet.models.necks.fpn import FPN
from .second_fpn import SECONDFPN
from .imvoxelnet import ImVoxelNeck, KittiImVoxelNeck, NuScenesImVoxelNeck
__all__ = ['FPN', 'SECONDFPN', 'ImVoxelNeck', 'KittiImVoxelNeck', 'NuScenesImVoxelNeck']
|
nilq/baby-python
|
python
|
import cv2
# Load the pretrained Haar cascade for frontal faces.
face_cascade = cv2.CascadeClassifier("./haarcascade_frontalface_default.xml")
img = cv2.imread("face1.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# detectMultiScale returns an array of (x, y, w, h) boxes — or an empty
# tuple when nothing is found.
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.15, minNeighbors=5)
print(type(faces))
print(faces)
# BUG FIX: the original indexed faces[0] unconditionally, which raises
# IndexError when no face is detected. Iterating draws every detected face
# (matching the intent of the commented-out loop) and is a no-op when the
# result is empty.
for (x, y, w, h) in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
cv2.imshow("Face", img)
cv2.waitKey(0)  # 0 : Closes as soon as we press any key
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
# Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
# Host/path fragment of the IBM Watson IoT HTTP event endpoint.
PATH = '.messaging.internetofthings.ibmcloud.com:1883/api/v0002/device/types/'


def main(dict):
    """Post the incoming payload to the device's event-query endpoint.

    Expects the keys iot_org_id, device_id, device_type, api_token,
    payload, client, language and msg in *dict*; returns the message text.
    """
    org = dict['iot_org_id']
    device = dict['device_id']
    dev_type = dict['device_type']
    token = dict['api_token']
    # Assemble the full endpoint URL once, for readability.
    url = 'http://' + org + PATH + dev_type + '/devices/' + device + '/events/query'
    body = {
        'payload': dict['payload'],
        'client': dict['client'],
        'language': dict['language'],
    }
    requests.post(url,
                  headers={'Content-Type': 'application/json'},
                  json=body,
                  auth=('use-token-auth', token))
    return {'msg': dict['msg']['text']}
|
nilq/baby-python
|
python
|
# Prefer setuptools; fall back to distutils on very old environments.
# BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
# only an ImportError should trigger the fallback.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='pytorch_custom',
    version='0.0dev',
    author='Alexander Soare',
    packages=['pytorch_custom'],
    url='https://github.com/alexander-soare/PyTorch-Custom',
    license='Apache 2.0',
    description='My own miscellaneous helpers for pytorch',
    install_requires=[
        'pandas',
        'matplotlib',
        'tqdm',
        'numpy',
        'scikit-learn',
    ],
)
|
nilq/baby-python
|
python
|
'''
Copyright Hackers' Club, University Of Peradeniya
Author : E/13/181 (Samurdhi Karunarathne)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at *
http://www.apache.org/licenses/LICENSE-2.0 *
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# NOTE: Python 2 code (raw_input, print statement).
# Read one line and count each letter of interest.
s=raw_input()
a=s.count('A')
d=s.count('D')
x=s.count('X')
y=s.count('Y')
p=s.count('P')
r=s.count('R')
# The answer is bounded by the scarcest letter — presumably how many copies
# of a fixed word using the letters A, D, X, Y, P, R can be assembled from
# the input; the exact target word is not visible here.
value=min(a,d,x,y,p,r)
print value
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hustle Cloudlab Repeatable Experiment Profile
Default behavior:
By default, this uses a c220g5 with 100GB of storage and runs experiments at scale factor 1.
Numbered experiments will not be run unless provided with one or more arguments to use.
A common argument can be provided that will precede all per-experiment arguments.
Storage size may need to be increased for larger scale factors.
Instructions:
No additional instructions needed. Remember to access experiment results at: /mydata/results
"""
import geni.portal as portal
import geni.rspec.pg as pg
import json
try:
import urllib.parse as url_parser
except ImportError:
import urllib as url_parser
pc = portal.Context()
pc.defineParameter("hardware", "Hardware (Default: c220g5)", portal.ParameterType.STRING, "c220g5")
pc.defineParameter("storage", "Storage Size (Default: 100GB)", portal.ParameterType.STRING, "100GB")
pc.defineParameter("scale_factor", "SSB Scale Factor (Default: 1)", portal.ParameterType.INTEGER, 1)
pc.defineParameter("common_args",
"Common Experiment Args (Default: \"ssb hash-aggregate\", replace with \"skip\" if not in use.)",
portal.ParameterType.STRING, "ssb hash-aggregate")
pc.defineParameter("experiment_1_args", "Experiment 1 Args (Default: \"skip\")", portal.ParameterType.STRING, "skip")
pc.defineParameter("experiment_2_args", "Experiment 2 Args (Default: \"skip\")", portal.ParameterType.STRING, "skip")
pc.defineParameter("experiment_3_args", "Experiment 3 Args (Default: \"skip\")", portal.ParameterType.STRING, "skip")
pc.defineParameter("experiment_4_args", "Experiment 4 Args (Default: \"skip\")", portal.ParameterType.STRING, "skip")
pc.defineParameter("experiment_5_args", "Experiment 5 Args (Default: \"skip\")", portal.ParameterType.STRING, "skip")
params = portal.context.bindParameters()
'''
c220g5 224 nodes (Intel Skylake, 20 core, 2 disks)
CPU Two Intel Xeon Silver 4114 10-core CPUs at 2.20 GHz
RAM 192GB ECC DDR4-2666 Memory
Disk One 1 TB 7200 RPM 6G SAS HDs
Disk One Intel DC S3500 480 GB 6G SATA SSD
NIC Dual-port Intel X520-DA2 10Gb NIC (PCIe v3.0, 8 lanes)
NIC Onboard Intel i350 1Gb
Note that the sysvol is the SSD, while the nonsysvol is the 7200 RPM HD.
We almost always want to use the sysvol.
'''
rspec = pg.Request()
node = pg.RawPC("node")
node.hardware_type = params.hardware
bs = node.Blockstore("bs", "/mydata")
bs.size = params.storage
bs.placement = "sysvol"
# explicitly copy the needed params for better readability
out_params = {
"hardware": params.hardware,
"storage": params.storage,
"scale_factor": params.scale_factor,
"common_args": params.common_args,
"experiment_1_args": params.experiment_1_args,
"experiment_2_args": params.experiment_2_args,
"experiment_3_args": params.experiment_3_args,
"experiment_4_args": params.experiment_4_args,
"experiment_5_args": params.experiment_5_args,
}
enc_str = url_parser.quote_plus((json.dumps(out_params, separators=(',', ':'))))
execute_str = \
"sudo touch /mydata/params.json;" + \
"sudo chmod +777 /mydata/params.json;" + \
"echo " + enc_str + " > /mydata/params.json;" + \
"sudo chmod +777 /local/repository/scripts/cloudlab/cloudlab_setup.sh;" + \
"/local/repository/scripts/cloudlab/cloudlab_setup.sh " + str(params.scale_factor) + ";" + \
"sudo chmod +777 /mydata/repo/scripts/cloudlab/cloudlab.py;" + \
"python3 /mydata/repo/scripts/cloudlab/cloudlab.py >> /mydata/report.txt 2>&1;"
node.addService(pg.Execute(shell="bash", command=execute_str))
rspec.addResource(node)
pc.printRequestRSpec(rspec)
|
nilq/baby-python
|
python
|
import datetime
import logging
import random
from GameParent import Game
from GameParent import SetupFailure, SetupSuccess
logger = logging.getLogger(__name__)
# Log to ../logs/<timestamp>.log; the replace() chain turns the datetime into
# a filesystem-friendly "YYYY-MM-DD_HHhMMm" form (the split drops
# microseconds and the [:-2] slice trims the seconds).
handler = logging.FileHandler('../logs/{}.log'.format(str(datetime.datetime.now()).replace(' ', '_').replace(':', 'h', 1).replace(':', 'm').split('.')[0][:-2]))
formatter = logging.Formatter('%(asctime)s::%(levelname)s::%(name)s::%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
class GameObject(Game):
    """
    Implements Russian Roulette
    """
    @staticmethod
    def get_game_name():
        return "RussianRoulette"
    @staticmethod
    def how_to_play():
        return "RussianRoulette is a game played where on each turn the users spins a virtual chamber where 1 in 6 bullets are filled. The user fires and lives or dies. If they live, then they pass the gun to the next user. The process continues until the gun is discharged. If you invoke this game with an integer argument, that integer will determine the amount of chambers present in the gun. If you invoke this with a boolean that toggles whether or not last man standing is enabled or not."
    @staticmethod
    def get_game_short_name():
        return "RR"
    async def setup(self, args):
        """Parse optional args (chamber count int and/or last-man-standing
        bool, in either order), validate the player list, and announce the
        turn order.

        Returns SetupSuccess on success, SetupFailure with a usage message
        otherwise.
        """
        self.__shots = 0
        self.__gun = 6
        self.__last_man_standing = True
        self.__current_turn_index = 0
        logger.info('Setting up a RussianRoulette game...')
        # Handle an explicit help request first. BUG FIX: in the original the
        # help check sat in an elif *after* the len(args) == 1 branch, making
        # it unreachable.
        if len(args) == 1 and isinstance(args[0], str) and args[0].lower() == 'help':
            logger.debug('Could not setup game, invalid arguments or user requested help')
            return SetupFailure(f'**Command \'play {self.get_game_short_name()}\' Usage: **`>play {self.get_game_short_name()} [users-to-play, ...] (number_of_empty_chambers=5) (last_man_standing=False)`')
        if len(args) == 2:
            # Exactly one int (chamber count) and one bool (mode), any order.
            if (type(args[0]) == bool and type(args[1]) == int) or (type(args[0]) == int and type(args[1]) == bool):
                if type(args[0]) == bool:
                    self.__last_man_standing = args[0]
                else:
                    self.__gun = args[0]
                if type(args[1]) == bool:
                    self.__last_man_standing = args[1]
                else:
                    self.__gun = args[1]
            else:
                logger.debug('Could not setup game, invalid arguments')
                return SetupFailure(f'**Command \'play {self.get_game_short_name()}\' Usage: **`>play {self.get_game_short_name()} [users-to-play, ...] (number_of_empty_chambers=5 (int)) (last_man_standing=false (boolean))`')
        elif len(args) == 1:
            # BUG FIX: the original tested `type(args) == int` — args is a
            # list, so that was always False and an int argument was rejected.
            if type(args[0]) == bool or type(args[0]) == int:
                if type(args[0]) == bool:
                    self.__last_man_standing = args[0]
                else:
                    self.__gun = args[0]
            else:
                logger.debug('Could not setup game, invalid arguments')
                return SetupFailure(f'**Command \'play {self.get_game_short_name()}\' Usage: **`>play {self.get_game_short_name()} [users-to-play, ...] (number_of_empty_chambers=5 (int)) (last_man_standing=false (boolean))`')
        # BUG FIX: this was an `elif`, so the player-count check was silently
        # skipped whenever any arguments were supplied; it must always run.
        if len(self.players) < 2:
            logger.debug('Could not setup game, user provided too few users to play')
            return SetupFailure('You can\'t play RussianRoulette by yourself.')
        if self.__gun < 1 or self.__gun > 1000:
            logger.debug('Could not setup game, user provided too big playfield')
            return SetupFailure('Invalid gun size.')
        logger.debug('Passed standard checks setting up turn...')
        random.shuffle(self.players)
        self.__current_turn_index = 0
        await self.channel.send("Playing with a gun with {} chambers, {}.".format(self.__gun, "last man standing" if self.__last_man_standing else "one bullet"))
        pidx = 0
        for player in self.players:
            if pidx == self.__current_turn_index:
                await self.channel.send("<@{0}>, you go first! Good luck!".format(player.id))
            else:
                await self.channel.send("<@{0}>, let\'s see what happens...".format(player.id))
            pidx += 1
        await self.show()
        return SetupSuccess(self)
    async def move(self, args, player):
        """Resolve one trigger pull for *player*: fire or click, then advance
        the turn or end the game."""
        logger.debug('Checking turn...')
        if player != self.players[self.__current_turn_index]:
            await self.channel.send('It is not your turn currently.')
            return
        self.__shots += 1
        logger.debug("Getting number...")
        # Each pull has a 1-in-__gun chance of hitting the live chamber.
        # BUG FIX: the original compared against `self.__gun // 2`, which is 0
        # for a 1-chamber gun, so that gun could never fire; comparing against
        # 1 keeps the same 1/__gun probability for every valid size.
        if random.randint(1, self.__gun) == 1:
            logger.debug("Will be a kill shot, sending message")
            # Oh no!
            await self.channel.send("**{0}** :skull::boom::gun:".format(self.get_current_player().name))
            if not self.__last_man_standing:
                # One-bullet mode: the game ends with the first casualty.
                logger.debug("Clearing game...")
                await self.end_game()
                logger.debug("Sending meta-data...")
                await self.channel.send("**{0}** looses! It took {1} shots!".format(self.get_current_player().name, self.__shots))
            else:
                # Last-man-standing: remove the casualty and keep playing.
                logger.debug("removing player and updating index")
                self.players.remove(player)
                if len(self.players) == 1:
                    logger.debug("Clearing game...")
                    await self.end_game()
                    logger.debug("Sending meta-data...")
                    await self.channel.send("**{0}** wins! It took {1} shots!".format(self.players[0].name, self.__shots))
                else:
                    # Step back one slot to compensate for the removal, then
                    # next_turn() advances to the correct next player.
                    self.__current_turn_index = (self.__current_turn_index - 1) % len(self.players)
                    logger.debug("Calling next turn...")
                    self.next_turn()
                    logger.debug("Showing board...")
                    await self.show()
        else:
            logger.debug("Shot not lethal, click! Sending message")
            await self.channel.send("**{0}** :sunglasses::gun: *click*".format(self.get_current_player().name))
            logger.debug("Calling next turn...")
            self.next_turn()
            logger.debug("Showing board...")
            await self.show()
    def next_turn(self):
        # Advance to the next living player, wrapping around.
        self.__current_turn_index = (self.__current_turn_index + 1) % len(self.players)
    def get_current_player(self):
        """Return the player whose turn it currently is."""
        return self.players[self.__current_turn_index]
    async def show(self):
        """Announce whose turn it is."""
        board = "**{0}** :triumph::gun:".format(self.get_current_player().name)
        await self.channel.send(board)
|
nilq/baby-python
|
python
|
import torch.nn as nn
import torch
from .initModel import initModel
import torch.nn.functional as F
from torch.autograd import Variable
import codecs
import os
import json
class simplE(initModel):
    """SimplE knowledge-graph embedding model.

    Each entity has separate head/tail embeddings and each relation has a
    forward and an inverse embedding; a triple (h, r, t) is scored as the
    average of <h_head, r, t_tail> and <h_tail, r_inv, t_head>.
    """
    def __init__(self, config):
        super(simplE, self).__init__(config)
        # Separate tables for an entity used in head vs. tail position.
        self.entHeadEmbedding = nn.Embedding(self.config.entTotal, self.config.embedding_dim)
        self.entTailEmbedding = nn.Embedding(self.config.entTotal, self.config.embedding_dim)
        # Forward and inverse relation embeddings.
        self.relEmbedding = nn.Embedding(self.config.relTotal, self.config.embedding_dim)
        self.relInverseEmbedding = nn.Embedding(self.config.relTotal, self.config.embedding_dim)
        self.criterion = nn.Softplus()
        self.batchSize = self.config.batchSize
        self.init()
    def init(self):
        """Xavier-uniform initialisation of all four embedding tables."""
        nn.init.xavier_uniform_(self.entHeadEmbedding.weight.data)
        nn.init.xavier_uniform_(self.entTailEmbedding.weight.data)
        nn.init.xavier_uniform_(self.relEmbedding.weight.data)
        nn.init.xavier_uniform_(self.relInverseEmbedding.weight.data)
    def loss(self, score_pos, score_neg):
        """Softplus loss: softplus(-pos) + softplus(neg), summed.

        (A dead `Variable` tensor `y` that was created — and moved to the
        GPU — but never used has been removed.)
        """
        return torch.sum(self.criterion(-score_pos) + self.criterion(score_neg))
    def pos_neg_score(self, score):
        """Split a flat score vector into (positive, mean-of-negatives) scores."""
        pos_score = score[:self.batchSize]
        # Negatives come in groups per positive; average each group.
        neg_score = score[self.batchSize:].view(self.batchSize, -1)
        neg_score = torch.mean(neg_score, dim=1)
        # Clamp to keep the Softplus loss numerically stable.
        pos_score = torch.clamp(pos_score, min=-20, max=20)
        neg_score = torch.clamp(neg_score, min=-20, max=20)
        return pos_score, neg_score
    def forward(self, batch):
        """Score a batch laid out as [positives | negatives]; columns are (h, t, r)."""
        # Each positive is followed by negativeSize corrupted heads and tails.
        self.batchSize = batch.shape[0] // (1 + self.config.negativeSize * 2)
        h = batch[:, 0]
        t = batch[:, 1]
        r = batch[:, 2]
        emb_h_as_h = self.entHeadEmbedding(h)
        emb_t_as_t = self.entTailEmbedding(t)
        emb_r = self.relEmbedding(r)
        emb_h_as_t = self.entTailEmbedding(h)
        emb_t_as_h = self.entHeadEmbedding(t)
        emb_r_inv = self.relInverseEmbedding(r)
        # SimplE score: average of the forward and inverse trilinear products.
        score = torch.sum((emb_h_as_h * emb_r * emb_t_as_t + emb_h_as_t * emb_r_inv * emb_t_as_h) / 2, -1)
        return self.pos_neg_score(score)
    def predict(self, h, r, t):
        """Score explicit (h, r, t) index tensors; mirrors forward()'s scoring."""
        emb_h_as_h = self.entHeadEmbedding(h)
        # BUG FIX: tail-as-tail must come from the *tail* embedding table;
        # the original looked it up in entHeadEmbedding, disagreeing with
        # forward().
        emb_t_as_t = self.entTailEmbedding(t)
        emb_r = self.relEmbedding(r)
        emb_h_as_t = self.entTailEmbedding(h)
        emb_t_as_h = self.entHeadEmbedding(t)
        emb_r_inv = self.relInverseEmbedding(r)
        score = torch.sum(1 / 2 * (emb_h_as_h * emb_r * emb_t_as_t + emb_h_as_t * emb_r_inv * emb_t_as_h), -1)
        return torch.clamp(score, min=-20, max=20)
    def save_embedding(self, emb_path, prefix):
        """Dump all four embedding tables as JSON lists under *emb_path*."""
        ent_head_path = os.path.join(emb_path, "simplE_head_entity{}.embedding".format(prefix))
        ent_tail_path = os.path.join(emb_path, "simplE_tail_entity{}.embedding".format(prefix))
        rel_path = os.path.join(emb_path, "simplE_rel{}.embedding".format(prefix))
        rel_rev_path = os.path.join(emb_path, "simplE_rel_rev{}.embedding".format(prefix))
        with codecs.open(ent_head_path, "w") as f:
            json.dump(self.entHeadEmbedding.cpu().weight.data.numpy().tolist(), f)
        with codecs.open(ent_tail_path, "w") as f:
            json.dump(self.entTailEmbedding.cpu().weight.data.numpy().tolist(), f)
        with codecs.open(rel_path, "w") as f:
            json.dump(self.relEmbedding.cpu().weight.data.numpy().tolist(), f)
        with codecs.open(rel_rev_path, "w") as f:
            json.dump(self.relInverseEmbedding.cpu().weight.data.numpy().tolist(), f)
|
nilq/baby-python
|
python
|
__author__ = 'jonnyfunfun'
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.