index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,800 | 3df7dac62e87a5d5cd908d3c79dfe302b8c679ad | # -*- coding:utf-8 -*-
import logging
from stompest.config import StompConfig
from stompest.sync import Stomp
logging.basicConfig()
# DEBUG level so every STOMP frame is logged -- useful when diagnosing broker failover.
logging.getLogger().setLevel(logging.DEBUG)
# Failover transport across three brokers (placeholders x/y/z); randomize=false
# preserves the listed order. NOTE(review): the option names/values follow
# stompest's failover URI scheme -- confirm against the stompest docs before editing.
uri = 'failover:(tcp://x:61613,tcp://y:61613,tcp://z:61613)?randomize=false,startupMaxReconnectAttempts=3,initialReconnectDelay=7,maxReconnectDelay=8,maxReconnectAttempts=0'
CONFIG = StompConfig(uri)
QUEUE = '/queue/liuyang-test'
if __name__ == '__main__':
    # Synchronous client: connect, publish two test messages, disconnect.
    client = Stomp(CONFIG)
    client.connect()
    client.send(QUEUE, 'test message 1')
    client.send(QUEUE, 'test message 2')
    client.disconnect()
|
988,801 | feac1708da65ef2bcd890e2e812e2537cb480aa5 | import pytest
from web3 import Web3, EthereumTesterProvider
import vyper
from hypothesis import given, strategies as st
from trie.smt import calc_root
@pytest.fixture(scope="module")
def merkle_root_contract():
    """Compile contracts/MerkleRoot.vy and deploy it to an in-memory test chain.

    Returns a web3 contract handle bound to the deployed address. Module scope
    so the contract is compiled and deployed once per test module.
    """
    w3 = Web3(EthereumTesterProvider())
    with open("contracts/MerkleRoot.vy", "r") as f:
        interface = vyper.compile_code(f.read(), output_formats=["abi", "bytecode"])
    txn_hash = w3.eth.contract(**interface).constructor().transact()
    # NOTE(review): waitForTransactionReceipt is the camelCase (web3<6) API;
    # newer web3 renamed it wait_for_transaction_receipt -- confirm pinned version.
    address = w3.eth.waitForTransactionReceipt(txn_hash)['contractAddress']
    return w3.eth.contract(address, **interface)
def to_bytes32(val: int) -> bytes:
    """Serialize a non-negative integer into its 32-byte big-endian form."""
    assert 0 <= val < (1 << 256), "Value out of range!"
    return val.to_bytes(32, 'big')
@given(
    tokenId=st.integers(min_value=0, max_value=2**256-1),
    txnHash=st.binary(min_size=32, max_size=32),
    # NOTE: For some reason, this fails to pass the health check
    #proof=st.lists(elements=st.binary(min_size=32, max_size=32), min_size=256, max_size=256),
    proof=st.lists(elements=st.just(b'\x00' * 32), min_size=256, max_size=256),
)
def test_calc_root(merkle_root_contract, tokenId, txnHash, proof):
    """Property test: the on-chain getMerkleRoot must agree with trie.smt.calc_root."""
    a = merkle_root_contract.functions.getMerkleRoot(tokenId, txnHash, proof).call()
    b = calc_root(to_bytes32(tokenId), txnHash, proof)
    assert a == b, "Mismatch\nl: {}\nr: {}".format("0x"+a.hex(), "0x"+b.hex())
|
988,802 | 9ef270b190eed3fba09ea17de563158c50b769cb | #!/usr/bin/env python
# coding:utf-8
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "firstdjango.settings")  # point Django at the project settings
django.setup()  # initialize the app registry so ORM models can be imported in this standalone script
def main():
    """Bulk-import legacy blog posts from oldblog.txt into the Article table.

    Each line of the file is expected to look like "<title>****<content>";
    all rows are inserted with a single bulk_create() call.
    """
    from blog.models import Article
    articles = []
    # Context manager guarantees the file is closed even if a line is malformed
    # (the original left the handle open on any exception).
    with open('oldblog.txt') as f:
        for line in f:
            # maxsplit=1 so a '****' occurring inside the content does not
            # break the two-value unpacking.
            title, content = line.split('****', 1)
            articles.append(Article(title=title, content=content))
            # Article.objects.get_or_create(title=title, content=content)
            # django.db.models also offers get_or_create(): fetch if present,
            # create otherwise -- useful to avoid duplicate rows.
    Article.objects.bulk_create(articles)
# Export:
# python manage.py dumpdata blog > blog_dump.json
# Import:
# python manage.py loaddata blog_dump.json
# python manage.py dumpdata auth > auth.json  # export user/auth data
if __name__ == '__main__':
    main()
    print ('Done')
# Database migration notes
|
988,803 | a8a36b21173f3028be3c4c17f3b54389ab15ad9d | from django.conf.urls import patterns, url
from .views import CountryAsia, ContactUsMarkAsRead, ContactUsCreate
# django.conf.urls.patterns() was deprecated in Django 1.8 and removed in 1.10;
# a plain list of url() entries is the supported, behaviorally identical
# replacement (the '' prefix argument was empty anyway).
urlpatterns = [
    url(r'^countries/$', CountryAsia.as_view(), name='country_asia'),
    url(r'^countries/(?P<slug>[a-zA-Z0-9-]+)/$', ContactUsCreate.as_view(), name='contact_add'),
    url(r'^mark_as_read/(?P<pk>\d+)/$', ContactUsMarkAsRead.as_view(), name='contact_us_mark_as_read'),
]
|
988,804 | ae87c3582b594bb4f4cf3c754b4a03ce44aea345 | import argparse
import base64
import numpy as np
import socketio
import eventlet.wsgi
from PIL import Image
from flask import Flask, render_template
from io import BytesIO
from model import preprocess
from keras.models import model_from_json
# Socket.IO server receiving telemetry events from the driving simulator.
sio = socketio.Server()
# Flask app; wrapped by socketio.Middleware in __main__ before serving.
app = Flask(__name__)
# Keras model; assigned in __main__ before the server starts.
model = None
prev_image_array = None  # not used in the visible code; kept for compatibility
@sio.on('telemetry')
def telemetry(sid, data):
    """Predict a steering angle from one telemetry frame and send control values back."""
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    # Preprocess image before passing to model for prediction
    height = image_array.shape[0]
    width = image_array.shape[1]
    # Crop rows [h/2-25, h-25) and 50px off each side (presumably removing
    # sky and car hood -- TODO confirm against the training pipeline).
    image_array = image_array[height//2 - 25: height-25, 50: width-50]
    image_array = preprocess(image_array)
    # Preprocess finished
    transformed_image_array = image_array[None, :, :, :]
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = float(model.predict(transformed_image_array, batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.2
    # Reduced speed while turning for older mac
    if abs(steering_angle) > 0.1 and float(speed) > 5.0:
        throttle = 0.1
    # Increased throttle for tough terrain on track 2
    if float(speed) < 5.0:
        throttle = 0.4
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
    """Reset controls to neutral when the simulator (re)connects."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' event to the simulator with both values serialized as strings."""
    payload = {
        'steering_angle': str(steering_angle),
        'throttle': str(throttle),
    }
    sio.emit("steer", data=payload, skip_sid=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument('model', type=str,
                        help='Path to model definition json. Model weights should be on the same path.')
    args = parser.parse_args()
    # BUG FIX: the original ignored args.model and always opened the
    # hard-coded 'model.json'. Honor the CLI argument and close the file
    # deterministically with a context manager.
    with open(args.model, 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    model.compile("adam", "mse")
    # Weights sit next to the definition: foo.json -> foo.h5. Match the
    # '.json' suffix (not bare 'json') so a path component containing "json"
    # is not mangled.
    weights_file = args.model.replace('.json', '.h5')
    model.load_weights(weights_file)
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server (blocks forever)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
|
988,805 | 54bb7d38a390f2b07bd6e259ca6b398a21faa350 | import os
# Where the repository exists
base_path = ""  # root of the BERT repo checkout -- fill in before use
bert_config_file = os.path.join(base_path, "bert_config.json")
# Where you want to save models (this requires lots of space - better on hhds)
save_path = ""
pretrained_path = os.path.join(save_path, "pretrained_berts")
finetuned_path = os.path.join(save_path, "finetuned_berts")
# Where you are loading the data from (better on ssd if possible for faster reads)
data_path = ""
glue_data_path = os.path.join(data_path, "glue_data")
train_data_path = os.path.join(data_path, "train_data")
|
988,806 | efd5f51c616973dc5d934562db033412acd92857 |
print()
"""
format方法
"""
# The braces and the characters inside them (format fields) are replaced
# by the arguments passed to format()
print("我叫{},今年{}!".format("张三",22))
# A number inside the braces refers to the position of the object passed to format()
print("我叫{0},今年{1}!".format("张三",22))
print("我叫{1},今年{0}!".format("张三",22))
#
# Keyword arguments used in format() are referenced by their name
print("我叫{name},今年{age}!".format(name="张三",age=22))
print("我叫{name},今年{age}!".format(age=22,name="张三"))
# Positional and keyword arguments can be combined freely
print("我叫{0},今年{1},现住{place}!".format("张三",22,place="深圳"))
print("我叫{0},现住{place},今年{1}!".format("张三",22,place="深圳"))
# (invalid example, kept commented: positional args may not follow keyword args)
# print("我叫{name},现住{0},今年{1}!".format(name="张三","深圳",22))
# ':' and a format specifier may follow the field name for finer formatting
a = 3.1415926
print("a的值为{0:.3f}".format(a))
print("{0:5}---{1:05d}".format("张三",18))
# __call__
# __str__
# __add__
# __repr__
# __iter__
# __next__
# __dict__
#
#
# 2007 iphone
# 2008 iphone3
# 2009 iphone3GS
# 2010 iphone4
# 2011 iphone4S
# 2012 iphone5
# 2013 iphone5S
# ...
#
|
988,807 | 1d544b7321ccd9fca279efea5ff8e6aa71db354f | # Generated by Django 2.0.2 on 2018-02-27 18:51
from django.db import migrations
class Migration(migrations.Migration):
    """Renames core.Page.gender to 'template' (schema rename only; data is preserved)."""

    dependencies = [
        ('core', '0009_page_gender'),
    ]
    operations = [
        migrations.RenameField(
            model_name='page',
            old_name='gender',
            new_name='template',
        ),
    ]
|
988,808 | f0c0137f9876d1c898c282560037acdced46f2b3 | # coding=utf-8
from utils import FifoList, BoundedPriorityQueue
from models import (SearchNode, SearchNodeHeuristicOrdered,
SearchNodeStarOrdered, SearchNodeCostOrdered)
def breadth_first(problem, graph_search=False):
    """Breadth-first search: a FIFO fringe expands the shallowest node first."""
    return _search(problem, FifoList(), graph_search=graph_search)
def depth_first(problem, graph_search=False):
    """Depth-first search: a plain list used as a LIFO stack."""
    return _search(problem, [], graph_search=graph_search)
def limited_depth_first(problem, depth_limit, graph_search=False):
    """Depth-first search that stops expanding below *depth_limit*."""
    return _search(problem, [], graph_search=graph_search, depth_limit=depth_limit)
def iterative_limited_depth_first(problem, graph_search=False):
    """Iterative deepening: run limited DFS with growing depth limits."""
    return _iterative_limited_search(problem, limited_depth_first, graph_search=graph_search)
def uniform_cost(problem, graph_search=False):
    """Uniform-cost search: priority fringe ordered by path cost g(n)."""
    return _search(problem, BoundedPriorityQueue(), graph_search=graph_search,
                   node_factory=SearchNodeCostOrdered)
def greedy(problem, graph_search=False):
    """Greedy best-first search: priority fringe ordered by heuristic h(n)."""
    return _search(problem, BoundedPriorityQueue(), graph_search=graph_search,
                   node_factory=SearchNodeHeuristicOrdered)
def astar(problem, graph_search=False):
    """A* search: priority fringe ordered by f(n) = g(n) + h(n)."""
    return _search(problem, BoundedPriorityQueue(), graph_search=graph_search,
                   node_factory=SearchNodeStarOrdered)
def _iterative_limited_search(problem, search_method, graph_search=False):
solution = None
limit = 0
while not solution:
solution = search_method(problem, limit, graph_search)
limit += 1
return solution
def _search(problem, fringe, graph_search=False, depth_limit=None,
            node_factory=SearchNode):
    """Generic fringe-driven search.

    The fringe's pop() order determines the strategy (FIFO = BFS, LIFO = DFS,
    priority queue = cost/heuristic ordered). When graph_search is True a
    visited-state set prevents re-expansion. Returns the goal node, or None
    when the fringe empties without reaching a goal.
    """
    visited = set()
    fringe.append(node_factory(state=problem.initial_state, problem=problem))
    while fringe:
        current = fringe.pop()
        if problem.is_goal(current.state):
            return current
        # Skip expansion once the depth limit is reached.
        if depth_limit is not None and current.depth >= depth_limit:
            continue
        for successor in current.expand():
            if graph_search:
                if successor.state in visited:
                    continue
                visited.add(successor.state)
            fringe.append(successor)
|
988,809 | 863bfbd434c96d7b19d3df58caf091877816549e | #!/usr/bin/env python
#import SimpleXMLRPCServer
import xmlrpc.server
import os
def ls(directory):
    """List directory entries, returning [] on any OS error (e.g. missing path)."""
    try:
        entries = os.listdir(directory)
    except OSError:
        entries = []
    return entries
def ls_boom(directory):
    """List directory entries, letting any OSError propagate to the XML-RPC caller."""
    entries = os.listdir(directory)
    return entries
def cb(obj):
    """Print diagnostics about *obj*, then invoke and return its cb() method."""
    for label, value in (("OBJECT::", obj), ("OBJECT.__class__::", obj.__class__)):
        print(label, value)
    return obj.cb()
if __name__ == '__main__':
    #s = SimpleXMLRPCServer.SimpleXMLRPCServer(('127.0.0.1', 8765))
    # Bind an XML-RPC server to localhost:8765 and expose the three helpers.
    s = xmlrpc.server.SimpleXMLRPCServer(('127.0.0.1', 8765))
    s.register_function(ls)
    s.register_function(ls_boom)
    s.register_function(cb)
    s.serve_forever()  # blocks until interrupted
#import xmlrpc.client
#x = xmlrpc.client.ServerProxy('http://localhost:8765')
# Create a new xmlrpc.server bound to local port 8765 and register the ls(), ls_boom(), cb() functions.
# ls() lists the directory contents from os.listdir() as a list, suppressing errors; ls_boom() lets the exception propagate.
# Extract data from the XML version of a dictionary? |
988,810 | a0a29c0c629b1ff5947c43f389e519d6264b9dd9 | class TextBox(RibbonItem):
""" The TextBox object represents text-based control that allows the user to enter text. """
Image=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The image of the TextBox.
Get: Image(self: TextBox) -> ImageSource
Set: Image(self: TextBox)=value
"""
PromptText=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The prompt text for the text box.
Get: PromptText(self: TextBox) -> str
Set: PromptText(self: TextBox)=value
"""
SelectTextOnFocus=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""A value that indicates if the text is selected when the text box gains focus.
Get: SelectTextOnFocus(self: TextBox) -> bool
Set: SelectTextOnFocus(self: TextBox)=value
"""
ShowImageAsButton=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates if the Image set
in the text box should be displayed as a clickable button.
Get: ShowImageAsButton(self: TextBox) -> bool
Set: ShowImageAsButton(self: TextBox)=value
"""
Value=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The object that supplies the text value.
Get: Value(self: TextBox) -> object
Set: Value(self: TextBox)=value
"""
Width=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the width of the TextBox.
Get: Width(self: TextBox) -> float
Set: Width(self: TextBox)=value
"""
EnterPressed=None
m_ItemType=None
|
988,811 | 1cee54791d78ae474887453215b57fecc43bc499 | from polygon import Polygon
from segitiga import Segitiga
from segilima import Segilima
#menunjukan bahwa file ini merupakan kelas main
if __name__ == "__main__":
    # s3 is an instance of the Segitiga (triangle) subclass.
    s3 = Segitiga()
    # inputSisi() and dispSisi() are inherited from Polygon (the parent class).
    s3.inputSisi()
    s3.dispSisi()
    # hitungLuas() is defined by the subclass.
    s3.hitungLuas()
    # BUG FIX: `from segilima import Segilima` already binds the class, so the
    # original `Segilima.Segilima()` raised AttributeError; instantiate directly.
    s5 = Segilima()
    # inputSisi() and dispSisi() are inherited from Polygon (the parent class).
    s5.inputSisi()
    s5.dispSisi()
    # hitungKeliling() is defined by the subclass.
    s5.hitungKeliling()
    # poly is an instance of the Polygon base class itself.
    poly = Polygon()
    poly.inputSisi()
    poly.dispSisi()
|
988,812 | 671e1509883f5e83bf87a3525897f3a2d7f730c9 | import logging
try:
from .func import load_config
except (ImportError, ValueError):
from guang_toolkit import load_config
logger = logging.getLogger(__name__)
class Basic:
    """Mixin providing bulk attribute initialization from direct values or a JSON config file."""

    def set_input(self, keys, values, path_config):
        """Set the attributes named in *keys* from *values* or from a config file.

        All-or-nothing: either every value is supplied directly, or all of them
        are loaded from *path_config*; a partial mix raises ValueError.

        :param keys: list of attribute names
        :param values: list of attribute values (None means "not supplied")
        :param path_config: path to a JSON config file, used when no values are given
        :return: None
        """
        items = [(name, value) for (name, value) in zip(keys, values)]
        invalid_items = [(name, value) for (name, value) in items if value is None]  # attributes not supplied
        if len(invalid_items) == 0:  # every attribute was supplied directly
            for name, value in items:
                # NOTE(review): values are coerced with str() here but stored
                # as-is in the config-file branch below -- confirm intended.
                self.__setattr__(name, str(value))
        elif len(invalid_items) == len(keys):  # no attribute was supplied: fall back to the config file
            if path_config is None:
                raise FileNotFoundError('请指定配置文件!')
            else:
                dict_params = load_config(path_config, config_type='json')
                for name in keys:
                    value = dict_params.get(name)
                    if value is None:
                        raise ValueError(f'配置文件缺少参数{name}')
                    else:
                        self.__setattr__(name, value)
        else:  # only some attributes were supplied
            raise ValueError(f'缺少配置参数{[name for name, _ in invalid_items]}')
|
988,813 | 9e2027336d498f9e8231b9c23d6802bcd9977a9d | # =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
# from documentation:
# http://www.cisco.com/c/en/us/td/docs/routers/asr9000/software/asr9k_r5-3/sysman/configuration/guide/b-sysman-cg-53xasr9k/b-sysman-cg-53xasr9k_chapter_0100.html#con_57141
"""
For example,
disk0:asr9k-px-5.3.3.CSCuz33376-1.0.0
package_type = None
version_re = 5.3.3
smu_re = CSCuz33376
sp_re = None
subversion_re = 1.0.0
"""
# Supported platform prefixes that may appear in a package name.
platforms = ["asr9k", "hfr"]
# Recognized package-type tokens. NOTE(review): "services-infa" looks like a
# typo for "services-infra" but is kept as-is to preserve matching behavior.
package_types = "mini mcast mgbl mpls k9sec diags fpd doc bng li optic services services-infa " \
                "infra-test video 9000v asr901 asr903 ncs500x".split()
# Raw strings so backslash escapes reach the regex engine untouched.
version_re = re.compile(r"(?P<VERSION>\d+\.\d+\.\d+(\.\d+\w+)?)")
smu_re = re.compile(r"(?P<SMU>CSC[a-z]{2}\d{5})")
sp_re = re.compile(r"(?P<SP>(sp|fp)\d{0,2})")
subversion_re = re.compile(r"(CSC|sp|fp).*(?P<SUBVERSION>\d+\.\d+\.\d+?)")


class SoftwarePackage(object):
    """Decomposes an IOS XR package name (external or internal form) into
    platform, package type, architecture, version, SMU, SP and subversion.
    """

    def __init__(self, package_name):
        # Normalize external names to their internal equivalents, e.g.
        # External Name: asr9k-asr9000v-nV-px.pie-6.1.2
        # Internal Name: asr9k-9000v-nV-px-6.1.2
        if 'asr9000v' in package_name:
            package_name = package_name.replace('asr9000v', '9000v')
        elif 'services-infra-px' in package_name:
            package_name = package_name.replace('-px', '')
        self.package_name = package_name

    @property
    def platform(self):
        """Platform prefix (e.g. 'asr9k'), or None when not recognized."""
        for platform in platforms:
            if platform + "-" in self.package_name:
                return platform
        return None

    @property
    def package_type(self):
        """Package-type token (e.g. 'mini'), or None."""
        for package_type in package_types:
            if "-" + package_type + "-" in self.package_name:
                return package_type
        return None

    @property
    def architecture(self):
        """Architecture marker.

        'px' when present; True for the special services-infra package
        (treated simply as "has an architecture" by is_valid/__eq__);
        otherwise None.
        """
        # asr9k-mcast-px.pie-5.3.2 (external name)
        # disk0:asr9k-mcast-px-5.3.2 (internal name)
        # asr9k-px-5.3.3.CSCuy81837.pie (external name)
        # disk0:asr9k-px-5.3.3.CSCuy81837-1.0.0 (internal name)
        if 'services-infra' in self.package_name:
            return True
        if "-px" in self.package_name:
            return "px"
        return None

    @property
    def version(self):
        """Release version string, e.g. '5.3.3', or None."""
        result = re.search(version_re, self.package_name)
        return result.group("VERSION") if result else None

    @property
    def smu(self):
        """SMU identifier (CSCxxNNNNN), or None."""
        result = re.search(smu_re, self.package_name)
        return result.group("SMU") if result else None

    @property
    def sp(self):
        """Service-pack identifier (sp/fp + up to two digits), or None."""
        result = re.search(sp_re, self.package_name)
        return result.group("SP") if result else None

    @property
    def subversion(self):
        """SMU/SP subversion (e.g. '1.0.0'); only meaningful when an SMU or SP exists."""
        if self.sp or self.smu:
            result = re.search(subversion_re, self.package_name)
            return result.group("SUBVERSION") if result else None
        return None

    def is_valid(self):
        """A package needs platform, version, architecture and at least one of type/SMU/SP."""
        return self.platform and self.version and self.architecture and (self.package_type or self.smu or self.sp)

    def __eq__(self, other):
        # subversion compares fuzzily: when either side lacks it, it is ignored.
        result = self.platform == other.platform and \
            self.package_type == other.package_type and \
            self.architecture == other.architecture and \
            self.version == other.version and \
            self.smu == other.smu and \
            self.sp == other.sp and \
            (self.subversion == other.subversion if self.subversion and other.subversion else True)
        if result:
            # Side effect: propagate our disk prefix (e.g. 'disk0:') onto the
            # matching package so both names carry the storage location.
            if ":" in self.package_name:
                disk = self.package_name.split(':')[0] + ":"
                if not other.package_name.startswith(disk):
                    other.package_name = disk + other.package_name
        return result

    def __hash__(self):
        # BUG FIX: the original formatted 7 values into a 5-placeholder string,
        # silently dropping sp (and subversion). sp is compared strictly by
        # __eq__, so it belongs in the hash. subversion must stay OUT: __eq__
        # treats a missing subversion as a wildcard, and equal objects are
        # required to hash equally.
        return hash((self.platform, self.package_type, self.architecture,
                     self.version, self.smu, self.sp))

    @staticmethod
    def from_show_cmd(cmd):
        """Parse whitespace-separated 'show install' output into a set of valid packages."""
        software_packages = set()
        for line in cmd.split():
            software_package = SoftwarePackage(line)
            if software_package.is_valid():
                software_packages.add(software_package)
        return software_packages

    @staticmethod
    def from_package_list(pkg_list):
        """Build the set of valid SoftwarePackages from an iterable of package names."""
        software_packages = set()
        for pkg in pkg_list:
            software_package = SoftwarePackage(pkg)
            if software_package.is_valid():
                software_packages.add(software_package)
        return software_packages

    def __repr__(self):
        return self.package_name

    def __str__(self):
        return self.__repr__()
|
988,814 | 0c2163de99bd92ae49ee3d882cdcf24143d7cf7a | from header import *
def solution(A):
    """Maximum single-pass gain max(A[j] - A[i] for j >= i), Kadane-style.

    Tracks the running minimum and the best difference seen so far in O(n).
    Returns -1 for empty input; for non-empty input the result is >= 0
    (each element can pair with itself).
    """
    inf = float("inf")  # defined locally so the function no longer depends on a star-import
    lowest = inf
    best = -inf
    for value in A:
        lowest = min(lowest, value)
        best = max(best, value - lowest)
    return best if best != -inf else -1
# Read one line of whitespace-separated integers from stdin and print the answer.
A = input()
A = [int(x) for x in A.split()]
ans = solution(A)
print(ans)
"""
"""
988,815 | 694efbeabc13e2c1a224753aa6aa80e6749a623b | from . import test_context
import sys
import io
import unittest
import keyring
import keyring.backend
import re
import os
import tempfile
from requests.exceptions import HTTPError
from requests import Response
try:
from contextlib import redirect_stdout, redirect_stderr
except ImportError:
from contextlib2 import redirect_stdout, redirect_stderr
import qjira.__main__ as prog
from . import test_util
from . import test_data
PY3 = sys.version_info > (3,)
class TestableKeyring(keyring.backend.KeyringBackend):
    """In-memory keyring backend so tests never touch the real OS keychain."""

    priority = 1
    # Class-level store shared by all instances; keyed "<service>_<user>".
    entries = dict()

    def _key(self, servicename, username):
        return "{0}_{1}".format(servicename, username)

    def set_password(self, servicename, username, password):
        self.entries[self._key(servicename, username)] = password

    def get_password(self, servicename, username):
        return self.entries[self._key(servicename, username)]

    def delete_password(self, servicename, username):
        self.entries.pop(self._key(servicename, username))
class TestMainCLI(test_util.SPTestCase, test_util.MockJira, unittest.TestCase):
    """End-to-end tests of the qjira CLI entry point against a mocked Jira and keyring."""

    @classmethod
    def setUpClass(cls):
        # Swap in the in-memory keyring once for the whole class.
        keyring.set_keyring(TestableKeyring())

    def setUp(self):
        # Pre-seed a stored credential and capture stdout/stderr per test.
        keyring.get_keyring().entries['qjira-sp_userb'] = 'xyzzy'
        self.std_out = io.StringIO() if PY3 else io.BytesIO()
        self.std_err = io.StringIO() if PY3 else io.BytesIO()
        self.setup_mock_jira()

    def tearDown(self):
        self.teardown_mock_jira()

    def test_stores_credentials(self):
        """A password supplied via -w is saved in the keyring under the -u user."""
        with redirect_stdout(self.std_out):
            with redirect_stderr(self.std_err):
                prog.main(['-w','blah','-u','usera', 'cycletime', 'IIQCB'])
        self.assertEqual('blah', keyring.get_keyring().entries['qjira-sp_usera'])

    def test_not_authorized_clears_credentials(self):
        """A 401 from Jira propagates and the stored credential is deleted."""
        self.assertEqual('xyzzy', keyring.get_keyring().entries['qjira-sp_userb'])
        self.raise401 = True
        with self.assertRaises(HTTPError) as ctx:
            with redirect_stdout(self.std_out):
                with redirect_stderr(self.std_err):
                    prog.main(['-w','xyzzy','-u','userb', 'cycletime', 'IIQCB'])
        exc = ctx.exception
        self.assertEqual(exc.response.status_code, 401)
        with self.assertRaises(KeyError):
            keyring.get_keyring().entries['qjira-sp_userb']

    def test_progress_shown(self):
        """By default a progress line is written to stderr."""
        re_1of1 = re.compile('Retrieved 1 issue')
        self.json_response = {
            'total': 1,
            'issues': [test_data.singleSprintStory()]
        }
        with redirect_stdout(self.std_out):
            with redirect_stderr(self.std_err):
                prog.main([ '-w', 'blah','cycletime', 'TEST'])
        self.assertRegex_(self.std_err.getvalue(), re_1of1)

    def test_progress_hidden(self):
        """--no-progress suppresses the progress line on stderr."""
        re_1of1 = re.compile('Retrieved 1 issue')
        self.json_response = {
            'total': 1,
            'issues': [test_data.singleSprintStory()]
        }
        with redirect_stderr(self.std_err):
            with redirect_stdout(self.std_out):
                prog.main(['-w', 'blah', 'cycletime', '--no-progress', 'TEST'])
        self.assertNotRegex_(self.std_err.getvalue(), re_1of1)

    def test_write_to_file(self):
        """-o writes CSV output (header + one row) to the given path."""
        f, path = tempfile.mkstemp(suffix='csv')
        self.json_response = {
            'total': 1,
            'issues': [test_data.singleSprintStory()]
        }
        lines = None
        try:
            with redirect_stderr(self.std_err):
                with redirect_stdout(self.std_out):
                    prog.main(['-w', 'blah', 'cycletime', '--no-progress', '-o', path, 'TEST'])
            with open(path, 'r') as o:
                lines = o.readlines()
        finally:
            os.unlink(path)
        self.assertEqual(2, len(lines))

    def test_command_options_require_project(self):
        """cycletime without a project exits 2 with an argparse error."""
        with self.assertRaises(SystemExit) as ctx:
            with redirect_stderr(self.std_err):
                prog.main([ '-w', 'blah', 'cycletime', '--no-progress'])
        exc = ctx.exception
        self.assertEqual(exc.code, 2)
        self.assertRegex_(self.std_err.getvalue(), r'cycletime: error:')

    def test_command_jql_require_jql(self):
        """jql without a query exits 2 with an argparse error."""
        with self.assertRaises(SystemExit) as ctx:
            with redirect_stderr(self.std_err):
                prog.main([ '-w', 'blah', 'jql', '--no-progress'])
        exc = ctx.exception
        self.assertEqual(exc.code, 2)
        self.assertRegex_(self.std_err.getvalue(), r'jql: error:')

    def test_filter_by_date_argparse(self):
        '''The velocity commands date filter validates input argument.'''
        self.json_response = {
            'total': 1,
            'issues': [test_data.singleSprintStory()]
        }
        with redirect_stderr(self.std_err):
            with redirect_stdout(self.std_out):
                prog.main(['-w', 'blah', 'velocity', '--no-progress', '--filter-by-date', '11/01/2017', 'TEST'])
        with self.assertRaises(SystemExit) as ctx:
            with redirect_stderr(self.std_err):
                with redirect_stdout(self.std_out):
                    prog.main(['-w', 'blah', 'velocity', '--no-progress', '--filter-by-date', 'not a date', 'TEST'])
        exc = ctx.exception
        self.assertEqual(exc.code, 2)
        self.assertRegex_(self.std_err.getvalue(), r'velocity: error:')
|
988,816 | f448df5b05534b693baf1240b329c9f090263a55 | class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
dic = {}
for index in range(len(list1)):
name = list1[index]
dic[name] = [index, -1]
pass
for index in range(len(list2)):
name = list2[index]
if dic.has_key(name):
dic[name][1] = index
pass
pass
arr = dic.items()
res = []
currsum = -1
for ele in arr:
if ele[1][1] != -1: # in both list
if currsum == -1:
currsum = ele[1][0] + ele[1][1]
res.append(ele[0])
pass
else:
tempsum = ele[1][0] + ele[1][1]
if tempsum < currsum:
res = []
currsum = tempsum
res.append(ele[0])
pass
elif tempsum == currsum:
res.append(ele[0])
pass
pass
pass
pass
return res
|
988,817 | a1011d4530475d102b99dc175ee80f4a1afa6868 | import argparse, io, json, logging, os, re, subprocess, sys, tempfile
from collections import defaultdict
import vcf
import pysam
import pysam.bcftools as bcftools
import pybedtools.bedtool as bed
import numpy as np
import pandas as pd
from pathlib import Path
from shlex import quote
from .sample import Sample
from .variant import variant_descriptor, Variant, DeletionVariant
from npsv import npsva
ZSCORE_THRESHOLD = 1.5
def count_alleles_with_svviz2(variant, args, input_bam):
    """Run svviz2 on a single variant and return (ref_count, alt_count).

    Writes a minimal VCF into a temp directory, invokes svviz2 as a
    subprocess, then parses the generated *.report.tsv. Raises
    CalledProcessError if svviz2 fails; asserts if no report file is found.
    """
    with tempfile.TemporaryDirectory(dir=args.tempdir) as tempdir:
        vcf_path = variant.to_minimal_vcf(args, tempdir=tempdir)
        command = "{exec} --ref {ref} --outdir {outdir} --no-render --variants {vcf} {bam}".format(
            exec=quote("svviz2"),
            ref=quote(args.reference),
            outdir=tempdir,
            vcf=quote(vcf_path),
            bam=quote(input_bam),
        )
        subprocess.check_call(command, shell=True)
        id = (
            variant.record.ID
            or f"{variant.record.CHROM}_{variant.record.POS}_{variant.record.sv_end}"
        )
        # svviz2's output filename varies by version/variant kind; probe the
        # known patterns and use the first that exists.
        svviz2_file_names = [
            f"{id}.{variant.record.var_subtype[:3].lower()}_{variant.record.CHROM}_{variant.record.POS-1}",
            f"{id}.{variant.record.var_subtype[:3].lower()}_{variant.record.CHROM}_{variant.record.POS}",
            f"{id}.SequenceDefinedVariant.{variant.record.CHROM}_{variant.record.POS-1}-{variant.record.sv_end-1}",
        ]
        for report_prefix in svviz2_file_names:
            report_path = os.path.join(tempdir, report_prefix + ".report.tsv")
            if os.path.exists(report_path):
                break
        else:
            assert False, f"Couldn't find report file for {id}"
        report = pd.read_csv(report_path, sep="\t")
        report.fillna({"allele": "all"}, inplace=True)
        # Pull the "count" rows for the ref and alt alleles from the report.
        alleles = report.groupby(["allele", "key"])
        ref = alleles.get_group(("ref", "count")).iat[0, 3]
        alt = alleles.get_group(("alt", "count")).iat[0, 3]
        return ref, alt
class Features(object):
    """Per-variant, per-sample SV genotyping feature container.

    Values live in a defaultdict so unset features render as MISSING_DATA
    ('.') when printed; attribute assignment is restricted to FEATURES names.
    """

    FEATURES = [
        "REF_READ",
        "ALT_READ",
        "REF_WEIGHTED_SPAN",
        "ALT_WEIGHTED_SPAN",
        "REF_CONC_SPAN",
        "ALT_CONC_SPAN",
        "INSERT_LOWER",
        "INSERT_UPPER",
        "CLIP_PRIMARY",
        "COVERAGE",
        "DHFC",
        "DHBFC",
        "DHFFC",
    ]
    FEATURE_COLS = ["#CHROM", "START", "END", "TYPE", "SAMPLE", "SVLEN", *FEATURES]
    MISSING_DATA = "."

    def __init__(self, variant: "Variant", sample: "Sample"):
        # Write through __dict__ to bypass our restrictive __setattr__
        # (these names are not features). The string annotations keep the
        # project types lazy so this class imports standalone.
        self.__dict__['variant'] = variant
        self.__dict__['sample'] = sample
        self.__dict__['features'] = defaultdict(lambda: Features.MISSING_DATA)

    def __setattr__(self, name, value):
        """Store a feature value; reject names that are not known features."""
        if name not in Features.FEATURES:
            # BUG FIX: the original passed (template, name) as two ValueError
            # args, so '%s' was never interpolated; format explicitly.
            raise ValueError("Unknown feature: %s" % name)
        self.features[name] = value

    def print_features(self, file, force_variant=None, ac=None):
        """Write one TSV feature row for this variant/sample to *file*.

        Args:
            file: open text file object to write to.
            force_variant: report this variant's coordinates instead of our own.
            ac: allele count appended as a final column when not None.
        """
        print_variant = self.variant if force_variant is None else force_variant
        print(
            print_variant.chrom,
            print_variant.pos,
            print_variant.end,
            print_variant.subtype,
            self.sample.name,
            print_variant.event_length,
            sep="\t",
            end="",
            file=file,
        )
        for feature in Features.FEATURES:
            file.write(f"\t{self.features[feature]}")
        if ac is not None:
            file.write(f"\t{ac}")
        file.write("\n")

    @staticmethod
    def header(out_file=sys.stdout, ac=None):
        """Generate header for SV features file

        Args:
            out_file (file object, optional): Output file object. Defaults to sys.stdout.
            ac (int, optional): Not None to include AC in header. Defaults to None.
        """
        if ac is not None:
            print(*Features.FEATURE_COLS, "AC", sep="\t", file=out_file)
        else:
            print(*Features.FEATURE_COLS, sep="\t", file=out_file)
def coverage_over_region(input_bam, region, reference, min_mapq=40, min_baseq=15, min_anchor=11):
    """Compute coverage over 1-indexed, closed region.

    Runs `samtools depth` (via pysam) with mapping-quality (-Q), base-quality
    (-q) and read-length (-l) filters; *reference* is passed through when
    provided (needed e.g. for CRAM input).

    Returns:
        (mean_coverage, total_coverage, region_length); (0., 0., length) when
        no reads overlap or the region is empty.
    """
    if reference:
        depth_result = pysam.depth(  # pylint: disable=no-member
            "-Q", str(min_mapq),
            "-q", str(min_baseq),
            "-l", str(min_anchor),
            "-r", region,
            "--reference", reference,
            input_bam,
        )
    else:
        depth_result = pysam.depth(  # pylint: disable=no-member
            "-Q", str(min_mapq),
            "-q", str(min_baseq),
            "-l", str(min_anchor),
            "-r", region,
            input_bam,
        )
    # start, end are 0-indexed half-open coordinates
    _, start, end = pysam.libcutils.parse_region(region=region)
    region_length = end - start
    if len(depth_result) > 0 and region_length > 0:
        # Column 2 of samtools-depth output is the per-base depth.
        depths = np.loadtxt(io.StringIO(depth_result), dtype=int, usecols=2)
        total_coverage = np.sum(depths)
        return (total_coverage / region_length, total_coverage, region_length)
    else:
        return (0., 0., region_length)
def count_realigned_reads(
    args,
    fragments,
    variant,
    ref_contig="ref",
    alt_contig="alt",
    count_straddle=True,
    **kwargs,
):
    """Count reads realigning to the ref vs. alt allele around the variant breakpoints.

    Returns:
        (ref_reads, alt_reads, read_names); counts are averaged over the two
        breakpoints when the variant has a right breakpoint on that allele.
    """
    # 1-indexed coordinates
    rl_breakpoint, rr_breakpoint = variant.ref_breakpoints(args.flank, contig=ref_contig)
    al_breakpoint, ar_breakpoint = variant.alt_breakpoints(args.flank, contig=alt_contig)
    counts, read_names = fragments.count_realigned_reads(
        [(rl_breakpoint, rr_breakpoint or "", al_breakpoint, ar_breakpoint or "")],
        count_straddle=count_straddle,
        **kwargs,
    )
    # If multiple breakpoints, average counts; mapq_reads selects the
    # MAPQ-weighted variants of the counters.
    if args.mapq_reads:
        ref_reads = (counts["rl_mapq"] + counts["rr_mapq"]) / (1 if rr_breakpoint is None else 2)
        alt_reads = (counts["al_mapq"] + counts["ar_mapq"]) / (1 if ar_breakpoint is None else 2)
    else:
        ref_reads = (counts["rl"] + counts["rr"]) / (1 if rr_breakpoint is None else 2)
        alt_reads = (counts["al"] + counts["ar"]) / (1 if ar_breakpoint is None else 2)
    return ref_reads, alt_reads, read_names
def extract_features(
    args: argparse.Namespace,
    variant: Variant,
    input_bam: str,
    sample: Sample,
    max_reads: int = None,
    input_fasta: str = None,
    ref_contig: str = "ref",
    alt_contig: str = "alt",
    insert_hist: bool = True,
):
    """Extract alignment-based features for a single structural variant.

    Args:
        args: Command line arguments.
        variant: Variant to evaluate.
        input_bam: Path to BAM file.
        sample: Sample library statistics (insert size, coverage, etc.).
        max_reads: Max reads for feature extraction. Defaults to None.
        input_fasta: Pre-generated synthetic FASTA; one is generated (and
            deleted afterwards) when None.
        ref_contig: Reference-allele contig name within the FASTA.
        alt_contig: Alternate-allele contig name within the FASTA.
        insert_hist: Use empirical insert size histogram. Defaults to True.

    Returns:
        Features populated with read, pair, clip and (for DEL/DUP) coverage evidence.
    """
    features = Features(variant, sample)

    # BUG FIX: pre-assign so the `finally` block cannot raise NameError if
    # FASTA generation fails before `fasta_path` would have been bound.
    fasta_path = input_fasta
    try:
        if input_fasta is None:
            # Generate FASTA with ref and alt alleles formatted for use with bwa (via npsva)
            fasta_path, ref_contig, alt_contig = variant.synth_fasta(args)

        fragments = npsva.RealignedFragments(
            fasta_path,
            sample.mean_insert_size,
            sample.std_insert_size,
            sample.insert_size_density().as_dict() if insert_hist else {},
            input_bam,
        )
    finally:
        # Clean up the FASTA file if we created one
        if fasta_path != input_fasta:
            os.remove(fasta_path)

    # Gather reads from the BAM file
    # Previously a fixed 1000 for paired-end and args.flank for realignment
    pair_flank = min(args.flank, sample.search_distance())
    ci_pos = variant.get_ci("CIPOS", default_ci=args.default_ci)
    ci_end = variant.get_ci("CIEND", default_ci=args.default_ci)
    if variant.ref_length > 2 * pair_flank + ci_pos[1] + abs(ci_end[0]):
        # When event is large, gather reads only near the breakpoints
        fragments.gather_reads(variant.left_flank_region_string(left_flank=abs(ci_pos[0]) + pair_flank, right_flank=ci_pos[1] + pair_flank))
        fragments.gather_reads(variant.right_flank_region_string(left_flank=abs(ci_end[0]) + pair_flank, right_flank=ci_end[1] + pair_flank))
    else:
        # When event is small gather reads across the entire event
        fragments.gather_reads(variant.region_string(flank=pair_flank))

    # Allele Count Evidence
    ref_count, alt_count, *_ = count_realigned_reads(
        args,
        fragments,
        variant,
        ref_contig=ref_contig,
        alt_contig=alt_contig,
        count_straddle=args.count_straddle,
    )
    features.REF_READ = ref_count
    features.ALT_READ = alt_count

    # Read Pair Evidence
    left_breakpoint = variant.left_flank_region_string(left_flank=1, right_flank=1)
    right_breakpoint = variant.right_flank_region_string(left_flank=1, right_flank=1)
    fragment_delta = -variant.event_length if variant.is_deletion else variant.event_length
    pair_results = fragments.count_pipeline_straddlers(
        left_breakpoint, right_breakpoint, pair_flank, fragment_delta, 1.5, args.min_anchor,
    )
    # TODO: Incorporate 'concordance' count features
    features.REF_WEIGHTED_SPAN = pair_results["ref_weighted_count"]
    features.ALT_WEIGHTED_SPAN = pair_results["alt_weighted_count"]
    features.REF_CONC_SPAN = pair_results["ref_conc_count"]
    features.ALT_CONC_SPAN = pair_results["alt_conc_count"]

    insert_count = pair_results["insert_count"]
    if insert_count > 0:
        features.INSERT_LOWER = pair_results["insert_lower"] / insert_count
        features.INSERT_UPPER = pair_results["insert_upper"] / insert_count
    else:
        features.INSERT_LOWER = 0
        features.INSERT_UPPER = 0

    # Clipped Read Evidence
    # BUG FIX: initialize so CLIP_PRIMARY below cannot raise NameError for
    # variant types that match none of the branches.
    clip = 0
    clip_total = 0
    if variant.is_deletion:
        # For deletions we are interested in clipped reads within the event
        left_clip_results = fragments.count_pipeline_clipped_reads(left_breakpoint, args.min_clip)
        clip = left_clip_results["right"]
        clip_total = left_clip_results["total"]
        right_clip_results = fragments.count_pipeline_clipped_reads(right_breakpoint, args.min_clip)
        clip += right_clip_results["left"]
        clip_total += right_clip_results["total"]
    elif variant.is_insertion:
        # TODO: Handle complex variants
        # For insertions we are interested in clipped reads on either side of breakpoint
        clip_results = fragments.count_pipeline_clipped_reads(left_breakpoint, args.min_clip)
        clip = clip_results["left"] + clip_results["right"] + clip_results["both"]
        clip_total = clip_results["total"]
    elif variant.is_duplication:
        # For duplication we are interested in clipped reads outside the event
        left_clip_results = fragments.count_pipeline_clipped_reads(left_breakpoint, args.min_clip)
        clip = left_clip_results["left"]
        clip_total = left_clip_results["total"]
        right_clip_results = fragments.count_pipeline_clipped_reads(right_breakpoint, args.min_clip)
        clip += right_clip_results["right"]
        clip_total += right_clip_results["total"]
    features.CLIP_PRIMARY = clip / clip_total if clip_total > 0 else 0.

    # Coverage Evidence
    # Coverage currently only relevant for deletion and duplication events
    if variant.is_insertion:
        return features
    assert variant.is_deletion or variant.is_duplication, "Coverage features only supported for DEL/DUP variants"

    coverage, _, _ = coverage_over_region(input_bam, variant.region_string(), args.reference, min_mapq=args.min_mapq, min_baseq=args.min_baseq, min_anchor=args.min_anchor)
    _, left_flank_bases, left_flank_length = coverage_over_region(
        input_bam,
        variant.left_flank_region_string(args.rel_coverage_flank),
        args.reference,
        min_mapq=args.min_mapq, min_baseq=args.min_baseq, min_anchor=args.min_anchor
    )
    _, right_flank_bases, right_flank_length = coverage_over_region(
        input_bam,
        variant.right_flank_region_string(args.rel_coverage_flank),
        args.reference,
        min_mapq=args.min_mapq, min_baseq=args.min_baseq, min_anchor=args.min_anchor
    )

    # Normalized coverage features adapted from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6479422/
    # Chromosomal normalized coverage
    features.COVERAGE = coverage
    features.DHFC = coverage / sample.chrom_mean_coverage(variant.chrom)

    # GC normalized coverage
    gc_fraction, alignable_bases = variant.ref_gc_fraction
    if alignable_bases > 0:
        features.DHBFC = coverage / sample.gc_mean_coverage(gc_fraction)
    else:
        features.DHBFC = 1. if coverage > 0 else 0.

    # Flank normalized coverage
    total_flank_bases = left_flank_bases + right_flank_bases
    total_flank_length = left_flank_length + right_flank_length
    if total_flank_bases > 0 and total_flank_length > 0:
        features.DHFFC = coverage / (total_flank_bases / total_flank_length)
    else:
        features.DHFFC = 1. if coverage > 0 else 0.

    return features
def extract(
    args: argparse.Namespace,
    input_vcf: str,
    input_bam: str,
    out_file=sys.stdout,
    max_reads: int = None,
    ac: int = None,
    sample: Sample = None,
    force_variant: Variant = None,
    insert_hist: bool = True,
):
    """Extract and print deletion SV features for a VCF and BAM file

    Args:
        args (argparse.Namespace): Command line arguments
        input_vcf (str): Path to VCF file
        input_bam (str): Path to BAM file
        out_file (file_object, optional): File to write features. Defaults to sys.stdout.
        max_reads (int, optional): Max reads for feature extraction. Defaults to None.
        ac (int, optional): Allele count for current features. Defaults to None.
        sample (Sample, optional): Sample object. Defaults to None.
        force_variant (Variant, optional): Variant to determine feature variant columns. Defaults to None.
        insert_hist (bool, optional): Use empirical insert size histogram. Defaults to True.

    Raises:
        ValueError: Missing argument
    """
    # Print header line as requested
    if args.header:
        Features.header(out_file, ac)

    # Resolve library statistics from, in order of preference: the provided
    # Sample, a stats file, or explicit distribution parameters.
    if sample is not None:
        pass
    elif args.stats_path is not None:
        sample = Sample.from_npsv(args.stats_path, bam_path=input_bam)
    elif None not in (
        args.fragment_mean,
        args.fragment_sd,
        args.read_length,
        args.depth,
    ):
        sample = Sample.from_distribution(
            input_bam,
            args.fragment_mean,
            args.fragment_sd,
            args.read_length,
            mean_coverage=args.depth,
        )
    else:
        raise ValueError("Library distribution must be provided")

    # Extract features for all SVs
    vcf_reader = vcf.Reader(filename=input_vcf)
    for record in vcf_reader:
        variant = Variant.from_pyvcf(record, args.reference)
        if variant is None:
            logging.warning("Variant type or VCF record not supported for %s. Skipping.", record.ID)
            # BUG FIX: actually skip unsupported records instead of falling
            # through and crashing extract_features with variant=None.
            continue

        features = extract_features(
            args,
            variant,
            input_bam,
            sample,
            max_reads=max_reads,
            insert_hist=insert_hist,
        )

        # Print features
        features.print_features(
            out_file, force_variant=force_variant, ac=ac,
        )
|
988,818 | c4a01a1ca263d321446485673ccc207aec18f46d | from collections import OrderedDict
from datetime import datetime
from flask_login import UserMixin, AnonymousUserMixin, current_user
from mongoengine.context_managers import switch_db
from markdown.util import etree
import difflib
from . import db, login_manager, wiki_pwd
from .wiki_util import unified_diff
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a stored session id to a WikiUser (or None)."""
    return WikiUser.objects(id=user_id).first()
class Permission:
    """Permissions to access Project Wiki"""
    # Bit flags, OR-ed together into per-group bitmasks (see `roles` below).
    READ = 0x01
    WRITE = 0x02
    ADMIN = 0x40
    SUPER = 0x80
# Role name -> permission bitmask, from most to least privileged. Order
# matters: WikiUser.get_role maps a stored bitmask back to its role name by
# position in this mapping.
roles = OrderedDict([
    ('Super', 0xff),
    ('Admin', 0x7f),
    ('User', Permission.READ | Permission.WRITE),
    ('Guest', Permission.READ)
])
class AnonymousUser(AnonymousUserMixin):
    """Fallback identity for unauthenticated sessions; denies every permission."""

    id = ''
    name = 'system'
    group = ''

    def can(self, group, permissions):
        # Anonymous visitors hold no permissions in any group.
        return False

    def belong_to(self, group):
        return False

    def is_admin(self, group):
        return False

    def is_super_admin(self):
        return False
class WikiUser(UserMixin, db.Document):
    """Collection of Project Wiki users
    :param name: username
    :param email: user email
    :param password_hash: the hash of user's password
    The actual password is not stored, only its hash.
    :param permissions: user's permissions for each group
    Example:
    {'group1': 0x03, 'group2': 0x40}
    """
    name = db.StringField(unique=True)
    email = db.StringField(required=True)
    password_hash = db.StringField()
    permissions = db.DictField(default=dict())
    meta = {'collection': 'wiki_user'}
    def __repr__(self):
        return '<User {}>'.format(self.name)
    def set_password(self, password):
        # Only the hash is persisted; see `password_hash`.
        self.password_hash = wiki_pwd.hash(password)
    def verify_password(self, password):
        return wiki_pwd.verify(password, self.password_hash)
    def set_role(self, group, role):
        """Set the role of a user.
        :param group: group name (no whitespace)
        :param role: `Super`, `Admin`, `User`, or `Guest`,
        Super: Add/remove group, activate/deactivate group,
        add/remove users, etc.
        Admin: Add/remove users in the group, read/write pages.
        User: Read/write pages.
        Guest: Read pages.
        """
        self.permissions[group] = roles[role]
    def get_role(self, group):
        # Reverse lookup: map the stored bitmask back to its role name.
        # NOTE(review): raises ValueError if the stored mask does not exactly
        # equal one of the canonical `roles` bitmasks — confirm intended.
        idx = list(roles.values()).index(self.permissions[group])
        return list(roles)[idx]
    def can(self, group, permissions):
        """Check user's permission"""
        if self.is_super_admin():
            return True
        if group in self.permissions:
            return (self.permissions[group] & permissions) == permissions
        else:
            return False
    def belong_to(self, group):
        return group in self.permissions
    def is_admin(self, group):
        return self.can(group, Permission.ADMIN)
    def is_super_admin(self):
        # 'super' is a pseudo-group entry carrying the global SUPER bit.
        return 'super' in self.permissions \
            and (self.permissions['super'] & Permission.SUPER) == Permission.SUPER
class WikiFile(db.Document):
    """Collection of uploaded files.
    :param id: file id
    The `id` is a sequential integer shared by all groups.
    For example, if a file is uploaded in `group1`, and its id is 100,
    the id of next file, no matter which group it is uploaded in, will
    be 101.
    :param name: original file name
    :param secured_name: secured file name created from original file name
    Secured file names are created to ensure file names can't be used
    to corrupt computer systems.
    :param mime_type: file type
    Example: image/png
    :param size: file size in bytes
    :param uploaded_on: the time when file is uploaded
    :param uploaded_by: username of the one uploads the file
    """
    id = db.SequenceField(primary_key=True)
    name = db.StringField(max_length=256, required=True)
    secured_name = db.StringField(max_length=256)
    mime_type = db.StringField()
    size = db.IntField() # in bytes
    uploaded_on = db.DateTimeField(default=datetime.now)
    uploaded_by = db.StringField()
    meta = {
        'collection': 'wiki_file',
        'allow_inheritance': True,
    }
    def __repr__(self):
        return '<File - %s>' % self.name
class WikiPageVersion(db.Document):
    """Collection of page versions.
    :param diff: differences between two adjacent versions
    :param version: version number
    :param modified_on: the time when this version of page is modified
    :param modified_by: username of the one who modified the wiki page
    """
    diff = db.StringField()
    version = db.IntField()
    modified_on = db.DateTimeField()
    modified_by = db.StringField()
    def __repr__(self):
        return '<Version {}>'.format(self.version)
    # Text index on `diff` so WikiPage.rename can search_text() old links.
    meta = {
        'collection': 'wiki_page_version',
        'indexes': [{
            'fields': ['$diff'],
            'default_language': 'english'
        }]
    }
class WikiComment(db.EmbeddedDocument):
    """Comment, once submitted, can be deleted by author or admin,
    but cannot be modified.
    Comment accepts the same kind of markdown used to edit wiki pages.
    In addition, when one can enter `[@user1]`, Project Wiki will send
    out a notification email to `user1`.
    :param id: comment id
    :param timestamp: the time comment is submitted
    :param author: username of comment author
    :param html: html rendered from entered markdown
    :param md: submitted markdown
    """
    # id = <epoch time>-<author id>
    id = db.StringField(required=True)
    timestamp = db.DateTimeField(default=datetime.now)
    author = db.StringField()
    html = db.StringField()
    md = db.StringField()
class WikiPage(db.Document):
    """Collection of Project Wiki pages.
    :param title: page title
    :param md: page markdown
    :param html: html rendered from `md`
    :param toc: table of contents generated based on headings in `md`
    :param current_version: current version number of the page
    :param versions: a list of references to previous versions of the page
    :param modified_on: the most recent time when page is modified
    :param modified_by: username of the one who modified the page recently
    :param comments: comments
    :param refs: a list of references to the pages mentioned
    :param files: a list of references to the files mentioned
    """
    title = db.StringField(required=True, unique=True)
    md = db.StringField()
    html = db.StringField()
    toc = db.StringField()
    current_version = db.IntField(default=1)
    versions = db.ListField(db.ReferenceField(WikiPageVersion))
    modified_on = db.DateTimeField(default=datetime.now)
    modified_by = db.StringField()
    comments = db.ListField(db.EmbeddedDocumentField(WikiComment))
    refs = db.ListField(db.ReferenceField('self'))
    files = db.ListField(db.ReferenceField('WikiFile'))
    # Hashed index on title plus weighted full-text search over title,
    # markdown and comment markdown.
    meta = {
        'collection': 'wiki_page',
        'indexes': [
            '#title', {
                'fields': ['$title', '$md', '$comments.md'],
                'default_language': 'english',
                'weights': {'title': 10, 'md': 2, 'comments.md': 1}
            }
        ]
    }
    def __repr__(self):
        return '<Wiki Page - {}>'.format(self.title)
    def update_content(self, group, md, html, toc):
        """Update page content and make other changes accordingly.
        :param group: group name (no whitespace)
        :param md: markdown
        :param html: html rendered from `md`
        :param toc: table of contents generated based on headings in `md`
        """
        self.html = html
        self.toc = toc
        # Only bump the version / record history when the markdown changed.
        diff = unified_diff.make_patch(self.md, md)
        if diff:
            # NOTE(review): WikiPageVersion is constructed with positional
            # arguments (diff, version, modified_on, modified_by); confirm
            # the installed mongoengine version accepts positional field
            # arguments — newer releases require keywords.
            pv = WikiPageVersion(diff, self.current_version, self.modified_on,
                                 self.modified_by).switch_db(group).save()
            self.versions.append(pv)
            self.md = md
            self.modified_on = datetime.now()
            self.modified_by = current_user.name
            self.current_version += 1
            # Record this page in the group's recent-changes cache.
            with switch_db(WikiCache, group) as _WikiCache:
                _cache = _WikiCache.objects.only('changes_id_title').first()
                _cache.add_changed_page(self.id, self.title, self.modified_on)
        self.save()
    def rename(self, group, new_title):
        """Rename a wikipage, update all the pages which reference it,
        as well as WikiPageVersion, WikiCache.
        :param group: group name (no whitespace)
        :param new_title: the new title of the page
        """
        old_md = '[[{}]]'.format(self.title)
        new_md = '[[{}]]'.format(new_title)
        old_html = render_wiki_link(group, self.id, self.title)
        new_html = render_wiki_link(group, self.id, new_title)
        # `switch_db(WikiPage, group)` has already been done in `main.wiki_rename_page`.
        for p in self.__class__.objects(refs__contains=self.id).exclude('comments').all():
            p.md = p.md.replace(old_md, new_md)
            p.html = p.html.replace(old_html, new_html)
            p.save()
        # Rewrite the stored diffs so old versions keep rendering correctly.
        with switch_db(WikiPageVersion, group) as _WikiPageVersion:
            for pv in _WikiPageVersion.objects.search_text(old_md).all():
                pv.diff = pv.diff.replace(old_md, new_md)
                pv.save()
        # Update cached (id, title) pairs in recent changes and keypages.
        with switch_db(WikiCache, group) as _WikiCache:
            _WikiCache.objects(changes_id_title=[self.id, self.title]).\
                update(set__changes_id_title__S=[self.id, new_title])
            _WikiCache.objects(keypages_id_title=[self.id, self.title]).\
                update(set__keypages_id_title__S=[self.id, new_title])
        self.title = new_title
        self.save()
    def get_version_content(self, group, old_ver_num):
        """Recover an old version of the page.
        Because WikiPageVersion only stores the unified diff between two adjecent
        versions, to get a really old version it needs to apply the difference
        one by one.
        For example, if there is a `page` whose current version is 10, and one
        need to recover version 7, here is what would happen:
        page.content[version 10] + diff[version 9] -> page.content[version 9]
        page.content[version 9] + diff[version 8] -> page.content[version 8]
        page.content[version 8] + diff[version 7] -> page.content[version 7]
        So it took longer to calculate what the content of an old version is than
        a newer version.
        :param group: group name (no whitespace)
        :param old_ver_num: the old version number
        """
        with switch_db(WikiPageVersion, group):
            # versions[i] holds the diff leading back to version i+1; apply
            # them newest-first in revert mode to walk back from current md.
            old_to_current = self.versions[(old_ver_num - 1):]
            old_to_current_patches = [v.diff for v in old_to_current[::-1]]
            return unified_diff.apply_patches(self.md, old_to_current_patches, revert=True)
    def make_wikipage_diff(self, group, old_ver_num, new_ver_num):
        """Generate a table to compare differences between two different
        versions of the page.
        :param group: group name (no whitespace)
        :param old_ver_num: old version number
        :param new_ver_num: new version number
        """
        old_content = self.get_version_content(group, old_ver_num)
        new_content = self.get_version_content(group, new_ver_num)
        d = difflib.HtmlDiff()
        diff_table = d.make_table(old_content.splitlines(), new_content.splitlines())
        # NOTE(review): the first replace() appears to substitute a
        # non-breaking space for plain spaces — confirm the replacement
        # character survived encoding; as two ASCII spaces it is a no-op.
        diff_table = diff_table.replace(' ', ' ').replace(' nowrap="nowrap"', '')
        return diff_table
class WikiCache(db.Document):
    """Each group should only have one document in this collection.
    :param keypages_id_title: keypage ids and titles
    :param changes_id_title: recently changed page ids and titles
    """
    keypages_id_title = db.ListField()
    changes_id_title = db.ListField()
    latest_change_time = db.DateTimeField(default=datetime.now)
    meta = {'collection': 'wiki_cache'}
    def update_keypages(self, group, *titles):
        """Rebuild the keypage list from titles, dropping unknown pages."""
        self.keypages_id_title = []
        with switch_db(WikiPage, group) as _WikiPage:
            for t in titles:
                p = _WikiPage.objects(title=t).only('id').first()
                if p is not None:
                    self.keypages_id_title.append((p.id, t))
        # Deduplicate keypages and keep the original order
        self.keypages_id_title = sorted(set(self.keypages_id_title),
                                        key=self.keypages_id_title.index)
        self.save()
    def add_changed_page(self, page_id, page_title, page_time):
        """Move (page_id, page_title) to the end of the recent-changes list,
        keeping at most the 50 most recent entries."""
        page_id_title = [page_id, page_title]
        if not self.changes_id_title:
            self.changes_id_title = []
        # Remove any previous entry for this page before re-appending it.
        self.changes_id_title = list(filter(lambda x:x != page_id_title,
                                            self.changes_id_title))
        self.changes_id_title.append(page_id_title)
        if len(self.changes_id_title) > 50:
            self.changes_id_title = self.changes_id_title[-50:]
        self.latest_change_time = page_time
        self.save()
class WikiGroup(db.Document):
    """Collection of Project Wiki groups.
    :param name_with_whitespace: group name with whitespace, for display
    :param name_no_whitespace: group name without whitespace, used as argument
    """
    name_with_whitespace = db.StringField()
    name_no_whitespace = db.StringField()
    # Whether the group is currently activated (visible/usable).
    active = db.BooleanField()
    meta = {'collection': 'wiki_group'}
class WikiLoginRecord(db.Document):
    """Audit trail of user logins (who, when, from where, with what client)."""
    username = db.StringField()
    timestamp = db.DateTimeField(default=datetime.now)
    browser = db.StringField()
    platform = db.StringField()
    details = db.StringField()
    ip = db.StringField()
    # Newest records first by default.
    meta = {
        'collection': 'wiki_login_record',
        'ordering': ['-timestamp']
    }
def render_wiki_link(group, page_id, page_title, tostring=True):
    """Build an <a class="wiki-page"> element linking to a wiki page.

    Returns the serialized HTML string when `tostring` is true, otherwise
    the Element itself.
    """
    link = etree.Element('a', attrib={
        'class': 'wiki-page',
        'href': '/{0}/{1!s}/page'.format(group, page_id)
    })
    link.text = page_title
    return etree.tostring(link, encoding='unicode') if tostring else link
def render_wiki_file(group, file_id, filename, tostring=True):
    """Build an <a class="wiki-file"> element wrapping a file icon plus name."""
    icon = etree.Element('img', attrib={
        'alt': 'file icon',
        'src': '/static/icons/file-icon.png',
        'width': '20',
        'height': '20'
    })
    icon.tail = filename
    anchor = etree.Element('a', attrib={'href': '/{}/file/{}?filename={}'.
                                        format(group, file_id, filename),
                                        'class': 'wiki-file'})
    anchor.append(icon)
    return etree.tostring(anchor, encoding='unicode') if tostring else anchor
def render_wiki_image(group, file_id, filename, tostring=True):
    """Build an <img class="wiki-file"> element referencing an uploaded image."""
    image = etree.Element('img', attrib={'src': '/{}/file/{}?filename={}'.
                                         format(group, file_id, filename),
                                         'class': 'wiki-file'})
    return etree.tostring(image, encoding='unicode') if tostring else image
|
988,819 | abdefc152064347bb69667b0854e22b55da98f99 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 09 21:32:57 2019
@author: Illusion
"""
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras import backend as K
from quiver_engine import server
# Theano-style channels-first ordering: input shapes are (channels, rows, cols).
K.set_image_dim_ordering('th')

# CNN: two conv blocks (32 then 64 filters, each conv+conv+pool+dropout)
# followed by a dense head with 5 sigmoid outputs (multi-label).
model = Sequential()
model.add(Convolution2D(32, kernel_size=(3, 3),padding='same',input_shape=(3 , 100, 100)))
model.add(Activation('relu'))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64,(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(5))
model.add(Activation('sigmoid'))
# let's train the model using SGD + momentum ----------------------------------
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
# Load pre-trained weights and launch the Quiver filter-visualization server.
# NOTE(review): assumes weights.hdf5 matches this architecture — confirm.
model.load_weights("weights.hdf5")
server.launch(model,input_folder='./',temp_folder='./filters')
|
988,820 | a31fb79fd472c98061c9c4bbafbc01c444e2f728 | catlog = ['Image Referenced'] |
988,821 | 001ef8c9b15be4a6ee266f85f068ac67274f4ea8 | #! /usr/bin/env python
def main():
    """Print every line of textfile.txt.

    Fixes over the original: the file handle is closed via a context
    manager (it was never closed), the redundant ``f.mode == 'r'`` check is
    dropped, and Python 3 print() calls replace Python 2 print statements.
    """
    with open("textfile.txt", "r") as f:
        # Iterating the handle yields one line at a time, newline included;
        # print() adds its own newline, matching the original double-spacing.
        for line in f:
            print(line)

if __name__ == '__main__':
    main()
988,822 | bedf8a7b2c3c195108b9f87a887dcbcbbe10d8c6 | # coding: utf-8
# The first 99 decimal digits of pi, stored as a string for prefix checks.
pi = (
    "3.141592653589793238462643383279"
    "50288419716939937510582097494459230"
    "7816406286208998628034825342117067"
)
import re
import utility
from commands import Command
def control_pi(argument):
    """Grade a user's claimed digits of pi against the module constant `pi`."""
    argument = argument.strip()
    if not argument:
        return "No argument given"
    if argument == "3":
        return "Congratulations, you have the knowledge of a three year old!"
    if len(argument) > 50:
        return "Copypasta is FTL!"
    # A correct answer is exactly a prefix of pi; subtract 2 for the "3.".
    if pi.startswith(argument):
        return "Congratulations, you know pi to %s decimals" % (len(argument)-2,)
    return "No no, %s isn't pi, try again, truncate this time." % argument
class picomp(Command):
    """Bot command plugin exposing `control_pi` as the pi trigger."""
    def __init__(self):
        pass
    def trig_pi(self, bot, source, target, trigger, argument):
        # bot/source/target/trigger belong to the Command interface; only the
        # free-text argument is used here.
        return control_pi(argument)
988,823 | df7b6a8626c19a654f99475fa45c70660f1eb743 | def StringMutation(string, position, char):
#To get substring = string[startIndex, length]
string = string[:position] + char + string[position+1:]
return string
# Read the subject string, then a position and replacement character, and
# print the mutated string. Locals renamed: the original bound the input
# string to `str`, shadowing the builtin.
text = input()
position, new_char = input().split()
print(StringMutation(text, int(position), new_char))
988,824 | 22d51684352655c2eadef6c9d46ee0e87016dc4f | """Modify the variables below to define the execution of the balancer.
Read the description (below each variable as comments) for each variable carefuly.
"""
## INPUT DEFINITION
###############################################################################
PATH_TO_CSV = 'survey.csv'
# Type string
# Path to the csv file containing the information gathered for each participant
COLUMN_MAPPER = {
    'Name': 'name',
    'If your team is less than 4 people would you like to have other students join your team to get to a total of 4?': 'team_open',
    'If you already have team members, please list the full name of all (max 4) team members below. (Write N/A if you are not part of a team)': 'teams',
    'What year at the university are you?': 'year',
    'How comfortable are you with python code?': 'python'
}
# Type dict
# map current column names to new names
# !!!!!!NOTE!!!!!!!! hereafter refer to the new column names in your variables
PARTICIPANT_COLUMN = 'name'
# Type string
# Column id in the input csv file uniquely identifying the participant
TEAM_CHOICE_COLUMN = 'teams'
# Type string
# Column id in the input csv file with desired teammates
# contents should be iterable eg. string containing commas
TEAM_OPEN_COLUMN = 'team_open'
# Type string
# Column id in the input csv indicating if their team is open for new participants
# Contents should be boolean, eg. "yes", "False", "1" etc.
BALANCE_VARIABLES = [
    # ('gender', 'at_least_or_none', ('female', 2)),
    ('year', 'weighted', 1.0),
    ('python', 'callable', 'square')
]
# Type list of tuple, each of len 3
# Each tuple contains in order:
# - Column id in the input csv of the variable
# - The type of variable, options below
# - A parameter associated with that type of variable
# Variable types:
# - 'weighted', simply weigh the value by a float. Parameter is float.
# - 'callable', apply a callable that returns a float to the value. Parameter is callable or string name of callable in `numpy`
# - 'at_least_or_none', indicates that the team should contain at least X or none of Y in the parameter (Y, X)
CUSTOM_VALUE_MAPPING = {
    'year': {'Senior': 4, 'Junior': 3, 'Sophomore': 2, 'Freshman':1}
}
# Dict of dict
# for a column name, provide a dictionary mapping current contents to desired contents

## ALGORITHM DEFINITION
###############################################################################
LOSS = "square"
# Type callable or name of callable in numpy that returns a float
# The function that is applied to the stdev of team scores to be minimized
TEAM_SCORING = 'mean'
# str, "mean" or "sum"
# how to treat participant scores to determine the team score
# mean is agnostic to team size, so a team of any size can score any value
# sum will produce lower scores for teams with fewer members
MAXIMUM_PARTICIPANTS = 4
# Type int
# the maximum number of participants per team
988,825 | 12393597e7b225d77bdcf3a84f25b39626349fc0 | """Current version of package bioinformatica."""
__version__ = "1.0.0" |
988,826 | ab9c7ab5f70d5fec0eb3a7b017818cd841445115 | class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
后面的一个数减前面一个数的max值
"""
if not prices :
return 0
max=0
min_price=prices[0]
for i in range(1,len(prices)):
profit=prices[i]-min_price
if prices[i] < min_price:
min_price = prices[i]
if profit > max:
max = profit
return max
def maxProfit2(self, prices):
"""
:type prices: List[int]
:rtype: int
后面的一个数减前面一个数的max值
使用 min,max 竟然更慢。。。。
"""
if not prices :
return 0
max_profit=0
min_price=prices[0]
for i in range(1,len(prices)):
profit=prices[i]-min_price
min_price= min(prices[i],min_price)
max_profit= max(profit > max_profit)
return max_profit
def maxProfit3(self, prices):
"""
:type prices: List[int]
:rtype: int
后面的一个数减前面一个数的max值
使用 for p in ps 竟然更慢。。。。
"""
if not prices :
return 0
max=0
min_price=prices[0]
for p in prices:
profit=p-min_price
if p < min_price:
min_price = p
if profit > max:
max = profit
return max
if __name__ == '__main__':
    import time
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for measuring elapsed wall time.
    start = time.perf_counter()
    s = Solution()
    print(s.maxProfit([7, 1, 5, 3, 6, 4]))
    print("Time Used: " + str(time.perf_counter() - start))
988,827 | 8cc414cfb312be50c3191fd525c11ec0edc8608d | class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
count = 0
for i, j in enumerate(nums):
if j != val:
nums[count] = j
count += 1
return count
# Quick manual check of Solution.removeElement.
sample_nums = [1, 2, 3, 4, 4, 4, 4, 5]
target_val = 4
solver = Solution()
outcome = solver.removeElement(sample_nums, target_val)
print("result:", outcome)
988,828 | b58c5a211f870e68a7232ed56af0e6969177df0a | from tkinter import *
from pickleTools import psave,pload
class MyTable(Frame):
    def __init__(self,value,total,headlie,headhang):
        """Initialize the allocation-table widget state.

        value: 2-D list of input amounts (rows x columns).
        total: per-column totals to distribute proportionally.
        headlie: column header labels. headhang: row header labels.
        """
        Frame.__init__(self)
        self.value = value
        self.total_lie=total          # per-column totals to distribute
        self.headlie=headlie          # column header labels
        self.headhang=headhang        # row header labels
        self.entryWidth=10            # Entry widget width in characters
        self.data=[]                  # scratch list used by csave/cload
        self.he_hang=[]               # per-row sums of `value`
        self.he_lie=[0]*len(self.value[0])  # per-column sums of `value`
        self.total_hang=[]            # per-cell allocated amounts
        self.total_one_hang=[]        # per-row allocated totals
        self.fpvalue=[]
        self.e=[]
        self.total_total=0            # grand total of allocations
        self.entry=[]
        self.textvar=[]
        self.var_total_lie=[]
        self.var_total_one_hang=[]
        self.var_head_hang=[]
        self.var_head_lie=[]
        self.var_total_toal=DoubleVar()
        self.cow = len(self.value) + 1      # grid rows incl. totals row
        self.colomn = len(self.value[0]) + 1  # grid columns incl. totals column
        # NOTE(review): `root` is expected to be a module-level Tk() instance
        # defined elsewhere in this file — confirm.
        self.table=Frame(root)
    def table_build(self):
        """(Re)build the whole table UI inside self.table from current state."""
        title="多项目比例分配汇总表"
        table_title = Frame(self.table)
        title_l=Label(table_title, text=title)
        title_l.pack()
        table_title.grid(row=0,columnspan=len(self.headlie))
        # Column headers (editable; any keystroke triggers a full reload).
        table_head_lie = Frame(self.table)
        self.thl_s=[]
        thl_e=[]
        for i in range(len(self.headlie)):
            self.thl_s.append(StringVar())
            thl_e.append(Entry(table_head_lie))
        for i in range(len(self.headlie)):
            self.thl_s[i].set(self.headlie[i])
            thl_e[i]["textvariable"]=self.thl_s[i]
            thl_e[i]["width"]=self.entryWidth
            thl_e[i].grid(row=0,column=i)
            thl_e[i].bind("<KeyRelease>", self.reload)
        table_head_lie.grid(row=1,columnspan=len(self.headlie))
        # Row headers (editable).
        table_head_hang=Frame(self.table)
        self.thh_s=[]
        thh_e=[]
        for i in range(len(self.headhang)):
            self.thh_s.append(StringVar())
            thh_e.append(Entry(table_head_hang))
        for i in range(len(self.headhang)):
            self.thh_s[i].set(self.headhang[i])
            thh_e[i]["textvariable"]=self.thh_s[i]
            thh_e[i]["width"]=self.entryWidth
            thh_e[i].grid(row=i,column=0)
            thh_e[i].bind("<KeyRelease>", self.reload)
        table_head_hang.grid(row=2,column=0,rowspan=2)
        # Core editable value cells backed by self.s DoubleVars.
        table_core=Frame(self.table)
        self.s = []
        e = []
        for i in range(len(self.value)):
            ss=[]
            ee=[]
            for j in range(len(self.value[0])):
                # print(i,j)
                ss.append(DoubleVar())
                ee.append(Entry(table_core))
            self.s.append(ss)
            e.append(ee)
        # print("++++++++++++")
        for i in range(len(self.value)):
            for j in range(len(self.value[0])):
                # print(i, j)
                self.s[i][j].set(format( self.value[i][j],'.0f'))
                e[i][j]["textvariable"]=self.s[i][j]
                e[i][j]["width"]=self.entryWidth
                e[i][j].grid(row=i,column=j)
                e[i][j].bind("<KeyRelease>", self.reload)
        # table_core.grid(row=2,column=1,columnspan=len(self.value))
        table_core.grid(row=2, column=1)
        # Per-column totals to distribute (editable).
        table_total_lie=Frame(self.table)
        self.ttl_s = []
        ttl_e = []
        for i in range(len(self.total_lie)):
            self.ttl_s.append(DoubleVar())
            ttl_e.append(Entry(table_total_lie))
        for i in range(len(self.total_lie)):
            self.ttl_s[i].set(self.total_lie[i])
            ttl_e[i]["textvariable"] = self.ttl_s[i]
            ttl_e[i]["width"] = self.entryWidth
            ttl_e[i].grid(row=0, column=i)
            # ttl_e[i].bind("<KeyRelease>", self.reload)
        table_total_lie.grid(row=3, column=1,columnspan=len(self.total_lie))
        # Per-row computed totals (read-only).
        table_total_hang = Frame(self.table)
        self.tth_s = []
        tth_e = []
        for i in range(len(self.total_one_hang)):
            self.tth_s.append(DoubleVar())
            tth_e.append(Entry(table_total_hang))
        for i in range(len(self.total_one_hang)):
            self.tth_s[i].set(format( self.total_one_hang[i],'.2f'))
            tth_e[i]["textvariable"] = self.tth_s[i]
            tth_e[i]["width"] = self.entryWidth
            tth_e[i].grid(row=i, column=0)
            tth_e[i]["state"] = 'disabled'
            # ttl_e[i].bind("<KeyRelease>", self.reload)
        table_total_hang.grid(row=2, column=len(self.headlie)-1)
        # Grand total (read-only).
        table_total=Frame(self.table)
        tt_s=DoubleVar()
        tt_e=Entry(table_total)
        tt_s.set(format(self.total_total, '.2f'))
        tt_e["textvariable"] = tt_s
        tt_e["width"] = self.entryWidth
        tt_e.grid()
        tt_e["state"] = 'disabled'
        table_total.grid(row=3,column=len(self.headlie)-1)
        # Load/save buttons (labels are user-facing Chinese strings).
        btnLoad = Button(self.table, text="加载", command=self.cload)
        btnLoad.grid(row=4,column=0)
        btnSave = Button( self.table ,text="保存", command=self.csave)
        btnSave.grid(row=4,column=1)
        self.table.pack()
    def test(self):
        """Initial entry point: compute the allocation and build the UI."""
        self.fenpei()
        self.total_add()
        self.table_build()
def add_all_hang(self,value_in,value_out):#行相加
value_out.clear()
for i in range(len(value_in)):
h = 0
for v in value_in[i]:
h+=v
value_out.append(h)
# print(self.he[0],self.he)
def add_all_lie(self):
# h = [0]*len(self.value[0])
self.he_lie = [0] * len(self.value[0])
for v in self.value:
for i in range(len(v)):
self.he_lie[i] += v[i]
# print(self.he_lie)
def total_add(self):
self.total_total=0
for v in self.total_one_hang:
self.total_total+=v
def fenpei(self):
self.add_all_hang(self.value,self.he_hang)
self.add_all_lie()
# print(self.he_hang)
# print(self.he_lie)
self.total_hang.clear()
for i in range(len(self.value)):
h = []
for j in range(len(self.value[i])):
# print(i,j,self.value[i][j])
# print(self.value[i][j]/self.he_lie[j]*self.total_lie[j])
h.append(self.value[i][j]/self.he_lie[j]*self.total_lie[j])
# print(i,j,h)
self.total_hang.append(h)
# print(self.total_hang)
self.add_all_hang(self.total_hang,self.total_one_hang)
# print(self.total_one_hang)
    def updatevalue(self, event=0):
        """Copy the edited widget values back into the model lists.

        (Translated original note) Roughly mirrors the entry-add flow:
        refresh self.value etc. from the Tk variables, after which calling
        the allocation (fenpei) should be sufficient.
        """
        # Core table cells -> self.value.
        for i in range(len(self.value)):
            for j in range(len(self.value[i])):
                self.value[i][j]=self.s[i][j].get()
        # Bottom-row column totals -> self.total_lie.
        for i in range(len(self.total_lie)):
            self.total_lie[i]=self.ttl_s[i].get()
        # NOTE(review): self.thh_s / self.thl_s are not assigned anywhere in
        # the visible code (table_build only sets ttl_s/tth_s) — confirm
        # these attributes are created elsewhere, otherwise the loops below
        # raise AttributeError.
        for i in range(len(self.headhang)):
            self.headhang[i]=self.thh_s [i].get()
        for i in range(len(self.headlie)):
            self.headlie[i]=self.thl_s[i].get()
    def reload(self,event=0):
        """Key-release handler: sync widget values, recompute, and redraw.

        Bound to <KeyRelease> on the editable cells in table_build().
        """
        self.updatevalue()
        self.fenpei()
        self.total_add()
        self.table_build()
    def reloadfromfile(self):
        """Recompute and redraw after the model was replaced from disk.

        Unlike reload(), this skips updatevalue() because the fresh data
        did not come from the Entry widgets.
        """
        self.fenpei()
        self.total_add()
        self.table_build()
    def t2(self):
        """Alternative driver using the entry-based UI.

        NOTE(review): entry_init()/entry_add() are not defined in the
        visible portion of this class — presumably an older UI path;
        confirm they still exist before calling.
        """
        self.fenpei()
        self.total_add()
        self.entry_init()
        self.entry_add()
def csave(self):
self.data.clear()
self.data.append(self.value)
self.data.append(self.total_lie)
self.data.append(self.headhang)
self.data.append(self.headlie)
psave(self.data)
data=pload()
print(data)
def cload(self):
data=pload()
# print(data)
self.value=[]
self.total_lie=[]
self.headhang=[]
self.headlie=[]
self.value=data[0]
self.total_lie=data[1]
self.headhang=data[2]
self.headlie=data[3]
# print(self.value)
self.reloadfromfile()
if __name__ == "__main__":
    # Demo driver: build a sample allocation table and start the Tk loop.
    # NOTE(review): `root` appears unused below — MyTable presumably creates
    # its own window; confirm.
    root = Tk()
    # (translated) the grid "column" option defaults to 0
    value = [[1, 2,2,1], [1, 1,2,1], [2, 3,1,1], [4, 0,3,1],[3,5,7,1]]
    total_lie = [888, 600,89,734]
    headlie=["姓名","项目1","项目2","项目3","ff","总和"]
    headhang=["张三","李四","王五","赵柳","找齐","金额"]
    app = MyTable(value,total_lie,headlie,headhang)
    # # app.t1()
    # app.t2()
    app.test()
    app.mainloop()
|
988,829 | 4bb2a2d7bccf2fe24c734f33e6a2c46fb5c3c6a2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: Gözde Gül Şahin
Test Stack Generalizer Model
"""
import subprocess
import argparse
from IO.conllWriter import *
from IO.util import *
from loader import *
from scorer import *
def main():
    """Parse command-line arguments for ensemble SRL evaluation and run test().

    Accepts up to seven base-learner checkpoint directories
    (-save_dir1 .. -save_dir7), the ensemble model/output directories,
    the test file, the language code and a GPU id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-test_file', type=str, default='data/CoNLL2009-ST-Turkish/CoNLL2009-ST-evaluation-Turkish.txt',
                        help="test file")
    # The seven copy-pasted -save_dirN declarations are generated in a loop;
    # the CLI surface (names, defaults) is unchanged.
    for i in range(1, 8):
        parser.add_argument('-save_dir%d' % i, required=False, default=None,
                            help="directory of the checkpointed models")
    parser.add_argument('-ens_model_dir', required=False, default=None,
                        help="directory of the checkpointed ensemble model")
    parser.add_argument('-ens_save_dir', required=False, default=None,
                        help="directory where ensemble outputs are written")
    # Fixed help text: it previously repeated "directory of the checkpointed
    # models" by copy-paste.
    parser.add_argument('-lang', type=str, default='tur',
                        help='language code of the dataset (e.g. tur, fin)')
    parser.add_argument('-gpuid', type=int, default=0, help='Id of the GPU to run')
    args = parser.parse_args()
    test(args)
def test(test_args):
    """Evaluate a stack-generalizer (ensemble) SRL model on a CoNLL test file.

    Loads each base-learner checkpoint listed in -save_dir1..7, the
    ensemble model, runs testRoleLabelsEnsembleLearner, writes the system
    and (for tur/fin) gold CoNLL files, invokes the eval09.pl scorer and
    logs self-computed scores under test_args.ens_save_dir.
    """
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(test_args.gpuid)
    # global settings
    goldFile = test_args.test_file
    experiments = [test_args.save_dir1,test_args.save_dir2,test_args.save_dir3,test_args.save_dir4, \
                   test_args.save_dir5,test_args.save_dir6,test_args.save_dir7]
    predictedSenseSents = None
    # Ensure the output directory exists.
    try:
        os.stat(test_args.ens_save_dir)
    except:
        os.mkdir(test_args.ens_save_dir)
    models_lst = []
    test_data_lst = []
    role_to_ix = {}
    ldr_gen = None
    for model_dir in experiments:
        # Directories are filled from slot 1 upward; the first empty slot
        # ends the list.
        if model_dir==None:
            break
        # Each checkpoint dir carries its own pickled training config.
        with open(os.path.join(model_dir, 'config.pkl'), 'rb') as f:
            args = pickle.load(f)
        args.save_dir = model_dir
        args.batch_size = 1
        ldr = Loader(args, test_file=goldFile, save_dir = model_dir, train=False, test=True)
        # The first loader's role vocabulary is reused for all learners
        # and for writing the output files.
        if len(role_to_ix)==0:
            role_to_ix = ldr.role_to_ix
            ldr_gen = ldr
        # Base learners
        test_data = ldr.getData(ldr.test_data, train=False)
        model_path, _ = get_last_model_path(model_dir)
        mtest = torch.load(model_path)
        # Load ensemble
        ens_model_path, _ = get_last_model_path(test_args.ens_model_dir)
        mensemble = torch.load(ens_model_path)
        if args.use_cuda:
            mtest = mtest.cuda()
        # change all batch sizes to 1
        mtest.batch_size = 1
        if mtest.subwordModel != None:
            mtest.subwordModel.batch_size = 1
        models_lst.append(mtest)
        test_data_lst.append(test_data)
    print("Begin testing...")
    plst, glst, num_corr_sr, num_found_sr, num_gold_sr = testRoleLabelsEnsembleLearner(models_lst, mensemble, test_data_lst, role_to_ix,
                                                                                      mode="eval", type="simple")
    # Write results
    systemFilePath = os.path.join(test_args.ens_save_dir, "system.conll")
    conllOut = codecs.open(systemFilePath, "w", encoding='utf-8')
    if (test_args.lang=="fin"):
        writeCoNLLUD(conllOut, ldr_gen, plst, predictedSenseSents)
    else:
        writeCoNLL(conllOut, ldr_gen, plst, predictedSenseSents)
    # necesary for copula handling in conll09 files
    if (test_args.lang in ["tur", "fin"]):
        goldFile = os.path.join(test_args.ens_save_dir, "goldTest.conll")
        goldConllOut = codecs.open(os.path.join(test_args.ens_save_dir, "goldTest.conll"), "w", encoding='utf-8')
        if (test_args.lang=="fin"):
            writeCoNLLUD(goldConllOut, ldr_gen, glst)
        else:
            writeCoNLL(goldConllOut, ldr_gen, glst)
    # run eval09 script
    scoreOut = codecs.open(os.path.join(test_args.ens_save_dir, "eval09_analysis.out"), "w", encoding='utf-8')
    subprocess.call(["perl", "eval09.pl","-g", goldFile,"-s" ,systemFilePath], stdout=scoreOut)
    # run self evaluator and write to test.log file
    log_out = open(os.path.join(test_args.ens_save_dir, "test_scores.log"), "w")
    writeScores(num_corr_sr, num_found_sr, num_gold_sr, log_out)
if __name__ == "__main__":
    # Script entry point.
    main()
|
988,830 | 8592378886ee42d3e56ac226002e5b6c35318032 | #!/usr/bin/env python3
# from numba import njit
import itertools
from bisect import bisect_right,bisect_left
INF = 10**10
# @njit
def solve(n, m, x):
    """Minimum total distance to cover all m sorted coordinates with n pieces.

    With n >= m every coordinate gets its own piece, so no movement is
    needed.  Otherwise the answer is the span x[-1] - x[0] minus the
    (n - 1) largest gaps between consecutive coordinates: each extra
    piece beyond the first lets us skip one gap entirely.

    Args:
        n: number of pieces.
        m: number of coordinates (len(x)).
        x: coordinates, sorted ascending.

    Returns:
        The minimal total movement.
    """
    if n >= m:
        return 0
    # sorted(..., reverse=True) replaces list(sorted(...))[::-1]; summing a
    # slice replaces the manual subtraction loop (n < m here, so
    # n - 1 <= m - 2 and the old min(m-1, n-1) is just n - 1).
    gaps = sorted((x[i + 1] - x[i] for i in range(m - 1)), reverse=True)
    return x[-1] - x[0] - sum(gaps[:n - 1])
def main():
    """Read N, M and the coordinate list from stdin, then print the answer."""
    n, m = (int(tok) for tok in input().split())
    coords = sorted(int(tok) for tok in input().split())
    print(solve(n, m, coords))
if __name__ == '__main__':
    # Script entry point.
    main()
|
988,831 | a8560b6ddd95f61ecc6e781fc99546001d44bc4a | import json
import zipfile
import difflib
import os
import pymysql
import re
import csv
from datetime import datetime
import boto3 #used to connect to aws servies
from io import BytesIO #used to convert file into bytes in order to unzip
import dateutil.tz
###############################################################################################################################
import file_validator_object
from prior_test_result_validator import prior_test_result_validator
from demographic_data_validator import demographic_data_validator
from Biospecimen_validator import Biospecimen_validator
from other_files_validator import other_files_validator
###############################################################################################################################
def lambda_handler(event, context):
    """Validate a zipped CBC data submission dropped into S3.

    Triggered by an S3 put event.  Copies the submission to the output
    bucket, unzips it, checks file names / column headers / submission
    metadata, then runs the per-file validators and uploads any error
    reports.  Fixes vs. the previous revision: the non-zip error path now
    passes error_msg to write_error_message (it previously raised
    TypeError), the MySQL connection is actually closed at the end
    (conn.close() was missing its parentheses), and bare excepts were
    narrowed to Exception.
    """
    s3_client = boto3.client("s3")
    s3_resource = boto3.resource("s3")
    ssm = boto3.client("ssm")
    bucket_name = event["Records"][0]["s3"]["bucket"]["name"]
    key_name = event["Records"][0]["s3"]["object"]["key"]
###############################################################################################################################
## user variables
    list_of_valid_file_names =(['Demographic_Data.csv','Prior_Test_Results.csv','Confirmatory_Test_Results.csv','Assay_Metadata.csv',
                                'Assay_Target.csv','Biospecimen_Metadata.csv','Aliquot_Metadata.csv','Equipment_Metadata.csv','Reagent_Metadata.csv',
                                'Consumable_Metadata.csv','Submission_Metadata.csv'])
    host_client = ssm.get_parameter(Name="db_host", WithDecryption=True).get("Parameter").get("Value")
    user_name = ssm.get_parameter(Name="lambda_db_username", WithDecryption=True).get("Parameter").get("Value")
    user_password =ssm.get_parameter(Name="lambda_db_password", WithDecryption=True).get("Parameter").get("Value")
    file_dbname = ssm.get_parameter(Name="jobs_db_name", WithDecryption=True).get("Parameter").get("Value")
    pre_valid_db = ssm.get_parameter(Name="Prevalidated_DB", WithDecryption=True).get("Parameter").get("Value")
    output_bucket_name = "data-validation-output-bucket"
    CBC_submission_name = "CBC_Name"
    eastern = dateutil.tz.gettz('US/Eastern')
###############################################################################################################################
## check if submitted file is a zip and extract contents of file
    CBC_submission_info = datetime.now(tz=eastern).strftime("%Y-%m-%d-%H-%M") + "_" + key_name
    try:
        new_key = CBC_submission_name+'/'+ CBC_submission_info +'/' + key_name
        copy_source = {'Bucket': bucket_name,'Key': key_name}
        s3_resource.meta.client.copy(copy_source, output_bucket_name, new_key)  #copy submitted file to output bucket
    except Exception:
        print("why does this crash, second loop?")
        return{}
    if(str(key_name).endswith('.zip')):  #if submitted file is a zip, unzip contents to directory
        try:
            zip_obj = s3_resource.Object(bucket_name = bucket_name, key = key_name)
            buffer = BytesIO(zip_obj.get()["Body"].read())
            z = zipfile.ZipFile(buffer)
            listOfFileNames = z.namelist()
            for filename in listOfFileNames:  #loops over each file in the zip and writes to output bucket
                file_info = z.getinfo(filename)
                if(str(filename).endswith('.zip')):  #only move files that are not *.zip, orginal zip has different location
                    print('## zip file does not need to be coppied over, not moving')
                else:
                    new_key = CBC_submission_name+'/'+ CBC_submission_info +'/'+ 'Submitted_Files/'+ filename
                    print("##unziped file location :: " + output_bucket_name + "/" + new_key)
                    response = s3_resource.meta.client.upload_fileobj(z.open(filename),Bucket = output_bucket_name, Key = new_key)
        except Exception:
            s3_file_path = CBC_submission_name+'/'+ CBC_submission_info
            error_msg = "Zip file was found, but not able to open. Unable to Process Submission"
            write_error_message(s3_resource,output_bucket_name,s3_file_path,error_msg)  #if submited file is not a zip, write error message
            return{}
    else:
        s3_file_path = CBC_submission_name+'/'+ CBC_submission_info
        error_msg = "Submitted file is not a valid Zip file, Unable to Process Submission"
        # BUG FIX: error_msg was previously not passed, raising TypeError.
        write_error_message(s3_resource,output_bucket_name,s3_file_path,error_msg)  #if submited file is not a zip, write error message
        return{}
    s3_resource.Object(bucket_name, key_name).delete()  #once all copying has been done, delete orgional file
########################################################################################################################
# compare contents of file to valid list of names for spelling or duplicate entries, plus check for csv extensions
    list_copy = [item.lower() for item in listOfFileNames]
    sort_idx = [i[0] for i in sorted(enumerate(list_copy), key=lambda x:x[1])]
    listOfFileNames = [listOfFileNames[i] for i in sort_idx]
    submission_error_list = [['File_Name','Column_Name','Error_Message']]
    for uni_id in listOfFileNames:
        if (uni_id.find('.csv') > 0) == False:
            submission_error_list.append([uni_id,"All Columns","File Is not a CSV file, Unable to Process"])
        indices = [i for i, x in enumerate(listOfFileNames) if x == uni_id]  #checks for duplicate file name entries
        if len(indices) > 1:
            submission_error_list .append([uni_id,"All Columns","Filename was found " + str(len(indices)) + " times in submission, Can not process multiple copies"]);
        wrong_count = len(uni_id)
        # Fuzzy-match against the canonical file names to distinguish typos
        # from unrecognized files.
        for valid in list_of_valid_file_names:
            sequence = difflib.SequenceMatcher(isjunk = None,a = uni_id,b = valid).ratio()
            matching_letters = (sequence/2)*(len(valid) + len(uni_id))
            wrong_letters = len(uni_id) - matching_letters
            if wrong_letters < wrong_count:
                wrong_count = wrong_letters
        if wrong_count == 0:
            pass  #perfect match, no errors
        elif wrong_count <= 3:  #up to 3 letters wrong, possible mispelled
            submission_error_list.append([uni_id,"All Columns","Filename was possibly alterted, potenial typo, please correct and resubmit file"])
        elif wrong_count > 3:  #more then 3 letters wrong, name not recongized
            submission_error_list.append([uni_id,"All Columns","Filename was not recgonized, please correct and resubmit file"])
    error_count = len(submission_error_list) - 1
    if (error_count) > 0:
        print("Submitted Files Names have been checked for spelling errors, extra files or duplicate entries. " + str(error_count) + " errors were found. \n")
        print(submission_error_list)
######################################################################################################################
# open each file and compare header list to mysql database to validate all column names exist and spelled correctly
    conn = connect_to_sql_database(host_client,pre_valid_db,user_name,user_password)
    if conn == 0:
        write_submission_error_csv(s3_resource,output_bucket_name,s3_file_path,"Submission_Error_List.csv",submission_error_list)
        print("Unable to connect to mySQL database to preform column name validation. Terminating Validation Process")
        return{}
    if len(submission_error_list) > 1:
        error_files = [i[1][0] for i in enumerate(submission_error_list)]
        error_files = error_files[1:]
        listOfFileNames = [i for i in listOfFileNames if i not in error_files]
    current_error_count = 0; current_demo = [];
    for test_name in enumerate(listOfFileNames):
        test_object = file_validator_object.Submitted_file(test_name[1],' ')  #create the file object
        test_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,test_name[1])
        # Map each submission file to the MySQL table(s) whose schema
        # defines its expected columns.
        if test_name[1] == "Prior_Test_Results.csv" :
            mysql_table_list = ['Prior_Test_Result']
        elif test_name[1] == "Demographic_Data.csv":
            mysql_table_list = ["Demographic_Data","Prior_Covid_Outcome","Comorbidity"]
        elif test_name[1] == "Assay_Metadata.csv":
            mysql_table_list = ["Assay_Metadata"]
        elif test_name[1] == "Assay_Target.csv" :
            mysql_table_list = ["Assay_Target"]
        elif test_name[1] == "Confirmatory_Test_Results.csv":
            mysql_table_list = ["Confirmatory_Test_Result"]
        elif test_name[1] == "Biospecimen_Metadata.csv":
            mysql_table_list = ["Biospecimen","Collection_Tube"]
        elif test_name[1] == "Aliquot_Metadata.csv":
            mysql_table_list = ["Aliquot","Aliquot_Tube"]
        elif test_name[1] == "Equipment_Metadata.csv":
            mysql_table_list = ["Equipment"]
        elif test_name[1] == "Reagent_Metadata.csv":
            mysql_table_list = ["Reagent"]
        elif test_name[1] == "Consumable_Metadata.csv":  #consumable table does not exist at this time
            mysql_table_list = ["Consumable"]
        elif test_name[1] == "Submission_Metadata.csv":
            submitting_center = test_object.Column_Header_List[1]
            Number_of_Research_Participants = int(test_object.Data_Table.iloc[1][1]);
            Number_of_Biospecimens = int(test_object.Data_Table.iloc[2][1])
            count_table = test_object.Data_Table[submitting_center].value_counts()
            submit_file_count = count_table[count_table.index == 'X'][0] + 1;  #add one for submision metadata
            submit_list = test_object.Data_Table[test_object.Data_Table[submitting_center] == 'X']['Submitting Center'].tolist()
            submit_to_file = [i for i in submit_list if i not in listOfFileNames]  #in submission, not in zip
            file_to_submit = [i for i in listOfFileNames if i not in submit_list]  #in zip not in submission metadata
            continue
        else:
            print(test_name[1] + " was not found, unable to check")
            continue
        if test_name[1] != "submission_metadata.csv":
            test_object.compare_csv_to_mysql(pre_valid_db,mysql_table_list,conn)
            if test_name[1] == 'Demographic_Data.csv':
                current_demo = test_object.Data_Table
                current_demo = current_demo['Research_Participant_ID'].tolist()
            if len(test_object.header_name_validation) > 1:
                submission_error_list.append(test_object.header_name_validation[1:][0])
                current_error_count = current_error_count + 1;
    if current_error_count > 0:
        print("Column names in each submitted file have been checked for spelling errors,")
        print("extra columns, missing or duplicate entries: " + str(current_error_count) + " errors were found. \n")
    error_count = error_count + current_error_count
    current_error_count = 0
    # Cross-check the submission metadata checklist against the files that
    # actually arrived in the zip.
    if len(submitting_center) == 0:
        error_msg = "Submission_Metadata.csv was not found in submission zip file"
        submission_error_list.append(["submission_metadata.csv","All Columns",error_msg])
        current_error_count = current_error_count + 1;
    if len(listOfFileNames) != submit_file_count:
        error_msg = ("Expected: " + str(submit_file_count) + " files.  Found " + str(len(listOfFileNames)) + " files in submission")
        submission_error_list.append(["submission_metadata.csv","List of File Names",error_msg])
        current_error_count = current_error_count + 1;
    if len(file_to_submit) > 0:
        for i in file_to_submit:
            if i == "Submission_Metadata.csv":
                pass
            else:
                error_msg = "file name was found in the submitted zip, but was not checked in submission metadata.csv"
                submission_error_list.append(["submission_metadata.csv",i,error_msg])
                current_error_count = current_error_count + 1;
    if len(submit_to_file) > 0:
        for i in submit_to_file:
            error_msg = "file name was checked in submission metadata.csv, but was not found in the submitted zip file"
            submission_error_list.append(["submission_metadata.csv",i,error_msg])
            current_error_count = current_error_count + 1;
    if current_error_count > 0:
        print("Submission metadata has been checked, comparing user inputs to actual files found in submission: " +
              str(current_error_count) + " errors were found.\n")
    error_count = error_count + current_error_count
    if error_count > 0:
        print("A Total of " + str(error_count) + " errors were found in the submission file, please correct and Resubmit")
        print("Terminating Validation Process")
        conn.close()
        write_submission_error_csv(s3_resource,output_bucket_name,s3_file_path,"Submission_Error_List.csv",submission_error_list)
        return{}
    print("### Submission validation was sucessfull.  No Errors were found ###")
    print("### Proceeding to check each csv file for validation ###")
    del count_table,current_error_count,file_to_submit,i,indices,list_copy,matching_letters,Number_of_Biospecimens
    del Number_of_Research_Participants,sequence,sort_idx,submit_file_count,submit_list,submit_to_file,test_name
    del test_object,uni_id,valid,wrong_count,wrong_letters
#####################################################################################################
## if no submission errors, pull key peices from sql schema and import cbc id file
    pos_list,neg_list = file_validator_object.get_mysql_queries(pre_valid_db,conn,1)
    assay_results,assay_target = file_validator_object.get_mysql_queries(pre_valid_db,conn,2)
    participant_ids,biospec_ids = file_validator_object.get_mysql_queries(pre_valid_db,conn,3)
    if len(current_demo) > 0:
        current_demo = (set(participant_ids['Research_Participant_ID'].tolist() + current_demo))
        current_demo = [x for x in current_demo if x == x]
    sql_connect = conn.cursor()
    table_sql_str = ("SELECT * FROM `" + pre_valid_db + "`.`Seronet_CBC_ID`")
    query_res = sql_connect.execute(table_sql_str)
    rows = sql_connect.fetchall()
    valid_cbc_ids = [i[1] for i in enumerate(rows) if rows[i[0]][1] == submitting_center]
    print("## The CBC Name is: " + valid_cbc_ids[0][1] + " and the submission code is: " + str(valid_cbc_ids[0][0]))
    s3_file_path = CBC_submission_name+'/'+ CBC_submission_info + '/' + 'Validation_Errors'
######################################################################################################################################################################################################
    if "Prior_Test_Results.csv" in listOfFileNames:
        prior_valid_object = file_validator_object.Submitted_file("Prior_Test_Results.csv",'Research_Participant_ID')
        prior_valid_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Prior_Test_Results.csv')
        pos_list,neg_list = file_validator_object.split_participant_pos_neg_prior(prior_valid_object,pos_list,neg_list)
        prior_valid_object = prior_test_result_validator(prior_valid_object,neg_list,pos_list,re,valid_cbc_ids,current_demo)
        prior_valid_object.write_error_file("Prior_Test_Results_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
######################################################################################################################################################################################################
    if "Demographic_Data.csv" in listOfFileNames:
        demo_data_object = file_validator_object.Submitted_file("Demographic_Data.csv",'Research_Participant_ID')
        demo_data_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Demographic_Data.csv')
        demo_data_object = demographic_data_validator(demo_data_object,neg_list,pos_list,re,valid_cbc_ids)
        demo_data_object.write_error_file("Demographic_Data_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
######################################################################################################################################################################################################
    if "Biospecimen_Metadata.csv" in listOfFileNames:
        Biospecimen_object = file_validator_object.Submitted_file("Biospecimen_Metadata.csv",'Biospecimen_ID')
        Biospecimen_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Biospecimen_Metadata.csv')
        Biospecimen_object.get_pos_neg_logic(pos_list,neg_list)
        biospec_ids = biospec_ids.append(Biospecimen_object.Data_Table[['Biospecimen_ID','Biospecimen_Type']])
        Biospecimen_object = Biospecimen_validator(Biospecimen_object,neg_list,pos_list,re,valid_cbc_ids,current_demo)
        Biospecimen_object.write_error_file("Biospecimen_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
###############################################################################################################################
    if "Aliquot_Metadata.csv" in listOfFileNames:
        Aliquot_object = file_validator_object.Submitted_file("Aliquot_Metadata.csv",['Aliquot_ID','Biospecimen_ID'])
        Aliquot_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Aliquot_Metadata.csv')
        Aliquot_object = other_files_validator(Aliquot_object,re,valid_cbc_ids,biospec_ids,"Aliquot_Errors_Found.csv")
        Aliquot_object.write_error_file("Aliquot_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
###############################################################################################################################
    if "Equipment_Metadata.csv" in listOfFileNames:
        Equipment_object = file_validator_object.Submitted_file("Equipment_Metadata.csv",['Equipment_ID','Biospecimen_ID'])
        Equipment_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Equipment_Metadata.csv')
        Equipment_object = other_files_validator(Equipment_object,re,valid_cbc_ids,biospec_ids,"Equipment_Errors_Found.csv")
        Equipment_object.write_error_file("Equipment_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
###############################################################################################################################
    if "Reagent_Metadata.csv" in listOfFileNames:
        Reagent_object = file_validator_object.Submitted_file("Reagent_Metadata.csv",['Biospecimen_ID','Reagent_Name'])
        Reagent_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Reagent_Metadata.csv')
        Reagent_object = other_files_validator(Reagent_object,re,valid_cbc_ids,biospec_ids,"Reagent_Errors_Found.csv")
        Reagent_object.write_error_file("Reagent_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
###############################################################################################################################
    if "Consumable_Metadata.csv" in listOfFileNames:
        Consumable_object = file_validator_object.Submitted_file("Consumable_Metadata.csv",['Biospecimen_ID','Consumable_Name'])
        Consumable_object.load_csv_file(s3_client,output_bucket_name,CBC_submission_name,CBC_submission_info,'Consumable_Metadata.csv')
        Consumable_object = other_files_validator(Consumable_object,re,valid_cbc_ids,biospec_ids,"Consumable_Errors_Found.csv")
        Consumable_object.write_error_file("Consumable_Errors_Found.csv",s3_resource,s3_file_path,output_bucket_name)
###############################################################################################################################
    print("Connection to RDS mysql instance is now closed")
    print("Validation Function is Complete")
    # BUG FIX: conn.close (no parentheses) never actually closed the
    # connection.
    conn.close()
    return{}
###############################################################################################################################
def connect_to_sql_database(host_client, dbname, user_name, user_password):  #connect to mySQL database
    """Open a pymysql connection to the given database.

    Returns:
        A live pymysql connection on success, or the sentinel 0 on failure
        (existing callers check ``conn == 0``).
    """
    conn = 0
    try:
        conn = pymysql.connect(host = host_client, user=user_name, password=user_password, db=dbname, connect_timeout=5)
        print("SUCCESS: Connection to RDS mysql instance succeeded\n")
    except Exception:
        # Narrowed from a bare except; the 0 sentinel is preserved for
        # backward compatibility.  (The unused `port = 3306` local was
        # removed.)
        print("ERROR: Unexpected error: Could not connect to MySql instance.\n")
    return conn  #return connection variable to be used in queries
##writes text file if submitted file is not a zip or unable to open the zip file
##writes text file if submitted file is not a zip or unable to open the zip file
def write_error_message(s3_resource, output_bucket_name, s3_file_path, error_msg):
    """Write error_msg to /tmp/Submission_Error.txt and upload it to S3.

    Used when the submission cannot be processed at all (not a zip, or an
    unreadable zip).  The file lands at <s3_file_path>/Submission_Error.txt
    in output_bucket_name.
    """
    file_name = "Submission_Error.txt"
    lambda_path = "/tmp/" + file_name
    # The with-statement closes the file; the old explicit file.close()
    # inside the block was redundant.
    with open(lambda_path, 'w+') as file:
        file.write(error_msg)
    s3_file_path = s3_file_path + "/" + file_name
    s3_resource.meta.client.upload_file(lambda_path, output_bucket_name, s3_file_path)
    print("## Errors were found in the Submitted File.  Ending Validation ##")
##writes a csv file containing any errors found in submission validation
##writes a csv file containing any errors found in submission validation
def write_submission_error_csv(s3_resource, output_bucket_name, s3_file_path, file_name, list_of_elem):
    """Write list_of_elem (a list of rows) to /tmp/<file_name> as CSV and
    upload it to <s3_file_path>/<file_name> in output_bucket_name.
    """
    lambda_path = "/tmp/" + file_name
    with open(lambda_path, 'w+', newline='') as csvfile:  #w is new file, a+ is append to file
        # writerows replaces the previous manual enumerate loop.
        csv.writer(csvfile).writerows(list_of_elem)
    s3_file_path = s3_file_path + "/" + file_name
    s3_resource.meta.client.upload_file(lambda_path, output_bucket_name, s3_file_path)
988,832 | fa19b8ca7d0dbb29b7e9c94ca3c4b3f0b16efec0 | """Contains a Node class which implements an AVL binary search tree.
Each node can be considered a binary search tree and has the usual
methods to insert, delete, and check membership of nodes. By default,
the insert and delete methods will perform self-balancing consistent
with an AVL tree. This behavior can be suppressed by passing the optional
'balanced=False' keyword argument to the insert or delete methods.
The class also supports four traversal methods which return generators:
- in_order
- pre_order
- post_order
- breadth_first.
Additionally, methods are included to help visualize the tree structure.
get_dot returns DOT source code, suitable for use with programs such as
Graphviz (http://graphviz.readthedocs.org/en/stable/index.html), and
save_render saves a rendering of the tree structure to the file system.
Passing the optional 'render=True' keyword argument to the insert and
delete methods will automatically save a render to disk upon execution.
Finally, the helper methods 'create_best_case' and 'create_worst_case'
facilitate creation of tree composeds of _n_ integers.
This module was completed with reference to the following:
'Binary Search Tree libary in Python'
(http://www.laurentluce.com/posts/binary-search-tree-library-in-python/)
by Laurent Luce.
'How to Balance your Binary Search Trees - AVL Trees'
(https://triangleinequality.wordpress.com/2014/07/15/how-to-balance-your-binary-search-trees-avl-trees/)
'The AVL Tree Rotations Tutorial'
(http://pages.cs.wisc.edu/~paton/readings/liblitVersion/AVL-Tree-Rotations.pdf)
by John Hargrove
"""
from __future__ import print_function
from __future__ import unicode_literals
import random
from queue import Queue
class Node(object):
"""A class for a binary search tree node."""
    def __init__(self, val=None, parent=None):
        """Create a node holding ``val`` with the given parent.

        A node whose val is None represents an empty tree sentinel.
        """
        self.val = val          # stored value; None marks an empty tree
        self.parent = parent    # parent Node, or None for the root
        self.left = None        # left subtree root (values < val)
        self.right = None       # right subtree root (values > val)
    def __repr__(self):
        """Debug representation, e.g. ``<BST: (5)>``."""
        return '<BST: ({})>'.format(self.val)
    def __str__(self):
        """Display just the node's value."""
        return '{}'.format(self.val)
    def __len__(self):
        """``len(tree)`` is the number of nodes (delegates to size())."""
        return self.size()
    def __iter__(self):
        """Iterate values in sorted (in-order) sequence."""
        return self.in_order()
def __add__(self, other):
for item in other:
self.insert(item)
def __sub__(self, other):
for item in other:
self.delete(item)
    def insert(self, val, balanced=True, render=False):
        """Insert a node with a value into the tree.

        If val is already present, it will be ignored.

        args:
            val: the value to insert
            balanced: performs AVL self-balancing if set to True
            render: automatically saves a render to disk if set to True
        """
        if self.val is not None:
            if val == self.val:
                # Duplicate values are silently ignored.
                return None
            if val < self.val:
                if self.left is None:
                    self.left = Node(val, self)
                    # Rebalance upward from the freshly created leaf.
                    if balanced:
                        self.left._self_balance()
                else:
                    self.left.insert(val, balanced, render)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val, self)
                    if balanced:
                        self.right._self_balance()
                else:
                    self.right.insert(val, balanced, render)
        else:
            # Empty-tree sentinel: just adopt the value.
            self.val = val
        # Only the root (parent is None) triggers the optional render, so
        # a recursive insert renders once.
        if render and self.parent is None:
            self.save_render()
def delete(self, val, balanced=True, render=False):
"""Delete a node matching value and reorganize tree as needed.
If the matched node is the only node in the tree, only its value
will be deleted.
args:
val: the value of the node to delete
balanced: performs AVL self-balancing if set to True
render: automatically saves a render to disk if set to True
"""
node = self.lookup(val)
parent = node.parent
if node is not None:
children_count = node._children_count()
if children_count == 0:
if parent:
if parent.left is node:
parent.left = None
else:
parent.right = None
if balanced:
parent._self_balance()
else:
self.val = None
elif children_count == 1:
if node.left:
child = node.left
else:
child = node.right
if parent:
if parent.left is node:
parent.left = child
else:
parent.right = child
child.parent = parent
if balanced:
child._self_balance()
else:
self.left = child.left
self.right = child.right
try:
self.right.parent = self
self.left.parent = self
except AttributeError:
pass
self.val = child.val
if balanced:
self._self_balance()
else:
parent = node
successor = node.right
while successor.left:
parent = successor
successor = successor.left
node.val = successor.val
if parent.left == successor:
parent.left = successor.right
try:
parent.left.parent = parent
except AttributeError:
pass
parent._self_balance()
else:
parent.right = successor.right
try:
parent.right.parent = parent
except AttributeError:
pass
if balanced:
parent._self_balance()
if render and self.parent is None:
self.save_render()
def contains(self, val):
"""Check tree for node with given value.
args:
val: the value to check for
returns: True if val is in the tree, False if not.
"""
if val == self.val:
return True
elif val < self.val:
if self.left is None:
return False
return self.left.contains(val)
elif val > self.val:
if self.right is None:
return False
return self.right.contains(val)
def lookup(self, val):
"""Find a node by value and return that node.
args:
val: the value to search by
returns: a node
"""
if val < self.val:
if self.left is None:
return None, None
return self.left.lookup(val)
elif val > self.val:
if self.right is None:
return None, None
return self.right.lookup(val)
else:
return self
def size(self):
"""Return the total number of nodes in the tree.
returns: integer of total node; 0 if empty
"""
if self.val is None:
return 0
left_size = self.left.size() if self.left is not None else 0
right_size = self.right.size() if self.right is not None else 0
return left_size + right_size + 1
def depth(self):
"""Return an the total number of levels in the tree.
If there is one value, the depth should be 1, if two values it'll be 2,
if three values it may be 2 or three, depending, etc.
returns: integer of level number
"""
left_depth = self.left.depth() if self.left is not None else 0
right_depth = self.right.depth() if self.right is not None else 0
return max(left_depth, right_depth) + 1
def balance(self):
"""Return a positive or negative number representing tree balance.
Trees higher on the left than the right should return a positive value,
trees higher on the right than the left should return a negative value.
An ideally-balanced tree should return 0.
returns: integer
"""
left_depth = self.left.depth() if self.left is not None else 0
right_depth = self.right.depth() if self.right is not None else 0
return left_depth - right_depth
def _is_left(self):
"""Check nodes relationship to parent.
returns:
- True if node is left child of parent
- False if node is right childe of parent
- None if node has no parent
"""
if self.parent is None:
return None
else:
return self is self.parent.left
    def _rotate_right(self):
        """Perform a single right tree rotation.

        Rotates by swapping *values* with the left child instead of
        re-parenting this node, so outside references to the subtree root
        stay valid. The left child ('pivot') ends up holding the old root
        value and becomes the new right child.
        """
        pivot = self.left
        if pivot is None:
            # Nothing to rotate around.
            return
        self.val, pivot.val = pivot.val, self.val
        # Promote the pivot's left subtree to be our left subtree.
        self.left = pivot.left
        if self.left is not None:
            self.left.parent = self
        # Shift the pivot's subtrees: its old right becomes its left, and
        # our old right subtree hangs under it on the right.
        pivot.left = pivot.right
        pivot.right = self.right
        if pivot.right is not None:
            pivot.right.parent = pivot
        self.right = pivot
    def _rotate_left(self):
        """Perform a single left tree rotation.

        Mirror image of _rotate_right: values are swapped with the right
        child ('pivot') so the subtree root object itself never moves.
        """
        pivot = self.right
        if pivot is None:
            # Nothing to rotate around.
            return
        self.val, pivot.val = pivot.val, self.val
        # Promote the pivot's right subtree to be our right subtree.
        self.right = pivot.right
        if self.right is not None:
            self.right.parent = self
        # Shift the pivot's subtrees: its old left becomes its right, and
        # our old left subtree hangs under it on the left.
        pivot.right = pivot.left
        pivot.left = self.left
        if pivot.left is not None:
            pivot.left.parent = pivot
        self.left = pivot
    def _self_balance(self):
        """Balance the subtree from given node.

        Applies the standard AVL single/double rotations when the balance
        factor reaches +/-2, then recurses toward the root so every
        ancestor is rebalanced after an insert or delete.
        """
        balance = self.balance()
        # Tree is left heavy
        if balance == 2:
            if self.left.balance() <= -1:
                # Double Right (left-right case): rotate the child first.
                self.left._rotate_left()
            # Single Right
            self._rotate_right()
            if self.parent is not None:
                self.parent._self_balance()
        # Tree is right heavy
        elif balance == -2:
            if self.right.balance() >= 1:
                # Double Left (right-left case): rotate the child first.
                self.right._rotate_right()
            # Single Left
            self._rotate_left()
            if self.parent is not None:
                self.parent._self_balance()
        else:
            # Already balanced here; still propagate upward so ancestors
            # get checked.
            if self.parent is not None:
                self.parent._self_balance()
def in_order(self):
"""Return a generator with tree values from in-order traversal"""
stack = []
node = self
while stack or node:
if node:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node.val
node = node.right
def pre_order(self):
"""Return a generator with tree values from pre-order traversal"""
stack = []
node = self
while stack or node:
if node:
yield node.val
stack.append(node)
node = node.left
else:
node = stack.pop()
node = node.right
def post_order(self):
"""Return a generator with tree values from post-order traversal"""
stack = []
node = self
last = None
while stack or node:
if node:
stack.append(node)
node = node.left
else:
peek = stack[-1]
if peek.right is not None and last != peek.right:
node = peek.right
else:
yield peek.val
last = stack.pop()
node = None
def breadth_first(self):
"""Return a generator with tree values from breadth first traversal"""
q = Queue()
q.enqueue(self)
while q.size() > 0:
node = q.dequeue()
yield node.val
if node.left:
q.enqueue(node.left)
if node.right:
q.enqueue(node.right)
def _children_count(self):
"""Return a node's number of children."""
cnt = 0
if self.left:
cnt += 1
if self.right:
cnt += 1
return cnt
def get_dot(self):
"""Return the tree with root as a dot graph for visualization."""
return "digraph G{\n%s}" % ("" if self.val is None else (
"\t%s;\n%s\n" % (
self.val,
"\n".join(self._get_dot())
)
))
    def _get_dot(self):
        """recursively prepare a dot graph entry for this node.

        Yields one dot statement per edge. When only one child exists, an
        invisible 'null' point node is emitted for the missing side so that
        graphviz keeps the surviving child on its correct side.
        """
        if self.left is not None:
            yield "\t%s -> %s;" % (self.val, self.left.val)
            for i in self.left._get_dot():
                yield i
        elif self.right is not None:
            # Missing left child: placeholder keeps layout orientation.
            # NOTE(review): random.randint with a float bound (1e9) is
            # deprecated in Python 3.10+ — consider int(1e9).
            r = random.randint(0, 1e9)
            yield "\tnull%s [shape=point];" % r
            yield "\t%s -> null%s;" % (self.val, r)
        if self.right is not None:
            yield "\t%s -> %s;" % (self.val, self.right.val)
            for i in self.right._get_dot():
                yield i
        elif self.left is not None:
            # Missing right child: same placeholder trick as above.
            r = random.randint(0, 1e9)
            yield "\tnull%s [shape=point];" % r
            yield "\t%s -> null%s;" % (self.val, r)
    def save_render(self, savefile="tree.gv"):
        """Render and save a represntation of the tree.
        args:
            savefile: the optional filename

        Writes the graphviz source and its rendered output under the
        'graphviz/' directory. Requires the third-party graphviz package
        (imported lazily so the rest of the class works without it).
        """
        from graphviz import Source
        src = Source(self.get_dot())
        path = 'graphviz/{}'.format(savefile)
        src.render(path)
@classmethod
def _sorted_list_to_bst(cls, items=[], start=None, end=None, parent=None):
"""Create a balanced binary search tree from sorted list.
args:
items: the sorted list of items to insert into tree
start: the start of the list
end: the end of the list
returns: a balanced binary search tree (node)
"""
if start > end:
return None
mid = start + (end - start) // 2
node = Node(items[mid], parent)
node.left = cls._sorted_list_to_bst(items, start, mid - 1, node)
node.right = cls._sorted_list_to_bst(items, mid + 1, end, node)
return node
    @classmethod
    def create_best_case(cls, n):
        """Create a balanced binary search tree from a given range.
        args:
            n: the range of integers to insert into the tree
        returns: a balanced binary search tree (node)
        """
        # range(n) is already sorted and supports indexing, so it can be
        # handed straight to the balanced-build helper.
        return cls._sorted_list_to_bst(range(n), 0, n - 1)
@classmethod
def create_worst_case(cls, n):
"""Create an unbalanced binary search tree from a given range.
The tree will be one long linear branch to the right.
args:
n: the range of integers to add to the tree
returns: a (very) unbalanced binary search tree (node)
"""
node = Node(0)
parent = node
for i in range(1, n):
parent.right = Node(i, parent)
parent = parent.right
return node
if __name__ == '__main__':
    from timeit import Timer
    # Document the best and worst cases for searching for a value in the tree.
    # The worst case consists of a tree with one long linear branch.
    # The best case is a perfectly balanced tree.
    SIZE = 900
    LOOKUP = 900
    worst = Node.create_worst_case(SIZE)
    best = Node.create_best_case(SIZE)
    # Time 1000 lookups against each tree shape.
    # BUGFIX: the worst-case Timer previously passed a stray second argument
    # (SIZE) to .format() for a template with a single placeholder.
    worst_case = Timer(
        'worst.contains({})'.format(LOOKUP), 'from __main__ import worst'
    ).timeit(1000)
    best_case = Timer(
        'best.contains({})'.format(LOOKUP), 'from __main__ import best'
    ).timeit(1000)
    print(
        "\nLookup Time Comparison: Best and Worst Case\n"
        "\nGiven a tree of {n} items, find a node with value of {l}.\n"
        .format(n=SIZE, l=LOOKUP)
    )
    print(
        "Worst case, with tree balanced at {b}.\n"
        "Time: {t}\n"
        .format(b=worst.balance(), t=worst_case)
    )
    print(
        "Best case, with tree balanced at {b}.\n"
        "Time: {t}\n"
        .format(b=best.balance(), t=best_case)
    )
|
988,833 | 81d42d08bbbb637ca8a7f840033bad8871c76533 | """命令行火车票查看器
Usage:
tickets [-gdtkz] <from> <to> <date>
"""
# Options:
# -h,--help 显示帮助菜单
# -g 高铁
# -d 动车
# -t 特快
# -k 快速
# -z 直达
# Example:
# tickets 北京 上海 2016-10-10
# python tickets.py 上海 北京 2018-08-02
from docopt import docopt
from parse_station import Stations
from InqueryTickets import InqueryTickets
# Parse the command line against the docopt usage string in __doc__,
# then query 12306 with the parsed stations and date.
arguments = docopt(__doc__)
url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.8971'
stations = Stations(url)
inquiry = InqueryTickets(stations, arguments)
inquiry.inquery()
|
988,834 | c20452250d7ec10cb767f82e23c8fcf442a1877c |
def get_url():
    """Return the base URL of the test API.

    BUGFIX: the original assigned the URL to a dead local variable and
    implicitly returned None.
    """
    return 'http://test.cwty.bet/api'
def get_header():
    """Return the default JSON-Patch request headers.

    BUGFIX: the original assigned the dict to a dead local variable and
    implicitly returned None. A fresh dict is returned on each call so
    callers can safely mutate it.
    """
    return {"Content-Type": "application/json-patch+json"}
|
988,835 | 9f7013f6b4b6f627d5b650d8249074dadf36c869 | import random
from collections import deque
from typing import List
import numpy as np
import stock_exchange
import time
from experts.obscure_expert import ObscureExpert
from framework.vote import Vote
from framework.period import Period
from framework.portfolio import Portfolio
from framework.stock_market_data import StockMarketData
from framework.interface_expert import IExpert
from framework.interface_trader import ITrader
from framework.order import Order, OrderType
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from framework.order import Company
from framework.utils import save_keras_sequential, load_keras_sequential
from framework.logger import logger
class DeepQLearningTrader(ITrader):
    """
    Implementation of ITrader based on Deep Q-Learning (DQL).

    The state seen by the network is a 2-vector: an encoding of the two
    experts' combined recommendation and an encoding of the portfolio's
    cash/share situation. The 9 actions are all BUY/SELL/HOLD pairs for
    stocks A and B.
    """
    RELATIVE_DATA_DIRECTORY = 'traders/dql_trader_data'

    def __init__(self, expert_a: IExpert, expert_b: IExpert, load_trained_model: bool = True,
                 train_while_trading: bool = False, color: str = 'black', name: str = 'dql_trader', ):
        """
        Constructor

        Args:
            expert_a: Expert for stock A
            expert_b: Expert for stock B
            load_trained_model: Flag to trigger loading an already trained neural network
            train_while_trading: Flag to trigger on-the-fly training while trading
        """
        # Save experts, training mode and name
        super().__init__(color, name)
        assert expert_a is not None and expert_b is not None
        self.expert_a = expert_a
        self.expert_b = expert_b
        self.train_while_trading = train_while_trading

        # Parameters for neural network
        self.state_size = 2
        self.action_size = 9
        self.hidden_size = 50

        # Parameters for deep Q-learning
        self.gamma = 0.9
        self.learning_rate = 0.001
        self.epsilon = 1.0
        self.epsilon_decay = 0.999
        self.epsilon_min = 0.01
        self.batch_size = 64
        self.min_size_of_memory_before_training = 1000  # should be way bigger than batch_size, but smaller than memory
        self.memory = deque(maxlen=2000)

        # Attributes necessary to remember our last actions and fill our memory with experiences
        self.last_state = None
        self.last_action_a = None
        self.last_action_b = None
        self.last_portfolio_value = None

        # Create main model, either as trained model (from file) or as untrained model (from scratch)
        self.model = None
        if load_trained_model:
            self.model = load_keras_sequential(self.RELATIVE_DATA_DIRECTORY, self.get_name())
            logger.info(f"DQL Trader: Loaded trained model")
        if self.model is None:  # loading failed or we didn't want to use a trained model
            self.model = Sequential()
            self.model.add(Dense(self.hidden_size * 2, input_dim=self.state_size, activation='relu'))
            self.model.add(Dense(self.hidden_size, activation='relu'))
            self.model.add(Dense(self.action_size, activation='linear'))
            logger.info(f"DQL Trader: Created new untrained model")
        assert self.model is not None
        self.model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))

    def save_trained_model(self):
        """
        Save the trained neural network under a fixed name specific for this traders.
        """
        save_keras_sequential(self.model, self.RELATIVE_DATA_DIRECTORY, self.get_name())
        logger.info(f"DQL Trader: Saved trained model")

    def calc_reward(self, current_portfolio_value: int):
        """Return +1/-1/0 depending on whether the portfolio value rose,
        fell, or stayed the same since the last trading step."""
        if current_portfolio_value > self.last_portfolio_value:
            return 1
        elif current_portfolio_value < self.last_portfolio_value:
            return -1
        else:
            return 0

    def action_mapping(self, recommendation):
        """Map the pair of expert recommendations (strings for stocks A
        and B) onto a single action index in [0, 8]."""
        action_dict = {"SELLSELL": 0, "SELLBUY": 1, "BUYSELL": 2, "BUYBUY": 3, "SELLHOLD": 4, "BUYHOLD": 5, "HOLDSELL": 6, "HOLDBUY": 7, "HOLDHOLD": 8}
        return action_dict[recommendation[0] + recommendation[1]]

    def comb_cash_share(self, temp_csc, stock_market_data):
        """Encode [cash, shares_a, shares_b] as a number in [0, 7].

        The first flag is whether cash suffices for the pricier stock,
        the other two are whether any shares of A resp. B are held;
        t=True, f=False.
        """
        comb_dict = {'ttt': 0, 'ttf': 1, 'tft': 2, 'tff': 3, 'ftt': 4, 'ftf': 5, 'fft': 6, 'fff': 7}
        if temp_csc[0] >= max([stock_market_data.get_most_recent_price(Company.A), stock_market_data.get_most_recent_price(Company.B)]):
            tmp = "t"
        else:
            tmp = "f"
        if temp_csc[1] > 0:
            tmp += "t"
        else:
            tmp += "f"
        if temp_csc[2] > 0:
            tmp += "t"
        else:
            tmp += "f"
        return comb_dict[tmp]

    def states_compution(self, stock_market_data: StockMarketData, portfolio: Portfolio):
        """Build the (1, 2) network input from the experts' votes and the
        current portfolio composition."""
        temp_opinion = [self.expert_a.vote(stock_market_data[Company.A]), self.expert_b.vote(stock_market_data[Company.B])]
        for i, entry in enumerate(temp_opinion):
            if entry == Vote.BUY:
                temp_opinion[i] = "BUY"
            elif entry == Vote.SELL:
                temp_opinion[i] = "SELL"
            elif entry == Vote.HOLD:
                temp_opinion[i] = "HOLD"
        recommended_action = self.action_mapping(temp_opinion)
        cash = portfolio.cash
        share_a = portfolio.get_stock(Company.A)
        # BUGFIX: stock B's holdings were read from Company.A before.
        share_b = portfolio.get_stock(Company.B)
        cash_share_combination = self.comb_cash_share([cash, share_a, share_b], stock_market_data)
        state = np.array([recommended_action, cash_share_combination])
        state = np.reshape(state, [1, 2])
        return state

    def train_model(self):
        """Train the network on one random minibatch drawn from the
        experience-replay memory (Bellman update: r + gamma * max Q(s'))."""
        selected_batch = random.sample(self.memory, self.batch_size)
        target_list = np.zeros(self.batch_size, dtype=object)
        reward_list = np.zeros(self.batch_size, dtype=object)
        list_lastState = []
        list_currentState = []

        # save necessary information into lists
        index = 0
        for lastState, lastAction, currentReward, currentState in selected_batch:
            list_lastState.append(lastState[0])
            list_currentState.append(currentState[0])
            reward_list[index] = currentReward
            index += 1

        # predict rewards for old and current States
        y = self.model.predict(np.array(list_lastState))
        x = self.model.predict(np.array(list_currentState))

        # Overwrite the taken action's Q-value with the bootstrapped target.
        for i in range(0, self.batch_size):
            target_list[i] = reward_list[i] + self.gamma * np.amax(x[i])
            y[i][selected_batch[i][1]] = target_list[i]

        # decrease epsilon until min is reached for trade-off between exploration and exploitation
        if self.epsilon > self.epsilon_min:
            self.epsilon = self.epsilon * self.epsilon_decay

        self.model.train_on_batch(np.array(list_lastState), y)

    def append_memory(self, last_state, last_action: int, reward: int, current_state):
        """Store one (s, a, r, s') transition in the replay memory."""
        self.memory.append((last_state, last_action, reward, current_state))

    def select_action(self, current_state, current_portfolio_value):
        """Epsilon-greedy action selection."""
        # BUGFIX: use a uniform float in [0, 1); the previous
        # random.randint(0, 1) only yields 0 or 1, so the exploration
        # rate did not actually follow the decaying epsilon.
        if random.random() <= self.epsilon:
            # Exploration
            current_action = random.randint(0, self.action_size - 1)
        # Exploitation
        else:
            current_action = np.argmax(self.model.predict(current_state))
        return current_action

    def trade(self, portfolio: Portfolio, stock_market_data: StockMarketData) -> List[Order]:
        """
        Generate action to be taken on the "stock market"

        Args:
          portfolio : current Portfolio of this traders
          stock_market_data : StockMarketData for evaluation

        Returns:
          A OrderList instance, may be empty never None
        """
        assert portfolio is not None
        assert stock_market_data is not None
        assert stock_market_data.get_companies() == [Company.A, Company.B]

        current_state = self.states_compution(stock_market_data, portfolio)
        current_portfolio_value = portfolio.get_value(stock_market_data)

        if self.train_while_trading is False:
            # for test set use trained ann
            action = np.argmax(self.model.predict(current_state)[0])
        else:
            action = self.select_action(current_state, current_portfolio_value)
            if self.last_state is not None:
                reward = self.calc_reward(current_portfolio_value)
                self.append_memory(self.last_state, self.last_action_a, reward, current_state)
                # train model as soon as sufficient memory is reached
                if len(self.memory) > self.min_size_of_memory_before_training:
                    self.train_model()

        # Split action into individual actions for Company A and B
        # (0 means HOLD for that company).
        current_action_a = 0
        current_action_b = 0
        assert action < 9 and action >= 0
        if action == 0:
            current_action_a = OrderType.SELL
            current_action_b = OrderType.SELL
            amount_to_sell_a = portfolio.get_stock(Company.A)
            amount_to_sell_b = portfolio.get_stock(Company.B)
        elif action == 1:
            current_action_a = OrderType.SELL
            amount_to_sell_a = portfolio.get_stock(Company.A)
            current_action_b = OrderType.BUY
            # BUGFIX: stock B was previously priced with Company.A's quote.
            stock_price = stock_market_data.get_most_recent_price(Company.B)
            amount_to_buy_b = int(portfolio.cash/stock_price)
        elif action == 2:
            current_action_a = OrderType.BUY
            stock_price = stock_market_data.get_most_recent_price(Company.A)
            amount_to_buy_a = int(portfolio.cash/stock_price)
            current_action_b = OrderType.SELL
            amount_to_sell_b = portfolio.get_stock(Company.B)
        elif action == 3:
            # Buy both: split the available cash evenly.
            current_action_a = OrderType.BUY
            stock_price = stock_market_data.get_most_recent_price(Company.A)
            amount_to_buy_a = int((portfolio.cash/stock_price)/2)
            current_action_b = OrderType.BUY
            stock_price = stock_market_data.get_most_recent_price(Company.B)
            amount_to_buy_b = int((portfolio.cash/stock_price)/2)
        elif action == 4:
            current_action_a = OrderType.SELL
            amount_to_sell_a = portfolio.get_stock(Company.A)
            # current_action_b = "hold"
        elif action == 5:
            current_action_a = OrderType.BUY
            stock_price = stock_market_data.get_most_recent_price(Company.A)
            amount_to_buy_a = int(portfolio.cash/stock_price)
            # current_action_b = "hold"
        elif action == 6:
            # current_action_a = "hold"
            current_action_b = OrderType.SELL
            amount_to_sell_b = portfolio.get_stock(Company.B)
        elif action == 7:
            # current_action_a = "hold"
            current_action_b = OrderType.BUY
            stock_price = stock_market_data.get_most_recent_price(Company.B)
            amount_to_buy_b = int(portfolio.cash/stock_price)

        # Only emit orders that can actually be executed.
        order_list = []
        if current_action_a != 0:
            if current_action_a == OrderType.SELL and amount_to_sell_a > 0:
                order_list.append(Order(current_action_a, Company.A, amount_to_sell_a))
            elif current_action_a == OrderType.BUY and portfolio.cash > 0:
                order_list.append(Order(current_action_a, Company.A, amount_to_buy_a))
        if current_action_b != 0:
            if current_action_b == OrderType.SELL and amount_to_sell_b > 0:
                order_list.append(Order(current_action_b, Company.B, amount_to_sell_b))
            elif current_action_b == OrderType.BUY and portfolio.cash > 0:
                order_list.append(Order(current_action_b, Company.B, amount_to_buy_b))

        # Remember this step for the next experience-replay transition.
        self.last_action_a = action
        self.last_state = current_state
        self.last_portfolio_value = current_portfolio_value
        return order_list
# Retrains the trader from scratch using data from the TRAINING period and
# evaluates the saved model on the TESTING period after every episode.
EPISODES = 5
if __name__ == "__main__":
    start_time = time.time()

    # Create the training data and testing data
    # Hint: You can crop the training data with training_data.deepcopy_first_n_items(n)
    training_data = StockMarketData([Company.A, Company.B], [Period.TRAINING])
    testing_data = StockMarketData([Company.A, Company.B], [Period.TESTING])

    # Create the stock exchange and one trader to train the net.
    # Use a name distinct from the imported 'stock_exchange' module.
    exchange = stock_exchange.StockExchange(10000.0)
    training_trader = DeepQLearningTrader(ObscureExpert(Company.A), ObscureExpert(Company.B), False, True)

    # Save the final portfolio values per episode
    final_values_training, final_values_test = [], []
    print("Episode over")
    print(final_values_training)
    print(final_values_test)

    for episode in range(EPISODES):
        logger.info(f"DQL Trader: Starting training episode {episode}")

        # train the net
        exchange.run(training_data, [training_trader])
        training_trader.save_trained_model()
        final_values_training.append(exchange.get_final_portfolio_value(training_trader))

        # test the trained net
        testing_trader = DeepQLearningTrader(ObscureExpert(Company.A), ObscureExpert(Company.B), True, False)
        exchange.run(testing_data, [testing_trader])
        final_values_test.append(exchange.get_final_portfolio_value(testing_trader))

        logger.info(f"DQL Trader: Finished training episode {episode}, "
                    f"final portfolio value training {final_values_training[-1]} vs. "
                    f"final portfolio value test {final_values_test[-1]}")

    elapsed_until = time.time()
    print(elapsed_until - start_time)

    from matplotlib import pyplot as plt
    plt.figure()
    plt.plot(final_values_training, label='training', color="black")
    plt.plot(final_values_test, label='test', color="green")
    plt.title('final portfolio value training vs. final portfolio value test')
    plt.ylabel('final portfolio value')
    plt.xlabel('episode')
    plt.legend(['training', 'test'])
    plt.show()
|
988,836 | 0fcd3ef21b6ff7bf6e91dda61b67f5d84698abcd | # Generated by Django 2.1.7 on 2019-08-15 05:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the misspelled 'universty' field and relax 'work_or_study'."""

    dependencies = [
        ('CDapp', '0003_auto_20190813_1306'),
    ]

    operations = [
        # Correct the column-name typo on the Class model.
        migrations.RenameField(
            model_name='class',
            old_name='universty',
            new_name='university',
        ),
        # Person.work_or_study becomes an optional choice field
        # ('w' = work, 's' = study).
        migrations.AlterField(
            model_name='person',
            name='work_or_study',
            field=models.CharField(blank=True, choices=[('w', 'work'), ('s', 'study')], max_length=10, null=True),
        ),
    ]
|
988,837 | 6aa0bda42314db86e6f211506da270e278e6ee2e | import unittest
from hstest.check_result import CheckResult
from hstest.dynamic.dynamic_test import dynamic_test
from hstest.stage_test import StageTest
class TestFeedback(StageTest):
    """Stage test whose single dynamic test always fails, used to check
    how hstest merges the decorator feedback with the CheckResult one."""
    @dynamic_test(feedback="feedback 1")
    def test(self):
        # Always report a wrong answer carrying its own feedback text.
        return CheckResult.wrong("feedback 2")
class Test(unittest.TestCase):
    """Verifies that the CheckResult feedback and the @dynamic_test
    feedback are both present (in that order) in the final message."""
    def test(self):
        status, feedback = TestFeedback().run_tests()
        # A non-zero status means the stage test failed, as expected.
        self.assertNotEqual(status, 0)
        # BUGFIX: assertEquals is a deprecated alias (removed in
        # Python 3.12); use assertEqual.
        self.assertEqual(
            "Wrong answer in test #1\n" +
            "\n" +
            "feedback 2\n" +
            "\n" +
            "feedback 1",
            feedback
        )
if __name__ == '__main__':
    # Invoke the check directly instead of going through a unittest runner.
    Test().test()
|
988,838 | 88834e74f10374e3ef5ec5a28104d0dc3553690a | from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait as WD
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
import pymysql
# Scrape the first three list pages of the ppomppu deals board.
# NOTE(review): find_elements_by_class_name / find_element_by_* belong to the
# deprecated Selenium 3 API; Selenium 4 requires find_element(s)(By...).
driver = webdriver.Chrome(ChromeDriverManager().install())
for i in range(1, 4):
    driver.get(f"https://www.ppomppu.co.kr/zboard/zboard.php?id=ppomppu&page={i}")
    # Wait until the board table rows are present before scraping.
    wait = WD(driver, 30)
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#revolution_main_table > tbody > tr:nth-child(7)')))
    # xpath = "//*[@id='revolution_main_table']/tbody/tr"
    # The board alternates row classes 'list1' and 'list0'.
    data_1 = driver.find_elements_by_class_name('list1')
    data_2 = driver.find_elements_by_class_name('list0')
    data_1_list = []
    data_2_list = []
    for j in data_1:
        try:
            # First 7 characters of the row text — presumably the post id;
            # TODO confirm against the page layout.
            tag_number = j.text[:7]
            title = j.find_element_by_tag_name('font').text
            reply_count = j.find_element_by_class_name('list_comment2').text
            data = [tag_number, title, reply_count]
            data_1_list.append(data)
        except:
            # Best-effort scraping: skip rows missing the expected elements.
            continue
    for j in data_2:
        try:
            tag_number = j.text[:7]
            title = j.find_element_by_tag_name('font').text
            reply_count = j.find_element_by_class_name('list_comment2').text
            data = [tag_number, title, reply_count]
            data_2_list.append(data)
        except:
            continue
    # NOTE(review): data_list is rebuilt on every page iteration and never
    # persisted (pymysql is imported but unused), so results from pages 1-2
    # are discarded — confirm whether accumulation/DB insertion was intended.
    data_list = data_1_list + data_2_list
driver.close()
|
988,839 | d94cd0faa8cadc2b3e9fcf9eec5ea9c8a68b13fa | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Top Block
# Generated: Fri Sep 11 14:50:34 2015
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import window
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from gnuradio.wxgui import fftsink2
from gnuradio.wxgui import scopesink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class top_block(grc_wxgui.top_block_gui):
    """GRC-generated wx flowgraph: a 1 kHz tone is DSB-modulated onto a
    20 kHz carrier, high-pass filtered, then demodulated along two paths
    (a Hilbert/complex path and a direct multiply path) whose sum feeds
    an FFT sink and a scope sink.

    NOTE: this file is generated by GNU Radio Companion; prefer editing
    the .grc flowgraph over hand-editing this code.
    """

    def __init__(self):
        grc_wxgui.top_block_gui.__init__(self, title="Top Block")
        _icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
        self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))

        ##################################################
        # Variables
        ##################################################
        self.samp_rate = samp_rate = 320000

        ##################################################
        # Blocks
        ##################################################
        self.wxgui_scopesink2_0 = scopesink2.scope_sink_f(
            self.GetWin(),
            title="Scope Plot",
            sample_rate=samp_rate,
            v_scale=0,
            v_offset=0,
            t_scale=0,
            ac_couple=False,
            xy_mode=False,
            num_inputs=1,
            trig_mode=gr.gr_TRIG_MODE_AUTO,
            y_axis_label="Counts",
        )
        self.Add(self.wxgui_scopesink2_0.win)
        self.wxgui_fftsink2_0 = fftsink2.fft_sink_f(
            self.GetWin(),
            baseband_freq=0,
            y_per_div=10,
            y_divs=10,
            ref_level=0,
            ref_scale=2.0,
            sample_rate=samp_rate,
            fft_size=1024,
            fft_rate=15,
            average=False,
            avg_alpha=None,
            title="FFT Plot",
            peak_hold=False,
        )
        self.Add(self.wxgui_fftsink2_0.win)
        # 64-tap Hilbert transform feeding the complex demodulation path.
        self.hilbert_fc_0 = filter.hilbert_fc(64)
        # High-pass at 15 kHz keeps only the modulated band around 20 kHz.
        self.high_pass_filter_0 = gr.interp_fir_filter_fff(1, firdes.high_pass(
        	1, samp_rate, 15000, 10, firdes.WIN_HAMMING, 6.76))
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_float*1, samp_rate)
        self.blocks_multiply_xx_1_0 = blocks.multiply_vff(1)
        self.blocks_multiply_xx_1 = blocks.multiply_vcc(1)
        self.blocks_multiply_xx_0 = blocks.multiply_vff(1)
        self.blocks_complex_to_real_0 = blocks.complex_to_real(1)
        self.blocks_add_xx_0 = blocks.add_vff(1)
        self.analog_sig_source_x_0_0_2 = analog.sig_source_c(samp_rate, analog.GR_SIN_WAVE, 20000, 1, 0)
        self.analog_sig_source_x_0_0_1 = analog.sig_source_f(samp_rate, analog.GR_COS_WAVE, 20000, 1, 0)
        self.analog_sig_source_x_0_0 = analog.sig_source_f(samp_rate, analog.GR_COS_WAVE, 20000, 1, 0)
        self.analog_sig_source_x_0 = analog.sig_source_f(samp_rate, analog.GR_COS_WAVE, 1000, 1, 0)

        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 0))
        self.connect((self.analog_sig_source_x_0_0, 0), (self.blocks_multiply_xx_0, 1))
        self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_throttle_0, 0))
        self.connect((self.blocks_throttle_0, 0), (self.high_pass_filter_0, 0))
        self.connect((self.high_pass_filter_0, 0), (self.hilbert_fc_0, 0))
        self.connect((self.hilbert_fc_0, 0), (self.blocks_multiply_xx_1, 0))
        self.connect((self.analog_sig_source_x_0_0_2, 0), (self.blocks_multiply_xx_1, 1))
        self.connect((self.high_pass_filter_0, 0), (self.blocks_multiply_xx_1_0, 1))
        self.connect((self.analog_sig_source_x_0_0_1, 0), (self.blocks_multiply_xx_1_0, 0))
        self.connect((self.blocks_multiply_xx_1, 0), (self.blocks_complex_to_real_0, 0))
        self.connect((self.blocks_multiply_xx_1_0, 0), (self.blocks_add_xx_0, 0))
        self.connect((self.blocks_complex_to_real_0, 0), (self.blocks_add_xx_0, 1))
        self.connect((self.blocks_add_xx_0, 0), (self.wxgui_fftsink2_0, 0))
        self.connect((self.blocks_add_xx_0, 0), (self.wxgui_scopesink2_0, 0))

    def get_samp_rate(self):
        """Return the current sample rate in samples/second."""
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        """Set the sample rate and propagate it to all dependent blocks."""
        self.samp_rate = samp_rate
        self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
        self.analog_sig_source_x_0_0.set_sampling_freq(self.samp_rate)
        self.blocks_throttle_0.set_sample_rate(self.samp_rate)
        self.analog_sig_source_x_0_0_1.set_sampling_freq(self.samp_rate)
        self.analog_sig_source_x_0_0_2.set_sampling_freq(self.samp_rate)
        self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
        self.wxgui_scopesink2_0.set_sample_rate(self.samp_rate)
        # The filter taps depend on the sample rate, so recompute them.
        self.high_pass_filter_0.set_taps(firdes.high_pass(1, self.samp_rate, 15000, 10, firdes.WIN_HAMMING, 6.76))
if __name__ == '__main__':
    # Parse (currently unused) engineering-notation options, then start
    # the flowgraph's wx GUI main loop.
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    (options, args) = parser.parse_args()
    tb = top_block()
    tb.Run(True)
|
988,840 | af43a4fad5c93c8f89661b245f84a9b669804118 | import numpy as np
import pylab as pl
from get_data import import_data_to_denoise, reconstruct_data
# Python 2 script: compare a reconstructed LPPL fit against the raw log-price
# series in the frequency domain and estimate the noise spectrum.
linear_0, non_linear_0, sigma, type_of_data, time_para, test_0, linear, non_linear, method, test = import_data_to_denoise()
# Index of the test case to inspect (alternatives left by the author).
to_test = 6 #78 #6 #151
i = 2
i_0 = np.where(test_0 == to_test)[0][0]
index = np.where(test == to_test)[0]
# ln_p: log-price series; lppl: fitted curve; lppl_0: reference fit (may be None).
ln_p, lppl, lppl_0 = reconstruct_data(linear_0[i_0], non_linear_0[i_0], sigma[i_0], type_of_data[i_0], time_para[i_0], linear[index][i], non_linear[index][i])
fft_p = np.fft.fft(ln_p)
fft_lppl = np.fft.fft(lppl)
freq = np.fft.fftfreq(len(lppl))
# Noise power spectrum estimated as the excess of the signal's power over
# the fit's power at each frequency.
fft_noise = np.abs(fft_p)**2 - np.abs(fft_lppl)**2
# Fraction of frequencies where the noise power dominates the fit power.
print np.where(fft_noise > np.abs(fft_lppl)**2, 1, 0).mean()
if lppl_0 is not None:
    # Crude signal-to-noise style ratio against the reference fit.
    print (np.max(ln_p) - np.min(ln_p))/(np.max(lppl_0) - np.min(lppl_0)+sigma[i_0])
noise = np.abs(np.fft.ifft(fft_noise))
# Low-pass reconstructions keeping |freq| < 0.2.
# NOTE(review): ln_p_f is recomputed inline in the plot below, and lppl_p_f
# is never used — likely leftovers.
ln_p_f = np.abs(np.fft.ifft(np.where(np.abs(freq)<0.2, fft_p, 0)))
lppl_p_f = np.abs(np.fft.ifft(np.where(np.abs(freq)<0.2, fft_lppl, 0)))
fig = pl.figure()
pl.plot(ln_p, label = 'ln(p)')
pl.plot(np.abs(np.fft.ifft(np.where(np.abs(freq)<0.2, fft_p, 0))))
if lppl_0 is not None:
    pl.plot(lppl_0, label = 'lppl$_0$')
pl.plot(lppl, label = 'lppl')
pl.plot()
# Offset the noise curve so it starts at the same level as ln(p).
pl.plot(noise-noise[0]+ln_p[0], label = 'noise')
pl.legend()
fig= pl.figure()
pl.plot(freq, np.abs(fft_p))
pl.plot(freq, np.abs(fft_lppl))
pl.show()
|
988,841 | 67c8d428439de5e2fe43c9c7ea1ce2cec2895c18 | from pyscf import gto, scf, grad
import time
import numpy as np
import scipy
from zeroth_order_ghf import rhf_to_ghf, get_p0, get_hcore0, get_pi0, get_f0,\
get_e0_nuc, get_e0_elec
def get_s1(mol, atom, coord):
    r"""Calculates first order pertubation to the orbital overlap matrix

    .. math::

        \mathbf{S}^{(1)}_{\mu\nu}
        = \left(\frac{\partial\phi_{\mu}}{\partial a}\bigg|\phi_{\nu}\right)
        + \left(\phi_{\mu}\bigg|\frac{\partial\phi_{\nu}}{\partial a}\right)

    :param mol: Molecule class as defined by PySCF.
    :param atom: Input for which atom is being perturbed, with atoms numbered
            according to the PySCF molecule.
    :param coord: Input for along which coordinate the pertubation of the atom
            coord = '0' for x
                    '1' for y
                    '2' for z

    :returns: First order overlap matrix.
    """
    s_py = -mol.intor("int1e_ipovlp")[coord]
    #minus sign due to pyscf definition
    # AO index range belonging to the perturbed atom (loop invariant).
    ao_start, ao_stop = mol.aoslice_by_atom()[atom][2:4]
    s1 = np.zeros_like(s_py)
    for i in range(s_py.shape[1]):
        # lambda flags select derivative contributions only for basis
        # functions centred on the perturbed atom.
        lambda_i = int(ao_start <= i < ao_stop)
        for j in range(s_py.shape[1]):
            lambda_j = int(ao_start <= j < ao_stop)
            s1[i][j] += s_py[i][j]*lambda_i + s_py[j][i]*lambda_j
    # Expand the spatial matrix into the spinor (GHF) basis.
    omega = np.identity(2)
    s1 = np.kron(omega, s1)
    # BUGFIX: removed a stray np.set_printoptions(precision=3) call that
    # mutated numpy's global print state as a side effect of this function.
    return s1
def get_p1(g0, g1, complexsymmetric, nelec):
    r"""Calculates the first order density matrix from the zeroth and first
    order coefficient matrices. It is defined by (only over occupied MOs):

    .. math::

        \mathbf{P^{(1)}} =
        \mathbf{G^{(0)}G^{(1)\dagger\diamond}}
        + \mathbf{G^{(1)}G^{(0)\dagger\diamond}}

    :param g0: zeroth order GHF coefficient matrix.
    :param g1: First order GHF coefficient matrix.
    :param complexsymmetric: If :const:'True', :math:'/diamond = /star'.
            If :const:'False', :math:'\diamond = \hat{e}'.
    :param nelec: Number of electrons in the molecule, determines the number
            of occupied orbitals.

    :returns: The first order density matrix.
    """
    # Restrict both coefficient matrices to the occupied columns.
    occ0 = g0[:, :nelec]
    occ1 = g1[:, :nelec]
    # The "diamond" adjoint is a plain transpose in the complex-symmetric
    # case and the conjugate transpose otherwise.
    if complexsymmetric:
        adj0, adj1 = occ0.T, occ1.T
    else:
        adj0, adj1 = occ0.T.conj(), occ1.T.conj()
    return np.dot(occ0, adj1) + np.dot(occ1, adj0)
def get_hcore1(mol, atom, coord):
    r"""Calculates the first order core hamiltonian matrix.
    Each element is given by:

    .. math::

        \left(\mathbf{H_{core}^{(1)}}\right)_{\mu\nu}
        = \left(\frac{\partial\phi_{\mu}}{\partial a}\left|
        \mathbf{\hat{H}_{core}}\right|\phi_{\nu}\right)
        + \left(\phi_{\mu}\left|\frac{\partial\mathbf{\hat{H}_{core}}}
        {\partial a}\right|\phi_{\nu}\right)
        + \left(\phi_{\mu}\left|\mathbf{\hat{H}_{core}}\right|
        \frac{\partial\phi_{\nu}}{\partial a}\right)

    (Note that :math:'a' is a particular specified pertubation, e.g movement
    in the x direction of atom 1)

    :param mol: Molecule class as defined by PySCF.
    :param atom: Input for which atom is being perturbed, with atoms numbered
            according to the PySCF molecule.
    :param coord: Input for along which coordinate the pertubation of the atom
            coord = '0' for x
                    '1' for y
                    '2' for z

    :returns: First order core hamiltonian matrix.
    """
    # Let PySCF's RHF gradient machinery build the spatial derivative.
    mean_field = scf.RHF(mol)
    gradients = grad.rhf.Gradients(mean_field)
    hcore1_spatial = gradients.hcore_generator(mol)(atom)[coord]
    # Expand into the spinor (GHF) basis with a 2x2 identity Kronecker product.
    return np.kron(np.identity(2), hcore1_spatial)
def get_pi1(mol, atom, coord):
    r"""Calculates the 4 dimensional first order pi tensor by digesting the
    2 electron integrals given by PySCF.

    Symmetry of the 2 electron integrals is manipulated to digest the PySCF
    tensor, in which the first MO of each 2 electron integral has been
    differentiated.
    Each element is given by:

    .. math::

        \mathbf{\Pi_{\delta'\mu',\epsilon'\nu',\delta\mu,\epsilon\nu}^{(1)}}
        = \mathbf{\Omega_{\delta'\delta}\Omega_{\epsilon'\epsilon}}
        \left(\mu'\mu|\nu'\nu\right)^{(1)}
        -\mathbf{\Omega_{\delta'\epsilon}\Omega_{\epsilon'\delta}}
        \left(\mu'\nu|\nu'\mu\right)^{(1)}

        \left(\mu'\mu|\nu'\nu\right)^{(1)}
        =\left(\frac{\partial\phi_{\mu'}}{\partial a}\phi_{\mu}|
        \phi_{\nu'}\phi_{\nu}\right)
        +\left(\phi_{\mu'}\frac{\partial\phi_{\mu}}{\partial a}|
        \phi_{\nu'}\phi_{\nu}\right)
        +\left(\phi_{\mu'}\phi_{\mu}|
        \frac{\partial\phi_{\nu'}}{\partial a}\phi_{\nu}\right)
        +\left(\phi_{\mu'}\phi_{\mu}|
        \phi_{\nu'}\frac{\partial\phi_{\nu}}{\partial a}\right)

    :param mol: Molecule class as defined by PySCF.
    :param atom: Input for which atom is being perturbed, with atoms numbered
            according to the PySCF molecule.
    :param coord: Input for along which coordinate the pertubation of the atom
            lies: 0 for x, 1 for y, 2 for z.
    :returns: First order 4 dimensional pi tensor.
    """
    omega = np.identity(2)
    spin_j = np.einsum("ij,kl->ikjl", omega, omega)
    pi_py = -mol.intor("int2e_ip1")[coord]  # minus sign due to pyscf definition

    # PERF: the original quadruple Python loop re-evaluated
    # mol.aoslice_by_atom() for every (i, j, k, l) tuple, an O(nbasis^4)
    # amount of redundant work. The AO window of the perturbed atom is loop
    # invariant, so compute it once as an indicator vector and vectorize.
    ao_start, ao_stop = mol.aoslice_by_atom()[atom][2:4]
    mask = np.zeros(pi_py.shape[0])
    mask[ao_start:ao_stop] = 1.0

    # Assemble (mu'mu|nu'nu)^(1): the PySCF tensor only differentiates the
    # first index, so permute it so each of the four AO positions takes a
    # turn as the differentiated one, keeping only terms where that basis
    # function sits on the perturbed atom.
    j1_spatial = (mask[:, None, None, None] * pi_py
                  + mask[None, :, None, None] * pi_py.transpose(1, 0, 2, 3)
                  + mask[None, None, :, None] * pi_py.transpose(2, 3, 0, 1)
                  + mask[None, None, None, :] * pi_py.transpose(3, 2, 0, 1))

    j1_spatial = np.einsum("abcd->acbd", j1_spatial,
                           optimize='optimal')  # convert to physicists

    j1 = np.kron(spin_j, j1_spatial)
    k1 = np.einsum("ijkl->ijlk", j1,
                   optimize='optimal')  # physicists notation

    pi1 = j1 - k1
    return pi1
def get_j1(mol, atom, coord):
    r"""Calculates the 4 dimensional first order j tensor of coulomb
    integrals by digesting the 2 electron integrals given by PySCF.

    Symmetry of the 2 electron integrals is manipulated to digest the PySCF
    tensor, in which the first MO of each 2 electron integral has been
    differentiated.
    Each element is given by:

    .. math::

        \mathbf{\Pi_{\delta'\mu',\epsilon'\nu',\delta\mu,\epsilon\nu}^{(1)}}
        = \mathbf{\Omega_{\delta'\delta}\Omega_{\epsilon'\epsilon}}
        \left(\mu'\mu|\nu'\nu\right)^{(1)}

        \left(\mu'\mu|\nu'\nu\right)^{(1)}
        =\left(\frac{\partial\phi_{\mu'}}{\partial a}\phi_{\mu}|
        \phi_{\nu'}\phi_{\nu}\right)
        +\left(\phi_{\mu'}\frac{\partial\phi_{\mu}}{\partial a}|
        \phi_{\nu'}\phi_{\nu}\right)
        +\left(\phi_{\mu'}\phi_{\mu}|
        \frac{\partial\phi_{\nu'}}{\partial a}\phi_{\nu}\right)
        +\left(\phi_{\mu'}\phi_{\mu}|
        \phi_{\nu'}\frac{\partial\phi_{\nu}}{\partial a}\right)

    :param mol: Molecule class as defined by PySCF.
    :param atom: Input for which atom is being perturbed, with atoms numbered
            according to the PySCF molecule.
    :param coord: Input for along which coordinate the pertubation of the atom
            lies: 0 for x, 1 for y, 2 for z.
    :returns: First order 4 dimensional j tensor.
    """
    omega = np.identity(2)
    spin_j = np.einsum("ij,kl->ikjl", omega, omega)
    twoe = -mol.intor("int2e_ip1")[coord]  # minus sign due to pyscf definition

    # PERF: same fix as get_pi1 — the original quadruple Python loop called
    # mol.aoslice_by_atom() once per (i, j, k, l) tuple. The perturbed atom's
    # AO window is loop invariant; build an indicator vector once and
    # vectorize the four index permutations.
    ao_start, ao_stop = mol.aoslice_by_atom()[atom][2:4]
    mask = np.zeros(twoe.shape[0])
    mask[ao_start:ao_stop] = 1.0

    j1_spatial = (mask[:, None, None, None] * twoe
                  + mask[None, :, None, None] * twoe.transpose(1, 0, 2, 3)
                  + mask[None, None, :, None] * twoe.transpose(2, 3, 0, 1)
                  + mask[None, None, None, :] * twoe.transpose(3, 2, 0, 1))

    j1_spatial = np.einsum("abcd->acbd", j1_spatial,
                           optimize='optimal')  # convert to physicists

    j1 = np.kron(spin_j, j1_spatial)
    return j1
def get_f1(pi0, p0, hcore1, pi1, p1):
    r"""Calculate the first order fock matrix, defined by

    .. math::

        \mathbf{F^{(1)}}=\mathbf{H_{core}^{(1)}}+\mathbf{\Pi^{(1)}}\cdot
        \mathbf{P^{(0)}}+\mathbf{\Pi^{(0)}}\cdot\mathbf{P^{(1)}}

    :param pi0: 4 dimensional zeroth order Pi tensor of 2 electron integrals.
    :param p0: Zeroth order density matrix.
    :param hcore1: First order core hamiltonian for a particular perturbation.
    :param pi1: 4 dimensional first order Pi tensor of differentiated 2
            electron integrals for a particular perturbation.
    :param p1: First order density matrix.
    :returns: First order fock matrix.
    """
    # Two electron contributions: contract each Pi tensor with the density
    # matrix of the complementary order.
    contraction_0 = np.einsum("ijkl,lj->ik", pi0, p1, optimize='optimal')
    contraction_1 = np.einsum("ijkl,lj->ik", pi1, p0, optimize='optimal')

    return hcore1 + contraction_0 + contraction_1
def get_g1_x(f1_x, s1_x, eta0, nelec):
    r"""Calculate the transformed first order correction to the coefficient
    matrix G, defined element wise by the following 2 equations:

    .. math:: \tilde{G}_{ij}^{(1)} = \frac{\tilde{F}_{ij}^{(1)}
              - \mathbf{\eta}_j^{(0)}\tilde{S}_{ij}^{(1)}}
              {\mathbf{\eta}_j^{(0)} - \mathbf{\eta}_i^{(0)}}

    .. math:: \tilde{G}_{jj}^{(1)} = -\frac{1}{2}\tilde{S}_{jj}^{(1)}

    :param f1_x: First order transformed fock matrix.
    :param s1_x: First order transformed overlap matrix.
    :param eta0: Vector of zeroth order energy eigenvalues.
    :param nelec: Number of electrons in the molecule, which fixes the number
            of occupied orbitals.
    :returns: Transformed matrix G(1).
    """
    nbasis = f1_x.shape[1]
    g1_x = np.zeros_like(f1_x)

    # Occupied-virtual block: occupied rows, virtual columns.
    for occ in range(nelec):
        for virt in range(nelec, nbasis):
            numerator = f1_x[occ, virt] - eta0[virt] * s1_x[occ, virt]
            g1_x[occ, virt] = numerator / (eta0[virt] - eta0[occ])

    # Diagonal elements come solely from the overlap derivative.
    for idx in range(nbasis):
        g1_x[idx, idx] = -0.5 * s1_x[idx, idx]

    return g1_x
def g1_iteration(complexsymmetric: bool, mol, atom, coord, nelec,
                 g0_ghf = None):
    r"""Calculates the first order coefficient matrix self consistently given
    that :math:'\mathbf{G^{(1)}}' and :math:'\mathbf{F^{(1)}}' depend on one
    another.

    :param complexsymmetric: If :const:'True', :math:'\diamond = \star'.
            If :const:'False', :math:'\diamond = \hat{e}'. Used here when
            transforming quantities using the X matrix.
    :param mol: Molecule class as defined by PySCF.
    :param atom: Input for which atom is being perturbed, with atoms numbered
            according to the PySCF molecule.
    :param coord: Input for along which coordinate the pertubation of the atom
            lies.
            coord = '0' for x
                    '1' for y
                    '2' for z
    :param nelec: The number of electrons in the molecule, determines which
            orbitals are occupied and virtual.
    :param g0_ghf: An optional argument through which the user can supply a
            zeroth order molecular coefficient matrix (used directly as g0).
            By default this is set to None and g0 is obtained from a PySCF
            RHF calculation, expanded to the GHF spin-orbital basis.
    :returns: The converged first order coefficient matrix.
    """
    # Obtain the zeroth order coefficients: either supplied by the caller or
    # computed via PySCF RHF and expanded to the GHF basis.
    if g0_ghf is None:
        m = scf.RHF(mol)
        m.verbose = 0
        m.kernel()
        g0_rhf = m.mo_coeff
        g0 = rhf_to_ghf(g0_rhf, nelec)
    else:
        g0 = g0_ghf

    # The zeroth order coefficients serve as the transformation matrix X.
    x = g0

    # Zeroth and first order quantities in the AO (spin-orbital) basis.
    s0 = mol.intor("int1e_ovlp")
    s0 = np.kron(np.identity(2), s0)
    # NOTE(review): s0 is computed above but never used below — confirm
    # whether it can be removed.
    s1 = get_s1(mol, atom, coord)
    p0 = get_p0(g0, complexsymmetric, nelec)
    hcore0 = get_hcore0(mol)
    pi0 = get_pi0(mol)
    f0 = get_f0(hcore0, pi0, p0)
    hcore1 = get_hcore1(mol, atom, coord)
    pi1 = get_pi1(mol, atom, coord)

    # Transform everything with X: conjugate transpose in the Hermitian case,
    # plain transpose in the complex symmetric case.
    if not complexsymmetric:
        p0_x = np.linalg.multi_dot([x.T.conj(), p0, x])
        s1_x = np.linalg.multi_dot([x.T.conj(), s1, x])
        f0_x = np.linalg.multi_dot([x.T.conj(), f0, x])
        pi0_x = np.einsum("pi,ijkl,jr,kq,sl->prqs",
                          x.T.conj(), pi0, x, x, x.T.conj(),
                          optimize = 'optimal')
        hcore1_x = np.linalg.multi_dot([x.T.conj(), hcore1, x])
        pi1_x = np.einsum("pi,ijkl,jr,kq,sl->prqs",
                          x.T.conj(), pi1, x, x, x.T.conj(),
                          optimize = 'optimal')
    else:
        p0_x = np.linalg.multi_dot([x.T, p0, x])
        s1_x = np.linalg.multi_dot([x.T, s1, x])
        f0_x = np.linalg.multi_dot([x.T, f0, x])
        pi0_x = np.einsum("pi,ijkl,jr,kq,sl->prqs",
                          x.T, pi0, x, x, x.T,
                          optimize = 'optimal')
        hcore1_x = np.linalg.multi_dot([x.T, hcore1, x])
        pi1_x = np.einsum("pi,ijkl,jr,kq,sl->prqs",
                          x.T, pi1, x, x, x.T,
                          optimize = 'optimal')

    # Diagonalise the transformed zeroth order Fock matrix and sort the
    # eigenvectors by ascending eigenvalue.
    eta0, g0_x = np.linalg.eig(f0_x)
    index = np.argsort(eta0)
    eta0 = eta0[index]
    g0_x = g0_x[:, index] #Order g0 columns according to eigenvalues

    # Self consistent loop: G(1) determines P(1), hence F(1), which yields a
    # new G(1); iterate until G(1) stops changing (element-wise, to 1e-14).
    g1_x_guess = np.zeros_like(g0)
    g1_x = g1_x_guess
    iter_num = 0
    delta_g1_x = 1
    while delta_g1_x > 1e-14:
        iter_num += 1
        p1_x = get_p1(g0_x, g1_x, complexsymmetric, nelec)
        f1_x = get_f1(pi0_x, p0_x, hcore1_x, pi1_x, p1_x)
        g1_x_last = g1_x
        g1_x = get_g1_x(f1_x, s1_x, eta0, nelec)
        delta_g1_x = np.max(np.abs(g1_x - g1_x_last))

    # Transform the converged coefficients back out of the X basis.
    g1 = np.dot(x, g1_x)
    return g1
def get_e1_nuc(mol, atom, coord):
    r"""Calculate the first order nuclear repulsion energy.

    This is given by the following expression, where X_A is a particular
    cartesian coordinate of atom A:

    .. math::

        $$E^{(1)}_{nuc} = \frac{\partial E^{(0)}_{nuc}}{\partial X_A}=
        \sum\limits_{B \neq A}^N
        \left(X_B-X_A\right)\frac{Z_AZ_B}{R^3_{AB}}$$

    :param mol: The pyscf molecule class, from which the nuclear coordinates
            and atomic numbers are taken.
    :param atom: Index of the perturbed atom, numbered according to the
            PySCF molecule.
    :param coord: Coordinate along which the atom is perturbed:
            0 for x, 1 for y, 2 for z.
    :returns: The first order nuclear repulsion energy.
    """
    a = atom
    coord_a = mol.atom_coord(a)
    e1_nuc = 0

    for b in range(len(mol.atom_charges())):
        if b == atom:
            continue  # no self interaction

        # |R_A - R_B| and its cube.
        separation = coord_a - mol.atom_coord(b)
        r_ab = np.sqrt(np.dot(separation, separation))

        # Signed displacement of B from A along the perturbed coordinate.
        x_ab = mol.atom_coord(b)[coord] - coord_a[coord]

        e1_nuc += x_ab * (mol.atom_charge(a) * mol.atom_charge(b)) / r_ab ** 3

    return e1_nuc
def get_e1_elec(mol, g1, atom, coord, complexsymmetric: bool, nelec,
                g0_ghf = None):
    r"""Calculate the first order electronic energy.

    Defined as follows:

    .. math::

        E^{(1)}_{elec} = Tr\left(\mathbf{F'}^{(0)}\mathbf{P}^{(1)}
        + \mathbf{F'}^{(1)}\mathbf{P}^{(0)}\right)

    where

    .. math::

        \mathbf{F'}^{(0)} = \mathbf{H_{core}^{(0)}}
        + \frac{1}{2}\mathbf{\Pi^{(0)}}\cdot\mathbf{P^{(0)}}

    and

    .. math::

        \mathbf{F'}^{(1)} = \mathbf{H_{core}^{(1)}}
        + \frac{1}{2}\mathbf{\Pi^{(1)}}\cdot\mathbf{P^{(0)}}
        + \frac{1}{2}\mathbf{\Pi^{(0)}}\cdot\mathbf{P^{(1)}}

    :param mol: The pyscf molecule class, from which the nuclear coordinates
            and atomic numbers are taken.
    :param g1: The first order coefficient matrix.
    :param atom: Index of the perturbed atom, numbered according to the
            PySCF molecule.
    :param coord: Coordinate along which the atom is perturbed:
            0 for x, 1 for y, 2 for z.
    :param complexsymmetric: If :const:'True', :math:'\diamond = \star'.
            If :const:'False', :math:'\diamond = \hat{e}'.
    :param nelec: The number of electrons in the molecule, determines which
            orbitals are occupied and virtual.
    :param g0_ghf: An optional zeroth order coefficient matrix; when None,
            g0 is obtained from a PySCF RHF calculation.
    :returns: The first order electronic energy.
    """
    # Zeroth order coefficients: supplied or computed via PySCF RHF.
    if g0_ghf is None:
        mf = scf.RHF(mol)
        mf.verbose = 0
        mf.kernel()
        g0 = rhf_to_ghf(mf.mo_coeff, nelec)
    else:
        g0 = g0_ghf

    # Density matrices and integral tensors at both orders.
    p0 = get_p0(g0, complexsymmetric, nelec)
    p1 = get_p1(g0, g1, complexsymmetric, nelec)
    hcore0 = get_hcore0(mol)
    pi0 = get_pi0(mol)
    hcore1 = get_hcore1(mol, atom, coord)
    pi1 = get_pi1(mol, atom, coord)

    # One electron pieces of F'(0) and F'(1).
    f0_prime_1e = hcore0
    f1_prime_1e = hcore1

    # Two electron pieces carry a factor of one half.
    f0_prime_2e = 0.5 * np.einsum("ijkl,jl->ik", pi0, p0)
    f1_prime_2e = (0.5 * np.einsum("ijkl,jl->ik", pi1, p0)
                   + 0.5 * np.einsum("ijkl,jl->ik", pi0, p1))

    # Trace of F'(0) P(1) + F'(1) P(0), split by electron count.
    one_electron = (np.einsum("ij,ji->", f0_prime_1e, p1)
                    + np.einsum("ij,ji->", f1_prime_1e, p0))
    two_electron = (np.einsum("ij,ji->", f0_prime_2e, p1)
                    + np.einsum("ij,ji->", f1_prime_2e, p0))

    return one_electron + two_electron
def get_e1_scf(mol, atom, coord, nelec, complexsymmetric, g0_ghf=None):
    r"""Calculate the total first order energy for a particular perturbation.

    The total is the first order electronic energy, computed from the self
    consistently converged first order coefficient matrix, plus the first
    order nuclear repulsion energy.

    :param mol: Molecule class as defined by PySCF.
    :param atom: Index of the perturbed atom, numbered according to the
            PySCF molecule.
    :param coord: Coordinate along which the atom is perturbed:
            0 for x, 1 for y, 2 for z.
    :param nelec: The number of electrons in the molecule, determines which
            orbitals are occupied and virtual.
    :param complexsymmetric: If :const:'True', :math:'\diamond = \star'.
            If :const:'False', :math:'\diamond = \hat{e}'.
    :param g0_ghf: An optional zeroth order coefficient matrix; when None,
            the downstream functions obtain g0 from a PySCF RHF calculation.
    :returns: The total first order energy.
    """
    # g1_iteration and get_e1_elec both default g0_ghf to None, so it can be
    # forwarded unconditionally — no need to branch on it here.
    g1 = g1_iteration(complexsymmetric, mol, atom, coord, nelec, g0_ghf)
    e1_elec = get_e1_elec(mol, g1, atom, coord, complexsymmetric, nelec,
                          g0_ghf)
    e1_nuc = get_e1_nuc(mol, atom, coord)
    return e1_elec + e1_nuc
def write_e1_mat(mol, nelec, complexsymmetric, g0_ghf = None):
    r"""Write a matrix of GHF energy derivatives for each atom and coordinate.

    :param mol: PySCF molecule object.
    :param nelec: The number of electrons in the molecule, determines which
            orbitals are occupied and virtual.
    :param complexsymmetric: If :const:'True', :math:'\diamond = \star'.
            If :const:'False', :math:'\diamond = \hat{e}'.
    :param g0_ghf: An optional argument for which the user can specify a g0
            zeroth order molecular coefficient matrix. By default this is set
            to None and g0 in RHF will be obtained from PySCF.
    :returns: natom x 3 matrix of GHF energy derivatives.
    """
    e1_mat = np.zeros((mol.natm, 3))

    for i in range(mol.natm):
        for j in range(3):
            # g1_iteration and get_e1_elec both default g0_ghf to None, so it
            # is forwarded unconditionally instead of branching on it.
            g1 = g1_iteration(complexsymmetric, mol, i, j, nelec, g0_ghf)
            e1_elec = get_e1_elec(mol, g1, i, j, complexsymmetric, nelec,
                                  g0_ghf)
            e1_nuc = get_e1_nuc(mol, i, j)
            # Plain assignment: e1_mat starts zeroed and each cell is
            # written exactly once.
            e1_mat[i, j] = e1_elec + e1_nuc

    print("-------------- First order GHF energies --------------")
    print('Atom         x            y            z')
    for n in range(mol.natm):
        print('%d %s   %15.10f %15.10f %15.10f' %
              (n, mol.atom_symbol(n), e1_mat[n, 0], e1_mat[n, 1],
               e1_mat[n, 2]))
    print("------------------------------------------------------")

    return e1_mat
def write_e1_single(mol, nelec, atom, coord, complexsymmetric, g0_ghf = None):
    r"""Give the energy derivative for a specific perturbation defined by an
    atom and a coordinate.

    :param mol: PySCF molecule object.
    :param nelec: The number of electrons in the molecule, determines which
            orbitals are occupied and virtual.
    :param atom: Input for which atom is being perturbed, with atoms numbered
            according to the PySCF molecule.
    :param coord: Input for along which coordinate the pertubation of the atom
            lies: 0 for x, 1 for y, 2 for z.
    :param complexsymmetric: If :const:'True', :math:'\diamond = \star'.
            If :const:'False', :math:'\diamond = \hat{e}'.
    :param g0_ghf: An optional argument for which the user can specify a g0
            zeroth order molecular coefficient matrix. By default this is set
            to None and g0 in RHF will be obtained from PySCF.
    :returns: Single scalar for the energy derivative of the specific
            perturbation.
    """
    # FIX: the original if/elif chain left `pert` unassigned for an invalid
    # coord, causing a NameError at the print below; fall back to "?".
    pert = {0: "x", 1: "y", 2: "z"}.get(coord, "?")

    # g1_iteration and get_e1_elec both default g0_ghf to None, so it is
    # forwarded unconditionally instead of branching on it.
    g1 = g1_iteration(complexsymmetric, mol, atom, coord, nelec, g0_ghf)
    e1_elec = get_e1_elec(mol, g1, atom, coord, complexsymmetric, nelec,
                          g0_ghf)
    # FIX: compute the nuclear term once and reuse it; the original called
    # get_e1_nuc three times with identical arguments.
    e1_nuc = get_e1_nuc(mol, atom, coord)
    e1 = e1_elec + e1_nuc

    print("The molecule has atoms:")
    for n in range(mol.natm):
        print(n, mol.atom_pure_symbol(n), "at coordinates", mol.atom_coord(n))
    print("\nThe", mol.atom_pure_symbol(atom), "atom with index", atom,
          "at coordinates", mol.atom_coord(atom),
          "is perturbed in the positive", pert, "direction\n")
    print("########################")
    print("First order electronic energy:\n", e1_elec)
    print("First order nuclear repulsion energy:\n", e1_nuc)
    print("Total first order energy:\n", e1)
    print("########################\n")

    return e1
|
988,842 | ad3e822a848fb09cb289c5f4a5df3359ac7a962e | '''
Membership Application Views
membership/views.py
@author Teerapat Kraisrisirikul (810Teams)
'''
from datetime import datetime
from django.contrib.auth import get_user_model
from django.utils.translation import gettext as _
from rest_framework import permissions, status, viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from clubs_and_events.settings import CLUB_VALID_MONTH, CLUB_VALID_DAY, CLUB_ADVANCED_RENEWAL
from community.models import Club, Event, CommunityEvent, Lab, Community
from community.permissions import IsRenewableClub, IsMemberOfBaseCommunity
from core.permissions import IsInPubliclyVisibleCommunity, IsInActiveCommunity, IsDeputyLeaderOfCommunity
from core.utils.filters import filter_queryset, filter_queryset_permission, get_latest_membership_log, limit_queryset
from core.utils.filters import get_active_community_ids
from core.utils.general import has_instance, remove_duplicates
from membership.models import Request, Membership, Invitation, CustomMembershipLabel, Advisory, MembershipLog
from membership.models import ApprovalRequest
from membership.permissions import IsAbleToRetrieveRequest, IsAbleToUpdateRequest, IsAbleToDeleteRequest
from membership.permissions import IsAbleToRetrieveInvitation, IsAbleToUpdateInvitation, IsAbleToDeleteInvitation
from membership.permissions import IsAbleToUpdateMembership, IsAbleToUpdateCustomMembershipLabel
from membership.permissions import IsAbleToRetrieveApprovalRequest, IsAbleToUpdateApprovalRequest
from membership.permissions import IsAbleToDeleteApprovalRequest, IsAbleToCreateAndDeleteAdvisory
from membership.serializers import ExistingRequestSerializer, NotExistingRequestSerializer
from membership.serializers import ExistingInvitationSerializer, NotExistingInvitationSerializer
from membership.serializers import MembershipSerializer, MembershipLogSerializer, AdvisorySerializer
from membership.serializers import NotExistingCustomMembershipLabelSerializer, ExistingCustomMembershipLabelSerializer
from membership.serializers import ExistingApprovalRequestSerializer, NotExistingApprovalRequestSerializer
from notification.notifier import notify, notify_membership_log
class RequestViewSet(viewsets.ModelViewSet):
    ''' Request view set: join requests made by users to communities. '''
    queryset = Request.objects.all()
    http_method_names = ('get', 'post', 'put', 'patch', 'delete', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions, which depend on the HTTP method. '''
        if self.request.method == 'GET':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToRetrieveRequest())
        elif self.request.method == 'POST':
            # Any authenticated user may create a request; further validation
            # is handled by the serializer.
            return (permissions.IsAuthenticated(),)
        elif self.request.method in ('PUT', 'PATCH'):
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateRequest())
        elif self.request.method == 'DELETE':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToDeleteRequest())
        return tuple()

    def get_serializer_class(self):
        ''' Get serializer class: creation uses the "not existing" variant. '''
        if self.request.method == 'POST':
            return NotExistingRequestSerializer
        return ExistingRequestSerializer

    def list(self, request, *args, **kwargs):
        ''' List requests, filtered by permissions and by the optional
        `user`, `community` and `status` query parameters, with optional
        result limiting. '''
        queryset = self.get_queryset()
        queryset = filter_queryset_permission(queryset, request, self.get_permissions())
        queryset = filter_queryset(queryset, request, target_param='user', is_foreign_key=True)
        queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)
        queryset = filter_queryset(queryset, request, target_param='status', is_foreign_key=False)
        queryset = limit_queryset(queryset, request)

        serializer = self.get_serializer(queryset, many=True)

        return Response(serializer.data)

    def create(self, request, *args, **kwargs):
        ''' Create request. Requests to community events whose base community
        the user already belongs to are auto-approved; otherwise community
        staff (positions 1-3) are notified of the pending request. '''
        serializer = self.get_serializer(data=request.data, many=False)
        serializer.is_valid(raise_exception=True)
        obj = serializer.save()

        # Requesting to join the community event
        # If already a member of base community, you can join without waiting to be approved.
        instant_join = False
        if has_instance(obj.community, CommunityEvent):
            community_event = CommunityEvent.objects.get(pk=obj.community.id)
            if IsMemberOfBaseCommunity().has_object_permission(request, None, community_event):
                # Check for past membership to renew it, otherwise, create a new one.
                try:
                    membership = Membership.objects.get(user_id=obj.user.id, community_id=obj.community.id)
                    # Renewal resets the member to an ordinary, active member.
                    membership.position = 0
                    membership.status = 'A'
                    membership.save()
                except Membership.DoesNotExist:
                    membership = Membership.objects.create(user_id=obj.user.id, community_id=obj.community.id)

                # Update request status ('A' marks it accepted).
                obj.status = 'A'
                obj.save()

                # Skip request notification, use membership log notification instead
                instant_join = True
                notify_membership_log(get_latest_membership_log(membership))

        # Notification: alert community staff of the pending request.
        if not instant_join:
            users = [i.user for i in Membership.objects.filter(
                community_id=obj.community.id, position__in=(1, 2, 3), status='A'
            )]
            notify(users=users, obj=obj)

        serializer = self.get_serializer(obj)

        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def update(self, request, *args, **kwargs):
        ''' Update request. Acceptance renews or creates the membership and
        sends both a request-accepted and a new-member notification. '''
        serializer = self.get_serializer(self.get_object(), data=request.data, many=False)
        serializer.is_valid(raise_exception=True)
        obj = serializer.save()

        # If the request is accepted, check for past membership to renew it, otherwise, create a new one.
        if obj.status == 'A':
            try:
                membership = Membership.objects.get(user_id=obj.user.id, community_id=obj.community.id)
                membership.position = 0
                membership.status = 'A'
                membership.save()
            except Membership.DoesNotExist:
                membership = Membership.objects.create(user_id=obj.user.id, community_id=obj.community.id)

            # Request accepted notification
            notify(users=(obj.user,), obj=obj)

            # New member joined notification
            notify_membership_log(get_latest_membership_log(membership))

        return Response(serializer.data, status=status.HTTP_200_OK)
class InvitationViewSet(viewsets.ModelViewSet):
    ''' Invitation view set: invitations sent by members to prospective
    members of a community. '''
    queryset = Invitation.objects.all()
    http_method_names = ('get', 'post', 'put', 'patch', 'delete', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions, which depend on the HTTP method. '''
        method = self.request.method
        if method == 'GET':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToRetrieveInvitation())
        if method == 'POST':
            return (permissions.IsAuthenticated(),)
        if method in ('PUT', 'PATCH'):
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateInvitation())
        if method == 'DELETE':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToDeleteInvitation())
        return tuple()

    def get_serializer_class(self):
        ''' Get serializer class: creation uses the "not existing" variant. '''
        if self.request.method == 'POST':
            return NotExistingInvitationSerializer
        return ExistingInvitationSerializer

    def list(self, request, *args, **kwargs):
        ''' List invitations, applying permission filtering followed by the
        optional query-parameter filters and result limiting. '''
        queryset = filter_queryset_permission(self.get_queryset(), request, self.get_permissions())

        for param, is_fk in (('invitor', True), ('invitee', True),
                             ('community', True), ('status', False)):
            queryset = filter_queryset(queryset, request, target_param=param, is_foreign_key=is_fk)

        queryset = limit_queryset(queryset, request)
        serializer = self.get_serializer(queryset, many=True)

        return Response(serializer.data)

    def create(self, request, *args, **kwargs):
        ''' Create invitation and notify the invitee. '''
        serializer = self.get_serializer(data=request.data, many=False)
        serializer.is_valid(raise_exception=True)
        invitation = serializer.save()

        notify((invitation.invitee,), invitation)

        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def update(self, request, *args, **kwargs):
        ''' Update invitation. On acceptance ('A'), renew a past membership
        if one exists, otherwise create a new one, then notify. '''
        serializer = self.get_serializer(self.get_object(), data=request.data, many=False)
        serializer.is_valid(raise_exception=True)
        invitation = serializer.save()

        if invitation.status == 'A':
            try:
                membership = Membership.objects.get(
                    user_id=invitation.invitee.id, community_id=invitation.community.id
                )
                # Renewal resets the member to an ordinary, active member.
                membership.position = 0
                membership.status = 'A'
                membership.save()
            except Membership.DoesNotExist:
                membership = Membership.objects.create(
                    user_id=invitation.invitee.id, community_id=invitation.community.id
                )

            # Notify about the membership change.
            notify_membership_log(get_latest_membership_log(membership))

        return Response(serializer.data, status=status.HTTP_200_OK)
class MembershipViewSet(viewsets.ModelViewSet):
    ''' Membership view set. Memberships are only read and updated through
    this endpoint, never created or deleted. '''
    queryset = Membership.objects.all()
    serializer_class = MembershipSerializer
    http_method_names = ('get', 'put', 'patch', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions, which depend on the HTTP method. '''
        if self.request.method == 'GET':
            return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())
        elif self.request.method in ('PUT', 'PATCH'):
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateMembership())
        return tuple()

    def list(self, request, *args, **kwargs):
        ''' List memberships, ordered by position (highest first), filtered
        by permissions, query parameters, community type and name search. '''
        # Retrieve and order queryset
        queryset = self.get_queryset().order_by('-position')

        # Queryset Filters
        queryset = filter_queryset_permission(queryset, request, self.get_permissions())
        queryset = filter_queryset(queryset, request, target_param='user', is_foreign_key=True)
        queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)
        queryset = filter_queryset(queryset, request, target_param='position', is_foreign_key=False)
        queryset = filter_queryset(queryset, request, target_param='status', is_foreign_key=False)

        # Community Type Filtering
        try:
            query = request.query_params.get('community_type')
            if query == 'club':
                club_ids = [i.id for i in Club.objects.all()]
                queryset = queryset.filter(community_id__in=club_ids)
            elif query == 'event':
                # 'event' means events that are NOT community events.
                community_event_ids = [i.id for i in CommunityEvent.objects.all()]
                event_ids = [i.id for i in Event.objects.all() if i.id not in community_event_ids]
                queryset = queryset.filter(community_id__in=event_ids)
            elif query == 'community_event':
                community_event_ids = [i.id for i in CommunityEvent.objects.all()]
                queryset = queryset.filter(community_id__in=community_event_ids)
            elif query == 'lab':
                lab_ids = [i.id for i in Lab.objects.all()]
                queryset = queryset.filter(community_id__in=lab_ids)
        except ValueError:
            queryset = None

        # Custom Searching: substring match on the member's name.
        try:
            query = request.query_params.get('search')
            if query is not None:
                filtered_ids = [i.id for i in queryset if query in i.user.name]
                queryset = queryset.filter(pk__in=filtered_ids)
        except ValueError:
            queryset = None

        # Query Set Limiting
        queryset = limit_queryset(queryset, request)

        # Serialize and return response
        serializer = self.get_serializer(queryset, many=True)

        return Response(serializer.data)

    def update(self, request, *args, **kwargs):
        ''' Update membership. Promoting someone to position 3 or retiring a
        member also updates the requesting user's own membership. '''
        # NOTE(review): the target membership is fetched from the database
        # twice here (and again below for its community id) — this could be
        # fetched once.
        old_position = Membership.objects.get(pk=kwargs['pk']).position
        old_status = Membership.objects.get(pk=kwargs['pk']).status

        serializer = self.get_serializer(self.get_object(), data=request.data, many=False)
        serializer.is_valid(raise_exception=True)
        obj = serializer.save()

        # If the membership position is updated to 3, demote own position to 2.
        if old_position != obj.position and obj.position == 3:
            membership = Membership.objects.get(
                user_id=request.user.id,
                community_id=Membership.objects.get(pk=kwargs['pk']).community.id
            )
            membership.position = 2
            membership.updated_by = request.user
            membership.save()

        # If the status is set to retired, get demoted to a normal member.
        if old_status != obj.status and obj.status == 'R':
            membership = Membership.objects.get(
                user_id=request.user.id,
                community_id=Membership.objects.get(pk=kwargs['pk']).community.id
            )
            membership.position = 0
            membership.updated_by = request.user
            membership.save()

        # Notification
        notify_membership_log(get_latest_membership_log(obj))

        return Response(serializer.data, status=status.HTTP_200_OK)
class CustomMembershipLabelViewSet(viewsets.ModelViewSet):
    ''' Custom membership label view set. '''
    queryset = CustomMembershipLabel.objects.all()
    http_method_names = ('get', 'post', 'put', 'patch', 'delete', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions, which depend on the HTTP method. '''
        method = self.request.method
        if method == 'GET':
            return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())
        if method == 'POST':
            return (permissions.IsAuthenticated(),)
        if method in ('PUT', 'PATCH'):
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateCustomMembershipLabel())
        if method == 'DELETE':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsDeputyLeaderOfCommunity())
        return tuple()

    def get_serializer_class(self):
        ''' Get serializer class: creation uses the "not existing" variant. '''
        if self.request.method == 'POST':
            return NotExistingCustomMembershipLabelSerializer
        return ExistingCustomMembershipLabelSerializer

    def list(self, request, *args, **kwargs):
        ''' List custom membership labels. When a `membership` query param is
        supplied, a single match is returned bare, and no match is a 404. '''
        queryset = filter_queryset_permission(self.get_queryset(), request, self.get_permissions())
        queryset = filter_queryset(queryset, request, target_param='membership', is_foreign_key=False)

        membership_param = request.query_params.get('membership')
        if membership_param is not None and len(queryset) == 0:
            return Response(status=status.HTTP_404_NOT_FOUND)

        if membership_param is not None and len(queryset) == 1:
            serializer = self.get_serializer(queryset[0], many=False)
        else:
            serializer = self.get_serializer(queryset, many=True)

        return Response(serializer.data)
class MembershipLogViewSet(viewsets.ModelViewSet):
    ''' Membership log view set (read-only). '''
    queryset = MembershipLog.objects.all()
    serializer_class = MembershipLogSerializer
    http_method_names = ('get', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions. '''
        if self.request.method == 'GET':
            return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())
        return tuple()

    def list(self, request, *args, **kwargs):
        ''' List membership logs, filtered by user, community, position and
        status, with optional exclusion of current (open-ended) logs. '''
        queryset = self.get_queryset()
        queryset = filter_queryset_permission(queryset, request, self.get_permissions())

        try:
            # Filter selected user
            query = request.query_params.get('user')
            if query is not None:
                membership_ids = [i.id for i in Membership.objects.filter(user_id=query)]
                queryset = queryset.filter(membership_id__in=membership_ids)

            # Filter selected community
            query = request.query_params.get('community')
            if query is not None:
                membership_ids = [i.id for i in Membership.objects.filter(community_id=query)]
                queryset = queryset.filter(membership_id__in=membership_ids)

            # Filter out current memberships (logs without an end datetime)
            query = request.query_params.get('exclude_current_memberships')
            # SECURITY FIX: this flag was previously passed to eval(), which
            # executes arbitrary Python supplied in an untrusted query
            # string. Parse it as a boolean flag instead.
            if query is not None and query.strip().lower() in ('true', '1'):
                queryset = queryset.exclude(end_datetime=None)
        except ValueError:
            queryset = None

        queryset = filter_queryset(queryset, request, target_param='position', is_foreign_key=False)
        queryset = filter_queryset(queryset, request, target_param='status', is_foreign_key=False)
        queryset = limit_queryset(queryset, request)

        serializer = self.get_serializer(queryset, many=True)

        return Response(serializer.data)
class AdvisoryViewSet(viewsets.ModelViewSet):
    ''' Advisory view set. '''
    queryset = Advisory.objects.all()
    serializer_class = AdvisorySerializer
    http_method_names = ('get', 'post', 'delete', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions. '''
        if self.request.method == 'GET':
            return (IsInPubliclyVisibleCommunity(),)
        elif self.request.method in ('POST', 'DELETE'):
            return (permissions.IsAuthenticated(), IsAbleToCreateAndDeleteAdvisory())
        return tuple()

    def list(self, request, *args, **kwargs):
        ''' List advisories, optionally filtered by whether today's date
        falls inside the advisory period (`is_active` query parameter). '''
        queryset = self.get_queryset()
        queryset = filter_queryset_permission(queryset, request, self.get_permissions())
        queryset = filter_queryset(queryset, request, target_param='advisor', is_foreign_key=True)
        queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)

        try:
            query = request.query_params.get('is_active')
            if query is not None:
                # SECURITY FIX: this flag was previously passed to eval(),
                # which executes arbitrary Python supplied in an untrusted
                # query string. Parse it as a boolean flag instead.
                wants_active = query.strip().lower() in ('true', '1')
                today = datetime.now().date()
                active_ids = [i.id for i in queryset if i.start_date <= today <= i.end_date]
                if wants_active:
                    queryset = queryset.filter(pk__in=active_ids)
                else:
                    queryset = queryset.exclude(pk__in=active_ids)
        except ValueError:
            queryset = None

        queryset = limit_queryset(queryset, request)

        serializer = self.get_serializer(queryset, many=True)

        return Response(serializer.data)
class ApprovalRequestViewSet(viewsets.ModelViewSet):
    ''' Approval request view set '''
    queryset = ApprovalRequest.objects.all()
    http_method_names = ('get', 'post', 'put', 'patch', 'delete', 'head', 'options')

    def get_permissions(self):
        ''' Get permissions '''
        if self.request.method == 'GET':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToRetrieveApprovalRequest())
        elif self.request.method == 'POST':
            return (permissions.IsAuthenticated(),)
        elif self.request.method in ('PUT', 'PATCH'):
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateApprovalRequest())
        elif self.request.method == 'DELETE':
            return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToDeleteApprovalRequest())
        return (permissions.IsAuthenticated(),)

    def get_serializer_class(self):
        ''' Get serializer class: creation uses a separate serializer. '''
        if self.request.method == 'POST':
            return NotExistingApprovalRequestSerializer
        return ExistingApprovalRequestSerializer

    def list(self, request, *args, **kwargs):
        ''' List approval requests '''
        queryset = self.get_queryset()
        queryset = filter_queryset_permission(queryset, request, self.get_permissions())
        queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)
        queryset = filter_queryset(queryset, request, target_param='status', is_foreign_key=False)
        queryset = limit_queryset(queryset, request)
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)

    def update(self, request, *args, **kwargs):
        ''' Update an approval request; on approval, activate the community.

        Approving a club marks it official and extends its validity period;
        approving a stand-alone event marks it approved and notifies all
        users if the event has not yet ended.
        '''
        # BUGFIX: honor PATCH semantics. DRF's partial_update() forwards
        # partial=True, but the original discarded it and always validated
        # the payload as a full update.
        partial = kwargs.pop('partial', False)
        serializer = self.get_serializer(self.get_object(), data=request.data, many=False, partial=partial)
        serializer.is_valid(raise_exception=True)
        obj = serializer.save()
        if obj.status == 'A':
            if has_instance(obj.community, Club):
                club = Club.objects.get(pk=obj.community.id)
                club.is_official = True
                today = datetime.now().date()
                if IsRenewableClub().has_object_permission(request, None, club):
                    valid_through = datetime(today.year, CLUB_VALID_MONTH, CLUB_VALID_DAY).date()
                    # Renewals close to the deadline roll over to next year.
                    if today >= valid_through - CLUB_ADVANCED_RENEWAL:
                        valid_through = datetime(today.year + 1, CLUB_VALID_MONTH, CLUB_VALID_DAY).date()
                    club.valid_through = valid_through
                club.save()
            elif has_instance(obj.community, Event) and not has_instance(obj.community, CommunityEvent):
                event = Event.objects.get(pk=obj.community.id)
                event.is_approved = True
                event.save()
                # Notification
                if datetime.today().date() <= event.end_date:
                    notify(get_user_model().objects.all(), event)
        return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET'])
def get_membership_default_labels(request):
    ''' Get membership default labels API

    Returns, for each membership position level (3 = head down to 0 = member),
    the default English and Thai labels per community type.
    '''
    def entry(position, labels, labels_th):
        # Shape one default-label record for a membership position level.
        return {'position': position, 'labels': labels, 'labels_th': labels_th}

    return Response([
        entry(
            3,
            {
                'club': _('President'),
                'event': _('President'),
                'community_event': _('Event Creator'),
                'lab': _('Lab Supervisor')
            },
            {
                'club': _('ประธาน'),
                'event': _('ประธาน'),
                'community_event': _('ผู้สร้างกิจกรรม'),
                'lab': _('อาจารย์ผู้ดูแลห้องปฏิบัติการ')
            },
        ),
        entry(
            2,
            {
                'club': _('Vice President'),
                'event': _('Vice President'),
                'community_event': _('Event Co-Creator'),
                'lab': _('Lab Co-Supervisor')
            },
            {
                'club': _('รองประธาน'),
                'event': _('รองประธาน'),
                'community_event': _('ผู้ร่วมสร้างกิจกรรม'),
                'lab': _('อาจารย์ผู้ร่วมดูแลห้องปฏิบัติการ')
            },
        ),
        entry(
            1,
            {
                'club': _('Staff'),
                'event': _('Staff'),
                'community_event': _('Staff'),
                'lab': _('Lab Helper')
            },
            {
                'club': _('ทีมงาน'),
                'event': _('ทีมงาน'),
                'community_event': _('ทีมงาน'),
                'lab': _('ผู้ช่วยดูแลห้องปฏิบัติการ')
            },
        ),
        entry(
            0,
            {
                'club': _('Member'),
                'event': _('Participator'),
                'community_event': _('Participator'),
                'lab': _('Lab Member')
            },
            {
                'club': _('สมาชิก'),
                'event': _('ผู้เข้าร่วม'),
                'community_event': _('ผู้เข้าร่วม'),
                'lab': _('สมาชิกห้องปฏิบัติการ')
            },
        ),
    ], status=status.HTTP_200_OK)
@api_view(['GET'])
def get_past_memberships(request, user_id):
    ''' Get past memberships of a certain user API

    For each community the user previously belonged to, aggregates the
    closed membership logs into one record with overall start/end datetimes
    and the highest position held (with that position's date range).
    Unauthenticated callers only see publicly visible communities.
    '''
    # Validate user
    try:
        get_user_model().objects.get(pk=user_id)
    except get_user_model().DoesNotExist:
        return Response({'message': _('User not found.')}, status=status.HTTP_404_NOT_FOUND)
    # Filter past memberships of the user in active communities
    # ('A' appears to be the "active" membership status — excluded here).
    memberships = Membership.objects.filter(
        user_id=user_id, community_id__in=get_active_community_ids()
    ).exclude(status='A')
    # Exclude non-publicly visible communities if the current user is unauthenticated
    if not request.user.is_authenticated:
        memberships = [i for i in memberships if i.community.is_publicly_visible]
    # Retrieve all past logs excluding the latest one (open logs have no end datetime)
    membership_logs = MembershipLog.objects.filter(
        membership_id__in=[i.id for i in memberships]
    ).exclude(end_datetime=None)
    # Retrieve all related community IDs
    community_ids = remove_duplicates([i.membership.community.id for i in membership_logs])
    past_memberships = list()
    # Retrieve data according to each community ID
    for i in community_ids:
        # Retrieve community type (CommunityEvent is checked before plain Event)
        _community = Community.objects.get(pk=i)
        if has_instance(_community, Club):
            _community_type = 'club'
        elif has_instance(_community, Event) and not has_instance(_community, CommunityEvent):
            _community_type = 'event'
        elif has_instance(_community, CommunityEvent):
            _community_type = 'community_event'
        elif has_instance(_community, Lab):
            _community_type = 'lab'
        else:
            _community_type = 'community'
        # Filter membership logs of a certain community
        _membership_logs = [j for j in membership_logs if j.membership.community.id == i]
        # Retrieve position (highest level ever held in this community)
        _position = max([j.position for j in _membership_logs])
        # Retrieve other data
        past_memberships.append({
            'community_id': i,
            'community_type': _community_type,
            'start_datetime': min([j.start_datetime for j in _membership_logs]),
            'end_datetime': max([j.end_datetime for j in _membership_logs]),
            'position': _position,
            'position_start_datetime': min([j.start_datetime for j in _membership_logs if j.position == _position]),
            'position_end_datetime': max([j.end_datetime for j in _membership_logs if j.position == _position])
        })
    return Response(past_memberships, status=status.HTTP_200_OK)
|
988,843 | 7017e121eb2bdbd50359befe17b1225b3bd699ae | import spacy
from collections import Counter, defaultdict
import multiprocessing as mp
import numpy as np
class WordCloud():
    """Builds word-frequency and sentiment-color data for a word cloud."""

    def __init__(self, docs, ratings):
        """Initialize word cloud

        Arguments:
            docs {list[str]} -- list of document strings
            ratings {list[float]} -- list of ratings float
        """
        self.docs = docs
        self.ratings = ratings
        self.disablelayers = ['parser', 'ner', 'textcat']
        self.pos = ['NOUN', 'PROPN']
        self.nlp = spacy.load('en', disable=self.disablelayers)
        # One color per integer rating bucket 1..5 (first entry repeated so
        # ratings of 1 and 2 share the "negative" color).
        self.color_range = ['#c70039', '#c70039',
                            '#ffd800', '#6fb98f', '#2b580c']

    def getCounter(self):
        """Generates Counter of words with their sentiment colors

        Returns:
            tuple -- (word Counter, word -> hex color dict, word -> mean rating dict)
        """
        word_count, noun_word_count = Counter(), Counter()
        word_rating, noun_word_rating = defaultdict(list), defaultdict(list)
        docs = self.nlp.pipe(
            self.docs, n_process=1, disable=self.disablelayers)
        for index, doc in enumerate(docs):
            for token in doc:
                if not token.is_stop and not token.is_punct and token.pos_ in self.pos:
                    if token.pos_ == 'PROPN':
                        word_count[token.lemma_] += 1
                        word_rating[token.lemma_].append(self.ratings[index])
                    else:
                        noun_word_count[token.lemma_] += 1
                        noun_word_rating[token.lemma_].append(self.ratings[index])
        # if 0<=proper nouns<=5 found, add regular nouns
        if not word_count or len(word_count) <= 5:
            word_count += noun_word_count
            word_rating = {**word_rating, **noun_word_rating}
        # PERF FIX: compute getColor() once per word (the original called it
        # twice per word, doing the numpy mean twice).
        word_color, word_sentiment = {}, {}
        for word, ratings in word_rating.items():
            sentiment, color = self.getColor(ratings)
            word_color[word] = color
            word_sentiment[word] = sentiment
        return word_count, word_color, word_sentiment

    def getColor(self, ratings):
        """Compute the mean rating and its sentiment color.

        Arguments:
            ratings {list[float]} -- list of ratings in which the word exists

        Returns:
            tuple -- (mean rating rounded to 2 decimals, hex color string)
        """
        # DOC FIX: the original docstring claimed a plain hex string is
        # returned, but the method returns a (rating, color) tuple.
        float_rating = np.around(np.mean(ratings), 2)
        mean_rating = int(float_rating)
        return float_rating, self.color_range[mean_rating - 1]
|
988,844 | 9ccb650bc1c4e2ec52e0cdb87b89d51d940c54af | TOKEN = '998969136:AAFS4NvKyHBsTjvWycKPMK4B_77oJ_MAbVY'
SAPA = " 👨🔧 Halo Perkenalkan Nama Saya Softeng Bot" |
988,845 | 7688c65d8c1a85b92f4a53a6c5cca4a8f66e75cb | import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from app.models import User, Post, Location
from app import db
def sendpandas(filename):
    """Load the first row of Sheet1 from an uploaded Excel file and persist
    it as a Location record.

    :param filename: name of the file inside the uploads/ directory
    """
    # BUGFIX: pandas removed the 'sheetname' keyword; it is 'sheet_name'.
    df = pd.read_excel('uploads/{0}'.format(filename), sheet_name='Sheet1')
    print(df.columns)
    print(df['Standort'][0])
    # Only the first data row is imported — TODO confirm multi-row uploads
    # are not expected.
    location = Location(
        city=df['Standort'][0],
        gateway=df['Gateway'][0],
        network=df['Network'][0],
        technologie=df['Technologie'][0],
    )
    db.session.add(location)
    db.session.commit()
|
988,846 | 2d3819a6299bf80de2ec604921bffbc643ca2be2 | # we need pycrypto(dome)
from Crypto.Cipher import AES
import base64
with open("7.txt") as f:
raw = base64.b64decode(f.read())
key = "YELLOW SUBMARINE".encode()
cipher = AES.new(key, AES.MODE_ECB)
print(cipher.decrypt(raw).decode()) |
988,847 | 5459abc84331019d81d484c6da8b292e683a3d07 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-24 18:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema migration: creates the Facility and Inspections tables
    # and enforces uniqueness on a facility's identifying columns.
    # NOTE: auto-generated by Django; do not edit applied migrations by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Facility',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('facility_type', models.CharField(blank=True, max_length=30)),
                ('address', models.CharField(blank=True, max_length=100)),
                ('city', models.CharField(blank=True, max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='Inspections',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('inspection_type', models.CharField(max_length=50)),
                ('inspection_date', models.DateField()),
                ('critical_points', models.IntegerField()),
                ('total_points', models.IntegerField()),
                # Deleting a facility cascades to its inspections.
                ('facility', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inspections.Facility')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='facility',
            unique_together=set([('name', 'facility_type', 'address', 'city')]),
        ),
    ]
|
988,848 | 383f28a31f412b5fa46ad27394d8dabbad8ea60a | #+
# Copyright 2015 iXsystems, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################
from datetime import timedelta
import logging
import logging.handlers
import copy
LOGGING_FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s'
def first_or_default(f, iterable, default=None):
    """Return the first element of *iterable* matching predicate *f*, or *default*."""
    for item in filter(f, iterable):
        return item
    return default
def exclude(d, *keys):
    """Return a copy of dict *d* without the given *keys*."""
    result = {}
    for key, value in d.items():
        if key not in keys:
            result[key] = value
    return result
def include(d, *keys):
    """Return a copy of dict *d* containing only the given *keys*."""
    result = {}
    for key, value in d.items():
        if key in keys:
            result[key] = value
    return result
def extend(d, d2):
    """Return a shallow copy of *d* updated with the entries of *d2*."""
    merged = copy.copy(d)
    merged.update(d2)
    return merged
def normalize(d, d2):
    """Fill missing keys of *d* in place with defaults taken from *d2*."""
    for key, default in d2.items():
        d.setdefault(key, default)
def force_none(v):
    """Coerce falsy values to None; pass truthy values through unchanged."""
    return v if v else None
def materialized_paths_to_tree(lst, separator='.'):
    """Convert materialized path strings (e.g. 'a.b.c') into a nested tree.

    Each node is a dict with 'children' (component -> child node), 'path'
    (list of components from the root) and, for non-root nodes, 'label'.
    """
    root = {'children': {}, 'path': []}
    for entry in lst:
        # Walk/extend the tree iteratively, one path component at a time.
        node = root
        for component in entry.split(separator):
            child = node['children'].get(component)
            if not child:
                child = {
                    'children': {},
                    'path': node['path'] + [component],
                    'label': component,
                }
                node['children'][component] = child
            node = child
    return root
def to_timedelta(time_val):
    """Parse a shorthand duration string like '30s', '5m', '2h', '3d' or '1y'.

    :param time_val: a number followed by a unit suffix (s/m/h/d/y)
    :returns: the equivalent datetime.timedelta
    :raises ValueError: if the number or the unit suffix is invalid
        (the original silently returned None for unknown suffixes)
    """
    num = int(time_val[:-1])
    unit = time_val[-1]
    # Map each suffix to one unit's worth of time ('y' approximates a year
    # as 365 days, matching the original behavior).
    units = {
        's': timedelta(seconds=1),
        'm': timedelta(minutes=1),
        'h': timedelta(hours=1),
        'd': timedelta(days=1),
        'y': timedelta(days=365),
    }
    try:
        return num * units[unit]
    except KeyError:
        raise ValueError('unknown time unit: {!r}'.format(unit))
def configure_logging(path, level):
    """Set up root logging at *level*; optionally redirect output to *path*.

    When *path* is given, the console handler installed by basicConfig is
    removed and replaced with a fault-tolerant file handler using the same
    format string.
    """
    logging.basicConfig(
        level=logging.getLevelName(level),
        format=LOGGING_FORMAT,
    )
    if path:
        handler = FaultTolerantLogHandler(path)
        handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
        # Drop the stream handler basicConfig just added (assumed to be the
        # first/only root handler at this point) before installing ours.
        logging.root.removeHandler(logging.root.handlers[0])
        logging.root.addHandler(handler)
class FaultTolerantLogHandler(logging.handlers.WatchedFileHandler):
    """WatchedFileHandler that swallows IOError so a logging failure
    (e.g. a rotated-away or unwritable log file) never crashes the caller."""

    def emit(self, record):
        try:
            logging.handlers.WatchedFileHandler.emit(self, record)
        except IOError:
            # Intentionally best-effort: losing a log record is preferable
            # to aborting the process because of a logging problem.
            pass
|
988,849 | 581891dda6da12af1e30f47361dc08cb27459307 | import random
import torch
from .functions import one_hot
class Vocabulary:
    """Bidirectional character <-> index mapping built from a text corpus."""

    def __init__(self, text):
        # Deterministic ordering: sorted unique characters of the corpus.
        self.__chars = sorted(set(text))
        self.size = len(self.__chars)
        # char to index and index to char maps
        self.char2ix = {}
        self.ix2char = {}
        for index, char in enumerate(self.__chars):
            self.char2ix[char] = index
            self.ix2char[index] = char

    def __getitem__(self, item):
        """Look up an index by character, or a character by index."""
        if isinstance(item, str):
            return self.char2ix[item]
        if isinstance(item, int):
            return self.ix2char[item]
        raise ValueError(f"Should be either integer or string. Got {type(item)}")

    def string2tensor(self, s):
        """Encode string *s* as a (len(s), 1) tensor of character indices."""
        indices = [self.char2ix[char] for char in s]
        return torch.tensor(indices).unsqueeze(1)

    def text2tensor(self, text, max_length=-1):
        """Encode *text*, truncated to *max_length* characters when non-negative."""
        if max_length < 0:
            limit = len(text)
        else:
            limit = min(len(text), max_length)
        return self.string2tensor(text[:limit])

    def string2one_hot(self, s):
        """Encode string *s* directly as a one-hot tensor."""
        return self.tensor2one_hot(self.string2tensor(s))

    def tensor2one_hot(self, t):
        """One-hot encode an index tensor against the vocabulary size."""
        return one_hot(t, self.size)

    def tensor2string(self, t):
        """Decode an index tensor back into its string."""
        decoded = (self.ix2char[index] for index in t.flatten().tolist())
        return ''.join(decoded)

    def random_character(self):
        """Return a uniformly random character from the vocabulary."""
        return random.choice(self.__chars)
|
988,850 | c27ac5572f9c82fd7528f589469c103acf83d662 | loop_variable = 0
while loop_variable != 1:
age = int(input("How old are you? "))
if age >= 18:
print("You are an adult.")
loop_variable = 1
elif age < 0:
print("Invalid Choice.")
else:
print("You are a child.")
loop_variable = 1
|
988,851 | 86a57a1833a653f1a01b1cc9a84bfb7fc4656d9e | a = ["x"]
b = ["y"]
# NOTE(review): 'a' (["x"]) is defined immediately above this block; 'pow'
# below shadows the builtin pow() — consider renaming.
start = [1]
k=0
pow = int(input("Enter power: "))
print("(x + y)^" +str(pow))
# Build Pascal's triangle row for the requested power: each pass derives the
# next row of binomial coefficients from the previous one.
for i in range(pow):
    #print("\n")
    binomial = []
    binomial.append(1)
    for i in range(0,len(start)-1):
        binomial.append(start[i]+start[i+1])
    binomial.append(1)
    start = binomial
# Print the expansion terms from x^pow down to x^0, pairing each coefficient
# with descending powers of x and ascending powers of y.
# NOTE(review): a trailing " + " is printed after the last term.
for j in range(pow,-1,-1):
    print(start[j],end=" * ")
    print(*a,end="^")
    print(j,end=" * ")
    print(*b,end="^")
    print(k,end=" + ")
    print(end=" ")
    k+=1
#prints the row of coefficients
988,852 | 3a9ccb5b7780e5788a90cfcf805b02c14ace08e8 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import donkey
INPUTS_SIZE = 2 + donkey.ANGLES_WINDOW
class Policy(nn.Module):
    # Actor-critic policy over donkey track observations. forward() returns a
    # state value and a vector holding the mean and log-std of a Gaussian
    # over the continuous control outputs.
    # NOTE(review): nn.init.xavier_normal and F.tanh are deprecated in modern
    # PyTorch (nn.init.xavier_normal_ / torch.tanh) — kept as-is here.

    def __init__(self, config):
        super(Policy, self).__init__()
        self.hidden_size = config.get('hidden_size')
        self.recurring_cell = config.get('recurring_cell')
        self.config = config
        if self.recurring_cell == "gru":
            # Shared trunk: linear embedding followed by a GRU cell.
            self.fc1 = nn.Linear(INPUTS_SIZE, self.hidden_size)
            self.gru = nn.GRUCell(self.hidden_size, self.hidden_size)
        else:
            # Separate feed-forward trunks for the actor (a) and critic (v).
            self.fc1_a = nn.Linear(INPUTS_SIZE, self.hidden_size)
            self.fc1_v = nn.Linear(INPUTS_SIZE, self.hidden_size)
        self.fc2_a = nn.Linear(self.hidden_size, self.hidden_size)
        # Actor head emits [means, log-stds]: 2 values per control dimension.
        self.fc3_a = nn.Linear(self.hidden_size, 2 * donkey.CONTROL_SIZE)
        self.fc2_v = nn.Linear(self.hidden_size, self.hidden_size)
        self.fc3_v = nn.Linear(self.hidden_size, 1)
        self.train()
        nn.init.xavier_normal(self.fc2_a.weight.data, nn.init.calculate_gain('tanh'))
        nn.init.xavier_normal(self.fc3_a.weight.data, nn.init.calculate_gain('tanh'))
        self.fc2_a.bias.data.fill_(0)
        self.fc3_a.bias.data.fill_(0)
        nn.init.xavier_normal(self.fc2_v.weight.data, nn.init.calculate_gain('tanh'))
        nn.init.xavier_normal(self.fc3_v.weight.data, nn.init.calculate_gain('linear'))
        self.fc2_v.bias.data.fill_(0)
        self.fc3_v.bias.data.fill_(0)
        if self.recurring_cell == "gru":
            nn.init.xavier_normal(self.fc1.weight.data, nn.init.calculate_gain('tanh'))
            self.fc1.bias.data.fill_(0)
            nn.init.xavier_normal(self.gru.weight_ih.data)
            nn.init.xavier_normal(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)
        else:
            nn.init.xavier_normal(self.fc1_a.weight.data, nn.init.calculate_gain('tanh'))
            nn.init.xavier_normal(self.fc1_v.weight.data, nn.init.calculate_gain('tanh'))
            self.fc1_a.bias.data.fill_(0)
            self.fc1_v.bias.data.fill_(0)

    def forward(self, inputs, hiddens, masks):
        """Run the trunk(s); returns (value, actor_output, None, hiddens)."""
        if self.recurring_cell == "gru":
            x = F.tanh(self.fc1(inputs))
            if inputs.size(0) == hiddens.size(0):
                # One step per environment: single GRU update.
                x = hiddens = self.gru(x, hiddens * masks)
            else:
                # Batched rollout: unroll the GRU over the time dimension,
                # resetting hidden state wherever masks are zero.
                x = x.view(-1, hiddens.size(0), x.size(1))
                masks = masks.view(-1, hiddens.size(0), 1)
                outputs = []
                for i in range(x.size(0)):
                    hx = hiddens = self.gru(x[i], hiddens * masks[i])
                    outputs.append(hx)
                x = torch.cat(outputs, 0)
            a = v = x = F.tanh(x)
        else:
            a = F.tanh(self.fc1_a(inputs))
            v = F.tanh(self.fc1_v(inputs))
        a = F.tanh(self.fc2_a(a))
        a = F.tanh(self.fc3_a(a))
        v = F.tanh(self.fc2_v(v))
        v = self.fc3_v(v)
        return v, a, None, hiddens

    def input_shape(self):
        """Shape of a single observation vector fed to forward()."""
        return (INPUTS_SIZE,)

    def input(self, observation):
        """Stack donkey observations into a float tensor of shape (batch, INPUTS_SIZE)."""
        track_angles = [o.track_angles for o in observation]
        track_position = [[o.track_position] for o in observation]
        track_linear_speed = [[o.track_linear_speed] for o in observation]
        # progress = [[o.progress] for o in observation]
        observation = np.concatenate(
            (
                np.stack(track_angles),
                np.stack(track_position),
                np.stack(track_linear_speed),
                # np.stack(progress),
            ),
            axis=-1,
        )
        observation = torch.from_numpy(observation).float()
        return observation

    def auxiliary_present(self):
        """This policy defines no auxiliary prediction target."""
        return False

    def auxiliary_shape(self):
        return (donkey.ANGLES_WINDOW,)

    def auxiliary(self, observation):
        """Build the auxiliary target tensor (track angles) from observations."""
        track_angles = [o.track_angles for o in observation]
        auxiliary = np.concatenate(
            (
                np.stack(track_angles),
            ),
            axis=-1,
        )
        auxiliary = torch.from_numpy(auxiliary).float()
        return auxiliary

    def action(self, inputs, hiddens, masks, deterministic=False):
        """Sample (or take the mean of) an action from the Gaussian policy."""
        value, x, auxiliary, hiddens = self(inputs, hiddens, masks)
        # Split actor output into per-control mean and log-std halves.
        slices = torch.split(x, donkey.CONTROL_SIZE, 1)
        action_mean = slices[0]
        action_logstd = slices[1]
        action_std = action_logstd.exp()
        m = Normal(action_mean, action_std)
        if deterministic is False:
            actions = m.sample()
        else:
            actions = action_mean
        # log_probs (sum on actions) -> batch x 1
        log_probs = m.log_prob(actions).sum(-1, keepdim=True)
        # entropy (sum on actions / mean on batch) -> 1x1
        # (closed-form entropy of a diagonal Gaussian)
        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + action_logstd
        entropy = entropy.sum(-1, keepdim=True)
        return value, actions, auxiliary, hiddens, log_probs, entropy

    def evaluate(self, inputs, hiddens, masks, actions):
        """Score given *actions* under the current policy (for PPO-style updates)."""
        value, x, auxiliary, hiddens = self(inputs, hiddens, masks)
        slices = torch.split(x, donkey.CONTROL_SIZE, 1)
        action_mean = slices[0]
        action_logstd = slices[1]
        action_std = action_logstd.exp()
        m = Normal(action_mean, action_std)
        # log_probs (sum on actions) -> batch x 1
        log_probs = m.log_prob(actions).sum(-1, keepdim=True)
        # entropy (sum on actions / mean on batch) -> 1x1
        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + action_logstd
        entropy = entropy.sum(-1, keepdim=True)
        return value, auxiliary, hiddens, log_probs, entropy
|
988,853 | c3a239437f1b2b7944f10bb6aa7a971735c324b3 | from .utils import *
from .api_resource import APIResource
|
988,854 | f9f8339954b62934308fbe79dc85786bf145f603 | from django.urls import path
from . import views
from django.contrib.auth.views import LoginView
# Route table for the alumni application. The names ('login_', 'home',
# 'profile', 'logout') are referenced via reverse()/{% url %}; keep them stable.
urlpatterns = [
    path('alumniLogin/', LoginView.as_view(), name="login_"),
    path('', views.home_view, name="home"),
    path('Login/Signup/', views.register),
    path('alumniDetails/', views.info_input),
    path('dashboard/', views.dashboard_view),
    path('profile/', views.profile_view, name="profile"),
    path('profile/edit/', views.info_edit),
    path('logout/', views.logout, name="logout")
]
988,855 | eb69b0ab455868a8d82ee72f934fea69202a78a1 | import pathlib
import ecole
SOURCE_DIR = pathlib.Path(__file__).parent.parent.parent.resolve()
DATA_DIR = SOURCE_DIR / "vendor/ecole/libecole/tests/data"
def get_model():
    """Return a Model object with a valid problem.

    Loads a bin-packing instance from the test data and pins every
    randomization seed so repeated runs are reproducible.
    """
    scip_model = ecole.scip.Model.from_file(str(DATA_DIR / "bppc8-02.mps"))
    # Disable solver features that would change the search trajectory.
    scip_model.disable_cuts()
    scip_model.disable_presolve()
    scip_model.set_param("randomization/permuteconss", True)
    scip_model.set_param("randomization/permutevars", True)
    # Fix all seeds to the same arbitrary constant.
    for seed_param in (
        "randomization/permutationseed",
        "randomization/randomseedshift",
        "randomization/lpseed",
    ):
        scip_model.set_param(seed_param, 784)
    return scip_model
|
988,856 | ce192056fb2ebb67d1e6af96db8de33ae8aa194b | from PyObjCTools.TestSupport import *
from WebKit import *
class TestWKOpenPanelParameters (TestCase):
    # Verifies PyObjC exposes WKOpenPanelParameters' BOOL-returning accessors
    # with proper Python bool bridging on the macOS versions that have them.

    @onlyOn64Bit
    @min_os_level('10.12')
    def testMethods10_12(self):
        # allowsMultipleSelection is available from macOS 10.12.
        self.assertResultIsBOOL(WKOpenPanelParameters.allowsMultipleSelection)

    @onlyOn64Bit
    @min_os_level('10.13.4')
    def testMethods10_13_4(self):
        # allowsDirectories is available from macOS 10.13.4.
        self.assertResultIsBOOL(WKOpenPanelParameters.allowsDirectories)

if __name__ == "__main__":
    main()
|
988,857 | e460c00d0fdcfcd6c0a519da34024a3f22beae3b | __author__ = 'ProvanAlex'
import sys
import os
import configparser
import pygame as pg
from pygame.locals import *
screensize = (800, 800)
directdict = {pg.K_LEFT: (-1, 0),
pg.K_RIGHT: (1, 0),
pg.K_UP: (0, -1),
pg.K_DOWN: (0, 1)}
class Player(object):
    """Arrow-key-controlled sprite confined to the screen."""

    def __init__(self, rect, speed, image):
        self.rect = pg.Rect(rect)
        self.speed = speed  # pixels moved per held key per frame
        self.image = self.make_image(image)

    def make_image(self, image):
        """Load the image file and scale it to a 25x25 sprite."""
        image = pg.image.load(image)
        image = pg.transform.scale(image, (25, 25))
        return image

    def update(self, screen_rect, keys):
        """Move according to held arrow keys, staying inside *screen_rect*."""
        for key in directdict:
            if keys[key]:
                self.rect.x += directdict[key][0] * self.speed
                self.rect.y += directdict[key][1] * self.speed
        # BUGFIX: Rect.clamp() returns a NEW rect (the original discarded it,
        # so the player could leave the screen); clamp_ip() clamps in place.
        self.rect.clamp_ip(screen_rect)

    def draw(self, surface):
        """Blit the sprite at its current rect."""
        surface.blit(self.image, self.rect)
class Tiles(object):
    """Splits a tileset image into a table of fixed-size tile surfaces."""

    def __init__(self, imagefile, width, height):
        image = pg.image.load(imagefile)
        image_width, image_height = image.get_size()
        self.image_width = image_width
        self.image_height = image_height
        self.image = image
        self.width = width    # width of a single tile in pixels
        self.height = height  # height of a single tile in pixels

    def load_tile_table(self):
        """Return a 2D list (indexed [column][row]) of tile subsurfaces."""
        image = self.image
        tile_table = []
        # BUGFIX: range() requires integers; '/' yields a float on Python 3
        # and raised TypeError here. Use floor division for the tile counts.
        for tile_x in range(0, self.image_width // self.width):
            line = []
            tile_table.append(line)
            for tile_y in range(0, self.image_height // self.height):
                rect = (tile_x * self.width, tile_y * self.height, self.width, self.height)
                line.append(image.subsurface(rect))
        return tile_table
class Gamemap(object):
    # Parses a level description (INI config plus ASCII map file) into a tile
    # map. NOTE(review): this class has several apparent defects — see the
    # inline notes; the intended behavior is unclear enough that they are
    # flagged rather than silently rewritten.

    def __init__(self, filename, mapname):
        self.map = {}
        self.key = []
        # NOTE(review): 'width' and 'height' are undefined in this scope, so
        # constructing a Gamemap raises NameError — they likely need to be
        # constructor parameters.
        self.width = width
        self.height = height

    def mapcreate(self, filename, mapname):
        # Reads tile descriptions from the INI file and raw rows from the map
        # file. NOTE(review): mapxy/mapx/mapy are computed but never stored,
        # and self.map/self.key are never populated here.
        mapx = []
        gamemap = []
        mapxy = []
        parser = configparser.ConfigParser()
        parser.read(filename)
        for section in parser.sections():
            # Single-character sections describe individual tiles; longer
            # sections describe the level itself.
            if len(section) == 1:
                desc = dict(parser.items(section))
                self.desc = desc
            else:
                leveldescriptiondict = dict(parser.items(section))
                self.leveldescriptiondict = leveldescriptiondict
        mapfile = open(mapname, 'r')
        for line in mapfile:
            gamemap.append(line)
        mapy = dict(enumerate(gamemap)).keys()
        for x in gamemap[0]:
            if type(gamemap[0]) is str:
                mapx.append(gamemap[0].index(x))
            else:
                print("this map is not a valid map")
        mapxy = zip(mapx, mapy)

    def get_tile(self, x, y):
        # NOTE(review): self.map is a dict, so a missing row raises KeyError,
        # not the IndexError caught here; and self.key is a list indexed with
        # a character, which raises TypeError, not KeyError.
        try:
            char = self.map[y][x]
        except IndexError:
            return {}
        try:
            return self.key[char]
        except KeyError:
            return {}
class Control(object):
    # Owns the pygame window, clock, input state and the player; runs the
    # fixed-timestep main loop.

    def __init__(self):
        # Center the SDL window before pygame initializes the display.
        os.environ['SDL_VIDEO_CENTERED'] = '1'
        pg.init()
        pg.display.set_caption("move me with the arrow keys")
        self.screen = pg.display.set_mode(screensize)
        self.screen_rect = self.screen.get_rect()
        self.clock = pg.time.Clock()
        self.fps = 60
        self.done = False
        self.keys = pg.key.get_pressed()
        self.player = Player((0, 0, 100, 100), 5, "character.png")

    def event_loop(self):
        # Poll events; window close or ESC ends the main loop.
        for event in pg.event.get():
            self.keys = pg.key.get_pressed()
            if event.type == pg.QUIT or self.keys[K_ESCAPE]:
                self.done = True

    def main_loop(self):
        # input -> update -> draw, capped at self.fps frames per second.
        while not self.done:
            self.event_loop()
            self.player.update(self.screen_rect, self.keys)
            self.screen.fill((0, 0, 0))
            self.player.draw(self.screen)
            pg.display.update()
            self.clock.tick(self.fps)
if __name__ == "__main__":
    # Entry point: create the controller, run the loop, then shut down cleanly.
    run_it = Control()
    run_it.main_loop()
    pg.quit()
    sys.exit()
|
988,858 | fd62795a330d7d13f4459ae8d693c4d5279fe4c4 | import numpy as np
import Ajua_env
# Drive the AJUA environment with random actions until the episode ends,
# accumulating rewards separately for the two alternating players.
env = Ajua_env.AJUA()
done = False
steps = 0
agent_1 = 0
agent_2 = 0
while not done:
    steps += 1
    # Sample a random action id in [0, 18).
    action = np.random.randint(0, 18)
    state, reward, done, _ = env.step(action)
    # NOTE(review): player id 9 appears to denote the second agent — confirm
    # against Ajua_env's player numbering.
    if env.current_player == 9:
        agent_2 += reward
    else:
        agent_1 += reward
    print("\nAction: ",action, "Player: ", env.current_player, "Reward: ", reward)
    env.render()
print(agent_2, agent_1) |
988,859 | bd935188f5d8dc00d185ec3ebd5eb2a8a05744a4 | import os
ENCODING = "utf-8" # кодировка
MSG_SIZE = 1024 # размер сообщения
WORKERS = 5
DEFAULT_SERVER_IP = ""
DEFAULT_PORT = 7777
# DEFAULT_SERVER_IP = "0.0.0.0"
# DEFAULT_PORT = 7777
# База данных для хранения данных сервера:
SERVER_DATABASE = "sqlite:///server/db/server_db.sqlite3"
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROGRAM = "server messenger"
VERSION = "0.0.8post0"
INSTALLED_MODULES = (
"echo",
"auth",
# "contact",
)
|
988,860 | a62f2235aa81b84b2656a065e179eb158155a175 |
import open_fortran_parser
from path import Path
import collections.abc
import pathlib
import re
import textwrap
import typing as t
import itertools
from pycropml.transpiler.pseudo_tree import Node
import logging
import typed_ast
import typed_ast.ast3 as ty
import ast
import xml.etree.ElementTree as ET
import horast
import horast.nodes as horast_nodes
def make_call_from_slice(slice_):
    """Transform code like '0:n:2' into 'slice(0, n, 2)'.

    :param slice_: an ast.Slice node
    :returns: an ast.Call of the builtin slice() with the minimal argument list
    """
    assert isinstance(slice_, ast.Slice), type(slice_)
    lower, upper, step = slice_.lower, slice_.upper, slice_.step
    if lower is None and upper is None and step is None:
        # '::' -> slice()
        args = []
    else:
        # GENERALIZATION: absent bounds become explicit None placeholders and
        # the step is appended only when present. This covers every
        # lower/upper/step combination (the original if/elif chain raised
        # NotImplementedError when a step was given without a lower bound),
        # while producing identical output for all previously supported forms.
        args = [
            lower if lower is not None else ast.NameConstant(None),
            upper if upper is not None else ast.NameConstant(None),
        ]
        if step is not None:
            args.append(step)
    return ast.Call(ast.Name('slice', ast.Load()), args, [])
def make_expression_from_slice(slice_):
    """Transform code like '0:n:2' into a valid expression that is as simple as possible.

    NOTE(review): relies on ast.Index / ast.ExtSlice, which are deprecated
    since Python 3.9 (ast.Index(v) now returns v directly), so the isinstance
    checks below behave differently on modern interpreters — confirm the
    targeted Python version before changing this.
    """
    assert isinstance(slice_, (
        ast.Index, ast.Slice, ast.ExtSlice)), type(slice_)
    if isinstance(slice_, ast.Index):
        # A plain index 'i' is just its wrapped value.
        return slice_.value
    if isinstance(slice_, ast.Slice):
        lower, upper, step = slice_.lower, slice_.upper, slice_.step
        # ':n' simplifies to just 'n'; everything else becomes slice(...).
        if lower is None and upper is not None and step is None:
            return upper
        return make_call_from_slice(slice_)
    assert isinstance(slice_, ast.ExtSlice)
    # Multi-dimensional slices become a tuple of per-dimension expressions.
    elts = [make_expression_from_slice(dim) for dim in slice_.dims]
    return ast.Tuple(elts=elts, ctx=ast.Load())
def make_st_ndarray(data_type, dimensions_or_sizes):
    """Create a typed_ast node equivalent to: st.ndarray[dimensions, data_type, sizes].

    :param data_type: AST node for the element type
    :param dimensions_or_sizes: either an int (rank only) or a list of slice
        nodes describing the size of each dimension
    """
    if isinstance(dimensions_or_sizes, int):
        dimensions = dimensions_or_sizes
        sizes = None
    else:
        dimensions = len(dimensions_or_sizes)
        sizes = [make_expression_from_slice(size) for size in dimensions_or_sizes]
    # BUGFIX: the conditional must apply only to the optional sizes tuple.
    # The original wrote `[a, b] + [c] if sizes else []`, which parses as
    # `([a, b] + [c]) if sizes else []` and silently dropped the dimension
    # count and data type whenever sizes was None.
    elts = [ast.Num(n=dimensions), data_type]
    if sizes:
        elts.append(ast.Tuple(elts=sizes, ctx=ast.Load()))
    return ast.Subscript(
        value=ast.Attribute(
            value=ast.Name(id='st', ctx=ast.Load()),
            attr='ndarray', ctx=ast.Load()),
        slice=ast.Index(value=ast.Tuple(elts=elts, ctx=ast.Load())),
        ctx=ast.Load())
def make_numpy_constructor(function, arg, data_type):
    """Build an AST call equivalent to `np.<function>(<arg>, dtype=<data_type>)`."""
    numpy_attr = ast.Attribute(
        value=ast.Name(id='np', ctx=ast.Load()),
        attr=function, ctx=ast.Load())
    dtype_kw = ast.keyword(arg='dtype', value=data_type)
    return ast.Call(func=numpy_attr, args=[arg], keywords=[dtype_kw])
# Maps a (Fortran type name, kind/byte-width) pair to the corresponding
# Python/NumPy type name; None kind means the default-width Fortran type.
FORTRAN_PYTHON_TYPE_PAIRS = {
    ('logical', None): 'bool',
    ('integer', None): 'int',
    ('real', None): 'float',
    ('character', t.Any): 'str',
    ('integer', 1): 'np.int8',
    ('integer', 2): 'np.int16',
    ('integer', 4): 'np.int32',
    ('integer', 8): 'np.int64',
    ('real', 2): 'np.float16',
    ('real', 4): 'np.float32',
    ('real', 8): 'np.float64'}
FORTRAN_PYTHON_OPERATORS = {
# binary
'+': (ast.BinOp, ast.Add),
'-': (ast.BinOp, ast.Sub),
'*': (ast.BinOp, ast.Mult),
# missing: MatMult
'/': (ast.BinOp, ast.Div),
'%': (ast.BinOp, ast.Mod),
'**': (ast.BinOp, ast.Pow),
'//': (ast.BinOp, ast.Add), # concatenation operator, only in Fortran
# LShift
# RShift
# BitOr
# BitXor
# BitAnd
# missing: FloorDiv
'.eq.': (ast.Compare, ast.Eq),
'==': (ast.Compare, ast.Eq),
'.ne.': (ast.Compare, ast.NotEq),
'/=': (ast.Compare, ast.NotEq),
'.lt.': (ast.Compare, ast.Lt),
'<': (ast.Compare, ast.Lt),
'.le.': (ast.Compare, ast.LtE),
'<=': (ast.Compare, ast.LtE),
'.gt.': (ast.Compare, ast.Gt),
'>': (ast.Compare, ast.Gt),
'.ge.': (ast.Compare, ast.GtE),
'>=': (ast.Compare, ast.GtE),
# Is
# IsNot
# In
# NotIn
'.and.': (ast.BoolOp, ast.And),
'.or.': (ast.BoolOp, ast.Or),
# unary
# '+': (ast.UnaryOp, ast.UAdd),
# '-': (ast.UnaryOp, ast.USub),
'.not.': (ast.UnaryOp, ast.Not),
# Invert: (ast.UnaryOp, ast.Invert)
}
INTRINSICS_FORTRAN_TO_PYTHON = {
# Fortran 77
'abs': 'abs', # or np.absolute
'acos': ('numpy', 'arccos'),
'aimag': None,
'aint': None,
'anint': None,
'asin': ('numpy', 'arcsin'),
'atan': ('numpy', 'arctan'),
'atan2': None,
'char': None,
'cmplx': None,
'conjg': ('numpy', 'conj'),
'cos': ('numpy', 'cos'),
'cosh': None,
'dble': 'float', # incorrect
'dim': None,
'dprod': None,
'exp': None,
'ichar': None,
'index': None,
'int': 'int',
'len': None,
'lge': None,
'lgt': None,
'lle': None,
'llt': None,
'log': None,
'log10': None,
'max': ('numpy', 'maximum'),
'min': ('numpy', 'minimum'),
'mod': None,
'nint': None,
'real': 'float',
'sign': ('numpy', 'sign'),
'sin': ('numpy', 'sin'),
'sinh': ('numpy', 'sinh'),
'sqrt': ('numpy', 'sqrt'),
'tan': ('numpy', 'tan'),
'tanh': ('numpy', 'tanh'),
# non-standard Fortran 77
'getenv': ('os', 'environ'),
# Fortran 90
# Character string functions
'achar': None,
'adjustl': None,
'adjustr': None,
'iachar': None,
'len_trim': None,
'repeat': None,
'scan': None,
'trim': ('str', 'rstrip'),
'verify': None,
# Logical function
'logical': None,
# Numerical inquiry functions
'digits': None,
'epsilon': ('numpy', 'finfo', 'eps'),
'huge': ('numpy', 'finfo', 'max'),
'maxexponent': None,
'minexponent': None,
'precision': None,
'radix': None,
'range': None,
'tiny': ('numpy', 'finfo', 'tiny'), # np.finfo(np.double).tiny ,
# Bit inquiry function
'bit_size': None,
# Vector- and matrix-multiplication functions
'dot_product': ('numpy', 'dot'),
'matmul': None,
# Array functions
'all': None,
'any': None,
'count': ('ndarray', 'count'),
'maxval': None,
'minval': None,
'product': None,
'sum': 'sum',
# Array location functions
'maxloc': ('numpy', 'argmax'),
'minloc': ('numpy', 'argmin'),
# Fortran 95
'cpu_time': None,
'present': 'is_not_none', # TODO: TMP
'set_exponent': None,
# Fortran 2003
# Fortran 2008
}
def separate_args_and_keywords(args_and_keywords):
    """Split a mixed sequence of call arguments into (positional, keyword) lists.

    Keyword arguments are recognized as ``ast.keyword`` instances; everything
    else is treated as a positional argument. Relative order within each of
    the two resulting lists is preserved.
    """
    positional = [arg for arg in args_and_keywords if not isinstance(arg, ast.keyword)]
    keywords = [arg for arg in args_and_keywords if isinstance(arg, ast.keyword)]
    return positional, keywords
# Module-level logger and a shared diagnostic message template.
_LOG = logging.getLogger(__name__)
_LANG_NAME_UNRECOGNIZED_MSG = 'language name "{}" is not recognized'
class ContinueIteration(StopIteration):
    """Signal "skip this item" from a helper invoked inside a loop.

    Allows for "continue" keyword within a function called from within a loop.
    """
class Registry:
    """General-purpose registry mapping lookup keys to registered objects.

    The mapping is stored on the class object itself, so a subclass that
    wants an independent registry must redeclare ``registered = None``.
    """

    # Lazily-created dict of key -> registered member.
    registered = None

    @classmethod
    def register(cls, member, keys):
        """Register *member* under every key in *keys*."""
        if cls.registered is None:
            cls.registered = {}
        for key in keys:
            # BUG FIX: dict has no .set() method -- the original
            # ``cls.registered.set(key, member)`` raised AttributeError.
            cls.registered[key] = member

    @classmethod
    def find(cls, key):
        """Return the member registered under *key*, or None if absent."""
        if cls.registered is None:
            return None
        return cls.registered.get(key, None)
class Language(Registry):
    """Properties of a programming language: names, file extensions, version."""

    def __init__(self, names, file_extensions, version):
        """Initialize a Language instance.

        :param names: non-empty sequence of names of the language
        :param file_extensions: non-empty sequence of file extensions, including the dot
        :param version: version tuple, or None if unversioned
        """
        assert isinstance(names, collections.abc.Sequence), type(names)
        assert names
        assert isinstance(file_extensions, collections.abc.Sequence), type(file_extensions)
        assert file_extensions
        if __debug__:
            for name in names:
                assert isinstance(name, str), type(name)
                assert name
            for file_extension in file_extensions:
                assert isinstance(file_extension, str), type(file_extension)
                assert file_extension
                assert file_extension.startswith('.'), file_extension
        assert isinstance(version, tuple) or version is None
        self.names = list(names)
        self.default_name = self.names[0]
        # Extensions are matched case-insensitively, so store them lowercased.
        self.file_extensions = [file_extension.lower() for file_extension in file_extensions]
        self.default_file_extension = self.file_extensions[0]
        self.version = version

    @property
    def lowercase_name(self):
        """The default name, lowercased."""
        return self.default_name.lower()

    def has_name(self, name: str):
        """Return True if *name* is one of this language's names (case-sensitive)."""
        assert isinstance(name, str), type(name)
        return name in self.names

    def has_extension(self, file_extension: str):
        """Return True if *file_extension* (dot included) matches, case-insensitively."""
        assert isinstance(file_extension, str), type(file_extension)
        return file_extension.lower() in self.file_extensions

    def has_extension_of(self, path: pathlib.Path):
        """Return True if *path*'s file extension matches this language."""
        assert isinstance(path, pathlib.Path), type(path)
        # BUG FIX: pathlib.Path has no splitext() method -- the original
        # ``_, file_extension = path.splitext(path)`` raised AttributeError.
        # Path.suffix already includes the leading dot, as has_extension expects.
        return self.has_extension(path.suffix)

    def __repr__(self):
        return '<{} language object>'.format(self.default_name)
def validate_indentation(code: str, path: pathlib.Path = None):
    """Raise error if code isn't consistently indented (either only with spaces, or only with tabs).

    Path is optional and used only for diagnostic purposes (i.e. if error happens).
    """
    if not isinstance(code, str):
        raise TypeError('code must be string but {} given'.format(type(code)))
    assert path is None or isinstance(path, pathlib.Path), type(path)
    location = '<string>' if path is None else path
    any_blank = r'[ \t]*'
    # a leading run containing both a space and a tab, in either order
    mixed_pattern = re.compile(r'( {0}\t{0})|(\t{0} {0})'.format(any_blank))
    space_pattern = re.compile(r'[ ]+')
    tab_pattern = re.compile(r'[\t]+')
    uses_spaces = None  # type: t.Optional[bool]
    for lineno, line in enumerate(code.splitlines(keepends=True)):
        # reject any line whose leading whitespace mixes spaces and tabs
        if mixed_pattern.match(line):
            raise ValueError('{}:{} mixed indentation found in {}'.format(
                location, lineno, repr(line)))
        # lock in the indentation style on the first indented line,
        # then require every subsequent indented line to agree with it
        if uses_spaces is None:
            if space_pattern.match(line):
                uses_spaces = True
            elif tab_pattern.match(line):
                uses_spaces = False
        elif uses_spaces:
            if tab_pattern.match(line):
                raise ValueError(
                    '{}:{} after space indent in previous lines, tab indent found in {}'
                    .format(location, lineno, repr(line)))
        else:
            if space_pattern.match(line):
                raise ValueError(
                    '{}:{} after tab indent in previous lines, space indent found in {}'
                    .format(location, lineno, repr(line)))
class Parser(Registry):
    """Extract abstract representation of syntax from the source code."""

    def __init__(self, default_scopes: t.Sequence[t.Tuple[int, t.Optional[int]]] = None):
        """Initialize new Parser instance.

        Default scopes, if provided, limit parsing to the given line sections unless the default
        is overriden.
        """
        self.default_scopes = [(0, None)] if default_scopes is None else default_scopes

    def parse(self, code: str, path: pathlib.Path = None,
              scopes: t.Sequence[t.Tuple[int, t.Optional[int]]] = None, dedent: bool = True):
        """Parse given code into a language-specific AST.

        If path is provided, use it to guide the parser if necessary, as well as for diagnostics.
        """
        assert isinstance(code, str), type(code)
        assert path is None or isinstance(path, pathlib.Path), type(path)
        assert scopes is None or isinstance(scopes, collections.abc.Sequence), type(scopes)
        if scopes is None:
            scopes = self.default_scopes
        parsed_scopes = []
        for begin, end in scopes:
            assert isinstance(begin, int), type(begin)
            assert end is None or isinstance(end, int), type(end)
            code_scope = self._extract_scope(code, begin, end)
            validate_indentation(code_scope, path)
            if dedent:
                code_scope = textwrap.dedent(code_scope)
            parsed_scopes.append(self._parse_scope(code_scope, path))
        if len(scopes) == 1:
            return parsed_scopes[0]
        return self._join_scopes(parsed_scopes)

    @staticmethod
    def _extract_scope(code: str, begin: int, end: t.Optional[int]) -> str:
        """Return the text of lines [begin, end) of *code*; (0, None) selects the whole text."""
        if begin == 0 and end is None:
            return code
        lines = code.splitlines(keepends=True)
        if end is None:
            end = len(lines)
        return ''.join(lines[begin:end])

    def _parse_scope(self, code: str, path: pathlib.Path = None):
        raise NotImplementedError('{} is abstract'.format(type(self).__name__))

    def _join_scopes(self, parsed_scopes):
        raise NotImplementedError('{} cannot join multiple parsed scopes'
                                  .format(type(self).__name__))
import pathlib
import xml.etree.ElementTree as ET
class FortranParser(Parser):
    """Parser that produces a Fortran AST in XML form via Open Fortran Parser."""
    def _parse_scope(self, code: str, path: pathlib.Path = None) -> ET.Element:
        # NOTE(review): the *code* text is ignored here -- open_fortran_parser
        # re-reads the file at *path*, which is why a real path is mandatory.
        assert path is not None, path
        return open_fortran_parser.parse(path, verbosity=100, raise_on_error=True)
class XmlAST():
    """Base transformer: walks an XML AST, dispatching each node to a ``_<tag>`` method."""
    def __init__(self):
        # import statements accumulated during transformation, keyed by (module, alias)
        self._import_statements = dict()
        # names of all available transformer methods (single leading underscore only)
        self._transforms = [f for f in dir(self) if f.startswith('_') and not f.startswith('__')]
    def get_one(self, node, xpath):
        """Return the single subnode matching *xpath*; raise SyntaxError if none is found."""
        found = node.find(xpath)
        if found is None:
            raise SyntaxError('no "{}" found in "{}":\n{}'
                .format(xpath, node.tag, ET.tostring(node).decode().rstrip()))
        return found
    def getAll(self, node, xpath, result=True):
        """Return all subnodes matching *xpath*; if *result* is true, an empty match is an error."""
        found = node.findall(xpath)
        if result and not found:
            raise SyntaxError('no "{}" found in "{}":\n{}'
                .format(xpath, node.tag, ET.tostring(node).decode().rstrip()))
        return found
    def generalize(self, syntax):
        """Entry point: transform the root XML node."""
        return self.transform_one(syntax)
    def transform_one(self, node, warn=False, ignored = None, parent = None):
        """Dispatch *node* to its ``_<tag>`` transformer.

        Raises ContinueIteration to make the calling loop skip the node when the
        tag is in *ignored*, or (with *warn*) when no transformer exists for it.
        Without *warn*, a missing transformer is a hard NotImplementedError.
        """
        assert isinstance(node, ET.Element), type(node)
        # e.g. tag "entity-decl" is handled by a method named "_entity_decl"
        transform_name = '_{}'.format(node.tag.replace('-', '_'))
        if transform_name not in self._transforms:
            if ignored and node.tag in ignored:
                raise ContinueIteration()
            if warn:
                if parent is None:
                    _LOG.warning('no transformer available for node "%s"', node.tag)
                else:
                    _LOG.warning('no transformer available for node "%s", a subnode of "%s"',
                                 node.tag, parent.tag)
                _LOG.debug('%s', ET.tostring(node).decode().rstrip())
                raise ContinueIteration()
            if parent is None:
                raise NotImplementedError('no transformer available for node "{}":\n{}'
                                          .format(node.tag, ET.tostring(node).decode().rstrip()))
            else:
                raise NotImplementedError(
                    'no transformer available for node "{}", a subnode of "{}":\n{}'
                    .format(node.tag, parent.tag, ET.tostring(node).decode().rstrip()))
        # an ignored tag wins even when a transformer does exist
        if ignored and node.tag in ignored:
            _LOG.info('ignoring existing transformer for %s', node.tag)
            raise ContinueIteration()
        _transform = getattr(self, transform_name)
        transformed = _transform(node)
        return transformed
    def transform_all(self, nodes, warn=False, skip_empty=False, ignored=False, parent = None):
        """Transform every node in *nodes*, silently dropping those that raise ContinueIteration."""
        assert isinstance(nodes, (ET.Element, collections.abc.Iterable)), type(nodes)
        transformed = []
        for node in nodes:
            try:
                assert isinstance(node, ET.Element), type(node)
                # optionally skip nodes with no attributes and no children
                if skip_empty and not node.attrib and len(node) == 0:
                    continue
                transformed.append(self.transform_one(node, warn, ignored, parent))
            except ContinueIteration:
                continue
        return transformed
    def transform_all_subnodes(self, node, warn= False, skip_empty= False, ignored = None):
        """Transform all subnodes of a given node."""
        assert isinstance(node, ET.Element), type(node)
        return self.transform_all(node, warn, skip_empty, ignored, node)
    @property
    def import_statements(self):
        """Flat list of all accumulated import-statement dicts."""
        return list(itertools.chain(*[statements
                                      for _, statements in self._import_statements.items()]))
    def ensure_import(self, canonical_name: str, alias: t.Optional[str] = None):
        """Record that the generated code must import *canonical_name* (optionally aliased)."""
        if (canonical_name, alias) not in self._import_statements:
            if canonical_name in ('mpif.h', '?'):  # TODO: other ways to include MPI?
                self.ensure_mpi(canonical_name, alias)
            else:
                self._import_statements[canonical_name, alias] = {"type":"Import",
                    "module":canonical_name, "asname":alias}
    def ensure_mpi(self, canonical_name, alias):
        """Record the mpi4py import used in place of Fortran MPI includes."""
        # if ('mpi4py', None) not in self._import_statements:
        self._import_statements[canonical_name, alias] = {"type" : "importfrom", "namespace":'mpi4py', "name":'MPI', "asname":None}
    def no_transform(self, node):
        """Explicitly reject a node that must not appear at this point."""
        raise NotImplementedError(
            'not implemented handling of:\n{}'.format(ET.tostring(node).decode().rstrip()))
class XmlASTFortran(XmlAST):
    """Transform Fortran AST in XML format into typed CyML AST.
    The Fortran AST in XML format is provided by XML output generator for Open Fortran Parser.
    """
    def __init__(self, split_declarations=True):
        super().__init__()
        # whether multi-variable declarations are split into one assignment per variable
        self._split_declarations = split_declarations
        # re-entrancy flag: nested <file> nodes become print-like placeholders
        self._now_parsing_file = False
    def _file(self, node):
        """Transform the root <file> node; nested files become placeholder nodes."""
        if not self._now_parsing_file:
            self._now_parsing_file = True
            body = self.transform_all_subnodes(node, ignored={'start-of-file', 'end-of-file'})
            self._now_parsing_file = False
            # NOTE(review): this prepends the raw dict, not the flattened
            # ``import_statements`` property -- confirm which one is intended.
            body = [self._import_statements] + body
        else:
            return {"type": 'ExprStatNode', "expr": {"type" : 'custom_call',"function" : "print",
                "args":'file',"path": node.attrib['path']}}
        return {"type":"top_level", "body":body}
    def _module(self, node):
        """Transform a <module> node into a start/cond/defi dict (or just the body)."""
        body = self.transform_all_subnodes(self.get_one(node, './body'), ignored={'statement','module-subprogram'})
        members_node = node.find('./members')
        conditional = body
        if members_node is None:
            return conditional
        members = self.transform_all_subnodes(members_node, ignored ={'module-subprogram', 'module-subprogram-part',"attr-spec" })
        if not members:
            members = []
        clsdef = {"type":node.attrib['name'],"body":members, "decorator_list":[]}
        return {"type" : "start", "cond" : conditional, "defi" : clsdef}
    def _specification(self, node: ET.Element):
        """Transform a <specification> node into a decl dict."""
        # NOTE(review): leftover debug print() calls below -- consider removing
        # or replacing with _LOG.debug.
        print("node", node)
        declarations = self.transform_all_subnodes(node, skip_empty=True, ignored={
            'declaration-construct', 'specification-part',"attr-spec" })
        print('de', declarations)
        return {"type":'decl', "args":declarations}
    def _declaration(self, node) :
        """Dispatch a <declaration> node by its 'type' attribute."""
        declaration_type = node.attrib.get('type', None)
        if declaration_type is None:
            return
        elif declaration_type == 'implicit':
            return self._declaration_implicit(node)
        elif declaration_type =='variable':
            return {"type":"declaration", "decl":self._declaration_variable(node)}
        elif declaration_type == 'parameter':
            return self._declaration_parameter(node)
        details = self.transform_all_subnodes(node, ignored={'attr-spec'})
        return details if details else []
    def _declaration_variable(self, node: ET.Element):
        """Reorganize data from multi-variable declaration into sequence of annotated assignments."""
        # variable names
        variables_and_values = self.transform_all_subnodes(
            self.get_one(node, './variables'), skip_empty=True,
            ignored={'entity-decl-list__begin', 'entity-decl-list','attr-spec' })
        if not variables_and_values:
            _LOG.error('%s', ET.tostring(node).decode().rstrip())
            raise SyntaxError('at least one variable expected in variables list')
        variables = [var for var, _ in variables_and_values]
        # base type of variables
        base_type = self.transform_one(self.get_one(node, './type'))
        # dimensionality information (only for array types)
        dimensions_node = node.find('./dimensions')
        # NOTE(review): _variable() returns plain strings, so this getattr
        # always yields the {} default -- confirm whether per-variable
        # dimension metadata can ever be present here.
        variable_dimensions = [getattr(var, 'fortran_metadata', {}).get('dimensions', None)
                               for var in variables]
        has_variable_dimensions = any([_ is not None for _ in variable_dimensions])
        if has_variable_dimensions and not self._split_declarations:
            raise NotImplementedError('inline dimensions not implemented yet')
        if dimensions_node is not None and has_variable_dimensions:
            raise SyntaxError(
                'declaration dimension data as well as per-variable dimension data present')
        if dimensions_node is not None:
            dimensions = self.transform_one(dimensions_node)
            assert len(dimensions) >= 1
            self.ensure_import('static_typing', 'st')
            annotation = make_st_ndarray(base_type, dimensions)
            annotations = [annotation for _ in variables]
        elif has_variable_dimensions:
            self.ensure_import('static_typing', 'st')
            annotations = [base_type if _ is None else make_st_ndarray(base_type, _)
                           for _ in variable_dimensions]
        else:
            annotations = [base_type for _ in variables]
        # initial values
        if dimensions_node is not None:
            values = [None if val is None else make_numpy_constructor('array', val, base_type)
                      for _, val in variables_and_values]
        elif has_variable_dimensions:
            assert len(variables_and_values) == len(variable_dimensions)
            values = [None if val is None
                      else (val if dim is None else make_numpy_constructor('array', val, base_type))
                      for (_, val), dim in zip(variables_and_values, variable_dimensions)]
        else:
            values = [val for _, val in variables_and_values]
        # metadata is always truthy here ('is_declaration' is preset)
        metadata = {'is_declaration': True}
        intent_node = node.find('./intent')
        if intent_node is not None:
            metadata['intent'] = intent_node.attrib['type']
        attributes = ('allocatable', 'asynchronous', 'external', 'intrinsic', 'optional',
                      'parameter', 'pointer', 'protected', 'save', 'target', 'value', 'volatile')
        for attribute in attributes:
            if node.find('./attribute-{}'.format(attribute)) is not None:
                metadata['is_{}'.format(attribute)] = True
        if metadata:
            metadata_node = horast_nodes.Comment(
                value=ast.Str(' Fortran metadata: {}'.format(repr(metadata))), eol=False)
        _handled = {'variables', 'type', 'dimensions', 'intent'}
        extra_results = self.transform_all_subnodes(node, ignored={
            'type-declaration-stmt'} | _handled | {'attribute-{}'.format(_) for _ in attributes})
        if extra_results:
            _LOG.warning('ignoring additional information in the declaration:\n%s', extra_results)
        if not self._split_declarations:
            raise NotImplementedError()
        assignments = [{"name":var, "type":ann, "value":val}
                       for var, ann, val in zip(variables, annotations, values)]
        if metadata:
            new_assignments = []
            for assignment in assignments:
                assignment.update({"metadata":metadata})
                new_assignments.append(assignment)
            # trailing comment node carrying the metadata repr
            new_assignments.append(metadata_node)
            assignments = new_assignments
        return assignments
    def _attr_spec(self, node):
        # attribute specifications are deliberately dropped
        pass
    def _declaration_parameter(self, node):
        # parameter declarations are not translated (yet)
        return
    def _comment(self, node: ET.Element) :
        """Strip the Fortran comment marker ('!', 'c' or 'C') and return the text."""
        comment = node.attrib['text']
        if not comment or comment[0] not in ('!', 'c', 'C'):
            raise SyntaxError('comment token {} has unexpected prefix'.format(repr(comment)))
        comment = comment[1:]
        return [comment]
    def _statement(self, node):
        """Transform an executable <statement> node into a list of statements."""
        details = self.transform_all_subnodes(node, ignored={
            'action-stmt', 'executable-construct', 'execution-part-construct',
            'do-term-action-stmt',  # until do loop parsing implementation is changed
            'execution-part'})
        # NOTE(review): both branches of this conditional yield ``detail``
        # unchanged -- presumably the first branch was meant to wrap
        # non-statement nodes; confirm the original intent.
        return [
            detail if isinstance(detail, (
                ast.Return, ast.Delete, ast.Assign, ast.AugAssign,
                ast.AnnAssign, ast.For, ast.While,
                ast.If, ast.With,
                ast.Assert,
                ast.Expr, ast.Pass, ast.Break,
                ast.Continue))
            else detail
            for detail in details]
    def _declaration_implicit(self, node):
        """Transform an implicit declaration; only 'implicit none' is handled."""
        subtype = node.attrib['subtype'].lower()
        if subtype == 'none':
            annotation = {"type":"implicit", "value":None}
            implicit = annotation
            #implicit.fortran_metadata = {'is_declaration': True}
            return implicit
    def _assignment(self, node):
        """Transform an <assignment> node; exactly one target and one value are required."""
        target = self.transform_all_subnodes(self.get_one(node, './target'))
        value = self.transform_all_subnodes(self.get_one(node, './value'))
        if len(target) != 1:
            raise SyntaxError(
                'exactly 1 target expected but {} given {} in:\n{}'
                .format(len(target), target, ET.tostring(node).decode().rstrip()))
        target = target[0]
        #assert isinstance(target, ast.AST)
        if len(value) != 1:
            raise SyntaxError(
                'exactly 1 value expected but {} given {} in:\n{}'
                .format(len(value), value, ET.tostring(node).decode().rstrip()))
        value = value[0]
        #assert isinstance(value, ast.AST)
        return {"type": "assignment", "target":target, "value":value}
    # per-intrinsic converter callables, keyed by lowercased intrinsic name
    _intrinsics_converters = {}
    def _subscripts(self, node: ET.Element, postprocess: bool = True):
        """Transform a <subscripts> node, optionally wrapping into Index/ExtSlice."""
        subscripts = self.transform_all_subnodes(node, ignored={
            'section-subscript-list__begin', 'section-subscript-list'})
        assert len(subscripts) == int(node.attrib['count'])
        if not postprocess:
            return subscripts
        if any(isinstance(_, ast.Slice) for _ in subscripts):
            if len(subscripts) == 1:
                return subscripts[0]
            return ast.ExtSlice(dims=[
                (_ if isinstance(_, (ast.Index, ast.Slice))
                 else ast.Index(value=_)) for _ in subscripts])
        assert all(not isinstance(_, (ast.Index, ast.Slice, ast.ExtSlice))
                   for _ in subscripts), subscripts
        if len(subscripts) == 1:
            return ast.Index(value=subscripts[0])
        return ast.Index(value=ast.Tuple(elts=subscripts, ctx=ast.Load()))
    def _subscript(self, node: ET.Element):
        """Transform a single <subscript>; an empty one becomes a full slice ``[:]``."""
        subscripts = self.transform_all_subnodes(node, ignored={'section-subscript'})
        if not subscripts:
            assert node.attrib['type'] == 'empty'
            return ast.Slice(lower=None, upper=None, step=None)
        if len(subscripts) != 1:
            self.no_transform(node)
        assert node.attrib['type'] in ('simple', 'range')
        return subscripts[0]
    def _variable(self, node: ET.Element):
        """Return a (name, initial value) pair for a declared variable."""
        value_node = node.find('./initial-value')
        value = None
        if value_node is not None:
            values = self.transform_all_subnodes(value_node, ignored={'initialization'})
            assert len(values) == 1, values
            value = values[0]
        variable = node.attrib['name']
        metadata = {}
        dimensions_node = node.find('./dimensions')
        if dimensions_node is not None:
            metadata['dimensions'] = self.transform_one(dimensions_node)
        if metadata:
            # NOTE(review): ``variable`` is a str here -- setting an attribute
            # on it raises AttributeError whenever dimensions are present.
            variable.fortran_metadata = metadata
        return variable, value
    def _name(self, node: ET.Element):
        """Transform a <name> node into a call, a subscript, or a local reference."""
        name_str = node.attrib['id']
        name = name_str
        name_str = name_str.lower()
        name_type = node.attrib['type'] if 'type' in node.attrib else None
        is_intrinsic = name_str in self._intrinsics_converters
        subscripts_node = node.find('./subscripts')
        # first attempt: interpret name + subscripts as a call
        try:
            args = self._subscripts(subscripts_node, postprocess=False) if subscripts_node else []
            args, keywords = separate_args_and_keywords(args)
            call = ast.Call(func=name, args=args, keywords=keywords)
            if is_intrinsic:
                if subscripts_node is None:
                    _LOG.warning('found intrinsic name "%s" without any subscripts', name_str)
                else:
                    name_type = 'function'
                    call = self._intrinsics_converters[name_str](self, call)
        except SyntaxError:
            _LOG.info('transforming name to call failed as below (continuing despite that)',
                      exc_info=True)
        # second interpretation: name + subscripts as an array subscript
        slice_ = self._subscripts(subscripts_node) if subscripts_node else None
        subscript = ast.Subscript(value=name, slice=slice_, ctx=ast.Load())
        if name_type in ('procedure', 'function'):
            return call
        elif not subscripts_node:
            return {"type":"local", "name":name, "pseudo_type":name_type}
        elif name_type in ('variable',):
            return {"type":"local", "name":subscript, "pseudo_type":name_type}
        elif not slice_:
            return call
        elif name_type in ('ambiguous',):
            return subscript
        elif name_type is not None:
            raise NotImplementedError('unrecognized name type "{}" in:\n{}'
                                      .format(name_type, ET.tostring(node).decode().rstrip()))
        elif name_type is None:
            raise NotImplementedError('no name type in:\n{}'
                                      .format(ET.tostring(node).decode().rstrip()))
        raise NotImplementedError('not implemented handling of:\n{}'
                                  .format(ET.tostring(node).decode().rstrip()))
    def _value(self, node: ET.Element):
        """Transform a <value> node; exactly one transformed subnode is expected."""
        values = self.transform_all_subnodes(node)
        assert len(values) == 1, values
        return values[0]
    def _operation(self, node: ET.Element):
        """Dispatch an <operation> node to the multiary or unary handler."""
        if node.attrib['type'] == 'multiary':
            return self._operation_multiary(node)
        if node.attrib['type'] == 'unary':
            return self._operation_unary(node)
        raise NotImplementedError('not implemented handling of:\n{}'
                                  .format(ET.tostring(node).decode().rstrip()))
    def _operation_multiary(
            self, node: ET.Element):
        """Dispatch a multiary operation by the AST class of its first operator."""
        operators_and_operands = self.transform_all_subnodes(node, skip_empty=True, ignored={
            'add-operand', 'mult-operand', 'power-operand', 'and-operand', 'or-operand',
            'parenthesized_expr', 'primary', 'level-2-expr', 'level-3-expr'})
        assert isinstance(operators_and_operands, list), operators_and_operands
        # alternating sequence: operand, operator, operand, ... -- odd length
        assert len(operators_and_operands) % 2 == 1, operators_and_operands
        operation_type, _ = operators_and_operands[1]
        if operation_type is ast.BinOp:
            return self._operation_multiary_arithmetic(operators_and_operands)
        if operation_type is ast.BoolOp:
            return self._operation_multiary_boolean(operators_and_operands)
        if operation_type is ast.Compare:
            return self._operation_multiary_comparison(operators_and_operands)
        raise NotImplementedError('not implemented handling of:\n{}'
                                  .format(ET.tostring(node).decode().rstrip()))
    def _operation_multiary_arithmetic(
            self, operators_and_operands: t.Sequence[t.Union[ast.AST, t.Tuple[
                t.Type[ast.BinOp], t.Type[ast.AST]]]]):
        """Fold an alternating operand/operator sequence into a nested BinOp tree."""
        # reversed so the tree is built right-to-left, nesting into .left
        operators_and_operands = list(reversed(operators_and_operands))
        operators_and_operands += [(None, None)]
        root_operation = None  # type: ast.BinOp
        operation = None  # type: ast.BinOp
        root_operation_type = None
        root_operator_type = None
        zippped = zip(operators_and_operands[::2], operators_and_operands[1::2])
        for operand, (operation_type, operator_type) in zippped:
            if root_operation is None:
                root_operation_type = operation_type
                root_operator_type = operator_type
                if root_operation_type is not ast.BinOp:
                    raise NotImplementedError('root operation initialisation')
                root_operation = ast.BinOp(
                    left=None, op=root_operator_type(), right=operand)
                operation = root_operation
                continue
            if operation_type is not None:
                assert operation_type is root_operation_type, (operation_type, root_operation_type)
                operation.left = ast.BinOp(left=None, op=operator_type(), right=operand)
                operation = operation.left
            else:
                # sentinel reached: last operand closes the leftmost BinOp
                operation.left = operand
        return root_operation
    def _operation_multiary_boolean(self, operators_and_operands):
        """Fold an alternating operand/operator sequence into a single BoolOp."""
        operators_and_operands += [(None, None)]
        root_operation = None
        root_operation_type = None
        root_operator_type = None
        zippped = zip(operators_and_operands[::2], operators_and_operands[1::2])
        for operand, (operation_type, operator_type) in zippped:
            if root_operation is None:
                root_operation_type = operation_type
                root_operator_type = operator_type
                if root_operation_type is not ast.BoolOp:
                    raise NotImplementedError('root operation initialisation')
                root_operation = ast.BoolOp(
                    op=root_operator_type(), values=[operand])
                continue
            if operation_type is not None:
                assert operation_type is root_operation_type, (operation_type, root_operation_type)
                assert operator_type is root_operator_type, (operator_type, root_operator_type)
            # NOTE(review): the append below is commented out, so every operand
            # after the first is dropped from the BoolOp -- looks like a bug.
            #root_operation.values.append(operand)
        return root_operation
    def _literal(self, node):
        """Transform a <literal> node into the matching ast constant node."""
        literal_type = node.attrib['type']
        if literal_type == 'bool':
            return ast.NameConstant(value={
                'false': False,
                'true': True}[node.attrib['value']])
        if literal_type == 'int':
            return ast.Num(n=int(node.attrib['value']))
        if literal_type == 'real':
            value = node.attrib['value']
            # Fortran double-precision exponent marker 'D' -> Python 'E'
            if 'D' in value:
                value = value.replace('D', 'E', 1)
            return ast.Num(n=float(value))
        if literal_type == 'char':
            assert len(node.attrib['value']) >= 2
            begin = node.attrib['value'][0]
            end = node.attrib['value'][-1]
            assert begin == end
            # strip the matching quote characters
            return ast.Str(node.attrib['value'][1:-1], '')
        _LOG.warning('%s', ET.tostring(node).decode().rstrip())
        raise NotImplementedError('literal type "{}" not supported'.format(literal_type))
    def _operation_multiary_comparison(
            self, operators_and_operands: t.Sequence[t.Union[ast.AST, t.Tuple[
                t.Type[ast.Compare], t.Type[ast.AST]]]]) -> ast.Compare:
        """Build an ast.Compare from exactly (left, operator, right)."""
        assert len(operators_and_operands) == 3, operators_and_operands
        left_operand, (operation_type, operator_type), right_operand = operators_and_operands
        assert operation_type is ast.Compare
        assert not isinstance(right_operand, list), right_operand
        return ast.Compare(
            left=left_operand, ops=[operator_type()], comparators=[right_operand])
    def _operation_unary(self, node: ET.Element):
        """Build a unary operation; binary +/- operators are remapped to UAdd/USub."""
        operators_and_operands = self.transform_all_subnodes(node, skip_empty=True, ignored={
            'signed-operand', 'and-operand', 'parenthesized_expr', 'primary'})
        assert isinstance(operators_and_operands, list), operators_and_operands
        assert len(operators_and_operands) == 2, operators_and_operands
        operation_type, operator_type = operators_and_operands[0]
        if operation_type is ast.BinOp:
            operation_type, operator_type = {
                (ast.BinOp, ast.Add): (ast.UnaryOp, ast.UAdd),
                (ast.BinOp, ast.Sub): (ast.UnaryOp, ast.USub)
                }[operation_type, operator_type]
        operand = operators_and_operands[1]
        return operation_type(op=operator_type(), operand=operand)
    def _operand(self, node: ET.Element):
        """Transform an <operand>; exactly one transformed subnode is expected."""
        operand = self.transform_all_subnodes(node, ignored={
            'add-operand__add-op', 'mult-operand__mult-op', 'and-operand__not-op'})
        if len(operand) != 1:
            _LOG.warning('%s', ET.tostring(node).decode().rstrip())
            # _LOG.error("%s", operand)
            #_LOG.error([typed_astunparse.unparse(_).rstrip() for _ in operand])
            raise SyntaxError(
                'expected exactly one operand but got {} in:\n{}'
                .format(len(operand), ET.tostring(node).decode().rstrip()))
        return operand[0]
    def _operator(
            self, node: ET.Element):
        """Look up the (operation class, operator class) pair for a Fortran operator."""
        return FORTRAN_PYTHON_OPERATORS[node.attrib['operator'].lower()]
    def _subroutine(self, node):
        """Transform a <subroutine> node into a function_definition dict."""
        header_node = self.get_one(node, './header')
        arguments_node = header_node.find('./arguments')
        if arguments_node is None:
            arguments = {"type":"function_definition", "args":[]}
        else:
            arguments = self.transform_one(arguments_node)
        body = self.transform_all_subnodes(self.get_one(node, './body'))
        function_def = {"type":"function_definition", "name":node.attrib['name'], "params":arguments, "body":body, "decorator_list":[],
                        "return_type":[], "pseudo_type" : []}
        members_node = node.find('./members')
        if members_node is not None:
            members = self.transform_all_subnodes(members_node, ignored={'module-subprogram',
                'internal-subprogram', 'internal-subprogram-part'})
            assert members
            # NOTE(review): ``function_def`` is a dict -- attribute assignment
            # below raises AttributeError whenever members are present.
            function_def.fortran_metadata = {'contains': members}
        return function_def
    def _arguments(self, node):
        """Transform an <arguments> node into a name node listing the dummy args."""
        # NOTE(review): ``Node`` is not defined anywhere in this module --
        # presumably an external AST-node class; verify the import exists.
        return Node(type = "name", name=self.transform_all_subnodes(node, ignored={
            'dummy-arg-list__begin', 'dummy-arg-list',
            'generic-name-list__begin', 'generic-name-list'}))
    def _argument(self, node):
        """Transform an <argument>: either a plain name or a keyword-like pair."""
        if 'name' not in node.attrib:
            raise SyntaxError(
                '"name" attribute not present in:\n{}'.format(ET.tostring(node).decode().rstrip()))
        values = self.transform_all_subnodes(node, skip_empty=False, ignored={
            'actual-arg', 'actual-arg-spec', 'dummy-arg'})
        if values:
            assert len(values) == 1
            _LOG.warning('generating invalid Python AST: keyword() in arguments()')
            return {"type": type(values[0]), "arg":node.attrib['name'], "value":values[0]}
        return node.attrib['name']
    def _type(self, node):
        """Transform a <type> node into a Python type expression."""
        name = node.attrib['name'].lower()
        length = self.transform_one(self.get_one(node, './length')) \
            if node.attrib['hasLength'] == 'true' else None
        kind = self.transform_one(self.get_one(node, './kind')) \
            if node.attrib['hasKind'] == 'true' else None
        if length is not None and kind is not None:
            raise SyntaxError(
                'only one of "length" and "kind" can be provided, but both were given'
                ' ("{}" and "{}" respectively) in:\n{}'
                .format(length, kind, ET.tostring(node).decode().rstrip()))
        if name == 'character':
            # character length is not representable in the target type system
            if length is not None:
                if isinstance(length, ast.Num):
                    length = length.n
                _LOG.info(
                    'ignoring string length "%i" in:\n%s',
                    length, ET.tostring(node).decode().rstrip())
            return ast.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, t.Any], mode='eval').body
        elif length is not None:
            self.ensure_import('numpy', 'np')
            return ast.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, length], mode='eval').body
        elif kind is not None:
            self.ensure_import('numpy', 'np')
            if isinstance(kind, ast.Num):
                kind = kind.n
            if not isinstance(kind, int):
                # non-constant kind: fall back to a subscripted static type
                python_type = ast.parse(
                    FORTRAN_PYTHON_TYPE_PAIRS[name, None], mode='eval').body
                self.ensure_import('static_typing', 'st')
                static_type = ast.Attribute(
                    value='st',
                    attr=python_type, ctx=ast.Load())
                return ast.Subscript(
                    value=static_type, slice=ast.Index(value=kind), ctx=ast.Load())
            return ast.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, kind], mode='eval').body
        else:
            if node.attrib['type'] == 'derived':
                return ast.Call(func=ast.Name(id='type', ctx=ast.Load()),
                                args=[ast.Name(id=name, ctx=ast.Load())],
                                keywords=[])
            assert node.attrib['type'] == 'intrinsic'
            return FORTRAN_PYTHON_TYPE_PAIRS[name, None]
        # NOTE(review): unreachable -- every branch above returns.
        raise NotImplementedError(
            'not implemented handling of:\n{}'.format(ET.tostring(node).decode().rstrip()))
from pathlib import Path
# NOTE(review): ad-hoc smoke test with hard-coded absolute Windows paths; it
# runs on import -- consider guarding it with ``if __name__ == '__main__':``.
path = Path("C:/Users/midingoy/Documents/THESE/pycropml_pheno/test/Tutorial/test.f90")
path2 = Path("C:/Users/midingoy/Documents/THESE/pycropml_pheno/test/Tutorial/energybalance_pkg/src/f90/canopytemperature.f90")
# read the Fortran source (the parser also re-reads the file at path2 itself)
with open(path2, "r") as fi:
    code = fi.read()
res = FortranParser().parse(code, path2)
# dump the raw XML AST for manual inspection
path3 = Path("C:/Users/midingoy/Documents/THESE/pycropml_pheno/test/Tutorial/xmlfortran.xml")
with open(path3, "w") as p:
    p.write(ET.tostring(res).decode().rstrip())
xmlast = XmlASTFortran().transform_all_subnodes(res) |
988,861 | 6569ca20d80a05f564ebc848be03b27a8d312bdf | print("I hate this class")
# Echo the two follow-up messages in order.
for message in ("I do too!", "ok"):
    print(message)
|
988,862 | 14acfa82a4803112cb765488ca96507ffdfc6243 | # Generated by Django 3.2.6 on 2021-08-19 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Beer.ABV to DecimalField(blank=True, 3 digits, 1 decimal place)."""
    dependencies = [
        ('soju', '0015_delete_user'),
    ]
    operations = [
        migrations.AlterField(
            model_name='beer',
            name='ABV',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=3),
        ),
    ]
|
988,863 | 1dd33cdebcc6c4e6c1d9f93dad289a7d1aeda3f0 | from datetime import datetime as dt
from datetime import time
from matrr import models
def update_monkeys_derived_attribute():
cnt = 0;
for m in models.Monkey.objects.all():
cnt += 1
print cnt
m.populate_age_at_intox()
m.populate_drinking_category()
m.save()
def convert_MonkeyProtein_dates_to_correct_datetimes():
    """Attach the correct time-of-day to six known date-only MonkeyProtein timestamps."""
    # ((year, month, day), (hour, minute)) correction pairs
    corrections = (
        ((2002, 4, 15), (12, 0)),
        ((2003, 3, 5), (17, 30)),
        ((2003, 4, 28), (12, 0)),
        ((2003, 4, 30), (17, 30)),
        ((2003, 12, 19), (7, 0)),
        ((2004, 8, 2), (12, 0)),
    )
    for date_parts, time_parts in corrections:
        old_datetime = dt(*date_parts)
        # records currently stored with the midnight placeholder datetime
        protein_records = models.MonkeyProtein.objects.filter(mpn_date=old_datetime)
        new_datetime = dt.combine(old_datetime.date(), time(*time_parts))
        protein_records.update(mpn_date=new_datetime)
def assign_cohort_institutions():
    """Point each INIA cohort at the institution that ran it.

    The original implementation repeated the same three-line
    get/assign/save sequence fourteen times; the mapping is now data-driven.
    """
    wfu = models.Institution.objects.get(ins_institution_name='Wake Forest University')
    ohsu = models.Institution.objects.get(ins_institution_name='Oregon Health Sciences University, Technology Management')
    # (cohort name, institution), in the same order the original assignments ran
    assignments = (
        ('INIA Cyno 1', ohsu),
        ('INIA Cyno 2', ohsu),
        ('INIA Cyno 3', ohsu),
        ('INIA Cyno 8', ohsu),
        ('INIA Rhesus 1', wfu),
        ('INIA Rhesus 2', wfu),
        ('INIA Rhesus 4', ohsu),
        ('INIA Rhesus 5', ohsu),
        ('INIA Rhesus 6a', ohsu),
        ('INIA Rhesus 6b', ohsu),
        ('INIA Rhesus 7a', ohsu),
        ('INIA Rhesus 7b', ohsu),
        ('INIA Vervet 1', wfu),
        ('INIA Vervet 2', wfu),
    )
    for cohort_name, institution in assignments:
        cohort = models.Cohort.objects.get(coh_cohort_name=cohort_name)
        cohort.institution = institution
        cohort.save()
def populate_mky_species():
    """Copy each non-assay cohort's species onto all of its monkeys."""
    cohorts = models.Cohort.objects.exclude(coh_cohort_name__icontains='assay')
    for cohort in cohorts:
        cohort.monkey_set.all().update(mky_species=cohort.coh_species)
def delete_wonky_monkeys():
    """Purge data rows for known-bad monkey primary keys.

    Deletes matching rows from each data table in `data_models` for monkeys
    10043, 10050 and 10053.
    """
    monkey_pks = [10043, 10050, 10053]
    # FIX: the local list was named `models`, shadowing the `models` module —
    # it worked only because the RHS was evaluated first, and any later use
    # of the module inside this function would have broken.
    data_models = [models.MonkeyToDrinkingExperiment, models.MonkeyBEC, models.ExperimentEvent, models.MonkeyImage]
    for model in data_models:
        for mky in monkey_pks:
            # Single-argument print() prints identically under py2 and py3.
            print("Deleting mky %d from table %s" % (mky, model.__name__))
            model.objects.filter(monkey=mky).delete()
def update_data_tissue_availability():
    """Mark a 'whole' TissueSample for every monkey that has recorded data.

    For each data source model, a TissueType under the 'Data' category is
    created (if absent) and every monkey with rows in that model gets its
    matching TissueSample flagged quantity=1 / units='whole'.
    """
    data_category, cat_is_new = models.TissueCategory.objects.get_or_create(cat_name='Data')
    # Explicit (label, model) pairs instead of two parallel zipped lists.
    datasets = (
        ("Blood Ethanol Concentration", models.MonkeyBEC),
        ("Hormone", models.MonkeyHormone),
        ("Daily Ethanol Summary", models.MonkeyToDrinkingExperiment),
        ("Ethanol Events", models.ExperimentEvent),
        ("Necropsy Summary", models.NecropsySummary),
        ("Electrophysiology", models.MonkeyEphys),
        ("Metabolite", models.MonkeyMetabolite),
        ("Protein", models.MonkeyProtein),
    )
    for label, source_model in datasets:
        tissue_type, tst_is_new = models.TissueType.objects.get_or_create(tst_tissue_name=label, category=data_category)
        monkey_pks = source_model.objects.order_by().values_list('monkey', flat=True).distinct()
        models.TissueSample.objects.filter(monkey__in=monkey_pks, tissue_type=tissue_type).update(tss_sample_quantity=1, tss_units='whole')
    # Bouts and drinks have no direct monkey field; walk the relation chain.
    tissue_type, tst_is_new = models.TissueType.objects.get_or_create(tst_tissue_name="Ethanol Bouts", category=data_category)
    for pk in models.ExperimentBout.objects.order_by().values_list('mtd__monkey', flat=True).distinct():
        monkey = models.Monkey.objects.get(pk=pk)
        models.TissueSample.objects.filter(monkey=monkey, tissue_type=tissue_type).update(tss_sample_quantity=1, tss_units='whole')
    tissue_type, tst_is_new = models.TissueType.objects.get_or_create(tst_tissue_name="Ethanol Drinks", category=data_category)
    for pk in models.ExperimentDrink.objects.order_by().values_list('ebt__mtd__monkey', flat=True).distinct():
        monkey = models.Monkey.objects.get(pk=pk)
        models.TissueSample.objects.filter(monkey=monkey, tissue_type=tissue_type).update(tss_sample_quantity=1, tss_units='whole')
    print("Success.")
|
988,864 | ac69f5312f23af0432ccf93b284a2ca3d2be1324 | import re
# Regexes for parsing xgboost text-dump tree lines.
# FIX: ID_PATTERN used a non-raw string ("\d*:"), an invalid escape sequence
# that is a DeprecationWarning/SyntaxWarning on modern Python.
ID_PATTERN = re.compile(r"\d*:")                            # node-id prefix, e.g. "12:"
NODE_PATTERN = re.compile(r"(\d+):\[(.+)\]")                # split node, e.g. "3:[f7<0.5]"
LEAF_PATTERN = re.compile(r"(\d+):(leaf=.+)")               # leaf node, e.g. "4:leaf=0.25"
EDGE_PATTERN = re.compile(r"yes=(\d+),no=(\d+),missing=(\d+)")
EDGE_PATTERN2 = re.compile(r"yes=(\d+),no=(\d+)")           # dump without missing info
def _parse_node(text):
    """Match *text* as a split node, then as a leaf node.

    Returns the first successful re.Match; raises ValueError if neither
    pattern matches.
    """
    for pattern in (NODE_PATTERN, LEAF_PATTERN):
        match = pattern.match(text)
        if match is not None:
            return match
    raise ValueError("Unable to parse node: {0}.".format(text))
def _parse_edge(text):
    """Parse an edge line "yes=..,no=..[,missing=..]".

    Returns (yes, no, missing) as strings, with missing=None when the dump
    omits it.  Raises ValueError if neither edge pattern matches.

    FIX: the original wrapped the first match in try/bare-except even though
    nothing in that path can raise; the dead handler is removed.
    """
    match = EDGE_PATTERN.match(text)
    if match is not None:
        yes, no, missing = match.groups()
        return yes, no, missing
    match = EDGE_PATTERN2.match(text)
    if match is not None:
        yes, no = match.groups()
        return yes, no, None
    raise ValueError("Unable to parse edge: {0}.".format(text))
def build_tree(tree, indent, missing_condition, float_type):
    """Render an xgboost text-dump decision tree as nested if/else source.

    :param tree: list of dump lines; a split node's line is immediately
        followed by its "yes=..,no=..[,missing=..]" edge line.
    :param indent: one indentation unit (string) for the generated code.
    :param missing_condition: format template for the is-missing test;
        formatted with `feature_index` and `float_type`.
    :param float_type: float type/cast name used in the generated code.
    :raises ValueError: on an unparsable line or when root node "0" is absent.
    """
    root_id = "0"   # FIX: was misspelled `rood_id`
    root_depth = 1

    def _build_tree(nodes, node_id, depth, _float_type):
        # Recursively emit code for `node_id` at nesting level `depth`.
        # (Renamed the accumulator from `tree`, which shadowed the outer
        # parameter.)
        code = ""
        current_indent = indent * depth
        if nodes[node_id]["leaf"]:
            code += "\n{indent}return {float_type}({value})".format(
                indent=current_indent,
                value=float(nodes[node_id]["value"]),
                float_type=_float_type)
            return code
        if depth > root_depth:
            code += "\n"
        # The condition direction flips when "missing" follows the no-branch.
        code += "{indent}if (features[{feature_index}] < {float_type}({threshold})) {condition_direction}{missing_condition} {{".format(
            indent=current_indent,
            feature_index=nodes[node_id]["value"]["feature"][1:],
            threshold=nodes[node_id]["value"]["threshold"],
            condition_direction="&& !" if nodes[node_id]["value"]["missing"] != nodes[node_id]["value"]["yes"] else "|| ",
            missing_condition=missing_condition.format(
                feature_index=nodes[node_id]["value"]["feature"][1:],
                float_type=_float_type),
            float_type=_float_type)
        code += _build_tree(nodes, nodes[node_id]["value"]["yes"], depth + 1, _float_type)
        code += "\n{indent}}} else {{".format(indent=current_indent)
        code += _build_tree(nodes, nodes[node_id]["value"]["no"], depth + 1, _float_type)
        code += "\n{indent}}}".format(indent=current_indent)
        return code

    _nodes = {}
    i = 0
    while i < len(tree):
        # FIX: a line without an id prefix made ID_PATTERN.match() return
        # None, so `.group()` raised AttributeError — which the original
        # `except IndexError` never caught.  Test the match explicitly.
        match = ID_PATTERN.match(tree[i])
        if match is None:
            raise ValueError("Model format is not supported.")
        idx = match.group()[:-1]
        _node = _parse_node(tree[i])
        node_split = _node.group(2).split("=")
        if node_split[0] == "leaf":
            _nodes[idx] = {"leaf": True, "value": node_split[1]}
        else:
            i += 1
            f, t = _node.group(2).split("<")
            yes, no, missing = _parse_edge(tree[i])
            _nodes[idx] = {
                "leaf": False,
                "value": {"yes": yes, "no": no, "missing": missing, "feature": f, "threshold": t}
            }
        i += 1
    if root_id not in _nodes:
        # FIX: the "{}" placeholder was never filled in.
        raise ValueError('root node "{}" cannot be found.'.format(root_id))
    return _build_tree(nodes=_nodes, node_id=root_id, depth=root_depth, _float_type=float_type)
|
988,865 | 1e08580b8c08a6174943a22e85f09213583e2c35 | # python equivalent for while and for loops
# Count down 5..1 with a while-loop, then up 0..4 with a for-loop.
# FIX: the py2-only `print x` statements are converted to single-argument
# print() calls, which produce identical output under Python 2 and 3.
a = 5
while a > 0:
    print(a)
    a = a - 1
for i in range(0, 5):
    print(i)
|
988,866 | 2855b2009573b3b60ca336ac2de28ed8dc816fe9 | """
Count how often each word occurs in a sentence entered by the user.
"""
# Read a sentence and report each word's occurrence count, sorted by word.
sentence = input("enter any sentence:")
frequency_words = {}
words = sentence.split()
for word in words:
    # dict.get collapses the membership test and the increment into one line.
    frequency_words[word] = frequency_words.get(word, 0) + 1
print("Text : ", sentence)
for word in sorted(frequency_words):
    print("{:<} : {} ".format(word, frequency_words[word]))
|
988,867 | efb7a661e4dabb2e6d9ef072c7f2bc9f78d7d4ce | from PIL import Image
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
import clsdefect.transforms_image_mask_label as T
from clsdefect.preprocessing.preprocessing_training import get_label
import tqdm
from clsdefect.utils import list_all_files_sorted, list_all_folders, load_obj
classes = ['corrosion', 'fouling', 'delamination']
class PPGTrain2(Dataset):
    """Torch Dataset over pre-cut PPG defect patches on disk.

    Expects *root* to contain ``*_patch.png`` / ``*_mask.png`` /
    ``*_meta.pkl`` triples; samples are enumerated from the mask files.
    """

    def __init__(self, root: str, normalize: bool, area_threshold: float, ratio_threshold: float, percentages: str or None = None, patch_size: tuple = (64, 30)):
        # `percentages` is an encoded train/val split such as "80_20".
        if percentages:
            percentages = [int(i) for i in percentages.split('_')]
            assert len(percentages) == 2
            assert sum(percentages) == 100
        assert 0 <= ratio_threshold <= 1
        super().__init__()
        self.root = root
        self.classes = classes
        self.ratio_th = ratio_threshold
        self.area_threshold = area_threshold
        transforms = []
        # Upscale the short patch side, then pad so the result reaches the
        # long side (patch_size[0]).
        scale = np.floor(patch_size[0] // patch_size[1])
        pad = int((patch_size[0] - scale * patch_size[1]) // 2)
        if scale != 1:
            transforms.append(T.Resize(int(scale * patch_size[1])))
        if pad != 0:
            transforms.append(T.Pad(padding=pad))
        transforms.append(T.ToTensorEncodeLabels(len(self.classes)))
        if normalize:
            # NOTE(review): these channel statistics look carried over from
            # another dataset — confirm they match the PPG imagery.
            transforms.append(T.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], std=[x / 255.0 for x in [63.0, 62.1, 66.7]]))
        self.transforms = T.Compose(transforms)
        self.list_patches = list_all_files_sorted(self.root, "*_patch.png")
        self.list_masks = list_all_files_sorted(self.root, "*_mask.png")
        self.list_metas = list_all_files_sorted(self.root, "*_meta.pkl")
        # NOTE(review): the patch/mask/meta list-length assertion was
        # disabled; mismatched triples would fail later in __getitem__.
        self.length = len(self.list_masks)
        if percentages:
            self.l_train = round(self.length*percentages[0]/100)
            self.l_val = self.length - self.l_train
            if percentages[1] > 0:
                assert self.l_train != 0 and self.l_val != 0

    def __getitem__(self, idx):
        """Return the transformed (image, labels, masks) triple for *idx*."""
        mask_name = self.list_masks[idx]
        meta_name = mask_name.replace('_mask.png', '_meta.pkl')
        image_name = mask_name.replace('_mask.png', '_patch.png')
        image = Image.open(image_name).convert("RGB")
        meta = load_obj(meta_name)
        # Keep only overlaps that pass either the ratio OR the area threshold.
        labels = list(set([get_label(overlap['label']) for overlap in meta['overlapping']
                           if overlap['ratio'] >= self.ratio_th or overlap['area'] >= self.area_threshold]))
        masks = Image.open(mask_name)
        if self.transforms is not None:
            image, labels, masks = self.transforms(image, labels, masks)
        return image, labels, masks

    def __len__(self):
        return self.length
if __name__ == "__main__":
    # Quick dataset scan: accumulate per-sample label vectors and report
    # class totals and frequencies.
    batch_size = 20
    data = PPGTrain2("/home/krm/ext/PPG/Classification_datasets/PPG/NewTest_32/",
                     normalize=True, area_threshold=0, ratio_threshold=0.1)
    loader = torch.utils.data.DataLoader(dataset=data, num_workers=4, batch_size=batch_size, shuffle=False)
    names = torch.zeros([len(data), 3])
    for i, batch in enumerate(tqdm.tqdm(loader)):
        # NOTE(review): __getitem__ returns (image, labels, masks); this
        # 2-way unpack only works if collation drops the masks — confirm.
        patches, labels = batch
        names[i*batch_size: (i+1)*batch_size] = labels
    # FIX: renamed from `sum`, which shadowed the builtin.
    label_totals = names.sum(dim=0)
    average = names.mean(dim=0)
    print(label_totals)
    print(average)
    print('done')
|
988,868 | 403ac422badd9397741848cae2fc17f5494f18e9 | # -*- coding: utf-8 -*-
from setuptools import setup
# Packaging metadata for the markdown-jinja extension.
setup(
    name='markdown-jinja',
    # NOTE(review): version says 1.0.0 but download_url below points at the
    # v0.1.0 tag — confirm which is intended before the next release.
    version='1.0.0',
    py_modules=['markdown_jinja'],
    description='Python Markdown extension which adds Jinja2 support',
    author='Józef Sokołowski',
    author_email='pypi@qzb.me',
    url='https://github.com/qzb/markdown-jinja/',
    download_url='https://github.com/qzb/markdown-jinja/archive/v0.1.0.tar.gz',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Intended Audience :: Developers',
        'Topic :: Communications :: Email :: Filters',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Software Development :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Filters',
        'Topic :: Text Processing :: Markup :: HTML'
    ],
    install_requires=['Markdown>=3.0', 'Jinja2>=2.10']
)
|
988,869 | 76d654eb1a0faedab8dc18065d7f1e2665068001 | # 547. 朋友圈
# 班上有 N 名学生。其中有些人是朋友,有些则不是。他们的友谊具有是传递性。如果已知 A 是 B 的朋友,B 是 C 的朋友,那么我们可以认为 A 也是 C 的朋友。所谓的朋友圈,是指所有朋友的集合。
# 给定一个 N * N 的矩阵 M,表示班级中学生之间的朋友关系。如果M[i][j] = 1,表示已知第 i 个和 j 个学生互为朋友关系,否则为不知道。你必须输出所有学生中的已知的朋友圈总数。
# 示例 1:
# 输入:
# [[1,1,0],
# [1,1,0],
# [0,0,1]]
# 输出:2
# 解释:已知学生 0 和学生 1 互为朋友,他们在一个朋友圈。
# 第2个学生自己在一个朋友圈。所以返回 2 。
# 示例 2:
# 输入:
# [[1,1,0],
# [1,1,1],
# [0,1,1]]
# 输出:1
# 解释:已知学生 0 和学生 1 互为朋友,学生 1 和学生 2 互为朋友,所以学生 0 和学生 2 也是朋友,所以他们三个在一个朋友圈,返回 1 。
# 提示:
# 1 <= N <= 200
# M[i][i] == 1
# M[i][j] == M[j][i]
class FindUnion(object):
    """Union-find (disjoint set) over the integers 0..n-1.

    `count` tracks the number of disjoint sets remaining.
    """

    def __init__(self, n):
        # FIX: the constructor was misspelled `__inti__`, so it never ran and
        # instances had neither `.set` nor `.count`.
        self.set = [i for i in range(n)]
        self.count = n

    def find_set(self, x):
        """Return the root of x's set, compressing the path on the way."""
        if self.set[x] != x:
            self.set[x] = self.find_set(self.set[x])
        return self.set[x]

    def union_set(self, x, y):
        """Merge the sets containing x and y (no-op if already joined)."""
        x_root, y_root = map(self.find_set, (x, y))
        if x_root != y_root:
            # The smaller root is attached under the larger one.
            self.set[min(x_root, y_root)] = max(x_root, y_root)
            self.count -= 1
def findCircleNum(A):
    """Count friend circles in adjacency matrix A using union-find."""
    n = len(A)
    uf = FindUnion(n)
    for row in range(n):
        for col in range(n):
            if row != col and A[row][col]:
                uf.union_set(row, col)
    return uf.count
def findCircleNum2(A):
    """Count friend circles in adjacency matrix A using DFS.

    Each unvisited student starts a new circle; DFS marks everyone reachable
    through direct or transitive friendship.
    """
    # FIX: was `len(a)` — lowercase `a` is undefined, so the function raised
    # NameError on every call.
    n = len(A)
    visited = [False] * n
    cnt = 0

    def dfs(A, v, i):
        # Mark every direct friend of i, then recurse into them.
        for j in range(len(A)):
            if A[i][j] == 1 and visited[j] == False:
                visited[j] = True
                dfs(A, v, j)

    for i in range(len(A)):
        if visited[i] == False:
            visited[i] = True
            dfs(A, visited, i)
            cnt += 1
    return cnt
988,870 | dbfbcff0bde04912d921db80ef1d9d2c661bb1f1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from uline_risk.handlers.baseHandlers import RESTfulHandler
from uline_risk.model.uline.info import MchBalance
from uline_risk.utils import db
class GetAllFreezeMerchant(RESTfulHandler):
    """GET endpoint returning the ids of all merchants whose balance record
    is flagged frozen (MchBalance.status == 1)."""

    def prepare(self):
        # No request preprocessing for this endpoint.
        pass

    def get(self):
        """Respond with {"freeze_mch_ids": [...]} (empty list when none)."""
        freeze_mch_ids = []
        with db.uline_session_scope() as session:
            # NOTE(review): status == 1 meaning "frozen" is inferred from the
            # local variable/class names — confirm against the MchBalance model.
            db_freeze_account = session.query(MchBalance).filter(MchBalance.status == 1).all()
            if db_freeze_account:
                freeze_mch_ids = [each_account.mch_id for each_account in db_freeze_account]
        response = self.generate_response_msg(**{"freeze_mch_ids": freeze_mch_ids})
        self.write(response)
        self.finish()
|
988,871 | 35effdc439d1b4072a819df621c8f2e8f9403f96 | import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# ---------------------------------------------------------------------------
# Cut every labelled text region out of the images in image_1000/, normalise
# each crop to a 32px-high strip (up to 256px wide), save the strips under
# rewrite/, and record "path__label" lines in target_label.txt.
# ---------------------------------------------------------------------------
step = 0            # images processed so far
MAX = 0             # leftover from earlier size experiments; unused now
Threshold = 32      # output strip height (pixels)
MAX_LENGTH = 256    # output strip width (pixels)
ff = open("target_label.txt",'w')
for filename in os.listdir('image_1000/'):
    filename_new = "image_1000/" + filename
    img = cv2.imread(filename_new, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # Unreadable / non-image file — skip it.
        continue
    # Binarise with a local mean adaptive threshold.
    img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 20)
    #ret,img = cv2.threshold(img, 127,255,cv2.THRESH_TOZERO)
    step += 1
    print(step)
    # NOTE(review): debugging stopper — the script exits after two images;
    # remove this to process the whole folder.
    if (step > 2):
        exit()
    # The ground-truth annotation file shares the image's basename.
    filename_suf = filename.split(".jpg")[0]
    filename_full = "txt_1000/" + filename_suf + ".txt"
    with open(filename_full, 'r') as f:
        data = f.readlines()  # annotation lines for this one picture
    Num = 0  # strips written for this image (used in output names)
    Err = 0  # unused
    for line in data:
        # Line format: x1,y1,x2,y2,x3,y3,x4,y4,label — a quadrilateral plus
        # its transcription.  `label` keeps the trailing newline, which
        # terminates each line written to target_label.txt.
        position = line.split(",")[0:8]
        label = line.split(",")[8]
        position = [int(float(position[i])) for i in range(8)]
        X = [position[0], position[2], position[4], position[6]]
        Y = [position[1], position[3], position[5], position[7]]
        cnt = np.array([[position[0], position[1]],[position[2], position[3]],[position[4], position[5]],[position[6], position[7]]])
        # Minimum-area (possibly rotated) rectangle around the quad.
        rect = cv2.minAreaRect(cnt)
        angle = rect[2]
        X_centre = rect[0][0]
        Y_centre = rect[0][1]
        if (angle == -90.0 or angle == 0.0):
            # Axis-aligned region: crop the plain bounding box.
            X_start = min(X)
            X_end = max(X)
            Y_start = min(Y)
            Y_end = max(Y)
            crop_img = img[Y_start:Y_end, X_start:X_end]
            # Rotate so the strip is wider than tall.
            if (crop_img.shape[1] < crop_img.shape[0]):
                crop_img = np.rot90(crop_img)
            x_s = Threshold
            if crop_img.shape[0]==0:
                # Degenerate (zero-height) box — nothing to crop.
                continue
            # Scale to height 32 preserving aspect ratio...
            y_s = int((crop_img.shape[1]/crop_img.shape[0]) * Threshold)
            crop_img = cv2.resize(crop_img,(y_s, x_s))
            # ...then pad with white on the right out to 256 columns.
            rest = MAX_LENGTH - crop_img.shape[1]
            if rest < 0:
                rest = 0
            stack = np.ones([Threshold, rest]) * 255
            crop_img = np.hstack((crop_img, stack))
            rewrite_name = "rewrite/" + filename_suf + "_" + str(Num) + ".jpg"
            cv2.imwrite(rewrite_name,crop_img)
            ff.write(rewrite_name + '__' + label)
            Num += 1
        else:
            # Rotated region: shift the image into a 3x canvas so the
            # rotation cannot clip it, rotate about the box centre, then
            # crop the now axis-aligned rectangle.
            Y_MAX = np.shape(img)[0]
            X_MAX = np.shape(img)[1]
            M = np.float32([[1,0,X_MAX], [0,1,Y_MAX]])
            img_new = cv2.warpAffine(img, M, (Y_MAX*3, X_MAX*3))
            RECT = (rect[0][0] + X_MAX, rect[0][1] + Y_MAX)
            M = cv2.getRotationMatrix2D(RECT, angle, 1)
            img_new = cv2.warpAffine(img_new, M, (Y_MAX*3, X_MAX*3))
            # NOTE(review): warpAffine's dsize is (width, height); passing
            # (Y_MAX*3, X_MAX*3) swaps the axes unless images are square —
            # confirm against sample output.
            X_start = int(X_centre - rect[1][0]/2 + X_MAX)
            X_end = int(X_centre + rect[1][0]/2 + X_MAX)
            Y_start = int(Y_centre - rect[1][1]/2 + Y_MAX)
            Y_end = int(Y_centre + rect[1][1]/2 + Y_MAX)
            crop_img = img_new[Y_start:Y_end, X_start:X_end]
            if (crop_img.shape[1] < crop_img.shape[0]):
                crop_img = np.rot90(crop_img)
            x_s = Threshold
            if crop_img.shape[0]==0:
                continue
            # Unlike the axis-aligned branch, rotated crops are stretched to
            # the full 256 width rather than aspect-preserved and padded.
            y_s = int(MAX_LENGTH)
            crop_img = cv2.resize(crop_img,(y_s, x_s))
            print(crop_img.shape)
            # (A large block of commented-out experiments — boundary clamping,
            # alternative resizing, MAX bookkeeping — removed for readability;
            # see version control for the history.)
            rewrite_name = "rewrite/" + filename_suf + "_" + str(Num) + ".jpg"
            cv2.imwrite(rewrite_name,crop_img)
            ff.write(rewrite_name + '__' + label)
            Num += 1
ff.close()
|
988,872 | 5a1d704effc959144d752bcc13425fbd102cfa21 | from aerolinea import Aerolinea
from destino import Destino
from terminal import Terminal
from avion import Avion
from horario import Horario
from boleto import Boleto
'''Casos prueba:
##PRUEBAS##
>>>
h1 = Horario("12:30 PM")#crea un horario
d1 = Destino("Hawaii", 1500)#crea un destino con costo base
t1 = Terminal()#crea una terminal
a1 = Avion("BOEING", 1, 1500, "Primera clase")#registra un nuevo avion
aerolinea = Aerolinea("Aeromexico", t1)#genera una neuva aerolinea
aerolinea.añadir_destino(d1)#añade un destino a la aerolinea
aerolinea.añadir_tipo_avion(a1)#añade un tipo de avion a la aerolinea
print(aerolinea.destinos)#imprimie los destinos de la aerolinea
print(boleto.ticket())#imprime el boleto
print(boleto2.ticket())#imprime el boleto
>>>
Aerolinea: Aeromexico
1.- Hawaii Hora:
1.- 12:30 PM
*** Aeromexico ***
Boleto #0
Destino: Hawaii
Clase: Primera clase
Hora de salida: 12:30 PM
Precio del boleto: $3000.0
'''
class Prueba:
    """Smoke-test driver: wires together schedules, destinations, terminals,
    planes, airlines and tickets, then prints the tickets."""

    def start(self):
        # Schedules
        h1 = Horario("12:30 PM")
        h2 = Horario("13:00 PM")
        h3 = Horario("06:00 AM")
        h4 = Horario("10:00 AM")
        # Destinations with their base fares
        d1 = Destino("Hawaii", 1500)
        d2 = Destino("Canada", 100)
        d3 = Destino("China", 10000)
        d4 = Destino("Oaxaca", 99999)
        # Attach schedules to destinations
        d1.añadir_horario(h1)
        d1.añadir_horario(h3)
        d1.añadir_horario(h2)
        d2.añadir_horario(h1)
        d2.añadir_horario(h2)
        d2.añadir_horario(h3)
        d2.añadir_horario(h4)
        d3.añadir_horario(h4)
        d4.añadir_horario(h1)
        # Terminals
        t1 = Terminal()
        t2 = Terminal()
        # Plane types (a5 is registered but never assigned to an airline)
        a1 = Avion("BOEING", 1, 1500, "Primera clase")
        a2 = Avion("AJ1800", 200, 100, "Clase turista")
        a3 = Avion("AKKAD0", 200, 100, "Primera Clase")
        a4 = Avion("MDP123", 150, 100, "Clase turista")
        a5 = Avion("009OLO", 100, 100, "Clase turista")
        # Airlines
        aerolinea = Aerolinea("Aeromexico", t1)
        aerolinea2 = Aerolinea("VivaAerobus", t2)
        aerolinea3 = Aerolinea("MexicoVuela", t2)
        aerolinea4 = Aerolinea("AeroNautica", t2)
        # Destinations offered by each airline
        aerolinea.añadir_destino(d1)
        aerolinea.añadir_destino(d2)
        aerolinea2.añadir_destino(d2)
        aerolinea3.añadir_destino(d1)
        aerolinea3.añadir_destino(d4)
        aerolinea3.añadir_destino(d3)
        aerolinea3.añadir_destino(d2)
        aerolinea4.añadir_destino(d2)
        # Plane types available to each airline
        aerolinea.añadir_tipo_avion(a1)
        aerolinea3.añadir_tipo_avion(a1)
        aerolinea3.añadir_tipo_avion(a2)
        aerolinea3.añadir_tipo_avion(a3)
        aerolinea3.añadir_tipo_avion(a4)
        aerolinea2.añadir_tipo_avion(a2)
        aerolinea.añadir_tipo_avion(a2)
        # Print each airline's destination list
        print(aerolinea.destinos)
        print(aerolinea2.destinos)
        print(aerolinea3.destinos)
        # Issue tickets and print them
        boleto = Boleto(aerolinea, d1, h1, a1)
        print(boleto.ticket())
        boleto2 = Boleto(aerolinea2, d2, h1, a1)
        boleto3 = Boleto(aerolinea2, d2, h2, a1)
        boleto4 = Boleto(aerolinea, d1, h1, a2)
        boleto5 = Boleto(aerolinea3, d4, h3, a4)
        print(boleto2.ticket())
        print(boleto3.ticket())
        print(boleto4.ticket())
        print(boleto5.ticket())
if __name__ == "__main__":
    # Run the demo scenario.
    demo = Prueba()
    demo.start()
988,873 | eb965643d9c31e48313dd5e916a07ca77859599e | """
http://openbookproject.net/thinkcs/python/english3e/classes_and_objects_II.html
"""
from Point import Point
class Rectangle:
    """
    A class to manufacture rectangle objects
    """

    def __init__(self, posn : Point, w : float, h: float):
        """
        Initialize rectangle at posn, with width w, height h
        :param posn: lower left corner of the rectangle
        :type posn: Point
        :param w: Width
        :type w: float
        :param h: Height
        :type h: float
        """
        self.corner = posn
        self.width = w
        self.height = h

    def __repr__(self):
        return "Rectangle(Corner:{},Width:{}, Height:{})".format(self.corner, self.width, self.height)

    def __str__(self):
        return "({},{},{})".format(self.corner, self.width, self.height)

    def __eq__(self, other):
        # Value equality: same concrete type and identical attributes.
        if type(other) is type(self):
            return self.__dict__ == other.__dict__
        return False

    def grow(self, delta_width, delta_height):
        """
        Grow (or shrink) this object by the deltas
        :param delta_width: Growth of width
        :type delta_width: float
        :param delta_height: Growth of height
        :type delta_height: float
        """
        self.width += delta_width
        self.height += delta_height

    def move(self, dx, dy):
        """
        Move this object by the deltas
        :param dx: Move of the corner, delta,x
        :type dx: float
        :param dy: Move of the corner, delta,y
        :type dy: float
        """
        self.corner.x += dx
        self.corner.y += dy

    def area(self):
        """
        returns the area
        :return: Area of the rectangle
        :rtype: float
        """
        return self.height * self.width

    def perimeter(self):
        """
        find the perimeter of any rectangle
        :return: perimeter
        :rtype: float
        """
        return 2 * (self.height + self.width)

    def flip(self):
        """
        swaps the width and the height of any rectangle instance
        """
        self.width, self.height = self.height, self.width

    def contains(self, point : Point):
        """
        test if a Point falls within the rectangle (edges inclusive)
        :param point: point to test
        :type point: Point
        :return: True, the point is in the rectangle
        :rtype: bool
        """
        return ( self.corner.x <= point.x <= (self.corner.x + self.width)
                 and self.corner.y <= point.y <= (self.corner.y + self.height))

    def intersect(self, rectangle):
        """
        determine whether two rectangles overlap (touching edges count)

        FIX: the previous implementation only checked whether either
        rectangle contained the other's lower-left corner, which misses
        crossing layouts (e.g. a plus sign) where neither corner lies inside
        the other rectangle.  Comparing the x- and y-intervals directly
        covers every case.

        :param rectangle: other rectangle to test the intersection with
        :type rectangle: Rectangle
        :return: True if the rectangles intersect
        :rtype: bool
        """
        return (self.corner.x <= rectangle.corner.x + rectangle.width
                and rectangle.corner.x <= self.corner.x + self.width
                and self.corner.y <= rectangle.corner.y + rectangle.height
                and rectangle.corner.y <= self.corner.y + self.height)
# Print the numbers 0 through 9, then announce completion.
for number in range(10):
    print(number)
print("Se acabo el programa")
# Equivalent while-loop formulation, kept for reference:
#cont = 0
#while cont < 10:
#    print(cont)
#    cont = cont + 1
#print("Se acabo el programa")
988,875 | 380cc7bf82280053b71ddcd3b6076f303863653f | import matplotlib.pyplot as plt
import deltametrics as dm
# Load the bundled "golf" sample data cube.
golfcube = dm.sample_data.golf()
fig, ax = plt.subplots(figsize=(8, 2))
# Register a strike-oriented section 5 cells into the domain, then plot its
# velocity field.  NOTE(review): show() presumably draws on the current
# (just-created) axes — confirm.
golfcube.register_section('demo', dm.section.StrikeSection(distance_idx=5))
golfcube.sections['demo'].show('velocity')
|
988,876 | 35ca4f419cb7e9ee3c8ded13d9e64524122dd58d | #!/usr/bin/python3
import sys
import argparse
import string
import pprint
import re
# Pretty-printer kept for ad-hoc debugging; unused in the visible code.
pp = pprint.PrettyPrinter()

# Parse command-line arguments: a single positional input file.
parser = argparse.ArgumentParser()
parser.add_argument("input", help="Input file")
args = parser.parse_args()
def pretty_marbles():
    """Render the marble circle as one line, wrapping the current marble
    (module globals `marbles` / `current_marble_index`) in parentheses."""
    rendered = []
    for idx, value in enumerate(marbles):
        template = '({0:2d})' if idx == current_marble_index else ' {0:2d} '
        rendered.append(template.format(value))
    return ''.join(rendered)
from collections import deque  # O(1) rotation at both ends of the circle

inputfile = open(args.input)
for line in inputfile:
    # e.g. "10 players; last marble is worth 1618 points"
    parse = re.match(r'(\d+) players; last marble is worth (\d+) points', line)
    if not parse:
        print("Line does not match RE:", line.strip())
        continue
    num_players = int(parse.group(1))
    num_marbles = int(parse.group(2))
    # The circle is a deque whose rightmost element is the current marble.
    # FIX (performance): list.insert/pop in the middle made every move O(n)
    # and the whole game O(n^2); deque.rotate keeps each move O(1).  (The
    # pretty_marbles helper above relied on the old list globals but was
    # never called.)
    circle = deque([0])
    scores = {p: 0 for p in range(1, num_players + 1)}
    player = 0
    for marble in range(1, num_marbles + 1):
        if marble % 1000 == 0:
            print("Playing marble", marble, "of", num_marbles)
        player = player % num_players + 1
        if marble % 23 == 0:
            # Keep this marble plus the one 7 counter-clockwise of the
            # current marble; the marble clockwise of the removed one
            # becomes current.
            circle.rotate(7)
            scores[player] += marble + circle.pop()
            circle.rotate(-1)
        else:
            # Place between the marbles 1 and 2 clockwise of the current
            # marble; the new marble becomes current.
            circle.rotate(-1)
            circle.append(marble)
    top_score = max(scores.values())
    print(num_players, top_score)
|
class Theme:
    """Base UI theme.

    FIX: the body consisted of bare `text_bg_color;` statements, which
    evaluate an undefined name and raise NameError at import time (the
    trailing `;` is C-style, not a Python declaration).  The colors are now
    real class attributes for subclasses to override.
    """
    text_bg_color = None
    text_fg_color = None
class DarkTheme(Theme):
    """Placeholder for a dark color scheme; no overrides defined yet."""
    pass
class LightTheme(Theme):
    """Placeholder for a light color scheme; no overrides defined yet."""
    pass
class BlueTheme(Theme):
    """Placeholder for a blue color scheme; no overrides defined yet."""
    pass
|
988,878 | 5da8260474b54fee20dcfd1441d23d204abcb0a2 | import json
import urllib.request
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
from bs4 import BeautifulSoup
import requests
from pytrends.request import TrendReq
import datetime
from pushbullet.pushbullet import PushBullet
from tokens import A_S, A_T, C_K, C_S, API_KEY
now = datetime.datetime.now()

# --- Twitter credentials (loaded from tokens.py) ---
ACCESS_TOKEN = A_T
ACCESS_SECRET = A_S
CONSUMER_KEY = C_K
CONSUMER_SECRET = C_S
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter = Twitter(auth=oauth)

# Only scrape at 15:00 or 18:00 — the script is scheduled more often but
# should push the digest twice a day.
if now.hour in [15,18]:
    # Twitter trends for Poland (place id 23424923), top 15.
    pol_trends = twitter.trends.place(_id = 23424923)
    twittrendlistPL=[]
    for i in pol_trends[0]['trends']:
        twittrendlistPL.append(i['name'])
    strPLT=" ,".join(str(x) for x in twittrendlistPL[0:15])
    # Worldwide trends (place id 1), filtered to ASCII-only names.
    globaltrends=twitter.trends.place(_id = 1)
    twittrendlist=[]
    for i in globaltrends[0]['trends']:
        twittrendlist.append(i['name'])
    def isEnglish(s):
        # ASCII round-trip test; non-ASCII text raises UnicodeDecodeError.
        try:
            s.encode(encoding='utf-8').decode('ascii')
        except UnicodeDecodeError:
            return False
        else:
            return True
    G=[i for i in twittrendlist if isEnglish(i)]
    strGT=" ,".join(str(x) for x in G[0:15])
    # Google Trends: top trending searches for England.
    pytrends = TrendReq(geo='GB-ENG',tz=360)
    df=pytrends.trending_searches()
    gtrendsus=df.title.tolist()
    strGog=" ,".join(str(x) for x in gtrendsus[0:15])
    # Yahoo Finance trending tickers, scraped from the HTML table cells.
    page = requests.get("https://finance.yahoo.com/trending-tickers/")
    soup = BeautifulSoup(page.content, 'html.parser')
    base=soup.findAll('td', {'class':'data-col1 Ta(start) Pstart(10px) Miw(180px)'})
    yhoo=[]
    for i in base:
        yhoo.append(i.get_text())
    strYHOO=" ,".join(str(x) for x in yhoo[0:15])
    # CoinMarketCap: the five coins with the largest 24h percent gain.
    with urllib.request.urlopen("https://api.coinmarketcap.com/v2/ticker/") as url:
        cmc = json.loads(url.read().decode())
    names=[]
    change=[]
    for i in cmc['data']:
        names.append(cmc['data'][i]['symbol'])
        change.append(cmc['data'][i]['quotes']['USD']['percent_change_24h'])
    change, names = zip(*sorted(zip(change, names)))
    cmcstr=' ,'.join([str(a) + ': '+ str(b) + '%' for a,b in zip(names[-5:],change[-5:])])
    # Push the digest note to the first registered Pushbullet device.
    apiKey = API_KEY
    p = PushBullet(apiKey)
    devices = p.getDevices()
    # NOTE(review): bare `devices` expression below does nothing — looks
    # like a notebook leftover.
    devices
    p.pushNote(devices[0]["iden"], 'Daily news ', "***TREND TWT POLSKA:" + strPLT + "\n***TREND TWT GLOBAL: " + strGT + "\n***G US SEARCH: " + strGog + "\n***TOP TICKERS YAHOO: " + strYHOO + "\n***CMC TOP: " + cmcstr)
    print('yes')
else:
    print('not')
|
988,879 | f8a10c0c0ee33f999b8e4a1191e9944609d0e235 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file merge-label.py
# \author chenghuige
# \date 2017-11-24 15:52:00.828515
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys, os
#def get_cider_rank(cider):
# score = 0
# if cider > 4.5:
# score = 4
# elif cider > 3.:
# score = 3
# elif cider > 2.:
# score = 2
# elif cider > 1.:
# score = 1
# return score
def get_cider_rank(cider):
    """Bucket a raw CIDEr score into an ordinal rank 0-6 (higher is better)."""
    buckets = ((4.5, 6), (3., 5), (2., 4), (1., 3), (0.5, 2), (0.2, 1))
    for cutoff, rank in buckets:
        if cider > cutoff:
            return rank
    return 0
#def get_bleu4_rank(bleu4):
# score = 0
# if bleu4 > 0.9:
# score = 4
# elif bleu4 > 0.8:
# score = 3
# elif bleu4 > 0.7:
# score = 2
# elif bleu4 > 0.5:
# score = 1
# return score
def get_bleu4_rank(bleu4):
    """Bucket a BLEU-4 score into an ordinal rank 0-6 (higher is better).

    FIX: the second branch used `if` instead of `elif`, so scores above 0.95
    were first assigned 6 and then immediately overwritten with 5, making
    rank 6 unreachable.
    """
    score = 0
    if bleu4 > 0.95:
        score = 6
    elif bleu4 > 0.9:
        score = 5
    elif bleu4 > 0.8:
        score = 4
    elif bleu4 > 0.7:
        score = 3
    elif bleu4 > 0.5:
        score = 2
    elif bleu4 > 0.1:
        score = 1
    return score
def get_meteor_rank(metric):
    """Bucket a METEOR score into an ordinal rank 0-5 (higher is better)."""
    buckets = ((0.95, 5), (0.6, 4), (0.5, 3), (0.4, 2), (0.3, 1))
    for cutoff, rank in buckets:
        if metric > cutoff:
            return rank
    return 0
def get_mix_rank(cider, bleu4):
    """Combined ordinal rank 0-8 from CIDEr and BLEU-4.

    The first matching (bleu4, cider) bucket wins; a None BLEU cutoff means
    only CIDEr is tested.  Very low BLEU-4 (< 0.1) costs two ranks.
    """
    buckets = (
        (0.95, 5.0, 8),
        (0.9, 4.5, 7),
        (0.9, 3, 6),
        (0.8, 3, 5),
        (0.7, 2, 4),
        (None, 1.2, 3),
        (None, 0.5, 2),
        (None, 0.1, 1),
    )
    rank = 0
    for bleu_cut, cider_cut, value in buckets:
        if (bleu_cut is None or bleu4 > bleu_cut) and cider > cider_cut:
            rank = value
            break
    if bleu4 < 0.1:
        rank = max(0, rank - 2)
    return rank
#def get_mix_rank(cider, bleu4):
# score = 0
# if bleu4 > 0.9 and cider > 3.:
# score = 9
# elif bleu4 > 0.8 and cider > 2.0:
# score = 8
# elif bleu4 > 0.7 and cider > 1.4:
# score = 7
# elif bleu4 > 0.6 and cider > 1.0:
# score = 6
# elif bleu4 > 0.5 and cider > 0.8:
# score = 5
# elif bleu4 > 0.4 and cider > 0.5:
# score = 4
# elif bleu4 > 0.3 and cider > 0.4:
# score = 3
# elif bleu4 > 0.2 and cider > 0.3:
# score = 2
# elif bleu4 > 0.0001 and cider > 0.0001:
# score = 1
# return score
# ---------------------------------------------------------------------------
# Rewrite the score column of ensemble.train.final.txt using the metric named
# on the command line (cider/bleu4/meteor, their raw "r"-prefixed variants,
# rrouge, or the default mixed rank).
# ---------------------------------------------------------------------------
label_type = sys.argv[1]
ofile = './ensemble.train.modifylabel.txt'
if len(sys.argv) > 2:
    ofile = sys.argv[2]
out = open(ofile, 'w')

infile = './ensemble.train.final.txt'
# (img, caption) -> feature columns, carried through unchanged to the output.
m = {}
is_header = True
for line in open(infile):
    if is_header:
        l = line.strip().split('\t')
        print('\t'.join(l), file=out)
        is_header = False
    # NOTE(review): the header row also falls through and lands in `m` —
    # harmless as long as no metric row reuses the header's first two
    # columns, but worth confirming.
    img, caption, score, feature = line.strip().split('\t', 3)
    key = '%s\t%s' % (img, caption)
    m[key] = feature
for line in open('./ensemble.best_metrics_all.txt'):
    l = line.strip().split('\t')
    # Captions are matched with spaces stripped.
    img, caption, bleu4, cider, meteor, rouge = l[0], l[1].replace(' ', ''), l[2], l[3], l[4], l[5]
    cider = float(cider)
    # bleu4 is better than cider (per earlier experiments)
    bleu4 = float(bleu4)
    meteor = float(meteor)
    rouge = float(rouge)
    # "r"-prefixed types use the raw metric; others bucket into ranks.
    if label_type == 'cider':
        score = get_cider_rank(cider)
    elif label_type == 'rcider':
        score = cider
    elif label_type == 'bleu4':
        score = get_bleu4_rank(bleu4)
    elif label_type == 'rbleu4':
        score = bleu4
    elif label_type == 'meteor':
        score = get_meteor_rank(meteor)
    elif label_type == 'rmeteor':
        score = meteor
    elif label_type == 'rrouge':
        score = rouge
    else:
        score = get_mix_rank(cider, bleu4)
    key = '%s\t%s' % (img, caption)
    print(img, caption, score, m[key], sep='\t', file=out)
988,880 | 95ada1c188ddddb40673ed811f75308cd047fd49 | class Solution(object):
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
numDic=['','','abc','def','ghi','jkl','mno','pqrs','tuv','wxyz']
if len(digits) == 0:
return []
res=['']
for i in digits:
temp=numDic[int(i)]
resTemp=[]
for j in temp:
for k in res:
resTemp.append(k+j)
res=resTemp
return res
|
988,881 | 72fcc652a0214aa82e432edd6df12af603e71854 | from builtins import any
import mysql.connector
# Accumulates one dict of selected fields per JSON line read below.
ny_restaurants = []
# Populate database with array of dictionaries #
# NOTE(review): credentials are hard-coded in source — consider env vars.
mydb = mysql.connector.connect(
    host = 'localhost',
    user = 'root',
    passwd = 'i18111958',
    database = 'myce_yelp'
)
mycursor = mydb.cursor()
# Recreate the table from scratch on every run; relax strict SQL mode so
# malformed values are coerced rather than rejected.
mycursor.execute("DROP TABLE IF EXISTS restaurants")
mycursor.execute("SET SESSION sql_mode = ''")
mydb.commit()
mycursor.execute("CREATE TABLE restaurants (business_id VARCHAR(255) NULL, name VARCHAR(255) NULL, address VARCHAR(255) NULL, city VARCHAR(255) NULL, state VARCHAR(255) NULL, postal_code VARCHAR(255) NULL, latitude DECIMAL(13, 10) NULL, longitude DECIMAL(13, 10) NULL, stars DECIMAL(2, 1) NULL, review_count INTEGER(10) NULL)");
# Field names we keep from each JSON record (also the insert column order).
keys = ["business_id", "name", "address", "city", "state", "postal_code", "latitude", "longitude", "stars", "review_count"]
tup = (keys[0], keys[1], keys[2], keys[3], keys[4], keys[5], keys[6], keys[7], keys[8], keys[9])
print (tup)
# Hand-rolled parse of the one-JSON-object-per-line dataset: strip quotes
# and braces, then split on commas and colons.  NOTE(review): this breaks
# on values that themselves contain ',' or ':' — confirm acceptable.
with open ('yelp_academic_dataset_business.json') as f:
    for line in f:
        line = line.lower();
        line = line.replace("\"", "")
        # if not a restaurant or is closed or not in NY
        #if "restaurant" not in line or "is_open:0" in line or "state:ny" not in line:
        #    continue
        line = line[1:]
        line = line[:-1]
        info_map = {}
        while (line != ""):
            if line[0] == ',':
                line = line[1:]
            info_split = line[: line.find(',')]
            key_value = info_split.split(':')
            if len(key_value) > 1:
                key = key_value[0];
                # Keep the pair only when the key matches one of `keys`.
                needed = any(key in x for x in keys)
                if needed:
                    info_map[key] = key_value[1];
                    #print ("KEY: " + key + " VALUE: " + info_map[key])
            line = line[len(info_split)+1:]
        if info_map: # if it has been populated
            ny_restaurants.append(info_map)
# Populate database with array of dictionaries #
insertion_formula = "INSERT INTO restaurants (business_id, name, address, city, state, postal_code, latitude, longitude,stars, review_count) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
for d in ny_restaurants:
    row = (d[keys[0]], d[keys[1]], d[keys[2]], d[keys[3]], d[keys[4]], d[keys[5]], d[keys[6]], d[keys[7]], d[keys[8]], d[keys[9]])
    mycursor.execute(insertion_formula, row)
mydb.commit()
|
988,882 | dcb577eb1d7f97665e727184427249797c818998 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# created at : `(format-time-string "%F %T")`
# author : `user-full-name` <`user-mail-address`>
#
|
988,883 | b24c39ce9073b878da648a3cad8dbfa641d242d1 | """
@author Mayank Mittal
@email mittalma@ethz.ch
@brief Defines helper functions to create primitives and set their properties
into the stage programmatically.
"""
from typing import Tuple, Optional
from pxr import Usd, UsdGeom, UsdShade, Sdf, Semantics, PhysicsSchema
def create_prim(stage: Usd.Stage, path: str, prim_type: str,
translation: Optional[Tuple[float, float, float]] = None,
rotation: Optional[Tuple[float, float, float]] = None,
scale: Optional[Tuple[float, float, float]] = None,
ref: Optional[str] = None,
semantic_label: Optional[str] = None,
attributes: Optional[dict] = {}) -> Usd.Prim:
"""Create a prim, apply specified transforms, apply semantic label and set specified attributes.
@note The order in which the axis rotations are recorded in a Vec3* for the six rotateABC Euler triples
is always the same: vec[0] = X, vec[1] = Y, vec[2] = Z . The A, B, C in the op name dictate the
order in which their corresponding elements are consumed by the rotation, not how they are laid out.
Reference: https://graphics.pixar.com/usd/docs/api/class_usd_geom_xform_op.html
:param stage: The stage to add prim to.
:param path: The path of the new prim.
:param prim_type: Prim type name
:param translation: prim translation (applied last)
:param rotation: prim rotation in radians with rotation order XYZ.
:param scale: scaling factor in x, y, z.
:param ref: Path to the USD that this prim will reference.
:param semantic_label: Semantic label.
:param attributes: Key-value pairs of prim attributes to set.
"""
# Define prim in the input stage
prim = stage.DefinePrim(path, prim_type)
# Apply attributes from the input dictionary
for k, v in attributes.items():
prim.GetAttribute(k).Set(v)
# Load reference USD file.
if ref:
prim.GetReferences().AddReference(ref)
# Apply semantic label to the prim
if semantic_label:
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(semantic_label)
# Apply XFORM related properties to the prim
xform_api = UsdGeom.XformCommonAPI(prim)
# Apply rotation in XYZ coordinates in world frame
if rotation:
xform_api.SetRotate(rotation, UsdGeom.XformCommonAPI.RotationOrderXYZ)
# Apply scale to the prim
if scale:
xform_api.SetScale(scale)
# Apply transform (x, y, z) to the prim in world frame
if translation:
xform_api.SetTranslate(translation)
return prim
def add_preview_surface(stage: Usd.Stage, prim: Usd.Prim,
                        diffuse: Tuple[float, float, float],
                        roughness: float,
                        metallic: float):
    """Add a preview surface material using the metallic workflow.

    Reference: https://graphics.pixar.com/usd/docs/UsdPreviewSurface-Proposal.html

    :param stage: The stage.
    :param prim: Geometric primitive in the scene.
    :param diffuse: Parameter used as diffuseColor when using the specular workflow.
                    When using metallic workflow this is interpreted as albedo.
    :param roughness: Roughness for the specular lobe. The value ranges from 0 to 1.
    :param metallic: Shading properties of material (dielectric to metallic).
    """
    # Define a material prim directly under the target prim.
    mat_path = f"{prim.GetPath()}/mat"
    mat = UsdShade.Material.Define(stage, mat_path)
    # Create the UsdPreviewSurface shader and wire up its inputs.
    shader = UsdShade.Shader.Define(stage, f"{mat_path}/shader")
    shader.CreateIdAttr("UsdPreviewSurface")
    shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Float3).Set(diffuse)
    shader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(roughness)
    shader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(metallic)
    # Connect the shader's surface output to the material, then bind it.
    mat.CreateSurfaceOutput().ConnectToSource(shader, "surface")
    UsdShade.MaterialBindingAPI(prim).Bind(mat)
def add_collision_mask(prim: Usd.Prim):
    """Add collision properties to the prim.

    :param prim: Geometric primitive in the scene.
    """
    # Apply the physics collision schema and create its two relationships.
    collision_api = PhysicsSchema.CollisionAPI.Apply(prim)
    collision_api.CreatePhysicsMaterialRel()
    collision_api.CreateCollisionGroupRel()
# EOF
|
988,884 | 38fcb99e5569104487b62c7d6792595edb148bc7 | """
@auther Hyunwoong
@since 7/5/2020
@see https://github.com/gusdnd852
""" |
988,885 | 8add5d342e48e8ae9b8b482ca824d6015c95a161 | from sqlalchemy import Column, Integer, ForeignKey, String
from sqlalchemy.orm import relationship
from database import Base, engine
class PersonModel(Base):
    """ORM model for the `person` table; one person owns many articles."""
    __tablename__ = 'person'
    person_id = Column(Integer, primary_key=True)
    username = Column('username', String)
    fullname = Column('fullname', String)
    # One-to-many: articles whose person_id points at this person.
    Articles = relationship("ArticleModel")
class ArticleModel(Base):
    """ORM model for the `article` table, owned by a PersonModel row."""
    __tablename__ = 'article'
    article_id = Column(Integer, primary_key=True)
    title = Column('title', String)
    article_text = Column('article_text', String)
    # Foreign key back to person.person_id (the owning author).
    person_id = Column(ForeignKey("person.person_id"))
Base.prepare(engine) |
988,886 | 348f3488ab9934243983e0c5e40e9ca9043f0e06 |
def divisao(x, y):
    """Divide x by y, printing the result or a divide-by-zero message.

    Demonstrates try/except/else/finally: `else` runs only when no
    exception occurred, `finally` always runs.  Exceptions other than
    ZeroDivisionError (e.g. TypeError for string operands) propagate.
    """
    try:
        quociente = x / y
    except ZeroDivisionError:
        print("Divisão por zero")
    else:
        print(f"o resultado é {quociente}")
    finally:
        print("executando o finally")
if __name__ == "__main__":
    divisao(2, 1)   # success path: prints the result, then the finally
    divisao(2, 0)   # ZeroDivisionError path: handled message, then finally
    # NOTE: string operands raise TypeError, which divisao does NOT catch;
    # the finally block still runs before the exception propagates and
    # terminates the script.
    divisao('2', '1')
988,887 | 532f8bfe39d5c2037ea98eeccc4c6aa46bd6ed73 | import pytest
from LongestSubstring import Solution
def test_abcabcbb():
    """'abcabcbb' -> longest substring without repeating chars is 'abc' (3)."""
    s = Solution()
    text = 'abcabcbb'  # renamed from `input`, which shadowed the builtin
    expect = 3
    result = s.lengthOfLongestSubstring(text)
    assert expect == result
def test_bbbbb():
    """'bbbbb' -> longest substring without repeating chars is 'b' (1)."""
    s = Solution()
    text = 'bbbbb'  # renamed from `input`, which shadowed the builtin
    expect = 1
    result = s.lengthOfLongestSubstring(text)
    assert expect == result
def test_pwwkew():
    """'pwwkew' -> longest substring without repeating chars is 'wke' (3)."""
    s = Solution()
    text = 'pwwkew'  # renamed from `input`, which shadowed the builtin
    expect = 3
    result = s.lengthOfLongestSubstring(text)
    assert expect == result
988,888 | 374fdb5d6a78e73188ff275dee2cfd0c1ba91629 | def rev(a, b = 0):
if a == 0:
return b
else:
return rev(a / 10, b * 10 + a % 10)
# Demo call (Python 2 print statement); expected output: 3532312
print rev(2132353)
|
988,889 | 547bf6a783f05e9380d2347a9137f5d5c8befc63 | import unittest
from UAM_team_optimization.components.Aero.cdi_tail_comp import CDiTailComp
from openmdao.api import Problem
from openmdao.utils.assert_utils import assert_check_partials
class TestCDiTailComp(unittest.TestCase):
    """Checks CDiTailComp's outputs and analytic partial derivatives."""

    def test_component_and_derivatives(self):
        # Build a problem whose model is just the component under test.
        problem = Problem()
        problem.model = CDiTailComp()
        problem.setup()
        problem.run_model()
        # Compare analytic partials against finite differences, silently.
        partials = problem.check_partials(out_stream=None)
        assert_check_partials(partials, atol=1.e-3, rtol=1.e-3)
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
988,890 | 7a49e14b64dbe3b093909b68d0a648f3f2af43bd |
# linked list
empty = 'empty'
def is_link(s):
    """Return True if s is a linked list: 'empty' or a [first, rest] pair."""
    if s == empty:
        return True
    return len(s) == 2 and is_link(s[1])
def link(first, rest):
    """
    Construct a linked list from its first element and the rest.
    """
    # NOTE(review): the parameter names shadow the module-level first()/rest()
    # selectors inside this body; harmless here, but easy to trip over.
    assert is_link(rest), "rest must be a linked list."
    return [first, rest]
def first(s):
    """Return the first element of a linked list s."""
    # Selector half of the abstraction: validates before indexing.
    assert is_link(s), "first only applies to linked lists."
    assert s != empty, "empty linked list has no first element."
    return s[0]
def rest(s):
    """Return the rest of the elements of a linked list s."""
    assert is_link(s), "rest only applies to linked lists."
    assert s != empty, "empty linked list has no rest."
    return s[1]
# `link` plays the role of a constructor.
# `first` and `rest` play the roles of selectors.
# Example (REPL transcript, kept as comments so the module stays importable):
# >>> four = link(1, link(2, link(3, link(4, empty))))
# >>> first(four)
# 1
# >>> rest(four)
# [2, [3, [4, 'empty']]]
# Length and element selection are essential parts of the sequence abstraction.
def len_link(s):
    """Return the length of linked list s, computed iteratively."""
    count = 0
    while s != empty:
        count += 1
        s = rest(s)
    return count
def getitem_link(s, i):
    """Return the element at index i of linked list s (no bounds checking)."""
    # Walk i links inward, then take the head of what remains.
    while i > 0:
        s = rest(s)
        i = i - 1
    return first(s)
"""
s = rest(s)是说,把s的后半部分取出来, 放入循环就是说, 把s的后半部分的后半部分的后半部分取出来, 从外到内.一直取到第i层. 这时i就归零了. 然后return i层的第一个元素.
"""
# 测试
>>> len_link(four)
4
>>> getitem_link(four, 1)
2
# 递归式
def len_link_recursive(s):
    """Return the length of a linked list s, computed recursively."""
    return 0 if s == empty else 1 + len_link_recursive(rest(s))
def getitem_link_recursive(s, i):
    """Return the element at index i of linked list s, recursively."""
    return first(s) if i == 0 else getitem_link_recursive(rest(s), i - 1)
# For example,
# [1, [2, [3, empty]]] — element at index 2 of this linked list is found in
# [2, [3, empty]]      — element at index 1, which is found in
# [3, empty]           — element at index 0, namely 3
def extend_link(s, t):
    """
    Return a list with the elements of s followed by those of t.

    "Extend" means t ends up nested at the innermost position of s.
    FIX: the original def line carried a pasted REPL prompt (">>> "),
    which made the module unparseable; translated the comment to English.
    """
    assert is_link(s) and is_link(t)
    if s == empty:
        return t
    else:
        return link(first(s), extend_link(rest(s), t))
# Very much like the cons() style of construction.
# Viewed as nested lists, t ends up wrapped at the innermost position of s.
# >>> extend_link(four, four)
# [1, [2, [3, [4, [1, [2, [3, [4, 'empty']]]]]]]]
#  \------ s ------/ \---------- t ----------/
def apply_to_all_link(f, s):
    """Apply f to each element of s, returning a new linked list.

    FIX: stripped the pasted REPL prompt (">>> ") from the def line,
    which made the module unparseable.
    """
    assert is_link(s)
    if s == empty:
        return s
    else:
        return link(f(first(s)), apply_to_all_link(f, rest(s)))
# Each step takes first(s), applies f to it, and recurses on the rest
# (same-nature recursion).
# >>> apply_to_all_link(lambda x: x*x, four)
# [1, [4, [9, [16, 'empty']]]]
def keep_if_link(f, s):
    """Return a list with elements of s for which f(e) is true.

    FIX: stripped the pasted REPL prompt (">>> ") from the def line,
    which made the module unparseable.
    """
    assert is_link(s)
    if s == empty:
        return s
    else:
        # Filter the tail first, then decide whether to keep the head.
        kept = keep_if_link(f, rest(s))
        if f(first(s)):
            return link(first(s), kept)
        else:
            return kept
# >>> keep_if_link(lambda x: x%2 == 0, four)
# [2, [4, 'empty']]
def join_link(s, separator):
    """Return a string of all elements in s separated by separator.

    FIX: stripped the pasted REPL prompt (">>> ") from the def line,
    which made the module unparseable.
    """
    if s == empty:
        return ""
    elif rest(s) == empty:
        # Single element: no trailing separator.
        return str(first(s))
    else:
        return str(first(s)) + separator + join_link(rest(s), separator)
# So much like the TLS (The Little Schemer) programming style.
# >>> join_link(four, ", ")
# '1, 2, 3, 4'
|
988,891 | 2661732c83055e71537b08b5e0c28b059264d92b | from django.test import TestCase
from polls.models import User
from polls.services.user_service import UserService
class UserServiceTestCase(TestCase):
    """Integration tests for UserService against a real user document.

    NOTE(review): test_login/test_logout depend on shared document state
    (logout asserts the token set by login) and therefore on execution
    order — confirm the runner preserves definition order, or make each
    test self-contained.
    """

    @classmethod
    def setUpClass(cls):
        # Creates the shared fixture user once for the whole class.
        # NOTE(review): does not call super().setUpClass() — confirm that is
        # intentional for this TestCase base.
        UserService().register_new_user('Test user','1234','test@gmail.com','country name','')

    def setUp(self):
        # Re-fetch the fixture user before every test.
        self.user = User.objects(username='Test user').first()

    def tearDown(self):
        pass

    def test_login(self):
        # Fresh user starts logged out with no token.
        self.assertEqual(self.user.token, '')
        self.assertFalse(self.user.isAuth)
        UserService().login(self.user.username,self.user.password)
        self.user.reload()
        self.assertNotEqual(self.user.token, '')
        self.assertTrue(self.user.isAuth)

    def test_logout(self):
        # Assumes test_login already ran and left the user authenticated.
        self.assertNotEqual(self.user.token, '')
        self.assertTrue(self.user.isAuth)
        UserService().logout(self.user.id)
        self.user.reload()
        self.assertEqual(self.user.token, '')
        self.assertFalse(self.user.isAuth)

    def test_change_password(self):
        new_password = '12345'
        UserService().change_user_data(self.user.id, new_password=new_password)
        self.user.reload()
        self.assertEqual(self.user.password, new_password)

    def test_change_country_name(self):
        new_country_name = 'New country'
        UserService().change_user_data(self.user.id, new_country_name=new_country_name)
        self.user.reload()
        self.assertEqual(self.user.country.name, new_country_name)

    def test_change_country_flag(self):
        new_country_flag = 'Link on new country flag'
        UserService().change_user_data(self.user.id, new_country_flag=new_country_flag)
        self.user.reload()
        self.assertEqual(self.user.country.link_img, new_country_flag)

    @classmethod
    def tearDownClass(cls):
        # Remove the fixture user so repeated runs start clean.
        user = User.objects(username='Test user').first()
        UserService().delete_user_account(user.id,user.password)
|
988,892 | 9b8dea18cd12deea612c773e8f919b4b6ccbf3b9 | #!/usr/bin/env python
import time
from random import randint
import getopt
import sys
import os
import glob
import re
import tweepy
from keys import keys
def return_cred():
    """Return the four Twitter OAuth credentials from the keys module.

    Order: (consumer key, consumer secret, access token, access token secret).
    """
    return (keys['consumer_key'], keys['consumer_secret'],
            keys['access_token'], keys['access_token_secret'])
if __name__ == "__main__":
global tweets_to_be_sent, image_file, type_of_tweet
try:
opts, remainder = getopt.getopt(sys.argv[1:], "", ["type="])
for opt, arg in opts:
if opt in '--type':
type_of_tweet = arg
CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET = return_cred()
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=3, retry_delay=5,
retry_errors=set([401, 402, 500, 503, 226]))
if type_of_tweet == "image":
for i in range(1, 11):
listing = glob.glob("../tweet*")
for path in listing:
for filename in os.listdir(path):
if re.findall('([-\w]+\.(?:jpg|gif|png))', filename):
image_file = path+"/"+filename
elif re.findall('([-\w]+\.(?:txt))', filename):
text_file = filename
tweets_file = open(path+"/"+text_file, "r")
tweets_to_be_sent = tweets_file.readlines()
tweets_file.close()
s = api.update_with_media(image_file, tweets_to_be_sent[0] + " "+ str(i))
# log_file.write(str(i)+"\n")
nap = randint(0, 59)
print "sleeping for", nap, "seconds"
time.sleep(nap)
# log_file.close()
else:
for i in range(15, 20):
listing = glob.glob("../tweet*")
for path in listing:
for filename in os.listdir(path):
if re.findall('([-\w]+\.(?:txt))', filename):
text_file = filename
tweets_file = open(path+"/"+text_file, "r")
tweets_to_be_sent = tweets_file.readlines()
tweets_file.close()
m = str(i) + " " + tweets_to_be_sent[0]
s = api.update_status(status=m)
nap = randint(0, 59)
print "sleeping for", nap, "seconds"
time.sleep(nap)
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
# usage() write a function describing code usage
|
988,893 | 4ab523622f503ed03e441e46ae726d7a3b33f372 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AbstractNorm(object):
    """Mixin: vector norm along dim 1 — L1 when self.l1 is truthy, else L2."""

    def _norm(self, x):
        p = 1 if self.l1 else 2
        return torch.norm(x, p=p, dim=1)
class AbstractDropout(object):
    """Mixin: functional dropout driven by self.dropout and self.training."""

    def _dropout(self, x):
        # No-op when self.training is False (evaluation mode).
        rate = self.dropout
        return F.dropout(x, p=rate, training=self.training)
class AbstractSum(object):
    """Mixin: reduce a batch of vectors by summing along dim 1."""

    def _sum(self, x):
        return x.sum(dim=1)
class AbstractTA(nn.Module, AbstractDropout):
    """Base for TA-* models: the relation+timestamp token sequence is encoded
    by an LSTM whose final hidden state becomes the relation-time embedding.

    Subclasses implement `_score(s, o, rt)` over the dropped-out embeddings.
    """

    def _score(self, s, o, rt):
        raise NotImplementedError(f'this method should be implemented in {self.__class__}')

    def __init__(self, args, e_cnt, r_cnt, t_cnt):
        super().__init__()
        self.dvc = args.dvc
        self.dropout = args.dropout
        self.l1 = args.l1
        # Widths are optionally doubled (real/imaginary halves for complex models).
        e_es = (2 if args.double_entity_embedding_size else 1) * args.embedding_size
        r_es = (2 if args.double_relation_embedding_size else 1) * args.embedding_size
        self.lstm = nn.LSTM(r_es, r_es, num_layers=1, batch_first=True).to(self.dvc)
        # Standard LSTM init: xavier on input weights, orthogonal on recurrent.
        for name, param in self.lstm.named_parameters():
            if 'weight_ih' in name:
                nn.init.xavier_uniform_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)
            elif 'bias' in name:
                nn.init.zeros_(param)
        # Entity table may live on an auxiliary device to save main-device memory.
        self.e_embed = nn.Embedding(e_cnt, e_es).to(args.aux_dvc)
        self.r_embed = nn.Embedding(r_cnt, r_es).to(self.dvc)
        self.t_embed = nn.Embedding(t_cnt, r_es).to(self.dvc)
        nn.init.xavier_uniform_(self.e_embed.weight)
        nn.init.xavier_uniform_(self.r_embed.weight)
        nn.init.xavier_uniform_(self.t_embed.weight)

    def _rt_embed(self, r, t):
        """Encode the [relation, time tokens...] sequence; return (batch, r_es)."""
        r_e = self.r_embed(r)
        bs, ts = t.shape[:2]
        t = t.reshape(-1)
        t_e = self.t_embed(t).view(bs, ts, -1)
        s_e = torch.cat((r_e.unsqueeze(1), t_e), dim=1)
        _, (h, _) = self.lstm(s_e)
        # FIX: squeeze only the num_layers dim. The original bare .squeeze()
        # also dropped the batch dim when batch size was 1, yielding a 1-D
        # tensor (values were still correct only thanks to broadcasting).
        return h.squeeze(0)

    def forward(self, s, o, r, t):
        s_e = self._dropout(self.e_embed(s).to(self.dvc))
        o_e = self._dropout(self.e_embed(o).to(self.dvc))
        rt_e = self._dropout(self._rt_embed(r, t))
        return self._score(s_e, o_e, rt_e)
class AbstractDE(torch.nn.Module, AbstractDropout):
    """Base for DE-* (diachronic embedding) models.

    Each entity embedding is a static part plus a periodic temporal part
    amp * sin(freq * t + phase) driven by day/hour features; subclasses
    implement `_score(st, ot, r)` over the time-extended embeddings.
    """

    def _score(self, st, ot, r):
        raise NotImplementedError(f'this method should be implemented in {self.__class__}')

    def __init__(self, args, e_cnt, r_cnt):
        super().__init__()
        self.dvc = args.dvc
        self.dropout = args.dropout
        self.l1 = args.l1
        self.double_e_es = args.double_entity_embedding_size
        self.double_r_es = args.double_relation_embedding_size
        # Split the embedding width into static (s_es) and temporal (t_es) parts.
        s_es = int(args.embedding_size * args.static_proportion)
        r_es = args.embedding_size
        t_es = r_es - s_es
        s_es *= 2 if self.double_e_es else 1
        r_es *= 2 if self.double_r_es else 1
        t_es *= 2 if self.double_r_es else 1
        self.e_embed = nn.Embedding(e_cnt, s_es).to(args.aux_dvc)
        self.r_embed = nn.Embedding(r_cnt, r_es).to(self.dvc)
        nn.init.xavier_uniform_(self.e_embed.weight)
        nn.init.xavier_uniform_(self.r_embed.weight)
        # Per-entity frequency / phase / amplitude, separately for day (d_*)
        # and hour (h_*) components.
        self.d_frq_embed = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_frq_embed = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        nn.init.xavier_uniform_(self.d_frq_embed.weight)
        nn.init.xavier_uniform_(self.h_frq_embed.weight)
        self.d_phi_embed = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_phi_embed = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        nn.init.xavier_uniform_(self.d_phi_embed.weight)
        nn.init.xavier_uniform_(self.h_phi_embed.weight)
        self.d_amp_embed = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_amp_embed = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        nn.init.xavier_uniform_(self.d_amp_embed.weight)
        nn.init.xavier_uniform_(self.h_amp_embed.weight)

    def _t_embed(self, e, d, h):
        """Temporal part of entity e's embedding at day d / hour h."""
        _d = self.d_amp_embed(e).to(self.dvc) * torch.sin(d.view(-1, 1) * self.d_frq_embed(e).to(self.dvc) +
                                                          self.d_phi_embed(e).to(self.dvc))
        _h = self.h_amp_embed(e).to(self.dvc) * torch.sin(h.view(-1, 1) * self.h_frq_embed(e).to(self.dvc) +
                                                          self.h_phi_embed(e).to(self.dvc))
        return _d + _h

    def forward(self, s, o, r, t):
        # t carries (day, hour) integer features per example.
        d, h = t[:, 0].float(), t[:, 1].float()
        s_e = self.e_embed(s).to(self.dvc)
        o_e = self.e_embed(o).to(self.dvc)
        r_e = self.r_embed(r)
        t_s = self._t_embed(s, d, h)
        t_o = self._t_embed(o, d, h)
        if self.double_e_es:
            # Interleave static and temporal halves as real/imaginary parts.
            s_e_r, s_e_i = torch.chunk(s_e, 2, dim=1)
            o_e_r, o_e_i = torch.chunk(o_e, 2, dim=1)
            if self.double_r_es:
                t_s_e, t_s_i = torch.chunk(t_s, 2, dim=1)
                t_o_r, t_o_i = torch.chunk(t_o, 2, dim=1)
                s_t = torch.cat((s_e_r, t_s_e, s_e_i, t_s_i), dim=1)
                o_t = torch.cat((o_e_r, t_o_r, o_e_i, t_o_i), dim=1)
            else:
                s_t = torch.cat((s_e_r, t_s, s_e_i, t_s), dim=1)
                # FIX: the imaginary half of o_t used t_s (the SUBJECT's
                # temporal embedding) instead of t_o — a copy-paste slip,
                # inconsistent with every other branch, which always pairs
                # o's static parts with o's temporal part.
                o_t = torch.cat((o_e_r, t_o, o_e_i, t_o), dim=1)
        else:
            s_t = torch.cat((s_e, t_s), dim=1)
            o_t = torch.cat((o_e, t_o), dim=1)
        return self._score(s_t, o_t, r_e)
class TTransE(nn.Module, AbstractNorm):
    """TransE extended with an additive timestamp embedding: s + r + t ≈ o."""

    def __init__(self, args, e_cnt, r_cnt, t_cnt):
        super().__init__()
        self.dvc = args.dvc
        self.l1 = args.l1
        # Entities may live on an auxiliary device; relations/times on the main one.
        self.e_embed = nn.Embedding(e_cnt, args.embedding_size).to(args.aux_dvc)
        self.r_embed = nn.Embedding(r_cnt, args.embedding_size).to(self.dvc)
        self.t_embed = nn.Embedding(t_cnt, args.embedding_size).to(self.dvc)
        # Same init order as table creation (entity, relation, time).
        for table in (self.e_embed, self.r_embed, self.t_embed):
            nn.init.xavier_uniform_(table.weight)

    def _score(self, s, o, r, t):
        # Negated translational distance: smaller ||s + r + t - o|| → higher score.
        return -self._norm(s + r + t - o)

    def forward(self, s, o, r, t):
        subj = self.e_embed(s).to(self.dvc)
        obj = self.e_embed(o).to(self.dvc)
        rel = self.r_embed(r)
        ts = self.t_embed(t).squeeze()
        return self._score(subj, obj, rel, ts)
class TATransE(AbstractTA, AbstractNorm):
    """TransE scoring over TA (LSTM-encoded relation+time) embeddings."""

    def _score(self, s, o, rt):
        # Closer s + rt to o means a higher (less negative) score.
        return -self._norm(s + rt - o)
class DETransE(AbstractDE, AbstractNorm):
    """Diachronic-embedding TransE: score = -||dropout(s_t + r - o_t)||."""

    def _score(self, st, ot, r):
        diff = self._dropout(st + r - ot)
        return -self._norm(diff)
class TADistMult(AbstractTA, AbstractSum):
    """DistMult scoring over TA relation-time embeddings: sum(s ∘ o ∘ rt)."""

    def _score(self, s, o, rt):
        prod = s * o * rt
        return self._sum(prod)
class DEDistMult(AbstractDE, AbstractSum):
    """Diachronic-embedding DistMult: sum(dropout(s_t ∘ o_t ∘ r))."""

    def _score(self, st, ot, r):
        prod = self._dropout(st * ot * r)
        return self._sum(prod)
class TAComplEx(AbstractTA, AbstractSum):
    """ComplEx scoring; each embedding is split into real/imag halves."""

    def _score(self, s, o, rt):
        s_re, s_im = torch.chunk(s, 2, dim=1)
        o_re, o_im = torch.chunk(o, 2, dim=1)
        rt_re, rt_im = torch.chunk(rt, 2, dim=1)
        # Real part of the trilinear complex product <s, rt, conj(o)>.
        real = s_re * rt_re * o_re - s_im * rt_im * o_re
        imag = s_re * rt_im * o_im + s_im * rt_re * o_im
        return self._sum(torch.cat([real, imag], dim=1))
class DEComplEx(AbstractDE, AbstractSum):
    """Diachronic-embedding ComplEx; halves are real/imag parts."""

    def _score(self, st, ot, r):
        st_re, st_im = torch.chunk(st, 2, dim=1)
        ot_re, ot_im = torch.chunk(ot, 2, dim=1)
        r_re, r_im = torch.chunk(r, 2, dim=1)
        real = st_re * r_re * ot_re - st_im * r_im * ot_re
        imag = st_re * r_im * ot_im + st_im * r_re * ot_im
        return self._sum(torch.cat([real, imag], dim=1))
class TASimplE(torch.nn.Module, AbstractDropout, AbstractSum):
    """SimplE with TA (LSTM-encoded relation+time) embeddings.

    Keeps separate subject/object entity tables and forward (f) / inverse (i)
    relation channels; the score averages both directions.
    """

    def _score(self, s_s, o_s, rt_s, s_o, o_o, rt_o):
        # Average of the forward and inverse SimplE trilinear products.
        return self._sum(self._dropout((s_s * o_s * rt_s + s_o * o_o * rt_o) / 2.0))

    def __init__(self, args, e_cnt, r_cnt, t_cnt):
        super().__init__()
        self.dvc = args.dvc
        self.dropout = args.dropout
        # One LSTM per relation channel (forward / inverse).
        self.lstm_f = nn.LSTM(args.embedding_size, args.embedding_size, num_layers=1, batch_first=True).to(self.dvc)
        self.lstm_i = nn.LSTM(args.embedding_size, args.embedding_size, num_layers=1, batch_first=True).to(self.dvc)
        # Xavier on input weights, orthogonal on recurrent, zero biases.
        for name, param in self.lstm_f.named_parameters():
            if 'weight_ih' in name:
                nn.init.xavier_uniform_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)
            elif 'bias' in name:
                nn.init.zeros_(param)
        for name, param in self.lstm_i.named_parameters():
            if 'weight_ih' in name:
                nn.init.xavier_uniform_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)
            elif 'bias' in name:
                nn.init.zeros_(param)
        self.e_embed_s = nn.Embedding(e_cnt, args.embedding_size).to(args.aux_dvc)
        self.e_embed_o = nn.Embedding(e_cnt, args.embedding_size).to(args.aux_dvc)
        self.r_embed_f = nn.Embedding(r_cnt, args.embedding_size).to(self.dvc)
        self.r_embed_i = nn.Embedding(r_cnt, args.embedding_size).to(self.dvc)
        self.t_embed_f = nn.Embedding(t_cnt, args.embedding_size).to(self.dvc)
        self.t_embed_i = nn.Embedding(t_cnt, args.embedding_size).to(self.dvc)
        nn.init.xavier_uniform_(self.e_embed_s.weight)
        nn.init.xavier_uniform_(self.e_embed_o.weight)
        nn.init.xavier_uniform_(self.r_embed_f.weight)
        nn.init.xavier_uniform_(self.r_embed_i.weight)
        nn.init.xavier_uniform_(self.t_embed_f.weight)
        nn.init.xavier_uniform_(self.t_embed_i.weight)

    def _rt_embed(self, r_embed, t_embed, lstm, r, t):
        # Sequence = [relation token, time tokens...]; the LSTM's final
        # hidden state is the relation-time embedding.
        r_e = r_embed(r)
        bs, ts = t.shape[:2]
        t = t.reshape(-1)
        t_e = t_embed(t).view(bs, ts, -1)
        s_e = torch.cat((r_e.unsqueeze(1), t_e), dim=1)
        _, (h, _) = lstm(s_e)
        # NOTE(review): bare .squeeze() drops the batch dim when batch size
        # is 1 — confirm callers never pass single-element batches.
        return h.squeeze()

    def forward(self, s, o, r, t):
        # NOTE(review): d, h are computed but never used in this class —
        # confirm this is leftover from the DE variants.
        d, h = t[:, 0].float(), t[:, 1].float()
        s_e_s = self._dropout(self.e_embed_s(s).to(self.dvc))
        o_e_s = self._dropout(self.e_embed_o(o).to(self.dvc))
        # Inverse direction swaps subject/object roles.
        s_e_o = self._dropout(self.e_embed_s(o).to(self.dvc))
        o_e_o = self._dropout(self.e_embed_o(s).to(self.dvc))
        rt_e_f = self._dropout(self._rt_embed(self.r_embed_f, self.t_embed_f, self.lstm_f, r, t))
        rt_e_i = self._dropout(self._rt_embed(self.r_embed_i, self.t_embed_i, self.lstm_i, r, t))
        return self._score(s_e_s, o_e_s, rt_e_f, s_e_o, o_e_o, rt_e_i)
class DESimplE(torch.nn.Module, AbstractDropout, AbstractSum):
    """Diachronic-embedding SimplE.

    Separate subject ('S') and object ('O') channels, each with its own
    static table and periodic temporal parameters; the score averages the
    forward and inverse trilinear products.
    """

    def _score(self, st_s, ot_s, r_s, st_o, ot_o, r_o):
        return self._sum(self._dropout((st_s * ot_s * r_s + st_o * ot_o * r_o) / 2.0))

    def __init__(self, args, e_cnt, r_cnt):
        super().__init__()
        self.dvc = args.dvc
        self.dropout = args.dropout
        # Split embedding width into static (s_es) and temporal (t_es) parts.
        s_es = int(args.embedding_size * args.static_proportion)
        r_es = args.embedding_size
        t_es = r_es - s_es
        self.e_embed_s = nn.Embedding(e_cnt, s_es).to(args.aux_dvc)
        self.e_embed_o = nn.Embedding(e_cnt, s_es).to(args.aux_dvc)
        self.r_embed_f = nn.Embedding(r_cnt, r_es).to(self.dvc)
        self.r_embed_i = nn.Embedding(r_cnt, r_es).to(self.dvc)
        nn.init.xavier_uniform_(self.e_embed_s.weight)
        nn.init.xavier_uniform_(self.e_embed_o.weight)
        nn.init.xavier_uniform_(self.r_embed_f.weight)
        nn.init.xavier_uniform_(self.r_embed_i.weight)
        # Frequency / phase / amplitude tables, per channel (s/o) and per
        # time feature (day d_* / hour h_*).
        self.d_frq_embed_s = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.d_frq_embed_o = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_frq_embed_s = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_frq_embed_o = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        nn.init.xavier_uniform_(self.d_frq_embed_s.weight)
        nn.init.xavier_uniform_(self.d_frq_embed_o.weight)
        nn.init.xavier_uniform_(self.h_frq_embed_s.weight)
        nn.init.xavier_uniform_(self.h_frq_embed_o.weight)
        self.d_phi_embed_s = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.d_phi_embed_o = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_phi_embed_s = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_phi_embed_o = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        nn.init.xavier_uniform_(self.d_phi_embed_s.weight)
        nn.init.xavier_uniform_(self.d_phi_embed_o.weight)
        nn.init.xavier_uniform_(self.h_phi_embed_s.weight)
        nn.init.xavier_uniform_(self.h_phi_embed_o.weight)
        self.d_amp_embed_s = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.d_amp_embed_o = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_amp_embed_s = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        self.h_amp_embed_o = nn.Embedding(e_cnt, t_es).to(args.aux_dvc)
        nn.init.xavier_uniform_(self.d_amp_embed_s.weight)
        nn.init.xavier_uniform_(self.d_amp_embed_o.weight)
        nn.init.xavier_uniform_(self.h_amp_embed_s.weight)
        nn.init.xavier_uniform_(self.h_amp_embed_o.weight)

    def _t_embed(self, e, d, h, md):
        # md selects the channel: 'S' (subject tables) or 'O' (object tables).
        # NOTE(review): any other md value leaves _d/_h unbound and raises
        # UnboundLocalError — confirm only 'S'/'O' are ever passed.
        if md == 'S':
            _d = self.d_amp_embed_s(e).to(self.dvc) * torch.sin(d.view(-1, 1) * self.d_frq_embed_s(e).to(self.dvc)
                                                                + self.d_phi_embed_s(e).to(self.dvc))
            _h = self.h_amp_embed_s(e).to(self.dvc) * torch.sin(h.view(-1, 1) * self.h_frq_embed_s(e).to(self.dvc)
                                                                + self.h_phi_embed_s(e).to(self.dvc))
        elif md == 'O':
            _d = self.d_amp_embed_o(e).to(self.dvc) * torch.sin(d.view(-1, 1) * self.d_frq_embed_o(e).to(self.dvc)
                                                                + self.d_phi_embed_o(e).to(self.dvc))
            _h = self.h_amp_embed_o(e).to(self.dvc) * torch.sin(h.view(-1, 1) * self.h_frq_embed_o(e).to(self.dvc)
                                                                + self.h_phi_embed_o(e).to(self.dvc))
        return _d + _h

    def forward(self, s, o, r, t):
        # t carries (day, hour) integer features per example.
        d, h = t[:, 0].float(), t[:, 1].float()
        s_e_s = self.e_embed_s(s).to(self.dvc)
        o_e_s = self.e_embed_o(o).to(self.dvc)
        # Inverse direction swaps subject/object roles.
        s_e_o = self.e_embed_s(o).to(self.dvc)
        o_e_o = self.e_embed_o(s).to(self.dvc)
        r_e_f = self.r_embed_f(r)
        r_e_i = self.r_embed_i(r)
        t_e_s_s = self._t_embed(s, d, h, 'S')
        t_e_o_o = self._t_embed(o, d, h, 'O')
        t_e_o_s = self._t_embed(o, d, h, 'S')
        t_e_s_o = self._t_embed(s, d, h, 'O')
        # Concatenate static and temporal parts for each role/channel pair.
        s_t_s_s = torch.cat((s_e_s, t_e_s_s), dim=1)
        o_t_s_o = torch.cat((o_e_s, t_e_o_o), dim=1)
        s_t_o_s = torch.cat((s_e_o, t_e_o_s), dim=1)
        o_t_o_o = torch.cat((o_e_o, t_e_s_o), dim=1)
        return self._score(s_t_s_s, o_t_s_o, r_e_f, s_t_o_s, o_t_o_o, r_e_i)
class TARotatE(AbstractTA, AbstractNorm):
    """RotatE over TA embeddings: rt is interpreted as rotation phases."""

    pi = 3.14159265358979323846

    def __init__(self, args, e_cnt, r_cnt, t_cnt):
        super().__init__(args, e_cnt, r_cnt, t_cnt)
        self.e_r = 1  # NOTE: LSTM outputs are from [-1, 1]

    def _score(self, s, o, rt):
        s_re, s_im = torch.chunk(s, 2, dim=1)
        o_re, o_im = torch.chunk(o, 2, dim=1)
        # Map rt into rotation angles, then rotate s in the complex plane.
        phase = (self.pi * rt) / self.e_r
        rot_re = torch.cos(phase)
        rot_im = torch.sin(phase)
        diff_re = s_re * rot_re - s_im * rot_im - o_re
        diff_im = s_re * rot_im + s_im * rot_re - o_im
        return -self._norm(torch.cat([diff_re, diff_im], dim=1))
class DERotatE(AbstractDE, AbstractNorm):
    """Diachronic-embedding RotatE: r encodes complex-plane rotation phases."""

    pi = 3.14159265358979323846

    def __init__(self, args, e_cnt, r_cnt):
        super().__init__(args, e_cnt, r_cnt)
        # Embedding range used to normalize phases into [-pi, pi].
        self.e_r = 6 / np.sqrt((2 if self.double_r_es else 1) * args.embedding_size)

    def _score(self, st, ot, r):
        st_re, st_im = torch.chunk(st, 2, dim=1)
        ot_re, ot_im = torch.chunk(ot, 2, dim=1)
        phase = (self.pi * r) / self.e_r
        rot_re = torch.cos(phase)
        rot_im = torch.sin(phase)
        diff_re = st_re * rot_re - st_im * rot_im - ot_re
        diff_im = st_re * rot_im + st_im * rot_re - ot_im
        return -self._norm(self._dropout(torch.cat([diff_re, diff_im], dim=1)))
|
988,894 | 9c151c5f4b644c2c6a75a65f2daf7d2447f15528 | def is_alpha(sym):
return 'a' <= sym <= 'z'
def is_digit(sym):
    """True when sym compares within the ASCII digit range '0'..'9'."""
    return '0' <= sym and sym <= '9'
def is_allowed_special(sym):
    """True for the two permitted special symbols: space and double quote."""
    return sym in (' ', "\"")
|
988,895 | 78081b341b70388eed60c45df86914fd1c275727 | # 闭包与装饰器
def make_power(y):
    """Return a function computing arg ** y (y is captured by the closure)."""
    def power(arg):
        return arg ** y
    return power
# Demonstrate the closure: each call to make_power captures its own y.
pow2 = make_power(2)
print('3**2 = ', pow2(3))
pow3 = make_power(3)
print('3**3 = ', pow3(3))
# Return an instance of the corresponding math function built from parameters:
# y = a*x**2 + b*x + c
def make_function(a, b, c):
    """Return the quadratic f(x) = a*x**2 + b*x + c as a closure."""
    def quadratic(x):
        return a * x**2 + b * x + c
    return quadratic
# f(x) = 4x^2 + 5x + 6 evaluated at x = 2 -> prints 32
fx1 = make_function(4, 5, 6)
print(fx1(2))
# Define a decorator
def mydeco(fn):
    """Decorator that brackets every call to fn with a banner line."""
    def wrapper():
        print('+++++++++++')
        fn()
        print('+++++++++++')
    return wrapper
# myfunc = mydeco(myfunc) is equivalent to applying the decorator below
@mydeco
def myfunc():
    # NOTE: original message spelling ('muyfunc') kept verbatim.
    print('muyfunc called')
# Each call prints the banner, the message, then the banner again.
myfunc()
myfunc()
myfunc()
|
988,896 | bcb9d770ce29b33c612a7e46bc6cb792e358608e | import base64
import binascii
from tools import cryptotools
import requests
from requests.auth import HTTPBasicAuth
from secret.credentials import Credentials
def main():
    """Brute-force natas18's PHPSESSID to find the logged-in admin session.

    Tries every candidate session id; the first response that does NOT
    contain the regular-user banner is assumed to be the admin session,
    and its page (holding the next level's password) is printed.

    Returns:
        True (unconditionally; the result is informational only).
    """
    level = 18
    level_about = 'In this level you can see a sourcecode of vulnerable website, which is quite handy. We can see \n' \
                  'some messy code which seems to actually do nothing like logging in user. The interesting part \n' \
                  'is comment in the beginning of a code that says 640 $maxid should be enough. We need to execute \n' \
                  'print_credentials function with $_SESSION["admin"] set to 1. But there is no place in code \n' \
                  'where we can set this to 1. We assume that there already is a session on the server with\n'\
                  'set $_SESSION["admin"] to 1. So we will try every of 640 possible session ids.'
    print("Hi there!\nyou've just run natas level " + str(level) +
          " (http://overthewire.org/wargames/natas/). Credentials are available in the secrets/credentials.py")
    print("\n" + level_about + "\n\n")
    # Real solution starts here :-)
    level_credentials = HTTPBasicAuth('natas' + str(level), Credentials.natas_credentials['natas' + str(level)])
    # NOTE(review): the site comment says "640 $maxid should be enough";
    # 0..640 inclusive is a safe superset of that range.
    for i in range(0, 641):
        cookie = {"PHPSESSID": str(i)}
        r = requests.get('http://natas18.natas.labs.overthewire.org/index.php', cookies=cookie, auth=level_credentials)
        # Fixed: use the `in` operator instead of calling the __contains__ dunder.
        if "You are logged in as a regular user" in r.text:
            print("false: " + cookie.get("PHPSESSID"))
        else:
            print("SUCCESS PHPSESSID = " + cookie.get("PHPSESSID"))
            print(r.text)
            break
    return True
# Run the exploit only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
988,897 | 4963319159895d14aeaa9977910b8487a8a00c2e | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from django.contrib.auth import get_user_model
from impact.v1.views.base_history_view import BaseHistoryView
from impact.v1.events import (
UserBecameConfirmedJudgeEvent,
UserBecameConfirmedMentorEvent,
UserBecameDesiredJudgeEvent,
UserBecameDesiredMentorEvent,
UserBecameFinalistEvent,
UserCreatedEvent,
UserJoinedStartupEvent,
UserReceivedNewsletterEvent,
)
# Resolve the active Django user model once at import time.
User = get_user_model()
class UserHistoryView(BaseHistoryView):
    """History endpoint for User objects.

    Declarative configuration for BaseHistoryView: the model to inspect
    and the event classes that can appear in a user's history timeline.
    """
    view_name = "user_history"
    model = User
    # Lifecycle events aggregated into the user's history, in the order
    # they are evaluated by the base view.
    event_classes = [UserBecameConfirmedJudgeEvent,
                     UserBecameConfirmedMentorEvent,
                     UserBecameDesiredJudgeEvent,
                     UserBecameDesiredMentorEvent,
                     UserBecameFinalistEvent,
                     UserCreatedEvent,
                     UserJoinedStartupEvent,
                     UserReceivedNewsletterEvent]
|
988,898 | edf27e88fe09bbae43071d84c97a2fd13e219aa1 | import StringIO
import urllib
from django.http import HttpResponse
import PIL
import PIL.ImageDraw as imdraw
# import PIL.ImageEnhance as imenhance
# import PIL.ImageFont as imfont
def reduce_opacity(im, opacity):
    """Return a copy of *im* in RGBA mode with its alpha channel scaled
    by *opacity* (1 = unchanged, 0 = fully transparent).

    opacity must lie in [0, 1].
    """
    # Fixed: the module-level `import PIL.ImageEnhance` was commented out,
    # so the PIL.ImageEnhance reference below raised AttributeError
    # (`import PIL` alone does not load submodules). Import it locally.
    import PIL.ImageEnhance
    assert 0 <= opacity <= 1
    # Never mutate the caller's image: convert (which copies) or copy.
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    else:
        im = im.copy()
    alpha = im.split()[3]
    alpha = PIL.ImageEnhance.Brightness(alpha).enhance(opacity)
    im.putalpha(alpha)
    return im
def resize(im, targetWidth):
    """Resize *im* to targetWidth pixels wide, preserving the aspect ratio.

    Fixed: the original computed float(im.size[1] / im.size[0]) — under
    Python 2 the integer division happens *before* the float conversion,
    truncating the aspect ratio to 0 (or 1) and producing a degenerate
    height. Convert one operand first so the division is exact.
    """
    targetHeight = int(targetWidth * float(im.size[1]) / im.size[0])
    return im.resize((targetWidth, targetHeight), PIL.Image.ANTIALIAS)
def watermark(im, credits, opacity=1):
    """Overlay The Crimson's logo (top-right) and a credits line
    (bottom-left) on *im*, returning the result as a PNG HttpResponse.

    opacity below 1 fades the logo via reduce_opacity.
    """
    # First resize mark to make it appropriate size
    # NOTE(review): the logo is fetched over HTTP on every call — a network
    # round-trip per watermark; consider caching. (Python 2 urllib.)
    logoURL = 'https://s3.amazonaws.com/static.thecrimson.com' + \
        '/images/feature/thc-logo-large.png'
    imfile = StringIO.StringIO(urllib.urlopen(logoURL).read())
    mark = PIL.Image.open(imfile)
    # NOTE(review): a fixed 100x10 ignores the logo's aspect ratio and
    # likely distorts it — confirm the intended size.
    mark = mark.resize((100, 10), PIL.Image.ANTIALIAS)
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    # font = imfont.truetype(font='/static/fonts/ColabLig-webfont.ttf',
    # size=15, index=0, encoding='')
    layer = PIL.Image.new('RGBA', im.size, (0, 0, 0, 0))
    # NOTE(review): the comment above says the text is drawn in the layer,
    # but Draw targets `im` directly — the credits bypass the composite.
    draw = imdraw.Draw(im)
    # Top-right corner, 10px in from the right and top edges.
    position = (im.size[0] - mark.size[0] - 10, 10)
    layer.paste(mark, position)
    # draw.text((x, y),"Sample Text",(r,g,b),font)
    # Credits in white, 20px above the bottom-left corner.
    draw.text((10, im.size[1] - 20), credits, (255, 255, 255))
    # composite the watermark with the layer
    watermarkedImage = PIL.Image.composite(layer, im, layer)
    response = HttpResponse(content_type='image/png')
    watermarkedImage.save(response, 'PNG')
    return response
|
988,899 | 8d5b356f09564c78a24994a5dd9ac4004fdc641a | __author__ = 'Administrator'
#coding:utf-8
from parseIni import *
from parseIniFile import *
def test0():
    """Read "0.ini" with parseIni helpers and print the "port" value from
    the "global" section. (Python 2 print-statement syntax.)"""
    # Plain form, with a manual close:
    # f = open("0.ini","rb")
    # print getValue(f,"global","port");
    # f.close()
    # `with open(...)` closes the file automatically even on error
    # (importable since Python 2.5 via __future__, default from 2.6).
    with open("0.ini","rb") as f:
        print getValue(f,"global","port");
def test1():
    """Parse "1.ini" into a nested dict platform -> section -> key/value,
    then print vardict["part2"]["global"]["ip"]. (Python 2 syntax.)"""
    # Plain form, with a manual close:
    # f = open("1.ini","rb")
    # strFileContent = f.read()
    # f.close()
    # `with open(...)` closes the file automatically even on error
    # (importable since Python 2.5 via __future__, default from 2.6).
    with open("1.ini","rb") as f:
        strFileContent = f.read()
    vardict = {}
    # Three-level walk: platforms -> sections -> key/value maps.
    var1 = getPlatformMap(strFileContent)
    for k,v in var1.items():
        var2 = getSectionMap(v)
        dict3 = {}
        for k2,v2 in var2.items():
            var3 = getValueMap(v2)
            dict3[k2] = var3
        vardict[k] = dict3
    print vardict["part2"]["global"]["ip"]
# Run both demos at import/execution time (script has no __main__ guard).
test0();
test1();
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.