index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
def num_ways(n, dp):
    """Count the ways to climb ``n`` steps taking 1 or 2 steps at a time.

    Results for n >= 3 are memoized in ``dp``, a list of length > n whose
    uncomputed entries hold -1.  Base cases (0, 1, 2 steps) bypass the table.
    """
    base_cases = {0: 0, 1: 1, 2: 2}
    if n in base_cases:
        return base_cases[n]
    if dp[n] == -1:
        # Each climb ends with either a single step or a double step.
        dp[n] = num_ways(n - 1, dp) + num_ways(n - 2, dp)
    return dp[n]
|
990,401 | 983cf227b5f12c06a27b08905f31cb05498916ae | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# pandas basics walkthrough.  NOTE(review): the commented-out `print x`
# statements are Python 2 syntax; the live code also runs on Python 3.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# numpy.random random-number helpers
# print np.random.random(3)
# print np.random.rand(3,4)
# print np.random.randn(3,4)
# Creating Series / DataFrame objects
s = pd.Series([1,2,6,np.nan,44,4])
dates = pd.date_range('20170406',periods = 6)
df = pd.DataFrame(np.random.randn(6,4),index = dates,columns = ['a','b','c','d'])
df = pd.DataFrame(np.arange(12).reshape(3,4))
df2 = pd.DataFrame({ 'A' : 1.,
                     'B' : pd.Timestamp('20130102'),
                     'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
                     'D' : np.array([3] * 4,dtype='int32'),
                     'E' : pd.Categorical(["test","train","test","train"]),
                     'F' : 'foo' })
# DataFrame attributes
# print df2.dtypes
# print df2.index
# print df2.columns
# print df2.values
# print df2.describe()
# print df2.T
# axis=1 sorts along columns, axis=0 along rows; both sort by index/columns labels
# print df2.sort_index(axis=1,ascending = False)
# print df2.sort_index(axis=0,ascending = False)
# print df2.sort_values(by='E',ascending = False)
# Selecting data
dates = pd.date_range('20170406',periods = 6)
df = pd.DataFrame(np.arange(24).reshape(6,4),index = dates,columns = ['a','b','c','d'])
# select a column
# print df.a
# print df['a']
# select rows
# print df[0:3]
# print df['2017-04-06':'2017-04-08']
# select by label
# print df.loc['20170406']
# print df.loc[:,['a','b']]
# print df.loc['20170406',['a','b']]
# print df.loc['2017-04-06',['a','b']]
# select by position
# print df.iloc[5,3]
# print df.iloc[3:5,1:3]
# print df.iloc[[1,3,5],1:3]
# mixed label/position selection: ix (removed in modern pandas; use loc/iloc)
# print df.ix[:3,['a','c']]
# boolean indexing
# print df[df.a>8]
# Setting values
dates = pd.date_range('20170406',periods = 6)
df = pd.DataFrame(np.arange(24).reshape(6,4),index = dates,columns = ['a','b','c','d'])
df.iloc[2,2] = 1111
df.loc['20170406','b'] = 222222
# every column of the rows where column a > 4
df[df.a>4] = 0
# only column b of the rows where column a > 4
df.b[df.a>4] = 0
df['f'] = np.nan
df['f'] = pd.Series([1,2,3,4,5,6],index=pd.date_range('20170406',periods = 6))
# print df
# Handling missing data
dates = pd.date_range('20170406',periods = 6)
df = pd.DataFrame(np.arange(24).reshape(6,4),index = dates,columns = ['a','b','c','d'])
df.iloc[0,1] = np.nan
df.iloc[1,2] = np.nan
# drop rows (axis=0); how='any' drops a row with at least one NaN, how='all' only all-NaN rows
df = df.dropna(axis=0,how='any')
# fill missing values
df = df.fillna(value = 0)
# element-wise NaN test
# print df.isnull()
# whether the frame contains any NaN at all
# print np.any(df.isnull())==True
# print df
# pandas import / export (expects Student.csv / Student.pickle next to this script)
data = pd.read_csv('Student.csv')
# data.to_pickle('Student.pickle')
# print type(data)
data = pd.read_pickle('Student.pickle')
# print data
# concatenating
df1 = pd.DataFrame(np.ones((3,4))*0,columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1,columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*2,columns=['a','b','c','d'])
# axis=0 stacks vertically, axis=1 side by side; ignore_index = True rebuilds the row index
res = pd.concat([df1,df2,df3],axis=0,ignore_index = True)
# join,['inner','outer']
df1 = pd.DataFrame(np.ones((3,4))*0,columns=['a','b','c','d'],index=[1,2,3])
df2 = pd.DataFrame(np.ones((3,4))*1,columns=['b','c','d','e'],index=[2,3,4])
res = pd.concat([df1,df2],join='inner',ignore_index=True)
# side-by-side concat keeping df1's index; missing cells become NaN, extra rows dropped
# NOTE(review): join_axes was removed in pandas >= 1.0 — needs .reindex(df1.index) there
res = pd.concat([df1,df2],axis=1,join_axes=[df1.index])
# default: union of indexes, NaN where missing
res = pd.concat([df1,df2],axis=1)
# print res
# append
df1 = pd.DataFrame(np.ones((3,4))*0,columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1,columns=['b','c','d','e'],index=[2,3,4])
df2 = pd.DataFrame(np.ones((3,4))*1,columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*1,columns=['a','b','c','d'])
# append below
res = df1.append([df2,df3],ignore_index=True)
# append a single row
s1 = pd.Series([1,2,3,4],index=['a','b','c','d'])
res = df1.append(s1,ignore_index=True)
# print res
# merge
left = pd.DataFrame({'key':['K0','K1','K2','K3'],
                     'A':['A0','A1','A2','A3'],
                     'B':['B0','B1','B2','B3']})
right = pd.DataFrame({'key':['K0','K1','K2','K3'],
                      'C':['C0','C1','C2','C3'],
                      'D':['D0','D1','D2','D3']})
# merge on the 'key' column
res = pd.merge(left,right,on='key')
left = pd.DataFrame({'key1':['K0','K0','K1','K2'],
                     'key2':['K0','K1','K0','K1'],
                     'A':['A0','A1','A2','A3'],
                     'B':['B0','B1','B2','B3']})
right = pd.DataFrame({'key1':['K0','K1','K1','K2'],
                      'key2':['K0','K0','K0','K0'],
                      'C':['C0','C1','C2','C3'],
                      'D':['D0','D1','D2','D3']})
# merge on two key columns; how in [left,right,outer,inner], like SQL joins
res = pd.merge(left,right,on=['key1','key2'],how='right')
# indicator adds a column describing how each row was merged
df1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})
df2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})
# res = pd.merge(df1,df2,on='col1',how='outer',indicator=True)
res = pd.merge(df1,df2,on='col1',how='outer',indicator='indicator_colunm')
# merged by index
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
                     'B': ['B0', 'B1', 'B2']},
                    index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
                      'D': ['D0', 'D2', 'D3']},
                     index=['K0', 'K2', 'K3'])
res = pd.merge(left,right,left_index=True,right_index=True,how='outer')
res = pd.merge(left,right,left_index=True,right_index=True,how='inner')
# suffixes disambiguate overlapping column names
boys = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'age': [1, 2, 3]})
girls = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'age': [4, 5, 6]})
res = pd.merge(boys,girls,on='k',suffixes=['_boy','_girl'],how='outer')
# print girls
# print res
# plot data
# Series
data = pd.Series(np.random.randn(1000),index=np.arange(1000))
data = data.cumsum()
# data.plot()
# plt.show()
# DataFrame
data = pd.DataFrame(np.random.randn(1000,4),
                    index=np.arange(1000),
                    columns=list('ABCD'))
data = data.cumsum()
# print data.head()
# plot methods: bar hist box kde area scatter hexbin pie
# data.plot()
ax = data.plot.scatter(x='A', y='B', color='DarkBlue', label="Class 1")
data.plot.scatter(x='A', y='C', color='LightGreen', label='Class 2', ax=ax)
plt.show()
990,402 | f490716f71987696c511c768ee980082d9bc1501 | from itsdangerous import URLSafeTimedSerializer as usts
import base64
# generate verify key
class Token():
    """Generates and verifies timed, signed tokens (e.g. account-validation
    links) using itsdangerous' URLSafeTimedSerializer."""

    def __init__(self, security_key):
        self.security_key = security_key
        # BUG FIX: base64.encodestring() was deprecated since Python 3.1 and
        # removed in 3.9; encodebytes() is the drop-in replacement.  It also
        # requires bytes, so a str key is encoded first (the original raised
        # TypeError for str keys on Python 3).
        key_bytes = security_key.encode() if isinstance(security_key, str) else security_key
        self.salt = base64.encodebytes(key_bytes)

    def generate_validate_token(self, username):
        """Return a signed, timestamped token embedding ``username``."""
        serializer = usts(self.security_key)
        # second positional argument of dumps() is the salt
        return serializer.dumps(username, self.salt)

    def confirm_validate_token(self, token, expiration=3600):
        """Decode ``token``; raises (BadSignature/SignatureExpired) if the
        signature is invalid or older than ``expiration`` seconds."""
        serializer = usts(self.security_key)
        return serializer.loads(token, salt=self.salt, max_age=expiration)
|
990,403 | ca999f9ee62bf239db435101b3ef38c7a1db7fd9 | from typing import Tuple
import torch
import torch.nn
import torch.nn.functional as F
from torch import Tensor
def square(x: torch.Tensor) -> torch.Tensor:
    """Return the elementwise square of ``x``.

    Implemented with ``torch.pow`` and an exponent tensor allocated on the
    same device as ``x`` (keeps the original dtype-promotion behavior).
    """
    exponent = torch.tensor([2.0], device=x.device)
    return torch.pow(x, exponent)
class LPBatchNorm1d(torch.nn.BatchNorm1d):
    """BatchNorm1d variant whose forward pass propagates a (mean, variance)
    pair instead of a single activation tensor.

    The mean is normalized with the stored running statistics and affine
    parameters (always with training=False, so running stats are never
    updated here); the variance is rescaled by weight**2 / (running_var + eps)
    to match that affine normalization.
    """
    def __init__(self,
                 num_features,
                 eps=1e-5,
                 momentum=0.1,
                 affine=True,
                 track_running_stats=True):
        # Plain pass-through to BatchNorm1d; all parameters keep their
        # standard torch semantics.
        super(LPBatchNorm1d, self).__init__(
            num_features=num_features,
            eps=eps,
            momentum=momentum,
            affine=affine,
            track_running_stats=track_running_stats,
        )

    def forward(self, inputs: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
        """Normalize a (mean, variance) tuple; returns the transformed pair."""
        input_mean, input_var = inputs
        # Normalize the mean using frozen running statistics (training=False
        # ensures no running-stat update and no batch-statistic computation).
        m = F.batch_norm(
            input_mean,
            running_mean=self.running_mean,
            running_var=self.running_var,
            weight=self.weight,
            bias=self.bias,
            training=False,
            momentum=self.momentum,
            eps=self.eps,
        )
        running_var = self.running_var
        weight = self.weight
        # check for channel dimension
        # (C,)-shaped stats become (C, 1) so they broadcast over a trailing
        # length dimension — presumably input is (N, C, L); TODO confirm layout
        if input_var.dim() == 3:
            running_var = running_var.unsqueeze(dim=1)
            weight = weight.unsqueeze(dim=1)
        # Var[w * (x - mu) / sqrt(rv + eps)] = Var[x] * w^2 / (rv + eps)
        invstd_squared = 1.0 / (running_var + self.eps)
        v = input_var * invstd_squared * square(weight)
        return m, v
# Scraped FIFA 18 squad data for Yeovil Town (EFL League Two) from futbin.com.
club_info = {'club_url': 'https://www.futbin.com///18/leagues/EFL%20League%20Two?page=1&club=346', 'club_logo': 'https://cdn.futbin.com/content/fifa18/img/clubs/346.png', 'club_name': 'Yeovil Town'}
# NOTE(review): keys are player short names, which are not unique — 'Smith'
# (Nathan vs Connor) and 'Nelson' (Sid vs Stuart) collide below, so the later
# assignment silently overwrites the earlier entry.
players = {}
players['Bailey'] = {'player_url': 'https://www.futbin.com//18/player/8047/James Bailey', 'player_name': 'James Bailey', 'player_rating': '66', 'player_shortname': 'Bailey', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/184352.png?v=2'}
players['Dickson'] = {'player_url': 'https://www.futbin.com//18/player/9020/Ryan Dickson', 'player_name': 'Ryan Dickson', 'player_rating': '65', 'player_shortname': 'Dickson', 'player_position': 'LB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/184750.png?v=2'}
players['Smith'] = {'player_url': 'https://www.futbin.com//18/player/9964/Nathan Smith', 'player_name': 'Nathan Smith', 'player_rating': '64', 'player_shortname': 'Smith', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/82.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/187206.png?v=2'}
players['Zoko'] = {'player_url': 'https://www.futbin.com//18/player/9841/François Zoko', 'player_name': 'François Zoko', 'player_rating': '64', 'player_shortname': 'Zoko', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/108.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/112362.png?v=2'}
players['Nelson'] = {'player_url': 'https://www.futbin.com//18/player/10346/Sid Nelson', 'player_name': 'Sid Nelson', 'player_rating': '64', 'player_shortname': 'Nelson', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/221525.png?v=2'}
players['Seager'] = {'player_url': 'https://www.futbin.com//18/player/17458/Ryan Seager', 'player_name': 'Ryan Seager', 'player_rating': '63', 'player_shortname': 'Seager', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/213900.png?v=2'}
players['Khan'] = {'player_url': 'https://www.futbin.com//18/player/11163/Otis Khan', 'player_name': 'Otis Khan', 'player_rating': '63', 'player_shortname': 'Khan', 'player_position': 'LW', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/222463.png?v=2'}
players['Fisher'] = {'player_url': 'https://www.futbin.com//18/player/17493/Alex Fisher', 'player_name': 'Alex Fisher', 'player_rating': '61', 'player_shortname': 'Fisher', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/223404.png?v=2'}
players['Smith'] = {'player_url': 'https://www.futbin.com//18/player/12209/Connor Smith', 'player_name': 'Connor Smith', 'player_rating': '61', 'player_shortname': 'Smith', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/25.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/169793.png?v=2'}
players['James'] = {'player_url': 'https://www.futbin.com//18/player/12400/Tom James', 'player_name': 'Tom James', 'player_rating': '61', 'player_shortname': 'James', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/50.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/222583.png?v=2'}
players['Krysiak'] = {'player_url': 'https://www.futbin.com//18/player/12739/Artur Krysiak', 'player_name': 'Artur Krysiak', 'player_rating': '60', 'player_shortname': 'Krysiak', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/37.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/183140.png?v=2'}
players['Alfei'] = {'player_url': 'https://www.futbin.com//18/player/12768/Daniel Alfei', 'player_name': 'Daniel Alfei', 'player_rating': '60', 'player_shortname': 'Alfei', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/50.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/200386.png?v=2'}
players['Nelson'] = {'player_url': 'https://www.futbin.com//18/player/18526/Stuart Nelson', 'player_name': 'Stuart Nelson', 'player_rating': '60', 'player_shortname': 'Nelson', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/153100.png?v=2'}
players['Olomola'] = {'player_url': 'https://www.futbin.com//18/player/13073/Olufela Olomola', 'player_name': 'Olufela Olomola', 'player_rating': '60', 'player_shortname': 'Olomola', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/233289.png?v=2'}
players['Surridge'] = {'player_url': 'https://www.futbin.com//18/player/13491/Sam Surridge', 'player_name': 'Sam Surridge', 'player_rating': '59', 'player_shortname': 'Surridge', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/234249.png?v=2'}
players['Sowunmi'] = {'player_url': 'https://www.futbin.com//18/player/13373/Omar Sowunmi', 'player_name': 'Omar Sowunmi', 'player_rating': '59', 'player_shortname': 'Sowunmi', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/223069.png?v=2'}
players['Gray'] = {'player_url': 'https://www.futbin.com//18/player/13685/Jake Gray', 'player_name': 'Jake Gray', 'player_rating': '58', 'player_shortname': 'Gray', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/212492.png?v=2'}
players['Barnes'] = {'player_url': 'https://www.futbin.com//18/player/16584/Marcus Barnes', 'player_name': 'Marcus Barnes', 'player_rating': '58', 'player_shortname': 'Barnes', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/221202.png?v=2'}
players['Donnellan'] = {'player_url': 'https://www.futbin.com//18/player/17572/Shaun Donnellan', 'player_name': 'Shaun Donnellan', 'player_rating': '57', 'player_shortname': 'Donnellan', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/25.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/231407.png?v=2'}
players['Worthington'] = {'player_url': 'https://www.futbin.com//18/player/14179/Matt Worthington', 'player_name': 'Matt Worthington', 'player_rating': '57', 'player_shortname': 'Worthington', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/237379.png?v=2'}
players['Maddison'] = {'player_url': 'https://www.futbin.com//18/player/14342/Jonathan Maddison', 'player_name': 'Jonathan Maddison', 'player_rating': '56', 'player_shortname': 'Maddison', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/226049.png?v=2'}
players['Mugabi'] = {'player_url': 'https://www.futbin.com//18/player/14291/Bevis Mugabi', 'player_name': 'Bevis Mugabi', 'player_rating': '56', 'player_shortname': 'Mugabi', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/209285.png?v=2'}
players['Davies'] = {'player_url': 'https://www.futbin.com//18/player/14547/Keston Davies', 'player_name': 'Keston Davies', 'player_rating': '55', 'player_shortname': 'Davies', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/50.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/225127.png?v=2'}
players['Santos'] = {'player_url': 'https://www.futbin.com//18/player/14738/Santos', 'player_name': 'Santos', 'player_rating': '54', 'player_shortname': 'Santos', 'player_position': 'RM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/54.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/212295.png?v=2'}
players['Browne'] = {'player_url': 'https://www.futbin.com//18/player/15204/Rhys Browne', 'player_name': 'Rhys Browne', 'player_rating': '51', 'player_shortname': 'Browne', 'player_position': 'LW', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/63.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/213156.png?v=2'}
|
990,405 | 23e788a1aa2bac5f57ddf521ea207aebbb35d524 | #!/usr/bin/env python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Copyright (C) 2017 ScyllaDB
import bisect
import collections
import re
import sys
import subprocess
from enum import Enum
from typing import Any
# special binary path/module indicating that the address is from the kernel
KERNEL_MODULE = '<kernel>'
class Addr2Line:
    """Symbolizes addresses inside one binary by driving a long-lived
    addr2line (or llvm-addr2line) subprocess over stdin/stdout pipes."""

    # Matcher for a line that appears at the end a single decoded
    # address, which we force by adding a dummy 0x0 address. The
    # pattern varies between binutils addr2line and llvm-addr2line
    # so we match both.
    dummy_pattern = re.compile(
        r"(.*0x0000000000000000: \?\? \?\?:0\n)" # addr2line pattern
        r"|"
        r"(.*0x0: \?\? at \?\?:0\n)" # llvm-addr2line pattern
    )

    def __init__(self, binary, concise=False, cmd_path="addr2line"):
        """Spawn the resolver pipeline for `binary`.

        In concise mode addr2line's own demangling (-C) is skipped and the
        output is piped through `c++filt -p` instead.
        """
        self._binary = binary
        # Print warning if binary has no debug info according to `file`.
        # Note: no message is printed for system errors as they will be
        # printed also by addr2line later on.
        output = subprocess.check_output(["file", self._binary])
        s = output.decode("utf-8")
        # search past the filename itself so a 'debug_info' substring in the
        # path does not mask a missing-debug-info binary
        if s.find('ELF') >= 0 and s.find('debug_info', len(self._binary)) < 0:
            print('{}'.format(s))
        options = f"-{'C' if not concise else ''}fpia"
        self._input = subprocess.Popen([cmd_path, options, "-e", self._binary], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
        if concise:
            self._output = subprocess.Popen(["c++filt", "-p"], stdin=self._input.stdout, stdout=subprocess.PIPE, universal_newlines=True)
        else:
            self._output = self._input
        # If a library doesn't exist in a particular path, addr2line
        # will just exit. We need to be robust against that. We
        # can't just wait on self._addr2line since there is no
        # guarantee on what timeout is sufficient.
        self._input.stdin.write('\n')
        self._input.stdin.flush()
        res = self._output.stdout.readline()
        self._missing = res == ''

    def _read_resolved_address(self):
        """Read output lines for one address until the dummy-0x0 terminator
        appears; returns the (possibly multi-line) resolved text."""
        res = self._output.stdout.readline()
        # remove the address
        res = res.split(': ', 1)[1]
        line = ''
        while Addr2Line.dummy_pattern.fullmatch(line) is None:
            res += line
            line = self._output.stdout.readline()
        return res

    def __call__(self, address):
        """Resolve one address string; falls back to echoing
        '<binary> <address>' when addr2line could not open the binary."""
        if self._missing:
            return " ".join([self._binary, address, '\n'])
        # We print a dummy 0x0 address after the address we are interested in
        # which we can look for in _read_address
        self._input.stdin.write(address + '\n0x0\n')
        self._input.stdin.flush()
        return self._read_resolved_address()
class KernelResolver:
    """A resolver for kernel addresses which tries to read from /proc/kallsyms."""

    # Heuristic cutoff: an address more than this many bytes beyond the last
    # kallsyms symbol is assumed not to belong to it (kallsyms has no sizes).
    LAST_SYMBOL_MAX_SIZE = 1024

    def __init__(self, kallsyms='/proc/kallsyms'):
        """Load and sort the symbol table; on any failure self.error is set
        and the resolver degrades to echoing addresses unchanged."""
        syms : list[tuple[int, str]] = []
        ksym_re = re.compile(r'(?P<addr>[0-9a-f]+) (?P<type>.+) (?P<name>\S+)')
        warnings_left = 10
        self.error = None
        try:
            f = open(kallsyms, 'r')
        except OSError as e:
            self.error = f'Cannot open {kallsyms}: {e}'
            print(self.error)
            return
        try:
            for line in f:
                m = ksym_re.match(line)
                if not m:
                    if warnings_left > 0: # don't spam too much
                        # NOTE(review): warning goes to sys.stdout, interleaved
                        # with resolved output — stderr may have been intended
                        print(f'WARNING: {kallsyms} regex match failure: {line.strip()}', file=sys.stdout)
                        warnings_left -= 1
                else:
                    syms.append((int(m.group('addr'), 16), m.group('name')))
        finally:
            f.close()
        if not syms:
            # make empty kallsyms (?) an error so we can assume len >= 1 below
            self.error = 'kallsyms was empty'
            print(self.error)
            return
        syms.sort()
        if syms[-1][0] == 0:
            # zero values for all symbols means that kptr_restrict blocked you
            # from seeing the kernel symbol addresses
            print('kallsyms is restricted, set /proc/sys/kernel/kptr_restrict to 0 to decode')
            self.error = 'kallsyms is restricted'
            return
        # split because bisect can't take a key func before 3.10
        self.sym_addrs : tuple[int]
        self.sym_names : tuple[str]
        self.sym_addrs, self.sym_names = zip(*syms) # type: ignore

    def __call__(self, addrstr):
        """Resolve one hex address string to 'symbol+0xoffset\\n', or return a
        diagnostic/unchanged string when resolution is not possible."""
        if self.error:
            return addrstr + '\n'
        sa = self.sym_addrs
        sn = self.sym_names
        slen = len(sa)
        address = int(addrstr, 16)
        # index of the last symbol at or below the address
        idx = bisect.bisect_right(sa, address) - 1
        assert -1 <= idx < slen
        if idx == -1:
            return f'{addrstr} ({sa[0] - address} bytes before first symbol)\n'
        if idx == slen - 1:
            # We can easily detect symbol addresses which are too small: they fall before
            # the first symbol in kallsyms, but for too large it is harder: we can't really
            # distinguish between an address that is in the *very last* function in the symbol map
            # and one which is beyond that, since kallsyms doesn't include symbol size. Instead
            # we use a bit of a quick and dirty heuristic: if the symbol is *far enough* beyond
            # the last symbol we assume it is not valid. Most likely, the overwhelming majority
            # of cases are invalid (e.g., due to KASLR) as the final symbol in the map is usually
            # something obscure.
            lastsym = sa[-1]
            if address - lastsym > self.LAST_SYMBOL_MAX_SIZE:
                return f'{addrstr} ({address - lastsym} bytes after last symbol)\n'
        saddr = sa[idx]
        assert saddr <= address
        return f'{sn[idx]}+0x{address - saddr:x}\n'
class BacktraceResolver(object):
    """Streams log lines, detects backtrace addresses in them, and prints the
    backtraces with each address symbolized via Addr2Line / KernelResolver.

    Fixes vs. the previous revision: the final RuntimeError message was missing
    its f-prefix (it printed the literal text "{res}"), and the regex string
    literals are now raw strings (identical values, but no SyntaxWarning for
    escapes like \\S on Python 3.12+).
    """

    class BacktraceParser(object):
        """Classifies a single input line as a set of addresses, a separator,
        or neither, using a family of regexes for the known trace formats."""

        class Type(Enum):
            ADDRESS = 1
            SEPARATOR = 2

        def __init__(self):
            addr = r"0x[0-9a-f]+"
            path = r"\S+"
            token = fr"(?:{path}\+)?{addr}"
            full_addr_match = fr"(?:(?P<path>{path})\s*\+\s*)?(?P<addr>{addr})"
            ignore_addr_match = fr"(?:(?P<path>{path})\s*\+\s*)?(?:{addr})"
            self.oneline_re = re.compile(fr"^((?:.*(?:(?:at|backtrace):?|:))?(?:\s+))?({token}(?:\s+{token})*)(?:\).*|\s*)$", flags=re.IGNORECASE)
            self.address_re = re.compile(full_addr_match, flags=re.IGNORECASE)
            self.syslog_re = re.compile(fr"^(?:#\d+\s+)(?P<addr>{addr})(?:.*\s+)\({ignore_addr_match}\)\s*$", flags=re.IGNORECASE)
            self.kernel_re = re.compile(fr'^.*kernel callstack: (?P<addrs>(?:{addr}\s*)+)$')
            self.asan_re = re.compile(fr"^(?:.*\s+)\({full_addr_match}\)(\s+\(BuildId: [0-9a-fA-F]+\))?$", flags=re.IGNORECASE)
            self.asan_ignore_re = re.compile("^=.*$", flags=re.IGNORECASE)
            self.generic_re = re.compile(fr"^(?:.*\s+){full_addr_match}\s*$", flags=re.IGNORECASE)
            self.separator_re = re.compile(r'^\W*-+\W*$')

        def split_addresses(self, addrstring: str, default_path=None):
            """Split a whitespace-separated list of `[path+]addr` tokens into
            {'path', 'addr'} dicts; `path` defaults to `default_path`."""
            addresses : list[dict[str, Any]] = []
            for obj in addrstring.split():
                m = re.match(self.address_re, obj)
                assert m, f'addr did not match address regex: {obj}'
                #print(f"  >>> '{obj}': address {m.groups()}")
                addresses.append({'path': m.group(1) or default_path, 'addr': m.group(2)})
            return addresses

        def __call__(self, line):
            """Classify one line.

            Returns a dict with 'type' (plus 'prefix' and 'addresses' for
            ADDRESS lines), or None when the line is not part of a backtrace.
            """
            def get_prefix(s):
                if s is not None:
                    s = s.strip()
                return s or None
            # order here is important: the kernel callstack regex
            # needs to come first since it is more specific and would
            # otherwise be matched by the online regex which comes next
            m = self.kernel_re.match(line)
            if m:
                return {
                    'type': self.Type.ADDRESS,
                    'prefix': 'kernel callstack: ',
                    'addresses' : self.split_addresses(m.group('addrs'), KERNEL_MODULE)
                }
            m = re.match(self.oneline_re, line)
            if m:
                #print(f">>> '{line}': oneline {m.groups()}")
                return {
                    'type': self.Type.ADDRESS,
                    'prefix': get_prefix(m.group(1)),
                    'addresses': self.split_addresses(m.group(2))
                }
            m = re.match(self.syslog_re, line)
            if m:
                #print(f">>> '{line}': syslog {m.groups()}")
                ret = {'type': self.Type.ADDRESS}
                ret['prefix'] = None
                ret['addresses'] = [{'path': m.group('path'), 'addr': m.group('addr')}]
                return ret
            m = re.match(self.asan_ignore_re, line)
            if m:
                #print(f">>> '{line}': asan ignore")
                return None
            m = re.match(self.asan_re, line)
            if m:
                #print(f">>> '{line}': asan {m.groups()}")
                ret = {'type': self.Type.ADDRESS}
                ret['prefix'] = None
                ret['addresses'] = [{'path': m.group('path'), 'addr': m.group('addr')}]
                return ret
            m = re.match(self.generic_re, line)
            if m:
                #print(f">>> '{line}': generic {m.groups()}")
                ret = {'type': self.Type.ADDRESS}
                ret['prefix'] = None
                ret['addresses'] = [{'path': m.group('path'), 'addr': m.group('addr')}]
                return ret
            match = re.match(self.separator_re, line)
            if match:
                return {'type': self.Type.SEPARATOR}
            #print(f">>> '{line}': None")
            return None

    def __init__(self, executable, kallsyms='/proc/kallsyms', before_lines=1, context_re='', verbose=False, concise=False, cmd_path='addr2line'):
        self._executable = executable
        self._kallsyms = kallsyms
        self._current_backtrace = []
        self._prefix = None
        self._before_lines = before_lines
        self._before_lines_queue = collections.deque(maxlen=before_lines)
        self._i = 0
        self._known_backtraces = {}
        # note: the default context_re of '' compiles to a regex matching every
        # line; passing None disables context filtering entirely
        if context_re is not None:
            self._context_re = re.compile(context_re)
        else:
            self._context_re = None
        self._verbose = verbose
        self._concise = concise
        self._cmd_path = cmd_path
        self._known_modules = {}
        self._get_resolver_for_module(self._executable) # fail fast if there is something wrong with the exe resolver
        self.parser = self.BacktraceParser()

    def _get_resolver_for_module(self, module):
        """Return (creating and caching on first use) the resolver for module."""
        if module not in self._known_modules:
            if module == KERNEL_MODULE:
                resolver = KernelResolver(kallsyms=self._kallsyms)
            else:
                resolver = Addr2Line(module, self._concise, self._cmd_path)
            self._known_modules[module] = resolver
        return self._known_modules[module]

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        # flush any backtrace still being accumulated when the stream ends
        self._print_current_backtrace()

    def resolve_address(self, address, module=None, verbose=None):
        """Resolve one address (defaulting to the main executable's module);
        verbose mode prepends the module and raw address."""
        if module is None:
            module = self._executable
        if verbose is None:
            verbose = self._verbose
        resolved_address = self._get_resolver_for_module(module)(address)
        if verbose:
            resolved_address = '{{{}}} {}: {}'.format(module, address, resolved_address)
        return resolved_address

    def _print_resolved_address(self, module, address):
        sys.stdout.write(self.resolve_address(address, module))

    def _backtrace_context_matches(self):
        """True when no context filter is set, or when the filter matches the
        buffered preceding lines or the backtrace prefix."""
        if self._context_re is None:
            return True
        if any(map(lambda x: self._context_re.search(x) is not None, self._before_lines_queue)):
            return True
        if self._prefix is not None and self._context_re.search(self._prefix):
            return True
        return False

    def _print_current_backtrace(self):
        """Emit the accumulated backtrace (resolving each frame), de-duplicating
        backtraces already seen, then reset the accumulation state."""
        if len(self._current_backtrace) == 0:
            return
        if not self._backtrace_context_matches():
            self._current_backtrace = []
            return
        for line in self._before_lines_queue:
            sys.stdout.write(line)
        if self._prefix is not None:
            print(self._prefix)
            self._prefix = None
        backtrace = "".join(map(str, self._current_backtrace))
        if backtrace in self._known_backtraces:
            print("[Backtrace #{}] Already seen, not resolving again.".format(self._known_backtraces[backtrace]))
            print("") # To separate traces with an empty line
            self._current_backtrace = []
            return
        self._known_backtraces[backtrace] = self._i
        print("[Backtrace #{}]".format(self._i))
        for module, addr in self._current_backtrace:
            self._print_resolved_address(module, addr)
        print("") # To separate traces with an empty line
        self._current_backtrace = []
        self._i += 1

    def __call__(self, line):
        """Feed one input line; buffers backtrace addresses and echoes or
        resolves output according to the parsed line type."""
        res = self.parser(line)
        if not res:
            self._print_current_backtrace()
            if self._before_lines > 0:
                self._before_lines_queue.append(line)
            elif self._before_lines < 0:
                sys.stdout.write(line) # line already has a trailing newline
            else:
                pass # when == 0 no non-backtrace lines are printed
        elif res['type'] == self.BacktraceParser.Type.SEPARATOR:
            pass
        elif res['type'] == self.BacktraceParser.Type.ADDRESS:
            addresses = res['addresses']
            # a multi-address line is a complete backtrace on its own
            if len(addresses) > 1:
                self._print_current_backtrace()
            if len(self._current_backtrace) == 0:
                self._prefix = res['prefix']
            for r in addresses:
                if r['path']:
                    self._current_backtrace.append((r['path'], r['addr']))
                else:
                    self._current_backtrace.append((self._executable, r['addr']))
            if len(addresses) > 1:
                self._print_current_backtrace()
        else:
            print(f"Unknown '{line}': {res}")
            # BUG FIX: this message previously lacked the f-prefix and printed
            # the literal text "{res}"
            raise RuntimeError(f"Unknown result type {res}")
|
990,406 | 40e4e112c31e2fc69bdde9e0fbb3ce2daeb6778e |
# Count the candidate values in [264793, 803935] whose digits never decrease
# left-to-right and which contain at least one digit repeated exactly twice
# (a run of length 2, not part of a longer run).
howmany = 0
for num in range(264793, 803935 + 1):
    digits = str(num)
    # monotonically non-decreasing digits
    if all(digits[i] <= digits[i + 1] for i in range(5)):
        # run-length scan looking for a run of exactly two equal digits
        run_length = 1
        has_double = False
        for i in range(1, 6):
            if digits[i] == digits[i - 1]:
                run_length += 1
            else:
                if run_length == 2:
                    has_double = True
                run_length = 1
        if run_length == 2:
            has_double = True
        if has_double:
            howmany += 1
print(howmany)
|
990,407 | 3cfc3df40cdb0322728fcbaf7db450eba4fadf80 | import json
import requests
from urllib.request import urlopen
from Forecast.Forecast import Forecast
from Actual.Actual import Actual
class Buienradar:
    """Wraps the public buienradar.nl JSON feed in Forecast/Actual objects."""

    def __init__(self):
        # Fetches live data immediately on construction (network I/O).
        self.get_json_data()

    def get_json_data(self):
        """Download and parse the buienradar JSON feed, then populate the
        Forecast/Actual attributes via set_json_data()."""
        url = 'https://api.buienradar.nl/data/public/2.0/jsonfeed'
        response = urlopen(url)
        data = response.read()
        # decode using the charset reported by the HTTP response headers
        encoding = response.info().get_content_charset()
        html = data.decode(encoding)
        json_data = json.loads(html)
        self.set_json_data(json_data)

    def set_json_data(self, json):
        # NOTE(review): the parameter name `json` shadows the json module
        # inside this method — rename candidate (e.g. `data`).
        forecast_data = json['forecast']
        actual_data = json['actual']
        # Capitalized attribute names mirror the wrapped class names.
        self.Forecast = Forecast(forecast_data)
        self.Actual = Actual(actual_data)
# Demo: fetch live data once (network I/O happens here, on import) and print
# a few forecast and actual-weather fields.
buienradar = Buienradar()
print(buienradar.Forecast.Fivedayforecast.Day_one.day)
print(buienradar.Forecast.Fivedayforecast.Day_three.maxtemperature)
print(buienradar.Forecast.Shortterm.forecast)
print(buienradar.Forecast.Longterm.forecast)
print(buienradar.Actual.sunrise)
print(buienradar.Actual.sunset)
print(buienradar.Actual.station_by_name("arnhem"))
print(buienradar.Actual.station_by_id(6330))
990,408 | 95b44ba00152ae08bdfae7abf18d92885f8489bf | ############################################################################################################
# TV Data source: nl_TVGids
# Notes:
# Provides Dutch tv data.
# Changelog:
# 15-11-06 Fix: get description regex
# 07-03-08 Updated & Fix for myTV v1.18
############################################################################################################
from mytvLib import *
import xbmcgui, re, time
from os import path
# Translate genre to english (Dutch site genre -> English display genre)
GENRE_TRANSLATIONS = {
    "Serie" : "Series",
    "Amusement":"Entertainment",
    "Animatie":"Animation",
    "Documentaire":"Documentary",
    "Erotiek":"Adult",
    "Informatief":"Interests",
    "Jeugd":"Children",
    "Kunst/Cultuur":"Music and Arts",
    "Misdaad":"Drama",
    "Muziek":"Music",
    "Natuur":"Documentary",
    "Nieuws/Actualiteiten":"Interests",
    "Theater":"Drama",
    "Wetenschap":"Special"
    }
# Add to this list genres (in english) you wish datasource to additionally lookup.
# Each genre added will potentially slow the datasource.
# NOTE(review): despite the comment, these values are passed to the site's genre
# query parameter and "Serie" is the Dutch key above — confirm expected language.
GENRES_TO_FIND = ["Film","Sport","Serie"]
class ListingData:
    def __init__(self, cache):
        # cache: directory where the channel list and downloaded pages are stored.
        debug("ListingData.__init__")
        self.cache = cache
        # NOTE(review): this module only does `from os import path`, so `os`
        # presumably leaks in via `from mytvLib import *` — confirm.
        self.name = os.path.splitext(os.path.basename( __file__))[0] # get filename without path & ext
        self.BASE_URL = "http://www.tvgids.nl"
        self.channelListURL = self.BASE_URL + "/index.php"
        self.channelURL = self.BASE_URL + "/zoeken/?trefwoord="
        self.CHANNELS_FILENAME = os.path.join(cache,"Channels_"+ self.name + ".dat")
        # $STATION / $GENRE are placeholders substituted when a genre URL is built
        self.GENRE_URL = self.BASE_URL + "?trefwoord=&station=$STATION&dagdeel=0.0&genre=$GENRE"
        # Browser-like headers sent with every fetch from tvgids.nl
        self.HEADERS = {'Accept':'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
                        'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.9) Gecko/20071025'}
def getName(self):
return self.name
# download or load if exists, a list of all available channels.
# return: list [chID, chName]
def getChannels(self):
debug("ListingData.getChannels()")
debug("getChannels()")
regex = 'option value="(\d+)" >(.+?)<'
return getChannelsLIB(self.channelListURL, self.CHANNELS_FILENAME, regex, \
startStr="Hoofdzenders", headers=self.HEADERS)
# download channel data, using either dayDelta or dataDate.
# filename = filename to save downloaded data file as.
# chID = unique channel ID, used to reference channel in URL.
# chName = display name of channel
# dayDelta = day offset from today. ie 0 is today, 1 is tomorrow ...
# fileDate = use to calc programme start time in secs since epoch.
# return Channel class or -1 if http fetch error, or None for other
def getChannel(self, filename, chID, chName, dayDelta, fileDate):
debug("ListingData.getChannel() dayDelta: %s chID=%s fileDate=%s" % (dayDelta,chID,fileDate))
progList = []
lastStartTime = 0
# download data file if file doesnt exists
dataFilename = os.path.join(self.cache, "%s_%s.html" % (chID, fileDate))
url = self.channelURL + "&station=" + chID+"&dagdeel="+str(dayDelta)+'.0'
if not fileExist(dataFilename):
doc = fetchCookieURL(url, dataFilename, headers=self.HEADERS)
else:
doc = readFile(dataFilename)
# check for timout, exception error, or empty page
if not doc:
return doc
doc = doc.decode('latin-1')
# fetch listings filtered by genre, each one added here will slow down the whole process
# extract prog link as key
regex = '>\d+:\d+ - \d+:\d+<.*?href=\"(.*?)\"'
genreURL = self.channelURL + "&station=&dagdeel="+str(dayDelta)+'.0'
progsGenreDict = {}
for genre in GENRES_TO_FIND:
genreFilename = os.path.join(self.cache, "%s_%s_%s.dat" % (self.name,genre,fileDate)) # all channels
progsList = self._findChannelGenre(genreURL + "&genre="+genre, regex, genreFilename)
# make prog the key and save genre against it
for prog in progsList:
progsGenreDict[prog] = self.translateGenre(genre)
# print "progsGenreDict=", progsGenreDict
# get days listing page (no genre)
regex = ">(\d+:\d+) - (\d+:\d+)</th>.*?href=\"(.*?)\">(.*?)<"
matches = findAllRegEx(doc, regex)
for match in matches:
try:
startTime = match[0]
endTime = match[1]
link = match[2]
title = cleanHTML(decodeEntities(match[3]))
# prog stored with a genre ?
try:
progGenre = progsGenreDict[link]
except:
progGenre = ""
descLink = decodeEntities(link)
if descLink:
descLink = self.BASE_URL + descLink
# convert starttime to secs since epoch
startSecsEpoch = startTimeToSecs(lastStartTime, startTime, fileDate)
lastStartTime = startSecsEpoch
endSecsEpoch = startTimeToSecs(lastStartTime, endTime, fileDate)
# print title, startTime, progGenre
progList.append( {
TVData.PROG_STARTTIME : float(startSecsEpoch),
TVData.PROG_ENDTIME : float(endSecsEpoch),
TVData.PROG_TITLE : title,
TVData.PROG_GENRE : progGenre,
TVData.PROG_DESCLINK : descLink
} )
except:
print "bad programme scrape", match
# return progList
return progList
# Translate language genre into english.
def translateGenre(self, genre):
try:
return GENRE_TRANSLATIONS[genre]
except:
return genre
# load page based on a genre, store prog
def _findChannelGenre(self, url, regex, genreFilename):
debug("> _findChannelGenre()")
progsList = []
doc = ""
if not fileExist(genreFilename):
doc = fetchCookieURL(url, genreFilename, headers=self.HEADERS)
else:
doc = readFile(genreFilename)
if doc:
progsList = findAllRegEx(doc, regex)
debug("< _findChannelGenre() found=" + str(len(progsList)))
return progsList
#
# Download url and regex parse it to extract description.
#
def getLink(self, link, title):
debug("ListingData.getLink()")
return getDescriptionLink(link, "<span>"+title+"</span>(?:.+?)<p>(.+?)</", headers=self.HEADERS)
|
990,409 | 02da39613270126c756cbe636b222bbba56efe0c | # -*- coding: utf-8 -*-
import requests
import json
# Browser-impersonating User-Agent string used in the request headers below.
userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
# Static HTTP headers for the CMS login endpoint.
# NOTE(review): Content-Length is hard-coded to '39' — most HTTP clients
# (including requests) compute it automatically; verify it is not stale.
header = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Length': '39',
    'Content-Type': 'application/json;charset=UTF-8',
    'Host': 'pedro.7yue.pro',
    'Pragma': 'no-cache',
    'Origin': 'http://face.cms.7yue.pro',
    "Referer": "http://face.cms.7yue.pro/",
    'User-Agent': userAgent,
}
def login(account, password):
    """POST the given credentials to the CMS login endpoint and print the
    HTTP status code and response body."""
    url = "http://pedro.7yue.pro/cms/user/login"
    payload = json.dumps({"username": account, "password": password})
    response = requests.post(url, data=payload, headers=header)
    print(f"statusCode = {response.status_code}")
    print(f"text = {response.text}")

if __name__ == "__main__":
    login("root", "123456")
|
990,410 | 68f42e9a437f8e6b21a93143dca6191b7f13fb0c | import subprocess
import time
import os
import pandas as pd
import numpy as np
import datetime
import pyautogui
pyautogui.PAUSE = 0.3      # pause 0.3s after every pyautogui call
pyautogui.FAILSAFE = True  # slamming the mouse into a screen corner aborts
import configparser
config = configparser.ConfigParser()
config.read('config.cfg')  # expects a [CREDENTIALS] section with VPN_PASSWORD
import argparse
def main():
    """Launch FortiClient and drive its GUI with pyautogui to connect the VPN.

    Reads the VPN password from config.cfg ([CREDENTIALS]/VPN_PASSWORD) and
    polls the screen for the 'connected' indicator image, giving up after
    about 3 minutes.

    :raises ValueError: if the connection indicator never appears.
    """
    # FIX: use a raw string for the Windows path. The previous non-raw literal
    # only worked because "\P"/"\F" happen not to be escape sequences; invalid
    # escapes are deprecated and raise SyntaxWarning on modern Python.
    subprocess.Popen(r"C:\Program Files\Fortinet\FortiClient\FortiClient.exe")
    time.sleep(5)  # give the client window time to appear
    # move to center of the screen, then down to the password field
    pyautogui.moveTo(pyautogui.size().width / 2,
                     pyautogui.size().height / 2)
    pyautogui.moveRel(0, 170)
    pyautogui.click()
    pyautogui.typewrite(config['CREDENTIALS']['VPN_PASSWORD'])
    pyautogui.moveRel(0, 60)  # connect button sits below the password field
    pyautogui.click()
    # poll for the "connected" indicator every 30s, at most 6 times (3 minutes)
    vpn_connected = pyautogui.locateOnScreen('locations/vpn_connected.png')
    retry = 0
    while not vpn_connected:
        time.sleep(30)
        vpn_connected = pyautogui.locateOnScreen('locations/vpn_connected.png')
        retry += 1
        if retry == 6:
            raise ValueError("wait for connected more than 3 minutes, abort the job")
    print('SUCCESS : Connected to VPN')

if __name__ == "__main__":
    main()
990,411 | 4c645efc5f72ba643ee4b32f21160adb312607cf | from collections import defaultdict
import torch
import numpy as np
from utils.utils import time_it
from algorithms.agents.base_agent import AgentTrain
class PPO(AgentTrain):
    """Proximal Policy Optimization implementation

    This class contains core PPO methods:
        several training epochs on one rollout,
        splitting rollout into mini-batches,
        policy and (optional) value clipping.
    In addition this class may use rollback policy loss
    and can recompute advantage
    """
    def __init__(
            self,
            *args,
            ppo_n_epoch,
            ppo_n_mini_batches,
            ppo_epsilon=0.2,
            use_ppo_value_loss=False,
            rollback_alpha=0.05,
            recompute_advantage=False,
            **kwargs
    ):
        """
        PPO algorithm class

        :param args: PolicyGradient class args
        :param ppo_n_epoch: int, number of training epoch on one rollout
        :param ppo_n_mini_batches: int, number of mini-batches into which
               the training data is divided during one epoch
        :param ppo_epsilon: float, policy (and optionally value) clipping parameter
        :param use_ppo_value_loss: bool, switches value loss function
               from PPO-like clipped (True) or simple MSE (False).
               Currently only MSE loss is supported.
        :param rollback_alpha: float, policy-rollback loss parameter.
               Rollback is turned on if rollback_alpha > 0
        :param recompute_advantage: bool, if True the returns and advantage
               will be recomputed after each nn update
        :param kwargs:
        """
        super().__init__(*args, **kwargs)
        self.ppo_epsilon = ppo_epsilon
        self.use_ppo_value_loss = use_ppo_value_loss
        self.recompute_advantage = recompute_advantage
        self.rollback_alpha = rollback_alpha

        self.ppo_n_epoch = ppo_n_epoch
        self.ppo_n_mini_batches = ppo_n_mini_batches

    def _policy_loss(self, policy_old, policy, actions, advantage):
        # policy_old already holds the behaviour policy's log-probs for `actions`
        log_pi_for_actions_old = policy_old
        log_pi_for_actions = self.policy_distribution.log_prob(policy, actions)
        log_prob_ratio = log_pi_for_actions - log_pi_for_actions_old
        # in-place clamp keeps exp() from overflowing on wildly off-policy samples
        log_prob_ratio.clamp_max_(20)

        prob_ratio = log_prob_ratio.exp()
        if self.rollback_alpha > 0:
            policy_loss = self._rollback_loss(prob_ratio, advantage)
        else:
            policy_loss = self._clipped_loss(prob_ratio, advantage)

        policy_loss = policy_loss.mean()
        # NOTE(review): policy_loss is already a scalar here, so the second
        # .mean() below is redundant (but harmless).
        return policy_loss.mean()

    def _clipped_loss(self, prob_ratio, advantage):
        # standard PPO clipped surrogate objective (maximized, so min of the two)
        prob_ratio_clamp = torch.clamp(
            prob_ratio, 1.0 - self.ppo_epsilon, 1.0 + self.ppo_epsilon
        )
        surrogate_1 = prob_ratio * advantage
        surrogate_2 = prob_ratio_clamp * advantage
        policy_loss = torch.min(surrogate_1, surrogate_2)
        return policy_loss

    def _rollback_loss(self, prob_ratio, advantage):
        # rollback variant: outside the trust region the objective slopes back
        # (slope -alpha) instead of going flat, actively pushing the ratio back
        eps = self.ppo_epsilon
        alpha = self.rollback_alpha

        pos_adv_rollback = torch.where(
            prob_ratio <= 1.0 + eps,
            prob_ratio,
            -alpha * prob_ratio + (1.0 + alpha) * (1.0 + eps)
        )
        neg_adv_rollback = torch.where(
            prob_ratio >= 1.0 - eps,
            prob_ratio,
            -alpha * prob_ratio + (1.0 + alpha) * (1.0 - eps)
        )
        policy_loss = advantage * torch.where(
            advantage >= 0,
            pos_adv_rollback,
            neg_adv_rollback
        )
        return policy_loss

    def _clipped_value_loss(self, values_old, values, returns):
        # clipped value loss, PPO-style
        clipped_value = values_old + torch.clamp(
            (values - values_old), -self.ppo_epsilon, self.ppo_epsilon
        )
        surrogate_1 = (values - returns) ** 2
        surrogate_2 = (clipped_value - returns) ** 2
        value_loss = 0.5 * torch.max(surrogate_1, surrogate_2)
        return value_loss.mean()

    @staticmethod
    def _mse_value_loss(values, returns):
        # simple MSE loss, works better than clipped PPO-style
        value_loss = 0.5 * ((values - returns) ** 2)
        return value_loss.mean()

    def _value_loss(self, values_old, values, returns):
        # NOTE(review): currently dead code — _calc_losses calls
        # _mse_value_loss directly (see below).
        if self.use_ppo_value_loss:
            value_loss = self._clipped_value_loss(values_old, values, returns)
        else:
            value_loss = self._mse_value_loss(values, returns)
        return value_loss

    def _calc_losses(
            self,
            policy_old,
            policy, values, actions,
            returns, advantage
    ):
        # value_loss = self._value_loss(values_old, values, returns)
        value_loss = self._mse_value_loss(values, returns)
        policy_loss = self._policy_loss(policy_old, policy, actions, advantage)
        entropy = self.policy_distribution.entropy(policy, actions).mean()
        # minimize value loss, maximize surrogate objective and entropy;
        # NOTE(review): self.entropy is presumably the entropy-bonus
        # coefficient set by the base class — confirm.
        loss = value_loss - policy_loss - self.entropy * entropy
        return value_loss, policy_loss, entropy, loss

    @time_it
    def _ppo_train_step(
            self,
            step,
            rollout_t, row, col,
            policy_old, returns, advantage
    ):
        """One mini-batch optimization step; (row, col) select the
        (time, batch) transitions belonging to this mini-batch."""
        observations, actions, rewards, not_done = rollout_t
        # 1) call nn, recompute returns and advantage if needed
        # advantage always computed by training net,
        # so it is unnecessary to recompute adv at the first train-op
        if self.recompute_advantage and step != 0:
            # to compute returns and advantage we _have_ to call nn.forward(...) on full data
            policy, value, returns, advantage = self._compute_returns(observations, rewards, not_done)
            policy, value = policy[row, col], value[row, col]
        else:
            # here we can call nn.forward(...) only on interesting data
            # observations[row, col] has only batch dimension =>
            # need to unsqueeze and squeeze back
            policy, value = self.actor_critic_nn(observations[row, col].unsqueeze(0))
            policy, value = policy.squeeze(0), value.squeeze(0)

        # 2) calculate losses
        value_loss, policy_loss, entropy, loss = self._calc_losses(
            policy_old[row, col],
            policy, value,
            actions[row, col], returns[row, col], advantage[row, col]
        )

        # 3) calculate image_aug loss if needed
        if self.image_augmentation_alpha > 0.0:
            # divergences are computed against detached predictions so the
            # augmentation term only regularizes, it does not move the targets
            (policy_div, value_div), img_aug_time = self._augmentation_loss(
                policy.detach().unsqueeze(0),
                value.detach().unsqueeze(0),
                observations[row, col].unsqueeze(0)
            )
            loss += self.image_augmentation_alpha * (policy_div + value_div)
            upd = {
                'policy_div': policy_div.item(),
                'value_div': value_div.item(),
                'img_aug_time': img_aug_time
            }

        # optimize
        grad_norm = self._optimize_loss(loss)

        # 4) store training results in dict and return
        result = {
            'value_loss': value_loss.item(),
            'policy_loss': policy_loss.item(),
            'entropy': entropy.item(),
            'loss': loss.item(),
            'grad_norm': grad_norm
        }
        if self.image_augmentation_alpha > 0.0:
            # noinspection PyUnboundLocalVariable
            result.update(upd)
        return result

    @time_it
    def _ppo_epoch(
            self,
            rollout_t, time, batch,
            policy_old, returns, advantage
    ):
        # goes once trough rollout
        epoch_result = defaultdict(float)
        mean_train_op_time = 0

        # select indices to train on during epoch:
        # shuffle all (time * batch) transitions, then slice into mini-batches
        n_transitions = time * batch
        flatten_indices = np.arange(n_transitions)
        np.random.shuffle(flatten_indices)

        num_batches = self.ppo_n_mini_batches
        batch_size = n_transitions // num_batches

        for step, start_id in enumerate(range(0, n_transitions, batch_size)):
            indices_to_train_on = flatten_indices[start_id:start_id + batch_size]
            # unflatten: row = time index, col = batch index
            row = indices_to_train_on // batch
            col = indices_to_train_on - batch * row
            train_op_result, train_op_time = self._ppo_train_step(
                step,
                rollout_t, row, col,
                policy_old, returns, advantage
            )
            for key, value in train_op_result.items():
                epoch_result[key] += value / num_batches
            mean_train_op_time += train_op_time
        return epoch_result, mean_train_op_time / num_batches

    def _train_fn(self, rollout):
        """
        :param rollout: tuple (observations, actions, rewards, is_done, log_probs),
               where each one is np.array of shape [time, batch, ...] except observations,
               observations of shape [time + 1, batch, ...]
               I want to store 'log_probs' inside rollout
               because online policy (i.e. the policy gathered rollout)
               may differ from the trained policy
        :return: (loss_dict, time_dict)
        """
        # 'done' converts into 'not_done' inside '_rollout_to_tensors' method
        observations, actions, rewards, not_done, policy_old = self._rollout_to_tensors(rollout)
        time, batch = actions.size()[:2]
        rollout_t = (observations, actions, rewards, not_done)

        result_log = defaultdict(float)
        mean_epoch_time = 0
        mean_train_op_time = 0
        time_log = dict()

        # returns/advantage for the whole rollout, computed once up front
        # (may be recomputed per step if recompute_advantage is set)
        with torch.no_grad():
            _, _, returns, advantage = self._compute_returns(
                observations, rewards, not_done
            )

        n = self.ppo_n_epoch
        for ppo_epoch in range(n):
            (epoch_result, mean_train_op_time), epoch_time = self._ppo_epoch(
                rollout_t, time, batch, policy_old, returns, advantage
            )
            for key, value in epoch_result.items():
                result_log[key] += value / n
            mean_epoch_time += epoch_time

        time_log['mean_ppo_epoch'] = mean_epoch_time / n
        time_log['mean_train_op'] = mean_train_op_time / n
        if self.image_augmentation_alpha > 0.0:
            time_log['img_aug'] = result_log.pop('img_aug_time')
        return result_log, time_log

    # TODO: recurrence:
    #  1) masking in _train_fn
    #  2) loss averaging, i.e. (mask * loss).sum() / mask.sum()
    #  3) index select fn, select indices randomly (like now) for feed-forward model,
    #  4) select only rows for recurrent model. Look how it is done in iKostrikov repo
    @staticmethod
    def _mask_after_done(done):
        # Mask that is 1.0 up to (and including) the first 'done' of each
        # episode in a column and 0.0 afterwards.
        # NOTE(review): `pad` is 1-D [batch] while done_sum is 2-D
        # [time-1, batch]; torch.cat along dim 0 requires equal ranks, so
        # pad.unsqueeze(0) looks intended — confirm (helper appears unused).
        pad = torch.zeros(done.size(1), dtype=torch.float32, device=done.device)
        done_sum = torch.cumsum(done, dim=0)[:-1]
        done_sum = torch.cat([pad, done_sum], dim=0)
        # noinspection PyTypeChecker
        mask = 1.0 - done_sum.clamp_max(1.0)
        return mask
|
# https://codeforces.com/problemset/problem/1099/A
# Snowball with weight w rolls down from height h. Each step it first gains
# weight equal to its current height, then loses weight if it hits a stone
# (u1 at height d1, u2 at height d2; weight never drops below 0), then
# descends one unit. Print the final weight at height 0.
w, h = map(int, input().split())
u1, d1 = map(int, input().split())
u2, d2 = map(int, input().split())
while h > 0:
    w += h                  # gain weight equal to the current height
    if d1 == h:
        w = max(w - u1, 0)  # hit the first stone; weight cannot go negative
    if d2 == h:
        w = max(w - u2, 0)  # hit the second stone
    h -= 1
print(w)
990,413 | 971abbc55da014c655ed52d23df82bf594a19017 | # -*- coding:utf-8 -*-
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.http import Request,FormRequest
import sys
# Python 2 only: re-expose setdefaultencoding (removed by site.py) and force
# utf-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding('utf-8')
# Base URL; the user profile links scraped below are site-relative paths.
host='http://www.zhihu.com'
class ZhihuUserSpider(CrawlSpider):
    """Scrapy crawl spider that logs into zhihu.com and scrapes user profiles
    reachable from the people-lookup pages."""
    name = 'zhihu_user'
    allowed_domains = ['zhihu.com']
    start_urls = ["http://www.zhihu.com/lookup/people",]
    # When using crawl rules, do not define a parse() method
    # (CrawlSpider uses parse() internally to apply the rules).
    rules = (
        Rule(SgmlLinkExtractor(allow=("/lookup/class/[^/]+/?$", )), follow=True,callback='parse_item'),
        Rule(SgmlLinkExtractor(allow=("/lookup/class/$", )), follow=True,callback='parse_item'),
        Rule(SgmlLinkExtractor(allow=("/lookup/people", )), callback='parse_item'),
    )
    # def __init__(self, *a, **kwargs):
    #    super(ZhihuLoginSpider, self).__init__(*a, **kwargs)
    def __init__(self):
        # NOTE(review): HEADER and COOKIES are not defined anywhere in this
        # file — verify they are provided elsewhere before running.
        self.headers =HEADER
        self.cookies =COOKIES
    def start_requests(self):
        # Log in first; crawling of start_urls only begins after login succeeds.
        return [FormRequest(
            "http://www.zhihu.com/login",
            formdata = {'email':'448186083@qq.com',
                        'password':'hshy1987'
            },
            callback = self.after_login
        )]
    def after_login(self, response):
        # kick off the normal crawl now that the session is authenticated
        for url in self.start_urls:
            yield self.make_requests_from_url(url)
    def parse_item(self, response):
        selector = Selector(response)
        for link in selector.xpath('//div[@id="suggest-list-wrap"]/ul/li/div/a/@href').extract():
            # link ===> /people/javachen
            yield Request(host+link+"/about", callback=self.parse_user)
    def parse_user(self, response):
        selector = Selector(response)
        # scrape user info here; code omitted in the original source
# Read n, then lines of integers (Python 2: raw_input); keep the last line read.
while True:
    try:
        n=int(raw_input())
        num=[]
        for i in range(n):
            # NOTE(review): num is overwritten on every iteration, so only the
            # last of the n input lines is kept — confirm this is intended.
            num= list(map(int, raw_input().split()))
        break
    except:
        break
if 1 <=len(num)<=100000 :
    # Collect values whose parity differs from their index parity
    # (odd value at even index, or even value at odd index).
    # NOTE(review): both branches append to `odd`; `eve` is never used.
    odd,eve=[],[]
    for i in range(n):
        if i%2==0 and num[i]%2!=0:
            odd.append(num[i])
        elif i%2!=0 and num[i]%2==0:
            odd.append(num[i])
    for k in odd:
        print k,
else:
    print "invalid"
|
990,415 | d1ec22d84e639f89dbf6f41b0465e09a98fa8494 | x=list(input())
y=list(input())
z=[]
for i in x:
if i in y:
z.append(i)
print(''.join(z))
|
990,416 | 9806b2ac736d31cef46eea61d00ad9746db36bb1 | x = 50
def func():
global x
x=55
print(f'Global X from function x = {x}')
print(f'X before calling func() with global X. x = {x}')
func()
print(f'X after func() x = {x}') |
990,417 | 193faf016735525faf99b1fe7be57402900f71d2 | """ Layer service """
from RWAPIMicroservicePython import request_to_microservice
from geetiles.errors import LayerNotFound
class LayerService(object):
    """Thin wrapper around the layer microservice endpoint."""

    @staticmethod
    def execute(config):
        """Run the given microservice request and return the layer attributes.

        Raises LayerNotFound when the service returns nothing or an error.
        """
        response = request_to_microservice(config)
        failed = not response or response.get('errors')
        if failed:
            raise LayerNotFound(message='Layer not found')
        return response.get('data', None).get('attributes', None)

    @staticmethod
    def get(layer):
        """Fetch a layer by id via GET /v1/layer/<id>."""
        return LayerService.execute({
            'uri': '/v1/layer/' + layer,
            'method': 'GET'
        })
|
990,418 | cec3c22fd636bedf4713d426b2825bb5e9a95593 | from cuneiform_ecc import *
def test_encode_eng():
    """encode_eng() must produce the known ciphertext for a fixed sentence."""
    s = "Open the door and enter the room but expect to be stifled by the sights and sound."
    expected = "OPCENRTH_EDHOOBRARNDQENRTEXRTJHELROEOM_BUVTEXXPLECGTTLOBPESWTIAFLQEDHBYZTH_ESWIGOHT_SASNDQSOFUNGD_C"
    actual = encode_eng(s)
    assert actual == expected, actual + " != " + expected
def test_plug():
    """Spot-check plug() against known single-substitution cases."""
    x = plug("A", None, "C")
    assert x == "C", "plug(A, None, C) != C"
    x = plug(None, "C", "C")
    assert x == "A", "plug(None, C, C) != A"
    x = plug("S", "R", None)
    # FIX: failure message was missing the comma between R and None.
    assert x == "I", "plug(S, R, None) != I"
def text_reconstruct():
    """Round-trip check: encode -> transliterate -> randomly obscure ->
    reconstruct must recover the original letters.

    NOTE(review): named text_reconstruct (not test_...), so pytest would not
    collect it automatically; it is invoked explicitly from __main__ below.
    """
    txt = """
    When kingship was lowered from heaven
    The kingship was in Eridu.
    In Eridu Alulim became king
    and reigned for many years
    """
    expected = "WHENKINGSHIPWASLOWEREDFROMHEAVENTHEKINGSHIPWASINERIDUINERIDUALULIMBECAMEKINGANDREIGNEDFORMANYYEARS"
    actual = textual_reconstruction(random_obscure(transliterate(encode_eng(txt))))
    assert actual == expected, actual + " != " + expected
if __name__ == "__main__":
    # Run the checks directly, without a pytest runner.
    test_plug()
    test_encode_eng()
    text_reconstruct()
|
990,419 | 77f52a4f1b07d9b71bc50f571792bed7a88572ac | import os , sys
sys.path.append(os.getcwd())
import pytest
import fnvhash_c
import time
@pytest.mark.asyncio
async def test_convert():
    """Check fnvhash_c char<->int conversions, including embedded NUL bytes.

    NOTE(review): nothing here is awaited — the asyncio marker looks
    unnecessary for this test.
    """
    # bytes and 4-char str inputs must agree
    assert fnvhash_c.convert_char_into_int(b'12\x00a') == fnvhash_c.convert_char_into_int('12\x00a') == 825360481
    # a 5-char str is expected to raise AttributeError
    # NOTE(review): if no exception is raised this check silently passes —
    # pytest.raises(AttributeError) would be stricter.
    try:
        fnvhash_c.convert_char_into_int('12345')
    except Exception as e:
        assert isinstance(e , AttributeError)
    assert fnvhash_c.convert_int_into_char(825373440) == b'123\x00'
    # round-trip with high bytes set
    assert fnvhash_c.convert_char_into_int(b'\x84\x8d1}') == 2223845757
    assert fnvhash_c.convert_int_into_char(2223845757) == b'\x84\x8d1}'
"""
Affine Transformations (study notes):
    Preserve the # of vertices
    Preserve the order of vertices
    Scaling/Dilation
    Translation
    Rotation

Moving/Translation
    (x,y)   -T(2,3)->   (x+2, y+3)
    (x,y,z) -T(a,b,c)-> (x+a, y+b, z+c)
    |1 0 0 a||x|   |x+a|
    |0 1 0 b||y| = |y+b|
    |0 0 1 c||z|   |z+c|
    |0 0 0 1||1|   | 1 |

Scaling/Dilation
    (x,y,z) -D(a,b,c)-> (xa, yb, zc)
    |a 0 0 0||x|   |ax|
    |0 b 0 0||y| = |by|
    |0 0 c 0||z|   |cz|
    |0 0 0 1||1|   |1 |

Rotation (about the origin)
    R(t = theta, a = axis that stays fixed)
    about z: (x,y) -> (x cos t - y sin t, y cos t + x sin t)
    |cost -sint 0 0||x|   |xcost-ysint|
    |sint  cost 0 0||y| = |ycost+xsint|
    |0     0    1 0||z|   |z          |
    |0     0    0 1||1|   |1          |
    about x:
    |1 0    0     0||x|   |x          |
    |0 cost -sint 0||y| = |ycost-zsint|
    |0 sint  cost 0||z|   |ysint+zcost|
    |0 0    0     1||1|   |1          |
    about y:
    |cost 0 -sint 0||x|   |xcost-zsint|
    |0    1  0    0||y| = |y          |
    |sint 0  cost 0||z|   |xsint+zcost|
    |0    0  0    1||1|   |1          |
    NOTE(review): the standard right-handed y-rotation is
    [[cos t, 0, sin t], [0, 1, 0], [-sin t, 0, cos t]]; the matrix above is
    its transpose (a rotation by -t) — confirm the intended convention.

Composition: transforms compose by matrix multiplication, applied right to left:
    E3 = R * E2
    E2 = S * E1
    E1 = T * E0
    E3 = (R*S*T)*E0   # Master Transformation Matrix
"""
|
990,421 | 64d71ce2f65dd83cd8568be7dab91fab5ecd40ef | from __future__ import print_function, division, unicode_literals
import warnings
import os
import sys
from configobj import ConfigObj
# Parse /etc/os-release at import time to identify the running distribution.
os_release = ConfigObj('/etc/os-release')
ID = os_release['ID']
VERSION_ID = os_release['VERSION_ID']
# The distro itself plus everything it declares itself "like"
# (e.g. ubuntu -> debian), most specific first.
# NOTE(review): ID_LIKE is optional in os-release; this raises KeyError on
# distros that omit it (e.g. plain Debian) — confirm that is acceptable.
IDS = [ID] + os_release['ID_LIKE'].split(' ')
def sys_path_append_match(dir_path, distros=None):
    """Append the first matching per-distro backend directory to sys.path.

    Looks inside *dir_path* for a subdirectory named after one of *distros*
    and appends the first one that exists to sys.path.

    :param dir_path: directory containing per-distro backend subdirectories
    :param distros: optional iterable of distro ids to try, in priority
        order; defaults to the module-level IDS detected from /etc/os-release
        (backward-compatible generalization — existing callers are unchanged)
    :return: True if a backend directory was found and appended, else False
    """
    candidates = IDS if distros is None else distros
    for distro in candidates:
        backend_path = os.path.join(dir_path, distro)
        if os.path.exists(backend_path):
            sys.path.append(backend_path)
            return True
    return False
|
990,422 | 6b89c3a8885d7f7dd1bf25acadbde05059be49dd | #!/usr/bin/env python3
import dbxref.resolver
import requests
import logging
import json
import argparse
logger = logging.getLogger(__name__)
def main():
    """Command-line entry point: retrieve InterPro documents for the given
    dbxrefs, filter them by the selected flags and print them as JSON."""
    # AVAILABLE for implementation:
    # 'go_terms', 'member_databases', 'integrated', 'entry_annotations', ''
    #
    # USED:
    # basics: 'accession', 'type', 'description', 'counters', 'entry_id', 'source_database', 'name'
    # hierarchy, wikipedia, literature, cross_references, overlaps_with
    parser = argparse.ArgumentParser(description="Retrieve InterPro documents and convert them into json")
    parser.add_argument("--basics", "-b", action="store_true", help="Include basic information such as accession, "
                                                                    "type, name, description, counters, entry_id and "
                                                                    "source_database")
    parser.add_argument("--hierarchy", "-hi", action="store_true", help="")
    parser.add_argument("--wikipedia", "-w", action="store_true", help="")
    parser.add_argument("--literature", "-l", action="store_true", help="")
    parser.add_argument("--cross_references", "-cr", action="store_true", help="")
    parser.add_argument("--overlaps", "-o", action="store_true", help="")
    parser.add_argument("dbxrefs", nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # If no filter flag was given, output all available information.
    # BUGFIX: store_true flags default to False (never None), so the previous
    # check `if None not in (...)` was always true and forced every flag on,
    # which made the individual command-line options ineffective.
    if not any((args.basics, args.hierarchy, args.wikipedia, args.literature,
                args.cross_references, args.overlaps)):
        args.basics = True
        args.hierarchy = True
        args.wikipedia = True
        args.literature = True
        args.cross_references = True
        args.overlaps = True
    dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)
    documents = retrieve(dbxrefs, basics=args.basics, hierarchy=args.hierarchy, wikipedia=args.wikipedia,
                         literature=args.literature, cross_references=args.cross_references, overlaps=args.overlaps)
    print(json.dumps(documents, sort_keys=True, indent=4))
def retrieve(dbxrefs, basics=True, hierarchy=True, wikipedia=True, literature=True, cross_references=True, overlaps=True):
    """Retrieve json document from InterPro REST api, filter information by
    selected options and parse into a new json-serializable list.

    :param dbxrefs: dbxref dicts as produced by convert_to_dbxrefs
    :param basics..overlaps: booleans selecting which sections to include
    :return: list of dicts, one per resolved entry, each keyed by 'id' plus
        the selected sections (missing sections are logged and skipped)
    """
    resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)
    documents = []
    for entry in resolved:
        # Construct URL for retrieve
        json_url = entry['locations']['json'][0]
        logger.debug('URL: %s', json_url)
        r = requests.get(json_url)
        logger.debug('Content: %s', r.text)
        ipro = json.loads(r.text)

        # Parse retrieved json file by selected Options; each section is
        # optional in the upstream document, hence the per-section KeyError guards.
        output = {"id": entry["dbxref"]}
        if basics:
            try:
                output.update(accession=ipro["metadata"]["accession"], entry_type=ipro["metadata"]["type"],
                              description=ipro["metadata"]["description"], counters=ipro["metadata"]["counters"],
                              entry_id=ipro["metadata"]["entry_id"], name=ipro["metadata"]["name"],
                              source_database=ipro["metadata"]["source_database"])
            except KeyError:
                logger.warning("One or more basic information were not available for the given entry. Please check your output.")
        if hierarchy:
            try:
                output.update(hierarchy=ipro["metadata"]["hierarchy"])
            except KeyError:
                logger.warning("Hierarchy information was not available for the given entry.")
        if wikipedia:
            try:
                output.update(wikipedia=ipro["metadata"]["wikipedia"])
            except KeyError:
                logger.warning("Wikipedia articles were not available for the given entry.")
        if literature:
            try:
                output.update(literature=ipro["metadata"]["literature"])
            except KeyError:
                logger.warning("Literature was not available for the given entry.")
        if cross_references:
            try:
                output.update(cross_references=ipro["metadata"]["cross_references"])
            except KeyError:
                logger.warning("Cross_references were not available for the given entry.")
        if overlaps:
            try:
                output.update(overlaps=ipro["metadata"]["overlaps_with"])
            except KeyError:
                logger.warning("Overlap information was not available for the given entry.")
        documents.append(output)
    return documents

if __name__ == "__main__":
    main()
|
990,423 | b7c36b7c3c5d222f4a88cad38a26f29c3417dc8d | import numpy as np
# Quick numpy demo: a fixed 1-D array and a random 3x3 integer matrix.
a1 = np.array([2,3,4])
print(a1)
a2 = np.random.randint(1,20,9).reshape(3,3)
print(a2)
# NOTE(review): a2 is printed twice — the second print looks accidental.
print(a2)
990,424 | a4b650fa33b5abec534edf52ed555bb822559329 | import cPickle
import os
import twitter # https://github.com/ianozsvald/python-twitter
# Usage:
# $ # setup CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET
# as environment variables
# $ python get_data.py # downloads friend and follower data to ./data
# Errors seen at runtime:
# raise URLError(err)
# urllib2.URLError: <urlopen error [Errno 104] Connection reset by peer>
DATA_DIR = "data" # storage directory for friend/follower data

# list of screen names that we'll want to analyse
screen_names = ['ianozsvald', 'annotateio', 'morconsulting', 'fluffyemily',
                'jot', 'brouberol',
                'markpriestley', 'steeevem', 'lovedaybrooke',
                'jameshaycock', 'localben']
def get_filenames(screen_name, data_dir=None):
    """Build the friends and followers pickle filenames for screen_name.

    data_dir defaults to the module-level DATA_DIR; passing it explicitly is
    a backward-compatible generalization that removes the hidden-global
    coupling (and makes the helper testable in isolation).

    Returns (friends_filename, followers_filename).
    """
    directory = DATA_DIR if data_dir is None else data_dir
    return (os.path.join(directory, "%s.friends.pickle" % (screen_name)),
            os.path.join(directory, "%s.followers.pickle" % (screen_name)))
if __name__ == "__main__":
    # OAuth credentials come from the environment (see usage note above).
    t = twitter.Api(consumer_key=os.getenv('CONSUMER_KEY'),
                    consumer_secret=os.getenv('CONSUMER_SECRET'),
                    access_token_key=os.getenv('ACCESS_TOKEN_KEY'),
                    access_token_secret=os.getenv('ACCESS_TOKEN_SECRET'))
    print t.VerifyCredentials()

    # Download friend/follower lists once per screen name; existing pickle
    # files act as a cache and are not re-fetched.
    print "Downloading friends and followers for:", screen_names
    for screen_name in screen_names:
        fr_filename, fo_filename = get_filenames(screen_name)
        print "Checking for:", fr_filename, fo_filename
        if not os.path.exists(fr_filename):
            print "Getting friends for", screen_name
            fr = t.GetFriends(user=screen_name)
            # protocol=2: binary pickle format
            # NOTE(review): binary pickles are written via text-mode "w" —
            # "wb" would be safer, especially on Windows; confirm.
            cPickle.dump(fr, open(fr_filename, "w"), protocol=2)
        if not os.path.exists(fo_filename):
            print "Getting followers for", screen_name
            fo = t.GetFollowers(user=screen_name)
            cPickle.dump(fo, open(fo_filename, "w"), protocol=2)
990,425 | 144e409cba424880b03cc9137167f8b6416e65b7 | from MidoHelper import Note, InstrumentHelper, MidoHelper
from functools import reduce
import random
def getTransitionMatrix(seq, deg = 1):
    '''
    Build a Markov-chain transition count matrix of degree deg from the
    note sequence seq.

    Each symbol of the derived chain is a tuple of deg consecutive elements
    of seq (a sliding window of length deg).

    :param seq: sequence of hashable elements (e.g. Note objects)
    :param deg: Markov chain degree (window length), >= 1
    :return: (init, cnt) where init maps each symbol to its occurrence count
        (used as the initial distribution when generating music) and cnt maps
        (symbol, next_symbol) pairs to their transition counts.
    '''
    # Robustness fix: the old code crashed with IndexError when len(seq) < deg;
    # an empty model is the sensible result for such short input.
    if len(seq) < deg:
        return ({}, {})
    # Performance fix: build the symbol sequence in a list instead of repeated
    # tuple concatenation, which was O(n^2) in len(seq).
    window = tuple(seq[:deg])
    symbols = [window]
    for i in range(deg, len(seq)):
        window = window[1:] + (seq[i],)
        symbols.append(window)
    init = dict()
    cnt = dict()
    for i, sym in enumerate(symbols):
        init[sym] = init.get(sym, 0) + 1
        if i + 1 < len(symbols):
            tr = (sym, symbols[i + 1])
            cnt[tr] = cnt.get(tr, 0) + 1
    return (init, cnt)
def generateRandomNotes(init: dict, cnt: dict, length: int):
    '''
    Get init and cnt as output by getTransitionMatrix, generate a random sequence of length length using this Markov model.
    '''
    gen = []
    if length == 0:
        # NOTE(review): returns a list here but a tuple below — callers that
        # depend on the return type should be checked.
        return gen
    # Randomly sample a initial note from distribution init.
    cur = random.choices(list(init.keys()), list(init.values()), k = 1)[0]
    gen.append(cur)
    for i in range(length - 1):
        weight = []
        terminal = True
        # Calculate transition from current note cur.
        for note in init.keys():
            if cnt.get((cur, note)) is None:
                weight.append(0)
            else:
                weight.append(cnt[(cur, note)])
                terminal = False
        if terminal:
            # cur is a terminal node in the model. Sample again from initial distribution.
            cur = random.choices(list(init.keys()), list(init.values()), k=1)[0]
        else:
            # Sample by this distribution
            cur = random.choices(list(init.keys()), weight)[0]
        gen.append(cur)
    # In high degree model, we get just one new note by each transition:
    # emit the full first window, then only the newest element of each step.
    ret = gen[0] + tuple(map(lambda x: x[-1], gen[1:]))
    return ret
def splitNotes(notes):
    # Split note into unit-timed notes: each note of duration d becomes
    # round(d * 8) eighth-notes with the same pitch and noteOn.
    return reduce(lambda x, y: x + y, map(lambda x: (Note(x.pitch, 1.0 / 8, x.noteOn), ) * round(x.duration * 8), notes))
def getCompoundRandomMusic(deg: int, *trk):
    '''
    Get multiple note sequences trk, generate random music by their joint distribution.
    '''
    # Convert input sequences into a single sequence with each element as a
    # "compound note" (one unit-timed note per track, zipped together).
    composedSeq = tuple(zip(*tuple(map(splitNotes, trk))))
    init, cnt = getTransitionMatrix(composedSeq, deg)
    res = generateRandomNotes(init, cnt, len(composedSeq) - deg + 1)
    # Convert back into separate per-track sequences.
    return reduce(lambda x, y: [x[i] + y[i] for i in range(len(x))], list(map(lambda x: [(x[i], ) for i in range(len(x))], res)))
def getIndependentRandomMusic(deg: int, *trk):
    '''
    Get multiple note sequences trk, generate random music separately for each sequence.
    '''
    # Each track gets its own Markov model; tracks are sampled independently.
    ret = tuple()
    for tk in trk:
        init, cnt = getTransitionMatrix(tk, deg)
        ret = ret + (generateRandomNotes(init, cnt, len(tk) - deg + 1), )
    return ret
def printTransitionMatrix(init, cnt):
    '''
    Output transition matrix denoted by cnt into trans.txt.
    Rows/columns follow the iteration order of init; each row is normalized
    to transition probabilities.
    '''
    with open("trans.txt", "w") as f:
        f.write("Notes:\n")
        for note in init.keys():
            f.write("({}) ".format('->'.join([n.pitchName() for n in note])))
        f.write("\n")
        for fr in init.keys():
            w = [0] * len(init)
            # NOTE(review): `sum` shadows the builtin within this loop body.
            sum = 0
            for i, to in enumerate(init.keys()):
                if cnt.get((fr, to)) is not None:
                    sum = sum + cnt[(fr, to)]
                    w[i] = cnt[(fr, to)]
            if sum == 0:
                # avoid division by zero for terminal symbols
                sum = 1
            f.write("[")
            for v in w:
                f.write("{0:.2f},".format(float(v) / sum))
            f.write("],\n")
def printTransitionMatrix2(init, cnt):
    # Variant of printTransitionMatrix: writes raw pitch numbers and
    # space-separated probabilities (no brackets) to trans.txt.
    with open("trans.txt", "w") as f:
        f.write("Notes:\n")
        for note in init.keys():
            f.write(" {}".format(note[0].pitch))
        f.write("\n")
        for fr in init.keys():
            w = [0] * len(init)
            # NOTE(review): `sum` shadows the builtin within this loop body.
            sum = 0
            for i, to in enumerate(init.keys()):
                if cnt.get((fr, to)) is not None:
                    sum = sum + cnt[(fr, to)]
                    w[i] = cnt[(fr, to)]
            if sum == 0:
                # avoid division by zero for terminal symbols
                sum = 1
            for v in w:
                f.write("{0:.2f} ".format(float(v) / sum))
            f.write("\n")
def drawTransitionMatrix(init, cnt):
    '''
    Generate (unfortunately badly drawn) network from the input Markov model.
    Requires networkx and pylab (imported lazily so the rest of the module
    works without them).
    '''
    import networkx as nx
    import pylab
    G = nx.DiGraph()
    for n in init.keys():
        G.add_node(n)
    for k, v in cnt.items():
        # edge weight = raw transition count
        G.add_edge(k[0], k[1], weight = v)
    edge_labels = dict([((u, v,), d['weight']) for u, v, d in G.edges(data = True)])
    pos = nx.spring_layout(G)
    nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labels)
    nx.draw(G, pos)
    pylab.show()
# inst = [InstrumentHelper.Clarinet, InstrumentHelper.Bassoon, InstrumentHelper.Violin]
# m = getCompoundRandomMusic(1, *[MidoHelper.read('cuphead.mid', trackId=x).getDefaultTrackNotes() for x in [1, 4, 7]])
# output = MidoHelper.read("cuphead.mid", trackId = 1)
# output = MidoHelper(output.tempo, output.numerator, output.denominator)
# for i, st in enumerate(m):
# output.addTrack(st, inst[i])
# output.export("cuphead_random_compound_deg1.mid")
#
# m = getIndependentRandomMusic(1, *[MidoHelper.read('cuphead.mid', trackId=x).getDefaultTrackNotes() for x in [1, 4, 7]])
# output = MidoHelper.read("cuphead.mid", trackId = 1)
# output = MidoHelper(output.tempo, output.numerator, output.denominator)
# for i, st in enumerate(m):
# output.addTrack(st, inst[i])
# output.export("cuphead_random_independent_deg1.mid")
# Script entry: dump the degree-2 transition matrix of the sample viola part.
mido = MidoHelper.read('music/sample/RV156_viola.mid', trackId = 1)
init, cnt = getTransitionMatrix(mido.getDefaultTrackNotes(), 2)
printTransitionMatrix(init, cnt)
#midi = MidoHelper.read('cuphead.mid', trackId=1)
#midi2 = MidoHelper.read('cuphead.mid', trackId=2)
#res = splitNotes(midi)
#res2 = splitNotes(midi2)
#init, cnt = getTransitionMatrix(tuple(zip(res, res2)))
#rdm = generateRandomNotes(init, cnt, min(len(res), len(res2)))
#output = MidoHelper(midi.tempo, midi.numerator, midi.denominator)
#output.addTrack(res)
#output.export("cuphead1_recovered.mid")
#for deg in range(1, 4):
# cupheadinit, cupheadtrans = getTransitionMatrix(midi.getDefaultTrackNotes(), deg)
# output = MidoHelper(midi.tempo, midi.numerator, midi.denominator)
# output.addTrack(generateRandomNotes(cupheadinit, cupheadtrans, len(midi.getDefaultTrackNotes()) - deg + 1))
# output.export('cuphead1_random_deg{0}.mid'.format(deg))
#deg = 1
###
#clarinetMidi = MidoHelper.read('cuphead.mid', trackId=1)
#bassonMidi = MidoHelper.read('cuphead.mid', trackId=4)
#violinMidi = MidoHelper.read('cuphead.mid', trackId=7)
#cupheadinitcl, cupheadtranscl = getTransitionMatrix(clarinetMidi.getDefaultTrackNotes(), 4)
#cupheadinitba, cupheadtransba = getTransitionMatrix(bassonMidi.getDefaultTrackNotes(), 4)
#cupheadinitvi, cupheadtransvi = getTransitionMatrix(bassonMidi.getDefaultTrackNotes(), 4)
#output = MidoHelper(midi.tempo, midi.numerator, midi.denominator)
#output.addTrack(generateRandomNotes(cupheadinitcl, cupheadtranscl, len(midi.getDefaultTrackNotes()) - deg + 1), InstrumentHelper.Clarinet)
#output.addTrack(generateRandomNotes(cupheadinitba, cupheadtransba, len(midi.getDefaultTrackNotes()) - deg + 1), InstrumentHelper.Bassoon)
#output.addTrack(generateRandomNotes(cupheadinitvi, cupheadtransvi, len(midi.getDefaultTrackNotes()) - deg + 1), InstrumentHelper.Violin)
#output.export('clarinet-basson.mid')
|
990,426 | e13bce6acc27a9a11e330e8d5c05084475ce4dea | import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from PIL import Image
import PIL.ImageOps
# Download MNIST (70k flattened 28x28 digit images) from OpenML; Y holds the
# digit labels. NOTE(review): newer sklearn returns a DataFrame here unless
# as_frame=False is passed -- confirm downstream indexing still works.
X, Y = fetch_openml("mnist_784", version = 1, return_X_y = True)
# Fixed-size split: 7500 training / 2500 test samples, seeded for repeatability.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 9, train_size = 7500, test_size = 2500)
# Scale raw 0-255 pixel values into [0, 1].
X_train_scaled = X_train / 255
X_test_scaled = X_test / 255
# Multinomial logistic regression trained once at import time.
clf = LogisticRegression(solver = "saga", multi_class = "multinomial").fit(X_train_scaled, Y_train)
def get_prediction(image):
    """Predict the digit contained in an image file.

    Args:
        image: path or file object of the image to classify.

    Returns:
        The label predicted by the module-level classifier ``clf``.
    """
    img = Image.open(image)
    # Convert to 8-bit grayscale and downsample to MNIST's 28x28 layout.
    img_bw = img.convert("L")
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img_bw_resized = img_bw.resize((28, 28), Image.LANCZOS)
    # Treat the darkest 20% of pixels as background and subtract it out.
    pixel_filter = 20
    min_pixel = np.percentile(img_bw_resized, pixel_filter)
    img_bw_resized_scaled = np.clip(img_bw_resized - min_pixel, 0, 255)
    # NOTE(review): normalises by the max of the *unshifted* image, not the
    # clipped one -- confirm that is intentional.
    max_pixel = np.max(img_bw_resized)
    img_bw_resized_scaled = np.asarray(img_bw_resized_scaled) / max_pixel
    # Flatten to the 1x784 row shape the classifier was trained on.
    test_sample = np.array(img_bw_resized_scaled).reshape(1, 784)
    test_pred = clf.predict(test_sample)
    return test_pred[0]
990,427 | 3f2d5ee6a6fd5d154d56a927cb3964f55268c5d8 | import time
from indy import anoncreds, wallet
import json
import logging
from indy import blob_storage
from indy import pool
from src.utils import run_coroutine, path_home, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
async def demo():
    """End-to-end Indy anoncreds demo with credential revocation.

    One issuer issues a revocable credential to one prover; the prover then
    builds a proof with a non-revocation interval and a verifier checks it.
    Ledger interactions are simulated by the local ``store`` dict.
    """
    logger.info("Anoncreds Revocation sample -> started")
    issuer = {
        'did': 'NcYxiDXkpYi6ov5FcYDi1e',
        'wallet_config': json.dumps({'id': 'issuer_wallet'}),
        'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
    }
    prover = {
        'did': 'VsKV7grR1BUE29mG2Fm2kX',
        'wallet_config': json.dumps({"id": "prover_wallet"}),
        # NOTE(review): the prover wallet reuses 'issuer_wallet_key'; it is
        # used consistently at open/delete below, but looks like a
        # copy-paste -- confirm it is intentional.
        'wallet_credentials': json.dumps({"key": "issuer_wallet_key"})
    }
    verifier = {}
    # Stand-in for the ledger: schemas, cred defs and revocation data by id.
    store = {}
    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)
    # 1. Create Issuer Wallet and Get Wallet Handle
    await wallet.create_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    issuer['wallet'] = await wallet.open_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    # 2. Create Prover Wallet and Get Wallet Handle
    await wallet.create_wallet(prover['wallet_config'], prover['wallet_credentials'])
    prover['wallet'] = await wallet.open_wallet(prover['wallet_config'], prover['wallet_credentials'])
    # 3. Issuer create Credential Schema
    schema = {
        'name': 'gvt',
        'version': '1.0',
        'attributes': '["age", "sex", "height", "name"]'
    }
    issuer['schema_id'], issuer['schema'] = await anoncreds.issuer_create_schema(issuer['did'], schema['name'],
                                                                                schema['version'],
                                                                                schema['attributes'])
    store[issuer['schema_id']] = issuer['schema']
    # 4. Issuer create Credential Definition for Schema
    cred_def = {
        'tag': 'cred_def_tag',
        'type': 'CL',
        'config': json.dumps({"support_revocation": True})
    }
    issuer['cred_def_id'], issuer['cred_def'] = await anoncreds.issuer_create_and_store_credential_def(
        issuer['wallet'], issuer['did'], issuer['schema'], cred_def['tag'], cred_def['type'], cred_def['config'])
    store[issuer['cred_def_id']] = issuer['cred_def']
    # 5. Issuer create Revocation Registry
    issuer['tails_writer_config'] = json.dumps({'base_dir': str(path_home().joinpath("tails")), 'uri_pattern': ''})
    issuer['tails_writer'] = await blob_storage.open_writer('default', issuer['tails_writer_config'])
    revoc_reg_def = {
        'tag': 'cred_def_tag',
        'config': json.dumps({"max_cred_num": 5, 'issuance_type': 'ISSUANCE_ON_DEMAND'})
    }
    (issuer['rev_reg_id'], issuer['rev_reg_def'], issuer['rev_reg_entry']) = \
        await anoncreds.issuer_create_and_store_revoc_reg(issuer['wallet'], issuer['did'], None, revoc_reg_def['tag'],
                                                          issuer['cred_def_id'], revoc_reg_def['config'],
                                                          issuer['tails_writer'])
    store[issuer['rev_reg_id']] = {
        'definition': issuer['rev_reg_def'],
        'value': issuer['rev_reg_entry']
    }
    # 6. Prover create Master Secret
    prover['master_secret_id'] = await anoncreds.prover_create_master_secret(prover['wallet'], None)
    # 7. Issuer create Credential Offer
    issuer['cred_offer'] = await anoncreds.issuer_create_credential_offer(issuer['wallet'], issuer['cred_def_id'])
    prover['cred_offer'] = issuer['cred_offer']
    cred_offer = json.loads(prover['cred_offer'])
    prover['cred_def_id'] = cred_offer['cred_def_id']
    prover['schema_id'] = cred_offer['schema_id']
    prover['cred_def'] = store[prover['cred_def_id']]
    prover['schema'] = store[prover['schema_id']]
    # 8. Prover create Credential Request
    prover['cred_req'], prover['cred_req_metadata'] = \
        await anoncreds.prover_create_credential_req(prover['wallet'], prover['did'], prover['cred_offer'],
                                                     prover['cred_def'], prover['master_secret_id'])
    # 9. Issuer open Tails reader
    issuer['blob_storage_reader'] = await blob_storage.open_reader('default', issuer['tails_writer_config'])
    # 10. Issuer create Credential
    prover['cred_values'] = json.dumps({
        "sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
        "name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
        "height": {"raw": "175", "encoded": "175"},
        "age": {"raw": "28", "encoded": "28"}
    })
    issuer['cred_values'] = prover['cred_values']
    issuer['cred_req'] = prover['cred_req']
    (cred_json, rev_id, rev_reg_delta_json) = \
        await anoncreds.issuer_create_credential(issuer['wallet'], issuer['cred_offer'], issuer['cred_req'],
                                                 issuer['cred_values'], issuer['rev_reg_id'],
                                                 issuer['blob_storage_reader'])
    issuer['rev_id'] = rev_id
    store[issuer['rev_reg_id']]['delta'] = rev_reg_delta_json
    prover['cred'] = cred_json
    # 11. Prover store Credential
    cred = json.loads(prover['cred'])
    prover['rev_reg_id'] = cred['rev_reg_id']
    prover['rev_reg_def'] = store[prover['rev_reg_id']]['definition']
    prover['rev_reg_delta'] = store[prover['rev_reg_id']]['delta']
    await anoncreds.prover_store_credential(prover['wallet'], None, prover['cred_req_metadata'],
                                            prover['cred'], prover['cred_def'], prover['rev_reg_def'])
    # 12. Prover gets Credentials for Proof Request
    verifier['proof_req'] = json.dumps({
        'nonce': '123432421212',
        'name': 'proof_req_1',
        'version': '0.1',
        'requested_attributes': {
            'attr1_referent': {'name': 'name'}
        },
        'requested_predicates': {
            'predicate1_referent': {'name': 'age', 'p_type': '>=', 'p_value': 18}
        },
        "non_revoked": {"from": 80, "to": 100}
    })
    prover['proof_req'] = verifier['proof_req']
    # Prover gets Credentials for attr1_referent
    prover['cred_search_handle'] = \
        await anoncreds.prover_search_credentials_for_proof_req(prover['wallet'], prover['proof_req'], None)
    creds_for_attr1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
                                                                            'attr1_referent', 10)
    prover['cred_for_attr1'] = json.loads(creds_for_attr1)[0]['cred_info']
    # Prover gets Credentials for predicate1_referent
    creds_for_predicate1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
                                                                                 'predicate1_referent', 10)
    prover['cred_for_predicate1'] = json.loads(creds_for_predicate1)[0]['cred_info']
    await anoncreds.prover_close_credentials_search_for_proof_req(prover['cred_search_handle'])
    # 13. Prover creates revocation state
    timestamp = 100
    prover['tails_reader_config'] = json.dumps({'base_dir': str(path_home().joinpath("tails")), 'uri_pattern': ''})
    prover['blob_storage_reader'] = await blob_storage.open_reader('default', prover['tails_reader_config'])
    rev_state_json = await anoncreds.create_revocation_state(prover['blob_storage_reader'], prover['rev_reg_def'],
                                                             prover['rev_reg_delta'], timestamp,
                                                             prover['cred_for_attr1']['cred_rev_id'])
    # 14. Prover create Proof for Proof Request
    prover['requested_creds'] = json.dumps({
        'self_attested_attributes': {},
        'requested_attributes': {'attr1_referent': {
            'cred_id': prover['cred_for_attr1']['referent'], 'revealed': True, 'timestamp': timestamp}
        },
        'requested_predicates': {
            'predicate1_referent': {'cred_id': prover['cred_for_predicate1']['referent'], 'timestamp': timestamp}
        }
    })
    schemas_json = json.dumps({prover['schema_id']: json.loads(prover['schema'])})
    cred_defs_json = json.dumps({prover['cred_def_id']: json.loads(prover['cred_def'])})
    revoc_states_json = json.dumps({prover['rev_reg_id']: {timestamp: json.loads(rev_state_json)}})
    prover['proof'] = \
        await anoncreds.prover_create_proof(prover['wallet'], prover['proof_req'], prover['requested_creds'],
                                            prover['master_secret_id'], schemas_json, cred_defs_json, revoc_states_json)
    verifier['proof'] = prover['proof']
    # 15. Verifier verify proof
    proof = json.loads(verifier['proof'])
    assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
    identifier = proof['identifiers'][0]
    verifier['cred_def_id'] = identifier['cred_def_id']
    verifier['schema_id'] = identifier['schema_id']
    verifier['rev_reg_id'] = identifier['rev_reg_id']
    verifier['cred_def'] = store[verifier['cred_def_id']]
    verifier['schema'] = store[verifier['schema_id']]
    verifier['rev_reg_def'] = store[verifier['rev_reg_id']]['definition']
    verifier['rev_reg_value'] = store[verifier['rev_reg_id']]['delta']
    schemas_json = json.dumps({verifier['schema_id']: json.loads(verifier['schema'])})
    cred_defs_json = json.dumps({verifier['cred_def_id']: json.loads(verifier['cred_def'])})
    revoc_ref_defs_json = json.dumps({verifier['rev_reg_id']: json.loads(verifier['rev_reg_def'])})
    revoc_regs_json = json.dumps({verifier['rev_reg_id']: {timestamp: json.loads(verifier['rev_reg_value'])}})
    assert await anoncreds.verifier_verify_proof(verifier['proof_req'], verifier['proof'], schemas_json, cred_defs_json,
                                                 revoc_ref_defs_json, revoc_regs_json)
    # 16. Close and delete Issuer wallet
    await wallet.close_wallet(issuer['wallet'])
    await wallet.delete_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    # 17. Close and delete Prover wallet
    await wallet.close_wallet(prover['wallet'])
    await wallet.delete_wallet(prover['wallet_config'], prover['wallet_credentials'])
    logger.info("Anoncreds Revocation sample -> completed")
# Run the demo coroutine to completion when executed as a script.
if __name__ == '__main__':
    run_coroutine(demo)
    time.sleep(1)  # FIXME waiting for libindy thread complete
|
990,428 | 008de82e089d5287bd54de8d48f6dd9a7aa099a8 | import re
# input_lines = '''\
# aaaaa-bbb-z-y-x-123[abxyz]
# a-b-c-d-e-f-g-h-987[abcde]
# not-a-real-room-404[oarel]
# totally-real-room-200[decoy]'''.splitlines()
# Parse lines like "aaaaa-bbb-z-y-x-123[abxyz]": name, sector id, checksum.
input_lines=open('input.txt')
good =[]
for line in input_lines:
    match = re.match(r'^([a-z-]+)([0-9]+)\[([a-z]+)\]', line)
    name, sector, checksum = match.groups()
    # Letter frequencies of the name (dashes skipped).
    count = {}
    for letter in name:
        if not letter.isalpha():
            continue
        count[letter] = count.get(letter, 0) + 1
    # Letters ordered by descending frequency, ties broken alphabetically;
    # a room is real when its checksum is the first letters of that order.
    decoded = ''.join(sorted(count, key=lambda x: (-count[x], x)))
    if decoded.startswith(checksum):
        good.append((name, int(sector)))
# good = [('qzmt-zixmtkozy-ivhz-', 343)]
def rotate(word, n):
    """Caesar-shift every lowercase letter of *word* forward by *n* places."""
    base = ord('a')
    return ''.join(chr((ord(ch) - base + n) % 26 + base) for ch in word)
# Decrypt each real room's name (dashes become spaces) next to its sector id.
for name, sector in good:
    print(sector, ' '.join(rotate(p, sector) for p in name.split('-') if p))
990,429 | cc9f1ac7003c67af0f6a4a5a7e3f0a6fc13e074d | import numpy as np
from math import floor
from transform import lerp
import numpy as np
import random
def make_vertex(vertices, n, x, y, height, width, centered=False):
    """Store vertex *n*'s (x, y) position, optionally centred on the grid."""
    # When centred, shift by half the vertex-grid extent along each axis.
    off_x = (height + 1) / 2 if centered else 0
    off_y = (width + 1) / 2 if centered else 0
    vertices[n][0] = x - off_x
    vertices[n][1] = y - off_y
def generate_grid(height, width=None, scale=1, centered=False):
    """Build a regular triangulated grid of (height+1) x (width+1) vertices.

    Args:
        height: number of cells along the first axis.
        width: number of cells along the second axis (defaults to height).
        scale: uniform scale factor applied to the vertex positions.
        centered: if True, centre the grid around the origin.

    Returns:
        (vertices, faces): float32 (N, 3) positions and uint32 (2*H*W, 3)
        triangle index triples.
    """
    if width is None:
        width = height
    vertices = np.zeros(shape=((height + 1)*(width + 1), 3), dtype=np.float32)
    faces = np.zeros(shape=(height*width*2, 3), dtype=np.uint32)
    for i in range(height):
        for j in range(0, width):
            n = i * (width + 1) + j
            # generate one vertex
            make_vertex(vertices, n, i, j, height, width, centered)
            # generate two faces per grid cell
            faces[i*2*width + 2*j][0] = n
            faces[i*2*width + 2*j][1] = n + 1
            faces[i*2*width + 2*j][2] = n + width + 1
            faces[i*2*width + 2*j + 1][0] = n + 1
            faces[i*2*width + 2*j + 1][1] = n + 2 + width
            faces[i*2*width + 2*j + 1][2] = n + width + 1
        # Last vertex in the row sits at column `width` (the original passed
        # the stale loop variable j == width-1 here, duplicating a column).
        make_vertex(vertices, i*(width + 1) + width, i, width, height, width, centered)
    # Bottom row of vertices lies at row `height` (the original passed the
    # stale loop variable i == height-1, duplicating a row).
    for j in range(width + 1):
        make_vertex(vertices, height*(width + 1) + j, height, j, height, width, centered)
    return scale*vertices, faces
def randomize_height(height_map, width, height, step):
    """Assign Gaussian random z values on a coarse lattice of grid points."""
    for row in range(0, height, step):
        for col in range(0, width, step):
            # Only every `step`-th point in each direction is displaced.
            height_map[row * width + col][2] = random.gauss(0, 0.50)
# Debug bookkeeping for which gradient indices were defined/used; only
# referenced from commented-out diagnostics below.
defined_grad = []
used_grad = []
def randomize_gradient(width, height, step):
    """Return a ((width+1)*(height+1), 2) array of random gradients.

    Gradient vectors are drawn uniformly from [-1, 1]^2, but only on a
    coarse lattice spaced `step` apart; every other entry stays zero.
    """
    gradient = np.zeros(shape=((width + 1)*(height + 1), 2), dtype=float)
    row_len = width + 1
    for p in range(0, height + 1, step):
        for q in range(0, width + 1, step):
            idx = p * row_len + q
            gradient[idx][0] = random.uniform(-1, 1)
            gradient[idx][1] = random.uniform(-1, 1)
    return gradient
def dotGridGradient(gradient, width, ix, iy, x, y):
    """Dot the gradient stored at lattice node (ix, iy) with the offset
    vector from that node to the point (x, y)."""
    node = gradient[iy + (width + 1) * ix]
    return (x - ix) * node[0] + (y - iy) * node[1]
#def perlin(x, y, vertices):
# x0 = floor(x)
# x1 = x0 + 1
# y0 = floor(y)
# y1 = y0 + 1
#
# sx = x - x0
# sy = y - y0
#
# n0 = dotGridGradient(x0, y0, x, y)
# n1 = dotGridGradient(x1, y0, x, y)
# ix0 = lerp(n0, n1, sx)
# n0 = dotGridGradient(x0, y1, x, y)
# n1 = dotGridGradient(x1, y1, x, y)
# ix1 = lerp(n0, n1, sx)
# value = lerp(ix0, ix1, sy)
#
# return value
def lerp_smooth(a, b, fraction):
    """Interpolate between a and b using the quintic smoothstep weight."""
    def fade(t):
        # 6t^5 - 15t^4 + 10t^3: zero first and second derivatives at 0 and 1.
        return t * t * t * (t * (t * 6 - 15) + 10)
    return fade(1 - fraction) * a + fade(fraction) * b
def generate_perlin_grid(height, width=None, step=25, scale=1, centered=False):
    """Generate a grid whose z heights follow classic Perlin noise with
    lattice spacing `step`.

    NOTE(review): the loops stop at height-step / width-step, so the outer
    margin of the grid keeps z == 0 -- confirm that is intended.
    """
    if width==None:
        width = height
    vertices, faces = generate_grid(height, width, scale, centered)
    gradient = randomize_gradient(width, height, step)
    for p in range(0, height - step):
        for q in range(0, width - step):
            #if p%step != 0 and q%step != 0:
            # Lattice cell corners surrounding point (p, q).
            p0, q0 = step*(p // step), step*(q // step)
            #p1, q1 = p0 + 1, q0 + 1
            p1, q1 = p0 + step, q0 + step
            # Offsets within the cell, normalised below by `step`.
            sp, sq = p % step, q % step
            n0 = dotGridGradient(gradient, width, p0, q0, p, q)
            n1 = dotGridGradient(gradient, width, p1, q0, p, q)
            ix0 = lerp_smooth(n0, n1, sp/step)
            #ix0 = lerp_smooth(n0, n1, sp)
            n0 = dotGridGradient(gradient, width, p0, q1, p, q)
            n1 = dotGridGradient(gradient, width, p1, q1, p, q)
            ix1 = lerp_smooth(n0, n1, sp/step)
            #ix1 = lerp_smooth(n0, n1, sp)
            value = lerp_smooth(ix0, ix1, sq/step)
            #value = lerp_smooth(ix0, ix1, sq)
            vertices[p*(width+1) + q][2] = value
    # Debug output left in by the author: dumps every z value.
    print([z[2] for z in vertices])
    #print(len(used_grad))
    #print(len(defined_grad))
    #print("=================================")
    #for i in defined_grad:
    #    if i not in used_grad:
    #        print(i)
    return vertices, faces
|
990,430 | a256679d9e2f55d07e47a4b10e487aec02429caa | N = int(input())
# Read the N x N character grid, one row per line.
MAP = [input() for _ in range(N)]
# print(MAP)
def dfs(y, x, size):
    """Print the quadtree compression of the size x size square at (y, x).

    A square whose cells are all equal prints as that single character;
    otherwise it prints '(' + the four recursive quadrants + ')'.
    """
    if size == 0:
        return
    top_left = MAP[y][x]
    # Uniformity check; unlike the original full double loop, all() stops
    # at the first mismatching cell.
    uniform = all(
        MAP[i][j] == top_left
        for i in range(y, y + size)
        for j in range(x, x + size)
    )
    if uniform:
        print(top_left, end="")
    else:
        half = size // 2
        print("(", end="")
        dfs(y, x, half)
        dfs(y, x + half, half)
        dfs(y + half, x, half)
        dfs(y + half, x + half, half)
        print(")", end="")
# Compress the whole map starting from the full N x N square.
dfs(0,0,N)
990,431 | 7ec88f942593aa665758bb0bcc074713a2bbc528 | numbers = ["a","b","c","d","e"]
# Unpack the list so print receives each element as a separate argument.
print(*numbers)
990,432 | f43a5846e6cc53347b2f1e84b1c44d2bbeceb6be | from gip.third_party import get_image_size # NOQA
|
990,433 | 9f8d9ffe488f870bf70c762f02045ad1716754ae | import json
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
# Create your views here.
from .models import *
'''
handle with the good
'''
def index(request):
    """Home page: for each of the first six categories, the four newest
    goods and the three least-clicked goods."""
    typelist = TypeInfo.objects.all()
    context = {'title': '首页'}
    # The template expects keys type{i}0 (newest four) and type{i}1
    # (three least-clicked) for categories 0..5; the original spelled out
    # all twelve assignments by hand.
    for i in range(6):
        goods_set = typelist[i].goodsinfo_set
        context['type{}0'.format(i)] = goods_set.order_by('-id')[0:4]
        context['type{}1'.format(i)] = goods_set.order_by('gclick')[0:3]
    # Session expires when the browser closes.
    request.session.set_expiry(0)
    try:
        request.session['user_name']
    except KeyError:  # narrowed from a bare except that hid real errors
        # Anonymous visitor: initialise the cart-item counter.
        request.session['count'] = 0
    return render(request, 'goods/index.html', context=context)
def detail(request, gid):
    """Goods detail page; bumps the click counter for the viewed item."""
    goods = GoodsInfo.objects.get(pk=int(gid))
    goods.gclick += 1
    goods.save()
    # The two newest goods in the same category, shown as "news".
    latest = goods.gtype.goodsinfo_set.order_by('-id')[0:2]
    category = TypeInfo.objects.get(pk=goods.gtype_id)
    return render(request, 'goods/detail.html', context={
        'title': goods.gtype.title,
        'goods': goods,
        'news': latest,
        'typeinfo': category,
        'gid': gid,
    })
def list(request, tid, pindex, sort):
    '''
    Paginated goods listing for one category.

    tid: category id (1-6)
    pindex: current page number
    sort: '1' newest first, '2' by price desc, '3' by clicks desc

    :param request:
    :param tid:
    :param pindex:
    :param sort:
    :return:
    '''
    typelist = TypeInfo.objects.get(pk=int(tid))
    news = typelist.goodsinfo_set.order_by('-id')[0:2]
    # Map the sort code to an ORM ordering instead of a repeated if-chain;
    # unknown codes yield an empty list, matching the original behaviour.
    ordering = {'1': '-id', '2': '-gprice', '3': '-gclick'}.get(sort)
    if ordering is not None:
        goodslist = GoodsInfo.objects.filter(gtype_id=int(tid)).order_by(ordering)
    else:
        goodslist = []
    paginator = Paginator(goodslist, 10)
    page = paginator.page(int(pindex))
    context = {
        'title': typelist.title,
        'page': page,
        'paginator': paginator,
        'pindex': pindex,
        'typeinfo': typelist,
        'sort': sort,
        'news': news
    }
    return render(request, 'goods/list.html', context=context)
def immedi_buy(request, gid, num):
    """'Buy now': jump straight to the order page for a single goods item."""
    user = UserInfo.objects.get(id=request.session['user_id'])
    goods = GoodsInfo.objects.get(pk=int(gid))
    context = {
        'title': '提交订单',
        'count': 1,
        'goods': goods,
        'user': user,
        'num': num,
        'total': goods.gprice * int(num)
    }
    # NOTE(review): the original also built an *unsaved* CartItem and summed
    # a total over CartItem.objects.filter(pk=cartitem.id) (pk was None);
    # both results were unused, so that dead code has been removed.
    return render(request, 'shop/place_order.html', context=context)
def show_cart(request):
    """Cart page; sends anonymous users to the login page instead."""
    try:
        uid = request.session['user_id']
    except KeyError:
        # Narrowed from a bare except that also swallowed database and
        # template errors: only a missing login triggers the login page.
        alert = 1
        return render(request, 'user/login.html', locals())
    cart = CartItem.objects.filter(user_id=uid)
    # Keep the navbar cart counter in sync.
    request.session['count'] = cart.count()
    total = 0
    for cartitem in cart:
        total += cartitem.subtotal
    context = {
        'title': '购物车',
        'cart': cart,
        'total': total
    }
    return render(request, 'goods/cart.html', context=context)
def add_cart(request, gid, num):
    """Add `num` units of goods `gid` to the current user's cart."""
    gid, num = int(gid), int(num)
    uid = request.session['user_id']
    existing = CartItem.objects.filter(goods_id=gid, user_id=uid)
    # If the goods is already in this user's cart, just bump the quantity;
    # otherwise create a fresh cart row.
    if len(existing) >= 1:
        item = existing[0]
        item.num += num
        item.subtotal += num * item.goods.gprice
    else:
        item = CartItem(user_id=uid, goods_id=gid, num=num)
        item.subtotal = item.goods.gprice * num
    item.save()
    # AJAX callers only need the updated cart-item count as JSON.
    if request.is_ajax():
        return JsonResponse({'num': CartItem.objects.filter(user_id=uid).count()})
    return redirect(show_cart)
def delete_cart(request, id):
    """Remove one cart row by primary key, then return to the cart page."""
    CartItem.objects.get(pk=int(id)).delete()
    return redirect(show_cart)
def edit_cart(request, method, cid):
    """Adjust a cart row's quantity: method '1' increments, else decrements.

    The quantity is clamped at 1 (the original could decrement it to zero
    or below); use delete_cart to remove a row entirely.
    """
    cartitem = CartItem.objects.get(id=int(cid))
    if int(method) == 1:
        cartitem.num += 1
    elif cartitem.num > 1:
        cartitem.num -= 1
    cartitem.save()
    return redirect(show_cart)
def search(request, key_word, pindex, sort):
    """Paginated keyword search over goods titles.

    sort: '1' newest first, '2' by price desc, '3' by clicks desc; any
    other value falls back to newest first (the original if/elif chain
    raised UnboundLocalError for unknown codes).
    """
    ordering = {'1': '-id', '2': '-gprice', '3': '-gclick'}.get(sort, '-id')
    search_list = GoodsInfo.objects.filter(gtitle__contains=key_word).order_by(ordering)
    paginator = Paginator(search_list, 10)
    page = paginator.page(int(pindex))
    return render(request, 'goods/search_result_show.html', {
        'title': '查询结果-' + key_word,
        'keyword': key_word,
        'page': page,
        'paginator': paginator,
        'sort': sort
    })
def settle(request):
    """Order-confirmation page fed from the cart via GET parameters."""
    params = request.GET
    context = {
        'title': '提交订单',
        'cart': params['cart'],
        'count': params['count'],
        'total': params['total'],
    }
    return render(request, 'shop/place_order.html', context=context)
|
990,434 | 3a4dea845ed180d01113039399bd8ce86cc374a8 | # coding: utf-8
import sys
from bisect import bisect_left, bisect_right
# Input helpers: raw line, single int, list of ints.
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))
R = ir()
# Rating thresholds: <1200 -> ABC, <2800 -> ARC, otherwise AGC.
target = [1200, 2800]
# bisect_right counts how many thresholds R has reached.
i = bisect_right(target, R)
answer = ['ABC', 'ARC', 'AGC'][i]
print(answer)
|
990,435 | f755643db0f9dcac454a5e7837f37e6a056454c0 | """ Main detector module which can be used for training and inferencing. """
from typing import List
import collections
import torch
import efficientnet, bifpn, retinanet_head
from third_party import (
postprocess,
regression,
anchors,
)
# Compound-scaling table indexed by model name; consumed in
# EfficientDet.__init__ as (resolution, backbone, channels, bifpns, convs).
_MODEL_SCALES = {
    # (resolution, backbone, bifpn channels, num bifpn layers, head layers)
    "efficientdet-b0": (512, "efficientnet-b0", 64, 3, 3),
    "efficientdet-b1": (640, "efficientnet-b1", 88, 4, 3),
    "efficientdet-b2": (768, "efficientnet-b2", 112, 5, 3),
    "efficientdet-b3": (896, "efficientnet-b3", 160, 6, 4),
    "efficientdet-b4": (1024, "efficientnet-b4", 224, 7, 4),
    "efficientdet-b5": (1280, "efficientnet-b5", 288, 7, 4),
}
class EfficientDet(torch.nn.Module):
    """ Implementation of EfficientDet originally proposed in
    [1] Mingxing Tan, Ruoming Pang, Quoc Le.
    EfficientDet: Scalable and Efficient Object Detection.
    CVPR 2020, https://arxiv.org/abs/1911.09070 """

    def __init__(
        self,
        num_classes: int,
        backbone: str,
        levels: List[int] = [3, 4, 5, 6, 7],
        num_levels_extracted: int = 3,
        num_detections_per_image: int = 100,
        score_threshold: float = 0.05,
    ) -> None:
        """
        Args:
            num_classes: number of object classes to detect.
            backbone: a key of _MODEL_SCALES selecting the model size;
                params are (resolution, backbone, bifpn channels,
                num bifpns, num retina net convs).
            levels: pyramid levels fed to the detection head.
            num_levels_extracted: backbone pyramid levels passed to the BiFPN.
            num_detections_per_image: cap on post-processed detections.
            score_threshold: minimum score kept by post-processing.
        Usage:
        >>> net = EfficientDet(10, "efficientdet-b0", score_threshold=0)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> len(out)
        2
        >>> len(out[0])
        5
        >>> len(out[1])
        5
        >>> with torch.no_grad():
        ...    out = net.predict(torch.randn(1, 3, 512, 512))
        >>> type(out[0][0])
        <class 'third_party.postprocess.BoundingBox'>
        """
        super().__init__()
        self.levels = levels
        self.num_pyramids = len(levels)
        self.num_levels_extracted = num_levels_extracted
        self.num_detections_per_image = num_detections_per_image
        self.backbone = efficientnet.EfficientNet(
            _MODEL_SCALES[backbone][1], num_classes=num_classes
        )
        self.backbone.delete_classification_head()
        # Get the output feature for the pyramids we need
        features = self.backbone.get_pyramid_channels()[-num_levels_extracted:]
        params = _MODEL_SCALES[backbone]
        # Create the BiFPN with the supplied parameter options.
        self.fpn = bifpn.BiFPN(
            in_channels=features,
            out_channels=params[2],
            num_bifpns=params[3],
            levels=[3, 4, 5],
            bifpn_height=5,
        )
        self.anchors = anchors.AnchorGenerator(
            img_height=params[0],
            img_width=params[0],
            pyramid_levels=levels,
            anchor_scales=[1.0, 1.25, 1.50],
        )
        # Create the retinanet head.
        self.retinanet_head = retinanet_head.RetinaNetHead(
            num_classes,
            in_channels=params[2],
            anchors_per_cell=self.anchors.num_anchors_per_cell,
            num_convolutions=params[4],
        )
        if torch.cuda.is_available():
            self.anchors.all_anchors = self.anchors.all_anchors.cuda()
            # Renamed the comprehension variable: the original reused
            # `anchors`, shadowing the imported module inside this scope.
            self.anchors.anchors_over_all_feature_maps = [
                level_anchors.cuda()
                for level_anchors in self.anchors.anchors_over_all_feature_maps
            ]
        self.postprocess = postprocess.PostProcessor(
            num_classes=num_classes,
            anchors_per_level=self.anchors.anchors_over_all_feature_maps,
            regressor=regression.Regressor(),
            max_detections_per_image=num_detections_per_image,
            score_threshold=score_threshold,
        )
        self.eval()

    def forward(self, x: torch.Tensor):
        """ Run backbone, BiFPN and head; returns (classifications,
        regressions), one tensor per pyramid level.

        Defined as ``forward`` instead of overriding ``__call__`` so that
        torch.nn.Module's call machinery (forward/backward hooks,
        scripting) keeps working; ``self(x)`` still dispatches here. """
        levels = self.backbone.forward_pyramids(x)
        # Only keep the levels specified during construction.
        levels = collections.OrderedDict(
            [item for item in levels.items() if item[0] in self.levels]
        )
        levels = self.fpn(levels)
        classifications, regressions = self.retinanet_head(levels)
        return classifications, regressions

    def predict(self, x: torch.Tensor) -> List[postprocess.BoundingBox]:
        """ Member function to perform inference on an input image tensor. """
        return self.postprocess(*self(x))
|
990,436 | ffb0aab7110c1349a7e2e7372b77d8be7d0295bd | # percol configuration file
# Vim-style movement: the `percol` object is injected by the host program.
percol.import_keymap({
    "C-j" : lambda percol: percol.command.select_next(),
    "C-k" : lambda percol: percol.command.select_previous()
})
|
990,437 | 666c12af0a4b0069ac76e669fa1eab6a0ded0765 | """Some useful pre-processing functions"""
import re
import os
from nltk.corpus import PlaintextCorpusReader
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
__license__ = "MIT License"
def remove_punctuations(text):
    """Normalise a token: strip punctuation, regularise z/x spellings and
    collapse runs of spaces.

    Tokens starting with a number followed by a dot (e.g. stanza numbers)
    are returned untouched.
    """
    if re.match(r"[0-9]+\.", text) is not None:
        return text
    cleaned = text
    for pattern, replacement in (
        ("[\-:?;.,]", ""),
        ("z", "s"),
        ("x", "ks"),
        (r" +", " "),
    ):
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
if __name__ == "__main__":
    # pos_annotated_text = PoeticEddaPOSTaggedReader("Völuspá")
    # NOTE(review): `poetic_edda` is not defined anywhere in this module;
    # running this block raises NameError -- it presumably should be
    # imported from the package's configuration.
    text = PlaintextCorpusReader(os.path.join(poetic_edda, "Völuspá", "txt_files", "pos"),
                                 "pos_tagged.txt")
    # print(pos_annotated_text.tagged_words()[:50])
    print(text.raw()[:50])
|
990,438 | e1bde05e76f25fba1f4e8353659ba0e53cbcdc28 | ####Itertools####
from itertools import permutations, product, combinations_with_replacement, combinations
data = ['A', 'B', 'C']
##permutations## order matters, no repeats
result = list(permutations(data, 3))
print(result)
#result = [('A', 'B', 'C'), ('A', 'C', 'B'), ('B', 'A', 'C'), ('B', 'C', 'A'), ('C', 'A', 'B'), ('C', 'B', 'A')]
##product## AA,BB,CC: ok, AB != BA
result = list(product(data, repeat=2))
print(result)
#result = ('A', 'A'), ('A', 'B'), ('A', 'C'), ('B', 'A'), ('B', 'B'), ('B', 'C'), ('C', 'A'), ('C', 'B'), ('C', 'C')]
##combinations_with_replacement## AA,BB,CC: ok, but BA = AB
result = list(combinations_with_replacement(data, 2))
print(result)
#result = [('A', 'A'), ('A', 'B'), ('A', 'C'), ('B', 'B'), ('B', 'C'), ('C', 'C')]
##combinations## AA,BB,CC: not allowed, AB != BA
result = list(combinations(data, 2))
print(result)
#result = [('A', 'B'), ('A', 'C'), ('B', 'C')]
result = list(combinations(data, 3))
print(result)
#result = [('A', 'B', 'C')]
|
990,439 | 4e8973073db152fd867f8185ae7e80808f5c2c3d | from Handler import Verifycode, BaseHandler, Passport, Profile
from tornado.web import RequestHandler
import os
# URL routing table consumed by the tornado Application; most specific API
# routes first, with the catch-all static-file route last.
handler = [
    (r'/api/pitcode', Verifycode.PicCodeHandler),
    (r'/api/smscode', Verifycode.SMSCodeHandler),
    (r'/api/register', Passport.RegisterHandler),
    (r'/api/login', Passport.LoginHandler),
    (r'/api/check_login', Passport.CheckLoginHandler),
    (r'/api/logout', Passport.LogoutHandler),
    (r'/api/profile/avatar', Profile.AvatarHandler),
    (r'/api/profile/name',Profile.NameHandler),
    (r'/api/profile/auth', Profile.AuthHandler),
    (r'/api/profile', Profile.ProfileHandler),
    # Everything else is served from the html/ directory, SPA-style.
    (r'/(.*)', BaseHandler.StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), 'html'), 'default_filename': 'index.html'}),
] |
990,440 | a3992991afd406f454b8cfe74feb08c7a280ca19 |
# Script Name : noform.py
# Author : johnny trevisani
# Created : 20 Dec 2016
# Last Modified : 21 Dec 2016
# Version : 1.0
# Modifications : documentation for initial version
# Description : Prompt user for ratings
def _rating(category):
    """Prompt for one 1-5 rating and return it as a float.

    Prompt text matches the original script exactly (typos included)."""
    return float(input(category + " Rating (1-5):"))

# Collect all sixteen ratings up front; each answer is converted to float
# once at input time instead of repeatedly inside the formula below.
Bg = _rating("BIGOTRY")
Of = _rating("OFFENSIVENESS")
Ap = _rating("APPEARENCE")
E = _rating("EGO")
Bl = _rating("BODY LANGUAGE")
M = _rating("ME MENTIONS")
Ag = _rating("AGGRESSIVE")
Pa = _rating("PASSIVE AGGRESIVE")
H = _rating("HOSTILITY")
S = _rating("SMILE")
C = _rating("CREEPY")
P = _rating("PARANOID")
B = _rating("BRAG")
L = _rating("LIE")
R = _rating("RIGHTEOUSNESS")
Mn = _rating("MANNERS")
# Perform calculated asshole score based on the following formula
#
# A = Bg(3)+Of(2)/Ap+((Ag/Pa)*3)-M*(B+Bl-L)-(C+P)+H(2)+(-E*R)-Mn-S(.5)
A = (Bg * 3 + Of * 2) / Ap + (Ag / Pa * 3) - M * (B + Bl - L) - (C + P) + (H * 2) + (-E * R) - Mn - S * .5
# Display the asshole factor
print('The asshole factor is {0}'.format(A))
|
990,441 | 767ce4ad0c2a84790ea0e69b970e74bebce7d36b | """xmltramp: Make XML documents easily accessible."""
__version__ = "1.22"
__author__ = "Aaron Swartz"
__copyright__ = "(C) 2003 Aaron Swartz. GNU GPL 2"
class Element:
    """A single XML element: tag name, attributes, children and text."""
    def __init__(self, name, attrs=None, children=None):
        self._name = name
        self._attrs = attrs or {}
        self._dir = children or []
        self._text = ''
    def __repr__(self, recursive=0):
        out = "<" + self._name
        if self._attrs.keys():
            pairs = [key + '="' + self._attrs[key] + '"'
                     for key in self._attrs.keys()]
            out += ' ' + ' '.join(pairs)
        out += ">"
        if self._text.strip():
            out += self._text
        if recursive:
            for child in self._dir:
                out += child.__repr__(1)
        # Emit a closing tag only when anything was placed inside it.
        if self._text.strip() or recursive:
            out += "</" + self._name + ">"
        return out
    def __str__(self):
        # Collapse all runs of whitespace in the text content.
        return ' '.join(self._text.split())
    def __getattr__(self, attr):
        # Attribute access finds the first child with that tag name.
        # NOTE: raises KeyError (not AttributeError) on a miss -- kept
        # as-is for backward compatibility with existing callers.
        for child in self._dir:
            if child._name == attr:
                return child
        raise KeyError
    def __getitem__(self, item):
        # Integer index -> child element; string key -> XML attribute.
        if isinstance(item, type(0)):
            return self._dir[item]
        else:
            return self._attrs[item]
from xml.sax.handler import EntityResolver, DTDHandler, ContentHandler, ErrorHandler
class Seeder(EntityResolver, DTDHandler, ContentHandler, ErrorHandler):
    """SAX handler that grows an Element tree; the root lands in .result."""
    def __init__(self):
        # Stack of currently-open elements, innermost last.
        self.stack = []
        ContentHandler.__init__(self)
    def startElement(self, name, attrs):
        self.stack.append(Element(name, attrs))
    def characters(self, ch):
        self.stack[-1]._text += ch
    def endElement(self, name):
        finished = self.stack.pop()
        finished._text = finished._text.strip()
        if not self.stack:
            # Closed the document root: expose it as the parse result.
            self.result = finished
        else:
            self.stack[-1]._dir.append(finished)
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces
def seed(fileobj):
    """Parse XML from a file-like object into an Element tree."""
    handler = Seeder()
    sax_parser = make_parser()
    # Namespace processing off: tag names are used verbatim.
    sax_parser.setFeature(feature_namespaces, 0)
    sax_parser.setContentHandler(handler)
    sax_parser.parse(fileobj)
    return handler.result
def parse(text):
    """Parse XML from a string into an Element tree.

    BUG FIX: the original did ``from StringIO import StringIO`` at module
    scope of the function, which only exists on Python 2. Fall back to
    io.StringIO so the function also works on Python 3.
    """
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
    return seed(StringIO(text))
def load(url):
    """Fetch *url* and parse the response as XML into an Element tree.

    BUG FIX: ``urllib.urlopen`` is Python-2-only; fall back to
    ``urllib.request.urlopen`` on Python 3.
    """
    try:
        from urllib import urlopen  # Python 2
    except ImportError:
        from urllib.request import urlopen  # Python 3
    return seed(urlopen(url))
if __name__ == "__main__":
    # Smoke tests for the Element / parse API.
    d = Element("monkey")
    assert repr(d) == "<monkey>"
    d._dir = [Element("head"), Element("body"), Element("tail", {'type':'long'})]
    assert repr(d[1]) == "<body>"
    assert repr(d.tail) == '<tail type="long">'
    assert d.tail['type'] == "long"
    d = parse('<bing> <bang> <bong>center</bong> </bang> </bing>')
    assert d._name == "bing"
    assert d._text == ''
    assert d.bang.bong._text == "center"
    assert str(d.bang.bong) == "center"
    d = parse('<a>\nbaz\nbiz\n</a>')
    # BUG FIX: these two comparisons discarded their results; they were
    # clearly meant to be assertions like the checks above.
    assert d._text == "baz\nbiz"
    assert str(d) == "baz biz"
    # No guarantees of the this being true if an element
    # contains both text and child elements or there's extra
    # whitespace lying around (hence deliberately not asserted):
    doc = '<a top="1"><b middle="2"><c bottom="3">d</c></b></a>'
    parse(doc).__repr__(1) == doc
|
990,442 | 1a8071b36f28569ddd804c1af9096653ead0c2a1 | from typing import List
from collections import Counter,defaultdict
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count
import queue
class Solution:
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Return all distinct subsets of *nums* (which may contain duplicates).

        Sorting first makes subsets with the same multiset of values
        produce identical tuples, so the set deduplicates for free.
        """
        nums.sort()
        subsets = {()}
        for value in nums:
            # Extend every subset found so far with the current value.
            subsets |= {prefix + (value,) for prefix in subsets}
        return [list(t) for t in subsets]
# Ad-hoc driver: run subsetsWithDup on two sample inputs and compare the
# result (order-insensitively) against the expected LeetCode 90 answers.
sol = Solution()
# input
nums = [1,2,2]
# output
output = sol.subsetsWithDup(nums)
# answer
answer = [[1,2],[2],[1],[1,2,2],[2,2],[]]
print(output, answer, sorted(answer) == sorted(output))
# input
nums = [0]
# output
output = sol.subsetsWithDup(nums)
# answer
answer = [[0],[]]
print(output, answer, sorted(answer) == sorted(output))
|
990,443 | 8b2fc40cf717aaacbb5f6568ba2dd38db7ae8ef5 | import pandas as pd
import sqlite3
from Prepare_file_10min_interval import process_dataframe_insert_10min_interval
def add_weather_data() :
    """Join 10-minute-interval charger records with weather observations.

    Pipeline:
      1. Load the prepared charging CSV, parse timestamps, drop
         duplicates and rows missing key attributes.
      2. Expand records to 10-minute intervals.
      3. In an in-memory SQLite database, join each record with the
         weather observation whose [DTG, DTG_New] window contains the
         record timestamp at the same LOCATION.
      4. Fetch the join in batches of 100,000 rows, renaming and
         trimming columns.

    Returns the last processed batch as a DataFrame, or an empty
    DataFrame when the join yields no rows (the original raised
    UnboundLocalError in that case). NOTE(review): returning only the
    final batch mirrors the original behaviour (the per-batch CSV write
    is commented out) -- confirm callers do not expect the full result.
    """
    # read csv file number of rows including the column name 2502762
    df = pd.read_csv(r'D:/Server Code/Required_csvs'
                     r'/final_1.csv',
                     sep=',', usecols=['connectorID', 'type', 'parkID', 'timestamp', 'status',
                                       'municipality_name', 'population_density_inhabitants_per_kmsq',
                                       'station_accesstype_pub_priv', 'LOCATION', 'join_NAME', 'join_type',
                                       'distance_nearest_highway', 'IntensityofCars'],
                     low_memory=False)
    # convert the timestamp to the datetime format
    df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S')
    df = df.drop_duplicates()
    # Connectors without these attributes cannot be joined/analysed.
    df = df.dropna(
        subset=['type', 'station_accesstype_pub_priv', 'join_type', 'population_density_inhabitants_per_kmsq'])
    print(df)
    # ouput has 1801886 rows
    final_table = process_dataframe_insert_10min_interval.include_intervals_one(df)
    weather_data = pd.read_csv(
        r"D:/Server Code/Required_csvs/Weather/weather_data_8_code_1.csv", encoding='utf-8')
    weather_data['DTG'] = pd.to_datetime(weather_data['DTG'], format="%Y-%m-%d %H:%M:%S")
    weather_data['DTG_New'] = pd.to_datetime(weather_data['DTG_New'], format="%Y-%m-%d %H:%M:%S")
    final_table['timestamp'] = pd.to_datetime(final_table['timestamp'], format="%Y-%m-%d %H:%M:%S")
    print(final_table)
    conn = sqlite3.connect(':memory:')
    f = open('D:/Server Code/Output/weather_data_added_1.csv',
             'w',
             encoding='utf-8', newline='')
    cur = conn.cursor()
    # BUG FIX: start from an empty frame so a join producing no rows
    # returns cleanly instead of raising UnboundLocalError.
    df_1 = pd.DataFrame()
    try:
        weather_data.to_sql('weather_data', conn, index=False)
        final_table.to_sql('final_table', conn, index=False)
        sqlcode = '''select c.* ,co.* from final_table c inner join weather_data co where (c.timestamp >= co.DTG and c.timestamp <= co.DTG_New) and (c.LOCATION = co.LOCATION) '''
        cur.execute(sqlcode)
        print("Executed query")
        # Fetch the joined rows in batches to bound memory use.
        while True :
            df = pd.DataFrame(cur.fetchmany(100000))
            # We are done if there are no data
            if len(df) == 0 :
                break
            # fetchmany yields positional columns; restore the names.
            df.rename(
                columns={0 : 'timestamp', 1 : 'connectorID', 2 : 'type', 3 : 'parkID', 4 : 'status', 5 : 'municipality',
                         6 : 'population_density', 7 : 'station_accesstype', 8 : 'join_LOCATION', 9 : 'join_NAME',
                         10 : 'road_type',
                         11 : 'distance_nearest_highway', 12 : 'intensity_of_cars', 13 : 'DTG', 14 : 'LOCATION',
                         15 : 'NAME', 16 : 'LATITUDE', 17 : 'LONGITUDE', 18 : 'weather_current', 19 : 'WW_PAST_10',
                         20 : 'DTG_New'}, inplace=True)
            # Drop the join keys and station metadata duplicated by the join.
            df = df.drop(
                columns=['LOCATION', 'NAME', 'LATITUDE', 'LONGITUDE', 'join_LOCATION', 'join_NAME', 'WW_PAST_10',
                         'DTG_New', 'DTG'], axis=1)
            df_1 = df
            # df.to_csv(f, index=False, encoding='utf-8')
        print('Done')
    finally:
        # BUG FIX: always release the output file and database resources,
        # even when the join fails partway through.
        f.close()
        cur.close()
        conn.close()
    return df_1
|
990,444 | 9440b51b1dc66b81fd56c8761311550afed69fff | # Generated by Django 2.2.2 on 2019-06-24 14:39
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines the StreamField block
    # structure of FlexPage.body (intro / product / icon-list / rich-text
    # blocks). Generated code -- avoid hand-editing beyond comments.

    dependencies = [
        ('flexpage', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='flexpage',
            name='body',
            field=wagtail.core.fields.StreamField([('intro_block', wagtail.core.blocks.StructBlock([('h1', wagtail.core.blocks.CharBlock(required=True)), ('content', wagtail.core.blocks.TextBlock())])), ('product_block', wagtail.core.blocks.StructBlock([('products', wagtail.core.blocks.ListBlock(wagtail.core.blocks.PageChooserBlock('formpage.CatalogPage')))])), ('BlockWithIcon', wagtail.core.blocks.StructBlock([('h2', wagtail.core.blocks.CharBlock(required=True)), ('icon_block', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock()), ('h3', wagtail.core.blocks.CharBlock()), ('p', wagtail.core.blocks.TextBlock())])))])), ('Text', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.RichTextBlock())]))], null=True),
        ),
    ]
|
990,445 | 8a0e4e76f144ae0cd3bf5916da557bd4f97edeb0 | # Function to check wheter some account is registered in the bank
# Function to check wheter some account is registered in the bank
def exist(bank, account: int) -> bool:
    """Return True when *account* is a valid index into bank's client list.

    Account numbers are assigned as positions in Bank.clients, so a
    simple range check suffices.
    """
    # IDIOM: return the comparison directly instead of if/else True/False.
    return 0 <= account < len(bank.getClients())
class Bank:
    """Holds registered clients and executes account operations.

    Account numbers are indices into the internal client list, assigned
    at creation time. Monetary values are floats (R$). Clients are
    expected to expose get/set methods for account, balance and a
    statement of operations.
    """

    def __init__(self) -> None:
        self.clients = []

    def getClients(self):
        """Return the internal list of registered clients."""
        return self.clients

    def createAccount(self, client) -> None:
        """Register *client*: next free account number, zero balance."""
        client.setAccount(len(self.clients))
        client.setBalance(0.0)
        self.clients.append(client)

    def transferFunds(self, command, client) -> None:
        """Move command.getValue() from *client* to the beneficiary account.

        Raises Exception when the beneficiary does not exist, is the
        sender's own account, funds are insufficient, or the value is
        not positive. On success both balances are updated and the
        operation is logged in the sender's statement.
        """
        otherAccount = command.getBeneficiaryAccount()
        value = command.getValue()
        if not exist(self, otherAccount):
            raise Exception("Beneficiary account does not exist")
        elif otherAccount == client.getAccount():
            raise Exception("Can't transfer to your own account")
        elif value > client.getBalance():
            # NOTE(review): "Insuficient" is a typo in the user-facing
            # message; left unchanged in case callers match on the text.
            raise Exception("Insuficient funds to make this transaction")
        elif value <= 0:
            raise Exception("Transfer value has to be a positive float")
        else:
            client.setBalance(client.getBalance() - value)
            self.clients[otherAccount].setBalance(self.clients[otherAccount].getBalance() + value)
            client.insertOperation(command)
            print("\nR${0:.2f} were transfered from your account to account {1}".format(value, otherAccount))
            print("Current Balance : R${:.2f}".format(client.getBalance()))

    def withdrawMoney(self, command, client) -> None:
        """Withdraw command.getValue() from *client*'s balance and log it.

        Raises Exception on insufficient funds or non-positive values.
        """
        value = command.getValue()
        if value > client.getBalance():
            raise Exception("Insuficient funds")
        elif value <= 0:
            raise Exception("Request should be a positive value")
        else:
            client.setBalance(client.getBalance() - value)
            client.insertOperation(command)
            print("\nR${:.2f} were withdrawn from your account".format(value))
            print("Current Balance : R${:.2f}".format(client.getBalance()))

    def depositMoney(self, command, client) -> None:
        """Add command.getValue() to *client*'s balance and log it.

        Raises Exception on non-positive values.
        """
        value = command.getValue()
        if value <= 0:
            raise Exception("Deposit must be a positive value")
        client.setBalance(client.getBalance() + value)
        client.insertOperation(command)
        print("\nR${:.2f} were deposited in your account".format(value))
        print("Current Balance : R${:.2f}".format(client.getBalance()))

    def checkBalance(self, command, client) -> None:
        """Print *client*'s current balance (*command* is unused)."""
        print("\nCurrent Balance : R${:.2f}".format(client.getBalance()))

    def checkStatement(self, command, client) -> None:
        """Print every logged operation for *client*, then the balance.

        Operations are dispatched on their class name; an unknown type
        raises Exception.
        """
        print("\n----------------------STATEMENT-----------------------")
        for operation in client.getStatement():
            if operation.__class__.__name__ == "DepositMoney":
                print("Money Deposited, Value = R${:.2f}".format(operation.getValue()))
            elif operation.__class__.__name__ == "WithdrawMoney":
                print("Money Withdrawn, Value = R${:.2f}".format(operation.getValue()))
            elif operation.__class__.__name__ == "TransferFunds":
                print("Transaction, Beneficiary Account: {0}, Value = R${1:.2f}".format(operation.getBeneficiaryAccount(), operation.getValue()))
            else:
                raise Exception("Exists at least one registered operation that shouldn't be part of the client's statement")
        print("\nCurrent Balance : R${:.2f}".format(client.getBalance()))
        print("------------------------------------------------------")
990,446 | 74140a43e6f7ac7bc7cca289f1c620bf9b075db3 | # -*- coding: utf-8 -*-
from xml.dom.minidom import parseString
from langdetect import detect as language_detect
import pycountry
# Input feed files, one per parser variant (A/B/C schemas differ).
FILE_NAME_A = 'feed_a.xml'
FILE_NAME_B = 'feed_b.xml'
FILE_NAME_C = 'feed_c.xml'
# Encoding used when reading every feed file.
DEFAULT_ENCODING = 'utf-8'
class BaseParser(object):
    """Shared XML job-feed parsing: read a file, walk job elements,
    print each job's URL and the detected language of its description.

    Subclasses set job_tag_name / url_tag_name and override
    get_description() for their feed's schema.
    """

    def __init__(self, file_name):
        BaseParser.output(file_name)
        with open(file_name, 'r', encoding=DEFAULT_ENCODING) as f:
            self.file_content = f.read()
        # Subclasses assign the real tag names after this runs.
        self.job_tag_name, self.url_tag_name = '', ''

    def clear_text(self):
        # Normalise whitespace before handing the text to the DOM parser.
        self.file_content = self.file_content.replace(' ', ' ')

    def get_description(self, job):
        """Return the job description text; overridden per feed schema."""
        return ''

    def parse(self):
        """Parse the feed and report URL + language for every job."""
        self.clear_text()
        dom = parseString(self.file_content)
        jobs = dom.getElementsByTagName(self.job_tag_name)
        # PERF FIX: enumerate instead of jobs.index(job) -- the original
        # rescanned the node list for every job (accidental O(n^2)).
        for job_number, job in enumerate(jobs, start=1):
            url = BaseParser.get_text(
                job.getElementsByTagName(self.url_tag_name)[0]).strip()
            # NOTE(review): this replace is a no-op as written; it looks
            # like a mangled '&amp;' -> '&' unescape. Left unchanged.
            url = url.replace('&', '&')
            description = self.get_description(job)
            language_code = language_detect(description)
            BaseParser.output(' job #{}:'.format(job_number))
            BaseParser.output(' url: {}'.format(url))
            BaseParser.output(' language: {} ({})'.format(
                language_code, BaseParser.get_language_name(language_code)))

    @staticmethod
    def output(text):
        print(text)

    @staticmethod
    def get_text(node_list):
        """Concatenate the direct text children of a DOM node."""
        return ''.join(node.data for node in node_list.childNodes
                       if node.nodeType == node.TEXT_NODE)

    @staticmethod
    def get_language_name(code):
        """Map an ISO 639-1 code to its English language name."""
        return pycountry.languages.get(alpha_2=code).name
class ParserA(BaseParser):
    """Parser for feed A: <job> elements with <url> and <description>."""

    def __init__(self, file_name):
        super(ParserA, self).__init__(file_name)
        self.job_tag_name = 'job'
        self.url_tag_name = 'url'

    def get_description(self, job):
        description_node = job.getElementsByTagName('description')[0]
        return BaseParser.get_text(description_node).strip()
class ParserB(BaseParser):
    """Parser for feed B: HR-XML style <PositionOpening> records."""

    def __init__(self, file_name):
        super(ParserB, self).__init__(file_name)
        self.job_tag_name = 'PositionOpening'
        self.url_tag_name = 'InternetReference'

    def get_description(self, job):
        # The description text sits in the second child node of the
        # FormattedPositionDescription element.
        container = job.getElementsByTagName('FormattedPositionDescription')[0]
        return container.childNodes[1].firstChild.nodeValue.strip()
class ParserC(BaseParser):
    """Parser for feed C: <stellenangebot> records with a <candidate> body."""

    def __init__(self, file_name):
        super(ParserC, self).__init__(file_name)
        self.job_tag_name = 'stellenangebot'
        self.url_tag_name = 'url'

    def get_description(self, job):
        candidate = job.getElementsByTagName('candidate')[0]
        return candidate.firstChild.nodeValue
def main():
    """Run every feed parser against its input file, in order A, B, C."""
    for parser_class, file_name in ((ParserA, FILE_NAME_A),
                                    (ParserB, FILE_NAME_B),
                                    (ParserC, FILE_NAME_C)):
        parser_class(file_name).parse()


if __name__ == '__main__':
    main()
|
990,447 | 5d0668602b312f808d0d83f93f81607c3e86b85d | #!/usr/bin/env python
import paho.mqtt.client as paho # pip install paho-mqtt
import time
import logging
import sys
import pigpio
from config import *
# MQTT topic prefix for every value published by this connector.
MQTT_PREFIX = 'gas'
# Reed-switch polling interval in seconds.
FREQUENCY_S = 1
# BCM GPIO number the gas meter's reed switch is wired to.
GAS_GPIO = 27
# Absolute meter reading in cubic metres; restored by read_state().
m3abs = 0.0
# pigpio handle, assigned by init_gpio().
gpio = None
# Last observed reed level (1 = contact open, given the pull-up).
reed_state_old = 1
def read_state():
    """Restore the absolute gas-meter reading from the local state file."""
    global m3abs
    with open('local-gas-connector.state') as state_file:
        first_line = state_file.readline()
    m3abs = float(first_line.strip())
    logging.info("Read initial value: {:.2f}".format(m3abs))
def init_gpio():
    """Connect to the pigpio daemon and configure the reed-switch input.

    Sets the module-level ``gpio`` handle. The pin is pulled up, so the
    line reads 1 while the reed contact is open and 0 when closed.
    """
    global gpio
    gpio = pigpio.pi()
    gpio.set_mode(GAS_GPIO, pigpio.INPUT)
    gpio.set_pull_up_down(GAS_GPIO, pigpio.PUD_UP)
    logging.info('GPIO initialized')
def data():
    """Sample the reed switch and return values to publish.

    Each open->closed transition of the reed contact represents 0.01 m3
    of gas. On such a transition the returned dict carries the updated
    'volume' and a 'tick'; otherwise it is empty.
    """
    global reed_state_old
    global m3abs
    values = {}
    reed_state = gpio.read(GAS_GPIO)
    if reed_state == 1:
        logging.debug("Reed state open")
        reed_state_old = reed_state
    else:
        logging.debug("Reed state closed")
        # Only count the edge, not every poll while the contact is closed.
        if reed_state != reed_state_old:
            reed_state_old = reed_state
            m3abs += 0.01
            values['volume'] = "{:.2f}".format(m3abs)
            values['tick'] = '1'
    logging.debug("m3abs: {:.2f}".format(m3abs))
    return values
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    # Restore the persisted meter reading and set up the reed-switch GPIO.
    read_state()
    init_gpio()
    mqttc = paho.Client('local-gas-connector', clean_session=True)
    # mqttc.enable_logger()
    # Last-will message so subscribers notice an unclean disconnect.
    mqttc.will_set("{}/connectorstatus".format(MQTT_PREFIX), "Local Gas Connector: LOST_CONNECTION", 0, retain=True)
    mqttc.connect(BROKER_HOST, BROKER_PORT, 60)
    logging.info("Connected to {}:{}".format(BROKER_HOST, BROKER_PORT))
    mqttc.publish("{}/connectorstatus".format(MQTT_PREFIX), "Local Gas Connector: ON-LINE", retain=True)
    # initial value
    # NOTE(review): "Pubish" is a typo in the debug messages below; left
    # unchanged here since a doc-only edit must not touch runtime strings.
    (result, mid) = mqttc.publish("{}/{}".format(MQTT_PREFIX, 'volume'), str("{:.2f}".format(m3abs)), 0, retain=True)
    logging.debug("Pubish Result: {} MID: {} for {}: {}".format(result, mid, 'volume', "{:.2f}".format(m3abs))) # noqa E501
    mqttc.loop_start()
    # Poll the reed switch once per FREQUENCY_S and publish any new values.
    while True:
        try:
            values = data()
            for k, v in values.items():
                (result, mid) = mqttc.publish("{}/{}".format(MQTT_PREFIX, k), str(v), 0, retain=True)
                logging.debug("Pubish Result: {} MID: {} for {}: {}".format(result, mid, k, v)) # noqa E501
            time.sleep(FREQUENCY_S)
        except KeyboardInterrupt:
            # Ctrl-C: fall through to the orderly shutdown below.
            break
        except Exception:
            raise
    mqttc.publish("{}/connectorstatus".format(MQTT_PREFIX), "Local Gas Connector: OFF-LINE", retain=True)
    mqttc.disconnect()
    mqttc.loop_stop() # waits, until DISCONNECT message is sent out
    logging.info("Disconnected from to {}:{}".format(BROKER_HOST, BROKER_PORT))
|
990,448 | f5d1abb9f6297d222c56b3b297033bee812e57fb | """
As a henchman on Commander Lambda's space station, you're expected to be resourceful, smart, and a quick thinker. It's not easy building a doomsday device and capturing bunnies
at the same time, after all! In order to make sure that everyone working for her is sufficiently quick-witted, Commander Lambda has installed new flooring outside the henchman
dormitories. It looks like a chessboard, and every morning and evening you have to solve a new movement puzzle in order to cross the floor. That would be fine if you got to be
the rook or the queen, but instead, you have to be the knight. Worse, if you take too much time solving the puzzle, you get "volunteered" as a test subject for the LAMBCHOP
doomsday device!
To help yourself get to and from your bunk every day, write a function called answer(src, dest) which takes in two parameters: the source square, on which you start, and the
destination square, which is where you need to land to solve the puzzle. The function should return an integer representing the smallest number of moves it will take for you to
travel from the source square to the destination square using a chess knight's moves (that is, two squares in any direction immediately followed by one square perpendicular to
that direction, or vice versa, in an "L" shape). Both the source and destination squares will be an integer between 0 and 63, inclusive, and are numbered like the example
chessboard below:
-------------------------
| 0| 1| 2| 3| 4| 5| 6| 7|
-------------------------
| 8| 9|10|11|12|13|14|15|
-------------------------
|16|17|18|19|20|21|22|23|
-------------------------
|24|25|26|27|28|29|30|31|
-------------------------
|32|33|34|35|36|37|38|39|
-------------------------
|40|41|42|43|44|45|46|47|
-------------------------
|48|49|50|51|52|53|54|55|
-------------------------
|56|57|58|59|60|61|62|63|
-------------------------
"""
from collections import deque
class Node:
    """
    BFS state for the knight search: board coordinates (x, y) plus the
    distance travelled from the source. Equality and hashing include
    dist, so the same square reached at different depths is a distinct
    state (as in the original implementation).
    """

    def __init__(self, x, y, dist=0):
        self.x = x
        self.y = y
        self.dist = dist

    def _key(self):
        # Single source of truth for hashing and equality.
        return (self.x, self.y, self.dist)

    def __hash__(self):
        return hash(self._key())

    def __eq__(self, other):
        return self._key() == (other.x, other.y, other.dist)
def solution(src, dest):
    """Return the minimum number of knight moves from src to dest.

    src and dest are square indices 0..63 on an 8x8 board, numbered
    row-major from the top-left. The result is printed and returned.
    """
    # Constant value for square board dimensions
    N = 8
    # PERF FIX: squares are numbered row-major, so coordinates follow
    # directly from divmod -- no 8x8 lookup table and no double scan.
    src_node = Node(*divmod(src, N))
    dest_node = Node(*divmod(dest, N))
    # Search for the shortest path from src to dest and compute its length
    path_length = search(src_node, dest_node, N)
    print(path_length)
    return path_length
def neighbours(x, y, N):
    """Return the set of on-board squares a knight can reach from (x, y).

    A knight moves two squares in one direction and one square in the
    perpendicular direction; moves landing outside the N x N board are
    discarded.
    """
    deltas = ((1, 2), (1, -2), (-1, 2), (-1, -2),
              (2, 1), (2, -1), (-2, 1), (-2, -1))
    return {(x + dx, y + dy)
            for dx, dy in deltas
            if 0 <= x + dx < N and 0 <= y + dy < N}
def search(src, dest, N):
    """BFS over knight moves; return the shortest distance from src to dest.

    Returns 0 when no path exists (note 0 is also the answer when
    src == dest, as in the original). Uses deque for O(1) pops from the
    front.
    """
    queue = deque([src])
    # PERF FIX: key visited states by board position only. BFS reaches a
    # square for the first time with its minimal distance, so the
    # original dist-inclusive Node hash let the same square be
    # re-expanded at every greater depth -- pure wasted work. The
    # returned distances are unchanged.
    explored = set()
    while queue:
        node = queue.popleft()
        # Destination reached: BFS guarantees this distance is minimal.
        if node.x == dest.x and node.y == dest.y:
            return node.dist
        if (node.x, node.y) in explored:
            continue
        explored.add((node.x, node.y))
        for nx, ny in neighbours(node.x, node.y, N):
            queue.append(Node(nx, ny, node.dist + 1))
    # If no path exists return a length of 0
    return 0
if __name__ == '__main__':
    # Test cases (each prints its result): adjacent squares, a mid-board
    # hop, and corner-to-corner.
    solution(0, 1)
    solution(19, 36)
    solution(0, 63)
|
990,449 | 3220560e50edbe6c223486c8adbcdda00bf2ddc3 | import torch.nn as nn
import torch
import math
import pdb
class ConvBasic(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU, with a bias-free convolution
    (the batch norm supplies the affine shift)."""

    def __init__(self, nIn, nOut, kernel=3, stride=1,
                 padding=1):
        super(ConvBasic, self).__init__()
        conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride,
                         padding=padding, bias=False)
        self.net = nn.Sequential(conv, nn.BatchNorm2d(nOut), nn.ReLU(True))

    def forward(self, x):
        return self.net(x)
class ConvBN(nn.Module):
    """MSDNet conv unit: optional 1x1 bottleneck, then a 3x3 conv.

    :param nIn: input channels
    :param nOut: output channels
    :param type: 'normal' keeps the spatial size; 'down' halves it
    :param bottleneck: prepend a 1x1 bottleneck conv when True
    :param bnWidth: bottleneck width cap, in multiples of nOut
    """

    def __init__(self, nIn, nOut, type: str, bottleneck,
                 bnWidth):
        super(ConvBN, self).__init__()
        stages = []
        nInner = nIn
        if bottleneck is True:
            # 1x1 reduction capped at bnWidth * nOut channels.
            nInner = min(nInner, bnWidth * nOut)
            stages += [
                nn.Conv2d(nIn, nInner, kernel_size=1, stride=1, padding=0,
                          bias=False),
                nn.BatchNorm2d(nInner),
                nn.ReLU(True),
            ]
        if type == 'normal':
            stride = 1
        elif type == 'down':
            stride = 2
        else:
            raise ValueError
        stages += [
            nn.Conv2d(nInner, nOut, kernel_size=3, stride=stride, padding=1,
                      bias=False),
            nn.BatchNorm2d(nOut),
            nn.ReLU(True),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
class ConvDownNormal(nn.Module):
    """Fuse a finer scale with the current scale.

    forward takes x = [finer, current] and concatenates along channels:
    [current features, downsampled conv of finer, same-scale conv of
    current]. Each conv produces nOut // 2 channels.
    """

    def __init__(self, nIn1, nIn2, nOut, bottleneck, bnWidth1, bnWidth2):
        super(ConvDownNormal, self).__init__()
        half = nOut // 2
        self.conv_down = ConvBN(nIn1, half, 'down', bottleneck, bnWidth1)
        self.conv_normal = ConvBN(nIn2, half, 'normal', bottleneck, bnWidth2)

    def forward(self, x):
        finer, current = x[0], x[1]
        return torch.cat([current,
                          self.conv_down(finer),
                          self.conv_normal(current)], dim=1)
class ConvNormal(nn.Module):
    """Same-scale unit: concatenate the input with its ConvBN('normal')
    output along the channel dimension (dense connectivity)."""

    def __init__(self, nIn, nOut, bottleneck, bnWidth):
        super(ConvNormal, self).__init__()
        self.conv_normal = ConvBN(nIn, nOut, 'normal',
                                  bottleneck, bnWidth)

    def forward(self, x):
        # Accept a bare tensor or a single-element list.
        if not isinstance(x, list):
            x = [x]
        current = x[0]
        return torch.cat([current, self.conv_normal(current)], dim=1)
class MSDNFirstLayer(nn.Module):
    """Build the initial multi-scale feature pyramid from the input.

    Scale 0 keeps (cifar) or quarter-samples (ImageNet: stride-2 conv +
    pool) the resolution; each further scale halves the previous one
    with a stride-2 ConvBasic. forward returns one tensor per scale.
    """

    def __init__(self, nIn, nOut, args):
        super(MSDNFirstLayer, self).__init__()
        self.layers = nn.ModuleList()
        if args.data.startswith('cifar'):
            self.layers.append(ConvBasic(nIn, nOut * args.grFactor[0],
                                         kernel=3, stride=1, padding=1))
        elif args.data == 'ImageNet':
            stem = nn.Sequential(
                nn.Conv2d(nIn, nOut * args.grFactor[0], 7, 2, 3),
                nn.BatchNorm2d(nOut * args.grFactor[0]),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(3, 2, 1))
            self.layers.append(stem)

        nIn = nOut * args.grFactor[0]
        for scale in range(1, args.nScales):
            self.layers.append(ConvBasic(nIn, nOut * args.grFactor[scale],
                                         kernel=3, stride=2, padding=1))
            nIn = nOut * args.grFactor[scale]

    def forward(self, x):
        res = []
        for layer in self.layers:
            x = layer(x)
            res.append(x)
        return res
class MSDNLayer(nn.Module):
    """One MSDNet layer operating on a multi-scale feature list.

    inScales / outScales give the number of scales consumed / produced
    (defaulting to args.nScales). When outScales < inScales the coarsest
    input scale is discarded; the first kept scale then uses
    ConvDownNormal on the discarded-and-kept pair, otherwise ConvNormal.
    Every remaining scale fuses the next-finer and same-scale inputs via
    ConvDownNormal. forward maps a list of per-scale tensors to a list
    of outScales tensors.
    """

    def __init__(self, nIn, nOut, args, inScales=None, outScales=None):
        super(MSDNLayer, self).__init__()
        self.nIn = nIn
        self.nOut = nOut
        self.inScales = inScales if inScales is not None else args.nScales
        self.outScales = outScales if outScales is not None else args.nScales

        self.nScales = args.nScales
        # discard > 0 means the coarsest input scale is dropped here.
        self.discard = self.inScales - self.outScales

        # Index of the first produced scale within the global scale list.
        self.offset = self.nScales - self.outScales
        self.layers = nn.ModuleList()

        if self.discard > 0:
            nIn1 = nIn * args.grFactor[self.offset - 1]
            nIn2 = nIn * args.grFactor[self.offset]
            _nOut = nOut * args.grFactor[self.offset]
            self.layers.append(ConvDownNormal(nIn1, nIn2, _nOut, args.bottleneck,
                                              args.bnFactor[self.offset - 1],
                                              args.bnFactor[self.offset]))
        else:
            self.layers.append(ConvNormal(nIn * args.grFactor[self.offset],
                                          nOut * args.grFactor[self.offset],
                                          args.bottleneck,
                                          args.bnFactor[self.offset]))

        for i in range(self.offset + 1, self.nScales):
            nIn1 = nIn * args.grFactor[i - 1]
            nIn2 = nIn * args.grFactor[i]
            _nOut = nOut * args.grFactor[i]
            self.layers.append(ConvDownNormal(nIn1, nIn2, _nOut, args.bottleneck,
                                              args.bnFactor[i - 1],
                                              args.bnFactor[i]))

    def forward(self, x):
        # Pair each output scale with its (finer, same-scale) inputs; the
        # finest kept scale has no finer neighbour unless a scale was
        # discarded above it.
        if self.discard > 0:
            inp = []
            for i in range(1, self.outScales + 1):
                inp.append([x[i - 1], x[i]])
        else:
            inp = [[x[0]]]
            for i in range(1, self.outScales):
                inp.append([x[i - 1], x[i]])

        res = []
        for i in range(self.outScales):
            res.append(self.layers[i](inp[i]))

        return res
class ParallelModule(nn.Module):
    """
    Apply the i-th submodule to the i-th input tensor, like luatorch's
    Parallel Table:
    input: N tensors, network: N modules, output: N tensors.
    """

    def __init__(self, parallel_modules):
        super(ParallelModule, self).__init__()
        self.m = nn.ModuleList(parallel_modules)

    def forward(self, x):
        # Indexing self.m (rather than zipping) preserves the original
        # IndexError if more inputs than modules are supplied.
        return [self.m[i](tensor) for i, tensor in enumerate(x)]
class ClassifierModule(nn.Module):
    """Classification head: run feature extractor m on the coarsest
    (last) scale, flatten, then apply a linear classifier."""

    def __init__(self, m, channel, num_classes):
        super(ClassifierModule, self).__init__()
        self.m = m
        self.linear = nn.Linear(channel, num_classes)

    def forward(self, x):
        features = self.m(x[-1])
        flat = features.view(features.size(0), -1)
        return self.linear(flat)
class MSDNet(nn.Module):
    """Multi-Scale Dense Network with early-exit classifiers.

    The network is a sequence of args.nBlocks blocks of MSDNLayers; a
    classifier is attached after each block, so forward returns one
    prediction per block (anytime inference). Construction is driven by
    args: nScales, grFactor, bnFactor, prune ('min'/'max'), reduction,
    step/stepmode/base, growthRate, nChannels, data.
    """

    def __init__(self, args):
        super(MSDNet, self).__init__()
        self.blocks = nn.ModuleList()
        self.classifier = nn.ModuleList()
        self.nBlocks = args.nBlocks
        # Number of MSDNLayers per block: constant ('even') or growing.
        self.steps = [args.base]
        self.args = args

        n_layers_all, n_layer_curr = args.base, 0
        for i in range(1, self.nBlocks):
            self.steps.append(args.step if args.stepmode == 'even'
                              else args.step * i + 1)
            n_layers_all += self.steps[-1]

        print("building network of steps: ")
        print(self.steps, n_layers_all)

        nIn = args.nChannels
        for i in range(self.nBlocks):
            print(' ********************** Block {} '
                  ' **********************'.format(i + 1))
            m, nIn = \
                self._build_block(nIn, args, self.steps[i],
                                  n_layers_all, n_layer_curr)
            self.blocks.append(m)
            n_layer_curr += self.steps[i]

            # One early-exit classifier per block, sized for the dataset.
            # Note: 'cifar100' must be tested before 'cifar10' since
            # startswith('cifar10') also matches 'cifar100'.
            if args.data.startswith('cifar100'):
                self.classifier.append(
                    self._build_classifier_cifar(nIn * args.grFactor[-1], 100))
            elif args.data.startswith('cifar10'):
                self.classifier.append(
                    self._build_classifier_cifar(nIn * args.grFactor[-1], 10))
            elif args.data == 'ImageNet':
                self.classifier.append(
                    self._build_classifier_imagenet(nIn * args.grFactor[-1], 1000))
            else:
                raise NotImplementedError

        for m in self.blocks:
            if hasattr(m, '__iter__'):
                for _m in m:
                    self._init_weights(_m)
            else:
                self._init_weights(m)

        for m in self.classifier:
            if hasattr(m, '__iter__'):
                for _m in m:
                    self._init_weights(_m)
            else:
                self._init_weights(m)

    def _init_weights(self, m):
        """He-style init for convs; unit/zero init for BN; zero biases."""
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.bias.data.zero_()

    def _build_block(self, nIn, args, step, n_layer_all, n_layer_curr):
        """Build one block of `step` MSDNLayers (plus the stem for the
        first block) and return (block, output channel count)."""

        layers = [MSDNFirstLayer(3, nIn, args)] \
            if n_layer_curr == 0 else []
        for i in range(step):
            n_layer_curr += 1
            # Decide how many scales this layer consumes/produces under
            # the chosen pruning schedule.
            inScales = args.nScales
            outScales = args.nScales
            if args.prune == 'min':
                inScales = min(args.nScales, n_layer_all - n_layer_curr + 2)
                outScales = min(args.nScales, n_layer_all - n_layer_curr + 1)
            elif args.prune == 'max':
                interval = math.ceil(1.0 * n_layer_all / args.nScales)
                inScales = args.nScales - math.floor(1.0 * (max(0, n_layer_curr - 2)) / interval)
                outScales = args.nScales - math.floor(1.0 * (n_layer_curr - 1) / interval)
            else:
                raise ValueError

            layers.append(MSDNLayer(nIn, args.growthRate, args, inScales, outScales))
            print('|\t\tinScales {} outScales {} inChannels {} outChannels {}\t\t|'.format(inScales, outScales, nIn, args.growthRate))

            nIn += args.growthRate
            # Channel-reducing transition layers, placed either whenever a
            # scale is dropped ('max') or at 1/3 and 2/3 depth ('min').
            if args.prune == 'max' and inScales > outScales and \
                    args.reduction > 0:
                offset = args.nScales - outScales
                layers.append(
                    self._build_transition(nIn, math.floor(1.0 * args.reduction * nIn),
                                           outScales, offset, args))
                _t = nIn
                nIn = math.floor(1.0 * args.reduction * nIn)
                print('|\t\tTransition layer inserted! (max), inChannels {}, outChannels {}\t|'.format(_t, math.floor(1.0 * args.reduction * _t)))
            elif args.prune == 'min' and args.reduction > 0 and \
                    ((n_layer_curr == math.floor(1.0 * n_layer_all / 3)) or
                     n_layer_curr == math.floor(2.0 * n_layer_all / 3)):
                offset = args.nScales - outScales
                layers.append(self._build_transition(nIn, math.floor(1.0 * args.reduction * nIn),
                                                     outScales, offset, args))

                nIn = math.floor(1.0 * args.reduction * nIn)
                print('|\t\tTransition layer inserted! (min)\t|')
            print("")

        return nn.Sequential(*layers), nIn

    def _build_transition(self, nIn, nOut, outScales, offset, args):
        """Per-scale 1x1 convs that shrink channels from nIn to nOut."""
        net = []
        for i in range(outScales):
            net.append(ConvBasic(nIn * args.grFactor[offset + i],
                                 nOut * args.grFactor[offset + i],
                                 kernel=1, stride=1, padding=0))
        return ParallelModule(net)

    def _build_classifier_cifar(self, nIn, num_classes):
        """Classifier head for 32x32 inputs: two stride-2 convs + pool."""
        interChannels1, interChannels2 = 128, 128
        conv = nn.Sequential(
            ConvBasic(nIn, interChannels1, kernel=3, stride=2, padding=1),
            ConvBasic(interChannels1, interChannels2, kernel=3, stride=2, padding=1),
            nn.AvgPool2d(2),
        )
        return ClassifierModule(conv, interChannels2, num_classes)

    def _build_classifier_imagenet(self, nIn, num_classes):
        """Classifier head for ImageNet-sized inputs (keeps nIn channels)."""
        conv = nn.Sequential(
            ConvBasic(nIn, nIn, kernel=3, stride=2, padding=1),
            ConvBasic(nIn, nIn, kernel=3, stride=2, padding=1),
            nn.AvgPool2d(2)
        )
        return ClassifierModule(conv, nIn, num_classes)

    def forward(self, x):
        """Return a list with one prediction per block (early exits)."""
        res = []
        for i in range(self.nBlocks):
            x = self.blocks[i](x)
            res.append(self.classifier[i](x))
        return res
|
990,450 | c6321faa13d61951d91ace37c3b88272dd877587 | import numpy as np
import scipy.stats
import pytest
from astropy.io.fits import getdata
# Optional dependency flag: the magnitude tests below are skipped when
# specutils is not importable.
try:
    import specutils
except ImportError:
    HAS_SPECUTILS = False
else:
    HAS_SPECUTILS = True
from skypy.galaxy.spectrum import dirichlet_coefficients, kcorrect_spectra
def test_sampling_coefficients():
    """Check dirichlet_coefficients: output shapes for array/scalar
    redshift, marginal beta distributions via a KS test, weighted
    sampling, and ValueError on mismatched or scalar alpha inputs."""
    alpha0 = np.array([2.079, 3.524, 1.917, 1.992, 2.536])
    alpha1 = np.array([2.265, 3.862, 1.921, 1.685, 2.480])
    z1 = 1.

    # Redshift-interpolated concentration parameters per the Dirichlet
    # model: alpha(z) = alpha0^(1 - z/z1) * alpha1^(z/z1).
    redshift = np.full(1000, 2.0, dtype=float)
    redshift_reshape = np.atleast_1d(redshift)[:, np.newaxis]
    alpha = np.power(alpha0, 1. - redshift_reshape / z1) * \
        np.power(alpha1, redshift_reshape / z1)

    a0 = alpha.sum(axis=1)

    # Check the output shape if redshift is an array
    coefficients = dirichlet_coefficients(redshift, alpha0, alpha1, z1)
    assert coefficients.shape == (len(redshift), len(alpha0)), \
        'Shape of coefficients array is not (len(redshift), len(alpha0)) '

    # the marginalised distributions are beta distributions with a = alpha_i
    # and b = a0-alpha_i
    for a, c in zip(alpha.T, coefficients.T):
        d, p = scipy.stats.kstest(c, 'beta', args=(a, a0 - a))
        assert p >= 0.01, \
            'Not all marginal distributions follow a beta distribution.'

    # test sampling with weights
    weight = [3.47e+09, 3.31e+06, 2.13e+09, 1.64e+10, 1.01e+09]
    coefficients = dirichlet_coefficients(redshift, alpha0, alpha1, z1, weight)
    assert coefficients.shape == (len(redshift), len(alpha0)), \
        'Shape of coefficients array is not (len(redshift), len(alpha0)) '

    # Test output shape if redshift is a scalar
    redshift = 2.0
    coefficients = dirichlet_coefficients(redshift, alpha0, alpha1)
    assert coefficients.shape == (len(alpha0),), \
        'Shape of coefficients array is not (len(alpha0),) ' \
        'if redshift array is float.'

    # Test raising ValueError of alpha1 and alpha0 have different size
    alpha0 = np.array([1, 2, 3])
    alpha1 = np.array([4, 5])
    redshift = np.linspace(0, 2, 10)
    with pytest.raises(ValueError):
        dirichlet_coefficients(redshift, alpha0, alpha1)

    # Test that ValueError is risen if alpha0 or alpha1 is a scalar.
    scalar_alpha = 1.
    with pytest.raises(ValueError):
        dirichlet_coefficients(redshift, scalar_alpha, alpha1)
    with pytest.raises(ValueError):
        dirichlet_coefficients(redshift, alpha0, scalar_alpha)

    # bad weight parameter
    with pytest.raises(ValueError):
        dirichlet_coefficients(redshift, [2.5, 2.5], [2.5, 2.5], weight=[1, 2, 3])
def test_kcorrect_spectra():
    """Check kcorrect_spectra output shape (nz, nl) and that a trivial
    configuration (z=0, mass=1, first-template-only coefficients)
    reproduces the raw kcorrect template data.

    NOTE(review): downloads the template FITS file from GitHub at test
    time -- this test requires network access.
    """
    # Download template data
    kcorrect_templates_url = "https://github.com/blanton144/kcorrect/raw/" \
                             "master/data/templates/k_nmf_derived.default.fits"
    lam = getdata(kcorrect_templates_url, 11)
    templates = getdata(kcorrect_templates_url, 1)

    # Test that the shape of the returned flux density corresponds to (nz, nl)
    coefficients = np.array([[0.2, 0.2, 0.2, 0.2, 0.2],
                             [0, 0.1, 0.2, 0.3, 0.4]])

    z = np.array([0.5, 1])
    mass = np.array([5 * 10 ** 10, 7 * 10 ** 9])

    lam_o, sed = kcorrect_spectra(z, mass, coefficients)

    assert sed.shape == (len(z), len(lam))

    # Test that for redshift=0, mass=1 and coefficients=[1,0,0,0,0]
    # the returned wavelengths and spectra match the template data
    coefficients = np.array([1, 0, 0, 0, 0])

    z = np.array([0])
    mass = np.array([1])

    lam_o, sed = kcorrect_spectra(z, mass, coefficients)

    assert np.allclose(lam_o, lam)
    assert np.allclose(sed, templates[0])
@pytest.mark.skipif(not HAS_SPECUTILS, reason='test requires specutils')
def test_mag_ab_standard_source():
    """The AB standard source (f_nu = 3631 Jy, i.e. f_lam proportional
    to 1/lam^2 with this normalisation) must have magnitude zero in any
    bandpass."""
    from astropy import units
    from skypy.galaxy.spectrum import mag_ab

    # create a bandpass
    bp_lam = np.logspace(0, 4, 1000)*units.AA
    bp_tx = np.exp(-((bp_lam - 1000*units.AA)/(100*units.AA))**2)*units.dimensionless_unscaled
    bp = specutils.Spectrum1D(spectral_axis=bp_lam, flux=bp_tx)

    # test that the AB standard source has zero magnitude
    lam = np.logspace(0, 4, 1000)*units.AA
    flam = 0.10884806248538730623*units.Unit('erg s-1 cm-2 AA')/lam**2
    spec = specutils.Spectrum1D(spectral_axis=lam, flux=flam)

    m = mag_ab(spec, bp)

    assert np.isclose(m, 0)
@pytest.mark.skipif(not HAS_SPECUTILS, reason='test requires specutils')
def test_mag_ab_redshift_dependence():
    """For a narrow line in a very wide tophat band, m(z) = m(0) - 2.5 log10(1+z)."""
    from astropy import units
    from skypy.galaxy.spectrum import mag_ab

    # Extremely wide tophat bandpass so no flux is ever redshifted out of it.
    bp_lam = np.logspace(-10, 10, 3)*units.AA
    band = specutils.Spectrum1D(spectral_axis=bp_lam,
                                flux=np.ones(3)*units.dimensionless_unscaled)

    # Narrow Gaussian emission feature at 100 AA.
    lam = np.logspace(0, 3, 1000)*units.AA
    flam = np.exp(-((lam - 100*units.AA)/(10*units.AA))**2)*units.Unit('erg s-1 cm-2 AA-1')
    spec = specutils.Spectrum1D(spectral_axis=lam, flux=flam)

    redshifts = np.linspace(0, 1, 11)
    mags = mag_ab(spec, band, redshifts)
    # Expected pure (1+z) dimming of the integrated line flux.
    np.testing.assert_allclose(mags, mags[0] - 2.5*np.log10(1 + redshifts))
@pytest.mark.skipif(not HAS_SPECUTILS, reason='test requires specutils')
def test_mag_ab_multi():
    """mag_ab broadcasting: 5 redshifts x 2 bandpasses x 3 spectra."""
    from astropy import units
    from skypy.galaxy.spectrum import mag_ab

    z = np.linspace(0, 1, 5)

    # Two Gaussian bandpasses on a shared wavelength grid.
    bp_lam = np.logspace(0, 4, 1000) * units.AA
    centres = np.array([[1000], [2000]]) * units.AA
    widths = np.array([[100], [10]]) * units.AA
    bp_tx = np.exp(-((bp_lam-centres)/widths)**2)*units.dimensionless_unscaled
    band = specutils.Spectrum1D(spectral_axis=bp_lam, flux=bp_tx)

    # Three flat spectra, scaled copies of the AB reference source.
    lam = np.logspace(0, 4, 1000)*units.AA
    A = np.array([[2], [3], [4]])
    flam = A * 0.10884806248538730623*units.Unit('erg s-1 cm-2 AA')/lam**2
    spec = specutils.Spectrum1D(spectral_axis=lam, flux=flam)

    magnitudes = mag_ab(spec, band, z)
    assert magnitudes.shape == (5, 2, 3)
    # Flat spectra: m = -2.5 log10(A (1+z)), independent of the bandpass.
    truth = -2.5 * np.log10(A * (1+z)).T[:, np.newaxis, :]
    np.testing.assert_allclose(*np.broadcast_arrays(magnitudes, truth))
|
990,451 | 821133fd95f0dff2c2014f09489c41f7d2b4f6d8 | Number pattern, increasing — the element with the maximum value is removed at each level
1 2 3 4 5
1 2 3 4
1 2 3
1 2
1
Solution Code:
rows = int(input())
# Row `num` prints the values 1 .. rows-num+1: compared with the row above,
# the current maximum is dropped each time.
# Simplified from the original reverse-index loop
# (range(rows+1, num, -1) printing rows-k+2), which produced exactly the same
# ascending values in a needlessly confusing way — as the original comment
# itself noted.
for num in range(1, rows + 1):
    for val in range(1, rows - num + 2):
        print(val, end=" ")
    print("")
# EOF reached
|
990,452 | 71b885fcd2ba962ac1cfd630150c47aacc32a2d1 | import importlib
import sys
from pyfix.codec import Codec
from pyfix.journaler import DuplicateSeqNoError
from pyfix.message import FIXMessage, MessageDirection
from pyfix.session import *
from enum import Enum
from pyfix.event import FileDescriptorEventRegistration, EventType, TimerEventRegistration
class ConnectionState(Enum):
    """Lifecycle of a FIX connection, from raw socket up to a logged-in session."""
    UNKNOWN = 0
    DISCONNECTED = 1
    CONNECTED = 2  # socket is up, but no FIX logon has completed yet
    LOGGED_IN = 3
    LOGGED_OUT = 4
class FIXException(Exception):
    """Raised for FIX transport/codec failures.

    `reason` (a FIXExceptionReason) categorises the failure; the optional
    `description` becomes the exception message.
    """
    class FIXExceptionReason(Enum):
        NOT_CONNECTED = 0
        DECODE_ERROR = 1
        ENCODE_ERROR = 2

    def __init__(self, reason, description=None):
        # BUG FIX: the original called super(Exception, self).__init__, which
        # starts the MRO lookup *after* Exception and silently skips
        # Exception.__init__. Use the standard zero-argument super() instead.
        super().__init__(description)
        self.reason = reason
class SessionWarning(Exception):
    """Non-fatal FIX session problem; processing continues after logging it."""
    pass
class SessionError(Exception):
    """Fatal FIX session problem; the connection is dropped when it is raised."""
    pass
class FIXConnectionHandler(object):
    """Owns one FIX socket connection.

    Responsibilities: buffering and decoding inbound bytes, sequence-number
    validation, heartbeat scheduling, resend-request replay, and dispatching
    decoded messages to the registered (direction, msgType) handlers.
    """

    def __init__(self, engine, protocol, sock=None, addr=None, observer=None):
        self.codec = Codec(protocol)
        self.engine = engine
        self.connectionState = ConnectionState.CONNECTED
        self.session = None
        self.addr = addr
        self.observer = observer
        self.msgBuffer = b''
        self.heartbeatPeriod = 30.0
        # Entries are (callback, direction-or-None, msgType-or-None); None
        # acts as a wildcard filter.
        self.msgHandlers = []
        self.sock = sock
        self.heartbeatTimerRegistration = None
        self.expectedHeartbeatRegistration = None
        self.socketEvent = FileDescriptorEventRegistration(self.handle_read, sock, EventType.READ)
        self.engine.eventManager.registerHandler(self.socketEvent)

    def address(self):
        """Return the peer address this handler was created with."""
        return self.addr

    def disconnect(self):
        """Tear the connection down (idempotent, see handle_close)."""
        self.handle_close()

    def _notifyMessageObservers(self, msg, direction, persistMessage=True):
        # Journal first (may raise DuplicateSeqNoError), then fan out to every
        # handler whose direction/msgType filters match.
        if persistMessage is True:
            self.engine.journaller.persistMsg(msg, self.session, direction)
        for handler in filter(lambda x: (x[1] is None or x[1] == direction) and (x[2] is None or x[2] == msg.msgType), self.msgHandlers):
            handler[0](self, msg)

    def addMessageHandler(self, handler, direction=None, msgType=None):
        """Register `handler`; a None direction/msgType matches anything."""
        self.msgHandlers.append((handler, direction, msgType))

    def removeMessageHandler(self, handler, direction=None, msgType=None):
        """Remove all registrations of `handler` matching the given filters."""
        # BUG FIX: the original iterated a lazy `filter` over self.msgHandlers
        # while removing from the same list, which can skip entries.
        # Materialize the matches before mutating.
        matches = [x for x in self.msgHandlers
                   if x[0] == handler and
                   (x[1] == direction or direction is None) and
                   (x[2] == msgType or msgType is None)]
        for h in matches:
            self.msgHandlers.remove(h)

    def _sendHeartbeat(self):
        self.sendMsg(self.codec.protocol.messages.Messages.heartbeat())

    def _expectedHeartbeat(self, type, closure):
        # Fired when nothing was received within the grace period; probe the
        # peer with a TestRequest rather than disconnecting immediately.
        logging.warning("Expected heartbeat from peer %s" % (self.expectedHeartbeatRegistration, ))
        self.sendMsg(self.codec.protocol.messages.Messages.test_request())

    def registerLoggedIn(self):
        """Start the heartbeat timers once the FIX logon completes."""
        self.heartbeatTimerRegistration = TimerEventRegistration(lambda type, closure: self._sendHeartbeat(), self.heartbeatPeriod)
        self.engine.eventManager.registerHandler(self.heartbeatTimerRegistration)
        # register timeout for 10% more than we expect
        self.expectedHeartbeatRegistration = TimerEventRegistration(self._expectedHeartbeat, self.heartbeatPeriod * 1.10)
        self.engine.eventManager.registerHandler(self.expectedHeartbeatRegistration)

    def registerLoggedOut(self):
        """Stop the heartbeat timers on logout/disconnect."""
        if self.heartbeatTimerRegistration is not None:
            self.engine.eventManager.unregisterHandler(self.heartbeatTimerRegistration)
            self.heartbeatTimerRegistration = None
        if self.expectedHeartbeatRegistration is not None:
            self.engine.eventManager.unregisterHandler(self.expectedHeartbeatRegistration)
            self.expectedHeartbeatRegistration = None

    def _handleResendRequest(self, msg):
        """Build the reply to a ResendRequest.

        Admin (session-level) messages are collapsed into SequenceReset/GapFill
        messages; application messages are replayed with PossDupFlag=Y, subject
        to engine.shouldResendMessage().
        """
        protocol = self.codec.protocol
        responses = []

        beginSeqNo = msg[protocol.fixtags.BeginSeqNo]
        endSeqNo = msg[protocol.fixtags.EndSeqNo]
        if int(endSeqNo) == 0:
            # EndSeqNo of 0 means "everything after BeginSeqNo".
            endSeqNo = sys.maxsize
        logging.info("Received resent request from %s to %s", beginSeqNo, endSeqNo)
        replayMsgs = self.engine.journaller.recoverMsgs(self.session, MessageDirection.OUTBOUND, beginSeqNo, endSeqNo)
        gapFillBegin = int(beginSeqNo)
        gapFillEnd = int(beginSeqNo)
        for replayMsg in replayMsgs:
            msgSeqNum = int(replayMsg[protocol.fixtags.MsgSeqNum])
            if replayMsg[protocol.fixtags.MsgType] in protocol.msgtype.sessionMessageTypes:
                # Admin messages are never replayed; extend the gap instead.
                gapFillEnd = msgSeqNum + 1
            else:
                if self.engine.shouldResendMessage(self.session, replayMsg):
                    if gapFillBegin < gapFillEnd:
                        # we need to send a gap fill message
                        gapFillMsg = FIXMessage(protocol.msgtype.SEQUENCERESET)
                        gapFillMsg.setField(protocol.fixtags.GapFillFlag, 'Y')
                        gapFillMsg.setField(protocol.fixtags.MsgSeqNum, gapFillBegin)
                        gapFillMsg.setField(protocol.fixtags.NewSeqNo, str(gapFillEnd))
                        responses.append(gapFillMsg)

                    # and then resent the replayMsg; strip the fields the codec
                    # regenerates on encode, and mark it as a possible duplicate
                    replayMsg.removeField(protocol.fixtags.BeginString)
                    replayMsg.removeField(protocol.fixtags.BodyLength)
                    replayMsg.removeField(protocol.fixtags.SendingTime)
                    replayMsg.removeField(protocol.fixtags.SenderCompID)
                    replayMsg.removeField(protocol.fixtags.TargetCompID)
                    replayMsg.removeField(protocol.fixtags.CheckSum)
                    replayMsg.setField(protocol.fixtags.PossDupFlag, "Y")
                    responses.append(replayMsg)

                    gapFillBegin = msgSeqNum + 1
                else:
                    gapFillEnd = msgSeqNum + 1
                    responses.append(replayMsg)

        if gapFillBegin < gapFillEnd:
            # we need to send a gap fill message
            gapFillMsg = FIXMessage(protocol.msgtype.SEQUENCERESET)
            gapFillMsg.setField(protocol.fixtags.GapFillFlag, 'Y')
            gapFillMsg.setField(protocol.fixtags.MsgSeqNum, gapFillBegin)
            gapFillMsg.setField(protocol.fixtags.NewSeqNo, str(gapFillEnd))
            responses.append(gapFillMsg)

        return responses

    def handle_read(self, type, closure):
        """Socket-readable callback: drain the socket, then decode and process
        as many complete FIX messages as the buffer holds."""
        protocol = self.codec.protocol
        try:
            msg = self.sock.recv(8192)
            if msg:
                self.msgBuffer = self.msgBuffer + msg
                (decodedMsg, parsedLength) = self.codec.decode(self.msgBuffer)
                self.msgBuffer = self.msgBuffer[parsedLength:]
                while decodedMsg is not None and self.connectionState != ConnectionState.DISCONNECTED:
                    self.processMessage(decodedMsg)
                    (decodedMsg, parsedLength) = self.codec.decode(self.msgBuffer)
                    self.msgBuffer = self.msgBuffer[parsedLength:]
                # Any traffic at all counts as liveness from the peer.
                if self.expectedHeartbeatRegistration is not None:
                    self.expectedHeartbeatRegistration.reset()
            else:
                # Zero-byte read: the peer closed the socket.
                logging.debug("Connection has been closed")
                self.disconnect()
        except ConnectionError as why:
            logging.debug("Connection has been closed %s" % (why, ))
            self.disconnect()

    def handleSessionMessage(self, msg):
        """Hook for subclasses: process a session-level message and return
        (recvSeqNo, responses). The base implementation returns -1."""
        return -1

    def processMessage(self, decodedMsg):
        """Validate and dispatch one decoded inbound message."""
        protocol = self.codec.protocol

        beginString = decodedMsg[protocol.fixtags.BeginString]
        if beginString != protocol.beginstring:
            # BUG FIX: the original passed a single tuple to a two-placeholder
            # format string, which raises a formatting error inside logging;
            # pass the two values as separate lazy arguments instead.
            logging.warning("FIX BeginString is incorrect (expected: %s received: %s)", protocol.beginstring, beginString)
            self.disconnect()
            return

        msgType = decodedMsg[protocol.fixtags.MsgType]

        try:
            responses = []
            if msgType in protocol.msgtype.sessionMessageTypes:
                (recvSeqNo, responses) = self.handleSessionMessage(decodedMsg)
            else:
                recvSeqNo = decodedMsg[protocol.fixtags.MsgSeqNum]

            # validate the seq number
            (seqNoState, lastKnownSeqNo) = self.session.validateRecvSeqNo(recvSeqNo)

            if seqNoState is False:
                # We should send a resend request
                logging.info("Requesting resend of messages: %s to %s" % (lastKnownSeqNo, 0))
                responses.append(protocol.messages.Messages.resend_request(lastKnownSeqNo, 0))
                # we still need to notify if we are processing Logon message
                if msgType == protocol.msgtype.LOGON:
                    self._notifyMessageObservers(decodedMsg, MessageDirection.INBOUND, False)
            else:
                self.session.setRecvSeqNo(recvSeqNo)
                self._notifyMessageObservers(decodedMsg, MessageDirection.INBOUND)

            for m in responses:
                self.sendMsg(m)

        except SessionWarning as sw:
            logging.warning(sw)
        except SessionError as se:
            logging.error(se)
            self.disconnect()
        except DuplicateSeqNoError:
            # BUG FIX: the original ran the error/disconnect branch in a
            # `finally:` clause, so the connection was dropped even for
            # legitimate replays carrying PossDupFlag='Y' — contradicting the
            # error message's own "(and no PossDupFlag='Y')" wording.
            try:
                if decodedMsg[protocol.fixtags.PossDupFlag] == "Y":
                    logging.debug("Received duplicate message with PossDupFlag set")
                    return
            except KeyError:
                pass
            logging.error("Failed to process message with duplicate seq no (MsgSeqNum: %s) (and no PossDupFlag='Y') - disconnecting" % (recvSeqNo, ))
            self.disconnect()

    def handle_close(self):
        """Close the socket, stop timers and notify listeners (idempotent)."""
        if self.connectionState != ConnectionState.DISCONNECTED:
            logging.info("Client disconnected")
            self.registerLoggedOut()
            self.sock.close()
            self.connectionState = ConnectionState.DISCONNECTED
            self.msgHandlers.clear()
            if self.observer is not None:
                self.observer.notifyDisconnect(self)
            self.engine.eventManager.unregisterHandler(self.socketEvent)

    def sendMsg(self, msg):
        """Encode, transmit and journal `msg` as an OUTBOUND message.

        Raises FIXException(NOT_CONNECTED) if the connection is down.
        """
        if self.connectionState != ConnectionState.CONNECTED and self.connectionState != ConnectionState.LOGGED_IN:
            raise FIXException(FIXException.FIXExceptionReason.NOT_CONNECTED)

        encodedMsg = self.codec.encode(msg, self.session).encode('utf-8')
        self.sock.send(encodedMsg)
        if self.heartbeatTimerRegistration is not None:
            self.heartbeatTimerRegistration.reset()

        # Re-decode what was actually sent so observers see the final message
        # (including the fields the codec generated).
        decodedMsg, junk = self.codec.decode(encodedMsg)

        try:
            self._notifyMessageObservers(decodedMsg, MessageDirection.OUTBOUND)
        except DuplicateSeqNoError:
            logging.error("We have sent a message with a duplicate seq no, failed to persist it (MsgSeqNum: %s)" % (decodedMsg[self.codec.protocol.fixtags.MsgSeqNum]))
class FIXEndPoint(object):
    """Base class for FIX endpoints (initiator/acceptor).

    Owns the protocol module, the list of live connections and the
    connection-state listeners. start()/stop() are implemented by subclasses.
    """

    def __init__(self, engine, protocol):
        self.engine = engine
        # `protocol` is the dotted module name of a FIX protocol definition.
        self.protocol = importlib.import_module(protocol)

        self.connections = []
        self.connectionHandlers = []

    def writable(self):
        return True

    def start(self, host, port):
        # Implemented by concrete endpoints.
        pass

    def stop(self):
        # Implemented by concrete endpoints.
        pass

    def addConnectionListener(self, handler, filter):
        """Register `handler` to be called for connections entering state `filter`."""
        self.connectionHandlers.append((handler, filter))

    def removeConnectionListener(self, handler, filter):
        """Remove every registration of (handler, filter)."""
        # BUG FIX: the original removed entries from the list while iterating
        # over it, which skips the element following each removal. Rebuild the
        # list in place instead so all matching registrations are removed.
        self.connectionHandlers[:] = [s for s in self.connectionHandlers
                                      if s != (handler, filter)]

    def notifyDisconnect(self, connection):
        self.connections.remove(connection)
        for handler in filter(lambda x: x[1] == ConnectionState.DISCONNECTED, self.connectionHandlers):
            handler[0](connection)
|
990,453 | fddf0eec2ad563cb37b7a7e9c026f5f983e79fb5 | import serial
import crc16
import constants
import time
# Frame delimiters for the serial wire protocol: inbound frames are bracketed
# by READ_START/READ_END, outbound frames by SEND_START/SEND_END.
SERIAL_READ_START = 0xAF
SERIAL_READ_END = 0xFE
SERIAL_SEND_START = 0xDE
SERIAL_SEND_END = 0xED
class SerialDispatcher():
    """Frame-based serial protocol client.

    Callbacks are registered per message index before initialize(); an init
    frame describing the configured intervals is then sent to the device, and
    dispatch() routes inbound frames to the callbacks after framing, CRC and
    version checks.
    """

    def __init__(self):
        # message index -> callback, and message index -> polling interval.
        self.callback_list = {}
        self.interval_list = {}
        self.initialized = False

    def initialize(self, port, baudrate):
        """Open the serial port and send the init frame.

        Returns True on success, False if the port could not be opened.
        """
        self.ser_con = serial.Serial()
        self.ser_con.port = port
        self.ser_con.baudrate = baudrate
        self.ser_con.timeout = None
        try:
            self.ser_con.open()
        except serial.SerialException:
            return False
        self.ser_con.flushOutput()
        self.ser_con.flushInput()
        # Give the device time to reboot after the port toggles DTR
        # (presumably an auto-resetting microcontroller — TODO confirm).
        time.sleep(2)
        self.__send_init_message()
        self.initialized = True
        print("Initialized serial dispatcher")
        return True

    def __send_init_message(self):
        """Send the init frame: version, entry count, then (idx, interval LE16) triples."""
        crc_list = []
        crc_list.append(constants.SERIAL_VERSION)
        # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
        # under Python 3 (which this module targets — it uses print() calls);
        # use items() instead.
        length = 0
        for _, val in self.interval_list.items():
            if val > 0:
                length += 1
        crc_list.append(length)
        # NOTE(review): `length` counts only entries with interval > 0, yet the
        # loop below serializes *all* entries — confirm whether zero-interval
        # entries should be skipped here as well.
        for idx, val in self.interval_list.items():
            crc_list.append(idx)
            crc_list.append(val & 0xFF)
            crc_list.append(val >> 8)
        crc = crc16.crc16_ccitt(crc_list, len(crc_list))
        # NOTE(review): pyserial on Python 3 expects bytes, not str — these
        # chr() writes may need to become bytes([...]); verify target runtime.
        self.ser_con.write(chr(SERIAL_SEND_START))
        for val in crc_list:
            self.ser_con.write(chr(val))
        self.ser_con.write(chr(crc & 0xFF))
        self.ser_con.write(chr(crc >> 8))
        self.ser_con.write(chr(SERIAL_SEND_END))

    def append_callback(self, idx, cb, interval):
        """Register `cb` for message index `idx`; only allowed before initialize().

        Returns True when registered, False if already initialized.
        """
        if self.initialized:
            print("Can not append callback function when already initialized")
            return False
        else:
            self.callback_list[idx.value] = cb
            self.interval_list[idx.value] = interval
            print("Add new callback function for " + idx.name +
                  ", Interval = " + str(interval))
            return True

    def __read_byte(self):
        # Blocking single-byte read (port timeout is None).
        return ord(self.ser_con.read())

    def dispatch(self):
        """Read one inbound frame and invoke the matching callback.

        Returns a constants.SerialDispatchError member on framing/CRC/version
        errors, otherwise None.
        """
        # Read until start sequence occurs
        while self.__read_byte() != SERIAL_READ_START:
            time.sleep(0.01)
        # `frame` holds version, type, length, then the raw payload bytes.
        # (Renamed from `buffer`, which shadowed the builtin.)
        frame = []
        # Version
        frame.append(self.__read_byte())
        # Type
        frame.append(self.__read_byte())
        # Length
        frame.append(self.__read_byte())
        # Payload
        for i in range(frame[2]):
            frame.append(self.__read_byte())
        # CRC checksum, transmitted little-endian
        crc1 = self.__read_byte()
        crc2 = self.__read_byte()
        crc = (crc2 << 8) | crc1
        last_byte = self.__read_byte()
        if last_byte != SERIAL_READ_END:
            return constants.SerialDispatchError.NO_TERMINATION_BYTE
        crc_t = crc16.crc16_ccitt(frame, frame[2] + 3)
        if crc != crc_t:
            return constants.SerialDispatchError.CHECKSUM_ERROR
        if frame[0] != constants.SERIAL_VERSION:
            return constants.SerialDispatchError.VERSION_ERROR
        # Payload values are 16-bit little-endian words.
        payload = []
        for i in range(3, len(frame), 2):
            payload.append((frame[i + 1] << 8) | frame[i])
        if frame[1] in self.callback_list:
            self.callback_list[frame[1]](payload)
|
990,454 | 45c4e9f9332051e59693e45b83a0cec2ae6b2552 | a = ["avinash","umesh","Atique"]
b = ["arun", "vinesh", "sahir"]
# Pair each name in `a` with the corresponding name in `b`.
zipped = [pair for pair in zip(a, b)]
print(zipped)
990,455 | 6bf88cd4778bf5e330e6a18a4242234cb7ea3411 | # encoding: utf-8
import event
import time
from myLogger import logger
class RestEvent(event.Event):
    """One event record from the ADFC touren-termine REST API.

    Wraps the raw event JSON (eventJS), the matching search-index record
    (eventJSSearch) and a server handle (used to resolve user ids), and
    exposes accessor methods for the fields the downstream renderers use.
    """

    def __init__(self, eventJS, eventJSSearch, eventServer):
        self.eventJS = eventJS
        self.eventJSSearch = eventJSSearch
        self.eventServer = eventServer
        self.tourLocations = eventJS.get("tourLocations")
        self.itemTags = eventJS.get("itemTags")
        self.eventItem = eventJS.get("eventItem")
        self.titel = self.eventItem.get("title").strip()
        logger.info("eventItemId %s %s", self.titel, self.eventItem.get("eventItemId"))

    def getTitel(self):
        # Event title, whitespace-stripped in __init__.
        return self.titel

    def getEventItemId(self):
        return self.eventItem.get("eventItemId")

    def getPublishDate(self):
        # Publish timestamp converted to local German time (CET/CEST).
        datum = self.eventItem.get("cPublishDate")
        datum = event.convertToMEZOrMSZ(datum)
        return datum

    def getFrontendLink(self):
        return "https://touren-termine.adfc.de/radveranstaltung/" + self.eventItem.get("cSlug")

    def getSlug(self):
        return self.eventItem.get("cSlug")

    def getBackendLink(self):
        return "https://intern-touren-termine.adfc.de/modules/events/" + self.eventItem.get("eventItemId")

    def getNummer(self):
        # "999" is the fallback for events without an assigned number.
        num = self.eventJSSearch.get("eventNummer")
        if num is None:
            num = "999"
        return num

    def getAbfahrten(self):
        # Collect (type, "HH:MM", location) tuples for every departure or
        # meeting point of the tour.
        abfahrten = []
        for tourLoc in self.tourLocations:
            typ = tourLoc.get("type")
            logger.debug("typ %s", typ)
            if typ != "Startpunkt" and typ != "Treffpunkt":
                continue
            if not tourLoc.get("withoutTime"):
                if len(abfahrten) == 0:  # for first loc, get starttime from eventItem, beginning in tourloc is often wrong
                    beginning = self.getDatum()[1]
                else:
                    beginning = tourLoc.get("beginning")
                    logger.debug("beginning %s", beginning)  # '2018-04-24T12:00:00'
                    beginning = event.convertToMEZOrMSZ(beginning)  # '2018-04-24T14:00:00'
                    beginning = beginning[11:16]  # 14:00
            else:
                beginning = ""
            # Compose the location string from the non-empty name/city/street.
            name = tourLoc.get("name")
            street = tourLoc.get("street")
            city = tourLoc.get("city")
            logger.debug("name '%s' street '%s' city '%s'", name, street, city)
            loc = name
            if city != "":
                if loc == "":
                    loc = city
                else:
                    loc = loc + " " + city
            if street != "":
                if loc == "":
                    loc = street
                else:
                    loc = loc + " " + street
            if typ == "Startpunkt":
                # "Termin" events meet ("Treffpunkt"), tours start ("Start").
                if self.isTermin():
                    typ = "Treffpunkt"
                else:
                    typ = "Start"
            abfahrt = (typ, beginning, loc)
            abfahrten.append(abfahrt)
        return abfahrten

    def getStartpunkt(self):
        # return first loc that is Startpunkt or Treffpunkt
        for tourLoc in self.tourLocations:
            typ = tourLoc.get("type")
            if typ != "Startpunkt" and typ != "Treffpunkt":
                continue
            name = tourLoc.get("name")
            street = tourLoc.get("street")
            city = tourLoc.get("city")
            latitude = tourLoc.get("latitude")
            longitude = tourLoc.get("longitude")
            return (name, street, city, latitude, longitude)
        return None

    def getBeschreibung(self, raw):
        # Full description with HTML and special characters removed; when
        # `raw` is falsy the text is additionally normalized.
        desc = self.eventItem.get("description")
        desc = event.removeHTML(desc)
        desc = event.removeSpcl(desc)
        if raw:
            return desc
        desc = event.normalizeText(desc)
        return desc

    def getKurzbeschreibung(self):
        desc = self.eventItem.get("cShortDescription")
        desc = event.normalizeText(desc)
        return desc

    def isTermin(self):
        # True for date-type events ("Termin") as opposed to tours.
        return self.eventItem.get("eventType") == "Termin"

    def getSchwierigkeit(self):
        # NOTE(review): returns the string "-" for Termine but an int for
        # tours — callers must handle both types.
        if self.isTermin():
            return "-"
        schwierigkeit = self.eventItem.get("cTourDifficulty")
        # apparently either 0 or between 1.0 and 5.0
        i = int(schwierigkeit + 0.5)
        return i  # ["unbekannt", "sehr einfach, "einfach", "mittel", "schwer", "sehr schwer"][i] ??

    """
    itemtags has categories
    for Termine:
    "Aktionen, bei denen Rad gefahren wird" : getKategorie, e.g. Fahrrad-Demo, Critical Mass
    "Radlertreff / Stammtisch / Öffentliche Arbeits..." : getKategorie, e.g. Stammtisch
    "Serviceangebote": getKategorie, e.g. Codierung, Selbsthilfewerkstatt
    "Versammlungen" : getKategorie, e.g. Aktiventreff, Mitgliederversammlung
    "Vorträge & Kurse": getKategorie, e.g. Kurse, Radreisevortrag
    for Touren:
    "Besondere Charakteristik /Thema": getZusatzInfo
    "Besondere Zielgruppe" : getZusatzInfo
    "Geeignet für": getRadTyp
    "Typen (nach Dauer und Tageslage)" : getKategorie, e.g. Ganztagstour
    "Weitere Eigenschaften" : getZusatzinfo, e.g. Bahnfahrt
    """

    def getMerkmale(self):
        # All tag names, regardless of category.
        merkmale = []
        for itemTag in self.itemTags:
            tag = itemTag.get("tag")
            merkmale.append(tag)
        return merkmale

    def getKategorie(self):
        # First tag from one of the category groups listed in the comment
        # block above; "Ohne" (without) if none matches.
        for itemTag in self.itemTags:
            tag = itemTag.get("tag")
            category = itemTag.get("category")
            if category.startswith("Aktionen,") or category.startswith("Radlertreff") or category.startswith("Service") \
                    or category.startswith("Versammlungen") or category.startswith("Vortr") \
                    or category.startswith("Typen "):
                return tag
        return "Ohne"

    def getRadTyp(self):
        # wenn nur Rennrad oder nur Mountainbike, dann dieses, sonst Tourenrad
        # (if exactly one suitability tag and it is road bike or MTB, return
        # it; otherwise default to touring bike)
        rtCnt = 0
        for itemTag in self.itemTags:
            category = itemTag.get("category")
            if category.startswith("Geeignet "):
                rtCnt += 1
        for itemTag in self.itemTags:
            tag = itemTag.get("tag")
            category = itemTag.get("category")
            if category.startswith("Geeignet "):
                if rtCnt == 1 and (tag == "Rennrad" or tag == "Mountainbike"):
                    return tag
        return "Tourenrad"

    def getZusatzInfo(self):
        # Group the "additional info" tags into up to three labelled lines.
        besonders = []
        weitere = []
        zielgruppe = []
        for itemTag in self.itemTags:
            tag = itemTag.get("tag")
            category = itemTag.get("category")
            if category == "Besondere Charakteristik /Thema":
                besonders.append(tag)
            if category == "Weitere Eigenschaften":
                weitere.append(tag)
            if category == "Besondere Zielgruppe":
                zielgruppe.append(tag)
        zusatzinfo = []
        if len(besonders) > 0:
            besonders = "Besondere Charakteristik/Thema: " + ", ".join(besonders)
            zusatzinfo.append(besonders)
        if len(weitere) > 0:
            weitere = "Weitere Eigenschaften: " + ", ".join(weitere)
            zusatzinfo.append(weitere)
        if len(zielgruppe) > 0:
            zielgruppe = "Besondere Zielgruppe: " + ", ".join(zielgruppe)
            zusatzinfo.append(zielgruppe)
        return zusatzinfo

    def getStrecke(self):
        # Tour length as a display string, e.g. "42 km".
        tl = self.eventItem.get("cTourLengthKm")
        return str(tl) + " km"

    def getHoehenmeter(self):
        # Climb in metres, as a string.
        h = self.eventItem.get("cTourHeight")
        return str(h)

    def getCharacter(self):
        # Surface code mapped to a display string via event.character.
        c = self.eventItem.get("cTourSurface")
        return event.character[c]

    def getDatum(self):
        # Returns ("Weekday, DD.MM.YYYY", "HH:MM", full local ISO string).
        datum = self.eventItem.get("beginning")
        datum = event.convertToMEZOrMSZ(datum)
        # fromisoformat defined in Python3.7, not used by Scribus
        # date = datetime.fromisoformat(datum)
        logger.debug("datum <%s>", str(datum))
        day = str(datum[0:10])
        date = time.strptime(day, "%Y-%m-%d")
        weekday = event.weekdays[date.tm_wday]
        res = (weekday + ", " + day[8:10] + "." + day[5:7] + "." + day[0:4], datum[11:16], datum)
        return res

    def getDatumRaw(self):
        return self.eventItem.get("beginning")

    def getEndDatum(self):
        # Returns ("Weekday, DD.MM.YYYY", "HH:MM") of the event end.
        enddatum = self.eventItem.get("end")
        enddatum = event.convertToMEZOrMSZ(enddatum)
        # fromisoformat defined in Python3.7, not used by Scribus
        # enddatum = datetime.fromisoformat(enddatum)
        logger.debug("enddatum %s", str(enddatum))
        day = str(enddatum[0:10])
        date = time.strptime(day, "%Y-%m-%d")
        weekday = event.weekdays[date.tm_wday]
        res = (weekday + ", " + day[8:10] + "." + day[5:7] + "." + day[0:4], enddatum[11:16])
        return res

    def getEndDatumRaw(self):
        return self.eventItem.get("end")

    def getPersonen(self):
        # Resolve organizer user ids to display strings; unknown users are
        # skipped and the same person is never listed twice.
        personen = []
        organizer = self.eventItem.get("cOrganizingUserId")
        if organizer is not None and len(organizer) > 0:
            org = self.eventServer.getUser(organizer)
            if org is not None:
                personen.append(str(org))
        organizer2 = self.eventItem.get("cSecondOrganizingUserId")
        if organizer2 is not None and len(organizer2) > 0 and organizer2 != organizer:
            org = self.eventServer.getUser(organizer2)
            if org is not None:
                personen.append(str(org))
        return personen

    def getImagePreview(self):
        return self.eventJS.get("imagePreview")

    def getImageUrl(self):
        # Download URL of the event's first image.
        imageId = self.eventJS.get("eventItemImages")[0].get("imageId")
        return f"https://intern-touren-termine.adfc.de/api/images/{imageId}/download"
        # Response header von /images/id/download:
        # Location: https://adfcrtp.blob.core.cloudapi.de/public-production/2b6a400f-d5ac-46bf-9133-b53ecd5a180c/bille-bei-billwerder.jpg

    def getName(self):
        # The following accessors read the *first* tour location only.
        tourLoc = self.tourLocations[0]
        return tourLoc.get("name")

    def getCity(self):
        tourLoc = self.tourLocations[0]
        return tourLoc.get("city")

    def getStreet(self):
        tourLoc = self.tourLocations[0]
        return tourLoc.get("street")

    def getLatLon(self):
        tourLoc = self.tourLocations[0]
        return (tourLoc.get("latitude"), tourLoc.get("longitude"),)

    def isExternalEvent(self):
        # NOTE(review): compares against the *string* "true" — confirm the API
        # really serializes this flag as a string rather than a JSON bool.
        return self.eventItem.get("cExternalEvent") == "true"

    def istEntwurf(self):
        return False  # the REST API never returns drafts

    def getPrices(self):
        # Returns (min, max) over all price entries; with no entries the
        # sentinel pair (9999999.0, 0.0) is returned unchanged.
        minPrice = 9999999.0
        maxPrice = 0.0
        itemPrices = self.eventJS.get("eventItemPrices");
        for itemPrice in itemPrices:
            price = itemPrice.get("price")
            if price < minPrice:
                minPrice = price
            if price > maxPrice:
                maxPrice = price
        return (minPrice, maxPrice)
class User:
    """Lightweight wrapper around a tour-leader user record from the REST API."""

    def __init__(self, userJS):
        record = userJS.get("user")
        self.firstName = record.get("firstName")
        self.lastName = record.get("lastName")
        # Prefer the cell phone; fall back to the first temporary contact.
        # Any missing structure along the way simply leaves the phone unset.
        try:
            phone = record.get("cellPhone")
            if phone is None or phone == "":
                phone = userJS.get("temporaryContacts")[0].get("phone")
        except Exception:
            phone = None
        self.phone = phone

    def __repr__(self):
        label = self.firstName + " " + self.lastName
        if self.phone is not None and self.phone != "":
            label += " (" + self.phone + ")"
        return label
|
990,456 | ef5ca93cbe5cef31dd55727651930c2cc04e3c30 | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models
from torchvision import datasets, transforms
import seaborn as sns
from tqdm import tqdm
from dataloader import RetinopathyDataset
from torch.utils.data import DataLoader
from models import ResNetPretrain
from utils import *
sns.set_style("whitegrid")
# All tensors and models below are placed on CUDA device 1.
device = torch.device("cuda")
torch.cuda.set_device(1)
def cal_acc(model, loader):
    """Evaluate `model` over `loader` without gradients.

    Returns (accuracy in percent, list of target tensors, list of prediction
    tensors). Uses the module-level `device` for tensor placement.
    """
    hits = 0
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for data, target in tqdm(loader):
            data = data.to(device, dtype=torch.float)
            target = target.to(device, dtype=torch.long)
            logits = model(data)
            pred = logits.argmax(dim=1, keepdim=True)
            all_preds.extend(pred)
            all_targets.extend(target.view_as(pred))
            hits += pred.eq(target.view_as(pred)).sum().item()
    return (hits / len(loader.dataset)) * 100, all_targets, all_preds
if __name__ == '__main__':
    torch.backends.cudnn.benchmark = True
    batch_size = 8
    # Augmentation applied to the training split only.
    augmentation = [
        transforms.RandomCrop(480),
        transforms.RandomVerticalFlip(),
        transforms.RandomHorizontalFlip(),
    ]
    train_dataset = RetinopathyDataset('./data', 'train', augmentation=augmentation)
    test_dataset = RetinopathyDataset('./data', 'test')
    train_loader = DataLoader(train_dataset, batch_size=batch_size, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size*4, pin_memory=True)
    # Toggle: True = train the selected models, False = evaluate saved weights.
    to_train = False
    if to_train:
        # NOTE(review): model_names/load_models are reassigned several times
        # below — only the last assignment takes effect; earlier lines look
        # like experiment leftovers.
        model_names = ["Resnet18", "Resnet50", "Resnet18_pretrain", "Resnet50_pretrain"]
        load_models = [False, False, False, False]
        # model_names = ["Resnet50_pretrain_2", "Resnet50_2"]
        model_names = ["Resnet50_pretrain_23"]#, "Resnet50_2"]
        # model_names = ["Resnet18_pretrain_2", "Resnet18_2"]
        model_names = ['Resnet18_2']#Resnet50_2']#, "Resnet18_2"]
        load_models = [False, False]
        for idx, model_name in enumerate(model_names):
            print(model_name)
            # Select architecture, pretraining and epoch budget by name.
            if model_name == "Resnet18_2":
                model = ResNetPretrain(18, pretrained=False).to(device)
                if load_models[idx]:
                    model.load_state_dict(torch.load("./" + model_name + ".pth"))
                iteration = 1
            elif model_name == "Resnet50_2":
                model = ResNetPretrain(50, pretrained=False).to(device)
                if load_models[idx]:
                    model.load_state_dict(torch.load("./" + model_name + ".pth"))
                iteration = 1
            elif model_name == "Resnet18_pretrain_2":
                if load_models[idx]:
                    model = ResNetPretrain(18, pretrained=False).to(device)
                    model.load_state_dict(torch.load("./" + model_name + ".pth"))
                else:
                    model = ResNetPretrain(18, pretrained=True).to(device)
                iteration = 15
            elif model_name == "Resnet50_pretrain_23":
                if load_models[idx]:
                    model = ResNetPretrain(50, pretrained=False).to(device)
                    model.load_state_dict(torch.load("./" + model_name + ".pth"))
                else:
                    model = ResNetPretrain(50, pretrained=True).to(device)
                iteration = 80
            else:
                print("Error! Cannot recognize model name.")
            train_accs = []
            test_accs = []
            max_acc = 0
            model.train(mode=True)
            optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)
            for epoch in range(iteration):
                print("epoch:", epoch)
                correct = 0
                for (data, target) in tqdm(train_loader):
                    data, target = data.to(device, dtype=torch.float), target.to(device, dtype=torch.long)
                    optimizer.zero_grad()
                    output = model(data)
                    pred = output.argmax(dim=1, keepdim=True)
                    correct += pred.eq(target.view_as(pred)).sum().item()
                    loss = F.cross_entropy(output, target)
                    loss.backward()
                    optimizer.step()
                train_acc = (correct / len(train_loader.dataset)) * 100
                print('train_acc: ', train_acc)
                train_accs.append(train_acc)
                # Switch to eval mode for test accuracy, then back to training.
                model.train(mode=False)
                test_acc, targets, preds = cal_acc(model, test_loader)
                model.train(mode=True)
                # Checkpoint only on improvement.
                if test_acc > max_acc:
                    max_acc = test_acc
                    torch.save(model.state_dict(), "./" + model_name + ".pth")
                print("test_acc:", test_acc)
                test_accs.append(test_acc)
                # Early stop once the target test accuracy is reached.
                if test_acc>=82:
                    break
            print(train_accs)
            print(test_accs)
            # Save the accuracy curves for this model.
            plt.plot(train_accs, label="train")
            plt.plot(test_accs, label="test")
            plt.title(model_name)
            plt.legend(loc='lower right')
            plt.savefig(model_name + "_result.png")
            plt.clf()
            plt.cla()
            plt.close()
    else:
        # Evaluation-only path: load each checkpoint, report accuracy and
        # write a confusion matrix per model.
        model_names = ["./weight/Resnet18_2.pth", "./weight/Resnet50_2.pth",
                       "./weight/Resnet18_pretrain_2.pth", "./weight/Resnet50_pretrain_23_82.pth"]#"./Resnet50_pretrain_2.pth"]
        models = [ResNetPretrain(18, pretrained=False).to(device),
                  ResNetPretrain(50, pretrained=False).to(device),
                  ResNetPretrain(18, pretrained=False).to(device),
                  ResNetPretrain(50, pretrained=False).to(device)]
        print("Testing")
        for idx, name in enumerate(model_names):
            # name[2:-6] strips the leading "./" and the trailing ".pth"-ish
            # suffix to form a display/figure name.
            print(name[2:-6])
            model = models[idx]
            model.load_state_dict(torch.load(name))
            model.eval()
            acc, targets, preds = cal_acc(model, test_loader)
            targets = torch.stack(targets)
            preds = torch.stack(preds)
            plot_confusion_matrix(targets.cpu().numpy(), preds.cpu().numpy(), name[2:-6])
            print("model:", name, ", acc:", acc)
|
990,457 | a0a46f21416a5f5e20b59486216f4f358411252c | #!/usr/bin/env python
# pylint: disable=E1101
# E1101: Allow imports from currentThread
"""
_ExecuteMaster_
Overseer object that traverses a task and invokes the type based executor
for each step
"""
from builtins import object
import logging
import os
import threading
import traceback
import WMCore.WMSpec.Steps.StepFactory as StepFactory
from WMCore.WMException import WMException
from WMCore.WMSpec.Steps.WMExecutionFailure import WMExecutionFailure
from WMCore.WMSpec.WMStep import WMStepHelper
class ExecuteMaster(object):
"""
_ExecuteMaster_
Traverse the given task and invoke the execute framework
If an emulator is provided, then invoke the appropriate emulator
instead of the executor
"""
    def __init__(self):
        # Stateless: all execution context arrives via __call__(task, wmbsJob).
        pass
def __call__(self, task, wmbsJob):
"""
_operator(task)_
Load and run executors for all steps in Task, if an emulator is
available for that step, use it instead.
"""
myThread = threading.currentThread
try:
myThread.watchdogMonitor.setupMonitors(task, wmbsJob)
myThread.watchdogMonitor.notifyJobStart(task)
except WMException:
self.toTaskDirectory()
raise
except Exception as ex:
msg = "Encountered unhandled exception while starting monitors:\n"
msg += str(ex) + '\n'
msg += str(traceback.format_exc()) + '\n'
logging.error(msg)
self.toTaskDirectory()
raise WMExecutionFailure(msg)
failureUpstream = False
for step in task.steps().nodeIterator():
try:
helper = WMStepHelper(step)
stepType = helper.stepType()
stepName = helper.name()
if failureUpstream:
# for chained steps, don't execute further steps if a
# failure has already happened
helper.addOverride("previousCmsRunFailure", True)
executor = StepFactory.getStepExecutor(stepType)
result = self.doExecution(executor, step, wmbsJob)
logging.info("StepName: %s, StepType: %s, with result: %r", stepName, stepType, result)
if result: # can be either None, or the step exit code
failureUpstream = True
except WMException as ex:
msg = "Encountered error while running ExecuteMaster:\n"
msg += str(ex) + "\n"
logging.error(msg)
self.toTaskDirectory()
break
except Exception as ex:
msg = "Encountered error while running ExecuteMaster:\n"
msg += str(ex) + "\n"
msg += str(traceback.format_exc()) + "\n"
self.toTaskDirectory()
logging.error(msg)
break
try:
myThread.watchdogMonitor.notifyJobEnd(task)
except WMException:
self.toTaskDirectory()
except Exception as ex:
msg = "Encountered unhandled exception while ending the job:\n"
msg += str(ex) + '\n'
msg += str(traceback.format_exc()) + '\n'
logging.error(msg)
self.toTaskDirectory()
return
def doExecution(self, executor, step, job):
"""
_doExecution_
Invoke the Executor for the step provided
TODO: Add Monitoring thread & setup
TODO: Exception Handling
TODO: pre/post outcome can change the next execution task, need to
ensure that this happens
"""
myThread = threading.currentThread
# Tell the watchdog that we're starting the step
myThread.watchdogMonitor.notifyStepStart(step)
self.toStepDirectory(step)
executor.initialise(step, job)
executionObject = executor
error = False
if executor.emulationMode:
executionObject = executor.emulator
preOutcome = executionObject.pre()
if preOutcome is not None:
logging.info("Pre Executor Task Change: %s", preOutcome)
executor.saveReport()
self.toTaskDirectory()
myThread.watchdogMonitor.notifyStepEnd(step=step,
stepReport=executor.report)
executor.saveReport()
return preOutcome
try:
executor.report.setStepStartTime(stepName=executor.stepName)
executionObject.execute()
except WMExecutionFailure as ex:
executor.diagnostic(ex.code, executor, ExceptionInstance=ex)
executor.report.addError(executor.stepName, ex.code, "WMAgentStepExecutionError", str(ex))
error = True
except Exception as ex:
logging.error("Exception occured when executing step")
logging.error("Exception is %s", ex)
logging.error("Traceback: ")
logging.error(traceback.format_exc())
executor.diagnostic(99109, executor, ExceptionInstance=ex)
executor.report.addError(executor.stepName, 99109, "WMAgentStepExecutionError", str(ex))
error = True
executor.report.setStepStopTime(stepName=executor.stepName)
# TODO: Handle generic Exception that indicates development/code errors
executor.saveReport()
postOutcome = executionObject.post()
if postOutcome is not None:
logging.info("Post Executor Task Change: %s", postOutcome)
executor.saveReport()
self.toTaskDirectory()
myThread.watchdogMonitor.notifyStepEnd(step=step,
stepReport=executor.report)
executor.saveReport()
return postOutcome
self.toTaskDirectory()
# Okay, we're done, set the job to successful
stepExitCode = executor.report.getExitCode() # 0 is successful
if not error and not stepExitCode:
executor.report.setStepStatus(stepName=executor.stepName,
status=0)
executor.saveReport()
# Tell the watchdog that we're done with the step
myThread.watchdogMonitor.notifyStepEnd(step=step,
stepReport=executor.report)
executor.saveReport()
return stepExitCode
def toStepDirectory(self, step):
"""
_toStepDirectory_
Switch current working directory to the step location
within WMTaskSpace
"""
stepName = WMStepHelper(step).name()
from WMTaskSpace import taskSpace
stepSpace = taskSpace.stepSpace(stepName)
os.chdir(stepSpace.location)
def toTaskDirectory(self):
"""
_toTaskDirectory_
Switch to current working directory to the task location
within WMTaskSpace
"""
from WMTaskSpace import taskSpace
os.chdir(taskSpace.location)
return
|
990,458 | f8b2e84d3fd20a6cd6d778548338c77f26437dc8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
ConversationHandler)
import logging
from firebase import firebase
# REST client for the Firebase realtime database (URL is a placeholder).
firebase = firebase.FirebaseApplication('your-firebase-database-link')
# Scratch dict collecting one provider's registration data across the
# conversation. NOTE(review): module-level state is shared by every chat —
# two users registering concurrently would overwrite each other's fields.
data_d={}
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# Conversation state identifiers used by the ConversationHandler below.
DEPART, PHONE, LOCATION = range(3)
def start(bot, update):
    """Entry point of the registration conversation: ask for the department."""
    choices = ReplyKeyboardMarkup([['Fire', 'Health', 'Police']],
                                  one_time_keyboard=True)
    update.message.reply_text(
        'Hello, and welcome to Suraksha!'
        'What is your department?',
        reply_markup=choices)
    return DEPART
def depart(bot, update):
    """Record chat id, department and first name, then ask for the phone number."""
    user = update.message.from_user
    # effective_user.id is stored as the chat id (private chat assumed)
    chat_id = user_id = update.effective_user.id
    data_d['chat_id'] = chat_id
    data_d['dept'] = update.message.text
    data_d['name'] = user.first_name
    logger.info("Department of %s: %s", user.first_name, update.message.text)
    update.message.reply_text('I see! Please send me your phone number.',
                              reply_markup=ReplyKeyboardRemove())
    return PHONE
def phone(bot, update):
    """Store the provider's phone number and ask for the location."""
    sender = update.message.from_user
    number = update.message.text
    data_d['mobile'] = number
    logger.info("Phone number of %s: %s", sender.first_name, number)
    update.message.reply_text('Thanks! Now, send me your location please,')
    return LOCATION
def location(bot, update):
    """Record the provider's location and persist the collected data to Firebase."""
    user = update.message.from_user
    user_location = update.message.location
    # latitude/longitude joined into one "lat,lng" string — presumably parsed
    # back by whatever reads /provider; TODO confirm the expected format
    data_d['loc']=str(user_location.latitude)+','+str(user_location.longitude)
    logger.info("Location of %s: %f / %f", user.first_name, user_location.latitude,
                user_location.longitude)
    update.message.reply_text('Thanks, you have been succesfully registered!')
    print(data_d)
    # Side effect: POSTs the whole registration record under /provider
    result = firebase.post('/provider',data_d)
    return ConversationHandler.END
def cancel(bot, update):
    """Abort the conversation when the user sends /cancel."""
    sender = update.message.from_user
    logger.info("User %s canceled the conversation.", sender.first_name)
    update.message.reply_text('Bye! I hope we can talk again some day.',
                              reply_markup=ReplyKeyboardRemove())
    return ConversationHandler.END
def error(bot, update, error):
    """Log Errors caused by Updates.

    NOTE: the third parameter deliberately shadows this function's own name;
    it is the exception object supplied by the telegram dispatcher.
    """
    logger.warning('Update "%s" caused error "%s"', update, error)
def main():
    """Wire up the bot: conversation handler, error handler, polling loop."""
    # Create the EventHandler and pass it your bot's token.
    # NOTE(review): "key:secret" is a placeholder — a real bot token must be
    # supplied before this runs.
    updater = Updater("key:secret")
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # Add conversation handler with the states DEPART, PHONE and LOCATION
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            DEPART: [RegexHandler('^(Fire|Health|Police)$', depart)],
            PHONE: [MessageHandler(Filters.text, phone)],
            LOCATION: [MessageHandler(Filters.location, location)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    dp.add_handler(conv_handler)
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
if __name__ == '__main__':
    main()
|
990,459 | 29401251ce102f56fd4e95fba4540d70b120907b | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import io
import featurize
# Fixture: WebCorpus-style lines, one "<surface form> <lemma>/<POS>" pair per
# line. NOTE(review): the separator is rendered as a space here, but the
# featurizer tests below feed '\t'-separated data — confirm whether these
# literals originally used tabs before relying on them.
input_simple = u"""mereven merev/ADJ
A a/ART
részletezni részletez/VERB
hullámzik hullámzik/VERB
terítse terít/VERB
pillanat pillanat/NOUN
mozgásban mozgás/NOUN
mi mi/NOUN
mindenre minden/NOUN"""
# Fixture: same format, with morphological case/feature tags (<CAS<...>> etc.)
# used by the regex-filter tests.
input_with_cases = u"""mereven merev/ADJ[MANNER]/ADV
A a/ART
részletezni részletez/VERB<INF>
hullámzik hullámzik/VERB
terítse terít/VERB<SUBJUNC-IMP><DEF>
pillanat pillanat/NOUN
mozgásban mozgás/NOUN<CAS<INE>>
mi mi/NOUN<PERS<1>><PLUR>
mindenre minden/NOUN<CAS<SBL>>
önmagában önmaga/NOUN<PERS><CAS<INE>>
széksorokban széksor/NOUN<PLUR><CAS<INE>>
háttérre háttér/NOUN<CAS<SBL>>"""
# Fixture: 6 lines of which only 4 are distinct, for the duplicate tests.
input_with_duplicates = u"""s1 l1
s1 l2
s2 l1
s1 l1
s1 l2
s2 l2"""
class FeaturizerTest(unittest.TestCase):
    """Tests for featurize.Featurizer fed with the fixture strings above.

    The numeric Featurizer arguments appear to be a sample limit and a
    feature/size limit — confirm against featurize.Featurizer's signature.
    """
    def test_pos_extract(self):
        # grep filter keeps only NOUN/VERB analyses
        w = featurize.WebCorpusExtractor(grep_filter=["NOUN", "VERB"])
        f = featurize.Featurizer(2, 20, label_extractor=w)
        f.featurize_stream(io.StringIO(input_simple))
        self.assertEqual(len(f.dataset), 4)
        self.assertTrue(f.dataset.full)
    def test_pos_extract_not_enough_input(self):
        w = featurize.WebCorpusExtractor(grep_filter=["NOUN", "VERB"])
        # limit of 200 samples per class cannot be reached by the fixture
        f = featurize.Featurizer(200, 20, label_extractor=w)
        f.featurize_stream(io.StringIO(input_simple))
        self.assertFalse(f.dataset.full)
    def test_regex_extract(self):
        w = featurize.WebCorpusExtractor(regex_filter=[
            r'<CAS<([^<>]+)>',
        ])
        f = featurize.Featurizer(2, 20, label_extractor=w)
        f.featurize_stream(io.StringIO(input_with_cases))
        self.assertEqual(len(f.dataset), 4)
    def test_regex_extract2(self):
        w = featurize.WebCorpusExtractor(regex_filter=[
            r'<CAS<([^<>]+)>',
        ])
        f = featurize.Featurizer(2, 20, label_extractor=w)
        f.featurize_stream(io.StringIO(input_with_cases))
        self.assertTrue(f.dataset.full)
    def test_regex_extract_not_enough_input(self):
        w = featurize.WebCorpusExtractor(regex_filter=[
            r'<CAS<([^<>]+)>',
        ])
        f = featurize.Featurizer(6, 11, label_extractor=w)
        f.featurize_stream(io.StringIO(input_with_cases))
        self.assertEqual(len(f.dataset), 4)
    def test_regex_extract_not_enough_input2(self):
        w = featurize.WebCorpusExtractor(regex_filter=[
            r'<CAS<([^<>]+)>',
        ])
        f = featurize.Featurizer(6, 11, label_extractor=w)
        f.featurize_stream(io.StringIO(input_with_cases))
        self.assertFalse(f.dataset.full)
    def test_empty_extractor(self):
        # without an extractor every input line becomes a sample
        s = len(input_with_cases.strip().split('\n'))
        f = featurize.Featurizer(3)
        f.featurize_stream(io.StringIO(input_with_cases))
        self.assertEqual(len(f.dataset), s)
    def test_empty_extractor2(self):
        f = featurize.Featurizer(3)
        f.featurize_stream(io.StringIO(input_with_cases))
        self.assertIn('részletez/VERB<INF>', f.dataset.labels)
    def test_keep_duplicates(self):
        s = len(input_with_duplicates.split('\n'))
        f = featurize.Featurizer(30, 300, skip_duplicates=False)
        f.featurize_stream(io.StringIO(input_with_duplicates))
        self.assertEqual(len(f.dataset), s)
    def test_skip_duplicates(self):
        s = len(set(input_with_duplicates.split('\n')))
        f = featurize.Featurizer(30, 300, skip_duplicates=True)
        f.featurize_stream(io.StringIO(input_with_duplicates))
        self.assertEqual(len(f.dataset), s)
class WebCorpusExtractorTest(unittest.TestCase):
    """Tests for featurize.WebCorpusExtractor.extract_label in isolation."""
    def test_echo_filter(self):
        # no filters configured: the label is returned unchanged
        w = featurize.WebCorpusExtractor()
        self.assertEqual(w.extract_label("abc"), "abc")
    def test_grep_filter(self):
        # first matching grep token wins; no match yields None
        w = featurize.WebCorpusExtractor(grep_filter=["NOUN", "VERB"])
        self.assertEqual(w.extract_label("abc"), None)
        self.assertEqual(w.extract_label("NOUNabc"), "NOUN")
        self.assertEqual(w.extract_label("NOUNabcVERB"), "NOUN")
    def test_regex_filter(self):
        # first regex whose capture group matches provides the label
        w = featurize.WebCorpusExtractor(regex_filter=[
            r'([abc])', r'(\w\d)\d', r'^(defg)$',
        ])
        self.assertEqual(w.extract_label("abc"), 'a')
        self.assertEqual(w.extract_label("d92"), 'd9')
        self.assertEqual(w.extract_label("defg"), 'defg')
        self.assertEqual(w.extract_label("defgh"), None)
    def test_grep_and_regex_filter(self):
        # grep and regex results are concatenated ("NOUN" + "ACC")
        w = featurize.WebCorpusExtractor(
            grep_filter=["NOUN", "VERB"],
            regex_filter=[r'<([^<>]+)>']
        )
        self.assertEqual(w.extract_label("NOUN<CAS<ACC>"), "NOUNACC")
        self.assertEqual(w.extract_label("<CAS<ACC>"), None)
class NGramFeaturizerTest(unittest.TestCase):
    """Tests for featurize.NGramFeaturizer: positional vs bag-of-ngrams
    features, with and without boundary padding.

    Feature keys like '2.0' appear to encode "<n>.<position>" — confirm
    against NGramFeaturizer's implementation.
    """
    def test_padding_positional(self):
        f = featurize.NGramFeaturizer(2, 3,
                                      max_sample_per_class=2, use_padding=True)
        f.featurize_stream(io.StringIO("abc\tdef"))
        features = f.dataset.samples[0].features
        # padding adds leading/trailing space bigrams ' a' and 'c '
        self.assertEqual(set(features.values()), {' a', 'ab', 'bc', 'c '})
        self.assertEqual(set(features.keys()), {'2.0', '2.1', '2.2', '2.3'})
    def test_no_padding_positional(self):
        f = featurize.NGramFeaturizer(2, 3, max_sample_per_class=2,
                                      use_padding=False)
        f.featurize_stream(io.StringIO("abc\tdef"))
        features = f.dataset.samples[0].features
        self.assertEqual(set(features.values()), {'ab', 'bc'})
        self.assertEqual(set(features.keys()), {'2.0', '2.1'})
    def test_padding_bagof(self):
        f = featurize.NGramFeaturizer(2, 5, max_sample_per_class=2,
                                      use_padding=True, bagof=True)
        f.featurize_stream(io.StringIO("abcab\tdef"))
        features = f.dataset.samples[0].features
        # bag-of-ngrams: keys are the ngrams themselves, values just True
        self.assertEqual(set(features.keys()), {'ab', 'bc', 'ca', ' a', 'b '})
        self.assertEqual(set(features.values()), {True})
    def test_no_padding_bagof(self):
        f = featurize.NGramFeaturizer(2, 5, max_sample_per_class=2,
                                      use_padding=False, bagof=True)
        f.featurize_stream(io.StringIO("abcab\tdef"))
        features = f.dataset.samples[0].features
        self.assertEqual(set(features.keys()), {'ab', 'bc', 'ca'})
        self.assertEqual(set(features.values()), {True})
    def test_last_char(self):
        # word longer than the window (3): only the trailing bigrams survive
        f = featurize.NGramFeaturizer(2, 3, max_sample_per_class=2,
                                      use_padding=False, bagof=False)
        f.featurize_stream(io.StringIO("abcdef\tdef"))
        features = f.dataset.samples[0].features
        self.assertEqual(set(features.keys()), {'2.0', '2.1'})
        self.assertEqual(set(features.values()), {'de', 'ef'})
    def test_last_char_with_padding(self):
        f = featurize.NGramFeaturizer(2, 3, max_sample_per_class=2,
                                      use_padding=True, bagof=False)
        f.featurize_stream(io.StringIO("abcdef\tdef"))
        features = f.dataset.samples[0].features
        self.assertEqual(set(features.keys()), {'2.0', '2.1', '2.2', '2.3'})
        self.assertEqual(set(features.values()), {'de', 'ef', ' d', 'f '})
class CharacterSequenceFeaturizerTester(unittest.TestCase):
    """Tests for featurize.CharacterSequenceFeaturizer: last-N character
    windows, left-padding, lowercasing and rare-character replacement."""
    def test_init(self):
        f = featurize.CharacterSequenceFeaturizer(5, 10)
        self.assertIsInstance(f, featurize.Featurizer)
        self.assertEqual(f.dataset.max_sample_per_class, 10)
    def test_feature_extraction(self):
        # window of 2 keeps only the last two characters of "abc"
        f = featurize.CharacterSequenceFeaturizer(2, 10)
        f.featurize_stream(io.StringIO("abc\tdef"))
        s = f.dataset.samples.pop()
        self.assertEqual(s.features, [{'ch': 'b'}, {'ch': 'c'}])
    def test_feature_extraction_short_word(self):
        # words shorter than the window are left-padded with spaces
        f = featurize.CharacterSequenceFeaturizer(3, 10)
        f.featurize_stream(io.StringIO("ab\tdef"))
        s = f.dataset.samples.pop()
        self.assertEqual(s.features,
                         [{'ch': ' '}, {'ch': 'a'}, {'ch': 'b'}])
    def test_feature_extraction_several_lines(self):
        f = featurize.CharacterSequenceFeaturizer(3, 10)
        f.featurize_stream(io.StringIO(input_simple))
        l = len(input_simple.strip().split('\n'))
        self.assertEqual(len(f.dataset), l)
    def test_skip_rare(self):
        # with replace_rare=False the rare character is dropped entirely,
        # so both inputs reduce to the same window
        f = featurize.CharacterSequenceFeaturizer(3, 10, replace_rare=False)
        f.featurize_stream(io.StringIO("aßbc\ta\nabc\ta"))
        s1 = f.dataset.samples.pop()
        s2 = f.dataset.samples.pop()
        self.assertEqual(s1.features, s2.features)
    def test_replace_rare(self):
        # with replace_rare=True both rare characters map to the same token
        f = featurize.CharacterSequenceFeaturizer(3, 10, replace_rare=True)
        f.featurize_stream(io.StringIO("aßbc\ta\naデbc\ta"))
        s1 = f.dataset.samples[0]
        s2 = f.dataset.samples[1]
        self.assertEqual(s1.features, s2.features)
    def test_lower(self):
        f = featurize.CharacterSequenceFeaturizer(3, 10)
        f.featurize_stream(io.StringIO("AbCd\ta\nabCD\ta"))
        s1 = f.dataset.samples[0]
        s2 = f.dataset.samples[1]
        self.assertEqual(s1.features, s2.features)
    def test_replace_punct(self):
        # different punctuation characters normalise to the same feature
        f = featurize.CharacterSequenceFeaturizer(3, 10)
        f.featurize_stream(io.StringIO("a!?\ta\na#'\ta"))
        s1 = f.dataset.samples[0]
        s2 = f.dataset.samples[1]
        self.assertEqual(s1.features, s2.features)
    def test_different_alphabet(self):
        # characters outside the custom alphabet count as rare
        f = featurize.CharacterSequenceFeaturizer(3, 10, alphabet='abcd',
                                                  replace_rare=True)
        f.featurize_stream(io.StringIO("axz\ta\nakl\ta"))
        s1 = f.dataset.samples[0]
        s2 = f.dataset.samples[1]
        self.assertEqual(s1.features, s2.features)
    def test_replace_rare_char(self):
        # rare characters are replaced with the configured rare_char ('x')
        f = featurize.CharacterSequenceFeaturizer(3, 10, rare_char='x')
        f.featurize_stream(io.StringIO("aデ\ta\nax\ta"))
        s1 = f.dataset.samples[0]
        s2 = f.dataset.samples[1]
        self.assertEqual(s1.features, s2.features)
class MatrixCreationTester(unittest.TestCase):
    """Tests for dataset.X / dataset.y matrix shapes: 2-D for n-gram
    features, 3-D for character-sequence features."""
    def test_2d_unique_samples(self):
        f = featurize.NGramFeaturizer(1, 3, max_sample_per_class=2,
                                      use_padding=False)
        f.featurize_stream(io.StringIO("abc\tdef"))
        X = f.dataset.X
        # one sample, three unigram feature columns
        self.assertEqual(X.shape, (1, 3))
        y = f.dataset.y
        self.assertEqual(y.shape, (1, 1))
    def test_2d_unique_samples2(self):
        f = featurize.NGramFeaturizer(1, 3, max_sample_per_class=2,
                                      use_padding=False)
        f.featurize_stream(io.StringIO("abc\tdef\nabd\t12"))
        X = f.dataset.X
        # the union of the two samples' unigrams spans four columns
        self.assertEqual(X.shape, (2, 4))
        y = f.dataset.y
        self.assertEqual(y.shape, (2, 2))
    def test_2d_nonunique_samples(self):
        f = featurize.NGramFeaturizer(1, 3, max_sample_per_class=2,
                                      skip_duplicates=False, use_padding=False)
        f.featurize_stream(io.StringIO("abc\tdef\nabd\t12\nabc\tdef"))
        X = f.dataset.X
        # duplicates kept: three rows
        self.assertEqual(X.shape, (3, 4))
        y = f.dataset.y
        self.assertEqual(y.shape, (3, 2))
    def test_3d_simple(self):
        f = featurize.CharacterSequenceFeaturizer(3, 10)
        f.featurize_stream(io.StringIO("abb\ta"))
        X = f.dataset.X
        # (samples, sequence length, vocabulary size)
        self.assertEqual(X.shape, (1, 3, 2))
        y = f.dataset.y
        self.assertEqual(y.shape, (1, 1))
    def test_3d_unique_samples(self):
        f = featurize.CharacterSequenceFeaturizer(3, 10)
        f.featurize_stream(io.StringIO("abb\ta\nabb\ta\nbcd\tb"))
        X = f.dataset.X
        self.assertEqual(X.shape, (2, 3, 4))
        y = f.dataset.y
        self.assertEqual(y.shape, (2, 2))
if __name__ == '__main__':
    unittest.main()
|
990,460 | 3b6d6b65f664f6743d294802d709a9c45e42f4c5 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 14:15:29 2019
new-2-gram
2. 使用新数据源完成语言模型的训练
按照我们上文中定义的prob_2函数,我们更换一个文本数据源,获得新的Language Model:
下载文本数据集(你可以在以下数据集中任选一个,也可以两个都使用)
可选数据集1,保险行业问询对话集:
https://github.com/Computing-Intelligence/insuranceqa-corpus-zh/raw/release/corpus/pool/train.txt.gz
可选数据集2:豆瓣评论数据集:
https://github.com/Computing-Intelligence/datasource/raw/master/movie_comments.csv
修改代码,获得新的2-gram语言模型
进行文本清洗,获得所有的纯文本
将这些文本进行切词
送入之前定义的语言模型中,判断文本的合理程度
@author: us
"""
import jieba
from collections import Counter
import pandas as pd
import re
'''
def get_1_gram_count(word):
#输入word的计数
if word in words_count: return words_count[word]
else:
return words_count.most_common()[-1][-1]
def get_2_gram_count(word):
#输入俩字词的计数
if word in _2_gram_word_counts: return _2_gram_word_counts[word]
else:
return _2_gram_word_counts.most_common()[-1][-1]
'''
def get_gram_count(word, wc):
    """Return the count of *word* from the Counter *wc*.

    Unseen words fall back to the smallest observed count — a crude
    smoothing so the 2-gram probability never becomes zero.
    """
    if word not in wc:
        # most_common() is sorted by descending count, so the final entry
        # carries the smallest count in the counter.
        return wc.most_common()[-1][-1]
    return wc[word]
'''
#
get_gram_count('你',words_count)
#
get_gram_count('那么', _2_gram_word_counts)
'''
def two_gram_model(sentence, corpus):
    """Score *sentence* with a 2-gram language model built from *corpus*.

    corpus: path to a one-column text file (read with pandas).
    Returns the product of P(next_word | word) over adjacent token pairs.

    NOTE(review): the whole corpus is re-read, re-segmented and re-counted
    on every call — hoist the count-building out if this is called often.
    """
    #FILE = open(corpus,'rb').read()
    # print(len(FILE))
    df = pd.read_table(corpus,header=None)
    corpus = ''.join(df[0].tolist())
    # Extract only the Chinese (CJK Unified Ideographs) runs from the corpus
    def get_chinese(line):
        line_chinese = re.findall('[\u4e00-\u9fa5]+',line,re.S)
        return line_chinese
    TOKENS = ''.join(get_chinese(corpus))
    def cut(string):
        # Segment Chinese text into word tokens
        return list(jieba.cut(string))
    TOKENS = cut(TOKENS)
    # Adjacent token pairs (concatenated into one string each)
    _2_gram_words = [TOKENS[i] + TOKENS[i+1] for i in range(len(TOKENS)-1)]
    # Bigram counts
    _2_gram_word_counts = Counter(_2_gram_words)
    # Unigram counts
    words_count = Counter(TOKENS)
    # 2-gram langauge model
    tokens = cut(sentence)
    probability = 1
    for i in range(len(tokens)-1):
        word = tokens[i]
        next_word = tokens[i+1]
        _two_gram_c = get_gram_count(word+next_word, _2_gram_word_counts)
        _one_gram_c = get_gram_count(next_word, words_count)
        # P(next_word | word) estimated as count(word,next)/count(next)
        pro = _two_gram_c / _one_gram_c
        probability *= pro
    return probability
if __name__=='__main__':
    # Example run against a local copy of the insurance-QA training corpus
    corpus = 'C:/Users/us/Desktop/train.txt'
    pro = two_gram_model('这个保险什么时候赔付', corpus)
    print(pro)
|
990,461 | 93eded113d737d03cbcbcb517a77c14049e17793 | from enum import Enum
from src.items.aged_brie import AgedBrieItem
from src.items.backstage_passes import BackstagePassesItem
from src.items.conjured import ConjuredItem
from src.items.legendary import LegendaryItem
from src.items.normal import NormalItem
class Categories(Enum):
    """Enumeration mapping item category names to their item classes.

    Each member's value is the class imported above that handles that
    category of item.
    """
    normal = NormalItem
    aged_brie = AgedBrieItem
    legendary = LegendaryItem
    backstage_passes = BackstagePassesItem
    conjured = ConjuredItem
|
990,462 | b11e02f7f1d3f5d057c467e6e0e8d2e25e52fc01 | # -*- coding: utf-8 -*-
# Anonymous functions
def add(x,y):
    return x+y
print(add(1,2))
f = lambda x,y: x+y
print(f(1,2))
# Lambda expressions
# lambda parameter_list: expression (the body must be a single expression,
# statements are not allowed)
# Ternary (conditional) expressions
# Given x and y: return x if x is greater than y, otherwise y
# C-style equivalent: x > y ? x : y
# Python form: value_if_true if condition else value_if_false
x = 2
y = 1
r = x if x > y else y
print(r) |
990,463 | 35951a55d53ff9c849c86e55ed799f6b2fe9c466 | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
from odoo import models, fields, api, exceptions, _
class DocumentationBoard(models.Model):
    """Dashboard tile that points at an arbitrary model/view pair and shows
    how many records that model currently holds."""
    _name = 'documentation.board'
    _description = _('Dashboard')

    @api.depends('used_model')
    def _get_count_item(self):
        """Compute item_count: number of records of the configured model."""
        for r in self:
            res = 0
            if r.used_model:
                # search_count() lets the database count the rows directly
                # instead of materialising every record just to len() it.
                res = self.env[r.used_model].search_count([])
            r.item_count = res

    name = fields.Char(string=_('Name'), required=True, translate=True)
    used_model = fields.Char(string=_('Model'), required=True)
    used_view = fields.Char(string=_('View'), required=True)
    item_count = fields.Integer(compute='_get_count_item', string=_('Item count'))
    item_name = fields.Char(string=_('Items name'), translate=True)

    @api.multi
    def action_open_view(self):
        """Open the window action referenced by used_view.

        Returns None when no view is configured (original behaviour kept).
        """
        if self.used_view:
            return self.env['ir.actions.act_window'].for_xml_id('module_documentation', self.used_view)
990,464 | b74f1a28fe59b33192283d8b3f3c7a1f65b46649 | from dataclasses import dataclass, field
from .logger_params import LoggerParams
@dataclass()
class PredictParams:
    """Configuration record for a model prediction run."""
    # Config section name.
    name: str = 'predict'
    # Experiment label for this run.
    experiment: str = 'default_experiment'
    # Path to the input data file.
    data_path: str = 'heart.csv'
    # Path to the serialized (pickled) model to load.
    model_path: str = 'model.pkl'
    # Path where prediction results are written.
    save_path: str = 'results.csv'
    # Nested logging configuration; default_factory avoids sharing one
    # mutable LoggerParams instance across all PredictParams objects.
    # NOTE: the field name shadows the stdlib `logging` module within users
    # of this class — rename with care if that ever causes confusion.
    logging: LoggerParams = field(default_factory=LoggerParams)
|
class RebarLayoutRule(Enum, IComparable, IFormattable, IConvertible):
    """
    The rule for how the rebars in rebar set are laid out.

    enum RebarLayoutRule, values: Single (0), FixedNumber (1),
    MaximumSpacing (2), NumberWithSpacing (3), MinimumClearSpacing (4)
    """
    # Auto-generated interop stub for a .NET enum: the method bodies are
    # placeholders and the member values are populated by the host runtime,
    # not by CPython.

    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==y """
        pass

    def __ne__(self, *args):
        pass

    def __lt__(self, *args):
        pass

    def __le__(self, *args):
        pass

    def __gt__(self, *args):
        pass

    def __ge__(self, *args):
        pass

    def __format__(self, *args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass

    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    def __reduce_ex__(self, *args):
        pass

    def __str__(self, *args):
        pass

    # Enum members (values supplied by the runtime)
    Single = None
    FixedNumber = None
    MaximumSpacing = None
    NumberWithSpacing = None
    MinimumClearSpacing = None
    value__ = None
|
990,466 | 50f2ff6ea2aa7239f829ff37d1dcbef90b4b510d | #!/usr/bin/env python
import socket
import sys
# Python 2 TCP server: accepts one connection at a time, echoes an
# acknowledgement for every message, and loops forever until interrupted.
HOST = '192.168.200.132'
PORT = 50012
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
    print 'Failed to create socket,Error code: ' + str(msg[0]) + ', Error message: ' + msg[1]
    sys.exit()
print 'Socket Created.'
s.bind((HOST, PORT))
print 'Socket bind complete'
s.listen(5)
print 'Socket is listening now.'
try:
    while True:
        # Blocking accept: clients are served strictly one after another.
        conn, addr = s.accept()
        print 'Connected by', addr
        while True:
            data = conn.recv(1024)
            if not data:
                # Peer closed: close our side and go accept the next client.
                conn.close()
                break
            print ('[Client %s:%s said]:%s' % (addr[0], addr[1], data))
            reply = 'server has received your message.'
            conn.sendall(reply)
        # NOTE(review): conn was already closed above before the break, so
        # this second close is redundant (closing twice is harmless here).
        conn.close()
finally:
    # Release the listening socket on Ctrl-C or any other exit path.
    s.close()
|
990,467 | e47d8e90d3228e9db4de647ca6705930ea421f37 | #!/usr/bin/env python
from geometry_msgs.msg import PoseStamped, TwistStamped, Point, Quaternion
import rospy
import math
import tf
from std_msgs.msg import Float32, Bool, String
from VelocityController import VelocityController
from sensor_msgs.msg import Imu
import numpy as np
import copy
class position_control():
    """ROS node wrapping MAVROS position/velocity setpoints.

    The constructor initialises the node, wires up all subscribers and
    publishers, then blocks in a 20 Hz loop (until rospy shutdown) that,
    depending on the current mode ('posctr', 'velctr', 'velposctr'),
    publishes either a raw position setpoint or a PID-derived velocity
    setpoint computed from the lidar-corrected pose.
    """
    def __init__(self):
        print 'Initialising position control'
        rospy.init_node('position_control', anonymous=True)
        rate = rospy.Rate(20)
        # NOTE(review): assigning the Float32 *class* first is redundant —
        # each value is immediately overwritten with a plain float.
        self.lidar_height = Float32
        self.lidar_height = 0.0
        self.actual_height = Float32
        self.actual_height = 0.0
        # local_pose with its z replaced by the lidar-derived height
        self.real_pose = PoseStamped()
        # ----------Subscribers----------#
        rospy.Subscriber('/position_control/set_mode', String, self.set_mode_callback)
        rospy.Subscriber('/position_control/set_position', PoseStamped, self.set_pose_callback)
        rospy.Subscriber('/position_control/set_velocity', PoseStamped, self.set_velocity_callback)
        rospy.Subscriber('/position_control/set_velocityPose', PoseStamped, self.set_velpose_callback)
        rospy.Subscriber('/position_control/set_x_pid', Point, self.set_x_pid)
        rospy.Subscriber('/position_control/set_y_pid', Point, self.set_y_pid)
        rospy.Subscriber('/position_control/set_z_pid', Point, self.set_z_pid)
        rospy.Subscriber('/position_control/set_yaw_pid', Point, self.set_yaw_pid)
        #Set max output velocity on PID in velocity control
        rospy.Subscriber('/position_control/set_xy_vel', Float32, self.set_xy_vel)
        rospy.Subscriber('/position_control/set_z_vel', Float32, self.set_z_vel)
        self.local_pose = PoseStamped()
        rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self._local_pose_callback)
        self.local_velocity = TwistStamped()
        rospy.Subscriber('/mavros/local_position/velocity', TwistStamped, self._local_velocity_callback)
        self.local_imu = Imu()
        rospy.Subscriber('/mavros/imu/data', Imu, self._local_imu_callback)
        rospy.Subscriber('Laser_LidarLite', Float32, self._local_lidar_callback)
        # pos
        self._pose_pub = rospy.Publisher('mavros/setpoint_position/local', PoseStamped, queue_size=10)
        self._pose_msg = PoseStamped()
        self._vel_pose_msg = PoseStamped()
        self._pos_state = "posctr"
        # vel
        self._vel_pub = rospy.Publisher('mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10)
        self._vel_msg = TwistStamped()
        self._vel_state = "velctr"
        self._velpose_pub = rospy.Publisher('mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10)
        self._velpose_msg = PoseStamped()
        self._velpose_state = "velposctr"
        # True when the vehicle is close to its target and nearly stationary
        self.dist = rospy.Publisher('/position_control/distance', Bool, queue_size=10)
        self._real_pose = rospy.Publisher('/position_control/Real_pose', PoseStamped, queue_size=10)
        self.yawangle = rospy.Publisher('/position_control/Yawangle', Float32, queue_size=10)
        self.pitchangle = rospy.Publisher('/position_control/Pitchangle', Float32, queue_size=10)
        self.rollangle = rospy.Publisher('/position_control/Rollangle', Float32, queue_size=10)
        self.pid_out_pub = rospy.Publisher('/position_control/pid_out', TwistStamped, queue_size=10)
        self.current_publisher = self._pose_pub
        self.current_message = self._pose_msg
        # Initial setpoint: hold x/y, climb to 3 m
        # self._pose_msg.pose.position.z = 3
        self._pose_msg = self.local_pose
        self._pose_msg.pose.position.x = 0
        self._pose_msg.pose.position.y = 0
        self._pose_msg.pose.position.z = 3
        self.set_pose(self._pose_msg)
        self.des_vel = TwistStamped()
        self.current_mode = String()
        self.current_mode.data = 'posctr'
        self.vController = VelocityController()
        # PID gains (kp, ki, kd, output limit); earlier tunings kept in comments
        self.vController.set_x_pid(1.0, 0.0, 0.0, 1)  # 0.15 #MARCUS: 2.8, 0.913921, 0.0, 1
        self.vController.set_y_pid(1.0, 0.0, 0.0, 1)  # 2.1, 0.713921, 0.350178 #MARCUS: 2.8, 0.913921, 0.0, 1
        self.vController.set_z_pid(1.0, 0.0, 0.0, 0.3)  # 0.15 #MARCUS: 1.3, 2.4893, 0.102084, 1
        # self.vController.set_yaw_pid(3.6,1.33333,0.1875,1)
        self.vController.set_yaw_pid(1, 0, 0, 1)#1, 1.33333, 0.1875, 1
        print 'Init done'
        # Main 20 Hz publishing loop — runs until rospy is shut down, so
        # __init__ does not return during normal operation.
        while not rospy.is_shutdown():
            if self.current_mode.data == 'posctr':
                self._pose_pub.publish(self._pose_msg)
            elif self.current_mode.data == 'velctr':
                self.vController.setTarget(self._vel_pose_msg.pose)
                self.des_vel = self.vController.update(self.real_pose)
                self._vel_pub.publish(self.des_vel)
                self.pid_out_pub.publish(self.des_vel)
            elif self.current_mode.data == 'velposctr':
                self.vController.setTarget(self._velpose_msg.pose)
                self.des_velpos = self.vController.update(self.real_pose)
                self._velpose_pub.publish(self.des_velpos)
                self.pid_out_pub.publish(self.des_velpos)
            else:
                print "No such position mode"
            self._real_pose.publish(self.real_pose)
            self.check_distance()
            self.get_angles()
            rate.sleep()

    def set_mode_callback(self, data):
        """Switch between 'posctr', 'velctr' and 'velposctr'."""
        self.current_mode = data

    def set_vel(self, vel):
        """Copy the linear components of *vel* into the velocity message."""
        self._vel_msg.twist.linear.x = vel.twist.linear.x
        self._vel_msg.twist.linear.y = vel.twist.linear.y
        self._vel_msg.twist.linear.z = vel.twist.linear.z

    def set_pose(self, pose):
        """Set a position setpoint relative to the current local x/y.

        z is corrected by the difference between lidar height and the
        local-position z estimate.
        """
        self._pose_msg.pose.position.x = self.local_pose.pose.position.x + pose.pose.position.x
        self._pose_msg.pose.position.y = self.local_pose.pose.position.y + pose.pose.position.y
        self._pose_msg.pose.position.z = pose.pose.position.z - (self.actual_height - self.local_pose.pose.position.z)
        #self._pose_msg.pose.orientation.x = self._orient_msg.pose.orientation.x
        #self._pose_msg.pose.orientation.y = self._orient_msg.pose.orientation.y
        #self._pose_msg.pose.orientation.z = self._orient_msg.pose.orientation.z
        #self._pose_msg.pose.orientation.w = self._orient_msg.pose.orientation.w

    def set_vel_pose(self, vel_pose):
        """Set the velocity-control target: x/y relative, z absolute."""
        # print(vel_pose.pose)
        self._vel_pose_msg.pose.position.x = self.local_pose.pose.position.x + vel_pose.pose.position.x
        self._vel_pose_msg.pose.position.y = self.local_pose.pose.position.y + vel_pose.pose.position.y
        self._vel_pose_msg.pose.position.z = vel_pose.pose.position.z
        print(self._vel_pose_msg)
        # print(self._vel_pose_msg.pose)
        self._vel_pose_msg.pose.orientation.x = vel_pose.pose.orientation.x
        self._vel_pose_msg.pose.orientation.y = vel_pose.pose.orientation.y
        self._vel_pose_msg.pose.orientation.z = vel_pose.pose.orientation.z
        self._vel_pose_msg.pose.orientation.w = vel_pose.pose.orientation.w

    def set_velpose_pose(self, vel_pose):
        """Set the velposctr target as an absolute pose (no offsetting)."""
        self._velpose_msg.pose.position.x = vel_pose.pose.position.x
        self._velpose_msg.pose.position.y = vel_pose.pose.position.y
        self._velpose_msg.pose.position.z = vel_pose.pose.position.z
        # print(self._vel_pose_msg.pose)
        self._velpose_msg.pose.orientation.x = vel_pose.pose.orientation.x
        self._velpose_msg.pose.orientation.y = vel_pose.pose.orientation.y
        self._velpose_msg.pose.orientation.z = vel_pose.pose.orientation.z
        self._velpose_msg.pose.orientation.w = vel_pose.pose.orientation.w

    def _local_pose_callback(self, data):
        """Cache the MAVROS pose; real_pose gets the lidar-derived z."""
        self.local_pose = data
        self.real_pose = copy.deepcopy(self.local_pose)
        self.real_pose.pose.position.z = self.actual_height

    def _local_velocity_callback(self, data):
        self.local_velocity = data

    def _local_imu_callback(self, data):
        self.local_imu = data

    def _local_lidar_callback(self, data):
        """Convert the raw lidar range (cm) to a tilt-compensated height (m)."""
        self.lidar_height = (data.data / 100)
        X = self.local_imu.orientation.x
        Y = self.local_imu.orientation.y
        Z = self.local_imu.orientation.z
        W = self.local_imu.orientation.w
        (roll, pitch, Yaw) = tf.transformations.euler_from_quaternion([X, Y, Z, W])
        Comp = -math.sin(pitch) * 0.22  # 0.22 = 22cm from rotation centrum
        # Project the slant range onto vertical, subtract the pitch lever-arm
        # compensation; the trailing -0.3 m is a fixed offset — presumably
        # the sensor's mounting height, TODO confirm.
        self.actual_height = ((self.lidar_height * (math.cos(pitch) * math.cos(roll))) - Comp)-0.3

    def set_pose_callback(self, data):
        self.set_pose(data)

    def set_velocity_callback(self, data):
        self.set_vel_pose(data)

    def set_velpose_callback(self, data):
        self.set_velpose_pose(data)

    def set_x_pid(self, data):
        """Point(x, y, z) carries (kp, ki, kd)."""
        self.vController.set_x_pid(data.x, data.y, data.z)

    def set_y_pid(self, data):
        self.vController.set_y_pid(data.x, data.y, data.z)

    def set_z_pid(self, data):
        self.vController.set_z_pid(data.x, data.y, data.z)

    def set_yaw_pid(self, data):
        self.vController.set_yaw_pid(data.x, data.y, data.z)

    def set_xy_vel(self, data):
        self.vController.set_xy_vel(data)

    def set_z_vel(self, data):
        self.vController.set_z_vel(data)

    def get_angles(self):
        """Publish roll/pitch/yaw (degrees) derived from the IMU quaternion."""
        X = self.local_imu.orientation.x
        Y = self.local_imu.orientation.y
        Z = self.local_imu.orientation.z
        W = self.local_imu.orientation.w
        (roll, pitch, yaw) = tf.transformations.euler_from_quaternion([X, Y, Z, W])
        self.yawangle.publish(math.degrees(yaw))  # Yaw in degrees
        self.pitchangle.publish(math.degrees(pitch))  # Pitch in degrees
        self.rollangle.publish(math.degrees(roll))  # Roll in degrees

    def check_distance(self):
        """Publish True when close to the current target and nearly at rest.

        The distance tolerance depends on the active mode (0.5/0.2/0.3 m).
        """
        if self.current_mode.data == 'posctr':
            booldist = self.is_at_position(self.local_pose.pose.position, self._pose_msg.pose.position, 0.5)
            boolvel = self.hover_velocity()
            self.dist.publish(booldist and boolvel)
        elif self.current_mode.data == 'velctr':
            # print("target vel_pos: {}".format(vel_pose_tot))
            booldist = self.is_at_position(self.real_pose.pose.position, self._vel_pose_msg.pose.position, 0.2)
            boolvel = self.hover_velocity()
            self.dist.publish(booldist and boolvel)
        elif self.current_mode.data == 'velposctr':
            # print("target vel_pos: {}".format(vel_pose_tot))
            booldist = self.is_at_position(self.real_pose.pose.position, self._velpose_msg.pose.position, 0.3)
            boolvel = self.hover_velocity()
            self.dist.publish(booldist and boolvel)

    def is_at_position(self, p_current, p_desired, offset):
        """Return True if the Euclidean distance to the target is < offset."""
        des_pos = np.array((p_desired.x,
                            p_desired.y,
                            p_desired.z))
        cur_pos = np.array((p_current.x,
                            p_current.y,
                            p_current.z))
        distance = np.linalg.norm(des_pos - cur_pos)
        return distance < offset

    def hover_velocity(self):
        """True when every linear velocity component is below 0.2 m/s.

        NOTE(review): this checks signed values, so large negative
        velocities also pass — confirm whether abs() was intended.
        """
        return self.local_velocity.twist.linear.x < 0.2 and self.local_velocity.twist.linear.y < 0.2 and self.local_velocity.twist.linear.z < 0.2
if __name__ == '__main__':
    try:
        # The constructor contains the node's main loop and only returns on
        # ROS shutdown.
        position_control()
    except rospy.ROSInterruptException:
        # Normal Ctrl-C / node-shutdown path — nothing to clean up here.
        pass
|
990,468 | 4ba155810d52cbc1c2c86f66c7eb606ad4a0fdc1 | import cv2
##############################################################
# These parameter values are indicative. You should choose your own
# according to properties of the method you want to demonstrate
h = 5                    # filter strength (applied to luminance and color)
templateWindowSize = 7   # patch size used to compute weights
searchWindowSize = 30    # NOTE(review): OpenCV docs recommend an odd size — confirm
##############################################################
img = cv2.imread('alley-highNoise.png')
# Non-local-means denoising of a color image; same strength h for luma and chroma.
dst = cv2.fastNlMeansDenoisingColored(img, None, h, h, templateWindowSize, searchWindowSize)
cv2.imwrite('search30.png', dst)
|
990,469 | 59da7ae8d872ea39abc4ffd93bf6bac4ffe535c1 | import sys
last_text = ""
last_lang = ""
last_sf = 0
# Streaming reducer (Python 2): stdin rows are "lang<TAB>text<TAB>count".
# Counts of consecutive rows sharing the same (lang, text) key are summed;
# a line is emitted when the key changes, and once more at EOF.
for line in sys.stdin:
    line = line.strip()
    infos = line.split("\t")
    if len(infos) != 3:
        # Skip malformed rows.
        continue
    lang = infos[0]
    text = infos[1]
    sf = int(infos[2])
    if (last_lang != "" and lang != last_lang) or (last_text != "" and text != last_text):
        # Key changed: flush the finished group and restart the accumulator.
        print last_lang + "\t" + last_text + "\t" + str(last_sf)
        last_lang = lang
        last_text = text
        last_sf = sf
        continue
    last_lang = lang
    last_text = text
    last_sf += sf
# Flush the final group.
print last_lang + "\t" + last_text + "\t" + str(last_sf)
|
990,470 | fb3c19d3c7f093ea13859e82161fa0c85cd2cfc9 | from django import forms
from django.forms import ModelForm
from blog_app.models import Post,Comment
class PostForm(ModelForm):
    """Model form for creating or editing a blog Post."""

    class Meta:
        model = Post
        fields = ('author', 'title', 'text')
        # CSS hooks for the medium-editor frontend.
        widgets = {
            'title': forms.TextInput(attrs={'class': 'textinputclass'}),
            'text': forms.Textarea(attrs={'class': 'editable medium-editor-textarea postcontent'}),
        }
class CommentForm(ModelForm):
    """Model form for posting a Comment on a blog Post."""

    class Meta:
        model = Comment
        fields = ('author', 'text')
        # CSS hooks for the medium-editor frontend.
        widgets = {
            'author': forms.TextInput(attrs={'class': 'textinputclass'}),
            'text': forms.Textarea(attrs={'class': 'editable medium-editor-textarea'}),
        }
class UserRegistrationForm(forms.Form):
    """Plain (non-model) registration form: username, email and password."""

    username = forms.CharField(required=True, label='Username', max_length=32)
    email = forms.CharField(required=True, label='Email', max_length=32)
    # Rendered as a password input so the value is never echoed.
    password = forms.CharField(
        required=True,
        label='Password',
        max_length=32,
        widget=forms.PasswordInput(),
    )
|
990,471 | 2dba0bd99af4070651b637ff6f469c820307734c | import webapp2
import cgi
import re
def build_page(username, password, ver_pass, email, error_username, error_password, error_ver_pass, error_email):
    """Return the signup-page HTML.

    Echoes the (HTML-escaped) username and email back into the form inputs and
    renders any per-field validation messages in red.  Password fields are
    never echoed back.

    Fixes over the original:
    - username/email are escaped before being embedded in attribute values
      (quote=True also escapes ' and "), closing a reflected-XSS hole;
    - the email value is actually echoed back (the parameter was previously
      accepted but never used);
    - dead `.format()` calls on strings with no placeholders were removed.
    """
    import html  # stdlib escaper; local import keeps module-level imports untouched

    safe_username = html.escape(username, quote=True)
    safe_email = html.escape(email, quote=True)
    header = "<h1>Signup</h1>"
    user_label = "<label style='margin:2% 4%; font-#weight: bold; font-size: 14px; '>Username</label>"
    user_input = "<input type='text' name='username' value='{0}'>".format(safe_username)
    error_username = "<p style='color:red' name='error_username' >{0}</p>".format(error_username)
    pw_label = "<label style='margin:2% 4%; font-#weight: bold; font-size: 14px; '>Password</label>"
    pw_input = "<input type='password' name='password' value=''>"
    error_password = "<p style='color:red' name='error_password' >{0}</p>".format(error_password)
    ver_pw_label = "<label style='margin:2% 4%; font-#weight: bold; font-size: 14px; '>Verify Password</label>"
    ver_pw_input = "<input type='password' name='ver_pass' value=''>"
    error_ver_pass = "<p style='color:red' name='error_ver_pass' >{0}</p>".format(error_ver_pass)
    email_label = "<label style='margin:2% 4%; font-#weight: bold; font-size: 14px; '>Email (optional)</label>"
    email_input = "<input type='text' name='email' value='{0}'>".format(safe_email)
    error_email = "<p style='color:red' name='error_email' >{0}</p>".format(error_email)
    submit = "<input type='submit'/>"
    form = ("<form method='post'>" + user_label + user_input + error_username + "<br>"
            + pw_label + pw_input + error_password + "<br>"
            + ver_pw_label + ver_pw_input + error_ver_pass + "<br>"
            + email_label + email_input + error_email + "<br>" + submit + "</form>")
    return header + form
#added to try and validate
# 3-20 chars drawn from letters, digits, underscore and hyphen.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
    """Truthy when username matches USER_RE; falsy for empty or bad input."""
    if not username:
        return username
    return USER_RE.match(username)
# Any 3-20 characters.
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
    """Truthy when password is 3-20 characters; falsy for empty or bad input."""
    if not password:
        return password
    return PASS_RE.match(password)
# Loose shape check: something@something.something (no whitespace).
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
    """Truthy when email is empty (the field is optional) or looks like an address."""
    if not email:
        return True
    return EMAIL_RE.match(email)
#end of validate
class Signup(webapp2.RequestHandler):
    """Signup page: GET renders an empty form, POST validates and redirects.

    Bug fix: an invalid email previously did NOT set have_error, so a request
    with only a bad email was redirected to /welcome instead of re-rendering
    the form with the error message.
    """
    def get(self):
        content = build_page('', '', '', '', '', '', '', '')
        self.response.write(content)
    def post(self):
        have_error = False
        username = self.request.get('username')
        password = self.request.get("password")
        ver_pass = self.request.get("ver_pass")
        email = self.request.get("email")
        error_username = self.request.get('error_username')
        error_password = self.request.get('error_password')
        error_ver_pass = self.request.get('error_ver_pass')
        error_email = self.request.get('error_email')
        if not valid_username(username):
            error_username = "That's not a valid username."
            have_error = True
        if not valid_password(password):
            error_password = "That's not a valid password."
            have_error = True
        if password != ver_pass:
            error_ver_pass = "Your passwords didn't match."
            have_error = True
        if not valid_email(email):
            error_email = "Please enter a valid email."
            have_error = True  # FIX: was missing; bad email alone redirected anyway
        if have_error:
            # Re-render the form with the user's input and the error messages.
            content = build_page(username, password, ver_pass, email, error_username, error_password, error_ver_pass, error_email)
            self.response.out.write(content)
        else:
            self.redirect('/welcome?username=' + username)
class Welcome(webapp2.RequestHandler):
    """Greets a valid user by name; otherwise bounces back to the signup form."""
    def get(self):
        username = self.request.get('username')
        if valid_username(username):
            self.response.write("Hello, " + username)
        else:
            # FIX: was redirect('Signup') — a relative path with no matching
            # route; use the registered '/signup' route instead.
            self.redirect('/signup')
# URL routes: the signup form is served at both '/' and '/signup'.
app = webapp2.WSGIApplication([
    ('/', Signup),
    ('/signup', Signup),
    ('/welcome', Welcome)
], debug=True)
|
990,472 | 1b7952898f4c65a3305ed23cd5c0a61314ca7c1a | import torch.nn as nn
import torch.nn.functional as F
class DNNet(nn.Module):
    """1-D conv front-end plus a cross-channel 2-D conv, ending in a softmax classifier.

    Expects input shaped (batch, 1, in_num); returns (batch, out_num) class
    probabilities. in_num must make the shape arithmetic below come out
    positive (e.g. (in_num - 48) // 4 > 20).
    """
    def __init__(self, in_num, out_num):
        super(DNNet, self).__init__()
        self.in_num = in_num
        # (B, 1, in_num) -> (B, 16, in_num - 48); kernel 49, no padding.
        self.conv1 = nn.Conv1d(1, 16, 49, stride=1, padding=0, bias=False)
        self.pool1 = nn.MaxPool1d(kernel_size=4, stride=4)
        self.bn1 = nn.BatchNorm1d(16)
        # Applied to the map reshaped to (B, 1, 16, (in_num-48)//4): the
        # (16, 21) kernel spans all 16 channels at once.
        self.conv2 = nn.Conv2d(1, 16, (16, 21), stride=1, padding=0, bias=False)
        self.pool2 = nn.MaxPool1d(kernel_size=4, stride=4)
        self.bn2 = nn.BatchNorm1d(16)
        self.classifier = nn.Sequential(
            nn.Linear((((in_num - 48) // 4 - 20) // 4) * 16, 100),
            nn.Linear(100, out_num),
            nn.Softmax(dim=1))
    def forward(self, x):
        x = self.conv1(x)  # (B, 16, in_num - 48)
        x = F.relu(self.bn1(x))
        x = self.pool1(x)  # (B, 16, (in_num - 48) // 4)
        # Treat the 16 channels as the height of a single-channel 2-D map.
        x = x.view(-1, 1, 16, (self.in_num - 48) // 4)
        x = self.conv2(x)  # (B, 16, 1, (in_num - 48) // 4 - 20)
        x = x.view(-1, 16, ((self.in_num - 48) // 4 - 20))
        x = F.relu(self.bn2(x))
        x = self.pool2(x)
        # Flatten for the fully-connected classifier.
        x = x.view(-1, (((self.in_num - 48) // 4 - 20) // 4) * 16)
        x = self.classifier(x)
        return x
|
990,473 | b813794d9d9e56c57ff4e18141e15904a8bcfc77 | import pandas as pd
import sys,os
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame,read_csv
from numpy import random
"""
print("Python version "+sys.version)
print("Pandas version "+pd.__version__)
print("Matplotlib version "+matplotlib.__version__)
"""
# Create the data set
names = ['Bob','Jessica','Mary','John','Mel']
births = [968,155,77,578,973]
BabyDataSet = list(zip(names,births))
df = pd.DataFrame(data = BabyDataSet,columns=['Names','Births'])
df.to_csv('births1880.csv',index=False,header=False)
# Read the data back (no header row in the CSV, so supply column names)
Location = r'./births1880.csv'
df = pd.read_csv(Location,names=['Names','Births'])
os.remove(Location)
# Prepare the data: verify that the dtypes match expectations
# Analyze the data
Sorted = df.sort_values(['Births'],ascending=False)
Sorted.head(1);
df['Births'].max()
# Present the data
df['Births'].plot.bar()
MaxValue = df['Births'].max()
# Names whose birth count equals the maximum (ndarray of names)
MaxName = df['Names'][df['Births'] == df['Births'].max()].values
Text = str(MaxValue)+"-"+MaxName
plt.annotate(Text,xy=(1,MaxValue),xytext=(8,0),xycoords=('axes fraction','data'),textcoords='offset points')
print("The most popular name")
df[df['Births'] == df['Births'].max()]
|
990,474 | 9284b2a97491328b4f6feb7bf8df3d5171b0b5b5 | import logging
from os import path
class DynPathFileHandler(logging.FileHandler):
    """A FileHandler whose log file lives under a configurable directory."""

    def __init__(self, filename, dirpath='.', mode='a', encoding=None, delay=False, errors=None):  # noqa: WPS211
        """Open dirpath/filename (with ~ expanded) as the logging stream."""  # noqa: DAR101
        full_path = path.join(path.expanduser(dirpath), filename)
        super().__init__(full_path, mode, encoding, delay, errors)
|
990,475 | ada3a32f9d26ad5f2d1eed4c702ea46308dfebb7 |
class QuerySet:
    """Ordered store of SQL result sets, consumed in insertion order.

    Internally a dict keyed by integers: key 0 is reserved for execution-error
    information (0 means "no error"); keys 1.. hold result sets in the order
    they were put().
    """

    def __init__(self):
        self._set = {0: 0}   # slot 0 is the error slot
        self._next_key = 0   # read cursor (next() advances it first)
        self._put_key = 1    # write cursor

    def next(self):
        """Advance to and return the next result set, or None when exhausted."""
        self._next_key += 1
        return self._set.get(self._next_key, None)

    def value(self, v):
        """Return field v from the first row of the current result set (None if absent)."""
        current = self._set[self._next_key]
        return current[0].get(v, None)

    def put(self, result):
        """Append a result set to the store."""
        self._set[self._put_key] = result
        self._put_key += 1

    def setExcuteError(self, e):
        """Record execution-error information (method name keeps its historical typo)."""
        self._set[0] = e

    @property
    def error(self):
        """Error information for this query; 0 when no error was recorded."""
        return self._set[0]
def search_array(char, array):
    """Return the number of elements of `array` equal to `char`.

    Replaces the manual index/counter loop (with stray semicolons) by a
    generator expression; works for any iterable, including strings.
    """
    return sum(1 for item in array if item == char)
|
990,477 | a6c82a9b751a8c7e83fa67aa30673c38326d427e | #!/usr/bin/env python
"""
Creates an HTTP server with basic auth and websocket communication.
"""
import argparse
import base64
import hashlib
import os
import time
import threading
import webbrowser
import numpy as np
import cv2
import picamera
from picamera.array import PiRGBArray
from PIL import Image
from datetime import datetime
import RPi.GPIO as GPIO
from imutils.object_detection import non_max_suppression
import io as ioEx
try:
import cStringIO as io
except ImportError:
import io
import tornado.web
import tornado.websocket
import signal
from tornado.ioloop import PeriodicCallback
from tornado.escape import json_decode
from video import create_capture
from common import clock, draw_str
# Hashed password for comparison and a cookie for login cache
ROOT = os.path.normpath(os.path.dirname(__file__))
with open(os.path.join(ROOT, "password.txt")) as in_file:
PASSWORD = in_file.read().strip()
COOKIE_NAME = "camp"
uploadDir = os.path.join(ROOT, "upload/")
thread1 = None
detector = None
args = None
resolutions = {"high": (1280, 720), "medium": (640, 480), "low": (320, 240), "360" : (480, 360)}
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.output(18, GPIO.LOW)
def detect(img, cascade):
    """Run a Haar cascade over a grayscale image.

    Returns detections as (x1, y1, x2, y2) corner boxes, or an empty list
    when nothing is found.
    """
    found = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(found) == 0:
        return []
    # detectMultiScale yields (x, y, w, h); add origins to sizes to get corners.
    found[:, 2:] += found[:, :2]
    return found
def draw_rects(img, rects, color):
    """Draw a 2-pixel rectangle on img for each (x1, y1, x2, y2) box."""
    for box in rects:
        left, top, right, bottom = box
        cv2.rectangle(img, (left, top), (right, bottom), color, 2)
# Pre-load the Haar cascades (face + eyes) and a HOG-based person detector.
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
nested = cv2.CascadeClassifier("haarcascade_eye.xml")
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
class RecordVideo(threading.Thread):
    """Camera capture thread.

    Continuously grabs frames from the Pi camera (or a USB webcam when
    args.use_usb is set), keeps the latest frame available to other threads
    via getImage(), and writes frames to an AVI file while recording.
    """
    def __init__(self, camera, camera_width, camera_height):
        """
        Constructor.
        @param camera picamera.PiCamera or cv2.VideoCapture instance
        @param camera_width capture width in pixels
        @param camera_height capture height in pixels
        """
        threading.Thread.__init__(self)
        self._camera = camera
        # NOTE(review): self._stop shadows a private attribute of
        # threading.Thread on some Python versions — confirm on target runtime.
        self._stop = threading.Event()
        self._frame_lock = threading.Lock()   # guards self._frame
        self._writer_lock = threading.Lock()  # guards self._writer
        self._frame = None
        self._fourcc = None
        self._writer = None
        self._width = camera_width
        self._height = camera_height
        self._back = np.zeros((320,240,3), dtype="uint8")  # black fallback frame
        self._is_ready = False
        self._is_recorded = False
    def run(self):
        """
        Capture loop: read frames until stop() is called, publishing each
        frame and feeding the video writer while recording is active.
        """
        # initialize the video stream and allow the camera
        # sensor to warmup
        print("[Recorder] warming up camera...")
        time.sleep(2.0)
        self._fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
        #ret, frame = self._camera.read()
        #(self._height, self._width) = frame.shape[:2]
        print("[Recorder] can start")
        self._is_ready = True
        ret = False
        frame = None
        stream = ioEx.BytesIO()
        while (not self._stop.is_set()):
            if args.use_usb:
                ret, frame = self._camera.read()
            else:
                ret = True
                # NOTE(review): `stream` is never seeked/truncated between
                # captures, so the buffer appears to grow — confirm intended.
                self._camera.capture(stream, format='jpeg', use_video_port=True)
                # Construct a numpy array from the stream
                data = np.fromstring(stream.getvalue(), dtype=np.uint8)
                # "Decode" the image from the array, preserving colour
                frame = cv2.imdecode(data, 1)
            if ret==True:
                # Publish the newest frame for getImage()/snap() consumers.
                self._frame_lock.acquire()
                self._frame = frame
                self._frame_lock.release()
                # write the flipped frame
                self._writer_lock.acquire()
                if not (self._writer is None):
                    self._writer.write(frame)
                self._writer_lock.release()
            time.sleep(0.001)
        if not (self._writer is None):
            self._writer.release()
        print('[Recorder] end thread')
    def stop(self):
        # Ask the capture loop to exit; run() does the cleanup.
        self._stop.set()
        print('[Recorder] stop thread')
    def stopped(self):
        return self._stop.isSet()
    def getImage(self):
        """Return a copy of the latest frame (a black frame before first capture)."""
        clone = None
        if not (self._frame is None):
            self._frame_lock.acquire()
            clone = self._frame.copy()
            self._frame_lock.release()
        else:
            clone = self._back.copy()
        return clone
    def isRecorded(self):
        # True while an AVI writer is open.
        return self._is_recorded
    def snap(self):
        """Save the current frame as a timestamped JPEG in uploadDir; return the filename."""
        clone = None
        if not (self._frame is None):
            self._frame_lock.acquire()
            clone = self._frame.copy()
            self._frame_lock.release()
        else:
            clone = self._back.copy()
        fileName = time.strftime("%Y%m%d-%H%M%S") + '.jpg'
        cv2.imwrite(uploadDir + fileName, clone)
        return fileName
    def startRecord(self):
        """Open a timestamped AVI writer at 20 fps; False if not ready or already recording."""
        if self._is_ready and not self._is_recorded:
            self._writer_lock.acquire()
            self._writer = cv2.VideoWriter(uploadDir + time.strftime("%Y%m%d-%H%M%S") + ".avi", self._fourcc, 20.0, (self._width, self._height), True)
            self._writer_lock.release()
            self._is_recorded = True
            return True
        return False
    def stopRecord(self):
        """Close the writer and finalize the file; False if not ready or not recording."""
        if self._is_ready and self._is_recorded:
            self._writer_lock.acquire()
            self._writer.release()
            self._writer = None
            self._writer_lock.release()
            self._is_recorded = False
            return True
        return False
class MotionDetection(threading.Thread):
    """Background-subtraction motion detector.

    Watches frames from the RecordVideo thread; starts recording when enough
    foreground pixels appear and stops it after ~10 s of stillness.
    """
    def __init__(self, video):
        """
        Constructor.
        @param video RecordVideo instance supplying frames and the recorder
        """
        threading.Thread.__init__(self)
        self._stop = threading.Event()
        self._pause = True  # starts paused until /detect 'start' is posted
        self._video = video
        #self._fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
        #self._fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
        self._fgbg = cv2.createBackgroundSubtractorMOG2()
        self._init = True
        self._hasMotion = False
    def run(self):
        """
        Detection loop: count foreground pixels in the subtractor's mask;
        >500 means motion (start recording), <=100 arms a 10 s stop timer.
        """
        pre_stop = False
        begin_t = 0
        end_t = 0
        self.removeNoise()
        while (not self.stopped()):
            if self._pause:
                time.sleep(0.001)
                continue
            frame = self._video.getImage()
            if not (frame is None):
                fgmask = self._fgbg.apply(frame)
                # Histogram bin 255 counts foreground pixels in the mask.
                hist = cv2.calcHist([fgmask],[0],None,[256],[0,256])
                white_count = hist[255]
                if (white_count > 500):
                    if not self._video.isRecorded() and not self._pause:
                        if self._video.startRecord():
                            self._hasMotion = True
                            print('[Detector] start record video')
                        else:
                            print('[Detector] start record video fail!')
                    pre_stop = False
                elif (white_count <= 100) and self._video.isRecorded():
                    # Scene went still: stop only after 10 s of quiet.
                    if not pre_stop:
                        pre_stop = True
                        begin_t = clock()
                    else:
                        end_t = clock()
                        if end_t - begin_t > 10:
                            if self._video.stopRecord():
                                self._hasMotion = False
                                print('[Detector] stop record video')
                            else:
                                print('[Detector] stop record video fail!')
        if self._video.isRecorded():
            # Thread is exiting: make sure the recorder is closed.
            self._hasMotion = False
            self._video.stopRecord()
        print('[Detector] end Thread')
    def pause(self):
        # Pausing also stops any in-progress recording.
        self._pause = True
        self._video.stopRecord()
        self._hasMotion = False
        print('[Detector] pause thread')
    def resume(self):
        self._pause = False
        # Re-seed the background model so stale state doesn't trigger motion.
        self.removeNoise()
        print('[Detector] resume thread')
    def stop(self):
        self._stop.set()
        print('[Detector] stop thread')
    def stopped(self):
        return self._stop.isSet()
    def removeNoise(self):
        # Feed one frame into the background model to absorb the current scene.
        frame = self._video.getImage()
        self._fgbg.apply(frame)
    def hasMotion(self):
        # True while motion-triggered recording is active.
        return self._hasMotion
class IndexHandler(tornado.web.RequestHandler):
    """Serves the main camera page."""
    def get(self):
        #if args.require_login and not self.get_secure_cookie(COOKIE_NAME):
        #    self.redirect("/login")
        #else:
        # NOTE(review): login enforcement is commented out — the page is
        # currently served without authentication; confirm this is intended.
        self.render("index.html")
class FileManagerHandler(tornado.web.RequestHandler):
    """Serves the file-manager page for browsing saved snapshots/recordings."""
    def get(self):
        #if args.require_login and not self.get_secure_cookie(COOKIE_NAME):
        #    self.redirect("/login")
        #else:
        # NOTE(review): login enforcement is commented out here as well.
        self.render("filemanager.html")
class SnapHandler(tornado.web.RequestHandler):
    """POST /snap: save a snapshot of the current frame; responds with the filename."""
    def post(self):
        fileName = thread1.snap()
        self.write(fileName)
class RecordHandler(tornado.web.RequestHandler):
    """POST /record: start or stop manual recording; body is JSON {"status": "start"|...}."""
    def post(self):
        json_obj = json_decode(self.request.body)
        if (json_obj['status'] == 'start'):
            thread1.startRecord()
            self.write('Recorder started!')
        else:
            thread1.stopRecord()
            self.write('Recorder stopped!')
class DetectHandler(tornado.web.RequestHandler):
    """POST /detect: resume or pause motion detection; body is JSON {"status": "start"|...}."""
    def post(self):
        json_obj = json_decode(self.request.body)
        if (json_obj['status'] == 'start'):
            detector.resume()
            self.write('Detector started!')
        else:
            detector.pause()
            self.write('Detector stopped!')
class LoginHandler(tornado.web.RequestHandler):
    """Renders the login form and checks the submitted password against PASSWORD."""
    def get(self):
        self.render("login.html")
    def post(self):
        password = self.get_argument("password", "")
        # NOTE(review): hashlib.sha512() requires bytes on Python 3 — this
        # line only works as-is on Python 2; confirm the target runtime.
        if hashlib.sha512(password).hexdigest() == PASSWORD:
            self.set_secure_cookie(COOKIE_NAME, str(time.time()))
            self.redirect("/")
        else:
            # Small fixed delay to slow down brute-force attempts.
            time.sleep(1)
            self.redirect(u"/login?error")
class WebSocket(tornado.websocket.WebSocketHandler):
    """Streams annotated camera frames to the browser as base64-encoded JPEGs."""
    def on_message(self, message):
        """Evaluates the function pointed to by json-rpc."""
        # Start an infinite loop when this is called
        if message == "read_camera":
            # Push one frame roughly every 10 ms until the socket closes.
            self.camera_loop = PeriodicCallback(self.loop, 10)
            self.camera_loop.start()
        # Extensibility for other methods
        else:
            print("Unsupported function: " + message)
    def loop(self):
        """Sends camera images in an infinite loop."""
        # NOTE(review): with the Python 3 fallback (`import io`), StringIO
        # cannot hold JPEG bytes — BytesIO would be required; this works with
        # Python 2's cStringIO. Confirm the target runtime.
        sio = io.StringIO()
        if True:
            img = thread1.getImage()
            t = clock()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            #fgmask = fgbg.apply(frame)
            #hist = cv2.calcHist([fgmask],[0],None,[256],[0,256])
            gray = cv2.equalizeHist(gray)
            # Try face detection first; fall back to HOG person detection.
            rects = detect(gray, cascade)
            if len(rects) == 0:
                #print "List is empty"
                # detect people in the image
                (rects, weights) = hog.detectMultiScale(gray, winStride=(4, 4),
                    padding=(8, 8), scale=1.05)
                rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
                rects = non_max_suppression(rects, probs=None, overlapThresh=0.65)
            # Drive GPIO 18 high while motion recording sees a person/face.
            if detector.hasMotion():
                if len(rects) == 0:
                    GPIO.output(18, GPIO.LOW)
                else:
                    GPIO.output(18, GPIO.HIGH)
            draw_rects(img, rects, (0, 255, 0))
            #if not self.nested.empty():
            #    for x1, y1, x2, y2 in rects:
            #        roi = gray[y1:y2, x1:x2]
            #        vis_roi = vis[y1:y2, x1:x2]
            #        subrects = detect(roi.copy(), self.nested)
            #        draw_rects(vis_roi, subrects, (255, 0, 0))
            dt = clock() - t
            draw_str(img, (20, 20), 'time: %.1f ms' % (dt*1000))
            #draw_str(fgmask, (20, 20), 'white count: %02d' % hist[255])
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            #img = Image.fromarray(fgmask, mode='L')
            img.save(sio, "JPEG")
        try:
            self.write_message(base64.b64encode(sio.getvalue()))
        except tornado.websocket.WebSocketClosedError:
            # Client went away: stop pushing frames.
            self.camera_loop.stop()
def main():
    """Parse CLI args, set up the camera, routes and worker threads, then block on the IO loop."""
    global thread1
    global detector
    global args
    # Commandline parser
    parser = argparse.ArgumentParser(description="Starts a webserver that "
                                     "connects to a webcam.")
    parser.add_argument("--port", type=int, default=8000, help="The "
                        "port on which to serve the website.")
    parser.add_argument("--resolution", type=str, default="low", help="The "
                        "video resolution. Can be high, medium, or low.")
    parser.add_argument("--require-login", action="store_true", help="Require "
                        "a password to log in to webserver.")
    parser.add_argument("--use-usb", action="store_true", help="Use a USB "
                        "webcam instead of the standard Pi camera.")
    args = parser.parse_args()
    camera = None
    camera_width = None
    camera_height = None
    # Select camera
    if args.use_usb:
        camera = cv2.VideoCapture(0)
    else:
        camera = picamera.PiCamera()
        camera.start_preview()
    # Apply the requested capture resolution (keys of `resolutions`).
    if args.resolution in resolutions:
        camera_width, camera_height = resolutions[args.resolution]
        if args.use_usb:
            camera.set(3, camera_width)
            camera.set(4, camera_height)
        else:
            camera.resolution = resolutions[args.resolution]
    else:
        raise Exception("%s not in resolution options." % args.resolution)
    # Web config
    handlers = [(r"/", IndexHandler),
                (r"/login", LoginHandler),
                (r"/websocket", WebSocket),
                (r"/snap", SnapHandler),
                (r"/record", RecordHandler),
                (r"/detect", DetectHandler),
                (r"/fm", FileManagerHandler)]
    settings = {
        "blog_title": u"Untibot App",
        "cookie_secret":PASSWORD,
        "template_path":os.path.join(os.path.dirname(__file__), "templates"),
        "static_path":os.path.join(os.path.dirname(__file__), "public")
    }
    application = tornado.web.Application(handlers, **settings)
    application.listen(args.port)
    #webbrowser.open("http://localhost:%d/" % args.port, new=2)
    # Create Recorder and Detector threads
    thread1 = RecordVideo(camera, camera_width, camera_height)
    detector = MotionDetection(thread1)
    ioloop = tornado.ioloop.IOLoop.instance()
    try:
        thread1.start()
        detector.start()
        ioloop.start()
        pass
    except KeyboardInterrupt:
        # Ctrl-C: stop worker threads, the IO loop, and release the GPIO pin.
        detector.stop()
        thread1.stop()
        ioloop.stop()
        GPIO.cleanup()
        pass
    else:
        pass
    finally:
        pass
# Script entry point.
if __name__ == "__main__":
    main()
|
990,478 | aa33860d0bb113b7650330b738e866b98b2c769f | # Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list: payload plus a pointer to the next node."""

    def __init__(self, x):
        self.val = x
        self.next = None  # node starts detached
class Solution:
    """LeetCode 147: sort a singly-linked list using insertion sort."""

    def insertionSortList(self, head: ListNode) -> ListNode:
        """Return the head of the list sorted ascending by node value.

        Walks a dummy-headed result list from the front for each input node
        and splices the node in before the first larger element — O(n^2)
        time, O(1) extra space.

        Fix: removed a leftover debug `print(cur.next)` that ran once per
        node and polluted stdout.
        """
        cur = parent = ListNode(None)
        while head:
            # Advance to the insertion point in the already-sorted prefix.
            while cur.next and cur.next.val < head.val:
                cur = cur.next
            # Splice `head` between cur and cur.next; restart from the dummy.
            cur.next, head.next, head = head, cur.next, head.next
            cur = parent
        return cur.next
# Demo: build 4 -> 1 -> 2 -> 3, sort it, and print the resulting values.
# Fix: the original passed a plain Python list to insertionSortList, which
# crashed with AttributeError (lists have no .next); a real linked list is
# required.
sol = Solution()
head = ListNode(4)
head.next = ListNode(1)
head.next.next = ListNode(2)
head.next.next.next = ListNode(3)
node = sol.insertionSortList(head)
values = []
while node:
    values.append(node.val)
    node = node.next
print(values)
# Print a 4-row triangle: row r contains r grinning-face emojis.
for row in range(1, 5):
    print("\U0001f600" * row)
|
990,480 | 1480a40562dcb772bc020318b3552ef885aeb430 | import argparse
import math
import re
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Tuple
from support import timing
INT_RE = re.compile(r'-?\d+')
class Vector(NamedTuple):
    """Immutable 3-D integer vector."""

    x: int
    y: int
    z: int

    def add(self, other: 'Vector') -> 'Vector':
        """Return the component-wise sum of self and other."""
        return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
class Moon(NamedTuple):
    """A moon: its current position and velocity."""

    pos: Vector
    vel: Vector

    @classmethod
    def parse(cls, s: str) -> 'Moon':
        """Build a Moon at rest from a line containing three integers (x, y, z)."""
        coords = [int(tok) for tok in INT_RE.findall(s)]
        x, y, z = coords
        return cls(pos=Vector(x, y, z), vel=Vector(0, 0, 0))
def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y."""
    return (x // math.gcd(x, y)) * y
def axis(moons: List[Moon], axis: str) -> List[Tuple[int, int]]:
    """Project the system onto one axis: (position, velocity) pairs along `axis`."""
    state = []
    for m in moons:
        state.append((getattr(m.pos, axis), getattr(m.vel, axis)))
    return state
def compute(s: str) -> int:
    """AoC 2019 day 12 part 2: steps until the moon system repeats its state.

    Each axis evolves independently, so the full period is the LCM of the
    three per-axis periods; each axis period is the first step at which that
    axis's (position, velocity) list returns to its initial value.
    """
    moons = [Moon.parse(line) for line in s.splitlines()]
    periods: Dict[str, int] = {}
    # Snapshot the initial per-axis state for period detection.
    axes = {k: axis(moons, k) for k in ('x', 'y', 'z')}
    q = 0
    while len(periods) < 3:
        q += 1
        # Apply gravity: each pair pulls velocities one unit toward each other.
        for i, moon in enumerate(moons):
            v_x, v_y, v_z = moon.vel
            for o_moon in moons:
                if o_moon is moon:
                    continue
                if o_moon.pos.x > moon.pos.x:
                    v_x += 1
                elif o_moon.pos.x < moon.pos.x:
                    v_x -= 1
                if o_moon.pos.y > moon.pos.y:
                    v_y += 1
                elif o_moon.pos.y < moon.pos.y:
                    v_y -= 1
                if o_moon.pos.z > moon.pos.z:
                    v_z += 1
                elif o_moon.pos.z < moon.pos.z:
                    v_z -= 1
            moons[i] = moon._replace(vel=Vector(v_x, v_y, v_z))
        # Apply velocity to positions.
        for i, moon in enumerate(moons):
            moons[i] = moon._replace(pos=moon.pos.add(moon.vel))
        # Record the first step at which an axis returns to its initial state.
        for k, v in axes.items():
            if k not in periods:
                if axis(moons, k) == v:
                    periods[k] = q
    return lcm(lcm(periods['x'], periods['y']), periods['z'])
def main() -> int:
    """CLI entry point: read the puzzle input file and print the answer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('data_file')
    args = parser.parse_args()
    # timing() is a project helper that reports elapsed wall time.
    with open(args.data_file) as f, timing():
        print(compute(f.read()))
    return 0
# Script entry point; exit code propagates from main().
if __name__ == '__main__':
    exit(main())
|
# Intro banner explaining the conjecture to the user.
print('''The Collatz conjecture is a conjecture in mathematics that concerns a sequence defined as follows:
start with any positive integer n. Then each term is obtained from the previous term as follows:
if the previous term is even, the next term is one half the previous term.
If the previous term is odd, the next term is 3 times the previous term plus 1.
The conjecture is that no matter what value of n, the sequence will always reach 1.''')
print('more information here: https://en.wikipedia.org/wiki/Collatz_conjecture')
# https://github.com/YanSoum/Collatz
#V1.0: https://trinket.io/python/32e27b5beb
#V2.0: https://trinket.io/python/b1adabe04f
#V3.0: https://trinket.io/python/d3bdba0162
#V4.0: https://trinket.io/python/85917cb31d
#V4.1: https://trinket.io/python/c8f3efc641
#V4.2: https://trinket.io/python/c5d06fac04
#V4.3: https://trinket.io/python/8ba4d67136
def collatz(value):
    """Print each successive Collatz term (tagged 'even'/'odd') until 1 is reached."""
    while value != 1:
        parity = odd_or_even(value)
        if parity == 0:
            value = value // 2
            print(value, 'even')
        elif parity == 1:
            value = 3 * value + 1
            print(value, 'odd')
def odd_or_even(intNumber):
    """Return intNumber % 2: 0 for even numbers, 1 for odd numbers.

    The original compared `num_test == 2`, which is unreachable (`x % 2` is
    always 0 or 1 for integers), and both branches simply returned num_test —
    so the function collapses to a single expression.
    """
    return intNumber % 2
#A function to get the users input
def userInput():
    """Prompt the user and return the raw string entered (may not be numeric)."""
    print('Type a positive integer number: ')
    return input()
#User input validation
def userInValNum():  # userInputValidationNumber
    """Prompt repeatedly until the user enters an integer >= 2; return it.

    1, 0 and negative numbers are rejected with an explanation (they would
    stop immediately or loop forever inside collatz()).

    Cleanup: the original kept a `Pass` sentinel that was never set to 1 —
    the loop only ever exited via `return` — and buried the validation in a
    try/else block; this version uses an explicit `while True` and
    `continue` on parse failure.
    """
    while True:
        userIn = userInput()
        try:
            value = int(userIn)
        except ValueError:
            print('oups something went wrong, try again')
            continue
        if value == 1:
            print('if you input 1 the script will stop running as it outputs 1.')
        elif value == 0:
            print('with 0 as an input the script will be stuck in an endless loop')
        elif value <= 0:
            print("negative numbers end up in a never ending loop")
        elif value >= 2:
            return value
# Entry point: prompt until a valid integer >= 2, then print its Collatz sequence.
collatz(userInValNum())
print('end')
|
990,482 | 3be0b75b9ea0304f2adc90194f5a26148a440d10 | #!/usr/bin/python
"""
Python wrapper of the botan crypto library
https://botan.randombit.net
(C) 2015,2017,2018 Jack Lloyd
(C) 2015 Uri Blumenthal (extensions and patches)
Botan is released under the Simplified BSD License (see license.txt)
This module uses the ctypes module and is usable by programs running
under at least CPython 2.7, CPython 3.4 and 3.5, or PyPy.
It uses botan's ffi module, which exposes a C API.
This version of the module requires FFI API version 20180713, which was
introduced in Botan 2.8
"""
from ctypes import CDLL, POINTER, byref, c_void_p, c_size_t, c_uint32, c_int, c_char, c_char_p, create_string_buffer
from sys import version_info
from time import strptime, mktime
from binascii import hexlify
from datetime import datetime
BOTAN_FFI_VERSION = 20180713
#
# Base exception for all exceptions raised from this module
#
class BotanException(Exception):
    """Raised for any error reported by this module or the botan FFI layer.

    When a non-zero return code is supplied, the message is augmented with
    botan's own description of that code; the code is kept on `self.rc`.
    """

    def __init__(self, message, rc=0):
        self.rc = rc
        if rc != 0:
            descr = botan.botan_error_description(rc).decode('ascii')
            message = "%s: %d (%s)" % (message, rc, descr)
        super(BotanException, self).__init__(message)
#
# Module initialization
#
def load_botan_dll(expected_version):
    """Search the usual botan-2 shared-object names and load the first one
    whose FFI layer supports expected_version; return None when none is usable."""
    candidate_names = ['libbotan-2.dylib', 'libbotan-2.so']
    candidate_names += ['libbotan-2.so.%d' % (v,) for v in range(15, 7, -1)]
    for name in candidate_names:
        try:
            lib = CDLL(name)
        except OSError:
            # Not installed under this name; keep looking.
            continue
        lib.botan_ffi_supports_api.argtypes = [c_uint32]
        lib.botan_ffi_supports_api.restype = c_int
        # 0 means "this API version is supported".
        if lib.botan_ffi_supports_api(expected_version) == 0:
            return lib
    return None
# Load the shared library once at import time; fail fast when it's missing.
botan = load_botan_dll(BOTAN_FFI_VERSION) # pylint: disable=invalid-name
if botan is None:
    raise BotanException("Could not find a usable Botan shared object library")
#
# ctypes function prototypes
#
def errcheck_for(fn_name):
    """Build a ctypes errcheck callback for the named botan function.

    The callback passes through non-integer results, non-negative return
    codes, and -10 (insufficient buffer space, which the caller retries);
    any other negative code raises BotanException.
    """
    def errcheck(rc, _func, _args):
        # No idea what to do if return value isn't an int, just return it
        if not isinstance(rc, int):
            return rc
        if rc >= 0 or rc == -10:
            return rc
        raise BotanException('%s failed' % (fn_name), rc)
    return errcheck
# For each FFI entry point used below, declare its ctypes argument/return
# types and attach an errcheck hook (see errcheck_for above) that converts
# negative return codes into BotanException.
botan.botan_version_string.argtypes = []
botan.botan_version_string.restype = c_char_p
botan.botan_error_description.argtypes = [c_int]
botan.botan_error_description.restype = c_char_p
# RNG
botan.botan_rng_init.argtypes = [c_void_p, c_char_p]
botan.botan_rng_init.errcheck = errcheck_for('botan_rng_init')
botan.botan_rng_destroy.argtypes = [c_void_p]
botan.botan_rng_destroy.errcheck = errcheck_for('botan_rng_destroy')
botan.botan_rng_reseed.argtypes = [c_void_p, c_size_t]
botan.botan_rng_reseed.errcheck = errcheck_for('botan_rng_reseed')
botan.botan_rng_get.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_rng_get.errcheck = errcheck_for('botan_rng_get')
# Hash function
botan.botan_hash_init.argtypes = [c_void_p, c_char_p, c_uint32]
botan.botan_hash_init.errcheck = errcheck_for('botan_hash_init')
botan.botan_hash_destroy.argtypes = [c_void_p]
botan.botan_hash_destroy.errcheck = errcheck_for('botan_hash_destroy')
botan.botan_hash_name.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_hash_name.errcheck = errcheck_for('botan_hash_name')
botan.botan_hash_clear.argtypes = [c_void_p]
botan.botan_hash_clear.errcheck = errcheck_for('botan_hash_clear')
botan.botan_hash_output_length.argtypes = [c_void_p, POINTER(c_size_t)]
botan.botan_hash_output_length.errcheck = errcheck_for('botan_hash_output_length')
botan.botan_hash_update.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_hash_update.errcheck = errcheck_for('botan_hash_update')
botan.botan_hash_final.argtypes = [c_void_p, POINTER(c_char)]
botan.botan_hash_final.errcheck = errcheck_for('botan_hash_final')
# MAC
botan.botan_mac_init.argtypes = [c_void_p, c_char_p, c_uint32]
botan.botan_mac_init.errcheck = errcheck_for('botan_mac_init')
botan.botan_mac_destroy.argtypes = [c_void_p]
botan.botan_mac_destroy.errcheck = errcheck_for('botan_mac_destroy')
botan.botan_mac_name.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_mac_name.errcheck = errcheck_for('botan_mac_name')
botan.botan_mac_clear.argtypes = [c_void_p]
botan.botan_mac_clear.errcheck = errcheck_for('botan_mac_clear')
botan.botan_mac_output_length.argtypes = [c_void_p, POINTER(c_size_t)]
botan.botan_mac_output_length.errcheck = errcheck_for('botan_mac_output_length')
botan.botan_mac_set_key.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_mac_set_key.errcheck = errcheck_for('botan_mac_set_key')
botan.botan_mac_update.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_mac_update.errcheck = errcheck_for('botan_mac_update')
botan.botan_mac_final.argtypes = [c_void_p, POINTER(c_char)]
botan.botan_mac_final.errcheck = errcheck_for('botan_mac_final')
# Cipher
botan.botan_cipher_init.argtypes = [c_void_p, c_char_p, c_uint32]
botan.botan_cipher_init.errcheck = errcheck_for('botan_cipher_init')
botan.botan_cipher_destroy.argtypes = [c_void_p]
botan.botan_cipher_destroy.errcheck = errcheck_for('botan_cipher_destroy')
botan.botan_cipher_name.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_cipher_name.errcheck = errcheck_for('botan_cipher_name')
botan.botan_cipher_get_default_nonce_length.argtypes = [c_void_p, POINTER(c_size_t)]
botan.botan_cipher_get_default_nonce_length.errcheck = errcheck_for('botan_cipher_get_default_nonce_length')
botan.botan_cipher_get_update_granularity.argtypes = [c_void_p, POINTER(c_size_t)]
botan.botan_cipher_get_update_granularity.errcheck = errcheck_for('botan_cipher_get_update_granularity')
botan.botan_cipher_get_tag_length.argtypes = [c_void_p, POINTER(c_size_t)]
botan.botan_cipher_get_tag_length.errcheck = errcheck_for('botan_cipher_get_tag_length')
botan.botan_cipher_valid_nonce_length.argtypes = [c_void_p, c_size_t]
botan.botan_cipher_valid_nonce_length.errcheck = errcheck_for('botan_cipher_valid_nonce_length')
botan.botan_cipher_clear.argtypes = [c_void_p]
botan.botan_cipher_clear.errcheck = errcheck_for('botan_cipher_clear')
botan.botan_cipher_set_key.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_cipher_set_key.errcheck = errcheck_for('botan_cipher_set_key')
botan.botan_cipher_set_associated_data.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_cipher_set_associated_data.errcheck = errcheck_for('botan_cipher_set_associated_data')
botan.botan_cipher_start.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_cipher_start.errcheck = errcheck_for('botan_cipher_start')
botan.botan_cipher_update.argtypes = [c_void_p, c_uint32,
                                      POINTER(c_char), c_size_t, POINTER(c_size_t),
                                      POINTER(c_char), c_size_t, POINTER(c_size_t)]
botan.botan_cipher_update.errcheck = errcheck_for('botan_cipher_update')
# Bcrypt
botan.botan_bcrypt_generate.argtypes = [POINTER(c_char), POINTER(c_size_t),
                                        c_char_p, c_void_p, c_size_t, c_uint32]
botan.botan_bcrypt_generate.errcheck = errcheck_for('botan_bcrypt_generate')
botan.botan_bcrypt_is_valid.argtypes = [c_char_p, c_char_p]
botan.botan_bcrypt_is_valid.errcheck = errcheck_for('botan_bcrypt_is_valid')
# PBKDF
botan.botan_pbkdf.argtypes = [c_char_p, POINTER(c_char), c_size_t, c_char_p, c_void_p, c_size_t, c_size_t]
botan.botan_pbkdf.errcheck = errcheck_for('botan_pbkdf')
botan.botan_pbkdf_timed.argtypes = [c_char_p, POINTER(c_char), c_size_t, c_char_p,
                                    c_void_p, c_size_t, c_size_t, POINTER(c_size_t)]
botan.botan_pbkdf_timed.errcheck = errcheck_for('botan_pbkdf_timed')
# Scrypt
botan.botan_scrypt.argtypes = [POINTER(c_char), c_size_t, c_char_p, POINTER(c_char), c_size_t,
                               c_size_t, c_size_t, c_size_t]
botan.botan_scrypt.errcheck = errcheck_for('botan_scrypt')
# KDF
botan.botan_kdf.argtypes = [c_char_p, POINTER(c_char), c_size_t, POINTER(c_char), c_size_t,
                            POINTER(c_char), c_size_t, POINTER(c_char), c_size_t]
botan.botan_kdf.errcheck = errcheck_for('botan_kdf')
# Public key
botan.botan_pubkey_destroy.argtypes = [c_void_p]
botan.botan_pubkey_destroy.errcheck = errcheck_for('botan_pubkey_destroy')
botan.botan_pubkey_estimated_strength.argtypes = [c_void_p, POINTER(c_size_t)]
botan.botan_pubkey_estimated_strength.errcheck = errcheck_for('botan_pubkey_estimated_strength')
botan.botan_pubkey_algo_name.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_pubkey_algo_name.errcheck = errcheck_for('botan_pubkey_algo_name')
botan.botan_pubkey_export.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t), c_uint32]
botan.botan_pubkey_export.errcheck = errcheck_for('botan_pubkey_export')
botan.botan_pubkey_fingerprint.argtypes = [c_void_p, c_char_p,
                                           POINTER(c_char), POINTER(c_size_t)]
botan.botan_pubkey_fingerprint.errcheck = errcheck_for('botan_pubkey_fingerprint')
botan.botan_privkey_create.argtypes = [c_void_p, c_char_p, c_char_p, c_void_p]
botan.botan_privkey_create.errcheck = errcheck_for('botan_privkey_create')
botan.botan_privkey_algo_name.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_privkey_algo_name.errcheck = errcheck_for('botan_privkey_algo_name')
botan.botan_privkey_export_pubkey.argtypes = [c_void_p, c_void_p]
botan.botan_privkey_export_pubkey.errcheck = errcheck_for('botan_privkey_export_pubkey')
botan.botan_privkey_destroy.argtypes = [c_void_p]
botan.botan_privkey_destroy.errcheck = errcheck_for('botan_privkey_destroy')
botan.botan_privkey_export.argtypes = [c_void_p, POINTER(c_char), c_void_p]
botan.botan_privkey_export.errcheck = errcheck_for('botan_privkey_export')
# PK Encryption
botan.botan_pk_op_encrypt_create.argtypes = [c_void_p, c_void_p, c_char_p, c_uint32]
botan.botan_pk_op_encrypt_create.errcheck = errcheck_for('botan_pk_op_encrypt_create')
botan.botan_pk_op_encrypt_output_length.argtypes = [c_void_p, c_size_t, POINTER(c_size_t)]
botan.botan_pk_op_encrypt_output_length.errcheck = errcheck_for('botan_pk_op_encrypt_output_length')
botan.botan_pk_op_encrypt_destroy.argtypes = [c_void_p]
botan.botan_pk_op_encrypt_destroy.errcheck = errcheck_for('botan_pk_op_encrypt_destroy')
botan.botan_pk_op_encrypt.argtypes = [c_void_p, c_void_p,
                                      POINTER(c_char), POINTER(c_size_t),
                                      POINTER(c_char), c_size_t]
botan.botan_pk_op_encrypt.errcheck = errcheck_for('botan_pk_op_encrypt')
# PK Decryption
botan.botan_pk_op_decrypt_create.argtypes = [c_void_p, c_void_p, c_char_p, c_uint32]
botan.botan_pk_op_decrypt_create.errcheck = errcheck_for('botan_pk_op_decrypt_create')
botan.botan_pk_op_decrypt_output_length.argtypes = [c_void_p, c_size_t, POINTER(c_size_t)]
botan.botan_pk_op_decrypt_output_length.errcheck = errcheck_for('botan_pk_op_decrypt_output_length')
botan.botan_pk_op_decrypt_destroy.argtypes = [c_void_p]
botan.botan_pk_op_decrypt_destroy.errcheck = errcheck_for('botan_pk_op_decrypt_destroy')
botan.botan_pk_op_decrypt.argtypes = [c_void_p,
                                      POINTER(c_char), POINTER(c_size_t),
                                      POINTER(c_char), c_size_t]
# Fixed copy/paste bug: the errcheck was registered with the name
# 'botan_pk_op_encrypt', so decryption failures raised exceptions that
# misleadingly claimed encryption had failed.
botan.botan_pk_op_decrypt.errcheck = errcheck_for('botan_pk_op_decrypt')
# PK Signatures
botan.botan_pk_op_sign_create.argtypes = [c_void_p, c_void_p, c_char_p, c_uint32]
botan.botan_pk_op_sign_create.errcheck = errcheck_for('botan_pk_op_sign_create')
botan.botan_pk_op_sign_destroy.argtypes = [c_void_p]
botan.botan_pk_op_sign_destroy.errcheck = errcheck_for('botan_pk_op_sign_destroy')
botan.botan_pk_op_sign_update.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_pk_op_sign_update.errcheck = errcheck_for('botan_pk_op_sign_update')
botan.botan_pk_op_sign_finish.argtypes = [c_void_p, c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_pk_op_sign_finish.errcheck = errcheck_for('botan_pk_op_sign_finish')
# PK Verification
botan.botan_pk_op_verify_create.argtypes = [c_void_p, c_void_p, c_char_p, c_uint32]
botan.botan_pk_op_verify_create.errcheck = errcheck_for('botan_pk_op_verify_create')
botan.botan_pk_op_verify_destroy.argtypes = [c_void_p]
botan.botan_pk_op_verify_destroy.errcheck = errcheck_for('botan_pk_op_verify_destroy')
botan.botan_pk_op_verify_update.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_pk_op_verify_update.errcheck = errcheck_for('botan_pk_op_verify_update')
botan.botan_pk_op_verify_finish.argtypes = [c_void_p, POINTER(c_char), c_size_t]
botan.botan_pk_op_verify_finish.errcheck = errcheck_for('botan_pk_op_verify_finish')
# MCEIES
botan.botan_mceies_encrypt.argtypes = [c_void_p, c_void_p, c_char_p, POINTER(c_char), c_size_t,
                                       POINTER(c_char), c_size_t, POINTER(c_char), POINTER(c_size_t)]
botan.botan_mceies_encrypt.errcheck = errcheck_for('botan_mceies_encrypt')
botan.botan_mceies_decrypt.argtypes = [c_void_p, c_char_p, POINTER(c_char), c_size_t,
                                       POINTER(c_char), c_size_t, POINTER(c_char), POINTER(c_size_t)]
botan.botan_mceies_decrypt.errcheck = errcheck_for('botan_mceies_decrypt')
# Key Agreement
botan.botan_pk_op_key_agreement_export_public.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_pk_op_key_agreement_export_public.errcheck = errcheck_for('botan_pk_op_key_agreement_export_public')
botan.botan_pk_op_key_agreement_create.argtypes = [c_void_p, c_void_p, c_char_p, c_uint32]
botan.botan_pk_op_key_agreement_create.errcheck = errcheck_for('botan_pk_op_key_agreement_create')
botan.botan_pk_op_key_agreement_destroy.argtypes = [c_void_p]
botan.botan_pk_op_key_agreement_destroy.errcheck = errcheck_for('botan_pk_op_key_agreement_destroy')
botan.botan_pk_op_key_agreement.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t),
                                            POINTER(c_char), c_size_t, POINTER(c_char), c_size_t]
botan.botan_pk_op_key_agreement.errcheck = errcheck_for('botan_pk_op_key_agreement')
# X509 certs
botan.botan_x509_cert_load_file.argtypes = [POINTER(c_void_p), c_char_p]
botan.botan_x509_cert_load_file.errcheck = errcheck_for('botan_x509_cert_load_file')
botan.botan_x509_cert_load.argtypes = [POINTER(c_void_p), POINTER(c_char), c_size_t]
botan.botan_x509_cert_load.errcheck = errcheck_for('botan_x509_cert_load')
botan.botan_x509_cert_destroy.argtypes = [c_void_p]
botan.botan_x509_cert_destroy.errcheck = errcheck_for('botan_x509_cert_destroy')
botan.botan_x509_cert_get_time_starts.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_time_starts.errcheck = errcheck_for('botan_x509_cert_get_time_starts')
botan.botan_x509_cert_get_time_expires.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_time_expires.errcheck = errcheck_for('botan_x509_cert_get_time_expires')
botan.botan_x509_cert_to_string.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_to_string.errcheck = errcheck_for('botan_x509_cert_to_string')
botan.botan_x509_cert_get_fingerprint.argtypes = [c_void_p, c_char_p,
                                                  POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_fingerprint.errcheck = errcheck_for('botan_x509_cert_get_fingerprint')
botan.botan_x509_cert_get_serial_number.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_serial_number.errcheck = errcheck_for('botan_x509_cert_get_serial_number')
botan.botan_x509_cert_get_authority_key_id.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_authority_key_id.errcheck = errcheck_for('botan_x509_cert_get_authority_key_id')
botan.botan_x509_cert_get_subject_key_id.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_subject_key_id.errcheck = errcheck_for('botan_x509_cert_get_subject_key_id')
botan.botan_x509_cert_get_public_key_bits.argtypes = [c_void_p, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_public_key_bits.errcheck = errcheck_for('botan_x509_cert_get_public_key_bits')
botan.botan_x509_cert_get_public_key.argtypes = [c_void_p, c_void_p]
botan.botan_x509_cert_get_public_key.errcheck = errcheck_for('botan_x509_cert_get_public_key')
botan.botan_x509_cert_get_subject_dn.argtypes = [c_void_p, c_char_p, c_size_t, POINTER(c_char), POINTER(c_size_t)]
botan.botan_x509_cert_get_subject_dn.errcheck = errcheck_for('botan_x509_cert_get_subject_dn')
#
# Internal utilities
#
def _call_fn_returning_vec(guess, fn):
    """Call *fn* with an output buffer, growing the buffer on demand.

    *fn* is invoked as ``fn(buf, byref(buf_len))``. If it reports rc -10
    (insufficient buffer space) with the required size written to buf_len,
    the call is retried with a buffer of that size. Returns the bytes
    actually written.
    """
    size = guess
    while True:
        buf = create_string_buffer(size)
        buf_len = c_size_t(len(buf))
        rc = fn(buf, byref(buf_len))
        if rc == -10 and buf_len.value > len(buf):
            size = buf_len.value
            continue
        assert buf_len.value <= len(buf)
        return buf.raw[0:int(buf_len.value)]
def _call_fn_returning_string(guess, fn):
    """Call *fn* via _call_fn_returning_vec and decode the result as ASCII.

    Assumes the FFI call produces plain ASCII text (base64 data, algorithm
    names, etc.) with a trailing NUL, which is stripped.
    """
    raw = _call_fn_returning_vec(guess, fn)
    text = raw.decode('ascii')
    return text[:-1]
def _ctype_str(s):
    """Convert a native ``str`` to the form expected by c_char_p arguments.

    On Python 2 the string is already bytes and is passed through; on
    Python 3 it is UTF-8 encoded.
    """
    assert isinstance(s, str)
    return s if version_info[0] < 3 else s.encode('utf-8')
def _ctype_to_str(s):
    """Convert bytes coming back from the FFI layer into a native ``str``."""
    codec = 'utf-8'
    if version_info[0] >= 3:
        return s.decode(codec)
    return s.encode(codec)
def _ctype_bits(s):
    """Normalize binary input to the byte-string form expected by the FFI.

    Accepts ``bytes`` as-is (and, on Python 3, ``str`` which is UTF-8
    encoded); any other type is an internal usage error.
    """
    if version_info[0] < 3:
        if isinstance(s, str):
            return s
    else:
        if isinstance(s, bytes):
            return s
        if isinstance(s, str):
            return s.encode('utf-8')
    raise Exception("Internal error - unexpected type %s provided to _ctype_bits" % (type(s).__name__))
def _ctype_bufout(buf):
    """Return the raw contents of a ctypes string buffer as a byte string."""
    raw = buf.raw
    return raw if version_info[0] >= 3 else str(raw)
def _hex_encode(buf):
    """Hex-encode a byte string, returning lowercase ASCII text."""
    encoded = hexlify(buf)
    return encoded.decode('ascii')
#
# Versions
#
def version_major():
    """Return the loaded botan library's major version number."""
    return botan.botan_version_major()
def version_minor():
    """Return the loaded botan library's minor version number."""
    return botan.botan_version_minor()
def version_patch():
    """Return the loaded botan library's patch version number."""
    return botan.botan_version_patch()
def version_string():
    """Return botan's full human-readable version string."""
    return botan.botan_version_string().decode('ascii')
#
# RNG
#
class rng(object): # pylint: disable=invalid-name
    """Wrapper around a botan random number generator handle."""
    # Can also use type "system"
    def __init__(self, rng_type='system'):
        self.rng = c_void_p(0)
        botan.botan_rng_init(byref(self.rng), _ctype_str(rng_type))
    def __del__(self):
        botan.botan_rng_destroy(self.rng)
    def reseed(self, bits=256):
        """Reseed the RNG with *bits* bits of fresh entropy."""
        botan.botan_rng_reseed(self.rng, bits)
    def get(self, length):
        """Return *length* random bytes."""
        out = create_string_buffer(length)
        l = c_size_t(length)
        botan.botan_rng_get(self.rng, out, l)
        return _ctype_bufout(out)
#
# Hash function
#
class hash_function(object): # pylint: disable=invalid-name
    """Wrapper around a botan hash function handle (e.g. 'SHA-256')."""
    def __init__(self, algo):
        flags = c_uint32(0) # always zero in this API version
        self.hash = c_void_p(0)
        botan.botan_hash_init(byref(self.hash), _ctype_str(algo), flags)
    def __del__(self):
        botan.botan_hash_destroy(self.hash)
    def algo_name(self):
        """Return the canonical algorithm name."""
        return _call_fn_returning_string(32, lambda b, bl: botan.botan_hash_name(self.hash, b, bl))
    def clear(self):
        """Reset the hash object via botan_hash_clear."""
        botan.botan_hash_clear(self.hash)
    def output_length(self):
        """Return the digest size in bytes."""
        l = c_size_t(0)
        botan.botan_hash_output_length(self.hash, byref(l))
        return l.value
    def update(self, x):
        """Add more input (bytes, or str which is UTF-8 encoded) to the hash."""
        botan.botan_hash_update(self.hash, _ctype_bits(x), len(x))
    def final(self):
        """Finalize and return the digest as bytes."""
        out = create_string_buffer(self.output_length())
        botan.botan_hash_final(self.hash, out)
        return _ctype_bufout(out)
#
# Message authentication codes
#
class message_authentication_code(object): # pylint: disable=invalid-name
    """Wrapper around a botan MAC handle (e.g. 'HMAC(SHA-256)')."""
    def __init__(self, algo):
        flags = c_uint32(0) # always zero in this API version
        self.mac = c_void_p(0)
        botan.botan_mac_init(byref(self.mac), _ctype_str(algo), flags)
    def __del__(self):
        botan.botan_mac_destroy(self.mac)
    def clear(self):
        """Reset the MAC object via botan_mac_clear."""
        botan.botan_mac_clear(self.mac)
    def algo_name(self):
        """Return the canonical algorithm name."""
        return _call_fn_returning_string(32, lambda b, bl: botan.botan_mac_name(self.mac, b, bl))
    def output_length(self):
        """Return the MAC tag size in bytes."""
        l = c_size_t(0)
        botan.botan_mac_output_length(self.mac, byref(l))
        return l.value
    def set_key(self, key):
        # The key is passed through unconverted (no _ctype_bits), so callers
        # must supply bytes under Python 3.
        botan.botan_mac_set_key(self.mac, key, len(key))
    def update(self, x):
        # Input is passed through unconverted; callers must supply bytes.
        botan.botan_mac_update(self.mac, x, len(x))
    def final(self):
        """Finalize and return the MAC tag as bytes."""
        out = create_string_buffer(self.output_length())
        botan.botan_mac_final(self.mac, out)
        return _ctype_bufout(out)
class cipher(object): # pylint: disable=invalid-name
    """Wrapper around a botan symmetric cipher / AEAD handle."""
    def __init__(self, algo, encrypt=True):
        # Flag bit selects direction: 0 = encrypt, 1 = decrypt.
        flags = 0 if encrypt else 1
        self.cipher = c_void_p(0)
        botan.botan_cipher_init(byref(self.cipher), _ctype_str(algo), flags)
    def __del__(self):
        botan.botan_cipher_destroy(self.cipher)
    def algo_name(self):
        """Return the cipher's algorithm name."""
        return _call_fn_returning_string(32, lambda b, bl: botan.botan_cipher_name(self.cipher, b, bl))
    def default_nonce_length(self):
        """Return the cipher's default nonce length in bytes."""
        l = c_size_t(0)
        botan.botan_cipher_get_default_nonce_length(self.cipher, byref(l))
        return l.value
    def update_granularity(self):
        """Return the input granularity required by botan_cipher_update."""
        l = c_size_t(0)
        botan.botan_cipher_get_update_granularity(self.cipher, byref(l))
        return l.value
    def key_length(self):
        """Return (minimum, maximum) key length in bytes."""
        # NOTE(review): botan_cipher_query_keylen has no ctypes prototype or
        # errcheck declared above, unlike every other FFI call here — confirm.
        kmin = c_size_t(0)
        kmax = c_size_t(0)
        botan.botan_cipher_query_keylen(self.cipher, byref(kmin), byref(kmax))
        return kmin.value, kmax.value
    def tag_length(self):
        """Return the authentication tag length (0 for plain cipher modes)."""
        l = c_size_t(0)
        botan.botan_cipher_get_tag_length(self.cipher, byref(l))
        return l.value
    def is_authenticated(self):
        """Return True iff this is an AEAD mode (nonzero tag length)."""
        return self.tag_length() > 0
    def valid_nonce_length(self, nonce_len):
        """Return True iff a nonce of *nonce_len* bytes is acceptable."""
        rc = botan.botan_cipher_valid_nonce_length(self.cipher, nonce_len)
        return True if rc == 1 else False
    def clear(self):
        botan.botan_cipher_clear(self.cipher)
    def set_key(self, key):
        # Key passed through unconverted; callers must supply bytes.
        botan.botan_cipher_set_key(self.cipher, key, len(key))
    def set_assoc_data(self, ad):
        """Set associated data (AEAD modes); must be called before start()."""
        botan.botan_cipher_set_associated_data(self.cipher, ad, len(ad))
    def start(self, nonce):
        """Begin processing a message with the given nonce."""
        botan.botan_cipher_start(self.cipher, nonce, len(nonce))
    def _update(self, txt, final):
        # Empty placeholder when no input; the final pass reserves extra
        # room in the output buffer for the authentication tag.
        inp = txt if txt else ''
        inp_sz = c_size_t(len(inp))
        inp_consumed = c_size_t(0)
        out = create_string_buffer(inp_sz.value + (self.tag_length() if final else 0))
        out_sz = c_size_t(len(out))
        out_written = c_size_t(0)
        flags = c_uint32(1 if final else 0)
        botan.botan_cipher_update(self.cipher, flags,
                                  out, out_sz, byref(out_written),
                                  _ctype_bits(inp), inp_sz, byref(inp_consumed))
        # buffering not supported yet
        assert inp_consumed.value == inp_sz.value
        return out.raw[0:int(out_written.value)]
    def update(self, txt):
        """Process more message data, returning any produced output."""
        return self._update(txt, False)
    def finish(self, txt=None):
        """Process optional final input and finalize, returning remaining output."""
        return self._update(txt, True)
def bcrypt(passwd, rng_instance, work_factor=10):
    """Hash *passwd* with bcrypt and return the hash as a native string.

    :param passwd: password to hash (``str``)
    :param rng_instance: an ``rng`` object supplying randomness
    :param work_factor: bcrypt cost parameter (default 10)
    """
    out_len = c_size_t(64)
    out = create_string_buffer(out_len.value)
    flags = c_uint32(0)
    botan.botan_bcrypt_generate(out, byref(out_len), _ctype_str(passwd),
                                rng_instance.rng, c_size_t(work_factor), flags)
    # out_len includes the trailing NUL written by the C function; drop it.
    b = out.raw[0:int(out_len.value)-1]
    # Defensively strip any remaining NUL terminator. The previous test
    # `b[-1] == '\x00'` could never be true on Python 3, where indexing
    # bytes yields an int; a slice comparison works on both 2 and 3 (and
    # is safe on an empty string).
    if b[-1:] == b'\x00':
        b = b[:-1]
    return _ctype_to_str(b)
def check_bcrypt(passwd, passwd_hash):
    """Return True iff *passwd* matches the bcrypt hash *passwd_hash*."""
    rc = botan.botan_bcrypt_is_valid(_ctype_str(passwd), _ctype_str(passwd_hash))
    return rc == 0
#
# PBKDF
#
def pbkdf(algo, password, out_len, iterations=10000, salt=None):
    """Derive *out_len* bytes from *password* with PBKDF algorithm *algo*.

    If *salt* is None a random 12-byte salt is generated.
    Returns (salt, iterations, derived_key_bytes).
    """
    if salt is None:
        salt = rng().get(12)
    out_buf = create_string_buffer(out_len)
    botan.botan_pbkdf(_ctype_str(algo), out_buf, out_len,
                      _ctype_str(password), salt, len(salt), iterations)
    return (salt, iterations, out_buf.raw)
def pbkdf_timed(algo, password, out_len, ms_to_run=300, salt=None):
    """Like pbkdf(), but lets botan tune the iteration count to take roughly
    *ms_to_run* milliseconds.

    Returns (salt, chosen_iterations, derived_key_bytes).
    """
    if salt is None:
        salt = rng().get(12)
    out_buf = create_string_buffer(out_len)
    iterations = c_size_t(0)
    botan.botan_pbkdf_timed(_ctype_str(algo), out_buf, out_len, _ctype_str(password),
                            salt, len(salt), ms_to_run, byref(iterations))
    return (salt, iterations.value, out_buf.raw)
#
# Scrypt
#
def scrypt(out_len, password, salt, n=1024, r=8, p=8):
    """Derive *out_len* bytes from *password* and *salt* via scrypt(n, r, p)."""
    out_buf = create_string_buffer(out_len)
    botan.botan_scrypt(out_buf, out_len, _ctype_str(password),
                       _ctype_bits(salt), len(salt), n, r, p)
    return out_buf.raw
#
# KDF
#
def kdf(algo, secret, out_len, salt, label):
    """Derive *out_len* bytes from *secret* using KDF *algo*, *salt*, *label*.

    secret/salt/label are passed through unconverted, so callers must
    supply bytes under Python 3.
    """
    out_buf = create_string_buffer(out_len)
    out_sz = c_size_t(out_len)
    botan.botan_kdf(_ctype_str(algo), out_buf, out_sz,
                    secret, len(secret),
                    salt, len(salt),
                    label, len(label))
    return out_buf.raw[0:int(out_sz.value)]
#
# Public and private keys
#
class public_key(object): # pylint: disable=invalid-name
    """Wrapper around a botan public key handle."""
    def __init__(self, obj=None):
        # The previous default `obj=c_void_p(0)` was evaluated once at
        # definition time, so every default-constructed instance shared the
        # same c_void_p object; use a None sentinel and build a fresh NULL
        # handle per instance instead.
        self.pubkey = obj if obj is not None else c_void_p(0)
    def __del__(self):
        botan.botan_pubkey_destroy(self.pubkey)
    def estimated_strength(self):
        """Return the key's estimated security strength in bits."""
        r = c_size_t(0)
        botan.botan_pubkey_estimated_strength(self.pubkey, byref(r))
        return r.value
    def algo_name(self):
        """Return the key's algorithm name (e.g. 'RSA')."""
        return _call_fn_returning_string(32, lambda b, bl: botan.botan_pubkey_algo_name(self.pubkey, b, bl))
    def encoding(self, pem=False):
        """Export the key; flag 1 requests PEM, 0 the binary encoding."""
        flag = 1 if pem else 0
        return _call_fn_returning_vec(4096, lambda b, bl: botan.botan_pubkey_export(self.pubkey, b, bl, flag))
    def fingerprint(self, hash_algorithm='SHA-256'):
        """Return a hex-encoded fingerprint of the key using *hash_algorithm*."""
        n = hash_function(hash_algorithm).output_length()
        buf = create_string_buffer(n)
        buf_len = c_size_t(n)
        botan.botan_pubkey_fingerprint(self.pubkey, _ctype_str(hash_algorithm), buf, byref(buf_len))
        return _hex_encode(buf[0:int(buf_len.value)])
class private_key(object): # pylint: disable=invalid-name
    """Wrapper around a botan private key handle.

    *algo* selects the algorithm; the short lowercase names 'rsa', 'ecdsa',
    'ecdh', 'mce'/'mceliece' are translated below, anything else is passed
    through as-is. *params* is algorithm-specific: RSA bit size (int),
    ECDSA/ECDH group name, or an (n, t) pair for McEliece.
    """
    def __init__(self, algo, params, rng_instance):
        self.privkey = c_void_p(0)
        # Map the convenience names onto botan's canonical algorithm names
        # and stringify the parameters accordingly.
        if algo == 'rsa':
            algo = 'RSA'
            params = "%d" % (params)
        elif algo == 'ecdsa':
            algo = 'ECDSA'
        elif algo == 'ecdh':
            if params == 'curve25519':
                algo = 'Curve25519'
                params = ''
            else:
                algo = 'ECDH'
        elif algo in ['mce', 'mceliece']:
            algo = 'McEliece'
            params = "%d,%d" % (params[0], params[1])
        botan.botan_privkey_create(byref(self.privkey),
                                   _ctype_str(algo), _ctype_str(params), rng_instance.rng)
    def __del__(self):
        botan.botan_privkey_destroy(self.privkey)
    def algo_name(self):
        """Return the key's algorithm name."""
        return _call_fn_returning_string(32, lambda b, bl: botan.botan_privkey_algo_name(self.privkey, b, bl))
    def get_public_key(self):
        """Extract and return the corresponding public_key."""
        pub = c_void_p(0)
        botan.botan_privkey_export_pubkey(byref(pub), self.privkey)
        return public_key(pub)
    def export(self):
        """Export the private key, retrying once if 4096 bytes is too small.

        Relies on rc -10 (insufficient buffer) passing through errcheck_for
        with the required size left in buf_len.
        """
        n = 4096
        buf = create_string_buffer(n)
        buf_len = c_size_t(n)
        rc = botan.botan_privkey_export(self.privkey, buf, byref(buf_len))
        if rc != 0:
            buf = create_string_buffer(buf_len.value)
            botan.botan_privkey_export(self.privkey, buf, byref(buf_len))
        return buf[0:int(buf_len.value)]
class pk_op_encrypt(object): # pylint: disable=invalid-name
    """Public-key encryption operation for *key* using scheme *padding*."""
    def __init__(self, key, padding):
        self.op = c_void_p(0)
        flags = c_uint32(0) # always zero in this ABI
        botan.botan_pk_op_encrypt_create(byref(self.op), key.pubkey, _ctype_str(padding), flags)
    def __del__(self):
        botan.botan_pk_op_encrypt_destroy(self.op)
    def encrypt(self, msg, rng_instance):
        """Encrypt *msg* (bytes) and return the ciphertext bytes."""
        # Query the required output size first so the buffer is big enough.
        outbuf_sz = c_size_t(0)
        botan.botan_pk_op_encrypt_output_length(self.op, len(msg), byref(outbuf_sz))
        outbuf = create_string_buffer(outbuf_sz.value)
        botan.botan_pk_op_encrypt(self.op, rng_instance.rng, outbuf, byref(outbuf_sz), msg, len(msg))
        return outbuf.raw[0:int(outbuf_sz.value)]
class pk_op_decrypt(object): # pylint: disable=invalid-name
    """Public-key decryption operation for private *key* / scheme *padding*."""
    def __init__(self, key, padding):
        self.op = c_void_p(0)
        flags = c_uint32(0) # always zero in this ABI
        botan.botan_pk_op_decrypt_create(byref(self.op), key.privkey, _ctype_str(padding), flags)
    def __del__(self):
        botan.botan_pk_op_decrypt_destroy(self.op)
    def decrypt(self, msg):
        """Decrypt ciphertext *msg* and return the plaintext bytes."""
        # Query the maximum output size first so the buffer is big enough.
        outbuf_sz = c_size_t(0)
        botan.botan_pk_op_decrypt_output_length(self.op, len(msg), byref(outbuf_sz))
        outbuf = create_string_buffer(outbuf_sz.value)
        botan.botan_pk_op_decrypt(self.op, outbuf, byref(outbuf_sz), _ctype_bits(msg), len(msg))
        return outbuf.raw[0:int(outbuf_sz.value)]
class pk_op_sign(object): # pylint: disable=invalid-name
    """Signature generation operation for private *key* / scheme *padding*."""
    def __init__(self, key, padding):
        self.op = c_void_p(0)
        flags = c_uint32(0) # always zero in this ABI
        botan.botan_pk_op_sign_create(byref(self.op), key.privkey, _ctype_str(padding), flags)
    def __del__(self):
        botan.botan_pk_op_sign_destroy(self.op)
    def update(self, msg):
        # NOTE(review): uses _ctype_str (str only), while pk_op_verify.update
        # uses _ctype_bits (bytes allowed) — confirm whether bytes input
        # should also be accepted here.
        botan.botan_pk_op_sign_update(self.op, _ctype_str(msg), len(msg))
    def finish(self, rng_instance):
        """Produce and return the signature bytes."""
        # NOTE(review): botan_pk_op_sign_output_length has no ctypes
        # prototype declared above, unlike the other FFI calls — confirm.
        outbuf_sz = c_size_t(0)
        botan.botan_pk_op_sign_output_length(self.op, byref(outbuf_sz))
        outbuf = create_string_buffer(outbuf_sz.value)
        botan.botan_pk_op_sign_finish(self.op, rng_instance.rng, outbuf, byref(outbuf_sz))
        return outbuf.raw[0:int(outbuf_sz.value)]
class pk_op_verify(object): # pylint: disable=invalid-name
    """Signature verification operation for public *key* / scheme *padding*."""
    def __init__(self, key, padding):
        self.op = c_void_p(0)
        flags = c_uint32(0) # always zero in this ABI
        botan.botan_pk_op_verify_create(byref(self.op), key.pubkey, _ctype_str(padding), flags)
    def __del__(self):
        botan.botan_pk_op_verify_destroy(self.op)
    def update(self, msg):
        """Add more of the signed message (bytes, or str UTF-8 encoded)."""
        botan.botan_pk_op_verify_update(self.op, _ctype_bits(msg), len(msg))
    def check_signature(self, signature):
        """Return True iff the FFI reports rc 0 for *signature* over the data."""
        rc = botan.botan_pk_op_verify_finish(self.op, _ctype_bits(signature), len(signature))
        if rc == 0:
            return True
        return False
#
# MCEIES encryption
# Must be used with McEliece keys
#
def mceies_encrypt(mce, rng_instance, aead, pt, ad):
    """MCEIES: encrypt *pt* with associated data *ad* under the McEliece
    key *mce*, using AEAD mode *aead*. Returns the ciphertext bytes.

    The output buffer guess is the plaintext length plus 1024 bytes of
    overhead; _call_fn_returning_vec retries if that is too small.
    """
    return _call_fn_returning_vec(len(pt) + 1024, lambda b, bl:
                                  botan.botan_mceies_encrypt(mce.pubkey,
                                                             rng_instance.rng,
                                                             _ctype_str(aead),
                                                             _ctype_bits(pt),
                                                             len(pt),
                                                             _ctype_bits(ad),
                                                             len(ad),
                                                             b, bl))
def mceies_decrypt(mce, aead, ct, ad):
    """Inverse of mceies_encrypt; *mce* must hold the private key."""
    #msg = cast(msg, c_char_p)
    #ll = c_size_t(ll)
    return _call_fn_returning_vec(len(ct), lambda b, bl:
                                  botan.botan_mceies_decrypt(mce.privkey,
                                                             _ctype_str(aead),
                                                             _ctype_bits(ct),
                                                             len(ct),
                                                             _ctype_bits(ad),
                                                             len(ad),
                                                             b, bl))
class pk_op_key_agreement(object): # pylint: disable=invalid-name
    """Key agreement (e.g. ECDH) operation using private *key* and KDF *kdf_name*."""
    def __init__(self, key, kdf_name):
        self.op = c_void_p(0)
        flags = c_uint32(0) # always zero in this ABI
        # NOTE(review): kdf_name is passed to the FFI without _ctype_str(),
        # unlike other string arguments in this module, so under Python 3 the
        # caller apparently must pass bytes — confirm intended.
        botan.botan_pk_op_key_agreement_create(byref(self.op), key.privkey, kdf_name, flags)
        # Export our public value eagerly; a guess of 0 forces the -10 retry
        # path in _call_fn_returning_vec to size the buffer correctly.
        self.m_public_value = _call_fn_returning_vec(
            0, lambda b, bl: botan.botan_pk_op_key_agreement_export_public(key.privkey, b, bl))
    def __del__(self):
        botan.botan_pk_op_key_agreement_destroy(self.op)
    def public_value(self):
        """Return our public value to send to the peer."""
        return self.m_public_value
    def agree(self, other, key_len, salt):
        """Derive *key_len* bytes of shared key from the peer's public value
        *other* and *salt*."""
        return _call_fn_returning_vec(key_len, lambda b, bl:
                                      botan.botan_pk_op_key_agreement(self.op, b, bl,
                                                                      other, len(other),
                                                                      salt, len(salt)))
#
# X.509 certificates
#
class x509_cert(object): # pylint: disable=invalid-name
    """Wrapper around a botan X.509 certificate.

    Exactly one of *filename* (path to a certificate file) or *buf*
    (certificate bytes) must be given.
    """
    def __init__(self, filename=None, buf=None):
        if filename is None and buf is None:
            raise BotanException("No filename or buf given")
        if filename is not None and buf is not None:
            raise BotanException("Both filename and buf given")
        elif filename is not None:
            self.x509_cert = c_void_p(0)
            botan.botan_x509_cert_load_file(byref(self.x509_cert), _ctype_str(filename))
        elif buf is not None:
            self.x509_cert = c_void_p(0)
            botan.botan_x509_cert_load(byref(self.x509_cert), _ctype_bits(buf), len(buf))
    def __del__(self):
        botan.botan_x509_cert_destroy(self.x509_cert)
    def time_starts(self):
        """Return the certificate's start time as a datetime.

        Handles both 13-character UTCTime and 15-character GeneralizedTime
        encodings. NOTE(review): mktime interprets the parsed struct as
        local time, so the result is a naive local-time datetime — confirm
        whether UTC was intended.
        """
        starts = _call_fn_returning_string(
            16, lambda b, bl: botan.botan_x509_cert_get_time_starts(self.x509_cert, b, bl))
        if len(starts) == 13:
            # UTC time
            struct_time = strptime(starts, "%y%m%d%H%M%SZ")
        elif len(starts) == 15:
            # Generalized time
            struct_time = strptime(starts, "%Y%m%d%H%M%SZ")
        else:
            raise BotanException("Unexpected date/time format for x509 start time")
        return datetime.fromtimestamp(mktime(struct_time))
    def time_expires(self):
        """Return the certificate's expiry time as a datetime (see time_starts)."""
        expires = _call_fn_returning_string(
            16, lambda b, bl: botan.botan_x509_cert_get_time_expires(self.x509_cert, b, bl))
        if len(expires) == 13:
            # UTC time
            struct_time = strptime(expires, "%y%m%d%H%M%SZ")
        elif len(expires) == 15:
            # Generalized time
            struct_time = strptime(expires, "%Y%m%d%H%M%SZ")
        else:
            raise BotanException("Unexpected date/time format for x509 expire time")
        return datetime.fromtimestamp(mktime(struct_time))
    def to_string(self):
        """Return a human-readable text dump of the certificate."""
        return _call_fn_returning_string(
            4096, lambda b, bl: botan.botan_x509_cert_to_string(self.x509_cert, b, bl))
    def fingerprint(self, hash_algo='SHA-256'):
        """Return the certificate's fingerprint string using *hash_algo*.

        The size guess is three times the digest length — presumably to
        cover formatted output larger than the raw digest.
        """
        n = hash_function(hash_algo).output_length() * 3
        return _call_fn_returning_string(
            n, lambda b, bl: botan.botan_x509_cert_get_fingerprint(self.x509_cert, _ctype_str(hash_algo), b, bl))
    def serial_number(self):
        """Return the serial number as raw bytes."""
        return _call_fn_returning_vec(
            32, lambda b, bl: botan.botan_x509_cert_get_serial_number(self.x509_cert, b, bl))
    def authority_key_id(self):
        """Return the authority key identifier as raw bytes."""
        return _call_fn_returning_vec(
            32, lambda b, bl: botan.botan_x509_cert_get_authority_key_id(self.x509_cert, b, bl))
    def subject_key_id(self):
        """Return the subject key identifier as raw bytes."""
        return _call_fn_returning_vec(
            32, lambda b, bl: botan.botan_x509_cert_get_subject_key_id(self.x509_cert, b, bl))
    def subject_public_key_bits(self):
        """Return the encoded public key bits as raw bytes."""
        return _call_fn_returning_vec(
            512, lambda b, bl: botan.botan_x509_cert_get_public_key_bits(self.x509_cert, b, bl))
    def subject_public_key(self):
        """Return the subject's public key as a public_key object."""
        pub = c_void_p(0)
        botan.botan_x509_cert_get_public_key(self.x509_cert, byref(pub))
        return public_key(pub)
    def subject_dn(self, key, index):
        """Return field *key* (occurrence *index*) from the subject DN."""
        return _call_fn_returning_string(
            0, lambda b, bl: botan.botan_x509_cert_get_subject_dn(self.x509_cert, _ctype_str(key), index, b, bl))
|
990,483 | cbaa9e351d775d53c603c83ecbc4874f43ecea51 | from setuptools import find_packages, setup
with open("README.md") as fd:
    long_description = fd.read()

setup(
    name="tamizdat",
    version="0.2.1",
    description="flibusta.net indexing and email delivery service",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ioreshnikov/tamizdat",
    author="Ivan Oreshnikov",
    author_email="oreshnikov.ivan@gmail.com",
    # Fixed keyword names: setuptools expects `python_requires` (a version
    # specifier string) and `extras_require` (a dict); the previous
    # `python_requirements=[]` / `extra_requires=[]` were unrecognized and
    # silently ignored. ">=3" follows the "Python :: 3" classifier below.
    python_requires=">=3",
    install_requires=[
        "jinja2",
        "lxml",
        "python-telegram-bot>=12.0",
        "py3-validate-email",
        "peewee",
        "requests",
        "transliterate",
        "faker"
    ],
    extras_require={},
    packages=find_packages(exclude=("tests",)),
    package_data={
        "tamizdat": ["templates/*.md"]
    },
    scripts=["bin/tamizdat"],
    include_package_data=True,
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3"
    ])
|
990,484 | c9e556f4b99dcf9eb81a4b68a75dd858fff61d04 | # Author: Raphael Fonseca
# Social Computing and Social Network Analysis Laboratory, Federal University of Rio de Janeiro, Brazil
# Create Date: 2015-07
# Last Update: 2017-07-27
# -*- coding: utf-8 -*-
import oauth2 as oauth
import json
import time
import pymongo
# API Authentication - Fill in credentials
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_KEY = ""
ACCESS_SECRET = ""
# Prepare Authentication
consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
access_token = oauth.Token(key=ACCESS_KEY, secret=ACCESS_SECRET)
client = oauth.Client(consumer, access_token)
# Connect to Database
clientMongo = pymongo.MongoClient("localhost", 27017)
db = clientMongo.chosenDatabase
# Choose if you will use since_id. If you do choose to use it, include it in the URL.
since_id = ''
# Geolocation of Central Point
# Format: Latitude, Longitude, Radius from position
# Example: -22.903426,-43.191478,70km
geo=''
# Starting Date.
# Format: YYYY-MM-DD
since=''
# Until the day prior to...
# Format: YYYY-MM-DD.
until=''
# List of Terms, separate by commas
query = ['']
# Language
# Format: &lang=languageCode
# Example for Portuguese: &lang=pt
language=""
# Page backwards through the Twitter search API for each query term, upserting
# every tweet into MongoDB, until a page repeats (no older results).
# NOTE(review): the search term ``q`` is never inserted into the URL (there is
# no "&q=" parameter), so every request is geocode/date-filtered only --
# confirm whether '"&q="+q' was intended.
for q in query:
    max_id = '0'
    tweetsCounter = 0
    continueFlag = 1
    while continueFlag == 1:
        try:
            if max_id == '0':
                URL = "https://api.twitter.com/1.1/search/tweets.json?geocode="+geo+"&since="+since+"&until="+until+"&count=100"+language
            else:
                URL = "https://api.twitter.com/1.1/search/tweets.json?geocode="+geo+"&since="+since+"&until="+until+"&count=100"+language+"&max_id="+str(max_id)
            max_id_prior = max_id
            response, data = client.request(URL, "GET")
            localCollection = json.loads(data)
            for tweet in localCollection['statuses']:
                # Upsert keyed on tweet id so re-runs do not create duplicates.
                db.localCollection.update({'id': tweet['id']},tweet, upsert=True)
                tweetsCounter = tweetsCounter + 1
                tx = tweet['text']
                print("Search Term: "+ q)
                print("\n")
                print(str("Tweet: " + tx.encode('utf-8')))
                print("\n")
                print("Number of tweets with current term: ")
                print(tweetsCounter)
                print("\n")
                print("Tweet ID: ")
                print(max_id)
                print('======================================================')
                # Step the cursor just below the oldest tweet seen so far.
                max_id = tweet['id'] - 1
                time.sleep(2)
            # The cursor did not move: the page was empty, stop this term.
            if max_id == max_id_prior:
                continueFlag = 0
        # Bug fixes: "except Exception, e" is Python-2-only syntax ("as" works
        # on 2.6+ and 3.x); also removed the no-op statement
        # "tweet['text']==dict" (a comparison whose result was discarded) and
        # the redundant trailing "pass".
        except Exception as e:
            print(e)
            print('slept')
            # Back off one rate-limit window (15 minutes) before retrying.
            time.sleep(15*60)
|
990,485 | 7abbdaa1911bb74d62e6b688ef6ef05bd6cf482c | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 14:53:48 2019
@author: Administrator
"""
s = input("enter a string:")
# Bug fix: the original built the list with a manual index comprehension and
# bound it to the name ``list``, shadowing the builtin.  ``list(s)`` splits a
# string into its characters directly.
chars = list(s)
print(chars)
990,486 | 141c4fd0e48091ef81b8bc1b842a906418fc2ea0 | import time
import argparse as arg
import datetime
import os
import torch
import torch.nn as nn
import torch.nn.utils as utils
import torch.optim as optim
import torchvision.utils as vision_utils
from tensorboardX import SummaryWriter
from problem4.losses import ssim as ssim_criterion
from problem4.losses import depth_loss as gradient_criterion
from problem4.data import get_data_loaders
from problem4.utils import AverageMeter, DepthNorm, colorize, init_or_load_model
def train(epochs,
          trainloader,
          testloader,
          lr=0.0001,
          save="checkpoints/",
          theta=0.1,
          device="cuda",
          pretrained=True,
          checkpoint=None):
    """Train the monocular depth-estimation model.

    Per batch the loss is a weighted sum of an SSIM term, an image-gradient
    depth term, and (scaled by ``theta``) an L1 term on the normalized depth.
    Progress is logged to TensorBoard and checkpoints are written every 1000
    batches.

    NOTE(review): ``l1_criterion`` and ``preds`` are deliberate ``None`` TODO
    placeholders in this skeleton -- the loss computation fails until they
    are filled in by the student.
    """
    num_trainloader = len(trainloader)
    num_testloader = len(testloader)
    # Training utils
    model_prefix = "monocular_"
    device = torch.device("cuda:0" if device == "cuda" else "cpu")
    theta = theta
    save_count = 0
    epoch_loss = []
    batch_loss = []
    sum_loss = 0
    # Either resume from a checkpoint file or start from scratch; both paths
    # go through the shared init_or_load_model helper.
    if checkpoint:
        print("Loading from checkpoint ...")
        model, optimizer, start_epoch = init_or_load_model(pretrained=pretrained,
                                                          epochs=epochs,
                                                          lr=lr,
                                                          ckpt=checkpoint,
                                                          device=device
                                                          )
        print("Resuming from: epoch #{}".format(start_epoch))
    else:
        print("Initializing fresh model ...")
        model, optimizer, start_epoch = init_or_load_model(pretrained=pretrained,
                                                          epochs=epochs,
                                                          lr=lr,
                                                          ckpt=None,
                                                          device=device
                                                          )
    model = model.to(device)
    # NOTE(review): this rebuilds the optimizer returned by
    # init_or_load_model, discarding any loaded optimizer state -- confirm
    # whether that is intended when resuming.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    if pretrained:
        log_dir = 'runs/pretrained'
    else:
        log_dir = 'runs/not_pretrained'
    # Logging
    writer = SummaryWriter(log_dir,comment="{}-training".format(model_prefix))
    # Loss functions
    l1_criterion = None #TODO initialize the L1 Loss to be used for optimizing the output depth
    # Starting training
    print("Starting training ... ")
    for epoch in range(start_epoch, epochs):
        model.train()
        model = model.to(device)
        batch_time = AverageMeter()
        loss_meter = AverageMeter()
        epoch_start = time.time()
        end = time.time()
        for idx, batch in enumerate(trainloader):
            optimizer.zero_grad()
            image_x = torch.Tensor(batch["image"]).to(device)
            depth_y = torch.Tensor(batch["depth"]).to(device=device)
            normalized_depth_y = DepthNorm(depth_y)
            preds = None # TODO call your model on the image input to get its predictions
            # calculating the losses
            l1_loss = None # TODO call the l1_criterion with the predictions and normalized depth
            # SSIM similarity in [0, 1] is turned into a clamped dissimilarity loss.
            ssim_loss = torch.clamp(
                (1-ssim_criterion(preds, normalized_depth_y, 1000.0/10.0))*0.5,
                min=0,
                max=1
            )
            gradient_loss = gradient_criterion(normalized_depth_y, preds, device=device)
            net_loss = (1.0 * ssim_loss) + (1.0 * torch.mean(gradient_loss)) + (theta * torch.mean(l1_loss))
            loss_meter.update(net_loss.data.item(), image_x.size(0))
            net_loss.backward()
            optimizer.step()
            # Time metrics
            batch_time.update(time.time() - end)
            end = time.time()
            # Rough time-remaining estimate from the latest batch duration.
            eta = str(datetime.timedelta(seconds=int(batch_time.val*(num_trainloader-idx))))
            # Logging
            num_iters = epoch * num_trainloader + idx
            if idx % 5 == 0 :
                print(
                    "Epoch: #{0} Batch: {1}/{2}\t"
                    "Time (current/total) {batch_time.val:.3f}/{batch_time.sum:.3f}\t"
                    "eta {eta}\t"
                    "LOSS (current/average) {loss.val:.4f}/{loss.avg:.4f}\t"
                    .format(epoch, idx, num_trainloader, batch_time=batch_time, eta=eta, loss=loss_meter)
                )
                writer.add_scalar("Train/Loss", loss_meter.val, num_iters)
            # Periodic checkpoint plus a sample-image dump to TensorBoard.
            if idx%1000 == 0:
                if pretrained:
                    ckpt_path = save+"ckpt_{}_pretrained.pth".format(epoch)
                else:
                    ckpt_path = save+"ckpt_{}_not_pretrained.pth".format(epoch)
                torch.save({
                    "epoch": epoch,
                    "model_state_dict": model.state_dict(),
                    "optim_state_dict": optimizer.state_dict(),
                    "loss": loss_meter.avg
                }, ckpt_path)
                LogProgress(model, writer, testloader, num_iters, device)
            # Free batch tensors promptly to reduce GPU memory pressure.
            del image_x
            del depth_y
            del preds
        print(
            "----------------------------------\n"
            "Epoch: #{0}, Avg. Net Loss: {avg_loss:.4f}\n"
            "----------------------------------"
            .format(
                epoch, avg_loss=loss_meter.avg
            )
        )
def LogProgress(model, writer, test_loader, epoch, device):
    """ To record intermediate results of training.

    Pulls one batch from the test loader, writes the input image and ground
    truth once (at step 0), and writes the model's colorized prediction and
    its absolute difference from ground truth at every call.
    """
    model.eval()
    sequential = test_loader
    sample_batched = next(iter(sequential))
    image = torch.Tensor(sample_batched["image"]).to(device)
    depth = torch.Tensor(sample_batched["depth"]).to(device)
    # Input and ground-truth grids only need to be logged once.
    if epoch == 0:
        writer.add_image("Train.1.Image", vision_utils.make_grid(image.data, nrow=6, normalize=True), epoch)
    if epoch == 0:
        writer.add_image("Train.2.Image", colorize(vision_utils.make_grid(depth.data, nrow=6, normalize=False)), epoch)
    output = DepthNorm(model(image))
    writer.add_image("Train.3.Ours", colorize(vision_utils.make_grid(output.data, nrow=6, normalize=False)), epoch)
    writer.add_image("Train.4.Diff", colorize(vision_utils.make_grid(torch.abs(output-depth).data, nrow=6, normalize=False)), epoch)
    # Free the sample tensors before returning to training.
    del image
    del depth
    del output
|
990,487 | 3b88f1956e8e6bb7f314389856d67a7b424bf424 | ii = [('KembFFF.py', 3), ('RennJIT.py', 1), ('AubePRP2.py', 1), ('LeakWTI2.py', 1), ('KembFJ1.py', 1), ('WilkJMC3.py', 9), ('LeakWTI3.py', 1), ('PettTHE.py', 2), ('TennAP.py', 1), ('PeckJNG.py', 1), ('KnowJMM.py', 1), ('WilkJMC2.py', 4), ('CarlTFR.py', 11), ('LyttELD.py', 1), ('KiddJAE.py', 2), ('CoolWHM.py', 1), ('ClarGE.py', 3), ('IrviWVD.py', 1), ('DaltJMA.py', 1), ('DibdTRL2.py', 4), ('WadeJEB.py', 6), ('TalfTIT.py', 2), ('GodwWLN.py', 3), ('KirbWPW2.py', 2), ('MartHRW.py', 1), ('WestJIT.py', 1), ('CoolWHM3.py', 2), ('EdgeMHT.py', 1), ('LyttELD3.py', 1), ('FerrSDO.py', 1), ('ThomGLG.py', 2), ('StorJCC.py', 1), ('KembFJ2.py', 1), ('MackCNH2.py', 1), ('AinsWRR2.py', 1), ('MereHHB2.py', 1), ('ClarGE3.py', 3), ('RogeSIP.py', 1), ('EvarJSP.py', 5), ('BeckWRE.py', 2), ('TaylIF.py', 3), ('DibdTBR.py', 1), ('KeigTSS.py', 2), ('ClarGE4.py', 2), ('AdamJOA.py', 1)] |
990,488 | e50f0232df4f40575e8a982195e26e99f96eb3fc | from __future__ import print_function
import numpy as np
import utils as u
import models as m
# constants
data_dir = 'data/'
train_filename = 'loan_train.csv'
test_filename = 'loan_testx.csv'
def get_ranges():
    """Load per-column range descriptors for the train and test CSVs.

    Prints each test column's descriptor next to the corresponding train
    column (train is offset by one because its first column is the label --
    presumably; confirm against the CSV layout) and returns both lists.
    """
    train_range = u.get_file_ranges(data_dir + train_filename)
    test_range = u.get_file_ranges(data_dir + test_filename)
    p1 = len(train_range)
    p2 = len(test_range)
    for i in range(p2):
        print('train', train_range[i+1])
        print('test', test_range[i])
        print()
    print(p1, p2)
    return train_range, test_range
def get_non_float_ranges():
    """Write the merged value sets of every non-float column to disk.

    Pairs each test-file column descriptor with its train-file counterpart
    (train is shifted by one column) and, for columns whose type tag is not
    'float', records the union of the two value sets.
    """
    train_range, test_range = get_ranges()
    lines = [
        (test_col[0], test_col[2].union(train_col[2]))
        for test_col, train_col in zip(test_range, train_range[1:])
        if test_col[1] != 'float'
    ]
    u.write_lines(lines, data_dir + 'non_float_ranges.txt')
# return X_train (n, p'), Y_train (n, 1), X_test (m, p')
# X_train and X_test are processed in the same way
# return X_train (n, p'), Y_train (n, 1), X_test (m, p')
# X_train and X_test are processed in the same way
def get_data(method = 3):
    """Load and preprocess train/test CSVs into numpy matrices.

    ``method`` is forwarded to utils.process_lines_by_name and selects the
    preprocessing variant.  The first train column is the label; the test
    file lacks it, hence names[1:].
    """
    train_lines = u.get_lines(data_dir + train_filename)
    names = u.split_line(train_lines[0])
    train_all = u.process_lines_by_name(train_lines[1:], names, method = method)
    test_lines = u.get_lines(data_dir + test_filename)
    test_all = u.process_lines_by_name(test_lines[1:], names[1:], method = method)
    train_XY = np.array(train_all)
    # Split off the first column (the label) from the training matrix.
    X_train = train_XY[:, 1:]
    Y_train = train_XY[:, :1]
    X_test = np.array(test_all)
    print('X_train shape', X_train.shape)
    print('Y_train shape', Y_train.shape)
    print('X_test shape', X_test.shape)
    return X_train, Y_train, X_test
def main():
    """Entry point: load and preprocess the train/test matrices."""
    get_data()
#print('do we use python?')
if __name__ == '__main__':
main()
|
990,489 | 65ebec88ef5ad0bfd1da22aa68c0e675122ca916 | from binaryninja import *
class RunInBackground(BackgroundTaskThread):
    """Run an arbitrary callable as a cancellable Binary Ninja background task.

    The callable receives this task object as its first argument (so it can
    report progress / check cancellation), followed by the stored args.
    """
    def __init__(self, msg, func, *args, **kwargs):
        BackgroundTaskThread.__init__(self, msg, True)
        self.func = func
        self.args = args
        self.kwargs = kwargs
    def run(self):
        self.func(self, *self.args, **self.kwargs)
# LowLevelILFunction isn't provided with a source_function during LLIL generation, but we need it to access the BinaryView.
# https://github.com/Vector35/binaryninja-api/issues/551
def get_llil_view(llil):
    """Recover the BinaryView that owns *llil*.

    LowLevelILFunction isn't provided with a source_function during LLIL
    generation, so the owner is resolved through the core API instead
    (workaround for binaryninja-api issue #551).
    """
    owner = core.BNGetLowLevelILOwnerFunction(llil.handle)
    if owner is None:
        return None
    return BinaryView(handle=core.BNGetFunctionData(owner))
def mlil_ssa_trace_var(mlil, var):
    """Follow an SSA variable back through its chain of definitions.

    Repeatedly steps from a MLIL_VAR_SSA use to its defining MLIL_SET_VAR_SSA
    and on to that definition's source expression, stopping at the first
    instruction that is neither.  The 100-step cap guards against cycles;
    hitting it returns None.
    """
    for _ in range(100):
        if var.operation == MediumLevelILOperation.MLIL_VAR_SSA:
            index = mlil.get_ssa_var_definition(var.src)
            if index is None:
                # No visible definition (e.g. a function parameter).
                return var
            var = mlil[index]
        elif var.operation == MediumLevelILOperation.MLIL_SET_VAR_SSA:
            var = var.src
        else:
            return var
    return None
def mlil_ssa_get_phi_defs(mlil, phis):
    """Map each SSA phi source variable to its defining MLIL instruction."""
    definitions = []
    for phi_var in phis:
        definitions.append(mlil[mlil.get_ssa_var_definition(phi_var)])
    return definitions
def mlil_ssa_trace_var_phis(mlil, var):
    """Trace *var* back through SSA definitions, expanding phi nodes.

    Performs a worklist traversal: each traced variable that lands on a
    MLIL_VAR_PHI has all of its phi sources queued (each at most once, via
    ``ignored``); non-phi endpoints are collected.  Returns the single
    endpoint instruction if all paths converge on one expression, else None.
    """
    # TODO: Can this be simplified?
    pending = set()
    ignored = set()
    results = set()
    pending.add(var)
    while pending:
        var = mlil_ssa_trace_var(mlil, pending.pop())
        if var.operation == MediumLevelILOperation.MLIL_VAR_PHI:
            if var.dest in ignored:
                continue
            ignored.add(var.dest)
            pending |= set(mlil_ssa_get_phi_defs(mlil, var.src))
        else:
            results.add(var)
    # MediumLevelILInstruction has no __eq__
    results = list(results)
    # Compare by expr_index instead: distinct instruction objects may wrap
    # the same underlying expression.
    if len(set(insn.expr_index for insn in results)) > 1:
        return None
    return results[0]
def mlil_ssa_solve_branch_dependence(mlil, lhs, rhs):
    """Find an MLIL_IF branch on which *lhs* and *rhs* depend differently.

    Scans the shared branch dependencies of the two instructions for one
    where they sit on opposite sides of the same MLIL_IF.  Returns
    ``(branch, true_side, false_side)`` with the operands swapped so the
    first returned instruction is on the true branch, or None if the two
    instructions have no conflicting branch dependence.
    """
    lhs_branches = lhs.branch_dependence
    rhs_branches = rhs.branch_dependence
    for index, lhs_dependence in lhs_branches.items():
        if index not in rhs_branches:
            continue
        rhs_dependence = rhs_branches[index]
        if lhs_dependence == rhs_dependence:
            # Same side of this branch -- not a discriminating condition.
            continue
        branch = mlil[index]
        if branch.operation != MediumLevelILOperation.MLIL_IF:
            continue
        # Normalize so lhs is the true-branch-dependent instruction.
        if lhs_dependence == ILBranchDependence.FalseBranchDependent:
            lhs, rhs = rhs, lhs
        return branch, lhs, rhs
    return None
def get_raw_values(values):
    """Flatten a PossibleValueSet into a plain list of integer values.

    Returns None for value-set kinds that cannot be enumerated.
    """
    kind = values.type
    if kind in (RegisterValueType.ConstantValue,
                RegisterValueType.ConstantPointerValue):
        return [values.value]
    if kind == RegisterValueType.LookupTableValue:
        return [entry.to_value for entry in values.table]
    if kind == RegisterValueType.InSetOfValues:
        return values.values
    return None
def get_xref_llil(xref):
    """Return the LLIL instruction at the cross-reference's address."""
    owner = xref.function
    return owner.get_low_level_il_at(xref.address)
def get_stack_offset(arch, insn):
    """Return the stack pointer's frame offset at *insn*, or None if unknown."""
    sp_value = insn.get_reg_value(arch.stack_pointer)
    if sp_value.type == RegisterValueType.StackFrameOffset:
        return sp_value.offset
    return None
def are_values_executable(view, values):
    """Check whether every possible value is an executable offset in *view*.

    Imported addresses are trusted as executable; otherwise the value set
    must be enumerable and each member must land in executable memory.
    """
    if values.type == RegisterValueType.ImportedAddressValue:
        return True
    raw = get_raw_values(values)
    if raw is None:
        return False
    return all(view.is_offset_executable(addr) for addr in raw)
|
990,490 | 0de6d19df61185d86066d94086a1a993a96ab887 | import sys
import pytest
import numpy as np
from typing import List, Union, Literal, Set
from config import CONFIG
sys.path.insert(0, str(CONFIG.src))
from card_simulator import Card_Deck
from Hands_generator import Hands_Generator
@pytest.fixture
def hands_generator():
    """Fresh Hands_Generator instance for each test."""
    hands_generator = Hands_Generator()
    return hands_generator
@pytest.fixture
def card_deck():
    """Fresh (unshuffled) 52-card deck for each test."""
    card_deck = Card_Deck()
    return card_deck
@pytest.fixture
def random_card_list(card_deck):
    """Single-element list holding one uniformly random card from the deck."""
    random_ind = np.random.randint(0, 52, 1)[0]
    random_card = card_deck.cards[random_ind]
    random_card_list = [random_card]
    return random_card_list
def test_get_straight(hands_generator, random_card_list):
    """get_straight must yield 7 cards containing a run of 5+ consecutive values."""
    for _ in range(1000):
        card_deck = Card_Deck()
        card_deck.shuffle()
        straight = hands_generator.get_straight(random_card_list, card_deck)
        assert len(straight) == 7, f"Must only generate 7 cards. Got {straight} of length {len(straight)}."
        # Count consecutive-value steps over the value-sorted hand.
        count = 1
        sorted_straight = sorted(straight, key=lambda x: x.value)
        current_card = sorted_straight[0]
        for idx, card in enumerate(sorted_straight):
            if idx > 0 and card.value - 1 == current_card.value:
                current_card = card
                count += 1
        assert count >= 5, f"Cards: {sorted_straight}. Length of cards: {len(sorted_straight)}"
def test_get_same_value(hands_generator, random_card_list, card_deck):
    """Check card count, value uniqueness and suit diversity for each
    pair type produced by get_same_value (pair, two pair, trips, quads)."""
    for _ in range(1000):
        # Re-shuffle a fresh deck every iteration (shadows the fixture deck).
        card_deck = Card_Deck()
        card_deck.shuffle()
        one_pair = hands_generator.get_same_value(random_card_list, card_deck, type_of_pair="one")
        one_pair_unique = set([x.value for x in one_pair])
        one_pair_suit = set([x.suit for x in one_pair])
        assert len(one_pair) == 2, "One pair must contain two cards!"
        assert len(one_pair_unique) == 1, "One pair must contain only 1 unique value!"
        assert len(one_pair_suit) == 2, "One pair must contain 2 different suits!"
        two_pair = hands_generator.get_same_value(random_card_list, card_deck, type_of_pair="two")
        two_pair_unique = set([x.value for x in two_pair])
        two_pair_suits = set([x.suit for x in two_pair])
        assert len(two_pair) == 4, "Two pairs must contain 4 cards!"
        assert len(two_pair_unique) == 2, "Two pair must contain 2 unique values!"
        # Bug fix: corrected the typo "differnt" in the two assertion
        # messages below/above and removed a leftover debug print.
        assert len(two_pair_suits) >= 2, "Two pair must contain at least 2 different suits!"
        three_of_a_kind = hands_generator.get_same_value(random_card_list, card_deck, "three")
        three_of_a_kind_unique = set([x.value for x in three_of_a_kind])
        three_of_a_kind_suits = set([x.suit for x in three_of_a_kind])
        assert len(three_of_a_kind) == 3, "Three of a kind must contain 3 cards!"
        assert len(three_of_a_kind_unique) == 1, "Three of a kind must contain only 1 unique value!"
        assert len(three_of_a_kind_suits) == 3, "Three of a kind must contain 3 suits!"
        four_of_a_kind = hands_generator.get_same_value(random_card_list, card_deck, "four")
        four_of_a_kind_unique = set([x.value for x in four_of_a_kind])
        four_of_a_kind_suit = set([x.suit for x in four_of_a_kind])
        assert len(four_of_a_kind) == 4, "Four of a kind must contain 4 cards!"
        assert len(four_of_a_kind_unique) == 1, "Four of a kind must contain only 1 unique value"
        assert len(four_of_a_kind_suit) == 4, "Four of a kind must contain 4 suits!"
if __name__ == "__main__":
pass
|
990,491 | f638b6db723284109520689ca4fbcb91e697df30 | # coding: utf-8
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://www.vauxoo.com>).
# All Rights Reserved
###############################################################################
# Credits:
# Coded by: Katherine Zaoral <kathy@vauxoo.com>
# Planified by: Humberto Arocha <hbto@vauxoo.com>
# Audited by: Humberto Arocha <hbto@vauxoo.com>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class ChangeInvoiceSinCredwizard(osv.TransientModel):
    """
    Wizard that changes the invoice sin_cred field.

    Transient (wizard) model: the user picks the desired Tax Exempt value,
    confirms, and the flag is written onto every invoice selected in the
    list view (context['active_ids']).
    """
    _name = 'change.invoice.sin.cred'
    _description = 'Change Invoice Tax Exempt'
    _columns = {
        # Defaults to the current invoice's value, passed in via context.
        'sin_cred': fields.boolean(
            'Tax Exempt',
            default=lambda s: s._context.get('invoice_sin_cred'),
            help='Tax Exempt'),
        # Explicit confirmation checkbox; set_sin_cred refuses to run without it.
        'sure': fields.boolean('Are you sure?'),
    }
    def set_sin_cred(self, cr, uid, ids, context=None):
        """
        Change the sin cred field in the invoice

        Writes this wizard's sin_cred value onto all invoices in
        context['active_ids'] after the user has ticked 'sure'.
        @return empty dict (closes the wizard)
        """
        context = context or {}
        # Normalize to a list of ids (old-API convention; `long` => Python 2).
        ids = isinstance(ids, (int, long)) and [ids] or ids
        inv_obj = self.pool.get('account.invoice')
        inv_ids = context.get('active_ids', [])
        data = self.browse(cr, uid, ids[0], context=context)
        if not data.sure:
            raise osv.except_osv(
                _("Error!"),
                _("Please confirm that you want to do this by checking the"
                  " option"))
        if inv_ids:
            inv_obj.write(cr, uid, inv_ids, {
                'sin_cred': data.sin_cred}, context=context)
        return {}
|
990,492 | dbca30303cafe5c4bda912b03e7c3069dc593eaa | import time
import os
import process as pro1
# Interactive menu: login, sign up, or restart on invalid input.
print("Welcome to TURUCALLER".center(50,'*'))
time.sleep(1)
print('\nPress 1 to login.\nPress 2 to SignUp')
ch1=int(input('Enter your choice: '))
if ch1==1:
    # Existing user: authenticate, then hand off to the main process loop.
    import login
    name=login.login()
    time.sleep(5)
    os.system('cls')
    pro1.process.pro(name)
elif ch1==2:
    # New user: create an account, then enter the main process loop.
    import create as cr
    name=cr.create.signup()
    pro1.process.pro(name)
else:
    # Invalid choice: clear the screen and restart via the execute module.
    print('It looks you entered a wrong choice')
    time.sleep(2)
    os.system('cls')
    import execute as e
    # NOTE(review): the bare expression below is a no-op; the restart (if
    # any) happens as a side effect of importing ``execute`` above.
    e
|
990,493 | 7afeac90e0c1496f915902c51676bc215d2a4eff | # FILE-INDEXER/VALUE-GENERATOR, Written by Benjamin Jack Cullen
import os
import sys
import csv
import time
import codecs
import distutils.dir_util
import fileinput
import datetime
# Files & Paths
mainDir = 'Indexes'
encode = u'\u5E73\u621015\u200e,'
config = 'config.conf'
rawPath = (mainDir+'/Raw-Indexes')
csvPath = (mainDir+'/CSV-Indexes')
distutils.dir_util.mkpath(rawPath)
distutils.dir_util.mkpath(csvPath)
rawUserVideo = (mainDir+'/Raw-Indexes/raw-user-video-index.py')
csvUserVideo = (mainDir+'/CSV-Indexes/csv-user-video-index.py')
# Data
vidext = [".webm", ".mkv", ".flv", ".vob", ".ogb", ".ogg", ".gif", ".gifv", ".mng", ".avi",
".mov", ".wmv", ".yuv", ".rm", ".rmvb", ".asf", ".mp4", ".m4p", ".m4v", ".mpg", ".mp2",
".mpeg", ".mpe", ".mpv", ".m2v", ".svi", ".3gp", ".3g2", ".mxf", ".roq", ".nsv", ".f4v", ".f4p",
".f4a", ".f4b"]
target_vid = ''
target_root_vid = ''
live_path = []
indexed_path = []
write_request = False
def write_index():
    """Re-scan the video directory and rewrite the raw and CSV index files.

    Walks target_vid collecting video paths into the module-level
    to_file_path list, writes them to the raw index, then mirrors the raw
    index into the quoted CSV index.  Clears write_request when done.
    """
    global target_vid
    global target_root_vid
    global rawUserVideo
    global csvUserVideo
    global vidext
    global fullpath
    global write_request
    print('index user video: writing ...')
    # Truncate the raw index before appending the fresh path list.
    open(rawUserVideo, "w").close()
    for dirName, subdirList, fileList in os.walk(target_vid):
        for fname in fileList:
            if fname.endswith(tuple(vidext)):
                fullpath = os.path.join(target_root_vid, dirName, fname)
                to_file_path.append(fullpath)
    # Append every discovered path in one open/close cycle instead of
    # reopening the file once per path as the original did.
    with codecs.open(rawUserVideo, "a", encoding="utf-8") as txtFile:
        for path in to_file_path:
            txtFile.writelines(path + "\n")
    time.sleep(2)
    # Bug fix: the original wrote `open(csvUserVideo, "w").close` without
    # parentheses -- the handle was truncated on open but never closed.
    open(csvUserVideo, "w").close()
    # Mirror the raw index into the CSV index with full quoting.
    ifile = codecs.open(rawUserVideo, "r", encoding="utf-8")
    reader = csv.reader(ifile)
    ofile = codecs.open(csvUserVideo, "w", encoding="utf-8")
    writer = csv.writer(ofile, delimiter=' ', quotechar='"', quoting=csv.QUOTE_ALL)
    for row in reader:
        writer.writerow(row)
    ifile.close()
    ofile.close()
    print('index user video: wrote ...')
    time.sleep(2)
    write_request = False
def get_live_paths():
    """Collect every on-disk video file path into the `live_path` list."""
    global target_vid
    global target_root_vid
    global live_path
    global vidext
    extensions = tuple(vidext)
    for dir_name, _subdirs, file_names in os.walk(target_vid):
        for file_name in file_names:
            if file_name.endswith(extensions):
                live_path.append(os.path.join(target_root_vid, dir_name, file_name))
def get_indexed_paths():
    """Load unique, de-quoted paths from the raw index into `indexed_path`."""
    global indexed_path
    global csvUserVideo
    with codecs.open(rawUserVideo, 'r', encoding='utf-8') as fo:
        for raw_line in fo:
            path = raw_line.strip().replace('"', '')
            if path not in indexed_path:
                indexed_path.append(path)
def compare_index_to_live_path():
    """Request a rewrite if any indexed path no longer exists on disk."""
    global live_path
    global indexed_path
    global write_request
    for known_path in indexed_path:
        if known_path not in live_path:
            write_request = True
def compare_live_path_to_index():
    """Request a rewrite if any on-disk path is missing from the index."""
    global live_path
    global indexed_path
    global write_request
    for disk_path in live_path:
        if disk_path not in indexed_path:
            write_request = True
def get_config():
    """Read the DIRVID entry from the config file into the target-path globals."""
    global target_vid
    global target_root_vid
    with open(config, 'r') as fo:
        for raw_line in fo:
            entry = raw_line.strip()
            if entry.startswith('DIRVID:'):
                target_vid = entry.replace('DIRVID:', '').strip()
                # Drive root, e.g. "C:\" from "C:\Videos\...".
                target_root_vid = str(target_vid.split('\\')[0] + '\\')
# Main watcher loop: poll the filesystem once a second and rebuild the
# index files whenever they disagree with what is on disk.
while True:
    # Make sure both index files exist before scanning.
    for index_file in (rawUserVideo, csvUserVideo):
        if not os.path.exists(index_file):
            open(index_file, 'w').close()
    get_config()
    get_live_paths()
    get_indexed_paths()
    compare_index_to_live_path()
    compare_live_path_to_index()
    # Reset the accumulators for the next polling round.
    to_file_path = []
    indexed_path = []
    live_path = []
    if write_request == True:
        write_index()
    time.sleep(1)
|
990,494 | 565e01d561903b43a58119b178a317af7b76e69e | import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from IPython.html.widgets import interact
from sklearn.linear_model import LogisticRegression
from visualization_helper import plot_proba_function
from sklearn.datasets.samples_generator import make_blobs
def solve_kmeans_exercise(X, n_clusters=10):
    """Cluster X with KMeans and plot the cluster centers as 8x8 images.

    At most 12 centers are shown (a 4x3 subplot grid).
    """
    kmeans = KMeans(n_clusters=n_clusters)
    kmeans.fit(X)
    for i in range(n_clusters if n_clusters <= 12 else 12):
        plt.subplot(4, 3, i+1)
        plt.imshow(kmeans.cluster_centers_[i].reshape((8, 8)))
        plt.axis('off')
def solve_logistic_regression_exercise():
    """Interactive widget varying sample count N and regularization C."""
    interact(plot_logistic_regression, N=[5, 200], C=[1, 1e3])
def solve_image_compression_exercise(image, n_clusters):
    """Color-quantize an image via KMeans and display the result.

    Pixels (assumed uint8 RGB -- scaled by 255; confirm with caller) are
    clustered in color space and each pixel is replaced by its cluster center.
    """
    X = (image / 255.0).reshape(-1, 3)
    clu = KMeans(n_clusters=n_clusters)
    clu.fit(X)
    colors = clu.cluster_centers_
    new_flower = colors[clu.predict(X)].reshape(image.shape)
    plt.imshow(new_flower)
    plt.grid(False)
def plot_logistic_regression(N=5, C=1):
    """Plot Logistic Regression and its decision function.

    Parameters:
    N - Number of datapoints used to train the SVM.
    C - the regularization term.

    Trains on the first N points of a 2-blob dataset, plots train points
    (opaque) and held-out points (faded), and overlays the probability
    surface with train/test accuracy in the title.
    """
    X, y = make_blobs(n_samples=200, centers=2, random_state=0,
                      cluster_std=0.60)
    X_train, y_train = X[:N], y[:N]
    X_test, y_test = X[N:], y[N:]
    clf = LogisticRegression(C=C)
    clf.fit(X_train, y_train)
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=50, cmap='spring')
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=50, cmap='spring',
                alpha=0.2)
    plt.xlim(-1, 4)
    plt.ylim(-1, 6)
    plot_proba_function(clf, plt.gca())
    train_score = clf.score(X_train, y_train)
    # Guard: with N == 200 there is no held-out data to score.
    test_score = clf.score(X_test, y_test) if len(X_test) > 0 else 'NA'
    plt.title('Train Accuracy = {0}; Test Accuracy = {1}; coef = {2}'.format(
        train_score, test_score, clf.coef_))
990,495 | 69203a26b2e2b13e29423e2ff4fff862e0bcaa7e | from django.contrib.gis import admin
from geonames.models import Geoname, Alternate
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin import SimpleListFilter
class CityListFilter(SimpleListFilter):
    """Admin sidebar filter narrowing Geonames by topographic feature code."""
    # Human-readable title which will be displayed in the
    # right admin sidebar just above the filter options.
    title = _('is topographically')
    # Parameter for the filter that will be used in the URL query.
    parameter_name = 'topo'
    def lookups(self, request, model_admin):
        """Return the (url_value, human_label) choices shown in the sidebar."""
        return (
            ('city', _('city or town')),
            ('country', _('country')),
            ('continent', _('continent')),
        )
    def queryset(self, request, queryset):
        """Filter on the geonames feature code matching the selected choice."""
        selected = self.value()
        if selected == 'city':
            queryset = queryset.filter(fcode='PPL')
        elif selected == 'country':
            queryset = queryset.filter(fcode='PCLI')
        elif selected == 'continent':
            queryset = queryset.filter(fcode='CONT')
        return queryset
class AlternateInline(admin.TabularInline):
    """Tabular inline for editing a Geoname's alternate names in place."""
    model = Alternate
class GeonameAdmin(admin.GeoModelAdmin):
    """Geo-aware admin for Geoname with search, filters and inline alternates."""
    search_fields = ('name',)
    list_display = ('name', 'country', 'timezone')
    list_filter = (CityListFilter, 'country', 'timezone')
    inlines = (AlternateInline,)
admin.site.register(Geoname, GeonameAdmin)
|
990,496 | 6e554baa852aefa141b40de389acfcfd80795562 | from django.shortcuts import render
from django.views.generic import CreateView,ListView,DetailView,UpdateView,DeleteView
from blogs.models import Posts
from django.contrib.auth.mixins import LoginRequiredMixin,UserPassesTestMixin
# Create your views here.
def index(request):
    """Render the static blog landing page."""
    return render (request,"blogs/index.html")
class PostCreateView(CreateView):
    """Create a new Posts entry, stamping the requesting user as author.

    NOTE(review): unlike the update/delete views below this one has no
    LoginRequiredMixin, so an anonymous request reaches form_valid with
    an AnonymousUser author -- confirm whether login should be required.
    """
    model = Posts
    fields=["Title","Content","Date_Of_Upload","Image"]
    def form_valid(self,form):
        form.instance.author=self.request.user
        return super().form_valid(form)
class PostListView(ListView):
    """List all Posts entries (default template/context names)."""
    model=Posts
class PostDetailView(DetailView):
    """Show a single Posts entry (default template/context names)."""
    model=Posts
class PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
    """Edit an existing post; only its logged-in author may update it."""
    model=Posts
    fields=["Title","Content","Image"]
    template_name="blogs/update.html"
    def form_valid(self,form):
        # Re-stamp the editing user as author before saving.
        form.instance.author=self.request.user
        return super().form_valid(form)
    def test_func(self):
        # UserPassesTestMixin hook: only the post's author passes.
        post=self.get_object()
        return self.request.user==post.author
class PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):
    """Delete a post (author only), then redirect to the site root."""
    model=Posts
    success_url="/"
    def test_func(self):
        # UserPassesTestMixin hook: only the post's author passes.
        post=self.get_object()
        return self.request.user==post.author
def home_screen_view(request):
    """Render the paginated, searchable blog home page.

    NOTE(review): this view references names not imported anywhere in this
    file (get_blog_queryset, attrgetter, Paginator, PageNotAnInteger,
    EmptyPage, BLOG_POSTS_PER_PAGE), so as written it raises NameError at
    request time -- the imports need to be added or the view removed.
    """
    context = {}
    query = ""
    query = request.GET.get('q', '')
    context['query'] = str(query)
    print("home_screen_view: " + str(query))
    # Newest-first ordering by last update time.
    blog_posts = sorted(get_blog_queryset(query), key=attrgetter('date_updated'), reverse=True)
    # Pagination
    page = request.GET.get('page', 1)
    blog_posts_paginator = Paginator(blog_posts, BLOG_POSTS_PER_PAGE)
    try:
        blog_posts = blog_posts_paginator.page(page)
    except PageNotAnInteger:
        blog_posts = blog_posts_paginator.page(BLOG_POSTS_PER_PAGE)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        blog_posts = blog_posts_paginator.page(blog_posts_paginator.num_pages)
    context['blog_posts'] = blog_posts
    return render(request, "personal/home.html", context)
|
990,497 | 26f7331d235ee7a9b7a69e561789fa679340fee7 | import matplotlib.pyplot as plt
import matplotlib.cm
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
import matplotlib.cm as cm
def draw_map(x,y):
    """Scatter (lon, lat) point arrays over a Basemap of Vancouver, Canada.

    x, y -- longitudes and latitudes (presumably array-likes in degrees;
    confirm with caller).  Requires the 'vancouver_canada_land_coast'
    shapefile alongside the script.
    """
    # Bounding box of the Vancouver area, in degrees.
    l_lon = -123.3116
    r_lon = -122.9923
    u_lat = 49.3341
    l_lat = 49.1808
    fig, ax = plt.subplots(figsize=(20,20))
    m = Basemap(resolution='c',projection='merc',area_thresh = 0.1,
            lat_0=(u_lat-l_lat)/2, lon_0=(l_lon-r_lon)/2,
            llcrnrlon=l_lon, llcrnrlat=l_lat,urcrnrlon=r_lon, urcrnrlat=u_lat)
    # Project lon/lat into map coordinates.
    x1, y1 = m(x,y)
    m.readshapefile('vancouver_canada_land_coast','vancouver',
                    color='black',
                    zorder=2)
    m.scatter(x1, y1,s=1,c='#F2BDBE',alpha=0.2,marker='.')
    plt.show()
return |
990,498 | 2502f46e5fa910c72ba490f98231dc29b99fe642 | import tensorflow as tf
import numpy as np
def __init__():
    # NOTE(review): a module-level function named __init__ is unusual -- it
    # is never called automatically, and the nested function below is defined
    # but never invoked, so none of these variables are ever created.
    def recurrent_network():
        """Declare the weight/bias variables of a simple RNN (skeleton only)."""
        hidden_size = 100
        seq_length = 5
        learning_rate = 1e-1
        with tf.variable_scope("yolo"):
            input_hidden = tf.get_variable('l1',[hidden_size, 5])
            hidden_hidden = tf.get_variable('l2',[hidden_size,hidden_size])
            hidden_output = tf.get_variable('l3',[hidden_size,hidden_size])
            bias_hidden = tf.get_variable('b1', [hidden_size,1], initializer=tf.zeros_initializer())
            bias_output = tf.get_variable('b2', [hidden_size,1], initializer=tf.zeros_initializer())
|
990,499 | 0f15e585f2680a7a83e0f9ee4e952cc89a313255 | import pefile
from sys import argv
# Python 2 script: dump a PE module's exports and sections, then patch a
# NOP sled in each executable section and write a copy of the binary.
if len(argv) < 2:
    # NOTE(review): the "%s" is never substituted (no ``% argv[0]``), so the
    # usage line prints the literal "%s".
    print "%s module.dll [0xOFFSET]"
    exit()
module_name = argv[1]
pe = pefile.PE(module_name)
#bytes = pe.section[index].get_data()
# Image base: optional second CLI argument (hex), else the PE header's base.
base = int( argv[2], 16 ) if len(argv) > 2 else pe.OPTIONAL_HEADER.ImageBase
for sym in pe.DIRECTORY_ENTRY_EXPORT.symbols:
    print "0x%x %s" % ( base + sym.address, sym.name )
for section in pe.sections:
    print "%s 0x%x" % ( section.Name.strip(), pe.OPTIONAL_HEADER.ImageBase + section.VirtualAddress )
    #print section
    if section.IMAGE_SCN_MEM_EXECUTE:
        # Locate a run of seven NOPs (0x90) in the executable section.
        opcodes = section.get_data()
        offset = section.PointerToRawData + opcodes.find("\x90\x90\x90\x90\x90\x90\x90")
        # NOTE(review): ``_bytes`` is never defined anywhere in this script,
        # so this line raises NameError at runtime -- the intended payload
        # needs to be supplied.
        pe.set_bytes_at_offset(offset, bytes(_bytes))
pe.write("_%s" % module_name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.