index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,600 | 01da8493c1c735a7ff806dde9b16a6a927817f5e | def gen_primes():
# Incremental Sieve of Eratosthenes: D maps each upcoming composite number
# to the list of primes that divide it, so memory grows with primes seen,
# not with q.
D = {}
q = 2
while True:
if q not in D:
# q is prime: yield it and schedule its first composite multiple q*q.
yield q
D[q * q] = [q]
else:
# q is composite: advance every witness prime to its next multiple.
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
# Build the digit string of concatenated primes ("2357111317...") until it is
# long enough to read the 5 digits starting at index 10000.
# NOTE(review): `primes` and the counter `y` are assigned but never used.
primes = gen_primes()
string = ""
y = 0
a = gen_primes()
while len(string) < (10000+5+1):
new_prime = next(a)
string = string + str(new_prime)
y+=1
print(string)
print(len(string))
print(string[10000:10000+5])
|
999,601 | 45cc3a76afc1ed591ab23efa0831e8b4ff25be40 | if __name__ == "__main__":
# NOTE(review): in this dump the entry-point guard precedes the function
# definitions it calls, and print_escape()/print_eat() are invoked without
# their required arguments — as written this would raise at runtime.
total()
escape()
print_escape()
print_eat()
# colorama provides cross-platform ANSI color support for the prints below.
import colorama
from colorama import Fore, Back, Style
colorama.init()
def total(*params):
    """Return the grand total of all elements across the given iterables.

    Each positional argument is an iterable of numbers; an empty call
    returns 0.
    """
    # sum() of an empty generator is 0, matching the original accumulator.
    return sum(sum(group) for group in params)
def escape(*params):
    """Sum every element of every iterable passed in.

    Same contract as total(); kept as a separate name because callers use
    it for the second player's tally.
    """
    return sum(sum(group) for group in params)
# Print the "the Serpent ate everything" ending (Ukrainian text);
# `result` is the total number of checkers eaten.
def print_eat(result):
print(Fore.YELLOW+"Поласувавши смачненьким, Змій зрозумів, що разом із Васелиною з’їв ще й усі шашки які вона з’їла!\n Ретельно підрахувавши внього виявилось: "+Fore.RED+str(result)+Fore.YELLOW+" шт. шашок!"+Fore.BLUE+"\n \"Гарно пограли\" "+Fore.YELLOW+"- розсміявся Змій.")
# Print the "Vasylyna escaped" ending; `result` is the serpent's checker
# count and `result2` is Vasylyna's.
def print_escape(result,result2):
print(Fore.YELLOW+"Змій не зміг з’їсти Васелину! Проте він з’їв: "+Fore.RED+str(result)+Fore.YELLOW+" шт. шашок загалом!\n","Васелина ж з’їла: "+Fore.RED+str(result2)+Fore.YELLOW+" шт. шашок! Та вирішила зі Змієм більше не грати!") |
999,602 | a6d9069fb46a2a08ec6329e2e80399a31a157c49 | import sys
import array
import copy
# Firewall-scanner puzzle (looks like Advent of Code 2017 day 13 part 2):
# find the smallest start delay that crosses every layer uncaught.
# NOTE(review): the file handle is never closed; `sys`, `array`, `copy`
# imports are unused.
file = open("../inputs/day13input.txt", "r")
directions = file.read().split("\n")
layers = {}
#map each location to the length of the array at that location
for d in directions:
dSplit = d.split(":")
leftSide = int(dSplit[0])
rightSide = int(dSplit[1].strip())
layers[leftSide] = rightSide
delay = 0
cont = True
#iterate until we find the correct delay
while cont:
seen = False
#for each layer, see if the scanner will be at index 0 after delay + y steps, where y is the location of the layer
# A scanner of range R bounces with period 2*(R-1); it is at position 0
# exactly when (delay + depth) is a multiple of that period.
for y in layers:
if (delay + y)%(2*layers[y]-2) == 0:
delay += 1
seen = True
break
cont = seen
print(delay)
|
999,603 | e483d65e9e64f3f5efa68d37e2654fce49244dfa | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Weekly sales forecasting: resample per-store sales to weekly totals,
# engineer calendar features from the date, and fit a grid-searched
# RandomForestRegressor.
secondary_sales = pd.read_csv("WC_DS_Ex1_Sec_Sales.csv",parse_dates=[3],index_col=[3]).sort_index()
# Promotion depth = discount fraction off the MRP.
secondary_sales['promotions'] = 1 - secondary_sales['SP']/secondary_sales['MRP']
train = secondary_sales[['Store_Code','Category','MRP','SP','promotions','Sales_Qty']]
target = secondary_sales[['Sales_Qty']]
#Process the information for store1
Store1 = secondary_sales['Store_Code'] == 'Store1'
Store1_secondary_sales = secondary_sales[Store1]
Store1_secondary_sales = Store1_secondary_sales[['MRP','SP','promotions','Sales_Qty']]
Store1_secondary_sales_week = Store1_secondary_sales.resample('W').sum()
Store1_secondary_sales_week['Store_Code'] = 'Store1'
#Process for store2
Store2 = secondary_sales['Store_Code'] == 'Store2'
Store2_secondary_sales = secondary_sales[Store2]
Store2_secondary_sales = Store2_secondary_sales[['MRP','SP','promotions','Sales_Qty']]
Store2_secondary_sales_week = Store2_secondary_sales.resample('W').sum()
Store2_secondary_sales_week['Store_Code'] = 'Store2'
#Process for store3
Store3 = secondary_sales['Store_Code'] == 'Store3'
Store3_secondary_sales = secondary_sales[Store3]
Store3_secondary_sales = Store3_secondary_sales[['MRP','SP','promotions','Sales_Qty']]
Store3_secondary_sales_week = Store3_secondary_sales.resample('W').sum()
Store3_secondary_sales_week['Store_Code'] = 'Store3'
secondary_sales_week = pd.concat([Store1_secondary_sales_week,Store2_secondary_sales_week,Store3_secondary_sales_week],axis=0)
secondary_sales_week.reset_index(inplace=True)
secondary_sales_week = secondary_sales_week.sort_values(by='Date',ascending=True)
L1 = sns.lineplot(x='Date',y='Sales_Qty',data=secondary_sales_week)
#Fetaure engineering in Date column
#Feature engineering from date
column_1 = secondary_sales_week['Date']
temp = pd.DataFrame({"year": column_1.dt.year,
"month": column_1.dt.month,
"day": column_1.dt.day,
#"hour": column_1.dt.hour,
"dayofyear": column_1.dt.dayofyear,
# NOTE(review): Series.dt.week / .dt.weekofyear were deprecated and then
# removed in modern pandas; .dt.isocalendar().week is the replacement.
"week": column_1.dt.week,
"weekofyear": column_1.dt.weekofyear,
#"dayofweek": column_1.dt.dayofweek,
#"weekday": column_1.dt.weekday,
"quarter": column_1.dt.quarter,
})
secondary_sales_week.reset_index(drop=True, inplace=True)
temp.reset_index(drop=True, inplace=True)
secondary_sales_week = pd.concat([secondary_sales_week,temp],axis=1)
# Keep the dates aside for plotting, then drop them from the features.
Date = secondary_sales_week['Date']
secondary_sales_week.drop(columns=['Date'],axis=1,inplace=True)
target = secondary_sales_week['Sales_Qty']
secondary_sales_week.drop(columns=['Sales_Qty'],axis=1,inplace=True)
secondary_sales_week = pd.get_dummies(secondary_sales_week)
#Let's check the normality shape of sales
from statsmodels.graphics.gofplots import qqplot
qqplot(target, line='s')
plt.show()
D1 = sns.distplot(target)
#train test split of dataset
from sklearn.model_selection import train_test_split
X_train , X_test , y_train,y_test = train_test_split(secondary_sales_week,target,test_size=0.2,random_state=29)
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(random_state = 29,oob_score = True)
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
# NOTE(review): 15 appears twice in max_depth; 'auto' for max_features was
# removed for regressors in scikit-learn >= 1.1.
'max_depth': [3,5,10,15,15],
'max_features': ['auto','sqrt','log2'],
'min_samples_leaf': [5,10,20],
'min_samples_split': [3,5,10,15],
'n_estimators': [30,60,100,200,300]
}
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
# Fit the grid search to the data
grid_search.fit(X_train, y_train)
grid_search.best_params_
# Report mean absolute error and a MAPE-based "accuracy" for a fitted model.
def evaluate(model, test_features, test_labels):
predictions = model.predict(test_features)
errors = abs(predictions - test_labels)
mape = 100 * np.mean(errors / test_labels)
accuracy = 100 - mape
print('Model Performance')
print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))
print('Accuracy = {:0.2f}%.'.format(accuracy))
return accuracy
best_grid = grid_search.best_estimator_
grid_accuracy = evaluate(best_grid, X_test, y_test)
pred = grid_search.predict(X_test)
Date=pd.DataFrame(Date)
# NOTE(review): row 252 onward is assumed to line up with the shuffled test
# split — plotting y_test against these dates is only approximate; verify.
x = Date[252:]
plt.figure(figsize=(15,8))
l1 = sns.lineplot(x=x['Date'],y=pred,color='blue',label='Predictions')
l1 = sns.lineplot(x=x['Date'],y=y_test,color='red',label='True value')
|
999,604 | 758b808b2b114a5a76d1b9f1880a7d1e557c2be6 | def climbingStaircase(n, k):
# Enumerate every way to climb n steps taking 1..k steps at a time,
# returned in sorted order.
rs = Staircase().climb(n, k)
return sorted(rs)
class Staircase:
def __init__(self):
# Memo of sub-results keyed by remaining step count.
# NOTE(review): recursive calls go through module-level climbingStaircase,
# which builds a *fresh* Staircase each time, so this memo only helps
# within a single climb() invocation; the key also omits k, which would be
# wrong if k varied across calls.
self.mem = dict()
def climb(self, n, k):
print(f"climbingStaircase({n}, {k})")
if n == 1:
return [[1]]
elif n == 0:
return [[]]
rs = []
# Try a last jump of size n-i for every reachable prior position i.
for i in range(n-1, max(n-k, 0)-1, -1):
if i in self.mem:
paths = self.mem[i]
else:
paths = climbingStaircase(i, k)
self.mem[i] = paths
for path in paths:
# Debug traces left in by the author.
print(f"i={i}, n={n}, path={path}, append={[n-i]}")
rs.append(path + [n-i])
print(f"climbing({n}), return rs={rs}")
print(f"mem={self.mem}")
return rs
print(climbingStaircase(4, 2))
|
999,605 | 41fd187be56e648d3ecb2c0a0c02aaf7b691dac0 | pyg = 'ei'
# Pig-latin-style translator with an "ei" suffix (Spanish prompts).
# Python 2 syntax: raw_input and the print statement.
original = raw_input('Escribe una palabra:')
if len(original) > 0 and original.isalpha():
palabra = original.lower()
# .lower() here is redundant — palabra is already lower-cased.
primera = palabra[0].lower()
if primera == "a" or primera == "e" or primera == "i" or primera == "o" or primera == "u":
nueva_palabra = palabra + pyg
else:
nueva_palabra = palabra[1:] + palabra[0:1] + pyg
else:
# NOTE(review): nueva_palabra is computed but never printed anywhere.
print 'Vacío'
|
999,606 | c7604abd4f26f3e60b7568b085fb3fb9acc27f21 | #!/usr/bin/env python3
#-*- Coded By SadCode -*-
import re
import requests
import urllib.request
import json
import codecs
import os
import sys
import argparse
import mutagen
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, APIC
# SoundCloud API client id and stream-lookup endpoint:
# {0} = track id, {1} = client id; returns the stream URLs as JSON.
CLIENTID="Oa1hmXnTqqE7F2PKUpRdMZqWoguyDLV0"
API = "https://api.soundcloud.com/i1/tracks/{0}/streams?client_id={1}"
# Desktop-browser User-Agent so the site serves the full HTML page.
headers = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) '
'Gecko/20100101 Firefox/55.0'
}
def get_id(html):
    """
    Return the numeric song ID embedded in a SoundCloud page.

    Prints an error message and exits the process when no ID is present.
    """
    match = re.search('soundcloud://sounds:(.*?)"', html)
    if match is None:
        print("\033[91m✘ Could not find song ID\033[0m")
        sys.exit()
    return match.group(1)
def get_tags(html):
    """
    Extract (title, artist, genre) from the track page HTML.

    Each field is JSON-escaped in the page source, so every raw capture is
    run through the unicode_escape decoder before being returned.
    Raises IndexError when a field is missing from the page.
    """
    decode = codecs.getdecoder("unicode_escape")
    values = []
    for field in ("title", "username", "genre"):
        raw = re.findall('"%s":"(.*?)",' % field, html)[0]
        values.append(decode(raw)[0])
    title, artist, genre = values
    return title, artist, genre
def get_album_art_url(html):
    """
    Return the URL of the 500x500 album-art image embedded in the page.

    Raises IndexError when the page has no matching <img> tag.
    """
    return re.findall('img src="(.*?)" width="500"', html)[0]
def tag(fname, title, artist, genre, arturl):
    """
    Write ID3 tags into the MP3 at *fname* and embed the cover art.

    Downloads the artwork from *arturl* to a temporary JPEG, attaches it as
    the front cover, then deletes the temporary file.
    """
    try:
        tag = EasyID3(fname)
    except mutagen.id3.ID3NoHeaderError:
        # Fresh file without an ID3 header: create one first.
        tag = mutagen.File(fname, easy=True)
        tag.add_tags()
    tag['artist'] = artist
    tag['title'] = title
    # The API does not expose an album name, so reuse the title.
    tag['album'] = title
    tag['genre'] = genre
    tag.save()
    id3 = ID3(fname)
    imagename = str(title.replace("/", "\\")+"500x500.jpg")
    urllib.request.urlretrieve(arturl, imagename)
    print("\033[92m✔ Album art downloaded\033[0m")
    # BUGFIX: read the artwork via a context manager instead of leaking the
    # file handle from open(...).read().
    with open(imagename, "rb") as art:
        imagedata = art.read()
    id3.add(APIC(3, 'image/jpeg', 3, 'Front cover', imagedata))
    # v2_version=3 keeps the tag readable by older players.
    id3.save(v2_version=3)
    # Always leave the place better than you found it ;)
    os.remove(imagename)
# CLI entry point: fetch the track page, scrape metadata, download the
# 128 kbps MP3 stream, then tag the file.
def main():
parser = argparse.ArgumentParser(description = "Download SoundCloud music at 128kbps with album art and tags")
parser.add_argument('url', action="store", help="URL to the song")
args = parser.parse_args()
r = requests.get(args.url, headers=headers)
print("\033[92m✔ Fetched needed data\033[0m")
html = r.text
song_id = get_id(html)
# NOTE(review): get_tags() is called three times; a single call unpacked
# into title, artist, genre would avoid re-running the regexes.
title = get_tags(html)[0]
artist = get_tags(html)[1]
genre = get_tags(html)[2]
arturl = get_album_art_url(html)
json_url = API.format(song_id, CLIENTID)
data = requests.get(json_url, headers=headers)
data = json.loads(data.text)
# Getting the file url with the best quality
file_url = data["http_mp3_128_url"]
# Example file name --> Adele - Hello.mp3
fname = str(artist+" - "+title.replace("/", "")+".mp3")
urllib.request.urlretrieve(file_url, fname)
print("\033[92m✔ Downloaded:\033[0m {0} by {1}".format(title, artist))
# Making the file beautiful
tag(fname, title, artist, genre, arturl)
print("\033[92m✔ Saved:\033[0m {}".format(fname))
if __name__=="__main__":
main()
|
999,607 | 07d3a9be938d89a832fc534dfe6ea14c2dc737ac | # 编写一个程序,通过填充空格来解决数独问题。
# 一个数独的解法需遵循如下规则:
# 数字 1-9 在每一行只能出现一次。
# 数字 1-9 在每一列只能出现一次。
# 数字 1-9 在每一个以粗实线分隔的 3x3 宫内只能出现一次。
# 空白格用 '.' 表示。
# https://leetcode-cn.com/problems/sudoku-solver
class Solution:
    def solveSudoku(self, board: List[List[str]]) -> None:
        """
        Solve the Sudoku in place by backtracking (LeetCode 37).

        Rules: digits 1-9 appear exactly once per row, per column and per
        3x3 box; empty cells are '.'.  Modifies *board* directly and
        returns nothing.  The original left this as `pass`.
        """
        # Digits already used per row / column / 3x3 box.
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        empties = []
        for r in range(9):
            for c in range(9):
                v = board[r][c]
                if v == '.':
                    empties.append((r, c))
                else:
                    rows[r].add(v)
                    cols[c].add(v)
                    boxes[r // 3 * 3 + c // 3].add(v)

        def backtrack(i):
            # Fill empties[i:]; True once every blank cell is placed.
            if i == len(empties):
                return True
            r, c = empties[i]
            b = r // 3 * 3 + c // 3
            for v in "123456789":
                if v in rows[r] or v in cols[c] or v in boxes[b]:
                    continue
                board[r][c] = v
                rows[r].add(v)
                cols[c].add(v)
                boxes[b].add(v)
                if backtrack(i + 1):
                    return True
                # Undo the placement before trying the next digit.
                board[r][c] = '.'
                rows[r].remove(v)
                cols[c].remove(v)
                boxes[b].remove(v)
            return False

        backtrack(0)
|
999,608 | 2374e8feb344a24c3de015b933318a426e45f9d7 | def angle_diagram(x1,x2,x3,x4,palm_point,input_image):
# Draw the axis of a finger bounding box and the line from its base to the
# palm point onto the image, then print the angle of that line in degrees.
# Relies on np / plt / atan / pi being imported at module level.
row = input_image.shape[0]
col = input_image.shape[1]
# NOTE(review): this zeros array is immediately rebound to input_image, so
# `copy` aliases the input and the drawing below mutates it in place.
copy = np.zeros((row,col))
copy = input_image
#box coordinates
# NOTE(review): the parameters x1..x4 and palm_point are overwritten with
# hard-coded test coordinates here, so the caller's arguments are ignored.
x1=[40,15]
x2=[20,25]
x4=[40,65]
x3=[60,55]
palm_point = [75,75]
# Midpoints of the box's top edge, base edge, and center.
top_mid = [(x1[0]+x2[0])//2,(x1[1]+x2[1])//2]
base_mid = [(x3[0]+x4[0])//2,(x3[1]+x4[1])//2]
block_mid = [(top_mid[0]+base_mid[0])//2,(top_mid[1]+base_mid[1])//2]
#BELOW COMMENTED CODE DRAWS A BOX AROUND IMAGE'S SEGMENT
#drawing the block == top
# m = (x2[1]-x1[1])/(x2[0]-x1[0])
# c = x2[1] - m*x2[0]
# for x in range(x2[0],x1[0]):
# y=int(m*x+c)
# blank[x,y] = 255
# #drawing the block == bottom
# m = (x4[1]-x3[1])/(x4[0]-x3[0])
# c = x4[1] - m*x4[0]
# for x in range(x4[0],x3[0]):
# y=int(m*x+c)
# blank[x,y] = 255
# #drawing the block == right
# m = (x4[1]-x2[1])/(x4[0]-x2[0])
# c = x4[1] - m*x4[0]
# for x in range(x2[0],x4[0]):
# y=int(m*x+c)
# blank[x,y] = 255
# #drawing the block == left
# m = (x3[1]-x1[1])/(x3[0]-x1[0])
# c = x3[1] - m*x3[0]
# for x in range(x1[0],x3[0]):
# y=int(m*x+c)
# blank[x,y] = 255
# Draw the half-axis from the block center down to the base midpoint.
m = (base_mid[1]-top_mid[1])/(base_mid[0]-top_mid[0])
c = base_mid[1] - m*base_mid[0]
for x in range(block_mid[0],base_mid[0]):
y=int(m*x+c)
copy[x,y] = 255
print(top_mid,base_mid,block_mid)
# Draw the segment from the base midpoint to the palm point.
m_to_palm = (base_mid[1]-palm_point[1])/(base_mid[0]-palm_point[0])
c_of_palm = palm_point[1] - m_to_palm*palm_point[0]
print(m_to_palm,c_of_palm)
for x in range(base_mid[0],palm_point[0]):
# print(m,x,c_of_palm,y)
y=int(m_to_palm*x+c_of_palm)
copy[x,y] = 255
# Convert the slope to a 0-180 degree angle (atan/pi from math).
theta = atan(m_to_palm)
theta = int((theta/pi)*180)
if(m_to_palm>0):
theta += 90
else:
theta = -1*theta
print(theta,"degrees")
# Show the untouched input next to the annotated copy (same array here).
fig = plt.figure()
ax0 = fig.add_subplot(2,1,1)
ax0.imshow(input_image,cmap='gray')
ax1 = fig.add_subplot(2,1,2)
ax1.imshow(copy,cmap='gray')
plt.show()
|
999,609 | c05154e87f3ecac43cc910f0bea3b6d397f7a59b | import numpy as np
import requests, pybel, json
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Float, Text, Boolean, String, ForeignKey, UniqueConstraint
# SQLAlchemy ORM setup for a local ILThermo ionic-liquid SQLite database.
Base = declarative_base()
metadata = Base.metadata
# check_same_thread=False lets the single session be used across threads.
db_file = 'sqlite:///ilthermo.db?check_same_thread=False'
engine = create_engine(db_file, echo=False)
Session = sessionmaker(engine)
session = Session()
# A measured physical property (e.g. density, viscosity) that data points
# reference.
class Property(Base):
__tablename__ = 'property'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
datas = relationship('Data', lazy='dynamic')
def __repr__(self):
return '<Property: %i %s>' % (self.id, self.name)
# A source publication that data points are attributed to.
class Paper(Base):
__tablename__ = 'paper'
id = Column(Integer, primary_key=True)
year = Column(Integer)
title = Column(Text, unique=True)
author = Column(Text)
datas = relationship('Data', lazy='dynamic')
def __repr__(self):
return '<Paper: %i %s>' % (self.id, self.author)
# An ILThermo data-set code plus a flag marking whether it has been fetched.
class DataSet(Base):
__tablename__ = 'dataset'
id = Column(Integer, primary_key=True)
code = Column(String(5))
searched = Column(Boolean)
# A cation or anion; sign of `charge` decides which side of a Molecule it
# belongs to.
class Ion(Base):
__tablename__ = 'ion'
id = Column(Integer, primary_key=True)
charge = Column(Integer)
name = Column(Text, unique=True)
searched = Column(Boolean)
popular = Column(Boolean, default=False)
selected = Column(Boolean, default=False)
smiles = Column(Text)
iupac = Column(Text)
# NOTE(review): attribute `ignored` maps to the DB column named
# 'validated' — confirm the intended semantics before relying on either name.
ignored = Column('validated', Boolean, default=False)
duplicate = Column(Integer)
category = Column(Text)
n_paper = Column(Integer)
times = Column(Integer)
molecules_cation = relationship('Molecule', lazy='dynamic', foreign_keys='Molecule.cation_id')
molecules_anion = relationship('Molecule', lazy='dynamic', foreign_keys='Molecule.anion_id')
def __repr__(self):
if self.charge > 0:
return '<Ion +%i: %i %s>' % (abs(self.charge), self.id, self.name)
else:
return '<Ion -%i: %i %s>' % (abs(self.charge), self.id, self.name)
# Molecules this ion participates in, picked by charge sign.
@property
def molecules(self):
if self.charge > 0:
return self.molecules_cation
else:
return self.molecules_anion
# Heavy-atom count parsed from the SMILES string via openbabel/pybel.
@property
def n_heavy(self):
try:
py_mol = pybel.readstring('smi', self.smiles)
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# catching Exception would be safer.
except:
raise Exception('Smiles not valid')
return py_mol.OBMol.NumHvyAtoms()
# Query PubChem by name and fill cid/iupac/smiles/formula; errors are
# printed and swallowed (best-effort enrichment).
def update_smiles_from_pubchem(self):
r = requests.get(
'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/%s/%s/property/IUPACName,CanonicalSMILES,IsomericSMILES,MolecularFormula/JSON'
% ('name', self.name), timeout=5)
j = r.json()
print(j)
try:
self.cid = j['PropertyTable']['Properties'][0]['CID']
self.iupac = j['PropertyTable']['Properties'][0]['IUPACName']
smiles = j['PropertyTable']['Properties'][0]['CanonicalSMILES']
py_mol = pybel.readstring('smi', smiles)
self.smiles = py_mol.write('can').strip()
self.formula = py_mol.formula
except Exception as e:
print(repr(e))
def update_formula(self):
self.formula = pybel.readstring('smi', self.smiles).formula
# An ionic liquid: a unique (cation, anion) pair.
class Molecule(Base):
__tablename__ = 'molecule'
__table_args__ = (UniqueConstraint('cation_id', 'anion_id', name='ion_id'),)
id = Column(Integer, primary_key=True)
code = Column(String(6))
name = Column(Text, unique=True)
cation_id = Column(Integer, ForeignKey(Ion.id))
anion_id = Column(Integer, ForeignKey(Ion.id))
formula = Column(Text)
popular = Column(Boolean, default=False)
selected = Column(Boolean, default=False)
fit = Column(Text)
cation = relationship('Ion', foreign_keys='Molecule.cation_id')
anion = relationship('Ion', foreign_keys='Molecule.anion_id')
datas = relationship('Data', lazy='dynamic')
def __repr__(self):
return '<Molecule: %i %s>' % (self.id, self.name)
# A single measured data point: property value at temperature t and
# (optionally) pressure p, attributed to a molecule and a paper.
class Data(Base):
__tablename__ = 'data'
id = Column(Integer, primary_key=True)
molecule_id = Column(Integer, ForeignKey(Molecule.id))
paper_id = Column(Integer, ForeignKey(Paper.id))
property_id = Column(Integer, ForeignKey(Property.id))
phase = Column(String(20))
t = Column(Float)
p = Column(Float, nullable=True)
value = Column(Float)
stderr = Column(Float)
molecule = relationship('Molecule', foreign_keys='Data.molecule_id')
paper = relationship('Paper', foreign_keys='Data.paper_id')
property = relationship('Property', foreign_keys='Data.property_id')
def __repr__(self):
# `or 0` guards against NULL t/p values in the formatted output.
return '<Data: %s: %.1f %.1f %f>' % (self.property.name, self.t or 0, self.p or 0, self.value)
|
999,610 | e2fa8c83d258b204cc120301eae1faac267d8756 | n = float(input())
# Round-half-up: a fractional part of 0.5 or more rounds away from zero
# (assumes non-negative input — TODO confirm; int() truncates toward zero,
# so negatives would not round as expected).
if (n - int(n)) >= 0.5:
print(int(n) + 1)
else:
print(int(n))
|
999,611 | d0f2c31348babcf527b5aebf40aab9ec85bfc8e5 | #!/usr/bin/python3 -u
# Tango device-server launcher: registers the Zaber controller and axis
# device classes and hands control to the Tango event loop.
# NOTE(review): the `os` import is unused.
from tango.server import run
import os
from ZaberTMMAxis import ZaberTMMAxis
from ZaberTMMCtrl import ZaberTMMCtrl
# Run ZaberTMMCtrl and ZaberTMMAxis
run([ZaberTMMCtrl, ZaberTMMAxis]) |
999,612 | c345d59cee43bc166bf3570dc83e0d73b711acf5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Thin interactive wrapper around `aws cloudformation create-change-set`:
# prompts for the two required parameters via the shared helper.
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudformation/create-change-set.html
if __name__ == '__main__':
"""
delete-change-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudformation/delete-change-set.html
describe-change-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudformation/describe-change-set.html
execute-change-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudformation/execute-change-set.html
list-change-sets : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudformation/list-change-sets.html
"""
# Help text shown to the user; NOTE(review): "stackâs" below looks like a
# UTF-8 mojibake of "stack's" — left untouched because it is a runtime
# string; fix at the source encoding if confirmed.
parameter_display_string = """
# stack-name : The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates the change set by comparing this stackâs information with the information that you submit, such as a modified template or different parameter input values.
# change-set-name : The name of the change set. The name must be unique among all change sets that are associated with the specified stack.
A change set name can contain only alphanumeric, case sensitive characters and hyphens. It must start with an alphabetic character and cannot exceed 128 characters.
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_two_parameter("cloudformation", "create-change-set", "stack-name", "change-set-name", add_option_dict)
|
999,613 | 7a5325e00f0a316ddd27e5cdc3e3e03ac771a339 | '''
272. Closest Binary Search Tree Value II
Given the root of a binary search tree, a target value, and an integer k, return the k values in the BST that are closest to the target. You may return the answer in any order.
You are guaranteed to have only one unique set of k values in the BST that are closest to the target.
Example 1:
Example 1:
4
/ \
2 5
/ \
1 3
Input: root = [4,2,5,1,3], target = 3.714286, k = 2
Output: [4,3]
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
# Sliding window over the inorder (sorted) traversal: keep a deque of the
# k values seen so far that are closest to target; once the window is
# full, a new value can only displace the leftmost (smallest) entry.
# NOTE(review): relies on deque / List / TreeNode being supplied by the
# judge environment — this snippet has no imports; the annotated return
# type is List[int] but a deque is returned.
def closestKValues(self, root: TreeNode, target: float, k: int) -> List[int]:
self.ans = deque()
self.inorder(root, target, k)
return self.ans
# Iterative inorder traversal maintaining the k-closest window.
def inorder(self, root, target, k):
stack = []
node = root
while node or stack:
while node:
stack.append(node)
node = node.left
node = stack.pop()
self.ans.append(node.val)
if len(self.ans) > k:
# Evict whichever window end is farther from target.
if abs(self.ans[-1]-target) < abs(self.ans[0]-target):
self.ans.popleft()
else:
self.ans.pop()
node = node.right |
999,614 | 158e68668b2977e277f87a9f23b10dc703aad1db | import json
import pprint as pp
import math
#Authors: Evan Bluhm, Alex French, Elizabeth Gass, Samuel Gass, Margaret Yim
class StarSolver:
    """A* search over a JSON-defined maze graph (nodes keyed by id).

    NOTE: `graph` is a class attribute shared by all instances, matching the
    original design; assign an instance attribute to isolate solvers.
    """
    graph = {}

    def get_node_with_min_f_score(self, node_set):
        """Return the node in *node_set* (dict id -> node) with the lowest f score.

        BUGFIX: was a bare function (missing self, so the call inside
        solveTheMaze would NameError) and used math.Inf, which does not exist
        (math.inf is the correct spelling).
        """
        best_node = None
        min_value = math.inf
        for n in node_set.values():
            if n.f_score < min_value:
                best_node = n
                min_value = n.f_score
        return best_node

    def solveTheMaze(self, start_node, goal_node):
        """Run A* from start_node to goal_node.

        Returns the list of node ids along the cheapest path (start first),
        or None when the goal is unreachable.  Completes the pseudocode left
        in the original and fixes two bugs: gScore[start] must be 0 (it was
        set to math.inf) and the loop condition `open_set != {}` could never
        become false (a set never equals a dict literal).
        """
        open_set = {start_node.id: start_node}
        closed_set = {}
        start_node.exact_cost_to = 0
        start_node.f_score = self.heuristic_cost_estimate(start_node, goal_node)
        while open_set:
            current_node = self.get_node_with_min_f_score(open_set)
            if current_node.id == goal_node.id:
                return self.reconstruct_path(current_node)
            del open_set[current_node.id]
            closed_set[current_node.id] = current_node
            for neighbor_id in current_node.neighbors:
                if neighbor_id in closed_set:
                    continue  # already fully evaluated
                neighbor = self.graph[neighbor_id]
                tentative_g = current_node.exact_cost_to + self.dist_between(current_node, neighbor)
                if neighbor_id not in open_set:
                    open_set[neighbor_id] = neighbor  # discover a new node
                elif tentative_g >= neighbor.exact_cost_to:
                    continue  # not a better path
                # Best path to this neighbor so far — record it.
                neighbor.came_from = current_node
                neighbor.exact_cost_to = tentative_g
                neighbor.f_score = tentative_g + self.heuristic_cost_estimate(neighbor, goal_node)
        return None

    def dist_between(self, a, b):
        """Exact edge cost: Euclidean distance between two adjacent nodes."""
        return math.hypot(a.x - b.x, a.y - b.y)

    def heuristic_cost_estimate(self, neighbor, goal):
        """Admissible heuristic: straight-line distance to the goal."""
        return math.hypot(neighbor.x - goal.x, neighbor.y - goal.y)

    def reconstruct_path(self, node):
        """Follow came_from links back to the start; return ids start -> goal."""
        path = []
        while node is not None:
            path.append(node.id)
            node = node.came_from
        path.reverse()
        return path

    def read_maze_file(self, filename):
        """Load nodes from a JSON maze file into self.graph."""
        with open(filename) as infile:
            graph_json = json.load(infile)
        for n in graph_json["nodes"]:
            self.graph[n["id"]] = GraphNode(n["id"], n["pos"][0], n["pos"][1], n["neighbors"])

    def print_graph_to_text(self):
        """Dump every node with its coordinates and neighbor list."""
        for node in self.graph.values():
            print("node %d: " % node.id)
            print(" x: %d, y: %d" % (node.x, node.y))
            print(" neighbors: ", end="")
            pp.pprint(node.neighbors)
class GraphNode:
    """A maze node: position, adjacency list, and A* bookkeeping fields."""

    def __init__(self, id, x, y, neighbors):
        self.id = id
        self.x = x
        self.y = y
        self.neighbors = neighbors
        self.came_from = None           # predecessor on the best known path
        self.exact_cost_to = math.inf   # g score (cost from the start)
        # BUGFIX: the solver reads .f_score, but this field was initialised
        # as .f_cost; define f_score (and keep f_cost for compatibility).
        self.f_score = math.inf
        self.f_cost = math.inf

    def set_came_from(self, came_from):
        self.came_from = came_from

    def set_exact_cost_to(self, exact_cost_to):
        self.exact_cost_to = exact_cost_to
if __name__ == "__main__":
# Smoke test: load the sample maze and dump the parsed graph.
ss = StarSolver()
ss.read_maze_file('input_1.json')
ss.print_graph_to_text()
|
999,615 | 594e43b10b57189fe81d909bad79f05f7dfece44 | import pandas as pd
import quandl, math, datetime
import numpy as np
from sklearn import preprocessing,svm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
style.use('ggplot')
# Stock-price forecasting tutorial: fit a LinearRegression on engineered
# features from the Quandl WIKI/GOOGL dataset and plot a ~1% horizon
# forecast appended after the last known date.
# cross validation is used to shuffle the data so that the data do not get biased
# Preprocessing lets you have the features between 1 adnd -1 for better accuracy and time complexity.
# Through svm we can also do regression (svm.SVR())
df = quandl.get('WIKI/GOOGL')
df =df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
# Volatility and daily-change percentage features.
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
# Updating our data set
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_col = 'Adj. Close' # To be predicted
df.fillna(-99999, inplace=True) # Replacing all nan values with outlier.
forecast_out = int(math.ceil(0.01 * len(df))) # Testing data
# Label = the close price forecast_out rows into the future.
df['label'] = df[forecast_col].shift(-forecast_out)
# Defined features will be capital x and labels will be small y
# NOTE(review): positional axis in drop(['label'], 1) is deprecated in
# modern pandas; use axis=1 explicitly.
X = np.array(df.drop(['label'], 1))
X = preprocessing.scale(X)
'''
preprcessing.scale : To remove biasness. For example
x = (1,40,4000,400000) is very biased towards higher
values.GEnerally required for spatial data sets.
'''
X = X[:-forecast_out]
X_lately = X[-forecast_out:]
df.dropna(inplace=True)
y = np.array(df['label'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = LinearRegression(n_jobs = -1) # Another Algo : svm.SVR() : N-JOBS enables to run all processing power in paraller.(Threading)
clf.fit(X_train, y_train)
# NOTE(review): the model is trained, pickled, then immediately reloaded —
# the round-trip only demonstrates persistence.
with open('linearregression.pickle', 'wb') as f: # to prevent retraining model again and again we use pickel and save it into a file.
pickle.dump(clf, f)
pickle_in = open('linearregression.pickle','rb')
clf = pickle.load(pickle_in)
accuracy = clf.score(X_test, y_test)
forecast_set = clf.predict(X_lately) # gives the y predicted values
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp() # timestamp is the pandas eqivalent of python's datetime.
one_day = 86400
next_unix = last_unix + one_day
# Append one forecast row per future day, NaN in all feature columns.
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix) # to convert timestamp into datetime
next_unix += one_day
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i] # df.loc['label'] extracts the row with the particular label
# df.loc[next_date] is used for putting the dates on the axis
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
|
999,616 | 3ea65a0da4393be49b9412a0db31c1cfac6a8905 | import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.base import BaseEstimator
from typing import Text, Dict
def evaluate(df: pd.DataFrame, target_column: Text, clf: BaseEstimator) -> Dict:
    """Evaluate classifier on a dataset

    Args:
        df {pandas.DataFrame}: dataset
        target_column {Text}: target column name
        clf {sklearn.base.BaseEstimator}: classifier (trained model)

    Returns:
        Dict: Dict of reported metrics
            'f1' - macro-averaged F1 score
            'cm' - Confusion Matrix (rows = actual, columns = predicted)
            'actual' - true values for test data
            'predicted' - predicted values for test data
    """
    # Split the frame into the integer-encoded target and float features.
    y_test = df.loc[:, target_column].values.astype('int32')
    X_test = df.drop(target_column, axis=1).values.astype('float32')
    prediction = clf.predict(X_test)
    f1 = f1_score(y_true=y_test, y_pred=prediction, average='macro')
    # BUGFIX: confusion_matrix expects (y_true, y_pred); the arguments were
    # swapped, which transposes the matrix for asymmetric errors.
    cm = confusion_matrix(y_test, prediction)
    return {
        'f1': f1,
        'cm': cm,
        'actual': y_test,
        'predicted': prediction
    }
|
999,617 | 47d345ab6c8fd9f6f913956aaf7108b5d08810f1 | """Tests xonsh tools."""
import datetime as dt
import os
import pathlib
import stat
import warnings
from tempfile import TemporaryDirectory
import pytest
from xonsh import __version__
from xonsh.lexer import Lexer
from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS, PYTHON_VERSION_INFO
from xonsh.pytest.tools import skip_if_on_windows
from xonsh.tools import (
EnvPath,
all_permutations,
always_false,
always_true,
argvquote,
balanced_parens,
bool_or_int_to_str,
bool_or_none_to_str,
bool_to_str,
check_for_partial_string,
check_quotes,
deprecated,
dynamic_cwd_tuple_to_str,
ends_with_colon_token,
ensure_slice,
ensure_string,
ensure_timestamp,
env_path_to_str,
escape_windows_cmd_string,
executables_in,
expand_case_matching,
expand_path,
expandvars,
find_next_break,
get_line_continuation,
get_logical_line,
get_portions,
iglobpath,
is_balanced,
is_bool,
is_bool_or_int,
is_bool_or_none,
is_callable,
is_completion_mode,
is_completions_display_value,
is_dynamic_cwd_width,
is_env_path,
is_float,
is_int,
is_int_as_str,
is_logfile_opt,
is_nonstring_seq_of_strings,
is_path,
is_regex,
is_slice_as_str,
is_string,
is_string_or_callable,
is_string_seq,
is_tok_color_dict,
is_writable_file,
logfile_opt_to_str,
path_to_str,
pathsep_to_seq,
pathsep_to_set,
pathsep_to_upper_seq,
register_custom_style,
replace_logical_line,
seq_to_pathsep,
seq_to_upper_pathsep,
set_to_pathsep,
simple_random_choice,
str_to_env_path,
str_to_path,
subexpr_before_unbalanced,
subexpr_from_unbalanced,
subproc_toks,
swap_values,
to_bool,
to_bool_or_int,
to_bool_or_none,
to_completion_mode,
to_completions_display_value,
to_dynamic_cwd_tuple,
to_int_or_none,
to_logfile_opt,
)
# Shared fixtures for the subproc_toks tests below.
LEXER = Lexer()
LEXER.build()
INDENT = " "
TOOLS_ENV = {"EXPAND_ENV_VARS": True, "XONSH_ENCODING_ERRORS": "strict"}
ENCODE_ENV_ONLY = {"XONSH_ENCODING_ERRORS": "strict"}
PATHEXT_ENV = {"PATHEXT": [".COM", ".EXE", ".BAT"]}
def test_random_choice():
lst = [1, 2, 3]
r = simple_random_choice(lst)
assert r in lst
# Oversized populations must be rejected rather than sampled.
with pytest.raises(ValueError):
simple_random_choice(range(1010101))
# --- subproc_toks: plain commands are wrapped in ![...] ---
def test_subproc_toks_x():
exp = "![x]"
obs = subproc_toks("x", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_ls_l():
exp = "![ls -l]"
obs = subproc_toks("ls -l", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_git():
s = 'git commit -am "hello doc"'
exp = f"![{s}]"
obs = subproc_toks(s, lexer=LEXER, returnline=True)
assert exp == obs
# Trailing semicolons and newlines stay outside the wrapper.
def test_subproc_toks_git_semi():
s = 'git commit -am "hello doc"'
exp = f"![{s}];"
obs = subproc_toks(s + ";", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_git_nl():
s = 'git commit -am "hello doc"'
exp = f"![{s}]\n"
obs = subproc_toks(s + "\n", lexer=LEXER, returnline=True)
assert exp == obs
# --- macros (trailing `!` argument form) ---
def test_bash_macro():
s = "bash -c ! export var=42; echo $var"
exp = f"![{s}]\n"
obs = subproc_toks(s + "\n", lexer=LEXER, returnline=True)
assert exp == obs
def test_python_macro():
s = 'python -c ! import os; print(os.path.abspath("/"))'
exp = f"![{s}]\n"
obs = subproc_toks(s + "\n", lexer=LEXER, returnline=True)
assert exp == obs
# --- indentation handling (mincol given and omitted) ---
def test_subproc_toks_indent_ls():
s = "ls -l"
exp = INDENT + f"![{s}]"
obs = subproc_toks(INDENT + s, mincol=len(INDENT), lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_indent_ls_nl():
s = "ls -l"
exp = INDENT + f"![{s}]\n"
obs = subproc_toks(
INDENT + s + "\n", mincol=len(INDENT), lexer=LEXER, returnline=True
)
assert exp == obs
def test_subproc_toks_indent_ls_no_min():
s = "ls -l"
exp = INDENT + f"![{s}]"
obs = subproc_toks(INDENT + s, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_indent_ls_no_min_nl():
s = "ls -l"
exp = INDENT + f"![{s}]\n"
obs = subproc_toks(INDENT + s + "\n", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_indent_ls_no_min_semi():
s = "ls"
exp = INDENT + f"![{s}];"
obs = subproc_toks(INDENT + s + ";", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_indent_ls_no_min_semi_nl():
s = "ls"
exp = INDENT + f"![{s}];\n"
obs = subproc_toks(INDENT + s + ";\n", lexer=LEXER, returnline=True)
assert exp == obs
# --- trailing comments stay outside the wrapper ---
def test_subproc_toks_ls_comment():
s = "ls -l"
com = " # lets list"
exp = f"![{s}]{com}"
obs = subproc_toks(s + com, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_ls_42_comment():
s = "ls 42"
com = " # lets list"
exp = f"![{s}]{com}"
obs = subproc_toks(s + com, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_ls_str_comment():
s = 'ls "wakka"'
com = " # lets list"
exp = f"![{s}]{com}"
obs = subproc_toks(s + com, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_indent_ls_comment():
ind = " "
s = "ls -l"
com = " # lets list"
exp = f"{ind}![{s}]{com}"
obs = subproc_toks(ind + s + com, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_indent_ls_str():
ind = " "
s = 'ls "wakka"'
com = " # lets list"
exp = f"{ind}![{s}]{com}"
obs = subproc_toks(ind + s + com, lexer=LEXER, returnline=True)
assert exp == obs
# --- mincol/maxcol select which of two commands on a line is wrapped ---
def test_subproc_toks_ls_l_semi_ls_first():
lsdl = "ls -l"
ls = "ls"
s = f"{lsdl}; {ls}"
exp = f"![{lsdl}]; {ls}"
obs = subproc_toks(s, lexer=LEXER, maxcol=6, returnline=True)
assert exp == obs
def test_subproc_toks_ls_l_semi_ls_second():
lsdl = "ls -l"
ls = "ls"
s = f"{lsdl}; {ls}"
exp = f"{lsdl}; ![{ls}]"
obs = subproc_toks(s, lexer=LEXER, mincol=7, returnline=True)
assert exp == obs
def test_subproc_toks_hello_mom_first():
fst = "echo 'hello'"
sec = "echo 'mom'"
s = f"{fst}; {sec}"
exp = f"![{fst}]; {sec}"
obs = subproc_toks(s, lexer=LEXER, maxcol=len(fst) + 1, returnline=True)
assert exp == obs
def test_subproc_toks_hello_mom_second():
fst = "echo 'hello'"
sec = "echo 'mom'"
s = f"{fst}; {sec}"
exp = f"{fst}; ![{sec}]"
obs = subproc_toks(s, lexer=LEXER, mincol=len(fst), returnline=True)
assert exp == obs
# --- unbalanced quotes must make subproc_toks give up (None) ---
def test_subproc_toks_hello_bad_leading_single_quotes():
obs = subproc_toks('echo "hello', lexer=LEXER, returnline=True)
assert obs is None
def test_subproc_toks_hello_bad_trailing_single_quotes():
obs = subproc_toks('echo hello"', lexer=LEXER, returnline=True)
assert obs is None
def test_subproc_toks_hello_bad_leading_triple_quotes():
obs = subproc_toks('echo """hello', lexer=LEXER, returnline=True)
assert obs is None
def test_subproc_toks_hello_bad_trailing_triple_quotes():
obs = subproc_toks('echo hello"""', lexer=LEXER, returnline=True)
assert obs is None
def test_subproc_toks_hello_mom_triple_quotes_nl():
s = 'echo """hello\nmom"""'
exp = f"![{s}]"
obs = subproc_toks(s, lexer=LEXER, returnline=True)
assert exp == obs
# --- comments, `not`, and parentheses ---
def test_subproc_toks_comment():
exp = None
obs = subproc_toks("# I am a comment", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_not():
exp = "not ![echo mom]"
obs = subproc_toks("not echo mom", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_paren():
exp = "(![echo mom])"
obs = subproc_toks("(echo mom)", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_paren_ws():
exp = "(![echo mom]) "
obs = subproc_toks("(echo mom) ", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_not_paren():
exp = "not (![echo mom])"
obs = subproc_toks("not (echo mom)", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_and_paren():
exp = "True and (![echo mom])"
obs = subproc_toks("True and (echo mom)", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_paren_and_paren():
exp = "(![echo a]) and (echo b)"
obs = subproc_toks("(echo a) and (echo b)", maxcol=9, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_semicolon_only():
exp = None
obs = subproc_toks(";", lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_pyeval():
s = "echo @(1+1)"
exp = f"![{s}]"
obs = subproc_toks(s, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_pyeval_multiline_string():
s = 'echo @("""hello\nmom""")'
exp = f"![{s}]"
obs = subproc_toks(s, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_twopyeval():
s = "echo @(1+1) @(40 + 2)"
exp = f"![{s}]"
obs = subproc_toks(s, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_pyeval_parens():
s = "echo @(1+1)"
inp = f"({s})"
exp = f"(![{s}])"
obs = subproc_toks(inp, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_twopyeval_parens():
s = "echo @(1+1) @(40+2)"
inp = f"({s})"
exp = f"(![{s}])"
obs = subproc_toks(inp, lexer=LEXER, returnline=True)
assert exp == obs
def test_subproc_toks_pyeval_nested():
s = "echo @(min(1, 42))"
exp = f"![{s}]"
obs = subproc_toks(s, lexer=LEXER, returnline=True)
assert exp == obs
# words that merely *contain* "and"/"or" must not be split as boolean operators
@pytest.mark.parametrize(
    "phrase",
    [
        "xandy",
        "xory",
        "xand",
        "andy",
        "xor",
        "ory",
        "x-and",
        "x-or",
        "and-y",
        "or-y",
        "x-and-y",
        "x-or-y",
        "in/and/path",
        "in/or/path",
    ],
)
def test_subproc_toks_and_or(phrase):
    s = "echo " + phrase
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True)
    assert exp == obs


def test_subproc_toks_pyeval_nested_parens():
    s = "echo @(min(1, 42))"
    inp = f"({s})"
    exp = f"(![{s}])"
    obs = subproc_toks(inp, lexer=LEXER, returnline=True)
    assert exp == obs


def test_subproc_toks_capstdout():
    # $() captured-stdout substitution stays inside the wrapper
    s = "echo $(echo bat)"
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True)
    assert exp == obs


def test_subproc_toks_capproc():
    # !() captured-object substitution stays inside the wrapper
    s = "echo !(echo bat)"
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True)
    assert exp == obs


def test_subproc_toks_pyeval_redirect():
    s = 'echo @("foo") > bar'
    inp = f"{s}"
    exp = f"![{s}]"
    obs = subproc_toks(inp, lexer=LEXER, returnline=True)
    assert exp == obs


# greedy=True wraps the full parenthesized expression rather than stopping early
def test_subproc_toks_greedy_parens():
    s = "(sort)"
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True)
    assert exp == obs


def test_subproc_toks_greedy_parens_inp():
    s = "(sort) < input.txt"
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True)
    assert exp == obs


def test_subproc_toks_greedy_parens_statements():
    s = '(echo "abc"; sleep 1; echo "def")'
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True)
    assert exp == obs


def test_subproc_toks_greedy_parens_statements_with_grep():
    s = '(echo "abc"; sleep 1; echo "def") | grep'
    exp = f"![{s}]"
    obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True)
    assert exp == obs
LOGICAL_LINE_CASES = [
("""x = 14 + 2""", 0, "x = 14 + 2", 1),
(
"""x = \\
14 \\
+ 2
""",
0,
"x = 14 + 2",
3,
),
(
"""y = 16
14 \\
+ 2
""",
1,
"14 + 2",
2,
),
(
'''x = """wow
mom"""
''',
0,
'x = """wow\nmom"""',
2,
),
# test from start
(
"echo --option1 value1 \\\n"
" --option2 value2 \\\n"
" --optionZ valueZ",
0,
"echo --option1 value1 --option2 value2 --optionZ valueZ",
3,
),
# test from second line
(
"echo --option1 value1 \\\n"
" --option2 value2 \\\n"
" --optionZ valueZ",
1,
"echo --option1 value1 --option2 value2 --optionZ valueZ",
3,
),
('"""\n', 0, '"""', 1),
]
@pytest.mark.parametrize("src, idx, exp_line, exp_n", LOGICAL_LINE_CASES)
def test_get_logical_line(src, idx, exp_line, exp_n, xession):
    """get_logical_line joins backslash-continued / multiline physical lines
    into one logical line and reports how many physical lines it spans."""
    lines = src.splitlines()
    # the third element (start index) is not under test here
    line, n, _ = get_logical_line(lines, idx)
    assert exp_line == line
    assert exp_n == n
@pytest.mark.parametrize("src, idx, exp_line, exp_n", LOGICAL_LINE_CASES)
def test_replace_logical_line(src, idx, exp_line, exp_n, xession):
    # round-trip: splice the logical line back in, then check that stripping
    # the inserted line continuations reproduces the continuation-free source
    lines = src.splitlines()
    logical = exp_line
    # rewind idx to the first physical line of the logical line
    while idx > 0 and lines[idx - 1].endswith("\\"):
        idx -= 1
    replace_logical_line(lines, logical, idx, exp_n)
    exp = src.replace("\\\n", "").strip()
    lc = get_line_continuation() + "\n"
    obs = "\n".join(lines).replace(lc, "").strip()
    assert exp == obs


# check_quotes: strings (any prefix/quote style) and non-string code are OK
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("f(1,10),x.y", True),
        ('"x"', True),
        ("'y'", True),
        ('b"x"', True),
        ("r'y'", True),
        ("f'z'", True),
        ('"""hello\nmom"""', True),
    ],
)
def test_check_quotes(inp, exp):
    obs = check_quotes(inp)
    assert exp is obs


@pytest.mark.parametrize("inp", ["f(1,10),x.y"])
def test_is_balanced_parens(inp):
    obs = is_balanced(inp, "(", ")")
    assert obs
# Fix: the original list was missing a comma, so "f(1,x." "f((1,10),x.y"
# concatenated into a single string and the third case was never exercised.
@pytest.mark.parametrize("inp", ["f(x.", "f(1,x.", "f((1,10),x.y"])
def test_is_not_balanced_parens(inp):
    """Each input has an unmatched '(' and must be reported as unbalanced."""
    obs = is_balanced(inp, "(", ")")
    assert not obs
# subexpr_from_unbalanced: the sub-expression *after* the last unmatched open paren
@pytest.mark.parametrize(
    "inp, exp", [("f(x.", "x."), ("f(1,x.", "x."), ("f((1,10),x.y", "x.y")]
)
def test_subexpr_from_unbalanced_parens(inp, exp):
    obs = subexpr_from_unbalanced(inp, "(", ")")
    assert exp == obs


# subexpr_before_unbalanced: the callable name *before* the unmatched open paren
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("f(x.", "f"),
        ("f(1,x.", "f"),
        ("f((1,10),x.y", "f"),
        ("wakka().f((1,10),x.y", ".f"),
        ("wakka(f((1,10),x.y", "f"),
        ("wakka(jawakka().f((1,10),x.y", ".f"),
        ("wakka(jawakka().f((1,10),x.y)", "wakka"),
    ],
)
def test_subexpr_before_unbalanced_parens(inp, exp):
    obs = subexpr_before_unbalanced(inp, "(", ")")
    assert exp == obs


@pytest.mark.parametrize(
    "line, exp",
    [
        ("", True),
        ("wakka jawaka", True),
        ("rm *; echo hello world", True),
        ("()", True),
        ("f()", True),
        ("echo * yo ; echo eggs", True),
        ("(", False),
        (")", False),
        ("(cmd;", False),
        ("cmd;)", False),
    ],
)
def test_balanced_parens(line, exp):
    obs = balanced_parens(line, lexer=LEXER)
    if exp:
        assert obs
    else:
        assert not obs


# lines ending in a block-opening colon (comments and strings must not confuse it)
@pytest.mark.parametrize(
    "line, exp",
    [
        ("if 1:", True),
        ("elif 2: #comment", True),
        ("elif 3: #colon comment:", True),
        ("else: ", True),
        ("for s in '#not-a-comment':", True),
        ("", False),
        ("#comment", False),
        ("#colon comment:", False),
        ("print('hello')", False),
        ("print('hello') #colon comment:", False),
    ],
)
def test_ends_with_colon_token(line, exp):
    obs = ends_with_colon_token(line, lexer=LEXER)
    if exp:
        assert obs
    else:
        assert not obs


# find_next_break: column of the next statement separator (&&, ||, ;, !) after mincol
@pytest.mark.parametrize(
    "line, mincol, exp",
    [
        ("ls && echo a", 0, 4),
        ("ls && echo a", 6, None),
        ("ls && echo a || echo b", 6, 14),
        ("(ls) && echo a", 1, 4),
        ("not ls && echo a", 0, 8),
        ("not (ls) && echo a", 0, 8),
        ("bash -c ! export var=42; echo $var", 0, 35),
        ('python -c ! import os; print(os.path.abspath("/"))', 0, 51),
        ("echo * yo ; echo eggs", 0, 11),
    ],
)
def test_find_next_break(line, mincol, exp):
    obs = find_next_break(line, mincol=mincol, lexer=LEXER)
    assert exp == obs
# strict type predicates: exact type checks, no coercion from str
@pytest.mark.parametrize(
    "inp, exp",
    [
        (42, True),
        (42.0, False),
        ("42", False),
        ("42.0", False),
        ([42], False),
        ([], False),
        (None, False),
        ("", False),
    ],
)
def test_is_int(inp, exp):
    obs = is_int(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        (42.0, True),
        (42.000101010010101010101001010101010001011100001101101011100, True),
        (42, False),
        ("42", False),
        ("42.0", False),
        ([42], False),
        ([], False),
        (None, False),
        ("", False),
        (False, False),
        (True, False),
    ],
)
def test_is_float(inp, exp):
    obs = is_float(inp)
    assert exp == obs


def test_is_string_true():
    assert is_string("42.0")


def test_is_string_false():
    assert not is_string(42.0)


def test_is_callable_true():
    assert is_callable(lambda: 42.0)


def test_is_callable_false():
    assert not is_callable(42.0)
@pytest.mark.parametrize("inp", ["42.0", lambda: 42.0])
def test_is_string_or_callable_true(inp):
    assert is_string_or_callable(inp)


def test_is_string_or_callable_false():
    # Fix: the original asserted on is_string(), duplicating test_is_string_false
    # instead of testing the function named by this test.
    assert not is_string_or_callable(42.0)
@pytest.mark.parametrize("inp", [42, "42"])
def test_always_true(inp):
    assert always_true(inp)


@pytest.mark.parametrize("inp,exp", [(42, 42), ("42", 42), ("None", None)])
def test_to_optional_int(inp, exp):
    assert to_int_or_none(inp) == exp


@pytest.mark.parametrize("inp", [42, "42"])
def test_always_false(inp):
    assert not always_false(inp)


@pytest.mark.parametrize("inp, exp", [(42, "42"), ("42", "42")])
def test_ensure_string(inp, exp):
    obs = ensure_string(inp)
    assert exp == obs


# os.pathsep-joined strings <-> sets / sequences round-trip helpers
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("", set()),
        ("a", {"a"}),
        (os.pathsep.join(["a", "b"]), {"a", "b"}),
        (os.pathsep.join(["a", "b", "c"]), {"a", "b", "c"}),
    ],
)
def test_pathsep_to_set(inp, exp):
    obs = pathsep_to_set(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        (set(), ""),
        ({"a"}, "a"),
        ({"a", "b"}, os.pathsep.join(["a", "b"])),
        ({"a", "b", "c"}, os.pathsep.join(["a", "b", "c"])),
    ],
)
def test_set_to_pathsep(inp, exp):
    # sort only when order would otherwise be nondeterministic
    obs = set_to_pathsep(inp, sort=(len(inp) > 1))
    assert exp == obs


@pytest.mark.parametrize("inp", ["42.0", ["42.0"]])
def test_is_string_seq_true(inp):
    assert is_string_seq(inp)


def test_is_string_seq_false():
    assert not is_string_seq([42.0])


def test_is_nonstring_seq_of_strings_true():
    assert is_nonstring_seq_of_strings(["42.0"])


def test_is_nonstring_seq_of_strings_false():
    assert not is_nonstring_seq_of_strings([42.0])


@pytest.mark.parametrize(
    "inp, exp",
    [
        ("", []),
        ("a", ["a"]),
        (os.pathsep.join(["a", "b"]), ["a", "b"]),
        (os.pathsep.join(["a", "b", "c"]), ["a", "b", "c"]),
    ],
)
def test_pathsep_to_seq(inp, exp):
    obs = pathsep_to_seq(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        ([], ""),
        (["a"], "a"),
        (["a", "b"], os.pathsep.join(["a", "b"])),
        (["a", "b", "c"], os.pathsep.join(["a", "b", "c"])),
    ],
)
def test_seq_to_pathsep(inp, exp):
    obs = seq_to_pathsep(inp)
    assert exp == obs


# *_upper_* variants additionally upper-case every element
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("", []),
        ("a", ["A"]),
        (os.pathsep.join(["a", "B"]), ["A", "B"]),
        (os.pathsep.join(["A", "b", "c"]), ["A", "B", "C"]),
    ],
)
def test_pathsep_to_upper_seq(inp, exp):
    obs = pathsep_to_upper_seq(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        ([], ""),
        (["a"], "A"),
        (["a", "b"], os.pathsep.join(["A", "B"])),
        (["a", "B", "c"], os.pathsep.join(["A", "B", "C"])),
    ],
)
def test_seq_to_upper_pathsep(inp, exp):
    obs = seq_to_upper_pathsep(inp)
    assert exp == obs


# is_path accepts pathlib.Path objects only, not plain strings
@pytest.mark.parametrize(
    "inp, exp", [(pathlib.Path("/home/wakka"), True), ("/home/jawaka", False)]
)
def test_is_path(inp, exp):
    obs = is_path(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        ("/home/wakka", False),
        (["/home/jawaka"], False),
        (EnvPath(["/home/jawaka"]), True),
        (EnvPath(["jawaka"]), True),
        (EnvPath(b"jawaka:wakka"), True),
    ],
)
def test_is_env_path(inp, exp):
    obs = is_env_path(inp)
    assert exp == obs


@pytest.mark.parametrize("inp, exp", [("/tmp", pathlib.Path("/tmp")), ("", None)])
def test_str_to_path(inp, exp):
    obs = str_to_path(inp)
    assert exp == obs
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("/home/wakka", ["/home/wakka"]),
        ("/home/wakka" + os.pathsep + "/home/jawaka", ["/home/wakka", "/home/jawaka"]),
        (b"/home/wakka", ["/home/wakka"]),
    ],
)
def test_str_to_env_path(inp, exp):
    obs = str_to_env_path(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        pytest.param(
            pathlib.Path("///tmp"),
            "/tmp",
            marks=pytest.mark.skipif(
                ON_WINDOWS and PYTHON_VERSION_INFO > (3, 11),
                reason="Python 3.12 on windows changed its behavior of resolving additional slashes in paths",
            ),
        ),
    ],
)
def test_path_to_str(inp, exp):
    obs = path_to_str(inp)
    if ON_WINDOWS:
        # normalize separators for the platform before comparing
        exp = exp.replace("/", "\\")
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        (["/home/wakka"], "/home/wakka"),
        (["/home/wakka", "/home/jawaka"], "/home/wakka" + os.pathsep + "/home/jawaka"),
    ],
)
def test_env_path_to_str(inp, exp):
    obs = env_path_to_str(inp)
    assert exp == obs


# EnvPath + list concatenation yields an EnvPath regardless of operand order
@pytest.mark.parametrize(
    "left, right, exp",
    [
        (
            EnvPath(["/home/wakka"]),
            ["/home/jawaka"],
            EnvPath(["/home/wakka", "/home/jawaka"]),
        ),
        (["a"], EnvPath(["b"]), EnvPath(["a", "b"])),
        (EnvPath(["c"]), EnvPath(["d"]), EnvPath(["c", "d"])),
    ],
)
def test_env_path_add(left, right, exp):
    obs = left + right
    assert is_env_path(obs)
    assert exp == obs
def test_env_path_add_replace_no_dupes_front_replace_existing():
    # Test replaces without dupes when added to front when adding existing entry
    path = EnvPath(
        [os.pathsep.join(["home", "wakka"]), os.pathsep.join(["home", "wakka", "bin"])]
    )
    path.add(os.pathsep.join(["home", "wakka", "bin"]), front=True, replace=True)
    assert path == [
        os.pathsep.join(["home", "wakka", "bin"]),
        os.pathsep.join(["home", "wakka"]),
    ]


def test_env_path_add_replace_no_dupes_front_replace_multiple():
    # Test replaces without dupes when added to front when multiple existing occurrences
    path = EnvPath(
        [
            os.pathsep.join(["home", "wakka"]),
            os.pathsep.join(["home", "wakka", "bin"]),
            os.pathsep.join(["home", "wakka", "bin"]),
        ]
    )
    path.add(os.pathsep.join(["home", "wakka", "bin"]), front=True, replace=True)
    assert path == [
        os.pathsep.join(["home", "wakka", "bin"]),
        os.pathsep.join(["home", "wakka"]),
    ]


def test_env_path_add_replace_no_dupes_back_replace_multiple():
    # Test replaces without dupes when not added to front
    path = EnvPath(
        [
            os.pathsep.join(["home", "wakka"]),
            os.pathsep.join(["home", "wakka", "bin"]),
            os.pathsep.join(["home", "wakka", "bin"]),
        ]
    )
    path.add(os.pathsep.join(["home", "wakka", "bin"]), front=False, replace=True)
    assert path == [
        os.pathsep.join(["home", "wakka"]),
        os.pathsep.join(["home", "wakka", "bin"]),
    ]
def test_env_path_add_pathlib():
    """EnvPath.add accepts a pathlib.Path and still de-duplicates entries."""
    # Fix: removed a stray no-op `os.pathsep.join([...])` expression statement
    # whose result was discarded.
    path = EnvPath(
        [
            os.pathsep.join(["home", "wakka"]),
            os.pathsep.join(["home", "wakka", "bin"]),
            os.pathsep.join(["home", "wakka", "bin"]),
        ]
    )
    path.add(
        pathlib.Path(os.pathsep.join(["home", "wakka", "bin"])),
        front=False,
        replace=True,
    )
    assert path == [
        os.pathsep.join(["home", "wakka"]),
        os.pathsep.join(["home", "wakka", "bin"]),
    ]
# helper
def expand(path):
    """Expand ``$VAR`` environment references and a leading ``~`` in *path*."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
# __getitem__ expands ~ and $VARs only when the env enables EXPAND_ENV_VARS
@pytest.mark.parametrize("env", [TOOLS_ENV, ENCODE_ENV_ONLY])
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("xonsh_dir", "xonsh_dir"),
        (".", "."),
        ("../", "../"),
        ("~/", "~/"),
        (b"~/../", "~/../"),
    ],
)
def test_env_path_getitem(inp, exp, xession, env):
    xession.env = env
    obs = EnvPath(inp)[0]  # call to __getitem__
    if env.get("EXPAND_ENV_VARS"):
        assert expand(exp) == obs
    else:
        assert exp == obs
@pytest.mark.parametrize("env", [TOOLS_ENV, ENCODE_ENV_ONLY])
@pytest.mark.parametrize(
    "inp, exp",
    [
        (
            os.pathsep.join(["xonsh_dir", "../", ".", "~/"]),
            ["xonsh_dir", "../", ".", "~/"],
        ),
        (
            "/home/wakka" + os.pathsep + "/home/jakka" + os.pathsep + "~/",
            ["/home/wakka", "/home/jakka", "~/"],
        ),
    ],
)
def test_env_path_multipath(inp, exp, xession, env):
    # cases that involve path-separated strings
    # Cleanup: obs was computed identically in both branches (hoisted), and the
    # redundant [i for i in ...] copies were dropped (list() / direct compare).
    xession.env = env
    obs = list(EnvPath(inp))
    if env == TOOLS_ENV:
        # expansion enabled: every entry comes back user/var-expanded
        assert [expand(i) for i in exp] == obs
    else:
        assert exp == obs
@pytest.mark.parametrize(
    "inp, exp",
    [
        (pathlib.Path("/home/wakka"), ["/home/wakka".replace("/", os.sep)]),
        (pathlib.Path("~/"), ["~"]),
        (pathlib.Path("."), ["."]),
        (
            ["/home/wakka", pathlib.Path("/home/jakka"), "~/"],
            ["/home/wakka", "/home/jakka".replace("/", os.sep), "~/"],
        ),
        (["/home/wakka", pathlib.Path("../"), "../"], ["/home/wakka", "..", "../"]),
        (["/home/wakka", pathlib.Path("~/"), "~/"], ["/home/wakka", "~", "~/"]),
    ],
)
def test_env_path_with_pathlib_path_objects(inp, exp, xession):
    """EnvPath accepts pathlib.Path entries and expands them on iteration."""
    xession.env = TOOLS_ENV
    # iterate over EnvPath to acquire all expanded paths
    # (list() replaces the redundant [i for i in ...] copy)
    obs = list(EnvPath(inp))
    assert [expand(i) for i in exp] == obs
# neither a bare string nor a float-containing list qualifies
@pytest.mark.parametrize("inp", ["42.0", [42.0]])
def test_is_nonstring_seq_of_strings_false1(inp):
    assert not is_nonstring_seq_of_strings(inp)
# helper
def mkpath(*paths):
    """Build os-dependent paths properly (rooted at the path separator)."""
    joined = os.sep.join(paths)
    return f"{os.sep}{joined}"
# EnvPath slicing behaves like list slicing (values built via mkpath helper)
@pytest.mark.parametrize(
    "inp, exp",
    [
        (
            [mkpath("home", "wakka"), mkpath("home", "jakka"), mkpath("home", "yakka")],
            [mkpath("home", "wakka"), mkpath("home", "jakka")],
        )
    ],
)
def test_env_path_slice_get_all_except_last_element(inp, exp):
    obs = EnvPath(inp)[:-1]
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        (
            [mkpath("home", "wakka"), mkpath("home", "jakka"), mkpath("home", "yakka")],
            [mkpath("home", "jakka"), mkpath("home", "yakka")],
        )
    ],
)
def test_env_path_slice_get_all_except_first_element(inp, exp):
    obs = EnvPath(inp)[1:]
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp_a, exp_b",
    [
        (
            [
                mkpath("home", "wakka"),
                mkpath("home", "jakka"),
                mkpath("home", "yakka"),
                mkpath("home", "takka"),
            ],
            [mkpath("home", "wakka"), mkpath("home", "yakka")],
            [mkpath("home", "jakka"), mkpath("home", "takka")],
        )
    ],
)
def test_env_path_slice_path_with_step(inp, exp_a, exp_b):
    obs_a = EnvPath(inp)[0::2]
    assert exp_a == obs_a
    obs_b = EnvPath(inp)[1::2]
    assert exp_b == obs_b


@pytest.mark.parametrize(
    "inp, exp",
    [
        (
            [
                mkpath("home", "wakka"),
                mkpath("home", "xakka"),
                mkpath("other", "zakka"),
                mkpath("another", "akka"),
                mkpath("home", "bakka"),
            ],
            [mkpath("other", "zakka"), mkpath("another", "akka")],
        )
    ],
)
def test_env_path_keep_only_non_home_paths(inp, exp):
    obs = EnvPath(inp)[2:4]
    assert exp == obs
@pytest.mark.parametrize("inp", [True, False])
def test_is_bool_true(inp):
    assert is_bool(inp)


@pytest.mark.parametrize("inp", [1, "yooo hooo!"])
def test_is_bool_false(inp):
    assert not is_bool(inp)


# to_bool: strings "", "0", "false", "none" (case-insensitive) are falsy
@pytest.mark.parametrize(
    "inp, exp",
    [
        (True, True),
        (False, False),
        (None, False),
        ("", False),
        ("0", False),
        ("False", False),
        ("NONE", False),
        ("TRUE", True),
        ("1", True),
        (0, False),
        (1, True),
    ],
)
def test_to_bool(inp, exp):
    obs = to_bool(inp)
    assert exp == obs


@pytest.mark.parametrize("inp, exp", [(True, "1"), (False, "")])
def test_bool_to_str(inp, exp):
    assert bool_to_str(inp) == exp


@pytest.mark.parametrize(
    "inp, exp",
    [(True, True), (False, True), (1, True), (0, True), ("Yolo", False), (1.0, False)],
)
def test_is_bool_or_int(inp, exp):
    obs = is_bool_or_int(inp)
    assert exp == obs


# to_bool_or_int: numeric strings become ints, single-letter truthiness becomes bool
@pytest.mark.parametrize(
    "inp, exp",
    [
        (True, True),
        (False, False),
        (1, 1),
        (0, 0),
        ("", False),
        (0.0, False),
        (1.0, True),
        ("T", True),
        ("f", False),
        ("0", 0),
        ("10", 10),
    ],
)
def test_to_bool_or_int(inp, exp):
    obs = to_bool_or_int(inp)
    assert exp == obs


@pytest.mark.parametrize("inp, exp", [(True, "1"), (False, ""), (1, "1"), (0, "0")])
def test_bool_or_int_to_str(inp, exp):
    obs = bool_or_int_to_str(inp)
    assert exp == obs


@pytest.mark.parametrize("inp", [True, False, None])
def test_is_bool_or_none_true(inp):
    assert is_bool_or_none(inp)


@pytest.mark.parametrize("inp", [1, "yooo hooo!"])
def test_is_bool_or_none_false(inp):
    assert not is_bool_or_none(inp)


@pytest.mark.parametrize(
    "inp, exp",
    [
        (True, True),
        (False, False),
        (None, None),
        ("", False),
        ("0", False),
        ("False", False),
        ("NONE", None),
        ("TRUE", True),
        ("1", True),
        (0, False),
        (1, True),
    ],
)
def test_to_bool_or_none(inp, exp):
    obs = to_bool_or_none(inp)
    assert exp == obs


@pytest.mark.parametrize("inp, exp", [(True, "1"), (False, ""), (None, "None")])
def test_bool_or_none_to_str(inp, exp):
    assert bool_or_none_to_str(inp) == exp


# ensure_slice: ints/strings/tuples/lists are normalized into slice objects
@pytest.mark.parametrize(
    "inp, exp",
    [
        (42, slice(42, 43)),
        (0, slice(0, 1)),
        (None, slice(None, None, None)),
        (slice(1, 2), slice(1, 2)),
        ("-1", slice(-1, None, None)),
        ("42", slice(42, 43)),
        ("-42", slice(-42, -41)),
        ("1:2:3", slice(1, 2, 3)),
        ("1::3", slice(1, None, 3)),
        (":", slice(None, None, None)),
        ("1:", slice(1, None, None)),
        ("[1:2:3]", slice(1, 2, 3)),
        ("(1:2:3)", slice(1, 2, 3)),
        ((4, 8, 10), slice(4, 8, 10)),
        ([10, 20], slice(10, 20)),
    ],
)
def test_ensure_slice(inp, exp):
    obs = ensure_slice(inp)
    assert exp == obs
# get_portions yields the items selected by one slice or by a list of slices
@pytest.mark.parametrize(
    "inp, exp",
    [
        # list(range(...)) replaces the redundant list(i for i in range(...)) (C400)
        ((range(50), slice(25, 40)), list(range(25, 40))),
        (
            ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [slice(1, 4), slice(6, None)]),
            [2, 3, 4, 7, 8, 9, 10],
        ),
        (([1, 2, 3, 4, 5], [slice(-2, None), slice(-5, -3)]), [4, 5, 1, 2]),
    ],
)
def test_get_portions(inp, exp):
    obs = get_portions(*inp)
    assert list(obs) == exp
# inputs that cannot be normalized into a slice must raise ValueError
@pytest.mark.parametrize(
    "inp",
    [
        "42.3",
        "3:asd5:1",
        "test",
        "6.53:100:5",
        "4:-",
        "2:15-:3",
        "50:-:666",
        object(),
        [1, 5, 3, 4],
        ("foo"),  # NOTE(review): parens without a trailing comma — this is just
        # the string "foo", not a 1-tuple; confirm whether ("foo",) was intended
    ],
)
def test_ensure_slice_invalid(inp):
    with pytest.raises(ValueError):
        ensure_slice(inp)
# is_int_as_str: only strings that parse as ints (not ints themselves)
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("42", True),
        ("42.0", False),
        (42, False),
        ([42], False),
        ([], False),
        (None, False),
        ("", False),
        (False, False),
        (True, False),
    ],
)
def test_is_int_as_str(inp, exp):
    obs = is_int_as_str(inp)
    assert exp == obs


# dynamic cwd width: (float, unit) tuples where unit is "c" (chars) or "%"
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("20", False),
        ("20%", False),
        ((20, "c"), False),
        ((20.0, "m"), False),
        ((20.0, "c"), True),
        ((20.0, "%"), True),
    ],
)
def test_is_dynamic_cwd_width(inp, exp):
    obs = is_dynamic_cwd_width(inp)
    assert exp == obs


# is_slice_as_str: slice-syntax strings only; bare ints and slice objects fail
@pytest.mark.parametrize(
    "inp, exp",
    [
        (42, False),
        (None, False),
        ("42", False),
        ("-42", False),
        (slice(1, 2, 3), False),
        ([], False),
        (False, False),
        (True, False),
        ("1:2:3", True),
        ("1::3", True),
        ("1:", True),
        (":", True),
        ("[1:2:3]", True),
        ("(1:2:3)", True),
        ("r", False),
        ("r:11", False),
    ],
)
def test_is_slice_as_str(inp, exp):
    obs = is_slice_as_str(inp)
    assert exp == obs


# logfile option: a writable path string, "", or None; anything else is invalid
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("throwback.log", True),
        ("", True),
        (None, True),
        (True, False),
        (False, False),
        (42, False),
        ([1, 2, 3], False),
        ((1, 2), False),
        (("wrong", "parameter"), False),
        pytest.param("/dev/null", True, marks=skip_if_on_windows),
    ],
)
def test_is_logfile_opt(inp, exp):
    obs = is_logfile_opt(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        (True, None),
        (False, None),
        (1, None),
        (None, None),
        ("throwback.log", "throwback.log"),
        pytest.param("/dev/null", "/dev/null", marks=skip_if_on_windows),
        pytest.param(
            "/dev/nonexistent_dev",
            "/dev/nonexistent_dev"
            if is_writable_file("/dev/nonexistent_dev")
            else None,
            marks=skip_if_on_windows,
        ),
        ("~/log", os.path.expanduser("~/log")),
    ],
)
def test_to_logfile_opt(inp, exp):
    obs = to_logfile_opt(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [
        (None, ""),
        ("", ""),
        ("throwback.log", "throwback.log"),
        ("/dev/null", "/dev/null"),
    ],
)
def test_logfile_opt_to_str(inp, exp):
    obs = logfile_opt_to_str(inp)
    assert exp == obs


# to/from dynamic cwd tuples; bare numbers default to the "c" (chars) unit
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("20", (20.0, "c")),
        ("20%", (20.0, "%")),
        ((20, "c"), (20.0, "c")),
        ((20, "%"), (20.0, "%")),
        ((20.0, "c"), (20.0, "c")),
        ((20.0, "%"), (20.0, "%")),
        ("inf", (float("inf"), "c")),
    ],
)
def test_to_dynamic_cwd_tuple(inp, exp):
    obs = to_dynamic_cwd_tuple(inp)
    assert exp == obs


@pytest.mark.parametrize(
    "inp, exp",
    [((20.0, "c"), "20.0"), ((20.0, "%"), "20.0%"), ((float("inf"), "c"), "inf")],
)
def test_dynamic_cwd_tuple_to_str(inp, exp):
    obs = dynamic_cwd_tuple_to_str(inp)
    assert exp == obs
# cmd.exe metacharacters get ^-escaped
@pytest.mark.parametrize(
    "st, esc",
    [
        ("", ""),
        ("foo", "foo"),
        ("foo&bar", "foo^&bar"),
        ('foo$?-/_"\\', 'foo$?-/_^"\\'),
        ("^&<>|", "^^^&^<^>^|"),
        ("()<>", "^(^)^<^>"),
    ],
)
def test_escape_windows_cmd_string(st, esc):
    obs = escape_windows_cmd_string(st)
    assert esc == obs


# argvquote: Windows CommandLineToArgvW-compatible quoting; `forced` is the
# expected output with force=True (None means same as `esc`)
@pytest.mark.parametrize(
    "st, esc, forced",
    [
        ("", '""', None),
        ("foo", "foo", '"foo"'),
        (
            r'arg1 "hallo, "world""  "\some\path with\spaces")',
            r'"arg1 \"hallo, \"world\"\"  \"\some\path with\spaces\")"',
            None,
        ),
        (
            r'"argument"2" argument3 argument4',
            r'"\"argument\"2\" argument3 argument4"',
            None,
        ),
        (r'"\foo\bar bar\foo\" arg', r'"\"\foo\bar bar\foo\\\" arg"', None),
        (
            r"\\machine\dir\file.bat",
            r"\\machine\dir\file.bat",
            r'"\\machine\dir\file.bat"',
        ),
        (
            r'"\\machine\dir space\file.bat"',
            r'"\"\\machine\dir space\file.bat\""',
            None,
        ),
    ],
)
def test_argvquote(st, esc, forced):
    obs = argvquote(st)
    assert esc == obs
    if forced is None:
        forced = esc
    obs = argvquote(st, force=True)
    assert forced == obs


@pytest.mark.parametrize("inp", ["no string here", ""])
def test_partial_string_none(inp):
    assert check_for_partial_string(inp) == (None, None, None)


# check_for_partial_string returns (start index, end index or None, quote prefix)
@pytest.mark.parametrize(
    "leaders", [(("", 0), ("not empty", 9)), (("not empty", 9), ("", 0))]
)
@pytest.mark.parametrize("prefix", ["b", "rb", "r"])
@pytest.mark.parametrize("quote", ['"', '"""'])
def test_partial_string(leaders, prefix, quote):
    (l, l_len), (f, f_len) = leaders
    s = prefix + quote
    t = s + "test string" + quote
    t_len = len(t)
    # single string
    test_string = l + t + f
    obs = check_for_partial_string(test_string)
    exp = l_len, l_len + t_len, s
    assert obs == exp
    # single partial
    test_string = l + f + s + "test string"
    obs = check_for_partial_string(test_string)
    exp = l_len + f_len, None, s
    assert obs == exp
    # two strings
    test_string = l + t + f + l + t + f
    obs = check_for_partial_string(test_string)
    exp = (l_len + t_len + f_len + l_len), (l_len + t_len + f_len + l_len + t_len), s
    assert obs == exp
    # one string, one partial
    test_string = l + t + f + l + s + "test string"
    obs = check_for_partial_string(test_string)
    exp = l_len + t_len + f_len + l_len, None, s
    assert obs == exp
def test_executables_in(xession):
    """executables_in reports only executable regular files in a directory
    (consulting PATHEXT on Windows).

    Cleanup: removed the unreachable ``_type == "none"`` branch (the types
    tuple never contains "none") and the duplicated
    ``result = set(executables_in(test_path))`` in both arms of the
    Windows check.
    """
    expected = set()
    types = ("file", "directory", "brokensymlink")
    if ON_WINDOWS:
        # Don't test symlinks on windows since it requires admin
        types = ("file", "directory")
    executables = (True, False)
    with TemporaryDirectory() as test_path:
        for _type in types:
            for executable in executables:
                fname = f"{_type}_{executable}"
                if _type == "file" and executable:
                    # executable files are the only entries expected in the output
                    ext = ".exe" if ON_WINDOWS else ""
                    expected.add(fname + ext)
                else:
                    ext = ""
                path = os.path.join(test_path, fname + ext)
                if _type == "file":
                    with open(path, "w") as f:
                        f.write(fname)
                elif _type == "directory":
                    os.mkdir(path)
                elif _type == "brokensymlink":
                    # create a target, link to it, then delete the target
                    tmp_path = os.path.join(test_path, "i_wont_exist")
                    with open(tmp_path, "w") as f:
                        f.write("deleteme")
                    os.symlink(tmp_path, path)
                    os.remove(tmp_path)
                if executable and _type != "brokensymlink":
                    os.chmod(path, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR)
        if ON_WINDOWS:
            # executables_in reads PATHEXT from the session environment
            xession.env = PATHEXT_ENV
        result = set(executables_in(test_path))
    assert expected == result
# expand_case_matching turns each cased letter into a [Xx] character class
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("yo", "[Yy][Oo]"),
        ("[a-f]123e", "[a-f]123[Ee]"),
        ("${HOME}/yo", "${HOME}/[Yy][Oo]"),
        ("./yo/mom", "./[Yy][Oo]/[Mm][Oo][Mm]"),
        ("Eßen", "[Ee][Ss]?[Ssß][Ee][Nn]"),
    ],
)
def test_expand_case_matching(inp, exp):
    obs = expand_case_matching(inp)
    assert exp == obs


# expandvars: known $vars / ${'var'} are substituted, unknown ones left alone
@pytest.mark.parametrize(
    "inp, exp",
    [
        ("foo", "foo"),
        ("$foo $bar", "bar $bar"),
        ("$unk $foo $bar", "$unk bar $bar"),
        ("$foobar", "$foobar"),
        ("$foo $spam", "bar eggs"),
        ("$unk $foo $spam", "$unk bar eggs"),
        ("$unk $foo $unk $spam $unk", "$unk bar $unk eggs $unk"),
        ("$an_int$spam$a_bool", "42eggsTrue"),
        ("$unk$an_int$spam$a_bool", "$unk42eggsTrue"),
        ("bar$foo$spam$foo $an_int $none", "barbareggsbar 42 "),
        ("$unk bar$foo$spam$foo $an_int $none", "$unk barbareggsbar 42 "),
        ("$foo/bar", "bar/bar"),
        ("$unk/$foo/bar", "$unk/bar/bar"),
        ("${'foo'} $spam", "bar eggs"),
        ("$unk ${'unk'} ${'foo'} $spam", "$unk ${'unk'} bar eggs"),
        ("${'foo'} ${'a_bool'}", "bar True"),
        ("${'foo'}bar", "barbar"),
        ("${'foo'}/bar", "bar/bar"),
        ("${'unk'}/${'foo'}/bar", "${'unk'}/bar/bar"),
        ("${\"foo'}", "${\"foo'}"),
        ("$?bar", "$?bar"),
        ("$foo}bar", "bar}bar"),
        ("${'foo", "${'foo"),
        (b"foo", "foo"),
        (b"$foo bar", "bar bar"),
        (b"$unk $foo bar", "$unk bar bar"),
        (b"${'foo'}bar", "barbar"),
        (b"${'unk'}${'foo'}bar", "${'unk'}barbar"),
    ],
)
def test_expandvars(inp, exp, xession):
    """Tweaked for xonsh cases from CPython `test_genericpath.py`"""
    xession.env.update(
        dict({"foo": "bar", "spam": "eggs", "a_bool": True, "an_int": 42, "none": None})
    )
    assert expandvars(inp) == exp


# ensure_timestamp: floats pass through, strings/datetimes become timestamps;
# fmt overrides the session's XONSH_DATETIME_FORMAT
@pytest.mark.parametrize(
    "inp, fmt, exp",
    [
        (572392800.0, None, 572392800.0),
        ("42.1459", None, 42.1459),
        (
            dt.datetime(2016, 8, 2, 13, 24),
            None,
            dt.datetime(2016, 8, 2, 13, 24).timestamp(),
        ),
        ("2016-8-10 16:14", None, dt.datetime(2016, 8, 10, 16, 14).timestamp()),
        (
            "2016/8/10 16:14:40",
            "%Y/%m/%d %H:%M:%S",
            dt.datetime(2016, 8, 10, 16, 14, 40).timestamp(),
        ),
    ],
)
def test_ensure_timestamp(inp, fmt, exp, xession):
    xession.env["XONSH_DATETIME_FORMAT"] = "%Y-%m-%d %H:%M"
    obs = ensure_timestamp(inp, fmt)
    assert exp == obs


# expand_path: ~ expansion controlled by expand_user, $vars by EXPAND_ENV_VARS
@pytest.mark.parametrize("expand_user", [True, False])
@pytest.mark.parametrize(
    "inp, expand_env_vars, exp_end",
    [
        ("~/test.txt", True, "/test.txt"),
        ("~/$foo", True, "/bar"),
        ("~/test/$a_bool", True, "/test/True"),
        ("~/test/$an_int", True, "/test/42"),
        ("~/test/$none", True, "/test/"),
        ("~/$foo", False, "/$foo"),
    ],
)
def test_expand_path(expand_user, inp, expand_env_vars, exp_end, xession):
    if os.sep != "/":
        # normalize the fixture paths for the platform
        inp = inp.replace("/", os.sep)
        exp_end = exp_end.replace("/", os.sep)
    xession.env.update({"foo": "bar", "a_bool": True, "an_int": 42, "none": None})
    xession.env["EXPAND_ENV_VARS"] = expand_env_vars
    path = expand_path(inp, expand_user=expand_user)
    if expand_user:
        home_path = os.path.expanduser("~")
        assert path == home_path + exp_end
    else:
        assert path == "~" + exp_end
def test_swap_values():
orig = {"x": 1}
updates = {"x": 42, "y": 43}
with swap_values(orig, updates):
assert orig["x"] == 42
assert orig["y"] == 43
assert orig["x"] == 1
assert "y" not in orig
@pytest.mark.parametrize(
"arguments, expected_docstring",
[
(
{"deprecated_in": "0.5.10", "removed_in": "0.6.0"},
"my_function has been deprecated in version 0.5.10 and will be removed "
"in version 0.6.0",
),
(
{"deprecated_in": "0.5.10"},
"my_function has been deprecated in version 0.5.10",
),
(
{"removed_in": "0.6.0"},
"my_function has been deprecated and will be removed in version 0.6.0",
),
({}, "my_function has been deprecated"),
],
)
def test_deprecated_docstrings_with_empty_docstring(arguments, expected_docstring):
@deprecated(**arguments)
def my_function():
pass
assert my_function.__doc__ == expected_docstring
@pytest.mark.parametrize(
    "arguments, expected_docstring",
    [
        # both bounds -> original docstring, blank line, full sentence
        (
            {"deprecated_in": "0.5.10", "removed_in": "0.6.0"},
            "Does nothing.\n\nmy_function has been deprecated in version 0.5.10 and "
            "will be removed in version 0.6.0",
        ),
        # only the deprecation version
        (
            {"deprecated_in": "0.5.10"},
            "Does nothing.\n\nmy_function has been deprecated in version 0.5.10",
        ),
        # only the removal version
        (
            {"removed_in": "0.6.0"},
            "Does nothing.\n\nmy_function has been deprecated and will be removed "
            "in version 0.6.0",
        ),
        # no versions -> minimal message appended
        ({}, "Does nothing.\n\nmy_function has been deprecated"),
    ],
)
def test_deprecated_docstrings_with_nonempty_docstring(arguments, expected_docstring):
    """@deprecated appends the deprecation notice after the existing docstring."""
    @deprecated(**arguments)
    def my_function():
        """Does nothing."""
        pass
    assert my_function.__doc__ == expected_docstring
def test_deprecated_warning_raised():
    """Calling a @deprecated function emits a DeprecationWarning."""

    @deprecated()
    def my_function():
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        my_function()
    # exactly the recorded warning category is inspected
    assert issubclass(caught.pop().category, DeprecationWarning)
def test_deprecated_warning_contains_message():
    """The emitted warning carries the default deprecation message."""

    @deprecated()
    def my_function():
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        my_function()
    message = str(caught.pop().message)
    assert message == "my_function has been deprecated"
# An already-released version and the current version both count as expired.
@pytest.mark.parametrize("expired_version", ["0.1.0", __version__])
def test_deprecated_past_expiry_raises_assertion_error(expired_version):
    """Calling a function at/past its removal version raises AssertionError."""
    @deprecated(removed_in=expired_version)
    def my_function():
        pass
    with pytest.raises(AssertionError):
        my_function()
@skip_if_on_windows
def test_iglobpath_no_dotfiles(xession):
    """A non-recursive glob with include_dotfiles=False must skip dotfiles."""
    here = os.path.dirname(__file__)
    matches = list(iglobpath(here + "/*", include_dotfiles=False))
    assert here + "/.somedotfile" not in matches
@skip_if_on_windows
def test_iglobpath_dotfiles(xession):
    """A non-recursive glob with include_dotfiles=True must yield dotfiles."""
    here = os.path.dirname(__file__)
    matches = list(iglobpath(here + "/*", include_dotfiles=True))
    assert here + "/.somedotfile" in matches
@skip_if_on_windows
def test_iglobpath_no_dotfiles_recursive(xession):
    """A recursive glob with include_dotfiles=False skips nested dotfiles."""
    here = os.path.dirname(__file__)
    matches = list(iglobpath(here + "/**", include_dotfiles=False))
    assert here + "/bin/.someotherdotfile" not in matches
@skip_if_on_windows
def test_iglobpath_dotfiles_recursive(xession):
    """A recursive glob with include_dotfiles=True finds nested dotfiles."""
    here = os.path.dirname(__file__)
    matches = list(iglobpath(here + "/**", include_dotfiles=True))
    assert here + "/bin/.someotherdotfile" in matches
def test_iglobpath_empty_str(monkeypatch, xession):
    """iglobpath tolerates bogus empty names from os.scandir()/os.listdir()."""

    def fake_scandir(path):
        yield ""

    if hasattr(os, "scandir"):
        monkeypatch.setattr(os, "scandir", fake_scandir)

    def fake_listdir(path):
        return [""]

    monkeypatch.setattr(os, "listdir", fake_listdir)
    # an empty-string entry must simply produce no matches, not an error
    assert list(iglobpath("some/path")) == []
def test_all_permutations():
    """all_permutations('ABC') yields every non-empty ordering of every subset."""
    observed = {"".join(perm) for perm in all_permutations("ABC")}
    expected = {
        # length 1
        "A", "B", "C",
        # length 2
        "AB", "BA", "AC", "CA", "BC", "CB",
        # length 3
        "ABC", "ACB", "BAC", "BCA", "CAB", "CBA",
    }
    assert observed == expected
@pytest.mark.parametrize(
    "name, styles, refrules",
    [
        ("test1", {}, {}),  # empty styles
        (
            "test2",
            {"Token.Literal.String.Single": "#ff0000"},
            {"Token.Literal.String.Single": "#ff0000"},
        ),  # fully-qualified str key passes through unchanged
        (
            "test3",
            {"Literal.String.Single": "#ff0000"},
            {"Token.Literal.String.Single": "#ff0000"},
        ),  # short str key is expanded to the full token path
        (
            "test4",
            {"RED": "#ff0000"},
            {"Token.Color.RED": "#ff0000"},
        ),  # bare color name is mapped into the Token.Color namespace
    ],
)
def test_register_custom_style(name, styles, refrules):
    """Registered styles expose the expected token -> color rules."""
    style = register_custom_style(name, styles)
    # rules are only verifiable when pygments is installed
    if HAS_PYGMENTS:
        assert style is not None
        for rule, color in style.styles.items():
            if str(rule) in refrules:
                assert refrules[str(rule)] == color
@pytest.mark.parametrize(
    "val, exp",
    [
        # the two canonical mode names are accepted
        ("default", True),
        ("menu-complete", True),
        # abbreviations and unrelated strings are rejected
        ("def", False),
        ("xonsh", False),
        ("men", False),
    ],
)
def test_is_completion_mode(val, exp):
    """is_completion_mode accepts only exact canonical mode names."""
    assert is_completion_mode(val) is exp
@pytest.mark.parametrize(
    "val, exp",
    [
        # empty / None fall back to the default mode
        ("", "default"),
        (None, "default"),
        # matching is case-insensitive
        ("default", "default"),
        ("DEfaULT", "default"),
        # per the table: "m" and mixed-case/underscore forms resolve to
        # "menu-complete"
        ("m", "menu-complete"),
        ("mEnu_COMPlete", "menu-complete"),
        ("menu-complete", "menu-complete"),
    ],
)
def test_to_completion_mode(val, exp):
    """to_completion_mode normalizes user input to a canonical mode name."""
    assert to_completion_mode(val) == exp
@pytest.mark.parametrize(
    "val",
    [
        "de",
        "defa_ult",
        "men_",
        "menu_",
    ],
)
def test_to_completion_mode_fail(val):
    """Unrecognized values warn and fall back to the default mode."""
    with pytest.warns(RuntimeWarning):
        result = to_completion_mode(val)
        assert result == "default"
@pytest.mark.parametrize(
    "val, exp",
    [
        # the three canonical display values
        ("none", True),
        ("single", True),
        ("multi", True),
        # anything else (including empty / None) is invalid
        ("", False),
        (None, False),
        ("argle", False),
    ],
)
def test_is_completions_display_value(val, exp):
    """Only "none", "single" and "multi" are valid display values."""
    assert is_completions_display_value(val) == exp
@pytest.mark.parametrize(
    "val, exp",
    [
        # false-like inputs map to "none"
        ("none", "none"),
        (False, "none"),
        ("false", "none"),
        ("single", "single"),
        ("readline", "readline"),  # todo: check this
        # true-like inputs map to "multi"
        ("multi", "multi"),
        (True, "multi"),
        ("TRUE", "multi"),
    ],
)
def test_to_completions_display_value(val, exp):
    """Bool-ish and string inputs are coerced to canonical display names."""
    assert to_completions_display_value(val) == exp
@pytest.mark.parametrize("val", [1, "", "argle"])
def test_to_completions_display_value_fail(val):
    """Invalid display values warn and fall back to "multi"."""
    with pytest.warns(RuntimeWarning):
        result = to_completions_display_value(val)
        assert result == "multi"
def test_is_regex_true():
    """A plain literal pattern compiles and is accepted."""
    assert is_regex("cat"), "'cat' should be recognized as a valid regex"
def test_is_regex_false():
    """'**' does not compile as a regex and is rejected."""
    assert not is_regex("**"), "'**' should not be recognized as a valid regex"
from xonsh.style_tools import Token
@pytest.mark.parametrize(
    "val, exp",
    [
        # valid: Token objects or "Token."-prefixed strings mapping to
        # style strings
        (
            {
                Token.Literal.String: "bold ansigreen",
                "Token.Name.Tag": "underline ansiblue",
            },
            True,
        ),
        # a single non-token key (int) invalidates the whole dict
        (
            {
                Token.Literal.String: "bold ansigreen",
                1: "bold ansigreen",
            },
            False,
        ),
        ({1: "bold ansigreen"}, False),
        # per the table: "123" is not accepted as a style value
        (
            {Token.Literal.String: "123"},
            False,
        ),
        # values must be strings at all
        (
            {"Token.Name.Tag": 123},
            False,
        ),
    ],
)
def test_is_tok_color_dict(val, exp):
    """is_tok_color_dict validates both the keys and the style values."""
    assert is_tok_color_dict(val) == exp
|
999,618 | b87cfe08c1bc27da8b25d66c690f81e980bfc360 |
from django.conf.urls import url, include
from django.urls import path
from rest_framework import permissions
from .serializers import *
from .views import *
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView,
)
# Read-only endpoints bound from the ViewSets: each maps HTTP GET to a
# ViewSet action and is referenced by name in urlpatterns below.
faculty_list = FacultyViewSet.as_view({
    'get': 'list',  # Get lists
})
class_list = ClassViewSet.as_view({
    'get': 'list',  # Get lists
})
position_list = PositionViewSet.as_view({
    'get': 'list',  # Get lists
})
area_list = AreaViewSet.as_view({
    'get': 'list',  # Get lists
})
# single-area detail view (the route supplies a slug)
area_detail = AreaViewSet.as_view({
    'get': 'retrieve'
})
# custom ViewSet action serving the area image
image_area = AreaViewSet.as_view({
    'get': 'get_image_area'
})
urlpatterns = [
    # --- authentication: JWT login + forgot/reset password flow ---
    # NOTE(review): django.conf.urls.url() is deprecated (removed in
    # Django 4.0); consider migrating this route to re_path()/path().
    url(r'^auth/login/$', MyTokenObtainPairView.as_view()),
    path('auth/forgot-password/', forgot_password_view),
    path('auth/forgot-password/<uidb64>/<token>', reset_password_view, name='reset_password'),
    # --- user account management ---
    path('account/get-user-profile/', get_profile_view),
    path('account/update-user-profile/', update_user_profile_view),
    path('account/change-password/', change_password_view),
    path("account/change-avatar/", UserAvatarUpload.as_view(), name="rest_user_avatar_upload"),
    # --- static reference data (read-only lists bound above) ---
    path('faculty/', faculty_list),
    path('class/', class_list),
    path('position/', position_list),
    path('area/', area_list),
    path('area/<slug:slug>/', area_detail),
    path('area-image/', image_area),
    # --- sub-application routers mounted at the root prefix ---
    path('', include('api.sinhvien.urls')),
    path('', include('api.nhanvien.urls')),
    path('', include('api.room.urls')),
    path('', include('api.quanlytaichinh.urls')),
    path('', include('api.quanlynhansu.urls')),
    # path('', include('api.students_of_coach.urls')),
]
999,619 | 96f2230ed57be6225cb9ab1164b7e4e9660a23c6 | # -*- coding: utf-8 -*
"""
Run the trained model for prediction (original docstring: 实现模型的调用,
"invoke the model").
"""
from flyai.dataset import Dataset
from model import Model

# load the FlyAI dataset and wrap it in this project's Model class
data = Dataset()
model = Model(data)
# p = model.predict(
# source="新华社 北京 5 月 8 日 电 中国 跳水 名将 余卓成 7 日 在 美国 佛罗里达州 举行 的 国际泳联 跳水 大奖赛 上 , 获得 男子 一米板 冠军 。 新华社 北京 5 月 8 日 电 中国 跳水 名将 余卓成 7 日 在 美国 佛罗里达州 举行 的 国际泳联 跳水 大奖赛 上 , 获得 男子 一米板 冠军 。 新华社 北京 5 月 8 日 电 中国 跳水 名将 余卓成 7 日 在 美国 佛罗里达州 举行 的 国际泳联 跳水 大奖赛 上 , 获得 男子 一米板 冠军 。 新华社 北京 5 月 8 日 电 中国 跳水 名将 余卓成 7 日 在 美国 佛罗里达州 举行 的 国际泳联 跳水 大奖赛 上 , 获得 男子 一米板 冠军 。 新华社 北京 5 月 8 日 电 中国 跳水 名将 余卓成 7 日 在 美国 佛罗里达州 举行 的 国际泳联 跳水 大奖赛 上 , 获得 男子 一米板 冠军 。 新华社 北京 5 月 8 日 电 中国 跳水 名将 余卓成 7 日 在 美国 佛罗里达州 举行 的 国际泳联 跳水 大奖赛 上 , 获得 男子 一米板 冠军 。")
# before = "新华社 北京"
# print(len(before.split(' ')))
# p = model.predict(source=before)
p = model.predict_all(
[{"source": "非法 行医 罪"},
{"source": "在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。"},
{
"source": "在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议 上 , 联合国 副 秘书长 兼 联合国 国际 禁毒署 署长 阿拉 奇 代表 联合国 秘书长 安南 致 闭幕词 。 在 闭幕 会议"},
{"source": "何静鹏 两口子 双双 下岗 , 在 和 兴 街道 办事处 社区 服务中心 的 帮助 下 , 腾出 自家 的 屋子 办起 了 学生 食堂 , 不但 解决 了 小 区内 几十个 学生 吃 中午饭 难 的 问题 , 连 附近 的 大学生 也 有 不少"}
])
print([len(i) for i in p])
print(p)
|
999,620 | d4670ed2ae4038a376c84fd5fcd378ea6596a806 | """
The tspio module basically contains the messy code that is avoided in the
IOModule but has to exist, like showing file dialogues, parsing files
and writing files, as well as some helper methods for string construction
from data structures."""
import re
import ast
import os
import tsputil
from Node import Node
try:
# for Python2
from tkFileDialog import asksaveasfile, askopenfile
except ImportError:
# for Python3
from tkinter.filedialog import asksaveasfile, askopenfile
def parse_tsp_file(file):
    """ Parses data from a tspfile with regexes and returns a dict
    holding the name, comment, startnodes, node coordinates and
    groupinformation."""
    # define regular expressions for the fields to parse
    regexes = {'name': re.compile("NAME : (.*)"),
               'comment': re.compile("COMMENT : (?!STARTNODE :|STARTNODES : |CLUSTERS :)(.*)"),
               # FIX: was "([0-9])+", which captured only the LAST digit of
               # multi-digit start node ids (e.g. "12" parsed as 2)
               'single_start': re.compile("COMMENT : STARTNODE : ([0-9]+)"),
               'multi_start': re.compile("COMMENT : STARTNODES : (.*)"),
               'nodes':
               re.compile(
                   r"([0-9]+)\ *([0-9]*\.?[0-9]*)\ *([0-9]*\.?[0-9]*)",
                   re.MULTILINE),
               'groups': re.compile("COMMENT : CLUSTERS : (.*)")}
    # initialize results with defaults for absent fields
    result = {'name': 'No Name', 'comment': '', 'startnodes': [],
              'nodes': [], 'groups': []}

    # Define application rules
    def apply_match(regex_name, match):
        """Applies a specific processing rule for each regex separately as the
        fields vary in data types and structures.
        FIX: string comparison now uses '==' -- the original 'is' relied on
        CPython string interning and raises SyntaxWarning on Python 3.8+."""
        if regex_name == 'name':
            result['name'] = match.group(1)
        elif regex_name == 'single_start':
            result['startnodes'] = [int(match.group(1))]
        elif regex_name == 'multi_start':
            # STARTNODES is a Python-style list literal
            result['startnodes'] = ast.literal_eval(match.group(1))
        elif regex_name == 'groups':
            result['groups'] = ast.literal_eval(
                match.group(1).replace(" ", ""))
        elif regex_name == 'comment':
            result['comment'] += match.group(1) + "\n"
        elif regex_name == 'nodes':
            # coordinates are stored as ints, dropping fractional parts
            result['nodes'].append([int(float(match.group(2))),
                                    int(float(match.group(3)))])

    # Process the lines in the file and check for matches for each regular
    # expression; 'with' guarantees the file is closed even on errors
    # (the original leaked the handle if apply_match raised).
    with open(file, 'r') as _file:
        for line in _file:
            if len(line):
                for regex_name in regexes:
                    match = re.match(regexes[regex_name], line)
                    if match:
                        apply_match(regex_name, match)
    return result
def get_groups(nodes):
    """ Return a list of the distinct color ids occurring in the given
    nodeset (order unspecified)."""
    distinct_colors = {node.color for node in nodes}
    return list(distinct_colors)
def construct_groups_string(nodes):
    """ Build the CLUSTERS string: one list of 1-indexed node ids per
    color group, or "" when there is at most one group."""
    colors = list({node.color for node in nodes})
    if len(colors) <= 1:
        # a single (or empty) group carries no clustering information
        return ""
    grouping = []
    for color in colors:
        # +1 because .tsp nodes are indexed with 1
        grouping.append([node.nid + 1 for node in nodes if node.color == color])
    return str(grouping)
def construct_startnodes_string(nodes):
    """ Return the STARTNODES string listing the ids of nodes flagged as
    start nodes, or "" when none are flagged."""
    start_ids = [node.nid for node in nodes if node.start]
    # an empty list means "no explicit start nodes"
    return str(start_ids) if start_ids else ""
def parse_solution_file(file):
    """ Returns the concatenated lines 1 to END (i.e. everything after the
    header line).

    FIX: the original opened the file without ever closing it; the context
    manager guarantees the handle is released."""
    with open(file, 'r') as _file:
        return "".join(_file.readlines()[1:])
def import_tsp(scale):
    """ Shows a filedialog to select a file to open and returns the parsed
    problem data, or None if the user cancelled the dialog.

    Coordinates from the file are divided by *scale* to map them onto
    canvas units."""
    # show a open-file-dialog
    filename = askopenfile()
    # if the user selected a file, delete old data, parse the file and
    # load the new data. If the user canceled the selection, do nothing.
    if filename:
        data = parse_tsp_file(filename.name)
        # Construct the list of ungrouped nodes, all in the default color
        color = tsputil.COLORS[0]
        node_list = [Node(index, int(node[0] / scale), int(node[1] / scale), color)
                     for (index, node) in enumerate(data['nodes'])]
        # if the nodes are grouped, change node colors accordingly
        # (group entries are 1-based .tsp ids, hence nid - 1)
        for (index, group) in enumerate(data['groups']):
            for nid in group:
                node_list[nid - 1].color = tsputil.COLORS[index]
        # mark nodes as startnode if specified
        # NOTE(review): startnodes are used 0-based here while groups above
        # are 1-based -- confirm the file format really differs like this
        for nid in data['startnodes']:
            node_list[nid].start = True
        result = data
        result['nodes'] = node_list
        return result
    else:
        return None
def export_tsp(nodes, scale, comment, pre_filename=None):
    """ Exports the problem data in .tsp format.

    nodes        -- node objects providing nid, x_coord, y_coord, color, start
    scale        -- factor applied to coordinates on output
    comment      -- COMMENT header text (a placeholder is used when None)
    pre_filename -- optional file-like object with a .name; when omitted a
                    save dialog is shown.
    Returns the basename of the written file, or None if the user cancelled.
    """
    filename = pre_filename
    if comment is None:
        comment = "PUT PROBLEM DESCRIPTION HERE"
    # ask the user for a target file only when none was supplied
    if filename is None:
        filename = asksaveasfile(defaultextension=".tsp")
    # check if the user did select a file
    if filename:
        # FIX: 'with' guarantees the handle is closed even if a write
        # raises (the original only closed on the happy path)
        with open(filename.name, 'w') as _file:
            _file.write("NAME : " + os.path.basename(filename.name) + "\n")
            _file.write("COMMENT : " + comment + "\n")
            groups = construct_groups_string(nodes)
            if groups != "":
                _file.write("COMMENT : CLUSTERS : " + groups + "\n")
            startnodes = construct_startnodes_string(nodes)
            if startnodes != "":
                _file.write("COMMENT : STARTNODES : " + startnodes + "\n")
            _file.write("TYPE: TSP" + "\n")
            _file.write("DIMENSION: " + str(len(nodes)) + "\n")
            _file.write("EDGE_WEIGHT_TYPE : EUC_2D" + "\n")
            _file.write("NODE_COORD_SECTION" + "\n")
            # node ids are written 1-based per the .tsp convention
            for (index, node) in enumerate(nodes):
                _file.write(str(index + 1) + " " + str(node.x_coord * scale) +
                            " " + str(node.y_coord * scale) + "\n")
            _file.write("EOF")
        return os.path.basename(filename.name)
def export_tikz(nodes, scale, path):
    """ Exports the problem data as a tikz graphic in .tex format.

    Draws one scatter series per color group and, when *path* is given,
    overlays the tour stored under path['Tour'] as a line plot."""
    filename = asksaveasfile(defaultextension=".tex")
    if filename:
        # NOTE(review): the handle is not closed if a write raises;
        # consider wrapping this in a 'with' block
        _file = open(filename.name, 'w')
        _file.write("\\begin{tikzpicture}\n")
        _file.write("\\begin{axis}[%\n")
        _file.write("width=\\textwidth,\n")
        _file.write("scale only axis,\n")
        _file.write("xmin=-100,\n")
        _file.write("xmax=2700,\n")
        _file.write("ymin=-100,\n")
        _file.write("ymax=2100,\n")
        _file.write("y dir=reverse,\n")
        _file.write("axis x line*=bottom,\n")
        _file.write("axis y line*=left\n")
        _file.write("]\n")
        # one scatter series per color group
        for group in get_groups(nodes):
            _file.write(
                """\\addplot [color=black,mark size=5.0pt,
only marks,mark=*,mark options={solid,
fill=""" + group.lower() + "},forget plot]\n")
            _file.write("table[row sep=crcr]{%\n")
            for node in nodes:
                if node.color == group:
                    _file.write(
                        str(node.x_coord * scale) + " " +
                        str(node.y_coord * scale) + "\\\\\n")
            _file.write("};\n")
        # optionally draw the tour on top of the points
        if not path is None:
            _file.write("\\addplot [draw=black,forget plot]\n")
            _file.write("table[row sep=crcr]{%\n")
            for path_node in path['Tour']:
                # NOTE(review): these prints look like leftover debug output
                print(path_node)
                node = nodes[int(path_node)]
                print(node)
                _file.write(
                    str(node.x_coord * scale) + " " +
                    str(node.y_coord * scale) + "\\\\\n")
            _file.write("};\n")
        _file.write("\\end{axis}\n")
        _file.write("\\end{tikzpicture}%\n")
        _file.close()
|
999,621 | 5dd8f34c65644ec3314e1ad102613aa2e1bdab3e | from django.db import models
from django.db.models import Sum
from datetime import datetime
from pytz import timezone
class Destination(models.Model):
    """A shipping destination: a name with an optional street address."""

    name = models.CharField(max_length=100)
    # nullable: some destinations are recorded by name only
    address = models.CharField(null=True, max_length=100)

    def __str__(self):
        return str(self.name)
class Product(models.Model):
    """A bottled product, with aggregate counts and volume totals derived
    from its InventoryItem and Stray records."""

    product_name = models.CharField(max_length=50)
    product_type = models.CharField(max_length=50)
    # universal product code
    UPC = models.CharField(max_length=20)
    # bottle size; InventoryItem.bottles_per_case only handles 750 and 375
    # -- presumably milliliters, TODO confirm
    bottle_size = models.IntegerField()

    def __str__(self):
        return str(self.product_name)

    @property
    def number_of_strays(self):
        # count of loose bottles (Stray rows) for this product
        return Stray.objects.filter(name=self).count()

    @property
    def number_of_cases(self):
        # only cases still in inventory (date_removed is NULL)
        return InventoryItem.objects.filter(name=self).filter(date_removed=None).count()

    @property
    def total_liters(self):
        # NOTE(review): unlike number_of_cases this sums over ALL cases,
        # including already-removed ones -- confirm that is intentional
        cases = InventoryItem.objects.filter(name=self)
        liters = []
        for case in cases:
            liters.append(case.liters)
        return sum(liters)

    @property
    def total_wine_gallons(self):
        # 1 liter = 0.264172 US gallons
        return round((float(self.total_liters) * .264172), 2)

    @property
    def total_proof_gallons(self):
        # sum of per-case proof gallons, rounded to 2 decimals
        cases = InventoryItem.objects.filter(name=self)
        proof_gallons = []
        for case in cases:
            proof_gallons.append(case.proof_gallons)
        return round(sum(proof_gallons), 2)
class InventoryItem(models.Model):
    """A single (possibly partial) case of a Product in inventory."""

    case_number = models.IntegerField(unique=True)
    date_assigned = models.DateField()
    # FK to the Product this case contains
    name = models.ForeignKey(Product, on_delete=models.PROTECT)
    # proof (twice ABV), e.g. 80.00
    proof = models.DecimalField(max_digits=5, decimal_places=2)
    # set when the case leaves inventory
    date_removed = models.DateTimeField(null=True, blank=True)
    destination = models.CharField(null=True, max_length=100, blank=True)
    # fraction of a full case this row represents (1.0 = full case)
    case_fraction = models.DecimalField(max_digits=5, decimal_places=4, default=1.0)

    def __str__(self):
        return str(self.name)

    @property
    def bottles_per_case(self):
        # full cases hold 6 x 750 or 12 x 375 bottles, scaled by fraction
        # NOTE(review): returns None for any other bottle size, which makes
        # liters/wine_gallons raise -- confirm only 750/375 can occur
        if self.name.bottle_size == 750:
            return int(round(6 * self.case_fraction, 0))
        elif self.name.bottle_size == 375:
            return int(round(12 * self.case_fraction, 0))

    @property
    def product(self):
        return self.name.product_type

    @property
    def liters(self):
        # bottle_size / 1000 converts to liters before multiplying
        return round(self.name.bottle_size / 1000 * self.bottles_per_case, 2)

    @property
    def wine_gallons(self):
        # 1 liter = 0.264172 US gallons
        return round((float(self.liters) * .264172), 2)

    @property
    def proof_gallons(self):
        # NOTE(review): int(self.proof) truncates fractional proof
        # (e.g. 80.50 -> 80) before dividing -- confirm truncation is intended
        return round((int(self.proof) / 100 * self.wine_gallons), 2)
class Stray(models.Model):
    """A loose bottle of a Product that is not part of a case."""

    date_assigned = models.DateField()
    name = models.ForeignKey(Product, on_delete=models.PROTECT)
    # proof (twice ABV), e.g. 80.00
    proof = models.DecimalField(max_digits=5, decimal_places=2)

    def __str__(self):
        return str(self.name)

    @property
    def product(self):
        return self.name.product_type
|
999,622 | 4ae7f07d5148a901e4001c869ac2b173da409241 | ##
# This is the web service form for the Crazy Ivan RDFa Test Harness script.
# License: Creative Commons Attribution Share-Alike
# @author Manu Sporny
import os, os.path
import re
from re import search
from urllib2 import urlopen
import urllib
from rdflib.Graph import Graph
import xml.sax.saxutils
from mod_python import apache
BASE_TEST_CASE_URL = "http://rdfa.digitalbazaar.com/test-suite/test-cases/"
##
# Retrieves all of the test cases from the given test suite manifest URL and
# filters the RDF using the given status filter.
#
# @param testSuiteManifestUrl A fully-qualified URL to the RDF file that
# contains the test manifest.
# @param statusFilter The status filter, usually something like "approved",
# "onhold", or "unreviewed".
# @returns a tuple containing all of the filtered test cases including
# unit test number, title, XHTML URL, SPARQL URL and status.
def retrieveTestCases(testSuiteManifestUrl, statusFilter):
    # query the RDFa test manifest and generate test methods in the
    # RDFaOnlineTest unittest
    q = """
PREFIX test: <http://www.w3.org/2006/03/test-description#>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
SELECT ?html_uri ?sparql_uri ?title ?status ?expected_results
FROM <%s>
WHERE
{
?t dc:title ?title .
?t test:informationResourceInput ?html_uri .
?t test:informationResourceResults ?sparql_uri .
?t test:reviewStatus ?status .
OPTIONAL
{
?t test:expectedResults ?expected_results .
}
}
""" % (testSuiteManifestUrl)
    # Construct the graph from the given RDF and apply the SPARQL filter above
    # (the FROM <url> clause makes rdflib fetch the manifest itself)
    g = Graph()
    unittests = []
    for html, sparql, title, status_url, expected_results in g.query(q):
        # the URI fragment (e.g. ...#approved) is the status name
        status = status_url.split("#")[-1]
        if(status == statusFilter):
            # the unit test number is the digits in the input document name
            num = search(r'(\d+)\..?html', html).groups(1)
            # a missing expectedResults flag defaults to 'true'
            if(expected_results == None):
                expected_results = 'true'
            unittests.append((int(num[0]),
                              str(title),
                              str(html),
                              str(sparql),
                              str(status),
                              str(expected_results)))
    # Sorts the unit tests in unit test number order.
    # NOTE(review): comparator-based list.sort(cmp) is Python 2 only.
    def sorttests(a, b):
        if(a[0] < b[0]):
            return -1
        elif(a[0] == b[0]):
            return 0
        else:
            return 1
    unittests.sort(sorttests)
    return unittests
##
# Performs a given unit test given the RDF extractor URL, sparql engine URL,
# HTML file and SPARQL validation file.
#
# @param rdf_extractor_url The RDF extractor web service.
# @param sparql_engine_url The SPARQL engine URL.
# @param html_url the HTML file to use as input.
# @param sparql_url the SPARQL validation file to use on the RDF graph.
def performUnitTest(rdf_extractor_url, sparql_engine_url,
                    html_url, sparql_url, expected_result):
    """Run one unit test; True when the engine's answer contains the
    expected boolean."""
    # Build the RDF extractor URL
    rdf_extract_url = rdf_extractor_url + urllib.quote(html_url)
    # Fetch the SPARQL query and point its FROM clause at the extracted RDF
    sparql_query = urlopen(sparql_url).read()
    sparql_query = sparql_query.replace("ASK WHERE",
                                        "ASK FROM <%s> WHERE" % \
                                        (rdf_extract_url,))
    # Build the SPARQLer service URL
    sparql_engine_url += urllib.quote(sparql_query)
    sparql_engine_url += "&default-graph-uri=&stylesheet=%2Fxml-to-html.xsl"
    # Call the SPARQLer service
    sparql_engine_result = urlopen(sparql_engine_url).read()
    # TODO: Remove this hack, it's temporary until Michael Hausenblas puts
    # an "expected SPARQL result" flag into the test manifest.
    query_result = "<boolean>%s</boolean>" % (expected_result,)
    # pass when the XML answer contains the expected <boolean> value
    sparql_value = (sparql_engine_result.find(query_result) != -1)
    return sparql_value
##
# Writes all the available test cases.
#
# Writes the test case alternatives for the given URL
def writeTestCaseRetrievalError(req, tc):
    """Write a placeholder page; listing every test case is unimplemented.

    NOTE(review): the *tc* argument is currently unused."""
    req.write("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN"
"http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html version="XHTML+RDFa 1.0" xmlns="http://www.w3.org/1999/xhtml"
xmlns:xhv="http://www.w3.org/1999/xhtml/vocab#"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:test="http://www.w3.org/2006/03/test-description#">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>RDFa Test Suite: Test Cases</title>
</head>
<body>
<p>
This feature is not implemented yet, but when it is, you will be able
to view all tests cases available via this test suite.
</p>
</body>
</html>
""")
##
# Writes the test case alternatives for the given URL
#
# Writes the test case alternatives for the given URL
def writeTestCaseAlternatives(req, arguments):
    """List the XHTML1/HTML4/HTML5 and SPARQL variants of one test case."""
    # the document name is the last path component of the request
    filename = arguments.split("/")[-1]
    req.write("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN"
"http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html version="XHTML+RDFa 1.0" xmlns="http://www.w3.org/1999/xhtml"
xmlns:xhv="http://www.w3.org/1999/xhtml/vocab#"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:test="http://www.w3.org/2006/03/test-description#">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>RDFa Test Suite: Select a Test Case Document</title>
</head>
<body>
<p>
The following documents are associated with this test case:
<ul>
<li><a href="%sxhtml1/%s.xhtml">XHTML 1.1</li>
<li><a href="%shtml4/%s.html">HTML4</li>
<li><a href="%shtml5/%s.html">HTML5</li>
<li><a href="%sxhtml1/%s.sparql">SPARQL for XHTML 1.1</li>
<li><a href="%shtml4/%s.sparql">SPARQL for HTML4</li>
<li><a href="%shtml5/%s.sparql">SPARQL for HTML5</li>
</ul>
</p>
</body>
</html>""" % (BASE_TEST_CASE_URL, filename, BASE_TEST_CASE_URL, filename,
              BASE_TEST_CASE_URL, filename, BASE_TEST_CASE_URL, filename,
              BASE_TEST_CASE_URL, filename, BASE_TEST_CASE_URL, filename))
##
# Writes a test case document for the given URL.
def writeTestCaseDocument(req, path):
    """Serve a single test-case document in the requested flavor.

    *path* is the split URL: path[-2] is the flavor directory (xhtml1,
    html4, html5) and path[-1] the document name.  The stored body is
    rewritten on the fly: $TCPATH placeholders are expanded and, for the
    HTML flavors, NNNN.xhtml references are renamed to NNNN.html."""
    # NOTE(review): validDocument is assigned but never used
    validDocument = True
    version = path[-2]
    document = path[-1]
    namespaces = ""
    body = ""
    # Generate the filename that resides on disk
    filename = os.path.join(req.document_root(), "test-suite")
    if(document.endswith(".sparql")):
        filename += "/" + os.path.join("tests", document)
    else:
        # all markup flavors are generated from a shared .txt body
        filename += "/tests/%s.txt" % (document.split(".")[0])
    # Check to see if the file exists and extract the body of the document
    if(os.path.exists(filename)):
        # NOTE(review): bfile is never closed
        bfile = open(filename, "r")
        lines = bfile.readlines()
        foundHead = False
        # Don't search for the head of the document if a SPARQL document
        # was requested
        if(document.endswith(".sparql")):
            foundHead = True
        # Extract the namespaces from the top of the document and build
        # the body of the document
        for line in lines:
            if("<head" in line):
                foundHead = True
            if(not foundHead):
                namespaces += line
            else:
                body += line
    else:
        req.status = apache.HTTP_NOT_FOUND
    # Trim up the namespaces string (drop the trailing newline)
    namespaces = namespaces[:-1]
    # Create the regular expression to rewrite the contents of the XHTML and
    # SPARQL files
    tcpath = BASE_TEST_CASE_URL + version
    htmlre = re.compile("([0-9]{4,4})\.xhtml")
    tcpathre = re.compile("\$TCPATH")
    if(document.endswith(".xhtml") and version == "xhtml1"):
        req.content_type = "application/xhtml+xml"
        req.write("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" version="XHTML+RDFa 1.0"
%s>\n""" % (namespaces,))
        req.write(tcpathre.sub(tcpath, body))
        req.write("</html>")
    elif(document.endswith(".html") and version == "html4"):
        req.content_type = "text/html"
        req.write("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n""")
        req.write("""<html version="XHTML+RDFa 1.0"
%s>\n""" % (namespaces,))
        # Rename all of the test case .xhtml files to .html
        req.write(tcpathre.sub(tcpath, htmlre.sub("\\1.html", body)))
        req.write("</html>")
    elif(document.endswith(".html") and version == "html5"):
        req.content_type = "text/html"
        if(len(namespaces) > 0):
            req.write("""<!DOCTYPE html>
<html
%s>\n""" % (namespaces,))
        else:
            req.write("""<!DOCTYPE html>
<html version="HTML+RDFa 1.0">\n""")
        # Rename all of the test case .xhtml files to .html
        req.write(tcpathre.sub(tcpath, htmlre.sub("\\1.html", body)))
        req.write("</html>")
    elif(document.endswith(".sparql")):
        req.content_type = "application/sparql-query"
        if(version != "xhtml1"):
            # Rename all of the test case .xhtml files to .html
            req.write(tcpathre.sub(tcpath, htmlre.sub("\\1.html", body)))
        else:
            req.write(tcpathre.sub(tcpath, body))
    else:
        req.status = apache.HTTP_NOT_FOUND
##
# Writes the unit test HTML to the given request object.
#
# @param req the HTTP request object.
# @param test a tuple containing the unit test number, HTML file, SPARQL file,
# and the status of the test.
def writeUnitTestHtml(req, test):
    """Write the interactive HTML row (TEST link + show/hide details links)
    for one unit test tuple produced by retrieveTestCases()."""
    # unpack (number, title, html url, sparql url, status, expected result)
    num = test[0]
    title = test[1]
    html_url = test[2]
    sparql_url = test[3]
    status = test[4]
    expected_result = test[5]
    # zero-padded number used in the link to the test case source
    formatted_num = "%04i" % (num,)
    req.write("""
<p class=\"unittest\">
[<span id=\"unit-test-status-%i\">
<a id=\"unit-test-anchor-%i\"
href=\"javascript:checkUnitTest(%i,'%s','%s','%s')\">
<span id=\"unit-test-result-%i\">TEST</span></a>
</span>]
Test #%i (%s): <span id=\"unit-test-description-%i\">%s</span>
[<span id=\"unit-test-details-status-%i\">
<a href=\"javascript:showUnitTestDetails(%i, '%s', '%s')\">show details</a>
|
<a href=\"javascript:hideUnitTestDetails(%i)\">hide details</a>
|
<a href=\"http://rdfa.digitalbazaar.com/test-suite/test-cases/%s\">source</a>
</span>
]<div style=\"margin-left: 50px\" id=\"unit-test-details-%i\">
</div>
</p>
""" % (num, num, num, html_url, sparql_url, expected_result, num, num,
       status, num, title, num, num, html_url, sparql_url, num, formatted_num,
       num))
##
# Checks a unit test and outputs a simple unit test result as HTML.
#
# @param req the HTML request object.
# @param num the unit test number.
# @param rdf_extractor_url The RDF extractor web service.
# @param sparql_engine_url The SPARQL engine URL.
# @param html_url the HTML file to use as input.
# @param sparql_url the SPARQL file to use when validating the RDF graph.
def checkUnitTestHtml(req, num, rdfa_extractor_url, sparql_engine_url,
                      html_url, sparql_url, expected_result):
    """Run one unit test and write a clickable PASS/FAIL snippet as HTML.

    FIX: the original markup left the unit-test-result id attribute
    unterminated (id='unit-test-result-N>...) and the PASS branch emitted
    three closing </span> tags for two opened spans; both branches now
    write well-formed, balanced markup."""
    passed = performUnitTest(rdfa_extractor_url, sparql_engine_url,
                             html_url, sparql_url, expected_result)
    if(passed == True):
        style = "text-decoration: underline; color: #090"
        label = "PASS"
    else:
        style = "text-decoration: underline; font-weight: bold; color: #f00"
        label = "FAIL"
    # clicking the span re-runs the same unit test client-side
    req.write("<span id=\"unit-test-anchor-%s\" style=\"%s\" "
              "onclick=\"javascript:checkUnitTest(%s, '%s', '%s', '%s')\">"
              "<span id=\"unit-test-result-%s\">%s</span></span>" %
              (num, style, num, html_url, sparql_url, expected_result,
               num, label))
##
# Outputs the details related to a given unit test given the unit test number,
# RDF extractor URL, sparql engine URL, HTML file and SPARQL validation file.
# The output is written to the req object as HTML.
#
# @param req the HTTP request.
# @param num the unit test number.
# @param rdf_extractor_url The RDF extractor web service.
# @param sparql_engine_url The SPARQL engine URL.
# @param html_url the HTML file to use as input.
# @param sparql_url the SPARQL validation file to use on the RDF graph.
def retrieveUnitTestDetailsHtml(req, num, rdf_extractor_url, n3_extractor_url,
                                html_url, sparql_url):
    """Write the XHTML/N3/RDF/SPARQL sources of one test as escaped HTML."""
    # Build the RDF extractor URL
    rdf_extract_url = rdf_extractor_url + urllib.quote(html_url)
    # Build the N3 extractor URL
    n3_extract_url = n3_extractor_url + urllib.quote(html_url)
    # Get the SPARQL query
    sparql_query = urlopen(sparql_url).read()
    # Get the XHTML data
    xhtml_text = urlopen(html_url).read()
    # get the triples in N3 format
    n3_text = urlopen(n3_extract_url).read()
    # Get the RDF text
    rdf_text = urlopen(rdf_extract_url).read()
    # Get the SPARQL text
    sparql_text = sparql_query
    # every payload is escaped so it renders literally inside <pre>
    req.write("""
<h3>Test #%s XHTML</h3>
<p><pre>\n%s\n</pre></p>
<h3>Test #%s N3</h3>
<p><pre>\n%s\n</pre></p>
<h3>Test #%s RDF</h3>
<p><pre>\n%s\n</pre></p>
<h3>Test #%s SPARQL</h3>
<p><pre>\n%s\n</pre></p>
""" % (num, xml.sax.saxutils.escape(xhtml_text),
       num, xml.sax.saxutils.escape(n3_text),
       num, xml.sax.saxutils.escape(rdf_text),
       num, xml.sax.saxutils.escape(sparql_text)))
##
# The handler function is what is called whenever an apache call is made.
#
# @param req the HTTP request.
#
# @return apache.OK if there wasn't an error, the appropriate error code if
# there was a failure.
def handler(req):
    """mod_python entry point: dispatch the request URL to a service."""
    # File that runs an apache test.
    status = apache.OK
    puri = req.parsed_uri
    service = puri[-3]
    argstr = puri[-2]
    args = {}
    # Convert all of the arguments from their URL-encoded value to normal text
    if(argstr and len(argstr) > 0):
        if("&" in argstr):
            for kv in argstr.split("&"):
                key, value = kv.split("=", 1)
                args[urllib.unquote(key)] = urllib.unquote(value)
        elif("=" in argstr):
            key, value = argstr.split("=")
            args[urllib.unquote(key)] = urllib.unquote(value)
    # Serve the test-case documents / listings
    if(service.startswith("/test-suite/test-cases")):
        req.content_type = 'text/html'
        document = service.replace("/test-suite/test-cases", "").split("/")
        if(len(document) <= 2):
            writeTestCaseRetrievalError(req, document[-1])
        elif(len(document) == 3):
            if(service.endswith(".xhtml") or service.endswith(".html") or
               service.endswith(".sparql")):
                writeTestCaseDocument(req, document)
            else:
                writeTestCaseAlternatives(req, document[-1])
        else:
            req.write("ERROR DOCUMENT:" + str(document))
    # Retrieve all of the unit tests from the manifest
    elif(service == "/test-suite/retrieve-tests"):
        req.content_type = 'text/html'
        # NOTE: dict.has_key() is Python 2 only
        if(args.has_key('manifest') and args.has_key('status')):
            unittests = retrieveTestCases(args['manifest'], args['status'])
            for ut in unittests:
                writeUnitTestHtml(req, ut)
        else:
            req.write("<span style=\"text-decoration: underline; font-weight: bold; color: #f00\">ERROR: Could not retrieve test suite manifest, RDF url or status was not specified!</span>")
    # Check a particular unit test
    elif(service == "/test-suite/check-test"):
        req.content_type = 'text/html'
        if(args.has_key('id') and args.has_key('source') and
           args.has_key('sparql') and args.has_key('rdfa-extractor') and
           args.has_key('sparql-engine') and args.has_key('expected-result')):
            checkUnitTestHtml(req, args['id'], args['rdfa-extractor'],
                              args['sparql-engine'],
                              args['source'], args['sparql'],
                              args['expected-result'])
        else:
            req.write("ID, RDFA-EXTRACTOR, SPARQL-ENGINE, XHTML and " + \
                      "SPARQL not specified in request to test harness!")
            req.write("ARGS:" + str(args))
    # Retrieve the details about a particular unit test
    elif(service == "/test-suite/test-details"):
        req.content_type = 'text/html'
        if(args.has_key('id') and args.has_key('xhtml') and
           args.has_key('sparql') and args.has_key('rdfa-extractor') and
           args.has_key('n3-extractor')):
            retrieveUnitTestDetailsHtml(req, args['id'],
                                        args['rdfa-extractor'],
                                        args['n3-extractor'],
                                        args['xhtml'], args['sparql'])
        else:
            req.write("ID, XHTML, SPARQL, RDFA-EXTRACTOR or N3-EXTRACTOR " + \
                      "was not specified in the request URL to the" + \
                      "test harness!")
    else:
        req.content_type = 'text/html'
        req.write("<b>ERROR: Unknown CrazyIvan service: %s</b>" % (service,))
    return status
|
class Node(object):
    """A singly linked list node holding `data` and a `next` pointer."""

    def __init__(self, data):
        self.data = data
        # Bug fix: the attribute was named `nxt`, but Stack and Queue below
        # read and write `node.next`, so a freshly constructed node raised
        # AttributeError on first traversal. Name it `next` consistently.
        self.next = None
class Stack(object):
    """LIFO stack backed by a singly linked list of Node objects."""

    def __init__(self, top):
        self.top = top

    def pop(self):
        """Remove and return the top value, or None when the stack is empty."""
        if not self.top:
            return None
        value = self.top.data
        self.top = self.top.next
        return value

    def push(self, data):
        """Wrap `data` in a Node and make it the new top of the stack."""
        node = Node(data)
        node.next = self.top
        self.top = node

    def peek(self):
        """Return the top value without removing it, or None when empty."""
        return self.top.data if self.top else None
class Queue(object):
    """FIFO queue backed by a singly linked list of Node objects."""

    def __init__(self, start):
        # Both ends initially reference the same starting node (or None).
        self.first = self.last = start

    def enqueue(self, item):
        """Append `item` at the tail of the queue."""
        tail = Node(item)
        if not self.first:
            self.first = self.last = tail
        else:
            self.last.next = tail
            self.last = tail

    def dequeue(self):
        """Remove and return the value at the head, or None when empty."""
        if not self.first:
            return None
        head = self.first
        self.first = head.next
        return head.data
|
999,624 | a5a957cdb92325d970ccecaea0e1a8da4d3ce844 | from application import db
from sqlalchemy.sql import text
class Book(db.Model):
    """SQLAlchemy model for a book, plus raw-SQL reporting helpers.

    The static methods run hand-written SQL through ``db.engine`` and return
    plain lists of dicts (not Book instances), ready for JSON serialisation.
    """

    id = db.Column(db.Integer, primary_key=True)
    # Audit timestamps, maintained by the database on insert / update.
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
                              onupdate=db.func.current_timestamp())
    title = db.Column(db.String(144), nullable=False)
    author = db.Column(db.String(144), nullable=False)
    description = db.Column(db.String(526), nullable=True)
    isbn = db.Column(db.String(13), nullable=False)
    # One-to-many: notes written about this book (Note model defined elsewhere).
    book_notes = db.relationship("Note", backref='book', lazy=True)

    def __init__(self, title, author, description, isbn):
        self.title = title
        self.author = author
        self.description = description
        self.isbn = isbn

    @staticmethod
    def most_popular_books():
        """Return up to 10 books ordered by read count (descending).

        Only books with at least one entry in ``read_books`` are included.
        Each element: ``{"id", "title", "author", "reads"}``.
        """
        stmt = text("SELECT Book.id, Book.title, Book.author, COUNT(read_books.book_id) FROM Book"
                    " LEFT JOIN read_books ON Book.id = read_books.book_id"
                    " GROUP BY Book.id"
                    " HAVING COUNT(read_books.book_id) > 0"
                    " ORDER BY 4 DESC"
                    " LIMIT 10")
        res = db.engine.execute(stmt)
        response = []
        for row in res:
            response.append({"id":row[0], "title":row[1], "author":row[2], "reads":row[3]})
        return response

    @staticmethod
    def most_notes():
        """Return up to 10 books ordered by note count (descending).

        Each element: ``{"id", "title", "author", "notes"}``.
        """
        stmt = text("SELECT Book.id, Book.title, Book.author, COUNT(Note.book_id) FROM Book"
                    " LEFT JOIN Note ON Note.book_id = Book.id"
                    " GROUP BY Book.id"
                    " HAVING COUNT(Note.book_id) > 0"
                    " ORDER BY 4 DESC"
                    " LIMIT 10")
        res = db.engine.execute(stmt)
        response = []
        for row in res:
            response.append({"id":row[0], "title":row[1], "author":row[2], "notes":row[3]})
        return response

    @staticmethod
    def most_recent():
        """Return the 5 most recently created books (newest first)."""
        stmt = text("SELECT Book.id, Book.title, Book.author, Book.date_created FROM Book"
                    " GROUP By Book.id"
                    " ORDER BY 4 DESC "
                    " LIMIT 5")
        res = db.engine.execute(stmt)
        response = []
        for row in res:
            response.append({"id":row[0], "title":row[1], "author":row[2]})
        return response

    @staticmethod
    def basic_search(search_term):
        """Case-insensitive LIKE search over title and author.

        NOTE(review): no '%' wildcards are added here, so without them in
        `search_term` this matches only exact (case-insensitive) strings --
        confirm callers supply their own wildcards.
        """
        stmt = text("SELECT Book.id, Book.title, Book.author FROM Book"
                    " WHERE lower(Book.title) LIKE lower(:search_term)"
                    " OR lower(Book.author) LIKE lower(:search_term)").params(search_term=search_term)
        res = db.engine.execute(stmt)
        response = []
        for row in res:
            response.append({"id":row[0], "title":row[1], "author":row[2]})
        return response

    @staticmethod
    def book_notes_data(book_id):
        """Return every note for `book_id` with its author's id and name."""
        stmt = text("SELECT Note.note, Note.account_id, Account.name FROM Note"
                    " LEFT JOIN Account ON Note.account_id = Account.id"
                    " WHERE Note.book_id = :book_id").params(book_id=book_id)
        res = db.engine.execute(stmt)
        response = []
        for row in res:
            response.append({"note":row[0], "user_id":row[1], "user_name":row[2]})
        return response
|
999,625 | 0dbebcbfcb84d2a6deabea913be6f2e751b99de1 | #4.6
#Write a program to prompt the user for hours and rate per hour using input to compute gross pay.
#Pay should be the normal rate for hours up to 40 and time-and-a-half for the hourly rate for all hours worked above 40 hours.
#Put the logic to do the computation of pay in a function called computepay() and use the function to do the computation.
#The function should return a value. Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75).
#You should use input to read a string and float() to convert the string to a number.
#Do not worry about error checking the user input unless you want to - you can assume the user types numbers properly.
#Do not name your variable sum or use the sum() function.
#solution
#--------
def computepay(h, r):
    """Return gross pay for `h` hours at hourly rate `r`.

    Hours over 40 earn time-and-a-half: base pay for all hours plus an
    extra 0.5 * r for each hour above 40 (equivalent to 1.5 * r overtime).
    e.g. computepay(45, 10.50) == 498.75.
    """
    # Bug fix: the original tested the module-level *string* `H` (`if H>40:`),
    # which raises TypeError on Python 3 and ignored the `h` parameter.
    if h > 40:
        regular = h * r
        overtime_premium = (h - 40.0) * (r * 0.5)
        return regular + overtime_premium
    return h * r
# Prompt for hours and rate (input() returns strings), convert to numbers,
# then compute and print the gross pay.
H=input("Enter Hours:")
R=input("Enter Rate:")
h=int(H)
r=float(R)
p=computepay(h,r)
print("Pay",p)
#Your Output
#-----------
#498.75
#Desired Output
#--------------
#498.75
|
999,626 | 6b6804906b67f6728ba12bab3ba51f7a6b66f534 | from .fluxdens import airgap, airgap_fft
from .bch import torque, torque_fft, force, force_fft,\
fluxdens_surface, winding_current, winding_flux, \
voltage, voltage_fft, transientsc_demag, \
i1beta_torque, i1beta_ld, i1beta_lq, i1beta_psid, i1beta_psiq, i1beta_psim, i1beta_up, \
idq_ld, idq_lq, idq_psid, idq_psim, idq_psiq, idq_torque
from .char import mtpa, mtpv, characteristics, efficiency_map, losses_map
from .forcedens import forcedens, forcedens_surface, forcedens_fft
from .nc import spel, mesh, demag, demag_pos, \
flux_density, max_flux_density, min_flux_density, \
airgap_flux_density_pos, loss_density
from .mcv import mcv_hbj, mcv_muer, felosses
from .phasor import i1beta_phasor, iqd_phasor, phasor
from .wdg import mmf, mmf_fft, zoneplan, winding_factors, winding
|
999,627 | 89db014a5157795a14eae486e39a209c1aa43655 | '''
1528. Shuffle String
Given a string s and an integer array indices of the same length.
The string s will be shuffled such that the character at the ith position moves to indices[i] in the shuffled string.
Return the shuffled string.
Input: s = "codeleet", indices = [4,5,6,7,0,2,1,3]
Output: "leetcode"
Explanation: As shown, "codeleet" becomes "leetcode" after shuffling.
'''
class Solution:
    """LeetCode 1528 -- rebuild a string from per-character target positions."""

    def restoreString(self, s: str, indices: List[int]) -> str:
        """Return `s` rearranged so that s[i] lands at position indices[i]."""
        shuffled = [''] * len(indices)
        for src, dst in enumerate(indices):
            shuffled[dst] = s[src]
        return ''.join(shuffled)
|
999,628 | 481d7e6b8c0cd005c8144c1d404f39874f290b59 | import askers
import relayer
import updates
import convert
import fields
import properties
import representations
import term
from term import Term as T
from ipdb import set_trace as debug
import dispatch
import strings
#TODO I should be allowed to have representation changes interleaved with updates?
#FIXME the explicit references to 'bindings' probably aren't kosher...
#FIXME I'm still not handling the references vs. object level on questions well
#(I think that everything is kosher, but I am just equivocating on where to put the conversions)
#FIXME There is a serious risk that I'm making some call to a dispatcher directly,
#and that I'm ending up using a dirty asker rather than a fresh one
#I don't know what I should do to track that better,
#but I think that I should probably do something
#FIXME I want the variables to be represented literally I think
#needing to unquote them continues to drive home the awkwardness of the current arrangement
#TODO this can easily take quadratic time in cases where it could be done in linear time,
#because you could pool up several updates and just turn them into replacements
#TODO should I throw an error if the user gets two updates without refreshing in the middle?
class ContextUpdater(relayer.Relayer):
def __init__(self, *args, **kwargs):
super(ContextUpdater, self).__init__(*args, **kwargs)
self.internal = {}
self.changed = {}
self.original = {}
self.current = {}
self.updates = {}
self.source = {}
if self.Q is not None:
self.question_bindings = self.Q.question.bindings
for k, v in self.question_bindings.iteritems():
self.tag(in_question(T.from_str(k)), v)
#FIXME I think that I need to think more about simple updates vs. proper updates
def tag(self, source, v):
internal = len(self.internal)
self.internal[v.id] = internal
self.original[internal] = v
self.changed[internal] = False
self.current[internal] = v
self.updates[internal] = updates.trivial()
self.source[internal] = source
#FIXME this should work even if v is a child, or whatever...
#I should also use a better system for tracking these things
#(and for deciding whether updates should propagate)
def refresh(self, v):
if v.id in self.internal:
internal = self.internal[v.id]
result = self.current[internal]
#FIXME this can cause terrible trouble if two different things being tracked
#have the same id...
#my current approach will probably have a hard time dealing with that
self.internal[result.id] = internal
return result
else:
raise ValueError("refreshing an untagged value")
#TODO check for items that were made out of things you care about
#if one of them is updated, see if it implies an update to something you care about
#TODO if B is a field of A and A is updated, propagate the change to B
def update(self, change, v, repr_change=None):
if repr_change is None:
repr_change = updates.lift(change)
default_repr_change = True
else:
default_repr_change = False
if v.id in self.internal:
internal = self.internal[v.id]
#if we are updating stale information...
#apply the update, but not any representation change
#(if info is stale, probably just a representation change...)
if v.id != self.current[internal].id:
if change.head == updates.trivial.head:
return True
else:
repr_change = updates.lift(change)
self.updates[internal] = updates.compose(self.updates[internal], change)
self.current[internal] = convert.unquote(
self,
self.ask_firmly(updates.apply_update(
repr_change,
representations.quote(self.current[internal])
))
)
self.changed[internal] = True
return True
else:
#FIXME think more about how this propagation ought to work
#it seems like something is going oddly w.r.t levels of abstraction
#also should I propagate back across field accesses? I don't know...
#also this whole thing seems kind of like a mess, I don't expect it to work consistently
def propagate_back(s):
if s.head == term.because.head:
return propagate_back(s['operation'])
elif s.head == term.explain.head:
return propagate_back(s['operation']) or propagate_back(s['prior'])
elif s.head == term.accessing.head:
if change.head == updates.trivial.head:
parent = s['term']
binding = s['binding']
return self.update(
updates.trivial(),
parent,
repr_change=updates.apply_to_field(
representations.referent_of(T.from_str(binding)),
repr_change
).explain("tracing backwards from [v]", v=v)
)
else:
return False
elif s.head == askers.answering.head:
Q = s['Q']
if Q.head == fields.get_field.head:
parent = Q['object']
field = Q['field']
return self.update(
updates.apply_to_field(field, change),
parent,
repr_change=updates.apply_to_field(updates.lift_field(field), repr_change)
)
elif Q.head == convert.convert.head:
previous = Q['value']
return self.update(
change,
previous,
repr_change=None
)
return False
return propagate_back(v.source)
def incoming_update(self, source, Q, update, repr_change=None):
if source.head == in_question.head:
referenced = Q.question[strings.to_str(self, source['s'])]
self.update(update, referenced, repr_change)
def process_response(self, response, Q, *args, **kwargs):
#FIXME seems bad to redefine the dispatcher each time...
update_handler = dispatch.SimpleDispatcher("contextual response processor", ("response",))
@update_handler(context_update.head)
def process_update(source, update, repr):
repr_change = updates.become(repr)
self.incoming_update(source, Q, update, repr_change=repr_change)
return properties.trivial()
result = update_handler.dispatch(response)
if result is None:
result = super(ContextUpdater, self).process_response(response, Q, *args, **kwargs)
return result
def set_repr(self, v, new_repr):
self.update(updates.trivial(), v, repr_change=updates.become(new_repr))
def reply(self, *args, **kwargs):
reply = super(ContextUpdater, self).reply(*args, **kwargs)
if self.Q is None:
return reply
responses = []
for internal in self.internal.values():
if self.changed[internal]:
update = self.updates[internal]
source = self.source[internal]
responses.append(context_update(
source,
update,
representations.quote(self.current[internal])
))
return reply.add(properties.combine(responses))
class UntaggedUpdateError(ValueError):
pass
context_update = term.simple(
"the value of [source] at the referenced question should be updated by applying [update], "
"and the result should be represented as [repr]",
"source", "update", "repr"
)
in_question = term.simple("the object referred to as [s] in the referenced question", "s")
|
999,629 | cd575be7f20f3ca2b1f9d8144e9422c19b6875aa | # Given a linked list, rotate the list to the right by k places, where k is non-negative.
# Example 1:
# Input: 1->2->3->4->5->NULL, k = 2
# Output: 4->5->1->2->3->NULL
# Explanation:
# rotate 1 steps to the right: 5->1->2->3->4->NULL
# rotate 2 steps to the right: 4->5->1->2->3->NULL
# Example 2:
# Input: 0->1->2->NULL, k = 4
# Output: 2->0->1->NULL
# Explanation:
# rotate 1 steps to the right: 2->0->1->NULL
# rotate 2 steps to the right: 1->2->0->NULL
# rotate 3 steps to the right: 0->1->2->NULL
# rotate 4 steps to the right: 2->0->1->NULL
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """LeetCode 61 -- rotate a singly linked list to the right by k places."""

    def rotateRight(self, head: ListNode, k: int) -> ListNode:
        """Entry point; delegates to the O(n) single-cut solution."""
        return self.rotateRight_sol2(head, k)

    def rotateRight_sol1(self, head: ListNode, k: int) -> ListNode:
        """Rotate by moving the last node to the front k times.

        O(n*k): brute force; exceeds the time limit for large k.
        """
        # brute force solution
        # time limit exceeded
        count = 0
        while count < k:
            prevToLast = self.findPrevToLast(head)
            if prevToLast is None:
                # there is only one element
                return head
            last = prevToLast.next
            # Detach the last node and splice it in front of the head.
            prevToLast.next = last.next
            last.next = head
            head = last
            count = count+1
        return head

    def findPrevToLast(self, head: ListNode) -> ListNode:
        """Return the second-to-last node, or None for a 0/1-element list."""
        curr = head
        if curr is None:
            return None
        while curr.next is not None and curr.next.next is not None:
            curr = curr.next
        # curr could be head if head is the only node. then curr.next is None
        # but if more then 1 node then curr is always second last node
        if curr.next is None:
            return None
        return curr

    def rotateRight_sol2(self, head: ListNode, k: int) -> ListNode:
        """Rotate in O(n): measure the list, then cut once and re-link."""
        # k could be >= length of list. In that case effective rotation is only
        # k modulo (length of list). k==len(list) means after rotation its the same
        # list as original
        if head is None or head.next is None:
            return head
        length = 1
        last = head
        # find length of list (last ends up pointing at the final node)
        while last.next is not None:
            last = last.next
            length += 1
        # re-calc k so that k < length
        k = k % length
        if k == 0:
            # same list as original
            return head
        # now right shifting k nodes also means cutting list at
        # length - k - 1 node from front and adding remaining nodes at
        # head. So get to the cutting point
        cutting_point = length - k - 1
        curr = head
        while cutting_point > 0:
            curr = curr.next
            cutting_point = cutting_point - 1
        # now list should end at curr node and remaining curr+1 to last node
        # should add to the head
        last.next = head
        head = curr.next
        curr.next = None
        return head
|
999,630 | e8011f792a8c39d8a0be6ff40586e0911406d1ee | from django.shortcuts import render
from django.http import HttpResponse
from django.views import generic
from django.urls import reverse
from .models import webContent, Artpiece
def home(request):
# think about making this another static .py file to change later
# also add in my welcome message to it instead of hard coded?
content = webContent()
context = content.getDictionary()
return render(request, 'portfolio/home.html', context)
def sketchbook(request):
return HttpResponse("This is the sketchbook page.")
def about(request):
content = webContent()
context = content.getDictionary()
return render(request, 'portfolio/about.html', context)
def contact(request):
content = webContent()
context = content.getDictionary()
return render(request, 'portfolio/contact.html', context)
class PortfolioView(generic.ListView):
template_name = 'portfolio/bootStrapPortfolio.html'
context_object_name = 'image_list'
model = Artpiece
# def portfolio(request):
# return render(request, 'portfolio/portfolio.html')
# Create your views here.
# make a template that is returned with a context
# import anything i'll need in the html templates here in the views
|
999,631 | a3614272c8925e97e549b426733c18b94418429b | from django.contrib import admin
from dal import autocomplete
from .models import Topic
from .forms import TopicForm
# Register your models here.
class TopicAdmin(admin.ModelAdmin):
form = TopicForm
fieldsets = (
(None, {
'fields': ('title', 'slug', 'parent_topic', 'description', 'tags')
}),
('Status', {
'fields': ('published',),
}),
('Properties', {
'fields': ('logo_url',),
}),
('Metadata', {
'classes': ('collapse',),
'fields': ('created', 'updated'),
}),
)
list_display = ('title', 'parent_topic', 'subtopics_count', 'talks_count', 'updated')
list_filter = ['created', 'updated']
search_fields = ['title',]
date_hierarchy = 'created'
ordering = ['-updated']
readonly_fields = ('logo_url', 'talks_count', 'subtopics_count')
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Topic, TopicAdmin) |
999,632 | 965ff8ec6ef19347d5208f82c8fb39d65402ae9a | #!/usr/bin/env python3
"""
用于测试客户端修改属性
"""
import socket
import time
HOST = 'localhost'
PORT = 9998
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
# send_data=b'12.12,325.40,83.71,15.52,38.63,76.51,193.03,258.50,258.70#13.11,331.50,83.46,16.38,40.52,79.33,196.36,258.30,258.50#14.08,337.10,83.63,17.21,42.36,85.14,211.88,258.70,258.50#15.10,342.70,83.08,18.18,44.35,89.91,221.76,258.70,258.60'
send_data=b'Polling'
s.sendall(send_data)
print("client is begining ")
while True:
try:
print("begin to waitting")
# s.sendall(send_data)
data = s.recv(1024)
if not data:
break
if "####" in data.decode():
print("get it")
s.sendall(b":291f2218-78cd-11ea-8a09-34e6d76a8659#Done")
print('Received', repr(data))
except KeyboardInterrupt:
s.close()
print('Keyboard Interrupt')
break
if __name__ == "__main__":
main()
# print('Received', repr(data)) |
999,633 | d6d0c2ef5e399815432ded0580c47e312ad24b1e | from dateutil.relativedelta import relativedelta
from django.contrib.auth import get_user_model
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from softdelete.models import SoftDeleteModel
User = get_user_model()
class TimeStampedModel(models.Model):
"""
Modelo abstracto que implementa los campos de seguimiento
de cambios al modelo.
"""
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class OwnedModel(models.Model):
"""
Modelo abstracto que implementa el campo owner(propietario) del recurso.
"""
owner = models.ForeignKey(
User,
on_delete=models.CASCADE,
help_text=_('Usuario propietario del recurso')
)
class Meta:
abstract = True
class Person(TimeStampedModel, SoftDeleteModel):
"""
Modelo abstracto que encapsula toda la información de una persona.
"""
DOCUMENT_TYPE_OTR = '00'
DOCUMENT_TYPE_DNI = '01'
DOCUMENT_TYPE_EXT = '04'
DOCUMENT_TYPE_RUC = '06'
DOCUMENT_TYPE_PSS = '07'
DOCUMENT_TYPE_PNC = '11'
DOCUMENT_TYPE_CHOICES = (
(DOCUMENT_TYPE_OTR, 'OTROS'),
(DOCUMENT_TYPE_DNI, 'L.E. / DNI'),
(DOCUMENT_TYPE_EXT, 'CARNET EXT.'),
(DOCUMENT_TYPE_RUC, 'RUC'),
(DOCUMENT_TYPE_PSS, 'PASAPORTE'),
(DOCUMENT_TYPE_PNC, 'P. NAC.')
)
document_type = models.CharField(
_('Document Type'),
max_length=2,
choices=DOCUMENT_TYPE_CHOICES,
default=DOCUMENT_TYPE_DNI,
blank=True,
null=True,
help_text=_('Tipo de documento')
)
document_number = models.CharField(
_('Document Number'),
max_length=15,
blank=True,
null=True,
help_text=_('Número de documento')
)
first_name = models.CharField(
_('First Name'),
max_length=50,
blank=True,
null=True,
help_text=_('Nombres')
)
last_name = models.CharField(
_('Last Name'),
max_length=50,
blank=True,
null=True,
help_text=_('Apellidos')
)
birthdate = models.DateField(
_('Birthdate'),
blank=True,
null=True,
help_text=_('Fecha de nacimiento')
)
mobile_phone_number = models.CharField(
_('Mobile phone number'),
max_length=15,
blank=True,
null=True,
help_text=_('Número de celular')
)
home_phone_number = models.CharField(
_('Home phone number'),
max_length=15,
blank=True,
null=True,
help_text=_('Número de teléfono casa')
)
work_phone_number = models.CharField(
_('Work phone number'),
max_length=15,
blank=True,
null=True,
help_text=_('Número de oficina o centro de trabajo')
)
nationality = models.CharField(
_('Nationality'),
max_length=30,
blank=True,
null=True,
help_text=_('Nacionalidad')
)
class Meta:
abstract = True
unique_together = ('document_type', 'document_number')
def __str__(self):
return '{} {}'.format(self.document_number, self.last_name)
def get_full_name(self, separator=' ', order=None):
if order == 'first':
return '{}{}{}'.format(self.first_name, separator, self.last_name)
return '{}{}{}'.format(self.last_name, separator, self.first_name)
@property
def age(self):
return relativedelta(timezone.now().date(), self.birthdate)
class Ubigeo(models.Model):
"""
Modelo que maneja la información de ubigeo
"""
code = models.CharField(
_('Code'),
max_length=6,
primary_key=True,
help_text=_('Código de ubigeo')
)
department = models.CharField(
_('Department'),
max_length=30,
help_text=_('Departamento')
)
province = models.CharField(
_('Province'),
max_length=30
)
district = models.CharField(
_('District'),
max_length=30,
help_text=_('Distrito')
)
class Meta:
ordering = (
'department',
'province',
'district'
)
def __str__(self):
return '{} - {} - {}'.format(
self.department, self.province, self.district
)
|
999,634 | 3a2627b477e2717eb63afd9371928725665cfc4e | ../../scripts/convertMetadataRCDB.py |
999,635 | c21759c88ff4ae8e79004551231d020cf2a6961f | from google.appengine.ext import ndb
import json
from model.package import Package
class SerializeHelper(object):
@staticmethod
def serialize_to_dict(ndb_model_instance, exclude_properties=[]):
result = ndb_model_instance.to_dict(exclude=exclude_properties)
if ndb_model_instance.key:
result['id'] = ndb_model_instance.key.id()
if type(ndb_model_instance) is Package:
for day in result['days']:
day['date'] = day['date'].strftime("%Y-%m-%d")
for property_name in ndb_model_instance._properties:
# If it is a key property, we neet to fetch it from the DB
# and set it in the dictionary to be returned
if type(ndb_model_instance._properties[property_name]) == ndb.KeyProperty:
if type(result[property_name]) == list:
list_of_fetched_ndb_entities = ndb.get_multi(result[property_name])
result[property_name] = SerializeHelper.serialize_list_to_list_of_dict(list_of_fetched_ndb_entities)
else:
result[property_name] = result[property_name].id()
return result
@staticmethod
def serialize_to_json(ndb_model_instance):
return json.dumps(SerializeHelper.serialize_to_dict(ndb_model_instance))
@staticmethod
def serialize_list_to_list_of_dict(ndb_model_instances, exclude_properties=[]):
result = []
for instance in ndb_model_instances:
instance_dict = SerializeHelper.serialize_to_dict(instance, exclude_properties)
result.append(instance_dict)
return result
@staticmethod
def deserialize_entity_from_str(str, ndb_model_class):
return SerializeHelper.deserialize_entity_from_json_object(json.loads(str), ndb_model_class)
@staticmethod
def deserialize_entity_from_json_object(json_dictionary, ndb_model_class):
if type(ndb_model_class) == str:
ndb_model_class = eval(ndb_model_class)
new_entity = ndb_model_class()
for sent_property_name, sent_property_value in json_dictionary.iteritems():
if sent_property_name == 'id':
new_entity.key = ndb.Key(ndb_model_class,sent_property_value)
elif sent_property_name in ndb_model_class._properties:
value_to_set = sent_property_value
if type(ndb_model_class._properties[sent_property_name]) == ndb.KeyProperty:
if type(sent_property_value) == list:
items = []
# If it is a key property,
# we need to create the related entity, and set the key property of the related entity
# in the main entity
for item_to_deserialize in sent_property_value:
related_entity = SerializeHelper.deserialize_entity_from_json_object(item_to_deserialize,
ndb_model_class._properties[sent_property_name]._kind)
related_entity.put()
items.append(related_entity.key)
value_to_set = items
else:
value_to_set = SerializeHelper.deserialize_entity_from_json_object(sent_property_value,
ndb_model_class._properties[sent_property_name]._kind)
setattr(new_entity, sent_property_name, value_to_set)
else:
raise Exception("Unknown property sent: %s" % sent_property_name)
return new_entity |
def my_func(s):
    """Sum the digit tokens of iterable `s`, stopping at the first "q".

    Side effect: sets the module-level `flag` to True when a "q" token is
    encountered (the caller's loop uses it as a stop signal), otherwise
    leaves it False. Processing stops at the "q" token, so tokens after
    it are not counted.
    """
    global flag
    # Renamed the accumulator from `sum`, which shadowed the builtin.
    total = 0
    flag = False
    for token in s:
        if token.isdigit():
            total += int(token)
        if token == "q":
            flag = True
            break
    return total
# Read space-separated tokens line by line, keep a running total of the
# digit tokens, print it after every line, and stop once a "q" token has
# been seen (my_func sets the global `flag`).
total = 0
while True:
    my_list = input("Stroka: ").split()
    total += my_func((my_list))
    print(total)
    if flag:
        break
|
999,637 | 7fe897a61c85e1dab110cab381adff881d9728eb | import numpy as np
import pandas as pd
import re
import string
import nltk
import matplotlib.pyplot as plt
import tensorflow as tf
import seaborn as sns
from sklearn.feature_extraction import text
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
from sklearn.metrics import confusion_matrix
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation,SimpleRNN, LSTM,SpatialDropout1D
from keras.layers.embeddings import Embedding
from keras.layers.wrappers import Bidirectional
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
def remove_url(text):
    """Replace every http(s):// or www. URL in `text` with a single space."""
    return re.sub(r"https?://\S+|www\.\S+", r" ", text)
def remove_html(text):
    """Replace every HTML tag (non-greedy <...>) in `text` with a space."""
    return re.sub('<.*?>', r" ", text)
def remove_num(texts):
    """Delete every run of digits from `texts`."""
    return re.sub(r'\d+', '', texts)
def remove_punc(text):
    """Delete ASCII punctuation from `text` (spaces are preserved)."""
    translation = str.maketrans(' ', ' ', string.punctuation)
    return text.translate(translation)
files = open("data/reviews.ft.txt", "r")
files = files.readlines()
num_train = 40000
num_test = 10000
train_file = [x for x in files[:num_train]]
test_file = [x for x in files[num_train:num_test + num_train]]
train_labels = [0 if x.split(' ')[0] == '__label__1' else 1 for x in train_file]
train_sentences = [x.split(' ', 1)[1][:-1].lower() for x in train_file]
test_labels = [0 if x.split(' ')[0] == '__label__1' else 1 for x in test_file]
test_sentences = [x.split(' ', 1)[1][:-1].lower() for x in test_file]
train = pd.DataFrame({'text':train_sentences,'label':train_labels})
test = pd.DataFrame({'text':test_sentences,'label':test_labels})
train.describe()
test.describe()
train['text']=train.text.map(lambda x:remove_url(x))
train['text']=train.text.map(lambda x:remove_html(x))
train['text']=train.text.map(lambda x:remove_punc(x))
train['text']=train['text'].map(remove_num)
test['text']=test.text.map(lambda x:remove_url(x))
test['text']=test.text.map(lambda x:remove_html(x))
test['text']=test.text.map(lambda x:remove_punc(x))
test['text']=test['text'].map(remove_num)
max_length=100
vocab_size=12000
embedding_dim=64
trunc_type="post"
oov_tok="<OOV>"
padding_type="post"
tokenizer = Tokenizer(num_words=vocab_size,oov_token=oov_tok)
tokenizer.fit_on_texts(train['text'])
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(train['text'])
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(test['text'])
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
model = Sequential()
model.add(Embedding(vocab_size, embedding_dim, input_length=max_length))
model.add(SpatialDropout1D(0.2))
model.add(Bidirectional(LSTM(256, dropout=0.2)))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
adam=Adam(lr=0.0001)
model.compile(loss='binary_crossentropy',optimizer=adam,metrics=['accuracy'])
history=model.fit(training_padded,train['label'], epochs=15, batch_size=256,verbose = 1,callbacks = [EarlyStopping(monitor='val_accuracy', patience=2)],validation_data=(testing_padded,test['label']))
pred = model.predict(testing_padded)
mat = confusion_matrix(test['label'], np.where(pred > 0.5, 1, 0))
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
plt.savefig("LTSM_con.png")
|
999,638 | 2f772a28852f67144363e9729d5c08d7540858f7 | from PIL import Image
img=Image.open('IMG.PNG')
im=img.convert('RGB')
im.save('IMG.pdf')
|
999,639 | 676b6add9021f5fd9b4a45a3a4dd64c156b89e8c | #tuple_nesting_example_005.py
# Tuples can contain nested elements
i_am_a_nested_tuple = ((1,2,3), (4,5,6), (7,8,9))
print ( i_am_a_nested_tuple )
# Real world example
#
dwarves = ( (1, "Dopey"), (2, "Grumpy") ,(3, "Sneezy"), ( 4, "Bashful"), (5, "doc"), ( 6, "Happy"),( 7, "sleepy"))
print (dwarves)
# Using the standard for loop to iterate through the tuple
for dwarf in dwarves:
print (dwarf)
#
for xy in dwarf:
print ( xy )
#
print ("Iterating using the for loop with range(len())")
for i in range(len(dwarves)):
print(dwarves[i])
i += 1
# Comprehension
#
print ("Using List comprehension - Single line statement, amazing isn't it ")
[print(i) for i in dwarves]
|
999,640 | 44cecdd3635058727c524ea11eec83fa1107c64d | # Generated by Django 2.2 on 2020-05-17 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mysite', '0009_category_image'),
]
operations = [
migrations.AlterField(
model_name='category',
name='image',
field=models.ImageField(default='', upload_to='category', verbose_name='Изображение'),
),
]
|
def moveStone(stones:list):
    """Return [min, max] moves to make `stones` occupy consecutive positions.

    LeetCode 1040 "Moving Stones Until Consecutive II". A move takes an
    endpoint stone and places it in an unoccupied slot between the others.
    NOTE: sorts the caller's list in place.
    """
    stones.sort()
    n = len(stones)
    # Max moves: every empty slot inside the span can be filled one per move,
    # except the slots given up on the first move (the smaller of the two
    # end gaps is skipped, since moving an endpoint abandons its gap).
    mx = stones[n-1] - stones[0] -n+1
    mx -= min(stones[1]-stones[0]-1,stones[n-1]-stones[n-2]-1)
    mi = mx
    right = 0
    # Min moves: slide a window of width n over the sorted stones and count
    # how many stones are missing from the best window.
    for left in range(n):
        # find the stones already in this windows([le, le + n - 1])
        while right<n-1 and stones[right+1] - stones[left]+1<=n:
            right+=1
        unoccupied = n - (right-left+1)
        # Special case: n-1 stones already consecutive and the last stone far
        # away -- then 2 moves are required, not 1 (an endpoint stone cannot
        # be placed directly adjacent to the packed run in a single move).
        if right - left + 1 == n - 1 and stones[right] - stones[left] + 1 == n - 1:
            unoccupied = 2
        mi = min(mi,unoccupied)
    return [mi,mx]
lisss = [int(x) for x in input().split(",")]
print(moveStone(lisss)) |
999,642 | 6f1e931a7860756de8f2e49d8c7572adb623c3f3 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#########################################
# Function: To get the io state
# Usage: python get_io_status.py
# Date: 2015-11-25
# Version: 1.0
#########################################
import subprocess
import sys
def io_stats(device,item):
    """Print one iostat metric for `device` (Python 2 script).

    `item` selects the column: rps/wps/avgqu-sz/await/svctm/util come from
    `iostat -dkx` (extended stats); tps comes from `iostat -dk`.
    NOTE(review): `device` is interpolated into a shell=True command --
    shell injection risk if it ever comes from untrusted input.
    """
    child1 = 'iostat -dkx | grep '+ device
    child2 = 'iostat -dk | grep '+ device
    stdout, stderr = subprocess.Popen(child1,shell=True,stdout=subprocess.PIPE).communicate()
    data = stdout.split()
    stdout2, stderr2 = subprocess.Popen(child2,shell=True,stdout=subprocess.PIPE).communicate()
    data2 = stdout2.split()
    # Column positions follow the iostat -dkx / -dk output layout;
    # negative indices are used so trailing columns survive format changes.
    if item == 'rps':
        print data[3]
    elif item == 'wps':
        print data[4]
    elif item == 'avgqu-sz':
        print data[-4]
    elif item == 'await':
        print data[-3]
    elif item == 'svctm':
        print data[-2]
    elif item == 'util':
        print data[-1]
    elif item == 'tps':
        print data2[1]

# CLI entry: expects exactly two arguments (device and metric name).
if len(sys.argv) == 3:
    io_stats(sys.argv[1],sys.argv[2])
else:
    print "Usage: " + sys.argv[0] + ' device rps|wps|avgqu-sz|await|svctm|util|tps '
|
999,643 | 3b4fad50f980f3cab2d1653fef6a13dcb7e80277 | import os
CDN_DOMAIN = os.getenv("CDN_DOMAIN", None)
CDN_HTTPS = True
RATELIMIT_HEADERS_ENABLED = True
RATELIMIT_HEADER_RETRY_AFTER_VALUE = "delta-seconds"
|
999,644 | f2a4a6fb9deefed4c07e71fe86ead90bd6073b90 | from tensorflow.keras.preprocessing import image
from tensorflow.keras import models
import cv2
import numpy as np
import argparse
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications import mobilenet_v2
# CLI: classify an image as cat/dog with a saved Keras model and overlay
# the label plus confidence on the displayed image.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
args = vars(ap.parse_args())

model = models.load_model('model/model.mod')

img = cv2.imread(args["image"])
# Preprocess: resize to the model's input size, BGR->RGB, MobileNetV2
# scaling, then add the batch axis.
image = cv2.resize(img, (128, 128))
image = (image[...,::-1].astype(np.float64))
image = mobilenet_v2.preprocess_input(image)
image = np.expand_dims(image, axis=0)

# FIX: run inference once and reuse the prediction vector (the original
# called model.predict twice on the same input).
preds = model.predict(image)[0]
a = np.argmax(preds)          # class index: 0 = cat, 1 = dog
b = np.amax(preds) * 100      # confidence in percent
c = "{:.2f} %".format(b)
category = 'cat' if a == 0 else 'dog'

img = cv2.putText(img, category + ': ' + c , (20,25),
    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
cv2.imshow('img',img)
cv2.waitKey(0)
|
999,645 | 545d82e5a8d6a38a0ffed9832cd54ecccb9ca607 | a = input()
b = input()
# Per-digit occurrence counts for the two input numbers. Only digits 1-9
# are keyed -- a '0' digit in the input would raise KeyError below
# (presumably inputs never contain zeros; verify).
a_dict, b_dict = {x:0 for x in range(1,10)}, {x:0 for x in range(1,10)}
for c in a:
    a_dict[int(c)] += 1
for c in b:
    b_dict[int(c)] += 1
def find_match(num=10):
    """Return the first pair (d, num-d) of digits 1-9 such that digit d
    occurs in the first number and num-d occurs in the second.

    Reads the module-level a_dict / b_dict counters; returns None when no
    such pair exists.
    """
    for digit in range(1, 10):
        complement = num - digit
        if a_dict[digit] and b_dict[complement]:
            return digit, complement
    return None
|
999,646 | d8ac7030260ac0f30ab55947e5e75dea1f582e9c | students = [
{'name': 'Rezso', 'age': 9.5, 'candies': 2},
{'name': 'Gerzson', 'age': 10, 'candies': 1},
{'name': 'Aurel', 'age': 7, 'candies': 3},
{'name': 'Zsombor', 'age': 12, 'candies': 5}
]
# Task 1: write a function that takes a list of students and prints
# the names of those who have more than 4 candies.
# Task 2: write a function that takes a list of students and prints
# how many candies they have on average.
def candies_filter(list):
    """Print the names of students who have more than 4 candies.

    Each student is a dict with at least 'name' and 'candies' keys.
    Also returns the list of matching names -- additive and
    backward-compatible (the original returned None, which callers ignore).
    The parameter keeps its original name for keyword-call compatibility,
    even though it shadows the builtin.
    """
    # Direct key access replaces the original's nested scans over .items().
    names = [student['name'] for student in list if student['candies'] > 4]
    for name in names:
        print(name)
    return names
candies_filter(students)
def candies_average(list):
    """Print the average candy count over all students.

    Raises ZeroDivisionError for an empty list, matching the original.
    Also returns the average -- additive, backward-compatible.
    The parameter keeps its original (builtin-shadowing) name so keyword
    calls keep working.
    """
    average = sum(student['candies'] for student in list) / len(list)
    print(average)
    return average
candies_average(students)
|
999,647 | f264612b85678c423b5cafec0c4d128ddf9a9b7f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions used for training a CNN."""
import keras as ks
import os
import numpy as np
from keras import backend as K
from input_utilities import *
from generator import *
from plot_scripts import plot_input_plots
from plot_scripts import plot_traininghistory
from plot_scripts import plot_validation
class TensorBoardWrapper(ks.callbacks.TensorBoard):
    """Up to now (05.10.17), Keras doesn't accept TensorBoard callbacks with validation data that is fed by a generator.
    Supplying the validation data is needed for the histogram_freq > 1 argument in the TB callback.
    Without a workaround, only scalar values (e.g. loss, accuracy) and the computational graph of the model can be saved.
    This class acts as a Wrapper for the ks.callbacks.TensorBoard class in such a way,
    that the whole validation data is put into a single array by using the generator.
    Then, the single array is used in the validation steps. This workaround is experimental!"""
    def __init__(self, batch_gen, nb_steps, **kwargs):
        super(TensorBoardWrapper, self).__init__(**kwargs)
        self.batch_gen = batch_gen # The generator.
        self.nb_steps = nb_steps # Number of times to call next() on the generator.
    def on_epoch_end(self, epoch, logs):
        # Fill in the `validation_data` property.
        # After it's filled in, the regular on_epoch_end method has access to the validation_data.
        # Draw nb_steps batches and stitch them into single big arrays.
        imgs, tags = None, None
        for s in xrange(self.nb_steps):
            ib, tb = next(self.batch_gen)
            ib = np.asarray(ib)
            if imgs is None and tags is None:
                # Allocate the full-size buffers from the first batch's shape.
                imgs = np.zeros(((ib.shape[0],) + (self.nb_steps * ib.shape[1],) + ib.shape[2:]), dtype=ib.dtype)
                tags = np.zeros(((self.nb_steps * tb.shape[0],) + tb.shape[1:]), dtype=tb.dtype)
            imgs[ : , s * ib.shape[1]:(s + 1) * ib.shape[1]] = ib
            tags[s * tb.shape[0]:(s + 1) * tb.shape[0]] = tb
        # Layout Keras expects: inputs..., targets, sample_weights, learning phase.
        # NOTE(review): imgs[0]/imgs[1] assumes a two-input model -- verify.
        self.validation_data = [imgs[0], imgs[1], tags, np.ones(imgs[0].shape[0]), 0.0]
        # self.validation_data = [list(imgs), tags, np.ones(imgs[0].shape[0]), 0.0]
        print len(self.validation_data)
        print self.model.inputs[0].shape
        tensors = (self.model.inputs +
                   self.model.targets +
                   self.model.sample_weights)
        if self.model.uses_learning_phase:
            print 'learn phase', K.learning_phase()
            tensors += [K.learning_phase()]
        print len(tensors)
        return super(TensorBoardWrapper, self).on_epoch_end(epoch, logs)
class BatchLevelPerformanceLogger(ks.callbacks.Callback):
    # Logs train loss/accuracy averaged over windows of ':display' batches
    # (and validation metrics every 'skipBatchesVal' batches), appending one
    # tab-separated line per window to log_train.txt at each epoch end.
    # (Original German comment translated: "prints the loss for every
    # :display batches, averaged over the last :display batches".)
    def __init__(self, display, skipBatchesVal, steps_per_epoch, args, genVal):
        ks.callbacks.Callback.__init__(self)
        self.Valseen = 0            # validation evaluations in the current window
        self.averageLoss = 0.0
        self.averageAcc = 0.0
        self.averageValLoss = 0.0
        self.averageValAcc = 0.0
        self.steps_per_epoch = steps_per_epoch
        self.steps = steps_per_epoch // display   # batches per logging window
        self.skipBatchesVal = skipBatchesVal      # validate every N batches
        self.args = args
        # Global batch counter, continued from previously trained epochs.
        self.seen = int(self.args.num_weights) * self.steps_per_epoch
        self.logfile_train_fname = self.args.folderOUT + 'log_train.txt'
        self.logfile_train = None
        self.genVal = genVal
    def on_train_begin(self, logs={}):
        # When resuming, carry over the previous run's training log.
        if self.args.resume:
            os.system("cp %s %s" % (self.args.folderMODEL + 'log_train.txt', self.logfile_train_fname))
        return
    def on_batch_end(self, batch, logs={}):
        self.seen += 1
        self.averageLoss += logs.get('loss')
        self.averageAcc += logs.get('acc')
        if self.seen % self.skipBatchesVal == 0:
            # Periodic one-batch validation pass.
            self.Valseen += 1
            valLoss, valAcc = tuple(self.model.evaluate_generator(self.genVal, steps=1))
            self.averageValLoss += valLoss
            self.averageValAcc += valAcc
        if self.seen % self.steps == 0:
            # Window complete: average, print and buffer one log line.
            averaged_loss = self.averageLoss / self.steps
            averaged_acc = self.averageAcc / self.steps
            averaged_ValLoss = self.averageValLoss / self.Valseen if self.Valseen > 0 else 0.0
            averaged_ValAcc = self.averageValAcc / self.Valseen if self.Valseen > 0 else 0.0
            print
            print 'average loss/acc train:', averaged_loss, averaged_acc
            print 'average loss/acc test:', averaged_ValLoss, averaged_ValAcc
            # Fractional epoch position of the window centre.
            batchnumber_float = (self.seen - self.steps / 2.) / float(self.steps_per_epoch) # + self.epoch - 1 # start from zero
            self.loglist.append('\n{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(self.seen, batchnumber_float, averaged_loss, averaged_acc, averaged_ValLoss, averaged_ValAcc))
            # Reset the window accumulators.
            self.averageLoss = 0
            self.averageAcc = 0
            self.averageValLoss = 0
            self.averageValAcc = 0
            self.Valseen = 0
    def on_epoch_begin(self, epoch, logs={}):
        self.loglist = []
    def on_epoch_end(self, epoch, logs={}):
        self.logfile_train = open(self.logfile_train_fname, 'a+')
        # Write the header only for a freshly created (empty) log file.
        if os.stat(self.logfile_train_fname).st_size == 0: self.logfile_train.write("#Batch\tBatch_float\tLoss\tAcc\tValLoss\tValAcc")
        for batch_statistics in self.loglist: # only write finished epochs to the .txt
            self.logfile_train.write(batch_statistics)
        self.logfile_train.flush()
        os.fsync(self.logfile_train.fileno())
        self.logfile_train.close()
        try:
            plot_validation.plot_learning_curve(self.args.folderOUT, np.loadtxt(self.logfile_train_fname, unpack=True))
        except: print 'plotting learning curve not successfull. Skipping'
class EpochLevelPerformanceLogger(ks.callbacks.Callback):
    """Keras callback: after every epoch, predicts on a capped validation
    sample, merges predictions and epoch metrics into the pickled 'save.p'
    history, and refreshes the epoch-level plots."""
    def __init__(self, args, files, var_targets):
        ks.callbacks.Callback.__init__(self)
        self.validation_data = None
        self.args = args
        self.files = files
        # Cap the validation sample at 2000 events.
        self.eventsVal = min([getNumEvents(self.files), 2000])
        self.eventsPerBatch = 50
        # Whole batches only: round the event count down to a multiple of
        # the batch size, then convert to a batch count.
        self.iterationsVal = round_down(self.eventsVal, self.eventsPerBatch) / self.eventsPerBatch
        self.genVal = generate_batches_from_files(self.files, batchsize=self.eventsPerBatch, class_type=var_targets, yield_mc_info=1)
    def on_train_begin(self, logs={}):
        self.losses = []
        # Resume: carry the previous run's history forward; otherwise start
        # with an empty pickled dict.
        if self.args.resume:
            os.system("cp %s %s" % (self.args.folderMODEL + "save.p", self.args.folderOUT + "save.p"))
        else:
            pickle.dump({}, open(self.args.folderOUT + "save.p", "wb"))
        return
    def on_train_end(self, logs={}):
        return
    def on_epoch_begin(self, epoch, logs={}):
        return
    def on_epoch_end(self, epoch, logs={}):
        Y_PRED, Y_TRUE, EVENT_INFO = [], [], []
        # FIX: the original iterated over 'self.val_iterations', an attribute
        # that is never assigned (__init__ defines 'iterationsVal'), which
        # raised AttributeError at the end of the first epoch.
        for i in xrange(self.iterationsVal):
            Y_PRED_temp, Y_TRUE_temp, EVENT_INFO_temp = predict_events(self.model, self.genVal)
            Y_PRED.extend(Y_PRED_temp)
            Y_TRUE.extend(Y_TRUE_temp)
            EVENT_INFO.extend(EVENT_INFO_temp)
        # Merge this epoch's predictions and metrics into the pickled history.
        self.dict_out = pickle.load(open(self.args.folderOUT + "save.p", "rb"))
        self.dict_out[epoch] = {'Y_PRED': np.asarray(Y_PRED), 'Y_TRUE': np.asarray(Y_TRUE), 'EVENT_INFO': np.asarray(EVENT_INFO),
                                'loss': logs['loss'], 'acc': logs['acc'],
                                'val_loss': logs['val_loss'], 'val_acc': logs['val_acc']}
        pickle.dump(self.dict_out, open(self.args.folderOUT + "save.p", "wb"))
        on_epoch_end_plots(folderOUT=self.args.folderOUT, epoch=epoch, data=self.dict_out[epoch])
        return
999,648 | 3e6a39875bd27ce4f9762a0ced2fc0a8cc0fee3d | #!/usr/bin/env python
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'socialchan.settings'
import django
django.setup()
import pika
from pickle import loads, dumps
from base64 import b64decode, b64encode
import functions
#parameters = pika.URLParameters('amqp://empifJS6:JLdttxhTIzSoZSsgWvbPf8PFTRySIifY@leaping-marjoram-40.bigwig.lshift.net:10743/MxfKYu-j5XB0')
#connection = pika.BlockingConnection(parameters)
#channel = connection.channel()
#def on_request(ch, method, props, body):
#print " llegooooo"
#data = loads( b64decode(body) )
#response = getattr(functions, data["command"] )(data["args"])
#response = b64encode( dumps(response) )
#ch.basic_publish(exchange='',
#routing_key=props.reply_to,
#properties=pika.BasicProperties(correlation_id = \
#props.correlation_id),
#body=response)
#ch.basic_ack(delivery_tag = method.delivery_tag)
#channel.basic_qos(prefetch_count=1)
#channel.basic_consume(on_request, queue='rpc_queue')
#print " [x] Awaiting RPC requests"
#channel.start_consuming()
def on_request(ch, method, props, body):
    # RPC handler: decode a pickled {"command": ..., "args": ...} request,
    # dispatch to the matching function in the 'functions' module, and
    # publish the pickled result back to the caller's reply queue.
    # SECURITY NOTE(review): pickle.loads on broker data executes arbitrary
    # code if the queue is not fully trusted.
    print " llegooooo"
    data = loads( b64decode(body) )
    response = getattr(functions, data["command"] )(data["args"])
    response = b64encode( dumps(response) )
    ch.basic_publish(exchange='',
        routing_key=props.reply_to,
        properties=pika.BasicProperties(correlation_id = \
            props.correlation_id),
        body=response)
    # Acknowledge only after the reply was published.
    ch.basic_ack(delivery_tag = method.delivery_tag)
def on_open(connection):
    # Connection established -- open a channel; on_channel_open fires when
    # the channel is ready.
    connection.channel(on_channel_open)
# Step #4: channel ready -- start consuming RPC requests one at a time.
def on_channel_open(channel):
    # prefetch_count=1: don't take a new message until the current one is acked.
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(on_request, queue='rpc_queue')
    print " [x] Awaiting RPC requests"
# Step #1: Connect to RabbitMQ
# NOTE(review): broker credentials are hard-coded in this URL; move them to
# configuration/environment before deploying.
parameters = pika.URLParameters('amqp://empifJS6:JLdttxhTIzSoZSsgWvbPf8PFTRySIifY@leaping-marjoram-40.bigwig.lshift.net:10743/MxfKYu-j5XB0')
connection = pika.SelectConnection(parameters=parameters,
                                   on_open_callback=on_open)
try:
    # Step #2 - Block on the IOLoop
    connection.ioloop.start()
# Catch a Keyboard Interrupt to make sure that the connection is closed cleanly
except KeyboardInterrupt:
    # Gracefully close the connection
    connection.close()
    # Start the IOLoop again so Pika can communicate, it will stop on its own when the connection is closed
    connection.ioloop.start()
|
999,649 | 88915163509c9b81f059cb917e76a9e4cfc2b281 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 11:46:21 2017
@author: Philipp
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
#import colorcet as cc
#from datetime import datetime
#import matplotlib.collections as collections
from scipy.signal import argrelextrema, hilbert, chirp
from scipy import interpolate
def ztan(t, T, z0, kappa, s=1):
    """Smoothed square-wave zeitgeber signal oscillating around 1.

    t: time, T: cycle period, z0: amplitude, kappa: duty-cycle parameter,
    s: sign/scale of the arctan squashing steepness.
    """
    omega = 2 * np.pi / T
    steepness = np.pi / (omega * np.sin(kappa * np.pi))
    phase_term = np.cos(omega * t) - np.cos(kappa * np.pi)
    return 1 + 2 * z0 * np.arctan(s * steepness * phase_term) / np.pi
def get_extrema(y, t):
    """Locate interior local maxima and minima of y(t).

    Returns [maxima, minima]; each is an (n, 6) array whose rows hold
    (t, t_before, t_after, y, y_before, y_after) for one extremum, so a
    parabola can later be fitted through the point and its neighbours.
    """
    assert y.size > 3, "y array passed to get_extrema not large enough"
    max_rows = []
    min_rows = []
    # Interior samples only: each candidate is compared with both neighbours.
    for i in range(1, y.size - 1):
        row = [t[i], t[i - 1], t[i + 1], y[i], y[i - 1], y[i + 1]]
        if y[i] > y[i + 1] and y[i] > y[i - 1]:
            max_rows.append(row)
        if y[i] < y[i + 1] and y[i] < y[i - 1]:
            min_rows.append(row)
    # reshape keeps the (0, 6) shape when no extrema were found.
    maxima = np.array(max_rows).reshape(-1, 6)
    minima = np.array(min_rows).reshape(-1, 6)
    return [maxima, minima]
def interpolate(m):
    """Fit a parabola through three (x, y) samples and return its vertex.

    m: sequence (x1, x2, x3, y1, y2, y3) as produced by get_extrema rows.
    Returns np.array([x_vertex, y_vertex]).

    NOTE: this function shadows the 'scipy.interpolate' module imported at
    the top of the file; callers here rely on the function, so the name is
    kept.
    """
    x1, x2, x3 = m[0], m[1], m[2]
    y1, y2, y3 = m[3], m[4], m[5]
    denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
    # Closed-form coefficients of y = A x^2 + B x + C through the 3 points.
    A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom
    B = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / denom
    C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom
    x_vertex = -B / (2 * A)
    y_vertex = A * x_vertex**2 + B * x_vertex + C
    return np.array([x_vertex, y_vertex])
def get_max(a, t):
    """Interpolated y-values of the local maxima of a(t)."""
    return ipol(get_extrema(a, t)[0], 1)
def get_min(a, t):
    """Interpolated y-values of the local minima of a(t)."""
    return ipol(get_extrema(a, t)[1], 1)
def get_xmax(a, t):
    """Interpolated time stamps of the local maxima of a(t)."""
    return ipol(get_extrema(a, t)[0], 0)
def get_xmin(a, t):
    """Interpolated time stamps of the local minima of a(t)."""
    return ipol(get_extrema(a, t)[1], 0)
def ipol(arr, nr):
    """Parabolically interpolate each extremum row of 'arr' and return
    component 'nr' of the vertices (0 = time, 1 = value) as an array."""
    return np.asarray([interpolate(row)[nr] for row in arr])
def get_periods(a, t):
    """Successive time differences between interpolated minima of a(t).

    Note: despite the original docstring mentioning maxima and a mean,
    the implementation uses the minima (index [1] of get_extrema) and
    returns the full np.diff array, not its mean.
    """
    minima_times = ipol(get_extrema(a, t)[1], 0)
    return np.diff(minima_times)
####### implement biological model Hong et al 2008
### dictionary of parameters
### rate constants per hour
#rate constants for original model
rate = {
'k1' : 1.8,
'k2' : 1.8,
'k3' : 0.05,
'k4' : 0.23,
'k5' : 0.27,
'k6' : 0.07,
'k7' : 0.5,
'k8' : 0.8,
'k9' : 40.0,
'k10' : 0.3,
'k11' : 0.05,
'k12' : 0.02,
'k13' : 50.0,
'k14' : 1.0,
'k15' : 8.0,
'K' : 1.25,
'K2' : 1.0
}
### rate constants for frq1
rate1 = {
'k1' : 1.8,
'k2' : 1.8,
'k3' : 0.15,
'k4' : 0.23,
'k5' : 0.4,
'k6' : 0.1,
'k7' : 0.5,
'k8' : 0.8,
'k9' : 40.0,
'k10' : 0.3,
'k11' : 0.05,
'k12' : 0.02,
'k13' : 50.0,
'k14' : 1.0,
'k15' : 8.0,
'K' : 1.25,
'K2' : 1.0
}
### rate constants for frq7
rate7 = {
'k1' : 1.8,
'k2' : 1.8,
'k3' : 0.05,
'k4' : 0.23,
'k5' : 0.15,
'k6' : 0.01,
'k7' : 0.5,
'k8' : 0.8,
'k9' : 40.0,
'k10' : 0.3,
'k11' : 0.05,
'k12' : 0.02,
'k13' : 50.0,
'k14' : 1.0,
'k15' : 8.0,
'K' : 1.25,
'K2' : 1.0
}
### define ODE clock function
def clock(state, t, rate, T, z0, kappa,signal=ztan):
    ### purpose: simulate Hong et al 2008 model for Neurospora clock.
    ### state:  7 concentrations (see unpacking below); t: time;
    ### rate:   dict of rate constants (wild type / frq1 / frq7);
    ### T, z0, kappa: zeitgeber period, strength and duty cycle, forwarded
    ### to 'signal' (default ztan), which modulates frq transcription.
    ### Returns the list of time derivatives, in state order, for odeint.
    ### define state vector
    frq_mrna = state[0]
    frq_c = state[1]
    frq_n = state[2]
    wc1_mrna = state[3]
    wc1_c = state[4]
    wc1_n = state[5]
    frq_n_wc1_n = state[6]
    ### ODEs Hong et al 2008
    ### NOTE (translated from German): the original author marked the last
    ### summand of dt_frq_mrna as unclear.
    dt_frq_mrna = (signal(t, T, z0, kappa) * rate['k1'] * (wc1_n**2) / (rate['K'] + (wc1_n**2))) - (rate['k4'] * frq_mrna)
    dt_frq_c = rate['k2'] * frq_mrna - ((rate['k3'] + rate['k5']) * frq_c)
    dt_frq_n = (rate['k3'] * frq_c) + (rate['k14'] * frq_n_wc1_n) - (frq_n * (rate['k6'] + (rate['k13'] * wc1_n)))
    dt_wc1_mrna = rate['k7'] - (rate['k10'] * wc1_mrna)
    dt_wc1_c = (rate['k8'] * frq_c * wc1_mrna / (rate['K2'] + frq_c)) - ((rate['k9'] + rate['k11']) * wc1_c)
    dt_wc1_n = (rate['k9'] * wc1_c) - (wc1_n * (rate['k12'] + (rate['k13'] * frq_n))) + (rate['k14'] * frq_n_wc1_n)
    dt_frq_n_wc1_n = rate['k13'] * frq_n * wc1_n - ((rate['k14'] + rate['k15']) * frq_n_wc1_n)
    ### derivatives
    dt_state = [dt_frq_mrna,
                dt_frq_c,
                dt_frq_n,
                dt_wc1_mrna,
                dt_wc1_c,
                dt_wc1_n,
                dt_frq_n_wc1_n]
    return dt_state
### set initial state and time vector
### set initial conditions for each ODE
frq_mrna0 = 4.0
frq_c0 = 30.0
frq_n0 = 0.1
wc1_mrna0 = (0.5 / 0.3)
wc1_c0 = 0.03225
wc1_n0 = 0.35
frq_n_wc1_n0 = 0.18
state0 = [frq_mrna0,
frq_c0,
frq_n0,
wc1_mrna0,
wc1_c0,
wc1_n0,
frq_n_wc1_n0]
def border_behavior(z_strength, z_per, strain = rate, tcycle = 80, kappa = 0.5, signal = ztan):
    """Simulate entrainment for zeitgeber strength z_strength and period
    z_per, print the observed FRQ period, and plot the post-transient FRQ
    trace together with the zeitgeber signal.

    strain:  rate-constant dict (wild type 'rate', 'rate1' or 'rate7').
    tcycle:  number of zeitgeber cycles to integrate.
    """
    ### simulate arnold tongue
    z0 = z_strength
    T = z_per
    ### integrate enough zeitgeber cycles to be able to cut out transients
    t = np.arange(0, tcycle*T, 0.1)
    state = odeint(clock, state0, t, args=(strain, T, z0, kappa, signal))
    ### keep only the last 25 zeitgeber cycles (time resolution is 0.1)
    lt = int(-(25*10*T))
    x0 = state[lt:, 1]
    tn = t[lt:]
    ### mean distance between interpolated FRQ minima = observed period
    frq_per = get_periods(x0, tn)
    period = np.mean(frq_per)
    print(period)  # FIX: was the Python-2-only statement 'print period'
    ### entrainment criteria: T - tau should be < 5 minutes, small period std
    z_minima = np.arange(T/2, tcycle*T, T)
    #ph = get_phase(x0,tn,z0,tn)
    ### phase of the last FRQ maxima relative to zeitgeber minima, wrapped
    ### into [0, T) and normalised to 2*pi
    a = get_xmax(state[:,1], t)[-15:-1]
    b = z_minima[-15:-1]
    c = a - b
    c[c<0] = T + c[c<0]
    phase = 2*np.pi*c/T
    save_to = '/home/burt/neurospora/figures/desync/'
    fig, axes = plt.subplots(2, 1, figsize=(12,9))
    axes = axes.flatten()
    ax = axes[0]
    ax1 = axes[1]
    ax.plot(tn, x0, "k")
    ax1.plot(tn, ztan(tn, T, z0, 0.5), "k")
    #fig.savefig(save_to+"border"+"T_"+str(T)+".png")
    #plt.close(fig)
def border2(zstr,T):
    # Hilbert/Fourier diagnostics of entrainment at zeitgeber strength zstr
    # and period T: integrate the clock, extract instantaneous phases of the
    # FRQ trace and the zeitgeber, and plot spectra plus phase difference.
    # NOTE(review): integer division is assumed for N below (Python 2);
    # under Python 3 N is a float and the Y[1:N] slices fail.
    t = np.arange(0,5000,.1)
    state = odeint(clock,state0,t,args=(rate,T,zstr,.5,ztan))
    z = ztan(t,T,zstr,.5)
    # Discard the transient: keep only the last 30000 samples (3000 h).
    trans = 30000
    t = t[-trans:]
    frq = state[-trans:,1]
    frq_mean = np.mean(frq)
    frq_detrend = frq-frq_mean
    z = z[-trans:]
    z_mean = np.mean(z)
    z_detrend = z-z_mean
    # Samples dropped at each end after the Hilbert transform (edge effects).
    border_cut = 10000
    # Window functions; only the Hamming window is actually used below.
    hann = np.hanning(len(frq_detrend))
    hamm = np.hamming(len(frq_detrend))
    black= np.blackman(len(frq_detrend))
    frq_signal = hilbert(hamm*frq_detrend)
    frq_signal = frq_signal[border_cut:-border_cut]
    frq_envelope = np.abs(frq_signal)
    frq_phase = np.angle(frq_signal)
    z_signal = hilbert(hamm*z_detrend)
    z_signal = z_signal[border_cut:-border_cut]
    z_envelope = np.abs(z_signal)
    z_phase = np.angle(z_signal)
    # Wrapped phase difference zeitgeber - FRQ in (-pi, pi].
    phase_diff = np.arctan2(np.sin(z_phase-frq_phase),np.cos(z_phase-frq_phase))
    phase_diff_max_idx = argrelextrema(phase_diff, np.greater)[0]
    phase_diff_max = phase_diff[phase_diff_max_idx]
    t_phase = phase_diff_max_idx/10
    # Mask phase slips (values right at the +pi wrap).
    mask = np.ma.array(phase_diff_max)
    mask[phase_diff_max>3.05] = np.ma.masked
    tn = t[border_cut:-border_cut]
    """
    fig = plt.figure()
    ax0 = fig.add_subplot(321)
    ax0.plot(t, hamm*frq_detrend, label='signal')
    ax0.plot(tn, frq_envelope, label='envelope')
    ax0.legend()
    ax1 = fig.add_subplot(322)
    ax1.plot(tn, frq_phase)
    ax3 = fig.add_subplot(323)
    ax3.plot(t, z_detrend, label='z signal')
    ax3.plot(tn, z_envelope, label='z envelope')
    ax3.legend()
    ax4 = fig.add_subplot(324)
    ax4.plot(tn, z_phase)
    ax5 = fig.add_subplot(325)
    ax5.scatter(tn,phase_diff,c = "k",s=.1)
    ax5.set_ylim(-4,4)
    """
    # One-sided FFT of the windowed FRQ signal; xn is converted to periods.
    N = len(frq_detrend)/2+1
    X = np.linspace(0, 5, N, endpoint=True)
    Y = np.fft.fft(frq_detrend*hamm)
    xn=X[X>0]
    xn = 1/xn
    yn=2.0*np.abs(Y[1:N])/N
    power = 2.*np.abs(Y[1:N])**2/N
    norm = np.max(power)
    # Three panels: amplitude spectrum, normalised power, phase difference.
    fig = plt.figure()
    ax = fig.add_subplot(311)
    ax.plot(xn, yn)
    ax.set_xlim(5,50)
    ax.set_title("T = "+str(T)+" , z = "+str(zstr))
    ax2 = fig.add_subplot(312)
    ax2.plot(xn,power/norm)
    ax2.set_xlim(5,50)
    ax3 = fig.add_subplot(313)
    ax3.scatter(tn,phase_diff,c = "k",s=.1)
    ax3.set_ylim(-4,4)
    """
    duration = 1.0
    fs = 400.0
    samples = int(fs*duration)
    t = np.arange(samples) / fs
    signal = chirp(t, 20.0, t[-1], 100.0)
    signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    analytic_signal = hilbert(signal)
    amplitude_envelope = np.abs(analytic_signal)
    instantaneous_phase = np.unwrap(np.angle(analytic_signal))
    instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0*np.pi) * fs)
    fig = plt.figure()
    ax0 = fig.add_subplot(211)
    ax0.plot(t, signal, label='signal')
    ax0.plot(t, amplitude_envelope, label='envelope')
    ax0.set_xlabel("time in seconds")
    ax0.legend()
    ax1 = fig.add_subplot(212)
    ax1.plot(t[1:], instantaneous_frequency)
    ax1.set_xlabel("time in seconds")
    ax1.set_ylim(0.0, 120.0)
    """
#### make fourier analysis
#border_behavior(zstr,20.0)
"""
t = np.arange(0,2000,.1)
state = odeint(clock,state0,t,args=(rate,T,zstr,.5,ztan))
frq = state[-18000:,1]
frq_mean = np.mean(frq)
frq_detrend = frq-frq_mean
hann = np.hanning(len(frq_detrend))
hamm = np.hamming(len(frq_detrend))
black= np.blackman(len(frq_detrend))
frq_signal = hilbert(hann*frq_detrend)
frq_envelope = np.abs(frq_signal)
tn = t[-18000:]
fig = plt.figure()
ax0 = fig.add_subplot(111)
ax0.plot(tn, frq_signal, label='signal')
ax0.plot(tn, frq_envelope, label='envelope')
ax0.set_xlabel("time in seconds")
ax0.legend()
""" |
999,650 | 034c6678f4cdbf5e305ccd11ba0fc42f5dacc403 | # Generated by Django 2.0.3 on 2018-04-17 18:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.3): introduces the Column model and
    attaches cards (FK) and lanes (M2M) to it."""

    dependencies = [
        ('projects', '0007_auto_20180325_1605'),
    ]

    operations = [
        # New Column table; 'parent' allows nested sub-columns.
        migrations.CreateModel(
            name='Column',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=256)),
                ('card_limit', models.PositiveIntegerField()),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subcolumns', to='projects.Column')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Each card may belong to one column.
        migrations.AddField(
            model_name='card',
            name='column',
            field=models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.CASCADE, to='projects.Column'),
            preserve_default=False,
        ),
        # Lanes may reference many columns and vice versa.
        migrations.AddField(
            model_name='lane',
            name='column',
            field=models.ManyToManyField(blank=True, related_name='lanes', to='projects.Column'),
        ),
    ]
|
999,651 | a77314b3ef00f3a35076c3a995db677cb20f1a49 | import logging
from matplotlib import pyplot as plt
from scipy.stats import binom
import config
from utils import get_json_from_file, SECONDS_IN_MONTH
def prompt_tech_selection(technologies=config.TECHNOLOGIES):
    """
    Ask user to select a technology from the list, return selection
    """
    print("Select technology number for probability analysis from:")
    for idx, tech in enumerate(technologies, start=1):
        print(f"{idx}. {tech}")
    print("Insert technology number for probability analysis:")
    raw = input()
    # Accept only an integer between 1 and len(technologies); anything else
    # falls back to the first entry with a warning.
    if raw.isdigit() and 1 <= int(raw) <= len(technologies):
        selection = int(raw)
    else:
        selection = 1
        logging.warning(f"Bad input, using default selection {selection}")
    return technologies[selection - 1]
def likelihood_prediction():
    """
    Full flow of asking user to choose a technology and displaying its' likelihood to appear
    """
    # Get info
    selected_word = prompt_tech_selection()
    article_json = get_json_from_file()
    # Calculate results: per-title probability of the selected word.
    total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)
    probability = selected_word_counter / total_word_counter
    # Articles are assumed time-ordered; unix-second span of the data set.
    total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds
    months_in_train_set = total_time / SECONDS_IN_MONTH
    expected_posts_per_month = int(total_word_counter / months_in_train_set)
    # Show results
    print_text_results(expected_posts_per_month, probability, selected_word)
    plot_likelihood(expected_posts_per_month, probability)
def print_text_results(total_monthly_articles, probability, selected_word):
    """
    Print the text part of the likelihood calculation result
    """
    print(f"Probability of word {selected_word} per title: {probability}")
    print(f"Expecting {total_monthly_articles} overall HN posts per month")
    # Complement of "the word appears in none of the expected posts".
    miss_all = (1 - probability) ** total_monthly_articles
    prob_at_least_once = 1 - miss_all
    print(f"Probability of ~{prob_at_least_once} for '{selected_word}' to appear at least once next month ")
def count_occurrences(article_json, selected_word):
    """
    Count occurrences of a specific word in the titles of a list of articles

    Matching is case-insensitive and on whole whitespace-separated words.
    Returns (number_of_rows_with_a_title, occurrence_count); some rows lack
    the 'title' field, hence the explicit counting.
    """
    target = selected_word.lower()
    titles = [row['title'] for row in article_json if 'title' in row]
    hits = sum(1 for title in titles
               for word in title.lower().split()
               if word == target)
    return len(titles), hits
def plot_likelihood(expected_posts_per_month, probability):
    """
    Show a graph of the likelihood for number of occurrences
    """
    # Binomial model: n = expected posts per month, p = per-title probability.
    # At least 10 bars, but cap the tail at ~5x the expected count.
    bar_amount = max(10, int(5 * expected_posts_per_month * probability)) # at least 10 bars, not too long of a tail
    print("Generating likelihood plot")
    distribution = [binom.pmf(option, expected_posts_per_month, probability) for option in range(bar_amount)]
    plt.bar(range(bar_amount), distribution)
    plt.xlabel("occurrences")
    plt.ylabel("likelihood")
    plt.title("Likelihood of word occurences next month")
    plt.show()
|
999,652 | bf1824be116e29cc383b901615791ddc76298936 | from django import template
from django.conf import settings
import datetime
register = template.Library()
@register.simple_tag
def requirejs_version():
    """Template tag: cache-busting version string for JS assets.

    Prefers the configured JS_BUILD_VERSION; in DEBUG a fresh timestamp is
    generated per call so assets are never cached; otherwise empty.
    """
    if settings.JS_BUILD_VERSION:
        return settings.JS_BUILD_VERSION
    if settings.DEBUG:
        return datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
    return ''
999,653 | cdec42e5e3f4f9a914f5b2d2a4fbff5ec44b83b2 | k = input().split()
# Multiply the two integer tokens read from stdin above and print the product.
first, second = int(k[0]), int(k[1])
print(first * second)
999,654 | fe979a65bce5a25ea8e2585971ca0b94694e35c3 | import cv2
import numpy as np
import matplotlib.pyplot as plt
BLUE, GREEN, RED = (255, 0, 0), (0, 255, 0), (0, 0, 255)
WHITE_LOWER, WHITE_UPPER = (200, 200, 200), (255, 255, 255)
BLACK_LOWER, BLACK_UPPER = (0, 0, 0), (10, 10, 10)
def create_trackbar():
    """Create a 'Trackbar' window with lower (-) and upper (+) sliders for
    three channels; lower bounds start at 0, upper bounds at 255."""
    def nothing():
        pass
    cv2.namedWindow("Trackbar")
    for initial, suffix in ((0, "(-)"), (255, "(+)")):
        for channel in ("Hue", "Val", "Sat"):
            cv2.createTrackbar(channel + " " + suffix, "Trackbar", initial, 255, nothing)
def update_trackbar():
    """Read the current (lower, upper) channel triples from the 'Trackbar'
    window created by create_trackbar()."""
    channels = ("Hue", "Val", "Sat")
    lower = tuple(cv2.getTrackbarPos(ch + " (-)", "Trackbar") for ch in channels)
    upper = tuple(cv2.getTrackbarPos(ch + " (+)", "Trackbar") for ch in channels)
    return lower, upper
def mask_from_color(image, lower, upper, erode=0, dilate=0):
    """Binary mask of pixels within [lower, upper], optionally eroded and
    then dilated by the given number of iterations."""
    mask = cv2.inRange(image, lower, upper)
    if erode:
        mask = cv2.erode(mask, None, iterations=erode)
    if not dilate:
        return mask
    return cv2.dilate(mask, None, iterations=dilate)
def resize_same(image, new_height=False, new_width=False):
    """Resize 'image' preserving its aspect ratio.

    Provide exactly one of new_height / new_width; the other is derived.
    FIX: when neither dimension was given, the original fell through to
    cv2.resize with dsize=(False, False) and failed obscurely -- now a
    clear ValueError is raised instead.
    """
    ar = image.shape[1] / image.shape[0]
    if new_height:
        new_width = int(ar * new_height)
    elif new_width:
        new_height = int(new_width / ar)
    else:
        raise ValueError("resize_same requires new_height or new_width")
    return cv2.resize(image, dsize=(new_width, new_height))
def image_template_matching(image, template, maxCorr):
    """Run TM_CCOEFF template matching of 'template' against 'image'.

    If the best correlation exceeds the 'maxCorr' threshold, returns
    (template, correlation, xi, yi, xf, yf) where (xi, yi)/(xf, yf) are the
    top-left/bottom-right corners of the best match; otherwise None.
    """
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF)
    (_, new_maxCorr, _, maxLoc) = cv2.minMaxLoc(result)
    if new_maxCorr > maxCorr:
        best_template, best_corr = template, new_maxCorr
        (tH, tW) = template.shape[:2]
        (xi, yi) = (int(maxLoc[0]), int(maxLoc[1]))
        (xf, yf) = (int((maxLoc[0] + tW)), int((maxLoc[1] + tH)))
    else:
        return None
    return (best_template, best_corr, xi, yi, xf, yf)
# def image_image_matching(image_1, image_2, maxCorr, edging=False)
# if edged:
# image_1 = cv2.Canny(image_1, 75, 200)
# image_2 = cv2.Canny(image_2, 75, 200)
# image_template_matching(image, template, maxCorr):
def image_center_region(image, region_width=False, region_height=False, ret_ranges=False):
    """Crop a centred region from a 3-channel image; if only one dimension
    is given, the other is derived from the image aspect ratio.

    NOTE(review): 'region_width' spans axis 0 (rows) and 'region_height'
    axis 1 (columns) here -- the names look swapped relative to the usual
    convention. Behaviour kept exactly as the original; verify callers.
    Returns the crop, plus (x_min, x_max, y_min, y_max) when ret_ranges.
    """
    if not (region_width and region_height):
        # Derive the missing dimension from the aspect ratio.
        ar = image.shape[1] / image.shape[0]
        if region_height:
            region_width = int(ar * region_height)
        elif region_width:
            region_height = int(region_width / ar)
    # Single centred-crop computation (the original duplicated this block).
    xc, yc = image.shape[0] // 2, image.shape[1] // 2
    x_min, x_max = xc - region_width // 2, xc + region_width // 2
    y_min, y_max = yc - region_height // 2, yc + region_height // 2
    region = image[x_min:x_max, y_min:y_max, :]
    if ret_ranges:
        return region, x_min, x_max, y_min, y_max
    return region
def color_ranges_from_roi(roi):
    """Per-channel (min, max) colour bounds of a 3-channel region of interest.

    Returns (lower, upper) tuples of ints, suitable for cv2.inRange.
    """
    lower = tuple(int(roi[:, :, ch].min()) for ch in range(3))
    upper = tuple(int(roi[:, :, ch].max()) for ch in range(3))
    return lower, upper
def plt_imshow_bgr(image):
    # Convert OpenCV's BGR channel order to RGB before matplotlib display.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(image)
def plt_imshow_multi(*args, rows=2, cols=2):
    """Show images in a rows x cols grid of numbered grayscale subplots.

    NOTE(review): because of list(*args), this must be called with a single
    iterable of images (plt_imshow_multi([a, b])), not several image
    arguments -- behaviour kept as in the original.
    """
    images = list(*args)
    for index, img in enumerate(images, start=1):
        plt.subplot(rows, cols, index)
        plt.imshow(img, 'gray')
        plt.title(str(index))
def crop_center(image, to_remove):
    """Trim 'to_remove' pixels from each side of a 3-channel image."""
    trimmed = image[to_remove:-to_remove, to_remove:-to_remove, :]
    return trimmed
999,655 | 23d978bad36233873ffddbc0cc24925be0f677c2 | #imports
import pymysql
#functions
def create(data):
    """Insert a new donor row; return True on success, False on failure.

    SECURITY FIX: values are now passed as query parameters instead of
    being interpolated into the SQL string, closing an SQL-injection hole.
    """
    connection = pymysql.connect("localhost","root","helloworld","lifesaver")
    cursor = connection.cursor()
    query = "INSERT INTO donors VALUES(%s,%s,%s,%s,%s,%s,'1998-07-08')"
    try:
        cursor.execute(query, (data['name'], data['city'], data['email'],
                               data['phone'], data['bloodgroup'], data['password']))
        connection.commit()
        flag = True
    except Exception as e:
        connection.rollback()
        print(e)
        flag = False
    connection.close()
    return flag
def log_in(data):
    """Check data['phone']/data['password']; on success store the donor's
    name in data['name'] and return True, else False.

    SECURITY FIX: parameterized query instead of string interpolation.
    NOTE(review): passwords are stored and compared in plain text.
    """
    connection = pymysql.connect("localhost","root","helloworld","lifesaver")
    cursor = connection.cursor()
    query = "SELECT phone,name,password from donors where phone=%s"
    try:
        cursor.execute(query, (data['phone'],))
        values = cursor.fetchone()
        # fetchone() returns None for an unknown phone; indexing it raises
        # TypeError, which the except below turns into a failed login.
        if values[2] == data['password']:
            data['name'] = values[1]
            flag = True
        else:
            flag = False
    except Exception as e:
        flag = False
        print(e)
    connection.close()
    return flag
def update_date(data):
    """Set the donor's last-donation date (data['day']) by phone number;
    return True on success, False on failure.

    SECURITY FIX: parameterized query instead of string interpolation.
    """
    connection = pymysql.connect("localhost","root","helloworld","lifesaver")
    cursor = connection.cursor()
    query = "UPDATE donors SET lastdonation = %s WHERE phone = %s"
    try:
        cursor.execute(query, (data['day'], data['phone']))
        connection.commit()
        flag = True
    except Exception as e:
        connection.rollback()
        print(e)
        flag = False
    connection.close()
    return flag
def delete(data):
    """Delete the donor row matching data['phone']; return True on success.

    SECURITY FIX: parameterized query instead of string interpolation.
    """
    connection = pymysql.connect("localhost","root","helloworld","lifesaver")
    cursor = connection.cursor()
    query = "DELETE FROM donors WHERE phone = %s"
    try:
        cursor.execute(query, (data['phone'],))
        connection.commit()
        flag = True
    except Exception as e:
        connection.rollback()
        print(e)
        flag = False
    connection.close()
    return flag
999,656 | 6789a3578556d8c40d341ab3010a3fadfaf3e315 | import json
import operator
import os
#input file
filepath = "../Data/tokenized/"
#output file
lpath = "../Data/all_labels.json"
max_index = 10
fw = open(lpath,'w')
labels = dict()
def getLabels(fil):
    """Accumulate tag frequencies from one tokenized JSON file into the
    module-level 'labels' dict.

    FIXES: dict.has_key() was removed in Python 3 -- dict.get() below is
    equivalent and works on both Python 2 and 3; the input file handle is
    now closed via a with-block instead of being leaked.
    """
    with open(fil, 'r') as fin:
        data = json.load(fin)
    for row in data:
        for each in row["tags"]:
            labels[each] = labels.get(each, 0) + 1
# Walk every tokenized input file, tally tag counts, then write the top
# 'max_index' most frequent (tag, count) pairs to the output JSON file.
for dir, subdir, files in os.walk(filepath):
    for filename in files:
        f = os.path.join(dir,filename)
        getLabels(f)
sorted_labels = sorted(labels.items(), key = operator.itemgetter(1), reverse = True)
json.dump(sorted_labels[0:max_index], fw)
|
999,657 | 1888c0128cfb80efad93875e343b22eee7d2c900 | import numpy as np
x = [[10, 20], [14, 18]]
X = np.array(x)
y = [[1.7], [1.4]]
y = np.array(y)
def sigmoid(x, deriv = False):
if deriv == True:
return sigmoid(x)*(1 - sigmoid(x))
return 1/(1 + 2.718281**(-x))
class NeuralNetwork:
def __init__(self, X, hidden1, hidden2, hidden3, num_class):
self.X = X
self.hidden1 = hidden1
self.hidden2 = hidden2
self.hidden3 = hidden3
self.classnum = num_class
self.weights = {'w1':np.random.randn(self.X.shape[1], self.hidden1),
'w2':np.random.randn(self.hidden1, self.hidden2),
'w3':np.random.randn(self.hidden2, self.hidden3),
'wout':np.random.randn(self.hidden3, self.classnum)}
self.biases = {'b1':np.random.randn(1, self.hidden1),
'b2':np.random.randn(1, self.hidden2),
'b3':np.random.randn(1, self.hidden3),
'bout':np.random.randn(1, self.classnum)}
def predict(self):
return self.layerout
def train(self, y, learning_rate):
i = 0
for epoch in range(100000):
if i >= self.X.shape[0] - 5:
i = 0
self.layer1 = sigmoid(np.add(np.dot(self.X[i:i+5], self.weights['w1']), self.biases['b1']))
self.layer2 = sigmoid(np.add(np.dot(self.layer1, self.weights['w2']), self.biases['b2']))
self.layer3 = sigmoid(np.add(np.dot(self.layer2, self.weights['w3']), self.biases['b3']))
self.layerout = sigmoid(np.add(np.dot(self.layer3, self.weights['wout']), self.biases['bout']))
errorout = self.layerout - y[i:i+5]
delout = errorout * sigmoid(self.layerout, True)
error3 = np.dot(delout, self.weights['wout'].T)
del3 = error3 * sigmoid(self.layer3, True)
error2 = np.dot(del3, self.weights['w3'].T)
del2 = error2 * sigmoid(self.layer2, True)
error1 = np.dot(del2, self.weights['w2'].T)
del1 = error1 * sigmoid(self.layer1, True)
self.weights['wout'] += np.dot(self.layer3.T, delout) * learning_rate
self.weights['w3'] += np.dot(self.layer2.T, del3) * learning_rate
self.weights['w2'] += np.dot(self.layer1.T, del2) * learning_rate
self.weights['w1'] += np.dot(self.X[i:i+5].T, del1) * learning_rate
self.biases['bout'] += np.sum(delout, axis=0) * learning_rate
self.biases['b3'] += np.sum(del3, axis=0) * learning_rate
self.biases['b2'] += np.sum(del2, axis=0) * learning_rate
self.biases['b1'] += np.sum(del1, axis=0) * learning_rate
if epoch%10000 == 0:
print('Error: {}'.format(np.mean(np.abs(errorout))))
model = NeuralNetwork(X, 3, 3, 3, 1)
model.train(y, 0.001)
|
999,658 | f8f34bdbbe39b188b92542a91dc28f04cc5004bd | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ast import literal_eval
from textwrap import dedent
from typing import List, Set
from unittest import skipIf
import libcst as cst
import libcst.matchers as m
from libcst.matchers import (
call_if_inside,
call_if_not_inside,
leave,
MatcherDecoratableTransformer,
MatcherDecoratableVisitor,
visit,
)
from libcst.testing.utils import UnitTest
def fixture(code: str) -> cst.Module:
return cst.parse_module(dedent(code))
class MatchersGatingDecoratorsTest(UnitTest):
def test_call_if_inside_transform_simple(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_inside(m.FunctionDef())
def leave_SimpleString(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
self.leaves.append(updated_node.value)
return updated_node
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
self.assertEqual(visitor.leaves, ['"baz"', '"foobar"'])
def test_call_if_inside_verify_original_transform(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.func_visits: List[str] = []
self.str_visits: List[str] = []
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.str_visits.append(node.value)
def visit_FunctionDef(self, node: cst.FunctionDef) -> None:
self.func_visits.append(node.name.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.func_visits, ["foo", "bar"])
self.assertEqual(visitor.str_visits, ['"baz"'])
def test_call_if_inside_collect_simple(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_inside(m.FunctionDef())
def leave_SimpleString(self, original_node: cst.SimpleString) -> None:
self.leaves.append(original_node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
self.assertEqual(visitor.leaves, ['"baz"', '"foobar"'])
def test_call_if_inside_verify_original_collect(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.func_visits: List[str] = []
self.str_visits: List[str] = []
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.str_visits.append(node.value)
def visit_FunctionDef(self, node: cst.FunctionDef) -> None:
self.func_visits.append(node.name.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.func_visits, ["foo", "bar"])
self.assertEqual(visitor.str_visits, ['"baz"'])
def test_multiple_visitors_collect(self) -> None:
# Set up a simple visitor with multiple visit decorators.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
@call_if_inside(m.ClassDef(m.Name("A")))
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
def foo() -> None:
return "foo"
class A:
def foo(self) -> None:
return "baz"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
def test_multiple_visitors_transform(self) -> None:
# Set up a simple visitor with multiple visit decorators.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
@call_if_inside(m.ClassDef(m.Name("A")))
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
def foo() -> None:
return "foo"
class A:
def foo(self) -> None:
return "baz"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
def test_call_if_not_inside_transform_simple(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_not_inside(m.FunctionDef())
def leave_SimpleString(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
self.leaves.append(updated_node.value)
return updated_node
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"'])
self.assertEqual(visitor.leaves, ['"foo"', '"bar"'])
def test_visit_if_inot_inside_verify_original_transform(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.func_visits: List[str] = []
self.str_visits: List[str] = []
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.str_visits.append(node.value)
def visit_FunctionDef(self, node: cst.FunctionDef) -> None:
self.func_visits.append(node.name.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.func_visits, ["foo", "bar"])
self.assertEqual(visitor.str_visits, ['"foo"', '"bar"', '"foobar"'])
def test_call_if_not_inside_collect_simple(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_not_inside(m.FunctionDef())
def leave_SimpleString(self, original_node: cst.SimpleString) -> None:
self.leaves.append(original_node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"'])
self.assertEqual(visitor.leaves, ['"foo"', '"bar"'])
def test_visit_if_inot_inside_verify_original_collect(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.func_visits: List[str] = []
self.str_visits: List[str] = []
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.str_visits.append(node.value)
def visit_FunctionDef(self, node: cst.FunctionDef) -> None:
self.func_visits.append(node.name.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.func_visits, ["foo", "bar"])
self.assertEqual(visitor.str_visits, ['"foo"', '"bar"', '"foobar"'])
class MatchersVisitLeaveDecoratorsTest(UnitTest):
def test_visit_transform(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@visit(m.FunctionDef(m.Name("foo") | m.Name("bar")))
def visit_function(self, node: cst.FunctionDef) -> None:
self.visits.append(node.name.value)
@leave(m.FunctionDef(m.Name("bar") | m.Name("baz")))
def leave_function(
self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
) -> cst.FunctionDef:
self.leaves.append(updated_node.name.value)
return updated_node
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ["foo", "bar"])
self.assertEqual(visitor.leaves, ["bar", "baz"])
def test_visit_collector(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@visit(m.FunctionDef(m.Name("foo") | m.Name("bar")))
def visit_function(self, node: cst.FunctionDef) -> None:
self.visits.append(node.name.value)
@leave(m.FunctionDef(m.Name("bar") | m.Name("baz")))
def leave_function(self, original_node: cst.FunctionDef) -> None:
self.leaves.append(original_node.name.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ["foo", "bar"])
self.assertEqual(visitor.leaves, ["bar", "baz"])
def test_stacked_visit_transform(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@visit(m.FunctionDef(m.Name("foo")))
@visit(m.FunctionDef(m.Name("bar")))
def visit_function(self, node: cst.FunctionDef) -> None:
self.visits.append(node.name.value)
@leave(m.FunctionDef(m.Name("bar")))
@leave(m.FunctionDef(m.Name("baz")))
def leave_function(
self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
) -> cst.FunctionDef:
self.leaves.append(updated_node.name.value)
return updated_node
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ["foo", "bar"])
self.assertEqual(visitor.leaves, ["bar", "baz"])
def test_stacked_visit_collector(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@visit(m.FunctionDef(m.Name("foo")))
@visit(m.FunctionDef(m.Name("bar")))
def visit_function(self, node: cst.FunctionDef) -> None:
self.visits.append(node.name.value)
@leave(m.FunctionDef(m.Name("bar")))
@leave(m.FunctionDef(m.Name("baz")))
def leave_function(self, original_node: cst.FunctionDef) -> None:
self.leaves.append(original_node.name.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ["foo", "bar"])
self.assertEqual(visitor.leaves, ["bar", "baz"])
self.assertEqual(visitor.leaves, ["bar", "baz"])
def test_duplicate_visit_transform(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: Set[str] = set()
self.leaves: Set[str] = set()
@visit(m.FunctionDef(m.Name("foo")))
def visit_function1(self, node: cst.FunctionDef) -> None:
self.visits.add(node.name.value + "1")
@visit(m.FunctionDef(m.Name("foo")))
def visit_function2(self, node: cst.FunctionDef) -> None:
self.visits.add(node.name.value + "2")
@leave(m.FunctionDef(m.Name("bar")))
def leave_function1(
self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
) -> cst.FunctionDef:
self.leaves.add(updated_node.name.value + "1")
return updated_node
@leave(m.FunctionDef(m.Name("bar")))
def leave_function2(
self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
) -> cst.FunctionDef:
self.leaves.add(updated_node.name.value + "2")
return updated_node
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, {"foo1", "foo2"})
self.assertEqual(visitor.leaves, {"bar1", "bar2"})
def test_duplicate_visit_collector(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: Set[str] = set()
self.leaves: Set[str] = set()
@visit(m.FunctionDef(m.Name("foo")))
def visit_function1(self, node: cst.FunctionDef) -> None:
self.visits.add(node.name.value + "1")
@visit(m.FunctionDef(m.Name("foo")))
def visit_function2(self, node: cst.FunctionDef) -> None:
self.visits.add(node.name.value + "2")
@leave(m.FunctionDef(m.Name("bar")))
def leave_function1(self, original_node: cst.FunctionDef) -> None:
self.leaves.add(original_node.name.value + "1")
@leave(m.FunctionDef(m.Name("bar")))
def leave_function2(self, original_node: cst.FunctionDef) -> None:
self.leaves.add(original_node.name.value + "2")
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, {"foo1", "foo2"})
self.assertEqual(visitor.leaves, {"bar1", "bar2"})
def test_gated_visit_transform(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: Set[str] = set()
self.leaves: Set[str] = set()
@call_if_inside(m.FunctionDef(m.Name("foo")))
@visit(m.SimpleString())
def visit_string1(self, node: cst.SimpleString) -> None:
self.visits.add(literal_eval(node.value) + "1")
@call_if_not_inside(m.FunctionDef(m.Name("bar")))
@visit(m.SimpleString())
def visit_string2(self, node: cst.SimpleString) -> None:
self.visits.add(literal_eval(node.value) + "2")
@call_if_inside(m.FunctionDef(m.Name("baz")))
@leave(m.SimpleString())
def leave_string1(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
self.leaves.add(literal_eval(updated_node.value) + "1")
return updated_node
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
@leave(m.SimpleString())
def leave_string2(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
self.leaves.add(literal_eval(updated_node.value) + "2")
return updated_node
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobarbaz"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, {"baz1", "foo2", "bar2", "baz2", "foobarbaz2"})
self.assertEqual(
visitor.leaves, {"foobarbaz1", "foo2", "bar2", "foobar2", "foobarbaz2"}
)
def test_gated_visit_collect(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: Set[str] = set()
self.leaves: Set[str] = set()
@call_if_inside(m.FunctionDef(m.Name("foo")))
@visit(m.SimpleString())
def visit_string1(self, node: cst.SimpleString) -> None:
self.visits.add(literal_eval(node.value) + "1")
@call_if_not_inside(m.FunctionDef(m.Name("bar")))
@visit(m.SimpleString())
def visit_string2(self, node: cst.SimpleString) -> None:
self.visits.add(literal_eval(node.value) + "2")
@call_if_inside(m.FunctionDef(m.Name("baz")))
@leave(m.SimpleString())
def leave_string1(self, original_node: cst.SimpleString) -> None:
self.leaves.add(literal_eval(original_node.value) + "1")
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
@leave(m.SimpleString())
def leave_string2(self, original_node: cst.SimpleString) -> None:
self.leaves.add(literal_eval(original_node.value) + "2")
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobarbaz"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, {"baz1", "foo2", "bar2", "baz2", "foobarbaz2"})
self.assertEqual(
visitor.leaves, {"foobarbaz1", "foo2", "bar2", "foobar2", "foobarbaz2"}
)
def test_transform_order(self) -> None:
# Set up a simple visitor with a visit and leave decorator.
class TestVisitor(MatcherDecoratableTransformer):
@call_if_inside(m.FunctionDef(m.Name("bar")))
@leave(m.SimpleString())
def leave_string1(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
return updated_node.with_changes(
value=f'"prefix{literal_eval(updated_node.value)}"'
)
@call_if_inside(m.FunctionDef(m.Name("bar")))
@leave(m.SimpleString())
def leave_string2(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
return updated_node.with_changes(
value=f'"{literal_eval(updated_node.value)}suffix"'
)
@call_if_inside(m.FunctionDef(m.Name("bar")))
def leave_SimpleString(
self, original_node: cst.SimpleString, updated_node: cst.SimpleString
) -> cst.SimpleString:
return updated_node.with_changes(
value=f'"{"".join(reversed(literal_eval(updated_node.value)))}"'
)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
def baz() -> None:
return "foobarbaz"
"""
)
visitor = TestVisitor()
actual = module.visit(visitor)
expected = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "prefixraboofsuffix"
def baz() -> None:
return "foobarbaz"
"""
)
self.assertTrue(expected.deep_equals(actual))
def test_call_if_inside_visitor_attribute(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_inside(m.FunctionDef())
def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.leaves.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
self.assertEqual(visitor.leaves, ['"baz"', '"foobar"'])
def test_call_if_inside_transform_attribute(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_inside(m.FunctionDef())
def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.leaves.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
self.assertEqual(visitor.leaves, ['"baz"', '"foobar"'])
def test_call_if_not_inside_visitor_attribute(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableVisitor):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_not_inside(m.FunctionDef())
def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.leaves.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"'])
self.assertEqual(visitor.leaves, ['"foo"', '"bar"'])
def test_call_if_not_inside_transform_attribute(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
self.leaves: List[str] = []
@call_if_not_inside(m.FunctionDef(m.Name("foo")))
def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
@call_if_not_inside(m.FunctionDef())
def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None:
self.leaves.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"'])
self.assertEqual(visitor.leaves, ['"foo"', '"bar"'])
def test_init_with_unhashable_types(self) -> None:
# Set up a simple visitor with a call_if_inside decorator.
class TestVisitor(MatcherDecoratableTransformer):
def __init__(self) -> None:
super().__init__()
self.visits: List[str] = []
@call_if_inside(
m.FunctionDef(m.Name("foo"), params=m.Parameters([m.ZeroOrMore()]))
)
def visit_SimpleString(self, node: cst.SimpleString) -> None:
self.visits.append(node.value)
# Parse a module and verify we visited correctly.
module = fixture(
"""
a = "foo"
b = "bar"
def foo() -> None:
return "baz"
def bar() -> None:
return "foobar"
"""
)
visitor = TestVisitor()
module.visit(visitor)
# We should have only visited a select number of nodes.
self.assertEqual(visitor.visits, ['"baz"'])
class MatchersUnionDecoratorsTest(UnitTest):
@skipIf(bool(sys.version_info < (3, 10)), "new union syntax not available")
def test_init_with_new_union_annotation(self) -> None:
class TransformerWithUnionReturnAnnotation(m.MatcherDecoratableTransformer):
@m.leave(m.ImportFrom(module=m.Name(value="typing")))
def test(
self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom
) -> cst.ImportFrom | cst.RemovalSentinel:
pass
# assert that init (specifically _check_types on return annotation) passes
TransformerWithUnionReturnAnnotation()
|
999,659 | 646d5eb158ec122f71dba04b0676e646bf0c04a3 | from tutorialpress.core.views.categoria import CategoriaViewSet
from tutorialpress.core.views.publicacao import PublicacaoViewSet
|
999,660 | 6fa14cf56759263bfa5410e87688b5e7c8384cd6 | from .base import *
import os
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'blogdb',
'USER': 'admin',
'PASSWORD': 'Rhoden5130!',
#いつかRDSに分けた時用 'HOST': 'blogdb.caa7627qv4ac.ap-northeast-1.rds.amazonaws.com',
'HOST': 'ip-172-31-45-127.ap-northeast-1.compute.internal',
'PORT': '3306',
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
#STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "static")
#]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') |
999,661 | 49f2fd91ec73dfa54baef8bc5efd6d1073db36c3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: 'orleven'
import os
from lib.core.data import paths
from lib.utils.connect import ClientSession
from lib.core.enums import VUL_LEVEL
from lib.core.enums import VUL_TYPE
from lib.core.enums import SERVICE_PORT_MAP
from script import Script
class POC(Script):
def __init__(self, target=None):
self.service_type = SERVICE_PORT_MAP.WEBLOGIC
self.name = 'weblogic burst'
self.keyword = ['weblogic']
self.info = 'weblogic burst'
self.type = VUL_TYPE.WEAKPASS
self.level = VUL_LEVEL.HIGH
Script.__init__(self, target=target, service_type=self.service_type)
async def prove(self):
await self.get_url()
if self.base_url:
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
usernamedic = self.read_file(self.parameter['U']) if 'U' in self.parameter.keys() else self.read_file(
os.path.join(paths.DICT_PATH, 'weblogic_usernames.txt'))
passworddic = self.read_file(self.parameter['P']) if 'P' in self.parameter.keys() else self.read_file(
os.path.join(paths.DICT_PATH, 'weblogic_passwords.txt'))
url = self.base_url + 'console/j_security_check'
async with ClientSession() as session:
async for (username, password) in self.generate_dict(usernamedic, passworddic): # 登陆失败错误过多会锁账户,不建议尝试爆破过多,5次以下差不多
data = 'j_username={}&j_password={}&j_character_encoding=UTF-8'.format(username, password)
async with session.post(url=url, data=data, headers=headers, allow_redirects=False) as res:
if res != None and res.status == 302:
location = res.headers.get('Location', '')
if '/console' in location and '/login/LoginForm.jsp' not in location and '/console/j_security_check' not in location:
# if ('Home Page' in text or 'WebLogic Server Console' in text and 'console.portal' in text):
self.flag = 1
self.res.append({"info": username + "/" + password, "key": "weblogic burst"})
return |
999,662 | 584b17176691594cbd32fb0ed9208c0971dfe646 | '''
Created on Sep 28, 2011
@author: Arif
'''
import xmlrpclib
if __name__ == '__main__':
s = xmlrpclib.ServerProxy('http://localhost:8080')
print "test()"
print ">>", s.test()
|
999,663 | 4897fabae99941264f29fc56ed430ebfb29e3725 | __author__ = 'will'
import pickle
import numpy as np
data = pickle.load( open( "trainingdata.p", "rb" ), encoding="latin1" )
n_images = len(data)
test, training = data[0:int(n_images/3)], data[int(n_images/3):]
def get_training_data():
trX = np.array([np.reshape(a[2],a[2].shape[0]**2) for a in training])
print(np.shape(trX)[1])
trY = np.zeros((len(training)),dtype=np.float)
for i, data in enumerate(training):
trY[i] = float(data[0])
return trX, trY
def get_test_data():
    """Return (teX, teY): flattened test images and their float labels."""
    teX = np.array([np.reshape(a[2], a[2].shape[0] ** 2) for a in test])
    # `np.float` was removed in NumPy 1.24; float64 is the dtype it aliased.
    teY = np.array([float(sample[0]) for sample in test], dtype=np.float64)
    return teX, teY
|
# For each test case: a walker starts at (0, 0) with a limited travel budget
# (`distance`), visits flowers in input order, and counts how many it reaches
# before the budget goes negative.  It then prints the largest prime <= that
# count, or 0 if none exists.
cases = int(input())
for a in range (cases):
    flower, distance = [int(x) for x in input().split()]
    currentX = 0
    currentY = 0
    counter = 0
    for b in range(flower):
        fx, fy = [int(x) for x in input().split()]
        # Subtract the Euclidean distance to the next flower from the budget.
        distance -= (abs(currentX-fx)**2 + abs(currentY-fy)**2)**0.5
        currentX = fx
        currentY = fy
        # `continue` (not `break`) so the remaining coordinate lines of this
        # test case are still consumed from stdin.
        if distance < 0:
            continue
        else:
            counter += 1
    exit = False
    notPrime = False
    answer = 0
    # Scan downward from `counter` for the first prime; trial division by all
    # candidates from sqrt(x) down to 2 (x == y is excluded so 2 stays prime).
    for x in range(counter,1,-1):
        notPrime = False
        for y in range(int(x**0.5)+1,1,-1):
            if x%y==0 and x!=y:
                notPrime = True
                break
        if notPrime == False:
            print(x)
            exit = True
            break
    if exit == False:
        print(0)
|
999,665 | fe6bb4a1b18fda23275f51c568bad66b8e3657d7 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0OA
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2020 - 2021
import copy
import json
import os
import traceback
from rucio.client.client import Client as RucioClient
from rucio.common.exception import (CannotAuthenticate as RucioCannotAuthenticate)
from idds.common import exceptions
from idds.common.constants import (TransformType, CollectionType, CollectionStatus,
ContentStatus, ContentType,
ProcessingStatus, WorkStatus)
from idds.common.utils import run_command
# from idds.workflow.work import Work
from idds.workflow.work import Processing
from idds.atlas.workflow.atlascondorwork import ATLASCondorWork
class ATLASActuatorWork(ATLASCondorWork):
    """Condor-backed 'actuating' work step.

    Stages a sandbox plus the primary input dataset (via Rucio), runs a
    user-supplied executable, and publishes the JSON file the payload writes
    as the processing output; that output is also forwarded as the parameters
    for the next task in the workflow.
    """

    def __init__(self, executable=None, arguments=None, parameters=None, setup=None,
                 work_tag='actuating', exec_type='local', sandbox=None, work_id=None,
                 name=None,
                 primary_input_collection=None, other_input_collections=None, input_collections=None,
                 primary_output_collection=None, other_output_collections=None,
                 output_collections=None, log_collections=None,
                 logger=None,
                 workload_id=None,
                 agent_attributes=None,
                 output_json=None):
        """
        Init a work/task/transformation.

        :param setup: A string to setup the executable enviroment, it can be None.
        :param executable: The executable.
        :param arguments: The arguments.
        :param parameters: A dict with arguments needed to be replaced.
        :param work_type: The work type like data carousel, hyperparameteroptimization and so on.
        :param exec_type: The exec type like 'local', 'remote'(with remote_package set), 'docker' and so on.
        :param sandbox: The sandbox.
        :param work_id: The work/task id.
        :param primary_input_collection: The primary input collection.
        :param other_input_collections: List of the input collections.
        :param output_collections: List of the output collections.
        # :param workflow: The workflow the current work belongs to.
        :param sandbox: The sandbox to be uploaded or the container path.
        :param executable: The executable command.
        :param arguments: The arguments for the executable.
        :param output_json: Name of the JSON file the payload writes its result to.
        """
        # NOTE(review): `name` and `workload_id` are accepted but not forwarded
        # to super() -- confirm this is intentional.
        super(ATLASActuatorWork, self).__init__(executable=executable, arguments=arguments, work_tag=work_tag,
                                                parameters=parameters, setup=setup, work_type=TransformType.Actuating,
                                                exec_type=exec_type, sandbox=sandbox, work_id=work_id,
                                                primary_input_collection=primary_input_collection,
                                                other_input_collections=other_input_collections,
                                                primary_output_collection=primary_output_collection,
                                                other_output_collections=other_output_collections,
                                                input_collections=input_collections,
                                                output_collections=output_collections,
                                                log_collections=log_collections,
                                                logger=logger,
                                                agent_attributes=agent_attributes)
        self.output_json = output_json

        self.terminated = False
        self.tocancel = False

        # (kept for reference) earlier per-agent workdir handling:
        # if self.agent_attributes and 'atlashpowork' in self.agent_attributes:
        #     self.agent_attributes = self.agent_attributes['atlashpowork']
        # self.logger.info("agent_attributes: %s" % self.agent_attributes)

        # if self.agent_attributes and 'workdir' in self.agent_attributes and self.agent_attributes['workdir']:
        #     self.set_workdir(self.agent_attributes['workdir'])
        # self.logger.info("workdir: %s" % self.get_workdir())

        if agent_attributes:
            self.set_agent_attributes(agent_attributes)

    def set_agent_attributes(self, attrs, req_attributes=None):
        """Apply agent attributes; when request identifiers are supplied,
        derive a request/transform-specific working directory under the
        agent-configured workdir."""
        super(ATLASActuatorWork, self).set_agent_attributes(attrs)

        if self.agent_attributes and 'workdir' in self.agent_attributes and self.agent_attributes['workdir']:
            if req_attributes and 'request_id' in req_attributes and 'workload_id' in req_attributes and 'transform_id' in req_attributes:
                req_dir = 'request_%s_%s/transform_%s' % (req_attributes['request_id'],
                                                          req_attributes['workload_id'],
                                                          req_attributes['transform_id'])
                self.set_workdir(os.path.join(self.agent_attributes['workdir'], req_dir))
        self.logger.info("workdir: %s" % self.get_workdir())

    ##########################################   # noqa E266
    def generate_new_task(self):
        """Return True when output parameters exist to seed a follow-up task."""
        self.logger.info("Work %s parameters for next task: %s" % (self.internal_id, str(self.get_parameters_for_next_task())))
        if self.get_parameters_for_next_task():
            return True
        else:
            return False

    ####### functions for transformer ########   # noqa E266
    ######################################      # noqa E266

    def set_output_data(self, data):
        # overwrite to transfer the output of current task to next task
        super(ATLASActuatorWork, self).set_output_data(data)
        super(ATLASActuatorWork, self).set_parameters_for_next_task(data)

    def get_rucio_client(self):
        """Build a Rucio client; wraps authentication failures in IDDSException."""
        try:
            client = RucioClient()
        except RucioCannotAuthenticate as error:
            self.logger.error(error)
            self.logger.error(traceback.format_exc())
            raise exceptions.IDDSException('%s: %s' % (str(error), traceback.format_exc()))
        return client

    def poll_external_collection(self, coll):
        """Refresh `coll` status/metadata from Rucio unless it is already Closed."""
        try:
            if coll.status in [CollectionStatus.Closed]:
                return coll
            else:
                client = self.get_rucio_client()
                did_meta = client.get_metadata(scope=coll.scope, name=coll.name)
                coll.coll_metadata['bytes'] = did_meta['bytes']
                coll.coll_metadata['total_files'] = did_meta['length']
                coll.coll_metadata['availability'] = did_meta['availability']
                coll.coll_metadata['events'] = did_meta['events']
                coll.coll_metadata['is_open'] = did_meta['is_open']
                coll.coll_metadata['run_number'] = did_meta['run_number']
                coll.coll_metadata['did_type'] = did_meta['did_type']
                coll.coll_metadata['list_all_files'] = False

                # Closed when Rucio says the DID is no longer open, or a
                # force_close flag has been set on the collection.
                if (('is_open' in coll.coll_metadata and not coll.coll_metadata['is_open'])
                   or ('force_close' in coll.coll_metadata and coll.coll_metadata['force_close'])):   # noqa: W503
                    coll_status = CollectionStatus.Closed
                else:
                    coll_status = CollectionStatus.Open
                coll.status = coll_status

                if 'did_type' in coll.coll_metadata:
                    if coll.coll_metadata['did_type'] == 'DATASET':
                        coll_type = CollectionType.Dataset
                    elif coll.coll_metadata['did_type'] == 'CONTAINER':
                        coll_type = CollectionType.Container
                    else:
                        coll_type = CollectionType.File
                else:
                    coll_type = CollectionType.Dataset
                coll.coll_metadata['coll_type'] = coll_type

                return coll
        except Exception as ex:
            self.logger.error(ex)
            self.logger.error(traceback.format_exc())
            raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc()))

    def get_input_collections(self):
        """Poll all input collections from Rucio before delegating to the base class."""
        # return [self.primary_input_collection] + self.other_input_collections
        colls = [self._primary_input_collection] + self._other_input_collections
        for coll_int_id in colls:
            coll = self.collections[coll_int_id]
            coll = self.poll_external_collection(coll)
            self.collections[coll_int_id] = coll
        return super(ATLASActuatorWork, self).get_input_collections()

    def get_input_contents(self):
        """
        Get all input contents from DDM.

        The whole primary input dataset is represented as a single File-type
        content spanning ids [0, total_files).
        """
        try:
            ret_files = []
            coll = self.collections[self._primary_input_collection]
            # NOTE(review): `coll` is accessed both as a mapping (coll['coll_id'])
            # and via attributes (coll.coll_metadata) -- presumably the
            # Collection type supports both; confirm.
            ret_file = {'coll_id': coll['coll_id'],
                        'scope': coll['scope'],
                        'name': coll['name'],
                        'bytes': coll.coll_metadata['bytes'],
                        'adler32': None,
                        'min_id': 0,
                        'max_id': coll.coll_metadata['total_files'],
                        'content_type': ContentType.File,
                        'content_metadata': {'total_files': coll['coll_metadata']['total_files']}
                        }
            ret_files.append(ret_file)
            return ret_files
        except Exception as ex:
            self.logger.error(ex)
            self.logger.error(traceback.format_exc())
            raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc()))

    def get_mapped_inputs(self, mapped_input_output_maps):
        """Return the primary input content of every already-mapped map entry."""
        ret = []
        for map_id in mapped_input_output_maps:
            inputs = mapped_input_output_maps[map_id]['inputs']

            # if 'primary' is not set, the first one is the primary input.
            primary_input = inputs[0]
            for ip in inputs:
                if 'primary' in ip['content_metadata'] and ip['content_metadata']['primary']:
                    primary_input = ip
            ret.append(primary_input)
        return ret

    # NOTE(review): mutable default argument `{}` -- harmless here because it
    # is never mutated, but consider `mapped_input_output_maps=None`.
    def get_new_input_output_maps(self, mapped_input_output_maps={}):
        """
        New inputs which are not yet mapped to outputs.

        :param mapped_input_output_maps: Inputs that are already mapped.
        """
        inputs = self.get_input_contents()
        mapped_inputs = self.get_mapped_inputs(mapped_input_output_maps)
        mapped_inputs_scope_name = [ip['scope'] + ":" + ip['name'] for ip in mapped_inputs]

        new_inputs = []
        new_input_output_maps = {}
        for ip in inputs:
            ip_scope_name = ip['scope'] + ":" + ip['name']
            if ip_scope_name not in mapped_inputs_scope_name:
                new_inputs.append(ip)

        # to avoid cheking new inputs if there are no new inputs anymore
        if (not new_inputs and 'status' in self.collections[self._primary_input_collection]
           and self.collections[self._primary_input_collection]['status'] in [CollectionStatus.Closed]):  # noqa: W503
            self.set_has_new_inputs(False)
        else:
            mapped_keys = mapped_input_output_maps.keys()
            if mapped_keys:
                next_key = max(mapped_keys) + 1
            else:
                next_key = 1
            for ip in new_inputs:
                out_ip = copy.deepcopy(ip)
                out_ip['coll_id'] = self.collections[self._primary_output_collection]['coll_id']
                new_input_output_maps[next_key] = {'inputs': [ip],
                                                   'outputs': [out_ip]}
                next_key += 1

        # A single actuating step is always treated as one unfinished point.
        self.unfinished_points = 1

        return new_input_output_maps

    def get_processing(self, input_output_maps, without_creating=False):
        """Return the active processing, creating one unless without_creating."""
        if self.active_processings:
            return self.processings[self.active_processings[0]]
        else:
            if not without_creating:
                return self.create_processing(input_output_maps)
        return None

    def create_processing(self, input_output_maps):
        """Register a fresh (empty-metadata) Processing and mark it active."""
        processing_metadata = {}
        proc = Processing(processing_metadata=processing_metadata)
        self.add_processing_to_processings(proc)
        self.active_processings.append(proc.internal_id)
        return proc

    def get_status_statistics(self, registered_input_output_maps):
        """Tally output-content statuses; also refreshes the total/processed
        output-file counters used by syn_collection_status()."""
        status_statistics = {}

        self.total_output_files = 0
        self.processed_output_file = 0

        for map_id in registered_input_output_maps:
            outputs = registered_input_output_maps[map_id]['outputs']

            self.total_output_files += 1

            for content in outputs:
                if content['status'].name not in status_statistics:
                    status_statistics[content['status'].name] = 0
                status_statistics[content['status'].name] += 1

                if content['status'] == ContentStatus.Available:
                    self.processed_output_file += 1

        self.status_statistics = status_statistics
        return status_statistics

    def syn_collection_status(self):
        """Mirror the file counters onto the input/output collection records."""
        input_collections = self.get_input_collections()
        output_collections = self.get_output_collections()
        # log_collections = self.get_log_collections()

        for input_collection in input_collections:
            input_collection['total_files'] = 1
            input_collection['processed_files'] = 1

        for output_collection in output_collections:
            output_collection['total_files'] = self.total_output_files
            output_collection['processed_files'] = self.processed_output_file

    def syn_work_status(self, registered_input_output_maps):
        """Derive the overall WorkStatus from processing status and input state."""
        self.get_status_statistics(registered_input_output_maps)

        self.syn_collection_status()

        if self.is_processings_terminated() and not self.has_new_inputs:
            if self.is_processings_finished():
                self.status = WorkStatus.Finished
            elif self.is_processings_failed():
                self.status = WorkStatus.Failed
            elif self.is_processings_subfinished():
                self.status = WorkStatus.SubFinished
        else:
            self.status = WorkStatus.Transforming

    ####### functions for carrier ########     # noqa E266
    ######################################     # noqa E266

    def get_rucio_setup_env(self):
        """Shell snippet that sets up the ATLAS/Rucio client environment."""
        script = "export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase\n"
        script += "source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh\n"
        script += "export RUCIO_ACCOUNT=pilot\n"
        script += "localSetupRucioClients\n"
        return script

    def generate_processing_script_sandbox(self, processing):
        """Write (and chmod +x) the shell script that fetches the sandbox,
        downloads the input dataset and runs the payload; returns its path."""
        arguments = self.parse_arguments()

        script = "#!/bin/bash\n\n"
        script += self.get_rucio_setup_env()
        script += "\n"

        script += "sandbox=%s\n" % str(self.sandbox)
        script += "executable=%s\n" % str(self.executable)
        script += "arguments=%s\n" % str(arguments)
        script += "output_json=%s\n" % str(self.output_json)
        script += "\n"

        script += "env\n"
        script += "echo $X509_USER_PROXY\n"
        script += "\n"

        script += "echo 'user id:'\n"
        script += "id\n"
        script += "\n"

        script += "wget $sandbox\n"
        script += 'base_sandbox="$(basename -- $sandbox)"\n'
        script += 'tar xzf $base_sandbox\n'

        dataset = self.collections[self._primary_input_collection]
        script += 'rucio download %s:%s\n' % (dataset['scope'], dataset['name'])

        script += 'chmod +x %s\n' % str(self.executable)
        script += "echo '%s' '%s'\n" % (str(self.executable), str(arguments))
        script += '%s %s\n' % (str(self.executable), str(arguments))

        script += 'ls\n\n'

        long_id = self.get_long_id(processing)
        script_name = 'processing_%s.sh' % long_id
        script_name = os.path.join(self.get_working_dir(processing), script_name)
        with open(script_name, 'w') as f:
            f.write(script)
        run_command("chmod +x %s" % script_name)
        return script_name

    def get_output_json(self, processing):
        """Resolve the payload output filename: explicit attr, agent config,
        or the default 'idds_output.json'."""
        # job_dir = self.get_working_dir(processing)
        if self.output_json:
            return self.output_json
        elif 'output_json' in self.agent_attributes and self.agent_attributes['output_json']:
            output_json = self.agent_attributes['output_json']
        else:
            output_json = 'idds_output.json'
        return output_json

    def generate_processing_script(self, processing):
        """Return (script_path, error) for the Condor submission; errors are None."""
        self.output_json = self.get_output_json(processing)

        script_name = self.generate_processing_script_sandbox(processing)
        return script_name, None

    def get_output_files(self, processing):
        # The payload's JSON result is the only file Condor must bring back.
        return [self.output_json]

    def submit_processing(self, processing):
        """Submit to Condor once; repeated calls are no-ops after a job_id exists."""
        if 'job_id' in processing['processing_metadata']:
            pass
        else:
            job_id, errors = self.submit_condor_processing(processing)
            if errors:
                self.add_errors(errors)
            processing['processing_metadata']['job_id'] = job_id
            processing['processing_metadata']['errors'] = str(self.get_errors())

    def abort_processing(self, processing):
        # Only flags cancellation; poll_processing() turns it into Cancelled.
        self.tocancel = True

    def parse_processing_outputs(self, processing):
        """Load the payload's JSON output; returns (outputs, error_message)."""
        request_id = processing['request_id']
        workload_id = processing['workload_id']
        processing_id = processing['processing_id']

        if not self.output_json:
            return None, 'Request(%s)_workload(%s)_processing(%s) output_json(%s) is not defined' % (request_id, workload_id,
                                                                                                     processing_id, self.output_json)

        job_dir = self.get_working_dir(processing)
        full_output_json = os.path.join(job_dir, self.output_json)
        if not os.path.exists(full_output_json):
            return None, '%s is not created' % str(full_output_json)
        else:
            try:
                with open(full_output_json, 'r') as f:
                    data = f.read()
                outputs = json.loads(data)
                if not outputs:
                    return outputs, "No points generated: the outputs is empty"
                return outputs, None
            except Exception as ex:
                return None, 'Failed to load the content of %s: %s' % (str(full_output_json), str(ex))

    def poll_processing(self, processing):
        """Map Condor job state to (processing_status, outputs, error).

        A Finished job with unparsable/empty output is downgraded to Failed.
        """
        job_status, job_err_msg = self.poll_condor_job_status(processing, processing['processing_metadata']['job_id'])
        processing_outputs = None
        if job_status in [ProcessingStatus.Finished]:
            job_outputs, parser_errors = self.parse_processing_outputs(processing)
            if job_outputs:
                processing_status = ProcessingStatus.Finished
                processing_err = None
                processing_outputs = job_outputs
            else:
                processing_status = ProcessingStatus.Failed
                processing_err = parser_errors
        elif self.tocancel:
            processing_status = ProcessingStatus.Cancelled
            processing_outputs = None
            processing_err = None
        else:
            processing_status = job_status
            processing_err = job_err_msg
        return processing_status, processing_outputs, processing_err

    def poll_processing_updates(self, processing, input_output_maps):
        """Build the processing-update record for the carrier; no per-content
        updates are produced by this work type."""
        processing_status, processing_outputs, processing_err = self.poll_processing(processing)

        processing_metadata = processing['processing_metadata']
        if not processing_metadata:
            processing_metadata = {}
        if processing_err:
            processing_err = processing_err.strip()
            if processing_err:
                self.add_errors(processing_err)
        processing_metadata['errors'] = str(self.get_errors())

        update_processing = {'processing_id': processing['processing_id'],
                             'parameters': {'status': processing_status,
                                            'processing_metadata': processing_metadata,
                                            'output_metadata': processing_outputs}}

        updated_contents = []
        return update_processing, updated_contents, {}
|
999,666 | 435e0e42171adf5e98cd88d0397c475c408320d7 | from .point_wise_feed_forward import PositionwiseFeedForward
from .squeeze_embedding import SqueezeEmbedding
from .attention import Attention
from .bert_aen import BERTAEN
from .bert_spc import BERTSPC
|
999,667 | 289ead22655b067efce3559bc6610eec44d2b291 | import numpy as np
def pca(x, k):
    """Project the rows of `x` onto its top-`k` principal components.

    :param x: (n_samples, n_features) array.
    :param k: number of principal components to keep.
    :return: (n_samples, k) array of mean-centered data projected onto the
        top-k eigenvectors of the covariance matrix.
    """
    u = np.mean(x, axis=0)       # per-feature mean
    x = x - u                    # center the data
    sigma = np.cov(x, rowvar=0)  # covariance matrix of the features
    # eigh (not eig): the covariance matrix is symmetric, so eigh is correct
    # and guarantees real eigenvalues/eigenvectors, whereas eig can return a
    # complex dtype from numerical asymmetry.
    w, v = np.linalg.eigh(sigma)
    index = np.argsort(-w)       # eigenvalue indices, largest first
    index_change = index[:k]     # keep the top-k components
    v_change = v[:, index_change]
    z = x.dot(v_change)
    return z
999,668 | ddf61398be8d3022c7b3994460c725dc2613da64 | from twisted.internet.protocol import Factory
from twisted.internet import reactor, protocol
from network.UOPacketRx import *
from config.uopy import UoPYConfig
import globals
uoConfig = UoPYConfig()
class UOServerProtocol(protocol.Protocol):
    """One instance per client connection.

    Tracks the factory-wide connection count and hands every received chunk
    of bytes to UOPacketRx for verification.
    """

    def __init__(self, factory):
        self.factory = factory

    def connectionMade(self):
        self.factory.numConnections += 1
        self._log_connection_count()

    def dataReceived(self, data):
        packet = UOPacketRx(data, self.transport)
        packet.verifyPacket()

    def connectionLost(self, reason=None):
        self.factory.numConnections -= 1
        self._log_connection_count()

    def _log_connection_count(self):
        # Shared by connectionMade/connectionLost; only prints in debug mode.
        if globals.DEBUG:
            print("Connections: {}".format(self.factory.numConnections))
class UOServerFactory(Factory):
    """Twisted factory that produces UOServerProtocol instances and keeps the
    live-connection counter they share."""

    numConnections = 0

    def __init__(self):
        port = uoConfig.ReadServers()[0]["port"]
        print("UoPY Emu v0.0.1a")
        print("PORT: {}".format(port))

    def buildProtocol(self, addr):
        return UOServerProtocol(self)
def runserver():
    """Listen on the first configured server port and start the Twisted
    reactor (blocks until the reactor is stopped)."""
    reactor.listenTCP(uoConfig.ReadServers()[0]["port"], UOServerFactory())
    reactor.run()
|
999,669 | 400c9ea2fe642a135b842f5487ce70b0b73730c3 | >>> items = ['aaa', 111, (4,5),2.01]
>>> tests = [(4,5), 3.14]
>>> for key in tests:
... for item in items:
... if key == item:
... print(key,'was found')
... break
# Alternative approach: use the `in` membership operator instead of the inner loop.
for key in tests:
if key in items:
print(key,'was found')
else:
print(key, 'was not found')
>>> seq1='spam'
>>> seq2='scam'
>>> res=[]
>>> for x in seq1:
... if x in seq2:
... res.append(x)
...
>>> res
['s', 'a', 'm']
|
999,670 | 346244590108c675cac278de7ab88f89623e5a87 | import requests
from datetime import datetime
import config
class FriendsParser:
    """Interactive VK (vk.com) helper.

    Walks the user through the OAuth code flow, accepts a list of profile
    URLs, finds the friends common to those profiles with friends.getMutual,
    resolves the ids to names/photos via users.get and prints the result.
    """

    client_id = 7881208
    client_secret = config.client_secret
    display = 'page'
    scope = 'friends'

    def __init__(self):
        print('Parser object was created...')
        self.token = ''            # one-time OAuth code pasted by the user
        self.access_token = ''     # bearer token for API calls
        self.user_screen_names = []
        self.profiles = []
        self.user_ids = []         # ids (as str) of the profiles being compared
        self.mutual_ids = []       # ids (as str) of friends shared between them
        self.mutual_friends = []   # resolved info dicts built from mutual_ids

    def output_result(self):
        """Print the resolved mutual friends, one numbered line each."""
        if self.mutual_friends:
            print("Mutual friends:")
            for (count, mutual_friend) in enumerate(self.mutual_friends):
                print(f"{count+1}) {mutual_friend['profile_url']} {mutual_friend['name']} {mutual_friend['photo_id']}")
        else:
            print("0 Mutual friends were found")

    def get_info_by_ids(self):
        """Resolve self.mutual_ids to profile info via users.get, 999 per call.

        Bug fix: the batch slice used to be `self.mutual_ids[:i+step]`, which
        re-submitted the whole prefix on every iteration and produced
        duplicate entries; it is now `[i:i+step]`.
        """
        step = 999
        for i in range(0, len(self.mutual_ids), step):
            user_ids_str = ','.join(self.mutual_ids[i:i+step])
            fields = 'photo_id,nickname,screen_name'
            api_users_get = f'https://api.vk.com/method/users.get?user_ids={user_ids_str}&fields={fields}' \
                            f'&access_token={self.access_token}&v=5.131'
            resp = requests.get(api_users_get)
            response = resp.json()
            if 'error' in response:
                print(f'Error #{response["error"]["error_code"]}', response['error']['error_msg'])
            else:
                for profile_info in response['response']:
                    # Placeholder image when the profile has no photo.
                    photo_url = 'https://vk.com/images/camera_200.png'
                    nickname = ''
                    if 'photo_id' in profile_info:
                        photo_url = f'https://vk.com/{profile_info["screen_name"]}?photo={profile_info["photo_id"]}' \
                                    f'&z=photo{profile_info["photo_id"]}'
                    if 'nickname' in profile_info:
                        nickname = f' {str(profile_info["nickname"])}'  # there is space before nickname
                    mutual_friend = {
                        'profile_url': f"https://vk.com/id{str(profile_info['id'])}",
                        'name': f"{profile_info['first_name']} {profile_info['last_name']}{nickname}",
                        'photo_id': photo_url,
                        'screen_name': profile_info['screen_name']
                    }
                    self.mutual_friends.append(mutual_friend)

    def check_mutual(self):
        """Collect ids of friends shared between the input profiles.

        Uses one source id per batch with up to 99 targets, matching the
        friends.getMutual target limit.
        """
        step = 99
        for i in range(0, len(self.user_ids), step):
            source_uid = self.user_ids[i]
            to_count = i+step
            target_uids = ','.join(self.user_ids[i+1:to_count])
            api_vk_getmutual = f'https://api.vk.com/method/friends.getMutual?source_id={source_uid}' \
                               f'&target_uids={target_uids}&access_token={self.access_token}&v=5.131'
            resp = requests.get(api_vk_getmutual)
            response = resp.json()
            if 'error' in response:
                print(f'Error #{response["error"]["error_code"]}', response['error']['error_msg'])
            else:
                for mutual_dict in response['response']:
                    for mutual_id in mutual_dict['common_friends']:
                        if mutual_id not in self.mutual_ids:
                            self.mutual_ids.append(str(mutual_id))  # mutual id is str

    def input_profiles(self):
        """Prompt for profile URLs and resolve them to numeric user ids.

        Returns True when every batch resolved; False on the first API error.
        Bug fix: `return True` now runs after ALL batches are processed
        instead of returning inside the first batch of 999.
        """
        step = 999
        raw_input = str(input('Enter VK profile urls separated by space: '))
        profile_urls = raw_input.split(' ')
        for i in range(0, len(profile_urls), step):
            user_screen_names = []
            for profile_url in profile_urls[i:i+step]:
                user_screen_names.append(profile_url.split('/')[-1])
            user_screen_names_str = ','.join(user_screen_names)
            api_users_get = f'https://api.vk.com/method/users.get?user_ids={user_screen_names_str}' \
                            f'&access_token={self.access_token}&v=5.131'
            resp = requests.get(api_users_get)
            response = resp.json()
            if 'error' in response:
                print(f'Error #{response["error"]["error_code"]}', response['error']['error_msg'])
                return False
            else:
                for profile in response['response']:
                    self.user_ids.append(str(profile['id']))  # profile id is str
        return True

    def get_token(self):
        """Walk the user through the OAuth code flow.

        Returns True on success, False on an HTTP error; returns None (falsy)
        when the user enters an empty code, which the caller's retry loop
        treats the same as failure.
        """
        url = f'https://oauth.vk.com/authorize?client_id={self.client_id}&display={self.display}&scope={self.scope}' \
              f'&response_type=code&v=5.131'
        print('Go to url below and copy CODE from address row')
        print(url)
        self.token = str(input('Enter code: '))
        if len(self.token):
            access_url = f"https://oauth.vk.com/access_token?client_id={self.client_id}" \
                         f"&client_secret={self.client_secret}&code={self.token}"
            resp = requests.get(access_url)
            if resp.status_code != 200:
                print(f'Error #{resp.status_code}', resp.json()['error_description'])
                return False
            else:
                self.access_token = resp.json()['access_token']
                print('Access token for VK received')
                return True

    def run(self):
        """Drive the full flow: token -> profiles -> mutual ids -> info -> report."""
        got_token = self.get_token()
        while not got_token:
            got_token = self.get_token()
        profiles_entered = self.input_profiles()
        while not profiles_entered:
            profiles_entered = self.input_profiles()
        start_time = datetime.now()
        self.check_mutual()
        self.get_info_by_ids()
        self.output_result()
        end_time = datetime.now()
        print(f"Functions calls in {end_time-start_time} seconds")
# Script entry point: construct the parser and run the interactive flow.
# NOTE(review): consider guarding with `if __name__ == "__main__":`.
p = FriendsParser()
p.run()
|
999,671 | 04309b3aebed9ad2e254500c878082c411874de8 | #
# Copyright (C) 2013 Andrian Nord. See Copyright Notice in main.py
#
from ljd.util.log import errprint
import ljd.bytecode.instructions as ins
import ljd.rawdump.constants
import ljd.rawdump.debuginfo
import ljd.rawdump.code
FLAG_HAS_CHILD = 0b00000001
FLAG_IS_VARIADIC = 0b00000010
FLAG_HAS_FFI = 0b00000100
FLAG_JIT_DISABLED = 0b00001000
FLAG_HAS_ILOOP = 0b00010000
class _State():
def __init__(self, parser):
for key, value in parser.__dict__.items():
setattr(self, key, value)
self.upvalues_count = 0
self.complex_constants_count = 0
self.numeric_constants_count = 0
self.instructions_count = 0
self.debuginfo_size = 0
def read(parser, prototype):
    """Read one prototype from the stream into `prototype`.

    Returns True on success; False when the prototype list is terminated
    (size 0) or the file is truncated / a sub-reader fails.
    """
    parser = _State(parser)

    size = parser.stream.read_uleb128()
    if size == 0:
        # A zero size terminates the prototype list.
        return False

    if not parser.stream.check_data_available(size):
        errprint("File truncated")
        return False

    start = parser.stream.pos

    ok = True
    for step in (_read_flags,
                 _read_counts_and_sizes,
                 _read_instructions,
                 _read_constants,
                 _read_debuginfo):
        # Short-circuits exactly like the original `r = r and step(...)` chain.
        ok = ok and step(parser, prototype)

    end = parser.stream.pos

    # (start/end fed a size sanity-check that is currently disabled)
    #if ok:
    #	assert end - start == size, \
    #		"Incorrectly read: from {0} to {1} ({2}) instead of {3}"\
    #		.format(start, end, end - start, size)

    return ok
def _read_flags(parser, prototype):
    """Decode the prototype flag byte into prototype.flags.

    Returns True on success, False if unknown flag bits are present.
    """
    bits = parser.stream.read_byte()
    flags = prototype.flags

    flags.has_ffi = bool(bits & FLAG_HAS_FFI)
    flags.has_iloop = bool(bits & FLAG_HAS_ILOOP)
    flags.has_jit = not (bits & FLAG_JIT_DISABLED)
    flags.has_sub_prototypes = bool(bits & FLAG_HAS_CHILD)
    flags.is_variadic = bool(bits & FLAG_IS_VARIADIC)

    known = (FLAG_HAS_FFI
             | FLAG_HAS_ILOOP
             | FLAG_JIT_DISABLED
             | FLAG_HAS_CHILD
             | FLAG_IS_VARIADIC)
    leftover = bits & ~known

    if leftover != 0:
        errprint("Unknown prototype flags: {0:08b}", leftover)
        return False

    return True
def _read_counts_and_sizes(parser, prototype):
prototype.arguments_count = parser.stream.read_byte()
prototype.framesize = parser.stream.read_byte()
parser.upvalues_count = parser.stream.read_byte()
parser.complex_constants_count = parser.stream.read_uleb128()
parser.numeric_constants_count = parser.stream.read_uleb128()
parser.instructions_count = parser.stream.read_uleb128()
#if parser.flags.is_stripped:
parser.debuginfo_size = 0
#else:
# parser.debuginfo_size = parser.stream.read_uleb128()
if parser.debuginfo_size == 0:
return True
prototype.first_line_number = parser.stream.read_uleb128()
prototype.lines_count = parser.stream.read_uleb128()
parser.lines_count = prototype.lines_count
return True
def _read_instructions(parser, prototype):
    """Decode the instruction stream, prepending a synthetic FUNCF/FUNCV header.

    Returns True on success, False if any instruction fails to decode.
    """
    header = ins.FUNCV() if prototype.flags.is_variadic else ins.FUNCF()
    header.A = prototype.framesize
    prototype.instructions.append(header)

    for _ in range(parser.instructions_count):
        instruction = ljd.rawdump.code.read(parser)

        if not instruction:
            return False

        #print ("inst:%s" % instruction.name)
        #print ("%x" % instruction.opcode)
        #print ("%x" % instruction.A)
        #print ("%x" % instruction.B)
        #print ("%x" % instruction.CD)

        prototype.instructions.append(instruction)

    return True
def _read_constants(parser, prototype):
    # Delegate to the constants sub-reader, filling prototype.constants.
    return ljd.rawdump.constants.read(parser, prototype.constants)
def _read_debuginfo(stream, prototype):
    # NOTE(review): despite its name, the parameter is the parser state (it is
    # passed through unchanged and carries debuginfo_size), not a raw stream.
    if stream.debuginfo_size == 0:
        # Debug info stripped -- nothing to read.
        return True

    return ljd.rawdump.debuginfo.read(stream,
                                      prototype.first_line_number,
                                      prototype.debuginfo)
|
def judge(string, head, tail):
    """Return True iff string[head..tail] (inclusive) is a palindrome.

    Iterative two-pointer version of the original recursive check; identical
    results without consuming stack depth.
    """
    while head < tail:
        if string[head] != string[tail]:
            return False
        head += 1
        tail -= 1
    return True
if __name__ == '__main__':
    # Read a string and report (messages are in Chinese) whether it is a
    # palindrome: "是回文" = "is a palindrome", "不是回文" = "is not".
    string = input("input your string: ")
    if judge(string, 0, len(string)-1):
        print("{} 是回文".format(string))
    else:
        print("{} 不是回文".format(string))
|
999,673 | d17ebbd9a57ddc4d34445531aaaf510de6af6d4c | """
Database calls
"""
from app.util.mssql import MSSQL
def get_user_etrade_params(userId):
    """Fetch the stored E*Trade session row for one user (or None if absent)."""
    with MSSQL() as db:
        sql = """
            SELECT *
            FROM dbo.Users_Etrade_Session
            WHERE UserID = ?
        """
        return db.query_one(sql, (userId,))
def save_auth_request(token, secret, userId):
    """Persist the OAuth request token/secret for a user's E*Trade session."""
    with MSSQL() as db:
        sql = """
            UPDATE dbo.Users_Etrade_Session
            SET RequestToken = ?,
                RequestSecret = ?
            WHERE UserID = ?
        """
        return db.execute(sql, (token, secret, userId))
def save_session(token, secret, userId):
    """Persist the authorized E*Trade access token/secret for a user and
    stamp the session creation time."""
    with MSSQL() as db:
        # Plain string: the previous f-string prefix had no placeholders.
        sql = """
            UPDATE dbo.Users_Etrade_Session
            SET AccessToken = ?,
                AccessSecret = ?,
                CreateDateTime = GETDATE()
            WHERE UserID = ?
        """
        return db.execute(sql, (token, secret, userId))
def get_users():
    """Return (UserLogin, UserID) for every user whose account owns a monkey."""
    with MSSQL() as db:
        sql = """
            SELECT DISTINCT u.UserLogin, u.UserID
            FROM Monkeys m
            JOIN Accounts_Users au ON m.AccountID = au.AccountID
            JOIN Users u ON au.UserID = u.UserID
        """
        return db.query(sql)
def get_user_monkeys(userId):
    """Return (MonkeyName, MonkeyID) rows for the monkeys a user can access."""
    with MSSQL() as db:
        sql = """
            SELECT m.MonkeyName, m.MonkeyID
            FROM Monkeys m
            JOIN Accounts_Users au ON m.AccountID = au.AccountID
            JOIN Users u ON au.UserID = u.UserID
            WHERE u.UserID = ?
        """
        return db.query(sql, (userId,))
def get_monkey_positions(monkeyId):
    """Return the still-open positions (CloseDate IS NULL) for one monkey."""
    with MSSQL() as db:
        sql = """
            SELECT Symbol, Shares, OpenDate, OpenPrice, isShort, CurrentDate, CurrentPrice
            FROM Positions
            WHERE MonkeyId = ? AND CloseDate is null;
        """
        return db.query(sql, (monkeyId,))
999,674 | 31115626675943b41e17f8630d0289057c8635e9 | from threading import Lock
class RWLock(object):
    """ RWLock class; this is meant to allow an object to be read from by
    multiple threads, but only written to by a single thread at a time.

    All acquire paths are non-blocking: they return False instead of waiting.
    Invariant: w_lock is held whenever there is a writer OR at least one
    reader (the first reader takes it, the last reader releases it).

    Original Source Code: https://gist.github.com/tylerneylon/a7ff6017b7a1f9a506cf75aa23eacfd6
    Credit: Tyler Neylon
    """

    def __init__(self):
        # Held by a writer, or by the reader group as a whole.
        self.w_lock = Lock()
        # Serialises access to the reader counter below.
        self.num_r_lock = Lock()
        # Number of threads currently holding a read lock.
        self.num_r = 0

    # ___________________________________________________________________
    # Reading methods.

    def r_acquire(self):
        """Try to take a read lock; returns False without blocking on contention."""
        if not self.num_r_lock.acquire(blocking=False):
            return False
        self.num_r += 1
        if self.num_r == 1:
            # If we can't acquire the write lock, there is a writer and no readers allowed.
            if not self.w_lock.acquire(blocking=False):
                self.num_r -= 1
                self.num_r_lock.release()
                return False
        self.num_r_lock.release()
        return True

    def r_release(self):
        """Drop a read lock; the last reader releases the shared write lock."""
        assert self.num_r > 0
        self.num_r_lock.acquire()
        self.num_r -= 1
        if self.num_r == 0:
            self.w_lock.release()
        self.num_r_lock.release()

    # ___________________________________________________________________
    # Writing methods.

    def w_acquire(self):
        # Non-blocking: False when any reader or writer is active.
        return self.w_lock.acquire(blocking=False)

    def w_release(self):
        self.w_lock.release()

    def upgradeLock(self):  # Read Lock -> Write Lock
        # The sole reader already holds w_lock (taken in r_acquire), so
        # dropping the reader count is all that is needed to become a writer.
        upgraded = False
        if self.num_r_lock.acquire(blocking=False):
            if self.num_r == 1:  # Write Lock is always acquired when there is at least one reader.
                self.num_r -= 1  # To "upgrade", you must be the only reader
                upgraded = True
            self.num_r_lock.release()
        return upgraded

    def downgradeLock(self):  # Write Lock -> Read Lock
        # NOTE(review): this releases w_lock while recording one reader, so the
        # reader-holds-w_lock invariant looks violated here -- confirm intended.
        downgraded = False
        if self.num_r_lock.acquire(blocking=False):
            if self.num_r <= 1:
                self.num_r += 1
                self.w_release()
                self.num_r_lock.release()
                downgraded = True
        return downgraded
|
999,675 | 70c1239bbbab7d7cccbeaaceaa16c9cd891bd235 |
from cx_Freeze import setup, Executable
copyDependentFiles=True
silent = True
includes = ["lxml", "lxml._elementpath", "lxml.etree", "pyreadline", "pyreadline.release", "gzip"]
excludes = ["tcl", "tk", "tkinter"]
setup(
name = "The Mole",
version = "0.3",
description = "The Mole: Digging up your data",
options = {
"build_exe": {
"includes": includes,
"excludes": excludes
}
},
executables = [Executable("mole.py")]
)
|
999,676 | b99fe97ca5d2d529c670a89ebc266e574e163fc9 | #!/usr/bin/env python3.7
# Minimal smoke-test script: prints a greeting to stdout.
print("Hello, World!")  # the only statement in the file
|
999,677 | 67f79944a0d8db49d6c6226e9f115f6cd9f1e628 | #this is static load balancing for the mobile back haul
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
#from ryu.controller import mac_to_port
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
#from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
#from ryu.lib import mac
from ryu.topology.api import get_switch, get_link
#from ryu.app.wsgi import ControllerBase
from ryu.topology import event
import networkx as nx
class ProjectController(app_manager.RyuApp):
    """Ryu OpenFlow 1.3 app for static load balancing (mobile backhaul).

    Combines a MAC-learning/shortest-path forwarder built on a networkx
    DiGraph with hard-coded group/flow rules for specific switch dpids.
    Written for Python 2 (print statements, networkx 1.x edge-dict API).
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        super(ProjectController, self).__init__(*args, **kwargs)
        self.mac_to_port = {}          # dpid -> {mac: port} learning table
        self.topology_api_app = self   # handle passed to get_switch/get_link
        self.name = 'dynamic_lb'
        self.net=nx.DiGraph()          # topology graph: switch dpids + host MACs
        self.nodes = {}
        self.links = {}
        self.no_of_nodes = 0
        self.no_of_links = 0
        self.i=0
    def ls(self,obj):
        # Debug helper: print the public attribute names of *obj*.
        print("\n".join([x for x in dir(obj) if x[0] != "_"]))
    def add_flow(self, datapath, in_port, dst, actions):
        # Install a priority-1 flow matching (in_port, eth_dst) -> actions.
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = datapath.ofproto_parser.OFPMatch( in_port=in_port, eth_dst=dst)
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=1, instructions=inst)
        datapath.send_msg(mod)
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures , CONFIG_DISPATCHER)
    def switch_features_handler(self , ev):
        # On switch connect: install the table-miss (send-to-controller) rule,
        # then static SELECT-group load-balancing rules for dpids 1, 4, 3, 5.
        print "swith number",ev.msg.datapath.id,"is added!!"
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS , actions)]
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0, instructions=inst)
        datapath.send_msg(mod)
        # add rules to switch s1
        if ev.msg.datapath.id == 1:
            ofproto = datapath.ofproto
            parser = datapath.ofproto_parser
            # Two equal-weight buckets (ports 5 and 6) in a SELECT group so
            # matching traffic is split across both uplinks.
            port_5 = 5
            queue_1 = parser.OFPActionSetQueue(0)
            actions_1 = [queue_1, parser.OFPActionOutput(port_5)]
            port_6 = 6
            queue_2 = parser.OFPActionSetQueue(0)
            actions_2 = [queue_2, parser.OFPActionOutput(port_6)]
            weight_1 = 100
            weight_2 = 100
            watch_port = ofproto_v1_3.OFPP_ANY
            watch_group = ofproto_v1_3.OFPQ_ALL
            buckets = [
                parser.OFPBucket(weight_1, watch_port, watch_group, actions_1),
                parser.OFPBucket(weight_2, watch_port, watch_group, actions_2)]
            group_id = 40
            req = parser.OFPGroupMod(
                datapath, datapath.ofproto.OFPFC_ADD,
                datapath.ofproto.OFPGT_SELECT, group_id, buckets)
            datapath.send_msg(req)
            # NOTE(review): each assignment below overwrites the previous one,
            # so only the LAST OFPMatch (in_port=4) is actually installed.
            # If all four flows were intended, one FlowMod per match is needed.
            match = parser.OFPMatch(in_port=3, eth_type=0x0800, ipv4_src="172.16.10.10", ipv4_dst="172.16.60.10",ip_dscp="26")
            match = parser.OFPMatch(in_port=2, eth_type=0x0800, ipv4_src="172.16.20.30", ipv4_dst="172.16.60.10",ip_dscp="26")
            match = parser.OFPMatch(in_port=1, eth_type=0x0800, ipv4_src="172.16.40.20", ipv4_dst="172.16.60.10",ip_dscp="26")
            match = parser.OFPMatch(in_port=4, eth_type=0x0800, ipv4_src="172.16.30.40", ipv4_dst="172.16.60.10",ip_dscp="26")
            actions = [datapath.ofproto_parser.OFPActionGroup(40)]
            inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                                 actions)]
            mod = datapath.ofproto_parser.OFPFlowMod(
                datapath=datapath, match=match, cookie=0,
                command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                priority=3, instructions=inst)
            datapath.send_msg(mod)
        # adding rules for swith number s4
        if ev.msg.datapath.id == 4:
            ofproto = datapath.ofproto
            parser = datapath.ofproto_parser
            match = parser.OFPMatch(in_port=1, eth_type=0x0800, ipv4_src="10.0.0.4", ipv4_dst="10.0.0.3")
            actions = [parser.OFPActionOutput(2)]
            inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
            mod = datapath.ofproto_parser.OFPFlowMod(
                datapath=datapath, match=match, cookie=0,
                command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                priority=3, instructions=inst)
            datapath.send_msg(mod)
        #adding rules to switch number s3
        if ev.msg.datapath.id == 3:
            ofproto = datapath.ofproto
            parser = datapath.ofproto_parser
            # NOTE(review): as above, the first match is overwritten; only the
            # 10.0.0.2 -> 10.0.0.3 flow is installed.
            match = parser.OFPMatch(in_port=1, eth_type=0x0800, ipv4_src="10.0.0.1", ipv4_dst="10.0.0.3")
            match = parser.OFPMatch(in_port=1, eth_type=0x0800, ipv4_src="10.0.0.2", ipv4_dst="10.0.0.3")
            actions = [parser.OFPActionOutput(2)]
            inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
            mod = datapath.ofproto_parser.OFPFlowMod(
                datapath=datapath, match=match, cookie=0,
                command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                priority=3, instructions=inst)
            datapath.send_msg(mod)
        #adding rules to switch number s4
        if ev.msg.datapath.id == 5:
            ofproto = datapath.ofproto
            parser = datapath.ofproto_parser
            # SELECT group 70: split across ports 1 and 2 with equal weights.
            port_1 = 1
            queue_1 = parser.OFPActionSetQueue(0)
            actions_1 = [queue_1, parser.OFPActionOutput(port_1)]
            port_2 = 2
            queue_2 = parser.OFPActionSetQueue(0)
            actions_2 = [queue_2, parser.OFPActionOutput(port_2)]
            weight_1 = 100
            weight_2 = 100
            watch_port = ofproto_v1_3.OFPP_ANY
            watch_group = ofproto_v1_3.OFPQ_ALL
            buckets = [
                parser.OFPBucket(weight_1, watch_port, watch_group, actions_1),
                parser.OFPBucket(weight_2, watch_port, watch_group, actions_2)]
            group_id = 70
            req = parser.OFPGroupMod(
                datapath, datapath.ofproto.OFPFC_ADD,
                datapath.ofproto.OFPGT_SELECT, group_id, buckets)
            datapath.send_msg(req)
            # NOTE(review): first two matches overwritten; only the last
            # (10.0.0.1 -> 10.0.0.3) flow is installed.
            match = parser.OFPMatch(in_port=3, eth_type=0x0800, ipv4_src="10.0.0.3", ipv4_dst="10.0.0.1")
            match = parser.OFPMatch(in_port=3, eth_type=0x0800, ipv4_src="10.0.0.2", ipv4_dst="10.0.0.2")
            match = parser.OFPMatch(in_port=3, eth_type=0x0800, ipv4_src="10.0.0.1", ipv4_dst="10.0.0.3")
            actions = [datapath.ofproto_parser.OFPActionGroup(70)]
            inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                                 actions)]
            mod = datapath.ofproto_parser.OFPFlowMod(
                datapath=datapath, match=match, cookie=0,
                command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                priority=3, instructions=inst)
            datapath.send_msg(mod)
        # actions = [parser.OFPActionOutput(3)]
        #
        # inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
        #
        # mod = datapath.ofproto_parser.OFPFlowMod(
        #
        # datapath=datapath, match=match, cookie=0,
        #
        # command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
        #
        # priority=3, instructions=inst)
        #
        # datapath.send_msg(mod)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # Learning-switch path: record source MAC in the graph, forward along
        # the shortest path when the destination is known, else flood.
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        #parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)
        dst = eth.dst
        src = eth.src
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})
        #ignore lldp packets
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        if src not in self.net:
            # Attach the host to the graph; the switch->host edge stores the
            # port used to reach it (networkx 1.x positional attr-dict).
            self.net.add_node(src)
            self.net.add_edge(dpid,src,{'port':in_port})
            self.net.add_edge(src,dpid)
        #print src in self.net
        if dst in self.net:
            path=nx.shortest_path(self.net,src,dst)
            next=path[path.index(dpid)+1]       # next hop after this switch
            out_port=self.net[dpid][next]['port']
        else:
            out_port = ofproto.OFPP_FLOOD
        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, in_port, dst, actions)
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
            actions=actions)
        datapath.send_msg(out)
    @set_ev_cls(event.EventSwitchEnter)
    def get_topology_data(self, ev):
        # Rebuild the topology graph from Ryu's topology API: add all switch
        # nodes and both directions of every inter-switch link.
        switch_list = get_switch(self.topology_api_app, None)
        switches = [switch.dp.id for switch in switch_list]
        self.net.add_nodes_from(switches)
        print '#### The Switches added are ###'
        for switch in switch_list:
            print switch
        links_list = get_link(self.topology_api_app, None)
        links=[(link.src.dpid,link.dst.dpid,{'port':link.src.port_no}) for link in links_list]
        #print links
        self.net.add_edges_from(links)
        links=[(link.dst.dpid,link.src.dpid,{'port':link.dst.port_no}) for link in links_list]
        #print links
        self.net.add_edges_from(links)
        print "**********List of links"
        print self.net.edges()
|
999,678 | 0676a10180068ddaa4c0ec2a5c36e0901e31fa1b | import django_userhistory.models as models
from django.contrib import admin
# Register the user-history models with the Django admin site.
# The broad except is best-effort: presumably it guards against
# AlreadyRegistered when this module is imported twice -- TODO confirm.
# (Python 2 `except Exception, e` syntax; `e` is unused.)
try:
    admin.site.register(models.UserAction)
    admin.site.register(models.UserHistory)
    admin.site.register(models.UserTrackedContent)
except Exception, e:
    pass
999,679 | 2e261ddb9df959ead1419419ae64135536fa83ea | # -*- coding: utf8 -*-
import inspect
import os
import sys
import random
import time
import multiprocessing
import logging
import socket
import select
import unittest
BASE_TEST_PATH = os.path.join('test', 'data')
RANDOM_SEED = 42
NUM_DIRS = 10
NUM_SUBDIRS = 10
FILES_PER_DIR = 10
FILE_SIZE = 1024*8
RANDOM_DATA_BUF_SIZE = 1024*1024*4
POLL_WAIT_SECONDS = 5
FILE_DELAY = 0.25
if __package__ is None:
# test code is run from the ./test directory. add the parent
# directory to the path so that we can see all the isi_ps code.
current_file = inspect.getfile(inspect.currentframe())
base_path = os.path.dirname(os.path.dirname(os.path.abspath(current_file)))
sys.path.insert(0, base_path)
from HydraWorker import HydraWorker
from HydraClient import HydraClient
from HydraClient import HydraClientProcess
from HydraServer import HydraServer
from HydraServer import HydraServerProcess
"""
This method creates files with random data in them using a single buffer
"""
def create_files(path, num, size, buffer = None, buf_size = 1024*1024, prefix = 'file', force_overwrite = False):
if buffer is None:
buffer = bytearray(random.getrandbits(8) for x in xrange(buf_size))
for i in xrange(num):
offset = random.randrange(buf_size)
bytes_to_write = size
if force_overwrite is False:
try:
file_lstat = os.lstat(os.path.join(path, '%s%d'%(prefix, i)))
if file_lstat.st_size == size:
continue
except:
pass
with open(os.path.join(path, '%s%d'%(prefix, i)), 'wb') as f:
while bytes_to_write > 0:
remainder = buf_size - offset
if bytes_to_write < remainder:
f.write(buffer[offset:(offset+bytes_to_write)])
bytes_to_write = 0
else:
f.write(buffer[offset:buf_size])
bytes_to_write -= remainder
class HydraTestClassSlowFileProcess(HydraWorker):
def __init__(self, args={}):
super(HydraTestClassSlowFileProcess, self).__init__(args)
# Set a default delay of 0.5 seconds per file processed
self.file_delay = FILE_DELAY
def setFileDelay(self, delay_in_seconds):
self.file_delay = delay_in_seconds
def filter_subdirectories(self, root, dirs, files):
"""
Fill in docstring
"""
return dirs, files
def handle_directory_pre(self, dir):
"""
Fill in docstring
"""
return False
def handle_directory_post(self, dir):
"""
Fill in docstring
"""
return False
def handle_file(self, dir, file):
"""
Fill in docstring
"""
#if file == "skip_check":
# return False
if self.file_delay > 0:
time.sleep(self.file_delay)
file_lstats = os.lstat(os.path.join(dir, file))
#logging.getLogger().debug("Proc file: %s"%os.path.join(dir, file))
return True
def handle_extended_ops(self, data):
if data.get('op') == 'setdelay':
self.file_delay = data.get('payload')
return True
class TestHydraServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.buffer_size = RANDOM_DATA_BUF_SIZE
random.seed(RANDOM_SEED)
cls.base_path = os.path.dirname(os.path.dirname(os.path.abspath(current_file)))
cls.test_path = os.path.join(cls.base_path, BASE_TEST_PATH)
# Check for skip file named 'skip_check' and bypass creation/check if it is present
if os.path.isfile(os.path.join(cls.test_path, 'skip_check')):
return
logging.getLogger().info("Setting up file structure. This may take time")
cls.rand_buffer = bytearray(random.getrandbits(8) for x in xrange(cls.buffer_size))
for i in xrange(NUM_DIRS):
cur_path = os.path.join(cls.test_path, "dir%s"%i)
try:
os.makedirs(cur_path, exists_ok = True)
except:
pass
create_files(cur_path, FILES_PER_DIR, FILE_SIZE, cls.rand_buffer, cls.buffer_size)
for j in xrange(NUM_SUBDIRS):
sub_path = os.path.join(cur_path, "subdir%s"%j)
try:
os.makedirs(sub_path)
except:
pass
create_files(sub_path, FILES_PER_DIR, FILE_SIZE)
@classmethod
def tearDownClass(cls):
print("tearDownClass called")
try:
#cls.server.close()
cls.server = None
except:
pass
cls.rand_buffer = None
cls = None
#@unittest.skip("")
def test_1_spawn_server_and_shutdown(self):
svr = HydraServerProcess()
svr.start()
svr.send({'cmd': 'shutdown'})
svr.join(5)
try:
self.assertFalse(svr.is_alive())
except:
svr.terminate()
raise
#@unittest.skip("")
def test_2_single_client_connection_and_shutdown(self):
svr = HydraServerProcess()
svr.start()
clients = []
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
for c in clients:
c.start()
logging.getLogger().debug("Waiting 2 seconds for clients to connect before shutdown")
time.sleep(2)
svr.send({'cmd': 'shutdown'})
logging.getLogger().debug("Waiting for shutdown up to 10 seconds")
svr.join(10)
try:
self.assertFalse(svr.is_alive())
except:
svr.terminate()
raise
#@unittest.skip("")
def test_3_multiple_client_connection_and_shutdown(self):
svr = HydraServerProcess()
svr.start()
clients = []
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
for c in clients:
c.start()
logging.getLogger().debug("Waiting 2 seconds for clients to connect before shutdown")
time.sleep(2)
svr.send({'cmd': 'shutdown'})
logging.getLogger().debug("Waiting for shutdown up to 10 seconds")
svr.join(10)
try:
self.assertFalse(svr.is_alive())
except:
svr.terminate()
raise
#@unittest.skip("")
def test_4_single_client_single_dir(self):
svr = HydraServerProcess()
svr.start()
clients = []
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
for c in clients:
c.start()
inputs = [svr]
# Wait for server to be idle before submitting work
found = False
for i in range(2):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'state':
if cmd['state'] == 'idle':
found = True
break
self.assertTrue(found, msg="Server never returned to idle state")
found = False
logging.getLogger().debug("Submitting work")
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir0')]})
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'state':
if cmd['state'] == 'idle':
found = True
break
else:
break
self.assertTrue(found, msg="Server never returned to idle state")
logging.getLogger().debug("Server is idle. Requesting shutdown")
svr.send({'cmd': 'shutdown'})
logging.getLogger().debug("Waiting for final stats update")
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'stats':
self.assertEqual(cmd['stats']['processed_files'], 110)
self.assertEqual(cmd['stats']['processed_dirs'], 11)
break
logging.getLogger().debug("Waiting for shutdown up to 10 seconds")
svr.join(10)
try:
self.assertFalse(svr.is_alive())
except:
svr.terminate()
raise
#@unittest.skip("")
def test_5_multiple_client_multiple_dir(self):
svr = HydraServerProcess()
svr.start()
clients = []
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
for c in clients:
c.start()
inputs = [svr]
logging.getLogger().debug("Submitting work")
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir0')]})
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir1')]})
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir2')]})
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir3')]})
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir4')]})
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir5')]})
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path, 'dir6')]})
found = False
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'state':
if cmd['state'] == 'processing':
found = True
break
self.assertTrue(found, msg="Server never sent state change to processing state")
found = False
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'state':
if cmd['state'] == 'idle':
found = True
break
self.assertTrue(found, msg="Server never returned to idle state")
logging.getLogger().debug("Server is idle. Shutting down.")
svr.send({'cmd': 'shutdown'})
logging.getLogger().debug("Waiting for final stats update")
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'stats':
self.assertEqual(cmd['stats']['processed_files'], 770)
self.assertEqual(cmd['stats']['processed_dirs'], 77)
break
logging.getLogger().debug("Waiting for shutdown up to 10 seconds")
svr.join(10)
try:
self.assertFalse(svr.is_alive())
except:
svr.terminate()
raise
#@unittest.skip("")
def test_6_multiple_client_large_dir(self):
svr = HydraServerProcess()
svr.start()
clients = []
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
clients.append(HydraClientProcess({'svr': '127.0.0.1', 'port': 8101, 'file_handler': HydraTestClassSlowFileProcess}))
for c in clients:
c.start()
inputs = [svr]
logging.getLogger().debug("Submitting work")
svr.send({'cmd': 'submit_work', 'paths': [os.path.join(self.test_path)]})
found = False
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'state':
if cmd['state'] == 'processing':
found = True
break
self.assertTrue(found, msg="Server never sent state change to processing state")
found = False
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'state':
if cmd['state'] == 'idle':
found = True
break
self.assertTrue(found, msg="Server never sent state change to idle state")
logging.getLogger().debug("Server is idle. Get individual client stats then shutdown")
svr.send({'cmd': 'get_stats', 'data': 'individual_clients'})
for i in range(20):
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS*5)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'stats_individual_clients':
logging.getLogger().debug("Individual client stat: %s"%cmd['stats'])
else:
break
logging.getLogger().debug("Shutting down")
svr.send({'cmd': 'shutdown'})
logging.getLogger().debug("Waiting for final stats update")
for i in range(20):
try:
readable, _, _ = select.select(inputs, [], [], POLL_WAIT_SECONDS)
if len(readable) > 0:
cmd = svr.recv()
if cmd['cmd'] == 'stats':
self.assertEqual(cmd['stats']['processed_files'], 1101)
self.assertEqual(cmd['stats']['processed_dirs'], 111)
break
except KeyboardInterrupt:
break
svr.send({'cmd': 'shutdown'})
logging.getLogger().debug("Waiting for shutdown up to 10 seconds")
svr.join(10)
try:
self.assertFalse(svr.is_alive())
except:
svr.terminate()
raise
if __name__ == '__main__':
root = logging.getLogger()
root.setLevel(logging.WARN)
#root.setLevel(logging.DEBUG)
#root.setLevel(9)
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(process)d - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
suite1 = unittest.TestLoader().loadTestsFromTestCase(TestHydraServer)
all_tests = unittest.TestSuite([suite1])
unittest.TextTestRunner(verbosity=2).run(all_tests)
|
999,680 | 55c8bdc42efd0affdc0e57c77816b66a38ce931d | import csv
def write_to_csv(list, mode):
    """Write movie records to Backend_movies.csv in the current directory.

    Parameters
    ----------
    list : iterable of dict
        Rows keyed by the field names below.  (NOTE(review): the parameter
        name shadows the builtin ``list``; kept for interface compatibility.)
    mode : str
        File mode, e.g. 'w' to overwrite or 'a' to append.  A header row is
        written on every call, so appending repeats the header.

    Each written row is also echoed to stdout.
    """
    fieldnames = ['id', 'title', 'year', 'runtime', 'genre', 'director', 'cast', 'writer', 'language', 'country', 'awards', 'imdb_rating', 'imdb_voutes', 'box_office']
    # Fix: newline='' is required when handing a file to the csv module;
    # without it every record is followed by a blank line on Windows.
    with open('Backend_movies.csv', mode, encoding='utf-8', newline='') as nn:
        csvwriter = csv.DictWriter(nn, fieldnames=fieldnames)
        csvwriter.writeheader()
        for r in list:
            csvwriter.writerow(r)
            print(r)
def count_awards(title):
    """Return the number of award wins for the last row of Backend_movies.csv
    whose 'title' column contains *title* (0 when no row matches).

    The 'awards' text is parsed positionally, e.g.
    "Won 2 Oscars. Another 150 wins & 218 nominations." -> 2 + 150 = 152.
    """
    wins = 0
    with open('Backend_movies.csv', 'r', encoding='UTF-8', newline='') as f:
        for row in csv.DictReader(f, delimiter=','):
            if title not in row['title']:
                continue
            tokens = row['awards'].split()
            # Too short, or no "... wins ..." clause: nothing to count.
            if len(tokens) < 4 or 'wins' not in tokens:
                continue
            if 'Won' in tokens:
                # "Won N <awards>." plus "Another M wins": N + M.
                wins = int(tokens[len(tokens) - 5]) + int(tokens[1])
            elif 'Nominated' in tokens:
                wins = int(tokens[len(tokens) - 5])
            else:
                wins = int(tokens[len(tokens) - 5])
    return wins
def count_nominations(title):
    """Return the nomination count for the last row of Backend_movies.csv
    whose 'title' column contains *title* (0 when no row matches).

    Original author note: not sure whether wins also count as nominations.
    """
    nominations = 0
    with open('Backend_movies.csv', 'r', encoding='UTF-8', newline='') as f:
        for row in csv.DictReader(f, delimiter=','):
            if title not in row['title']:
                continue
            tokens = row['awards'].split()
            # Too short, or no "... wins ..." clause: nothing to count.
            if len(tokens) < 4 or 'wins' not in tokens:
                continue
            if 'Won' in tokens:
                nominations = int(tokens[len(tokens) - 2])
            elif 'Nominated' in tokens:
                # "Nominated for N ..." plus "... & M nominations.": M + N.
                nominations = int(tokens[len(tokens) - 2]) + int(tokens[2])
            else:
                nominations = int(tokens[len(tokens) - 2])
    return nominations
def extract_boxoffice(title):
    """Return the box-office takings (int dollars) for the last matching row
    of Backend_movies.csv; 0 when no row matches or the value is non-numeric.
    """
    earned = 0
    with open('Backend_movies.csv', 'r', encoding='UTF-8', newline='') as f:
        for row in csv.DictReader(f, delimiter=','):
            if title not in row['title']:
                continue
            figure = row['box_office']
            # Values like "N/A" contain no digits and are skipped.
            if any(ch.isdigit() for ch in figure):
                earned = int(figure.replace('$', '').replace(',', ''))
    return earned
|
999,681 | 3e87984bf8590f3d9ce0482e0b1a6cd3b22c8dad | # -*- coding: utf-8 -*-
import asyncio
import structlog
from async_timeout import timeout
from sanic import response
import ujson
from ..typedefs import HTTPRequest
from ..typedefs import HTTPResponse
from ..utils import async_include_methods
from ..utils import async_nowait_middleware
logger = structlog.get_logger(__name__)
@async_include_methods(include_http_methods=('POST',))
async def get_response(request: HTTPRequest) -> None:
    """Serve a POST JSON-RPC request straight from the cache when possible.

    Returns a sanic JSON response (with an x-jussi-cache-hit header) only if
    every part of the request was found in cache; otherwise falls through by
    returning None. Cache errors and timeouts are logged, never raised.
    """
    config = request.app.config
    jsonrpc_request = request.json
    try:
        async with timeout(config.cache_read_timeout):
            cached = await config.cache_group.get_jsonrpc_response(jsonrpc_request)
            if config.cache_group.is_complete_response(jsonrpc_request, cached):
                cache_key = config.cache_group.x_jussi_cache_key(jsonrpc_request)
                return response.json(
                    cached, headers={'x-jussi-cache-hit': cache_key})
    except ConnectionRefusedError as e:
        logger.error('error connecting to redis cache', e=e)
    except asyncio.TimeoutError:
        logger.info('request exceeded cache read timeout',
                    timeout=config.cache_read_timeout)
    except Exception as e:
        logger.exception('error querying cache for response', exc_info=e)
@async_nowait_middleware
@async_include_methods(include_http_methods=('POST',))
async def cache_response(request: HTTPRequest, response: HTTPResponse) -> None:
    """Store the outgoing JSON-RPC response in the cache group.

    Skips responses that were themselves served from cache (marked with the
    x-jussi-cache-hit header). Runs as fire-and-forget middleware; any
    failure is logged and swallowed so the client response is unaffected.
    """
    try:
        if 'x-jussi-cache-hit' in response.headers:
            return  # already came from cache; nothing new to store
        app_config = request.app.config
        await app_config.cache_group.cache_jsonrpc_response(
            request=request.json,
            response=ujson.loads(response.body),
            last_irreversible_block_num=app_config.last_irreversible_block_num)
    except Exception as e:
        logger.error('error caching response',
                     e=e,
                     request=request.json.log_extra())
|
999,682 | 959aafc23368084c7abb27d183f3699ad1736740 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/2 17:57
# @Author : LiangJiangHao
# @Software: PyCharm
import os
import sys
import requests
from lxml import html
import json
import time
import bs4
from selenium import webdriver
# Scrape Bilibili search result pages 1..49 with Selenium and append each
# video URL (scheme-relative '//' prefix removed) to url.txt.
driver=webdriver.Chrome()
f=open('url.txt','a+')
for x in range(1,50):
    # Progress message: "scraping page %s" (runtime string kept verbatim).
    print('正在抓取第%s页数据'%x)
    url='https://search.bilibili.com/all?keyword=b站看片&from_source=banner_search&spm_id_from=333.334.banner_link.1&page=%s'%x
    print(url)
    driver.get(url)
    if x==1:
        # NOTE(review): first page is reloaded once -- presumably to work
        # around an incomplete initial render; confirm.
        driver.refresh()
    time.sleep(1)  # give the page's JS time to populate the result list
    page=driver.page_source
    soup = bs4.BeautifulSoup(page, "lxml")
    divs = soup.find_all('li', class_='video matrix')
    print(len(divs))
    for video in divs:
        videoUrl=video.find('a')
        # print(videoUrl.get('href'))
        newUrl=videoUrl['href']
        newUrl=newUrl[2:len(newUrl)]  # drop the leading '//' of the protocol-relative href
        print(newUrl)
        f.write(newUrl+'\n')
f.close()
999,683 | ee8cb5f01eb461710e1f5cd4ed76621c48c3d204 | import getpass
from netmiko import ConnectHandler
import smtplib
from email.mime.text import MIMEText
# Prompt the operator for the SSH credentials used by tcam_status().
# (raw_input: this script targets Python 2.)
uname = raw_input("Enter your username: ")
pswd = getpass.getpass(prompt="Enter your Password: ")
# Per-router TCAM summary lines, filled in by tcam_status().
result = []
# Fix: removed a stray bare `test` statement that raised NameError on import.
def sendemail(mes):
    """Email the text *mes* to the network team via the internal SMTP relay.

    Sender, recipients, subject and relay host are fixed constants.
    """
    sender = "TCAMStatus@abc.com"
    recipients = ['Network@abc.com']
    msg = MIMEText(mes)
    msg['From'] = sender
    msg['To'] = ','.join(recipients)
    msg['Subject'] = "TCAM Status RTRs"
    smtp = smtplib.SMTP('appmail.data.ie.abc.net:25')
    smtp.ehlo()
    smtp.sendmail(sender, recipients, msg.as_string())
    smtp.quit()
def tcam_status():
    """Collect TCAM usage from each router and append one summary line per
    device to the module-level ``result`` list.

    Uses module globals: ``uname``/``pswd`` for SSH credentials, ``result``
    as the output accumulator.
    """
    iae_device_list = ['router1', 'router2']
    for k in iae_device_list:
        connection = ConnectHandler(ip=k, device_type='cisco_ios', username=uname, password=pswd)
        output = connection.send_command('sh pl re')
        # Positional scrape of the CLI text: split on 'State', discard the
        # first two chunks, then whitespace-tokenise the remainder.
        output = output.split('State')
        output.pop(0)
        output.pop(0)
        o = ''.join(output)
        o = o.split()
        o.pop(0)
        # NOTE(review): token 36 is assumed to hold the "(...)"-wrapped TCAM
        # figure; this breaks if the platform's output layout shifts -- confirm.
        tc = o[36]
        tc = tc.split("(")
        tc = tc[1]
        tcam = tc.split(")")
        tcam = tcam[0]          # text between the parentheses
        tcam_result = 'TCAM for ' + str(k) + ' = ' + str(tcam)
        # print(tcam_result)
        result.append(tcam_result)
# Script entry: collect TCAM usage from every router, then email the summary.
tcam_status()
message = '\n'.join(result)
sendemail(message)
999,684 | 337eefa85cfe4d2bccfaac4f73de6b336e933ed5 | from pui_tk import Application
from pui_tk.types import Side
from pui_tk.widgets import Label
if __name__ == '__main__':
    # Demo window contrasting pack() on each side with an absolute place().
    app = Application('Packing examples')
    app.pack(Label(app, text='pack-left'), side=Side.LEFT)
    app.pack(Label(app, text='pack-right'), side=Side.RIGHT)
    app.pack(Label(app, text='pack-top'), side=Side.TOP)
    app.pack(Label(app, text='pack-bottom'), side=Side.BOTTOM)
    app.place(Label(app, text='place'), x=200, y=250)
    app.run()
|
999,685 | 736168a20f252d034df9bc4ee88a7e610779bebc | #!/usr/bin/env python
# coding: utf-8
# In[8]:
#付録14.A
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from datetime import datetime, date
import pandas as pd
import csv
import numpy as np
def readcsv(file_name):
    """Read a tick-data CSV file and return all of its rows. (Appendix 14.A)

    Parameters
    ----------
    file_name : str
        Path of the CSV file to read.

    Returns
    -------
    list of list of str
        One inner list of string fields per CSV row, in file order.
    """
    # csv.reader already yields each row as a list of strings, so the
    # original element-by-element copy loop (and the redundant f.close()
    # inside the with-block) is unnecessary.
    with open(file_name, 'r') as f:
        return list(csv.reader(f))
# In[9]:
#付録14.B 立会ごとのデータの抽出
def sessiondata(trades,n_max,i,yymmdd,hm):
    """Appendix 14.B: extract one trading session's rows starting at index i.

    A session is delimited by the time-of-day field (column 1). Only
    hm == 1900 (US hours; 9:00 a.m. Japan time) and hm == 230
    (4:30 p.m. Japan time) start a session.

    NOTE(review): for any other hm value, ``price`` is never assigned and
    the final return raises UnboundLocalError -- callers presumably only
    pass session-start rows; confirm.

    Returns (price, j): the extracted slice of ``trades`` and the loop
    index at which scanning stopped (used by callers to skip ahead).
    """
    j=0
    if hm==1900:  # US hours; 9:00 a.m. Japan time
        jmax=0
        p99=float(trades[i][2])
        for j in range(n_max-i-1):
            yymmdd9=int(trades[i+j+1][0])
            hm9=int(trades[i+j+1][1])
            p9=float(trades[i+j+1][2])
            if yymmdd9>yymmdd:  # actual close is 3:15 p.m. Japan time (next date)
                if hm9<=110:
                    jmax=j
                if hm9>115:  # actual close is 3:15 p.m. Japan time
                    break
        price=trades[i:i+jmax+1]  # store the extracted session rows
    if hm==230:  # 4:30 p.m. Japan time
        jmax=0
        p99=float(trades[i][2])
        for j in range(n_max-i-1):
            p9=float(trades[i+j+1][2])
            hm9=int(trades[i+j+1][1])
            if hm9<=1255:  # 2:55 a.m. Japan time
                jmax=j
            if hm9>1300:  # 3:00 a.m. Japan time
                break
        price=trades[i:i+jmax+1]  # store the extracted session rows
    return price,j
# In[10]:
#付録14.C 緊急性取引 vs 非緊急性取引 取引数
def analysis(trades):
    """Appendix 14.C: immediacy vs non-immediacy trades -- trade counts.

    Per session: count all price-moving trades, filter out trades that look
    like market-maker activity (a 5-tick move that immediately reverts),
    and plot per-session counts and their ratio.
    """
    n_max=len(trades)
    forward=0                     # index of the next unprocessed row
    yymmdd0=int(trades[0][0])
    results=[]
    ratio=[]
    t=0                           # session counter
    for i in range(n_max-2):
        yymmdd=int(trades[i][0])
        hm=int(trades[i][1])
        if i>=forward:
            price,j=sessiondata(trades,n_max,i,yymmdd,hm)
            if hm==1900 or hm==230:  # analyze the session data --------------
                trade0=price[0][2]
                dp0=0
                price2=[]  # prices that actually moved
                length=len(price)
                for ii in range(1,length):
                    trade=float(price[ii][2])
                    if trade!=trade0:  # drop trades with no price movement
                        price2.append(trade)
                        trade0=trade
                price3=[]  # moved prices excluding market-maker-style trades
                for ii in range(2,len(price2)):
                    trade00=float(price2[ii-2])
                    trade0=float(price2[ii-1])
                    trade=float(price2[ii])
                    # A 5-tick move that returns to the price two trades ago
                    # is treated as a market-maker round trip and excluded.
                    if abs(trade0-trade)==5:
                        if trade==trade00:
                            pass
                        else:
                            price3.append(trade)
                    else:
                        price3.append(trade)
                results.append([])
                results[t].append(length)
                results[t].append(len(price3))
                ratio.append(1-float(len(price3))/float(length))
                # ratio of immediacy to non-immediacy trades
                t+=1
            forward=i+j+1  # skip past the rows consumed by this session
    r=pd.DataFrame(ratio,columns=['ratio'])
    plt.figure(figsize=(7,4))
    r[:-1].plot(color='darkgray')
    plt.xlabel('no of sessions')
    plt.ylabel('ratio')
    results=pd.DataFrame(results,columns=['all','immediacy trade'])
    plt.figure(figsize=(7,4))
    results[:-1].plot(style=['-','--'],color=['darkgray','darkblue'])
    plt.xlabel('no of sessions')
    plt.ylabel('no of trades per session')
# Windows path of the Nikkei 225 mini tick-data directory.
buf_path0="C:\\users\\moriya\\documents\\Database\\n225\\tick\\"
if __name__ == "__main__":
    # Load one month of tick data and run the analysis defined above.
    filenames=["n225m201508Sep.csv"]
    filename=filenames[0]
    file_name=buf_path0+filename
    trades=readcsv(file_name)
    yymmdd=trades[0][0]
    print(yymmdd,len(trades))
    analysis(trades)
# In[11]:
#14.D 緊急性取引 vs 非緊急性取引 取引枚数
def analysis(trades):
    """Appendix 14.D: immediacy vs non-immediacy trades -- traded volume.

    Same session filter as 14.C, but accumulates contract volume instead of
    trade counts, and plots per-session volumes and their ratio.
    (Redefines the previous ``analysis``; notebook-style code.)
    """
    n_max=len(trades)
    forward=0
    yymmdd0=int(trades[0][0])
    results=[]
    ratio=[]
    t=0
    for i in range(n_max-2):
        yymmdd=int(trades[i][0])
        hm=int(trades[i][1])
        if i>=forward:
            price,j=sessiondata(trades,n_max,i,yymmdd,hm)
            if hm==1900 or hm==230:  # analyze the session data --------------
                trade0=price[0][2]
                dp0=0
                price2=[]  # prices that actually moved (with their volume)
                length=len(price)
                volume=0   # total session volume
                volume2=0  # volume of non-market-maker moving trades
                tt=0
                for ii in range(1,length):
                    trade=float(price[ii][2])
                    v=int(price[ii][5])
                    volume+=v
                    if trade!=trade0:  # drop trades with no price movement
                        price2.append([])
                        price2[tt].append(trade)
                        price2[tt].append(v)
                        tt+=1
                        trade0=trade
                price3=[]  # moved prices excluding market-maker-style trades
                for ii in range(2,len(price2)):
                    trade00=float(price2[ii-2][0])
                    trade0=float(price2[ii-1][0])
                    trade=float(price2[ii][0])
                    v=float(price2[ii][1])
                    # Exclude 5-tick moves that revert (market-maker round trips).
                    if abs(trade0-trade)==5:
                        if trade==trade00:
                            pass
                        else:
                            price3.append(trade)
                            volume2+=v
                    else:
                        price3.append(trade)
                        volume2+=v
                results.append([])
                #results[t].append(length)
                #results[t].append(len(price3))
                results[t].append(volume)
                results[t].append(volume2)
                ratio.append(1-float(volume2)/float(volume))
                t+=1
            forward=i+j+1  # skip past the rows consumed by this session
    r=pd.DataFrame(ratio,columns=['ratio'])
    plt.figure(figsize=(7,4))
    r[:-1].plot(color='darkgray')
    plt.xlabel('no of sessions')
    plt.ylabel('ratio')
    #results=pd.DataFrame(results,columns=['all','immediacy trade','volume','volume2'])
    results=pd.DataFrame(results,columns=['all','immediacy trade'])
    plt.figure(figsize=(7,4))
    results[:-1].plot(style=['-','--'],color=['darkgray','darkblue'])
    #results.volume2[:-1].plot()
    plt.xlabel('no of sessions')
    plt.ylabel('trading volume')
# Windows path of the Nikkei 225 mini tick-data directory.
buf_path0="C:\\users\\moriya\\documents\\Database\\n225\\tick\\"
if __name__ == "__main__":
    # Load one month of tick data and run the analysis defined above.
    filenames=["n225m201508Sep.csv"]
    filename=filenames[0]
    file_name=buf_path0+filename
    trades=readcsv(file_name)
    yymmdd=trades[0][0]
    print(yymmdd,len(trades))
    analysis(trades)
# In[12]:
#14.E 緊急性取引 売り手主導・買い手主導
def analysis(trades):
    """Appendix 14.E: immediacy trades, seller- vs buyer-initiated.

    Classifies each price-moving, non-reverting trade as buyer-initiated
    (uptick) or seller-initiated (downtick), then scatter-plots the net
    volume against the session's open-to-close log price change.
    (Redefines the previous ``analysis``; notebook-style code.)
    """
    n_max=len(trades)
    forward=0
    yymmdd0=int(trades[0][0])
    results=[]
    ratio=[]
    t=0
    tmp=0
    tmp2=0
    for i in range(n_max-2):
        yymmdd=int(trades[i][0])
        hm=int(trades[i][1])
        if i>=forward:
            price,j=sessiondata(trades,n_max,i,yymmdd,hm)
            if hm==1900 or hm==230:  # analyze the session data --------------
                trade0=float(price[0][2])
                p_open=trade0
                price2=[]
                length=len(price)
                volume2p=0   # buyer-initiated volume
                volume2m=0   # seller-initiated volume
                uptick=0
                dwntick=0
                tt=0
                for ii in range(1,length):
                    trade=float(price[ii][2])
                    bid=float(price[ii][3])    # bid quote
                    offer=float(price[ii][4])  # ask quote
                    v=int(price[ii][5])        # traded contracts
                    if trade!=trade0:  # keep only price-moving trades
                        price2.append([])
                        price2[tt].append(trade)
                        price2[tt].append(bid)
                        price2[tt].append(offer)
                        price2[tt].append(v)
                        tt+=1
                        trade0=trade
                temp=0
                length=len(price2)
                for ii in range(2,length):
                    trade00=float(price2[ii-2][0])
                    trade0=float(price2[ii-1][0])
                    trade=float(price2[ii][0])
                    B=float(price2[ii][1])  # bid quote
                    O=float(price2[ii][2])  # ask quote
                    v=float(price2[ii][3])  # traded contracts
                    tmp2+=v
                    # trade!=trade00 excludes immediate reversions (see 14.C).
                    if trade>trade0 and trade!=trade00:  # buyer-initiated
                        volume2p+=v
                        uptick+=1
                    if trade<trade0 and trade!=trade00:  # seller-initiated
                        volume2m+=v
                        dwntick+=1
                results.append([])
                results[t].append(np.log(trade)-np.log(p_open))
                results[t].append((volume2p-volume2m))
                results[t].append((uptick-dwntick))
                t+=1
            forward=i+j+1  # skip past the rows consumed by this session
    results=pd.DataFrame(results,columns=['pgap','vgap','tgap'])
    plt.figure(figsize=(7,4))
    plt.scatter(results.pgap,results.vgap,color='darkgray')
    plt.xlabel('log price difference between open and close')
    plt.ylabel('net trading volume')
# Windows path of the Nikkei 225 mini tick-data directory.
buf_path0="C:\\users\\moriya\\documents\\Database\\n225\\tick\\"
if __name__ == "__main__":
    # Load one month of tick data and run the analysis defined above.
    filenames=["n225m201508Sep.csv"]
    filename=filenames[0]
    file_name=buf_path0+filename
    trades=readcsv(file_name)
    yymmdd=trades[0][0]
    print(yymmdd,len(trades))
    analysis(trades)
# In[13]:
#14.F 非緊急性取引 EMOアルゴリズム
def analysis(trades):
    """Per-session EMO-style classification of *non-urgent* trades (those
    executed without moving the price) versus the open-to-close log return.

    Zero-tick trades at the offer count as buys, at the bid as sells; trades
    inside the spread are classified by the sign of the last price move
    (`dp0`).  Relies on the module-level helper `sessiondata` -- see above.
    """
    n_max=len(trades)
    forward=0
    yymmdd0=int(trades[0][0])
    results=[]
    ratio=[]
    t=0
    tmp=0                          # trades classified via the dp0 fallback
    for i in range(n_max-2):
        yymmdd=int(trades[i][0])
        hm=int(trades[i][1])
        if i>=forward:
            price,j=sessiondata(trades,n_max,i,yymmdd,hm)
            if hm==1900 or hm==230:# analyse this session's data -------------
                trade0=float(price[0][2])
                p_open=trade0
                length=len(price)
                volume2p=0         # buy-classified volume
                volume2m=0         # sell-classified volume
                tt=0
                dp0=5              # sign of the last price move (init positive)
                for ii in range(1,length):
                    trade=float(price[ii][2])
                    bid=float(price[ii][3])
                    offer=float(price[ii][4])
                    v=int(price[ii][5])
                    if trade==trade0:# keep only zero-tick (non-urgent) trades
                        if trade==offer:
                            volume2p+=v
                        else:
                            if trade==bid:
                                volume2m+=v
                            else:
                                # Inside the spread: classify by the last move.
                                tmp+=1
                                if dp0>0:
                                    volume2p+=v
                                if dp0<0:
                                    volume2m+=v
                    else:
                        dp0=trade-trade0
                        trade0=trade
                results.append([])
                results[t].append(np.log(trade)-np.log(p_open))
                results[t].append((volume2p-volume2m))
                t+=1
            forward=i+j+1
    print(float(tmp)/float(n_max))
    results=pd.DataFrame(results,columns=['p_gap','gap'])
    plt.figure(figsize=(7,4))
    plt.scatter(results.p_gap,results.gap,color='darkblue')
    plt.xlabel('log price difference between open and close')
    plt.ylabel('net trading volume')
    plt.figure(figsize=(7,4))
buf_path0="C:\\users\\moriya\\documents\\Database\\n225\\tick\\"
if __name__ == "__main__":
    # Load the tick file and run the analysis defined directly above.
    filenames=["n225m201508Sep.csv"]
    filename=filenames[0]
    file_name=buf_path0+filename
    trades=readcsv(file_name)
    yymmdd=trades[0][0]
    print(yymmdd,len(trades))
    analysis(trades)
# In[14]:
#14.G 実現ボラティリティと緊急性取引の取引回数
def analysis(trades):
    """Per-session realized volatility (sum of squared hourly price changes)
    against the number of price-moving ("immediate") trades.

    Relies on the module-level helper `sessiondata` -- see above.
    """
    n_max=len(trades)
    forward=0
    yymmdd0=int(trades[0][0])
    results=[]
    I=[]                           # [realized variance, no. of price moves]
    zN=[]
    t=0
    for i in range(n_max-2):
        yymmdd=int(trades[i][0])
        hm=int(trades[i][1])
        if i>=forward:
            price,j=sessiondata(trades,n_max,i,yymmdd,hm)
            if hm==1900 or hm==230:# analyse this session's data -------------
                length=len(price)
                trade0=float(price[0][2])
                trade00=trade0
                hh0=int(int(price[0][1])/100)  # hour of the first trade
                rv=0.0
                nrv=0              # number of hourly sampling points
                l=0                # number of price-moving trades
                for ii in range(1,length):
                    trade=float(price[ii][2])
                    hh=int(int(price[ii][1])/100)
                    if hh!=hh0:
                        # Sample the squared price change once per hour boundary.
                        rv+=(trade-trade0)**2
                        #rv+=(np.log(trade)-np.log(trade0))**2
                        trade0=trade
                        nrv+=1
                        hh0=hh
                    if trade!=trade00:
                        l+=1
                        trade00=trade
                # NOTE(review): raises ZeroDivisionError if a session never
                # crosses an hour boundary (nrv == 0) -- confirm the data.
                rv=rv/nrv
                I.append([])
                I[t].append(rv)
                I[t].append(l)
                t+=1
            forward=i+j+1
            #print t,yymmdd,rv
    I=pd.DataFrame(I,columns=['rv','speed'])
    plt.figure(figsize=(7,4))
    plt.scatter(I.speed,I.rv,c='darkred')
    plt.xlabel('no of immediate trades')
    plt.ylabel('realized volatility')
buf_path0="C:\\users\\moriya\\documents\\Database\\n225\\tick\\"
if __name__ == "__main__":
    # Load the tick file and run the analysis defined directly above.
    filenames=["n225m201508Sep.csv"]
    filename=filenames[0]
    file_name=buf_path0+filename
    trades=readcsv(file_name)
    yymmdd=trades[0][0]
    print(yymmdd,len(trades))
    analysis(trades)
# In[15]:
#14.H 緊急性取引 売り手主導・買い手主導
def analysis(trades):
    """Per-session net uptick count (buyer- minus seller-initiated price
    moves) against the open-to-close log return.

    Same zero-tick filtering as the volume variant above, but it tracks tick
    counts only.  Relies on the module-level helper `sessiondata`.
    """
    n_max=len(trades)
    forward=0
    yymmdd0=int(trades[0][0])
    results=[]
    ratio=[]
    t=0
    tmp=0
    tmp2=0
    for i in range(n_max-2):
        yymmdd=int(trades[i][0])
        hm=int(trades[i][1])
        if i>=forward:
            price,j=sessiondata(trades,n_max,i,yymmdd,hm)
            if hm==1900 or hm==230:# analyse this session's data -------------
                trade0=float(price[0][2])
                p_open=trade0
                price2=[]          # keep only trades where the price moved
                length=len(price)
                uptick=0
                dwntick=0
                tt=0
                for ii in range(1,length):
                    trade=float(price[ii][2])
                    if trade!=trade0: # drop zero-tick trades
                        price2.append([])
                        price2[tt].append(trade)
                        tt+=1
                        trade0=trade
                temp=0
                length=len(price2)
                for ii in range(2,length):
                    trade00=float(price2[ii-2][0])
                    trade0=float(price2[ii-1][0])
                    trade=float(price2[ii][0])
                    if trade>trade0 and trade!=trade00:# buyer-initiated
                        uptick+=1
                    if trade<trade0 and trade!=trade00:# seller-initiated
                        dwntick+=1
                results.append([])
                results[t].append(np.log(trade)-np.log(p_open))
                results[t].append((uptick-dwntick))
                t+=1
            forward=i+j+1
    results=pd.DataFrame(results,columns=['pgap','tgap'])
    plt.figure(figsize=(7,4))
    plt.scatter(results.pgap,results.tgap,c='indianred')
    plt.xlabel('log price difference between open and close')
    plt.ylabel('uptick-downtick')
buf_path0="C:\\users\\moriya\\documents\\Database\\n225\\tick\\"
if __name__ == "__main__":
    # Load the tick file and run the analysis defined directly above.
    filenames=["n225m201508Sep.csv"]
    filename=filenames[0]
    file_name=buf_path0+filename
    trades=readcsv(file_name)
    yymmdd=trades[0][0]
    print(yymmdd,len(trades))
    analysis(trades)
# In[ ]:
|
999,686 | a66c9149c80063830351addee889d194ec8034ab | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Loading the raw dataset
rawMovieData = pd.read_excel('2014 and 2015 CSM dataset.xlsx')

# Start cleaning
# ---> Start with missing values
# NOTE(review): bare .isnull().sum() has no effect outside a REPL.
rawMovieData.isnull().sum()
# ---> In 'Budget' only 1 row is NA so we can drop it
rawMovieData.dropna(subset=['Budget'], inplace =True)
# ---> Budget attribute show in scientific notation so convert to int
rawMovieData['Budget'] = rawMovieData['Budget'].astype(int)
# ---> Next we have 10 missing value in screens
# ---> TODO: Predict the missing values instead of dropping them
rawMovieData.dropna(subset=['Screens'], inplace =True)
# ---> And lastly we have 33 missing in aggregate Followers
rawMovieData.dropna(subset=['Aggregate Followers'], inplace =True)
# ---> Aggregate Followers attribute show in scientific notation so convert to int
rawMovieData['Aggregate Followers'] = rawMovieData['Aggregate Followers'].astype(int)

# ---> Pay off with only ticket = If the movie gain more than its budget with only
# ---> the money which gained with the movie tickets. (Total movie gain is not just ticket gain)
rawMovieData['IsPayOffWithOnlyTicket'] = rawMovieData['Gross'] - rawMovieData['Budget']
# NOTE(review): the two chained-indexing assignments below trigger pandas'
# SettingWithCopyWarning and can silently no-op on a copy; prefer
# rawMovieData.loc[mask, 'IsPayOffWithOnlyTicket'] = value.
# If no its assigned 0
rawMovieData['IsPayOffWithOnlyTicket'][rawMovieData['IsPayOffWithOnlyTicket'] < 0] = 0
# If yes its assigned 1
rawMovieData['IsPayOffWithOnlyTicket'][rawMovieData['IsPayOffWithOnlyTicket'] > 0] = 1

# Feature frame used by every model below.
processedData = pd.DataFrame()
processedData['Ratings'] = rawMovieData['Ratings']
processedData['Budget'] = rawMovieData['Budget']
processedData['Screens'] = rawMovieData['Screens']
processedData['Sentiment'] = rawMovieData['Sentiment']
processedData['Views'] = rawMovieData['Views']
processedData['Like-Dislike'] = rawMovieData['Likes'] - rawMovieData['Dislikes']
processedData['Comments'] = rawMovieData['Comments']
processedData['TotalFollowers'] = rawMovieData['Aggregate Followers']

# Binary target: did ticket sales alone cover the budget?
targetData = pd.DataFrame()
targetData['IsPayOffWithOnlyTicket'] = rawMovieData['IsPayOffWithOnlyTicket']
# End of preperation
# Start Scaling
from sklearn.preprocessing import MinMaxScaler

scaledData = pd.DataFrame()
mmScaler = MinMaxScaler()


def _minmax_column(values):
    """Min-max scale a 1-D array of values into [0, 1] (column vector in/out)."""
    return mmScaler.fit_transform(values.reshape(-1, 1))


# Column order matches the original frame.  The original code assigned the
# raw 'Ratings' and 'Sentiment' columns first and then overwrote them with
# scaled versions further down; the dead first assignments are removed and
# the copy-pasted reshape boilerplate is factored into _minmax_column.
# Ratings are first mapped to integers on a 0-100 scale, then scaled.
scaledData['Ratings'] = _minmax_column(
    (processedData['Ratings'].values * 10).astype(int))
for _col in ['Budget', 'Screens']:
    scaledData[_col] = _minmax_column(processedData[_col].values)
scaledData['Sentiment'] = _minmax_column(processedData['Sentiment'].values)
for _col in ['Views', 'Like-Dislike', 'Comments', 'TotalFollowers']:
    scaledData[_col] = _minmax_column(processedData[_col].values)
# End of scaling
# ----------> VISUALIZATIONS <----------
# Pairwise regressions and a correlation heatmap of all features + target.
visualData = scaledData.copy()
visualData['IsPayOffWithOnlyTicket'] = targetData['IsPayOffWithOnlyTicket'].copy()
import seaborn as sns
sns.pairplot(visualData,kind='reg')
sns.heatmap(visualData.corr(), annot = True)
# NOTE(review): bare .describe() has no visible effect outside a REPL.
rawMovieData.describe()
# ----------> VISUALIZATIONS <----------
calculationsdf = pd.DataFrame()  # NOTE(review): unused in the visible code; kept for compatibility


def calculations(arr):
    """Derive classification metrics from a 2x2 sklearn confusion matrix.

    `confusion_matrix(y_true, y_pred)` lays counts out as
    [[TN, FP], [FN, TP]] (rows = actual class, columns = predicted class).
    The original code read arr[0, 1] as FalseNeg and arr[1, 0] as FalsePos,
    which swapped FP and FN and corrupted every derived metric.

    Returns a dict with Sensitivity, Specificity, Precision, Accuracy and
    F-Measure.  Raises ZeroDivisionError on degenerate matrices.
    """
    TrueNeg = arr[0, 0]
    FalsePos = arr[0, 1]   # actual negative, predicted positive
    FalseNeg = arr[1, 0]   # actual positive, predicted negative
    TruePos = arr[1, 1]
    pos = TruePos + FalseNeg   # all actual positives
    neg = TrueNeg + FalsePos   # all actual negatives
    sensitivity = TruePos / (TruePos + FalseNeg)
    specificity = TrueNeg / (TrueNeg + FalsePos)
    precision = TruePos / (TruePos + FalsePos)
    # Recall of each class weighted by its prevalence == overall accuracy.
    accuracy = sensitivity * ((pos)/(pos+neg)) + specificity * ((neg)/(pos+neg))
    f_measure = (2 * precision * sensitivity) / (precision + sensitivity)
    calcDisc = {
        "Sensitivity": sensitivity,
        "Specificity": specificity,
        "Precision": precision,
        "Accuracy": accuracy,
        "F-Measure": f_measure}
    return calcDisc
# Start Data splitting
from sklearn.model_selection import train_test_split

# Hold out 40% of the rows for evaluation.  NOTE: the split is neither
# stratified nor seeded, so metrics vary from run to run.
x_train, x_test, y_train, y_test = train_test_split(scaledData, targetData, test_size = 0.40)
# End Data splitting

from sklearn.metrics import confusion_matrix


def _plot_cm(cm, title, cmap):
    """Render a confusion matrix as a percentage heatmap.

    sklearn's confusion_matrix puts the *true* class on the rows, which
    seaborn draws on the y axis -- the original script had the x/y axis
    labels swapped.
    """
    sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap=cmap)
    plt.xlabel("Predicted value")
    plt.ylabel("Real Value")
    plt.title(title)
    plt.show()


# ---> LOGISTIC REGRESSION <---
from sklearn.linear_model import LogisticRegression
logReg = LogisticRegression(random_state = 0)
logReg.fit(x_train,y_train)
y_pred = logReg.predict(x_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
_plot_cm(cm, "LOGISTIC REGRESSION", 'Blues')
logisticCalc= calculations(cm)
# ---> LOGISTIC REGRESSION <---

# ---> DECISION TREE CLASSIFIER <---
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier(max_depth = 3, criterion = 'entropy', random_state = 0)
dtc.fit(x_train, y_train)
dtPred = dtc.predict(x_test)
cm2 = confusion_matrix(y_test, dtPred)
_plot_cm(cm2, "DECISION TREE CLASSIFIER", 'Greens')
dtcCalc= calculations(cm2)
# ---> DECISION TREE CLASSIFIER <---

# ---> KNN CLASSIFIER <---
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
knn.fit(x_train, y_train)
knnPred = knn.predict(x_test)
cm3 = confusion_matrix(y_test, knnPred)
_plot_cm(cm3, "KNN CLASSIFIER", 'copper_r')
knnCalc= calculations(cm3)
# ---> KNN CLASSIFIER <---

# ---> RANDOM FOREST CLASSIFIER <---
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators = 50, max_depth = 5, criterion = 'entropy', random_state = 0)
rfc.fit(x_train, y_train)
rfcPred = rfc.predict(x_test)
cm4 = confusion_matrix(y_test, rfcPred)
_plot_cm(cm4, "RANDOM FOREST CLASSIFIER", 'binary')
randomCalc = calculations(cm4)
# ---> RANDOM FOREST CLASSIFIER <---

# ---> SUPPORT VECTOR CLASSIFIER (LINEAR) <---
from sklearn.svm import SVC
svm = SVC(kernel = 'linear', random_state = 0)
svm.fit(x_train, y_train)
svcPred = svm.predict(x_test)
cm5 = confusion_matrix(y_test, svcPred)
_plot_cm(cm5, "SUPPORT VECTOR CLASSIFIER (LINEAR)", 'PuBu')
svmLCalc = calculations(cm5)
# ---> SUPPORT VECTOR CLASSIFIER (LINEAR) <---

# ---> SUPPORT VECTOR CLASSIFIER (RBF) <---
svm2 = SVC(kernel = 'rbf', random_state = 0)
svm2.fit(x_train, y_train)
svcPred2 = svm2.predict(x_test)
cm6 = confusion_matrix(y_test, svcPred2)
svmRCalc = calculations(cm6)
_plot_cm(cm6, "SUPPORT VECTOR CLASSIFIER (RBF)", 'Reds')
# ---> SUPPORT VECTOR CLASSIFIER (RBF) <---
|
999,687 | 3c8c67ee5eefa44cc0957428adafc90cb63d5eb1 | """
Created on Thu Oct 25 15:54:19 2018
General function script to load from
@author: alexschw
"""
import itertools
import numpy as np
def Gaussian2D(Amp, Shape, Sig):
    """Return a 2D Gaussian of amplitude `Amp` on a grid of size `Shape`.

    `Shape` may be any 2-element sequence (list, tuple or ndarray); zero
    entries are replaced by ceil(3*sigma) and even entries are reduced by
    one so the peak lands exactly on a centre sample.

    Fixes vs. the original: inputs are copied before the in-place `+=`/`-=`
    adjustments, so the caller's `Shape` array is no longer mutated and a
    plain Python list no longer breaks (`list += ndarray` extended the
    list).  The old comment also claimed 6*sigma where the code uses
    3*sigma.
    """
    Sig = np.asarray(Sig, dtype=float)
    Shape = np.asarray(Shape).astype(int).copy()
    # Zero-sized axes default to a 3-sigma support.
    Shape += (Shape == 0) * np.ceil(3 * Sig).astype('int')
    # Force odd sizes so the grid has a single centre sample.
    Shape -= (Shape % 2 == 0)
    # Calculate the center of the shape
    G_Center = (Shape - 1) // 2
    # gauss = Amp * exp(-((x - cx)^2 / (2*sx^2) + (y - cy)^2 / (2*sy^2)))
    gauss = np.array(np.meshgrid(*map(np.arange, Shape)))
    gauss = 0.5 * (gauss - G_Center[:, None, None])**2 / Sig[:, None, None]**2
    gauss = Amp * np.exp(-np.sum(gauss, axis=0))
    return gauss.transpose()
def Gabor(Amp, Shape, Sig, theta, freq):
    """Return a 2D Gabor patch: Gaussian envelope times a cosine carrier.

    `theta` is the carrier orientation in degrees; `freq` is the number of
    carrier cycles relative to `Shape[0]`.  `Shape` handling matches
    `Gaussian2D` (zero entries -> 3*sigma support, even entries -> odd).

    Fixes vs. the original: `Shape` is copied before the in-place
    adjustments, so the caller's array is no longer mutated and plain-list
    input no longer breaks.
    """
    # convert degree to radians
    theta = theta * np.pi / 180
    Sig = np.asarray(Sig, dtype=float)
    Shape = np.asarray(Shape).astype(int).copy()
    halfSDsq = 1.0 / (2 * Sig**2)
    # If one or both of the shape values are zero replace it by 3*sigma
    Shape += (Shape == 0) * np.ceil(3 * Sig).astype('int')
    # Decrease even shape values by 1
    Shape -= (Shape % 2 == 0)
    # Calculate the center of the shape
    G_cen = (Shape - 1) // 2
    # Centred coordinate grid, rotated by theta.
    grid = np.array(np.meshgrid(*map(np.arange, Shape))) - G_cen[:, None, None]
    x_th = grid[0, :, :] * np.cos(theta) + grid[1, :, :] * np.sin(theta)
    y_th = -grid[0, :, :] * np.sin(theta) + grid[1, :, :] * np.cos(theta)
    Gaussian = np.exp(-halfSDsq[0] * x_th * x_th - halfSDsq[1] * y_th * y_th)
    gabor = Amp * Gaussian * np.cos(np.pi * freq * y_th / Shape[0])
    return gabor
def positive(Arr):
    """Return `Arr` as an ndarray with every negative entry replaced by 0.

    The input itself is never modified; a fresh array is always returned.
    """
    values = np.array(Arr)
    return np.where(values < 0, 0, values)
def normpdf(x, mu, sigma):
    """Evaluate the normal (Gaussian) probability density at `x`."""
    z = (x - mu) / sigma
    scale = np.sqrt(2.0 * np.pi) * sigma
    return np.exp(-0.5 * z ** 2) / scale
def rangeX(iterations):
    """Iterate over every index combination within the given bounds.

    e.g. rangeX((2, 3)) yields 00 01 02 10 11 12.  Raises AttributeError
    when `iterations` is not a tuple.
    """
    if not isinstance(iterations, tuple):
        raise AttributeError
    per_axis = (range(bound) for bound in iterations)
    return itertools.product(*per_axis)
if __name__ == '__main__':
    # Gabor returns a single array; the original
    # `G, co, gab = Gabor(...)` unpacking raised a ValueError.
    gab = Gabor(1.0, [200, 200], [20, 10], 30, 3)
|
999,688 | 9e98d65560f6e2a8cc3ecbcf655d236433effb3d | fun main() {
val x = "${String::class.toString()}"
println("${x}.hello")
println("${x.toString()}.hello")
println("${x}hello")
println("${x.length}.hello")
println("$x.hello")
}
|
999,689 | 15c7e2b5bc98c45c069fca2e4a42e174a8f5526c | class Computer:
def __init__(self):
self.on = True
def are_you_on(self):
if self.on:
return 'Yes'
else:
return 'No'
def is_computer_on():
    """Instantiate a fresh Computer and report its power state ('Yes'/'No')."""
    return Computer().are_you_on()
|
999,690 | 1740fbf4212e6cd9ac45122b7fac25b33305a1d2 | from mpi4py import MPI
import numpy as np
# MPI bookkeeping: one communicator shared by every rank.
comm = MPI.COMM_WORLD
rank = comm.rank
num_procs = comm.Get_size()

if rank == 0:
    # Root draws one radius per trial, uniform in [0.03, 0.07), and splits
    # the (trial_index, radius) pairs into one chunk per rank.
    num_trials = 2
    trial_seed_list = list(zip(range(num_trials),0.03 + 0.04*np.random.rand(num_trials)))
    trial_chunk = np.array_split(trial_seed_list, num_procs)
else:
    trial_chunk = None
# Broadcast the full chunk list; each rank later indexes it by its own rank.
trial_chunk = comm.bcast(trial_chunk, root=0)
print({rank: trial_chunk[rank]})
#####################################
import os
# os.environ['OMP_NUM_THREADS'] = '5'
# os.environ['OMP_THREAD_LIMIT'] = '5'
# !echo $OMP_NUM_THREADS
# !export OMP_DYNAMIC=FALSE
# from progressbar import progressbar as progbar
from fenics import *
import mshr
import numpy as np
import dolfin
import numpy as np
# from mpi4py import MPI as pyMPI
# comm = MPI.comm_world
def channel_model(T=3.0,
                  dt=1/2000,
                  mu=0.001,
                  rho=1,
                  center=(0.25, 0.25),
                  radius=0.05,
                  mesh_res=24,
                  circ_seg=24,
                  amp=24.0,
                  num_time_steps_qoi = 300,
                  folder_name='results/navier_stokes_cylinder_mesh'
                  ):
    """Incompressible Navier-Stokes flow past a cylinder in a 2D channel,
    solved with FEniCS via Chorin's incremental pressure-correction (IPCS)
    scheme.

    Parameters: T final time [s], dt time step, mu dynamic viscosity,
    rho density, center/radius of the cylinder obstacle, mesh_res mesh
    resolution, circ_seg polygon segments approximating the circle,
    amp inflow amplitude, num_time_steps_qoi number of QoI time samples,
    folder_name output directory for XDMF/TimeSeries files.

    Returns (p, (u_, timeseries_u), (p_, timeseries_p)) where p is a
    (num_locs, num_time_steps_qoi) array of pressures sampled near the
    outflow boundary.
    """
    # T = 3.0 # final time
    # dt = 1/1000 # time step size
    # mu = 0.001 # dynamic viscosity
    # rho = 1 # density
    # c_x, c_y, c_r = 0.25, 0.25, 0.05
    c_x, c_y = center
    c_r = radius
    # Create mesh: rectangular channel with a circular hole cut out.
    wall_ht = 0.5
    wall_wd = 2.0
    num_steps = int(T/dt) # number of time steps
    channel = mshr.Rectangle(Point(0, 0), Point(wall_wd, wall_ht))
    cylinder = mshr.Circle(Point(c_x, c_y), c_r, segments=circ_seg)
    domain = channel - cylinder# - cylinder2
    mesh = mshr.generate_mesh(domain, mesh_res)
    # Define function spaces (P2 velocity / P1 pressure, i.e. Taylor-Hood)
    V = VectorFunctionSpace(mesh, 'P', 2)
    Q = FunctionSpace(mesh, 'P', 1)
    # Define boundaries (C++ expression strings evaluated by FEniCS)
    inflow = 'near(x[0], 0)'
    outflow = 'near(x[0], %f)'%wall_wd
    walls = 'near(x[1], 0) || near(x[1], %f)'%wall_ht
    cylinder = 'pow(x[0] - %f, 2) + pow(x[1] - %f, 2) <= pow(%f, 2)'%(c_x,
                                                                     c_y,
                                                                     c_r + 1E-4
                                                                     )
    # Define inflow profile
    # quadratic function with roots on top/bottom boundary
    # amp = 24.0
    inflow_profile = ('%f*x[1]*(%f - x[1])'%(amp, wall_ht), '0')
    # Define boundary conditions: parabolic inflow, no-slip walls/cylinder,
    # zero pressure at the outflow.
    bcu_inflow = DirichletBC(V, Expression(inflow_profile, degree=2), inflow)
    bcu_walls = DirichletBC(V, Constant((0, 0)), walls)
    bcu_cylinder = DirichletBC(V, Constant((0, 0)), cylinder)
    bcp_outflow = DirichletBC(Q, Constant(0), outflow)
    bcu = [bcu_inflow, bcu_walls, bcu_cylinder]
    bcp = [bcp_outflow]
    # Define trial and test functions
    u = TrialFunction(V)
    v = TestFunction(V)
    p = TrialFunction(Q)
    q = TestFunction(Q)
    # Define functions for solutions at previous and current time steps
    u_n = Function(V)
    u_ = Function(V)
    p_n = Function(Q)
    p_ = Function(Q)
    # Define expressions used in variational forms
    U = 0.5*(u_n + u)        # Crank-Nicolson midpoint velocity
    n = FacetNormal(mesh)
    f = Constant((0, 0))
    k = Constant(dt)
    mu = Constant(mu)
    rho = Constant(rho)
    # Define symmetric gradient
    def epsilon(u):
        return sym(nabla_grad(u))
    # Define stress tensor
    def sigma(u, p):
        return 2*mu*epsilon(u) - p*Identity(len(u))
    ###########
    # Define variational problem for step 1 (tentative velocity)
    F1 = rho*dot((u - u_n) / k, v)*dx \
       + rho*dot(dot(u_n, nabla_grad(u_n)), v)*dx \
       + inner(sigma(U, p_n), epsilon(v))*dx \
       + dot(p_n*n, v)*ds - dot(mu*nabla_grad(U)*n, v)*ds \
       - dot(f, v)*dx
    a1 = lhs(F1)
    L1 = rhs(F1)
    # Define variational problem for step 2 (pressure correction)
    a2 = dot(nabla_grad(p), nabla_grad(q))*dx
    L2 = dot(nabla_grad(p_n), nabla_grad(q))*dx - (1/k)*div(u_)*q*dx
    # Define variational problem for step 3 (velocity correction)
    a3 = dot(u, v)*dx
    L3 = dot(u_, v)*dx - k*dot(nabla_grad(p_ - p_n), v)*dx
    # Assemble matrices (time-independent, so assembled once up front)
    A1 = assemble(a1)
    A1_orig = assemble(a1)  # NOTE(review): never used afterwards -- confirm
    A2 = assemble(a2)
    A3 = assemble(a3)
    # Apply boundary conditions to matrices
    [bc.apply(A1) for bc in bcu]
    [bc.apply(A2) for bc in bcp]
    # TODO: HAVE NAMING CONVENTION TO AVOID FILE COLLISIONS
    # Create XDMF files for visualization output
    xdmffile_u = XDMFFile('%s/velocity.xdmf'%folder_name)
    xdmffile_p = XDMFFile('%s/pressure.xdmf'%folder_name)
    # Create time series (for use in reaction_system.py)
    timeseries_u = TimeSeries('%s/velocity_series'%folder_name)
    timeseries_p = TimeSeries('%s/pressure_series'%folder_name)
    # Save mesh to file (for use in reaction_system.py)
    # File('%s/cylinder.xml.gz'%folder_name) << mesh
    # Create progress bar
    # progress = Progress('Time-stepping', num_steps)
    set_log_level(10)
    # Time-stepping
    t = 0
    # pbar = progbar.ProgressBar().start()
    # NOTE(review): the loop variable `n` shadows the FacetNormal above;
    # harmless here only because all forms were already assembled.
    for n in range(num_steps):
        # Update current time
        t += dt
        # Step 1: Tentative velocity step
        b1 = assemble(L1)
        [bc.apply(b1) for bc in bcu]
        solve(A1, u_.vector(), b1, 'bicgstab', 'hypre_amg')
        # Step 2: Pressure correction step
        b2 = assemble(L2)
        # [bc.apply(b2) for bc in bcp]
        solve(A2, p_.vector(), b2, 'bicgstab', 'hypre_amg')
        # Step 3: Velocity correction step
        b3 = assemble(L3)
        solve(A3, u_.vector(), b3, 'cg', 'sor')
        # Save solution to file (XDMF/HDF5)
        xdmffile_u.write(u_, t)
        xdmffile_p.write(p_, t)
        # Save nodal values to file
        timeseries_u.store(u_.vector(), t)
        timeseries_p.store(p_.vector(), t)
        # Update previous solution
        u_n.assign(u_)
        p_n.assign(p_)
    # Quantity of interest: pressure sampled at outlet points over time.
    # num_time_steps_qoi = 300
    coords = p_.function_space().mesh().coordinates()
    locs = [ [2, 0.1], [2, 0.2], [2, 0.3], [2, 0.4] ]
    time_vec = np.linspace(1,T,num_time_steps_qoi)
    num_locs = len(locs)
    locs = np.array(locs)
    locs[:,0] = wall_wd  # pin the sample points onto the outflow boundary
    p = np.empty((num_locs, num_time_steps_qoi))
    for i, time in enumerate(time_vec):
        timeseries_p.retrieve(p_.vector(), time)
        pv = p_.compute_vertex_values()  # NOTE(review): unused -- confirm
        for j in range(num_locs):
            # NOTE(review): `i-1` puts the first sample (i == 0) into the
            # LAST column, rotating the QoI matrix by one; almost certainly
            # meant to be `p[j, i]` -- confirm with downstream consumers.
            p[j, i-1] = p_(locs[j,0], locs[j,1])
    return (p, (u_, timeseries_u), (p_, timeseries_p))
def wrapper(i):
    """Run one flow simulation for trial `i` = (trial_index, radius).

    Returns {trial_index: {'qoi': p-or-None, 'r': radius}}.  A failed solve
    is recorded as qoi=None instead of aborting the whole batch.
    """
    # absolute directory
    folder_name = 'results/navier_stokes_cylinder_mesh{}'.format(i[0])
    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(folder_name, exist_ok=True)
    try:
        p, uu, pp = channel_model(center=(0.25, 0.25), radius=i[1],
                                  folder_name=folder_name)
    except Exception:
        # The original bare `except:` also swallowed KeyboardInterrupt.
        p = None
    # return {i[0]: {'qoi': p, 'u': uu, 'p': pp}}
    return {i[0]: {'qoi': p, 'r': i[1]}}
results = []
for trial in trial_chunk[rank]:
    results.append(wrapper(trial))

# Gather each rank's list of per-trial dicts on the root rank.
# (The original `comm.gather('results').get()` sent the literal string
# 'results' and then called `.get()` on a list, which raises.)
results_gather = comm.gather(results, root=0)
if rank == 0:
    # Flatten per-rank lists; each element maps trial index -> payload.
    flat_results = [entry for chunk in results_gather for entry in chunk]
    print(flat_results)
    # NOTE(review): assumes every trial produced a QoI array (qoi is not
    # None) -- confirm failed solves are filtered or tolerated upstream.
    qoi = np.array([next(iter(d.values()))['qoi'].ravel() for d in flat_results])
    radii = np.array([next(iter(d.values()))['r'] for d in flat_results])
    np.save('results_gathered_qoi.npy', qoi)
    np.save('results_gathered_lam.npy', radii)
|
999,691 | 2ab983da96fd130e1df8b3405d2596731cc05240 | import socket, ssl
import threading
from connectionHandler import handleConn
useSsl = False
def serverInit(host, port):
    """Create a TCP socket bound to (host, port) and start listening.

    SO_REUSEADDR is set so a quick server restart does not fail with
    "address already in use" while old connections sit in TIME_WAIT.
    The listen backlog of 5 matches the original behaviour.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(5)
    return s
def serverLoop():
    """Accept plain TCP connections on port 5777 forever, one thread each."""
    s = serverInit('', 5777)
    while True:
        conn, addr = s.accept()
        # Non-daemon thread: the process will not exit while clients are served.
        t = threading.Thread(target = handleConn, args=(conn,), daemon=False)
        t.start()
        # handleConn(conn)
def sslServerLoop():
    """Accept TLS-wrapped TCP connections on port 5777 forever.

    NOTE(review): `cert.pem` must contain both the certificate and its
    private key.  The TLS handshake runs inside `wrap_socket` on the accept
    thread, so a slow or broken client can stall the accept loop -- confirm
    this is acceptable for the deployment.
    """
    s = serverInit('', 5777)
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain('cert.pem')
    while True:
        conn, addr = s.accept()
        sslConn = context.wrap_socket(conn, server_side=True)
        t = threading.Thread(target = handleConn, args=(sslConn,), daemon=False)
        t.start()
        # handleConn(sslConn)
def start():
    """Run the TLS accept loop when `useSsl` is set, else the plain one."""
    loop = sslServerLoop if useSsl else serverLoop
    loop()
#def handleConn(conn):
# f = open('ReceivedImg.jpg', 'wb')
#
# buf = conn.recv(4096)
# while buf:
# f.write(buf)
# buf = conn.recv(4096)
#
# print("File Received")
#
# conn.send("Image Received\n".encode())
#
# conn.close()
|
999,692 | f6f120d71abbea56709c0d7e1dada22fd1e5b8c5 | from TournamentResults import *
import random
import time
class Ranking(TournamentResults):
    """A candidate ordering of tournament participants together with its
    Kemeny score against a `TournamentResults` matchup table.

    NOTE(review): inherits TournamentResults but never calls
    super().__init__() -- confirm the base class needs no initialisation.
    """

    def __init__(self, results, ranking, ks=None):
        # `ranking` is a list of 1-based participant ids, best first --
        # presumably; verify against TournamentResults.
        self.ranking = ranking
        self.results = results
        if ks:
            self.ks = ks
        else:
            self.ks = self.calculate_ks()

    # Calculates the Kemeny score of a ranking
    def calculate_ks(self):
        """Sum |result| over every head-to-head whose winner is ranked below
        the loser (assumes get_matchup(a, b) > 0 means a beat b -- TODO
        confirm against TournamentResults)."""
        pos = self.ranking.copy()
        ks = 0
        for participant_a in range(1, len(self.ranking) + 1):
            for participant_b in range(participant_a + 1, len(self.ranking) + 1):
                result = self.results.get_matchup(participant_a, participant_b)
                if not result:
                    continue
                if pos.index(participant_a) > pos.index(participant_b) and result > 0 or pos.index(participant_a) < pos.index(participant_b) and result < 0:
                    ks += abs(result)
        self.ks = ks
        return ks

    # Return Kemeny Score
    def get_kemeny_score(self):
        # Always recomputed from scratch; the cached self.ks may be stale
        # (see calculate_n_ks below).
        return self.calculate_ks()

    # Moves a random participant to a random position
    def get_random_n(self):
        """Return a neighbouring Ranking obtained by moving one randomly
        chosen participant to a new random position; moves of distance 1 are
        re-rolled half of the time."""
        old_rank = int(random.uniform(0, 1.0) * len(self.ranking))
        new_rank = old_rank
        difference = 0
        while difference == 0 or (difference == 1 and random.uniform(0.0, 1.0) < 0.5):
            new_rank = int(random.uniform(0.0, 1.0) * len(self.ranking))
            difference = abs(new_rank - old_rank)
        ranking = self.ranking.copy()
        ranking[new_rank - 1] = self.ranking[int(old_rank) - 1]
        source, target = 0, 0
        # Shift everyone between the two positions by one slot.
        while target < len(ranking) and source < len(ranking):
            if source == old_rank:
                source += 1
            if target == new_rank:
                target += 1
            ranking[target - 1] = self.ranking[source - 1]
            source += 1
            target += 1
        # NOTE(review): seeds the new Ranking with calculate_n_ks although
        # the comment below admits it "doesn't work"; get_kemeny_score
        # recomputes from scratch, which limits the damage.
        return Ranking(self.results, ranking, self.calculate_n_ks(old_rank, new_rank))

    # An attempt speed up runtime by not completely recalculating the Kenemy Score but isn't used as it doesn't work
    def calculate_n_ks(self, old_rank, new_rank):
        participant = self.ranking[old_rank - 1]
        ks = self.ks
        count = 0
        if new_rank < old_rank:
            count = 1
        else:
            count = -1
        for i in range(new_rank, old_rank, count):
            result = self.results.get_matchup(participant, self.ranking[i - 1])
            if not result:
                continue
            # NOTE(review): `old_rank > i ^ result > 0` parses as the chained
            # comparison `old_rank > (i ^ result) > 0` because `^` binds
            # tighter than `>`; the intent was almost certainly
            # `(old_rank > i) ^ (result > 0)`.
            if old_rank > i ^ result > 0:
                ks -= abs(result)
            else:
                ks += abs(result)
        return ks
999,693 | 02db92d5561cfd0546fde59d90cd0438a52eef26 | from django.apps import AppConfig
class MyblogApiConfig(AppConfig):
    """Django AppConfig for the `myblog_api` application."""
    name = 'myblog_api'
|
999,694 | 915e8064b13acd9f49ea78ca363b6f007bdb017f | import time
import nomad
# Create a Nomad client
client = nomad.Nomad()
# Create a batch of jobs to submit to Nomad
jobs = [{
"Name": "stress-test-job",
"Type": "batch",
"Datacenters": ["dc1"],
"TaskGroups": [{
"Name": "stress-test-task-group",
"Tasks": [{
"Name": "stress-test-task",
"Driver": "raw_exec",
"Config": {
"command": "sleep 10"
},
"Resources": {
"CPU": 500,
"MemoryMB": 512
}
}]
}]
}]
# Continuously submit the batch of jobs to Nomad
while True:
for job in jobs:
client.jobs.create(job)
time.sleep(1)
|
999,695 | d0b8a95fce0dd466c7eea507f75746b12bc57fc8 | from setuptools import setup, find_packages
cemba_data_version = '0.1.2'

# Read the long description up front so the file handle is closed promptly
# (the original `open('README.md').read()` leaked the handle).
with open('README.md') as _readme:
    _long_description = _readme.read()

setup(
    name='cemba_data',
    version=cemba_data_version,
    author='Hanqing Liu',
    author_email='hanliu@salk.edu',
    packages=find_packages(),
    description='A package for processing and analyzing single cell sequencing data.',
    long_description=_long_description,
    include_package_data=True,
    install_requires=['pandas', 'pybedtools', 'h5py', 'numpy', 'scipy', 'anndata', 'scikit-learn', 'scanpy',
                      'matplotlib', 'seaborn', 'allensdk', 'holoviews', 'IPython', 'annoy', 'fbpca', 'psutil', 'tables',
                      'six', 'xarray'],
    entry_points={
        'console_scripts': ['yap=cemba_data.__main__:main'],
    }
)

if __name__ == '__main__':
    # Pin the package version inside the source tree so that
    # cemba_data.__version__ stays in sync with setup.py.
    with open("cemba_data/__init__.py", 'w') as f:
        f.write(f"__version__ = '{cemba_data_version}'\n")
999,696 | cc79f47cd1e97eb8522821fade3a6d86369f853a | # DSP functions such as applying noise, RIRs, or data representation conversions
import numpy as np
import pandas as pd
import random as rnd
import librosa as lr
import time
import os
import os.path as osp
import scipy
from libs.colored_noise import powerlaw_psd_gaussian
# generate seed from the time at which this script is run
rnd.seed(int(time.time()))
### FRAGMENTING AND RECONSTRUCTING FROM FRAGMENTS
def make_fragments(s, frag_hop_len, frag_win_len):
    """Cut a T-F representation into overlapping time fragments.

    Returns an array of shape (n_frags, n_bins, frag_win_len, ...) built
    from windows of width `frag_win_len` every `frag_hop_len` frames.
    """
    total = int((s.shape[1] - frag_win_len) / frag_hop_len + 1)
    pieces = []
    for idx in range(total):
        start = idx * frag_hop_len
        pieces.append(s[:, start:start + frag_win_len])
    return np.array(pieces)
def unmake_fragments(s_frag, frag_hop_len, frag_win_len):
    """Stitch fragments from `make_fragments` back into one spectrogram.

    Later fragments overwrite the overlapping tail of earlier ones, so for
    matching hop/window sizes the round trip reproduces the original input.
    Supports both (n, bins, t) and (n, bins, t, ch) fragment stacks.
    """
    shape_in = s_frag.shape
    n_frames = (shape_in[0] - 1) * frag_hop_len + frag_win_len
    if len(shape_in) == 4:
        out_shape = (shape_in[1], n_frames, shape_in[-1])
    else:
        out_shape = (shape_in[1], n_frames)
    out = np.zeros(out_shape, dtype=s_frag.dtype)
    for idx, piece in enumerate(s_frag):
        start = idx * frag_hop_len
        out[:, start:start + frag_win_len] = piece
    return out
def unmake_fragments_slice(s_frag, frag_hop_len, frag_win_len, time_slice):
    """Reassemble a spectrogram from fragments, writing only `time_slice`
    frames from each fragment at the corresponding absolute position.

    `time_slice` may be an int (expanded to a hop-length window) or a
    `slice`; slices wider than one hop are truncated to `frag_hop_len`.
    Supports (n, bins, t) and (n, bins, t, ch) fragment stacks.
    """
    # store input shape
    in_shape = s_frag.shape
    # multiple input shape support
    spec_length = (in_shape[0]-1) * frag_hop_len + frag_win_len
    output_shape = (in_shape[1], spec_length, in_shape[-1]
                    ) if len(in_shape) == 4 else (in_shape[1], spec_length)
    # if slice is integer, use it as single slice
    # NOTE: indexing [i] instead of slicing [x:y] cause dimension to collapse
    if isinstance(time_slice, int) or isinstance(time_slice, np.generic):
        time_slice = slice(time_slice, time_slice+frag_hop_len)
    # if time slice is larger than frag_hop, actual slice will be of size hop
    slice_width = time_slice.stop - time_slice.start
    if slice_width > frag_hop_len:
        time_slice = slice(time_slice.start, time_slice.start + frag_hop_len)
    # initialize recipient
    s = np.zeros(output_shape, dtype=s_frag.dtype)
    for i, frag in enumerate(s_frag):
        frag = frag[..., time_slice, :] if len(
            frag.shape) == 3 else frag[..., time_slice]
        lower_bound = time_slice.start + i*frag_hop_len
        upper_bound = time_slice.start + (i+1)*frag_hop_len
        #upper_bound = i*frag_hop_len+frag_win_len
        # NOTE(review): assumes time_slice starts within the first hop of a
        # fragment; larger starts would shift content -- confirm callers.
        s[:, lower_bound:upper_bound] = frag
    return s
### PRE/POST PROCESSING FUNCTIONS
## helper funcs
def rem_dc_bin(s):
    """Drop the last frequency bin when the bin count is odd (the DC bin)."""
    if s.shape[-2] % 2 == 0:
        return s
    return s[..., :-1, :]
def add_dc_bin(s):
    """Append one zero-filled frequency bin along the frequency axis (-2)."""
    target = list(s.shape)
    target[-2] = 1
    zeros_bin = np.zeros(tuple(target))
    return np.concatenate((s, zeros_bin), axis=-2)
## convert complex spectrograms to/from magnitude^exponent
# NOTE implementation based on callable class rather than nested functions
# due to `fit_generator` requiring data_generator arguments to be
# picklable (nested functions aren't)
class s_to_exp(object):
    """Picklable callable mapping complex spectrograms to |s|**exponent.

    Implemented as a class (not a closure) so `fit_generator` can pickle
    the data-generator arguments.
    """

    def __init__(self, exponent):
        self.exponent = exponent
        self.__name__ = 's_to_exp({:.3f})'.format(exponent)

    def __call__(self, s):
        """Drop the DC bin, raise the magnitude to `exponent`, add a channel axis."""
        trimmed = rem_dc_bin(s)
        magnitude_pow = np.abs(trimmed) ** self.exponent
        return magnitude_pow[..., np.newaxis]
def exp_to_s(exponent):
    """Build a function mapping |s|**exponent features back to a complex STFT.

    The returned function optionally borrows the phase of `s_noisy` and
    always restores the DC bin that `s_to_exp` removed.
    """
    def func(power, s_noisy=None):
        magnitude = power[..., 0] ** (1.0/exponent)
        if s_noisy is not None:
            # Re-attach the noisy signal's phase to the reconstructed magnitude.
            phase = np.angle(s_noisy[..., :-1, :])
            magnitude = magnitude * np.exp(1j * phase)
        return add_dc_bin(magnitude)
    return func
## convert complex spectrograms to/from absolute power spectrum
def s_to_power(s):
    """Shorthand for `s_to_exp(2)`: complex STFT -> power spectrogram."""
    return s_to_exp(2)(s)

def power_to_s(power, s_noisy=None):
    """Shorthand for `exp_to_s(2)`: power spectrogram -> complex STFT."""
    return exp_to_s(2)(power, s_noisy)
## convert complex spectrograms to/from decibel-spectrum
def s_to_db(s):
    """Complex STFT -> dB-magnitude features with a trailing channel axis."""
    s = rem_dc_bin(s)
    # complex -> magnitude -> decibels
    s_db = lr.amplitude_to_db(np.abs(s))
    return s_db[..., np.newaxis]

def db_to_s(db, s_noisy=None):
    """dB features -> complex STFT, optionally reusing `s_noisy`'s phase."""
    # decibels -> magnitude
    s = lr.db_to_amplitude(db[..., 0])
    # use phase from noisy signal: magnitude -> complex
    if s_noisy is not None:
        s_noisy = s_noisy[..., :-1, :]
        angles = np.angle(s_noisy)
        s = s * np.exp(1j * angles)
    s = add_dc_bin(s)
    return s
# convert complex spectrograms to Re/Im representation
# NOTE unmaintained!
def s_to_reim(s):
    """Split a complex spectrogram into stacked real/imaginary channels."""
    trimmed = rem_dc_bin(s)
    real_part = np.real(trimmed)
    imag_part = np.imag(trimmed)
    return np.stack([real_part, imag_part], axis=-1)
# convert Re/Im representation to complex spectrograms
def reim_to_s(reim):
    """Recombine stacked Re/Im channels into a complex spectrogram."""
    complex_s = reim[..., 0] + 1j * reim[..., 1]
    return add_dc_bin(complex_s)
## normalization
def normalize_spectrum(s):
    """Standardize `s` to zero mean / unit std; also return (mean, std)."""
    mean = np.mean(s)
    std = np.std(s)
    normalized = (s - mean) / std
    return normalized, (mean, std)
def normalize_spectrum_clean(s, norm_factors):
    """Standardize `s` using precomputed (mean, std) from a paired spectrum."""
    mean, std = norm_factors
    return (s - mean) / std
def unnormalize_spectrum(s, norm_factors):
    """Invert `normalize_spectrum` using the stored (mean, std) pair."""
    mean, std = norm_factors
    return s * std + mean
### NOISING FUNCTIONS
# sum s(ignal) and n(oise) at a given SNR (in dB)
def sum_with_snr(s, n, snr):
    """Mix signal `s` with noise `n` scaled to the requested SNR (in dB)."""
    ratio_linear = 10.0 ** (snr / 10.0)
    # RMS of a zero-mean (AC) signal equals its standard deviation.
    gain = s.std() / (ratio_linear * n.std())
    # TODO normalize?
    return s + gain * n
def pink_noise(x, sr, snr):
    """Add 1/f (pink) noise to `x` at `snr` dB via `powerlaw_psd_gaussian`."""
    n = powerlaw_psd_gaussian(1, len(x))
    return sum_with_snr(x, n, snr)
# add white gaussian noise
def white_noise(x, sr, snr):
    """Add zero-mean unit-variance Gaussian noise to `x` at `snr` dB."""
    gaussian = np.random.randn(*x.shape)
    return sum_with_snr(x, gaussian, snr)
class take_file_as_noise(object):
    """Callable noise source that mixes an audio file into a speech signal.

    The noise file is looped, with 1-second raised-cosine cross-fades at
    its seams, until it covers the speech duration, then mixed at
    `snr - gain` dB via `sum_with_snr`.

    Fixes vs. the original: `np.int` (removed in NumPy >= 1.24) replaced by
    the builtin `int`, and the noise buffer is now a real copy -- slicing
    an ndarray (`xn[:]`) returns a *view*, so the fades previously mutated
    `xn` in place despite the "copy" comment.
    """

    def __init__(self, filepath, gain=0.0):
        self.filepath = filepath
        self.gain = gain
        # NOTE(review): '{:2f}' is probably meant to be '{:.2f}'; kept as-is
        # because the generated name may be used for bookkeeping elsewhere.
        self.__name__ = 'take_file_as_noise({}, {:2f})'.format(osp.basename(osp.dirname(filepath)), gain)

    def __call__(self, x, sr, snr):
        xn, _ = lr.load(self.filepath, sr = sr)
        dur_speech = x.shape[0]
        dur_noise = xn.shape[0]
        # Create fade-in & fade-out windows (raised-cosine, 1 s long).
        dur_fade = 1  # in sec
        p100_fade = dur_fade*sr / dur_noise  # proportion of the noise file
        fade_len = int(p100_fade * dur_noise)
        fadein = np.cos(np.linspace(-np.pi/2, 0, fade_len))**2
        fadeout = np.cos(np.linspace(0, np.pi/2, fade_len))**2
        # Apply the fades to a genuine copy of xn.
        noise = xn.copy()
        noise[ :fade_len] = fadein * noise[ :fade_len]
        noise[-fade_len: ] = fadeout * noise[-fade_len: ]
        # Random start offset inside [fade_len, dur_noise - fade_len).
        rnd_beg_ind = int(np.random.random() * (dur_noise - 2*fade_len)) + fade_len
        # init
        out = np.zeros((dur_speech))
        portion_noise = dur_noise - rnd_beg_ind  # always < dur_noise
        # Check whether the remaining portion of noise already covers `out`.
        if portion_noise >= dur_speech:
            n_noise_next = rnd_beg_ind + dur_speech
            out[:] += noise[rnd_beg_ind : n_noise_next]
            # and that's it!
        else:
            n_noise_next = 0
            n_out_next = dur_noise - rnd_beg_ind
            out[ :n_out_next] += noise[rnd_beg_ind:]
            # Loop whole copies of the faded noise, overlapping by fade_len
            # so consecutive copies cross-fade into one another.
            n_out_beg = n_out_next - fade_len
            n_out_end = n_out_beg + dur_noise
            while n_out_end < dur_speech:
                out[n_out_beg: n_out_end] += noise[:]
                n_out_next = n_out_end
                n_out_beg = n_out_next - fade_len
                n_out_end = n_out_beg + dur_noise
            # Last iteration: the noise may be too long for the remaining
            # speech, so trim it to the leftover length.
            portion_out = dur_speech - n_out_next
            out[n_out_next: ] += noise[n_noise_next : n_noise_next + portion_out]
        return sum_with_snr(x, out, snr - self.gain)
# add pink (1/f) noise using Voss-McCartney algorithm
def pink_noise2(x, sr, snr):
    """Add pink (1/f) noise to *x* at *snr* dB using the Voss-McCartney algorithm.

    Parameters: x -- 1-D signal array; sr -- sample rate (unused);
    snr -- target signal-to-noise ratio in dB (power ratio).
    Returns the noisy signal ``x + noise`` as an ndarray of the same length.
    """
    # number of values to generate
    nrows = len(x)
    # number of random sources to add
    ncols = 16
    # one row per output sample, one column per source
    # (renamed from `array`, which shadowed the stdlib module name)
    vals = np.empty((nrows, ncols))
    vals.fill(np.nan)
    vals[0, :] = np.random.random(ncols)
    vals[:, 0] = np.random.random(nrows)
    # the total number of changes is nrows
    n = nrows
    cols = np.random.geometric(0.5, n)
    cols[cols >= ncols] = 0
    rows = np.random.randint(nrows, size=n)
    vals[rows, cols] = np.random.random(n)
    df = pd.DataFrame(vals)
    # forward-fill: each source holds its value until it is re-sampled
    # (df.ffill replaces the deprecated fillna(method='ffill'))
    df = df.ffill(axis=0)
    total = df.sum(axis=1)
    # noise std required to hit the requested SNR (power ratio)
    sigma = np.sqrt((x @ x.T) / (nrows * 10**(snr/10)))
    noise = sigma*(total.values - np.mean(total.values)) / (max(total.values) - np.mean(total.values))
    # BUGFIX: previously returned only the noise; per the TODO above the old
    # return and the contract of the sibling noising functions, return the
    # noisy signal.
    return x + noise
def velvet_noise(x, SNR):
    """Add sparse (velvet) +/-sigma impulses to *x* at the given SNR in dB."""
    print('Using velvet noise')
    n_samples = max(x.shape)
    # noise std needed to hit the requested SNR (power ratio)
    sigma = np.sqrt( (x @ x.T) / (n_samples * 10**(SNR/10)) )
    print('sigma = {0}'.format(sigma))
    # uniform draws in [-1, 1]; only the tails become impulses
    draws = [rnd.uniform(-1, 1) for _ in range(n_samples)]
    rate_zero = .95  # sparsity threshold; could be parametrized
    noise = [sigma * ((d > rate_zero) - (d < -rate_zero)) for d in draws]
    return x + noise
|
999,697 | 83b86ee46f2a03a242b3a72bfaaf742bbc7224aa | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import EarlyStopping
from warnings import filterwarnings
filterwarnings('ignore')
# ----------------------------------------------------------------------------------------------------------------------
# --- data loading and feature engineering -----------------------------------
feature = pd.read_csv("data/raw/multiasset_feature.csv")
idx = pd.read_csv("data/raw/multiasset_index.csv", index_col='Date')
idx.columns = ['idx_price']
# create lagging features (lag_1 .. lag_15 of the index price)
lag_length = 16
for i in range(1, lag_length):
    idx['lag_' + str(i)] = idx['idx_price'].shift(i)
idx.head(10)  # NOTE(review): no-op outside a notebook -- result is discarded
# merge feature and index datasets
dt = feature.merge(idx, how='left', on='Date')
# change some columns formats: strings like "1,234 " -> int
cols = ['Fed Balance Sheet', 'US Real Personal Income', 'US Real Personal Income exTrans', 'Adv Retail Sales US exFood Services']
for i in cols:
    dt[i] = dt[i].apply(lambda x: x.replace(",", ""))
    dt[i] = dt[i].apply(lambda x: x.replace(" ", ""))
    dt[i] = dt[i].apply(lambda x: int(float(x)))
# move up price to predict: next period's price becomes the target
dt['pred_price'] = dt['idx_price'].shift(-1)
# truncate dataset: drop the leading rows whose lag features are NaN
dt = dt.iloc[(lag_length-1):, ]
# dt = pd.DataFrame(dt.drop(['Date', 'idx_price'], axis=1))
# split dataset into features X and target y
X = pd.DataFrame(dt.drop(['Date', 'idx_price', 'pred_price'], axis=1))
y = pd.DataFrame(dt['pred_price'])
# normalize features data!!!
# NOTE(review): the scaler is fit on ALL rows before the train/test split,
# which leaks test-set statistics into training -- confirm this is intended.
scaler = MinMaxScaler(feature_range=(0, 1))
X_scaled = scaler.fit_transform(X)
# create train & test sets (hard-coded chronological split at row 202)
X_train = X_scaled[:202, :]
X_test = X_scaled[202:, :]
y_train = y.iloc[:202, ]
y_test = y.iloc[202:, ]
# split out last row as forecast data (its target is NaN after the shift)
# X_forecast = pd.DataFrame(X_test[-1, ]).T
X_forecast = X_test[-1, ]
X_test = X_test[:-1, :]
y_test = y_test[:-1]
# reshape input to be 3D [samples, timesteps, features]
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
X_forecast = X_forecast.reshape((1, 1, X_forecast.shape[0]))
# design network
n_steps = X_train.shape[1]      # timesteps per sample (1)
n_features = X_train.shape[2]   # features per timestep
def root_mean_squared_error(y_true, y_pred):
    """Keras-compatible RMSE loss: square root of the built-in MSE."""
    mse = tensorflow.keras.losses.MSE(y_true, y_pred)
    return tensorflow.keras.backend.sqrt(mse)
# ====================================================================================================================
# Three-layer stacked LSTM regressor with dropout; single linear output unit.
regressor = Sequential()
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(n_steps, n_features)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units=1))
regressor.compile(optimizer='adam', loss=root_mean_squared_error)
# ====================================================================================================================
# fit the autoencoder model to reconstruct input
# NOTE(review): the comment above says "autoencoder" but this is a supervised
# regressor. early_stop is built but the callback is commented out below, so
# training always runs the full 1000 epochs.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.00001, patience=5, restore_best_weights=True)
# NOTE(review): using the test set as validation_data leaks test information
# into any training decision based on val_loss -- confirm this is intended.
history = regressor.fit(X_train, y_train, epochs=1000, batch_size=8, validation_data=(X_test, y_test), verbose=2,
                        shuffle=False  # , callbacks=[early_stop]
                        )
# plot train/test loss curves
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='test loss')
plt.title('RMSE Plot')
plt.legend()
plt.show()
# make a prediction on the held-out set
y_hat = regressor.predict(X_test)
# side-by-side [actual, predicted]; y_test is a DataFrame, y_hat an ndarray
y_test_pred = np.concatenate((y_test, y_hat), axis=1)
# calculate RMSE / MAE on the test set
rmse = np.sqrt(mean_squared_error(y_test, y_hat))
mae = mean_absolute_error(y_test, y_hat)
print('Test RMSE: %.3f' % rmse)
print('Test MAE: %.3f' % mae)
# forecast for next month (the last, target-less row split out earlier)
y_forecast = regressor.predict(X_forecast)
999,698 | 35caf5c5e07f279a21549105e0fd5485420e8b67 | import sys
(python_ver, _, _, _, _) = sys.version_info  # major version only (2 or 3)
import os
from setuptools import setup, find_packages

name = 'querycontacts'
description = "Query network abuse contacts on the command-line for a given ip address on abuse-contacts.abusix.org"
long_description = description

cur_dir = os.path.abspath(os.path.dirname(__file__))

# Read Version
with open('%s/querycontacts/_version.py' % (cur_dir), 'r') as vfh:
    # BUGFIX: split on ' = ' FIRST, then strip the quotes. The old order
    # (strip quotes, then split) only removed the trailing quote -- str.strip
    # works at the ends of the whole string -- leaving a leading "'" in
    # __version__.
    __version__ = vfh.read().strip().split(' = ')[1].strip('\'')

# README becomes the long description on PyPI.
with open('%s/README.rst' % cur_dir, 'r') as f:
    long_description = f.read()

# Per-major-Python requirements file: REQUIREMENTS2 / REQUIREMENTS3.
with open('%s/REQUIREMENTS%s' % (cur_dir, python_ver)) as f:
    requires = f.readlines()

setup(
    name=name,
    version=__version__,
    description=description,
    long_description=long_description,
    author='abusix GmbH',
    author_email='info@abusix.com',
    url='http://abusix.com/global-reporting/abuse-contact-db',
    install_requires=requires,
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    scripts=['scripts/querycontacts'],
    license="GNU General Public License v3 (GPLv3)",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Customer Service',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.7',
        'Topic :: Security',
    ]
)
|
999,699 | b41d1f9969f42b30a61a7dcfb5c211b716c45ac4 | '''
Vendored version of django-easymoney.
Putting directly in otree-core because PyCharm flags certain usages in yellow,
like:
c(1) + c(1)
Results in: "Currency does not define __add__, so the + operator cannot
be used on its instances"
If "a" is a CurrencyField, then
self.a + c(1)
PyCharm warns: 'Currency does not define __add__, so the + operator cannot
be used on its instances'
c(1) + 1
'Expected type "int", got "Currency" instead'
self.a + 1
'Expected type "int", got "CurrencyField" instead'
easymoney's Money and MoneyField don't have any of these problems.
Other reasons:
- We have to override certain things anyway; it's easier to keep track of
when it's imported indirectly.
- As far as I know, we are the only users of EasyMoney
- If I want to make a fix, I can do it immediately, rather than making a pull
request and waiting for the change upstream.
'''
from django.utils import formats, numberformat
from django.utils.translation import ungettext
import sys
from decimal import Decimal, ROUND_HALF_UP
import babel.core
import babel.numbers
from django import forms
from django.conf import settings
import six
# =============================================================================
# MONKEY PATCH - fix for https://github.com/oTree-org/otree-core/issues/387
# =============================================================================
# Black Magic: Django's original number formatting used inside templates does
# not work if the currency code contains non-ASCII characters. This ugly hack
# replaces the original number-format function so that, for easy_money
# instances, the plain unicode cast is used instead.
_original_number_format = numberformat.format

def otree_number_format(number, *args, **kwargs):
    """Render Currency values via their own text conversion; defer the rest."""
    if not isinstance(number, Currency):
        return _original_number_format(number, *args, **kwargs)
    return six.text_type(number)

numberformat.format = otree_number_format
# Set up money arithmetic
def _to_decimal(amount):
if isinstance(amount, Decimal):
return amount
elif isinstance(amount, float):
return Decimal.from_float(amount)
else:
return Decimal(amount)
def _make_unary_operator(name):
method = getattr(Decimal, name, None)
# NOTE: current context would be used anyway, so we can just ignore it.
# Newer pythons don't have that, keeping this for compatability.
return lambda self, context=None: self.__class__(method(self))
def _prepare_operand(self, other):
    """Coerce *other* to Decimal for arithmetic with *self*.

    Raises TypeError (with the original conversion error chained as the
    cause) when *other* cannot be converted.
    """
    try:
        return _to_decimal(other)
    # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit. Decimal conversion failures surface as TypeError,
    # ValueError, or decimal.InvalidOperation (an ArithmeticError subclass).
    except (TypeError, ValueError, ArithmeticError) as exc:
        raise TypeError(
            "Cannot do arithmetic operation between "
            "{} and {}.".format(repr(self), repr(other))
        ) from exc
def _make_binary_operator(name):
    """Wrap Decimal's binary method *name*: coerce the operand, keep the subclass."""
    decimal_op = getattr(Decimal, name, None)
    def binary(self, other, context=None):  # `context` ignored (see unary note)
        return self.__class__(decimal_op(self, _prepare_operand(self, other)))
    return binary
def format_currency(number, currency, format, locale=babel.numbers.LC_NUMERIC,
                    force_frac=None, format_type='standard'):
    """Same as ``babel.numbers.format_currency``, but has ``force_frac``
    argument instead of ``currency_digits``.

    If the ``force_frac`` argument is given, the argument is passed down to
    ``pattern.apply``.

    :param number: the amount to render.
    :param currency: ISO 4217 currency code (e.g. ``'USD'``).
    :param format: explicit pattern string; when falsy the locale's pattern
        for ``format_type`` is used.
    :param locale: babel locale identifier or ``Locale`` instance.
    :param force_frac: ``(min_digits, max_digits)`` tuple overriding the
        currency's default fraction digits, or None for the CLDR default.
    :param format_type: which locale currency format to use; only consulted
        when ``format`` is falsy.
    :raises babel.numbers.UnknownCurrencyFormatError: for an unknown
        ``format_type``.
    """
    locale = babel.core.Locale.parse(locale)
    if format:
        pattern = babel.numbers.parse_pattern(format)
    else:
        try:
            pattern = locale.currency_formats[format_type]
        except KeyError:
            raise babel.numbers.UnknownCurrencyFormatError(
                "%r is not a known currency format type" % format_type)
    if force_frac is None:
        # Look up the currency's default fraction digits from CLDR data,
        # falling back to the global default.
        fractions = babel.core.get_global('currency_fractions')
        try:
            digits = fractions[currency][0]
        except KeyError:
            digits = fractions['DEFAULT'][0]
        frac = (digits, digits)
    else:
        frac = force_frac
    return pattern.apply(number, locale, currency=currency, force_frac=frac)
# Data class
class BaseCurrency(Decimal):
    """Immutable money amount built on Decimal.

    Subclasses implement get_num_decimal_places(); all arithmetic returns
    instances of the concrete subclass via the generated operator wrappers
    at the bottom of the class.
    """

    # what's this for?? can't money have any # of decimal places?
    # (used as the minimum fraction digits when rendering via babel)
    MIN_DECIMAL_PLACES = 2

    def __new__(cls, amount):
        # Reject None explicitly: Decimal(None) would raise a less helpful
        # TypeError deep inside decimal.
        if amount is None:
            raise ValueError('Cannot convert None to currency')
        return Decimal.__new__(cls, cls._sanitize(amount))

    @classmethod
    def _sanitize(cls, amount):
        """Quantize *amount* to the configured decimal places (half-up)."""
        if isinstance(amount, cls):
            return amount
        quant = Decimal('0.1') ** cls.get_num_decimal_places()
        return _to_decimal(amount).quantize(quant, rounding=ROUND_HALF_UP)

    # Support for pickling
    def __reduce__(self):
        return (self.__class__, (Decimal.__str__(self),))

    # Money is immutable
    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        return self

    def __float__(self):
        """Float representation."""
        return float(Decimal(self))

    def __unicode__(self):
        # PY2 text representation: the localized/formatted currency string.
        return self._format_currency(Decimal(self))

    def __str__(self):
        string = self._format_currency(Decimal(self))
        if six.PY2:
            # PY2 str() must return bytes.
            return string.encode('utf-8')
        return string

    @classmethod
    def _format_currency(cls, number):
        # Render with the real-world currency settings; subclasses override
        # (e.g. Currency renders "points" when USE_POINTS is set).
        return format_currency(
            number=number,
            currency=settings.REAL_WORLD_CURRENCY_CODE,
            locale=settings.REAL_WORLD_CURRENCY_LOCALE,
            format=None,
            force_frac=(
                cls.MIN_DECIMAL_PLACES,
                settings.REAL_WORLD_CURRENCY_DECIMAL_PLACES)
        )

    def __format__(self, format_spec):
        # '' and 's' render the localized currency string; any other spec is
        # delegated to Decimal's numeric formatting.
        if format_spec in {'', 's'}:
            formatted = six.text_type(self)
        else:
            formatted = format(Decimal(self), format_spec)
        if isinstance(format_spec, six.binary_type):
            # PY2: bytes format spec -> return bytes.
            return formatted.encode('utf-8')
        else:
            return formatted

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, Decimal.__str__(self))

    def __eq__(self, other):
        # Currency == Currency compares exactly; plain numbers are first
        # quantized, so e.g. c(1) == 1.004 at 2 decimal places.
        if isinstance(other, BaseCurrency):
            return Decimal.__eq__(self, other)
        elif isinstance(other, six.integer_types + (float, Decimal)):
            return Decimal.__eq__(self, self._sanitize(other))
        else:
            return False

    # for Python 3:
    # need to re-define __hash__ because we defined __eq__ above
    # https://docs.python.org/3.5/reference/datamodel.html#object.%5F%5Fhash%5F%5F
    __hash__ = Decimal.__hash__

    # Special casing this, cause it have extra modulo arg
    def __pow__(self, other, modulo=None):
        other = _prepare_operand(self, other)
        return self.__class__(Decimal.__pow__(self, other, modulo))

    __abs__ = _make_unary_operator('__abs__')
    __pos__ = _make_unary_operator('__pos__')
    __neg__ = _make_unary_operator('__neg__')

    __add__ = _make_binary_operator('__add__')
    __radd__ = _make_binary_operator('__radd__')
    __sub__ = _make_binary_operator('__sub__')
    __rsub__ = _make_binary_operator('__rsub__')
    __mul__ = _make_binary_operator('__mul__')
    __rmul__ = _make_binary_operator('__rmul__')
    __floordiv__ = _make_binary_operator('__floordiv__')
    __rfloordiv__ = _make_binary_operator('__rfloordiv__')
    __truediv__ = _make_binary_operator('__truediv__')
    __rtruediv__ = _make_binary_operator('__rtruediv__')
    # PY2-only division protocol.
    if hasattr(Decimal, '__div__'):
        __div__ = _make_binary_operator('__div__')
        __rdiv__ = _make_binary_operator('__rdiv__')
    __mod__ = _make_binary_operator('__mod__')
    __rmod__ = _make_binary_operator('__rmod__')
    # NOTE(review): Decimal.__divmod__ returns a (quotient, remainder) tuple;
    # wrapping that tuple in the class constructor looks like it would fail
    # -- confirm before relying on divmod() with these types.
    __divmod__ = _make_binary_operator('__divmod__')
    __rdivmod__ = _make_binary_operator('__rdivmod__')
    __rpow__ = _make_binary_operator('__rpow__')

    def deconstruct(self):
        # Django migration serialization hook: (path, args, kwargs).
        return '{}.{}'.format(self.__module__, self.__class__.__name__), \
               [Decimal.__str__(self)], {}

    def to_number(self):
        '''
        Deprecated. This has trivial functionality; it's not clear from the API
        whether this returns a float or Decimal. Better to let the user do it explicitly
        '''
        return Decimal(self)

    @classmethod
    def get_num_decimal_places(cls):
        # Abstract: concrete subclasses decide the precision.
        raise NotImplementedError()
class Currency(BaseCurrency):
    """In-game currency: rendered as points when USE_POINTS is set,
    otherwise as real-world money."""

    @classmethod
    def get_num_decimal_places(cls):
        # Points and real money can use different precision.
        if settings.USE_POINTS:
            return settings.POINTS_DECIMAL_PLACES
        else:
            return settings.REAL_WORLD_CURRENCY_DECIMAL_PLACES

    def to_real_world_currency(self, session):
        """Convert to RealWorldCurrency using the session's exchange rate."""
        if settings.USE_POINTS:
            return RealWorldCurrency(
                float(self) *
                session.config['real_world_currency_per_point'])
        else:
            return self

    # CONSISTENCY FIX: the base class declares _format_currency as a
    # classmethod, but this override was a plain instance method whose first
    # parameter merely happened to be named `cls`. Bound calls
    # (self._format_currency(x)) worked by accident; unbound calls
    # (Currency._format_currency(x)) did not. Restoring @classmethod makes
    # the override match the parent's contract without changing behavior.
    @classmethod
    def _format_currency(cls, number):
        if settings.USE_POINTS:
            formatted_number = formats.number_format(number)
            if hasattr(settings, 'POINTS_CUSTOM_NAME'):
                return '{} {}'.format(
                    formatted_number, settings.POINTS_CUSTOM_NAME)
            # Translators: display a number of points,
            # like "1 point", "2 points", ...
            # See "Plural-Forms" above for pluralization rules
            # in this language.
            # Explanation at http://bit.ly/1IurMu7
            # In most languages, msgstr[0] is singular,
            # and msgstr[1] is plural
            # the {} represents the number;
            # don't forget to include it in your translation
            return ungettext('{} point', '{} points', number).format(
                formatted_number)
        else:
            return super()._format_currency(number)
class RealWorldCurrency(BaseCurrency):
    '''payment currency'''

    @classmethod
    def get_num_decimal_places(cls):
        # Precision always follows the real-world currency settings.
        return settings.REAL_WORLD_CURRENCY_DECIMAL_PLACES

    def to_real_world_currency(self, session):
        """Already real-world currency, so conversion is the identity."""
        return self
# Utils
def to_dec(value):
    """Unwrap a Currency to a plain Decimal; pass anything else through."""
    if isinstance(value, Currency):
        return Decimal(value)
    return value
def stdout_encode(u, default='UTF8'):
    """Encode *u* with stdout's encoding, falling back to *default*."""
    encoding = sys.stdout.encoding or default
    return u.encode(encoding)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.