index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,200 | 748e1f32a9632642ef19f549317e884a560dd531 | from app.models import Pagination, Product, Offer, Category, AllCategories
class TestCategory:
    def test_init(self):
        """Constructor stores the id and title verbatim."""
        cat = Category(1, 'Category 1')
        assert (cat.obj_id, cat.title) == (1, 'Category 1')
class TestAllCategories:
    def test_init(self):
        """AllCategories keeps the category list and exposes the ids in order."""
        cats = [Category(1, 'Category 1'), Category(2, 'Category 2')]
        collection = AllCategories(cats)
        assert collection.categories == cats
        assert collection.category_ids == [1, 2]
class TestOffer:
    def test_init(self):
        """Offer keeps id, shop name, price and url exactly as given."""
        offer = Offer(1, 'Shop Name', 30.5, 'https://shopurl')
        expected = (1, 'Shop Name', 30.5, 'https://shopurl')
        assert (offer.id, offer.shop_name, offer.price, offer.url) == expected
class TestProduct:
    @classmethod
    def setup_class(cls):
        """One product with three offers, shared (read-only) by all tests."""
        cls.offer1 = Offer(1, 'Shop 1', 1000.5, 'https://url1')
        cls.offer2 = Offer(2, 'Shop 2', 500.0, 'https://url2')
        cls.offer3 = Offer(3, 'Shop 3', 700.0, 'https://url3')
        cls.product = Product(1, 2, 'Product', 'Description',
                              ['https://image1', 'https://image2'],
                              [cls.offer1, cls.offer2, cls.offer3])

    def test_init(self):
        prod = self.product
        assert prod.id == 1
        assert prod.category_id == 2
        assert prod.title == 'Product'
        assert prod.description == 'Description'
        assert prod.image_urls == ['https://image1', 'https://image2']
        assert prod.offers == [self.offer1, self.offer2, self.offer3]

    def test_min_price(self):
        # offer2 is the cheapest offer.
        assert self.product.min_price == 500.0

    def test_max_price(self):
        # offer1 is the most expensive offer.
        assert self.product.max_price == 1000.5

    def test_sorted_offers(self):
        # Offers come back sorted by ascending price.
        assert self.product.sorted_offers == [self.offer2, self.offer3, self.offer1]
class TestPagination:
    @staticmethod
    def _paginate(items_count, current_page):
        """Every case uses 5 items per page and a 5-page window."""
        return Pagination(items_count=items_count,
                          items_per_page=5,
                          current_page=current_page,
                          show_pages_count=5)

    def test_beginning(self):
        p = self._paginate(200, 1)
        assert p.total_pages_count == 40
        assert p.show_pages_count == 5
        assert p.current_page == 1
        assert p.available_pages == [1, 2, 3, 4, 5]
        assert p.prev_page is None
        assert p.next_page == 2

    def test_beginning2(self):
        # Fewer total pages than the window size.
        p = self._paginate(15, 1)
        assert p.total_pages_count == 3
        assert p.current_page == 1
        assert p.show_pages_count == 5
        assert p.available_pages == [1, 2, 3]
        assert p.prev_page is None
        assert p.next_page == 2

    def test_middle(self):
        # Window should be centred on the current page.
        p = self._paginate(201, 33)
        assert p.total_pages_count == 41
        assert p.current_page == 33
        assert p.show_pages_count == 5
        assert p.available_pages == [31, 32, 33, 34, 35]
        assert p.prev_page == 32
        assert p.next_page == 34

    def test_middle2(self):
        p = self._paginate(14, 2)
        assert p.total_pages_count == 3
        assert p.current_page == 2
        assert p.show_pages_count == 5
        assert p.available_pages == [1, 2, 3]
        assert p.prev_page == 1
        assert p.next_page == 3

    def test_end(self):
        p = self._paginate(199, 40)
        assert p.total_pages_count == 40
        assert p.current_page == 40
        assert p.show_pages_count == 5
        assert p.available_pages == [36, 37, 38, 39, 40]
        assert p.prev_page == 39
        assert p.next_page is None

    def test_end2(self):
        p = self._paginate(13, 3)
        assert p.total_pages_count == 3
        assert p.current_page == 3
        assert p.show_pages_count == 5
        assert p.available_pages == [1, 2, 3]
        assert p.prev_page == 2
        assert p.next_page is None

    def test_page_out_of_range(self):
        # Past the last page falls back to page 1.
        assert self._paginate(13, 4).current_page == 1

    def test_page_out_of_range2(self):
        # Below the first page also falls back to page 1.
        assert self._paginate(13, 0).current_page == 1
|
15,201 | d6dce521cf38954586b365a0218b1267f609cc79 |
class Tree:
    """Unbalanced binary search tree node ordered by `value`.

    Each node carries a `partition_key` and `lookup_key` payload that
    `walk` yields alongside the value.  Values <= a node go left,
    values > it go right.
    """

    def __init__(self, value, partition_key, lookup_key):
        self.value = value
        self.partition_key = partition_key
        self.lookup_key = lookup_key
        self.left = None   # subtree with values <= self.value
        self.right = None  # subtree with values > self.value

    def insert(self, value, partition_key, lookup_key):
        """Insert a new node and return self (so calls can be chained).

        Duplicates are stored in the left subtree.  (The original silently
        dropped a duplicate value whose left slot was already occupied.)
        """
        node = self
        while True:
            if value <= node.value:
                if node.left is None:
                    node.left = Tree(value, partition_key, lookup_key)
                    return self
                node = node.left
            else:
                if node.right is None:
                    node.right = Tree(value, partition_key, lookup_key)
                    return self
                node = node.right

    def walk(self, less_than, stop):
        """Yield (partition_key, value, lookup_key) in-order for every node
        whose value satisfies less_than <= value <= stop."""
        if self.left:
            yield from self.left.walk(less_than, stop)
        if less_than <= self.value <= stop:
            yield self.partition_key, self.value, self.lookup_key
        if self.right:
            yield from self.right.walk(less_than, stop)

    def delete(self, value):
        """Detach the first child node matching `value` together with its
        whole subtree (preserving the original pruning behaviour).

        Fixes two defects in the original: an AttributeError when the search
        walked past a missing left child, and an infinite loop when `value`
        equalled the current node's value.
        """
        node = self
        while node is not None:
            if value < node.value:
                child = node.left
                if child is not None and child.value == value:
                    node.left = None
                    print("Found item to delete")
                    return
                node = child
            elif value > node.value:
                child = node.right
                if child is not None and child.value == value:
                    node.right = None
                    print("Found item to delete")
                    return
                node = child
            else:
                # The root of this subtree matches: there is no parent link
                # to clear here, so stop (the original looped forever).
                return
class PartitionTree():
    """BST node mapping a boundary `value` to a partition sub-tree object."""

    def __init__(self, value, partition_tree):
        self.value = value
        self.partition_tree = partition_tree
        self.left = None
        self.right = None

    def insert(self, value, partition_tree):
        """Insert and return the node that now holds (value, partition_tree)."""
        # An empty slot on the matching side takes a fresh node directly.
        if self.left is None and value <= self.value:
            node = PartitionTree(value, partition_tree)
            self.left = node
            return node
        if self.right is None and value > self.value:
            node = PartitionTree(value, partition_tree)
            self.right = node
            return node
        # Otherwise recurse into the occupied side.
        if value > self.value:
            return self.right.insert(value, partition_tree)
        if value < self.value:
            return self.left.insert(value, partition_tree)
        # value == self.value: only a placeholder node ("" value) is
        # overwritten; any other duplicate is left untouched.
        if self.value == "":
            self.value = value
            self.partition_tree = partition_tree
        return self

    def walk(self, less_than, stop):
        """In-order traversal yielding (value, partition_tree) pairs whose
        value lies within [less_than, stop]."""
        if self.left is not None:
            yield from self.left.walk(less_than, stop)
        if less_than <= self.value <= stop:
            yield self.value, self.partition_tree
        if self.right is not None:
            yield from self.right.walk(less_than, stop)
|
15,202 | 3f1247bd1a37dcab9108c09a5dcf3293eb7b6964 | #!/bin/python
class Board:
    """Sparse Game of Life board: only the live cells are stored."""

    def __init__(self):
        self.aliveCells = set()

    def isCellAlive(self, x, y):
        return (x, y) in self.aliveCells

    def bringToLife(self, x, y):
        self.aliveCells.add((x, y))

    def evolve(self):
        """Advance one generation using Conway's rules.

        Both sets are computed against the current generation before the
        board is replaced, so evaluation order cannot leak new state.
        """
        survivors = {c for c in self.aliveCells
                     if self.countFriendsOf(c) in (2, 3)}
        births = {c for c in self.newBornCandidates()
                  if self.countFriendsOf(c) == 3}
        self.aliveCells = survivors | births

    def countFriendsOf(self, cell):
        """Number of live neighbours of `cell`."""
        return sum(1 for n in self.neighboursOf(cell) if n in self.aliveCells)

    def newBornCandidates(self):
        """Every cell adjacent to at least one live cell."""
        return {n for live in self.aliveCells for n in self.neighboursOf(live)}

    def neighboursOf(self, cell):
        """Yield the eight Moore neighbours of `cell`."""
        x, y = cell
        offsets = ((-1, -1), (-1, 0), (-1, 1),
                   (0, -1),           (0, 1),
                   (1, -1),  (1, 0),  (1, 1))
        for dx, dy in offsets:
            yield (x + dx, y + dy)
def createdBoardHasDeadCell():
    """A brand-new board contains no live cells."""
    board = Board()
    assert not board.isCellAlive(0, 0)


def canBringOneCellToLife():
    """bringToLife makes a cell report alive."""
    board = Board()
    board.bringToLife(0, 0)
    assert board.isCellAlive(0, 0)


def singleCellDiesOfLonelyness():
    """A lone cell (0 friends) dies after one generation."""
    board = Board()
    board.bringToLife(0, 0)
    board.evolve()
    assert not board.isCellAlive(0, 0)


def cellWithTwoFriendsSurvives():
    """The middle cell of a diagonal (2 friends) survives."""
    board = Board()
    board.bringToLife(0, 0)
    board.bringToLife(1, 1)
    board.bringToLife(2, 2)
    board.evolve()
    assert board.isCellAlive(1, 1)


def threeCellsGiveBirthToNewCell():
    """A dead cell with exactly 3 live neighbours is born."""
    board = Board()
    board.bringToLife(0, 0)
    board.bringToLife(2, 0)
    board.bringToLife(2, 2)
    board.evolve()
    assert board.isCellAlive(1, 1)


def cellsDieOfOverpopulation():
    """A live cell with 4 friends dies."""
    board = Board()
    board.bringToLife(0, 0)
    board.bringToLife(2, 0)
    board.bringToLife(1, 1)
    board.bringToLife(0, 2)
    board.bringToLife(2, 2)
    board.evolve()
    assert not board.isCellAlive(1, 1)


# Poor man's test runner: execute every check, then report success.
createdBoardHasDeadCell()
canBringOneCellToLife()
singleCellDiesOfLonelyness()
cellWithTwoFriendsSurvives()
threeCellsGiveBirthToNewCell()
cellsDieOfOverpopulation()
# Fixed: `print "..."` is Python-2-only syntax; the call form below is
# valid on both Python 2 and Python 3.
print("Success. Tests passed.")
|
15,203 | 556de5347af6037564145c21b38e02a236d51166 | __author__='callMeBin'
#!/usr/bin/env Python
# coding=utf-8
import re
import requests
import jieba
import numpy as np
import codecs
import matplotlib
from bs4 import BeautifulSoup
from wordcloud import WordCloud,ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd
import time
from pandas import Series,DataFrame
import getpass
from collections import Counter
def makeWordCloud():
    """Render a word cloud from a comment dump on the current user's Desktop.

    Pipeline: read wyComment3.txt -> keep lines starting with a CJK
    character, letter or digit -> segment with jieba -> drop stopwords
    (stopwords.txt) -> count frequencies -> draw the cloud masked and
    coloured by color.jpg.

    NOTE(review): all paths are hard-coded under C:/Users/<user>/Desktop,
    so this is Windows-only -- confirm before reusing elsewhere.
    """
    sys_name = getpass.getuser()
    target_file = 'wyComment3.txt'
    with open(r'C:/Users/'+sys_name+'/Desktop/'+target_file,'r',encoding='utf-8') as f:
        data = f.read()
    # Keep only lines that begin with a CJK character, a letter or a digit.
    pattern = re.compile(r'[\u4e00-\u9fa5a-zA-Z0-9].*')
    data = re.findall(pattern,data)
    #print(data)
    # Use jieba to segment the Chinese text into words.
    #jieba.add_word('白色球鞋')
    segment = jieba.lcut(str(data))
    words_df = DataFrame({'segment':segment})
    print(words_df)
    # Drop stopwords; quoting=3 (QUOTE_NONE) disables all quoting.
    stopwords = pd.read_csv(r'C:/Users/'+sys_name+'/Desktop/stopwords.txt',index_col=False,quoting=3,sep='\t',names=['stopword'],encoding='utf-8')
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # Count word frequencies.
    # NOTE(review): dict-style agg ({'计数': np.size}) was deprecated and
    # later removed in pandas -- confirm the pinned pandas version supports it.
    words_stat = words_df.groupby(by=['segment'])['segment'].agg({'计数':np.size})
    words_stat = words_stat.reset_index().sort_values(by=['计数'],ascending=False)
    # Render the cloud, recoloured from the background image.
    back_img = plt.imread(r'C:/Users/'+sys_name+'/Desktop/color.jpg')
    img_color = ImageColorGenerator(back_img)
    wordcloud = WordCloud(mask=back_img,font_path='simhei.ttf',background_color='white',max_font_size=200,min_font_size=20,random_state=42,max_words=500)
    # Use at most the 1000 most frequent words.
    word_frequence = {x[0]:x[1] for x in words_stat.head(1000).values}
    wordcloud = wordcloud.fit_words(word_frequence)
    plt.axis('off')
    plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
    plt.imshow(wordcloud.recolor(color_func=img_color))
def main():
    """Entry point: build and display the word cloud."""
    makeWordCloud()
if __name__ =='__main__':
    main()
15,204 | 0db08499236031e7d081f3f4fa2e1cb906b0f44f | # Copyright 2020 Francesco Ceccon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyomo.environ as pe
import numpy as np
from coramin.relaxations.auto_relax import (
RelaxationCounter,
_relax_root_to_leaf_map,
_relax_leaf_to_root_map,
_relax_root_to_leaf_SumExpression,
_relax_expr,
)
from coramin.relaxations import PWXSquaredRelaxation, PWUnivariateRelaxation, nonrelaxation_component_data_objects
from coramin.utils.coramin_enums import RelaxationSide
from pyomo.core.expr.numvalue import polynomial_degree
from suspect.pyomo.quadratic import QuadraticExpression
from galini.relaxations.expressions import _relax_leaf_to_root_QuadraticExpression
_relax_leaf_to_root_map[QuadraticExpression] = _relax_leaf_to_root_QuadraticExpression
_relax_root_to_leaf_map[QuadraticExpression] = _relax_root_to_leaf_SumExpression
class RelaxationData:
    """Bookkeeping shared across the relaxation passes for one model."""

    def __init__(self, model):
        self.model = model
        # Pyomo component maps, keyed by variable/expression objects.
        self.original_to_new_var_map = pe.ComponentMap()
        self.var_relax_map = pe.ComponentMap()
        self.degree_map = pe.ComponentMap()
        # Plain dicts keyed by tuples / id() values.
        self.aux_var_map = {}
        self.reverse_var_map = {}
        self.counter = RelaxationCounter()
def relax_expression(model, expr, relaxation_side, data):
    """Relax one expression, reusing the aux-variable cache in `data`."""
    side_map = pe.ComponentMap()
    side_map[expr] = relaxation_side
    return _relax_expr(expr=expr, aux_var_map=data.aux_var_map,
                       parent_block=model, relaxation_side_map=side_map,
                       counter=data.counter, degree_map=data.degree_map)
def relax_inequality(model, ineq_expr, relaxation_side, data):
    """Relax the expression part of a (possibly ranged) inequality.

    Accepts either a 3-arg ranged inequality (lb <= expr <= ub) or a 2-arg
    inequality where exactly one side is a non-leaf expression; returns a
    new pyomo inequality with that expression replaced by its relaxation.

    Raises ValueError when both sides of a 2-arg inequality are expressions.
    """
    if ineq_expr.nargs() == 3:
        lb, expr, ub = ineq_expr.args
    else:
        assert ineq_expr.nargs() == 2
        c1, c2 = ineq_expr.args
        # The constant/leaf side becomes the bound; the expression side is relaxed.
        if type(c1) in pe.nonpyomo_leaf_types or not c1.is_expression_type():
            assert c2.is_expression_type()
            lb = c1
            expr = c2
            ub = None
        elif type(c2) in pe.nonpyomo_leaf_types or not c2.is_expression_type():
            assert c1.is_expression_type()
            lb = None
            expr = c1
            ub = c2
        else:
            raise ValueError('Cannot handle inequality expression {} with args {}, {}'.format(ineq_expr, type(c1), type(c2)))
    relaxed_expr = relax_expression(model, expr, relaxation_side, data)
    return pe.inequality(lb, relaxed_expr, ub)
def relax_constraint(model, cons, data, inplace=False):
    """Relax a constraint's body, either in place or as a new Constraint.

    Linear bodies are passed through; nonlinear ones are replaced with
    their relaxation.  With inplace=True the existing constraint object is
    mutated and returned; otherwise a fresh pe.Constraint is built from the
    relaxed body and the original bounds.
    """
    body_degree = polynomial_degree(cons.body)
    if body_degree is not None:
        if body_degree <= 1:
            # Linear constraints need no relaxation.
            # NOTE(review): this builds a Constraint from the bare body,
            # dropping the original lb/ub -- confirm this is intended.
            return pe.Constraint(expr=cons.body)
    if cons.has_lb() and cons.has_ub():
        relaxation_side = RelaxationSide.BOTH
    elif cons.has_lb():
        relaxation_side = RelaxationSide.OVER
    elif cons.has_ub():
        relaxation_side = RelaxationSide.UNDER
    else:
        raise ValueError('Encountered a constraint without a lower or an upper bound: ' + str(cons))
    # NOTE(review): the side computed above is immediately overwritten, so
    # the ladder now only serves to reject unbounded constraints.
    relaxation_side = RelaxationSide.BOTH
    new_body = relax_expression(model, cons.body, relaxation_side, data)
    if inplace:
        cons._body = new_body
        return cons
    lb, ub = cons.lb, cons.ub
    if cons.has_lb() and cons.has_ub():
        # Ranged constraints are only supported when they are equalities.
        assert np.isclose(lb, ub)
        return pe.Constraint(expr=new_body == lb)
    elif cons.has_lb():
        return pe.Constraint(expr=lb <= new_body)
    elif cons.has_ub():
        return pe.Constraint(expr=new_body <= ub)
    raise ValueError('Encountered a constraint without a lower or an upper bound: ' + str(cons))
def update_relaxation_data(model, data):
    """Refresh data's id->variable map and its variable -> [relaxations] index.

    aux_var_map keys encode what was relaxed: (var_id, 'quadratic'),
    (id0, id1, 'mul'), (..., 'pow'/'div') or (var_id, 'exp'); only the
    first two kinds contribute variables to var_relax_map.
    Raises RuntimeError on any unrecognised key shape.
    """
    for var in model.component_data_objects(pe.Var, active=True, descend_into=True):
        data.reverse_var_map[id(var)] = var
    for aux_var_info, aux_var_value in data.aux_var_map.items():
        _, relaxation = aux_var_value
        aux_var_info_len = len(aux_var_info)
        if aux_var_info_len == 2 and aux_var_info[1] == 'quadratic':
            var = data.reverse_var_map[aux_var_info[0]]
            vars = [var]
        elif aux_var_info_len == 3 and aux_var_info[2] == 'mul':
            var0 = data.reverse_var_map[aux_var_info[0]]
            var1 = data.reverse_var_map[aux_var_info[1]]
            vars = [var0, var1]
        elif aux_var_info_len == 3 and aux_var_info[2] in ['pow', 'div']:
            # pow/div relaxations are not tracked per-variable.
            vars = []
        elif aux_var_info_len == 2 and aux_var_info[1] == 'exp':
            vars = []
        else:
            raise RuntimeError("Invalid aux var info ", aux_var_info, aux_var_value)
        for var in vars:
            if var not in data.var_relax_map:
                data.var_relax_map[var] = [relaxation]
            else:
                data.var_relax_map[var].append(relaxation)
def rebuild_relaxations(model, data, use_linear_relaxation=True):
    """Toggle linearity on every cached relaxation, then rebuild it."""
    for entry in data.aux_var_map.values():
        relaxation = entry[1]
        relaxation.use_linear_relaxation = use_linear_relaxation
        relaxation.rebuild()
def relax(model, data, use_linear_relaxation=True):
    """Clone `model` and relax its nonlinear objectives and constraints.

    The original model is untouched; `data` records the mapping between
    original and cloned variables and all relaxations that were created.
    Returns the relaxed clone.
    """
    new_model = model.clone()
    # Remember how each original variable maps into the clone.
    for var in model.component_data_objects(pe.Var,
                                            active=True,
                                            descend_into=True):
        new_var = new_model.find_component(var)
        data.original_to_new_var_map[var] = new_var
    model = new_model
    # Containers that the relaxation machinery will populate.
    model.relaxations = pe.Block()
    model.aux_vars = pe.VarList()
    model.aux_cons = pe.ConstraintList()
    for obj in nonrelaxation_component_data_objects(model, ctype=pe.Objective, active=True):
        degree = polynomial_degree(obj.expr)
        if degree is not None:
            if degree <= 1:
                # Linear objectives are kept as-is.
                continue
        assert obj.is_minimizing()
        # relaxation_side = RelaxationSide.UNDER
        relaxation_side = RelaxationSide.BOTH
        new_body = relax_expression(model, obj.expr, relaxation_side, data)
        obj._expr = new_body
    for cons in nonrelaxation_component_data_objects(model, ctype=pe.Constraint, active=True):
        relax_constraint(model, cons, data, inplace=True)
    update_relaxation_data(model, data)
    rebuild_relaxations(model, data, use_linear_relaxation)
    return model
15,205 | d2025e3a027bf2a9d9eaf429004af20c16992c35 | #encoding=utf8
import json
import re,os
from wox import Wox,WoxAPI
class Shadowsocks(Wox):
    """Wox launcher plugin that appends domains to the Shadowsocks PAC file."""

    def get_pac_path(self):
        """Read the PAC file location from config.json next to this script."""
        with open(os.path.join(os.path.dirname(__file__),"config.json"), "r") as content_file:
            config = json.loads(content_file.read())
        return config["pacPath"]

    def add_new_domain(self,domain):
        """Insert `domain` into the `domains = {...};` map inside the PAC file.

        Rewrites the whole map in place (seek/write/truncate) and notifies
        the user through WoxAPI on both the empty-input and success paths.
        """
        if not domain:
            WoxAPI.show_msg("Warning","You can't add empty domain")
            return
        # Matches the JS object literal holding the domain rules.
        r = re.compile(r"domains = {([\s\S]*)};")
        with open(self.get_pac_path(),"r+") as pac:
            pactxt = pac.read()
            existing_domains = r.search(pactxt).group(1)
            domains = json.loads("{" + existing_domains + "}")
            domains[domain] = 1
            newpactxt = r.sub(r"domains = " + json.dumps(domains,indent = 4) +";",pactxt)
            # Rewrite the file from the start and drop any leftover tail.
            pac.seek(0)
            pac.write(newpactxt)
            pac.truncate()
        WoxAPI.show_msg("Success","{} is now in PAC file".format(domain))

    def query(self,query):
        """Offer a single result that triggers add_new_domain via JSON-RPC."""
        res = []
        res.append({
            "Title": "add {} to Shadowsocks PAC list".format(query),
            "IcoPath":"ssw.png",
            "JsonRPCAction":{"method": "add_new_domain", "parameters": [query]}
        })
        return res

if __name__ == "__main__":
    Shadowsocks()
|
# Read n and n integers; print their product, or 0 if any factor is zero,
# or -1 once the running product exceeds 10**18.
n = int(input())
array = list(map(int, input().split()))
LIMIT = 10 ** 18
if 0 in array:
    ans = 0
else:
    ans = 1
    for i in range(n):
        # Check before multiplying, exactly like the original, so the loop
        # stops as soon as the cap has been crossed.
        if ans > LIMIT:
            break
        ans = ans * array[i]
print(-1 if ans > LIMIT else ans)
15,207 | 9ba3438bb8b82d599777d27f8a2dec5210c5d245 | # coding: utf-8
import sys
assert sys.version_info >= (3,4)
ROOT_DELIM = '_'
IGS_DELIM = '.'
class Tag(object):
    """One morphological analysis: a root plus a list of inflectional groups.

    NOTE(review): __init__ calls xpos() and conll_analysis() but discards
    their return values -- only coarse() has a lasting effect; confirm the
    extra calls are intentional.
    """

    def __init__(self, root, ig):
        self.root = root
        self.ig = ig
        # Original fine-grained IG list, saved by coarse() before rewriting.
        self.fine_ig = None
        self.coarse()
        self.xpos()
        self.conll_analysis()

    def __repr__(self):
        # Render as: "root" ig1-tags..., starting a fresh (deeper-indented)
        # line for every embedded sub-root IG (those beginning with '"').
        tabs_num = 1
        analysis = '"' + self.root + '"'
        if self.ig:
            for ig in self.ig:
                if ig.startswith('"'):
                    tabs_num += 1
                    analysis += '\n' + '\t'*tabs_num + ig.replace('.',' ')
                else:
                    analysis += ' ' + ig.replace('.',' ')
        else:
            analysis += ' unk'
        return analysis

    def __eq__(self, other):
        return self.root == other.root and self.ig == other.ig

    def __ne__(self, other):
        return self.root != other.root or self.ig != other.ig

    def __hash__(self):
        # Hash over root plus the (coarsened) IG sequence.
        return hash((self.root, ) + tuple(self.ig))

    def coarse(self):
        """Collapse personal-pronoun IGs to 'prn.pers[.case]' in place,
        keeping the untouched originals in self.fine_ig."""
        cases = ['nom', 'gen', 'acc', 'abl', 'dat', 'ins', 'loc']
        for i in range(len(self.ig)):
            if self.ig[i].startswith('prn.pers'):
                if not self.fine_ig:
                    self.fine_ig = list(self.ig)
                # Keep the case suffix only when it is one of the known cases.
                if self.ig[i][-3:] in cases:
                    self.ig[i] = 'prn.pers.'+self.ig[i][-3:]
                else:
                    self.ig[i] = 'prn.pers'

    def fine(self):
        """Restore the fine-grained IGs saved by coarse(), if any."""
        if self.fine_ig:
            self.ig = list(self.fine_ig)

    def xpos(self):
        """Return the POS: the first tag of the first IG, or 'unk'."""
        if self.ig:
            first_ig_tags = self.ig[0].split('.')
        else:
            first_ig_tags = ['unk']
        return first_ig_tags[0]

    def conll_analysis(self):
        """Return [(form, '_', xpos, feats)] tuples in CoNLL-like shape.

        Embedded sub-root IGs become their own tuples; all remaining tags
        are joined with '|' into the features of the main root, which is
        inserted first.
        """
        analysis = []
        feats = ''
        for ig in self.ig:
            if ig.startswith('"'):
                parts = ig.split('.')
                sub_root = parts[0].strip('"')
                sub_xpos = parts[1]
                sub_feats = '|'.join(parts[2:])
                analysis.append((sub_root, '_', sub_xpos, sub_feats if sub_feats!='' else '_'))
            else:
                feats += '|' + ig.replace('.', '|')
        # Strip the leading xpos tag (plus its separator) from the features.
        clean_feats = feats[len(self.xpos()) + 1:].lstrip('|')
        analysis.insert(0, (self.root, '_', self.xpos(), clean_feats if clean_feats!='' else '_'))
        return analysis
def process_input(text):
    """Parse CG-style analyser output lines into parallel sequences.

    Returns (word_sequence, amb_sequence) where amb_sequence[i] is the list
    of candidate Tag analyses for word_sequence[i].  Both sequences are
    seeded with a sentence-boundary '.' token.
    """
    word_sequence = ['.']
    amb_sequence = []
    current_ambiguities = [Tag('.', ['sent'])]
    for line in text:
        if line.startswith('"'):
            # A new surface form: flush the analyses gathered so far.
            amb_sequence.append(current_ambiguities)
            current_ambiguities = []
            word = line.strip()[2:-2]
            word_sequence.append(word)
        elif line.strip() != '' and line.startswith('\t'):
            # An analysis line: first protect spaces inside the quoted root
            # by replacing them with ROOT_DELIM, then tokenize.
            line = list(line.strip())
            i = 1
            while line[i] != '"':
                if line[i] == ' ':
                    line[i] = ROOT_DELIM
                i += 1
            line = ''.join(line)
            line = line.split()
            root = line[0][1:-1]
            igs = []
            if root.startswith('*'):
                # '*' marks an unknown word; keep the bare root with no IGs.
                root = root[1:]
                #igs = ['UNK']
            else:
                # Group tags into inflectional groups; a new group starts at
                # a derivational tag or at an embedded sub-root ('"...').
                current_ig = ''
                for elem in line[1:]:
                    if elem.startswith(('subst', 'attr', 'advl', 'ger_', 'gpr_', 'gna_', 'prc_', 'ger')):
                        igs.append(current_ig)
                        current_ig = elem
                    elif elem.startswith('"'):
                        igs.append(current_ig)
                        current_ig = elem
                    else:
                        current_ig += elem if current_ig == '' else IGS_DELIM+elem
                igs.append(current_ig)
            current_ambiguities.append(Tag(root, igs))
    # Flush the analyses of the final word.
    amb_sequence.append(current_ambiguities)
    return word_sequence, amb_sequence
15,208 | 6460fdaa934c80168b6f1ffa3a54e936464d16ef | from django.urls import path
from . import views
# URL routes for this app; the `name` values are referenced by
# reverse()/`{% url %}` lookups elsewhere in the project.
urlpatterns = [
    path('projectcategory/', views.ProjectCategoryList.as_view(), name='projectcategory_list'),
    path('projectdetails/', views.ProjectDetailsList.as_view(), name='projectdetails_list'),
    path('userDetails/', views.UserDetail.as_view(), name='user-details'),
]
|
15,209 | c4b284a77bc6f1d59ac1aada1c4f4596d89a3e67 |
# Collapse input.csv (header + "key,value" rows) so that each key appears on
# exactly one output line with all of its values space-joined, preserving
# the order in which keys first appear.
#
# Fixes over the original: both file handles are now closed via context
# managers (they leaked before), each row is parsed once instead of twice,
# and the dedup-by-delete second pass is replaced by a single ordered dict.
with open('input.csv') as src:
    lines = [line.rstrip('\n ') for line in src]

header, rows = lines[0], lines[1:]

dc = {}
for row in rows:
    parts = row.split(",")
    key, value = parts[0], parts[1]
    if key not in dc:
        dc[key] = value
    else:
        dc[key] = dc[key] + (" %s" % value)

# dicts preserve insertion order (Python 3.7+), so the output order matches
# the original's first-occurrence ordering.
with open('input_one_line.csv', "w") as outFile:
    outFile.write("%s\n" % header)
    for key, value in dc.items():
        outFile.write("%s,%s\n" % (key, value))
15,210 | 995617be8a20a829ede13e01407ccf2132ca2ce0 | #!/usr/bin/python3
#Automate Ecoli
#Ben Lorentz 2.22.19
import sys
import os
import logging
# Pipeline driver: download an E. coli SRA run, assemble it with SPAdes,
# annotate the long contigs with Prokka, then quantify expression with
# TopHat2 + Cufflinks.  Every step is guarded by an existence check so the
# script can be re-run and resume where it left off.  All tool invocations
# go through os.system and are also written to OptionA.log.
dir = os.getcwd()  # NOTE(review): shadows the builtin `dir`
cwd = dir+"/OptionA_Ben_Lorentz"
if(not os.path.exists("OptionA_Ben_Lorentz")):
    os.system("mkdir OptionA_Ben_Lorentz")
    os.chdir(dir+"/OptionA_Ben_Lorentz")
else:
    os.chdir(dir+"/OptionA_Ben_Lorentz")
resultdict = {}   # contig id -> sequence
longBois = []     # (id, seq) pairs for contigs >= 1000 bp
logging.basicConfig(level=logging.DEBUG, filename="OptionA.log")
sraAccess = "SRR8185310"
#Downloads the files form sra database
if(not os.path.isfile(str(sraAccess+".sra"))):
    print("Getting Files")
    logging.info("The reads were not found so we are downloading them now")
    os.system("wget ftp://ftp.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/SRR818/SRR8185310/SRR8185310.sra")
#Need to turn the .sra file into .fastq so we can use spades to assemble
if(not os.path.isfile(str(sraAccess+"_1.fastq"))):
    print("Turning .sra to .fastq")
    logging.info("Turning .sra file into .fastq file")
    os.system("fastq-dump -I --split-files "+sraAccess+".sra")
#Assemble reads using spades and typical params
if(not os.path.exists(os.getcwd()+"/spades")):
    print("Running spades with standard params")
    logging.info("spades -k 55,77,99,127 -t 2 --only-assembler -s "+sraAccess+"_1.fastq -o "+os.getcwd()+"/spades")
    os.system("spades -k 55,77,99,127 -t 2 --only-assembler -s "+sraAccess+"_1.fastq -o "+os.getcwd()+"/spades")
#Goes into spades folder and pull contigs.fasta out
if (not os.path.isfile("contigs.fasta")):
    os.chdir(cwd+"/spades")
    os.system("cp contigs.fasta "+cwd+"/")
    os.chdir(cwd)
if(resultdict == {}):
    with open("contigs.fasta", "r") as myfile:
        allseq = myfile.readlines()
    #My way of parsing a fasta file into a dictionary keys are seqID and values are seqs
    seq = ""
    header = ""
    for item in allseq:
        if(item[0]==">"):
            header = item[1:].strip()
            seq = " "
        # Only A/T/C/G-starting lines are treated as sequence data.
        if (item[0] != ">" and (item[0] == "A" or item[0] == "T" or item[0] == "C" or item[0] == "G")):
            seq = seq + item[0:].strip()
        resultdict[header]=seq
#this turns a dict into a list
resultlist = list(resultdict.items())
logging.info(str(len(resultlist)) + " contigs after alignment")
for item in resultlist:
    if (len(item[1]) >= 1000):
        longBois.append(item)
logging.info("There are " + str(len(longBois)) + " contigs > 1000 in the assembly.")
#Calculate the length of the assembly
assemb = 0
if(assemb == 0):
    for each in longBois:
        assemb = len(each[1]) + assemb
logging.info("There are " +str(assemb)+ " bp in the assembly")
#Pulls in the contigs over 1000 bp
# NOTE(review): the guard checks longBoiContigs.fa but the file written is
# longBoi.fasta -- the step therefore re-runs every time; confirm intended.
if(not os.path.isfile("longBoiContigs.fa")):
    result = ""
    for item in longBois:
        result = result+ ">" +item[0].strip()+ '\n'
        result = result + item[1].strip() + '\n'
    #Boilerplate to write to file
    file = open("longBoi.fasta", "w")
    file.write(str(result))
    file.close()
#Runs prokka on the contigs longer than 1000 bp
if(not os.path.exists(os.getcwd()+"/prokka")):
    os.system("prokka --usegenus --outdir prokka -genus Escherichia longBoi.fasta")
    logging.info("prokka --usegenus --outdir prokka -genus Escherichia longBoi.fasta")
    os.chdir(cwd+"/prokka")
    os.system("cp PROKKA*.txt "+cwd+"/prokka.txt")
    os.chdir(cwd)
# Echo the whole Prokka summary into the log.
with open("prokka.txt", "r") as prokka:
    allLine = prokka.readlines()
    for line in allLine:
        logging.info(line.strip())
ecoli = {}
#Calculating diffs from the reference sequence and writes to log
if(ecoli == {}):
    # Reference counts for E. coli K-12 used for the comparison below.
    ecoli = {"CDS":4140, "tRNAs":89}
    with open("prokka.txt", "r") as prokka:
        allLine = prokka.readlines()
    # NOTE(review): assumes CDS and tRNA counts sit on fixed lines 4 and 5
    # of the Prokka summary -- confirm against the Prokka version in use.
    data = {"CDS":int(allLine[3].split(":")[1].strip()), "tRNAs": int(allLine[4].split(":")[1].strip())}
    cdsDiff=ecoli["CDS"]-data["CDS"]
    tRNADiff=ecoli["tRNAs"]-data["tRNAs"]
    logging.info("Prokka found " +str(abs(cdsDiff))+ (" additional" if cdsDiff > 0 else " fewer") + " CDS and " +str(abs(tRNADiff))+ (" additional " if tRNADiff > 0 else " fewer ") + "tRNA than the RefSeq.")
#Pulls down sequence of ecoli k-12 and makes index and calls tophat on the reference
if(not os.path.isfile("SRR1411276.sra")):
    os.system("wget ftp://ftp.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/SRR141/SRR1411276/SRR1411276.sra")
    os.system("wget ftp://ftp.ncbi.nlm.nih.gov/genomes/archive/old_refseq/Bacteria/Escherichia_coli_K_12_substr__MG1655_uid57779/NC_000913.fna")
    print("Turning .sra to .fastq")
    logging.info("Turning .sra file into .fastq file")
    os.system("fastq-dump -I --split-files SRR1411276.sra")
    logging.info("fastq-dump -I --split-files SRR1411276.sra")
    os.system("bowtie2-build NC_000913.fna EcoliK12")
    logging.info("bowtie2-build NC_000913.fna EcoliK12")
    os.system("tophat2 --no-novel-juncs -o tophat EcoliK12 SRR1411276_1.fastq")
    logging.info("tophat2 --no-novel-juncs -o tophat EcoliK12 SRR1411276_1.fastq")
os.chdir("tophat")
#runs cufflinks on the output from tophat
if( not os.path.exists("cufflinks")):
    logging.info("running cufflinks")
    logging.info("cufflinks accepted_hits.bam -o cufflinks")
    os.system("cufflinks accepted_hits.bam -o cufflinks")
    os.chdir("cufflinks")
    # NOTE(review): missing space before the destination path -- this cp
    # call is malformed; the later "moves the cufflinks output" step is
    # what actually copies transcripts.gtf.
    os.system("cp transcripts.gtf" + cwd)
    os.chdir(cwd)
#moves the cufflinks output to the top folder
if( not os.path.isfile("transcripts.gtf")):
    logging.info("Moving the transcripts.gtf file to: " + cwd)
    os.chdir(cwd)
    os.chdir("tophat")
    os.chdir("cufflinks")
    os.system("cp transcripts.gtf " + cwd)
    os.chdir(cwd)
#creates the Option1.fpkm file formatted correctly
if( not os.path.isfile("Option1.fpkm")):
    logging.info("Formatting the final output")
    output = ""
    with open("transcripts.gtf", "r") as transcript:
        record = []
        for line in transcript:
            record.append(line)
    # GTF rows alternate transcript/exon, so the FPKM attribute sits at a
    # different position in the attributes field on even vs odd rows.
    for i in range(0,len(record)):
        if(i % 2 == 0):
            seqname = record[i].split('\t')[0].strip()
            start = record[i].split('\t')[3].strip()
            end = record[i].split('\t')[4].strip()
            strand = record[i].split('\t')[6].strip()
            FPKM = record[i].split('\t')[8].split(";")[2]
        else:
            seqname = record[i].split('\t')[0].strip()
            start = record[i].split('\t')[3].strip()
            end = record[i].split('\t')[4].strip()
            strand = record[i].split('\t')[6].strip()
            FPKM = record[i].split('\t')[8].split(";")[3]
        output = output + seqname+ ',' +start+ "," +end+ ',' +strand+ "," +FPKM + '\n'
    #Boilerplate to write to file
    file = open("Option1.fpkm", "w")
    file.write(str(output))
    file.close()
print("The final file is called Option1.fpkm in the directory : " + cwd)
|
15,211 | a5ae78d9c43569237b899b4b736573079e0cb014 | from urllib.parse import urljoin
import requests
import logging
from bs4 import BeautifulSoup as Bs
from requests import HTTPError
from application.models.model import Radio
_URL = "http://radios.sapo.ao"
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 " \
"Safari/537.36"
LANGUAGE = "pt-PT,pt;q=0.8,pt-BR;q=0.7"
session = requests.Session()
session.headers['User-Agent'] = USER_AGENT
session.headers['Accept-Language'] = LANGUAGE
session.headers['Content-Language'] = LANGUAGE
class RadioUtilities(object):
    """Scrapes radios.sapo.ao for station metadata and syncs it to the DB.

    Fixes over the original: get_radio_soup no longer raises
    UnboundLocalError when the request fails (it now returns None), and
    every except block logs the caught exception instance instead of the
    exception class object.
    """

    @classmethod
    def get_radio_soup(cls, url):
        """GET `url` through the shared session and parse it with
        BeautifulSoup; returns None when the request fails."""
        soup = None
        try:
            html = session.get(url)
            soup = Bs(html.text, "html.parser")
        except HTTPError as err:
            logging.debug("radio soup http error with code :%s", err)
        except ConnectionError as err:
            logging.debug("raddio soup connection error with code :%s", err)
        finally:
            # Preserves the original best-effort contract: always return.
            return soup

    @classmethod
    def get_radio_name(cls, radio_item):
        """Extract the station name from a listing <li>; '' when absent."""
        radio_name = ''
        names = radio_item.find_all('span')
        if len(names) != 0:
            # Keep the last non-empty <span> text, as the original did.
            for name in names:
                if name.string is not None:
                    radio_name = name.string
        else:
            logging.info('error radio name not founded!')
        return radio_name

    @classmethod
    def get_radio_url(cls, radio_item):
        """Relative link to the station page; '' when missing."""
        radio_url = ''
        url_ = radio_item.find('a')
        if url_ is not None:
            radio_url = url_.get('href')
        else:
            logging.info('radio url not founded!')
        return radio_url

    @classmethod
    def get_radio_img(cls, radio_item):
        """Logo image src; '' when missing."""
        img_logo = ''
        src = radio_item.find('img')
        if src is not None:
            img_logo = src.get('src')
        else:
            logging.info("url image logo not founded")
        return img_logo

    @classmethod
    def built_radio_url(cls, url, params):
        """Join the site base url with a relative station path."""
        return urljoin(url, params)

    @classmethod
    def get_radio_stream_url(cls, radio_url):
        """Open the station page and pull its first stream link; '' on failure."""
        url_stream = ''
        try:
            soup = cls.get_radio_soup(radio_url)
            player = soup.find(class_="player")
            if player is not None:
                url_stream = player.find('li').find('a').get('href')
        except Exception as err:
            logging.debug("radio url stream error :%s", err)
        finally:
            return url_stream

    @classmethod
    def get_radio_data(cls, url):
        """Scrape every station listed on `url` into {name: Radio};
        returns {} on any failure."""
        data_radio = {}
        try:
            soup = cls.get_radio_soup(url)
            list_radios = soup.find_all(id="radios-list")
            radio_items = [li for ul in list_radios for li in ul.findAll('li')]
            if len(list_radios) > 0 and len(radio_items) > 0:
                for radio in radio_items:
                    result = dict()
                    result['r_name'] = cls.get_radio_name(radio)
                    result['img_src'] = cls.get_radio_img(radio)
                    result['params'] = cls.get_radio_url(radio)
                    result['url_radio'] = cls.built_radio_url(url, result['params'])
                    result['stream_link'] = cls.get_radio_stream_url(result['url_radio'])
                    radio_ = (Radio(result['r_name'], result['url_radio'], result['stream_link'], result['img_src']))
                    data_radio.update({radio_.r_name: radio_})
            else:
                logging.info("radio list not found")
        except HTTPError as err:
            logging.debug("radio webdata Http error. code :%s", err)
        except ConnectionError as err:
            logging.debug("radio webdata connection error. code :%s", err)
        except Exception as err:
            logging.debug("main Error while creating radio_webdata! code:%s", err)
        finally:
            return data_radio

    @classmethod
    def add_radio_db(cls, radio_webdata, db):
        """Bulk-insert scraped radios, but only while the table is empty.
        Returns True on a successful commit."""
        status = False
        radio_list_db = Radio.query.all()
        try:
            if (len(radio_webdata) != 0) and (len(radio_list_db) == 0):
                db.session.add_all(list(radio_webdata.values()))
                db.session.commit()
                status = True
        except Exception as err:
            logging.debug("Error while adding radio info to database:%s", err)
        finally:
            db.session.close()
        return status

    @classmethod
    def update_radio_db(cls, radio_webdata, db):
        """Overwrite stored station rows (matched by name) with fresh data."""
        radio_db = Radio.query.all()
        try:
            if len(radio_webdata) != 0 and len(radio_db) != 0:
                for radio_web in radio_webdata.values():
                    db.session.query(Radio). \
                        filter(Radio.r_name == radio_web.r_name). \
                        update({'r_name': radio_web.r_name, 'url': radio_web.url,
                                'stream_link': radio_web.stream_link, 'img_logo': radio_web.img_logo})
                db.session.commit()
            else:
                logging.info("No need to update radios database!!")
        except Exception as err:
            logging.debug("unable to update radio_webdata:%s", err)
        finally:
            db.session.close()
|
15,212 | 64e5169f31e74b95c90687ab7fb8884577a51557 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
class Vector(object):
    """A simple list-backed numeric vector (Python 2 code: xrange/long).

    NOTE(review): several operations look unintentional but are preserved
    as-is: __add__/__sub__ return plain lists (not Vector instances), and
    multiplying by a scalar returns the SUM of the scaled components
    rather than a scaled vector.
    """
    def __init__(self, list):
        # NOTE(review): the parameter name shadows the builtin `list`.
        self.list = list
    def __len__(self):
        return len(self.list)
    def __str__(self):
        return str(self.list)
    def __add__(self, vector1):
        # Element-wise sum; assumes vector1 has at least len(self) entries.
        svector = []
        for i in xrange(len(self)):
            svector.append(self.list[i] + vector1.list[i])
        return svector
    def __sub__(self, vector1):
        # Element-wise difference; returns a plain list (see class note).
        svector = []
        for i in xrange(len(self)):
            svector.append(self.list[i] - vector1.list[i])
        return svector
    def __mul__(self, other):
        # Vector * Vector -> dot product; Vector * number -> scaled SUM.
        # Any other operand type implicitly returns None.
        if isinstance(other, Vector):
            return self.mulV(other)
        else:
            if isinstance(other, (float, int, long)):
                return self.mulconst(other)
    def __eq__(self, other):
        # Equal iff same length and identical components.
        if len(self) != len(other):
            return False
        else:
            for i in xrange(len(self)):
                if self.list[i] != other.list[i]:
                    return False
            return True
    def mulconst(self, const):
        # Sum of all components scaled by `const` (a scalar, see class note).
        ans = 0
        for i in xrange(len(self)):
            ans += self.list[i] * const
        return ans
    def mulV(self, vector1):
        # Dot product with another vector of (at least) the same length.
        ans = 0
        for i in xrange(len(self)):
            ans += self.list[i] * vector1.list[i]
        return ans
    def getIndex(self, index):
        # 1-based component access; returns a message string when out of range.
        index -= 1
        for i in xrange(len(self)):
            if i == index:
                return self.list[i]
        return "No value for the given index"
def main():
    """Small demonstration of the Vector API."""
    v1 = Vector([1, 3, 5])
    v2 = Vector([1, 3, 5])
    print(len(v1))
    print(v1)
    print(v1 + v2)
    print(v1 - v2)
    print(v1 * v2)
    print(v1 * 2)
    print(v1 == v2)
    short = Vector([1])
    print(v1 == short)


if __name__ == "__main__":
    main()
|
15,213 | 2f60afa8200f278152252247b40a8ce73ec8227d | # -*- coding: utf-8 -*-
"""
####
Main
####
Main model runner
Note: This is intended to be run from the command line
"""
import time
import os

# Timestamp taken as early as possible so the reported run time includes
# the remaining (heavy) imports below.
start = time.time() # start run clock

import shutil
import logging
import yaml
from pathlib import Path
import concurrent.futures

import click

from fetch3.__version__ import __version__ as VERSION
from fetch3.utils import make_experiment_directory, load_yaml
from fetch3.initial_conditions import initial_conditions
from fetch3.met_data import prepare_met_data
from fetch3.model_config import save_calculated_params, setup_config
from fetch3.model_functions import Picard, format_model_output, save_csv, save_nc, combine_outputs
from fetch3.model_setup import spatial_discretization, temporal_discretization
from fetch3.sapflux import calc_sapflux, format_inputs

# Root logging configuration: everything at DEBUG, written to file handlers
# attached later; a StreamHandler mirrors fetch3 messages to the console.
log_format = "%(levelname)s %(asctime)s %(processName)s - %(name)s - %(message)s"
logging.basicConfig(
    filemode="w",
    format=log_format,
    level=logging.DEBUG
)
logger = logging.getLogger("fetch3")
logger.addHandler(logging.StreamHandler())

# Default locations, all relative to the repository root (two levels above
# this file): config_files/model_config.yml, data/, output/.
parent_path = Path(__file__).resolve().parent.parent
default_config_path = parent_path / "config_files" / "model_config.yml"
default_data_path = parent_path / "data"
default_output_path = parent_path / "output"

model_dir = Path(__file__).parent.resolve()  # File path of model source code
@click.command()
@click.option(
    "--config_path",
    type=click.Path(exists=True, dir_okay=False, path_type=Path),
    default=str(default_config_path),
    help="Path to configuration YAML file",
)
@click.option(
    "--data_path",
    type=click.Path(exists=True, path_type=Path),
    default=str(default_data_path),
    help="Path to data directory",
)
@click.option(
    "--output_path",
    type=click.Path(exists=True, path_type=Path),
    default=str(parent_path),
    help="Path to output directory",
)
@click.option(
    "--species",
    type=str,
    default=None,
    help="species to run the model for"
)
def main(config_path, data_path, output_path, species):
    """Run FETCH3 for one or more species in parallel worker processes.

    Sets up the experiment directory and logging, fans out one
    `run_single` process per species, then combines and saves the
    per-species outputs as netCDF.
    """
    loaded_configs = load_yaml(config_path)

    # If using the default output directory, create directory if it doesn't exist
    if output_path == parent_path:
        output_path = default_output_path
        output_path.mkdir(exist_ok=True)

    # Make a new experiment directory if make_experiment_dir=True was specified in the config
    # Otherwise, use the output directory for the experiment directory
    mk_exp_dir = loaded_configs["model_options"].get("make_experiment_dir", False)
    exp_name = loaded_configs["model_options"].get("experiment_name", "")

    if mk_exp_dir:
        exp_dir = make_experiment_directory(output_path, experiment_name=exp_name)
    else:
        exp_dir = output_path

    # Fresh run log for this experiment; remove any stale one first.
    log_path = exp_dir / "fetch3.log"
    if log_path.exists():
        os.remove(log_path)
    fh = logging.FileHandler(log_path)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(log_format))
    logger.addHandler(fh)

    HEADER_BAR = """
    ##############################################
    """

    LOG_INFO = (
        f"""
        FETCH Run
        Output Experiment Dir: {exp_dir}
        Config file: {config_path}
        Start Time: {time.ctime(start)}
        Version: {VERSION}"""
    )

    logger.info(
        f"\n{HEADER_BAR}"
        f"\n{LOG_INFO}"
        f"\n{HEADER_BAR}"
    )

    # Copy the config file to the output directory
    copied_config_path = exp_dir / config_path.name
    if not copied_config_path.exists():
        shutil.copy(config_path, copied_config_path)

    # Get species list
    if species is None:
        species_list = list(loaded_configs['species_parameters'].keys())
    else:
        # BUG FIX: the original used `list(species)`, which splits the
        # species *string* into single characters (e.g. "oak" ->
        # ['o', 'a', 'k']); wrap the single species name in a list instead.
        species_list = [species]

    try:
        results = []
        with concurrent.futures.ProcessPoolExecutor() as executor:
            # One worker process per species; map futures back to their
            # species name for error reporting.
            species_runs = {executor.submit(run_single, species, config_path, data_path, exp_dir): species for species in species_list}
            logger.info("submitted jobs!")
            for future in concurrent.futures.as_completed(species_runs):
                original_task = species_runs[future]
                try:
                    results.append(future.result())
                except Exception as exc:
                    logger.exception('%r generated an exception: %s' % (original_task, exc))
            concurrent.futures.wait(species_runs)

        # Merge the per-species datasets and write them to netCDF.
        nc_output = combine_outputs(results)
        save_nc(exp_dir, nc_output)
    except Exception as e:
        logger.exception("Error completing Run! Reason: %r", e)
        raise
    finally:
        logger.info(f"run time: {time.time() - start} s")  # end run clock
        logger.info("run complete")
def run_single(species, config_file, data_dir, output_dir):
    """Run the full FETCH3 model pipeline for a single species.

    Executed in a worker process by `main`. Reads the config and met data,
    runs the Picard solver, formats outputs and computes sapflux.

    Args:
        species: species key into the config's `species_parameters`.
        config_file: path to the model config YAML.
        data_dir: directory containing the input/met data.
        output_dir: experiment directory to write logs and params into.

    Returns:
        dict: the per-species `nc_output` datasets (including "sapflux").
    """
    # Per-species log file; remove any stale one first.
    # NOTE(review): handlers accumulate on the shared module logger across
    # calls within one process — presumably fine here because each species
    # runs in its own worker process; verify if this is ever called twice.
    log_path = output_dir / f"fetch3_{species}.log"
    if log_path.exists():
        os.remove(log_path)
    fh = logging.FileHandler(log_path)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(log_format))
    logger.addHandler(fh)

    # Log the directories being used
    logger.info("Using config file: " + str(config_file))
    logger.info("Using output directory: " + str(output_dir))

    cfg = setup_config(config_file, species=species)

    # save the calculated params to a file
    save_calculated_params(str(output_dir / "calculated_params.yml"), cfg)

    ##########Set up spatial discretization
    zind = spatial_discretization(cfg)

    ######prepare met data
    met, tmax, start_time, end_time = prepare_met_data(cfg, data_dir, zind.z_upper)

    t_num, nt = temporal_discretization(cfg, tmax)
    logger.info("Total timesteps to calculate: : %d" % nt)

    ############## Calculate initial conditions #######################
    logger.info("Calculating initial conditions ")
    H_initial, Head_bottom_H = initial_conditions(cfg, met.q_rain, zind)

    ############## Run the model #######################
    logger.info("Running the model ")
    (
        H,
        K,
        S_stomata,
        theta,
        S_kx,
        S_kr,
        C,
        Kr_sink,
        Capac,
        S_sink,
        EVsink_ts,
        THETA,
        infiltration,
        trans_2d,
    ) = Picard(cfg, H_initial, Head_bottom_H, zind, met, t_num, nt, output_dir, data_dir)

    ############## Calculate water balance and format model outputs #######################
    df_waterbal, df_EP, nc_output = format_model_output(
        species,
        H,
        K,
        S_stomata,
        theta,
        S_kx,
        S_kr,
        C,
        Kr_sink,
        Capac,
        S_sink,
        EVsink_ts,
        THETA,
        infiltration,
        trans_2d,
        cfg.dt,
        start_time,
        end_time,
        cfg.dz,
        cfg,
        zind,
    )

    # Calculate sapflux and aboveground storage
    H_above, trans_2d_tree = format_inputs(nc_output["ds_canopy"], cfg.mean_crown_area_sp)

    ds_sapflux = calc_sapflux(H_above, trans_2d_tree, cfg)

    nc_output["sapflux"] = ds_sapflux

    ####################### Save model outputs ###################################
    logger.info("Saving outputs")
    # save_csv(output_dir, df_waterbal, df_EP)

    logger.info("Finished running species: %s", species)
    return nc_output
if __name__ == "__main__":
main()
|
15,214 | a7e8b1bae0e3ab0be7c09a5e66923b0c563a0a87 | import numpy as np
import pandas as pd
import pandas_datareader as web
import statsmodels.api as sm
from statsmodels.regression.rolling import RollingOLS
from collections import OrderedDict
import streamlit as st
"""
To run from command line, install streamlit, then type:
streamlit run oil.py
"""
tickers = ["USO",
'XOM',
"BOKF",
"CADE",
"CFR",
"IBTX",
"FHN",
"CMA",
"EWBC",
"ZION"]
stocks = web.get_data_yahoo(tickers,
start = "2015-01-01",
end = "2020-03-06")
daily_returns = stocks['Adj Close'].pct_change().reset_index()
daily_returns = daily_returns.dropna()
df_out = pd.DataFrame()
for item in daily_returns.columns:
if item is not "Date" and item is not "USO" and item is not "index":
endog = daily_returns[item]
exog = sm.add_constant(daily_returns['USO'])
rols = RollingOLS(endog, exog, window=60)
rres = rols.fit()
df_rolling = rres.params
daily_returns['index'] = daily_returns.index
df_rolling['index'] = df_rolling.index
daily_betas = pd.merge(daily_returns,df_rolling,how = 'inner',left_on='index',right_on='index')
daily_betas[item] = daily_betas['USO_y']
daily_betas = daily_betas[['Date',item]]
if len(df_out) == 0:
df_out = daily_betas
else:
df_out = pd.merge(df_out,daily_betas,how='inner',left_on='Date',right_on='Date')
list_out = []
df_stats = pd.DataFrame()
for col in df_out.columns:
if "Date" not in col:
stock_return = daily_returns[col].tail(1).iloc[0]
index_return = daily_returns['USO'].tail(1).iloc[0]
stock_beta = df_out[col].tail(1).iloc[0]
alpha = float(stock_return-index_return*stock_beta)
data = [['Current 60 day beta vs USO',df_out[col].tail(1).iloc[0]],
['mean',df_out[col].mean()],
['std',df_out[col].std()],
['actual 1 day return',stock_return],
['estimated 1 day return',index_return*stock_beta],
['alpha',alpha]]
r = pd.DataFrame(data,columns=['stat',col])
list_out.append(r)
for r in list_out:
if len(df_stats) == 0:
df_stats = r
else:
df_stats = pd.merge(df_stats,r,how='inner',left_on = 'stat',right_on='stat')
import matplotlib.pyplot as plt
#%matplotlib inline
x_data = df_out['Date']
fig, ax = plt.subplots()
for column in df_out.columns:
if "Date" not in column:
ax.plot(x_data, df_out[column],label=column)
ax.set_title('Rolling Betas Vs OIL ETF ')
ax.legend()
st.write(fig)
st.write(df_stats)
|
15,215 | 76acb2de81774cf981d445641e3df0112485420a | import tensorflow as tf
#定义一个变量用于计算滑动平均,这个变量的初始值为0,注意这里手动指定了变量的类型为tf.float32,因为所有需要计算滑动平均的变量必需是实数型
v1 = tf.Variable(0,dtype=tf.float32)
#这里step变量模拟神经网络中迭代的轮数,可以用于动态控制衰减率
step = tf.Variable(0, trainable=False)
#定义一个滑动平均的类(class),初始化时给定了衰减率(0.99)和控制衰减率的变量step
ema = tf.train.ExponentialMovingAverage(0.99,step)
# -*- coding:utf-8 -*
import tensorflow as tf
# 定义一个变量用于计算滑动平均,这个变量的初始值为0,注意这里手动指定了变量的类型为tf.float32,因为所有需要计算滑动平均的变量必须是实数型
v1 = tf.Variable(0, dtype=tf.float32)
# 这里step变量模拟神经网络中迭代的轮数,可以用于动态控制衰减率
step = tf.Variable(0, trainable=False)
# 定义一个滑动平均的类(class),初始化时给定了衰减率(0.99)和控制衰减率的变量step
ema = tf.train.ExponentialMovingAverage(0.99, step)
# 定义一个更新变量滑动平均的操作,这里需要给定一个列表,每次执行这个操作时,这个列表中的变量都会被更新
maintain_averages_op = ema.apply([v1])
with tf.Session() as sess:
# 初始化所有变量
init_op = tf.global_variables_initializer()
sess.run(init_op)
# 通过ema.average(v1)获取滑动平均之后变量的取值,在初始化之后变量v1的值和v1的滑动平均都为0
print sess.run([v1, ema.average(v1)])
# 更新变量v1的值到5
sess.run(tf.assign(v1,5))
# 更新v1的滑动平均值,衰减率为min{0.99,(1+step)/(10+step)=0.1}=0.1,所以v1的滑动平均会被更新为0.1*0+0.9*5=4.5
sess.run(maintain_averages_op)
print sess.run([v1,ema.average(v1)])
# 更新step的值为10000
sess.run(tf.assign(step, 10000))
# 更新v1的值为10
sess.run(tf.assign(v1,10))
# 更新v1的滑动平均值,衰减率为min{0.99,(1+step)/(10+step)≈0.999}=0.99,所以v1的滑动平均会被更新为0.99*4.5+0.01*10=4.5
# 约等于符号mac上的快捷键是:alt键+x
sess.run(maintain_averages_op)
print sess.run([v1,ema.average(v1)])
# 再次更新滑动平均值,得到的新滑动平均值为 0.99*4.555+0.01*10=4.60945
sess.run(maintain_averages_op)
print sess.run([v1,ema.average(v1)])
|
15,216 | 656419e3df9dd1622270cb6589e0884033c46f3e | import unittest
from munch import Munch
from mock import patch, NonCallableMagicMock
from controller.array_action.array_mediator_ds8k import DS8KArrayMediator
from controller.array_action.array_mediator_ds8k import shorten_volume_name
from controller.array_action.array_mediator_ds8k import IOPORT_STATUS_ONLINE
from pyds8k.exceptions import ClientError, ClientException, NotFound
from controller.common import settings
import controller.array_action.errors as array_errors
from controller.array_action import config
from controller.common.node_info import Initiators
class TestArrayMediatorDS8K(unittest.TestCase):
    """Unit tests for DS8KArrayMediator with the pyds8k REST client mocked.

    Every test drives the mediator through `self.array`, whose underlying
    RESTClient is replaced by `self.client_mock`, so no network I/O occurs.
    """

    def setUp(self):
        """Patch RESTClient and preload canned system/volume responses."""
        self.endpoint = ["1.2.3.4"]
        self.client_mock = NonCallableMagicMock()
        # Patch the RESTClient class so constructing the mediator never
        # opens a real connection; the patch is undone via addCleanup.
        patcher = patch('controller.array_action.array_mediator_ds8k.RESTClient')
        self.connect_mock = patcher.start()
        self.addCleanup(patcher.stop)
        self.connect_mock.return_value = self.client_mock
        # Canned "get_system" reply describing a supported DS8K array
        # (bundle 87.51.x is above the minimum supported version).
        self.client_mock.get_system.return_value = Munch(
            {"id": "dsk array id",
             "name": "mtc032h",
             "state": "online",
             "release": "7.4",
             "bundle": "87.51.47.0",
             "MTM": "2421-961",
             "sn": "75DHZ81",
             "wwnn": "5005076306FFD2F0",
             "cap": "440659",
             "capalloc": "304361",
             "capavail": "136810",
             "capraw": "73282879488"
             }
        )
        # Canned volume reply reused across tests. NOTE(review): some tests
        # mutate this shared Munch (e.g. rename it); safe only because
        # setUp rebuilds it for every test.
        self.volume_response = Munch(
            {"cap": "1073741824",
             "id": "0001",
             "name": "test_name",
             "pool": "p0",
             }
        )
        self.array = DS8KArrayMediator("user", "password", self.endpoint)

    def test_shorten_volume_name(self):
        """Long names are truncated to 16 chars, keeping the prefix."""
        test_prefix = "test"
        test_name = "it is a very very long volume name"
        full_name = test_prefix + settings.NAME_PREFIX_SEPARATOR + test_name
        new_name = shorten_volume_name(full_name, test_prefix)
        # new name length should be 16
        self.assertEqual(len(new_name), 16)
        # the volume prefix should not be changed.
        self.assertTrue(new_name.startswith(test_prefix + settings.NAME_PREFIX_SEPARATOR))

    def test_connect_with_incorrect_credentials(self):
        """Auth error code BE7A002D maps to CredentialsError."""
        self.client_mock.get_system.side_effect = \
            ClientError("400", "BE7A002D")
        with self.assertRaises(array_errors.CredentialsError):
            DS8KArrayMediator("user", "password", self.endpoint)

    def test_connect_to_unsupported_system(self):
        """A bundle older than the minimum raises UnsupportedStorageVersionError."""
        self.client_mock.get_system.return_value = \
            Munch({"bundle": "87.50.34.0"})
        with self.assertRaises(array_errors.UnsupportedStorageVersionError):
            DS8KArrayMediator("user", "password", self.endpoint)

    def test_validate_capabilities_passed(self):
        """'thin' space efficiency is a supported capability."""
        self.array.validate_supported_capabilities(
            {config.CAPABILITIES_SPACEEFFICIENCY: config.CAPABILITY_THIN}
        )
        # nothing is raised

    def test_validate_capabilities_failed(self):
        """An unknown space-efficiency value is rejected."""
        with self.assertRaises(array_errors.StorageClassCapabilityNotSupported):
            self.array.validate_supported_capabilities(
                {config.CAPABILITIES_SPACEEFFICIENCY: "fake"}
            )

    def test_get_volume_with_no_context(self):
        """Lookup without a pool context cannot find the volume."""
        with self.assertRaises(array_errors.VolumeNotFoundError):
            self.array.get_volume("fake_name")

    def test_get_volume_with_pool_context(self):
        """Lookup by name within the volume's pool succeeds."""
        self.client_mock.get_volumes_by_pool.return_value = [
            self.volume_response,
        ]
        vol = self.array.get_volume(
            self.volume_response.name,
            volume_context={
                config.CONTEXT_POOL: self.volume_response.pool
            }
        )
        self.assertEqual(vol.volume_name, self.volume_response.name)

    def test_get_volume_with_long_name(self):
        """A too-long requested name matches its shortened stored form."""
        volume_name = "it is a very long name, more than 16 characters"
        short_name = shorten_volume_name(volume_name, "")
        volume_res = self.volume_response
        volume_res.name = short_name
        self.client_mock.get_volumes_by_pool.return_value = [volume_res, ]
        vol = self.array.get_volume(
            volume_name,
            volume_context={
                config.CONTEXT_POOL: self.volume_response.pool
            }
        )
        self.assertEqual(vol.volume_name, short_name)

    def test_get_volume_with_pool_context_not_found(self):
        """A name absent from the pool raises VolumeNotFoundError."""
        self.client_mock.get_volumes_by_pool.return_value = [
            self.volume_response,
        ]
        with self.assertRaises(array_errors.VolumeNotFoundError):
            self.array.get_volume(
                "fake_name",
                volume_context={
                    config.CONTEXT_POOL: self.volume_response.pool
                }
            )

    def test_create_volume_with_default_capabilities_succeeded(self):
        self._test_create_volume_with_capabilities_succeeded(False)

    def test_create_volume_with_thin_capabilities_succeeded(self):
        self._test_create_volume_with_capabilities_succeeded(True)

    def _test_create_volume_with_capabilities_succeeded(self, is_thin):
        """Shared body: creation passes tp='ese' for thin, 'none' otherwise."""
        self.client_mock.create_volume.return_value = self.volume_response
        self.client_mock.get_volume.return_value = self.volume_response
        name = self.volume_response.name
        size_in_bytes = self.volume_response.cap
        if is_thin:
            capabilities = {
                config.CAPABILITIES_SPACEEFFICIENCY: config.CAPABILITY_THIN
            }
            tp = 'ese'
        else:
            capabilities = {}
            tp = 'none'
        pool_id = self.volume_response.pool
        vol = self.array.create_volume(
            name, size_in_bytes, capabilities, pool_id,
        )
        self.client_mock.create_volume.assert_called_once_with(
            pool_id=pool_id,
            capacity_in_bytes=self.volume_response.cap,
            tp=tp,
            name='test_name',
        )
        self.assertEqual(vol.volume_name, self.volume_response.name)

    def test_create_volume_return_existing(self):
        """Creating an already-existing volume returns it instead of failing."""
        self.client_mock.get_volumes_by_pool.return_value = [
            self.volume_response,
        ]
        pool_id = self.volume_response.pool
        vol = self.array.create_volume(
            self.volume_response.name, "1", {}, pool_id,
        )
        self.assertEqual(vol.volume_name, self.volume_response.name)

    def test_create_volume_with_long_name_succeeded(self):
        """Creation shortens a too-long requested name before the REST call."""
        volume_name = "it is a very long name, more than 16 characters"
        short_name = shorten_volume_name(volume_name, "")
        volume_res = self.volume_response
        volume_res.name = short_name
        self.client_mock.create_volume.return_value = volume_res
        self.client_mock.get_volume.return_value = volume_res
        size_in_bytes = volume_res.cap
        capabilities = {}
        tp = 'none'
        pool_id = volume_res.pool
        vol = self.array.create_volume(
            volume_name, size_in_bytes, capabilities, pool_id,
        )
        self.client_mock.create_volume.assert_called_once_with(
            pool_id=pool_id,
            capacity_in_bytes=self.volume_response.cap,
            tp=tp,
            name=short_name,
        )
        self.assertEqual(vol.volume_name, short_name)

    def test_create_volume_failed_with_ClientException(self):
        """A generic client failure maps to VolumeCreationError."""
        self.client_mock.create_volume.side_effect = ClientException("500")
        with self.assertRaises(array_errors.VolumeCreationError):
            self.array.create_volume("fake_name", 1, {}, "fake_pool")

    def test_create_volume_failed_with_pool_not_found(self):
        """NotFound with code BE7A0001 maps to PoolDoesNotExist."""
        self.client_mock.create_volume.side_effect = NotFound("404", message="BE7A0001")
        with self.assertRaises(array_errors.PoolDoesNotExist):
            self.array.create_volume("fake_name", 1, {}, "fake_pool")

    def test_delete_volume(self):
        """Deletion uses the last 4 hex digits of the SCSI id as volume id."""
        scsi_id = "6005076306FFD3010000000000000001"
        self.array.delete_volume(scsi_id)
        self.client_mock.delete_volume.assert_called_once_with(volume_id=scsi_id[-4:])

    def test_delete_volume_failed_with_ClientException(self):
        self.client_mock.delete_volume.side_effect = ClientException("500")
        with self.assertRaises(array_errors.VolumeDeletionError):
            self.array.delete_volume("fake_name")

    def test_delete_volume_failed_with_NotFound(self):
        self.client_mock.delete_volume.side_effect = NotFound("404")
        with self.assertRaises(array_errors.VolumeNotFoundError):
            self.array.delete_volume("fake_name")

    def test_get_volume_mappings_failed_with_ClientException(self):
        """Client failures while listing hosts propagate unchanged."""
        self.client_mock.get_hosts.side_effect = ClientException("500")
        with self.assertRaises(ClientException):
            self.array.get_volume_mappings("fake_name")

    def test_get_volume_mappings_found_nothing(self):
        """Mappings for other volumes are ignored; result is empty."""
        volume_id = "0001"
        scsi_id = "6005076306FFD301000000000000{}".format(volume_id)
        self.client_mock.get_hosts.return_value = [
            Munch({
                "mappings_briefs": [{
                    "volume_id": "0000",
                    "lunid": "1",
                }]
            })
        ]
        self.assertDictEqual(self.array.get_volume_mappings(scsi_id), {})

    def test_get_volume_mappings(self):
        """A matching mapping yields {host_name: lun_as_int}."""
        volume_id = "0001"
        lunid = "1"
        host_name = "test_host"
        scsi_id = "6005076306FFD301000000000000{}".format(volume_id)
        self.client_mock.get_hosts.return_value = [
            Munch({
                "mappings_briefs": [{
                    "volume_id": volume_id,
                    "lunid": lunid,
                }],
                "name": host_name,
            })
        ]
        self.assertDictEqual(self.array.get_volume_mappings(scsi_id), {host_name: int(lunid)})

    def test_map_volume_host_not_found(self):
        self.client_mock.map_volume_to_host.side_effect = NotFound("404")
        with self.assertRaises(array_errors.HostNotFoundError):
            self.array.map_volume("fake_name", "fake_host")

    def test_map_volume_volume_not_found(self):
        """Error code BE586015 maps to VolumeNotFoundError."""
        self.client_mock.map_volume_to_host.side_effect = ClientException("500", "[BE586015]")
        with self.assertRaises(array_errors.VolumeNotFoundError):
            self.array.map_volume("fake_name", "fake_host")

    def test_map_volume_failed_with_ClientException(self):
        self.client_mock.map_volume_to_host.side_effect = ClientException("500")
        with self.assertRaises(array_errors.MappingError):
            self.array.map_volume("fake_name", "fake_host")

    def test_map_volume(self):
        """Mapping returns the assigned LUN as an int."""
        scsi_id = "6005076306FFD3010000000000000001"
        host_name = "test_name"
        self.client_mock.map_volume_to_host.return_value = Munch({"lunid": "01"})
        lun = self.array.map_volume(scsi_id, host_name)
        self.assertEqual(lun, 1)
        self.client_mock.map_volume_to_host.assert_called_once_with(host_name, scsi_id[-4:])

    def test_unmap_volume_host_not_found(self):
        self.client_mock.get_host_mappings.side_effect = NotFound("404")
        with self.assertRaises(array_errors.HostNotFoundError):
            self.array.unmap_volume("fake_name", "fake_host")

    def test_unmap_volume_volume_not_found(self):
        """No mapping on the host for the volume raises VolumeNotFoundError."""
        self.client_mock.get_host_mappings.return_value = []
        with self.assertRaises(array_errors.VolumeNotFoundError):
            self.array.unmap_volume("fake_name", "fake_host")

    def test_unmap_volume_failed_with_ClientException(self):
        volume_id = "0001"
        lunid = "1"
        host_name = "test_host"
        scsi_id = "6005076306FFD301000000000000{}".format(volume_id)
        self.client_mock.get_host_mappings.return_value = [
            Munch({
                "volume": volume_id,
                "lunid": lunid
            })
        ]
        self.client_mock.unmap_volume_from_host.side_effect = ClientException("500")
        with self.assertRaises(array_errors.UnMappingError):
            self.array.unmap_volume(scsi_id, host_name)

    def test_unmap_volume(self):
        """Unmapping passes the host name and the mapping's LUN id."""
        volume_id = "0001"
        lunid = "1"
        host_name = "test_host"
        scsi_id = "6005076306FFD301000000000000{}".format(volume_id)
        self.client_mock.get_host_mappings.return_value = [
            Munch({
                "volume": volume_id,
                "lunid": lunid
            })
        ]
        self.array.unmap_volume(scsi_id, host_name)
        self.client_mock.unmap_volume_from_host.assert_called_once_with(host_name=host_name, lunid=lunid)

    def test_get_array_fc_wwns_failed_with_ClientException(self):
        self.client_mock.get_fcports.side_effect = ClientException("500")
        with self.assertRaises(ClientException):
            self.array.get_array_fc_wwns()

    def test_get_array_fc_wwns_skip_offline_port(self):
        """Only WWPNs of online FC ports are returned."""
        wwpn1 = "fake_wwpn"
        wwpn2 = "offine_wwpn"
        self.client_mock.get_fcports.return_value = [
            Munch({
                "wwpn": wwpn1,
                "state": IOPORT_STATUS_ONLINE,
            }),
            Munch({
                "wwpn": wwpn2,
                "state": "offline",
            }),
        ]
        self.assertListEqual(self.array.get_array_fc_wwns(), [wwpn1])

    def test_get_array_fc_wwns(self):
        wwpn = "fake_wwpn"
        self.client_mock.get_fcports.return_value = [
            Munch({
                "wwpn": wwpn,
                "state": IOPORT_STATUS_ONLINE,
            })
        ]
        self.assertListEqual(self.array.get_array_fc_wwns(), [wwpn])

    def test_get_host_by_identifiers(self):
        """All initiator WWPNs matching a host yields (host, [fc])."""
        host_name = "test_host"
        wwpn1 = "wwpn1"
        wwpn2 = "wwpn2"
        self.client_mock.get_hosts.return_value = [
            Munch({
                "name": host_name,
                "host_ports_briefs": [{"wwpn": wwpn1}, {"wwpn": wwpn2}]
            })
        ]
        host, connectivity_type = self.array.get_host_by_host_identifiers(
            Initiators('', [wwpn1, wwpn2])
        )
        self.assertEqual(host, host_name)
        self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_type)

    def test_get_host_by_identifiers_partial_match(self):
        """A single matching WWPN is enough to identify the host."""
        host_name = "test_host"
        wwpn1 = "wwpn1"
        wwpn2 = "wwpn2"
        self.client_mock.get_hosts.return_value = [
            Munch({
                "name": host_name,
                "host_ports_briefs": [{"wwpn": wwpn1}, {"wwpn": wwpn2}]
            })
        ]
        host, connectivity_type = self.array.get_host_by_host_identifiers(
            Initiators('', [wwpn1, "another_wwpn"])
        )
        self.assertEqual(host, host_name)
        self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_type)

    def test_get_host_by_identifiers_not_found(self):
        """No overlapping WWPNs raises HostNotFoundError."""
        host_name = "test_host"
        wwpn1 = "wwpn1"
        wwpn2 = "wwpn2"
        self.client_mock.get_hosts.return_value = [
            Munch({
                "name": host_name,
                "host_ports_briefs": [{"wwpn": wwpn1}, {"wwpn": wwpn2}]
            })
        ]
        with self.assertRaises(array_errors.HostNotFoundError):
            self.array.get_host_by_host_identifiers(
                Initiators('', ["new_wwpn", "another_wwpn"])
            )
|
15,217 | 0dff14935f71bda4c7793951ccdcc7f07f3b72ab | import arcpy,os,math
inputFC = arcpy.GetParameter(0)
inputCenter = arcpy.GetParameter(1)
outputFC = arcpy.GetParameterAsText(2)
path=os.path.dirname(outputFC)
outputFC=os.path.basename(outputFC)
#path=r"D:\Benutzer\issh1011\Documents\ArcGIS\Default.gdb"
#arcpy.env.workspace = path
with arcpy.da.SearchCursor(inputCenter,"SHAPE@") as centercursor:
for row in centercursor:
xc = row[0][0].X
yc = row[0][0].Y
arcpy.env.overwriteOutput = True
sr = arcpy.Describe(inputFC).spatialReference
arcpy.CreateFeatureclass_management(path,outputFC,'POINT',template=inputFC,spatial_reference=sr)
arcpy.AddField_management(outputFC, "Distance",field_type="DOUBLE")
arcpy.AddField_management(outputFC, "Angle",field_type="DOUBLE")
def quarter(dx, dy):
    """Return the Cartesian quadrant (1-4) of the offset (dx, dy).

    Axis-aligned offsets (dx == 0 or dy == 0) fall into quadrant 4,
    exactly as the original final `else` branch did.
    """
    if dy > 0:
        if dx > 0:
            return 1
        if dx < 0:
            return 2
        return 4
    if dx < 0 and dy < 0:
        return 3
    return 4
def calculate(x, y):
    """Return (angle, distance) of point (x, y) relative to the center.

    The center (xc, yc) is read from module globals set by the cursor
    loop above. `angle` is the counter-clockwise bearing from the
    positive x-axis in degrees, normalised to [0, 360); `distance` is
    the Euclidean distance.

    BUG FIX: the original computed math.atan(dy/dx) plus quadrant
    corrections, which (a) raised ZeroDivisionError for points directly
    above/below the center (dx == 0), and (b) returned 360 instead of 0
    for due-east and 360 instead of 180 for due-west points. math.atan2
    handles all axis cases and is identical in the four open quadrants.
    """
    dx = x - xc
    dy = y - yc
    distance = math.hypot(dx, dy)
    angle = math.atan2(dy, dx)
    if angle < 0:
        angle += 2 * math.pi  # map (-pi, 0) up into [0, 2*pi)
    return math.degrees(angle), distance
# For every input point, compute its polar coordinates about (xc, yc)
# and insert the point plus Angle/Distance into the output feature class.
# NOTE(review): the InsertCursor is never closed/deleted — on a locked
# geodatabase this can leave a schema lock; consider `del outputcursor`.
outputcursor=arcpy.da.InsertCursor(outputFC,["SHAPE@","Angle","Distance"])
with arcpy.da.SearchCursor(inputFC,"SHAPE@") as cursor:
    for row in cursor:
        x=row[0][0].X
        y=row[0][0].Y
        a,d=calculate(x,y)
        print a,d
        p=arcpy.Point(x,y)
        # `list` is built but unused; insertRow takes the tuple directly.
        list=[p,a,d]
        outputcursor.insertRow((p,a,d))
        #outputcursor.insertRow()
        #outputcursor.insertRow()
print "Successfully completed"
|
15,218 | 4597b0d2e3499f4a3910b5ca35805d7b85d13e15 | # Créez une liste "x" de 4 tuples de forme (x, y)
listx=[("a","b","c","d")]
print(listx)
listx=[("a","b","c","d"),"a"]
print(listx)
listx=[("a","b","c","d"),"a"]
listx.insert(2,"b")
print(listx)
listy=[1, 2, 3]
listx.extend(listy)
print(listx)
listx.insert(4,2)
print(listx)
del listx[4]
print(listx)
print(listy)
listz=listy[:]
print(listz)
del listy [0]
del listy [1]
del listy [0]
print(listy)
del listz
|
15,219 | ccbf5946bf6b2fc437a7294686efe7e25456988a | #!/usr/bin/env python
"""
'multimutect.py', by Sean Soderman
Parallelizer for MuTect.
"""
from itertools import izip
from synchrom import Synchrom
from time import time
import argparse
import multiprocessing
import os
import re
import subprocess
import sys
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError as I:
sys.stderr.write("Please import the required modules: {}\n".format(I))
"""
Diagnostic function for the program.
Tests whether everything is working as it should, utilising prints.
Takes a Synchrom object and returns None.
"""
def diagnostic(synchrom):
for item in synchrom.commands:
print('My command is {}'.format(item))
if __name__ == '__main__':
    # Command-line interface: input BAM pairs, reference, MuTect location
    # and options, I/O directories, parallelism and statistics reporting.
    parser = argparse.ArgumentParser(description='MuTect parallelizer')
    #Create a group for both the file of bamfiles and cmd line
    #list of bamfiles.
    file_group = parser.add_mutually_exclusive_group(required=True)
    file_group.add_argument('-b', '--bamlistfile', type=str,
                            help='File containing tumor:normal pairs for MuTect')
    file_group.add_argument('-p', '--pairs', type=str, nargs='*',
                            help=('List of arguments specifying tumor:normal'
                                  ' filename pairs.'))
    parser.add_argument('-f', '--fasta', type=str,
                        help='FASTA formatted reference sequence',
                        required=True)
    #Group for either conf file for MuTect specific args or
    #command line for them.
    cmd_group = parser.add_mutually_exclusive_group(required=False)
    parser.add_argument('-m', '--mupath', type=str, default='mutect.jar',
                        help=('The path to the MuTect jar file.'
                              ' Looks for a file named mutect.jar'
                              ' in the current working directory'
                              ' by default'))
    cmd_group.add_argument('-M', '--mutectopts', type=str, default='',
                           help='Extra parameters specific to MuTect')
    cmd_group.add_argument('-c', '--conf', type=str, default='',
                           help=('File containing extra parameters'
                                 ' specific to MuTect'))
    parser.add_argument('-i', '--inputdir', type=str, default=os.getcwd(),
                        help=('The name of the directory the input files'
                              ' are located. Default: working directory.'))
    parser.add_argument('-o', '--outputdir', type=str, default='output',
                        help=('The name of the directory the output should go'
                              ' to. Default: a directory called "output"'))
    parser.add_argument('--numthreads', type=int,
                        default=multiprocessing.cpu_count() // 4,
                        help=('The number of threads that will fork mutect'
                              ' processes. Default: The # of cores on your'
                              ' computer / 4, rounded down.'))
    parser.add_argument('--mem', type=int, default=3,
                        help=('The max amount of memory each forked MuTect'
                              ' process can allocate on the Java heap'
                              ' Default: 2'))
    parser.add_argument('--process_whole_bam', action='store_true',
                        help=('Process the entire BAM file at once instead '
                              'of single chromosomes at a time'))
    parser.add_argument('--statistics', type=str,
                        help=('Report statistics on execution time and '
                              ' threads used.'))
    args = parser.parse_args()
    if not os.path.exists(args.mupath):
        sys.stderr.write('Error: path to {} does not exist. cwd: {}\n'
                         .format(args.mupath, os.getcwd()))
        sys.exit(1)
    #Create the threads and the parent output directory.
    numthreads = args.numthreads

    #Mini function: execute the command, surround in try except.
    # Worker run by the thread pool: takes (thread_id, command_string),
    # runs the command, and on failure logs details to errors/thread<id>.err.
    def procfun(dtuple):
        tid, cmd = dtuple
        try:
            cmdlist = cmd.split()
            val = subprocess.check_output(cmdlist)
            print('tid: {}, the cmd is: {}'.format(tid, cmd))
        except subprocess.CalledProcessError as cpe:
            errfilepath = ''
            if not os.path.exists('errors'):
                try:
                    os.mkdir('errors')
                except OSError as O:
                    sys.stderr.write("Couldn't make directory 'errors':{}{}".
                                     format(O, os.linesep))
            errfilepath = os.path.join('errors',
                                       'thread{}.err'.format(tid))
            #Log error to a file rather than write to stderr.
            with open(errfilepath, 'w') as errfile:
                errfile.write(('I crashed with the command line:{}'
                               ' {}.{} You may need to use the default '
                               ' option for individual'
                               ' chromosome processing instead, '
                               ' or a command line '
                               ' accomodating more '
                               ' memory for the Java heap.'
                               ' The specific problem was {}\n'
                               ).format(os.linesep, cmd, os.linesep, cpe))
            return 'Thread {} executed unsuccessfully'.format(tid)
        return 'Thread {} executed successfully'.format(tid)

    #Mini function #2: Generator function for infinite numeric sequence.
    # Used to label each submitted command with a unique thread id.
    def infinigen():
        i = 0
        while True:
            yield i
            i += 1

    synchrom = Synchrom(args)
    infinity = infinigen()
    start_time = 0
    end_time = 0
    # Fan the generated MuTect commands out over the thread pool; iterating
    # `results` drives the lazy map and blocks until each worker finishes.
    with ThreadPoolExecutor(max_workers=numthreads) as threader:
        results = threader.map(procfun, izip(infinity, synchrom.commands))
        start_time = time()
        for i in results:
            print i
        end_time = time()
    # Optional statistics file: records core count and total BAM bytes on
    # first use, then appends one "<threads>\t<seconds>" row per run.
    if args.statistics is not None:
        statfile = args.statistics
        bam_gigs = 0
        cpu_cores = multiprocessing.cpu_count()
        #Gather data for initial run.
        if not os.path.exists(statfile):
            bams = []
            if args.bamlistfile is not None:
                with open(args.bamlistfile, 'r') as blf:
                    bams = [re.split('\s+', b.strip()) for b in blf
                            if re.search('.*bam', b)]
            else:
                bams = [b.split(':') for b in args.pairs]
            #Flatten the list and remove empty strings.
            bams = [i for pair in bams for i in pair if i != '']
            #Prepend input directory name to each bam filename in the list.
            bams = [os.path.join(args.inputdir, b) for b in bams]
            #Attain the size (in bytes) of the processed BAM data.
            bam_gigs = sum([os.stat(b).st_size for b in bams])
        #stats.txt is opened in append mode, as it will take mult.
        #runs to get data for thread performance.
        with open(statfile, 'a') as filestats:
            #Initialize the file if it is of size zero.
            if os.stat(statfile).st_size == 0:
                filestats.write('CPU cores: {}\n'.format(cpu_cores))
                filestats.write('Total BAM data processed: {}\n'
                                .format(bam_gigs))
                filestats.write('Threads\tTime\n')
            thread_and_time = '{}\t{}\n'.format(numthreads, end_time - start_time)
            filestats.write(thread_and_time)
|
15,220 | b7d431ae6399d30dbc9f605546dde44d23a99393 | from twisted.trial import unittest
from tests.utils import make_sydent
class StartupTestCase(unittest.TestCase):
    """Test that sydent started up correctly"""

    def test_start(self):
        """A freshly built Sydent instance should run without raising."""
        instance = make_sydent()
        instance.run()
|
15,221 | af0f541201a2a5921f98982cf7d4d92321110626 | # model settings
# retinanet_obb_r50_fpn_2x.py
# R3Det rotated-box detector, mmdetection-style config.
model = dict(
    type='R3Det',
    pretrained='modelzoo://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + first stage during training
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5),
    rbbox_head=dict(
        type='R3Det_Head',
        num_classes=16,  # presumably 15 DOTA categories + background -- confirm
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        octave_base_scale=4,
        scales_per_octave=3,
        anchor_scales = [2 ** 0, 2 ** (2.0 / 3.0)], # originally 2**(1.0/3.0) included
        anchor_ratios=[1, 1 / 2, 2., 1 / 3., 3.], # orignally 5. ,1/5. also included
        # anchor_strides=[8, 16, 32, 64, 128],
        anchor_angles = [-90, -60, -30], # originally range(-90,0,15)
        target_means=[.0, .0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0, 1.0],
        num_refine=2,  # number of refinement stages
        with_module=False,
        vis_score= 0.4,
        filter_score= 0.05,
        nms_iou_th= 0.1,
        nms_pre = 1000,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)
        ))
# faster_rcnn_RoITrans_r50_fpn_1x_dota.py
train_cfg = dict(
    assigner=dict(
        type='MaxIoUAssignerRbbox',
        pos_iou_thr=0.5,
        neg_iou_thr=0.4,
        min_pos_iou=0,
        ignore_iof_thr=-1),
    # Per-refinement-stage pos/neg IoU thresholds (two entries, matching
    # num_refine=2 in the model above).
    refine_iou_p_th= [0.5, 0.6],
    refine_iou_n_th= [0.4, 0.5],
    base_size_list= [32, 64, 128, 256, 512],
    allowed_border=-1,
    pos_weight=-1,
    debug=False)
# Test-time filtering/NMS settings.
# BUGFIX: the original had a stray trailing comma after the closing paren,
# which made test_cfg a 1-tuple containing the dict instead of the dict itself.
test_cfg = dict(
    nms_pre=1000,        # keep at most this many boxes before NMS
    min_bbox_size=0,
    score_thr=0.05,      # drop detections below this confidence
    nms=dict(type='py_cpu_nms_poly_fast', iou_thr=0.1),
    max_per_img=2000)
dataset_type = 'DOTADataset'
data_root = 'data/dota1_1024/'
# ImageNet mean/std, applied in RGB order.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=1,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',
        img_prefix=data_root + 'trainval1024/images/',
        img_scale=(1024, 1024),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    # NOTE(review): val reuses the trainval annotations, so there is no
    # held-out validation split.
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',
        img_prefix=data_root + 'trainval1024/images',
        img_scale=(1024, 1024),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'test1024/DOTA_test1024.json',
        img_prefix=data_root + 'test1024/images',
        # ann_file=data_root + 'test1024_ms/DOTA_test1024_ms.json',
        # img_prefix=data_root + 'test1024_ms/images',
        img_scale=(1024, 1024),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
# R3det
optimizer = dict(type='SGD', lr=5e-4, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])  # decay LR at epochs 8 and 11 (of 12)
# NOTE(review): interval=12 with total_epochs=12 means only one checkpoint,
# saved after the final epoch.
checkpoint_config = dict(interval=12)
# yapf:disable
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/r3det'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
15,222 | 6dfdd776ab800727eaa4af00ddcc810e9704c8fa | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
class Operation():
    """Base node of the computation graph.

    On construction, registers itself with the module-global ``_default_graph``
    (installed by Graph.set_as_default) and wires itself as an output of each
    of its input nodes.
    """
    def __init__(self, input_nodes=[]):
        # NOTE(review): mutable default argument; harmless here because the
        # list is only iterated, never mutated -- but worth cleaning up.
        self.input_nodes = input_nodes
        self.output_nodes = []
        # for every node in the input, we want to append the particular
        # operation to the list of output nodes
        for node in input_nodes:  # for every node from which the "self" receives an input
            node.output_nodes.append(self)  # add itself as a output node to that input node
        _default_graph.operations.append(self)
    def compute(self):
        # Overridden by concrete operations (add, multiply, ...).
        pass
class add(Operation):
    """Addition node: output = x + y (scalars or numpy arrays)."""
    def __init__(self, x, y):
        super().__init__([x, y])  # [x, y] is a list of input nodes
    def compute(self, x_var, y_var):
        # Record concrete inputs for inspection, then add them.
        self.inputs = [x_var, y_var]
        return x_var + y_var
class multiply(Operation):
    """Element-wise multiplication node: output = x * y."""
    def __init__(self, x, y):
        super().__init__([x, y])  # [x, y] is a list of input nodes
    def compute(self, x_var, y_var):
        # Record concrete inputs for inspection, then multiply them.
        self.inputs = [x_var, y_var]
        return x_var * y_var
class matrix_multiply(Operation):
    """Matrix/dot-product node: output = x.dot(y).

    NOTE(review): requires x's runtime value to be a numpy array -- a plain
    Python list has no .dot() method.
    """
    def __init__(self, x, y):
        super().__init__([x, y])  # [x, y] is a list of input nodes
    def compute(self, x_var, y_var):
        self.inputs = [x_var, y_var]
        return x_var.dot(y_var)
class Placeholder():
    """Graph node whose value is supplied at run time via Session's feed_dict."""
    def __init__(self):
        self.output_nodes = []
        _default_graph.placeholders.append(self)
class Variable():
    """Graph node holding a changeable parameter value (e.g. a weight)."""
    def __init__(self, initial_value=None):
        self.value = initial_value
        self.output_nodes = []
        _default_graph.variables.append(self)
class Graph():
    """Container for the nodes of one computation graph.

    Nodes (Operation/Placeholder/Variable) register themselves into the
    lists below when constructed.
    """
    def __init__(self):
        self.operations, self.placeholders, self.variables = [], [], []

    def set_as_default(self):
        """Install this graph as the module-global registration target."""
        global _default_graph
        _default_graph = self
def traverse_postorder(operation):
    """Makes sure computations are done in the correct order.
    e.g. z = Ax + b -> Ax is done first, and then Ax + b
    """
    # Post-order DFS: recurse into an Operation's inputs before appending the
    # node itself, so every node appears after everything it depends on.
    nodes_postorder = []
    def recurse(node):
        if isinstance(node, Operation):
            for input_node in node.input_nodes:
                recurse(input_node)
        nodes_postorder.append(node)
    recurse(operation)
    return nodes_postorder
class Session():
    """Executes a computation graph.

    run() walks the graph in post-order so that every node's inputs are
    computed before the node itself.
    """
    def run(self, operation, feed_dict=None):
        """Compute and return the value of ``operation``.

        :param operation: the output node to evaluate
        :param feed_dict: maps each Placeholder to its concrete input value
        """
        # Was a mutable default ({}); None-sentinel avoids sharing across calls.
        feed_dict = feed_dict if feed_dict is not None else {}
        nodes_postorder = traverse_postorder(operation)
        for node in nodes_postorder:
            if isinstance(node, Placeholder):
                node.output = feed_dict[node]
            elif isinstance(node, Variable):
                node.output = node.value
            else:  # an Operation: evaluate it from its inputs' outputs
                node.inputs = [input_node.output for input_node in node.input_nodes]
                node.output = node.compute(*node.inputs)
            # Normalise plain lists to numpy arrays.
            # BUGFIX: original read ``node.ouput`` (typo), raising
            # AttributeError whenever a node produced a list.
            if isinstance(node.output, list):
                node.output = np.array(node.output)
        return operation.output
def sigmoid(z):
    """Logistic function: squash ``z`` into the open interval (0, 1)."""
    neg_exp = np.exp(-z)
    return 1 / (1 + neg_exp)
class Sigmoid(Operation):
    """Graph operation applying the logistic sigmoid to its input's value."""
    def __init__(self, z):
        super().__init__([z])
    def compute(self, z_val):
        # Same formula as the module-level sigmoid(); duplicated so it can
        # participate in the graph as an Operation.
        return 1 / (1 + np.exp(-z_val))
if __name__ == '__main__':
    # --- Demo 1: z = A*x + b on scalars --------------------------------
    g = Graph()
    g.set_as_default()
    A = Variable(10)
    b = Variable(1)
    x = Placeholder()
    y = multiply(A, x)
    z = add(y, b)
    # Post-order traversal guarantees A*x is evaluated before A*x + b.
    sess = Session()
    result = sess.run(operation=z, feed_dict={x: 10})
    # data = tuple of (points, labels)
    data = make_blobs(n_samples=50, n_features=2, centers=2, random_state=75)
    features = data[0]
    labels = data[1]
    plt.scatter(features[:, 0], features[:, 1])
    # --- Demo 2: a single neuron separating the two blobs --------------
    g = Graph()
    g.set_as_default()
    x = Placeholder()
    # BUGFIX: w's value must be an ndarray -- matrix_multiply calls .dot(),
    # which a plain Python list does not have.
    w = Variable(np.array([1, 1]))
    b = Variable(-5)
    z = add(matrix_multiply(w, x), b)
    a = Sigmoid(z)
    sess = Session()
    # BUGFIX: original line was a syntax error (``feed_dict=x: [8, 10]})``);
    # feed an ndarray so downstream math stays in numpy.
    sess.run(operation=a, feed_dict={x: np.array([8, 10])})
|
15,223 | 9dea205696cdc1804c554cb67533b73ceb7242bf | """Example of using hangups to send hangouts notifications to the Ergodox"""
import asyncio
import hangups
import serial
from libs.ergodox_infinity_display import ErgodoxInterface
# Path where OAuth refresh token is saved, allowing hangups to remember your
# credentials.
REFRESH_TOKEN_PATH = 'refresh_token.txt'
def main():
    """Main entry point: authenticate, connect, and forward events to the LCD."""
    init_screen()
    # Obtain hangups authentication cookies, prompting for username and
    # password from standard in if necessary.
    cookies = hangups.auth.get_auth_stdin(REFRESH_TOKEN_PATH)
    # Instantiate hangups Client instance.
    client = hangups.Client(cookies)
    # Fetch our own chat id so we can tell sent from received messages.
    loop = asyncio.get_event_loop()
    self_id = loop.run_until_complete(get_self_info(client))
    # Schedule get_event() for every state update once hangups has connected.
    # BUGFIX: asyncio.async() is deprecated (and removed in Python 3.10);
    # ensure_future() is the drop-in replacement.
    client.on_state_update.add_observer(
        lambda event: asyncio.ensure_future(get_event(event, client, self_id)))
    # Client.connect() does not return until Client.disconnect is called, or
    # hangups becomes disconnected.
    try:
        loop.run_until_complete(client.connect())
    except KeyboardInterrupt:
        client.disconnect()
@asyncio.coroutine
def get_self_info(client):
    ''' Get the self info for the currently authenticated user. '''
    # NOTE(review): client.connect() returns a coroutine that is not yielded
    # from here, so this call likely does nothing -- confirm against the
    # hangups API before relying on it.
    client.connect()
    self_info_request = hangups.hangouts_pb2.GetSelfInfoRequest(
        request_header=client.get_request_header(),
    )
    # Issue the RPC and pull our own chat id from the response.
    response = yield from client.get_self_info(
        self_info_request
    )
    client.disconnect()
    self_id = response.self_entity.id.chat_id
    return self_id
@asyncio.coroutine
def get_event(event, client, self_id):
    ''' Process a hangups 'on_state_update' message and update the ergodox
    screen. Any failure tears down the client so main()'s connect() returns
    instead of hanging. '''
    try:
        process_event(event, self_id)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intent without doing that.
        # Disconnect the hangups Client to make client.connect return.
        yield from client.disconnect()
def init_screen():
    ''' Clears the screen and turns it blue '''
    # NOTE(review): the close()/open() pair right after construction looks
    # like a port-reset idiom for the keyboard's serial device -- confirm.
    ser = serial.Serial('/dev/ttyACM0', 115200, timeout=0.5)
    ser.close()
    ser.open()
    dox = ErgodoxInterface(ser)
    dox.lcd_hex_color(0x00000C)  # dim blue backlight
    dox.clear()
    ser.close()
def process_event(event, self_id):
    ''' Process a hangups event as necessary.

    Incoming chat message: show the sender's name and colour the backlight
    by content. Our own sent message or a read watermark: reset the screen.
    '''
    # Open the serial connection to the ergodox (close/open resets the port).
    ser = serial.Serial('/dev/ttyACM0', 115200, timeout=0.5)
    ser.close()
    ser.open()
    dox = ErgodoxInterface(ser)
    # Determine notification type
    notification_type = event.WhichOneof('state_update')
    print("Recieved notification of type {}".format(notification_type))
    if notification_type == 'event_notification':
        # This is our message type
        if event.HasField('conversation'):
            # When we recieve a message
            if event.event_notification.event.sender_id.chat_id != self_id:
                sender = "Unknown"
                # Pick the first participant that isn't us as the sender name.
                for part in event.conversation.participant_data:
                    if part.id.chat_id != self_id:
                        sender = part.fallback_name
                        break
                # BUGFIX: the format string had two placeholders for three
                # arguments, silently dropping the conversation id.
                print("Message with {} ({}) with message id {}".format(sender,
                    event.event_notification.event.sender_id.chat_id,
                    event.conversation.conversation_id.id))
                print("Content: {}".format(event.event_notification.event.chat_message.message_content.segment[0].text))
                # Clear the screen and write the sender name to the top left
                dox.clear()
                dox.lcd.format_string(sender, 0, 24)
                # Color foosball messages a different color
                if 'foos' in event.event_notification.event.chat_message.message_content.segment[0].text:
                    dox.lcd.format_string('FOOSBALL REQUEST!!!', 0, 16)
                    dox.lcd_hex_color(0x0C0C00)  # yellow for foosball
                else:
                    dox.lcd_hex_color(0x0C0000)  # red for ordinary messages
                dox.send()
            # When we send a message
            else:
                init_screen()
                print("Message successfully sent.")
    # Message read notification
    elif notification_type == 'watermark_notification':
        # Currently only care about messages we read
        if event.watermark_notification.sender_id.chat_id == self_id:
            print("Conversation {} was read by you".format(event.watermark_notification.conversation_id.id))
            init_screen()
    # Close our serial connection
    ser.close()
class ChatEvent(object):
    """Lightweight record pairing an event type with the chat it came from."""

    def __init__(self, event_type, chat_id):
        self.chat_id = chat_id
        self.event_type = event_type
# Run the notifier only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
15,224 | 2f38fbc4fb46ad1623ed255281ad128354ae259e |
from django.conf.urls import url, include
from django.contrib import admin
from foodtaskerapp import views
from django.contrib.auth import views as auth_views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    # Admin and public landing page.
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.home, name='home'),
    # Restaurant auth ("resturant" spelling kept to match views/templates).
    url(r'^resturant/sign-in/$', auth_views.login,
        {'template_name': 'resturant/sign_in.html'}, name='resturant-sign-in'),
    url(r'^resturant/sign-out', auth_views.logout,
        {'next_page': '/'}, name='resturant-sign-out'),
    url(r'^resturant/$', views.resturant_home, name='resturant-home'),
    url(r'^resturant/sign-up', views.resturant_sign_up, name='resturant-sign-up'),
    # Restaurant dashboard pages.
    # BUGFIX: the following patterns were missing the leading '^' anchor, so
    # they matched anywhere inside a request path.
    url(r'^resturant/account/$', views.resturant_account, name='resturant-account'),
    url(r'^resturant/meal/$', views.resturant_meal, name='resturant-meal'),
    url(r'^resturant/meal/add/$', views.resturant_add_meal, name='resturant-add-meal'),
    url(r'^resturant/meal/edit/(?P<meal_id>\d+)/$', views.resturant_edit_meal, name='resturant-edit-meal'),
    url(r'^resturant/order/$', views.resturant_order, name='resturant-order'),
    url(r'^resturant/report/$', views.resturant_report, name='resturant-report'),
    #sign in / sign up / sign out
    #(r'^api/social/', include('rest_framework_social_oauth2.urls')),
    #/convert-token (sign in / sign up)
    #/remove-token (sign out)
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
15,225 | c38c5d12ea21f8e342c2de7c31337cec49a758f6 | from django import forms
from salesforce.SalesforceClient import SalesforceClient
class ReportForm(forms.Form):
    """Form validating that a Salesforce Account id exists."""
    sf_id = forms.CharField()

    def clean(self):
        """Validate sf_id against Salesforce; raise ValidationError if unknown."""
        cleaned_data = super(ReportForm, self).clean()
        sf_id = cleaned_data.get('sf_id')
        sfc = SalesforceClient()
        try:
            # Only the lookup's success matters; the payload is unused.
            sfc.sf.Account.get(sf_id)
        except Exception:
            # Was a bare ``except:``; Exception is still broad (any API error
            # reads as "no match") but no longer traps SystemExit et al.
            raise forms.ValidationError('No matching Salesforce ID')
        # BUGFIX: clean() must return the cleaned data dict.
        return cleaned_data
|
15,226 | c3f29c51fbb4ece54c0426c10d61c33798c522b6 | """
Module to read configuration files
"""
from configobj import ConfigObj, ConfigObjError, ParseError
from .config import (
HOLLANDCFG,
BaseConfig,
ConfigError,
load_backupset_config,
setup_config,
)
__all__ = ["HOLLANDCFG", "setup_config", "load_backupset_config", "BaseConfig"]
|
15,227 | efd2415b8ce639f6e01e51f71cf22b509b26f46f | import errno
import os
import shutil
import sys
def make_sure_dir_exists(path):
    """Create ``path`` (and any missing parents); an existing dir is fine."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return  # already there -- nothing to do
        raise
def _config_template():
    # sys.path[0] is the directory of the startup script; the template config
    # is expected to live next to it. NOTE(review): fragile if the package is
    # imported rather than run as a script.
    return os.path.join(sys.path[0], "darkwallet.cfg")
def make_sure_file_exists(filename):
    """Copy the darkwallet.cfg template into place unless the file exists."""
    if os.path.isfile(filename):
        return
    print("Initializing new darkwallet.cfg.")
    shutil.copyfile(_config_template(), filename)
def list_files(path):
    """Return the names (not paths) of the regular files directly in ``path``."""
    return [entry.name for entry in os.scandir(path) if entry.is_file()]
|
15,228 | ec51b1246e5ee4d9e8d5e0cf4413c286718869a7 | #!/usr/bin/python
__author__ = "Bassim Aly"
__EMAIL__ = "basim.alyy@gmail.com"
# Tutorial script: each section re-imports subprocess and demonstrates one
# way of spawning a process. Running it executes real commands (ifconfig,
# ping) on the host.
import subprocess
print(subprocess.Popen("ifconfig"))
# You can re-write the code and use the list
import subprocess
print(subprocess.Popen(["ifconfig"]))
# using a list, you can bypass additional args to the command
import subprocess
print(subprocess.Popen(["sudo", "ifconfig", "enp60s0:0", "10.10.10.2", "netmask", "255.255.255.0", "up"]))
# using shell=True to spawn the process from string
import subprocess
print(subprocess.Popen("sudo ifconfig enp60s0:0 10.10.10.2 netmask 255.255.255.0 up", shell=True))
# using the cwd
import subprocess
print(subprocess.Popen(["cat", "interfaces"], cwd="/etc/network"))
# using the Pipes
##Output
import subprocess
p = subprocess.Popen(["ping", "8.8.8.8", "-c", "3"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
print("""==========The Standard Output is==========
{}""".format(stdout))
print("""==========The Standard Error is==========
{}""".format(stderr))
##Input
# while True:
import subprocess
p = subprocess.Popen(["grep", "subprocess"], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate(
    input=b"welcome to subprocess module\nthis line is a new line and doesnot contain the require string")
print("""==========The Standard Output is==========
{}""".format(stdout))
print("""==========The Standard Error is==========
{}""".format(stderr))
# NOTE(review): subprocess.call() requires a command argument; as written
# this raises TypeError at runtime -- looks like leftover scratch code.
subprocess.call()
|
def cutTheSticks(ar):
    """Repeatedly cut the shortest stick length from every stick, printing the
    number of sticks remaining before each cut (HackerRank "Cut the Sticks").

    Mutates ``ar`` in place.
    """
    while max(ar) != min(ar):
        print(len(ar))
        shortest = min(ar)
        # Shorten every stick and discard the ones cut down to zero.
        ar[:] = [stick - shortest for stick in ar if stick > shortest]
    print(len(ar))
try:
    # First line: the stick count (read but unused -- len(ar) is used instead).
    n = int(input())
    # Second line: the stick lengths.
    ar = list(map(int, input().split()))
except:
    # NOTE(review): bare except also swallows EOFError/KeyboardInterrupt;
    # ValueError alone would match the error message below.
    print("ERROR! Enter numeric input only!")
    quit()
cutTheSticks(ar)
|
15,230 | c69e53558a98c9cd45125b1e64a255fcbeda9f7e | # program to find square of the number
# Print the squares of 0..N-1 for a limit N between 1 and 20.
# BUGFIX/security: eval() executed arbitrary user input; int() parses the
# number safely (and raises ValueError on garbage instead).
N = int(input("enter the limit"))
if 1 <= N <= 20:
    for i in range(N):
        print(i * i)
|
15,231 | dd3b389c63929b37b7152473e1c8d084b2fc2d04 | from django.contrib import admin
from .models import Movie, Showing, Theatre
# Register your models here.
# Expose the cinema models in the Django admin with default ModelAdmin options.
admin.site.register(Movie)
admin.site.register(Theatre)
admin.site.register(Showing)
|
15,232 | 40fa273c5a82bab805d38a05723bea382d4c7462 | strings = input().split()
# Repeat every word by its own length, then print the concatenation.
result = "".join(word * len(word) for word in strings)
print(result)
|
15,233 | fe9dc8c887b05a87d851743e42fd77ce975fb5e1 | # Lists - lists are "mutable"
# A list of friends; lists are mutable, so we can grow it in place.
friends = ['Amila', 'Samith', 'Daniel']
friends.append("Zusan")  # add a new friend at the end
print(friends)
# How many times an element appears in the list.
count = friends.count("Amila")
print(count)
# Number of elements currently in the list.
ln = len(friends)
print(ln)
|
15,234 | 9452d5de7d0a81c9edcf24054031db1ed7d985e0 | #-*- coding:utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import copy
import pprint
from crawl import Crawl
from wrapdb import Db
from names import names
# Sabesp data is available from 2003 onwards.
# TODO
"""
fazer isso rodar mais rapido
"""
# (TODO above, in Portuguese: "make this run faster".)
# Template record whose date fields are filled in by fillDict().
mainData = {names.year: None, names.month: None, names.day: None}
# Sabesp public reservoir-data endpoint.
URL = "http://www2.sabesp.com.br/mananciais/DivulgacaoSiteSabesp.aspx"
# Format used for DB file names, e.g. "2016-01-19".
date_str = '%Y-%m-%d'
# Earliest date with available data.
start_date = datetime(2003, 1, 1)
def fillDict(valDict, nowDate=None):
    """Return a deep copy of ``valDict`` with year/month/day set from ``nowDate``.

    ``nowDate`` defaults to the moment of the call.
    BUGFIX: the original default ``nowDate=datetime.now()`` was evaluated once
    at import time, so every later call silently reused that stale timestamp.
    """
    if nowDate is None:
        nowDate = datetime.now()
    copyDict = copy.deepcopy(valDict)
    copyDict[names.year] = nowDate.year
    copyDict[names.month] = nowDate.month
    copyDict[names.day] = nowDate.day
    return copyDict
class Slave(object):
    """Fetches daily Sabesp reservoir data via Crawl and persists it via Db."""

    def __init__(self):
        self.crawler = Crawl(URL)
        self.db = Db()

    def get_one(self, date):
        """
        Fetch the data for a single day and save it as db/<date>.json.

        date should be a datetime.datetime instance,
        >= 2003-01-01 and earlier than today.
        """
        now = datetime.now()
        now = datetime(now.year, now.month, now.day)  # truncate to midnight
        assert isinstance(date, datetime), 'date need to be datetime instance'
        assert date < now, 'date need to be less or equal than yesterday'
        assert date >= start_date, 'no data before \"2003-01-01\"'
        self.db.DBFILE = datetime.strftime(date, date_str)
        # BUGFIX: the original crawled the page twice (the first result was
        # assigned to an unused variable); fetch once and save that result.
        data = self.work(date)
        self.db.save_iter([data])

    def get_between(self, start, end):
        """
        Fetch data for every day in [start, end] and save it as
        db/<start>+<end>.json.

        start and end should be datetime.datetime instances;
        start >= 2003-01-01 and end earlier than today.
        """
        now = datetime.now()
        now = datetime(now.year, now.month, now.day)
        assert isinstance(start, datetime), 'start need to be datetime instance'
        assert isinstance(end, datetime), 'end need to be datetime instance'
        assert start < end, 'start need to be less than end'
        assert end < now, 'end need to be less or equal than yesterday'
        assert start >= start_date, 'no data before \"2003-01-01\"'
        strftime = datetime.strftime
        self.db.DBFILE = \
            strftime(start, date_str) + "+" + strftime(end, date_str)
        # write all the data in the file at once
        lst_dict = self._helper_get_between(start, end)
        self.db.save_iter(lst_dict)

    def _helper_get_between(self, start, end):
        # Lazily yield one day's data at a time, inclusive of both endpoints.
        day = timedelta(days=1)
        yield self.work(start)
        while start < end:
            start = start + day
            yield self.work(start)

    def work(self, time):
        """Crawl and return the data for a single day (``time``)."""
        assert isinstance(time, datetime)
        dayCat = fillDict(mainData, time)
        data = self.crawler.getForm(dayCat)
        return data
if __name__ == '__main__':
    # Ad-hoc manual run: fetch a single historical day.
    s = Slave()
    a = datetime(2016, 1, 19)  # NOTE(review): unused example date
    b = datetime(2004, 2, 22)
    s.get_one(b)
|
15,235 | 6fa57a62aa92d036cfba9a7dfa5dce31fcfae07c | from pros import pros_vision_function
from time import sleep
import cv2
import numpy as np
from PIL import Image
i = 0  # NOTE(review): unused counter -- likely leftover
# NOTE(review): the cascade is loaded but never used below.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Load the source image and convert it to grayscale.
input_img = cv2.imread('C:/Users/hp/downloads/trekar.jpg')
gray_input = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
inputimg = cv2.imwrite('gray_screen.jpg', gray_input)  # returns a bool; value unused
# Resize smoothly down to 16x16 pixels
Img = Image.open('gray_screen.jpg')
pixel_img_16 = Img.resize((16,16),resample=Image.BILINEAR)
pixel_img_16.save('test_16_test.jpg')
# equalize the histogram of the Y channel
pixel = cv2.imread('test_16_test.jpg')
img_yuv = cv2.cvtColor(pixel, cv2.COLOR_BGR2YUV)
img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
# convert the YUV image back to RGB format
img_equalized = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
# Write the final 16x16 equalized image into the game's asset folder.
cv2.imwrite('C:/Users/hp/Documents/test/Assets/Resources/trekar.png', img_equalized)
|
15,236 | 53bde934c39a2d795f856f4ef76f25e10e158593 | '''
Created on Nov 14, 2010
@author: pekka
'''
import unittest
from shallowspace.eventmanager import EventManager
from shallowspace.event import CharactorPlaceEvent, FreeSectorAction
from shallowspace.map import Sector, MapState
from shallowspace.constants import DIRECTION_UP, DIRECTION_LEFT, DIRECTION_DOWN, DIRECTION_RIGHT
from shallowspace.actors import Charactor
class MapTests(unittest.TestCase):
    # Placeholder: no Map-level tests written yet.
    pass
class MapStateTests(unittest.TestCase):
    """Tests for MapState's event-driven sector/actor bookkeeping."""
    def setUp(self):
        self.event_manager = EventManager()
    def testInit(self):
        """Test map state initalisation"""
        map_state = MapState(self.event_manager)
        self.assertEqual(map_state.event_manager, self.event_manager)
        # A fresh MapState registers itself in the default listener group.
        self.assertTrue(map_state in self.event_manager.listener_groups["default"].listeners)
        self.assertEqual(map_state.occupied_sectors_by_actor_id, {})
        self.assertEqual(map_state.actors_by_sector_id, {})
    def testCharactorPlaceNotification(self):
        """Test charactor place notification"""
        map_state = MapState(self.event_manager)
        charactor = Charactor(self.event_manager)
        charactor.sector = Sector()
        charactor_place_event = CharactorPlaceEvent(charactor)
        self.event_manager.post(charactor_place_event)
        # Placing a charactor must update both lookup tables.
        self.assertEqual(map_state.actors_by_sector_id[charactor.sector.sector_id], charactor)
        self.assertEqual(map_state.occupied_sectors_by_actor_id[charactor.charactor_id], charactor.sector)
    def testFreeSectorActionNotification(self):
        """Test free section action notification"""
        map_state = MapState(self.event_manager)
        sector = Sector()
        self.actionExecuted = False #TODO: this is no good
        def function(sector_is_free):
            if sector_is_free:
                self.actionExecuted = True
        callback_function = function
        free_sector_action = FreeSectorAction(sector, callback_function)
        self.event_manager.post(free_sector_action)
        # Empty sector: the callback fires with sector_is_free=True.
        self.assertTrue(self.actionExecuted)
        self.actionExecuted = False
        charactor = Charactor(self.event_manager)
        charactor.sector = sector
        map_state.occupied_sectors_by_actor_id[charactor.charactor_id] = charactor.sector
        self.event_manager.post(free_sector_action)
        # Occupied sector: the callback must not report it as free.
        self.assertFalse(self.actionExecuted)
class SectorTests(unittest.TestCase):
    """Tests for Sector initialisation and move legality."""
    def testInit(self):
        """Test sector initialisation"""
        sector = Sector()
        # A new sector has four (empty) neighbor and corner slots.
        self.assertEqual(len(sector.neighbors), 4)
        self.assertEqual(len(sector.corners), 4)
        for neighbor in sector.neighbors:
            self.assertEqual(neighbor, None)
        for neighbor in sector.corners:
            self.assertEqual(neighbor, None)
    def testMoveNotPossible(self):
        """Test illegal moves"""
        # With no neighbors, no direction is passable.
        sector = Sector()
        self.assertFalse(sector.move_possible(DIRECTION_UP))
        self.assertFalse(sector.move_possible(DIRECTION_RIGHT))
        self.assertFalse(sector.move_possible(DIRECTION_DOWN))
        self.assertFalse(sector.move_possible(DIRECTION_LEFT))
    def testMovePossible(self):
        """Test legal moves"""
        sector = Sector()
        # NOTE(review): xrange is Python-2-only; use range() if this suite
        # ever moves to Python 3.
        sector.neighbors = [Sector() for x in xrange(4)]
        self.assertTrue(sector.move_possible(DIRECTION_UP))
        self.assertTrue(sector.move_possible(DIRECTION_RIGHT))
        self.assertTrue(sector.move_possible(DIRECTION_DOWN))
        self.assertTrue(sector.move_possible(DIRECTION_LEFT))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
15,237 | c52d2436677d945d3ddd5b2c5c39d6dc4c034249 | from __future__ import print_function, absolute_import, division
import enum
import logging
import os
import typing as tp
import uuid
from logging import Logger
from satella.coding import silence_excs
from satella.files import DevNullFilelikeObject
from satella.instrumentation import Traceback
from .exception_handlers import BaseExceptionHandler
AsStreamTypeAccept = tp.Union[str, tp.IO, None, Logger, tp.Tuple[Logger, int]]
AsStreamTypeAcceptHR = tp.Union[str, tp.TextIO, Logger, tp.Tuple[Logger, int]]
AsStreamTypeAcceptIN = tp.Union[str, tp.BinaryIO]
class StreamType(enum.IntEnum):
    """How an AsStream target should be written to."""
    MODE_FILE = 0  # write to file (a path string was given)
    MODE_STREAM = 1  # a file-like object was provided
    MODE_DEVNULL = 2  # just redirect to /dev/null
    MODE_LOGGER = 3  # just a logger
class AsStream:
    """Normalises the many accepted dump targets behind one context manager."""
    __slots__ = ('o', 'human_readable', 'mode', 'file', 'level', 'logger')
    def __init__(self, o: AsStreamTypeAccept, human_readable: bool):
        """
        A stream to dump to
        :param o: stream, or a file name to use, or None to use /dev/null
        :param human_readable: whether the output should be human-readable
            or a pickle (False for pickle)
        """
        self.o = o
        self.human_readable = human_readable
        if isinstance(o, str):
            # A directory gets a random file name inside it; a plain path is
            # used as-is.
            if os.path.isdir(o):
                self.o = os.path.join(o, uuid.uuid4().hex)
            self.mode = StreamType.MODE_FILE
        elif isinstance(o, tuple) and isinstance(o[0], Logger):
            # (logger, level) pair: log at the requested level.
            self.mode = StreamType.MODE_LOGGER
            self.level = o[1]
            self.logger = o[0]
        elif isinstance(o, Logger):
            # Bare logger: default to ERROR level.
            self.mode = StreamType.MODE_LOGGER
            self.logger = o
            self.level = logging.ERROR
        elif hasattr(o, 'write'):
            self.mode = StreamType.MODE_STREAM
        elif o is None:
            self.mode = StreamType.MODE_DEVNULL
        else:
            raise TypeError('invalid stream object')
    def __enter__(self) -> tp.Union[tp.TextIO, tp.BinaryIO]:
        # NOTE(review): MODE_LOGGER falls through and returns None here;
        # callers (DumpToFileHandler) check .mode before entering -- confirm
        # no other caller relies on __enter__ for logger targets.
        if self.mode == StreamType.MODE_FILE:
            # Text mode + utf8 for human-readable dumps, binary for pickles.
            self.file = open(self.o, 'w' if self.human_readable else 'wb',
                             encoding='utf8' if self.human_readable else None)
            return self.file.__enter__()
        elif self.mode == StreamType.MODE_STREAM:
            return self.o
        elif self.mode == StreamType.MODE_DEVNULL:
            self.o = DevNullFilelikeObject()
            return self.o
    def __exit__(self, exc_type, exc_val, exc_tp):
        if self.mode == StreamType.MODE_FILE:
            return self.file.__exit__(exc_type, exc_val, exc_tp)
        elif self.mode == StreamType.MODE_STREAM:
            # Best-effort flush; streams without flush() are tolerated.
            with silence_excs(AttributeError):
                self.o.flush()
        elif self.mode == StreamType.MODE_DEVNULL:
            pass
class DumpToFileHandler(BaseExceptionHandler):
    """
    Write the stack trace to a stream-file.
    Note that your file-like objects you throw into that must support only .write() and optionally
    .flush()
    :param human_readables: iterable of either a file-like objects, or paths where
        human-readable files will be output. Also a logger can be put here, or a tuple
        of logger, logging level. Default logging level will be ERROR.
    :param trace_pickles: iterable of either a file-like objects, or paths where pickles with
        stack status will be output
    :raises TypeError: invalid stream
    """
    __slots__ = ('hr', 'tb')
    def __init__(self, human_readables: tp.Iterable[AsStreamTypeAcceptHR],
                 trace_pickles: tp.Iterable[AsStreamTypeAcceptIN] = None):
        super(DumpToFileHandler, self).__init__()
        # Wrap every target in AsStream unless the caller already did.
        self.hr = [AsStream(x, True)
                   if not isinstance(x, AsStream) else x
                   for x in human_readables]  # type: tp.List[AsStream]
        self.tb = [AsStream(x, False) if not isinstance(x, AsStream) else x for x in
                   trace_pickles or []]  # type: tp.List[AsStream]
    def handle_exception(self, type_, value, traceback) -> bool:
        try:
            tb = Traceback()
        except ValueError:
            return False  # no traceback, probably hit KeyboardInterrupt or SystemExit,
            # continue with it
        # Human-readable targets: loggers get a log record, everything else a
        # pretty-printed trace.
        for q in self.hr:
            if q.mode == StreamType.MODE_LOGGER:
                q.logger.log(q.level, str(value), exc_info=value, stack_info=tb.pretty_format())
            else:
                with q as f:
                    f.write('Unhandled exception caught: \n')
                    tb.pretty_print(output=f)
        # Pickle targets always receive the serialized traceback.
        for q in self.tb:
            with q as f:
                f.write(tb.pickle())
|
15,238 | 21394e88f381b61305ee94012aac4b98a9c753cd | #!/usr/bin/env python
# coding: utf-8
# ## Real state price predictor
# In[1]:
import pandas as pd
# In[2]:
housing = pd.read_csv("data.csv")
# In[3]:
housing.head()
# In[4]:
housing.info() #information about the data
# In[5]:
housing['CHAS'].value_counts() #counting the value
# In[6]:
housing.describe()
# In[7]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[8]:
# # for plotting histogram
import matplotlib.pyplot as plt
# housing.hist(bins=50,figsize=(20,20),facecolor='r')
# plt.show()
# ## train-test splitting
# In[9]:
# # for learning pursope
import numpy as np
# def split_tarin_test(data, test_ratio): # creating train and test sets , this is present in sklearn, but we can learn things
# np.random.seed(42) # to separate the train and test set
# shuffled = np.random.permutation(len(data)) # randomized the data
# test_set_size = int(len(data) * test_ratio) # getting the train data
# test_indices = shuffled[:test_set_size] #getting the test data
# train_indices = shuffled[test_set_size:] #getting data for training
# return data.iloc[train_indices],data.iloc[test_indices]
# In[10]:
# train_set, test_set = split_tarin_test(housing, 0.2)
# In[11]:
# print(f"Rows is train set : {len(train_set)}\n Rows is test set : {len(test_set)}") #creating the train and test set
# In[12]:
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(f"Rows is train set : {len(train_set)}\n Rows is test set : {len(test_set)}") #creating the train and test set
# ## StratifiedShuffledSplit for the features which are most important for predictions
# In[13]:
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index,test_index in split.split(housing,housing['CHAS']):
strat_train_set = housing.loc[train_index]
# --- Held-out test rows from the stratified split performed above ---------
strat_test_set = housing.loc[test_index]
# In[14]:
# start_test_set['CHAS'].value_counts()
# In[15]:
# strat_train_set ['CHAS'].value_counts()
# In[16]:
# Explore on a copy so the pristine training set stays untouched.
housing = strat_train_set.copy()
# ## Looking for correlations
# In[17]:
corr_matrix = housing.corr()
corr_matrix['MEDV'].sort_values(ascending = False) # 1 = strong positive correlation, +ve = will increase with MEDV
# -ve = will decrease with MEDV, -1 = strong negative correlation
# In[18]:
from pandas.plotting import scatter_matrix
attributes = ['MEDV', 'RM', 'ZN', 'LSTAT']
scatter_matrix(housing[attributes],figsize = (12,8))
# In[19]:
housing.plot(kind = "scatter", x= "RM",y = "MEDV", alpha = 0.9)
# ## Trying Out Attributes combination
# In[20]:
# Derived feature: tax burden per room.
housing['TAXRM'] = housing['TAX']/housing['RM']
housing.head()
# In[21]:
corr_matrix = housing.corr()
corr_matrix['MEDV'].sort_values(ascending = False) # 1 = strong positive correlation, +ve = will increase with MEDV
# -ve = will decrease with MEDV, -1 = strong negative correlation
# In[22]:
housing.plot(kind = "scatter", x= "TAXRM",y = "MEDV", alpha = 0.9)
# In[23]:
housing = strat_train_set.drop('MEDV',axis=1) # we are not taking the TAXRM column as we are taking strat_train_set which is the original training table
housing_labels = strat_train_set['MEDV'].copy()
# ## Missing Attributes
# In[24]:
# To take care of missing attributes we have three options :
# 1: Get rid of the missing data points if there is a small no of missing values
# 2: Get rid of the whole attribute if its relation with the output label is not that good
# 3: set the values to some value (0, mean or median)
# In[25]:
# a = housing.dropna(subset = ["RM"]) #option 1
# a.shape
# In[26]:
# housing.drop("RM",axis = 1) #option 2 axis=1 ie the column
# # original housing will remain unchanged
# In[27]:
median = housing["RM"].median() #option 3
# NOTE(review): fillna here is not in-place and its result is discarded;
# the actual imputation is performed by SimpleImputer below.
housing['RM'].fillna(median)
# original housing will remain unchanged
housing.shape
housing.describe() # before filling the missing attributes
# In[28]:
# there is a class in sklearn which can compute the median
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy = "median")
imputer.fit(housing)
# In[29]:
imputer.statistics_
# In[30]:
X = imputer.transform(housing)
housing_tr = pd.DataFrame(X, columns = housing.columns) # housing_tr --> transformed data set after filling the missing values
housing_tr.describe()
# ## Scikit-learn Design
# Primarily three types of objects in scikit-learn
# 1. Estimators - It estimates some parameter based on a dataset , Eg: imputer. It has a fit method and transform method. Fit method - fits the data set and calculates internal parameters
#
# 2. Transformers - transform method takes input and returns output based on the learnings from fit(). It also has a convenience function fit_transform() , which fits and transforms.
#
# 3. Predictors - LinearRegression model is an example, fit and predict are two common functions , it also gives us some score() function which will evaluate the prediction. Predictors will take numpy array as input
# ## Feature Scaling
# Primarily two types of feature scaling method
# 1. Min-max Scaling(Normalization):
#    (value - min ) / ( max - min )
#    sklearn provides a class called MinMaxScaler for this
#
# 2. Standardization:
#    (value - mean) / std
#    sklearn provides a class called StandardScaler for this
#
# ## Creating Pipeline
# In[31]:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
my_pipeline = Pipeline([ # pipeline takes a series of (name, transformer) steps
    ('imputer',SimpleImputer(strategy="median")),
    # ..... add as many steps as you want
    ('std_scaler', StandardScaler()),
])
# In[32]:
housing_num_tr = my_pipeline.fit_transform(housing) # housing_num_tr is a numpy array
# In[33]:
housing_num_tr.shape
# ## Selecting a desired model for real estate
# In[34]:
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
#model = LinearRegression()
#model = DecisionTreeRegressor()
model = RandomForestRegressor()
model.fit(housing_num_tr, housing_labels)
# In[35]:
# Small sanity check: predict on the first five training rows.
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
# In[36]:
prepared_data = my_pipeline.transform(some_data)
# In[37]:
model.predict(prepared_data)
# In[38]:
list(some_labels)
# ## Evaluating the Model
# In[39]:
from sklearn.metrics import mean_squared_error
housing_predictions = model.predict(housing_num_tr)
mse = mean_squared_error(housing_labels, housing_predictions)
rmse = np.sqrt(mse)
# In[40]:
rmse
# ## Using better evaluation Technique - Cross Validation
# In[41]:
from sklearn.model_selection import cross_val_score
# 10-fold CV; sklearn reports *negative* MSE for this scoring, hence the sign flip.
scores = cross_val_score(model, housing_num_tr,housing_labels,scoring="neg_mean_squared_error",cv=10 )
rmse_scores = np.sqrt(-scores)
# In[42]:
rmse_scores
# In[43]:
def print_scores(scores):
    """Pretty-print cross-validation scores with their mean and std.

    :param scores: array-like of RMSE scores (e.g. a numpy array)
    """
    for label, value in (
        ("scores are : ", scores),
        ("Mean :", scores.mean()),
        ("Standard Deviation: ", scores.std()),
    ):
        print(label, value)
# In[44]:
print_scores(rmse_scores)
# ## saving the model
# In[45]:
from joblib import dump,load
dump(model , 'DragonRealstate.joblib')
# ## Testing the model
# In[48]:
# Final evaluation on the held-out stratified test set.
X_test = strat_test_set.drop("MEDV",axis=1)
Y_test = strat_test_set["MEDV"].copy()
X_test_prepared = my_pipeline.transform(X_test)
final_predictions = model.predict(X_test_prepared)
final_mse = mean_squared_error(Y_test,final_predictions)
final_rmse = np.sqrt(final_mse)
#print(final_predictions,list(Y_test))
# In[47]:
final_rmse
# In[49]:
prepared_data[0]
# ## Using The Model
# In[50]:
from joblib import dump,load
import numpy as np
model = load('DragonRealstate.joblib')
# One already-prepared (imputed + scaled) feature row used as a smoke test.
features = np.array([[-0.43942006, 3.12628155, -1.12165014, -0.27288841, -1.42262747,
       -0.23782941, -1.31238772, 2.61111401, -1.0016859 , -0.5778192 ,
       -0.97491834, 0.41164221, -0.86091034]])
model.predict(features)
# In[ ]:
|
15,239 | 5970eb7b10cc92b695c6565335d7028e1e37c734 | import csv
import sys
# Print each member's name and group from the Big5-encoded roster CSV.
# NOTE(review): the absolute Windows path ties this script to one machine.
with open('D://PythonProject//drawMember//membersBig5.csv', 'rt', encoding='cp950') as f:
    r = csv.DictReader(f)
    for row in r:
        print(row['Name'], ' ', row['Group'])
|
15,240 | cf2d977c8caf7ac18e798b52dd15bd34658de428 |
import requests
from bs4 import BeautifulSoup
import csv
import time
import re
import traceback
# Newest-comments page for one Douban book; later pages are fetched with ?p=<page>.
URL = 'https://book.douban.com/subject/26943161/comments/new'
# ?p=2
def get_html():
    """Crawl the book's comment pages (1..248) and hand each page's HTML
    to parse_comments().

    Page 1 uses the bare URL; subsequent pages pass ?p=<page>.  Sleeps 5s
    between requests to stay polite.  Any failure aborts the crawl and is
    reported with a traceback.
    """
    try:
        page = 1
        user_agent = {'user-agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"}
        while True:
            if page == 1:
                mparams = ''
            else:
                mparams = {'p': str(page)}
            res = requests.get(URL, params=mparams, headers=user_agent)
            print('kylin url is ', res.url)
            if res.status_code == requests.codes.ok:
                parse_comments(res.text)
            else:
                # Raise on non-200 so the outer handler reports it.
                res.raise_for_status()
            page += 1
            time.sleep(5)
            if page >= 249:
                break
    except Exception as err:
        print('kylin error', err)
        traceback.print_exc()
def parse_comments(html):
    """Extract every comment on one Douban comment page and append it to the
    module-level ``csv_writer``.

    For each 'comment-item' entry the user name, cleaned comment text, vote
    count, star rating and date are extracted; missing pieces fall back to
    empty/zero values so one malformed item cannot abort the page.

    :param html: raw HTML of one comments page
    """
    soup = BeautifulSoup(html, 'lxml')
    comment_full = soup.find_all('li', 'comment-item')
    username_pattern = re.compile('.*')
    # NOTE(review): '|' inside a character class is a literal pipe, not
    # alternation; digits and '-' already cover date-like spans.
    dates_pattern = re.compile('[\d|-]{5,10}')
    for user_comment in comment_full:
        user = user_comment.find_all('a', title=username_pattern)
        if len(user) > 0:
            user = user[0]['title']
        else:
            user = ''
        comment = user_comment.find_all('p', 'comment-content')
        if len(comment) > 0:
            comment = comment[0].string
            # Strip newlines, spaces and '!' characters.
            comment = ''.join([x for x in comment if x not in '\n !'])
            # BUG FIX: str.replace returns a new string; the original call
            # discarded the result, so fullwidth commas were never
            # normalized to ASCII (which would break the CSV layout).
            comment = comment.replace('，', ',')
        else:
            comment = ''
        votes = user_comment.find_all('span', 'vote-count')
        if len(votes) > 0:
            votes = int(votes[0].string)
        else:
            votes = 0
        stars = user_comment.find_all('span', class_='user-stars')
        if len(stars) > 0:
            # class looks like ['user-stars', 'allstar40']; keep the number.
            stars = stars[0]['class'][1].split('allstar')[1]
        else:
            stars = 0
        stars = int(stars) // 10
        dates = user_comment.find_all('span', text=dates_pattern)
        if len(dates) > 0:
            dates = dates[0].string
        else:
            dates = ''
        print("write row : ", user, votes, stars, dates, comment)
        csv_writer.writerow([user, votes, stars, dates, comment])
def init_csv():
    """Create ./future_story.csv, write the header row and return the csv
    writer together with the open file handle (caller must close it).

    :return: tuple of (csv.writer, file object)
    """
    out_file = open('./future_story.csv', 'w', encoding='utf-8', newline='')
    writer = csv.writer(out_file, delimiter=',')
    writer.writerow(['user', 'vote', 'star', 'date', 'comments'])
    return writer, out_file
if __name__ == '__main__':
    # Open the output CSV first so parse_comments() can stream rows into it
    # via the module-level csv_writer, then crawl all comment pages.
    csv_writer, csv_file = init_csv()
    get_html()
    csv_file.close()
15,241 | 6a235a9537991534b712f9a4f00e30e3c6aadfa3 | """
.. module: lemur.endpoints.cli
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from flask_script import Manager
import arrow
from datetime import timedelta
from sqlalchemy import cast
from sqlalchemy_utils import ArrowType
from lemur import database
from lemur.extensions import metrics, sentry
from lemur.endpoints.models import Endpoint
manager = Manager(usage="Handles all endpoint related tasks.")
@manager.option(
    "-ttl",
    "--time-to-live",
    type=int,
    dest="ttl",
    default=2,
    help="Time in hours, which endpoint has not been refreshed to remove the endpoint.",
)
def expire(ttl):
    """
    Remove all endpoints that have not been recently updated.

    :param ttl: age threshold in hours; endpoints whose ``last_updated`` is
        older than ``now - ttl`` are deleted.
    """
    print("[+] Starting expiration of old endpoints.")
    try:
        now = arrow.utcnow()
        expiration = now - timedelta(hours=ttl)
        endpoints = database.session_query(Endpoint).filter(
            cast(Endpoint.last_updated, ArrowType) <= expiration
        )
        for endpoint in endpoints:
            print(
                "[!] Expiring endpoint: {name} Last Updated: {last_updated}".format(
                    name=endpoint.name, last_updated=endpoint.last_updated
                )
            )
            database.delete(endpoint)
            metrics.send("endpoint_expired", "counter", 1)
        print("[+] Finished expiration.")
    except Exception as e:
        # BUG FIX: the failure was captured to Sentry but never surfaced to
        # the operator, so the command appeared to succeed silently.
        print("[!] Expiration failed: {0}".format(e))
        sentry.captureException()
|
15,242 | ef87ab11987d77d1b3d763f6e5d4b34867589999 | # -*- coding:utf-8 -*-
# __author__ = 'dayinfinte'
from flask import request, jsonify, url_for
from ..models import Post
from .. import db
from . import api
@api.route('/posts/')
def get_posts():
    """Return every post as a JSON list under the 'posts' key."""
    serialized = [post.to_json() for post in Post.query.all()]
    return jsonify({'posts': serialized})
@api.route('/post/<int:id>')
def get_post(id):
    """Return one post by primary key as JSON; 404 if it does not exist."""
    post = Post.query.get_or_404(id)
    return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
def new_post():
    """Create a post from the JSON request body.

    Returns the created resource, HTTP 201, and a Location header pointing
    at the new post.
    """
    post = Post.from_json(request.json)
    db.session.add(post)
    db.session.commit()
    location = url_for('api.get_post', id=post.id, _external=True)
    return jsonify(post.to_json()), 201, {'Location': location}
@api.route('/posts/<int:id>', methods=['PUT'])
def edit_post(id):
    """Update a post's content from the JSON request body; 404 if missing."""
    post = Post.query.get_or_404(id)
    post.content = request.json.get('content', post.content)
    db.session.add(post)
    # BUG FIX: the change was added to the session but never committed, so
    # the update was silently discarded at the end of the request
    # (new_post() commits; this handler must as well).
    db.session.commit()
    return jsonify(post.to_json())
|
15,243 | c8a6b333f636800db76e2c0c57032e43f1261236 |
#! /usr/bin/env python
def take_child(str_arg):
    # Print the given string (via long_person) followed by the literal 'number'.
    long_person(str_arg)
    print('number')
def long_person(str_arg):
    # Echo the argument to stdout.
    print(str_arg)
if __name__ == '__main__':
    take_child('find_long_part_at_life')
|
15,244 | f07f6f85ee3bb8bbbe0282be3cd6604afb721bff | import json
import logging
from pprint import pprint
from time import sleep
import requests
from bs4 import BeautifulSoup
from elasticsearch import helpers
from elasticsearch import Elasticsearch
def search(es_object, index_name, search):
    """Run *search* (a query body) against *index_name* and pretty-print
    the raw Elasticsearch response.

    :param es_object: connected Elasticsearch client
    :param index_name: name of the index to query
    :param search: query body (dict or JSON string)
    """
    response = es_object.search(index=index_name, body=search)
    pprint(response)
def create_index(es_object, index_name):
    """
    Create *index_name* with the fixed 'physicians' document mapping, if it
    does not already exist.

    :param es_object: connected Elasticsearch client
    :param index_name: name of the index to create
    :return: True only when the index was created by this call
    """
    created = False
    """ index settings """
    settings = {
        "settings": {
            "number_of_shards": 1,
            "number_of_replicas": 0
        },
        "mappings": {
            "physicians": {
                # 'strict' rejects documents with fields missing from this mapping.
                "dynamic": "strict",
                "properties": {
                    "overview": {
                        "type": "text"
                    },
                    "full_name": {
                        "type": "text"
                    },
                    "years_of_practice": {
                        "type": "text"
                    },
                    "language": {
                        "type": "text"
                    },
                    "office_location": {
                        "type": "text"
                    },
                    "hospital_affiliation": {
                        "type": "text"
                    },
                    "specialties": {
                        "type": "text"
                    },
                    "education_and_medical_training": {
                        "type": "text"
                    },
                    "certification_and_licensure": {
                        "type": "text"
                    },
                }
            }
        }
    }
    try:
        if not es_object.indices.exists(index_name):
            # Ignore 400 means to ignore "Index Already Exist" error.
            es_object.indices.create(index=index_name, ignore=400, body=settings)
            print('Created Index')
            created = True
    except Exception as ex:
        print(str(ex))
    finally:
        # NOTE(review): returning from 'finally' swallows any in-flight
        # exception (including KeyboardInterrupt).
        return created
def store_record(elastic_object, index_name, record):
    """Index one document into *index_name*.

    Prints the client response (or the error) and reports success.

    :param elastic_object: connected Elasticsearch client
    :param index_name: target index name
    :param record: document body to store
    :return: True when indexing succeeded, False otherwise
    """
    try:
        outcome = elastic_object.index(index=index_name, doc_type='physicians', body=record)
        print(outcome)
    except Exception as ex:
        print('Error in indexing data')
        print(str(ex))
        return False
    # BUG FIX: the original used `finally: return is_stored`, which silently
    # swallows any non-Exception error (e.g. KeyboardInterrupt) and returns
    # True for it; returning on the normal path avoids that.
    return True
def connect_elasticsearch():
    """
    Connect to a local Elasticsearch server on port 9200.

    Waits (1s) for the cluster to reach at least 'yellow' health, prints
    whether the ping succeeded, and returns the client either way.

    :return: Elasticsearch client instance
    """
    _es = None
    # create an instance of elasticsearch and assign it to port 9200
    _es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
    # NOTE(review): cluster.health raises on timeout; that exception is not
    # handled here and will propagate to the caller.
    _es.cluster.health(wait_for_status='yellow', request_timeout=1)
    # pings the server and returns True if it gets connected.
    if _es.ping():
        print('Connected')
    else:
        print('It could not connect!')
    return _es
def parse(u):
    """Fetch one physician profile page and extract its fields as JSON.

    Sections missing from the page fall back to empty strings so a sparse
    profile still yields a complete record.

    :param u: profile page URL
    :return: JSON string with the extracted fields ('{}' on failure)
    """
    rec = {}
    # Defaults so a page missing any section cannot raise NameError when the
    # record dict is assembled below (previously unset names would crash).
    overview = full_name = years_of_practice = language = ''
    office_location = hospital_affiliation = specialties = ''
    education_and_medical_training = certification_and_licensure = ''
    try:
        r = requests.get(u, headers=headers)
        if r.status_code == 200:
            html = r.text
            soup = BeautifulSoup(html, 'lxml')
            # NOTE(review): '.a b' selects a <b>-like descendant of '.a'; if
            # both tokens are classes of the same element these selectors
            # should read '.a.b' — verify against the live page markup.
            overview_section = soup.select('.Raw-s14xcvr1-0 gXqFYO')
            full_name_section = soup.select('.sc-iwsKbI kjxnCg')
            years_of_practice_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')
            language_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')
            office_location_section = soup.select('.Paragraph-fqygwe-0 cojhks')
            hospital_affiliation_section = soup.select('.Paragraph-fqygwe-0 fwayNy')
            # BUG FIX: this result was bound to 'specialties', but the code
            # below tested the undefined name 'specialties_section',
            # raising NameError on every successful fetch.
            specialties_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')
            education_and_medical_training_section = soup.select('.EducationAndExperience__Item-xn5fll-0 bzYYRk')
            certification_and_licensure_section = soup.select('.Paragraph-fqygwe-0 bQPwuv')
            if overview_section:
                overview = overview_section[0].text.replace('"', '')
            if full_name_section:
                full_name = full_name_section[0].text
            if years_of_practice_section:
                years_of_practice = years_of_practice_section[0].text.strip().replace('"', '')
            if language_section:
                language = language_section[0].text.strip().replace('"', '')
            if office_location_section:
                office_location = office_location_section[0].text
            if hospital_affiliation_section:
                hospital_affiliation = hospital_affiliation_section[0].text.strip().replace('"', '')
            if specialties_section:
                specialties = specialties_section[0].text.replace('"', '')
            if education_and_medical_training_section:
                education_and_medical_training = education_and_medical_training_section[0].text
            if certification_and_licensure_section:
                certification_and_licensure = certification_and_licensure_section[0].text
            rec = {'overview': overview, 'full_name': full_name, 'years_of_practice': years_of_practice, 'language': language,
                   'office_location': office_location, 'hospital_affiliation': hospital_affiliation, 'specialties': specialties,
                   'education_and_medical_training': education_and_medical_training,
                   'certification_and_licensure': certification_and_licensure}
    except Exception as ex:
        print('Exception while parsing')
        print(str(ex))
    # BUG FIX: returning from 'finally' (as before) swallows in-flight
    # non-Exception errors; a plain return keeps the same happy/error paths.
    return json.dumps(rec)
if __name__ == '__main__':
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        'Pragma': 'no-cache'
    }
    logging.basicConfig(level=logging.ERROR)
    # Index page listing physician profile links for one state.
    url = 'https://health.usnews.com/doctors/city-index/new-jersey'
    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        html = r.text
        soup = BeautifulSoup(html, 'lxml')
        # NOTE(review): this selector chains several class tokens as
        # descendants; it likely matches nothing if the tokens belong to the
        # same elements ('.a.b' form) — verify against the live page.
        links = soup.select('.List__ListWrap-e439ne-0 hobCNJ .List__ListItem-e439ne-1 hgSqfk a .List__ListItem-e439ne-1 hgSqfk .s85n6m5-0-Box-cwadsP fVAhQS a')
        if len(links) > 0:
            es = connect_elasticsearch()
            for link in links:
                sleep(2)  # be polite between profile fetches
                result = parse(link['href'])
                if es is not None:
                    # create_index returns True only on first creation, so
                    # records are stored at most once per fresh index.
                    if create_index(es, 'physicians'):
                        out = store_record(es, 'physicians', result)
                        print('Data indexed successfully')
    es = connect_elasticsearch()
    if es is not None:
        # NOTE(review): 'aggs' nested inside 'query' is not a valid ES body;
        # aggregations belong at the top level — confirm intended query.
        search_object = {'_source': ['full_name'], "query": { "aggs": {"doctors":{"terms":{"field":"full_name"}}}}}
        search(es, 'physicians', json.dumps(search_object))
15,245 | 1d07feabf575dcc3372264678328eaffe30d44bf | __author__ = 'bourgeois'
import math
from math import pi
print("I was taught Python3 course. Hello bourgeois")
#CIRCLE
#Perimeter of a Circle
def circle_perimeter(radius):
    """Return the circumference of a circle.

    :param radius: the radius
    :return: the perimeter, in the same units as the radius
    """
    diameter = 2 * radius
    return pi * diameter
print("The perimeter of my circle is ", circle_perimeter(10))
#Area of a Circle
def circle_area(radius):
    """Calculate the area of a circle from its radius.

    :param radius: the radius
    :return: the area (units^2 of the radius)
    """
    # BUG FIX: the docstring used to sit *after* the return statement,
    # making it unreachable dead code instead of documentation.
    return pi * radius * radius
print("The area of my circle is ", circle_area(20))
#PARALLELOGRAM
#Perimeter of a parallelogram
def parallelogram_perimeter(side1, side2):
    """Return the perimeter of a parallelogram with the given side lengths."""
    half = side1 + side2
    return 2 * half
print (parallelogram_perimeter(2,4))
#Area of Parallelogram
def parallelogram_area(base, height):
    """Return the area (base * height) of a parallelogram."""
    return height * base
print (parallelogram_area(3,5))
|
15,246 | eecec84441514adfc6302435b0a73af928b950ae | import shutil
import logging
from pathlib import Path
from main import RUNNING
from f1.config import CACHE_DIR
"""Delete the contents of the ./cache directory.
Ensure the bot is not running. A new cache will be built when starting the bot.
"""
logger = logging.getLogger("f1-bot")
if __name__ == "__main__":
    # Refuse to delete the cache while the bot is live.
    if RUNNING:
        logger.warning("Bot is running. Exit the process or use /stop command.")
        exit()
    if Path.exists(CACHE_DIR):
        logger.warning("Removing cache directory...")
        try:
            shutil.rmtree(CACHE_DIR)
        except Exception as err:
            logger.error(f"Error removing cache\n{err}")
            exit()
    logger.info("Cache removed successfully!\nStart the bot with python -m main.py to build a new cache.")
    exit()
|
15,247 | 42ed07779a34ded965f71df87c45c2f21535430c | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
#defining functions to be used
tau = 2.2
#exponential function
def p(x):
return (1/tau)*np.exp(-1*x/tau)
#cumulative function of p(x)
def g(t):
return (1-np.exp(-t/tau))
#inverse of the cumulative function
def g_i(y):
return (-tau*np.log(1-y))
# --- Simulation parameters -------------------------------------------------
#number of data sets
datasets = 2500
#number of measurements in each data set
datalength = 1000
#array to hold the mean decay time of each data set
times = []
#counter for while loops
j = 0
### Loops for generating the random numbers
# 3 are written out, if statements determine which method will be used
# using the inverse cumulative method
if True:
    #loop to generate data sets
    while j <= datasets:
        # BUG FIX: 'print j' was Python-2-only syntax; print(j) behaves the
        # same under both Python 2 and 3.
        print(j)
        #creating an empty list to hold the numbers generated for each data set
        randomlist = []
        i = 0
        # NOTE(review): '<=' yields datalength+1 samples per set, unlike the
        # box method below which uses '<'; kept as-is to preserve results.
        while i <= datalength:
            #generating a random number between 0 and 1
            y = np.random.uniform()
            #scaling it to the range of the function
            y = y*1 # would be y*g(b) but p(x) is normalised to 1 so g(b) = 1
            #getting the inverse
            y_i = g_i(y)
            #adding the value to the list for this data set
            randomlist.append(y_i)
            i += 1
        #adding the mean of the generated values to the list of results
        times.append(np.mean(randomlist))
        j += 1
# using the box (accept/reject) method
if False:
    #min and max x values of the box
    a = 0.0
    b = 15
    #box max height
    fmax = 1
    while j <= datasets:
        print(j)
        randomlist = []
        i = 0
        while i < datalength:
            #generating a random number in the range a,b
            x1 = np.random.uniform()
            x1 = a + (b-a)*x1
            #getting the function height at this location
            y1 = p(x1)
            #generating a random number in the range 0, fmax
            y2 = np.random.uniform()
            y2 = fmax*y2
            #accept the x value if it falls under the curve, else retry
            if y2 < y1:
                randomlist.append(x1)
                i += 1
        times.append(np.mean(randomlist))
        j += 1
# using a numpy method to generate numbers
if False:
    while j <= datasets:
        print(j)
        # BUG FIX: this branch created 'listthing' but appended to the (then
        # undefined) 'randomlist', which would raise NameError if enabled.
        randomlist = []
        i = 0
        while i < datalength:
            randomlist.append(np.random.exponential(scale=tau))
            i += 1
        times.append(np.mean(randomlist))
        j += 1
# getting the mean time and printing it to the terminal
result_mean = np.mean(times)
print("Mean of the Simulated Times : %.3f microseconds" % result_mean)
print("Standard Deviation of the Simulated times: %.3f microseconds" % np.std(times))
print("True Decay Time : %.3f microseconds" % tau)
#writing the final data set to a file
file1 = open("muonfile.txt", 'w')
for item in randomlist:
    file1.write(str(item) + ", ")
file1.close()
#plotting the histogram of the final data set
y, x, _ = plt.hist(randomlist, 50)
plt.plot([tau, tau], [0, y.max()], 'r--', label="true decay time")
plt.plot([np.mean(randomlist), np.mean(randomlist)], [0, y.max()], 'g--', label="mean simulated decay time")
#plotting an example exponential function
x = np.arange(0, 16, 0.01)
exparr = y.max()*np.exp(-x/tau)
plt.plot(x, exparr, 'r-', linewidth=2, label="Example Exponential")
plt.title("Simulated Decay Times Histogram")
plt.xlabel("Decay Time(microseconds)")
plt.ylabel("Frequency")
plt.legend(fontsize=12)
plt.show()
#plotting the histogram of the per-data-set means
#number of bins
nbins = 50
#making and plotting the histogram, returns an array of the bin sizes (y) and other stuff
y, x, _ = plt.hist(times, nbins)
#plotting a line to represent the mean value of the results
plt.plot([result_mean, result_mean], [0, y.max()], 'g--', linewidth=2, label="Mean Decay Time")
plt.plot([tau, tau], [0, y.max()], 'r--', linewidth=1, label="True Decay Time")
#plotting a gaussian function roughly the size of the histogram
#sigma of the gaussian = sqrt of the variance of the results
sigma = np.sqrt(np.var(times))
x = np.arange(0, 5, 0.01)
# BUG FIX: matplotlib.mlab.normpdf was removed in matplotlib >= 3.1; the
# explicit normal-PDF expression below computes the identical values.
gauss = np.exp(-0.5*((x - tau)/sigma)**2) / (sigma*np.sqrt(2*np.pi))
plt.plot(x, y.max()/5*gauss, label='Example Gaussian')
plt.xlabel("Muon Lifetime (microseconds)")
plt.ylabel("Frequency")
plt.title("{}{}{}{}{}".format("Muon Lifetime Histogram (bins: ", nbins, " , Data Sets: ", datasets, ")"))
plt.xlim(1.8, 2.6)
plt.legend(fontsize=12)
plt.show()
15,248 | 625b855c2e4944b2ac9d158ddeda89b6309ca449 | from django import forms
class AddProposal(forms.Form):
    """Form used by alumni to propose a new project.

    Field declaration order is significant (Django renders fields in this
    order), so fields must not be reordered.
    """
    alum_current_institute = forms.CharField(max_length=50, required=True)
    alum_current_address = forms.CharField(widget=forms.Textarea,
                                           required=True, max_length=100)
    title = forms.CharField(
        max_length=60,
        required=True,
        widget=forms.TextInput(
            attrs={
                'placeholder': 'Eg. Analysis of Support Vector Machine Models'
            }))
    description = forms.CharField(
        widget=forms.Textarea(
            attrs={
                'placeholder': 'A short description of the project.'
            }),
        label="Description", max_length=200, required=True)
    duration = forms.DecimalField(decimal_places=2, max_digits=4,
                                  required=True,
                                  label="Estimated Duration(in months)")
    hours_week = forms.DecimalField(decimal_places=1, max_digits=2,
                                    required=True,
                                    label="Estimated Hours/Week")
    # NOTE(review): field name is misspelled ('prerequsites') but cannot be
    # renamed without updating every template/view that references it.
    prerequsites = forms.CharField(
        widget=forms.Textarea(
            attrs={'placeholder': 'Eg. Expertise in Java'}
        ),
        label="Prerequisites", max_length=200, required=True)
    outcome = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={'placeholder': "Eg. Research Proposal, Software Package"}
        ))
# For Alumni: Edit Proposal
class EditProposal(AddProposal):
    """Same fields as AddProposal, but title and institute are rendered
    read-only.

    NOTE(review): the 'readonly' widget attribute is client-side only —
    the server still accepts posted changes to these fields; confirm
    the view re-validates them.
    """
    title = forms.CharField(
        max_length=60, required=True,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    alum_current_institute = forms.CharField(
        max_length=50, required=True,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
# For Students: Apply to a proposal
class StudApply(forms.Form):
    """Form a student submits to apply for a proposal."""
    max_hours = forms.DecimalField(
        max_digits=2, decimal_places=1, required=True, label="Hours/Week",
        help_text="Maximum hours/week you can give."
    )
    writeup = forms.CharField(
        widget=forms.Textarea, required=True,
        help_text="Please write it carefully. It can be submitted only once.")
# Report
class ReportForm(forms.Form):
    """Form for filing a complaint/report."""
    reasons = forms.CharField(
        widget=forms.Textarea(
            attrs={'placeholder': 'Put your complaint here.'}
        ),
        label="Reasons", max_length=200, required=True,
        help_text="Please be clear and specific."
    )
|
15,249 | 97d85acf557d6c6c766c254574baf4b91d29dfc3 | '''
Created on 3 Apr 2016
@author: John Beard
'''
import requests
from urllib.parse import quote
from mnem import mnem
class RequestDataLoadError(mnem.MnemError):
    """Raised when a completion request fails to acquire required data."""

    def __init__(self, engine, url, query, exception=None):
        self.engine = engine          # loader that attempted the request
        self.url = url                # URL or pattern that was fetched
        self.query = query            # query string being completed
        self.exception = exception    # underlying exception, if any

    def __str__(self):
        parts = [str(self.engine), str(self.url), str(self.query)]
        if self.exception:
            parts.append(str(self.exception))
        return "\n".join(parts)
class RequestDataLoader(object):
    """Abstract base class for completion data loaders."""

    def load(self):
        """Perform the completion load — subclasses must override this."""
        raise NotImplementedError
class UrlCompletionDataLoader(RequestDataLoader):
    """Loader that interpolates a query string into a URL pattern and
    fetches the result over HTTP."""

    def __init__(self, pattern):
        # %-style pattern with one string placeholder for the quoted query.
        self.pattern = pattern

    def load(self, query):
        """Fetch the completion data for *query*.

        :param query: the query string to interpolate into the URL pattern
        :raises RequestDataLoadError: if the request (or interpolation) fails
        :return: the response body as text
        """
        # Present a browser-like User-Agent; some endpoints reject bare clients.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0',
        }
        try:
            data = requests.get(self.pattern % quote(query), headers=headers, timeout=5)
            data.close()
        except Exception as e:
            raise RequestDataLoadError(self, self.pattern, query, exception=e)
        return data.text
class FileCompletionLoader(RequestDataLoader):
    """Really simple loader returning completions from a fixed file.

    Probably mostly useful for tests.
    """

    def __init__(self, filename):
        # Path of the fixed file to read on every load().
        self.fn = filename

    def load(self, filename):
        """Return the whole contents of the configured file.

        The *filename* argument only mirrors the loader interface and is
        ignored; the file given at construction time is always read.
        """
        # BUG FIX (resource leak): use a context manager so the handle is
        # closed even if read() raises; the original left it open on error.
        with open(self.fn, 'r') as f:
            return f.read()
|
15,250 | 4376150d332e5cf996f9662a72839f5fff22a5ff | #!/usr/bin/env python2.7
import atexit
import logging
import sys
import threading
import time
from PyQt4 import QtGui
from biosignals.print_biosignal import PrintBiosignal
from biosignals.tagger import Tagger
from controller.MESSAGE import Message
from controller.controller import Controller
from controller.processor import Processor
from gui.dev_tools import DevTools
from openbci_board.board_setup import setup_parser, check_auto_port_selection, \
add_plugin, print_logging_info, print_plugins_found, print_board_setup
logging.basicConfig(level=logging.ERROR)
from yapsy.PluginManager import PluginManager
def make_gui(controller):
    """Create the Qt application, show the developer-tools window and run
    the event loop until the window closes (then exits via sys.exit).

    :param controller: Controller handed to the main window
    """
    app = QtGui.QApplication(sys.argv)
    # MindType(controller) is the end-user screen; DevTools is the
    # development UI currently in use.
    window = DevTools(controller)
    window.resize(500, 100)
    window.show()
    sys.exit(app.exec_())
def safe_exit(board, biosignals=None):
    """Stop board streaming (if active) and shut down every biosignal.

    :param board: board instance; stopped only when it is streaming
    :param biosignals: optional iterable of biosignal consumers to exit
    """
    if board.streaming:
        board.stop()
    # BUG FIX: the default biosignals=None was iterated unconditionally,
    # raising TypeError whenever the argument was omitted.
    for biosignal in biosignals or []:
        biosignal.exit()
def board_action(board, controller, pub_sub_fct, biosignal=None):
    """
    Reads one message from the controller and executes the required action
    on the board (start streaming, pause, ...).
    Args:
        board: OpenBCI board instance being driven.
        controller: Controller queue that the GUI/console writes Messages into.
        pub_sub_fct: callback list handed to board.start_streaming (may be None).
        biosignal: biosignal consumer forwarded to the streaming thread.
    Returns:
        None
    """
    message = controller.read()
    print("Incoming message: " + str(message))
    flush = False
    recognized = False  # whether the current command was recognized
    lapse = -1  # forwarded to start_streaming; presumably -1 = no time limit — verify
    if message is Message.START:
        board.setImpedance(False)
        # TODO: should we also add 'and not board.streaming'
        if pub_sub_fct is not None:
            # start streaming in a separate thread so we could always send commands in here
            boardThread = threading.Thread(
                target=board.start_streaming, args=(pub_sub_fct, lapse,
                [biosignal,]))
            boardThread.daemon = True  # will stop on exit
            try:
                boardThread.start()
                print("Starting stream...")
            except:
                # NOTE(review): bare except that only re-raises is a no-op.
                raise
        else:
            print ("No function loaded")
        recognized = True
    elif message is Message.PAUSE:
        board.stop()
        recognized = True
        flush = True
    # We shouldn't be waiting to get messages every single time a message
    # is sent to controller, because messages can be sent while the board is
    # still running.
    # TODO: Move this block of code under Message.PAUSE
    poll_board_for_messages(board, flush)
    if recognized == False:
        print("Command not recognized...")
def poll_board_for_messages(board, flush):
    """
    Drain any pending ASCII output from the board's serial link and echo it
    prefixed with '%'; when *flush* is set the bytes are read but discarded.
    """
    line = ''
    time.sleep(0.1)  # Wait to see if the board has anything to report
    # The Cyton nicely returns incoming packets -- here supposedly messages -- whereas the Ganglion prints incoming ASCII messages by itself
    if board.getBoardType() == "cyton":
        while board.ser_inWaiting():
            c = board.ser_read().decode('utf-8',
                                        errors='replace')  # we're supposed to get UTF8 text, but the board might behave otherwise
            line += c
            time.sleep(0.001)
            if (c == '\n') and not flush:
                print('%\t' + line[:-1])
                line = ''
    elif board.getBoardType() == "ganglion":
        while board.ser_inWaiting():
            board.waitForNotifications(0.001)
    if not flush:
        print(line)
def execute_board(board, controller, fun, biosignal, processor):
    """
    Main interactive loop: print usage info, then alternate between acting
    on queued controller messages and reading console commands until an
    EXIT message is seen; finally shuts the board and biosignal down.
    """
    print ("--------------INFO---------------")
    print ("User serial interface enabled...\n\
View command map at http://docs.openbci.com.\n\
Type /start to run (/startimp for impedance \n\
checking, if supported) -- and /stop\n\
before issuing new commands afterwards.\n\
Type /exit to exit. \n\
Board outputs are automatically printed as: \n\
% <tab> message\n\
$$$ signals end of message")
    print("\n-------------BEGIN---------------")
    # # Init board state
    # # s: stop board streaming; v: soft reset of the 32-bit board (no effect with 8bit board)
    # s = 'sv'
    # # Tell the board to enable or not daisy module
    # if board.daisy:
    #     s = s + 'C'
    # else:
    #     s = s + 'c'
    # # d: Channels settings back to default
    # s = s + 'd'
    while controller.peek() is not Message.EXIT:
        board_action(board, controller, fun, biosignal)
        user_control([controller, biosignal.controller, processor.controller])
    safe_exit(board, [biosignal,])
def user_control(controllers):
    """Read one console command and broadcast it to every controller.

    A None parse result is dropped rather than broadcast.
    """
    message = parse_user_input(get_user_input())
    if message is not None:
        send_msg_to_controllers(controllers, message)
def get_user_input():
    """Prompt on stdin and return the entered line (Python 2/3 compatible)."""
    prompt = '--> '
    if sys.hexversion > 0x03000000:
        return input(prompt)
    return raw_input(prompt)
def parse_user_input(s):
    """Map a raw console string to a controller Message.

    Slash commands map to Message members; any other non-None string is
    passed through verbatim (free-form board command); None stays None.
    """
    if s is None:
        return None
    if "/start" in s:
        return Message.START
    if "/stop" in s:
        return Message.PAUSE
    if "/exit" in s:
        return Message.EXIT
    return s
def send_msg_to_controllers(controllers, message):
    """Forward *message* to every controller in *controllers*."""
    for ctrl in controllers:
        ctrl.send(message)
def run_processor(processor):
    """Drive the processor loop until it reports Message.EXIT."""
    while True:
        message = processor.process()
        if message is Message.EXIT:
            break
if __name__ == '__main__':
    # VARIABLES-----------------------------------------------------------------
    manager = PluginManager()  # Load the plugins from the plugin directory.
    main_controller = Controller()
    # biosignal = PrintBiosignal()
    biosignal = Tagger("./test_results/data.csv")
    processor = Processor([biosignal])
    # SET UP GUI----------------------------------------------------------------
    # GUI runs on a daemon thread so a closed console also ends the GUI.
    gui_thread = threading.Thread(target=make_gui, args=[main_controller])
    gui_thread.daemon = True
    gui_thread.start()
    # SET UP BOARD--------------------------------------------------------------
    parser = setup_parser()
    args = parser.parse_args()
    if not(args.add):
        print ("WARNING: no plugin selected, you will only be able to communicate with the board. You should select at least one plugin with '--add [plugin_name]'. Use '--list' to show available plugins or '--info [plugin_name]' to get more information.")
    # Import the board API lazily, matching the requested hardware type.
    if args.board == "cyton":
        print ("Board type: OpenBCI Cyton (v3 API)")
        import openbci_board.open_bci_v3 as bci
    elif args.board == "ganglion":
        print ("Board type: OpenBCI Ganglion")
        import openbci_board.open_bci_ganglion as bci
    else:
        raise ValueError('Board type %r was not recognized. Known are 3 and 4' % args.board)
    # Check AUTO port selection, a "None" parameter for the board API
    check_auto_port_selection(args)
    # Collect plugins
    plugins_paths = ["plugins"]
    if args.plugins_path:
        plugins_paths += args.plugins_path
    manager.setPluginPlaces(plugins_paths)
    manager.collectPlugins()
    print ("\n------------SETTINGS-------------")
    print ("Notch filtering:" + str(args.filtering))
    # Logging
    print_logging_info(args, logging)
    print ("\n-------INSTANTIATING BOARD-------")
    board = bci.OpenBCIBoard(port=args.port,
                             daisy=args.daisy,
                             filter_data=args.filtering,
                             scaled_output=True,
                             log=args.log,
                             aux=args.aux)
    print_board_setup(board)
    print_plugins_found(manager)
    # Fetch plugins, try to activate them, add to the list if OK
    plug_list = []
    callback_list = []
    if args.add:
        for plug_candidate in args.add:
            # first value: plugin name, then optional arguments
            plug_name = plug_candidate[0]
            plug_args = plug_candidate[1:]
            add_plugin(manager, plug_name, plug_args, plug_list, callback_list,
                       board)
    if len(plug_list) == 0:
        fun = None
        print("No function loaded!")
    else:
        fun = callback_list
    def cleanUp():
        # Registered with atexit: disconnect hardware and deactivate plugins.
        board.disconnect()
        print ("Deactivating Plugins...")
        for plug in plug_list:
            plug.deactivate()
        print ("User.py exiting...")
    atexit.register(cleanUp)
    # EXECUTE APPLICATION-------------------------------------------------------
    process_thread = threading.Thread(target=run_processor, args=(processor,))
    process_thread.start()
    execute_board(board, main_controller, fun, biosignal, processor)
|
15,251 | 3fe511ef7f4d35db14569aa392a223fe7328685d | from django.shortcuts import render, redirect, get_object_or_404
# Create your views here.
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import login, logout, authenticate
from .models import Board
from django.contrib.auth.decorators import login_required
def index(request):
    # Landing page; no authentication required.
    return render(request, 'index.html')
@login_required
def edit(request, id):
    """Edit one of the current user's posts.

    GET renders the edit form; POST saves the submitted title/content.
    Users may only edit their own posts; anyone else is redirected home.
    """
    data = get_object_or_404(Board, pk=id)
    # NOTE: get_object_or_404 raises Http404 instead of returning None, so
    # the old `if data == None` guard was unreachable and has been removed.
    if data.user != request.user:
        return redirect("/")
    if request.method == "GET":
        context = {"data": data}
        return render(request, 'edit.html', context)
    else:
        data.title = request.POST["title"]
        data.content = request.POST["content"]
        data.save()
        return redirect("/list")
@login_required
def write(request):
    """Create a new post for the logged-in user.

    GET renders the form; POST persists the submitted post and redirects to
    the listing. Any failure is reported as a plain-text error response.
    """
    if request.method == "GET":
        return render(request, 'write.html')
    try:
        post = Board(title=request.POST["title"],
                     content=request.POST["content"],
                     user=request.user)
        post.save()
        return redirect("/list")
    except Exception as e:
        return HttpResponse("ERROR " + str(e))
@login_required
def delete(request, id):
    """Delete a post, but only when it belongs to the requesting user.

    Always redirects to the listing afterwards (attempts to delete someone
    else's post are silently ignored, as before).
    """
    data = get_object_or_404(Board, pk=id)
    # get_object_or_404 never returns None (it raises Http404), so the old
    # `if data != None` wrapper was dead code.
    if data.user == request.user:
        data.delete()
    return redirect("/list")
@login_required
def read(request, id):
    """Show a single post; the delete control is only offered to its owner."""
    data = get_object_or_404(Board, pk=id)
    # get_object_or_404 raises Http404 rather than returning None, so the
    # old `if data == None` redirect was unreachable and has been removed.
    deletable = data.user == request.user
    context = {"data": data, "deletable": deletable}
    return render(request, "read.html", context)
def userLogin(request):
    """Authenticate a user.

    GET renders the login form; POST verifies the credentials and either
    starts a session or bounces back to the login form.
    """
    if request.method == "GET":
        return render(request, 'login.html')
    username = request.POST["id"]  # local renamed: `id` shadows the builtin
    password = request.POST["password"]
    user = authenticate(request, username=username, password=password)
    if user is None:  # identity check, not `== None`
        return redirect("/login")
    login(request, user)
    return redirect("/")
@login_required
def userLogout(request):
    """Log the current user out and return to the landing page."""
    logout(request)
    return redirect("/")
@login_required
def list(request):
    """Show only the boards that belong to the requesting user."""
    # NOTE(review): the view name shadows the builtin `list`; renaming it would
    # require a URLconf change, so it is only flagged here.
    boardList = Board.objects.filter(user=request.user)
    # len() evaluates the queryset once; the count is passed alongside it.
    context = {"BoardList": boardList, "Length": len(boardList)}
    return render(request, 'list.html', context)
@login_required
def listAll(request):
    """Show every board from every user.

    NOTE(review): any authenticated user sees all boards here, while `list`
    restricts to the owner -- confirm this exposure is intended.
    """
    boardList = Board.objects.all()
    context = {"BoardList": boardList, "Length": len(boardList)}
    return render(request, 'list.html', context)
def signup(request):
    """Register a new account.

    GET renders the signup form; POST creates the user, logs them in and
    redirects home. On any creation failure the form is re-rendered with a
    duplicate-username message (the common cause with a unique username).
    """
    if request.method == "GET":
        return render(request, "signup.html")
    username = request.POST["id"]
    password = request.POST["password"]
    # SECURITY FIX: the original `print("=====[%s]==[%s:%s]", method, id,
    # password)` never formatted the string (args passed to print, not %) and,
    # worse, wrote the plaintext password to stdout. Removed entirely.
    try:
        user = User.objects.create_user(username=username, password=password)
        user.save()
        login(request, user)
        return redirect("/")
    except Exception:
        error_msg = {"msg": 'Username is already taken. Please choose a different Username.'}
        return render(request, "signup.html", error_msg)
    # (the original's trailing '????' fallback was unreachable and is removed)
|
15,252 | 6b3ad5abd41c72d4454b7b26e2f7911384e008b1 | import string
import re
import unicodedata
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
def clean_punctuation(s, punc_to_keep=()):
    """
    Strip string of punctuation
    :param s: a string to strip of punctuation
    :param punc_to_keep: an iterable of punctuation marks to keep, e.g. "$#"
    :return: a string stripped of punctuation, with whitespace collapsed
    :raises ValueError: if s is not a string
    """
    if not isinstance(s, str):
        raise ValueError('The value passed is not a string')
    # FIX: default was a mutable list (anti-pattern); an empty tuple behaves
    # identically for set() construction and is safe.
    PUNCT = re.compile('[%s]' % re.escape("".join(set(string.punctuation) - set(punc_to_keep))))
    # FIX: r'\s+' -- the unescaped '\s' is a deprecated invalid escape on 3.12+
    return re.sub(r'\s+', ' ', PUNCT.sub('', s)).strip()
def remove_non_ascii(word):
    """
    Drop every non-ASCII character from *word*.
    :param word: a word to clean
    :return: the word reduced to its ASCII characters (accents decomposed
             by NFKD normalization and then discarded)
    :raises ValueError: if word is not a string
    """
    if not isinstance(word, str):
        raise ValueError('The value passed is not a string')
    decomposed = unicodedata.normalize('NFKD', word)
    ascii_bytes = decomposed.encode('ascii', 'ignore')
    return ascii_bytes.decode('utf-8', 'ignore')
def unite_standalone_uppercase(str_to_fix):
    """
    Join runs of single capital letters into one word, e.g. "U S A" -> "USA".
    :param str_to_fix: string to examine and fix
    :return: new string with consecutive standalone capitals united
    :raises ValueError: if str_to_fix is not a string
    """
    if not isinstance(str_to_fix, str):
        raise ValueError('The value passed is not a string')
    tokens = str_to_fix.split()
    pieces = []
    for idx, token in enumerate(tokens):
        # Glue this token to the next one only when both are single capitals.
        glue_to_next = (len(token) == 1
                        and idx + 1 < len(tokens)
                        and len(tokens[idx + 1]) == 1
                        and token.isupper()
                        and tokens[idx + 1].isupper())
        pieces.append(token if glue_to_next else token + " ")
    return "".join(pieces).strip()
def camel_case_split(str_to_split):
    """
    Split camelCase words into separate words (also maps '-' to space and
    removes digits first).
    :param str_to_split: string to separate
    :return: new string with camel-case words split apart
    :raises ValueError: if str_to_split is not a string
    """
    if not isinstance(str_to_split, str):
        raise ValueError('The value passed is not a string')
    cleaned = str_to_split.replace('-', ' ')
    cleaned = cleaned.translate(str.maketrans('', '', string.digits))
    # Lazily consume characters up to a lower->Upper or ACRONYMWord boundary.
    boundary = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    return " ".join(piece.group(0) for piece in re.finditer(boundary, cleaned))
def lemmatize_word(word, pos):
    """
    Lemmatize word according to the given POS tag
    :param word: a word to lemmatize
    :param pos: part of speech tag: 's' / 'a' / 'r' / 'v' / 'n' / 'adverb'
    :return: the lemmatized word
    :raises ValueError: if word is not a string or pos is not a valid tag
    """
    if not isinstance(word, str):
        raise ValueError('The value passed is not a string')
    if not isinstance(pos, str) or pos not in ['s', 'a', 'r', 'v', 'n', 'adverb']:
        raise ValueError('Please enter a valid part of speach out of the following options: s, a, r, v, n, adverb')
    if pos == "adverb":
        # WordNetLemmatizer cannot reduce adverbs, so use the first pertainym
        # of the word's first adverb synset (e.g. 'quickly' -> 'quick').
        # NOTE(review): this raises WordNetError/IndexError when the synset or
        # pertainym does not exist -- confirm callers handle that.
        return wn.synset(word + ".r.1").lemmas()[0].pertainyms()[0].name()
    lemmatizer = WordNetLemmatizer()
    return lemmatizer.lemmatize(word, pos = pos)
def normalize_word(word, from_pos, to_pos):
    """
    Transform the given word from one POS to another via WordNet's
    derivationally related forms.
    :param word: word to normalize
    :param from_pos: part of speech the word is in ('s'/'a'/'r'/'v'/'n'/'adverb')
    :param to_pos: part of speech to convert to ('s'/'a'/'r'/'v'/'n'/'adverb')
    :return: the most frequent related word for to_pos, or [] when the word is
             unknown or has no related form of that POS
    :raises ValueError: on non-string input or invalid POS tags
    """
    if not isinstance(word, str):
        raise ValueError('The value passed is not a string')
    if not isinstance(from_pos, str) or from_pos not in ['s', 'a', 'r', 'v', 'n', 'adverb']:
        raise ValueError('Please enter a valid from_pos part of speach out of the following options: s, a, r, v, n, adverb')
    if not isinstance(to_pos, str) or to_pos not in ['s', 'a', 'r', 'v', 'n', 'adverb']:
        raise ValueError('Please enter a valid to_pos part of speach out of the following options: s, a, r, v, n, adverb')
    synsets = wn.synsets(word, pos=from_pos)
    # Word not found
    if not synsets:
        return []
    # Get all lemmas of the word (consider 'a' and 's' equivalent)
    lemmas = []
    for s in synsets:
        for l in s.lemmas():
            if s.name().split('.')[1] == from_pos or from_pos in ('a', 's') and \
                    s.name().split('.')[1] in ('a', 's'):
                lemmas += [l]
    # Get related forms
    derivationally_related_forms = [(l, l.derivationally_related_forms()) for l in lemmas]
    # Filter only the desired pos (consider 'a' and 's' equivalent)
    related_noun_lemmas = []
    for drf in derivationally_related_forms:
        for l in drf[1]:
            if l.synset().name().split('.')[1] == to_pos or to_pos in ('a', 's') and \
                    l.synset().name().split('.')[1] in ('a', 's'):
                related_noun_lemmas += [l]
    # Extract the words from the lemmas
    words = [l.name() for l in related_noun_lemmas]
    # BUG FIX: the original fell through to `result[0][0]` and raised
    # IndexError when no related form matched to_pos; mirror the earlier
    # "not found" contract instead.
    if not words:
        return []
    len_words = len(words)
    # Build (word, probability) pairs and keep the most probable word
    result = [(w, float(words.count(w)) / len_words) for w in set(words)]
    result.sort(key=lambda w: -w[1])
    return result[0][0]
|
15,253 | 28752ceee915b1e4c54427f32eb3084323b2e79d | #!/usr/bin/env python3
""" normalizes (standardizes) a matrix """
def normalize(X, m, s):
    """
    Standardize X elementwise: subtract the mean m, divide by the
    standard deviation s. Works on scalars and numpy arrays alike.
    """
    centered = X - m
    return centered / s
|
15,254 | a787dbd96833cf3c06dad4cdbcd665f298e9fcee | # Generated by Django 2.2.7 on 2020-01-18 22:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `studentclass_term_year` CharField to the `result_extra` model."""
    dependencies = [
        ('student_Datasss', '0011_result_extra'),
    ]
    operations = [
        migrations.AddField(
            model_name='result_extra',
            name='studentclass_term_year',
            # default='none' back-fills existing rows; preserve_default=False
            # then removes the default from the model state.
            field=models.CharField(default='none', max_length=255),
            preserve_default=False,
        ),
    ]
|
15,255 | 62831699cf999516dda3e6ce524ac24e3359473a | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
#filename handle.py
import hashlib
import web
class Handle(object):
    """web.py handler implementing WeChat official-account server verification."""

    def POST(self):
        # Message-push endpoint -- not implemented yet.
        pass

    def GET(self):
        """Verify the WeChat server's signature and echo back `echostr`.

        The WeChat platform sends signature/timestamp/nonce/echostr; the
        signature is the SHA-1 of the sorted concatenation of (token,
        timestamp, nonce).
        """
        try:
            data = web.input()
            if len(data) == 0:
                return "token success!"
            signature = data.signature
            timestamp = data.timestamp
            nonce = data.nonce
            echostr = data.echostr
            token = "ywToken"  # must match the token configured in the WeChat admin console
            # FIX: local renamed from `list` (shadowed the builtin); hashing
            # the joined sorted parts yields the same digest as three updates.
            params = sorted([token, timestamp, nonce])
            sha1 = hashlib.sha1()
            sha1.update("".join(params).encode("utf-8"))
            hashcode = sha1.hexdigest()
            print("handle/GET func: hashcode, signature: ", hashcode, signature)
            if hashcode == signature:
                return echostr
            else:
                return ""
        except Exception as Argument:
            # NOTE(review): returning the exception object (not str) preserves
            # the original behavior -- confirm web.py renders it as intended.
            return Argument
|
15,256 | fff3f3591382f95564103523692c8b36e9a79a03 | """Run the EasyInstall command"""
import sys
import os
import textwrap
import contextlib
def bootstrap():
    """Entry point used when the setuptools egg itself is executed via /bin/sh."""
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools
    # The egg directory sits one level above the setuptools package; make it
    # both the script name and the requirement to install.
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run the ``easy_install`` command through distutils' ``setup``.

    :param argv: CLI arguments for easy_install (defaults to ``sys.argv[1:]``)
    :param kw: extra keyword arguments forwarded to ``setup``
    """
    from setuptools import setup
    from setuptools.dist import Distribution
    class DistributionWithoutHelpCommands(Distribution):
        # Blank out distutils' generic usage header; _patch_usage supplies
        # an easy_install-specific one instead.
        common_usage = ""
        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)
    if argv is None:
        argv = sys.argv[1:]
    with _patch_usage():
        setup(
            # '-q' quiets distutils itself; '-v' restores easy_install verbosity
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily swap distutils' usage text for an easy_install-specific one."""
    import distutils.core
    USAGE = textwrap.dedent(
        """
        usage: %(script)s [options] requirement_or_url ...
        or: %(script)s --help
        """
    ).lstrip()
    def gen_usage(script_name):
        return USAGE % dict(script=os.path.basename(script_name))
    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        # Always restore the original, even if the wrapped block raised.
        distutils.core.gen_usage = saved
if __name__ == '__main__':
    # Executed as a script: hand control straight to easy_install's own CLI.
    from setuptools.command import easy_install
    easy_install.main()
|
15,257 | 08d38fa2ba8622caa8a5eca41e43f1cb14dd6b0b | # Exercício Python 082: Crie um programa que vai ler vários números e colocar em uma lista.
# Depois disso, crie duas listas extras que vão conter apenas os valores pares e os valores ímpares digitados, respectivamente.
# Ao final, mostre o conteúdo das três listas geradas.
randomNumber = []
evenNumbers = []
oddNumbers = []

# Read integers until the user answers 'N' to the continue prompt.
while True:
    randomNumber.append(int(input('Enter a number: ')))
    # FIX: local renamed from `exit` (shadowed the builtin); the original
    # `answer in 'N'` also broke the loop on an EMPTY answer ('' in 'N' is True).
    answer = str(input('Do you want to continue? [Y/N]: ')).upper().strip()
    if answer == 'N':
        break

# Partition the numbers into even and odd lists (zero is ignored, as before).
for value in randomNumber:
    # FIX: the original line read `if aux != 0 and % 2 == 0:` -- a SyntaxError
    # (missing operand before `%`).
    if value != 0 and value % 2 == 0:
        evenNumbers.append(value)
    elif value % 2 != 0:
        oddNumbers.append(value)

print(f'\nOriginal list: {randomNumber}\n Even numbers list: {evenNumbers}\n Odd numbers list: {oddNumbers}.')
|
15,258 | 7f9678113afcde3622ea80af051b059f91d8e153 | import numpy as np
# Integers 1..10 inclusive (arange excludes the stop value).
x = np.arange(1, 11)
print(x)
15,259 | 018eefee22393c5987c0f8e8147bb943fce2201d | # from splinter.browser import Browser
# from time import sleep
# import traceback
#
# # 设定用户名,密码
# username = u"cl1911618290"
# passwd = u"wykqh101119"
#
# # 起始地址的cookies值要自己去找, 下面两个分别是上海, 营口东。如何找,我们在文#后有简单的介绍
#
# starts = u"%u6DEE%u6EE8%2CHVN"
#
# ends = u"%u9526%u5DDE%2CJZD"
#
# # 时间格式
#
# dtime = u"2018-09-01"
#
# # 车次,选择第几趟,0则从上之下依次点击
#
# order = 1
#
# #设定乘客姓名
#
# pa = u"陈乐"
#
# #设定网址
#
# ticket_url = "https://kyfw.12306.cn/otn/leftTicket/init"
#
# login_url = "https://kyfw.12306.cn/otn/login/init"
#
# initmy_url = "https://kyfw.12306.cn/otn/index/initMy12306"
#
# #登录网站
#
# def login():
#
# b.find_by_text(u"登录").click()
#
# sleep(3)
#
# #我们在这里尝试了模拟登录12306,得到结果如下:
#
#
#
# #第17至20行代码用于自动登录,username是12306账号名,passwd是12306密码
#
# b.fill("loginUserDTO.user_name", username)
#
# sleep(1)
#
# b.fill("userDTO.password", passwd)
#
# sleep(1)
#
# #在我们的模拟登录中,结果如下:
#
# #接下来的验证码还是要大家自己动手输入啦!据说12306的验证码辨识难度堪比常识竞赛。在此,大数据文摘祝你好运!
#
# print(u"等待验证码,自行输入...")
#
# while True:
# if b.url != initmy_url:
# sleep(1)
# else:
# break
#
# #购票
#
# def huoche():
#
# global b
# #使用splinter打开chrome浏览器
# b = Browser(driver_name="chrome")
#
# #返回购票页面
#
# b.visit(ticket_url)
#
# #现在让我们来看看程序运行结果
#
#
# # huoche()
#
#
#
# # 看到了吗?网页能正常打开!
#
# while b.is_text_present(u"登录"):
#
# sleep(1)
#
# login()
#
# if b.url == initmy_url:
# break
#
# try:
#
# print u"购票页面..."
#
# # 跳回购票页面
#
# b.visit(ticket_url)
# # 加载查询信息
#
# # 我们的模拟登录中以上海为始发站,营口东为终点站,时间选定2016年2月1日
#
# b.cookies.add({"_jc_save_fromStation": starts})
#
# b.cookies.add({"_jc_save_toStation": ends})
#
# b.cookies.add({"_jc_save_fromDate": dtime})
#
# b.reload()
#
# #让我们一起来看看运行结果如何?
#
#
#
# sleep(2)
#
# count = 0
#
# # 循环点击预订
#
# if order != 0:
#
# while b.url == ticket_url:
#
# b.find_by_text(u"查询").click()
#
# #程序自动点击查询后,结果如下:
#
#
#
# count +=1
#
# print u"循环点击查询... 第 %s 次" % count
#
# sleep(1)
#
# try:
#
# b.find_by_text(u"预订")[order - 1].click()
#
# 程序自动点击预订后,结果如下:
#
#
#
# 哇啦!我们成功预订了春运车票!
#
# 56 except:
#
# 57 print u"还没开始预订"
#
# 58 continue
#
# 59 else:
#
# 60 while b.url == ticket_url:
#
# 61 b.find_by_text(u"查询").click()
#
# 62 count += 1
#
# 63 print u"循环点击查询... 第 %s 次" % count
#
# 64 sleep(1)
#
# 65 try:
#
# 66 for i in b.find_by_text(u"预订"):
#
# 67 i.click()
#
# 68 except:
#
# 69 print u"还没开始预订"
#
# 70 continue
#
# 71 sleep(1)
#
# 注意:可以通过修改sleep的参数来调整延时, 但延时不要太低, 防止被12306网站认为是刷票屏蔽掉.
#
# 72 b.find_by_text(pa)[1].click()
#
# 如果你运气不好,程序会给出一个这样的信息:
#
# 73 print u"能做的都做了.....不再对浏览器进行任何操作"
#
# 如果出现这样的信息,你也不要灰心,重新执行程序,让好运降临!
#
# 74 except Exception as e:
#
# 75 print(traceback.print_exc())
#
# 76 if __name__ == "__main__":
#
# 77 huoche() |
15,260 | 27bbe3dc3e444fb6deb5b96fe152f9ca5beeaa6e | # Generated by Django 2.2.5 on 2019-10-05 03:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the chat app: conversations, groups, messages, users."""
    initial = True
    dependencies = [
    ]
    operations = [
        # --- Standalone tables (no foreign keys yet) ---
        migrations.CreateModel(
            name='ChConversation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('conversation_name', models.CharField(max_length=100)),
                ('create_date', models.DateTimeField()),
                ('is_active', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='ChGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group_name', models.CharField(max_length=100, unique=True)),
                ('create_date', models.DateTimeField()),
                ('is_active', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='ChMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.CharField(max_length=100)),
                ('attachment_url', models.CharField(max_length=255)),
                ('send_time', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='GnGroupType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group_type', models.CharField(max_length=10, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='ScUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_code', models.CharField(max_length=10, unique=True)),
                ('user_name', models.CharField(max_length=100)),
                ('user_node', models.CharField(max_length=100, null=True)),
                ('email', models.CharField(max_length=100, null=True)),
                ('phone', models.CharField(max_length=100, null=True)),
                ('password', models.CharField(max_length=255, null=True)),
                ('voice', models.FileField(null=True, upload_to='')),
                ('face', models.FileField(null=True, upload_to='')),
                ('salt', models.IntegerField()),
                ('pin', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_code', models.CharField(max_length=10, unique=True)),
                ('user_name', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100, null=True)),
                ('phone', models.CharField(max_length=100, null=True)),
                ('password', models.CharField(max_length=255, null=True)),
                ('voice', models.BooleanField(null=True)),
                ('face', models.BooleanField(null=True)),
                ('salt', models.IntegerField()),
                ('pin', models.CharField(max_length=255)),
            ],
        ),
        # --- Tables referencing the models created above ---
        migrations.CreateModel(
            name='EsUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=100)),
                ('sns_user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.User')),
            ],
        ),
        migrations.CreateModel(
            name='ChUserGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('add_date', models.DateTimeField()),
                ('is_active', models.BooleanField()),
                ('add_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ScUser')),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ChGroup')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usergroup', to='myapp_2.ScUser')),
            ],
        ),
        migrations.CreateModel(
            name='ChReceiver',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('seen_date', models.DateTimeField()),
                ('conversation_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ChConversation')),
                ('message_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ChMessage')),
                ('user_group_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ChGroup')),
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ScUser')),
            ],
        ),
        # --- Foreign keys added once both ends exist ---
        migrations.AddField(
            model_name='chmessage',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ScUser'),
        ),
        migrations.AddField(
            model_name='chgroup',
            name='group_type_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.GnGroupType'),
        ),
        migrations.AddField(
            model_name='chgroup',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.EsUser'),
        ),
        migrations.AddField(
            model_name='chconversation',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp_2.ScUser'),
        ),
    ]
|
15,261 | d4ffe7b65fb8741f88d85e899135245087db4886 |
#! /usr/bin/env python
def child(str_arg):
    """Echo *str_arg* (via tell_thing), then print the literal 'part'."""
    tell_thing(str_arg)
    print('part')


def tell_thing(str_arg):
    """Print *str_arg* to stdout."""
    print(str_arg)


if __name__ == '__main__':
    child('young_part')
|
15,262 | 2db4e49ed4e80c3d0b817a13d1f12bb9debc5f11 | import math
import os
import random
import re
import sys
if __name__="__main__":
n=int(input().strip())
for i in range(1,11):
print(n,"x",i, "=",(n*i)) |
15,263 | b993047139638a683ccc2ae961f89d91f13ce367 | import re
from kivy.app import App
from kivy.uix.textinput import TextInput
from kivy.clock import Clock
from kivy.properties import NumericProperty, ObjectProperty, BooleanProperty
from kivy.uix.bubble import Bubble
from .navigation import Navigation
from kivy.lang.builder import Builder
Builder.load_string("""
<-TextInput>:
canvas.before:
Color:
rgba: self.background_color
BorderImage:
display_border: [app.display_border/2, app.display_border/2, app.display_border/2, app.display_border/2]
border: self.border
pos: self.pos[0] + 3, self.pos[1] + 3
size: self.size[0] -6, self.size[1] - 6
source: self.background_active if self.focus else (self.background_disabled_normal if self.disabled else self.background_normal)
Color:
rgba:
(self.cursor_color
if self.focus and not self._cursor_blink
else (0, 0, 0, 0))
Rectangle:
pos: self._cursor_visual_pos
size: root.cursor_width, -self._cursor_visual_height
Color:
rgba: self.disabled_foreground_color if self.disabled else (self.hint_text_color if not self.text else self.foreground_color)
padding: app.display_padding
<NormalInput>:
mipmap: True
cursor_color: app.theme.text
write_tab: False
background_color: app.theme.input_background
hint_text_color: app.theme.disabled_text
disabled_foreground_color: 1,1,1,.75
foreground_color: app.theme.text
size_hint_y: None
height: app.button_scale
font_size: app.text_scale
<FloatInput>:
multiline: False
write_tab: False
background_color: app.theme.input_background
disabled_foreground_color: 1,1,1,.75
foreground_color: app.theme.text
size_hint_y: None
height: app.button_scale
font_size: app.text_scale
<IntegerInput>:
multiline: False
write_tab: False
background_color: app.theme.input_background
disabled_foreground_color: 1,1,1,.75
foreground_color: app.theme.text
size_hint_y: None
height: app.button_scale
font_size: app.text_scale
<InputMenu>:
canvas.before:
Color:
rgba: app.theme.menu_background
BorderImage:
display_border: [app.display_border, app.display_border, app.display_border, app.display_border]
size: self.size
pos: self.pos
source: 'data/buttonflat.png'
background_image: 'data/transparent.png'
size_hint: None, None
size: app.button_scale * 9, app.button_scale
show_arrow: False
MenuButton:
text: 'Select All'
on_release: root.select_all()
MenuButton:
text: 'Cut'
on_release: root.cut()
MenuButton:
text: 'Copy'
on_release: root.copy()
MenuButton:
text: 'Paste'
on_release: root.paste()
""")
class NormalInput(TextInput, Navigation):
    """Text input widget that adds a popup menu for normal text operations."""
    context_menu = BooleanProperty(True)   # enable the right-click / long-press bubble
    long_press_time = NumericProperty(1)   # seconds a touch must be held to open the menu
    long_press_clock = None                # pending Clock event for the long press
    long_press_pos = None                  # window coords captured at touch-down
    def on_navigation_activate(self):
        # Keyboard/controller navigation toggles focus on this input.
        self.focus = not self.focus
    def on_touch_up(self, touch):
        # Releasing the touch cancels a pending long-press menu.
        if self.long_press_clock:
            self.long_press_clock.cancel()
            self.long_press_clock = None
        super(NormalInput, self).on_touch_up(touch)
    def on_touch_down(self, touch):
        if self.context_menu:
            if self.collide_point(*touch.pos):
                pos = self.to_window(*touch.pos)
                # Arm the long-press timer; on_touch_up cancels it if released early.
                self.long_press_clock = Clock.schedule_once(self.do_long_press, self.long_press_time)
                self.long_press_pos = pos
            # NOTE(review): this branch sits OUTSIDE the collide_point check, so
            # `pos` is unbound when a right-click lands off-widget -- confirm
            # whether it should be nested one level deeper.
            if hasattr(touch, 'button'):
                if touch.button == 'right':
                    # Right click opens the cut/copy/paste bubble immediately.
                    app = App.get_running_app()
                    app.popup_bubble(self, pos)
                    return
        super(NormalInput, self).on_touch_down(touch)
    def do_long_press(self, *_):
        # Fired by the Clock: open the popup at the recorded touch position.
        app = App.get_running_app()
        app.popup_bubble(self, self.long_press_pos)
    def keyboard_on_key_down(self, window, keycode, text, modifiers):
        app = App.get_running_app()
        app.close_bubble()
        if keycode[0] == 27:  # escape: drop focus
            self.focus = False
            return True
        if keycode[0] in [13, 271]:  # enter / numpad-enter
            self.press_enter(self, self.text)
            if not self.multiline:
                self.focus = False
            return True
        super().keyboard_on_key_down(window, keycode, text, modifiers)
    def press_enter(self, instance, text):
        # Hook for subclasses/users: called when enter is pressed.
        pass
    def on_focus(self, *_):
        # Disable app-level navigation while the user is typing here.
        app = App.get_running_app()
        app.navigation_enabled = not self.focus
class FloatInput(NormalInput):
    """Custom text input that only allows float numbers to be typed in. Only allows numerals and one '.'"""
    pat = re.compile('[^0-9]')

    def insert_text(self, substring, from_undo=False):
        # If the field already holds a '.', strip everything non-numeric from
        # the insertion; otherwise permit at most one '.' from the new text.
        digits_only = self.pat
        if '.' in self.text:
            filtered = digits_only.sub('', substring)
        else:
            filtered = '.'.join(digits_only.sub('', chunk) for chunk in substring.split('.', 1))
        return super(FloatInput, self).insert_text(filtered, from_undo=from_undo)
class IntegerInput(NormalInput):
    """Custom text input that only allows numbers to be typed in."""
    pat = re.compile('[^0-9]')

    def insert_text(self, substring, from_undo=False):
        # Drop every non-digit character before handing off to TextInput.
        digits_only = self.pat.sub('', substring)
        return super(IntegerInput, self).insert_text(digits_only, from_undo=from_undo)
class InputMenu(Bubble):
    """Right-click/long-press popup menu for text inputs: basic clipboard operations."""
    owner = ObjectProperty()

    def on_touch_down(self, touch):
        # A touch outside the bubble dismisses it; inside, let Bubble dispatch.
        if self.collide_point(*touch.pos):
            super(InputMenu, self).on_touch_down(touch)
        else:
            App.get_running_app().close_bubble()

    def select_all(self, *_):
        if self.owner:
            self.owner.select_all()
            App.get_running_app().close_bubble()

    def cut(self, *_):
        if self.owner:
            self.owner.cut()
            App.get_running_app().close_bubble()

    def copy(self, *_):
        if self.owner:
            self.owner.copy()
            App.get_running_app().close_bubble()

    def paste(self, *_):
        if self.owner:
            self.owner.paste()
            App.get_running_app().close_bubble()
|
15,264 | 0ab486ac522e7f0da27daf56b9a8c454256bb66f | ##############################################################################################
# PURPOSE
# Creates the multiple plots relating the Q_i parameter with the properties of the LCGs (oxygen abundances, sSFR and concentration)
#
# CREATED BY:
# Vitor Eduardo Buss Bootz
#
# ADAPTED BY:
#
# CALLING SEQUENCE
# python Q_vs_properties.py --> In terminal
#
# INPUT PARAMETERS
# prop --> Filename containing the properties of LCGs
# tidal --> Filename containing the Q values measured by 'Tidal_Strength_Estimator.py'
# oxig --> Filename containing the oxygen abundances
# starlight_output_dir --> Directory of input files
#
# OUTPUT
# 3 .pdf plots: Q_OH.pdf, Q_sSFR.pdf, Q_concentration.pdf
#
# REQUIRED SCRIPTS
#
#
# COMMENTS
#
##############################################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from scipy.stats import spearmanr
from scipy.stats import kendalltau
from scipy.stats import pearsonr
pd.set_option('max_columns', None)
pd.set_option('max_rows', None)

# Load LCG properties, tidal Q values and oxygen abundances.
prop = pd.read_csv('/home/vitorbootz/research/aux_files/properties_sample.csv')
tidal = pd.read_csv('/home/vitorbootz/research/aux_files/Q_values.csv')
oxig = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sample.csv')

# Exclude LCGs 2361 and 1864 from both tables.
tidal = tidal[(tidal.lcgID != 2361) & (tidal.lcgID != 1864)]
# BUG FIX: the original filtered `oxig` with `tidal.lcgID` in the second
# clause, masking by a different (misaligned) table's IDs.
oxig = oxig[(oxig.lcgID != 2361) & (oxig.lcgID != 1864)]

# Reset indices so the tables stay row-aligned for elementwise operations below.
tidal.index = range(len(tidal))
oxig.index = range(len(oxig))
prop.index = range(len(prop))
#####################################
############ Concentração ################
# Q vs concentration (petroR90/petroR50): scatter + seaborn linear fit.
fig2 = plt.figure(figsize=(6,4))
axis2 = fig2.add_subplot()
axis2.grid(alpha=0.2, color='grey')
# NOTE(review): razao/median/std below are computed but never used in this plot.
razao_Q_OH = np.log10(abs(tidal.Q_group/prop.log_R90_R50))
median_Q_OH = razao_Q_OH.median()
std_Q_OH = razao_Q_OH.std()
#coef = np.polyfit(tidal.Q_group,prop.log_R90_R50,1)
#poly1d_fn = np.poly1d(coef)
#axis2.plot(tidal.Q_group, poly1d_fn(tidal.Q_group), '--k')
minix = min(tidal.Q_group)
maxix = max(tidal.Q_group)
miniy = min(prop.log_R90_R50)
maxiy = max(prop.log_R90_R50)
plt.xlim(minix-0.2,maxix+0.2)
plt.ylim(miniy-0.07,maxiy+0.02)
# Linear fit slope/intercept plus correlation statistics for the annotations.
coef = np.polyfit(tidal.Q_group,prop.log_R90_R50,1)
m,b = np.poly1d(coef)
spearman, p = spearmanr(tidal.Q_group,prop.log_R90_R50)
kendall, tau = kendalltau(tidal.Q_group,prop.log_R90_R50)
pearson, pp = pearsonr(tidal.Q_group,prop.log_R90_R50)
sns.regplot(tidal.Q_group, prop.log_R90_R50, color='k', marker='x', ci=95, scatter_kws={"s": 1})
axis2.plot(tidal.Q_group,prop.log_R90_R50,'x', color='black', ms='9')
axis2.set_xlabel('Q', fontsize = 15)
axis2.set_ylabel(r'log$_{10}\left[\frac{petroR90}{petroR50}\right]$', fontsize = 15)
axis2.annotate('Coef. Ang. = '+str(round(m,3))+'\nI.C. = 95%', xy=(260, 40), xycoords='axes points',
                   size=12, ha='center', va='top',
                   bbox=dict(boxstyle='round', fc='w'))
axis2.annotate(r'Pearson $\rho$: '+ str(round(pearson,3)) + '\np-value: '+ str(round(pp,3)), xy=(72, 40), xycoords='axes points',
                   size=12, ha='center', va='top',
                   bbox=dict(boxstyle='round', fc='w'))
axis2.set_title('Q vs Concentração', fontsize=15)
plt.show()
# NOTE(review): output filename spells "concenetration" -- kept as-is since
# downstream documents may reference this exact path.
fig2.savefig('/home/vitorbootz/research/TCC_images/Q_vs_properties/Q_concenetration.pdf', bbox_inches='tight')
#####################################
############ sSFR ################
# Q vs specific star-formation rate: scatter + seaborn linear fit.
fig3 = plt.figure(figsize=(6,4))
axis3 = fig3.add_subplot()
axis3.grid(alpha=0.2, color='grey')
# NOTE(review): razao/median/std below are computed but never used in this plot.
razao_Q_OH = np.log10(abs(tidal.Q_group/prop.specsfr_tot_p50))
median_Q_OH = razao_Q_OH.median()
std_Q_OH = razao_Q_OH.std()
minix = min(tidal.Q_group)
maxix = max(tidal.Q_group)
miniy = min(prop.specsfr_tot_p50)
maxiy = max(prop.specsfr_tot_p50)
plt.xlim(minix-0.2,maxix+0.2)
plt.ylim(miniy-1,maxiy+0.3)
# Linear fit slope/intercept plus correlation statistics for the annotations.
coef = np.polyfit(tidal.Q_group,prop.specsfr_tot_p50,1)
m,b = np.poly1d(coef)
spearman, p = spearmanr(tidal.Q_group,prop.specsfr_tot_p50)
kendall, tau = kendalltau(tidal.Q_group,prop.specsfr_tot_p50)
pearson, pp = pearsonr(tidal.Q_group,prop.specsfr_tot_p50)
sns.regplot(tidal.Q_group, prop.specsfr_tot_p50, color='k', marker='x', ci=95, scatter_kws={"s": 1})
axis3.plot(tidal.Q_group,prop.specsfr_tot_p50,'x', color='black', ms='9')
axis3.set_xlabel('Q', fontsize = 15)
axis3.set_ylabel('sSFR', fontsize = 15)
axis3.annotate('Coef. Ang. = '+str(round(m,3))+'\nI.C. = 95%', xy=(260, 40), xycoords='axes points',
                   size=12, ha='center', va='top',
                   bbox=dict(boxstyle='round', fc='w'))
axis3.annotate(r'Pearson $\rho$: '+ str(round(pearson,3)) + '\np-value: '+ str(round(pp,3)), xy=(72, 41), xycoords='axes points',
                   size=12, ha='center', va='top',
                   bbox=dict(boxstyle='round', fc='w'))
axis3.set_title('Q vs Taxa de formação estelar específica', fontsize=15)
fig3.savefig('/home/vitorbootz/research/TCC_images/Q_vs_properties/Q_sSFR.pdf', bbox_inches='tight')
#####################################
############ Abundância ################
# Q vs oxygen abundance; LCG 2023 is additionally excluded here, so the
# abundance table is reloaded and refiltered to stay row-aligned with tidal.
tidal = tidal[(tidal.lcgID != 1864) & (tidal.lcgID != 2361) & (tidal.lcgID != 2023)]
tidal.index = range(len(tidal))
oxig = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sample.csv')
oxig = oxig[(oxig.lcgID != 1864) & (oxig.lcgID != 2361) & (oxig.lcgID != 2023)]
oxig.index = range(len(oxig))
fig1 = plt.figure(figsize=(6,4))
axis1 = fig1.add_subplot()
axis1.grid(alpha=0.2, color='grey')
# NOTE(review): razao/median/std below are computed but never used in this plot.
razao_Q_OH = np.log10(abs(tidal.Q_group/oxig.OH))
median_Q_OH = razao_Q_OH.median()
std_Q_OH = razao_Q_OH.std()
minix = min(tidal.Q_group)
maxix = max(tidal.Q_group)
miniy = min(oxig.OH)
maxiy = max(oxig.OH)
plt.xlim(minix-0.2,maxix+0.2)
plt.ylim(miniy-0.1,maxiy+0.02)
# Linear fit slope/intercept plus correlation statistics for the annotations.
coef = np.polyfit(tidal.Q_group,oxig.OH,1)
m,b = np.poly1d(coef)
spearman, p = spearmanr(tidal.Q_group,oxig.OH)
kendall, tau = kendalltau(tidal.Q_group,oxig.OH)
pearson, pp = pearsonr(tidal.Q_group,oxig.OH)
sns.regplot(tidal.Q_group, oxig.OH, color='k', marker='x', ci=95, scatter_kws={"s": 1})
axis1.plot(tidal.Q_group,oxig.OH,'x', color='black', ms='9')
axis1.set_xlabel('Q', fontsize = 15)
axis1.set_ylabel('12 + log (O/H)', fontsize = 15)
axis1.set_title('Q vs Abundância de oxigênio', fontsize=15)
axis1.annotate('Coef. Ang. = '+str(round(m,3))+'\nI.C. = 95%', xy=(260, 40), xycoords='axes points',
                   size=12, ha='center', va='top',
                   bbox=dict(boxstyle='round', fc='w'))
axis1.annotate(r'Pearson $\rho$: '+ str(round(pearson,3)) + '\np-value: '+ str(round(pp,3)), xy=(72, 40), xycoords='axes points',
                   size=12, ha='center', va='top',
                   bbox=dict(boxstyle='round', fc='w'))
fig1.savefig('/home/vitorbootz/research/TCC_images/Q_vs_properties/Q_OH.pdf', bbox_inches='tight')
|
15,265 | d2a2d058dda763c44ac09dd70be4b6c4b7045a56 | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class ArtistaFavorit(models.Model):
    """A favourite artist saved by a user."""
    id_artista = models.AutoField(primary_key=True)
    nom = models.CharField(max_length=200)
    # FIX: on_delete is mandatory from Django 2.0; CASCADE matches the implicit
    # default that Django 1.x applied when the argument was omitted.
    usuari = models.ForeignKey(User, on_delete=models.CASCADE)
15,266 | 938699b3e64fb87d22f5f9d3dbd0550af73636b4 | import pyinputplus as pyin
while True:
prompt = 'want to know how to keep an idiot busy for hours?'
response = pyin.inputYesNo(prompt)
if response == 'no':
break
print('thank you bhai!!!!!!') |
# Seed the dictionary, add one more entry via update(), and show the result.
Dict = {'Tim': 18, 'Charlie': 12, 'Tiffany': 22, 'Robert': 25}
Dict.update({"Sarah": 9})
print(Dict)
15,268 | 6f8ec623cbc471911bd56fb24e9e453f044a464a | # EMAILFEATURES takes in a word_indices vector and produces a feature vector
# from the word indices
def emailFeatures(word_indices):
    '''
    x = EMAILFEATURES(word_indices) takes in a word_indices vector and
    constructs a binary feature vector indicating, for each dictionary word,
    whether it occurs in the email:
        x = [ 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 ... 0 0 0 1 0 ..];
    '''
    import numpy as np

    vocab_size = 1899  # total number of words in the dictionary
    x = np.zeros((vocab_size, 1))
    for word_index in word_indices:
        # word_indices are 1-based (Octave heritage); shift to 0-based
        x[word_index - 1] = 1
    return x
def match(s1, s2):
    """Return the characters of s1 that also occur in s2 (duplicates kept);
    prints the number of matches as a side effect."""
    shared = []
    total = 0
    for ch in s1:
        if ch in s2:
            shared.append(ch)
            total += 1
    print(total)
    return shared


s1 = "snigdha"
s2 = "sniwqwq"
match(s1, s2)
15,270 | cd8d3556f162a231d82c347e7cecadcaa6020e66 | import os
import json
import logging
import random
import shutil
import unittest
import functools
import tempfile
from nose.plugins.attrib import attr
import pbsmrtpipe.cluster as C
from pbsmrtpipe.engine import backticks, run_command
from base import (TEST_DATA_DIR, HAS_CLUSTER_QSUB, get_temp_cluster_dir, SLOW_ATTR, get_temp_dir, get_temp_file)
log = logging.getLogger(__name__)
_EXE = 'pbsmrtpipe'
class _TestIntegrationHelp(unittest.TestCase):
    """Smoke test: ``pbsmrtpipe <subcommand> --help`` must exit with code 0."""
    # Subclasses override CMD to target a different subcommand.
    CMD = "{e} pipeline --help".format(e=_EXE)
    def test_help(self):
        rcode, stdout, stderr, run_time = backticks(self.CMD)
        self.assertEqual(rcode, 0)
class TestIntegrationHelp(_TestIntegrationHelp):
    """Same --help smoke test for the ``task`` subcommand."""
    CMD = '{e} task --help'.format(e=_EXE)
@unittest.skipIf(not HAS_CLUSTER_QSUB, "Cluster is not accessible")
class TestHelloWorldCluster(unittest.TestCase):
    """Integration smoke test: submit a trivial shell job via the SGE template."""
    def test_hello_world_job(self):
        # Render qsub commands from the installed SGE cluster template set.
        r = C.load_installed_cluster_templates_by_name('sge')
        log.debug(r)
        job_name = "int_job_hello"
        output_dir = get_temp_cluster_dir(job_name)
        cmd = "pbsmrtpipe --help"
        def _to_p(x_):
            # Path inside this job's output directory.
            return os.path.join(output_dir, x_)
        sh_script = _to_p('qsub_test.sh')
        with open(sh_script, 'w') as f:
            f.write(cmd + "\n")
        # qsub output
        stdout = _to_p('stdout')
        stderr = _to_p('stderr')
        for x in [stdout, stderr]:
            with open(x, 'w') as f:
                f.write("")
        log.info(sh_script)
        cmd = r.render("start", sh_script, 'test_job_01', stdout=stdout, stderr=stderr, nproc=1)
        log.debug("Running qsub command '{c}'".format(c=cmd))
        time_out = 60 * 5
        with tempfile.TemporaryFile() as stdout_tmp:
            with tempfile.TemporaryFile() as stderr_tmp:
                rcode, stdout, stderr, run_time = run_command(cmd, stdout_tmp, stderr_tmp, time_out=time_out)
                log.debug((rcode, stdout, stderr, run_time))
                if rcode != 0:
                    # Dump everything useful before failing the assertion below.
                    log.info(stdout)
                    log.error(stderr)
                    log.error("Failed Integration Job {i} with exit code {r}".format(i=job_name, r=rcode))
                    if os.path.exists(stderr):
                        with open(stderr, 'r') as f:
                            log.error(f.read())
                else:
                    # Success: best-effort cleanup of the temp job directory.
                    try:
                        shutil.rmtree(output_dir)
                    except Exception as e:
                        # NOTE(review): `e.message` is Python-2-only; under
                        # Python 3 this except handler itself would raise.
                        log.warn("Unable to cleanup testdir {o}. {m}".format(o=output_dir, m=e.message))
                self.assertEqual(rcode, 0, stderr)
def __to_cmd(pipeline_subparser, job_dir, workflow_xml_or_pipeline_id, preset_json, preset_xml, ep_d):
    """Build a pbsmrtpipe CLI command string.

    Optional presets are rendered only when given; entry points become
    repeated -e "id:path" flags.  Python 2 only (dict.iteritems).
    """
    preset_json_str = '' if preset_json is None else ' --preset-json {p}'.format(p=preset_json)
    preset_xml_str = '' if preset_xml is None else ' --preset-xml {p}'.format(p=preset_xml)
    e = ['-e "{i}:{p}"'.format(i=i, p=p) for i, p in ep_d.iteritems()]
    _d = dict(e=' '.join(e), j=preset_json_str, p=preset_xml_str, w=workflow_xml_or_pipeline_id, d=job_dir, s=pipeline_subparser)
    return "pbsmrtpipe {s} {w} --debug --output-dir={d} {p} {j} {e}".format(**_d)
# Partial applications binding the CLI subparser name used by the drivers below.
_to_pipeline_cmd = functools.partial(__to_cmd, "pipeline")
_to_pipeline_id_cmd = functools.partial(__to_cmd, "pipeline-id")
@attr(SLOW_ATTR)
class _TestDriverIntegrationBase(unittest.TestCase):
    """End-to-end pbsmrtpipe driver run.

    Subclasses override the class attributes to pick workflow, presets and
    entry points.  Python 2 only (dict.iteritems).
    """
    JOB_NAME = "my_job"
    PRESET_XML = None
    PRESET_JSON = None
    WORKFLOW_XML = ''
    ENTRY_POINTS = {}
    TO_CMD_FUNC = _to_pipeline_cmd
    EXPECTED_EXIT_CODE = 0  # in case we want to test failure modes
    def _get_root_temp_dir(self):
        """Override me to set the shared tmp dir"""
        return get_temp_cluster_dir(self.JOB_NAME)
    def test_run(self):
        """Create mock entry-point files, run the pipeline, assert exit code."""
        root_output_dir = self._get_root_temp_dir()
        # Random suffix keeps repeated runs from colliding in the shared dir.
        i = random.randint(1, 10000)
        name = "{n}_{i}".format(n=self.JOB_NAME, i=i)
        output_dir = os.path.join(root_output_dir, name)
        os.mkdir(output_dir)
        ep_d = {ep_id: get_temp_file(suffix=name) for ep_id, name in self.ENTRY_POINTS.iteritems()}
        for ep_id, file_name in ep_d.iteritems():
            with open(file_name, 'w') as x:
                x.write("Mock data for {i} \n".format(i=ep_id))
        cmd = self.TO_CMD_FUNC(output_dir, self.WORKFLOW_XML, self.PRESET_JSON, self.PRESET_XML, ep_d)
        stderr_path = os.path.join(output_dir, 'job.stderr')
        stdout_path = os.path.join(output_dir, 'job.stdout')
        log.debug(cmd)
        with open(stdout_path, 'w') as wo:
            with open(stderr_path, 'w') as we:
                rcode, stdout_results, stderr_results, run_time = run_command(cmd, wo, we)
        log.debug("Integration Job {i} state {s} in {t:.2f} sec.".format(i=self.JOB_NAME, s=rcode, t=run_time))
        # Only dump diagnostics when the failure was unexpected.
        if rcode != 0 and self.EXPECTED_EXIT_CODE == 0:
            log.error("Integration Job {i} failed.".format(i=self.JOB_NAME))
            log.error(stdout_results)
            log.error(stderr_results)
            if os.path.exists(stderr_path):
                with open(stderr_path, 'r') as f:
                    log.error(f.read())
        emsg = "Failed Integration Job {i} with exit code {r} in {d}. {w}".format(i=self.JOB_NAME, r=rcode, d=output_dir, w=self.WORKFLOW_XML)
        self.assertEqual(rcode, self.EXPECTED_EXIT_CODE, emsg)
class TestHelloWorldWorkflow(_TestDriverIntegrationBase):
    """Hello-world workflow driven by an XML preset."""
    JOB_NAME = 'hello_world'
    PRESET_XML = os.path.join(TEST_DATA_DIR, 'hello_world_preset.xml')
    WORKFLOW_XML = os.path.join(TEST_DATA_DIR, 'hello_world_workflow.xml')
    ENTRY_POINTS = {'e_01': "hello_entry_point.txt"}
# XXX only the preset is json
class TestHelloWorldWorkflowJson(_TestDriverIntegrationBase):
    """Same hello-world workflow, but the preset comes from JSON."""
    JOB_NAME = 'hello_world_json'
    PRESET_JSON = os.path.join(TEST_DATA_DIR, 'hello_world_preset.json')
    WORKFLOW_XML = os.path.join(TEST_DATA_DIR, 'hello_world_workflow.xml')
    ENTRY_POINTS = {'e_01': "hello_entry_point.txt"}
@unittest.skipIf(not HAS_CLUSTER_QSUB, "No qsub exe found.")
class TestHelloWorldDistributed(TestHelloWorldWorkflow):
    """Distributed hello-world; requires a reachable qsub."""
    JOB_NAME = 'hello_world_distributed'
    WORKFLOW_XML = os.path.join(TEST_DATA_DIR, 'hello_world_distributed_workflow.xml')
    def _get_root_temp_dir(self):
        # Distributed jobs must live on the shared cluster filesystem.
        return get_temp_cluster_dir(self.JOB_NAME)
class _TestDriverPipelineId(_TestDriverIntegrationBase):
    """Same driver tests, invoked via the pipeline-id subparser instead."""
    TO_CMD_FUNC = _to_pipeline_id_cmd
class TestWritePresets(unittest.TestCase):
    """CLI smoke tests for emitting preset/option JSON files."""
    def test_write_pipeline_preset_json(self):
        """show-template-details must write a JSON with the expected task options."""
        ofn = tempfile.NamedTemporaryFile(suffix=".json").name
        pipeline_id = "pbsmrtpipe.pipelines.dev_diagnostic_stress"
        cmd = "pbsmrtpipe show-template-details {i} -j {f}".format(f=ofn, i=pipeline_id)
        rcode, stdout, stderr, run_time = backticks(cmd)
        # NOTE(review): the message has no {i} placeholder, so the i= kwarg
        # (pipeline id) is silently dropped from the failure text.
        fail_msg = "Failed (exit code {e}) to show template details cmd: {c} {m} ".format(i=pipeline_id, e=rcode, c=cmd, m=str(stderr))
        self.assertEqual(rcode, 0, fail_msg)
        with open(ofn) as f:
            d = json.load(f)
        self.assertEqual(len(d['taskOptions']), 9)
    def test_write_workflow_preset_json(self):
        """show-workflow-options must list every registered workflow option."""
        from pbsmrtpipe.pb_io import REGISTERED_WORKFLOW_OPTIONS
        ofn = tempfile.NamedTemporaryFile(suffix=".json").name
        CMD = "pbsmrtpipe show-workflow-options -j {f}".format(f=ofn)
        rcode, stdout, stderr, run_time = backticks(CMD)
        self.assertEqual(rcode, 0)
        with open(ofn) as f:
            d = json.load(f)
        self.assertEqual(len(d['options']), len(REGISTERED_WORKFLOW_OPTIONS))
|
15,271 | 1e31b17710c3454a649155dde30d120856b43a59 | """
Author: Renato Durrer
Created: 25.03.2019
File in with helpful functions for data creation and processing are written.
"""
import Labber
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
def data_creator(logfile):
    """
    Takes the Labber LogFile and creates a new data file for plotting. Supports
    n-dimensional sweeping.
    Parameters
    ----------
    logfile :
        Labber.LogFile() object
    Returns
    -------
    dict
        Example:
        data = {
            'I QPC': np.array() <- shape(n_x, n_y, n_z), # Log
            'I TQD': np.array() <- shape(n_x, n_y, n_z), # Log
            'LPG': np.array() <- shape(n_x, n_y, n_z), # Step
            'MPG': np.array() <- shape(n_x, n_y, n_z), # Step
            'LTG': np.array() <- shape(n_x, n_y, n_z) # Step
        }
        For the case of independent step channels one can get the steps as
        LPG = data['LPG'][:, 0, 0]
        MPG = data['MPG'][0, :, 0]
        LTG = data['LTG'][0, 0, :]
        Attention: the order matters and is given by the logfile!
    """
    # create data dict
    data = {}
    # get shape of the data
    shapes = []
    steps = []
    step_channels = logfile.getStepChannels()
    # Only leading channels with more than one value count as swept axes;
    # the first single-valued channel terminates the scan (break, not continue).
    for channel in step_channels:
        if len(channel['values']) != 1:
            shapes.append(len(channel['values']))
            steps.append(channel['name'])
        else:
            break
    # Reverse so the fastest-swept axis ends up last in the reshape.
    shapes = tuple(shapes)[::-1]
    # find the log channels
    log_channels = logfile.getLogChannels()
    # store the Measurements
    for channel in log_channels:
        if len(log_channels) == 1:
            msm = logfile.getData(name=channel['name'])
            msm = msm.reshape(shapes)
        else:
            msm = logfile.getData(name=channel['name'])
            msm = msm.reshape(shapes)
            # Two trailing-axis moves rotate the axis order for multi-log files.
            msm = np.moveaxis(msm, -1, 0)
            msm = np.moveaxis(msm, -1, 0)
        data[channel['name']] = msm
    # store the step channels
    # NOTE(review): the loop variable source `steps` is re-bound to an ndarray
    # inside the body; iteration still walks the original list, but the
    # `len(steps) == 1` test inspects the *array* from the second pass on.
    # Looks intentional only for the single-step case -- verify with real data.
    for channel in steps:
        if len(steps) == 1:
            steps = logfile.getData(name=channel)
            steps = steps.reshape(shapes)
        else:
            steps = logfile.getData(name=channel)
            steps = steps.reshape(shapes)
            steps = np.moveaxis(steps, -2, 0)
            steps = np.moveaxis(steps, -1, 0)
        data[channel] = steps
    return data
def x_y_derivator(data, x=None, y=None):
    """Superimposed x/y derivative of a 2-D map.

    Differentiates *data* along both axes, subtracts the average background
    slope of each direction, and averages the two gradients over their
    common (nx-1, ny-1) window.

    Parameters
    ----------
    data : np.array() <- shape(nx, ny)
    x : np.array() <- shape(nx,), optional
    y : np.array() <- shape(ny,), optional

    Returns
    -------
    np.array() <- shape(nx-1, ny-1)
    """
    # Fall back to plain index coordinates when either axis is missing.
    if x is None or y is None:
        x = np.arange(0, data.shape[0])
        y = np.arange(0, data.shape[1])
    diff_rows = np.diff(data, axis=0)
    diff_cols = np.diff(data, axis=1)
    step_x = np.diff(x)
    step_y = np.diff(y)
    grad_x = (diff_rows.T / step_x).T
    grad_y = diff_cols / step_y
    # subtract QPC slope: average end-to-end slope along each direction
    slope_x = (data[:, 0] - data[:, -1]) / (x[0] - x[-1])
    slope_y = (data[0, :] - data[-1, :]) / (y[0] - y[-1])
    grad_x = grad_x - np.average(slope_x)
    grad_y = grad_y - np.average(slope_y)
    return (grad_x[:, :-1] + grad_y[:-1, :]) / 2
def subtract_median(data):
    """Centre *data* by subtracting its median.

    Parameters
    ----------
    data : array-like

    Returns
    -------
    data shifted so that its median is zero
    """
    return data - np.median(data)
def scaler(data):
    """Rescale *data* to unit standard deviation.

    Parameters
    ----------
    data : array-like

    Returns
    -------
    data divided by its (population) standard deviation
    """
    return data / np.std(data)
def remove_outliers(data):
    """Clamp extreme points in place.

    A robust spread is estimated on the central data (strictly between the
    0.8% and 99.2% quantiles); any point farther than 4.5 of those standard
    deviations from the robust median is replaced by that median.

    Parameters
    ----------
    data : np.array (mutated in place)

    Returns
    -------
    The same array, with outliers replaced by the robust median.
    """
    hi = np.quantile(data, 0.992)
    lo = np.quantile(data, 0.008)
    core = data[(data > lo) & (data < hi)]
    sigma = np.std(core)
    center = np.median(core)
    mask = (data > center + 4.5 * sigma) | (data < center - 4.5 * sigma)
    data[mask] = center
    return data
def preprocessor(data, rescale=True):
    """Full preprocessing chain for a raw 2-D measurement map.

    Scales the data, takes the x/y derivative, removes outliers,
    optionally re-scales, and finally centres on the median.
    """
    derived = remove_outliers(x_y_derivator(scaler(data)))
    if rescale:
        derived = scaler(derived)
    return subtract_median(derived)
def collect_data(input_folder, ratio):
    """
    Collects data and puts them in a pandas DataFrame
    Parameters
    ----------
    input_folder : os.path object
        folder where data is stored
    ratio : float
        describes #learn/#test set.
    Returns
    -------
    pd.DataFrame()
        columns:
        'x': np.array() <- shape(f+2p, f+2p)
        'y': list like <- len(3)
    """
    # TODO implement ratio
    # Collect per-file frames and concatenate once at the end:
    # DataFrame.append was removed in pandas 2.x and was O(n^2) anyway.
    frames = []
    folderpaths = [os.path.normpath((os.path.join(input_folder, x)))
                   for x in os.listdir(input_folder) if not x.endswith('.gitkeep')]
    for folder in folderpaths:
        filepaths = [os.path.normpath((os.path.join(folder, x)))
                     for x in os.listdir(folder) if not x.endswith('.gitkeep')]
        for file in filepaths:
            df = pd.read_pickle(file)
            # Keep only feasible samples.
            df = df[df['is_feas'] == 1]
            frames.append(df[['frames', 'label']])
    if frames:
        data = pd.concat(frames, ignore_index=True)
    else:
        # No input files: return an empty frame with the expected columns.
        data = pd.DataFrame(columns=['frames', 'label'])
    return data.rename(columns={'frames': 'x', 'label': 'y'})
#
#
# def create_encoding(data):
# """
#
# Parameters
# ----------
# data
#
# Returns
# -------
#
# """
# labels = data['y']
def create_imgs(folder_in, folder_out):
    """
    Render every Labber .hdf5 measurement in *folder_in* as a derivative
    colour map and save it as a .png in *folder_out*.

    Parameters
    ----------
    folder_in : path
        path to folder with .hdf5 files (calibration *_cal.hdf5 are skipped)
    folder_out : path
        path to folder where img are saved
    Returns
    -------
    None
    """
    filepaths = [os.path.normpath(os.path.join(folder_in, x))
                 for x in os.listdir(folder_in) if (x.endswith('.hdf5') and not x.endswith('_cal.hdf5'))]
    for file in filepaths:
        labber_data = Labber.LogFile(file)
        raw_data = data_creator(labber_data)
        # Derivative of the QPC current map over the LPG0/MPG0 gate axes.
        # NOTE(review): assumes channels 'I QPC', 'LPG0', 'MPG0' exist -- confirm.
        data = x_y_derivator(raw_data['I QPC'][:, :], raw_data['LPG0'][:, 0], raw_data['MPG0'][0, :])
        # Image name = measurement file name without extension.
        filename = os.path.split(file)[-1]
        filename = filename.split('.')[0]
        plt.figure()
        # Axes are cropped by one sample to match the derivative's shape.
        plt.pcolormesh(raw_data['LPG0'][:-1, 0], raw_data['MPG0'][0, :-1], data)
        plt.title(filename)
        plt.savefig(os.path.join(folder_out, filename + '.png'))
|
15,272 | e4a18f3e06099e521ccfd4e83f7242b4c330c18f | #!/usr/bin/env python3
"""
TFTP Server Command.

Parses command-line options (port, timeout, working directory, threading
mode) and runs the TFTP server loop.
"""
import sys  # NOTE(review): imported but unused in this script
import os
import argparse
import tftp
# Defaults: non-privileged TFTP test port, 2 second timeout.
TIMEOUT = 2
PORT = 6969
parser = argparse.ArgumentParser(prog='tftp-server')
parser.add_argument('-p', '--port', type=int, default=PORT)
parser.add_argument('-t', '--timeout', type=int, default=TIMEOUT)
parser.add_argument('-c', '--cwd', type=str, default='')
parser.add_argument('--thread', action='store_true')
args = parser.parse_args()
# change current working directory
if args.cwd != '': os.chdir(args.cwd)
# run main server loop
tftp.runServer(('', args.port), args.timeout, args.thread)
# EOF
|
15,273 | 01cd5a9c980ca6aa3129409fa8fdce0fadd9fba9 | import urllib, urllib2
from simplejson import loads
BASE_URL = 'http://rest.nexmo.com/sms'
JSON_END_POINT = BASE_URL + '/json'
XML_END_POINT = BASE_URL + '/xml'
class NexmoError(Exception):
    """Raised when the Nexmo API reports a non-success message status."""
    pass
class Client(object):
    """
    Send SMS using nexmo
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def send_message(self, text, from_, to):
        """Send *text* from *from_* to *to*; raise NexmoError on any failed part.

        Posts to the JSON endpoint and checks the status of every message
        part in the reply ('0' means success).
        """
        data = {
            'username': self.username,
            'password': self.password,
            'text': text,
            'from': from_,
            'to': to,
        }
        response = urllib2.urlopen(JSON_END_POINT, urllib.urlencode(data))
        reply = loads(response.read())
        for m in reply['messages']:
            # Bug fix: compare by value, not identity. `is not '0'` relied on
            # CPython string interning and could mis-classify a '0' status.
            if m['status'] != '0':
                raise NexmoError(m['error-text'])
|
15,274 | c1896fc755d5106926aa7ed6b9cfc8b1731ee856 |
# Input: dollar amount on one line; convert to integer cents.
# Bug fix: round() guards against binary float error -- e.g.
# int(0.29 * 100) == 28 because 0.29 * 100 == 28.999...
with open("coins.in") as f:
    value = int(round(float(f.readline()) * 100))
# Define coin values in cents
coins = [1, 5, 10, 25]
# Greedy method: repeatedly take the largest coin that still fits.
num = 0  # number of coins
cc = 3  # current coin (index into coins, starting at the largest)
while value > 0:
    while coins[cc] > value:
        cc -= 1
    value -= coins[cc]
    num += 1
# Output: the minimal coin count (greedy is optimal for this coin system)
with open("coins.out", "w") as f:
    f.write(str(num))
|
15,275 | ae0c8b8223fd273e19d9c8def24161a4aa10e24a | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import os
import six
# Common os APIs, e.g. file helpers and path helpers
def os_path():
    """Demo of os.path helpers and six's Python-version flags (all printed)."""
    print(os.path.abspath("."))
    file_name = "pytest"
    # False unless a file literally named "pytest" exists in the cwd.
    print(os.path.isfile(file_name))
    print(six.PY3)
    print(six.PY2)
|
15,276 | f644833bae221ccecaf4ae4032d1f9197b7b374f | from selenium import webdriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
class Browser:
    """Thin wrapper around a Selenium-driven Chrome browser."""
    def __init__(self, driver_exe, path, options=None):
        """Start a Chrome driver.

        Parameters
        ----------
        driver_exe : str
            Driver kind; only 'chromedriver' is supported.
        path : str
            Path to the chromedriver executable.
        options : list[str] | None
            Chrome command-line arguments.  (Was a mutable default `[]`.)
        """
        if driver_exe == 'chromedriver':
            chromedriver_path_location = path
            webdriver_options = None
            if options:
                webdriver_options = webdriver.ChromeOptions()
                for option in options:
                    webdriver_options.add_argument(option)
            # Bug fix: previously the driver was only created inside the
            # `if options:` branch, so Browser(..., options=[]) had no driver.
            self.driver = webdriver.Chrome(chromedriver_path_location, options=webdriver_options)
    def wait_for_element(self):
        # TODO: for replacing sleeps with webdriverwait
        pass
    def close_browser(self):
        """Close the current window."""
        self.driver.close()
    def cleanup_browser(self):
        """Quit the driver and release all associated resources."""
        self.driver.quit()
|
15,277 | f02bf9918148011259262671ea75abbfe8537c41 | import os
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException, BinanceOrderException
# init: credentials come from the environment, never hard-coded.
api_key = os.environ.get('binance_api')
api_secret = os.environ.get('binance_secret')
client = Client(api_key, api_secret)
## main
# Place an OCO (one-cancels-the-other) sell: limit leg at 250,
# stop-limit leg triggered at 150.
try:
    order = client.create_oco_order(
        symbol='ETHUSDT',
        side='SELL',
        quantity=100,
        price=250,
        stopPrice=150,
        stopLimitPrice=150,
        stopLimitTimeInForce='GTC')
except BinanceAPIException as e:
    # error handling goes here
    print(e)
except BinanceOrderException as e:
    # error handling goes here
    print(e)
# use exchange info to confirm order types
info = client.get_symbol_info('ETHUSDT')
print(info['orderTypes'])
|
15,278 | ffe4e9c5b61ecf1664d946d5cb670f988025f1fc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
    with open('text1.txt', 'r', encoding="utf8") as f:
        text1 = f.read()
    # Normalise the other sentence-ending characters to a period.
    text1 = text1.replace("!", ".")
    text1 = text1.replace("?", ".")
    # Collapse every ellipsis / repeated dot into a single period.
    while ".." in text1:
        text1 = text1.replace("..", ".")
    # Split the text into sentences.
    sentences = text1.split(".")
    # Print only the sentences that contain a comma.
    for sentence in sentences:
        if "," in sentence:
print("{}.".format(sentence)) |
15,279 | bc0a8c66b7880cbf706030fb2b0712e0f787e3da | #!/usr/bin/env python
# encoding: utf-8
"""
exp1.py
First experiment: extremely simple region detection using thresholding.
Created by Oliver Smith on 2009-08-16.
Copyright (c) 2009 Oliver Smith. All rights reserved.
"""
import cv
import sys
val1 = 1  # adjustable parameter updated by the trackbar callback below
def chgv1(x):
    """Trackbar callback: store the new slider value in module-global val1."""
    global val1
    val1 = x
def validate_contour(c):
    """Keep only contours whose area exceeds a small threshold (noise filter)."""
    # Earlier bounding-box/aspect-ratio heuristic, kept for reference:
    #x_min = min(pt[0] for pt in c)
    #x_max = max(pt[0] for pt in c)
    #y_min = min(pt[1] for pt in c)
    #y_max = max(pt[1] for pt in c)
    #dx = x_max - x_min
    #dy = y_max - y_min
    #d = dy!=0 and dx/dy or 0
    #return dx != 0 and dy != 0 and d>0.25 and d<4.0
    return cv.ContourArea(c) > 6
def main():
    """Threshold-based region detection demo (legacy OpenCV `cv` API).

    Loads the image given on the command line, equalises, adaptively
    thresholds, draws all sufficiently large contours, and shows the result.
    """
    # NOTE(review): `val2` is declared global but never defined in this file.
    global val1, val2
    img = cv.LoadImage(sys.argv[1])
    if img:
        cv.NamedWindow("bar")
        # Single-channel working images (img3 appears unused below).
        img2 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        img21 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        img3 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_16S, 1)
        img4 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(img, img2, cv.CV_BGR2GRAY)
        cv.EqualizeHist(img2, img21)
        stor = cv.CreateMemStorage()
        # Inverted binary threshold, 7x7 Gaussian neighbourhood.
        cv.AdaptiveThreshold(img21, img4, 255,
            cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C, cv.CV_THRESH_BINARY_INV,
            7, 7)
        cont = cv.FindContours(img4,
            stor,
            cv.CV_RETR_LIST,
            cv.CV_CHAIN_APPROX_NONE)
        img5 = cv.CloneImage(img)
        # Walk the contour list, drawing only the ones that pass the filter.
        while cont:
            if validate_contour(cont):
                cv.DrawContours(img5, cont, (255,255,255), (255,255,255),0,2,8,(0,0))
            cont = cont.h_next()
        cv.ShowImage("bar", img5)
        cv.WaitKey(0)
if __name__ == '__main__':
    main()
|
15,280 | e488dd4af33f95678b0ecbb9fac06ef97d4ef672 | __author__ = 'arsia'
class CpuPlayer:
    """Rule-based tic-tac-toe AI.

    Plays a hard-coded opening (corner first), then per-round heuristics:
    complete own lines, block the opponent's, and counter a few known fork
    patterns.  The board is expected to expose `spots`, a 3x3 list of
    '-', 'x' or 'o'.
    """
    def __init__(self, mark):
        self.mark = mark
        self.round = 1 # To track if it is the first or second or ... move by computer
        self.op = '-' # What marker the opponent is using (x or o)?
        self.moved = False # To make sure we do not mark more than one spot in each round
        if self.mark == 'x':
            self.op = 'o'
        else:
            self.op = 'x'
    def play(self, board):
        """Make this round's move on *board* (mutates board.spots)."""
        # First move by computer
        if self.round == 1:
            if board.spots[0][0] == '-':
                board.spots[0][0] = self.mark
            self.round += 1
        # Second move by computer
        elif self.round == 2:
            if (board.spots[0][1] == self.op or board.spots[1][0] == self.op or
                    board.spots[1][2] == self.op or board.spots[2][1] == self.op)and board.spots[1][1] == '-':
                board.spots[1][1] = self.mark
            elif (board.spots[2][0] == self.op or board.spots[2][2] == self.op) and board.spots[0][2] == '-':
                board.spots[0][2] = self.mark
            elif board.spots[0][2] == self.op and board.spots[2][0] == '-':
                board.spots[2][0] = self.mark
            elif board.spots[1][1] == self.op and board.spots[2][2] == '-':
                board.spots[2][2] = self.mark
            self.round += 1
        # Third move by computer
        elif self.round == 3:
            self.moved = False
            # NOTE(review): `if not self.moved:` right after the reset is
            # always true; kept as-is to preserve behavior exactly.
            if not self.moved:
                # check for 2 self.mark in any row and mark the third spot if empty
                if self.checkRowCol(board,self.mark):
                    self.moved = True
                elif self.checkDiagonals(board,self.mark):
                    self.moved = True
                elif self.checkRowCol(board,self.op):
                    self.moved = True
                elif self.checkDiagonals(board,self.op):
                    self.moved = True
            if not self.moved:
                # check for following case
                # x | o | -
                # ---------
                # - | x | -
                # ---------
                # X | - | o
                if board.spots[0][0] == board.spots[1][1] == self.mark and \
                        board.spots[0][1] == board.spots[2][2] == self.op and board.spots[2][0] == "-":
                    board.spots[2][0] = self.mark
                    self.moved = True
                # check for following case
                # x | - | o
                # ---------
                # o | - | -
                # ---------
                # x | - | X
                elif board.spots[0][0] == board.spots[2][0] == self.mark and \
                        board.spots[1][0] == board.spots[0][2] == self.op and board.spots[2][2] == "-":
                    board.spots[2][2] = self.mark
                    self.moved = True
                # check for following case
                # x | - | X
                # ---------
                # o | x | -
                # ---------
                # - | - | o
                elif board.spots[0][0] == board.spots[1][1] == self.mark and \
                        board.spots[1][0] == board.spots[2][2] == self.op and board.spots[0][2] == "-":
                    board.spots[0][2] = self.mark
                    self.moved = True
                # check for following case
                # x | o | x
                # ---------
                # - | - | -
                # ---------
                # o | - | X
                elif board.spots[0][0] == board.spots[0][2] == self.mark and \
                        board.spots[0][1] == board.spots[2][0] == self.op and board.spots[2][2] == "-":
                    board.spots[2][2] = self.mark
                    self.moved = True
                # check for following case
                # x | o | x
                # ---------
                # - | - | -
                # ---------
                # X | - | o
                elif board.spots[0][0] == board.spots[0][2] == self.mark and \
                        board.spots[0][1] == board.spots[2][2] == self.op and board.spots[2][0] == "-":
                    board.spots[2][0] = self.mark
                    self.moved = True
            self.round += 1
        # Forth move by computer
        elif self.round == 4:
            self.moved = False
            # check for 2 self.mark in any row and mark the third spot if empty
            if self.checkRowCol(board,self.mark):
                self.moved = True
            elif self.checkDiagonals(board,self.mark):
                self.moved = True
            elif self.checkRowCol(board,self.op):
                self.moved = True
            elif self.checkDiagonals(board,self.op):
                self.moved = True
            self.round += 1
        # Fifth move by computer
        elif self.round == 5:
            self.moved = False
            # check for 2 self.mark in any row and mark the third spot if empty
            if self.checkRowCol(board,self.mark):
                self.moved = True
            elif self.checkDiagonals(board,self.mark):
                self.moved = True
            elif self.checkRowCol(board,self.op):
                self.moved = True
            elif self.checkDiagonals(board,self.op):
                self.moved = True
#######################################################################################################################
    def checkRowCol(self, board, mrk):
        """Complete (own mrk) or block (opponent mrk) any row/column with two
        mrk marks and an empty third spot; returns True if a move was made."""
        for i in range(len(board.spots)):
            # check for 2 mrk in any row and mark the third spot if empty
            if board.spots[i][0] == board.spots[i][1] == str(mrk) and board.spots[i][2] == '-':
                board.spots[i][2] = self.mark
                return True
            elif board.spots[i][0] == board.spots[i][2] == str(mrk) and board.spots[i][1] == '-':
                board.spots[i][1] = self.mark
                return True
            elif board.spots[i][1] == board.spots[i][2] == str(mrk) and board.spots[i][0] == '-':
                board.spots[i][0] = self.mark
                return True
            # check for 2 mrk in any col and mark the third spot if empty
            elif board.spots[0][i] == board.spots[1][i] == str(mrk) and board.spots[2][i] == '-':
                board.spots[2][i] = self.mark
                return True
            elif board.spots[0][i] == board.spots[2][i] == str(mrk) and board.spots[1][i] == '-':
                board.spots[1][i] = self.mark
                return True
            elif board.spots[1][i] == board.spots[2][i] == str(mrk) and board.spots[0][i] == '-':
                board.spots[0][i] = self.mark
                return True
        return False
#######################################################################################################################
    def checkDiagonals(self, board, mrk):
        """Same as checkRowCol, but for the two diagonals."""
        # check for 2 mrk in top left to bottom right diagonal and mark the third spot if empty
        if (board.spots[0][0] == board.spots[1][1] == mrk) and board.spots[2][2] == '-':
            board.spots[2][2] = self.mark
            return True
        elif (board.spots[0][0] == board.spots[2][2] == mrk) and board.spots[1][1] == '-':
            board.spots[1][1] = self.mark
            return True
        elif (board.spots[1][1] == board.spots[2][2] == mrk) and board.spots[0][0] == '-':
            board.spots[0][0] = self.mark
            return True
        # check for 2 mrk in top right to bottom left diagonal and mark the third spot if empty
        elif (board.spots[0][2] == board.spots[1][1] == mrk) and board.spots[2][0] == "-":
            board.spots[2][0] = self.mark
            return True
        elif (board.spots[0][2] == board.spots[2][0] == mrk) and board.spots[1][1] == "-":
            board.spots[1][1] = self.mark
            return True
        elif (board.spots[1][1] == board.spots[2][0] == mrk) and board.spots[0][2] == "-":
            board.spots[0][2] = self.mark
            return True
        return False
#######################################################################################################################
class Player:
    """Human player: reads moves like "12" (row digit, col digit) from stdin."""
    def __init__(self, mark):
        self.mark = mark
    def play(self, board):
        """Prompt until a valid, empty spot is entered, then mark it."""
        spot = input("Where do you want to play? ")
        while len(spot) < 2:
            spot = input("Bad input, try again: ")
        row = int(spot[0])
        col = int(spot[1])
        # Re-prompt while the chosen spot is already taken.
        while board.spots[row][col] != "-":
            print ("The spot " + spot + " is not empty! choose another spot.")
            spot = input("Where do you want to play? ")
            row = int(spot[0])
            col = int(spot[1])
        if board.spots[row][col] == "-":
            board.spots[row][col] = self.mark
|
15,281 | 2998008e227da544663c29ccc79fa995ba2b42b6 | a=float(input('enter number '))
# The first number (read above) initialises both running extremes.
# Renamed from min/max to avoid shadowing the builtins.
smallest = a
largest = a
# Read five more numbers, updating the extremes as we go.
for _ in range(5):
    a = float(input('enter number '))
    if a > largest:
        largest = a
    # Simplified: `a < largest and a < smallest` was redundant,
    # since anything below the minimum is necessarily below the maximum.
    if a < smallest:
        smallest = a
# Report both extremes rounded to two decimals.
print('\n', 'min is: ', round(smallest, 2), '\n', 'max is: ', round(largest, 2))
15,282 | 7b44214b45b2579f62d1b3067c15f784b2d11bca | #!/usr/bin/env python
import argparse
import logging
import os
import sys
import time
import parsers
import settings
from src import utils
logger = logging.getLogger(__name__)
MAIN_URL = "http://www.aemet.es/es/eltiempo/observacion/ultimosdatos"
STATE_URL = MAIN_URL + "?k=%s&w=0&datos=det&x=h24&f=temperatura"
STATION_URL = MAIN_URL + "_%s_datos-horarios.csv?k=%s&l=%s&datos=det"
def download_data(output, c_time, format):
    """Scrape the AEMET main page and download data for every state found."""
    main_content = utils.download_content(MAIN_URL)
    state_parser = parsers.MainParser(main_content)
    for state in state_parser.get_match():
        logger.debug("Processing state %s", state)
        download_state_data(state, output, c_time, format)
def download_state_data(state, output, c_time, format):
    """Download data for every station of one state.

    NOTE(review): `format` is accepted but never forwarded to
    download_station_data -- confirm whether that is intentional.
    """
    state_content = utils.download_content(STATE_URL % state)
    state_parser = parsers.StateParser(state_content)
    for station in state_parser.get_match():
        logger.debug("Processing station %s", station)
        download_station_data(state, station, output, c_time)
def download_station_data(state, station, output, c_time, format='txt'):
    """Download one station's hourly data into output/state/station/<time>.<format>.

    Bug fix: previously there was no `format` parameter, so the filename
    extension was rendered from the *builtin* ``format`` function, producing
    names like "202001010000.<built-in function format>".  The new keyword
    defaults to 'txt' and keeps the call signature backward-compatible.
    """
    def _get_station_filename():
        """ Returns the full path where to download the file creating the
        necessary directories. """
        output_dir = os.path.join(output, state, station)
        if not os.path.isdir(output_dir):
            logger.debug("Creating directory %s", output_dir)
            os.makedirs(output_dir)
        return os.path.join(output_dir, "%s.%s" % (c_time, format))
    url = STATION_URL % (station, state, station)
    filename = _get_station_filename()
    utils.download_content(url, filename)
def parse_options():
    """Parse the downloader's command-line options.

    Returns
    -------
    argparse.Namespace with debug, verbose, output and format attributes.
    """
    parser = argparse.ArgumentParser(
        description=("Download the hourly weather data for all the"
                     " stations available in aemet.es.")
    )
    parser.add_argument('-d', '--debug', action='store_true',
                        help="Enable debug mode.")
    parser.add_argument('-v', '--verbose', default="2",
                        help="Verbosity level. Options: 0=ERROR,"
                             " 1=WARNING, 2=INFO or 3=DEBUG. Default: 2.")
    parser.add_argument('-o', '--output',
                        default=settings.DEFAULT_OUTPUT_DIR,
                        help="Output directory path where files will be"
                             " downloaded. Default: aemet_data.")
    # Help text fixed: the actual default is 'txt' (it said csv), and a
    # space was missing between the two sentences.
    parser.add_argument('-f', '--format', default='txt',
                        help="Store file in the specified format."
                             " Options: csv or txt. Default: txt.")
    return parser.parse_args()
def main(options):
    """Ensure the output directory exists, then download everything,
    timestamping this run to the current hour."""
    logger.debug("Storing files into %s", options.output)
    if not os.path.isdir(options.output):
        os.mkdir(options.output)
    download_data(
        output=options.output,
        c_time=time.strftime("%Y%m%d%H00"),
        format=options.format
    )
def get_logger_config(options):
    """Translate CLI options into logging.basicConfig keyword arguments.

    Debug mode logs everything to stderr; otherwise the level follows the
    verbosity flag (unknown values keep DEBUG) and output goes to a dated
    log file, whose directory is created on demand.
    """
    config = dict(level=logging.DEBUG, format=settings.LOG_FORMAT)
    if options.debug:
        return config
    level_by_verbosity = {
        "0": logging.ERROR,
        "1": logging.WARNING,
        "2": logging.INFO,
    }
    if options.verbose in level_by_verbosity:
        config['level'] = level_by_verbosity[options.verbose]
    log_basedir = os.path.dirname(settings.LOG_FILE)
    if not os.path.exists(log_basedir):
        os.makedirs(log_basedir)
    config['filename'] = settings.LOG_FILE % time.strftime('%Y%m%d')
    return config
if __name__ == '__main__':
    opts = parse_options()
    logging.basicConfig(**get_logger_config(opts))
    logger.info("Start")
    try:
        main(opts)
    except Exception:
        # Log the full traceback and signal failure to the shell.
        logger.exception("Unknown error when getting AEMET data.")
        sys.exit(1)
    logger.info("Done")
    sys.exit(0)
|
15,283 | 80441a828b469490297f8a5a4f7970dd62d51b07 | #!/usr/bin/python
import marathon
import requests
import json
from jsonschema import validate
from marathon import MarathonClient
from marathon.models import MarathonApp, MarathonDeployment, MarathonGroup, MarathonInfo, MarathonTask, MarathonEndpoint, MarathonQueueItem
from marathon.exceptions import InternalServerError, NotFoundError, MarathonHttpError, MarathonError
from marathon.models.events import EventFactory
class DcosMarathon(MarathonClient):
    """MarathonClient that can also talk to Marathon behind DC/OS auth.

    In DC/OS mode the client logs in against the ACS endpoint and attaches
    the resulting token to every request.  Python 2 only (print statement,
    e.message).
    """
    def _is_reachable(self):
        """True when the Marathon URL answers at all (any HTTP status counts)."""
        try:
            response = requests.get(self.marathon_url)
            response.raise_for_status()
            return True
        except requests.exceptions.HTTPError as err:
            # An HTTP error status still proves the server is responding.
            return True
        except:
            pass
        return False
    def _token(self, force_new=False):
        """Return a DC/OS ACS auth token (cached unless force_new); None in
        standalone mode."""
        if self.dcos == False:
            return None
        if self.auth_token != None and not force_new:
            return self.auth_token
        data = '{"uid": "%s", "password": "%s"}'%(self.user, self.password)
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        response = requests.post(self.url.rstrip("/")+"/acs/api/v1/auth/login", data=data, headers=headers)
        response.raise_for_status()
        return response.json()['token']
    def __init__(self, url, username=None, password=None, timeout=10, dcos=True):
        """Set up the client; in DC/OS mode Marathon lives under /marathon."""
        self.marathon_url = url
        if dcos == True:
            self.marathon_url = "%s/marathon"%(url)
        super(DcosMarathon, self).__init__(self.marathon_url, username=username, password=password, timeout=timeout)
        self.url, self.user, self.password, self.dcos = url, username, password, dcos
        self.auth_token = None
        # Probe reachability and fetch a fresh token up front.
        self.can_connect, self.auth_token = self._is_reachable(), self._token(force_new=True)
    def __str__(self):
        """Human-readable summary: url, mode and connection status."""
        mode, status = "dcos", "unknown"
        if self.dcos == False:
            mode = "standalone"
        if self.auth_token != None:
            status = "authenticated"
        if self.can_connect == True:
            status = "reachable"
        return "url: %s, mode: %s, status: %s" % (self.marathon_url, mode, status)
    # Reusing code from thefactory/marathon-python and setting DCOS authorization token
    def _do_request(self, method, path, params=None, data=None):
        """Query Marathon server."""
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        if self.dcos == True:
            # Token auth replaces basic auth in DC/OS mode.
            headers['Authorization'] = "token=%s"%(self._token())
            self.auth = None
        response = None
        # Try each configured server until one answers.
        servers = list(self.servers)
        while servers and response is None:
            server = servers.pop(0)
            url = ''.join([server.rstrip('/'), path])
            try:
                response = self.session.request(
                    method, url, params=params, data=data, headers=headers,
                    auth=self.auth, timeout=self.timeout)
                marathon.log.info('Got response from %s', server)
            except requests.exceptions.RequestException as e:
                marathon.log.error(
                    'Error while calling %s: %s', url, str(e))
        if response is None:
            raise MarathonError('No remaining Marathon servers to try')
        # Map HTTP status classes onto the marathon exception hierarchy.
        if response.status_code >= 500:
            marathon.log.error('Got HTTP {code}: {body}'.format(
                code=response.status_code, body=response.text))
            raise InternalServerError(response)
        elif response.status_code >= 400:
            print response.status_code
            marathon.log.error('Got HTTP {code}: {body}'.format(
                code=response.status_code, body=response.text))
            if response.status_code == 404:
                raise NotFoundError(response)
            else:
                raise MarathonHttpError(response)
        elif response.status_code >= 300:
            marathon.log.warn('Got HTTP {code}: {body}'.format(
                code=response.status_code, body=response.text))
        else:
            marathon.log.debug('Got HTTP {code}: {body}'.format(
                code=response.status_code, body=response.text))
        return response
    # Reusing code from thefactory/marathon-python and setting DCOS authorization token
    def _do_sse_request(self, path, params=None, data=None):
        """Open a server-sent-events stream against the first reachable server."""
        from sseclient import SSEClient
        headers = {'Accept': 'text/event-stream'}
        if self.dcos == True:
            headers['Authorization'] = "token=%s"%(self._token())
            self.auth = None
        messages = None
        servers = list(self.servers)
        while servers and messages is None:
            server = servers.pop(0)
            url = ''.join([server.rstrip('/'), path])
            try:
                messages = SSEClient(url, params=params, data=data, headers=headers,
                                     auth=self.auth)
            except Exception as e:
                marathon.log.error('Error while calling %s: %s', url, e.message)
        if messages is None:
            raise MarathonError('No remaining Marathon servers to try')
        return messages
    def is_reachable(self, force=False):
        """Cached reachability check; pass force=True to re-probe."""
        if self.can_connect == None or force == True:
            self.can_connect = self._is_reachable()
        return self.can_connect
    def _validate_schema(self, config_json, schema_name):
        """Validate config_json against resources/schema/<schema_name>.json.

        No schema name means nothing to check; raises MarathonError when the
        schema file is missing, jsonschema.ValidationError on mismatch.
        """
        if schema_name == None:
            return True
        if config_json == None:
            return False
        try:
            schema_file = 'resources/schema/%s.json'%schema_name
            with open(schema_file, "r") as schema_text:
                schema = json.load(schema_text)
        except IOError as err:
            raise MarathonError("%s: schema not found"%err.filename)
        validate(config_json, schema)
    def validate_app_schema(self, config_json):
        """Validate an app definition payload."""
        self._validate_schema(config_json, 'AppDefinition')
    def validate_group_schema(self, config_json):
        """Validate a group definition payload."""
        self._validate_schema(config_json, 'Group')
|
15,284 | 6437da8de33706f49143bb1ac396be3d45e4d200 | from django.db import models
class Bookinfo(models.Model):
    """A book record with automatic creation/modification dates."""
    bookname = models.CharField(max_length=30, verbose_name='书名')
    # NOTE(review): 'autor' looks like a typo for 'author'; renaming the
    # field would require a schema migration, so it is only flagged here.
    autor = models.CharField(max_length=30, verbose_name='作者')
    postedtime = models.DateField(auto_now_add=True, verbose_name='发表时间')  # set once on insert
    altertime = models.DateField(auto_now=True, verbose_name='修改时间')  # refreshed on every save
    def __str__(self):
        return self.bookname
class Heroinfo(models.Model):
    """A hero character belonging to a book; deleted along with its book."""
    name = models.CharField(max_length=30, verbose_name='名字')
    gender = models.CharField(max_length=10, choices=(("男", "男"), ("女", "女")), verbose_name='性别')
    book = models.ForeignKey(Bookinfo, on_delete=models.CASCADE, verbose_name='书名')
    def __str__(self):
        return self.name
class Goods(models.Model):
    """A named goods item with an alternate-constructor helper."""
    name = models.CharField(max_length=30)
    # manager = models.Manager()
    @classmethod
    def create(cls,name):
        """Build (but do not save) a Goods instance with the given name."""
        goods = cls(name=name)
        return goods
|
15,285 | b2d3cace82dd1e0b8b6842bac81b6edbb7846adb | ##LESSSE
##10 November 2018
##gmidi
##____________
##Python Organologic Midi Dictionary
##____________
# General MIDI (0-based) program numbers grouped into organological families.
organology_dict = {}
organology_dict['flutes'] = [72, 73, 74, 75, 76, 77, 78, 79]
organology_dict['oboes'] = [68, 69]
organology_dict['clarinets'] = [71]
organology_dict['saxofones'] = [64, 65, 66, 67]
organology_dict['bassoon'] = [70]
organology_dict['reed'] = list(range(64, 72))
organology_dict['pipe'] = list(range(72, 80))
organology_dict['woods'] = list(range(64, 80))
organology_dict['piano'] = list(range(0, 8))
organology_dict['organ'] = list(range(16, 24))
organology_dict['keyboards'] = list(organology_dict['piano'])+list(organology_dict['organ'])
organology_dict['guitars'] = list(range(24, 32))
organology_dict['basses'] = list(range(32, 40))
organology_dict['harp'] = [46]
organology_dict['strings'] = list(range(40, 47)) + list(range(48, 52)) + [55]
organology_dict['voices'] = list(range(52, 55))
organology_dict['tuba'] = [58]
organology_dict['trombone'] = [57]
organology_dict['trumpets'] = [56, 59]
organology_dict['horns'] = list(range(60, 64))
organology_dict['brass'] = list(range(56, 64))
organology_dict['tubular_bells'] = [14]
organology_dict['chromatic_percussion'] = list(range(8, 16)) + list(range(112, 120))
organology_dict['percussion'] = list(range(34, 81))
organology_dict['timpani'] = [47]
organology_dict['all'] = list(range(0, 128))
# 128 is outside the 0-127 GM program range — presumably a "no program"
# sentinel; confirm against callers.
organology_dict['default'] = [128]
# (a stray no-op expression statement `organology_dict` was removed here)


def translate(dic):
    """Expand family-level keys into per-program keys.

    Args:
        dic: mapping of (family_name, tag) -> value, where family_name is a
            key of organology_dict.

    Returns:
        dict mapping (program_number, tag) -> value for every program of
        each referenced family.
    """
    d = {}
    for key in dic:
        for program in organology_dict[key[0]]:
            d[(program, key[1])] = dic[key]
    return d
|
15,286 | 260916194d37710564b85914f46887f07406c4bd | import pyramid.testing
import pym.testing
import pym.models
def before_all(context):
    """Behave hook: build the test application once and expose its parts on context."""
    args = pym.testing.TestingArgs
    app = pym.testing.init_app(args, setup_logging=True)
    # Copies keys such as settings, DbEngine, DbSession, DbBase onto the
    # behave context so every scenario can reach them as attributes.
    for key, value in app.items():
        setattr(context, key, value)
# noinspection PyUnusedLocal
def before_scenario(context, scenario):
    """Behave hook: set up a fresh Pyramid test configurator and DB session per scenario."""
    context.configurator = pyramid.testing.setUp(
        request=pyramid.testing.DummyRequest(),
        settings=context.settings
    )
    context.sess = pym.models.DbSession()
# noinspection PyUnusedLocal
def after_scenario(context, scenario):
    """Behave hook: tear down the Pyramid test configuration created in before_scenario."""
    pyramid.testing.tearDown()
#context.sess.remove() |
15,287 | a4d4ec893c77f94f5666bff0352606e1e16a23bf | import torch.nn as nn
class SentimentLSTM(nn.Module):
    """
    The RNN model that will be used to perform Sentiment analysis.

    Embedding -> stacked LSTM -> dropout -> linear -> sigmoid; the forward
    pass returns only the sigmoid output for the final time step.
    """

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.7):
        """
        Initialize the model by setting up the layers.

        Args:
            vocab_size: number of rows in the embedding table
            output_size: width of the final linear layer
            embedding_dim: embedding vector width
            hidden_dim: LSTM hidden-state width
            n_layers: number of stacked LSTM layers
            drop_prob: dropout probability (between LSTM layers and before fc)
        """
        super(SentimentLSTM, self).__init__()
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # define all layers
        self.embed = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_size)
        self.sigmoid = nn.Sigmoid()
        # BUG FIX: was nn.Dropout(p=0.7), silently ignoring drop_prob.
        self.drp = nn.Dropout(p=drop_prob)

    def forward(self, x, hidden):
        """
        Perform a forward pass of our model on some input and hidden state.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len)
            hidden: (h_0, c_0) tuple as produced by init_hidden
        Returns:
            (sig_out, hidden): last-time-step sigmoid output per batch
            element, and the final LSTM state.
        """
        batch_size = x.shape[0]
        x = self.embed(x)
        x, hidden = self.lstm(x, hidden)
        # flatten (batch, seq, hidden) -> (batch*seq, hidden) for the fc layer
        x = x.reshape(-1, self.hidden_dim)
        x = self.drp(x)
        x = self.fc(x)
        sig_out = self.sigmoid(x)
        # keep only the sigmoid output of the last time step
        # (assumes output_size == 1 so [:, -1] picks the final step — confirm)
        sig_out = sig_out.reshape(batch_size, -1)
        sig_out = sig_out[:, -1]
        return sig_out, hidden

    def init_hidden(self, batch_size):
        ''' Initializes hidden state '''
        # Two zero tensors of size (n_layers, batch_size, hidden_dim) for the
        # hidden and cell state.  new_zeros() places them on the same
        # device/dtype as the model weights.  BUG FIX: the original called
        # torch.cuda.is_available() although `torch` was never imported
        # (NameError), and would have put the state on the GPU even when the
        # model itself lived on the CPU.
        weight = next(self.parameters())
        shape = (self.n_layers, batch_size, self.hidden_dim)
        return (weight.new_zeros(shape), weight.new_zeros(shape))
|
15,288 | 9606b5349d79a3e0733f0009ec042b29614dabae | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import sys
import time
# argv[1] selects the side ("right" or anything else = left);
# argv[2] is an integer offset used to decorrelate parallel invocations.
offset = int(sys.argv[2])

# Use a dedicated Random instance: the original rebound the name `random`,
# shadowing the module it had just imported.
rng = random.Random()
rng.seed(time.time() + offset)

# Build a three-character column of quadrant block glyphs.
data = ""
if sys.argv[1] == "right":
    data += rng.choice(["▙", "▛", "█"])
    data += rng.choice(["▚", "▞"])
    data += rng.choice(["▗", "▝"])
else:
    data += rng.choice(["▖", "▘"])
    data += rng.choice(["▚", "▞"])
    data += rng.choice(["▜", "▟", "█"])
print(data)
|
15,289 | 16578416e62f731594c898e94affbb71d98de8f2 | # coding=utf-8
# Input:_2015_6_2015_6_7_in.csv,_2015_6_2015_6_7_out.csv
# Output:match
# Author:ZJW
import csvdata
import diffdata
import evlfunc
import connectingcar
import storage
import time
import codecs
'''
2轴车:1min26s 200duo
3轴车: 23s 100duo
4轴车:22s
5轴车: 2s
6轴车: 6min57 500duo
设置定时器进行验证
'''
def run(zss,k1,k2):
time1 = csvdata.opencsv_in(zss)[0]
time2 = csvdata.opencsv_out(zss)[0]
height1 = csvdata.opencsv_in(zss)[1]
height2 = csvdata.opencsv_out(zss)[1]
# print time1 # 入站时间数组,string类型
# print time2 # 出战时间数组,string类型
# print height1 # 入站车重数组
# print height2 # 出站车重数组
time_in = diffdata.changetime(time1, time2)[0]
time_out = diffdata.changetime(time1, time2)[1]
# print time_in # 入站时间数组,datetime类型
# print time_out # 出站时间数组,datetime类型
difftime1 = diffdata.caltime(time_in, time_out)[0]
difftime2 = diffdata.caltime(time_in, time_out)[1]
index1 = diffdata.caltime(time_in, time_out)[2]
index2 = diffdata.caltime(time_in, time_out)[3]
# print difftime1 # 符合时间差条件下的入站时间差数组
# print difftime2 # 符合时间差条件下的出站时间差数组
# print index1 # 符合时间差条件下的入站序列数组
# print index2 # 符合时间差条件下的出站序列数组
diffheight_in = diffdata.calheight_in(height1,height2,difftime1,index1)[0]
diffheight_out = diffdata.calheight_out(height1,height2,difftime2,index2)[0]
difftime_in = diffdata.calheight_in(height1,height2,difftime1,index1)[1]
difftime_out = diffdata.calheight_out(height1,height2,difftime2,index2)[1]
index_in = diffdata.calheight_in(height1,height2,difftime1,index1)[2]
index_out = diffdata.calheight_out(height1,height2,difftime2,index2)[2]
# print diffheight_in # 符合重量差和时间差条件的入站重量差数组
# print diffheight_out # 符合重量差和时间差条件的出站重量差数组
# print difftime_in # 符合重量差和时间差条件的入站时间差数组
# print difftime_out # 符合重量差和时间差条件的出站时间差数组
# print index_in # 符合重量差和时间差条件的入站序列数组
# print index_out # 符合重量差和时间差条件的出站序列数组
rkdata = evlfunc.getdata(difftime_in,diffheight_in,k1,k2)
ckdata = evlfunc.getdata(difftime_out,diffheight_out,k1,k2)
# print rkdata # 入口车辆数据的评价函数
# print ckdata # 出口车辆数据的评价函数
rkpri = evlfunc.getpri(rkdata,index_in)
ckpri = evlfunc.getpri(ckdata,index_out)
# print rkpri # 入口车辆数据优先级顺序数组
# print ckpri # 出口车辆数据优先级顺序数组
match = connectingcar.find_stable_matching(rkpri,ckpri)
# print match # 匹配结对的数据
cph_in = storage.opencph_in(zss)
cph_out = storage.opencph_out(zss)
t = 0
f = 0
for i in match:
#print cph_in[i[0]].decode('utf-8'),'---',cph_out[i[1]].decode('utf-8')
if cph_in[i[0]] == cph_out[i[1]]:
t += 1
else:
f += 1
print t / float(t + f)
storage.savedata(match,cph_in,cph_out) # 将match存储进.csv中
if __name__ == "__main__":
    time_start=time.time();# seconds since the 1970-01-01 epoch (original comment said "milliseconds", which is wrong)
    zss = '2'  # axle-class selector used to pick the input files
    # Weighting factors passed through to evlfunc.getdata — presumably the
    # time-difference vs. weight-difference weights; confirm in evlfunc.
    k1 = 0.5
    k2 = 0.5
    '''
    for i in range(100):
        for j in range(100 - i):
            run(zss,float(i)/100,float(j)/100)
    '''
    run(zss,k1,k2)
    time_end=time.time();# seconds since the epoch
    t = time_end - time_start
    wr = zss + u"轴车的运行时间为:" + str(t) + 's'
    #print wr
    # print type(wr)
    '''
    with codecs.open(u'F:\\研究生之\\20171127\\time.txt','a','utf-8') as f:
        f.write('\n'+wr)
    '''
|
15,290 | 21e2483fd03adfbfda85f4bbb453c22f326d3966 | fullTextPath = '/mnt/datasets/erudit/'
# Relative working directories used by the pipeline stages.
extractedTextPath = 'fullText/'
# NOTE(review): no trailing slash here, unlike the sibling constants —
# confirm consumers join paths accordingly.
rankingsPath = 'rankings'
termsPath = 'terms/'
picklePath = 'pickles/'
SolrFilesPath = 'solr.files/'
|
15,291 | 1158351111741031adcd544e86e91a6f7548bec0 | from joueur.base_ai import BaseAI
from math import inf
from timeit import default_timer as timer
from collections import namedtuple, defaultdict
from itertools import count
from random import getrandbits
from operator import xor
import re
'''
This board representation is based off the Sunfish Python Chess Engine
Several changes have been made (most notably to value())
This method of board representation is more compact, and significantly faster than the old method
Most notably, it does not use any form of copying or deep-copying
'''
# Board geometry: a 120-character board, 10 characters per row, with
# sentinel padding rows; A1 is index 91 and A8 is index 21.
A1, H1, A8, H8 = 91, 98, 21, 28
# NOTE(review): the padding rows below look narrower than the
# 10-characters-per-row layout the index comments imply — whitespace may
# have been lost in transit; confirm each row is exactly 10 characters.
initial = (
    ' \n' # 0 - 9
    ' \n' # 10 - 19
    ' rnbqkbnr\n' # 20 - 29
    ' pppppppp\n' # 30 - 39
    ' ........\n' # 40 - 49
    ' ........\n' # 50 - 59
    ' ........\n' # 60 - 69
    ' ........\n' # 70 - 79
    ' PPPPPPPP\n' # 80 - 89
    ' RNBQKBNR\n' # 90 - 99
    ' \n' # 100 -109
    ' \n' # 110 -119
)
# Direction offsets on the 10-wide board: north is -10, east is +1.
N, E, S, W = -10, 1, 10, -1
# valid moves for each piece
directions = {
    'P': (N, N + N, N + W, N + E),
    'N': (N + N + E, E + N + E, E + S + E, S + S + E, S + S + W, W + S + W, W + N + W, N + N + W),
    'B': (N + E, S + E, S + W, N + W),
    'R': (N, E, S, W),
    'Q': (N, E, S, W, N + E, S + E, S + W, N + W),
    'K': (N, E, S, W, N + E, S + E, S + W, N + W)
}
# Material values in pawn units; the king's 200 outweighs any combination
# of other material, so king capture dominates the evaluation.
piece_values = {
    'P': 1,
    'N': 3,
    'B': 3,
    'R': 5,
    'Q': 9,
    'K': 200
}
# Zobrist-table piece indices: 1-6 for white P,N,B,R,Q,K, 7-12 for black;
# 0 is the z_hash() default for characters that are not pieces.
z_indicies = {
    'P': 1,
    'N': 2,
    'B': 3,
    'R': 4,
    'Q': 5,
    'K': 6,
    'p': 7,
    'n': 8,
    'b': 9,
    'r': 10,
    'q': 11,
    'k': 12
}
# initialize Zobrist hash table: an independent random 16-bit key for every
# (square, piece) pair.  BUG FIX: the original built it as
# [[None] * 12] * 64, which aliases ONE row object 64 times, so every
# square shared the same 12 keys and hashes collided massively.
z_table = [[getrandbits(16) for _ in range(12)] for _ in range(64)]
class Position(namedtuple('Position', 'board score wc bc ep kp depth captured')):
    """ A state of a chess game
    board -- a 120 char representation of the board
    score -- the board evaluation
    wc -- the castling rights, [west/queen side, east/king side]
    bc -- the opponent castling rights, [west/king side, east/queen side]
    ep - the en passant square
    kp - the king passant square
    depth - the node depth of the position
    captured - the piece that was captured as the result of the last move
    """
    def gen_moves(self):
        # Yield every pseudo-legal move (i, j) for the side to move.  The
        # mover's pieces are always uppercase: the board is rotated between
        # turns so only one orientation needs handling.
        for i, p in enumerate(self.board):
            # i - initial position index
            # p - piece code
            # if the piece doesn't belong to us, skip it
            if not p.isupper(): continue
            for d in directions[p]:
                # d - potential action for a given piece
                for j in count(i + d, d):
                    # j - final position index
                    # q - occupying piece code
                    q = self.board[j]
                    # Stay inside the board, and off friendly pieces
                    if q.isspace() or q.isupper(): break
                    # Pawn move, double move and capture
                    if p == 'P' and d in (N, N + N) and q != '.': break
                    if p == 'P' and d == N + N and (i < A1 + N or self.board[i + N] != '.'): break
                    if p == 'P' and d in (N + W, N + E) and q == '.' and j not in (self.ep, self.kp): break
                    # Move it
                    yield (i, j)
                    # Stop non-sliders from sliding and sliding after captures
                    if p in 'PNK' or q.islower(): break
                    # Castling by sliding rook next to king
                    if i == A1 and self.board[j + E] == 'K' and self.wc[0]: yield (j + E, j + W)
                    if i == H1 and self.board[j + W] == 'K' and self.wc[1]: yield (j + W, j + E)
    def rotate(self):
        # Rotates the board, preserving enpassant
        # Allows logic to be reused, as only one board configuration must be considered
        return Position(
            self.board[::-1].swapcase(), -self.score, self.bc, self.wc,
            119 - self.ep if self.ep else 0,
            119 - self.kp if self.kp else 0, self.depth, None)
    def nullmove(self):
        # Like rotate, but clears ep and kp
        # Also increments the node depth, unlike rotate().
        return Position(
            self.board[::-1].swapcase(), -self.score,
            self.bc, self.wc, 0, 0, self.depth + 1, None)
    def move(self, move):
        # Apply `move` and return the resulting position, already rotated
        # so it is from the next player's point of view.
        # i - original position index
        # j - final position index
        i, j = move
        # p - piece code of moving piece
        # q - piece code at final square
        p, q = self.board[i], self.board[j]
        # put replaces string character at i with character p
        put = lambda board, i, p: board[:i] + p + board[i + 1:]
        # copy variables and reset eq and kp and increment depth
        board = self.board
        wc, bc, ep, kp, depth = self.wc, self.bc, 0, 0, self.depth + 1
        # score = self.score + self.value(move)
        # perform the move
        board = put(board, j, board[i])
        board = put(board, i, '.')
        # update castling rights, if we move our rook or capture the opponent's rook
        if i == A1: wc = (False, wc[1])
        if i == H1: wc = (wc[0], False)
        if j == A8: bc = (bc[0], False)
        if j == H8: bc = (False, bc[1])
        # Castling Logic
        if p == 'K':
            wc = (False, False)
            if abs(j - i) == 2:
                # kp marks the square the king passed over (castling target for checks)
                kp = (i + j) // 2
                board = put(board, A1 if j < i else H1, '.')
                board = put(board, kp, 'R')
        # Pawn promotion, double move, and en passant capture
        if p == 'P':
            if A8 <= j <= H8:
                # Promote the pawn to Queen
                board = put(board, j, 'Q')
            if j - i == 2 * N:
                ep = i + N
            if j - i in (N + W, N + E) and q == '.':
                # en passant: remove the pawn that was passed
                board = put(board, j + S, '.')
        # Rotate the returned position so it's ready for the next player
        return Position(board, 0, wc, bc, ep, kp, depth, q.upper()).rotate()
    def value(self):
        # Static evaluation: material balance only, from the mover's side.
        score = 0
        # evaluate material advantage
        for k, p in enumerate(self.board):
            # k - position index
            # p - piece code
            if p.isupper(): score += piece_values[p]
            if p.islower(): score -= piece_values[p.upper()]
        return score
    def is_check(self):
        # returns if the state represented by the current position is check
        op_board = self.nullmove()
        for move in op_board.gen_moves():
            i, j = move
            p, q = op_board.board[i], op_board.board[j]
            # opponent can take our king
            if q == 'k':
                return True
        return False
    def is_quiescent(self):
        # "Noisy" position: in check, or the last move captured a piece
        # (captured is a piece code string, truthy when non-empty).
        return self.is_check() or self.captured
    def z_hash(self):
        # Zobrist Hash of board position
        # strip all whitespace from board
        stripboard = re.sub(r'[\s+]', '', self.board)
        h = 0
        for i in range(0, 64):
            j = z_indicies.get(stripboard[i], 0)
            # NOTE(review): for empty squares j is 0, so j - 1 indexes
            # z_table[i][-1] (the black-king slot).  Quirky but consistent;
            # confirm it is intended.
            h = xor(h, z_table[i][j - 1])
        return h
####################################
# square formatting helper functions
####################################
def square_index(file_index, rank_index):
    """Convert a file letter and a rank number into a 0-119 board index."""
    file_offset = ord(file_index.upper()) - 65   # 'A' -> 0 ... 'H' -> 7
    rank_offset = int(rank_index) - 1            # rank '1' -> 0
    # Ranks grow upward while indices grow downward, hence the subtraction.
    return A1 + file_offset - (10 * rank_offset)
def square_file(square_index):
    """Return the file letter ('a'-'h') for a 0-119 board index."""
    return "abcdefgh"[(square_index % 10) - 1]
def square_rank(square_index):
    """Return the rank number (1-8) for a 0-119 board index."""
    return 10 - (square_index // 10)
def square_san(square_index):
    """Convert a board index (21 - 98) to a (file, rank) named tuple."""
    square = namedtuple('square', 'file rank')
    return square(square_file(square_index), square_rank(square_index))
def fen_to_position(fen_string):
    """Build a Position object from a FEN string.

    The result is always from the side-to-move's point of view: when black
    is to move, the board is rotated before being returned.
    """
    board, player, castling, enpassant, halfmove, move = fen_string.split()
    board_out = ' \n \n'
    for row in board.split('/'):
        board_out += ' '
        for piece in row:
            # a FEN digit encodes that many consecutive empty squares
            board_out += '.' * int(piece) if piece.isdigit() else piece
        board_out += '\n'
    board_out += ' \n \n'
    # Castling rights, preserving the original slot order:
    # ('K', 'Q') flags for white, ('k', 'q') flags for black.
    wc = ('K' in castling, 'Q' in castling)
    bc = ('k' in castling, 'q' in castling)
    if enpassant != '-':
        enpassant = square_index(enpassant[0], enpassant[1])
    else:
        enpassant = 0
    # Position(board score wc bc ep kp depth captured)
    pos = Position(board_out, 0, wc, bc, enpassant, 0, 0, None)
    return pos if player == 'w' else pos.rotate()
class AI(BaseAI):
    """ The basic AI functions that are the same between games. """

    def get_name(self):
        """ This is the name you send to the server so your AI will control the
            player named this string.
        Returns
            str: The name of your Player.
        """
        return "Sawyer McLane"

    def start(self):
        """ This is called once the game starts and your AI knows its playerID
            and game. You can initialize your AI here.
        """
        # Parse the server-supplied FEN into the internal Position form.
        self.board = fen_to_position(self.game.fen)
        # Maps a position's Zobrist hash -> best move found for it.
        self.transposition_table = dict()

    def game_updated(self):
        """ This is called every time the game's state updates, so if you are
            tracking anything you can update it here.
        """
        self.update_board()

    def end(self, won, reason):
        """ This is called when the game ends, you can clean up your data and
            dump files here if need be.
        Args:
            won (bool): True means you won, False means you lost.
            reason (str): The human readable string explaining why you won or
                          lost.
        """
        pass

    def run_turn(self):
        """ This is called every time it is this AI.player's turn.
        Returns:
            bool: Represents if you want to end your turn. True means end your
                  turn, False means to keep your turn going and re-call this
                  function.
        """
        # 1) print the board to the console
        self.print_current_board()
        # 2) print the opponent's last move to the console
        if len(self.game.moves) > 0:
            print("Opponent's Last Move: '" + self.game.moves[-1].san + "'")
        # 3) print how much time remaining this AI has to calculate moves
        print("Time Remaining: " + str(self.player.time_remaining) + " ns")
        # 4) pick a move via iterative-deepening minimax
        (piece_index, move_index) = self.tlabiddl_minimax()
        # The engine always searches from white's orientation; mirror the
        # indices when we are actually playing black.
        if self.player.color == "Black":
            piece_index = 119 - piece_index
            move_index = 119 - move_index
        # convert indices to SAN
        piece_pos = square_san(piece_index)
        move_pos = square_san(move_index)
        piece = self.get_game_piece(piece_pos.rank, piece_pos.file)
        piece.move(move_pos.file, move_pos.rank, promotionType="Queen")
        return True  # to signify we are done with our turn.

    def get_game_piece(self, rank, file):
        """Return the game Piece object at (rank, file), or None."""
        return next((piece for piece in self.game.pieces if piece.rank == rank and piece.file == file), None)

    def update_board(self):
        """Refresh self.board from the game's current FEN."""
        self.board = fen_to_position(self.game.fen)

    def tlabiddl_minimax(self):
        """Time-Limited Alpha-Beta Iterative-Deepening Depth-Limited MiniMax.

        Returns the best (from_index, to_index) move found within the
        10-second budget, caching results by Zobrist hash.
        """
        initial_board = self.board
        l_depth = 0
        depth_limit = 4
        # time limiting stuff
        time_limit = 10  # seconds allowed to find the best move
        start_time = timer()
        # history[root_hash][node_hash] -> pruning weight used to order nodes.
        history = defaultdict(dict)
        # BUG FIX: the table used to be *written* with the raw board string
        # as the key but *read* with the Zobrist hash, so cached results
        # were never found.  Key consistently by Zobrist hash.
        if initial_board.z_hash() in self.transposition_table:
            return self.transposition_table[initial_board.z_hash()]

        def min_play(board, alpha=(-inf), beta=(inf)):
            # Minimizing player's reply; returns the lowest reachable score.
            if board.depth >= l_depth:
                return board.value()
            best_score = inf
            for move in board.gen_moves():
                next_board = board.move(move)
                if next_board.is_check(): continue
                if next_board.is_quiescent():
                    score = quiescence(next_board, alpha, beta)
                else:
                    score = max_play(next_board, alpha, beta)
                if score < best_score:
                    best_score = score
                if score <= alpha:
                    return score
                beta = min(beta, score)
            return best_score

        def max_play(board, alpha=(-inf), beta=(inf)):
            # Maximizing player's reply; returns the highest reachable score.
            if board.depth >= l_depth:
                return board.value()
            best_score = -inf
            for move in board.gen_moves():
                next_board = board.move(move)
                if next_board.is_check(): continue
                if next_board.is_quiescent():
                    score = quiescence(next_board, alpha, beta)
                else:
                    # BUG FIX: the original unconditionally overwrote `score`
                    # with min_play's result, discarding the quiescence search
                    # (min_play already had the correct if/else structure).
                    score = min_play(next_board, alpha, beta)
                if score > best_score:
                    best_score = score
                if score >= beta:
                    return score
                alpha = max(alpha, score)
            return best_score

        def quiescence(board, alpha=(-inf), beta=(inf)):
            # Search only "noisy" continuations to soften the horizon effect.
            stand_pat = board.value()
            if stand_pat >= beta:
                return beta
            if alpha < stand_pat:
                alpha = stand_pat
            for move in board.gen_moves():
                if (timer() - start_time) >= time_limit:
                    # if time limit has been reached, give us the best move
                    return alpha
                next_board = board.move(move)
                score = -quiescence(next_board, -beta, -alpha)
                if score >= beta:
                    # record a cutoff so later iterations order this node first
                    history[initial_board.z_hash()][board.z_hash()] = board.depth * board.depth
                    return beta
                if score > alpha:
                    alpha = score
            return alpha

        # Iterative deepening: repeat the search with a growing depth cap.
        best_move = None  # guards against a timeout before any move is scored
        while l_depth <= depth_limit:
            frontier = [initial_board]
            visited = [initial_board]
            while len(frontier) != 0:
                # sort frontier by prune history
                frontier = sorted(frontier, key=lambda x: history[initial_board.z_hash()].get(x.z_hash(), 0))
                board = frontier.pop(0)
                best_score = -inf
                for move in board.gen_moves():
                    next_board = board.move(move)
                    if next_board.is_check(): continue
                    score = max_play(next_board)
                    if score > best_score:
                        best_move = move
                        best_score = score
                    if not (next_board in visited) and not (next_board in frontier):
                        visited.append(next_board)
                    if (timer() - start_time) >= time_limit:
                        # time limit reached: return the best move so far
                        self.transposition_table[initial_board.z_hash()] = best_move
                        return best_move
                if len(frontier) == 0:
                    l_depth += 1
        self.transposition_table[initial_board.z_hash()] = best_move
        return best_move

    def print_current_board(self):
        """Prints the current board using pretty ASCII art
        Note: you can delete this function if you wish
        """
        # iterate through the range in reverse order
        for r in range(9, -2, -1):
            output = ""
            if r == 9 or r == 0:
                # then the top or bottom of the board
                output = "   +------------------------+"
            elif r == -1:
                # then show the ranks
                output = "     a  b  c  d  e  f  g  h"
            else:  # board
                output = " " + str(r) + " |"
                # fill in all the files with pieces at the current rank
                for file_offset in range(0, 8):
                    # start at a, with with file offset increasing the char
                    f = chr(ord("a") + file_offset)
                    current_piece = None
                    for piece in self.game.pieces:
                        if piece.file == f and piece.rank == r:
                            # then we found the piece at (file, rank)
                            current_piece = piece
                            break
                    code = "."  # default "no piece"
                    if current_piece:
                        # the code will be the first character of their type
                        # e.g. 'Q' for "Queen"
                        code = current_piece.type[0]
                        if current_piece.type == "Knight":
                            # 'K' is for "King", we use 'N' for "Knights"
                            code = "N"
                        if current_piece.owner.id == "1":
                            # the second player (black) is lower case.
                            # Otherwise it's uppercase already
                            code = code.lower()
                    output += " " + code + " "
                output += "|"
            print(output)
|
15,292 | b2196821507fa49a1993c00ec0644c420eae8ebb | import math
import numpy
import os
import xboa.common
import ROOT
import utilities.root_style
class PlotAmplitudeData(object):
    """Plots summarising amplitude-analysis output: emittance/beta 1D
    summaries and 2D phase-space projections (Python 2 + ROOT/xboa)."""
    def __init__(self, amplitude_data, plot_dir, key):
        # amplitude_data: provides state_list samples and retrieve() records
        # plot_dir: output directory root; key: tag appended to plot names
        self.data = amplitude_data
        self.plot_dir = plot_dir
        self.key = key
    def plot(self):
        """Produce the standard set of 1D summary and 2D phase-space plots."""
        self.plot_data_1d("emittance_vs_n_events_"+self.key, self.emittance_4d_lambda, "#varepsilon_{4D} [mm]", self.n_events_lambda, "Number of Events")
        self.plot_data_1d("emittance_vs_beta_x_"+self.key, self.emittance_4d_lambda, "#varepsilon_{4D} [mm]", self.beta_x_lambda, "#beta_{x} [mm]")
        self.plot_data_1d("emittance_vs_beta_y_"+self.key, self.emittance_4d_lambda, "#varepsilon_{4D} [mm]", self.beta_y_lambda, "#beta_{y} [mm]")
        self.plot_data_1d("max_amplitude_vs_n_events_"+self.key, self.max_amp_lambda, "#A [mm]", self.n_events_lambda, "Number of Events")
        self.plot_data_1d("max_amplitude_vs_beta_x_"+self.key, self.max_amp_lambda, "A [mm]", self.beta_x_lambda, "#beta_{x} [mm]")
        self.plot_data_1d("max_amplitude_vs_beta_y_"+self.key, self.max_amp_lambda, "A [mm]", self.beta_y_lambda, "#beta_{y} [mm]")
        # one phase-space plot per unordered pair of the 4 phase-space variables
        for i in range(4):
            for j in range(i+1, 4):
                self.plot_phase_space(i, j)
    def plot_data_1d(self, plot_name, plot_lambda_x, x_label, plot_lambda_y, y_label):
        """Scatter plot_lambda_y(state) vs plot_lambda_x(state), one marker
        style per sample, saved as png/pdf/root."""
        if len(self.data.state_list) == 0:
            print "Warning - no data for emittance vs beta plots/etc"
            return
        marker = 24
        # first pass over all samples only to size the axes
        x_axis = []
        y_axis = []
        for sample_states in self.data.state_list:
            for state in sample_states:
                x_axis.append(plot_lambda_x(state))
                y_axis.append(plot_lambda_y(state))
        hist, graph = xboa.common.make_root_graph(plot_name, x_axis, x_label, y_axis, y_label)
        canvas = xboa.common.make_root_canvas("plot")
        canvas.Draw()
        hist.Draw()
        # second pass: one graph per sample, distinct marker style each
        for sample_states in self.data.state_list:
            x_axis = []
            y_axis = []
            for state in sample_states:
                x_axis.append(plot_lambda_x(state))
                y_axis.append(plot_lambda_y(state))
            hist, graph = xboa.common.make_root_graph(plot_name, x_axis, x_label, y_axis, y_label)
            graph.SetMarkerStyle(marker)
            marker += 1
            graph.Draw("SAME P")
        canvas.Update()
        for fmt in ["png", "pdf", "root"]:
            canvas.Print(self.plot_dir+"/phase_space/"+plot_name+"."+fmt)
    def plot_phase_space(self, x_var, y_var):
        """2D histogram of two phase-space variables with fitted ellipses
        overlaid for both samples."""
        x_label = self.psv_labels[x_var]
        y_label = self.psv_labels[y_var]
        title = "amplitude_phase_space_"+self.key+"_"+self.psv_names[x_var]+"_"+self.psv_names[y_var]
        canvas = xboa.common.make_root_canvas(title)
        x_data, y_data = [], []
        # gather every event from both samples and all amplitude bins
        for sample in range(2):
            for a_bin in range(21):
                for run, spill, evt, psv, amp in self.data.retrieve(a_bin, sample):
                    x_data.append(psv[x_var])
                    y_data.append(psv[y_var])
        hist = xboa.common.make_root_histogram(title, x_data, x_label, 50, y_data, y_label, 50)
        canvas.SetFrameFillColor(utilities.root_style.get_frame_fill())
        hist.Draw("COLZ")
        # colour offsets give a visible shade progression between ellipses
        delta_list = [-10, -7, -4, 1, 3]
        for color, sample in [(ROOT.kGreen, 0), (ROOT.kRed, 1)]:
            # NOTE(review): Python 2 integer division — step is an int
            step = len(self.data.state_list[sample])/len(delta_list)+1
            for i, ellipse in enumerate(self.data.state_list[sample][::step]):
                my_color = color+4
                if i < len(delta_list):
                    my_color = color+delta_list[i]
                graph = self.plot_ellipse(ellipse, x_var, y_var)
                graph.SetLineColor(my_color)
        canvas.Update()
        for fmt in ["root", "png", "pdf"]:
            canvas.Print(self.plot_dir+"/phase_space/"+title+"."+fmt)
    @classmethod
    def plot_ellipse(cls, ellipse, var_1, var_2):
        """Draw the covariance ellipse of (var_1, var_2) on the current
        canvas; returns an empty graph if the shell cannot be built."""
        mean = [ellipse["mean"][var_1], ellipse["mean"][var_2]]
        cov = [[ellipse["cov"][i][j] for i in [var_1, var_2]] for j in [var_1, var_2]]
        try:
            points = xboa.common.make_shell(41, numpy.array(cov))
        except Exception:
            graph = ROOT.TGraph()
            return graph
        graph = ROOT.TGraph(len(points)+1)
        points = [(a_point[0, 0], a_point[0, 1]) for a_point in points]
        # sort by polar angle so the polyline traces the ellipse in order
        points = sorted(points, key = lambda points: math.atan2(points[1], points[0]))
        points.append(points[0])
        for i, a_point in enumerate(points):
            graph.SetPoint(i, a_point[0]+mean[0], a_point[1]+mean[1])
        graph.SetLineWidth(2)
        graph.Draw("SAME L")
        # keep a reference so ROOT's garbage collection doesn't drop the graph
        cls.root_objects.append(graph)
        return graph
    @classmethod
    def emittance_4d_lambda(cls, state):
        # accessor used as plot_data_1d's plot_lambda argument
        return state["emittance"]
    @classmethod
    def max_amp_lambda(cls, state):
        return state["max_amplitude"]
    @classmethod
    def n_events_lambda(cls, state):
        return state["n_events"]
    @classmethod
    def beta_x_lambda(cls, state):
        return cls.beta_2d(state, 0)
    @classmethod
    def beta_y_lambda(cls, state):
        return cls.beta_2d(state, 2)
    @classmethod
    def beta_2d(cls, state, axis):
        """2D Twiss beta from the covariance submatrix starting at `axis`."""
        twod_matrix = [item[axis:axis+2] for item in state["cov"][axis:axis+2]]
        emit = numpy.linalg.det(twod_matrix)**0.5/cls.mu_mass
        beta = twod_matrix[0][0]/emit
        return beta
    # phase-space variable names and axis labels, index-aligned
    psv_names = ["x", "px", "y", "py"]
    psv_labels = [
        "x [mm]",
        "p_{x} [MeV/c]",
        "y [mm]",
        "p_{y} [MeV/c]",
    ]
    # muon mass looked up by PDG particle id 13
    mu_mass = xboa.common.pdg_pid_to_mass[13]
root_objects = [] |
15,293 | 2675ec4578a42e4f692af58596fbc2b8314f3bf3 | # -*- coding: UTF-8 -*-
import sys
import csv
import json
import time
from elasticsearch import Elasticsearch
from filter_rules import filter_activity, filter_ip, filter_retweet_count, filter_mention
reload(sys)
sys.path.append('../../')
from global_utils import R_CLUSTER_FLOW2, R_DICT, ES_DAILY_RANK, es_user_portrait
from global_utils import R_RECOMMENTATION as r
from global_config import RECOMMENTATION_TOPK as k
from time_utils import datetime2ts, ts2datetime
def search_from_es(date):
    """Fetch the top-k daily influence ranking for `date` from Elasticsearch.

    Returns (set_of_uids, raw_hits), or (None, None) if the dated index
    does not exist.
    """
    # test override: shadows the module-level RECOMMENTATION_TOPK import `k`
    k = 10000
    index_time = ''.join(date.split('-'))  # '2013-09-07' -> '20130907'
    print 'index_time:', index_time
    index_type = 'bci'
    query_body = {
        'query':{
            'match_all':{}
        },
        'size':k,
        'sort':[{'user_index':{'order':'desc'}}]
    }
    try:
        result = ES_DAILY_RANK.search(index=index_time, doc_type=index_type, body=query_body)['hits']['hits']
    except:
        print 'recommentation in: there is not %s es' % index_time
        return None, None
    user_set = []
    user_set = [user_dict['_id'] for user_dict in result]
    print 'len user_set:',len(user_set)
    return set(user_set), result
def filter_in(top_user_set):
results = []
try:
in_results = es_user_portrait.mget(index='user_portrait', doc_type='user', body={'ids':list(top_user_set)})
except Exception as e:
raise e
filter_list = [item['_id'] for item in in_results['docs'] if item['found'] is True]
print 'before filter in:', len(top_user_set)
print 'filter_list:', len(filter_list)
results = set(top_user_set) - set(filter_list)
print 'after filter in:', len(results)
return results
def filter_rules(candidate_results):
    """Apply the four admission filters in order and return the survivors.

    rule1: activity count; rule2: ip count; rule3: retweet & beretweeted
    count; rule4: mention count.
    """
    filtered = candidate_results
    for rule in (filter_activity, filter_ip, filter_retweet_count, filter_mention):
        filtered = rule(filtered)
    return filtered
def write_recommentation(date, results, user_dict):
    """Write the recommended uids, one per row, to the dated CSV.

    user_dict is accepted for interface compatibility but unused.
    Returns True on completion.
    """
    # `with` guarantees the handle is closed (the original leaked it);
    # a dead, never-read `status` variable was also removed.
    with open('/home/ubuntu8/huxiaoqian/user_portrait/user_portrait/cron/recommentation_in/recommentation_list_'+date+'.csv', 'wb') as f:
        writer = csv.writer(f)
        for item in results:
            writer.writerow([item])
    return True
def save_recommentation2redis(date, user_set):
    """Store each recommended uid in the dated redis hash with status 0 (pending)."""
    hash_name = 'recomment_'+str(date)
    status = 0
    for uid in user_set:
        r.hset(hash_name, uid, status)
    return True
def read_black_user():
    """Load the uid blacklist from blacklist_2.csv (first column per row).

    Returns a set of uid strings.
    """
    results = set()
    # `with` closes the file even if reading raises (the original's explicit
    # close() was skipped on error).
    with open('/home/ubuntu8/huxiaoqian/user_portrait/user_portrait/cron/recommentation_in/blacklist_2.csv', 'rb') as f:
        reader = csv.reader(f)
        for line in reader:
            results.add(line[0])
    return results
# get sensitive user and filt in
def get_sensitive_user(date):
    """Return sensitive uids recorded in redis for `date`, minus any uid
    already present in the user_portrait index (via filter_in)."""
    results = set()
    r_cluster = R_CLUSTER_FLOW2
    ts = datetime2ts(date)
    # hash 'sensitive_<ts>' maps uid -> value; only the keys are needed
    results = r_cluster.hgetall('sensitive_'+str(ts))
    if results:
        user_list = results.keys()
    else:
        user_list = []
    results = filter_in(user_list)
    return results
def main():
    """Build the daily recommendation list: top-ranked users plus sensitive
    users, minus blacklisted uids and uids already in user_portrait."""
    now_ts = time.time()
    #test
    now_ts = datetime2ts('2013-09-07')
    date = ts2datetime(now_ts - 3600*24)  # operate on yesterday's data
    #step1: read from top es_daily_rank
    top_user_set, user_dict = search_from_es(date)
    #step2: filter black_uid
    black_user_set = read_black_user()
    print 'black_user_set:', len(black_user_set)
    intersection = top_user_set & black_user_set
    print 'intersection:', len(intersection)
    subtract_user_set = top_user_set - black_user_set
    print 'after filter blacklist:', len(subtract_user_set)
    #step3: filter users have been in
    candidate_results = filter_in(subtract_user_set)
    #step4: filter rules about ip count& reposts/bereposts count&activity count
    # NOTE(review): extend() below implies filter_rules returns a list — confirm
    results = filter_rules(candidate_results)
    print 'after filter:', len(results)
    #step5: get sensitive user
    sensitive_user = list(get_sensitive_user(date))
    print 'sensitive_user:', len(sensitive_user)
    print 'sensitive_user_2:', sensitive_user[2]
    print 'before extend results:', len(results), type(results)
    results.extend(sensitive_user)
    print 'after list extend:', len(results), type(results)
    results = set(results)
    print 'end:', len(results)
    #step6: write to recommentation csv/redis
    '''
    status = save_recommentation2redis(date, results)
    status = True
    write_recommentation(date, results, user_dict)
    if status==True:
        print 'date:%s recommentation done' % date
    '''
def write_sensitive_user(results):
    """Write the sensitive uids, one per row, to sensitive_user.csv.

    Returns True on completion.
    """
    # `with` guarantees the handle is flushed and closed — the original
    # never closed it at all.
    with open('/home/ubuntu8/huxiaoqian/user_portrait/user_portrait/cron/recommentation_in/sensitive_user.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile)
        for user in results:
            writer.writerow([user])
    return True
if __name__=='__main__':
    # the full pipeline is disabled; only the sensitive-user dump runs
    #main()
    results = get_sensitive_user('2013-09-07')
    print 'sensitive_user:', len(results)
    write_sensitive_user(results)
|
15,294 | bb7eb2c6f5a21e575c049f0e2de5a8cf5a941d1d | #!/usr/bin/python3
def print_last_digit(number):
    """Print the last (least-significant) digit of number, without a
    newline, and return it."""
    # The sign is discarded, so negatives also yield a positive digit.
    digit = abs(number) % 10
    print("{:d}".format(digit), end="")
    return digit
|
15,295 | 654ae0182c187b903f23d7dca7b9769c0ed1952a | # Generated by Django 2.1.7 on 2019-08-04 20:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace PaytmHistory.merchant with a nullable 'customer' FK to the user model."""

    dependencies = [
        # Depends on whatever model AUTH_USER_MODEL is swapped to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('paytm', '0007_auto_20190804_1947'),
    ]

    operations = [
        # Drop the old relation first...
        migrations.RemoveField(
            model_name='paytmhistory',
            name='merchant',
        ),
        # ...then add the replacement. null=True lets existing rows
        # survive the migration without a customer assigned.
        migrations.AddField(
            model_name='paytmhistory',
            name='customer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='paytm_payments', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
15,296 | dee57002049b84494353582f35a73f95cd9e8797 | from django.urls import path
from . import views
# Route table for this app: the bare root path dispatches to the
# barcode generation view.
urlpatterns = [
    path('', views.generate, name='generate_barcodes'),
]
|
# Item prices to be totalled.
price = [10, 20, 30, 40]

# sum() replaces the original manual accumulation loop (whose loop
# variable was confusingly named 'totall') — same result, idiomatic.
total = sum(price)
print(f"Total: {total}")
15,298 | 473697b305a3c345cec60836904b9f2c65d17852 | #! /usr/bin/python
import os
# Clear the terminal before the demo output is printed.
os.system ('clear')
# functions with specific args
def abc(x, y, z):
    """Return the sum of the three arguments."""
    # Direct addition — equivalent to sum([x, y, z]) without the
    # temporary list.
    return x + y + z
# Demo: keyword arguments may be passed in any order — each is bound
# by name, so this is abc(100, 200, 34).
r=abc(z=34,x=100,y=200)
print r
|
15,299 | 691dfd80930fcbc857bddd04504d78ed7fce3855 | """After the initial BioBank assessment, two later assessments were
performed. However, this data is not that helpful to us, as
the two later assessments were completed by a fraction of the original cohort.
This script takes in a BioBank file, and removes the extra instances.
"""
# BELOW: constants that often need to be altered
IN_FILE = "/home/users/benson97/CS221/data_biomarkers_double_plus"
OUT_FILE = "/home/users/benson97/CS221/data_biomarkers_double_plus_one"
def get_instance(string):
    """Finds the instance number in a column header.

    Args:
        string: Should be in the form f.#.#.#
    """
    fields = string.split(".")
    # Headers with fewer than four dot-separated fields (e.g. "f.eid")
    # are treated as the baseline assessment, instance "0". Otherwise
    # the instance number sits in the third field — a convention taken
    # from Joeri's UK Phenotypes script ("get_UKphenotypes.r").
    return fields[2] if len(fields) >= 4 else "0"
# Open the raw BioBank table for reading and the filtered copy for writing.
infile = open(IN_FILE)
outfile = open(OUT_FILE, "w+")

# Pass over the header: keep only instance-0 columns (the initial
# assessment) and record their positions so every data row can be
# filtered the same way.
header = infile.readline().split()
outfile_header = ""
to_add = set()  # column indices to retain in each data row
index = 0
for column in header:
    if get_instance(column) == "0":
        outfile_header = outfile_header + column + "\t"
        to_add.add(index)
    index += 1
# remove extra \t, add \n
outfile_header = outfile_header[:-1]
outfile_header = outfile_header + "\n"
outfile.write(outfile_header)

# Stream the data rows. Reading one line ahead ("future") tells us when
# the current row is the last one, so the final line is written without
# a trailing newline.
current = infile.readline().strip()
future = infile.readline().strip()
while True:
    row = current.split()
    outfile_row = []
    index = 0
    # Keep only the columns whose index was retained from the header.
    for column in row:
        if index in to_add:
            outfile_row.append(column)
        index += 1
    outfile_line = ""
    for column in outfile_row:
        outfile_line = outfile_line + column + "\t"
    outfile_line = outfile_line[:-1]  # drop the trailing tab
    if future != "":
        outfile_line = outfile_line + "\n"
    outfile.write(outfile_line)
    current = future
    if current == "":
        break
    future = infile.readline().strip()

infile.close()
outfile.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.