index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
23,700 | 3ca0780bb64d9e57d33f6aedbfcc182b3e7d6e99 | import sys
sys.setrecursionlimit(10 ** 7) # 再帰関数の呼び出し制限
class Solution:
    """LeetCode 79 "Word Search": decide whether `word` can be traced through
    orthogonally adjacent board cells, using each cell at most once."""

    def exist(self, board, word) -> bool:
        """Return True if `word` exists as a path of adjacent cells in `board`."""
        if not board:
            # An empty board can never contain the word.
            return False
        rows, cols = len(board), len(board[0])

        def walk(pos, r, c):
            # Reject out-of-bounds cells and cells not matching the needed letter
            # (a visited cell is marked "#" and therefore never matches).
            if not (0 <= r < rows and 0 <= c < cols) or board[r][c] != word[pos]:
                return False
            # Last letter matched: the whole word has been traced.
            if pos == len(word) - 1:
                return True
            saved = board[r][c]
            board[r][c] = "#"  # mark visited for the duration of this branch
            hit = any(walk(pos + 1, r + dr, c + dc)
                      for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)))
            board[r][c] = saved  # restore on backtrack
            return hit

        return any(walk(0, r, c) for c in range(cols) for r in range(rows))
# Demo driver: the sample board/word from the problem statement.
# "ABCCED" can be traced through adjacent cells, so this prints True.
board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
word = "ABCCED"
Solve = Solution()
print(Solve.exist(board,word))
|
23,701 | 2c57a9147ca5ecbc92d2e5c7085796d110fb2558 | #For actual: echo "The mighty mountain lay to the west, beyond the sea" | python tlm.py | python imGen.py && python NVDIA.py
#For actual: echo "The two mighty mountains lay to opposite sides, past the endless sea" | python tlm.py | python imGen.py && python NVDIA.py
#For demo: echo "The mighty mountain lay to the west, beyond the sea" | python tlm.py | python imGen.py | python NVDIA.py
#For demo: echo "The two mighty mountains lay to opposite sides, past the endless sea" | python tlm.py | python imGen.py | python NVDIA.py
import json
import logging
import time
logging.basicConfig(level=logging.DEBUG)
# Canned log lines used to fake a busy processing pipeline in the output below.
# FIX: backslashes in the Windows path are now doubled.  The original relied on
# invalid escape sequences such as "\L" and "\A" passing through literally,
# which is deprecated in Python 3 and slated to become a SyntaxError; the
# doubled form produces the exact same runtime string.
base_strings = ["Object found in Cache", "Using shared memory type iterator",
                "Requesting block allocation", "Importing from C:\\Users\\Lenovo\\AppData\\Local\\Programs\\Python37\\python",
                "Performing fast read"]
strings = ["Performing fast memory iteration", "Lookup transform...",
           "Get data from NLTK modules and wait for block read..."]
def shape(self):
    # NOTE(review): dead code in this script.  This function (and __iter__
    # below) takes `self` but sits at module level -- it looks pasted from a
    # chunked-slicing iterator class (start/stop/step attributes) and is
    # never called here.
    # Number of elements selected along each dimension by (start, stop, step).
    return tuple(((stop-start-1)//step+1) for start, stop, step in
                 zip(self.start, self.stop, self.step))
def __iter__(self):
    # NOTE(review): dead code in this script -- a module-level method pasted
    # from a chunked-slicing iterator class.  `reduce` and `mul` are not
    # imported in this file, so executing this would raise NameError; it also
    # calls `self.shape` as an attribute, so in its original class `shape`
    # was presumably a property -- verify before reusing.
    # Yields successive sub-slices of self.var covering the full selection,
    # at most self.buf_size elements per chunk.
    if [dim for dim in self.shape if dim <= 0]:
        return
    start = self.start[:]
    stop = self.stop[:]
    step = self.step[:]
    ndims = self.var.ndim
    while True:
        # Elements to fetch this chunk (whole selection if no buffer limit).
        count = self.buf_size or reduce(mul, self.shape)
        rundim = 0
        # Walk dimensions right-to-left, consuming `count` into per-dim stops.
        for i in range(ndims-1, -1, -1):
            if count == 0:
                stop[i] = start[i]+1
            elif count <= self.shape[i]:
                stop[i] = start[i] + count*step[i]
                rundim = i
            else:
                stop[i] = self.stop[i]
            stop[i] = min(self.stop[i], stop[i])
            count = count//self.shape[i]
        slice_ = tuple(slice(*t) for t in zip(start, stop, step))
        yield self.var[slice_]
        # Advance the running dimension, carrying overflow leftwards.
        start[rundim] = stop[rundim]
        for i in range(ndims-1, 0, -1):
            if start[i] >= self.stop[i]:
                start[i] = self.start[i]
                start[i-1] += self.step[i-1]
        if start[0] >= self.stop[0]:
            return
def random_screen_splash():
    """Fallback for unrecognized input: abort the demo immediately."""
    exit()


def _emit_scene(sentence, mountain_positions):
    """Log the fake pipeline chatter and print the scene JSON for `sentence`.

    The two recognized sentences produced byte-for-byte duplicated branches
    that differed only in the mountain position list, so both are folded here.
    Output order (log lines, sleeps, JSON key order) matches the original.
    """
    scene = {}
    scene["sentence"] = sentence
    logging.info("Accepted basic text")
    time.sleep(0.01)
    scene["features"] = ["mountain", "sea"]
    scene["mountain"] = {"position": mountain_positions, "size": "big"}
    scene["sea"] = {"position": "natural", "size": "natural"}
    logging.info(strings[0])
    logging.info(strings[2])
    time.sleep(1)
    logging.info(base_strings[0])
    logging.info(base_strings[0])
    logging.debug(json.dumps(scene))
    print(json.dumps(scene))


# Read one sentence from stdin (piped from tlm.py upstream), strip quotes.
text = input().replace("\"", "").strip()
if "two" in text:
    # Two-mountain scene, e.g. "The two mighty mountains lay to opposite sides...".
    _emit_scene(text, ["left", "right"])
elif text == "The mighty mountain lay to the west, beyond the sea":
    _emit_scene(text, ["left"])
else:
    random_screen_splash()
23,702 | bbb058b7f135d6a10a1747e64403e2446ce60e37 | import psutil, time
import httplib,json,argparse
TOKEN='token string'  # NOTE(review): placeholder -- supply a real DRF auth token
SITE='url'  # NOTE(review): placeholder -- the endpoint the process list is POSTed to
# Shared auth header.  Django_rest_api below builds its own identical copy,
# so this module-level HEADERS is currently unused.
HEADERS={"Authorization": "Token {0}".format(TOKEN), "Content-Type": "application/json"}
import requests,json
### sigle rest request
class Django_rest_api:
    """Thin wrapper around `requests` that attaches the token auth header.

    Every method returns a ``(status_code, parsed_json)`` tuple and will
    raise from ``result.json()`` if the server responds with a non-JSON body.
    """

    def __init__(self):
        # Token auth header shared by every request.
        self.header = {"Authorization": "Token {0}".format(TOKEN),
                       "Content-Type": "application/json"}

    def get(self, path):
        """GET `path`; return (status_code, json body)."""
        result = requests.get(path, headers=self.header)
        return result.status_code, result.json()

    def post(self, path, data):
        """POST `data` (JSON-encoded) to `path`; return (status_code, json body)."""
        result = requests.post(path, data=json.dumps(data), headers=self.header)
        return result.status_code, result.json()

    def put(self, path, data):
        """PUT `data` (JSON-encoded) to `path`; return (status_code, json body)."""
        result = requests.put(path, data=json.dumps(data), headers=self.header)
        return result.status_code, result.json()

    def patch(self, path, data):
        """PATCH `data` (JSON-encoded) to `path`; return (status_code, json body)."""
        result = requests.patch(path, data=json.dumps(data), headers=self.header)
        return result.status_code, result.json()

    def delete(self, path, data=None):
        """DELETE `path`; return (status_code, json body).

        FIX: `data` was a required parameter that was never sent; it is now
        optional so existing two-argument callers keep working.
        """
        result = requests.delete(path, headers=self.header)
        return result.status_code, result.json()
def getAllProcessInfo(host):
    """Collect pid/name/username of every local process and POST it to SITE.

    :param host: identifier for this machine, embedded in the payload so the
                 server knows which host the process list belongs to.
    """
    instances = [proc.info for proc in
                 psutil.process_iter(attrs=['pid', 'name', 'username'])]
    send_data = {
        "host": host,
        "procs": instances,
    }
    ret_status, ret_result = Django_rest_api().post(SITE, send_data)
    # FIX: the originals were Python-2 print statements; the function form
    # behaves identically on Python 2 (single argument) and Python 3.
    print(ret_status)
    print(ret_result)
if __name__ == '__main__':
    # CLI entry point: `--host` tags the payload with this machine's name.
    parser = argparse.ArgumentParser(description="your script description")
    parser.add_argument('--host', required=True, type=str)
    args = parser.parse_args()
    host=args.host
    getAllProcessInfo(host=host)
|
23,703 | 388b1f156a8919f727142f2ae7bcea9b2cc4a4f4 |
from skimage.segmentation import clear_border
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import os
import shutil
class BankChequeMICR:
    """Locate and OCR the MICR (E-13B) line along the bottom of a scanned
    bank cheque, via correlation template matching against a reference strip."""

    def __init__(self):
        self.result_dir = 'result/'
        self.reference_micr = 'reference/micr_e13b_reference.png'
        # Fraction of the cheque height (measured from the bottom) searched
        # for the MICR line.
        self.keep_bottom_part = 0.2
        # Every character ROI is resized to this square before matching.
        self.rsz_h, self.rsz_w = 36, 36
        self.crop_offset = 5
        self.draw_offset = 10
        # Minimum group-contour width/height as fractions of the image size.
        self.min_pass_w, self.min_pass_h = 0.02, 0.15
        # Reference character names, in the same order as they appear in the
        # reference image: the digits, then the four special delimiters:
        #   T = Transit (delimits bank branch routing transit)
        #   U = On-us (delimits customer account number)
        #   A = Amount (delimits transaction amount)
        #   D = Dash (delimits parts of numbers, such as routing or account)
        self.charNames = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0",
                          "T", "U", "A", "D"]
        self.display = True
        self.draw_predicted = True
        self.put_predicted_text = True

    def remove_directory_contents(self, path):
        """Delete every file and sub-directory inside `path` (best effort)."""
        for the_file in os.listdir(path):
            file_path = os.path.join(path, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                # Best-effort cleanup: report and keep going.
                print(e)

    def show_image(self, in_image):
        """Display `in_image` in a window and block until a key is pressed."""
        cv2.imshow('image', in_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def extract_digits_and_symbols(self, image, charCnts, minW=5, minH=15):
        """Split MICR contours into per-character ROIs.

        A contour at least (minW, minH) big is a whole character; anything
        smaller is assumed to be the first of the THREE strokes that make up
        a special symbol (T/U/A/D) and is merged with the next two contours
        into a single bounding box.  Returns (rois, locs) where locs holds
        (x1, y1, x2, y2) boxes aligned with rois.
        """
        char_iter = charCnts.__iter__()
        rois = []
        locs = []
        while True:
            try:
                c = next(char_iter)
                (cX, cY, cW, cH) = cv2.boundingRect(c)
                if cW >= minW and cH >= minH:
                    # Whole character: extract the ROI directly.
                    roi = image[cY:cY + cH, cX:cX + cW]
                    rois.append(roi)
                    locs.append((cX, cY, cX + cW, cY + cH))
                else:
                    # Special symbol: merge this stroke with the next two.
                    parts = [c, next(char_iter), next(char_iter)]
                    (sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf, -np.inf)
                    for p in parts:
                        (pX, pY, pW, pH) = cv2.boundingRect(p)
                        sXA = min(sXA, pX)
                        sYA = min(sYA, pY)
                        sXB = max(sXB, pX + pW)
                        sYB = max(sYB, pY + pH)
                    roi = image[sYA:sYB, sXA:sXB]
                    rois.append(roi)
                    locs.append((sXA, sYA, sXB, sYB))
            except StopIteration:
                break
        return rois, locs

    def process_reference(self):
        """Load the E-13B reference strip; return {char name: template ROI}."""
        ref = cv2.imread(self.reference_micr)
        ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
        ref = imutils.resize(ref, width=400)
        ref = cv2.threshold(ref, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        ref_cnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): this index juggling only distinguishes OpenCV 2 vs 3;
        # OpenCV >= 4 returns 2 values again, so this would pick the hierarchy.
        # imutils.grab_contours(ref_cnts) covers all versions -- verify the
        # deployed cv2 before changing.
        ref_cnts = ref_cnts[0] if imutils.is_cv2() else ref_cnts[1]
        ref_cnts = contours.sort_contours(ref_cnts, method="left-to-right")[0]
        ref_roi = self.extract_digits_and_symbols(ref, ref_cnts, minW=10, minH=20)[0]
        chars = {}
        for (name, roi) in zip(self.charNames, ref_roi):
            roi = cv2.resize(roi, (self.rsz_h, self.rsz_w))
            chars[name] = roi
        return chars

    def get_bottom_cropped_image(self, in_image):
        """Return (bottom strip containing the MICR line, y offset of the crop)."""
        (h, w) = in_image.shape[:2]
        delta = int(h - (h * self.keep_bottom_part))
        return in_image[delta:h, 0:w], delta

    def get_processed_bmp(self, in_image):
        """Return (gray bottom strip, binary mask of candidate MICR regions,
        y offset of the bottom strip inside the full image)."""
        rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7))
        bottom_cropped, delta = self.get_bottom_cropped_image(in_image)
        gray = cv2.cvtColor(bottom_cropped, cv2.COLOR_BGR2GRAY)
        # Blackhat highlights dark print against a light background.
        blackhat_image = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rect_kernel)
        # ksize=-1 selects the 3x3 Scharr kernel for the x-gradient.
        scharr_grad_x = cv2.Sobel(blackhat_image, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
        scharr_grad_x = np.absolute(scharr_grad_x)
        (min_val, max_val) = (np.min(scharr_grad_x), np.max(scharr_grad_x))
        # Min-max normalize to the 0..255 range.
        scharr_grad_x = (255 * ((scharr_grad_x - min_val) / (max_val - min_val)))
        scharr_grad_x = scharr_grad_x.astype("uint8")
        # Closing fuses adjacent characters into solid groups before Otsu.
        scharr_grad_x_closed = cv2.morphologyEx(scharr_grad_x, cv2.MORPH_CLOSE, rect_kernel)
        thresh = cv2.threshold(scharr_grad_x_closed, 0, 255,
                               cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        # Drop blobs touching the border (scanner/edge artifacts).
        thresh = clear_border(thresh)
        return gray, thresh, delta

    def get_grouped_contours(self, in_bmp):
        """Return left-to-right sorted (x, y, w, h) boxes of character groups."""
        (oh, ow) = in_bmp.shape[:2]
        w_th = int(ow * self.min_pass_w)  # was hard-coded 50
        h_th = int(oh * self.min_pass_h)  # was hard-coded 15
        group_cnts = cv2.findContours(in_bmp, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        group_cnts = group_cnts[0] if imutils.is_cv2() else group_cnts[1]
        group_locs = []
        for c in group_cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            if w >= w_th and h >= h_th:
                group_locs.append((x, y, w, h))
        # Sort the group locations left-to-right.
        group_locs = sorted(group_locs, key=lambda loc: loc[0])
        return group_locs

    def get_group_roi_from_gray_scale(self, gray, gx, gy, gw, gh):
        """Crop one padded character group out of `gray` and binarize it."""
        group = gray[gy - self.crop_offset:gy + gh + self.crop_offset,
                     gx - self.crop_offset:gx + gw + self.crop_offset]
        group = cv2.threshold(group, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        return group

    def process_cheque(self, in_image):
        """OCR one cheque image: print the MICR text, optionally display the
        annotated image, and save results via save_images()."""
        chars = self.process_reference()
        output = []
        image = cv2.imread(in_image)
        image = imutils.resize(image, width=1024)
        draw_image = image.copy()
        gray, thresh, delta = self.get_processed_bmp(image)
        group_locs = self.get_grouped_contours(thresh)
        for (gX, gY, gW, gH) in group_locs:
            group_output = []
            group = self.get_group_roi_from_gray_scale(gray, gX, gY, gW, gH)
            char_cnts = cv2.findContours(group, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
            char_cnts = char_cnts[0] if imutils.is_cv2() else char_cnts[1]
            if len(char_cnts) == 0:
                continue
            char_cnts = contours.sort_contours(char_cnts, method="left-to-right")[0]
            rois = self.extract_digits_and_symbols(group, char_cnts)[0]
            for roi in rois:
                scores = []
                roi = cv2.resize(roi, (self.rsz_h, self.rsz_w))
                # Correlation template matching against every reference
                # character; the largest score wins.
                for charName in self.charNames:
                    result = cv2.matchTemplate(roi, chars[charName], cv2.TM_CCOEFF)
                    (_, score, _, _) = cv2.minMaxLoc(result)
                    scores.append(score)
                group_output.append(self.charNames[np.argmax(scores)])
            if self.draw_predicted:
                # BUG FIX: bottom-right y was `gY + gY + delta` (typo); the
                # padded box bottom is gY + gH + delta + draw_offset.
                cv2.rectangle(draw_image,
                              (gX - self.draw_offset, gY + delta - self.draw_offset),
                              (gX + gW + self.draw_offset, gY + gH + delta + self.draw_offset),
                              (0, 0, 255), 2)
            if self.put_predicted_text:
                cv2.putText(draw_image, "".join(group_output),
                            (gX - 10, gY + delta - 25), cv2.FONT_HERSHEY_SIMPLEX,
                            0.95, (0, 0, 255), 3)
            # Add the group output to the overall cheque OCR output.
            output.append("".join(group_output))
        print("Check OCR: {}".format(" ".join(output)))
        if self.display:
            self.show_image(draw_image)
        self.save_images(in_image, draw_image, output)

    def save_images(self, name, marked_image, ocr_op):
        """Write the annotated image and append `name, ocr text` to result/ocr.txt."""
        output_link_file = os.path.join(self.result_dir, 'ocr.txt')
        # BUG FIX: the original split the path on "\\" only, leaving the full
        # path in place on POSIX systems; basename handles the host OS.
        base_name = os.path.basename(name)
        stem = os.path.splitext(base_name)[0]
        new_name = os.path.join(self.result_dir, str(stem) + '_ocr.png')
        cv2.imwrite(new_name, marked_image)
        data_to_write = (base_name, " ".join(ocr_op))
        # `with` guarantees the handle is closed even if a write fails.
        with open(output_link_file, 'a') as file_ptr:
            file_ptr.write(", ".join(str(v) for v in data_to_write))
            file_ptr.write('\n')

    def get_cheque_samples_from_dir(self, srcdir):
        """Run process_cheque on every file found directly in `srcdir`."""
        for filename in os.listdir(srcdir):
            sample_name = os.path.join(srcdir, filename)
            print('Processing: ', sample_name)
            self.process_cheque(sample_name)
if __name__ == "__main__":
    # Batch entry point: wipe previous results, then OCR every cheque image
    # found in `sample_data/`.
    input_image = 'data/example_check.png'  # kept for the single-image path below
    micr_extractor = BankChequeMICR()
    micr_extractor.remove_directory_contents(micr_extractor.result_dir)
    #micr_extractor.process_cheque(input_image)
    src_dir = 'sample_data'
    micr_extractor.get_cheque_samples_from_dir(src_dir)
    print('Done')
23,704 | 231afca233e411e4c1bc715336ecac20fc8f97a2 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class CNN_NLP(nn.Module):
    """Kim-2014-style 1-D CNN text classifier over pre-embedded token sequences.

    One Conv1d per filter size, ReLU, global max pooling over time, feature
    concatenation, dropout, and a final linear layer producing class logits.
    """

    def __init__(self,
                 embed_dim=300,
                 filter_sizes=(3, 4, 5),
                 num_filters=(100, 100, 100),
                 num_classes=2,
                 dropout=0.5):
        """
        :param embed_dim: dimensionality of the input token embeddings.
        :param filter_sizes: kernel width of each parallel Conv1d branch.
        :param num_filters: output channels per branch (same length as
            filter_sizes).  Defaults are tuples now (the original used
            mutable list defaults); list arguments still work.
        :param num_classes: number of output logits.
        :param dropout: dropout probability before the final linear layer.
        """
        super(CNN_NLP, self).__init__()
        self.num_filters = num_filters
        self.num_classes = num_classes
        # One Conv1d per filter size; input channels = embedding dimension.
        self.conv1d_list = nn.ModuleList([
            nn.Conv1d(in_channels=embed_dim,
                      out_channels=num_filters[i],
                      kernel_size=filter_sizes[i])
            for i in range(len(filter_sizes))
        ])
        # Concatenated max-pooled features -> class logits.
        self.fc = nn.Linear(int(np.sum(num_filters)), num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_ids):
        """Compute logits.

        :param input_ids: float tensor of shape (batch, seq_len, embed_dim) --
            already-embedded tokens, despite the name.
        :return: logits of shape (batch, num_classes).
        """
        # Conv1d wants (batch, channels, seq).
        x_reshaped = input_ids.permute(0, 2, 1)
        x_conv_list = [F.relu(conv1d(x_reshaped)) for conv1d in self.conv1d_list]
        # Global max pool over the time axis for each filter bank.
        x_pool_list = [F.max_pool1d(x_conv, kernel_size=x_conv.shape[2])
                       for x_conv in x_conv_list]
        x_fc = torch.cat([x_pool.squeeze(2) for x_pool in x_pool_list], dim=1)
        # (removed: unused local `fc_shape` from the original)
        logits = self.fc(self.dropout(x_fc))
        return logits
23,705 | dde95f4c5e0d9c3e85c6dbaffa2fcfd40be53071 | from django.shortcuts import render
from github import Github
# Create your views here.
g = Github(token) #take token from github
repository = 'CSU33012_Practical_visualisation' #pass any repository linked to username here
username = 'crowlec8' #pass any username in here
user = g.get_user(username)
repo = user.get_repo(repository)
contributors = repo.get_contributors() #code taken from previous api project
#function used to return lists of repositories
#def getRepoNames():
# repoNameList = []
# for repo in user.get_repos():
# repoName = repo.name
# repoNameList.append(repoName)
# return repoNameList
#function used to return lists of commits per repositories
#def getTotalRepoCommits():
# totalCommitsLists = []
# for repo in user.get_repos():
# totalCommits = repo.get_commits().totalCount
# totalCommitsLists.append(totalCommits)
# return totalCommitsLists
#function used to return lists of contributors
def getContributorsRepo():
    """Return the login names of every contributor to the module-level repo."""
    return [person.login for person in contributors]
#function used to return lists of contributers' commits
def getContributorsCommits():
    """Return per-contributor commit counts, aligned with getContributorsRepo()."""
    return [repo.get_commits(author=person.login).totalCount
            for person in contributors]
def indexPage(request):
    """Render index.html with contributor names (varC) and commit counts (varD)."""
    context = {
        'varC': getContributorsRepo(),
        'varD': getContributorsCommits(),
    }
    return render(request, 'index.html', context)
|
23,706 | 0ecf2d21b9a5959025a15f7c576f3ec06d9e41b9 | import psycopg2
import os
from db.db_config import init_db,close_connection
from flask import jsonify, request
from flask_jwt_extended import (jwt_required,
create_access_token,get_jwt_identity
)
import smtplib
class Status():
    """This class is for editing status of a record"""
    @jwt_required
    def edit_status(self,id,type,status):
        """This method is for editing the record of a status

        Admin-only: updates an incident's status, e-mails the record owner,
        and returns the updated record.  (`id` and `type` shadow builtins;
        kept as-is for caller compatibility.)
        """
        current_user = get_jwt_identity()
        try:
            con = init_db()
            cur = con.cursor()
            # Only admins may change a record's status.
            cur.execute("SELECT is_admin FROM users WHERE email = %s",(current_user,))
            user = cur.fetchall()
            user_role = user[0][0]
            if user_role != True:
                return{
                    "Status": 403,
                    "Message":"Unauthorized user"
                },403
            cur.execute("SELECT user_id FROM incidents WHERE \
                incident_id = %s AND type = %s",(id,type))
            record = cur.fetchall()
            if not record:
                return{
                    "Status": 404,
                    "Message": "Record does not exist"
                },404
            # Resolve the record owner's e-mail for the notification below.
            user =record[0][0]
            cur.execute("SELECT email FROM users WHERE user_id = %s",(user,))
            user_email = cur.fetchall()
            email = user_email[0][0]
            # NOTE(review): assumes close_connection() commits the UPDATE --
            # verify, otherwise the change is rolled back.
            cur.execute("UPDATE incidents SET status = %s WHERE \
                incident_id = %s and type = %s \
                RETURNING incident_id,type,location,status,comment,user_id",
                (status,id,type))
            updated_record = cur.fetchone()
            close_connection(con)
            new_record = {
                "Created by":updated_record[5],
                "Incident Id":updated_record[0],
                "Type":updated_record[1],
                "Location":updated_record[2],
                "Status":updated_record[3],
                "Comment":updated_record[4]
            }
            #send mail
            # SECURITY NOTE(review): Gmail credentials are hard-coded here --
            # move them to environment variables / a secrets store and rotate
            # the exposed password.
            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(
                "projectemail2303@gmail.com", "Projectemail_2303")
            msg = "Your {} record is now {}".format(type, status)
            server.sendmail(
                "projectemail2303@gmail.com", email, msg)
            server.quit()
            #update admin after status change
            return{
                "Status": 200,
                "Message":"Updated " + type + " record status",
                "Data": new_record
            }
        except (Exception,psycopg2.DatabaseError) as error:
            # NOTE(review): this error response carries no HTTP status code,
            # so Flask returns it as 200 -- confirm callers expect that.
            print(error)
            return{
                "message":"Record has not been edited please try again"
            }
23,707 | 5cd8d4982d50aeb0af889d4d3cf9c1cc19a6976a | """
Copyright 2016, Michael DeHaan <michael.dehaan@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from camp.band.members.scale_source import ScaleSource
from camp.band.members.performance import Performance
from camp.core.scale import scale as core_scale
# Notation systems a track may use for note names.
VALID_NOTATIONS = [ 'roman', 'literal' ]

class Instrument(object):
    """A MIDI instrument bound to a channel, with a chosen notation system."""

    def __init__(self, channel=None, notation='roman'):
        """
        :param channel: MIDI channel number (required, must be an integer).
        :param notation: one of VALID_NOTATIONS ('roman' or 'literal').
        :raises Exception: if channel is not an int or notation is invalid.
        """
        self.channel = channel
        self.notation = notation
        # FIX: isinstance() instead of a direct type() comparison -- the
        # idiomatic check, and it accepts int subclasses.
        if not isinstance(self.channel, int):
            raise Exception("channel must be set, as an integer, got: %s" % self.channel)
        if notation not in VALID_NOTATIONS:
            raise Exception("invalid notation type: %s" % notation)

    def to_data(self):
        """Serialize to the dict form used by the tracker save format."""
        return dict(cls="camp.band.tracker.instrument.Instrument",
                    data=dict(channel=self.channel, notation=self.notation))
|
23,708 | ce1255cb7f2ea15b1b52c90b06a796ebd4698e8d | from __future__ import unicode_literals
from django.db import models
# Create your models here.
'''
I'm not using any data model; instead I created a file called `storing`
which connects to MongoDB and stores the parsed data there.
'''
#class DataPost(object):
# def __init__(self, dev_id, prof_id, dev_time, content):
# self.dev_id = dev_id
# self.prof_id = prof_id
# self.dev_time = dev_time
# self.rec_time = datetime.now()
# self.content = content
|
23,709 | c85f635edd75ea4e4f43b30c2492201dfeb1f733 |
from AST import *
from Tokens import *
from PrecedenceBuilder import PrecedenceBuilder
from parse_util import *
# Dépendances circulaires...
import parseFunction
import parseBlock
def parseArgs(tokenizer):
    # Returns a triple (pos, isLValuable, args).
    """Parse a parenthesised, comma-separated argument list, which may instead
    turn out to be a chained comparison (a < b <= c ...).

    Returns ((start, end), isLValuable, args).  For a comparison, the
    collected operands are collapsed into a single Cmp node and the result
    is never LValuable.  Stops on the closing RPAR (left for the caller).
    """
    args = []
    cmps = []  # (Comparison.OPERAND, args) / (op, pos) pairs, in order
    isLValuable = True
    start = tokenizer.current().pos()[0]
    isTuple = True  # flips to False the first time a comparator is seen

    def finalize(finalPos):
        # Package the parse result; `end` is set by the main loop below.
        if isTuple:
            return (start, end), isLValuable, args
        else:
            if len(args) == 0:
                raise ASTError(
                    finalPos,
                    "Expected an argument."
                )
            cmps.append((Comparison.OPERAND, args))
            return (start, end), False, [Cmp((start, end), cmps)]

    def receiveComparator(pos, op):
        # Flush the operands gathered so far and record the comparator.
        nonlocal args, isTuple
        isTuple = False
        if len(args) == 0:
            raise ASTError(
                pos,
                "Expected something to compare to."
            )
        cmps.append((Comparison.OPERAND, args))
        args = []
        cmps.append((op, pos))

    while True:
        argLValuable, arg = parseLRValue(tokenizer)
        # The whole list is LValuable only if every argument is.
        isLValuable = isLValuable and argLValuable
        token = tokenizer.current()
        end = token.pos()[1]

        def expectArg(pos):
            # An explicit separator requires an argument before it.
            if arg == None:
                raise ASTError(
                    pos,
                    "Expected an argument."
                )
            args.append(arg)

        cmp_dict = {
            Symbol.LT: Comparison.LT,
            Symbol.LEQ: Comparison.LEQ,
            Symbol.GT: Comparison.GT,
            Symbol.GEQ: Comparison.GEQ,
            Symbol.EQ: Comparison.EQ
        }
        if token.isSymbol(Symbol.COMMA):
            expectArg(token.pos())
        elif token.getValue() in cmp_dict:
            isLValuable = False
            expectArg(token.pos())
            receiveComparator(token.pos(), cmp_dict[token.getValue()])
        elif token.isSymbol(Symbol.RPAR):
            # Trailing argument (if any) then finish.
            if arg != None:
                args.append(arg)
            return finalize(token.pos())
        else:
            raise ASTError(token.pos(), "Expected ',' or ')'.")
def parseMaybeArith(tokenizer):
    # Returns a pair (isLValuable, value).
    """Parse an operand or a full arithmetic/boolean expression.

    Alternates between operand and operator parsing, feeding a
    PrecedenceBuilder; unary NOTs are stacked and wrapped around the next
    operand.  Returns (isLValuable, value) -- anything built into an Arith
    node is not LValuable.
    """
    start = tokenizer.lookahead().pos()[0]
    builder = PrecedenceBuilder()
    isLValuable = True
    target = None
    expectOperand = True   # next thing to parse is an operand
    needOperand = False    # an operator was just consumed, operand mandatory
    end = start
    unaryStack = []        # pending (pos, Operation.NOT) prefixes

    def finalize(start, end):
        built = builder.finalize(start, end)
        if isinstance(built, Arith):
            return False, built
        elif built == None:
            return False, None
        else:
            return isLValuable, built

    def parseOperand():
        nonlocal unaryStack, isLValuable, target, expectOperand, end
        token = None
        # Collect any prefix NOTs before the operand itself.
        while True:
            token = tokenizer.nextToken()
            end = token.pos()[1]
            if token.isSymbol(Symbol.NOT):
                unaryStack.append((token.pos(), Operation.NOT))
            else:
                break

        def wrapUnary(target):
            # Apply stacked NOTs innermost-last, then clear the stack.
            nonlocal unaryStack
            for pos, unary in unaryStack[::-1]:
                target = Unary(pos, unary, target)
            unaryStack = []
            return target

        if token.isType(TokenType.IDENTIFIER):
            target = Var(token.pos(), token.getValue())
        elif token.isKeyword(Keyword.TRUE):
            target = Constant(token.pos(), Boolean, True)
            isLValuable = False
        elif token.isKeyword(Keyword.FALSE):
            target = Constant(token.pos(), Boolean, False)
            isLValuable = False
        elif token.isType(TokenType.NUMBER):
            target = Constant(token.pos(), Integer, token.getValue())
            isLValuable = False
        elif token.isType(TokenType.STRING):
            target = Constant(token.pos(), String, token.getValue())
            isLValuable = False
        elif token.isSymbol(Symbol.LPAR):
            # Parenthesised expression or tuple literal.
            pos, argsLValuable, contents = parseArgs(tokenizer)
            isLValuable = isLValuable and argsLValuable
            expectSymbol(tokenizer.current(), Symbol.RPAR)
            if len(contents) == 0:
                raise ASTError(
                    pos,
                    "Expected a value."
                )
            elif len(contents) == 1:
                target = contents[0]
            else:
                target = Tuple(pos, contents)
        else:
            # Not an operand: either done, or an operator demanded one.
            if needOperand:
                raise ASTError(token.pos(), "Expected an operand.")
            return finalize(start, end)
        expectOperand = False
        # A following '(' makes the operand a call target.
        token = tokenizer.nextToken()
        if token.isSymbol(Symbol.LPAR):
            pos, argsLValuable, args = parseArgs(tokenizer)
            isLValuable = isLValuable and argsLValuable
            expectSymbol(tokenizer.current(), Symbol.RPAR)
            tokenizer.nextToken()
            builder.receiveOperand(
                wrapUnary(Call(pos, target, args))
            )
        else:
            builder.receiveOperand(wrapUnary(target))

    def parseOperator():
        nonlocal target, expectOperand, needOperand, end
        token = tokenizer.current()
        if token.isSymbol(Symbol.CONCAT):
            builder.receiveOperator(Operation.CONCAT)
        elif token.isSymbol(Symbol.OR):
            builder.receiveOperator(Operation.OR)
        elif token.isSymbol(Symbol.AND):
            builder.receiveOperator(Operation.AND)
        elif token.isSymbol(Symbol.ADD):
            builder.receiveOperator(Operation.ADD)
        elif token.isSymbol(Symbol.MUL):
            builder.receiveOperator(Operation.MUL)
        elif token.isSymbol(Symbol.SUB):
            builder.receiveOperator(Operation.SUB)
        elif token.isSymbol(Symbol.DIV):
            builder.receiveOperator(Operation.DIV)
        else:
            # No operator: the expression ends here.
            return finalize(start, end)
        expectOperand = True
        needOperand = True

    while True:
        val = None
        if expectOperand:
            val = parseOperand()
        else:
            val = parseOperator()
        # The helpers return a (isLValuable, value) pair only when finished.
        if val != None:
            return val
def parseIf(tokenizer):
    """Parse an 'if ... then ... [else ...]' or 'if unwrap ... on ... then ...'
    expression, returning an IfBlock or IfUnwrapBlock node.

    In the unwrap form, the parsed condition is actually the boxed value and
    the 'on' target must be an LValue receiving the unwrapped contents.
    """
    first = tokenizer.nextToken()
    start = first.pos()[0]
    expectKeyword(first, Keyword.IF)
    isUnwrapping = False
    if tokenizer.lookahead().isKeyword(Keyword.UNWRAP):
        tokenizer.nextToken()
        isUnwrapping = True
    token = tokenizer.current()
    # "cond" holds the box value in the "if unwrap" form.
    _, cond = parseLRValue(tokenizer)
    target = None
    if isUnwrapping:
        if cond == None:
            raise ASTError(
                token.pos(),
                "Expected a value to unwrap."
            )
        onToken = tokenizer.current()
        expectKeyword(onToken, Keyword.ON)
        isLValuable, target = parseLRValue(tokenizer)
        if target == None:
            raise ASTError(
                onToken.pos(),
                "Expected a value after 'on'."
            )
        elif not isLValuable:
            raise ASTError(
                target.pos(),
                "Expected a LValue."
            )
    else:
        if cond == None:
            raise ASTError(
                token.pos(),
                "Expected a condition."
            )
    thenToken = tokenizer.current()
    expectKeyword(thenToken, Keyword.THEN)
    _, valIfTrue = parseLRValue(tokenizer)
    if valIfTrue == None:
        raise ASTError(
            thenToken.pos(),
            "Expected a value after 'then'."
        )
    end = valIfTrue.pos()[1]
    valElse = None
    maybeElse = tokenizer.current()
    if maybeElse.isKeyword(Keyword.ELSE):
        _, valElse = parseLRValue(tokenizer)
        if valElse == None:
            raise ASTError(
                maybeElse.pos(),
                "Expected a value after 'else'."
            )
        # BUG FIX: the node's span must extend to the end of the 'else'
        # value; the original re-used valIfTrue's end position here.
        end = valElse.pos()[1]
    if isUnwrapping:
        return IfUnwrapBlock(
            (start, end), cond, target, valIfTrue, valElse
        )
    else:
        return IfBlock((start, end), cond, valIfTrue, valElse)
def parseLoop(tokenizer):
    """Parse 'loop <block>' and return the corresponding LoopBlock node."""
    loop_token = tokenizer.nextToken()
    begin = loop_token.pos()[0]
    expectKeyword(loop_token, Keyword.LOOP)
    body = parseBlock.parseBlock(tokenizer)
    finish = body.pos()[1]
    tokenizer.nextToken()
    return LoopBlock((begin, finish), body)
def parseEmptyType(tokenizer):
    """Consume 'empty : <type>' and return the parsed type node."""
    expectKeyword(tokenizer.nextToken(), Keyword.EMPTY)
    expectSymbol(tokenizer.nextToken(), Symbol.COLON)
    tokenizer.nextToken()
    return parseFunction.parseType(tokenizer, False)
def parseWrap(tokenizer):
    """Parse 'wrap empty: <type>' into WrapEmpty, or 'wrap <value>' into Wrap."""
    keyword_token = tokenizer.nextToken()
    expectKeyword(keyword_token, Keyword.WRAP)
    begin = keyword_token.pos()[0]
    if not tokenizer.lookahead().isKeyword(Keyword.EMPTY):
        # Plain form: wrap an existing value.
        _, wrapped = parseLRValue(tokenizer)
        finish = tokenizer.current().pos()[1]
        return Wrap((begin, finish), wrapped)
    # Empty form: an explicitly-typed empty box.
    empty_type = parseEmptyType(tokenizer)
    finish = tokenizer.current().pos()[1]
    tokenizer.nextToken()
    return WrapEmpty((begin, finish), empty_type)
def parseCons(tokenizer):
    """Parse 'cons empty: <type>' into ConsEmpty, or 'cons <elem>, <list>' into Cons."""
    keyword_token = tokenizer.nextToken()
    expectKeyword(keyword_token, Keyword.CONS)
    begin = keyword_token.pos()[0]
    if tokenizer.lookahead().isKeyword(Keyword.EMPTY):
        # Empty form: an explicitly-typed empty list.
        element_type = parseEmptyType(tokenizer)
        finish = tokenizer.current().pos()[1]
        tokenizer.nextToken()
        return ConsEmpty((begin, finish), element_type)
    # Plain form: prepend an element onto an existing list.
    _, head = parseLRValue(tokenizer)
    expectSymbol(tokenizer.current(), Symbol.COMMA)
    _, tail = parseLRValue(tokenizer)
    finish = tokenizer.current().pos()[1]
    return Cons((begin, finish), head, tail)
def parseLRValue(tokenizer):
    """Dispatch on the upcoming token to the right sub-parser.

    Returns a (isLValuable, value) pair; every special form (block, if,
    loop, wrap, cons) is never LValuable.
    """
    upcoming = tokenizer.lookahead()
    if upcoming.isSymbol(Symbol.LBRACKET):
        block = parseBlock.parseBlock(tokenizer)
        tokenizer.nextToken()
        return False, block
    for keyword, handler in ((Keyword.IF, parseIf),
                             (Keyword.LOOP, parseLoop),
                             (Keyword.WRAP, parseWrap),
                             (Keyword.CONS, parseCons)):
        if upcoming.isKeyword(keyword):
            return False, handler(tokenizer)
    # Fallback: a plain operand or arithmetic expression.
    return parseMaybeArith(tokenizer)
|
23,710 | 6676e8e95d1ddf63d44a02146e1991aeb2493ea8 | import boto3
# Create a client for the EC2 service.
# SECURITY NOTE(review): prefer environment variables / an IAM role over
# hard-coded keys (boto3 resolves credentials automatically when omitted).
client = boto3.client('ec2', region_name='us-east-1', aws_access_key_id='AWS_ACCESS_KEY_ID',
                      aws_secret_access_key='AWS_SECRET_ACCESS_KEY')

# Instance/database name -> EBS volume id whose snapshots we want to purge.
volumes_dict = {
    'database-1': 'volume-id-1',
    'database-2': 'volume-id-2',
    'database-3': 'volume-id-3',
}

# Snapshot ids that were deleted successfully.
deleted_snapshots = []

for volume_name in volumes_dict:
    snapshots_list = client.describe_snapshots(Filters=[
        {
            'Name': 'volume-id',
            'Values': [volumes_dict[volume_name]],
        },
    ])
    # describe_snapshots() returns {'Snapshots': [{snapshot dict}, ...],
    # 'ResponseMetadata': {...}}; each entry in 'Snapshots' is a dict
    # describing one snapshot, NOT a bare id string.
    if snapshots_list['ResponseMetadata']['HTTPStatusCode'] == 200:
        for snapshot in snapshots_list['Snapshots']:
            # BUG FIX: the original passed the whole snapshot dict as
            # SnapshotId (and appended it to the result list); the API
            # needs the 'SnapshotId' string.
            snapshot_id = snapshot['SnapshotId']
            response = client.delete_snapshot(SnapshotId=snapshot_id, DryRun=False)
            if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                deleted_snapshots.append(snapshot_id)

# Print the snapshot ids which were deleted successfully.
print(deleted_snapshots)
23,711 | 00802597b493b1451500517cbbd1833306264e96 | import json
import random
from loguru import logger
from spade.behaviour import OneShotBehaviour
from spade.template import Template
from spade.message import Message
from pygomas.bditroop import BDITroop
from pygomas.bdifieldop import BDIFieldOp
from agentspeak import Actions
from agentspeak import grounded
from agentspeak.stdlib import actions as asp_action
from pygomas.ontology import DESTINATION
from pygomas.agent import LONG_RECEIVE_WAIT
class BDIGen(BDITroop):
    """BDI troop that registers extra AgentSpeak helper functions."""

    def add_custom_actions(self, actions):
        # Keep the stock BDITroop actions, then register the helpers below
        # on the supplied Actions registry.
        super().add_custom_actions(actions)

        @actions.add_function(".f", (tuple, ))
        def _f(Flag):
            """
            Return the four map points at the vertices of a square of side
            `side` centred on Flag.
            param: Flag: position on which to centre the square.
            return: tuple with the four vertices of the square
            """
            # Flag is an (x, y, z) world position; the square lies in the
            # horizontal plane (height fixed at 0).
            x = Flag[0]
            y = Flag[2]
            side = 30
            pos1 = (x-(side/2), 0, y-(side/2))
            pos2 = (x+(side/2), 0, y-(side/2))
            pos3 = (x+(side/2), 0, y+(side/2))
            pos4 = (x-(side/2), 0, y+(side/2))
            return (pos1,pos2,pos3,pos4)

        @actions.add_function(".primerElem",(tuple, ))
        def _primerElem(l):
            """
            Return the first element of the list l.
            param: l: list from which to extract the first element.
            return: first element of the list l.
            """
            return l[0]

        @actions.add_function(".del", (int, tuple, ))
        def _del(pos, l):
            """
            Return l with the element at position pos removed.
            param: pos: position in the list to delete
                   l: list whose element we want to delete
            """
            # The first two branches are just special cases of the general
            # slice-and-concatenate in the else branch.
            if pos == 0:
                return l[1:]
            elif pos == len(l) - 1:
                return l[:pos]
            else:
                return l[0:pos] + l[pos+1:]
|
23,712 | 190d7a98b6c9c6269f5538648dafc447e0a3a118 | # $Id: MitMHT_cfi.py,v 1.3 2009/03/22 08:49:00 loizides Exp $
import FWCore.ParameterSet.Config as cms
def _make_ht_met(src_label, alias_name):
    """Build one HT-MET METProducer over the given corrected calo-jet collection.

    The six producers below were byte-identical apart from the input
    collection and alias, so the shared configuration lives here.
    """
    return cms.EDProducer("METProducer",
        src = cms.InputTag(src_label),
        METType = cms.string('MET'),
        alias = cms.string(alias_name),
        noHF = cms.bool(False),
        globalThreshold = cms.double(5.0),
        InputType = cms.string('CaloJetCollection')
    )

# One HT-MET producer per jet algorithm (SisCone 5/7, iterative cone 5,
# kT 4/6, and JPT-corrected iterative cone 5).
htMetSC5 = _make_ht_met("L2L3CorJetSC5Calo", 'HTMETSC5')
htMetSC7 = _make_ht_met("L2L3CorJetSC7Calo", 'HTMETSC7')
htMetIC5 = _make_ht_met("L2L3CorJetIC5Calo", 'HTMETIC5')
htMetKT4 = _make_ht_met("L2L3CorJetKT4Calo", 'HTMETKT4')
htMetKT6 = _make_ht_met("L2L3CorJetKT6Calo", 'HTMETKT6')
htMetIC5JPT = _make_ht_met("L2L3CorJetIC5JPT", 'HTMETIC5JPT')
#deprecated
#import JetMETCorrections.Type1MET.MetMuonCorrections_cff
#corMHTGlobalMuonsSC5 = JetMETCorrections.Type1MET.MetMuonCorrections_cff.corMetGlobalMuons.clone()
#JetMETCorrections.Type1MET.uncorMETInputTag = htMetSC5
#corMHTGlobalMuonsSC7 = JetMETCorrections.Type1MET.MetMuonCorrections_cff.corMetGlobalMuons.clone()
#JetMETCorrections.Type1MET.uncorMETInputTag = htMetSC7
#corMHTGlobalMuonsIC5 = JetMETCorrections.Type1MET.MetMuonCorrections_cff.corMetGlobalMuons.clone()
#JetMETCorrections.Type1MET.uncorMETInputTag = htMetIC5
#corMHTGlobalMuonsKT4 = JetMETCorrections.Type1MET.MetMuonCorrections_cff.corMetGlobalMuons.clone()
#JetMETCorrections.Type1MET.uncorMETInputTag = htMetKT4
#corMHTGlobalMuonsKT6 = JetMETCorrections.Type1MET.MetMuonCorrections_cff.corMetGlobalMuons.clone()
#JetMETCorrections.Type1MET.uncorMETInputTag = htMetKT6
#corMHTGlobalMuonsIC5JPT = JetMETCorrections.Type1MET.MetMuonCorrections_cff.corMetGlobalMuons.clone()
#JetMETCorrections.Type1MET.uncorMETInputTag = htMetIC5JPT
#MitMHT = cms.Sequence(htMetSC5*
#htMetSC7*
#htMetIC5*
#htMetKT4*
#htMetKT6*
#htMetIC5JPT*
#corMHTGlobalMuonsSC5*
#corMHTGlobalMuonsSC7*
#corMHTGlobalMuonsIC5*
#corMHTGlobalMuonsKT4*
#corMHTGlobalMuonsKT6*
#corMHTGlobalMuonsIC5JPT)
|
23,713 | ebc0f5a766baf69623e5633bc2d5cfd4e367b9e8 | # -*- coding:utf-8 -*-
# @Time:2020/6/20 12:04
# @Author:TimVan
# @File:14. Longest Common Prefix.py
# @Software:PyCharm
# Write a function to find the longest common prefix string amongst an array of strings.
# If there is no common prefix, return an empty string "".
#
# Example 1:
# Input: ["flower","flow","flight"]
# Output: "fl"
#
# Example 2:
# Input: ["dog","racecar","car"]
# Output: ""
# Explanation: There is no common prefix among the input strings.
# Note:
# All given inputs are in lowercase letters a-z.
from typing import List
# class Solution:
# def longestCommonPrefix(self, strs: List[str]) -> str:
# if len(strs) < 1:
# return ""
#
# commonPrefix = strs[0]
# i = 1
# lens = len(strs)
# while i < lens:
# strSingle = strs[i]
# j = 0
# lenStr = len(strSingle)
# lenCommonPrefix = len(commonPrefix)
# cacheCommon = ""
# # 字符数需要小于公共串和每个单词
# while j < min(lenStr, lenCommonPrefix):
# if commonPrefix[j] == strSingle[j]:
# cacheCommon += commonPrefix[j]
# j += 1
# else:
# # 若第一个字母不同,说明直接无公共字符
# if j == 0:
# return ""
# # 跳出循环尝试下一个
# else:
# break
# commonPrefix = cacheCommon
# # print(i, "-", commonPrefix)
# i += 1
#
# return commonPrefix
# 使用zip()和set()的方法
# class Solution:
# def longestCommonPrefix(self, strs: List[str]) -> str:
# s = ""
# for one in zip(*strs):
# if len(set(one)) == 1:
# s += one[0]
# else:
# break
# return s
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest prefix shared by every string in strs ('' if none)."""
        prefix_chars = []
        # zip(*strs) yields one tuple per character column, stopping at the
        # shortest string; a column belongs to the prefix iff all chars match.
        for column in zip(*strs):
            if all(ch == column[0] for ch in column):
                prefix_chars.append(column[0])
            else:
                break
        return "".join(prefix_chars)
solution = Solution()
# Exercise the solver on typical, identical, disjoint and empty inputs.
sample_inputs = [
    ["flower", "flow", "flight"],
    ["dog", "racecar", "car"],
    ["self", "self", "self"],
    ["gitee", "github", "gitlab"],
    [],
]
for sample in sample_inputs:
    print("***", solution.longestCommonPrefix(sample))
|
23,714 | 6b4557bd4ff1d2a5175e834bb8c814661d649de5 | # Generated by Django 3.2.7 on 2021-10-05 15:05
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Lecture.department`` back to ``depart``.

    This reverses migration 0005, which had renamed ``depart`` to
    ``department``.
    """
    dependencies = [
        ('app', '0005_rename_depart_lecture_department'),
    ]
    operations = [
        migrations.RenameField(
            model_name='lecture',
            old_name='department',
            new_name='depart',
        ),
    ]
|
23,715 | 9d3600f237bf357ad76ee0ec76fbb90497642802 | from PyQt4.QtGui import *
from PyQt4 import QtGui
""" List all of the functions available.
"""
class ModeListWidget(QtGui.QWidget):
    """A single row of push buttons for choosing the unit's operating mode."""

    def __init__(self, parent=None):
        super(ModeListWidget, self).__init__(parent)
        grid = QtGui.QGridLayout()
        grid.setSpacing(10)
        # One button per mode, left to right; each is kept as an attribute
        # (radioMode, mp3Mode, cbMode, phoneMode) so callers can connect
        # to its clicked signal.
        for column, (attribute, label) in enumerate(
                [("radioMode", "Radio"),
                 ("mp3Mode", "MP3"),
                 ("cbMode", "CB"),
                 ("phoneMode", "Phone")]):
            button = QtGui.QPushButton(label, self)
            setattr(self, attribute, button)
            grid.addWidget(button, 0, column)
        self.setLayout(grid)
|
23,716 | 81cad42676b1bc47246872aaddb9fc34e0bfcd27 | #
# @lc app=leetcode id=33 lang=python3
#
# [33] Search in Rotated Sorted Array
# difficulty: medium
# https://leetcode.com/problems/search-in-rotated-sorted-array/
#
from typing import List
import pytest
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Return target's index in a rotated sorted array, or -1 if absent."""
        # Delegate to the module-level helper over the full index range.
        last = len(nums) - 1
        return search(nums, target, 0, last)
def search(nums, target, left, right):
    """Binary-search a rotated sorted array within [left, right].

    At every step at least one half of the window is properly sorted;
    the sorted half tells us which side the target can be on.
    Returns the index of target or -1.
    """
    while left <= right:
        mid = (left + right) // 2
        if nums[mid] == target:
            return mid
        if nums[left] <= nums[mid]:
            # Left half is sorted.
            if nums[left] <= target < nums[mid]:
                right = mid - 1
            else:
                left = mid + 1
        else:
            # Right half is sorted.
            if nums[mid] < target <= nums[right]:
                left = mid + 1
            else:
                right = mid - 1
    return -1
# Table-driven cases: singletons, every rotation offset, hits and misses.
@pytest.mark.parametrize('nums, target, expected', [
    ([0], 0, 0),
    ([1], 2, -1),
    ([2, 3, 4, 5, 1], 0, -1),
    ([2, 3, 4, 5, 1], 1, 4),
    ([2, 3, 4, 5, 1], 2, 0),
    ([2, 3, 4, 5, 1], 3, 1),
    ([2, 3, 4, 5, 1], 4, 2),
    ([2, 3, 4, 5, 1], 5, 3),
    ([5, 1, 2, 3, 4], 1, 1),
    ([4, 5, 6, 7, 0, 1, 2], 0, 4),
    ([4, 5, 6, 7, 0, 1, 2], 1, 5),
    ([4, 5, 6, 7, 0, 1, 2], 2, 6),
    ([4, 5, 6, 7, 0, 1, 2], 3, -1),
    ([4, 5, 6, 7, 0, 1, 2], 4, 0),
    ([4, 5, 6, 7, 0, 1, 2], 5, 1),
    ([4, 5, 6, 7, 0, 1, 2], 6, 2),
    ([4, 5, 6, 7, 0, 1, 2], 7, 3),
])
def test_rotated_array_search(nums, target, expected):
    """Check Solution.search returns the expected index for each case."""
    actual = Solution().search(nums, target)
    assert expected == actual

if __name__ == '__main__':
    pytest.main(['-v', __file__])
23,717 | 189b6fdf6f7ea4c2b9476cce93d35acdfecfcfb4 | # https://www.codewars.com/kata/54edbc7200b811e956000556/train/python
def count_sheeps(sheep):
    """Count the sheep that are present.

    The input may hold True/False/None entries; only values equal to True
    are counted.
    """
    # Deliberately `== True` rather than truthiness, matching the original:
    # only genuine True (or 1) entries count.
    return sum(1 for woolly in sheep if woolly == True)
|
23,718 | dc342a6db469774fd162d717f30becb798cc7374 | import re
break2 = True
def Add_items(items):
    """Append one note (plus a trailing newline) to the notepad file.

    Bug fix: the original opened the file and never closed it, leaking the
    handle on every call; `with` guarantees the file is flushed and closed
    even if the write raises.
    """
    with open("super_app!", "a") as items_add:
        items_add.write("{}\n".format(items))
def See_items():
    """Print the entire contents of the notepad file."""
    with open("super_app!", encoding="utf-8") as items_file:
        contents = items_file.read()
    print(contents)
def functions(*string):
    """Prompt for 'menu' or 'Salir111' and dispatch.

    Returns 1 to signal "quit"; re-enters main() on 'menu' (mutual
    recursion -- each 'menu' round trip adds a stack frame).
    NOTE(review): the *string varargs are immediately shadowed by the
    input() result below, so any arguments passed in are ignored.
    Any other answer returns None implicitly.
    """
    string = input("menu salir:" )
    if string == "Salir111":
        return 1
    elif string == "menu":
        main()
def main():
    """Interactive menu loop: 1 quits, 2 shows the notepad, 3 appends notes."""
    print("Bienvenido a un notepad basico xD")
    print(" 1- Salir \n 2- Ver notepad \n 3- Escribir en él notepad \n para salir del programa escriba Salir111 para el menu lo mismo")
    # NOTE(review): int() raises ValueError on non-numeric input (unvalidated).
    operacion = int(input(": "))
    print(operacion)
    while break2:  # break2 is a module-level flag that is never set to False
        if operacion == 1:
            break
        elif operacion == 2:
            See_items()
            # functions() may return None, which matches no branch above
            # and would spin this loop forever -- TODO confirm intended.
            operacion = functions()
        elif operacion == 3:
            string = input(":")
            Add_items(string)
            if string == "Salir111":
                operacion = functions("Salir111")
main()
23,719 | 77095a633261f4a861d20543ef2947c88181b71c | import vampytest
from ...application_command_option import ApplicationCommandOption, ApplicationCommandOptionType
from ..nested import ApplicationCommandOptionMetadataNested
from .test__ApplicationCommandOptionMetadataNested__constructor import _asert_fields_set
def test__ApplicationCommandOptionMetadataNested__copy():
    """
    Tests whether ``ApplicationCommandOptionMetadataNested.copy`` works as intended.
    """
    options = [
        ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),
        ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),
    ]
    option_metadata = ApplicationCommandOptionMetadataNested(
        options = options,
    )
    copy = option_metadata.copy()
    # (`_asert_fields_set` is the helper's actual -- misspelled -- name.)
    _asert_fields_set(copy)
    # A copy must be a distinct object that still compares equal.
    vampytest.assert_is_not(option_metadata, copy)
    vampytest.assert_eq(option_metadata, copy)
def test__ApplicationCommandOptionMetadataNested__copy_with__0():
    """
    Tests whether ``ApplicationCommandOptionMetadataNested.copy_with`` works as intended.

    Case: no parameters.
    """
    options = [
        ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),
        ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),
    ]
    option_metadata = ApplicationCommandOptionMetadataNested(
        options = options,
    )
    # copy_with() with no overrides must behave exactly like copy().
    copy = option_metadata.copy_with()
    _asert_fields_set(copy)
    vampytest.assert_is_not(option_metadata, copy)
    vampytest.assert_eq(option_metadata, copy)
def test__ApplicationCommandOptionMetadataNested__copy_with__1():
    """
    Tests whether ``ApplicationCommandOptionMetadataNested.copy_with`` works as intended.

    Case: All field given
    """
    old_options = [
        ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),
        ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),
    ]
    new_options = [
        ApplicationCommandOption('aya', 'ayaya', ApplicationCommandOptionType.float),
        ApplicationCommandOption('momiji', 'awoo', ApplicationCommandOptionType.user),
    ]
    option_metadata = ApplicationCommandOptionMetadataNested(
        options = old_options,
    )
    copy = option_metadata.copy_with(
        options = new_options,
    )
    _asert_fields_set(copy)
    vampytest.assert_is_not(option_metadata, copy)
    # Overridden options are stored as a tuple on the copy.
    vampytest.assert_eq(copy.options, tuple(new_options))
def test__ApplicationCommandOptionMetadataNested__copy_with_keyword_parameters__0():
    """
    Tests whether ``ApplicationCommandOptionMetadataNested.copy_with_keyword_parameters`` works as intended.

    Case: no parameters.
    """
    options = [
        ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),
        ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),
    ]
    option_metadata = ApplicationCommandOptionMetadataNested(
        options = options,
    )
    # An empty keyword-parameter dict must behave exactly like copy().
    copy = option_metadata.copy_with_keyword_parameters({})
    _asert_fields_set(copy)
    vampytest.assert_is_not(option_metadata, copy)
    vampytest.assert_eq(option_metadata, copy)
def test__ApplicationCommandOptionMetadataNested__copy_with_keyword_parameters__1():
    """
    Tests whether ``ApplicationCommandOptionMetadataNested.copy_with_keyword_parameters`` works as intended.

    Case: All field given
    """
    old_options = [
        ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),
        ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),
    ]
    new_options = [
        ApplicationCommandOption('aya', 'ayaya', ApplicationCommandOptionType.float),
        ApplicationCommandOption('momiji', 'awoo', ApplicationCommandOptionType.user),
    ]
    option_metadata = ApplicationCommandOptionMetadataNested(
        options = old_options,
    )
    copy = option_metadata.copy_with_keyword_parameters({
        'options': new_options,
    })
    _asert_fields_set(copy)
    vampytest.assert_is_not(option_metadata, copy)
    # Overridden options are stored as a tuple on the copy.
    vampytest.assert_eq(copy.options, tuple(new_options))
|
23,720 | 9717855d49902bd8c3010b6d77d33634285f6f41 | #variables
# int, str, bool, float
"""
def persona(nombre, apellido, edad, estudia):
print (f"hola", nombre, f"su apellido es: ", apellido, f"Su edad es: ", edad, f"Usted estudia en: ", estudia)
nombre = input("Digite su nombre: ")
apellido = input("Digite su apellido: ")
edad = input("Digite edad: ")
estudia = input("Donde estudia?: ")
persona(nombre, apellido, edad, estudia)
"""
"""
def suma (a, b):
res = a + b
print("El resultado de la suma es: ", res)
a = int(input("Digite numero A:"))
b = int(input("Digite numero B: "))
suma(a,b)
def resta (a, b):
resu = a - b
print("El resultado de la resta es: ", resu)
resta(a, b)
def div (a, b):
resul = a / b
print("El resultado de la division es: ", resul)
div(a, b)
def multi (a, b):
result = a * b
print("El resultado de la multiplicacion es: ", result)
"""
#multi(a, b)
print(".: Bienvenido Estudiante, por favor digite sus notas :.")
def prome(a, b, c, d):
    """Print the average of four grades and a pass/fail message.

    The passing threshold is 3.0. Returns the average so callers can
    reuse it (previously the function implicitly returned None, which
    remains compatible with callers that ignore the result).
    """
    promedio = (a + b + c + d) / 4
    print("El promedio es: ", promedio)
    # Bug fix: the original tested `< 2.9` then `>= 3.0`, so averages in
    # the gap [2.9, 3.0) printed no message at all. A single threshold at
    # 3.0 guarantees exactly one message is emitted.
    if promedio < 3.0:
        print("Lástima, usted ha reprobado el curso.!")
    else:
        print("Felicitaciones, usted aprobó el curso.!")
    return promedio
# Gather the four grades interactively and report the average.
a = float(input("Digite nota 1:"))
b = float(input("Digite nota 2:"))
c = float(input("Digite nota 3:"))
d = float(input("Digite nota 4:"))
prome(a, b, c, d)
|
23,721 | b41b3dfd8f1d4be4ec0cc7e8e199293179216991 | from typing import List
# ===== Problem Statement =====
# Given an array of integers, return indices of the two numbers
# such that they add up to a specific target.
# You may assume that each input would have exactly one solution,
# and you may not use the same element twice.
class Solution:
    """Three takes on Two Sum.

    Each approach returns indices of the two entries of ``nums`` whose
    values add up to ``target`` (exactly one solution is assumed, and an
    element may not be used twice).

    Core idea for the dict approaches: the "complement" of nums[i] for
    target is target - nums[i] (e.g. 7's complement for 9 is 2). A dict
    mapping value -> index makes the complement lookup O(1) instead of a
    linear scan.
    """

    def approach1(self, nums: List[int], target: int) -> List[int]:
        """O(n^2) brute force over all index pairs."""
        matches = []
        for left in range(len(nums)):
            for right in range(left + 1, len(nums)):
                if nums[left] + nums[right] == target:
                    matches = [left, right]
                    # Only the inner loop stops here (as in the original);
                    # with a unique solution the outer loop finds no more.
                    break
        return matches

    def approach2(self, nums: List[int], target: int) -> List[int]:
        """Two passes: build a value -> index map, then look up complements."""
        index_of = {}
        for position, value in enumerate(nums):
            index_of[value] = position
        for position, value in enumerate(nums):
            complement = target - value
            if complement in index_of and index_of[complement] != position:
                return [position, index_of[complement]]
        return []

    def approach3(self, nums: List[int], target: int) -> List[int]:
        """One pass: check the complement map while building it.

        Because a value is only recorded *after* its complement check, a
        match pairs the current index with an earlier one -- so the result
        is [later_index, earlier_index], matching the original behaviour.
        """
        seen = {}
        for position, value in enumerate(nums):
            complement = target - value
            if complement in seen and seen[complement] != position:
                return [position, seen[complement]]
            seen[value] = position
        return []

    # The one-pass version is the default implementation.
    twoSum = approach3
if __name__ == "__main__":
    # Smoke test: 2 + 7 == 9; approach3 reports the later index first.
    sol = Solution()
    nums = [2, 7, 11, 15]
    target = 9
    print(sol.twoSum(nums, target))
|
23,722 | f4b4254c46fad5ae96cc9be7ff0423321043d557 | from Key import Key
class NumKey(Key):
    """A keypad key carrying a single numeric value."""
    def __init__(self, number):
        # number: the value this key produces when pressed.
        super(NumKey, self).__init__()
        self.__number = number
    def get_value(self):
        """Return the number assigned to this key."""
        return self.__number
|
23,723 | 64539b87c53d62893a6979e75678bffe3dcbe8ac | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" setup zojax.principal.password
$Id$
"""
import os
from zope.app.testing.functional import ZCMLLayer
# Functional-test layer loading the ftesting.zcml that sits next to this
# module; allow_teardown lets the layer be torn down between test runs.
zojaxPrincipalPassword = ZCMLLayer(
    os.path.join(os.path.split(__file__)[0], 'ftesting.zcml'),
    __name__, 'zojaxPrincipalPassword', allow_teardown=True)
|
23,724 | 86d2351e852cdf5fe35fba7bb4269724b6b7f57f | """
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: the head of linked list.
    @return: a middle node of the linked list

    LintCode 228 -- Middle of Linked List.
    For an even-length list the left-of-center node is returned
    (e.g. 1->2 yields the node holding 1).
    Uses the classic two-pointer walk (slow advances one node per two
    fast steps), so a streamed list needs only a single traversal.
    """
    def middleNode(self, head):
        if head is None:
            return None
        slow = fast = head
        while fast.next is not None:
            fast = fast.next
            if fast.next is None:
                # Even length: stop before slow moves past the left middle.
                return slow
            fast = fast.next
            slow = slow.next
        return slow
23,725 | b52a302d7a8eb00a51f4b70f44e64a6504263da4 | def letras(a):
lista=['T','R','W','A','G','M','Y','F','P','D','X','B','N','J','Z','S','Q','V','H','L','C','K','E']
return lista[a]
print("La letra es:\t"+ letras (int(input("Ingresa el numero:\t"))))
|
23,726 | 46204b8d3c2db056d63059fc8490c1a8415e63ae | import chainer
import chainer.links as L
import chainer.functions as F
from lda2vec import utils, dirichlet_likelihood
import numpy as np
class NSLDA(chainer.Chain):
    """LDA-style topic model trained with negative sampling (lda2vec)."""
    def __init__(self, counts, n_docs, n_topics, n_dim, n_vocab, n_samples=5):
        # Topic vectors, uniform-random init; shape (n_topics, n_dim).
        factors = np.random.random((n_topics, n_dim)).astype('float32')
        # Negative-sampling output layer; its weight matrix is re-drawn as
        # Gaussian noise and rescaled by sqrt(total element count).
        loss_func = L.NegativeSampling(n_dim, counts, n_samples)
        loss_func.W.data[:, :] = np.random.randn(*loss_func.W.data.shape)
        loss_func.W.data[:, :] /= np.sqrt(np.prod(loss_func.W.data.shape))
        super(NSLDA, self).__init__(proportions=L.EmbedID(n_docs, n_topics),
                                    factors=L.Parameter(factors),
                                    loss_func=loss_func)
        self.n_docs = n_docs
        self.n_topics = n_topics
        self.n_vocab = n_vocab
        self.n_dim = n_dim
    def forward(self, doc, wrd, window=5):
        """Return (negative-sampling loss, Dirichlet log-likelihood) for a
        batch of (document id, word id) pairs.

        NOTE(review): `window` is unused here -- presumably kept for
        interface parity with sibling models; confirm before removing.
        """
        doc, wrd = utils.move(self.xp, doc, wrd)
        proportions = self.proportions(doc)
        # Dirichlet prior term over the per-document topic proportions.
        ld = dirichlet_likelihood(self.proportions.W)
        # Document context = softmax-weighted mixture of topic factors.
        context = F.matmul(F.softmax(proportions), self.factors())
        loss = self.loss_func(context, wrd)
        return loss, ld
|
23,727 | 4d554259f290ff0b9b966effceca4ae0a8fb21f6 | # books/models.py
from django.db import models
# Create your models here.
class Book(models.Model):
    """A book record with title, subtitle, author and ISBN."""
    title=models.CharField(max_length=250)
    subtitle=models.CharField(max_length=250)
    author=models.CharField(max_length=100)
    # 13 characters -- presumably a dash-less ISBN-13; confirm with callers.
    isbn=models.CharField(max_length=13)
    def __str__(self):
        return self.title
23,728 | cf89079ac6595a11930cc650ff45e22e2d3126fa | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 12:36:57 2018
@author: simon
"""
import numpy as np
# The solution of the number of rectangles simplifies to the product
# of the Lth and Wth triangle numbers for a rectangle of length L and
# width w.
# So construct a large array via numpy, get the difference from 2 million
# And find the element which is least.
# Project Euler 85: find the grid whose sub-rectangle count is closest to
# two million; print its area.
max_tri = 1000
TARGET = 2 * 10 ** 6

q = np.arange(0, max_tri + 1)
# Integer triangle numbers T(n) = n(n+1)/2; n*(n+1) is always even so the
# floor division is exact. (The original `astype(np.int)` crashes on
# NumPy >= 1.24, where the deprecated `np.int` alias was removed.)
tris = (q * (q + 1)) // 2

# rects[i, j] = T(i) * T(j): the rectangle count of an i x j grid.
# np.outer replaces the original vstack-of-1001-copies construction.
rects = np.outer(tris, tris)
diff = np.abs(rects - TARGET)

gap = diff.min()
anstup = np.where(diff == gap)
print(anstup, gap)

ans = (anstup[0][0], anstup[1][0])
print(ans[0] * ans[1])  # area of the answer rectangle
23,729 | 2194acd80db7c5a0436bba3850801d49e0036181 | # Generated by Django 3.0b1 on 2019-10-29 18:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated second half of the conceptscheme initial migration.

    Adds the cross-app relations (to ``common`` and ``registry``) and the
    identity indexes/constraints that could not be created in 0001.
    """
    initial = True
    dependencies = [
        ('common', '0002_contact_party'),
        ('conceptscheme', '0001_initial'),
        ('registry', '0001_initial'),
    ]
    operations = [
        # Relations.
        migrations.AddField(
            model_name='conceptscheme',
            name='submitted_structure',
            field=models.ManyToManyField(to='registry.SubmittedStructure', verbose_name='Submitted structures'),
        ),
        migrations.AddField(
            model_name='concept',
            name='container',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='conceptscheme.ConceptScheme', verbose_name='Concept scheme'),
        ),
        migrations.AddField(
            model_name='concept',
            name='core_representation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='common.Representation', verbose_name='Core representation'),
        ),
        migrations.AddField(
            model_name='concept',
            name='iso_concept_reference',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='conceptscheme.ISOConceptReference', verbose_name='ISO concept'),
        ),
        migrations.AddField(
            model_name='annotation',
            name='concept',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='conceptscheme.Concept', verbose_name='Concept'),
        ),
        migrations.AddField(
            model_name='annotation',
            name='concept_scheme',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='conceptscheme.ConceptScheme', verbose_name='Concept scheme'),
        ),
        # Identity of a maintainable: (agency, object_id, version).
        migrations.AddIndex(
            model_name='conceptscheme',
            index=models.Index(fields=['agency', 'object_id', 'version'], name='conceptsche_agency__0a5fbd_idx'),
        ),
        migrations.AddConstraint(
            model_name='conceptscheme',
            constraint=models.UniqueConstraint(fields=('agency', 'object_id', 'version'), name='conceptscheme_conceptscheme_unique_maintainable'),
        ),
        # Identity of an item within its scheme: (container, object_id).
        migrations.AddIndex(
            model_name='concept',
            index=models.Index(fields=['container', 'object_id'], name='conceptsche_contain_4ebcaf_idx'),
        ),
        migrations.AddConstraint(
            model_name='concept',
            constraint=models.UniqueConstraint(fields=('container', 'object_id'), name='conceptscheme_concept_unique_item'),
        ),
    ]
23,730 | 6d3904bb917fa9c6c2c6102293a7081b182e254d | from .mapper import ApiResponse, ApiResponseInterface
__all__ = ['ChangePasswordResponse']
class ChangePasswordResponseInterface(ApiResponseInterface):
    """Marker interface for change-password API responses."""
    pass

class ChangePasswordResponse(ApiResponse, ChangePasswordResponseInterface):
    """Change-password API response; all behaviour inherited from ApiResponse."""
    pass
|
23,731 | c1d9bf4b2f1972f6527439803adb51568ed52114 | from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
from requests.utils import quote
from platform_crawler.utils.post_get import post, get
from platform_crawler.utils.utils import Util
from platform_crawler.spiders.pylib.login_qq import LoginQQ
from platform_crawler.spiders.pylib.cut_img import cut_img
from platform_crawler.spiders.pylib.task_process import TaskProcess
from platform_crawler.settings import join, JS_PATH
import time
import json
import os
set_start_time = """
(function(st, et){
if(jQuery('#daterange') &&
jQuery('#daterange').data('daterangepicker') &&
('setStartDate' in jQuery('#daterange').data('daterangepicker'))
) {
jQuery('#daterange').data('daterangepicker').setStartDate(st);
jQuery('#daterange').data('daterangepicker').setEndDate(et);
document.querySelector('.applyBtn').click();
} else {
let settime = Date.now();
localStorage.setItem('list_sdate', '{"data":"'+st+'","_time":'+settime+',"_expire":31308148}');
localStorage.setItem('list_edate', '{"data":"'+et+'","_time":'+settime+',"_expire":31308148}');
}
})('%s', '%s');"""
u = Util()
logger = None
page_version = 'old'
base_header = {
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
'Content-Type': "application/x-www-form-urlencoded",
'cookie': None,
'origin': "https://e.qq.com",
'referer': None,
'Cache-Control': "no-cache",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
}
class AppTreasureHL(TaskProcess):
    """Crawler for Tencent e.qq.com "App Treasure" (应用宝) advertising reports.

    Logs in with a QQ account (via LoginQQ), downloads per-product spend
    reports as JSON files, screenshots the report pages, and parses the
    account balances. All Chinese string literals below are runtime data
    (API fields / UI labels) and must not be translated.
    """
    def __init__(self, user_info, **kwargs):
        global logger
        self.dates = None        # optional (ys, ms, ye, me) report period
        self.cookies_str = None  # serialized cookies captured after login
        self.gtk = None          # anti-CSRF token derived from the cookies
        self.uid = None          # advertiser account id used in URLs
        self.init__post_param()
        self.login_obj = None
        super().__init__(headers=base_header, user_info=user_info, **kwargs)
        logger = self.logger

    def init__post_param(self):
        """Reset the query/body templates shared by the report API calls."""
        self.params = {
            "mod": "report", "act": "productdetail", "g_tk": None
        }
        self.pdata = {
            "page": "1", "pagesize": "50", "sdate": None, "edate": None, "product_type": "20",
            "product_id": None, "time_rpt": "0", "owner": None
        }

    def get_product(self, sd, ed):
        """List products with activity in [sd, ed].

        Returns {'succ': True, 'msg': [{pname, pid, cost}, ...]} or
        {'succ': False, 'msg': 'no data'} when the listing is empty.
        """
        url = 'https://e.qq.com/ec/api.php'
        params = {'mod': 'report', 'act': 'getproduct', 'g_tk': str(self.gtk), 'sdate': sd, 'edate': ed, 'searchtype': 'product', 'product_type': '20'}
        headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
                   'referer': 'https://e.qq.com/atlas/%s/report/producttype' % self.uid,
                   'cookie': self.cookies_str,
                   'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
        res = get(url, params=params, headers=headers)
        if not res.get('is_success'):
            # NOTE(review): only logs the failure; the .json() below will
            # still run on the same response -- confirm this is intended.
            logger.error(res.get('msg').json())
        data = res.get('msg').json()
        total_num = data.get('data').get('conf').get('totalnum')
        if total_num == 0:
            return {'succ': False, 'msg': 'no data'}
        data_list = data.get('data').get('list')
        data = [{'pname': e.get('pname'), 'pid': e.get('product_id'), 'cost': e.get('cost')} for e in data_list]
        return {'succ': True, 'msg': data}

    def get_img(self, p_list, sd, ed):
        """Take a screenshot of each product's report page and save the image files."""
        with open(join(JS_PATH, 'e_qq_pagenum.js'), 'r') as p:
            pjs = p.read()
        for e in p_list:
            if not e.get('has_data'):
                continue
            picname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.png' % {
                'productId': e.get('pid'), 'productName': e.get('pname'), 'sd': sd, 'ed': ed
            }
            url = 'https://e.qq.com/atlas/%s/report/order?ptype=20&pid=%s&pname=%s' % (self.uid, e.get('pid'), quote(e.get('pname')))
            self.d.get(url)
            time.sleep(0.5)
            if page_version == 'new':  # branch on the detected UI version
                try:
                    self.wait_element(By.CLASS_NAME, 'button-more').click()
                except:
                    self.d.execute_script("document.querySelector('.button-more').click()")
            else:
                # '查看报表' = the "view report" link on the old UI.
                self.wait_element(By.LINK_TEXT, '查看报表', ec=EC.presence_of_element_located).click()
            time.sleep(2)
            # if page_version != 'new':
            # u.pag.hotkey('ctrl', '-', interval=0.3)
            # Adjust the rows-per-page via the injected helper script.
            self.d.execute_script(pjs)
            time.sleep(1.5)
            self.d.switch_to.frame(self.d.find_element_by_css_selector('.splitview-tabs-body iframe'))
            # Measure the full report height so the screenshot can cover it.
            get_height = 'return a=document.querySelector("#content").offsetHeight'
            height = self.d.execute_script(get_height)
            # Take the screenshot.
            cut_res = cut_img(height, self.dir_path, picname)
            if not cut_res['succ']:
                logger.error('get img %s_%s failed-------msg: %s' % (e['pid'], e['pname'], cut_res['msg']))
            logger.info('height: %s ---picname: %s' % (height, picname))
            # Restore zoom level.
            # u.pag.hotkey('ctrl', '0', interval=0.3)
        else:
            # for/else: no break occurs above, so this always runs after
            # the loop completes and reports overall success.
            return {'succ': True}

    def get_data_process(self, dates):
        """Download the report data for every (sd, ed) window in *dates*.

        Marks each product dict with 'has_data' and flags the whole task
        as empty (result_kwargs['has_data'] = 0) when nothing had spend.
        Returns a list of {'data': products, 'date': [sd, ed]} entries.
        """
        err_list, res, data_list, has_data_in_two_mth = [], None, [], []
        for sd, ed in dates:
            p_res = self.get_product(sd, ed)
            if not p_res.get('succ') and p_res.get('msg') == 'no data':
                continue
            p_list = p_res.get('msg')
            for p in p_list:
                if page_version == 'new':
                    res = self.get_data_another_version(p, sd, ed)
                else:
                    res = self.get_data(p, sd, ed)
                if res.get('succ'):
                    time.sleep(0.2)
                    p.update({'has_data': True})
                    has_data_in_two_mth.append(1)
                    continue
                elif not res['succ'] and res.get('msg') == 'no data':
                    p.update({'has_data': False})
                else:
                    err_list.append(p)
            else:
                # for/else: always append this window once its products
                # were processed (no break in the inner loop).
                data_list.append({'data': p_list, 'date': [sd, ed]})
        if not has_data_in_two_mth:
            self.result_kwargs['has_data'] = 0
        return data_list

    def get_version(self):
        # Detect which version of the report UI this account is served;
        # the old UI has a '.datechoose' date picker, the new one does not.
        global page_version
        self.d.get('https://e.qq.com/atlas/%s/report/producttype' % self.uid)
        # if u.wait_element(self.d, (By.CLASS_NAME, 'datechoose'), 10):
        try:
            self.d.find_element_by_xpath('//div[@class="datechoose l"]')
        except:
            page_version = 'new'
        time.sleep(1)

    def get_data_another_version(self, data, sd, ed):
        """Fetch one product's report via the new-UI API and dump it to JSON.

        Returns {'succ': True} on success, {'succ': False, 'msg': ...} on
        HTTP failure or when the window's total cost is zero ('no data').
        """
        logger.info('get into (self.get_data_another_version)function')
        fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
            'productId': data.get('pid'), 'productName': data.get('pname'), 'sd': sd, 'ed': ed
        }
        url = "https://e.qq.com/ec/api.php"
        params = {"g_tk": str(self.gtk), "product_id": data.get('pid'), "product_type": '20', "sdate": sd, "edate": ed}
        headers = {
            "cookie": self.cookies_str,
            "referer": "http://e.qq.com/atlas/%(uid)s/report/analytic2?product_id=%(pid)s&product_type=%(ptype)s" % {
                'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
            }
        }
        self.params.update(params)
        self._headers.update(headers)
        data = get(url, params=self.params, headers=self._headers)
        if not data['is_success']:
            return {'succ': False, 'msg': data['msg']}
        file_name = os.path.join(self.dir_path, fname)
        data = json.loads(data['msg'].content.decode('utf-8'))
        # 'cost' arrives formatted with thousands separators.
        cost = data.get('data').get('total').get('cost').replace(',', '')
        if float(cost) == 0:
            return {'succ': False, 'msg': 'no data'}
        data['account'] = self.acc
        with open(file_name, 'w') as f:
            json.dump(data, f)
        logger.info('crawled data: ' + json.dumps(data))
        return {'succ': True}

    def get_data(self, data, sd, ed):
        """Fetch one product's report via the old-UI API and dump it to JSON.

        Same contract as get_data_another_version, but POSTs the paging
        body expected by the legacy endpoint.
        """
        logger.info('get into (self.get_data_common_version)function')
        url = "https://e.qq.com/ec/api.php"
        fname = '%(productId)s_%(productName)s_%(sd)s_%(ed)s.json' % {
            'productId': data['pid'], 'productName': data['pname'], 'sd': sd, 'ed': ed
        }
        params = {"g_tk": str(self.gtk)}
        pdata = {
            "sdate": sd, "edate": ed, "product_type": '20',
            "product_id": data.get('pid'), "owner": self.uid
        }
        headers = {
            "cookie": self.cookies_str,
            "referer": "http://e.qq.com/atlas/%(uid)s/report/order_old?pid=%(pid)s&ptype=%(ptype)s" % {
                'uid': self.uid, 'pid': data.get('pid'), 'ptype': '20'
            }
        }
        self.params.update(params)
        self.pdata.update(pdata)
        self._headers.update(headers)
        data = post(url, data=self.pdata, params=self.params, headers=self._headers)
        if not data['is_success']:
            return {'succ': False, 'msg': data['msg']}
        file_name = os.path.join(self.dir_path, fname)
        data = json.loads(data['msg'].content.decode('utf-8'))
        cost = data.get('data').get('total').get('cost').replace(',', '')
        if float(cost) == 0:
            return {'succ': False, 'msg': 'no data'}
        data['account'] = self.acc
        with open(file_name, 'w') as f:
            json.dump(data, f)
        logger.info('crawled data: ' + json.dumps(data))
        return {'succ': True}

    def parse_balance(self, *args, **kwargs):
        """Collect the account balances into a (header, rows) pair.

        Balance keys are the platform's account-type names (cash, virtual,
        credit, exchange); amounts arrive in cents and are converted to
        currency units. Unknown account types are currently skipped.
        """
        # parse
        res = self.login_obj.get_balance(self.uid)
        if not res.get('succ'):
            return res
        unknown_account_name_type = {}
        balance_data = {'现金账户': 0, '虚拟账户': 0, '信用账户': 0, '换量账户': 0}
        accounts = res.get('msg')
        keys = balance_data.keys()
        for i in accounts:
            account_name = i.get('account_name')
            if account_name in keys:
                # balance is in cents -> convert to currency units.
                balance_data[account_name] = round(i.get('balance')/100, 2)
            else:
                # unknown_account_name_type[account_name] = round(i.get('balance')/100, 2)
                continue
        header = ['账号', '现金账户', '虚拟账户', '信用账户', '换量账户', '总计']
        balance_data['总计'] = sum(balance_data.values())
        balance_data['账号'] = self.acc
        # Dead branch while the collection above stays commented out.
        if unknown_account_name_type:
            header.extend(unknown_account_name_type.keys())
            balance_data.update(unknown_account_name_type)
        return header, [balance_data]

    def login_part(self, ui):
        # Log in through the shared QQ login helper.
        self.login_obj = LoginQQ(ui, ui.get('platform'))
        return self.login_obj.run_login()

    def deal_login_result(self, login_res):
        """Unpack the login result; on success capture driver/cookies/uid
        and detect the report UI version (returns None in that case)."""
        if not login_res['succ']:
            return login_res
        if login_res.get('msg') == 'unknown situation':
            logger.warning('got unknown login situation: %s' % login_res.get('desc'))
            self.result_kwargs['has_data'] = 0
            return {'succ': True, 'msg': 'pass'}
        # Grab the logged-in browser driver and session data.
        self.d = login_res.pop('driver')
        self.cookies_str = self.login_obj.cookies.get('cookie_str')
        self.gtk = self.login_obj.gtk
        self.uid = login_res.get('data').get('uid')
        self.get_version()

    def get_data_part(self, ui, **kwargs):
        """Resolve the report date windows and download their data."""
        # Resolve the requested period (or default when dates is None).
        self.dates = ui.get('dates')
        ys, ms, ye, me = self.dates if self.dates else (None, None, None, None)
        mths, dates = u.make_dates(ys=ys, ms=ms, ye=ye, me=me)
        # Fetch the data for every window.
        return self.get_data_process(dates)

    def get_img_part(self, get_data_res=None, **kwargs):
        """Screenshot every window that produced data; flag empty runs."""
        for e in get_data_res:
            sd, ed = e.get('date')
            self.d.execute_script(set_start_time % (sd, ed))  # update the date picker
            self.d.refresh()
            self.get_img(e.get('data'), sd, ed)
        if not get_data_res:
            self.result_kwargs['has_data'] = 0
        return {'succ': True}
|
23,732 | eecdfa1d47fd9d0f43e5b506dfc00004f75c4c54 | import re, math, collections, itertools, os
import nltk, nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
import subprocess
class Sentiment():
    """Simple sentiment scorers: an AFINN word-list scorer plus a wrapper
    that shells out to an external script.

    Fixes over the original: the constructor was named ``_init_`` (never
    invoked by Python), the word list was bound to a local instead of the
    instance, ``evaluate_text_afinn`` referenced an undefined ``text`` and
    leaked an unused open file handle, and the code used Python 2 syntax.
    """

    def __init__(self, afinn_path="AFINN-111.txt"):
        """Load the AFINN lexicon (one ``word<TAB>score`` entry per line).

        :param afinn_path: path to the AFINN word list file.
        """
        self.afinn = {}
        with open(afinn_path) as handle:
            for line in handle:
                word, score = line.rstrip("\n").split("\t")
                self.afinn[word] = int(score)

    def evaluate_text_afinn(self, text=""):
        """Score *text* by summing AFINN valences of its lowercased words.

        Unknown words count as 0; per the AFINN readme the per-word range
        is -5 (very negative) to +5 (very positive). Prints and returns
        the integer score.
        """
        score = sum(self.afinn.get(word, 0) for word in text.lower().split())
        print(score)
        return score

    def evaluate_text_sanan(self):
        """Run the external sentiment script asynchronously."""
        # NOTE(review): shell=True with a constant command string is fine,
        # but never interpolate user input here.
        subprocess.Popen("workingsentiment.py", shell=True)
|
23,733 | d749fe9d4633e2b520b9a9a1b1f3a5364804415b | #! /usr/bin/env python
PACKAGE='fuzzy_logic_controller'
import roslib;
roslib.load_manifest(PACKAGE)
from dynamic_reconfigure.parameter_generator_catkin import *
# Build the dynamic_reconfigure parameter set for the fuzzy controller node.
gen = ParameterGenerator()

# Enumeration of the available fuzzy controller implementations.
controller_enum = gen.enum([gen.const("SFLC", int_t, 0, "Singleton Fuzzy Logic Controller"),
                            gen.const("SNSFLC", int_t, 1, "Standard Nonsingleton Fuzzy Logic Controller"),
                            gen.const("CNSFLC", int_t, 2, "Centroid Nonsingleton Fuzzy Logic Controller")],
                           "Controller type")

#        Name     Type      Level  Description           Default  Min  Max  Values
gen.add( "x_d", double_t, 0, "Desired x position", 0, -10, 10)
gen.add( "y_d", double_t, 0, "Desired y position", 0, -10, 10)
gen.add( "z_d", double_t, 0, "Desired z position", 1, 0, 10)
gen.add( "controller", int_t, 0, "Controller type", 2, 0, 2, edit_method=controller_enum)
gen.add( "k_p", double_t, 0, "Proportional gain", 2, 0, 10)
gen.add( "k_d", double_t, 0, "Derivative gain", 0.1, 0, 10)
gen.add( "k_i", double_t, 0, "Integral gain", 0.3, 0, 10)
gen.add( "k_v", double_t, 0, "Velocity gain", 6, 0, 10)
gen.add( "i_internal", bool_t, 0, "Integral part internal to the controller", True)
# Level 1 parameters: sensor noise injection levels.
gen.add( "noise_position", double_t, 1, "Position noise level", 0.01, 0, 10)
gen.add( "noise_velocity", double_t, 1, "Velocity noise level", 0.1, 0, 1)
gen.add( "noise_attitude", double_t, 1, "Attitude noise level", 0.1, 0, 2)
gen.add( "noise_angular_velocity", double_t, 1, "Angular velocity noise level", 0.1, 0, 20)

# generate() emits the C++/Python cfg headers; its status is the exit code.
exit(gen.generate(PACKAGE, "fuzzy_logic_controller", "setController"))
|
23,734 | f4e67c64837165521c5bf6860415dfa593bf4ba1 | #!/usr/bin/python3
import rospy
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import PoseStamped
def odom_path(data):
    """Odometry callback: append the latest pose to the accumulated path
    and republish the whole path.

    :param data: nav_msgs/Odometry message from /odom
    """
    global path_odom
    # reuse the odometry header (frame id + stamp) for both path and pose
    path_odom.header = data.header
    pose = PoseStamped()
    pose.header = data.header
    pose.pose = data.pose.pose
    path_odom.poses.append(pose)  # NOTE(review): grows without bound over long runs
    odom_path_publisher.publish(path_odom)
if __name__ == "__main__":
    # Node that accumulates /odom messages into an ever-growing /odompath Path.
    rospy.init_node("odompath")
    path_odom = Path()
    odom_subscriber = rospy.Subscriber('/odom', Odometry, odom_path)
    odom_path_publisher = rospy.Publisher('/odompath', Path, queue_size=10)
    rospy.spin()
def romanToInt(s: str) -> int:
    """Convert a Roman numeral string to its integer value.

    A symbol strictly smaller than its right-hand neighbour is subtracted
    (e.g. IV -> 4); every other symbol is added. Non-canonical but
    well-formed inputs such as 'IIII' are accepted; the empty string
    yields 0.
    """
    values = {
        'I': 1,
        'V': 5,
        'X': 10,
        'L': 50,
        'C': 100,
        'D': 500,
        'M': 1000,
    }
    total = 0
    # Walk left to right, comparing each symbol with its right neighbour.
    for current, following in zip(s, s[1:]):
        if values[current] < values[following]:
            total -= values[current]
        else:
            total += values[current]
    # The final symbol has no right neighbour and is always added.
    if s:
        total += values[s[-1]]
    return total
# Quick manual checks (includes the non-canonical 'IIII' spelling).
print(romanToInt('IIII'))
print(romanToInt('LX'))
print(romanToInt('IX'))
print(romanToInt('IV'))
|
23,736 | 51db746abdf4ad3fa0fc306d3103e26ed69c2eb9 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 20:32:47 2018
@author: tf
"""
import svmMLia
# Load the sample data set via the svmMLia helper and echo the class
# labels as a quick smoke test.
dataMat, labelMat = svmMLia.loadDataSet('testSet.txt')
print(labelMat)
23,737 | adfe6ade6508f97c564b636e3f984292a520748b | # script to read each csv file in directory and insert data into database table
import sqlite3 as lite
import sys
import csv
import glob
import os
import datetime
# database connection variable
con = None
# path to sqlite3 db
db = '/home/stuart/cqu/cquaccdata.db'
# directory of the datafiles
# NOTE(review): "directry" is a typo for "directory"; kept because it is
# referenced below.
directry = '/home/stuart/cqu/'
# connect to database
con = lite.connect(db)
# holding variables for per-file header info, filled while parsing
snum = '' # serial number
startdate = '' # start date
starttime = '' # start time
temperature = '' # temperature read by device at data capture start
# foreach csv in directory
# open and read header lines
# save to variables
# then read each data variable line
# insert into db.
# Walk every *.CSV in the data directory, parse the device header rows
# (serial number, capture start time, temperature) and insert the
# accelerometer samples into the movementdata table.
#
# Target table:
#   movementdata(devicesn CHARACTER(15), timedate TIMESTAMP,
#                temp REAL, x REAL, y REAL, z REAL)
with con:
    cur = con.cursor()
    os.chdir(directry)
    for csvname in glob.glob("*.CSV"):  # renamed from `file` (builtin shadow)
        print(csvname)
        with open(csvname, 'r', newline='') as csvfile:
            for row in csv.reader(csvfile):
                tstr = str(row[:1])
                if tstr[2:3] == ';':
                    # Header rows: first cell starts with ';' (indices are
                    # offset by the list-repr quoting of str(row[:1])).
                    if tstr[3:4] == 'V':
                        # version-number row carries the device serial number
                        snum = row[4:5][0]
                        snum = snum[5:19]
                    elif tstr[3:5] == 'St':
                        # start-time row: build the datetime base that all
                        # per-sample second offsets are added to
                        startitems = row[1:3]
                        startdate = str(startitems[0]).strip()
                        starttime = str(startitems[1]).strip()
                        h = int(starttime[0:2])
                        m = int(starttime[3:5])
                        s = float(starttime[6:])
                        yr = int(startdate[0:4])
                        mth = int(startdate[5:7])
                        day = int(startdate[8:])
                        basetime = datetime.datetime(yr, mth, day, h, m)
                        basetime += datetime.timedelta(seconds=s)
                    elif tstr[3:5] == 'Te':
                        # temperature row
                        temperature = str(row[1:2][0]).strip()
                    else:
                        pass  # other header rows are not needed
                else:
                    # data row: seconds offset, x, y, z accelerometer values
                    t = float(row[0])
                    x = row[1]
                    y = row[2]
                    z = row[3]
                    newtime = basetime + datetime.timedelta(seconds=t)
                    datestring = newtime.strftime('%Y-%m-%d %H:%M:%S:%f')
                    # Parameterised insert instead of string concatenation:
                    # avoids SQL quoting/injection problems and keeps the
                    # numeric columns typed.
                    cur.execute(
                        "INSERT INTO movementdata VALUES(?, ?, ?, ?, ?, ?)",
                        (snum, datestring, float(temperature),
                         float(x), float(y), float(z)))
    # Single commit for the whole run (the `with con:` block would also
    # commit on a clean exit).
    con.commit()
    print("Insert done")
## db stuff - temp for reference
|
23,738 | 06754093e70e4b045b9b5a519b5b1ece102fa316 | import pandas as pd
import numpy as np
import mglearn
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import images.image
# 8. RAM price trend data set loaded as the ram_prices DataFrame
ram_prices = pd.read_csv(os.path.join(mglearn.datasets.DATA_PATH, "ram_price.csv"))
print("데이터의 형태: {}".format(ram_prices.shape))
print("ram_prices 타입: {}".format(type(ram_prices)))
print(ram_prices)

# Raw (linear) price plot.
# NOTE(review): the y-label string "가격 ($/Mbyte_" looks like a typo for
# "($/Mbyte)" — left unchanged here because it is a runtime string.
plt.plot(ram_prices.date, ram_prices.price)
plt.ylim(0, 100000000)
plt.xlabel("년")
plt.ylabel("가격 ($/Mbyte_")
plt.title("램 가격 동향")
images.image.save_fig("8.ram_prices_plot_by_raw")
plt.show()

# On a log scale the date/price relationship becomes roughly linear.
plt.semilogy(ram_prices.date, ram_prices.price)
plt.xlabel("년")
plt.ylabel("가격 ($/Mbyte_")
plt.title("로그 스케일로 그린 램 가격 동향")
images.image.save_fig("8.ram_prices_plot")
plt.show()

# Use data before 2000 for training and data from 2000 onward for testing.
data_train = ram_prices[ram_prices.date < 2000]
data_test = ram_prices[ram_prices.date >= 2000]

# Use only the date feature for the price prediction.
X_train = data_train.date[:, np.newaxis]
# Log-transform the target to simplify the relationship to the date.
y_train = np.log(data_train.price)
#
X_test = data_test.date[:, np.newaxis]
y_test = np.log(data_test.price)

print("X_train.shape: {}".format(X_train.shape))
print("y_train.shape: {}".format(y_train.shape))
print("X_train 타입: {}".format(type(X_train)))
print("y_train 타입: {}".format(type(y_train)))
print(X_train[:5], y_train[:5])

# Side-by-side scatter plots: 1: full data set, 2: X_train, 3: X_test
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
for X, y, title, ax in zip([ram_prices.date, X_train, X_test], [ np.log(ram_prices.price), y_train, y_test],
                           ['전체','X_train','X_test'], axes):
    # draw the scatter plot (2 features)
    mglearn.discrete_scatter(X, y, ax=ax, s=2)
    ax.set_title("{}".format(title))
    ax.set_xlabel("특성 1")
    ax.set_ylabel("특성 2")
axes[0].legend(loc=3)
images.image.save_fig("5.ram_prices_plot_compare")
plt.show()
23,739 | e3a659403570766944fbfd0184a5cf52913580d8 |
import backtrader as bt
class TestStrategy(bt.Strategy):
    """Minimal backtrader demo strategy: buy when the close rises above a
    simple moving average, sell when it falls below it. One pending order
    at a time; extra indicators are instantiated but never referenced —
    presumably only so they show up on the plot.
    """

    params = (
        # ('exitbars',5),
        ('maperiod',5),
    )

    def log(self,txt,dt = None):
        # Logging helper: prefix messages with the current bar's date.
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' %(dt.isoformat(),txt))

    def __init__(self):
        # TODO:Question1: whats inside self.datas
        self.dataclose = self.datas[0].close
        self.order = None
        self.buyprice = None
        self.buycomm = None
        # SMA used by the entry/exit rule below.
        self.sma = bt.indicators.MovingAverageSimple(self.datas[0],period = self.params.maperiod)
        # The following indicators are created but not stored or referenced.
        bt.indicators.MovingAverageSimple(self.datas[0], period=5)
        bt.indicators.ExponentialMovingAverage(self.datas[0],period=25)
        bt.indicators.WeightedMovingAverage(self.datas[0],period=25,subplot=True)
        bt.indicators.StochasticSlow(self.datas[0])
        bt.indicators.MACDHisto(self.datas[0])
        rsi = bt.indicators.RSI(self.datas[0])
        bt.indicators.SmoothedMovingAverage(rsi,period=10)
        bt.indicators.ATR(self.datas[0],plot=False)

    def notify_order(self,order):
        print(order.status)
        # Do nothing while the order is merely submitted or accepted.
        if order.status in [order.Submitted,order.Accepted]:
            return
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log('已买入, %.2f'%order.executed.price)
                self.buyprice = order.executed.price
                self.buycomm = order.executed.comm
            elif order.issell():
                self.log('已卖出, %.2f'%order.executed.price)
            self.bar_executed = len(self)
        elif order.status in [order.Canceled,order.Margin,order.Rejected]:
            self.log('订单失败')
        self.order = None

    def notify_trade(self,trade):
        # One buy matched with one sell counts as a trade; report only
        # closed trades.
        if not trade.isclosed:
            return
        self.log('交易利润,毛利润 %.2f, 净利润 %.2f' %(trade.pnl,trade.pnlcomm))

    def next(self):
        self.log('Close %.2f' % self.dataclose[0])
        if self.order:
            return
        if not self.position:
            # Note: index 0 is today, -1 yesterday, -2 the day before.
            # if self.dataclose[0]<self.dataclose[-1]:
            #     if self.dataclose[-1]<self.dataclose[-2]:
            #         self.log('买入, %.2f' % self.dataclose[0])
            #         self.order = self.buy()
            if self.dataclose[0] > self.sma[0]:
                self.log('买入, %.2f' % self.dataclose[0])
                self.order = self.buy()
        else:
            # if len(self)>=(self.bar_executed+self.params.exitbars):
            if self.dataclose[0]<self.sma[0]:
                self.log('卖出, %.2f'%self.dataclose[0])
                self.order = self.sell()
23,740 | 77795f2722bcf4d46a15c0b4333d5f52bdd74054 | '''
Created on 2015. 10. 23.
@author: USER
'''
def median(contigs):
    """Return the median of *contigs* as a float.

    Even-length input: mean of the two middle values of the sorted data.
    Odd-length input: the middle value, coerced to float.
    """
    count = len(contigs)
    ordered = sorted(contigs)
    if count % 2 == 0:
        lower = count // 2 - 1
        return (ordered[lower] + ordered[lower + 1]) / 2
    if count % 2 == 1:
        # For odd counts the middle index is simply count // 2.
        return float(ordered[count // 2])
# Demo: median of the sample contig lengths.
contigs = (2, 2, 2, 3, 3, 4, 8, 8)
print(median(contigs))
# NOTE(review): `number` is assigned here but never used.
number= 0
def extend(contigs):
    """Expand each contig length c into c copies of itself.

    E.g. (2, 3) -> [2, 2, 3, 3, 3]. This weights every length by the
    number of bases it covers, which is what the N50 computation needs.
    """
    return [length for length in contigs for _ in range(int(length))]
# Demo: base-weighted expansion of the sample contig lengths.
contigs = (2, 2, 2, 3, 3, 4, 8, 8)
print(extend(contigs))
def N50(Input):
    """Return the N50 of the given contig lengths: the median of the
    base-weighted expansion of the lengths.

    Bug fix: the original body read the module-level global ``contigs``
    instead of the ``Input`` parameter, so the argument was ignored.
    """
    return median(extend(Input))
# Demo: N50 of the sample contig lengths.
contigs = (2, 2, 2, 3, 3, 4, 8, 8)
print(N50(contigs))
23,741 | 6e213c4566c57c8a904b2bd64fa0486df58bb98b | from openerp import models, fields, api
class dym_report_kartu_stock_sm(models.Model):
    """stock.move extension declaring the column key order used by the
    kartu-stock XLS report exports."""
    _inherit = 'stock.move'

    def _report_xls_kartu_stock_fields(self, cr, uid, context=None):
        """Column keys for the generic kartu stock XLS report."""
        return [
            'no',
            'packing_date',
            'packing_name',
            'qty',
            'qty_out',
            'location',
        ]

    # aris
    def _report_xls_kartu_stock_fields_sparepart(self, cr, uid, context=None):
        """Column keys for the sparepart variant (currently identical to
        the generic list)."""
        return [
            'no',
            'packing_date',
            'packing_name',
            'qty',
            'qty_out',
            'location',
        ]
|
23,742 | 34c911897ed5b02da7817bb0c64bd809f344224a | #!/usr/bin/env python2
from __future__ import print_function
import struct
import sys
from .run_taint import registers, Trace, Module, saved_context_t, saved_memory_t
from .create_patch import SavedInstruction
from triton import *
try:
from triton_autocomplete import *
except ImportError:
pass
# MAIN_END = 0x1400038A8 # test_tamper_debug
# MAIN_END = 0x140001C6A # test_tamper
# MAIN_END = 0x140001818 # test_medium
# MAIN_END = 0x1400020B8 # test_large_debug
# MAIN_END = 0x0001400012CA
# MAIN_END = 0x000000000040062a # test_tamper_no_relro
# MAIN_END = 0x400bac # test_tamper_sc
# MAIN_END = 0
DEBUG = False
def set_debug(dbg):
    """Set the module-wide DEBUG flag consumed by dprint()."""
    global DEBUG
    DEBUG = dbg
def dprint(*args, **kargs):
    """Debug print: emit '[D] '-prefixed output only when DEBUG is set."""
    if DEBUG:
        print('[D] ', *args, **kargs)
def u64(val):
    """Unpack an 8-byte buffer into an unsigned 64-bit integer
    (native byte order, i.e. little-endian on the x86-64 targets here)."""
    (number,) = struct.unpack("Q", val)
    return number
def p64(val):
    """Pack an unsigned 64-bit integer into an 8-byte buffer
    (native byte order)."""
    packed = struct.pack("Q", val)
    return packed
def set_triton_context(ctx, state, set_rip=False):
    """Copy register values from a recorded `state` into the Triton context.

    rflags is always skipped; rip is skipped unless set_rip is True.
    """
    dprint("[*] set_triton_context at {:#x}".format(ctx.getConcreteRegisterValue(ctx.registers.rip)))
    for reg in registers:
        if reg == 'rflags' or not set_rip and reg == 'rip': #(reg == 'rip' or reg == 'rsp'):
            continue
        reg_val = getattr(state, reg)
        triton_reg = getattr(ctx.registers, reg)
        if DEBUG:
            # show the old -> new value transition for each register
            dprint('{: <3}: {:#018x} => {:#018x}'.format(reg, ctx.getConcreteRegisterValue(triton_reg), reg_val))
        ctx.setConcreteRegisterValue(triton_reg, reg_val)
def print_triton_context(ctx):
    """Dump all tracked registers (except rflags) and the top 0x20 bytes
    of the stack from the Triton context."""
    for reg in [r for r in registers if r != 'rflags']:
        print("[C] {: <3}: {:#018x}".format(reg, ctx.getConcreteRegisterValue(getattr(ctx.registers, reg))))
    rsp = ctx.getConcreteRegisterValue(ctx.registers.rsp)
    print("\n[C] Stack:")
    for i in range(0, 0x20, 8):
        print("[C] {:#018x} : {:#018x}".format(rsp + i, struct.unpack("Q", ctx.getConcreteMemoryAreaValue(rsp + i, 8))[0]))
def print_triton_memory(ctx, addr, size):
    """Dump `size` bytes of Triton memory starting at `addr`, one qword
    per line."""
    for i in range(addr, addr + size, 8):
        print("[C] {:#018x} : {:#018x}".format(i, u64(ctx.getConcreteMemoryAreaValue(i, 8))))
def print_triton_memory_at_register(ctx, reg, size=0x40):
    """Dump the memory window of `2*size` bytes centred on the address
    held in register `reg`."""
    reg_val = ctx.getConcreteRegisterValue(getattr(ctx.registers, reg))
    print("[C] {}".format(reg))
    print_triton_memory(ctx, reg_val - size, 2*size)
# x86 opcodes Triton cannot process; emulate() advances rip past them
# manually instead of aborting.
skipped_opcodes = { OPCODE.X86.XGETBV, OPCODE.X86.RDTSCP, OPCODE.X86.RDRAND,
                    OPCODE.X86.VPCMPEQB,
                    # OPCODE.X86.VPMOVMSKB,
                    OPCODE.X86.VZEROUPPER,
                    OPCODE.X86.XSAVE, OPCODE.X86.XRSTOR, OPCODE.X86.PSLLD, OPCODE.X86.PSLLQ,
                    OPCODE.X86.VMOVD, OPCODE.X86.VPXOR, OPCODE.X86.VPBROADCASTB }
def emulate(ctx, trace, saved_contexts, saved_memories):
    # type: (TritonContext, List[Trace], List[], List[]) -> Union[None, Dict[int, SavedInstruction]]
    """Replay `trace` in Triton, periodically re-synchronising registers
    and memory from the recorded contexts/dumps, collecting every tainted
    instruction.

    Returns {address: SavedInstruction} when the whole trace is replayed;
    returns None when emulation diverges from the trace or a saved context
    disagrees on the program counter.
    """
    old_pc = 0
    pc = trace[0]
    set_triton_context(ctx, saved_contexts[0], set_rip=True)
    print('[*] trace length {}'.format(len(trace)))
    next_saved_memory = 0
    next_saved_context = 1
    print('[*] starting emulation at {:#x}'.format(pc))
    count = 0
    # monitored_addr = 0x000000000034a000
    # monitored_val = ctx.getConcreteMemoryAreaValue(monitored_addr, 8)
    # print("[D] Monitored val start: {:#x}".format(u64(monitored_val)))
    tainted_addrs = dict()
    while pc:
        opcodes = ctx.getConcreteMemoryAreaValue(pc, 16)
        cond = True
        # Inner loop: keep re-processing while rip does not advance.
        # NOTE(review): indentation was lost in this copy; the nesting below
        # is reconstructed (this looks like rep-prefix handling — confirm).
        while cond:
            inst = Instruction()
            inst.setOpcode(opcodes)
            inst.setAddress(pc)
            skip_inst = False
            if ctx.processing(inst) == False:
                if inst.getType() in skipped_opcodes:
                    dprint('skipping next inst')
                    skip_inst = True
                else:
                    print('instruction not supported: {}'.format(inst))
                    break
            dprint("{0:07} {1} {2}".format(count, inst, 'tainted' if inst.isTainted() else ''))
            # Re-sync registers from the next recorded context when due.
            if len(saved_contexts) > next_saved_context and count == saved_contexts[next_saved_context].instr_num:
                dprint("[*] saved_context {}".format(next_saved_context))
                if pc != saved_contexts[next_saved_context].rip:
                    print("[-] saved context wrong pc: {:#x} != {:#x}".format(pc, saved_contexts[next_saved_context].rip))
                    return
                set_triton_context(ctx, saved_contexts[next_saved_context])
                next_saved_context += 1
            # Apply every recorded memory dump scheduled for this instruction.
            while len(saved_memories) > next_saved_memory and count == saved_memories[next_saved_memory].trace_addr:
                saved_memory = saved_memories[next_saved_memory]
                dprint("[*] saved_memory {}: {:#x} - {:#x}".format(next_saved_memory, saved_memory.start_addr, saved_memory.start_addr + saved_memory.size))
                ctx.setConcreteMemoryAreaValue(saved_memory.start_addr, saved_memory.data.tobytes())
                next_saved_memory += 1
            if skip_inst:
                # Unsupported-but-skippable opcode: advance rip by hand.
                ctx.setConcreteRegisterValue(ctx.registers.rip, inst.getNextAddress())
            old_pc = pc
            pc = ctx.getConcreteRegisterValue(ctx.registers.rip)
            cond = pc == old_pc
        if inst.isTainted():
            tainted_addrs[inst.getAddress()] = SavedInstruction(ctx, inst)
        sys.stdout.flush()
        # cur_val = ctx.getConcreteMemoryAreaValue(monitored_addr, 8)
        # if cur_val != monitored_val:
        #     print("[D] Monitored value changed: {:#x} => {:#x}".format(u64(monitored_val), u64(cur_val)))
        #     monitored_val = cur_val
        count += 1
        # (assorted debugging hooks kept for reference)
        # if pc == 0x7ffccbb56e1c:
        #     print_triton_context(ctx)
        #     print_triton_memory_at_register(ctx, "rcx")
        #     print_triton_memory_at_register(ctx, "r9")
        # if pc == 0x7ffee9eb5f5a:
        #     print_triton_context(ctx)
        #     print_triton_memory_at_register(ctx, "rax")
        # if pc == 0x7FFF9E494245:
        #     print_triton_context(ctx)
        # if count >= 20876:
        #     print_triton_context(ctx)
        # if pc == 0x7fff9e494224:
        #     print_triton_memory_at_register(ctx, "rax")
        # if pc == MAIN_END:
        #     print('[+] Reached end of main {:#x}. Emulation done'.format(MAIN_END))
        #     return tainted_addrs
        if len(trace) <= count:
            print('[+] reached end of trace. stopping emulation')
            return tainted_addrs
        if pc != trace[count]:
            print('[-] Execution diverged at {:#x}, trace {:#x}'.format(pc, trace[count]))
            print_triton_context(ctx)
            print('[*] Next trace instr')
            for i in range(10):
                print('{:#018x}'.format(trace[count+i]))
            # set_triton_context(ctx, saved_contexts[next_saved_context])
            # print('[D] monitored addr/value from last memory dump')
            # for saved_mem in reversed(saved_memories[:next_saved_memory-1]):
            #     if saved_mem.contains_addr(monitored_addr):
            #         print('[D] {:#018x} : {:#x}'.format(monitored_addr, u64(saved_mem.get_value(monitored_addr))))
            #         break
            # print('[D] monitored addr/value currently')
            # print('[D] {:#018x} : {:#x}'.format(monitored_addr, u64(ctx.getConcreteMemoryAreaValue(monitored_addr, 8))))
            break
    return None
def setup_triton(modules):
    """Create a TritonContext, load each module's file bytes at its load
    address, and taint the whole address range of the main module.

    Returns the context, or None (implicit) when no module is flagged
    is_main.
    """
    # context boilerplate
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)
    ctx.enableSymbolicEngine(False)
    ctx.enableTaintEngine(True)
    # segment values seem to be addresses instead of offsets into GDT
    # ctx.setConcreteRegisterValue(ctx.registers.gs, 0x2b)
    # ctx.setConcreteRegisterValue(ctx.registers.fs, 0x53)
    main_mem_tainted = False
    # set up modules
    for module in modules:
        with open(module.path, 'rb') as f:
            data = f.read()
        ctx.setConcreteMemoryAreaValue(module.start, data)
        if module.is_main:
            print('[*] Tainting main module memory {:#x} - {:#x}'.format(module.start, module.end))
            main_mem_tainted = True
            taint_size = 64
            # taint the module's range in 64-byte chunks
            for i in range(module.start, module.end, taint_size):
                ctx.taintMemory(MemoryAccess(i, taint_size))
    # print('[DDD] TESTING. REMOVE LATER')
    # for i in range(0, 515):
    #     ctx.taintMemory(MemoryAccess(0x603080, 1))
    if not main_mem_tainted:
        print("[-] No main module for tainting found")
        return
    # ctx.concretizeAllMemory()
    # ctx.concretizeAllRegister()
    # set up stack
    # ctx.setConcreteRegisterValue(ctx.registers.rbp, BASE_STACK)
    # ctx.setConcreteRegisterValue(ctx.registers.rsp, BASE_STACK)
    return ctx
def main(argv):
    """This module is meant to be imported, not executed directly."""
    print("This file is not meant to be run directly")
    exit(1)


if __name__ == '__main__':
    main(sys.argv[1:])
|
23,743 | 0dfae23e63bacb633f2fcfb3ed76b4257689e135 | # Generated by Django 3.2 on 2021-08-25 06:49
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Billing.zip to Billing.zipcode (column rename only; data is
    preserved). Also stops the field name from shadowing the builtin zip."""

    dependencies = [
        ('ceuApp', '0005_billing'),
    ]

    operations = [
        migrations.RenameField(
            model_name='billing',
            old_name='zip',
            new_name='zipcode',
        ),
    ]
|
23,744 | dd40a39fefd08396d7bb7f6ecc34bf3b0e39e008 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-01-17 14:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the TaskLog.selected_host field (destructive: column data is
    lost on apply)."""

    dependencies = [
        ('GOD', '0006_auto_20180117_2147'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='tasklog',
            name='selected_host',
        ),
    ]
|
23,745 | 15a0d9b418cf9ded82ec6eddad29a915b56632cd | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 07:34:24 2016
@author: celin
first: CV=1125.42893, LB=1105.65075
second: CV=1126.47669, LB=1107.00320
"""
import os
from shutil import copyfile
from allstate1 import *
def read_data(file_name):
    """Load and return the pickled object stored in *file_name*.

    Fix: import pickle explicitly instead of relying on the
    `from allstate1 import *` star import to provide it.
    """
    import pickle  # local import: keeps the dependency explicit
    with open(file_name, 'rb') as f:
        return pickle.load(f)
# Aggregate per-round hyper-parameter search results, rank them by CV MAE
# and write submissions for the four best runs.
dirname='results/'
cv_per_file = 1
n_rounds = int(round(100/cv_per_file))

# Collect the per-file results (skipping rounds that never finished).
y_test_pred_list = []
y_train_pred_list = []
mae_list = []
ntree_list = []
param_list = []
for i in range(n_rounds):
    filename = os.path.join(dirname, 'xgbHyperOpt'+str(i)+'.pkl')
    if not os.path.exists(filename):
        continue
    y_test_pred, y_train_pred, mae, ntree, param = \
        read_data(filename)
    y_test_pred_list.append(y_test_pred[0])
    y_train_pred_list.append(y_train_pred[0])
    mae_list.append(mae[0])
    ntree_list.append(ntree[0])
    param_list.append(param[0])
    # print('Import results', i)

mae_list = np.array(mae_list)
ntree_list = np.array(ntree_list)
# Average across the CV folds of each run.
mae_ave = np.mean(mae_list, axis=1)
ntree_ave = np.mean(ntree_list, axis=1)

# Rank runs by average CV MAE, best first.
mae_sortidx = np.argsort(mae_ave)
mae_sort = [mae_ave[i] for i in mae_sortidx]
ntree_sort = [ntree_ave[i] for i in mae_sortidx]
param_sort = [param_list[i] for i in mae_sortidx]
y_test_pred_sort = [y_test_pred_list[i] for i in mae_sortidx]
y_train_pred_sort = [y_train_pred_list[i] for i in mae_sortidx]

# Write submissions for the four best runs (invlogs undoes the log
# transform applied during training).
save_submission(invlogs(y_test_pred_sort[0]), 'HO_submission0.csv')
save_submission(invlogs(y_test_pred_sort[1]), 'HO_submission1.csv')
save_submission(invlogs(y_test_pred_sort[2]), 'HO_submission2.csv')
save_submission(invlogs(y_test_pred_sort[3]), 'HO_submission3.csv')
23,746 | 2977765fa11c457a8c0bde3aa2b03b498a0d274c | import warnings
from marshmallow import fields
from marshmallow.utils import is_iterable_but_not_string
from sqlalchemy import inspect
from sqlalchemy.orm.exc import NoResultFound
def get_primary_keys(model):
    """Get primary key properties for a SQLAlchemy model.

    :param model: SQLAlchemy model class
    """
    mapper = model.__mapper__
    properties = []
    for column in mapper.primary_key:
        properties.append(mapper.get_property_by_column(column))
    return properties
def ensure_list(value):
    """Wrap *value* in a single-element list unless it is already a
    non-string iterable."""
    if is_iterable_but_not_string(value):
        return value
    return [value]
class RelatedList(fields.List):
    """List field for Related items that bypasses fields.List's value
    lookup so the container's `attribute` handling is not applied twice."""

    def get_value(self, obj, attr, accessor=None):
        # Do not call `fields.List`'s get_value as it calls the container's
        # `get_value` if the container has `attribute`.
        # Instead call the `get_value` from the parent of `fields.List`
        # so the special handling is avoided.
        return super(fields.List, self).get_value(obj, attr, accessor=accessor)
class Related(fields.Field):
    """Related data represented by a SQLAlchemy `relationship`. Must be attached
    to a :class:`Schema` class whose options includes a SQLAlchemy `model`, such
    as :class:`SQLAlchemySchema`.

    :param list columns: Optional column names on related model. If not provided,
        the primary key(s) of the related model will be used.
    """

    default_error_messages = {
        "invalid": "Could not deserialize related value {value!r}; "
        "expected a dictionary with keys {keys!r}"
    }

    def __init__(self, columns=None, column=None, **kwargs):
        # `column` is the legacy spelling; map it onto `columns` with a
        # deprecation warning.
        if column is not None:
            warnings.warn(
                "`column` parameter is deprecated and will be removed in future releases. "
                "Use `columns` instead.",
                DeprecationWarning,
            )
            if columns is None:
                columns = column
        super().__init__(**kwargs)
        self.columns = ensure_list(columns or [])

    @property
    def model(self):
        # The SQLAlchemy model configured on the owning schema's options.
        return self.root.opts.model

    @property
    def related_model(self):
        # Resolve the mapped class on the other side of the relationship.
        model_attr = getattr(self.model, self.attribute or self.name)
        if hasattr(model_attr, "remote_attr"):  # handle association proxies
            model_attr = model_attr.remote_attr
        return model_attr.property.mapper.class_

    @property
    def related_keys(self):
        # Explicitly configured columns win; otherwise fall back to the
        # related model's primary key(s).
        if self.columns:
            insp = inspect(self.related_model)
            return [insp.attrs[column] for column in self.columns]
        return get_primary_keys(self.related_model)

    @property
    def session(self):
        return self.root.session

    @property
    def transient(self):
        return self.root.transient

    def _serialize(self, value, attr, obj):
        # Serialize to a {key: value} dict, collapsed to a bare scalar
        # when only a single key is involved.
        ret = {prop.key: getattr(value, prop.key, None) for prop in self.related_keys}
        return ret if len(ret) > 1 else list(ret.values())[0]

    def _deserialize(self, value, *args, **kwargs):
        """Deserialize a serialized value to a model instance.

        If the parent schema is transient, create a new (transient) instance.
        Otherwise, attempt to find an existing instance in the database.

        :param value: The value to deserialize.
        """
        if not isinstance(value, dict):
            if len(self.related_keys) != 1:
                keys = [prop.key for prop in self.related_keys]
                raise self.make_error("invalid", value=value, keys=keys)
            value = {self.related_keys[0].key: value}
        if self.transient:
            return self.related_model(**value)
        try:
            result = self._get_existing_instance(self.related_model, value)
        except NoResultFound:
            # The related-object DNE in the DB, but we still want to deserialize it
            # ...perhaps we want to add it to the DB later
            return self.related_model(**value)
        return result

    def _get_existing_instance(self, related_model, value):
        """Retrieve the related object from an existing instance in the DB.

        :param related_model: The related model to query
        :param value: The serialized value to map to an existing instance.
        :raises NoResultFound: if there is no matching record.
        """
        if self.columns:
            result = (
                self.session.query(related_model)
                .filter_by(
                    **{prop.key: value.get(prop.key) for prop in self.related_keys}
                )
                .one()
            )
        else:
            # Use a faster path if the related key is the primary key.
            lookup_values = [value.get(prop.key) for prop in self.related_keys]
            try:
                result = self.session.get(related_model, lookup_values)
            except TypeError:
                keys = [prop.key for prop in self.related_keys]
                raise self.make_error("invalid", value=value, keys=keys)
            if result is None:
                raise NoResultFound
        return result
class Nested(fields.Nested):
    """Nested field that inherits the session from its parent."""

    def _deserialize(self, *args, **kwargs):
        # Propagate the parent schema's session and transient flag to the
        # nested schema before deserializing.
        if hasattr(self.schema, "session"):
            self.schema.session = self.root.session
            self.schema.transient = self.root.transient
        return super()._deserialize(*args, **kwargs)
|
23,747 | 9e21da5490b2d01279d9ec96d7e0dd9327e37ccd | """
the module to detect whether the style is multiple cases or not in Codeforces
"""
import re
import urllib.parse
from logging import getLogger
from typing import *
import bs4
from onlinejudge_template.types import *
logger = getLogger(__name__)
class CodeforcesParserError(AnalyzerError):
    """Raised when a Codeforces problem page cannot be parsed."""
    pass
def is_codeforces_url(url: str) -> bool:
    """Return True iff *url*'s network location is exactly codeforces.com."""
    parsed = urllib.parse.urlparse(url)
    host_matches = parsed.netloc == 'codeforces.com'
    return host_matches
def has_multiple_testcases(html: bytes, *, url: str) -> bool:
    """Heuristically decide whether a Codeforces problem uses a
    multi-testcase input format, by scanning the first paragraph of its
    Input section for the usual $t$ / "test cases" phrasing.
    """
    soup = bs4.BeautifulSoup(html, 'html.parser')
    sections = soup.find_all('div', class_='input-specification')
    if len(sections) != 1:
        logger.error("""<div class="input-specification"> is not found or not unique.""")
        return False

    logger.debug('parse Input section')
    first_paragraph = sections[0].find('p')
    if first_paragraph is None:
        logger.error("""There are no <p> in the Input section.""")
        return False

    paragraph_text = first_paragraph.text
    logger.debug('parse the first paragraph: %s', paragraph_text)
    multi_case_pattern = r"""[Tt]he +first +line.*integer.*\$t\$.*(number +of.*test|test +cases)|multiple +test +cases"""
    return re.search(multi_case_pattern, paragraph_text) is not None
|
23,748 | fd6b5a7fa380b138ae6a69e81d94074de7c30885 | """
Test for ocular preferrence in topographic maps using STDP and synaptic rewiring.
http://hdl.handle.net/1842/3997
"""
# Imports
import numpy as np
import pylab as plt
import time
from pacman.model.constraints.placer_constraints.placer_chip_and_core_constraint import \
PlacerChipAndCoreConstraint
import spynnaker7.pyNN as sim
from function_definitions import *
from argparser import *
# Which experiment case to run (see the CASE_* constants in argparser).
case = args.case
print "Case", case, "selected!"

# SpiNNaker setup
start_time = plt.datetime.datetime.now()
sim.setup(timestep=1.0, min_delay=1.0, max_delay=10)
# Cap neurons per core so the structural-plasticity models fit on chip.
sim.set_number_of_neurons_per_core("IF_curr_exp", 50)
sim.set_number_of_neurons_per_core("IF_cond_exp", 256 // 10)
sim.set_number_of_neurons_per_core("SpikeSourcePoisson", 256 // 13)
sim.set_number_of_neurons_per_core("SpikeSourcePoissonVariable", 256 // 13)
# +-------------------------------------------------------------------+
# | General Parameters |
# +-------------------------------------------------------------------+
# Population parameters
model = sim.IF_cond_exp
# Membrane
v_rest = -70  # mV
e_ext = 0  # V
v_thr = -54  # mV
g_max = 0.1
tau_m = 20  # ms
tau_ex = 5  # ms

# Conductance-based LIF cell parameters passed to every target neuron.
cell_params = {'cm': 20.0,  # nF
               'i_offset': 0.0,
               'tau_m': 20.0,
               'tau_refrac': args.tau_refrac,
               'tau_syn_E': 5.0,
               'tau_syn_I': 5.0,
               'v_reset': -70.0,
               'v_rest': -70.0,
               'v_thresh': -50.0,
               'e_rev_E': 0.,
               'e_rev_I': -80.
               }
# +-------------------------------------------------------------------+
# | Rewiring Parameters |
# +-------------------------------------------------------------------+
no_iterations = args.no_iterations  # 300000 # 3000000 # 3,000,000 iterations
simtime = no_iterations
# Wiring: an n x n layer of neurons.
n = args.n
N_layer = n ** 2
S = (n, n)
# S = (256, 1)
grid = np.asarray(S)
s_max = args.s_max
sigma_form_forward = 1.99
sigma_form_lateral = 2.49
p_form_lateral = args.p_form_lateral
p_form_forward = args.p_form_forward
p_elim_dep = 0.01
p_elim_pot = 5.56 * (10.**-5)
f_rew = args.f_rew  # 10 ** 4 # Hz

# Inputs
f_mean = args.f_mean  # Hz
f_base = 5  # Hz
f_peak = 152.8 * 2  # 152.8 # Hz
sigma_stim = args.sigma_stim  # 2
t_stim = args.t_stim  # 20 # ms
t_record = args.t_record  # ms

# STDP: A- derived so the depression/potentiation balance is set by b.
a_plus = 0.1
b = args.b
tau_plus = 20.  # ms
tau_minus = args.t_minus  # ms
a_minus = (a_plus * tau_plus * b) / tau_minus

# Reporting: bundle every parameter for provenance alongside the results.
sim_params = {'g_max': g_max,
              't_stim': t_stim,
              'simtime': simtime,
              'f_base': f_base,
              'f_peak': f_peak,
              'sigma_stim': sigma_stim,
              't_record': t_record,
              'cell_params': cell_params,
              'case': args.case,
              'grid': grid,
              's_max': s_max,
              'sigma_form_forward': sigma_form_forward,
              'sigma_form_lateral': sigma_form_lateral,
              'p_form_lateral': p_form_lateral,
              'p_form_forward': p_form_forward,
              'p_elim_dep': p_elim_dep,
              'p_elim_pot': p_elim_pot,
              'f_rew': f_rew,
              'lateral_inhibition': args.lateral_inhibition,
              'delay': args.delay,
              'b': b,
              't_minus': tau_minus,
              't_plus': tau_plus,
              'tau_refrac': args.tau_refrac,
              'a_minus': a_minus,
              'a_plus': a_plus
              }

# Select the input-rate generator implementation.
if args.gaussian_input:
    gen_rate = generate_gaussian_input_rates
else:
    gen_rate = generate_rates
# +-------------------------------------------------------------------+
# | Initial network setup |
# +-------------------------------------------------------------------+
# Need to setup the moving input
# Checkerboard of "eye" assignments: alternating left/right positions.
one_row = np.asarray(np.arange(16) % 2, dtype=bool)
binoc_positions = np.asarray([one_row if i % 2 == 0 else np.logical_not(one_row) for i in range(16)])
left_positions = np.where(binoc_positions==0)
right_positions = np.where(binoc_positions==1)
positions = [left_positions, right_positions]

# NOTE(review): indentation was lost in this copy; the nesting below is
# reconstructed from the data flow (rates only exists in the elif branch).
if case == CASE_REW_NO_CORR:
    raise NotImplementedError
elif case == CASE_CORR_AND_REW or case == CASE_CORR_NO_REW:
    # One Gaussian stimulus per t_stim window, alternating eyes.
    rates = np.empty((simtime // t_stim, grid[0], grid[1]))
    for rate_id in range(simtime // t_stim):
        rand_offset = np.random.randint(0, N_layer//2)
        stim_position = (positions[rate_id%2][0][rand_offset], positions[rate_id%2][1][rand_offset])
        assert binoc_positions[stim_position] == rate_id%2
        r = gen_rate(stim_position,
                     f_base=f_base,
                     grid=grid,
                     f_peak=f_peak,
                     sigma_stim=sigma_stim)
        rates[rate_id, :, :] = r
    rates = rates.reshape(simtime // t_stim, N_layer)
    source_pop = sim.Population(N_layer,
                                sim.SpikeSourcePoissonVariable,
                                {'rate': rates,
                                 'start': 100,
                                 'duration': simtime,
                                 'rate_interval_duration': t_stim
                                 }, label="Variable-rate Poisson spike source")

ff_s = np.zeros(N_layer, dtype=np.uint)
lat_s = np.zeros(N_layer, dtype=np.uint)

# Load initial feed-forward / lateral connectivity from file.
init_ff_connections = []
init_lat_connections = []
if args.initial_connectivity_file is None:
    raise NotImplementedError
else:
    if "npz" in args.initial_connectivity_file:
        initial_connectivity = np.load(args.initial_connectivity_file)
    else:
        import scipy.io as io
        initial_connectivity = io.loadmat(args.initial_connectivity_file)
    # MATLAB indices are 1-based; shift to 0-based.
    conn = initial_connectivity['ConnPostToPre'] - 1
    weight = initial_connectivity['WeightPostToPre']
    for target in range(conn.shape[1]):
        for index in range(conn.shape[0]):
            if conn[index, target] >= 0:
                # Sources below N_layer are feed-forward; the rest lateral.
                if conn[index, target] < N_layer:
                    init_ff_connections.append(
                        (conn[index, target], target,
                         weight[index, target], 1))
                else:
                    init_lat_connections.append(
                        (conn[index, target] - N_layer, target,
                         weight[index, target], 1))
# Neuron populations
target_pop = sim.Population(N_layer, model, cell_params, label="TARGET_POP")
# Putting this populations on chip 0 1 makes it easier to copy the provenance
# data somewhere else
target_pop.set_constraint(PlacerChipAndCoreConstraint(0, 1))
# Connections
# Plastic Connections between pre_pop and post_pop
# STDP: additive weight dependence bounded in [0, g_max].
stdp_model = sim.STDPMechanism(
    timing_dependence=sim.SpikePairRule(tau_plus=tau_plus,
                                        tau_minus=tau_minus),
    weight_dependence=sim.AdditiveWeightDependence(w_min=0, w_max=g_max,
                                                   # A_plus=0.02, A_minus=0.02
                                                   A_plus=a_plus,
                                                   A_minus=a_minus)
)
# With rewiring enabled, wrap STDP inside the structural-plasticity
# mechanism; otherwise use plain STDP.
if case == CASE_CORR_AND_REW or case == CASE_REW_NO_CORR:
    structure_model_w_stdp = sim.StructuralMechanism(
        stdp_model=stdp_model,
        weight=g_max,
        delay=args.delay,
        s_max=s_max,
        grid=grid,
        f_rew=f_rew,
        lateral_inhibition=args.lateral_inhibition,
        random_partner=args.random_partner,
        p_elim_dep=p_elim_dep,
        p_elim_pot=p_elim_pot,
        sigma_form_forward=sigma_form_forward,
        sigma_form_lateral=sigma_form_lateral,
        p_form_forward=p_form_forward,
        p_form_lateral=p_form_lateral
    )
elif case == CASE_CORR_NO_REW:
    structure_model_w_stdp = stdp_model
    # structure_model_w_stdp = sim.StructuralMechanism(weight=g_max, s_max=s_max)
# Feedforward projection: Poisson source -> target layer.
ff_projection = sim.Projection(
    source_pop, target_pop,
    sim.FromListConnector(init_ff_connections),
    synapse_dynamics=sim.SynapseDynamics(slow=structure_model_w_stdp),
    label="plastic_ff_projection"
)
# Recurrent (lateral) projection within the target layer; inhibitory
# when lateral inhibition is requested on the command line.
lat_projection = sim.Projection(
    target_pop, target_pop,
    sim.FromListConnector(init_lat_connections),
    synapse_dynamics=sim.SynapseDynamics(slow=structure_model_w_stdp),
    label="plastic_lat_projection",
    target="inhibitory" if args.lateral_inhibition else "excitatory"
)
# +-------------------------------------------------------------------+
# | Simulation and results                                             |
# +-------------------------------------------------------------------+
# Record neurons' potentials
# target_pop.record_v()
# Record spikes
# if case == CASE_REW_NO_CORR:
if args.record_source:
    source_pop.record()
target_pop.record()
# Run simulation
pre_spikes = []
post_spikes = []
pre_sources = []
pre_targets = []
pre_weights = []
pre_delays = []
post_sources = []
post_targets = []
post_weights = []
post_delays = []
# rates_history = np.zeros((16, 16, simtime // t_stim))
# e stays None on a clean run; any simulation exception is stored here so
# that results can still be saved (and plotting skipped) afterwards.
e = None
print "Starting the sim"
no_runs = simtime // t_record
run_duration = t_record
try:
    # Run in chunks of t_record, snapshotting the synaptic matrices after
    # each chunk (the modulo test is always true since run_duration ==
    # t_record, but kept for generality).
    for current_run in range(no_runs):
        print "run", current_run + 1, "of", no_runs
        sim.run(run_duration)
        if (current_run + 1) * run_duration % t_record == 0:
            pre_weights.append(
                np.array([
                    ff_projection._get_synaptic_data(True, 'source'),
                    ff_projection._get_synaptic_data(True, 'target'),
                    ff_projection._get_synaptic_data(True, 'weight'),
                    ff_projection._get_synaptic_data(True, 'delay')]).T)
            post_weights.append(
                np.array([
                    lat_projection._get_synaptic_data(True, 'source'),
                    lat_projection._get_synaptic_data(True, 'target'),
                    lat_projection._get_synaptic_data(True, 'weight'),
                    lat_projection._get_synaptic_data(True, 'delay')]).T)
    if args.record_source:
        pre_spikes = source_pop.getSpikes(compatible_output=True)
    else:
        pre_spikes = []
    post_spikes = target_pop.getSpikes(compatible_output=True)
    # End simulation on SpiNNaker
    sim.end()
except Exception as e:
    # NOTE(review): Python-2 semantics — `e` stays bound after the except
    # block; under Python 3 it would be unbound below.
    print e
# print("Weights:", plastic_projection.getWeights())
end_time = plt.datetime.datetime.now()
total_time = end_time - start_time
pre_spikes = np.asarray(pre_spikes)
post_spikes = np.asarray(post_spikes)
print "Total time elapsed -- " + str(total_time)
suffix = end_time.strftime("_%H%M%S_%d%m%Y")
if args.filename:
    filename = args.filename
else:
    filename = "ocular_preference_results" + str(suffix)
# Mean firing rate per neuron in Hz (spike count / duration-in-s / #neurons).
total_target_neuron_mean_spike_rate = \
    post_spikes.shape[0] / float(simtime) * 1000. / N_layer
# Persist everything needed for offline analysis, including any exception.
np.savez(filename, pre_spikes=pre_spikes,
         post_spikes=post_spikes,
         init_ff_connections=init_ff_connections,
         init_lat_connections=init_lat_connections,
         ff_connections=pre_weights,
         lat_connections=post_weights,
         final_pre_weights=pre_weights[-1],
         final_post_weights=post_weights[-1],
         simtime=simtime,
         sim_params=sim_params,
         total_time=total_time,
         mean_firing_rate=total_target_neuron_mean_spike_rate,
         exception=e,
         insult=args.insult)
# Plotting
# Only plot on a clean run (no exception captured during simulation).
if args.plot and e is None:
    # Dense 256x256 connectivity matrices; NaN marks "no connection" and
    # repeated (source, target) pairs accumulate their weights.
    init_ff_conn_network = np.ones((256, 256)) * np.nan
    init_lat_conn_network = np.ones((256, 256)) * np.nan
    for source, target, weight, delay in init_ff_connections:
        if np.isnan(init_ff_conn_network[int(source), int(target)]):
            init_ff_conn_network[int(source), int(target)] = weight
        else:
            init_ff_conn_network[int(source), int(target)] += weight
    for source, target, weight, delay in init_lat_connections:
        if np.isnan(init_lat_conn_network[int(source), int(target)]):
            init_lat_conn_network[int(source), int(target)] = weight
        else:
            init_lat_conn_network[int(source), int(target)] += weight
    def plot_spikes(spikes, title):
        # Raster plot: each spike record is (neuron_id, time_ms).
        if spikes is not None and len(spikes) > 0:
            f, ax1 = plt.subplots(1, 1, figsize=(16, 8))
            ax1.set_xlim((0, simtime))
            ax1.scatter([i[1] for i in spikes], [i[0] for i in spikes], s=.2)
            ax1.set_xlabel('Time/ms')
            ax1.set_ylabel('spikes')
            ax1.set_title(title)
        else:
            print "No spikes received"
    plot_spikes(pre_spikes, "Source layer spikes")
    plt.show()
    plot_spikes(post_spikes, "Target layer spikes")
    plt.show()
    # Same dense-matrix construction for the final (post-training) weights.
    final_ff_conn_network = np.ones((256, 256)) * np.nan
    final_lat_conn_network = np.ones((256, 256)) * np.nan
    for source, target, weight, delay in pre_weights[-1]:
        if np.isnan(final_ff_conn_network[int(source), int(target)]):
            final_ff_conn_network[int(source), int(target)] = weight
        else:
            final_ff_conn_network[int(source), int(target)] += weight
        assert delay == args.delay
    for source, target, weight, delay in post_weights[-1]:
        if np.isnan(final_lat_conn_network[int(source), int(target)]):
            final_lat_conn_network[int(source), int(target)] = weight
        else:
            final_lat_conn_network[int(source), int(target)] += weight
        assert delay == args.delay
    # Side-by-side heatmaps of the final feedforward/lateral matrices.
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
    i = ax1.matshow(np.nan_to_num(final_ff_conn_network))
    i2 = ax2.matshow(np.nan_to_num(final_lat_conn_network))
    ax1.grid(visible=False)
    ax1.set_title("Feedforward connectivity matrix", fontsize=16)
    ax2.set_title("Lateral connectivity matrix", fontsize=16)
    cbar_ax = f.add_axes([.91, 0.155, 0.025, 0.72])
    cbar = f.colorbar(i2, cax=cbar_ax)
    cbar.set_label("Synaptic conductance - $G_{syn}$", fontsize=16)
    plt.show()
    # Difference (final - initial) heatmaps to visualise learning-induced
    # connectivity changes.
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
    i = ax1.matshow(
        np.nan_to_num(final_ff_conn_network) - np.nan_to_num(
            init_ff_conn_network))
    i2 = ax2.matshow(
        np.nan_to_num(final_lat_conn_network) - np.nan_to_num(
            init_lat_conn_network))
    ax1.grid(visible=False)
    ax1.set_title("Diff- Feedforward connectivity matrix", fontsize=16)
    ax2.set_title("Diff- Lateral connectivity matrix", fontsize=16)
    cbar_ax = f.add_axes([.91, 0.155, 0.025, 0.72])
    cbar = f.colorbar(i2, cax=cbar_ax)
    cbar.set_label("Synaptic conductance - $G_{syn}$", fontsize=16)
    plt.show()
print "Results in", filename
print "Total time elapsed -- " + str(total_time)
|
23,749 | 807244ff54f55ce68346da6822b3015750195fac | import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as sci
from scipy.signal import savgol_filter
import os
# Output directory for the thesis figures.
root = "../../../master_latex/results/"
plt.style.use(['science','no-latex', 'grid'])
import matplotlib
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# Random-walk simulation results (variance of displacement over time).
RW_sim_old = np.load("../data_test/final_run_RW_D0.1.npy")
RW_sim = np.load("../data_test/RW_pos_03_03__D01.npy")
t = np.linspace(0, 300*3, len(RW_sim[0,0,:]))
# Parameter grids: boundary amplitudes (epsilon) and wave numbers (kappa).
epsilon = np.array([0.1, 0.2, 0.3])
kappa = np.array([0.2, 0.6, 1.0, 1.4, 1.7, 2.1]) #0.2
kappa_num = np.array([0.2, 0.6, 1.0, 1.4, 1.8, 2.2, 2.5]) #0.2
eps2 = np.arange(0.4, 0.501, 0.1)
# For the extra epsilon values, estimate D_parallel as the late-time mean
# of variance/t (second half of the trace); std gives the error bar.
additional_RW = np.zeros((len(eps2), len(kappa_num), 2))
for i in range(len(eps2)):
    for j in range(len(kappa)):
        data = np.load("../data_test/RW_eps_04_05/var_over_2Dm_D01_kappa%3.2f" %(kappa_num[j])+"_eps"+str(eps2[i])+"_periods400.npy")
        t2 = np.linspace(0, 600*3, len(data))
        cutoff = int(len(t2)/2)
        additional_RW[i, j, 0] = np.mean(data[cutoff:]/t2[cutoff:])
        additional_RW[i, j, 1] = np.std( data[cutoff:]/t2[cutoff:])
# Brenner-theory (spectral) results for D = 0.1, on a finer epsilon grid.
eps3 = np.array([0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50])
T3 = int(3.0/0.004)
Lx3 = np.array([31.41, 10.47, 6.283, 4.487, 3.49, 2.855])
kappa3 = 2*np.pi/Lx3
bren_D01 = np.zeros((len(eps3), len(kappa3)))
bren_D01_amp = np.zeros((len(eps3), len(kappa3)))
difference = np.zeros(np.shape(bren_D01))
U_bren_D01 = np.zeros(np.shape(bren_D01))
for i in range(len(eps3)):
    for j in range(len(kappa3)):
        data = np.loadtxt("../data_test/tdatas_tau3.0_nu1.2_D0.1_fone12.0/Lx"+str(Lx3[j])+"_tau3.0_eps"+str(eps3[i])+"_nu1.2_D0.1_fzero0.0_fone12.0_res150_dt0.004/tdata.dat")
        # Time-average column 8 (D) over the last period; `difference`
        # compares the last two periods as a convergence check.
        bren_D01[i,j] = sci.trapz(data[-T3:, 8], data[-T3:, 0])/3.0
        difference[i, j] = abs(bren_D01[i,j]-sci.trapz(data[-2*T3:-T3, 8], data[-2*T3:-T3, 0])/3.0)/(bren_D01[i,j])
        U_bren_D01[i,j] = sci.trapz(data[-T3:, 4], data[-T3:, 0])/3.0
        bren_D01_amp[i,j] = (np.max( abs(data[-T3:, 8] - sci.trapz(data[-T3:, 8], data[-T3:, 0])/3.0 )))/bren_D01[i,j]
        #plt.plot(data[:, 0], data[:, 8])
        #plt.plot(data[-T3:, 0], data[-T3:, 8])
        #plt.show()
# Quick-look diagnostic plots (not saved to file).
plt.figure(4)
for i in range(len(eps3)):
    plt.plot(kappa3, U_bren_D01[i,:])
#plt.yscale("log")
plt.show()
plt.figure(4)
for i in range(len(eps3)):
    plt.plot(kappa3, difference[i,:])
plt.yscale("log")
#plt.show()
# Physical parameters for the analytic flat-channel reference value.
tau = 3
dt = 0.004
nu = 1.2
D = 0.1
F0 = 12/nu
omega = 2*np.pi/tau
Sc = nu
Pe = 1/D
gamma = np.sqrt(1j*omega/Sc)
gamma_c = np.conj(gamma)
rho = np.sqrt(1j*omega/D)
rho_c = np.conj(rho)
T = int(tau/dt)
# Analytic effective diffusivity of a flat channel (epsilon = 0) under an
# oscillating flow, in units of the molecular diffusivity D_m.
D0_ana = 1 + Pe*Pe*F0*F0*np.tanh(gamma)*np.tanh(gamma_c)/(4*gamma*gamma_c*(gamma**4 - rho**4))*(1/(gamma*gamma)*(gamma/np.tanh(gamma) - gamma_c/np.tanh(gamma_c)) - 1/(rho*rho)*(rho/np.tanh(rho) - rho_c/np.tanh(rho_c)))
cutoff = int(len(t)*0.5)
D_RW = np.zeros((len(epsilon), len(kappa), 2))
RW_sim2 = np.zeros((len(epsilon), len(kappa), 2))
# Late-time mean/std of variance/t for the new random-walk data set.
for i in range(len(epsilon)):
    for j in range(len(kappa)):
        D_RW[i, j, 0] = np.mean(RW_sim[i, j, cutoff:]/t[cutoff:])
        D_RW[i, j, 1] = np.std( RW_sim[i, j, cutoff:]/t[cutoff:])
plt.figure(1)
# Combine the old and new runs (mean of means; errors added in quadrature),
# and plot D(t) traces for the kappa index 3 column only.
for i in range(len(epsilon)):
    for j in range(len(kappa)):
        RW_sim2[i, j, 0] = (np.mean(RW_sim_old[i, j, cutoff:]) + D_RW[i, j, 0])/2
        RW_sim2[i, j, 1] = np.sqrt( np.std( RW_sim_old[i, j, cutoff:])**2 + D_RW[i, j, 1]**2 )/np.sqrt(2)
        t2 = np.linspace(0, 1, len(RW_sim_old[i, j,:]))
        if j == 3:
            plt.plot(t2, RW_sim_old[i, j,:], label=r"$\epsilon=%2.1f$" % epsilon[i])
for i in range(len(eps2)):
    for j in range(len(kappa)):
        data = np.load("../data_test/RW_eps_04_05/var_over_2Dm_D01_kappa%3.2f" %(kappa_num[j])+"_eps"+str(eps2[i])+"_periods400.npy")
        t2 = np.linspace(0, 600*3, len(data))
        if j == 3:
            plt.plot(t2/(600*3), data/t2, label=r"$\epsilon=%2.1f$" % eps2[i])
plt.legend(loc="best", ncol=3, fontsize=8)
plt.xlabel(r"Time $[T_{max}]$", fontsize=8)
plt.axis([-0.02, 1.02, 0.8, 2.5])
plt.ylabel(r"Effective diffusion coefficient $D_\parallel$ [$D_m$]", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/RW_vs_time_oscflow.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.show()
# Comparison figure: random-walk points (errorbars) vs Brenner-theory
# curves vs the analytic epsilon = 0 line.
plt.figure(1)
for i in range(len(epsilon)):
    plt.errorbar(kappa, RW_sim2[i, :, 0], yerr=RW_sim2[i, :, 1], markersize=2, fmt="o", color="C"+str(i), label=r"$\epsilon = $"+str(epsilon[i]))
    #plt.plot(kappa_num, D_num[i,:], color="C"+str(i))
for i in range(len(eps2)):
    plt.errorbar(kappa_num, additional_RW[i, :, 0], yerr=additional_RW[i, :, 1], markersize=2, fmt="o", color="C"+str(i+3), label=r"$\epsilon = $"+str(eps2[i]))
counter = 0
for i in range(len(eps3)):
    if (i + 1)%2 == 0:
        plt.plot(kappa3, bren_D01[i,:], color="C"+str(counter))
        counter += 1
plt.plot(kappa_num+np.linspace(-2, 2, len(kappa_num)), np.ones(len(kappa_num))*D0_ana, "k", label=r"$\epsilon = 0$")
plt.legend(loc="upper center", ncol=3, fontsize=8)
plt.xlabel(r"Wave number $\kappa$", fontsize=8)
plt.axis([0.05, 2.35, 1.15, 2.76])
plt.ylabel(r"Effective diffusion coefficient $D_\parallel$ [$D_m$]", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/comparison_RW_brenner.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.show()
# --- D = 0.1 data set: effective dispersion vs boundary amplitude -------
# (Parameters are recomputed here; several names below shadow earlier ones.)
tau = 3
dt = 0.004
nu = 1.2
D = 0.1
F0 = 12/nu
omega = 2*np.pi/tau
Sc = nu
Pe = 1/D
gamma = np.sqrt(1j*omega/Sc)
gamma_c = np.conj(gamma)
rho = np.sqrt(1j*omega/D)
rho_c = np.conj(rho)
T = int(tau/dt)
# Flat-channel analytic reference, as above.
D0_ana = 1 + Pe*Pe*F0*F0*np.tanh(gamma)*np.tanh(gamma_c)/(4*gamma*gamma_c*(gamma**4 - rho**4))*(1/(gamma*gamma)*(gamma/np.tanh(gamma) - gamma_c/np.tanh(gamma_c)) - 1/(rho*rho)*(rho/np.tanh(rho) - rho_c/np.tanh(rho_c)))
numeric = np.load("../data_test/tdata_03_03_D01_.npy")
epsilon = np.array([0, 0.1, 0.2, 0.3, 0.5])
kappa = np.array([0.2, 0.6, 1.0, 1.4, 1.8, 2.2]) #0.2
D_num = np.zeros((len(epsilon), len(kappa)))
D_nuA = np.zeros((len(epsilon), len(kappa)))
U = np.zeros((len(epsilon), len(kappa)))
T = 750
tau = 3
omega = 2*np.pi/tau
F0 = 12
# Row 0 (epsilon = 0) uses the analytic value; the rest are period-averaged
# from the numerics (column 8 = D, column 0 = time).
D_num[0, :] = np.real(D0_ana)
for i in range(len(epsilon)-1):
    for j in range(len(kappa)):
        D_num[i+1, j] = sci.trapz(np.trim_zeros(numeric[i, j, :, 8])[-T:], np.trim_zeros(numeric[i, j, :, 0])[-T:])/(tau)
        U[i+1, j] = np.sqrt(sci.trapz(numeric[i, j, -T:, 8], numeric[i, j, -T:, 0])/(tau))
        D_nuA[i+1, j] = np.max(abs(numeric[i,j, -T:, 8] - D_num[i+1, j])/D_num[i+1, j])
plt.figure(1)
# Dispersion relative to the Aris prediction 1 + (2/105) Pe^2.
for i in range(len(kappa)):
    plt.plot(epsilon[1:], D_num[1:,i]/(1+2/105*(U[1:,i]/0.1)**2), color="C"+str(i), label=r"$\kappa=$"+str(kappa_num[i]))
    print("Pe = ", U[1:,i]/0.1)
plt.legend(loc="best", fontsize=8)
plt.xlabel(r"Boundary amplitude $\epsilon$", fontsize=8)
plt.ylabel(r"Relative Effective Dispersion $D_\parallel/D_\parallel^{aris}$", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.axis([0.08, 0.518, 0.36, 0.42999])
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/rel_D_eff_vs_eps_D01.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.figure(2)
# Absolute dispersion vs epsilon.
for i in range(len(kappa)):
    plt.plot(epsilon[1:], D_num[1:,i], color="C"+str(i), label=r"$\kappa=$"+str(kappa_num[i]))
plt.legend(loc="lower left", fontsize=8)
plt.xlabel(r"Boundary amplitude $\epsilon$", fontsize=8)
plt.ylabel(r"Effective Dispersion $D_\parallel$ [$D_m$]", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/D_eff_vs_eps_D01.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.show()
# --- D = 1 data set ------------------------------------------------------
numeric = np.load("../data_test/tdata_04_03_D1_.npy")
epsilon = np.array([0.0, 0.1, 0.2, 0.3])
kappa = np.array([0.2, 0.6, 1.0, 1.4, 1.8, 2.2]) #0.2
D_num = np.zeros((len(epsilon), len(kappa)))
D_nuA = np.zeros((len(epsilon), len(kappa)))
U = np.zeros((len(epsilon), len(kappa)))
D = 1.0
Pe = 1/D
rho = np.sqrt(1j*omega/D)
rho_c = np.conj(rho)
xi = np.linspace(-1, 1, int(1e4))
# Analytic epsilon = 0 dispersion and RMS velocity of the oscillating
# Poiseuille-like profile (u0 integrated over the channel cross-section).
D0_ana = 1 + Pe*Pe*F0*F0*np.tanh(gamma)*np.tanh(gamma_c)/(4*gamma*gamma_c*(gamma**4 - rho**4))*(1/(gamma*gamma)*(gamma/np.tanh(gamma) - gamma_c/np.tanh(gamma_c)) - 1/(rho*rho)*(rho/np.tanh(rho) - rho_c/np.tanh(rho_c)))
u0 = 0.5*sci.trapz( (F0*(1-np.cosh(gamma*xi)/np.cosh(gamma))/(2*gamma*gamma))*np.conjugate(F0*(1-np.cosh(gamma*xi)/np.cosh(gamma))/(2*gamma*gamma)) , xi)
U[0, :] = np.sqrt(u0)
D_num[0, :] = np.real(D0_ana)
for i in range(len(epsilon)-1):
    for j in range(len(kappa)):
        D_num[i+1, j] = sci.trapz( np.trim_zeros(numeric[i, j, :, 8])[-T:], np.trim_zeros(numeric[i, j, :, 0])[-T:])/(tau)
        U[i+1, j] = np.sqrt(sci.trapz( np.trim_zeros(numeric[i, j, :, 4])[-T:], np.trim_zeros(numeric[i, j, :, 0])[-T:])/(tau))
        print("Pe = ", U[i+1,j]/1)
plt.figure(3)
# Relative (to Aris) dispersion vs kappa, one curve per epsilon.
for i in range(len(epsilon)-1):
    plt.plot(kappa, D_num[i+1,:]/(1+2/105*(U[i+1,:]/1)**2), color="C"+str(i+1), label=r"$\epsilon=$"+str(epsilon[i+1]))
    plt.plot(kappa, D_num[i+1,:]/(1+2/105*(U[i+1,:]/1)**2), "o", markersize=3, color="C"+str(i+1))
plt.legend(loc="best", fontsize=8)
plt.xlabel(r"Wave number $\kappa$", fontsize=8)
plt.ylabel(r"Relative Effective Dispersion $D_\parallel/D_\parallel^{aris}$", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/rel_D_eff_vs_eps_D1.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.figure(30)
# Absolute dispersion vs kappa.
for i in range(len(epsilon)):
    plt.plot(kappa, D_num[i,:], color="C"+str(i), label=r"$\epsilon=$"+str(epsilon[i]))
    plt.plot(kappa, D_num[i,:], "o", markersize=3, color="C"+str(i))
plt.legend(loc="best", fontsize=8, ncol=2)
plt.xlabel(r"Wave number $\kappa$", fontsize=8)
plt.ylabel(r"Effective Dispersion $D_\parallel$ [$D_m$]", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/D_eff_vs_kappa_D1.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.show()
# --- D = 10 data set ------------------------------------------------------
numeric = np.load("../data_test/tdata_04_03_D10_.npy")
epsilon = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
kappa = np.array([0.2, 0.6, 1.0, 1.4, 1.8, 2.2])
D_num = np.zeros((len(epsilon), len(kappa)))
D_nuA = np.zeros((len(epsilon), len(kappa)))
U = np.zeros((len(epsilon), len(kappa)))
# Re-derive all dimensionless parameters for this regime.
tau = 30
dt = 0.04
nu = 12
D = 10
F0 = 12/nu
omega = 2*np.pi/tau
Sc = nu
Pe = 1/D
gamma = np.sqrt(1j*omega/Sc)
gamma_c = np.conj(gamma)
rho = np.sqrt(1j*omega/D)
rho_c = np.conj(rho)
T = int(tau/dt)
# Analytic flat-channel value fills the epsilon = 0 row directly.
D_num[0, :] = 1 + Pe*Pe*F0*F0*np.tanh(gamma)*np.tanh(gamma_c)/(4*gamma*gamma_c*(gamma**4 - rho**4))*(1/(gamma*gamma)*(gamma/np.tanh(gamma) - gamma_c/np.tanh(gamma_c)) - 1/(rho*rho)*(rho/np.tanh(rho) - rho_c/np.tanh(rho_c)))
for i in range(len(epsilon)-1):
    for j in range(len(kappa)):
        D_num[i+1, j] = sci.trapz( np.trim_zeros(numeric[i, j, :, 8])[-T:], np.trim_zeros(numeric[i, j, :, 0])[-T:])/(tau)
        U[i+1, j] = np.sqrt(sci.trapz( np.trim_zeros(numeric[i, j, :, 4])[-T:], np.trim_zeros(numeric[i, j, :, 0])[-T:])/(tau))
        # Diagnostic: velocity trace per run (shown once after the loops).
        plt.plot(np.trim_zeros(numeric[i, j, :, 0]), np.trim_zeros(numeric[i, j, :, 4]))
plt.show()
plt.figure(4)
# Relative (to Aris) dispersion vs epsilon.
for i in range(len(kappa)):
    plt.plot(epsilon, D_num[:,i]/(1+2/105*(U[i,:]/10)**2), color="C"+str(i), label=r"$\kappa=$"+str(kappa[i]))
    print((U[i,:]/10))
plt.legend(loc="best", fontsize=8)
plt.xlabel(r"Boundary amplitude $\epsilon$", fontsize=8)
plt.ylabel(r"Relative Effective Dispersion $D_\parallel/D_\parallel^{aris}$", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/rel_D_eff_vs_eps_D10.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.figure(5)
# Absolute dispersion vs epsilon.
for i in range(len(kappa)):
    plt.plot(epsilon, D_num[:,i], color="C"+str(i), label=r"$\kappa=$"+str(kappa[i]))
    print("PE=", (U[i,:]/10))
plt.legend(loc="best", fontsize=8)
plt.xlabel(r"Boundary amplitude $\epsilon$", fontsize=8)
plt.ylabel(r"Effective Dispersion $D_\parallel$ [$D_m$]", fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=8)
filename = root+"figures/D_eff_vs_eps_D10.pdf"
plt.savefig(filename, bbox_inches="tight")
os.system('pdfcrop %s %s &> /dev/null &'%(filename, filename))
plt.show()
23,750 | c23fc74b54318ef5db651d39d935606534a52a09 | # Declaring the variables
# Python binds names dynamically, so no explicit type declarations are
# required anywhere below.

# Two integer bindings.
a = 10
b = 2

# Arithmetic and output: sum of the two integers.
c = a + b
print(c)

# The same name may be rebound to a different type; indexing a string
# returns a single character.
a = "Array"
print(a[1])

# Demonstrate local vs. global scope: the `a` assigned inside the function
# shadows (but does not modify) the module-level `a`.
def globtest():
    a = "I am a local variable inside a funcation."
    print(a)

globtest()
print("global a function:", a)
23,751 | e32d3ff78e63d7000e6f85ee86f9e58e5047e99e | from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
# Department names (Ukrainian regional branches plus the state-level
# centre at key 0), keyed by the integer id used in callback data below.
DEPS = {
    0: "Державний НДЕКЦ МВС України",
    1: "Вінницький",
    2: "Волинський",
    3: "Дніпропетровський",
    4: "Донецький",
    5: "Житомирський",
    6: "Закарпатський",
    7: "Запорізький",
    8: "Івано-Франківський",
    9: "Київський",
    10: "Кіровоградський",
    11: "Луганський",
    12: "Львівський",
    13: "Миколаївський",
    14: "Одеський",
    15: "Полтавський",
    16: "Рівненський",
    17: "Сумський",
    18: "Тернопільський",
    19: "Харківський",
    20: "Херсонський",
    21: "Хмельницький",
    22: "Черкаський",
    23: "Чернівецький",
    24: "Чернігівський",
}
# --- Department-list keyboard --------------------------------------------
# First row holds the state-level centre alone; the remaining departments
# are laid out two per row, each button carrying "depInfo<id>" callback data.
kbList = InlineKeyboardMarkup()
kbList.row(InlineKeyboardButton(DEPS[0], callback_data="depInfo0"))
depsCount = len(DEPS)  # idiomatic len() instead of DEPS.__len__()
for i in range(1, depsCount, 2):
    buttons_row = [InlineKeyboardButton(DEPS[i], callback_data=f"depInfo{i}")]
    # A second button only when a partner id exists (odd tail row gets one).
    if i + 1 < depsCount:
        buttons_row.append(InlineKeyboardButton(DEPS[i+1], callback_data=f"depInfo{i+1}"))
    kbList.row(*buttons_row)
kbList.row(InlineKeyboardButton("До початку", callback_data="home"))

# --- Per-department navigation keyboard -----------------------------------
kbMenu = InlineKeyboardMarkup()
kbMenu.row(
    InlineKeyboardButton("Інша інформація", callback_data="depsInfo"),
    InlineKeyboardButton("Інний підрозділ", callback_data="getDeps"),
)
kbMenu.row(InlineKeyboardButton("До початку", callback_data="home"))

# --- Department information keyboard ---------------------------------------
kbInfo = InlineKeyboardMarkup()
kbInfo.row(InlineKeyboardButton("Контакти", callback_data="contacts"))
kbInfo.row(InlineKeyboardButton("Керівництво", callback_data="administration"))
kbInfo.row(InlineKeyboardButton("Розклад роботи", callback_data="hours"))
kbInfo.row(InlineKeyboardButton("Додаткова інформація", callback_data="aditional"))
kbInfo.row(InlineKeyboardButton("До початку", callback_data="home"))
23,752 | 8dad1cad65ff917505e92285e998d00e33d5b34a | import time
import numpy as np
def content_generator():
    """Yield random 3-D test arrays covering combinations of memory order,
    dtype, and array kind.

    Kinds, in order: 'raw' (the array as-is), 'slice' (a trailing slice of
    the last axis), 'nonco' (rows selected by fancy indexing).

    Yields
    ------
    np.ndarray : 72 arrays total (3 kinds x 2 orders x 12 dtypes).
    """
    orders = ['C', 'F']
    types = [
        'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'int', 'float'
    ]
    kinds = ['raw', 'slice', 'nonco']
    for kind in kinds:
        for order in orders:
            for dtype in types:
                print(kind, order, dtype)
                data = np.random.randn(20, 10, 5)
                data = np.asarray(data, order=order).astype(dtype)
                if kind == 'raw':
                    yield data
                elif kind == 'slice':
                    # BUGFIX: the original sliced the last axis with
                    # data.shape[0] // 2 == 10, but the last axis has only
                    # 5 elements, so every 'slice' array was empty and the
                    # round-trip tests exercised nothing. Slice by half of
                    # the *last* axis so a non-trivial view is yielded.
                    yield data[..., data.shape[-1] // 2:]
                elif kind == 'nonco':
                    # NOTE(review): fancy indexing returns a fresh contiguous
                    # copy; if a genuinely non-contiguous array was intended
                    # here, a strided view (e.g. data[::2]) would be needed
                    # — confirm intent with the test-suite owner.
                    yield data[np.random.randint(0, data.shape[0], 10)]
def test_upload_from_file(cci, object_name):
    """Round-trip a bytes payload, then a str payload, through upload_from_file."""
    path = '/tmp/test.txt'

    # bytes round trip
    payload = b'abcdefg123457890'
    with open(path, 'wb') as handle:
        handle.write(payload)
    print(cci.upload_from_file(path, object_name=object_name))
    time.sleep(cci.wait_time)
    assert cci.download_object(object_name) == payload

    # string round trip
    text = 'abcdefg123457890'
    with open(path, 'w') as handle:
        handle.write(text)
    print(cci.upload_from_file(path, object_name=object_name))
    time.sleep(cci.wait_time)
    assert cci.download_object(object_name).decode() == text

    cci.rm(object_name, recursive=True)
def test_upload_json(cci, object_name):
    """Upload a small dict as JSON and confirm it round-trips unchanged."""
    payload = {'hello': 0, 'bye': 'bye!'}
    print(cci.upload_json(object_name, payload))
    time.sleep(cci.wait_time)
    assert cci.download_json(object_name) == payload
    cci.rm(object_name, recursive=True)
def test_pickle_upload(cci, object_name):
    """Round-trip a small dict through the pickle upload/download pair."""
    payload = {'hello': 1, 'bye': 'bye?'}
    print(cci.upload_pickle(object_name, payload))
    time.sleep(cci.wait_time)
    assert cci.download_pickle(object_name) == payload
    cci.rm(object_name, recursive=True)
def test_upload_npy_upload(cci, object_name):
    """Round-trip every generated array through the .npy codec."""
    for payload in content_generator():
        print(cci.upload_npy_array(object_name, payload))
        time.sleep(cci.wait_time)
        fetched = cci.download_npy_array(object_name)
        assert np.allclose(fetched, payload)
    cci.rm(object_name, recursive=True)
def test_upload_raw_array(cci, object_name):
    """Round-trip every generated array as a raw (default-compressed) buffer."""
    for idx, payload in enumerate(content_generator()):
        print(idx, cci.upload_raw_array(object_name, payload))
        time.sleep(cci.wait_time)
        assert np.allclose(cci.download_raw_array(object_name), payload)
    cci.rm(object_name, recursive=True)
def test_upload_raw_array_uncompressed(cci, object_name):
    """Round-trip every generated array as a raw buffer with compression off."""
    for idx, payload in enumerate(content_generator()):
        print(idx, cci.upload_raw_array(object_name, payload, compression=False))
        time.sleep(cci.wait_time)
        assert np.allclose(cci.download_raw_array(object_name), payload)
    cci.rm(object_name, recursive=True)
def test_upload_dask_array(cci, object_name):
    """Round-trip arrays through the dask-array upload/download path."""
    for payload in content_generator():
        print(cci.upload_dask_array(object_name, payload))
        time.sleep(cci.wait_time)
        fetched = np.asarray(cci.download_dask_array(object_name))
        assert np.allclose(fetched, payload)
    cci.rm(object_name, recursive=True)
def test_dict2cloud(cci, object_name):
    """Round-trip a nested dict of arrays via dict2cloud/cloud2dict."""
    for arr in content_generator():
        payload = {
            'arr1': arr,
            'deep': {
                'dat01': np.random.randn(15),
                'dat02': np.random.randn(30),
            },
        }
        print(cci.dict2cloud(object_name, payload))
        time.sleep(cci.wait_time)
        fetched = cci.cloud2dict(object_name)
        assert np.allclose(fetched['arr1'], payload['arr1'])
        for key, val in payload['deep'].items():
            assert np.allclose(val, fetched['deep'][key])
    cci.rm(object_name, recursive=True)
|
23,753 | 35c8eaa014c1e469058506660f2c2f3293566920 | class Solution:
def isValid(self, s: str) -> bool:
b_list = list(s)
answer = True
stack = []
for b in b_list:
if b == '[' or b == '(' or s == '{':
stack.append(b)
elif len(stack) != 0:
if(b == ']' and stack[-1] == '[') or (b == '}' and stack[-1] == '{') or (b == ')' and stack[-1] == '('):
stack.pop()
else:
answer = False
break
else:
answer = False
break
if len(stack) != 0:
answer = False
return answer |
23,754 | b9a26520c287d4bab55de60a2ced431ed4e58f0f | #!/usr/bin/env python
#coding=utf-8
#======================================================================
# Program: Neural Network Dose Distribution
# Author: James Keal
# Date: 2016/08/04
#======================================================================
'''
comment
'''
from __future__ import print_function, division
import os
import sys
import time
import datetime
import imp
import mhd_utils_3d as mhd
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
import lasagne
VOXEL_SIZE = (0.125,0.125,0.125) # voxel size (z,y,x) [cm]
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
def main(ann3d, density_dir, intgr_dir, fluence_dir, dose_dir, output_file=None,
hp_override=None):
print('\n')
print('Loading network parameters...')
ann3d = imp.load_source('networks.ann3d', ann3d)
learning_rate = ann3d.LEARNING_RATE
batchsize = ann3d.BATCHSIZE
num_epochs = ann3d.NUM_EPOCHS
#momentum = ann3d.MOMENTUM
if hp_override is not None:
learning_rate, batchsize, num_epochs, momentum = hp_override
print('Learning Rate:\t\t%s' % learning_rate)
print('Batch Size:\t\t%s' % batchsize)
print('Number of Epochs:\t%s' % num_epochs)
#print('Momentum:\t\t%s' % momentum)
# Load a set of volumes from multiple files
print('\n')
print('Loading density data...')
density = []
for dirpath, subdirs, files in os.walk(density_dir):
for f in sorted(files):
if f.endswith(".mhd"):
print(f)
density += [mhd.load_mhd(os.path.join(dirpath, f))[0]]
# Load a set of volumes from multiple files
print('\n')
print('Loading integral data...')
integral = []
for dirpath, subdirs, files in os.walk(intgr_dir):
for f in sorted(files):
if f.endswith(".mhd"):
print(f)
integral += [mhd.load_mhd(os.path.join(dirpath, f))[0]]
# Load a set of volumes from multiple files
print('\n')
print('Loading fluence data...')
fluence = []
for dirpath, subdirs, files in os.walk(fluence_dir):
for f in sorted(files):
if f.endswith(".mhd"):
print(f)
fluence += [mhd.load_mhd(os.path.join(dirpath, f))[0]]
# Load a set of volumes from multiple files
print('\n')
print('Loading dose data...')
dose = []
max_dose = 0
for dirpath, subdirs, files in os.walk(dose_dir):
for f in sorted(files):
if f.endswith(".mhd"):
print(f)
dose += [mhd.load_mhd(os.path.join(dirpath, f))[0]]
if np.max(dose[-1]) > max_dose:
max_dose = np.max(dose[-1])
dose = [d/max_dose for d in dose]
assert len(density) == len(integral)
assert len(integral) == len(fluence)
assert len(fluence) == len(dose)
# Prepare Theano variables for inputs and targets
#input_var = T.ftensor4('inputs')
input_var = T.ftensor5('inputs')
target_var = T.fmatrix('targets')
# Create neural network model (depending on first command line parameter)
print('\n')
print('Sampling data set...')
x_train, y_train, x_val, y_val, x_test, y_test = ann3d.ann3d_dataset(density, integral, fluence, dose)
print('\n')
print('Building model and compiling functions...')
network = ann3d.ann3d_model(input_var)
# Create a loss expression for training, i.e., a scalar objective:
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.squared_error(prediction, target_var)
loss = lasagne.objectives.aggregate(loss, weights=None, mode='sum')
#loss = loss.sqrt()
params = lasagne.layers.get_all_params(network, trainable=True)
#updates = lasagne.updates.nesterov_momentum(
# loss, params, learning_rate=learning_rate, momentum=momentum)
updates = lasagne.updates.adam(loss, params, learning_rate=learning_rate)
#updates = lasagne.updates.sgd(loss, params, learning_rate=learning_rate)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.aggregate(loss, weights=None, mode='sum')
#test_loss = test_loss.sqrt()
feed_forward = theano.function([input_var], test_prediction)
# Compile a function performing a training step on a mini-batch:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a function to compute the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], test_loss)
def rss(nums):
return (sum([n**2.0 for n in nums]))**0.5
# Finally, launch the training loop.
print('\n')
print("Starting training...")
# We iterate over epochs:
t_plot = []
v_plot = []
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(x_train, y_train, batchsize, shuffle=False):
inputs, targets = batch
train_err = rss([train_err,train_fn(inputs,targets)])
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_batches = 0
for batch in iterate_minibatches(x_val, y_val, batchsize, shuffle=False):
inputs, targets = batch
val_err = rss([val_err,val_fn(inputs,targets)])
val_batches += 1
# Store the errors for plotting
t_plot += [train_err/train_batches**0.5]
v_plot += [val_err/val_batches**0.5]
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6e}".format(train_err/train_batches**0.5))
print(" validation loss:\t\t{:.6e}".format(val_err/val_batches**0.5))
# Early stop if failure to improve three consecutive times
if len(v_plot) > 4:
if v_plot[-1] >= v_plot[-2] >= v_plot[-3] >= v_plot[-4]:
break
# After training, we compute and print the test error:
test_err = 0
test_batches = 0
for batch in iterate_minibatches(x_test, y_test, batchsize, shuffle=False):
inputs, targets = batch
test_err = rss([test_err,val_fn(inputs,targets)])
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6e}".format(test_err/test_batches**0.5))
# Now dump the network weights to a file:
now = datetime.datetime.now()
now = str(now.month) + '-' + str(now.day) + '_' + \
str(now.hour) + '-' + str(now.minute) + '-' + str(now.second)
np.savez('model_' + now, *lasagne.layers.get_all_param_values(network))
# And plot the errors
fig = plt.figure()
plt.plot(t_plot, label="training")
plt.plot(v_plot, label="validation")
plt.legend()
plt.title('L:%s, B:%s, T:%s' % (learning_rate, batchsize, test_err/test_batches**0.5))
fig.savefig('errors_' + now + '.png')
if __name__ == '__main__':
    # CLI entry point: five required positional args, one optional output file.
    if ('--help' in sys.argv) or (len(sys.argv) < 6):
        print("Trains a neural network to predict radiation dose.")
        print("Usage: %s <PARAMS> <DENSITY> <INTEGRALS> <FLUENCE> <DOSE> [OUTPUT_FILE]"
              % sys.argv[0])
        print()
        print("PARAMS: A python module containing the parameters of learning,")
        print("        network architecture, and data sampling methods employed.")
        print("DENSITY: The path to a folder containing 'n' MHD files, each")
        print("        containing the voxel densities of a phantom.")
        print("INTEGRALS: The path to a folder containing 'n' MHD files, each")
        print("        containing the integral densities of a phantom.")
        print("FLUENCE: The path to a folder containing 'n' MHD files, each")
        print("        containing the voxel fluences of a phantom.")
        print("DOSE: The path to a folder containing 'n' MHD files, each")
        print("        containing the voxel doses (targets) of a phantom.")
        print("OUTPUT_FILE: The name of the MHD file that will contain the")
        print("        generated data.")
    else:
        # Positional arguments map one-to-one onto main()'s keyword args.
        kwargs = {}
        kwargs['ann3d'] = sys.argv[1]
        kwargs['density_dir'] = sys.argv[2]
        kwargs['intgr_dir'] = sys.argv[3]
        kwargs['fluence_dir'] = sys.argv[4]
        kwargs['dose_dir'] = sys.argv[5]
        if len(sys.argv) > 6:
            kwargs['output_file'] = sys.argv[6]
        main(**kwargs)
|
23,755 | 462b2db73b9472203a10c218491b522774d5a592 | def convert(a):
str1 = ""
return(str1.join(a))
a = ['p','r','o','g','r','a','m','m','e','r']
print(convert(a)) |
23,756 | 02b8f7ca0adeb47626eae7ba0eaefd7a5f139cd1 | import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
class FeatureNet(object):
    """Shared feature-extraction graph: maps raw state tensors to features.

    `network` is a callable (states, scope=..., reuse=..., is_training=...)
    returning a feature tensor; it is built under this net's variable scope.
    """
    def __init__(self, state_shape, network, special=None):
        self.special = special or {}
        self.state_shape = state_shape
        # Batched input states; leading None dimension is the batch size.
        self.states = tf.placeholder(shape=(None,) + state_shape, dtype=tf.float32, name="states")
        self.is_training = tf.placeholder(dtype=tf.bool, name="is_training")
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        # Filled in later by external training-setup code, not by this class.
        self.loss = None
        self.optimizer = None
        self.train_op = None
        self.relative_scope = self.special.get("scope", "feature_network")
        # Absolute scope path, used elsewhere to collect/copy trainable variables.
        self.scope = tf.get_variable_scope().name + "/" + self.relative_scope
        self.feature_state = network(
            self.states,
            scope=self.relative_scope + "/feature",
            reuse=self.special.get("reuse_feature", False),
            is_training=self.is_training)
class PolicyNet(object):
    """Policy head: action probabilities plus a REINFORCE-style loss
    (-sum(log pi(a|s) * return), optionally with an entropy bonus).
    """
    def __init__(self, hidden_state, n_actions, special=None):
        self.special = special or {}
        self.n_actions = n_actions
        self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
        self.cumulative_rewards = tf.placeholder(shape=[None], dtype=tf.float32, name="rewards")
        self.is_training = tf.placeholder(dtype=tf.bool, name="is_training")
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.optimizer = None
        self.train_op = None
        self.relative_scope = self.special.get("scope", "policy_network")
        self.scope = tf.get_variable_scope().name + "/" + self.relative_scope
        # 1e-8 keeps the tf.log() calls below away from log(0).
        self.predicted_probs = self._probs(
            hidden_state,
            scope=self.relative_scope + "/probs",
            reuse=self.special.get("reuse_probs", False)) + 1e-8
        # Flatten [batch, n_actions] and gather each row's chosen action.
        batch_size = tf.shape(self.actions)[0]
        predicted_ids = tf.range(batch_size) * tf.shape(self.predicted_probs)[1] + self.actions
        self.predicted_probs_for_actions = tf.gather(
            tf.reshape(self.predicted_probs, [-1]), predicted_ids)
        # Policy-gradient objective, summed over the batch.
        J = tf.reduce_sum(tf.log(self.predicted_probs_for_actions) * self.cumulative_rewards)
        self.loss = -J # * self.special.get("reward_factor", 1.0)
        # Optional entropy regularization (encourages exploration).
        if self.special.get("entropy_loss", True):
            H = tf.reduce_sum(
                tf.reduce_sum(
                    self.predicted_probs * tf.log(self.predicted_probs),
                    axis=-1))
            self.loss += H * self.special.get("entropy_factor", 0.01)
    def _probs(self, hidden_state, scope, reuse=False):
        """Dense softmax layer over the action set."""
        with tf.variable_scope(scope, reuse=reuse):
            probs = tf.layers.dense(
                hidden_state,
                units=self.n_actions,
                activation=tf.nn.softmax)
            return probs
class ValueNet(object):
    """State-value head: scalar V(s) with an MSE loss against a TD target."""
    def __init__(self, hidden_state, special=None):
        self.special = special or {}
        self.td_target = tf.placeholder(shape=[None], dtype=tf.float32, name="td_target")
        self.is_training = tf.placeholder(dtype=tf.bool, name="is_training")
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.optimizer = None
        self.train_op = None
        self.relative_scope = self.special.get("scope", "value_network")
        self.scope = tf.get_variable_scope().name + "/" + self.relative_scope
        self.predicted_values = self._state_value(
            hidden_state,
            scope=self.relative_scope + "/state_value",
            reuse=self.special.get("reuse_state_value", False))
        # Drop the trailing unit dimension: [batch, 1] -> [batch].
        self.predicted_values_for_actions = tf.squeeze(self.predicted_values, axis=1)
        self.loss = tf.losses.mean_squared_error(
            labels=self.td_target,
            predictions=self.predicted_values_for_actions)
    def _state_value(self, hidden_state, scope, reuse=False):
        """Single linear output unit producing V(s)."""
        with tf.variable_scope(scope, reuse=reuse):
            state_values = tf.layers.dense(
                hidden_state,
                units=1,
                activation=None)
            return state_values
class QvalueNet(object):
    """Action-value head: Q(s, a) per action with an MSE loss on a TD target."""
    def __init__(self, hidden_state, n_actions, special=None):
        self.special = special or {}
        self.n_actions = n_actions
        self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
        self.td_target = tf.placeholder(shape=[None], dtype=tf.float32, name="td_target")
        self.is_training = tf.placeholder(dtype=tf.bool, name="is_training")
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.optimizer = None
        self.train_op = None
        self.relative_scope = self.special.get("scope", "qvalue_network")
        self.scope = tf.get_variable_scope().name + "/" + self.relative_scope
        self.predicted_qvalues = self._qvalues(
            hidden_state,
            scope=self.relative_scope + "/qvalue",
            reuse=self.special.get("reuse_state_value", False))
        # Flatten [batch, n_actions] and gather each row's chosen action.
        batch_size = tf.shape(self.actions)[0]
        predicted_ids = tf.range(batch_size) * tf.shape(self.predicted_qvalues)[1] + self.actions
        self.predicted_qvalues_for_actions = tf.gather(
            tf.reshape(self.predicted_qvalues, [-1]), predicted_ids)
        self.loss = tf.losses.mean_squared_error(
            labels=self.td_target,
            predictions=self.predicted_qvalues_for_actions)
    def _qvalues(self, hidden_state, scope, reuse=False):
        """Linear layer over actions; optionally mean-centered ("advantage" form)."""
        with tf.variable_scope(scope, reuse=reuse):
            qvalues = tf.layers.dense(
                hidden_state,
                units=self.n_actions,
                activation=None)
            if self.special.get("advantage", False):
                # Subtract the per-state mean so outputs behave like advantages.
                qvalues -= tf.reduce_mean(qvalues, axis=-1, keep_dims=True)
            return qvalues
def copy_scope_parameters(sess, net1_scope, net2_scope):
    """Assign every trainable variable under `net1_scope` to its
    name-order counterpart under `net2_scope` in one session run."""
    src_vars = sorted(
        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=net1_scope),
        key=lambda v: v.name)
    dst_vars = sorted(
        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=net2_scope),
        key=lambda v: v.name)
    sess.run([dst.assign(src) for src, dst in zip(src_vars, dst_vars)])
def copy_model_parameters(sess, net1, net2):
    """
    Copies the model parameters of one net to another.
    Args:
      sess: Tensorflow session instance
      net1: net to copy the parameters from
      net2: net to copy the parameters to
    """
    # Delegate to the scope-based copy using each net's absolute scope path.
    copy_scope_parameters(sess, net1.scope, net2.scope)
|
23,757 | ed9ed6dee11b0df08ea1fbfdf2061a2ebc182a60 | import re
BULK_RE = re.compile(r'data\-product\-id="(\d+)".+?data\-expected\-price="(\d+)".+?data\-expected\-seller-id="(\d+)".+?data\-lowest\-private\-sale\-userasset\-id="(\d+)"', re.DOTALL)
def parse_item_page(data):
    """Extract (product_id, price, seller_id, userasset_id) as ints from an
    item page's HTML. Raises AttributeError if the attributes are missing."""
    match = BULK_RE.search(data)
    return tuple(int(group) for group in match.groups())
23,758 | d5bd06926ac9facc07807850cd994afc726d0851 | import collections
import math
import sys
def isPowerOfTwo(x):
    """Return True iff x is a positive power of two (1, 2, 4, 8, ...).

    Fix: the original returned the raw int 0 for x == 0 instead of a bool;
    this version always returns a proper bool while keeping the same
    truthiness for every input.
    """
    return bool(x) and (x & (x - 1)) == 0
def primefactors(n):
    """Return a dict mapping each prime factor of n to its multiplicity.

    Fix: uses integer division (//=) instead of true division (/=), so the
    residual factor stays an exact int. The original produced float keys
    (e.g. d[3.0]) and would lose precision for large n.
    """
    d = collections.defaultdict(int)
    while n % 2 == 0:
        d[2] += 1
        n //= 2
    for i in range(3, int(n**0.5)+1, 2):
        while n % i == 0:
            d[i] += 1
            n //= i
    if n > 2:
        # Whatever remains is itself a prime factor.
        d[n] += 1
    return d
# Reads n and prints the product of its distinct prime factors together with
# an operation count derived from the largest prime exponent.
n = int(input())
if n == 1:
    # 1 has no prime factors; nothing to do.
    print(1, 0)
    sys.exit()
x = primefactors(n)
y = max(x.values())  # largest exponent among the prime factors
z = x.keys()
k = 1
# k = product of the distinct prime factors of n.
for i in z:
    k*=i
if y == 1:
    # n is already square-free: zero operations needed.
    print(int(k), 0)
elif isPowerOfTwo(y):
    if len(set(x.values())) == 1:
        # All exponents equal and a power of two: log2(y) steps.
        print(int(k), int(math.log2(y)))
    else:
        # Uneven exponents: one extra step to equalize them first.
        print(int(k), int(math.log2(y)+1))
else:
    print(int(k), math.ceil(math.log2(y))+1)
23,759 | d161fcb257932092708f38d08eab3a2f200cca1d | #continue_for
# Print the integers below 5, skipping 2 (continue in a for loop).
for i in range(5):
    if i == 2:
        continue
    print(i)
# continue in a while loop
# Print the even numbers up to 10.
i = 0
while i <= 10:
    if i%2 == 1:
        # Odd: advance and skip the print.
        i += 1
        continue
    else:
        print(i)
        i += 1
|
23,760 | 755f6a27430c74335fc28055bdad6422f1341788 | from random import randint
class RandomSet:
    """
    A class resembling the interface of a set that also
    has O(1) random access.

    Internally keeps a list of the items plus a dict mapping each item to
    its index in the list, so removal can swap-and-pop in O(1).
    """
    def __init__(self, s=None):
        """
        Create a new managed set.
        s can be any iterable to initialize the set.
        """
        self._index_map = {}
        self._list = []
        if s is not None:
            for item in s:
                self.add(item)
    def __contains__(self, item):
        """
        Returns True if the item is in the set.
        """
        return item in self._index_map
    def __len__(self):
        return len(self._list)
    def add(self, item):
        """
        Add an element to the ManagedSet if it doesn't yet exist.
        """
        if item not in self:
            self._index_map[item] = len(self._list)
            self._list.append(item)
    def remove(self, item):
        """
        Remove an item from the ManagedSet if it exists.

        Bug fix: the item is now also deleted from _index_map. Previously
        it was only swapped out of the list, so `item in s` stayed True
        forever and the index map accumulated stale entries.
        """
        if item in self:
            item_index = self._index_map.pop(item)
            last_item = self._list.pop()
            if item_index < len(self._list):
                # Swap the former tail into the vacated slot.
                self._list[item_index] = last_item
                self._index_map[last_item] = item_index
    def pop_random(self):
        """
        Remove a random item from the set, and return it.
        The set must be non-empty (randint raises ValueError otherwise).
        """
        rand_index = randint(0, len(self._list) - 1)
        item = self._list[rand_index]
        self.remove(item)
        return item
if __name__ == '__main__':
    # Smoke test: drain a three-element set one random pop at a time.
    demo = RandomSet({1, 2, 3})
    for _ in range(3):
        demo.pop_random()
    print(len(demo))
|
23,761 | 0b47f52c6ecf18fb59b479887a66da395c887c05 | '''
public Classical(String input) {
super();
this.array1 = "0 1 2 3 4 5 6 7 8 9 a b c d e f g h i j k l m n o p q r s t u v w x y z = A B C D E F G H I J K L M E O P Q R S T U V W X Y Z"
;
this.array2 = "W,p,X,4,5,B,q,A,6,a,V,3,r,b,U,s,E,d,C,c,D,O,t,T,Y,v,9,Q,2,e,8,P,f,h,J,N,g,u,K,k,H,x,L,w,R,I,j,i,y,l,m,S,M,1,0,O,n,2,G,7,=,F,Z"
;
this.input = input;
}
public String make() {
new String[0];
new String[0];
String[] v1 = this.array1.split(" ");
String[] v2 = this.array2.split(",");
int v4 = this.input.length();
int v3;
for(v3 = 0; v3 < v4; ++v3) {
String v0 = String.valueOf(this.input.charAt(v3));
int v5;
for(v5 = 0; v5 < 63; ++v5) {
if(v0.equals(v1[v5])) {
if(v3 == 0) {
this.output = v2[v5];
}
else {
this.output = this.output + v2[v5];
}
}
}
}
return this.output;
@author: wenhuizone
'''
import base64
# Substitution alphabets: v1 is the plain alphabet, v2 the cipher alphabet
# (same ordering as the Java `Classical` class quoted above). Python 2 script.
v1="0123456789abcdefghijklmnopqrstuvwxyz=ABCDEFGHIJKLMEOPQRSTUVWXYZ"
v2="WpX45BqA6aV3rbUsEdCcDOtTYv9Q2e8PfhJNguKkHxLwRIjiylmSM10On2G7=FZ"
target='SRlhb70YZHKvlTrNrt08F=DX3cdD3txmg'
result=''
result_r=''
tmp1=''
flag=''
'''
for i in range(0,len(target)):
    for j in range(65,127):
        tmp1=chr(j)
        #print tmp1
        for k in range(0,63):
            if tmp1==v1[k]:
                tmp1=v2[k]
        if tmp1==target[i]:
            #print tmp1
            result+=tmp1
print "the flag is:"
print result
'''
# Invert the substitution: map each ciphertext char back through v2 -> v1.
for i in range(0,len(target)):
    for j in range(0,63):
        if target[i]==v2[j]:
            tmp1=v1[j]
    result+=tmp1
print result
# The recovered text is base64; decode it, reverse it, then strip the
# 'hdu1s8' wrapper characters to expose the flag.
result_f=base64.b64decode(result)[::-1]
print result_f
flag=result_f.strip('hdu1s8')
print "the flag is:"
print flag[1::]
23,762 | e0f8d2c6f747e890580c6a5a3691a3c975460f37 |
def prime(n):
i=2
while i < n :
if n%i == 0:
return False
i+=1
return True
i = 2
while i <= 100:
if prime(i):
print (i,"is a prime number.")
i=i+1
print("end")
|
23,763 | 4661b00d7715dad84ab9128ffec20ae022c5a5bf | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican static-site configuration for "Jie's Blog".
AUTHOR = u'flingjie'
SITENAME = u"Jie's Blog"
SITEURL = ''
DISPLAY_CATEGORIES_ON_MENU = False
PATH = 'content'
TIMEZONE = 'Asia/Shanghai'
DEFAULT_LANG = u'ch'
GOOGLE_ANALYTICS = 'UA-71093531-1'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
         ('Python.org', 'http://python.org/'),
         )
# Social widget
SOCIAL = (('github', 'https://github.com/flingjie'),
          )
DEFAULT_PAGINATION = 10
# Top navigation menu items
MENUITEMS = [
    ('Archives',SITEURL+'/archives.html'),
    ('Algorithm',SITEURL+'/category/algorithm.html'),
    ('Clojure',SITEURL+'/category/clojure.html'),
    ('Emacs',SITEURL+'/category/emacs.html'),
    ('Note',SITEURL+'/category/note.html'),
    ('Python',SITEURL+'/category/python.html'),
]
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
STATIC_PATHS = ['images', 'extra/robots.txt', 'extra/favicon.ico']
EXTRA_PATH_METADATA = {
    'extra/robots.txt': {'path': 'robots.txt'},
    'extra/favicon.ico': {'path': 'favicon.ico'}
}
# Build output is written into the sibling GitHub Pages checkout.
OUTPUT_PATH = "../flingjie.github.io"
|
23,764 | e3a33c082ac19c3df434d1586b36619042803645 | #!/usr/bin/python3
# Dictionary can't have duplicate keys.
# Create a dictionary, using the List items as keys.
# Convert dictionary back to list.
mylist = ["a", "b", "a", "c", "c"]
# dict.fromkeys preserves insertion order, so duplicates drop in place.
unique_keys = dict.fromkeys(mylist)
mylist = list(unique_keys)
print(mylist)
23,765 | 0278e0aeb2d16ba14a3c57a007fb8bf19a1a08f7 | print("Super Sweet Open Source Project!")
print("Now includes my excellent new feature...")
# New feature...
for i in range(3):
print(str(i + 1))
|
23,766 | 6732ed31ea5b868ec59c3bf3b941973bbeee6606 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchtext import data, datasets
from module import ClassificationTransformer
from argparse import ArgumentParser
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train(data_iterator, model, optimizer, criterion, args):
    """Run one training epoch over `data_iterator`; prints per-sample loss
    and accuracy at the end. Mutates the model in place."""
    n_sample, n_correct = 0, 0
    batch_loss = 0.0
    for batch in tqdm(data_iterator):
        optimizer.zero_grad()
        input = batch.text[0]
        # Shift labels to 0-based class ids (batch labels start at 1).
        label = batch.label - 1
        if input.size(1) > args.max_len:
            # Truncate over-long sequences to the model's limit.
            input = input[:, :args.max_len]
        out = model(input)
        n_sample += input.size(0)
        n_correct += (label == out.argmax(dim=1)).sum().item()
        loss = criterion(out, label)
        # Weight by batch size so the final average is per-sample.
        batch_loss += loss.item() * input.size(0)
        loss.backward()
        optimizer.step()
    print('train loss: {:.3f}, accuracy: {:.3f}'.format(batch_loss / n_sample,
                                                        n_correct / n_sample))
def validate(data_iterator, model, criterion, args):
    """Evaluate the model on `data_iterator` without gradient tracking;
    prints per-sample loss and accuracy."""
    with torch.no_grad():
        n_sample, n_correct = 0, 0
        batch_loss = 0.0
        for batch in tqdm(data_iterator):
            input = batch.text[0]
            # Shift labels to 0-based class ids (batch labels start at 1).
            label = batch.label - 1
            if input.size(1) > args.max_len:
                # Truncate over-long sequences to the model's limit.
                input = input[:, :args.max_len]
            out = model(input)
            n_sample += input.size(0)
            n_correct += (label == out.argmax(dim=1)).sum().item()
            loss = criterion(out, label)
            batch_loss += loss.item() * input.size(0)
        print('test loss: {:.3f}, accuracy: {:.3f}'.format(batch_loss / n_sample,
                                                           n_correct / n_sample))
def main(args):
    """Train a ClassificationTransformer on IMDB sentiment and save weights
    to 'saved_model.pt'."""
    TEXT = data.Field(batch_first=True, lower=True, include_lengths=True)
    LABEL = data.Field(sequential=False, batch_first=True)
    print("Loading IMDB dataset...")
    trainset, testset = datasets.IMDB.splits(TEXT, LABEL)
    print("Building vocabulary: {} words...".format(args.vocab_size))
    # max_size excludes the two special tokens, keeping the total at vocab_size.
    TEXT.build_vocab(trainset, max_size=args.vocab_size - 2)
    LABEL.build_vocab(trainset)
    train_iter, test_iter = data.BucketIterator.splits((trainset, testset),
                                                       batch_size=args.batch,
                                                       device=device)
    print("Instantiating model...")
    model = ClassificationTransformer(n_layer=args.n_layer,
                                      n_head=args.n_head,
                                      d_embed=args.d_embed,
                                      n_token=args.vocab_size,
                                      n_sequence=args.max_len,
                                      n_class=2,
                                      embedding_pos=args.position_embedding)
    model.to(device)
    print(model)
    optimizer = torch.optim.Adam(lr=0.001, params=model.parameters())
    # NOTE(review): NLLLoss expects log-probabilities — confirm the model
    # ends in log_softmax.
    criterion = nn.NLLLoss()
    for epoch in range(args.epochs):
        print('epoch', epoch + 1)
        model.train(True)
        train(train_iter, model, optimizer, criterion, args)
        model.train(False)
        validate(test_iter, model, criterion, args)
    torch.save(model.state_dict(), 'saved_model.pt')
if __name__ == "__main__":
    # Command-line hyperparameters; defaults give a tiny quick-to-train model.
    parser = ArgumentParser()
    parser.add_argument("--epochs", default=10, type=int)
    parser.add_argument("--batch", default=4, type=int)
    parser.add_argument("--max_len", default=512, type=int)
    parser.add_argument("--vocab_size", default=5000, type=int)
    parser.add_argument("--n_layer", default=1, type=int)
    parser.add_argument("--n_head", default=3, type=int)
    parser.add_argument("--d_embed", default=16, type=int)
    # Fix: argparse's type=bool treats ANY non-empty string (even "False")
    # as True; a store_true flag gives the intended on/off semantics.
    parser.add_argument("--position_embedding", default=False, action="store_true")
    args = parser.parse_args()
    main(args)
|
23,767 | 0dc128514ba4a4449db68a8ec14f8a159a42cd5c | #
# Template for 15-110 Homework #6
#
# Problem #3: 3.isLegalSudoku.py
#
#
# WRITTEN BY (NAME & ANDREW ID): Moo Suk Kim (moosukk)
#
# 15-110 section:N
import copy
def areLegalValues(values):
    """Return True if `values` contains at most one of each of 1..9.

    Zeros mark blanks and may repeat freely; any value outside 1..9, or any
    repeated non-zero value, makes the list illegal.

    Fixes: the original inner `while Answer == True` never terminated on
    legal input, and its chained comparison (`x > 0 == False`) inverted
    the range check.
    """
    seen = []
    for v in values:
        if v == 0:
            continue
        if v < 1 or v > 9 or v in seen:
            return False
        seen.append(v)
    return True
def isLegalRow(board, row):
    """Return True if row `row` of the board holds only legal values.

    Fixes: the original built `values` by += on ints (a TypeError), only
    looked at 8 of 9 cells, and unconditionally returned False.
    """
    return areLegalValues(board[row])
def isLegalCol(board, col):
    """Return True if column `col` of the board holds only legal values.
    (Implements the stub, which unconditionally returned False.)"""
    return areLegalValues([board[row][col] for row in range(len(board))])
def isLegalBlock(board, block):
    """Return True if 3x3 block `block` (0..8, numbered row-major) is legal.
    (Implements the stub, which unconditionally returned False.)"""
    startRow = (block // 3) * 3
    startCol = (block % 3) * 3
    values = [board[startRow + r][startCol + c]
              for r in range(3) for c in range(3)]
    return areLegalValues(values)
def isLegalSudoku(board):
    """Return True if every row, column, and 3x3 block of the board is legal.
    (Implements the stub, which unconditionally returned False.)"""
    for i in range(9):
        if not (isLegalRow(board, i) and isLegalCol(board, i)
                and isLegalBlock(board, i)):
            return False
    return True
################## You may ignore below this line #################
# --- Test fixtures: two legal boards plus copies broken in one cell each.
# (Python 2 test script: print statements, comma-suffix suppresses newline.)
okBoard1 = [
    [ 5, 3, 0, 0, 7, 0, 0, 0, 0 ],
    [ 6, 0, 0, 1, 9, 5, 0, 0, 0 ],
    [ 0, 9, 8, 0, 0, 0, 0, 6, 0 ],
    [ 8, 0, 0, 0, 6, 0, 0, 0, 3 ],
    [ 4, 0, 0, 8, 0, 3, 0, 0, 1 ],
    [ 7, 0, 0, 0, 2, 0, 0, 0, 6 ],
    [ 0, 6, 0, 0, 0, 0, 2, 8, 0 ],
    [ 0, 0, 0, 4, 1, 9, 0, 0, 5 ],
    [ 0, 0, 0, 0, 8, 0, 0, 7, 9 ]
]
okBoard2 = [
    [ 5, 3, 4, 6, 7, 8, 9, 1, 2 ],
    [ 6, 7, 2, 1, 9, 5, 3, 4, 8 ],
    [ 1, 9, 8, 3, 4, 2, 5, 6, 7 ],
    [ 8, 5, 9, 7, 6, 1, 4, 2, 3 ],
    [ 4, 2, 6, 8, 5, 3, 7, 9, 1 ],
    [ 7, 1, 3, 9, 2, 4, 8, 5, 6 ],
    [ 9, 6, 1, 5, 3, 7, 2, 8, 4 ],
    [ 2, 8, 7, 4, 1, 9, 6, 3, 5 ],
    [ 3, 4, 5, 2, 8, 6, 1, 7, 9 ]
]
# Each bad board differs from its source in exactly one cell.
badBoard1 = copy.deepcopy(okBoard1)
badBoard1[0][0] = 7
badBoard2 = copy.deepcopy(okBoard2)
badBoard2[8][8] = 8
print "Testing areLegalValues()...",
assert(areLegalValues([0, 1, 2, 3, 4, 5, 6, 7, 8]) == True) # partial set
assert(areLegalValues([7, 3, 1, 5, 2, 9, 8, 4, 6]) == True) # full set!
assert(areLegalValues([0, 1, 2, 3, 4, 5, 6, 7, 0]) == True) # duplicate blanks are ok
assert(areLegalValues([1, 2, 3, 2, 4, 5, 6, 7, 9]) == False) # duplicate values are not
assert(areLegalValues([10, 1, 2, 3, 4, 5, 6, 7, 8]) == False) # out of range
assert(areLegalValues([-1, 1, 2, 3, 4, 5, 6, 7, 8]) == False) # out of range
print "Passed!"
print "Testing isLegalRow()...",
assert(isLegalRow(okBoard1, 0) == True)
assert(isLegalRow(badBoard1, 0) == False)
print "Passed!"
print "Testing isLegalCol()...",
assert(isLegalCol(okBoard1, 0) == True)
assert(isLegalCol(badBoard1, 0) == False)
print "Passed!"
print "Testing isLegalBlock()...",
assert(isLegalBlock(okBoard2, 8) == True)
assert(isLegalBlock(badBoard2, 8) == False)
print "Passed!"
print "Testing isSudokuBoard()...",
assert(isLegalSudoku(okBoard1) == True)
assert(isLegalSudoku(badBoard1) == False)
assert(isLegalSudoku(okBoard2) == True)
assert(isLegalSudoku(badBoard2) == False)
print "Passed!"
23,768 | c95534d64c9ec8894927768cb8414890640901fd | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pex.tools.command import Command
from pex.tools.commands.graph import Graph
from pex.tools.commands.info import Info
from pex.tools.commands.interpreter import Interpreter
from pex.tools.commands.repository import Repository
from pex.tools.commands.venv import Venv
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Iterable
def all_commands():
    # type: () -> Iterable[Command]
    """Return one instance of every available pex-tools subcommand."""
    return Info(), Interpreter(), Graph(), Repository(), Venv()
|
23,769 | bd5da1935302eebb1ec42da9ab412bd7694459ee | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-30 00:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `Abrigo` (shelter) model with
    title, postal code, and available-space fields."""
    dependencies = [
        ('mapa_app', '0013_local_altura'),
    ]
    operations = [
        migrations.CreateModel(
            name='Abrigo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(blank=True, max_length=100, null=True, verbose_name='Abrigo')),
                ('cep', models.CharField(blank=True, max_length=20, null=True, verbose_name='Cep')),
                ('espaco', models.CharField(blank=True, max_length=15, null=True, verbose_name='Espaço disponivel')),
            ],
            options={
                'verbose_name': 'Abrigo',
                'verbose_name_plural': 'Abrigos',
            },
        ),
    ]
|
23,770 | 614252d80ca8f02729effc1d271388f7a24ee0ed | last_tri_num=3
# Python 2 script: find and print the first triangular number whose divisor
# count exceeds 500 (Project-Euler-style search).
tri_num=6
index=4
fac_num=2
# `prime` actually accumulates ALL integers >= 2; is_prime[k] flags whether
# prime[k] (== k+2) survived the sieve pass below.
prime=[2,3]
temp=0
is_prime=[True]*len(prime)
while fac_num<=500:
    # Extend the candidate list up to the current triangular number.
    p=range(last_tri_num+1,tri_num+1)
    prime.extend(p)
    p1=[True]*len(p)
    is_prime.extend(p1)
    # Sieve of Eratosthenes over the extended range.
    for i in prime:
        if i*i<=tri_num:
            j=i*i
            while j<=tri_num:
                is_prime[j-2]=False
                j=j+i
    # Count each prime's exponent in tri_num by repeated division.
    p_fac_num=[0]*len(prime)
    temp=tri_num
    for j in range(len(prime)-1):
        if is_prime[j]:
            while temp%prime[j]==0:
                temp=temp/prime[j]
                p_fac_num[j]=p_fac_num[j]+1
    # Divisor count = product of (exponent + 1).
    fac_num=1
    for ele in p_fac_num:
        fac_num=fac_num*(ele+1)
    fac_num=fac_num
    # Advance to the next triangular number.
    last_tri_num=tri_num
    tri_num=tri_num+index
    index=index+1
print last_tri_num
|
23,771 | 4c022152e3b6ba5b67fd632bee8f1bf7fd2433aa | from rest_framework import viewsets, generics, filters
from rest_framework.permissions import AllowAny
from measurement.filters import MeasurementFilter, RecomendationFilter
from measurement.models import Measurement, Recomendation
from measurement.serializers import MeasurementSerializer, RecomendationSerializer, UserMeasurementHistorySerializer, \
UserRecomendationHistorySerializer
from users.models import User
class MeasurementCRUDView(viewsets.ModelViewSet):
    """Full CRUD endpoints for Measurement objects (open access)."""
    queryset = Measurement.objects.all()
    serializer_class = MeasurementSerializer
    permission_classes = (AllowAny,)
class RecomendationCRUDView(viewsets.ModelViewSet):
    """Full CRUD endpoints for Recomendation objects (open access)."""
    queryset = Recomendation.objects.all()
    serializer_class = RecomendationSerializer
    permission_classes = (AllowAny,)
class PatientMeasurementHistoryView(generics.RetrieveAPIView):
    """Retrieve one user together with their measurement history."""
    queryset = User.objects.all()
    serializer_class = UserMeasurementHistorySerializer
    permission_classes = (AllowAny,)
class PatientRecomendationHistoryView(generics.RetrieveAPIView):
    """Retrieve one user together with their recommendation history."""
    queryset = User.objects.all()
    serializer_class = UserRecomendationHistorySerializer
    permission_classes = (AllowAny,)
class MeasurementHistoryView(generics.ListAPIView):
    """List one patient's measurements, oldest first, with filtering."""
    serializer_class = MeasurementSerializer
    filterset_class = MeasurementFilter
    def get_queryset(self):
        # `pk` comes from the URL and identifies the patient.
        user_id = self.kwargs['pk']
        return Measurement.objects.filter(patient=user_id).order_by('created_at')
class RecomendationHistoryView(generics.ListAPIView):
    """List one patient's recommendations, oldest first, with filtering."""
    serializer_class = RecomendationSerializer
    filterset_class = RecomendationFilter
    def get_queryset(self):
        # `pk` comes from the URL and identifies the patient.
        user_id = self.kwargs['pk']
        return Recomendation.objects.filter(patient=user_id).order_by('created_at')
|
23,772 | 940a19dc450ebe3ce6d13ae960a39b8a7617285b | #### COPIED #### VERIFIED
"""
Given a 2D array, find the maximum sum subarray in it.
"""
def kadanes(array, m):
    """Kadane's algorithm over array[:m].

    Returns (best_sum, best_start, best_end), the maximum-sum subarray
    (end inclusive); an all-negative input yields (0, 0, 0). Also prints
    the result, matching the original debug behaviour.
    """
    best_sum, best_start, best_end = 0, 0, 0
    running, window_start = 0, 0
    for idx in range(m):
        running += array[idx]
        if running < 0:
            # Negative prefix can never help: restart just past here.
            running = 0
            window_start = idx + 1
        elif running > best_sum:
            best_sum = running
            best_start = window_start
            best_end = idx  # inclusive
    print(best_sum, best_start, best_end)
    return best_sum, best_start, best_end
def find_max_sum(arr, m, n): # m rows and n colomns
    """Maximum-sum submatrix of an m x n matrix.

    For every (left, right) column pair, collapses the strip into per-row
    column sums and runs Kadane's algorithm on that projection. Prints the
    best sum and its bounds, then returns the sum.
    """
    best = 0
    best_left = best_right = 0
    best_up = best_down = 0
    for left in range(n):
        col_sums = [0] * m
        for right in range(left, n):
            # Extend the strip one column to the right, incrementally.
            for row in range(m):
                col_sums[row] += arr[row][right]
            strip_sum, strip_up, strip_down = kadanes(col_sums, m)
            if strip_sum > best:
                best = strip_sum
                best_up, best_down = strip_up, strip_down
                best_left, best_right = left, right
    print(best, best_left, best_right, best_up, best_down)
    return best
# Demo matrix (classic example; the best submatrix sums to 29).
arr = [[ 1, 2, -1, -4, -20],
       [-8, -3, 4, 2, 1],
       [ 3, 8, 10, 1, 3],
       [-4, -1, 1, 7, -6]]
find_max_sum(arr, 4, 5)
23,773 | e358bf21048b81cf277bc771c32556216d6893f8 | from config.aliyun import Aliyun
from .signature import Signature
from util.util import Util
from util.hash import Hash
from util.base64 import Base64
import oss2
# Object storage (Aliyun OSS) helper
class Oss:
    """Aliyun OSS (Object Storage) helper built on the oss2 SDK.

    NOTE(review): methods are declared without `self`/`cls` and are invoked
    directly on the class (e.g. ``Oss.Init()``) — confirm callers never
    instantiate this class.
    """
    OssConn = None  # cached oss2.Bucket connection (created lazily by Init)
    AccessKeyId: str = ''  # RAM: AccessKeyId
    AccessKeySecret: str = ''  # RAM: AccessKeySecret
    Endpoint: str = ''  # region endpoint
    Bucket: str = ''  # bucket name
    # Build a signed policy for direct browser-to-OSS upload
    def Policy(dir: str, file: str, expireTime: int=0, maxSize: int=0):
        ram = Aliyun.RAM()
        cfg = Aliyun.OSS()
        # Fall back to configured defaults when not supplied
        if expireTime == 0 : expireTime = cfg['ExpireTime']
        if maxSize == 0 : maxSize = cfg['MaxSize']
        # Response payload for the client
        res = Signature.PolicySign(expireTime, maxSize)
        res['host'] = 'https://'+cfg['Bucket']+'.'+cfg['Endpoint']
        res['dir'] = dir
        res['file'] = file
        res['max_size'] = maxSize
        # Upload-callback definition (base64-encoded JSON); the sign field
        # lets PolicyVerify authenticate the callback later.
        callbackBody = Util.JsonEncode({
            'dir': dir,
            'file': file,
            'expire': res['expire'],
            'sign': Hash.Md5(dir+'&'+file+'&'+str(res['expire'])+'&'+ram['AccessKeySecret']),
        })
        callbackData = Util.JsonEncode({
            'callbackUrl': cfg['CallbackUrl'],
            'callbackBodyType': cfg['CallbackType'],
            'callbackBody': callbackBody,
        })
        res['callback'] = Base64.ToStr(Base64.Encode(Base64.ToByte(callbackData)))
        return res
    # Verify a direct-upload callback signature and expiry
    def PolicyVerify(dir: str, file: str, expire: str, sign: str):
        # Config
        ram = Aliyun.RAM()
        # Recompute the signature and compare
        signTmp = Hash.Md5(dir+'&'+file+'&'+expire+'&'+ram['AccessKeySecret'])
        if sign != signTmp : return False
        # Reject expired requests
        now = Util.Time()
        etime = int(expire)
        if now > etime : return False
        return True
    # Initialize: lazily create and cache the bucket connection
    def Init():
        # Pull missing credentials/endpoint from configuration
        ramCfg = Aliyun.RAM()
        ossCfg = Aliyun.OSS()
        if not Oss.AccessKeyId : Oss.AccessKeyId = ramCfg['AccessKeyId']
        if not Oss.AccessKeySecret : Oss.AccessKeySecret = ramCfg['AccessKeySecret']
        if not Oss.Endpoint : Oss.Endpoint = ossCfg['Endpoint']
        if not Oss.Bucket : Oss.Bucket = ossCfg['Bucket']
        # Connect (best-effort: returns None on failure)
        if not Oss.OssConn :
            try:
                auth = oss2.Auth(Oss.AccessKeyId, Oss.AccessKeySecret)
                Oss.OssConn = oss2.Bucket(auth, Oss.Endpoint, Oss.Bucket)
            except Exception as e:
                print('[OSS] Conn:', e)
                Oss.OssConn = None
        return Oss.OssConn
    # List one directory level (folders and files) under `path`
    def ListObject(path: str):
        res = {'folder': [], 'file': []}
        # Connect
        conn = Oss.Init()
        if not conn : return res
        # '/' delimiter keeps the listing non-recursive
        for val in oss2.ObjectIterator(conn, prefix = path, delimiter = '/'):
            if val.is_prefix(): res['folder'] += [val.key]
            else : res['file'] += [val.key]
        return res
    # Upload an object
    def PutObject(file: str, content, headers=None):
        # Connect
        conn = Oss.Init()
        if not conn : return False
        # Execute
        conn.put_object(file, content, headers)
        return True
    # Delete a single object
    def DeleteObject(file: str):
        if len(file)==0 : return False
        # Connect
        conn = Oss.Init()
        if not conn : return False
        # Execute
        conn.delete_object(file)
        return True
    # Delete multiple objects in one batch
    def DeleteObjects(files: list):
        if len(files)==0 : return False
        # Connect
        conn = Oss.Init()
        if not conn : return False
        # Execute
        conn.batch_delete_objects(files)
        return True
    # Delete a folder (path ending in '/') or a single file
    def DeleteObjectAll(path: str):
        if len(path)==0 : return False
        # Connect
        conn = Oss.Init()
        if not conn : return False
        # No trailing slash: treat as a single file
        last = path[-1:]
        if last != '/' :
            conn.delete_object(path)
            return True
        # Folder: delete everything under the prefix
        objects = []
        lists = oss2.ObjectIterator(conn, prefix=path)
        for val in lists :
            objects += [val.key]
        return Oss.DeleteObjects(objects)
|
23,774 | 5b880b63af5282c973e954d521be93ebcd83c716 | from import_data.build import load_data
def data_splitter(df):
    """Split a DataFrame into features (all columns but the last) and the
    target (the last column)."""
    features = df.iloc[:, :-1]
    target = df.iloc[:, -1]
    return features, target
23,775 | ccc768d57a929428338e6e256d1d5509fd69ee4f | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,BooleanField,SubmitField,TextAreaField
from wtforms.validators import EqualTo,Email,DataRequired,Length,ValidationError
from flaskblog.models import User
from flask_wtf.file import FileField,FileAllowed
from flask_login import current_user
class RegisterForm(FlaskForm):
    """Sign-up form; username and email must be unique."""
    username=StringField("Username",validators=[DataRequired(),Length(min=2,max=35)])
    email=StringField("Email",validators=[DataRequired(),Email()])
    # Fix: passwords were plain StringFields; PasswordField (already
    # imported, previously unused) masks the input in the browser.
    password=PasswordField("Password",validators=[DataRequired()])
    confirm_password=PasswordField("Confirm Password",validators=[DataRequired(),EqualTo('password')])
    submit=SubmitField('Sign Up')
    def validate_username(self,username):
        # WTForms calls validate_<field> hooks automatically on submit.
        user=User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError('UserName is Already Exist')
    def validate_email(self,email):
        user=User.query.filter_by(email=email.data).first()
        if user:
            raise ValidationError('Email is Already Exist')
class LoginForm(FlaskForm):
    """Login form: email + password with a remember-me option."""
    email=StringField("Email",validators=[DataRequired(),Email()])
    # Fix: use PasswordField (already imported) so the browser masks input.
    password=PasswordField("Password",validators=[DataRequired()])
    remember_me=BooleanField('Remember Me')
    submit=SubmitField('Login')
class ProfileUpdateForm(FlaskForm):
    """Account-settings form: change username/email and upload an avatar."""
    username=StringField("Username",validators=[DataRequired(),Length(min=2,max=35)])
    email=StringField("Email",validators=[DataRequired(),Email()])
    picture=FileField('update profile picture',validators=[FileAllowed(['jpeg','jpg','png'])])
    submit=SubmitField('Update')
    def validate_username(self,username):
        # Only enforce uniqueness when the name actually changed.
        if username.data!=current_user.username:
            user=User.query.filter_by(username=username.data).first()
            if user:
                raise ValidationError('UserName is Already Exist')
    def validate_email(self,email):
        # Only enforce uniqueness when the address actually changed.
        if email.data!=current_user.email:
            user=User.query.filter_by(email=email.data).first()
            if user:
                raise ValidationError('Email is Already Exist')
class PostForm(FlaskForm):
    """Create a new blog post (title + content)."""
    title=StringField('Title',validators=[DataRequired()])
    content=TextAreaField("Content",validators=[DataRequired()])
    submit=SubmitField("AddPost")
class UpdatePostForm(FlaskForm):
    """Edit an existing blog post (same fields as PostForm, Update button)."""
    title=StringField('Title',validators=[DataRequired()])
    content=TextAreaField("Content",validators=[DataRequired()])
    submit=SubmitField("Update")
class RequestResetForm(FlaskForm):
    """Request a password-reset email; the address must belong to a user."""
    email=StringField("Email",validators=[DataRequired(),Email()])
    submit=SubmitField("Submit")
    def validate_email(self,email):
        user=User.query.filter_by(email=email.data).first()
        if user is None:
            raise ValidationError("There is no account with this email please login first")
class ResetPasswordForm(FlaskForm):
password=StringField("Password",validators=[DataRequired()])
confirm_password=StringField("Confirm Password",validators=[DataRequired(),EqualTo('password')])
submit=SubmitField("Reset Password")
|
23,776 | 1e34d7d14b11cc6746056d63f94f63e524144ee4 | import pygame,random,os
from pygame.locals import *
pygame.init()
# Window size and main surface.
w = 600
h = 500
screen = pygame.display.set_mode((w, h))
pygame.display.set_caption("game car")
k1 = []  # enemy list; re-assigned after the Enemy class is defined below
pygame.mixer.music.load("TSIS8/background.wav")
class Rect(pygame.Rect):
    """pygame.Rect with a fill color and a draw() helper."""
    def __init__(self,x,y,w,h,color):
        super().__init__(x,y,w,h)
        self.color,self.x,self.y,self.w,self.h = color,x,y,w,h
    def draw(self,surface):
        pygame.draw.rect(surface,self.color,self)
class Line(Rect):
    """Scrolling lane marker centered in the road; wraps to the top when it
    leaves the bottom of the window."""
    def __init__(self,road,w,l,color,speed = 6,y0 = 0):
        super().__init__(road.x+road.w/2-w/2,-l+y0,w,l,color)
        self.road = road
        self.sp = speed  # downward scroll speed in px/frame
        self.w,self.l = w,l
        self.y = -l+y0
        self.color = color
    def update(self):
        self.y+=self.sp
        self.move_ip(0,self.sp)
        # past the bottom edge (module-level h): jump back above the window
        if(self.y>=h):
            self.move_ip(0,-self.y-self.l)
            self.y = -self.l
class Player:
    """The player's car; moved with the arrow keys inside the road bounds."""
    def __init__(self, w, h, image, road, speed = 7):
        self.speed = speed
        # x/y are tracked relative to the road, rect holds screen coordinates
        self.x, self.y = 0, road.h-h
        self.w,self.h = w,h
        self.road = road
        self.surf = pygame.Surface((w,h))
        self.rect = self.surf.get_rect(topleft = (road.x,road.h-h))
        self.image = pygame.image.load(os.path.join(os.getcwd(), image)).convert_alpha(screen)
    def draw(self,surface):
        surface.blit(self.image, self.rect)
    def update(self):
        """Apply arrow-key movement, clamped to the road rectangle."""
        key1 = pygame.key.get_pressed()
        # NOTE(review): `crash` is always 0 here, so the `not crash` guards
        # are dead — looks like leftover from an earlier design; confirm.
        crash = 0
        if key1[K_LEFT] and self.x-self.speed>=0 and not crash:
            self.rect.move_ip(-self.speed, 0)
            self.x-=self.speed
            crash = 0
        if key1[K_RIGHT] and self.x+self.speed<=self.road.w-self.w and not crash:
            self.rect.move_ip(self.speed, 0)
            self.x+=self.speed
        if key1[K_UP] and self.y-self.speed>=0:
            self.rect.move_ip(0, -self.speed)
            self.y-=self.speed
        if key1[K_DOWN] and self.y+self.speed<=self.road.h-self.h:
            self.rect.move_ip(0, self.speed)
            self.y+=self.speed
# Grey road strip and the player's car; start background music.
road = Rect(100, 0, 400, h, (99, 99, 99))
road.draw(screen)
p1 = Player(64, 130, "TSIS8/3.png", road)
p1.draw(screen)
pygame.mixer.music.play()
crashed = 0  # set to 1 by Enemy.update() on collision; read by play()
class Enemy:
    """Oncoming car that scrolls down; flags a crash when it overlaps p1."""
    def __init__(self,w,h,img,road,speed = 7,y0 = 0):
        self.w,self.h = w,h
        self.sp = speed
        self.msp = speed  # NOTE(review): never read elsewhere in this chunk
        self.road = road
        self.y = y0
        self.x = random.randint(road.x,road.x+road.w-w)
        self.img = pygame.image.load(os.path.join(os.getcwd(),img))
        self.rect = self.img.get_rect(topleft = (self.x,y0))
    def draw(self,surface):
        surface.blit(self.img,self.rect)
    def update(self):
        """Move down; detect overlap with the player (the +100/-100 offsets
        compensate for p1.x being road-relative while self.x is absolute)."""
        self.rect.move_ip(0,self.sp)
        self.y+=self.sp
        if((self.x in range(p1.x + 100, p1.x + p1.w + 100) and self.y in range(p1.y, p1.y + p1.h)) or (p1.x in range(self.x - 100, self.x + self.w - 100) and p1.y in range(self.y, self.y + self.h))):
            global crashed,epx,epy
            # explosion center used by play() when drawing the crash frame
            epx = (self.x + p1.x - 100) / 2
            epy = (self.y + p1.y) / 2
            crashed = 1
            return
        # respawn above the window at a random lane with a random sprite
        if(self.y>=h):
            self.x = random.randint(road.x,road.x+road.w-self.w)
            self.rect = self.img.get_rect(topleft = (self.x,-self.h))
            self.y = -self.h
            self.img = pygame.image.load(os.path.join(os.getcwd(), "TSIS8/" + str(random.randint(1, 2)) + ".png"))
score = 0  # coin pickups collected; incremented by Coins.update()
class Coins:
    """Falling coin; respawns when collected or off-screen and bumps score."""
    def __init__(self,r,sp = 12,color = (255,255,0)):
        self.r,self.color,self.sp = r,color,sp
        self.x = random.randint(100,500)
        self.y = -random.randint(r,200)
        self.a = r  # horizontal radius of the drawn ellipse
    def update(self):
        self.y+=self.sp
        # d is True when the coin overlaps the player (same +100 offset as Enemy)
        d = (self.x in range(p1.x + 100, p1.x + p1.w + 100) and self.y in range(p1.y, p1.y + p1.h))
        if self.y-self.r>=h or d:
            self.x = random.randint(100,500)
            self.y = -random.randint(self.r,1000)
        global score
        score+=d  # bool adds as 0/1
    def draw(self,surface):
        pygame.draw.ellipse(screen, self.color, pygame.Rect((int(self.x - self.a), int(self.y - self.r)), (int(2 * self.a) + 4, int(2 * self.r))))
# Two lane markers and two enemies, spaced so they alternate on screen.
l1 = [Line(road, 20, 100, (255, 255, 255), y0 =i * 250) for i in range(2)]
k1 = [Enemy(64, 130, "TSIS8/" + str(random.randint(1, 2)) + ".png", road, y0 = int(-i * (h + 100) // 2)) for i in range(2)]
pygame.display.update()
FPS = 60
coin = Coins(20)
coin.draw(screen)
FramePerSec = pygame.time.Clock()
font_style = pygame.font.SysFont("impact", 80)
font_style1 = pygame.font.SysFont("impact", 30)
def message(msg, color):
    """Render `msg` in the large font roughly centered on screen."""
    mesg = font_style.render(msg, True, color)
    screen.blit(mesg, [(w / 2)-180, (h / 2)-90])
def message1(msg, color):
    """Render `msg` in the small font below the large message."""
    mesg = font_style1.render(msg, True, color)
    screen.blit(mesg, [(w / 2) - 180, (h / 2) +30])
def play():
    """Main game loop: update/draw all actors each frame; on a crash show the
    explosion and the game-over screen, then quit the process."""
    while(True):
        if crashed:
            import time
            pygame.mixer.music.stop()
            pygame.mixer.music.load("TSIS8/crash.wav")
            pygame.mixer.music.play()
            # epx/epy were set by Enemy.update() at the collision point
            img = pygame.image.load(os.path.join(os.getcwd(), "TSIS8/explosion.png"))
            screen.blit(img, img.get_rect(center=(epx + 125, epy + 50)))
            pygame.display.update()
            time.sleep(0.2)
            screen.fill((255,255,0))
            message("GAME OVER!", (255,0,0))
            message1("Your score: "+str(score), (255, 0, 0))
            pygame.display.update()
            time.sleep(2)
            pygame.quit()
            exit()
        p1.update()
        coin.update()
        for i in k1:
            i.update()
        # redraw the whole frame back-to-front
        screen.fill((255, 255, 0))
        road.draw(screen)
        for i in l1:
            i.update()
            i.draw(screen)
        coin.draw(screen)
        p1.draw(screen)
        for i in k1:
            i.draw(screen)
        font = pygame.font.SysFont("rockwell",25)
        fr = font.render("Score: "+str(score),1,1)
        screen.blit(fr, (10, 10))
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        FramePerSec.tick(FPS)
# play() only returns via exit()/quit(), so the lines after it are unreachable.
play()
pygame.quit()
exit()
pygame.quit() |
23,777 | b46fce4e3ebd0893acd278266888d5938760d59b | import config
import dash_html_components as html
def header():
    """Build the page header bar: site icon, title text and a GitHub badge."""
    icon_area = html.Div(
        className="iconArea",
        children=[html.Img(src="../assets/pan_bread_1kin_yama.png")],
    )
    title_area = html.Div(
        className="titleArea",
        children=[html.Span(config.TITLE_NAME)],
    )
    social_area = html.Div(
        className="socialArea",
        children=[html.Img(src="../assets/GitHub-Mark-Light-64px.png")],
    )
    return html.Div(className='header', children=[icon_area, title_area, social_area])
23,778 | 3b8301fb7dec8f84f14ec7cd55c1abb040e6da22 | """
51. 特徴量抽出
学習データ,検証データ,評価データから特徴量を抽出し,それぞれtrain.feature.txt,valid.feature.txt,test.feature.txtというファイル名で保存せよ.
なお,カテゴリ分類に有用そうな特徴量は各自で自由に設計せよ.記事の見出しを単語列に変換したものが最低限のベースラインとなるであろう.
"""
import pandas as pd
import pickle
import texthero as hero
from sklearn.feature_extraction.text import TfidfVectorizer
def load_data() -> dict:
    """Load the train/valid/test TSV splits into DataFrames keyed by split name."""
    paths = {
        'train': 'train.txt',
        'valid': 'valid.txt',
        'test': 'test.txt',
    }
    frames = {split: pd.read_csv(filename, sep='\t') for split, filename in paths.items()}
    # quick sanity check of each split's shape and head
    for split in paths:
        print(split, '---', frames[split].shape)
        print(frames[split].head())
    return frames
def preprocess(text) -> str:
    """Clean a title Series with texthero: fill NaN, lowercase, strip digits,
    punctuation, diacritics and stopwords."""
    clean_text = hero.clean(text, pipeline=[
        hero.preprocessing.fillna,
        hero.preprocessing.lowercase,
        hero.preprocessing.remove_digits,
        hero.preprocessing.remove_punctuation,
        hero.preprocessing.remove_diacritics,
        hero.preprocessing.remove_stopwords
    ])
    return clean_text
class FeatureExtraction():
    """Thin wrapper around a (1,2)-gram TfidfVectorizer."""
    def __init__(self, min_df=1, max_df=1) -> None:
        self.tfidf_vec = TfidfVectorizer(min_df=min_df, max_df=max_df, ngram_range=(1, 2))
    def fit(self, input_text) -> None:
        self.tfidf_vec.fit(input_text)
    def transform(self, input_text) -> "scipy.sparse.spmatrix":
        # TfidfVectorizer.transform returns a sparse matrix (callers call
        # .toarray() before wrapping in a DataFrame), not a DataFrame.
        _tfidf_vec = self.tfidf_vec.transform(input_text)
        return _tfidf_vec
if __name__ == "__main__":
    dfs = load_data()
    # build train (train+valid, so the vectorizer sees both) and test
    train = pd.concat([dfs['train'], dfs['valid']], axis=0).reset_index(drop=True)
    test = dfs['test']
    # preprocessing
    train['clean_title'] = train[['title']].apply(preprocess)
    test['clean_title'] = test[['title']].apply(preprocess)
    # feature extraction
    feat = FeatureExtraction(min_df=10, max_df=0.1)
    feat.fit(train['clean_title'])
    X_train = feat.transform(train['clean_title'])
    X_test = feat.transform(test['clean_title'])
    pickle.dump(feat.tfidf_vec, open('tfidf_vec.pkl', 'wb')) # saved because it is reused at inference time
    # convert the sparse matrices to DataFrames
    X_train = pd.DataFrame(X_train.toarray(), columns=feat.tfidf_vec.get_feature_names())
    X_test = pd.DataFrame(X_test.toarray(), columns=feat.tfidf_vec.get_feature_names())
    # split train back into train/valid and save all three
    X_valid = X_train[len(dfs['train']):].reset_index(drop=True)
    X_train = X_train[:len(dfs['train'])].reset_index(drop=True)
    X_train.to_csv('X_train.txt', sep='\t', index=False)
    X_valid.to_csv('X_valid.txt', sep='\t', index=False)
    X_test.to_csv('X_test.txt', sep='\t', index=False)
    print('X_train ---- ', X_train.shape)
    print('X_valid ---- ', X_valid.shape)
    print('X_test ---- ', X_test.shape)
|
23,779 | fb2f185cebc885d1da7424c31787c4a0787b20dd | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Detect faces from the webcam, crop an enlarged region around each face and
# run a licence-plate cascade on the crop. Esc quits.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Load the plate cascade once instead of on every frame.
plate_cascade = cv2.CascadeClassifier('./indian_license_plate.xml')
print("start")
crop = np.array([])
cam = cv2.VideoCapture(0)

def extract_plate(face):
    """Find licence plates inside `face` (a BGR image) and show them outlined."""
    plate_img = face.copy()
    plate_rect = plate_cascade.detectMultiScale(plate_img, scaleFactor = 1.3, minNeighbors = 7)
    print("number plate = "+str(len(plate_rect)))
    for (x,y,w,h) in plate_rect:
        # small inset so the extracted plate hugs the detection box
        a,b = (int(0.02*face.shape[0]), int(0.025*face.shape[1]))
        plate = plate_img[y+a:y+h-a, x+b:x+w-b, :]
        cv2.rectangle(plate_img, (x,y), (x+w, y+h), (51,51,255), 3)
        cv2.imshow("Number plate",plate_img)

while True:
    ret , img = cam.read(0)
    face_rects = face_cascade.detectMultiScale(img,scaleFactor=1.2,minNeighbors= 10)
    detected = len(face_rects)  # total number of faces in this frame
    for (x,y,w,h) in face_rects:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,255),10)
        a,b = (int(0.02*img.shape[0]), int(0.025*img.shape[1])) #parameter tuning
        j = 70          # extra margin around the detected face
        p = y + a-j     # top    (- = up,    + = down)
        q = y+h-a+j + 300  # bottom (- = up, + = down)
        r = x+b-j       # left   (- = right, + = left)
        s = x+w-b+j     # right  (- = left,  + = right)
        crop = img[p:q,r:s]
    cv2.imshow('original',img)
    # BUG FIX: `detected is not 0` compared object identity with an int
    # literal, which is implementation-dependent; compare by value instead.
    if detected != 0:
        cv2.imshow('video face detect',crop)
        print ("total face = " + str(detected))
        extract_plate(crop)
    else:
        print("face not detected")
    k = cv2.waitKey(1)
    if k == 27:  # Esc
        break
cam.release()
cv2.destroyAllWindows()
23,780 | c439c12148814e884e1ef903b6d6d3c333e13f46 | from rest_framework import serializers
from weibo.models import User, Post, Like, FollowShip, Collection
from functools import reduce
class FollowShipSerializer(serializers.ModelSerializer):
    """FollowShip edge with both endpoints expanded into full user payloads."""
    following_user = serializers.SerializerMethodField()
    follower_user = serializers.SerializerMethodField()
    class Meta:
        model = FollowShip
        fields = ('follower', 'following', 'following_user', 'follower_user')
    def get_following_user(self, obj):
        # serialized user being followed
        return UserSerializer(User.objects.get(id=obj.following_id)).data
    def get_follower_user(self, obj):
        # serialized user doing the following
        return UserSerializer(User.objects.get(id=obj.follower_id)).data
class LikeSerializer(serializers.ModelSerializer):
    """Flat like/unlike record for a (user, post) pair."""
    class Meta:
        model = Like
        fields = ('is_like', 'user', 'post')
class UserSerializer(serializers.ModelSerializer):
    """User payload enriched with follower/following counts, total likes
    received across the user's posts, and the avatar URL."""
    follower_num = serializers.SerializerMethodField()
    following_num = serializers.SerializerMethodField()
    likes_earn = serializers.SerializerMethodField()
    avatar_url = serializers.SerializerMethodField()
    class Meta:
        model = User
        fields = ('username', 'id', 'follower_num', 'following_num', 'likes_earn', 'avatar_url')
    def get_follower_num(self, obj):
        # users that follow `obj`
        return FollowShip.objects.filter(following_id=obj.id).count()
    def get_following_num(self, obj):
        # users that `obj` follows
        return FollowShip.objects.filter(follower_id=obj.id).count()
    def get_likes_earn(self, obj):
        """Total likes received across all of the user's posts (0 when none).

        Replaces the reduce(map(map(...))) chain with a plain sum, which also
        makes the empty-queryset case fall out naturally.
        """
        post_ids = Post.objects.filter(user__id=obj.id).values_list('id', flat=True)
        return sum(
            Like.objects.filter(post_id=post_id, is_like=True).count()
            for post_id in post_ids
        )
    def get_avatar_url(self, obj):
        return User.objects.get(id=obj.id).avatar.url_avatar
    def delete_fields(self, *args, **kwargs):
        """Remove the serializer fields named in kwargs['fields'], if given."""
        fields = kwargs.get('fields')
        if fields is not None:
            # (the old `existing` set was computed but never used)
            for field_name in set(fields):
                self.fields.pop(field_name)
class CommentSerializer(serializers.ModelSerializer):
    """Comment payload.

    NOTE(review): Meta.model is Post, not a Comment model — confirm comments
    really are stored as Post rows.
    """
    user = UserSerializer(many=True)
    class Meta:
        model = Post
        fields = ('content', 'user', 'datetime')
class PostSerializer(serializers.ModelSerializer):
    """Post payload with nested author(s), comments and a like counter."""
    user = UserSerializer(many=True)
    comment = CommentSerializer(many=True)
    total_liked = serializers.SerializerMethodField()
    class Meta:
        model = Post
        fields = ('content', 'datetime', 'user', 'comment', 'total_liked', 'id')
    def create(self, validated_data):
        return Post.objects.create(**validated_data)
    def get_total_liked(self, obj):
        # number of positive likes on this post
        return Like.objects.filter(post__id=obj.id).filter(is_like=True).count()
    def delete_fields(self, *args, **kwargs):
        # Drop the named fields from serialization.
        fields = kwargs.get('fields')
        if fields is not None:
            forbidden = set(fields)
            existing = set(self.fields.keys())  # NOTE(review): computed but unused
            for field_name in forbidden:
                self.fields.pop(field_name)
class CollectionSerializer(serializers.ModelSerializer):
    """Collection payload exposing the collected posts.

    NOTE(review): filtering Post by id=obj.user_id and serializing the
    queryset without many=True both look wrong — verify against callers.
    """
    posts = serializers.SerializerMethodField()
    class Meta:
        model = Collection
        fields = ('posts',)
    def get_posts(self, obj):
        return PostSerializer(Post.objects.filter(id=obj.user_id)).data
|
23,781 | 0998baa4fe0dd5ec75aa8f19a108e28955bbcd39 | #from mp1_tooltits.mplot3d import axes3d
#import matplotlib.py.plot as plt
import matplotlib as mp
mp.test()  # runs matplotlib's self-test suite; NOTE(review): likely leftover debug code
#fig=pl t.figure()
#ax=fig.gca(progection='3d')
#X,Y,Z=axes3d.get_test_data(0.05)
#ax.plot_surface(X,Y,Z,rstride=8,cstride=8,alpha=0.3)
#cset=ax.contour(X,Y,Z,) |
23,782 | 556dfed2de5f8fc8aa930983ebbcaa6eabd3cd97 | #!/usr/bin/python
import cgi
import MySQLdb as mariadb
print "Content-type:text/html"
x=cgi.FieldStorage()
uid=x.getvalue('uid')
ctc=x.getvalue('ctc')
pass1=x.getvalue('pass1')
pass2=x.getvalue('pass2')
uid=cgi.escape(uid)
ctc=cgi.escape(ctc)
pass1=cgi.escape(pass1)
pass2=cgi.escape(pass2)
if( pass1 != pass2 ):
print "location:http://192.168.43.63/cgi-bin/forg.py?q=perror"
print ""
if((uid =="")| (ctc == "")| (pass1 == "")| (pass2 == "")):
print "location:http://192.168.43.63/cgi-bin/forg.py?q=ferror"
print ""
elif((len(uid) > 30)| (len(pass1)<6)| (len(pass2)<6)| (len(pass1)>30)| (len(pass1)>30)):
print "location:http://192.168.43.63/cgi-bin/forg.py?q=ferror"
print ""
else:
a=[]
b=[]
flag=0
mariadb_connection=mariadb.connect(user='hadoop')
cursor=mariadb_connection.cursor()
cursor.execute("use hs")
cursor.execute("select USERNAME from USERS")
mariadb_connection.commit()
for USERNAME in cursor:
a=USERNAME
if(a[0] == uid):
cursor.execute("select PHONE from USERS where USERNAME=%s",(uid))
mariadb_connection.commit()
for CONTACT in cursor:
b=CONTACT
if(b[0] == ctc):
flag=1;
break;
if(flag==0):
mariadb_connection.close()
print "location:http://192.168.43.63/cgi-bin/forg.py?q=merror"
print ""
else:
a=[]
cursor.execute("update USERS set PASSWORD=%s where USERNAME=%s",(pass1,uid))
mariadb_connection.commit()
mariadb_connection.close()
print "location:http://192.168.43.63/cgi-bin/index.py?q=forgsucess"
print ""
|
23,783 | 40c34930f9c8827fc4009ada9ec5e013f61ba237 | from flask import Flask, render_template, redirect, request
app = Flask(__name__)
# Static template-rendering views; the .html.j2 templates carry all content.
@app.route("/")
def index():
    """Landing page."""
    return render_template("index.html.j2")
@app.route("/realview")
def realview():
    """Live-view page."""
    return render_template("realview.html.j2")
@app.route("/login")
def login():
    """Login page (form handling lives in the template/front end)."""
    return render_template("login.html.j2")
@app.route("/rota")
def rota():
    """Route/rota page."""
    return render_template("rota.html.j2")
@app.route("/contato")
def contato():
    """Contact page."""
    return render_template("contato.html.j2")
if __name__ == "__main__":
    app.run(debug=True)
23,784 | 972de1b83b331a465aaae48f190dc9de9ecf7e23 | import math
def is_power_of_two(value):
    """Return True iff ``value`` is a positive power of two.

    Uses the bit trick ``value & (value - 1) == 0`` instead of comparing
    ``math.log2(n)`` with its int() truncation: the float comparison can
    misfire for large integers and ``log2`` raises on ``n <= 0``.
    """
    return value > 0 and value & (value - 1) == 0

if __name__ == "__main__":
    n = int(input("Число: "))
    if is_power_of_two(n):
        print('Yes')
    else:
        print('No')
|
23,785 | e4ab83fdd2e2931a498d5c7620b1c139340f3070 | # -*- coding: utf-8 -*-
"""
:Author: Jaekyoung Kim
:Date: 2018. 1. 10.
"""
import argparse
import os
from sqlalchemy import create_engine
# Connection settings for the AWS RDS MariaDB/MySQL instance.
# SECURITY NOTE(review): credentials are hard-coded in source; they should be
# moved to environment variables or a secrets store.
database = 'willbe'
aws_endpoint = 'findb.cay7taoqqrm6.ap-northeast-2.rds.amazonaws.com'
aws_user = 'jaekyoungkim'
aws_password = 'willbelucky'
# pool sized to twice the CPU count; pool_recycle=1 drops idle connections fast
aws_engine = create_engine(
    'mysql+mysqlconnector://{}:{}@{}/{}'.format(aws_user, aws_password, aws_endpoint, database),
    encoding='utf-8', pool_recycle=1, pool_size=2 * (os.cpu_count() or 1))
# registry of named engines consulted by get_connection()
engine_pool = {
    'aws': aws_engine,
}
def get_connection(engine_key=None):
    """
    Get connection of the script parameter's db. If there is no script parameter, default db is local.
    :return Connection:
    """
    key = _get_engine_key() if engine_key is None else engine_key
    engine = engine_pool.get(key)
    if engine is None:
        raise EnvironmentError("{} is not in engine_pool. Please check your script parameters.".format(key))
    return engine.connect()
def _get_engine_key():
parser = argparse.ArgumentParser()
parser.add_argument(
'--engine_key',
type=str,
default='aws',
help='Directory to put the log data.'
)
flags, unparsed = parser.parse_known_args()
return flags.engine_key
|
23,786 | 136d1878a95786bc58296f74435d120996cf341d | import sys
lines = open(sys.argv[1]).readlines()
k = set()
for l in lines:
ls = l.split(',')
count = ls[0]
rest = ls[1:]
n = len(rest)
assert n % 2 == 0
# we have the same name repeated twice so we need to find out how many extra commas are in it
cms = (n - 4) / 2
#print ','.join(rest[:cms])
sys.stdout.write('\t'.join([count] + [','.join(rest[:cms])] + rest[cms:-cms] + [','.join(rest[-cms:])]))
#k.add(len(ls))
#ls = [",".join(ls[:-7])] + ls[-7:]
#print ls
#print k
|
23,787 | 785388c385a27ea5fe86accd74c04dcce4a6a754 | #!python3
#encoding:utf-8
import json
import time
from urllib.parse import urlparse
import re
from PIL import Image
from io import BytesIO
class Response(object):
    """Helpers for decoding HTTP responses (requests-style objects) by
    Content-Type and for walking RFC 5988 pagination Link headers."""
    def __init__(self):
        self.Headers = Response.Headers()
    def Get(self, r, sleep_time=2, is_show=True):
        """Validate `r` and decode it by MIME type.

        JSON -> dict, image/* -> PIL.Image, anything else -> text. Sleeps
        `sleep_time` seconds as crude rate limiting; raises for HTTP errors.
        """
        if is_show:
            print('Response.start---------------------')
            print("HTTP Status Code: {0} {1}".format(r.status_code, r.reason))
            print(r.text)
            print('Response.end---------------------')
        time.sleep(sleep_time)
        r.raise_for_status()
        self.Headers.ContentType.Split(r)
        mime = self.Headers.ContentType.mime_type
        if mime is None:
            return None
        if 'application/json' == mime:
            return r.json()
        if mime in ('image/gif', 'image/jpeg', 'image/png'):
            return Image.open(BytesIO(r.content))
        return r.text
    class Headers:
        def __init__(self):
            self.ContentType = Response.Headers.ContentType()
            self.Link = Response.Headers.Link()
        class Link:
            """Accessors for the parsed `Link` header exposed by requests."""
            def __init__(self):
                pass
            def Get(self, r, rel='next'):
                # returns None implicitly for unknown rels, as before
                if None is r.links:
                    return None
                if rel in ('next', 'prev', 'first', 'last'):
                    return r.links[rel]['url']
            def Next(self, r):
                return self.__get_page(r, 'next')
            def Prev(self, r):
                return self.__get_page(r, 'prev')
            def First(self, r):
                return self.__get_page(r, 'first')
            def Last(self, r):
                return self.__get_page(r, 'last')
            def __get_page(self, r, rel='next'):
                """Extract the `page` query parameter from the rel'd link."""
                from urllib.parse import parse_qs
                if r is None:
                    return None
                print(r.links)
                if rel in r.links:
                    url = urlparse(r.links[rel]['url'])
                    # BUG FIX: urlparse().query is a raw string; indexing it
                    # with 'page' raised TypeError. Parse it first.
                    page = parse_qs(url.query).get('page', [None])[0]
                    print('page=' + str(page))
                    return page
                return None
        class ContentType:
            """Parsed Content-Type header.

            Forms handled (RFC 6838):
              top-level/subtype [;params]
              top-level/[tree.]subtype[+suffix] [;param1] [;param2] ...
            """
            def __init__(self):
                self.mime_type = None        # e.g. application/json
                self.char_set = None         # e.g. utf8
                self.top_level_type = None
                self.sub_type = None
                self.suffix = None
                self.parameters = None
            def Split(self, r):
                """Populate the attributes from r.headers['Content-Type']."""
                self.mime_type = None
                self.char_set = None
                self.top_level_type = None
                self.sub_type = None
                self.suffix = None
                self.parameters = None
                raw = r.headers['Content-Type'] if 'Content-Type' in r.headers else None
                if raw:
                    parts = raw.split(';')
                    self.mime_type = parts[0].strip()
                    if len(parts) > 1:
                        self.parameters = {}
                        for param in parts[1:]:
                            key, value = param.split('=')
                            self.parameters[key.strip()] = value.strip()
                    self.top_level_type, self.sub_type = self.mime_type.split('/')
                    # BUG FIX: structured-syntax suffix, e.g. 'vnd.api+json'
                    # -> 'json'. The old test endswith('+') could never match
                    # a real suffixed subtype.
                    if '+' in self.sub_type:
                        self.suffix = self.sub_type.rsplit('+', 1)[1]
                    if self.parameters is not None:
                        # case-insensitive charset lookup (replaces an unused
                        # compiled regex)
                        for key, value in self.parameters.items():
                            if key.lower() == 'charset':
                                self.char_set = value
                print('mime_type: {0}'.format(self.mime_type))
                print('top_level_type: {0}'.format(self.top_level_type))
                print('sub_type: {0}'.format(self.sub_type))
                print('suffix: {0}'.format(self.suffix))
                print('parameters: {0}'.format(self.parameters))
                print('char_set: {0}'.format(self.char_set))
|
23,788 | a60467db8beba713fa45f4acafdfea0cd064eeeb | from rest_framework import serializers
from utils.exceptions.get_object_or_404 import get_object_or_404_customed
from ..models import Cheese
class CheeseSerializer(serializers.ModelSerializer):
    """Full-field serializer for the Cheese model."""
    class Meta:
        model = Cheese
        fields = '__all__'
class CheeseRelatedField(serializers.RelatedField):
    """Related field that reads/writes cheeses by their unique name."""
    queryset = Cheese.objects.all()
    def to_representation(self, value):
        # expand the related cheese into its full serialized form
        serializer = CheeseSerializer(value)
        return serializer.data
    def to_internal_value(self, data):
        # incoming payload is {'name': ...}; 404 when the cheese is unknown
        cheese_name = data.get('name')
        cheese = get_object_or_404_customed(Cheese, name=cheese_name)
        return cheese
|
23,789 | de9f6cee75601bbd4d5593426222c2e38cf198b6 | # Description: This program ask the user to input the total amount of numbers they would
# like to add to the list. The user will be prompted to enter their numbers.
# Upon completion, the program will display all the numbers in the list, the minimum
# and maximum number, total of all numbers, and the average number in the list.
# Date 06/08/2021
# CSC-121 M3Lab – Lists
# Anthony Orengo
#Pseudocode
#1. Define main function and declare and initialize variables, sentinel, and list.
#2. Define function to get numbers from user.
#3. Define function to display main menu to user.
#4. In the main function, create a loop for the main menu. Get input from the user and
# and parse the string to an int. You will use the list and number as arguments in the
# get number function.
#5. In the get number function, add a loop to get 10 numbers from the user. Each number
#will be inserted into the list until 10 numbers have been added.
#6. After the user inputs all numbers into the list, the numbers in the list will be
#displayed to the user.
#7. The min/max number, total of numbers, and average number will also be displayed to
# the user.
#Function to get numbers and add them to a list
def get_numbers(total_num, number_list):
    """Prompt for `total_num` numbers, appending each to `number_list`.

    Invalid entries are rejected and re-prompted (the slot is not consumed).
    The same list object is returned for convenience.
    """
    i = 1
    while i <= total_num:
        num = input(f"Enter number choice {i} --> ")
        # BUG FIX: the old isalpha() check missed mixed junk like "1a" or
        # "--", which then crashed float(); parse and catch instead.
        try:
            value = float(num)
        except ValueError:
            print("Invalid input! You must enter a number!")
            continue
        # insert(i, value) on a list of length i-1 was an append in disguise
        number_list.append(value)
        i += 1
    return number_list
#Function to display main menu to user
def main_menu():
    """Print the two-option main menu."""
    print("\n".join(("Main Menu",
                     "1. Enter Numbers",
                     "2. Exit")))
def main():
    """Menu loop: collect user numbers, show list stats, repeat until Exit."""
    #Declare and initialize sentinel
    menu_loop = 0
    #Declare list
    number_list = []
    total_num = ""
    while menu_loop == 0:
        #Displays main menu to user
        main_menu()
        #Get input from user
        choice = input("Make a selection --> ")
        if choice == "1":
            #Get total numbers from user
            total_num = input("How many numbers do you want to add to the list? --> ")
            #Determine if the user entered a digit or letter
            if total_num.isalpha():
                print("You must enter a number")
            else:
                #Parse string to int
                total_num = int(total_num)
                #Call get_numbers function
                get_numbers(total_num, number_list)
                #Display all numbers in list
                print("\nNumber List\n******************")
                for element in number_list:
                    x = 1  # NOTE(review): unused — looks like leftover code
                    print(f"{element}")
                #Display the smallest number in the list to the user
                print(f"Smallest Number: {min(number_list)}")
                #Display the largest number in the list to the user
                print(f"Largest Number: {max(number_list)}")
                #Display the total sum of all numbers in the list to the user
                print(f"Total: {sum(number_list)}")
                #Display the average number in the list to the user
                print(f"Average Number: {sum(number_list)/len(number_list)}\n")
        elif choice == "2":
            #Ends the program
            menu_loop += 1
        else:
            print("Invalid option! Try again!")
        number_list.clear()
#Call main function
main()
|
23,790 | e65d48c823ced8934999b7a61bff2b77a61636e0 | f1=open("/home/suhaiz/PycharmProjects/luminarpython/fileinputout/place_temp",'r')
f2=open("final_temp",'w')
# Track the maximum temperature seen per place. Values are kept as the
# original strings so the output format is unchanged.
max_by_place = {}
for item in f1:
    fields = item.rstrip("\n").split(",")
    print(fields)
    place, temp = fields[0], fields[1]
    # BUG FIX: the old code compared the raw strings, so "9" ranked above
    # "10"; compare numerically instead.
    if place not in max_by_place or float(temp) > float(max_by_place[place]):
        max_by_place[place] = temp
print(max_by_place)
for place, temp in max_by_place.items():
    f2.write(str(place))
    f2.write("\t")
    f2.write(str(temp))
    f2.write("\n")
# close both handles so the output is actually flushed to disk
f2.close()
f1.close()
|
23,791 | 2969672182b61f6e2d6d5a9f79be8a8437b7002f | from .tagger import Tagger
from .xtagger import XTagger
|
23,792 | 4d344cc42c1a52d309c6db592934a36803d8a385 | from numpy import linspace, asarray
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
f = lambda y, lamb: lamb*y  # right-hand side of the test ODE y' = lamb*y
g_impl = lambda y, y0, lamb, dt: y0+dt*lamb*y-y  # backward-Euler residual, root at y_{k+1}
def explicit_euler(y0, lamb, dt, N):
    """Forward-Euler integration of y' = lamb*y from y0; returns N+1 samples."""
    values = [y0]
    for _ in range(N):
        prev = values[-1]
        # y_{k+1} = y_k + h * f(y_k), with f(y) = lamb*y inlined
        values.append(prev + dt * (lamb * prev))
    return asarray(values)
def implicit_euler(y0, lamb, dt, N):
    """Backward-Euler integration of y' = lamb*y from y0; returns N+1 samples.

    Each step solves y_{k+1} = y_k + dt*lamb*y_{k+1} numerically with fsolve.
    """
    values = [y0]
    for _ in range(N):
        prev = values[-1]
        # BUG FIX: fsolve returns a length-1 ndarray; the old code appended
        # the array itself, so the result mixed scalars and arrays and
        # asarray produced a ragged / (N+1, 1)-shaped result. Take the
        # scalar so the output is a clean 1-D array.
        step = fsolve(lambda y: prev + dt * lamb * y - y, prev)
        values.append(step[0])
    return asarray(values)
# Drive the explicit scheme on a stiff case (lamb=-10) and plot the result.
N = 1000
T = 10
lamb = -10
dt = 1.0*T/N
y0 = 1
t_arr = linspace(0,T,N+1)
# explicit euler + plot
y_ee = explicit_euler(y0, lamb, dt, N)
print y_ee[0:10]  # NOTE(review): Python 2 print statement — this script predates py3
plt.figure()
plt.title(r'Stability Explicit Euler', fontsize=26.)
plt.plot(t_arr, y_ee, label=r'$\lambda=%d$, $h=%.2f$'%(lamb,dt))
plt.xlabel(r'$t$')
plt.ylabel(r'$y(t)$')
plt.legend(loc=2)
plt.tight_layout()
plt.show() |
23,793 | 5c9ed3fa70abdae080276ca3b51255814df1d861 | # THE CHALLENGE
# Our football team finished the championship. The result of each match look like "x:y". Results of all matches are recorded in the array.
#
# For example: ["3:1", "2:2", "0:1", ...]
#
# Write a function that takes such list and counts the points of our team in the championship. Rules for counting points for each match:
#
# if x>y - 3 points
# if x<y - 0 point
# if x=y - 1 point
# Notes:
#
# there are 10 matches in the championship
# 0 <= x <= 4
# 0 <= y <= 4
# Solution 1
def total_points(scorelist):
    """Sum championship points for "x:y" match results.

    Win (x > y) scores 3, draw (x == y) scores 1, loss scores 0.
    """
    total = 0
    for score in scorelist:
        # BUG FIX: `elif x=y:` was a syntax error (assignment, not equality).
        # Also parse to int so comparison is numeric, not lexicographic.
        x_str, y_str = score.split(':')
        x, y = int(x_str), int(y_str)
        if x > y:
            total += 3
        elif x == y:
            total += 1
    return total
|
23,794 | dc29e3b8ad43657405e40b1f8c3922a93a2cafcf | # ______ ___
# ____ _5._W.. ______ _
# ____ _5._G.. ______ QP..
#
#
# c_ Window QW..
# ___ -
# s___. -
# sWT.. *Using Labels
# sG.. 50, 50, 500, 500)
# ?
#
# ___ ui
# image _ QL.. ?
# ?.sP.. QP.. images/nuke.png
# ?.m.. 150 50
# remove_button _ QPB.. *Remove ?
# ?.m.. 150 220
# ?.c__.c.. ?
# show_button _ QPB.. *Show ?
# ?.c__.c.. ?
# ?.m.. 260, 220
# ?
#
# ___ remove_img
# i___.cl..
#
# ___ show_img
# i__.s..
#
#
# ___ main
# app _ ?
# window _ ?
# ___.e.. ?.e.
#
#
# __ _____ __ ______
# ?
|
23,795 | 2446674ffd8cf0ef60e67bab330a00cc929827dc | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QGridLayout, QLabel, QLineEdit, QPushButton
from PyQt5 import QtWidgets
class SpiControlWidget(QtWidgets.QWidget):
    """Small form for sending two SPI IDs: two line edits plus a Send button."""
    def __init__(self, parent):
        # BUG FIX: the constructor was named __int__, so it was never invoked
        # and the widget was constructed without any of its children.
        super().__init__(parent=parent)
        self.spi_id1 = QLineEdit()
        self.spi_id2 = QLineEdit()
        self.spi_ok = QPushButton("Send")
        spi_layout = QGridLayout()
        self.setLayout(spi_layout)
        self.setContentsMargins(0, 5, 0, 0)
        # row 0: ID 1 label + edit; row 1: ID 2 label + edit + Send button
        spi_layout.addWidget(QLabel("ID 1"), 0, 0, Qt.AlignCenter)
        spi_layout.addWidget(QLabel("ID 2"), 1, 0, Qt.AlignCenter)
        spi_layout.addWidget(self.spi_id1, 0, 1, Qt.AlignCenter)
        spi_layout.addWidget(self.spi_id2, 1, 1, Qt.AlignCenter)
        spi_layout.addWidget(self.spi_ok, 1, 2, Qt.AlignCenter)
|
23,796 | 3f3e63c425bfb18cfd56d0c61ba09c91e24da049 | """budgetingapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from django.views.generic.base import TemplateView
from django.urls import include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
from . import views
import users.views
import accounts.views
import budget.views
import goals.views
import resources.views
# path('favicon.ico', RedirectView.as_view(url=staticfiles_storage.url('img/favicon.ico'))),
# Permanent redirect for the favicon; NOTE(review): registered twice below
# (exact path and regex) — one of the two routes is redundant.
favicon_view = RedirectView.as_view(url='/static/img/favicon.ico', permanent=True)
urlpatterns = [
    path('admin/', admin.site.urls),
    path('users/', include('users.urls')),
    path('users/', include('django.contrib.auth.urls')),
    path('favicon.ico/', favicon_view),
    re_path(r'^favicon\.ico$', favicon_view),
    path('', views.home, name='home'),
    path('home/', views.home),
    path('welcome/', TemplateView.as_view(template_name='welcome.html'), name='welcome'),
    path('onboarding/', views.onboarding, name='onboarding'),
    path('budget/', budget.views.budgetpage, name='budget'),
    path('goals/<pk>/', goals.views.GoalUpdate.as_view()),
    path('new-goals/', goals.views.GoalCreate.as_view(), name='newgoal'),
    path('new-income/', budget.views.IncomeCreate.as_view(), name='newincome'),
    path('new-budget-expense/', budget.views.BudgetExpenseCreate.as_view(), name='newbudgetexpense'),
    path('new-account/', accounts.views.AccountCreate.as_view(), name='newaccount'),
    path('test/', views.test),
    path('accounts/', accounts.views.accounts_page, name='accounts'),
    path('resources/', resources.views.ResourceListView.as_view(), name='resources'),
    path('budget/expense/<pk>/', budget.views.BudgetExpenseUpdate.as_view(), name="updatebudgetexpense"),
    path('budget/income/<pk>/', budget.views.IncomeUpdate.as_view(), name= 'updateincome'),
    path('accounts/<pk>', accounts.views.AccountUpdate.as_view(), name= 'updateaccount')
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
23,797 | 21ab075fe21563898c0fbca02ab5e997eb343259 |
def strs_to_ints(param):
return [int(x) for x in param]
def read_matrix():
n = int(input())
return [strs_to_ints(input().split(", ")) for _ in range(n)]
def get_even_elements(row):
    """Keep only the even values of `row`, preserving their order."""
    return list(filter(lambda value: value % 2 == 0, row))
def get_even_matrix(matrix):
    """Apply the even-value filter to every row of `matrix`."""
    return list(map(get_even_elements, matrix))
def print_result(even_matrix):
    # Show the filtered matrix as a Python list-of-lists literal.
    print(even_matrix)
matrix=read_matrix()
even_matrix=get_even_matrix(matrix)
print_result(even_matrix) |
23,798 | be620cc9d4d2be8d1e89e69d53c09ec9c951ae87 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
__author__ = "huangy"
__copyright__ = "Copyright 2016, The metagenome Project"
__version__ = "1.0.0-dev"
import os, re
from ConfigParser import ConfigParser
from workflow.util.useful import mkdir,parse_group,rmdir_my,gettime,share_mothod,const
bin_gene_profile_default_dir = "%s/06.gene_profile/" % const.bin_default_dir
bin_cag_default_dir = "%s/CAG.V1.0" % const.bin_default_dir
bin_mgs_default_dir = "%s/MGS.V2.0" % const.bin_default_dir
tool_default_dir = const.tool_default_dir
command_default = const.command_default
def gene_profile_pre(config, name):
print gettime("start 06.gene_profile_pre")
commands = []
work_dir = '%s/%s' % (os.path.dirname(config), name)
mkdir(work_dir)
commands.append("cp %s/../01.clean_reads/clean_reads.list %s/clean_reads.list"\
%(work_dir,work_dir))
commands.append("## build index")
mkdir("%s/database/"%work_dir)
commands.append("#ln -s %s/../05.gene_catalog/gene_catalog.fna %s/database/"%(work_dir,work_dir))
commands.append("#2bwt-builder %s/database/gene_catalog.fna"%work_dir)
commands.append("python %s/genebuild.py -d %s" % (bin_gene_profile_default_dir, work_dir))
commands.append("/data_center_03/USER/zhongwd/bin/qsge --queue all.q --memery 10G --jobs 1 --prefix BI --lines 1 shell/2bwt_builder.sh")
commands.append("## calculate gene abundance")
commands.append("perl %s/geneabundance.pl %s/clean_reads.list database/gene_catalog.fna %s/../05.gene_catalog/gene_catalog.length %s/"\
%(bin_gene_profile_default_dir,work_dir,work_dir,work_dir))
commands.append("/data_center_03/USER/zhongwd/bin/qsge --queue all.q --memery 10G --jobs 50 --prefix MA --lines 1 shell/match.sh")
commands.append("/data_center_03/USER/zhongwd/bin/qsge --queue all.q --memery 10G --jobs 10 --prefix AB --lines 2 shell/abun.sh")
print gettime("end 06.gene_profile_pre")
return commands
def gene_profile(config, name):
    """Collect the shell commands for the 06.gene_profile analysis step.

    Merges the per-sample abundance files into a single gene profile, then
    for every sample grouping named in the [param] "group" option of
    *config* emits alpha-diversity, anosim and differential-abundance
    commands.  Side effects: creates the per-group output directories.
    Returns the command list; nothing is executed here.
    """
    print gettime("start 06.gene_profile")
    commands = []
    work_dir = '%s/%s' % (os.path.dirname(config), name)
    # Merge every alignment/*/*abundance file into one gene x sample table.
    commands.append("## calculate gene abundance")
    commands.append("ls %s/alignment/*/*abundance |perl %s/profile.pl - > %s/gene.profile"\
        %(work_dir,tool_default_dir,work_dir))
    commands.append("## 01.alpha diversity")
    mkdir("%s/01.alpha_diversity/"%work_dir)
    commands.append(command_default + "perl %s/shannon.pl %s/gene.profile %s/01.alpha_diversity/gene.alpha.div.tsv"\
        %(tool_default_dir,work_dir,work_dir))
    # Group names are read from the [param] section, split on whitespace
    # and/or commas.
    config_gene = ConfigParser()
    config_gene.read(config)
    group = re.split("\s+|\t+|,\s*|,\t+",config_gene.get("param","group"))
    mkdir("%s/group" % work_dir)
    for subgroup_name in group:
        # Each grouping has its own sample list under material/.
        subgroup = '%s/material/%s_group.list' % (os.path.dirname(config), subgroup_name)
        # The counts below are only consumed by the commented-out MGS/CAG
        # steps at the end of this loop.
        sample_num_in_groups,min_sample_num_in_groups,sample_num_total,group_num=parse_group(subgroup)
        commands.append("## ----------------------------------%s----------------------"%(subgroup_name))
        # 01: per-group alpha-diversity plot (+ PNG via ImageMagick convert).
        work_dir_601 = "%s/group/%s/01.alpha_diversity"%(work_dir,subgroup_name)
        mkdir(work_dir_601)
        commands.append("##01.alpha diversity")
        commands.append(command_default + "Rscript %s/gene.alpha.div.R %s/01.alpha_diversity/gene.alpha.div.tsv %s %s/gene.alpha.div.pdf"\
            %(bin_gene_profile_default_dir,work_dir,subgroup,work_dir_601))
        commands.append("convert -density 300 %s/gene.alpha.div.pdf %s/gene.alpha.div.png"\
            %(work_dir_601,work_dir_601))
        # Added 2018-10-25: per-group gene-count plot.
        # NOTE(review): "gene.num.tvs" looks like a typo for ".tsv", but the
        # same spelling is used by both the writer and the reader below, so
        # it is internally consistent -- left unchanged.
        commands.append(command_default + "python %s/gene.alpha.div.py -i %s/01.alpha_diversity/gene.alpha.div.tsv -g %s -o %s/gene.num.tvs"\
            %(bin_gene_profile_default_dir,work_dir,subgroup,work_dir_601))
        commands.append(command_default + "Rscript %s/01.alpha_diversity.gene.num.R %s/gene.num.tvs %s/gene.num.pdf"\
            %(bin_gene_profile_default_dir,work_dir_601,work_dir_601))
        commands.append("convert -density 300 %s/gene.num.pdf %s/gene.num.png"\
            %(work_dir_601,work_dir_601))
        # 02: anosim between groups.
        work_dir_602 = "%s/group/%s/02.anosim"%(work_dir,subgroup_name)
        mkdir(work_dir_602)
        commands.append("##02.anosim")
        commands.append(command_default + "python %s/t04_anosim.py -i %s/gene.profile -g %s -o %s"\
            %(tool_default_dir,work_dir,subgroup,work_dir_602))
        # commands.extend(share_mothod(tool_default_dir,work_dir,"gene.profile",subgroup,subgroup_name,\
        #     numlist=["02","03","04","05","06","07","08","09"]))
        # commands.append("##03.LefSe")
        # work_dir_603 = "%s/group/%s/03.LEfSe/" % (work_dir,subgroup_name)
        # mkdir(work_dir_603)
        # commands.append(command_default + "python %s/603_LEfSe.py -i %s/gene.profile -l /data_center_03/USER/huangy/soft/LEfSe_lzb -g %s -o %s --LDA 2"\
        #     %(bin_gene_profile_default_dir,work_dir,subgroup,work_dir_603))
        # LEfSe disabled because R raised "Error: protect(): protection stack overflow".
        # 03: differential abundance (t08_diff.py) + boxplots + q-value plot.
        commands.append("##03.diff")
        work_dir_603 = "%s/group/%s/03.diff" % (work_dir,subgroup_name)
        mkdir(work_dir_603)
        commands.append(command_default + "python %s/t08_diff.py -i %s/gene.profile -g %s -o %s/"\
            %(tool_default_dir,work_dir,subgroup,work_dir_603))
        # Reformat columns 1,7,8,9 of the filtered marker table into a
        # simple stats TSV with a fresh header line.
        commands.append('''awk -F "\\t" '{print $1"\\t"$7"\\t"$8"\\t"$9}' %s/diff.marker.filter.tsv|sed '1a Gene ID\\tP-value\\tQ-value\\tGroup'|sed '1d' > %s/diff.stat.tsv''' % (work_dir_603,work_dir_603))
        commands.append("##03.diff/diff_boxplot")
        commands.append(command_default + "python %s/t09_diff_boxplot.py -i %s/diff.marker.filter.profile.tsv -p %s/diff.marker.filter.tsv -g %s -o %s/diff_boxplot/"\
            %(tool_default_dir,work_dir_603,work_dir_603,subgroup,work_dir_603))
        commands.append("## diff_qvalue")
        commands.append("Rscript %s/qvalue.R %s/diff.marker.tsv %s/qvalue.pdf"\
            %(bin_gene_profile_default_dir,work_dir_603,work_dir_603))
        commands.append("convert -density 300 %s/qvalue.pdf %s/qvalue.png"\
            %(work_dir_603,work_dir_603))
        # if sample_num_in_groups>5 and sample_num_total>20 and group_num==2:
        #     work_dir_604 = "%s/group/%s/04.mgs"%(work_dir,subgroup_name)
        #     mkdir(work_dir_604)
        #     #os.system("cp %s/MGS.V2.0/MGS.cfg %s/MGS.cfg"%(const.bin_default_dir,work_dir_604))
        #     commands.append("python %s/full_MGS.py -p %s/gene.profile -d %s -g %s --threshold 0"%\
        #         (bin_mgs_default_dir,work_dir,work_dir_604,subgroup))
        #     mkdir("%s/taxonomy/"%work_dir_604)
        #     commands.append("python %s/mgs_taxonomy.py -i %s/pathway/ -g %s/../05.gene_catalog/gene_catalog.fna -o %s/taxonomy/ --group %s"\
        #         %(bin_mgs_default_dir,work_dir_604,work_dir,work_dir_604,subgroup))
        #     #TODO mgs
        # if sample_num_in_groups>5 and sample_num_total>20:
        #     work_dir_605 = "%s/group/%s/05.cag" % (work_dir,subgroup_name)
        #     mkdir(work_dir_605)
        #     #os.system("cp %s/CAG.V1.0/CAG.cfg %s/CAG.cfg"%(const.bin_default_dir,work_dir_605))
        #     commands.append("python %s/full_CAG.py -p %s/gene.profile -d %s -g %s "%\
        #         (bin_cag_default_dir,work_dir,work_dir_605,subgroup))
        #     mkdir("%s/taxonomy"%work_dir_605)
        #     commands.append("python %s/cag_taxonomy.py -i %s/outfile/cag -g %s/../05.gene_catalog/gene_catalog.fna -o %s/taxonomy/"\
        #         %(bin_cag_default_dir,work_dir_605,work_dir,work_dir_605))
        #     #TODO cag
    print gettime("end 06.gene_profile")
    return commands
|
23,799 | 9cb9ba049f1ee29821761f63003e722c3ca64de8 | import pymongo
uri = "mongodb://mrd198:12051998d@ds335957.mlab.com:35957/c4e32thuvien"
client = pymongo.MongoClient(uri)
# db = client.c4e32thuvien
collection = client.sach
def get_all():
    """Return every document in the book collection as a list."""
    cursor = collection.find()
    return [doc for doc in cursor]
def insert_data_book(name, price):
    """Insert one book document with the given name and price.

    Bug fix: the original called collection.insert_data_book(...), which is
    not a pymongo Collection method -- attribute access on a Collection
    returns a sub-collection object, and calling it raises TypeError.
    insert_one() is the correct API for inserting a single document.
    """
    collection.insert_one({'name': name, 'price': price})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.