index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,000 | b7da13a879d7062b7e90dbc50c1789cf359282f4 | """
@summary: Custom HTML parser using HTMLParser library, store the result in a tree
@author: Alexandre Bisiaux
"""
from HTMLParser import HTMLParser
from xml.etree import cElementTree as etree
import requests
"""
Get text of a tree node
@param node: The tree node
@return: The text presents in the node Ex : <a> text </a> => return text
"""
def get_text_recursive(node):
    """Collect every piece of text contained in a tree node.

    Concatenates the node's own text, the recursively gathered text of its
    children (joined with single spaces), and the node's tail.
    Ex: <a> text </a> => returns " text ".
    """
    children_text = ' '.join(get_text_recursive(child) for child in node)
    return (node.text or '') + children_text + (node.tail or '')
"""
Parse the content of a html page
@param url: URL of the page
@return: An XML tree of the page content
"""
def parse(url):
    """Download and parse the content of an HTML page.

    @param url: URL of the page
    @return: root element of an XML tree of the page content
    """
    response = requests.get(url, headers={'Content-Type' : 'application/octet-stream',
    'User-Agent' : 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'}, allow_redirects=True)
    parser = MyParser()
    # Use response.text instead of response.content.decode(response.encoding):
    # when the server sends no charset, response.encoding is None and the
    # manual decode raises; response.text falls back to a guessed encoding.
    parser.feed(response.text)
    return parser.close()
"""
@summary: Custom HTML parser using HTMLParser library, store the result in a tree
"""
class MyParser(HTMLParser):
    """HTML parser that mirrors the parsed document into an element tree.

    Every start tag, text chunk and end tag encountered while feeding is
    forwarded to an etree.TreeBuilder, so close() yields an XML tree of
    the page content.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.tb = etree.TreeBuilder()

    def handle_data(self, data):
        """Record character data found between two tags."""
        self.tb.data(data)

    def handle_starttag(self, tag, attrs):
        """Record an opening tag together with its attributes."""
        self.tb.start(tag, dict(attrs))

    def handle_endtag(self, tag):
        """Record a closing tag."""
        self.tb.end(tag)

    def close(self):
        """Finish parsing and return the root element of the built tree."""
        HTMLParser.close(self)
        return self.tb.close()
990,001 | ad3d5ab73a1bd99dc0e960266d713ddd169d887e | import torch
from torch.autograd import Variable
from scipy.misc import imresize
from imageio import imread, imsave
import numpy as np
from path import Path
import argparse
from tqdm import tqdm
from models import PoseExpNet
from inverse_warp import pose_vec2mat
from utils import tensor2array
from PIL import Image
# Command-line interface: positional model checkpoint plus dataset/output options.
parser = argparse.ArgumentParser(description='Script for PoseNet testing with corresponding groundTruth from KITTI Odometry',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("pretrained_posenet", type=str, help="pretrained PoseNet path")
parser.add_argument("--img-height", default=128, type=int, help="Image height")
parser.add_argument("--img-width", default=416, type=int, help="Image width")
parser.add_argument("--no-resize", action='store_true', help="no resizing is done")
# NOTE(review): --min-depth/--max-depth declare no type=, so command-line
# values arrive as strings while the defaults are numeric — confirm intended.
parser.add_argument("--min-depth", default=1e-3)
parser.add_argument("--max-depth", default=80)
parser.add_argument("--dataset-dir", default='.', type=str, help="Dataset directory")
parser.add_argument("--sequences", default=['2011_09_26_drive_0002_02'], type=str, nargs='*', help="sequences to test")
parser.add_argument("--output-dir", default=None, type=str, help="Output directory for saving predictions in a big 3D numpy file")
parser.add_argument("--img-exts", default=['png', 'jpg', 'bmp'], nargs='*', type=str, help="images extensions to glob")
parser.add_argument("--rotation-mode", default='euler', choices=['euler', 'quat'], type=str)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def main():
    """Run PoseNet over KITTI snippets and optionally save the first
    explainability-mask channel of each snippet as a PNG.

    Reads all configuration from the module-level argparse parser.
    """
    args = parser.parse_args()
    from kitti_eval.exp_mask_utils import test_framework_KITTI as test_framework

    # Load the checkpoint on CPU; the network itself is moved to `device`.
    weights = torch.load(args.pretrained_posenet, map_location=lambda storage, loc: storage)
    # conv1 sees seq_length stacked RGB frames, hence input channels / 3.
    seq_length = int(weights['state_dict']['conv1.0.weight'].size(1) / 3)
    pose_net = PoseExpNet(nb_ref_imgs=seq_length - 1, output_exp=True).to(device)
    pose_net.load_state_dict(weights['state_dict'], strict=False)

    dataset_dir = Path(args.dataset_dir)
    framework = test_framework(dataset_dir, args.sequences, seq_length)
    print('{} snippets to test'.format(len(framework)))

    # Only prepare an output directory when one was requested.  The original
    # code referenced `output_dir` unconditionally in the loop below and
    # raised a NameError whenever --output-dir was omitted.
    output_dir = None
    if args.output_dir is not None:
        output_dir = Path(args.output_dir)
        output_dir.makedirs_p()

    for j, sample in enumerate(tqdm(framework)):
        imgs = sample['imgs']
        h, w, _ = imgs[0].shape
        if (not args.no_resize) and (h != args.img_height or w != args.img_width):
            # NOTE(review): scipy.misc.imresize is removed in scipy >= 1.3;
            # consider a PIL-based replacement if the environment is updated.
            imgs = [imresize(img, (args.img_height, args.img_width)).astype(np.float32) for img in imgs]
        imgs = [np.transpose(img, (2, 0, 1)) for img in imgs]

        ref_imgs = []
        for i, img in enumerate(imgs):
            img = torch.from_numpy(img).unsqueeze(0)
            img = ((img / 255 - 0.5) / 0.5).to(device)  # normalize to [-1, 1]
            if i == len(imgs) // 2:
                tgt_img = img  # the middle frame of the snippet is the target
            else:
                ref_imgs.append(img)

        exp_mask, poses = pose_net(tgt_img, ref_imgs)

        if output_dir is not None:
            # First explainability channel; move to CPU before .numpy(),
            # which fails for CUDA tensors otherwise.
            mask = exp_mask.data[:, 0, :, :].cpu().squeeze().numpy()
            mask = (255 * mask).astype(np.uint8)
            imsave(output_dir / '{}_disp{}'.format(j, '.png'), mask)


if __name__ == '__main__':
    main()
|
990,002 | 728c3c3e5bbc9536a8a8b859e0bc62ac1c0ef0ae | """Reply API Call handler for snapshots."""
from pymongo import Connection
from pymongo.errors import InvalidId
from bson.objectid import ObjectId
from datetime import datetime
from cgi import escape, parse_multipart, parse_header, FieldStorage
from urlparse import parse_qs
from json import dumps
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from urllib import unquote
# from PIL
from PIL import Image
import cStringIO
# author will likely come from the auth session.
# NOTE(review): an earlier comment claimed "data" was missing from this list
# and manually checked, but "data" is present below — the manual check may
# now be redundant; confirm against the validation in handle_new_post.
POST_REQUIRED_PARAMS = ("tags", "author", "code", "data", "reply_to")
POST_OPTIONAL_PARAMS = ("location",)
# XXX: Move into neato library.
def string_from_interwebs(input_value):
    """Sanitize a raw query-string value.

    Percent-decodes *input_value* and HTML-escapes the result so it is
    safe to echo back in markup.
    """
    decoded = unquote(input_value)
    return escape(decoded)
######### This code is now in two files; need to move into library
def cleanup_post(post):
    """Given a dictionary of a post, return a new dictionary for output as
    JSON.

    ObjectId and datetime fields are stringified and the raw Mongo "_id"
    key is replaced by "id".  The input dictionary is left untouched (the
    previous implementation aliased it and mutated the caller's dict,
    contradicting this docstring).
    """
    post_data = dict(post)  # shallow copy so the caller's document survives
    post_data["id"] = str(post["_id"])
    post_data["author"] = str(post["author"])
    post_data["created"] = str(post["created"].ctime())
    del post_data["_id"]
    if "reply_to" in post:
        post_data["reply_to"] = str(post["reply_to"])
    if "repost_of" in post:
        post_data["repost_of"] = str(post["repost_of"])
    return post_data
def update_post(reply_to, connection):
    """Increment the reply counter of the post identified by *reply_to*."""
    posts = connection['test']['posts']
    posts.update({"_id" : ObjectId(reply_to)},
                 {"$inc" : {"num_replies" : 1}})
def adjust_image_resolution(data):
    """Given image data, shrink it to no greater than 1024 for its larger
    dimension.

    Produces three JPEG renditions of *data*: "large" (<=1280px),
    "default" (<=1024px) and "tiny" (<=120px).

    :param data: raw image bytes
    :return: dict mapping size label to JPEG bytes, or None when the data
        cannot be decoded as an image.
    """
    # One (label, max dimension) pair per rendition; the original code
    # repeated the open/thumbnail/save sequence three times verbatim.
    sizes = (("large", 1280), ("default", 1024), ("tiny", 120))
    results = {}
    try:
        for label, dimension in sizes:
            output = cStringIO.StringIO()
            # Re-open from the original bytes each time: thumbnail()
            # modifies the image in place.
            im = Image.open(cStringIO.StringIO(data))
            im.thumbnail((dimension, dimension), Image.ANTIALIAS)
            # could run entropy check to see if GIF makes more sense given an item.
            im.save(output, 'JPEG')
            results[label] = output.getvalue()
    except IOError:
        return None
    return results
def insert_data_into_storage(name, image_dict):
    """Given file contents, insert into S3.

    Uploads the three renditions produced by adjust_image_resolution under
    data/<name>_lrg.jpg, data/<name>.jpg and data/<name>_tiny.jpg.
    Returns True on success, False when any upload raises.
    """
    # if S3Connection supports __enter__, and __exit__ then we can use with.
    # NOTE(review): aws_access_key_id / aws_secret_access_key are not defined
    # in this file chunk — presumably module-level credentials; confirm.
    conn = S3Connection(aws_access_key_id, aws_secret_access_key)
    bucket = conn.get_bucket('hyperionstorm')
    k_lrg = Key(bucket)
    k_lrg.key = "data/%s_lrg.jpg" % name
    k_dft = Key(bucket)
    k_dft.key = "data/%s.jpg" % name
    k_tiny = Key(bucket)
    k_tiny.key = "data/%s_tiny.jpg" % name
    try:
        k_lrg.set_contents_from_string(image_dict["large"])
        k_dft.set_contents_from_string(image_dict["default"])
        k_tiny.set_contents_from_string(image_dict["tiny"])
    except Exception, exp:
        # Best-effort: any upload failure aborts and reports False.
        conn.close()
        return False
    conn.close()
    return True
def verify_post(post_id, connection):
    """Return True when *post_id* refers to an existing post."""
    posts = connection['test']['posts']
    try:
        found = posts.find_one({"_id" : ObjectId(post_id)})
    except InvalidId:
        return False
    return found is not None
def verify_author(author, connection):
    """Return True when *author* refers to an existing user."""
    users = connection['test']['users']
    try:
        found = users.find_one({"_id" : ObjectId(author)})
    except InvalidId:
        return False
    return found is not None
def insert_post_into_db(post):
    """Persist *post* in the local MongoDB 'posts' collection.

    :param post: post document, or None
    :return: the id of the inserted document, or None when *post* is None
    """
    if post is None:
        return None
    connection = Connection('localhost', 27017)
    # need to wrap with try, except
    entry = connection['test']['posts'].insert(post)
    connection.close()
    return entry
def handle_new_post(post_data, user_agent, remote_addr):
    """Validate an incoming snapshot-reply POST and build its post document.

    Does not handle multi-part data properly.
    Also, posts don't quite exist as they should.

    :param post_data: cgi.FieldStorage with the request fields
    :param user_agent: HTTP User-Agent header value
    :param remote_addr: client address
    :return: (raw image data, post document) on success, (None, None) on any
        validation failure.
    """
    # Reject requests missing any required field.
    for required in POST_REQUIRED_PARAMS:
        if required not in post_data:
            return None, None
    # "code" acts as a shared-secret anti-spam token.
    try:
        value = int(string_from_interwebs(post_data.getfirst("code", "")))
    except ValueError:
        return None, None
    if value != 98098098098:
        return None, None
    # not yet safe to use.
    location = post_data.getfirst("location", "")
    tags = string_from_interwebs(post_data.getfirst("tags"))
    author = post_data.getfirst("author")
    # NOTE(review): each tag is passed through string_from_interwebs again
    # even though `tags` was already sanitized above — double-escaping risk.
    split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(",")] # temporary
    if len(split_tags) > 3:
        return None, None
    author_id = string_from_interwebs(author).strip()
    with Connection('localhost', 27017) as connection:
        reply_to = string_from_interwebs(post_data.getfirst("reply_to"))
        if not verify_author(author_id, connection):
            return None, None
        if not verify_post(reply_to, connection):
            return None, None
        # if reply then it's verified.
        # XXX: I need to make a standard object structure for this, so that I don't
        # have to update separate things.
        post = {"viewed" : 0,
                "comments" : 0,
                "flagged" : 0,
                "disliked" : 0,
                "enjoyed" : 0,
                "num_replies" : 0,
                "num_reposts" : 0,
                "content-type" : "image", # need to pull this from the mime lookup
                "file" : "placeholder",
                "user_agent" : user_agent,
                "remote_addr" : remote_addr,
                "created" : datetime.utcnow(),
                "location" : string_from_interwebs(location).strip(),
                "author" : ObjectId(author_id),
                "reply_to" : ObjectId(reply_to),
                "tags" : split_tags}
        # Bump the reply counter of the post being replied to.
        update_post(reply_to, connection)
        return post_data.getfirst("data"), post
def bad_request(start_response):
    """Start a 400 Bad Request response with an empty body.

    :param start_response: WSGI start_response callable
    :return: the (empty) response body iterable
    """
    body = []
    headers = [('Content-type', 'text/html'),
               ('Content-Length', str(sum(map(len, body))))]
    start_response('400 Bad Request', headers)
    return body
def application(environ, start_response):
    """Entry point for all wsgi applications.

    Accepts only POSTs: validates the form data, resizes the uploaded
    image, stores the post in MongoDB and the renditions in S3, then
    returns the new post id as JSON.
    """
    output = []
    # Only POST is supported for this endpoint.
    if environ['REQUEST_METHOD'] == 'GET':
        return bad_request(start_response)
    ##### parameters are never safe
    # NOTE(review): a missing CONTENT_LENGTH raises KeyError here, which is
    # not caught — only ValueError is; confirm the server always sets it.
    try:
        content_length = int(environ['CONTENT_LENGTH'])
    except ValueError:
        return bad_request(start_response)
    # maximum file length is 5MiB
    if content_length > 5*1024*1024:
        return bad_request(start_response)
    user_agent = environ.get('HTTP_USER_AGENT', '')
    remote_addr = environ.get('REMOTE_ADDR', '')
    # add CONTENT_TYPE check
    # FieldStorage is not the best solution because it reads the entire thing
    # into memory; what I need to do is get parse_headres and parse_multipart
    # working.
    # Blank QUERY_STRING so FieldStorage only parses the request body.
    post_env = environ.copy()
    post_env['QUERY_STRING'] = ''
    post = \
        FieldStorage(
            fp=environ['wsgi.input'],
            environ=post_env,
            keep_blank_values=True)
    raw_data, processed_post = handle_new_post(post, user_agent, remote_addr)
    if raw_data is None: # if data is fine, processed_post is fine.
        return bad_request(start_response)
    images = adjust_image_resolution(raw_data)
    if images is None: # should all be good.
        bad_request(start_response)
        return output
    entry = insert_post_into_db(processed_post)
    if entry is None:
        return bad_request(start_response)
    success = insert_data_into_storage(str(entry), images)
    if success is False:
        # need to delete the database entry.
        return bad_request(start_response)
    output.append(dumps({"id" : str(entry)}, indent=4))
    # send results
    output_len = sum(len(line) for line in output)
    start_response('200 OK',
                   [('Content-type', 'application/json'),
                    ('Content-Length', str(output_len))])
    return output
|
990,003 | 6290590fe88890aca747d5940fd9e48012462c29 | import ants
import numpy as np
import h5py
import time
from zebrafish_io import lif_read_stack, save
import multiprocessing as mp
def pipeline(file_path, fixed_index, moving_indices, rigid_out_file, atlas_file, transform_out_file, out_diff_file):
    """Full registration pipeline for a zebrafish LIF stack.

    Reads the stack, computes the diffeomorphic transform onto the atlas,
    then morphs the time stack with that transform.  The rigid
    pre-registration step is currently disabled.
    """
    stack, spacing = lif_read_stack(file_path)
    #rigid=rigid_registration(stack, fixed_index, moving_indices,rigid_out_file,spacing)
    #get_diffeomorphic_transform(atlas_file,rigid_out_file,transform_out_file)
    get_diffeomorphic_transform(stack, atlas_file, transform_out_file)
    # Pass the path the transform was just written to; the original call used
    # an undefined name `saved_transform`, raising a NameError at runtime.
    morph_timestack(atlas_file, rigid_out_file, transform_out_file, out_diff_file, spacing)
def rigid_registration(stack, fixed_index, moving_indices, out_file, spacing):
    """Rigidly register time points onto a fixed reference frame.

    :param stack: 4D array (t, z, y, x)
    :param fixed_index: index of the reference time point
    :param moving_indices: indices of the time points to align
    :param out_file: HDF5 path to save the aligned stack to
    :param spacing: voxel spacing, forwarded to save()
    :return: float32 stack whose frame 0 is the fixed frame, followed by
        the registered moving frames.
    """
    fixed = stack[fixed_index, :, :, :].astype(np.float32)
    moving = stack[moving_indices, :, :, :].astype(np.float32)
    # Slot 0 holds the fixed frame, slots 1..n the registered moving frames.
    shape = (moving.shape[0] + 1, moving.shape[1], moving.shape[2], moving.shape[3])
    rigid = np.empty(shape, dtype=np.float32)
    rigid[0, :, :, :] = fixed
    fixed = ants.from_numpy(fixed)
    start = time.time()
    # NOTE: the original loop ran range(moving.shape[0] - 1), skipping the
    # last moving frame and leaving the final slot of `rigid` uninitialized
    # (np.empty garbage); iterate over every moving frame instead.
    for timepoint in range(moving.shape[0]):
        print('Iteration: ', timepoint)
        moving_im = ants.from_numpy(moving[timepoint, :, :, :])
        rigid_transform = ants.registration(fixed=fixed, moving=moving_im,
                                            type_of_transform='Rigid')
        rigid[timepoint + 1, :, :, :] = rigid_transform['warpedmovout'].numpy()
    end = time.time()
    print('time: ', end - start)
    save(out_file, rigid, spacing)
    return rigid
def get_diffeomorphic_transform(stack, atlas_file, transform_out_file):
    """Compute a SyN (diffeomorphic) transform onto the atlas and save it.

    NOTE(review): the *stack* parameter is immediately shadowed below — the
    function actually reads the module-level `rigid_out_file` instead of
    using the argument; confirm which input was intended.
    """
    stack = h5py.File(rigid_out_file, 'r')
    # The first time point of the rigidly aligned stack is the moving image.
    moving = np.array(stack['ITKImage']['0']['VoxelData'])[0, :, :, :]
    moving = ants.from_numpy(moving)
    atlas = np.array(h5py.File(atlas_file, 'r')['warped_image'])
    atlas = ants.from_numpy(atlas)
    start = time.time()
    diffeomorphic_transform = ants.registration(fixed=atlas, moving=moving,
        type_of_transform='SyN', syn_metric='CC', grad_step=0.25,
        flow_sigma=6, total_sigma=0.5, reg_iterations=[200, 200, 200, 200, 10], syn_sampling=2)
    end = time.time()
    print('time: ', end - start)
    # NOTE(review): 'fwdtransforms' is typically a list of transform files;
    # passing the whole list to ants.write_transform may not be valid — verify.
    transform = diffeomorphic_transform['fwdtransforms']
    ants.write_transform(transform, transform_out_file)
#data = hf.get('warped_image')[()]
def morph_timestack(atlas_file, rigid_out_file, saved_transform, out_diff_file, spacing):
    """Apply a previously computed transform to the rigid time stack.

    :param atlas_file: HDF5 file holding the 'warped_image' atlas (fixed image)
    :param rigid_out_file: HDF5 file with the rigidly aligned stack
    :param saved_transform: path of the transform file to apply
    :param out_diff_file: HDF5 path to save the morphed stack to
    :param spacing: voxel spacing, forwarded to save()
    """
    print('Morphing')
    stack = h5py.File(rigid_out_file, 'r')
    atlas = np.array(h5py.File(atlas_file, 'r')['warped_image'])
    fixed = ants.from_numpy(atlas)
    # Only the first 10 time points are morphed.
    moving = np.array(stack['ITKImage']['0']['VoxelData'])[0:10, :, :, :]
    # Use the transform passed by the caller rather than a hard-coded path,
    # and call apply_transforms through the ants module — the original body
    # referenced a bare, undefined name `apply_transforms` (NameError).
    transformlist = [saved_transform]
    out_diff = ants.apply_transforms(fixed, moving, transformlist,
                                     interpolator='welchWindowedSinc')
    save(out_diff_file, out_diff.numpy(), spacing)
# Script configuration: input LIF stack, reference frame and output paths.
file_path = '/Users/koesterlab/Documents/Maria/files/fish37_6dpf_medium.lif'
# Reference time point and the range of frames to align against it.
fixed_index = 176
moving_indices = range(177, 206)
#176
#195
#rigid_out_file='/Users/koesterlab/Documents/Maria/files/fish37_6dpf_medium_rigid_176_206.h5'
rigid_out_file = '/Users/koesterlab/Documents/Maria/files/fish37_6dpf_medium_rigid_0_10.h5'
atlas_file = '/Users/koesterlab/Documents/Maria/files/test_16_atlas_highres_z.h5'
transform_out_file = '/Users/koesterlab/Documents/Maria/files/transform.mat'
out_diff_file = '/Users/koesterlab/Documents/Maria/files/fish37_6dpf_medium_diff_0_10.h5'
# Run the whole pipeline on import/execution of this module.
pipeline(file_path, fixed_index, moving_indices, rigid_out_file, atlas_file, transform_out_file, out_diff_file)
|
990,004 | 80a5fb4b4c78da82be2296751919b04f57b91c6b | from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_3
from ryu.base import app_manager
from ryu.topology import switches
from ryu.topology import event as TopologyEvent
from ryu.controller import dpset
from ryu.controller.controller import Datapath
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import ether_types, packet, ethernet, ipv4, tcp
from ryu.ofproto import inet
from shared import ofprotoHelper
from modules.db.databaseEvents import EventDatabaseQuery, SetNodeInformationEvent
from modules.db.databasemodule import DatabaseModule
from modules.cdnmodule.models.node import Node
from modules.cdnmodule.models.ServiceEngine import ServiceEngine
from modules.cdnmodule.models.RequestRouter import RequestRouter
from modules.cdnmodule.models.TCPSession import TCPSesssion
from modules.cdnmodule.models.HandoverSesssion import HandoverSession
from modules.cdnmodule.cdnEvents import EventCDNPipeline, EventClosestSeReply, EventClosestSeRequest
from modules.forwardingmodule.forwardingEvents import EventForwardingPipeline, EventShortestPathRequest, EventShortestPathReply
from modules.forwardingmodule.models import Path
from modules.wsendpointmodule.ws_endpoint import WsCDNEndpoint
import networkx as nx
from ryu import cfg
# Global Ryu configuration registry; the 'cdn' option group is added in
# CDNModule.__init__.
CONF = cfg.CONF
class CDNModule(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
opts = [
cfg.IntOpt('table',
default=1,
help='Table to use for CDN Handling'),
cfg.IntOpt('cookie',
default=201,
help='cookie to install'),
cfg.IntOpt('node_priority',
default=1,
help='Priority to install CDN engine matching flows'),
cfg.IntOpt('handover_priority',
default=2,
help='Priority to use for handover flows')
]
_CONTEXTS = {
'switches': switches.Switches,
'dpset': dpset.DPSet,
'db': DatabaseModule
}
def __init__(self, *args, **kwargs):
super(CDNModule, self).__init__(*args, **kwargs)
CONF.register_opts(self.opts, group='cdn')
self.switches = kwargs['switches'] # type: switches.Switches
self.dpset = kwargs['dpset'] # type: dpset.DPSet
self.db = kwargs['db'] # type: DatabaseModule
self.ofHelper = ofprotoHelper.ofProtoHelperGeneric()
self.nodes = []
self.shortestPathtoSefromIPCache = []
def _install_cdnengine_matching_flow(self, datapath, ip, port):
"""
Installs flow to match based on IP, port to datapath to send to controller
:param datapath: dp_id
:param ip: IP of http engine
:param port: port of http engine
:return:
"""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_dst=ip,
tcp_dst=port)
actions = [
parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)
]
self.ofHelper.add_flow(datapath, CONF.cdn.node_priority, match, actions, CONF.cdn.table, CONF.cdn.cookie)
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_src=ip,
tcp_src=port)
actions = [
parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)
]
self.ofHelper.add_flow(datapath, CONF.cdn.node_priority, match, actions, CONF.cdn.table, CONF.cdn.cookie)
def _install_rewrite_dst_action_out(self, datapath, ip_src, port_src, ip_dst_old, port_dst_old, ip_dst_new, port_dst_new, new_dst_mac, out_port):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_src=ip_src, tcp_src=port_src, ipv4_dst=ip_dst_old, tcp_dst=port_dst_old)
actions = [
parser.OFPActionSetField(eth_dst=new_dst_mac),
parser.OFPActionSetField(ipv4_dst=ip_dst_new),
parser.OFPActionSetField(tcp_dst=port_dst_new),
parser.OFPActionOutput(out_port)
]
self.ofHelper.add_flow(datapath, CONF.cdn.handover_priority, match, actions, CONF.cdn.table, 0, None, 1, 0)
def _install_rewrite_src_action_out(self, datapath, ip_src_old, port_src_old, ip_src_new, port_src_new, ip_dst, port_dst, new_src_mac, out_port):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_src=ip_src_old, tcp_src=port_src_old, ipv4_dst=ip_dst, tcp_dst=port_dst)
actions = [
parser.OFPActionSetField(eth_src=new_src_mac),
parser.OFPActionSetField(ipv4_src=ip_src_new),
parser.OFPActionSetField(tcp_src=port_src_new),
parser.OFPActionOutput(out_port)
]
self.ofHelper.add_flow(datapath, CONF.cdn.handover_priority, match, actions, CONF.cdn.table, 0, None, 1, 0)
def _install_rewrite_dst_action_with_tcp_sa_out(self, datapath, ip_src, port_src, ip_dst_old, port_dst_old, ip_dst_new, port_dst_new, inc_seq, inc_ack, new_dst_mac, out_port):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_src=ip_src, tcp_src=port_src, ipv4_dst=ip_dst_old, tcp_dst=port_dst_old)
actions = [
parser.OFPActionSetField(eth_dst=new_dst_mac),
parser.OFPActionSetField(ipv4_dst=ip_dst_new),
parser.OFPActionSetField(tcp_dst=port_dst_new),
parser.OFPActionIncSeq(inc_seq),
parser.OFPActionIncAck(inc_ack),
parser.OFPActionOutput(out_port)
]
self.ofHelper.add_flow(datapath, CONF.cdn.handover_priority, match, actions, CONF.cdn.table, 0, None, 1, 0)
def _install_rewrite_src_action_with_tcp_sa_out(self, datapath, ip_src_old, port_src_old, ip_src_new, port_src_new, ip_dst, port_dst, inc_seq, inc_ack, new_src_mac, out_port):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_src=ip_src_old, tcp_src=port_src_old, ipv4_dst=ip_dst, tcp_dst=port_dst)
actions = [
parser.OFPActionSetField(eth_src=new_src_mac),
parser.OFPActionSetField(ipv4_src=ip_src_new),
parser.OFPActionSetField(tcp_src=port_src_new),
parser.OFPActionIncSeq(inc_seq),
parser.OFPActionIncAck(inc_ack),
parser.OFPActionOutput(out_port)
]
self.ofHelper.add_flow(datapath, CONF.cdn.handover_priority, match, actions, CONF.cdn.table, 0, None, 1, 0)
def _mitigate_tcp_session(self, datapath, src_ip, dst_ip, src_port, dst_port):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=inet.IPPROTO_TCP, ipv4_src=src_ip, tcp_src=src_port, ipv4_dst=dst_ip, tcp_dst=dst_port)
self.ofHelper.add_drop_flow(datapath, 2, match, CONF.cdn.table, 1, 0)
def _generate_rsts(self, hsess):
"""
:param hsess:
:type hsess: HandoverSession
:return:
"""
hsess_eth = hsess.eth
hsess_ip = ipv4.ipv4(version=hsess.ip.version, header_length=5, tos=hsess.ip.tos, total_length=0,
identification=hsess.ip.identification, flags=hsess.ip.flags, offset=hsess.ip.offset,
ttl=hsess.ip.ttl, proto=hsess.ip.proto, csum=0, src=hsess.ip.src, dst=hsess.ip.dst, option=hsess.ip.option)
hsess_ptcp = tcp.tcp(src_port=hsess.ptcp.src_port, dst_port=hsess.ptcp.dst_port,
seq=hsess.src_seq + hsess.request_size + 1, ack=hsess.dst_seq + 1,
offset=0, bits=(tcp.TCP_ACK | tcp.TCP_RST), window_size=hsess.ptcp.window_size, csum=0,
urgent=hsess.ptcp.urgent,
option=None)
hsess_rst = packet.Packet()
hsess_rst.add_protocol(hsess_eth)
hsess_rst.add_protocol(hsess_ip)
hsess_rst.add_protocol(hsess_ptcp)
hsess_rst.serialize()
sess = hsess.handoverPair
sess_eth = ethernet.ethernet(dst=sess.eth.src, src=sess.eth.dst, ethertype=sess.eth.ethertype)
sess_ip = ipv4.ipv4(version=sess.ip.version, header_length=5, tos=sess.ip.tos, total_length=0,
identification=sess.ip.identification, flags=sess.ip.flags, offset=sess.ip.offset,
ttl=sess.ip.ttl, proto=sess.ip.proto, csum=0, src=sess.ip.dst, dst=sess.ip.src, option=sess.ip.option)
sess_ptcp = tcp.tcp(src_port=sess.ptcp.dst_port, dst_port=sess.ptcp.src_port,
seq=sess.dst_seq + 1, ack=sess.src_seq + sess.request_size + 1,
offset=0, bits=(tcp.TCP_ACK | tcp.TCP_RST), window_size=sess.ptcp.window_size, csum=0,
urgent=sess.ptcp.urgent,
option=None)
sess_rst = packet.Packet()
sess_rst.add_protocol(sess_eth)
sess_rst.add_protocol(sess_ip)
sess_rst.add_protocol(sess_ptcp)
sess_rst.serialize()
return hsess_rst, sess_rst
def _update_nodes(self):
self.nodes = self.db.getData().getNodes()
for node in self.nodes:
if node.type == 'rr':
node.setHandoverCallback(self.get_closest_se_to_ip)
if node.type == 'se':
node.setHandoverCallback(self.perform_handover)
node.setRSTCallback(self.rsttcpSessioncb)
node.setMitigateCallback(self.mitigatecb)
def mitigatecb(self, datapath_id, src_ip, dst_ip, src_port, dst_port):
dp = self.switches.dps.get(datapath_id)
self._mitigate_tcp_session(dp, src_ip, dst_ip, src_port, dst_port)
def rsttcpSessioncb(self, sess):
hsess = sess.handoverPair
#
# Send Reset packets towards request router
hsess_rst, sess_rst = self._generate_rsts(hsess)
self.ofHelper.do_packet_out(hsess_rst, hsess.parentNode.datapath_obj, hsess.parentNode.port_obj)
self.ofHelper.do_packet_out(sess_rst, hsess.parentNode.datapath_obj, hsess.parentNode.port_obj)
def perform_handover(self, sess):
"""
:param sess:
:type sess: TCPSesssion
:return:
"""
hsess = sess.handoverPair # type: HandoverSession
self.logger.debug('DOING HANDOVER IN CDN MODULE. DOING MAGIC')
self.logger.debug('Client established connection to RR:')
self.logger.debug('{}:{} -> {}.{}'.format(hsess.ip.src, hsess.ptcp.src_port, hsess.ip.dst, hsess.ptcp.dst_port))
self.logger.debug('CDN Engine decided to handover this session to service engine:')
self.logger.debug(str(hsess.serviceEngine))
self.logger.debug('RR pre established a Sesssion to the chosen SE:')
self.logger.debug('{}:{} -> {}.{}'.format(sess.ip.src, sess.ptcp.src_port, sess.ip.dst, sess.ptcp.dst_port))
self.logger.debug('Source SEQ on client-RR leg: %d', hsess.src_seq)
self.logger.debug('Dest SEQ on client-RR leg: %d', hsess.dst_seq)
self.logger.debug('Source SEQ on RR-SE leg: %d', sess.src_seq)
self.logger.debug('Dest SEQ on RR-SE leg: %d', sess.dst_seq)
self.logger.debug('Now do the maths and handover those')
spev = EventShortestPathRequest(hsess.ip.src, hsess.serviceEngine.ip)
spev.dst = 'ForwardingModule'
spev.sync = True
pathres = self.send_request(spev) # type: EventShortestPathReply
if pathres.path:
# Rewrite DST IP and PORT from Client to RR -> SE on ACC switch in FW direction
p = pathres.path.fw[1] # 2nd entry on forwardp path
dp = self.switches.dps.get(p['src'])
self._install_rewrite_dst_action_out(dp, hsess.ip.src, hsess.ptcp.src_port, hsess.ip.dst, hsess.ptcp.dst_port, hsess.serviceEngine.ip, hsess.serviceEngine.port, sess.eth.dst, p['port'])
p = pathres.path.bw[0] # 1st entry on backward path
dp = self.switches.dps.get(p['src'])
self._install_rewrite_src_action_out(dp, hsess.serviceEngine.ip, hsess.serviceEngine.port, hsess.ip.dst, hsess.ptcp.dst_port, hsess.ip.src, hsess.ptcp.src_port, hsess.eth.dst, p['port'])
## Calculate seq ack diffs
# Sinc_cs = ((2^32) + (Srs - Scr) + (Rrs - Rcr)) %% (2^32)
self.logger.debug('REQUEST SIZE RS %d CR %d', sess.request_size, hsess.request_size)
seq_cs = ((2 ** 32) + (sess.src_seq - hsess.src_seq) + (sess.request_size - hsess.request_size)) % (2 ** 32)
self.logger.debug('SEQ CS %d', seq_cs)
# Ainc_sc = ((2 ^ 32) - Sinc_cs) % % (2 ^ 32)
ack_sc = ((2 ** 32) - seq_cs) % (2**32)
self.logger.debug('ACK SC %d', ack_sc)
# Sinc_sc = ((2 ^ 32) + (Src - Ssr)) % % (2 ^ 32)
seq_sc = ((2 ** 32) + (hsess.dst_seq - sess.dst_seq)) % (2 ** 32)
self.logger.debug('SEQ SC %d', seq_sc)
# Ainc_cs = ((2 ^ 32) - Sinc_sc) % % (2 ^ 32)
ack_cs = ((2 ** 32) - seq_sc) % (2 ** 32)
self.logger.debug('ACK CS %d', ack_cs)
# Rewrite SRC IP and PORT from Client -> RR to SE and modify SEQ ACK on CR sw in FW direction
# FM 4
p = pathres.path.fw[-1]
dp = self.switches.dps.get(p['src'])
self._install_rewrite_src_action_with_tcp_sa_out(dp, hsess.ip.src, hsess.ptcp.src_port, sess.ip.src, sess.ptcp.src_port, hsess.serviceEngine.ip, hsess.serviceEngine.port, seq_cs, ack_cs, sess.eth.src, p['port'])
# Rewrite DST IP and PORT from SE to RR -> Client and modify SEQ ACK on CR sw in BW direction
# FM 2
p = pathres.path.bw[-2]
dp = self.switches.dps.get(p['src'])
self._install_rewrite_dst_action_with_tcp_sa_out(dp, hsess.serviceEngine.ip, hsess.serviceEngine.port, sess.ip.src, sess.ptcp.src_port, hsess.ip.src, hsess.ptcp.src_port, seq_sc, ack_sc, hsess.eth.src, p['port'])
# Mitigate all corresponding communication from to request router
self._mitigate_tcp_session(hsess.parentNode.datapath_obj, sess.ip.src, sess.ip.dst, sess.ptcp.src_port, sess.ptcp.dst_port)
self._mitigate_tcp_session(hsess.parentNode.datapath_obj, sess.ip.dst, sess.ip.src, sess.ptcp.dst_port, sess.ptcp.src_port)
self.logger.info('Mitigating communication from RR towards network')
hsess.state = HandoverSession.STATE_HANDOVERED
sess.state = TCPSesssion.STATE_HANDOVERED
self.logger.info('Handovered and path Installed from Client %s to Service Engine %s', hsess.ip.src, hsess.serviceEngine.ip)
else:
self.logger.error('Failed to retrieve path from Client to SE')
def get_closest_se_to_ip(self, ip):
cache = dict(self.shortestPathtoSefromIPCache)
if ip in cache:
return cache[ip]
switches = [dp for dp in self.switches.dps]
links = [(link.src.dpid, link.dst.dpid, {'port': link.src.port_no}) for link in self.switches.links]
g = nx.DiGraph()
g.add_nodes_from(switches)
g.add_edges_from(links)
for mac, host in self.switches.hosts.iteritems():
if ip in host.ipv4:
g.add_node(ip)
g.add_edge(ip, host.port.dpid)
g.add_edge(host.port.dpid, ip, port=host.port.port_no)
for node in self.nodes:
if node.type == 'se' and node.ip in host.ipv4:
g.add_node(str(node.ip))
g.add_edge(str(node.ip), host.port.dpid)
g.add_edge(host.port.dpid, str(node.ip), port=host.port.port_no)
lengths = nx.single_source_shortest_path_length(g, ip)
lensrted = sorted(lengths.items(), key=lambda x: x[1])
for distance in lensrted:
for node in self.nodes:
if node.type == 'se' and node.ip == distance[0]:
self.shortestPathtoSefromIPCache.append((ip, node))
return node
return None
@set_ev_cls(EventClosestSeRequest, None)
def get_closest_se_to_ip_public(self, ev):
node = self.get_closest_se_to_ip(ev.ip) # type: ServiceEngine
reply = EventClosestSeReply(node.ip, ev.src)
self.reply_to_request(ev, reply)
@set_ev_cls(TopologyEvent.EventHostAdd, MAIN_DISPATCHER)
def _host_in_event(self, ev):
"""
This function if responsible for installing matching rules sending to controller if a SE or an RR joins the
network
List of RRs and SEs are defined in the database.json file
:param ev:
:type ev: TopologyEvent.EventHostAdd
:return:
"""
self._update_nodes()
if not self.nodes:
return
for node in self.nodes:
if node.ip in ev.host.ipv4:
datapath = self.dpset.get(ev.host.port.dpid)
node.setPortInformation(ev.host.port.dpid, datapath, ev.host.port.port_no, ev.host.port)
self._install_cdnengine_matching_flow(datapath, node.ip, node.port)
self.logger.info('New Node connected the network. Matching rules were installed ' + node.__str__())
def _get_node_from_packet(self, ip, ptcp):
"""
:param ip:
:type ip: ipv4.ipv4
:param ptcp:
:type ptcp: tcp.tcp
:return:
"""
for node in self.nodes:
if node.ip == ip.dst and node.port == ptcp.dst_port:
return node
if node.ip == ip.src and node.port == ptcp.src_port:
return node
return None
def _remove_tcp_options(self, pkt):
eth = pkt.get_protocols(ethernet.ethernet)[0] # type: ethernet.ethernet
ip = pkt.get_protocols(ipv4.ipv4)[0] # type: ipv4.ipv4
ptcp = pkt.get_protocols(tcp.tcp)[0] # type: tcp.tcp
if(ptcp.has_flags(tcp.TCP_SYN)):
new_ip = ipv4.ipv4(version=ip.version, header_length=5, tos=ip.tos, total_length=0,
identification=ip.identification, flags=ip.flags, offset=ip.offset,
ttl=ip.ttl, proto=ip.proto, csum=0, src=ip.src, dst=ip.dst, option=ip.option)
# Remove TCP Timestamp and SACK permitted Option as it prevents the handover from working
new_options = []
for option in ptcp.option: # type: tcp.TCPOption
if not option.kind in [tcp.TCP_OPTION_KIND_TIMESTAMPS, tcp.TCP_OPTION_KIND_SACK_PERMITTED]:
new_options.append(option)
new_ptcp = tcp.tcp(src_port=ptcp.src_port, dst_port=ptcp.dst_port, seq=ptcp.seq, ack=ptcp.ack,
offset=0, bits=ptcp.bits, window_size=ptcp.window_size, csum=0, urgent=ptcp.urgent,
option=new_options)
new_pkt = packet.Packet()
new_pkt.add_protocol(eth)
new_pkt.add_protocol(new_ip)
new_pkt.add_protocol(new_ptcp)
new_pkt.serialize()
return new_pkt
else:
return pkt
@set_ev_cls(EventCDNPipeline, None)
def cdnHandlingRequest(self, ev):
    """
    Handles the incoming TCP sessions towards RR or SE.
    We only should receive packets destined to a CDN engine (SE or RR) over TCP.
    # TODO, cases that are not valid (not tcp, host not existing). Situations like this might happen on Controller restart
    :param ev: pipeline event carrying the raw packet, match and datapath
    :type ev: EventCDNPipeline
    :return: None (forwards the packet via the forwarding pipeline event)
    """
    pkt = packet.Packet(ev.data)
    datapath = ev.datapath  # type: Datapath
    # Removes all TCP options on SYN packets that break the handover.
    pkt = self._remove_tcp_options(pkt)
    eth = pkt.get_protocols(ethernet.ethernet)[0]  # type: ethernet.ethernet
    ip = pkt.get_protocols(ipv4.ipv4)[0]  # type: ipv4.ipv4
    ptcp = pkt.get_protocols(tcp.tcp)[0]  # type: tcp.tcp
    node = self._get_node_from_packet(ip, ptcp)  # type: Node
    if node:
        # Let the node's session handler process (and possibly rewrite) the
        # packet, then hand it over to the forwarding pipeline for pkt-out.
        pkt, sess = node.handlePacket(pkt, eth, ip, ptcp)  # type: packet.Packet, TCPSesssion
        fwev = EventForwardingPipeline(datapath=datapath, match=ev.match, data=pkt.data, doPktOut=True)
        self.send_event(name='ForwardingModule', ev=fwev)
        if sess is not None:
            # A returned session signals that it must be reset/torn down.
            self.rsttcpSessioncb(sess)
            # fixed log typo: "ass sess" -> "as sess"
            self.logger.info('We are sending 2 RSTs to the RR as sess was returned')
    else:
        # fixed duplicated word "packet packet" in the log message
        self.logger.error('Could not find node dest / source for the incoming packet {}'.format(ip))
|
990,005 | 30414317aeb4427615c3f05d90b306160128e33c | menuItems = {
'Wings': 0,
'Cookies': 0,
'Spring Rolls': 0,
'Salmon': 0,
'Steak': 0,
'Meat Tornado': 0,
'A Literal Garden': 0,
'Ice Cream': 0,
'Cake': 0,
'Pie': 0,
'Coffee': 0,
'Tea': 0,
'Unicorn Tears': 0,
}
# Interactive order loop for the Snakes Cafe: show the menu, take orders
# until the user types "quit", then print an order summary.
response = ''
print('**************************************\n** Welcome to the Snakes Cafe! **\n** Please see our menu below. **\n** **\n** To quit at any time, type "quit" **\n**************************************\n\nAppetizers\n----------\nWings\nCookies\nSpring Rolls\n\nEntrees\n-------\nSalmon\nSteak\nMeat Tornado\nA Literal Garden\n\nDesserts\n--------\nIce Cream\nCake\nPie\n\nDrinks\n------\nCoffee\nTea\nUnicorn Tears\n')
while True:
    print('***********************************\n** What would you like to order? **\n***********************************')
    # .title() normalizes case so "quit"/"QUIT" and item names all match.
    choice = input('> ').title()
    if choice == 'Quit':
        break
    if choice not in menuItems:
        print(f"** Apologies but we don't sell {choice} here. **")
        continue
    menuItems[choice] += 1
    count = menuItems[choice]
    quantity = 'orders' if count > 1 else 'order'
    grammar = 'have' if count > 1 else 'has'
    print(f'** {count} {quantity} of {choice} {grammar} been added to your meal **')
print('** Your order is as follows: **')
for dish, qty in menuItems.items():
    if qty > 0:
        print(f'{qty} {dish}')
print('** End of your order. **')
990,006 | c8454367d3c8a098a03214a677cc700c72034bf3 | """
g $$$$$$$$$$$$$$$$$$$$
f $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
t $$$$$$$$$$
o $$$$$$$$$$$$
"""
def histogram_maker(dictionary_of_sales):
    """Print one histogram row per company: its first letter, then one '$' per sale."""
    for name, amount in dictionary_of_sales.items():
        print(f"{name[0]} {amount * '$'}")
# Quarterly sales figures used to demo histogram_maker (Q2 adds 'amazon').
sales_Q1 = {
    'google': 20,
    'facebook': 25,
    'twitter': 10,
    'offline': 12,
}
sales_Q2 = {
    'google': 18,
    'facebook': 22,
    'twitter': 18,
    "amazon": 10,
    'offline': 13,
}
histogram_maker(sales_Q1)
# separator line between the two quarterly histograms
print("--------------")
histogram_maker(sales_Q2)
# Original hand-written version, kept for reference:
# print('g ' + sales['google'] * '$')
# print('f ' + sales['facebook'] * '$')
# print('t ' + sales['twitter'] * '$')
# print('o ' + sales['offline'] * '$')
990,007 | df003a5b4d735e95b75c0ad2c7ee03f616ab5f54 | from django.shortcuts import render, get_object_or_404, redirect
from .forms import InvestmentForm, ProductForm, ExpenseForm
from .models import Investment, Expense, Product
def investment_stat(request):
    """Render the list of all investments."""
    return render(request, 'investments.html', {'investments': Investment.objects.all()})


def investment_details(request, investment_id):
    """Render a single investment; 404 when the id does not exist."""
    investment = get_object_or_404(Investment, pk=investment_id)
    return render(request, 'investment-details.html', {'investment': investment})


def expenses(request):
    """Render the list of all expenses."""
    return render(request, 'expenses.html', {"expenses": Expense.objects.all()})


def products(request):
    """Render the list of all products."""
    return render(request, 'products.html', {"products": Product.objects.all()})
def add_investment(request):
    """Create an investment from a submitted form; show a blank form on GET."""
    if request.method != 'POST':
        return render(request, 'add_investment.html', {'form': InvestmentForm()})
    form = InvestmentForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('/')
    print('form invalid')
    # Re-render with the bound form so validation errors are shown.
    return render(request, 'add_investment.html', {'form': form})


def edit_investment(request, investment_id):
    """Edit an existing investment; keep rendering the form until it validates."""
    investment = get_object_or_404(Investment, pk=investment_id)
    form = InvestmentForm(request.POST or None, instance=investment)
    if not form.is_valid():
        return render(request, 'add_investment.html', {'form': form})
    form.save()
    return redirect('/')


def delete_investment(request, investment_id):
    """Delete an investment and return to the list.

    NOTE(review): this deletes on a plain GET — no POST/CSRF guard; confirm
    that is intended.
    """
    get_object_or_404(Investment, pk=investment_id).delete()
    return redirect('/')
def add_product(request):
    """Create a product from a submitted form; show a blank form on GET."""
    if request.method != 'POST':
        return render(request, 'add_product.html', {'form': ProductForm()})
    form = ProductForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('/products/')
    print('form invalid')
    # Re-render with the bound form so validation errors are shown.
    return render(request, 'add_product.html', {'form': form})


def edit_product(request, product_id):
    """Edit an existing product; keep rendering the form until it validates."""
    product = get_object_or_404(Product, pk=product_id)
    form = ProductForm(request.POST or None, instance=product)
    if not form.is_valid():
        return render(request, 'add_product.html', {'form': form})
    form.save()
    return redirect('/products/')


def delete_product(request, product_id):
    """Delete a product and return to the list.

    NOTE(review): deletes on a plain GET — no POST/CSRF guard; confirm intended.
    """
    get_object_or_404(Product, pk=product_id).delete()
    return redirect('/products/')
def add_expense(request):
    """Create an expense from a submitted form; show a blank form on GET."""
    if request.method != 'POST':
        return render(request, 'add_expense.html', {'form': ExpenseForm()})
    form = ExpenseForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('/expenses/')
    print('form invalid')
    # Re-render with the bound form so validation errors are shown.
    return render(request, 'add_expense.html', {'form': form})


def edit_expense(request, expense_id):
    """Edit an existing expense; keep rendering the form until it validates."""
    expense = get_object_or_404(Expense, pk=expense_id)
    form = ExpenseForm(request.POST or None, instance=expense)
    if not form.is_valid():
        return render(request, 'add_expense.html', {'form': form})
    form.save()
    return redirect('/expenses/')


def delete_expense(request, expense_id):
    """Delete an expense and return to the list.

    NOTE(review): deletes on a plain GET — no POST/CSRF guard; confirm intended.
    """
    get_object_or_404(Expense, pk=expense_id).delete()
    return redirect('/expenses/')
|
990,008 | 7f9297b8136428c3f2c4d0b52d24a7fc8c16d2b1 | import sys
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.pyplot import plot,draw,figure,cla,xlim
from time import sleep
from numpy import loadtxt
# Python 2 script: live-plot a whitespace-delimited data file, refreshing
# once a second.  Column 0 is treated as the x axis (time); every other
# column is drawn as a separate trace.
if len(sys.argv) < 2:
    print "USAGE: %s file.dat"%sys.argv[0]
    sys.exit(1)
f=figure()
f.show()
while 1:
    # Reload the whole file each tick; only the last (up to) 100 rows are shown.
    d = loadtxt(sys.argv[1])
    cla()
    # One plot() call per data column, windowed to the last 100 samples.
    map(lambda i: plot(d[-100 if d.shape[0]>100 else 0:,0],d[-100:,i]), range(1,d.shape[1]))
    xlim(d[-100 if d.shape[0]>100 else 0,0],d[-1,0])
    draw()
    sleep(1)
|
990,009 | 27bc2ba5a21e4d88de7761c5504c4acab7f66c7b | import matplotlib.pyplot as plt
from gwpy.time import Time
from gwpy.timeseries import (TimeSeries, TimeSeriesDict)
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import pylab
print "Import Modules Success"
#Start-End Time (one week of data, UTC)
start = Time('2014-03-24 00:00:00', format='iso', scale='utc')
end = Time('2014-03-30 00:00:00', format='iso', scale='utc')
print start.iso, start.gps
print end.iso, end.gps
channels_M0 = []
channels_M1 = []
# Optics with an M0 (top-mass) stage vs an M1 stage.
OPTICS_M0 = ['ETMX','ITMX','ITMY']
OPTICS_M1 = ['BS', 'MC1', 'MC2', 'MC3', 'PR2', 'PR3', 'PRM', 'SR2', 'SR3', 'SRM' ]
# Degrees of freedom: Pitch, Roll, Yaw.
DOFS = ['P', 'R', 'Y']
TRENDS = ['min','mean','max']
#f=open('test.txt','wb')
# Build the minute-trend channel names: for each optic, 3 DOFs x 3 trends,
# so each optic occupies a contiguous run of 9 indices in the list.
# loop over list of optics
for optic_m0 in OPTICS_M0:
    # loop over list of degrees-of-freedom
    for dof in DOFS:
        for trend in TRENDS:
            channels_M0.append('L1:SUS-%s_M0_DAMP_%s_INMON.%s,m-trend' % (optic_m0, dof, trend))
# loop over list of optics
for optic_m1 in OPTICS_M1:
    # loop over list of degrees-of-freedom
    for dof in DOFS:
        for trend in TRENDS:
            channels_M1.append('L1:SUS-%s_M1_DAMP_%s_INMON.%s,m-trend' % (optic_m1, dof, trend))
# Fetch all M1 trends for the week in one call.
data_m1 = TimeSeriesDict.fetch(channels_M1, start, end, verbose=True)
# Plot the MC1 M1 damping min/mean/max minute-trends for each DOF (P, R, Y).
# The original repeated the same ~16 lines three times; this loop is the
# deduplicated equivalent with identical output files.
#
# channels_M1 layout: each optic occupies 9 consecutive indices (3 DOFs x
# 3 trends); BS is 0-8, so MC1 is 9-17 in the order
# P(min,mean,max), R(min,mean,max), Y(min,mean,max).
for dof, base in [('P', 9), ('R', 12), ('Y', 15)]:
    # Reference each trend to its value at sample index 720 so traces start near zero.
    trend_min = data_m1[channels_M1[base]] - data_m1[channels_M1[base]][[720]]
    trend_mean = data_m1[channels_M1[base + 1]] - data_m1[channels_M1[base + 1]][[720]]
    trend_max = data_m1[channels_M1[base + 2]] - data_m1[channels_M1[base + 2]][[720]]
    fig = trend_min.plot()
    ax = fig.gca()
    ax.plot(trend_mean, label='Mean')
    ax.plot(trend_max, label='Max')
    ax.set_ylabel('Amplitude (urad)')
    pylab.ylim([-200, 200])
    L = ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3, fancybox=True, shadow=True)
    # The min trace was drawn by .plot() without a label; fix its legend entry.
    L.get_texts()[0].set_text('Min')
    # Minor ticks: 10 urad on y, one hour (3600 s) on x.
    ax.yaxis.set_minor_locator(MultipleLocator(10))
    ax.xaxis.set_minor_locator(MultipleLocator(3600))
    fig.save('L1-SUS-MC1_M1_DAMP_%smmm_INMON.png' % dof)
|
990,010 | e3c0ebcd8241a16d94ad1df1fe48d60870470c09 | # from django.contrib.auth.models import User
class TemplateTest:
    """Toy object whose attributes can be rewritten by name via modify()."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def modify(self, x, y):
        """Set the attribute named *x* to *y* by writing the instance dict directly."""
        self.__dict__[x] = y

    # Marks modify() as state-mutating — presumably so Django's template
    # engine refuses to call it from templates; confirm against usage.
    modify.alters_data = True
class A(object):
    """Mapping-like wrapper around an internal dict.

    Missing keys yield None instead of raising KeyError.
    """

    def __init__(self):
        self._map = {}

    def __getitem__(self, key):
        # BUG FIX: the original read ``self.map``, which does not exist
        # (the attribute is ``self._map``), so every lookup raised
        # AttributeError.
        return self._map.get(key)
def foo():
    """Placeholder no-op; returns None."""
    return None
if __name__ == '__main__':
    # Smoke entry point: just runs the no-op helper.
    foo()
990,011 | 8a75d3671ad30944d0eca47082d4387eb4d10395 | from django.shortcuts import render,redirect
from user.forms import *
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as builtInLogin, logout as builtInLogout
# Create your views here.
def selectUser(request):
    """Render the page where a visitor picks which account type to register."""
    return render(request, 'sign-up-choose.html')
def adminRegister(request):
    """Register an admin ("boss") account and log the new user in.

    GET (or invalid input) renders the registration form; a valid POST
    creates the user plus its Admin profile and redirects to the admin
    dashboard.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        admin = AdminForm(request.POST)
        # FIX: validate BOTH forms before saving — the original only checked
        # the registration form, so an invalid AdminForm crashed on save().
        if form.is_valid() and admin.is_valid():
            # flags presumably select is_boss=True, is_parent=False —
            # confirm against RegistrationForm.save().
            user_form = form.save(True, False)
            admin_fom = admin.save(False)  # commit=False until user attached
            admin_fom.user = user_form
            admin_fom.save()
            builtInLogin(request, user_form)
            return redirect("/dashboard/admin")
    else:
        form = RegistrationForm()
    # Bound form on invalid POST, fresh form on GET.
    return render(request, 'admin_register.html', {'form': form})
def parentRegister(request):
    """Register a parent account and log the new user in.

    GET (or invalid input) renders the registration form; a valid POST
    creates the user plus its Parent profile and redirects to the parent
    dashboard.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        parent = ParentForm(request.POST)
        # FIX: validate BOTH forms before saving, matching adminRegister.
        if form.is_valid() and parent.is_valid():
            # flags presumably select is_boss=False, is_parent=True —
            # confirm against RegistrationForm.save().
            user_form = form.save(False, True)
            parent_form = parent.save(False)  # commit=False until user attached
            parent_form.user = user_form
            # FIX: the original never saved the parent profile, so the
            # Parent row was silently dropped after the user was linked.
            parent_form.save()
            builtInLogin(request, user_form)
            return redirect("/dashboard/parent")
    else:
        form = RegistrationForm()
    # Bound form on invalid POST, fresh form on GET.
    return render(request, 'parent_register.html', {'form': form})
def login(request):
    """Authenticate a user and redirect them to the dashboard for their role.

    Unknown roles, invalid credentials and GET requests all fall through to
    rendering the login form.
    """
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            user = form.get_user()
            builtInLogin(request, user)
            if user.is_boss == True:
                return redirect("/dashboard/admin")
            if user.is_parent == True:
                return redirect("/dashboard/parent")
    else:
        form = AuthenticationForm()
    return render(request, 'login.html', {'form': form})
def logout(request):
    """Log the user out on POST, then send them to the landing page.

    FIX: the original returned None for non-POST requests, which is an
    invalid Django view response; now every request gets a redirect (the
    actual logout still only happens on POST).
    """
    if request.method == 'POST':
        builtInLogout(request)
    return redirect("/")
|
990,012 | 540a5b843a494b86072aa331f61f0cb3f1f798d1 | from String import telephone_name_lookup
def test_tel_directory():
    """Build a name/phone directory and verify reverse lookups by number fragment."""
    entries = ['bevavy 2019830294', 'kim 94409294', 'smith 298234205',
               'lewis 329049235', 'jim 392029446', 'peter 3425252525',
               'john 23425255']
    name_directory, phone_directory = telephone_name_lookup.build_name_directory(entries)
    # A fragment matching exactly one number.
    assert telephone_name_lookup.lookup_number(238289, name_directory, phone_directory) == {'bevavy': ['2019830294']}
    # A fragment matching two different people.
    assert telephone_name_lookup.lookup_number(546, name_directory, phone_directory) == {'kim': ['94409294'], 'jim': ['392029446']}
|
990,013 | 01d80fd14ed5299f0236b20a530d6d99430a5272 | import math
# Refractive indices for Brix 0..60 in steps of 5 degrees.
reflec_index = [1.33, 1.34026, 1.34782, 1.35568, 1.36384, 1.37233, 1.38115, 1.39032, 1.39986, 1.40987, 1.42009, 1.4308, 1.44193]
for step, n_idx in enumerate(reflec_index):
    # Snell's law: refraction angle whose sine is sin(30 deg) scaled by the index.
    seta = math.degrees(math.asin(math.sin(math.radians(30)) * n_idx))
    print("Brix degree : " + str(step * 5) + " reflec_index : " + str(n_idx) + " Degrees : " + str(seta) + " degree")
|
990,014 | 0d3f65ca6c22ca7a7918b5aeec1424b31cd4539b | # Simple code for loading some standard data sets from the UCI Machine
# Learning repository.
#
from classifier import data_item
def read_wine_dataset():
    '''Return a list of data_item objects representing the UCI Wine data.

    Each comma-separated line is: 1-based class label, then the feature
    values.  Labels are shifted to 0-based.
    '''
    dataset = []
    # 'with' guarantees the file is closed even if a parse error occurs.
    with open('wine.txt') as fp:
        for line in fp:
            fields = line.split(',')
            data = [float(v) for v in fields[1:]]
            label = int(fields[0]) - 1
            dataset.append(data_item(label, data))
    return dataset
def read_iris_dataset():
    '''Return a list of data_item objects representing the UCI Iris data.

    Lines starting with '#' are skipped.  The species name in the last
    field maps to labels 0/1/2; any other name raises ValueError.
    '''
    # Lookup table replaces the original if/elif chain.
    species_labels = {
        "Iris-setosa": 0,
        "Iris-versicolor": 1,
        "Iris-virginica": 2,
    }
    dataset = []
    # 'with' guarantees the file is closed even when ValueError is raised.
    with open('iris.txt') as fp:
        for line in fp:
            if line.startswith('#'):
                continue
            fields = line.split()
            data = [float(v) for v in fields[:-1]]
            if fields[-1] not in species_labels:
                raise ValueError("Illegal class name: " + fields[-1])
            dataset.append(data_item(species_labels[fields[-1]], data))
    return dataset
def read_seeds_dataset():
    '''Return a list of data_item objects representing the UCI Seeds data.

    Whitespace-separated fields; the last field is the 1-based class label,
    shifted to 0-based.
    '''
    dataset = []
    # FIX: the original opened the file and never closed it.
    with open('seeds.txt') as fp:
        for line in fp:
            fields = line.split()
            data = [float(v) for v in fields[:-1]]
            label = int(fields[-1]) - 1
            dataset.append(data_item(label, data))
    return dataset
def read_parkinsons_dataset():
    '''Return a list of data_item objects representing the UCI Parkinson's
    data.

    Comma-separated with a header row.  Column 17 is the status label;
    column 0 (the subject name) is skipped and all remaining columns are
    the features.
    '''
    dataset = []
    # 'with' guarantees the file is closed even if a parse error occurs.
    with open('parkinsons.data') as fp:
        fp.readline()  # skip the header row
        for line in fp:
            fields = line.split(',')
            label = int(fields[17])
            data = [float(x) for x in (fields[1:17] + fields[17 + 1:])]
            dataset.append(data_item(label, data))
    return dataset
def read_datasets():
    '''Return all four of the datasets we use in a single dictionary.'''
    return {
        'Wine': read_wine_dataset(),
        'Iris': read_iris_dataset(),
        'Seeds': read_seeds_dataset(),
        'Parkinsons': read_parkinsons_dataset(),
    }
|
990,015 | 8d12590d957f257d0bd3294e06af2db58c29047f | import json
import asyncio
async def dumps(content: dict) -> str:
    """Serialize *content* to a JSON string without blocking the event loop.

    Runs json.dumps in the loop's default executor thread pool.

    :param content: the mapping to serialize
    :return: the JSON text
    """
    loop = asyncio.get_running_loop()
    # BUG FIX: the original assigned the result to a local named ``json``,
    # which made ``json`` a function-local variable and turned the
    # ``json.dumps`` reference into an UnboundLocalError on every call.
    encoded = await loop.run_in_executor(None, json.dumps, content)
    return encoded
async def loads(content: str) -> dict:
    """Deserialize JSON text off the event loop via the default executor."""
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, json.loads, content)
|
990,016 | bb76226153814b101fa6686a5a747a82c028b122 | """
Fine tune forward model using data generate by itself.
choose S_init, S_goal
controller gives a_1
excute a_1, get transition S_init, a_1, S_next
use this transition to renforce the forward model
Choose S_init, S_goal
1. using Policy (Fine tune in policy distribution)
2. randomly choose one (if not state space, need to use replay buffer)
python active_sample_fine_tune_forward_model.py ~/Desktop/forward_planner_data/tf_model/Box3d_reach_table_v1_1500k_random_data_1e-3 Box3dReachTable-v1 ~/rllab/data/local/trpo-box3d-state-reach-table-v1-tf-5000itr/trpo_box3d_state_reach_table_v1_tf_5000itr_2017_08_14_22_59_20_0001/itr_3600.pkl ~/Desktop/forward_planner_data/tf_model/Box3d_reach_table_v1_1500k_random_data_1e-3_fine_tune_step1/ ~/Desktop/forward_planner_data/tf_board/Box3d_reach_table-v1_1500k_random_1e-3_fine_tune_step1 1
"""
import argparse
import joblib
import uuid
import time
import numpy as np
from rllab.misc import logger
from os import listdir
import os.path as osp
filename = str(uuid.uuid4())
from railrl.data_management.simple_replay_pool import SimpleReplayPool
def save_snapshot(encoder, inverse_model, forward_model, tfmodel_path):
    """Persist the three ICM sub-models to *tfmodel_path* via joblib.

    :param encoder: state encoder model
    :param inverse_model: inverse dynamics model
    :param forward_model: forward dynamics model
    :param tfmodel_path: destination file path
    """
    payload = {
        'encoder': encoder,
        'inverse_model': inverse_model,
        'forward_model': forward_model,
    }
    joblib.dump(payload, tfmodel_path, compress=3)
    logger.log("Saved ICM model to {}".format(tfmodel_path))
if __name__ == "__main__":
    # -------- CLI --------
    parser = argparse.ArgumentParser()
    parser.add_argument('dynamic_model_path', type=str,
                        help='path to the dynamix_model')
    parser.add_argument('env_name', type = str,
                        help='environment name')
    parser.add_argument('policy_path', type = str,
                        help='path to policy')
    parser.add_argument('save_model_path', type=str,
                        help='path to save the fine tuned model')
    parser.add_argument('tf_board', type=str,
                        help='path tf_board')
    parser.add_argument('horizon', type=int, help='The horizon of FW model controller')
    parser.add_argument('--variable_horizon', action='store_true', help='if the horizon is variable')
    parser.add_argument('--init_lr', type=float, default=1e-3,
                        help='fine tune initial learning_rate')
    args = parser.parse_args()
    # Imports deferred until after arg parsing so --help stays fast.
    import gym
    env = gym.make(args.env_name)
    import tensorflow as tf
    from planner import ClippedSgdForwardModelPlanner, InverseModelPlanner, \
        ConstrainedForwardModelPlanner, SgdForwardModelPlanner,\
        FastClippedSgdForwardModelPlanner, FastClippedSgdShootingForwardModelPlanner
    from railrl.predictors.dynamics_model import NoEncoder, FullyConnectedEncoder, ConvEncoder, InverseModel, ForwardModel
    # Cap GPU usage so other jobs can share the card.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        ## load the pre-trained policy used to generate goal states
        policy_data = joblib.load(args.policy_path)
        policy = policy_data['policy']
        ## load the pre-trained dynamics (ICM) model to be fine-tuned
        data = joblib.load(args.dynamic_model_path)
        _encoder = data['encoder']
        _inverse_model = data['inverse_model']
        _forward_model = data['forward_model']
        replay_buffer = SimpleReplayPool(500000, env.observation_space.shape, env.action_space.shape)
        # Placeholders: action is hard-coded to 4 dims here.
        action_ph =tf.placeholder(tf.float32, [None,4])
        s1_ph = tf.placeholder(tf.float32, [None] + list(env.observation_space.shape))
        s2_ph = tf.placeholder(tf.float32, [None] + list(env.observation_space.shape))
        # Weight-tied copies so the fine-tuning graph shares the loaded weights.
        encoder1 = _encoder.get_weight_tied_copy(observation_input=s1_ph)
        encoder2 = _encoder.get_weight_tied_copy(observation_input=s2_ph)
        forward_model = _forward_model.get_weight_tied_copy(feature_input=encoder1.output,
                                                            action_input=action_ph)
        inverse_model = _inverse_model.get_weight_tied_copy(feature_input1=encoder1.output,
                                                            feature_input2=encoder2.output)
        # Forward-model loss: predicted next feature vs encoded next state.
        loss = tf.reduce_mean(tf.square(encoder2.output - forward_model.output))
        with tf.variable_scope("new_optimizer"):
            fine_tune_opt = tf.train.AdamOptimizer(args.init_lr).minimize(loss)
        # Only initialize the freshly created optimizer variables, not the
        # loaded model weights.
        variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='new_optimizer')
        sess.run(tf.initialize_variables(variables))
        # summary / tensorboard logging
        summary_writer = tf.summary.FileWriter(args.tf_board, graph = tf.get_default_graph())
        forward_loss_summ = tf.summary.scalar("forward_loss", loss)
        summary = tf.summary.merge_all()
        controller = FastClippedSgdShootingForwardModelPlanner(_forward_model, _encoder, env, \
            sess = sess, pos_only = True)
        #S_init = env.reset()
        #S_goal = policy run 14 steps
        # Sample goal states by rolling the policy forward 50 steps from reset.
        S_goal_list = []
        NUM_GOALS = 1000
        for i in range(NUM_GOALS):
            if i %100 == 0:
                print("sampling policy S_goal")
            obs = env.reset()
            for j in range(50):
                action, _ = policy.get_action(obs)
                obs, r, d, _ = env.step(action)
            S_goal_list.append(obs)
        # collect NUM_GOALS (=1000) goal states
        S_goal_list = np.array(S_goal_list)
        # Main fine-tuning loop: each episode picks a random goal and rolls
        # the controller for 30 steps, training from the replay buffer.
        for i in range(30000):
            print(i)
            obs = env.reset()
            S_goal = S_goal_list[np.random.randint(NUM_GOALS)]
            #roll out 30 steps using dynamic model controller
            for j in range(30):
                if not args.variable_horizon:
                    action, _ = controller.get_action(obs, S_goal, steps = args.horizon)
                else:
                    # Shrink the horizon as the episode progresses (min 1).
                    action, _ = controller.get_action(obs, S_goal, steps = max(15-j, 1))
                replay_buffer.add_sample(obs, action, 0, False, False)
                obs, r, d, _ = env.step(action)
                if (i*30 + j) % 5000 == 0:
                    # NOTE(review): the snapshot name uses i*20+j while the
                    # trigger uses i*30+j — likely a leftover from a 20-step
                    # rollout; confirm which step count is intended.
                    PATH = args.save_model_path +"/fine_tune_itr{}.pkl".format(i*20+j)
                    save_snapshot(_encoder, _inverse_model, _forward_model, PATH)
                if replay_buffer.size > 500:
                    # print("Start Training")
                    batch = replay_buffer.random_batch(256)
                    obs_batch = list(batch['observations'])
                    action_batch = list(batch['actions'])
                    next_obs_batch = list(batch['next_observations'])
                    # import pdb; pdb.set_trace()
                    feed_dict = {s1_ph:obs_batch, s2_ph:next_obs_batch, action_ph:action_batch}
                    if (i*30 + j) % 200 == 0:
                        # NOTE(review): same i*20+j vs i*30+j mismatch for the
                        # summary step counter.
                        _, summ = sess.run([fine_tune_opt, summary], feed_dict = feed_dict)
                        summary_writer.add_summary(summ, i*20+j)
                    else:
                        sess.run(fine_tune_opt, feed_dict = feed_dict)
            # env.render()
            # Terminal marker sample closing out the episode in the buffer.
            replay_buffer.add_sample(obs, np.zeros(4), 0, True, True)
|
990,017 | 29193872339962c72b1a9fa9dea05504ffbff608 | import numpy as np
import matplotlib.pyplot as plt
from DubinsCar import DubinsCar
from Environment import Environment
def gen1(d):
    """Constant steering regulator: always returns 10 degrees in radians (ignores *d*)."""
    return np.radians(10)
def randomDubins():
    """Drive a Dubins car for 100 randomly steered steps and plot the path."""
    car = DubinsCar(theta=np.radians(45), speed=10)
    xs, ys = np.zeros(100), np.zeros(100)
    for step in xrange(100):
        # Random steering angle in [0, 180) degrees with a random sign.
        steer = np.radians(np.random.randint(0, 180))
        steer *= np.random.choice([-1, 1])
        car.sim_for_dt(steer)
        xs[step] = car.x
        ys[step] = car.y
    plt.plot(xs, ys)
    plt.axis('square')
    plt.show()
def testEnvironment():
    """Run a default Environment at speed 20 until collision, then plot the path."""
    sim = Environment()
    sim.car.speed = 20
    sim.simUntilCollision()
    sim.plotPath()
if __name__ == '__main__':
    # Default demo: run the environment simulation (randomDubins kept as an alternative).
    # randomDubins()
    testEnvironment()
|
990,018 | a51363ec4e0abffea47f63c1c9203e1796f46677 | #!/usr/bin/env python
#Written by Roger Fachini
#Startup script located at /etc/init/githttpstartup.conf
import logging
import socket
import SimpleHTTPServer
import SocketServer
import json
import time
import os
import commands
import cgi
IP = ''
PORT = 80
BASE_DIR = '/robotics/services/pvcs/'
LOG_DIR = '/robotics/logs/pvcs/'
HTML_DIR = 'html/'
GIT_DIR = '/robotics/git/'
LATEST_LOG = '/robotics/logs/latest-pvcs.log'
REDIRECTS = {'/':'index.htm'}
fileData = {'__data': {'ip':'Null'}}
NEW_REPO_CMD = 'bash /robotics/scripts/makeHub.sh'
NEW_LOCAL_CMD = 'bash /robotics/scripts/makeHubLocal.sh'
RESYNC_CMD = 'bash /robotics/scripts/syncGit.sh'
MACHINE_IP = commands.getoutput("/sbin/ifconfig").split("\n")[1].split()[1][5:]
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP handler serving the repo-browser UI and two POST actions (Python 2)."""
    # NOTE(review): this class-body `global main` is a no-op here — confirm
    # it can be removed.
    global main
    def do_GET(self):
        # /customData.json returns the live repo index; everything else is
        # served as a static file from HTML_DIR (with optional redirects).
        self.log_message('Getting path: %s',self.path)
        if self.path == '/customData.json':
            self.send_response(200)
            self.send_header("Content-type", 'application/json')
            self.end_headers()
            # Refresh the index so the client always sees current repos.
            reloadFileList()
            fileData.update({'__data': {'ip':MACHINE_IP}})
            self.wfile.write(json.dumps(fileData))
            return
        elif self.path in REDIRECTS.iterkeys():
            self.path = REDIRECTS[self.path]
        self.path = HTML_DIR+self.path
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()
    def do_POST(self):
        # Parse the form body according to its content type.
        ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
        if ctype == 'multipart/form-data':
            postvars = cgi.parse_multipart(self.rfile, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers.getheader('content-length'))
            postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
        else:
            postvars = {}
        print(self.path)
        if self.path =='/newRepo':
            # Redirect back to the UI first, then create the repo.
            self.send_response(301)
            self.send_header('Location','versionControl.htm')
            self.end_headers()
            print postvars
            if postvars['name'] == ['']:return
            # 'public' == ['false'] means a local-only (private) repo.
            private = postvars['public'] == ['false']
            name=postvars['name'][0]
            name = name.replace(' ','_')
            dir = '%s/%s.git'%(GIT_DIR, name)
            makeNewGitRepo(dir, private)
        elif self.path =='/resync':
            self.send_response(301)
            self.send_header('Location','versionControl.htm')
            self.end_headers()
            resyncGit()
    def log_message(self, format, *args):
        # Route request logging through the 'handler' logger instead of stderr.
        log = logging.getLogger('handler')
        log.info("%s %s" %
                 (self.client_address[0],
                  format%args))
def reloadFileList():
    """Rebuild the global fileData index from the bare repos under GIT_DIR."""
    global fileData
    fileData = {'__data': {'ip': 'Null'}}
    for entry in os.listdir(GIT_DIR):
        if not entry.endswith('.git'):
            continue
        # A 'no-github-sync' marker file means the repo is local-only.
        if os.path.isfile(GIT_DIR + entry + '/no-github-sync'):
            state = 'Local'
        else:
            state = 'Public'
        date = time.ctime(os.path.getmtime(GIT_DIR + entry + '/git-daemon-export-ok'))
        ip = commands.getoutput("/sbin/ifconfig").split("\n")[1].split()[1][5:]
        link = 'git@server:/opt/git/%s' % entry
        fileData.update({entry: {'path': GIT_DIR + entry,
                                 'date': date,
                                 'link': link,
                                 'state': state}})
def makeNewGitRepo(dir, localR=False):
    """Run the repo-creation shell script for a new hub repository.

    :param dir: path ending in '<name>.git'; only the basename is passed to the script
    :param localR: when True use the local-only (no GitHub sync) script
    """
    cmd = NEW_LOCAL_CMD if localR else NEW_REPO_CMD
    log = logging.getLogger('server')
    log.info('Running git init script: %s' % cmd)
    repo_name = dir.split('/')[-1]
    status = os.system(cmd + ' %s' % repo_name)
    if status == 0:
        log.info('Success!')
    else:
        log.error('Failure: command returned status code of %s', status)
def resyncGit():
    """Run the GitHub resync shell script, logging before and after."""
    log = logging.getLogger('server')
    log.info('Running git resync ')
    os.system(RESYNC_CMD)
    log.info('done')
class Main:
    """Builds the repo index, binds the TCP server and serves forever.

    Instantiation blocks until serve_forever() exits.
    """
    def __init__(self, ip, port):
        # Populate the index once up-front so the first request is fast.
        reloadFileList()
        logger = logging.getLogger('server')
        logger.info('Starting server on %s:%s',ip,port)
        server = SocketServer.TCPServer((ip, port), Handler)
        logger.info('Serving forever...')
        server.serve_forever()
if __name__ == '__main__':
    os.chdir(BASE_DIR)
    # Log both to a timestamped file and to the fixed "latest" log.
    logfile = LOG_DIR+time.strftime("%m-%d-%y %H:%M:%S.log")
    logfmt = '[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s'
    datefmt = '%H:%M:%S %m/%d/%y'
    formatter = logging.Formatter(fmt=logfmt,
                                  datefmt=datefmt)
    logging.basicConfig(level=logging.DEBUG,
                        format=logfmt,
                        datefmt=datefmt)
    logger = logging.getLogger()
    fh = logging.FileHandler(logfile)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    fh = logging.FileHandler(LATEST_LOG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logging.info('Logging to file: %s',logfile)
    # Supervisor loop: restart the server after any crash, stop on Ctrl-C.
    while True:
        try:
            Main(IP,PORT)
        except KeyboardInterrupt:
            logging.critical('Terminated By User')
            break
        except BaseException as er:
            logging.critical('Server Daemon crashed:')
            logging.exception(er)
            logging.info('Restarting in 2...')
            time.sleep(2)
            logging.info('Restarting...')
|
990,019 | 737bfa88f64e7242021140ce914c694082660b89 | import cv2
import numpy as np
class CommonsVariables(object):
    """Shared label constants and image helpers for the 17-tag label set."""
    # The 17 possible image tags; index positions match label_map below.
    labels = ['blow_down',
              'bare_ground',
              'conventional_mine',
              'blooming',
              'cultivation',
              'artisinal_mine',
              'haze',
              'primary',
              'slash_burn',
              'habitation',
              'clear',
              'road',
              'selective_logging',
              'partly_cloudy',
              'agriculture',
              'water',
              'cloudy']
    # tag name -> its index in `labels`.
    label_map = {'agriculture': 14,
                 'artisinal_mine': 5,
                 'bare_ground': 1,
                 'blooming': 3,
                 'blow_down': 0,
                 'clear': 10,
                 'cloudy': 16,
                 'conventional_mine': 2,
                 'cultivation': 4,
                 'habitation': 9,
                 'haze': 6,
                 'partly_cloudy': 13,
                 'primary': 7,
                 'road': 11,
                 'selective_logging': 12,
                 'slash_burn': 8,
                 'water': 15}
    def satured_image_color(self, img, s_factor):
        """
        Scale an image's saturation by *s_factor* via HSV space.

        :param img: BGR image as a numpy matrix
        :param s_factor: multiplication factor for saturation
        :return: BGR image with saturation scaled and clipped to [0, 255]
        """
        # convert to HSV so saturation can be edited independently
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # float32 so the scaled channel is not truncated before clipping
        img_hsv = img_hsv.astype("float32")
        # modify saturation
        (h, s, v) = cv2.split(img_hsv)
        s = s*s_factor
        s = np.clip(s, 0, 255)
        img_hsv_s = cv2.merge([h, s, v])
        # back to uint8 BGR for downstream consumers
        img_rgb_s = cv2.cvtColor(img_hsv_s.astype("uint8"), cv2.COLOR_HSV2BGR)
        return img_rgb_s
|
990,020 | 092d01e09987b7e1d93870d02baa0367adc88749 | from django.urls import path
from .views import *
# URL routes for this app: auth pages plus the default search view at the root.
urlpatterns = [
    path('login.html', loginView, name='login'),
    path('register.html', registerView, name='register'),
    path('setps.html', setpsView, name='setps'),
    path('logout.html', logoutView, name='logout'),
    path('', findpsView, name='findps')
]
990,021 | 02a2c33974219c9f386befc723506264d4632826 | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
class DataProcessor():
'''
The DataProcessor class is repsonsible for parsing the given .csv file.
### Attributes ####
:file : (str) the path to the file.
:test : (Bool) indicates if the file is the Testing set.
:minimal: (Bool) if True it drops from the training set the columns that are not found in the test set.
:time_attributes: list of (str) the data-time attributes in the dataset.
:categorical_attributes: list of (str) the categorical attributes in the dataset.
:label_col: (str) the label column (Y-data) or target vector.
:encoder: (sklearn LabelEncoder object) the encoder used to encode categorical variables.
:user-col : (str) the name of User Id column.
'''
def __init__(self, file = None, additional = None, test = False, minimal = True):
    # Paths default to the bundled training data when not supplied.
    if file is None :
        self.file = "../data/Train.csv"
    else: self.file = file
    if additional is None:
        self.additional_file = '../data/additional_data/trainRoot_edited.csv'
    else: self.additional_file = additional
    # Non-minimal training data additionally keeps the arrival-time column,
    # which the test set does not have.
    if test is False:
        if minimal is True:
            self.time_attributes = ['Placement - Time', 'Confirmation - Time', 'Arrival at Pickup - Time', 'Pickup - Time']
        else:
            self.time_attributes = ['Placement - Time', 'Confirmation - Time', 'Arrival at Pickup - Time', 'Pickup - Time', 'Arrival at Destination - Time']
    else:
        self.time_attributes = ['Placement - Time', 'Confirmation - Time', 'Arrival at Pickup - Time', 'Pickup - Time']
    self.categorical_attributes = ['User Id' , 'Vehicle Type', 'Platform Type', 'Rider Id', 'Personal or Business']
    self.label_col = 'Time from Pickup to Arrival'
    # Stored as a class; an instance is created each time encoding runs.
    self.encoder = LabelEncoder
    self.one_hot = None
    self.user_col = ['User Id']
    # Label values 0..29 are dropped as outliers (see _drop_rows_by_value).
    self.row_values = range(30)
    # Columns absent from / irrelevant to the chosen dataset variant.
    if test is False:
        if minimal is True:
            self.cols_to_drop = ['Order No', 'Precipitation in millimeters',
                                 'Arrival at Destination - Day of Month', 'Arrival at Destination - Weekday (Mo = 1)',
                                 'Arrival at Destination - Time', 'Confirmation - Day of Month',
                                 'Confirmation - Weekday (Mo = 1)', 'Pickup - Day of Month', 'Pickup - Weekday (Mo = 1)',
                                 'Arrival at Pickup - Day of Month', 'Arrival at Pickup - Weekday (Mo = 1)']
        else:
            self.cols_to_drop = ['Order No', 'Precipitation in millimeters']
    else:
        self.cols_to_drop = ['Order No', 'Precipitation in millimeters','Confirmation - Day of Month',
                             'Confirmation - Weekday (Mo = 1)', 'Pickup - Day of Month', 'Pickup - Weekday (Mo = 1)',
                             'Arrival at Pickup - Day of Month', 'Arrival at Pickup - Weekday (Mo = 1)']
    self.test = test
def _load_file(self, file):
train_df = pd.read_csv(file)
return train_df
def _merge_additional_data(self, df):
additional_df = pd.read_csv(self.additional_file)
result = pd.concat([df, additional_df], axis=1, join='inner')
return result
def _drop_rows_by_value(self, df, col, values):
for value in values:
df = df[df[col] != value]
return df
def _drop_col(self, df, col_to_drop):
df = df.drop(col_to_drop, axis = 1)
return df
def _fill_null(self, df):
df = df.fillna(df.mean())
return df
def _process_time(self, df, dt_attributes):
for time in dt_attributes:
df[time] = pd.to_datetime(df[time]).dt.strftime('%H:%M:%S')
for time in dt_attributes:
df[time+'_H'] = pd.DatetimeIndex(df[time]).hour
df[time+'_M'] = pd.DatetimeIndex(df[time]).minute
df[time+'_S'] = pd.DatetimeIndex(df[time]).second
df = self._drop_col(df, time)
return df
def _encode_categorical(self, df, encoding, attribues):
encoder = encoding()
encoded = df[attribues].apply(encoder.fit_transform)
df[attribues] = encoded[attribues]
return df
def _extract_features_labels(self, df, label_col):
if self.test is False:
feature_cols = df.columns.drop([label_col])
X = df[feature_cols]
Y = df[[label_col]]
return X, Y
else:
return df
def _get_numpy_train_valid_data(self, data):
    """Convert DataFrames to numpy arrays, splitting train/validation sets.

    When self.test is True, *data* is a single DataFrame and its raw values
    are returned; otherwise *data* is (X, Y) and a shuffled 67/33 split is
    returned as (x_train, x_valid, y_train, y_valid).
    """
    if self.test is not False:
        return data.values
    X, Y = data
    x_tr, x_va, y_tr, y_va = train_test_split(X.values, Y.values, test_size=0.33)
    return x_tr, x_va, y_tr, y_va
def _normalize(self, mat):
means = np.mean(mat, axis = 0)
stds = np.std(mat, axis = 0)
stds += 1e-5
return mat-means/stds
def get_numpy_data(self, fillna = True, additional = True, encode = True, np_split = True, enocde_user = False, normalize = True, drop_ones = True):
    """ This is the only function you need to use from DataProcessor class to process the data.

    Parameters:
    -----------
    fillna: (Bool) if True, fill missing values with the column mean.
    additional: (Bool) if True, inner-join the additional CSV (see _merge_additional_data) onto the main frame.
    encode: (Bool) if True, encode the categorical variables with integer values.
    np_split: (Bool) if True, split into train and validation sets and return 4 numpy arrays.
    enocde_user: (Bool) if True, keep the user column (1-hot encoding not implemented yet); default False drops it.
        NOTE(review): the parameter name is a typo of "encode_user" but is kept for caller compatibility.
    normalize: (Bool) if True, z-score normalize the feature matrices (labels are left untouched).
    drop_ones: (Bool) if True, first drop rows whose label equals any value in self.row_values.

    Returns:
    --------
    train mode (self.test is False):
        np_split True  -> 4 numpy arrays (x_train, x_valid, y_train, y_valid)
        np_split False -> 2 DataFrames (X, Y)
    test mode (self.test is True):
        a single numpy feature matrix (normalized if *normalize*).
    """
    df = self._load_file(self.file)
    if additional is True:
        df = self._merge_additional_data(df)
    if drop_ones is True:
        df = self._drop_rows_by_value(df, self.label_col, self.row_values)
    # Remove identifier / unwanted columns configured in __init__.
    for col in self.cols_to_drop:
        df = self._drop_col(df, col)
    if fillna is True:
        df = self._fill_null(df)
    # Expand timestamp columns into hour/minute/second integer columns.
    df = self._process_time(df, self.time_attributes)
    if encode is True:
        df = self._encode_categorical(df, self.encoder, self.categorical_attributes)
    if enocde_user is False:
        df = self._drop_col(df, self.user_col)
    if self.test is False:
        if np_split is True:
            if normalize is True :
                # Normalize the feature matrices only; labels stay raw.
                xtr, xva, ytr, yva = self._get_numpy_train_valid_data(self._extract_features_labels(df, self.label_col))
                return self._normalize(xtr), self._normalize(xva), ytr, yva
            else:
                return self._get_numpy_train_valid_data(self._extract_features_labels(df, self.label_col))
        else:
            # No split requested: returns (X, Y) DataFrames.
            return self._extract_features_labels(df, self.label_col)
    else:
        if normalize is True :
            xtr = self._get_numpy_train_valid_data(self._extract_features_labels(df, self.label_col))
            return self._normalize(xtr)
        else:
            xtr = self._get_numpy_train_valid_data(self._extract_features_labels(df, self.label_col))
            return xtr
def main():
    # Placeholder CLI entry point -- this module is meant to be imported.
    pass


if __name__ == "__main__":
    main()
990,022 | ca4f83a7a225b5a1f78b6bd9016a3b3fa93bdd81 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import configparser
import logging.handlers
import os
# Module-level logger that writes both to a rotated file and to stderr.
PYTHON_LOGGER = logging.getLogger(__name__)
if not os.path.exists("log"):
    os.mkdir("log")
# Rotate the log file at midnight, keeping the last 60 files.
HDLR = logging.handlers.TimedRotatingFileHandler("log/config_reader.log",
                                                 when="midnight", backupCount=60)
STREAM_HDLR = logging.StreamHandler()
FORMATTER = logging.Formatter("%(asctime)s %(filename)s [%(levelname)s] %(message)s")
HDLR.setFormatter(FORMATTER)
STREAM_HDLR.setFormatter(FORMATTER)
PYTHON_LOGGER.addHandler(HDLR)
PYTHON_LOGGER.addHandler(STREAM_HDLR)
PYTHON_LOGGER.setLevel(logging.DEBUG)

# Absolute path to the folder containing this python file.
FOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
class ConfigReader:
    """Thin attribute-style wrapper around configparser.

    Example usage:
    config.ini:
        [SectionA]
        param1=toto
        param2 = titi
        [SectionB]
        param3=10
    Class usage:
        config = ConfigReader("config.ini")
        config.SectionA["param1"]
        or
        config.SectionB.getint("param3")
            .getfloat
            .getboolean
    """

    def __init__(self, absolute_path_config_file="config.ini"):
        """
        :param absolute_path_config_file: (string) path to the config file
        :raises FileNotFoundError: if the config file cannot be read
        """
        self.config = configparser.ConfigParser(allow_no_value=True)
        # config.read returns the list of files successfully parsed; an
        # empty list means the file is missing or unreadable.
        # (Bug fix: error message previously read "his not found".)
        if len(self.config.read(absolute_path_config_file)) == 0:
            raise FileNotFoundError("The config file {} is not found!".format(absolute_path_config_file))
        try:
            # Expose each section as an instance attribute so callers can
            # write config.SectionA["param1"].
            self.__dict__.update(self.config)
        except Exception as e:
            PYTHON_LOGGER.error("Error to load the configurations: {}".format(e))
if __name__ == "__main__":
    # Manual smoke test: load the repository-level config and print the wrapper.
    demo_config = ConfigReader("../config.ini")
    print(demo_config)
|
990,023 | edc55bcf5d44be31a0d3cd08613494c3bc209419 | import jittor as jt
from jittor import nn, Module
from jittor.nn import Sequential
from collections import OrderedDict
import math
class Flatten(Module):
    """Collapse every dimension after the batch dimension into one."""

    def execute(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class Conv_block(Module):
    """Conv2d -> BatchNorm -> PReLU (matches ``Conv`` in ./fmobilefacenet)."""

    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
        super(Conv_block, self).__init__()
        # bias=False: the BatchNorm that follows makes a conv bias redundant.
        self.conv2d = nn.Conv(in_c, out_c, kernel, groups=groups, stride=stride,
                              padding=padding, bias=False)
        self.batchnorm = nn.BatchNorm(out_c, eps=0.001)
        self.relu = nn.PReLU(num_parameters=out_c)

    def execute(self, x):
        return self.relu(self.batchnorm(self.conv2d(x)))
class Linear_block(Module):
    """Conv2d -> BatchNorm with no activation (``Linear`` in ./fmobilefacenet)."""

    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
        super(Linear_block, self).__init__()
        self.conv2d = nn.Conv(in_c, out_c, kernel, groups=groups, stride=stride,
                              padding=padding, bias=False)
        self.batchnorm = nn.BatchNorm(out_c, eps=0.001)

    def execute(self, x):
        return self.batchnorm(self.conv2d(x))
class Depth_Wise(Module):
    """Pointwise expand -> depthwise conv -> pointwise project.

    With residual=False this matches ``DResidual`` in ./fmobilefacenet;
    with residual=True an identity shortcut is added (``Residual`` unit).
    """

    def __init__(self, in_c, out_c, residual = False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):
        super(Depth_Wise, self).__init__()
        self.conv_sep = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
        self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride)
        self.conv_proj = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
        self.residual = residual

    def execute(self, x):
        out = self.conv_proj(self.conv_dw(self.conv_sep(x)))
        # Shortcut only when the unit preserves the input shape (residual=True).
        return x + out if self.residual else out
class Residual(Module):
    """Stack of *num_block* identity-shortcut Depth_Wise units."""

    def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
        super(Residual, self).__init__()
        blocks = OrderedDict(
            ('block%d' % idx,
             Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding,
                        stride=stride, groups=groups))
            for idx in range(num_block))
        self.model = Sequential(blocks)

    def execute(self, x):
        return self.model(x)
class MobileFaceNet(Module):
    """MobileFaceNet face-embedding backbone (mirrors ./fmobilefacenet)."""

    def __init__(self, embedding_size):
        # embedding_size: length of the output embedding vector.
        super(MobileFaceNet, self).__init__()
        # Stem: stride-2 conv then depthwise conv, both with BN + PReLU.
        self.conv_1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
        self.conv_2_dw = Conv_block(64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)
        # Alternating stride-2 downsampling units and residual stacks.
        self.dconv_23 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128)
        self.res_3 = Residual(64, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
        self.dconv_34 = Depth_Wise(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256)
        self.res_4 = Residual(128, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
        self.dconv_45 = Depth_Wise(128, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512)
        self.res_5 = Residual(128, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
        # Head: 1x1 expand, then a 7x7 depthwise conv with no padding
        # (assumes a 7x7 feature map at this point -- TODO confirm input size),
        # then a linear projection to the embedding.
        self.conv_6sep = Conv_block(128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))
        self.conv_6dw7_7 = Linear_block(512, 512, groups=512, kernel=(7,7), stride=(1, 1), padding=(0, 0))
        self.conv_6_flatten = Flatten()
        self.pre_fc1 = nn.Linear(512, embedding_size)
        self.fc1 = nn.BatchNorm(embedding_size, eps=2e-5) # doubt: the same as mx.sym.BatchNorm(data=conv_6_f, fix_gamma=True, eps=2e-5, momentum=0.9)?

    def execute(self, x):
        # Normalize pixel values: (x - 127.5) * 0.078125, i.e. / 12.8.
        x = x - 127.5
        x = x * 0.078125
        out = self.conv_1(x)
        out = self.conv_2_dw(out)
        out = self.dconv_23(out)
        out = self.res_3(out)
        out = self.dconv_34(out)
        out = self.res_4(out)
        out = self.dconv_45(out)
        out = self.res_5(out)
        out = self.conv_6sep(out)
        out = self.conv_6dw7_7(out)
        out = self.conv_6_flatten(out)
        out = self.pre_fc1(out)
        out = self.fc1(out)
        return out
|
990,024 | c1a7b282cb54a44af7f10c5ba7e363ae9193378b | # Write a function that takes two arguments - a quiz question and the correct answer.
# In your function, you will print the question, and ask the user for the answer.
# If the user gets the answer correct, print a success message.
# Else, print a message with the correct answer.
# Your function does not need to return anything.
# Call your function with two example quiz questions. Here's some suggestions,
#
# Q: What year did Apollo 11 land on the moon? A: 1969
# Q: Who painted the Mona Lisa? A: Leonardo da Vinci
def quiz(question, answer):
    """Ask *question* on stdin and report whether the reply equals *answer*."""
    reply = input(question)
    if reply == answer:
        print('Correct! ')
    else:
        print('Incorrect, correct answer was ' + answer)
def main():
    """Run two sample quiz questions."""
    quiz('What year did Apollo 11 land on the moon? ', '1969')
    quiz('Who painted the Mona Lisa? ', 'Leonardo da Vinci')
if __name__ == "__main__":
    # Guard keeps the interactive quiz from running when this module is imported.
    main()
|
990,025 | 5f3a57be0883754c3df2a1761eef5bc80379277c | from flask import Flask,render_template,redirect,flash,url_for,request
import requests
import datetime
import pandas as pd
import os
from bokeh.io import output_file, show, save
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool, CustomJS
from bokeh.models.widgets import Button
from bokeh.embed import components
#import numpy as np
import matplotlib.pyplot as plt
from goodreads import client
# --- Data tables loaded once at startup -------------------------------------
# NOTE(review): error_bad_lines was removed in pandas 2.0 (use on_bad_lines);
# kept here because the app currently pins an older pandas.
books = pd.read_csv('books.csv', sep=',', error_bad_lines=False, encoding="latin-1")
books.columns = ['bookID', 'ISBN', 'bookTitle', 'imageUrlM']
ratings = pd.read_csv('ratings.csv', sep=';', error_bad_lines=False, encoding="latin-1")
ratings.columns = ['userID', 'ISBN', 'bookRating']
# Precomputed recommendations: per-user (book1..book10) and per-book (book1..book11).
user_recom = pd.read_csv('user_recom.csv', sep=',', error_bad_lines=False, encoding="latin-1")
user_recom.columns = ['userID'] + ['book%d' % i for i in range(1, 11)]
book_recom = pd.read_csv('book_recom.csv', sep=',', error_bad_lines=False, encoding="latin-1")
book_recom.columns = ['bookID'] + ['book%d' % i for i in range(1, 12)]

# Per-letter title lists bookA_list .. bookZ_list, loaded in a loop instead of
# 26 copy-pasted read_csv/tolist pairs. The module-level names are kept so the
# letter routes keep working unchanged.
for _letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
    _letter_df = pd.read_csv('bookname_%s.csv' % _letter, sep=',',
                             error_bad_lines=False, encoding="latin-1")
    globals()['book%s_list' % _letter] = _letter_df.values.tolist()

# Known user IDs, used by the login route.
uid = list(user_recom['userID'].values)

app = Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
    # Landing page.
    return render_template('index.html')
@app.route('/index1')
def index1():
    # Secondary landing page variant.
    return render_template('index1.html')
@app.route('/index2')
def index2():
    # Secondary landing page variant.
    return render_template('index2.html')
@app.route('/about')
def about():
    # Static about page.
    return render_template('about.html')
# --- Per-letter browse pages -------------------------------------------------
# '/X' renders the static page X.html and '/listX' renders list.html with that
# letter's title list. Registered in a loop instead of 52 copy-pasted view
# functions; endpoint names ('A', 'listA', ...) match the original function
# names, so any url_for() calls keep working.
_LETTER_BOOK_LISTS = {
    'A': bookA_list, 'B': bookB_list, 'C': bookC_list, 'D': bookD_list,
    'E': bookE_list, 'F': bookF_list, 'G': bookG_list, 'H': bookH_list,
    'I': bookI_list, 'J': bookJ_list, 'K': bookK_list, 'L': bookL_list,
    'M': bookM_list, 'N': bookN_list, 'O': bookO_list, 'P': bookP_list,
    'Q': bookQ_list, 'R': bookR_list, 'S': bookS_list, 'T': bookT_list,
    'U': bookU_list, 'V': bookV_list, 'W': bookW_list, 'X': bookX_list,
    'Y': bookY_list, 'Z': bookZ_list,
}


def _letter_page(letter):
    """Build a view rendering the static page for *letter*."""
    def view():
        return render_template(letter + '.html')
    return view


def _letter_list_page(letter):
    """Build a view rendering list.html with *letter*'s title list."""
    def view():
        return render_template('list.html', data=_LETTER_BOOK_LISTS[letter], chr=letter)
    return view


for _route_letter in sorted(_LETTER_BOOK_LISTS):
    app.add_url_rule('/' + _route_letter, endpoint=_route_letter,
                     view_func=_letter_page(_route_letter))
    app.add_url_rule('/list' + _route_letter, endpoint='list' + _route_letter,
                     view_func=_letter_list_page(_route_letter))
@app.route('/book',methods=['GET','POST'])
def book():
    """Book detail page: local metadata, best-effort Goodreads enrichment and
    the ten collaborative-filtering recommendations stored in book_recom.

    Fixes vs. the original: a GET request no longer leaves ``B`` unbound
    (it now falls back to the default book ID), the ten copy-pasted
    per-recommendation lookups are collapsed into one loop, and the unused
    ``bisbnN`` variables plus commented-out experiments were removed.
    """
    try:
        default_name = 53202  # fallback bookID when no form value is posted
        B = default_name
        if request.method == 'POST':
            B = int(request.values.get("book", default_name))
        # Local metadata for the selected book.
        Bimg = books.loc[books['bookID'] == B]['imageUrlM'].values[0]
        Btitle = books.loc[books['bookID'] == B]['bookTitle'].values[0]
        BISBN = books.loc[books['bookID'] == B]['ISBN'].values[0]
        # Restore leading zeros lost when the ISBN was parsed numerically.
        if len(BISBN) < 10:
            BISBN = BISBN.zfill(10)
        Blist = [Bimg, Btitle]
        # Best-effort Goodreads enrichment; fall back to local data on any error.
        try:
            gc = client.GoodreadsClient('jz7ZTvCqb3eUPgtYhXvZ0Q', 'CoZb0wPah3A8iDIYCg0HjEkxKuHIcWjN8vkCBrr8Vk')
            gr_book = gc.book(isbn=BISBN)
            bookdes = ('ISBN-10: ' + str(gr_book.isbn) + ', ISBN-13: ' + str(gr_book.isbn13) +
                       ', Title: ' + str(gr_book.title) + ', Authors: ' + str(gr_book.authors) +
                       ', Rating Average: ' + str(gr_book.average_rating))
            bookdes1 = gr_book.description
            booksim = gr_book.similar_books
            bookisbn = 'http://covers.openlibrary.org/b/isbn/' + str(gr_book.isbn).strip() + '-M.jpg'
        except Exception:
            bookdes = Btitle + ',' + BISBN
            bookdes1 = 'N/A'
            booksim = []
            bookisbn = Bimg
            print('Not Found in Good Reads')
        # Recommendations live in columns book2..book11 of book_recom
        # (book1 is the queried book itself and is skipped).
        rec_row = book_recom.loc[book_recom['bookID'] == B]
        bimg = []
        for col in ('book2', 'book3', 'book4', 'book5', 'book6',
                    'book7', 'book8', 'book9', 'book10', 'book11'):
            rec_id = rec_row[col].values[0]
            rec_book = books.loc[books['bookID'] == rec_id]
            bimg.append([rec_book['imageUrlM'].values[0],
                         str(rec_book['bookTitle'].values[0])])
        return render_template('book.html', bookisbn=bookisbn, bookdes=bookdes,
                               bookdes1=bookdes1, booksim=booksim, bimg=bimg, Blist=Blist)
    except Exception as e:
        # Any lookup failure (unknown ID, malformed form value) falls back home.
        print(e)
        return render_template('index.html')
# Route for handling the login page logic
@app.route('/login',methods=['GET', 'POST'])
def login():
    error = None
    if request.method == 'POST':
        #if request.form['username'] != 'admin' or request.form['password'] != 'admin':
        # NOTE(review): uid holds integer user IDs while form fields are
        # strings, so this membership test can never be True and every POST
        # redirects. The condition also looks inverted (error when the user
        # IS known); presumably the intent was ``int(...) not in uid`` --
        # confirm before changing.
        if request.form['username'] in uid and request.form['password'] in uid:
            error = 'Invalid Credentials. Please try again.'
        else:
            return redirect(url_for('user',usrid=request.form['username']))
    return render_template('login.html', error=error)
@app.route('/user/<usrid>')
def user(usrid):
    """Per-user page: the user's 10/10-rated titles, their ten precomputed
    recommendations, and a bokeh bar chart of their rating counts.

    Fixes vs. the original: the ten copy-pasted per-recommendation lookups
    are collapsed into one loop, the unused ``bisbnN`` variables and the
    redundant non-empty guard around the ratings loop were removed.
    """
    U = int(usrid)
    userdes = 'UserID: ' + str(usrid)
    # Titles of every book this user rated 10/10; ISBNs missing from the
    # books table are skipped (IndexError on .values[0]).
    userate10 = ratings.loc[(ratings['userID'] == U) & (ratings['bookRating'] == 10)]
    rate10list = []
    for isbn in userate10['ISBN'].values:
        try:
            rate10list.append(books.loc[books['ISBN'] == isbn]['bookTitle'].values[0])
        except IndexError as e:
            print(e)
    # Precomputed recommendations: columns book1..book10 of user_recom.
    rec_row = user_recom.loc[user_recom['userID'] == U]
    bimg = []
    for col in ('book1', 'book2', 'book3', 'book4', 'book5',
                'book6', 'book7', 'book8', 'book9', 'book10'):
        rec_id = rec_row[col].values[0]
        rec_book = books.loc[books['bookID'] == rec_id]
        bimg.append([rec_book['imageUrlM'].values[0],
                     str(rec_book['bookTitle'].values[0])])
    title = 'user'
    # Bar chart: how many books the user gave each non-zero rating.
    userate = ratings.loc[(ratings['userID'] == U) & (ratings['bookRating'] > 0)]
    rates = userate.groupby('bookRating').count()
    output_file('./templates/test.html', mode='inline')
    rating = [str(r) for r in rates.index]
    p = figure(x_range=rating, plot_height=250, title="Rating Counts",
               x_axis_label='Ratings', y_axis_label='Number of books')
    p.vbar(x=rating, top=list(rates['ISBN'].values), width=0.9)
    p.xgrid.grid_line_color = None
    p.y_range.start = 0
    script, div = components(p)
    return render_template('user.html', title=title, userdes=userdes, script=script,
                           div=div, rate10list=rate10list, bimg=bimg)
if __name__ == '__main__':
    #app.run(port=7000)
    # NOTE(review): debug='True' passes a truthy *string*; Flask's debug flag
    # is documented as a boolean -- presumably ``debug=True`` was intended.
    app.run(debug='True')
|
990,026 | 26072f407e089cb7a4f607f4a260f6d854ce2d5f | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for differentially private sampling.
Usage:
From google-research/
python -m private_sampling.private_sampling_test
"""
import itertools
import math
from absl.testing import absltest
from absl.testing import parameterized
from private_sampling import private_sampling
# Controls the failure probability of each randomized test: any single random
# test below fails with probability at most 1 / FAILURE_PROBABILITY_INVERSE.
FAILURE_PROBABILITY_INVERSE = 10000000.0
class ThresholdSampleTest(absltest.TestCase):
"""Tests for the (non-private) threshold sampling class."""
def test_samples_high_weight_elements_ppswor(self):
    """Checks that an element with high weight is sampled when using PPSWOR.

    With the fixed threshold 1.0, PPSWOR keeps an element of weight w with
    probability 1 - exp(-w), so weight ln(FAILURE_PROBABILITY_INVERSE)
    makes a miss happen with probability 1/FAILURE_PROBABILITY_INVERSE.
    """
    sample = private_sampling.ThresholdSample(
        1.0, private_sampling.PpsworSamplingMethod)
    heavy_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)
    sample.process("a", heavy_weight)
    self.assertCountEqual(["a"], sample.elements.keys())
def test_samples_high_weight_elements_priority(self):
"""Checks that high-weight elements are sampled (using priority sampling).
For threshold t, an element with weight at least 1/t will always
be sampled, so this test should always succeed.
"""
s = private_sampling.ThresholdSample(
0.5, private_sampling.PrioritySamplingMethod)
s.process("a", 2.0)
s.process("b", 3.0)
self.assertCountEqual(["a", "b"], s.elements.keys())
def test_samples_close_to_inclusion_probability_ppswor(self):
"""Confirms sampling close to the correct inclusion probability (PPSWOR).
The test works as follows: We create an empty sample and process n (a large
number) elements into it, such that each element is sampled with
probability 0.5. Then, we check that between 0.49n and 0.51n elements were
sampled. The number n needed to ensure that the test fails with probability
at most 1/10000000 is computed using Chernoff bounds.
"""
# The range we allow around 0.5n
distance_from_half = 0.01
# The number of elements we use (computed using Chernoff bounds)
n = int((6.0 / (distance_from_half**2)) *
math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)
s = private_sampling.ThresholdSample(1.0,
private_sampling.PpsworSamplingMethod)
for i in range(n):
s.process(i, math.log(2.0, math.e))
self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)
self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)
def test_samples_close_to_inclusion_probability_priority(self):
"""Confirms sampling close to the correct inclusion probability (priority).
The test works as follows: We create an empty sample and process n (a large
number) elements into it, such that each element is sampled with
probability 0.5. Then, we check that between 0.49n and 0.51n elements were
sampled. The number n needed to ensure that the test fails with probability
at most 1/10000000 is computed using Chernoff bounds.
"""
# The range we allow around 0.5n
distance_from_half = 0.01
# The number of elements we use (computed using Chernoff bounds)
n = int((6.0 / (distance_from_half**2)) *
math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)
s = private_sampling.ThresholdSample(
0.5, private_sampling.PrioritySamplingMethod)
for i in range(n):
s.process(i, 1.0)
self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)
self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)
def test_does_not_sample_twice_ppswor(self):
"""Checks that an exception is raised when processing the same key twice.
The exception is raised when we process a key that is already in the sample
(this event should not happen since we assume the data is aggregated).
To implement that, we start with an element with high weight (and is thus
sampled with high probability), and then try to add it again.
As in test_samples_high_weight_elements_ppswor, the test fails with
probability 1/10000000 (happens when the first element is not sampled).
"""
with self.assertRaises(ValueError):
s = private_sampling.ThresholdSample(
1.0, private_sampling.PpsworSamplingMethod)
s.process("a", math.log(FAILURE_PROBABILITY_INVERSE, math.e))
s.process("a", 1)
def test_does_not_sample_twice_priority(self):
"""Checks that an exception is raised when processing the same key twice.
The exception is raised when we process a key that is already in the sample
(this event should not happen since we assume the data is aggregated).
To implement that, we start with an element with high weight (that is
always sampled for priority sampling with this threshold), and then try to
add it again.
See test_samples_high_weight_elements_priority for why the first element
is always sampled.
"""
with self.assertRaises(ValueError):
s = private_sampling.ThresholdSample(
0.5, private_sampling.PrioritySamplingMethod)
s.process("a", 2.0)
s.process("a", 0.1)
def test_does_not_sample_negligible_weight_ppswor(self):
"""Checks that a very low weight element is not sampled (with PPSWOR).
For the fixed threshold 1.0, an element with weight w is sampled with
probability 1-exp(-w). For this test to fail with probability 1/10000000,
we add an element with weight ln(10000000/(10000000 - 1)) and check that the
element was not sampled.
"""
s = private_sampling.ThresholdSample(1.0,
private_sampling.PpsworSamplingMethod)
s.process(
"a",
math.log(
FAILURE_PROBABILITY_INVERSE / (FAILURE_PROBABILITY_INVERSE - 1),
math.e))
self.assertEmpty(s.elements)
def test_does_not_sample_negligible_weight_priority(self):
"""Checks that a very low weight element is not sampled (with priority).
For the fixed threshold 1.0, an element with weight w is sampled with
probability min{w,1}. For this test to fail with probability 1/10000000, we
add an element with weight 1/10000000 and check that the element was not
sampled.
"""
s = private_sampling.ThresholdSample(
1.0, private_sampling.PrioritySamplingMethod)
s.process("a", 1.0 / FAILURE_PROBABILITY_INVERSE)
self.assertEmpty(s.elements)
def test_estimate_statistics_ppswor(self):
"""Checks the estimate for the full statistics (using PPSWOR).
We check that the function that estimates the full statistics (sum of all
weights) on a dataset that contains one element which is sampled with
probability 1-1/10000000 (as in test_samples_high_weight_elements_ppswor).
We compare the output of estimate_statistics with the estimate we should
get when the element is sampled. Therefore, the test should fail with
probability 1/10000000 (when the element is not sampled).
"""
s = private_sampling.ThresholdSample(1.0,
private_sampling.PpsworSamplingMethod)
element_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)
s.process("a", element_weight)
sampling_probability = (FAILURE_PROBABILITY_INVERSE -
1) / FAILURE_PROBABILITY_INVERSE
self.assertEqual(s.estimate_statistics(),
element_weight / sampling_probability)
def test_estimate_statistics_priority(self):
"""Checks the estimate for the full statistics (using priority sampling).
We check the function that estimates the full statistics (sum of all
weights) on a dataset where all the elements are sampled with probability
1.0. As a result, the estimate for the statistics should be exactly
accurate.
As in test_samples_high_weight_elements_priority, the elements are sampled
since for threshold t, an element with weight at least 1/t will always be
sampled.
"""
s = private_sampling.ThresholdSample(
0.5, private_sampling.PrioritySamplingMethod)
s.process("a", 2.0)
s.process("b", 3.0)
self.assertEqual(s.estimate_statistics(), 5.0)
class PrivateThresholdSampleTest(parameterized.TestCase):
  """Tests for the private threshold sampling classes."""

  @parameterized.parameters(
      itertools.product([
          private_sampling.PrivateThresholdSampleKeysOnly,
          private_sampling.PrivateThresholdSampleWithFrequencies
      ], [
          private_sampling.PpsworSamplingMethod,
          private_sampling.PrioritySamplingMethod
      ]))
  def test_low_delta_weight_one_not_sampled(self, sampling_class,
                                            sampling_method):
    """Checks that for very low delta, an element with weight 1 is not sampled.

    The motivation for that test is that the probability of including a key with
    weight 1 in a private sample can be at most delta (even if the threshold is
    high and without privacy the key is supposed to be included with high
    probability). This test fails with probability at most 1/10000000 (delta).

    Args:
      sampling_class: The private sampling class to be tested
      sampling_method: The underlying sampling method
    """
    s = sampling_class(
        threshold=100,
        eps=0.1,
        delta=1.0 / FAILURE_PROBABILITY_INVERSE,
        sampling_method=sampling_method)
    s.process(1, 1)
    self.assertEmpty(s.elements)

  @parameterized.parameters(
      itertools.product([
          private_sampling.PrivateThresholdSampleKeysOnly,
          private_sampling.PrivateThresholdSampleWithFrequencies
      ], [(private_sampling.PpsworSamplingMethod, math.log(2.0, math.e)),
          (private_sampling.PrioritySamplingMethod, 0.5)]))
  def test_high_delta_similar_to_threshold_dist(self, sampling_class,
                                                sampling_method_and_threshold):
    """Checks that for delta=1.0, private sampling is similar to non-private.

    This test is for PPSWOR and is similar to
    ThresholdSampleTest.test_samples_close_to_inclusion_probability_ppswor and
    ThresholdSampleTest.test_samples_close_to_inclusion_probability_priority.
    The motivation is that when delta is 1.0, privacy does not add constraints,
    so we can test the inclusion probability of elements in the same way we used
    for non-private sampling.

    Args:
      sampling_class: The private sampling class to be tested
      sampling_method_and_threshold: A tuple of the underlying sampling method
        and the threshold to be used
    """
    sampling_method, threshold = sampling_method_and_threshold
    # The range we allow around 0.5n
    distance_from_half = 0.01
    # The number of elements we use (computed using Chernoff bounds)
    n = int((6.0 / (distance_from_half**2)) *
            math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)
    s = sampling_class(
        threshold=threshold,
        eps=0.1,
        delta=1.0,
        sampling_method=sampling_method)
    for i in range(n):
      s.process(i, 1)
    self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)
    self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)

  @parameterized.parameters(
      itertools.product([
          private_sampling.PrivateThresholdSampleKeysOnly,
          private_sampling.PrivateThresholdSampleWithFrequencies
      ], [
          private_sampling.PpsworSamplingMethod,
          private_sampling.PrioritySamplingMethod
      ]))
  def test_high_delta_sample_stays_the_same(self, sampling_class,
                                            sampling_method):
    """Makes a non-private sample private, and checks it is the same (delta=1).

    This test checks the functions that create a private sample form an existing
    non-private threshold sample. When delta is 1.0, privacy does not add
    constraints, so the new private sample should contain the same elements as
    the non-private sample.

    Args:
      sampling_class: The private sampling class to be tested
      sampling_method: The underlying sampling method
    """
    s = private_sampling.ThresholdSample(0.5, sampling_method)
    for i in range(2000):
      s.process(i, 1)
    private_priority_sample = sampling_class.from_non_private(
        s, eps=0.1, delta=1.0)
    self.assertCountEqual(s.elements.keys(), private_priority_sample.elements)

  def test_valid_inclusion_probabilities(self):
    """Sanity checks on the inclusion probabilities in a private sample.

    This test contains various checks on the inclusion probabilities computed by
    the private sampling class that only returns keys:
    1. When delta is low (0.5**30), the inclusion probability of an element with
       frequency 1 is delta.
    2. When delta is 1.0, the inclusion probability is the same as in a
       non-private sample.
    3. Inclusion probabilities are between 0.0 and 1.0, and are nondecreasing in
       the frequency.
    """
    self.assertEqual(
        private_sampling.PrivateThresholdSampleKeysOnly(
            threshold=1, eps=0.1, delta=0.5**30).compute_inclusion_prob(1),
        0.5**30)
    self.assertEqual(
        private_sampling.PrivateThresholdSampleKeysOnly(
            threshold=0.5,
            eps=0.1,
            delta=1.0,
            sampling_method=private_sampling.PrioritySamplingMethod)
        .compute_inclusion_prob(1), 0.5)
    s = private_sampling.PrivateThresholdSampleKeysOnly(
        threshold=1, eps=0.1, delta=0.5**10)
    inclusion_prob = [s.compute_inclusion_prob(i) for i in range(0, 1000, 10)]
    for x in inclusion_prob:
      self.assertGreaterEqual(x, 0.0)
      self.assertLessEqual(x, 1.0)
    # Monotonicity: higher frequency must never decrease inclusion probability.
    for i in range(len(inclusion_prob) - 1):
      self.assertGreaterEqual(inclusion_prob[i + 1], inclusion_prob[i])

  def test_valid_reported_frequency_distribution(self):
    """Checks that the distribution of reported frequencies is valid.

    Computes the distribution of frequencies that are reported (when computing
    a private sample) and checks that it is valid: all probabilities are between
    0 and 1, and they sum up to 1.
    """
    s = private_sampling.PrivateThresholdSampleWithFrequencies(
        threshold=0.5, eps=0.1, delta=0.5**20)
    freq_dists = [
        s.compute_reported_frequency_dist(i) for i in range(100, 1001, 100)
    ]
    for dist in freq_dists:
      self.assertAlmostEqual(sum(dist.values()), 1.0)
      for x in dist.values():
        self.assertGreaterEqual(x, 0.0)
if __name__ == "__main__":
absltest.main()
|
990,027 | e8e48f97456d196f8e1546ad024dfac731137270 | import datetime
import factory
import factory.fuzzy
import pytz
from foosball.users.tests.factories import UserFactory
class TeamFactory(factory.django.DjangoModelFactory):
    """Factory for games.Team with a fuzzed score and two default players."""

    # Random integer score in [0, 10].
    score = factory.fuzzy.FuzzyInteger(10)

    @factory.post_generation
    def players(self, create, extracted, **kwargs):
        """Attach players after the Team instance exists (M2M needs a pk)."""
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of players was passed in, use them
            for player in extracted:
                self.players.add(player)
            return
        # Default: two freshly created users per team.
        self.players.add(UserFactory())
        self.players.add(UserFactory())

    class Meta:
        model = 'games.Team'
class GameFactory(factory.django.DjangoModelFactory):
    """Factory for games.Game with a recent play time and two teams."""

    # Timezone-aware random datetime within the last 8 weeks.
    played_at = factory.fuzzy.FuzzyDateTime(
        datetime.datetime.utcnow().replace(tzinfo=pytz.utc) - datetime.timedelta(weeks=8))
    # Two teams linked back to this game via the reverse 'game' relation.
    membership1 = factory.RelatedFactory(TeamFactory, 'game')
    membership2 = factory.RelatedFactory(TeamFactory, 'game')

    class Meta:
        model = 'games.Game'
|
990,028 | 889411af9586f6a7375fcff7809645c69ae5527e | # You are given a char array representing tasks CPU need to do. It contains capital letters A to Z where each letter represents a different task. Tasks could be done without the original order of the array. Each task is done in one unit of time. For each unit of time, the CPU could complete either one task or just be idle.
# However, there is a non-negative integer n that represents the cooldown period between two same tasks (the same letter in the array), that is that there must be at least n units of time between any two same tasks.
# You need to return the least number of units of times that the CPU will take to finish all the given tasks.
class Solution:
    def leastInterval(self, tasks: "List[str]", n: int) -> int:
        """Return the minimum number of time units to finish all tasks.

        Identical tasks must be separated by at least `n` idle (or other-task)
        units. Classic frequency argument: lay out (max_freq - 1) full frames
        of length (n + 1), then one extra slot for every task tied at the
        maximal frequency. If there are enough distinct tasks to fill all
        idle slots, the answer is simply len(tasks).

        Args:
            tasks: task identifiers (capital letters in the original problem).
            n: cooldown between two executions of the same task.

        Returns:
            Least number of time units needed.
        """
        # Local import so the snippet is self-contained; the original relied
        # on `Counter` (and the `List` annotation) without importing them,
        # which raised NameError outside the LeetCode harness.
        from collections import Counter

        if not tasks:
            return 0
        freqs = Counter(tasks).values()
        max_freq = max(freqs)
        # How many distinct tasks share the maximal frequency.
        max_count = sum(1 for f in freqs if f == max_freq)
        framed = (max_freq - 1) * (n + 1) + max_count
        # When tasks are plentiful, no idle slots remain.
        return max(framed, len(tasks))
|
990,029 | d76bbd5847181f76f0609e5139d2cf185617b40e | """Helper to preprocess data."""
import pandas as pd
from sklearn.model_selection import train_test_split
class Preprocessor:
    """Wraps a DataFrame and exposes common preprocessing steps."""

    def __init__(self, df):
        """Keep a reference to the DataFrame being preprocessed."""
        self.df = df

    def remove_urequired_columns(self, unrequired_columns):
        """Drop the listed columns and rebind the trimmed DataFrame."""
        trimmed = self.df.drop(columns=unrequired_columns)
        self.df = trimmed

    def get_x_and_y(self, y_column):
        """Return (features, target): everything but `y_column`, and `y_column`."""
        features = self.df.drop(columns=[y_column])
        target = self.df[y_column]
        return features, target
|
990,030 | 67232fc8b46954395589e5264d2426d0b2db3ec7 | #!/usr/bin/python
# Import libraries
import RPi.GPIO as GPIO
import time
import datetime
from time import sleep
# Pinouts from RPi Zero 2 W
# 22 Panel on 24 LepiLED on
# 27 Panel off 23 LepiLED off
# Set up pin for latching relay FOR THE PANEL ON
# --- Pulse the latching relay (BCM pin 22) to switch the panel on ---
GPIO.setmode(GPIO.BCM)
# Latching Relay #2
GPIO.setup(22, GPIO.OUT)
# A short 0.1 s pulse is enough to flip a latching relay; it holds state
# afterwards with no power applied.
GPIO.output(22, 1)
sleep(0.1)
GPIO.output(22, 0)
# Release only this pin so concurrent scripts keep their own pins.
GPIO.cleanup(22)

# --- Append an entry to the logfile ---
nowdate = datetime.datetime.now()
nowsec = round(time.time())
curtim = nowdate.strftime("RPi9, Panel_On, %a %d %b %H:%M:%S %Y, ")
newlin = " \n"
# Context manager guarantees the file is closed even if the write fails
# (the original opened/closed manually with no error handling).
with open("/home/arducam/rpi9.csv", "a") as outfile:
    outfile.write(curtim + str(nowsec) + newlin)
# RPi1, camera, Wed 26 Apr 16:24:08 EDT 2023, 1682540648
|
990,031 | c26c030ad7948bbf2a68d40e512e77b22c3753b9 | def notas(*n, sit=False):
b = {}
b['Total'] = len(n)
b['Maior'] = max(n)
b['Menor'] = min(n)
b['Media'] = sum(n) / len(n)
if sit:
if b['Media'] > 7:
b['Situação'] = 'BOA'
elif 5 < b['Media'] < 7:
b['Situação'] = 'RAZOÁVEL'
elif r['Media'] < 5:
b['Situação'] = 'RUIM'
return b
resp = notas(9, 10, 5.5, 2.5, 8.5, sit=True)
print(resp) |
990,032 | b5f5aee098e7cb0b7a1469a361378df34b470b3d | #
# @lc app=leetcode id=18 lang=python3
#
# [18] 4Sum
#
class Solution:
    def fourSum(self, nums, target):
        """Return all unique quadruplets in `nums` that sum to `target`.

        Sorts the input, fixes the two smallest members (indices idx, jdx),
        then closes the remaining pair with a two-pointer scan. Duplicate
        values are skipped at each level so every quadruplet appears once.
        Note: `nums` is sorted in place (mutates the caller's list).
        """
        nums.sort()
        if len(nums) == 4:
            # Exactly four numbers: the whole array is the only candidate.
            if sum(nums)==target:
                return [nums]
            else:
                return []
        result = []
        for idx, i in enumerate(nums):
            # Need at least three elements after idx.
            if idx > len(nums) - 4:
                break
            if idx > 0 and i == nums[idx - 1]:
                continue  # skip duplicate first elements
            for jdx, j in enumerate(nums[idx + 1:]):
                jdx = idx + 1 + jdx  # rebase enumerate offset to an index into nums
                if jdx > len(nums) - 3:
                    break
                if jdx > idx + 1 and j == nums[jdx - 1]:
                    continue  # skip duplicate second elements
                left = jdx + 1
                right = len(nums) - 1
                while left < right:
                    tar_sum = nums[left] + nums[right] + j + i - target
                    if tar_sum == 0:
                        result.append([i, j, nums[left], nums[right]])
                        left += 1
                        # Skipping duplicates on `left` alone is sufficient:
                        # for fixed (i, j) each distinct left value matches
                        # at most one right value.
                        while left < right and nums[left] == nums[left - 1]:
                            left += 1
                    elif tar_sum > 0:
                        right -= 1
                    elif tar_sum < 0:
                        left += 1
                        while left < right and nums[left] == nums[left - 1]:
                            left += 1
        return result
if __name__ == "__main__":
print(Solution().fourSum([-1,-5,-5,-3,2,5,0,4], -7))
|
990,033 | bff3b950a4db58ce5977ee80d0a617c53d7e88ce | from django.conf.urls import url
from django.contrib import admin
from .views import (
workload_list,
workload_create,
workload_update,
workload_delete,
workload_report,
workload_export,
detail,
sum_report,
)
# Route table for the workload app. The `name=` values are used for
# reverse() / {% url %} lookups elsewhere in the project.
urlpatterns = [
    url( r'^$', workload_list, name='list'),
    url( r'^create/$', workload_create,name='create'),
    url( r'^report/$', workload_report,name='report'),
    url( r'^detail/$', detail,name='detail'),
    url( r'^export/$', workload_export,name='export'),
    # Object-specific routes capture the primary key as <id>.
    url( r'^(?P<id>\d+)/edit/$', workload_update, name='update'),
    url( r'^(?P<id>\d+)/delete/$', workload_delete, name='delete'),
    url( r'^api/report/$',sum_report,name='sum_report'),
]
990,034 | 32f86269149ffa9e1d692ddf1fa6e480db2ad5fc | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import tempfile
from django.conf import settings
from PIL import Image, ImageDraw, ImageFont
def resize(image, width=None, height=None):
    """Return `image` resized to the requested dimensions.

    When only one of width/height is given, the other is derived so the
    aspect ratio is preserved. When both are given, both are applied as-is.

    Fixes two bugs in the original: imageWidth/imageHeight were read before
    ever being assigned (UnboundLocalError on every call), and the
    both-arguments branch assigned `width` to the height.
    """
    imageWidth, imageHeight = image.size
    if width is None and height is not None:
        # Scale width proportionally to the requested height.
        imageWidth = (imageWidth * height) / imageHeight
        imageHeight = height
    elif width is not None and height is None:
        # Scale height proportionally to the requested width.
        imageHeight = (imageHeight * width) / imageWidth
        imageWidth = width
    elif width is not None and height is not None:
        imageWidth = width
        imageHeight = height
    # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10 (use
    # Image.LANCZOS there); kept for compatibility with the Pillow version
    # this file targets.
    return image.resize((int(imageWidth), int(imageHeight)), Image.ANTIALIAS)
def create_resized_image_from_file(file, width=None, height=None):
    """Render `file` to a JPEG temporary file, optionally resized.

    Returns:
        (tmpfile, original_width, original_height) — the temporary file is
        rewound to position 0 so callers can read it immediately.
    """
    tmpfile = tempfile.TemporaryFile()
    image = Image.open(file)
    imageWidth, imageHeight = image.size
    # Resize when either dimension is requested. The original only checked
    # `width`, so height-only requests silently fell through to the
    # unresized branch even though resize() supports height-only scaling.
    if (width and width > 0) or (height and height > 0):
        resize(image, width, height).save(tmpfile, format='JPEG')
    else:
        image.save(tmpfile, format='JPEG', optimize=True, quality=35)
    tmpfile.seek(0)
    return tmpfile, imageWidth, imageHeight
|
990,035 | d0a1ab7a2fde1abff0d603a452091d8a34f8b0d3 | from django.template import Library
from native_tags.nodes import do_function, do_comparison, do_block
from native_tags.registry import load_module, AlreadyRegistered, register as native_register
from django.conf import settings
from django.utils.importlib import import_module
from os import listdir
register = Library()
# Comb through installed apps w/ templatetags looking for native tags
# Comb through installed apps w/ templatetags looking for native tags.
for app in settings.INSTALLED_APPS:
    if app == 'native_tags':
        continue
    try:
        mod = import_module('.templatetags', app)
    except ImportError:
        # App has no templatetags package; nothing to scan.
        continue
    # TODO: Make this hurt less
    for f in listdir(mod.__path__[0]):
        if f.endswith('.py') and not f.startswith('__'):
            try:
                load_module('%s.templatetags.%s' % (app, f.split('.py')[0]))
            except AlreadyRegistered:
                # NOTE(review): `break` abandons the rest of this app's
                # templatetag modules after the first duplicate; a
                # `continue` would skip only the offending module.
                # Confirm this is intentional.
                break
            except ImportError:
                continue

# Re-expose every registered native tag and filter through a regular
# Django template Library so {% load %} works unchanged.
for tag_name in native_register['comparison']:
    # Comparison tags are conventionally exposed with an "if" prefix.
    if not tag_name.startswith('if'):
        tag_name = 'if_%s' % tag_name
    register.tags[tag_name] = do_comparison
for tag_name in native_register['function']:
    register.tags[tag_name] = do_function
for tag_name in native_register['block']:
    register.tags[tag_name] = do_block
register.filters.update(native_register['filter'])
990,036 | c86903d684390fac3908ddac63ec93ce40352c9b | # Code is a modified version of 'Jose Julio @2009 "IMU_Razor9DOF.py"'
# This script needs VPhyton, pyserial and pywin modules
# First Install Python 2.6.4
# Install pywin from http://sourceforge.net/projects/pywin32/
# Install pyserial from http://sourceforge.net/projects/pyserial/files/
# Install Vphyton from http://vpython.org/contents/download_windows.html
#python 2.7
from visual import *
import serial
import string
import math
import serial
connected = False
port = '/dev/tty.usbserial-1420'
baud = 115200
# Standard gravity; used below to convert raw accelerometer counts to m/s^2.
g = 9.834
# timeout=0 -> non-blocking reads; converti() maps empty reads to 0.
serial_port = serial.Serial(port, baud, timeout=0)
from time import time,sleep

deg2rad = 3.141592/180.0
#gennaro ser = serial.Serial(port='COM9',baudrate=115200,timeout=1)

# Main scene: 3-D view of the IMU platform.
scene=display(title="JB Robotics IMU")
scene.range=(1.2,1.2,1.2)
scene.forward = (1,0,-0.25)
scene.up=(0,0,1)

# Second scene (Roll, Pitch, Yaw): 2-D instrument panel.
scene2 = display(title='JB Robotics IMU',x=0, y=0, width=500, height=200,center=(0,0,0), background=(0,0,0))
scene2.range=(1,1,1)
scene.width=500
scene.y=200
scene2.select()

# Roll, Pitch, Yaw indicator needles (cylinders) and heading arrow.
cil_roll = cylinder(pos=(-0.4,0,0),axis=(0.2,0,0),radius=0.01,color=color.red)
cil_roll2 = cylinder(pos=(-0.4,0,0),axis=(-0.2,0,0),radius=0.01,color=color.red)
cil_pitch = cylinder(pos=(0.1,0,0),axis=(0.2,0,0),radius=0.01,color=color.green)
cil_pitch2 = cylinder(pos=(0.1,0,0),axis=(-0.2,0,0),radius=0.01,color=color.green)
arrow_course = arrow(pos=(0.6,0,0),color=color.cyan,axis=(-0.2,0,0), shaftwidth=0.02, fixedwidth=1)

# Roll, Pitch, Yaw labels plus compass points around the heading arrow.
label(pos=(-0.4,0.3,0),text="Roll",box=0,opacity=0)
label(pos=(0.1,0.3,0),text="Pitch",box=0,opacity=0)
label(pos=(0.55,0.3,0),text="Yaw",box=0,opacity=0)
label(pos=(0.6,0.22,0),text="N",box=0,opacity=0,color=color.yellow)
label(pos=(0.6,-0.22,0),text="S",box=0,opacity=0,color=color.yellow)
label(pos=(0.38,0,0),text="W",box=0,opacity=0,color=color.yellow)
label(pos=(0.82,0,0),text="E",box=0,opacity=0,color=color.yellow)
label(pos=(0.75,0.15,0),height=7,text="NE",box=0,color=color.yellow)
label(pos=(0.45,0.15,0),height=7,text="NW",box=0,color=color.yellow)
label(pos=(0.75,-0.15,0),height=7,text="SE",box=0,color=color.yellow)
label(pos=(0.45,-0.15,0),height=7,text="SW",box=0,color=color.yellow)
# Live numeric readouts, updated in the main loop.
L1 = label(pos=(-0.4,0.22,0),text="-",box=0,opacity=0)
L2 = label(pos=(0.1,0.22,0),text="-",box=0,opacity=0)
L3 = label(pos=(0.7,0.3,0),text="-",box=0,opacity=0)

# Main scene objects
scene.select()
# Reference axis (x,y,z)
arrow(color=color.green,axis=(1,0,0), shaftwidth=0.02, fixedwidth=1)
arrow(color=color.green,axis=(0,-1,0), shaftwidth=0.02 , fixedwidth=1)
arrow(color=color.green,axis=(0,0,-1), shaftwidth=0.02, fixedwidth=1)
# labels
label(pos=(0,0,0.8),text="JB Robotics IMU",box=0,opacity=0)
label(pos=(1,0,0),text="X",box=0,opacity=0)
label(pos=(0,-1,0),text="Y",box=0,opacity=0)
label(pos=(0,0,-1),text="Z",box=0,opacity=0)
# IMU object: the platform box, its center line, and its forward arrow.
platform = box(length=1, height=0.05, width=1, color=color.red)
p_line = box(length=1,height=0.08,width=0.1,color=color.yellow)
plat_arrow = arrow(color=color.green,axis=(1,0,0), shaftwidth=0.06, fixedwidth=1)
def converti(mbyte):
    """Return the integer value of a single byte read from the serial port.

    A non-blocking read can return an empty bytes object; that maps to 0 so
    callers can keep accumulating checksums without special-casing timeouts.
    """
    return ord(mbyte) if mbyte else 0
roll=0
pitch=0
yaw=0
counter = 0

# NOTE(review): the indentation of this loop was reconstructed from the
# protocol logic (the original formatting was lost) -- verify the nesting
# against the running script before relying on it.
while 1:
    # Leftover placeholder from an older ASCII "RPY:" protocol; the real
    # data is read from `serial_port` below.  `string.split` is a legacy
    # Python 2 idiom (the header says this script targets Python 2.7).
    line = 'RPY: 10 10 10' #ser.readline()
    line = line.replace("RPY: ","")
    words = string.split(line," ") # Fields split
    sleep(1)
    isU = serial_port.read()
    #print(isU)
    # Packet framing: header byte 'U' (0x55) followed by a type byte --
    # 'Q' (0x51) acceleration, 'R' (0x52) angular rate, 'S' (0x53) angles.
    # This matches the WitMotion-style serial IMU framing -- confirm against
    # the sensor's datasheet.
    if isU == b'U':
        isQ = serial_port.read()
        if isQ == b'Q':
            #print('found UQ')
            # Acceleration packet: little-endian 16-bit values per axis.
            AxL = converti(serial_port.read())
            AxH = converti(serial_port.read())
            AyL = converti(serial_port.read())
            AyH = converti(serial_port.read())
            AzL = converti(serial_port.read())
            AzH = converti(serial_port.read())
            TL = converti(serial_port.read())
            TH = converti(serial_port.read())
            # Checksum byte from the device (shadows the builtin `sum`).
            sum = converti(serial_port.read())
            # Scale: signed 16-bit full range maps to +/-16 g.
            ax =((AxH << 8)|AxL)/32768.0*16*g
            ay =((AyH << 8)|AyL)/32768.0*16*g
            az =((AzH << 8)|AzL)/32768.0*16*g
            T = ((TH << 8)|TL)/340+36.53
            Checksum = 85 + 81 + AxH + AxL + AyH + AyL + AzH + AzL + TH + TL
            # Low byte of the computed checksum, for comparison with `sum`.
            Lower = divmod(Checksum, 0x100) [1]
            #if sum == Lower:
            #    print( '%4.4f %4.4f %4.4f ' % (ax,ay,az))
            isU = serial_port.read()
            isR = serial_port.read()
            if isR == b'R':
                # Angular-rate packet; full range +/-2000 deg/s.
                WxL = converti(serial_port.read())
                WxH = converti(serial_port.read())
                WyL = converti(serial_port.read())
                WyH = converti(serial_port.read())
                WzL = converti(serial_port.read())
                WzH = converti(serial_port.read())
                TL = converti(serial_port.read())
                TH = converti(serial_port.read())
                sum = converti(serial_port.read())
                wx =((WxH << 8)|WxL)/32768.0*2000
                wy =((WyH << 8)|WyL)/32768.0*2000
                wz =((WzH << 8)|WzL)/32768.0*2000
                isU = serial_port.read()
                isS = serial_port.read()
                if isS == b'S':
                    # Orientation packet; angles scaled to +/-180 degrees.
                    RollL = converti(serial_port.read())
                    RollH = converti(serial_port.read())
                    PitchL = converti(serial_port.read())
                    PitchH = converti(serial_port.read())
                    YawL = converti(serial_port.read())
                    YawH = converti(serial_port.read())
                    TL = converti(serial_port.read())
                    TH = converti(serial_port.read())
                    sum = converti(serial_port.read())
                    Roll =((RollH << 8)|RollL)/32768.0*180
                    Pitch =((PitchH << 8)|PitchL)/32768.0*180
                    Yaw =((YawH << 8)|YawL)/32768.0*180
                    print( '%4.4f %4.4f %4.4f ' % (Roll,Pitch,Yaw))
                    counter = counter + 1
                    # Throttle the (slow) display update to every 1000th frame.
                    if (counter % 1000) == 0:
                        try:
                            roll = float(Roll)*deg2rad
                            pitch = float(Pitch)*deg2rad
                            yaw = float(Yaw)*deg2rad
                        except:
                            print("Invalid line")
                        # Rotate the platform model and the 2-D needles to
                        # match the latest roll/pitch/yaw.
                        axis=(cos(pitch)*cos(yaw),-cos(pitch)*sin(yaw),sin(pitch))
                        up=(sin(roll)*sin(yaw)+cos(roll)*sin(pitch)*cos(yaw),sin(roll)*cos(yaw)-cos(roll)*sin(pitch)*sin(yaw),-cos(roll)*cos(pitch))
                        platform.axis=axis
                        platform.up=up
                        platform.length=1.0
                        platform.width=0.65
                        plat_arrow.axis=axis
                        plat_arrow.up=up
                        plat_arrow.length=0.8
                        p_line.axis=axis
                        p_line.up=up
                        cil_roll.axis=(0.2*cos(roll),0.2*sin(roll),0)
                        cil_roll2.axis=(-0.2*cos(roll),-0.2*sin(roll),0)
                        cil_pitch.axis=(0.2*cos(pitch),0.2*sin(pitch),0)
                        cil_pitch2.axis=(-0.2*cos(pitch),-0.2*sin(pitch),0)
                        arrow_course.axis=(0.2*sin(yaw),0.2*cos(yaw),0)
                        L1.text = str(float(Roll))
                        L2.text = str(float(Pitch))
                        L3.text = str(float(Yaw))
|
990,037 | 3d05d365e01d2597199e608abdc6162245c1b6a9 | # this will serve as the database, where we will connect to the file system
# and fetch from the file system
# what we will do in this database
# create a record
# update record
# read record
# delete record
# all of this is called the CRUD operation
# see how the functions below line up with each CRUD operation
# we also need to search through the record to find a user
def create(account_number, user_details):
    """Create a user record file named <account_number>.txt (CRUD 'C').

    The record is created exclusively ('x' mode), so an existing account
    is never overwritten.

    Args:
        account_number: number used as the record's file name.
        user_details: details written to the file (stringified).

    Returns:
        True when the record was created, False when it already existed.

    Note: the original fell off the end of the success path and returned
    None; the trailing `return completion_state` was commented out.
    """
    completion_state = False
    try:
        f = open('data/user_record/' + str(account_number) + '.txt', 'x')
    except FileExistsError:
        print('User already exists')
        return completion_state
    else:
        # Context manager guarantees the file is closed even if the
        # write raises.  Lists cannot be written directly, hence str().
        with f:
            f.write(str(user_details))
        completion_state = True
    return completion_state
def read(user_account_number):
    """Read a user record (CRUD 'R') -- not implemented yet."""
    print('read user record')
    # find the user with the account number
    # fetch contents of the file
    # return True
def update(user_account_number):
    """Update a user record (CRUD 'U') -- not implemented yet."""
    print('update user record')
    # find user with the account number
    # go into the folder, search to the file name corresponding to the account number
    # once found, we can fetch the contents of the file
    # update the contents of the file
    # then save the file
    # return True
def delete(user_account_number):
    """Delete a user record (CRUD 'D') -- not implemented yet."""
    print('delete user record')
    # find the user with the account number
    # delete the user record (file)
    # return True
def find(user_account_number):
    """Locate a user record by account number -- not implemented yet."""
    print('find user')
    # find user record in the data folder
create(6005439815, ['Dixie', 'Sasu', 'dixie@gmail.com', 'carter', '200'])
# this is calling the function with the required arguments |
990,038 | 99f9e4def160187aa541895e7f202fed0e94f8cd | """Create the login parser."""
from __future__ import annotations
import typing
from awssso.console.login import login
if typing.TYPE_CHECKING:
import argparse
def arg_parser_login(
    subparsers: argparse._SubParsersAction,
    parent_parser: argparse.ArgumentParser,
    default_args: None | dict = None,
) -> dict:
    """Create the login parser."""
    login_parser = subparsers.add_parser("login", parents=[parent_parser])
    output_group = login_parser.add_mutually_exclusive_group()
    # The three output formats are mutually exclusive boolean flags.
    format_flags = (
        ("-e", "--export", "output credentials as environment variables"),
        (
            "-j",
            "--json",
            "output credentials in JSON format "
            "(see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html)",
        ),
        ("-c", "--shell", "output credentials in as shell commands"),
    )
    for short_flag, long_flag, help_text in format_flags:
        output_group.add_argument(
            short_flag,
            long_flag,
            action="store_true",
            default=False,
            help=help_text,
        )
    login_parser.add_argument(
        "-r",
        "--renew",
        action="store_true",
        default=False,
        help="ignore cached credentials and renew them",
    )
    login_parser.set_defaults(func=login)
    if not default_args:
        default_args = {}
    default_args.update(
        {
            "export": False,
            "json": False,
            "shell": False,
            "renew": False,
        },
    )
    return default_args
|
990,039 | 998041791144a53755cae38a3db404812d995e63 | # Generated by Django 2.2.8 on 2020-01-13 23:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration; prefer regenerating with
    `makemigrations` over hand-editing."""

    dependencies = [
        ('preproject', '0019_auto_20200113_2254'),
    ]

    operations = [
        migrations.AlterField(
            model_name='preproject',
            name='solution_criteria',
            # Short codes map to human-readable solution categories.
            field=models.CharField(choices=[('s', 'Standard'), ('m', 'Multiservice'), ('cm', 'Complex MRC > 150JT'), ('co', 'Complex OTC > 1M'), ('cn', 'Complex Non Product')], max_length=2),
        ),
    ]
|
990,040 | d9c80b0414cf1c2bc8b28ef2c988571a80c4fbeb | import numpy as np
def L_inf_prox(z, tau):
    """Proximal operator of tau * ||.||_inf evaluated at z.

    Reduces the largest-magnitude entries of z by a total of tau, averaging
    them down to a common level, then restores the original signs
    (Moreau decomposition with projection onto the tau-scaled L1 ball).

    Args:
        z: 1-D numpy array.
        tau: nonnegative scalar.

    Returns:
        1-D numpy array with the same shape as z.
    """
    p = z.shape[0]
    magnitudes = np.fabs(z)
    order = np.argsort(magnitudes)
    sorted_mags = np.sort(magnitudes)
    # Common level the top entries are reduced to; keep averaging in the
    # next-largest entry while it still exceeds the current level.
    level = sorted_mags[-1] - tau
    i = 1
    # Checking `i + 1 <= p` *before* indexing fixes an IndexError the
    # original hit for length-1 inputs (it evaluated zs[-2] first).
    while i + 1 <= p and level <= sorted_mags[-(i + 1)]:
        level = (level * i + sorted_mags[-(i + 1)]) / (i + 1.0)
        i += 1
    # If tau >= ||z||_1 the prox is exactly 0; without this clamp the
    # negative level was copysign-ed back, producing sign-flipped output.
    level = max(level, 0.0)
    sorted_mags[-i:] = level
    result = np.empty(p)
    result[order] = sorted_mags
    return np.copysign(result, z)
def slope_prox(z, tau):
    """Proximal operator for the SLOPE penalty -- not implemented yet.

    Currently a stub that always returns None; callers must not rely on it.
    """
    return None
def proj_positive(z):
    """Project z elementwise onto the nonnegative orthant."""
    nonnegative = z >= 0
    return np.where(nonnegative, z, 0)
def proj_svm(z, y):
    """Projection step used in the SVM dual.

    Entries whose shifted value z + y/2 lies within [-0.5, 0.5] are kept;
    all others are snapped to (sign(z + y/2) - y) / 2.
    """
    shifted = z + y / 2.0
    keep = np.fabs(shifted) <= 0.5
    snapped = (np.sign(shifted) - y) / 2.0
    return np.where(keep, z, snapped)
|
990,041 | eb7e2fe94e8e2938e50e2deb156d4c55337fecdf | # Generated by Django 3.0.6 on 2020-06-01 09:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (rename of the AddToCart FK field);
    prefer regenerating with `makemigrations` over hand-editing."""

    dependencies = [
        ('OnlineShoppingApp', '0009_addtocart'),
    ]

    operations = [
        migrations.RenameField(
            model_name='addtocart',
            old_name='AddToCart_fk',
            new_name='product',
        ),
    ]
|
990,042 | 1da447bf1bd4f586bfd1f1d9cf111680f6909c0b | #!/usr/bin/env python
from imports import *
from configs import *
def urlOK(url):
    """Return True when a HEAD request to *url* answers with HTTP 200."""
    response = requests.head(url)
    return response.status_code == 200
|
990,043 | 6de2c38a9a894ef2cef2e2ca7bbcd473a93c9130 |
from common import timestamp |
990,044 | e651f3ced0e1f8a41336b67d28c805820d31a167 | import pygame, images
from pygame.locals import *
class Symbol(pygame.sprite.Sprite):
    """A clickable toolbar sprite that triggers an editor action
    (placing doors/vents on the board, or clearing it)."""

    # Class-level group tracking every Symbol ever created.
    images = pygame.sprite.Group()

    def __init__(self, image, x, y, color=None, action="door"):
        pygame.sprite.Sprite.__init__(self)
        # `images` here resolves to the imported module, not the
        # class-level Group above (class attributes are not in method scope).
        image = images.Image(image, x, y, color)
        self.image = image
        # Make our top-left corner the passed-in location.
        self.rect = self.image.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.color = color
        self.clicked = False
        self.action = action
        Symbol.images.add(self)

    def update(self, screen):
        # Draw an optional solid background rectangle, then the sprite image.
        if self.color:
            pygame.draw.rect(screen, self.color, (self.rect.x, self.rect.y, self.image.image.get_width(), self.image.image.get_height()))
        screen.blit(self.image.image, (self.rect.x, self.rect.y))

    def mouse_over(self):
        # NOTE(review): `mouse_on` is never initialized in __init__, so it
        # only exists after the first hover event -- confirm callers guard
        # against AttributeError.
        self.mouse_on = True

    def mouse_off(self):
        self.mouse_on = False

    def do_action(self):
        """Handle a click: attach the matching placeable image to the mouse,
        or clear the board. `Game` is imported lazily to avoid a circular
        import at module load time."""
        self.clicked = True
        from game import Game
        image = None
        if self.action == "door":
            image = images.Image('images/small_door.png', self.rect.x, self.rect.y)
        elif self.action == "door_vertical":
            image = images.Image('images/small_door_vertical.png', self.rect.x, self.rect.y)
        elif self.action == "red_vent":
            image = images.Image('images/red_vent.png', self.rect.x, self.rect.y)
        elif self.action == "blue_vent":
            image = images.Image('images/blue_vent.png', self.rect.x, self.rect.y)
        elif self.action == "clear":
            Game.clear_board()
        if image:
            Game.MOUSE = image
|
990,045 | 19835de951e1fdbd38feca94269f68a51f247a5c | from typing import List, Optional
from pydantic import BaseModel
from datetime import datetime
class Songbase(BaseModel):
    """Shared song fields (input schema)."""
    name: str
    duration : int
    uploadTime: datetime
class Song(Songbase):
    """ORM-backed song schema (reads attributes from SQLAlchemy rows)."""
    class Config():
        orm_mode = True
class ShowSong(BaseModel):
    """Response schema for a song."""
    name: str
    duration: int
    uploadTime: datetime
    class Config():
        orm_mode = True
class PodcastBase(BaseModel):
    """Shared podcast fields (input schema)."""
    name: str
    duration: int
    uploadTime: datetime
    host: str
    participants : Optional[List[str]] = []
class Podcast(PodcastBase):
    """ORM-backed podcast schema."""
    class Config():
        orm_mode = True
class ShowPodcast(BaseModel):
    """Response schema for a podcast."""
    name: str
    duration: int
    uploadTime: datetime
    host: str
    participants : Optional[List[str]] = []
    class Config():
        orm_mode = True
class AudiobookBase(BaseModel):
    """Shared audiobook fields (input schema)."""
    title: str
    author: str
    narrator: str
    duration: int
    uploadTime: datetime
class Audiobook(AudiobookBase):
    """ORM-backed audiobook schema."""
    class Config():
        orm_mode = True
class ShowAudiobook(BaseModel):
    """Response schema for an audiobook."""
    title: str
    author: str
    narrator: str
    duration: int
    uploadTime: datetime
    class Config():
        orm_mode = True
|
990,046 | 6ee441d0759691e8b590f13a6ffb0337db52b164 | # 973. K Closest Points to Origin
# Time: O(len(points)*log(K))
# Space: O(log(K))
class Solution:
    def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
        """Return the K points nearest the origin (any order).

        Keeps a max-heap of size K keyed on negated squared distance, so the
        root is always the farthest point currently retained.
        """
        import heapq
        heap = []  # entries: (-squared_distance, point)
        for point in points:
            # Squared distance preserves the ordering, avoids float error, and
            # drops math.sqrt -- `math` was never imported in this file (bug).
            dist = point[0] ** 2 + point[1] ** 2
            if len(heap) < K:
                heapq.heappush(heap, (-dist, point))
            elif dist < -heap[0][0]:
                # Closer than the farthest kept point: swap in one O(log K) op.
                heapq.heapreplace(heap, (-dist, point))
        return [point for _, point in heap]
|
990,047 | 1b92355877ff74eb9ffa6b175115df7755b239e1 | cont = mulher = homem = 0
# Interactive survey: counts adults (>=18), registered men, and women over 20.
# The counters cont/homem/mulher are initialised above this loop.
while True:
    idade = int(input("Idade: "))
    sexo = " "
    p = ' '
    # Keep asking until the first typed character is M or F.
    while sexo not in "MF":
        sexo = str(input("Sexo [M/F]: ")).strip().upper()[0]
    # NOTE(review): the summary below says "mais de 18" but this counts >= 18.
    if idade >= 18:
        cont += 1
    if sexo == "M":
        homem += 1
    elif sexo == "F" and idade >= 20:
        mulher += 1
    # Keep asking until the first typed character is S or N.
    while p not in "SN":
        p = str(input("Quer continuar? [S/N]: ")).strip().upper()[0]
    if p == 'N':
        break
print(f'Ao todo temos {cont} pessoas que tem mais de 18 anos;')
print(f"Ao todo temos {homem} homens cadastrados;")
print(f"Total de {mulher} mulheres tem mais de 20 anos")
|
990,048 | 17891d0b8c3b213fc10a2690a3bde7468d9d8785 | #
# complexity O(n)
def parity(x):
    """Return 1 if x has an odd number of set bits, else 0 (O(total bits))."""
    acc = 0
    while x:
        acc ^= x & 1
        x >>= 1
    return acc
# complexity O(k)
# x = 00101100, x-1 = 00101011
# x & (x-1) = 00101000
def parity_Ologk(x):
    """Parity via Kernighan's trick: each `x &= x - 1` clears the lowest set
    bit, so the loop runs once per set bit (O(k))."""
    flips = 0
    while x:
        flips ^= 1
        x &= x - 1
    return flips
def parity_OlogN(x):
    """Parity by XOR-folding halves: reduce 64 bits down to bit 0 in O(log N)."""
    for shift in (32, 16, 8, 4, 2, 1):
        x ^= x >> shift
    return x & 1
# Demo: 11 = 0b1011 has three set bits, so all three variants print 1.
# BUG FIX: parenthesized print(...) works under both Python 2 and Python 3;
# the bare `print x` statements were Python-2-only.
num = 11
print(parity(num))
print(parity_Ologk(num))
print(parity_OlogN(num))
990,049 | 3b6b9b3426c4aaf90c89b210386e036b9aaa4f38 | import pytest
from datetime import datetime
from decimal import Decimal
from b2c2.models import Balances, TradeResponse, SideEnum
def test_can_add_trade_to_balance():
    """Buys credit and sells debit a balance; unknown instruments are rejected."""
    start_balance = Balances(__root__={'BTC': Decimal(0)})
    trade = TradeResponse(
        created=datetime.now(),
        instrument='BTCUSD.SPOT',
        side=SideEnum.buy,
        quantity=1,
        price=1,
        trade_id='',
        origin='',
        rfq_id='',
        user='',
        order='',
        executing_unit='',
    )
    # a buy credits the asset
    start_balance += trade
    assert start_balance['BTC'] == 1
    # the mirroring sell takes it back to zero
    trade.side = SideEnum.sell
    start_balance += trade
    assert start_balance['BTC'] == 0
    # assigning an unrecognised instrument fails validation before the add runs
    with pytest.raises(ValueError):
        trade.instrument = 'unknown'
        start_balance += trade
|
990,050 | 1c80550ee95b9eeddd154de7251d4943cd78ea2a | # -*- coding: utf-8 -*-
from odoo import models, fields
class FruteriaSocio(models.Model):
    """Fruit-shop member, delegating contact data to res.partner."""
    _name = 'fruteria.socio'
    _description = 'Socio fruteria'
    _inherits = {'res.partner': 'partner_id'}
    # Member picture mirrored from the linked partner record.
    imagen_socio = fields.Binary('Imagen', related='partner_id.image')
    # BUG FIX: the Odoo keyword is `required`, not `require` -- the misspelled
    # kwarg was silently ignored, so these fields were never enforced.
    id_socio = fields.Integer('Id_Socio', required=True)
    partner_id = fields.Many2one('res.partner', ondelete='cascade', required=True)
    # NOTE(review): inverse_name must be a Many2one field named `id_socio` on
    # fruteria.peticion -- confirm against that model.
    ids_peticion = fields.One2many('fruteria.peticion', inverse_name='id_socio')
990,051 | 2d76f1f6a3578897586520113dcaccbd11f80fa2 | import re
# Scratchpad of small Python idioms, each printed as a demo.
a=5
b=6
# swap 2 variables with tuple unpacking
a,b = b,a
print ('swap: ', a, b)
# delete duplicated elements in an array (order is NOT preserved by set())
old_list = [1,1,1,3,4]
new_list = list(set(old_list))
print ('uniq a array: ', old_list, new_list)
# reverse a string via slicing
s = 'abcde'
ss = s[::-1]
print ('reverse: ', s, ss)
# make a dict using 2 related arrays
names = ['lucien', 'joe']
ages = [23, 40]
m = dict(zip(names,ages))
print ('make dict', m)
# how to connect many strings, why: using '+' allocates memory every time
fruits = ['apple', 'banana']
result = ''.join(fruits)
print ('concatenate array of strings instead of using plus: ', result)
# iterate in reverse (NOTE: `list` shadows the builtin of the same name)
list=[1,2,3,4,5]
list.reverse()
try:
    for x in list:
        print(x,end=' ') # add , so it won't start a new line, in python3: print(x, end="")
finally:
    list.reverse()
print ('Restore list', list)
import re
s = '<html><head><title>Title</title>'
# greedy match: grabs from the first '<' to the last '>'
search = re.search('<.*>', s)
print('search: ',search.group())
# non-greedy match: each tag separately
findall = re.findall('<.*?>', s)
print('find all: ',findall)
print('match: ', re.match('<.*>', s).group())
# Exception handling
# while True:
#     try:
#         x = int(input("Please enter a number: "))
#         break
#     except ValueError:
#         print("Oops! That was no valid number. Try again...")
list=[3,4,6,'r','d',5,6,5]
for i in list:
    try:
        print(int(i))
    except ValueError as e:
        print("Oops! That was not a valid number.",e)
    else:
        # runs only when no exception was raised
        print('done')
import sys
try:
    f = open('myfile.txt')
    s = f.readline()
    i = int(s.strip())
except OSError as err:
    print("OS error: {0}".format(err))
except ValueError:
    print("Could not convert data to an integer.")
except:
    # catch-all for the unexpected: report, then re-raise
    print("Unexpected error:", sys.exc_info()[0])
    raise
|
990,052 | 0606367a5851704cad332cf9bac681b7f97b1bad | #dataBase.py
import os
import json
import time
#import dataset
import databases
#import sqlite3
from sqlalchemy import *
from sqlalchemy.ext import mutable
from sqlalchemy import orm
from sqlalchemy.ext import declarative
from Server.server import cnf
class JsonEncodedDict(TypeDecorator):
    """Enables JSON storage by encoding and decoding on the fly."""
    impl = String
    def process_bind_param(self, value, dialect):
        # Python value -> JSON text on the way into the DB.
        return json.dumps(value)
    def process_result_value(self, value, dialect):
        # JSON text -> Python value on the way out.
        # NOTE(review): json.loads(None) raises TypeError for NULL columns --
        # confirm NULLs cannot occur here.
        return json.loads(value)
# Track in-place mutations of JsonEncodedDict columns so they get persisted.
mutable.MutableDict.associate_with(JsonEncodedDict)
# Separate declarative bases: one per physical database ('tools' vs 'admin').
Base = declarative.declarative_base()
AdminBase = declarative.declarative_base()
class Database:
    """Wires a SQLAlchemy engine, a scoped session and an async `databases`
    connection for one of the configured databases ('tools' or 'admin')."""
    name:str = None               # resolved database URI
    default:str = 'tools'         # database used when none is requested
    user:str = None               # OS login of the current user
    location:str = os.path.realpath("D:/GmtDB")
    def __init__(self, name:str=None):
        """Connect to database `name` (falling back to `default`) and open a session."""
        # NOTE: set_user / create_db_session are properties, so bare attribute
        # access executes them.
        self.set_user
        self.connect_db(name if name else self.default)
        self.create_db_session
    @property
    def set_user(self):
        """Record the OS login name of the current user."""
        self.user = os.getlogin()
    def connect_db(self, dbn:str=None):
        """Resolve `dbn` to its URI and create sync + async connections."""
        if dbn:
            self.name = cnf.SQL_DATABASE_URI.get(dbn)
            self.engine = create_engine(self.name, echo=True)
            # an async connector
            self.async_connection = databases.Database(self.name)
    @property
    def close_connection(self):
        self.async_connection.disconnect()
    @property
    def create_tools_table(self):
        """Create all tables registered on the tools declarative base."""
        Base.metadata.bind = self.engine
        Base.query = self.session.query_property()
        Base.metadata.create_all()
    @property
    def create_admin_table(self):
        """Create all tables registered on the admin declarative base."""
        AdminBase.metadata.bind = self.engine
        AdminBase.query = self.session.query_property()
        AdminBase.metadata.create_all()
    @property
    def create_db_session(self):
        """Build the scoped session bound to this engine."""
        # BUG FIX: `autoflush=false` passed SQLAlchemy's false() SQL construct
        # (a truthy object) instead of the boolean False.
        self.session = orm.scoped_session(orm.sessionmaker(autocommit=False,
                                                           autoflush=False,
                                                           bind=self.engine))
    def __repr__(self):
        if self.name:
            return f"GM Tools Sqlite Database {self.name}"
        # BUG FIX: this branch referenced the nonexistent attribute `self.dbs`
        # and raised AttributeError.
        return f"GM Tools Default Sqlite Database {self.default}"
class Client(AdminBase):
    """A purchaser/client record in the admin database."""
    __tablename__ = 'client'
    id = Column(Integer, primary_key=True)
    client_id = Column(String, nullable=False)
    firstname = Column(String)
    lastname = Column(String)
    data = Column(JsonEncodedDict)
    def __init__(self, firstname:str=None, lastname:str=None, client_id:str=None, directive:str=None):
        """Create ('new'), look up by name ('search'), or load by client_id."""
        if firstname and lastname and directive=='new':
            self.firstname = firstname
            self.lastname = lastname
            self.generate_id()
            self.connect_db
        if firstname and lastname and directive=='search':
            self.firstname = firstname
            self.lastname = lastname
            self.connect_db
            # BUG FIX: the f-string had no placeholders and always searched
            # for the literal text "firstname-lastname".
            self.search(f"{firstname}-{lastname}")
        # Search for the purchaser's data by name method
        if client_id:
            self.client_id = client_id
            self.connect_db
        # Get the purchaser's data by id method
        # Connect the database anyway
        self.connect_db
    def generate_id(self):
        """Derive a deterministic client id from the client's name."""
        if self.firstname and self.lastname:
            self.client_id = cnf.keygen.name_id(self.firstname, self.lastname)
    @property
    def connect_db(self):
        self.db = Database(name='admin')
    @property
    async def list_purchasers(self):
        """Return an index (id + display name) of all clients; caches full rows too."""
        query = """SELECT * FROM client"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.purchasers_list:list = []
        self.purchasers_index:list = []
        for item in results:
            self.purchasers_list.append({
                "id": item[1],
                "firstname": item[2],
                "lastname": item[3],
                "data": json.loads(item[4])
            })
            self.purchasers_index.append(
                {
                    "id": item[1],
                    "name": f"{item[2]} {item[3]}"
                }
            )
        return self.purchasers_index
    @property
    async def get(self):
        """Fetch this client's row by its own client_id."""
        query = "SELECT * FROM client WHERE client_id = :client_id"
        await self.db.async_connection.connect()
        result = await self.db.async_connection.fetch_one(query=query, values={"client_id": self.client_id})
        await self.db.async_connection.disconnect()
        return result
    async def get_by_id(self, id):
        """Fetch an arbitrary client row by id."""
        query = "SELECT * FROM client WHERE client_id = :client_id"
        await self.db.async_connection.connect()
        result = await self.db.async_connection.fetch_one(query=query, values={"client_id": id})
        await self.db.async_connection.disconnect()
        return result
    def search(self, term:str=None):
        """Split a 'firstname-lastname' term; the lookup itself is still a stub."""
        if term:
            # BUG FIX: `term.spilt('-')` was a typo and raised AttributeError.
            term = term.split('-')
            # Implement search mechanism.
            return term
        return {"message": "You did not provide a Search Term."}
    @property
    def save(self):
        """Persist this row through the scoped session."""
        self.db.session.add(self)
        self.db.session.commit()
class Supplier(AdminBase):
    """A supplier record in the admin database."""
    __tablename__ = 'supplier'
    id = Column(Integer, primary_key=True)
    supplier_id = Column(String, nullable=False)
    name = Column(String)
    data = Column(JsonEncodedDict)
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.generate_id()
            self.connect_db
        self.connect_db
    def generate_id(self):
        """Derive the supplier id from the configured title and supplier name."""
        if self.name:
            self.supplier_id = cnf.keygen.name_id(cnf.TITLE, self.name)
    @property
    def connect_db(self):
        self.db = Database(name='admin')
    async def list_suppliers(self):
        """Return an index (id + display name) of all suppliers; caches full rows too."""
        query = """SELECT * FROM supplier"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.suppliers_list:list = []
        self.suppliers_index:list = []
        for item in results:
            self.suppliers_list.append({
                "id": item[1],
                "name": item[2],
                "data": json.loads(item[3])
            })
            self.suppliers_index.append(
                {
                    "id": item[1],
                    "name": f"{item[2]} {item[3]}"
                }
            )
        # BUG FIX: `self.supplers_index` was a typo and raised AttributeError.
        return self.suppliers_index
    @property
    def save(self):
        self.db.session.add(self)
        self.db.session.commit()
class SalesAgent(AdminBase):
    """A sales agent record in the admin database."""
    __tablename__ = 'sales-agent'
    id = Column(Integer, primary_key=True)
    agent_id = Column(String, nullable=False)
    firstname = Column(String)
    lastname = Column(String)
    data = Column(JsonEncodedDict)
    def __init__(self, firstname:str=None, lastname:str=None):
        if firstname and lastname:
            self.firstname = firstname
            self.lastname = lastname
            self.generate_id()
            self.connect_db
        self.connect_db
    def generate_id(self):
        """Derive the agent id from the agent's name."""
        self.agent_id = cnf.keygen.name_id(self.firstname, self.lastname)
    @property
    def connect_db(self):
        self.db = Database(name='admin')
    async def list_agents(self):
        """Return an index (id + display name) of all agents; caches full rows too."""
        # BUG FIX: the hyphenated table name must be quoted -- an unquoted
        # `sales-agent` parses as a subtraction expression in SQL.
        query = 'SELECT * FROM "sales-agent"'
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.sales_agents_list:list = []
        self.sales_agents_index:list = []
        for item in results:
            self.sales_agents_list.append({
                "id": item[1],
                "firstname": item[2],
                "lastname": item[3],
                "data": json.loads(item[4])
            })
            self.sales_agents_index.append(
                {
                    "id": item[1],
                    "name": f"{item[2]} {item[3]}"
                }
            )
        # BUG FIX: the index was built but never returned (implicit None),
        # unlike the sibling list_* methods.
        return self.sales_agents_index
    @property
    def save(self):
        self.db.session.add(self)
        self.db.session.commit()
#-------- TOOL MODEL TYPES -------
class Category(Base):
    """Lookup table of tool categories (tools DB)."""
    __tablename__ = 'category'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.connect_db
        # connect_db is a property, so bare attribute access executes it.
        self.connect_db
    @property
    def connect_db(self):
        self.db = Database(name='tools')
    async def list_category(self):
        """Fetch every category row via the async connection."""
        query = """SELECT * FROM category"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        return results
    @property
    def save(self):
        """Persist this row through the scoped session."""
        self.connect_db
        self.db.session.add(self)
        self.db.session.commit()
class BatteryTool(Base):
    """A battery-powered tool in the tools database."""
    __tablename__ = 'battery_tool'
    id = Column(Integer, primary_key=True)
    tool_id = Column(String(12), nullable=False)
    name = Column(String(100))
    brand = Column(String(60))
    type = Column(String(10))
    specification = Column(JsonEncodedDict)
    price = Column(JsonEncodedDict)
    data = Column(JsonEncodedDict)
    # Base URL under which this category's images are served.
    image_url = "/tools/static/images/Batterytools/"
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.type = 'battery'
            # generate_id / connect_db are properties; bare access executes them.
            self.generate_id
            self.connect_db
        self.connect_db
    @property
    def generate_id(self):
        """Derive tool_id from the name, prefixed 'B' for battery."""
        if self.name:
            self.tool_id = cnf.keygen.name_id('B', self.name)
    async def list_tools(self):
        """Return an index of all battery tools; caches full rows on self.inventory."""
        query = """SELECT * FROM battery_tool"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.inventory:list = []
        self.tool_index:list = []
        for item in results:
            self.inventory.append({
                "id": item[1],
                "name": item[2],
                "brand": item[3],
                "type": item[4],
                "image_url": self.image_url,
                "specification": json.loads(item[5]),
                "price": json.loads(item[6]),
                "data": json.loads(item[7])
            })
            self.tool_index.append(
                {
                    "id": item[1],
                    "name": f"{item[2]} {item[3]}",
                    "brand": item[3],
                    "image_url": self.image_url,
                    "images": json.loads(item[7]).get('image_urls', None)
                }
            )
        return self.tool_index
    @property
    def connect_db(self):
        self.db = Database(name='tools')
    def getTool(self, id:str=None):
        # NOTE(review): the `id` argument is ignored and ALL rows are returned;
        # compare PowerTool.getTool which filters by tool_id -- confirm intent.
        return BatteryTool.query.all()
    @property
    def save(self):
        """Persist this row through the scoped session."""
        self.connect_db
        self.db.session.add(self)
        self.db.session.commit()
class PowerTool(Base):
    """A mains-powered tool in the tools database."""
    __tablename__ = 'power_tool'
    id = Column(Integer, primary_key=True)
    tool_id = Column(String, nullable=False)
    name = Column(String)
    brand = Column(String)
    type = Column(String)
    specification = Column(JsonEncodedDict)
    price = Column(JsonEncodedDict)
    data = Column(JsonEncodedDict)
    # Base URL under which this category's images are served.
    image_url = "/tools/static/images/Powertools/"
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.type = 'power'
            # generate_id / connect_db are properties; bare access executes them.
            self.generate_id
            self.connect_db
        self.connect_db
    @property
    def generate_id(self):
        """Derive tool_id from the name, prefixed 'P' for power."""
        if self.name:
            self.tool_id = cnf.keygen.name_id('P', self.name)
    @property
    def connect_db(self):
        self.db = Database(name='tools')
    async def list_tools(self):
        """Return an index of all power tools; caches full rows on self.inventory."""
        query = """SELECT * FROM power_tool"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.inventory:list = []
        self.tool_index:list = []
        for item in results:
            self.inventory.append({
                "id": item[1],
                "name": item[2],
                "brand": item[3],
                "type": item[4],
                "image_url": self.image_url,
                "specification": json.loads(item[5]),
                "price": json.loads(item[6]),
                "data": json.loads(item[7])
            })
            self.tool_index.append(
                {
                    "id": item[1],
                    "name": f"{item[2]} {item[3]}",
                    "brand": item[3],
                    "image_url": self.image_url,
                    "images": json.loads(item[7]).get('image_urls', None)
                }
            )
        return self.tool_index
    @property
    def save(self):
        """Persist this row through the scoped session."""
        self.db.session.add(self)
        self.db.session.commit()
    def getTool(self, id:str=None):
        """Return the single tool matching `id`, or None when absent/omitted."""
        if id:
            tool = PowerTool.query.filter_by(tool_id=id).first()
            return tool
        return None
class FuelTool(Base):
    """A fuel-powered tool in the tools database."""
    __tablename__ = 'fuel_tool'
    id = Column(Integer, primary_key=True)
    tool_id = Column(String, nullable=False)
    name = Column(String)
    brand = Column(String)
    type = Column(String)
    specification = Column(JsonEncodedDict)
    price = Column(JsonEncodedDict)
    data = Column(JsonEncodedDict)
    # Base URL under which this category's images are served.
    image_url = "/tools/static/images/Fueltools/"
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.type = 'fuel'
            # generate_id / connect_db are properties; bare access executes them.
            self.generate_id
            self.connect_db
        self.connect_db
    @property
    def generate_id(self):
        """Derive tool_id from the name, prefixed 'F' for fuel."""
        if self.name:
            self.tool_id = cnf.keygen.name_id('F', self.name)
    @property
    def connect_db(self):
        self.db = Database(name='tools')
    async def list_tools(self):
        """Return an index of all fuel tools; caches full rows on self.inventory."""
        query = """SELECT * FROM fuel_tool"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.inventory:list = []
        self.tool_index:list = []
        for item in results:
            self.inventory.append({
                "id": item[1],
                "name": item[2],
                "brand": item[3],
                "type": item[4],
                "image_url": self.image_url,
                "specification": json.loads(item[5]),
                "price": json.loads(item[6]),
                "data": json.loads(item[7])
            })
            self.tool_index.append(
                {
                    "id": item[1],
                    "name": f"{item[2]} {item[3]}",
                    "brand": item[3],
                    "image_url": self.image_url,
                    "images": json.loads(item[7]).get('image_urls', None)
                }
            )
        return self.tool_index
    @property
    def save(self):
        """Persist this row through the scoped session."""
        self.db.session.add(self)
        self.db.session.commit()
    def getTool(self, id:str=None):
        # NOTE(review): the `id` argument is ignored and ALL rows are returned;
        # compare PowerTool.getTool which filters by tool_id -- confirm intent.
        return FuelTool.query.all()
#------- ACTIVITY ------------
class Supply(AdminBase):
    """A stock-supply record in the admin database."""
    __tablename__ = 'supply'
    id = Column(Integer, primary_key=True)
    sid = Column(String, nullable=False)
    name = Column(String)
    data = Column(JsonEncodedDict)
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.generate_id()
            self.connect_db
        self.connect_db
    def generate_id(self):
        """Derive the supply id from the configured title and supply name."""
        # BUG FIX: this set `self.supplier_id`, but the mapped (non-nullable)
        # column on this model is `sid`, so inserts failed.
        if self.name:
            self.sid = cnf.keygen.name_id(cnf.TITLE, self.name)
    @property
    def connect_db(self):
        self.db = Database(name='admin')
    async def list_supplies(self):
        """Return an index (id + name) of all supplies; caches full rows too."""
        query = """SELECT * FROM supply"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.suppliers_list:list = []
        self.suppliers_index:list = []
        for item in results:
            self.suppliers_list.append({
                "id": item[1],
                "name": item[2],
                "data": json.loads(item[3])
            })
            self.suppliers_index.append(
                {
                    "id": item[1],
                    "name": item[2],
                }
            )
        return self.suppliers_index
    @property
    def save(self):
        self.db.session.add(self)
        self.db.session.commit()
class Sale(AdminBase):
    """A sale transaction record (admin DB): sold item and extra data as JSON."""
    __tablename__ = 'sale'
    id = Column(Integer, primary_key=True)
    tid = Column(String, nullable=False)
    item = Column(JsonEncodedDict)
    data = Column(JsonEncodedDict)
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.generate_id()
            self.connect_db
        self.connect_db
    def generate_id(self):
        # NOTE(review): self.item is never assigned before this runs, so
        # `self.item['name']` fails; presumably `self.name` was intended --
        # confirm before changing.
        if self.name:
            self.tid = cnf.keygen.name_id(cnf.TITLE, self.item['name'])
    @property
    def connect_db(self):
        self.db = Database(name='admin')
    async def list_sales(self):
        """Return an index (id + item) of all sales; caches full rows too."""
        query = """SELECT * FROM sale"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.sales_list:list = []
        self.sales_index:list = []
        for item in results:
            self.sales_list.append({
                "id": item[1],
                "item": item[2],
                "data": json.loads(item[3])
            })
            self.sales_index.append(
                {
                    "id": item[1],
                    "item": item[2]
                }
            )
        return self.sales_index
    @property
    def save(self):
        self.db.session.add(self)
        self.db.session.commit()
class Account(AdminBase):
    """A supplier account (admin DB): JSON profile plus transaction history."""
    __tablename__ = 'suppliers-accounts'
    id = Column(Integer, primary_key=True)
    aaid = Column(String, nullable=False)
    data = Column(JsonEncodedDict)
    transactions = Column(JsonEncodedDict)
    def __init__(self, name:str=None):
        if name:
            self.name = name
            self.generate_id()
            self.connect_db
        self.connect_db
    def generate_id(self):
        # NOTE(review): this sets `self.tid` from `self.item`, but this model's
        # non-nullable column is `aaid` and `self.item` does not exist here --
        # looks copy-pasted from Sale; confirm intended behaviour.
        if self.name:
            self.tid = cnf.keygen.name_id(cnf.TITLE, self.item['name'])
    @property
    def connect_db(self):
        self.db = Database(name='admin')
    async def list_accounts(self):
        """Return an index of all accounts; caches full rows too."""
        # NOTE(review): the unquoted hyphenated table name `suppliers-accounts`
        # parses as a subtraction expression in SQL -- needs quoting.
        query = """SELECT * FROM suppliers-accounts"""
        await self.db.async_connection.connect()
        results = await self.db.async_connection.fetch_all(query=query)
        await self.db.async_connection.disconnect()
        self.accounts_list:list = []
        self.accounts_index:list = []
        for item in results:
            self.accounts_list.append({
                "id": item[1],
                "data": json.loads(item[2]),
                "transactions": json.loads(item[3])
            })
            self.accounts_index.append(
                {
                    "id": item[1],
                    "data": item[2]
                }
            )
        return self.accounts_index
    @property
    def save(self):
        self.db.session.add(self)
        self.db.session.commit()
def createAdmins():
    """Ensure the database directory exists and create the admin tables."""
    cnf._make_dir(cnf.DATABASE_DIR)
    db = Database(name='admin')
    db.create_admin_table
def createTools():
    """Ensure the database directory exists and create the tools tables."""
    cnf._make_dir(cnf.DATABASE_DIR)
    db = Database(name='tools')
    db.create_tools_table
def setup():
    """One-shot bootstrap: build admin tables, pause briefly, then tools tables."""
    createAdmins()
    time.sleep(1)
    createTools()
#setup()
990,053 | 2c089d309414663707d795267fc81769c8963e53 | import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropyLoss2d(nn.Module):
    """Cross-entropy over 2-D maps: log-softmax along the channel dim + NLL."""
    def __init__(self, weight=None, reduce=True, size_average=True):
        super().__init__()
        self.loss = nn.NLLLoss(weight, reduce=reduce, size_average=size_average)
    def forward(self, outputs, targets):
        log_probs = F.log_softmax(outputs, dim=1)
        return self.loss(log_probs, targets)
class partialCrossEntropyLoss2d(nn.Module):
    """Cross-entropy averaged only over labelled pixels.

    `targets` doubles as the mask: pixels with label 0 contribute nothing to
    the average. Logits are temperature-scaled before the log-softmax.
    """
    def __init__(self, weight, temperature=1):
        super().__init__()
        # The (misspelled) attribute name is kept for backward compatibility
        # with any external readers.
        self.temporature = temperature
        self.loss = nn.NLLLoss(weight, reduce=False, size_average=False)
    def forward(self, outputs, targets):
        # Per-pixel NLL on temperature-scaled logits.
        elementwise_loss = self.loss(F.log_softmax(outputs / self.temporature, dim=1), targets)
        partialLossMask = targets.data.float()
        denom = targets.data.sum().float()
        if denom == 0:
            # BUG FIX: with no labelled pixels the old code divided 0/0 and
            # returned NaN; return a graph-connected zero instead.
            return (elementwise_loss * partialLossMask).sum() * 0.0
        return (elementwise_loss * partialLossMask).sum() / denom
class logBarrierLoss(nn.Module):
    """Quadratic penalty pushing each image's foreground (channel-1) pixel
    count into the band [low_band, high_band]; zero inside the band."""
    def __init__(self, low_band, high_band):
        super().__init__()
        self.low_band = low_band
        self.high_band = high_band
    def forward(self, probability):
        total_pixel_number = probability.numel()
        # Normalise to probabilities if raw logits were passed.
        if probability.sum(1).mean() != 1:
            probability = F.softmax(probability, dim=1)
        # Per-image foreground pixel count (sum of channel-1 probabilities).
        sum_pixels = probability[:, 1, :, :].sum(-1).sum(-1)
        losses = []
        for image in sum_pixels:
            if image >= self.high_band:
                losses.append((image - self.high_band) ** 2 / total_pixel_number)
            elif image <= self.low_band:
                # BUG FIX: the old branch used the whole `sum_pixels` vector
                # instead of this image's scalar count.
                losses.append((image - self.low_band) ** 2 / total_pixel_number)
            else:
                losses.append(image.new_zeros(()))
        # BUG FIX: replaced the fragile cuda-only try/except torch.cat
        # accumulator (which broke on CPU) with a simple stack + mean.
        return torch.stack(losses).mean()
def dice_loss(input, target):
    """Soft Dice coefficient averaged over the batch, smoothed to avoid 0/0.

    Note: despite the name this returns the coefficient (1.0 = perfect
    overlap), not 1 - Dice.
    """
    smooth = 1.
    batch = input.size(0)
    pred_flat = input.view(batch, -1)
    true_flat = target.view(batch, -1)
    overlap = (pred_flat * true_flat).sum(1)
    denom = (pred_flat.sum(1) + true_flat.sum(1) + smooth).float()
    return ((2. * overlap + smooth).float() / denom).mean()
if __name__=="__main__":
    # Smoke test: random "logits", softmaxed, fed through the barrier loss.
    output_of_the_net = torch.rand(16, 2, 256,256)
    output_of_the_net = F.softmax(output_of_the_net, dim=1)
    criterion = logBarrierLoss(10,100)
    loss = criterion(output_of_the_net)
    print(loss)
|
990,054 | 1267db37365ab61417eb77f1e6d9b8cb7d3ba46f | # -*- coding:utf-8 -*-
import ftplib
def returnDefault(ftp):
try:
# nlst()方法获取目录下的文件
dirList = ftp.nlst()
except:
dirList = []
print '[-] Could not list directory contents.'
print '[-] Skipping To Next Target.'
return
retList = []
for filename in dirList:
# lower()方法将文件名都转换为小写的形式
fn = filename.lower()
if '.php' in fn or '.asp' in fn or '.htm' in fn:
print '[+] Found default page: ' + filename
retList.append(filename)
return retList
# Target and hard-coded credentials (NOTE(review): credentials in source).
host ='127.0.0.1'
# host ='119.28.140.248'
# host = '111.230.43.239'
username = 'root'
password = '123'
ftp = ftplib.FTP(host)
ftp.login(username, password)
returnDefault(ftp)
#
# import ftplib
# ftp = ftplib.FTP()
# ftp.connect('127.0.0.1',2121)
# ftp.login('root','123')
# print ftp.getwelcome()
# list = ftp.nlst()
# print list
|
990,055 | ef469da38d69163ea7dcc1fc0c4a7a69c3fa765d | #!/usr/bin/python3
import os
import sys
import pymongo
from modules import Scan_NTP,Ssh_Cracked,Telnet_Cracked,GET_Whitelist
print('Welcome to Jormungandr system (^ ^)')
col = pymongo.MongoClient()
while True:
    print('''Please chose you modules!:
    1,ssh cracked
    2,ntp scan
    3,telnet scracked
    enter you chose number:
    ''')
    client_chose = input('<<')
    Whitelist = GET_Whitelist.Get_whitelist()
    # BUG FIX: the check used the undefined name `resul`; the value just
    # fetched is `Whitelist`, so test that instead.
    if Whitelist is None:
        print('Whitelist Falid!!!')
        sys.exit(0)
    if client_chose == '1':
        # TODO(review): the original `Ssh_cracked.` was a SyntaxError and the
        # module is imported as `Ssh_Cracked`; confirm its entry point and call it.
        Ssh_Cracked
    if client_chose == '2':
        # TODO(review): `Scan_NTP.` was a SyntaxError; call the module's entry point.
        Scan_NTP
    if client_chose == '3':
        # BUG FIX: the module is imported as Telnet_Cracked (case mismatch
        # raised NameError). TODO(review): call its entry point.
        Telnet_Cracked
|
990,056 | 5449188d2e83b7242e9e2de0f182f4d5716db43d | import json
import time
from datetime import datetime
import requests
import csv
import os
import os.path
from urllib.parse import urljoin
# Read scored tweet predictions from the CSV and POST each as a case.
AAIDA_BACKEND_BASE_URL = os.getenv("AAIDA_BACKEND_BASE_URL")
print(os.getcwd())
filename = 'responses-record.csv'
url = urljoin(AAIDA_BACKEND_BASE_URL, 'cases/submit')  # dummy load via HTTP POST
treshold = 0.936  # decision threshold for the tensor prediction score
# BUG FIX: `filename` was defined but a duplicated literal was opened; reuse
# the variable so the path is defined in one place. (No CSV writing needed.)
with open(filename, 'r') as record:
    record_counter = 0
    payload = {}  # base structure of the payload
    csvReader = csv.reader(record)
    for line in csvReader:
        print(line)
        payload["tweet_id"] = int(line[2])       # column 2 holds the tweet id
        if float(line[4]) >= treshold:           # column 4 holds the score
            payload["class"] = "Teridentifikasi"
        else:
            payload["class"] = "tidak teridentifikasi"
        payload["score"] = float(line[4])
        payload["twitter_user_id"] = int(line[0])  # column 0 holds the user id
        payload["is_claimed"] = False
        payload["is_closed"] = False
        print(dict(payload))  # just test
        resp = requests.post(url, json=payload)
        print(f'{resp.status_code=} {resp.text=}')
"""
ini kalau aaida-backend sudah siap
if resp.status_code != 200:
    print("record {} failed to sent".format(record_counter))
record_counter +=1
"""
990,057 | 4005589b4aa4cc7f2b94e25c80df8a8d9d18158d |
# Carpool arithmetic demo (Python 2 print statements).
cars = 10
space_in_a_car= 4.0
drivers = 3
passengers = 9
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print cars
print drivers
print passengers
# NOTE(review): `drivers` is printed twice while `cars_not_driven` is computed
# but never printed -- this second line presumably meant cars_not_driven.
print drivers
print carpool_capacity
print average_passengers_per_car
|
990,058 | f4d122e17534abd57f7e14314d9d892937ec3d9d | start = raw_input("Start ")
end = raw_input("End? ")
for x in range(start, end):
print(x)
def oneToTen(start, end):
for x in range(start, end)
print(x)
return True
result = oneToTen(0, 10)
print result |
990,059 | 46578f3e7f71a45c0de873a685de858d091bf676 | import time
import tensorflow as tf
import numpy as np
import networkx as nx
import data_utils
from model import *
print("TF Version: ", tf.__version__)
# Set random seed for reproducible splits and initialisation.
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings (TF1 flag system).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataname', 'twitter', 'Dataset string.')
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_float('regularization_scale', 0.001, 'Scale for L2 regularization.')
flags.DEFINE_integer('epochs', 50, 'Number of epochs to train.')
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer('hidden_dim', 100, 'Hidden Dimension')
flags.DEFINE_integer('max_steps', 30, 'Number of steps in RNN.')
flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')
# Load data, then carve a random 90/10 train/validation split.
G, node_index, train_examples, train_mask, test_examples, test_mask = data_utils.load_data('datasets/' + FLAGS.dataname, FLAGS.max_steps + 1)
indices = np.random.permutation(train_examples.shape[0])
to_index = int(len(indices) * 0.9)
training_idx, validation_idx = indices[:to_index], indices[to_index:]
train_examples, validation_examples = train_examples[training_idx, :], train_examples[validation_idx, :]
train_mask, validation_mask = train_mask[training_idx, :], train_mask[validation_idx, :]
number_of_nodes = len(G.nodes())
print ("Number of nodes: {}".format(number_of_nodes))
print ("Number of edges: ", len(G.edges()))
print ("Number of training examples: {}".format(len(train_examples)))
print ("Number of validation examples: {}".format(len(validation_examples)))
print ("Number of testing examples: {}".format(len(test_examples)))
print ("Shape of train_examples: {}".format(train_examples.shape))
print ("Shape of train_mask: {}".format(train_mask.shape))
print ("Shape of validation_examples: {}".format(validation_examples.shape))
print ("Shape of validation_mask: {}".format(validation_mask.shape))
print ("Shape of test_examples: {}".format(test_examples.shape))
print ("Shape of test_mask: {}".format(test_mask.shape))
print ("Average train cascade size: {}".format(np.mean(np.sum(train_mask, axis=1))))
print ("Average validation cascade size: {}".format(np.mean(np.sum(validation_mask, axis=1))))
print ("Average test cascade size: {}".format(np.mean(np.sum(test_mask, axis=1))))
print ("***** Hyper Parameters *****")
print ("Learning rate: {}".format(FLAGS.learning_rate))
print ("Batch size: {}".format(FLAGS.batch_size))
print ("Max steps: {}".format(FLAGS.max_steps))
print ("Regularization scale: {}".format(FLAGS.regularization_scale))
print ("hidden_dim: {}".format(FLAGS.hidden_dim))
train_batches = data_utils.Loader(train_examples, train_mask, FLAGS.batch_size)
print ("Number of train batches: {}".format(len(train_batches)))
# Define placeholders (graph inputs).
placeholders = {
    'contents': tf.placeholder(tf.float32, shape=(None, FLAGS.hidden_dim)),
    'sequences': tf.placeholder(tf.int32, shape=(None, FLAGS.max_steps + 1)),
    'seq_mask': tf.placeholder(tf.int32, shape=(None, FLAGS.max_steps)),
    'hit_at': tf.placeholder(tf.int32)
}
# Create model over the graph's dense adjacency matrix.
model = CascadeRNN(number_of_nodes, FLAGS.hidden_dim, FLAGS.max_steps, nx.to_numpy_matrix(G).astype(np.float32), placeholders, name='cascadernn', logging=True)
# Initialize session
sess = tf.Session()
def evaluate(eval_feed_dict):
eval_feed_dict[placeholders['hit_at']] = 10
hitat_10, loss = sess.run([model.metric, model.loss], feed_dict=eval_feed_dict)
eval_feed_dict[placeholders['hit_at']] = 50
hitat_50 = sess.run(model.metric, feed_dict=eval_feed_dict)
eval_feed_dict[placeholders['hit_at']] = 100
hitat_100 = sess.run(model.metric, feed_dict=eval_feed_dict)
return hitat_10, hitat_50, hitat_100, loss
# Init variables
sess.run(tf.global_variables_initializer())
val_loss = []
# Train model
for epoch in range(FLAGS.epochs):
t = time.time()
for batch_num in range(len(train_batches)):
batch_examples, batch_mask = train_batches()
feed_dict = {
placeholders['contents']: np.zeros((batch_examples.shape[0], FLAGS.hidden_dim)),
placeholders['sequences']: batch_examples,
placeholders['seq_mask']: batch_mask,
placeholders['hit_at']: 10
}
_, train_loss, train_hitat_10 = sess.run([model.opt_op, model.loss, model.metric], feed_dict=feed_dict)
val_feed_dict = {
placeholders['contents']: np.zeros((validation_examples.shape[0], FLAGS.hidden_dim)),
placeholders['sequences']: validation_examples,
placeholders['seq_mask']: validation_mask
}
hitat_10, hitat_50, hitat_100, loss = evaluate(val_feed_dict)
print ("Epoch {:04d} (time={:.5f}): train_loss={:.5f} train_hit@10={:.5f} validation_loss={:.5f} validation_hit@10={:.5f} validation_hit@50={:.5f} validation_hit@100={:.5f}".format(epoch + 1, time.time() - t, train_loss, train_hitat_10, loss, hitat_10, hitat_50, hitat_100))
val_loss.append(loss)
if epoch > FLAGS.early_stopping and val_loss[-1] > np.mean(val_loss[-(FLAGS.early_stopping+1):-1]):
print("Early stopping...")
break
print("Optimization Finished!")
# Testing
test_t = time.time()
test_feed_dict = {
placeholders['contents']: np.zeros((test_examples.shape[0], FLAGS.hidden_dim)),
placeholders['sequences']: test_examples,
placeholders['seq_mask']: test_mask
}
test_hit_10, test_hit_50, test_hit_100, test_cost = evaluate(test_feed_dict)
print("*** Test set results: *** \ntime={:.5f} test_loss={:.5f} test_hit@10={:.5f} test_hit@50={:.5f} test_hit@100={:.5f}".format((time.time() - test_t), test_cost, test_hit_10, test_hit_50, test_hit_100))
|
990,060 | 173d3d4858a2de042610391d0797e5af58d7b553 | game = {
"name": "Vainglory",
"description": "MOBA",
"status": "frequent updates",
"features": "3v3, 5v5"
}
# Show the full record, then delete a user-chosen key if it exists.
print(game)
n = input("Enter key you want to delete: ")
if n in game:
    del game[n]
    print(game)
else:
    # Key not present; nothing is removed.
    print("Error 404")
990,061 | 23dfb2f8e8c36c88bc21ca851a3335ad881e0371 | import os
import re
def read_version():
    """Return the version string declared in ``freezegun/__init__.py``.

    Raises:
        ValueError: if no ``__version__ = '...'`` assignment is found.
    """
    init_path = os.path.join('freezegun', '__init__.py')
    with open(init_path) as init_file:
        source = init_file.read()
    match = re.search(r'''__version__\s*=\s*['"]([^'"]*)['"]''', source)
    if match is None:
        raise ValueError("couldn't find version")
    return match.group(1)
def create_tag():
    """Create an annotated git tag named after the current package version.

    Prints a confirmation only when git exits successfully.
    """
    from subprocess import call
    version = read_version()
    # call() returns git's exit status; 0 means the tag was created.
    status = call(['git', 'tag', '--annotate', version, '--message', 'Version %s' % version])
    if status == 0:
        print("Added tag for version %s" % version)
# Script entry point: tag the current commit with the package version.
if __name__ == '__main__':
    create_tag()
|
990,062 | d3c381ee37ef813c5a87a429f45e743badf771f0 | # 1. Pedir dos números por teclado e imprimir la suma de ambos.
print("Ejercicio 1")
def sumar(numero_1, numero_2):
    """Return the sum of the two given numbers."""
    return numero_1 + numero_2
# Read two numbers from the user and print their sum (prompts are in Spanish).
numero_1 = float(input("Introduce el primer número: "))
numero_2 = float(input("Introduce el segundo número: "))
resultado = sumar (numero_1, numero_2)
print ("El resultado es: " + str(resultado))
990,063 | be54859e42304ebc7c28d26dc70cffeb2f59affa | #!/usr/bin/python2.7
#
# Tester for the assignement1
#
# Database and table-name configuration for the partitioning assignment tester.
DATABASE_NAME = 'dds_assignment'
# TODO: Change these as per your code
RATINGS_TABLE = 'ratings'
RANGE_TABLE_PREFIX = 'range_part'
RROBIN_TABLE_PREFIX = 'rrobin_part'
USER_ID_COLNAME = 'userid'
MOVIE_ID_COLNAME = 'movieid'
RATING_COLNAME = 'rating'
# NOTE(review): unescaped backslashes in this Windows path rely on Python 2
# string semantics ('\U' would be a syntax error in Python 3) — use a raw
# string (r'...') if this file is ever ported.
INPUT_FILE_PATH = 'C:\Users\Francisco\Desktop\CSE511_DataProcessingAtScale\CSE511\Assignment_3\ml-10M100K\\ratings.dat'
ACTUAL_ROWS_IN_INPUT_FILE = 20 # Number of lines in the input file
import psycopg2
import traceback
import testHelper
import Interface as MyAssignment
# Python 2 driver: creates the database, then exercises each partitioning
# function from the student's Interface module in sequence.
if __name__ == '__main__':
    try:
        testHelper.createDB(DATABASE_NAME)
        with testHelper.getOpenConnection(dbname=DATABASE_NAME) as conn:
            # Autocommit so each helper's DDL/DML takes effect immediately.
            conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            testHelper.deleteAllPublicTables(conn)
            # Each helper returns [passed_flag, exception_or_None].
            [result, e] = testHelper.testloadratings(MyAssignment, RATINGS_TABLE, INPUT_FILE_PATH, conn, ACTUAL_ROWS_IN_INPUT_FILE)
            if result :
                print ("loadratings function pass!")
            [result, e] = testHelper.testrangepartition(MyAssignment, RATINGS_TABLE, 5, conn, 0, ACTUAL_ROWS_IN_INPUT_FILE)
            if result :
                print "rangepartition function pass!"
            # ALERT:: Use only one at a time i.e. uncomment only one line at a time and run the script
            [result, e] = testHelper.testrangeinsert(MyAssignment, RATINGS_TABLE, 100, 2, 3, conn, '2')
            #[result, e] = testHelper.testrangeinsert(MyAssignment, RATINGS_TABLE, 100, 2, 0, conn, '0')
            if result:
                print "rangeinsert function pass!"
            # Reset and reload before testing the round-robin scheme.
            testHelper.deleteAllPublicTables(conn)
            MyAssignment.loadRatings(RATINGS_TABLE, INPUT_FILE_PATH, conn)
            [result, e] = testHelper.testroundrobinpartition(MyAssignment, RATINGS_TABLE, 5, conn, 0, ACTUAL_ROWS_IN_INPUT_FILE)
            if result :
                print "roundrobinpartition function pass!"
            # Three consecutive inserts cycle through partitions 0, 1, 2;
            # only the last result is checked.
            [result, e] = testHelper.testroundrobininsert(MyAssignment, RATINGS_TABLE, 100, 1, 3, conn, '0')
            [result, e] = testHelper.testroundrobininsert(MyAssignment, RATINGS_TABLE, 100, 1, 3, conn, '1')
            [result, e] = testHelper.testroundrobininsert(MyAssignment, RATINGS_TABLE, 100, 1, 3, conn, '2')
            if result :
                print "roundrobininsert function pass!"
            #choice = raw_input('Press enter to Delete all tables? ')
            #if choice == '':
            testHelper.deleteAllPublicTables(conn)
    except Exception as detail:
        traceback.print_exc()
|
990,064 | a06239ea75ff4d82dea2403d4f27b0471c20f3ff | #!/usr/bin/env python
# Translation table that swaps '+' and '-' in one pass.
translation = str.maketrans('+-', '-+')
T = int(input())
for i in range(1, T + 1):
    stack = input()
    s = 0
    # Repeatedly flip the prefix ending at the last '-' until none remain;
    # s counts the number of flips performed.
    while '-' in stack:
        s += 1
        lastBS = stack.rfind('-')
        stack = stack[0:lastBS+1].translate(translation) + stack[lastBS+1:]
    else:
        # while/else: runs once the loop condition becomes false.
        print('Case #{}: {}'.format(i, s))
|
990,065 | bb4735a9bd4b28650060472838d54c7afc41f3f7 | import numpy
import re
import math
import threading
import time
from OpenGL.GL import *
from Cura.gui import openGLUtils
from Cura.gui.view3D.renderer import Renderer
class GCodeLayerRenderer(object):
    """Accumulates the moves of a single G-code layer and builds OpenGL
    vertex buffers (moves, extrusions by type, retract/prime markers).

    Position and extrusion state is carried over from the previous layer so
    parsing can continue seamlessly across layer boundaries.
    """
    def __init__(self, prev_layer = None):
        # Current toolhead state (mm / extruded filament length).
        self._x = 0
        self._y = 0
        self._z = 0
        self._e = 0
        self._extruder = 0
        self._last_extrusion_z = 0
        self._prev_last_extrusion_z = 0
        self._feedrate = 0
        self._retracted = False
        # Raw geometry collected while parsing; converted to renderers in finalize().
        self._move_points = []
        self._inset0_extrude_points = []
        self._insetX_extrude_points = []
        self._infill_extrude_points = []
        self._support_extrude_points = []
        self._inset0_extrude_amounts = []
        self._insetX_extrude_amounts = []
        self._infill_extrude_amounts = []
        self._support_extrude_amounts = []
        self._retract_marks = []
        self._prime_marks = []
        self._layer_height = 0.1
        # Default bucket until a ;TYPE: comment selects a specific one.
        self._active_extrude_points = self._support_extrude_points
        self._active_extrude_amounts = self._support_extrude_amounts
        # NOTE(review): [-18.0, 0.0] looks like the second extruder's X offset
        # in mm — confirm against the machine definition.
        self._extruder_offset = [[0.0, 0.0], [-18.0, 0.0]]
        if prev_layer is not None:
            # Continue from where the previous layer's parsing left off.
            self._x = prev_layer._x
            self._y = prev_layer._y
            self._z = prev_layer._z
            self._e = prev_layer._e
            self._feedrate = prev_layer._feedrate
            self._retracted = prev_layer._retracted
            self._extruder = prev_layer._extruder
    def setPosition(self, x, y, z, e):
        # G92-style position override; None leaves the axis unchanged.
        if x is not None:
            self._x = x
        if y is not None:
            self._y = y
        if z is not None:
            self._z = z
        if e is not None:
            self._e = e
    def addMove(self, x, y, z, e, f):
        # Record one G0/G1 move. E increasing => extrusion, decreasing =>
        # retraction, unchanged => travel move.
        new_x = self._x
        new_y = self._y
        new_z = self._z
        new_e = self._e
        if f is not None:
            self._feedrate = f
        if x is not None:
            x -= self._extruder_offset[self._extruder][0]
            new_x = x
        if y is not None:
            y -= self._extruder_offset[self._extruder][1]
            new_y = y
        if z is not None:
            new_z = z
        if e is not None:
            new_e = e
        if new_e > self._e:
            if self._retracted:
                # First extrusion after a retract: mark the prime position.
                self._retracted = False
                self._addPrimeMark(new_x, new_y, new_z)
                if new_x != self._x or new_y != self._y or new_z != self._z:
                    self._move_points.append([self._x, self._y, self._z])
                    self._move_points.append([new_x, new_y, new_z])
            else:
                # Extrusion
                if new_x != self._x or new_y != self._y or new_z != self._z:
                    self._last_extrusion_z = new_z
                    self._active_extrude_points.append([self._x, self._y, self._z, new_x, new_y, new_z])
                    self._active_extrude_amounts.append(new_e - self._e)
        elif new_e < self._e:
            # Retraction; any simultaneous XYZ change is a travel move.
            self._retracted = True
            self._addRetractionMark(self._x, self._y, self._z)
            if new_x != self._x or new_y != self._y or new_z != self._z:
                self._move_points.append([self._x, self._y, self._z])
                self._move_points.append([new_x, new_y, new_z])
        else:
            # Pure travel move (no extrusion change).
            self._move_points.append([self._x, self._y, self._z])
            self._move_points.append([new_x, new_y, new_z])
        self._x = new_x
        self._y = new_y
        self._z = new_z
        self._e = new_e
    def setExtruder(self, extruder):
        self._extruder = extruder
    def setFanSpeed(self, speed):
        pass
    def setTemperature(self, temperature):
        pass
    def setBedTemperature(self, temperature):
        pass
    def setExtrusionType(self, e_type):
        # Select which bucket subsequent extrusions are recorded into,
        # based on the slicer's ;TYPE: comment.
        if e_type == 'WALL-OUTER':
            self._active_extrude_points = self._inset0_extrude_points
            self._active_extrude_amounts = self._inset0_extrude_amounts
        elif e_type == 'WALL-INNER':
            self._active_extrude_points = self._insetX_extrude_points
            self._active_extrude_amounts = self._insetX_extrude_amounts
        elif e_type == 'FILL':
            self._active_extrude_points = self._infill_extrude_points
            self._active_extrude_amounts = self._infill_extrude_amounts
        else:
            # Anything else (support, skirt, ...) goes into the support bucket.
            self._active_extrude_points = self._support_extrude_points
            self._active_extrude_amounts = self._support_extrude_amounts
    def _addRetractionMark(self, x, y, z):
        # Diamond-shaped quad centered on the retract position.
        size = 1.0
        z += self._layer_height / 2.0
        self._retract_marks.append([x - size, y, z])
        self._retract_marks.append([x, y - size, z])
        self._retract_marks.append([x + size, y, z])
        self._retract_marks.append([x, y + size, z])
    def _addPrimeMark(self, x, y, z):
        # Slightly smaller diamond for prime (un-retract) positions.
        size = 0.8
        z += self._layer_height / 2.0
        self._prime_marks.append([x - size, y, z])
        self._prime_marks.append([x, y - size, z])
        self._prime_marks.append([x + size, y, z])
        self._prime_marks.append([x, y + size, z])
    def _extrusion_to_renderer(self, points, amounts):
        # Convert [x0,y0,z0,x1,y1,z1] segments plus per-segment E amounts
        # into a GL_QUADS vertex renderer that approximates extrusion width.
        if len(points) < 1:
            return None
        points = numpy.array(points, numpy.float32)
        xdiff = (points[::,0] - points[::,3])
        ydiff = (points[::,1] - points[::,4])
        lengths = numpy.sqrt((xdiff * xdiff) + (ydiff * ydiff))
        amounts /= lengths
        #Amounts is amount of E per mm now. Calculate the extrusion width by "width = E / layer_height"
        amounts *= self._e_correction_factor
        # Unit direction vectors per segment, rotated 90° to get side normals.
        normals = (points[::, 3:6] - points[::, 0:3])
        normals[::,0] /= lengths
        normals[::,1] /= lengths
        normals[::,2] /= lengths
        tmp = -normals[::,1]
        normals[::,1] = normals[::,0]
        normals[::,0] = tmp
        normals[::,0] *= amounts
        normals[::,1] *= amounts
        normals[::,2] *= amounts
        # 8 vertices per segment: center line pair, then the two offset edges.
        verts = numpy.concatenate((points, points[::, 3:6], points[::, 0:3], points[::, 0:3] + normals, points[::, 3:6] + normals, points[::, 3:6] - normals, points[::, 0:3] - normals), 1)
        verts[::, 8] -= self._layer_height
        verts[::, 11] -= self._layer_height
        return openGLUtils.VertexRenderer(GL_QUADS, numpy.array(verts, numpy.float32).reshape((len(verts) * 8, 3)), False)
    def finalize(self):
        # Derive the layer height from consecutive extrusion Z values, then
        # bake all collected geometry into vertex renderers.
        filamentRadius = 2.85 / 2.0
        filamentArea = math.pi * filamentRadius * filamentRadius
        self._layer_height = self._last_extrusion_z - self._prev_last_extrusion_z
        if self._layer_height <= 0.0:
            self._layer_height = self._last_extrusion_z
        if self._layer_height <= 0.0:
            # Fallback when no extrusion happened in this layer at all.
            self._layer_height = 0.1
        self._e_correction_factor = (filamentArea / self._layer_height / 2.0)
        self._move_points = openGLUtils.VertexRenderer(GL_LINES, numpy.array(self._move_points, numpy.float32), False)
        self._inset0_extrude_points = self._extrusion_to_renderer(self._inset0_extrude_points, self._inset0_extrude_amounts)
        self._insetX_extrude_points = self._extrusion_to_renderer(self._insetX_extrude_points, self._insetX_extrude_amounts)
        self._infill_extrude_points = self._extrusion_to_renderer(self._infill_extrude_points, self._infill_extrude_amounts)
        self._support_extrude_points = self._extrusion_to_renderer(self._support_extrude_points, self._support_extrude_amounts)
        self._retract_marks = openGLUtils.VertexRenderer(GL_QUADS, numpy.array(self._retract_marks, numpy.float32), False)
        self._prime_marks = openGLUtils.VertexRenderer(GL_QUADS, numpy.array(self._prime_marks, numpy.float32), False)
    def render(self, main_renderer, c):
        # Draw each enabled category; c scales the color for depth cueing.
        if main_renderer._show_moves:
            glColor3f(0, 0, c)
            self._move_points.render()
        if main_renderer._show_outer_wall and self._inset0_extrude_points is not None:
            glColor3f(c, 0, 0)
            self._inset0_extrude_points.render()
        if main_renderer._show_inner_wall and self._insetX_extrude_points is not None:
            glColor3f(0, c, 0)
            self._insetX_extrude_points.render()
        if main_renderer._show_infill and self._infill_extrude_points is not None:
            glColor3f(c, c, 0)
            self._infill_extrude_points.render()
        if main_renderer._show_support and self._support_extrude_points is not None:
            glColor3f(0, c, c)
            self._support_extrude_points.render()
        if main_renderer._show_retraction:
            glColor3f(0, 0, 0.5 * c)
            self._retract_marks.render()
            glColor3f(0.5 * c, 0, 0.5 * c)
            self._prime_marks.render()
class GCodeRenderer(object):
    """Parses a full G-code string into per-layer renderers on a background
    thread and draws the layers back-to-front with a fading color gradient.
    """
    def __init__(self, gcode):
        self._layers = []
        # Parse in the background so the UI stays responsive.
        thread = threading.Thread(target=self._process, args=(gcode,))
        thread.daemon = True
        thread.start()
    def _process(self, gcode):
        # Regexes for the G-code words handled below.
        G = re.compile('G([0-9]+)')
        M = re.compile('M([0-9]+)')
        X = re.compile('X([0-9\\.]+)')
        Y = re.compile('Y([0-9\\.]+)')
        Z = re.compile('Z([0-9\\.]+)')
        E = re.compile('E([0-9\\.]+)')
        F = re.compile('F([0-9\\.]+)')
        S = re.compile('S([0-9]+)')
        T = re.compile('T([0-9]+)')
        current_layer = GCodeLayerRenderer()
        for line in gcode.split('\n'):
            if line.startswith(';'):
                # Slicer comments carry layer and extrusion-type markers.
                if line.startswith(';LAYER:'):
                    current_layer.finalize()
                    # Yield briefly so the render thread isn't starved.
                    time.sleep(0.001)
                    self._layers.append(current_layer)
                    current_layer = GCodeLayerRenderer(current_layer)
                    current_layer._prev_last_extrusion_z = self._layers[-1]._last_extrusion_z
                if line.startswith(';TYPE:'):
                    current_layer.setExtrusionType(line[6:].strip())
            else:
                g = G.search(line)
                if g:
                    g = int(g.group(1))
                    if g == 0 or g == 1:
                        # Movement command: extract whichever axes are present.
                        x = X.search(line)
                        y = Y.search(line)
                        z = Z.search(line)
                        e = E.search(line)
                        f = F.search(line)
                        if x:
                            x = float(x.group(1))
                        if y:
                            y = float(y.group(1))
                        if z:
                            z = float(z.group(1))
                        if e:
                            e = float(e.group(1))
                        if f:
                            f = float(f.group(1))
                        current_layer.addMove(x, y, z, e, f)
                    elif g == 21:
                        pass # Metric
                    elif g == 28:
                        pass # Home
                    elif g == 90:
                        pass # Absolute positioning
                    elif g == 91:
                        pass # Relative positioning
                    elif g == 92:
                        # Set position without moving.
                        x = X.search(line)
                        y = Y.search(line)
                        z = Z.search(line)
                        e = E.search(line)
                        if x:
                            x = float(x.group(1))
                        if y:
                            y = float(y.group(1))
                        if z:
                            z = float(z.group(1))
                        if e:
                            e = float(e.group(1))
                        current_layer.setPosition(x, y, z, e)
                    else:
                        # Unhandled G-code: log and ignore.
                        print 'G', g
                else:
                    m = M.search(line)
                    if m:
                        m = int(m.group(1))
                        if m == 104:
                            s = S.search(line)
                            if s:
                                s = int(s.group(1))
                                current_layer.setTemperature(s)
                        elif m == 140:
                            s = S.search(line)
                            if s:
                                s = int(s.group(1))
                                current_layer.setBedTemperature(s)
                        elif m == 109:
                            s = S.search(line)
                            if s:
                                s = int(s.group(1))
                                current_layer.setTemperature(s)
                        elif m == 190:
                            s = S.search(line)
                            if s:
                                s = int(s.group(1))
                                current_layer.setBedTemperature(s)
                        elif m == 106:
                            # Fan on; no S parameter means full speed.
                            s = S.search(line)
                            if s:
                                s = int(s.group(1))
                                current_layer.setFanSpeed(s)
                            else:
                                current_layer.setFanSpeed(255)
                        elif m == 107:
                            current_layer.setFanSpeed(0)
                        elif m == 84:
                            pass # Steppers off
                        else:
                            # Unhandled M-code: log and ignore.
                            print 'M', m
                    else:
                        # Bare Tn line selects the active extruder.
                        t = T.search(line)
                        if t:
                            current_layer.setExtruder(int(t.group(1)))
        # Flush the final (possibly partial) layer.
        current_layer.finalize()
        self._layers.append(current_layer)
    def render(self, main_renderer):
        # Draw from the selected top layer downwards, fading each layer;
        # the fade factor wraps so deep stacks stay visible.
        f = 1.0
        bottom_layer_nr = 0
        top_layer_nr = main_renderer._top_layer_nr
        if main_renderer._is_single_layer:
            bottom_layer_nr = top_layer_nr - 1
        for layer in self._layers[top_layer_nr:bottom_layer_nr:-1]:
            layer.render(main_renderer, f)
            f -= 0.05
            if f < 0.5:
                f = 1.0
class ToolpathLayerRenderer(object):
    """Lazily builds and caches a GL_QUADS renderer per path type for one
    toolpath layer, extruding each 2D polygon into a vertical wall between
    the layer's top and bottom Z.
    """
    # Fixed display color per path type.
    COLORS = {
        'skirt': (0, 0.5, 0.5),
        'inset0': (0.5, 0, 0.5),
        'insetx': (0.5, 0.5, 0),
        'skin': (0.5, 0.5, 0),
        'support': (0.5, 0.5, 0),
    }
    def __init__(self, layer):
        self._renderer = {}
        self._layer = layer
    def render(self):
        for type in self._layer.getPathTypes():
            if not type in self._renderer:
                # Build the vertex/index buffers once, then cache them.
                polygons = self._layer.getPolygons(type)
                point_count = 0
                indices_count = 0
                for poly in polygons:
                    point_count += len(poly)
                    indices_count += len(poly) * 4
                points = numpy.zeros((point_count, 2), numpy.float32)
                indices = numpy.zeros(indices_count, dtype=numpy.int32)
                point_index = 0
                indices_index = 0
                for poly in polygons:
                    n = len(poly)
                    points[point_index:point_index + n] = poly
                    # i1 = top-ring vertex indices, i2 = matching bottom ring
                    # (offset by point_count); 4 indices per edge quad.
                    i1 = numpy.arange(n, dtype=numpy.int32).reshape((n, 1)) + point_index
                    i2 = i1 + point_count
                    indices[indices_index:indices_index + (n * 4)] = numpy.concatenate((i1, i1 + 1, i2 + 1, i2), 1).reshape((n * 4))
                    # Close the polygon: last quad wraps back to vertex 0.
                    indices[indices_index + (n * 4) - 3] = i1[0]
                    indices[indices_index + (n * 4) - 2] = i2[0]
                    point_index += n
                    indices_index += n * 4
                # Duplicate the 2D outline at top and bottom Z of the layer.
                z_pos1 = numpy.zeros((point_count, 1), numpy.float32)
                z_pos2 = numpy.zeros((point_count, 1), numpy.float32)
                z_pos1.fill(self._layer._z_height)
                z_pos2.fill(self._layer._z_height - self._layer._layer_height)
                points1 = numpy.concatenate((points, z_pos1), 1)
                points2 = numpy.concatenate((points, z_pos2), 1)
                self._renderer[type] = openGLUtils.VertexRenderer(GL_QUADS, numpy.concatenate((points1, points2)), False, indices)
            glColor3fv(self.COLORS[type])
            self._renderer[type].render()
class ToolpathRenderer(Renderer):
    """Scene renderer that draws sliced toolpath layers and/or the final
    G-code result, with per-category visibility toggles.
    """
    def __init__(self):
        super(ToolpathRenderer,self).__init__()
        # Visibility flags toggled from the UI.
        self._show_outer_wall = True
        self._show_inner_wall = True
        self._show_infill = True
        self._show_support = True
        self._show_moves = False
        self._show_retraction = True
        self._is_single_layer = False
        self._top_layer_nr = 1
    def render(self):
        glPushMatrix()
        # Shift so machine coordinates are centered when the machine's
        # origin is at the front-left corner.
        if self.machine.getSettingValueByKey('machine_center_is_zero') == 'False':
            glTranslatef(-self.machine.getSettingValueByKeyFloat('machine_width') / 2.0, -self.machine.getSettingValueByKeyFloat('machine_depth') / 2.0, 0.0)
        glDisable(GL_CULL_FACE)
        glDisable(GL_LIGHTING)
        # Attach (but currently don't draw) a toolpath renderer per layer.
        for obj in self.scene.getObjects():
            for layer_nr in xrange(0, obj.getToolpathLayerCount()):
                layer = obj.getToolpathLayer(layer_nr)
                if layer is None:
                    continue
                if not hasattr(layer, 'renderer'):
                    layer.renderer = ToolpathLayerRenderer(layer)
                # layer.renderer.render(self)
        # Draw the final G-code result if slicing has produced one.
        gcode = self.scene.getResult().getGCode()
        if gcode is not None:
            if not hasattr(self.scene.getResult(), 'renderer'):
                self.scene.getResult().renderer = GCodeRenderer(gcode)
            self.scene.getResult().renderer.render(self)
        glPopMatrix()
    def focusRender(self):
        pass
    def showOuterWall(self, show):
        self._show_outer_wall = show
    def showInnerWall(self, show):
        self._show_inner_wall = show
    def showInfill(self, show):
        self._show_infill = show
    def showSupport(self, show):
        self._show_support = show
    def showMoves(self, show):
        self._show_moves = show
    def showRetraction(self, show):
        self._show_retraction = show
    def setTopShowLayerNr(self, nr):
        self._top_layer_nr = nr
    def setSingleLayer(self, is_single_layer):
        self._is_single_layer = is_single_layer
990,066 | 2914cd7e0283a7ce1c25ff1fffe6880d7b75a07c | t = int(input())
# For each test case: read n rows of digits and count how many of the first
# 10 columns have an odd digit-sum.
# NOTE(review): assumes every row has at least 10 characters, all digits —
# confirm against the problem's input format.
for T in range(t):
    n = int(input())
    l = []
    for i in range(n):
        s = input()
        l.append(list(s))
    ans = 0
    for i in range(10):
        count = 0
        for j in range(n):
            count += int(l[j][i])
        if count % 2 == 1:
            ans += 1
    print(ans)
|
990,067 | f633f5ff62e78dfdab19ed173ac624fdd97da890 | import random
import re
import torch
from torch.nn import functional as F
from pytorch_transformers import GPT2Tokenizer, GPT2Model, GPT2LMHeadModel
# Inference only: disable autograd globally.
torch.set_grad_enabled(False)
MODEL_PATH = './WoWQuestPytorch124M'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the fine-tuned GPT-2 tokenizer/model from the local checkpoint.
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_PATH)
model = GPT2LMHeadModel.from_pretrained(MODEL_PATH).eval()
model = model.to(device)
# Candidate values for the WoW placeholder tokens ($c, $r, $n).
wow_class_list = ['Death Knight', 'Demon Hunter', 'Druid', 'Hunter', 'Mage', 'Monk', 'Paladin', 'Priest', 'Rogue', 'Shaman', 'Warrior', 'Warlock']
wow_race_list = ['Blood Elf', 'Human', 'Tauren', 'Orc', 'Kul Tiran', 'Void Elf', 'Troll', 'Vulpera', 'Night Elf', 'Zandalari Troll', 'Worgen', 'Undead', 'Goblin', 'Highmountain Tauren', 'Nightborne', 'Dwarf', 'Draenei', 'Gnome', 'Lightforged Draenei', 'Pandaren', 'Maghar Orc', 'Mechagnome', 'Dark Iron Dwarf']
wow_silly_name_list = ['Glitterstorm', 'Sunderwear', 'Arrowdynamic', 'Sapntap', 'Crossblesser', 'Praystation', 'Healium', 'Shocknorris', 'Alestrom', 'Harryportal', 'Merlìn', 'Wreckquiem', 'Owlcapone']
# Fallback prompts used when the caller supplies empty text.
suggested_text_list = ['Greetings $r', '$c I need your help', 'Good to see you $n']
def parseGenderTokens(text):
    """Replace every ``$g<variant1>:<variant2>;`` token in *text* with one variant.

    The variant index is drawn at random once, on the first token
    encountered, and then reused for every remaining token so the whole
    string stays consistent. Text without tokens is returned unchanged.
    """
    token_pattern = re.compile(r"\$[gG]([^:]+):([^;]+);", re.MULTILINE)
    pieces = []
    cursor = 0
    chosen_group = None
    for match in token_pattern.finditer(text):
        # Keep the plain text that precedes this token.
        pieces.append(text[cursor:match.start()])
        if chosen_group is None:
            chosen_group = random.randint(1, len(match.groups()))
        pieces.append(match.group(chosen_group))
        cursor = match.end()
    pieces.append(text[cursor:])
    return "".join(pieces)
def parseSpecialCharacters(text, wow_class_item, wow_race_item, wow_silly_name_item):
    """Expand WoW quest-text placeholders ($B/$b newline, $c/$C class,
    $r/$R race, $n/$N name), then resolve any gender tokens.
    """
    substitutions = (
        ("$B", "\n"), ("$b", "\n"),
        ("$c", wow_class_item), ("$C", wow_class_item),
        ("$r", wow_race_item), ("$R", wow_race_item),
        ("$n", wow_silly_name_item), ("$N", wow_silly_name_item),
    )
    expanded = text
    for token, replacement in substitutions:
        expanded = expanded.replace(token, replacement)
    return parseGenderTokens(expanded)
def extend(text, size=20):
    """Autoregressively extend *text* by *size* sampled tokens and expand
    the WoW placeholder tokens in the result with random picks.

    An empty *text* falls back to a random suggested prompt.
    """
    if len(text) == 0:
        text = random.choice(suggested_text_list)
    tokens = tokenizer.encode(text)
    prediction, past = torch.tensor([tokens]).to(device), None
    for i in range(size):
        # NOTE(review): `past=` keyword is the legacy pytorch_transformers
        # API; newer transformers use `past_key_values` — confirm pinned version.
        prediction, past = model(prediction, past=past)
        # Sample the next token from the softmax over the last position.
        prediction = torch.multinomial(F.softmax(prediction[:, -1], dim=1), 1)
        tokens.append(prediction.item())
    decoded_tokens = tokenizer.decode(tokens)
    # Random concrete values for the $c/$r/$n placeholders.
    wow_class_item = random.choice(wow_class_list)
    wow_race_item = random.choice(wow_race_list)
    wow_silly_name_item = random.choice(wow_silly_name_list)
    return parseSpecialCharacters(decoded_tokens, wow_class_item, wow_race_item, wow_silly_name_item)
# Demo entry point: extend a sample prompt by 120 tokens and print it.
if __name__ == "__main__":
    random.seed(None)
    #test_text = '$c, over here. Hello $n the $r I need your help'
    #test_text = "Hello there $gGentelman:Lady;, how are you $gBoy:Girl;?"
    test_text = 'I need your help'
    extended = extend(test_text, 120)
    print(extended)
|
990,068 | c8bd17ce9492aef044ec2b986c815f1a50a25c89 | class person:
    def __init__(self,fname,lname):
        # Store the person's first and last name.
        self.firstname = fname
        self.lastname = lname
    def fullname(self):
        # Print "<firstname> <lastname>" to stdout (no return value).
        print(self.firstname , self.lastname)
def subject(marks):
    """Print the combined total for five subjects scored at *marks* each."""
    total = marks * 5
    print(total)
# Demo: build one person, print their full name and a 5-subject total.
p1 = person("raj","kumar")
p1.fullname()
subject(100)
|
990,069 | 72099ff67d5a245634b3c1e0e6fb43b53ae19ac2 | # baselineTeam.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# baselineTeam.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from captureAgents import CaptureAgent
import distanceCalculator
import random, time, util, sys
from game import Directions
import game
from util import nearestPoint
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
               first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):
  """
  This function should return a list of two agents that will form the
  team, initialized using firstIndex and secondIndex as their agent
  index numbers.  isRed is True if the red team is being created, and
  will be False if the blue team is being created.

  As a potentially helpful development aid, this function can take
  additional string-valued keyword arguments ("first" and "second" are
  such arguments in the case of this function), which will come from
  the --redOpts and --blueOpts command-line arguments to capture.py.
  For the nightly contest, however, your team will be created without
  any extra arguments, so you should make sure that the default
  behavior is what you want for the nightly contest.
  """
  # NOTE(review): eval() resolves the agent class from a string; this is only
  # safe because the names come from trusted command-line options.
  return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
class ReflexCaptureAgent(CaptureAgent):
  """Base capture agent combining approximate Q-learning (linear features)
  with A* path planning and a few heuristic state detectors (stuck/chased).
  """
  def __init__(self, index, epsillon=0.5, alpha=0.2, gamma=0.8, **args):
    CaptureAgent.__init__(self, index)
    # Q-learning hyperparameters: exploration rate, learning rate, discount.
    self.epsillon = epsillon
    self.alpha = alpha
    self.discout = gamma
    self.lastState = None
    self.lastAction = None
    self.targetPos = None
    # Upper bound on any maze distance (width * height), set below.
    self.mazeSize = None
    self.specificPath = []
  def registerInitialState(self, gameState):
    self.start = gameState.getAgentPosition(self.index)
    CaptureAgent.registerInitialState(self, gameState)
    walls = gameState.getWalls()
    self.mazeSize = walls.height * walls.width
  def getSuccessor(self, gameState, action):
    """
    Finds the next successor which is a grid position (location tuple).
    """
    successor = gameState.generateSuccessor(self.index, action)
    pos = successor.getAgentState(self.index).getPosition()
    if pos != nearestPoint(pos):
      # Only half a grid position was covered
      return successor.generateSuccessor(self.index, action)
    else:
      return successor
  def doAction(self, gameState, action):
    """
    update last state and action
    """
    self.lastState = gameState
    self.lastAction = action
  def getQValue(self, gameState, action):
    """
    Computes a linear combination of features and feature weights
    """
    features = self.getFeatures(gameState, action)
    return features * self.weights
  def update(self, gameState, action, nextState, reward):
    # Standard approximate Q-learning weight update:
    # w += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)) * f(s,a)
    actions = nextState.getLegalActions(self.index)
    actions.remove('Stop')
    values = [self.getQValue(nextState, a) for a in actions]
    maxValue = max(values)
    features = self.getFeatures(gameState, action)
    diff = (reward + self.discout * maxValue) - self.getQValue(gameState, action)
    for feature in features:
      self.weights[feature] += self.alpha * diff * features[feature]
  def getGhosts(self, gameState):
    # Visible opponents currently in ghost form (on their own side).
    enemies = [gameState.getAgentState(a) for a in self.getOpponents(gameState)]
    ghosts = [a for a in enemies if not a.isPacman and a.getPosition()]
    return ghosts
  def getInvaders(self, gameState):
    # Visible opponents currently in Pacman form (on our side).
    enemies = [gameState.getAgentState(a) for a in self.getOpponents(gameState)]
    invaders = [a for a in enemies if a.isPacman and a.getPosition()]
    return invaders
  def getSafeActions(self, gameState, border=None):
    # Actions from which A* can still reach the border without the path
    # cost exceeding mazeSize (i.e. without hitting a penalized square).
    if border == None:
      border = self.border
    safeActions = []
    myPos = gameState.getAgentPosition(self.index)
    actions = gameState.getLegalActions(self.index)
    actions.remove('Stop')
    for action in actions:
      successor = self.getSuccessor(gameState, action)
      myNextPos = successor.getAgentPosition(self.index)
      finalNode = self.aStarSearch(successor, border, [myPos])
      if finalNode[2] < self.mazeSize:
        safeActions.append(action)
    return safeActions
  def getAlternativePath(self, gameState, minPathLength=5, penaltyDist=2, exploreRange=5):
    # Plan a short detour towards food/capsules while penalizing squares
    # within penaltyDist of any visible ghost.
    walls = gameState.getWalls()
    myPos = gameState.getAgentPosition(self.index)
    ghosts = self.getGhosts(gameState)
    foodList = self.getFood(gameState).asList()
    capsuleList = self.getCapsules(gameState)
    targetList = foodList + capsuleList
    penaltyPos = []
    for ghost in ghosts:
      for x in range(max(1, myPos[0] - exploreRange), min(myPos[0] + exploreRange, walls.width)):
        for y in range(max(1, myPos[1] - exploreRange), min(myPos[1] + exploreRange, walls.height)):
          pos = (int(x), int(y))
          if not pos in walls.asList():
            distToGhost = self.getMazeDistance(pos, ghost.getPosition())
            if distToGhost <= penaltyDist:
              penaltyPos.append(pos)
              # Do not target food that sits next to a ghost.
              if pos in targetList:
                targetList.remove(pos)
    if len(targetList) == 0:
      return [], None
    finalNode = self.aStarSearch(gameState, targetList, penaltyPos)
    pathLength = min(minPathLength, len(finalNode[1]))
    return finalNode[1][0:pathLength], finalNode[0]
  def aStarSearch(self, gameState, goals, penaltyPos=[], avoidGhost=True):
    # A* over grid positions; node = (position, path_of_actions, cost).
    # Heuristic: maze distance to the nearest goal.
    walls = gameState.getWalls().asList()
    ghosts = self.getGhosts(gameState)
    actions = [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]
    actionVectors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
    startPos = gameState.getAgentPosition(self.index)
    currentNode = (startPos, [], 0)
    pQueue = util.PriorityQueueWithFunction(lambda item: item[2] + min(self.getMazeDistance(item[0], goal) for goal in goals))
    pQueue.push(currentNode)
    closed = set()
    while currentNode[0] not in goals and not pQueue.isEmpty():
      currentNode = pQueue.pop()
      successors = [((currentNode[0][0] + v[0], currentNode[0][1] + v[1]), a) for v, a in zip(actionVectors ,actions)]
      legalSuccessors = [s for s in successors if s[0] not in walls]
      for successor in legalSuccessors:
        if successor[0] not in closed:
          closed.add(successor[0])
          position = successor[0]
          path = currentNode[1] + [successor[1]]
          cost = currentNode[2] + 1
          wallCount = 0
          # Penalized squares cost a full mazeSize, making them last resorts.
          if successor[0] in penaltyPos:
            cost += self.mazeSize
          if avoidGhost:
            # Soft cost that grows as the square gets closer to a ghost.
            distToGhost = min([self.getMazeDistance(successor[0], a.getPosition()) for a in ghosts])
            if distToGhost > 0:
              cost += (self.mazeSize / 4) / distToGhost
          pQueue.push((position, path, cost))
    return currentNode
  def isOppoentsScared(self, gameState, timer=4):
    # True when the nearest visible ghost is scared for more than `timer` moves.
    myPos = gameState.getAgentPosition(self.index)
    ghosts = self.getGhosts(gameState)
    if len(ghosts) == 0:
      return False
    closestGhost = min(ghosts, key=lambda x: self.getMazeDistance(myPos, x.getPosition()))
    return closestGhost.scaredTimer > timer
  def isStucking(self, gameState, stuckingCount=4):
    # True when we revisited our current square at least stuckingCount
    # times within the last 10 observations.
    history = self.observationHistory
    count = 0
    myPos = gameState.getAgentPosition(self.index)
    if len(history) > 0:
      for i in range(min(10, len(history))):
        myPastPos = history[-i - 1].getAgentPosition(self.index)
        if myPastPos == myPos:
          count += 1
    return count >= stuckingCount
  def isChased(self, gameState, chasedCount=3, minDist=3):
    # True when we are a Pacman and a ghost has stayed at the exact same
    # distance for the last chasedCount observations (within minDist).
    history = self.observationHistory
    myState = gameState.getAgentState(self.index)
    ghosts = self.getGhosts(gameState)
    if len(history) == 0 or len(ghosts) == 0 or not myState.isPacman:
      return False
    myPos = myState.getPosition()
    distToGhost = min([self.getMazeDistance(myPos, a.getPosition()) for a in ghosts])
    if distToGhost > minDist:
      return False
    for i in range(min(chasedCount, len(history))):
      pastState = history[-i - 1]
      myPastPos = pastState.getAgentPosition(self.index)
      pastGhosts = self.getGhosts(pastState)
      if len(pastGhosts) == 0:
        return False
      pastDistToGhost = min([self.getMazeDistance(myPastPos, a.getPosition()) for a in pastGhosts])
      if pastDistToGhost != distToGhost:
        return False
    return True
class OffensiveReflexAgent(ReflexCaptureAgent):
  """
  A reflex agent that seeks food. This is an agent
  we give you to get an idea of what an offensive agent might look like,
  but it is by no means the best or only way to build an offensive agent.
  """
  def __init__(self, index, **args):
    ReflexCaptureAgent.__init__(self, index, **args)
    # Hand-initialized linear Q-function weights; refined online by update().
    self.weights = util.Counter({
      'bias': 1.0,
      'distToTarget': -10.0,
      'distToGhost': 5.0,
      'distToBorder': -1.0,
      'eatFood': 1.0,
      '#-of-ghosts-2-step-away': -1,
    })
    self.border = None
  def registerInitialState(self, gameState):
    ReflexCaptureAgent.registerInitialState(self, gameState)
    # Collect the passable squares on our side of the center line; these
    # are the "home" positions used for safe retreats.
    walls = gameState.getWalls()
    border = []
    x = walls.width // 2
    if self.red:
      x -= 1
    for y in range(1, walls.height - 1):
      if not walls[x][y] and (x, y) != self.start:
        border.append((x, y))
    self.border = border
    # Initial target: the farthest food from our spawn.
    myPos = gameState.getAgentPosition(self.index)
    foodList = self.getFood(gameState).asList()
    if len(foodList) > 0:
      self.targetPos = max(foodList, key=lambda x: self.getMazeDistance(myPos, x))
  def observationFunction(self, gameState):
    """
    This is where we ended up after our last action.
    The simulation should somehow ensure this is called
    """
    if self.lastState:
      myPos = gameState.getAgentPosition(self.index)
      # Being back at spawn means we were eaten: drop the scripted path
      # and retarget the nearest food.
      if myPos == self.start:
        self.specificPath = []
        foodList = self.getFood(gameState).asList()
        if len(foodList) > 0:
          self.targetPos = min(foodList, key=lambda x: self.getMazeDistance(myPos, x))
      # Only learn when we are not following a fixed escape path.
      if len(self.specificPath) == 0:
        reward = self.getReward(gameState)
        self.update(self.lastState, self.lastAction, gameState, reward)
    return gameState
def getFeatures(self, gameState, action):
myState = gameState.getAgentState(self.index)
myPos = myState.getPosition()
successor = self.getSuccessor(gameState, action)
myNextState = successor.getAgentState(self.index)
myNextPos = myNextState.getPosition()
foodList = self.getFood(successor).asList()
capsuleList = self.getCapsules(successor)
ghosts = self.getGhosts(successor)
closestBorder = min(self.border, key=lambda x: self.getMazeDistance(myNextPos, x))
minDistToBorder = min([self.getMazeDistance(myNextPos, b) for b in self.border])
# Update target position
timeLeft = gameState.data.timeleft / gameState.getNumAgents()
if timeLeft - minDistToBorder <= 10:
self.targetPos = closestBorder
if len(foodList) <= 2:
self.targetPos = closestBorder
if len(foodList) > 2:
if myNextPos == self.targetPos or myNextPos in foodList:
self.targetPos = min(foodList, key=lambda x: self.getMazeDistance(myNextPos, x))
if len(ghosts) > 0 and len(foodList) > 2:
minDistToFood = min([self.getMazeDistance(myNextPos, f) for f in foodList])
minDistToGhost = min([self.getMazeDistance(myNextPos, a.getPosition()) for a in ghosts])
if not self.isOppoentsScared(successor):
if self.isChased(gameState):
self.targetPos = closestBorder
if myState.numCarrying >= 3 and minDistToBorder < minDistToFood:
self.targetPos = closestBorder
if myState.numCarrying >= 5 and minDistToGhost <= 5:
self.targetPos = closestBorder
if len(capsuleList) > 0:
minDistToCapsule = min([self.getMazeDistance(myNextPos, c) for c in capsuleList])
if self.isChased(successor) and minDistToCapsule < minDistToBorder:
self.targetPos = min(capsuleList, key=lambda x: self.getMazeDistance(myNextPos, x))
# Calculate features
distToGhost = 0.0
if len(ghosts) > 0:
distToGhost = min([self.getMazeDistance(myNextPos, a.getPosition()) for a in ghosts])
if not self.isOppoentsScared(successor) and myState.isPacman and myNextPos == self.start:
distToGhost = -999999
capsuleList = self.getCapsules(gameState)
distToCapsule = 0.0
if len(capsuleList) > 0:
distToCapsule = min([self.getMazeDistance(myNextPos, capsule) for capsule in capsuleList])
features = util.Counter()
features['bias'] = 1.0
features['distToTarget'] = self.getMazeDistance(myNextPos, self.targetPos) / self.mazeSize
features['distToGhost'] = distToGhost / self.mazeSize
features['distToCapsule'] = distToCapsule / self.mazeSize
features['#-of-ghosts-2-step-away'] = len([ghost for ghost in ghosts if self.getMazeDistance(myNextPos, ghost.getPosition()) <= 2])
if self.isOppoentsScared(successor):
features['distToGhost'] = 0.0
features['#-of-ghosts-2-step-away'] = 0.0
foodList = self.getFood(gameState).asList()
if not features['#-of-ghosts-2-step-away'] and myNextPos in foodList:
features['eatFood'] = 1.0
if myState.numCarrying > 0:
features['distToBorder'] = min([self.getMazeDistance(myPos, b) for b in self.border]) / (self.mazeSize / 4)
return features
def chooseAction(self, gameState):
"""
Picks among the actions with the highest Q(s,a).
"""
myState = gameState.getAgentState(self.index)
myPos = myState.getPosition()
ghosts = self.getGhosts(gameState)
if self.lastState:
myLastState = self.lastState.getAgentState(self.index)
ghosts = self.getGhosts(gameState)
if len(ghosts) > 0:
minDistToGhost = min([self.getMazeDistance(myPos, a.getPosition()) for a in ghosts])
if not myState.isPacman and myLastState.isPacman and minDistToGhost <= 3 and self.specificPath == []:
path, target = self.getAlternativePath(gameState, minPathLength=5)
self.specificPath = path
self.targetPos = target
if len(self.specificPath) > 0:
return self.specificPath.pop(0)
elif self.isStucking(gameState):
actions, target = self.getAlternativePath(gameState, minPathLength=5)
if len(actions) > 0:
self.specificPath = actions
self.targetPos = target
return self.specificPath.pop(0)
else:
actions = gameState.getLegalActions(self.index)
return random.choice(actions)
actions = gameState.getLegalActions(self.index)
actions.remove('Stop')
if len(ghosts) > 0:
distToGhost = min([self.getMazeDistance(myPos, a.getPosition()) for a in ghosts])
if not self.isOppoentsScared(gameState) and myState.isPacman and distToGhost <= 6:
safeActions = self.getSafeActions(gameState)
if len(safeActions) > 0:
actions = safeActions
values = [self.getQValue(gameState, a) for a in actions]
maxValue = max(values)
bestActions = [a for a, v in zip(actions, values) if v == maxValue]
bestAction = random.choice(bestActions)
self.doAction(gameState, bestAction)
return bestAction
def getReward(self, gameState):
reward = 0
myState = gameState.getAgentState(self.index)
myPos = myState.getPosition()
myLastPos = self.lastState.getAgentPosition(self.index)
foodList = self.getFood(self.lastState).asList()
capsuleList = self.getCapsules(self.lastState)
if myPos != self.targetPos:
reward -= 1
else:
if myPos in foodList:
reward += 1
elif myPos in capsuleList:
reward += 2
else:
reward += self.getScore(gameState) - self.getScore(self.lastState)
distToPrevPos = self.getMazeDistance(myPos, myLastPos)
if distToPrevPos > 1:
reward -= distToPrevPos / self.mazeSize
return reward
class DefensiveReflexAgent(ReflexCaptureAgent):
    """
    A reflex agent that keeps its side Pacman-free. Again,
    this is to give you an idea of what a defensive agent
    could be like. It is not the best or only way to make
    such an agent.
    """
    def __init__(self, index, **args):
        ReflexCaptureAgent.__init__(self, index, **args)
        # Initial weights for the linear Q-function; tuned online via update().
        self.weights = util.Counter({
            'bias': 1.0,
            'distToTarget': -10.0,
            'distToInvader': -1.0,
            'numOfInvaders': -1.0,
            'distToMissingFood': -1.0,
            'scaredScore': 1.0,
            'onDefense': 20.0,
        })
        self.initialFoodList = None  # food we defend at game start
        self.border = None           # walkable cells on our side of the mid-line
        self.defenceBorder = None    # patrol line a few columns inside our half
        self.deepBorder = None       # deeper fallback patrol line
        self.opponentPacman = []     # opponent indices believed to play offense
    def registerInitialState(self, gameState):
        """Precompute the border/patrol lines and the initial patrol target."""
        ReflexCaptureAgent.registerInitialState(self, gameState)
        self.initialFoodList = self.getFoodYouAreDefending(gameState).asList()
        walls = gameState.getWalls()
        self.mazeSize = walls.height * walls.width
        # Mid-line border cells on our side.
        border = []
        x = walls.width // 2
        if self.red:
            x -= 1
        for y in range(1, walls.height - 1):
            if not walls[x][y] and (x, y) != self.start:
                border.append((x, y))
        self.border = border
        # Patrol line two/three columns inside our territory.
        defenceBorder = []
        x = walls.width // 2
        if self.red:
            x -= 3
        else:
            x += 2
        for y in range(1, walls.height - 1):
            if not walls[x][y] and (x, y) != self.start:
                defenceBorder.append((x, y))
        self.defenceBorder = defenceBorder
        # Initial target: the patrol cell minimising total distance to our food.
        distCounter = util.Counter()
        for b in self.defenceBorder:
            dist = 0
            for food in self.getFoodYouAreDefending(gameState).asList():
                dist += self.getMazeDistance(b, food)
            distCounter[b] = dist
        self.targetPos = min(distCounter, key=distCounter.get)
        # Deeper fallback patrol line (used while scared).
        deepBorder = []
        x = walls.width // 2
        if self.red:
            x -= 5
        else:
            x += 4
        for y in range(1, walls.height - 1):
            if not walls[x][y] and (x, y) != self.start:
                deepBorder.append((x, y))
        self.deepBorder = deepBorder
        # NOTE(review): the result of this second distCounter loop is never
        # used (no assignment after it) — looks like dead code; confirm intent.
        distCounter = util.Counter()
        for b in self.deepBorder:
            dist = 0
            for food in self.getFoodYouAreDefending(gameState).asList():
                dist += self.getMazeDistance(b, food)
            distCounter[b] = dist
    def observationFunction(self, gameState):
        """
        This is where we ended up after our last action.
        The simulation should somehow ensure this is called
        """
        if self.lastState:
            myState = gameState.getAgentState(self.index)
            myPos = myState.getPosition()
            invaders = self.getInvaders(gameState)
            # Any scripted path is abandoned once we are back on defense or
            # after being captured.
            if not myState.isPacman:
                self.specificPath = []
            if myPos == self.start:
                self.specificPath = []
            foodList = self.getFood(gameState).asList()
            if len(invaders) > 0:
                # Chase the closest visible invader.
                self.targetPos = min(invaders, key=lambda x: self.getMazeDistance(myPos, x.getPosition())).getPosition()
            else:
                # No invader: patrol the cell minimising total distance to our food.
                distCounter = util.Counter()
                for b in self.defenceBorder:
                    dist = 0
                    for food in self.getFoodYouAreDefending(gameState).asList():
                        dist += self.getMazeDistance(b, food)
                    distCounter[b] = dist
                self.targetPos = min(distCounter, key=distCounter.get)
            if len(self.specificPath) == 0:
                # Learn only from transitions chosen by the Q-policy.
                reward = self.getReward(gameState)
                self.update(self.lastState, self.lastAction, gameState, reward)
        return gameState
    def getFeatures(self, gameState, action):
        """Build the feature vector for Q(s, a); also refreshes self.targetPos
        from invaders / missing food / opponent-Pacman estimates."""
        myState = gameState.getAgentState(self.index)
        myPos = myState.getPosition()
        successor = self.getSuccessor(gameState, action)
        myNextState = successor.getAgentState(self.index)
        myNextPos = myNextState.getPosition()
        foodList = self.getFoodYouAreDefending(successor).asList()
        missingFoodList = self.getMissingFoods(successor)
        invaders = self.getInvaders(successor)
        ghosts = self.getGhosts(successor)
        # Update target position
        self.findOpponentPacman()
        if len(invaders) > 0:
            minDistToInvader = min([self.getMazeDistance(myPos, a.getPosition()) for a in invaders])
            if minDistToInvader >= 8 and len(missingFoodList) > 0:
                # Invader is far away but food has vanished: guard the area
                # where food just disappeared.
                minDist = float('inf')
                closestFoodFromMissing = None
                for missingFood in missingFoodList:
                    distFromMissingFood = min([self.getMazeDistance(missingFood, f) for f in foodList])
                    # NOTE(review): minDist is never updated inside this loop,
                    # so every candidate with dist < inf overwrites the pick and
                    # the LAST missing food wins, not the closest — likely a bug.
                    if distFromMissingFood < minDist:
                        closestFoodFromMissing = missingFood
                self.targetPos = closestFoodFromMissing
            else:
                self.targetPos = min(invaders, key=lambda x: self.getMazeDistance(myPos, x.getPosition())).getPosition()
        else:
            if len(self.opponentPacman) > 0:
                # Intercept: patrol the cell closest to a suspected attacker.
                distCounter = util.Counter()
                for b in self.defenceBorder:
                    dist = 0
                    for p in self.opponentPacman:
                        distCounter[(b, p)] = self.getMazeDistance(b, gameState.getAgentPosition(p))
                self.targetPos = min(distCounter, key=distCounter.get)[0]
            else:
                # Default patrol: minimise total distance to defended food.
                distCounter = util.Counter()
                for b in self.defenceBorder:
                    dist = 0
                    for food in self.getFoodYouAreDefending(gameState).asList():
                        dist += self.getMazeDistance(b, food)
                    distCounter[b] = dist
                self.targetPos = min(distCounter, key=distCounter.get)
        if myNextState.scaredTimer > 0:
            # While scared we cannot fight; fall back to the deeper patrol line.
            if len(self.opponentPacman) > 0:
                agent = min(self.opponentPacman, key=lambda x: self.getMazeDistance(myPos, successor.getAgentPosition(x)))
                if not successor.getAgentState(agent).isPacman:
                    distCounter = util.Counter()
                    for b in self.deepBorder:
                        dist = 0
                        for p in self.opponentPacman:
                            distCounter[(b, p)] = self.getMazeDistance(b, gameState.getAgentPosition(p))
                    self.targetPos = min(distCounter, key=distCounter.get)[0]
        # Calculate features
        distToInvader = 0.0
        if len(invaders) > 0:
            distToInvader = min([self.getMazeDistance(myNextPos, a.getPosition()) for a in invaders])
        features = util.Counter()
        features['bias'] = 1.0
        features['numOfInvaders'] = len(invaders) / 2.0
        features['distToTarget'] = self.getMazeDistance(myNextPos, self.targetPos) / self.mazeSize
        if myNextState.scaredTimer > 0:
            # Reward keeping our distance while the scare lasts.
            features['scaredScore'] = (distToInvader - myNextState.scaredTimer) / self.mazeSize
            if len(invaders) > 0:
                distToInvader = min([self.getMazeDistance(myNextPos, a.getPosition()) for a in invaders])
                if distToInvader <= 1:
                    # Stepping next to an invader while scared gets us eaten.
                    features['distToInvader'] = 99999
        else:
            features['scaredScore'] = 0.0
            features['distToInvader'] = distToInvader / self.mazeSize
        if not myNextState.isPacman:
            features['onDefense'] = 1.0
        else:
            features['onDefense'] = -1.0
        return features
    def chooseAction(self, gameState):
        """
        Picks among the actions with the highest Q(s,a).
        """
        actions = gameState.getLegalActions(self.index)
        actions.remove('Stop')
        myState = gameState.getAgentState(self.index)
        myPos = myState.getPosition()
        invaders = self.getInvaders(gameState)
        if myState.scaredTimer > 0 and len(invaders) > 0:
            # NOTE(review): distToGhost is computed but never used below.
            distToGhost = min([self.getMazeDistance(myPos, a.getPosition()) for a in invaders])
            if not myState.isPacman:
                # While scared, restrict to actions judged safe near our patrol line.
                safeActions = self.getSafeActions(gameState, self.defenceBorder)
                if len(safeActions) > 0:
                    actions = safeActions
        values = [self.getQValue(gameState, a) for a in actions]
        maxValue = max(values)
        bestActions = [a for a, v in zip(actions, values) if v == maxValue]
        bestAction = random.choice(bestActions)
        self.doAction(gameState, bestAction)
        return bestAction
    def getReward(self, gameState):
        """Shaped reward: -1 for missing the target, penalties when defended
        food/capsules were eaten, plus the score delta; extra penalty when we
        were captured (teleported more than one cell)."""
        reward = 0
        myState = gameState.getAgentState(self.index)
        myPos = myState.getPosition()
        myLastState = self.lastState.getAgentState(self.index)
        myLastPos = myLastState.getPosition()
        foodList = self.getFoodYouAreDefending(gameState).asList()
        lastFoodList = self.getFoodYouAreDefending(self.lastState).asList()
        capsuleList = self.getCapsulesYouAreDefending(gameState)
        lastCapsuleList = self.getCapsulesYouAreDefending(self.lastState)
        if myPos != self.targetPos:
            reward -= 1
        else:
            if len(foodList) < len(lastFoodList):
                reward -= 1
            elif len(capsuleList) < len(lastCapsuleList):
                reward -= 2
            else:
                reward += self.getScore(gameState) - self.getScore(self.lastState)
        distToPrevPos = self.getMazeDistance(myPos, myLastPos)
        if distToPrevPos > 1:
            reward -= distToPrevPos / self.mazeSize
        return reward
    def getMissingFoods(self, gameState, exploreRange=5):
        """Return the defended food dots that disappeared most recently within
        the last exploreRange observations, or [] if none vanished.

        NOTE(review): with flattened indentation the `else` attachment is
        ambiguous; it is reconstructed here as a for/else (return [] only after
        the whole lookback is searched) — confirm against the original file.
        """
        history = self.observationHistory
        for i in range(1, min(exploreRange, len(history))):
            foodList = self.getFoodYouAreDefending(history[-i]).asList()
            lastFoodLIst = self.getFoodYouAreDefending(history[-i - 1]).asList()
            missingList = [f for f in lastFoodLIst if f not in foodList]
            if len(missingList) > 0:
                return missingList
        else:
            return []
    def findOpponentPacman(self):
        """Estimate which opponents are attacking: an opponent that has been a
        Pacman for at least 5 consecutive recent observations is recorded in
        self.opponentPacman."""
        opponentIndex = []
        if self.red:
            opponentIndex = [1, 3]
        else:
            opponentIndex = [0, 2]
        pacmanIndex = []
        history = self.observationHistory
        for index in opponentIndex:
            count = 0
            # NOTE(review): historyCount is incremented but never read.
            historyCount = 0
            for j in range(len(history)):
                pastState = history[-j - 1]
                historyCount += 1
                if pastState.getAgentState(index).isPacman:
                    count += 1
                    if count >= 5:
                        pacmanIndex.append(index)
                        break
                else:
                    count = 0
        self.opponentPacman = pacmanIndex
990,070 | 495b3a6631d5bb729097ce44fbb34da798131f66 | def color(name,colour):
if type(name) is not str:
raise TypeError("only strings allowed")
print(f"{name} likes {colour}")
color("alley","green")
color(21,"red") |
990,071 | 520335e224f55d367329f5b6cfd6ca4c938ead74 | # Generated by Django 2.0.7 on 2018-10-14 06:58
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: renames Pill.qrcode to QR_AI01."""
    dependencies = [
        ('appDatas', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='pill',
            old_name='qrcode',
            new_name='QR_AI01',
        ),
    ]
|
990,072 | 97b89627f52004d09e651e733aa7ef04c4c2508b | import ROOT as r
# Build a small ROOT file containing a TTree with a vector<float> branch and a
# set of 1D/2D histograms, for use as a test fixture.
f = r.TFile("tree_cycles_hist.root", "recreate")
t = r.TTree("Events", "")
# The branch buffer: a std::vector<float> refilled for every entry.
obj = r.vector("float")()
t.Branch("Jet_pt", obj)
rows = [[], [27.324586868286133, 24.88954734802246, 20.853023529052734], [], [20.330659866333008], [], []]
for i,row in enumerate(rows):
    obj.clear()
    for x in row:
        obj.push_back(x)
    t.Fill()
    if i == 3:
        # Mid-loop Write() creates an extra tree cycle in the output file
        # (presumably the point of this fixture — confirm).
        t.Write()
th1f = r.TH1F("myTH1F", "", 2, -2, 2)
th1d = r.TH1D("myTH1D", "", 2, -2, 2)
th2f = r.TH2F("myTH2F", "", 2, -2, 2, 4, -2, 2)
th2d = r.TH2D("myTH2D", "", 2, -2, 2, 4, -2, 2)
# Fill every histogram with the same four weighted points.
for x,y,w in [
    [-1.5, -1.5, 20.0],
    [+1.5, +1.5, 1.0],
    [-1.5, +1.5, 20.0],
    [+1.5, -1.5, 1.0],
]:
    th1f.Fill(x, w)
    th1d.Fill(x, w)
    th2f.Fill(x, y, w)
    th2d.Fill(x, y, w)
f.Write()
f.Close()
990,073 | 6aa29e8b5b8fa0c8dbb289ac016b587e114e1db3 | import numpy as np
from deepthought.experiments.encoding.experiment_templates.base import NestedCVExperimentTemplate
class End2EndBaseline(NestedCVExperimentTemplate):
    """End-to-end baseline experiment: skips encoder pre-training by using an
    identity "encoder" that just returns the raw feature data."""
    def __init__(self,
                 job_id,
                 hdf5name,
                 fold_generator,
                 pipeline_factory,
                 **kwargs):
        # Factory used later by run() to build the classifier pipeline.
        self.pipeline_factory = pipeline_factory
        super(End2EndBaseline, self).__init__(job_id, hdf5name, fold_generator, **kwargs)
    def pretrain_encoder(self, *args, **kwargs):
        """Return a pass-through encoder function instead of training one."""
        def dummy_encoder_fn(indices):
            # Fetch the raw 'features' source for the requested trial indices.
            if type(indices) == np.ndarray:
                indices = indices.tolist()  # ndarray is not supported as indices
            # read the chunk of data for the given indices
            state = self.full_hdf5.open()
            data = self.full_hdf5.get_data(request=indices, state=state)
            self.full_hdf5.close(state)
            # get only the features source
            source_idx = self.full_hdf5.sources.index('features')
            data = np.ascontiguousarray(data[source_idx])
            return data
        return dummy_encoder_fn
    def run(self, verbose=False):
        """Run the nested-CV experiment with a simple NN classifier built from
        the stored pipeline factory."""
        # Imported lazily to avoid a hard dependency at module import time.
        from deepthought.experiments.encoding.classifiers.simple_nn import SimpleNNClassifierFactory
        cls_factory = SimpleNNClassifierFactory(self.pipeline_factory)
        super(End2EndBaseline, self).run(classifiers=(('mlp', cls_factory),), verbose=verbose)
|
def perfectnum(num):
    """Return True if *num* is a perfect number, else False.

    A perfect number equals the sum of its proper divisors
    (e.g. 6 = 1 + 2 + 3). The original returned None implicitly for
    non-perfect numbers; returning False is backward-compatible (both falsy)
    and makes the contract explicit.
    """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for i in range(1, num):
        if num % i == 0:
            total += i
    return num == total
# Collect and report every perfect number between 1 and 999.
perfectnumbers = [candidate for candidate in range(1, 1000) if perfectnum(candidate)]
print("Perfect Numbers Between 1 and 1000 are\n", perfectnumbers)
# Read an integer from the user (prompt is in Portuguese: "type a number").
n = int(input("Digite um numero: "))
resto = n % 3
# Print the number itself unless it is divisible by 3, in which case print
# "Fizz" (partial FizzBuzz).
if resto > 0:
    print(n)
else:
    print("Fizz")
990,076 | 339da4279f815e45db89bd6b115ed4d583fd09a5 | from django.db import models
# Create your models here.
class Manufacturer(models.Model):
    """A manufacturer record with the date it was added to the catalogue."""
    name = models.CharField(max_length=50)
    date_added = models.DateField()
    def __str__(self):
        # Human-readable form shown in the admin and interactive shell.
        return f'{self.name} added on {self.date_added}'
|
990,077 | d5ce9046b71d0cbe40a77b1b22c60339c8d10f9a | try:
from unittest import mock
except ImportError:
import mock
import betamax
from betamax.decorator import use_cassette
@mock.patch('betamax.recorder.Betamax', autospec=True)
def test_wraps_session(Betamax):
    """use_cassette should build a Betamax recorder around a session with the
    given cassette options and enter the named cassette."""
    # This needs to be a magic mock so it will mock __exit__
    recorder = mock.MagicMock(spec=betamax.Betamax)
    recorder.use_cassette.return_value = recorder
    Betamax.return_value = recorder
    @use_cassette('foo', cassette_library_dir='fizbarbogus')
    def _test(session):
        pass
    _test()
    Betamax.assert_called_once_with(
        session=mock.ANY,
        cassette_library_dir='fizbarbogus',
        default_cassette_options={}
    )
    recorder.use_cassette.assert_called_once_with('foo')
@mock.patch('betamax.recorder.Betamax', autospec=True)
@mock.patch('requests.Session')
def test_creates_a_new_session(Session, Betamax):
    """use_cassette should create exactly one new requests.Session for the
    decorated test function."""
    @use_cassette('foo', cassette_library_dir='dir')
    def _test(session):
        pass
    _test()
    assert Session.call_count == 1
|
990,078 | dda2dbd589a326af41368501ff58a2a68c2b01e9 |
#from flask import Flask
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, case
import datetime as dt
from sqlalchemy.pool import NullPool
from flask import Flask, jsonify, render_template, abort, request, send_from_directory, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
import sqlite3
import pymysql
# Register pymysql as the MySQLdb driver (not strictly needed for SQLite).
pymysql.install_as_MySQLdb()
# engine = create_engine("sqlite:///Resources/hawaii.sqlite",
#                        poolclass=NullPool)
# Engine used for raw-SQL / pandas access to the Olympics SQLite database.
engine = create_engine("sqlite:///data/new_olympics.db")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# NOTE(review): Olympian is bound but the routes below query `raw` and
# `country_ref` from the models module instead — confirm it is still needed.
Olympian = Base.classes.olympics_raw
# Save references to each table
# Create our session (link) from Python to the DB
session = Session(engine)
###############################################
######## TESTING TABLE EXISTENCE ##############
###############################################
# Debug check: print the table names so a missing DB fails loudly at startup.
# NOTE(review): this sqlite3 connection is never closed.
conn = sqlite3.connect('data/new_olympics.db')
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
print(cursor.fetchall())
################################################
################## WORKS ######################
################################################
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data/new_olympics.db'
db = SQLAlchemy(app)
# NOTE(review): star-import mid-file supplies `raw` and `country_ref` used by
# the routes below; it must run after `db` is created.
from models import *
@app.route('/map')
def hello_world():
    """Render the world-map visualisation page."""
    return (
        render_template("map.html")
    )
@app.route('/')
def home():
    """Render the landing page."""
    return(
        render_template('home.html')
    )
@app.route('/country/')
def land():
    """Redirect bare /country/ requests to a default country (Switzerland)."""
    print('in a country')
    return redirect(url_for('countryPlot',NOC ='SUI'))
@app.route('/data/<csv>')
def data(csv):
    """Serve a file from the data/ directory as a download."""
    return send_from_directory('data/',csv, as_attachment=True)
@app.route('/mapData')
def map_data():
    """Return per-country medal counts as JSON keyed by country id.

    Shape: {country_id: {name, code, medals: {medal_type: count}}}.
    """
    # Medal counts grouped by (country, medal type).
    subq = (db.session.query(raw.country_id,raw.Medal,func.count(raw.id)).group_by(raw.country_id,raw.Medal)).subquery()
    results=db.session.query(country_ref.country_name, country_ref.code, subq).join(subq).all()
    country_dic ={}
    # Each row is (name, code, country_id, medal, count) — fold the medal rows
    # for one country into a single dict entry.
    for result in results:
        if result[2] in country_dic:
            country_dic[result[2]]['medals'][result[3]] = result[4]
        else:
            country ={}
            country['name'] = result[0]
            country['code'] = result[1]
            country['medals'] = {}
            country['medals'][result[3]] = result[4]
            country_dic[result[2]] = country
    return(
        jsonify(country_dic)
    )
@app.route('/countryData/<id>')
def countryData(id):
    """Return JSON medal counts (total and gold) per edition/gender/sport for
    the given country id."""
    # CASE expression: 1 for a gold medal row, 0 otherwise; summed per group.
    goldCase = case([(raw.Medal == 'Gold', 1)], else_=0)
    count = db.session.query(raw.Edition, raw.Gender, raw.Sport,func.count(raw.id).label('totMed'),func.sum(goldCase).label('goldMed')).group_by(raw.Edition,raw.Gender,raw.Sport).filter(raw.country_id==id).all()
    editions =[]
    for result in count:
        edition ={}
        edition['Edition'] = result.Edition
        edition['Gender'] = result.Gender
        edition['Sport'] = result.Sport
        edition['Total_Medals'] = result.totMed
        edition['Medal_Gold'] = result.goldMed
        editions.append(edition)
    return jsonify(editions)
@app.route('/country/<NOC>')
def countryPlot(NOC):
    """Render the country detail page, with word-cloud data (medal counts per
    sport) for the country identified by its NOC code."""
    country_data = db.session.query(country_ref.id, country_ref.code, country_ref.country_name,country_ref.flag_image).filter(country_ref.code== NOC).all()
    print(country_data)
    # Medal count per sport, used as word sizes in the front-end word cloud.
    word_cloud = db.session.query(raw.Sport,func.count(raw.id).label('nMed')).group_by(raw.Sport).filter(raw.country_id== country_data[0].id).all()
    words = []
    for sport in word_cloud:
        sp ={}
        sp['word'] = sport.Sport
        sp['size'] = sport.nMed
        words.append(sp)
    print(words)
    return render_template('country.html', data= country_data,words = words)
@app.route("/api/v1.0/olympians", methods=['GET'])
def names():
    """Return a list of all olympian data"""
    # Load the whole olympics_raw table; no user input reaches this query.
    df = pd.read_sql_query(f"SELECT * FROM olympics_raw", con = engine)
    print(df.head())
    # return jsonify(all_olympians)
    return jsonify(df.to_dict(orient='records'))
@app.route('/api/v1.0/olympians/params/', methods=['GET'])
# example : http://127.0.0.1:5000/api/v1.0/olympians/params?Edition=2000&Sport=Aquatics
def get_parameters():
    """Return olympian rows filtered by whitelisted query-string parameters.

    Fixes over the original:
    - values are bound as SQL parameters instead of interpolated into the
      query string (SQL injection);
    - values are read under the caller's original key, so lowercase query
      keys (e.g. ?edition=2000) no longer raise KeyError;
    - an empty filter set no longer produces invalid 'WHERE ' SQL.
    """
    params = request.args.to_dict()
    possible_params = ["City", "Edition", "Sport", "Discipline", "Athlete", "NOC", "Gender", "Event", "Event_gender", "Medal"]
    # Keep only whitelisted columns; remember the (capitalized) value to bind.
    filters = {}
    for raw_key, raw_value in params.items():
        key = raw_key.capitalize()
        if key in possible_params:
            filters[key] = raw_value.capitalize()
    cols = list(filters.keys())
    print(cols)
    print("----------------------------------------------------------------------")
    # Column names come from the whitelist above; values use qmark
    # placeholders bound by the SQLite driver.
    query = "SELECT * FROM olympics_raw"
    if cols:
        where_clause = ' AND '.join(f"{col} = ?" for col in cols)
        print(where_clause)
        query += f" WHERE {where_clause}"
    df = pd.read_sql_query(query, con = engine, params=tuple(filters[col] for col in cols))
    print(df.head())
    # return jsonify(all_olympians)
    return jsonify(df.to_dict(orient='records'))
if __name__ == '__main__':
    # Run the Flask development server (debug mode; not for production).
    app.run(debug=True)
|
990,079 | db54ea5a922913df0aeb5b5e3bfcdaa556a5632d | from pathlib import Path
from unittest import IsolatedAsyncioTestCase
import os
import shutil
import tempfile
from unittest.mock import AsyncMock, MagicMock, patch, ANY
from pyartcd.pipelines.promote import PromotePipeline
from doozerlib.assembly import AssemblyTypes
class TestPromotePipeline(IsolatedAsyncioTestCase):
FAKE_DEST_MANIFEST_LIST = {
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-dest-multi-amd64",
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-dest-multi-ppc64le",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-dest-multi-s390x",
"platform": {
"architecture": "s390x",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-dest-multi-aarch64",
"platform": {
"architecture": "arm64",
"os": "linux"
}
}
]
}
FAKE_SOURCE_MANIFEST_LIST = {
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-source-multi-amd64",
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-source-multi-ppc64le",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-source-multi-s390x",
"platform": {
"architecture": "s390x",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1583,
"digest": "fake:deadbeef-source-multi-arm64",
"platform": {
"architecture": "arm64",
"os": "linux"
}
}
]
}
def setUp(self) -> None:
os.environ.update({
"GITHUB_TOKEN": "fake-github-token",
"JIRA_TOKEN": "fake-jira-token",
"QUAY_PASSWORD": "fake-quay-password",
"SIGNING_CERT": "/path/to/signing.crt",
"SIGNING_KEY": "/path/to/signing.key",
"REDIS_SERVER_PASSWORD": "fake-redis-server-password",
"REDIS_HOST": "fake-redis-host",
"REDIS_PORT": "12345",
"JENKINS_SERVICE_ACCOUNT": "fake-jenkins-service-account",
"JENKINS_SERVICE_ACCOUNT_TOKEN": "fake-jenkins-service-account-token",
"AWS_ACCESS_KEY_ID": "fake-aws-access-key-id",
"AWS_SECRET_ACCESS_KEY": "fake-aws-crecret-access-key",
})
@patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
@patch("pyartcd.pipelines.promote.util.load_releases_config", return_value={})
@patch("pyartcd.pipelines.promote.util.load_group_config", return_value=dict(arches=["x86_64", "s390x"]))
async def test_run_without_explicit_assembly_definition(
self, load_group_config: AsyncMock, load_releases_config: AsyncMock, _):
runtime = MagicMock(
config={
"build_config": {
"ocp_build_data_url": "https://example.com/ocp-build-data.git"
},
"jira": {
"url": "https://issues.redhat.com/"
}
},
working_dir=Path("/path/to/working"),
dry_run=False
)
pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
with self.assertRaisesRegex(ValueError, "must be explicitly defined"):
await pipeline.run()
load_group_config.assert_awaited_once_with("openshift-4.10", "4.10.99", env=ANY)
load_releases_config.assert_awaited_once_with(
group='openshift-4.10', data_path='https://example.com/ocp-build-data.git')
@patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
@patch("pyartcd.pipelines.promote.util.load_releases_config", return_value={
"releases": {"stream": {"assembly": {"type": "stream"}}}
})
@patch("pyartcd.pipelines.promote.util.load_group_config", return_value=dict(arches=["x86_64", "s390x"]))
async def test_run_with_stream_assembly(self, load_group_config: AsyncMock, load_releases_config: AsyncMock, _):
runtime = MagicMock(
config={
"build_config": {
"ocp_build_data_url": "https://example.com/ocp-build-data.git"
},
"jira": {
"url": "https://issues.redhat.com/"
}
},
working_dir=Path("/path/to/working"),
dry_run=False
)
pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="stream", signing_env="prod")
with self.assertRaisesRegex(ValueError, "not supported"):
await pipeline.run()
load_group_config.assert_awaited_once_with("openshift-4.10", "stream", env=ANY)
load_releases_config.assert_awaited_once_with(group='openshift-4.10',
data_path='https://example.com/ocp-build-data.git')
@patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
@patch("pyartcd.pipelines.promote.util.load_releases_config", return_value={
"releases": {"art0001": {"assembly": {"type": "custom", "basis": {}}}}
})
@patch("pyartcd.pipelines.promote.util.load_group_config", return_value=dict(arches=["x86_64", "s390x"]))
async def test_run_with_custom_assembly_and_missing_release_offset(
self, load_group_config: AsyncMock, load_releases_config: AsyncMock, _):
runtime = MagicMock(
config={
"build_config": {
"ocp_build_data_url": "https://example.com/ocp-build-data.git"
},
"jira": {
"url": "https://issues.redhat.com/"
}
},
working_dir=Path("/path/to/working"),
dry_run=False,
new_slack_client=MagicMock(return_value=AsyncMock())
)
pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="art0001", signing_env="prod")
with self.assertRaisesRegex(ValueError, "patch_version is not set"):
await pipeline.run()
load_group_config.assert_awaited_once_with("openshift-4.10", "art0001", env=ANY)
load_releases_config.assert_awaited_once_with(group='openshift-4.10',
data_path='https://example.com/ocp-build-data.git')
@patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
@patch("pyartcd.pipelines.promote.PromotePipeline.build_release_image", return_value=None)
@patch("pyartcd.pipelines.promote.get_release_image_info", side_effect=lambda pullspec, raise_if_not_found=False: {
"image": pullspec,
"digest": f"fake:deadbeef-{pullspec}",
"metadata": {
"version": "4.10.99-assembly.art0001",
},
"references": {
"spec": {
"tags": [
{
"name": "machine-os-content",
"annotations": {"io.openshift.build.versions": "machine-os=00.00.212301010000-0"}
}
]
}
}
} if raise_if_not_found else None)
@patch("pyartcd.pipelines.promote.util.load_releases_config", return_value={
"releases": {"art0001": {"assembly": {"type": "custom", "basis": {"patch_version": 99}}}}
})
@patch("pyartcd.pipelines.promote.util.load_group_config", return_value=dict(arches=["x86_64", "s390x"]))
@patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream")
async def test_run_with_custom_assembly(self, get_image_stream: AsyncMock, load_group_config: AsyncMock,
load_releases_config: AsyncMock, get_release_image_info: AsyncMock,
build_release_image: AsyncMock, _):
runtime = MagicMock(
config={
"build_config": {
"ocp_build_data_url": "https://example.com/ocp-build-data.git"
},
"jira": {
"url": "https://issues.redhat.com/"
}
},
working_dir=Path("/path/to/working"),
dry_run=False
)
runtime.new_slack_client.return_value = AsyncMock()
runtime.new_slack_client.return_value.say.return_value = {'message': {'ts': ''}}
runtime.new_slack_client.return_value.bind_channel = MagicMock()
pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="art0001",
skip_attached_bug_check=True, skip_mirror_binaries=True, signing_env="prod")
await pipeline.run()
load_group_config.assert_awaited_once_with("openshift-4.10", "art0001", env=ANY)
load_releases_config.assert_awaited_once_with(group='openshift-4.10',
data_path='https://example.com/ocp-build-data.git')
get_release_image_info.assert_any_await(
"quay.io/openshift-release-dev/ocp-release:4.10.99-assembly.art0001-x86_64", raise_if_not_found=ANY)
get_release_image_info.assert_any_await(
"quay.io/openshift-release-dev/ocp-release:4.10.99-assembly.art0001-s390x", raise_if_not_found=ANY)
build_release_image.assert_any_await(
"4.10.99-assembly.art0001", "x86_64", [], {},
"quay.io/openshift-release-dev/ocp-release:4.10.99-assembly.art0001-x86_64", None,
'4.10-art-assembly-art0001', keep_manifest_list=False)
build_release_image.assert_any_await(
"4.10.99-assembly.art0001", "s390x", [], {},
"quay.io/openshift-release-dev/ocp-release:4.10.99-assembly.art0001-s390x", None,
'4.10-art-assembly-art0001-s390x', keep_manifest_list=False)
pipeline._slack_client.bind_channel.assert_called_once_with("4.10.99-assembly.art0001")
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.util.load_releases_config", return_value={
        "releases": {"4.10.99": {"assembly": {"type": "standard"}}}
    })
    @patch("pyartcd.pipelines.promote.util.load_group_config", return_value=dict(arches=["x86_64", "s390x"]))
    async def test_run_with_standard_assembly_without_upgrade_edges(self, load_group_config: AsyncMock,
                                                                    load_releases_config: AsyncMock, _):
        """A standard assembly whose group config has no `upgrades` field must abort the run.

        The mocked group config above deliberately omits `upgrades`, so
        `PromotePipeline.run()` is expected to raise ValueError before doing any work.
        """
        runtime = MagicMock(
            config={
                "build_config": {
                    "ocp_build_data_url": "https://example.com/ocp-build-data.git"
                },
                "jira": {
                    "url": "https://issues.redhat.com/"
                }
            },
            working_dir=Path("/path/to/working"),
            dry_run=False
        )
        # Slack client is an AsyncMock except bind_channel, which the pipeline calls synchronously.
        runtime.new_slack_client.return_value = AsyncMock()
        runtime.new_slack_client.return_value.say.return_value = {'message': {'ts': ''}}
        runtime.new_slack_client.return_value.bind_channel = MagicMock()
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
        with self.assertRaisesRegex(ValueError, "missing the required `upgrades` field"):
            await pipeline.run()
        # The failure must happen after both configs were loaded (exactly once each).
        load_group_config.assert_awaited_once_with("openshift-4.10", "4.10.99", env=ANY)
        load_releases_config.assert_awaited_once_with(group='openshift-4.10', data_path='https://example.com/ocp-build-data.git')
    @patch("pyartcd.pipelines.promote.PromotePipeline.sign_artifacts")
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.build_release_image", return_value=None)
    @patch("pyartcd.pipelines.promote.get_release_image_info", side_effect=lambda pullspec, raise_if_not_found=False: {
        "image": pullspec,
        "digest": f"fake:deadbeef-{pullspec}",
        "metadata": {
            "version": "4.10.99",
        },
        "references": {
            "spec": {
                "tags": [
                    {
                        "name": "machine-os-content",
                        "annotations": {"io.openshift.build.versions": "machine-os=00.00.212301010000-0"}
                    }
                ]
            }
        }
    } if raise_if_not_found else None)
    @patch("pyartcd.pipelines.promote.util.load_releases_config", return_value={
        "releases": {"4.10.99": {"assembly": {"type": "standard", "basis": {"reference_releases": {
            "x86_64": "nightly-x86_64",
            "s390x": "nightly-s390x",
            "ppc64le": "nightly-ppc64le",
            "aarch64": "nightly-aarch64",
        }}}}}
    })
    @patch("pyartcd.pipelines.promote.util.load_group_config", return_value={
        "upgrades": "4.10.98,4.9.99",
        "advisories": {"rpm": 1, "image": 2, "extras": 3, "metadata": 4},
        "description": "whatever",
        "arches": ["x86_64", "s390x", "ppc64le", "aarch64"],
    })
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream")
    async def test_run_with_standard_assembly(self, get_image_stream: AsyncMock, load_group_config: AsyncMock,
                                              load_releases_config: AsyncMock, get_release_image_info: AsyncMock,
                                              build_release_image: AsyncMock, _, __):
        """Happy path: promoting a standard (GA) assembly across all four arches.

        Verifies, end to end, that run():
        - loads group/releases config,
        - checks blocker bugs and moves all four advisories to QE,
        - builds one release image per arch from its reference nightly,
        - tags each image into the stable image stream and waits for acceptance,
        - sends the image-list email for the image advisory.
        """
        runtime = MagicMock(
            config={
                "build_config": {
                    "ocp_build_data_url": "https://example.com/ocp-build-data.git"
                },
                "jira": {
                    "url": "https://issues.redhat.com/"
                }
            },
            working_dir=Path("/path/to/working"),
            dry_run=False
        )
        runtime.new_slack_client.return_value = AsyncMock()
        runtime.new_slack_client.return_value.say.return_value = {'message': {'ts': ''}}
        runtime.new_slack_client.return_value.bind_channel = MagicMock()
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99",
                                   skip_mirror_binaries=True, signing_env="prod")
        # Stub out the pipeline's own collaborators so only run()'s orchestration is under test.
        pipeline.check_blocker_bugs = AsyncMock()
        pipeline.change_advisory_state = AsyncMock()
        pipeline.get_advisory_info = AsyncMock(return_value={
            "id": 2,
            "errata_id": 2222,
            "fulladvisory": "RHBA-2099:2222-02",
            "status": "QE",
        })
        pipeline.verify_attached_bugs = AsyncMock(return_value=None)
        pipeline.get_image_stream_tag = AsyncMock(return_value=None)  # release tag not yet present
        pipeline.tag_release = AsyncMock(return_value=None)
        pipeline.wait_for_stable = AsyncMock(return_value=None)
        pipeline.send_image_list_email = AsyncMock()
        pipeline.is_accepted = AsyncMock(return_value=False)
        await pipeline.run()
        load_group_config.assert_awaited_once_with("openshift-4.10", "4.10.99", env=ANY)
        load_releases_config.assert_awaited_once_with(group='openshift-4.10', data_path='https://example.com/ocp-build-data.git')
        pipeline.check_blocker_bugs.assert_awaited_once_with()
        # All four advisories (rpm, image, extras, metadata) are moved to QE.
        for advisory in [1, 2, 3, 4]:
            pipeline.change_advisory_state.assert_any_await(advisory, "QE")
        pipeline.get_advisory_info.assert_awaited_once_with(2)
        pipeline.verify_attached_bugs.assert_awaited_once_with([1, 2, 3, 4], no_verify_blocking_bugs=False)
        get_release_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64", raise_if_not_found=ANY)
        get_release_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-s390x", raise_if_not_found=ANY)
        # Each arch's image is built from its reference nightly in the matching CI registry namespace.
        build_release_image.assert_any_await("4.10.99", "x86_64", ["4.10.98", "4.9.99"], {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}, "quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64", "registry.ci.openshift.org/ocp/release:nightly-x86_64", None, keep_manifest_list=False)
        build_release_image.assert_any_await("4.10.99", "s390x", ["4.10.98", "4.9.99"], {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}, "quay.io/openshift-release-dev/ocp-release:4.10.99-s390x", "registry.ci.openshift.org/ocp-s390x/release-s390x:nightly-s390x", None, keep_manifest_list=False)
        build_release_image.assert_any_await("4.10.99", "ppc64le", ["4.10.98", "4.9.99"], {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}, "quay.io/openshift-release-dev/ocp-release:4.10.99-ppc64le", "registry.ci.openshift.org/ocp-ppc64le/release-ppc64le:nightly-ppc64le", None, keep_manifest_list=False)
        build_release_image.assert_any_await("4.10.99", "aarch64", ["4.10.98", "4.9.99"], {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}, "quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64", "registry.ci.openshift.org/ocp-arm64/release-arm64:nightly-aarch64", None, keep_manifest_list=False)
        pipeline._slack_client.bind_channel.assert_called_once_with("4.10.99")
        pipeline.get_image_stream_tag.assert_any_await("ocp", "release:4.10.99")
        # Note: aarch64 uses the "arm64" namespace/stream naming on the CI cluster.
        pipeline.tag_release.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64", "ocp/release:4.10.99")
        pipeline.tag_release.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-s390x", "ocp-s390x/release-s390x:4.10.99")
        pipeline.tag_release.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-ppc64le", "ocp-ppc64le/release-ppc64le:4.10.99")
        pipeline.tag_release.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64", "ocp-arm64/release-arm64:4.10.99")
        pipeline.wait_for_stable.assert_any_await("4.10.99", "x86_64", "4-stable")
        pipeline.wait_for_stable.assert_any_await("4.10.99", "s390x", "4-stable-s390x")
        pipeline.wait_for_stable.assert_any_await("4.10.99", "ppc64le", "4-stable-ppc64le")
        pipeline.wait_for_stable.assert_any_await("4.10.99", "aarch64", "4-stable-arm64")
        pipeline.send_image_list_email.assert_awaited_once_with("4.10.99", 2, ANY)
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.tag_release", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream_tag", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.build_release_image", return_value=None)
    @patch("pyartcd.pipelines.promote.get_release_image_info", side_effect=lambda pullspec, raise_if_not_found=False: {
        "image": pullspec,
        "digest": "fake:deadbeef",
        "metadata": {
            "version": "4.10.99",
        },
        "references": {
            "spec": {
                "tags": [
                    {
                        "name": "machine-os-content",
                        "annotations": {"io.openshift.build.versions": "machine-os=00.00.212301010000-0"}
                    }
                ]
            }
        }
    } if raise_if_not_found else None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream")
    async def test_promote_arch(self, get_image_stream: AsyncMock, get_release_image_info: AsyncMock,
                                build_release_image: AsyncMock, get_image_stream_tag: AsyncMock,
                                tag_release: AsyncMock, _):
        """Exercise _promote_arch() in three sequential scenarios.

        1. x86_64: image is built from the reference release and tagged into ocp/release.
        2. aarch64: same flow, but with the "arm64" namespace/stream naming.
        3. the destination release tag already exists with a *different* digest,
           which must raise and must NOT re-tag the release.
        Mocks are reset between scenarios; order matters.
        """
        runtime = MagicMock(
            config={
                "build_config": {
                    "ocp_build_data_url": "https://example.com/ocp-build-data.git"
                },
                "jira": {
                    "url": "https://issues.redhat.com/"
                }
            },
            working_dir=Path("/path/to/working"),
            dry_run=False
        )
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
        previous_list = ["4.10.98", "4.10.97", "4.9.99"]
        metadata = {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}
        # test x86_64
        reference_release = "whatever-x86_64"
        actual = await pipeline._promote_arch(
            release_name="4.10.99",
            arch="x86_64",
            previous_list=previous_list,
            metadata=metadata,
            reference_release=reference_release,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_release_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64")
        build_release_image.assert_awaited_once_with("4.10.99", "x86_64", previous_list, metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64", f'registry.ci.openshift.org/ocp/release:{reference_release}', None, keep_manifest_list=False)
        get_image_stream_tag.assert_awaited_once_with("ocp", "release:4.10.99")
        tag_release.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64", "ocp/release:4.10.99")
        self.assertEqual(actual["image"], "quay.io/openshift-release-dev/ocp-release:4.10.99-x86_64")
        # test aarch64
        reference_release = "whatever-aarch64"
        get_release_image_info.reset_mock()
        build_release_image.reset_mock()
        get_image_stream_tag.reset_mock()
        tag_release.reset_mock()
        actual = await pipeline._promote_arch(
            release_name="4.10.99",
            arch="aarch64",
            previous_list=previous_list,
            metadata=metadata,
            reference_release=reference_release,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_release_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64")
        build_release_image.assert_awaited_once_with("4.10.99", "aarch64", previous_list, metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64", f'registry.ci.openshift.org/ocp-arm64/release-arm64:{reference_release}', None, keep_manifest_list=False)
        get_image_stream_tag.assert_awaited_once_with("ocp-arm64", "release-arm64:4.10.99")
        tag_release.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64", "ocp-arm64/release-arm64:4.10.99")
        self.assertEqual(actual["image"], "quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64")
        # test release tag already exists but doesn't match the to-be-promoted release image
        get_image_stream_tag.return_value = {
            "image": {
                # Digest differs from the "fake:deadbeef" the promoted image resolves to.
                "dockerImageReference": "quay.io/openshift-release-dev/ocp-release@fake:foobar",
            }
        }
        reference_release = "whatever-aarch64"
        get_release_image_info.reset_mock()
        build_release_image.reset_mock()
        get_image_stream_tag.reset_mock()
        tag_release.reset_mock()
        with self.assertRaisesRegex(ValueError, "already exists, but it has a different digest"):
            await pipeline._promote_arch(
                release_name="4.10.99",
                arch="aarch64",
                previous_list=previous_list,
                metadata=metadata,
                reference_release=reference_release,
                tag_stable=True,
                assembly_type=AssemblyTypes.CUSTOM
            )
        get_release_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64")
        build_release_image.assert_awaited_once_with("4.10.99", "aarch64", previous_list, metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-aarch64", f'registry.ci.openshift.org/ocp-arm64/release-arm64:{reference_release}', None, keep_manifest_list=False)
        get_image_stream_tag.assert_awaited_once_with("ocp-arm64", "release-arm64:4.10.99")
        tag_release.assert_not_awaited()  # never overwrite a mismatching stable tag
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.exectools.cmd_assert_async", return_value=0)
    async def test_build_release_image_from_reference_release(self, cmd_assert_async: AsyncMock, _):
        """build_release_image() with a reference release must run the exact expected
        `oc adm release new --from-release=...` command, per arch, optionally with
        --keep-manifest-list."""
        runtime = MagicMock(
            config={
                "build_config": {
                    "ocp_build_data_url": "https://example.com/ocp-build-data.git"
                },
                "jira": {
                    "url": "https://issues.redhat.com/"
                }
            },
            working_dir=Path("/path/to/working"),
            dry_run=False
        )
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
        previous_list = ["4.10.98", "4.10.97", "4.9.99"]
        metadata = {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}
        # test x86_64
        reference_release = "registry.ci.openshift.org/ocp/release:whatever-x86_64"
        dest_pullspec = "example.com/foo/release:4.10.99-x86_64"
        await pipeline.build_release_image("4.10.99", "x86_64", previous_list, metadata, dest_pullspec, reference_release, None, keep_manifest_list=False)
        expected_cmd = ["oc", "adm", "release", "new", "-n", "ocp", "--name=4.10.99", "--to-image=example.com/foo/release:4.10.99-x86_64", f"--from-release={reference_release}", "--previous=4.10.98,4.10.97,4.9.99", "--metadata", "{\"description\": \"whatever\", \"url\": \"https://access.redhat.com/errata/RHBA-2099:2222\"}"]
        cmd_assert_async.assert_awaited_once_with(expected_cmd, env=ANY, stdout=ANY)
        # test aarch64 (namespace becomes ocp-arm64)
        reference_release = "registry.ci.openshift.org/ocp-arm64/release-arm64:whatever-aarch64"
        dest_pullspec = "example.com/foo/release:4.10.99-aarch64"
        cmd_assert_async.reset_mock()
        await pipeline.build_release_image("4.10.99", "aarch64", previous_list, metadata, dest_pullspec, reference_release, None, keep_manifest_list=False)
        expected_cmd = ["oc", "adm", "release", "new", "-n", "ocp-arm64", "--name=4.10.99", "--to-image=example.com/foo/release:4.10.99-aarch64", f"--from-release={reference_release}", "--previous=4.10.98,4.10.97,4.9.99", "--metadata", "{\"description\": \"whatever\", \"url\": \"https://access.redhat.com/errata/RHBA-2099:2222\"}"]
        cmd_assert_async.assert_awaited_once_with(expected_cmd, env=ANY, stdout=ANY)
        # test multi-aarch64 (keep_manifest_list=True adds --keep-manifest-list)
        reference_release = "registry.ci.openshift.org/ocp-arm64/release-arm64:whatever-multi-aarch64"
        dest_pullspec = "example.com/foo/release:4.10.99-multi-aarch64"
        cmd_assert_async.reset_mock()
        await pipeline.build_release_image("4.10.99", "aarch64", previous_list, metadata, dest_pullspec, reference_release, None, keep_manifest_list=True)
        expected_cmd = ["oc", "adm", "release", "new", "-n", "ocp-arm64", "--name=4.10.99", "--to-image=example.com/foo/release:4.10.99-multi-aarch64", f"--from-release={reference_release}", "--keep-manifest-list", "--previous=4.10.98,4.10.97,4.9.99", "--metadata", "{\"description\": \"whatever\", \"url\": \"https://access.redhat.com/errata/RHBA-2099:2222\"}"]
        cmd_assert_async.assert_awaited_once_with(expected_cmd, env=ANY, stdout=ANY)
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.exectools.cmd_assert_async", return_value=0)
    async def test_build_release_image_from_image_stream(self, cmd_assert_async: AsyncMock, _):
        """build_release_image() with no reference release must fall back to
        `oc adm release new --reference-mode=source --from-image-stream=...`."""
        runtime = MagicMock(config={"build_config": {"ocp_build_data_url": "https://example.com/ocp-build-data.git"},
                                    "jira": {"url": "https://issues.redhat.com/"}},
                            working_dir=Path("/path/to/working"), dry_run=False)
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
        previous_list = ["4.10.98", "4.10.97", "4.9.99"]
        metadata = {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}
        # test x86_64
        reference_release = None
        dest_pullspec = "example.com/foo/release:4.10.99-x86_64"
        from_image_stream = "4.10-art-assembly-4.10.99"
        await pipeline.build_release_image("4.10.99", "x86_64", previous_list, metadata, dest_pullspec, reference_release, from_image_stream, keep_manifest_list=False)
        expected_cmd = ['oc', 'adm', 'release', 'new', '-n', 'ocp', '--name=4.10.99', '--to-image=example.com/foo/release:4.10.99-x86_64', '--reference-mode=source', '--from-image-stream=4.10-art-assembly-4.10.99', '--previous=4.10.98,4.10.97,4.9.99', '--metadata', '{"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}']
        cmd_assert_async.assert_awaited_once_with(expected_cmd, env=ANY, stdout=ANY)
        # test aarch64 (namespace ocp-arm64, stream name has -arm64 suffix)
        reference_release = None
        dest_pullspec = "example.com/foo/release:4.10.99-aarch64"
        from_image_stream = "4.10-art-assembly-4.10.99-arm64"
        cmd_assert_async.reset_mock()
        await pipeline.build_release_image("4.10.99", "aarch64", previous_list, metadata, dest_pullspec, reference_release, from_image_stream, keep_manifest_list=False)
        expected_cmd = ['oc', 'adm', 'release', 'new', '-n', 'ocp-arm64', '--name=4.10.99', '--to-image=example.com/foo/release:4.10.99-aarch64', '--reference-mode=source', '--from-image-stream=4.10-art-assembly-4.10.99-arm64', '--previous=4.10.98,4.10.97,4.9.99', '--metadata', '{"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}']
        cmd_assert_async.assert_awaited_once_with(expected_cmd, env=ANY, stdout=ANY)
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.tag_release", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream_tag", return_value={
        "tag": {
            "from": {
                "name": "quay.io/openshift-release-dev/ocp-release:4.10.99-multi",
            }
        }
    })
    @patch("pyartcd.pipelines.promote.PromotePipeline.push_manifest_list", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.build_release_image", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream", return_value={
        "spec": {
            "tags": [
                {"name": "4.10.99-0.art-assembly-4.10.99-multi-2022-07-26-210300",
                 "from": {"name": "example.com/ocp-release@fake:deadbeef-source-manifest-list"}}
            ]
        }
    })
    @patch('pyartcd.pipelines.promote.PromotePipeline.get_image_info', side_effect=lambda pullspec, raise_if_not_found=False: {
        ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): TestPromotePipeline.FAKE_DEST_MANIFEST_LIST,
    }[pullspec, raise_if_not_found])
    @patch('pyartcd.pipelines.promote.PromotePipeline.get_multi_image_digest', return_value='fake:deadbeef-toplevel-manifest-list')
    async def test_promote_heterogeneous_payload(self, get_image_digest: AsyncMock, get_image_info: AsyncMock,
                                                 get_image_stream: AsyncMock, build_release_image: AsyncMock,
                                                 push_manifest_list: AsyncMock, get_image_stream_tag: AsyncMock,
                                                 tag_release: AsyncMock, _):
        """Exercise _promote_heterogeneous_payload() (multi-arch manifest-list payload).

        Scenario 1: destination payload already exists -> nothing is built or tagged.
        Scenario 2: payload does not exist yet -> per-arch images are built from the
        source manifest list's entries, a new manifest list is pushed, and the result
        is tagged into ocp-multi/release-multi.
        The side_effect lambdas key on (pullspec, raise_if_not_found), so an
        unexpected call raises KeyError; they also consult push_manifest_list.called,
        making the dict values depend on call ORDER within the pipeline.
        """
        runtime = MagicMock(
            config={
                "build_config": {
                    "ocp_build_data_url": "https://example.com/ocp-build-data.git"
                },
                "jira": {
                    "url": "https://issues.redhat.com/"
                }
            },
            working_dir=Path("/path/to/working"),
            dry_run=False
        )
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
        previous_list = ["4.10.98", "4.10.97", "4.9.99"]
        metadata = {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}
        # test: heterogeneous payload already exists
        await pipeline._promote_heterogeneous_payload(
            release_name="4.10.99",
            include_arches=["x86_64", "aarch64"],
            previous_list=previous_list,
            metadata=metadata,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_image_digest.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        get_image_info.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_stream_tag.assert_awaited_once_with("ocp-multi", "release-multi:4.10.99")
        build_release_image.assert_not_called()
        tag_release.assert_not_called()
        # test: promote a GA heterogeneous payload
        get_image_digest.reset_mock()
        get_image_digest.side_effect = lambda pullspec, raise_if_not_found=False: {
            # First probe (raise_if_not_found=False) says "doesn't exist yet";
            # the final lookup returns the freshly pushed digest.
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", False): None,
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): "fake:deadbeef-dest-multi",
        }[pullspec, raise_if_not_found]
        get_image_info.reset_mock()
        get_image_info.side_effect = lambda pullspec, raise_if_not_found=False: {
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True):
                None if not push_manifest_list.called else TestPromotePipeline.FAKE_DEST_MANIFEST_LIST,
            ('example.com/ocp-release@fake:deadbeef-source-manifest-list', True): {
                "schemaVersion": 2,
                "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
                "manifests": [
                    {
                        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                        "size": 1583,
                        "digest": "fake:deadbeef-source-multi-amd64",
                        "platform": {
                            "architecture": "amd64",
                            "os": "linux"
                        }
                    },
                    {
                        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                        "size": 1583,
                        "digest": "fake:deadbeef-source-multi-ppc64le",
                        "platform": {
                            "architecture": "ppc64le",
                            "os": "linux"
                        }
                    },
                    {
                        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                        "size": 1583,
                        "digest": "fake:deadbeef-source-multi-s390x",
                        "platform": {
                            "architecture": "s390x",
                            "os": "linux"
                        }
                    },
                    {
                        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                        "size": 1583,
                        "digest": "fake:deadbeef-source-multi-arm64",
                        "platform": {
                            "architecture": "arm64",
                            "os": "linux"
                        }
                    }
                ]
            }
        }[pullspec, raise_if_not_found]
        get_image_stream.reset_mock()
        get_image_stream_tag.reset_mock()
        get_image_stream_tag.return_value = None  # stable tag not present yet
        build_release_image.reset_mock()
        push_manifest_list.reset_mock()
        tag_release.reset_mock()
        actual = await pipeline._promote_heterogeneous_payload(
            release_name="4.10.99",
            include_arches=["x86_64", "aarch64"],
            previous_list=previous_list,
            metadata=metadata,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_info.assert_any_await("example.com/ocp-release@fake:deadbeef-source-manifest-list", raise_if_not_found=True)
        get_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_stream.assert_awaited_once_with("ocp-multi", "4.10-art-assembly-4.10.99-multi")
        get_image_stream_tag.assert_awaited_once_with("ocp-multi", "release-multi:4.10.99")
        # The pushed payload carries the extra "multi" architecture annotation.
        dest_metadata = metadata.copy()
        dest_metadata["release.openshift.io/architecture"] = "multi"
        build_release_image.assert_any_await("4.10.99", "aarch64", previous_list, dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64", 'example.com/ocp-release@fake:deadbeef-source-multi-arm64', None, keep_manifest_list=True)
        build_release_image.assert_any_await("4.10.99", "x86_64", previous_list, dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64", 'example.com/ocp-release@fake:deadbeef-source-multi-amd64', None, keep_manifest_list=True)
        dest_manifest_list = {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi', 'manifests': [{'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64', 'platform': {'os': 'linux', 'architecture': 'amd64'}}, {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64', 'platform': {'os': 'linux', 'architecture': 'arm64'}}]}
        push_manifest_list.assert_awaited_once_with("4.10.99", dest_manifest_list)
        tag_release.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", "ocp-multi/release-multi:4.10.99")
        self.assertEqual(actual["image"], "quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        self.assertEqual(actual["digest"], "fake:deadbeef-dest-multi")
        # test: promote GA heterogeneous payload
        # NOTE(review): this scenario looks near-identical to the previous one except the
        # source manifest list comes from FAKE_SOURCE_MANIFEST_LIST — confirm it isn't a
        # redundant duplicate before refactoring.
        get_image_digest.reset_mock()
        get_image_digest.side_effect = lambda pullspec, raise_if_not_found=False: {
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", False): None,
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): "fake:deadbeef-dest-multi",
        }[pullspec, raise_if_not_found]
        get_image_info.reset_mock()
        get_image_info.side_effect = lambda pullspec, raise_if_not_found=False: {
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True):
                None if not push_manifest_list.called else TestPromotePipeline.FAKE_DEST_MANIFEST_LIST,
            ('example.com/ocp-release@fake:deadbeef-source-manifest-list', True): TestPromotePipeline.FAKE_SOURCE_MANIFEST_LIST,
        }[pullspec, raise_if_not_found]
        get_image_stream.reset_mock()
        get_image_stream_tag.reset_mock()
        get_image_stream_tag.return_value = None
        build_release_image.reset_mock()
        push_manifest_list.reset_mock()
        tag_release.reset_mock()
        actual = await pipeline._promote_heterogeneous_payload(
            release_name="4.10.99",
            include_arches=["x86_64", "aarch64"],
            previous_list=previous_list,
            metadata=metadata,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_info.assert_any_await("example.com/ocp-release@fake:deadbeef-source-manifest-list", raise_if_not_found=True)
        get_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_stream.assert_awaited_once_with("ocp-multi", "4.10-art-assembly-4.10.99-multi")
        get_image_stream_tag.assert_awaited_once_with("ocp-multi", "release-multi:4.10.99")
        dest_metadata = metadata.copy()
        dest_metadata["release.openshift.io/architecture"] = "multi"
        build_release_image.assert_any_await("4.10.99", "aarch64", previous_list, dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64", 'example.com/ocp-release@fake:deadbeef-source-multi-arm64', None, keep_manifest_list=True)
        build_release_image.assert_any_await("4.10.99", "x86_64", previous_list, dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64", 'example.com/ocp-release@fake:deadbeef-source-multi-amd64', None, keep_manifest_list=True)
        dest_manifest_list = {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi', 'manifests': [{'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64', 'platform': {'os': 'linux', 'architecture': 'amd64'}}, {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64', 'platform': {'os': 'linux', 'architecture': 'arm64'}}]}
        push_manifest_list.assert_awaited_once_with("4.10.99", dest_manifest_list)
        tag_release.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", "ocp-multi/release-multi:4.10.99")
        self.assertEqual(actual["image"], "quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        self.assertEqual(actual["digest"], "fake:deadbeef-dest-multi")
    @patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.tag_release", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream_tag", return_value={
        "tag": {
            "from": {
                "name": "quay.io/openshift-release-dev/ocp-release:4.10.99-multi",
            }
        }
    })
    @patch("pyartcd.pipelines.promote.PromotePipeline.push_manifest_list", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.build_release_image", return_value=None)
    @patch("pyartcd.pipelines.promote.PromotePipeline.get_image_stream", return_value={
        "spec": {
            "tags": [
                {"name": "4.10.99-0.art-assembly-4.10.99-multi-2022-07-26-210300",
                 "from": {"name": "example.com/ocp-release@fake:deadbeef-source-manifest-list"}}
            ]
        }
    })
    @patch('pyartcd.pipelines.promote.PromotePipeline.get_image_info', side_effect=lambda pullspec, raise_if_not_found=False: {
        ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): TestPromotePipeline.FAKE_DEST_MANIFEST_LIST,
    }[pullspec, raise_if_not_found])
    @patch('pyartcd.pipelines.promote.PromotePipeline.get_multi_image_digest', return_value='fake:deadbeef-toplevel-manifest-list')
    async def test_build_release_image_from_heterogeneous_image_stream(
            self, get_image_digest: AsyncMock, get_image_info: AsyncMock, get_image_stream: AsyncMock,
            build_release_image: AsyncMock, push_manifest_list: AsyncMock, get_image_stream_tag: AsyncMock,
            tag_release: AsyncMock, _):
        """Same as test_promote_heterogeneous_payload but with use_multi_hack=True.

        With the multi hack enabled the release name gets a "-multi" suffix
        ("4.10.99-multi"), the stable tag lives at release-multi:4.10.99-multi, and
        the per-arch builds pass an empty previous list.
        NOTE(review): the name suggests build_release_image is under test, but the
        body exercises _promote_heterogeneous_payload — consider renaming.
        """
        runtime = MagicMock(
            config={
                "build_config": {
                    "ocp_build_data_url": "https://example.com/ocp-build-data.git"
                },
                "jira": {
                    "url": "https://issues.redhat.com/"
                }
            },
            working_dir=Path("/path/to/working"),
            dry_run=False
        )
        pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", use_multi_hack=True, signing_env="prod")
        previous_list = ["4.10.98", "4.10.97", "4.9.99"]
        metadata = {"description": "whatever", "url": "https://access.redhat.com/errata/RHBA-2099:2222"}
        # test: heterogeneous payload already exists
        await pipeline._promote_heterogeneous_payload(
            release_name="4.10.99",
            include_arches=["x86_64", "aarch64"],
            previous_list=previous_list,
            metadata=metadata,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_image_digest.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        get_image_info.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_stream_tag.assert_awaited_once_with("ocp-multi", "release-multi:4.10.99-multi")
        build_release_image.assert_not_called()
        tag_release.assert_not_called()
        # test: promote a GA heterogeneous payload
        get_image_digest.reset_mock()
        get_image_digest.side_effect = lambda pullspec, raise_if_not_found=False: {
            # First probe says "doesn't exist"; final lookup yields the pushed digest.
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", False): None,
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): "fake:deadbeef-dest-multi",
        }[pullspec, raise_if_not_found]
        get_image_info.reset_mock()
        get_image_info.side_effect = lambda pullspec, raise_if_not_found=False: {
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): None if not push_manifest_list.called else TestPromotePipeline.FAKE_DEST_MANIFEST_LIST,
            ('example.com/ocp-release@fake:deadbeef-source-manifest-list', True): TestPromotePipeline.FAKE_SOURCE_MANIFEST_LIST,
        }[pullspec, raise_if_not_found]
        get_image_stream.reset_mock()
        get_image_stream_tag.reset_mock()
        get_image_stream_tag.return_value = None  # stable tag not present yet
        build_release_image.reset_mock()
        push_manifest_list.reset_mock()
        tag_release.reset_mock()
        actual = await pipeline._promote_heterogeneous_payload(
            release_name="4.10.99",
            include_arches=["x86_64", "aarch64"],
            previous_list=previous_list,
            metadata=metadata,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_info.assert_any_await("example.com/ocp-release@fake:deadbeef-source-manifest-list", raise_if_not_found=True)
        get_image_stream.assert_awaited_once_with("ocp-multi", "4.10-art-assembly-4.10.99-multi")
        get_image_stream_tag.assert_awaited_once_with("ocp-multi", "release-multi:4.10.99-multi")
        dest_metadata = metadata.copy()
        dest_metadata["release.openshift.io/architecture"] = "multi"
        # With the multi hack, per-arch builds use release name "4.10.99-multi" and no previous list.
        build_release_image.assert_any_await("4.10.99-multi", "aarch64", [], dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64", 'example.com/ocp-release@fake:deadbeef-source-multi-arm64', None, keep_manifest_list=True)
        build_release_image.assert_any_await("4.10.99-multi", "x86_64", [], dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64", 'example.com/ocp-release@fake:deadbeef-source-multi-amd64', None, keep_manifest_list=True)
        dest_manifest_list = {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi', 'manifests': [{'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64', 'platform': {'os': 'linux', 'architecture': 'amd64'}}, {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64', 'platform': {'os': 'linux', 'architecture': 'arm64'}}]}
        push_manifest_list.assert_awaited_once_with("4.10.99-multi", dest_manifest_list)
        tag_release.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", "ocp-multi/release-multi:4.10.99-multi")
        self.assertEqual(actual["image"], "quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        self.assertEqual(actual["digest"], "fake:deadbeef-dest-multi")
        # test: promote GA heterogeneous payload
        # NOTE(review): this scenario appears to be an exact repeat of the previous one —
        # confirm it isn't a redundant duplicate before refactoring.
        get_image_digest.reset_mock()
        get_image_digest.side_effect = lambda pullspec, raise_if_not_found=False: {
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", False): None,
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): "fake:deadbeef-dest-multi",
        }[pullspec, raise_if_not_found]
        get_image_info.reset_mock()
        get_image_info.side_effect = lambda pullspec, raise_if_not_found=False: {
            ("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", True): None if not push_manifest_list.called else TestPromotePipeline.FAKE_DEST_MANIFEST_LIST,
            ('example.com/ocp-release@fake:deadbeef-source-manifest-list', True): TestPromotePipeline.FAKE_SOURCE_MANIFEST_LIST,
        }[pullspec, raise_if_not_found]
        get_image_stream.reset_mock()
        get_image_stream_tag.reset_mock()
        get_image_stream_tag.return_value = None
        build_release_image.reset_mock()
        push_manifest_list.reset_mock()
        tag_release.reset_mock()
        actual = await pipeline._promote_heterogeneous_payload(
            release_name="4.10.99",
            include_arches=["x86_64", "aarch64"],
            previous_list=previous_list,
            metadata=metadata,
            tag_stable=True,
            assembly_type=AssemblyTypes.CUSTOM
        )
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        get_image_digest.assert_any_await("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", raise_if_not_found=True)
        get_image_info.assert_any_await("example.com/ocp-release@fake:deadbeef-source-manifest-list", raise_if_not_found=True)
        get_image_stream.assert_awaited_once_with("ocp-multi", "4.10-art-assembly-4.10.99-multi")
        get_image_stream_tag.assert_awaited_once_with("ocp-multi", "release-multi:4.10.99-multi")
        dest_metadata = metadata.copy()
        dest_metadata["release.openshift.io/architecture"] = "multi"
        build_release_image.assert_any_await("4.10.99-multi", "aarch64", [], dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64", 'example.com/ocp-release@fake:deadbeef-source-multi-arm64', None, keep_manifest_list=True)
        build_release_image.assert_any_await("4.10.99-multi", "x86_64", [], dest_metadata, "quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64", 'example.com/ocp-release@fake:deadbeef-source-multi-amd64', None, keep_manifest_list=True)
        dest_manifest_list = {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi', 'manifests': [{'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-x86_64', 'platform': {'os': 'linux', 'architecture': 'amd64'}}, {'image': 'quay.io/openshift-release-dev/ocp-release:4.10.99-multi-aarch64', 'platform': {'os': 'linux', 'architecture': 'arm64'}}]}
        push_manifest_list.assert_awaited_once_with("4.10.99-multi", dest_manifest_list)
        tag_release.assert_awaited_once_with("quay.io/openshift-release-dev/ocp-release:4.10.99-multi", "ocp-multi/release-multi:4.10.99-multi")
        self.assertEqual(actual["image"], "quay.io/openshift-release-dev/ocp-release:4.10.99-multi")
        self.assertEqual(actual["digest"], "fake:deadbeef-dest-multi")
@patch("pyartcd.jira.JIRAClient.from_url", return_value=None)
def test_build_create_symlink(self, _):
runtime = MagicMock(
config={
"build_config": {
"ocp_build_data_url": "https://example.com/ocp-build-data.git"
},
"jira": {
"url": "https://issues.redhat.com/"
}
},
working_dir=Path("/path/to/working"),
dry_run=False
)
pipeline = PromotePipeline(runtime, group="openshift-4.10", assembly="4.10.99", signing_env="prod")
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
open("openshift-client-linux-4.3.0-0.nightly-2019-12-06-161135.tar.gz", "w").close()
open("openshift-client-mac-4.3.0-0.nightly-2019-12-06-161135.tar.gz", "w").close()
open("openshift-install-mac-4.3.0-0.nightly-2019-12-06-161135.tar.gz", "w").close()
pipeline.create_symlink(temp_dir, False, False)
self.assertTrue(os.path.exists(os.path.join(temp_dir, 'openshift-client-linux.tar.gz')))
self.assertTrue(os.path.exists(os.path.join(temp_dir, 'openshift-client-mac.tar.gz')))
self.assertTrue(os.path.exists(os.path.join(temp_dir, 'openshift-install-mac.tar.gz')))
shutil.rmtree(temp_dir)
|
990,080 | 9d653decc9c6ae9d42d0c7b9e4a67f0175f77e08 | import os
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
# Build a Detectron2 Mask R-CNN (R50-FPN, 3x) config for single-class table detection.
cfg = get_cfg()
# Setting output directory
cfg.OUTPUT_DIR = 'Table Detection/model/'
# Set device cuda/cpu
if torch.cuda.is_available():
    cfg.MODEL.DEVICE = 'cuda'
else:
    cfg.MODEL.DEVICE = 'cpu'
print('Setting Device to :', cfg.MODEL.DEVICE)
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
# Resume from a locally fine-tuned checkpoint when present, otherwise start
# from the COCO-pretrained weights.
if os.path.isfile(os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')):
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')
else:
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
# TRAINING PARAMETERS
cfg.SOLVER.STEPS = []        # do not decay learning rate
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (table)
cfg.DATALOADER.NUM_WORKERS = 2
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8
990,081 | 2f9eef47f0f60b47f5a51149b20920ae0bbad1c5 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from rest_framework_swagger.views import get_swagger_view
from .api import *
# Router auto-generates list/detail CRUD routes for the reviews endpoint.
router = DefaultRouter()
router.register(r'reviews', ReviewsViewSet, basename='reviews')
# Swagger UI schema view for the API.
# NOTE(review): schema_view is defined but never wired into urlpatterns below --
# confirm whether a docs path was intended.
schema_view = get_swagger_view(title='CovAnalytica API')
urlpatterns = [
    path('', include(router.urls)),
]
990,082 | 71248e02ceb6b9677682139e5be4fcf3153be205 | from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import visdom
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
from torchvision import transforms, datasets, models
import torch.nn.functional as F
from resnet import resnet32
import cifar10 as dataset
# True when a CUDA device is available; used throughout to move tensors to GPU.
gpu_status = torch.cuda.is_available()
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
# Optimization options
parser.add_argument('--epochs', default=120, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--batch_size', default=128, type=int, metavar='N',
                    help='train batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight_decay', '--wd', default=5e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
# Checkpoints
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
# Miscs
parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
# Device options
parser.add_argument('--gpu', default='0', type=str,
                    help='id(s) for CUDA_VISIBLE_DEVICES')
# Method options
parser.add_argument('--percent', type=float, default=0,
                    help='Percentage of noise')
parser.add_argument('--train_ratio', type=float, default=0.9,
                    help='Percentage of train')
parser.add_argument('--alpha', type=float, default=1.0,
                    help='Hyper parameter alpha of loss function')
parser.add_argument('--beta', type=float, default=0.5,
                    help='Hyper parameter beta of loss function')
# NOTE(review): '--lamda' help text duplicates beta's description; it is a
# runtime string so it is left untouched here -- confirm the intended wording.
parser.add_argument('--lamda', type=float, default=1000,
                    help='Hyper parameter beta of loss function')
parser.add_argument('--asym', action='store_true',
                    help='Asymmetric noise')
parser.add_argument('--out', default='./save_model',
                    help='Directory to output the result')
args = parser.parse_args()
# Number of CIFAR-10 classes; used by pencil_loss below.
class_num=10
# Random seed
# NOTE(review): default is 0, so this branch is dead -- a random seed is only
# drawn when manualSeed is explicitly None; confirm whether default=None was meant.
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
np.random.seed(args.manualSeed)
# Visdom windows for live loss/accuracy curves (requires a running visdom server).
viz=visdom.Visdom()
line = viz.line(Y=np.arange(args.epochs))
line2 = viz.line(Y=np.arange(args.epochs))
def main():
    """Train ResNet-32 on noisy CIFAR-10 and keep the best checkpoint by val accuracy.

    Builds augmented train / plain val loaders from the noisy dataset helper,
    trains with SGD + StepLR, reports train accuracy against the ground-truth
    labels (gtrue_labels), plots curves to visdom, and saves the model whenever
    validation accuracy improves.
    """
    # start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch
    best_acc = 0.0
    if not os.path.exists(args.out):
        os.mkdir(args.out)
    # Data
    print(f'==> Preparing {"asymmetric" if args.asym else "symmetric"} nosiy cifar10')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset, valset = dataset.get_cifar10('./data', args, train=True, download=False, transform_train=transform_train,
                                           transform_val=transform_val)
    data_sizes=int(len(trainset))
    print('trainset,valset',len(trainset),len(valset))
    val_dataSizes=int(len(valset))
    trainloader = data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)
    valloader = data.DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=4)
    # Model
    print("==> creating preact_resnet")
    model = resnet32()
    # if os.path.exists('./save_model/resnet32.pth'):
    #     model.load_state_dict(torch.load('./save_model/resnet32.pth'))
    #     print('load resnet32.pth successfully')
    # else:
    #     print('load resnet32.pth failed')
    if gpu_status:
        model = model.cuda()
        cudnn.benchmark = True  # speeds up training for fixed-size inputs
    print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)
    start_time=time.time()
    train_loss, test_loss_v, train_acc, test_acc_v, time_p ,corr_rate_all,epoch_num= [], [], [], [], [],[],[]
    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        running_corrects = 0.0
        # labels_grad = np.zeros((data_sizes, 10), dtype=np.float32)
        # The noisy dataset yields (image, noisy label, index, updatable label,
        # ground-truth label); only inputs/labels drive the loss, while
        # gtrue_labels are used to measure true train accuracy.
        for batch_idx, (inputs, labels, indexs, labels_update, gtrue_labels) in enumerate(trainloader):
            if gpu_status:
                inputs, labels = inputs.cuda(), labels.cuda()
                gtrue_labels=gtrue_labels.cuda()
            # compute output
            outputs = model(inputs)
            loss=criterion(outputs,labels)
            preds = torch.max(outputs.detach(), 1)[1]  # argmax class indices; gradients not needed
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()  # update parameters
            running_loss += loss.item()*len(labels)  # .item() turns the 0-dim loss tensor into a Python scalar
            running_corrects += torch.sum(preds == gtrue_labels.detach())
        scheduler.step()
        epoch_loss = running_loss / data_sizes
        if gpu_status:
            epoch_acc = running_corrects.cpu().numpy() / data_sizes
        else:
            epoch_acc = running_corrects.numpy() / data_sizes
        train_loss.append(epoch_loss)
        train_acc.append(epoch_acc)
        time_elapsed = time.time() - start_time
        time_p.append(time_elapsed)
        print("[{}/{} epoches] train_loss:{:.4f}||train_acc:{:.4f}||time passed:{:.0f}m {:.0f}s".format(epoch + 1,
                                                                                                       args.epochs,
                                                                                                       train_loss[-1],
                                                                                                       train_acc[-1],
                                                                                                       time_elapsed // 60,
                                                                                                       time_elapsed % 60))
        # validate
        test_loss, test_acc = validate(model, valloader, criterion, epoch, val_dataSizes)
        test_loss_v.append(test_loss)
        test_acc_v.append(test_acc)
        epoch_num.append(epoch + 1)
        # Push cumulative loss/accuracy curves to the two visdom windows.
        viz.line(X=np.column_stack((np.array(epoch_num), np.array(epoch_num))),
                 Y=np.column_stack((np.array(train_loss), np.array(test_loss_v))),
                 win=line,
                 opts=dict(xlabel='epoch',ylabel='Loss',legend=["train_loss", "val_loss"],
                           title="30% symmetric noise: ResNet-32 VAL Loss:{:.4f}".format(test_loss_v[-1])))
        viz.line(X=np.column_stack((np.array(epoch_num), np.array(epoch_num))),
                 Y=np.column_stack((np.array(train_acc), np.array(test_acc_v))),
                 win=line2,
                 opts=dict(xlabel='epoch',ylabel='accuracy',legend=[ "train_acc", "val_acc"],
                           title="30% symmetric noise: ResNet-32 VAL ACC:{:.4f}".format(test_acc_v[-1])))
        # Checkpoint only when validation accuracy improves.
        if test_acc > best_acc:
            best_acc = test_acc
            torch.save(model.state_dict(),os.path.join(args.out,'sym_3_resnet32.pth'))
            print('resnet32 saved')
def validate(model,valloader,criterion, epoch,data_sizes):
    """Evaluate *model* on *valloader* without gradients.

    Returns (mean loss over data_sizes samples, accuracy over data_sizes samples).
    """
    start_time=time.time()
    model.eval()
    running_loss = 0.0
    running_corrects = 0.0
    with torch.no_grad():
        for batch_idx, (inputs, labels) in enumerate(valloader):
            if gpu_status:
                inputs, labels = inputs.cuda(), labels.cuda()
            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            preds = torch.max(outputs.data, 1)[1]  # argmax class indices
            running_loss += loss.item() * len(labels)  # .item() gives a Python scalar
            running_corrects += torch.sum(preds == labels.data)
    epoch_loss = running_loss / data_sizes
    if gpu_status:
        epoch_acc = running_corrects.cpu().numpy() / data_sizes
    else:
        epoch_acc = running_corrects.numpy() / data_sizes
    time_elapsed = time.time() - start_time
    print("[{}/{} epoches] val_loss:{:.4f}||val_acc:{:.4f}||time cost:{:.0f}m {:.0f}s".format(epoch + 1,args.epochs,
                                                                                             epoch_loss,epoch_acc,
                                                                                             time_elapsed // 60,time_elapsed % 60))
    return epoch_loss,epoch_acc
def pencil_loss(outputs,labels_update,labels):
    """PENCIL-style training loss over three terms.

    Lo: cross-entropy of the learnable label logits against the given labels;
    Le: entropy-like term on the network outputs;
    Lc: KL-like compatibility between label logits and network predictions.
    Total = Lc/class_num + args.alpha*Lo + args.beta*Le/class_num, using the
    module-level globals `args` and `class_num`.
    """
    # sfm=nn.Softmax(1)
    # pred=sfm(outputs)
    # pred = pred.detach()  # detaching would make pred requires_grad=False without affecting outputs
    # NOTE(review): the original author questioned whether pred should be
    # detached here -- confirm against the PENCIL reference implementation.
    pred = F.softmax(outputs, dim=1)
    # criterion = nn.CrossEntropyLoss()
    # Lo=criterion(labels_update,labels)
    Lo = -torch.mean(F.log_softmax(labels_update, dim=1)[torch.arange(labels_update.shape[0]),labels])
    # Le=criterion(outputs,pred)
    Le = -torch.mean(torch.sum(F.log_softmax(outputs, dim=1) * pred, dim=1))
    # Lc=criterion(labels_update,pred)-criterion(outputs,pred)
    Lc = -torch.mean(torch.sum(F.log_softmax(labels_update, dim=1) * pred, dim=1)) - Le
    loss_total = Lc /class_num+args.alpha* Lo +args.beta* Le /class_num
    return loss_total
# Script entry point.
if __name__ == '__main__':
    main()
|
990,083 | 29056a332619e4b6fbe2ffe316a4fa1d119edc2a | import storymarket
from django.conf import settings
from django_storymarket.models import SyncedObject
QUEUE_UPLOADS = getattr(settings, 'STORYMARKET_QUEUE_UPLOADS', False)
if QUEUE_UPLOADS:
from .tasks import upload_blob_task
def save_to_storymarket(obj, storymarket_type, data):
    """
    Push an object to Storymarket.

    Called from the various parts of the admin that need to upload
    objects -- ``save_model``, the ``upload_to_storymarket`` action,
    etc.

    obj: the local Django model instance being synced.
    storymarket_type: Storymarket content type name, with or without a
        trailing "s" (e.g. "photo" or "photos"); "package" is special-cased.
    data: dict of fields to send; mutated in place (keys popped/renamed).
    Returns whatever ``SyncedObject.objects.mark_synced`` returns -- the
    recursive call below unpacks it as a (synced, created) pair.
    Raises ValueError for an unknown storymarket_type.
    """
    # TODO: should figure out how to do an update if the object already exists.
    api = storymarket.Storymarket(settings.STORYMARKET_API_KEY)
    # Fix some field names, mapping from local to storymarket names.
    if 'pricing' in data:
        data['pricing_scheme'] = data.pop('pricing')
    if 'rights' in data:
        data['rights_scheme'] = data.pop('rights')
    # Packages are handled slightly differently: each sub-item has to be
    # uploaded first, then the package needs to be created.
    if storymarket_type == 'package':
        package_items = data.pop('items')
        for subitem in package_items:
            subobj = subitem.pop('object')
            subtype = subitem.pop('type').rstrip('s')
            synced, created = save_to_storymarket(subobj, subtype, subitem)
            data.setdefault('%s_items' % subtype, []).append(synced.storymarket_id)
    # Grab the appropriate manager for the given storymarket type.
    # We want to "be liberal in what [we] accept," so try both with
    # and without a trailing "s" -- this allows "photo" as well
    # as "photos", for example.
    try:
        manager = getattr(api, storymarket_type)
    except AttributeError:
        try:
            manager = getattr(api, storymarket_type+'s')
        except AttributeError:
            raise ValueError("Invalid storymarket type: %r" % storymarket_type)
    # Pull out the blob from the data since it gets uploaded separately.
    blob = data.pop('blob', None)
    sm_obj = manager.create(data)
    # Upload the blob. This queues and backgrounds the task using
    # Celery if STORYMARKET_QUEUE_UPLOADS is True.
    if blob:
        if QUEUE_UPLOADS:
            upload_blob_task.delay(sm_obj, blob)
        else:
            sm_obj.upload_blob(blob)
    return SyncedObject.objects.mark_synced(obj, sm_obj)
|
990,084 | 9cfd04f2f8617f6a3a20637e416e201f9e7017b9 | # 9.1
class Restaurant:
    """Model a restaurant with a name, a cuisine type and a served-customer tally."""

    def __init__(self, restaurant_name, cuisine_type, number_served=0):
        """Record the restaurant's name, cuisine and initial customers served."""
        self.restaurant_name = restaurant_name
        self.cuisine_type = cuisine_type
        self.number_served = number_served

    def description(self):
        """Print a short blurb about the restaurant."""
        print(f"{self.restaurant_name} is Located in chennai")
        print("Non veg is also available")

    def availability(self):
        """Announce that the restaurant is currently open."""
        print(f'{self.restaurant_name} Restaurant is open now')

    def set_number_served(self, number_served):
        """Overwrite the served-customer tally."""
        self.number_served = number_served

    def increment_number_served(self, increment_number_served):
        """Add the given number of newly served customers to the tally."""
        self.number_served += increment_number_served
if __name__ == '__main__':
    # Exercise 9.1: create a restaurant and exercise its methods.
    ss = Restaurant('SS Hyderabad', 'Biryani')
    print(ss.restaurant_name)
    print(ss.cuisine_type)
    ss.description()
    ss.availability()
    print()
    # Exercise 9.2: two more instances.
    muthu = Restaurant('Muthulakshmi', 'Fast food')
    print(muthu.restaurant_name)
    print(muthu.cuisine_type)
    muthu.description()
    muthu.availability()
    print()
    padi = Restaurant('Padi Kattu Kadai', 'Maligai Samanam')
    print(padi.restaurant_name)
    print(padi.cuisine_type)
    padi.description()
    padi.availability()
|
990,085 | 7b9afb9f6267d468268d1415b664ff3855481fb0 | import argparse
import pandas as pd
from Bio import Phylo
import itertools
import numpy as np
import scipy.sparse
def run():
    """Cluster tree tips by patristic distance and write cluster assignments to TSV.

    Reads a newick tree, optionally restricts to tips listed in --seqNames,
    computes all pairwise tip-to-tip distances, thresholds them, and labels
    connected components of the resulting graph as clusters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tree',
        help='newick file with tree on which to cluster sequences from')
    parser.add_argument('--treeNameSep',
        default='|',
        help='character to seperate newick tree names on to match input seq names')
    parser.add_argument('--treeNameField',
        default=1,
        type=int,
        help='which field in the character seperated tree names to take to match input seq names')
    parser.add_argument('--seqNames',
        help='newline delimited text file of sequence names to cluster')
    parser.add_argument('--threshold',
        help='distance threshold to split clusters',
        type=float,
        default=0.3)
    args = parser.parse_args()
    #args.seqNames = 'data/weighted_downsampling/ga_focused_aligned_masked_weighted_ga_included_seqs.tsv'
    #args.tree = 'data/weighted_downsampling/ga_focused_aligned_masked_weighted.treefile_tres/0/0_refined_time.newick'
    tree = Phylo.read(open(args.tree, 'r'), 'newick')
    # biopython does not read in branch lengths correctly, weird hack -- revisit todo
    #for node in tree.find_clades():
    #    node.branch_length = float(node.branch_length.lstrip('_').replace('_', '-'))
    # Map the chosen name field of each tip to its clade object.
    tip_dict = {i.name.split(args.treeNameSep)[args.treeNameField]: i for i in tree.get_terminals()}
    if args.seqNames:
        # Restrict clustering to the tips named in the second TSV column.
        get_tip_names = set(pd.read_csv(args.seqNames, sep='\t', header=None)[1])
        get_tips = [tip_dict[i] for i in get_tip_names if i in tip_dict.keys()]
        print([i for i in get_tip_names if i not in tip_dict.keys()])
        print(f'{len(get_tips)} tips in --seqNames file found in tree')
    else:
        get_tips = list(tip_dict.values())
    # use permutation so matrix is symmetrical
    pairs = itertools.permutations(get_tips,2)
    dists = \
        [[pair[0].name, pair[1].name, tree.distance(pair[0], pair[1])] for pair in pairs]
    dists = pd.DataFrame(dists)
    # Column 3: 1 when the pair is closer than the threshold (an edge), else 0.
    dists[3] = dists[2].apply(lambda x: x < args.threshold).astype(int)
    # generate adjacency matrix from the pairwise edge indicator
    mat_df = dists[[0,1,3]].pivot(index=0, columns=1)
    mat = np.array(mat_df)
    # Connected components of the adjacency graph define the clusters.
    cc = scipy.sparse.csgraph.connected_components(mat)
    cc_assigned = pd.DataFrame(zip(mat_df.index, cc[1]))
    print(f'there are {len(cc_assigned[1].unique())} clusters using a threshold of {args.threshold}')
    for cc_idx, cc_id in enumerate(np.unique(cc[1])):
        print(f'there are {np.unique(cc[1], return_counts=True)[1][cc_idx]} sequences in cluster {cc_id}')
    cc_assigned.to_csv(args.tree.split('.')[0]+f'_clusters_{args.threshold}.tsv', sep='\t', header=None, index=None)
if __name__ == "__main__":
run() |
990,086 | 6c78cbf97f3509270abf5f6d7a96fb851f93137e | """Analysis of SPH data.
The analysis sub-package contains Plonk implementations of typical
smoothed particle hydrodynamics post-simulation analysis tasks.
Examples
--------
Create a radial profile in the xy-plane.
>>> p = Profile(snap, cmin=10, cmax=200, n_bins=100)
>>> p.plot('radius', 'density')
Calculate the angular momentum on the particles.
>>> angmom = particles.angular_momentum(snap)
Calculate the total angular momentum over all particles.
>>> angmom_tot = total.angular_momentum(snap)
Calculate the Roche sphere radius given two sink particles.
>>> s1 = snap.sinks[0]
>>> s2 = snap.sinks[1]
>>> separation = plonk.utils.math.norm(s1['position'] - s2['position'])
>>> Roche = sinks.Roche_sphere(s1['mass'], s2['mass'], separation)
"""
from . import discs, filters, particles, sinks, sph, total
from .profile import Profile, load_profile
# Public names exported by ``from plonk.analysis import *``.
__all__ = [
    'Profile',
    'discs',
    'filters',
    'load_profile',
    'particles',
    'sinks',
    'sph',
    'total',
]
|
990,087 | a686eb1b93f1b800ba6ab5395fb3ef0083c1185d | from time import sleep, time;
import os;
from java.io import File;
from javax.imageio import ImageIO;
from java.awt.image import BufferedImage
#import java
from math import *
from gda.data import NumTracker
from gda.jython import InterfaceProvider
from gda.device.detector import DetectorBase
from gda.device.detector.areadetector.v17 import NDPluginBase;
from gda.device import Detector
from gda.analysis.datastructure import *
from gda.analysis import *
#from gda.analysis import DataSet
from uk.ac.diamond.scisoft.analysis import SDAPlotter
from gda.analysis.io import PNGLoader, PNGSaver;
from gov.aps.jca.event import PutEvent;
from gov.aps.jca.event import PutListener;
from gov.aps.jca import CAStatus;
from gov.aps.jca.dbr import DBRType;
import scisoftpy as dnp;
class NdArrayPluginDeviceClass(DetectorBase):
    """GDA (Jython) detector wrapper around an EPICS areaDetector NDArray plugin.

    Fetches image frames through the NDArray plugin, optionally saves each
    frame to a numbered PNG file and plots it on a named SDA panel. Exposes
    the standard GDA Detector lifecycle (prepareForCollection / collectData /
    readout).
    """
    # Detector status codes mirroring the GDA Detector interface constants.
    DETECTOR_STATUS_IDLE, DETECTOR_STATUS_BUSY, DETECTOR_STATUS_PAUSED, DETECTOR_STATUS_STANDBY, DETECTOR_STATUS_FAULT, DETECTOR_STATUS_MONITORING = range(6);
    def __init__(self, name, adDetector, panelName="Fleacam"):
        self.setName(name);
        self.detector=adDetector
        # One level after the wrapped detector so it is read out afterwards.
        self.level = self.detector.getLevel() + 1;
        self.panel = panelName;
        self.filePrefix = None;
        self.pathPostfix = None;
        self.fileName=name;
        self.filePath = name+"Image";
        self.dataHolder = None;
        self.rawData = None;
        self.dataset = None;
        self.dataType= None;
        self.width = 1024;
        self.height = 768;
        self.exposureTime = 1;
        self.alive = False;   # when True, plot each frame on self.panel
        self.save = True;     # when True, write each frame to a PNG file
        self.logScale = False;
        self.extras=[];
        self.scannableSetup();
    def scannableSetup(self):
        # Configure the scannable's extra names/formats; a 'file' column is
        # only reported when saving is enabled.
        self.setInputNames([]);
        extraNames = ['ExposureTime'];
        extraNames.extend( self.extras );
        outputFormat = ['%f'];
        outputFormat.extend( ['%10.4f']*len(self.extras) );
        if self.save:
            extraNames.insert(1, 'file');
            outputFormat.insert(1, "%s");
        self.setExtraNames(extraNames);
        self.setOutputFormat(outputFormat);
    def setSave(self, save=True):
        # Toggle PNG saving and rebuild the scannable columns accordingly.
        self.save = save;
        self.scannableSetup();
    def setAlive(self, alive=True):
        # Toggle live plotting of each acquired frame.
        self.alive = alive
    def setFile(self, newPathPostfix, newFilePrefix):
        # Set the sub-directory postfix and file-name prefix for saved images.
        self.pathPostfix = newPathPostfix;
        self.filePrefix = newFilePrefix
        self.setNewImagePath();
    def getDataSet(self):
        # Last acquired frame as a 2-D dataset (height x width), or None.
        return self.dataset;
    def setNewImagePath(self):
        """Set file path and name based on current scan run number"""
        nextNum = NumTracker("tmp").getCurrentFileNumber();
        basePath=InterfaceProvider.getPathConstructor().createFromDefaultProperty() + File.separator;
        subDir="%d_%s"%(nextNum, self.pathPostfix);
        newImagePath = os.path.join(basePath, subDir);
        if not os.path.exists(newImagePath):
            #print "Path does not exist. Create new one."
            os.makedirs(newImagePath);
            self.imageNumber=0;#To reset the image number
        if not os.path.isdir(newImagePath):
            print "Invalid path";
            return;
        self.filePath = newImagePath;
        #print "Image file path set to " + self.filePath;
        return self.filePath;
    def getFilePath(self):
        return self.filePath;
    def getDimensions(self):
        # (width, height, depth) of the detector image as reported by EPICS.
        adBase=self.detector.getAdBase();
        x=adBase.getMaxSizeX_RBV();
        y=adBase.getMaxSizeY_RBV();
#        x=adBase.getArraySizeX_RBV();
#        y=adBase.getArraySizeY_RBV();
        z=adBase.getArraySizeZ_RBV();
        return x,y,z;
    def getCameraData(self):
        # Pull the raw frame out of the NDArray plugin and reshape it into a
        # (height, width) dataset stored on self.dataset.
        adBase=self.detector.getAdBase();
        self.width, self.height, z = self.getDimensions();
        ndArray=self.detector.getNdArray();
        self.dataType=ndArray.getPluginBase().getDataType_RBV();
        t0=time();
        if self.dataType==NDPluginBase.UInt8:
#            print "Debug: Fetching UInt8 data started"
            self.rawData=ndArray.getByteArrayData()
            #cast the byte array to unsigned then double array
            tempDoubleList = [ float(x&0xFF) for x in self.rawData ];
        # NOTE(review): this elif tests the constant NDPluginBase.UInt16
        # itself (always truthy) instead of comparing it to self.dataType, so
        # every non-UInt8 type takes the UInt16 path and the else branch is
        # unreachable. Left unchanged because other integer types may rely on
        # this fallback -- confirm intended behaviour before fixing.
        elif NDPluginBase.UInt16:
#            print "Debug: Fetch UInt16 data started"
            self.rawData=ndArray.getShortArrayData()
#            tempDoubleList = [ float(x&0xFFFF) for x in self.rawData ];
            tempDoubleList = self.rawData;
        else:
            print "Unknown data type"
#        print "Dubug: Fetching data finished within %d seconds" %(time()-t0);
        da = dnp.array(tempDoubleList);
        self.dataset = da.reshape([self.height, self.width]);
        return self.dataset;
    def saveImageFile(self, fileName, width=None, height=None, rawData=None):
        # Render rawData into a grayscale BufferedImage and write it as PNG.
        if width == None:
            width = self.width;
        if height == None:
            height = self.height;
        if rawData == None:
            rawData = self.rawData;
        # Rend an image
        # Create a buffered image in which to draw
#        bufferedImage = BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
#        bufferedImage.setRGB(0, 0, width, height, rawData, 0, width);
        if self.dataType==NDPluginBase.UInt8:
            bufferedImage = BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
        # NOTE(review): same always-truthy elif as in getCameraData -- the
        # else branch below is unreachable; confirm before changing.
        elif NDPluginBase.UInt16:
            bufferedImage = BufferedImage(width, height, BufferedImage.TYPE_USHORT_GRAY);
        else:
            print "Unknown data type"
#        bufferedImage = BufferedImage(width, height, BufferedImage.TYPE_BYTE_INDEXED);
#        bufferedImage = BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
        bufferedImage.getRaster().setDataElements(0, 0, width, height, rawData);
        # Create a graphics contents on the buffered image, draw something and then dispose it
#        g2d = bufferedImage.createGraphics();
#        g2d.setColor(Color.white);
#        g2d.fillRect(0, 0, width, height);
#        g2d.setColor(Color.black);
#        g2d.fillOval(0, 0, width, height);
#        g2d.dispose();
        # Save as PNG
        file=File(fileName);
        ImageIO.write(bufferedImage, "png", file);
        return;
    def loadImageFile(self, fileName):
        # Load a previously saved PNG back into self.dataset (and display it
        # when live plotting is enabled).
        if fileName != None:#Get data from file directly
#            self.data.load(PNGLoader(fileName));
            self.dataHolder=dnp.io.load(fileName);
#            self.dataset = self.data.getAxis(0);
            self.dataset = self.dataHolder[0];
        if self.alive:
            self.display();
        return self.dataset;
    def postCapture(self):
        # After acquisition: save self.dataset as the next numbered PNG file.
        if not self.save:
            return;
        runs=NumTracker(self.name);
        nextNum = runs.getCurrentFileNumber() + 1;
        fn="%s%05d.png"%(self.filePrefix, nextNum);
        self.fileName = os.path.join(self.filePath, fn);
#        print "Dubug: Saving file started";
        t0=time();
        #My Own PNG file writer
#        self.saveImageFile(self.fileName);
        #PNG file writer from GDA Analysis package
        dnp.io.save(self.fileName, self.dataset, autoscale=False);
#        print "Dubug: Saving file finished within %d seconds" %(time()-t0);
        runs.incrementNumber();
    def singleShot(self, newExpos):
        # Convenience: one full acquisition at the given exposure, polling
        # until the detector goes idle, then read out.
        self.setCollectionTime(newExpos);
        self.detector.prepareForCollection();
        self.collectData();
        sleep(self.exposureTime);
        while self.getStatus() != Detector.IDLE:
            sleep(self.exposureTime/10.0);
        return self.readout();
    def display(self,dataset=None):
        # Plot a dataset (default: the last acquired frame) on self.panel.
        if dataset is None:
            if self.dataset is None:
                print "No dataset to display";
                return;
            else:
                dataset = self.dataset;
        if self.panel:
            SDAPlotter.imagePlot(self.panel, dataset);
        else:
            print "No panel set to display"
            raise Exception("No panel_name set in %s. Set this or set %s.setAlive(False)" % (self.name,self.name));
    # Detector Implementation
    def getCollectionTime(self):
        self.exposureTime=self.detector.getCollectionTime();
        return self.exposureTime;
    def setCollectionTime(self, newExpos):
        self.exposureTime = newExpos;
        self.detector.setCollectionTime(self.exposureTime);
#        adBase=self.detector.getAdBase();
    def prepareForCollection(self):
        if self.save:
            self.setNewImagePath()
        self.detector.prepareForCollection();
    def collectData(self):
        self.detector.collectData();
    def getStatus(self):
        return self.detector.getStatus();
    def createsOwnFiles(self):
        # Report file creation only when saving is enabled.
        return self.save;
    def readout(self):
        # Fetch the frame, save/plot it as configured, and return the
        # scannable row ([exposure] or [exposure, filename]).
        self.getCameraData();#To get the dataset
        self.postCapture();#To save file if needed
        if self.alive:
            self.display();
        if self.save:
            result=[self.exposureTime, self.fileName];
        else:
            result=[self.exposureTime];
        return result;
class NdArrayWithStatPluginDeviceClass(DetectorBase):
    """GDA (Jython) detector wrapper reading centroid and statistics from an
    areaDetector NDStats plugin.

    Each readout reports exposure time plus centroid (x, y, sigma_x, sigma_y,
    sigma_xy) and statistics (mean, sigma, max, min, total) of the frame.
    """
    # Detector status codes mirroring the GDA Detector interface constants.
    DETECTOR_STATUS_IDLE, DETECTOR_STATUS_BUSY, DETECTOR_STATUS_PAUSED, DETECTOR_STATUS_STANDBY, DETECTOR_STATUS_FAULT, DETECTOR_STATUS_MONITORING = range(6);
    def __init__(self, name, adDetector):
        self.setName(name);
        self.detector=adDetector
        # One level after the wrapped detector so it is read out afterwards.
        self.level = self.detector.getLevel() + 1;
        self.exposureTime = 0.1;
        # Enable centroid/statistics computation in the NDStats plugin.
        self.detector.setComputeCentroid(True);
        self.detector.setComputeStats(True);
        self.ndStats=self.detector.getNdStats();
        self.centroid=[None]*5;
        self.statistics=[None]*5;
        self.setInputNames([]);
        self.setExtraNames(['exposure', 'cen_x', 'cen_y', 'cen_sx', 'cen_sy', 'cen_sxy', 'mean', 'sigma', 'max', 'min', 'sum']);
        self.setOutputFormat(['%f', '%10.4f', '%10.4f', '%10.4f', '%10.4f', '%10.4f', '%10.4f', '%10.4f', '%10.4f', '%10.4f', '%10.4f']);
    def getCentroid(self):
        # Read centroid readbacks: [x, y, sigma_x, sigma_y, sigma_xy].
        centroidX=self.ndStats.getCentroidX_RBV()
        centroidY=self.ndStats.getCentroidY_RBV()
        centroidSigmaX=self.ndStats.getSigmaX_RBV()
        centroidSigmaY=self.ndStats.getSigmaY_RBV()
        centroidSigmaXY=self.ndStats.getSigmaXY_RBV()
        self.centroid=[centroidX, centroidY, centroidSigmaX, centroidSigmaY, centroidSigmaXY];
        return self.centroid;
    def getStatistics(self):
        # Read image statistics readbacks: [mean, sigma, max, min, total].
        statMax=self.ndStats.getMaxValue_RBV()
        statMin=self.ndStats.getMinValue_RBV()
        statTotal=self.ndStats.getTotal_RBV()
        statMean=self.ndStats.getMeanValue_RBV()
        statSigma=self.ndStats.getSigma_RBV()
        self.statistics=[statMean, statSigma, statMax, statMin, statTotal];
        return self.statistics;
    # Detector Implementation
    def getCollectionTime(self):
        self.exposureTime=self.detector.getCollectionTime();
        return self.exposureTime;
    def setCollectionTime(self, newExpos):
        self.exposureTime = newExpos;
        self.detector.setCollectionTime(self.exposureTime);
#        adBase=self.detector.getAdBase();
#    def prepareForCollection(self):
#        self.detector.prepareForCollection();
    def collectData(self):
        self.detector.collectData();
    def getStatus(self):
        return self.detector.getStatus();
#        return Detector.IDLE;
    def createsOwnFiles(self):
        return False;
    def readout(self):
        # Refresh both readback groups and return the scannable row:
        # [exposure, centroid..., statistics...].
        self.getStatistics();
        self.getCentroid();
        result=[self.exposureTime]
        result.extend( self.centroid);
        result.extend( self.statistics);
        return result;
    def getPosition(self):
        return self.readout();
    def toFormattedString(self):
        # "name: v1, v2, ..." summary of the last centroid and statistics.
        w=self.centroid + self.statistics;
        resultStr=self.getName() + ': ' + '%s, '*len(w) %tuple(w)
        return resultStr.strip(', ');
#Usage
#import Diamond.AreaDetector.NdArrayPluginDevice; reload(Diamond.AreaDetector.NdArrayPluginDevice)
#from Diamond.AreaDetector.NdArrayPluginDevice import NdArrayPluginDeviceClass
#viewerName="Area Detector"
#d7cam = NdArrayPluginDeviceClass('f1', d7cam_ad, viewerName);
#d7cam.setFile('flea', 'd7cam_');
#Usage
#import Diamond.AreaDetector.NdArrayPluginDevice; reload(Diamond.AreaDetector.NdArrayPluginDevice)
#from Diamond.AreaDetector.NdArrayPluginDevice import NdArrayWithStatPluginDeviceClass
#d7cam = NdArrayWithStatPluginDeviceClass('f1', d7cam_ad);
#Note: unlike NdArrayPluginDeviceClass, this class takes no panel name and has
#no setFile() -- it only reports centroid/statistics readbacks.
|
990,088 | a31cdd394365a216f1040499d57e4b1ddc223458 | #!/usr/bin/env python3
import subprocess as sub
class execHandler():
    """Thin wrapper around the ``gpg`` command line for file encrypt/decrypt.

    SECURITY NOTE(review): the passphrase defaults to a hard-coded value and
    is kept in memory as a plain string; replace the default and source the
    passphrase from a secure store before real use.
    """
    def __init__(self, password = 'asdf1234'):
        # Passphrase fed to gpg on stdin (via --passphrase-fd 0).
        self.password = password
    def __del__(self):
        # Best-effort scrub; this only rebinds the attribute -- the original
        # string object may linger in memory until garbage collected.
        self.password = ''
    def decrypt(self, file):
        # Decrypt *file* with gpg, supplying the passphrase on stdin; prints
        # the decrypted plaintext. stderr is captured but ignored.
        proc = sub.Popen(['gpg', '--batch', '--passphrase-fd', '0', '--decrypt', file], stdin = sub.PIPE,
                stdout = sub.PIPE, stderr = sub.PIPE)
        (stdout, stderr) = proc.communicate(input = self.password.encode('utf-8'), timeout = 15)
        print(stdout)
    def encrypt(self, file, user):
        # Encrypt to *file* for recipient *user* using their public key.
        # NOTE(review): gpg --encrypt needs no passphrase; the stdin write here
        # appears unnecessary -- confirm before removing.
        proc = sub.Popen(['gpg', '--encrypt', '-o', file, '-r', user], stdin = sub.PIPE,
                stdout = sub.PIPE, stderr = sub.PIPE)
        (stdout, stderr) = proc.communicate(input = self.password.encode('utf-8'), timeout = 15)
# Demo: encrypt a file for a recipient, then decrypt it again.
if __name__ == '__main__':
    ex = execHandler()
    ex.encrypt('./encrypted.gpg', 'petter.rosander@gmail.com')
    ex.decrypt('./encrypted.gpg')
|
990,089 | 41ec2d909fa40d35b59eacdf56fb61d169cadfcf | #Python program for merge sort
def merge_sort(list1):
    """Sort *list1* in place (ascending) using recursive merge sort.

    Lists of length 0 or 1 are already sorted and left untouched.
    """
    if len(list1) <= 1:
        return
    # Split into two halves and sort each half recursively.
    mid = len(list1) // 2
    left_half = list1[:mid]
    right_half = list1[mid:]
    merge_sort(left_half)
    merge_sort(right_half)
    # Merge the two sorted halves back into list1.
    li = ri = write = 0
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            list1[write] = left_half[li]
            li += 1
        else:
            list1[write] = right_half[ri]
            ri += 1
        write += 1
    # Copy whichever half still has leftovers.
    for value in left_half[li:]:
        list1[write] = value
        write += 1
    for value in right_half[ri:]:
        list1[write] = value
        write += 1
# Demo: sort a sample list in place and print the result.
list1 = [77, 43, 56, 23, 11, 22, 90, 1, 7]
merge_sort(list1)
print("Sorted List is:", list1)
|
990,090 | fb4d2bad2e3cb91e73e32c547bd1e4852a55f1c3 | #! /usr/bin/env python
#coding=utf-8
from __future__ import division
class LanguageModel:
    """Word-level unigram/bigram language model estimated from a corpus file.

    The corpus is a plain-text file of whitespace-separated tokens, one
    sentence per line; tokens are lower-cased. After construction:
      self.m0[w]      = P(w)        (unigram relative frequency)
      self.m[w0][w1]  = P(w1 | w0)  (bigram conditional, per-line adjacency)
    """

    # Historical default corpus location, kept for backward compatibility.
    DEFAULT_CORPUS = r'G:\wangzq\PolyU\Code-Switching\Emotion_CS_Analysis\comments.lm.seg.txt'

    def __init__(self, corpus_path=None):
        """Build the model from *corpus_path* (defaults to DEFAULT_CORPUS).

        Generalized: the original hard-coded the Windows corpus path; the
        optional parameter keeps old callers working while allowing any file.
        """
        self.m = {}
        self.m0 = {}
        count = 0  # total token count, denominator of the unigram estimates
        if corpus_path is None:
            corpus_path = self.DEFAULT_CORPUS
        with open(corpus_path) as corpus:
            for line in corpus:
                words = [w.lower() for w in line.split()]
                n = len(words)
                for i in range(n):
                    if i < n - 1:
                        # bigram count for the adjacent pair (words[i], words[i+1])
                        bigrams = self.m.setdefault(words[i], {})
                        bigrams[words[i + 1]] = bigrams.get(words[i + 1], 0) + 1
                    # unigram count
                    self.m0[words[i]] = self.m0.get(words[i], 0) + 1
                    count += 1
        # Normalize counts into probabilities.
        for word0 in self.m0:
            self.m0[word0] /= count
        for word0 in self.m:
            sumNum = sum(self.m[word0].values())
            for word1 in self.m[word0]:
                self.m[word0][word1] /= sumNum

    def getProb(self, word0, word1):
        """Return P(word1 | word0) * P(word1), or 0 for an unseen bigram."""
        if word0 in self.m and word1 in self.m[word0]:
            return self.m[word0][word1] * self.m0[word1]
        else:
            return 0
#lm=LanguageModel() |
990,091 | 7c740ae10467e8d3e9092280cdb5d6593ca7bb30 |
# 9x9 sudoku board; 0 marks an empty cell (the board starts fully empty).
live_grid = [[0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0]]
def display_grid(grid):
    """Print a 9x9 grid with ' | ' between 3-column boxes and a dashed line
    between 3-row bands."""
    for row_idx, row in enumerate(grid):
        if row_idx % 3 == 0 and row_idx != 0:
            print("- - - - - - - - - - - - -")
        for col_idx in range(len(grid[0])):
            if col_idx % 3 == 0 and col_idx != 0:
                print(" | ", end="")
            if col_idx == 8:
                # Last column ends the line.
                print(row[col_idx])
            else:
                print(str(row[col_idx]) + " ", end="")
def find_space(grid):
    """Return the (col, row) coordinates of the first empty cell (value 0),
    scanning row by row, or None when the grid is full.

    BUG FIX: the original inner loop iterated range(len(grid)) -- the number
    of ROWS -- for the column index, which is wrong for non-square grids;
    it now iterates the actual row length. The explicit None return makes
    the "no space" result visible.
    """
    for row in range(len(grid)):
        for col in range(len(grid[row])):
            if grid[row][col] == 0:
                # Note the (col, row) order, matching the original contract.
                return (col, row)
    return None
def unique_solution(solution1, grid):
    """Placeholder uniqueness check; currently always reports False.

    BUG FIX: the original body was ``return false:`` -- an undefined name
    plus a stray colon, i.e. a syntax error that prevented the module from
    importing. TODO: implement the actual uniqueness test comparing
    *solution1* against other solutions of *grid*.
    """
    return False
# Render the (empty) starting board when the module is run.
display_grid(live_grid)
|
990,092 | 69094f836d6380444279124f1230e4d3190194e6 | #! python
import h5py
import numpy
import matplotlib.pyplot
import pandas
import datetime
def read_h5(filename):
    """Read every dataset whose path contains 'VALUES' from an HDF5 file and
    concatenate their first columns into one DataFrame.

    Column names are stripped/upper-cased; if a DATETIME column is present it
    becomes the (sorted) index. 9999. sentinel values are converted to NaN.
    Returns the assembled pandas DataFrame.
    """
    def selectValues(name, obj):
        # h5py visitor callback: collect matching dataset objects.
        if 'VALUES' in name:
            lstDataSets.append(obj)
    lstDataSets = []
    with h5py.File(filename, mode='r') as of:
        of.visititems(selectValues)
        # BUG FIX: materialize the data while the file is still open --
        # h5py datasets become unreadable once the file is closed.
        lstDataFrames = [pandas.DataFrame(dset[:, 0]) for dset in lstDataSets]
    data = pandas.concat(lstDataFrames)
    data.rename(columns={col: col.strip(' ').upper() for col in data.columns.tolist()}, inplace=True)
    if "DATETIME" in data.columns:
        data["DATETIME"] = pandas.to_datetime(data["DATETIME"].str.decode("utf-8"))
        data.set_index("DATETIME", inplace=True)
    data.sort_index(inplace=True)
    # BUG FIX: DataFrame.replace is NOT in-place by default; the original
    # discarded the result, so 9999. sentinels were never turned into NaN.
    data = data.replace(9999., numpy.nan)
    return data
def convert_h5force_dataframe(fichier):
    """Trim a force-measurement DataFrame in place to the columns of
    interest and convert byte-string AVEC/SANS flags to 1/0."""
    # Keep only the columns of interest (see Sylvain's PDF for the list).
    Données_recueilles_force = ["NROT", "NLOT", "M0C", "RE0C", "PI0", "TI",
    "ALPHAC", "BETA", "CXC", "CYC", "CZC", "CLAAC" , "CMAAC", "CNAAC", "Wing", "HTP", "VTP", "DATETIME", "NPT"]
    for element in fichier.columns:
        if element not in Données_recueilles_force:
            del fichier[element]
    # Decode byte strings and map "AVEC" -> 1, anything else -> 0.
    # NOTE(review): the `else` branch zeroes every decoded value that is not
    # exactly "AVEC" -- confirm these columns only ever hold AVEC/SANS.
    for element in fichier.columns:
        if type(fichier[element][0]) == bytes:
            # NOTE(review): label-based chained indexing (df[col][i]) assumes a
            # default RangeIndex and triggers SettingWithCopy warnings; confirm
            # the index is still 0..n-1 when called after read_h5 (which may
            # set DATETIME as index).
            for i in range(len(fichier[element])):
                fichier[element][i] = fichier[element][i].decode()
                if fichier[element][i] == "AVEC":
                    fichier[element][i] = 1
                else:
                    fichier[element][i] =0
def convert_h5pressure_dataframe(fichier):
    """Drop every column in place except 'NPT' and the pressure-tap
    columns (names starting with 'KP_PS')."""
    to_drop = [col for col in fichier.columns
               if col != "NPT" and not col.startswith("KP_PS")]
    for col in to_drop:
        del fichier[col]
def code_confog_maquette(fichier):
    """Derive the model-configuration code ('CONF') from the HTP/VTP
    flag columns, then drop those columns.

    BW = body+wing, BWH = +horizontal tail, BWHV = +vertical tail.
    The (HTP=0, VTP=1) combination has no defined code and yields ""
    (same as the original's default).
    """
    conf = []
    # BUG FIX: the original wrote per-cell via chained label indexing
    # (fichier["CONF"][i]), which raises SettingWithCopy warnings and
    # breaks on any non-RangeIndex; iterate values and assign once.
    for htp, vtp in zip(fichier["HTP"], fichier["VTP"]):
        if htp == 0 and vtp == 0:
            conf.append("BW")
        elif htp == 1 and vtp == 0:
            conf.append("BWH")
        elif htp == 1 and vtp == 1:
            conf.append("BWHV")
        else:
            conf.append("")
    fichier["CONF"] = conf
    del fichier["HTP"]
    del fichier["VTP"]
# else :
# ##on ordonne le fichier et on prends les données qui nous interessent, cf pdf de sylbain
# Données_recueilles_pression = []
#
# for element in fichier.columns:
# if element not in Données_recueilles_pression:
# del fichier[element]
# ##on decode les bytes et on remplace les AVEC/ SANS pas de 1 ou 0
# for element in fichier.columns:
# if type(fichier[element][0]) == bytes:
# for i in range(len(fichier[element])):
# fichier[element][i] = fichier[element][i].decode()
# if fichier[element][i] == "AVEC":
# fichier[element][i] = 1
# else:
# fichier[element][i] =0
|
990,093 | ce9062fd96bc51a4286481276f3edc105fb44441 | import webapp2
import jinja2
import os
from google.appengine.ext import db
# Jinja2 environment loading templates from this file's directory,
# with autoescaping enabled for safe HTML rendering.
jinja_environment = jinja2.Environment(autoescape=True,
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), '.')))
class Blog(webapp2.RequestHandler):
    """Request handler that renders the blog index page."""

    def get(self):
        """Handle GET: fetch the ten newest posts and render the template."""
        posts = db.GqlQuery("SELECT * FROM Post ORDER BY cdate DESC limit 10")
        page = jinja_environment.get_template('blogtemplate.htm')
        self.response.out.write(page.render({'blogs': posts}))
990,094 | d7e1d989e3d5f0622696d75899f431bd920140a2 | ################################################################################
################################################################################
## IMPORTANT NOTE ##
# 1. Please set appropiate mode of operation towards the END OF THIS SCRIPT
# Mode = 0 --> For single image and specify the filename
# Mode = 1 --> For multiple images and specify the range
################################################################################
################################################################################
## TEAM DETAILS ##
'''
e-Yantra 2015
Task 3 - Path Planning
eYRC
Team ID: eYRC#1011
File name: eYRC#1011_PathPlanning.py
Version: 1.0.0
Author: Meghana B
'''
## PROGRAM DESCRIPTION ##
'''
This is a program that calculates the shortest path from the start and end point
using A* algorithm.
An image is read and processed to get the positions of the start and end points
and also detect the obstacles.
The length and the coordinates of the shortest path detected is displayed along
with the path drawn on the output image.
'''
################################################################################
################################################################################
import numpy as np
import cv2
import math
import heapq
# Reference image used only to obtain the grid dimensions below.
img = cv2.imread('test_images/test_image1.png')
h,w,c = img.shape
# Running image counter (incremented per processed image in __main__).
count = 1
# NOTE(review): placeholder string; __main__ rebinds `imn` to an actual
# cv2 image before any function uses it.
imn = 'img'+str(count)
#Coordinates for Start/End nodes (module-level defaults)
start_x = 0
start_y = 0
end_x = 0
end_y = 0
#List of grid positions of the obstacles, refilled per image by get_start_end
wall_map = []
## Function which processes the image and returns the coordinates of the
## start and end point
## Also creates a list with the coordinates of the obstacles present
def get_start_end(img2):
    """Scan the 40px grid cells of img2 for the start (red) and end (green)
    markers; black obstacle cells are appended to the module-level wall_map.

    Returns (start_x, start_y, end_x, end_y) as grid indices; any marker
    not found defaults to 0.
    """
    # BUG FIX: initialise the results locally. In the original these names
    # were only assigned inside the colour branches; because they are
    # assigned somewhere in the function they are locals throughout, so a
    # missing red/green cell made the return raise UnboundLocalError (the
    # module-level defaults are shadowed and never used).
    start_x = 0
    start_y = 0
    end_x = 0
    end_y = 0
    y = 20
    for i in range(0,10):
        x = 20
        if(y<h):
            for j in range (0,10):
                if(x<w):
                    # Mean HSV colour of a 10x10 patch at the cell centre.
                    roi = img2.copy()
                    roi = roi[y-5:y+5, x-5:x+5, :]
                    hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
                    hue,sat,val,ret = cv2.mean(hsv)
                    #If red, set the position for start node
                    if(hue==0 and sat==255 and val==255):
                        start_x = i
                        start_y = j
                    #If green, set the position for end node
                    elif(hue==60 and sat==255 and val==255):
                        end_x = i
                        end_y = j
                    #If black, add the obstacle position into the list
                    elif (val==0):
                        wall_map.append((i,j))
                    x = x+40
        y = y+40
    return (start_x,start_y,end_x,end_y)
class Node(object):
    """One cell of the search grid used by the A* path finder."""

    def __init__(self, x, y, space):
        """Store the cell's coordinates, walkability flag, and reset the
        A* bookkeeping fields."""
        self.x = x
        self.y = y
        self.space = space    # 1 = free (white) cell, 0 = obstacle
        self.parent = None    # back-pointer used to reconstruct the path
        self.f = 0            # total cost: f = g + h
        self.g = 0            # cost from the start node
        self.h = 0            # heuristic estimate to the goal
class path_algorithm(object):
    """A* shortest-path search over a rows x cols grid.

    Obstacle coordinates are read from the module-level `wall_map` list
    and grid cells are `Node` instances.
    """
    def __init__(a):
        # NOTE: the author uses `a` in place of the conventional `self`.
        a.open_list = []
        a.close_list = set()
        #Transform open list into a heap (node with lowest f value at top)
        heapq.heapify(a.open_list)
        #List of nodes which are free white spaces
        a.nodes = []
        #Number of rows and columns of the grid
        a.rows = 5
        a.cols = 5
    def get_pos(a, x, y):
        # Row-major lookup of the Node at grid position (x, y).
        # NOTE(review): the stride uses a.rows; correct only while
        # rows == cols (both 5 here) -- confirm before resizing the grid.
        pos = a.nodes[(x*a.rows)+y]
        return pos
    ## Function to create a list which stores the position of white spaces and
    ## obstacles and to initialise the start and end node
    def grid_map(a, sx, sy, ex, ey):
        for i in range(0,a.rows):
            for j in range(0,a.cols):
                # Cells listed in wall_map are obstacles (space == 0).
                if (i,j) in wall_map:
                    space = 0
                else:
                    space = 1
                a.nodes.append(Node(i,j,space))
        #Set the positions for start and end node
        a.start = a.get_pos(sx,sy)
        a.end = a.get_pos(ex,ey)
    def get_adjacent(a, node):
        # Return the in-bounds 4-neighbours (right, up, left, down) of node.
        adj_nodes = []
        if (node.x < a.cols-1):
            adj_nodes.append(a.get_pos(node.x+1, node.y))
        if (node.y > 0):
            adj_nodes.append(a.get_pos(node.x, node.y-1))
        if (node.x > 0):
            adj_nodes.append(a.get_pos(node.x-1, node.y))
        if (node.y < a.rows-1):
            adj_nodes.append(a.get_pos(node.x, node.y+1))
        return adj_nodes
    ## Function which calculates the heuristic value and returns it
    def get_h(a, node):
        #H factor value
        # NOTE(review): the factor is negative, so h *rewards* distance from
        # the goal -- not an admissible A* heuristic. It still terminates on
        # this small closed grid; confirm the intent.
        h_factor = -20
        dx = abs(node.x - a.end.x)
        dy = abs(node.y - a.end.y)
        h = h_factor * (dx + dy)
        return h
    ## Function to update the values of the selected adjacent node
    def update_values(a, adj, node):
        # Each step costs 40 (one 40px grid cell).
        adj.g = node.g + 40
        adj.h = a.get_h(adj)
        adj.f = adj.g + adj.h
        adj.parent = node
    ## Function which adds the final coordinates of the path in the route_path list
    ## in the reverse order (excluding the start and end points)
    def path_list(a, route_path):
        node = a.end
        # Back trace the path until the start node is encountered
        while(node.parent is not a.start):
            node = node.parent
            route_path.append((node.y+1,node.x+1))
    ## Function to find the shortest path using A* algorithm
    def path_detect(a, route_path):
        #Add start node to open heap queue
        heapq.heappush(a.open_list, (a.start.f, a.start))
        while(len(a.open_list)):
            #Pop node from open heap queue
            f,node = heapq.heappop(a.open_list)
            a.close_list.add(node)
            #If it is the end node, then append the path list
            if node is a.end:
                a.path_list(route_path)
                break
            #To get a list of adjacent nodes
            adj_list = a.get_adjacent(node)
            #Get adjacent nodes and compare the f values
            # NOTE(review): heap entries are (f, Node) tuples; on equal f
            # values Python 3 would try to compare Node objects and raise
            # TypeError (Node defines no __lt__). Works under Python 2,
            # which this script (print statements below) targets.
            for adj in adj_list:
                if(adj.space and (adj not in a.close_list)):
                    if((adj.f, adj) in a.open_list):
                        if(adj.g > (node.g + 40)):
                            a.update_values(adj, node)
                    else:
                        a.update_values(adj, node)
                        heapq.heappush(a.open_list, (adj.f, adj))
## Function to draw the shortest path and display the final output image
## Also calculates the total path length and returns it
def draw_path(path, ex, ey):
    """Draw `path` onto the module-level image `imn`, display it, and
    return the number of path segments."""
    #Path length
    length = len(path)-1
    #Draw the colored path
    # NOTE(review): `imn` is the module-level current image; at import time
    # it is the string 'img1', but __main__ rebinds it to a cv2 image before
    # any call -- .copy() would fail otherwise.
    imx = imn.copy()
    for i in range(0,length):
        y1,x1 = path[i]
        y2,x2 = path[i+1]
        if(i>0):
            # Cell colour fades from green to red along the path.
            cl = (int)((255/(length)*i))
            cv2.rectangle(imx,((y1*40)-38,(x1*40)-38),((y1*40)-2,(x1*40)-2),(0,cl,255-cl),-1)
    # Blend the coloured overlay onto the output image at 20% opacity.
    opacity = 0.2
    cv2.addWeighted(imx, opacity, imn, 1 - opacity, 0, imn)
    #Draw the path lines
    for i in range(0,length):
        y1,x1 = path[i]
        y2,x2 = path[i+1]
        cv2.line(imn,((y1*40)-20,(x1*40)-20),((y2*40)-20,(x2*40)-20),(255,0,0),3)
    #Mark the end node
    cv2.circle(imn,((ey*40)-20,(ex*40)-20),8,(255,0,0),-1)
    #Display the output image
    name = 'Final Output '+str(count)
    cv2.imshow(name,imn)
    return length
## Function which takes an image as its argument, calculates the shortest path
## and returns the path length and a list of the route path
def play(img):
    """Run marker detection + A* on the current module image and return
    (route_length, route_path).

    NOTE(review): the `img` parameter is unused -- the function reads the
    module-level image `imn` instead; confirm this is intentional.
    """
    route_path = []
    # Reset the obstacle list left over from any previous image.
    del wall_map[:]
    start_x,start_y,end_x,end_y = get_start_end(imn)
    # The path is built end-first; start/end are appended around the A* interior.
    route_path.append((end_y+1, end_x+1))
    a = path_algorithm()
    a.grid_map(start_x, start_y, end_x, end_y)
    a.path_detect(route_path)
    route_path.append((start_y+1, start_x+1))
    route_path.reverse()
    route_length = draw_path(route_path, end_x+1, end_y+1)
    # Drop the duplicated start entry added above.
    del route_path[0]
    return route_length, route_path
#============================ SET MODE HERE ============================#
'''
Enter the Mode here:
MODE = 0 --> To check the output for a Single Image
And also enter the required file name below
MODE = 1 --> To check the output for Multiple Images in a range
And also set the range value in the 'for loop' below
'''
#SET THE MODE HERE
mode = 1
#FOR SINGLE IMAGE (mode = 0), ENTER FILENAME BELOW:
image_name = 'test_images/test_image1.png'
#FOR MULTIPLE IMAGES, ENTER THE RANGE:
range_start = 1
range_end = 5 ##Eg: Displays images from 1 to 5 (including 5)
# NOTE: Python 2 print statements below -- this script targets Python 2.
if __name__ == "__main__":
    #Checking output for single image
    if (mode==0):
        img1 = cv2.imread(image_name) ##Enter the File Name above
        # Rebind the module-level working image before play() reads it.
        imn = img1.copy()
        route_length, route_path = play(imn)
        print 'Route Length =', route_length
        print 'Route Path =', route_path
    #Checking output for all images
    elif (mode==1):
        route_length_list = []
        route_path_list = []
        for file_number in range(range_start, range_end+1): ##Enter the Range above
            file_name = 'test_images/test_image'+str(file_number)+'.png'
            pic = cv2.imread(file_name)
            imn = pic.copy()
            route_length, route_path = play(imn)
            # Image counter used for the output window titles.
            count = count+1
            route_length_list.append(route_length)
            route_path_list.append(route_path)
        print 'Route Lenth List: ',route_length_list
        print '\nRoute Path List: '
        for i in range(len(route_path_list)):
            print '\nImage',i+1,':\n',route_path_list[i]
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
990,095 | 70b46fec7074701347e33f67c05843a82a488795 | """Test helper to handle making RPC requests to an AMPQ broker."""
import gzip
import json
import uuid
import time
from typing import Any, Optional
import pika
from pika.adapters.blocking_connection import BlockingChannel
# Convenience aliases for the pika connection/channel types used below.
Connection = pika.BlockingConnection
Channel = BlockingChannel
class ResponseTimeout(Exception):
    """Raised when an RPC response does not arrive within the timeout."""
    pass
# pylint: disable=too-few-public-methods
class Client:
    """Set up RPC response consumer with handler & provide request caller."""
    channel: Channel
    connection: Connection
    correlation_id: str
    response: Any
    def __init__(
        self,
        connection: Connection,
        channel: Channel
    ):
        self.connection = connection
        self.channel = channel
        print('declaring response queue')
        # Exclusive auto-delete queue gives this client a private reply queue.
        result = self.channel.queue_declare(
            queue='', exclusive=True, auto_delete=True)
        self.callback_queue = result.method.queue
        print('listening on response queue')
        self.channel.basic_consume(
            queue=self.callback_queue,
            on_message_callback=self._on_response,
            auto_ack=True)
    def _on_response(self, _: Any, __: Any, props: Any, body: bytes) -> None:
        """Decode a gzip+JSON response matching our correlation id and
        store it on self.response; log success or the error trace."""
        if self.correlation_id == props.correlation_id:
            self.response = json.loads(gzip.decompress(body).decode('UTF8'))
            if self.response['success']:
                print(f'Response received {self.response}')
            elif self.response['success'] == False:
                # NOTE(review): `== False` (rather than `else`) skips falsy
                # values such as None or 0 -- confirm those cannot occur or
                # should also be reported.
                print('Error received:')
                for line in self.response['error']['trace'].split('\n'):
                    print(line)
    # PENDS python 3.9 support in pylint
    # pylint: disable=unsubscriptable-object
    def call(
            self,
            target_queue: str,
            message: Optional[Any] = None,
            timeout: int = 5000) -> Any:
        """Send message as RPC Request to given queue & return Response.

        NOTE(review): `timeout` is compared against time.time() (seconds)
        and passed to process_data_events as seconds, yet the default 5000
        reads like milliseconds -- confirm the intended units.
        """
        self.response = None
        self.correlation_id = str(uuid.uuid4())
        message_props = pika.BasicProperties(
            reply_to=self.callback_queue,
            correlation_id=self.correlation_id)
        message_as_dict = {
            'data': message,
        }
        print(f'Sending message {message}')
        self.channel.basic_publish(
            exchange='',
            routing_key=target_queue,
            properties=message_props,
            body=gzip.compress(json.dumps(message_as_dict).encode('UTF8')))
        start_time = time.time()
        print('Message sent, waiting for response...')
        while self.response is None:
            if (start_time + timeout) < time.time():
                raise ResponseTimeout()
            self.connection.process_data_events(time_limit=timeout)
        # NOTE: mypy incorrectly thinks this statement is unreachable
        # what it doesn't know is that connection.process_data_events()
        # will call _on_response, setting self.response when a response
        # is received on the callback queue defined in __init__
        return self.response  # type: ignore
|
990,096 | ac2a7aad5c3a11d7f8a774d7188cc90915a37388 | # Load libraries
import numpy as np
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.utils.multiclass import type_of_target
from sklearn.preprocessing import LabelEncoder
# Load dataset
url = "https://raw.githubusercontent.com/lauradiosan/AI-2019-2020/master/exam/4/tshirtsNew.csv"
names = ['temperature', 'femaleTshirts', 'maleTshirts', 'competitions', 'location']
dataset = read_csv(url, names=names, header=0)
# shape (how many rows/columns)
#print(dataset.shape)
# head
#print(dataset.head(20))
# descriptions
#print(dataset.describe())
# class distribution (how many times each value appears)
#print(dataset.groupby('femaleTshirts').size())
# Split-out validation dataset
# Features: temperature, competitions, location; target: femaleTshirts.
array = dataset.values
X = array[:, (0, 3, 4)]
y = array[:, 1].reshape(-1, 1)
# Label-encode the 'competitions' column and append it as an extra feature.
aux = X[:, 1].ravel()
print("AICI!!!!!!")
print(X)
#print(aux)
label_encoder = LabelEncoder()
Xaux = label_encoder.fit_transform(aux)
#print(Xaux)
aux2 = np.asarray(Xaux)
x_ar = aux2.reshape(-1, 1)
#(x_ar)
# NOTE(review): Xnou keeps the raw string columns ('competitions',
# 'location') alongside the encoded one; sklearn estimators require
# numeric input, so fitting below likely fails -- confirm the intended
# feature matrix.
Xnou = np.concatenate((X,x_ar),axis=1)
Xnou = Xnou.reshape((-1, 4))
print(Xnou[1:])
print("PANA AICII!!!!")
Xspecial = Xnou
print(Xspecial)
X_train, X_validation, Y_train, Y_validation = train_test_split(Xspecial, y, test_size=0.20, random_state=1)
#print(Y_validation)
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# evaluate each model in turn
results = []
# NOTE(review): this rebinds `names`, clobbering the CSV column-name list above.
names = []
for name, model in models:
    kfold = StratifiedKFold(n_splits=2, random_state=1, shuffle=True)
    cv_results = cross_val_score(model, X_train, Y_train.ravel(), cv=kfold, scoring='accuracy')
    results.append(cv_results)
    names.append(name)
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# Single query: temperature, competitions, location values to predict for.
info = [[26], ['many'], ['high-school']]
x = np.asarray(info)
# BUG FIX: restore the reshape the original commented out -- without it,
# model.predict(x_array) below raised NameError on `x_array`.
# NOTE(review): shape (3, 1) means three 1-feature samples; confirm the
# intended query is a single row [[26, 'many', 'high-school']].
x_array = x.reshape(-1, 1)
print(x)
model = SVC(gamma='auto')
model.fit(X_train, Y_train.ravel())
predictions = model.predict(x_array)
print(predictions)
print(sum(predictions))
|
990,097 | 77540aef5c16171efacbc833730810a6d2329ca8 | # Module identify waveguides in refractive index data
# Written by R. H. White rwhite@eoas.ubc.ca
import numpy as np
import xarray as xr
import math
from scipy.interpolate import interp1d
from scipy import signal
from scipy.signal import butter, lfilter, sosfilt
import scipy.fftpack as fftpack
from datetime import date
# Once finalized these should be saved to rhwhitepackages3.waveguides_pre
## Preprocess code - final
def lowpass_butter(data, day1, fs, order=5):
    """Zero-phase Butterworth low-pass filter along the time axis.

    Parameters: `data` is an xarray DataArray (time, lat, lon) with either
    long (latitude/longitude) or short (lat/lon) coordinate names; `day1`
    is the cutoff period in days; `fs` the sampling interval in days.
    Returns a DataArray with dims (time, latitude, longitude).
    """
    # 2.0 because the Nyquist frequency is 0.5 samples per day.
    lowcut = 2.0 * (1.0/day1) * (1.0/fs)  # fraction of Nyquist frequency
    sos = butter(order, lowcut, btype='lowpass', output='sos')  # low-pass filter
    # Run the filter forwards and backwards to get zero phase shift.
    filtered = signal.sosfiltfilt(sos, data, axis=0)
    # BUG FIX: the original used a bare `except:`, which also swallowed
    # unrelated errors from the DataArray construction. Only the coordinate
    # lookup can legitimately fail (AttributeError on missing lat/lon names).
    try:
        coords = {'time': data.time,
                  'longitude': data.longitude.values,
                  'latitude': data.latitude.values}
    except AttributeError:
        coords = {'time': data.time,
                  'longitude': data.lon.values,
                  'latitude': data.lat.values}
    xrtimefilt = xr.DataArray(filtered, coords=coords,
                              dims=('time', 'latitude', 'longitude'))
    return(xrtimefilt)
def butter_time_filter_wind(infile, cutoff, varname='u'):
    """Low-pass filter a wind field in time and wrap it as a Dataset.

    `cutoff` is the cutoff period in days; the filtered field is stored
    under the variable name `varname`.
    """
    sampling_interval = 1.0   # one sample per day (1/days)
    filtered = lowpass_butter(infile, cutoff, sampling_interval)
    return filtered.to_dataset(name=varname)
def fourier_Tukey(indata,nlons,peak_freq,ndegs=360):
    """FFT-filter one longitude circle of data, keeping wavenumbers up to
    peak_freq, with and without a Tukey taper at the cutoff.

    Returns (tur_filtered_sig, filtered_sig, t): the Tukey-tapered result,
    the hard-cutoff result, and the longitude grid in degrees.
    """
    X_fft = fftpack.fft(indata)
    f_s = nlons
    freqs = fftpack.fftfreq(len(indata)) * f_s
    t = np.linspace(0, ndegs, f_s, endpoint=False)
    # Hard cutoff: zero every wavenumber above peak_freq.
    filt_fft = X_fft.copy()
    filt_fft[np.abs(freqs) > peak_freq] = 0
    filtered_sig = fftpack.ifft(filt_fft)
    # create Tukey window to smooth the wavenumbers removed (so no exact cutoff at k=2,
    #which will change at different latitudes)
    # Window is 2 wavenumbers more than the peak, but multiplied by 2 because the Tukey window is symmetric
    M = (peak_freq + 2)*2
    # NOTE(review): `alpha` is assigned but the literal 0.3 is passed below.
    alpha = 0.3 # co-sine weighting covers 30% of the window
    tukeyWin = signal.tukey(M, alpha=0.3, sym=True)[int(M/2):M]
    turfilt_fft = X_fft.copy()
    n = len(turfilt_fft)
    # Taper the low positive wavenumbers, zero the middle of the spectrum,
    # and mirror-taper the (negative-frequency) tail.
    turfilt_fft[0:int(M/2)] = turfilt_fft[0:int(M/2)]*tukeyWin
    turfilt_fft[int(M/2):n-int(M/2)] = 0
    turfilt_fft[n-int(M/2):n] = turfilt_fft[n-int(M/2):n]*tukeyWin[::-1]
    tur_filtered_sig = fftpack.ifft(turfilt_fft)
    return(tur_filtered_sig,filtered_sig,t)
ntimes = len(infile.time)
tur_filt_data = np.ndarray(infile.shape)
std_filt_data = np.ndarray(infile.shape)
try:
nlats = len(infile.latitude)
for itime in range(0,ntimes):
for ilat in range(0,nlats):
x = infile.isel(latitude=ilat).isel(time=itime)
tur_filt_data[itime,ilat,:],std_filt_data[itime,ilat,:],t = fourier_Tukey(
x.values,len(x.longitude),peak_freq=peak_freq)
data_turfilt = xr.DataArray(tur_filt_data,coords={'time':infile.time,
'longitude':infile.longitude,'latitude':infile.latitude},
dims = ('time','latitude','longitude'))
data_stdfilt = xr.DataArray(std_filt_data,coords={'time':infile.time,
'longitude':infile.longitude,'latitude':infile.latitude},
dims = ('time','latitude','longitude'))
except:
nlats = len(infile.lat)
for itime in range(0,ntimes):
for ilat in range(0,nlats):
x = infile.isel(lat=ilat).isel(time=itime)
tur_filt_data[itime,ilat,:],std_filt_data[itime,ilat,:],t = fourier_Tukey(
x.values,len(x.lon),peak_freq=peak_freq)
data_turfilt = xr.DataArray(tur_filt_data,coords={'time':infile.time,
'longitude':infile.lon.values,'latitude':infile.lat.values},
dims = ('time','latitude','longitude'))
data_stdfilt = xr.DataArray(std_filt_data,coords={'time':infile.time,
'longitude':infile.lon.values,'latitude':infile.lat.values},
dims = ('time','latitude','longitude'))
data_turfilt = data_turfilt.to_dataset(name='u')
return(data_turfilt)
def calc_Ks_SG(Uin,SG_step1=0,SG_step2=0,winlen=41):
    """Compute the stationary wavenumber Ks from zonal wind Uin, optionally
    smoothing each meridional derivative with a Savitzky-Golay filter of
    polynomial order SG_step1/SG_step2 and window length winlen.

    Returns (ddy_1, Ks, Ks2).
    """
    ## Calculate BetaM
    ## Hoskins and Karoly (see also Vallis (page 551) and Petoukhov et al 2013
    ## and Hoskins and Ambrizzi (1993))
    OMEGA = 7.2921E-5
    a = 6.3781E6
    # Support both long (latitude) and short (lat) coordinate names.
    try:
        lats_r = np.deg2rad(Uin.latitude)
    except AttributeError:
        lats_r = np.deg2rad(Uin.lat)
    coslat = np.cos(lats_r)
    betaM1 = 2.0 * OMEGA * coslat * coslat / a
    Um = Uin / coslat
    cos2Um = Um * coslat * coslat
    # first differentiation
    ddy_1 = ddy_merc(cos2Um)
    # divide by cos2phi
    ddy_1_over_cos2p = ddy_1 * (1.0/(coslat * coslat))
    # Apply Savitzky-Golay filter
    if SG_step1 > 0:
        # Check that axis 1 is latitude
        if Uin.dims[1] in ['latitude','lat','lats']:
            temp = signal.savgol_filter(ddy_1_over_cos2p,
                window_length=winlen, polyorder=SG_step1,
                axis=1)
            ddy_1_over_cos2p = xr.DataArray(temp,coords={'time':ddy_1_over_cos2p.time,
                'longitude':ddy_1_over_cos2p.longitude,
                'latitude':ddy_1_over_cos2p.latitude},
                dims = ('time','latitude','longitude'))
        else:
            # NOTE(review): `error` is not a builtin and is not defined in
            # this view -- presumably a module-level helper or a typo for
            # `raise ValueError(...)`; confirm.
            error('latitude axis is not as expected, or not named latitude, lat or lats')
    # second differentiation
    ddy_2 = ddy_merc(ddy_1_over_cos2p)
    # Apply Savitzky-Golay filter
    if SG_step2 > 0:
        # Check that axis 1 is latitude
        if Uin.dims[1] in ['latitude','lat','lats']:
            temp = signal.savgol_filter(ddy_2,
                window_length=winlen, polyorder=SG_step2,
                axis=1)
            ddy_2 = xr.DataArray(temp,coords={'time':ddy_2.time,
                'longitude':ddy_2.longitude,
                'latitude':ddy_2.latitude},
                dims = ('time','latitude','longitude'))
        else:
            error('latitude axis is not as expected, or not named latitude, lat or lats')
    betaM = betaM1 - ddy_2
    # Now calculate Ks from BetaM
    Ks2 = a * a * betaM/Um
    Ks = np.sqrt(Ks2)
    return(ddy_1,Ks,Ks2) #,betaM)
def calc_Ks_rolling(Uin,rolling=0,rolling2=0,rolling3=0):
    """Compute the stationary wavenumber Ks from zonal wind Uin, optionally
    smoothing with centred rolling means in latitude: after the first
    derivative (`rolling`), after the second (`rolling2`), and on Ks2
    (`rolling3`); window lengths are in grid points, 0 disables.

    Returns (ddy_1, Ks, Ks2).
    """
    ## Calculate BetaM
    ## Hoskins and Karoly (see also Vallis (page 551) and Petoukhov et al 2013
    ## and Hoskins and Ambrizzi (1993))
    OMEGA = 7.2921E-5
    a = 6.3781E6
    # Support both long (latitude) and short (lat) coordinate names.
    try:
        lats_r = np.deg2rad(Uin.latitude)
    except AttributeError:
        lats_r = np.deg2rad(Uin.lat)
    coslat = np.cos(lats_r)
    betaM1 = 2.0 * OMEGA * coslat * coslat / a
    Um = Uin / coslat
    cos2Um = Um * coslat * coslat
    # first differentiation
    ddy_1 = ddy_merc(cos2Um)
    # divide by cos2phi
    ddy_1_over_cos2p = ddy_1 * (1.0/(coslat * coslat))
    if rolling > 0:
        # Rolling mean over `rolling` points to smooth
        ddy_1_over_cos2p = ddy_1_over_cos2p.rolling(latitude=rolling, min_periods=None, center=True).mean()
    # second differentiation
    ddy_2 = ddy_merc(ddy_1_over_cos2p)
    if rolling2 > 0:
        # Rolling mean over `rolling2` points to smooth
        ddy_2 = ddy_2.rolling(latitude=rolling2, min_periods=None, center=True).mean()
    betaM = betaM1 - ddy_2
    # Now calculate Ks from BetaM
    Ks2 = a * a * betaM/Um
    if rolling3 > 0:
        Ks2 = Ks2.rolling(latitude=rolling3, min_periods=None, center=True).mean()
    Ks = np.sqrt(Ks2)
    return(ddy_1,Ks,Ks2) #,betaM)
def ddy_merc(invar):
    """Meridional derivative on the Mercator projection:
    ddy = (cos(phi)/a) * d/dphi, applied to an xarray DataArray with a
    'lat' or 'latitude' coordinate.

    NOTE(review): `rearth` is not defined in this view -- presumably a
    module-level Earth-radius constant defined elsewhere; confirm.
    """
    # based on Hoskins and Karoly:
    # https://journals.ametsoc.org/doi/pdf/10.1175/1520-0469%281981%29038%3C1179%3ATSLROA%3E2.0.CO%3B2
    # ddy = cos(phi)/a ddphi
    # Support both long (latitude) and short (lat) coordinate names.
    try:
        nlats = len(invar['lat'])
        latname = 'lat'
        lats = invar['lat']
    except KeyError:
        nlats = len(invar['latitude'])
        latname = 'latitude'
        lats = invar['latitude']
    phi = np.deg2rad(lats)
    cosphi = np.cos(phi).values
    dims = invar.shape
    dvardy = invar.copy(deep=True)
    # Locate the latitude axis in both the variable and its coordinate.
    if latname == 'lat':
        dims_var = invar.dims
        latidx_var = dims_var.index('lat')
        dims_lat = invar.lat.dims
        latidx_lat = dims_lat.index('lat')
    elif latname == 'latitude':
        dims_var = invar.dims
        latidx_var = dims_var.index('latitude')
        dims_lat = invar.latitude.dims
        latidx_lat = dims_lat.index('latitude')
    dvar = np.gradient(invar,axis=latidx_var)
    dphi = np.gradient(phi,axis=latidx_lat)
    # Broadcast dphi/cosphi across longitude for multi-dimensional input.
    if len(dims_var) > 1:
        dvardy[...] = (dvar/dphi[:,None]) * (cosphi[:,None] / rearth)
    else:
        dvardy[...] = (dvar/dphi) *(cosphi / rearth)
    return(dvardy)
990,098 | 23a44d2ca57208d033762661f5f5c7ac382da66f | from abc import ABC, abstractmethod
class IranKhodro(ABC):
    """Abstract base class for IranKhodro car models."""
    @abstractmethod
    def engine(self):
        """Print a message identifying this model's engine."""
        pass
class Samand(IranKhodro):
    """Concrete model: Samand."""
    def engine(self):
        # Message is Persian for "I am a Samand!"
        print("man Samandam!")
class Pejo207(IranKhodro):
    """Concrete model: Peugeot 207."""
    def engine(self):
        # Message is Persian for "I am a Pejo207"
        print("man Pejo207am")
class Rona(IranKhodro):
    """Concrete model: Rona."""
    def engine(self):
        # Message is Persian for "Samand combined with Pride"
        print("Samand ba peride tarkib shod")
class IranKhodroEngineFactory:
    """Interactive factory: asks the user for a model name and runs
    that model's engine."""

    # Explicit whitelist of constructible models. SECURITY FIX: the
    # original called eval() on raw user input, which allowed execution
    # of arbitrary Python expressions.
    _MODELS = {"Samand": Samand, "Pejo207": Pejo207, "Rona": Rona}

    def __init__(self):
        # Prompt typo fixed ("Pejo27" -> "Pejo207") so users are told the
        # actually accepted name.
        self.typeEngine = input("che motori mikhay(Samand or Pejo207 or Rona)?")
        try:
            model_cls = self._MODELS[self.typeEngine]
        except KeyError:
            raise ValueError("unknown model: " + self.typeEngine)
        model_cls().engine()
if __name__ == "__main__":
    # Demo: prompt the user and run the requested model's engine.
    f = IranKhodroEngineFactory()
|
990,099 | 9c1023e12e33ed39687ac3491f8a8c25e75e1620 | import requests
import re
# https://github.com/firehol/blocklist-ipsets/blob/master/blocklist_de.ipset
def getBaseIP(url: str) -> list:
    """Download an IPset blocklist from `url` and return every
    IPv4-looking address found in its text."""
    body = requests.get(url).text
    return re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', body)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.