blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
266f1a48bfa41437d2e351500350b7afec97facb | Python | porwalameet/practicepython | /ex4.py | UTF-8 | 516 | 4.4375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Create a program that asks the user for a number and then prints out a list of all the divisors of that number.
(If you don\’t know what a divisor is, it is a number that divides evenly into another number. For example, 13 is a divisor of 26 because 26 \/ 13 has no remainder.)
"""
def get_divisors(num):
    """Return every positive divisor of num, in ascending order.

    A divisor is any integer i in [1, num] with num % i == 0.
    """
    return [i for i in range(1, num + 1) if num % i == 0]


if __name__ == '__main__':
    num = int(raw_input("enter the number whose divisors to be looked for:"))
    # Bug fix: the original `print("divisor list = "), divisor_list` built a
    # tuple and threw it away, so the divisor list itself was never printed.
    print("divisor list = {}".format(get_divisors(num)))
| true |
ed0954941a900f66c6f50a4aa1233ae340133e36 | Python | scott-mao/yolov5_prune_sfp | /inference.py | UTF-8 | 18,305 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
author:xhw
date:2021.4.26
class Detector_model(object):
def __init__(self):
选择设备、加载模型、模型是否半精度、检查图像尺寸、获取类别名、预热
def img_process(self, img):
letterbox、转换通道、Totensor、图片是否半精度、归一化、扩展维度
def detect_frame(self, frame):
img_process、推理(detect类:将三个特征图合为一个)、NMS(xywh->xyxy)、写入结果(坐标重缩放)
'''
import os
import time
import numpy as np
import math
import yaml
import cv2
import torch
from numpy import random
import torchvision
import torch.nn as nn
# Select which class-name list the detector uses; 'voc' is the active choice.
dataset = 'voc'
if dataset == 'coco':
    # 80 COCO class names, indexed by class id.
    names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
             'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
             'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
             'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
             'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
             'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
             'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
             'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
             'hair drier', 'toothbrush']
elif dataset == 'voc':
    # 20 Pascal VOC class names, indexed by class id.
    names = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
             'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
elif dataset == 'self':
    # Fill in your own class names for a custom-trained model.
    names = []
class Detector_model(object):
    """YOLOv5 detector wrapper: loads a model once and runs per-frame inference."""

    def __init__(self, model_path, device, img_size=640):
        """Load the model, pick the device/precision and warm it up.

        model_path: path to a serialized model checkpoint.
        device: '' / 'cpu' / CUDA index string, passed to select_device().
        img_size: requested inference size; rounded to a stride multiple.
        """
        # Select the compute device.
        self.device = select_device(device)
        # Use FP16 whenever we are not on CPU.
        self.half = self.device.type != 'cpu'
        #self.half = False
        # Load the model.
        self.model = attempt_load(model_path, map_location=self.device) # load FP32 model
        # Check/round the inference image size to the model's max stride.
        self.imgsz = check_img_size(img_size, s=self.model.stride.max()) # check img_size
        # Convert the weights to half precision (FP16) if enabled.
        if self.half:
            self.model.half() # to FP16
        # Class names for labelling detections.
        self.names = names
        # One random BGR colour per class for drawing boxes.
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.names))]
        # Warm-up: run one forward pass on a zero image (GPU only).
        img = torch.zeros((1, 3, self.imgsz, self.imgsz), device=self.device) # init img
        _ = self.model(img.half() if self.half else img) if self.device.type != 'cpu' else None # run once

    # Image preprocessing: BGR HxWxC uint8 frame -> normalized 1x3xHxW tensor.
    def img_process(self, img):
        # Letterbox the frame to the model size (scale + grey padding).
        img = letterbox(img, new_shape=self.imgsz)[0]
        # Reorder channels and axes.
        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)
        # numpy array -> tensor on the chosen device.
        img = torch.from_numpy(img).to(self.device)
        # Match model precision.
        img = img.half() if self.half else img.float() # uint8 to fp16/32
        # Normalize to [0, 1].
        img /= 255.0 # 0 - 255 to 0.0 - 1.0
        # Add the batch dimension if missing.
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        return img

    # Run detection on a single frame; optionally draw labelled boxes.
    def detect_frame(self, frame, show_label=False):
        """Return (annotated_frame_copy, points) for one BGR frame.

        points is a list of [x1, y1, x2, y2, ...] entries in original
        frame coordinates.
        """
        ori_shape = frame.shape
        fontScale = 0.5
        frame_copy=frame.copy()
        # Box line thickness scaled to the frame size.
        bbox_thick = int(0.6 * (ori_shape[0]+ori_shape[1] ) / 600)
        # Preprocess.
        frame = self.img_process(frame)
        # Inference (timed); forward() is called implicitly.
        t1 = time.time()
        pred = self.model(frame)[0]
        t2 = time.time()
        points=[]
        # NMS (also converts xywh -> xyxy internally).
        pred = non_max_suppression(pred, 0.25, 0.35)
        # Class names for labels.
        class_name = self.names
        # Per-image detection results.
        for det in pred: # detections per image
            # Skip empty results.
            if det is not None and len(det):
                # Rescale boxes from the letterboxed size back to the
                # original frame size, rounded to integers.
                det[:, :4] = scale_coords(frame.shape[2:], det[:, :4], ori_shape).round()
                # Unpack each row: first four values -> xyxy, then conf, cls.
                for *xyxy, conf, cls in det:
                    x1,y1,x2,y2 = int(xyxy[0]),int(xyxy[1]),int(xyxy[2]),int(xyxy[3])
                    # NOTE(review): appends the `points` list itself as the 5th
                    # element of every entry — possibly meant conf/cls; confirm.
                    points.append([x1,y1,x2,y2, points])
                    if show_label:
                        #cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        cv2.rectangle(frame_copy, (x1, y1), (x2, y2), self.colors[int(cls)], 2)
                        #print()
                        #print(int(cls))
                        # "<class>: <confidence>" label text.
                        bbox_mess = '%s: %.2f' % (class_name[int(cls)], float(conf))
                        t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
                        #cv2.rectangle(frame_copy, (int(xyxy[0]), int(xyxy[1])), (int(xyxy[0]) + t_size[0], int(xyxy[1]) - t_size[1] - 3), (0, 0, 255),-1) # filled
                        cv2.rectangle(frame_copy, (int(xyxy[0]), int(xyxy[1])), (int(xyxy[0]) + t_size[0], int(xyxy[1]) - t_size[1] - 3), self.colors[int(cls)],-1) # filled
                        cv2.putText(frame_copy, bbox_mess, (int(xyxy[0]), int(xyxy[1]) - 2), cv2.FONT_HERSHEY_SIMPLEX,
                                    fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)
        t3 = time.time()
        # Report inference and post-processing time.
        print('infer', t2-t1, 'process', t3-t2)
        return frame_copy,points
# 检查img_size是否为s的倍数
def check_img_size(img_size, s=32):
    """Round img_size up to the nearest multiple of stride s, warning on change."""
    rounded = make_divisible(img_size, int(s))
    print('new_size', rounded)
    if rounded == img_size:
        return rounded
    print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, rounded))
    return rounded
def make_divisible(x, divisor):
    """Return the smallest multiple of divisor that is >= x."""
    multiples = math.ceil(x / divisor)
    return multiples * divisor
# 选择device ==> Detector_model.__init__
def select_device(device='', batch_size=None):
    """Resolve a torch.device from a spec string and print a summary.

    device: '' (auto), 'cpu', or CUDA index string such as '0' or '0,1,2,3'.
    batch_size: if given with multiple GPUs, must divide evenly across them.
    Returns torch.device('cuda:0') when CUDA is used, else torch.device('cpu').
    """
    # device = 'cpu' or '0' or '0,1,2,3'
    cpu_request = device.lower() == 'cpu'
    if device and not cpu_request: # if device requested other than 'cpu'
        # Restrict visible GPUs before any CUDA call.
        os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity
    cuda = False if cpu_request else torch.cuda.is_available()
    if cuda:
        c = 1024 ** 2 # bytes to MB
        ng = torch.cuda.device_count()
        if ng > 1 and batch_size: # check that batch_size is compatible with device_count
            assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        s = 'Using CUDA '
        for i in range(0, ng):
            if i == 1:
                # Indent subsequent lines to align under the first.
                s = ' ' * len(s)
            print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                  (s, i, x[i].name, x[i].total_memory / c))
    else:
        print('Using CPU')
    print('') # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
# 将图片尺寸变为mx640或者640xn(32的倍数,通过缩放、补灰边的方式)==> Detector_model.img_process
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    """Resize img preserving aspect ratio, padding the rest with `color`.

    auto: pad only to the next 64-pixel multiple (minimum rectangle).
    scaleFill: stretch to new_shape with no padding.
    scaleup: allow enlarging small images (disable for better test mAP).
    Returns (padded_image, (width_ratio, height_ratio), (dw, dh)) where
    dw/dh are the per-side paddings actually applied.
    """
    shape = img.shape[:2] # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup: # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
    if auto: # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
    elif scaleFill: # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
    dw /= 2 # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad: # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # round() with the +/-0.1 nudge splits an odd padding 1px asymmetrically.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
    return img, ratio, (dw, dh)
# 将boxes的坐标从img_size尺寸重新缩放到im0尺寸,且都变为整型 ==> Detector_model.detect_frame
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    """Map xyxy boxes from the letterboxed size back to the original image.

    img1_shape: (h, w) of the network input; img0_shape: original frame shape.
    ratio_pad: optional ((gain, gain), (pad_w, pad_h)) override; when None the
    gain/padding are recomputed the same way letterbox() produced them.
    Modifies `coords` in place and returns it, clipped to the image bounds.
    """
    if ratio_pad is None: # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    coords[:, [0, 2]] -= pad[0] # x padding
    coords[:, [1, 3]] -= pad[1] # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
# 将坐标限制在范围内
def clip_coords(boxes, img_shape):
    """Clamp nx4 xyxy boxes in place to an image of shape (height, width, ...)."""
    height, width = img_shape[0], img_shape[1]
    # Columns are x1, y1, x2, y2 -> bounds alternate width/height.
    for col, limit in enumerate((width, height, width, height)):
        boxes[:, col].clamp_(0, limit)
# NMS ==> Detector_model.detect_frame
def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
    """Performs Non-Maximum Suppression (NMS) on inference results

    prediction: raw model output, shape (batch, n_boxes, 5 + n_classes) with
    boxes as xywh, an objectness score and per-class scores.
    classes: optional list of class ids to keep; agnostic: ignore class when
    suppressing; merge: weighted-mean box merging.
    Returns:
         detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    per image (None for images with no detections).
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            # One row per (box, class) pair above threshold.
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS: offset boxes by class * max_wh so different classes
        # never overlap (skipped when agnostic).
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
# 将xywh转为xyxy ==> non_max_suppression
def xywh2xyxy(x):
    """Convert nx4 boxes from [cx, cy, w, h] to corner form [x1, y1, x2, y2].

    Accepts either a torch.Tensor or a numpy array and returns the same kind.
    """
    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    y[:, 0] = x[:, 0] - half_w  # top-left x
    y[:, 1] = x[:, 1] - half_h  # top-left y
    y[:, 2] = x[:, 0] + half_w  # bottom-right x
    y[:, 3] = x[:, 1] + half_h  # bottom-right y
    return y
# 计算box1和box2之间的iou ==> non_max_suppression
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    def box_area(box):
        # box = 4xn; area = width * height
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # Broadcast box1 against box2: intersection is the clamped overlap of the
    # two corner ranges, reduced over the coordinate axis.
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
class Conv(nn.Module):
    # Standard convolution: Conv2d -> BatchNorm2d -> Hardswish (or identity).
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        # autopad() gives 'same' padding when p is None.
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.Hardswish() if act else nn.Identity()

    def forward(self, x):
        # Standard path: conv -> batch norm -> activation.
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        # Used after conv+bn fusion: batch norm is folded into the conv weights.
        return self.act(self.conv(x))
class Ensemble(nn.ModuleList):
    # Ensemble of models: averages the inference outputs of its members.
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        """Run every member model and return (mean_output, None)."""
        y = []
        for module in self:
            # [0] keeps only the inference output of each member.
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.cat(y, 1)  # nms ensemble
        y = torch.stack(y).mean(0)  # mean ensemble
        return y, None  # inference, train output
def autopad(k, p=None):  # kernel, padding
    """Return explicit padding p if given, else 'same' padding for kernel k."""
    if p is not None:
        return p
    # 'same' padding is half the kernel size (per-dimension for sequences).
    return k // 2 if isinstance(k, int) else [edge // 2 for edge in k]
def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    """Load checkpoint(s); return a single model, or an Ensemble for a list."""
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        model.append(torch.load(w, map_location=map_location)['model'].float().eval())  # load FP32 model

    # Compatibility updates for checkpoints saved under older torch versions.
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        # Expose names/stride of the last member on the ensemble itself.
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble
if __name__ == '__main__':
    # Smoke test: load a pruned model on CPU and run 5 timed detections
    # on a single image, saving the last annotated result.
    #runs/train/weights/best.pt
    #get_small_script/small_model_all.pt
    model = Detector_model('/root/xhw/yolov5-3.1-self_simple/get_small_script/small_model_all.pt', 'cpu')
    path='images/call3.jpg'
    frame = cv2.imread(path)
    t1 = time.time()
    for i in range(5):
        image, points = model.detect_frame(frame, True)
    #cv2.imshow('aa', image)
    #cv2.waitKey(0)
    cv2.imwrite('res.jpg',image)
| true |
b5c954a5ec8d8a6bdfc8ade8ac36db03a1cf8f26 | Python | Jiachengyou/chat-room | /main/chatrooms_server.py | UTF-8 | 1,340 | 2.828125 | 3 | [] | no_license | import mysql.connector
import os
from termcolor import colored,cprint
'''YOUR DATABASE DETAILS'''
# Connection settings come from the environment (HOST / USER / PASSWORD must
# be set before import) so credentials stay out of source control.
chatroom_serv=mysql.connector.connect(
    host=os.getenv('HOST', None),
    user=os.getenv("USER", None),
    passwd=os.getenv('PASSWORD', None),
    database = 'user_info'
)
# print(os.getenv('USER', None))
# Shared cursor used by every helper in this module.
rooms=chatroom_serv.cursor()
def all_rooms():
    """Return the list of chat-room names stored in the chatserver table."""
    rooms.execute("SELECT server_rooms FROM chatserver")
    # fetchall() yields one-element tuples; unpack the name from each row.
    return [row[0] for row in rooms.fetchall()]
def new_room(room_name):
    """Create a new chat room row (with id 0) and commit the insert."""
    statement = "INSERT INTO chatserver (server_rooms, id) VALUE (%s, %s)"
    rooms.execute(statement, (room_name, 0))
    chatroom_serv.commit()
def insert_message(val):
    """Persist one chat message; val is (id, server_rooms, user, message)."""
    statement = "INSERT INTO messages (id,server_rooms,user, message) VALUE (%s, %s, %s, %s)"
    rooms.execute(statement, val)
    chatroom_serv.commit()
def get_message(chatroom):
    """Print the message history of *chatroom* in yellow.

    Security fix: the room name is now bound as a query parameter instead of
    being interpolated into the SQL text with str.format(), which allowed SQL
    injection through a crafted room name.
    """
    sql = "SELECT user, message FROM messages WHERE server_rooms = %s"
    rooms.execute(sql, (chatroom,))
    for user, message in rooms.fetchall():
        print(colored("[{}]: {}".format(user, message), "yellow"))
| true |
677103cb51dfa0bfec573baf0cdb5e32098d6c5a | Python | STOC-Machine/ros_packages | /offb_py/src/cmd_pub.py | UTF-8 | 2,157 | 2.65625 | 3 | [] | no_license | #! /usr/bin/env python
import rospy
from std_msgs.msg import Int32MultiArray
class cmd_pub(object):
    """Publishes [action, direction] commands to the UAV command topic."""

    def __init__(self):
        #topics
        self.command_pub_0 = rospy.Publisher('uav0/mavros/command',
                                             Int32MultiArray, queue_size=1)
        #self.command_pub_1 = rospy.Publisher('uav1/mavros/command',
        # Int32MultiArray, queue_size=1)
        #self.command_pub_2 = rospy.Publisher('uav2/mavros/command',
        # Int32MultiArray, queue_size=1)
        #self.command_pub_3 = rospy.Publisher('uav3/mavros/command',
        # Int32MultiArray, queue_size=1)
        #variables
        self.cmd = Int32MultiArray()
        self.rate = rospy.Rate(20)
        self.ctrl_c = False
        #wait for initialization: sleep 20 cycles at 20 Hz (~1 s) so the
        # publisher has time to register before the first message.
        for _ in range(20):
            self.rate.sleep()
        #shutdown hook flips ctrl_c so loops can exit cleanly.
        rospy.on_shutdown(self.shutdownhook)

    def shutdownhook(self):
        # Called by rospy on shutdown; mark the node as stopping.
        self.ctrl_c = True

    def publish(self, msg):
        """Publish one command; msg is a 2-element sequence (action, direction)."""
        # set command msg
        #drone_num,
        action, direction = msg
        self.cmd.data = [action, direction]
        # publish to specific drone (only uav0 is active; see commented
        # multi-drone dispatch below).
        self.command_pub_0.publish(self.cmd)
        """
        if drone_num == 0:
            self.command_pub_0.publish(self.cmd)
        elif drone_num == 1:
            self.command_pub_1.publish(self.cmd)
        elif drone_num == 2:
            self.command_pub_2.publish(self.cmd)
        elif drone_num == 3:
            self.command_pub_3.publish(self.cmd)
        elif drone_num == 4:
            self.command_pub_0.publish(self.cmd)
            self.command_pub_1.publish(self.cmd)
            self.command_pub_2.publish(self.cmd)
            self.command_pub_3.publish(self.cmd)
        """
if __name__ == '__main__':
    # Manual smoke test (Python 2 syntax): publish a few commands at 1 Hz.
    rospy.init_node('cmd_pub_node')
    # test publish to all drones
    rate = rospy.Rate(1)
    print "begin tests"
    cmd_obj = cmd_pub()
    print "created object"
    # NOTE(review): publish() takes a single 2-element msg argument, but these
    # calls pass 3 positional args and will raise TypeError — confirm whether
    # the intended signature was publish(drone_num, action, direction).
    cmd_obj.publish(0,0,0)
    print "published to 0"
    rate.sleep()
    cmd_obj.publish(1,1,1)
    print "published to 1"
    rate.sleep()
    cmd_obj.publish(-1,3,0)
    print "published to -1"
    rospy.spin()
| true |
2eb62a7394e7bfc47816d0d0caa9e834533fdfc5 | Python | wajid-shaikh/Python-Examples | /14 decorators/decorator_intro.py | UTF-8 | 475 | 4.09375 | 4 | [] | no_license | # you have to have a complete understanding of functions,
# first class function / closure
# then finally we will learn about decorators
def square(a):
    """Return a squared; used to demonstrate that functions are objects."""
    return a**2

s = square # assign square function to another variable (no call, no copy)
# print(s(7))
print(s.__name__) # gives function name i.e square
print(square.__name__) # gives function name i.e square
# both names refer to the same function object at the same memory location
print(s) # <function square at 0x007EB660>
print(square)# <function square at 0x007EB660>
0ee33855e594afe0a42015a0156d985f19151584 | Python | huashanqitu/iHome-python | /ihome/libs/yuntongxun/sms.py | UTF-8 | 2,660 | 2.984375 | 3 | [
"MIT"
] | permissive | # coding=utf-8
from CCPRestSDK import REST
# Main account SID
# NOTE(review): live credentials are hard-coded in source; move them to
# environment variables or a config file outside version control.
accountSid= '8aaf070867e8660f0167fdc0b2700b7e'
# Main account token
accountToken= 'beb1548364064eb0965f2d7c1a82471b'
# Application id
appId='8aaf070867e8660f0167fdc0b2cd0b85'
# Request host, as below; do not include the http:// prefix
serverIP='app.cloopen.com'
# Request port
serverPort='8883'
# REST API version
softVersion='2013-12-26'

# Send a template SMS
# @param to     recipient phone number
# @param datas  content values as a list, e.g. ['12','34']; pass '' if no substitution is needed
# @param tempId template id
class CCP(object):
    """
    Singleton helper class for sending SMS messages.

    Why wrap the SDK? To avoid re-initialising the REST SDK on every send.
    """
    # Class attribute caching the single shared instance.
    instance = None

    def __new__(cls):
        # If no CCP instance exists yet, create one, initialise the REST SDK
        # on it and cache it; otherwise return the cached instance.
        if cls.instance is None:
            obj = super(CCP, cls).__new__(cls)
            # Initialise the REST SDK once for the lifetime of the process.
            obj.rest = REST(serverIP, serverPort, softVersion)
            obj.rest.setAccount(accountSid, accountToken)
            obj.rest.setAppId(appId)
            cls.instance = obj
        return cls.instance

    def send_template_sms(self, to, datas, temp_id):
        """Send template `temp_id` to `to` with `datas`; 0 on success, -1 on failure."""
        result = self.rest.sendTemplateSMS(to, datas, temp_id)
        # for k,v in result.iteritems():
        #     if k=='templateSMS' :
        #         for k,s in v.iteritems():
        #             print '%s:%s' % (k, s)
        #     else:
        #         print '%s:%s' % (k, v)

        # Example result:
        # smsMessageSid:fa9c702947c146efa897083a089dea7c
        # dateCreated:20181230145634
        # statusCode:000000
        status_code = result.get("statusCode")
        if status_code == "000000":
            # "000000" is the provider's success code.
            return 0
        else:
            # Any other status code means the send failed.
            return -1
# def sendTemplateSMS(to,datas,tempId):
#
#
# #初始化REST SDK
# rest = REST(serverIP,serverPort,softVersion)
# rest.setAccount(accountSid,accountToken)
# rest.setAppId(appId)
#
# result = rest.sendTemplateSMS(to,datas,tempId)
# for k,v in result.iteritems():
#
# if k=='templateSMS' :
# for k,s in v.iteritems():
# print '%s:%s' % (k, s)
# else:
# print '%s:%s' % (k, v)
#sendTemplateSMS(手机号码,内容数据,模板Id)
if __name__ == '__main__':
    # Manual smoke test: send template 1 with two substitution values.
    ccp = CCP()
    ret = ccp.send_template_sms("13534755504", ["1234", "5"], 1)
    print(ret)  # 0 = success, -1 = failure
b11bb0a1c7d162ab0d87780b0c8c3f39b7e6de46 | Python | Santhoshkumard11/django-template-cheatsheet | /django_notes.py | UTF-8 | 1,700 | 2.65625 | 3 | [
"MIT"
] | permissive | #when creating the user don't add the password right away
u = User("enter the fields")
u.set_password("raw password string")
u.save
#getting the id for a model
user_id = User.objects.all().first().id
print(user_id)
#get distinct values (will give you the list of email id's)
SubUser.objects.filter(username=username).values_list('email_id', flat=True).distinct()
#will return only the sepecified field inside the values list
#if you want to set an expiry time for your session - you can also put a datetime object in the params
request.session.set_expiry(300)  # would make the session expire in 5 minutes
#doc string
""" Add a relavant description of the method here
Args:
username (str,required): email id of the user
Returns:
(bool) : true if the user has devices else false
"""
#if you want to see a list of items in the admin page
@admin.register(models.Post)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('title', 'id', 'status', 'slug', 'author')
#NOTE: you have to replace the entire code of admin register to this and make sure the field names are the same
#----------------
#access django db shell
python manage.py dbshell
#-----------------
#don't name the apps with the default names of the packages.
''''
allauth.account',
is same as 'account',
change the apps settings to the following,
class ProfileConfig(AppConfig):
name = 'account'
label="profile"
and add this to installed apps
'account.apps.ProfileConfig',
'''
#-----------------
#if you have more than one auth backend then specify which one to use in the login method
auth.login(request,user,backend='django.contrib.auth.backends.ModelBackend')
#-----------------
| true |
ffde4ae5c58187d8d9d4510746f1a09d54ce3bce | Python | doublenote1/PythonLearning | /6. 制御フロー/1. 関数/2.5 名前空間とスコープ.py | UTF-8 | 2,466 | 4.0625 | 4 | [] | no_license | # === 名前空間とは? ===
# 名前空間: 変数や関数が所属している領域(関数)
# スコープ: 変数や関数が有効である範囲
# ビルトインスコープ: 組込変数や関数の有効範囲
# モジュールスコープ(グローバルスコープ): python スクリプトファイル内のトップレベルで定義した時の有効範囲
# ローカルスコープ: 関数・クラス内を有効範囲とするスコープ
# 参照の順番:
# 1. L: Local
# 2. E: Enclosing function local
# 3. G: Global
# 4. B: Built-in
def a():
    # Namespace local to function a
    l = 0
    s = 'apple'
    def b():
        # Namespace local to function b
        m = 1
        t = 'banana'
        print(dir())
        print(globals().keys())
    print(dir())
    b()

# The global (module-level) namespace
n = 2
u = 'cake'
a()
# ['b', 'l', 's']
# ['m', 't']
# dict_keys(['__name__', '__doc__', '__package__', '__loader__', '__spec__', '__annotations__', '__builtins__', '__file__', '__cached__', 'a', 'n', 'u'])
print(globals().keys())
# dict_keys(['__name__', '__doc__', '__package__', '__loader__', '__spec__', '__annotations__', '__builtins__', '__file__', '__cached__', 'a', 'n', 'u'])
# === Explicitly specifying scope ===
spam = 0
def a():
    # Assigning here does not change the global; a new local variable is
    # created instead.
    spam = 1
a()
print(spam) # -> 0
def b():
    # With a `global` declaration, assignment targets the global variable.
    global spam
    spam = 1
b()
print(spam) # -> 1
spam = 0
def a():
    spam = 1
    def b():
        # `global` lets us read the module-level variable instead of the
        # enclosing (nonlocal) one, which would normally take priority.
        global spam
        return spam
    return b()
print(a()) # -> 0
# From the Python tutorial (class/scope chapter)
def scope_test():
    def do_local():
        # Binds a new local name; invisible outside do_local.
        spam = "local spam"
    def do_nonlocal():
        # Rebinds scope_test's own `spam`.
        nonlocal spam
        spam = "nonlocal spam"
    def do_global():
        # Rebinds the module-level `spam` (created here if absent).
        global spam
        spam = "global spam"
    spam = "test spam"
    do_local()
    print("After local assignment:", spam)
    do_nonlocal()
    print("After nonlocal assignment:", spam)
    do_global()
    print("After global assignment:", spam)

scope_test()
# After local assignment: test spam
# After nonlocal assignment: nonlocal spam
# After global assignment: nonlocal spam
print("In global scope:", spam)
# -> In global scope: global spam
8cca42cf024f84012e2ecf83ec6898bb3c9d8598 | Python | Brugel18/VimHUD | /plugin/vp.py | UTF-8 | 2,890 | 3 | 3 | [] | no_license | import sys
from mode import Mode
from modeslist import ModesList
from processor import Processor
from os.path import expanduser
home = expanduser("~")
# Parser class
# As Vim is a modular editor, we associate a 'Mode' object
# with each given Vim mode. That object finds text written
# in its mode, and parses commands based off of that
# Parser class
# As Vim is a modular editor, we associate a 'Mode' object
# with each given Vim mode. That object finds text written
# in its mode, and parses commands based off of that
class Parser:
    """Collects command tokens from every Vim mode found in a log string."""

    def __init__(self, str):
        # NOTE(review): parameter `str` shadows the builtin str type.
        #The string to parse
        self.toParse = str;
        #An object that stores an array of all modes
        self.ModesList = ModesList(self.toParse)
        #The list of commands parsed from the string
        self.tokens = [];

    def parseStr(self):
        """Parse each mode and flatten its tokens into self.tokens."""
        for mode in self.ModesList.Modes:
            #Parse and process the commands for the mode
            #print self.toParse
            mode.parse()
            strs = mode.tokens
            #Extract the tokens for the mode
            #and add them to the complete list
            for str in strs:
                #print 'String ' + str
                self.tokens.append(str)

    def printTokens(self):
        # NOTE(review): the print statements are commented out, so this
        # currently iterates the modes without any visible effect.
        for mode in self.ModesList.Modes:
            tokList = mode.tokens
            #print 'Printing ' + mode.name
            #for tok in tokList:
            #print 'Token ' + tok

    def addFreq(self):
        """Intended to bump a command's frequency in user_short_stats.txt.

        NOTE(review): several latent bugs — `string.split` is used without an
        `import string` (Python 2 idiom), `int(freq) + '\t'` mixes int and str
        (TypeError), and the accumulated `lrt` string is never written back.
        """
        f = open('user_short_stats.txt')
        self.clean(f)
        lines = f.readlines()
        for line in lines:
            seps = string.split(line, '\t')
            if len(line) == 2:
                lrt = ""
                com = seps[0]
                freq = seps[1]
                freq_int = int(freq) + '\t'
                freq_int += 1
                freq = str(freq_int) + '\n'
                lrt += com
                lrt += freq

    def clean(self, f):
        """Rewrite the stats file with every frequency reset to 1.

        NOTE(review): writes to `f` after reading without seeking back, so the
        reset lines are appended rather than replacing the originals — confirm
        the intended file handling.
        """
        lines = f.readlines()
        lrt = ""
        for line in lines:
            seps = string.split(line, '\t')
            if len(seps) == 2:
                com = seps[0]
                freq = seps[1]
                freq_int = 1
                freq = str(freq_int) + '\n'
                lrt += com + '\t'
                lrt += freq
        f.write(lrt)
def update():
    """Parse ~/.vimlog, record command frequencies, then truncate the log.

    (Python 2 code: uses the `print` statement.)
    """
    #print 'Calling Update...'
    #print home
    f = open(home + '/.vimlog', 'r+')
    #print 'File Opened'
    lines = f.read()
    print lines
    parse = Parser(lines)
    parse.parseStr()
    for mode in parse.ModesList.Modes:
        tokList = mode.tokens
        for tok in tokList:
            # NOTE(review): addFreq() is defined with no parameters besides
            # self, so this call with an argument will raise TypeError.
            parse.addFreq(tok)
    #print 'Cleaning File...'
    # Empty the log so the next session starts fresh.
    f.seek(0)
    f.truncate()
    f.close()
# Create the parser for the input file
#parse = Parser('BACKSPACE^CwqIHELLO')
#parse.parseStr()
#parse.printTokens()
#update()
| true |
b9662da972e1d0769ef0c896eed6d25384c69afc | Python | vaidyaenc/vaidya | /201902-aruba-py-1/basic-demos/str-2.py | UTF-8 | 81 | 2.625 | 3 | [] | no_license |
# Demo: strings are immutable — upper() returns a *new* object, so s1 keeps
# its identity (id) while s2 gets a fresh one.
s1='Hello World'
print(s1,id(s1))
s2=s1.upper()
print(s1,id(s1))
print(s2,id(s2))
| true |
b3164e3402defb9da3be0cfa0abaed6dc6970d4e | Python | khuyentran1401/Python-data-science-code-snippet | /code_snippets/pandas/dataframe_pipe.py | UTF-8 | 678 | 3.71875 | 4 | [] | no_license | # pip install textblob
import pandas as pd
from textblob import TextBlob
def remove_white_space(df: pd.DataFrame) -> pd.DataFrame:
    """Strip leading/trailing whitespace from the 'text' column, in place.

    Uses the vectorized .str accessor instead of a Python-level
    apply(lambda ...), which is both faster and clearer.
    Returns the same DataFrame so it can be used with DataFrame.pipe().
    """
    df['text'] = df['text'].str.strip()
    return df
def get_sentiment(df: pd.DataFrame) -> pd.DataFrame:
    """Add a 'sentiment' column with the polarity score of each 'text' entry.

    Returns the same DataFrame so it can be used with DataFrame.pipe().
    """
    def polarity(text):
        # sentiment[0] is the polarity component of TextBlob's sentiment tuple.
        return TextBlob(text).sentiment[0]

    df['sentiment'] = df['text'].apply(polarity)
    return df
# Demo: chain the two cleaning steps over a tiny DataFrame with pipe().
df = pd.DataFrame({'text': ["It is a beautiful day today ",
                            " This movie is terrible"]})
df = (df.pipe(remove_white_space)
        .pipe(get_sentiment)
      )
print(df)
# Expected output:
"""
 text sentiment
0 It is a beautiful day today 0.85
1 This movie is terrible -1.00
"""
f6c0b815a8a5512ac41c55293e2af58cf8f1ef7e | Python | TobyKillen/UUJCOM101 | /UUJ COURSEWORK PROGRAMMING.py | UTF-8 | 5,885 | 3.78125 | 4 | [] | no_license | #UUJ COURSEWORK
#OBJECT PYTHON FOR COM101
#EMPOLYEE DATA MANAGEMENT TOOL
#DEVELOPED BY TOBY KILLEN
#B00753973
#SECTION 1 - IMPORTING
import os
import time
#SECTION 2 - FUNCTIONS
def header():
    """Print the application banner."""
    banner = (
        "Welcome to the Employee Management Tool v1.6",
        "Developed By Toby Killen",
        "--------------------------------------------",
    )
    for row in banner:
        print(row)
def helpFunction():
    """List every console command the tool understands."""
    command_help = (
        "read - Print out number of data entires in database.",
        "readall - List employees and Details.",
        "salary - Print report on total salary.",
        "salary -a - Print report on average salary based on number of employee.",
        "add - Add new employee to database.",
        "group - Print report detailing number of employee grouped by each position.",
        "query - Query the list, returning employees above set salary.",
        "exit - Quits the Data management tool.",
        "cls - Clears the GUI.",
    )
    for entry in command_help:
        print(entry)
def mainMenu1():
    """Interactive command loop: read a command and dispatch to a handler.

    Loops until the user types 'exit'.
    """
    header()
    while True:
        userinput = str(input(">>> "))
        if userinput == str("readall"):
            readallFunction()
        elif userinput == str("read"):
            readFunction()
        elif userinput == "add":
            addFunction()
        elif userinput == "salary":
            salaryFunction()
        elif userinput == "salary -a":
            avgSalaryFunction()
        elif userinput == "group":
            groupFunction()
        elif userinput == "query":
            queryFunction()
        elif userinput == "cls":
            os.system("cls")
            # NOTE(review): recursing here stacks a new menu frame on every
            # 'cls'; a `continue` after clearing would avoid unbounded depth.
            mainMenu1()
        elif userinput == "exit":
            quit()
        elif userinput == "help":
            helpFunction()
def readallFunction():
    """Print every employee record in dataset.txt (the header line is skipped).

    Improvement: uses a `with` block so the file handle is closed even if
    reading raises, instead of the manual open()/close() pair.
    """
    with open("dataset.txt", "r") as dataset:
        next(dataset)  # skip the header row
        print(dataset.read())
def readFunction():
    """Print how many employee records dataset.txt contains (header excluded).

    Improvement: `with` guarantees the file is closed, and the manual counter
    loop is replaced by a generator `sum`.
    """
    with open("dataset.txt", "r") as dataset:
        line = sum(1 for _ in dataset)
    # Subtract 1 for the header row, matching the original report.
    print("This dataset contains", line - 1, "data entries. ")
def addFunction():
    """Interactively collect a new employee record and append it to dataset.txt."""
    dataset = open('dataset.txt','a') # OPENS THE FILE IN APPEND MODE
    emp_no = int(input("Employee Number: "))
    # NOTE(review): the file was opened in append mode, so iterating it for
    # reading raises io.UnsupportedOperation; also `emp_no in line` tests an
    # int against a str, which raises TypeError. The duplicate check never
    # works as written — confirm and reopen in read mode for the scan.
    for line in dataset:
        if emp_no in line:
            print("Please Enter a employee number which has not already been taken. ")
        else:
            pass
    emp_name = input("Employee Name: ")
    age = int(input("Employee Age: "))
    # Employees must be adults; otherwise bounce back to the menu.
    if age >= 18:
        pass
    else:
        print("Error. Employee must be 18 years old or older. ")
        time.sleep(5)
        os.system("cls")
        # NOTE(review): calling mainMenu1() here nests a new menu instead of
        # returning; execution resumes below when that menu ever exits.
        mainMenu1()
    position = input("Employee Position: ")
    # Position must be one of the four recognised job titles.
    if position == "Developer":
        pass
    elif position == "Tester":
        pass
    elif position == "DevOps":
        pass
    elif position == "Analyst":
        pass
    else:
        print("Error. Position has to be 'Developer','Tester','DevOps' or 'Analyst'")
        time.sleep(5)
        os.system("cls")
        addFunction()
    salary = int(input("Employee Salary: "))
    yrs_emp = int(input("Year's Employed: "))
    # Years employed cannot be negative.
    if yrs_emp >= 0:
        pass
    else:
        print("Error. Year's employed can not be negative. ")
        time.sleep(5)
        os.system("cls")
        mainMenu1()
    # Append the record as a comma-separated line.
    dataset.write("\n")
    dataset.write(str(emp_no) + ", " + str(emp_name) + ", " + str(age) + ", "+ str(position) + ", "+ str(salary)+ ", " + str(yrs_emp))
    print("\nEmployee Successfully added")
    time.sleep(3)
    dataset.close()
    os.system("cls")
    mainMenu1()
def salaryFunction():
    """Print (and return) the total salary bill for all employees.

    Improvements: `with` guarantees the file is closed, the manual loop is a
    generator `sum`, and the total is returned (previously None) so callers
    and tests can use the value — backward compatible, the menu ignores it.
    """
    with open("dataset.txt", "r") as dataset:
        next(dataset)  # skip the header row
        # Salary is the 5th comma-separated field (index 4).
        totalsalary = sum(int(line.split(',')[4]) for line in dataset)
    print("The total salary bill for all employee's is", format(totalsalary, ".2f"))
    return totalsalary
def avgSalaryFunction():
    """Print (and return) the average salary per employee.

    Bug fix: the original divided the salary total by the number of *lines*
    in the file — which includes the header row — so the reported average was
    always too low. The divisor is now the employee count, with a guard for
    an empty dataset (the original would raise ZeroDivisionError only when
    the file was completely empty, and mis-report otherwise).
    """
    with open("dataset.txt", "r") as dataset:
        next(dataset)  # skip the header row
        salaries = [int(line.split(',')[4]) for line in dataset]
    average = sum(salaries) / len(salaries) if salaries else 0.0
    print("The Average salary for each employee is $", format(average, ".2f"))
    return average
def groupFunction():
    """Print how many employees hold each of the four known positions."""
    tallies = {"DevOps": 0, "Developer": 0, "Tester": 0, "Analyst": 0}
    with open("dataset.txt", "r") as records:
        next(records)  # skip the header row
        for record in records:
            # The position is the fourth comma-separated field; substring
            # matching mirrors the original's `in` checks.
            role = record.split(',')[3]
            for title in tallies:
                if title in role:
                    tallies[title] += 1
    print("Currently employed are the following: ")
    print(tallies["DevOps"], "DevOps")
    print(tallies["Developer"], "Developers")
    print(tallies["Tester"], "Testers")
    print(tallies["Analyst"], "Analysts")
def queryFunction():
    """Ask for a salary threshold and return the first record at or above it.

    Bug fixes vs. the original:
    - ``salary3 >= userinput`` compared the whole split *list* against the raw
      input *string*, which raises TypeError on Python 3; the salary field is
      now compared numerically;
    - the "no employee" message fired on the first non-matching row instead of
      only after every record had been checked.
    """
    userinput = input("Salary Query. Please enter the salary threshold you would like to return: ")
    threshold = int(userinput)
    with open("dataset.txt", "r") as records:
        next(records)  # skip the header row
        for record in records:
            # Salary is the fifth comma-separated field.
            if int(record.split(',')[4]) >= threshold:
                return record
    print("No Employee earns above set threshold. ")
    time.sleep(4)
    os.system("cls")
    mainMenu1()
#SECTION 3 - MAIN FUNCTION CALL
# Entry point: hand control to the menu loop defined earlier in the file.
mainMenu1()
c065ebe8f011d1c3a7081b7ed3e4af2ce92a5ff8 | Python | jerlfan/PyMoDAQ | /src/pymodaq/daq_utils/parameter/pymodaq_ptypes/text.py | UTF-8 | 4,646 | 2.75 | 3 | [
"MIT",
"CECILL-B"
] | permissive | import os
from pathlib import Path
from qtpy import QtWidgets, QtCore, QtGui
from pyqtgraph.parametertree.Parameter import ParameterItem
from pyqtgraph.parametertree.parameterTypes.basetypes import WidgetParameterItem
from pyqtgraph.parametertree import Parameter
class PlainTextWidget(QtWidgets.QWidget):
    """Read-only plain-text display paired with a small 'add' push button.

    Emits ``value_changed`` (a ``str`` Qt signal) whenever the text content
    of the embedded editor changes.
    """
    value_changed = QtCore.Signal(str)
    def __init__(self):
        super().__init__()
        self.initUI()
        # Re-broadcast every edit of the text area through value_changed.
        self.text_edit.textChanged.connect(self.emitsignal)
    def emitsignal(self):
        """Emit ``value_changed`` with the current plain-text content."""
        self.value_changed.emit(self.text_edit.toPlainText())
    def set_value(self, txt):
        """Replace the displayed text with *txt*."""
        self.text_edit.setPlainText(txt)
    def get_value(self):
        """Return the currently displayed text as a plain string."""
        return self.text_edit.toPlainText()
    def initUI(self):
        """Build the layout: a capped-height text area plus a button column."""
        self.hor_layout = QtWidgets.QHBoxLayout()
        self.text_edit = QtWidgets.QPlainTextEdit()
        self.text_edit.setReadOnly(True)
        self.text_edit.setMaximumHeight(50)
        self.add_pb = QtWidgets.QPushButton()
        self.add_pb.setText("")
        add_icon = QtGui.QIcon()
        add_icon.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/Add2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.add_pb.setIcon(add_icon)
        self.hor_layout.addWidget(self.text_edit)
        # The button sits at the top of its own stretched column.
        button_column = QtWidgets.QVBoxLayout()
        button_column.addWidget(self.add_pb)
        button_column.addStretch()
        self.hor_layout.addLayout(button_column)
        self.hor_layout.setSpacing(0)
        self.setLayout(self.hor_layout)
class PlainTextParameterItem(WidgetParameterItem):
    """Tree item rendering a PlainTextWidget in a column-spanning child row."""
    def __init__(self, param, depth):
        super().__init__(param, depth)
        self.hideWidget = False
        # The text widget lives in a dedicated child row so it can span
        # all tree columns instead of being squeezed into the value column.
        self.subItem = QtWidgets.QTreeWidgetItem()
        self.addChild(self.subItem)
    def treeWidgetChanged(self):
        """Attach the widget to the tree once this item has been inserted."""
        # # TODO: fix so that superclass method can be called
        # # (WidgetParameter should just natively support this style)
        # WidgetParameterItem.treeWidgetChanged(self)
        self.treeWidget().setFirstItemColumnSpanned(self.subItem, True)
        self.treeWidget().setItemWidget(self.subItem, 0, self.w)
        # for now, these are copied from ParameterItem.treeWidgetChanged
        self.setHidden(not self.param.opts.get('visible', True))
        self.setExpanded(self.param.opts.get('expanded', True))
    def makeWidget(self):
        """
        Make and initialize an instance of PlainTextWidget from the parameter
        options dictionary (honouring the 'readonly' key).

        The widget is adapted to the pyqtgraph WidgetParameterItem protocol by
        aliasing value/setValue/sigChanged onto its own get/set/signal API.

        Returns
        -------
        PlainTextWidget
            The initialized widget.

        See Also
        --------
        PlainTextWidget, buttonClicked
        """
        self.w = PlainTextWidget()
        self.w.text_edit.setReadOnly(self.param.opts.get('readonly', False))
        self.w.value = self.w.get_value
        self.w.setValue = self.w.set_value
        self.w.sigChanged = self.w.value_changed
        self.w.add_pb.clicked.connect(self.buttonClicked)
        return self.w
    def buttonClicked(self):
        """Prompt for a string and append it (on a new line) to the value."""
        text, ok = QtWidgets.QInputDialog.getText(None, "Enter a value to add to the parameter",
                                                  "String value:", QtWidgets.QLineEdit.Normal)
        if ok and not (text == ""):
            self.param.setValue(self.param.value() + '\n' + text)
class PlainTextPbParameter(Parameter):
    """Editable string; displayed as large text box in the tree."""
    itemClass = PlainTextParameterItem
    # Emitted with the parameter itself when activate() is called.
    sigActivated = QtCore.Signal(object)
    def activate(self):
        """
        Send the Activated signal and record the state change.
        """
        self.sigActivated.emit(self)
        self.emitStateChanged('activated', None)
| true |
b676246660a95d623d265b4adef588806c8c4d0a | Python | chaitanya-lohar/Python-programs | /constructor_overloading.py | UTF-8 | 410 | 3.15625 | 3 | [] | no_license | class demo:
def __init__(self,a=None,b=None,c=None):
if(a!=None and b!=None and c!=None):
self.a=a
self.b=b
self.c=c
print(self.a+self.b+self.c)
elif(a!=None and b!=None):
self.a=a
self.b=b
print(self.a+self.b)
else:
self.a=a
print(self.a)
obj1=demo(10,20)
| true |
e10be3614f2d896107e3fb2866651fe862544bcf | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4490/codes/1592_1804.py | UTF-8 | 133 | 3.203125 | 3 | [] | no_license | xa = float(input())
ya = float(input())
xb = float(input())
yb = float(input())
d = ((xb-xa)**2+(ya-yb)**2)**(1/2)
print(round(d,3)) | true |
beb59979029df099b695b3a4a3fc7baa6a00b7dc | Python | redhat-openstack/ansible-pacemaker | /modules/pacemaker_is_active.py | UTF-8 | 9,424 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# (c) 2017, Sofer Athlan-Guyot <sathlang@redhat.com>
#
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DOCUMENTATION = '''
---
module: pacemaker_is_active
short_description: Check if a resource is active.
version_added: "2.3"
author: "Sofer Athlan-Guyot (chem)"
description:
- Check if a resource is completly started in a pacemaker cluster.
- This works for master/slave, clone, primitive resource.
options:
resource:
description:
- The name of the resource to check, without any "-clone", "-master"
suffix.
required: true
max_wait:
description:
- How many seconds should we wait for the resource to be active.
required: false
default: 5
'''
EXAMPLES = '''
---
- name: Ensure galera is started
hosts: localhost
gather_facts: no
tasks:
- name: galera ready
pacemaker_is_active:
resource: galera
max_wait: 10
'''
RETURN = '''
changed:
description: True if the resource is active.
type: bool
out:
description: A short summary of the resource.
type: string
sample: {"out": "Resource galera is active."}
'''
ANSIBLE_METADATA = r"""
status:
- stableinterface
supported_by: committer
version: "1.0"
"""
# Should be at the top (flake8 E402), but ansible requires that module
# import being after metadata.
import subprocess
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from lxml import etree
class Resource(object):
    """Base class for pacemaker resources; also acts as a resource factory."""
    # Overridden by subclasses ('master'/'clone'/'primitive'); None means the
    # resource could not be found/typed in the cib.
    get_type = None
    def _filter_xpath(self, xpath):
        """Evaluate *xpath* against the live cluster status (crm_mon XML)."""
        xml_string = self.mod.run_command(['crm_mon', '-r', '--as-xml'],
                                          {'check_rc': True})[1]
        tree = etree.fromstring(str(xml_string))
        return tree.xpath(xpath)
    def _current_count(self, role):
        """Count instances of this resource active in the given *role*."""
        # Only non-orphaned, non-failed, active instances in the role count.
        return int(self._filter_xpath(
            "count(//resource[@id='{0}' and {1} and {2} and {3} and {4}])"
            .format(self.name,
                    "@orphaned='false'",
                    "@failed='false'",
                    "@active='true'",
                    "@role='{0}'".format(role),
                    )
        ))
    def _get_crm_resource(self, prop):
        """Return (rc, stdout, stderr) of querying a --meta property."""
        return self.mod.run_command(
            ['crm_resource', '-r',
             self.name,
             '--meta', '-g', prop]
        )
    def _create_result(self, msg):
        """Build the common result dict used by fail() and success()."""
        return {
            'resource_type': self.get_type,
            'resource_name': self.name,
            'msg': msg,
        }
    def __init__(self, mod, resource_name):
        # mod is the AnsibleModule instance; all exits go through it.
        self.mod = mod
        self.name = resource_name
    def fail(self, msg):
        """Abort the module run with *msg*."""
        result = self._create_result(msg)
        return self.mod.fail_json(**result)
    def success(self, msg):
        """Finish the module run successfully (never reports 'changed')."""
        result = self._create_result(msg)
        result['changed'] = False
        return self.mod.exit_json(**result)
    def from_type(self):
        """Infer the type of a resource from its name. Factory method.

        Using the resource name as a parameter it returns a "Clone",
        "Master" or "Primitive" instance. If no resource matching the name
        could be found, it returns this plain "Resource" instance
        (whose get_type stays None).
        """
        res_array = self._filter_xpath(
            '//resources/*[contains(@id,"{0}")]'.format(self.name)
        )
        if len(res_array) == 0:
            return self
        res = res_array[0]
        if res.tag == 'resource':
            return Primitive(self.mod, self.name)
        elif res.tag == 'clone':
            # crm_mon flags multi-state (master/slave) clones explicitly.
            if res.get('multi_state') == 'false':
                return Clone(self.mod, self.name)
            elif res.get('multi_state') == 'true':
                return Master(self.mod, self.name)
        return self
class Master(Resource):
    """Representation of a master/slave (multi-state) resource."""
    get_type = 'master'
    def expected_count(self):
        """Return the expected number of master instances of the resource.

        Reads the resource's ``master-max`` meta attribute. A crm_resource
        return code of 6 means the attribute is unset, in which case
        pacemaker's default of one master applies. Any other failure aborts
        the module run.
        """
        rc, stdout, stderr = self._get_crm_resource('master-max')
        if rc == 0:
            return int(stdout)
        elif rc == 6:
            # Attribute not defined: pacemaker defaults to a single master.
            return 1
        return self.fail(
            "Unknown error getting crm_resource for master '{0}'."
            .format(self.name)
        )
    def current_count(self):
        """Count instances currently active in the Master role."""
        return self._current_count('Master')
class Clone(Resource):
    """Representation of a clone resource."""
    get_type = 'clone'
    def _pipe_no_shell(self, cmd1_array, cmd2_array):
        """Pipe cmd1_array into cmd2_array without using shell interpolation."""
        # Fail early (via AnsibleModule) if either binary is missing.
        self.mod.get_bin_path(cmd1_array[0], required=True)
        self.mod.get_bin_path(cmd2_array[0], required=True)
        cmd1 = subprocess.Popen(cmd1_array, stdout=subprocess.PIPE)
        cmd2 = subprocess.Popen(cmd2_array,
                                stdin=cmd1.stdout,
                                stdout=subprocess.PIPE)
        return cmd2.communicate()
    def expected_count(self):
        """Return the expected number of clone instances on the system.

        Uses the ``clone-max`` meta attribute when set (crm_resource rc 0).
        When unset (rc 6), counts the nodes carrying a
        "<resourcename>-role=true" cluster property (composable HA); if no
        node has it (pre-composable HA), falls back to the total number of
        cluster nodes. Any other return code aborts the module run.
        """
        rc, stdout, stderr = self._get_crm_resource('clone-max')
        if rc == 0:
            return int(stdout)
        elif rc == 6:
            count = int(self._pipe_no_shell(
                ['pcs', 'property'],
                ['grep', '-c',
                 "{0}-role=true".format(self.name)]
            )[0])
            if count == 0:
                return int(self._pipe_no_shell(['crm_node', '-l'],
                                               ['wc', '-l'])[0])
            else:
                return count
        # Bug fix: the original error message said "master" here (copy-paste
        # from the Master class) and misspelled "Unknown"/"getting".
        return self.fail(
            "Unknown error getting crm_resource for clone '{0}'."
            .format(self.name)
        )
    def current_count(self):
        """Count instances currently in the Started state."""
        return self._current_count("Started")
class Primitive(Clone):
    """Representation of a primitive (simple, non-cloned) resource."""
    get_type = 'primitive'
    def expected_count(self):
        # A primitive is active when exactly one instance is started; the
        # 'Started'-state counting logic is inherited from Clone.
        return 1
def is_resource_active(mod):
    """Exit the module successfully once the resource is fully active.

    The expected instance count depends on the resource type:

    a) master/slave resources must reach their configured number of masters
       (e.g. galera is master on every node meant to run it, redis on one);
    b) cloned resources must be started on every node that should run them;
    c) primitive resources (A/P resources such as cinder-volume or VIPs)
       must be started on exactly one node.

    The cluster is polled roughly once a second until the counts match or
    ``max_wait`` seconds have elapsed, in which case the module fails.
    """
    max_tries = int(mod.params["max_wait"])
    resource = Resource(mod, mod.params["resource"]).from_type()
    if resource.get_type is None:
        return resource.fail("Resource '{0}' doesn't exist in the cib.".format(
            resource.name
        ))
    expected = resource.expected_count()
    remaining = max_tries
    while True:
        if resource.current_count() == expected:
            return resource.success("{0} resource {1} is active".format(resource.get_type,
                                                                        resource.name))
        remaining -= 1
        if remaining <= 0:
            return resource.fail(
                "Max wait time of {0} seconds reached waiting for {1}".format(
                    max_tries, resource.name
                ))
        sleep(1)
def main():
    """Ansible entry point: build the module and run the activity check."""
    module = AnsibleModule(
        argument_spec={
            'resource': {'type': 'str', 'required': True},
            'max_wait': {'type': 'int', 'default': 5},  # in seconds
        }
    )
    return is_resource_active(module)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| true |
fc6baabcb736fe808f53a72fc34c0cab39b2f05a | Python | viseth89/python-100 | /mega-course/ex8.py | UTF-8 | 246 | 3.03125 | 3 | [] | no_license | student_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]
countlist = student_grades.count(10.0)
print(countlist)
# how to find out what code needed
# dir(list)
# Notice how we are using .count
# property/function of built in list | true |
1fdee6a087ea8e53f47c26b05fbbb6fe2032b431 | Python | danny-94/coroutines | /grep.py | UTF-8 | 350 | 2.828125 | 3 | [] | no_license | def grep(pattern):
    # Coroutine body: announce the pattern, then consume lines pushed in via
    # .send(), printing each one that contains the pattern.
    # NOTE(review): "looing" is presumably a typo for "looking" (runtime
    # string left unchanged here).
    print('looing for %s pattern'%pattern)
    while True:
        line = (yield)
        if pattern in line:
            print(line)
if __name__ == "__main__":
    # Drive the coroutine: prime it so execution reaches the first yield,
    # then feed it lines; only those containing the pattern are printed.
    g = grep('python')
    next(g)
    g.send('Yeah, but no, but yeah, but no')
    g.send('A series of tubes"')
    g.send('python generators rock!')
efa7899637e4d0ff1231296b2cdb54b2ef7a238f | Python | ngflanders/Project-Euler-Problems | /ProblemsPack/Twenty.py | UTF-8 | 774 | 4.09375 | 4 | [] | no_license | '''
n! means n * (n - 1) * ... * 3 * 2 * 1
For example, 10! = 10 * 9 * ... * 3 * 2 * 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
'''
def factorial(y):
    """Compute y!, print it (matching the original's side effect), return it."""
    product = 1
    for factor in range(2, y + 1):
        product *= factor
    print(product)
    return product
def sumdigits(z):
    """Return the sum of the decimal digits of the non-negative integer z."""
    return sum(int(digit) for digit in str(z))
'''
total = 0
pull = 0
while (z > 9):
pull = z % 10
total += pull
z = z / 10
# z = floor(z)
total += z
return total
'''
print(sumdigits(factorial(100)))
| true |
872b75057a2815da9f68e21fa4689ccefabeeab3 | Python | east4ming/Rocket | /game_rocket.py | UTF-8 | 856 | 3.359375 | 3 | [] | no_license | """The Rocket main function.
- 一个Screen(有背景)
- 一个Rocket
- Rocket可以上下左右移动
- Rocket不能移动出Screen
"""
import pygame
from rocket import Rocket
from settings import Settings
import game_functions as gf
def run_game():
    """Initialise pygame, create the window and rocket, and run the main loop."""
    pygame.init()
    rkt_settings = Settings()
    screen = pygame.display.set_mode((rkt_settings.screen_width, rkt_settings.screen_height))
    pygame.display.set_caption('CASEY ROCKET')
    # Create the rocket object on this screen.
    rocket = Rocket(screen)
    # Main game loop.
    while True:
        gf.check_events(rocket)
        # Respond to the arrow keys and move the rocket.
        rocket.move_rocket(rkt_settings)
        # If `screen.fill(rkt_settings.bg_color)` were placed inside the
        # event-handling for-loop of `check_events()`, the rocket would leave
        # ghost trails on screen.
        # Redraw the frame.
        gf.blime(screen, rkt_settings, rocket)
run_game()
a6edca840e33a4c0db3838aea6bea5f1956b97d9 | Python | N8Brooks/aoc_py | /aoc/year2017/day13.py | UTF-8 | 1,402 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
https://adventofcode.com/2017/day/13
"""
from heapq import heapify, heappop, heappush
from itertools import starmap
from math import log2, prod
from operator import mod
import numpy as np
from data.utils import get_input
def process(line):
    """Parse a 'depth: range' scanner line into an iterator of two ints."""
    return map(int, line.split(": "))
def part1(text):
    """Total trip severity when leaving at time 0.

    A scanner of range r sweeps with period 2*r - 2; a layer at depth d
    catches the packet when d % period == 0, adding d * r to the severity.
    """
    total = 0
    for entry in text.splitlines():
        depth, scope = (int(part) for part in entry.split(": "))
        period = scope + scope - 2
        if depth % period == 0:
            total += depth * scope
    return total
def part2(text):
    """Return the smallest delay for which no scanner catches the packet.

    A scanner (depth, range) catches delays congruent to -depth modulo its
    period 2*range - 2, so safe delays are found by sieving successive,
    doubling blocks of candidates with numpy.
    """
    quotients, scopes = zip(*map(process, text.splitlines()))
    # Sweep period of each scanner.
    divisors = tuple(scope + scope - 2 for scope in scopes)
    # Delay 0 is safe iff no depth is an exact multiple of its period.
    if all(starmap(mod, zip(quotients, divisors))):
        return 0
    lo, hi = 0, 1
    # Min-heap of (next caught delay, period), one entry per scanner.
    heapify(queue := [((d - q) % d, d) for q, d in zip(quotients, divisors)])
    # Loop bound: int(log2(prod(divisors))) + 1 doublings — presumably enough
    # because an answer below the product of all periods is assumed to exist
    # (CRT-style argument); TODO confirm.
    for _ in range(int(log2(prod(divisors))) + 1):
        # NOTE: sized `lo`, which equals the block width hi - lo from the
        # second iteration on (hi doubles each pass). The first pass covers
        # only delay 0 with a zero-length array, but delay 0 was already
        # handled above, so nothing is lost.
        delays = np.ones(lo, bool)
        while queue[0][0] < hi:
            mi, divisor = heappop(queue)
            # Mark every delay this scanner catches inside [lo, hi).
            delays[mi - lo : hi - lo : divisor] = False
            length = len(range(mi, hi, divisor))
            # Re-queue the scanner at its first caught delay at or past hi.
            heappush(queue, (mi + divisor * length, divisor))
        if len(indices := np.argwhere(delays)):
            return lo + int(indices[0])
        lo, hi = hi, hi + hi
if __name__ == "__main__": # pragma: no cover
text = get_input(2017, 13)
print(part1(text))
print(part2(text))
| true |
3b4bab4716af3376ae4781810e6789aac0cb7aff | Python | ryanpennings/workshop_swinburne_2021 | /examples/501_linear_order.py | UTF-8 | 836 | 3.3125 | 3 | [
"MIT"
] | permissive | import functools
import random
from compas.geometry import Box
# random but repeatable ;)
random.seed(2666)
@functools.total_ordering
class BoxComparer(object):
    """Sort key giving compas Boxes a total order.

    Only __eq__ and __lt__ are defined; functools.total_ordering derives the
    remaining comparison operators from them.
    """
    def __init__(self, box, *args):
        # *args absorbs any extra positional arguments so the class works as
        # a key= callable under different calling conventions.
        self.box = box
    def __eq__(self, other):
        # Equality compares the boxes' full data representations.
        return self.box.data == other.box.data
    def __lt__(self, other):
        # Ordering compares `dimensions` (presumably width/height/depth —
        # confirm against the compas Box documentation).
        return self.box.dimensions < other.box.dimensions
# Build ten boxes with random (seeded above, hence repeatable) dimensions.
bricks = set()
for i in range(10):
    w, h, d = random.random(), random.random(), random.random()
    brick = Box.from_width_height_depth(w, h, d)
    bricks.add(brick)
print('No sorting guaranteed (depends on implementation)')
# Plain set iteration: the order is an implementation detail.
for b in bricks:
    print('{:.3f}, {:.3f}, {:.3f}'.format(*b.dimensions))
print()
print('Defined total ordering')
# Sorting with BoxComparer as key applies the total order defined above.
for b in sorted(bricks, key=BoxComparer):
    print('{:.3f}, {:.3f}, {:.3f}'.format(*b.dimensions))
| true |
1871082240a841d434d7dbf00b4abaced6a25fe8 | Python | SunshineBrother/ScriptStudy | /基础知识/day03-字典/dic.py | UTF-8 | 166 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
dict = { 'abc': 123, 98.6: 37 }
print(dict["abc"])
print(dict[98.6])
print(str(dict))
print(dict.items())
print(dict.keys()) | true |
6cd8db4c427a2d067ce021cafd9e9f90a8ea4a87 | Python | BITMystery/leetcode-journey | /77. Combinations.py | UTF-8 | 580 | 3.296875 | 3 | [] | no_license | class Solution(object):
def combineList(self, l, k):
res = []
if k == 1:
for num in l:
res.append([num])
else:
for i in xrange(len(l)-k+1):
sub_res = self.combineList(l[i+1:], k-1)
for r in sub_res:
res.append([l[i]] + r)
return res
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n <= 0 or k <= 0 or n < k:
return []
return self.combineList([i for i in xrange(1, n+1)], k)
# Python 2 style driver (print statement): shows all 3-combinations of 1..5.
s = Solution()
print s.combine(5, 3)
92c7092f17435da4a045344fd7d26477109000a5 | Python | cosmologist10/sunpy | /sunpy/net/tests/test_download.py | UTF-8 | 4,152 | 2.578125 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
#pylint: disable=W0613
from __future__ import absolute_import
import pytest
import os
import tempfile
import threading
from functools import partial
import sunpy
from sunpy.net.download import Downloader, default_name
class CalledProxy(object):
    """Wrap a callable and remember whether it has ever been invoked."""
    def __init__(self, fn):
        self.fn = fn
        # Flipped to True after the first call that completes without raising.
        self.fired = False
    def __call__(self, *args, **kwargs):
        self.fn(*args, **kwargs)
        self.fired = True
class MockConfig(object):
    """Minimal stand-in for sunpy's configuration object.

    Supports only what the tests use: registering a section (a plain dict of
    options) and looking an option up by section name and option key.
    """
    def __init__(self):
        self._sections = {}
    def add_section(self, name, dct):
        self._sections[name] = dct
    def get(self, one, other):
        section = self._sections[one]
        return section[other]
def wait_for(n, callback):  # pylint: disable=W0613
    """Return a handler that collects its arguments and calls *callback*
    with the collected list once exactly *n* of them have arrived."""
    collected = []
    def _fun(handler):
        collected.append(handler)
        if len(collected) == n:
            callback(collected)
    return _fun
def path_fun(*args, **kwargs):
    """Path callback that always fails; used to exercise error handling."""
    raise ValueError()
def get_and_create_temp_directory(tmpdir):
    """Point sunpy's config at *tmpdir* and make sure the directory exists.

    Replaces the global ``sunpy.config`` with a MockConfig whose 'downloads'
    section maps 'download_dir' to *tmpdir*; callers are expected to restore
    the real config afterwards (see test_download_default_dir).
    """
    sunpy.config = MockConfig()
    sunpy.config.add_section(
        "downloads", {"download_dir": tmpdir}
    )
    if not os.path.isdir(sunpy.config.get('downloads', 'download_dir')):
        os.makedirs(sunpy.config.get('downloads', 'download_dir'))
    return sunpy.config.get('downloads', 'download_dir')
@pytest.mark.online
def test_path_exception():
    """A path function that raises must trigger the errback (and not hang)."""
    x = threading.Event()
    dw = Downloader(1, 2)
    # path_fun raises ValueError, so the errback should fire and set the event.
    dw.download(
        "http://google.at", path_fun, errback=wait_for(1, lambda a: x.set())
    )
    th = threading.Thread(target=dw.wait)
    th.daemon = True
    th.start()
    # Give the download thread up to 10 seconds to reach the errback.
    x.wait(10)
    assert x.isSet()
    dw.stop()
@pytest.mark.online
def test_download_http():
    """Three HTTP downloads complete and land at their computed paths."""
    items = []
    lck = threading.Lock()
    # Thread-safe variant of the module-level wait_for: completion callbacks
    # can arrive concurrently, so appends are guarded by a lock.
    def wait_for(n, callback):  # pylint: disable=W0613
        def _fun(handler):
            with lck:
                items.append(handler)
                if len(items) == n:
                    callback(items)
        return _fun
    tmp = tempfile.mkdtemp()
    path_fun = partial(default_name, tmp)
    dw = Downloader(1, 1)
    _stop = lambda _: dw.stop()
    # Safety valve: stop the downloader if the transfers take over a minute.
    timeout = CalledProxy(dw.stop)
    timer = threading.Timer(60, timeout)
    timer.start()
    on_finish = wait_for(3, lambda _: dw.stop())
    dw.download('http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js', path_fun, on_finish)
    dw.download('http://ajax.googleapis.com/ajax/libs/webfont/1.4.2/webfont.js', path_fun, on_finish)
    dw.download('https://raw.github.com/sunpy/sunpy/master/INSTALL.txt', path_fun, on_finish)
    # dw.download('ftp://speedtest.inode.at/speedtest-100mb', path_fun, on_finish)
    dw.wait()
    timer.cancel()
    assert len(items) == 3
    assert not timeout.fired
    for item in items:
        assert os.path.exists(item['path'])
@pytest.mark.online
def test_download_default_dir():
    """Download with only a default dir configured via sunpy.config."""
    # The global config is swapped for a mock; restore it no matter what.
    _config = sunpy.config
    try:
        tmpdir = tempfile.mkdtemp()
        path = get_and_create_temp_directory(tmpdir)
        dw = Downloader(1, 1)
        _stop = lambda _: dw.stop()
        # timeout fires only if the 10s timer expires; errback only on failure.
        timeout = CalledProxy(dw.stop)
        errback = CalledProxy(_stop)
        dw.download(
            'http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js',
            path=path,
            callback=_stop,
            errback=errback
        )
        timer = threading.Timer(10, timeout)
        timer.start()
        dw.wait()
        timer.cancel()
        assert not timeout.fired
        assert not errback.fired
        assert os.path.exists(os.path.join(tmpdir, 'jquery.min.js'))
    finally:
        sunpy.config = _config
@pytest.mark.online
def test_download_dir():
    """Download straight into an explicitly supplied directory."""
    tmpdir = tempfile.mkdtemp()
    dw = Downloader(1, 1)
    _stop = lambda _: dw.stop()
    # timeout fires only if the 10s timer expires; errback only on failure.
    timeout = CalledProxy(dw.stop)
    errback = CalledProxy(_stop)
    dw.download(
        'http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js',
        tmpdir,
        callback=_stop,
        errback=errback
    )
    timer = threading.Timer(10, timeout)
    timer.start()
    dw.wait()
    timer.cancel()
    assert not timeout.fired
    assert not errback.fired
    assert os.path.exists(os.path.join(tmpdir, 'jquery.min.js'))
| true |
14f77e8c9a4c9442f7cd2f6db9070993e0e2a738 | Python | Belval/Scanner3D | /scanner3d/scanners/step_scanner.py | UTF-8 | 3,687 | 2.59375 | 3 | [
"MIT"
] | permissive | """
steps runs the 3D reconstruction process in steps.
1- Acquisition
2- Registration
Very similar to live_scanner, but takes a group registration algorithm instead of
a pair registration algorithm.
"""
import cv2
import logging
import numpy as np
import open3d as o3d
import os
import time
from scanner3d.preprocess.estimate_normals import EstimateNormalsPreprocessor
from scanner3d.preprocess.remove_outliers import RemoveOutliersPreprocessor
from scanner3d.preprocess.downsample import DownsamplePreprocessor
from scanner3d.preprocess.preprocessor_sequence import PreprocessorSequence
from scanner3d.registration.group.base_group_reg import BaseGroupReg
from scanner3d.scanners.scanner import Scanner
from scanner3d.camera import Camera
class StepScanner(Scanner):
    """Scanner that acquires point clouds interactively, then registers them
    as a group (unlike the live scanner, which registers pairs on the fly)."""
    def __init__(
        self, log_level, registration_algorithm: BaseGroupReg, cloud_dir: str = None
    ):
        super(StepScanner, self).__init__(log_level)
        self.reg = registration_algorithm
        # NOTE(review): self.vis is never assigned after this; the 'c' key
        # branch in start() dereferences it and would raise AttributeError.
        self.vis = None
        self.pcd = None
        self.continuous_capture = False
        self.rotated_capture = False
        self.cloud_dir = cloud_dir
        # Preload any clouds already saved in cloud_dir, if one was given.
        self.pcds = (
            []
            if cloud_dir is None
            else [
                o3d.io.read_point_cloud(os.path.join(cloud_dir, f))
                for f in os.listdir(cloud_dir)
            ]
        )
        self.trans_matrices = []
    def start(self):
        """Run acquisition (keyboard-driven) then group registration.

        Keys: q quit, r rotated-capture (see note), c drop last cloud,
        g toggle continuous capture, s save one cloud.
        """
        logging.info("Starting acquisition in step scanner")
        window = cv2.namedWindow("3D Scanner", cv2.WINDOW_NORMAL)
        self.continuous_capture = False
        self.rotated_capture = False
        while cv2.getWindowProperty("3D Scanner", cv2.WND_PROP_VISIBLE) >= 1:
            # Show the colour image and depth colormap side by side.
            color_image, depth_colormap = self.camera.image_depth()
            images = np.hstack((color_image, depth_colormap))
            cv2.imshow("3D Scanner", images)
            # NOTE(review): each `cv2.waitKey(1)` call below polls the event
            # queue afresh, so a single keypress may be consumed by the wrong
            # check; the usual pattern is one waitKey per loop iteration.
            if cv2.waitKey(1) & 0xFF == ord("q"):
                cv2.destroyAllWindows()
                self.camera.stop()
                break
            if cv2.waitKey(1) & 0xFF == ord("r"):
                # NOTE(review): prints the message but never flips
                # self.rotated_capture - presumably unfinished.
                print("Rotated capture toggled")
            if cv2.waitKey(1) & 0xFF == ord("c"):
                # Discard the most recent capture.
                self.pcds.pop()
                self.vis.update(self.pcd)
                continue
            if cv2.waitKey(1) & 0xFF == ord("g"):
                print("Continuous capture toggled")
                self.continuous_capture = not self.continuous_capture
            if cv2.waitKey(1) & 0xFF == ord("s"):
                print("Saving point cloud")
                pcd = self.camera.pcd()
                if self.cloud_dir:
                    o3d.io.write_point_cloud(f"clouds/{time.time()}.pcd", pcd)
                self.pcds.append(pcd)
            if self.continuous_capture:
                # Grab a cloud every frame while continuous capture is on.
                pcd = self.camera.pcd()
                if self.cloud_dir:
                    o3d.io.write_point_cloud(f"clouds/{time.time()}.pcd", pcd)
                self.pcds.append(pcd)
        self.camera.stop()
        cv2.destroyAllWindows()
        logging.info("Starting registration in step scanner")
        # Normals + downsampling before registration (registration runs on
        # the preprocessed clouds, but the transforms are applied to the
        # original full-resolution clouds below).
        preprocessed_pcds = PreprocessorSequence(
            [
                EstimateNormalsPreprocessor(radius=0.1, max_nn=30),
                DownsamplePreprocessor(voxel_size=0.01),
            ]
        ).preprocess(self.pcds)
        transformations = self.reg.register(preprocessed_pcds)
        pcd_combined = o3d.geometry.PointCloud()
        for pcd, trans in zip(self.pcds, transformations):
            pcd.transform(trans)
            pcd_combined += pcd
        self.pcd = pcd_combined
        o3d.visualization.draw_geometries([self.pcd])
        self.save_point_cloud()
| true |
7c392c41597f87c635cb6f247fda2a7bd30c9593 | Python | nabanitapaul1/Regression | /LinearRegression/Linear_regression(Advertisement-Salary).py | UTF-8 | 1,259 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 11:36:38 2021
@author: Nabanita Paul
"""
# Linear regression (mainly focus on the problem of Multi-Collinearity)
import pandas as pd
import seaborn as sns
#1. Salary dataset
salary_data = pd.read_csv("Salary_Data.csv")
# NOTE(review): bare expressions like the three below are no-ops in a plain
# script - they only display output in a REPL/notebook. This file appears to
# be notebook-style code.
salary_data
salary_data.shape
salary_data.columns
# Predict Salary from YearsExperience and Age.
X = salary_data[["YearsExperience","Age"]]
y= salary_data["Salary"]
sns.pairplot(salary_data)
salary_data.corr()
# Correlation between the two predictors (multi-collinearity check).
salary_data.iloc[:,:2].corr()
## Linear Regression (OLS with an explicit intercept term)
import statsmodels.api as sm
X = sm.add_constant(X)
salary_lm = sm.OLS(y,X).fit()
salary_lm.summary()
# Droping the "Age" predictor (collinear with YearsExperience) and refitting.
X =X.drop(["Age"],axis=1)
X.columns
salary_lm = sm.OLS(y,X).fit()
salary_lm.summary()
#2. Advertisement Dataset
adv_data=pd.read_csv("Advertising.csv")
adv_data.head()
adv_data.columns
# Predict sales from the three advertising channels.
X = adv_data[["TV","radio","newspaper"]]
y= adv_data["sales"]
sns.pairplot(adv_data[["TV","radio","newspaper","sales"]])
adv_data.corr()
adv_data[["TV","radio","newspaper","sales"]].corr()
## Linear Regression (OLS with an explicit intercept term)
import statsmodels.api as sm
X = sm.add_constant(X)
X
adv_lm = sm.OLS(y,X).fit()
adv_lm.summary()
# Drop the weakest predictor (newspaper) and refit.
X.drop("newspaper",axis=1, inplace=True)
X
adv_lm = sm.OLS(y,X).fit()
adv_lm.summary()
| true |
f1baee1946a156ac08095f010d60aefcd3e9f08e | Python | ompilompier/Sugarscape | /SocsSugarscape/packageSOCS/NewBaseAgent.py | UTF-8 | 722 | 2.9375 | 3 | [] | no_license |
####################################################
class NewBaseAgent:
def __init__(self, xPos, yPos ,health):
self.xPos = xPos
self.yPos = yPos
self.health = health
self.stateOfBeing = 0 #0 for human 1 for zombie
#
def Move(self, grid):
# change position this
oldPosition = [self.xPos, self.yPos]
newPosition = []
tmpReturn = [oldPosition, newPosition]
return tmpReturn
#
def ChangeHealth(self, amountHealthAlteration):
self.health = self.health + amountHealthAlteration
return self.health
#
def GetState(self):
state = self.stateOfBeing
return state
#
| true |
7336c09980c5671f3f8e0ab70eee8f9ddfac0fff | Python | karthikapresannan/karthikapresannan | /Execption_handling/execption.py | UTF-8 | 712 | 3.171875 | 3 | [] | no_license | #no1=int(input("enter the number1"))#10
# no2=int(input("enter the number2"))#0
# try:
# res=no1/no2
# print(res)
# except Exception as e:
# no2=int(input("enter th number"))#0
# try:
# res=no1/no2
# print(res)
# except Exception as e:
# print(e.args)
# finally:
# print("data base operation")
# print("file write")
# num=int(input("enter the num"))#10
#
# try:
# if age<18: # actually this type expection is not found
# raise Exception("invalid age")
# except Exception as e:
# print(e.args)
# Read an integer and reject negatives by raising and catching an exception.
num= int(input("enter the num"))
try:
    if num<0:
        raise Exception("invalid")
except Exception as e:
    # e.args is the tuple of constructor arguments, here ("invalid",).
    print(e.args)
| true |
c8a40d6c03f61a80d07ee0e157962420945bb3fc | Python | Yiwans/titanic | /Titanic.py | UTF-8 | 6,969 | 2.765625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
# Algorithms
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
#------------------------------ ----------------------
#-------------------Data_imshow ---------------
#------------------------------ ----------------------
data_train = pd.read_csv('train.csv')
data_test = pd.read_csv('test.csv')
data_all = pd.concat([data_train, data_test])
print('----------------------------------------------------------------------------')
print( data_all[["Pclass","Survived"]].groupby(["Pclass"], as_index = False).mean())
print('----------------------------------------------------------------------------')
print( data_all[["Sex","Survived"]].groupby(["Sex"], as_index = False).mean())
print('----------------------------------------------------------------------------')
data_all['FamilySize'] = data_all['SibSp'] + data_all['Parch'] + 1
# --- Exploratory stats: mean survival rate grouped by feature value ---
print( data_all[["FamilySize","Survived"]].groupby(["FamilySize"], as_index = False).mean())
print('----------------------------------------------------------------------------')
print( data_all[["Embarked","Survived"]].groupby(["Embarked"], as_index = False).mean() )
print('----------------------------------------------------------------------')
#------------------------------ ----------------------
#-------------------Data_preprocessing ---------------
#------------------------------ ----------------------
#-----------------------Missing ----------------------
# Percentage of missing cells over train + test combined.
N_begin = data_all.isnull().sum()
total_cells = np.product(data_test.shape) + np.product(data_train.shape)
total_missing = data_train.isnull().sum() + data_test.isnull().sum()
Per = (total_missing/total_cells) * 100
print(Per)
print('----------------------------------------------------------------------')
#--------------------- Embarked ----------------------
data_all.Embarked.fillna('S', inplace = True)
#----------------------Cabin--------------------------
# NOTE(review): this fills missing cabins with the literal string 'NaN',
# not a real NaN value — confirm that is intentional.
data_all.Cabin.fillna('NaN', inplace = True)
#----------------------Fare--------------------------
# NOTE(review): sklearn's Imputer with an "axis" argument is the pre-0.20
# API that was later removed (replaced by SimpleImputer) — confirm the
# pinned scikit-learn version.
imputer = Imputer(missing_values='NaN',strategy='median',axis=1)
new = imputer.fit_transform(data_all.Fare.values.reshape(1,-1))
data_all['Fare'] = new.T
N_final= data_all.isnull().sum()
# Quartile bins of Fare.
data_all['Fare_category'] = pd.qcut(data_all['Fare'], 4)
print( data_all[["Fare_category","Survived"]].groupby(["Fare_category"], as_index = False).mean())
print('----------------------------------------------------------------------')
#--------------------- Title ----- --------------------
# Extract the honorific from the name and collapse rare titles.
data_all['Title'] = data_all.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
data_all['Title'] = data_all['Title'].replace(['Mlle',' Ms'], 'Miss')
data_all['Title'] = data_all['Title'].replace(['Mme','Lady', 'Dona','Countess'], 'Mrs')
data_all['Title'] = data_all['Title'].replace(['Jonkheer','Sir','Don','Dr','Capt','Col','Major','Rev'], 'Mr')
#------------------------Age -------------------------
#imputer = Imputer(missing_values='NaN',strategy='median',axis=1)
#new = imputer.fit_transform(data_all.Age.values.reshape(1,-1))
#data_all['Age'] = new.T
# Fill missing ages with random integers drawn from [mean-std, mean+std).
mean = data_all["Age"].mean()
std = data_all["Age"].std()
is_null = data_all["Age"].isnull().sum()
rand_age = np.random.randint(mean - std, mean + std, size = is_null)
age_slice = data_all["Age"].copy()
age_slice[np.isnan(age_slice)] = rand_age
data_all["Age"] = age_slice
data_all["Age"] = data_all["Age"].astype(int)
data_all["Age"].isnull().sum()
data_all['Age_category'] = pd.cut(data_all['Age'], 4)
print( data_all[["Age_category","Survived"]].groupby(["Age_category"], as_index = False).mean() )
print('----------------------------------------------------------------------')
#------------------------------ ----------------------
#--------------Categ signs encoding-------------
#------------------------------ ----------------------
# Integer-encode every categorical column in place.
data_all['Sex'] = LabelEncoder().fit_transform(data_all['Sex'])
data_all['Title'] = LabelEncoder().fit_transform(data_all['Title'])
data_all['Age_category'] = LabelEncoder().fit_transform(data_all['Age_category'])
data_all['Embarked'] = LabelEncoder().fit_transform(data_all['Embarked'])
data_all['Fare_category'] = LabelEncoder().fit_transform(data_all['Fare_category']);
#pd.get_dummies(data_all.Embarked, prefix="Emb", drop_first = True);
data_all.drop(['Name', 'Ticket', 'Cabin', 'SibSp', 'Parch', 'Fare','Age'], axis=1, inplace=True);
##------------------------------ ----------------------
##---------------------Data_preparing---------------
##------------------------------ ----------------------
# Split the concatenated frame back into train and test parts.
train_dataset = data_all[:len(data_train)]
test_dataset = data_all[len(data_train):]
test_dataset.drop(['Survived'], axis=1, inplace=True)
Y_train = train_dataset["Survived"]
X_train = train_dataset.drop(['PassengerId','Survived'], axis=1).copy()
X_test = test_dataset.drop('PassengerId', axis=1)
##------------------------------ ----------------------
##---------------------RandForestClassif---------------
##------------------------------ ----------------------
RFClassif = RandomForestClassifier(criterion='entropy',n_estimators=600,
                                   min_samples_split = 20,
                                   min_samples_leaf=1,
                                   max_features='auto',
                                   oob_score=True,
                                   random_state=1,
                                   n_jobs=-1)
# Hold-out split for a quick accuracy estimate, plus 10-fold CV.
x_train, x_test, y_train, y_test = train_test_split(X_train, Y_train, test_size=0.3)
RFClassif.fit(x_train, np.ravel(y_train))
print("RF_accur: " + repr(round(RFClassif.score(x_test, y_test) * 100, 2)) + "%")
result_rf = cross_val_score(RFClassif,x_train,y_train,cv=10,scoring='accuracy')
print('CrossValidScore',round(result_rf.mean()*100,2))
y_pred = cross_val_predict(RFClassif,x_train,y_train,cv=10)
##------------------------------ ----------------------
##---------------------Result_predict---------------
##------------------------------ ----------------------
# Predict on the test split and write the Kaggle submission file.
result = RFClassif.predict(X_test)
submission = pd.DataFrame({'PassengerId':test_dataset.PassengerId,'Survived':result})
submission.Survived = submission.Survived.astype(int)
print(submission.shape)
filename = 'Titanic_final.csv'
submission.to_csv(filename,index=False)
print('Saved file: ' + filename)
| true |
d93459af460a4181a3057c6372cf863a166f8c5c | Python | Parthimosm/Python- | /Udacity-Python/work timer.py | UTF-8 | 513 | 3.171875 | 3 | [] | no_license | import time
import webbrowser
import sys
# Interactive kill-switch demo: optionally start, poll until the user types
# "stop", then wait ten seconds and open a video in the default browser.
running = False
answer = input("Do you want this program to start? (y/n): ")
if answer == "y":
    print("The Program has started")
    running = True
else:
    print("The Program will not start")
while running:
    command = input("Type stop to stop")
    if command == "stop":
        running = False
        print("The Program has stopped")
        time.sleep(10)
        webbrowser.open("https://www.youtube.com/watch?v=FrZRIW87eWI")
| true |
bc35b9fff9712202ef2f82ef4c296e76f914925b | Python | Peiyu-Rang/LeetCode | /138. Copy List with Random Pointer.py | UTF-8 | 3,675 | 3.765625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 13 20:59:18 2021
@author: Caven
"""
class Solution(object):
    """Clone a linked list with random pointers via an original->copy map."""

    def __init__(self):
        # Maps each original node to the copy created for it.
        self.visited = {}

    def getClonedNode(self, node):
        """Return the clone of *node*, creating and caching it on first use."""
        if node is None:
            return None
        if node not in self.visited:
            self.visited[node] = Node(node.val, None, None)
        return self.visited[node]

    def copyRandomList(self, head):
        """
        :type head: Node
        :rtype: Node
        """
        if not head:
            return head
        # Seed the map with the head's copy, then walk the originals and
        # wire each copy's random/next to the (lazily created) clones.
        self.visited[head] = Node(head.val, None, None)
        src = head
        while src is not None:
            dup = self.visited[src]
            dup.random = self.getClonedNode(src.random)
            dup.next = self.getClonedNode(src.next)
            src = src.next
        return self.visited[head]
"""
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
class Solution:
    """Clone via an explicit worklist and a node -> copy dictionary."""

    def copyRandomList(self, head: 'Node') -> 'Node':
        if not head:
            return head
        copy_of = {}

        def copy_for(node):
            # Fetch (or lazily create) the clone of *node*.
            if node not in copy_of:
                copy_of[node] = Node(node.val)
            return copy_of[node]

        worklist = [head]
        while worklist:
            node = worklist.pop()
            clone = copy_for(node)
            if node.next:
                clone.next = copy_for(node.next)
                worklist.append(node.next)
            if node.random:
                clone.random = copy_for(node.random)
        return copy_of[head]
"""
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
class Solution:
    """O(1)-extra-space clone: weave copies into the list, wire the random
    pointers, then unweave the two lists again."""

    def copyRandomList(self, head: 'Node') -> 'Node':
        if not head:
            return head
        # Pass 1: insert each node's copy right after it (A -> A' -> B -> B').
        orig = head
        while orig:
            dup = Node(orig.val)
            dup.next = orig.next
            orig.next = dup
            orig = dup.next
        # Pass 2: a copy's random is the node right after the original's random.
        orig = head
        while orig:
            orig.next.random = orig.random.next if orig.random else None
            orig = orig.next.next
        # Pass 3: split the interleaved list back into original and copy.
        clone_head = head.next
        orig = head
        while orig:
            dup = orig.next
            orig.next = dup.next
            dup.next = dup.next.next if dup.next else None
            orig = orig.next
        return clone_head
151d261da3082df4c74561b165a00320fa801014 | Python | AnoopKunju/code_eval | /easy/penultimate_word.py2 | UTF-8 | 579 | 3.875 | 4 | [] | no_license | #!/usr/bin/env python2
# encoding: utf-8
"""
Penultimate Word.
Challenge Description:
Write a program which finds the next-to-last word in a string.
Input sample:
Your program should accept as its first argument a path to a filename. Input
example is the following
some line with text
another line
Each line has more than one word.
Output sample:
Print the next-to-last word in the following way.
with
another
"""
import sys
# NOTE: Python 2 script (see the "print" statement below).
# NOTE(review): "input" here shadows the builtin of the same name —
# harmless in this short script, but worth renaming.
with open(sys.argv[1], 'r') as input:
    test_cases = input.read().strip().splitlines()
for test in test_cases:
    # Second-to-last whitespace-separated word; raises IndexError on a line
    # with fewer than two words (the problem guarantees more than one).
    print test.split()[-2]
| true |
41d806e4f397e94390db5a3a125cf8e02f1d893c | Python | us0173ol/RMmgr | /items.py | UTF-8 | 705 | 3.390625 | 3 | [] | no_license | class Item:
'''Each item that is on the menu(for now, can add more later)'''
'''Will also be used to add items to the menu and change prices'''
Null_Index = None
    def __init__(self, itemName, itemPrice, itemID=Null_Index):
        """Store the item's name, price and optional ID (defaults to None)."""
        self.itemID = itemID
        self.itemName = itemName
        self.itemPrice = itemPrice
# def set_index(self, index):
# self.index = index
def __str__(self):
if self.index is None:
index_str = '(none)'
else:
index_str = self.index
return 'Index: {} ItemID: {} ItemName: {} ItemPrice: ${:.2f}' \
.format(self.index, self.itemID, self.itemName.title(), self.itemPrice)
| true |
6e1ba7b70fdd5409fe5ae69cbc1f806b09e50494 | Python | mziv/Tomi | /rooms.py | UTF-8 | 6,038 | 2.59375 | 3 | [] | no_license | import discord
import asyncio
import os
from discord.ext import commands
class Rooms(commands.Cog):
    """
    All things related to entering/joining rooms of the house.
    """
    def __init__(self, bot):
        self.bot = bot
        # Create a shared lock to protect code which cannot run concurrently.
        self.lock = asyncio.Lock()

    # TODO(rachel0): allow picking a channel name to use for publishing log
    # of people entering voice channels.
    @commands.Cog.listener()
    async def on_voice_state_update(self, member, prev, cur):
        """Mirror voice-channel membership into a matching role and private
        text channel; tear both down when the last member leaves.

        Bug fix: the original called logger.error(), but no "logger" is ever
        defined or imported in this file, so those branches raised NameError.
        The diagnostics now use print(), consistent with the rest of the cog.
        """
        # If this is some update other than entering/leaving a channel, ignore it.
        if cur.channel and prev.channel and cur.channel.id == prev.channel.id:
            return

        # Lock this region so multiple members joining a channel at once can't
        # trigger concurrent on_voice_state_update handlers and mess up the state.
        async with self.lock:
            guild = cur.channel.guild if cur.channel else prev.channel.guild

            # Possibly entering a voice channel.
            if cur.channel is not None:
                # Set up a private text channel for this voice channel if it doesn't exist already.
                role = discord.utils.get(guild.roles, name=cur.channel.name)
                if role is None:
                    role = await guild.create_role(name=cur.channel.name)
                    # Only holders of the new role may read the text channel.
                    overwrites = {
                        guild.default_role: discord.PermissionOverwrite(read_messages=False),
                        role: discord.PermissionOverwrite(read_messages=True)
                    }
                    channel_name = cur.channel.name.lower()
                    print(f"Creating text channel {channel_name}")
                    channel = await guild.create_text_channel(channel_name, overwrites=overwrites,
                                                              category=cur.channel.category,
                                                              position=cur.channel.position)
                    print(f"Adding {member.display_name} to {role.name}")
                    await member.add_roles(role)
                    comms_channel = discord.utils.get(guild.channels, name="comms")
                    if comms_channel is not None:
                        await comms_channel.send(f"{member.display_name} has joined the {channel_name}!")
                    # Greet with the room's picture if one exists (jpg preferred).
                    jpg_image_path = os.path.join('images', f"{channel_name}.jpg")
                    png_image_path = os.path.join('images', f"{channel_name}.png")
                    if os.path.isfile(jpg_image_path):
                        await channel.send(f"Welcome to the {channel_name}!",
                                           file=discord.File(jpg_image_path))
                    elif os.path.isfile(png_image_path):
                        await channel.send(f"Welcome to the {channel_name}!",
                                           file=discord.File(png_image_path))
                    else:
                        await channel.send(f"Welcome to the {channel_name}! There isn't a picture to show, since this room is still under construction. Please send suggestions in the #renovations channel!")
                # Role already existed, but the user is newly joining the channel.
                elif role not in member.roles:
                    print(f"Adding {member.display_name} to {role.name}")
                    await member.add_roles(role)
                    channel = discord.utils.get(guild.channels, name=cur.channel.name.lower())
                    await channel.send(f"Hi {member.display_name}!")
                    comms_channel = discord.utils.get(guild.channels, name="comms")
                    if comms_channel is not None:
                        await comms_channel.send(f"{member.display_name} has joined the {cur.channel.name.lower()}!")
                # The user defeaned, turned on their video or otherwise triggered
                # this event without actually leaving/joining a channel
                else:
                    print(f"{member} already has role {role}")
            # Possibly leaving a voice channel.
            if prev.channel is not None:
                role = discord.utils.get(guild.roles, name=prev.channel.name)
                # Do error checking, you know, just in case.
                if role is None:
                    print(f"{prev.channel.name} is not a role!")
                    return
                if role not in member.roles:
                    print(f"{member} does not have role '{role}'!")
                    return
                await member.remove_roles(role)
                print(f"Removing {member.display_name} from {role.name}")
                # We're the last one out, so turn off the lights.
                if len(prev.channel.members) == 0:
                    await role.delete()
                    text_channel = discord.utils.get(guild.text_channels, name=prev.channel.name.lower())
                    print(f"Deleting text channel {prev.channel.name.lower()}")
                    await text_channel.delete()
        # Dead code kept for reference (string literal, never executed).
        '''
        ### Note: does not work right now. Supposed to auto-play playlist if user enters the library, has a playlist registered, and has auto-play on
        #if cur.channel.name != "Library" or prev.channel.name == "Library":
            #return
        for guild in bot.guilds:
            if guild.name == "Bot Testing Server":
                # Temp channel
                text_channel = bot.get_channel(752221173546221598)
                print("asdf")
                ### this part does not work!
                print(guild.voiceConnection)
                print("fdsa")
                await autoplay_playlist_helper(text_channel, cur.channel, member, guild.voiceConnection)
                return
            elif guild.name == "The Co-op":
                # Library
                text_channel = bot.get_channel(708882378877173811)
                await autoplay_playlist_helper(text_channel, cur.channel, member, guild.voiceConnection)
                return
        '''
def setup(bot):
    # discord.py extension entry point: register the Rooms cog on load.
    bot.add_cog(Rooms(bot))
| true |
d4881020867d97b75fc521d49ffb5a605618314d | Python | shumka-kik/basic_python_course | /lesson3/lesson3_task6.py | UTF-8 | 1,513 | 4.625 | 5 | [] | no_license | #6. Реализовать функцию int_func(), принимающую слово из маленьких латинских букв и возвращающую его же,
# но с прописной первой буквой. Например, print(int_func(‘text’)) -> Text.
# Продолжить работу над заданием. В программу должна попадать строка из слов, разделенных пробелом.
# Каждое слово состоит из латинских букв в нижнем регистре. Сделать вывод исходной строки,
# но каждое слово должно начинаться с заглавной буквы. Необходимо использовать написанную ранее функцию int_func().
def int_func(user_text):
    """Capitalize every space-separated word that consists solely of
    lowercase ASCII letters; all other words are left untouched.

    Returns a two-line report showing the original and processed strings.
    """
    processed = ""
    for token in user_text.split(" "):
        # 'a' <= ch <= 'z' is exactly the ASCII range 97..122.
        if all("a" <= ch <= "z" for ch in token):
            processed += " " + token.capitalize()
        else:
            processed += " " + token
    return f"Исходная строка: {user_text}\n Обработанная строка: {processed}"
# Demo: read one line of words from stdin and print the before/after report.
print(int_func(input("Введите слово/слова, разделенные пробелом, из маленьких латинских букв:")))
325063258510da240ad77222b00e5ae42be432d2 | Python | ChangxingJiang/LeetCode | /0801-0900/0895/0895_Python_1.py | UTF-8 | 1,110 | 3.84375 | 4 | [] | no_license | import collections
class FreqStack:
    """Stack that pops the most frequent element; ties are broken by
    picking the candidate closest to the top of the stack."""

    def __init__(self):
        # count[x] == number of copies of x currently on the stack.
        self.count = collections.Counter()
        self.stack = []

    def push(self, x: int) -> None:
        self.count[x] += 1
        self.stack.append(x)

    def pop(self) -> int:
        # Collect every element tied for the highest frequency.
        ranked = self.count.most_common()
        top_freq = ranked[0][1]
        candidates = []
        for value, freq in ranked:
            if freq != top_freq:
                break
            candidates.append(value)
        # Among the candidates, remove the occurrence nearest the stack top.
        for pos in range(len(self.stack) - 1, -1, -1):
            if self.stack[pos] in candidates:
                self.count[self.stack[pos]] -= 1
                return self.stack.pop(pos)
if __name__ == "__main__":
obj = FreqStack()
obj.push(5)
obj.push(7)
obj.push(5)
obj.push(7)
obj.push(4)
obj.push(5)
print(obj.pop()) # 5
print(obj.pop()) # 7
print(obj.pop()) # 5
print(obj.pop()) # 4
| true |
c9c1337f9b57a1c1a91a7e6a8d1e3d788044deeb | Python | carlosfigueroa97/class-26-10 | /MainCilindro.py | UTF-8 | 470 | 3.671875 | 4 | [] | no_license | """
Nombre: MainCilindro.py
Objetivo: INstanciar a clase cilindro.
Autor: tomado de
Fecha: 9/11/2019
"""
from tkinter import *
from Cilindro import Cilindro
def main():
    """Build the demo window, add the coordinate labels and start Tk's loop."""
    window = Tk()
    window.title("Objetos tipo Cilindro")
    label_x = Label(window, text="Coordenada en X: ")
    label_x.grid(column=5, row=5)
    label_y = Label(window, text="Coordenada en Y: ")
    label_y.grid(column=5, row=8)
    # The cylinder instance is created but never referenced again below.
    cilindro = Cilindro(10, 10, 12.13, 25.11)
    window.mainloop()

if __name__ == "__main__":
    main()
d18123953a5fffbdc3ed7548aeb12aa06ae59c48 | Python | JagritiG/interview-questions-answers-python | /code/set_2_linkedlist/707_design_linked_list.py | UTF-8 | 6,773 | 4.59375 | 5 | [] | no_license | # Design Linked List
# Design your implementation of the linked list. You can choose to use the singly linked list or the doubly linked list.
# A node in a singly linked list should have two attributes: val and next. val is the value of the current node,
# and next is a pointer/reference to the next node. If you want to use the doubly linked list, you will need one more
# attribute prev to indicate the previous node in the linked list. Assume all nodes in the linked list are 0-indexed.
# Implement these functions in your linked list class:
# get(index) : Get the value of the index-th node in the linked list. If the index is invalid, return -1.
# addAtHead(val) : Add a node of value val before the first element of the linked list. After the insertion,
# the new node will be the first node of the linked list.
# addAtTail(val) : Append a node of value val to the last element of the linked list.
# addAtIndex(index, val) : Add a node of value val before the index-th node in the linked list.
# If index equals to the length of linked list, the node will be appended to the end of linked list.
# If index is greater than the length, the node will not be inserted.
# deleteAtIndex(index) : Delete the index-th node in the linked list, if the index is valid.
# Example:
# Input:
# ["MyLinkedList","addAtHead","addAtTail","addAtIndex","get","deleteAtIndex","get"]
# [[],[1],[3],[1,2],[1],[1],[1]]
# Output:
# [null,null,null,null,2,null,3]
# Explanation:
# MyLinkedList linkedList = new MyLinkedList(); // Initialize empty LinkedList
# linkedList.addAtHead(1);
# linkedList.addAtTail(3);
# linkedList.addAtIndex(1, 2); // linked list becomes 1->2->3
# linkedList.get(1); // returns 2
# linkedList.deleteAtIndex(1); // now the linked list is 1->3
# linkedList.get(1); // returns 3
# Constraints:
# 0 <= index,val <= 1000
# Please do not use the built-in LinkedList library.
# At most 2000 calls will be made to get, addAtHead, addAtTail, addAtIndex and deleteAtIndex.
# ============================================================================================
# Algorithm:
#
# TC:
# SC: https://www.youtube.com/watch?v=nrUCaiCG29w
# ============================================================================================
class ListNode:
    """A singly-linked-list node: a value plus a successor pointer."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

    def __repr__(self):
        """Printable representation: just the stored value."""
        return f"{self.val}"
class MyLinkedList:
    """Singly linked list (LeetCode 707) with a head pointer and cached size."""

    def __init__(self):
        """Initialize an empty list."""
        self.head = None
        self.size = 0

    def get(self, index: int) -> int:
        """Return the value of the index-th node, or -1 if index is invalid.

        Robustness fix: negative indices are now rejected explicitly instead
        of walking from the head with undefined results.
        """
        if index < 0 or index >= self.size:
            return -1
        curr = self.head
        position = 0
        while position < index and curr.next:
            curr = curr.next
            position += 1
        return curr.val

    def add_at_head(self, val: int) -> None:
        """Insert a node holding val before the current first node."""
        new_node = ListNode(val)
        new_node.next = self.head
        self.head = new_node
        self.size += 1

    def add_at_tail(self, val: int) -> None:
        """Append a node holding val after the last node.

        Bug fix: the original dereferenced self.head.next unconditionally,
        raising AttributeError when the list was empty; that case now
        delegates to add_at_head.
        """
        if self.head is None:
            self.add_at_head(val)
            return
        curr = self.head
        while curr.next:
            curr = curr.next
        curr.next = ListNode(val)
        self.size += 1

    def add_at_index(self, index: int, val: int) -> None:
        """Insert val before the index-th node; index == size appends.

        Indices greater than the length are ignored; negative indices insert
        at the head (LeetCode 707 semantics).
        """
        if index > self.size:
            return
        if index <= 0:
            self.add_at_head(val)
        elif index == self.size:
            self.add_at_tail(val)
        else:
            new_node = ListNode(val)
            curr = self.head
            position = 0
            # Stop on the node just before the insertion point.
            while position < index - 1:
                curr = curr.next
                position += 1
            new_node.next = curr.next
            curr.next = new_node
            self.size += 1

    def delete_at_index(self, index: int) -> None:
        """Delete the index-th node if the index is valid; otherwise no-op."""
        if index < 0 or index >= self.size:
            return
        if index == 0:
            self.head = self.head.next
        else:
            curr = self.head
            position = 0
            # Stop on the node just before the one being removed.
            while position < index - 1:
                curr = curr.next
                position += 1
            curr.next = curr.next.next
        self.size -= 1

    # =========================================================
    def size_list(self):
        """Walk the list and count nodes (independent of the cached size)."""
        size = 0
        curr = self.head
        while curr:
            curr = curr.next
            size += 1
        return size

    def print_list(self):
        """Print every element of the list, one per line."""
        curr = self.head
        while curr:
            print(curr.val)
            curr = curr.next
# Input:
# ["MyLinkedList","addAtHead","addAtTail","addAtIndex","get","deleteAtIndex","get"]
# [[],[1],[3],[1,2],[1],[1],[1]]
# Output:
# [null,null,null,null,2,null,3]
sll = MyLinkedList()
# sll.print_list()
# print("\n")
sll.add_at_head(1)
sll.add_at_tail(3)
# sll.print_list()
# print("\n")
sll.add_at_index(1, 2)
# sll.print_list()
# print("\n")
#
# print(sll.get(1))
# sll.print_list()
# print("\n")
#
# sll.delete_at_index(1)
# sll.print_list()
# print("\n")
#
print(sll.get(1))
sll.print_list()
# Your MyLinkedList object will be instantiated and called as such:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index)
| true |
3f9539af10919fddc06d79c983e7b07c3edd4f8a | Python | luanrr98/Logica-Computacao-e-Algoritmos-Lista1 | /Exercicio1.py | UTF-8 | 645 | 4.34375 | 4 | [] | no_license | #Crie um algoritmo que receba, como entrada, o valor de três notas de um aluno - com valor entre 0 e 10, e, em seguida, informe a média entre elas.
#Neste momento, não é necessário validar se a nota está dentro do intervalo válido!
def calculo_media(nota1, nota2, nota3):
    """Print the arithmetic mean of three grades, rounded to 2 decimals."""
    media = round((nota1 + nota2 + nota3) / 3, 2)
    print(f"A média das notas foi: {media}.")
print("\nCalculo de média de notas! *Digite apenas notas entre 0 e 10\n")
nota1= float(input("Digite a primeira nota: "))
nota2= float(input("Digite a segunda nota: "))
nota3= float(input("Digite a terceira nota: "))
calculo_media(nota1,nota2,nota3)
| true |
1c8be5175ea9ef9c1c377769c310e1fe93111774 | Python | daniel880423/PoTsen | /Image recognition sample 0419/Menu.py | UTF-8 | 2,938 | 3.75 | 4 | [] | no_license | # -*- coding: utf-8 -*-
import tkinter as tk # Tkinter must be imported before use
# Step 1: instantiate the Tk object, creating the window.
window = tk.Tk()
# Step 2: give the window its title.
window.title('My Window')
# Step 3: set the window size (width x height).
window.geometry('500x300') # the separator here is a lowercase x
# Step 4: create a label on the window to display content, and place it.
l = tk.Label(window, text=' ', bg='green')
l.pack()
# Step 10: define a simple function used as the action behind the menu
# entries created below.
counter = 0
def do_job():
    """Menu callback: display 'do <counter>' on the label, then increment."""
    global counter
    l.config(text='do '+ str(counter))
    counter += 1
# 第5步,創建一個功能表列,這裡我們可以把他理解成一個容器,在視窗的上方
menubar = tk.Menu(window)
# 第6步,創建一個File功能表項目(預設不下拉,下拉內容包括New,Open,Save,Exit功能項)
filemenu = tk.Menu(menubar, tearoff=0)
# 將上面定義的空功能表命名為File,放在功能表列中,就是裝入那個容器中
menubar.add_cascade(label='File', menu=filemenu)
# 在File中加入New、Open、Save等小功能表,即我們平時看到的下拉式功能表,每一個小功能表對應命令操作。
filemenu.add_command(label='New', command=do_job)
filemenu.add_command(label='Open', command=do_job)
filemenu.add_command(label='Save', command=do_job)
filemenu.add_separator() # 添加一條分隔線
filemenu.add_command(label='Exit', command=window.quit) # 用tkinter裡面自帶的quit()函數
# 第7步,創建一個Edit功能表項目(預設不下拉,下拉內容包括Cut,Copy,Paste功能項)
editmenu = tk.Menu(menubar, tearoff=0)
# 將上面定義的空功能表命名為 Edit,放在功能表列中,就是裝入那個容器中
menubar.add_cascade(label='Edit', menu=editmenu)
# 同樣的在 Edit 中加入Cut、Copy、Paste等小命令功能單元,如果點擊這些單元, 就會觸發do_job的功能
editmenu.add_command(label='Cut', command=do_job)
editmenu.add_command(label='Copy', command=do_job)
editmenu.add_command(label='Paste', command=do_job)
# 第8步,創建第二級菜單,即功能表項目裡面的菜單
submenu = tk.Menu(filemenu) # 和上面定義功能表一樣,不過此處實在File上創建一個空的功能表
filemenu.add_cascade(label='Import', menu=submenu, underline=0) # 給放入的菜單submenu命名為Import
# 第9步,創建第三級功能表命令,即功能表項目裡面的功能表項目裡面的功能表命令(有點拗口,笑~~~)
submenu.add_command(label='Submenu_1', command=do_job) # 這裡和上面創建原理也一樣,在Import功能表項目中加入一個小功能表命令Submenu_1
# 第11步,創建功能表列完成後,配置讓功能表列menubar顯示出來
window.config(menu=menubar)
# 第12步,主視窗迴圈顯示
window.mainloop()
| true |
3be733ecc71eb18f220bf3f72c2f5b8485540098 | Python | kjblakemore/Data-Science | /Sentiment/top_ten.py | UTF-8 | 1,539 | 3.59375 | 4 | [] | no_license | #
# Compute the top ten most frequently occurring hashtags in the twitter data.
#
import sys
import json
from collections import Counter
def main():
    # NOTE: Python 2 code (dict.has_key and the print statement below).
    # NOTE(review): tweet_file is never closed — consider a with-statement.
    tweet_file = open(sys.argv[1])
    # Load the tweets: one JSON document per line, keyed by line number.
    lines = tweet_file.readlines()
    results = {} # a list of tweets as dictionaries
    for i in range(len(lines)):
        pyresponse = json.loads(lines[i])
        results[i] = pyresponse
    # Create a dictionary of hashtags and their counts
    dictionary = {}
    for i in range(len(results)): # for each tweet in the file
        entities = {}
        if(results[i].has_key("entities")):
            entities = results[i]["entities"]
        hashtags = []
        if(entities.has_key("hashtags")):
            hashtags = entities["hashtags"]
        for j in range(len(hashtags)):
            hashtag = hashtags[j]["text"]
            if(dictionary.has_key(hashtag)): # Already in dictionary, increment count
                dictionary[hashtag] += 1
            else: # New entry in dictionary
                dictionary[hashtag] = 1
    # Sort dictionary and print top 10 hashtags and their values
    sorted_dictionary = Counter(dictionary)
    for hashtag, count in sorted_dictionary.most_common(10):
        encoded_hashtag = hashtag.encode('utf-8')
        print '%s: %i' % (encoded_hashtag, count)

if __name__ == '__main__':
    main()
6b4bc5ffd31c2664d4d7b1402bfd0ce53ab84c5e | Python | vedantsankhe/Test_Demo | /classes.py | UTF-8 | 1,883 | 3.859375 | 4 | [] | no_license | #class programs
class a:
    # f() only prints; it has no return value.
    def f(self):
        print("hello")
a1=a()
print(a1.f()) # prints "hello", then "None" (f returns nothing)
#
class d:
    # Class attribute; shadowed per-instance by __init__ below.
    a=4
    def __init__(self,a):
        self.a=a
        print(a)
    def f(this):
        # Any name works for the receiver; "self" is merely the convention.
        print("hello")
d1=d(2)
print(d1.a) # instance attribute set in __init__, so 2
#const
class d:
    a=8111
    def __init__(self,a):
        self.a=a
        print(a)
    # A class body keeps only the LAST def of a given name, so this second
    # __init__ replaces the one above: instances get .b but never .a.
    def __init__(self,b):
        self.b=b
        print("this is 2nd const:",b)
    def f(self):
        print("hello")
d1=d(40)
print(d1.a) # no instance attribute a, so the class attribute 8111 is shown
print(d1.b)
#self
class a:
def __init__(self):
print("one")
def __init__(self):
print("two")
def __init__(self):
print("three")
print(a())
#_init_
class Person:
    """Minimal example class: a constructor plus one greeting method."""
    # init method or constructor
    def __init__(self, name):
        self.name = name
    # Sample Method
    def say_hi(self):
        print('Hello, my name is', self.name)
p = Person('Vedant')
p.say_hi()
#
class Employee:
    """Simple employee record with a class-wide leave allowance."""

    no_of_leaves = 8

    def __init__(self, aname, asalary, arole):
        self.name = aname
        self.salary = asalary
        self.role = arole

    def printdetails(self):
        """Return a one-line summary of this employee."""
        return f"The Name is {self.name}. Salary is {self.salary} and role is {self.role}"

    @classmethod
    def change_leaves(cls, newleaves):
        """Update the leave allowance for the whole class."""
        cls.no_of_leaves = newleaves
class Programmer(Employee):
    """Employee subclass that additionally records known languages."""
    no_of_holiday = 56
    def __init__(self, aname, asalary, arole, languages):
        # NOTE(review): duplicates Employee.__init__; calling
        # super().__init__(aname, asalary, arole) would avoid the repetition.
        self.name = aname
        self.salary = asalary
        self.role = arole
        self.languages = languages
    def printprog(self):
        return f"The Programmer's Name is {self.name}. Salary is {self.salary} and role is {self.role}.The languages are {self.languages}"
person = Employee("robert", 255, "Instructor")
stark = Employee()
stark.name = "Rohan"
stark.salary = 4554
stark.role = "Student"
bruce = Programmer("bruce", 555, "Programmer", ["python"]) | true |
761002592f856409565644774d714469e3e5676b | Python | mehmoh41/django-book-store | /book_outlet/models.py | UTF-8 | 2,212 | 2.59375 | 3 | [] | no_license | from django.core import validators
from django.db import models
from django.core.validators import MinValueValidator , MaxValueValidator
from django.urls import reverse
# Create your models here.
class Country(models.Model):
    """A country, stored with its name and a short code (max two chars)."""

    name = models.CharField(max_length=50)
    code = models.CharField(max_length=2)

    class Meta:
        verbose_name_plural = "Countries"

    def __str__(self):
        return f"{self.name}, {self.code}"
class Address(models.Model):
    """Postal address (linked one-to-one from Author)."""

    street = models.CharField(max_length=100)
    postal_code = models.CharField(max_length=5)
    city = models.CharField(max_length=30)

    class Meta:
        # Inner Meta classes carry model-level configuration.
        verbose_name_plural = "Address Entries"

    def __str__(self):
        return f"{self.street}, {self.postal_code}, {self.city}"
class Author(models.Model):
    """Book author with a one-to-one link to an Address."""

    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    # Deleting the linked Address also deletes this Author (CASCADE).
    address = models.OneToOneField(Address, on_delete=models.CASCADE, null=True)

    def full_name(self):
        """Return 'first last'."""
        return f"{self.first_name} {self.last_name}"

    def __str__(self):
        return self.full_name()
class Book(models.Model):
    """A book with rating, author, publication countries and a URL slug."""

    title = models.CharField(max_length=50)
    # Rating constrained to the inclusive range 1..5 by validators.
    rating = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])
    # Deleting an Author cascades to their books.
    author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True, related_name="books")
    # NOTE(review): field name keeps the historical misspelling
    # ("counteries"); renaming it would require a schema migration.
    published_counteries = models.ManyToManyField(Country, related_name="countries")
    is_best_selling_book = models.BooleanField(default=False)
    slug = models.SlugField(default="", blank=True, null=False, db_index=True)

    def get_absolute_url(self):
        """URL of this book's detail page, addressed by its slug."""
        return reverse("book-detail", args=[self.slug])

    def __str__(self):
        return f"{self.title} ({self.rating}) ({self.is_best_selling_book}) author: ({self.author}) slug: ({self.slug})"
16388616f26be494b7bab2d374b6bc40c9866f98 | Python | rahulmr/My-Python-Projects | /Task/newTask.py | UTF-8 | 658 | 3 | 3 | [] | no_license | import collections
# test_case=int(input())
# for i in range(test_case):
# inputs = int(input())
# name=[0 for r in range(inputs)]
# tweet=[0 for r in range(inputs)]
# for j in range(inputs):
# name[j],tweet[j] = input().split()
# freq={}
# for item in name:
# if item in freq:
# freq[item]+=1
# else:
# freq[item]=1
# all_values = freq.values()
# max_value = max(all_values)
# for key,value in freq.items():
# if value == max_value:
# print(key, value)
#
res = lambda a,b: a*b # NOTE(review): defined but never used below
s = "mango is a is bold fruit"
a = s.split()
# Frequency count of each word in the sentence.
print(collections.Counter(a))
| true |
371d17cc4cae3a3f677d43e6c5798517dc3d14ed | Python | yukraven/vitg | /Archive/Sources/Tools/LocationLoader.py | UTF-8 | 2,235 | 2.875 | 3 | [] | permissive | import json
import random
from Archive.Sources.Resources.Location import Location
class LocationLoader:
    """Loads location name lists from JSON files and serves them randomly."""

    jsonFiles = []
    locationsNames = [[]]
    unusedLocationsNames = [[]]

    def __init__(self, jsonFiles):
        """Read every JSON file and collect its top-level keys as the
        location names for that file."""
        self.jsonFiles = jsonFiles
        self.locationsNames = []
        self.unusedLocationsNames = []
        index = 0
        for jsonFile in self.jsonFiles:
            self.locationsNames.append([])
            with open(jsonFile, "r") as readFile:
                data = json.load(readFile)
            if data:
                for name in data:
                    self.locationsNames[index].append(name)
            index += 1
        # Bug fix: list.copy() is shallow, so the per-file name lists were
        # shared between unusedLocationsNames and locationsNames, and
        # getNextRandomLocation's pop() silently drained the master list.
        # Copy each inner list as well.
        self.unusedLocationsNames = [names.copy() for names in self.locationsNames]

    def getRandomLocation(self):
        """Return a Location picked uniformly at random (with replacement)."""
        randomJsonNumber = random.randint(0, len(self.jsonFiles)-1)
        jsonPath = self.jsonFiles[randomJsonNumber]
        randomLocationNumber = random.randint(0, len(self.locationsNames[randomJsonNumber])-1)
        locationName = self.locationsNames[randomJsonNumber][randomLocationNumber]
        return self.createLocation(jsonPath, locationName)

    def getNextRandomLocation(self):
        """Return a random Location not handed out before (without replacement).

        NOTE(review): loops forever once every name has been consumed —
        callers must not request more locations than exist.
        """
        locationName = ""
        jsonPath = ""
        while locationName == "":
            randomJsonNumber = random.randint(0, len(self.jsonFiles)-1)
            jsonPath = self.jsonFiles[randomJsonNumber]
            if len(self.unusedLocationsNames[randomJsonNumber]) > 0:
                randomLocationNumber = random.randint(0, len(self.unusedLocationsNames[randomJsonNumber])-1)
                locationName = self.unusedLocationsNames[randomJsonNumber][randomLocationNumber]
                self.unusedLocationsNames[randomJsonNumber].pop(randomLocationNumber)
        return self.createLocation(jsonPath, locationName)

    def createLocation(self, jsonPath, locationName):
        """Re-read the JSON file and build a Location for locationName."""
        with open(jsonPath, "r") as readFile:
            data = json.load(readFile)
        jsonLocation = data[locationName]
        dictResource = {"name": locationName, **jsonLocation["dictResource"]}
        dictLocation = jsonLocation["dictLocation"]
        return Location(dictResource, dictLocation)
| true |
a09da9094b41921596df14cf7acc98e1f72abfd8 | Python | Wakamesky/spmod | /y2t/Original/Polynominal_interpolation.py | UTF-8 | 881 | 3.296875 | 3 | [] | no_license | # -*- coding : utf-8 -*-
"""
y = 2 * t (ガウシアンノイズなし) の多項式補間
"""
# import required packages
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
t_0 = np.array(np.linspace(start=1, stop=15, num=15, dtype=int))
t_1 = np.array(np.linspace(start=1, stop=15, num=1500, dtype=int))
print(t_0)
# t (1, 15)
y = 2 * t_0
Phi = np.vander(t_0)
x = np.dot(np.linalg.inv(Phi), y.transpose())
# y (1, 15)
# Phi (15, 15)
print("y : ", y, " y_shape : ", np.shape(y))
print("Phi : ", Phi, " Phi_shape : ", np.shape(Phi))
print("x : ", x, " x_shape : ", np.shape(x))
# Get Polynominal interpolation
polynominal = np.poly1d([i for i in x])
print(polynominal)
p = [p for p in polynominal(t_1)]
# print(p)
# Plot data
plt.plot(t_0, y, "o", label="data")
plt.plot(t_1, p, "-", label="poly")
plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.show() | true |
0475ff8d41574db66e9ea4f1caaeec06b77417bd | Python | vangavenunath/WebServiceAPI | /resources/tasks.py | UTF-8 | 2,457 | 2.71875 | 3 | [] | no_license | from flask import Response, request
from flask_restful import Resource
from database.interactions import DatabaseInteractions
class TaskApi(Resource):
    """REST endpoints for assigning, updating and removing employee tasks."""

    def put(self):
        """Update the task record of the employee named in the JSON body."""
        db = DatabaseInteractions()
        print("PUT method is called")
        result = db.update_employee_tasks(request.json['EmployeeName'][0])
        return result, 200

    def post(self):
        """Assign the task named in the JSON body to the given employee."""
        db = DatabaseInteractions()
        print("POST Method is called")
        print(request.json)
        result = db.insert_employee_tasks(
            request.json['EmployeeName'][0], request.json['TaskName'][0])
        return result, 200

    def delete(self):
        """Placeholder delete handler: only logs the payload."""
        print("DELETE method is called")
        print(request.json)
        return 'Success', 201
class EmployeeLoginLogoutApi(Resource):
    """Clock-in / clock-out endpoints for a single employee."""

    def put(self, employee_name):
        """Update the employee's clock-in/out record."""
        db = DatabaseInteractions()
        print("PUT method is called")
        result = db.update_employee_clockinout(employee_name)
        return result, 200

    def post(self, employee_name):
        """Create a clock-in record for the employee."""
        db = DatabaseInteractions()
        print("POST Method is called")
        print(request.json)
        result = db.insert_employee_clockin(employee_name)
        return result, 200

    def get(self, employee_name):
        """Report the employee's current clock-in/out status."""
        db = DatabaseInteractions()
        print("GET Method is called")
        print(request.json)
        result = db.get_employee_clock_in_out_status(employee_name)
        return result, 200
class EmployeeTasksApi(Resource):
    """Read-only endpoint returning the task list for one employee."""
    def get(self, employee_name):
        dbobj = DatabaseInteractions()
        print("EmployeeTasksApi GET method is called")
        print(employee_name)
        # NOTE(review): eval() on a DB-produced string is fragile and unsafe if
        # the data can ever contain untrusted content; the "null" -> '""'
        # substitution suggests the string is JSON, in which case json.loads
        # would be the safer parser (though it yields None instead of "").
        json_data = eval(dbobj.get_tasks_by_employee_name(employee_name).replace("null",'""'))
        return json_data, 200
class TasksApi(Resource):
    """Lookup endpoint for tasks matching a task name."""
    def get(self, task_name):
        dbobj = DatabaseInteractions()
        print("TasksApi GET method is called")
        # NOTE(review): eval() on the DB string is unsafe for untrusted data;
        # prefer json.loads if the payload is JSON -- verify against
        # DatabaseInteractions.get_tasks.
        tasks = eval(dbobj.get_tasks(task_name))
        return tasks, 200
class EmployeesApi(Resource):
    """Lookup endpoint for employees matching a name."""

    def get(self, employee_name):
        """Return employee records whose name matches employee_name."""
        dbobj = DatabaseInteractions()
        # BUGFIX: the debug message previously said "TasksApi GET method is
        # called" (copy-paste error from the TasksApi class).
        print("EmployeesApi GET method is called")
        # NOTE(review): eval() on a DB-produced string is unsafe for untrusted
        # data; prefer json.loads if the payload is JSON.
        employees = eval(dbobj.get_employees(employee_name))
        return employees, 200
class EmployeeClockApi(Resource):
    """Read-only endpoint exposing an employee's clock-in/out status."""

    def get(self, employee_name):
        """Return the current clock status for the named employee."""
        db = DatabaseInteractions()
        print("EmployeeClockApi GET method is called")
        current_status = db.get_clock_in_out_status(employee_name)
        return current_status, 200
577396ec1c0fa724fa3b3d9ecef63428de9571a8 | Python | ErickRyu/PythonPlayground | /start/loop.py | UTF-8 | 222 | 3 | 3 | [] | no_license |
def test_loop1():
    """Print the integers 0..99, one per line."""
    # cleanup: dropped the pointless pre-initialisation of the loop variable
    # (`i = 0`) and the unused local `a = i`; printed output is unchanged.
    for i in range(100):
        print(i)
def test_loop2():
    """Print the integers 0..99, one per line."""
    # cleanup: removed the unused local `a = i`; printed output is unchanged.
    for i in range(100):
        print(i)
if __name__ == "__main__":
test_loop1()
test_loop2()
| true |
304d72a06bb118562e96f51bd2d46500299ed772 | Python | lassefolkersen/highfrontier | /gui_components.py | UTF-8 | 49,212 | 2.890625 | 3 | [] | no_license | import math
#from ocempgui.widgets.Constants import *
#from ocempgui.object import BaseObject
#from ocempgui.widgets import *
import global_variables
import pygame
import primitives
import time
import random
class entry():
    """
    Single-line text entry box.

    Renders a white box on the given surface and accumulates typed characters
    (up to max_letters), optionally restricted to a whitelist of characters.
    Backspace ("\\x08") deletes the last character.
    """
    def __init__(self, surface, topleft, width, max_letters, starting_text = "", restrict_input_to = " QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm1234567890"):
        self.surface = surface                # pygame surface to draw on
        self.topleft = topleft                # (x, y) of the box's top-left corner
        self.width = width                    # box width in pixels
        self.height = 30                      # fixed box height in pixels
        self.max_letters = max_letters        # maximum number of characters accepted
        self.text = starting_text             # current contents of the entry
        self.restrict_input_to = restrict_input_to  # allowed characters, or None for no restriction
        self.rect = pygame.Rect(self.topleft[0], self.topleft[1], self.width, self.height)
        self.active = True                    # whether key events are accepted
        self.draw()

    def receive_text(self, event):
        """Handle a pygame key event: backspace deletes, other characters
        are appended if allowed and there is room."""
        # BUGFIX/cleanup: removed the stray debug print(event) and the
        # commented-out experiments that were left in this method.
        if self.active:
            if event.unicode == "\x08":  # backspace: drop the last character
                self.text = self.text[0:(len(self.text)-1)]
                self.draw()
            else:
                if self.restrict_input_to is not None:
                    if event.unicode not in self.restrict_input_to:
                        return
                if len(self.text) < self.max_letters:
                    self.text = self.text + event.unicode
                    self.draw()

    def activate(self, position):
        """Mark the entry as active; position (the click point) is ignored."""
        self.active = True

    def draw(self):
        """Render the box, its bevelled border and the current text."""
        pygame.draw.rect(self.surface, (255,255,255), self.rect)
        pygame.draw.rect(self.surface, (0,0,0), self.rect, 1)
        # dark bevel lines along the top and left edges
        pygame.draw.line(self.surface, (0,0,0), (self.topleft[0], self.topleft[1] + 1), (self.topleft[0] + self.width - 1, self.topleft[1] + 1), 2)
        pygame.draw.line(self.surface, (0,0,0), (self.topleft[0] + 1, self.topleft[1]), (self.topleft[0] + 1, self.topleft[1] + self.height - 1), 2)
        rendered_text = global_variables.standard_font.render(self.text, True, (0,0,0))
        self.surface.blit(rendered_text, (self.topleft[0] + 5, self.topleft[1] + 6))
        pygame.display.flip()
class vscrollbar():
    """
    Vertical scroll bar: an up-arrow, a trough with a slider, and a down-arrow.

    Clicking an arrow steps the position; clicking the trough jumps to the
    clicked fraction of the value range. After every change the callback is
    invoked as function(position, function_parameter).
    """
    def __init__(self,surface, function, topleft, length_of_bar_in_pixel, range_of_values, range_seen = None, start_position = 0, function_parameter=None):
        """
        Draws a scroll bar
        surface                 pygame surface to draw on
        function                callback invoked as function(position, function_parameter)
        topleft                 (x, y) of the bar's top-left corner
        length_of_bar_in_pixel  An integer with the length of the bar in pixels
        range_of_values         A tuple giving the values at each end of the bar
        range_seen              Optional integer with how much of range_of_values is seen at a
                                given time (eg. the number of entries in a scrolled list
                                visible). Defaults to None, which is equal to a square
                                slider (for time-settings etc).
        start_position          initial position, must lie within range_of_values
        """
        # --- argument validation -------------------------------------------
        if not isinstance(length_of_bar_in_pixel, int):
            raise Exception("length_of_bar_in_pixel must be an integer")
        if not isinstance(range_of_values, tuple):
            raise Exception("range_of_values must be a tuple")
        if len(range_of_values) != 2:
            raise Exception("range_of_values must be a tuple of length 2")
        if range_of_values[1] < range_of_values[0]:
            raise Exception("the first entry in range_of_values must be smaller than the second")
        if range_of_values[0] < 0 or range_of_values[1] < 0:
            raise Exception("range_of_values cannot contain negative entries")
        if not isinstance(range_of_values[0], int):
            raise Exception("range_of_values[0] must be an integer")
        if not isinstance(range_of_values[1], int):
            # BUGFIX: this message previously said "range_of_values[0]"
            raise Exception("range_of_values[1] must be an integer")
        if start_position < range_of_values[0] or start_position > range_of_values[1]:
            raise Exception("start position must be within the range_of_values")
        # a zero-width value range can never scroll
        if range_of_values[1] - range_of_values[0] <= 0:
            self.unmovable = True
        else:
            self.unmovable = False
        if range_seen is not None:
            if not isinstance(range_seen, int):
                raise Exception("if given, range_seen must be an integer")
            if range_seen <= 0:
                raise Exception("if given, range_seen must be above zero")
            # seeing the whole range (or more) also means nothing to scroll
            if range_seen > (range_of_values[1] - range_of_values[0]):
                self.unmovable = True
        # --- state ----------------------------------------------------------
        self.surface = surface
        self.topleft = topleft
        self.length_of_bar_in_pixel = length_of_bar_in_pixel
        self.range_of_values = range_of_values
        self.range_seen = range_seen
        self.width = 20                 # bar thickness == arrow button size in pixels
        self.function = function
        self.function_parameter = function_parameter
        self.position = start_position
        self.rect = pygame.Rect(self.topleft[0],self.topleft[1],self.width,self.length_of_bar_in_pixel)
        self.draw()

    def calculate_extent_of_slider(self):
        """
        Function that calculates at what points (in pixels) the slider should be based on
        self.position, self.range_of_values, self.range_seen and self.length_of_bar_in_pixel.
        Returns a length-two tuple with the start and end in pixel measured from topleft
        """
        if not self.unmovable:
            if self.range_seen is None:
                #the simple case with a square slider. First calculate the operating-space (ie. all except end-arrows and space for the actual slider
                operational_length = self.length_of_bar_in_pixel - 3 * self.width
                #the fraction of the operational space at which the start of the slider is (as given in self.position)
                percentage_position = float(self.position - self.range_of_values[0]) / float(self.range_of_values[1] - self.range_of_values[0])
                start_of_slider = int(percentage_position * operational_length) + self.width
                return (start_of_slider, start_of_slider + self.width)
            else: #for scrolledlist etc.
                #the more complicated case with a variable length slider. First calculate the operating-space (ie. all except end-arrows and space for the actual slider
                operational_length_without_slider = self.length_of_bar_in_pixel - 2 * self.width
                percentage_taken_by_slider = float(self.range_seen) / float(self.range_of_values[1] - self.range_of_values[0])
                length_of_slider = int(operational_length_without_slider * percentage_taken_by_slider)
                operational_length = operational_length_without_slider - length_of_slider
                #the fraction of the operational space at which the start of the slider is (as given in self.position)
                percentage_position = float(self.position - self.range_of_values[0]) / float(self.range_of_values[1] - self.range_of_values[0])
                start_of_slider = int(percentage_position * operational_length) + self.width
                return (start_of_slider, start_of_slider + length_of_slider)
        else: #if unmovable, the slider fills the entire trough
            return (self.width, self.length_of_bar_in_pixel - self.width)

    def draw(self):
        """Render the frame, the bevelled slider and the two arrow buttons."""
        #draw frame
        pygame.draw.rect(self.surface,(212,212,212),self.rect)
        pygame.draw.rect(self.surface,(0,0,0),self.rect,1)
        #draw slider (white highlight top/left, black shadow bottom/right)
        extent_of_slider = self.calculate_extent_of_slider()
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + extent_of_slider[0]), (self.topleft[0] + 2, self.topleft[1] + extent_of_slider[1]),2) #vertical
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + extent_of_slider[0]), (self.topleft[0] + self.width - 2, self.topleft[1] + extent_of_slider[0]),2) #horizontal
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + self.width - 2, self.topleft[1] + extent_of_slider[0]), (self.topleft[0] + self.width - 2, self.topleft[1] + extent_of_slider[1]),2) #vertical
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0], self.topleft[1] + extent_of_slider[1]), (self.topleft[0] + self.width - 2, self.topleft[1] + extent_of_slider[1]),2) #horizontal
        #draw up-arrow
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + 2), (self.topleft[0] + 2, self.topleft[1] + self.width - 2),2) #vertical white
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + 2), (self.topleft[0] + self.width - 2, self.topleft[1] + 2),2) #horizontal white
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + self.width - 2, self.topleft[1] + 2), (self.topleft[0] + self.width - 2, self.topleft[1] + self.width - 2),2) #vertical black
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + 2, self.topleft[1] + self.width - 2), (self.topleft[0] + self.width - 2, self.topleft[1] + self.width - 2),2) #horizontal black
        pygame.draw.polygon(self.surface, (0,0,0), [ (self.topleft[0] + self.width /2, self.topleft[1] + 6), (self.topleft[0] + 5, self.topleft[1] + 12), (self.topleft[0] + self.width - 5, self.topleft[1] +12)])
        #draw down-arrow
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + 2 + self.length_of_bar_in_pixel - self.width), (self.topleft[0] + 2, self.topleft[1] + self.length_of_bar_in_pixel - 2),2) #vertical white
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + 2 + self.length_of_bar_in_pixel - self.width), (self.topleft[0] + self.width - 2, self.topleft[1] + 2 + self.length_of_bar_in_pixel - self.width),2) #horizontal white
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + self.width - 2, self.topleft[1] + 2 + self.length_of_bar_in_pixel - self.width), (self.topleft[0] + self.width - 2, self.topleft[1] + self.length_of_bar_in_pixel - 2),2) #vertical black
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + 2, self.topleft[1] + self.length_of_bar_in_pixel - 2), (self.topleft[0] + self.width - 2, self.topleft[1] + self.length_of_bar_in_pixel - 2),2) #horizontal black
        pygame.draw.polygon(self.surface, (0,0,0), [ (self.topleft[0] + self.width/2, self.topleft[1] - 6 + self.length_of_bar_in_pixel), (self.topleft[0] + 5, self.topleft[1] - 12 + self.length_of_bar_in_pixel), (self.topleft[0] + self.width - 5, self.topleft[1] - 12 + self.length_of_bar_in_pixel)])
        pygame.display.flip()

    def receive_click(self, event):
        """Event-style entry point used by the GUI event dispatcher."""
        self.activate(event.pos)

    def activate(self, pos):
        """
        Will distribute a click according to if it is on the slider or on the arrows.
        For now, no mouse-down sliding of sliders :-(
        """
        if not self.unmovable:
            if pos[1] - self.topleft[1] < self.width: #up-arrow
                if self.range_seen is None:
                    if self.position > self.range_of_values[0]:
                        self.position = self.position - 1
                else:
                    # page up by one screenful, clamped to the lower bound
                    if self.position - self.range_seen > self.range_of_values[0]:
                        self.position = self.position - self.range_seen
                    else:
                        self.position = self.range_of_values[0]
            elif pos[1] - self.topleft[1] > self.length_of_bar_in_pixel - self.width: #down-arrow
                if self.range_seen is None:
                    if self.position < self.range_of_values[1]:
                        self.position = self.position + 1
                else:
                    # page down by one screenful, clamped to the upper bound
                    if self.position + self.range_seen < self.range_of_values[1]:
                        self.position = self.position + self.range_seen
                    else:
                        self.position = self.range_of_values[1]
            else:
                # click in the trough: jump to the clicked fraction of the range
                operational_space = self.length_of_bar_in_pixel - 2 * self.width
                percentage_pos = (pos[1] - self.topleft[1] - self.width) / float(operational_space)
                self.position = int((self.range_of_values[1] - self.range_of_values[0]) * percentage_pos) + self.range_of_values[0]
            self.draw()
            self.function(self.position,self.function_parameter)
class hscrollbar():
    """
    Horizontal scroll bar: a left-arrow, a trough with a slider, and a
    right-arrow. Clicking an arrow steps the position; clicking the trough
    jumps to the clicked fraction of the value range. After every change the
    callback is invoked as function(position, function_parameter).
    """
    def __init__(self,surface, function, topleft, length_of_bar_in_pixel, range_of_values, range_seen = None, start_position = 0, function_parameter=None):
        """
        Draws a scroll bar
        surface                 pygame surface to draw on
        function                callback invoked as function(position, function_parameter)
        topleft                 (x, y) of the bar's top-left corner
        length_of_bar_in_pixel  An integer with the length of the bar in pixels
        range_of_values         A tuple giving the values at each end of the bar
        range_seen              Optional integer with how much of range_of_values is seen at a
                                given time (eg. the number of entries in a scrolled list
                                visible). Defaults to None, which is equal to a square
                                slider (for time-settings etc).
        start_position          initial position, must lie within range_of_values
        """
        # --- argument validation -------------------------------------------
        if not isinstance(length_of_bar_in_pixel, int):
            raise Exception("length_of_bar_in_pixel must be an integer")
        if not isinstance(range_of_values, tuple):
            raise Exception("range_of_values must be a tuple")
        if len(range_of_values) != 2:
            raise Exception("range_of_values must be a tuple of length 2")
        if range_of_values[1] < range_of_values[0]:
            raise Exception("the first entry in range_of_values must be smaller than the second: " + str(range_of_values))
        if range_of_values[0] < 0 or range_of_values[1] < 0:
            raise Exception("range_of_values cannot contain negative entries: " + str(range_of_values))
        # cleanup: both checks previously read "isinstance(x, int) or
        # isinstance(x, int)" -- the same test repeated (likely a Python 2
        # int/long leftover); a single isinstance is equivalent.
        if not isinstance(range_of_values[0], int):
            raise Exception("range_of_values[0] must be an integer. It was " + str(range_of_values[0]))
        if not isinstance(range_of_values[1], int):
            raise Exception("range_of_values[1] must be an integer. It was " + str(range_of_values[1]))
        if start_position < range_of_values[0] or start_position > range_of_values[1]:
            raise Exception("start position must be within the range_of_values. It was " + str(start_position) + " and range_of_values were: " + str(range_of_values))
        # a zero-width value range can never scroll
        if range_of_values[1] - range_of_values[0] <= 0:
            self.unmovable = True
        else:
            self.unmovable = False
        if range_seen is not None:
            if not isinstance(range_seen, int):
                raise Exception("if given, range_seen must be an integer")
            if range_seen <= 0:
                raise Exception("if given, range_seen must be above zero")
            # seeing the whole range (or more) also means nothing to scroll
            if range_seen > (range_of_values[1] - range_of_values[0]):
                self.unmovable = True
        # --- state ----------------------------------------------------------
        self.surface = surface
        self.topleft = topleft
        self.length_of_bar_in_pixel = length_of_bar_in_pixel
        self.range_of_values = range_of_values
        self.range_seen = range_seen
        self.width = 20                 # bar thickness == arrow button size in pixels
        self.function = function
        self.function_parameter = function_parameter
        self.position = start_position
        self.rect = pygame.Rect(self.topleft[0],self.topleft[1],self.length_of_bar_in_pixel,self.width)
        self.draw()

    def calculate_extent_of_slider(self):
        """
        Function that calculates at what points (in pixels) the slider should be based on
        self.position, self.range_of_values, self.range_seen and self.length_of_bar_in_pixel.
        Returns a length-two tuple with the start and end in pixel measured from topleft
        """
        if not self.unmovable:
            if self.range_seen is None:
                #the simple case with a square slider. First calculate the operating-space (ie. all except end-arrows and space for the actual slider
                operational_length = self.length_of_bar_in_pixel - 3 * self.width
                #the fraction of the operational space at which the start of the slider is (as given in self.position)
                percentage_position = float(self.position - self.range_of_values[0]) / float(self.range_of_values[1] - self.range_of_values[0])
                start_of_slider = int(percentage_position * operational_length) + self.width
                return (start_of_slider, start_of_slider + self.width)
            else: #for scrolledlist etc.
                #the more complicated case with a variable length slider. First calculate the operating-space (ie. all except end-arrows and space for the actual slider
                operational_length_without_slider = self.length_of_bar_in_pixel - 2 * self.width
                percentage_taken_by_slider = float(self.range_seen) / float(self.range_of_values[1] - self.range_of_values[0])
                length_of_slider = int(operational_length_without_slider * percentage_taken_by_slider)
                operational_length = operational_length_without_slider - length_of_slider
                #the fraction of the operational space at which the start of the slider is (as given in self.position)
                percentage_position = float(self.position - self.range_of_values[0]) / float(self.range_of_values[1] - self.range_of_values[0])
                start_of_slider = int(percentage_position * operational_length) + self.width
                return (start_of_slider, start_of_slider + length_of_slider)
        else: #if unmovable, the slider fills the entire trough
            return (self.width, self.length_of_bar_in_pixel - self.width)

    def draw(self):
        """Render the frame, the bevelled slider and the two arrow buttons."""
        #draw frame
        pygame.draw.rect(self.surface,(212,212,212),self.rect)
        pygame.draw.rect(self.surface,(0,0,0),self.rect,1)
        #draw slider (white highlight top/left, black shadow bottom/right)
        extent_of_slider = self.calculate_extent_of_slider()
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + extent_of_slider[0], self.topleft[1] + 2), (self.topleft[0] + extent_of_slider[0], self.topleft[1] + self.width - 2),2) #vertical white
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + extent_of_slider[0], self.topleft[1] + 2), (self.topleft[0]+ extent_of_slider[1] - 2, self.topleft[1] + 2),2) #horizontal white
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + extent_of_slider[1] - 2, self.topleft[1] + 2), (self.topleft[0] + extent_of_slider[1] - 2, self.topleft[1] + self.width - 2),2) #vertical black
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + extent_of_slider[0], self.topleft[1] + self.width - 2), (self.topleft[0] + extent_of_slider[1] - 2, self.topleft[1] + self.width - 2),2) #horizontal black
        #draw left-arrow
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + 2), (self.topleft[0] + 2, self.topleft[1] + self.width - 2),2) #vertical white
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + 2, self.topleft[1] + 2), (self.topleft[0] + self.width - 2, self.topleft[1] + 2),2) #horizontal white
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + self.width - 2, self.topleft[1] + 2), (self.topleft[0] + self.width - 2, self.topleft[1] + self.width - 2),2) #vertical black
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + 2, self.topleft[1] + self.width - 2), (self.topleft[0] + self.width - 2, self.topleft[1] + self.width - 2),2) #horizontal black
        pygame.draw.polygon(self.surface, (0,0,0), [ (self.topleft[0] + 6, self.topleft[1] + self.width/2), (self.topleft[0] + 12, self.topleft[1] + 5), (self.topleft[0] + 12, self.topleft[1] - 5 + self.width)])
        #draw right-arrow
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + self.length_of_bar_in_pixel - self.width + 2, self.topleft[1] + 2), (self.topleft[0] - 2 + self.length_of_bar_in_pixel, self.topleft[1] + 2),2) #horizontal white
        pygame.draw.line(self.surface,(255,255,255),(self.topleft[0] + self.length_of_bar_in_pixel + 2 - self.width, self.topleft[1] + 2), (self.topleft[0] + self.length_of_bar_in_pixel + 2 - self.width, self.topleft[1] - 2 + self.width),2) #vertical white
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + self.length_of_bar_in_pixel - self.width + 2, self.topleft[1] - 2 + self.width), (self.topleft[0] - 2 + self.length_of_bar_in_pixel, self.topleft[1] - 2 + self.width),2) #horizontal black
        pygame.draw.line(self.surface,(0,0,0),(self.topleft[0] + self.length_of_bar_in_pixel - 2, self.topleft[1] + 2), (self.topleft[0] + self.length_of_bar_in_pixel - 2, self.topleft[1] - 2 + self.width),2) #vertical black
        pygame.draw.polygon(self.surface, (0,0,0), [ (self.topleft[0] + self.length_of_bar_in_pixel - 6, self.topleft[1] + self.width/2), (self.topleft[0] + self.length_of_bar_in_pixel - 12, self.topleft[1] + 5), (self.topleft[0] + self.length_of_bar_in_pixel - 12, self.topleft[1] - 5 + self.width)])
        pygame.display.flip()

    def receive_click(self, event):
        """Event-style entry point, added for consistency with vscrollbar."""
        self.activate(event.pos)

    def activate(self, pos):
        """
        Will distribute a click according to if it is on the slider or on the arrows.
        For now, no mouse-down sliding of sliders :-(
        """
        if not self.unmovable:
            if pos[0] - self.topleft[0] < self.width: #left-arrow
                if self.range_seen is None:
                    if self.position > self.range_of_values[0]:
                        self.position = self.position - 1
                else:
                    # page left by one screenful, clamped to the lower bound
                    if self.position - self.range_seen > self.range_of_values[0]:
                        self.position = self.position - self.range_seen
                    else:
                        self.position = self.range_of_values[0]
            elif pos[0] - self.topleft[0] > self.length_of_bar_in_pixel - self.width: #right-arrow
                if self.range_seen is None:
                    if self.position < self.range_of_values[1]:
                        self.position = self.position + 1
                else:
                    # page right by one screenful, clamped to the upper bound
                    if self.position + self.range_seen < self.range_of_values[1]:
                        self.position = self.position + self.range_seen
                    else:
                        self.position = self.range_of_values[1]
            else:
                # click in the trough: jump to the clicked fraction of the range
                operational_space = self.length_of_bar_in_pixel - 2 * self.width
                percentage_pos = (pos[0] - self.topleft[0] - self.width) / float(operational_space)
                self.position = int((self.range_of_values[1] - self.range_of_values[0]) * percentage_pos) + self.range_of_values[0]
            self.draw()
            self.function(self.position,self.function_parameter)
class radiobuttons():
    """
    A vertical group of radio buttons, one row per entry in `labels`.

    Exactly one label is selected at a time; clicking a row selects it and
    invokes function(selected_label, function_parameter).
    """
    def __init__(self, labels, surface, function, function_parameter = None, topleft = (0,0), selected = None):
        self.textheight = 15            # pixel height of one label row
        self.topleft = topleft
        self.labels = labels
        self.surface = surface
        self.function = function
        self.function_parameter = function_parameter
        if selected is None:
            self.selected = self.labels[0]
        else:
            if selected in self.labels:
                self.selected = selected
            else:
                raise Exception("The pre-selected radiobutton " + str(selected) + " was not found in labels")
        # click-sensitive area only covers the 20-px wide button column
        self.rect = pygame.Rect(self.topleft[0], self.topleft[1], 20, len(labels)*self.textheight)
        self.draw()

    def activate(self, pos):
        """Select the label whose row contains the clicked y-coordinate
        and notify the callback."""
        selected_pos = (pos[1] - self.topleft[1]) // self.textheight
        # ROBUSTNESS: clamp so a click exactly on (or beyond) the bottom edge
        # of the rect cannot raise IndexError on self.labels.
        selected_pos = max(0, min(selected_pos, len(self.labels) - 1))
        self.selected = self.labels[selected_pos]
        self.update_radiobuttons()
        self.function(self.selected, self.function_parameter)

    def draw(self):
        """Render all labels once, then draw the button circles."""
        for i, label in enumerate(self.labels):
            rendered_label = global_variables.standard_font.render(label, True, (0,0,0))
            self.surface.blit(rendered_label, (self.topleft[0] + 20, self.topleft[1] + self.textheight * i))
        self.update_radiobuttons()

    def update_radiobuttons(self):
        """Redraw the circles, filling only the currently selected one."""
        for i, label in enumerate(self.labels):
            center = (self.topleft[0] + 10, self.topleft[1] + self.textheight // 2 + self.textheight * i)
            pygame.draw.circle(self.surface, (255,255,255), center, 6)
            pygame.draw.circle(self.surface, (0,0,0), center, 6, 1)
            if label == self.selected:
                pygame.draw.circle(self.surface, (0,0,0), center, 4)
        pygame.display.flip()
class button():
    """
    A clickable push button.

    The size defaults to the rendered label plus padding unless fixed_size is
    given. activate() briefly flashes the pressed look, then invokes the
    callback as function(label, function_parameter) and returns its result.
    """
    def __init__(self, label, surface, function, function_parameter = None, topleft = (0,0), fixed_size = None):
        self.padding = 5
        self.label = label
        self.surface = surface
        self.function = function
        self.function_parameter = function_parameter
        self.rendered_label = global_variables.standard_font.render(self.label, True, (0,0,0))
        if fixed_size is None:
            text_w, text_h = self.rendered_label.get_size()
            self.rect = pygame.Rect(topleft[0], topleft[1], text_w + 2 * self.padding, text_h + 2 * self.padding)
        else:
            self.rect = pygame.Rect(topleft[0], topleft[1], fixed_size[0], fixed_size[1])
        self.draw()

    def activate(self, pos):
        """Flash the pressed state, run the callback, pass back its result."""
        self.draw_pressed()
        return self.function(self.label, self.function_parameter)

    def draw(self):
        """Draw the button in its normal (raised) state."""
        x, y, w, h = self.rect
        pygame.draw.rect(self.surface, (212,212,212), self.rect)
        pygame.draw.rect(self.surface, (0,0,0), self.rect, 1)
        # white highlight along the top and left edges gives the raised look
        pygame.draw.line(self.surface, (255,255,255), (x, y), (x, y + h))
        pygame.draw.line(self.surface, (255,255,255), (x, y), (x + w, y))
        self.surface.blit(self.rendered_label, (x + self.padding, y + self.padding))
        pygame.display.flip()

    def draw_pressed(self):
        """Briefly draw the sunken (pressed) state, then restore normal."""
        x, y, w, h = self.rect
        pygame.draw.rect(self.surface, (112,112,112), self.rect)
        # dark shadow along the top and left edges gives the sunken look
        pygame.draw.line(self.surface, (0,0,0), (x + 1, y), (x + 1, y + h - 2))
        pygame.draw.line(self.surface, (0,0,0), (x, y + 1), (x + w - 2, y + 1), 1)
        self.surface.blit(self.rendered_label, (x + self.padding, y + self.padding))
        pygame.display.flip()
        time.sleep(0.05)
        self.draw()
class togglebutton():
    """
    A two-state (pressed / unpressed) button.

    Each click flips the state, redraws the matching look and invokes the
    callback as function(new_pressed_state, function_parameter).
    """
    def __init__(self, label, surface, function, function_parameter = None, topleft = (0,0), fixed_size = None, pressed = False):
        self.padding = 10
        self.topleft = topleft
        self.label = label
        self.pressed = pressed
        self.surface = surface
        self.function = function
        self.function_parameter = function_parameter
        self.rendered_label = global_variables.standard_font.render(self.label, True, (0,0,0))
        if fixed_size is None:
            text_w, text_h = self.rendered_label.get_size()
            self.rect = pygame.Rect(self.topleft[0], self.topleft[1], text_w + 2 * self.padding, text_h + 2 * self.padding)
        else:
            self.rect = pygame.Rect(self.topleft[0], self.topleft[1], fixed_size[0], fixed_size[1])
        if self.pressed:
            self.draw_pressed()
        else:
            self.draw_unpressed()

    def activate(self, pos):
        """Flip the toggle state, redraw, and notify the callback."""
        if self.pressed:
            self.draw_unpressed()
        else:
            self.draw_pressed()
        self.pressed = not self.pressed
        self.function(self.pressed, self.function_parameter)

    def draw_unpressed(self):
        """Render the raised (unpressed) appearance."""
        x, y = self.topleft
        w, h = self.rect[2], self.rect[3]
        pygame.draw.rect(self.surface, (212,212,212), self.rect)
        pygame.draw.rect(self.surface, (0,0,0), self.rect, 1)
        # white highlight on top/left edges -> raised look
        pygame.draw.line(self.surface, (255,255,255), self.topleft, (x, y + h))
        pygame.draw.line(self.surface, (255,255,255), self.topleft, (x + w, y))
        self.surface.blit(self.rendered_label, (x + self.padding, y + self.padding))
        pygame.display.flip()

    def draw_pressed(self):
        """Render the sunken (pressed) appearance."""
        x, y = self.topleft
        w, h = self.rect[2], self.rect[3]
        pygame.draw.rect(self.surface, (212,212,212), self.rect)
        pygame.draw.rect(self.surface, (0,0,0), self.rect, 1)
        pygame.draw.rect(self.surface, (112,112,112), pygame.Rect(x, y, w, h))
        # dark shadow on top/left edges -> sunken look
        pygame.draw.line(self.surface, (0,0,0), (x + 1, y), (x + 1, y + h - 2))
        pygame.draw.line(self.surface, (0,0,0), (x, y + 1), (x + w - 2, y + 1), 1)
        self.surface.blit(self.rendered_label, (x + self.padding, y + self.padding))
        pygame.display.flip()
class fast_list():
"""
A list similar to the ScrolledList, but faster
"""
    def __init__(self, surface, tabular_data, rect, column_order = None, sort_by = "rownames"):
        """
        Build the list widget: store geometry, derive the row height from the
        courier font, hand the tabular data to receive_data() for sorting and
        render everything via create_fast_list().
        NOTE(review): receive_data() and update_fast_list() are defined
        further down in this class (outside this excerpt) -- verify there.
        """
        self.surface = surface
        self.rect = rect
        # area for the rows themselves: below the 20-px title strip and left
        # of the 20-px vertical scrollbar
        self.rect_for_main_list = pygame.Rect(self.rect[0],self.rect[1] + 20,self.rect[2]-20,self.rect[3]-20)
        # row height measured from a sample string in the (monospaced) font
        self.text_height = global_variables.courier_font.size("abcdefghijklmnopqrstuvxysABCDEFGHIJKLMNOPQRSTU")[1]
        self.left_list_frame = 5 # how much the text in the list is indented
        self.top_frame_width = 5 # this is a guesstimate of how much the Frame is filling
        self.title = None # either None (no title rendered) or a dict with key "text" (string to draw) and key "entry_span" (dict mapping pixel-span tuples to column names, used for click-to-sort)
        self.original_tabular_data = tabular_data
        self.receive_data(tabular_data,column_order = column_order, sort_by = sort_by)
        self.selected = None        # currently selected row string, or None
        self.selected_name = None   # first whitespace-delimited field of the selection
        self.reverse_sorting = False
        self.create_fast_list()
#        if self.title is not None:
#            self.render_title()
    def create_fast_list(self):
        """
        The creation function. Doesn't return anything: renders the title
        strip, computes how many rows fit, builds the vertical scrollbar and
        draws the visible rows via update_fast_list().
        Needs to have something saved to self.data first
        """
        # 20-px title strip across the top of the widget
        title_surface = pygame.Surface((self.rect[2],20))
        title_surface.fill((224,218,213))
        if self.title is not None:
            rendered_titleline = global_variables.courier_font.render(self.title["text"],True,(0,0,0))
            title_surface.blit(rendered_titleline,(self.left_list_frame, 0))
        self.surface.blit(title_surface,(self.rect[0],self.rect[1]))
        #expanding rectangle to catch clicks on title
#        self.rect = pygame.Rect(self.rect[0],self.rect[1] - self.text_height, self.rect[2], self.rect[3] + self.text_height)
        # number of whole text rows that fit below the title strip
        self.lines_visible = int( math.floor( self.rect[3] / self.text_height) - 1)
        # scrollbar along the right edge; scrolling() redraws the visible rows
        self.vscrollbar = vscrollbar(self.surface, self.scrolling, (self.rect[0] + self.rect[2] - 20, self.rect[1]), self.rect[3], (0,len(self.data)), range_seen = self.lines_visible, start_position = 0, function_parameter=None)
        self.update_fast_list()
        pygame.display.flip()
    def scrolling(self,position,function_parameter):
        # scrollbar callback: the bar has already updated its own position,
        # so just redraw the visible slice of rows
        self.update_fast_list()
    def receive_click(self,event):
        """
        Handle a mouse click inside the list rectangle.
        Clicks in the rightmost 20 px activate the scrollbar; clicks on the
        title row re-sort by the clicked column (toggling sort direction when
        the same column is clicked twice); clicks elsewhere select the data
        row under the cursor and redraw.
        """
        if event.pos[0] > self.rect[0] + self.rect[2] - 20:
            # click is on the vertical scrollbar strip
            self.vscrollbar.activate(event.pos)
            pygame.display.flip()
        else:
            if self.title is not None:
                if event.pos[1] < self.rect[1] + self.text_height:
                    # click on the title row: find which column's pixel span was hit
                    for key in list(self.title["entry_span"].keys()):
                        if key[0] < event.pos[0] and event.pos[0] < key[1]:
                            sort_by_this_column = self.title["entry_span"][key]
#                            print self.title["entry_span"][key]
#                            try: self.original_tabular_data
#                            except: raise Exception("The fast_list was supposed to have the self.orginal_tabular_data variable, but didn't")
#                            else:
                            if self.sorted_by_this_column == sort_by_this_column:
                                self.reverse_sorting = not self.reverse_sorting
                            else:
                                # NOTE(review): self-assignment is a no-op, so the
                                # current direction is kept when switching columns;
                                # possibly meant to reset to False -- confirm intent.
                                self.reverse_sorting = self.reverse_sorting
                            self.receive_data(self.original_tabular_data, sort_by = sort_by_this_column , column_order = self.original_column_order, reverse_sort = self.reverse_sorting)
                            self.update_fast_list()
                            pygame.display.flip()
                    return
            # translate the click y-position into a row index, offset by the
            # first visible row (self.interval[0]); 20 px accounts for the header
            index_selected = int((event.pos[1] - 20 - self.rect[1] - self.top_frame_width) / self.text_height + self.interval[0])
#        print "clicked at relative y_pos: " + str(event.pos[1] - self.rect[1] - self.top_frame_width) + " which is index: " + str(index_selected)
            if 0 <= index_selected and index_selected < len(self.data):
                self.selected = self.data[index_selected]
                # the first whitespace-delimited token of the row string is the row name
                self.selected_name = self.selected.split(" ")[0]
                self.selected_name = self.selected_name.rstrip(" ")
                self.update_fast_list()
                pygame.display.flip()
# def notify(self,event):
# try: self.list_frame
# except: pass
# else:
# try: self.vscrollbar
# except: right_border = self.topleft[0] + self.list_size[0]
# else: right_border = self.topleft[0] + self.list_size[0] - self.vscrollbar.width
#
# if self.topleft[0] < event.data.pos[0] and event.data.pos[0] < right_border:
# if self.topleft[1] < event.data.pos[1] and event.data.pos[1] < self.topleft[1] + self.list_size[1]:
# index_selected = (event.data.pos[1] - self.topleft[1] - self.top_frame_width) / self.text_height + self.interval[0]
# #print "clicked at relative y_pos: " + str(event.data.pos[1] - self.topleft[1] - self.top_frame_width) + " which is index: " + str(index_selected)
# if 0 <= index_selected and index_selected < len(self.data):
# self.selected = self.data[index_selected]
# self.selected_name = self.selected.split(" ")[0]
# self.selected_name = self.selected_name.rstrip(" ")
# self.update_fast_list()
# #print self.selected
#
# #checking to see if the click is on the titlebar
# try: self.list_frame
# except: pass
# else:
# top_border = int(self.topleft[1] - 2 * self.text_height)
# bottom_border = int(self.topleft[1] - self.text_height + self.top_frame_width * 2)
# if top_border < event.data.pos[1] and event.data.pos[1] < bottom_border:
# horizontal_position = event.data.pos[0] - self.topleft[0]
# for key in self.title["entry_span"].keys():
# if key[0] < horizontal_position and horizontal_position < key[1]:
# sort_by_this_column = self.title["entry_span"][key]
# try: self.original_tabular_data
# except: raise Exception("The fast_list was supposed to have the self.orginal_tabular_data variable, but didn't")
# else:
# print "self.sorted_by_this_column: " + str(self.sorted_by_this_column)
# print "sort_by_this_column: " + str(sort_by_this_column)
# if self.sorted_by_this_column == sort_by_this_column:
# self.reverse_sorting = not self.reverse_sorting
# else:
# self.reverse_sorting = self.reverse_sorting
#
#
# self.receive_data(self.original_tabular_data, sort_by = sort_by_this_column , column_order = self.original_column_order, reverse_sort = self.reverse_sorting)
#
# self.update_fast_list()
    def update_fast_list(self):
        """
        Function to update the fast list. Changes the look of the surface "self.list_surface", sets it as picture in the
        self.list_surface_label and calls renderer.update. Adds scrollbar if necessary.
        """
        # rows are identified by their rendered string, so duplicate strings
        # would make selection ambiguous
        if(len(set(self.data)) != len(self.data)):
            raise Exception("The fast_list contains non-unique values!")
        # clear the list area before re-drawing
        pygame.draw.rect(self.surface, (234,228,223), self.rect_for_main_list)
        #in the case where data can fit on screen
        if self.lines_visible >= len(self.data):
            self.interval = list(range(0,len(self.data)))
            for i in range(0,len(self.data)):
                # the selected row is drawn in red, all others in black
                if self.data[i] == self.selected:
                    rendered_dataline = global_variables.courier_font.render(self.data[i],True,(255,0,0))
                else:
                    rendered_dataline = global_variables.courier_font.render(self.data[i],True,(0,0,0))
                self.surface.blit(rendered_dataline,(self.rect_for_main_list[0] + self.left_list_frame, self.rect_for_main_list[1] + i*self.text_height + self.top_frame_width))
            pygame.display.flip()
        #in the case where data can't fit on screen
        else:
            # translate the scrollbar position into the index of the first
            # visible row; denominator is len(self.data) > lines_visible here,
            # so it is nonzero
            percentage_position = float(self.vscrollbar.position) / float(self.vscrollbar.range_of_values[1] - self.vscrollbar.range_of_values[0])
            per_entry_position = int(percentage_position * (len(self.data)-self.lines_visible))
            self.interval = list(range(int(per_entry_position),int(per_entry_position) + self.lines_visible))
            # j is the on-screen row, i is the index into self.data
            for j, i in enumerate(self.interval):
                if self.data[i] == self.selected:
                    rendered_dataline = global_variables.courier_font.render(self.data[i],True,(255,0,0))
                else:
                    rendered_dataline = global_variables.courier_font.render(self.data[i],True,(0,0,0))
                self.surface.blit(rendered_dataline,(self.rect_for_main_list[0] + self.left_list_frame, self.rect_for_main_list[1] + j*self.text_height + self.top_frame_width))
#
# def render_title(self):
# """
# Function that will check if the fast_list has a self.title entry, and in that case
# render it in a separate frame just above the main list
# """
# title_surface = pygame.Surface((self.rect[2],self.text_height))
# title_surface.fill((234,228,223))
# rendered_titleline = global_variables.courier_font.render(self.title["text"],True,(0,0,0))
# title_surface.blit(rendered_titleline,(self.left_list_frame, 0))
#
# self.surface.blit(title_surface,(self.rect[0],self.rect[1]-self.text_height))
#
# pygame.display.flip()
#
# #expanding rectangle to catch clicks on title
# self.rect = pygame.Rect(self.rect[0],self.rect[1] - self.text_height, self.rect[2], self.rect[3] + self.text_height)
def receive_data(self,data,sort_by="rownames",column_order=None,reverse_sort=False):
"""
Function that takes tabular data either of the form imported by primitives.import_datasheet (a dictionary with rows as keys and values
being another dictionary were colums are keys and values are data entries) or else as a simple list.
This is then saved in self.data as a regular list, with the data in flattened and sorted form.
The first line of the data is the title
Optional arguments:
sort_by A string .If given the table will be sorted by this column name. Defaults to sorting by row-title name
column_order a list. If given the columns will appear in this order. Use 'rownames' to refer to the rownames. Omitted entries will not be in the list.
"""
if isinstance(data,list):
self.data = data
elif isinstance(data,dict):
if data == {}:
self.data = []
self.title = {}
self.title["text"] = ""
self.title["entry_span"] = {}
else:
#checking that the column_order is correct
collection = []
self.original_tabular_data = data
self.sorted_by_this_column = sort_by
self.original_column_order = column_order
try: list(data[list(data.keys())[0]].keys())
except:
print(data)
raise Exception("The data given to fast_list did not follow standards. It has been printed above")
original_columns = ["rownames"] + list(data[list(data.keys())[0]].keys())
if column_order is None:
column_order = original_columns
else:
for column_name in column_order:
if column_name not in original_columns:
raise Exception("Received a column_order entry " + str(column_name) + " that was not located in the data columns: " + str(original_columns))
#determining the max number of letters in each column
max_letters_per_column = {"rownames":0}
for column_name in column_order:
max_letters_per_column[column_name] = 0
for row in data:
for column_name in column_order:
if column_name == "rownames":
entry = str(row)
else:
entry = data[row][column_name]
if isinstance(entry,int) or isinstance(entry,int) or isinstance(entry,float):
entry_length = 13
else:
entry_length = len(str(entry))
max_letters_per_column[column_name] = max(max_letters_per_column[column_name],entry_length)
for column_name in column_order:
max_letters_per_column[column_name] = max(max_letters_per_column[column_name],len(column_name))
for max_letters_per_column_entry in max_letters_per_column:
max_letters_per_column[max_letters_per_column_entry] = max_letters_per_column[max_letters_per_column_entry] + 2
#sorting the rows according to sort_by
if sort_by not in column_order:
print(column_order)
raise Exception("The sort_by variable was not found in the column_order. Remember the rownames must also be present if needed")
if sort_by == "rownames":
sorting_list = list(data.keys())
sorting_list.sort()
else:
temp_dict = {}
#from http://mail.python.org/pipermail/python-list/2002-May/146190.html - thanks xtian
for row in data:
temp_dict[row] = data[row][sort_by]
def sorter(x, y):
return cmp(x[1],y[1])
i = list(temp_dict.items())
i.sort(sorter)
sorting_list = []
for i_entry in i:
sorting_list.append(i_entry[0])
if reverse_sort:
sorting_list.reverse()
for rowname in sorting_list:
rowstring = ""
for column_entry in column_order:
if column_entry == "rownames":
data_point_here = rowname
else:
data_point_here = data[rowname][column_entry]
if isinstance(data_point_here,int) or isinstance(data_point_here,int) or isinstance(data_point_here,float):
if isinstance(data_point_here,float):
if abs(data_point_here) > 1000:
data_point_here = int(data_point_here)
else:
data_point_here = "%.4g" % data_point_here
if isinstance(data_point_here,int) or isinstance(data_point_here,int):
if abs(data_point_here) > 1000*1000*1000*1000*1000*3:
data_point_here = "%.4g" % data_point_here
elif abs(data_point_here) > 1000*1000*1000*1000*3:
data_point_here = str(int(data_point_here / (1000*1000*1000*1000) )) + " trillion"
elif abs(data_point_here) > 1000*1000*1000*3:
data_point_here = str(int(data_point_here / (1000*1000*1000) )) + " billion"
elif abs(data_point_here) > 1000*1000*3:
data_point_here = str(int(data_point_here / (1000*1000) )) + " million"
elif abs(data_point_here) > 1000*3:
data_point_here = str(int(data_point_here / 1000)) + " thousand"
else:
data_point_here = str(data_point_here)
#data_point_here = "%.3g" % data_point_here
else:
data_point_here = str(data_point_here)
seperator = " "
seperator = seperator[0:(max_letters_per_column[column_entry] - len(data_point_here))]
rowstring = rowstring + data_point_here + seperator
collection.append(rowstring)
#creating title
#a dictionary with key "text", containing a string with the text to write, and key "entry_span" containing another
#dictionary with the column names as values and their length in pixels as keys
self.title = {}
self.title["text"] = ""
self.title["entry_span"] = {}
entry_end = 5 #FIXME?
for column_entry in column_order:
if column_entry == "rownames":
column_title = ""
else:
column_title = column_entry
seperator = " "
seperator = seperator[0:(max_letters_per_column[column_entry] - len(column_title))]
entry_start = entry_end
entry_end = global_variables.courier_font.size(column_title + seperator)[0] + entry_start
self.title["entry_span"][(entry_start,entry_end)] = column_entry
#
self.title["text"] = self.title["text"] + column_title + seperator
self.data = collection
else:
print(data)
raise Exception("The data passed to the fast_list was not recognised")
| true |
31a4e27108014f20e492970fb7332385acc11467 | Python | franchb/Pynams | /pynams/wholeblock.py | UTF-8 | 44,347 | 2.765625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 11:42:04 2015
@author: Ferriss
For applying the whole-block method of Ferriss et al. 2015 American
Mineralogist to obtain FTIR concentration profiles, diffusivities, and
internal concentration distributions following diffusion experiments.
Also includes 1D and 3D slice diffusion calculations for a rectangular
parallelepiped
MIT license
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import uncertainties
from uncertainties import ufloat
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
import lmfit
# - plotting profiles in three panels
# - Generating whole-block area and water profiles
# - Diffusion in 1D, 3D, and 3D-WB
# - Forward model curve-fitting to diffusion profiles
# - Write objective functions that work with lmfit
# - Call those objective functions with, e.g., getD_1D()
#%% 3D Plot setup
def plot_3panels_outline(style=None, top=1.2):
    """Outline setup for 3 subplots for 3D profiles.
    Returns (fig, axis3), where axis3 is the array of three axes.
    top sets the shared upper y-limit; the style argument is only
    defaulted here, not applied to any artist.
    """
    if style is None:
        style = {'color' : 'lightgreen', 'linewidth' : 4}
    fig, axis3 = plt.subplots(nrows=1, ncols=3)
    for k in range(3):
        axis3[k].set_ylim(0, top)
        axis3[k].grid()
    axis3[0].set_ylabel('C/C$_0$')
    axis3[1].set_xlabel('position along slice ($\mu$m)')
    # hide y tick labels on the two right-hand panels (shared scale)
    plt.setp(axis3[1].get_yticklabels(), visible=False)
    plt.setp(axis3[2].get_yticklabels(), visible=False)
    # BUG FIX: tight_layout was previously referenced without calling it
    # (plt.tight_layout with no parentheses was a no-op)
    plt.tight_layout()
    fig.autofmt_xdate()
    return fig, axis3
def plot_3panels(positions_microns, area_profiles, lengths,
                 style=None, top=1.2, figaxis3=None, show_line_at_1=True,
                 centered=True):
    """Make 3 subplots for 3D and 3DWB profiles. The position and area profiles
    are passed in lists of three lists for a, b, and c.
    Positions are assumed to start at 0 and then are centered.
    Returns (fig, axis3) when figaxis3 is None; otherwise draws onto the
    provided axes and returns None.
    """
    if figaxis3 is None:
        fig, axis3 = plot_3panels_outline(style, top)
    else:
        axis3 = figaxis3
    for k in range(3):
        x = positions_microns[k]
        y = area_profiles[k]
        if len(x) != len(y):
            # NOTE(review): mismatched lengths are only reported, not raised;
            # the plot call below will fail if they differ
            print 'Problem in plot_3panels'
            print 'len(x):', len(x)
            print 'len(y):', len(y)
        # half-length used to center the profile around 0
        a = lengths[k] / 2.
        axis3[k].set_xlim(-a, a)
        if show_line_at_1 is True:
            # horizontal reference line at C/C0 = 1
            axis3[k].plot([-a, a], [1., 1.], '-k')
        if style is None:
            # many points -> assume a model curve (line); few -> data markers
            if len(x) > 45:
                axis3[k].plot(x-a, y)
            else:
                axis3[k].plot(x-a, y, 'o')
        else:
            axis3[k].plot(x-a, y, **style)
    if figaxis3 is None:
        return fig, axis3
#%% Generate 3D whole-block area and water profiles
def make_3DWB_area_profile(final_profile,
initial_profile=None,
initial_area_list=None,
initial_area_positions_microns=None,
show_plot=True, top=1.2, fig_ax=None,
peakwn=None):
"""Take final 3D-wholeblock FTIR profile and returns
a profile of the ratio of the two (A/Ao). A/A0 is also saved in the
profile attribute wb_areas.
Requires information about initial state, so either an initial
profile (best) as the profile's initial_profile attribute,
or else an initial area list passed in with its position.
Defaults to making a plot with max y-value 'top'.
Note initial_area_positions_microns is assumed to start at 0 and then
gets shifted for the fit.
"""
fin = final_profile
leng = fin.set_len()
# If whole-block areas are already made, use them.
# Otherwise make them.
if (fin.wb_areas is not None) and (len(fin.wb_areas) > 0):
wb_areas = fin.wb_areas
else:
print fin.wb_areas
# initial checks
if len(fin.positions_microns) == 0:
print 'Need position information'
return False
if fin.len_microns is None:
check = fin.set_len()
if check is False:
print 'Need more info to set profile length'
return False
if fin.len_microns is None:
fin.set_len()
if len(fin.areas_list) == 0:
print 'making area list for profile'
fin.make_area_list(show_plot=False)
# What to normalize to? Priority given to self.wb_initial_profile, then
# initial_profile passed in here, then initial area list passed in here.
if fin.initial_profile is not None:
init = fin.initial_profile
if init.len_microns != fin.len_microns:
print 'initial and final lengths must be the same!'
return False
# Make sure area lists are populated
for profile in [init, fin]:
if len(profile.areas_list) == 0:
print profile.profile_name
print 'making area list for profile'
profile.make_area_list(show_plot=False)
A0 = init.areas_list
positions0 = init.positions_microns
elif initial_profile is not None:
init = initial_profile
if isinstance(init, Profile) is False:
print 'initial_profile argument must be a pynams Profile.'
print 'Consider using initial_area_list and positions instead'
return False
# Make sure area lists are populated
for profile in [init, fin]:
if len(profile.areas_list) == 0:
print 'making area list for profile'
profile.make_area_list(show_plot=False)
A0 = init.areas_list
positions0 = init.positions_microns
elif initial_area_list is not None:
if initial_area_positions_microns is None:
print 'Need initial_area_positions_microns for initial_area_list'
return False
A0 = initial_area_list
positions0 = initial_area_positions_microns
if len(fin.areas_list) == 0:
print 'making area list for final profile'
fin.make_area_list(show_plot=False)
else:
print 'Need some information about initial state'
return False
# More initial checks
if len(fin.areas_list) != len(fin.positions_microns):
print 'area and position lists do not match'
print 'length areas_list:', len(fin.areas_list)
print 'length positions list:', len(fin.positions_microns)
return False
if len(A0) < 1:
print 'Nothing in initial area list'
return False
if len(positions0) < 1:
print 'Nothing in initial positions list'
return False
if len(A0) == 1:
print 'Using single point to generate initial line'
A0.extend([A0[0], A0[0]])
positions0.extend([0, fin.len_microns])
# Use best-fit line through initial values to normalize final data
p = np.polyfit(positions0-(leng/2.), A0, 1)
normalizeto = np.polyval(p, fin.areas_list)
wb_areas = fin.areas_list / normalizeto
# Save whole-block areas as part of profile
fin.wb_areas = wb_areas
if show_plot is True:
if fig_ax is None:
f, ax = final_profile.plot_area_profile_outline()
else:
ax = fig_ax
ax.set_ylim(0, top)
ylabelstring = 'Final area / Initial area'
if peakwn is not None:
print 'NOT READY FOR PEAKFITTING YET'
# extrabit = '\n for peak at ' + str(peakwn) + ' cm$^{-1}$'
# ylabelstring = ylabelstring + extrabit
ax.set_ylabel(ylabelstring)
style = fin.choose_marker_style()
ax.plot([-leng/2.0, leng/2.0], [1, 1], **style_1)
ax.plot(fin.positions_microns - (leng/2.0), wb_areas, **style)
if fig_ax is None:
return wb_areas, f, ax
else:
return wb_areas
else:
return wb_areas
def make_3DWB_water_profile(final_profile, water_ppmH2O_initial=None,
initial_profile=None,
initial_area_list=None,
initial_area_positions_microns=None,
show_plot=True, top=1.2, fig_ax=None):
"""Take a profile and initial water content.
Returns the whole-block water concentration profile based on
the profile's attribute wb_areas. If wb_areas have not been made,
some initial profile information and various options are passed
to make_3DWB_area_profile().
Default makes a plot showing A/Ao and water on parasite y-axis
"""
fin = final_profile
init = initial_profile
# Set initial water
if water_ppmH2O_initial is not None:
w0 = water_ppmH2O_initial
else:
if fin.sample is not None:
if fin.sample.initial_water is not None:
w0 = fin.sample.initial_water
elif init is not None:
if init.sample is not None:
if init.sample.initial_water is not None:
w0 = init.sample.initial_water
else:
print 'Need initial water content.'
return False
# Set whole-block areas
if (fin.wb_areas is not None) and (len(fin.wb_areas) > 0):
wb_areas = fin.wb_areas
else:
wb_areas = make_3DWB_area_profile(fin, initial_profile,
initial_area_list,
initial_area_positions_microns)
water = wb_areas * w0
if show_plot is True:
# Use a parasite y-axis to show water content
fig = plt.figure()
ax_areas = SubplotHost(fig, 1,1,1)
fig.add_subplot(ax_areas)
area_tick_marks = np.arange(0, 100, 0.2)
ax_areas.set_yticks(area_tick_marks)
ax_water = ax_areas.twin()
ax_water.set_yticks(area_tick_marks)
if isinstance(w0, uncertainties.Variable):
ax_water.set_yticklabels(area_tick_marks*w0.n)
else:
ax_water.set_yticklabels(area_tick_marks*w0)
ax_areas.axis["bottom"].set_label('Position ($\mu$m)')
ax_areas.axis["left"].set_label('Final area / Initial area')
ax_water.axis["right"].set_label('ppm H$_2$O')
ax_water.axis["top"].major_ticklabels.set_visible(False)
ax_water.axis["right"].major_ticklabels.set_visible(True)
ax_areas.grid()
ax_areas.set_ylim(0, 1.2)
if fin.len_microns is not None:
leng = fin.len_microns
else:
leng = fin.set_len()
ax_areas.set_xlim(-leng/2.0, leng/2.0)
style = fin.choose_marker_style()
ax_areas.plot([-leng/2.0, leng/2.0], [1, 1], **style_1)
ax_areas.plot(fin.positions_microns-leng/2.0, wb_areas, **style)
return water, fig, ax_areas
else:
return water
#%%
#
#
# Writing diffusion equations as suitable ojective functions that can be
# called by lmfit.minimize.
#
#
def diffusion_1D(params, data_x_microns=None, data_y_unit_areas=None,
erf_or_sum='erf', show_plot=True, fig_axis=None,
style=None, need_to_center_x_data=True,
infinity=100, points=50):
"""Function set up to follow lmfit fitting requirements.
Requires input as lmfit parameters value dictionary
passing in key information as 'length_microns',
'time_seconds', 'log10D_m2s', and 'initial_unit_value'.
Here is an example of the setup:
params = lmfit.Parameters()
# (Name, Value, Vary, Min, Max, Expr)
params.add_many(('length_microns', 1000, False, 0.0, None, None),
('time_seconds', 3600*10., False, 0.0, None, None),
('log10D_m2s', -12., True, None, None, None),
('initial_unit_value', 1., False, 0.0, 1.0, None))
Fitting is a profile function fitDiffusivity().
For fitting, include data_x_microns and data_y_unit_areas.
If need_to_center_x_data is True (default), x data starts at 0 and
will get moved. Also, lmfit screws up if you try setting a minimum
on log10D_m2s.
Return values
If data are None (default), returns 1D unit diffusion profile
x_microns and y as vectors of length points (default 50).
Optional keywords:
- erf_or_sum: whether to use python's error functions (default)
or infinite sums
- show_plot: whether to plot results (default True, so plot)
- fig_ax: which figure axis to plot onto (default None makes new fig)
- style: curve plotting style dictionary
# - whether diffusion is in or out of sample (default out),
# - equilibrium concentration (default 0 for diffusion out; 1 for in),
- points sets how many points to calculate in profile. Default is 50.
- what 'infinity' is if using infinite sum approximation
"""
# extract important values from parameter dictionary passed in
p = params.valuesdict()
L_meters = p['length_microns'] / 1e6
t = p['time_seconds']
D = 10.**p['log10D_m2s']
# I will make this keyword optional at some point
initial_value = p['initial_unit_value']
a_meters = L_meters / 2.
if t < 0:
print 'no negative time'
return
# Fitting to data or not? Default is not
fitting = False
if (data_x_microns is not None) and (data_y_unit_areas is not None):
if len(data_x_microns) == len(data_y_unit_areas):
fitting = True
else:
print 'x and y data must be the same length'
print 'x', len(data_x_microns)
print 'y', len(data_y_unit_areas)
# x is in meters and assumed centered around 0
if fitting is True:
# Change x to meters and center it
x = np.array(data_x_microns) / 1e6
if need_to_center_x_data is True:
x = x - a_meters
else:
x = np.linspace(-a_meters, a_meters, points)
if erf_or_sum == 'infsum':
xsum = np.zeros_like(x)
for n in range(infinity):
xadd = ((((-1.)**n) / ((2.*n)+1.)) +
(np.exp((-D * (((2.*n)+1.)**2.) * (np.pi**2.) * t) /
(L_meters**2.))) +
(np.cos(((2.*n)+1.) * np.pi * x / L_meters)))
xsum = xsum + xadd
model = xsum * 4. / np.pi
elif erf_or_sum == 'erf':
sqrtDt = (D*t)**0.5
model = ((scipy.special.erf((a_meters+x)/(2*sqrtDt))) +
(scipy.special.erf((a_meters-x)/(2*sqrtDt))) - 1)
else:
print ('erf_or_sum must be set to either "erf" for python built-in ' +
'error function approximation (defaul) or "sum" for infinite ' +
'sum approximation with infinity=whatever, defaulting to ' +
str(infinity))
return False
# Revisit for in_or_out
model = model * initial_value
x_microns = x * 1e6
if show_plot is True:
a_microns = a_meters * 1e6
if fig_axis is None:
fig, fig_axis = plt.subplots()
fig_axis.grid()
fig_axis.set_ylim(0, 1.2)
fig_axis.set_xlim(-a_microns, a_microns)
fig_axis.set_ylabel('C/C$_0$')
fig_axis.set_xlabel('position ($\mu$m)')
if style is None:
if fitting is True:
style = {'linestyle' : 'none', 'marker' : 'o'}
else:
style = {'color' : 'lightgreen', 'linewidth' : 4}
fig_axis.plot([-a_microns, a_microns], [initial_value, initial_value],
'-k')
fig_axis.plot(x_microns, model, **style)
# If not including data, just return the model values
# With data, return the residual for use in fitting.
if fitting is False:
return x_microns, model
return model-data_y_unit_areas
def diffusion_3D(params, data_x_microns=None, data_y_unit_areas=None,
                 erf_or_sum='erf', show_plot=True, fig_ax=None,
                 style=None, need_to_center_x_data=True,
                 infinity=100, points=50, show_1Dplots=False):
    """ Diffusion in 3 dimensions without path integration.
    lmfit parameter list input must include:
    length_microns_a, length_microns_b, length_microns_c,
    time_seconds, initial unit value, and
    diffusivities in log10 m2/s logDx, logDy, logDz
    Example parameter setup for input here:
    params = lmfit.Parameters()
    #  (Name, Value, Vary, Min, Max, Expr)
    params.add('microns_twoa', L3[0], False, 0.0, None, None)
    params.add('microns_twob', L3[1], False, 0.0, None, None)
    params.add('microns_twoc', L3[2], False, 0.0, None, None)
    params.add('initial_unit_value_a', 1., False, 0.0, None, None)
    params.add('initial_unit_value_b', 1., True, 0.0, None, None)
    params.add('initial_unit_value_c', 1., False, 0.0, None, None)
    params.add('log10Dx', D3[0], True, 0.0, None, None)
    params.add('log10Dy', D3[1], True, 0.0, None, None)
    params.add('log10Dz', D3[2], False, 0.0, None, None)
    params.add('time_seconds', t, False, 0.0, None, None)
    Returns (v, sliceprofiles) when not fitting: v is the points^3
    concentration matrix and sliceprofiles the three central slices.
    NOTE: the fitting branch returns placeholder zero residuals
    (not yet implemented; see comment at the bottom).
    """
    # Fitting to data or not? Default is not
    # Add appropriate x and y data to fit
    fitting = False
    if (data_x_microns is not None) and (data_y_unit_areas is not None):
        x_data = np.array(data_x_microns)
        y_data = np.array(data_y_unit_areas)
        if np.shape(x_data) == np.shape(y_data):
            fitting = True
            print 'fitting to data'
        else:
            print 'x and y data must be the same shape'
            print 'x', np.shape(x_data)
            print 'y', np.shape(y_data)
    # unpack lengths, time, initial values and log10 diffusivities,
    # along with which of them lmfit is allowed to vary
    p = params.valuesdict()
    L3_microns = np.array([p['microns_twoa'], p['microns_twob'],
                          p['microns_twoc']])
    t = p['time_seconds']
    init = [p['initial_unit_value_a'],
            p['initial_unit_value_b'],
            p['initial_unit_value_c']]
    vary_init = [params['initial_unit_value_a'].vary,
                 params['initial_unit_value_b'].vary,
                 params['initial_unit_value_c'].vary]
    log10D3 = [p['log10Dx'], p['log10Dy'], p['log10Dz']]
    vary_D = [params['log10Dx'].vary,
              params['log10Dy'].vary,
              params['log10Dz'].vary]
    # First create 3 1D profiles, 1 in each direction
    xprofiles = []
    yprofiles = []
    kwdict = {'show_plot' : show_1Dplots, 'points' : points}
    for k in range(3):
        p1D = lmfit.Parameters()
        p1D.add('length_microns', L3_microns[k], False)
        p1D.add('time_seconds', t, params['time_seconds'].vary)
        p1D.add('log10D_m2s', log10D3[k], vary_D[k])
        p1D.add('initial_unit_value', init[k], vary_init[k])
        x, y = diffusion_1D(p1D, **kwdict)
        xprofiles.append(x)
        yprofiles.append(y)
    # Then multiply them together to get a 3D matrix
    # (product solution: each voxel is the product of the three 1D values)
    v = np.ones((points, points, points))
    for d in range(0, points):
        for e in range(0, points):
            for f in range(0, points):
                v[d][e][f] = yprofiles[0][d]*yprofiles[1][e]*yprofiles[2][f]
    # central slices through the block along each of the three axes
    mid = points/2
    aslice = v[:, mid][:, mid]
    bslice = v[mid][:, mid]
    cslice = v[mid][mid]
    sliceprofiles = [aslice, bslice, cslice]
    if show_plot is True:
        if fig_ax is None:
            f, fig_ax = plot_3panels_outline()
        if style is None:
            style = {'color' : 'lightgreen', 'linewidth' : 4}
        positions = []
        for k in range(3):
            a = L3_microns[k] / 2.
            x = np.linspace(0, a*2., points)
            positions.append(x)
        plot_3panels(positions, sliceprofiles, L3_microns, style=style,
                     figaxis3=fig_ax)
    # Returning full matrix and
    # slice profiles in one long list for use in fitting
    sliceprofiles = [aslice, bslice, cslice]
    if fitting is False:
        return v, sliceprofiles
    else:
        ### Still need to set up residuals! ###
        # placeholder: zero residuals mean lmfit cannot actually fit yet
        residuals = np.zeros_like(sliceprofiles)
        return residuals
def diffusion_3DWB(params, data_x_microns=None, data_y_unit_areas=None,
                   raypaths=None, erf_or_sum='erf', show_plot=True,
                   fig_ax=None,
                   style=None, need_to_center_x_data=True,
                   infinity=100, points=200, show_1Dplots=False):
    """ Diffusion in 3 dimensions with path integration.
    lmfit parameter list input must include:
    length_microns_a, length_microns_b, length_microns_c,
    time_seconds, initial unit value,
    diffusivities in log10 m2/s logDx, logDy, logDz,
    Also must pass in a keyword list of raypaths in an order consistent
    with the length directions as, e.g., ['c', 'c', 'b']
    Example parameter setup for input here:
    params = lmfit.Parameters()
    #  (Name, Value, Vary, Min, Max, Expr)
    params.add('microns_twoa', L3[0], False, 0.0, None, None)
    params.add('microns_twob', L3[1], False, 0.0, None, None)
    params.add('microns_twoc', L3[2], False, 0.0, None, None)
    params.add('initial_unit_value_a', 1., False, 0.0, None, None)
    params.add('initial_unit_value_b', 1., True, 0.0, None, None)
    params.add('initial_unit_value_c', 1., False, 0.0, None, None)
    params.add('log10Dx', D3[0], True, 0.0, None, None)
    params.add('log10Dy', D3[1], True, 0.0, None, None)
    params.add('log10Dz', D3[2], False, 0.0, None, None)
    params.add('time_seconds', t, False, 0.0, None, None)
    Returns (wb_positions, wb_profiles) when not fitting, or a flat list
    of residuals (model - data at the nearest model position) when fitting.
    """
    if raypaths is None:
        print 'raypaths must be in the form of a list of three abc directions'
        return
    # v is the model 3D array of internal concentrations
    ### Need to add in all the keywords ###
    v, sliceprofiles = diffusion_3D(params, show_plot=False, points=points)
    # Fitting to data or not? Default is not
    # Add appropriate x and y data to fit
    fitting = False
    if (data_x_microns is not None) and (data_y_unit_areas is not None):
        x_array = np.array(data_x_microns)
        y_array = np.array(data_y_unit_areas)
        if np.shape(x_array) == np.shape(y_array):
            print 'fitting to data'
            fitting = True
        else:
            print 'x and y data must be the same shape'
            print 'x', np.shape(x_array)
            print 'y', np.shape(y_array)
    # Whole-block measurements can be obtained through any of the three
    # planes of the whole-block, so profiles can come from one of two ray path
    # directions. These are the planes (averages through the block along
    # each axis).
    raypathA = v.mean(axis=0)
    raypathB = v.mean(axis=1)
    raypathC = v.mean(axis=2)
    # Specify whole-block profiles in model: for each profile direction,
    # pick the central line of the averaged plane given by its ray path
    mid = points/2
    if raypaths[0] == 'b':
        wbA = raypathB[:, mid]
    elif raypaths[0] == 'c':
        wbA = raypathC[:, mid]
    else:
        print 'raypaths[0] for profile || a must be "b" or "c"'
        return
    if raypaths[1] == 'a':
        wbB = raypathA[:, mid]
    elif raypaths[1] == 'c':
        wbB = raypathC[mid]
    else:
        print 'raypaths[1] for profile || b must be "a" or "c"'
        return
    if raypaths[2] == 'a':
        wbC = raypathA[mid]
    elif raypaths[2] == 'b':
        wbC = raypathB[mid]
    else:
        print 'raypaths[2] for profile || c must be "a" or "b"'
        return
    p = params.valuesdict()
    L3 = [p['microns_twoa'], p['microns_twob'], p['microns_twoc']]
#    if fitting is True:
#        wb_profiles = data_y_unit_areas
#        wb_positions = np.array(data_x_microns)
#    else:
    # model positions run 0..length in each direction (not centered)
    wb_profiles = [wbA, wbB, wbC]
    wb_positions = []
    for k in range(3):
        a = L3[k] / 2.
        x_microns = np.linspace(0., 2.*a, points)
        wb_positions.append(x_microns)
    if show_plot is True:
        if style is None:
            style = {'color' : 'lightgreen', 'linewidth' : 4}
        if fig_ax is None:
            f, fig_ax = plot_3panels(wb_positions, wb_profiles, L3, style)
        else:
            plot_3panels(wb_positions, wb_profiles, L3, style,
                         figaxis3=fig_ax)
    if fitting is False:
        return wb_positions, wb_profiles
    if fitting is True:
        # Return residuals: for each data point, compare against the model
        # whole-block value at the nearest model position
        y_model = []
        y_data = []
        residuals = []
        for k in range(3):
            for pos in range(len(x_array[k])):
                # wb_positions are centered, data are not
                microns = x_array[k][pos]
                # Find the index of the full model whole-block value
                # closest to the data positions
                idx = (np.abs(wb_positions[k]-microns).argmin())
                model = wb_profiles[k][idx]
                data = y_array[k][pos]
                res = model - data
                y_model.append(model)
                y_data.append(data)
                residuals.append(res)
        return residuals
#%% Group profiles together as whole-block unit
class WholeBlock():
    # NOTE(review): these are class-level attributes; the mutable ones
    # (profiles, peak_positions) are shared across all instances until
    # shadowed by an instance assignment -- confirm this is intended.
    profiles = []
    name = None
    # generated by setupWB below
    directions = None
    raypaths = None
    initial_profiles = None
    lengths = None
    # optional for diffusion work and plotting
    style_base = None
    time_seconds = None
    diffusivities_log10_m2s = None
    diffusivity_errors = None
    # peak fitting
    peak_positions = [None, None, None]
def setupWB(self, peakfit=True, make_wb_areas=True):
"""Sets up and checks WholeBlock instance
- Check that profiles list contains a list of three (3) profiles
- Generate list of initial profiles
- Generate list of profile directions
- Verify three profile directions are orthogonal (['a', 'b', 'c'])
- Generate list of ray paths
- Verify three ray path directions are compatible with directions list
"""
if len(self.profiles) != 3:
print 'For now, only a list of 3 profiles is allowed'
return
d = []
r = []
ip = []
L = []
for prof in self.profiles:
if isinstance(prof, Profile) is False:
print 'Only profiles objects allowed in profile list!'
return
d.append(prof.direction)
r.append(prof.raypath)
ip.append(prof.initial_profile)
L.append(prof.set_len())
if make_wb_areas is True:
prof.make_wholeblock(peakfit=peakfit)
self.directions = d
self.raypaths = r
self.initial_profiles = ip
self.lengths = L
if peakfit is True:
self.get_peakfit()
return
def plot_wb_data(self, peak_idx=None, peakwn=None, fig_ax3=None, top=1.2):
"""Plot whole-block data on three panels"""
if ((self.directions is None) or (self.raypaths is None) or
(self.initial_profiles is None) or (self.lengths is None)):
self.setupWB(make_wb_areas=False, peakfit=False)
# concatenate positions and areas across three profiles to
# send in to the plotting function
positions = []
areas = []
for prof in self.profiles:
positions.append(prof.positions_microns)
# Bulk hydrogen
if peak_idx is None and peakwn is None:
if prof.wb_areas is None:
make_3DWB_area_profile(prof, show_plot=False)
areas.append(prof.wb_areas)
# Peak-specific
else:
peak_wb_areas, peakwn = prof.get_peak_wb_areas(peak_idx,
peakwn)
areas.append(peak_wb_areas)
# Sent positions and areas to plotting command above
if fig_ax3 is not None:
plot_3panels(positions, areas, self.lengths, figaxis3=fig_ax3,
style=self.style_base, top=top)
else:
f, fig_ax3 = plot_3panels(positions, areas, self.lengths,
style=self.style_base, top=top)
# Change title if peak-specific rather than bulk
if peakwn is not None:
tit = self.name + '\nPeak at ' + str(peakwn) + ' /cm wavenumber'
else:
tit = self.name + '\nBulk hydrogen'
fig_ax3[1].set_title(tit)
def show_spectra_names(self, show_initials=True):
"""Print out fnames of all spectra associated with the whole-block
instance."""
if show_initials is True:
if self.initial_profiles is None:
self.setupWB()
if self.initial_profiles is not None:
print '--Initial profiles--'
for prof in self.initial_profiles:
print prof.profile_name
spec_list = []
for spectrum in prof.spectra_list:
spec_list.append(spectrum.fname)
print spec_list
print ' '
else:
print 'No initial profiles given'
if self.profiles is not None:
print '--Final profiles--'
for prof in self.profiles:
print prof.profile_name
spec_list = []
for spectrum in prof.spectra_list:
spec_list.append(spectrum.fname)
print spec_list
print ' '
def make_profile_list(self, initial_too=True):
"""Return False or a list of profiles"""
if initial_too is True:
if self.initial_profiles is None:
self.setupWB()
if self.initial_profiles is None:
print 'No initial profiles'
profile_list = self.profiles
else:
profile_list = self.initial_profiles + self.profiles
else:
profile_list = self.profiles
return profile_list
def make_baselines(self, initial_too=True, line_order=1, shiftline=None,
show_fit_values=False, show_plot=False):
"""Make spectra baselines for all spectra in profiles attribute."""
profile_list = self.make_profile_list(initial_too)
for prof in profile_list:
for spectrum in prof.spectra_list:
spectrum.make_baseline(line_order, shiftline, show_fit_values,
show_plot)
def show_averages(self, profile_idx_list=range(3)):
"""Plot average spectra for profiles."""
for k in profile_idx_list:
pr = self.profiles[k]
pr.show_averages()
def save_baselines(self, initial_too=True):
"""Make and save spectra baselines for all spectra."""
profile_list = self.make_profile_list(initial_too)
for prof in profile_list:
for spectrum in prof.spectra_list:
spectrum.save_baseline()
def matlab_spectra_list(self, initial_too=True):
"""Print out a list of spectra names in a matlab-friendly way"""
if initial_too is True:
if self.initial_profiles is None:
self.setupWB()
string = "{"
for prof in self.initial_profiles:
for spec in prof.spectra_list:
stringname = spec.fname
string = string + "'" + stringname + "' "
string = string + "};"
print string, '\n'
string = "["
for prof in self.profiles:
for spec in prof.spectra_list:
stringname = spec.fname
string = string + "'" + stringname + "' "
string = string + "]"
print string
def get_peakfit(self):
"""get peaks fit in MATLAB for all spectra."""
for prof in self.profiles:
prof.get_peakfit()
if self.initial_profiles is None:
self.setupWB()
# Check the peak positions are all consistent.
# Variations are allowed for different directions
for k in range(3):
prof = self.profiles[k]
iprof = self.initial_profiles[k]
if self.peak_positions[k] is None:
self.peak_positions[k] = prof.spectra_list[0].peakpos
for spectrum in prof.spectra_list + iprof.spectra_list:
if spectrum.peakpos.all() != self.peak_positions[k].all():
print 'inconsistent:'
print spectrum.fname, spectrum.peakpos
def plot_areas(self, profile_index=None, peak_idx=None, peakwn=None,
show_initials=False, show_finals=True):
"""Plots profiles on one figure.
Set initial_instead_of_final to True to see initial areas.
Need to add legend and checks is wavenumber not in list."""
# Which profiles to plot
if show_initials is True:
if self.initial_profiles is None:
self.setupWB()
if self.initial_profiles is None:
print 'Need initial profile'
return
if self.initial_profiles is None:
self.setupWB(False, False)
# get wavenumber if only peak_idx is givin
if peak_idx is not None:
if profile_index is not None:
idx = profile_index
else:
idx = 0
prof = self.profiles[idx]
if prof.pos is None:
prof.get_peakfit()
if peak_idx is None:
peak_idx = np.where(prof.peakpos==peakwn)[0][0]
f, ax = self.profiles[0].plot_area_profile_outline(peakwn=peakwn)
if profile_index is None:
if show_finals is True:
ai = self.profiles[0]
bi = self.profiles[1]
ci = self.profiles[2]
ai.plot_area_profile(figaxis=ax, peakwn=peakwn,
peak_idx=peak_idx)
bi.plot_area_profile(figaxis=ax, peakwn=peakwn,
peak_idx=peak_idx)
ci.plot_area_profile(figaxis=ax, peakwn=peakwn,
peak_idx=peak_idx)
if show_initials is True:
ai = self.initial_profiles[0]
bi = self.initial_profiles[1]
ci = self.initial_profiles[2]
ai.plot_area_profile(figaxis=ax, peakwn=peakwn,
peak_idx=peak_idx)
bi.plot_area_profile(figaxis=ax, peakwn=peakwn,
peak_idx=peak_idx)
ci.plot_area_profile(figaxis=ax, peakwn=peakwn,
peak_idx=peak_idx)
else:
if show_finals is True:
self.profiles[profile_index].plot_area_profile(
figaxis=ax, peakwn=peakwn, peak_idx=peak_idx)
if show_initials is True:
self.initial_profiles[profile_index].plot_area_profile(
figaxis=ax, peakwn=peakwn, peak_idx=peak_idx)
return f, ax
def show_diffusion(self, peak_idx=None, peakwn=None,
time_seconds=None,
list_of_log10D_m2s=[-13., -13., -13.],
initial_value=None, in_or_out='out', erf_or_sum='erf',
equilibrium_value=None, show_plot=True,
show_slice=False, style=None, wb_or_3Dnpi='wb',
fig_ax=None, points=50,
top=1.2, numformat='{:.1f}'):
"""Applies 3-dimensionsal diffusion equations to instance shape.
Requires lengths, time in seconds, and three diffusivities either
explicitly passed here or as attributes of the WholeBlock object.
Assuming whole-block diffusion (wb_or_3Dnpi='wb') but could also
do 3D non-path-integrated ('npi')
"""
if time_seconds is None:
if self.time_seconds is not None:
time_seconds = self.time_seconds
else:
print 'Need time information'
return
if self.diffusivities_log10_m2s is not None:
D3 = self.diffusivities_log10_m2s
else:
D3 = list_of_log10D_m2s
if (wb_or_3Dnpi != 'npi') and (wb_or_3Dnpi != 'wb'):
print 'wb_or_3Dnpi only takes "wb" or "npi"'
return
if self.directions is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if self.initial_profiles is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if self.lengths is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if self.raypaths is None:
self.setupWB(peakfit=False, make_wb_areas=False)
# end initial checks and setup
# Set up parameters to pass into equations
L3 = self.lengths
params = lmfit.Parameters()
# (Name, Value, Vary, Min, Max, Expr)
params.add('microns_twoa', L3[0], False, None, None, None)
params.add('microns_twob', L3[1], False, None, None, None)
params.add('microns_twoc', L3[2], False, None, None, None)
params.add('initial_unit_value_a', 1., False, None, None, None)
params.add('initial_unit_value_b', 1., False, None, None, None)
params.add('initial_unit_value_c', 1., False, None, None, None)
params.add('log10Dx', D3[0], True, None, None, None)
params.add('log10Dy', D3[1], True, None, None, None)
params.add('log10Dz', D3[2], True, None, None, None)
params.add('time_seconds', time_seconds, False, None, None, None)
# Set up the plot
if show_plot is True:
if fig_ax is None:
fig, fig_ax = plot_3panels_outline(top=top)
kws = {'show_plot' : show_plot, 'fig_ax' : fig_ax,
'style' : style, 'points' : points}
# send to diffusion equation
# diffusion line plots get made here
if wb_or_3Dnpi == 'npi':
diffusion_3D(params, **kws)
else:
R3 = self.raypaths
kws['raypaths'] = R3
diffusion_3DWB(params, **kws)
# Add data on top of diffusion line in plot
if show_plot is True:
self.plot_wb_data(fig_ax3=fig_ax, peak_idx=peak_idx,
peakwn=peakwn)
for k in range(3):
dlabel = str(numformat.format(D3[k])) + ' m$^2$s$^{-1}$'
fig_ax[k].text(0, top-top*0.12, dlabel,
horizontalalignment='center',
backgroundcolor='w')
return params
def fitD(self, peak_idx=None, peakwn=None,
initial_unit_values=[1., 1., 1.],
vary_initials=['False', 'False', 'False'],
guesses=[-13., -13., -13.], show_plot=True,
vary_D=['True', 'True', 'True'],
approx_with_1D=False, wb_or_3Dnpi='wb', polyorder=1,
show_initial_guess=True, style_initial=None,
style_final={'color' : 'red'}, points=100):
"""Forward modeling to determine diffusivities in three dimensions
from whole-block data.
"""
if wb_or_3Dnpi != 'wb' and wb_or_3Dnpi != 'npi':
print 'wb_or_3Dnpi can only be wb or npi'
return
if wb_or_3Dnpi == 'npi':
print 'NPI not supported yet'
return
# Plot setup and initial guess lines
if show_plot is True:
fig, ax3 = plot_3panels_outline()
else:
ax3 = None
# Set up parameters and keywords to go into diffusion equations
# This will plot the initial guess diffusivities if show_initial_guess
# is True
dict_plotting = {'show_plot' : show_initial_guess, 'fig_ax' : ax3,
'wb_or_3Dnpi' : wb_or_3Dnpi, 'style' : style_initial,
'points' : points, 'peakwn' : peakwn, 'peak_idx' : peak_idx}
dict_fitting = {'show_plot' : False, 'points' : points}
params = self.show_diffusion(**dict_plotting)
# x and y are the data that will be fit to
x = []
y = []
for prof in self.profiles:
# Get positions
if prof.positions_microns is None:
print ''
print prof.profile_name
print 'Need to set profile positions'
return
x.append(prof.positions_microns)
# Get whole-block areas
if peakwn is None and peak_idx is None:
# bulk hydrogen is the default
if prof.wb_areas is None:
prof.make_3DWB_water_list(polyorder)
y.append(prof.wb_areas)
else:
# peak-specific
wb_areas, peakwn = prof.get_peak_wb_areas(peak_idx, peakwn)
y.append(wb_areas)
# Fitting takes place here
if wb_or_3Dnpi == 'wb':
dict_fitting['raypaths'] = self.raypaths
lmfit.minimize(diffusion_3DWB, params, args=(x, y),
kws=dict_fitting)
elif wb_or_3Dnpi == 'npi':
diffusion_3D(params, x, y, **dict_fitting)
lmfit.minimize(diffusion_3D, params, args=(x, y), kws=dict_fitting)
# results
best_Dx = ufloat(params['log10Dx'].value,
params['log10Dx'].stderr)
best_Dy = ufloat(params['log10Dy'].value,
params['log10Dy'].stderr)
best_Dz= ufloat(params['log10Dz'].value,
params['log10Dz'].stderr)
best_init_a = ufloat(params['initial_unit_value_a'].value,
params['initial_unit_value_a'].stderr)
best_init_b = ufloat(params['initial_unit_value_b'].value,
params['initial_unit_value_b'].stderr)
best_init_c = ufloat(params['initial_unit_value_c'].value,
params['initial_unit_value_c'].stderr)
D3 = [best_Dx.n, best_Dy.n, best_Dz.n]
D3errors = [best_Dx.s, best_Dy.s, best_Dz.s]
# and then plot after
if show_plot is True:
dict_plotting['show_plot'] = True
dict_plotting['style'] = style_final
self.show_diffusion(list_of_log10D_m2s=D3, **dict_plotting)
print '\ntime in hours:', params['time_seconds'].value / 3600.
print '\ninitial unit values:'
print best_init_a
print best_init_b
print best_init_c
print '\nbestfit log10D in m2/s:'
print best_Dx
print best_Dy
print best_Dz
return D3, D3errors
def invert(self, grid_xyz, symmetry_constraint=True,
smoothness_constraint=True, rim_constraint=True,
rim_value=None, weighting_factor_lambda=0.2,
show_residuals_plot=True):
"""Takes a list of three whole-block concentration profiles (either A/Ao
or water ok but must be consistent for all three) in three orthogonal
directions and list of three integers to indicate number of divisions
in each direction. Returns matrix of values in each grid cell.
Default plot showing residuals for how well results match the whole-block
observations."""
pass
| true |
1ffda676748388b7e112af1c8ee87661635f691e | Python | dmitarint/algo_and_structures_python | /Lesson_3/8.py | UTF-8 | 1,040 | 4.25 | 4 | [] | no_license | """
8. Матрица 5x4 заполняется вводом с клавиатуры кроме последних элементов строк.
Программа должна вычислять сумму введенных элементов каждой строки и
записывать ее в последнюю ячейку строки.
В конце следует вывести полученную матрицу.
"""
import random
# Create a 4x5 matrix (4 rows, 5 columns), pre-filled with zeros.
a=[]
for i in range(4):
    a.append([])
    for j in range(5):
        a[i].append(int())
print(a)
# Fill columns 0-3 of every row with random values;
# column 4 receives the sum of the first four columns of that row.
for i in range(4):
    for j in range(5):
        if j==4:
            a[i][j] = sum(a[i][:j])
        else:
            # manual-entry alternative: a[i][j]=int(input('enter a number: '))
            a[i][j]=int(random.randint(0,100))
print(a)
# for i in range(4):
#     a[i][4]=sum(a[i][:4])
# print(a)
12fd8ad8d45b660ceb90ef092275eb95aaa870fa | Python | MaryAhn/Codetree | /Graph/Topology_dfs.py | UTF-8 | 781 | 3.21875 | 3 | [] | no_license | n = int(input())
# Read an n x n adjacency matrix from stdin: a nonzero entry
# graph[i][j] marks a directed edge i -> j.  (n was read above.)
graph = []
for i in range(n):
    graph.append(list(map(int, input().split())))
stack = []  # post-order DFS stack; popped later to emit a topological order
visited = [-1] * n  # -1 = not yet visited, 0 = visited
def dfs(present, visited, graph, stack):
    """Post-order depth-first search from node `present`.

    Marks `present` visited, recurses into every unvisited neighbor in
    ascending index order, then appends `present` to `stack` (so popping
    the stack afterwards yields a topological order).

    Fixes vs. original: the node count is derived from `len(graph)` instead
    of relying on the module-level global `n`, and the loop variable no
    longer shadows the builtin `next`.
    """
    if visited[present] == -1:  # process each node at most once
        visited[present] = 0
        # Visit connected, unvisited successors, lowest index first.
        for nxt in range(len(graph)):
            if visited[nxt] == -1 and graph[present][nxt] != 0:
                dfs(nxt, visited, graph, stack)
        # All descendants done: record `present` in post-order.
        stack.append(present)
    return stack
for i in range(n):
    # run DFS from every still-unvisited node, lowest index first
    if visited[i] == -1:
        stack = dfs(i, visited, graph, stack)
# Popping the post-order stack yields a topological order (printed 1-indexed).
for i in range(n):
    print(stack.pop() + 1, end=' ')
| true |
b03e1a273b70375e8ec937dd9a08ba412554e6c3 | Python | jinyoungcho/Algorithm_study | /PYTHON_CODE/12851.py3.py | UTF-8 | 600 | 2.875 | 3 | [] | no_license | from collections import deque
MAX = 100000  # positions are confined to [0, MAX]
n,k = list(map(int,input().split()))
check = [-1] * (MAX+1)  # check[p]: shortest time to reach position p (-1 = unseen)
ans = [-1] * (MAX+1)    # ans[p]: number of distinct shortest paths reaching p
q = deque()
q.append(n)
check[n] = 0
ans[n] = 1
# BFS over the moves p+1, p-1, p*2.  BFS order guarantees the first visit
# of a position uses the minimum number of steps.
while q:
    s = q.popleft()
    s_li = [s+1, s-1, s*2]
    for ss in s_li:
        if 0 <= ss <= MAX:
            if check[ss] == -1: # position reached for the first time
                q.append(ss)
                check[ss] = check[s]+1
                ans[ss] = ans[s]
            elif check[ss] == check[s]+1:
                # reached again in the same (shortest) time: add path counts
                ans[ss] += ans[s]
print(check[k])
print(ans[k])
4627cd6f83b792dac2ad3af7513dcd4b98709b1c | Python | ivadimn/py-input | /algorithm/stepik/pizano.py | UTF-8 | 473 | 2.8125 | 3 | [] | no_license | import cProfile
n = 8
m = 4
def period_pisano(n, m):
    """Return the Pisano period: the repeating block of Fibonacci
    residues modulo *m*.

    Short-circuits to [0] when m == 1 and to [0, 1] when n <= 1 (the
    caller only needs F(n) % m, so the full period is unnecessary then).
    """
    period = [0]
    if m == 1:
        return period
    period.append(1)
    if n <= 1:
        return period
    prev, curr = 0, 1
    for _ in range(m * 6):
        prev, curr = curr, (prev + curr) % m
        period.append(curr % m)
        # The trailing pair (0, 1) marks the start of the next repetition.
        if period[-1] == 1 and period[-2] == 0:
            break
    # Drop the sentinel (0, 1) that belongs to the next cycle.
    return period[:-2]
# Bug fix: cProfile.run() executes the statement and prints timing stats,
# but its return value is None -- it does NOT return the profiled
# function's result.  The original assigned that None to `pisano` and
# then crashed on len(None).  Profile for the stats, then call the
# function directly for the actual value.
cProfile.run("period_pisano(n, m)")
pisano = period_pisano(n, m)
# F(n) mod m is the period entry at index n mod (period length).
print(pisano[n % len(pisano)])
41ba0d723962aa9b8f14c1c9fb59eed64cae66a0 | Python | SherryZhu240/python-exercise | /py3/scrapy/scrapy_lamyoung_title_multiprocess.py | UTF-8 | 936 | 2.59375 | 3 | [
"MIT"
] | permissive |
#author lamyoung
import requests
import re
from multiprocessing.dummy import Pool
regex = r"<a href=\"(.*)\">[\s]*?<h2 class=\"post-title\">[\s]*(.*)[\s]*</h2>[\s\S]*?</a>"
def scrapy(index):
    """Fetch blog list page *index* of lamyoung.com and return a text block
    of '<post title>\\n<absolute url>' pairs extracted with the module-level
    regex; returns '' for any non-200 HTTP response."""
    page_url = '';
    if index>1:
        page_url=f'page{index}/'  # page 1 lives at the site root
    url=f'http://lamyoung.com/{page_url}';
    print(url);
    html=requests.get(url);
    if html.status_code == 200:
        html_bytes=html.content;
        html_str=html_bytes.decode();
        # print(html_str)
        all_items=re.findall(regex,html_str);
        # print(all_items)
        write_content=''
        for item in all_items:
            # item = (relative href, post title) per the regex capture groups
            write_content=f'{write_content}\n{item[1]}\nhttp://lamyoung.com{item[0]}\n'
        return write_content
    else:
        return ''
# Fetch pages 1-9 concurrently on 3 workers (multiprocessing.dummy.Pool
# is a thread pool), then concatenate the per-page results into one file.
pool = Pool(3);
orign_num=[x for x in range(1,10)];
result = pool.map(scrapy,orign_num);
# print(f'result : {result}')
write_content = '';
for c in result:
    write_content+=c;
with open('lamyoung_title_multi_out.txt','w',encoding='utf-8') as f:
    f.write(write_content)
| true |
832578c54ec32123a3b413f261d6851fe561f968 | Python | priyakg/100DayOfCodePython | /day012.py | UTF-8 | 135 | 3.875 | 4 | [] | no_license | #Methode 1
if(int(input())%2 == 0):
    print("even")
else:
    print("odd")
# Method 2: conditional expression.  Bug fix: compare with `== 0`, not
# `is 0` -- identity checks against int literals rely on CPython's
# small-int caching and raise a SyntaxWarning on Python 3.8+.
print("even" if int(input())%2 == 0 else "odd")
| true |
48e6a71ae677ea1379e8b726b465f3ba044fbed5 | Python | paulghaddad/solve-it | /leetcode/125_valid_palindrome/test_valid_palindrome.py | UTF-8 | 398 | 3.078125 | 3 | [] | no_license | import pytest
from valid_palindrome import is_palindrome
def test_is_palindrome():
    """Spot-check is_palindrome on empty, plain, mixed-case, and
    punctuated inputs (LeetCode 125 semantics)."""
    cases = [
        ('', True),
        ('ana', True),
        ('anb', False),
        ('Ana', True),
        ('A man, a plan, a canal: Panama', True),
        ('race a car', False),
        ('0P', False),
    ]
    for text, expected in cases:
        assert is_palindrome(text) == expected
| true |
00040e0881598bd43601c08a604d7597e5833739 | Python | duanbibo/app-autotes-python | /util/appiumserver.py | UTF-8 | 827 | 2.5625 | 3 | [] | no_license | import os
import time
#停止和启动本地的appium服务
def stop_appium(post_num=4723):
    '''Kill whatever process is listening on TCP port post_num (the local
    Appium server), if any.'''
    # lsof lists processes bound to the port; empty output = nothing to kill.
    p = os.popen(f'lsof -i tcp:{post_num}')
    p0 = p.read()
    if p0.strip() != '':
        p1 = int(p0.split('\n')[1].split()[1]) # PID column of lsof's first data row
        os.popen(f'kill {p1}') # terminate the process
        print("appium end")
def start_appium(post_num=4723):
    '''Start a local Appium server on port post_num (macOS command only).'''
    stop_appium(post_num) # free the port first if it is already in use
    # Launch the platform-specific command; only the macOS variant is defined.
    cmd_dict = {
        'MAC': f'appium -a 127.0.0.1 -p {post_num} --log appium.log --local-timezone & '
    }
    os.system(cmd_dict['MAC'])
    time.sleep(3) # give the background server a moment to come up
    print("appium start")
add042226bdfc2b5525955f2eddc2aa350875f2f | Python | kukarzev/Reinforcement-Learning---UofA | /Bandit Task Programming/w1_exp.py | UTF-8 | 3,350 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env python
"""
Author: Adam White, Matthew Schlegel, Mohammad M. Ajallooeian
Purpose: for use of Rienforcement learning course University of Alberta Fall 2017
Last Modified by: Andrew Jacobsen, Victor Silva, Mohammad M. Ajallooeian
Last Modified on: 16/9/2017
Experiment runs 2000 runs, each 1000 steps, of an n-armed bandit problem
"""
from rl_glue import * # Required for RL-Glue
RLGlue("w1_env", "w1_agent") #setting the file names of .py of the environment and the agent
import numpy as np
import sys
def save_results(data, data_size, filename):
    """Write the first *data_size* entries of *data* to *filename*, one
    value per line, overwriting any existing file."""
    with open(filename, "w") as data_file:
        data_file.writelines("{0}\n".format(data[idx]) for idx in range(data_size))
if __name__ == "__main__":
num_runs = 2000 #number of runs
max_steps = 1000 #number of steps
# array to store the results of each step
optimal_action = np.zeros(max_steps) #array to store the sum of the boolean value (1 or 0) if the optimal action was chosen and later obtain the mean of each step
cumulative_actions_step = np.zeros(max_steps)#array to store the sum of the rewards obtained in each step and each run and later obtain the mean of these rewards per step
print "\nPrinting one dot for every run: {0} total Runs to complete".format(num_runs)
for k in range(num_runs):
RL_init() #initializing the RL-glue algorithm
RL_start() #starting RL-glue algorithm
for i in range(max_steps):
# RL_step returns (reward, state, action, is_terminal); we need only the
# action in this problem
#best_action = (RL_agent_message("best action?")) #obtain the best actions array(k=1,2,3,...,10) in that step
step = RL_step() #take an action and obtain (reward, state, action, is_terminal)
cumulative_actions_step[i] = cumulative_actions_step[i] + step[0] #sum the reward value obtained in this step to the reward-actions array
best_action = RL_env_message("optimal_action?")
# check if the best action was chosen by verifying if the best action is one of the best actions available
if best_action == step[2]:
optimal_action[i] += 1 # true
'''
check if action taken was optimal
you need to get the optimal action; see the news/notices
announcement on eClass for how to implement this
'''
# update your optimal action statistic here
RL_cleanup() #clean the algorithm to start a new run
print ".",
sys.stdout.flush()
#obtain the mean value in each position of the array that store the sum of all rewards in each step
for n in range(max_steps):
cumulative_actions_step[n] = cumulative_actions_step[n]/num_runs
#obtain the mean value in each position of the array that store boolean value for the action chosen (1 = optimal action chosen, 0 = optimal action not chosen)
for n in range(max_steps):
optimal_action[n] = optimal_action[n]/num_runs
save_results(cumulative_actions_step, max_steps, "RL_EXP_OUT.dat") #save mean rewards results into RL_EXP_OUT.dat
save_results(optimal_action, max_steps, "RL_EXP_BOOLEAN_OPTIMAL_OUT.dat") #save percentage of optimal actions obtained per step into RL_EXP_BOOLEAN_OPTIMAL_OUT.dat
print "\nDone"
| true |
bc23d290c271c1ba0b155a1f8b037a57085100b1 | Python | leeorb321/Data-Structures | /tests/bloomfilter_test.py | UTF-8 | 723 | 2.96875 | 3 | [] | no_license | import unittest
import sys
from structures.bloom import *
class TestBloomFilter(unittest.TestCase):
    """Unit tests for structures.bloom.BloomFilter."""
    def test_contructor(self):  # NOTE(review): name misspells "constructor"
        """A fresh filter starts with an empty bit vector and the given size."""
        bf = BloomFilter(555)
        self.assertEqual(bf._bv, 0)
        self.assertEqual(bf.size, 555)
    def test_add_check(self):
        """check() reports added items present and the probed others absent.

        NOTE(review): Bloom filters can false-positive, so the assertFalse
        checks implicitly depend on this filter's size/hash configuration.
        """
        bf = BloomFilter()
        bf.add(4)
        for i in range(20):
            if i == 4:
                self.assertTrue(bf.check(i))
            else:
                self.assertFalse(bf.check(i))
        bf.add('hello')
        self.assertTrue(bf.check('hello'))
        self.assertFalse(bf.check('goodbye'))
        bf.add('192.168.1.1')
        self.assertTrue(bf.check(('192.168.1.1')))
        self.assertFalse(bf.check(('192.168.1.2')))
| true |
1d45f6a4067e1b3ca88fbef0d598acd2be936307 | Python | hungnphan/How-to-MPI4Py | /03-Collective-p1-bcast.py | UTF-8 | 241 | 2.546875 | 3 | [] | no_license | from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Only the root rank builds the payload; other ranks start with None.
if rank == 0:
    # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the drop-in replacement dtype alias.
    data = np.arange(10, dtype=int) * 10
else:
    data = None

# Broadcast root's data to every rank in the communicator.
data = comm.bcast(data, root=0)
print(f"Rank {rank} received {data}")
5492ee5740b18f05c2ea7566199b7fd0a558726e | Python | ho-lol/CorpusReader | /mkdic_three.py | UTF-8 | 18,598 | 2.59375 | 3 | [] | no_license | import os
import re
from difflib import SequenceMatcher
from itertools import chain, tee, islice
from typing import Union
import nltk.tag.util
##__space_mark="@SP@"
__pre_mark = "<" ##사전만들때 꺾쇠<를 넣거나 일단은 cyk를 위해 공백넣음
__post_mark = ">" ##>
##rule1=re.compile(r'__[0-9]+')
##rule2=re.compile(r'/[A-Z+]+')
##rule3=re.compile(r'/[A-Z+]')
def remove_num(data):
    """Strip every '__<digits>' homograph-index suffix from *data*."""
    return re.sub(r'__[0-9]+', '', data)
def remove_alpha(data):
    """Delete each '/' together with exactly one following uppercase
    letter or '+' (single-character tag marker)."""
    return re.sub(r'/[A-Z+]', '', data)
def remove_alphaplus(data):
    """Delete each '/' together with the whole run of uppercase letters
    and '+' signs that follows it (a full POS-tag suffix)."""
    return re.sub(r'/[A-Z+]+', '', data)
def remove_tag(data):
    """Drop the final '/TAG' suffix of a tagged morpheme, then strip any
    '__NN' homograph index from what remains.

    NOTE(review): if *data* contains no '/', rfind returns -1 and the
    slice silently drops the last character -- presumably callers always
    pass tagged strings; verify at call sites.
    """
    morph_part = data[:data.rfind('/')]
    return remove_num(morph_part)
def exist(values, data):
    """Look for an entry in *values* whose tail (everything after the
    leading count) matches *data*; on a hit, increment that entry's count
    in place and return True, otherwise return False."""
    for entry in values:
        if compare(entry[1:], data):
            entry[0] += 1
            return True
    return False
def compare(list1, list2):
    """Return True when both sequences have the same length and equal
    elements in order, else False."""
    if len(list1) != len(list2):
        return False
    for left, right in zip(list1, list2):
        if left != right:
            return False
    return True
def search_alpha(data):
    """Return the first regex match of '/X' in *data*, where X is an
    uppercase letter or '+'; None when no such marker exists."""
    return re.search(r'/[A-Z+]', data)
def count_dict(dic, key, value):
    """Tally *value* under *key* in *dic*: each bucket is a list of
    rows shaped [count, *value].  A matching row gets its count bumped
    by exist(); otherwise a fresh row with count 1 is appended."""
    rows = dic.get(key)
    if rows:
        if not exist(rows, value):
            rows.append([1] + list(value))
    else:
        dic[key] = [[1] + list(value)]
def make_resdata(mat_blocks, tag_word):
    """Slice the tagged word between the first block's start offset and the
    end of the second-to-last block, split it into '+'-joined morphemes,
    re-attach the next tag marker found after that span, and return each
    morpheme as a [form, tag] pair split on '/'.

    NOTE(review): assumes mat_blocks rows carry (offset, ..., length)
    integer fields -- confirm against the producer of mat_blocks.
    """
    result_txt = tag_word[mat_blocks[0][0]:mat_blocks[-2][0] + mat_blocks[-2][2]]
    result_list = result_txt.split('+')
    # Any '/X' marker occurring after the span belongs to the last morpheme.
    tail = tag_word[mat_blocks[-2][0]:]
    tail_alpha = search_alpha(tail)
    if tail_alpha:
        result_list[-1] = result_list[-1] + str(tail_alpha.group(0))
    return [data.split('/') for data in result_list]
def make_arrays(fnr, fnt):
    """Read a raw-text file and a tagged-text file into parallel lists of
    whitespace-split sentences, skipping blank lines.

    Fix vs. original: the files are now opened with `with` so the handles
    are closed deterministically instead of leaking until GC.

    fnr -- path of the raw corpus file (UTF-8)
    fnt -- path of the tagged corpus file (UTF-8)
    Returns (raw_sentences, tagged_sentences).
    """
    def _read_sentences(path):
        # One list of tokens per non-blank line.
        sentences = []
        with open(path, 'r', encoding="utf8") as fp:
            for line in fp:
                if not line.strip():
                    continue
                sentences.append(line.split())
        return sentences
    return _read_sentences(fnr), _read_sentences(fnt)
def include_delete(opcodes):
    """True when any difflib opcode tuple is a 'delete' operation."""
    return any(op[0] == "delete" for op in opcodes)
def contain_equal(opcodes):
    """True when every difflib opcode tuple is an 'equal' operation
    (i.e. the two compared strings are identical)."""
    return all(op[0] == "equal" for op in opcodes)
def previous_and_next(some_iterable):
    """Yield (previous, current, next) triples over the iterable,
    with None standing in for the missing neighbor at either edge."""
    prev_it, curr_it, next_it = tee(some_iterable, 3)
    shifted_prevs = chain([None], prev_it)
    shifted_nexts = chain(islice(next_it, 1, None), [None])
    return zip(shifted_prevs, curr_it, shifted_nexts)
def mor_replace(pyo_temp, dic_temp, postag_temp, tag_morph, fraction):
    """Handle a difflib 'replace' opcode: mark the boundary between the
    previous entry and the current one with the pre/post marks (< and >)
    in the dictionary-form list, and when the previous entry contains
    '+'-joined morphemes, re-split it into individual entries with tags
    looked up from tag_morph / fraction.

    Returns the (possibly rebuilt) (pyo_temp, dic_temp, postag_temp).
    """
    prev_raw = pyo_temp[-2]
    prev_dic = dic_temp[-2]
    prev_postag = postag_temp[-2]
    if len(prev_raw) == 0: # insert-then-replace: believed not to occur
        print("인설트 리플레이스")
        if prev_dic.count('++'): # doubled '+' -- ambiguous; can this happen?
            print("+가 있을때")
    if not prev_dic.count('+'): # e.g. 자유|롭게 vs 자유|로+/vv웁+게
        prev_dic = prev_dic + __pre_mark # append the '<' boundary mark
        dic_temp[-2] = prev_dic
        dic_temp[-1] = __post_mark + dic_temp[-1]
    else: # previous entry already contains '+'
        if prev_dic.rfind('+') == len(prev_dic) - 1: # trailing '+' becomes '<'
            prev_dic = prev_dic[:-1] + __pre_mark
            dic_temp[-2] = prev_dic
            dic_temp[-1] = __post_mark + dic_temp[-1]
        else: # otherwise still append the '<' mark
            prev_dic = prev_dic + __pre_mark
            dic_temp[-2] = prev_dic
            dic_temp[-1] = __post_mark + dic_temp[-1]
    split_list_byp = prev_dic.split('+')
    if len(split_list_byp) != 1:
        if split_list_byp[-1].find(__pre_mark) != -1:
            # Ambiguous segmentation case, e.g. whether to treat
            # 의무+교육+화+된 or 의무+교육+화된 as 화<된 / 화|된.
            # Rebuild the last two entries morpheme by morpheme.
            temp = (pyo_temp[-1], dic_temp[-1], postag_temp[-1])
            pyo_temp = pyo_temp[:-2]
            dic_temp = dic_temp[:-2]
            postag_temp = postag_temp[:-2]
            lastwd_index = 0
            for word in split_list_byp:
                if word.rfind(__pre_mark) == len(word) - 1:
                    pyo_temp.append(word[:-1])
                else:
                    pyo_temp.append(word)
                dic_temp.append(word)
                # Find this morpheme's tag in the tagged-morpheme list.
                for morph_tag in tag_morph:
                    morph, postag = nltk.str2tuple(morph_tag)
                    if morph.find(word) != -1:
                        postag_temp.append(postag)
                        break
                if word.rfind(__pre_mark) == len(word) - 1:
                    # Marked morpheme: take its tag from the per-syllable table.
                    if fraction[lastwd_index][1].find('+') == -1:
                        postag_temp.append(fraction[lastwd_index][1])
                        break
                    else:
                        postag_temp.append(fraction[lastwd_index][1][:-1])
                        break
                lastwd_index += len(word)
            pyo_temp.append(temp[0])
            dic_temp.append(temp[1])
            postag_temp.append(temp[2])
    return pyo_temp, dic_temp, postag_temp
def mor_freplace(pyo_temp, dic_temp, postag_temp, tag_morph):
    """Strip every '/' from the tag strings in postag_temp, in place.

    Bug fix: the original rebound the loop variable (``i = i.replace(...)``),
    which mutates nothing -- the list was returned to the caller unchanged.
    Assigning back by index performs the intended in-place cleanup.
    (pyo_temp, dic_temp and tag_morph are accepted for signature
    compatibility with the sibling mor_* helpers and are not used.)
    """
    for idx in range(len(postag_temp)):
        postag_temp[idx] = postag_temp[idx].replace('/', '')
def mor_insert(pyo_temp, dic_temp, postag_temp, tag_morph, fraction):
    """Handle a difflib 'insert' opcode: fuse the inserted fragment into
    the previous entry, marking the boundary with the pre/post marks
    (< and >).  When the previous entry contains '+'-joined morphemes it
    is re-split into individual entries; otherwise the last two entries
    are simply merged.

    Returns the rebuilt (pyo_temp, dic_temp, postag_temp).
    """
    prev_raw = pyo_temp[-2]
    prev_dic = dic_temp[-2]
    prev_postag = postag_temp[-2]
    if prev_dic.rfind('+') == len(prev_dic) - 1: # trailing '+' becomes '<'
        prev_dic = prev_dic[:-1] + __pre_mark
    else:
        prev_dic = prev_dic + __pre_mark
    split_list_byp = prev_dic.split('+') # if '+' is present
    if len(split_list_byp) != 1:
        if split_list_byp[-1].find(__pre_mark) != -1:
            # Rebuild the last two entries morpheme by morpheme.
            temp = (pyo_temp[-1], dic_temp[-1], postag_temp[-1])
            pyo_temp = pyo_temp[:-2]
            dic_temp = dic_temp[:-2]
            postag_temp = postag_temp[:-2]
            lastwd_index = 0
            for word in split_list_byp:
                if word.rfind(__pre_mark) == len(word) - 1:
                    pyo_temp.append(word[:-1]) # surface-form dictionary entry
                    dic_temp.append(word + temp[1])
                else:
                    pyo_temp.append(word)
                    dic_temp.append(word)
                # Attach the tag looked up from the tagged-morpheme list.
                for morph_tag in tag_morph:
                    morph, postag = nltk.str2tuple(morph_tag)
                    if morph.find(word) != -1:
                        postag_temp.append(postag)
                        break
                if word.rfind(__pre_mark) == len(word) - 1:
                    if fraction[lastwd_index][1].find('+') == -1:
                        postag_temp.append(fraction[lastwd_index][1] + '+' + temp[2])
                        break
                    else:
                        postag_temp.append(fraction[lastwd_index][1][:-1] + '+' + temp[2])
                        break
                lastwd_index += len(word)
    else: # no '+': the simple case -- just merge the last two entries
        dic_temp[-2] = prev_dic + __post_mark + dic_temp[-1]
        dic_temp = dic_temp[:-1]
        if postag_temp[-2].find('/') != -1:
            postag_temp[-2] = postag_temp[-2][0:postag_temp[-2].find('/')] + '+' + postag_temp[-1]
        postag_temp = postag_temp[:-1]
        pyo_temp = pyo_temp[:-1]
    return pyo_temp, dic_temp, postag_temp
def make_del_block(fraction, raw_word, merge_morph):
    """Align raw_word against merge_morph syllable by syllable and build
    mat_blocks: matching syllables become [raw_syl, [morph, tag]] rows,
    and any mismatching stretch is merged into one block whose morpheme
    side carries the pre/post marks (< and >).
    """
    mat_blocks = []
    index_merge = 0
    # Compare one character at a time; only diverging stretches get merged.
    for index_raw in range(len(raw_word)):
        if raw_word[index_raw] == merge_morph[index_merge]:
            mat_blocks.append([raw_word[index_raw], fraction[index_merge]])
            index_merge += 1
        else:
            # Scan ahead on both sides for the next matching character.
            for nxt_raw in range(index_raw + 1, len(raw_word)):
                for nxt_merge in range(index_merge + 1, len(merge_morph)):
                    if raw_word[nxt_raw] == merge_morph[nxt_merge]:
                        fraction_morph = []
                        fraction_merge = []
                        # Collect the [syl, tag] pairs of the skipped span.
                        for index in range(index_merge, nxt_merge):
                            fraction_merge.extend(fraction[index])
                        if index_raw != 0:
                            # Mark the boundary on the previous block ('<')
                            # and prefix the merged morpheme with '>'.
                            if mat_blocks[index_raw - 1][1][0].find('+') != -1:
                                mat_blocks[index_raw - 1][1][0] = mat_blocks[index_raw - 1][1][0][:-1] + __pre_mark
                                mer_mor = __post_mark + "".join(
                                    fraction_merge[i] for i in range(0, len(fraction_merge), 2))
                            else:
                                mat_blocks[index_raw - 1][1][0] = mat_blocks[index_raw - 1][1][0] + __pre_mark
                                mer_mor = __post_mark + "".join(
                                    fraction_merge[i] for i in range(0, len(fraction_merge), 2))
                        else:
                            mer_mor = "".join(fraction_merge[i] for i in range(0, len(fraction_merge), 2))
                        # Even entries are syllables, odd entries their tags.
                        mer_tag = "".join(fraction_merge[i] for i in range(1, len(fraction_merge), 2))
                        mat_blocks.append([raw_word[index_raw:nxt_raw], [mer_mor, mer_tag]])
                        mat_blocks.append([raw_word[nxt_raw], fraction[nxt_merge]])
                        # NOTE(review): rebinding the `for` loop variables here
                        # does not advance the outer range iteration -- verify
                        # the intended resynchronization actually happens.
                        index_raw = nxt_raw
                        index_merge = nxt_merge
                        break
                if index_raw == len(raw_word) - 1:
                    break # stop once the fraction span has been re-attached
    return mat_blocks
def find_mergeblock(mat_blocks):
    """Scan mat_blocks for runs of consecutive blocks that should be merged
    and return them as [start, end) index pairs.

    A run ends where the '+'-stripped tag changes (pair excludes j) or
    where a block's tag itself contains '+' (pair includes j).
    """
    merge_block = []
    start = 0
    while (start != len(mat_blocks) - 1):
        j = start + 1
        while (j != len(mat_blocks)):
            # Compare tags with '+' removed: a change terminates the run.
            if mat_blocks[start][1][1].replace('+', "") != mat_blocks[j][1][1].replace('+', ""):
                merge_block.append([start, j])
                start = j - 1  # adjusted; incremented again below
                break
            else:
                # A '+' inside the tag also closes the run, but inclusively.
                if mat_blocks[j][1][1].find('+') != -1:
                    merge_block.append([start, j + 1])
                    start = j
                    break
                j += 1
        start = start + 1
    return merge_block
def find_mergeblocklist(merge_block, mat_blocks):
    """For every [start, end) pair spanning more than one block, fuse the
    covered blocks into a single combined block and return the list of
    [start, end, combined_block] triples.

    NOTE(review): `block_temp` aliases mat_blocks[start], so the `+=`
    updates below mutate mat_blocks in place as a side effect -- confirm
    callers rely on (or at least tolerate) this.
    """
    block_list = []
    for ele in merge_block:
        if ele[1] - ele[0] > 1:
            start = ele[0]
            end = ele[1]
            block_temp = mat_blocks[start]
            # Concatenate the raw text and morpheme text of the whole run.
            for f_i in range(ele[0] + 1, ele[1]):
                block_temp[0] += mat_blocks[f_i][0]
                block_temp[1][0] += mat_blocks[f_i][1][0]
            block_list.append([start, end, block_temp])
    return block_list
def make_del_list(merge_block_list, mat_blocks):
    """Rebuild the block sequence with every merged run collapsed to its
    combined block: walk mat_blocks and, whenever an index matches a
    [start, end, combined] triple, substitute the combined block and skip
    to `end`.

    NOTE(review): when merge_block_list is empty the else-branch appends
    the whole mat_blocks list as a single nested element, unlike the
    flat structure produced by the main branch -- verify downstream code
    expects this.
    """
    blocks = []
    if len(merge_block_list) != 0:
        len_word = 0
        # A merge starting at index 0 is emitted before the walk begins.
        if merge_block_list[0][0] == 0:
            blocks.append(merge_block_list[0][2])
            len_word = merge_block_list[0][1]
        while (len_word != len(mat_blocks)):
            blocks.append(mat_blocks[len_word])
            for start, end, block_temp in merge_block_list:
                if (len_word == start):
                    # Replace the just-appended block with the merged one
                    # and jump past the covered span.
                    blocks.pop()
                    blocks.append(block_temp)
                    len_word = end - 1
                    break
            len_word += 1
    else:
        blocks.append([block for block in mat_blocks])
    return blocks
def del_dup(postag_temp):
    """Normalize every tag string in place: fuse '+/' into '+' and let
    del_slash() collapse duplicate adjacent segments.  Returns the same
    (mutated) list for convenience."""
    for idx in range(len(postag_temp)):
        fused = postag_temp[idx].replace("+/", '+')
        postag_temp[idx] = del_slash(fused)
    return postag_temp
def del_slash(postag):
    """Split *postag* at every non-letter character, drop consecutive
    duplicate segments, and rejoin the survivors with '+'.  Strings
    shorter than 5 characters are returned untouched."""
    if len(postag) < 5:
        return postag
    # Boundary positions: a leading 0 sentinel, every non-letter index,
    # and the string length.
    breaks = [0]
    breaks.extend(pos for pos, ch in enumerate(postag) if not ch.isalpha())
    breaks.append(len(postag))
    segments = [postag[breaks[0]:breaks[1]]]
    for left, right in zip(breaks[1:], breaks[2:]):
        if left == right:
            continue
        piece = postag[left + 1:right]
        # Skip a segment identical to the one just kept.
        if segments[-1] != piece:
            segments.append(piece)
    return "+".join(segments)
def make_dict(result_dic, raw_array, tagged_array):
    """Align raw words with their tagged morphemes and accumulate
    surface/dictionary-form/POS triples into ``result_dic``.

    Uses difflib's SequenceMatcher opcodes to align each raw word with the
    concatenation of its morphemes; 'equal', 'insert'/'replace' and 'delete'
    alignments are handled by separate branches below.
    """
    for raw_sent, tagged_sent in zip(raw_array, tagged_array):
        # Skip sentences whose raw/tagged token counts disagree.
        if not len(raw_sent) == len(tagged_sent):
            continue
        for raw_word, tag_word in zip(raw_sent, tagged_sent):
            tag_morph = re.split("(?<=/[A-Z]{2})\+|(?<=/[A-Z]{3})\+", tag_word)  # lookbehind must be fixed-width, so the 2- and 3-letter tag cases are OR-ed
            merge_morph = "".join([remove_tag(morph) for morph in tag_morph])
            SM = SequenceMatcher(None, raw_word, merge_morph)
            opcodes = SM.get_opcodes()
            fraction, pyochung_list, dic_list, postag_list = [], [], [], []
            # Break every morpheme into per-syllable [syllable, tag] pairs.
            for morph_tag in tag_morph:
                morph, tag = nltk.str2tuple(morph_tag)
                for syl in morph:
                    fraction.append([syl, tag])
                fraction[-1][0] = fraction[-1][0] + '+'
                fraction[-1][1] = fraction[-1][1] + '+' ## mark the last syllable of each morph with a trailing '+'
            # Strip the marker from the very last syllable of the word.
            fraction[-1][0] = fraction[-1][0][0]
            fraction[-1][1] = fraction[-1][1][:-1]
            if contain_equal(opcodes): ## every opcode is 'equal': surface matches merged morphs exactly
                for morph in tag_morph:
                    pyochung, postag = nltk.str2tuple(morph)
                    pyochung_list.append(pyochung)
                    dic_list.append(pyochung)
                    postag_list.append(postag)
                continue
            elif not include_delete(opcodes): ## 'delete' behaves differently, so it is handled separately below
                pyo_temp, dic_temp, postag_temp = [], [], []
                for prev, curr, nxt in previous_and_next(opcodes): # handle 'insert' and 'replace' opcodes
                    i1, i2, j1, j2 = curr[1], curr[2], curr[3], curr[4]
                    pyo_temp.append(raw_word[i1:i2])
                    dic_temp.append("".join([w[0] for w in fraction[j1:j2]]))
                    postag_temp.append("/".join([w[1] for w in fraction[j1:j2]]))
                    if curr[0] == "replace":
                        if prev != None:
                            pyo_temp, dic_temp, postag_temp = mor_replace(pyo_temp, dic_temp, postag_temp, tag_morph,
                                                                          fraction)
                        if nxt != None: # could a replace followed by an insert also occur?
                            if nxt[0] == "insert":
                                print("리플레이스 인설트")
                                print(raw_word)
                                print(merge_morph)
                                print(opcodes)
                            else:
                                mor_freplace(pyo_temp, dic_temp, postag_temp, tag_morph)
                    elif curr[0] == "insert":
                        if prev == None: ## odd corpus entry seen here: 이어 이--05/NP이/VCP/어/EC
                            ##                                print(raw_word)
                            ##                                print(merge_morph)
                            ##                                print(tag_word)
                            ##                                print(opcodes)
                            continue
                        pyo_temp, dic_temp, postag_temp = mor_insert(pyo_temp, dic_temp, postag_temp, tag_morph,
                                                                     fraction)
                postag_temp = del_dup(postag_temp)
                pyochung_list.extend(pyo_temp)
                dic_list.extend(dic_temp)
                postag_list.extend(postag_temp)
            else: ## finally, the dreaded 'delete' case...
                mat_blocks = make_del_block(fraction, raw_word, merge_morph)
                ## Re-merge pieces whose tags became duplicated after splitting. Deleting in place
                ## shifts all list indices immediately, so this ended up more hand-rolled than expected.
                merge_block = find_mergeblock(mat_blocks)
                merge_block_list = find_mergeblocklist(merge_block, mat_blocks)
                del_result_list = make_del_list(merge_block_list, mat_blocks)
                for block in del_result_list:
                    pyochung_list.append(block[0])
                    dic_list.append(block[1][0])
                    postag_list.extend(block[1][1])
            ## Everything is produced at this point, but the meaning of '+' should be pinned
            ## down and this reviewed once more; adding entries to the dictionary itself is easy.
            for pyo, di, pos in zip(pyochung_list, dic_list, postag_list):
                count_dict(result_dic, str(pyo), [di, pos])
def make_df(result, fn="dictionary1.bin"):
    """Serialize the result dictionary to *fn* as a binary pickle."""
    import pickle
    with open(fn, 'wb') as out:
        pickle.dump(result, out)
def make_df_txt(result, fn="dictionary1.txt"):
    """Dump the result dictionary to *fn*, one 'key value' pair per line."""
    with open(fn, 'w') as out:
        for key, value in result.items():
            out.write("{} {}\n".format(key, value))
if __name__ == "__main__":
    result_dic = {}
    # Remember where we started; we chdir into the corpus directory below.
    # NOTE(review): curr_path is abspath(__file__), i.e. a *file* path --
    # os.chdir(curr_path) at the bottom will raise; its dirname was probably
    # intended. TODO confirm.
    curr_path: Union[bytes, str] = os.path.abspath(__file__)
    path: object = nltk.data.find('corpora\\sejong').path
    assert isinstance(path, object)
    os.chdir(path)
    # Partition the Sejong corpus files into raw (.sjr) and tagged (.sjt).
    files_raw = []
    files_tagged = []
    for fn in os.listdir(path):
        assert isinstance(fn, object)
        if "sjr" in fn:
            files_raw.append(fn)
        elif "sjt" in fn:
            files_tagged.append(fn)
    # For testing only; remove later
    # files_raw = [files_raw[1]]
    # files_tagged = [files_tagged[1]]
    raw_array = []
    tagged_array = []
    # Load and concatenate every raw/tagged file pair.
    for i in range(len(files_raw)):
        temp_raw_array, temp_tagged_array = make_arrays(fnr=files_raw[i], fnt=files_tagged[i])
        raw_array.extend(temp_raw_array)
        tagged_array.extend(temp_tagged_array)
    print("리스트만들기 완료")
    make_dict(result_dic, raw_array, tagged_array)
    print("사전만들기완료")
    os.chdir(curr_path)
    make_df_txt(result_dic)
    # TODO: the dictionary file ends up under the Sejong corpus directory
    # instead of the original working directory; exception handling needed.
| true |
87f97e315bf9b1695c3aaa8e6efbfb341067bfa1 | Python | lafengnan/algorithm | /bin/test.py | UTF-8 | 6,341 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
import sys
import os
import random
import traceback
from optparse import OptionParser
os.path.sys.path.append(os.path.dirname
(os.path.dirname(os.path.abspath(__file__))))
from algorithms import sort, polynomial
from data_structure import debug, linklist, stack, queue
# Sub-commands recognized by this test driver; each exercises one
# algorithm or data-structure module.
Commands = ("sort", "singlelinklist", "doublelinklist",
            "search", "stack", 'poly', 'queue', 'pqueue')
# optparse usage text; "%prog" is replaced with the program name.
USAGE = """
%prog <command> [options]
Commands:
""" + '\n'.join(["%10s: " % x for x in Commands])
def main():
    """Parse the command line and dispatch to the selected demo.

    Returns 1 on a usage error, otherwise None (which sys.exit maps to
    exit status 0).
    """
    parser = OptionParser(USAGE)
    parser.add_option('-a', '--algorithm', type="string", dest="algorithm",
                      default='qsort',
                      help="config which sort algorithm to\
                      run[bubble|insert|merge|heapsort|qsort]")
    parser.add_option('-p', '--poly', type="string", dest="poly",
                      default='horner',
                      help="config the polynomial evaluation\
                      algorithm[horner|naive]")
    parser.add_option('-s', '--shardsize', type="int", dest="shard_size",
                      default=4,
                      help="config the shard size for merge_with_insert\
                      algorithm")
    parser.add_option('-v', action="store_true", dest="verbose", default=False,
                      help="verbose mode")
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        print "Error: config the command"
        return 1
    cmd = args[0]
    if cmd not in Commands:
        parser.print_help()
        print "Error: Unkown command: ", cmd
        return 1
    if cmd == 'sort':
        # Exercise the chosen sorting algorithm; recursive variants need
        # extra keyword arguments describing the initial sub-problem.
        #a = [40, 50, 20, 0, 1, 2, -1 ,30]
        a = [40, 50, 40, 20, 0, 1, 2, -1 ,30, 60]
        b = range(1000, -1, -1)
        x = sort.Sorter()
        if options.algorithm not in x.algorithms:
            print "No such sort algorithm:{}".format(options.algorithm)
            return 1
        method = options.algorithm
        if method == 'bubble_recursion':
            x.run(method, a, len=len(a))
        elif method == 'insert_recursion':
            x.run(method, a, idx=1)
        elif method.startswith('merge') or method == 'qsort':
            x.run(method, a, low=0, high=len(a)-1)
        # The case below is used to get inversion-pair counts in one sequence
        # <<Introduction to Algorithms, page 24, 2-4>>: the inversion-pair
        # number equals the swap count of insertion sort
            #c = [2, 3, 8, 6, 1, 5]
            #x.run(method, c, low=0, high=len(b)-1, shard_size=3)
            #x.run(method, b, low=0, high=len(b)-1)
        else:
            x.run(method, a)
            #x.run(method, b)
        x.info()
    elif cmd == 'singlelinklist':
        # Build a singly linked list, then exercise traversal, reversal,
        # middle-seeking, and item get/set.
        link_list = linklist.SingleLinkList()
        data = ['a', 'b', 'c', 1, 2, 4, '0', 'anan', 'shanghai', 'rain']
        for i in xrange(len(data)):
            node = linklist.Node(data[i])
            link_list.insert_node_rear(node)
            #link_list.insert_node_head(node)
        print link_list
        print "trave list:"
        link_list.travel_list()
        link_list.reverse_list()
        print "after reverse:"
        link_list.travel_list()
        mid = link_list.seek_mid()
        print "middle node index {}, data: {}".format(mid.idx, mid.data)
        link_list[mid.idx] = 'anan'
        link_list.travel_list()
        print link_list[0]
        #link_list.remove_node(mid.idx)
    elif cmd == 'doublelinklist':
        # Same idea for the doubly linked list, including error paths
        # exercised via IndexError.
        dllist = linklist.DoubleLinkList()
        try:
            for i in xrange(5):
                dllist.insert_node(i)
        except IndexError as e:
            print e
        print dllist
        dllist.travel()
        try:
            dllist.insert_node('anan', 3)
        except IndexError as e:
            print e
        print dllist
        dllist.travel()
        for p in dllist:
            print p
        try:
            dllist.remove_node(2)
        except IndexError as e:
            print e
        print dllist
        dllist.travel()
        print "\n", dllist[4]
        dllist[0] = 'anan'
        print "\n"
        dllist.travel()
    elif cmd == 'stack':
        s = stack.Stack(size=15, verbose=options.verbose)
        for i in xrange(10):
            s.push(i)
        s.info()
        print "stack size:{}, active elements:{}, free_space:{}"\
                .format(s.size, len(s), s.free_space)
    elif cmd == 'poly':
        # Evaluate a 1000-term polynomial at x=5 via Horner or the naive scheme.
        x = 5
        factors = range(1000)
        if options.poly == 'horner':
            print polynomial.horner(x, factors)
        else:
            print polynomial.naive(x, factors)
    elif cmd == 'search':
        a = [40, 50, 40, 20, 0, 1, 2, -1 ,30]
        b = range(1000,0,-1)
        search = sort.Search(options.algorithm, verbose=options.verbose)
        print search.search(1, a)
        #print search.search(0, a)
        #print search.search(2, a)
        #print search.search(40, a)
        #print search.search(60, a)
        print search.search(20, b)
        print search.search(90, b)
        print search.search(90, a)
    elif cmd == 'queue':
        a = [40, 50, 40, 20, 0, 1, 2, -1 ,30]
        q = queue.Queue(capacity=20, verbose=options.verbose)
        for x in a:
            q.enqueue(x)
        q.info()
        print q[3]
        q[3] = 'anan'
        q.info()
        print q.head, q.rear
        i = 0
        # Drain the queue; dequeue errors on emptiness are deliberately ignored.
        while i < len(a):
            try:
                print "dequeue: ", q.dequeue()
            except Exception as e:
                pass
            i += 1
        q.info()
        print q.head, q.rear
    elif cmd == 'pqueue':
        # Priority queue demo: enqueue with random priorities in [0, 3].
        #a = [40, 50, 40, 20, 0, 1, 2, -1 ,30, 60]
        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        pf = lambda end: random.randint(0, end)
        q = queue.PQueue(20, verbose=options.verbose)
        try:
            for x in a:
                q.enqueue(x, pf(3))
        except Exception as e:
            debug(e, "exceptions")
            exc_type, exc_value, exc_tb = sys.exc_info()
            debug(traceback.print_tb(exc_tb, file=sys.stdout), "Exception")
        q.info()
        print "dequeue: ", q.dequeue()
        q.info()
        q.change_priority(-1, -3)
        q.info()
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| true |
f7eb64166936c15160784b703c694e42c380e76f | Python | DavidKo3/lstm_tensorflow | /src/rnn_tensorflow.py | UTF-8 | 2,180 | 3.1875 | 3 | [] | no_license | '''
A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
'''
To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
handle 28 sequences of 28 steps for every sample.
'''
# Training hyperparameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10  # log every N training steps
# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28) -- one image row per step
n_steps = 28 # timesteps (one per image row)
n_hidden = 128 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input: batch of (n_steps, n_input) sequences and one-hot labels
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights for the final hidden -> class projection
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN( X, weights, biases):
# Prepare data shape to match 'rnn' function requirements
# Current data input shape: (batch_size , n_steps, n_input)
# Required shape : 'n_steps' tensors list of shape (batch_size, n_input)
# Permuting batch_size and n_steps
X = tf.transpose(x, [1, 0 , 2]) # ( n_steps, batch_size , n_input)
# Reshaping to (n_steps*batch_size , n_input)
X = tf.reshape(x , [-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
X = tf.split(0, n_steps, X)
# Define a lstm cell with tensorflow
lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.rnn(lstm_cell, x, dtype = tf.float32)
| true |
b503a260629e8c774127e2fc62eb00a960393aa2 | Python | babiswas/Python-Design-Patterns | /testC80.py | UTF-8 | 1,310 | 3.859375 | 4 | [] | no_license | from abc import ABC,abstractmethod
class Pet(ABC):
    """Abstract product in the pet factory demo.

    Fix: the original declared ``class Pet:`` without inheriting ABC, so
    ``@abstractmethod`` had no effect (Pet was freely instantiable and
    subclasses were never forced to implement anything). Subclassing ABC
    (already imported at the top of the file) makes the contract enforced.
    """
    @abstractmethod
    def speak(self):
        """Return the sound this pet makes."""
        pass
    @abstractmethod
    def have_food(self):
        """Return a description of this pet eating."""
        pass
class Dog(Pet):
    """Concrete pet: a dog."""

    def __init__(self, name):
        # The dog's given name, used in every message below.
        self.name = name

    def speak(self):
        """Return the dog's bark."""
        return "{} bow bow".format(self.name)

    def have_food(self):
        """Return what the dog eats."""
        return "{} have flesh".format(self.name)

    def __str__(self):
        return "{} is dog name".format(self.name)
class Cat(Pet):
    """Concrete pet: a cat."""

    def __init__(self, name):
        # The cat's given name, used in every message below.
        self.name = name

    def speak(self):
        """Return the cat's meow."""
        return "{} mew mew".format(self.name)

    def have_food(self):
        """Return what the cat eats."""
        return "{} have milk".format(self.name)

    def __str__(self):
        return "{} is cat name".format(self.name)
class DogFactory:
    """Concrete factory producing a default Dog."""

    def get_pet(self):
        """Return a new Dog named 'doggy'."""
        pet = Dog("doggy")
        return pet
class CatFactory:
    """Concrete factory producing a default Cat."""

    def get_pet(self):
        """Return a new Cat named 'meww'."""
        pet = Cat("meww")
        return pet
class Pet_factory:
    """Client that drives any concrete factory via its get_pet() hook."""

    def __init__(self, pet_factory=None):
        # Concrete factory (e.g. DogFactory / CatFactory) to delegate to.
        self.pet_fact = pet_factory

    def pet_act(self):
        """Create a pet through the configured factory and report on it."""
        animal = self.pet_fact.get_pet()
        sound = animal.speak()
        meal = animal.have_food()
        print(f"{animal} is the pet name")
        print(f"{meal} is pet food")
        print(f"{sound} is pet speak")
# Drive the abstract-factory demo: run the same client code
# (Pet_factory.pet_act) against each concrete factory in turn.
dog_factory=DogFactory()
m=Pet_factory(dog_factory)
m.pet_act()
cat_factory=CatFactory()
m=Pet_factory(cat_factory)
m.pet_act()
| true |
7c429e9a47bb3293a25f8fbf80b0484a4535548c | Python | zhenghuadai/tinynn | /core/loss.py | UTF-8 | 3,750 | 3.375 | 3 | [
"MIT"
] | permissive | """Loss functions"""
import numpy as np
class Loss(object):
    """Base interface for loss functions.

    Subclasses implement ``loss`` (scalar batch loss) and ``grad``
    (gradient of the loss with respect to the predictions).
    """
    def loss(self, predicted, actual):
        # Return the scalar loss for a batch of predictions.
        raise NotImplementedError
    def grad(self, predicted, actual):
        # Return d(loss)/d(predicted); same shape as `predicted`.
        raise NotImplementedError
class MSE(Loss):
    """Mean squared error, averaged over the batch dimension."""

    def loss(self, predicted, actual):
        """Return 0.5 * sum((predicted - actual)^2) / batch_size."""
        batch = predicted.shape[0]
        diff = predicted - actual
        return 0.5 * np.sum(diff ** 2) / batch

    def grad(self, predicted, actual):
        """Return d(loss)/d(predicted) = (predicted - actual) / batch_size."""
        batch = predicted.shape[0]
        return (predicted - actual) / batch
class MAE(Loss):
    """Mean absolute error, averaged over the batch dimension."""

    def loss(self, predicted, actual):
        """Return sum(|predicted - actual|) / batch_size."""
        batch = predicted.shape[0]
        return np.abs(predicted - actual).sum() / batch

    def grad(self, predicted, actual):
        """Return sign(predicted - actual) / batch_size (subgradient at 0)."""
        batch = predicted.shape[0]
        return np.sign(predicted - actual) / batch
class Huber(Loss):
    """Huber loss: quadratic for small errors, linear for large ones."""

    def __init__(self, delta=1.0):
        # Threshold between the quadratic (MSE) and linear (MAE) regimes.
        self._delta = delta

    def loss(self, predicted, actual):
        """Return the mean Huber loss over the batch."""
        l1_dist = np.abs(predicted - actual)
        mse_mask = l1_dist < self._delta  # quadratic region
        mae_mask = ~mse_mask  # linear region
        mse = 0.5 * (predicted - actual) ** 2
        mae = self._delta * np.abs(predicted - actual) - 0.5 * self._delta ** 2
        m = predicted.shape[0]
        return np.sum(mse * mse_mask + mae * mae_mask) / m

    def grad(self, predicted, actual):
        """Return d(loss)/d(predicted).

        Fixes two bugs in the previous implementation: the linear-region
        gradient was missing the delta factor (d/dx of delta*|x| is
        delta*sign(x)), and the result was divided by the batch size twice.
        """
        err = predicted - actual
        mse_mask = np.abs(err) < self._delta  # quadratic region
        mae_mask = ~mse_mask  # linear region
        m = predicted.shape[0]
        mse_grad = err
        mae_grad = self._delta * np.sign(err)
        return (mse_grad * mse_mask + mae_grad * mae_mask) / m
class SoftmaxCrossEntropy(Loss):
    """Softmax + cross-entropy on raw logits against one-hot labels.

    L = weight[class] * (-log(exp(x[class]) / sum(exp(x))))

    Fixes vs the previous implementation: the softmax was normalized over
    the entire batch instead of per row, ``grad`` used the raw logits
    instead of softmax(logits), and the class weights were indexed with a
    one-hot array instead of class indices.
    """

    def __init__(self, weight=None):
        """
        :param weight: A 1D tensor [n_classes] assigning weight to each
            corresponding class.
        """
        weight = np.asarray(weight) if weight is not None else weight
        self._weight = weight

    def _softmax(self, logits):
        # Row-wise numerically stable softmax.
        exps = np.exp(logits - np.max(logits, axis=1, keepdims=True))
        return exps / np.sum(exps, axis=1, keepdims=True)

    def loss(self, logits, labels):
        """Return the mean (optionally class-weighted) cross-entropy."""
        m = logits.shape[0]
        p = self._softmax(logits)
        nll = -np.log(np.sum(p * labels, axis=1))
        if self._weight is not None:
            # labels are one-hot; recover the class index to look up weights.
            nll *= self._weight[np.argmax(labels, axis=1)]
        return np.sum(nll) / m

    def grad(self, logits, labels):
        """Return d(loss)/d(logits) = (softmax(logits) - labels) / m."""
        m = logits.shape[0]
        return (self._softmax(logits) - labels) / m
class SparseSoftmaxCrossEntropy(Loss):
    """Softmax cross-entropy where labels are integer class indices.

    Fixes vs the previous implementation: the softmax was normalized over
    the whole batch rather than per row, and ``grad`` subtracted 1 from
    the raw logits instead of from softmax(logits).
    """

    def __init__(self, weight=None):
        # Optional 1D per-class weights applied to each sample's loss.
        weight = np.asarray(weight) if weight is not None else weight
        self._weight = weight

    def _softmax(self, logits):
        # Row-wise numerically stable softmax.
        exps = np.exp(logits - np.max(logits, axis=1, keepdims=True))
        return exps / np.sum(exps, axis=1, keepdims=True)

    def loss(self, logits, labels):
        """Return the mean negative log-likelihood of the true classes."""
        m = logits.shape[0]
        p = self._softmax(logits)
        nll = -np.log(p[range(m), labels])
        if self._weight is not None:
            nll *= self._weight[labels]
        return np.sum(nll) / m

    def grad(self, logits, actual):
        """Return d(loss)/d(logits): softmax minus 1 at the true class, /m."""
        m = logits.shape[0]
        grad = self._softmax(logits)
        grad[range(m), actual] -= 1.0
        return grad / m
class SigmoidCrossEntropy(Loss):
    """Per-element sigmoid cross-entropy.

    logits = a, label = y
    L = -y * log(1 / (1 + exp(-a))) - (1-y) * log(exp(-a) / (1 + exp(-a)))
      = -y * a + log(1 + exp(a))

    Computed as -y * a + logaddexp(0, a): the previous closed form
    (-y*a + log(1 + exp(-a)) + a) is algebraically identical but overflows
    exp(-a) for large negative a; np.logaddexp is stable for both signs.
    """

    def __init__(self, weight=None):
        # NOTE(review): self._weight is stored but never applied in loss()
        # or grad() -- confirm whether weighting was intended here.
        weight = np.asarray(weight) if weight is not None else weight
        self._weight = weight

    def loss(self, logits, labels):
        """Return the mean sigmoid cross-entropy over the batch."""
        m = logits.shape[0]
        cost = -labels * logits + np.logaddexp(0, logits)
        return np.sum(cost) / m

    def grad(self, logits, labels):
        """Return d(loss)/d(logits) = (sigmoid(logits) - labels) / m."""
        m = logits.shape[0]
        grad = -labels + 1.0 / (1 + np.exp(-logits))
        return grad / m
| true |
77db3075c76c6e8ce14926f3355fd4d11804c019 | Python | gschivley/pudl | /src/pudl/glue/ferc1_eia.py | UTF-8 | 28,084 | 3 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | """
Extract and transform glue tables between FERC Form 1 and EIA 860/923.
FERC1 and EIA report on many of the same plants and utilities, but have no
embedded connection. We have combed through the FERC and EIA plants and
utilities to generate id's which can connect these datasets. The resulting
fields in the PUDL tables are `plant_id_pudl` and `utility_id_pudl`,
respectively. This was done by hand in a spreadsheet which is in the
`package_data/glue` directory. When mapping plants, we considered a plant a
co-located collection of electricity generation equipment. If a coal plant was
converted to a natural gas unit, our aim was to consider this the same plant.
This module simply reads in the mapping spreadsheet and converts it to a
dictionary of dataframes.
Because these mappings were done by hand and for every one of FERC Form 1's
thousands of reported plants, we know there are probably some incorrect or
incomplete mappings. If you see a `plant_id_pudl` or `utility_id_pudl` mapping
that you think is incorrect, please open an issue on our Github!
Note that the PUDL IDs may change over time. They are not guaranteed to be
stable. If you need to find a particular plant or utility reliably, you should
use its plant_id_eia, utility_id_eia, or utility_id_ferc1.
Another note about these id's: these id's map our definition of plants, which
is not the most granular level of plant unit. The generators are typically the
smaller, more interesting unit. FERC does not typically report in units
(although it sometimes does), but it does often break up gas units from coal
units. EIA reports on the generator and boiler level. When trying to use these
PUDL id's, consider the granularity that you desire and the potential
implications of using a co-located set of plant infrastructure as an id.
"""
import importlib
import logging
import pandas as pd
import sqlalchemy as sa
import pudl
import pudl.constants as pc
logger = logging.getLogger(__name__)
def get_plant_map():
    """Read in the manual FERC to EIA plant mapping data."""
    # Per-column parsers for the hand-maintained mapping spreadsheet.
    plant_converters = {
        'plant_id_pudl': int,
        'plant_name_pudl': str,
        'utility_id_ferc1': int,
        'utility_name_ferc1': str,
        'plant_name_ferc1': str,
        'plant_id_eia': int,
        'plant_name_eia': str,
        'utility_name_eia': str,
        'utility_id_eia': int,
    }
    mapping_xlsx = importlib.resources.open_binary(
        'pudl.package_data.glue', 'mapping_eia923_ferc1.xlsx')
    return pd.read_excel(
        mapping_xlsx,
        'plants_output',
        na_values='',
        keep_default_na=False,
        converters=plant_converters,
    )
def get_utility_map():
    """Read in the manual FERC to EIA utility mapping data."""
    # Per-column parsers for the hand-maintained mapping spreadsheet.
    utility_converters = {
        'utility_id_pudl': int,
        'utility_name_pudl': str,
        'utility_id_ferc1': int,
        'utility_name_ferc1': str,
        'utility_id_eia': int,
        'utility_name_eia': str,
    }
    mapping_xlsx = importlib.resources.open_binary(
        'pudl.package_data.glue', 'mapping_eia923_ferc1.xlsx')
    return pd.read_excel(
        mapping_xlsx,
        'utilities_output',
        na_values='',
        keep_default_na=False,
        converters=utility_converters,
    )
def get_db_plants_ferc1(pudl_settings, years):
    """
    Pull a dataframe of all plants in the FERC Form 1 DB for the given years.

    This function looks in the f1_steam, f1_gnrt_plant, f1_hydro and
    f1_pumped_storage tables, and generates a dataframe containing every unique
    combination of respondent_id (utility_id_ferc1) and plant_name it finds.
    Also included is the capacity of the plant in MW (as reported in the
    raw FERC Form 1 DB), the respondent_name (utility_name_ferc1) and a column
    indicating which of the plant tables the record came from. Plant and
    utility names are translated to lowercase, with leading and trailing
    whitespace stripped and repeating internal whitespace compacted to a single
    space.

    This function is primarily meant for use generating inputs into the manual
    mapping of FERC to EIA plants with PUDL IDs.

    Args:
        pudl_settings (dict): Dictionary containing various paths and database
            URLs used by PUDL.
        years (iterable): Years for which plants should be compiled.

    Returns:
        pandas.DataFrame: A dataframe containing columns
        utility_id_ferc1, utility_name_ferc1, plant_name, capacity_mw, and
        plant_table. Each row is a unique combination of utility_id_ferc1 and
        plant_name.

    Raises:
        ValueError: If any requested year has no FERC Form 1 data available.
    """
    # Need to be able to use years outside the "valid" range if we're trying
    # to get new plant ID info...
    for yr in years:
        if yr not in pc.data_years['ferc1']:
            raise ValueError(
                f"Input year {yr} is not available in the FERC data.")
    # Grab the FERC 1 DB metadata so we can query against the DB w/ SQLAlchemy:
    ferc1_engine = sa.create_engine(pudl_settings["ferc1_db"])
    ferc1_meta = sa.MetaData(bind=ferc1_engine)
    ferc1_meta.reflect()
    ferc1_tables = ferc1_meta.tables
    # This table contains the utility names and IDs:
    respondent_table = ferc1_tables['f1_respondent_id']
    # These are all the tables we're gathering "plants" from:
    plant_tables = ['f1_steam', 'f1_gnrt_plant',
                    'f1_hydro', 'f1_pumped_storage']
    # FERC doesn't use the same column names for the same values across all of
    # their tables... but all of these are capacity in MW.
    capacity_cols = {'f1_steam': 'tot_capacity',
                     'f1_gnrt_plant': 'capacity_rating',
                     'f1_hydro': 'tot_capacity',
                     'f1_pumped_storage': 'tot_capacity'}
    # Generate a list of all combinations of utility ID, utility name, and
    # plant name that currently exist inside the raw FERC Form 1 Database, by
    # iterating over the tables that contain "plants" and grabbing those
    # columns (along with their capacity, since that's useful for matching
    # purposes)
    all_plants = pd.DataFrame()
    for tbl in plant_tables:
        # DISTINCT (respondent, plant, capacity) rows joined to utility names,
        # restricted to the requested report years and non-empty plant names.
        plant_select = sa.sql.select([
            ferc1_tables[tbl].c.respondent_id,
            ferc1_tables[tbl].c.plant_name,
            ferc1_tables[tbl].columns[capacity_cols[tbl]],
            respondent_table.c.respondent_name
        ]).distinct().where(
            sa.and_(
                ferc1_tables[tbl].c.respondent_id == respondent_table.c.respondent_id,
                ferc1_tables[tbl].c.plant_name != '',
                ferc1_tables[tbl].c.report_year.in_(years)
            )
        )
        # Add all the plants from the current table to our bigger list:
        all_plants = all_plants.append(
            pd.read_sql(plant_select, ferc1_engine).
            rename(columns={"respondent_id": "utility_id_ferc1",
                            "respondent_name": "utility_name_ferc1",
                            "plant_name": "plant_name_ferc1",
                            capacity_cols[tbl]: "capacity_mw"}).
            pipe(pudl.helpers.strip_lower, columns=["plant_name_ferc1",
                                                    "utility_name_ferc1"]).
            assign(plant_table=tbl).
            loc[:, ["utility_id_ferc1",
                    "utility_name_ferc1",
                    "plant_name_ferc1",
                    "capacity_mw",
                    "plant_table"]]
        )
    # We don't want dupes, and sorting makes the whole thing more readable:
    all_plants = (
        all_plants.drop_duplicates(["utility_id_ferc1", "plant_name_ferc1"]).
        sort_values(["utility_id_ferc1", "plant_name_ferc1"])
    )
    return all_plants
def get_mapped_plants_ferc1():
    """
    Generate a dataframe containing all previously mapped FERC 1 plants.

    FERC Form 1 reports the same plant under different name spellings from
    year to year and provides no unique plant ID, so the manually curated
    mapping spreadsheet is the only complete record of every
    (utility_id_ferc1, plant_name) combination that has already been
    assigned a PUDL ID. This pulls that full record -- which is what newly
    encountered FERC plants must be compared against when integrating new
    data, since the PUDL DB only stores one canonical name per plant.

    Returns:
        pandas.DataFrame: columns utility_id_ferc1, utility_name_ferc1 and
        plant_name_ferc1; one row per unique (utility_id_ferc1,
        plant_name_ferc1) pair.
    """
    keep_cols = ["utility_id_ferc1", "utility_name_ferc1", "plant_name_ferc1"]
    plants = pudl.glue.ferc1_eia.get_plant_map()
    plants = plants.loc[:, keep_cols]
    plants = plants.dropna(subset=["utility_id_ferc1"])
    # Normalize whitespace/case so comparisons against raw DB names work.
    plants = pudl.helpers.strip_lower(plants, columns=keep_cols)
    plants = plants.astype({"utility_id_ferc1": int})
    plants = plants.drop_duplicates(["utility_id_ferc1", "plant_name_ferc1"])
    return plants.sort_values(["utility_id_ferc1", "plant_name_ferc1"])
def get_mapped_utils_ferc1():
    """
    Read in the list of manually mapped utilities for FERC Form 1.

    Unless a new utility has appeared in the database, this should be
    identical to the full list of utilities available in the FERC Form 1
    database.

    Returns:
        pandas.DataFrame: one row per utility_id_ferc1, with its
        utility_name_ferc1, sorted by ID.
    """
    keep_cols = ["utility_id_ferc1", "utility_name_ferc1"]
    utils = pudl.glue.ferc1_eia.get_utility_map()
    utils = utils.loc[:, keep_cols]
    utils = utils.dropna(subset=["utility_id_ferc1"])
    # Normalize whitespace/case so comparisons against raw DB names work.
    utils = pudl.helpers.strip_lower(utils, columns=keep_cols)
    utils = utils.drop_duplicates("utility_id_ferc1")
    utils = utils.astype({"utility_id_ferc1": int})
    return utils.sort_values(["utility_id_ferc1"])
def get_unmapped_plants_ferc1(pudl_settings, years):
    """
    Generate a DataFrame of all unmapped FERC plants in the given years.

    Compares every plant found in the raw FERC Form 1 DB for *years*
    against the manually mapped plants, returning only the plants that
    have not yet been assigned a PUDL ID.

    Args:
        pudl_settings (dict): Dictionary containing various paths and
            database URLs used by PUDL.
        years (iterable): Years for which plants should be compiled from
            the raw FERC Form 1 DB.

    Returns:
        pandas.DataFrame: columns utility_id_ferc1, utility_name_ferc1,
        plant_name_ferc1, capacity_mw and plant_table; one row per
        still-unmapped (utility_id_ferc1, plant_name_ferc1) pair.
    """
    key_cols = ["utility_id_ferc1", "plant_name_ferc1"]
    in_db = get_db_plants_ferc1(pudl_settings, years).set_index(key_cols)
    mapped = get_mapped_plants_ferc1().set_index(key_cols)
    # Plants present in the DB but absent from the manual mapping.
    new_idx = in_db.index.difference(mapped.index)
    return in_db.loc[new_idx].reset_index()
def get_unmapped_utils_ferc1(pudl_settings, years):
    """
    Generate a list of as-of-yet unmapped utilities from the FERC Form 1 DB.

    Finds utilities which exist in the FERC Form 1 database for the
    requested years but do not show up among the mapped plants. Note that
    only utilities with plants can be detected this way -- utilities with
    no associated plants never appear in the plant mapping at all.

    Args:
        pudl_settings (dict): Dictionary containing various paths and
            database URLs used by PUDL.
        years (iterable): Years for which plants should be compiled from
            the raw FERC Form 1 DB.

    Returns:
        pandas.DataFrame: utility_id_ferc1 / utility_name_ferc1 rows for
        utilities with unmapped plants and no entry in the utility map.
    """
    mapped = get_mapped_utils_ferc1().set_index("utility_id_ferc1")
    # Any unmapped utility necessarily owns at least one unmapped plant.
    candidates = get_unmapped_plants_ferc1(pudl_settings, years)
    candidates = candidates.loc[:, ["utility_id_ferc1", "utility_name_ferc1"]]
    candidates = candidates.drop_duplicates("utility_id_ferc1")
    candidates = candidates.set_index("utility_id_ferc1")
    # Keep only the candidates that don't appear in the mapping at all.
    new_idx = candidates.index.difference(mapped.index)
    return candidates.loc[new_idx].reset_index()
def get_db_plants_eia(pudl_engine):
    """
    Get a list of all EIA plants appearing in the PUDL DB.

    Used to determine which plants still need PUDL Plant IDs assigned in
    the FERC 1 / EIA mapping spreadsheet. Only makes sense against a PUDL
    DB that has all the EIA data loaded.

    Args:
        pudl_engine (sqlalchemy.engine.Engine): A database connection
            engine for connecting to a PUDL SQLite database.

    Returns:
        pandas.DataFrame: plant_id_eia, plant_name_eia and state columns,
        one row per plant_id_eia, sorted by ID.
    """
    plants = pd.read_sql("plants_entity_eia", pudl_engine)
    plants = plants.loc[:, ["plant_id_eia", "plant_name_eia", "state"]]
    plants = pudl.helpers.strip_lower(plants, columns=["plant_name_eia"])
    plants = plants.astype({"plant_id_eia": int})
    plants = plants.drop_duplicates("plant_id_eia")
    return plants.sort_values("plant_id_eia")
def get_mapped_plants_eia():
    """
    Get a list of all EIA plants that have been assigned PUDL Plant IDs.

    Reads the manually maintained FERC 1 / EIA plant and utility mapping
    spreadsheet shipped in the package data.

    Returns:
        pandas.DataFrame: plant_id_eia and plant_name_eia values for every
        EIA plant which has already been assigned a PUDL Plant ID.
    """
    plants = pudl.glue.ferc1_eia.get_plant_map()
    plants = plants.loc[:, ["plant_id_eia", "plant_name_eia"]]
    plants = plants.dropna(subset=["plant_id_eia"])
    plants = pudl.helpers.strip_lower(plants, columns=["plant_name_eia"])
    plants = plants.astype({"plant_id_eia": int})
    plants = plants.drop_duplicates("plant_id_eia")
    return plants.sort_values("plant_id_eia")
def get_unmapped_plants_eia(pudl_engine):
    """Identify any as-of-yet unmapped EIA Plants.

    Finds every plant present in the PUDL DB but absent from the manual
    plant mapping, and annotates each with its operator utility, state and
    total generator capacity to aid the manual mapping process.

    Args:
        pudl_engine (sqlalchemy.engine.Engine): connection to a PUDL DB.

    Returns:
        pandas.DataFrame: plant_id_eia, plant_name_eia, utility_id_eia,
        utility_name_eia, state and capacity_mw for each unmapped plant.
    """
    # Distinct plant/utility associations, with utility names merged in.
    plants_utils_eia = (
        pd.read_sql("""SELECT DISTINCT plant_id_eia, utility_id_eia
                    FROM plants_eia860;""", pudl_engine).
        dropna().
        astype({"plant_id_eia": int,
                "utility_id_eia": int}).
        drop_duplicates().
        # Need to get the name of the utility, to merge with the ID
        merge(get_db_utils_eia(pudl_engine).reset_index(),
              on="utility_id_eia")
    )
    # Total reported generator capacity per plant.
    plant_capacity_mw = (
        pd.read_sql("SELECT * FROM generators_eia860;", pudl_engine).
        groupby(["plant_id_eia"])[["capacity_mw"]].agg(sum).
        reset_index()
    )
    db_plants_eia = get_db_plants_eia(pudl_engine).set_index("plant_id_eia")
    mapped_plants_eia = get_mapped_plants_eia().set_index("plant_id_eia")
    # Plants in the DB that the manual mapping doesn't know about yet.
    unmapped_plants_idx = (
        db_plants_eia.index.
        difference(mapped_plants_eia.index)
    )
    unmapped_plants_eia = (
        db_plants_eia.loc[unmapped_plants_idx].
        merge(plants_utils_eia, how="left", on="plant_id_eia").
        merge(plant_capacity_mw, how="left", on="plant_id_eia").
        loc[:, ["plant_id_eia", "plant_name_eia",
                "utility_id_eia", "utility_name_eia",
                "state", "capacity_mw"]].
        astype({"utility_id_eia": "Int32"})  # Woo! Nullable Integers FTW!
    )
    return unmapped_plants_eia
def get_lost_plants_eia(pudl_engine):
    """Identify any EIA plants which were mapped, but then lost from the DB.

    Returns the mapped-plant records whose plant_id_eia no longer appears
    in the PUDL database.
    """
    mapped = get_mapped_plants_eia().set_index("plant_id_eia")
    in_db = get_db_plants_eia(pudl_engine).set_index("plant_id_eia")
    lost_idx = mapped.index.difference(in_db.index)
    return mapped.loc[lost_idx]
def get_db_utils_eia(pudl_engine):
    """Get a list of all EIA Utilities appearing in the PUDL DB.

    Returns:
        pandas.DataFrame: indexed by utility_id_eia, with a
        utility_name_eia column; one row per utility, sorted by ID.
    """
    utils = pd.read_sql("utilities_entity_eia", pudl_engine)
    utils = utils.loc[:, ["utility_id_eia", "utility_name_eia"]]
    utils = pudl.helpers.strip_lower(utils, columns=["utility_name_eia"])
    utils = utils.astype({"utility_id_eia": int})
    utils = utils.drop_duplicates("utility_id_eia")
    utils = utils.sort_values("utility_id_eia")
    return utils.set_index("utility_id_eia")
def get_mapped_utils_eia():
    """Get a list of all the EIA Utilities that have PUDL IDs.

    Returns:
        pandas.DataFrame: indexed by utility_id_eia, with a
        utility_name_eia column; one row per mapped utility.
    """
    utils = pudl.glue.ferc1_eia.get_utility_map()
    utils = utils.loc[:, ["utility_id_eia", "utility_name_eia"]]
    utils = utils.dropna(subset=["utility_id_eia"])
    utils = pudl.helpers.strip_lower(utils, columns=["utility_name_eia"])
    utils = utils.astype({"utility_id_eia": int})
    utils = utils.drop_duplicates(["utility_id_eia"])
    utils = utils.sort_values(["utility_id_eia"])
    return utils.set_index("utility_id_eia")
def get_unmapped_utils_eia(pudl_engine):
    """Get a list of all the EIA Utilities in the PUDL DB without PUDL IDs.

    Returns the DB utility records whose utility_id_eia does not appear
    in the manual utility mapping.
    """
    in_db = get_db_utils_eia(pudl_engine)
    mapped = get_mapped_utils_eia()
    missing_idx = in_db.index.difference(mapped.index)
    return in_db.loc[missing_idx]
def get_unmapped_utils_with_plants_eia(pudl_engine):
    """Get all EIA Utilities that lack PUDL IDs but have plants/ownership.

    Looks at three EIA-860 outputs (utilities, plants, ownership) and
    returns the utilities that have no utility_id_pudl assigned yet, but
    either report some plant relationship flag or appear as the utility
    for a plant/ownership record that itself lacks a PUDL utility ID.

    Returns a dataframe indexed by utility_id_eia with a single
    utility_name_eia column.
    """
    pudl_out = pudl.output.pudltabl.PudlTabl(pudl_engine)
    # Index columns used to drop records with missing keys.
    utils_idx = ["utility_id_eia", "report_date"]
    plants_idx = ["plant_id_eia", "report_date"]
    own_idx = ["plant_id_eia", "generator_id",
               "owner_utility_id_eia", "report_date"]
    utils_eia860 = (
        pudl_out.utils_eia860()
        .dropna(subset=utils_idx)
        .set_index(utils_idx)
    )
    plants_eia860 = (
        pudl_out.plants_eia860()
        .dropna(subset=plants_idx)
        .set_index(plants_idx)
    )
    own_eia860 = (
        pudl_out.own_eia860()
        .dropna(subset=own_idx)
        .set_index(own_idx)
    )
    # EIA utility IDs that show up in ownership records lacking a PUDL ID.
    own_miss_utils = set(
        own_eia860[own_eia860.utility_id_pudl.isnull()]
        .utility_id_eia.unique()
    )
    # EIA utility IDs that show up in plant records lacking a PUDL ID.
    plants_miss_utils = set(
        plants_eia860[plants_eia860.utility_id_pudl.isnull()]
        .utility_id_eia.unique()
    )
    utils_eia860 = utils_eia860.reset_index()
    # NOTE(review): the plants_reported_* columns are compared against the
    # *string* "True" here -- presumably they are stored as strings rather
    # than booleans in this output. Verify against the utils_eia860 schema.
    miss_utils = utils_eia860[
        (utils_eia860.utility_id_pudl.isna()) &
        (
            (utils_eia860.plants_reported_owner == "True") |
            (utils_eia860.plants_reported_asset_manager == "True") |
            (utils_eia860.plants_reported_operator == "True") |
            (utils_eia860.plants_reported_other_relationship == "True") |
            (utils_eia860.utility_id_eia.isin(own_miss_utils)) |
            (utils_eia860.utility_id_eia.isin(plants_miss_utils))
        )
    ]
    # One row per utility, keeping only the name, indexed by EIA ID.
    miss_utils = (
        miss_utils.drop_duplicates("utility_id_eia")
        .set_index("utility_id_eia")
        .loc[:, ["utility_name_eia"]]
    )
    return miss_utils
def get_lost_utils_eia(pudl_engine):
    """Get a list of all mapped EIA Utilites not found in the PUDL DB.

    Mirror image of get_unmapped_utils_eia(): returns mapped utility
    records whose utility_id_eia no longer appears in the database.
    """
    in_db = get_db_utils_eia(pudl_engine)
    mapped = get_mapped_utils_eia()
    lost_ids = mapped.index.difference(in_db.index)
    return mapped.loc[lost_ids]
def glue(ferc1=False, eia=False):
    """Generates a dictionary of dataframes for glue tables between FERC1, EIA.

    That data is primarily stored in the plant_output and
    utility_output tabs of package_data/glue/mapping_eia923_ferc1.xlsx in the
    repository. There are a total of seven relations described in this data:

    - utilities: Unique id and name for each utility for use across the
      PUDL DB.
    - plants: Unique id and name for each plant for use across the PUDL DB.
    - utilities_eia: EIA operator ids and names attached to a PUDL
      utility id.
    - plants_eia: EIA plant ids and names attached to a PUDL plant id.
    - utilities_ferc: FERC respondent ids & names attached to a PUDL
      utility id.
    - plants_ferc: A combination of FERC plant names and respondent ids,
      associated with a PUDL plant ID. This is necessary because FERC does
      not provide plant ids, so the unique plant identifier is a
      combination of the respondent id and plant name.
    - utility_plant_assn: An association table which describes which plants
      have relationships with what utilities. If a record exists in this
      table then combination of PUDL utility id & PUDL plant id does have
      an association of some kind. The nature of that association is
      somewhat fluid, and more scrutiny will likely be required for use in
      analysis.

    Presently, the 'glue' tables are a very basic piece of infrastructure for
    the PUDL DB, because they contain the primary key fields for utilities and
    plants in FERC1.

    Args:
        ferc1 (bool): Are we ingesting FERC Form 1 data?
        eia (bool): Are we ingesting EIA data?

    Returns:
        dict: a dictionary of glue table DataFrames (or None if neither
        dataset is being ingested).
    """
    # ferc glue tables are structurally entity tables w/ foreign key
    # relationships to ferc datatables, so we need some of the eia/ferc 'glue'
    # even when only ferc is ingested into the database.
    if not ferc1 and not eia:
        return
    # We need to standardize plant names -- same capitalization and no leading
    # or trailing white space... since this field is being used as a key in
    # many cases. This also needs to be done any time plant_name is pulled in
    # from other tables.
    plant_map = (
        get_plant_map().
        pipe(pudl.helpers.strip_lower, ['plant_name_ferc1'])
    )
    plants_pudl = (
        plant_map.
        loc[:, ['plant_id_pudl', 'plant_name_pudl']].
        drop_duplicates('plant_id_pudl')
    )
    plants_eia = (
        plant_map.
        loc[:, ['plant_id_eia', 'plant_name_eia', 'plant_id_pudl']].
        drop_duplicates("plant_id_eia").
        dropna(subset=["plant_id_eia"])
    )
    plants_ferc1 = (
        plant_map.
        loc[:, ['plant_name_ferc1', 'utility_id_ferc1', 'plant_id_pudl']].
        drop_duplicates(['plant_name_ferc1', 'utility_id_ferc1']).
        dropna(subset=["utility_id_ferc1", "plant_name_ferc1"])
    )
    utility_map = get_utility_map()
    utilities_pudl = (
        utility_map.loc[:, ['utility_id_pudl', 'utility_name_pudl']].
        drop_duplicates('utility_id_pudl')
    )
    utilities_eia = (
        utility_map.
        loc[:, ['utility_id_eia', 'utility_name_eia', 'utility_id_pudl']].
        drop_duplicates('utility_id_eia').
        dropna(subset=['utility_id_eia'])
    )
    utilities_ferc1 = (
        utility_map.
        loc[:, ['utility_id_ferc1', 'utility_name_ferc1', 'utility_id_pudl']].
        drop_duplicates('utility_id_ferc1').
        dropna(subset=['utility_id_ferc1'])
    )
    # Now we need to create a table that indicates which plants are associated
    # with every utility.
    # These dataframes map our plant_id to FERC respondents and EIA
    # operators -- the equivalents of our "utilities"
    plants_utilities_ferc1 = (
        plant_map.
        loc[:, ['plant_id_pudl', 'utility_id_ferc1']].
        dropna(subset=['utility_id_ferc1'])
    )
    plants_utilities_eia = (
        plant_map.
        loc[:, ['plant_id_pudl', 'utility_id_eia']].
        dropna(subset=['utility_id_eia'])
    )
    # Here we treat the dataframes like database tables, and join on the
    # FERC respondent_id and EIA operator_id, respectively.
    # Now we can concatenate the two dataframes, and get rid of all the columns
    # except for plant_id and utility_id (which determine the utility to plant
    # association), and get rid of any duplicates or lingering NaN values...
    utility_plant_assn = (
        pd.concat(
            [pd.merge(utilities_eia,
                      plants_utilities_eia,
                      on='utility_id_eia'),
             pd.merge(utilities_ferc1,
                      plants_utilities_ferc1,
                      on='utility_id_ferc1')],
            sort=True
        )
    )
    utility_plant_assn = (
        utility_plant_assn.
        loc[:, ['plant_id_pudl', 'utility_id_pudl']].
        dropna().
        drop_duplicates()
    )
    # At this point there should be at most one row in each of these data
    # frames with NaN values after we drop_duplicates in each. This is because
    # there will be some plants and utilities that only exist in FERC, or only
    # exist in EIA, and while they will have PUDL IDs, they may not have
    # FERC/EIA info (and it'll get pulled in as NaN).
    #
    # BUG FIX: the original loop did ``df = df.dropna()``, which only rebound
    # the loop variable and never touched the dataframes that end up in
    # glue_dfs below. We now collect the cleaned frames and rebind the real
    # names so the NaN rows are actually dropped.
    cleaned = []
    for df, df_n in zip(
        [plants_eia, plants_ferc1, utilities_eia, utilities_ferc1],
        ['plants_eia', 'plants_ferc1', 'utilities_eia', 'utilities_ferc1']
    ):
        if df[pd.isnull(df).any(axis=1)].shape[0] > 1:
            raise AssertionError(f"FERC to EIA glue breaking in {df_n}")
        cleaned.append(df.dropna())
    plants_eia, plants_ferc1, utilities_eia, utilities_ferc1 = cleaned
    # Before we start inserting records into the database, let's do some basic
    # sanity checks to ensure that it's (at least kind of) clean.
    # INSERT SANITY HERE
    # Any FERC respondent_id that appears in plants_ferc1 must also exist in
    # utilities_ferc1:
    # INSERT MORE SANITY HERE
    glue_dfs = {
        "plants_pudl": plants_pudl,
        "utilities_pudl": utilities_pudl,
        "plants_ferc1": plants_ferc1,
        "utilities_ferc1": utilities_ferc1,
        "plants_eia": plants_eia,
        "utilities_eia": utilities_eia,
        "utility_plant_assn": utility_plant_assn,
    }
    # if we're not ingesting eia, exclude eia only tables
    if not eia:
        del glue_dfs['utilities_eia']
        del glue_dfs['plants_eia']
    # if we're not ingesting ferc, exclude ferc1 only tables
    if not ferc1:
        del glue_dfs['utilities_ferc1']
        del glue_dfs['plants_ferc1']
    return glue_dfs
| true |
2f2dbe587dfd327165fb94f9ddc4b96817b34fff | Python | ke0kul/luongnguyenvinhbao-fundamentals-C4E13 | /Session4/Homework/Ex_20-8-1.py | UTF-8 | 372 | 3.671875 | 4 | [] | no_license | sentence = 'ThiS is String with Upper and lower case Letters'
# Count how often each ASCII letter occurs in `sentence` (case-insensitive)
# and print the counts in alphabetical order.
sentence = sentence.lower()
import string
alphabet = string.ascii_lowercase
char_list = {}
for ch in sentence:
    if ch in alphabet:
        # dict.get supplies 0 the first time a letter is seen
        char_list[ch] = char_list.get(ch, 0) + 1
keys = char_list.keys()
for ch in sorted(keys):
    print(ch, char_list[ch])
| true |
940561bc0144d1d600c5659bdcaa5d2bc021d22b | Python | ineedaspo1/QuantTradingSys | /Code/lib/plot_utils.py | UTF-8 | 9,877 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 31 18:34:16 2018
@author: kruegkj
plot_utils.py
"""
import matplotlib.pylab as plt
import matplotlib as mpl
import matplotlib.ticker as ticker
import numpy as np
from matplotlib import cm as cm
import pandas as pd
import matplotlib.dates as mdates
class PlotUtility:
    """Collection of matplotlib helpers for plotting price series, beLong
    signals, indicator panels, correlation matrices, and equity curves.

    All methods draw directly with pyplot/axes state; most show the figure
    as a side effect rather than returning data.
    """

    def plot_v1(self, data, title):
        """Plot a single series with date x-axis and grid; return (fig, ax)."""
        fig, ax = plt.subplots(figsize=(10,4))
        ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.2f}'))
        plt.plot(data)
        # Label the axes and provide a title
        ax.set_title(title)
        ax.grid(True, which='both')
        fig.autofmt_xdate()
        ax.xaxis_date()
        ax.autoscale_view()
        ax.grid(b=True, which='major', color='k', linestyle='-', alpha=0.6)
        ax.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
        ax.minorticks_on()
        ax.tick_params(axis='y',which='major',bottom='off')
        return fig, ax

    def histogram(self, data, x_label, y_label, title):
        """Show a small 3-bin histogram of `data` (used for beLong signals)."""
        fig, ax = plt.subplots(figsize=(6,2))
        ax.hist(data, color = '#539caf', bins = 3)
        ax.set_ylabel(y_label)
        ax.set_xlabel(x_label)
        tick_spacing = 1
        ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
        ax.grid(b=True, which='major', color='k', linestyle='-')
        ax.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
        ax.minorticks_on()
        ax.tick_params(axis='y',which='minor',bottom='off')
        plt.show(block=False)

    def plot_v2x(self, plotDataSet, title):
        """Two stacked panels: Close with buy/sell markers, and the beLong
        signal. Expects 'Close' and 'beLong' columns; returns (fig, axes).
        """
        numSubPlots = 2
        fig, axes = plt.subplots(numSubPlots, ncols=1, figsize=(numSubPlots*7,6), sharex=True)
        # beLong > 0 marks long entries; beLong < 0 marks exits/shorts
        buys = plotDataSet.loc[(plotDataSet['beLong'] > 0)]
        sells = plotDataSet.loc[(plotDataSet['beLong'] < 0)]
        axes[0].plot(plotDataSet.index, plotDataSet['Close'])
        axes[0].plot(buys.index, plotDataSet.loc[buys.index]['Close'], '^', markersize=10, color='g', label='Buy')
        axes[0].plot(sells.index, plotDataSet.loc[sells.index]['Close'], 'v', markersize=10, color='r', label='Sell')
        axes[1].plot(plotDataSet['beLong'], color='red', alpha =0.8)
        plt.subplots_adjust(hspace=0.05)
        #fig.suptitle(title)
        axes[0].set_title(title)
        fig.autofmt_xdate()
        for ax in axes:
            ax.label_outer()
            ax.legend(loc='upper left', frameon=True, fontsize=8)
            ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
            ax.grid(True, which='both')
            ax.xaxis_date()
            ax.autoscale_view()
            ax.grid(b=True, which='major', color='k', linestyle='-', alpha=0.6)
            ax.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
            ax.minorticks_on()
            #ax.tick_params(axis='y',which='minor',bottom='off')
        #axes[1].set_yticks((-1,0,1), minor=False)
        axes[1].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
        plt.show(block=False)
        return fig, (axes[0], axes[1])

    def plot_beLongs(self, title, issue, df, start_date, end_date):
        """Convenience wrapper: plot_v2x panel plus beLong histogram."""
        plotTitle = title + ": " + issue + ", " + str(start_date) + " to " + str(end_date)
        self.plot_v2x(df, plotTitle)
        self.histogram(df['beLong'], x_label="beLong signal", y_label="Frequency", title = "beLong distribution for " + issue)

    def price_Ind_Vol_Plot(self, plot_dict, df):
        """Stack Close, each indicator in plot_dict['Plot_Vars'], and
        (optionally, when plot_dict['Volume'] == 'Yes') a Volume bar panel.
        """
        # Subplots are organized in a Rows x Cols Grid
        issue = plot_dict['Issue']
        key_to_value_lengths = {k:len(v) for k, v in plot_dict.items()}
        #print(key_to_value_lengths)
        #key_to_value_lengths['Plot_Vars']
        subplot_len = key_to_value_lengths['Plot_Vars']
        #print(subplot_len)
        if plot_dict['Volume']=='Yes':
            total_rows = 2 + subplot_len
        else:
            total_rows = 1 + subplot_len
        Cols = 1
        N = len(df)
        ind = np.arange(N)  # the evenly spaced plot indices
        def format_date(x, pos=None):
            # Map an evenly spaced plot index back to the date label
            thisind = np.clip(int(x + 0.5), 0, N - 1)
            return df.index[thisind].strftime('%Y-%m-%d')
        #myFmt = mdates.DateFormatter('%Y-%m-%d')
        fig = plt.figure(1,figsize=(14,total_rows*2))
        plt.subplots_adjust(hspace=0.05)
        cnt = 0
        for n in range(1,total_rows+1):
            if n==1:
                # Top panel: closing price
                ax = fig.add_subplot(total_rows,Cols,1)
                ax.plot(ind, df['Close'], label=issue)
            elif n < subplot_len+2:
                # Middle panels: one per requested indicator column
                ax = fig.add_subplot(total_rows,Cols,n,sharex=ax)
                ax.plot(ind, df[plot_dict['Plot_Vars'][cnt]], label=plot_dict['Plot_Vars'][cnt])
                cnt += 1
            else: # add Volume plot if requested
                ax = fig.add_subplot(total_rows,Cols,n)
                ax.bar(ind, df['Volume'], label='Volume')
            ax.grid(b=True, which='major', color='k', linestyle='-')
            ax.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
            ax.label_outer()
            ax.legend(loc='upper left', frameon=True, fontsize=10)
            ax.minorticks_on()
            ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.2f}'))
            #ax.xaxis.set_major_formatter(myFmt)
            #ax.xaxis_date()
            ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
        plt.show(block=False)

    def correlation_matrix(self, df):
        """Show a heatmap of the dataframe's pairwise feature correlations."""
        from matplotlib import pyplot as plt
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        cmap = cm.get_cmap('jet', 30)
        corr = df.corr()
        cax = ax1.imshow(corr, interpolation="nearest", cmap=cmap)
        ax1.grid(True)
        plt.title('Feature Correlation')
        plt.xticks(range(len(corr.columns)), corr.columns, rotation='vertical');
        plt.yticks(range(len(corr.columns)), corr.columns);
        # Add colorbar, make sure to specify tick locations to match desired ticklabels
        fig.colorbar(cax, ticks=[-1, -.5, 0, .5 ,1])
        plt.show()

    # NOTE(review): this method is missing `self` -- when called on an
    # instance, the instance is bound to `df`. It only works if called as
    # PlotUtility.plot_corr(df) or from module scope; verify call sites.
    def plot_corr(df,size=10):
        '''Function plots a graphical correlation matrix for each pair of columns in the dataframe.

        Input:
            df: pandas DataFrame
            size: vertical and horizontal size of the plot'''
        cmdf = pd.DataFrame()
        cmdf = df.copy()
        cm = cmdf.corr()
        fig, ax = plt.subplots(figsize=(size, size))
        ax.matshow(cm)
        plt.xticks(range(len(cm.columns)), cm.columns)
        plt.yticks(range(len(cm.columns)), cm.columns)

    def plot_equity_drawdown(self, issue, df):
        """Plot the 'equity' and 'drawdown' columns (last two rows dropped)."""
        # plot_tms = df.set_index(pd.DatetimeIndex(df['Date']))
        # plot_tms=plot_tms.drop('Date', axis=1)
        plotTitle = "Equity curve for " + issue
        self.plot_v1(df['equity'][:-2], plotTitle)
        plotTitle = "Drawdown for " + issue
        self.plot_v1(df['drawdown'][:-2], plotTitle)
        plt.show()

    def plot_CAR25_close(self, issue, df):
        """Plot CAR25 on the left axis and Close (dashed) on a twin axis."""
        fig = plt.figure(figsize=(11,6))
        fig.suptitle('CAR25 and issue price for' + issue)
        ax1 = fig.add_subplot(111)
        #ax1.plot(sst1.safef, color='green',label='safe-f')
        ax1.plot(df.CAR25, color='blue',label='CAR25')
        #ax1.plot(valData.equityValBeLongSignals, color='purple',label='ValBeLong')
        ax1.legend(loc='upper left', frameon=True, fontsize=8)
        ax1.label_outer()
        ax1.tick_params(axis='x',which='major',bottom=True)
        ax1.minorticks_on()
        ax1.grid(True, which='major', color='k', linestyle='-', alpha=0.6)
        ax1.grid(True, which='minor', color='r', linestyle='-', alpha=0.2)
        #sst1['Pri']=valData.Pri
        # Twin axis shares x but has its own y scale for the price series
        ax2 = ax1.twinx()
        ax2.plot(df.Close,
                 color='black',
                 alpha=0.6,
                 label='CLOSE',
                 linestyle='--'
                 )
        ax2.legend(loc='center left', frameon=True, fontsize=8)
        ax2.label_outer()
        fig.autofmt_xdate()
        plt.show()
if __name__ == "__main__":
    # Demo / smoke test: load TLT data, compute beLong targets, and exercise
    # the PlotUtility plotting methods.
    from retrieve_data import DataRetrieve, ComputeTarget
    dSet = DataRetrieve()
    plotIt = PlotUtility()
    cT = ComputeTarget()
    dataLoadStartDate = "2008-02-01"
    dataLoadEndDate = "2010-04-01"
    issue = "TLT"
    dataSet = dSet.read_issue_data(issue)
    dataSet = dSet.set_date_range(dataSet, dataLoadStartDate,dataLoadEndDate)
    # beLong target: threshold of 0 on the "Long" direction
    beLongThreshold = 0
    dataSet = cT.setTarget(dataSet,
                           "Long",
                           beLongThreshold
                           )
    # Plot price and indicators
    startDate = "2008-02-01"
    endDate = "2010-04-01"
    plotDataSet = dataSet[startDate:endDate].copy()
    # Set up plot dictionary
    plot_dict = {}
    plot_dict['Issue'] = issue
    plot_dict['Plot_Vars'] = ['beLong']
    plot_dict['Volume'] = 'Yes'
    plotIt.price_Ind_Vol_Plot(plot_dict, plotDataSet)
    plotTitle = "Closing price for " + issue + ", " + str(dataLoadStartDate) + " to " + str(dataLoadEndDate)
    plotIt.plot_v1(plotDataSet['Close'], plotTitle)
    plotTitle = "beLong signal for " + issue + ", " + str(dataLoadStartDate) + " to " + str(dataLoadEndDate)
    plotIt.plot_v1(plotDataSet['beLong'], plotTitle)
    plotIt.histogram(
            plotDataSet['beLong'],
            x_label="beLong signal",
            y_label="Frequency",
            title = "beLong distribution for " + issue)
    plotTitle = issue + ", " + str(dataLoadStartDate) + " to " + str(dataLoadEndDate)
    plotIt.plot_v2x(plotDataSet, plotTitle)
    #plotIt.plot_beLongs("Plot of beLongs", issue, plotDataSet, dataLoadStartDate, dataLoadEndDate)
    plotIt.correlation_matrix(dataSet)
| true |
d5f035d6c459adb7b1eea8234819b6ff85fcdace | Python | funkelab/funlib.learn.torch | /funlib/tests/test_conv4d.py | UTF-8 | 3,766 | 2.734375 | 3 | [] | no_license | from funlib.learn.torch.models import Conv4d
import numpy as np
import torch
import unittest
class TestConv4D(unittest.TestCase):
    """Unit test for the Conv4d layer: compares its output against a manual
    convolution computed from the layer's internal 3D kernels."""

    def test_conv4d(self):
        """Check Conv4d against hand-computed convolutions at random points,
        first with random kernels, then with an all-ones kernel + padding."""
        # Generate random input 4D tensor (+ batch dimension, + channel
        # dimension)
        np.random.seed(42)
        input_numpy = np.round(np.random.random((1, 1, 10, 11, 12, 13)) * 100)
        input_torch = torch.from_numpy(input_numpy).float()

        # Convolve with a randomly initialized kernel
        # Initialize the 4D convolutional layer with random kernels
        conv4d_layer = \
            Conv4d(
                in_channels=1,
                out_channels=1,
                kernel_size=(3, 3, 3, 3),
                bias_initializer=lambda x: torch.nn.init.constant_(x, 0))

        # Pass the input tensor through that layer
        output = conv4d_layer.forward(input_torch).data.numpy()

        # Select the 3D kernels for the manual computation and comparison.
        # Conv4d is implemented as a stack of 3 conv3d layers, one per
        # position along the 4th kernel dimension.
        kernels = [
            conv4d_layer.conv3d_layers[i].weight.data.numpy().flatten()
            for i in range(3)
        ]

        # Compare the conv4d_layer result and the manual convolution
        # computation at 3 randomly chosen locations
        for i in range(3):

            # Randomly choose a location and select the conv4d_layer output
            loc = [
                np.random.randint(0, output.shape[2] - 2),
                np.random.randint(0, output.shape[3] - 2),
                np.random.randint(0, output.shape[4] - 2),
                np.random.randint(0, output.shape[5] - 2)
            ]
            conv4d = output[0, 0, loc[0], loc[1], loc[2], loc[3]]

            # Select slices from the input tensor and compute manual
            # convolution
            slices = [
                input_numpy[
                    0, 0, loc[0] + j, loc[1]:loc[1] + 3,
                    loc[2]:loc[2] + 3, loc[3]:loc[3] + 3].flatten()
                for j in range(3)
            ]
            manual = np.sum([slices[j] * kernels[j] for j in range(3)])

            np.testing.assert_array_almost_equal(conv4d, manual, 3)

        # Convolve with a kernel initialized to be all ones
        conv4d_layer = \
            Conv4d(
                in_channels=1,
                out_channels=1,
                kernel_size=(3, 3, 3, 3),
                padding=1,
                kernel_initializer=lambda x: torch.nn.init.constant_(x, 1),
                bias_initializer=lambda x: torch.nn.init.constant_(x, 0))
        output = conv4d_layer.forward(input_torch).data.numpy()

        # Define relu(x) = max(x, 0) for simplified indexing below
        def relu(x: float) -> float:
            return x * (x > 0)

        # Compare the conv4d_layer result and the manual convolution
        # computation at 3 randomly chosen locations
        for i in range(3):

            # Randomly choose a location and select the conv4d_layer
            # output
            loc = [np.random.randint(0, output.shape[2] - 2),
                   np.random.randint(0, output.shape[3] - 2),
                   np.random.randint(0, output.shape[4] - 2),
                   np.random.randint(0, output.shape[5] - 2)]
            conv4d = output[0, 0, loc[0], loc[1], loc[2], loc[3]]

            # For a kernel that is all 1s, we only need to sum up the elements
            # of the input (the ReLU takes care of the padding!)
            manual = input_numpy[0, 0,
                                 relu(loc[0] - 1):loc[0] + 2,
                                 relu(loc[1] - 1):loc[1] + 2,
                                 relu(loc[2] - 1):loc[2] + 2,
                                 relu(loc[3] - 1):loc[3] + 2].sum()

            np.testing.assert_array_almost_equal(conv4d, manual, 3)
| true |
7039ecbcdbdabdfbb4091bad6f34d50ba6a181ac | Python | siverka/adaptive-python-en | /Step 134 Matrices.py | UTF-8 | 339 | 2.671875 | 3 | [] | no_license | import numpy as np
# Read two integer matrices from stdin (shape line, then flattened values)
# and print X @ Y.T, or an error message if the shapes are incompatible.
#
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 -- use the
# builtin int as the dtype instead.
x_shape = tuple(map(int, input().split()))
X = np.fromiter(map(int, input().split()), dtype=int).reshape(x_shape)
y_shape = tuple(map(int, input().split()))
Y = np.fromiter(map(int, input().split()), dtype=int).reshape(y_shape)
try:
    # dot with Y transposed; raises ValueError on a shape mismatch
    Z = X.dot(Y.T)
    print(Z)
except ValueError:
    print('matrix shapes do not match')
| true |
0cfc665639c067a4da263cfeb4db51ec7838e86e | Python | albertwildeman/Two-sum | /main.py | UTF-8 | 331 | 2.640625 | 3 | [] | no_license | from FileReadLib import get_array
from TwoSumLib import two_sum_dict
import numpy as np
filename = "algo1-programming_prob-2sum"
a = get_array(filename)
# Prepare to find 2-sums by sorting the array
a.sort()
targets = range(-10000,10001)
found_2sum = two_sum_dict(a, targets)
np.save("output", found_2sum)
print("all done.") | true |
fba5f131dea5658da083d7947e3cc3aa35935f54 | Python | JackHoeg/NUCS349---Final-Coronavirus | /coronavirus-2020/exp/my_pred.py | UTF-8 | 3,843 | 3.171875 | 3 | [
"MIT"
] | permissive | """
Experiment summary
------------------
Compares Results of a Decision Tree Regressor
and Linear Regression for predicting cases
of COVID-19 in the US.
Much of this is based on:
https://medium.com/@randerson112358/predict-stock-prices-using-machine-learning-python-f554b7167b36
"""
import sys
sys.path.insert(0, '..')
from utils import data
import os
import sklearn
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import json
# ------------ HYPERPARAMETERS -------------
BASE_PATH = 'COVID-19/csse_covid_19_data/'
# ------------------------------------------

# Load the JHU CSSE time series of cumulative confirmed US cases.
confirmed = os.path.join(
    BASE_PATH,
    'csse_covid_19_time_series',
    'time_series_covid19_confirmed_US.csv')
confirmed = data.load_csv_data(confirmed)
tmpFeatures = []
for val in np.unique(confirmed["Country_Region"]):
    df = data.filter_by_attribute(
        confirmed, "Country_Region", val)
    cases, _ = data.get_cases_chronologically(df)
    tmpFeatures.append(cases)
tmpFeatures = np.concatenate(tmpFeatures, axis=0)
# Sum over all rows to get one cumulative-cases series.
features = np.sum(tmpFeatures, axis=0)

# Convert cumulative counts into daily new cases by first-differencing
# (walking backwards so features is read before being needed).
newCases = np.zeros(features.shape, dtype=np.int32)
i = len(features) - 1
while i > 0:
    newCases[i] = features[i] - features[i - 1]
    i -= 1
newCases[0] = features[0]
dates = np.arange(len(newCases))

# Build sliding windows: daysGiven days of input -> daysPred days of labels.
daysGiven = 14
daysPred = 7
totDays = daysGiven + daysPred
newFeatures = np.zeros((len(newCases) - totDays, daysGiven))
newLabels = np.zeros((len(newFeatures), daysPred))
for i in range(len(newFeatures)):
    newFeatures[i, :] = newCases[i:i+daysGiven]
    newLabels[i] = newCases[i+daysGiven:i+totDays]

# NOTE(review): no random_state is passed, so the split (and all results
# below) differ from run to run.
x_train, x_test, y_train, y_test = train_test_split(newFeatures, newLabels, test_size=0.4)

# ---- Decision tree model ----
tree = DecisionTreeRegressor().fit(x_train, y_train)
preds = tree.predict(x_test)

tmpDates = range(totDays)
diff = 0
acc = 0
for i in range(len(preds)):
    # Assemble full given+predicted windows for plotting/comparison.
    real = np.zeros(totDays)
    fake = np.zeros(totDays)
    real[0:daysGiven] = x_test[i]
    real[daysGiven:] = y_test[i]
    fake[0:daysGiven] = x_test[i]
    fake[daysGiven:] = preds[i]
    realSum = np.sum(y_test[i])
    fakeSum = np.sum(preds[i])
    thisDiff = fakeSum - realSum
    diff += thisDiff
    # Relative error and the per-sample plot are only produced when the
    # true window is non-zero (avoids division by zero). Note this opens
    # one figure per qualifying test sample.
    if realSum != 0:
        thisAcc = np.abs(thisDiff) / realSum
        line1, = plt.plot(tmpDates, fake)
        line1.set_label('preds')
        line2, = plt.plot(tmpDates, real)
        line2.set_label('actual')
        plt.title('Decision Tree Regressor')
        plt.xlabel('Days')
        plt.ylabel('New Cases')
        plt.legend()
        plt.show()
        plt.clf()
        acc += thisAcc
diff /= len(preds)
acc /= len(preds)
print("\nDECISION TREE REGRESSION")
print("\naverage difference = ", diff)
print("average error = ", acc)

# ---- Linear model (same evaluation; `tree` is reused for the regressor) ----
tree = LinearRegression().fit(x_train, y_train)
preds = tree.predict(x_test)

tmpDates = range(totDays)
diff = 0
acc = 0
for i in range(len(preds)):
    real = np.zeros(totDays)
    fake = np.zeros(totDays)
    real[0:daysGiven] = x_test[i]
    real[daysGiven:] = y_test[i]
    fake[0:daysGiven] = x_test[i]
    fake[daysGiven:] = preds[i]
    realSum = np.sum(y_test[i])
    fakeSum = np.sum(preds[i])
    thisDiff = fakeSum - realSum
    diff += thisDiff
    if realSum != 0:
        thisAcc = np.abs(thisDiff) / realSum
        line1, = plt.plot(tmpDates, fake)
        line1.set_label('preds')
        line2, = plt.plot(tmpDates, real)
        line2.set_label('actual')
        plt.title('Linear Regression')
        plt.xlabel('Days')
        plt.ylabel('New Cases')
        plt.legend()
        plt.show()
        plt.clf()
        acc += thisAcc
diff /= len(preds)
acc /= len(preds)
print("\nLINEAR REGRESSION")
print("\naverage difference = ", diff)
print("average error = ", acc)
| true |
61c18765abf3ead4ca54d6c8256b00123e4c5316 | Python | blueones/LeetcodePractices | /simplifypath71.py | UTF-8 | 1,059 | 3.15625 | 3 | [] | no_license | class Solution:
def simplifyPath(self, path: str) -> str:
list_levels = path.split("/")
stack_levels = []
for item in list_levels:
if item =="..":
if stack_levels:
stack_levels.pop(-1)
elif item =="." or item=="":
continue
else:
stack_levels.append(item)
result_string = ""
while stack_levels:
result_string+="/"+stack_levels[0]
stack_levels.pop(0)
return result_string if result_string !="" else "/"
class Solution1:
    def simplifyPath(self, path: str) -> str:
        """Canonicalize an absolute Unix path (LeetCode 71 reference
        solution): drop '.', empty segments, resolve '..', join once."""
        kept = []
        for segment in path.split("/"):
            if segment == "..":
                if kept:
                    kept.pop()
            elif segment in ("", "."):
                continue
            else:
                kept.append(segment)
        return "/" + "/".join(kept)
| true |
01cd03f8c92da9ce3e9adeceaf844f05e9aac9f4 | Python | CaoYiMing0/PythonStudy_2020621 | /day_05/Test13_for循环与序列解包.py | UTF-8 | 247 | 3.40625 | 3 | [] | no_license | # for循环的一大好处是可以在循环中使用序列解包
# Iterate over a dict's items with sequence unpacking in the for statement,
# printing each "key:value" pair.
tups = {'卢本伟': '1001', '五五开': '1002', 'white': '1003'}
for name, num in tups.items():
    print(f'{name}:{num}')
"""
卢本伟:1001
五五开:1002
white:1003
"""
8761340057979a7f689c524c95f3cb233de72e58 | Python | HipHopCoderS/Python_Practice | /code_practice/maoPaoPaiXu.py | UTF-8 | 864 | 3.421875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
__author__ = 'HipHopCoder'
# 冒泡排序求最大值
def MaxMaoPao1(listTest):
    """Bubble-sort ``listTest`` in place (ascending) and print the result.

    Each outer pass bubbles the largest remaining element toward the end.

    Fixes: the Python-2-only ``print listTest`` statement is replaced with
    the function form (valid in both Python 2 and 3), and the arithmetic
    swap trick (a = a + b; b = a - b; a = a - b), which only works for
    numbers, is replaced with tuple unpacking, which works for any
    orderable items.
    """
    lenList = len(listTest)
    while lenList > 0:
        for i in range(lenList - 1):
            if listTest[i] > listTest[i + 1]:
                listTest[i], listTest[i + 1] = listTest[i + 1], listTest[i]
        lenList -= 1
    print(listTest)
# Bubble sort, index-based variant:
#   i = outer pass counter (how many elements are already in place)
#   j = position of the adjacent pair being compared in this pass
def MaxMaoPao2(listTest):
    """Bubble-sort ``listTest`` in place (ascending) and print the result.

    Bug fix: the original body indexed ``listTest[i]``/``listTest[i+1]``
    inside the inner loop -- the loop variable ``j`` was never used -- so
    at most one adjacent swap happened per outer pass and the list was
    left unsorted (e.g. [3, 2, 1] became [2, 1, 3]). The comparisons now
    use ``j`` as intended. The Python-2-only ``print`` statement is also
    replaced with the function form.
    """
    lenList = len(listTest)
    for i in range(lenList - 1):
        # After pass i, the last i elements are already the largest.
        for j in range(lenList - 1 - i):
            if listTest[j] > listTest[j + 1]:
                value = listTest[j + 1]
                listTest[j + 1] = listTest[j]
                listTest[j] = value
    print(listTest)
# Demo: run both sorts on the same list. Both functions mutate listTest in
# place, so MaxMaoPao2 receives the list already sorted by MaxMaoPao1.
listTest = [11,33,22,44,76,4,77]
MaxMaoPao1(listTest)
MaxMaoPao2(listTest)
81e5a9e47c09e8473ceacacae7a38231b8b4a375 | Python | avahoffman/practice_notebooks | /base_python_practice.py | UTF-8 | 30,894 | 4.6875 | 5 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 07:53:00 2018
@author: whitneyreiner
"""
##############################################################################
##############################################################################
##############################################################################
##################################List basics#################################
##############################################################################
##############################################################################
##############################################################################
# make a list
myList = [1,2,3,4]
# Repeat the *reference* to myList three times: A holds three aliases of
# the same list object, not three copies.
A = [myList]*3
print(A)
# replace the item at index 2 (the third item) with 45
myList[2]=45
# Because A contains aliases, the change shows up in all three sublists.
print(A)
# lists can have different types of data
myList = [1024, 3, True, 6.5]
# adding (appending) a boolean term to the list
myList.append(False)
print(myList)
# insert 4.5 at index 2: list.insert(index, value)
myList.insert(2,4.5)
print(myList)
# The pop() method removes and returns the element at the given index
# (passed as an argument) from the list. If no index is given, it will return
# the last element
print(myList.pop())
print(myList)
print(myList.pop(1))
print(myList)
myList.pop(2)
print(myList)
# sort, ascending by default
myList.sort()
print(myList)
# reverse the list!
myList.reverse()
print(myList)
# How many times is X in my list?
print(myList.count(6.5))
# At what spot is this thing in my list?
print(myList.index(4.5))
myList.remove(6.5)
print(myList)
del myList[0]
print(myList)
# How to build a list
mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist[0]) # prints 1
print(mylist[1]) # prints 2
print(mylist[2]) # prints 3
# prints out 1,2,3
for x in mylist:
    print(x)
# ----------------------------------------------------------------------
# String formatting
# ----------------------------------------------------------------------
# This prints out "Hello, John!"
name = "John"
print("Hello, %s!" % name)
# Two or more arg specifiers require a tuple (parentheses)
# This prints out "John is 23 years old."
name = "John"
age = 23
print("%s is %d years old." % (name, age))
# You can do this with non-strings too
# This prints out: A list: [1, 2, 3]
mylist = [1,2,3]
print("A list: %s" % mylist)
# Format specifiers:
#   %s - String (or any object with a string representation, like numbers)
#   %d - Integers
#   %f - Floating point numbers
#   %.<number of digits>f - Floating point numbers with a fixed amount of
#       digits to the right of the dot.
#   %x/%X - Integers in hex representation (lowercase/uppercase)
# You will need to write a format string which prints out the data using the
# following syntax: Hello John Doe. Your current balance is $53.44.
data = ("John", "Doe", 53.44)
format_string = "Hello %s %s. Your current balance is $%s."
print(format_string % data)
# Answer to "why isn't the last %s a %f?": %s works because %-formatting
# converts the value with str(), giving "53.44". %f would also work but
# would print "53.440000" unless a precision like %.2f was given.
# make a string, prints length of string incl. the spaces
astring = "Hello world!"
print("single quotes are ' '")
print(len(astring))
# print position of a letter or something within a string
astring = "Hello world!"
print(astring.index("o"))
# count number of l in string
astring = "Hello world!"
print(astring.count("l"))
# print slice of string
astring = "Hello world!"
print(astring[3:7])
# Slicing notes:
# If you just have one number in the brackets, it will give you the single
# character at that index. If you leave out the first number but keep the
# colon, it will give you a slice from the start to the number you left in.
# If you leave out the second number, it will give you a slice from the
# first number to the end.
# You can even put negative numbers inside the brackets. They are an easy
# way of starting at the end of the string instead of the beginning. This
# way, -3 means "3rd character from the end".
# This is extended slice syntax. The general form is [start:stop:step].
# This prints the characters of string from 3 to 7 skipping one character.
astring = "Hello world!"
print(astring[3:7:2])
# print reverse of string
astring = "Hello world!"
print(astring[::-1])
# lower or all upper case
astring = "Hello world!"
print(astring.upper())
print(astring.lower())
# determine whether the string starts with something or ends with something.
astring = "Hello world!"
print(astring.startswith("Hello"))
print(astring.endswith("asdfasdfasdf"))
# splits the string into a bunch of strings grouped together in a list.
# This example splits at a space.
astring = "Hello world!"
afewwords = astring.split(" ")
afewwords
# Boolean operators
name = "John"
age = 23
if name == "John" and age == 23:
    print("Your name is John, and you are also 23 years old.")
if name == "John" or name == "Rick":
    print("Your name is either John or Rick.")
# The "in" operator could be used to check if a specified object exists within
# an iterable object container, such as a list:
# interactivepython.org
# using a dict (unordered)
capitals = {'Iowa':'DesMoines','Wisconsin':'Madison'}
print(capitals['Iowa'])
capitals['Utah']='SaltLakeCity'
print(capitals)
capitals['California']='Sacramento'
print(len(capitals))
for k in capitals:
    print(capitals[k]," is the capital of ", k)
phoneext={'david':1410,'brad':1137}
phoneext
phoneext.keys()
list(phoneext.keys())
phoneext.values()
list(phoneext.values())
phoneext.items()
list(phoneext.items())
# get() returns None for a missing key...
phoneext.get("kent")
# ...or the supplied default instead.
phoneext.get("kent","NO ENTRY")
# User input:
# Python's input function takes a single parameter that is a string.
# This string is often called the prompt because it contains some helpful
# text prompting the user to enter something. For example, you might call
# input as follows:
aName = input("Please enter your name ")
print("Your name in all capitals is",aName.upper(),
      "and has length", len(aName))
# the value returned from the input function will be a string representing the
# exact characters that were entered after the prompt.
# If you want this string interpreted as another type, you must provide the
# type conversion explicitly.
sradius = input("Please enter the radius of the circle ")
radius = float(sradius)
diameter = 2 * radius
# string formatting
print("Hello")
print("Hello","World")
print("Hello","World", sep="***")
print("Hello","World", end="***")
# formatted strings
# not good because you would have to reassign "is" and "years old" every time
print(aName, "is", age, "years old.")
# now it will change depending on aName and age
print("%s is %d years old." % (aName, age))
# The % operator is a string operator called the format operator.
# The number of values in the collection on the right side corresponds with
# the number of % characters in the format string. Values are taken -- in
# order, left to right -- from the collection and inserted into the format
# string.
# The format string may contain one or more conversion specifications.
# A conversion character tells the format operator what type of value is
# going to be inserted into that position in the string. In the example
# above, the %s specifies a string, while the %d specifies an integer.
#
# The right side of the format operator is a collection of values that will
# be inserted into the format string. The collection will be either a tuple
# or a dictionary. If the collection is a tuple, the values are inserted in
# order of position. That is, the first element in the tuple corresponds to
# the first format character in the format string. If the collection is a
# dictionary, the values are inserted according to their keys. In this case
# all format characters must use the (name) modifier to specify the name of
# the key.
#
# Examples:
# price = 24
# item = "banana"
# print("The %s costs %d cents"%(item,price))
# The banana costs 24 cents
# print("The %+10s costs %5.2f cents"%(item,price))
# The     banana costs 24.00 cents
# print("The %+10s costs %10.2f cents"%(item,price))
# The     banana costs      24.00 cents
# itemdict = {"item":"banana","cost":24}
# print("The %(item)s costs %(cost)7.1f cents"%itemdict)
# The banana costs    24.0 cents
# while statement repeats a body of code as long as a condition is true.
counter = 1
while counter <= 5:
    print("Hello, world")
    counter = counter + 1
# The for statement can be used to iterate over the members of a collection,
# so long as the collection is a sequence.
for item in [1,3,6,2,5]:
    print(item)
#iterate over a range of values
for item in range(5):
    print((item**2))
# Processing Each Character in a List of Strings
wordlist = ['cat','dog','rabbit']
letterlist = [ ]
for aword in wordlist:
    for aletter in aword:
        letterlist.append(aletter)
print(letterlist)
# selection constructs
score=[]
if score >= 90:
print('A')
else:
if score >=80:
print('B')
else:
if score >= 70:
print('C')
else:
if score >= 60:
print('D')
else:
print('F')
# alternative syntax for this type of nested selection uses the elif keyword
if score >= 90:
print('A')
elif score >=80:
print('B')
elif score >= 70:
print('C')
elif score >= 60:
print('D')
else:
print('F')
# =============================================================================
# Python also has a single way selection construct, the if statement.
# With this statement, if the condition is true, an action is performed.
# In the case where the condition is false, processing simply continues on to
# the next statement after the if. For example, the following fragment will
# first check to see if the value of a variable n is negative. If it is, then
# it is modified by the absolute value function. Regardless, the next action is
# to compute the square root.
#
# =============================================================================
if n<0:
n = abs(n)
print(math.sqrt(n))
# the answer is: ['c', 'a', 't', 'd', 'o', 'g', 'r', 'b', 'i']
# the answer is: ['c', 'a', 't', 'd', 'o', 'g', 'r', 'b', 'i']
wordlist = ['cat','dog','rabbit']
letterlist = [ ]
for aword in wordlist:
for aletter in aword:
if aletter in letterlist:
pass
else:
letterlist.append(aletter)
#before append make sure is not already in list
# or remove duplicates before print or
print(letterlist)
wordlist = ['cat','dog','rabbit']
letterlist = [ ]
for aword in wordlist:
for aletter in aword:
if aletter not in letterlist:
letterlist.append(aletter)
#before append make sure is not already in list
# or remove duplicates before print or
print(letterlist)
# list comprehension
#you could do this:
sqlist=[]
for x in range(1,11):
sqlist.append(x*x)
sqlist
#[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# =============================================================================
# #But using a list comprehension, we can do this in one step:
# =============================================================================
sqlist=[x*x for x in range(1,11)]
sqlist
#[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# =============================================================================
# The general syntax for a list comprehension also allows a selection criteria
# to be added so that only certain items get added.
# =============================================================================
sqlist=[x*x for x in range(1,11) if x%2 != 0]
sqlist
#[1, 9, 25, 49, 81]
[ch.upper() for ch in 'comprehension' if ch not in 'aeiou']
#['C', 'M', 'P', 'R', 'H', 'N', 'S', 'N']
# space separated integers
# if you're doing it with an array, easy way is print(*array)
wordlist=['cat','dog', 'rabbit']
#both methods will print each character
print([word for word in "" .join(wordlist)])
print([word[i] for word in wordlist for i in range(len(word))])
#takes out duplicates because you made it a set
print(set([word[i] for word in wordlist for i in range(len(word))]))
# THESE DO THE SAME THING
def sumOfN(n):
    """Return the sum 1 + 2 + ... + n (0 when n < 1)."""
    return sum(range(1, n + 1))
print(sumOfN(10))
def foo(tom):
    """Return the sum of the integers 1 through tom (obfuscated-name twin of sumOfN)."""
    return sum(range(1, tom + 1))
print(foo(10))
#hackerrank
def gradingStudents(grades):
    """Round grades per the HackerRank "Grading Students" rules.

    A grade below 38 is failing and never rounded. Otherwise, if the grade
    is within 2 of the next multiple of 5, round it up to that multiple.
    Returns the list of (possibly rounded) grades.

    Fixes the original, which ignored the `grades` parameter entirely and
    re-read everything from stdin — and only printed grades >= 38, silently
    dropping failing grades from the output.
    """
    rounded = []
    for g in grades:
        if g >= 38 and g % 5 > 2:
            g += 5 - g % 5  # bump to the next multiple of 5
        rounded.append(g)
    return rounded
print(*range(1, int(input())+1), sep='')
def fun(*args):
    """Return the sum of all positional arguments (0 when called with none)."""
    return sum(args)
print(fun())
print(fun(1))
print(fun(1, 2, 3))
# * means "everything in this set"
# In short, the '*' operator lets you pass a list as separate arguments. Example:
name="steve"
password="qwerty"
captcha="h3g2"
user = [name, password, captcha]
def login(n, p, c):
    """Demo for '*' unpacking: concatenate the three credential strings and print them."""
    combined = n + p + c
    print(combined)
login(*user)
# =============================================================================
# In general, you pass in arguments like so:
#
# function(arg1,arg2,arg3)
# But, if you put them in a list such as:
#
# function([arg1,arg2,arg3])
# then it's like you're only passing in 1 argument (the list) and you'll get an error saying function was expecting 3 arguments and only got 1.
#
# On the other hand if you were to include * it would "unpack" the list (imagine the list is a suitcase and the elements are pieces of clothing you want to put on the bed to look at individually. you can't do that without unpacking the suitcase, right?) so that it would still work:
#
# function(*[arg1,arg2,arg3])
# becomes
#
# function(arg1,arg2,arg3)
# So *user changes from a list:
#
# user = [name, password, captcha]
# =============================================================================
# =============================================================================
# to arguments:
#
# *user = name, password, captcha
# So now log_in will work without errors because it was passed 3 arguments instead of 1!
#
# n = int(input()) *range (1, n+1) = range is from 1 to n, exclusive of n.
# so if your input is 3 then it prints 1 2. So input to the range function is 1 to n+1
#
# if you don't give sep='' in print statement then each value is seperated by a space
#
# * Explanation
# Python's Print Function defines the input as *objects, not as iterable like it does for Map.
#
# An example of an iterable is a list because we can keep stepping through the elements one at a time.
#
# print instead needs multiple arguments for each object you want to print.
# =============================================================================
# * is a way to "unpack" a list so that it becomes arguments instead of a list. f(*[1,2,3]) converts to f(1,2,3).
# #
# =============================================================================
# Example using print (from future in py2):
#
# print([1,2,3])
# #[1, 2, 3]
#
# print(*[1,2,3])
# #1 2 3
#
# print(1,2,3)
# #1 2 3
# The last two exhibit the desired result, you just need to set sep='' to get 123 (w/o spaces).
# =============================================================================
# n = int(input()); print(*range(1, n+1))   # (the original note fused these two statements into one broken line)
# =============================================================================
# # = range is from 1 to n, exclusive of n.
# so if your input is 3 then it prints 1 2. So input to the range function is 1 to n+1
# =============================================================================
# =============================================================================
#
# if you don't give sep='' in print statement then each value is seperated by a space
# =============================================================================
# import calendar
#Calendar module
print(list(calendar.day_name)[calendar.weekday(y, m, d)].upper())
#Doomsday algorithm???
weekdays = ['SUNDAY','MONDAY','TUESDAY','WEDNESDAY','THURSDAY','FRIDAY','SATURDAY']
def dayofweek(month, day, yr):
    """dayOfWeek() takes a Gregorian date as (month, day, yr), and finds the day of the week for that date.
    Implements John Conway's Doomsday algorithm: every year has an anchor
    weekday shared by a set of easy-to-remember dates, and the answer is an
    offset from that anchor. Indexes the module-level ``weekdays`` list.
    Source: http://hackerpublicradio.org/eps/hpr1240/doomsday.py"""
    centuryAnchor = (16 - 2 * (yr // 100 % 4)) % 7 # Find the Doomsday Century Anchor
    yr = yr % 100 # Calculate Doomsday number for year within century
    yrs_12 = yr % 12
    # Conway's "how many 12s, remainder, fours in the remainder" year offset.
    offset = yr // 12 + yrs_12 + yrs_12 // 4 + centuryAnchor
    monpos = [0, 3, 0, 7, 4, 9, 6, 11, 8, 12, 10, 14, 12][month] # MonthPosLookUp
    if month < 3 and (yr or centuryAnchor == 2) and not yr % 4: # January and February need leap year adjustment
        monpos += 1 # 1: If on a 4N year and year is not a Century boundary (i.e., not 0) or Century Anchor is Tuesday
    return weekdays[(35 + day - monpos + offset) % 7] # Make adjustments from Doomsday setting for the month
print(dayofweek(*map(int, input().split())))
# This is Python's slice syntax for a string, list, or tuple. s[::-1] returns s in reversed order, like this:
s = 'abcde'
print(s[::-1]) # => 'edcba'
word='caturday'
word[-1::-1]
def front_back(str):
    """Return str with its first and last characters exchanged.

    Fixes the original, which used back = str[:-1] (everything *except* the
    last character) instead of the last character itself, producing garbage
    like "codcodc" instead of "eodc" for "code".
    """
    if len(str) <= 1:
        return str
    return str[-1] + str[1:-1] + str[0]
# =============================================================================
# Printing the number of even numbers in an array
#
# Return the number of even ints in the given array.
# Note: the % "mod" operator computes the remainder, e.g. 5 % 2 is 1.
# def count_evens(nums):
# =============================================================================
def count_evens(nums):
    """Return the number of even ints in nums.

    The original left this body at module level (a bare `return` outside a
    function is a SyntaxError); it belongs under the def shown in the
    comment block above.
    """
    count = 0
    for n in nums:
        # n % 2 is 0 for even, 1 for odd (Python's % is non-negative for a
        # positive divisor), so this adds exactly 1 per even element.
        count -= n % 2 - 1
    return count
# =============================================================================
# Given an array length 1 or more of ints, return the difference between the
# largest and smallest values in the array. Note: the built-in min(v1, v2)
# and max(v1, v2) functions return the smaller or larger of two values.
# =============================================================================
def big_diff(nums):
    """Return max(nums) - min(nums) for a non-empty list of ints.

    The original recomputed max and min once per element inside a loop
    (O(n^2)) and hit a NameError on an empty list; a single pass of each
    built-in is enough.
    """
    return max(nums) - min(nums)
# =============================================================================
# Return the "centered" average of an array of ints, which we'll say is the mean
# average of the values, except ignoring the largest and smallest values in the
# array. If there are multiple copies of the smallest value, ignore just one
# copy, and likewise for the largest value. Use int division to produce the final
# average. You may assume that the array is length 3 or more.
# =============================================================================
def centered_average(nums):
    """Mean of nums ignoring one copy each of the smallest and largest value.

    Uses int division, as the exercise text above explicitly requires ("Use
    int division to produce the final average") — the original's `/`
    returned a float. Also sorts a copy instead of mutating the caller's
    list. Assumes len(nums) >= 3 per the spec.
    """
    ordered = sorted(nums)
    return sum(ordered[1:-1]) // (len(ordered) - 2)
# Takes the array and sorts it (asc by default)
# Then returns the sum of all the ints in the array except for the first and
# the last one and divides it by the length -2 of the array (-2 because removed
# two of the values).
# =============================================================================
# Return the sum of the numbers in the array, returning 0 for an empty array.
# Except the number 13 is very unlucky, so it does not count and numbers that
# come immediately after a 13 also do not count.
# =============================================================================
nums = ([1, 2, 2, 1, 13, 1])
def sum13(nums):
    """Sum the list, skipping every 13 and the element immediately after a 13.

    The original popped elements out of the caller's list (destructive) and
    mishandled consecutive 13s: for [13, 13, 1] it removed the second 13 as
    the "follower", then counted the trailing 1, returning 1 instead of 0.
    This single pass is non-destructive and handles runs of 13s correctly.
    """
    total = 0
    skip_next = False
    for value in nums:
        if value == 13:
            skip_next = True  # 13 itself never counts; neither does its successor
            continue
        if skip_next:
            skip_next = False
            continue
        total += value
    return total
print(sum13(nums))
# %% Return the sum of the numbers in the array, except ignore sections of numbers
# starting with a 6 and extending to the next 7 (every 6 will be followed by at
# least one 7). Return 0 for no numbers.
# =============================================================================
# =============================================================================
# Given an array of ints, return True if 6 appears as either the first or last
# element in the array. The array will be length 1 or more.
# =============================================================================
def first_last6(nums):
    """True when 6 is the first or the last element (list length >= 1)."""
    return 6 in (nums[0], nums[-1])
# =============================================================================
# Given an array of ints, return True if the array is length 1 or more, and the
# first element and the last element are equal.
# =============================================================================
def same_first_last(nums):
    """True when the list is non-empty and its first and last elements are equal."""
    if not nums:
        return False
    return nums[0] == nums[-1]
# =============================================================================
# Return an int array length 3 containing the first 3 digits of pi, {3, 1, 4}.
# =============================================================================
def make_pi():
    """Return the first three digits of pi as a list."""
    digits = (3, 1, 4)
    return list(digits)
# =============================================================================
# Given 2 arrays of ints, a and b, return True if they have the same first
# element or they have the same last element. Both arrays will be length 1 or
# more.
# =============================================================================
def common_end(a, b):
    """True when a and b (each length >= 1) share the same first element or the same last element."""
    if a[0] == b[0]:
        return True
    return a[-1] == b[-1]
# =============================================================================
# Given an array of ints length 3, return the sum of all the elements.
# =============================================================================
def sum3(nums):
    """Return the sum of the elements (0 for an empty list).

    The original wrapped the return in a pointless `for` loop that exited
    on the first iteration — and therefore returned None for an empty list
    instead of 0.
    """
    return sum(nums)
#could also just
def sum3(nums):
    """Return the sum of the elements of nums."""
    total = 0
    for value in nums:
        total += value
    return total
# =============================================================================
# Given an array of ints length 3, return an array with the elements
# "rotated left" so {1, 2, 3} yields {2, 3, 1}.
# =============================================================================
nums=([1, 2, 3])
def rotate_left3(nums):
    """Rotate the list left by one position *in place* and return it."""
    head = nums.pop(0)   # detach the first element...
    nums.append(head)    # ...and re-attach it at the tail
    return nums
rotate_left3(nums)
# =============================================================================
# Given an array of ints length 3, return a new array with the elements in
# reverse order, so {1, 2, 3} becomes {3, 2, 1}.
# =============================================================================
nums=([1,2,3])
def reverse3(nums):
    """Return a new list with the elements of nums in reverse order."""
    return list(reversed(nums))
reverse3(nums)
# =============================================================================
# Given an array of ints length 3, figure out which is larger, the first or last
# element in the array, and set all the other elements to be that value.
# Return the changed array.
# =============================================================================
def max_end3(nums):
    """Set every element of the length-3 list to the larger of its first
    and last values, in place, and return the list.

    Replaces the original's needless outer `for` loop (which returned after
    its first iteration anyway) with a single max() and one assignment.
    """
    big = max(nums[0], nums[-1])
    nums[0] = nums[1] = nums[2] = big
    return nums
#Alt. solution:
def max_end3(nums):
    """Overwrite all three entries with the larger of nums[0] and nums[2]; returns nums."""
    largest = nums[0] if nums[0] >= nums[2] else nums[2]
    for i in range(3):
        nums[i] = largest
    return nums
# =============================================================================
# Given an array of ints, return the sum of the first 2 elements in the array.
# If the array length is less than 2, just sum up the elements that exist,
# returning 0 if the array is length 0.
# =============================================================================
def sum2(nums):
    """Sum of the first two elements; shorter lists sum what exists (0 for empty)."""
    # Slicing never raises, so this covers length 0, 1, and >= 2 uniformly.
    return sum(nums[:2])
# %%
# Given 2 int arrays, a and b, each length 3, return a new array length 2
# containing their middle elements.
a = [1, 2, 3]  # fixed: the original `a=[(1,2,3])` had mismatched brackets (SyntaxError)
b = [2, 3, 4]
def middle_way(a, b):
    """Return a length-2 list holding the middle elements of two length-3 lists."""
    # Wrap in [] so a list (not a tuple) is returned.
    c = [a[1], b[1]]
    return c
#%% Given an array of ints, return a new array length 2 containing the first
# and last elements from the original array. The original array will be length
# 1 or more.
def make_ends(nums):
    """Return [first, last] of nums (length >= 1)."""
    # For a one-element list nums[0] and nums[-1] coincide, so a single
    # expression covers both branches of the original.
    return [nums[0], nums[-1]]
#%% Given an int array length 2, return True if it contains a 2 or a 3.
#just need true or false so can just ask it to return if contains 2 or 3
def has23(nums):
    """True when the list contains a 2 or a 3."""
    return any(wanted in nums for wanted in (2, 3))
# %% Given a string and a non-negative int n, return a larger string that is
# n copies of the original string.
def string_times(str, n):
    """Return n concatenated copies of str ('' when n <= 0)."""
    repeated = str * n
    return repeated
#Alt solution:
def string_times(str, n):
    """Return n concatenated copies of str, built piece by piece."""
    pieces = []
    for _ in range(n):  # range(n) is [0, 1, 2, .... n-1]
        pieces.append(str)
    return "".join(pieces)
# %% Given a string and a non-negative int n, we'll say that the front of the
# string is the first 3 chars, or whatever is there if the string is less than
# length 3. Return n copies of the front;
def front_times(str, n):
    """Return n copies of the first 3 chars of str (or all of it, if shorter)."""
    front = str[:3]
    return front * n
# %% Given a string, return a new string made of every other char starting with
# the first, so "Hello" yields "Hlo".
def string_bits(str):
    """Return every other character of str, starting with the first ("Hello" -> "Hlo")."""
    return "".join(ch for pos, ch in enumerate(str) if pos % 2 == 0)
# %% Given a non-empty string like "Code" return a string like "CCoCodCode".
def string_splosion(str):
    """Return str[:1] + str[:2] + ... + str, e.g. "Code" -> "CCoCodCode"."""
    return "".join(str[:end] for end in range(1, len(str) + 1))
# %% Given a string, return the count of the number of times that a substring
# length 2 appears in the string and also as the last 2 chars of the string,
# so "hixxxhi" yields 1 (we won't count the end substring).
def last2(str):
    """Count length-2 substrings equal to the final 2 chars, excluding the final one itself."""
    if len(str) < 2:
        return 0
    tail = str[-2:]
    # range(len - 2) stops before the final substring, so it is not counted.
    return sum(1 for i in range(len(str) - 2) if str[i:i + 2] == tail)
# %% Given an array of ints, return the number of 9's in the array.
def array_count9(nums):
    """Return how many elements of nums equal 9."""
    return nums.count(9)
# %% Given an array of ints, return True if one of the first 4 elements in the
# array is a 9. The array length may be less than 4.
def array_front9(nums):
    """True if a 9 appears among the first 4 elements (the list may be shorter).

    The original wrapped the membership test in a loop that always returned
    on its first iteration; a slice + membership test states the same thing
    directly, and `9 in []` is False, so the empty-list guard is free.
    """
    return 9 in nums[:4]
# alt. solution
def array_front9(nums):
    """True if 9 occurs among the first four entries of nums."""
    # Slicing is safe even when the list holds fewer than 4 elements.
    for value in nums[:4]:
        if value == 9:
            return True
    return False
# %% Given an array of ints, return True if the sequence of numbers 1, 2, 3
# appears in the array somewhere.
def array123(nums):
    """True when the values 1, 2, 3 appear consecutively in nums.

    Fixes the original string-containment trick, which produced false
    positives such as [11, 2, 31]: its repr "[11, 2, 31]" contains the
    text "1, 2, 3" even though the int sequence does not occur.
    """
    for i in range(len(nums) - 2):
        if nums[i:i + 3] == [1, 2, 3]:
            return True
    return False
# alternate solution
def array123(nums):
    """True when 1, 2, 3 occur in adjacent positions somewhere in nums."""
    # Iterate with length-2 so i+1 and i+2 stay in range.
    return any(
        nums[i] == 1 and nums[i + 1] == 2 and nums[i + 2] == 3
        for i in range(len(nums) - 2)
    )
def last2(str):
    """Count earlier occurrences of the string's final 2-char substring."""
    tail = str[-2:]
    hits = 0
    for i in range(len(str) - 2):
        if str[i:i + 2] == tail:
            hits += 1
    return hits
# %%
# Given 2 strings, a and b, return the number of the positions where they contain
# the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the
# "xx", "aa", and "az" substrings appear in the same place in both strings.
def string_match(a, b):
    """Count positions where a and b contain the same length-2 substring."""
    overlap = min(len(a), len(b))
    # overlap - 1 so that i+1 is always a valid index in both strings.
    return sum(1 for i in range(overlap - 1) if a[i:i + 2] == b[i:i + 2])
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
| true |
4763cfa57b705ec80b8cfc23e7f2e233c158a61b | Python | kialio/gsfcpyboot | /Day_01/05_Basemap/bcbm1.py | UTF-8 | 10,253 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# Purpose : Python Boot Camp - Basemap Teaching Program 1.
# Ensure that environment variable PYTHONUNBUFFERED=yes
# This allows STDOUT and STDERR to both be logged in chronological order
import sys # platform, args, run tools
import os # platform, args, run tools
import argparse # For parsing command line
import datetime # For date/time processing
import numpy as np
import h5py
import matplotlib as mpl
mpl.use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show, subplots
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm as bm_cm
import matplotlib.cm as mpl_cm
#########################################################################
# Command Line Parameters Class
#########################################################################
class Bcbm1CP():
    """Command-line parser for the Basemap teaching program."""
    def bcbm1_cp(self, bcbm1_cmd_line):
        """Parse bcbm1_cmd_line (argv-style list) into self.args.

        Recognised flags: -v/--verbose and -t/--test_mode, both defaulting
        to False. Returns 0; argparse itself exits on a bad command line.
        """
        description = ("Python Boot Camp - Basemap Teaching Program 1")
        parser = argparse.ArgumentParser(description=description)
        help_text = ("Display processing messages to STDOUT " +
                     "(DEFAULT=NO)")
        parser.add_argument("-v", "--verbose",
                            default=False,
                            help=help_text,
                            action="store_true",
                            dest="verbose")
        help_text = ("Run program in test mode " +
                     "(DEFAULT=NO)")
        parser.add_argument("-t", "--test_mode",
                            default=False,
                            help=help_text,
                            action="store_true",
                            dest="test_mode")
        self.args = parser.parse_args(bcbm1_cmd_line)
        if (self.args.verbose):
            sys.stdout.write("BCBM1 : bcbm1_cmd_line = " + str(bcbm1_cmd_line) + "\n")
        # Return
        return(0)
#########################################################################
# Main Program
#########################################################################
class Bcbm1():
    """Driver for the Basemap teaching program: parses the command line,
    optionally logs progress to stdout, and renders the example map(s) to
    PNG files in the current directory."""
    def bcbm1(self, bcbm1_cmd_line):
        """Run the program with the given argv-style list.

        Returns 0 on success, or the non-zero status propagated from the
        parser / map functions."""
        # Start time
        self.start_time = datetime.datetime.today()
        # Parse input parameters from cmd line
        bcbm1_cp1 = Bcbm1CP()
        bcbm1_cp1_ret = bcbm1_cp1.bcbm1_cp(bcbm1_cmd_line)
        self.bcbm1_cmd_line = bcbm1_cmd_line
        if (len(self.bcbm1_cmd_line) == 0):
            self.bcbm1_cmd_line = " "
        if (bcbm1_cp1_ret):
            return(bcbm1_cp1_ret)
        self.verbose = bcbm1_cp1.args.verbose
        self.test_mode = bcbm1_cp1.args.test_mode
        # Test mode pins the timestamp so plot titles are reproducible.
        if (self.test_mode):
            self.timestamp = "Test Mode Date/Time Stamp"
            if (self.verbose):
                sys.stdout.write("BCBM1 : Running in test mode\n")
                sys.stdout.write("BCBM1 : sys.version = " + str(sys.version) + "\n")
        else:
            self.timestamp = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S")
            if (self.verbose):
                sys.stdout.write("BCBM1 : Program started : " + str(self.start_time) + "\n")
                sys.stdout.write("BCBM1 : sys.version = " + str(sys.version) + "\n")
        if (self.verbose):
            sys.stdout.write("BCBM1 : sys.version = " + str(sys.version) + "\n")
            sys.stdout.write("BCBM1 : self.verbose = " + str(self.verbose) + "\n")
            sys.stdout.write("BCBM1 : self.test_mode = " + str(self.test_mode) + "\n")
        # Call functions
        bcbm1_f11_ret = self.display_map1()
        if (bcbm1_f11_ret):
            return(bcbm1_f11_ret)
        # Maps 2 and 3 are currently disabled; uncomment to render them too.
        #bcbm1_f21_ret = self.display_map2()
        #if (bcbm1_f21_ret):
        #    return(bcbm1_f21_ret)
        #bcbm1_f31_ret = self.display_map3()
        #if (bcbm1_f31_ret):
        #    return(bcbm1_f31_ret)
        # End program
        self.end_time = datetime.datetime.today()
        self.run_time = self.end_time - self.start_time
        if (self.verbose):
            if (self.test_mode):
                pass
            else:
                sys.stdout.write("BCBM1 : Program ended : " + str(self.end_time) + "\n")
                sys.stdout.write("BCBM1 : Run time : " + str(self.run_time) + "\n")
        if (self.verbose):
            sys.stdout.write("BCBM1 : Program completed normally\n")
        return(0)
    # Define functions
    #------------------------------------------------------------------------------
    def display_map1(self):
        """Render a Mollweide-projection world map (coastlines plus a
        20-degree graticule) and save it as bcbm1_plot1.png; returns 0."""
        if (self.verbose):
            sys.stdout.write("BCBM1 : display_map1 ACTIVATED\n")
        # Set up figure in Matplotlib
        self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
        self.current_figure.suptitle("Basemap - First Map\n" +
                                     self.timestamp)
        self.current_figure.text(0.05, 0.95, "Mollweide Projection")
        self.current_figure.subplots_adjust(left=0.05,
                                            right=0.95,
                                            top=0.80,
                                            bottom=0.05,
                                            wspace=0.2,
                                            hspace=0.4)
        self.current_plot = self.current_figure.add_subplot(1, 1, 1)
        # Plot figure
        self.map = Basemap(projection='moll',
                           lon_0=0,
                           #lat_0=0,
                           resolution='c')
        #self.map.drawmapboundary(fill_color='aqua')
        #self.map.fillcontinents(color='coral',lake_color='aqua')
        self.map.drawcoastlines()
        #self.map.drawcountries()
        #self.map.drawrivers()
        #self.map.drawstates()
        # Graticule every 20 degrees.
        self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
        self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
        # Write the output to a graphic file
        self.current_figure.savefig("bcbm1_plot1")
        mpl.pyplot.close(self.current_figure)
        return(0)
    #------------------------------------------------------------------------------
    def display_map2(self):
        """Render a Robinson-projection map with the Blue Marble background
        and save it as bcbm1_plot2.png; returns 0. Currently unused."""
        if (self.verbose):
            sys.stdout.write("BCBM1 : display_map2 ACTIVATED\n")
        # Set up figure in Matplotlib
        self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
        self.current_figure.suptitle("Basemap - Second Map\n" +
                                     self.timestamp)
        self.current_figure.text(0.05, 0.95, "Robinson Projection - Blue Marble")
        self.current_figure.subplots_adjust(left=0.05,
                                            right=0.95,
                                            top=0.80,
                                            bottom=0.05,
                                            wspace=0.2,
                                            hspace=0.4)
        self.current_plot = self.current_figure.add_subplot(1, 1, 1)
        # Plot figure
        self.map = Basemap(projection='robin',
                           lon_0=0,
                           lat_0=0,
                           resolution='c')
        #self.map.drawcoastlines()
        #self.map.drawcountries()
        #self.map.drawrivers()
        #self.map.drawstates()
        self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
        self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
        self.map.bluemarble() # Known bug here - may appear upside down
        # Write the output to a graphic file
        self.current_figure.savefig("bcbm1_plot2")
        mpl.pyplot.close(self.current_figure)
        return(0)
    #------------------------------------------------------------------------------
    def display_map3(self):
        """Render a near-sided perspective map with a custom land/ocean
        colour mask and save it as bcbm1_plot3.png; returns 0. Currently
        unused."""
        if (self.verbose):
            sys.stdout.write("BCBM1 : display_map3 ACTIVATED\n")
        # Set up figure in Matplotlib
        self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
        self.current_figure.suptitle("Basemap - Third Map\n" +
                                     self.timestamp)
        self.current_figure.text(0.05, 0.95, "Near-Sided Perspective Projection - Different Colours")
        self.current_figure.subplots_adjust(left=0.05,
                                            right=0.95,
                                            top=0.80,
                                            bottom=0.05,
                                            wspace=0.2,
                                            hspace=0.4)
        self.current_plot = self.current_figure.add_subplot(1, 1, 1)
        # Plot figure
        self.map = Basemap(projection='nsper',
                           lon_0=0,
                           lat_0=0,
                           resolution='c')
        #self.map.drawmapboundary(fill_color='#7777ff')
        #self.map.fillcontinents(color='#ddaa66',lake_color='#7777ff')
        self.map.drawlsmask(land_color = "#ddaa66",
                            ocean_color="#7777ff")
        #self.map.drawcoastlines()
        #self.map.drawcountries()
        #self.map.drawrivers()
        #self.map.drawstates()
        self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
        self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
        # Display day and night shading
        #self.date = datetime.datetime.utcnow()
        #self.map_nightshade = self.map.nightshade(self.date)
        # Write the output to a graphic file
        self.current_figure.savefig("bcbm1_plot3")
        mpl.pyplot.close(self.current_figure)
        return(0)
    #------------------------------------------------------------------------------
#------------------------------------------------------------------------------
####################################################
def main(argv=None): # When run as a script
    """Run the Bcbm1 driver with argv (defaults to sys.argv[1:]).

    Fixes the original, which referenced the undefined name
    bcbm1_cmd_line (NameError) whenever an explicit argv was passed in.
    """
    if argv is None:
        argv = sys.argv[1:]
    bcbm1 = Bcbm1()
    bcbm1_ret = bcbm1.bcbm1(argv)
if __name__ == '__main__':
sys.exit(main())
| true |
c9d4e282c9627caaa3fe8327d84bde8f4de01538 | Python | JStephenD/TicTacToeConsole | /rules.py | UTF-8 | 846 | 3.046875 | 3 | [
"MIT"
] | permissive | from typing import List
def check_winner_row(grid: List[List[str]]) -> str:
    """Return the mark filling an entire row, or None when no row is uniform."""
    for cells in grid:
        # A row led by a blank can't be a win; otherwise a winning row
        # collapses to a single distinct value.
        if cells[0] != " " and len(set(cells)) == 1:
            return cells[0]
def check_winner_column(grid: List[List[str]]) -> str:
    """Return the mark filling an entire column, or None when no column is uniform."""
    size = len(grid)
    for col in range(size):
        top = grid[0][col]
        if top == " ":
            continue
        if all(grid[row][col] == top for row in range(size)):
            return top
def check_winner_diagonal(grid: List[List[str]]) -> str:
    """Return the mark filling either diagonal, or None.

    NOTE(review): the early-out inspects the centre cell, through which
    both diagonals pass — that shortcut only holds for odd-sized grids.
    """
    size = len(grid)
    centre = grid[int(size / 2)][int(size / 2)]
    if centre == " ":
        return
    main_first = grid[0][0]
    if all(grid[i][i] == main_first for i in range(size)):
        return main_first
    anti_first = grid[0][size - 1]
    if all(grid[i][size - i - 1] == anti_first for i in range(size)):
        return anti_first
# can be added:
# custom rules
| true |
7f25b538de6e8bf82b3e527dba13328034d82c8c | Python | laura-liber/fsdi_111 | /database/__init__.py | UTF-8 | 408 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""Database models"""
from app import db
class User(db.Model):
    """SQLAlchemy model for an application user."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String, nullable=False)
    last_name = db.Column(db.String, nullable=False)
    # Optional free-text hobbies field.
    hobbies = db.Column(db.String, nullable=True)
    def __repr__(self):
        # Debug-friendly representation, e.g. <User 'Ada' 'Lovelace'>.
        return "<User %r %r>" % (self.first_name, self.last_name)
| true |
58ffd1716240fbf2bfa3fcff64fd656535a43d50 | Python | amydrummond/container_test | /initial_test.py | UTF-8 | 10,944 | 2.625 | 3 | [] | no_license | print('Script started.')
import requests, random, time, pandas as pd, civis, datetime, os, sys
import lxml.html as LH
def text(elt):
    """Return the element's text content with non-breaking spaces (U+00A0)
    normalized to plain spaces."""
    content = elt.text_content()
    return content.replace(u'\xa0', u' ')
def dict_merge(parent_dic, added_dic):
    """Merge added_dic into parent_dic in place (added values win on key
    collisions) and return parent_dic.

    The original re-implemented dict.update with a manual key loop; the
    built-in does exactly the same thing.
    """
    parent_dic.update(added_dic)
    return parent_dic
def changetoint(dataframe, columnlist):
    """Coerce the given columns of the DataFrame to int in place, treating NaN as 0."""
    cleaned = dataframe[columnlist].fillna(0.0)
    dataframe[columnlist] = cleaned.astype(int)
def available_races(tree):
    """Return the subset of known election-type element ids present in the
    parsed page, in canonical order.

    Replaces the original bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) with `except KeyError` — lxml's
    HtmlElement.get_element_by_id raises KeyError when the id is absent
    and no default is supplied.
    """
    election_types = ['General', 'Primary', 'Special_General', 'Special_Primary', 'Special']
    these_elections = []
    for etype in election_types:
        try:
            tree.get_element_by_id(etype)
        except KeyError:
            continue
        these_elections.append(etype)
    return these_elections
def available_offices(tree):
    """Return the subset of known office element ids present in the parsed
    page, in canonical order.

    Replaces the original bare `except:` with `except KeyError` — lxml's
    HtmlElement.get_element_by_id raises KeyError when the id is absent
    and no default is supplied.
    """
    offices = ['US-Senate', 'US-House', 'Statewide', 'State-Senate', 'State-House']
    these_offices = []
    for office in offices:
        try:
            tree.get_element_by_id(office)
        except KeyError:
            continue
        these_offices.append(office)
    return these_offices
def get_candidate(candidate_class_element):
    """Scrape one candidate entry from a voter-guide HTML element.

    Returns a dict with keys: name, cand_party (raw status + party text,
    e.g. "Incumbent (D)"), party (single-letter code), current_status,
    endorsed ('Y'/'N'/''), grade. Every field is best-effort: a missing
    sub-element leaves the value as ''. The bare excepts are deliberate —
    each field is scraped independently and falls back to ''.

    Fixes the original's crash on an empty name: `name[0] == '*'` raised
    IndexError when the candidate-name node was missing; startswith('*')
    is safe on the empty string.
    """
    candidate_data = {}
    candidate = candidate_class_element
    name = ''
    endorsed = ''
    cand_party = ''
    current_status = ''
    grade = ''
    party = ''
    try:
        name = candidate.find_class('candidate-name')[0].text_content().strip()
    except:
        name = ''
    # A leading '*' is a display marker, not part of the name.
    if name.startswith('*'):
        name = name[1:]
    try:
        grade = candidate.find_class('candidate-grade')[0].text_content().strip()
    except:
        grade = ''
    try:
        if candidate.get('class') == 'print-candidate candidate-endorsed-true':
            endorsed = 'Y'
        else:
            endorsed = 'N'
    except:
        endorsed = ''
    # Alternate endorsement markup: an explicit 'candidate-endorsed' child.
    try:
        if len(candidate.find_class('candidate-endorsed')[0].getchildren()) > 0:
            endorsed = 'Y'
        else:
            endorsed = 'N'
    except:
        pass
    try:
        cand_party = candidate.find_class('candidate-incumbent')[0].text_content().strip()
    except:
        cand_party = ''
    try:
        # Text looks like "Incumbent (D)": the status precedes the '('...
        current_status = cand_party[:cand_party.find('(')-1].strip()
    except:
        current_status = ''
    try:
        # ...and the single-letter party code sits just before the ')'.
        party = cand_party[-2]
    except:
        party = ''
    candidate_data['name'] = name
    candidate_data['cand_party'] = cand_party
    candidate_data['party'] = party
    candidate_data['current_status'] = current_status
    candidate_data['endorsed'] = endorsed
    candidate_data['grade'] = grade
    return candidate_data
def civis_upload_status(civis_upload, data_name='data'):
    """Poll a Civis import job until it leaves queued/running, then report.

    Exits the whole process via sys.exit(1) when the job failed.
    NOTE(review): relies on the private attribute ``_civis_state`` and on
    ``sys``/``time`` being imported at module level -- confirm upstream.
    """
    while civis_upload._civis_state in ['queued', 'running']:
        print("waiting...")
        time.sleep(10)
    print(civis_upload._civis_state)
    if civis_upload._civis_state == 'failed':
        print("Import to Civis failed.")
        print(civis_upload.result(10))
        print("Ending without completing ...")
        sys.exit(1)
    else:
        print("New {} has been uploaded to Civis Platform.".format(data_name))
def nameparse(list_of_tuples):
    """Fold (word, label) pairs from a name parser into a flat name dict.

    Words with unrecognised labels accumulate under 'Other'; initials are
    used as fallbacks when the full name parts are absent, and as a last
    resort first/last names are split out of the 'Other' text.
    """
    parsed_name = {'PrefixMarital': '', 'PrefixOther': '', 'GivenName': '',
                   'FirstInitial': '', 'MiddleName': '', 'MiddleInitial': '',
                   'Surname': '', 'LastInitial': '', 'SuffixGenerational': '',
                   'SuffixOther': '', 'Nickname': '', 'Other': ''}
    for word, label in list_of_tuples:
        target = label if label in parsed_name else 'Other'
        parsed_name[target] = (parsed_name[target] + ' ' + str(word)).strip()
    first_name = parsed_name['GivenName'] or parsed_name['FirstInitial']
    middle_name = parsed_name['MiddleName'] or parsed_name['MiddleInitial']
    last_name = parsed_name['Surname'] or parsed_name['LastInitial']
    other = parsed_name['Other']
    if first_name == '' and last_name == '' and other != '':
        # Last resort: take the first and last whitespace-separated words.
        first_name = other[:other.find(' ')].strip()
        last_name = other[other.rfind(' '):].strip()
    return {'prefix': (parsed_name['PrefixMarital'] + ' ' + parsed_name['PrefixOther']).strip(),
            'first_name': first_name, 'middle_name': middle_name, 'last_name': last_name,
            'suffix': (parsed_name['SuffixGenerational'] + ' ' + parsed_name['SuffixOther']).strip(),
            'nickname': parsed_name['Nickname'], 'other': other}
def cdf(col_list):
    """Return an empty DataFrame whose columns are *col_list*."""
    empty_frame = pd.DataFrame(columns=col_list, index=())
    return empty_frame
# Accumulator frame for all scraped candidate grades.
nra_grade_columns = ['state', 'year', 'race', 'date', 'level', 'contest', 'district', 'name', 'cand_party',
                    'party', 'current_status', 'endorsed', 'grade']
nra_grades = cdf(nra_grade_columns)
print('Created NRA df.')
# One-row log of this run (written to state_leg.nra_log at the end).
run_log_columns = ['runtime', 'records', 'next_run']
run_log = cdf(run_log_columns)
print('Created run log columns.')
state_time_col = ['runtime', 'state']
# Civis API client and target database id for all uploads below.
client = civis.APIClient()
db_id = client.get_database_id('Everytown for Gun Safety')
# NOTE(review): abb_sname / sname_abb appear unused below -- confirm before removing.
abb_sname = {}
sname_abb = {}
def now():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    moment = datetime.datetime.fromtimestamp(time.time())
    return moment.strftime('%Y-%m-%d %H:%M:%S')
def pad(num):
    """Return *num* as a string zero-padded to at least two characters."""
    return str(num).zfill(2)
## First, determine next runtime
# Pick a random offset of 1-6 days, 1-23 hours and 1-59 minutes from now
# so successive scrapes don't hit the site on a predictable schedule.
runtime = now()
nextday = random.randrange(1,7)
nexthour = random.randrange(1,24)
nextmin = random.randrange(1,60)
# Minutes: carry into hours on overflow past 60.
min = 0
if nextmin + int(now()[14:16])>=60:
    nexthour+=1
    min = nextmin + int(now()[14:16]) - 60
else:
    min = nextmin + int(now()[14:16])
min = pad(min)
# Hours: carry into days on overflow past 24.
# NOTE(review): `min` shadows the builtin, and the `- 23` / `- 27` wrap
# arithmetic below looks off by one -- confirm intended.
hour = 0
if nexthour + int(now()[11:13])>=24:
    nextday+=1
    hour = nexthour + int(now()[11:13]) - 23
else:
    hour = nexthour + int(now()[11:13])
hour = pad(hour)
time_of_run = hour +':' + min + ':00'
# Days: assume every month has at most 28 days to keep the carry simple.
day = 0
month = int(now()[5:7])
ryear = int(now()[:4])
if nextday + int(now()[8:10])>=28:
    month+=1
    day = nextday + int(now()[8:10]) - 27
else:
    day = nextday + int(now()[8:10])
if month > 12:
    month = 1
    ryear+=1
month = pad(month)
day = pad(day)
date_of_run = (str(ryear) + '-' + month + '-' + day)
nextrun = date_of_run + ' ' + time_of_run
print('Determined next run time: {}.'.format(nextrun))
print('Getting states.')
# Pull the state name -> abbreviation mapping from the legiscan table.
states = civis.io.read_civis(table='legiscan.ls_state',
            database='Everytown for Gun Safety', use_pandas=True)
state_record_dic = states.to_dict('records')
print('Retrieving state records.')
state_ids = {}
for record in state_record_dic:
    state_ids[record.get('state_name')]=record.get('state_abbr')
# Scrape one NRA-PVF archive page per state, logging progress to Civis and
# sleeping randomly between states to avoid hammering the site.
for state in list(state_ids.keys()):
    print('Now working on {}.'.format(state))
    state_time = cdf(state_time_col)
    state_time = state_time.append({'runtime' : now(), 'state' : state}, ignore_index=True)
    import_update = civis.io.dataframe_to_civis(df=state_time, database = db_id,
                             table='amydrummond.state_update_log', max_errors = 2,
                             existing_table_rows = 'append', polling_interval=10)
    civis_upload_status(import_update, 'state log')
    url = 'https://www.nrapvf.org/grades/archive/' + state.replace(' ', '%20')
    try:
        r = requests.get(url)
        root = LH.fromstring(r.content)
        html_doc = LH.document_fromstring(r.content)
        races = available_races(html_doc)
        for race in races:
            row = {}
            row['state']=state_ids.get(state)
            row['year']=now()[:4]
            race_section = html_doc.get_element_by_id(race)
            election_group = race_section.find_class('election-group')[0]
            election_date = election_group.find_class('election-date')[0].text_content().strip()
            offices_up = available_offices(election_group)
            row['race']=race
            row['date']=election_date
            for level in offices_up:
                row['level']=level
                lev = election_group.get_element_by_id(level)
                specific_elections = lev.getchildren()
                for contest in specific_elections:
                    row['contest']=contest[0].text_content().strip()
                    district = contest.find_class('election-location')[0].text_content().strip()
                    row['district']=district
                    candidates = contest.find_class('print-candidate')
                    for candidate in candidates:
                        row = dict_merge(row,get_candidate(candidate))
                        nra_grades = nra_grades.append({'state' : row.get('state'), 'year' : row.get('year'),
                        'race' : row.get('race'), 'date' : row.get('date'), 'level' : row.get('level'),
                        'contest' : row.get('contest'), 'district' : row.get('district'), 'name' : row.get('name'),
                        'cand_party' : row.get('cand_party'), 'party' : row.get('party'), 'current_status' : row.get('current_status'),
                        'endorsed' : row.get('endorsed'), 'grade' : row.get('grade')}, ignore_index=True)
        print('Data loaded. Naptime...')
        time.sleep(random.randrange(0,100))
    # except Exception (not a bare except): keeps Ctrl-C/SystemExit working
    # while still skipping states whose page fails to fetch or parse.
    except Exception:
        print("Unable to get records for {} from {}.".format(state, now()[:4]))
        time.sleep(random.randrange(0,60))
# Finalise types, append this run to the log, and push both frames to Civis.
changetoint(nra_grades, ['year'])
run_log = run_log.append({'runtime' : runtime, 'records' : len(nra_grades), 'next_run' : nextrun}, ignore_index=True)
print("Uploading to Civis.")
print('Now uploading session changes. There are {} records.'.format(len(nra_grades)))
import_summary = civis.io.dataframe_to_civis(df=nra_grades, database = db_id,
                             table='amydrummond.nra_grades_t', max_errors = 2,
                             existing_table_rows = 'truncate', polling_interval=10)
civis_upload_status(import_summary, 'nra grades')
import_log = civis.io.dataframe_to_civis(df=run_log, database = db_id,
                             table='state_leg.nra_log', max_errors = 2,
                             existing_table_rows = 'append', polling_interval=10)
# Fixed: previously checked import_summary a second time, so a failed log
# upload was never detected.
civis_upload_status(import_log, 'nra log')
print('All records have been uploaded.')
| true |
680f832e7604cffac62289be7e728329af7a4720 | Python | bowenwen/personal_finance | /market_data.py | UTF-8 | 4,303 | 2.78125 | 3 | [
"MIT"
] | permissive | import os
import json
import pandas as pd
import yfinance as yf
class MarketData:
    """A class to retrieve market data using yfinance"""

    def getData(self):
        """Download per-ticker CSVs, then merge them into the master files."""
        self.__data_downloader()
        self.__data_aggregator()

    def __init__(self, config_file="config.json"):
        """Load *config_file* (JSON) and ensure the market-data folder exists."""
        self.config = {}
        # read self.config
        with open(config_file, 'r') as f:
            self.config = json.load(f)
        self.directory = os.path.abspath(
            os.getcwd() + self.config['market_data_folder'])
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)

    def __data_downloader(self):
        """Fetch price and dividend history for every configured ticker,
        writing one CSV per ticker into the market-data folder.

        Failures are reported and skipped so one bad ticker does not stop
        the batch; `except Exception` (instead of the previous bare
        `except:`) keeps KeyboardInterrupt/SystemExit working.
        """
        # get stock price history
        for ticker in self.config['tickers']:
            try:
                print(f"downloading history for {ticker}...")
                price_history = yf.download(
                    tickers=ticker,
                    # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max (optional, default is '1mo')
                    period=self.config['period'],
                    # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo (optional, default is '1d')
                    interval=self.config['interval']).round(2).reset_index()
                price_history['ticker_tmp'] = ticker
                price_history.insert(
                    loc=0,
                    column='Ticker',
                    value=price_history['ticker_tmp'])
                price_history.drop(columns=['ticker_tmp'], inplace=True)
                startDate = str(price_history['Date'][0].date())
                price_history.to_csv(
                    os.path.join(
                        self.directory,
                        '{}_{}_{}_{}.csv'.format('history',
                                                 ticker,
                                                 startDate,
                                                 self.config['period'])),
                    index=False)
            except Exception:
                print(f"ticker {ticker} history download failed, skipping...")
        # get stock dividends
        for ticker in self.config['tickers']:
            try:
                print(f"downloading dividend for {ticker}...")
                ticker_obj = yf.Ticker(ticker)
                div_history = ticker_obj.dividends
                div_history = pd.DataFrame(div_history).round(2).reset_index()
                div_history['ticker_tmp'] = ticker
                div_history.insert(
                    loc=0,
                    column='Ticker',
                    value=div_history['ticker_tmp'])
                div_history.drop(columns=['ticker_tmp'], inplace=True)
                startDate = str(div_history['Date'][0].date())
                div_history.to_csv(
                    os.path.join(
                        self.directory,
                        '{}_{}_{}_{}.csv'.format('dividend',
                                                 ticker,
                                                 startDate,
                                                 self.config['period'])),
                    index=False)
            except Exception:
                print(f"ticker {ticker} dividend download failed, skipping...")

    def __data_aggregator(self):
        """Concatenate all history_*/dividend_* CSVs found under the current
        working tree into the configured master export files, dropping
        duplicate rows."""
        pattern_list = ['history', 'dividend']
        # aggregate files
        for pattern in pattern_list:
            dir_dict = {}
            i = 0
            # NOTE(review): walks the whole cwd tree, not just self.directory
            # -- confirm that is intended.
            for root, dirs, files in os.walk(os.getcwd()):
                for file in files:
                    if file.startswith(pattern) and file.endswith(".csv"):
                        label = file
                        dir_dict[label] = os.path.join(root, file)
                        i = i+1
            master_df = pd.DataFrame()
            for filename in dir_dict:
                # load raw data
                master_df = pd.concat(
                    [pd.read_csv(dir_dict[filename]), master_df],
                    ignore_index=True)
            master_df.drop_duplicates(
                subset=None, keep='first', inplace=True)
            master_df.to_csv(
                os.path.join(
                    self.directory,
                    self.config['market_data_export'][pattern]),
                index=False)
            del master_df, dir_dict
| true |
a516cbe2ef8739302c7989b1c0a64f5525bb7546 | Python | HonniLin/leetcode | /history/32.py | UTF-8 | 1,913 | 3.90625 | 4 | [] | no_license | '''
@Description: 32. Longest Valid Parentheses
@Author: linhong
@Date: 2019-10-15 20:18:22
@LastEditTime: 2019-10-15 22:18:31
@desc: 给定一个只包含 '(' 和 ')' 的字符串,找出最长的包含有效括号的子串的长度。
1/栈记录字符串的下标,进行长度的计算。 list[-1]取栈顶
2/也有dp的做法:
def longestValidParentheses(self, s):
dp, res = [0] * len(s), 0 # 初始化dp、定义最优结果变量
for i in range(len(s)):
if s[i] == ')': # 只考虑以')'结尾的子串
if i > 0 and s[i - 1] == '(': # 第一中情况,直接加 2
dp[i] = dp[i - 2] + 2
if i > 0 and s[i - 1] == ')': # 第二种情况,
if i - dp[i - 1] > 0 and s[i - dp[i - 1] - 1] == '(':
if i - dp[i - 1] - 1 > 0: # 当前合法子串,前面还有子串,的情况
dp[i] = dp[i - 1] + dp[i - dp[i - 1] - 2] + 2
else: # 当前合法子串,前面--没有--子串,的情况
dp[i] = dp[i - 1] + 2
res = max(res, dp[i]) # 更新最长的合法子串
return res
'''
class Solution(object):
    def longestValidParentheses(self, s):
        """Return the length of the longest valid '()' substring of *s*.

        A stack of indices is kept; its bottom element is always the index
        just before the current run of balanced parentheses.
        :type s: str
        :rtype: int
        """
        stack = [-1]
        best = 0
        for idx, ch in enumerate(s):
            if ch == '(':
                stack.append(idx)
            else:
                stack.pop()
                if stack:
                    best = max(best, idx - stack[-1])
                else:
                    # Unmatched ')': it becomes the new run boundary.
                    stack.append(idx)
        return best
if __name__ == "__main__":
    # Smoke test: "()((" + ")" cases; "()(()" -> longest valid run is "()" = 2.
    sol = Solution()
    s = "()(()"
    ans = sol.longestValidParentheses(s)
    # print() works on both Python 2 and 3; the old `print ans` statement
    # is a SyntaxError on Python 3.
    print(ans)
| true |
cda59a5c677b22bfd59f53cf5a8218588adbcf77 | Python | nltk/nltk_contrib | /nltk_contrib/scripttranscriber/tokens.py | UTF-8 | 8,602 | 2.515625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Definition for tokens, languages, documents and doclists, to store
the results of extraction, and express in XML.
For the XML format see dochandler.py
"""
__author__ = """
rws@uiuc.edu (Richard Sproat)
hollingk@cslu.ogi.edu (Kristy Hollingshead)
"""
import xml.sax.saxutils
from math import sqrt
from __init__ import BASE_
import documents
XML_HEADER_ = '<?xml version="1.0" encoding="UTF-8"?>'  # standard XML prolog
LANG_INDENT_ = ' ' * 4   # indentation for <lang> elements in serialized output
TOKEN_INDENT_ = ' ' * 6  # indentation for <token> elements in serialized output
def SumProd(x, y):
  """Return the dot product of the equal-length sequences *x* and *y*."""
  return sum(a * b for a, b in zip(x, y))
class Token:
  """A token is a term extracted from text, with attributes
  count, pronunciation, morphological decomposition
  """
  def __init__(self, string):
    # Store the surface form as UTF-8 bytes; input that fails .encode with
    # UnicodeDecodeError (already-encoded str under Python 2) is kept as-is.
    try: self.string_ = string.encode('utf-8')
    except UnicodeDecodeError: self.string_ = string
    self.count_ = 1            # occurrence count
    self.morphs_ = []          # morphological decomposition (list of str)
    self.pronunciations_ = []  # distinct pronunciations
    self.frequencies_ = []     # per-document frequencies (see TokenFreqStats)
    self.langid_ = ''          # language id, set via SetLangId()
  def __eq__(self, other):
    # Tokens are equal when string, morphs, pronunciations and language all
    # match (via the canonical hash encoding).
    # NOTE(review): __hash__ is not defined alongside __eq__, so instances
    # are unhashable on Python 3 -- confirm tokens are never dict keys.
    skey = self.EncodeForHash()
    okey = other.EncodeForHash()
    return skey == okey
  def __repr__(self):
    return '#<%s %d %s %s %s>' % (self.string_,
                                  self.count_,
                                  self.morphs_,
                                  self.pronunciations_,
                                  self.langid_)
  def XmlEncode(self):
    # Serialize as a <token> element; all text fields are XML-escaped.
    xml_string_ = '<token count="%d" morphs="%s" prons="%s">%s</token>'
    morphs = ' '.join(self.morphs_)
    morphs = xml.sax.saxutils.escape(morphs)
    prons = ' ; '.join(self.pronunciations_)
    prons = xml.sax.saxutils.escape(prons)
    string_ = xml.sax.saxutils.escape(self.string_)
    xml_result = xml_string_ % (self.count_, morphs, prons, string_)
    return TOKEN_INDENT_ + xml_result
  def EncodeForHash(self):
    # Canonical identity string: surface form, morphs, prons and language.
    return '%s<%s><%s><%s>' % (self.String(),
                               ' '.join(self.Morphs()),
                               ' '.join(self.Pronunciations()),
                               self.LangId())
  def String(self):
    return self.string_
  def SetCount(self, count):
    self.count_ = count
  def IncrementCount(self, increment = 1):
    self.count_ += increment
  def Count(self):
    return self.count_
  def AddPronunciation(self, pron):
    # Duplicate pronunciations are ignored.
    if pron not in self.pronunciations_:
      try: self.pronunciations_.append(pron.encode('utf-8'))
      except UnicodeDecodeError: self.pronunciations_.append(pron)
  def Pronunciations(self):
    return self.pronunciations_
  def SetMorphs(self, morphs):
    # Replaces any existing morphs, encoding each to UTF-8 where possible.
    self.morphs_ = []
    for m in morphs:
      try: self.morphs_.append(m.encode('utf-8'))
      except UnicodeDecodeError: self.morphs_.append(m)
  def Morphs(self):
    return self.morphs_
  def SetLangId(self, lang):
    self.langid_ = lang
  def LangId(self):
    return self.langid_
class TokenFreqStats:
  """Per-token frequency statistics: raw per-document frequencies plus
  their cached sum, sum of squares and variance.
  """
  def __init__(self, tok):
    self.token_ = tok
    self.frequencies_ = []
    self.freqsum_ = 0
    self.freqsumsq_ = 0
    self.variance_ = 0
  def __repr__(self):
    return '#<%s %s %.6f %.6f %.6f>' % (self.token_,
                                        self.frequencies_,
                                        self.freqsum_,
                                        self.freqsumsq_,
                                        self.variance_)
  def Token(self):
    return self.token_
  def Frequencies(self):
    return self.frequencies_
  def AddFrequency(self, f):
    self.frequencies_.append(f)
  def SetFrequencies(self, freq):
    # Copy the input so later mutation of *freq* does not leak in.
    self.frequencies_ = list(freq)
  def NormFrequencies(self):
    """Scale the frequencies to sum to one (no rescale when all zero)."""
    as_floats = [float(f) for f in self.frequencies_]
    total = float(sum(as_floats))
    if total != 0.0:
      as_floats = [f / total for f in as_floats]
    self.frequencies_ = as_floats
  def CalcFreqStats(self):
    """Recompute the cached sum, sum of squares and variance."""
    n = len(self.frequencies_)
    self.freqsum_ = float(sum(self.frequencies_))
    self.freqsumsq_ = sum(f * f for f in self.frequencies_)
    self.variance_ = self.freqsumsq_ / n - (self.freqsum_ ** 2) / (n ** 2)
  def FreqSum(self):
    return self.freqsum_
  def FreqVariance(self):
    return self.variance_
class DocTokenStats:
  """Holder for Doclist-specific token statistics, such as frequency
  counts. Also allows for calculation of pairwise comparison metrics
  such as Pearson's correlation.
  """
  def __init__(self, doclist=None):
    # Default to an empty Doclist when none is supplied.
    if doclist is None:
      self.doclist_ = documents.Doclist()
    else: self.doclist_ = doclist
    self.n_ = len(self.doclist_.Docs())  # number of documents
    self.tokstats_ = {}                  # token hash -> TokenFreqStats
  def InitTokenStats(self, tok):
    # Count *tok* in every document (matching language only), then build
    # and cache its normalised frequency statistics.
    tstats = TokenFreqStats(tok)
    tfreq = []
    for doc in self.doclist_.Docs():
      c = 0
      for lang in doc.Langs():
        if tok.LangId() != lang.Id(): continue
        tmptok = lang.MatchToken(tok)
        if tmptok is not None:
          c += tmptok.Count()
      tfreq.append(c)
    tstats.SetFrequencies(tfreq)
    tstats.NormFrequencies()
    tstats.CalcFreqStats()
    self.tokstats_[tok.EncodeForHash()] = tstats
    return tstats
  def AddTokenStats(self, tstats):
    # First writer wins: existing stats for the same token are kept.
    tokhash = tstats.Token().EncodeForHash()
    if tokhash not in self.tokstats_:
      self.tokstats_[tokhash] = tstats
  def GetTokenStats(self, tok):
    # Lazily compute stats for tokens not yet cached.
    try: return self.tokstats_[tok.EncodeForHash()]
    except KeyError: return self.InitTokenStats(tok)
  def TokenStats(self):
    return self.tokstats_.values()
  def SetN(self, n):
    self.n_ = n
  def GetN(self):
    return self.n_
  def PearsonsCorrelation(self, token1, token2):
    # Pearson's rho between the two tokens' per-document frequency vectors;
    # 0.0 when either token has zero variance.
    stats1 = self.GetTokenStats(token1)
    stats2 = self.GetTokenStats(token2)
    freq1 = stats1.Frequencies()
    freq2 = stats2.Frequencies()
    sumxy = sum(map(lambda x, y: x * y, freq1, freq2))
    covxy = sumxy/float(self.n_) - \
            (stats1.FreqSum()*stats2.FreqSum())/float(self.n_**2)
    try:
      rho = covxy/sqrt(stats1.FreqVariance()*stats2.FreqVariance())
    except ZeroDivisionError:
      rho = 0.0
    #print x.String(),y.String(),sumx2,sumy2,varx,vary,sumxy,covxy,rho
    return rho
class Lang:
  """Holder for the tokens collected for a single language."""
  def __init__(self):
    self.id_ = ''     # language id (stored UTF-8 encoded by SetId)
    self.tokens_ = []  # Token objects in insertion order
  def XmlEncode(self):
    """Serialize this language and its tokens as an XML fragment ('' if empty)."""
    if len(self.tokens_) == 0: return ''
    xml_string_ = '<lang id="%s">\n%s\n%s</lang>'
    xml_tokens = []
    for token_ in self.Tokens():
      xml_tokens.append(token_.XmlEncode())
    xml_result = xml_string_ % (self.id_, '\n'.join(xml_tokens),
                                LANG_INDENT_)
    return LANG_INDENT_ + xml_result
  def Id(self):
    return self.id_
  def SetId(self, id):
    self.id_ = id.encode('utf-8')
  def Tokens(self):
    return self.tokens_
  def SetTokens(self, tokens):
    self.tokens_ = []
    for t in tokens:
      self.AddToken(t)
  def AddToken(self, token, merge=False):
    """If an identical token already exists in dictionary,
    will merge tokens and cumulate their counts. Checks to
    see that morphology and pronunciations are identical,
    otherwise the tokens will not be merged.
    """
    token.SetLangId(self.id_)
    if not merge:
      self.tokens_.append(token)
    else:
      exists = self.MatchToken(token)
      if exists is None:
        self.tokens_.append(token)
      else:
        exists.IncrementCount(token.Count())
  def MatchToken(self, token):
    """Return the stored token equal to *token* (via Token.__eq__), or None."""
    try:
      i = self.tokens_.index(token)
      return self.tokens_[i]
    except ValueError:
      return None
  def CompactTokens(self):
    """Merge identical tokens and cumulate their counts. Checks to see
    that morphology and pronunciations are identical, otherwise the
    tokens will not be merged.
    """
    groups = {}  # renamed from `map`, which shadowed the builtin
    for token_ in self.tokens_:
      hash_string = token_.EncodeForHash()
      try: groups[hash_string].append(token_)
      except KeyError: groups[hash_string] = [token_]
    ntokens = []
    # sorted() replaces keys.sort(): dict.keys() returns an unsortable view
    # on Python 3, while the result is identical on Python 2.
    for k in sorted(groups.keys()):
      token_ = groups[k][0]
      for otoken in groups[k][1:]:
        token_.IncrementCount(otoken.Count())
      ntokens.append(token_)
    self.tokens_ = ntokens
| true |
7c142ada5557ec3875f7c070b9c25579c1775ea4 | Python | Swati213/pythonbasic | /4-6/class&object/oop/simple_class.py | UTF-8 | 248 | 3.140625 | 3 | [] | no_license | class personal :
def __init__(self, name, age) :
self.name = name
self.age = age
def employee (self):
print "Name {0} Age {1}".format (self.name, self.age)
p = personal("aman", "20")  # demo instance; employee() is never invoked here
| true |
b15d08f1c82f3987f028b0013b513866e354b45f | Python | anlzou/sharing-bikes-emotional-analysis | /CodeAndDate/Py_one.py | UTF-8 | 7,423 | 3.015625 | 3 | [] | no_license | import xlrd
from xlutils.copy import copy
def readExcel(path, File, col):
    """Extract column *col* from the first sheet of an .xlsx workbook and
    write one value per line to '<name>数据提取.txt'.

    Only '.xlsx' input is supported; the first spreadsheet row is assumed
    to be a header and is skipped.
    """
    fname = path + File
    workbook = xlrd.open_workbook(fname)
    sheet = workbook.sheets()[0]
    nrows = sheet.nrows - 1  # number of data rows (header excluded)
    File = File.replace(".xlsx", "数据提取.txt")
    path_txt = path + File
    # `with` guarantees the output handle is closed even on error.
    with open(path_txt, "w", encoding="utf-8") as file:
        file.write('评论数据(行|人):' + str(nrows) + '\n')
        # range(1, sheet.nrows) covers every data row; the previous
        # range(1, nrows) silently dropped the last row of the sheet.
        for i in range(1, sheet.nrows):
            cell_value = sheet.cell_value(i, col)
            file.write(str(cell_value) + '\n')
    print(File, "ok")
# Extract the review column from each workbook (column index differs per file).
readExcel('./data/','小黄车.xlsx',2)
readExcel('./data/','小蓝车.xlsx',2)
readExcel('./data/',"共享单车 - 副本.xlsx",1)
import jieba
import numpy as np
#打开词典文件,返回列表
def open_dict(Dict = 'name', path=r'./data/字典/'):
    """Load the one-word-per-line lexicon '<path><Dict>.txt' into a list.

    A context manager guarantees the file handle is closed (the previous
    version leaked it).
    """
    full_path = path + '%s.txt' % Dict
    words = []
    with open(full_path, 'r', encoding="utf-8") as dictionary:
        for word in dictionary:
            words.append(word.strip('\n'))
    return words
def judgeodd(num):
    """Return 'even' when *num* is even, 'odd' otherwise."""
    return 'even' if num % 2 == 0 else 'odd'
# NOTE: the dictionary paths below are relative to the working directory.
deny_word = open_dict(Dict = '否定词', path= r'./data/字典/')
posdict = open_dict(Dict = 'positive', path= r'./data/字典/')
negdict = open_dict(Dict = 'negative', path= r'./data/字典/')
degree_word = open_dict(Dict = '程度级别词语', path= r'./data/字典/')
# The degree lexicon is segmented by marker words into weight bands:
mostdict = degree_word[degree_word.index('extreme')+1 : degree_word.index('very')]# weight 4: multiplies the following sentiment word by 4
verydict = degree_word[degree_word.index('very')+1 : degree_word.index('more')]# weight 3
moredict = degree_word[degree_word.index('more')+1 : degree_word.index('ish')]# weight 2
ishdict = degree_word[degree_word.index('ish')+1 : degree_word.index('last')]# weight 0.5
def sentiment_score_list(dataset):
    """Score *dataset* (reviews separated by '。') against the sentiment
    lexicons and return, per review, a list of [positive, negative] pairs.

    Degree adverbs scale the following sentiment word (x4/x3/x2/x0.5), an
    odd number of preceding negation words flips its polarity, and a
    trailing '!' adds emphasis when a sentiment word is present.
    """
    seg_sentence = dataset.split('。')
    count1 = []
    count2 = []
    for sen in seg_sentence:  # iterate over the individual reviews
        segtmp = jieba.lcut(sen, cut_all=False)  # word-segment into a list
        i = 0  # position of the word being scanned
        a = 0  # position just after the previous sentiment word
        poscount = 0  # running score of the current positive word
        poscount2 = 0  # positive score after negation flips
        poscount3 = 0  # final positive score (including '!' bonus)
        negcount = 0
        negcount2 = 0
        negcount3 = 0
        for word in segtmp:
            if word in posdict:  # positive sentiment word
                poscount += 1
                c = 0
                for w in segtmp[a:i]:  # scan the modifiers before it
                    if w in mostdict:
                        poscount *= 4.0
                    elif w in verydict:
                        poscount *= 3.0
                    elif w in moredict:
                        poscount *= 2.0
                    elif w in ishdict:
                        poscount *= 0.5
                    elif w in deny_word:
                        c += 1
                if judgeodd(c) == 'odd':  # odd negation count flips polarity
                    poscount *= -1.0
                    poscount2 += poscount
                    poscount = 0
                    poscount3 = poscount + poscount2 + poscount3
                    poscount2 = 0
                else:
                    poscount3 = poscount + poscount2 + poscount3
                    poscount = 0
                a = i + 1  # next modifier scan starts after this word
            elif word in negdict:  # negative sentiment word, mirrors above
                negcount += 1
                d = 0
                for w in segtmp[a:i]:
                    if w in mostdict:
                        negcount *= 4.0
                    elif w in verydict:
                        negcount *= 3.0
                    elif w in moredict:
                        negcount *= 2.0
                    elif w in ishdict:
                        negcount *= 0.5
                    elif w in deny_word:
                        # Fixed: was `w in degree_word`, which wrongly
                        # counted degree adverbs as negations; use the
                        # negation lexicon like the positive branch does.
                        d += 1
                if judgeodd(d) == 'odd':
                    negcount *= -1.0
                    negcount2 += negcount
                    negcount = 0
                    negcount3 = negcount + negcount2 + negcount3
                    negcount2 = 0
                else:
                    negcount3 = negcount + negcount2 + negcount3
                    negcount = 0
                a = i + 1
            elif word == '!' or word == '!':  # exclamation mark emphasis
                for w2 in segtmp[::-1]:  # scan back for a sentiment word
                    # Fixed: was `w2 in posdict or negdict`, which is always
                    # true because a non-empty list is truthy.
                    if w2 in posdict or w2 in negdict:
                        poscount3 += 2
                        negcount3 += 2
                        break
            i += 1  # advance the scan position
        # Clamp so neither final score is negative; a negative score on one
        # side is transferred to the opposite side instead.
        pos_count = 0
        neg_count = 0
        if poscount3 < 0 and negcount3 > 0:
            neg_count += negcount3 - poscount3
            pos_count = 0
        elif negcount3 < 0 and poscount3 > 0:
            pos_count = poscount3 - negcount3
            neg_count = 0
        elif poscount3 < 0 and negcount3 < 0:
            neg_count = -poscount3
            pos_count = -negcount3
        else:
            pos_count = poscount3
            neg_count = negcount3
        count1.append([pos_count, neg_count])
        count2.append(count1)
        count1 = []
    return count2
def sentiment_score(senti_score_list):
    """Collapse per-sentence [pos, neg] pairs into summary statistics.

    Returns [Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg] for the first
    review only (call sites pass a single review per invocation).
    """
    score = []
    for review in senti_score_list:
        arr = np.array(review)
        pos_total = np.sum(arr[:, 0])
        neg_total = np.sum(arr[:, 1])
        avg_pos = float('%.1f' % np.mean(arr[:, 0]))
        avg_neg = float('%.1f' % np.mean(arr[:, 1]))
        std_pos = float('%.1f' % np.std(arr[:, 0]))
        std_neg = float('%.1f' % np.std(arr[:, 1]))
        score.append([pos_total, neg_total, avg_pos, avg_neg, std_pos, std_neg])
    return score[0]
def readText(path, File):
    """Score every review line of '<name>数据提取.txt' and write one
    'Pos Neg AvgPos AvgNeg StdPos StdNeg' row per review to
    '<name>数据处理结果.txt' (the header line is copied through unchanged).
    """
    fname = path + File
    File = File.replace("数据提取.txt", "数据处理结果.txt")
    path_txt = path + File
    # `with` guarantees both handles are closed (the old code leaked them).
    with open(fname, "r", encoding="utf-8") as file:
        readlines = file.readlines()
    with open(path_txt, "w", encoding="utf-8") as filewrite:
        i = 1
        for line in readlines:
            line_one = line
            line = line.replace('。',',')  # drop sentence breaks inside a review
            line = line.replace('\n','匹配符。匹配符')  # terminate the review for the scorer
            if i == 1:
                # First line is the "评论数据(行|人):N" header -- copy it through.
                nrow = line_one[line_one.find(":")+1:len(line_one)].rstrip("\n")
                print(nrow)
                filewrite.write(line_one)
                i = i + 1
                continue
            data = sentiment_score(sentiment_score_list(line))
            s = str(data).replace('[', '').replace(']','')  # strip list brackets
            s = s.replace("'",'').replace(',','') +'\n'  # quotes/commas -> space-separated row
            filewrite.write(s)
            i = i + 1
    print(File,"ok")
# Score each previously extracted review file.
readText('./data/','小黄车数据提取.txt')
readText('./data/','小蓝车数据提取.txt')
readText('./data/','共享单车 - 副本数据提取.txt')
| true |
0d7cdf97005f75e7caca44f33028e074547196de | Python | Tony308/PythonPractices | /Product.py | UTF-8 | 107 | 3.78125 | 4 | [] | no_license | def product(items):
product = 1
for item in items:
product *= item
return product
| true |
3ae0d9cf0ff42b9a1eb4b139be4e3bdd1f8e9d97 | Python | ElinorThorne/QA_Work | /NumberToString.py | UTF-8 | 559 | 3.671875 | 4 | [] | no_license | def ones(x):
y={}
y[1] = "one"
y[2] = "two"
y[3] = "three"
y[4] = "four"
y[5] = "five"
y[6] = "six"
y[7] = "seven"
y[8] = "eight"
y[9] = "nine"
y[10] = "ten"
y[11] = "eleven"
y[12] = "twelve"
y[13] = "thirteen"
y[14] = "forteen"
y[15] = "fifteen"
y[16] = "sixteen"
y[17] = "seventeen"
y[18] = "eighteen"
y[19] = "nineteen"
for i in y.keys():
if x == i:
x = i
print(y.get(x))
# Interactive driver: currently only prints the thousands digit.
# NOTE(review): `word` is never used and the ones() call is commented out,
# which suggests this conversion is unfinished -- confirm intent.
num = int(input("Please input any number between 1 and 9999:\n"))
word = ""
a = int(num/1000)
print(a)
#ones(a/1000)
| true |
29d61e386d9f019a414d6b4943c4a89b242b3038 | Python | Aaiyesho89/roguehostapd | /roguehostapd/apctrl.py | UTF-8 | 6,660 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python2
"""
This module was made to wrap the hostapd
"""
import os
import threading
import ctypes
import glob
from roguehostapd.config.hostapdconfig import (
HostapdConfig, HOSTAPD_EXECUTION_PATH, HOSTAPD_DIR,
ROGUEHOSTAPD_RUNTIME_CONFIGPATH,
ROGUEHOSTAPD_DENY_MACS_CONFIGPATH)
def find_so():
    """
    Return the path of the first compiled roguehostapd shared object (.so)
    """
    matches = glob.glob(HOSTAPD_DIR + '/hostapd/*.so')
    return matches[0]
class KarmaData(ctypes.Structure):
    """
    ctypes mirror of hostapd's linked-list node describing a station:
    association flag, ESSID (length + 32-byte buffer), MAC address and a
    pointer to the next node.
    """
    pass

# _fields_ is assigned after the class statement because the structure is
# self-referential (next_data points at another KarmaData node).
KarmaData._fields_ = [("is_assoc", ctypes.c_ubyte), ("ssid_len",
                      ctypes.c_size_t),
                      ("ssid", ctypes.c_ubyte * 32), ("mac_addr",
                      ctypes.c_ubyte * 6),
                      ("next_data", ctypes.POINTER(KarmaData))]
class Hostapd(object):
    """
    Hostapd wrapper class: drives the hostapd shared library via ctypes,
    running its main() in a background thread.
    """
    def __init__(self):
        """
        Construct the class
        :param self: A Hostapd object
        :type self: Hostapd
        :return: None
        :rtype: None
        """
        self.config_obj = None
        self.hostapd_thread = None
        self.hostapd_lib = None
        self.config_obj = HostapdConfig()
    @staticmethod
    def _parse_karma_data(karma_data):
        """
        get the associated clients' mac address and essid
        :param karma_data: head of hostapd's linked list of station records
        :type karma_data: ctypes.POINTER(KarmaData)
        :return: A list of tuple of essid and mac address tuple
        :rtype: list
        """
        ret = []
        if karma_data:
            current = karma_data
            while current:
                if current.contents.is_assoc:
                    # convert ssid_len to integer
                    ssid_len = int(current.contents.ssid_len)
                    # convert mac address to string
                    # NOTE(review): format(..., 'x') does not zero-pad, so
                    # bytes < 0x10 render as a single hex digit -- confirm.
                    mac_addr = current.contents.mac_addr
                    mac_l = [format(mac_addr[i], 'x') for i in range(6)]
                    mac_str = ':'.join(mac_l)
                    # convert ssid to string
                    ssid_buf = current.contents.ssid
                    ssid_list = [ssid_buf[i] for i in range(ssid_len)]
                    ssid = ''.join(map(chr, ssid_list))
                    ret.append((mac_str, ssid))
                current = current.contents.next_data
        return ret
    def get_karma_data(self):
        """
        get the data for the KARMA attack victims from hostapd
        :param self: A Hostapd object
        :type self: Hostapd
        :return: A list of tuple of essid and mac address tuple
        :rtype: list
        """
        karma_data = self.hostapd_lib.get_assoc_karma_data()
        mac_ssid_pairs = self._parse_karma_data(karma_data)
        return mac_ssid_pairs
    def is_alive(self):
        """
        API for check if the hostapd thread is running
        :param self: A Hostapd object
        :type self: Hostapd
        :return: True if the hostapd is running else False
        :rtype: bool
        """
        return self.hostapd_thread.is_alive()
    def create_hostapd_conf_file(self, hostapd_config, options):
        """
        Create the roguehostapd configuration file
        :param self: A Hostapd object
        :type self: Hostapd
        :param hostapd_config: Hostapd configuration for hostapd.conf
        :type hostapd_config: dict
        :param options: Hostapd command line options
        :type options: dict
        :return: None
        :rtype: None
        """
        self.config_obj.init_config()
        self.config_obj.write_configs(hostapd_config, options)
    def start(self, hostapd_config, options):
        """
        Start the hostapd process
        :param self: A Hostapd object
        :type self: Hostapd
        :param hostapd_config: Hostapd configuration for hostapd.conf
        :type hostapd_config: dict
        :param options: Hostapd command line options
        :type options: dict
        :return: None
        :rtype: None
        ..note: the start function uses ctypes to load the shared library
        of hostapd and use it to call the main function to lunch the AP
        """
        # update the hostapd configuration based on user input
        self.create_hostapd_conf_file(hostapd_config, options)
        # get the hostapd command to lunch the hostapd
        # (argv strings must be bytes because hostapd's main() expects char*)
        hostapd_cmd = [
            HOSTAPD_EXECUTION_PATH.encode("utf-8"),
            ROGUEHOSTAPD_RUNTIME_CONFIGPATH.encode("utf-8")
        ]
        for key in self.config_obj.options:
            if self.config_obj.options[key]:
                hostapd_cmd += self.config_obj.options[key]
        num_of_args = len(hostapd_cmd)
        str_arr_type = ctypes.c_char_p * num_of_args
        hostapd_cmd = str_arr_type(*hostapd_cmd)
        # get the hostapd shared library
        libpath = find_so()
        self.hostapd_lib = ctypes.cdll.LoadLibrary(libpath)
        # init hostapd lib info
        self.hostapd_lib.get_assoc_karma_data.restype = ctypes.POINTER(
            KarmaData)
        # start the hostapd thread
        self.hostapd_thread = threading.Thread(
            target=self.hostapd_lib.main, args=(len(hostapd_cmd), hostapd_cmd))
        self.hostapd_thread.start()
    def stop(self):
        """
        Stop the hostapd
        :param self: A Hostapd object
        :type self: Hostapd
        :return: None
        :rtype: None
        ..note: the stop function uses the eloop_terminate function in hostapd
        shared library to stop AP. It also removes the runtime config files.
        """
        self.hostapd_lib.eloop_terminate()
        if self.hostapd_thread.is_alive():
            self.hostapd_thread.join(5)
        if os.path.isfile(ROGUEHOSTAPD_RUNTIME_CONFIGPATH):
            os.remove(ROGUEHOSTAPD_RUNTIME_CONFIGPATH)
        if os.path.isfile(ROGUEHOSTAPD_DENY_MACS_CONFIGPATH):
            os.remove(ROGUEHOSTAPD_DENY_MACS_CONFIGPATH)
if __name__ == '__main__':
    # Demo: bring up a karma-enabled AP on wlan0 and spin until Ctrl-C.
    HOSTAPD_CONFIG_DICT = {
        'ssid': 'test',
        'interface': 'wlan0',
        'karma_enable': 1,
        'deny_macs': ['00:00:00:11:22:33']
    }
    HOSTAPD_OPTION_DICT = {
        'debug_verbose': True,
        'key_data': True,
        'timestamp': False,
        'version': False,
        'mute': True,
        'eloop_term_disable': True
    }
    HOSTAPD_OBJ = Hostapd()
    HOSTAPD_OBJ.start(HOSTAPD_CONFIG_DICT, HOSTAPD_OPTION_DICT)
    import time
    # Sleep-loop keeps the main thread alive; Ctrl-C triggers a clean stop.
    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            HOSTAPD_OBJ.stop()
            break
| true |
8f6cf1f7f5497dd497fe6a60c3521e6361ad260c | Python | splintered-reality/py_trees_ros_tutorials | /py_trees_ros_tutorials/eight_dynamic_application_loading.py | UTF-8 | 19,891 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://github.com/splintered-reality/py_trees_ros_tutorials/raw/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
About
^^^^^
The previous tutorial enables execution of a specific job upon
request. You will inevitably grow the functionality of the robot beyond this
and a very common use case for the trees is to switch the context of the robot
between 'applications' - calibration, tests, demos, scheduled tasks from
a fleet server, etc.
While these contexts could be entirely managed by the tree simultaneously,
the exclusivity of the applications lends itself far more easily to the following
paradigm:
1. Construct a tree on bringup for ticking over basic functionality while idling
2. Dynamically insert/prune application subtrees on demand, rejecting requests when already busy
This mirrors both the way smart phones operate (which also happens to be a reasonable
mode of operation for robots due to similar resource contention arguments) and the
conventional use of roslaunch files to bringup a core and later bootstrap / tear
down application level processes on demand.
This tutorial uses a wrapper class around :class:`py_trees_ros.trees.BehaviourTree` to handle:
1. Construction of the core tree
2. A job (application) request callback
3. Insertion of the application subtree in the request callback (if not busy)
4. Pruning of the application subtree in a post-tick handler (if finished)
5. A status report service for external clients of the tree
.. note::
Only the basics are demonstrated here, but you could imagine extensions
to this class that would make it truly useful in an application driven robotics
system - abstractions so application modules need not be known in advance,
application subtrees delivered as python code, more
detailed tree introspection in status reports (given it's responsibility
to be the decision making engine for the robot, it is the best snapshot of the
robot's current activity). You're only limited by your imagination!
Core Tree (Dot Graph)
^^^^^^^^^^^^^^^^^^^^^
.. code-block:: bash
$ py-trees-render -b py_trees_ros_tutorials.eight_dynamic_application_loading.tutorial_create_root
.. graphviz:: dot/tutorial-eight-core-tree.dot
:align: center
:caption: py_trees_ros_tutorials.eight_dynamic_application_loading.tutorial_create_root
Application Subtree (Dot Graph)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: bash
$ py-trees-render --with-blackboard-variables py_trees_ros_tutorials.eight_dynamic_application_loading.tutorial_create_scan_subtree
.. graphviz:: dot/tutorial-eight-application-subtree.dot
:align: center
:caption: py_trees_ros_tutorials.eight_dynamic_application_loading.tutorial_create_scan_subtree
Dynamic Application Tree (Class)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../py_trees_ros_tutorials/eight_dynamic_application_loading.py
:language: python
:linenos:
:lines: 380-384
:caption: Dynamic Application Tree
.. literalinclude:: ../py_trees_ros_tutorials/eight_dynamic_application_loading.py
:language: python
:linenos:
:lines: 386-397
:caption: Init - Create the Root Tree
.. literalinclude:: ../py_trees_ros_tutorials/eight_dynamic_application_loading.py
:language: python
:linenos:
:lines: 399-419
:caption: Setup - Application Subscribers & Services
.. literalinclude:: ../py_trees_ros_tutorials/eight_dynamic_application_loading.py
:language: python
:linenos:
:lines: 421-444
:caption: Requests - Inserting Application Subtrees
.. literalinclude:: ../py_trees_ros_tutorials/eight_dynamic_application_loading.py
:language: python
:linenos:
:lines: 467-482
:caption: Post-Execution - Pruning Application Subtrees
.. literalinclude:: ../py_trees_ros_tutorials/eight_dynamic_application_loading.py
:language: python
:linenos:
:lines: 445-465
:caption: Status Reports
.. note::
In the code above, there is a conspicuous absence of thread locks. This is
possible due to the use of ROS2's single threaded executors to handle service and
subscriber callbacks along with the tree's tick tock that operates from within
ROS2 timer callbacks. If using a behaviour tree, as is exemplified here,
to handle robot application logic, you should never need to go beyond single
threaded execution and thus avoid the complexity and bugs that come along with
having to handle concurrency (this is a considerable improvement on the situation
for ROS1).
Running
^^^^^^^
.. code-block:: bash
# Launch the tutorial
$ ros2 launch py_trees_ros_tutorials tutorial_eight_dynamic_application_loading_launch.py
# In another shell, catch the tree snapshots
$ py-trees-tree-watcher -b
# Trigger scan/cancel requests from the qt dashboard
.. image:: images/tutorial-eight-dynamic-application-loading.png
"""
##############################################################################
# Imports
##############################################################################
import operator
import sys
import launch
import launch_ros
import py_trees
import py_trees_ros.trees
import py_trees.console as console
import py_trees_ros_interfaces.action as py_trees_actions # noqa
import py_trees_ros_interfaces.srv as py_trees_srvs # noqa
import rclpy
import std_msgs.msg as std_msgs
from . import behaviours
from . import mock
##############################################################################
# Launcher
##############################################################################
def generate_launch_description():
    """
    Launcher for the tutorial.

    Returns:
        the launch description
    """
    # The behaviour tree node itself, appended after the mocked robot nodes.
    tree_node = launch_ros.actions.Node(
        package='py_trees_ros_tutorials',
        executable="tree-dynamic-application-loading",
        output='screen',
        emulate_tty=True,
    )
    launch_nodes = mock.launch.generate_launch_nodes()
    launch_nodes.append(tree_node)
    return launch.LaunchDescription(launch_nodes)
##############################################################################
# Tutorial
##############################################################################
def tutorial_create_root() -> py_trees.behaviour.Behaviour:
    """
    Create the core tree: gather dashboard events and battery state onto the
    blackboard while simultaneously ticking a prioritised task selector
    (battery emergency first, idle as the fallback).

    Returns:
        the root of the tree
    """
    def check_battery_low_on_blackboard(blackboard: py_trees.blackboard.Blackboard) -> bool:
        # Eternal guard condition: fires while the battery warning flag is set.
        return blackboard.battery_low_warning

    # Data gathering: pipe button events and battery status to the blackboard.
    event_gathering = py_trees.composites.Sequence(name="Topics2BB", memory=True)
    event_gathering.add_children([
        py_trees_ros.subscribers.EventToBlackboard(
            name="Scan2BB",
            topic_name="/dashboard/scan",
            qos_profile=py_trees_ros.utilities.qos_profile_unlatched(),
            variable_name="event_scan_button"
        ),
        py_trees_ros.subscribers.EventToBlackboard(
            name="Cancel2BB",
            topic_name="/dashboard/cancel",
            qos_profile=py_trees_ros.utilities.qos_profile_unlatched(),
            variable_name="event_cancel_button"
        ),
        py_trees_ros.battery.ToBlackboard(
            name="Battery2BB",
            topic_name="/battery/state",
            qos_profile=py_trees_ros.utilities.qos_profile_unlatched(),
            threshold=30.0
        ),
    ])

    # Emergency task: flash red for as long as the battery warning holds.
    battery_emergency = py_trees.decorators.EternalGuard(
        name="Battery Low?",
        condition=check_battery_low_on_blackboard,
        blackboard_keys={"battery_low_warning"},
        child=behaviours.FlashLedStrip(name="Flash Red", colour="red")
    )
    # Prioritised tasks: emergency pre-empts, idle keeps the tree ticking.
    task_priorities = py_trees.composites.Selector(name="Tasks", memory=False)
    task_priorities.add_children([
        battery_emergency,
        py_trees.behaviours.Running(name="Idle"),
    ])

    root = py_trees.composites.Parallel(
        name="Tutorial Eight",
        policy=py_trees.common.ParallelPolicy.SuccessOnAll(synchronise=False)
    )
    root.add_children([event_gathering, task_priorities])
    return root
def tutorial_create_scan_subtree() -> py_trees.behaviour.Behaviour:
    """
    Create the 'scan' job subtree.

    The job undocks the robot, drives out and performs a rotating scan
    (cancellable from the dashboard), returns home, docks and celebrates.
    The outcome is recorded on the blackboard under ``scan_result``
    ('succeeded', 'cancelled' or 'failed') and printed to the console by
    the final SendResult behaviour.

    Returns:
        :class:`~py_trees.behaviour.Behaviour`: subtree root
    """
    # behaviours
    # Top level: run the job ("Scan or Die"), then always report the result.
    scan = py_trees.composites.Sequence(name="Scan", memory=True)
    scan_or_die = py_trees.composites.Selector(name="Scan or Die", memory=False)
    # Failure branch: notify (flash + pause) and record 'failed'.
    die = py_trees.composites.Sequence(name="Die", memory=True)
    failed_notification = py_trees.composites.Parallel(
        name="Notification",
        policy=py_trees.common.ParallelPolicy.SuccessOnOne()
    )
    # NOTE(review): variable says 'green' but the behaviour flashes red -
    # the name "Flash Red"/colour "red" appears intentional for a failure cue;
    # the local variable name is misleading.
    failed_flash_green = behaviours.FlashLedStrip(name="Flash Red", colour="red")
    failed_pause = py_trees.timers.Timer("Pause", duration=3.0)
    result_failed_to_bb = py_trees.behaviours.SetBlackboardVariable(
        name="Result2BB\n'failed'",
        variable_name='scan_result',
        variable_value='failed',
        overwrite=True
    )
    # Happy path: undock -> scan (or be cancelled) -> dock -> celebrate.
    ere_we_go = py_trees.composites.Sequence(name="Ere we Go", memory=True)
    undock = py_trees_ros.actions.ActionClient(
        name="UnDock",
        action_type=py_trees_actions.Dock,
        action_name="dock",
        action_goal=py_trees_actions.Dock.Goal(dock=False),  # noqa
        generate_feedback_message=lambda msg: "undocking"
    )
    # Cancellation has priority over scanning (memory=False re-checks it
    # every tick while the scan is in progress).
    scan_or_be_cancelled = py_trees.composites.Selector(name="Scan or Be Cancelled", memory=False)
    cancelling = py_trees.composites.Sequence(name="Cancelling?", memory=True)
    is_cancel_requested = py_trees.behaviours.CheckBlackboardVariableValue(
        name="Cancel?",
        check=py_trees.common.ComparisonExpression(
            variable="event_cancel_button",
            value=True,
            operator=operator.eq
        )
    )
    move_home_after_cancel = py_trees_ros.actions.ActionClient(
        name="Move Home",
        action_type=py_trees_actions.MoveBase,
        action_name="move_base",
        action_goal=py_trees_actions.MoveBase.Goal(),  # noqa
        generate_feedback_message=lambda msg: "moving home"
    )
    result_cancelled_to_bb = py_trees.behaviours.SetBlackboardVariable(
        name="Result2BB\n'cancelled'",
        variable_name='scan_result',
        variable_value='cancelled',
        overwrite=True
    )
    # Nominal scan: drive out, rotate while flashing blue, return, record.
    move_out_and_scan = py_trees.composites.Sequence(name="Move Out and Scan", memory=True)
    move_base = py_trees_ros.actions.ActionClient(
        name="Move Out",
        action_type=py_trees_actions.MoveBase,
        action_name="move_base",
        action_goal=py_trees_actions.MoveBase.Goal(),  # noqa
        generate_feedback_message=lambda msg: "moving out"
    )
    # Parallel finishes as soon as the rotation action succeeds.
    scanning = py_trees.composites.Parallel(
        name="Scanning",
        policy=py_trees.common.ParallelPolicy.SuccessOnOne()
    )
    scan_context_switch = behaviours.ScanContext("Context Switch")
    scan_rotate = py_trees_ros.actions.ActionClient(
        name="Rotate",
        action_type=py_trees_actions.Rotate,
        action_name="rotate",
        action_goal=py_trees_actions.Rotate.Goal(),  # noqa
        generate_feedback_message=lambda msg: "{:.2f}%%".format(msg.feedback.percentage_completed)
    )
    scan_flash_blue = behaviours.FlashLedStrip(name="Flash Blue", colour="blue")
    move_home_after_scan = py_trees_ros.actions.ActionClient(
        name="Move Home",
        action_type=py_trees_actions.MoveBase,
        action_name="move_base",
        action_goal=py_trees_actions.MoveBase.Goal(),  # noqa
        generate_feedback_message=lambda msg: "moving home"
    )
    result_succeeded_to_bb = py_trees.behaviours.SetBlackboardVariable(
        name="Result2BB\n'succeeded'",
        variable_name='scan_result',
        variable_value='succeeded',
        overwrite=True
    )
    # Success notification: green flash for a fixed pause.
    celebrate = py_trees.composites.Parallel(
        name="Celebrate",
        policy=py_trees.common.ParallelPolicy.SuccessOnOne()
    )
    celebrate_flash_green = behaviours.FlashLedStrip(name="Flash Green", colour="green")
    celebrate_pause = py_trees.timers.Timer("Pause", duration=3.0)
    dock = py_trees_ros.actions.ActionClient(
        name="Dock",
        action_type=py_trees_actions.Dock,
        action_name="dock",
        action_goal=py_trees_actions.Dock.Goal(dock=True),  # noqa
        generate_feedback_message=lambda msg: "docking"
    )

    class SendResult(py_trees.behaviour.Behaviour):
        # Prints the final scan_result from the blackboard; always succeeds.
        # NOTE(review): the ``name`` parameter is accepted but ignored -
        # super().__init__ hard-codes "Send Result".

        def __init__(self, name: str):
            super().__init__(name="Send Result")
            self.blackboard = self.attach_blackboard_client(name=self.name)
            self.blackboard.register_key(
                key="scan_result",
                access=py_trees.common.Access.READ
            )

        def update(self):
            print(console.green +
                  "********** Result: {} **********".format(self.blackboard.scan_result) +
                  console.reset
                  )
            return py_trees.common.Status.SUCCESS

    send_result = SendResult(name="Send Result")
    # Wire the tree together (children listed in priority/tick order).
    scan.add_children([scan_or_die, send_result])
    scan_or_die.add_children([ere_we_go, die])
    die.add_children([failed_notification, result_failed_to_bb])
    failed_notification.add_children([failed_flash_green, failed_pause])
    ere_we_go.add_children([undock, scan_or_be_cancelled, dock, celebrate])
    scan_or_be_cancelled.add_children([cancelling, move_out_and_scan])
    cancelling.add_children([is_cancel_requested, move_home_after_cancel, result_cancelled_to_bb])
    move_out_and_scan.add_children([move_base, scanning, move_home_after_scan, result_succeeded_to_bb])
    scanning.add_children([scan_context_switch, scan_rotate, scan_flash_blue])
    celebrate.add_children([celebrate_flash_green, celebrate_pause])
    return scan
class DynamicApplicationTree(py_trees_ros.trees.BehaviourTree):
    """
    Wraps the ROS behaviour tree manager in a class that manages loading
    and unloading of jobs (application subtrees).
    """

    def __init__(self):
        """
        Create the core tree and add post tick handlers for post-execution
        management of the tree.
        """
        super().__init__(
            root=tutorial_create_root(),
            unicode_tree_debug=True
        )
        # Prune a finished job subtree after every tick.
        self.add_post_tick_handler(
            self.prune_application_subtree_if_done
        )

    def setup(self, timeout: float):
        """
        Setup the tree and connect additional application management / status
        report subscribers and services.

        Args:
            timeout: time (s) to wait (use common.Duration.INFINITE to block indefinitely)
        """
        super().setup(timeout=timeout)
        self._report_service = self.node.create_service(
            srv_type=py_trees_srvs.StatusReport,
            srv_name="~/report",
            callback=self.deliver_status_report,
            qos_profile=rclpy.qos.qos_profile_services_default
        )
        self._job_subscriber = self.node.create_subscription(
            msg_type=std_msgs.Empty,
            topic="/dashboard/scan",
            callback=self.receive_incoming_job,
            qos_profile=py_trees_ros.utilities.qos_profile_unlatched()
        )

    def receive_incoming_job(self, msg: std_msgs.Empty):
        """
        Incoming job callback - inserts the scan subtree into the priority
        selector if no job is currently active, rejects the request otherwise.

        Args:
            msg: incoming goal message

        Raises:
            Exception: be ready to catch if any of the behaviours raise an exception
        """
        if self.busy():
            self.node.get_logger().warning("rejecting new job, last job is still active")
        else:
            scan_subtree = tutorial_create_scan_subtree()
            try:
                py_trees.trees.setup(
                    root=scan_subtree,
                    node=self.node
                )
            except Exception as e:
                console.logerror(console.red + "failed to setup the scan subtree, aborting [{}]".format(str(e)) + console.reset)
                sys.exit(1)
            # Insert between the battery emergency (index 0) and idle tasks.
            self.insert_subtree(scan_subtree, self.priorities.id, 1)
            self.node.get_logger().info("inserted job subtree")

    def deliver_status_report(
            self,
            unused_request: py_trees_srvs.StatusReport.Request,  # noqa
            response: py_trees_srvs.StatusReport.Response  # noqa
         ):
        """
        Prepare a status report for an external service client.

        Args:
            unused_request: empty request message
        """
        # last result value or none
        last_result = self.blackboard_exchange.blackboard.get(name="scan_result")
        # tip() is None until the tree has been ticked at least once.
        tip = self.root.tip()
        if self.busy():
            response.report = "executing"
        elif tip is not None and tip.has_parent_with_name("Battery Low?"):
            # Bug fix: the guard created in tutorial_create_root is named
            # "Battery Low?"; the original checked "Battery Emergency",
            # which matches nothing, so this branch never triggered.
            response.report = "battery [last result: {}]".format(last_result)
        else:
            response.report = "idle [last result: {}]".format(last_result)
        return response

    def prune_application_subtree_if_done(self, tree):
        """
        Check if a job is running and if it has finished. If so, prune the job subtree from the tree.
        Additionally, make a status report upon introspection of the tree.

        Args:
            tree (:class:`~py_trees.trees.BehaviourTree`): tree to investigate/manipulate.
        """
        # executing
        if self.busy():
            # The job sits between the battery emergency and idle children.
            job = self.priorities.children[-2]
            # finished
            if job.status == py_trees.common.Status.SUCCESS or job.status == py_trees.common.Status.FAILURE:
                self.node.get_logger().info("{0}: finished [{1}]".format(job.name, job.status))
                for node in job.iterate():
                    node.shutdown()
                tree.prune_subtree(job.id)

    def busy(self):
        """
        Check if a job subtree exists and is running. Only one job is permitted at
        a time, so it is sufficient to just check that the priority task selector
        is of length three (note: there is always emergency and idle tasks
        alongside the active job). When the job is not active, it is
        pruned from the tree, leaving just two prioritised tasks (emergency and idle).

        Returns:
            :obj:`bool`: whether it is busy with a job subtree or not
        """
        return len(self.priorities.children) == 3

    @property
    def priorities(self) -> py_trees.composites.Selector:
        """
        Returns the composite (:class:`~py_trees.composites.Selector`) that is
        home to the prioritised list of tasks.
        """
        return self.root.children[-1]
def tutorial_main():
    """
    Entry point for the demo script: initialise rclpy, set up the dynamic
    application tree, tick it at 1Hz and spin until shutdown.
    """
    rclpy.init(args=None)
    tree = DynamicApplicationTree()

    def abort(message: str):
        # Shared teardown for setup failures (the original duplicated this
        # shutdown/exit sequence in both except clauses).
        console.logerror(message)
        tree.shutdown()
        rclpy.try_shutdown()
        sys.exit(1)

    try:
        tree.setup(timeout=15)
    except py_trees_ros.exceptions.TimedOutError as e:
        abort(console.red + "failed to setup the tree, aborting [{}]".format(str(e)) + console.reset)
    except KeyboardInterrupt:
        # not a warning, nor error, usually a user-initiated shutdown
        abort("tree setup interrupted")

    tree.tick_tock(period_ms=1000.0)

    try:
        rclpy.spin(tree.node)
    except (KeyboardInterrupt, rclpy.executors.ExternalShutdownException):
        pass
    finally:
        tree.shutdown()
        rclpy.try_shutdown()
| true |
5b82dc3bf76ca84912bb3ab0db3d617a17953691 | Python | zhd204/Python | /day55_flask-HTML&URL-parsing/higher-lower/server.py | UTF-8 | 1,325 | 3.21875 | 3 | [] | no_license | from flask import Flask
import random
# Secret answer, chosen once at startup and fixed for the server's lifetime.
ref = random.randint(0, 9)
# Reveal the answer on the server console (debug aid; players cannot see it).
print(ref)
app = Flask(__name__)
@app.route("/")
def root():
    """Home page: invite the player to guess a number between 0 and 9."""
    page = (
        '<h1 style="text-align: center">Guess a number between 0 and 9</h1>',
        '<div style="text-align: center">',
        '<img src=https://media.giphy.com/media/3o7aCSPqXE5C6T8tBC/giphy.gif width=300></img>',
        '</div>',
    )
    return ''.join(page)
@app.route("/<int:number>")
def guess_number(number):
    """Compare the guessed ``number`` against the secret ``ref`` and return a hint page."""
    if number > ref:
        return '<h1 style="color: purple" align="center">Too high, try again!</h1>' \
               '<div style="text-align: center">' \
               '<img src=https://media.giphy.com/media/3o6ZtaO9BZHcOjmErm/giphy.gif width=300></img>' \
               '</div>'
    elif number < ref:
        return '<h1 style="color: red" align="center">Too low, try again!</h1>' \
               '<div style="text-align: center">' \
               '<img src=https://media.giphy.com/media/jD4DwBtqPXRXa/giphy.gif width=300></img>' \
               '</div>'
    else:
        # Bug fix: the original emitted 'style=style="color: green"', a
        # malformed attribute that browsers drop, so the success heading
        # was never rendered green.
        return '<h1 style="color: green" align="center">You found me!</h1>' \
               '<div style="text-align: center">' \
               '<img src=https://media.giphy.com/media/4T7e4DmcrP9du/giphy.gif width=300></img>' \
               '</div>'
if __name__ == "__main__":
    # Run the Flask development server; debug=True enables auto-reload.
    app.run(debug=True)
| true |
8807d1e27e0c0ac9e2c26652a8a7377d4ce53c93 | Python | jcebo/pp1 | /03-FileHandling/21.py | UTF-8 | 245 | 3.515625 | 4 | [] | no_license | import re
# Count and sum all comma-separated integers in numbersinrows.txt.
counter = 0
total = 0  # renamed from 'sum', which shadowed the builtin
with open('numbersinrows.txt', 'r') as infile:
    for line in infile:
        # Plain str.split suffices; the original used re.split for a
        # fixed one-character separator.
        numbers = line.split(',')
        counter += len(numbers)
        for token in numbers:
            # int() tolerates surrounding whitespace, including the newline.
            total += int(token)
print('Ilość liczb: {}\nSuma: {}'.format(counter, total))
b4086ca1255f8bd1a14b2de33d6c64391ce41236 | Python | venkateshgithub301/pythonprograms | /sort.py | UTF-8 | 67 | 2.8125 | 3 | [] | no_license |
aList = [1,2,3,2,2,5,4,4];
aList.sort();
print "List : ", aList | true |
21a8f28df3e16d6bf910b55949302e4adf1fbe1e | Python | jorzel/codefights | /arcade/core/reflectString.py | UTF-8 | 490 | 4.125 | 4 | [] | no_license | """
Define an alphabet reflection as follows: a turns into z, b turns into y, c turns into x, ..., n turns into m, m turns into n, ..., z turns into a.
Define a string reflection as the result of applying the alphabet reflection to each of its characters.
Reflect the given string.
Example
For inputString = "name", the output should be
reflectString(inputString) = "mznv".
"""
def reflectString(inputString):
    """Map each lowercase letter to its alphabet mirror (a<->z, b<->y, ...)."""
    # ord('a') + ord('z') == 219, so the mirror of c is chr(219 - ord(c)).
    return ''.join(chr(219 - ord(ch)) for ch in inputString)
| true |
aa8cd9aa4878d0c5b3300bf5059e8fcda3c5ff11 | Python | oogutierrez/flightaware | /jobapplication.py | UTF-8 | 647 | 2.609375 | 3 | [] | no_license | # This is for my application for Software Engineer - Big Data in FlightAware
import requests
import json
import urllib.parse

# Application payload is carried in the query string of the careers URL.
url = 'https://flightaware.com/about/careers/apply?data='
headers = {'Content-type': 'application/json'}
urls = [
    "https://www.linkedin.com/in/orlando-gutierrez-09995a2/",
    "https://github.com/oogutierrez/flightaware"
]
data = {'name': 'Orlando Gutierrez',
        'email': 'oogutierrez@yahoo.com',
        'position': 'Software Engineer - Big Data',
        # Bug fix: the original wrapped the list again ('urls': [ urls ]),
        # producing a doubly-nested list.
        'urls': urls,
        'comment': 'Please call me at 8323788532 or 4046617285'}
# Bug fix: JSON placed in a query string must be percent-encoded, otherwise
# characters such as '{', '"' and spaces produce an invalid/ambiguous URL.
# NOTE(review): whether the endpoint expects the payload in the query string
# at all (rather than the POST body) cannot be confirmed from this script.
r = requests.post(url + urllib.parse.quote(json.dumps(data)), headers=headers)
print(r.status_code, r.text)
| true |
49fae601222865da5a18d96c33b6acfb6a47f397 | Python | sungho123/2020-Python-Algorithm- | /제출/Algorithm/1_04_4주차/4월 4주차 연습문제 1 김우중.py | UTF-8 | 218 | 3.171875 | 3 | [] | no_license | a = int(input())
b = int(input())
# Long-multiplication demo: prints the partial product for each digit of b.
# Assumes b has exactly three digits (indices 0..2 below).
digits = str(b)  # removed unused 's1 = str(a)' from the original
step1 = a * int(digits[2])  # partial product with the ones digit
step2 = a * int(digits[1])  # partial product with the tens digit
step3 = a * int(digits[0])  # partial product with the hundreds digit
result = a * b
print(step1)
print(step2)
print(step3)
print(result)
| true |
d15bfd7e7553d193c84963cd3c7badfc1dcdd4f9 | Python | miadugas/Scraper | /scrape.py | UTF-8 | 2,227 | 2.921875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import requests
from bs4 import BeautifulSoup
# to give an interval between multiple requests
import time
# csv import
import csv
# Import send function
import send_mail
# to rename the csv daily
from datetime import date
# urls to scrape
urls = ["https://finance.yahoo.com/quote/AMZN?p=AMZN&.tsrc=fin-srch", "https://finance.yahoo.com/quote/FB?p=FB", "https://finance.yahoo.com/quote/TWTR?p=TWTR&.tsrc=fin-srch", "https://finance.yahoo.com/quote/AAPL?p=AAPL&.tsrc=fin-srch"]
# Spoof a browser User-Agent so the requests do not look like a bot.
# Bug fix: the original assigned a *set* literal ({ "Your User Agent" });
# requests expects a mapping of header-name -> value. Replace the value
# below with your own browser's User-Agent string.
headers = {"User-Agent": "Your User Agent"}
# Name the csv after today's date so each daily run gets its own file.
today = str(date.today()) + ".csv"
# Context manager guarantees the file is closed even if scraping fails;
# newline='' stops the csv module writing blank rows on Windows.
with open(today, "w", newline="") as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(["Stock Name", "Current Price", "Previous Close", "Open", "Bid", "Ask", "Day Range", "52 Week Range", "Volume", "Avg. Volume"])
    for url in urls:
        stock = []
        html_page = requests.get(url, headers=headers)
        # lxml is the fastest parser available to BeautifulSoup
        soup = BeautifulSoup(html_page.content, 'lxml')
        header_info = soup.find_all("div", id="quote-header-info")[0]
        stock_title = header_info.find("h1").get_text()
        current_price = header_info.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").get_text()
        stock.append(stock_title)
        stock.append(current_price)
        # First summary table holds 8 label/value rows; only the values are
        # kept (the labels already appear in the csv header row).
        table_info = soup.find_all("div", class_="D(ib) W(1/2) Bxz(bb) Pend(12px) Va(t) ie-7_D(i) smartphone_D(b) smartphone_W(100%) smartphone_Pend(0px) smartphone_BdY smartphone_Bdc($seperatorColor)")[0].find_all("tr")
        for i in range(0, 8):
            value = table_info[i].find_all("td")[1].get_text()
            stock.append(value)
        csv_writer.writerow(stock)
        # be polite: pause between requests so we do not hammer the server
        time.sleep(5)
# Email today's csv once it is complete and closed.
send_mail.send(filename=today)
65e5abde6b90d4410ac6587db205f9a881fa4969 | Python | fernakasa/Testing-Interpreter-Pattern | /test/test_interpreterPattern.py | UTF-8 | 1,323 | 3.875 | 4 | [] | no_license | import os
import sys
root_folder = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root_folder)
from src.interpreterPattern import Number, Add, Subtract
def test_interpreterPattern():
# The Client
# The sentence complies with a simple grammar of
# Number -> Operator -> Number -> etc,
SENTENCE = "5 + 4 - 3 + 7 - 2"
print(SENTENCE)
# Split the sentence into individual expressions that will be added
# to an Abstract Syntax Tree (AST) as Terminal and Non-Terminal
# expressions
TOKENS = SENTENCE.split(" ")
print(TOKENS)
# Manually Creating an Abstract Syntax Tree from the tokens
AST: list[AbstractExpression] = [] # A list of AbstractExpressions
# 5 + 4
AST.append(Add(Number(TOKENS[0]), Number(TOKENS[2])))
# ^ - 3
AST.append(Subtract(AST[0], Number(TOKENS[4])))
# ^ + 7
AST.append(Add(AST[1], Number(TOKENS[6])))
# ^ - 2
AST.append(Subtract(AST[2], Number(TOKENS[8])))
# Use the final AST row as the root node.
AST_ROOT = AST.pop()
# Interpret recursively through the full AST starting from the root.
print(AST_ROOT.interpret())
# Print out a representation of the AST_ROOT
print(AST_ROOT)
assert "((((5 Add 4) Subtract 3) Add 7) Subtract 2)" == str(AST_ROOT) | true |
68c601e437569e594913ee7bd093d45321f31234 | Python | wangerde/codewars | /python2/kyu_6/pile_of_cubes.py | UTF-8 | 1,202 | 4.03125 | 4 | [] | no_license | """Build a pile of Cubes
http://www.codewars.com/kata/5592e3bd57b64d00f3000047
Your task is to construct a building which will be a pile of n cubes.
The cube at the bottom will have a volume of n^3, the cube above will have
volume of (n-1)^3 and so on until the top which will have a volume of 1^3.
You are given the total volume m of the building. Being given m can you find
the number n of cubes you will have to build?
The parameter of the function findNb (find_nb, find-nb) will be an integer m
and you have to return the integer n such as n^3 + (n-1)^3 + ... + 1^3 = m
if such a n exists or -1 if there is no such n.
Examples:
findNb(1071225) --> 45
findNb(91716553919377) --> -1
Suggestion:
Try to optimize your function.
"""
import decimal
from decimal import Decimal
# 100 significant digits so Decimal square roots stay precise for the
# very large volumes this kata feeds to find_nb().
decimal.getcontext().prec = 100
def find_nb(volume):
    """Find the number of stacked cubes whose volumes 1^3 + 2^3 + ... + n^3 sum to *volume*.

    Uses exact integer arithmetic (math.isqrt) instead of the original
    Decimal square roots, which depended on a mutated global context
    (prec = 100) and could lose exactness for astronomically large inputs.

    Args:
        volume (int): total volume of the pile.

    Returns:
        int: the number of cubes n, or -1 if no such n exists.

    Examples:
        >>> find_nb(1071225)
        45
        >>> find_nb(91716553919377)
        -1
    """
    from math import isqrt

    # Sum of cubes identity: 1^3 + ... + n^3 == (n*(n+1)/2)^2, so volume
    # must be a perfect square whose root is a triangular number.
    root = isqrt(volume)
    if root * root != volume:
        return -1
    # Solve n*(n+1)/2 == root  =>  n = (-1 + sqrt(1 + 8*root)) / 2,
    # which is integral iff 1 + 8*root is a perfect (odd) square.
    disc = 8 * root + 1
    d = isqrt(disc)
    if d * d != disc:
        return -1
    return (d - 1) // 2
| true |