text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
sys.path.insert(0, './')
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
from spice.config import Config
from spice.data.build_dataset import build_dataset
from spice.model.build_model_sim import build_model_sim
from spice.model.sim2sem import Sim2Sem
from spice.solver import make_lr_scheduler, make_optimizer
from spice.utils.miscellaneous import mkdir, save_config
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
from spice.utils.evaluation import calculate_acc, calculate_nmi, calculate_ari
from spice.utils.load_model_weights import load_model_weights
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument(
"--config-file",
default="./configs/cifar10/embedding.py",
metavar="FILE",
help="path to config file",
type=str,
)
def main():
    """Parse CLI args, load the config, seed RNGs and launch the worker(s).

    Spawns one ``main_worker`` process per GPU when
    ``cfg.multiprocessing_distributed`` is set; otherwise runs a single
    ``main_worker`` in-process.
    """
    args = parser.parse_args()
    cfg = Config.fromfile(args.config_file)

    output_dir = cfg.results.output_dir
    if output_dir:
        mkdir(output_dir)

    # Persist the resolved config next to the results for reproducibility.
    output_config_path = os.path.join(output_dir, 'config.py')
    save_config(cfg, output_config_path)

    if cfg.seed is not None:
        random.seed(cfg.seed)
        torch.manual_seed(cfg.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if cfg.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if cfg.dist_url == "env://" and cfg.world_size == -1:
        cfg.world_size = int(os.environ["WORLD_SIZE"])

    cfg.distributed = cfg.world_size > 1 or cfg.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if cfg.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total
        # world_size needs to be adjusted accordingly.
        cfg.world_size = ngpus_per_node * cfg.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes:
        # one main_worker per GPU, each with its own copy of the config.
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, cfg.copy()))
    else:
        # BUG FIX: the argparse namespace only defines --config-file, so
        # args.gpu raised AttributeError here; the GPU id lives on the
        # config object.
        main_worker(cfg.gpu, ngpus_per_node, cfg)
def main_worker(gpu, ngpus_per_node, cfg):
    """Per-process entry point: build the (distributed) similarity model and
    dump L2-normalized features of the test set to an .npy file.

    :param gpu: GPU index assigned to this process (or None).
    :param ngpus_per_node: number of GPUs on this node.
    :param cfg: config object (a per-process copy when spawned via mp.spawn).
    """
    cfg.gpu = gpu
    # suppress printing if not master
    if cfg.multiprocessing_distributed and cfg.gpu != 0:
        def print_pass(*cfg):
            pass
        builtins.print = print_pass
    if cfg.gpu is not None:
        print("Use GPU: {} for training".format(cfg.gpu))
    if cfg.distributed:
        if cfg.dist_url == "env://" and cfg.rank == -1:
            cfg.rank = int(os.environ["RANK"])
        if cfg.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            cfg.rank = cfg.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=cfg.dist_backend, init_method=cfg.dist_url,
                                world_size=cfg.world_size, rank=cfg.rank)
    # create model
    model_sim = build_model_sim(cfg.model_sim)
    print(model_sim)
    if cfg.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if cfg.gpu is not None:
            torch.cuda.set_device(cfg.gpu)
            model_sim.cuda(cfg.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            cfg.batch_size = int(cfg.batch_size / ngpus_per_node)
            cfg.workers = int((cfg.workers + ngpus_per_node - 1) / ngpus_per_node)
            model_sim = torch.nn.parallel.DistributedDataParallel(model_sim, device_ids=[cfg.gpu])
        else:
            model_sim.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model_sim = torch.nn.parallel.DistributedDataParallel(model_sim)
    elif cfg.gpu is not None:
        torch.cuda.set_device(cfg.gpu)
        model_sim = model_sim.cuda(cfg.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    # Load pretrained similarity-model weights if configured
    if cfg.model_sim.pretrained is not None:
        load_model_weights(model_sim, cfg.model_sim.pretrained, cfg.model_sim.model_type)
    cudnn.benchmark = True
    dataset_val = build_dataset(cfg.data_test)
    # no shuffling: feature order must match the dataset order for the saved file
    val_loader = torch.utils.data.DataLoader(dataset_val, batch_size=cfg.batch_size, shuffle=False, num_workers=1)
    model_sim.eval()
    # global-average-pool conv feature maps down to vectors
    pool = nn.AdaptiveAvgPool2d(1)
    feas_sim = []
    # NOTE(review): loader items are assumed to be (images, _, labels, idx)
    # tuples as produced by build_dataset -- confirm against the dataset class.
    for _, (images, _, labels, idx) in enumerate(val_loader):
        images = images.to(cfg.gpu, non_blocking=True)
        print(images.shape)
        with torch.no_grad():
            feas_sim_i = model_sim(images)
            if len(feas_sim_i.shape) == 4:
                # conv feature map (N, C, H, W): pool and flatten to (N, C)
                feas_sim_i = pool(feas_sim_i)
                feas_sim_i = torch.flatten(feas_sim_i, start_dim=1)
            # L2-normalize each feature vector
            feas_sim_i = nn.functional.normalize(feas_sim_i, dim=1)
            feas_sim.append(feas_sim_i.cpu())
    feas_sim = torch.cat(feas_sim, dim=0)
    feas_sim = feas_sim.numpy()
    np.save("{}/feas_moco_512_l2.npy".format(cfg.results.output_dir), feas_sim)
if __name__ == '__main__':
main()
|
"""
---------------------------------------- AppendLastNToFirst ------------------------------------------
You have been given a singly linked list of integers along with an integer 'N'. Write a function to append
the last 'N' nodes towards the front of the singly linked list and returns the new head to the list.
#### Input format :
*The first line contains an Integer 't' which denotes the number of test cases or queries to be run.
Then the test cases follow.
*The first line of each test case or query contains the elements of the singly linked list separated
by a single space.
*The second line contains the integer value 'N'. It denotes the number of nodes to be moved from last
to the front of the singly linked list.
Remember/Consider :
While specifying the list elements for input, -1 indicates the end of the singly linked list and
hence, would never be a list element.
#### Output format :
*For each test case/query, print the resulting singly linked list of integers in a row, separated by
a single space.
*Output for every test case will be printed in a separate line.
#### Constraints :
1 <= t <= 10^2
0 <= M <= 10^5
0 <= N < M
Time Limit: 1sec
Where 'M' is the size of the singly linked list.
#### Sample Input 1 :
2
1 2 3 4 5 -1
3
10 20 30 40 50 60 -1
5
#### Sample Output 1 :
3 4 5 1 2
20 30 40 50 60 10
#### Sample Input 2 :
1
10 6 77 90 61 67 100 -1
4
#### Sample Output 2 :
90 61 67 100 10 6 77
Explanation to Sample Input 2 :
We have been required to move the last 4 nodes to the front of the list. Here, "90->61->67->100"
is the list which represents the last 4 nodes. When we move this list to the front then the remaining
part of the initial list which is, "10->6->77" is attached after 100. Hence, the new list formed
with an updated head pointing to 90.
"""
''' Time Complexity : O(n)
Space Complexity : O(1)
Where 'n' is the size of the Singly Linked List
'''
from sys import stdin
class Node:
    """A single node of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # reference to the next node (None = end of list)
def appendLastNToFirst(head, n):
    """Rotate the list so that its last ``n`` nodes come first.

    Returns the new head; the original head is returned unchanged when
    ``n`` is 0 or the list is empty. Assumes 0 <= n < list length.
    """
    if head is None or n == 0:
        return head

    # Advance a lead pointer n nodes ahead, then walk both pointers until
    # the lead reaches the last node; the trailer then sits just before
    # the n-node suffix.
    lead = head
    for _ in range(n):
        lead = lead.next
    trail = head
    while lead.next is not None:
        trail = trail.next
        lead = lead.next

    new_head = trail.next  # first node of the suffix
    trail.next = None      # detach the suffix from the prefix
    lead.next = head       # append the old prefix after the old tail
    return new_head
def takeInput():
    """Read space-separated integers from stdin and build a linked list.

    Input is terminated by -1 (which is never stored). Returns the head
    node, or None for an empty list.
    """
    values = list(map(int, stdin.readline().rstrip().split(" ")))

    head = None
    tail = None
    for value in values:
        if value == -1:
            # -1 marks end-of-list and is never a list element
            break
        node = Node(value)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head
def printLinkedList(head):
    """Print node values separated by single spaces, then a newline.

    A trailing space is printed after the last value (matches the judge's
    expected output format).
    """
    node = head
    while node is not None:
        print(node.data, end=" ")
        node = node.next
    print()
# main
# Driver: read the number of test cases, then for each case read the list
# and n from stdin, rotate the last n nodes to the front, and print the
# resulting list.
t = int(stdin.readline().rstrip())
while t > 0:
    head = takeInput()
    n = int(stdin.readline().rstrip())
    head = appendLastNToFirst(head, n)
    printLinkedList(head)
    t -= 1
#!/Users/sche/anaconda/bin/python3
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.ticker as ticker
from read_data import read_data
# set style sheet
#plt.style.use("ggplot")
sns.set_style("white")
#sns.set_style("darkgrid")
def plot_precision_at_k(val_result, val_result_random, val_result_popular):
    """Histogram precision@10 of the model (warm/cold start) vs. baselines.

    Each argument is a csv file name under data/validation/; the figure is
    saved under plots/.
    """
    # read dataframes that contain the per-user scores
    df = pd.read_csv("data/validation/"+str(val_result))
    df_random = pd.read_csv("data/validation/"+str(val_result_random))
    df_popular = pd.read_csv("data/validation/"+str(val_result_popular))
    # set subplots
    fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    # plot the four score distributions on shared bins
    bins = np.arange(0, 0.0008, 0.00005)
    hist_kws = {"histtype": "bar", "linewidth": 2, "alpha": 0.5}
    sns.distplot(df["precision_at_k_ws"], bins=bins, ax=ax, kde=False, label='Warm start', color="salmon", hist_kws=hist_kws)
    sns.distplot(df["precision_at_k_cs"], bins=bins, ax=ax, kde=False, label='Cold start', color="dodgerblue", hist_kws=hist_kws)
    sns.distplot(df_random["precision_at_k_random"], bins=bins, ax=ax, kde=False, label='Random', color="gray", hist_kws=hist_kws)
    sns.distplot(df_popular["precision_at_k_mostpopular"], bins=bins, ax=ax, kde=False, label='Most popular', color="orange", hist_kws=hist_kws)
    # customize plots
    #ax.set_xlim([0.20,0.30])
    #ax.set_ylim([0,1])
    ax.set_xlabel("Precision at 10", size=20)
    ax.set_ylabel("Test sample", size=20)
    ax.set_xticks(ax.get_xticks()[::2])
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.title("Precision at 10", size=20)
    plt.legend(loc=1, prop={'size': 20})
    # save figure
    # NOTE(review): val_result[:-3] strips only 3 chars, so "df.csv" becomes
    # "df." -- probably meant [:-4]; kept as-is to preserve output file names.
    plt.savefig("plots/precision_at_k_%s.png" % val_result[:-3])
    return
def plot_recall_at_k(val_result, val_result_random, val_result_popular):
    """Histogram recall@10 of the model (warm/cold start) vs. baselines."""
    # read dataframes that contain the per-user scores
    df = pd.read_csv("data/validation/"+str(val_result))
    df_random = pd.read_csv("data/validation/"+str(val_result_random))
    df_popular = pd.read_csv("data/validation/"+str(val_result_popular))
    # set subplots
    fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    # plot the four score distributions on shared bins
    hist_kws = {"histtype": "bar", "linewidth": 2, "alpha": 0.5}
    bins = np.arange(0, 0.008, 0.0005)
    sns.distplot(df["recall_at_k_ws"], bins=bins, ax=ax, kde=False, label='Warm start', color="salmon", hist_kws=hist_kws)
    sns.distplot(df["recall_at_k_cs"], bins=bins, ax=ax, kde=False, label='Cold start', color="dodgerblue", hist_kws=hist_kws)
    sns.distplot(df_random["recall_at_k_random"], bins=bins, ax=ax, kde=False, label='Random', color="gray", hist_kws=hist_kws)
    sns.distplot(df_popular["recall_at_k_mostpopular"], bins=bins, ax=ax, kde=False, label='Most popular', color="orange", hist_kws=hist_kws)
    # customize plots
    ax.set_xlabel("Recall at 10", size=20)
    ax.set_ylabel("Test sample", size=20)
    ax.set_xticks(ax.get_xticks()[::2])
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.title("Recall at 10", size=20)
    plt.legend(loc=1, prop={'size': 20})
    # save figure (see NOTE about [:-3] in plot_precision_at_k)
    plt.savefig("plots/recall_at_k_%s.png" % val_result[:-3])
    return
def plot_f1_at_k(val_result, val_result_random, val_result_popular):
    """Histogram F1@10 computed from the precision/recall columns of the
    validation csv files."""
    # read dataframes that contain the per-user scores
    df = pd.read_csv("data/validation/"+str(val_result))
    df_random = pd.read_csv("data/validation/"+str(val_result_random))
    df_mostpopular = pd.read_csv("data/validation/"+str(val_result_popular))
    # F1 = harmonic mean of precision and recall, per user
    df["f1_at_k_ws"] = 2*(df["recall_at_k_ws"] * df["precision_at_k_ws"]) / (df["recall_at_k_ws"]+df["precision_at_k_ws"])
    df["f1_at_k_cs"] = 2*(df["recall_at_k_cs"] * df["precision_at_k_cs"]) / (df["recall_at_k_cs"]+df["precision_at_k_cs"])
    # NOTE(review): baseline F1 columns are assigned onto df, which relies on
    # identical row order/index across the three csv files -- confirm.
    df["f1_at_k_random"] = 2*(df_random["recall_at_k_random"] * df_random["precision_at_k_random"]) / (df_random["recall_at_k_random"]+df_random["precision_at_k_random"])
    df["f1_at_k_mostpopular"] = 2*(df_mostpopular["recall_at_k_mostpopular"] * df_mostpopular["precision_at_k_mostpopular"]) / (df_mostpopular["recall_at_k_mostpopular"]+df_mostpopular["precision_at_k_mostpopular"])
    # set subplots
    fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    # plot the four F1 distributions on shared bins
    hist_kws = {"histtype": "bar", "linewidth": 2, "alpha": 0.5}
    bins = np.arange(0, 0.0015, 0.00005)
    sns.distplot(df["f1_at_k_ws"], bins=bins, ax=ax, kde=False, label='Warm start', color="salmon", hist_kws=hist_kws)
    sns.distplot(df["f1_at_k_cs"], bins=bins, ax=ax, kde=False, label='Cold start', color="dodgerblue", hist_kws=hist_kws)
    sns.distplot(df["f1_at_k_random"], bins=bins, ax=ax, kde=False, label='Random', color="gray", hist_kws=hist_kws)
    sns.distplot(df["f1_at_k_mostpopular"], bins=bins, ax=ax, kde=False, label='Most popular', color="orange", hist_kws=hist_kws)
    # customize plots
    ax.set_xlabel("F1 score", size=20)
    ax.set_ylabel("Test sample", size=20)
    ax.set_xticks(ax.get_xticks()[::2])
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.title("F1 score", size=20)
    plt.legend(loc=1, prop={'size': 20})
    # save figure (see NOTE about [:-3] in plot_precision_at_k)
    plt.savefig("plots/f1_at_k_%s.png" % val_result[:-3])
    return
def plot_reciprocal_rank(val_result, val_result_random, val_result_popular):
    """Histogram the reciprocal-rank scores of the model vs. baselines."""
    # read dataframes that contain the per-user scores
    df = pd.read_csv("data/validation/"+str(val_result))
    df_random = pd.read_csv("data/validation/"+str(val_result_random))
    df_popular = pd.read_csv("data/validation/"+str(val_result_popular))
    # set subplots
    fig, ax = plt.subplots(1, 1, figsize=(5, 2.5))
    # plot reciprocal rank distributions
    sns.distplot(df["reciprocal_rank_ws"], bins=10, ax=ax, kde=False, label='Warm start')
    sns.distplot(df["reciprocal_rank_cs"], bins=10, ax=ax, kde=False, label='Cold start')
    sns.distplot(df_random["reciprocal_rank_random"], bins=10, ax=ax, kde=False, label='Random')
    sns.distplot(df_popular["reciprocal_rank_mostpopular"], bins=10, ax=ax, kde=False, label='Most popular')
    # customize plots
    #ax.set_xlim([0.70,1.0])
    #ax.set_ylim([0,1])
    ax.set_xlabel("Trained model")
    ax.set_ylabel("Reciprocal rank")
    plt.legend(loc=1)
    # save figure (see NOTE about [:-3] in plot_precision_at_k)
    plt.savefig("plots/reciprocal_rank_%s.png" % val_result[:-3])
    return
def plot_precision_epoch():
    """Plot validation score as a function of training epoch, read from
    data/validation/df.epoch.csv."""
    # read dataframe that contains the per-epoch scores
    df = pd.read_csv("data/validation/df.epoch.csv")
    # set subplots
    fig, ax = plt.subplots(1, 1, figsize=(5, 3))
    # NOTE(review): df.plot() creates its own figure/axes, so the fig/ax
    # created above are unused and an empty figure is left behind.
    ax = df.plot()
    # customize plots
    #ax.set_xlim([0.70,1.0])
    #ax.set_ylim([0,1])
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Validation score")
    plt.legend(loc=2)
    plt.tight_layout()
    # save figure
    plt.savefig("plots/epoch.png")
    return
def plot_most_common_keywords():
    """Horizontal bar chart of the 20 most common user keywords (tags),
    expressed as a fraction of 320 users."""
    # read dataframe that contains keyword counts
    df = pd.read_csv("data/keywords/df.csv")
    # only plot the top 20 most common keywords
    df = df[:20]
    # rename the unnamed csv columns to meaningful names
    df = df.rename(index=str, columns={"Unnamed: 0": "keywords", "0": "kwds_count"})
    # convert raw counts to a share of the 320 users
    df["kwds_count"] = df["kwds_count"].apply(lambda x: float(x)/320)
    #df["kwds_count"] = df["kwds_count"].apply(lambda x: x * 2)
    # set subplots
    fig, ax = plt.subplots(1, 1, figsize=(5, 4))
    # plot keywords with their counts as values
    plt.barh(range(len(df)), df.kwds_count.tolist(), tick_label=df.keywords.tolist(), color="red", alpha=0.5)
    # customize plots
    ax.set_xlabel("% of users")
    ax.set_ylabel("Tags")
    plt.legend(loc=2)
    # invert so the most common keyword appears at the top
    plt.gca().invert_yaxis()
    plt.xticks(rotation='vertical')
    plt.tight_layout()
    # save figure
    plt.savefig("plots/keywords.png")
    return
def plot_articles():
    """Plot the number of articles posted per month as a time series."""
    # load list of articles
    dfa = read_data("list_articles")
    # convert string to datetime
    dfa["post_time"] = pd.to_datetime(dfa['post_time'])
    # set subplots
    # NOTE(review): dfc.plot() below creates its own axes; this fig is only
    # reused for fig.autofmt_xdate() at the end.
    fig, ax = plt.subplots(1, 1, figsize=(8, 5))
    # new dataframe only containing title and date
    dfb = dfa[["post_time", "title"]]
    # count number of articles in each month
    dfc = dfb.set_index('post_time').resample('M').count()
    # plot with proper date axis formatting (month/year tick labels)
    ax = dfc.plot(x_compat=True, color="red", alpha=0.5)
    plt.gca().xaxis.set_major_locator(ticker.MaxNLocator(8))
    plt.gca().xaxis.set_major_formatter(dates.DateFormatter('%b\n%y'))
    plt.gcf().autofmt_xdate(rotation=0, ha="center")
    # set axis labels
    ax.set_xlabel("Time")
    ax.set_ylabel("Articles")
    # remove legend
    ax.legend_.remove()
    # use tight layout
    fig.autofmt_xdate()
    plt.tight_layout()
    # save figure
    plt.savefig("plots/article_time.png")
    return
# main function
if __name__ == "__main__":
    # input csv files (all resolved under data/validation/)
    val_result = "df.csv"                      # trained-model scores (warm/cold start)
    val_result_random = "df.random.csv"        # random-recommender baseline
    val_result_popular = "df.mostpopular.csv"  # most-popular baseline
    # plot validation plots
    plot_precision_at_k(val_result, val_result_random, val_result_popular)
    plot_recall_at_k(val_result, val_result_random, val_result_popular)
    # plot F1 score
    plot_f1_at_k(val_result, val_result_random, val_result_popular)
    # plot precision vs epoch
    plot_precision_epoch()
    # plot most common keywords
    plot_most_common_keywords()
    # plot number of articles as a function of time
    plot_articles()
|
def num_check(password, num_numbers=1):
    '''
    (str, int) -> bool
    Return True if password contains at least num_numbers numeric characters.
    >>> num_check('Woops')
    False
    >>> num_check('Woops33', 2)
    True
    >>> num_check('Woops33', 6)
    False
    '''
    # str.isnumeric() is the built-in test for numeric characters
    # (the original comment noted isnum() does not exist).
    num_count = sum(1 for ch in password if ch.isnumeric())
    # the original if/else returning True/False reduced to one comparison
    return num_count >= num_numbers
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 15:50:00 2015
@author: sponsar
"""
import urllib2,re,sys,time

# Python 2 script: counts Amazon reviews dated "on August ..." across all
# review pages of the product link read from in.txt, writing the total
# count to out.txt.
browser=urllib2.build_opener()
browser.addheaders=[('User-agent', 'Mozilla/5.0')]
infile=open("in.txt")
link=infile.readline().strip()
infile.close()
count=0
page=1
while True:
    # review-list URL for the current page, sorted by most recent
    url=link+'/ref=cm_cr_pr_btm_link_'+str(page)+'?ie=UTF8&showViewpoints=1&sortBy=recent&reviewerType=all_reviews&formatType=all_formats&filterByStar=all_stars&pageNumber='+str(page)
    #url=link+'/ref=cm_cr_pr_btm_link_'+str(page)+'?ie=UTF8&showViewpoints=1&sortBy=helpful&reviewerType=all_reviews&filterByStar=all_stars&pageNumber='+str(page)
    try:
        response=browser.open(url)
    except Exception as e:
        error_type, error_obj, error_info = sys.exc_info()
        print 'ERROR FOR LINK:',url
        print error_type, 'Line:', error_info.tb_lineno
        # NOTE(review): 'continue' retries the same page without limit; a
        # persistent error makes this loop spin forever -- consider a cap.
        continue
    html=response.read()
    #if re.search('Sorry, no reviews match your current selections',html): break
    # stop once Amazon reports there are no more matching reviews
    if html.find('Sorry, no reviews match your current selections')!=-1:break
    #m=len(re.findall('<span class="a-size-base a-color-secondary review-date">on August(.*?)</span>',html))
    #count+=m
    # count review-date spans that fall in August
    reviews=re.finditer('<span class="a-size-base a-color-secondary review-date">on August(.*?)</span>',html)
    m=0
    for review in reviews:
        m+=1
        count+=1
    print 'page',page,m
    page+=1
    time.sleep(2)    # be polite between requests
print count
fileWriter=open('out.txt','w')
fileWriter.write(str(count))
fileWriter.close()
|
import pygame

# 1: initialise pygame and the window
pygame.init()
pygame.display.set_caption("pong game")

# set up game window
SIZE = (600, 600)
BG_COLOR = (8, 113, 142)
canvas = pygame.display.set_mode(SIZE)
clock = pygame.time.Clock()
paddle_image = pygame.image.load("assets/paddle.png")
ball_image = pygame.image.load("assets/ball.png")

# paddle positions: left paddle (x1, y1), right paddle (x2, y2)
x1 = 0
y1 = 100
x2 = 570
y2 = 100
# ball position and velocity (pixels per frame)
ball_x = 300
ball_y = 10
ball_v_x = 4
ball_v_y = 2
# key-state flags: w/s move the left paddle, o/l move the right paddle
w_pressed = False
s_pressed = False
o_pressed = False
l_pressed = False

loop = True
while loop:
    # event polling
    events = pygame.event.get()
    for e in events:
        if e.type == pygame.QUIT:
            loop = False
        elif e.type == pygame.KEYDOWN:
            if e.key == pygame.K_w:
                w_pressed = True
            if e.key == pygame.K_s:
                s_pressed = True
            if e.key == pygame.K_o:
                o_pressed = True
            if e.key == pygame.K_l:
                l_pressed = True
        elif e.type == pygame.KEYUP:
            # BUG FIX: previously any KEYUP cleared all four flags, so
            # releasing one key stopped both paddles. Clear only the flag
            # of the key that was actually released.
            if e.key == pygame.K_w:
                w_pressed = False
            if e.key == pygame.K_s:
                s_pressed = False
            if e.key == pygame.K_o:
                o_pressed = False
            if e.key == pygame.K_l:
                l_pressed = False

    # move paddles (5 px per frame while the key is held)
    if w_pressed:
        y1 -= 5
    if s_pressed:
        y1 += 5
    if o_pressed:
        y2 -= 5
    if l_pressed:
        y2 += 5

    # advance the ball
    ball_x += ball_v_x
    ball_y += ball_v_y
    # bounce off the side walls
    if ball_x >= 580 or ball_x <= 0:
        ball_v_x = -ball_v_x
    # bounce off the top/bottom walls; >= / <= instead of exact equality so
    # the ball cannot tunnel past the wall if velocities ever change
    if ball_y >= 580 and ball_x >= 0 and ball_x <= 600:
        ball_v_y = -ball_v_y
    if ball_y <= 0 and ball_x >= 0 and ball_x <= 600:
        ball_v_y = -ball_v_y
    # bounce off the paddles (paddle sprite is 120 px tall)
    if ball_x <= 30 and ball_y >= y1 and ball_y <= y1 + 120:
        ball_v_x = -ball_v_x
    if ball_x >= 550 and ball_y >= y2 and ball_y <= y2 + 120:
        ball_v_x = -ball_v_x

    # draw the frame at 60 FPS
    canvas.fill(BG_COLOR)
    canvas.blit(paddle_image, (x1, y1))
    canvas.blit(paddle_image, (x2, y2))
    canvas.blit(ball_image, (ball_x, ball_y))
    clock.tick(60)
    pygame.display.flip()
import sys
import os
sys.path.append(r"Y:\tool\ND_Tools\DCC")
sys.path.append(
r"Y:\tool\ND_Tools\DCC\ND_AssetExporter\pycode\maya_lib\on_maya")
sys.path.append(r"Y:\tool\ND_Tools\DCC\ND_AssetExporter\pycode")
sys.path.append(r"Y:\tool\ND_Tools\DCC\ND_AssetExporter\pycode\maya")
import maya.cmds as cmds
from imp import reload
import ND_AssetExporter.pycode.shell_lib.util_exporter as util_exporter
def get_ns_list():
    """Return all Maya scene namespaces (top level plus one nesting level),
    excluding the built-in 'UI' and 'shared' namespaces."""
    namespaces = cmds.namespaceInfo(lon=True)
    _nestedNS = []
    for ns in namespaces:
        # list namespaces nested under ns (None when there are none)
        nestedNS = cmds.namespaceInfo(ns, lon=True)
        if nestedNS != None:
            _nestedNS += nestedNS
    namespaces += _nestedNS
    # NOTE(review): list.remove raises ValueError if 'UI'/'shared' are
    # absent -- assumed always present in a Maya session.
    namespaces.remove('UI')
    namespaces.remove('shared')
    return namespaces
def ls_asset_code(AssetClass_list):
    """Collect the regular_asset_name of every AssetClass instance, in order."""
    return [asset.regular_asset_name for asset in AssetClass_list]
class AssetClass():
    """Pairs the asset namespaces found in the current Maya scene with the
    matching ShotGrid asset record, and knows how to export the asset."""

    def __init__(self, scene_asset_name_list, regular_asset_name=None, sg_aaset=None):
        # NOTE(review): 'sg_aaset' looks like a typo of 'sg_asset', but the
        # parameter name is part of the public interface, so it is kept.
        self.scene_asset_name_list = scene_asset_name_list
        self.regular_asset_name = regular_asset_name
        self.sg_asset = sg_aaset

    def export_asset(self, debug="True", override_shotpath=None, override_exptype=None, add_attr=None):
        """Export this asset via the 'anim' or 'abc' exporter back-end.

        :param debug: unused here; kept for interface compatibility.
        :param override_shotpath: output directory overriding the publish
            path derived from the current scene file.
        :param override_exptype: forces the export type instead of the
            ShotGrid 'sg_export_type' field.
        :param add_attr: optional extra attribute name forwarded to the
            exporter.
        """
        if override_exptype is not None:
            export_type = override_exptype
        else:
            export_type = self.sg_asset['sg_export_type']
        if override_shotpath == None:
            # derive publish paths from the currently opened scene file
            scene_path = cmds.file(q=True, sn=True)
            opc = util_exporter.outputPathConf(
                scene_path, export_type=export_type)
            publish_ver_anim_path = opc.publish_ver_anim_path
            # NOTE(review): 'publsh_ver_abc_path' is the attribute name as
            # spelled in util_exporter -- confirm before "fixing".
            publish_ver_abc_path = opc.publsh_ver_abc_path
        else:
            # normalize Windows separators to forward slashes
            publish_ver_anim_path = override_shotpath.replace(os.path.sep, '/')
            publish_ver_abc_path = override_shotpath.replace(os.path.sep, '/')
        # arguments shared by both exporter back-ends
        argsdic = {
            'publish_ver_anim_path': publish_ver_anim_path,
            'publish_ver_abc_path': publish_ver_abc_path,
            'anim_item': self.sg_asset["sg_anim_export_list"],
            'abc_item': self.sg_asset['sg_abc_export_list'],
            'namespace': self.sg_asset['sg_namespace'],
            'top_node': self.sg_asset['sg_top_node'],
            'asset_path': self.sg_asset['sg_asset_path'],
            'step_value': 1.0,
            'frame_range': False,
            'frame_handle': 0,
            'scene_timewarp': False}
        if add_attr is not None:
            argsdic['add_attr'] = add_attr
        if export_type == 'anim':
            from maya_lib.ndPyLibExportAnim import export_anim_main
            # reload(export_anim_main)
            export_anim_main(**argsdic)
        if export_type == 'abc':
            # from maya_lib.ndPyLibExportAbc import export_abc_main
            import maya_lib.ndPyLibExportAbc as ndPyLibExportAbc
            reload(ndPyLibExportAbc)
            ndPyLibExportAbc.export_abc_main(**argsdic)
def ls_asset_class():
    """Build AssetClass instances for every ShotGrid asset whose namespace
    pattern matches a namespace present in the current scene.

    :returns: list of AssetClass instances.
    """
    PathClass = util_exporter.outputPathConf(cmds.file(q=True, sceneName=True))
    project = PathClass.pro_name
    # ShotGrid fields fetched for each asset record
    base_fieldcodes = ["code", "sg_namespace", "sg_export_type",
                       "sg_top_node", "sg_abc_export_list",
                       "sg_anim_export_list", "sg_asset_path",
                       "sequences", "shots", "assets"]
    ProSGClass = util_exporter.SGProjectClass(project, base_fieldcodes)
    AssetSG_list = ProSGClass.get_dict("Asset")
    # keep only assets that have a namespace pattern configured
    asset_list = []
    for sg_asset in AssetSG_list:
        if sg_asset["sg_namespace"] is not None:
            asset_list.append(sg_asset)
    # list scene namespaces and match them against each asset's pattern
    import re
    scene_namespaces = get_ns_list()
    class_list = []
    for sg_asset in asset_list:
        found_namespaces = []
        sg_namespace = sg_asset["sg_namespace"]
        for scene_namespace in scene_namespaces:
            # sg_namespace is used as a regex matched at the string start
            if re.match(sg_namespace, scene_namespace) is not None:
                found_namespaces.append(sg_asset["code"])
        if len(found_namespaces) != 0:
            # for ProSG_dict in ProSG_list:
            class_list.append(AssetClass(found_namespaces,
                                         sg_asset["code"], sg_asset))
    return class_list
def get_asset_class_dict():
    """Like ls_asset_class(), but returns a dict keyed by asset code.

    NOTE(review): near-duplicate of ls_asset_class -- the two could share a
    common helper.

    :returns: dict mapping asset code -> AssetClass instance.
    """
    PathClass = util_exporter.outputPathConf(cmds.file(q=True, sceneName=True))
    project = PathClass.pro_name
    # ShotGrid fields fetched for each asset record
    base_fieldcodes = ["code", "sg_namespace", "sg_export_type",
                       "sg_top_node", "sg_abc_export_list",
                       "sg_anim_export_list", "sg_asset_path",
                       "sequences", "shots", "assets"]
    ProSGClass = util_exporter.SGProjectClass(project, base_fieldcodes)
    AssetSG_list = ProSGClass.get_dict("Asset")
    # keep only assets that have a namespace pattern configured
    asset_list = []
    for sg_asset in AssetSG_list:
        if sg_asset["sg_namespace"] is not None:
            asset_list.append(sg_asset)
    # list scene namespaces and match them against each asset's pattern
    import re
    scene_namespaces = get_ns_list()
    class_dict = {}
    for sg_asset in asset_list:
        found_namespaces = []
        sg_namespace = sg_asset["sg_namespace"]
        for scene_namespace in scene_namespaces:
            # sg_namespace is used as a regex matched at the string start
            if re.match(sg_namespace, scene_namespace) is not None:
                found_namespaces.append(sg_asset["code"])
        if len(found_namespaces) != 0:
            # for ProSG_dict in ProSG_list:
            class_dict[sg_asset['code']] = AssetClass(
                found_namespaces, sg_asset["code"], sg_asset)
    return class_dict
if __name__ == "__main__":
    # Ad-hoc test driver: export one known asset from the open scene.
    sys.path.append(r"Y:\tool\ND_Tools\DCC\ND_AssetExporter\pycode\maya_lib")
    import util_asset
    reload(util_asset)
    # AssetClass_list = on_maya_main.ls_asset_class()
    asset_code_list = util_asset.ls_asset_code(util_asset.ls_asset_class())
    AssetClass_dict = util_asset.get_asset_class_dict()
    import pprint
    pprint.pprint(AssetClass_dict)
    # export_path = "C:/Users/k_ueda/Desktop/work"
    export_path = 'P:/Project/RAM1/shots/ep022/s2227/c008/publish/cache/alembic/s2227c008_anm_v004_old_asset'
    # AssetClass_list[0].export_asset(mode="Local", override_shotpath=None, override_exptype="abc", add_attr="shop_materialpath")
    # AssetClass_list[0].export_asset(override_shotpath=export_path, override_exptype="abc", add_attr="shop_materialpath")
    AssetClass_dict['NursedesseiDragon'].export_asset(
        override_shotpath=export_path, override_exptype="abc", add_attr="shop_materialpath")
    # print AssetClass.get_asset_list() ->['gutsFalconFighter', 'vernierNml', 'vulcanNml', 'vulcanDual']
|
import lecturescode

# Calling functions defined in the imported module: a print(__name__)
# inside lecturescode would show that module's name, demonstrating that
# main() and mostimpfunction() are defined there, not in this file.
lecturescode.main()
lecturescode.mostimpfunction()
from Acquisition import aq_parent
from z3c.relationfield.relation import RelationValue
from Products.CMFPlone.interfaces import IPloneSiteRoot
from Products.CMFCore.utils import getToolByName
from plone.dexterity.utils import createContentInContainer
from ..interfaces import IStory
from ..interfaces import IIteration
from ..interfaces import IProject
def create_story(context, data, reindex=True):
    """Create a 'Story' content item inside ``context`` from ``data``.

    Mutates ``data`` in place (renames 'subjects', dedupes 'assigned_to')
    before handing it to Dexterity; optionally reindexes the new item.
    """
    ## XXX FIXME 2013-06-15:
    ## subjects are stored into the 'subject' attribute
    ## see https://github.com/plone/plone.app.dexterity/blob/master/plone/app/dexterity/behaviors/metadata.py#L331
    ## we should use behavior magic to do this
    if 'subjects' in data:
        data['subject'] = data.pop('subjects')

    # drop duplicate assignees (None is treated as an empty list)
    data['assigned_to'] = list(set(data['assigned_to'] or []))

    story = createContentInContainer(context, 'Story', **data)
    if reindex:
        story.reindexObject()
    return story
def get_ancestor(iface, context, default=None):
    """Walk the acquisition chain upward from ``context`` and return the
    first ancestor providing ``iface``.

    Returns ``default`` when no such ancestor exists -- either because the
    Plone site root was reached first or the chain ran out.
    """
    node = context
    while node is not None:
        if iface.providedBy(node):
            return node
        if IPloneSiteRoot.providedBy(node):
            # reached the site root without a match
            return default
        node = aq_parent(node)
    return default
def get_story(context, default=None):
    """Return the closest ancestor providing IStory, or ``default``."""
    return get_ancestor(IStory, context, default)


def get_project(context, default=None):
    """Return the closest ancestor providing IProject, or ``default``."""
    return get_ancestor(IProject, context, default)


def get_iteration(context, default=None):
    """Return the closest ancestor providing IIteration, or ``default``."""
    return get_ancestor(IIteration, context, default)
def get_epic_by_story(story):
    """Return ``{'url': ..., 'title': ...}`` for the epic referenced by
    ``story``, or ``{}`` when the story has no (resolvable) epic.
    """
    obj = getattr(story, 'epic', None)
    if not obj:
        return {}
    if isinstance(obj, RelationValue):
        if obj.isBroken():
            # BUG FIX: a broken relation used to fall through a bare
            # 'return' (None) while every other "no epic" path returns {};
            # normalized to {} so callers get a consistent type.
            return {}
        obj = obj.to_object
    if not obj:
        return {}
    return {
        'url': obj.absolute_url(),
        'title': obj.title
    }
def get_wf_state_info(brain, context=None):
    """Returns some information on the workflow state of a catalog brain.

    :param brain: catalog brain (provides ``review_state`` and
        ``portal_type``)
    :param context: optional context used for tool lookup and translation;
        defaults to the portal object resolved from the brain
    :returns: workflow state id ('state') and its translated 'title'
    :rtype: dict
    """
    if not context:
        purl = getToolByName(brain, 'portal_url')
        context = purl.getPortalObject()
    wt = getToolByName(context, 'portal_workflow')
    _info = {
        'title': None,
        'state': None
    }
    # the brain already carries the indexed review state, so there is no
    # need to wake the object via wt.getInfoFor(obj, 'review_state')
    _info['state'] = brain.review_state
    if _info['state']:
        _info['title'] = context.translate(
            wt.getTitleForStateOnType(
                _info['state'], brain.portal_type
            )
        )
    return _info
class BreadcrumbGetter(object):
    """Builds a breadcrumb (list of ancestor titles, root first) from a
    catalog brain.

    NOTE(review): uses ``xrange``, so this code is Python 2 only.
    """

    def __init__(self, catalog):
        # portal catalog used for title lookups by path
        self.catalog = catalog

    def get_title(self, path):
        """Return the Title of the object at ``path`` (depth-0 catalog
        query), or None when nothing is indexed at that path."""
        results = self.catalog.searchResults(path={
            'query': path,
            'depth': 0
        })
        if len(results) > 0:
            return results[0]['Title']
        return None

    def __call__(self, brain):
        """Return titles from the root down to the brain's object;
        ancestors with no indexed title are skipped."""
        breadcrumb = []
        path = brain.getPath()
        breadcrumb.append(self.get_title(path))
        path_components = path.split("/")
        # walk up the path, dropping one trailing component at a time
        for i in xrange(1, len(path_components)):
            title = self.get_title("/".join(path_components[:-1*i]))
            if title is not None:
                breadcrumb.append(title)
        breadcrumb.reverse()
        return breadcrumb
|
from ..base import BaseCommand
class Command(BaseCommand):
    """Management command that opens an interactive shell with the app and
    its settings preloaded into the namespace."""

    def add_arguments(self, parser):
        """Register the shell-selection options on ``parser``."""
        parser.add_argument(
            "--plain",
            action="store_true",
            help="use the plain python shell.",
        )
        parser.add_argument(
            "--interface",
            choices=["ipython", "bpython"],
            default="bpython",
            # BUG FIX: help text was a copy-paste of --plain's
            # ("use the plain python shell.").
            help="interactive interpreter to use (ipython or bpython).",
        )

    def handle(self, options):
        """Launch the selected shell; --plain takes precedence over
        --interface."""
        if options.plain:
            return self.load_plain(app=self.app, settings=self.app.settings)
        # dispatch to load_ipython / load_bpython by option value
        interface = getattr(self, "load_{}".format(options.interface))
        interface(app=self.app, settings=self.app.settings)

    @staticmethod
    def load_plain(app, settings):  # pragma: no cover
        """Run the stdlib interactive console with tab completion."""
        import code
        new_vars = globals()
        new_vars.update(locals())
        new_vars.update({
            "settings": settings,
            "app": app
        })
        try:
            import readline
            import rlcompleter
        except ImportError:
            # readline unavailable (e.g. on Windows); skip tab completion
            pass
        else:
            readline.set_completer(rlcompleter.Completer(new_vars).complete)
            readline.parse_and_bind("tab: complete")
        shell = code.InteractiveConsole(new_vars)
        shell.interact()

    @staticmethod
    def load_bpython(app, settings):  # pragma: no cover
        """Embed a bpython shell with app/settings in its locals."""
        import bpython
        bpython.embed(locals_={"app": app, "settings": settings})

    @staticmethod
    def load_ipython(app, settings):  # pragma: no cover
        """Start an IPython session with app/settings in the user namespace."""
        from IPython import start_ipython
        start_ipython(argv=[], user_ns={"app": app, "settings": settings})
|
from drawLine import ViewPort,bresenham,drawLine
import sys,random
from graphics import *
'''
100 100
50 0
100 -100
0 -50
-100 -100
-50 0
-100 100
0 50
'''
def drawPoly(vertices, win, color='white'):
    """Draw a closed polygon by connecting consecutive vertices (and the
    last vertex back to the first) with drawLine."""
    ring = list(vertices) + [vertices[0]]
    for start, end in zip(ring, ring[1:]):
        x1, y1 = start
        x2, y2 = end
        #print(win,color,x1,y1,x2,y2)
        drawLine(win, color, x1, y1, x2, y2)
def main():
    """Interactively read polygon vertices from stdin, rasterize the
    outline with Bresenham lines and plot the pixels in a viewport."""
    vert = []
    while(1):
        try:
            x, y = map(int, input('Next vert?').split())
            vert.append((x, y))
        except:
            # any parse failure (or EOF) ends vertex entry
            print(vert)
            break
    # note: the 'Defualt' typo below is a runtime prompt string, left as-is
    if(input('Defualt viewPort is (-400 -400, 400 400). Change?(y/Y)') in ('y', 'Y')):
        x, y = map(int, input('viewPort\'s xMax yMax : ').split())
        new_view = ViewPort(-x, -y, x, y)
    else:
        new_view = ViewPort(-400, -400, 400, 400)
    print('ViewPort :', new_view)
    pixel = []
    filled = []    # NOTE(review): unused -- leftover from a fill feature?
    pixel_dict = {}
    # close the polygon by repeating the first vertex
    vert += [vert[0]]
    win = new_view.init_view()
    for i in range(len(vert)-1):
        pixel += bresenham(*vert[i], *vert[i+1])
    # plot every outline pixel and remember it in pixel_dict
    for i in pixel:
        x, y = i
        win.plot(*i)
        pixel_dict[(x, y)] = 1
    stack = [(0, 0)]    # NOTE(review): unused -- flood fill never implemented
    input('Exit?')
if __name__=='__main__':
main()
|
import math
import random
from helpers import *
def print_points(points):
    """Print the point count on one line, then one point per line."""
    print(len(points))
    for pt in points:
        print(pt)
def generate_d_set():
    """Generate 1000 random points lying on the line through (-1, 0) and
    (1, 0.1), with x drawn from [-1000, 1000]."""
    points_x_range = (-1000.0, 1000.0)
    points_number = 1000
    vector_a = Point(-1.0, 0.0)
    vector_b = Point(1.0, 0.1)
    # line function through the two anchor points
    f = create_line_func_from_two_points(vector_a, vector_b)
    points = [create_random_point_on_line(f, points_x_range) for i in range(points_number)]
    return points
def generate_circle_set(points_number, r):
    """Generate, print and return `points_number` random points on a circle
    of radius `r`."""
    circle_points = [create_random_point_on_circle(r) for _ in range(points_number)]
    print_points(circle_points)
    return circle_points
def generate_range_set(points_number, rangeX, rangeY):
    """Return `points_number` points drawn uniformly from the rectangle
    spanned by `rangeX` x `rangeY`."""
    bounds = RangeXY(rangeX.low, rangeX.high, rangeY.low, rangeY.high)
    return [create_random_point(bounds) for _ in range(points_number)]
def generate_border_rectangle_set(points_number, rangeX, rangeY):
    """Generate, print and return `points_number` random points lying on the
    border of the rectangle spanned by `rangeX` x `rangeY`."""
    rectangle = RangeXY(rangeX.low, rangeX.high, rangeY.low, rangeY.high)
    border_points = [create_random_point_on_rectangle(rectangle) for _ in range(points_number)]
    print_points(border_points)
    return border_points
def generate_square_samples(points_on_sides_number, points_on_diagonals_number, v1, v2, v3, v4):
    """Sample a square: its four corners, random points on both diagonals,
    and random points on the axis-aligned sides."""
    samples = [v1, v2, v3, v4]
    diag1 = create_line_func_from_two_points(v1, v3)
    diag2 = create_line_func_from_two_points(v2, v4)
    xs = (v1.x, v2.x, v3.x, v4.x)
    x_range = Range(min(xs), max(xs))
    # Keep the original sampling order: diagonals first, then sides.
    diagonal_points = [create_random_point_on_diagonals(diag1, diag2, x_range)
                       for _ in range(points_on_diagonals_number)]
    side_points = [create_random_point_on_axis_sides(v3.y)
                   for _ in range(points_on_sides_number)]
    samples.extend(diagonal_points)
    samples.extend(side_points)
    return samples
import getopt, sys
def main():
    """Command-line entry point: dispatch on the first recognised flag.

    -r : uniform points in a rectangle (args: N xlo xhi ylo yhi)
    -c : points on a circle             (args: N r)
    -p : points on a rectangle border   (args: N xlo xhi ylo yhi)
    -s : square vertex/diagonal/side samples (args: Ns Nd x1 y1 ... x4 y4)
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "rcps", [])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)
    for opt, _ in opts:
        if opt == "-r":
            x_rng = Range(float(args[1]), float(args[2]))
            y_rng = Range(float(args[3]), float(args[4]))
            generate_range_set(int(args[0]), x_rng, y_rng)
            break
        elif opt == "-c":
            generate_circle_set(int(args[0]), float(args[1]))
            break
        elif opt == "-p":
            x_rng = Range(float(args[1]), float(args[2]))
            y_rng = Range(float(args[3]), float(args[4]))
            generate_border_rectangle_set(int(args[0]), x_rng, y_rng)
            break
        elif opt == "-s":
            corners = [Point(float(args[i]), float(args[i + 1])) for i in (2, 4, 6, 8)]
            generate_square_samples(int(args[0]), int(args[1]), *corners)
            break
        else:
            print('No option selected')
def create_line_func_from_two_points(a, b):
    """Return f(x) for the line through points `a` and `b`.

    Assumes a.x != b.x (a vertical line would divide by zero).
    """
    slope = (b.y - a.y) / (b.x - a.x)
    intercept = b.y - slope * b.x
    return lambda x: slope * x + intercept
def create_random_point_on_line(f, x_range):
    """Pick a uniform random x in `x_range` and return the point (x, f(x))."""
    lo, hi = x_range
    x_val = random.uniform(lo, hi)
    return Point(x_val, f(x_val))
def create_random_point_on_diagonals(f1, f2, x_range):
    """Sample a point on one of two diagonals: draw x over a doubled range on
    `f1`, and fold the overflow half back onto `f2`."""
    candidate = create_random_point_on_line(f1, (x_range.low, x_range.high * 2))
    if candidate.x < x_range.high:
        return candidate
    folded_x = candidate.x - x_range.high
    return Point(folded_x, f2(folded_x))
def create_random_point_on_axis_sides(y_max):
    """Sample a point on the segment x = 0 (y in [0, y_max]) or, by folding a
    doubled y-range, on the segment y = 0 (x in [0, y_max])."""
    doubled_segment = RangeXY(0, 0, 0, y_max * 2)
    candidate = create_random_point(doubled_segment)
    if candidate.y < y_max:
        return candidate
    return Point(candidate.y - y_max, 0)
def create_random_point(plane_range):
    """Return a point drawn uniformly from the rectangle `plane_range`."""
    rand_x = random.uniform(plane_range.low_x, plane_range.high_x)
    rand_y = random.uniform(plane_range.low_y, plane_range.high_y)
    return Point(rand_x, rand_y)
def create_random_point_on_rectangle(plane_range):
    """Return a point drawn uniformly from the border of `plane_range`.

    The perimeter is unrolled into one line of length 2*(width+height) and a
    position on it is mapped back to the four sides (top, right, bottom, left).

    Fix: the original used strict `<` on every segment boundary, so a draw
    landing exactly on a boundary (e.g. `random.uniform` returning 0.0)
    matched no branch and returned None.
    """
    xWidth = plane_range.high_x - plane_range.low_x
    yWidth = plane_range.high_y - plane_range.low_y
    lineWidth = 2 * xWidth + 2 * yWidth
    place = random.uniform(0.0, lineWidth)
    if place <= xWidth:
        # Top side, left to right.
        return Point(plane_range.low_x + place, plane_range.high_y)
    if place <= xWidth + yWidth:
        # Right side, top to bottom.
        return Point(plane_range.high_x, plane_range.high_y - (place - xWidth))
    if place <= 2 * xWidth + yWidth:
        # Bottom side, right to left.
        return Point(plane_range.high_x - (place - xWidth - yWidth), plane_range.low_y)
    # Left side, bottom to top.
    return Point(plane_range.low_x, plane_range.low_y + (place - 2 * xWidth - yWidth))
def create_random_point_on_circle(r):
    """Return a point uniformly distributed on the circle of radius `r`
    centred at the origin."""
    theta = random.uniform(0.0, 2 * math.pi)
    return Point(r * math.cos(theta), r * math.sin(theta))

if __name__ == '__main__':
    main()
|
# Author: Jingping.zhao
# Exception: 0x20 - 0x2F
import json
import traceback
from pymysql.err import MySQLError
from app import database
from app import application
from framework.lib.common import OrcDefaultDict
from framework.database import ClsTableLib
from framework.database.sequence import OrcTableIDGenerator
from framework.database.sequence import OrcIdGenerator
from framework.exception import OrcFrameworkDatabaseException
from sqlalchemy.exc import SQLAlchemyError
class OrcTable(object):
    """
    Generic CRUD wrapper (add/delete/update/query) around one ORM table.
    For table apps
    Exception: 0x20 - 0x28
    """
    # Shared SQLAlchemy session used by every table wrapper instance.
    _session = database.session
    def __init__(self, p_table):
        object.__init__(self)
        # Resolve the mapped table class by its name.
        self._table = ClsTableLib.table(p_table)
        # Column names, taken from an empty row's dict representation.
        self._fields = self._table().to_dict().keys()
    def _param_boundary(self, p_data):
        """
        Raise if any supplied field exceeds its declared maximum length.
        :param p_data: dict of field -> value to validate
        :return:
        """
        for _key, _value in self._table.extra.orc_length.items():
            if _key in p_data and _value < len(p_data[_key]):
                raise OrcFrameworkDatabaseException(0X10, "Field %s is too long, %s." % (_key, p_data))
    def _param_mandatory_exist(self, p_data):
        """
        Raise if a mandatory field is absent from p_data.
        :return:
        """
        for _key in self._table.extra.orc_mandatory:
            if _key not in p_data:
                raise OrcFrameworkDatabaseException(0X12, "Field %s is missing, %s." % (_key, p_data))
    def _param_mandatory_length(self, p_data):
        """
        Raise if a mandatory field is present but empty/falsy.
        :return:
        """
        for _key in self._table.extra.orc_mandatory:
            if _key in p_data and not p_data[_key]:
                raise OrcFrameworkDatabaseException(0X13, "Field %s can't be empty, %s." % (_key, p_data))
    def add(self, p_data):
        """
        Add (insert) one row.
        :param p_data: dict of field -> value
        :return: the inserted row as a dict
        """
        application.logger.debug("Add item to table %s, data is %s." % (self._table.__tablename__, p_data))
        # Param check
        self._param_boundary(p_data)
        self._param_mandatory_exist(p_data)
        self._param_mandatory_length(p_data)
        # Add
        _data = OrcDefaultDict(p_data)
        _node = self._table()
        try:
            # Set data
            for _field in self._fields:
                _value = _data.value(_field)
                # Lists and dicts are stored as JSON text.
                if isinstance(_value, list) or isinstance(_value, dict):
                    _value = json.dumps(_value)
                if 'id' == _field:
                    # ids always come from the sequence generator, never from input.
                    _node.id = OrcIdGenerator.s_get(OrcTableIDGenerator.s_get(_node.seq_flag))
                else:
                    setattr(_node, _field, _value)
            application.logger.debug("Add item %s" % _node)
            self._session.add(_node)
            self._session.commit()
        except (SQLAlchemyError, MySQLError):
            self._session.rollback()
            application.logger.error(traceback.format_exc())
            raise OrcFrameworkDatabaseException(0x01)
        return _node.to_dict()
    def delete(self, p_data):
        """
        Delete one row or several rows by id.
        :param p_data: dict with key "id" (a single id or a list of ids)
        :return: True on success
        """
        application.logger.debug("Delete from table %s, id is %s." % (self._table.__tablename__, p_data))
        try:
            ids = p_data["id"]
            if isinstance(ids, list):
                for _id in ids:
                    self._delete(_id)
            else:
                self._delete(ids)
            return True
        except KeyError:
            application.logger.error(traceback.format_exc())
            raise OrcFrameworkDatabaseException(0x05)
        except (SQLAlchemyError, MySQLError):
            self._session.rollback()
            application.logger.error(traceback.format_exc())
            raise OrcFrameworkDatabaseException(0x06)
    def _delete(self, p_id):
        """
        Delete the single row whose id equals p_id.
        :param p_id:
        :return:
        """
        self._session.query(self._table).filter(getattr(self._table, 'id') == p_id).delete()
        self._session.commit()
    def update(self, p_data):
        """
        Update the row identified by p_data["id"] with the remaining fields.
        Empty-string values are stored as NULL.
        :param p_data:
        :type p_data: dict
        :return: True on success
        """
        application.logger.debug("Update table %s, condition is %s." % (self._table.__tablename__, p_data))
        # Param check
        self._param_boundary(p_data)
        self._param_mandatory_length(p_data)
        # Update
        try:
            for _key in p_data:
                if "id" == _key:
                    continue
                _item = self._session.query(self._table).filter(getattr(self._table, 'id') == p_data["id"])
                # "" is normalized to None (NULL) on write.
                _item.update({_key: (None if "" == p_data[_key] else p_data[_key])})
            self._session.commit()
        except (SQLAlchemyError, MySQLError):
            self._session.rollback()
            application.logger.error(traceback.format_exc())
            raise OrcFrameworkDatabaseException(0x03)
        return True
    def query(self, p_cond):
        """
        Query rows matching p_cond.
        Special keys: __page__/__number__ enable paging; __order__ is a
        {field: "asc"|"desc"} dict.
        :param p_cond:
        :return: list of row dicts, or {__number__, data} when paged
        """
        application.logger.debug("Query from table %s, condition is %s." % (self._table.__tablename__, p_cond))
        result = self._session.query(self._table)
        page = None if '__page__' not in p_cond else int(p_cond['__page__'])
        number = None if '__number__' not in p_cond else int(p_cond['__number__'])
        order = None if '__order__' not in p_cond else p_cond['__order__']
        for _key in p_cond:
            # Ignore keys that are not real columns (e.g. the __xxx__ controls).
            if _key not in self._fields:
                continue
            try:
                result = self._filter(result, _key, p_cond[_key])
            except KeyError:
                application.logger.error(traceback.format_exc())
                raise OrcFrameworkDatabaseException(0x04)
            except SQLAlchemyError:
                application.logger.error(traceback.format_exc())
                raise OrcFrameworkDatabaseException(0x04)
        if order is not None:
            assert isinstance(order, dict)
            result = result.order_by(*tuple([getattr(getattr(self._table, _field), _mode)()
                                             for _field, _mode in order.items()]))
        try:
            if (page is not None) and (number is not None):
                record_num = result.count()
                result = result.offset((page - 1) * number).limit(number)
                return dict(__number__=record_num, data=[item.to_dict() for item in result.all()])
            else:
                return [item.to_dict() for item in result.all()]
        except (SQLAlchemyError, MySQLError):
            application.logger.error(traceback.format_exc())
            raise OrcFrameworkDatabaseException(0x04, "Query failed, condition is: %s" % p_cond)
    def _filter(self, p_res, p_key, p_value):
        """
        Search method
        :param p_key: field name
        :type p_key: str
        :param p_value: search value
                   str: value
                   list: (mode, value)
                   mode: 'in', like, eq(default)
        :return:
        """
        _mode = 'eq'
        _value = p_value
        try:
            # A JSON-encoded (mode, value) pair selects the comparison mode.
            _temp = json.loads(_value) if isinstance(_value, str) else _value
            if isinstance(_temp, (list, tuple)):
                _mode = _temp[0]
                _value = _temp[1]
        except (ValueError, TypeError):
            pass
        if 'eq' == _mode:
            return p_res.filter(getattr(self._table, p_key) == _value)
        elif 'in' == _mode:
            return p_res.filter(getattr(getattr(self._table, p_key), 'in_')(_value))
        elif 'like' == _mode:
            return p_res.filter(getattr(getattr(self._table, p_key), 'like')(_value))
        else:
            return p_res
class OrcOrderTable(OrcTable):
    """
    Table whose rows keep a dense per-group ordering column.
    The ordering column name comes from the table's extra.orc_order; the
    grouping key fields come from extra.orc_keys.
    """
    def __init__(self, p_table):
        OrcTable.__init__(self, p_table)
        self._key = []
        # Name of the integer ordering column.
        self._order = self._table.extra.orc_order
        if not self._order:
            raise OrcFrameworkDatabaseException(0X01, "Order table info orc_key or orc_order is missing for table %s."
                                                % getattr(self._table, "__tablename__"))
    def _order_cond(self, p_data):
        """
        Extract the grouping-key fields from p_data.
        :param p_data:
        :return: dict usable as a query condition for one ordering group
        """
        return {_key: p_data[_key] for _key in self._table.extra.orc_keys if _key in p_data}
    def add(self, p_data):
        """
        Insert a row, appending it at the end of its group's order.
        :param p_data:
        :return:
        """
        cond = self._order_cond(p_data)
        p_data[self._order] = len(super(OrcOrderTable, self).query(cond)) + 1
        return super(OrcOrderTable, self).add(p_data)
    def delete(self, p_data):
        """
        Delete row(s) and close the ordering gaps they leave behind.
        :param p_data:
        :return:
        """
        try:
            _ids = p_data["id"] if isinstance(p_data["id"], list) else [p_data["id"]]
        except KeyError:
            raise OrcFrameworkDatabaseException(0x1, "No id found in delete command.")
        data = super(OrcOrderTable, self).query({"id": ("in", _ids)})
        result = super(OrcOrderTable, self).delete(p_data)
        # Todo: optimize (one recursive _reorder pass per deleted row)
        if result:
            for _data in data:
                self._reorder(_data)
        return True
    def _reorder(self, p_cond: dict):
        """
        Shift every row after a removed position one step up.
        :param p_cond: row dict of the removed item (group keys + its old order)
        :return:
        """
        # BUGFIX: look up the *next* row by group keys + order+1. The original
        # queried p_cond itself (which still contained the deleted row's own
        # id, matching nothing) and then used attribute access (.id/.to_dict())
        # on results that query() returns as plain dicts -- see up()/down(),
        # which already use dict indexing.
        next_cond = self._order_cond(p_cond)
        next_cond[self._order] = p_cond[self._order] + 1
        next_item = self.query(next_cond)
        if next_item:
            self._change_order(next_item[0]["id"], p_cond[self._order])
            self._reorder(next_item[0])
    def update(self, p_data):
        """
        Update a row; the ordering column is managed internally and is
        stripped from the payload.
        :param p_data:
        :return:
        """
        if self._order in p_data:
            del p_data[self._order]
        return super(OrcOrderTable, self).update(p_data)
    def up(self, p_data):
        """
        Swap the matched row with its predecessor in the group order.
        :param p_data:
        :return:
        """
        cur_item = self.query(p_data)
        # Refuse unless exactly one item matches.
        if 1 != len(cur_item):
            return False
        cur_item = cur_item[0]
        assert isinstance(cur_item, dict)
        # Previous item
        cond = self._order_cond(cur_item)
        cond[self._order] = cur_item[self._order] - 1
        pre_item = self.query(cond)
        if 1 != len(pre_item):
            return True
        pre_item = pre_item[0]
        assert isinstance(pre_item, dict)
        self._change_order(pre_item["id"], cur_item[self._order])
        self._change_order(cur_item["id"], pre_item[self._order])
    def down(self, p_data):
        """
        Swap the matched row with its successor in the group order.
        :param p_data:
        :return:
        """
        cur_item = self.query(p_data)
        # Refuse unless exactly one item matches.
        if 1 != len(cur_item):
            return False
        cur_item = cur_item[0]
        assert isinstance(cur_item, dict)
        # Next item
        cond = self._order_cond(cur_item)
        cond[self._order] = cur_item[self._order] + 1
        next_item = self.query(cond)
        if 1 != len(next_item):
            return True
        next_item = next_item[0]
        assert isinstance(next_item, dict)
        self._change_order(next_item["id"], cur_item[self._order])
        self._change_order(cur_item["id"], next_item[self._order])
    def _change_order(self, p_id, p_order):
        """
        Persist a new order value for the row with id p_id.
        :param p_id:
        :param p_order:
        :return:
        """
        super(OrcOrderTable, self).update({"id": p_id, self._order: p_order})
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@author: lizhaohui
@contact: lizhaoh2015@gmail.com
@file: block.py #block类,用于随机生成地面块
@time: 2019/3/16 22:05
'''
import random
import cocos
import os
class Block(cocos.sprite.Sprite):
    """A randomly sized ground-block sprite (texture: black.png)."""
    def __init__(self,position):
        super(Block,self).__init__('black.png')
        # Anchor at the bottom-left corner so scaling grows up and to the right.
        self.image_anchor=0,0
        x,y=position
        if x==0:
            # First block (x == 0): a wide, flat starting platform.
            self.scale_x=4.5
            self.scale_y=1
        else:
            # Random width in [0.5, 2.0); height clamped to [50, 300] px,
            # drawn near the previous block's height y.
            self.scale_x=0.5+random.random()*1.5
            self.scale_y=min(max(y-50+random.random()*100,50),300)/100.0
        # Drop the block a random horizontal gap (50-150 px) to the right of x.
        self.position=x+50+random.random()*100,0
import csv
from datetime import datetime
import server.models as models
from server.app import db
def add_school_weather(school_name, file_path):
    """Create a School row named `school_name` and load its daily weather
    observations from the NOAA-style CSV at `file_path`.

    Empty CSV cells become NULL columns; the WT* "weather type" columns are
    stored as booleans (present -> True).
    """
    school = models.School(name=school_name)
    db.session.add(school)
    db.session.commit()
    # Fix: the file handle was opened without ever being closed; use a
    # context manager (newline="" per the csv module docs).
    with open(file_path, newline="") as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            weather_point = models.Weather(
                school_id=school.id,
                average_wind_speed=float(row["AWND"]) if row["AWND"] else None,
                peak_gust_time=int(row["PGTM"]) if row["PGTM"] else None,
                precipitation=float(row["PRCP"]) if row["PRCP"] else None,
                max_temp=int(row["TMAX"]) if row["TMAX"] else None,
                min_temp=int(row["TMIN"]) if row["TMIN"] else None,
                date=datetime.strptime(row["DATE"], "%Y-%m-%d"),
                WDF2=int(row["WDF2"]) if row["WDF2"] else None,
                WDF5=int(row["WDF5"]) if row["WDF5"] else None,
                WSF2=float(row["WSF2"]) if row["WSF2"] else None,
                WSF5=float(row["WSF5"]) if row["WSF5"] else None,
                has_fog=bool(row["WT01"]),
                has_heavy_fog=bool(row["WT02"]),
                has_thunder=bool(row["WT03"]),
                has_smoke=bool(row["WT08"]),
            )
            db.session.add(weather_point)
            db.session.commit()

if __name__ == "__main__":
    add_school_weather("Yale", "server/data/yale.csv")
    add_school_weather("Harvard", "server/data/harvard.csv")
import requests
from bs4 import BeautifulSoup
from python_utils import converters
def get_parsed_page(url):
    """Download *url* and return the HTML parsed with BeautifulSoup's lxml backend."""
    markup = requests.get(url).text
    return BeautifulSoup(markup, "lxml")
def top5teams():
    """Scrape the hltv.org front page and return the five top-ranked team names.

    :return: list of team name strings
    """
    home = get_parsed_page("http://hltv.org/")
    teams = []
    # (Removed the `count` variable, which was incremented but never read.)
    for team in home.find_all("div", {"class": "vsbox", })[:5]:
        teamname = team.find_all("div")[2].text.strip()
        teams.append(teamname)
    return teams
def top20teams():
    """Scrape the hltv.org team ranking page.

    :return: list of dicts with name, rank, rank-points, team-id and
             team-players (each player: name + player-id)
    """
    page = get_parsed_page("http://www.hltv.org/ranking/teams/")
    teams = page.select("div.ranking-box")
    teamlist = []
    for team in teams:
        newteam = {'name': team.select('.ranking-teamName > a')[0].text.strip(),
                   'rank': converters.to_int(team.select('.ranking-number')[0].text.strip(), regexp=True),
                   'rank-points': converters.to_int(team.select('.ranking-teamName > span')[0].text, regexp=True),
                   'team-id': converters.to_int(team.select('.ranking-delta')[0].get('id'), regexp=True),
                   'team-players': []}
        for player_div in team.select('.ranking-lineup > div'):
            player = {}
            player_anchor = player_div.select('.ranking-playerNick > a')[0]
            player['name'] = player_anchor.text.strip()
            player_link = player_anchor.get('href')
            # Old-style links carry the id as a query parameter; otherwise the
            # id is extracted from the path itself.
            if 'pageid' in player_link:
                player['player-id'] = converters.to_int(player_link[player_link.index('playerid'):], regexp=True)
            else:
                player['player-id'] = converters.to_int(player_link, regexp=True)
            # Names rendered as "[email..." are obfuscated on the listing page;
            # fetch the player's own page and use its title instead.
            if player['name'].startswith("[email"):
                player_page = get_parsed_page(str("http://www.hltv.org" + player_anchor['href']))
                player['name'] = player_page.title.text.split()[0]
            newteam['team-players'].append(player)
        teamlist.append(newteam)
    return teamlist
def top_players():
    """Scrape the hltv.org top-player statistic boxes.

    :return: list of {category, players}; each player dict holds name, team,
             player-id and the stat value for that category
    """
    page = get_parsed_page("http://www.hltv.org/?pageid=348&statsfilter=10&mapid=0")
    boxes = page.find_all("div", {"class": "framedBox"})
    top_player_categories = []
    for box in boxes:
        category_obj = {'category': box.find("h2").text}
        players = []
        for player_elem in box.select("> div"):
            player = {}
            player_link = player_elem.find('a')
            player['name'] = player_link.text
            # The team name is the text between parentheses.
            player['team'] = player_elem.text.split("(")[1].split(")")[0]
            # The player id sits between 'playerid=' and '&statsfilter' in the URL.
            p_url = player_link['href']
            player['player-id'] = converters.to_int(p_url[p_url.index('playerid=')+9:p_url.index('&statsfilter')])
            player['stat'] = player_elem.select('div:nth-of-type(2)')[0].text
            players.append(player)
        category_obj['players'] = players
        top_player_categories.append(category_obj)
    return top_player_categories
def get_players(teamid):
    """Print the de-duplicated, non-empty player names for *teamid*.

    :param teamid: integer (or string consisting of integers)
    """
    # Fix: accept integer ids too (str()), matching get_team_info below.
    page = get_parsed_page("http://www.hltv.org/?pageid=362&teamid=" + str(teamid))
    titlebox = page.find("div", {"class": "centerFade"})
    players = []
    for player in titlebox.find_all("div")[5:25]:
        players.append(player.text.strip())
    # Fix: compare by value; `x is not u''` relied on string interning.
    print([x for x in set(players) if x != ''])
def get_team_info(teamid):
    """
    :param teamid: integer (or string consisting of integers)
    :return: dictionary of team
    example team id: 5378 (virtus pro)
    """
    page = get_parsed_page("http://www.hltv.org/?pageid=179&teamid=" + str(teamid))
    team_info = {}
    # Box 0: header (name, region); box 1: current lineup;
    # box 2: historical players; box 3: team stats.
    content_boxes = page.select('div.centerFade .covGroupBoxContent')
    team_info['team-name']=content_boxes[0].select('> div')[0].text.strip()
    team_info['region'] = content_boxes[0].select('> div')[4].select('.covSmallHeadline')[1].text.strip()
    current_lineup_div = content_boxes[1]
    current_lineup = _get_lineup(current_lineup_div.select('a'))
    team_info['current-lineup'] = current_lineup
    historical_players_div = content_boxes[2]
    historical_players = _get_lineup(historical_players_div.select('a'))
    team_info['historical-players'] = historical_players
    team_stats_div = content_boxes[3]
    team_stats = {}
    for index, stat_div in enumerate(team_stats_div.select('> div')[3:]):
        # Stat divs alternate; only odd indices carry a title/value pair.
        if (index%2):
            stat_title = stat_div.select('.covSmallHeadline')[0].text.strip()
            stat_value = stat_div.select('.covSmallHeadline')[1].text.strip()
            team_stats[stat_title] = stat_value
    team_info['stats'] = team_stats
    return team_info
def _get_lineup(player_anchors):
    """Helper for get_team_info: turn lineup anchor tags into player dicts
    (player-id, name, maps-played).

    :return: list of players
    """
    lineup = []
    for anchor in player_anchors:
        href = anchor.get('href')
        text = anchor.text
        paren = text.index("(")
        entry = {}
        entry['player-id'] = converters.to_int(href[href.index('playerid'):], regexp=True)
        # Anchor text looks like "nick (N maps)": name before the parenthesis,
        # map count inside it.
        entry['name'] = text[0:paren].strip()
        entry['maps-played'] = converters.to_int(text[paren:], regexp=True)
        lineup.append(entry)
    return lineup
def get_matches():
    """Scrape upcoming matches from hltv.org.

    :return: list of dicts with date, team1/team2 (name + id) and matchid
             (the match page href)
    """
    matches = get_parsed_page("http://www.hltv.org/matches/")
    matchlist = matches.find_all("div", {"class": ["matchListBox", "matchListDateBox"]})
    datestring = ""
    matches_list = []
    for match in matchlist:
        if match['class'][0] == "matchListDateBox":
            # Date header rows apply to every match row that follows them.
            # TODO possibly change this into real date object
            datestring = match.text.strip()
        else:
            try:
                matchd = {}
                matchd['date'] = datestring + " - " + match.find("div", {"class": "matchTimeCell"}).text.strip()
                team1div = match.find("div", {"class": "matchTeam1Cell"})
                team1 = {}
                team1["name"] = team1div.text.strip()
                team1href = team1div.select('a')[0].get('href')
                team1["id"] = converters.to_int(team1href[team1href.index('teamid'):], regexp=True)
                matchd['team1'] = team1
                team2div = match.find("div", {"class": "matchTeam2Cell"})
                team2 = {}
                team2["name"] = team2div.text.strip()
                team2href = team2div.select('a')[0].get('href')
                team2["id"] = converters.to_int(team2href[team2href.index('teamid'):], regexp=True)
                matchd['team2'] = team2
                # The match page link doubles as the match id.
                matchd['matchid'] = match.find("div", {"class": "matchActionCell"}).find("a").get('href')
                matches_list.append(matchd)
            except Exception:
                # Best effort: rows that don't match the expected markup are
                # printed and skipped. Narrowed from a bare `except:` so
                # KeyboardInterrupt/SystemExit still propagate.
                print(match.text[:7].strip(), match.text[7:-7].strip())
    return matches_list
def get_results():
    """Scrape recent results from hltv.org.

    :return: list of dicts with date, map, team1/team2 (name, id, score)
             and matchid (the match page href)
    """
    results = get_parsed_page("http://www.hltv.org/results/")
    resultslist = results.find_all("div", {"class": ["matchListBox", "matchListDateBox"]})
    datestring = ""
    results_list = []
    for result in resultslist:
        if result['class'][0] == "matchListDateBox":
            # Date header rows apply to every result row that follows them.
            # TODO possibly change this into a real date object
            datestring = result.text.strip()
        else:
            # One finished match row.
            resultd = {}
            # This page reuses the time box to show the map played.
            resultd['date'] = datestring
            resultd['map'] = result.find("div", {"class": "matchTimeCell"}).text.strip()
            scores = result.find("div", {"class": "matchScoreCell"}).text.strip()
            #Team 1 info
            team1div = result.find("div", {"class": "matchTeam1Cell"})
            team1 = {}
            team1['name'] = team1div.text.strip()
            # Team id is the last "=" separated value of the team link.
            team1href = team1div.select('a')[0].get('href')
            team1['id'] = converters.to_int(team1href.split("=")[-1], regexp=True)
            team1['score'] = converters.to_int(scores.split("-")[0].strip(), regexp=True)
            resultd['team1'] = team1
            #Team 2 info
            team2div = result.find("div", {"class": "matchTeam2Cell"})
            team2 = {}
            team2['name'] = team2div.text.strip()
            team2href = team2div.select('a')[0].get('href')
            team2['id'] = converters.to_int(team2href.split("=")[-1], regexp=True)
            team2['score'] = converters.to_int(scores.split("-")[1].strip(), regexp=True)
            resultd['team2'] = team2
            # The match page link doubles as the match id.
            resultd['matchid'] = result.find("div", {"class": "matchActionCell"}).find("a").get('href')
            results_list.append(resultd)
    return(results_list)

if __name__ == "__main__":
    import pprint
    pp = pprint.PrettyPrinter()
    pp.pprint(get_results())
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/6/2 14:47
@Author : QDY
@FileName: 1011. 在 D 天内送达包裹的能力_二分查找.py
传送带上的包裹必须在 D 天内从一个港口运送到另一个港口。
传送带上的第 i 个包裹的重量为 weights[i]。每一天,我们都会按给出重量的顺序往传送带上装载包裹。
我们装载的重量不会超过船的最大运载重量。
返回能在 D 天内将传送带上的所有包裹送达的船的最低运载能力。
示例 1:
输入:weights = [1,2,3,4,5,6,7,8,9,10], D = 5
输出:15
解释:
船舶最低载重 15 就能够在 5 天内送达所有包裹,如下所示:
第 1 天:1, 2, 3, 4, 5
第 2 天:6, 7
第 3 天:8
第 4 天:9
第 5 天:10
请注意,货物必须按照给定的顺序装运,因此使用载重能力为 14 的船舶并将包装分成
(2, 3, 4, 5), (1, 6, 7), (8), (9), (10) 是不允许的。
示例 2:
输入:weights = [3,2,2,4,1,4], D = 3
输出:6
解释:
船舶最低载重 6 就能够在 3 天内送达所有包裹,如下所示:
第 1 天:3, 2
第 2 天:2, 4
第 3 天:1, 4
示例 3:
输入:weights = [1,2,3,1,1], D = 4
输出:3
解释:
第 1 天:1
第 2 天:2
第 3 天:3
第 4 天:1, 1
"""
class Solution:
    def shipWithinDays(self, weights, D):
        """Return the minimal ship capacity that moves all packages, in order,
        within D days.

        Binary-search the capacity between max(weights) (one package must fit)
        and sum(weights) (everything in one day); a prefix-sum array lets the
        feasibility check greedily fill each day without re-summing.
        """
        lo, hi = max(weights), sum(weights)
        n = len(weights)
        if D == 1:
            # One day: the ship must carry everything at once.
            return hi
        prefix = []
        running = 0
        for w in weights:
            running += w
            prefix.append(running)

        def fits(capacity):
            """True if all packages ship within D days at this capacity."""
            shipped, idx = 0, 0
            for _ in range(D):
                # Load packages while the day's total stays within capacity.
                while prefix[idx] - shipped <= capacity:
                    idx += 1
                    if idx == n:
                        return True
                shipped = prefix[idx - 1]
            return False

        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if fits(mid):
                # Feasible: try a smaller capacity.
                hi = mid - 1
            else:
                lo = mid + 1
        return lo
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
def lastOfUs(amount, step):
    """Josephus-style elimination: `amount` people stand in a circle and every
    `step`-th one is removed; return the number of the survivor."""
    people = list(range(1, amount + 1))
    while len(people) > 1:
        k = step
        # Wrap the step around the shrinking circle.
        while k > len(people):
            k -= len(people)
        # Drop person k-1 and rotate so counting restarts after them.
        people = people[k:] + people[:k - 1]
    return people[0]
# Interactive driver: read the circle size and elimination step from stdin.
a = int(input("Enter amount of guys: "))
c = int(input("Enter step: "))
# Non-positive counts would index an empty list, so report "nobody" instead.
print("The last guy is {}".format(lastOfUs(a,c) if a > 0 else "nobody"))
|
"""
This script computes the amount of training data in each of the 3 training sections
"""
import csv
from utils import get_training_files
fnames = get_training_files(base_path="src", section="Attempt7")

# Per-section accumulators: gN = total line count, gNt = file count.
g1 = 0
g1t = 0
g2 = 0
g2t = 0
g3 = 0
g3t = 0
for i in range(max(fnames) + 1):
    try:
        with open(f"src\\data\\training\\Attempt7\\{fnames[i]}") as f:
            # Section boundaries: indices < 60, < 80, and the remainder
            # (excluding the two known-bad indices 109 and 131).
            if i < 60:
                g1 += sum(1 for row in f)
                g1t += 1
            elif i < 80:
                g2 += sum(1 for row in f)
                g2t += 1
            elif i not in [109, 131]:
                g3 += sum(1 for row in f)
                g3t += 1
    except Exception:
        # Missing files/indices are skipped on purpose (best effort);
        # narrowed from a bare `except:` so KeyboardInterrupt still works.
        pass
print(g1 / g1t, g2 / g2t, g3 / g3t)
|
#!/usr/bin/env python
"""utilities for larch
"""
from __future__ import print_function
import re
import sys
import os
from .symboltable import Group
def PrintExceptErr(err_str, print_trace=True):
    """Print a banner with `err_str` and the currently-handled exception.

    :param err_str: caller-supplied error description
    :param print_trace: when False, the traceback is suppressed
    """
    print('\n***********************************')
    print(err_str)
    try:
        # Fix: sys.exc_type was a Python-2-only attribute; use exc_info().
        etype, evalue, tback = sys.exc_info()
        print('Error: %s' % etype)
        if not print_trace:
            # Fix: sys.excepthook expects a traceback object or None, not ''.
            tback = None
        sys.excepthook(etype, evalue, tback)
    except Exception:
        print('Error printing exception error!!')
        raise
    print('***********************************\n')
def strip_comments(sinp, char='#'):
    """Strip a trailing comment from `sinp`, ignoring `char` inside quotes.

    Fix: after a quoted span the scan index previously landed ON the closing
    quote (`i = i + eoc`), which then paired the closing quote with the NEXT
    opening quote and hid real comments between two quoted strings. Also use
    `eoc >= 0` so empty quotes ('' / "") are skipped correctly.
    """
    if sinp.find(char) < 0:
        return sinp
    i = 0
    while i < len(sinp):
        tchar = sinp[i]
        if tchar in ('"', "'"):
            eoc = sinp[i+1:].find(tchar)
            if eoc >= 0:
                # Jump to the closing quote; the loop increment below then
                # moves past it.
                i = i + eoc + 1
        elif tchar == char:
            return sinp[:i].rstrip()
        i = i + 1
    return sinp
# Words that may not be used as identifiers (Python keywords plus the
# larch-specific block keywords).
RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'continue', 'def',
                  'del', 'elif', 'else', 'except', 'finally', 'for',
                  'from', 'if', 'import', 'in', 'is', 'not', 'or',
                  'pass', 'print', 'raise', 'return', 'try', 'while',
                  'group', 'end', 'endwhile', 'endif', 'endfor',
                  'endtry', 'enddef', 'True', 'False', 'None')

# Fix: the dot separating name components must be escaped; an unescaped '.'
# matched ANY character, so names like "foo!bar" were accepted.
NAME_MATCH = re.compile(r"[a-z_][a-z0-9_]*(\.[a-z_][a-z0-9_]*)*$").match

def isValidName(name):
    """Return True if `name` is a valid (possibly dotted) identifier and not
    a reserved word."""
    tnam = name[:].lower()
    if tnam in RESERVED_WORDS:
        return False
    return NAME_MATCH(tnam) is not None
def isNumber(num):
    """Return True if `num` can be parsed as a (complex) number."""
    try:
        complex(num)
    except ValueError:
        return False
    return True
def isLiteralStr(inp):
    """Return True if `inp` starts and ends with the same quote character."""
    for quote in ("'", '"'):
        if inp.startswith(quote) and inp.endswith(quote):
            return True
    return False
##
class DefinedVariable(object):
    """defined variable: re-evaluate on access
    Note that the localGroup/moduleGroup are cached
    at compile time, and restored for evaluation.
    """
    def __init__(self, expr=None, larch=None):
        self.expr = expr            # source expression to (re)evaluate
        self.larch = larch          # interpreter providing compile()/interp()
        self.ast = None             # compiled AST, filled in by compile()
        self._groups = None, None   # cached (localGroup, moduleGroup)
        self.compile()
    def __repr__(self):
        return "<DefinedVariable: '%s'>" % (self.expr)
    def compile(self):
        """compile to ast (no-op unless both larch and expr are set)"""
        if self.larch is not None and self.expr is not None:
            self.ast = self.larch.compile(self.expr)
    def evaluate(self):
        "actually evaluate ast to a value"
        # Lazily compile if construction happened without an interpreter.
        if self.ast is None:
            self.compile()
        if self.ast is None:
            msg = "Cannot compile '%s'" % (self.expr)
            raise Warning(msg)
        if hasattr(self.larch, 'interp'):
            # save current localGroup/moduleGroup, evaluate, then restore.
            self.larch.symtable.save_frame()
            rval = self.larch.interp(self.ast, expr=self.expr)
            self.larch.symtable.restore_frame()
            return rval
        else:
            msg = "Cannot evaluate '%s'" % (self.expr)
            raise ValueError(msg)
class Procedure(object):
    """larch procedure: a user-defined function, holding its AST body and
    signature, executed against the larch interpreter."""
    def __init__(self, name, larch=None, doc=None,
                 fname='<StdInput>', lineno=0,
                 body=None, args=None, kwargs=None,
                 vararg=None, varkws=None):
        self.name = name
        self.larch = larch
        # The module group is captured at definition time, like a closure.
        self.modgroup = larch.symtable._sys.moduleGroup
        self.body = body          # list of AST nodes forming the body
        self.argnames = args      # positional parameter names
        self.kwargs = kwargs      # list of (name, default) pairs
        self.vararg = vararg      # name for *args, or None
        self.varkws = varkws      # name for **kwargs, or None
        self.__doc__ = doc
        self.lineno = lineno
        self.fname = fname
    def __repr__(self):
        # Build a signature string: positional, *vararg, key=default, **varkws.
        sig = ""
        if len(self.argnames) > 0:
            sig = "%s%s" % (sig, ', '.join(self.argnames))
        if self.vararg is not None:
            sig = "%s, *%s" % (sig, self.vararg)
        if len(self.kwargs) > 0:
            if len(sig) > 0:
                sig = "%s, " % sig
            _kw = ["%s=%s" % (k, v) for k, v in self.kwargs]
            sig = "%s%s" % (sig, ', '.join(_kw))
        if self.varkws is not None:
            sig = "%s, **%s" % (sig, self.varkws)
        sig = "<Procedure %s(%s), file=%s>" % (self.name, sig, self.fname)
        if self.__doc__ is not None:
            sig = "%s\n  %s" % (sig, self.__doc__)
        return sig
    def __call__(self, *args, **kwargs):
        # msg = 'Cannot run Procedure %s' % self.name
        # self.larch.on_except(None, msg=msg, expr='<>',
        #                      fname=self.fname, lineno=self.lineno,
        #                      py_exc=sys.exc_info())
        stable = self.larch.symtable
        lgroup = Group()   # fresh local namespace for this invocation
        args = list(args)
        n_args = len(args)
        n_expected = len(self.argnames)
        if n_args != n_expected:
            msg = None
            if n_args < n_expected:
                msg = 'not enough arguments for Procedure %s' % self.name
                msg = '%s (expected %i, got %i)'% (msg,
                                                   n_expected,
                                                   n_args)
                self.larch.on_except(None, msg=msg, expr='<>',
                                     fname=self.fname, lineno=self.lineno,
                                     py_exc=sys.exc_info())
            # NOTE(review): this message is assigned but never reported; extra
            # positional arguments fall through to the keyword matching below.
            msg = "too many arguments for Procedure %s" % self.name
        # Bind declared positional parameters into the local group.
        for argname in self.argnames:
            setattr(lgroup, argname, args.pop(0))
        if len(args) > 0 and self.kwargs is not None:
            msg = "got multiple values for keyword argument '%s' Procedure %s"
            # Leftover positional args are matched against declared keyword
            # parameters in declaration order.
            for t_a, t_kw in zip(args, self.kwargs):
                if t_kw[0] in kwargs:
                    msg = msg % (t_kw[0], self.name)
                    self.larch.on_except(None, msg=msg, expr='<>',
                                         fname=self.fname,
                                         lineno=self.lineno,
                                         py_exc=sys.exc_info())
                else:
                    # NOTE(review): this stores kwargs[value] = default; it
                    # looks like it should be kwargs[t_kw[0]] = t_a -- confirm.
                    kwargs[t_a] = t_kw[1]
        try:
            if self.vararg is not None:
                setattr(lgroup, self.vararg, tuple(args))
            # Keyword parameters: caller-supplied value wins over the default.
            for key, val in self.kwargs:
                if key in kwargs:
                    val = kwargs.pop(key)
                setattr(lgroup, key, val)
            if self.varkws is not None:
                setattr(lgroup, self.varkws, kwargs)
            elif len(kwargs) > 0:
                msg = 'extra keyword arguments for Procedure %s (%s)'
                msg = msg % (self.name, ','.join(list(kwargs.keys())))
                self.larch.on_except(None, msg=msg, expr='<>',
                                     fname=self.fname, lineno=self.lineno,
                                     py_exc=sys.exc_info())
        except (ValueError, LookupError, TypeError,
                NameError, AttributeError):
            msg = 'incorrect arguments for Procedure %s' % self.name
            self.larch.on_except(None, msg=msg, expr='<>',
                                 fname=self.fname, lineno=self.lineno,
                                 py_exc=sys.exc_info())
        # Run the body with (local, module) groups installed as the frame.
        stable.save_frame()
        stable.set_frame((lgroup, self.modgroup))
        retval = None
        self.larch.retval = None
        for node in self.body:
            self.larch.interp(node, expr='<>',
                              fname=self.fname, lineno=self.lineno)
            if len(self.larch.error) > 0:
                break
            # A 'return' statement sets larch.retval and ends execution.
            if self.larch.retval is not None:
                retval = self.larch.retval
                break
        stable.restore_frame()
        self.larch.retval = None
        del lgroup
        return retval
class LarchExceptionHolder:
    """Basic exception holder: captures the active exception and its source
    context at construction time, for later formatting via get_error()."""
    def __init__(self, node, msg='', fname='<StdInput>',
                 py_exc=(None, None),
                 expr=None, lineno=-3):
        self.node = node        # AST node where the error occurred (or None)
        self.fname = fname
        self.expr = expr
        self.msg = msg
        self.py_exc = py_exc    # (etype, evalue) supplied by the caller
        self.lineno = lineno
        # Snapshot of the exception being handled right now, if any.
        self.exc_info = sys.exc_info()
    def get_error(self):
        """Retrieve error data as (msg, formatted multi-line text)."""
        node = self.node
        node_lineno = 0
        node_col_offset = 0
        if node is not None:
            try:
                node_lineno = node.lineno
                node_col_offset = self.node.col_offset
            except AttributeError:
                # Narrowed from a bare `except:`: nodes without position
                # info simply contribute no line/column offset.
                pass
        lineno = self.lineno + node_lineno
        exc_text = str(self.exc_info[1])
        if exc_text in (None, 'None'):
            exc_text = ''
        expr = self.expr
        if expr == '<>':  # denotes non-saved expression -- go fetch from file!
            try:
                # Fix: use a context manager so the file handle is closed
                # even if readlines/indexing raises.
                with open(self.fname, 'r') as ftmp:
                    expr = ftmp.readlines()[lineno-1][:-1]
            except (IOError, IndexError):
                pass
        out = []
        if len(exc_text) > 0:
            out.append(exc_text)
        else:
            py_etype, py_eval = self.py_exc
            if py_etype is not None and py_eval is not None:
                out.append("%s: %s" % (py_etype, py_eval))
        if self.fname == '<StdInput>' and self.lineno <= 0:
            out.append('<StdInput>')
        else:
            out.append("%s, line number %i" % (self.fname, 1+self.lineno))
        out.append(" %s" % expr)
        if node_col_offset > 0:
            # Caret line pointing at the offending column.
            out.append(" %s^^^" % ((node_col_offset)*' '))
        return (self.msg, '\n'.join(out))
def normpath(*paths, **kwargs):
    '''normpath(path, path, ...[, unix=True, one=True]) -> path or [path]

    normalizes paths using os.path.normpath.

    if one=True and only one path is given, returns the path bare (not in a
    one-elt list)

    if unix=True, converts Windows-style backslash paths to UNIX-style slash
    paths.
    '''
    # Fix: map() returns an iterator on Python 3, so len(ret) below raised
    # TypeError; materialize a list instead.
    ret = [os.path.normpath(p) for p in paths]
    if sys.platform == 'win32' and kwargs.get('unix'):
        # Fix: replace single backslashes (r'\\' is TWO backslash characters,
        # which never matches normal Windows separators).
        ret = [p.replace('\\', '/') for p in ret]
    if kwargs.get('one', True) and len(ret) == 1:
        return ret[0]
    else:
        return ret
|
# Sample positional arguments used by the demo calls below.
args = [0, 1, 4, 9]

def unpacking_Argument_List(a, b, c, d):
    """Print each of the four positional arguments on its own line."""
    for label, value in zip("abcd", (a, b, c, d)):
        print(label + " = " + str(value))
def packing_Arguments_List(*data):
    """Collect varargs into a list, replace the second element with "Asal",
    and print the result (requires at least two arguments)."""
    collected = list(data)
    collected[1] = "Asal"
    print(collected)
# Demo: unpack the module-level `args` list into four positional parameters,
# then show varargs packing (the second argument is replaced with "Asal").
unpacking_Argument_List(*args)
packing_Arguments_List("Hello","World")
# Read an integer n and, when 1 <= n <= 150, print "123...n"
# (the concatenation of the numbers 1 through n).
n = int(input())
output=""
if 1 <= n <= 150:
    for i in range(1, n+1):
        output += str(i)
print(output)
|
from django.urls import path
from .views import fetchJsonData
# Route table for this app: the root URL serves the JSON-fetching view.
urlpatterns = [
    path('', fetchJsonData, name='jsonUrl')
]
|
# Generated by Django 3.2.2 on 2021-05-14 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds Problem.seo_title (CharField, default 'NA')
    # to the compiler app.
    dependencies = [
        ('compiler', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='problem',
            name='seo_title',
            field=models.CharField(default='NA', max_length=200),
        ),
    ]
|
import numpy as np
from PIL import ImageGrab
import cv2
import time
from time import sleep
from key import *
def key_press():
    """Replay a fixed scripted key sequence through the PressKey/ReleaseKey
    helpers star-imported from `key`, holding most keys for one second.

    NOTE(review): the sequence (Shift+B, y, space, Shift+R, i, z, w, a, n,
    '.', then Shift with a and r) appears to type out a name -- confirm the
    intent before reordering anything.
    """
    print('\n')
    PressKey(SHIFT)
    PressKey(B)
    sleep(1)
    ReleaseKey(B)
    ReleaseKey(SHIFT)
    PressKey(Y)
    sleep(1)
    ReleaseKey(Y)
    PressKey(SPACE)
    sleep(1)
    ReleaseKey(SPACE)
    PressKey(SHIFT)
    PressKey(R)
    sleep(1)
    ReleaseKey(R)
    ReleaseKey(SHIFT)
    PressKey(I)
    sleep(1)
    ReleaseKey(I)
    PressKey(Z)
    sleep(1)
    ReleaseKey(Z)
    PressKey(W)
    sleep(1)
    ReleaseKey(W)
    PressKey(A)
    sleep(1)
    ReleaseKey(A)
    PressKey(N)
    sleep(1)
    ReleaseKey(N)
    PressKey(DECIMAL)
    ReleaseKey(DECIMAL)
    PressKey(SHIFT)
    sleep(1)
    PressKey(A)
    ReleaseKey(A)
    sleep(1)
    PressKey(R)
    ReleaseKey(R)
def screen_record():
    """Continuously grab a 640x440 region of the screen, process it, and
    display it in an OpenCV window until 'q' is pressed.

    NOTE(review): proc_img is not defined in this module; presumably it
    comes from the star import of `key` -- confirm, otherwise the first
    frame raises NameError.  Its result (Edge) is computed but unused.
    """
    last_time = time.time()
    while(True):
        # bbox is (left, top, right, bottom) in screen pixels
        printscreen = np.array(ImageGrab.grab(bbox=(0,40,640,480)))
        Edge = proc_img(printscreen)
        print('loop took {} seconds'.format(time.time()-last_time))
        last_time = time.time()
        cv2.imshow('window',cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
# Run the scripted key sequence once when the module is executed.
key_press()
|
"""
Module to read data streams per line.
Supported inputs:
- raw
- json
- json timeseries (contains "date" and "value" field)
Can produce the following generators:
- raw lines
- json objects
- (date,value) tuples
Can produce the following output as non-generator:
- pandas
"""
import json
from pandas import DataFrame
#To read data files
from datetime import datetime, timedelta
from dateutil import parser
class Reader(object):
    """
    Reader class to read data streams.
    """
    def __init__(self, inputStream, begin=None, end=None):
        """
        Create reader.

        Parameters
        ----------
        inputStream : file-like, anything that supports readlines or xreadlines.
            Input is read from this inputStream with readlines or xreadlines.
            If the end parameter is specified, the inputStream must be in
            chronological order.
        begin : datetime
            Inclusive begin date. All values before this date will be discarded.
        end : datetime
            Inclusive end date. All values after this date will be discarded.
        """
        self.fd_ = inputStream
        self.reset = False
        self.begin = begin
        self.end = end

    def setReset(self, reset=True):
        """
        Whether to start at the beginning of the stream before each read. Can
        be used to read the same data multiple times. Can only be used if the
        inputStream supports seek.
        """
        self.reset = reset

    def _lines(self):
        """Return the lazy line iterator when available (xreadlines),
        falling back to readlines.  Shared by rawLines and rawJSON, which
        previously duplicated this selection logic."""
        if hasattr(self.fd_, 'xreadlines'):
            return self.fd_.xreadlines()
        return self.fd_.readlines()

    def rawLines(self, n=None):
        """
        A generator that returns the first n lines (all lines if n is None).
        """
        if self.reset:
            self.fd_.seek(0)
        for count, line in enumerate(self._lines(), start=1):
            yield line
            if n is not None and count >= n:
                return

    def rawJSON(self, n=None):
        """
        A generator that returns the first n json objects.
        """
        for line in self.rawLines(n):
            yield json.loads(line)

    def pandas(self, n=None, includeSensors=None, excludeSensors=None,
               includeBurst=True, onlyFirstValueOfBurst=False):
        """
        Import data into pandas DataFrame.

        Each row corresponds to a sensor value. Sensors are mapped to
        columns in the following way. If the sensor has a single value,
        then the column name is the sensor name. If the sensor has a json
        value, the json keys in the root are mapped to column names:
        "<sensor>_<key>". Burst sensor values are expanded into multiple
        rows, with the column names derived from the "headers" key in the
        root: "<sensor>_<header>".

        Parameters
        ----------
        includeSensors : iterable
            Only import data from these sensors.
        excludeSensors : iterable
            Don't import these sensors.
        includeBurst : boolean
            Whether to include burst sensors. (Default True).
        onlyFirstValueOfBurst : boolean
            Whether to only use a single value (the first) instead of the
            whole burst. (Default False).
        """
        if includeSensors is not None and excludeSensors is not None:
            raise ValueError("Only one of includeSensors and "
                             "excludeSensors can be set!")
        elif onlyFirstValueOfBurst and not includeBurst:
            raise ValueError("onlyFirstValueOfBurst can only be used with "
                             "includeBurst=True")
        dates = []
        data = []
        total = DataFrame([])
        bufSize = 10000

        def writeBack():
            """
            Store buffered data and dates into total.
            """
            # Local import: DataFrame.append was removed in pandas 2.0;
            # concat is the supported replacement.
            from pandas import concat
            df = DataFrame(data, index=dates)
            # clear the buffer in place (dates/data are closed over)
            del data[:]
            del dates[:]
            return concat([total, df])

        for row in self.rawJSON(n):
            sensor = row["sensor_name"]
            if excludeSensors is not None and sensor in excludeSensors:
                continue
            if includeSensors is not None and sensor not in includeSensors:
                continue
            if not includeBurst and "(burst-mode)" in sensor:
                continue
            date = datetime.fromtimestamp(row['date'])
            if self.begin is not None and date < self.begin:
                continue
            if self.end is not None and date > self.end:
                # Stream is chronological when end is given, so stop early.
                break
            value = row['value']
            if "burst-mode" in sensor and "values" in value and \
                    "header" in value and "interval" in value:
                # Expand one burst row into one row per sample.
                sensorName = sensor.replace("(burst-mode)", "").strip()
                names = []
                for key in value['header'].split(','):
                    keyName = key.strip().replace(' ', '_')
                    names.append("{}_{}".format(sensorName, keyName))
                if len(names) == 1:
                    # Rather just use the bare sensor name.
                    names = [sensorName]
                offset = date
                interval = timedelta(milliseconds=value['interval'])
                cumTime = offset
                for v in value['values']:
                    if len(names) == 1:
                        singleValue = {names[0]: v}
                    else:
                        singleValue = dict(zip(names, v))
                    data.append(singleValue)
                    dates.append(cumTime)
                    cumTime += interval
                    if onlyFirstValueOfBurst:
                        break
            elif isinstance(value, dict):
                # Prepend all keys with the sensor name and an underscore.
                tmp = {}
                for (k, v) in value.items():
                    newKey = "{}_{}".format(sensor, k)
                    tmp[newKey] = v
                data.append(tmp)
                dates.append(date)
            else:
                data.append({sensor: row['value']})
                dates.append(date)
            if len(data) > bufSize:
                total = writeBack()
        total = writeBack()
        return total

    def dateValuePairs(self, n=None):
        """
        A generator that returns the first n (date, value) tuples.
        """
        for row in self.rawJSON(n):
            (date, value) = (parser.parse(row['date']), row['value'])
            # filter on date
            if self.begin is not None and date < self.begin:
                continue
            if self.end is not None and date > self.end:
                return
            yield (date, value)

    def dateJSONValuePairs(self, n=None):
        """
        A generator that returns the first n (date, jsonValue) tuples.
        """
        for (date, value) in self.dateValuePairs(n):
            yield (date, json.loads(value))
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Tweet(models.Model):
    # A tweet: free-form text plus its author; deleting the user cascades
    # to that user's tweets.
    text = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
|
# Generated by Django 2.2.17 on 2021-06-22 15:25
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration for the events app: creates the
    # EventIndexPage wagtail page type and the orderable Event rows
    # attached to it via a ParentalKey.
    initial = True
    dependencies = [
        ("taxonomy", "0005_service_contact_reasons"),
        ("people", "0024_auto_20210412_1428"),
        ("wagtailcore", "0062_comment_models_and_pagesubscription"),
    ]
    operations = [
        migrations.CreateModel(
            name="EventIndexPage",
            fields=[
                (
                    "page_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="wagtailcore.Page",
                    ),
                ),
            ],
            options={"abstract": False,},
            bases=("wagtailcore.page",),
        ),
        migrations.CreateModel(
            name="Event",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "sort_order",
                    models.IntegerField(blank=True, editable=False, null=True),
                ),
                ("title", models.CharField(max_length=255)),
                ("intro", models.TextField(verbose_name="Description")),
                ("link_external", models.URLField(verbose_name="External link")),
                ("date", models.DateField(verbose_name="Event date")),
                ("event_type", models.CharField(max_length=30)),
                (
                    "author",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="authors",
                        to="people.Author",
                        verbose_name="Host",
                    ),
                ),
                (
                    "page",
                    modelcluster.fields.ParentalKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="events",
                        to="events.EventIndexPage",
                    ),
                ),
                (
                    "related_services",
                    modelcluster.fields.ParentalManyToManyField(
                        related_name="events", to="taxonomy.Service"
                    ),
                ),
            ],
            options={"ordering": ["sort_order"], "abstract": False,},
        ),
    ]
|
import project_functions as func
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.firefox.options import Options
import random
import threading
# =====================================================================================
class Yad2Bot(object):
    """Polls a Yad2 search-results URL in a headless Firefox and sends a
    message (via project_functions) whenever a new post appears."""

    def __init__(self, url, phone):
        self.count = 0
        self.error = ""
        self.phone = phone
        self.posts = []        # rows describing every new post found
        self.exit = False      # set by stop() to end the polling thread
        self.options = Options()
        self.options.headless = True
        self.driver = ""       # replaced by a real webdriver in start_search()
        self.url = url

    def start_search(self):
        """Open the browser and start polling in a background thread."""
        self.driver = webdriver.Firefox(options=self.options)
        # BUG FIX: the original passed target=self.post_notify() -- that
        # CALLED the (infinite) method in the current thread and handed its
        # return value to Thread.  Pass the bound method itself instead.
        t = threading.Thread(target=self.post_notify)
        t.start()

    # Notification about new post in search
    def post_notify(self):
        while True:
            # flag for stopping the thread
            if self.exit:
                print("search stopped")
                break
            else:
                options = Options()
                options.headless = True
                with self.driver as driver:
                    wait = WebDriverWait(driver, 60)
                    # taking the first result
                    while True:
                        try:
                            driver.get(self.url)
                            wait.until(presence_of_element_located((By.ID, "feed_item_0")))
                            i = 0
                            # skip promoted ("business") listings
                            while i < 5:
                                result = driver.find_element_by_css_selector(
                                    "#feed_item_{0} > div:nth-child(1) > div:nth-child(1)".format(i))
                                if "עסקי" in result.text:
                                    i += 1
                                else:
                                    break
                            old = result.text
                            print("old: " + old)
                        except Exception as e:
                            print("Error section 1:{0}".format(e))
                            break
                        # checking for update
                        while True:
                            try:
                                # random delay so the polling looks organic
                                n = random.randint(60, 120)
                                time.sleep(n)
                                driver.refresh()
                                wait.until(presence_of_element_located((By.ID, "feed_item_0")))
                                i = 0
                                # skip adds
                                while i < 5:
                                    result = driver.find_element_by_css_selector(
                                        "#feed_item_{0} > div:nth-child(1) > div:nth-child(1)".format(i))
                                    if "עסקי" in result.text:
                                        i += 1
                                    else:
                                        break
                                new = result.text
                                print("new: " + new)
                                name, price, place = func.stinger(result.text)
                            except Exception as e:
                                print("Error section 2: {0}".format(e))
                                break
                            if new != old:
                                try:
                                    # taking contact name and phone number
                                    driver.refresh()
                                    result = driver.find_element_by_css_selector(
                                        "#feed_item_{0} > div:nth-child(1) > div:nth-child(1)".format(i))
                                    contact, number = func.get_contact(result, driver)
                                    # send massage
                                    func.send_massage(
                                        "מוצר חדש שעלול לעניין אותך נכנס\nהמוצר: {0}\nמיקום: {2}\nמחיר: {1}\nאיש קשר: {4} - {5}\nלצפייה: {3}".format(name, price, place, self.url, contact, number),
                                        self.phone)
                                    print("massage sent")
                                    self.posts.append([name.replace(",", ""), place.replace(",", ""),
                                                       price.replace(",", ""), contact.replace(",", ""),
                                                       number.replace(",", ""), self.url])
                                except Exception as execpt:
                                    print("Error section 3 (massage not sent): {0}".format(execpt))
                                    break
                                break

    # return all new posts found
    def get_all(self):
        return str(self.posts)

    # stop search
    def stop(self):
        self.exit = True
        time.sleep(5)
        try:
            self.driver.close()
        except Exception as e:
            print(e)
|
from jsk_teleop_joy.joy_plugin import JSKJoyPlugin
import imp
try:
imp.find_module("geometry_msgs")
except:
import roslib; roslib.load_manifest('jsk_teleop_joy')
from geometry_msgs.msg import Twist
import tf
import rospy
import numpy
import math
import tf
import numpy
import time
class JoyCmdVel(JSKJoyPlugin):
    # def __init__(self, name='JoyPose6D', publish_pose=True):
    def __init__(self, name, args):
        """Joystick plugin that turns gamepad state into a Twist on cmd_vel."""
        JSKJoyPlugin.__init__(self, name, args)
        self.cmd_vel = Twist()
        self.publish_cmd_vel = self.getArg('publish_cmd_vel', True)
        self.max_vel = self.getArg('max_vel', 0.2)
        self.max_omega = self.getArg('max_omega', 0.17) # 10[deg]
        self.orthogonal_axis_mode = self.getArg('orthogonal_axis_mode', True)
        self.prev_time = rospy.Time.now()
        if self.publish_cmd_vel:
            self.twist_pub = rospy.Publisher(self.getArg('cmd_vel', 'cmd_vel'), Twist, queue_size = 1)

    def joyCB(self, status, history):
        """Translate the current joystick status into a rate-limited Twist.

        Left analog stick drives x/y (optionally snapped to the dominant
        axis); L1/R1 yaw; holding R3 zeroes all motion.
        """
        if history.length() > 0:
            latest = history.latest()
            # Rising edge of R3+L2+R2 toggles follow-view.
            # NOTE(review): followView is not defined in this class --
            # presumably inherited from JSKJoyPlugin; confirm.
            if status.R3 and status.L2 and status.R2 and not (latest.R3 and latest.L2 and latest.R2):
                self.followView(not self.followView())
        cmd_vel = Twist()
        # currently only support 2D plane movement
        if not status.R3:
            # xy
            dist = numpy.sqrt(status.left_analog_y * status.left_analog_y + status.left_analog_x * status.left_analog_x) # dist is assumed to be 0 < dist < 1
            scale_v = self.max_vel * dist
            if self.orthogonal_axis_mode:
                # Snap to the dominant axis; 0.2 acts as a dead-band.
                if abs(status.left_analog_y) - abs(status.left_analog_x) > 0.2:
                    x_diff = status.left_analog_y * scale_v
                    y_diff = 0.0
                elif abs(status.left_analog_y) - abs(status.left_analog_x) < -0.2:
                    x_diff = 0.0
                    y_diff = status.left_analog_x * scale_v
                else:
                    x_diff = 0.0
                    y_diff = 0.0
            else:
                x_diff = status.left_analog_y * scale_v
                y_diff = status.left_analog_x * scale_v
        else:
            x_diff = 0.0
            y_diff = 0.0
        cmd_vel.linear.x = x_diff
        cmd_vel.linear.y = y_diff
        cmd_vel.linear.z = 0.0
        dyaw = 0.0
        if not status.R3:
            if status.L1:
                dyaw = self.max_omega
            elif status.R1:
                dyaw = -self.max_omega
        cmd_vel.angular.x = 0.0
        cmd_vel.angular.y = 0.0
        cmd_vel.angular.z = dyaw
        # publish at 10hz
        # NOTE(review): the guard below actually limits to ~30 Hz
        # (1/30 s between publishes), not the 10 Hz the comment claims.
        if self.publish_cmd_vel:
            now = rospy.Time.from_sec(time.time())
            # placement.time_from_start = now - self.prev_time
            if (now - self.prev_time).to_sec() > 1 / 30.0:
                self.twist_pub.publish(cmd_vel)
                self.prev_time = now
|
import wx
from wx.lib.pubsub import Publisher
import oo_dialogbox
class tab4(wx.Panel):
    """Demo panel showing an assortment of basic wx widgets."""

    def __init__(self, parent):
        # Dropped the original unused `ww =` binding: Panel.__init__
        # returns None, so the assignment carried no information.
        wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
        # static text widget----------------------------------------------------------
        font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
        heading = wx.StaticText(self, label='wx.StaticText', pos=(15, 15))
        heading.SetFont(font)
        wx.StaticLine(self, pos=(25, 50), size=(500,1))
        rtb = wx.ToggleButton(self, label='toggle', pos=(15, 65))
        rtb.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleRed)
        wx.StaticBox(self, -1, 'Personal Info', pos=(15, 110), size=(240, 30))
        cbox2 = wx.CheckBox(self, -1, 'Male', (15, 150))
        cbox = wx.CheckBox(self, -1, 'Married', (15, 170))
        cbox.SetValue(True)
        cbox.Bind(wx.EVT_CHECKBOX, self.ShowOrHideTitle)
        wx.StaticText(self, -1, 'Age', (15, 220))
        wx.SpinCtrl(self, -1, '1', (15, 240), (60, -1), min=1, max=120)
        wx.Button(self, 1, 'Ok', (15, 275), (60, -1))
        distros = ['Ubuntu', 'Arch', 'Fedora', 'Debian', 'Mint']
        cb = wx.ComboBox(self, pos=(15, 320), choices=distros,
                         style=wx.CB_READONLY)
        slider = wx.Slider(self, 5, 6, 1, 10, (15, 360), (110, -1))
        # Report which widget the mouse enters.
        slider.Bind(wx.EVT_ENTER_WINDOW, self.OnWidgetEnter)
        cb.Bind(wx.EVT_ENTER_WINDOW, self.OnWidgetEnter)
        cbox.Bind(wx.EVT_ENTER_WINDOW, self.OnWidgetEnter)
        rtb.Bind(wx.EVT_ENTER_WINDOW, self.OnWidgetEnter)
        heading.Bind(wx.EVT_ENTER_WINDOW, self.OnWidgetEnter)

    def OnWidgetEnter(self, e):
        """Print the class name of the widget the pointer entered."""
        name = e.GetEventObject().GetClassName()
        print(name + ' widget')
        # sb.SetStatusText(name + ' widget')
        e.Skip()

    def ShowOrHideTitle(self, e):
        """Toggle the panel background colour with the checkbox state."""
        sender = e.GetEventObject()
        isChecked = sender.GetValue()
        if isChecked:
            self.SetBackgroundColour('blue')
        else:
            self.SetBackgroundColour('green')

    def ToggleRed(self, e):
        """Report the toggle button state."""
        obj = e.GetEventObject()
        isPressed = obj.GetValue()
        # BUG FIX: `print 'pressed'` is Python-2-only syntax (SyntaxError
        # on Python 3); the rest of this file already uses print(...).
        if isPressed:
            print('pressed')
        else:
            print('not pressed')
class tab2(wx.Panel):
    # Red panel with a single button that opens the SecuritySelection dialog.
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
        self.SetBackgroundColour('red')
        b1 = wx.Button(self, label="Change Security", pos=(1, 1),size=(180, 60), style=wx.STAY_ON_TOP)
        self.Bind(wx.EVT_BUTTON, self.OnButton, b1)

    def OnButton(self, evt):
        # Show the selection dialog non-modally (no parent window).
        dlg = oo_dialogbox.SecuritySelection(None, -1)
        dlg.Show()
class Choicebook(wx.Notebook):
    """Two-tab notebook hosting the model editor and metrics panels.

    (Despite its name, this is a wx.Notebook, not a wx.Choicebook.)
    """
    #----------------------------------------------------------------------
    def __init__(self, parent):
        wx.Notebook.__init__(self, parent, wx.ID_ANY)
        # Create the first tab and add it to the notebook
        self.AddPage(tab2(self), "Model Directory/Editor")
        # Create and add the second tab
        self.AddPage(tab4(self), "Position/Risk Metrics")
        # BUG FIX: this widget is a wx.Notebook, which emits
        # EVT_NOTEBOOK_PAGE_CHANGED/CHANGING.  The original bound the
        # EVT_CHOICEBOOK_* events, so the handlers below never fired.
        self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
        self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)

    #----------------------------------------------------------------------
    def OnPageChanged(self, event):
        """Called after the visible page switches."""
        old = event.GetOldSelection()
        new = event.GetSelection()
        sel = self.GetSelection()
        event.Skip()

    #----------------------------------------------------------------------
    def OnPageChanging(self, event):
        """Called before the visible page switches (veto point)."""
        old = event.GetOldSelection()
        new = event.GetSelection()
        sel = self.GetSelection()
        event.Skip()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: removes three fields and the Vmnilim model, and adds
    # a CharField plus a nullable FK.
    dependencies = [
        ('gtfgy', '0011_auto_20150218_1623'),
        ('mjdxvqk', '0009_auto_20150218_1623'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='vmnilim',
            name='vkvsbd',
        ),
        migrations.RemoveField(
            model_name='edugsywcj',
            name='nvhgs',
        ),
        migrations.RemoveField(
            model_name='gedwra',
            name='utpyfgmizz',
        ),
        migrations.AddField(
            model_name='curcmm',
            name='gasoypwn',
            field=models.CharField(default='', max_length=121),
        ),
        migrations.AddField(
            model_name='gedwra',
            name='samswfwyn',
            field=models.ForeignKey(null=True, related_name='+', to='gtfgy.Rqjyygz'),
        ),
        migrations.DeleteModel(
            name='Vmnilim',
        ),
    ]
|
import pygame
from settings import *
class Ghost(pygame.sprite.Sprite):
    """Player ghost sprite: tile-grid movement with wall/gate collision and
    puzzle state flags (reversed controls, charge, sliding)."""

    def __init__(self, game, x, y):
        # BUG FIX: was pygame.sprite.Sprite.__init__(Ghost, self) -- that
        # passes the CLASS as `self` and the instance as a sprite *group*.
        pygame.sprite.Sprite.__init__(self)
        self.game = game
        self.imageForward = pygame.transform.scale(pygame.image.load("ghostSprite/ghostForwardMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.image=self.imageForward
        self.imageLeft=pygame.transform.scale(pygame.image.load("ghostSprite/ghostLeftMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.imageRight=pygame.transform.scale(pygame.image.load("ghostSprite/ghostRightMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.imageBack=pygame.transform.scale(pygame.image.load("ghostSprite/ghostBackMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        #for puzzle 1 and puzzle 3, for reverse tiles and ect
        self.reversed=False
        #for puzzle3, sets if player is charged
        self.isCharged = False
        #for puzzle 3, sets is player is being "moved" by a tile
        self.isSliding=False
        #VELOCITY for puzzlethree
        self.dx,self.dy=0,0
        self.keys = 0
        self.freeze=False
        self.canMove=True

    def keyUpdate(self,dx=0,dy=0):
        """Record the last velocity and move immediately by (dx, dy) tiles."""
        self.dx,self.dy=dx,dy
        self.x+=dx
        self.y+=dy

    def update(self, dt, keysDown, screenWidth, screenHeight):
        #if the player stops sliding, they can't be reversed anymore
        # NOTE(review): clearing `reversed` here means the mirrored-controls
        # branch below is unreachable from update() -- confirm intended.
        if self.reversed:
            self.reversed=False
        if not self.reversed:
            if self.isSliding:
                pygame.time.wait(250)
                self.canMove=False
            #initialize dx and dy values
            dx,dy=0,0
            if keysDown(pygame.K_RIGHT):
                dx=1
                self.image=self.imageRight
            elif keysDown(pygame.K_LEFT):
                dx=-1
                self.image=self.imageLeft
            elif keysDown(pygame.K_UP):
                dy=-1
                self.image=self.imageBack
            elif keysDown(pygame.K_DOWN):
                dy=1
                self.image=self.imageForward
        else:
            #initialize dx and dy values (mirrored controls)
            dx,dy=0,0
            if keysDown(pygame.K_RIGHT):
                dx=-1
                self.image=self.imageLeft
            elif keysDown(pygame.K_LEFT):
                dx=1
                self.image=self.imageRight
            elif keysDown(pygame.K_UP):
                dy=1
                self.image=self.imageForward
            elif keysDown(pygame.K_DOWN):
                dy=-1
                self.image=self.imageForward
        if not self.collideWithWalls(dx,dy):
            self.x += dx
            self.y += dy

    #update for sliding player
    def slideUpdate(self,dt,keysDown,screenWidth,screenHeight):
        if not self.reversed:
            #initialize dx and dy values
            dx,dy=0,0
            if keysDown(pygame.K_RIGHT):
                dx=1
                self.image=self.imageRight
            elif keysDown(pygame.K_LEFT):
                dx=-1
                self.image=self.imageLeft
            elif keysDown(pygame.K_UP):
                dy=-1
                self.image=self.imageBack
            elif keysDown(pygame.K_DOWN):
                dy=1
                self.image=self.imageForward
        else:
            #initialize dx and dy values (mirrored controls)
            dx,dy=0,0
            if keysDown(pygame.K_RIGHT):
                dx=-1
                self.image=self.imageLeft
            elif keysDown(pygame.K_LEFT):
                dx=1
                self.image=self.imageRight
            elif keysDown(pygame.K_UP):
                dy=1
                self.image=self.imageForward
            elif keysDown(pygame.K_DOWN):
                dy=-1
                self.image=self.imageForward
        if not self.collideWithWalls(dx,dy):
            self.x += dx
            self.y += dy

    #this function (having to do with the walls) is from Chris Bradfield as well!
    def collideWithWalls(self,dx=0,dy=0):
        """Return True when moving by (dx, dy) would enter a wall tile or a
        closed gate tile."""
        for wall in self.game.walls:
            if wall.x==self.x+dx and wall.y==self.y+dy:
                print("collided with wall!")
                return True
        #add something for closed gate
        for gate in self.game.gates:
            if gate.isClosed:
                if gate.x==self.x+dx and gate.y==self.y+dy:
                    return True
        return False

    #if the player is on the puzzle1 startspot
    def onSpotOne(self):
        spot = self.game.startSpot
        if spot.x==self.x and spot.y==self.y:
            return True
        return False

    def onEndSpot(self):
        # True on any end spot; falls through to None otherwise.
        for spot in self.game.endSpots:
            if spot.x==self.x and spot.y==self.y:
                return True

    def draw(self,screen):
        #draws ghost at certain tile (topleft midpoint, 4x4)
        screen.blit(self.image,pygame.Rect(3*TILESIZE, 3*TILESIZE, TILESIZE,
                                           TILESIZE))
class Reflection(pygame.sprite.Sprite):
    """Mirrored copy of the ghost used in puzzle 1: moves opposite to the
    player's input and is drawn relative to the player's scroll position."""

    def __init__(self, game, x, y):
        # BUG FIX: was pygame.sprite.Sprite.__init__(Reflection, self) --
        # that passes the CLASS as `self` and the instance as a group.
        pygame.sprite.Sprite.__init__(self)
        self.game = game
        self.imageForward = pygame.transform.scale(pygame.image.load("ghostSprite/ghostForwardMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.image=self.imageForward
        self.imageLeft=pygame.transform.scale(pygame.image.load("ghostSprite/ghostLeftMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.imageRight=pygame.transform.scale(pygame.image.load("ghostSprite/ghostRightMid.png").convert_alpha(),(TILESIZE,TILESIZE))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.drawX = 0
        self.canMove=False

    def update(self, dt, keysDown, screenWidth, screenHeight):
        self.scrollX,self.scrollY = self.game.getPlayerPosition()
        if self.game.puzzleOneEnded:
            print('KILLLLLL')
            self.kill()
        if self.canMove:
            #initialize dx and dy values (mirrored controls)
            dx=0
            dy=0
            if keysDown(pygame.K_RIGHT):
                dx=-1
                self.image=self.imageLeft
            elif keysDown(pygame.K_LEFT):
                dx=1
                self.image=self.imageRight
            elif keysDown(pygame.K_UP):
                dy=1
                self.image=self.imageForward
            elif keysDown(pygame.K_DOWN):
                dy=-1
                self.image=self.imageForward
            if not self.collideWithWalls(dx,dy):
                self.x += dx
                self.y += dy

    def collideWithWalls(self,dx=0,dy=0):
        """Return True when moving by (dx, dy) would enter a wall tile."""
        for wall in self.game.walls:
            if wall.x==self.x+dx and wall.y==self.y+dy:
                return True
        return False

    def onEndSpot(self):
        # True on any end spot; falls through to None otherwise.
        for spot in self.game.endSpots:
            if spot.x==self.x and spot.y==self.y:
                return True

    def reDraw(self,screen):
        #draws ghost at certain tile, offset by the player's scroll position
        midX,midY=3,3
        self.drawX=self.x+midX-self.scrollX
        self.drawY=self.y+midY-self.scrollY
        screen.blit(self.image,pygame.Rect(self.drawX*TILESIZE, self.drawY*TILESIZE, TILESIZE,
                                           TILESIZE))
|
import re
import os, sys
# local
try:
# need when 「python3 gfzs/markup.py」
if __name__ == "__main__":
# https://codechacha.com/ja/how-to-import-python-files/
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from multibyte import Multibyte
import color
import runtime.config as runtime_config
import logger
if os.environ.get("DEBUG"):
import debug
# need when 「cat fixtures/rust.json | python -m gfzs」
# need when 「cat fixtures/rust.json | bin/gfzs」
else:
from gfzs.utils.multibyte import Multibyte
import gfzs.utils.color as color
import gfzs.runtime.config as runtime_config
import gfzs.utils.logger as logger
if os.environ.get("DEBUG"):
from gfzs.utils import debug
# need when 「python3 gfzs/controller.py」
except ModuleNotFoundError:
# https://codechacha.com/ja/how-to-import-python-files/
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname("../"))))
from utils.multibyte import Multibyte
import utils.color as color
import runtime.config as runtime_config
import utils.logger as logger
if os.environ.get("DEBUG"):
from utils import debug
class Markup:
    """Finds where search terms occur inside a result line so the search
    view can colorize partial matches and per-character matches."""

    def __init__(self):
        logger.debug("[Markup] init")
        self.multibyte = Multibyte()
        self.color_data = runtime_config.data["view"]["search_result"]["color"]
        self.colors = self._create_colors(self.color_data)

    def parse(self, text, search_text):
        """Return {term: [match descriptors]} for each whitespace-separated
        term of search_text found in text; {} for None or empty input."""
        logger.debug("[Markup] parse by search_text: '%s'" % search_text)
        result = {}
        # BUG FIX: was `search_text is ""` -- identity comparison with a
        # string literal is implementation-dependent (and a SyntaxWarning
        # on CPython 3.8+); compare by equality instead.
        if search_text is None or search_text == "":
            return result
        remake_text = self.multibyte.marked_full_width(text)
        search_texts = [item for item in search_text.split(" ") if item]
        for st in search_texts:
            # Markup Partial (whole-term matches take precedence) ...
            result_at_partial = self._parse_as_partial(remake_text, st)
            for item in result_at_partial:
                result[item] = result_at_partial[item]
            # ... then Markup Char fills in terms not already matched.
            result_at_char = self._parse_as_char(remake_text, st)
            for item in result_at_char:
                if item not in result:
                    result[item] = result_at_char[item]
        return result

    def _parse_as_partial(self, remake_text, search_text):
        """Locate whole occurrences of search_text in the marked text.

        NOTE(review): search_text is used directly as a regex pattern, so
        regex metacharacters in the query alter the match -- confirm
        whether it should be re.escape()d.
        """
        result = {}
        remake_search_text = self.multibyte.marked_full_width(search_text)
        for m in re.finditer(remake_search_text, remake_text):
            if m is None:
                continue
            if search_text not in result:
                result[search_text] = []
            span = m.span()
            result[search_text].append(
                {
                    "half_width": {"start_index": span[0], "end_index": span[1]},
                    "color": self.colors["markup_partial"],
                    "match": self.multibyte.unmarked_full_width(m.group()),
                    "_type": "partial",
                }
            )
        return result

    def _parse_as_char(self, remake_text, search_text):
        """Locate occurrences of any single character of search_text.

        NOTE(review): the characters are interpolated into a regex
        character class, so ']', '\\' or a leading '^' in the query would
        break or invert the pattern -- confirm inputs are pre-sanitized.
        """
        result = {}
        char_pattern = re.compile(r"[{0}]".format(search_text))
        for m in char_pattern.finditer(remake_text):
            if m is None:
                continue
            if search_text not in result:
                result[search_text] = []
            span = m.span()
            result[search_text].append(
                {
                    "half_width": {"start_index": span[0], "end_index": span[1]},
                    "color": self.colors["markup_char"],
                    "match": m.group(),
                    "_type": "char",
                }
            )
        return result

    def _create_colors(self, color_data) -> dict:
        """Map each view name to its curses color pair via color.use()."""
        result = {}
        for view_name in color_data:
            result[view_name] = color.use(color_data[view_name])
        return result
if __name__ == "__main__":
    # Manual smoke test: exercise Markup.parse against a sample Japanese
    # search-result line with several kinds of search input.
    import curses

    progname = "gfzs.utils.markup"
    properties = {"progname": progname, "severity": 0, "log_path": "./tmp/gfzs.log"}
    logger.init_properties(**properties)
    logger.debug("start %s" % progname)
    runtime_config.init()
    if not runtime_config.valid():
        logger.debug("[print] 'Config is invalid.'")
        print("Config is invalid.")
        for error in runtime_config.errors:
            logger.error(error)
            print("Error: %s" % error)
        logger.debug("exit 0")
        sys.exit(1)
    try:
        # initscr() returns a window object representing the entire screen.
        logger.debug("init curses")
        stdscr = curses.initscr()
        color.init()
        markup = Markup()
        text = "Rustは非常に高速でメモリ効率が高くランタイムやガベージコレクタがないため、パフォーマンス重視のサービスを実装できますし、組込み機器上で実行したり他の言語との調和も簡単にできます。 信頼性. Rustの豊かな型システムと所有権 ..."
        search_text = "Rust 非常 効率"
        result = markup.parse(text, search_text)
        print("Partial: ", result)
        search_text = "パピプペポ"
        result = markup.parse(text, search_text)
        print("Char: ", result)
        search_text = None
        result = markup.parse(text, search_text)
        print("None: ", result)
        search_text = ""
        result = markup.parse(text, search_text)
        print("Blank: ", result)
        search_text = "\0"
        result = markup.parse(text, search_text)
        print("Null: ", result)
    finally:
        logger.debug("end curses")
        logger.debug("end %s" % progname, new_line=True)
        curses.endwin()
|
"""
다양한 수로 이루어진 배열이 있을 때 주어진 수들을 M번 더하여 가장 큰 수를 만든다.
단, 배열의 특정한 인덱스(번호)에 해당하는 수가 연속해서 K번을 초과하여 더해질 수 없다.
N=배열의 크기
M=숫자가 더해지는 횟수
[2,4,5,4,6]이 있을 때 M=8이고 K=3이면 결과는,
6+6+6+5+6+6+6+5 = 46이다.
""""
"""
--- 내가 풀어본 것 ---
from random import randint
array = []
N = randint(2,1000)
M = randint(1,10000)
K = randint(1,M)
# array = [2,4,5,4,6]
# N = 5
# M = 8
# K = 3
sum = 0
for _ in range(N):
array.append(randint(1,10000))
array.sort(reverse=True)
first_value = array[0]
second_value = array[1]
a = M // (K+1)
b = M % (K+1)
for _ in range(a):
sum += (first_value * K) + second_value
sum += first_value * b
print(N,M,K, sep=" ")
print(sum)
"""
# Read N, M, K separated by spaces
n, m, k = map(int, input().split())
# Read the N numbers separated by spaces
data = list(map(int, input().split()))
# Sort ascending so the two largest values sit at the end
data.sort()
first = data[n-1]
second = data[n-2]
result = 0
while True:
    # Add the largest number up to k times in a row
    for i in range(k):
        if m == 0:
            break
        result += first
        m -= 1
    if m == 0:
        break
    # Then one occurrence of the second largest to reset the streak
    result += second
    m -= 1
print(result)
"""
내가 풀은 것이 효율적인 답안 예시로 나왔음!
while문 이용하는 방법도 알아두기
""" |
import argparse
import cv2
import numpy as np
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
import Util
import subprocess
# compute true diff of two frames based on their histograms
def histogram_diff(f1,f2):
    """Return True when the two frames' hue/saturation histograms differ.

    Uses cv2.compareHist with method 0 (correlation, 1.0 == identical), so
    a score below the module-level `threshold` global means "different".

    NOTE(review): `threshold` is read from the global set in __main__, not
    a parameter -- this function only works when the script is run directly.
    """
    h_bins = 50
    s_bins = 60
    histSize = [h_bins, s_bins]
    # OpenCV hue range is 0-180; saturation 0-256
    h_ranges = [0, 180]
    s_ranges = [0, 256]
    ranges = h_ranges + s_ranges
    channels = [0, 1]
    f1 = cv2.calcHist([f1], channels, None, histSize, ranges, accumulate=False)
    cv2.normalize(f1, f1, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
    f2 = cv2.calcHist([f2], channels, None, histSize, ranges, accumulate=False)
    cv2.normalize(f2, f2, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
    diff = cv2.compareHist(f1, f2, 0)
    if diff < threshold:
        return True
    return False
# find the general keyframe (i.e, the keyframe in the videos without prior knowledge)
def find_general_keyframe():
    """Scan the video and record candidate keyframes.

    Populates the module-level dicts `ret` (second -> diff score) and
    `images` (second -> frame); also reads the globals `video_path` and
    `timeframe` (and, via histogram_diff, `threshold`).
    """
    compare = histogram_diff
    cap = cv2.VideoCapture(video_path)
    suc, frame = cap.read()
    # m maps a timestamp (whole seconds) to the frames seen in that second
    m = {}
    while suc:
        suc, curr_frame = cap.read()
        if suc:
            s = int(cap.get(cv2.CAP_PROP_POS_MSEC)/1000)
            # drop buffered frames older than the comparison window
            k = list(m.keys())
            for i in k:
                if i < s - timeframe:
                    m.pop(i)
            # compare this frame against frames `timeframe` seconds back
            for i in m.get(s - timeframe,[]):
                diff = compare(curr_frame,i)
                if diff:
                    # only record if no keyframe exists in the last
                    # `timeframe` seconds
                    # NOTE(review): the inner loop reuses the name `i`,
                    # clobbering the frame being compared -- harmless only
                    # because of the `break` immediately after.
                    NotInclude = True
                    for i in range(0,timeframe):
                        if s - i in ret:
                            NotInclude = False
                    if NotInclude:
                        images[s] = curr_frame
                        ret[s] = diff
                    break
            m[s] = m.get(s,[])
            m[s].append(curr_frame)
def check_target_image(video_path, target_image, threshold = 150):
    """Return True if any frame of the video matches target_image.

    Frames are compared in grayscale via Util.image_comparision; a score of
    at least `threshold` counts as a match (prints the path and score).
    """
    img_target = cv2.imread(target_image, cv2.IMREAD_GRAYSCALE)
    cap = cv2.VideoCapture(video_path)
    suc,frame = cap.read()
    while suc:
        suc, curr_frame = cap.read()
        if suc:
            img = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
            diff = Util.image_comparision(img_target,img)
            if diff >= threshold:
                print(video_path)
                print(diff)
                return True
    return False
def remove_videos(l):
    """Delete every file named in *l*; no-op for an empty list.

    Missing files are ignored, matching the best-effort behavior of the
    original `rm` call (whose exit status was never checked).
    """
    import os
    for path in l:
        # BUG FIX: the original built a single shell string
        # (`rm a b c`, shell=True), which broke on filenames containing
        # spaces and was shell-injectable.  Remove each file directly.
        try:
            os.remove(path)
        except OSError:
            pass
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Key-frame detection program for extracting and retrieving important frames afrom long-duration videos.')
    parser.add_argument('--video-path', help='Path to the video.')
    parser.add_argument('--threshold', help='The threshold for frames comparations. The value should be in range 0.99-0.9999', default=0.99)
    parser.add_argument('--time_interval', help='The time interval for generating the short videos from collected keyframe.', default=2)
    parser.add_argument('--number-of-frames', help='The numbers of keyframes to collect.',default=6)
    parser.add_argument('--target-image', help='path the target image to filter keyfame', default=None)
    args = parser.parse_args()
    # Globals consumed by find_general_keyframe() / histogram_diff().
    ret = {}
    images = {}
    video_path = args.video_path
    threshold = float(args.threshold)
    timeframe = int(args.time_interval)
    number_of_keyframe = int(args.number_of_frames)
    target_image= args.target_image
    find_general_keyframe()
    names = []
    # Keep the N keyframes with the lowest correlation (most distinct);
    # write each as a PNG and cut a short clip around its timestamp.
    for i,_ in sorted(ret.items(), key = lambda x: x[1] )[:number_of_keyframe]:
        file_name = 'images'+str(i)+'.png'
        cv2.imwrite('images'+str(i)+'.png',images[i])
        video_name = "frame_test{}.mp4".format(str(i))
        Util.get_short_video(video_path,i, name=video_name)
        names.append((file_name,video_name))
        # if target_image and not check_target_image(name,target_image):
        #     remove_lst.append(name)
    if target_image:
        # Drop any clip (and its image) that does not contain the target.
        remove_lst = []
        for fname,vname in names:
            if not check_target_image(vname,target_image):
                remove_lst.append(fname)
                remove_lst.append(vname)
        remove_videos(remove_lst)
|
#!/usr/bin/python
# ==============================================================================
# Author: Tao Li (taoli@ucsd.edu)
# Date: Jun 3, 2015
# Question: 069-Sqrt
# Link: https://leetcode.com/problems/sqrtx/
# ==============================================================================
# Implement int sqrt(int x).
#
# Compute and return the square root of x.
# ==============================================================================
# Method: Newton Formula
# ==============================================================================
class Solution:
    # @param {integer} x
    # @return {integer}
    def mySqrt(self, a):
        """Return floor(sqrt(a)), or -1 for negative input.

        Newton's method on integers converges to the exact floor square
        root; the previous float iteration could mis-round for large
        inputs because of limited double precision.
        """
        if a < 0:
            return -1
        elif a == 0:
            return 0
        # Starting from a, the iterate decreases monotonically to floor(sqrt(a)).
        x = a
        while x * x > a:
            x = (x + a // x) // 2
        return x
if __name__ == '__main__':
    # print() call form works on both Python 2 and 3; the original Python-2
    # print statement is a SyntaxError under Python 3.
    print(Solution().mySqrt(3))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import Http404
from .serializers import CampaignSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Campaign
import datetime
class UserList(APIView):
    """List every Campaign, or create a new one.

    (Despite the class name, this view operates on Campaign objects.)
    """
    def get(self, request, format=None):
        # Serialize the whole campaign table in one response.
        campaigns = Campaign.objects.all()
        return Response(CampaignSerializer(campaigns, many=True).data)
    def post(self, request, format=None):
        ser = CampaignSerializer(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data, status=status.HTTP_201_CREATED)
class UserDetail(APIView):
    """Retrieve one Campaign by partner_id and report whether it is still active."""
    def get_object(self, partner_id):
        """Return the Campaign with the given partner_id, or raise Http404."""
        try:
            return Campaign.objects.get(partner_id=partner_id)
        except Campaign.DoesNotExist:
            # Bug fix: the lookup is on Campaign, so Campaign.DoesNotExist is
            # the exception that fires; User.DoesNotExist never matched here,
            # so a missing campaign escaped as a 500 instead of a 404.
            raise Http404
    @staticmethod
    def _to_seconds(hms):
        """Convert an 'HH:MM:SS[.ffffff]' string to seconds since midnight."""
        hours, minutes, seconds = hms.split(':')
        return (int(hours) * 3600) + (int(minutes) * 60) + float(seconds)
    def get(self, request, partner_id, format=None):
        campaign = CampaignSerializer(self.get_object(partner_id))
        start = self._to_seconds(campaign.data["creation_time"])
        current = self._to_seconds(str(datetime.datetime.now().time()))
        camp_duration = int(campaign.data["duration"])
        # NOTE(review): comparing seconds-since-midnight breaks for campaigns
        # spanning midnight -- presumably acceptable for short campaigns.
        if current < (start + camp_duration):
            return Response(campaign.data)
        else:
            return Response("""{"status":"Campaign is not Active"}""")
from import_export import resources
from apps.mascota.models import Mascota
class MascotaResource(resources.ModelResource):
    """django-import-export resource for bulk import/export of Mascota rows."""
    class Meta:
        model = Mascota
        # Match imported rows on 'nombre' instead of the default primary key.
        import_id_fields= ['nombre']
from dateutil.relativedelta import relativedelta
from datetime import datetime, timedelta
def get_datefilters():
    """Build the named date-range filters.

    Returns a dict mapping a Portuguese label to a 4-element list:
    [sql_start, sql_end, py_start, py_end] -- the SQL entries are Postgres
    expressions and the Python datetimes must describe the same range.
    """
    # Midnight today; every Python-side range is derived from it.
    now = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    from_to = dict()
    from_to['Hoje']=[ 'now()::date', '''now()::date + interval '1 day' - interval '1 minute' ''',
                     now, now + timedelta(days=1) - timedelta(minutes=1)]
    from_to['Ontem']=[''' now()::date - interval '1 day' ''', '''now()::date - interval '1 minute' ''',
                     now - timedelta(days=1), now - timedelta(minutes=1)]
    from_to['Últimos 7 Dias']=[''' now()::date - interval '6 day' ''', '''now()::date + interval '1 day' - interval '1 minute' ''',
                     now - timedelta(days=6), now + timedelta(days=1) - timedelta(minutes=1)]
    from_to['Últimos 30 Dias']=[''' now()::date - interval '29 day' ''', '''now()::date + interval '1 day' - interval '1 minute' ''',
                     now - timedelta(days=29), now + timedelta(days=1) - timedelta(minutes=1)]
    from_to['Mês Anterior']=['''to_char(now(), 'YYYY-MM-01')::date - interval '1 month' ''', ''' to_char(now(), 'YYYY-MM-01')::date - interval '1 minute' ''',
                     now.replace(day=1) - relativedelta(months=1), now.replace(day=1) - timedelta(minutes=1)]
    from_to['Mês Atual']=[''' to_char(now(), 'YYYY-MM-01')::date ''', ''' to_char(now(), 'YYYY-MM-01')::date + interval '1 month' - interval '1 minute' ''',
                     now.replace(day=1), now.replace(day=1) + relativedelta(months=1) - timedelta(minutes=1)]
    # Bug fix: the SQL interval previously said '12 months', contradicting
    # both the label and the Python value (copy-paste from 'Últimos 12 Meses').
    from_to['Últimos 6 Meses']=['''now()::date - interval '6 months' ''', '''now()::date + interval '1 day' - interval '1 minute' ''',
                     now - relativedelta(months=6), now + timedelta(days=1) - timedelta(minutes=1)]
    from_to['Ano Passado']=['''to_char(now(), 'YYYY-01-01')::date - interval '1 year' ''', '''to_char(now(), 'YYYY-01-01')::date - interval '1 minute' ''',
                     now.replace(month=1 ,day=1) - relativedelta(years=1), now.replace(month=1 ,day=1) - timedelta(minutes=1)]
    from_to['Ano Atual']=['''to_char(now(), 'YYYY-01-01')::date ''', '''to_char(now(), 'YYYY-01-01')::date + interval '1 year' - interval '1 minute' ''',
                     now.replace(month=1 ,day=1), now.replace(month=1 ,day=1) + relativedelta(years=1) - timedelta(minutes=1)]
    from_to['Últimos 12 Meses']=['''now()::date - interval '12 months' ''', '''now()::date + interval '1 day' - interval '1 minute' ''',
                     now - relativedelta(months=12), now + timedelta(days=1) - timedelta(minutes=1)]
    return from_to
"""wxPython-specific property classes"""
from basicproperty import basic, common
from basictypes.wxtypes import colour, pen, font
from basictypes import enumeration
## DATA-model properties
class ColourProperty(basic.BasicProperty):
    """wxColour property (data-model: value stored on the property itself)"""
    baseType = colour.wxColour_DT
    friendlyName = "Colour"
class PenProperty( basic.BasicProperty ):
    """wxPen property (data-model: value stored on the property itself)"""
    baseType = pen.wxPen
    friendlyName = "Pen"
class FontProperty( basic.BasicProperty ):
    """wxFont property (data-model: value stored on the property itself)"""
    baseType = font.wxFont_DT
    friendlyName = "Font"
## LIVE properties
class wxPenStyleProperty(
    enumeration.EnumerationProperty,
    basic.MethodStore,
    basic.BasicProperty
):
    """wxPen Style property (live: reads/writes via GetStyle/SetStyle)"""
    baseType = pen.PenStyle
    getMethod = "GetStyle"
    setMethod = "SetStyle"
    friendlyName = "Line Style"
class wxPenCapProperty(
    enumeration.EnumerationProperty,
    basic.MethodStore,
    basic.BasicProperty
):
    """wxPen Cap property (live: reads/writes via GetCap/SetCap)"""
    baseType = pen.PenCap
    getMethod = "GetCap"
    setMethod = "SetCap"
    friendlyName = "Cap Style"
class wxPenJoinProperty(
    enumeration.EnumerationProperty,
    basic.MethodStore,
    basic.BasicProperty
):
    """wxPen Join property (live: reads/writes via GetJoin/SetJoin)"""
    baseType = pen.PenJoin
    getMethod = "GetJoin"
    setMethod = "SetJoin"
    friendlyName = "Corner Style"
class wxWidthProperty( basic.MethodStore, common.IntegerProperty ):
    """wxObject Width property (live: reads/writes via GetWidth/SetWidth)"""
    getMethod = "GetWidth"
    setMethod = "SetWidth"
    friendlyName = "Width"
class wxColourProperty( basic.MethodStore, ColourProperty ):
    """wxObject Colour property (live: reads/writes via GetColour/SetColour)"""
    getMethod = "GetColour"
    setMethod = "SetColour"
    friendlyName = "Colour"
|
# Classic bias-variance tradeoff illustration: plot a decreasing bias^2
# curve, an increasing variance curve, and their sum on one chart.
import matplotlib.pyplot as plt
# y value series
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
# zip() combines two data series to tuples
total_error = [x + y for x, y in zip(variance, bias_squared)]
# x values (one point per model-complexity step)
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
# green solid line, red dot-dashed line, blue dotted line
plt.plot(xs, variance, 'g-', label='variance')
plt.plot(xs, bias_squared, 'r-.', label='bias^2')
plt.plot(xs, total_error, 'b:', label='total error')
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
import flask
from keg_mail.views import (
LogStatusWebhook as LogStatusWebhookBase,
WebhookBase,
)
# Blueprint under which both webhook views below are registered.
km_blueprint = flask.Blueprint('keg_mail', __name__)
class NoOpWebhook(WebhookBase):
    """Webhook view mounted at /noop-webhook; all handling comes from WebhookBase."""
    blueprint = km_blueprint
    url = '/noop-webhook'
class LogStatusWebhook(LogStatusWebhookBase):
    """Webhook view mounted at /log-status-webhook; handling comes from LogStatusWebhookBase."""
    blueprint = km_blueprint
    url = '/log-status-webhook'
|
#!/usr/bin/env python
# _*_ coding:utf-8_*_
# author:jinxiu89@163.com
# create by thomas on 18-1-27.
from flask_sqlalchemy import SQLAlchemy
# Module-level SQLAlchemy handle shared by the application's models.
db = SQLAlchemy()
|
import numpy as np
from PIL import Image
class LSB:
    """
    Implements encryption and decryption functionality for Least Significant
    Bit Steganography.

    For encryption, the color components (Red-Green-Blue) of every pixel of
    a given image are taken and the last 2 bits of every component are
    replaced with 2 bits of a character of a given text. For optimal
    performance, Numpy arrays and bitwise operations are heavily used.
    For decryption, the last 2 bits of every color component of every pixel
    are retrieved and concatenated back to a hidden text.

    Texts are encoded as UTF-8; an all-ones byte (0xFF, i.e. four 2-bit
    values of 3) terminates the hidden message.
    """
    FILENAME_OUTPUT = 'output.png'

    @staticmethod
    def handle(args):
        """CLI entry point: retrieve a message from, or insert one into, args.image."""
        img = Image.open(args.image)
        if args.retrieve:
            LSB.retrieve(img)
        else:
            text = LSB.get_text(args)
            LSB.insert(img, text)

    @staticmethod
    def retrieve(img: "Image.Image"):
        """Retrieves a hidden text from a given Image and prints it."""
        # String annotation avoids evaluating the PIL name at import time
        # (the original `img: Image` also annotated with the module, not a type).
        arr = np.array(img).astype(np.uint8).reshape(-1)
        bits = arr & 3  # keep only the two lowest bits of every component
        text = LSB.bits_to_str(bits)
        print(text)

    @staticmethod
    def insert(img: "Image.Image", msg: str):
        """Inserts a given text into a given Image and saves the output.

        Raises ValueError when the image is too small to hold the message
        (previously this surfaced as an opaque numpy broadcast error).
        """
        arr = np.array(img).astype(np.uint8)
        flat = arr.reshape(-1).copy()
        bits = LSB.str_to_bits(msg)
        length = len(bits)
        if length > flat.size:
            raise ValueError(
                'Message too long for this image: needs %d components, '
                'image has %d.' % (length, flat.size)
            )
        # Clear the two LSBs of each used component, then add the payload bits.
        flat[:length] = (flat[:length] & 252) + bits
        stego = flat.reshape(arr.shape)
        Image.fromarray(stego).save(LSB.FILENAME_OUTPUT)

    @staticmethod
    def get_text(args):
        """Returns a text given as an argument or reads in a given file."""
        if args.message is not None:
            return args.message
        # Context manager closes the file (the original leaked the handle).
        with open(args.file, encoding='utf-8') as handle:
            return handle.read()

    @staticmethod
    def bits_to_str(bits: np.ndarray) -> str:
        """Sums up blocks of 4 and returns a char representation of the sum."""
        chars = []
        for i in np.arange(0, len(bits), 4):
            val = LSB.bits_to_int(bits[i:i + 4])
            if val == 255:  # four 3s in a row: end-of-message marker
                return bytes(chars).decode('utf-8')
            chars.append(val)
        raise ValueError('Could not find end block during decryption.')

    @staticmethod
    def bits_to_int(bits: np.ndarray) -> int:
        """
        Shifts 2-bit pairs back into their position of an 8-bit string and
        sums up their integer values.

        Example: [1, 2, 2, 0] -> (1 << 6) + (2 << 4) + (2 << 2) + 0 = 104.

        :param bits: array of 4 2-bit elements as int
        :return: sum of shifted bits as int
        """
        return sum(int(b) << shift for b, shift in zip(bits, range(6, -1, -2)))

    @staticmethod
    def str_to_bits(text: str) -> np.ndarray:
        """
        Converts a string (text) to a 1d Numpy array of 2-bit values.

        The string is UTF-8 encoded, every byte is split into 4 2-bit
        elements, and the terminator [3, 3, 3, 3] (0xFF) is appended.

        Example: 'foo' -> [1, 2, 1, 2, ...] + [3, 3, 3, 3]

        :param text: the string to be converted
        :return: Numpy bit array
        """
        values = []
        for byte in text.encode('utf-8'):
            values.extend((byte >> shift) & 3 for shift in range(6, -1, -2))
        values.extend([3, 3, 3, 3])  # end-of-message marker (0xFF)
        return np.array(values)
|
import pickle
import pandas as pd
import chardet
import codecs
class Test:
    """Tiny attribute playground: set attributes dynamically, render them in __str__."""
    def __init__(self):
        # Start with no instance attributes at all.
        pass
    def test_set_attr(self, name, value):
        """Attach attribute *name* = *value* to this instance."""
        setattr(self, name, value)
    def __str__(self):
        # Concatenate every attribute as 'name:value' with no separator,
        # in insertion order.
        return ''.join(f'{name}:{getattr(self, name)}' for name in self.__dict__)
if __name__ == '__main__':
    # Demo: an empty Test prints nothing; after setting an attribute the
    # 'name:value' pair shows up in str().
    test = Test()
    print(test)
    test.test_set_attr('hello', 1)
    print(test)
|
# Generated by Django 3.0 on 2021-06-09 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the integer 'amount' column (default 0, nullable) to App.cart."""
    dependencies = [
        ('App', '0039_orders'),
    ]
    operations = [
        migrations.AddField(
            model_name='cart',
            name='amount',
            field=models.IntegerField(default=0, null=True),
        ),
    ]
|
from bitstring import *
class SHA_1:
    """SHA-1 (FIPS 180-4) implemented on 32-bit bitstring.BitArray words."""

    def __init__(self):
        self.mod = pow(2, 32)  # all word addition is modulo 2**32
        # Round constants K_t, one per 20-round group.
        self.K_list = [BitArray("0x5a827999"),
                       BitArray("0x6ed9eba1"),
                       BitArray("0x8f1bbcdc"),
                       BitArray("0xca62c1d6")]
        # Working hash state; re-initialized at the start of every digest().
        self.H = self._initial_state()

    @staticmethod
    def _initial_state():
        """Return a fresh copy of the SHA-1 initial hash value H(0)."""
        return [BitArray("0x67452301"),
                BitArray("0xefcdab89"),
                BitArray("0x98badcfe"),
                BitArray("0x10325476"),
                BitArray("0xc3d2e1f0")]

    def f(self, t, x, y, z):
        """Round function f_t: Ch, Parity, Maj, Parity by 20-round group."""
        if t >= 0 and t <= 19:
            return (x & y) ^ (~x & z)
        elif t >= 20 and t <= 39:
            return x ^ y ^ z
        elif t >= 40 and t <= 59:
            return (x & y) ^ (x & z) ^ (y & z)
        return x ^ y ^ z

    def K(self, t):
        """Round constant K_t (returned as a copy so callers cannot mutate it)."""
        if t >= 0 and t <= 19:
            return self.K_list[0][:]
        elif t >= 20 and t <= 39:
            return self.K_list[1][:]
        elif t >= 40 and t <= 59:
            return self.K_list[2][:]
        return self.K_list[3][:]

    def preprocess(self, text):
        """Pad the UTF-8 encoding of *text* and split it into 512-bit blocks."""
        padded = BitArray(bytes=text.encode("utf-8"))
        # Bug fix: the padded length field must be the bit length of the
        # *encoded* message; len(text)*8 under-counts multi-byte UTF-8 chars.
        l = len(padded)
        k = (448 + (- l - 1) % 512) % 512
        padded = padded + [1] + [0] * k + BitArray(uint = l, length = 64)
        M = []
        for i in range(len(padded) // 512):
            M.append(padded[i * 512:(i + 1) * 512])
        return M

    def add(self, a, b):
        """Add two 32-bit words modulo 2**32."""
        return BitArray(uint = (a.uint + b.uint) % self.mod, length = 32)

    def digest(self, text):
        """Return the hex SHA-1 digest of *text*.

        State is re-initialized on entry so repeated digest() calls on the
        same instance are independent (previously the final state of one
        call seeded the next, producing wrong digests on every call but
        the first).
        """
        self.H = self._initial_state()
        M = self.preprocess(text)
        for i in range(len(M)):
            # Build the 80-word message schedule W.
            W = []
            for t in range(16):
                W.append(M[i].copy()[t * 32:(t + 1) * 32])
            for t in range(16, 80):
                W.append(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])
                W[t].rol(1)
            a = self.H[0][:]
            b = self.H[1][:]
            c = self.H[2][:]
            d = self.H[3][:]
            e = self.H[4][:]
            for t in range(80):
                T = a[:]
                T.rol(5)
                T = self.add(T, self.f(t, b, c, d))
                T = self.add(T, e)
                T = self.add(T, self.K(t))
                T = self.add(T, W[t])
                e = d
                d = c
                c = b
                # rol mutates in place; b is rebound on the next line, so
                # only c keeps a reference to the rotated word.
                c.rol(30)
                b = a
                a = T
            self.H[0] = self.add(a, self.H[0])
            self.H[1] = self.add(b, self.H[1])
            self.H[2] = self.add(c, self.H[2])
            self.H[3] = self.add(d, self.H[3])
            self.H[4] = self.add(e, self.H[4])
        # Concatenate H0..H4 into the 160-bit digest.
        result = []
        for i in range(5):
            result = result + self.H[i]
        return result.hex

    @staticmethod
    def get_bits(n, hash):
        """Return the first n bits of hex digest *hash* as a string ('0b' prefix stripped)."""
        temp = BitArray("0x" + hash)
        return str(temp[:n])[2:]
|
import os
import cv2
import time
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torchvision
import torchvision.transforms as T
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
import argparse
from dataset import FacemaskRecognitionDataset, dir_to_df
from MyModel import get_model
from train import get_transform, collate_fn, get_gpu
from evaluation import calc_iou
# paths
model_path = 'model'  # checkpoint loaded by predict() as f'{model_path}.pkl'
predictions_path = 'prediction.csv'  # CSV written by predict() when save=True
def predict(test_path, model=None, save=True):
    """Run mask detection over every image in *test_path* and score the results.

    @param test_path: directory of test images; ground truth is encoded in
        each file name as '<id>__[x,y,w,h]__<label>.<ext>' -- TODO confirm.
    @param model: optional pre-loaded model; when None, loads
        get_model('fasterrcnn_resnet') weights from f'{model_path}.pkl'.
    @param save: write the per-image predictions CSV to predictions_path.
    @return: (acc_score, iou_score) as floats.
    """
    # Test Dataset
    test_df = dir_to_df(test_path)
    test_dataset = FacemaskRecognitionDataset(test_df, test_path, mode='test', transforms=get_transform())
    # Test data loader
    test_loader = DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        drop_last=False,
        collate_fn=collate_fn
    )
    device = get_gpu()
    if model is None:
        model = get_model('fasterrcnn_resnet')
        model.load_state_dict(torch.load(f'{model_path}.pkl'))
    model.eval()
    model.to(device)
    threshold = 0.5  # detection-score cutoff for a 'True' (proper mask) label
    print('Evaluating')
    iou_scores = []
    acc_scores = []
    # Collect rows in a plain list: per-row DataFrame.append was quadratic
    # and has been removed in pandas 2.0.
    rows = []
    for idx, (images, image_names) in enumerate(test_loader):
        # Forward ->
        images = list(image.to(device) for image in images)
        with torch.no_grad():
            output = model(images)
        # Converting tensors to arrays
        out_boxes = output[0]['boxes'].data.cpu().numpy()
        out_scores = output[0]['scores'].data.cpu().numpy()
        if len(out_boxes) > 0:
            boxes = out_boxes[0]
            label = 'True' if out_scores[0] > threshold else 'False'
        else:
            # No detection: fall back to a fixed centered guess box.
            boxes = [0.25 * images[0].size()[1],
                     0.35 * images[0].size()[2],
                     0.75 * images[0].size()[1],
                     0.65 * images[0].size()[2]]
            label = 'False'
        x = boxes[0]
        y = boxes[1]
        w = boxes[2] - x
        h = boxes[3] - y
        rows.append({"filename": image_names[0], "x": x, "y": y, "w": w, "h": h, "proper_mask": label})
        pred_box = [x, y, w, h]
        # Ground-truth box and label are parsed out of the file name.
        true_box = [int(val) for val in image_names[0].split('__')[1][1:-1].split(',')]
        curr_iou_score = calc_iou(pred_box, true_box)
        iou_scores.append(curr_iou_score)
        true_label = image_names[0].split('__')[2].split('.')[0]
        acc_scores.append(label == true_label)
        if idx % 500 == 0 and idx > 0:
            print(f'Completed {idx}')
    prediction_df = pd.DataFrame(rows, columns=['filename', 'x', 'y', 'w', 'h', 'proper_mask'])
    iou_score = np.mean(iou_scores)
    acc_score = np.mean(acc_scores)
    print(f'IoU score: {iou_score}')
    print(f'acc_score: {acc_score}')
    print(f'\nWriting predictions to: {predictions_path}')
    if save:
        prediction_df.to_csv(predictions_path, index = False)
    return acc_score, iou_score
if __name__=="__main__":
    # Parsing script arguments: a single positional test-directory path.
    parser = argparse.ArgumentParser(description='Process input')
    parser.add_argument('test_path', type=str, help='test directory path')
    args = parser.parse_args()
    predict(args.test_path)
|
#!/usr/bin/env python
from gnuradio import gr
from gnuradio import blocks
from gnuradio import digital
import string_to_list
from frame_sync import frame_sync
class top_block(gr.top_block):
    """Loopback flowgraph: bit vector source -> DBPSK mod -> DBPSK demod ->
    frame sync -> file sink ('output.txt')."""
    def __init__(self):
        gr.top_block.__init__(self)
        ##################################################
        # Variables
        ##################################################
        #Create Input Vector here
        # NOTE(review): vector_source_b needs a sequence of bits; 0 is a
        # placeholder the student must replace per the hint below.
        input_vector = 0 # <-- Change this: *Hint: Use string_to_list.conv_string_to_1_0_list(s)
        ##################################################
        # Blocks
        ##################################################
        # Repeating source of unpacked bits (one bit per byte).
        self.input_vector_source = blocks.vector_source_b(input_vector, True, 1, [])
        self.input_unpacked_to_packed = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST)
        self.mod = digital.dbpsk_mod(
            samples_per_symbol=2,
            excess_bw=0.35,
            mod_code="gray",
            verbose=False,
            log=False)
        self.demod = digital.dbpsk_demod(
            samples_per_symbol=2,
            excess_bw=0.35,
            freq_bw=6.28/100.0,
            phase_bw=6.28/100.0,
            timing_bw=6.28/100.0,
            mod_code="gray",
            verbose=False,
            log=False
        )
        self.output_unpacked_to_packed = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST)
        self.frame_sync = frame_sync() # Your custom block!!!
        # Unbuffered sink so output.txt fills as the graph runs.
        self.output_file_sink = blocks.file_sink(gr.sizeof_char*1, "output.txt", False)
        self.output_file_sink.set_unbuffered(True)
        ##################################################
        # Connections
        ##################################################
        self.connect(self.input_vector_source, self.input_unpacked_to_packed, self.mod, self.demod, self.output_unpacked_to_packed, self.frame_sync, self.output_file_sink)
if __name__ == '__main__':
    # Start the flowgraph, block until it finishes, then stop it.
    tb = top_block()
    tb.start()
    tb.wait()
#    tb.run()
    tb.stop()
|
def part1(nums):
    """Return the product of two distinct entries of *nums* summing to 2020.

    Returns None when no such pair exists.  A single-pass set lookup makes
    this O(n), and -- unlike the original double loop over the full list --
    it never pairs an entry with itself (a lone 1010 no longer yields
    1010 * 1010).
    """
    seen = set()
    for n in nums:
        complement = 2020 - n
        if complement in seen:
            return n * complement
        seen.add(n)
    return None
def part2(nums):
    """Return the product of three distinct entries of *nums* summing to 2020.

    Returns None when no such triple exists.  Fixing one entry and running a
    part1-style set scan over the remainder gives O(n^2) instead of the
    original O(n^3), and guarantees three distinct positions.
    """
    for i, n1 in enumerate(nums):
        target = 2020 - n1
        seen = set()
        for n2 in nums[i + 1:]:
            if target - n2 in seen:
                return n1 * n2 * (target - n2)
            seen.add(n2)
    return None
def main():
    """Read one integer per line from day1.txt and print both puzzle answers."""
    file = 'day1.txt'
    with open(file, 'r') as in_f:
        nums = [int(l.strip()) for l in in_f.readlines()]
    print(part1(nums))
    print(part2(nums))
if __name__ == '__main__':
    main()
# File: ex_8.5-is_prime.py
# Date: 2019-12-20
# Author: "Hannes Thiersen" <hannesthiersen@gmail.com>
# Version: 0.1
# Description:
# A positive whole number n > 2 is prime if no number between 2 and sqrt(n)
# (inclusive) evenly divides n. Write a program that accepts a vlaue of n as
# input and determins if the vlaue is prime. If n is not prime, your program
# should quit as soon as it finds a value that evenly divides n.
#------------------------------------------------------------------------------
# IMPORTS
#------------------------------------------------------------------------------
from math import sqrt
#------------------------------------------------------------------------------
# FUNCTIONS
#------------------------------------------------------------------------------
def getValue():
    """Prompt for a whole number and return it as an int.

    Uses int() instead of the original eval(): eval executes arbitrary
    Python typed by the user, which is unsafe, and the program only needs
    a whole number anyway (int raises ValueError on bad input).
    """
    return int(input("Insert a whole number: "))
#------------------------------------------------------------------------------
def isPrime(num):
    """Return True when *num* is prime (anything below 2 is not prime).

    Trial division only needs to test candidate divisors up to sqrt(num).
    """
    if num < 2:
        return False
    limit = int(sqrt(num))
    return all(num % candidate for candidate in range(2, limit + 1))
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
def main():
    """Read a number from the user and report whether it is prime."""
    number = getValue()
    if isPrime(number):
        print(f"{number} is prime.")
    else:
        print(f"{number} is not prime.")
#------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
|
from .track import track_page_view
def page_view_tracking_middleware(get_response):
    """Django middleware factory: record a page view for every 200 response."""
    def _tracked(request):
        response = get_response(request)
        # Only successful (HTTP 200) responses count as page views.
        if response.status_code == 200:
            track_page_view(request)
        return response
    return _tracked
|
# Converts a European floor number to the US numbering convention
# (European floors start at 0, US floors start at 1).
inp = input ("European Floor? ") #ask the user for a European floor number
usf = int (inp) +1 # convert the string to an integer and add 1 (no validation: int() raises on bad input)
print ("US floor", usf) # displays the result
# in the future come back and learn data validation
# 97. k-meansクラスタリング
# 96の単語ベクトルに対して,k-meansクラスタリングをクラスタ数k=5として実行せよ.
import numpy as np
import dill
from sklearn.cluster import KMeans
def save(file_name, data):
    """Serialize *data* with dill into ./dills/<file_name>."""
    with open(f"./dills/{file_name}", 'wb') as sink:
        dill.dump(data, sink)
def load(file_name):
    """Load and return the dill-serialized object stored at ./dills/<file_name>."""
    with open(f"./dills/{file_name}", 'rb') as source:
        return dill.load(source)
# Cluster the word vectors and write '<country> <cluster>' lines.
countries = load('countries')
X = load('X')
# k=5 as required by the exercise statement above; the previous
# n_clusters=2 contradicted the spec ("クラスタ数k=5").
kmeans = KMeans(n_clusters=5, random_state=0).fit(np.array(X))
with open('out97.txt', 'w+', encoding='utf-8') as f:
    for country, cluster in zip(countries, kmeans.labels_):
        print(f'{country} {cluster}', file=f)
|
import matplotlib.pyplot as plt
import math
# Parse a UrQMD .f14 output file, collecting pion energies split by charge
# and by production mechanism.  NOTE(review): the file handle is never
# closed -- acceptable for a one-shot script.
data = open('pions.f14', 'r')
k = 0  # line counter within the current event block
heading_id = 'UQMD'  # marker that starts a new event header
pions = []        # energies of all pions passing the kinematic cut
pions_plus = []
pions_minus = []
pions_0 = []
strings = []      # pions from string processes
resonances = []   # pions from resonance decays
number_of_events = 100000  # normalization used later by plot()
for line in data:
    line = line.split(' ')
    temp_l = []
    k += 1
    for j in line:#delete '0' and '\n' elements from sublists of data_l
        if len(j) != 0 and '\n' not in j:
            temp_l.append(j)
        elif '\n' in j:
            temp_l.append(j[0:len(j)-1])
    line = temp_l
    # The event header is skipped by counting lines; when the 20th line is a
    # new 'UQMD' header, reset the counter and move on.
    if k == 20:
        if line[0] == heading_id:
            k = 0
            continue
        # Column 9 = particle type id ('101' = pion); the cut keeps
        # backward-going particles (column 7 < 0) with a small transverse
        # ratio -- TODO confirm exact meaning of columns 4-7 in the f14 format.
        if line[9] == '101' and (float(line[5])**2 + float(line[6])**2)**(1/2)/float(line[7])\
                <= 0.105104235266 and float(line[7]) < 0:
            pions.append(float(line[4]))
            # Column 10: charge code ('2' = pi+, '-2' = pi-, '0' = pi0) --
            # presumably 2*charge; verify against the UrQMD file format docs.
            if line[10] == '2':
                pions_plus.append(float(line[4]))
            elif line[10] == '-2':
                pions_minus.append(float(line[4]))
            elif line[10] == '0':
                pions_0.append(float(line[4]))
            # Column 14: production process code (20 = resonance decay,
            # 15/23/24/27/28 = string processes) -- TODO confirm codes.
            if line[14] == '20':
                resonances.append(float(line[4]))
            elif line[14] in ['15', '23', '24', '27', '28']:
                strings.append(float(line[4]))
        # Stay at "line 20" so every following particle line is processed
        # until the next header resets the counter.
        k = 19
print(pions)
delta_e = 0.01  # energy bin width used by plot()
def plot(array, label = 'None', m = 0.140):
    """Draw a step plot of the normalized energy spectrum of *array*.

    Bins energies with width delta_e, then plots
    N / sqrt(E^2 - m^2) / delta_e / number_of_events against the bin's
    upper edge for every non-empty bin above the mass threshold m.
    Uses the module-level globals delta_e and number_of_events.
    """
    n_bins = int(max(array)//delta_e + 1)
    bin_edges = [delta_e*(b + 1) for b in range(n_bins)]
    counts = [0]*n_bins
    for energy in array:
        counts[int(energy//delta_e)] += 1
    xs = []
    ys = []
    for edge, count in zip(bin_edges, counts):
        # Skip bins at/below the particle mass and empty bins.
        if edge > m and count != 0:
            xs.append(edge)
            ys.append(count/(math.sqrt(edge**2 - m**2))/delta_e/number_of_events)
    plt.step(xs, ys, label = label)
# Figure 1: spectra of all pions and of each charge state.
plt.figure(1)
plot(pions, 'pi')
plot(pions_plus)
plot(pions_minus)
plot(pions_0)
plt.yscale('log')
# Figure 2: all pions split by production mechanism.
plt.figure(2)
plot(pions, 'pi')
plot(resonances, 'resonances')
plot(strings, 'strings')
plt.yscale('log')
plt.legend()
plt.show()
|
# See README
from microprediction import MicroReader
import random
# Sample stream names from the microprediction network (kept for reference).
EXAMPLE_STREAMS = ['electricity-lbmp-nyiso-west.json','electricity-load-nyiso-nyc.json']
def random_name():
    """Choose a random stream name (parent streams only: names with '~' are skipped)."""
    reader = MicroReader()
    candidates = [name for name in reader.get_stream_names() if '~' not in name]
    return random.choice(candidates)
def get_values(name:str=None)->[float]:
    """Get a real world time series, oldest value first.

    When *name* is omitted, a random stream is chosen.
    """
    reader = MicroReader()
    chosen = name if name is not None else random_name()
    lagged = reader.get_lagged_values(name=chosen)
    # Lagged values arrive newest-first; reverse into chronological order.
    return list(reversed(lagged))
if __name__=='__main__':
    # Smoke test: fetch one random stream and show how many points came back.
    values = get_values()
    print(len(values))
|
from django.db import models
from django.utils.translation import gettext_lazy as _
class SentMessage(models.Model):
    """A message submitted through the site's contact form."""
    name = models.CharField(max_length=255, verbose_name=_('Имя'))
    phone = models.CharField(max_length=255, verbose_name=_('Телефон'))
    email = models.CharField(max_length=255, verbose_name=_('Email'))
    message = models.TextField(verbose_name=_('Сообщения'))
    # Set once when the row is created.
    sent_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f'{self.name}, {self.phone}, {self.email}, {self.message}, {self.sent_at}'
    class Meta:
        verbose_name = _('Сообщения')
        verbose_name_plural = _('Сообщении')
        ordering = '-sent_at',
class RequisitesPhone(models.Model):
    """Company requisite: a phone number ('Телефон')."""
    title = models.CharField(max_length=255, verbose_name=_('Телефон'))
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _('Телефон')
        verbose_name_plural = _('Телефоны')
        ordering = '-pk',
class RequisitesEmail(models.Model):
    """Company requisite: an e-mail address ('Электронная почта')."""
    title = models.CharField(max_length=255, verbose_name=_('Электронная почта'))
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _('Электронная почта')
        verbose_name_plural = _('Электронные почты')
        ordering = '-pk',
class RequisitesAddress(models.Model):
    """Company requisite: a postal address ('Адрес')."""
    title = models.CharField(max_length=255, verbose_name=_('Адрес'))
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _('Адрес')
        verbose_name_plural = _('Адреса')
        ordering = '-pk',
class RequisitesCompanyName(models.Model):
    """Company requisite: the organization name ('Наименование организации')."""
    title = models.CharField(max_length=255, verbose_name=_('Наименование организации'))
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _('Наименование организации')
        verbose_name_plural = _('Наименование организации')
        ordering = '-pk',
class RequisitesBank(models.Model):
    """Company requisite: bank details ('Банковские реквизиты')."""
    title = models.CharField(max_length=255, verbose_name=_('Банковские реквизиты'))
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _('Банковские реквизиты')
        verbose_name_plural = _('Банковские реквизиты')
        ordering = '-pk',
class RequisitesINN_KPP(models.Model):
    """Company requisite: tax identifiers ('ИНН / КПП')."""
    title = models.CharField(max_length=255, verbose_name=_('ИНН / КПП'))
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _('ИНН / КПП')
        verbose_name_plural = _('ИНН / КПП')
        ordering = '-pk',
|
# -----------------------------------------------------------
# Second Attempt
# Runtime: 48 ms, faster than 78.44% of Python3 online submissions for Two Sum.
# Memory Usage: 14.4 MB, less than 34.18% of Python3 online submissions for Two Sum.
# Comment: NA
# -----------------------------------------------------------
def twoSum(self, nums: List[int], target: int) -> List[int]:
    """Return indices [i, j] with i < j and nums[i] + nums[j] == target.

    Single pass with a value -> index map: for each element, check whether
    its complement was already seen.  (Dropped the redundant ``.keys()``
    call -- membership on the dict itself is the idiomatic O(1) test.)
    """
    index_of = {}
    for idx, num in enumerate(nums):
        complement = target - num
        if complement in index_of:
            return [index_of[complement], idx]
        index_of[num] = idx
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Parselmouth - Tree Builder
Ad providers such as DFP employ complex tree structures to organize
zones or adunits within their system. The TreeBuilder class helps
build and manipulate collections of objects with these tree structures.
"""
# Future-proof
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Standard Library Imports
from collections import defaultdict
# Parselmouth Imports
from parselmouth.constants import ParselmouthTargetTypes
from parselmouth.targeting import AdUnit
from parselmouth.targeting import Custom
from parselmouth.targeting import Geography
class NodeTree(object):
    """
    Class which represents a tree of ObjectModels.

    Each NodeTree holds one `node` (an ObjectModel, or None for a synthetic
    root), a list of child NodeTrees in `children`, and an optional integer
    `depth`.
    NOTE(review): __eq__ is defined without __hash__, which makes NodeTree
    unhashable under Python 3 -- confirm trees are never used as set members
    or dict keys.
    """
    def __init__(self, node, children, depth=None):
        # node: ObjectModel|None; children: list(NodeTree); depth: int|None
        self.node = node
        self.children = children
        self.depth = depth
    def __ne__(self, other):
        return not(self == other)
    def __eq__(self, other):
        """Structural equality: same node and depth, children matched as an unordered collection."""
        if self.node != other.node or self.depth != other.depth:
            return False
        if len(self.children) != len(other.children):
            return False
        # Order-insensitive child comparison: every child needs a match
        # somewhere in other.children.
        for child in self.children:
            matching_child = [c for c in other.children if c == child]
            if not matching_child:
                return False
        return True
    def to_doc(self):
        """Serialize this tree to a nested dict (nodes via their own to_doc())."""
        return {
            'node': self.node.to_doc() if self.node else None,
            'children': [c.to_doc() for c in self.children],
            'depth': self.depth,
        }
    def get_subtree(self, field_name, field_value):
        """
        Find a subtree within a list of NodeTrees with the field value given
        (depth-first search on the nodes' attributes).
        @param field_name: ParselmouthFields
        @param field_value: str
        @return: NodeTree, or None when no node matches
        """
        subtree = None
        if self.children:
            for branch in self.children:
                _node = branch.node
                if _node and vars(_node).get(field_name) == field_value:
                    subtree = branch
                elif branch.children:
                    # Recurse into deeper levels of this branch.
                    subtree = branch.get_subtree(
                        field_name,
                        field_value,
                    )
                if subtree:
                    return subtree
        return subtree
    def get_subtree_parents(self, field_name, field_value):
        """
        Get the node associated to the given field_name/field_value,
        then return a list of all parent nodes for this node
        (ordered from the root downwards).
        @param field_name: ParselmouthFields
        @param field_value: str
        @return: list(ObjectModel)
        """
        parent_nodes = []
        # If the target lives somewhere below this node, this node is a parent.
        if self.node and self.get_subtree(field_name, field_value):
            parent_nodes.append(self.node)
        if self.children:
            for branch in self.children:
                parent_nodes += branch.get_subtree_parents(field_name, field_value)
        return parent_nodes
    def get_max_depth(self):
        """
        Get the maximum depth of any node within the tree
        @return: int|None
        """
        # NOTE(review): max() over a mix of None and int raises TypeError on
        # Python 3 -- confirm depth is always set below the root.
        max_depth = self.depth
        if self.children:
            for branch in self.children:
                max_depth = max([max_depth, branch.get_max_depth()])
        return max_depth
    def flatten(self, depth=None, only_leaves=False):
        """
        Get a flat list of all descendants from a subtree.
        If depth is given, give only nodes at the given depth
        @param depth: int|None
        @param only_leaves: bool, only return maximal depth nodes
        @return: list(ObjectModel)
        """
        if only_leaves:
            # The two filters are mutually exclusive.
            assert depth is None
        descendants = []
        if self.node and \
                (depth is None or self.depth == depth) and \
                (only_leaves is False or len(self.children) == 0):
            descendants.append(self.node)
        if self.children:
            for branch in self.children:
                if branch.children:
                    descendants += branch.flatten(
                        depth=depth,
                        only_leaves=only_leaves,
                    )
                elif branch.node and (depth is None or branch.depth == depth):
                    # Childless branch: a leaf, so it satisfies only_leaves too.
                    descendants.append(branch.node)
        return descendants
    def filter_tree_by_key(self, key, filter_ids):
        """
        Filter a given tree to include branches that are either
        included in the set of filter_ids or at least one of their
        descendants is in the set of filter_ids
        @param key: ParselmouthField, key to filter on
        @param filter_ids: set(str) (a list is accepted and converted)
        @return: NodeTree
        """
        if isinstance(filter_ids, list):
            filter_set = set(filter_ids)
        else:
            filter_set = filter_ids
        assert isinstance(filter_set, set)
        filtered_children = []
        for branch in self.children:
            _descendants = branch.flatten()
            _descendant_ids = set([vars(d)[key] for d in _descendants])
            # if some descendant of this branch is in filter_ids
            # keep this branch and update its children
            if filter_set.intersection(_descendant_ids):
                new_branch = branch.filter_tree_by_key(
                    key, filter_set,
                )
                if new_branch:
                    filtered_children.append(new_branch)
        return NodeTree(self.node, filtered_children, self.depth)
    def filter_tree(self, filter_ids):
        """
        Filter a given tree to include branches that are either
        included in the set of filter_ids or at least one of their
        descendants is in the set of filter_ids; matching is done on
        the 'external_name' field.
        @param filter_ids: set(str)
        @return: NodeTree
        """
        return self.filter_tree_by_key('external_name', filter_ids)
    def update_external_names(self, id_map):
        """
        Set external names of nodes in given tree to new values
        given by the dictionary id_map: id field --> external_name field.
        NOTE: This edits the nodes of this tree in place
        @param id_map: dict
        """
        current_node = self.node
        if current_node:
            _external_name = id_map.get(current_node.id)
            if _external_name:
                current_node.external_name = _external_name
        for branch in self.children:
            branch.update_external_names(id_map)
class TreeBuilder(object):
    """
    Interface for building ad provider data into trees.
    Subjects include:
        * Demographic data
        * Geographic data
        * Ad Placement data
    """
    TARGET_CLASS_MAP = {
        ParselmouthTargetTypes.adunit: AdUnit,
        ParselmouthTargetTypes.geography: Geography,
        ParselmouthTargetTypes.demographics: Custom,
        ParselmouthTargetTypes.ad_position: Custom,
        ParselmouthTargetTypes.custom: Custom,
    }
    """
    dict, associate to each target_type the appropriate targeting class
    """
    INTERFACE_FUNCTION_MAP = {
        ParselmouthTargetTypes.adunit: lambda i: i.get_adunit_targets(),
        ParselmouthTargetTypes.geography: lambda i: i.get_geography_targets(),
        ParselmouthTargetTypes.demographics: lambda i: i.get_custom_targets(),
        ParselmouthTargetTypes.ad_position: lambda i: i.get_custom_targets(),
        ParselmouthTargetTypes.custom: lambda i: i.get_custom_targets(),
    }
    """
    dict, associate to each target_type the interface target getter function
    """
    def __init__(self, provider_name, interface=None):
        """
        Constructor
        @param provider_name: ParselmouthProviders
        @param interface: Interface for provider
        """
        self.provider_name = provider_name
        self.interface = interface
    def _recursive_make_tree(self, pid, parents, node_map, depth=0):
        """
        Recursively construct the list of NodeTree branches rooted at the
        children of pid.
        @param pid: parent id
        @param parents: dict, the keys are parent ids, and values
            are all of the children of that id
        @param node_map: dict, node documents associated with each id
        @param depth: int, recursive handle on node depth in a tree
        @return: list(NodeTree)
        """
        trees = []
        for child in parents.get(pid, []):
            _id = child.id
            node = node_map.get(_id)
            tree = NodeTree(
                node,
                children=self._recursive_make_tree(
                    _id, parents, node_map, depth + 1
                ),
                depth=depth,
            )
            trees.append(tree)
        return trees
    def build_tree(self, nodes):
        """
        Interface for translating a flat data structure into a native
        hierarchical type.
        @param nodes: list(TargetingModel) with id and parent_id fields
        @return: NodeTree
        """
        node_map = {}
        # Create flat list of all parents and their immediate children
        parents = defaultdict(list)
        for node in nodes:
            parents[node.parent_id].append(node)
            node_map[node.id] = node
        # Precompute the set of every child id once.  The previous version
        # rebuilt a children-id list for every candidate parent, which was
        # O(parents * nodes); a single set makes each maximal test O(1).
        all_child_ids = set()
        for children in parents.values():
            all_child_ids.update(child.id for child in children)
        # Construct tree by building trees at each maximal parent
        maximal_trees = []
        for pid in parents.keys():
            # pid is maximal iff it is not itself a child of any other id
            if pid in all_child_ids:
                continue
            max_node = node_map.get(pid)
            children = self._recursive_make_tree(
                pid, parents, node_map,
            )
            if max_node:
                maximal_trees.append(NodeTree(max_node, children))
            else:
                # Maximal id without a node document of its own: promote
                # its children to the top level.
                maximal_trees.extend(children)
        return NodeTree(
            node=None,
            children=maximal_trees,
        )
    def construct_tree(self, target_type):
        """
        Get all data of type target_type from the ad provider,
        and build a tree.
        @param target_type: ParselmouthTargetTypes
        @return: NodeTree
        """
        assert target_type in ParselmouthTargetTypes
        assert self.interface
        nodes = self.INTERFACE_FUNCTION_MAP[target_type](self.interface)
        return self.build_tree(nodes)
    def _convert_node_tree_to_doc(self, tree):
        """
        Convert NodeTree into a dict that can be written to mongo
        @param tree: NodeTree
        @return: dict
        """
        return tree.to_doc()
    def _convert_doc_to_node_tree(self, doc, target_type):
        """
        Convert a mongo tree doc into a NodeTree Object
        @param doc: dict
        @param target_type: ParselmouthTargetTypes
        @return: NodeTree
        """
        _target_class = self.TARGET_CLASS_MAP[target_type]
        if doc['node']:
            node = _target_class.from_doc(doc['node'])
        else:
            node = None
        children = doc.get('children', [])
        depth = doc.get('depth')
        return NodeTree(
            node=node,
            children=[self._convert_doc_to_node_tree(c, target_type) for c in children],
            depth=depth,
        )
|
import logging
from django.core.management.base import BaseCommand
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError, DataError
from dashboard.models import Contract
from utils.django.models import defaults_dict
from ._excel_contract import ExcelContract
logger = logging.getLogger('command')
class Command(BaseCommand):
    help = 'Interact with the Contract table in the database'
    def add_arguments(self, parser):
        # Single optional argument: path of the Excel workbook to import.
        parser.add_argument(
            '-f',
            '--file',
            dest='filename',
            help='Specify import EXCEL file',
        )
    def handle(self, *args, **options):
        """
        Import/refresh Contract rows from the given Excel file.

        Each row must carry an "id" column; rows are upserted with
        update_or_create, and per-row database errors are logged and
        skipped so one bad row does not abort the whole import.
        """
        self.stdout.write("[CONTRACT] Waiting...")
        if options['filename'] is not None:
            excel = ExcelContract(options['filename'])
            nb_before = Contract.objects.count()
            nb_update = 0
            # NOTE(review): excel.ERROR is presumably set when the workbook
            # could not be read — confirm in ExcelContract.
            if not excel.ERROR:
                for row in excel.read():
                    logger.info(row)
                    pk = row.pop("id")
                    try:
                        # Keep only the columns that map to Contract fields.
                        defaults = defaults_dict(Contract, row, "id")
                        obj, created = Contract.objects.update_or_create(pk=pk, defaults=defaults)
                        if not created:
                            nb_update += 1
                    except IntegrityError as err:
                        logger.error(f"[CONTRACT_CMD] IntegrityError: {pk} - {err}")
                    except Contract.MultipleObjectsReturned as err:
                        logger.error(f"[CONTRACT_CMD] MultipleObjectsReturned: {pk} - {err}")
                    except DataError as err:
                        logger.error(f"[CONTRACT_CMD] DataError: {pk} - {err}")
                    except ValidationError as err:
                        logger.error(f"[CONTRACT_CMD] ValidationError: {pk} - {err}")
                # Additions are inferred from the row-count delta, so rows
                # that failed above are excluded automatically.
                nb_after = Contract.objects.count()
                self.stdout.write(
                    self.style.SUCCESS(
                        f"[CONTRACT] Data update completed: EXCEL_LINES = {excel.nrows} | ADD = " +
                        f"{nb_after - nb_before} | UPDATE = {nb_update} | TOTAL = {nb_after}"
                    )
                )
            else:
                self.stdout.write(self.style.WARNING("[CONTRACT] No excel file found"))
        else:
            self.stdout.write(self.style.WARNING("[CONTRACT] Path to excel file missing !"))
|
# -*- coding: utf-8 -*-
def exer1():
    """Bouncing-ball exercise: a ball dropped from 100 m rebounds to half
    its previous height each time; after 10 bounces, print the total
    distance travelled and the rebound height."""
    height = 100
    total = 100
    for _ in range(10):
        height = height / 2
        total += height * 2
    print("小球共经过: " + str(total) + "米")
    print("小球反弹高度: " + str(height) + "米")
def exer2():
    """Print the value of 1! + 2! + ... + 20!.

    Fixed to avoid shadowing the builtin `sum` with a local variable.
    """
    factorial = 1
    total = 0  # accumulator; previously named `sum`, shadowing the builtin
    for n in range(1, 21):
        factorial *= n
        total += factorial
    print(total)
def exer3(n):
    """Print the digit count of n and its digits in reverse order.

    Expects a non-negative integer (the caller prompts for a positive
    one).  Fixed: n == 0 previously printed a digit count of 0 and an
    empty reversed string; 0 is a one-digit number.
    """
    count = 0
    reverse_str = ""
    if n == 0:
        count = 1
        reverse_str = "0"
    while n != 0:
        reverse_str += str(n % 10)
        n = n // 10
        count += 1
    print("位数:" + str(count))
    print("反向输出:" + reverse_str)
# Manual test entry point: uncomment one of the exercises below to run it.
if __name__ == "__main__":
    print("Hello world!")
    # exer1()
    # exer2()
    # n = int(input("请输入一个正整数:"))
    # exer3(n)
|
import json
import datetime
import time
import os
import dateutil.parser
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# --- Helpers that build all of the responses ---
def elicit_intent(session_attributes, message):
    """Build a Lex ElicitIntent response wrapping *message* as plain text."""
    dialog_action = {
        'type': 'ElicitIntent',
        'message': {
            'contentType': 'PlainText',
            'content': message,
        },
    }
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': dialog_action,
    }
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """Build a Lex ElicitSlot response re-asking for *slot_to_elicit*."""
    response = {'sessionAttributes': session_attributes}
    response['dialogAction'] = {
        'type': 'ElicitSlot',
        'intentName': intent_name,
        'slots': slots,
        'slotToElicit': slot_to_elicit,
        'message': message,
    }
    return response
def confirm_intent(session_attributes, intent_name, slots, message):
    """Build a Lex ConfirmIntent response asking the user to confirm."""
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': dict(
            type='ConfirmIntent',
            intentName=intent_name,
            slots=slots,
            message=message,
        ),
    }
def close(session_attributes, fulfillment_state, message):
    """Build a Lex Close response ending the dialog with *fulfillment_state*."""
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': {
            'type': 'Close',
            'fulfillmentState': fulfillment_state,
            'message': message,
        },
    }
def delegate(session_attributes, slots):
    """Build a Lex Delegate response handing slot filling back to Lex."""
    action = {'type': 'Delegate', 'slots': slots}
    return {'sessionAttributes': session_attributes, 'dialogAction': action}
# --- Helper Functions ---
def is_number(s):
    """Return True if *s* converts cleanly to int.

    Fixed: int(None) (or any non-string, non-number) raises TypeError,
    not ValueError, so the original crashed on unfilled slots; both are
    now treated as "not a number".
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
def safe_int(n):
    """
    Safely convert n value to int; None is passed through unchanged.
    """
    return int(n) if n is not None else n
def try_ex(func):
    """
    Call the passed-in function; return None if it raises KeyError.
    Intended for safe dictionary access.  Note that routing access
    through a callable has a small performance cost.
    """
    try:
        result = func()
    except KeyError:
        result = None
    return result
def build_validation_result(isvalid, violated_slot, message_content):
    """Package a slot-validation verdict in the shape Lex expects."""
    message = {'contentType': 'PlainText', 'content': message_content}
    return {
        'isValid': isvalid,
        'violatedSlot': violated_slot,
        'message': message,
    }
def isvalid_city_location(city):
    """Return True if *city* (case-insensitive) is a supported location.

    Uses a set for O(1) membership instead of scanning a list.
    """
    valid_cities = {'new york', 'los angeles', 'chicago', 'houston', 'philadelphia', 'phoenix', 'san antonio',
                    'san diego', 'dallas', 'san jose', 'austin', 'jacksonville', 'san francisco', 'indianapolis',
                    'columbus', 'fort worth', 'charlotte', 'detroit', 'el paso', 'seattle', 'denver',
                    'washington dc', 'manhattan', 'queens', 'bay area',
                    'memphis', 'boston', 'nashville', 'baltimore', 'portland'}
    return city.lower() in valid_cities
def isvalid_date(date):
    """Return True if *date* is a string dateutil can parse as a date."""
    try:
        dateutil.parser.parse(date)
    except ValueError:
        return False
    return True
def isvalid_cuisine(cuisine_type):
    """Return True if *cuisine_type* (case-insensitive) is a supported cuisine.

    Uses a set for O(1) membership instead of scanning a list.
    """
    valid_cuisines = {'chinese', 'mexican', 'newamerican', 'halal', 'italian', 'japanese'}
    return cuisine_type.lower() in valid_cuisines
def validate_dining(slots):
    """Validate the DiningSuggestions slots filled so far.

    Returns {'isValid': True} when everything provided is acceptable,
    otherwise a build_validation_result dict naming the first bad slot.

    Fixes: the original crashed with TypeError when DiningTime was set
    while DiningDate was still None, and it called strptime on the time
    before the HH:MM length check, so malformed times raised ValueError
    instead of producing a validation message.
    """
    location = try_ex(lambda: slots['Location'])
    cuisine_type = try_ex(lambda: slots['CuisineType'])
    num_people = safe_int(try_ex(lambda: slots['NumPeople']))
    dining_date = try_ex(lambda: slots['DiningDate'])
    dining_time = try_ex(lambda: slots['DiningTime'])
    email = try_ex(lambda: slots['Email'])
    if location and not isvalid_city_location(location):
        return build_validation_result(
            False,
            'Location',
            'We currently do not support {} as a valid destination. Can you try a different city/location?'.format(location)
        )
    if cuisine_type and not isvalid_cuisine(cuisine_type):
        return build_validation_result(
            False,
            'CuisineType',
            'We currently do not provide {} Cuisine. Can you try a different Cuisine Type?'.format(cuisine_type)
        )
    if num_people:
        if num_people >= 8:
            return build_validation_result(False, 'NumPeople', 'We currently can only hold < 8 people. Try with less people?')
    if dining_date is not None:
        if not isvalid_date(dining_date):
            return build_validation_result(False, 'DiningDate', 'Sorry. We don\'t recognize the date you entered. Can you enter again?')
        elif datetime.datetime.strptime(dining_date, '%Y-%m-%d').date() < datetime.date.today():
            return build_validation_result(False, 'DiningDate', 'You can reserve from today onwards. What day would you like to reserve?')
    if dining_time is not None:
        # Check the HH:MM shape first so malformed times are reported,
        # not raised.
        if len(dining_time) != 5:
            return build_validation_result(False, 'DiningTime', 'Sorry. We don\'t recognize the time you entered. Can you enter again?')
        # Same-day bookings need at least one hour of lead time; only
        # check this when a (valid) date has actually been provided.
        if dining_date is not None:
            try:
                booking = datetime.datetime.strptime(dining_date + " " + dining_time, '%Y-%m-%d %H:%M')
            except ValueError:
                return build_validation_result(False, 'DiningTime', 'Sorry. We don\'t recognize the time you entered. Can you enter again?')
            if booking.date() == datetime.date.today():
                if booking < (datetime.datetime.now() + datetime.timedelta(hours=1)):
                    return build_validation_result(False, 'DiningTime', 'Sorry. If you book today\'s resturant, you can only book 1 hours after current time. Can you enter again?')
    return {'isValid': True}
# --- DiningSuggestionsIntent ---
def diningsuggestions_intent(intent_request):
    """
    Handle a DiningSuggestionsIntent dialog turn: stash the in-flight
    reservation in session attributes, validate the slots filled so far,
    then either re-elicit the first invalid slot or delegate slot
    filling back to Lex.
    """
    location = try_ex(lambda: intent_request['currentIntent']['slots']['Location'])
    cuisine_Type = try_ex(lambda: intent_request['currentIntent']['slots']['CuisineType'])
    num_people = safe_int(try_ex(lambda: intent_request['currentIntent']['slots']['NumPeople']))
    dining_date = try_ex(lambda: intent_request['currentIntent']['slots']['DiningDate'])
    dining_time = try_ex(lambda: intent_request['currentIntent']['slots']['DiningTime'])
    Email = try_ex(lambda: intent_request['currentIntent']['slots']['Email'])
    session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    # Load confirmation history and track the current reservation.
    reservation = json.dumps({
        'ReservationType': 'Dining',
        'Location':location,
        'CuisineType': cuisine_Type,
        'NumPeople': num_people,
        'DiningDate': dining_date,
        'DiningTime': dining_time,
        'email': Email,
    })
    session_attributes['currentReservation'] = reservation
    # Validate any slots which have been specified. If any are invalid, re-elicit for their value
    validation_result = validate_dining(intent_request['currentIntent']['slots'])
    if not validation_result['isValid']:
        slots = intent_request['currentIntent']['slots']
        # Clear the offending slot so Lex asks for it again.
        slots[validation_result['violatedSlot']] = None
        return elicit_slot(
            session_attributes,
            intent_request['currentIntent']['name'],
            slots,
            validation_result['violatedSlot'],
            validation_result['message']
        )
    # Everything provided so far is valid: let Lex keep filling slots.
    return delegate(session_attributes, intent_request['currentIntent']['slots'])
# --- ThankYou Intent ---
def thankyou_intent(intent_request):
    """Respond to thanks by closing the dialog as fulfilled."""
    session_attributes = intent_request['sessionAttributes']
    if session_attributes is None:
        session_attributes = {}
    farewell = {
        'contentType': 'PlainText',
        'content': 'You\'re welcome.'
    }
    return close(session_attributes, 'Fulfilled', farewell)
# --- greetIntents ---
def greet_intent(intent_request):
    """Open the conversation by asking the user what they need."""
    session_attributes = intent_request['sessionAttributes']
    if session_attributes is None:
        session_attributes = {}
    return elicit_intent(session_attributes, 'Hi there, how can I help?')
# --- Intents ---
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot.
    Routes to the matching intent handler via a dispatch table.
    """
    logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))
    intent_name = intent_request['currentIntent']['name']
    handlers = {
        "GreetingIntent": greet_intent,
        "ThankYouIntent": thankyou_intent,
        "DiningSuggestionsIntent": diningsuggestions_intent,
    }
    handler = handlers.get(intent_name)
    if handler is None:
        raise Exception('Intent with name ' + intent_name + ' not supported')
    return handler(intent_request)
# --- Main handler ---
def lambda_handler(event, context):
    """
    Route the incoming request based on intent.
    The JSON body of the request is provided in the event slot.
    """
    # By default, treat the user request as coming from the America/New_York time zone.
    os.environ['TZ'] = 'America/New_York'
    time.tzset()  # NOTE: tzset() is Unix-only; fine on AWS Lambda.
    logger.debug('event.bot.name={}'.format(event['bot']['name']))
    return dispatch(event)
|
import argparse
import pandas as pd
import requests
from bs4 import BeautifulSoup
import locale
from joblib import Parallel, delayed
import logging
from pathlib import Path
def get_gcd_single_pair(pair):
    """Scrape gcmap.com for the great-circle distance of one airport pair.

    @param pair: 6-character string of two concatenated 3-letter codes,
        e.g. "JFKLAX".
    @return: {'NDOD': pair, 'gcd_mile': int} or None when the pair is
        malformed or the distance table cannot be found on the page.
    """
    # en_US locale so locale.atoi can parse thousands separators.
    locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
    if len(pair) != 6:
        return None
    org = pair[:3]
    dst = pair[3:]
    url = f"http://www.gcmap.com/mapui?P={org}-{dst}"
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, "lxml")
    gcd_table = soup.find("table", attrs={'id': 'mdist'})
    # Fix: the original assumed the table always exists and crashed with
    # AttributeError on unknown pairs or layout changes; fail soft like
    # the malformed-pair case instead.
    if gcd_table is None or gcd_table.tbody is None:
        return None
    distance_str = gcd_table.tbody.find("td", attrs={'class': 'd'})
    if distance_str is None:
        return None
    return {'NDOD': pair, 'gcd_mile': locale.atoi(distance_str.text.split(' ')[0])}
if __name__ == '__main__':
    logger = logging.getLogger('gcdCalculatorApp')
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-a", "--arps", help="Provide File Path which has airport pairs in each row without header", default=None, type=Path)
    parser.add_argument("-o", "--out", help="Provide File path of the output gcd file", default = None, type = Path)
    args = parser.parse_args()
    # Input: one 6-character airport pair per line (e.g. "JFKLAX").
    with open(args.arps) as f:
        arp_pairs = f.read().splitlines()
    # Scrape every pair in parallel and write the distances to CSV.
    result = pd.DataFrame(Parallel(n_jobs=-1)(delayed(get_gcd_single_pair)(arp_pair) for arp_pair in arp_pairs))
    result.to_csv(args.out, index=False)
|
from dataclasses import dataclass
from position import Position, EarthPosition
@dataclass(eq=True, frozen=False)
class Location:
    """A named geographic place.

    name: human-readable place name; must be non-empty.
    position: coordinates of the place.
    """
    name: str
    position: Position
    def __post_init__(self):
        # Reject empty names eagerly so an invalid Location can't be built.
        if self.name == "":
            raise ValueError("Location name cannot be empty")
# Ready-made city locations (EarthPosition takes latitude, longitude in degrees).
hong_kong = Location("Hong Kong", EarthPosition(22.29, 114.16))
stockholm = Location("Stockholm", EarthPosition(59.33, 18.06))
cape_town = Location("Cape Town", EarthPosition(-33.93, 18.42))
rotterdam = Location("Rotterdam", EarthPosition(51.96, 4.47))
maracaibo = Location("Maracaibo", EarthPosition(10.65, -71.65))
|
import matplotlib.pyplot as plt
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.utils import np_utils
# Legacy Keras 1.x/2.x API: 'th' = channels-first ordering (channels, rows, cols).
K.set_image_dim_ordering('th')
# Fix the RNG seed for reproducible weight init and shuffling.
seed = 120
np.random.seed(seed)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten the 28x28 images into 784-element vectors for the MLP.
num_pixels = x_train.shape[1] * x_train.shape[2]
x_train = x_train.reshape(x_train.shape[0], num_pixels).astype('float32')
x_test = x_test.reshape(x_test.shape[0], num_pixels).astype('float32')
# Scale pixel intensities from [0, 255] to [0, 1].
x_train = x_train / 255
x_test = x_test / 255
# One-hot encode the digit labels.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def baseline_model():
    """Baseline MLP: one wide ReLU hidden layer plus a softmax output."""
    layers = [
        Dense(num_pixels,
              input_dim=num_pixels,
              kernel_initializer='normal',
              activation='relu'),
        Dense(num_classes,
              kernel_initializer='normal',
              activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
model = baseline_model()
# Train for 10 epochs, evaluating on the test split after each epoch.
model.fit(x_train,
          y_train,
          validation_data=(x_test, y_test),
          epochs=10,
          batch_size=200,
          verbose=1)
scores = model.evaluate(x_test, y_test, verbose=0)
# scores[1] is accuracy; report the complementary error rate.
print('Baseline error: %.2f%%' % (100 - scores[1] * 100))
# Re-seed and reload the data in image form for the CNN:
# (samples, channels=1, 28, 28) to match the 'th' dim ordering set above.
seed = 120
np.random.seed(seed)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28).astype('float32')
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28).astype('float32')
# Scale pixels to [0, 1] and one-hot encode the labels again.
x_train = x_train / 255
x_test = x_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def cnn_baseline():
    """Small convnet: two conv/pool stages followed by an MLP head."""
    model = Sequential([
        Conv2D(32, (5, 5),
               input_shape=(1, 28, 28),
               activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.2),
        Dense(64, activation='relu'),
        Dropout(0.2),
        Dense(32, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
model = cnn_baseline()
model.summary()
# Longer schedule than the MLP baseline: 100 epochs, batch size 128.
model.fit(x_train,
          y_train,
          validation_data=(x_test, y_test),
          epochs=100,
          batch_size=128,
          verbose=1)
scores = model.evaluate(x_test, y_test, verbose=0)
# NOTE(review): despite the label, this prints the error rate
# (100 - accuracy%), mirroring the MLP's "Baseline error" line.
print('Baseline CNN: %.2f%%' % (100 - scores[1] * 100))
|
import sys
import json
import torch
import os
import numpy as np
from pycocotools.cocoeval import COCOeval
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
def dump_to_json():
    """Placeholder: serializing detections to JSON is not implemented yet."""
    pass
def eval(anno_json,
         result_json,
         anno_type):
    """Run COCO evaluation of result_json against anno_json.

    NOTE: the name shadows the builtin `eval`; kept for caller
    compatibility.

    @param anno_json: path to the ground-truth annotation JSON
    @param result_json: path to the detection-result JSON
    @param anno_type: one of 'segm', 'bbox', 'keypoints'.  Fixed: this
        parameter was previously ignored and 'bbox' was always used; it
        now selects the evaluation type, falling back to 'bbox' for
        unrecognized values.
    """
    valid_types = ['segm', 'bbox', 'keypoints']
    annType = anno_type if anno_type in valid_types else valid_types[1]
    print('Running demo for *%s* results.' % (annType))
    # initialize COCO ground truth api
    cocoGt = COCO(anno_json)
    # initialize COCO detections api
    cocoDt = cocoGt.loadRes(result_json)
    imgIds = sorted(cocoGt.getImgIds())
    imgIds = imgIds[0:100]
    # NOTE(review): this picks a single random image id out of the first
    # 100, so the evaluation runs on one image only and is
    # nondeterministic — confirm this is intended rather than evaluating
    # the whole 100-image subset.
    imgIds = imgIds[np.random.randint(100)]
    # running evaluation
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
|
import sys, math
for line in sys.stdin:
    n = int(line)
    # A zero sentinel terminates the input.
    if n == 0:
        break
    # Fix: int(math.floor(math.sqrt(n))) rounds through a 53-bit float and
    # can be off by one for large n (around 2**52 and above);
    # math.isqrt computes the exact integer square root (Python 3.8+).
    print(math.isqrt(n))
|
import os
import time
last_served = 0
def get_last_frame(data):
    """
    Return the path of the freshest captured video frame, or a
    video-error dict when no usable frame exists.

    Protocol: videoSELECT.txt holds a single character selecting the
    active ping-pong buffer; the frame itself lives in video<sel>.jpg.
    @param data: dict with a 'flushCache' flag — when exactly True, the
        frame is re-served even if unchanged since the last call.
    """
    global last_served
    # does it exist at all?
    if not os.path.exists("/home/xilinx/projects/videoSELECT.txt"):
        return {"video-error": "No active video"}
    with open("/home/xilinx/projects/videoSELECT.txt") as f:
        sel = f.read()
    # Malformed selector: fall back to buffer "A".
    if len(sel) != 1:
        sel = "A"
    if not os.path.exists(f"/home/xilinx/projects/video{sel}.jpg"):
        return {"video-error": "No active video"}
    # has video been updating in the past 15 seconds?
    last_updated = os.path.getmtime(f"/home/xilinx/projects/video{sel}.jpg")
    if time.time() - last_updated > 15:
        return {"video-error": "No active video"}
    # has it been updated since the last time it was sent?
    if last_served == last_updated and data['flushCache'] is not True:
        return {"video-error": "USE_CACHED"}
    # serve it!
    last_served = last_updated
    return {
        "file": f"/home/xilinx/projects/video{sel}.jpg"
    }
# *****************************************
# Unit tests
# *****************************************
import unittest
class TestVideo(unittest.TestCase):
    """
    Integration-style tests for get_last_frame; they manipulate the real
    ping-pong files under /home/xilinx/projects/.
    """
    def test_no_select_file(self):
        # Remove the selector file so no video is considered active.
        # Fix: this previously removed "videoSELECT.jpg" — the wrong
        # extension — so the selector was never actually deleted and the
        # test could pass for the wrong reason.
        try:
            os.remove("/home/xilinx/projects/videoSELECT.txt")
        except OSError:
            # Already absent — that is the state we want.
            pass
        result = get_last_frame({"flushCache": False})
        self.assertEqual(result, {"video-error": "No active video"})
    def test_no_image_file(self):
        # remove the frame image (narrow OSError instead of a bare except)
        try:
            os.remove("/home/xilinx/projects/videoA.jpg")
        except OSError:
            pass
        # set ping pong selector
        with open("/home/xilinx/projects/videoSELECT.txt", 'w') as f:
            f.write("A")
        # run
        result = get_last_frame({"flushCache": False})
        self.assertEqual(result, {"video-error": "No active video"})
    def test_read_image(self):
        # create a fresh frame image
        from PIL import Image
        Image.new('RGB', (1280, 720)).save("/home/xilinx/projects/videoA.jpg")
        # set ping pong selector
        with open("/home/xilinx/projects/videoSELECT.txt", 'w') as f:
            f.write("A")
        # run
        result = get_last_frame({"flushCache": False})
        self.assertEqual(result, {"file": "/home/xilinx/projects/videoA.jpg"})
    def test_stale_video(self):
        # create a frame image
        from PIL import Image
        Image.new('RGB', (1280, 720)).save("/home/xilinx/projects/videoA.jpg")
        # set ping pong selector
        with open("/home/xilinx/projects/videoSELECT.txt", 'w') as f:
            f.write("A")
        # wait past the 15-second freshness window
        time.sleep(20)
        # run
        result = get_last_frame({"flushCache": False})
        self.assertEqual(result, {"video-error": "No active video"})
# Run the tests when executed directly (note: test_stale_video sleeps 20 s).
if __name__ == '__main__':
    unittest.main()
|
from db import nova_conexao
from mysql.connector.errors import ProgrammingError
# DDL: drop the tables if they already exist.
# NOTE(review): emails is dropped before grupos; if emails holds a foreign
# key to grupos this order matters — confirm against the schema.
exclui_tabela_email = """
    drop table if exists emails
"""
exclui_tabela_grupos = """
    drop table if exists grupos
"""
try:
    with nova_conexao() as conexao:
        try:
            cursor = conexao.cursor()
            cursor.execute(exclui_tabela_email)
            cursor.execute(exclui_tabela_grupos)
        except ProgrammingError as e:
            # SQL-level failure (bad statement, missing privileges, ...).
            print(f'Erro: {e.msg}')
except ProgrammingError as e:
    # Failure while establishing or closing the connection itself.
    print(f'Erro externo: {e.msg}')
|
from django.db import models
from django.utils import timezone
from phone_field import PhoneField
# Create your models here.
class Participant(models.Model):
    """An applicant who may be scheduled for interviews."""
    name = models.CharField(max_length = 40)
    phone_number = models.CharField(max_length = 40)
    college = models.CharField(max_length = 100, blank=True, null=True)
    gender = models.CharField(max_length=1, choices=(
        ('M', 'Male'),
        ('F', 'Female'),
    ))
    email = models.CharField(max_length=40, null = True)
    position_applied = models.CharField(max_length = 30, blank = True, null = True)
    resume = models.FileField(null=True)
    # Stamped once at creation; hidden from model forms via editable=False.
    created_at = models.DateTimeField(default=timezone.now, editable=False)
class Interview(models.Model):
    """A scheduled interview slot and its eventual outcome."""
    title = models.CharField(max_length = 40)
    date = models.DateField()
    start_time = models.DateTimeField()
    end_time = models.DateTimeField(blank=True, null=True)
    candidates_count = models.IntegerField(default = 0)
    # Outcome of the interview; defaults to pending ('TBD').
    result = models.CharField( max_length=3, choices=(
        ('S', 'Selected'),
        ('R', 'Rejected'),
        ('J', 'Joined'),
        ('DNJ', 'Did Not Join'),
        ('TBD', 'Pending'),), default='TBD' )
class InterviewParticipants(models.Model):
    """Joins up to two candidates to an interview."""
    interview = models.ForeignKey('Interview', related_name='interview', on_delete=models.CASCADE)
    # NOTE(review): related_name 'pariticipant_one/_two' is misspelled, but
    # renaming it would break existing reverse queries and migrations —
    # deliberately left as-is.
    candidate_one = models.ForeignKey('Participant', related_name='pariticipant_one', on_delete = models.SET_NULL, blank=True, null=True)
    candidate_two = models.ForeignKey('Participant', related_name='pariticipant_two', on_delete = models.SET_NULL, blank=True, null=True)
|
#経路変更の閾値1.5,2,2.5でグラフに出した、さらに日本語対応した
#buffer、patternの順で回せるようにした
#これはSSGW(0)に接続している全ノードからの通信量が1.2倍になった場合
import statistics
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import random
import fairnessIndex
# Use a font that can render CJK glyphs in plot labels (per the original
# note, the kanji 数 is not covered by this font).
mpl.rcParams['font.family'] = 'AppleGothic'
# figure() creates the drawing area and returns it as the `fig` object.
fig = plt.figure()
# add_subplot() adds a plotting region; arguments are rows, cols, index.
ax_buffer = fig.add_subplot(2, 1, 1,xlabel="受信バッファ[%]",ylabel="IoT GWへのトラヒック量[packet]")
ax_stdev = fig.add_subplot(2, 1, 2,)
def show_network():
    """
    Print each SSGW's receive-buffer usage and IoTGW(0)'s stored packet
    count, then append the Jain fairness index of the buffer usages to
    the global list matching the current Threshold.
    """
    ssgw_sample=[]  # receive-buffer usage (%) of each SSGW at the end of the run
    #print("{:<3}".format(buffer_control),"%:")
    print("Threshold:", Threshold)
    for SSGW in SSGW_list:
        if SSGW.id == 0:
            print("SSGW(0)の受信バッファは{:.3g}%".format((SSGW.current_packet_size/SSGW.max_packet_size)*100))
            ssgw_sample.append((SSGW.current_packet_size/SSGW.max_packet_size)*100)
        if SSGW.id == 1:
            print("SSGW(1)の受信バッファは{:.3g}%".format((SSGW.current_packet_size/SSGW.max_packet_size)*100))
            ssgw_sample.append((SSGW.current_packet_size/SSGW.max_packet_size)*100)
        if SSGW.id == 2:
            print("SSGW(2)の受信バッファは{:.3g}%".format((SSGW.current_packet_size/SSGW.max_packet_size)*100))
            ssgw_sample.append((SSGW.current_packet_size/SSGW.max_packet_size)*100)
        if SSGW.id == 3:
            print("SSGW(3)の受信バッファは{:.3g}%".format((SSGW.current_packet_size/SSGW.max_packet_size)*100))
            ssgw_sample.append((SSGW.current_packet_size/SSGW.max_packet_size)*100)
        if SSGW.id == 4:
            print("SSGW(4)の受信バッファは{:.3g}%".format((SSGW.current_packet_size/SSGW.max_packet_size)*100))
            ssgw_sample.append((SSGW.current_packet_size/SSGW.max_packet_size)*100)
    fairness_index = fairnessIndex.FairnessFunc(ssgw_sample)
    print("fairness_index:",fairness_index)
    # Bucket the fairness index by the rerouting threshold (value is x10).
    if Threshold ==15:
        f_index_list15.append(fairness_index)
    elif Threshold ==20:
        f_index_list20.append(fairness_index)
    elif Threshold ==25:
        f_index_list25.append(fairness_index)
    #stdev = statistics.stdev(ssgw_stdev)
    #print("標本標準偏差は:{:.1f}".format(stdev))
    for IoTGW in IoTGW_list:
        if IoTGW.id == 0:
            print("IoTGW(0)の所持パケットは",IoTGW.storage_packet_size)
def node_control(ssgw):
    """
    Congestion control for one SSGW: when its receive-buffer usage
    exceeds buffer_control %, either reroute a node whose traffic is at
    least Threshold/10 times the per-node average to the least-loaded
    SSGW, or — if no single node dominates — shrink the advertised
    window of every node attached to this SSGW.
    """
    if (ssgw.current_packet_size/ssgw.max_packet_size) <= (buffer_control/100):  # below the control threshold: nothing to do
        pass
    else:
        ave_packet_size = ssgw.current_packet_size / len(ssgw.mynodes)  # average send volume of the nodes attached to this SSGW
        #print(ave_packet_size,ssgw.id)
        #Threshold=2#経路変更のための閾値(上に書いた)
        control_flag=False  # once a heavy node has been rerouted, skip the window throttling below
        for node in ssgw.mynodes:
            if node.send_packet_size > ave_packet_size * (Threshold/10):  # a dominating node exists: reroute it; otherwise throttle all nodes
                ssgw_current_packet_list= []
                #print("SSGW({})に平均通信量の{}倍以上のnode({})あり:".format(ssgw.id,Threshold,node.id),node.send_packet_size)
                for minssgw in SSGW_list:  # collect every SSGW's load, then pick the least-loaded one
                    ssgw_current_packet_list.append(minssgw.current_packet_size)
                for sendssgw in SSGW_list:
                    if sendssgw.current_packet_size == min(ssgw_current_packet_list):
                        if sendssgw.id==ssgw.id:  # source and destination SSGW are the same: no reroute
                            pass
                        else:
                            if control_flag==False:
                                ssgw.mynodes.remove(node)  # detach the node from its current SSGW
                                node.set_myssgw(sendssgw)  # point the node at its new SSGW
                                sendssgw.add_mynode(node)  # register the node with the new SSGW
                                #print("接続先SSGWのid:",sendssgw.id)
                                control_flag=True
        if control_flag==False:
            # Buffer above threshold but no single node dominates:
            # throttle every attached node via its advertised window.
            for node in ssgw.mynodes:
                node.control_switch=1
                #print("SSGW({})の全nodeへ広告ウィンドウ制御".format(ssgw.id))
            #pass
            #print("接続されている全nodeの送信量を抑える")
# ----- Simulation configuration -----
ssgw_number=5
with open("file.txt","a")as f:  # "a" appends, "w" would overwrite
    print("SSGWの数は:",ssgw_number,file=f)
print("SSGWの数は:",ssgw_number)
# Three traffic classes (packets/s), assigned to nodes by id further below.
send_packet_size_kikaku1=500
send_packet_size_kikaku2=300
send_packet_size_kikaku3=100
increase=2  # burst-mode multiplier applied to a node's send volume
timerange=4  # seconds of sending; the same duration is used before and during the burst
buffer_control_list=[]  # thresholds (%) at which buffer control kicks in
for plus in range(50,105,5):  # 50..100 inclusive, step 5
    buffer_control_list.append(plus)
for buffer_control in buffer_control_list:  # sweep over buffer-control thresholds
    f_index_list15=[]  # fairness index per buffer % for Threshold=15
    f_index_list20=[]  # fairness index values for Threshold=20
    f_index_list25=[]  # fairness index values for Threshold=25
    print("%:",buffer_control)
    for pattern in range(100):  # number of random placement patterns
        send_list=[]  # SSGW id each node connects to
        random.seed(pattern)
        print("seed:",pattern)
        for i in range(30):
            x= random.randint(0, ssgw_number-1)  # destination SSGW index; (0,4) when there are 5 SSGWs
            send_list.append(x)
        for Threshold in range(15,30,5):  # reroute-multiplier sweep (value / 10)
            #if Threshold==20:
            x_list=[]
            y_list=[]
            IoTGW_list=[]
            SSGW_list=[]
            Node_list=[]
            class IoTGW():
                """Aggregation gateway receiving the traffic forwarded by the SSGWs."""
                def __init__(self,id):
                    self.id = id  # identifier
                    self.name = "iotgw_"+str(id)  # display name
                    self.current_packet_size = 0  # packets received during the current second
                    self.storage_packet_size = 0  # cumulative storage
                    self.trash_packet_size = 0  # total packets discarded
                    self.max_packet_size = 10000  # receive-window size: per-second intake limit
                def ReceivePacket(self, packet):
                    """Accept *packet* packets; overflow beyond the window is discarded."""
                    if self.max_packet_size < self.current_packet_size + packet:  # overflow: drop the excess
                        over_size = self.current_packet_size + packet - self. max_packet_size
                        self.trash_packet_size += over_size  # count the spilled packets
                    else :
                        self.current_packet_size += packet
                        self.storage_packet_size += packet
            class SSGW():
                """Edge gateway: receives node traffic, forwards it to the IoTGW."""
                def __init__(self,id):
                    self.id = id  # identifier
                    self.name = "ssgw_"+str(id)  # display name
                    self.current_packet_size = 0  # packets received during the current second
                    self.storage_packet_size = 0  # storage
                    self.trash_packet_size = 0  # total packets discarded
                    self.max_packet_size = 5000  # receive-window size: per-second intake limit
                    self.send_packet_size = 5000  # packets this SSGW forwards per second
                    self.current_packet_size_for_control= 0
                    self.mynodes = []  # node objects currently sending to this SSGW
                def ReceivePacket(self, packet):
                    """Accept *packet* packets; overflow beyond the window is discarded."""
                    if self.max_packet_size < self.current_packet_size + packet:  # overflow: drop the excess
                        over_size = self.current_packet_size + packet - self. max_packet_size
                        self.trash_packet_size += over_size  # count the spilled packets
                    else :
                        self.current_packet_size += packet
                def SendPacket(self) :
                    """Forward up to send_packet_size packets to every IoTGW."""
                    for IoTGW in IoTGW_list:
                        if self.send_packet_size < self.current_packet_size:
                            IoTGW.ReceivePacket(self.send_packet_size)
                        else :
                            IoTGW.ReceivePacket(self.current_packet_size)
                def add_mynode(self,node):
                    # Register a node as sending to this SSGW.
                    self.mynodes.append(node)
            class Node():
                """Sensor node sending a fixed per-second packet volume to its SSGW."""
                def __init__(self,id):
                    self.id = id
                    self.name = "node_"+str(id)  # display name
                    self.send_packet_size=0
                    self.control_switch=0
                def set_myssgw(self,ssgw) :
                    # Record the destination SSGW object for this node.
                    self.myssgw = ssgw
                def SendPacket(self,state):
                    """
                    Send one second's worth of packets to the assigned SSGW.
                    state 0 = normal load, 1 = burst (volume multiplied by
                    `increase`); control_switch 1 additionally scales the
                    volume by 0.8 (advertised-window throttling).
                    """
                    if self.control_switch ==0:  # 0: no window throttling, 1: advertised window reduced
                        if state == 0:  # normal state
                            if self.id < 10:  # packet-volume class is assigned by id
                                self.send_packet_size = send_packet_size_kikaku1
                            elif self.id >=10 and self.id <20:
                                self.send_packet_size = send_packet_size_kikaku2
                            else:
                                self.send_packet_size = send_packet_size_kikaku3
                        elif state==1:  # burst
                            if self.id < 10:  # packet-volume class is assigned by id
                                self.send_packet_size = send_packet_size_kikaku1*increase
                            elif self.id >=10 and self.id <20:
                                self.send_packet_size = send_packet_size_kikaku2*increase
                            else:
                                self.send_packet_size = send_packet_size_kikaku3*increase
                    elif self.control_switch ==1:
                        if state == 0:  # normal state
                            if self.id < 10:  # packet-volume class is assigned by id
                                self.send_packet_size = send_packet_size_kikaku1*0.8
                            elif self.id >=10 and self.id <20:
                                self.send_packet_size = send_packet_size_kikaku2*0.8
                            else:
                                self.send_packet_size = send_packet_size_kikaku3*0.8
                        elif state==1:  # burst
                            if self.id < 10:  # packet-volume class is assigned by id
                                self.send_packet_size = send_packet_size_kikaku1*increase*0.8
                            elif self.id >=10 and self.id <20:
                                self.send_packet_size = send_packet_size_kikaku2*increase*0.8
                            else:
                                self.send_packet_size = send_packet_size_kikaku3*increase*0.8
                    # myssgw holds the destination SSGW object itself, so its
                    # methods can be called directly here.
                    self.myssgw.ReceivePacket(self.send_packet_size)
            # Decide SSGW/node counts and register the instances in the lists.
            IoTGW_list.append(IoTGW(0))
            for i in range(ssgw_number):
                ssgw = SSGW(i)
                SSGW_list.append(ssgw)  # create the instance and register it
            for i in range(30):
                node = Node(i)
                for ssgw in SSGW_list:
                    if ssgw.id == send_list[i]:
                        node.set_myssgw(ssgw)  # register the objects with each other so they can exchange state directly
                        ssgw.add_mynode(node)
                Node_list.append(node)
            # ----- Packet transmission / control phase -----
            for time in range(timerange):  # normal-load seconds
                #print(time,"秒目")
                #for ssgw in SSGW_list:
                    #node_control(ssgw)
                for ssgw in SSGW_list:
                    ssgw.current_packet_size=0  # reset receive buffers (processed each second)
                for node in Node_list:  # 30 nodes in total
                    node.SendPacket(0)  # 0 = normal state, 1 = burst
                    #print("接続先SSGW({})".format(node.myssgw.id))
                for ssgw in SSGW_list:
                    ssgw.SendPacket()  # forward to IoTGW(0)
                for iotgw in IoTGW_list:
                    iotgw.current_packet_size=0
                #if time == 0:
                    #show_network()
            for time_burst in range(timerange):  # burst seconds
                #print(time_burst+timerange,"秒目(バーストモード)")
                #for ssgw in SSGW_list:
                    #node_control(ssgw)
                for ssgw in SSGW_list:
                    ssgw.current_packet_size=0  # reset receive buffers (processed each second)
                for burst_ssgw in SSGW_list:
                    if burst_ssgw.id==0:
                        # Every node attached to SSGW(0) bursts in this scenario.
                        for burst_node in burst_ssgw.mynodes:
                            burst_node.SendPacket(1)
                    else:
                        for burst_node in burst_ssgw.mynodes:
                            burst_node.SendPacket(0)
                for ssgw in SSGW_list:
                    ssgw.SendPacket()  # forward to IoTGW(0)
                for iotgw in IoTGW_list:
                    iotgw.current_packet_size=0
            #show_network()
            x_list.append(buffer_control)
            for IoTGW in IoTGW_list:
                if IoTGW.id == 0:
                    y_list.append(IoTGW.storage_packet_size)
            show_network()
            # One plotted curve per rerouting threshold.
            # NOTE(review): the constant offsets -35 / -70 below look like
            # manual adjustments to separate the curves visually — confirm.
            if Threshold == 15:  # reroute at 1.5x the average
                y2_list=[]
                for y in y_list:
                    y2_list.append(y-35)
                ax_buffer.plot(x_list,y2_list,color="Green", marker="o")
            elif Threshold ==20:  # reroute at 2x the average
                y2_list=[]
                for y in y_list:
                    y2_list.append(y-70)
                ax_buffer.plot(x_list,y2_list,color="Blue", marker="o")
            elif Threshold ==25:
                ax_buffer.plot(x_list,y_list,color="Red", marker="o")
            #elif Threshold ==30:#意味なかった(25と同じ)
                #plt.plot(x_list,y_list,color="Pink")
            ax_buffer.legend( ("1.5倍", "2倍","2.5倍"), loc=2)
            #plt.show()
def average(xs):
    """Return the arithmetic mean of the values in xs."""
    return sum(xs) / len(xs)

# Append the mean fairness index per threshold to the results file
# ("a" = append so successive runs accumulate; "w" would overwrite).
with open("fairness2.txt", "a") as f:
    print("buff(%):", buffer_control, file=f)
    print("Thr=15", average(f_index_list15), file=f)
    print("Thr=20", average(f_index_list20), file=f)
    # BUG FIX: was `f_inde88x_list25` (typo) which raised NameError.
    print("Thr=25", average(f_index_list25), file=f)
|
import threading
import time


def action(arg):
    """Worker: sleep briefly, then report which task ran."""
    time.sleep(2)
    print('time is %s\t' % (arg))


threads = []
for i in range(5):
    # Two identical worker threads per loop iteration (10 total).
    t = threading.Thread(target=action, args=(i,))
    t1 = threading.Thread(target=action, args=(i,))
    threads.append(t)
    threads.append(t1)

for worker in threads:
    # `daemon = True` is the modern replacement for deprecated setDaemon(True);
    # it lets the interpreter exit even if a worker hangs.
    worker.daemon = True
    worker.start()

for worker in threads:
    # BUG FIX: the original looped `for l in theads: t.join()`, joining only
    # the LAST thread created over and over; join each worker instead so the
    # main thread actually waits for all of them.
    worker.join()
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
# import BeautifulSoup
import mechanize
import httplib, urllib
import sys, re
from config import *
#TODO: error handling
# (1) if on_date - today > 7, inform user
def main():
    """Fetch the price page and alert (pushover or e-mail) when an offer is cheap enough."""
    page = submit_form()
    prices = parse_page(page.read(), TRIGGER)
    cheap_enough = [p for p in prices if p <= max_price]
    if cheap_enough:
        cheapest = min(prices)
        if MODE == 'pushover':
            send_pushover(cheapest)
        elif MODE == 'email':
            send_mail(cheapest)
def submit_form():
    """Drive the search form in a headless mechanize browser and submit it."""
    browser = mechanize.Browser()
    response = browser.open(url)
    # Workaround: mechanize chokes on <optgroup> elements, so strip them
    # from the HTML before handing the page back to the form parser.
    cleaned = re.sub(r'<optgroup label=".+">', "", response.get_data())
    response.set_data(cleaned)
    browser.set_response(response)
    browser.select_form(name='form_spar')
    # Fill in the search criteria from config.
    browser['trip_mode'] = ['trip_simple']  # alternative: 'trip_both'
    browser['from_spar'] = from_city
    browser['to_spar'] = to_city
    # The date field is read-only in the HTML; unlock it so we can set it.
    browser.form.find_control('start_datum').readonly = False
    browser['start_datum'] = on_date
    browser['start_time'] = at_time
    return browser.submit()
def parse_page(haystack, needle):
    """Extract all prices (as floats) from HTML fragments containing `needle`.

    Splits the page on '<' and, for every fragment that mentions the trigger
    text but not the known false positive, pulls out the price token and
    converts the decimal comma to a dot.
    """
    prices = []
    for fragment in haystack.split('<'):
        if needle not in fragment or IMPOSTOR in fragment:
            continue
        token = re.split(DELIMITERS, fragment)[1]
        prices.append(float(re.sub(',', '.', token)))
    return prices
def send_pushover(cheapest):
    """POST a Pushover notification with the cheapest price found."""
    if not USER_TOKEN:
        print( "You have to configure your Pushover user token in config.py for this to work." )
        sys.exit()
    payload = urllib.urlencode({
        'title': '( : ltur für ' + str(cheapest) + ' ',
        'token': APP_TOKEN,
        'user': USER_TOKEN,
        'message': ')',
    })
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    conn = httplib.HTTPSConnection(PUSHOVER_URL)
    conn.request('POST', PUSHOVER_PATH, payload, headers)
    res = conn.getresponse()  # read the reply (kept for debugging)
    conn.close()
def send_mail(cheapest):
    """E-mail the cheapest offer via the configured SMTP server."""
    import smtplib
    from email.mime.text import MIMEText

    body = "Ltur notification. cheapest offer: %s €\n\n%s" % (str(cheapest), url)
    msg = MIMEText(body)
    msg['Subject'] = 'Ltur notifier: %s ' % str(cheapest)
    msg['From'] = FROM_EMAIL
    msg['To'] = EMAIL

    smtp = smtplib.SMTP(SMTP_SERVER)
    if SMTP_USER and SMTP_PASS:
        smtp.login(SMTP_USER, SMTP_PASS)
    smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
    smtp.quit()
# Script entry point: run the price check once.
if __name__ == '__main__':
    main()
|
# import pandas, numpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from collections import Counter
from tigerml.eda import Analyser
# Create the required data frames by reading in the files.
df = pd.read_excel('SaleData.xlsx')              # sales records (Item, Region, Sale_amt, ...)
df1 = pd.read_csv("imdb.csv", escapechar='\\')   # IMDB titles; backslash escapes inside fields
df2 = pd.read_csv('diamonds.csv')                # diamonds dataset (carat, cut, x, y, z, ...)
df3 = pd.read_csv('movie_metadata.csv')          # movie metadata (gross, budget, duration, ...)
# Q1 Find least sales amount for each item
# has been solved as an example
def least_sales(df):
    """Q1: minimum Sale_amt per Item, as a flat DataFrame."""
    return df.groupby(["Item"])["Sale_amt"].min().reset_index()
# Q2 compute total sales at each year X region
def sales_year_region(df):
    """Q2: total Sale_amt per (year, Region).

    Side effect (as before): adds a 'yr' string column to df.
    """
    df['yr'] = df.OrderDate.apply(lambda ts: ts.strftime('%Y'))
    return df.groupby(['yr', 'Region'])["Sale_amt"].sum().reset_index()
# Q3 append column with no of days difference from present date to each order date
def days_diff(df):
    """Q3: append 'days_diff' = whole days between now and each OrderDate.

    BUG FIX: the original stringified the Timedelta and kept its first three
    characters (e.g. "5 d" for single-digit day counts), which is not a day
    count at all. Use the Timedelta accessor's `.dt.days` instead.
    """
    now = pd.Timestamp.now()
    df['days_diff'] = (now - df['OrderDate']).dt.days
    return df
# Q4 get dataframe with manager as first column and salesman under them as lists in rows in second column.
def mgr_slsmn(df):
    """Q4: one row per Manager with the unique SalesMan values under them."""
    per_manager = df.groupby(['Manager'])['SalesMan'].unique()
    return per_manager.reset_index()
# Q5 For all regions find number of salesman and number of units
def slsmn_units(df):
    """Q5: per Region, the distinct-salesman count and total units sold."""
    units = df.groupby('Region')['Units'].sum().reset_index()
    sellers = df.groupby('Region')['SalesMan'].nunique().reset_index()
    merged = pd.merge(sellers, units, how='inner', on='Region')
    merged.rename(columns={'SalesMan': 'salesmen_count', 'Units': 'total_sales'}, inplace=True)
    return merged
# Q6 Find total sales as percentage for each manager
def sales_pct(df):
    """Q6: each Manager's share of total Sale_amt, in percent.

    NOTE: the output column name 'percent_sales ' (trailing space) is kept
    exactly as produced originally, for compatibility with consumers.
    """
    per_mgr = df.groupby('Manager')['Sale_amt'].sum().reset_index()
    grand_total = per_mgr['Sale_amt'].sum()
    per_mgr['percent_sales '] = per_mgr['Sale_amt'] / grand_total * 100
    return per_mgr.drop(['Sale_amt'], axis=1)
# Q7 get imdb rating for fifth movie of dataframe
def fifth_movie(df):
    """Q7: imdbRating of the fifth movie (positional row 4)."""
    return df['imdbRating'].iloc[4]
# Q8 return titles of movies with shortest and longest run time
def movies(df):
    """Q8: titles of the shortest- and longest-running movies.

    Returns a two-row DataFrame with columns 'movie' (which extreme it is)
    and 'name' (the title). Ties resolve to the first matching row.
    """
    shortest = df[df['duration'] == df['duration'].min()]['title'].iloc[0]
    longest = df[df['duration'] == df['duration'].max()]['title'].iloc[0]
    return pd.DataFrame({
        'movie': ['shortest_run_time_movie_name', 'longest_run_time_movie_name'],
        'name': [shortest, longest],
    })
# Q9 sort by two columns - release_date (earliest) and Imdb rating(highest to lowest)
def sort_df(df):
    """Q9: sort by release year (earliest first), then imdbRating (highest first)."""
    return df.sort_values(['year', 'imdbRating'], ascending=[True, False])
# Q10 subset revenue more than 2 million and spent less than 1 million & duration between 30 mintues to 180 minutes
def subset_df(df):
    """Q10: rows with gross > 20M, budget < 10M and duration in [30, 180] minutes.

    NOTE(review): the assignment text says "revenue more than 2 million and
    spent less than 1 million" but the original code used 20M/10M; the
    constants are kept unchanged to preserve behavior -- confirm intent.
    """
    money_ok = (df['gross'] > 20000000) & (df['budget'] < 10000000)
    duration_ok = (df['duration'] >= 30) & (df['duration'] <= 180)
    return df[money_ok & duration_ok]
# Q11 count the duplicate rows of diamonds DataFrame.
def dupl_rows(df):
    """Q11: number of duplicate rows (total rows minus distinct rows)."""
    return int(df.duplicated().sum())
# Q12 droping those rows where any value in a row is missing in carat and cut columns
def drop_row(df):
    """Q12: drop rows where carat or cut is missing."""
    return df.dropna(subset=['carat', 'cut'])
# Q13 subset only numeric columns
def sub_numeric(df):
    """Q13: the numeric columns of df.

    Uses the public `select_dtypes` API instead of the private
    `_get_numeric_data` helper, which pandas may remove without notice.
    """
    return df.select_dtypes(include=np.number)
# Q14 compute volume as (x*y*z) when depth > 60 else 8
def volume(df):
    """Q14: add a per-row 'volume' column: x*y*z where depth > 60, else 8.

    Missing measurements are repaired first: mean-impute x and y,
    forward-fill z, and coerce literal 'None' strings in z (mapped to 1.0,
    as before) so the column becomes float.

    BUG FIX: the original assigned the ENTIRE volume column inside a loop
    over `depth`, so every row ended up with whichever formula the LAST
    row's depth implied. np.where applies the condition row-wise.
    """
    df['x'].fillna(df['x'].mean(), inplace=True)
    df['y'].fillna(df['y'].mean(), inplace=True)
    df["z"].fillna(method='ffill', inplace=True)
    # z can contain the literal string 'None'; treat it as 1.0 (as before).
    df['z'] = df['z'].apply(lambda v: 1.0 if v == 'None' else float(v))
    df['volume'] = np.where(df['depth'] > 60, df['x'] * df['y'] * df['z'], 8)
    return df
# Q15 impute missing price values with mean
def impute(df):
    """Q15: fill missing price values with the column mean."""
    mean_price = df['price'].mean()
    df['price'] = df['price'].fillna(mean_price)
    return df
#Bonus question
#1.
def report_1(df1):
    """Bonus Q1: per-(type, year) genre/rating summary, written to report_1.csv.

    For each (type, year) group: the unique nrOfGenre values, min/max/mean
    imdbRating, total duration, and the set of genres occurring in the group.

    Side effect: adds a 'genere_combination_row_wise' column to df1 and
    writes 'report_1.csv' to the working directory.
    """
    # Per-group aggregates; all share the same group-key order, so the
    # positional column copies below line up.
    data1 = df1.groupby(['type', 'year'])['nrOfGenre'].unique().reset_index()
    data2 = df1.groupby(['type', 'year'])['imdbRating'].max().reset_index()
    data2.rename(columns={'imdbRating': 'max_rating'}, inplace=True)
    data3 = df1.groupby(['type', 'year'])['imdbRating'].min().reset_index()
    data3.rename(columns={'imdbRating': 'min_rating'}, inplace=True)
    data4 = df1.groupby(['type', 'year'])['imdbRating'].mean().reset_index()
    data4.rename(columns={'imdbRating': 'mean_rating'}, inplace=True)
    data5 = df1.groupby(['type', 'year'])['duration'].sum().reset_index()
    data5.rename(columns={'duration': 'total_run_time'}, inplace=True)
    data1['min_imdbRating'] = data3['min_rating']
    data1['max_imdbRating'] = data2['max_rating']
    data1['mean_imdbRating'] = data4['mean_rating']
    data1['total_run_time_minute'] = data5['total_run_time']
    # Decode the one-hot genre-flag columns into a genre-name list per row.
    # NOTE(review): relies on columns 16:44 being exactly these 28 genre
    # flags in this order, and on df1 having a default RangeIndex — confirm.
    final_list = []
    for i in range(len(df1)):
        temp_list_1 = ['Action', 'Adult', 'Adventure', 'Animation', 'Biography',
                       'Comedy', 'Crime', 'Documentary', 'Drama', 'Family', 'Fantasy',
                       'FilmNoir', 'GameShow', 'History', 'Horror', 'Music', 'Musical',
                       'Mystery', 'News', 'RealityTV', 'Romance', 'SciFi', 'Short', 'Sport',
                       'TalkShow', 'Thriller', 'War', 'Western']
        temp_list_2 = []
        temp_var_1 = df1.iloc[i]
        temp_var_2 = temp_var_1[16:44]
        for k, j in enumerate(temp_var_2):
            if j == 1:
                temp_list_2.append(temp_list_1[k])
        final_list.append(temp_list_2)
    df1['genere_combination_row_wise'] = final_list
    # Union of genres per (type, year): summing lists concatenates them.
    temp_df_1 = df1.groupby(['type', 'year'])['genere_combination_row_wise'].sum().reset_index()
    global_list = []
    for i, j in enumerate(temp_df_1['genere_combination_row_wise']):
        x = list(set(j))  # de-duplicate the concatenated genre lists
        global_list.append(x)
    data1['genere_groups'] = global_list
    data1.to_csv(r'report_1.csv')
# 2.
# By plotting the graph we found no specific relation, but df1.corr() shows
# these two columns are correlated to some extent.
# Module-level: length of each title up to the first " (" (strips the year suffix).
df1['len_of_char'] = df1['title'].apply(lambda x: len(x.split(' (')[0]))
def showgraph(df1):
    """Plot title length against imdbRating and display the window."""
    plt.plot(df1['len_of_char'], df1['imdbRating'])
    plt.show()
def count_less_than_25(x):
    """Count values in x below the 25th percentile of the module-level
    df1['len_of_char'] distribution."""
    q1 = np.percentile(df1['len_of_char'], 25)
    return sum(1 for v in x if v < q1)
def count_between_25_50(x):
    """Count values in x within [25th, 50th) percentile of the module-level
    df1['len_of_char'] distribution."""
    q1 = np.percentile(df1['len_of_char'], 25)
    q2 = np.percentile(df1['len_of_char'], 50)
    return sum(1 for v in x if q1 <= v < q2)
def count_between_50_75(x):
    """Count values in x within [50th, 75th) percentile of the module-level
    df1['len_of_char'] distribution."""
    q2 = np.percentile(df1['len_of_char'], 50)
    q3 = np.percentile(df1['len_of_char'], 75)
    return sum(1 for v in x if q2 <= v < q3)
def count_greater_than_75(x):
    """Count values in x at or above the 75th percentile of the module-level
    df1['len_of_char'] distribution."""
    q3 = np.percentile(df1['len_of_char'], 75)
    return sum(1 for v in x if v >= q3)
def report_2(df1):
    """Bonus Q2: per-year title-length min/max plus percentile-bucket counts."""
    report = df1.groupby(['year'])['len_of_char'].min().reset_index()
    report.rename(columns={'len_of_char': 'min_len_title'}, inplace=True)
    longest = df1.groupby(['year'])['len_of_char'].max().reset_index()
    longest.rename(columns={'len_of_char': 'max_len_title'}, inplace=True)
    report['max_len_title'] = longest['max_len_title']
    # One array of title lengths per year, bucketed against global percentiles.
    per_year = df1.groupby(['year'])['len_of_char'].apply(np.hstack).reset_index()
    report['less_than_25'] = per_year['len_of_char'].apply(count_less_than_25)
    report['between_25_50'] = per_year['len_of_char'].apply(count_between_25_50)
    report['between_50_75'] = per_year['len_of_char'].apply(count_between_50_75)
    report['greater_than_75'] = per_year['len_of_char'].apply(count_greater_than_75)
    return report
#3.
def diamond_crosstab(df4):
    """Bonus Q3: row-normalized crosstab of volume quartile vs cut."""
    df4 = volume(df4)  # adds the 'volume' column (function defined above)
    df4['quantile_ex_1'] = pd.qcut(df4['volume'], q=4)
    return pd.crosstab(df4.quantile_ex_1, df4.cut, normalize='index')
#4
#this will work for movie_metadata.csv file
#quarter by quarter data is not given so i did it yearly
def report_4(df1):
    """Bonus Q4: mean imdb_score of the top-10%-grossing movies, per year.

    Quarter-by-quarter data is unavailable, so the report is yearly
    (2016 down to 2007). Years with fewer than 10 movies yield NaN
    (top 10% rounds down to zero rows), as before.

    BUG FIX: the original filtered `q['title_year'] == 2016` INSIDE the
    year loop, so every year reported 2016's average; filter on the loop
    variable instead.
    """
    years = [2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007]
    averages = []
    for yr in years:
        year_rows = df1[df1['title_year'] == yr].sort_values(['gross'], ascending=False)
        top_n = int(len(year_rows) * .1)
        averages.append(year_rows.iloc[0:top_n]['imdb_score'].mean())
    return pd.DataFrame({'year': years, 'avg_ratings_top_10_percent': averages})
#5
def report_5(df1):
    """Bonus Q5: per duration-decile totals of wins/nominations/ratings plus
    the three most common genres in each decile.

    Side effects: adds 'genere_combination_row_wise' and 'duration_decile'
    columns to df1.
    """
    # Decode one-hot genre flags (columns 16:44) into genre-name lists.
    # NOTE(review): the row count 14332 is hard-coded to this dataset;
    # range(len(df1)) would generalize — confirm before changing.
    final_list = []
    for i in range(0, 14332):
        temp_list_1 = ['Action', 'Adult', 'Adventure', 'Animation', 'Biography',
                       'Comedy', 'Crime', 'Documentary', 'Drama', 'Family', 'Fantasy',
                       'FilmNoir', 'GameShow', 'History', 'Horror', 'Music', 'Musical',
                       'Mystery', 'News', 'RealityTV', 'Romance', 'SciFi', 'Short', 'Sport',
                       'TalkShow', 'Thriller', 'War', 'Western']
        temp_list_2 = []
        temp_var_1 = df1.iloc[i]
        temp_var_2 = temp_var_1[16:44]
        for k, j in enumerate(temp_var_2):
            if j == 1:
                temp_list_2.append(temp_list_1[k])
        final_list.append(temp_list_2)
    df1['genere_combination_row_wise'] = final_list
    df1['duration_decile'] = pd.qcut(df1['duration'], q=10)
    # Per-decile aggregates, joined positionally (same group-key order).
    a = df1.groupby(['duration_decile'])['nrOfWins'].sum().reset_index()
    b = df1.groupby(['duration_decile'])['nrOfNominations'].sum().reset_index()
    c = df1.groupby(['duration_decile'])['ratingCount'].sum().reset_index()
    d = df1['duration_decile'].value_counts().reset_index()
    # NOTE(review): this rename assumes the pre-2.0 pandas reset_index()
    # column names ('index', 'duration_decile'); pandas >= 2.0 already
    # returns ('duration_decile', 'count') — verify the pandas version.
    d.rename(columns={'index': 'duration_decile', 'duration_decile': 'count'}, inplace=True)
    a['nrOfNominations'] = b['nrOfNominations']
    a['ratingCount'] = c['ratingCount']
    a['count'] = d['count']
    # Top 3 genres per decile: summing lists concatenates them; Counter
    # ranks genre frequency within the decile.
    list_n = []
    for i, j in enumerate(df1.groupby(['duration_decile'])['genere_combination_row_wise'].sum()):
        p = Counter(j)
        p = sorted(p.items(), key=lambda x: x[1], reverse=True)
        q = p[0:3]
        list_n.append(q)
    a['top_3_genere'] = list_n
    return a
#6
'''
#pass both the data imdb and movie_metadata in this function and it will generate two separate insight report
def report_6(df1,df2):
an = Analyser(df1)
an.get_report()
an_movie_metadata=Analyser(df2)
an_movie_metadata.get_report()
'''
|
#!/usr/bin/env python3
import os
import re
from depthcharge import Depthcharge, Console, OperationFailed, log
from depthcharge.monitor import Monitor
def setup():
    """Connect to the target over serial and return an initialized Depthcharge context."""
    # Optional terminal-based monitor so we can keep an eye on the
    # underlying operations.
    monitor = Monitor.create('term')
    # The target device's serial console.
    console = Console('/dev/ttyUSB0', baudrate=115200, monitor=monitor)
    return Depthcharge(console, arch='arm', allow_deploy=True, allow_reboot=True)
    # Alternatively, load a previously saved device config so Depthcharge
    # can skip its "inspection" steps:
    #   return Depthcharge.load('my_device.cfg', console)
def get_buses(ctx):
    """Return the I2C bus numbers reported by U-Boot's `i2c bus` command."""
    listing = ctx.send_command('i2c bus')
    buses = []
    for line in listing.splitlines():
        hit = re.match(r'Bus (\d+)', line)
        if hit is None:
            continue
        bus_number = int(hit.group(1))
        log.note('Available: Bus {:d}'.format(bus_number))
        buses.append(bus_number)
    return buses
def find_devices(ctx, buses):
    """Probe each I2C bus and return (bus, address) pairs for responding devices."""
    found = []
    for bus in buses:
        log.note('Probing bus {:d}'.format(bus))
        cmd = 'i2c dev {:d}'.format(bus)
        try:
            # check=True raises OperationFailed if bus selection errors out.
            ctx.send_command(cmd, check=True)
            # Probing may fail on buses (or pinmux settings) that are not
            # usable, so drop check=True and just inspect the output.
            resp = ctx.send_command('i2c probe')
            hit = re.match(r'Valid chip addresses: ([0-9a-fA-F\t ]+)', resp)
            if not hit:
                # A failing bus will spew failures for a while. Keep trying
                # to interrupt it (Ctrl-C) until we know we're back at a prompt.
                log.warning('No devices or bus failing. Waiting for prompt.')
                ctx.interrupt(timeout=120)
                continue
            for addr_text in hit.group(1).split():
                addr = int(addr_text, 16)
                log.info('Found device: Bus={:d}, Address=0x{:02x}'.format(bus, addr))
                found.append((bus, addr))
        except OperationFailed as error:
            log.error('Command failed: ' + cmd + os.linesep + str(error))
    return found
if __name__ == '__main__':
    # Attach to the device and get a Depthcharge context
    ctx = setup()
    log.info('Identifying available I2C buses.')
    buses = get_buses(ctx)
    log.info('Probing I2C buses for devices. This may take some time.')
    # Results are logged as they are found; the return value is unused here.
    find_devices(ctx, buses)
|
class Question:
    """A tiny interactive quiz bank kept in parallel class-level lists.

    All storage is class-level, so every instance shares the same question
    bank (this mirrors the original design).
    """

    options = []        # one 4-option list per question
    answers = []        # correct option number (1-4) per question
    level_of_ques = []  # difficulty label per question
    topic_name = []     # topic label per question
    prompt = []         # question text per question

    def __init__(self):
        pass

    def addQuestion(self, topic_name, level_of_ques):
        """Interactively read one question, its four options and the answer."""
        Question.prompt.append(input("Enter question: "))
        Question.topic_name.append(topic_name)
        Question.level_of_ques.append(level_of_ques)
        choices = []
        for i in range(4):
            choices.append(input("Enter option no. " + str((i + 1)) + ": "))
        Question.options.append(choices)
        ans = input("Choose the correct option: (1/2/3/4) ? ")
        Question.answers.append(int(ans))

    def displayAllQuestion(self):
        """Print every stored question with its numbered options."""
        print("Total number of questions are: ", len(Question.prompt))
        print("\n")
        for i in range(len(Question.prompt)):
            print("Ques. " + str(i + 1) + Question.prompt[i])
            for j in range(len(Question.options[i])):
                print(str(j + 1) + ")" + str(Question.options[i][j]))
            print("\n\n")

    def viewTopics(self):
        """Print the set of distinct topics in the bank."""
        print(set(Question.topic_name))

    @classmethod
    def run_test(cls):
        """Run the quiz interactively and report the final score."""
        score = 0
        for idx in range(len(cls.prompt)):
            print("Topic: " + cls.topic_name[idx])
            print(cls.prompt[idx])
            for i in range(len(cls.options[idx])):
                print(str(i + 1) + ")" + str(cls.options[idx][i]))
            print("Question Level: ", cls.level_of_ques[idx])
            print("\n")
            answer = int(input("choose option number as answer: "))
            print("\n")
            if answer == cls.answers[idx]:
                score += 1
        print("\n")
        print("you got", str(score), "/", str(len(cls.prompt)), "correct. ")
import sys
# SOURCE: collaborated with John Gauthier
# implementation ideas largely from Danny Yoo of UC Berkley on hashcollision.org
class RedBlackTree:
    """CLRS-style red-black tree using a single shared NIL sentinel.

    Supports insert/remove/search by key, min/max queries, and an
    in-order print. Keys need only support `<` comparison.
    """

    class Node():
        def __init__(self, key=None, color='red'):
            self.right = None
            self.left = None
            self.p = None       # parent pointer
            self.key = key
            self.color = color  # 'red' or 'black'

    def __init__(self):
        # One sentinel serves as every leaf and as the root's parent.
        self.NIL = self.Node(key=None, color='black')
        self.root = self.NIL
        self.size = 0
        self.ordered = []  # scratch buffer used by inprint()

    def left_rotate(self, x):
        """Rotate left around x; x.right must not be NIL."""
        y = x.right
        x.right = y.left
        if y.left != self.NIL:
            y.left.p = x
        y.p = x.p
        if x.p == self.NIL:
            self.root = y
        elif x == x.p.left:
            x.p.left = y
        else:
            x.p.right = y
        y.left = x
        x.p = y

    def right_rotate(self, x):
        """Rotate right around x; x.left must not be NIL."""
        y = x.left
        x.left = y.right
        if y.right != self.NIL:
            y.right.p = x
        y.p = x.p
        if x.p == self.NIL:
            self.root = y
        elif x == x.p.right:
            x.p.right = y
        else:
            x.p.left = y
        y.right = x
        x.p = y

    def insert(self, z):
        """Insert key z into the tree."""
        new_node = self.Node(key=z)
        self._insert(new_node)
        self.size += 1

    def _insert(self, z):
        """Standard BST insert of node z, then restore red-black invariants."""
        y = self.NIL
        x = self.root
        while x != self.NIL:
            y = x
            if z.key < x.key:
                x = x.left
            else:
                x = x.right
        z.p = y
        if y == self.NIL:
            self.root = z
        elif z.key < y.key:
            y.left = z
        else:
            y.right = z
        z.left = self.NIL
        z.right = self.NIL
        z.color = "red"
        self.rb_insert_fixup(z)

    def rb_insert_fixup(self, z):
        """Repair red-red violations introduced by inserting red node z."""
        while z.p.color == "red":
            if z.p == z.p.p.left:
                y = z.p.p.right  # uncle
                if y.color == 'red':
                    # Case 1: recolor and move the violation up the tree.
                    z.p.color = "black"
                    y.color = "black"
                    z.p.p.color = "red"
                    z = z.p.p
                else:
                    if z == z.p.right:
                        # Case 2: rotate into case 3.
                        z = z.p
                        self.left_rotate(z)
                    # Case 3: recolor and rotate the grandparent.
                    z.p.color = 'black'
                    z.p.p.color = 'red'
                    self.right_rotate(z.p.p)
            else:
                # Mirror image of the cases above.
                y = z.p.p.left
                if y.color == 'red':
                    z.p.color = "black"
                    y.color = "black"
                    z.p.p.color = "red"
                    z = z.p.p
                else:
                    if z == z.p.left:
                        z = z.p
                        self.right_rotate(z)
                    z.p.color = 'black'
                    z.p.p.color = 'red'
                    self.left_rotate(z.p.p)
        self.root.color = 'black'

    def transplant(self, u, v):
        """Replace the subtree rooted at u with the subtree rooted at v."""
        if u.p == self.NIL:
            self.root = v
        elif u == u.p.left:
            u.p.left = v
        else:
            u.p.right = v
        v.p = u.p

    def remove(self, z):
        """Remove key z from the tree; prints "TreeError" when impossible."""
        if self.size == 0:
            print("TreeError")
            return
        our_node = self.key_search(z)
        if our_node is None:
            # Robustness: key not present — report instead of crashing
            # (the original dereferenced None inside _remove).
            print("TreeError")
            return
        self._remove(our_node)
        self.size -= 1

    def _remove(self, z):
        """CLRS RB-DELETE of node z."""
        y = z
        original_color = y.color
        if z.left == self.NIL:
            x = z.right
            self.transplant(z, z.right)
        elif z.right == self.NIL:
            x = z.left
            # BUG FIX: the original called transplant(z, z.right) here,
            # splicing in the NIL sentinel and silently dropping z's
            # entire left subtree. Splice in the left child.
            self.transplant(z, z.left)
        else:
            # Two children: replace z with its in-order successor y.
            y = self._min_node(z.right)
            original_color = y.color
            x = y.right
            if y.p == z:
                x.p = y
            else:
                self.transplant(y, y.right)
                y.right = z.right
                y.right.p = y
            self.transplant(z, y)
            y.left = z.left
            y.left.p = y
            y.color = z.color
        if original_color == 'black':
            # Removing a black node may violate the black-height invariant.
            self.rb_delete_fixup(x)

    def rb_delete_fixup(self, x):
        """Restore red-black invariants after deleting a black node."""
        while x != self.root and x.color == 'black':
            if x == x.p.left:
                w = x.p.right  # sibling
                if w.color == 'red':
                    # Case 1: convert to one of cases 2-4.
                    w.color = 'black'
                    x.p.color = 'red'
                    self.left_rotate(x.p)
                    w = x.p.right
                if w.left.color == 'black' and w.right.color == 'black':
                    # Case 2: recolor and move the extra black up.
                    w.color = 'red'
                    x = x.p
                else:
                    if w.right.color == 'black':
                        # Case 3: rotate into case 4.
                        w.left.color = 'black'
                        w.color = 'red'
                        self.right_rotate(w)
                        w = x.p.right
                    # Case 4: final recolor + rotation terminates the loop.
                    w.color = x.p.color
                    x.p.color = 'black'
                    w.right.color = 'black'
                    self.left_rotate(x.p)
                    x = self.root
            else:
                # Mirror image of the cases above.
                w = x.p.left
                if w.color == 'red':
                    w.color = 'black'
                    x.p.color = 'red'
                    self.right_rotate(x.p)
                    w = x.p.left
                if w.right.color == 'black' and w.left.color == 'black':
                    w.color = 'red'
                    x = x.p
                else:
                    if w.left.color == 'black':
                        w.right.color = 'black'
                        w.color = 'red'
                        self.left_rotate(w)
                        w = x.p.left
                    w.color = x.p.color
                    x.p.color = 'black'
                    w.left.color = 'black'
                    self.right_rotate(x.p)
                    x = self.root
        x.color = 'black'

    def search(self, x):
        """Return "Found" if key x is in the tree, else "NotFound"."""
        return self._search(self.root, x)

    def _search(self, current_node, target):
        if current_node == self.NIL:
            return "NotFound"
        elif target == current_node.key:
            return "Found"
        elif target < current_node.key:
            return self._search(current_node.left, target)
        else:
            return self._search(current_node.right, target)

    def key_search(self, target):
        """Return the node holding `target`, or None if absent."""
        return self._key_search(self.root, target)

    def _key_search(self, current_node, target):
        if current_node == self.NIL:
            return None
        elif target == current_node.key:
            return current_node
        elif target < current_node.key:
            return self._key_search(current_node.left, target)
        else:
            return self._key_search(current_node.right, target)

    def maximum(self):
        """Return the largest key, or "Empty" for an empty tree."""
        if self.size == 0:
            return "Empty"
        return self._maximum(self.root)

    def _maximum(self, x):
        while x.right != self.NIL:
            x = x.right
        return x.key

    def minimum(self):
        """Return the smallest key, or "Empty" for an empty tree."""
        if self.size == 0:
            return "Empty"
        return self._minimum(self.root)

    def _minimum(self, x):
        while x.left != self.NIL:
            x = x.left
        return x.key

    def _min_node(self, x):
        """Return the minimum node of the subtree rooted at x."""
        while x.left != self.NIL:
            x = x.left
        return x

    def inprint(self):
        """Print all keys in sorted order on one line ("Empty" if none)."""
        if self.size == 0:
            print("Empty")
            return
        self._inprint(self.root)
        for i in range(len(self.ordered) - 1):
            print(self.ordered[i], end=' ')
        print(self.ordered[-1])
        self.ordered = []

    def _inprint(self, x):
        """In-order traversal accumulating keys into self.ordered."""
        if x != self.NIL and x.key != None:
            self._inprint(x.left)
            self.ordered.append(x.key)
            self._inprint(x.right)
'''
rb = RedBlackTree()
rb.insert(1)
rb.inprint()
print()
rb.insert(2)
rb.inprint()
print()
rb.insert(3)
rb.inprint()
print()
rb.insert(10)
rb.insert(5)
rb.inprint()
print()
print()
rb.remove(5)
rb.inprint()
'''
def driver():
    """Read commands from the file named in argv[1] and run them on one tree.

    First line: number of commands; each following line is one of
    insert/remove/search/max/min/inprint with an optional integer argument.
    """
    tree = RedBlackTree()
    with open(sys.argv[1]) as handle:
        count = int(handle.readline().strip())
        for _ in range(count):
            tokens = handle.readline().strip().split()
            op = tokens[0]
            if op == "insert":
                tree.insert(int(tokens[1]))
            elif op == "remove":
                tree.remove(int(tokens[1]))
            elif op == "search":
                print(tree.search(int(tokens[1])))
            elif op == "max":
                print(tree.maximum())
            elif op == "min":
                print(tree.minimum())
            elif op == "inprint":
                tree.inprint()
if __name__ == '__main__':
driver()
|
import os
import cv2
from PIL import Image
import imagehash
import glob
def extract_images(vloc):
    """Save every 30th frame of the video at `vloc` as ./data/frameN.jpg."""
    cam = cv2.VideoCapture(vloc)
    try:
        # Make sure the output folder exists.
        if not os.path.exists('data'):
            os.makedirs('data')
    except OSError:
        print('Error: Creating directory of data')
    currentframe = 0
    while True:
        ret, frame = cam.read()
        if not ret:
            break
        if currentframe % 30 == 0:
            # Every 30th frame gets written out.
            name = './data/frame' + str(currentframe) + '.jpg'
            print('Creating...' + name)
            cv2.imwrite(name, frame)
        currentframe += 1
    # Release all space and windows once done.
    cam.release()
    cv2.destroyAllWindows()
def delete_similar_images():
    """Remove ./data/*.jpg frames that are near-duplicates of the last kept frame.

    "Similar" means average-hash distance below 10; the first image is
    always kept and becomes the initial reference.
    """
    frames = glob.glob("./data/*.jpg")
    cutoff = 10
    if not frames:
        print("Error: No images")
        return
    kept_hash = imagehash.average_hash(Image.open(frames[0]))
    for path in frames[1:]:
        candidate = imagehash.average_hash(Image.open(path))
        if abs(candidate - kept_hash) < cutoff:
            # Too close to the reference frame: drop it.
            os.remove(path)
        else:
            # Distinct enough: keep it and use it as the new reference.
            kept_hash = candidate
if __name__=="__main__":
    # vloc -> input video location
    vloc=r"C:\Users\sidha\Desktop\evideo\trial.mp4"
    extract_images(vloc)
    delete_similar_images()
def solution(prices):
    """Max profit with unlimited transactions: sum every upward day-to-day move.

    Any sequence of profitable trades decomposes into consecutive-day gains,
    so collecting each positive difference is optimal.
    """
    return sum(
        today - yesterday
        for yesterday, today in zip(prices, prices[1:])
        if today > yesterday
    )
def valley_peek(prices):
    """Max profit with unlimited transactions via valley/peak scanning.

    Walk the price list: descend to each local minimum (valley), climb to
    the following local maximum (peak), and bank the difference; repeat
    until the list is exhausted.

    BUG FIXES vs the original:
    - `prices[i+1]` was guarded only by `i < n`, raising IndexError on a
      trailing descent (e.g. [2, 1]);
    - the climbing loop compared against prices[i-1] with an inverted
      condition, so it descended instead of climbing;
    - empty input crashed on `prices[0]`; now returns 0.
    """
    n = len(prices)
    max_profit = 0
    i = 0
    while i < n - 1:
        # Slide down to the local minimum (valley).
        while i < n - 1 and prices[i] >= prices[i + 1]:
            i += 1
        valley = prices[i]
        # Climb up to the local maximum (peak).
        while i < n - 1 and prices[i] <= prices[i + 1]:
            i += 1
        peak = prices[i]
        max_profit += peak - valley
    return max_profit
|
import numpy as np
import io
import sys
import codecs
from collections import defaultdict, Counter
from user import User
import glob
import cPickle as pickle
import os
from twitter_dm.utility.general_utils import read_grouped_by_newline_file
from collections import defaultdict
from textunit import TextUnit
from constraints import get_id_and_value_map
from constraints import IDENTITY_PREFIX, SENTWORD_PREFIX
float_formatter = lambda x: "%.6f" % x
np.set_printoptions(threshold=10000,
linewidth=100,
formatter={'float_kind':float_formatter})
import io
import re
from constraints import IDENTITY_PREFIX, SENTWORD_PREFIX
# Read in sentiment (EPA) word values: one term per line, tab-separated as
# word, evaluation, potency, activity.
sent_to_id = {}
sent_values = {}
for i, x in enumerate(io.open("../data/sentiment_data/clean_epa_terms.txt")):
    x_spl = x.split("\t")
    word = x_spl[0]
    id_val = SENTWORD_PREFIX + str(i)
    sent_to_id[word] = id_val
    # +.0001 nudges values away from exact zero — NOTE(review): presumably
    # to avoid zeros downstream; confirm intent.
    sent_values[id_val + 'e'] = float(x_spl[1]) + .0001
    sent_values[id_val + 'p'] = float(x_spl[2]) + .0001
    sent_values[id_val + 'a'] = float(x_spl[3]) + .0001

# Assign synthetic ids to identities that have no sentiment data.
all_identities = [x.strip() for x in io.open("../data/identity_data/final_identities_list.txt").readlines()]
identities_with_no_sent_data = [x for x in all_identities if x not in sent_to_id]
identity_to_id = {identity: IDENTITY_PREFIX + str(i) for i, identity in enumerate(identities_with_no_sent_data)}
id_to_identity = {v: k for k, v in identity_to_id.items()}

# Grams to combine: all identities plus all sentiment terms.
gram_list = set(identity_to_id.keys()) | set(sent_to_id.keys())
identity_values = {}
def get_textunits_sc(x):
    """Spark flatMap worker (Python 2): turn one dependency-parsed tweet into
    ((uid, identity, epa-dimension), (estimate, 1)) pairs.

    x is a group of tab-separated CoNLL lines for one tweet; fields 10/11 of
    the first line hold the tweet id and user id, and the second-to-last
    field holds the date (NOTE(review): positional — confirm against the
    parser's output format).
    """
    ##### GET THE DATA
    spl = x[0].split("\t")
    uid = spl[11]
    tweet_id = spl[10]
    date = x[0].split("\t")[-2]
    s = TextUnit(uid + "\t" + tweet_id, date,
                 sent_to_id, identity_to_id, gram_list,
                 emoji_info=False,
                 emoticon_to_eval_dim=False,
                 dependency_parsed_conll=x,
                 sent_values=sent_values,
                 hashtag_epa_data=False,
                 vader_dict=False,
                 do_negation_on_full_sentence=False,
                 use_events=False,
                 use_behaviors=False,
                 use_isa=False,
                 use_parent_child=False,
                 use_clause_level=True,
                 use_own_full_sentence=False)
    for k, v in s.identities_to_constraint_string_map.items():
        c0_constraint, c1_constraint = v
        # Constraints arrive either as numbers or as Python expression
        # strings. WARNING: eval() on data — tolerable only because the
        # strings are generated locally by TextUnit, never by users.
        if type(c0_constraint) == int or type(c0_constraint) == float:
            c0 = c0_constraint
        else:
            c0 = eval(c0_constraint)
        if type(c1_constraint) == int or type(c1_constraint) == float:
            c1 = c1_constraint
        else:
            c1 = eval(c1_constraint)
        if c0 == 0:
            # Division by zero below; flag it (original behavior kept).
            print 'arg, not going to work'
        # -c1/(2*c0) is the vertex of the quadratic c0*x^2 + c1*x;
        # the trailing 1 is a count for the later reduceByKey average.
        yield ((uid, id_to_identity[k[:-1]], k[-1]), (-(c1 / (2 * c0)), 1))
# Driver (Python 2): parse the sample file, run the extraction on a local
# Spark cluster, aggregate (sum of estimates, count) per key, and dump the
# result to a TSV used for empirical priors.
dep_parse = read_grouped_by_newline_file("test_data/one_percent_sample.txt")

import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

from pyspark import SparkContext, SparkConf

conf = (SparkConf()
        .setMaster("local[*]")
        .setAppName("My app")
        .set("spark.driver.maxResultSize", "10g"))
sc = SparkContext(conf=conf)

dat = sc.parallelize(dep_parse, 80).flatMap(get_textunits_sc).collect()
# Re-parallelize the collected pairs to reduce by key: sum estimates and counts.
dat = sc.parallelize(dat).reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1])).collect()

m = open("../data/sentiment_data/data_for_empirical_priors.tsv", "w")
for d in dat:
    # One row per key: the key tuple fields followed by (sum, count).
    m.write("\t".join([str(x) for x in [y for y in d[0]] + [r for r in d[1]]]) + "\n")
m.close()
|
import numpy as np
def benjamini_hochberg(pvalues, FDR=0.05):
    """Return the Benjamini-Hochberg significance threshold for `pvalues`.

    Sorts the finite p-values and returns the largest p_(k) satisfying the
    step-up criterion p_(k) <= k/m * FDR, or np.nan when none does (or when
    the input is empty / contains no finite values).

    BUG FIX: the comparison was strict (<); the BH procedure uses <=, so
    p-values exactly on the critical line were wrongly excluded.
    """
    pvalues = np.asarray(pvalues, dtype=float)
    if pvalues.size == 0:
        return np.nan
    finite = pvalues[np.isfinite(pvalues)]  # drop NaN and +/-inf
    if finite.size == 0:
        # Guard: the original divided by zero here (all-NaN input).
        return np.nan
    sorted_values = np.sort(finite, axis=None)
    m = len(sorted_values)
    critical_values = np.arange(1, m + 1) / m * FDR
    idx = np.argwhere(sorted_values <= critical_values).flatten()
    if idx.size == 0:
        return np.nan
    return sorted_values[idx[-1]]
|
from __future__ import unicode_literals
from django.db import models
class CourseManger(models.Manager):
    """Manager with form-style validation for Course submissions."""

    def validate(self, data):
        """Return True when name has at least 5 chars and desc at least 15."""
        name_ok = len(data['name']) >= 5
        desc_ok = len(data['desc']) >= 15
        return name_ok and desc_ok
# Create your models here.
class Course(models.Model):
    # Display name; CourseManger.validate requires at least 5 characters.
    name = models.CharField(max_length=255)
    # Set once when the row is first saved.
    created_at = models.DateTimeField(auto_now_add=True)
    # Free-text description; CourseManger.validate requires at least 15 characters.
    desc = models.TextField()
    # Custom manager exposing validate().
    objects = CourseManger()
|
from django.contrib.auth.tokens import PasswordResetTokenGenerator
class TokenGenerator(PasswordResetTokenGenerator):
    """Token generator for account-activation links.

    BUG FIX: the hook was misspelled `_make_hash_vlaue`, so Django never
    called this override and silently fell back to the base implementation;
    the method must be named `_make_hash_value`.
    """

    def _make_hash_value(self, user, timestamp):
        # Including is_active invalidates the token once the account activates.
        return (str(user.pk) + str(timestamp) + str(user.is_active))


account_activation_token = TokenGenerator()
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (default, Aug 31 2020, 07:22:35)
# [Clang 10.0.0 ]
# Embedded file name: opponent.py
# Compiled at: 2020-08-28 21:03:54
# Size of source mod 2**32: 6992 bytes
import math
from collections import defaultdict
import time, random
random.seed(2020)
from fishing_game_core.shared import ACTION_TO_STR
class MinimaxModel(object):
    def __init__(self, initial_data, space_subdivisions, use_lookups=True):
        """Cache the fish score/type tables from the game's initial data.

        use_lookups is accepted for API compatibility but unused here
        (NOTE(review): dead parameter in this decompiled version).
        """
        self.get_fish_scores_and_types(initial_data)
        self.space_subdivisions = space_subdivisions
def get_fish_scores_and_types(self, data):
data.pop('game_over', None)
self.fish_scores = {int(key.split('fish')[1]):value['score'] for key, value in data.items()}
scores_to_type = {s:t for t, s in enumerate(set(self.fish_scores.values()))}
self.fish_types = {f:scores_to_type[s] for f, s in self.fish_scores.items()}
    def next_move(self, node):
        """Pick the best move for the current state via depth-limited alpha-beta.

        Returns the move as a string (via ACTION_TO_STR). Short-circuits on a
        forced single move, and bails out with the best move so far when the
        per-move time budget is exhausted.
        """
        tree_depth = 6
        max_time = 0.1
        self.start = time.time()
        self.max_time = max_time * 0.999999  # small safety margin under the budget
        self.max_player = node.state.player
        self.max_depth = tree_depth
        children = node.compute_and_get_children()
        if len(children) == 1:
            # Forced move: no search needed.
            return ACTION_TO_STR[children[0].move]
        alpha = -math.inf
        beta = math.inf
        best_value = -math.inf
        best_move = 0
        children_values = [-math.inf] * len(children)
        for i, child in enumerate(children):
            # NOTE(review): the search is seeded with depth=tree_depth while
            # alpha_beta_prunning treats depth == self.max_depth as the leaf
            # cutoff, so each child appears to be scored by its heuristic
            # only — confirm whether depth should start at 0 instead.
            value = self.alpha_beta_prunning(child, alpha, beta, depth=tree_depth)
            children_values[i] = value
            if value > best_value:
                best_value = value
                best_move = ACTION_TO_STR[child.move]
            # NOTE(review): overwrites alpha (not max(alpha, value)) —
            # decompiled control flow kept as-is.
            alpha = value
            if time.time() - self.start > self.max_time:
                return best_move
        if best_value == math.inf:
            return best_move
        return best_move
def alpha_beta_prunning(self, node, alpha, beta, depth):
if depth == self.max_depth:
return self.compute_heuristic(node.state)
children = node.compute_and_get_children()
if len(children) == 0:
return self.compute_heuristic(node.state)
player = node.state.player
if player == self.max_player:
best_value = -math.inf
best_move = 0
for child in children:
value = self.alpha_beta_prunning(child, alpha, beta, depth + 1)
if value > best_value:
best_value = value
best_move = child.move
alpha = max(alpha, best_value)
if not best_value == math.inf:
if alpha >= beta:
break
if time.time() - self.start > self.max_time:
break
else:
best_value = math.inf
best_move = 0
for child in children:
value = self.alpha_beta_prunning(child, alpha, beta, depth + 1)
if value < best_value:
best_value = value
best_move = child.move
beta = min(beta, best_value)
if not best_value == -math.inf:
if alpha >= beta:
break
if time.time() - self.start > self.max_time:
break
return best_value
def compute_heuristic(self, state, only_scores=False):
scores = state.get_player_scores()
hook_positions = state.get_hook_positions()
fish_positions = state.get_fish_positions()
caught_fish = state.get_caught()
score_based_value = self.get_score_based_value(caught_fish, scores)
n_fish = len(fish_positions)
n_caught = int(caught_fish[0] != None) + int(caught_fish[1] != None)
if n_fish == 0 or n_fish == n_caught:
if score_based_value > 0:
return math.inf
if score_based_value < 0:
return -math.inf
return 0.0
if only_scores:
return score_based_value
value_max_player = self.get_proximity_value(hook_positions, fish_positions, caught_fish, self.max_player)
value_min_player = self.get_proximity_value(hook_positions, fish_positions, caught_fish, 1 - self.max_player)
proximity_value = value_max_player - value_min_player
return score_based_value + proximity_value
def get_score_based_value(self, caught_fish, scores):
extra_score_max = self.fish_scores[caught_fish[self.max_player]] if caught_fish[self.max_player] is not None else 0
extra_score_min = self.fish_scores[caught_fish[(1 - self.max_player)]] if caught_fish[(1 - self.max_player)] is not None else 0
value = 100 * (scores[self.max_player] - scores[(1 - self.max_player)] + extra_score_max - extra_score_min)
return value
def get_proximity_value(self, hook_positions, fish_positions, caught_fish, player):
value = 0.0
for fish, fish_position in fish_positions.items():
if fish in caught_fish:
continue
else:
distance_x = min(abs(fish_position[0] - hook_positions[player][0]), self.space_subdivisions - abs(fish_position[0] - hook_positions[player][0]))
distance_y = abs(fish_position[1] - hook_positions[player][1])
distance = distance_x + distance_y
value += float(self.fish_scores[int(fish)]) * math.exp(-2 * distance)
return value
class StateRepresentative(object):
    """Plain record of a search result: depth reached, value, and best move."""

    def __init__(self, explored_depth, value, best_move):
        # Pure data holder — store the three fields verbatim.
        self.explored_depth, self.value, self.best_move = (
            explored_depth,
            value,
            best_move,
        )
# okay decompiling opponent.pyc
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""DHT 温度湿度センサー."""
import logging
from datetime import datetime
import Adafruit_DHT as DHT
from db import MongoDB
class Dht(MongoDB):
    """DHT temperature/humidity sensor IO."""

    def __init__(self):
        """Initializer."""
        super().__init__()
        # Sensor: GPIO pin the DHT11 data line is wired to.
        self.PIN = 4
        # MongoDB: select the 'dht' database on the inherited client.
        db_name = 'dht'
        self.db = self.client[db_name]

    def __str__(self):
        return "dht"

    def get_sensor(self):
        """Read the sensor once (read_retry retries internally).

        Returns
        -------
        dict
            Readings: 'dtemp' (temperature), 'humi' (humidity) and a
            'timestamp'; None when the read fails.
        """
        try:
            # Adafruit read_retry returns (humidity, temperature).
            humi, temp = DHT.read_retry(DHT.DHT11, self.PIN)
            return {
                'dtemp': float(temp),
                'humi': float(humi),
                'timestamp': datetime.now()
            }
        except Exception as e:
            logging.error('SensorError: {}'.format(e))
            return None
|
'''
Created on Jun 27, 2016
@author: KatherineMJB
'''
class Incrementer:
    """Pairwise adder over a flat list, carrying in a fixed base.

    Each consecutive pair (l, r) is reduced to (carry, place) where
    carry = (l + r) // base and place = (l + r) % base, computed with
    TensorFlow ops.
    """

    def __init__(self, base):
        # Numeric base used for the carry/place decomposition.
        self.base = base

    def run(self, arr):
        """Process `arr` as consecutive (l, r) pairs; return the flat
        [carry, place, carry, place, ...] result list.

        NOTE(review): assumes len(arr) is even — an odd length raises
        IndexError on arr[i + 1]; confirm callers guarantee this.
        """
        ret = []
        for i in range(0, len(arr), 2):
            carry, place = self.ex(arr[i], arr[i + 1])
            ret.append(carry)
            ret.append(place)
        return ret

    def ex(self, l, r):
        """Return (carry, place) tensors for l + r in self.base."""
        # BUG FIX: `tf` was imported in the class body (a class attribute is
        # not visible as a bare name inside methods) and `base` was
        # referenced without `self.` — both raised NameError on any call.
        import tensorflow as tf
        total = tf.add(l, r)
        return tf.div(total, self.base), tf.mod(total, self.base)
import logging
def setLogger(logg):
    """Configure `logg` for INFO-level console output and return it.

    Args:
        logg: a logging.Logger instance to configure in place.

    Returns:
        The same logger, for chaining.
    """
    logg.setLevel(logging.INFO)
    # BUG FIX: guard against repeated calls on the same logger — previously
    # every call attached a fresh StreamHandler, duplicating each log line.
    if not logg.handlers:
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        logg.addHandler(ch)
    return logg
|
# Generated by Django 3.0.7 on 2020-07-02 05:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make OrderItem.user_order a nullable
    ForeignKey to cart.UserOrder (rows cascade-delete with the order)."""

    dependencies = [
        ('cart', '0006_auto_20200702_1307'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderitem',
            name='user_order',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cart.UserOrder'),
        ),
    ]
|
import plotly
import plotly.offline as offline
import plotly.graph_objs as go
import json
import sys
def plot(input, output):
    """Render a bar chart of the distance histogram to `<output>.html`.

    Args:
        input: path to a JSON file with a "distanceHistogram" object holding
            parallel "labels" and "values" arrays.  (Parameter name shadows
            the builtin; kept for backward compatibility with keyword callers.)
        output: output path without extension; ".html" is appended.
    """
    # BUG FIX: the input file was opened and never closed; `with` guarantees
    # it is closed even if json.load raises.
    with open(input, 'r') as dataFile:
        data = json.load(dataFile)
    # set up relevant data arrays
    labels = data["distanceHistogram"]["labels"]
    vals = data["distanceHistogram"]["values"]
    # construct plots
    distanceHistogram = go.Bar(x=labels, y=vals, name='Number of elements')
    plots = [distanceHistogram]
    layout = dict(title="Histogram of Distances from Found Global Minima")
    offline.plot(dict(data=plots, layout=layout), filename=output + ".html", auto_open=False)


if __name__ == "__main__":
    plot(sys.argv[1], sys.argv[2])
# Project Euler 10: sum of all primes below two million.
# BUG FIX / MODERNIZATION: the original was Python 2 (print statement,
# xrange) and tested every odd number by trial division against every
# previously found prime — O(n * pi(n)).  A sieve of Eratosthenes finds
# the same primes in O(n log log n).
LIMIT = 2000000
sieve = [True] * LIMIT
sieve[0] = sieve[1] = False
for i in range(2, int(LIMIT ** 0.5) + 1):
    if sieve[i]:
        # Mark multiples starting at i*i (smaller multiples already marked).
        sieve[i * i::i] = [False] * len(sieve[i * i::i])
primes = [i for i in range(LIMIT) if sieve[i]]
solution = sum(primes)
# Preserve the original progress output: each odd prime echoed with '\r'.
for x in primes[1:]:
    print("%d\r" % x)
print(solution)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
from paddle.vision.datasets import Cifar10
from .preprocess import build_transforms
from .builder import DATASETS
from ..utils.misc import accuracy
from PIL import Image
@DATASETS.register()
class CIFAR10(Cifar10):
    """CIFAR-10 wrapper for contrastive / self-supervised training.

    Args:
        datafile: path to the dataset archive (None lets paddle download it).
        mode: dataset split, e.g. 'train' or 'test'.
        return_label: when True, __getitem__ also returns the class label.
        return_two_sample: when True, __getitem__ returns two independently
            augmented views of the same image (takes precedence over
            return_label).
        transforms: transform config passed to build_transforms.
        download: whether to download the archive when datafile is missing.
    """

    def __init__(self,
                 datafile=None,
                 mode='train',
                 return_label=False,
                 return_two_sample=True,
                 transforms=None,
                 download=True):
        transform = build_transforms(transforms) if transforms is not None else None
        super(CIFAR10, self).__init__(datafile, mode=mode, transform=transform, download=download)
        self.return_label = return_label
        self.return_two_sample = return_two_sample

    def __getitem__(self, idx):
        image, label = self.data[idx]
        # Stored flat; reshape to CHW then convert to HWC for PIL/transforms.
        image = np.reshape(image, [3, 32, 32])
        image = image.transpose([1, 2, 0])
        if self.backend == 'pil':
            image = Image.fromarray(image.astype('uint8'))
        if self.return_two_sample:
            # Two independent augmentations of the same source image.
            image1 = self.transform(image)
            image2 = self.transform(image)
            return image1, image2
        if self.transform is not None:
            image = self.transform(image)
        if self.return_label:
            return image, np.array(label).astype('int64')
        return image

    def evaluate(self, preds, labels, topk=(1, 5)):
        """Return {'acc1', 'acc5'} accuracy of `preds` against `labels`."""
        eval_res = {}
        eval_res['acc1'], eval_res['acc5'] = accuracy(preds, labels, topk)
        return eval_res
@DATASETS.register()
class CIFAR100(CIFAR10):
    """CIFAR-100 wrapper: identical sampling behaviour to CIFAR10, only the
    download URL/MD5 and archive record flags differ (injected through the
    `_init_url_md5_flag` hook of paddle's Cifar10 base class)."""

    def __init__(self,
                 datafile=None,
                 mode='train',
                 return_label=False,
                 return_two_sample=True,
                 transforms=None,
                 download=True):
        super(CIFAR100, self).__init__(datafile, mode, return_label, return_two_sample, transforms, download)

    def _init_url_md5_flag(self):
        # Called by the paddle Cifar10 base class to decide what to download
        # and which records to read from the archive.
        URL_PREFIX = 'https://dataset.bj.bcebos.com/cifar/'
        CIFAR100_URL = URL_PREFIX + 'cifar-100-python.tar.gz'
        CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'
        MODE_FLAG_MAP = {
            'train10': 'data_batch',
            'test10': 'test_batch',
            'train100': 'train',
            'test100': 'test'
        }
        self.data_url = CIFAR100_URL
        self.data_md5 = CIFAR100_MD5
        # NOTE(review): self.mode is suffixed with '100' here, so only the
        # 'train100'/'test100' keys are reachable — the '*10' entries look
        # dead; confirm.
        self.flag = MODE_FLAG_MAP[self.mode + '100']
#!/usr/bin/env python
"""
Origin: http://networkx.github.com/documentation/latest/examples/drawing/giant_component.html
This example illustrates the sudden appearance of a
giant connected component in a binomial random graph.
Requires pygraphviz and matplotlib to draw.
"""
# Copyright (C) 2006-2008
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
try:
    import matplotlib.pyplot as plt
except ImportError:
    raise
import networkx as nx
import math
import os
import pickle

# Prefer a graphviz layout when pygraphviz is available; otherwise fall back
# to the (slower-looking) spring layout.
# MODERNIZATION: `from networkx import graphviz_layout` moved; the current
# location is networkx.drawing.nx_agraph.
try:
    from networkx.drawing.nx_agraph import graphviz_layout
    layout = graphviz_layout
except ImportError:
    print("PyGraphviz not found; drawing with spring layout; will be slow.")
    layout = nx.spring_layout

#n=150 # 150 nodes
n = int(os.environ['n'])
# p value at which giant component (of size log(n) nodes) is expected
p_giant = 1.0 / (n - 1)
# p value at which graph is expected to become completely connected
p_conn = math.log(n) / float(n)

# the following range of p values should be close to the threshold
#pvals=[0.003, 0.006, 0.008, 0.015]
#region=220 # for pylab 2x2 subplot layout
#plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
#for p in pvals:
p = float(os.environ['p'])
G = nx.binomial_graph(n, p)
pos = layout(G)
#region+=1
#plt.subplot(region)
plt.title("n = %d, p = %5.4f" % (n, p))
nx.draw(G, pos, with_labels=False, node_size=10)

# identify largest connected component
# BUG FIX: nx.connected_component_subgraphs was removed in networkx 2.4;
# build the subgraphs from connected_components, largest first.
components = sorted(nx.connected_components(G), key=len, reverse=True)
Gcc = [G.subgraph(c) for c in components]
G0 = Gcc[0]
nx.draw_networkx_edges(G0, pos, edge_color='r', width=6.0)
# show other connected components
for Gi in Gcc[1:]:
    if len(Gi) > 1:
        nx.draw_networkx_edges(Gi, pos, edge_color='r', alpha=0.3, width=5.0)

# dump graph in human-readable form
# BUG FIX: Python 3 print function and dict.items(); the loop variable no
# longer clobbers the node count `n`.
print("Generated binomial graph (n=%d, p=%5.4f):" % (n, p))
for node, neighbors in nx.to_dict_of_lists(G).items():
    print(" %s\t%s" % (node, neighbors))
print()

# as well as a pickled form
# BUG FIX: the Python 2 `file()` builtin is gone; pickle needs binary mode.
with open("graph.pickle", "wb") as fh:
    pickle.dump(G, fh)
print("Created graph.pickle")

plt.savefig("giant_component.png")
#plt.show() # display
print("Created giant_component.png")
|
from datetime import datetime
from typing import Optional
from codemate.block import Block
from codemate.exceptions import SaveFileError
def generate_header() -> str:
    """
    Generates a file header.

    Returns:
        str: The generated header of a file — a dashed banner, the
        generation timestamp, and a closing dashed line.
    """
    banner = " Warning generated file ".center(90, "-")
    timestamp_line = f"Generated at: {datetime.now().isoformat()}"
    closing = "".center(90, "-")
    return "\n".join((banner, timestamp_line, closing))
class File(Block):
    """
    Creates a Python file syntax.

    Args:
        header (Optional[str]): A block string that represents the file's
            header.  When omitted, a header is generated for this instance;
            pass None or "" to create a file without one.
    """

    # BUG FIX: the original default was `header=generate_header()`, which is
    # evaluated once at import time — every File created in the process shared
    # the same frozen "Generated at" timestamp.  A sentinel defers generation
    # to construction time while keeping None/"" meaning "no header".
    _GENERATE_HEADER = object()

    def __init__(self, header=_GENERATE_HEADER) -> None:
        super().__init__()
        if header is File._GENERATE_HEADER:
            header = generate_header()
        if header:
            self.add_doc_block(block=header)

    def save(self, path: str, use_black: bool = True) -> None:
        """
        Save the generated Python file in a given location.

        Args:
            path (str): The path to the location that we want to save the file at.
            use_black (bool): When true black linter will be used to format the generated
             Python code.

        Raises:
            SaveFileError: When the generated Python code file can't be created.
        """
        try:
            with open(path, "w", encoding="utf-8") as file:
                if use_black:
                    file.write(self.use_black())
                else:
                    file.write(self.syntax())
        except OSError as error:
            raise SaveFileError("Can't create the generated file") from error
|
#!/usr/bin/env python
# coding=utf-8
import hashlib
import imghdr
from webapp.web import BaseHandler
from model import dbapi
MAX_FILE_SIZE = 5000000 #upload file size setting < 5MB
class UploadHandler(BaseHandler):
    """Handle avatar image uploads (jpeg/png/gif, smaller than 5 MB)."""

    def check_xsrf(self):
        # Redirect to the error page when the XSRF cookie does not match.
        if self.check_xsrf_cookie() == False:
            self.redirect("ftypeerror")

    def check(self):
        """Load the current user's profile from the session cookie, or clear
        the session and bounce to the landing page."""
        email = self.get_secure_cookie("email")
        user = dbapi.User()
        if email and user.get_user(email) == 0:
            profile = user.get_user_all(email)
            if profile:
                self.id = profile[0]
                self.time = profile[4]
                self.email = email
        else:
            self.clear_cookies()
            self.redirect("/")

    def get_filesize(self, file):
        """Return the size in bytes of a seekable file object, restoring the
        read position to the start."""
        file.seek(0, 2)
        size = file.tell()
        file.seek(0)
        return size

    def post(self):
        self.check_xsrf()
        self.check()
        fileitem = self.request.files["filename"]
        if fileitem.filename:
            #fn = os.path.basename(fileitem.filename)
            filetype = imghdr.what(fileitem.file)
            filesize = self.get_filesize(fileitem.file)
            if filesize > MAX_FILE_SIZE:
                # BUG FIX: return after redirecting — the original fell
                # through and could still save the oversized file.
                self.redirect("/ftypeerror")
                return
            # BUG FIX: the original used `filetype is "jpeg"` etc., which
            # compares object identity, not string equality, and is not
            # guaranteed to match; use membership instead.
            if filetype in ("jpeg", "png", "gif"):
                m = hashlib.md5()
                # NOTE(review): md5.update needs bytes in Python 3 — assumes
                # the secure cookie value is bytes; confirm.
                m.update(self.email)
                email_md5 = m.hexdigest()
                # BUG FIX: close the output file deterministically instead of
                # leaking the handle from a bare open().write().
                with open("images/" + email_md5, "wb") as out:
                    out.write(fileitem.file.read())
                self.redirect("/user")
            else:
                self.redirect("/ftypeerror")
        else:
            self.redirect("/user")
|
import asyncio
from functools import wraps
def shielded(func):
    """
    Protects a coroutine from cancellation.

    The wrapped coroutine runs inside :func:`asyncio.shield`, so cancelling
    the awaiting caller does not cancel the inner task.
    """
    @wraps(func)
    async def inner(*args, **kwargs):
        shielded_call = asyncio.shield(func(*args, **kwargs))
        return await shielded_call

    return inner
|
#!/usr/bin/env python
#%%
import basis_set_exchange as bse
import click
# %%
@click.command()
@click.argument("element")
def find_compatible_basissets(element):
    """Print basis sets compatible with ELEMENT, smallest first.

    A basis set qualifies when every shell for the element is a GTO with
    angular momentum <= 1 (s and p functions only) and the total function
    count is at least half the element's atomic number.
    """
    found = {}
    # BUG FIX: the atomic number was looked up for the hard-coded symbol
    # "N" (nitrogen) instead of the requested element.
    Z = bse.lut.element_Z_from_sym(element)
    for basis in bse.get_all_basis_names():
        try:
            db = bse.get_basis(basis, element)
        except Exception:
            # Basis set does not cover this element.
            continue
        try:
            shells = db["elements"][str(Z)]["electron_shells"]
        except Exception:
            # Unexpected layout for this entry; skip it.
            continue
        works = True
        count = 0
        for shell in shells:
            if shell["function_type"] != "gto":
                works = False
            for angmom, coeffs in zip(shell["angular_momentum"], shell["coefficients"]):
                if angmom > 1:
                    works = False
                if angmom == 0:
                    count += 1  # one s function
                if angmom == 1:
                    count += 3  # three p functions
        # Require at least Z/2 functions (spatial orbitals for Z electrons).
        if count * 2 < Z:
            works = False
        if works:
            found[basis] = count
    for k, v in sorted(found.items(), key=lambda x: x[1]):
        print(k)


if __name__ == "__main__":
    find_compatible_basissets()
|
# *Exercise 8*
# Write a function that takes an ARRAY and prints the item in the array in reversed order. This should be
# done recursively! (edited)
def item_in_reverse_order(array, size_of_array):
    """Recursively print array[size_of_array] .. array[0], one per line.

    Args:
        array: sequence to traverse.
        size_of_array: index of the first element to print (pass
            len(array) - 1 for the whole array).

    Returns:
        list: the elements in reversed order.
    """
    # BUG FIX: the original returned at `size_of_array <= 0`, so index 0 was
    # never printed; the recursive branch returned None (the collecting
    # `else` was unreachable) and a one-item list was printed at each level.
    if size_of_array < 0:
        return []
    item = array[size_of_array]
    print(item)
    return [item] + item_in_reverse_order(array, size_of_array - 1)


array = ["r", "e", "t", "s", "e", "v"]
size_of_array = len(array) - 1
print(item_in_reverse_order(array, size_of_array))
#!/usr/bin/python3
import ROOT as rt
from ROOT import gPad, gROOT, gStyle, TFile
from ROOT import TGraphAsymmErrors, TF1
import sys
sys.path.append('../')
import plot_utils as ut
from models import load_starlight_y
#_____________________________________________________________________________
def main():
    """Cross section for coherent J/psi production in a |y| interval.

    Subtracts the gamma-gamma and incoherent components from the measured
    |y| distribution, divides by acceptance-times-efficiency (AxE) and a
    product of trigger/veto/luminosity factors, and prints the cross
    section in micro barn.  The commented-out constant blocks correspond
    to alternative |y| sub-intervals; numeric constants come from offline
    mass fits and efficiency studies (not verifiable from this file).
    """
    #bins in |y|
    #ybins = rt.vector(rt.double)([0, 0.2, 0.5, 1])
    #ybins = rt.vector(rt.double)([-1, 1])
    #|y| interval
    aymin = 0
    aymax = 1
    #aymax = 0.2
    #aymin = 0.2
    #aymax = 0.5
    #aymin = 0.5
    #aymax = 1
    #number of gamma-gamma from mass fit
    ngg = 181 # |y| < 1
    #ngg = 74 # |y| < 0.2
    #ngg = 62 # 0.2 < |y| < 0.5
    #ngg = 27 # 0.5 < |y| < 1
    #incoherent shape
    inc1 = 923.2 # |y| < 1 80.6/2 = 40.3
    inc2 = 3.304
    #inc1 = 270.8 # |y| < 0.2 24.4/0.4 = 61
    #inc2 = 3.77
    #inc1 = 328.35 # 0.2 < |y| < 0.5 30.9/0.6 = 51.5
    #inc2 = 2.92
    #inc1 = 285.88 # 0.5 < |y| < 1 26.2/1 = 26.2
    #inc2 = 3.43
    #maximal |t|
    tmax = 0.109
    #mass interval
    mmin = 2.75
    mmax = 3.2
    lumi = 13871.907 # lumi in inv. ub
    #correction to luminosity for ana/triggered events
    ratio_ana = 3420950./3694000
    #scale the lumi for |z| around nominal bunch crossing
    ratio_zdc_vtx = 0.502
    Reta = 0.503 # pseudorapidity preselection
    #Reta = 1.
    trg_eff = 0.67 # bemc trigger efficiency
    ratio_tof = 1.433 # tof correction to efficiency
    bbceff = 0.97 # BBC veto inefficiency
    zdc_acc = 0.49 # ZDC acceptance to XnXn 0.7
    #zdc_acc = 1.
    br = 0.05971 # dielectrons branching ratio
    #Starlight
    gSlight = load_starlight_y()
    #data
    basedir = "../../../star-upc-data/ana/muDst/muDst_run1/sel5"
    infile = "ana_muDst_run1_all_sel5z.root"
    #MC
    basedir_sl = "../../../star-upc-data/ana/starsim/slight14e/sel5"
    infile_sl = "ana_slight14e1x3_s6_sel5z.root"
    #
    basedir_bgen = "../../../star-upc-data/ana/starsim/bgen14a/sel5"
    infile_bgen = "ana_bgen14a1_v0_sel5z_s6.root"
    #
    basedir_gg = "../../../star-upc-data/ana/starsim/slight14e/sel5"
    infile_gg = "ana_slight14e2x1_sel5_nzvtx.root"
    #open the inputs
    inp = TFile.Open(basedir+"/"+infile)
    tree = inp.Get("jRecTree")
    #
    inp_sl = TFile.Open(basedir_sl+"/"+infile_sl)
    tree_sl_gen = inp_sl.Get("jGenTree")
    #
    inp_gg = TFile.Open(basedir_gg+"/"+infile_gg)
    tree_gg = inp_gg.Get("jRecTree")
    #
    inp_bgen = TFile.Open(basedir_bgen+"/"+infile_bgen)
    tree_bgen_gen = inp_bgen.Get("jGenTree")
    #load the data
    # Generator-level selection: pT^2 < tmax and |y| inside the interval.
    mcsel = "jGenPt*jGenPt<{0:.3f}".format(tmax)
    mcsel += "&& TMath::Abs(jGenY)>{0:.3f} && TMath::Abs(jGenY)<{1:.3f}".format(aymin, aymax)
    # Reconstructed selection: mass window, pT^2 < tmax, |y| interval.
    datasel = "jRecM>{0:.3f} && jRecM<{1:.3f} && jRecPt*jRecPt<{2:.3f}".format(mmin, mmax, tmax)
    datasel += "&& TMath::Abs(jRecY)>{0:.3f} && TMath::Abs(jRecY)<{1:.3f}".format(aymin, aymax)
    #hY = ut.prepare_TH1D_vec("hY", ybins)
    # Single-bin histogram over the |y| interval.
    hY = ut.prepare_TH1D_n("hY", 1, aymin, aymax)
    #tree.Draw("jRecY >> hY" , datasel)
    tree.Draw("TMath::Abs(jRecY) >> hY" , datasel)
    #for ibin in range(1, hY.GetNbinsX()+1):
    #print(ibin, hY.GetBinLowEdge(ibin), hY.GetBinLowEdge(ibin)+hY.GetBinWidth(ibin))
    #subtract gamma-gamma and incoherent components
    hY.Sumw2()
    print("Data entries:", hY.Integral())
    #gamma-gamma component
    #h_gg = ut.prepare_TH1D_vec("h_gg", ybins)
    h_gg = ut.prepare_TH1D_n("h_gg", 1, aymin, aymax)
    #tree_gg.Draw("jRecY >> h_gg" , datasel)
    tree_gg.Draw("TMath::Abs(jRecY) >> h_gg" , datasel)
    # Normalize the MC gamma-gamma shape to the fitted yield ngg.
    ut.norm_to_num(h_gg, ngg)
    print("Gamma-gamma component:", h_gg.Integral())
    #subtract the gamma-gamma component
    hY.Add(h_gg, -1)
    print("Entries after gamma-gamma subtraction:", hY.Integral())
    #incoherent functional shape
    func_incoh_pt2 = TF1("func_incoh", "[0]*exp(-[1]*x)", 0., 10.)
    func_incoh_pt2.SetParameters(inc1, inc2)
    #load the incoherent shape to retrieve its normalization
    inc_bins = ut.get_bins_vec_2pt(0.004, 0.01, 0, 0.109, 0.06)
    hPtIncoh = ut.prepare_TH1D_vec("hPtIncoh", inc_bins)
    ut.fill_h1_tf(hPtIncoh, func_incoh_pt2, rt.kRed)
    #subtract the incoherent component
    h_inc = hY.Clone()
    ut.norm_to_num(h_inc, hPtIncoh.Integral())
    print("Incoherent entries:", h_inc.Integral())
    hY.Add(h_inc, -1)
    print("Entries after all subtractions:", hY.GetBinContent(1), "+/-", hY.GetBinError(1))
    #AxE for coherent signal
    tree_mc = tree_sl_gen
    #tree_mc = tree_bgen_gen
    # Draw with an empty expression just counts the entries passing the cut.
    nall = tree_mc.Draw("", mcsel)
    nsel = tree_mc.Draw("", "jAccept==1"+"&&"+datasel)
    #selections to reproduce the deconv method:
    # "(jGenPt*jGenPt<{0:.3f})".format(tmax)
    # "jAccept==1"+"&&(jRecPt*jRecPt<{0:.3f})".format(tmax)
    axe = nsel/nall
    print("Numeric AxE:", axe)
    #scale the luminosity
    lumi_scaled = lumi*ratio_ana*ratio_zdc_vtx
    #print("lumi_scaled:", lumi_scaled)
    #denominator for the cross section in micro barn
    den = Reta*br*zdc_acc*trg_eff*bbceff*ratio_tof*lumi_scaled
    #calculate the cross section
    # Factor 2 accounts for folding -y and +y into |y|.
    sigma = hY.GetBinContent(1)/(axe*den*hY.GetBinWidth(1)*2)
    sigma_err = hY.GetBinError(1)/(axe*den*hY.GetBinWidth(1)*2)
    print("Sigma (micro barn):", sigma, "+/-", sigma_err)
    return
    # NOTE(review): everything below is unreachable because of the `return`
    # above — kept as development/plotting code; confirm it can be removed.
    #apply the denominator and bin width
    #ut.norm_to_den_w(hY, den)
    #print("Integrated sigma from data (mb):", hY.GetBinContent(1), hY.GetBinError(1)) # hY.Integral("width")
    #plot the data (development)
    can = ut.box_canvas()
    hY.Draw()
    gSlight.Draw("lsame")
    #gSlight.Draw("al")
    gPad.SetGrid()
    ut.invert_col(rt.gPad)
    can.SaveAs("01fig.pdf")
#_____________________________________________________________________________
if __name__ == "__main__":
    # Run ROOT headless with the plotting style used by the other scripts.
    gROOT.SetBatch()
    gStyle.SetPadTickX(1)
    gStyle.SetFrameLineWidth(2)
    main()
|
import asyncio
from logging import getLogger
from math import ceil
from typing import Callable, Dict, Optional, Sequence, Set, Tuple, Union
from aiohttp import ClientSession, ClientTimeout
from lxml.html import document_fromstring
from holodule.errors import HTTPStatusError
from holodule.schedule import Schedule
CHUNK_SIZE = 50
YOUTUBE_API = "https://www.googleapis.com/youtube/v3/videos"
TARGET = [
"all",
"hololive",
"holostars",
"indonesia",
"english",
"holostars_english",
]
log = getLogger(__name__)
class Holodule:
    """Scrapes the holodule schedule pages, enriches entries with YouTube
    metadata, and dumps one schedule file per target.

    Args:
        holodule_page: base URL of the holodule schedule site.
        youtube_key: YouTube Data API key.
        save_dir: directory the schedules are dumped into.
    """

    def __init__(self, holodule_page: str, youtube_key: str, save_dir: str) -> None:
        self.page_url = holodule_page
        self.yt_key = youtube_key
        self.save_dir = save_dir
        self.session = None
        self.videos = {}  # video id -> YouTube API item

    async def run(self) -> int:
        """Run one full scrape cycle; returns 0 on success, 1 on failure."""
        # ClientSession.__aenter__ does nothing
        # but ClientSession.__aexit__ closes this session, so we have to do that.
        # https://github.com/aio-libs/aiohttp/blob/fe647a08d1acb53404b703b46b37409602ab18b4/aiohttp/client.py#L986
        self.session = ClientSession(
            timeout=ClientTimeout(total=30),
            headers={"User-Agent": "sarisia/holodule-ics"},
        )
        status = 0
        try:
            await self.do_run()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # asyncio.CancelledError and prevented clean task cancellation.
            log.error("Failed: ", exc_info=True)
            status = 1
        finally:
            await self.session.close()
        return status

    async def do_run(self) -> None:
        """Fetch all schedule pages, resolve their videos, dump each schedule."""
        pages_html = await self.get_pages(TARGET)
        schedules: Dict[str, Schedule] = {}
        for t, p in pages_html.items():
            index = document_fromstring(p)
            elem = index.xpath('//*[@id="all"]')
            if elem:
                log.info(f"Found target: {t}")
                schedules[t] = Schedule(t, elem[0])
        # currently 'all' has all video ids so fetch this
        video_ids = schedules["all"].video_ids
        await self.get_videos(video_ids)
        for s in schedules.values():
            s.assign_youtube(self.videos)
            log.info(f"Dump {s.name}...")
            try:
                s.dump(self.save_dir)
            except Exception:
                # One failed dump should not abort the remaining schedules.
                log.error(f"Failed to dump {s.name}: ", exc_info=True)
        log.info("Done!")

    async def get_page(self, target: str = "") -> Optional[Tuple[str, str]]:
        """Fetch one schedule page; returns (target, html) or None on failure."""
        log.info(f"({target}) getting page...")
        try:
            async with self.session.get(f"{self.page_url}/{target}") as resp:
                text = await resp.text()
                if resp.status != 200:
                    log.error(f"({target}) failed to get: {resp.status} {text}'")
                    return
                return target, text
        except Exception:
            # BUG FIX: bare `except:` would also swallow cancellation.
            log.error("unhandled: ", exc_info=True)

    async def get_pages(self, targets: Sequence[str]) -> Dict[str, str]:
        """Fetch all targets concurrently; failed pages are simply omitted."""
        pages: Dict[str, str] = {}  # target: content
        tasks = [self.get_page(t) for t in targets]
        res = [r for r in await asyncio.gather(*tasks) if r]
        for r in res:
            target, content = r
            pages[target] = content
        return pages

    async def get_videos(self, video_ids: Set[str]) -> None:
        """Resolve metadata for all ids, batching CHUNK_SIZE per API call."""
        # divide to chunks each contains 50 videos
        videos = list(video_ids)
        tasks = []
        for i in range(ceil(len(videos) / CHUNK_SIZE)):
            tasks.append(
                self.do_get_videos(
                    videos[i * CHUNK_SIZE : min(len(videos), (i + 1) * CHUNK_SIZE)]
                )
            )
        results = await asyncio.gather(*tasks)
        for resp in results:
            for item in resp["items"]:
                self.videos[item["id"]] = item

    async def do_get_videos(self, video_ids: Sequence[str]) -> dict:
        """Call the YouTube videos API for one chunk of ids.

        Raises:
            HTTPStatusError: when the API responds with a non-200 status.
        """
        async with self.session.get(
            YOUTUBE_API,
            params={
                "key": self.yt_key,
                "part": "id,snippet,liveStreamingDetails",
                "id": ",".join(video_ids),
            },
        ) as resp:
            if resp.status != 200:
                raise HTTPStatusError(resp.status)
            return await resp.json()
|
__author__ = 'keleigong'
class Solution(object):
    def convertToTitle(self, n):
        """
        Convert a positive column number to its Excel column title.

        :type n: int
        :rtype: str

        Excel columns are bijective base-26 (A=1 ... Z=26, AA=27, ...),
        so each digit is extracted with divmod(n - 1, 26).
        """
        res = ''
        while n > 0:
            # BUG FIX: the original indexed 'ZABC...Z' with n % 26 without
            # adjusting n when the remainder was 0, producing e.g. 'AZ' for
            # 26, and used Python 2 `n /= 26` (float division in Python 3).
            n, remainder = divmod(n - 1, 26)
            res = chr(ord('A') + remainder) + res
        return res


s = Solution()
num = 52
print(s.convertToTitle(num))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.