index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
991,500 | eb8eeae4e4e15b6ec776099838ffde8c1859859d | """Helper functions."""
from typing import Dict, Callable, List, Any
import pandas as pd
import numpy as np
NON_METRIC_COLS = ["home_team", "away_team", "year", "round_number", "ml_model"]
def _replace_metric_col_names(team_type: str) -> Callable:
return lambda col: col if col in NON_METRIC_COLS else team_type + "_" + col
def _replace_team_col_names(team_type: str) -> Dict[str, str]:
oppo_team_type = "away" if team_type == "home" else "home"
return {"team": team_type + "_team", "oppo_team": oppo_team_type + "_team"}
def _home_away_data_frame(data_frame: pd.DataFrame, team_type: str) -> pd.DataFrame:
    """Select one side's team-match rows and rename their columns for merging.

    Filters rows on the at_home flag, drops that flag, then renames the team
    columns and prefixes every metric column with the side name.
    """
    # Referenced by name inside the query string via '@', hence the lint waiver.
    at_home_query = 1 if team_type == "home" else 0  # pylint: disable=W0612
    selected = data_frame.query("at_home == @at_home_query").drop("at_home", axis=1)
    renamed = selected.rename(columns=_replace_team_col_names(team_type)).rename(
        columns=_replace_metric_col_names(team_type)
    )
    return renamed.reset_index(drop=True)
def pivot_team_matches_to_matches(team_match_df: pd.DataFrame) -> pd.DataFrame:
    """
    Pivots data frame from team-match rows to match rows with home_ and away_ columns.
    Due to how columns are renamed, currently only works for prediction data frames.
    Params:
    -------
    team_match_df: Prediction data structured to have two rows per match
        (one for each participating team) with team & oppo_team columns
    Returns:
    --------
    Prediction data frame reshaped to have one row per match, with columns for home_team
    and away_team.
    """
    side_frames = {
        side: _home_away_data_frame(team_match_df, side) for side in ("home", "away")
    }
    return side_frames["home"].merge(
        side_frames["away"],
        on=["home_team", "away_team", "year", "round_number", "ml_model"],
        how="inner",
    )
def convert_to_dict(data_frame: pd.DataFrame) -> List[Dict[str, Any]]:
    """
    Convert DataFrame to list of record dicts with necessary dtype conversions.
    NaN values become None, and a 'date' column (if present) is stringified.
    Params:
    -------
    data_frame: Any old data frame you choose.
    Returns:
    --------
    List of dicts.
    """
    conversions: Dict[str, Any] = {}
    if "date" in data_frame.columns:
        conversions["date"] = str
    cleaned = data_frame.replace({np.nan: None})
    return cleaned.astype(conversions).to_dict("records")
|
991,501 | 5cd9613e656a1dbea607a5e7f5aa10bdd4cc218d | """
* Assignment: Exception Raise Many
* Required: yes
* Complexity: easy
* Lines of code: 6 lines
* Time: 5 min
English:
1. Validate value passed to a `result` function
2. If `value` is:
a. other type than `int` or `float` raise `TypeError`
a. less than zero, raise `ValueError`
a. below `ADULT`, raise `PermissionError`
3. Non-functional requirements
a. Write solution inside `result` function
b. Mind the indentation level
4. Run doctests - all must succeed
Polish:
1. Sprawdź poprawność wartości przekazanej do funckji `result`
2. Jeżeli `age` jest:
b. innego typu niż `int` lub `float`, podnieś wyjątek `TypeError`
b. mniejsze niż zero, podnieś wyjątek `ValueError`
c. mniejsze niż `ADULT`, podnieś wyjątek `PermissionError`
3. Wymagania niefunkcjonalne
a. Rozwiązanie zapisz wewnątrz funkcji `result`
b. Zwróć uwagę na poziom wcięć
4. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> result(18)
>>> result(17.9999)
Traceback (most recent call last):
PermissionError
>>> result(-1)
Traceback (most recent call last):
ValueError
>>> result('one')
Traceback (most recent call last):
TypeError
>>> result(True)
Traceback (most recent call last):
TypeError
"""
ADULT = 18
age = "abc"


def result(age):
    """Validate ``age`` per the exercise rules.

    Raises:
        TypeError: when age is not an int or float (bool is rejected too)
        ValueError: when age is negative
        PermissionError: when age is below ADULT
    """
    # type() (rather than isinstance) is deliberate: it rejects bool, a
    # subclass of int, as the result(True) doctest requires.
    if type(age) not in (int, float):
        raise TypeError()
    elif age < 0:
        raise ValueError()
    elif age < ADULT:  # was a magic 18; use the module constant
        raise PermissionError()
991,502 | 5d70afaf4bbb96853f04462d57ecdff35d6891e1 | #! /usr/bin/env python
def swap(l, i):
    """Move l[i] into its value-indexed slot (index l[i]-1), in place."""
    dest = l[i] - 1
    l[dest], l[i] = l[i], l[dest]
def f(l):
    """Return the smallest missing positive integer, rearranging l in place.

    Cyclic-sort approach: place every value v in 1..len(l) at index v-1, then
    scan for the first slot whose value is wrong.

    Fixes over the original:
    - duplicate values (e.g. [1, 1]) no longer cause an infinite swap loop;
    - a complete permutation (e.g. [1, 2, 3]) now returns len(l)+1 instead
      of falling off the end and returning None.
    """
    if not l:
        return 1
    length = len(l)
    for i in range(length):
        # keep swapping l[i] toward its home slot while it is in range
        while 0 < l[i] <= length and l[i] != i + 1:
            dest = l[i] - 1
            if l[dest] == l[i]:
                break  # duplicate already occupies the slot; swapping loops forever
            l[dest], l[i] = l[i], l[dest]
    for i in range(length):
        # any slot not holding i+1 marks the first missing positive
        if l[i] != i + 1:
            return i + 1
    return length + 1
# Parenthesized call prints the same result under both Python 2 and 3
# (the original bare `print f(...)` statement is a SyntaxError on Python 3).
print(f([3, 4, -1, 1]))
|
991,503 | 4ba37a0336216414c10c81b2887dcfbbf5448997 | # -*- coding: utf-8 -*-
from typing import List
class Solution:
    """LeetCode 1078: occurrences of a word after a given bigram."""

    def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
        """Return every word that immediately follows the pair 'first second'."""
        words = text.split(' ')
        third_words = []
        # slide a window of three consecutive words across the text
        for left, middle, right in zip(words, words[1:], words[2:]):
            if left == first and middle == second:
                third_words.append(right)
        return third_words
if __name__ == '__main__':
    # Quick manual check against the LeetCode example.
    sample_text = 'alice is a good girl she is a good student'
    print(Solution().findOcurrences(text=sample_text, first='a', second='good'))
991,504 | 3dbf9417099b20fae0331a41dbb3aeab6daabafe | #I'm following this tutorial by Adrian Rosenbrock
#https://www.pyimagesearch.com/2018/02/26/face-detection-with-opencv-and-deep-learning/?__s=k1tfi5xcxncrpsppkeop
#We are going to perform a fast, accurate face detection with open CV using a
#pre trained deep learning face detector model shipped with the library.
#In August 2017 OpenCV was officially released, including a number of DL
#frameworks like Caffe, TensorFlow and PyTorch
#The Caffe based face detector can be found in the face_detector sub-directory
#of the dnn samples (https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector)
#When using OpenCVs deep neural network module with Caffe models, you'll need
#two sets of files
#1. .prototxt file(s), which define the model architecture (i.e. the
#hidden layers themselves)
#2. .caffemodel file which contains the weights for the actual layers
#The weigt files are not included in the openCV samples direcotry and it
#requires a bit more digging to find them
#Thanks to the hard work of Aleksandr Rybnikov and the other
#contributors to OpenCV’s dnn module, we can enjoy these more accurate OpenCV face detectors in our own applications.
#In this first example we'll learn how to apply face detection with openCV to
#single input images
# USAGE
# cd '/Users/adrian/desktop/CompVision'
# python detect_faces.py --image rooster.jpg --prototxt deploy.prototxt.txt --model res10_300x300_ssd_iter_140000.caffemodel
#python detect_faces.py --image Webp.net-resizeimage.jpg --prototxt deploy.prototxt.txt --model res10_300x300_ssd_iter_140000.caffemodel
import numpy as np
import argparse
import cv2

# ---------------------------------------------------------------------------
# Command-line interface: image path, Caffe prototxt (architecture), Caffe
# model (weights), and an optional confidence threshold.
# ---------------------------------------------------------------------------
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
ap.add_argument("-p", "--prototxt", required=True, help="Path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True, help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum propability to filter weak decisions")
args = vars(ap.parse_args())

# ---------------------------------------------------------------------------
# Load the serialized face detector from disk.
# ---------------------------------------------------------------------------
print("[Info] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# Read the input image, remember its size, and build a fixed 300x300 blob;
# blobFromImage handles the resizing and mean subtraction (BGR means below).
image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300),
                             (104.0, 177.0, 123.0))

# ---------------------------------------------------------------------------
# Forward pass: feed the blob through the network to obtain detections.
# ---------------------------------------------------------------------------
print("[INFO] computing object detecions...")
net.setInput(blob)
detections = net.forward()

# ---------------------------------------------------------------------------
# Draw a box and confidence label for every sufficiently confident detection.
# ---------------------------------------------------------------------------
for i in range(0, detections.shape[2]):
    # probability associated with this detection
    confidence = detections[0, 0, i, 2]
    # skip weak detections below the threshold
    if confidence > args["confidence"]:
        # bounding box is stored normalized; scale back to image coordinates
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        text = "{:.2f}%".format(confidence * 100)
        # shift the label below the box when it would run off the top edge
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
        cv2.putText(image, text, (startX, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

# Show the annotated image until a key is pressed.
cv2.imshow("Output", image)
cv2.waitKey(0)
|
991,505 | 822e6df5190010fedccedccd5f8439181f24b8f7 | from flask import Flask
import os
import mysql.connector
app = Flask(__name__)


@app.route('/')
def hello_world():
    """List the MySQL server's databases in a greeting string.

    Returns a plain-text response naming every database visible to root.
    The connection is now closed in a ``finally`` block so it is not leaked
    when the cursor or query raises (the original leaked it on any error).
    """
    # host 'mysql-dev' is defined in docker-compose.yml
    cnx = mysql.connector.connect(user='root', password='abcdefg', host='mysql-dev')
    try:
        cursor = cnx.cursor()
        cursor.execute("show databases")
        msg = [row[0] for row in cursor]
    finally:
        cnx.close()
    return ('Hello, mysql connected, and has databases: ' + ','.join(msg))
|
991,506 | ecd3cd38cffd80cc93210a421126ae9b9a7c583d | soma = 0
cont = 0
for c in range(1, 500, 2):
if c % 3 == 0:
soma += c
cont += 1
print(f'Foram encontrados {cont} números ímpares e divisíveis por 3 entre 1 e 500 e seu somatório é {soma}')
|
991,507 | e0753fee08e51580ead3bd619d0ef8c7d131e4a7 | import numpy as np
from ase import units
def cosine_squared_window(n_points):
    """Cosine-squared taper of length n_points: 1 at index 0, 0 at the end."""
    grid = np.arange(n_points)
    return np.cos(np.pi * grid / (n_points - 1) / 2) ** 2
def _single_fft_autocorrelation(data, normalize=False):
if normalize:
data = (data - np.mean(data)) / np.std(data)
n_points = data.shape[0]
fft_forward = np.fft.fft(data, n=2 * n_points)
fft_autocorr = fft_forward * np.conjugate(fft_forward)
fft_backward = np.fft.ifft(fft_autocorr)[:n_points] / n_points
return np.real(fft_backward)
def fft_autocorrelation(data):
    """Column-wise FFT autocorrelation along axis 0 of an N-D array.

    Flattens trailing axes, autocorrelates each resulting series, and
    reshapes so the time axis comes last.
    """
    orig_shape = data.shape
    flat = data.reshape((orig_shape[0], -1))
    n_series = flat.shape[1]
    acfs = np.zeros((n_series, orig_shape[0]))
    for col in range(n_series):
        acfs[col, :] = _single_fft_autocorrelation(flat[:, col])
    return acfs.reshape((*orig_shape[1:], -1))
def velocity_autocorrelation(velocities):
    """Velocity autocorrelation: sum over components, average over atoms."""
    acf = fft_autocorrelation(velocities)
    return np.mean(np.sum(acf, axis=1), axis=0)
def compute_spectra(data, timestep, resolution=None, frequency_units='THz'):
    """Compute a spectrum from an autocorrelation series.

    Windows the series, zero-pads and mirrors it, then takes the FFT
    magnitude. Frequencies are converted to THz when requested.

    :param data: 1-D autocorrelation series
    :param timestep: time between samples (fs when converting to THz)
    :param resolution: optional truncation length before windowing
    :param frequency_units: 'THz' (default) or None/'' for raw 1/timestep units
    :raises ValueError: for an unsupported frequency_units string
    """
    if resolution:
        data = data[:resolution]
    n_points = data.shape[0]
    # Multiply out-of-place: the original `data *= window` mutated the
    # caller's array in place (and, when resolution was set, wrote through
    # a view into it).
    data = data * cosine_squared_window(n_points)
    data_padded = np.zeros(4 * n_points)
    data_padded[:n_points] = data
    # mirror so the transform sees an even extension of the signal
    data_mirrored = np.hstack((np.flipud(data_padded), data_padded))
    n_fourier = 8 * n_points
    intensities = np.abs(timestep * np.fft.fft(data_mirrored, n=n_fourier)[: n_fourier // 2])
    frequencies = np.arange(n_fourier // 2) / (n_fourier * timestep)
    if frequency_units:
        if frequency_units.lower() == 'thz':
            frequencies *= 1e3 * units.fs
        else:
            raise ValueError(f"unsupported frequency_units: {frequency_units!r}")
    return frequencies, intensities
def pdos_spectrum(velocities, timestep, resolution=None):
    """Vibrational density-of-states spectrum from velocities via the VACF."""
    vacf = velocity_autocorrelation(velocities)
    return compute_spectra(vacf, timestep=timestep, resolution=resolution)
|
991,508 | c2e0728562a55a3d09ed748f6039b39ef081ca70 | import itertools
import math
import os
import loompy
import h5py
import copy
import umap
import numpy as np
import pandas as pd
from collections import Counter
import seaborn as sns; sns.set(style="white", color_codes=True)
import matplotlib
import matplotlib.colors as mcol
from scipy.stats import spearmanr
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from IPython.display import display, HTML
from matplotlib.ticker import NullFormatter
from scipy.stats import binned_statistic
from IPython.display import display, HTML
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
import scipy.cluster.hierarchy as sch
plt.rcParams['axes.unicode_minus'] = False
class Cluster_cells(object):
    """Evaluate genotyped amplicons.

    Builds a binary tensor of allelic traits per variant and cell, derives
    cell-identity / similarity matrices from it, and hierarchically clusters
    the cells.
    """
    def __init__(self, allel_traits):
        self.barcodes = []       # cell barcodes (not populated by this class)
        self.data_tensor = []    # k x m x n binary tensor: traits x variants x cells
        self.clusters = []       # flat cluster labels (see retrieve_cluster)
        self.linkage = []        # scipy hierarchical linkage (see make_cluster)
        self.cells = 0           # number of cells (n)
        self.genotypes = 0       # number of genetic variants (m)
        self.allel_traits = allel_traits  # trait codes considered, e.g. (0, 1, 2, 3)
        self.m_cell_idt = []     # per-trait n x n cell-identity matrices
    @classmethod
    def read_hd5(cls, genotype_file, allel_traits=(0, 1, 2, 3), merged=False):
        """Alternate constructor from a genotype matrix.

        NOTE(review): despite the name, this receives an in-memory matrix
        (it is transposed with ``.T`` before use), not an hd5 file path —
        confirm against callers.
        """
        run = cls(allel_traits)
        run._select_allel_variants(genotype_file.T, allel_traits, merged)
        return run
    def _select_allel_variants(self, genotype, allel_traits, merged):
        """Convert the genotyping output into a binary tensor of shape
        k * m * n, with k the considered allelic traits, m genetic variants
        and n cells (0: homozygote wt, 1: heterozygote, 2: homozygote
        alternate, 3: unknown/not applicable)."""
        self.hd5_data = genotype
        dat = np.array(genotype)
        self.genotypes, self.cells = dat.shape
        try:
            # one 0/1 layer per requested trait
            self.data_tensor = np.array([dat == i for i in allel_traits]).astype('int')
        except TypeError:
            # allel_traits was a single scalar, not an iterable
            self.data_tensor = np.array([dat == allel_traits]).astype('int')
        if merged:
            # collapse the trait layers into a single 1 x m x n tensor
            self.data_tensor = self.data_tensor.sum(axis=0).reshape(1, self.data_tensor.shape[1], self.data_tensor.shape[2])
        return
    def cell_identity(self, sparsity_thresh=0.05, dense_dot=False):
        """returns for each allele trait a n * n cell identity matrix
        (counts of variants on which two cells share the trait), appended
        to ``self.m_cell_idt``."""
        for i, m in enumerate(self.data_tensor):
            counts = np.sum(m)
            if counts / (self.cells * self.genotypes) < sparsity_thresh:
                # sparse path: CSR transpose-dot-product
                A = csr_matrix(m)
                rec = A.T.dot(A).toarray()
            else:
                if dense_dot:
                    print(('matrix {} is not sparse, try dense dot product').format(i))
                    rec = np.dot(m.T, m)
                else:
                    # chunked dense dot product to bound peak memory.
                    # NOTE(review): the loop below rebinds ``i`` and shadows
                    # the outer trait index — works here, but fragile.
                    chunks = 300
                    cell_arr = np.arange(self.cells)
                    cell_chunk = [cell_arr[i:i + chunks] for i in range(0, self.cells, chunks)]
                    rec = np.zeros([self.cells, self.cells])
                    for i, k in enumerate(cell_chunk):
                        for j, l in enumerate(cell_chunk):
                            dat1 = np.dot(m[:, k].T, m[:, l])
                            c1 = len(k)
                            c2 = len(l)
                            rec[i * chunks:i * chunks + c1, j * chunks:j * chunks + c2] = dat1
            self.m_cell_idt.append(rec)
        self.m_cell_idt = np.array(self.m_cell_idt)
        return
    def cos_similarity(self):
        # Cosine similarity between cell columns of the merged trait tensor;
        # stored on self.cos_sim.
        dat = self.data_tensor.sum(axis=0)
        norm_dat = np.linalg.norm(dat, axis=0)
        self.cos_sim = np.dot(dat.T, dat) / np.dot(norm_dat.reshape(-1, 1), norm_dat.reshape(1, -1))
    def angular_similarity(self):
        # Angular similarity 1 - 2*arccos(cos_sim)/pi; computes cos_sim
        # lazily on first use.
        try:
            arccos = np.arccos(self.cos_sim)
        except AttributeError:
            self.cos_similarity()
            arccos = np.arccos(self.cos_sim)
        # NaNs (e.g. from zero-norm cells or rounding past 1.0) mapped to 0
        arccos[np.isnan(arccos)] = 0
        self.ang_sim = 1 - 2 * arccos / np.pi
    def jaccard_similarity(self):
        # Jaccard similarity |A∩B| / (|A| + |B| - |A∩B|) on binary profiles.
        dat = self.data_tensor.sum(axis=0)
        sq_norm_dat = np.linalg.norm(dat, axis=0) ** 2
        self.jaccard_sim = np.dot(dat.T, dat) / (-np.dot(dat.T, dat) + sq_norm_dat.reshape(-1, 1) + sq_norm_dat)
    def make_cluster(self, method, data=None, cmap=plt.cm.YlGnBu):
        """Hierarchically cluster cells and plot dendrograms + heatmap.

        Uses ``data`` when supplied, otherwise the summed cell-identity
        matrices; saves the figure to clusters.svg / clusters.png and stores
        the row leaf order in ``self.cell_sort_idx``.
        """
        try:
            # NOTE(review): comparing an array to None with == raises
            # ValueError for array input; that exception is (ab)used here to
            # pick the supplied ``data`` over self.m_cell_idt.
            if data == None: pass
            dat = self.m_cell_idt.sum(axis=0)
        except ValueError:
            dat = data
        self.linkage = sch.linkage(dat, method=method)
        # Compute and plot first dendrogram (row ordering, drawn on the left).
        fig = plt.figure(figsize=(16, 16))
        ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])
        Z1 = sch.dendrogram(self.linkage, orientation='left')
        ax1.set_xticks([])
        ax1.set_yticks([])
        # Compute and plot second dendrogram (column ordering, drawn on top).
        ax2 = fig.add_axes([0.3, 0.71, 0.6, 0.2])
        Z2 = sch.dendrogram(self.linkage)
        ax2.set_xticks([])
        ax2.set_yticks([])
        # Plot distance matrix reordered by the dendrogram leaves.
        axmatrix = fig.add_axes([0.3, 0.1, 0.6, 0.6])
        idx1 = Z1['leaves']
        idx2 = Z2['leaves']
        dat = dat[idx1, :]
        dat = dat[:, idx2]
        im = axmatrix.matshow(dat, aspect='auto', origin='lower', cmap=cmap)
        axmatrix.set_xticks([])
        axmatrix.set_yticks([])
        # Plot colorbar.
        axcolor = fig.add_axes([0.91, 0.1, 0.02, 0.6])
        plt.colorbar(im, cax=axcolor)
        plt.savefig('clusters.svg', dpi=600)
        plt.savefig('clusters.png', dpi=600)
        #fig.show()
        self.cell_sort_idx = idx1
        return
    def retrieve_cluster(self, number):
        """Cut the linkage into at most ``number`` flat clusters."""
        self.clusters = sch.fcluster(self.linkage, number, criterion='maxclust')
        return
def load_genotypes(genotypes_path):
    """Load the genotyping layers from an HDF5 file.

    Returns four DataFrames indexed by cell barcode with one column per
    variant: genotypes (GT), quality (GQ), total depth (DP) and alternate
    depth (AD).
    """
    with h5py.File(genotypes_path, 'r') as f:
        # barcodes are stored as bytes with a suffix after '.', which we strip
        cell_barcodes = copy.deepcopy([c.decode('utf8').split('.')[0] for c in f['CELL_BARCODES']])
        variants = copy.deepcopy([v.decode('utf8') for v in f['VARIANTS']])

        def _layer(key):
            # each layer is stored variants x cells; transpose to cells x variants
            frame = pd.DataFrame(np.transpose(f[key]), index=cell_barcodes, columns=variants).sort_index()
            frame.index.name = 'cell_barcode'
            return frame

        genotypes = _layer('GT')
        quality = _layer('GQ')
        total_depth = _layer('DP')
        alt_depth = _layer('AD')
    # vaf (= alt_depth / total_depth) intentionally left disabled:
    # division by zero would yield NaN
    return genotypes, quality, total_depth, alt_depth
def filter_variants(genotypes, alt_depth, total_depth, quality, min_alt_depth, min_total_depth, min_quality):
    """Mask unreliable genotype calls as 3 ('unknown'), in place.

    A call is masked when its total depth, supporting alternate depth (for
    het/hom-alt calls) or quality falls below the given thresholds, or when
    it is missing. Returns the (mutated) genotypes frame.
    """
    low_coverage = total_depth < min_total_depth
    low_support = ((genotypes == 1) | (genotypes == 2)) & (alt_depth < min_alt_depth)
    low_quality = quality < min_quality
    genotypes[low_coverage] = 3
    genotypes[low_support] = 3
    genotypes[low_quality] = 3
    genotypes[genotypes.isnull()] = 3
    return genotypes
def load_variants(variants_file_path):
    """Read the variant annotation TSV; first column becomes the index."""
    annotations = pd.read_csv(variants_file_path, sep='\t', header=0, index_col=0, low_memory=False)
    annotations.index.name = 'variant'
    return annotations
|
991,509 | fb0bbede69a8fb5c63784616116a5bd7a73b494b | #!/usr/bin/env python
# sys module
import time
import os
import sys
# third parties module
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
from torch.utils.data import SubsetRandomSampler
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
# local module
path_this = os.path.abspath (os.path.dirname (__file__))
sys.path.append (os.path.join (path_this, '..'))
from utils import progress_bar
from model import CNN
# Preprocessing: resize to the 224x224 input ResNet expects, convert to a
# tensor, and normalize each channel (args are per-channel means, then stds).
transformer = transforms.Compose ([
    transforms.Resize ((224, 224)),
    transforms.ToTensor (),
    transforms.Normalize ([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
# Dataset: one class per sub-folder of ./data.
dataset = torchvision.datasets.ImageFolder (root='./data',
                                            transform=transformer)
# Train/validation split: sample 20% of indices for validation, use the rest
# for training.
indices = range (len (dataset))  # all indexes of dataset
validation_size = int (0.2 * len (dataset))
validation_idx = np.random.choice (indices, size=validation_size, replace=False)
train_idx = list (set (indices) - set (validation_idx))
# Samplers restrict each loader to its index subset.
train_sampler = torch.utils.data.SubsetRandomSampler (train_idx)
validation_sampler = torch.utils.data.SubsetRandomSampler (validation_idx)
train_loader = torch.utils.data.DataLoader (dataset, batch_size=20,
                                            num_workers=2, sampler=train_sampler)
validation_loader = torch.utils.data.DataLoader (dataset, batch_size=20,
                                                 num_workers=2, sampler=validation_sampler)
# Run on GPU when available.
device = torch.device ("cuda:0" if torch.cuda.is_available () else "cpu")
print ("Using ", device)
# Network: a pretrained ResNet-34 with its final fc layer replaced by an
# 80-class head. (The scratch CNN alternative is left commented out.)
# net = CNN ()
net = torchvision.models.resnet34 (pretrained=True)
# NOTE(review): this loop sets requires_grad=True, which does NOT freeze the
# backbone despite the apparent intent — only net.fc is passed to the
# optimizer below, so other weights never update, but gradients are still
# computed for them. Setting False here was presumably intended; confirm.
for param in net.parameters () :
    param.requires_grad = True
net.fc = nn.Linear (512, 80)
net.to (device)
# Loss and optimizer: cross-entropy over the new head's parameters only.
criterion = nn.CrossEntropyLoss ()
optimizer = optim.Adam (net.fc.parameters (), lr=0.001)
print ("Training..")
for epoch in range (10) :
    running_loss = 0  # NOTE(review): assigned but never used
    tot_time = 0
    for i, data in enumerate (train_loader) :
        t_start = time.time ()
        progress_bar (i+1, len (train_loader))
        inputs, labels = data
        # move the batch to the training device
        inputs, labels = inputs.to (device), labels.to (device)
        # standard step: zero grads, forward, loss, backward, update
        optimizer.zero_grad ()
        outputs = net (inputs)
        loss = criterion (outputs, labels)
        loss.backward ()
        optimizer.step ()
        tot_time += time.time () - t_start
    progress_bar (None)
    # Validation pass: accuracy over the held-out 20%, no gradients needed.
    with torch.no_grad () :
        correct = 0
        total = 0
        for i, data in enumerate (validation_loader) :
            progress_bar (i, len (validation_loader))
            inputs, labels = data
            inputs, labels = inputs.to (device), labels.to (device)
            predicted = net (inputs)
            # argmax over class scores gives the predicted label
            _, predicted = torch.max (predicted.data, 1)
            correct += (predicted == labels).sum ().item ()
            total += labels.size (0)
        progress_bar (None)
        # NOTE: `loss` here is the last training batch's loss, not an epoch mean.
        print ("epoch {},loss {:.2f}, acc {:.2F},{:.2F}/it".format (
            epoch + 1,
            loss.item (),
            correct/total,
            tot_time / len (train_loader)
        ))
print ('Finished Training')
|
991,510 | 0f7fd390ce2b2d3226d4580950037ffff8e84733 | import time
from sqlalchemy.orm import aliased
from humaniki_backend.utils import is_property_exclusively_citizenship, transform_ordered_aggregations_with_year_fns, \
transform_ordered_aggregations_with_proj_internal_codes, get_transform_ordered_aggregation_qid_match
from humaniki_schema import utils
from humaniki_schema.queries import get_aggregations_obj
from humaniki_schema.schema import metric, metric_aggregations_j, metric_properties_j, label, label_misc, \
metric_aggregations_n, fill, metric_coverage
from sqlalchemy import func, and_, desc
import pandas as pd
from humaniki_schema.utils import Properties, get_enum_from_str
from humaniki_schema.log import get_logger
log = get_logger(BASE_DIR=__file__)
def get_aggregations_id_preds(session, ordered_aggregations, non_orderable_params, as_subquery=True):
    '''transform the ordered_aggreation dict of {prop:value} to {prop: sqlalchemy predicate} for use later on
    as subquery. if the value is specified as 'all' then leave untouched.'''
    # Each pred_fn takes and transforms an ordered_aggregations dict in place.
    # They also take session (ideally removable so the whole lookup can happen
    # in one transaction).
    prop_pred_fn_map = {Properties.PROJECT: transform_ordered_aggregations_with_proj_internal_codes,
                        Properties.DATE_OF_BIRTH: transform_ordered_aggregations_with_year_fns,
                        Properties.DATE_OF_DEATH: transform_ordered_aggregations_with_year_fns,
                        Properties.CITIZENSHIP: get_transform_ordered_aggregation_qid_match(Properties.CITIZENSHIP),
                        Properties.OCCUPATION: get_transform_ordered_aggregation_qid_match(Properties.OCCUPATION)}
    preds = ordered_aggregations  # overwritten entry-by-entry below
    for prop_id, val in ordered_aggregations.items():
        if val.lower() == 'all':
            continue  # 'all' entries stay untouched
        transform = prop_pred_fn_map[Properties(prop_id)]
        preds = transform(preds, session)
    if not as_subquery:
        raise AssertionError('please only as subquery for now on.')
    # optimized path
    return preds
def build_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang):
    """
    the entry point for building metrics, first querys the database for the metrics in question
    secondly, builds the nested-dict response; both phases are timed and logged.
    :param session:
    :param fill_id:
    :param population_id:
    :param properties_id:
    :param aggregations_id:
    :param label_lang:
    :return: (metrics_response, represented_biases)
    """
    # phase 1: query the metrics table
    t_query_start = time.time()
    metrics, metrics_columns = get_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang)
    t_query_end = time.time()
    # phase 2: shape the rows into the nested-dict response
    metrics_response, represented_biases = build_gap_response(properties_id, metrics, metrics_columns, label_lang,
                                                              session)
    t_group_end = time.time()
    log.debug(f"Querying metrics repsponse took {'%.3f' % (t_query_end - t_query_start)} seconds")
    log.debug(f"Grouping metrics repsponse took {'%.3f' % (t_group_end - t_query_end)} seconds")
    return metrics_response, represented_biases
def generate_json_expansion_values(properties):
    """Build one labelled (prop_i, agg_i) column pair per property position.

    Extracts element i of the JSON properties list and the matching element
    of the JSON aggregations list so a metric row can be expanded column-wise.
    """
    property_query_cols = []
    for pos, _prop in enumerate(properties):
        prop_col = func.json_extract(metric_properties_j.properties, f"$[{pos}]").label(f"prop_{pos}")
        agg_col = func.json_unquote(
            func.json_extract(metric_aggregations_j.aggregations, f"$[{pos}]")
        ).label(f"agg_{pos}")
        property_query_cols.extend((prop_col, agg_col))
    return property_query_cols
def generate_aliased_tables_for_labelling(properties):
    """
    generate a list of dicts defining how to join an aggregation column
    the details needed are the join table and the join_key like
    [{table:label_misc as 'label_0', join_key:'src'}
    ...
    {{table:label as 'label_n', join_key:'qid'}}]
    note that the join table and key are dependent on the property
    :param properties:
    :return: list of {'label_table': aliased table or None, 'join_key': str or None}
    """
    aliased_joins = []
    for pos, prop in enumerate(properties):
        if prop == 0:
            # recall we are faking sitelinks as property 0
            join_table, join_key = label_misc, 'src'
        elif prop in [Properties.DATE_OF_BIRTH.value, Properties.DATE_OF_DEATH.value]:
            # years need no label join at all
            join_table, join_key = None, None
        else:
            join_table, join_key = label, 'qid'
        aliased_table = aliased(join_table, name=f"label_{pos}") if join_table else None
        aliased_joins.append({'label_table': aliased_table, 'join_key': join_key})
    return aliased_joins
def label_metric_query(session, metrics_subq, properties, label_lang):
    """
    So we have the metrics table, exploded into one aggregation per column, but need to join the labels
    we create an alias of the label table per aggregation, and then join
    note that sitelinks must be joined on label_misc.src and
    qids must be joined on label.qid
    :return: a sqlalchemy query
    """
    # i wish i could compute the alias joins inline in this function rather than upfront, but
    # I believe I need the column names before I can start joining.
    aliased_joins = generate_aliased_tables_for_labelling(properties)
    aliased_label_cols = []       # label_agg_i columns to select
    dimension_label_params = []   # (subquery, right key, left key) per joinable dimension
    for i, aj in enumerate(aliased_joins):
        if aj['label_table']:
            # the left key from the unlabelled metric
            metrics_subq_join_col = getattr(metrics_subq.c, f'agg_{i}')
            # define the right key
            label_join_table = aj['label_table']
            label_join_key = aj['join_key']
            # narrow the label table to (join_key, lang, label) for this lang
            label_subtable_cols = [getattr(label_join_table, label_join_key),
                                   getattr(label_join_table, 'lang'),
                                   getattr(label_join_table, 'label')]
            label_join_table_lang_filtered = session.query(*label_subtable_cols) \
                .filter(label_join_table.lang == label_lang) \
                .subquery(f'label_sub_{i}')
            # was
            # label_col = aj['label_table'].label.label(f'label_agg_{i}')
            label_col = label_join_table_lang_filtered.c.label.label(f'label_agg_{i}')
            label_join_column = getattr(label_join_table_lang_filtered.c, label_join_key)
            dimension_label_tuple = (label_join_table_lang_filtered, label_join_column, metrics_subq_join_col)
            dimension_label_params.append(dimension_label_tuple)
        else:  # we probably aren't joining, like for labelling years
            # the raw aggregation value doubles as its own label
            label_col = getattr(metrics_subq.c, f'agg_{i}').label(f'label_agg_{i}')
        aliased_label_cols.append(label_col)
    # first there will always be the bias_value to label
    bias_sublabel_table = session.query(label_misc).filter(label_misc.lang == label_lang,
                                                           label_misc.type == 'bias').subquery('label_sub')
    label_query_cols = [metrics_subq, bias_sublabel_table.c.label.label('bias_label'), *aliased_label_cols]
    # outer joins keep metric rows even when a label is missing for this lang
    labelled_q = session.query(*label_query_cols) \
        .outerjoin(bias_sublabel_table,
                   bias_sublabel_table.c.src == metrics_subq.c.bias_value)
    for (label_join_table_lang_filtered, label_join_column, metrics_subq_join_col) in dimension_label_params:
        labelled_q = labelled_q \
            .outerjoin(label_join_table_lang_filtered, label_join_column == metrics_subq_join_col)
    return labelled_q
def get_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang):
    """
    get the metrics based on population and properties, and optionally the aggregations
    Expands the metrics row from json aggregations.aggregations list
    --> from
    fill_id | population | [prop_0,..prop_n] | [agg_val_0, .., agg_val_n] | gender | total
    ---> to
    fill_id | population | prop_0 |...| prop_n |agg_val_0 | ... |agg_val_n] | gender | total
    This can be done by writing a dynamic query, based on the fact that we know how many properties
    are being queried by the api user. For instance.
    The reason this transform is necessary here is to facilitate labelling the aggregation values
    using sql joins. That is also tricky because some aggregation values are site-links, and some are
    qids.
    This jiujitsu may be deprecated if we store the aggregations normalized rather than as a json list.
    The problem I was having there was the heterogeneous types of the aggregations (sitelinks, str) (qids, int)
    :param session:
    :param fill_id:
    :param population_id:
    :param properties_id: a properties row providing .id and .properties
    :param aggregations_id: a specified aggregations id (int), a list of ids,
        a {prop: value-or-callable} dict, or None for all
    :param label_lang: if not None then label
    :return: (metric rows, query column descriptions)
    """
    prop_id = properties_id.id
    properties = properties_id.properties
    # one (prop_i, agg_i) column pair per property position
    property_query_cols = generate_json_expansion_values(properties)
    query_cols = [*property_query_cols, metric.bias_value, metric.total]
    metrics_q = session.query(*query_cols) \
        .join(metric_properties_j, metric.properties_id == metric_properties_j.id) \
        .join(metric_aggregations_j, metric.aggregations_id == metric_aggregations_j.id) \
        .filter(metric.properties_id == prop_id) \
        .filter(metric.fill_id == fill_id) \
        .filter(metric.population_id == population_id) \
        .order_by(metric.aggregations_id)
    # narrow by aggregation id(s), depending on how they were specified
    if isinstance(aggregations_id, int):
        metrics_q = metrics_q.filter(metric.aggregations_id == aggregations_id)
    if isinstance(aggregations_id, list):
        metrics_q = metrics_q.filter(metric.aggregations_id.in_(aggregations_id))
    if isinstance(aggregations_id, dict):
        # dict form: join the normalized aggregations table once per constrained property
        for prop_pos, (prop_id, val) in enumerate(aggregations_id.items()):
            if val == 'all':
                continue
            else:
                # +1 because order 0 is presumably the bias dimension itself
                # in metric_aggregations_n — confirm against the fill pipeline
                prop_pos_after_bias = prop_pos + 1
                a_man = aliased(metric_aggregations_n)
                # val may be a callable predicate factory or a literal value
                val_predicate = val(a_man.value) if callable(val) else a_man.value == val
                metrics_q = metrics_q.join(a_man, and_(metric.aggregations_id == a_man.id,
                                                       a_man.aggregation_order == prop_pos_after_bias,
                                                       a_man.property == prop_id,
                                                       val_predicate))
    # if a label_lang is defined we need to make a subquery and join labels onto it
    if label_lang is not None:
        metrics_subq = metrics_q.subquery('metrics_driver')
        metrics_q = label_metric_query(session, metrics_subq, properties, label_lang)
    log.debug(f'metrics_q is:'
              f' {metrics_q.statement.compile(compile_kwargs={"literal_binds": True})}')
    metrics = metrics_q.all()
    metrics_columns = metrics_q.column_descriptions
    log.debug(f'Number of metrics to return are {len(metrics)}')
    return metrics, metrics_columns
def build_gap_response(properties_id, metrics_res, columns, label_lang, session):
    """
    Transform a metrics query result into a JSON-able serialization.

    see https://docs.google.com/document/d/1tdm1Xixy-eUvZkCc02kqQre-VTxzUebsComFYefS5co/edit#heading=h.a8xg7ij7tuqm

    :param properties_id: properties descriptor; its .properties lists the property ids
    :param metrics_res: rows returned by the metrics query
    :param columns: column descriptions for metrics_res (dicts with a 'name' key)
    :param label_lang: label language, or None when labels were not queried
    :param session: DB session, used to fetch ISO codes for citizenship metrics
    :return: (data_points, represented_biases) tuple
    """
    prop_names = [utils.Properties(p).name.lower() for p in properties_id.properties]
    is_citizenship = is_property_exclusively_citizenship(properties_id)
    iso_codes = get_iso_codes_as_lookup_table(session) if is_citizenship else None
    col_names = [col['name'] for col in columns]
    aggr_cols = [col['name'] for col in columns if col['name'].startswith('agg')]
    label_cols = [col['name'] for col in columns if col['name'].startswith('label')]
    # use pandas to group by all dimensions except gender
    metric_df = pd.DataFrame.from_records(metrics_res, columns=col_names)
    pandas_groupby_start = time.time()
    agg_groups = metric_df.groupby(by=aggr_cols) if aggr_cols else [('global', metric_df)]
    pandas_groupby_end = time.time()
    # accumulator pattern
    data_points = []
    for group_i, (group_name, group) in enumerate(agg_groups):
        # groupby keys are tuples for multi-column groups, scalars otherwise
        group_name_as_list = group_name if isinstance(group_name, tuple) else [group_name]
        item_d = dict(zip(prop_names, group_name_as_list))
        values = dict(group[['bias_value', 'total']].to_dict('split')['data'])
        labels_prop_order = group[label_cols].iloc[0].values if label_cols else []
        item_labels = dict(zip(prop_names, labels_prop_order))
        if is_citizenship:
            try:
                item_labels['iso_3166'] = iso_codes[group_name]
            except KeyError:
                # not every citizenship QID has an ISO code; skip silently
                pass
        data_point = {'order': group_i,
                      'item': item_d,
                      'item_label': item_labels,
                      "values": values}
        data_points.append(data_point)
    pandas_group_iteration_end = time.time()
    log.debug(f'pandas groupby took {pandas_groupby_end - pandas_groupby_start} seconds')
    log.debug(f'pandas group iteration took {pandas_group_iteration_end - pandas_groupby_end} seconds')
    represented_biases = make_represented_genders(metric_df, label_lang) if label_lang else None
    return data_points, represented_biases
def make_represented_genders(metric_df, label_lang):
    """
    Map every represented gender value to its label so the front end does not
    have to derive it.

    :param metric_df: DataFrame with 'bias_value' and 'bias_label' columns
    :param label_lang: unused here; kept for interface symmetry with callers
    :return: dict of bias_value -> bias_label
    """
    unique_pairs = metric_df[['bias_value', 'bias_label']].drop_duplicates()
    return {value: label for value, label in unique_pairs.itertuples(index=False)}
def get_iso_codes_as_lookup_table(session, iso_subtype='iso_3166_1'):
    """
    Build a QID -> ISO code lookup table.

    :param session: DB session
    :param iso_subtype: which ISO scheme to read (stored in label_misc.lang)
    :return: a dict mapping qids to their ISO code (e.g. iso_3166_1)
    """
    iso_codes = session.query(label_misc.src, label_misc.label).filter(label_misc.lang == iso_subtype).all()
    # build the dict directly instead of round-tripping through a DataFrame
    return {src: label for src, label in iso_codes}
def get_metrics_count(session):
    """Return the total number of metric rows, counted by fill_id."""
    return session.query(func.count(metric.fill_id)).scalar()
def get_all_snapshot_dates(session):
    """Return every active fill snapshot as a dict, newest first."""
    active_fills = (session.query(fill)
                    .filter(fill.detail['active'] == True)
                    .order_by(desc(fill.date))
                    .all())
    return [snapshot.to_dict() for snapshot in active_fills]
def get_coverage(session, population_id, properties_id, fill_id):
    """
    Compute the fraction of the population covered by the requested properties.

    Numerator: the metric_coverage row for (fill, properties, population);
    denominator: the zero-length-properties row for the same fill/population.

    :return: coverage as a float, or None when no row matched
    """
    # The ids are interpolated into raw SQL below, so coerce them to int first
    # to rule out SQL injection. (TODO: switch to bound parameters proper.)
    fill_id = int(fill_id)
    properties_id = int(properties_id)
    population_id = int(population_id)
    metric_coverage_sql = f""" select n.total_with_properties / d.total_with_properties as coverage
                        from
                        (select
                        total_with_properties,
                        'n' as nd,
                        'k' as k
                        from metric_coverage
                        where fill_id={fill_id} and properties_id={properties_id} and population_id={population_id}) n
                        join
                        (select
                        total_with_properties,
                        'd' as nd,
                        'k' as k
                        from metric_coverage
                        where fill_id={fill_id}
                        -- i really hope the fact holds that this is the numerator we want.
                        and properties_id=(select id
                        from metric_properties_j
                        where properties_len=0)
                        and population_id={population_id}) d
                        on n.k = d.k
                        """
    coverage_decimal = session.execute(metric_coverage_sql).scalar()
    try:
        coverage = float(coverage_decimal)
    except TypeError:
        # scalar() returned None: no matching coverage row
        coverage = None
    return coverage
|
991,511 | 3a15d3d302c590a8ca8ffc759eaceff0ed1aa583 | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from typing import cast
import pytest
from elasticapm import get_client
from elasticapm.conf.constants import SPAN, TRANSACTION
from elasticapm.instrumentation.packages.psycopg import PGCursorProxy
from elasticapm.utils import default_ports
from tests.fixtures import TempStoreClient
# Skip the whole module when psycopg (v3) is not installed.
psycopg = pytest.importorskip("psycopg")
pytestmark = pytest.mark.psycopg
# Integration tests additionally require a Postgres instance configured via env vars.
has_postgres_configured = "POSTGRES_DB" in os.environ
def connect_kwargs():
    """Assemble psycopg connection kwargs from POSTGRES_* env vars, with
    test-suite defaults for the database name and credentials."""
    spec = {
        "dbname": ("POSTGRES_DB", "elasticapm_test"),
        "user": ("POSTGRES_USER", "postgres"),
        "password": ("POSTGRES_PASSWORD", "postgres"),
        "host": ("POSTGRES_HOST", None),
        "port": ("POSTGRES_PORT", None),
    }
    return {key: os.environ.get(var, default) for key, (var, default) in spec.items()}
@pytest.fixture(scope="function")
def postgres_connection(request):
    """Yield a psycopg connection with a throwaway `test` table.

    The table is created inside an open transaction and never committed; the
    ROLLBACK after the yield discards it, so every test starts clean.
    """
    conn = psycopg.connect(**connect_kwargs())
    cursor = conn.cursor()
    cursor.execute(
        "CREATE TABLE test(id int, name VARCHAR(5) NOT NULL);"
        "INSERT INTO test VALUES (1, 'one'), (2, 'two'), (3, 'three');"
    )
    yield conn

    # cleanup
    cursor.execute("ROLLBACK")
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_destination(instrument, postgres_connection, elasticapm_client):
    """The span for a query must carry the destination address/port/service."""
    elasticapm_client.begin_transaction("test")
    cursor = postgres_connection.cursor()
    cursor.execute("SELECT 1")
    elasticapm_client.end_transaction("test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = elasticapm_client.spans_for_transaction(transaction)[0]
    assert span["context"]["destination"] == {
        "address": os.environ.get("POSTGRES_HOST", None),
        "port": default_ports["postgresql"],
        "service": {"name": "", "resource": "postgresql/elasticapm_test", "type": ""},
    }
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_psycopg_tracing_outside_of_elasticapm_transaction(instrument, postgres_connection, elasticapm_client):
    """Cursors are proxied even without an active APM transaction, and no
    transaction event is recorded in that case."""
    cursor = postgres_connection.cursor()
    # check that the cursor is a proxy, even though we're not in an elasticapm
    # transaction
    assert isinstance(cursor, PGCursorProxy)
    cursor.execute("SELECT 1")
    transactions = elasticapm_client.events[TRANSACTION]
    assert not transactions
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_psycopg_select_LIKE(instrument, postgres_connection, elasticapm_client):
    """
    Check that we pass queries with %-notation but without parameters
    properly to the dbapi backend
    """
    cursor = postgres_connection.cursor()
    query = "SELECT * FROM test WHERE name LIKE 't%'"

    try:
        elasticapm_client.begin_transaction("web.django")
        cursor.execute(query)
        cursor.fetchall()
        elasticapm_client.end_transaction(None, "test-transaction")
    finally:
        # make sure we've cleared out the spans for the other tests.
        transactions = elasticapm_client.events[TRANSACTION]
        spans = elasticapm_client.spans_for_transaction(transactions[0])
        span = spans[0]
        assert span["name"] == "SELECT FROM test"
        assert span["type"] == "db"
        assert span["subtype"] == "postgresql"
        assert span["action"] == "query"
        assert "db" in span["context"]
        assert span["context"]["db"]["instance"] == "elasticapm_test"
        assert span["context"]["db"]["type"] == "sql"
        # the recorded statement must be the raw query, with the % untouched
        assert span["context"]["db"]["statement"] == query
        assert span["context"]["service"]["target"]["type"] == "postgresql"
        assert span["context"]["service"]["target"]["name"] == "elasticapm_test"
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_psycopg_composable_query_works(instrument, postgres_connection, elasticapm_client):
    """
    Check that we parse queries that are psycopg.sql.Composable correctly
    """
    from psycopg import sql

    cursor = postgres_connection.cursor()
    query = sql.SQL("SELECT * FROM {table} WHERE {row} LIKE 't%' ORDER BY {row} DESC").format(
        table=sql.Identifier("test"), row=sql.Identifier("name")
    )
    # as_string needs the real cursor; __wrapped__ bypasses the APM proxy
    baked_query = query.as_string(cursor.__wrapped__)
    result = None
    try:
        elasticapm_client.begin_transaction("web.django")
        cursor.execute(query)
        result = cursor.fetchall()
        elasticapm_client.end_transaction(None, "test-transaction")
    finally:
        # make sure we've cleared out the spans for the other tests.
        assert [(2, "two"), (3, "three")] == result
        transactions = elasticapm_client.events[TRANSACTION]
        spans = elasticapm_client.spans_for_transaction(transactions[0])
        span = spans[0]
        assert span["name"] == "SELECT FROM test"
        assert "db" in span["context"]
        assert span["context"]["db"]["instance"] == "elasticapm_test"
        assert span["context"]["db"]["type"] == "sql"
        # the statement is recorded in its rendered (baked) string form
        assert span["context"]["db"]["statement"] == baked_query
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_psycopg_binary_query_works(instrument, postgres_connection, elasticapm_client):
    """
    Check that bytes queries are decoded and passed through to the dbapi
    backend correctly.
    """
    cursor = postgres_connection.cursor()
    query = b"SELECT * FROM test WHERE name LIKE 't%'"
    baked_query = query.decode()
    # predefine so the finally block can't raise NameError (masking the real
    # failure) if execute() throws before result is assigned -- mirrors
    # test_psycopg_composable_query_works
    result = None
    try:
        elasticapm_client.begin_transaction("web.django")
        cursor.execute(query)
        result = cursor.fetchall()
        elasticapm_client.end_transaction(None, "test-transaction")
    finally:
        # make sure we've cleared out the spans for the other tests.
        assert [(2, "two"), (3, "three")] == result
        transactions = elasticapm_client.events[TRANSACTION]
        spans = elasticapm_client.spans_for_transaction(transactions[0])
        span = spans[0]
        assert span["name"] == "SELECT FROM test"
        assert "db" in span["context"]
        assert span["context"]["db"]["instance"] == "elasticapm_test"
        assert span["context"]["db"]["type"] == "sql"
        # bytes queries are recorded in decoded string form
        assert span["context"]["db"]["statement"] == baked_query
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_psycopg_context_manager(instrument, elasticapm_client):
    """Connections/cursors used as context managers still produce one
    connect span and one query span."""
    elasticapm_client.begin_transaction("test")
    with psycopg.connect(**connect_kwargs()) as conn:
        with conn.cursor() as curs:
            curs.execute("SELECT 1;")
            curs.fetchall()
    elasticapm_client.end_transaction("test", "OK")
    transactions = elasticapm_client.events[TRANSACTION]
    spans = elasticapm_client.spans_for_transaction(transactions[0])
    assert len(spans) == 2
    # first span is the connect, second the query
    assert spans[0]["subtype"] == "postgresql"
    assert spans[0]["action"] == "connect"
    assert spans[0]["context"]["service"]["target"]["type"] == "postgresql"
    assert spans[0]["context"]["service"]["target"]["name"] == "elasticapm_test"
    assert spans[1]["subtype"] == "postgresql"
    assert spans[1]["action"] == "query"
@pytest.mark.integrationtest
@pytest.mark.skipif(not has_postgres_configured, reason="PostgresSQL not configured")
def test_psycopg_rows_affected(instrument, postgres_connection, elasticapm_client):
    """rows_affected is recorded for INSERT/UPDATE/DELETE but not SELECT."""
    cursor = postgres_connection.cursor()
    try:
        elasticapm_client.begin_transaction("web.django")
        cursor.execute("INSERT INTO test VALUES (4, 'four')")
        cursor.execute("SELECT * FROM test")
        cursor.execute("UPDATE test SET name = 'five' WHERE id = 4")
        cursor.execute("DELETE FROM test WHERE id = 4")
        elasticapm_client.end_transaction(None, "test-transaction")
    finally:
        transactions = elasticapm_client.events[TRANSACTION]
        spans = elasticapm_client.spans_for_transaction(transactions[0])

        assert spans[0]["name"] == "INSERT INTO test"
        assert spans[0]["context"]["db"]["rows_affected"] == 1

        assert spans[1]["name"] == "SELECT FROM test"
        assert "rows_affected" not in spans[1]["context"]["db"]

        assert spans[2]["name"] == "UPDATE test"
        assert spans[2]["context"]["db"]["rows_affected"] == 1

        assert spans[3]["name"] == "DELETE FROM test"
        assert spans[3]["context"]["db"]["rows_affected"] == 1
@pytest.mark.integrationtest
def test_psycopg_connection(instrument, elasticapm_transaction, postgres_connection):
    """The connect itself is traced as a `psycopg.connect host:port` span."""
    # elasticapm_client.events is only available on `TempStoreClient`, this keeps the type checkers happy
    elasticapm_client = cast(TempStoreClient, get_client())
    elasticapm_client.end_transaction("test", "success")
    span = elasticapm_client.events[SPAN][0]
    host = os.environ.get("POSTGRES_HOST", "localhost")
    # NOTE(review): the port is hard-coded to 5432 here while connect_kwargs
    # honours POSTGRES_PORT -- confirm this test is only run on the default port
    assert span["name"] == f"psycopg.connect {host}:5432"
    assert span["action"] == "connect"
|
def cond(a=6):
    """Print whether ``a`` is negative, zero, or positive.

    :param a: number to classify; defaults to 6 to preserve the original
        script behaviour when called with no argument
    """
    if a < 0:
        print("a is negative")
    elif a == 0:
        print("a is zero")
    else:
        # fixed typo: was "a s positive"
        print("a is positive")


cond()
991,513 | 172752c696de42882926b773173f65702c367023 | import os
import numpy as np
import unittest
import yaml
from greengraph import *
from nose.tools import *
from mock import Mock, patch
def test_geolocate():
    """Greengraph.geolocate should return the (lat, long) from the fixture file."""
    with open(os.path.join(os.path.dirname(__file__), 'fixtures', 'mapcoord.yaml')) as fixtures_file:
        # safe_load: fixtures are plain data, and yaml.load without an explicit
        # Loader is deprecated and unsafe on untrusted input
        fixtures = yaml.safe_load(fixtures_file)
        for fixt in fixtures:
            location = fixt.pop('location')
            lat = fixt.pop('lat')
            long = fixt.pop('long')
            answer = (lat, long)
            Trial = Greengraph(0.0, 0.0)
            assert_equal(Trial.geolocate(location), answer)
def test_location_sequence():
    """Greengraph.location_sequence should reproduce the fixture lat/long values."""
    with open(os.path.join(os.path.dirname(__file__), 'fixtures', 'locationsequence.yaml')) as fixtures_file:
        # safe_load instead of deprecated/unsafe bare yaml.load
        fixtures = yaml.safe_load(fixtures_file)
        for fixt in fixtures:
            orig = fixt.pop('orig')
            dest = fixt.pop('dest')
            step = fixt.pop('steps')
            lat = fixt.pop('lat')
            long = fixt.pop('long')
            Trial = Greengraph(0.0, 0.0)
            answer = Trial.location_sequence(Trial.geolocate(orig), Trial.geolocate(dest), step)
            # NOTE(review): indexing `answer` with the leftover `fixt` dict looks
            # wrong (a dict is not a valid sequence index) -- confirm the intended
            # index against the fixture layout before relying on this test.
            assert(answer[fixt][1] == long and answer[fixt][0] == lat)
|
991,514 | d02a93859afc8acaddbfda5b7362ea689113d985 | import pandas as pd
import matplotlib.pyplot as plt
def compute_score_variation(games, color, variation):
    """
    Compute the score (wins plus half the draws, over games played) achieved
    by one side in the games of a given opening variation.

    :param games: DataFrame of games with "variation" and "resultat" columns
    :param color: "white" counts 1-0 as wins; anything else counts 0-1
    :param variation: variation name to filter on
    :return: score in [0, 1]; 0.0 when no game matches (avoids ZeroDivisionError)
    """
    games = games[games["variation"] == variation]
    if len(games) == 0:
        return 0.0
    win_result = "1-0" if color == "white" else "0-1"
    wins = len(games[games["resultat"] == win_result])
    draws = len(games[games["resultat"] == "1/2-1/2"])
    return (wins + 0.5 * draws) / len(games)
def compute_scored_opening(games, pseudo, opening, color):
    """
    Show, as a horizontal bar chart, the score obtained in every variation of
    an opening played by `pseudo` with the given color.

    :param games: DataFrame of games
    :param pseudo: player name, matched against the `color` column
    :param opening: opening name to filter on
    :param color: "white" or "black"
    """
    selection = games[(games[color] == pseudo) & (games["opening"] == opening)]
    print(len(selection))
    variations = selection["variation"].value_counts().index.values
    scores = [compute_score_variation(selection, color, v) for v in variations]
    plt.barh(variations, scores)
    plt.xticks(rotation="vertical")
    # annotate each bar with its score, two decimals
    for pos, score in enumerate(scores):
        plt.text(score, pos, "{:0.2f}".format(score))
    plt.show()
|
991,515 | 1ca1dec3de79de9bf0f65344fca7b93d2e1c82b8 | from rest_framework import serializers
from comment.models import Comment
from utils.common_utils import get_model_fields
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for Comment objects.

    The serialized field set is taken from the model via get_model_fields;
    the explicit declarations below override the auto-generated field types.
    """

    # uid/sid/cid: integer id fields (presumably user, subject and comment
    # ids -- confirm against the Comment model)
    uid = serializers.IntegerField()
    sid = serializers.IntegerField()
    cid = serializers.IntegerField()
    content = serializers.CharField()

    class Meta:
        model = Comment
        fields = get_model_fields(Comment)
|
991,516 | 4de779e2159d23de8a98a2ff2a7865a2f9683b11 | import argparse
import os
import re
# Ported to Python 3: print()/input() replace the Python 2 print statement and
# raw_input(), and `key in dict` replaces the removed dict.has_key().

# filename & open
name = "data/WhatsApp.txt"
fole = open(name, 'r')

# Create empty dictionaries
kady = {}
tommy = {}
Dict = {}  # word -> occurrence count

# regex matching every NON-letter character (used to strip punctuation/digits)
rogex = re.compile('[^a-zA-Z]')

s = input("Lets begin? -- Please press enter")

# read first line
line = fole.readline()
Stats = {}
Stats["total"] = 0

while line:
    print("Line: " + line)
    L = line.split()
    # Regex for Date : DD/MM/YYYY, e.g. 23/12/2017
    m = re.match(r'.*\d+/\d+/\d+.*', line)
    if m:
        # if Date found, store date
        date = L[0]
        print("Date: " + date)
    n = re.search(r'.*\d+:\d+:\d+.*', line)
    if n:
        time = L[1]
        print("Time: " + time)
    if n and m:
        # Determine the sender from the third token.
        # NOTE(review): if neither name matches, `sender`/`b` keep their value
        # from the previous line (or are undefined on the very first line) --
        # confirm every dated line names Tommy or Kady.
        i = L[2].find("Tommy")
        if i >= 0:
            sender = 't'
            print("Sender: Tommy")
        i = L[2].find("Kady")
        if i >= 0:
            sender = 'k'
            print("Sender: Kady")
        # index of the first message word (skips date/time/sender tokens)
        if sender == 'k':
            b = 3
        if sender == 't':
            b = 4
        for x in range(b, len(L)):
            word = L[x].lower()
            # strip everything but letters
            formed = rogex.sub('', word)
            if formed:
                Stats["total"] += 1
                if formed in Dict:
                    Dict[formed] += 1
                else:
                    Dict[formed] = 1
    else:
        # Continuation line without a date/time header.
        # NOTE(review): this iterates the line CHARACTER by character (each
        # letter counts as a one-letter "word") -- probably meant
        # `line.split()`; kept as-is to preserve the original behaviour.
        for w in line:
            word = w.lower()
            formed = rogex.sub('', word)
            if formed:
                Stats["total"] += 1
                if formed in Dict:
                    Dict[formed] += 1
                else:
                    Dict[formed] = 1
    line = fole.readline()
    # LOOP REPEAT

fole.close()  # was leaked in the original

print(Dict)
print(Stats["total"])
input("Calculate Charts?")

# CHARTS: insertion sort of all words seen at least 10 times, most frequent first
fake = {'tum': 10, 'koosh': 12, 'pom': 3}
Charts = [('.', 0)]  # sentinel entry so the insert loop always terminates
its = 1
for D in Dict:
    V = Dict[D]
    tup = (D, V)
    print(tup)
    # put in charts
    if V >= 10:
        i = 0
        its = len(Charts)
        while i < its:
            # insert before the first entry with a smaller count
            if V > Charts[i][1]:
                Charts.insert(i, tup)
                i = its  # to END loop
            i += 1
        # Eo while
print(Charts)
# End
991,517 | 2559bb50695ec2f39d6fb3e5c66c62b5526683e0 |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import math
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import roc_auc_score
import tensorflow as tf
class OHETransformer( TransformerMixin, BaseEstimator ):
    """One-hot-encode the given columns and splice the dummies back into X.

    The original categorical columns are dropped and replaced by the
    encoder's generated feature columns.
    """

    def __init__(self, feats):
        super().__init__()
        self._feats = feats  # categorical column names to encode

    def fit(self, X, y=None):
        # drop='first' avoids the dummy-variable trap; handle_unknown='error'
        # surfaces unseen categories instead of silently encoding all-zeros
        self._ohe = OneHotEncoder(handle_unknown='error', sparse=False, drop='first')
        self._ohe.fit(X[self._feats])
        return self

    def transform(self, X, y=None):
        tX = self._ohe.transform(X[self._feats])
        feat_names = self._ohe.get_feature_names(self._feats)
        X = X.drop(self._feats, axis=1)
        # Reuse X's index: with a fresh RangeIndex the join misaligns whenever
        # X has a non-default index (e.g. cross-validation folds or test-set
        # slices), silently producing NaN rows.
        dftX = pd.DataFrame(data=tX, columns=feat_names, index=X.index)
        X = X.join(dftX)
        return X
class StandardScalerTransformer( TransformerMixin, BaseEstimator ):
    """Standard-scale (zero mean, unit variance) the given columns in place,
    leaving every other column of the DataFrame untouched."""

    def __init__(self, feats):
        super().__init__()
        self._feats = feats
    def fit(self, X, y=None):
        self._scaler = StandardScaler()
        self._scaler.fit(X[self._feats])
        return self
    def transform(self, X, y=None):
        tX = self._scaler.transform(X[self._feats])
        # write the scaled values back into the same columns
        X.loc[:, self._feats] = tX
        return X
    def get_params(self, deep=False):
        return {"feats": self._feats}
class ScalerTransformer(TransformerMixin, BaseEstimator):
    """Apply an arbitrary element-wise function to each of the given columns."""

    def __init__(self, feats, fn):
        self._feats = feats  # columns to rewrite
        self._fn = fn        # element-wise callable

    def fit(self, X, y=None):
        # stateless: nothing is learned from the data
        return self

    def transform(self, X, y=None):
        transform_one = self._fn
        for column in self._feats:
            X.loc[:, column] = X[column].apply(transform_one)
        return X

    def get_params(self, deep=False):
        return {"feats": self._feats, "fn": self._fn}
class FeatureSelector(TransformerMixin, BaseEstimator):
    """Project X down to a fixed list of columns."""

    def __init__(self, feature_names):
        self._feature_names = feature_names

    def fit(self, X, y=None):
        # stateless selection
        return self

    def transform(self, X, y=None):
        selected = X[self._feature_names]
        return selected

    def get_params(self, deep=False):
        return {"feature_names": self._feature_names}
class FeatureDropper(TransformerMixin, BaseEstimator):
    """Remove a fixed list of columns from X."""

    def __init__(self, feature_names):
        self._feature_names = feature_names

    def fit(self, X, y=None):
        # stateless drop
        return self

    def transform(self, X, y=None):
        return X.drop(self._feature_names, axis=1)

    def get_params(self, deep=False):
        return {"feature_names": self._feature_names}
class FrequencyTransformer ( TransformerMixin, BaseEstimator ):
    """Frequency-encode columns: replace each category by how often it
    appeared in the fit data."""

    def __init__(self, feats):
        self._feats = feats
    def get_params(self, deep=False):
        return {"feats": self._feats}
    def fit(self, X, y=None):
        count_map = {}
        for col in self._feats:
            # value_counts() gives category -> occurrence count
            count_map[col] = X[col].value_counts()
        self._count_map = count_map
        return self
    def transform(self, X, y=None):
        count_map = self._count_map
        for col in self._feats:
            ba = count_map[col]
            # NOTE(review): categories unseen at fit time map to NaN --
            # confirm downstream steps tolerate that
            X.loc[:, col] = X[col].map(ba)
        return X
class MedianPerClassFiller( TransformerMixin, BaseEstimator ):
    """Fill NaNs in `target_col` with a median learned at fit time.

    When `group_by_col` is None the global median is used; otherwise the
    median is computed per group (e.g. median Age per Title).
    """

    def __init__(self, group_by_col, target_col):
        self._by = group_by_col
        self._on = target_col
    def fit(self, X, y=None):
        by = self._by
        on = self._on
        if by is None:
            self._median = X[on].median()
        else:
            # compute the per-group median of the target column
            agg = X.groupby(by)[on].agg(['median'])
            md = agg["median"]
            mp = {}
            for k,v in zip(md.index.values, md.values):
                mp[k] = v
            self._map = mp
        return self
    def transform(self, X, y=None):
        by = self._by
        on = self._on
        if by is None:
            X.loc[:, on] = X[on].fillna(self._median)
        else:
            mp = self._map
            # NOTE(review): groups unseen at fit time would raise KeyError here
            def fn(row):
                # NaN != NaN, so this self-inequality detects missing values
                if row[on] != row[on]:
                    return mp[row[by]]
                else:
                    return row[on]
            X.loc[:, on] = X.apply(fn, axis=1)
        return X
    def get_params(self, deep=False):
        return {'group_by_col' : self._by, 'target_col' : self._on}
class MapTransformer(TransformerMixin, BaseEstimator):
    """Replace the values of one column using a fixed lookup dict."""

    def __init__(self, col, replace_map):
        super().__init__()
        self._col = col
        self._replace_map = replace_map

    def fit(self, X, y=None):
        # stateless: the mapping is supplied up front
        return self

    def transform(self, X, y=None):
        column = self._col
        X.loc[:, column] = X[column].map(self._replace_map)
        return X

    def get_params(self, deep=False):
        return {"replace_map": self._replace_map, "col": self._col}
class ModeFiller( TransformerMixin, BaseEstimator ):
    """Fill NaNs in the given columns with each column's mode (learned at fit)."""

    def __init__(self, feats):
        self._feats = feats  # removed a stray trailing `pass`

    def fit(self, X, y=None):
        # mode() can return several rows on ties; keep the first
        self._mode_X = X[self._feats].mode().iloc[0, :]
        return self

    def transform(self, X, y=None):
        dfm = self._mode_X
        # renamed `med` -> `mode_value`: the fill value is a mode, not a median
        for feat, mode_value in zip(dfm.index.values, dfm.values):
            X.loc[:, feat] = X[feat].fillna(mode_value)
        return X

    def get_params(self, deep=False):
        return {"feats": self._feats}
def getTitleFromName(strName):
    """Extract the honorific from a 'Surname, Title. Given' passenger name.

    Returns one of 'mr', 'miss', 'mrs', 'master', or 'other' for anything
    else (Dr, Rev, Don, ...).
    """
    given_part = strName.split(",")[1]
    title = given_part.split(".")[0].lower().strip()
    known_titles = {'mr', 'miss', 'mrs', 'master'}
    return title if title in known_titles else 'other'
def preprocess_df(df_all):
    """Add the engineered Title and FamilySize columns used by the pipeline."""
    titles = df_all["Name"].apply(getTitleFromName)
    df_all.loc[:, "Title"] = titles
    # family size = the passenger plus siblings/spouses plus parents/children
    df_all.loc[:, "FamilySize"] = df_all["SibSp"] + df_all["Parch"] + 1
    return df_all
# candidate hidden-layer layouts, selected by index via `lyrsi` in build_nn
denselayers = [[8], [10], [8, 8], [10, 5], [12, 6, 4]]
def build_nn(input_cols = 7, opt = 'Adam', dropoutrate = 0.2, lyrsi = 2):
    """Build a small fully-connected binary classifier.

    :param input_cols: number of input features after preprocessing
    :param opt: Keras optimizer name
    :param dropoutrate: dropout applied after every hidden layer
    :param lyrsi: index into `denselayers` selecting the hidden layout
    :return: compiled tf.keras Sequential model
    """
    layers = []
    layers.append(tf.keras.layers.Input(shape=(input_cols,)))
    for units in denselayers[lyrsi]:
        # small L2 penalty on the weights, plus dropout after each layer
        layers.append(tf.keras.layers.Dense(units=units, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(0.0001)))
        layers.append(tf.keras.layers.Dropout(rate=dropoutrate))
    layers.append(tf.keras.layers.Dense(units=1, activation="sigmoid"))
    model = tf.keras.Sequential(layers=layers)
    model.compile(optimizer=opt, loss="binary_crossentropy", metrics=['accuracy'])
    return model
def create_pipeline(model):
    """Assemble the preprocessing + model Pipeline.

    All preprocessing is done with DataFrame-in/DataFrame-out transformers
    rather than a FeatureUnion: Keras with validation_split does not accept
    FeatureUnion's sparse-matrix output.

    :param model: final estimator (e.g. a KerasClassifier)
    :return: sklearn Pipeline ending in `model`
    """
    pipeline = Pipeline(steps=[
        # fill missing values first
        ('age_filler', MedianPerClassFiller("Title", "Age")),
        ('embarked_filler', ModeFiller(["Embarked"])),
        ('fare_filler', MedianPerClassFiller("Pclass", "Fare")),
        # https://visualstudiomagazine.com/articles/2014/01/01/how-to-standardize-data-for-neural-networks.aspx
        # it seems that for binary data, it is better to encode to -1,1 rather than 0,1., but not here it seems.
        ('sex_map_transformer', MapTransformer("Sex", {"female": -1, 'male': 1})),
        ('pclass_map_transformer', MapTransformer("Pclass", {1: "High", 2: "Middle", 3: "Low"})),
        ('ohe_transformer', OHETransformer(["Pclass", 'Title', 'Embarked'])),
        # https://machinelearningmastery.com/how-to-improve-neural-network-stability-and-modeling-performance-with-data-scaling/
        # rule of thumb for NNs: scale inputs to [0,1] or to mean 0 / stdev 1
        ('std_scaler', StandardScalerTransformer(['Age', 'FamilySize', 'Fare'])),
        ('model', model)
    ])
    return pipeline
def dump_grid_search_results(grid_result):
    """Print the best score/params of a fitted search, then one line per
    tried parameter combination with its mean and stdev test score."""
    print(f"Best: {grid_result.best_score_:f} using {grid_result.best_params_}")
    cv = grid_result.cv_results_
    rows = zip(cv['mean_test_score'], cv['std_test_score'], cv['params'])
    for mean, stdev, param in rows:
        print(f"{mean:f} ({stdev:f}) with: {param!r}")
def search_layers_dropoutrate(df_train):
    """Grid-search the hidden-layer layout index and dropout rate,
    printing the results."""
    train_col = ["Pclass", 'Title', "FamilySize", 'Sex', 'Age', 'Fare', 'Embarked']
    # NOTE(review): input_cols=15 is presumably the post-one-hot feature width,
    # not len(train_col) -- confirm against the pipeline's OHE output
    model = tf.keras.wrappers.scikit_learn.KerasClassifier(build_fn=build_nn, opt = 'Nadam', input_cols=15, batch_size=16, epochs=100, verbose=0)
    pipeline = create_pipeline(model)
    lyrsis = [x for x in range(0, len(denselayers))]
    dropoutrate = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    param_grid = dict(model__lyrsi = lyrsis, model__dropoutrate = dropoutrate)
    # search the grid
    grid = GridSearchCV(estimator=pipeline,
                        param_grid=param_grid,
                        cv=5,
                        verbose=2,
                        n_jobs=-1) # include n_jobs=-1 if you are using CPU
    grid_result = grid.fit(df_train[train_col], df_train["Survived"])
    dump_grid_search_results(grid_result)
def search_opt(df_train):
    """Grid-search the Keras optimizer, printing the results."""
    train_col = ["Pclass", 'Title', "FamilySize", 'Sex', 'Age', 'Fare', 'Embarked']
    model = tf.keras.wrappers.scikit_learn.KerasClassifier(build_fn=build_nn, input_cols=15, batch_size=16, epochs=100, verbose=0)
    pipeline = create_pipeline(model)
    optimizers = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Nadam']
    param_grid = {
        "model__opt" : optimizers
    }
    # search the grid
    grid = GridSearchCV(estimator=pipeline,
                        param_grid=param_grid,
                        cv=5,
                        verbose=2,
                        n_jobs=-1) # include n_jobs=-1 if you are using CPU
    grid_result = grid.fit(df_train[train_col], df_train["Survived"])
    dump_grid_search_results(grid_result)
def search_params(df_train):
    """Grid-search batch size and epoch count, printing the results."""
    train_col = ["Pclass", 'Title', "FamilySize", 'Sex', 'Age', 'Fare', 'Embarked']
    model = tf.keras.wrappers.scikit_learn.KerasClassifier(build_fn=build_nn, input_cols=15, verbose=0)
    pipeline = create_pipeline(model)
    batch_size = [16, 32, 64]
    epochs = [25, 50, 75, 100]
    param_grid = {
        "model__batch_size": batch_size,
        "model__epochs": epochs
    }
    # search the grid
    grid = GridSearchCV(estimator=pipeline,
                        param_grid=param_grid,
                        cv=5,
                        verbose=2,
                        n_jobs=-1) # include n_jobs=-1 if you are using CPU
    grid_result = grid.fit(df_train[train_col], df_train["Survived"])
    dump_grid_search_results(grid_result)
def plot_acc(train, validation, blurb):
    """Plot a train-vs-validation curve (e.g. loss or accuracy) over epochs.

    :param train: per-epoch values for the training set
    :param validation: per-epoch values for the validation set
    :param blurb: metric name used in the title and y-axis label
    """
    for series in (train, validation):
        plt.plot(series)
    plt.title(f'model {blurb}')
    plt.ylabel(blurb)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
class LossHistory(tf.keras.callbacks.Callback):
    """Keras callback recording train/validation loss after every epoch."""

    def on_train_begin(self, logs):
        self.losses = []      # per-epoch training loss
        self.val_losses = []  # per-epoch validation loss
    def on_epoch_end(self, batch, logs):
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
class AccuracyHistory(tf.keras.callbacks.Callback):
    """Keras callback recording train/validation accuracy after every epoch."""

    def on_train_begin(self, logs):
        self.acc = []      # per-epoch training accuracy
        self.val_acc = []  # per-epoch validation accuracy
    def on_epoch_end(self, batch, logs):
        # NOTE(review): newer tf.keras reports 'accuracy'/'val_accuracy';
        # with those versions these .get() calls append None -- confirm
        # against the tensorflow version in use.
        self.acc.append(logs.get('acc'))
        self.val_acc.append(logs.get('val_acc'))
def predict(df_train, df_test):
    """Cross-validate the chosen model, fit it, plot the loss curves, and
    write test-set predictions to test.predicted.nn.csv."""
    train_col = ["Pclass", 'Title', "FamilySize", 'Sex', 'Age', 'Fare', 'Embarked']
    callbacks = [
        LossHistory()
    ]
    # hyperparameters presumably chosen from the search_* runs above -- confirm
    model = tf.keras.wrappers.scikit_learn.KerasClassifier(build_fn=build_nn, dropoutrate = 0.3, lyrsi = 0, opt = 'Nadam', input_cols = 12, epochs=100, batch_size=16, verbose=0)
    pipeline = create_pipeline(model)
    scores = cross_val_score(pipeline, df_train[train_col], df_train["Survived"], cv=5)
    print(f"mean train acc: {np.mean(scores)}")
    pipeline.fit(df_train[train_col], df_train["Survived"], model__callbacks=callbacks, model__validation_split=0.2)
    train_pred = pipeline.predict(df_train[train_col]).astype(int)
    # ROC on the training set: optimistic, but a quick sanity check
    print("roc: {}".format(roc_auc_score(df_train["Survived"], train_pred)))
    plot_acc(callbacks[0].losses, callbacks[0].val_losses, 'loss')
    test_pred = pipeline.predict(df_test[train_col]).astype(int)
    df_test.loc[:, "Survived"] = test_pred
    df_test[["PassengerId", "Survived"]].to_csv("test.predicted.nn.csv", index=False)
def main():
    """Load the Kaggle Titanic data, engineer features over train+test
    together, then train and write predictions."""
    df_train = pd.read_csv('train.csv')
    df_test = pd.read_csv('test.csv')
    # pd.concat replaces DataFrame.append (deprecated, removed in pandas 2.0);
    # combining first lets feature engineering see all categories consistently
    df_all = pd.concat([df_train, df_test])
    df_all = preprocess_df(df_all)
    # first 891 rows are the training set, the remainder the test set
    df_train = df_all.iloc[:891, :]
    df_test = df_all.iloc[891:, :]
    # uncomment to re-run the hyperparameter searches:
    #search_params(df_train)
    #search_opt(df_train)
    #search_layers_dropoutrate(df_train)
    predict(df_train, df_test)
def scratchpad():
    """Ad-hoc manual check of the one-hot transformer on a toy frame."""
    toy = pd.DataFrame(data={
        "numeric1": [0, 0, 0, 0, 1, 1, 1, 1],
        "numeric2": [0, 0, 1, 1, 2, 2, 3, 3],
        "categ0": ['m', 'm', 'm', 'm', 'f', 'f', 'f', 'x'],
        "categ1": ['p', 'w', 'p', 'w', 'p', 'w', 'p', 'w'],
        "categ": [0, 0, 0, 0, 1, 1, 1, 1]
    })
    encoder = OHETransformer(["categ0", "categ1"])
    encoder = encoder.fit(toy)
    encoded = encoder.transform(toy)
    print(encoded)
if __name__ == '__main__':
    # scratchpad()  # uncomment for ad-hoc transformer checks
    main()
|
991,518 | fb9fa98ef1d36a8c8124ed4fa2a4b66642468e69 | # Produce a plot of the last day of the year at a given temperature
import mx.DateTime
import iemdb
# Plot, for each high temperature, the latest day-of-year it has occurred
# at Ames (COOP station ia0200), overlaid with climatology and 2009/2010 obs.
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
# get climatology: one (date, average high) row per calendar day
ccursor.execute("""SELECT valid, high from ncdc_climate71 where station = 'ia0200'
ORDER by valid ASC""")
climate = []
cdoy = []
for row in ccursor:
    ts = row[0]
    climate.append( row[1] )
    cdoy.append( int(ts.strftime("%j")) )  # day of year, 1-366
# Extract observed highs for 2010
ccursor.execute("""SELECT sday, high from alldata where stationid = 'ia0200'
and year = 2010 ORDER by day ASC""")
obs = []
odoy = []
for row in ccursor:
    # sday is a month+day string; prepend the year to parse a full date
    ts = mx.DateTime.strptime("2010%s" % (row[0],) ,'%Y%m%d' )
    obs.append( row[1] )
    odoy.append( int(ts.strftime("%j")) )
# Same extraction for 2009
ccursor.execute("""SELECT sday, high from alldata where stationid = 'ia0200'
and year = 2009 ORDER by day ASC""")
obs2009 = []
odoy2009 = []
for row in ccursor:
    ts = mx.DateTime.strptime("2009%s" % (row[0],) ,'%Y%m%d' )
    obs2009.append( row[1] )
    odoy2009.append( int(ts.strftime("%j")) )
# For each distinct high above 50F, find the latest calendar day it occurred
ccursor.execute("""select high, max(sday) from alldata where stationid = 'ia0200'
GROUP by high ORDER by high DESC""")
highs = []
doy = []
m = 0
for row in ccursor:
    if row[0] <= 50:
        continue
    # year 2009 is arbitrary here — only the day-of-year is used
    ts = mx.DateTime.strptime("2009%s" % (row[1],) ,'%Y%m%d' )
    highs.append( row[0] )
    # running maximum keeps the latest-occurrence curve monotonic as the
    # temperature (sorted DESC) decreases
    if int( ts.strftime("%j") ) > m:
        m = int( ts.strftime("%j") )
    doy.append( m )
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot( doy, highs , label='Latest Occurence', zorder=2)
ax.scatter( odoy, obs , label='2010 Obs', zorder=2)
ax.scatter( odoy2009, obs2009 , label='2009 Obs', marker='+', zorder=2)
ax.plot( cdoy, climate, label='Climatology' )
ax.grid(True)
ax.set_xticks( (1,32,60,91,121,152,182,213,244,258,274,289,305,319,335,349,365) )
ax.set_xticklabels( ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep 1','Sep 15', 'Oct 1','Oct 15', 'Nov 1','Nov 15', 'Dec 1', 'Dec 15') )
ax.set_xlim( 240, 366 )
ax.set_ylim( 50, 110)
ax.set_ylabel("Temperature $^{\circ}\mathrm{F}$")
ax.set_xlabel('Day of Year')
ax.set_title("Ames High Temperatures [1893-2010]")
# shade October (days 274-305 per the xticks above) for emphasis
rect = Rectangle((274, 50), 31, 70, facecolor="#aaaaaa", zorder=1)
ax.add_patch(rect)
ax.legend()
import iemplot
fig.savefig('test.ps')
iemplot.makefeature('test')
991,519 | 1e7903d063b68954fef092d7a96f99b59ca00fc7 | import png
import math
import sys
import cmd
from collections import namedtuple
from Pathspec import Pathspec
Pixel = namedtuple('Pixel', 'x y r g b a')
def getPixel(x, y):
    """Read the RGBA pixel at (x, y) from the module-level flat rows in `pic`."""
    offset = 4 * x  # four channel values per pixel in each row
    channels = pic[y][offset:offset + 4]
    return Pixel(x, y, *channels)
def savePixel(p):
    """Write pixel p's RGBA channels back into the module-level rows in `pic`."""
    base = 4 * p.x
    row = pic[p.y]
    row[base], row[base + 1], row[base + 2], row[base + 3] = p.r, p.g, p.b, p.a
def t(n):
    """Darken a channel value: half of its distance from full intensity (255)."""
    inverted = 255 - n
    return inverted // 2
# Overlay a darkened grid onto the PNG given on the command line and save it
# next to the original as <name>_with_grid.png.
original = Pathspec(sys.argv[1], 'png')
final = Pathspec(original.directory + original.name + '_with_grid.png')
info = png.Reader(original.path).asRGBA()
width, height = info[0:2]
print('\nOriginal Image Details')
print('input file: \t%s' % original)
print('dimensions: \t%dpx by %dpx' % (width, height))
print('\nSpecify Grid Dimensions')
sqIn = input('square grid? (y/n) > ')
# An empty answer defaults to a square grid.
squareGrid = True if sqIn == '' else sqIn.lower().startswith('y')
cols = int(input('number of columns > '))
dx = width / cols
if squareGrid:
    # Pick the row count that makes cells closest to square.
    dy = height/round(height/dx)
else:
    rows = int(input('number of rows > '))
    dy = height / rows
# pic rows are flat RGBA sequences: 4 values per pixel — see getPixel/savePixel.
pic = list(info[2])
# Darken every pixel lying on a grid line; the 1-pixel image border is skipped.
for y in range(1, height-1):
    for x in range(1, width-1):
        if math.floor(y % dy) == 0 or math.floor(x % dx) == 0:
            p = getPixel(x, y)
            savePixel(Pixel(x, y, t(p.r), t(p.g), t(p.b), 255))
img = png.from_array(pic, 'RGBA')
img.save(final.path)
print('\nFinal Image Details')
print('output file: \t%s' % final.path)
print('grid spacing: \t%dpx by %dpx' % (dx, dy))
print('squareness: \t%d%%' % round(dx*100/dy))
|
991,520 | eb441bded980d8d5b60ad1eb9aae1c1da830aa92 | """
This module contains all unit tests for the calculator.
"""
from unittest import TestCase
from unittest.mock import MagicMock
from calculator.adder import Adder
from calculator.subtracter import Subtracter
from calculator.multiplier import Multiplier
from calculator.divider import Divider
from calculator.calculator import Calculator
from calculator.exceptions import InsufficientOperands
class AdderTests(TestCase):
    """Unit tests for the Adder operator."""

    def test_adding(self):
        """Adder.calc(b, a) should return the sum of its operands."""
        adder = Adder()
        for left in range(-10, 10):
            for right in range(-10, 10):
                self.assertEqual(left + right, adder.calc(right, left))
class SubtracterTests(TestCase):
    """Unit tests for the Subtracter operator."""

    def test_subtracting(self):
        """Subtracter.calc(b, a) should return a - b."""
        subtracter = Subtracter()
        for minuend in range(-10, 10):
            for subtrahend in range(-10, 10):
                self.assertEqual(minuend - subtrahend,
                                 subtracter.calc(subtrahend, minuend))
class MultiplierTests(TestCase):
    """Unit tests for the Multiplier operator."""

    def test_multiplying(self):
        """Multiplier.calc(b, a) should return the product of its operands."""
        multiplier = Multiplier()
        for left in range(-10, 10):
            for right in range(-10, 10):
                self.assertEqual(left * right, multiplier.calc(right, left))
class DividerTests(TestCase):
    """Unit tests for the Divider operator."""

    def test_dividing(self):
        """Divider.calc(b, a) should return a / b for every nonzero divisor."""
        divider = Divider()
        for numerator in range(-10, 10):
            for denominator in range(-10, 10):
                if denominator == 0:
                    continue  # division by zero is out of scope here
                self.assertEqual(numerator / denominator,
                                 divider.calc(denominator, numerator))
class CalculatorTests(TestCase):
    """Contain tests for Calculator class."""
    def setUp(self):
        """Initialize calculator with new operator objects."""
        self.adder = Adder()
        self.subtracter = Subtracter()
        self.multiplier = Multiplier()
        self.divider = Divider()
        self.calculator = Calculator(self.adder, self.subtracter,
                                     self.multiplier, self.divider)
    def test_insufficient_operands(self):
        """Test that InsufficientOperands is raised with only one operand entered."""
        self.calculator.enter_number(0)
        with self.assertRaises(InsufficientOperands):
            self.calculator.add()
    def test_adder_call(self):
        """Test a call to the Adder function using MagicMock."""
        self.adder.calc = MagicMock(return_value=0)
        self.calculator.enter_number(1)
        self.calculator.enter_number(2)
        self.calculator.add()
        # Operands are passed most-recently-entered first (2 before 1).
        self.adder.calc.assert_called_with(2, 1)
    def test_subtracter_call(self):
        """Test a call to the Subtracter function using MagicMock."""
        self.subtracter.calc = MagicMock(return_value=0)
        self.calculator.enter_number(1)
        self.calculator.enter_number(2)
        self.calculator.subtract()
        self.subtracter.calc.assert_called_with(2, 1)
|
991,521 | 005e749e116be2658af958c5e4a65c16c850ba69 | from lib.from_scratch.indexer import read_term_counts, read_index
from argparse import ArgumentParser
# CLI: run batch queries from a TSV file against a prebuilt on-disk index
# using a Vector Space retrieval model.
parser = ArgumentParser(description='Performs queries from a query file using a Vector Space retrieval model.')
parser.add_argument("index_path", help="the path to read the index from")
parser.add_argument("term_counts_path", help="the path to read the term counts from")
parser.add_argument("query_file_path", help="the path to a TSV query file containing query IDs and stopped, case-folded test queries")
parser.add_argument("-r", help="the number of scores to output for each query", type=int, default=None)
parser.add_argument("-rel", help="the path to a cacm relevance file to enable optimal query transformation using Rocchio's Algorithm", default=None)
args = parser.parse_args()
# Read the term counts and the index into module-level state BEFORE importing
# the retrieval model — presumably it reads that state at import/use time;
# keep this ordering. TODO confirm against retrieval_model internals.
read_term_counts(args.term_counts_path)
read_index(args.index_path)
from lib.from_scratch.retrieval_model import VectorSpaceRetrievalModel
# Create the retrieval model
model = VectorSpaceRetrievalModel()
# Perform the retrieval over every query in the file
model.process_query_file(args.query_file_path, args.rel, args.r)
991,522 | a58a7e2c05853c752021a2ceba984e8510ca6e95 | import torch
class TestingModel:
    '''
    Tests a trained CNN model against a prepared test dataset and reports
    top-1 accuracy as a percentage.
    '''
    def __init__(self, cnn_model, test_dataset_loader, test_dataset):
        # cnn_model: trained network; test_dataset_loader: iterable yielding
        # (images, labels) batches; test_dataset: used only for its length.
        self.cnn_model = cnn_model
        self.test_dataset_loader = test_dataset_loader
        self.test_dataset = test_dataset

    def test(self, device="cuda"):
        """Run the model over the test loader and return accuracy in percent.

        Args:
            device: torch device string the model already resides on.
        """
        self.cnn_model.eval()
        accuracy_count = 0
        # Inference only: disabling autograd avoids building computation
        # graphs per batch, cutting memory use and speeding up evaluation.
        with torch.no_grad():
            for test_images, test_labels in self.test_dataset_loader:
                test_images = test_images.to(device)
                test_labels = test_labels.to(device)
                test_outputs = self.cnn_model(test_images)
                # Predicted class = index of the maximum logit per sample.
                _, predicted = torch.max(test_outputs, 1)
                accuracy_count += torch.sum(predicted == test_labels).item()
        test_accuracy = 100 * accuracy_count / len(self.test_dataset)
        return test_accuracy
|
991,523 | 0c4b4ddae0e6839277bf54cc6fe020bb7f4ea7d4 | import socket
from socket import AF_INET, SOCK_STREAM
# Create an IPv4 (AF_INET) stream (TCP) socket and report it.
s = socket.socket(AF_INET, SOCK_STREAM, 0)
print(f"socket created:{s}")
991,524 | 62b0916c660db089e09492540508500a5bef20cc | import pickle
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from skimage.feature import hog
# NOTE: the next import is only valid for scikit-learn version <= 0.17
# for scikit-learn >= 0.18 use:
# from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
from pathlib import Path
# Declare the pickle file used to cache the trained classifier and the
# feature-extraction parameters between runs.
feature_vector_pickle_file = './feature_vector_pickle_fullset.p'
feature_vector_file = Path(feature_vector_pickle_file)
def convert_color(img, conv='RGB2YCrCb'):
    """Convert an image between color spaces via OpenCV.

    Returns None (implicitly) when `conv` is not a recognized conversion.
    """
    conversions = {
        'RGB2YCrCb': cv2.COLOR_RGB2YCrCb,
        'BGR2YCrCb': cv2.COLOR_BGR2YCrCb,
        'RGB2LUV': cv2.COLOR_RGB2LUV,
        'RGB2HSV': cv2.COLOR_RGB2HSV,
    }
    if conv in conversions:
        return cv2.cvtColor(img, conversions[conv])
def bin_spatial(img, size=(32, 32)):
    """Downsample each of the three channels to `size` and concatenate them
    into a single flat feature vector."""
    channels = [cv2.resize(img[:, :, c], size).ravel() for c in range(3)]
    return np.hstack(channels)
def color_hist(img, nbins=32):
    """Histogram each color channel separately (counts only, numpy's
    automatic bin range) and concatenate into one feature vector."""
    counts = [np.histogram(img[:, :, c], bins=nbins)[0] for c in range(3)]
    return np.concatenate(counts)
# Define a function to return HOG features and visualization
# Compute HOG features for one channel; optionally also return the
# visualization image.
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                        vis=False, feature_vec=True):
    hog_kwargs = dict(
        orientations=orient,
        pixels_per_cell=(pix_per_cell, pix_per_cell),
        cells_per_block=(cell_per_block, cell_per_block),
        transform_sqrt=True,
        visualise=vis,
        feature_vector=feature_vec,
    )
    # With vis == True skimage returns (features, hog_image).
    if vis == True:
        features, hog_image = hog(img, **hog_kwargs)
        return features, hog_image
    # Otherwise a single features array.
    return hog(img, **hog_kwargs)
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Build one flat feature vector per image path.

    Each vector is the concatenation (in this order) of the enabled parts:
    spatial binning, color histograms, and HOG features.

    Args:
        imgs: iterable of image file paths (read via matplotlib.image.imread).
        color_space: 'RGB', 'HSV', 'LUV', 'HLS', 'YUV' or 'YCrCb'.
        hog_channel: channel index for HOG, or 'ALL' for all three.
    Returns:
        list of 1-D numpy feature vectors, one per input image.
    """
    print('Extracting features, this may take a while...')
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        file_features = []
        # Read in each one by one
        image = mpimg.imread(file)
        # apply color conversion if other than 'RGB'
        # (unknown color_space strings silently fall through to a plain copy)
        if color_space != 'RGB':
            if color_space == 'HSV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            elif color_space == 'LUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            elif color_space == 'HLS':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            elif color_space == 'YUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            elif color_space == 'YCrCb':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
        else: feature_image = np.copy(image)
        if spatial_feat == True:
            spatial_features = bin_spatial(feature_image, size=spatial_size)
            file_features.append(spatial_features)
        if hist_feat == True:
            # Apply color_hist()
            hist_features = color_hist(feature_image, nbins=hist_bins)
            file_features.append(hist_features)
        if hog_feat == True:
            # Call get_hog_features() with vis=False, feature_vec=True
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(get_hog_features(feature_image[:,:,channel],
                                        orient, pix_per_cell, cell_per_block,
                                        vis=False, feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
                            pix_per_cell, cell_per_block, vis=False, feature_vec=True)
            # Append the new feature vector to the features list
            file_features.append(hog_features)
        features.append(np.concatenate(file_features))
    # Return list of feature vectors
    return features
# Divide up into cars and notcars
# cars
car_filepath = []
car_filepath.append('../labeled dataset/project/vehicles/GTI_Far/*.png')
car_filepath.append('../labeled dataset/project/vehicles/GTI_Left/*.png')
car_filepath.append('../labeled dataset/project/vehicles/GTI_MiddleClose/*.png')
car_filepath.append('../labeled dataset/project/vehicles/GTI_Right/*.png')
car_filepath.append('../labeled dataset/project/vehicles/KITTI_extracted/*.png')
# not cars
notcar_filepath = []
notcar_filepath.append('./training_set/non-vehicles/GTI/*.png')
notcar_filepath.append('./training_set/non-vehicles/Extras/*.png')
# NOTE(review): car and not-car globs use different base directories
# ('../labeled dataset/...' vs './training_set/...') — confirm both exist.
car_images = []
notcar_images = []
for files in car_filepath:
    print('car files are {}'.format(files))
    car_images = car_images + glob.glob(files)
print('Number of car images in the data set {}'.format(len(car_images)))
for files in notcar_filepath:
    print('notcar files are {}'.format(files))
    notcar_images = notcar_images + glob.glob(files)
print('Number of non-car images in the data set {}'.format(len(notcar_images)))
# cars/notcars are working copies of the path lists, kept separate so the
# optional sub-sampling below does not touch the originals.
cars = []
notcars = []
for image in car_images:
    cars.append(image)
for image in notcar_images:
    notcars.append(image)
# Note: During testing, reduce the sample size because HOG features are slow to compute
# The quiz evaluator times out after 13s of CPU time
reduce_sample_size_for_testing = False
if reduce_sample_size_for_testing is True:
    sample_size = 500
    cars = cars[0:sample_size]
    notcars = notcars[0:sample_size]
# Apply these parameters to extract the features.
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 8
pix_per_cell = 8
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32)
hist_bins = 32
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
# check if we already created the feature vectors; if so, load the cached
# classifier/scaler/parameters instead of retraining
if feature_vector_file.is_file():
    print('Feature vectors are already created')
    # load the feature vectors and corresponding parameters
    with open(feature_vector_pickle_file, mode='rb') as f:
        dist_pickle = pickle.load(f)
    svc = dist_pickle["svc"]
    color_space = dist_pickle["color_space"]
    X_scaler = dist_pickle["scaler"]
    orient = dist_pickle["orient"]
    pix_per_cell = dist_pickle["pix_per_cell"]
    cell_per_block = dist_pickle["cell_per_block"]
    spatial_size = dist_pickle["spatial_size"]
    hist_bins = dist_pickle["hist_bins"]
    # NOTE(review): hog_channel is not stored in the pickle, so the value
    # printed here is the module default, not necessarily the trained one.
    print('Model trained using:',color_space,'color space |',orient,'orientations |',pix_per_cell,
        'pixels per cell |', cell_per_block,'cells per block |', hog_channel, 'hog channel')
else:
    t=time.time()
    car_features = extract_features(cars, color_space=color_space,
                            spatial_size=spatial_size, hist_bins=hist_bins,
                            orient=orient, pix_per_cell=pix_per_cell,
                            cell_per_block=cell_per_block,
                            hog_channel=hog_channel, spatial_feat=spatial_feat,
                            hist_feat=hist_feat, hog_feat=hog_feat)
    notcar_features = extract_features(notcars, color_space=color_space,
                            spatial_size=spatial_size, hist_bins=hist_bins,
                            orient=orient, pix_per_cell=pix_per_cell,
                            cell_per_block=cell_per_block,
                            hog_channel=hog_channel, spatial_feat=spatial_feat,
                            hist_feat=hist_feat, hog_feat=hog_feat)
    t2 = time.time()
    print(round(t2-t, 2), 'Seconds to extract HOG features...')
    # Create an array stack of feature vectors
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    # Fit a per-column scaler
    X_scaler = StandardScaler().fit(X)
    # Apply the scaler to X
    scaled_X = X_scaler.transform(X)
    # Define the labels vector: 1 = car, 0 = not car
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
    # Split up data into randomized training and test sets
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(
        scaled_X, y, test_size=0.2, random_state=rand_state)
    print('Using:',color_space,'color space |',orient,'orientations |',pix_per_cell,
        'pixels per cell |', cell_per_block,'cells per block |', hog_channel, 'hog channel')
    print('Feature vector length:', len(X_train[0]))
    # Use a linear SVC
    svc = LinearSVC()
    # Check the training time for the SVC
    t=time.time()
    svc.fit(X_train, y_train)
    t2 = time.time()
    print(round(t2-t, 2), 'Seconds to train SVC...')
    # Check the score of the SVC
    print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
    # Check the prediction time for a single sample
    t=time.time()
    n_predict = 10
    print('My SVC predicts: ', svc.predict(X_test[0:n_predict]))
    print('For these',n_predict, 'labels: ', y_test[0:n_predict])
    t2 = time.time()
    print(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC')
    # save the result for later use
    # NOTE(review): the file handle below is never closed explicitly; a
    # `with open(...)` block would be safer.
    dist_pickle = {}
    dist_pickle["svc"] = svc
    dist_pickle["color_space"] = color_space
    dist_pickle["scaler"] = X_scaler
    dist_pickle["orient"] = orient
    dist_pickle["pix_per_cell"] = pix_per_cell
    dist_pickle["cell_per_block"] = cell_per_block
    dist_pickle["spatial_size"] = spatial_size
    dist_pickle["hist_bins"] = hist_bins
    pickle.dump( dist_pickle, open( feature_vector_pickle_file, 'wb' ) )
# Visualization Car VS Not-Car: save a side-by-side sample pair, then the
# per-channel HOG renderings for each, into ./output_images/.
# pick a random image from cars
random_pick = np.random.randint(0, len(cars))
random_car = cars[random_pick]
car_image = mpimg.imread(random_car)
car_image_converted = convert_color(car_image) #default is YCrCb
print('randomly picked car: {}'.format(random_car))
# pick a random image from notcars
random_pick = np.random.randint(0, len(notcars))
random_notcar = notcars[random_pick]
notcar_image = mpimg.imread(random_notcar)
notcar_image_converted = convert_color(notcar_image) #default is YCrCb
print('randomly picked not-car: {}'.format(random_notcar))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(car_image)
ax1.set_title('Car', fontsize=20)
ax2.imshow(notcar_image)
ax2.set_title('Not-Car', fontsize=20)
fig.savefig('./output_images/car_not_car.png')
# Visualization HOG features with 3 channels from YCrCb (car sample)
for i in range(3):
    channel = car_image_converted[:,:,i]
    features, hog_image = get_hog_features(channel, orient,
                            pix_per_cell, cell_per_block,
                            vis=True, feature_vec=False)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(channel, cmap='gray')
    car_title = 'Car CH-' + str(i)
    ax1.set_title(car_title, fontsize=20)
    ax2.imshow(hog_image, cmap='gray')
    hog_title = 'HOG_Visualization_Car_CH-' + str(i)
    ax2.set_title(hog_title, fontsize=20)
    filename = './output_images/' + hog_title + '.png'
    fig.savefig(filename)
# Same per-channel HOG renderings for the not-car sample
for i in range(3):
    channel = notcar_image_converted[:,:,i]
    features, hog_image = get_hog_features(channel, orient,
                            pix_per_cell, cell_per_block,
                            vis=True, feature_vec=False)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(channel, cmap='gray')
    car_title = 'Not-Car CH-' + str(i)
    ax1.set_title(car_title, fontsize=20)
    ax2.imshow(hog_image, cmap='gray')
    hog_title = 'HOG_Visualization_Not-Car_CH-' + str(i)
    ax2.set_title(hog_title, fontsize=20)
    filename = './output_images/' + hog_title + '.png'
    fig.savefig(filename)
991,525 | 20d96113649833c0076b0ef9ce72deacdf6e4408 | """
For two given composers find operas (one of composers is the author)
at which premiere both could have been together (i.e. they both had to be alive).
Just a theoretical possibility of presence is considered
"""
from datetime import datetime
# key -> (full name, birth date, death date); dates parse with "%d %B %Y"
composers = {
    "beethoven": ("Ludwig van Beethoven", "17 December 1770", "26 March 1827"),
    "wagner": ("Richard Wagner", "22 May 1813", "13 February 1883"),
    "verdi": ("Giuseppe Verdi", "9 October 1813", "27 January 1901"),
    "mozart": ("Wolfgang Amadeus Mozart", "27 January 1756", "5 December 1791"),
}
# (composer key, opera title, premiere date)
operas = [
    ("mozart", "Apollo and Hyacinth", "13 May 1767"),
    ("mozart", "Marriage of Figaro", "1 May 1786"),
    ("mozart", "Don Giovanni", "29 October 1787"),
    ("mozart", "Così fan tutte", "6 January 1790"),
    ("mozart", "The Clemency of Titus", "6 September 1791"),
    ("mozart", "The Magic Flute", "30 September 1791"),
    ("wagner", "The Fairies", "29 June 1888"),
    ("wagner", "Rienzi", "20 October 1842"),
    ("wagner", "The Flying Dutchman", "2 January 1843"),
    ("wagner", "Tannhäuser", "19 October 1845"),
    ("wagner", "Lohengrin", "28 August 1850"),
    ("wagner", "The Rhinegold", "22 September 1869"),
    ("wagner", "The Valkyrie", "26 June 1870"),
    ("wagner", "Siegfried", "16 August 1876"),
    ("wagner", "Twilight of the Gods", "17 August 1876"),
    ("wagner", "Tristan and Isolde", "10 June 1865"),
    ("wagner", "The Master-Singers of Nuremberg", "21 June 1868"),
    ("wagner", "Parsifal", "26 July 1882"),
    ("beethoven", "Fidelio", "20 November 1805"), #originally titled Leonore
    ("verdi", "Nabucco", "9 March 1842"),
    ("verdi", "Ernani", "9 March 1844"),
    ("verdi", "Macbeth", "14 March 1847"),
    ("verdi", "Il corsaro", "25 October 1848"),
    ("verdi", "Rigoletto", "11 March 1851"),
    ("verdi", "La traviata", "6 March 1853"),
    ("verdi", "Aroldo", "16 August 1857"),
    ("verdi", "Macbeth", "21 April 1865"),
    ("verdi", "Don Carlos", "11 March 1867"),
    ("verdi", "Aida", "24 December 1871"),
    ("verdi", "Otello", "5 February 1887"),
    ("verdi", "Falstaff", "9 February 1893"),
]
def _get_date(date_str):
return datetime.date(datetime.strptime(date_str, "%d %B %Y"))
def operas_both_at_premiere(guest, composer, operas=operas):
    """ Returns a list of titles of operas,
    where the guest and the composer could have been together at premiere.
    Args:
        guest (str): one of the composers but not the author of an opera
        composer (str): the author of an opera
        operas (list): list of operas
    Returns a list of titles of operas.
    """
    # Lifespans of both people; each must be alive on the premiere date.
    guest_born = _get_date(composers[guest][1])
    guest_died = _get_date(composers[guest][2])
    author_born = _get_date(composers[composer][1])
    author_died = _get_date(composers[composer][2])
    titles = []
    for author, title, premiere in operas:
        if author != composer:
            continue  # only operas written by `composer` qualify
        premiere_date = _get_date(premiere)
        if (guest_born <= premiere_date <= guest_died
                and author_born <= premiere_date <= author_died):
            titles.append(title)
    return titles
|
991,526 | 18f230df6948b988250a3d41057cff4b7e4833d0 | # This creates a custom user that starts out as identical to the default
# user
#
# The sequence is:
#
# (note, we can run the Django server before this process is complete, we
# just mustn't create or run migrations)
#
# 1. Create the Django Project
# 2. Create a `users` app
# 3. Decide whether to subclass `AbstractUser` or `AbstractBaseUser`
# (this project uses the former, which is much, much easier)
# 4. Create initial custom user model:
# 4.1 update `settings.py`
# 4.2 create a replacement `User` model (see below)
# 4.3 create new user forms (`forms.py`)
# 4.4 customise Django admin (`admin.py`)
# 5. Run `makemigrations` for `users`
# 6. Run `migrate` for `users`
# 7. Create a superuser
# 8. Update `settings.py` for templates
# 9. Create the project-level templates (see the templates for details):
# 9.1 `base.html` (or `_layout.html`)
# 9.2 `registration/login.html`
# 9.3 `home.html`
# 9.4 `signup.html`
# 10. Create URL routing:
# 10.1 `<project_dir>/<project_dir>/urls.py`
# 10.2 `users/urls.py`
# 11. Create views:
# 11.1 `users/views.py`
from django.contrib.auth.models import AbstractUser
#from django.db import models
class KSUser(AbstractUser):
    """Custom user model, currently identical to Django's default user.

    Defined up front (per the checklist above) so that custom fields can be
    added later without the painful mid-project migration to a custom user
    model.
    """
    pass
|
991,527 | ba96af48f266c7b5810de32aa6346ec5837b6503 | # coding=UTF_8
#
# problem_056.py
# ProjectEuler
#
# This file was created by Jens Kwasniok on 15.08.16.
# Copyright (c) 2016 Jens Kwasniok. All rights reserved.
#
from problem_000 import *
class Problem_056(Problem):
    """Project Euler problem 56: maximal digit sum of a^b for a, b < N."""

    def __init__(self):
        self.problem_nr = 56
        self.input_format = (InputType.NUMBER_INT, 1, 100)
        self.default_input = 100
        self.description_str = '''A googol (10^100) is a massive number: one followed by one-hundred zeros; 100100 is almost unimaginably large: one followed by two-hundred zeros. Despite their size, the sum of the digits in each number is only 1.
Considering natural numbers of the form, a^b, where a, b < ''' + dye_input_var(100) + ", what is the maximum digital sum?\n"

    def calculate(self, N):
        """Scan every pair 0 <= a, b < N for the largest digit sum of a**b."""
        best_sum = -1  # below any achievable digit sum
        best_pair = None
        for a in range(N):
            for b in range(N):
                digit_sum = sum(int(digit) for digit in str(a ** b))
                # strictly-greater keeps the first pair reaching the maximum
                if digit_sum > best_sum:
                    best_sum = digit_sum
                    best_pair = [a, b]
        self.last_result = best_sum
        self.last_result_details = best_pair

    def details(self):
        """Describe the winning (a, b) pair and its power."""
        a, b = self.last_result_details
        return "a = " + str(a) + ", b = " + str(b) + ", a^b = " + str(a ** b)


register_problem(Problem_056())
|
991,528 | 8c4ec96992e534f5618657ffb228fa0998efd9b1 |
from bob import make_bob, otp_encrypt
import random
import bob
# Measurement filters: 'x' = diagonal basis, '+' = rectilinear basis.
FILTERS="x+"
# Photon polarizations belonging to each basis.
DIAGONAL="↗↖"
RECTILINEAR="↑→"
# All possible polarizations a photon can carry.
ALL=DIAGONAL+RECTILINEAR
def getKey(photons, disposalInstructions, messageLen):
    """Build the shared key from correctly measured photons.

    Keeps photons[i] wherever disposalInstructions[i] is truthy, then
    truncates to 5 * messageLen entries.  Returns None when too few photons
    survive to form a full-length key.
    """
    kept = [photon for i, photon in enumerate(photons) if disposalInstructions[i]]
    if len(kept) < messageLen * 5:
        return None
    return kept[:messageLen * 5]
def validateFilters(photons, filters):
    """Mark which of Bob's filters matched the basis of the sent photon.

    Returns (isValid, correctPhotons): a per-photon list of booleans and the
    count of correct measurements.  A None filter (lost photon) is always
    counted as invalid.
    """
    diagFilter = FILTERS[0]
    rectFilter = FILTERS[1]
    isValid = []
    correctPhotons = 0
    for i, photon in enumerate(photons):
        filt = filters[i]
        matched = filt is not None and (
            (photon in DIAGONAL and filt == diagFilter)
            or (photon in RECTILINEAR and filt == rectFilter)
        )
        isValid.append(matched)
        if matched:
            correctPhotons += 1
    return isValid, correctPhotons
def generatePhotons(messageLen):
    """Generate a random photon stream roughly 10-13x the message length.

    The multiplier is random.randint(10, 12) plus a random fraction, leaving
    enough headroom for a 5x-length key even when about half of Bob's filter
    guesses are wrong.
    """
    multiplier = random.randint(10, 12) + random.random()
    streamLen = int(messageLen * multiplier)
    return bob.choices(ALL, streamLen)
def problem1(bob, message):
    """
    An example of quantum key exchange and OTP message sending
    All photons arrive at bob and there is no eavesdropping on the line
    """
    """
    1. Generate a sufficiently large random key; the key must be at least 5
    times the length of the message and on average half of bobs guess
    filters will be wrong
    2. Get the filters bob used by using bob.quantum_channel(data)
    3. Create the list of correct filters sent and figure out which filters
    Bob used correctly
    4. Tell Bob which filters he guessed incorrectly and should remove
    5. Create the key and to make sure it's >= 5*len(message) and shorten the
    key to 5*len(message) if it is currently longer
    6. Call otp_encrypt(key, message) to encrypt the message and then use
    bob.message(ciphertext) to send Bob the coded message
    """
    #1. Generate photon batches (each roughly 10-13x the message length)
    messageLen = len(message)
    allPhotons = []
    allFilters = []
    correctPhotons = 0
    # keep sending batches until enough correct measurements exist for a
    # key of 5 * message length
    while correctPhotons < messageLen*5:
        photons = generatePhotons(messageLen)
        #2. Bob measures each photon with a randomly chosen filter
        bobFilters = bob.quantum_channel(photons)
        allFilters += bobFilters
        allPhotons += photons
        #3. Determine which of Bob's filters matched the sending basis
        disposalInstructions, correctPhotons = validateFilters(allPhotons, allFilters)
    if correctPhotons >= messageLen*5:
        #4. Tell Bob which measurements to discard
        bob.dispose(disposalInstructions)
        #5. Build the truncated 5x-length key from the kept photons
        key = getKey(allPhotons, disposalInstructions, messageLen)
        #6. One-time-pad encrypt with the agreed key and send
        ciphertext = otp_encrypt(key, message)
        bob.message(ciphertext)
def problem2(bob, message):
    """
    If Bob selects the incorrect filter, there is a 10% chance that the photon will be lost
    The length of the list of filters returned by Bob is the number of photons that reached bob successfully
    """
    # This solution is identical to problem 1 as incorrect filters and Nones are both handled in validateFilters
    messageLen = len(message)
    allPhotons = []
    allFilters = []
    correctPhotons = 0
    while correctPhotons < messageLen*5:
        photons = generatePhotons(messageLen)
        bobFilters = bob.quantum_channel(photons)
        allPhotons += photons
        allFilters += bobFilters
        # lost photons show up as None filters and are marked invalid here
        disposalInstructions, correctPhotons = validateFilters(allPhotons, allFilters)
    if correctPhotons >= messageLen*5:
        bob.dispose(disposalInstructions)
        key = getKey(allPhotons, disposalInstructions, messageLen)
        # One-time-pad encrypt with the agreed key and send
        ciphertext = otp_encrypt(key, message)
        bob.message(ciphertext)
def problem3(bob, message):
    """
    Eve may be eavesdropping and alter the polarity of photons, but no photons are lost
    """
    messageLen = len(message)
    allPhotons = []
    allFilters = []
    correctPhotons = 0
    while correctPhotons < messageLen*5:
        # Generate a random string of photons
        photons = generatePhotons(messageLen)
        # Generate a list (len == len(photons)) marking where Bob should reveal
        # what photon he received (every third photon)
        tellList = generateTell(photons)
        # transmit the photon list and tellList to bob
        bobFilters = bob.quantum_channel(photons, tellList)
        allPhotons += photons
        allFilters += bobFilters
        # Check whether Eve is eavesdropping using the revealed photons
        allPhotons, allFilters = checkForEve(bob, allPhotons, allFilters)
        # If we get back Nones, Eve was detected and reported; abort
        if allPhotons == None and allFilters == None:
            return
        # Otherwise compute disposal instructions and check key length
        disposalInstructions, correctPhotons = validateFilters(allPhotons, allFilters)
    # Key is long enough: tell Bob what to drop and build the key
    if correctPhotons >= messageLen*5:
        bob.dispose(disposalInstructions)
        key = getKey(allPhotons, disposalInstructions, messageLen)
        # only send bob the ciphertext when Eve is not eavesdropping
        ciphertext = otp_encrypt(key, message)
        bob.message(ciphertext)
def checkForEve(bob, allPhotons, allFilters):
    # Takes the bob object plus the accumulated photon and filter lists;
    # returns (fixedPhotons, fixedFilters) with revealed photons removed, or
    # (None, None) after reporting Eve when a revealed photon was altered.
    # each filter is stored here for ease of coding
    rectFilter = FILTERS[1]
    diagFilter = FILTERS[0]
    fixedPhotons = []
    fixedFilters = []
    for i in range(len(allFilters)):
        filt = allFilters[i]
        photon = allPhotons[i]
        # if the photon was not dropped
        if filt != None:
            # check if the sent photon and the received photon are in the same basis
            # (a basis entry here means Bob revealed the photon itself, not a filter)
            if (photon in DIAGONAL) and (filt in DIAGONAL):
                # same basis but opposite orientation => Eve tampered with it
                if DIAGONAL.index(photon) != DIAGONAL.index(filt):
                    # report eve and return Nones
                    bob.report_eve()
                    return None, None
            # same as above for the rectilinear basis
            elif ((photon in RECTILINEAR) and (filt in RECTILINEAR)):
                if RECTILINEAR.index(photon) != RECTILINEAR.index(filt):
                    bob.report_eve()
                    return None, None
            elif filt not in DIAGONAL and filt not in RECTILINEAR:
                # a real filter (not a revealed photon): keep this pair
                # NOTE(review): revealed-and-unaltered photons fall through the
                # branches above without being kept — presumably intentional,
                # since revealed photons are public and unusable for the key.
                fixedPhotons.append(photon)
                fixedFilters.append(filt)
        elif filt == None:
            # tellList==True with filt==None was handled upstream, so Nones
            # here are ordinary lost photons and can be kept
            fixedPhotons.append(photon)
            fixedFilters.append(filt)
    return fixedPhotons, fixedFilters
def generateTell(photons):
    """Flag every third photon (indices 0, 3, 6, ...) for Bob to report back."""
    return [index % 3 == 0 for index in range(len(photons))]
def cleanFilters(bobFilters, photons, tellList):
    """Drop positions where Bob was asked to report a photon but lost it.

    Removes every index where tellList[i] is True and the filter is None.
    `photons` is filtered in place (same list object); the filter list is
    rebuilt from a copy.  Returns the (filtered filters, filtered photons)
    pair.
    """
    filters_copy = list(bobFilters)
    survivingFilters = []
    survivingPhotons = []
    for i in range(len(filters_copy)):
        if tellList[i] and filters_copy[i] is None:
            continue  # told-and-lost: discard this position entirely
        survivingFilters.append(filters_copy[i])
        survivingPhotons.append(photons[i])
    photons[:] = survivingPhotons  # mutate the caller's list in place
    return survivingFilters, photons
def problem4(bob, message):
    """Run the key exchange when Eve may eavesdrop AND photons may be lost.

    If Eve uses the wrong filter, there is a 10% chance the packet is lost;
    if Bob uses the wrong filter, there is a 10% chance the photon is lost.
    The list of filters Bob returns only covers photons that reached him.
    Batches are sent until at least 5x len(message) photons were measured
    with matching filters, then the mismatches are disposed of, a one-time
    pad key is derived and the encrypted message is sent.
    """
    target = len(message) * 5
    allPhotons = []
    allFilters = []
    matched = 0
    while matched < target:
        photons = generatePhotons(len(message))
        tellList = generateTell(photons)
        bobFilters = bob.quantum_channel(photons, tellList)
        # Unlike problem 3, Bob may answer None (photon lost) even in slots
        # where tellList asked him to report; strip those slots first.
        bobFilters, photons = cleanFilters(bobFilters, photons, tellList)
        allPhotons += photons
        allFilters += bobFilters
        allPhotons, allFilters = checkForEve(bob, allPhotons, allFilters)
        if allPhotons is None and allFilters is None:
            # Eve was detected (and already reported); abandon the exchange.
            return
        disposalInstructions, matched = validateFilters(allPhotons, allFilters)
    if matched >= target:
        bob.dispose(disposalInstructions)
        key = getKey(allPhotons, disposalInstructions, len(message))
        bob.message(otp_encrypt(key, message))
def test():
    # Data-driven demo runner: (banner lines, problem function,
    # bob's problemNumber, plaintext) -- printed and executed in order.
    LONG = "HELLO BOB HOW ARE YOU DOING TODAY"
    cases = [
        (["\nTesting Problem 1 :"], problem1, 1, LONG),
        (["\tSecond Test:"], problem1, 1, "HELLO BOB"),
        (["\nTesting problem 2 :"], problem2, 2, LONG),
        (["\nTesting Problem 3:", "\tTest Using P3 Bob:"], problem3, 3, LONG),
        (["\tSecond Test:"], problem3, 3, "HELLO BOB"),
        (["\tTest Using P1 Bob:"], problem3, 1, "HELLO BOB"),
        (["\nTesting Problem 4:", "\tTesting Using P4 Bob:"], problem4, 4, LONG),
        (["\tTesting Using P2 Bob:"], problem4, 2, LONG),
        (["\tTesting Using P1 Bob:"], problem4, 1, LONG),
        (["\tTesting using P3 Bob:"], problem4, 3, LONG),
    ]
    for banners, problem, bob_number, plaintext in cases:
        for banner in banners:
            print(banner)
        problem(make_bob(problemNumber=bob_number), plaintext)
    return
# Run the full demo suite when this file is executed directly.
if __name__ == "__main__":
    test()
|
991,529 | cdc9d81c2a7f21bc9a839f96f2c22eb63258ad39 | """
main.py
"""
from parser import get_from_url
def main():
    # Fetch the statistics for one sample video and print them space-separated.
    video_url = "https://www.youtube.com/watch?v=yUtB4Zg_ioc"
    view_count, like_count, dislike_count = get_from_url(video_url)
    print(view_count, like_count, dislike_count)
if __name__ == "__main__":
main() |
991,530 | cdf70dc787daeb69f5cc36d59c72ca8a5d9860ba | number = int(input("Which number do you want\n"),)
# Report the parity of the number read above.
print("This is Even Number" if number % 2 == 0 else "This is Odd Number")
|
991,531 | 286e5eb4ba64a63efad6b73da57f9ef8d923c482 | def insertionSort(arr):
for i in range(1, len(arr)):
temp = arr[i]
while arr[i-1] > temp and i > 0:
arr[i], arr[i-1] = arr[i-1], arr[i]
i = i-1
return arr
print(insertionSort([6, 8, 1, 4, 5, 3, 7, 2]))
|
991,532 | 45e887f3b219be8209d25a2f8991f6745a8b8f63 | with open("input.txt", "r") as f:
test_input = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
data = [int(n) for n in f.read().split(" ")]
class Node(object):
    """One node of the license tree: child nodes plus integer metadata."""
    def __init__(self, no_subnodes, no_metadata):
        # Only the expected counts are stored here; the actual subnodes and
        # metadata entries are appended externally while parsing the input.
        self.no_subnodes = no_subnodes
        self.no_metadata = no_metadata
        self.subnodes = []
        self.metadata = []
    def total_metadata(self):
        # Sum of this node's metadata plus, recursively, all descendants'.
        return sum(self.metadata) + sum(child.total_metadata() for child in self.subnodes)
    def value(self):
        # Leaf: value is the metadata sum.  Otherwise each metadata entry is
        # a 1-based child index; entries above no_subnodes contribute nothing.
        if self.no_subnodes == 0:
            return sum(self.metadata)
        return sum(self.subnodes[m - 1].value() for m in self.metadata if m <= self.no_subnodes)
    def __str__(self):
        # representation for checking if the tree is correct
        res = str(self.metadata) + "\n"
        for subnode in self.subnodes:
            for line in str(subnode).split("\n"):
                res += " " + line + "\n"
        # removing the last newline character
        return res[:-1]
    def __repr__(self):
        return self.__str__()
def build_tree(dat):
    """Recursively parse the flat int list into a Node tree.

    Returns (root, leftover) where `leftover` is whatever of `dat` remains
    after this node's subtree and metadata have been consumed.
    """
    # header: child count, metadata count
    root = Node(dat[0], dat[1])
    dat = dat[2:]
    # `range` instead of Python-2-only `xrange`: iteration is identical in
    # Python 2 and the code then also runs under Python 3.
    for _ in range(root.no_subnodes):
        # parse one child and shorten the remaining data accordingly
        subnode, dat = build_tree(dat)
        root.subnodes.append(subnode)
    for j in range(root.no_metadata):
        root.metadata.append(dat[j])
    # return the root node and the leftover data
    return root, dat[root.no_metadata:]
tree, data_left = build_tree(data)
# If there is data left over, the parse consumed the wrong amount somewhere.
assert not data_left
# Report: tree layout, metadata sum (part 1) and root value (part 2).
# print() calls are valid in both Python 2 (single-argument form) and 3,
# unlike the original Python-2-only print statements.
print(str(tree))
print(tree.total_metadata())
print(tree.value())
|
991,533 | 393a5586ddd5b5172d8a946a38e62f230507c477 | #include <stdio.h>
#include <stdlib.h>
int largest,element,a[1300010];
/* qsort comparator for ints.  Returns negative/zero/positive via two
 * comparisons instead of the usual `*a - *b`, which overflows (undefined
 * behaviour) when the operands are far apart, e.g. INT_MIN vs INT_MAX. */
int cmp(const void *a,const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}
/* Median (middle element) of the sorted slice a[left..right]. */
int median(int a[],int left,int right)
{
    /* left + (right-left)/2 cannot overflow, unlike (left+right)/2;
     * for valid indices the result is identical. */
    return a[left + (right - left) / 2];
}
/* Locate the run of entries equal to `middle` around the midpoint of the
 * sorted slice a[left..right]; *ll / *rr receive its first / last index.
 * The scans are clamped to [left, right]: the original pre-decrement /
 * pre-increment loops read a[left-1] and a[right+1], an out-of-bounds
 * access whenever the run touches the edge of the slice. */
void sqlit(int a[],int middle,int left,int right,int *ll,int *rr)
{
    int i = left + (right - left) / 2;   /* same midpoint median() uses */
    int j = i;
    while (i > left && a[i - 1] == middle)
        i--;
    *ll = i;
    while (j < right && a[j + 1] == middle)
        j++;
    *rr = j;
}
/* Recursive mode search over the sorted global array a[]: measure the run
 * containing the slice's median, then search each side only if it is long
 * enough to beat the best run found so far.  Results accumulate in the
 * globals `largest` (multiplicity) and `element` (the mode value). */
void mode(int left,int right)
{
    int ll,rr;
    int middle = median(a,left,right);/* median of the sorted slice */
    sqlit(a,middle,left,right,&ll,&rr);/* bounds of the run equal to the median */
    if(largest < rr - ll + 1)/* new longest run -> new mode candidate */
    {
        largest = rr - ll + 1;
        element = middle;
    }
    if(ll - left > largest)/* left part long enough to possibly hold a longer run */
        mode(left,ll - 1);
    if(right - rr > largest)/* right part long enough to possibly hold a longer run */
        mode(rr + 1,right);
}
int main()
{
    int n,m,i;
    /* multi-test-case wrapper from the original contest submission,
     * intentionally left disabled:
    scanf("%d",&n);
    while(n--)
    { */
    scanf("%d",&m);                 /* number of elements */
    for(i=0;i<m;i++)
        scanf("%d",&a[i]);
    qsort(a,m,sizeof(a[0]),cmp);    /* mode() requires sorted input */
    mode(0,m-1);
    printf("%d\n%d\n",element,largest);  /* mode value, then its multiplicity */
    //}
    return 0;
}
/***************************************************
User name: zxw140226杨尚澄
Result: Accepted
Take time: 396ms
Take Memory: 5672KB
Submit time: 2018-04-03 19:37:25
****************************************************/ |
991,534 | 3e2c988f2061e01c2554f0b74e540a36afe1d838 | # Kekan Nikhilkumar
# 1001-563-734
# 2018-09-09
# Assignment-01-02
import numpy as np
# This module calculates the activation function
def calculate_activation_function(weight, bias, input_array, type='Sigmoid'):
    """Apply an activation to net = weight * input_array + bias.

    Supported `type` values: 'Sigmoid' (default), 'Linear',
    'Hyperbolic Tangent', 'Positive Linear' (ReLU).

    Raises ValueError for an unrecognised `type` -- the original fell
    through every branch and crashed with UnboundLocalError instead.
    NOTE: the parameter name `type` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    net_value = weight * input_array + bias
    if type == 'Sigmoid':
        return 1.0 / (1 + np.exp(-net_value))
    if type == "Linear":
        return net_value
    if type == "Hyperbolic Tangent":
        return np.tanh(net_value)
    if type == "Positive Linear":
        return np.maximum(0, net_value)
    raise ValueError("Unknown activation type: {!r}".format(type))
return activation |
991,535 | e606c3567512bb3f25fd1daf87736e3c3d6f8d3e | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
    # Extends Django's built-in user with an avatar image.
    # NOTE(review): ImageField has no upload_to, so files land in MEDIA_ROOT
    # directly -- confirm this is intended.
    profile=models.ImageField()
class Post(models.Model):
    # One image post authored by a user; deleting the user deletes the post.
    user=models.ForeignKey(User, on_delete=models.CASCADE)
    image=models.ImageField()
    caption=models.TextField()
    created=models.DateTimeField(auto_now_add=True)  # set once on insert
    updated=models.DateTimeField(auto_now=True)      # refreshed on every save
    @property
    def like_count(self):
        # Number of Like rows pointing at this post (reverse FK lookup).
        return self.like_set.count()
    class Meta:
        ordering=['-updated']  # most recently touched posts first
    def __str__(self):
        return '%s-%s' % (self.id,self.user)
class Comment(models.Model):
    # Free-text comment by `user` on `post`; rows die with either parent.
    post=models.ForeignKey(Post,on_delete=models.CASCADE)
    user=models.ForeignKey(User,on_delete=models.CASCADE)
    content=models.TextField()
class Like(models.Model):
    # One "like" of `post` by `user`.  NOTE(review): nothing prevents a user
    # liking the same post twice -- consider a unique constraint on
    # (user, post) if that matters.
    user=models.ForeignKey(User, on_delete=models.CASCADE)
    post=models.ForeignKey(Post, on_delete=models.CASCADE)
|
991,536 | 979e54665fa79fc45e6ff9ef371470f4eceacb73 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import wget
import os
from tensorflow.examples.tutorials.mnist import input_data
def view_image(obs):
    # Render a flattened 784-element observation as a 28x28 grayscale image.
    plt.imshow(np.reshape(obs, (28, 28)), cmap="gray")
    plt.show()
def plot_data(df, y_lst, filename):
    """Plot each column named in `y_lst` against the index and save a PNG each.

    The output name is `filename` with `_<first word of the column>` inserted
    before the extension.
    """
    for y_var in y_lst:
        x_name = df.index.name
        df.reset_index().plot(x=x_name, y=y_var, legend=False)
        plt.ylabel(y_var.split()[-1])
        plt.title(y_var)
        plt.tight_layout()
        # savefig must come AFTER title/tight_layout: the original saved
        # first, so neither the title nor the layout fix ever reached the
        # file on disk.
        f = '_{}'.format(y_var.split()[0]).join(filename.rsplit('.', 1))
        plt.savefig(f, bbox_inches="tight")
def g_learning_curve(func, X_train, X_test, y_train, y_test, binary=True, iterations=20):
    # Train `func` on growing sample sizes (5k..55k in 5k steps) and record
    # the resulting test accuracy for each size.
    records = []
    for sample_size in range(5000, 55001, 5000):
        weights, _ = func(X_train, y_train, obs=sample_size, iterations=iterations)
        records.append([sample_size, calc_acc_score(X_test, y_test, weights, binary)])
    frame = pd.DataFrame(records, columns=['observation size', 'accuracy'])
    return frame.set_index('observation size')
def classify(X, w, binary, classes=None, b=0):
    # Binary: sign(X @ w) flattened to shape (n,).  Multiclass: for each row,
    # the class whose block-feature score w . F(x, k) is largest (returned as
    # a plain list of labels).
    if binary == True:  # deliberate strict comparison, as in the original contract
        return np.sign(X @ w).reshape(X.shape[0],)
    preds = []
    for row_idx in range(X.shape[0]):
        x = X[row_idx]
        scores = [np.dot(w.T, make_F(x, k, classes)) for k in classes]
        preds.append(classes[np.argmax(scores)])
    return preds
def calc_acc_score(X, y, w, binary):
    # Accuracy: mean agreement between predicted labels and y.
    predictions = classify(X, w, binary, classes=np.unique(y))
    return np.mean(predictions == y)
def acc_scores(iterations, func, X_train, X_test, y_train, y_test, binary=True):
    # Train incrementally for `iterations` rounds, threading the weights
    # between rounds and recording train/test accuracy after each one.
    train_scores, test_scores = [], []
    w = None
    for _ in range(iterations):
        w, _mistakes = func(X_train, y_train, w=w)
        train_scores.append(calc_acc_score(X_train, y_train, w, binary))
        test_scores.append(calc_acc_score(X_test, y_test, w, binary))
    all_scores = pd.DataFrame({'training scores': train_scores, 'test scores': test_scores})
    all_scores.index.name = 'iterations'
    all_scores.index += 1  # rounds are 1-based
    return all_scores
def make_F(x, k, classes):
    # Block feature vector: one slot of len(x) per class, with x copied into
    # the slot belonging to class k and zeros everywhere else.
    # Result shape: (len(classes) * len(x), 1).
    F = np.zeros(0)
    for c in classes:
        block = x if c == k else np.zeros(len(x))
        F = np.concatenate([F, block])
    return F.reshape(-1, 1)
def plot_curves(df, x, y_lst):
    """Plot each y-column against `x`, save a PNG copy, then display it.

    NOTE(review): the leading slash in '/figures/' makes this an absolute
    path at the filesystem root -- confirm a relative 'figures/' was not
    intended.
    """
    for y_var in y_lst:
        df.plot(x=x, y=y_var)
        plt.ylabel('accuracy')
        plt.title(y_var)
        # Save BEFORE plt.show(): show() hands the figure to the GUI event
        # loop and leaves a blank canvas behind, so the original's savefig
        # wrote empty image files.
        plt.savefig('/figures/' + y_var.replace(' ','_') + '_accuracy.png')
        plt.show()
def load_data():
    """Download Fashion-MNIST (if missing), load it, and binarise the labels.

    Returns (train_images, train_labels, train_labels_raw,
             test_images, test_labels, test_labels_raw), where the binarised
    labels map even raw classes to -1 and odd raw classes to +1.
    """
    filenames = ['t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz']
    link = r'https://github.com/zalandoresearch/fashion-mnist/raw/master/data/fashion/'
    for filename in filenames:
        if filename in os.listdir('.'):
            continue
        # Fixed: the original f-string had no placeholder, so it always
        # printed a literal "(unknown)" instead of the file being fetched.
        print(f'Downloading {filename} from github.')
        # NOTE(review): os.path.join on a URL works on POSIX but would break
        # on Windows -- urllib.parse.urljoin would be safer.
        f = os.path.join(link, filename)
        wget.download(f, '.')
    data = input_data.read_data_sets('.')
    # training labels: even classes -> -1, odd classes -> +1
    labels_raw = data.train.labels
    labels = np.array([-1 if x % 2 == 0 else 1 for x in labels_raw])
    # test labels, binarised the same way
    test_labels_raw = data.test.labels
    test_labels = np.array([-1 if x % 2 == 0 else 1 for x in test_labels_raw])
    return data.train.images, labels, labels_raw, data.test.images, test_labels, test_labels_raw
|
991,537 | d7621ef37a6ef4e8bf22bf19dfea438355ff3e12 | # -*- coding: utf-8 -*-
from odoo import fields, models, api
class Partner(models.Model):
    _inherit = "res.partner"  # Odoo in-place extension of the core partner model
    # Days of the month on which this partner accepts payment.  NOTE(review):
    # the relation table name contains a typo ("paymnet"); once deployed it is
    # load-bearing and must not be corrected silently.
    payment_days_ids = fields.Many2many('payment.days', 'res_partner_paymnet_days_rel', 'res_partner_id', 'payment_days_id', string="Payment Days")
    collection_executive_id = fields.Many2one("res.users", string="Collection executive") |
991,538 | ee43196b2a2ac85f7dd5d121d1f65f049af19b7d |
from maineed.event import *
print(running)
print(ps_count)
print(psi)
show_init = True
end_init = False
fail_init = False
timer = False
timer2 = 0
play_sound = True
byby = False
gogo = False
gogoc = 0
gogob = 1
while running:
if show_init:
background_sound.play()
byby = drow_init()
show_init = False
background_sound.fadeout(10000)
if end_init:
byby = drow_end_init(score)
background_sound2.fadeout(3000)
if fail_init:
byby = drow_fail_init(score)
background_sound2.fadeout(3000)
clock.tick(FPS) # 遊戲FPS
# 取得輸入
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if byby:
running = False
# 更新遊戲
MAIN_SHOW.update()
player.shoot()
boss.move()
# 石頭子彈撞擊
hits = pygame.sprite.groupcollide(rocks, bullets, True, True, pygame.sprite.collide_circle)
for hit in hits:
# 音效
random.choice(expl_sounds).play()
# 特效
expl = Explosion(hit.rect.center, 'md')
MAIN_SHOW.add(expl)
# 得分
score += 100
# 重生
reborn_rocks()
# 石頭玩家撞擊
hits = pygame.sprite.groupcollide(rocks, players, True, False, pygame.sprite.collide_circle)
for hit in hits:
# 音效
random.choice(damage_sounds).play()
# 特效
expl = Explosion(hit.rect.center, 'sm')
MAIN_SHOW.add(expl)
# 重生
reborn_rocks()
# 傷害
player.helf -= 5
# 附加事件
if player.helf <= 0:
background_sound2.fadeout(1000)
fail_init = True
# 子彈玩家撞擊
hits = pygame.sprite.groupcollide(enemybullets, players, True, False, pygame.sprite.collide_circle)
for hit in hits:
# 音效
random.choice(damage_sounds).play()
# 特效
expl = Explosion(hit.rect.center, 'sm')
MAIN_SHOW.add(expl)
# 傷害
player.helf -= 5
# 附加事件
if player.helf <= 0:
background_sound2.fadeout(1000)
fail_init = True
hits = pygame.sprite.groupcollide(enemybullets2, players, True, False, pygame.sprite.collide_circle)
for hit in hits:
# 音效
random.choice(damage_sounds).play()
# 特效
expl = Explosion(hit.rect.center, 'sm')
MAIN_SHOW.add(expl)
# 傷害
player.helf -= 10
# 附加事件
if player.helf <= 0:
fail_init = True
# 海盜船子彈撞擊
hits = collision_event(pirate_ships, bullets, False, True)
for hit in hits:
# 音效
random.choice(des_sounds).play()
# 特效
pirate_ship.image = explship_pngs[ps_count]
expl = Explosion(hit.rect.center, 'sm')
MAIN_SHOW.add(expl)
# 參數
ps_count += 1
pirate_ship.helf -= 30
if pirate_ship.helf <= 0:
expl = Explosion(hit.rect.center, 'slg')
MAIN_SHOW.add(expl)
destruction_song.play()
score += 1000
pirate_ship.helf = 100
pirate_ship.rect.x = random.randrange(200, 800)
pirate_ship.rect.y = random.randrange(-1000, -200)
ps_count = 0
pirate_ship.image = explship_pngs[ps_count]
x = random.randrange(0, 2)
if x == 0:
poww = Power1(hit.rect.center)
MAIN_SHOW.add(poww)
powers1.add(poww)
if x == 1:
poww = Power2(hit.rect.center)
MAIN_SHOW.add(poww)
powers2.add(poww)
# 魔王子彈撞擊
hits = pygame.sprite.groupcollide(bosss, bullets, False, True, pygame.sprite.collide_circle)
for hit in hits:
# 音效
des_sound_b.play()
# 特效
expl = Explosion2(hit.rect.x, hit.rect.y, 'sm')
MAIN_SHOW.add(expl)
# 參數
boss.helf -= 1
if boss.helf == 270:
boss.psi = 5
des_sound_c.play()
expl = Explosion(hit.rect.center, 'lg')
MAIN_SHOW.add(expl)
destruction_song.play()
boss.image = boss_img1
boss.image.set_colorkey(WHITE)
if boss.helf == 170:
boss.psi = 3
des_sound_c.play()
expl = Explosion(hit.rect.center, 'lg')
MAIN_SHOW.add(expl)
destruction_song.play()
boss.image = boss_img2
boss.image.set_colorkey(WHITE)
if boss.helf == 80:
boss.psi = 1
des_sound_c.play()
expl = Explosion(hit.rect.center, 'lg')
MAIN_SHOW.add(expl)
destruction_song.play()
boss.image = boss_img3
boss.image.set_colorkey(WHITE)
if boss.helf <= 0:
boss.psi = 10000
gogo = True
if gogo is True:
gogob += 1
gogoc += 1
if gogob % 60 == 0:
des_sound_c.play()
expl = Explosion3(boss.rect.x - 50, boss.rect.y - 50, 'lg')
MAIN_SHOW.add(expl)
destruction_song.play()
if gogoc >= 600:
expl = Explosion(boss.rect.center, 'blg')
MAIN_SHOW.add(expl)
if gogoc >= 800:
score += 10000
end_init = True
# 吃寶物1
hits = collision_event(powers1, players, True, False)
for hit in hits:
s1_sound.play()
player.helf += 50
if player.helf > 100:
player.helf = 100
# 吃寶物2
hits = collision_event(powers2, players, True, False)
for hit in hits:
s2_sound.play()
player.shoot_fast = True
player.psi = 10
if timer == 800:
player.psi = 20
timer = 0
player.shoot_fast = False
# 計時
if player.shoot_fast is True:
timer += 1
if timer2 < 700:
timer2 += 1
if timer2 > 600 and play_sound is True:
background_sound2.play()
play_sound = False
# 畫面顯示
SCREEN.fill(BLACK)
MAIN_SHOW.draw(SCREEN)
draw_text(SCREEN, f"Score: {score}", 26, 75, 900)
draw_health(SCREEN, player.helf, 5, 950)
draw_health(SCREEN, boss.helf, 175, 0)
pygame.display.update()
# 遊戲結束
pygame.quit()
|
991,539 | 3fe2041ea58d3d09891a877711857f81e730f510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/11 7:23 下午
# @Author : liujiatian
# @File : 3.组合总和.py
# 给定一个无重复元素的数组 candidates 和一个目标数 target ,找出 candidates 中所有可以使数字和为 target 的组合。
#
# candidates 中的数字可以无限制重复被选取。
#
# 说明:
#
#
# 所有数字(包括 target)都是正整数。
# 解集不能包含重复的组合。
#
#
# 示例 1:
#
# 输入:candidates = [2,3,6,7], target = 7,
# 所求解集为:
# [
# [7],
# [2,2,3]
# ]
#
#
# 示例 2:
#
# 输入:candidates = [2,3,5], target = 8,
# 所求解集为:
# [
# [2,2,2,2],
# [2,3,3],
# [3,5]
# ]
#
#
#
# 提示:
#
#
# 1 <= candidates.length <= 30
# 1 <= candidates[i] <= 200
# candidate 中的每个元素都是独一无二的。
# 1 <= target <= 500
#
# Related Topics 数组 回溯算法
# 👍 802 👎 0
class Solution:
    def combinationSum(self, candidates, target):
        """Return all unique combinations of `candidates` (each number may be
        reused any number of times) whose sum equals `target`.

        Keeps the original brute-force exploration and sorted-list dedup,
        but threads the running total through the recursion instead of
        recomputing sum(number_list) at every node (O(depth) per call in
        the original).
        """
        if not candidates:
            return []
        result = []

        def backtrace(chosen, total):
            # `total` is always sum(chosen)
            if total == target:
                combo = sorted(chosen)
                if combo not in result:
                    result.append(combo)
                return
            for number in candidates:
                # prune any branch that would overshoot the target
                if total + number <= target:
                    backtrace(chosen + [number], total + number)

        backtrace([], 0)
        return result
if __name__ == '__main__':
candidates = [2, 3, 6, 7]
target = 7
print(Solution().combinationSum(candidates, target))
|
991,540 | b4ebd2b371e17f1a6301fe2dc7e2bbfa7b73e66d | #!/usr/bin/env python
import os
import numpy as np
import csv
from rulo_utils.csvcreater import csvcreater
def numpywriter(filepath, array=0):
    """Append `array` to the text file at `filepath` using np.savetxt.

    Uses a context manager so the handle is closed even if savetxt raises;
    the original leaked the file descriptor on error.
    """
    with open(filepath, 'ab') as out:
        np.savetxt(out, array)
|
991,541 | 39b4d103460290421cf07a869ff458e138e0f1cc | from django.shortcuts import render
from django.core.paginator import Paginator
from .models import Post, Category
# Create your views here.
def blog(request):
    # Paginated index of every post, four per page; ?page=N selects the page
    # (get_page clamps invalid or out-of-range values).
    paginator = Paginator(Post.objects.all(), 4)
    page_number = request.GET.get('page')
    current_page = paginator.get_page(page_number)
    return render(request, "blog_m/index.html", {'posts': current_page})
|
991,542 | c680154999043b1ed833edbf855643fe92278969 | #!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2016 Massimiliano Patacchiola
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#In this example I used the backprojection algorithm with multimple templates
# in order to track my hand in a webcam streaming. The center of the hand is
#taken as reference point for controlling some keys on the keyboard and play
#a videogame. To obtain the templates of your hands you can simply take some
#screenshot of the open hand using some screen capture utilities (like Shutter)
#You can load as many templates as you like, just remember to load the images
#and append them in the template_list. To simulate the keyboard I am using the
#libraray evdev that requires admin right in order to write in your keyboard.
#To run the example just type: sudo python ex_multi_backprojection_hand_tracking_gaming.py
#BUTTONS:
# 'a' = Press 'a' to start the capture of the hand position and the
# keyboard simulation
# 'q' = Press 'q' to exit (you have to select the CV windows with the mouse)
import cv2
import numpy as np
from evdev import UInput, ecodes as e
from deepgaze.color_detection import MultiBackProjectionColorDetector
from deepgaze.mask_analysis import BinaryMaskAnalyser
#Declare the simulated keyboard object
ui = UInput()
#Enable or disavle the keyboard simulation (enabled when press 'a')
ENABLE_CAPTURE = False
#Declare a list and load the templates. If you are using more templates
#then you have to load them here.
template_list=list()
template_list.append(cv2.imread('template_1.png')) #Load the image
template_list.append(cv2.imread('template_2.png')) #Load the image
template_list.append(cv2.imread('template_3.png')) #Load the image
template_list.append(cv2.imread('template_4.png')) #Load the image
template_list.append(cv2.imread('template_5.png')) #Load the image
template_list.append(cv2.imread('template_6.png')) #Load the image
#Open a webcam streaming
video_capture=cv2.VideoCapture(0) #Open the webcam
#Reduce the size of the frame to 320x240
video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
#Get the webcam resolution
cam_w = int(video_capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
cam_h = int(video_capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
#Declare an offset that is used to define the distance
#from the webcam center of the two red lines
offset = int(cam_h / 7)
#Declaring the binary mask analyser object
my_mask_analyser = BinaryMaskAnalyser()
#Defining the deepgaze color detector object
my_back_detector = MultiBackProjectionColorDetector()
my_back_detector.setTemplateList(template_list) #Set the template
print("Welcome! Press 'a' to start the hand tracking. Press 'q' to exit...")
while(True):
# Capture frame-by-frame
ret, frame = video_capture.read()
if(frame is None): break #check for empty frames
#Return the binary mask from the backprojection algorithm
frame_mask = my_back_detector.returnMask(frame, morph_opening=True, blur=True, kernel_size=5, iterations=2)
if(my_mask_analyser.returnNumberOfContours(frame_mask) > 0 and ENABLE_CAPTURE==True):
x_center, y_center = my_mask_analyser.returnMaxAreaCenter(frame_mask)
x_rect, y_rect, w_rect, h_rect = my_mask_analyser.returnMaxAreaRectangle(frame_mask)
area = w_rect * h_rect
cv2.circle(frame, (x_center, y_center), 3, [0,255,0], 5)
#Check the position of the target and press the keys
#KEY_UP, KEY_DOWN, KEY_RIGHT, KEY_LEFT, KEY_SPACE
#KEY_W, KEY_S, KEY_D, KEY_A
#DOWN
if(y_center > int(cam_h/2)+offset and area>10000):
ui.write(e.EV_KEY, e.KEY_DOWN, 1)
print("KEY_DOWN")
#UP
elif(y_center < int(cam_h/2)-offset and area>10000):
ui.write(e.EV_KEY, e.KEY_UP, 1)
print("KEY_UP")
else:
print("WAITING")
ui.write(e.EV_KEY, e.KEY_DOWN, 0) #release the buttons
ui.write(e.EV_KEY, e.KEY_UP, 0)
ui.syn()
#Drawing the offsets
cv2.line(frame, (0, int(cam_h/2)-offset), (cam_w, int(cam_h/2)-offset), [0,0,255], 2) #horizontal
cv2.line(frame, (0, int(cam_h/2)+offset), (cam_w, int(cam_h/2)+offset), [0,0,255], 2)
#Showing the frame and waiting for the exit command
cv2.imshow('mpatacchiola - deepgaze', frame) #show on window
cv2.imshow('Mask', frame_mask) #show on window
if cv2.waitKey(1) & 0xFF == ord('q'): break #Exit when Q is pressed
if cv2.waitKey(33) == ord('a'):
if(ENABLE_CAPTURE==True):
print("Disabling capture...")
ENABLE_CAPTURE=False
else:
print("Enabling capture...")
ENABLE_CAPTURE=True
#Close the keyboard ojbect
ui.close()
#Release the camera
video_capture.release()
print("Bye...")
|
991,543 | d884d4c194da90145a475e095b8d4054ae303d00 | import math
# Read a step size d and print "A(d, R_0)" -- the printed value is
# (floor(10/|d|) + 1) squared, i.e. the square of the number of steps of
# size |d| needed to cover a length-10 interval (presumably a grid/area
# count for radius R_0 -- exact meaning not visible here).
d = float(input('Please set d:'))
n = math.floor(10.0/abs(d)) + 1
print('A({0},R_0) = {1}'.format(d,n*n))
|
991,544 | 8d9691167bdf8ab87ade14a3d0dbcbbc43233624 | # 使用{}创建字典
d = {}
d = {"name": "youhuan"}
print(d)
# dict() builds a dictionary from keyword arguments
a = dict(name="youhuan")
print(a)
# len() gives the number of key/value pairs
# `in` / `not in` test whether a KEY is present in the dictionary
# reading values: .get() returns a default when missing, [] raises KeyError
print(d.get("name2", "moren"))
print(d["name"])
# assigning to an existing key replaces its value
d["name"] = "miaomiao"
print(d)
# setdefault() inserts the key only if absent; if present it does nothing
# update() merges another dict; duplicate keys take the new value
# popitem() removes and returns the last (key, value) pair as a tuple
# pop() removes a key and returns its value
# iterating over a dictionary
keys = d.keys()
for i in keys:
    print(d.get(i))
items = d.items()
for k, v in items:
    print(k, " => ", v) |
991,545 | 292df7a38a99746cafc4f1e9170908bcfccc2cf9 | import dash
# Module-level Dash application instance with browser title "Rubicon".
app = dash.Dash(__name__, title="Rubicon")
|
991,546 | bf7b79d7b2d881069a9ded8107b765ec09643d9a | maxRep = 0
maxNumber = 0
for num in range(2,1000+1):
# Numbers that are mod(5) or mod(2) will never repeat
if num % 2 == 0 or num % 5 == 0:
continue
# Emulate long divison
else:
mods = []
reps = 0 # Length
div = 1 # Divisor
while div < num:
div *= 10
# Do one cycle loop
mods.append(div % num)
reps += 1
div = div % num
# Repeat
while div is not 1:
div *= 10
reps += 1
mods.append(div % num)
div = div % num
if reps > maxRep:
maxRep = reps
maxNumber = num
print maxRep
print maxNumber
|
991,547 | bd2fd8148fd2e3cb3392761edd7f29856862f67d | import torch
import numpy as np
from scipy.misc import imread, imresize
from torchvision.models import resnet101
import torch
import torch.nn.functional as F
import torch.nn as nn
def load_resnet_image_encoder(model_stage=2):
    """ Load the appropriate parts of ResNet-101 for feature extraction.

    Parameters
    ----------
    model_stage : Integral
        The stage of ResNet-101 from which to extract features.
        For 28x28 feature maps, this should be 2. For 14x14 feature maps, 3.

    Returns
    -------
    torch.nn.Sequential
        The feature extractor (ResNet-101 at `model_stage`)

    Notes
    -----
    This function will download ResNet-101 if it is not already present
    through torchvision.
    """
    print('Load pretrained ResNet 101.')
    backbone = resnet101(pretrained=True)
    # the stem (conv/bn/relu/pool) followed by the first `model_stage` residual stages
    pieces = [backbone.conv1, backbone.bn1, backbone.relu, backbone.maxpool]
    pieces.extend(getattr(backbone, 'layer{}'.format(stage + 1)) for stage in range(model_stage))
    extractor = torch.nn.Sequential(*pieces)
    if torch.cuda.is_available():
        extractor.cuda()
    # freeze everything: this is an inference-only feature extractor
    for param in extractor.parameters():
        param.requires_grad = False
    return extractor.eval()
# def extract_image_feats(img, model, gpus):
# img_hres = F.interpolate(img, size = (224, 224), mode = 'bicubic', align_corners=False)
# imin = img_hres.min()
# imax = img_hres.max()
# img_hres = (img_hres - imin) / (imax - imin)
# mean = torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).cuda()
# std = torch.FloatTensor([0.229, 0.224, 0.224]).view(1, 3, 1, 1).cuda()
# img_hres = (img_hres - mean) / std
# return nn.parallel.data_parallel(model, (img_hres), gpus)
def extract_image_feats(img, model, gpus):
    """Upsample `img` (assumed to be in [-1, 1] -- TODO confirm against the
    caller) to 224x224, apply ImageNet normalisation, and run it through
    `model` data-parallel across the device ids in `gpus`.
    """
    img_hres = F.interpolate(img, size = (224, 224), mode = 'bicubic', align_corners=False)
    # map [-1, 1] -> [0, 1]
    img_hres = (img_hres + 1.) / 2.
    mean = torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).cuda()
    # Fixed: the blue-channel std of the ImageNet statistics is 0.225; the
    # original repeated 0.224.
    std = torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).cuda()
    img_hres = (img_hres - mean) / std
    return nn.parallel.data_parallel(model, (img_hres), gpus)
def extract_image_feats_(img_path, model):
    """ Extract image features from the image at `img_path` using `model`.

    Parameters
    ----------
    img_path : Union[pathlib.Path, str]
        The path to the image file.
    model : torch.nn.Module
        The feature extractor to use.

    Returns
    -------
    Tuple[numpy.ndarray, torch.Tensor]
        The image and image features extracted from `model`

    Notes
    -----
    scipy.misc.imread/imresize were removed in SciPy 1.2+; this function
    requires an old SciPy (or a port to imageio/PIL).
    """
    # read in the image and transform it to shape (1, 3, 224, 224)
    path = str(img_path)  # to handle pathlib
    img = imread(path, mode='RGB')
    img = imresize(img, (224, 224), interp='bicubic')
    img = img.transpose(2, 0, 1)[None]
    # ImageNet statistics; fixed the blue-channel std (0.225, not 0.224 as
    # the original had)
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
    img_tensor = torch.FloatTensor((img / 255 - mean) / std)
    # push to the GPU if possible
    if torch.cuda.is_available():
        img_tensor = img_tensor.cuda()
    return (img.squeeze().transpose(1, 2, 0), model(img_tensor))
|
991,548 | 848f6ff3a6c40cc9b2006f7b718bfaa272827ca4 | #!/usr/bin/python3
# use YAML
# use YAML::Tiny
# use Data::Dumper
# use Hash::Merge::Simple qw/ merge /
import argparse
import os
import sys
import logging
import logging.config
import pprint
import yaml
class Config:
    """Layered YAML configuration loader for the camera system.

    The YAML file holds one configuration tree per mode (e.g. 'prod',
    'test').  A tree may name another tree under Config.Template, in which
    case missing keys are filled in from that template tree (one level only;
    templates are not applied recursively).
    """
    def __init__(self,
                 config_file='/usr/local/cam/conf/config.yml',
                 mode='prod'):
        # Bootstrap: read the hard-coded default config (always the 'prod'
        # tree, regardless of the arguments) just to locate and install the
        # logging configuration before anything else can log.
        # start with a temporary config to get things started
        with open('/usr/local/cam/conf/config.yml', 'r') as file:
            config_root = yaml.safe_load(file)
        config = config_root['prod']
        with open(config['Logging']['LogConfig'], 'rt') as f:
            lconfig = yaml.safe_load(f.read())
        logging.config.dictConfig(lconfig)
        # create logger
        self.logger = logging.getLogger(__name__)
        self.config_file = config_file
        self.mode = mode
        # make sure we have a config file; bail out hard if not
        if not self.config_file or not os.path.exists(self.config_file):
            print("Config file {} does not exist".format(self.config_file))
            sys.exit(1)
        self.logger.debug("config file is {}".format(self.config_file))
        # Now load the *requested* config file and pick the tree for `mode`.
        with open(self.config_file, 'r') as file:
            self.config_root = yaml.safe_load(file)
        try:
            self.config = self.config_root[self.mode]
        except KeyError:
            print("mode config wrong, should be something like \
'prod' or 'test', is {}".format(self.mode))
            sys.exit(1)
        self.logger.debug("config for {} loaded".format(self.mode))
        # Debug knobs; both default when absent (TypeError covers a null
        # Debug section in the YAML).
        try:
            self.debug = self.config['Debug']['Level']
        except (KeyError, TypeError):
            self.debug = 0
        try:
            dumpconfig = self.config['Debug']['DumpConfig']
        except (KeyError, TypeError):
            dumpconfig = 99
        # see if this config tree is templated upon another
        # There is no further recursion of templates
        try:
            template = self.config['Config']['Template']
        except KeyError:
            pass
        else:
            try:
                self.logger.debug("going to load template {}".format(template))
                tmpl_config = self.config_root[template]
            except KeyError:
                print("template config wrong, should name of another config \
root, is {}".format(template))
                sys.exit(1)
            else:
                # fill gaps in this tree from the template tree
                out = self.merge(self.config, tmpl_config)
                self.config = out
                # re-read the debug knobs: the merge may have supplied them
                try:
                    self.debug = self.config['Debug']['Level']
                except (KeyError, TypeError):
                    self.debug = 0
                try:
                    dumpconfig = self.config['Debug']['DumpConfig']
                except (KeyError, TypeError):
                    dumpconfig = 99
        if self.debug >= dumpconfig:
            self.logger.debug(self.config)
    def merge(self, a, b, path=None):
        """Recursively merge b into a (in place) and return a.

        Dicts are merged key by key; on a conflict at a leaf, a's value
        wins (the template never overrides the mode's own settings).
        `path` only tracks the current position for the recursion.
        """
        if path is None:
            path = []
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    self.merge(a[key], b[key], path + [str(key)])
                elif a[key] == b[key]:
                    pass  # same leaf value
                else:
                    pass  # keep the a value
            else:
                a[key] = b[key]
        return a
    def get_config(self):
        # Accessor for the fully merged configuration tree.
        return(self.config)
# never implemented, never used
# sub writeConfig {
# my (self, $old ) = @_
#
# # replace config
# self.in}->[0]->{prod} = $old
# if ( self.debug} >= self.config]['Debug']['DumpConfig'} ) {
# print("going to write:", Dumper(self.in})
# # write new config
# if ( ! self.in}->write( self.config_file} ) ) {
# die "errors writing ", self.config_file}, " : $!\n"
if __name__ == '__main__':
print("mode = test")
config_test = Config(mode='test')
config = config_test.get_config()
# pprint.pprint(config)
print("['Directories']['cam_images']['prod']:",
config['Directories']['cam_images']['prod'])
print("['Paths']['cam_images']:",
config['Paths']['cam_images'])
print("config['BucketShiz']['VideoNameTemplate']",
config['BucketShiz']['VideoNameTemplate'])
print("mode = prod")
config_prod = Config(mode='prod')
config = config_prod.get_config()
# pprint.pprint(config)
print("['Directories']['cam_images']['prod']:",
config['Directories']['cam_images']['prod'])
print("['Paths']['cam_images']:",
config['Paths']['cam_images'])
print("config['BucketShiz']['VideoNameTemplate']",
config['BucketShiz']['VideoNameTemplate'])
print("mode = shiz")
config_shiz = Config(mode='shiz')
config = config_shiz.get_config()
print("['Directories']['cam_images']['prod']:",
config['Directories']['cam_images']['prod'])
print("['Paths']['cam_images']:",
config['Paths']['cam_images'])
print("config['BucketShiz']['VideoNameTemplate']",
config['BucketShiz']['VideoNameTemplate'])
|
991,549 | 83acd7bf8c3a9ba7677e9955c32d1d50dbe2bfe5 | import argparse
import time
import os
import json
import random
import numpy as np
from generate_map import *
from utils.general_functions import *
from utils.graphs import *
def str2bool(v):
    """Parse a human-friendly boolean for argparse.

    Real bools pass through unchanged; common yes/no spellings are matched
    case-insensitively. Anything else raises ArgumentTypeError so argparse
    reports a clean usage error.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    curr_time = time.strftime('%d_%m_%H_%M_%S')
    # Parse input arguments.
    parser = argparse.ArgumentParser(description='script for generating truck and drone problem instances')
    parser.add_argument('-f', '--file', type=str, default='parameters/drive_and_fly_together_1x1.json',
                        help='Path to json parameters files')
    parser.add_argument('-o', '--output', type=str, default=f'problem_{curr_time}',
                        help='Path to output problem file')
    parser.add_argument("--show", type=str2bool, nargs='?',
                        const=True, default=False,
                        help="Show the generated problem environment")
    args = parser.parse_args()
    # Resolve the input and output files.
    parameters_file = get_file(args.file, 'json')
    problem_file = get_file(args.output, 'pddl')
    problem_name, _ = os.path.splitext(problem_file)
    # Load the parameters file.
    with open(parameters_file, 'r') as fp:
        params = json.load(fp)
    # Record the problem name so downstream writers can embed it.
    params['general']['problem_name'] = problem_name
    # Seed both RNGs so problem generation is reproducible.
    np.random.seed(params["general"]["seed"])
    random.seed(params["general"]["seed"])
    # Generate the environment.
    env = generate_map(params, show=args.show)
    # Adjacency matrix for the truck nodes.
    # NOTE(review): this leaves 1 where the distance is inf and 0 where it
    # is finite, which looks inverted for an adjacency matrix — confirm the
    # intended semantics (the matrix is not used further in this script).
    truck_adj_max = np.ones(env["truck_dist_matrix"].shape)
    truck_adj_max[env["truck_dist_matrix"] != np.inf] = 0
    # Truck starts at the first truck node; the goal is the last one.
    truck_start = env["truck_nodes"][0]
    truck_goal = env["truck_nodes"][-1]
    # Build the truck graph.
    g_truck = construct_truck_graph(env, params)
    # Build the drone graph; multi-package domains allow more than one package.
    g_drone, rrt_list = construct_drone_graph(env, params,
                                              one_package=params["general"][
                                                  "domain_name"] != 'drive_and_fly_together_multiple')
    # Write out the PDDL problem file.
    # (A stray no-op `os.getcwd()` call that discarded its result was removed.)
    local_problem_file = write_pddl(g_truck=g_truck, g_drone=g_drone,
                                    truck_start=truck_start, truck_goal=truck_goal, params=params)
|
991,550 | 544d8a40f76ef9876402b21cdcae4d01e962f2c8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Student(models.Model):
    """Student record, editable through the Django admin."""
    # All fields are nullable free text (age is stored as a string too).
    name = models.CharField(max_length=20, null=True)
    age = models.CharField(max_length=20, null=True)
    roll_number = models.CharField(max_length=20, null=True)

    def __str__(self):
        # BUG FIX: `name` is nullable, and __str__ must return a str —
        # returning None raised TypeError in the admin list view.
        return self.name or ""
|
991,551 | 5828a73ec9ec680a2c5efef1b4b0e108feff0a4e | '''
Created on 2018年9月15日
@author: xingli
'''
import os,sys
sys.path.append('/home/pi/Desktop/xing/iot-device/apps')
from time import sleep
from labs.module03 import TempSensorAdaptor
from labs.module04 import I2CSenseHatAdaptor
# Instantiate the sensor adaptors.
sysPerfAdaptor = TempSensorAdaptor.TempSensorAdaptor()
i2cSenseHat = I2CSenseHatAdaptor.I2CSenseHatAdaptor()

print("Starting system performance app daemon thread...")
#sysPerfAdaptor.setEnableAdaptorFlag(True)
#sysPerfAdaptor.start()
i2cSenseHat.run()

# Keep the main thread alive while the adaptor's daemon thread runs.
while True:
    sleep(5)
991,552 | d2ce6b1e5be9dce5b17750c9d24a2b98746ae3c1 | from collections import namedtuple
import torch
import cupy
from string import Template
from ...backend.torch_skcuda_backend import TorchSkcudaBackend
from .torch_backend import TorchBackend2D
# Select CuPy's memoize decorator across versions: as of CuPy v8,
# cupy.util has been renamed cupy._util.
if hasattr(cupy, '_util'):
    memoize = cupy._util.memoize
else:
    memoize = cupy.util.memoize
@memoize(for_each_device=True)
def _load_kernel(kernel_name, code, **kwargs):
    """Render a CUDA kernel template, compile it, and return the kernel.

    The decorator memoizes per device, so each (template, substitutions)
    pair is compiled at most once per GPU.
    """
    rendered = Template(code).substitute(**kwargs)
    module = cupy.cuda.compile_with_cache(rendered)
    return module.get_function(kernel_name)
Stream = namedtuple('Stream', ['ptr'])
def _get_dtype(t):
dtypes = {torch.float32: 'float',
torch.float64: 'double'}
return dtypes[t.dtype]
class SubsampleFourier(object):
    """Subsampling of a 2D image performed in the Fourier domain.

    Subsampling in the spatial domain amounts to periodization
    in the Fourier domain, hence the formula.

    Parameters
    ----------
    x : tensor
        Torch tensor with at least 5 dimensions, the last being the real
        and imaginary parts. Ideally, the last dimension should be a
        power of 2 to avoid errors.
    k : int
        Integer such that x is subsampled by k along the spatial variables.

    Raises
    ------
    RuntimeError
        In the event that x is not contiguous.
    TypeError
        In the event that x is on CPU or the input is not complex.

    Returns
    -------
    out : tensor
        Tensor such that its fourier transform is the Fourier
        transform of a subsampled version of x, i.e. in
        F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k)].
    """
    def __init__(self):
        # CUDA thread block: one 32x32 tile of output coefficients per block.
        self.block = (32, 32, 1)

    def GET_BLOCKS(self, N, threads):
        # Ceiling division: number of blocks needed to cover N elements.
        return (N + threads - 1) // threads

    def __call__(self, x, k):
        if not x.is_cuda:
            raise TypeError('Use the torch backend (without skcuda) for CPU tensors.')

        # Collapse all leading (batch) dimensions into one for the kernel.
        batch_shape = x.shape[:-3]
        signal_shape = x.shape[-3:]
        x = x.view((-1,) + signal_shape)

        # Output keeps the trailing real/imag axis; both spatial dims shrink by k.
        out = torch.empty(x.shape[:-3] + (x.shape[-3] // k, x.shape[-2] // k, x.shape[-1]), dtype=x.dtype, layout=x.layout, device=x.device)

        # Each output coefficient averages the k*k aliased Fourier
        # coefficients — periodization, i.e. spatial subsampling by k.
        kernel = '''
        #define NW ${W} / ${k}
        #define NH ${H} / ${k}
        extern "C"
        __global__ void periodize(const ${Dtype}2 *input, ${Dtype}2 *output)
        {
          int tx = blockIdx.x * blockDim.x + threadIdx.x;
          int ty = blockIdx.y * blockDim.y + threadIdx.y;
          int tz = blockIdx.z * blockDim.z + threadIdx.z;
          if(tx >= NW || ty >= NH || tz >= ${B})
            return;
          input += tz * ${H} * ${W} + ty * ${W} + tx;
          ${Dtype}2 res = make_${Dtype}2(0.f, 0.f);
          for (int j=0; j<${k}; ++j)
            for (int i=0; i<${k}; ++i)
            {
              const ${Dtype}2 &c = input[j * NH * ${W} + i * NW];
              res.x += c.x;
              res.y += c.y;
            }
          res.x /= ${k} * ${k};
          res.y /= ${k} * ${k};
          output[tz * NH * NW + ty * NW + tx] = res;
        }
        '''
        B = x.shape[0]
        W = x.shape[2]
        H = x.shape[1]
        periodize = _load_kernel('periodize', kernel, B=B, H=H, W=W, k=k, Dtype=_get_dtype(x))
        # Grid covers the subsampled output.
        # NOTE(review): grid x is sized from out.shape[1] while the kernel
        # bounds tx by NW (the width) — verify coverage for non-square inputs.
        grid = (self.GET_BLOCKS(out.shape[1], self.block[0]),
                self.GET_BLOCKS(out.shape[2], self.block[1]),
                self.GET_BLOCKS(out.shape[0], self.block[2]))
        periodize(grid=grid, block=self.block, args=[x.data_ptr(), out.data_ptr()],
                  stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
        # Restore the original batch dimensions.
        out = out.reshape(batch_shape + out.shape[-3:])
        return out
class Modulus(object):
    """This class implements a modulus transform for complex numbers.

    Usage
    -----
    modulus = Modulus()
    x_mod = modulus(x)

    Parameters
    ---------
    x : tensor
        Complex torch tensor.

    Raises
    ------
    RuntimeError
        In the event that x is not contiguous.
    TypeError
        In the event that x is on CPU or the input is not complex.

    Returns
    -------
    output : tensor
        A tensor with the same dimensions as x, such that output[..., 0]
        contains the complex modulus of x, while output[..., 1] = 0.
    """
    def __init__(self):
        # 1-D launch configuration: threads per block.
        self.CUDA_NUM_THREADS = 1024

    def GET_BLOCKS(self, N):
        # Ceiling division over the flat element count.
        return (N + self.CUDA_NUM_THREADS - 1) // self.CUDA_NUM_THREADS

    def __call__(self, x):
        if not x.is_cuda:
            raise TypeError('Use the torch backend (without skcuda) for CPU tensors.')

        # Output collapses the real/imag pair into a single channel.
        out = torch.empty(x.shape[:-1] + (1,), device=x.device, layout=x.layout, dtype=x.dtype)

        # normf(2, p) computes sqrt(p[0]^2 + p[1]^2) over the interleaved
        # real/imag pair at x + 2*i.
        # NOTE(review): normf is the single-precision intrinsic; double
        # inputs may need `norm` instead — confirm float64 support.
        kernel = """
        extern "C"
        __global__ void abs_complex_value(const ${Dtype} * x, ${Dtype} * z, int n)
        {
            int i = blockIdx.x * blockDim.x + threadIdx.x;
            if (i >= n)
                return;
            z[i] = normf(2, x + 2*i);
        }
        """
        fabs = _load_kernel('abs_complex_value', kernel, Dtype=_get_dtype(x))
        fabs(grid=(self.GET_BLOCKS(int(out.nelement())), 1, 1),
             block=(self.CUDA_NUM_THREADS, 1, 1),
             args=[x.data_ptr(), out.data_ptr(), out.numel()],
             stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
        return out
class TorchSkcudaBackend2D(TorchSkcudaBackend, TorchBackend2D):
    """2D scattering backend: torch tensors processed by CuPy CUDA kernels."""
    # Shared kernel wrappers; stateless apart from launch configuration.
    _modulus_complex = Modulus()
    _subsample_fourier = SubsampleFourier()

    @classmethod
    def modulus(cls, x):
        """Pointwise complex modulus; x must be contiguous and complex."""
        cls.contiguous_check(x)
        cls.complex_check(x)
        return cls._modulus_complex(x)

    @classmethod
    def subsample_fourier(cls, x, k):
        """Periodize x in the Fourier domain (spatial subsampling by k)."""
        cls.contiguous_check(x)
        cls.complex_check(x)
        return cls._subsample_fourier(x, k)

# Module-level entry point expected by the backend loader.
backend = TorchSkcudaBackend2D
|
991,553 | 30a22e1935fbd2730bceabadf44756f260280c6e | import argparse
import random
import sys
import time
import pygame
import comp
import grid
import player
import ship
# TODO later
# interactive ship placement
# add heatmap option
# normalize wording: ships sunk or sunk ships
def main():
    """Run the Battleship game loop: player vs. computer, pygame UI."""
    parser = argparse.ArgumentParser()
    parser.add_argument('filename', help='path to file with ship locations')
    args = parser.parse_args()
    p = player.Player()
    computer = comp.Computer()
    # One ship per line in the layout file.
    with open(args.filename, 'r') as f:
        for line in f:
            p.add_ship(ship.Ship(line))
    black = (0, 0, 0)
    white = (255, 255, 255)
    pygame.init()
    pygame.font.init()
    pygame.display.set_caption('Battleship')
    icon = pygame.image.load('images/icon.bmp')
    pygame.display.set_icon(icon)
    screen = pygame.display.set_mode((880, 480))
    screen.fill(white)
    pygame.display.update()
    clock = pygame.time.Clock()
    font = pygame.font.SysFont(pygame.font.get_default_font(), 22)
    message = font.render('Your turn.', True, black)
    right_message = font.render('Press TAB to see how the computer views your grid.', True, black)
    players_turn = True
    need_to_wait = False          # pause briefly after the computer's shot
    computer_message = ''
    heatmap_visible = False       # TAB toggles the computer's heatmap view
    while True:
        clock.tick(60)
        # --- input handling ---
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_TAB:
                    heatmap_visible = not heatmap_visible
            elif players_turn and event.type == pygame.MOUSEBUTTONDOWN\
                    and pygame.mouse.get_pressed()[0]:
                # Left click on the computer's grid fires at that square.
                mouse_pos = pygame.mouse.get_pos()
                computer_grid = computer.grid_rect(pygame)
                if computer_grid.collidepoint(mouse_pos):
                    square = computer.pos_to_square(mouse_pos)
                    if square not in computer.shots_taken:
                        players_turn = False
                        response = computer.query(square)
                        computer_message = ''
                        if response:
                            message_text = "Hit at {}{}! Computer's turn."
                        else:
                            message_text = "Miss at {}{}! Computer's turn."
                        message = font.render(message_text.format(*square),
                                              True, black)
        # --- drawing ---
        screen.fill((255, 255, 255))
        screen.blit(message, (20, 455))
        screen.blit(right_message, (460, 455))
        computer.show(pygame)
        if heatmap_visible:
            right_message = font.render('Press TAB to see your grid normally.', True, black)
            computer.show_heatmap(pygame)
        else:
            right_message = font.render('Press TAB to see how the computer views your grid.', True, black)
            p.show(pygame)
        pygame.display.flip()
        if need_to_wait:
            # Brief pause so the computer's move is visible before the
            # player's turn resumes.
            time.sleep(0.8)
            need_to_wait = False
            continue
        # --- turn logic ---
        if players_turn:
            message = font.render(computer_message + 'Your turn.', True, black)
            # Show a diamond cursor over squares that can still be shot.
            mouse_pos = pygame.mouse.get_pos()
            computer_grid = pygame.Rect(460, 40, 400, 400)
            if computer_grid.collidepoint(mouse_pos):
                square = computer.pos_to_square(mouse_pos)
                if square not in computer.shots_taken:
                    pygame.mouse.set_cursor(*pygame.cursors.diamond)
                else:
                    pygame.mouse.set_cursor(*pygame.cursors.arrow)
            else:
                pygame.mouse.set_cursor(*pygame.cursors.arrow)
        else:
            pygame.mouse.set_cursor(*pygame.cursors.arrow)
            # The computer takes its shot immediately.
            computer_shot = computer.make_shot(p.ships_sunk())
            player_response = p.query(computer_shot)
            computer_message = 'Computer fired at {}{}: '.format(computer_shot[0], computer_shot[1])
            if player_response:
                computer_message += 'hit! '
            else:
                computer_message += 'miss! '
            computer.register_shot(computer_shot, player_response, p.ships_sunk())
            players_turn = True
            need_to_wait = True
        # --- game over screen (loops until the window is closed) ---
        while computer.defeated or p.defeated:
            clock.tick(10)
            pygame.mouse.set_cursor(*pygame.cursors.arrow)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
            if computer.defeated:
                message_text = 'Game over, you win!'
            elif p.defeated:
                message_text = 'Game over, you lose!'
            message = font.render(message_text, True, black)
            screen.fill((255, 255, 255))
            screen.blit(message, (20, 455))
            p.show(pygame)
            computer.show(pygame)
            pygame.display.flip()


if __name__ == '__main__':
    main()
|
991,554 | 01ea08279191778e1a9fda3d6ed5011501396d13 | import pandas as pd
import itertools
import numpy as np
# Load the weather data set once at import time.
data = pd.read_csv("dataset.csv", ",")
# Sentinel meaning "attribute not constrained" in a rule antecedent.
empty_string = "empty"

# Distinct values per attribute, used to enumerate candidate rules.
unique_outlook = data['Outlook'].unique()
unique_temperature = data['Temperature'].unique()
unique_humidity = data['Humidity'].unique()
unique_windy = data['Windy'].unique()
def calculate_support(X, Y, input_data, empty_string="empty"):
    """Support of the rule X -> Y in ``input_data``.

    X is any object with Outlook/Temperature/Humidity/Windy attributes;
    attributes equal to ``empty_string`` are unconstrained. Y is a value of
    the 'Play' column. Returns |rows matching X with Play == Y| / |rows|,
    or 0 for an empty data set (previously a ZeroDivisionError).

    ``empty_string`` was a module-level global; it is now a defaulted
    parameter with the same value, keeping behavior identical.
    """
    filtered_data = input_data
    if X.Outlook != empty_string:
        filtered_data = filtered_data[filtered_data['Outlook'] == X.Outlook]
    if X.Temperature != empty_string:
        filtered_data = filtered_data[filtered_data['Temperature'] == X.Temperature]
    if X.Humidity != empty_string:
        filtered_data = filtered_data[filtered_data['Humidity'] == X.Humidity]
    if X.Windy != empty_string:
        filtered_data = filtered_data[filtered_data['Windy'] == X.Windy]
    total_number_of_transactions = input_data.shape[0]
    if total_number_of_transactions == 0:
        # Guard: support over an empty data set is defined as 0.
        return 0
    transactions_containing_both_x_and_y = filtered_data[filtered_data['Play'] == Y]['Play'].count()
    return transactions_containing_both_x_and_y / total_number_of_transactions
def calculate_confidence(X, Y, input_data, empty_string="empty"):
    """Confidence of the rule X -> Y in ``input_data``.

    X is any object with Outlook/Temperature/Humidity/Windy attributes;
    attributes equal to ``empty_string`` are unconstrained. Returns
    |rows matching X with Play == Y| / |rows matching X|, or 0 when no
    row matches X.

    ``empty_string`` was a module-level global; it is now a defaulted
    parameter with the same value, keeping behavior identical.
    """
    filtered_data = input_data
    if X.Outlook != empty_string:
        filtered_data = filtered_data[filtered_data['Outlook'] == X.Outlook]
    if X.Temperature != empty_string:
        filtered_data = filtered_data[filtered_data['Temperature'] == X.Temperature]
    if X.Humidity != empty_string:
        filtered_data = filtered_data[filtered_data['Humidity'] == X.Humidity]
    if X.Windy != empty_string:
        filtered_data = filtered_data[filtered_data['Windy'] == X.Windy]
    transaction_containing_x = filtered_data.shape[0]
    transactions_containing_both_x_and_y = filtered_data[filtered_data['Play'] == Y]['Play'].count()
    if transaction_containing_x == 0:
        return 0
    return transactions_containing_both_x_and_y / transaction_containing_x
class Instance():
    """One candidate rule antecedent with support/confidence for both outcomes.

    Attribute values equal to the module-level ``empty_string`` mean "not
    constrained". Metrics are computed eagerly at construction from the
    module-level ``data`` frame.
    """
    def __init__(self, outlook, temperature, humidity, windy):
        self.Outlook = outlook
        self.Temperature = temperature
        self.Humidity = humidity
        self.Windy = windy
        # Per-outcome metrics, keyed by the 'Play' value.
        self.support = {
            'yes': 0,
            'no': 0
        }
        self.confidence = {
            'yes': 0,
            'no': 0
        }
        self.calc_confidence()
        self.calc_support()

    def calc_support(self, input_data = data):
        # NOTE(review): the default binds the module-level `data` at class
        # definition time; rebinding `data` later will not affect this.
        self.support['no'] = calculate_support(self, 'no', input_data)
        self.support['yes'] = calculate_support(self, 'yes', input_data)

    def calc_confidence(self, input_data = data):
        # Same def-time default binding as calc_support.
        self.confidence['no'] = calculate_confidence(self, 'no', input_data)
        self.confidence['yes'] = calculate_confidence(self, 'yes', input_data)

    def __str__(self):
        return f'Instance of {self.Outlook} {self.Temperature} {self.Humidity} {self.Windy} \n Support: [{self.support}] \n Confidence [{self.confidence}] \n'
class Node:
    """Tree node holding one subset of attributes and its candidate rules.

    ``combs_values`` is a list of unique-value arrays (one per chosen
    attribute); their cartesian product yields this node's rules.
    """
    def __init__(self, parent, children, key, level = 0):
        # NOTE(review): the `children` argument is ignored — children are
        # always initialised empty and filled in by generate_tree.
        self.parent = parent
        self.combs_values = []
        self.children = []
        self.key = key        # index of the last attribute added (-1 for root)
        self.level = level
        self.rules = []
        self.instances_of_rule = []

    def generate_combs(self):
        """Expand combs_values into concrete, scored rule instances."""
        self.combinations = list(itertools.product(*self.combs_values))
        self.get_max_rules()

    def get_max_rules(self):
        # Turn each value combination into a fixed-slot vector and score it.
        for item in self.combinations:
            vector = construct_vector(item)
            self.instances_of_rule.append(Instance(vector[0], vector[1], vector[2], vector[3]))
            self.rules.append(vector)

    def __str__(self):
        string = f'\nNode on level {self.level}'
        for instance in self.instances_of_rule:
            string += f'\n\t{instance}'
        return string
def construct_vector(vector, empty_string="empty"):
    """Spread attribute values into fixed [Outlook, Temperature, Humidity, Windy] slots.

    Values are routed by recognising each attribute's known vocabulary;
    anything unrecognised (the windy flags) lands in the last slot. Missing
    attributes keep the ``empty_string`` placeholder (previously read from a
    module-level global; now a defaulted parameter with the same value).
    """
    result = [empty_string] * 4
    for value in vector:
        if value in ("overcast", "rainy", "sunny"):
            result[0] = value
        elif value in ("hot", "cool", "mild"):
            result[1] = value
        elif value in ("high", "normal"):
            result[2] = value
        else:
            # Not in any known vocabulary: assumed to be the Windy value.
            result[3] = value
    return result
def generate_tree(input_data, input_set, parent):
    """Recursively build the attribute-combination tree under ``parent``.

    ``input_set`` is the list of unique-value arrays (one per attribute).
    Each child extends the parent's attribute subset with one attribute
    that comes strictly after the parent's last, so the tree enumerates
    every ordered subset of attributes exactly once.
    """
    children = []
    for index,_ in enumerate(input_set):
        node_last_value_index = index
        if parent.key >= 0:
            # Only attributes after the parent's last one are candidates.
            node_last_value_index += parent.key + 1
        new_node = Node(parent, [], node_last_value_index, parent.level + 1)
        if node_last_value_index < len(input_set):
            # Extend the parent's value sets with this attribute's values.
            new_array_combs = parent.combs_values.copy()
            new_array_combs.append(input_set[node_last_value_index])
            new_node.combs_values = new_array_combs
            new_node.generate_combs()
            children.append(new_node)
            if node_last_value_index + 1 < len(input_set):
                generate_tree(input_data, input_set, new_node)
    parent.children = children
# Build the full enumeration tree over all attribute-value combinations.
root = Node(None, [], -1)
generate_tree(data, [unique_outlook, unique_temperature, unique_humidity, unique_windy], root)
def BFS(root):
    """Breadth-first walk of the rule tree; return all nodes rendered as text.

    The root itself is skipped (it carries no rules). Each visited node is
    also printed, preserving the original console output. The dead
    ``number_of_rules`` accumulator (computed but never used or returned)
    was removed, and the O(n) ``list.pop(0)`` queue replaced with a deque.
    """
    from collections import deque  # local import: O(1) popleft
    output_string = ''
    queue = deque([root])
    while queue:
        node = queue.popleft()
        queue.extend(node.children)
        if node is not root:
            output_string += f'{node} \n'
            print(node)
    return output_string
# Walk the tree, printing every rule node, and persist the dump to disk.
out = BFS(root)
with open('output.txt', 'w+') as f:
    f.write(out)
991,555 | ae2802be45a7831eb4121c73c64a39a7b0b4a996 | import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, metrics
# csv writer
from util import *
# hyperparameters
GAMMA = 0.000001
# kernel options: linear, poly, rbf, sigmoid, precomputed
KERNEL = 'poly'
# degree of the polynomial kernal function
DEGREE = 2
# tolerance
TOL = 1e-9
# penalty parameter of error term
C = 100.0

training_data = scipy.io.loadmat('labeled_images.mat')
testing_data = scipy.io.loadmat('public_test_images.mat')

# training data (transposed so samples come first)
identity = training_data['tr_identity']
images = np.transpose(training_data['tr_images'])
labels = training_data['tr_labels']

# testing data, flattened to (n_samples, n_pixels)
test_images = np.transpose(testing_data['public_test_images'])
test_data = test_images.reshape((len(test_images), -1))

n_samples = len(images)
data = images.reshape((n_samples, -1))

# create svm classifier
classifier = svm.SVC(gamma=GAMMA, C=C, kernel=KERNEL, degree=DEGREE, tol=TOL)

# Train on the first half, validate on the second half.
# NOTE: `n_samples / 2` relies on Python 2 integer division (this file uses
# the Python 2 print statement below); under Python 3 it would be a float
# index and fail.
classifier.fit(data[:n_samples / 2], labels[:n_samples/2])

# predict some values
expected = labels[n_samples/2:]
predicted = classifier.predict(data[n_samples/2:])
predicted2 = classifier.predict(test_data)

# write the test-set predictions to csv
write_csv('predictions_svm.csv', predicted2)

print "Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))
991,556 | 5489bc16648055180580f02ced8d1216263b9cdb | # pylint: disable=redefined-builtin
"""Test related method and functionality of Context."""
import pytest
import responses
from decanter.core import Context
from decanter.core.extra import CoreStatus
def test_no_context(globals, client):
    """Test calling core APIs when no context has been created.

    CoreClient will touch context.LOOP; every API must raise
    AttributeError with the message "event loop is 'NoneType'".
    """
    with pytest.raises(AttributeError, match=r'event loop is \'NoneType\''):
        client.upload(file=globals['test_csv_file'])
    with pytest.raises(AttributeError, match=r'event loop is \'NoneType\''):
        client.train(train_input=globals['train_input'])
    with pytest.raises(AttributeError, match=r'event loop is \'NoneType\''):
        client.predict(
            predict_input=globals['predict_input'])
@responses.activate
def test_connection_fail(context_fixture):
    """Context exits from Python when meeting any RequestException."""
    # The fixture argument names the failure scenario to simulate.
    with pytest.raises(SystemExit):
        context_fixture('RequestException')
@responses.activate
def test_auth_fail(context_fixture):
    """Context exits from Python when authorization failed."""
    # The fixture argument names the failure scenario to simulate.
    with pytest.raises(SystemExit):
        context_fixture('AuthFailed')
@responses.activate
def test_stop_jobs(globals, urls, client, mock_test_responses, context_fixture):
    """Context stops the jobs in the list passed to `Context.stop_jobs()`."""
    async def cancel():
        # Cancel the 1st and 3rd upload; the 2nd should run to completion.
        context.stop_jobs([datas[0], datas[2]])

    # Registered before the RUNNING mock so the surviving job can finish.
    responses.add(
        responses.GET, urls('task', 'upload'),
        json={
            '_id': globals['upload'],
            'status': CoreStatus.DONE,
            'result': {
                '_id': globals['data']}
        },
        status=200,
        content_type='application/json')
    context = context_fixture('Healthy')
    mock_test_responses(task='upload', status=CoreStatus.RUNNING)
    # Stop endpoint that the cancelled jobs will hit.
    responses.add(
        responses.PUT, urls('stop', 'upload'),
        json={
            'message': 'task removed'
        },
        status=200,
        content_type='application/json')
    datas = []
    for i in range(3):
        data = client.upload(file=globals['test_csv_file'], name=str(i))
        datas.append(data)
    # Schedule the cancellation coroutine alongside the jobs.
    cancel_task = Context.LOOP.create_task(cancel())
    Context.CORO_TASKS.append(cancel_task)
    context.run()
    # Stopped jobs end up failed; the untouched one completes.
    assert datas[0].status == CoreStatus.FAIL
    assert datas[2].status == CoreStatus.FAIL
    assert datas[1].status == CoreStatus.DONE
@responses.activate
def test_stop_all_jobs(
        globals, urls, client, mock_test_responses, context_fixture):
    """Context stops all jobs in running or pending status."""
    async def cancel():
        context.stop_all_jobs()
        return

    context = context_fixture('Healthy')
    # Jobs stay RUNNING so stop_all_jobs has something to cancel.
    mock_test_responses(task='upload', status=CoreStatus.RUNNING)
    responses.add(
        responses.PUT, urls('stop', 'upload'),
        json={
            'message': 'task removed'
        },
        status=200,
        content_type='application/json')
    datas = []
    for i in range(3):
        data = client.upload(file=globals['test_csv_file'], name=str(i))
        datas.append(data)
    cancel_task = Context.LOOP.create_task(cancel())
    Context.CORO_TASKS.append(cancel_task)
    context.run()
    # Every job should have been cancelled and marked failed.
    assert all(data.status == CoreStatus.FAIL for data in datas)
@responses.activate
def test_get_jobs_by_name(
        globals, client, mock_test_responses, context_fixture):
    """Context gets jobs whose names appear in the given name list."""
    context = context_fixture('Healthy')
    mock_test_responses(task='upload', status=CoreStatus.DONE)
    data_list = []
    # Jobs are named '0', '1', '2' via the upload `name` argument.
    for i in range(3):
        data = client.upload(file=globals['test_csv_file'], name=str(i))
        data_list.append(data)
    context.run()
    jobs = context.get_jobs_by_name(names=['0', '2'])
    # Identity check: the very same job objects are returned, in name order.
    assert jobs[0] is data_list[0]
    assert jobs[1] is data_list[2]
@responses.activate
def test_get_all_jobs(
        globals, client, mock_test_responses, context_fixture):
    """Context gets all jobs."""
    context = context_fixture('Healthy')
    mock_test_responses(task='upload', status=CoreStatus.DONE)
    data_list = []
    for i in range(2):
        data = client.upload(file=globals['test_csv_file'], name=str(i))
        data_list.append(data)
    context.run()
    jobs = context.get_all_jobs()
    # Identity check: same job objects, in submission order.
    assert jobs[0] is data_list[0]
    assert jobs[1] is data_list[1]
@responses.activate
def test_get_jobs_status(
        globals, urls, client, mock_test_responses, context_fixture):
    """Context shows jobs status in a dataframe filtered by status."""
    context = context_fixture('Healthy')
    mock_test_responses(task='upload', status=CoreStatus.DONE)
    # Second registration: the second poll returns FAIL.
    # NOTE(review): relies on `responses` consuming registrations in order —
    # confirm against the mock_test_responses fixture.
    responses.add(
        responses.GET, urls('task', 'upload'),
        json={
            '_id': globals['upload'],
            'status': CoreStatus.FAIL
        },
        status=200,
        content_type='application/json')
    for i in range(2):
        client.upload(file=globals['test_csv_file'], name=str(i))
    context.run()
    job_fail = context.get_jobs_status(status=['fail'])
    # Only the failed job ('1') should be listed.
    assert job_fail.iloc[0]['status'] == 'fail'
    assert job_fail.iloc[0]['Job'] == '1'
991,557 | dccfad4074f50f0a1d14f1260d6ac7c75ad3802a | # -*- coding: utf-8 -*-
"""This file contains the text format specific event object classes."""
from plaso.events import time_events
from plaso.lib import eventdata
class TextEvent(time_events.TimestampEvent):
    """Convenience class for a text format-based event."""

    DATA_TYPE = 'text:entry'

    def __init__(self, timestamp, offset, attributes):
        """Initializes a text event object.

        Args:
          timestamp: The timestamp time value. The timestamp contains the
                     number of microseconds since Jan 1, 1970 00:00:00 UTC.
          offset: The offset of the attributes.
          attributes: A dict that contains the events attributes.
        """
        super(TextEvent, self).__init__(
            timestamp, eventdata.EventTimestamp.WRITTEN_TIME)
        self.offset = offset
        for name, value in iter(attributes.items()):
            # TODO: Revisit this constraints and see if we can implement
            # it using a more sane solution.
            # Skips empty string values only; other falsy values (0, None)
            # are still set. `basestring` makes this Python 2-only code.
            if isinstance(value, basestring) and not value:
                continue
            setattr(self, name, value)
991,558 | eda7832cd1f91b27a038efb1474acaacb6a03fcf | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
"""
"""
class Solution:
    """Search a row-and-column sorted matrix (LeetCode "Search a 2D Matrix"):
    each row is sorted, and each row's first element is greater than the
    previous row's last element."""

    # @param matrix, a list of lists of integers
    # @param target, an integer
    # @return a boolean
    def searchMatrix(self, matrix, target):
        """Return True iff target occurs in matrix."""
        if not matrix:
            return False
        # First find the row whose value range could contain target...
        row_idx = self.search_row(matrix, target, 0, len(matrix)-1)
        if row_idx < 0:
            return False
        # ...then binary-search within that row.
        return self.search_col(matrix[row_idx], target, 0, len(matrix[row_idx])-1)

    def search_row(self, matrix, target, lo, hi):
        """Index of the row whose range covers target, or lo-1 if below all rows."""
        if target < matrix[lo][0]:
            return lo-1
        elif target >= matrix[hi][0]:
            return hi
        if hi - lo < 5:
            # Small range: linear scan between consecutive row heads.
            for i in range(lo, hi):
                if matrix[i][0] <= target < matrix[i+1][0]:
                    return i
        # Binary split. BUG FIX: use floor division — on Python 3, `/` on
        # ints yields a float, which is not a valid list index. `//` is
        # identical on Python 2 for non-negative ints.
        mid = (lo + hi) // 2
        if target >= matrix[mid][0]:
            return self.search_row(matrix, target, mid, hi)
        else:
            return self.search_row(matrix, target, lo, mid)

    def search_col(self, nums, target, lo, hi):
        """Standard binary search for target in nums[lo..hi]."""
        if hi - lo < 5:
            # Small range: linear scan is cheaper than further recursion.
            for i in range(lo, hi+1):
                if nums[i] == target:
                    return True
            return False
        mid = (lo + hi) // 2  # BUG FIX: floor division (see search_row)
        if nums[mid] == target:
            return True
        elif nums[mid] < target:
            return self.search_col(nums, target, mid+1, hi)
        else:
            return self.search_col(nums, target, lo, mid-1)
if __name__ == '__main__':
    s = Solution()
    # Python 2 print statement: search the sample matrix for the value
    # given on the command line.
    print s.searchMatrix([
        [1, 3, 5, 7],
        [10, 11, 16, 20],
        [23, 30, 34, 50]
    ], int(sys.argv[1]))
|
991,559 | 18908fbf9b58aa362dc3ca76aaf871f49be3d2aa |
from keras import layers
from keras import models
from keras.utils import plot_model
from keras.optimizers import SGD
import numpy as np
from STFT import istft
import scipy.io.wavfile
import matplotlib.pyplot as plt
import os
from DataGeneration import dataGenBig
from Others import formatData
from GenerateModels import generateModel
# import theano
# print('theano: ', theano.__version__)
# import sys
# print(sys.version)
# import keras
# print('keras: ', keras.__version__)
model = generateModel()
# print(model.summary())
# plot_model(model, to_file='model.png')

# # =========continue to train the model
# optim = SGD(lr=0.0001, momentum=0.9, nesterov=True)
# model.compile(loss='mean_squared_error', optimizer=optim)
# model.load_weights('BSSmodelProcessFeature27Sept')
# dg = dataGenBig(False,87654321)
# fileName = "ProcessFeature0001.txt"

# =========train a new model
optim = SGD(lr=0.001, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=optim)
dg = dataGenBig(False,123456789)
fileName = "ProcessFeature0005.txt"

# The parameters for training the model
dg.TrainDataParams()
saveModelName = "BSSmodelProcessFeature"
# if os.path.exists(fileName):
#     os.remove(fileName)
# Training log; flushed after every line so progress can be tailed live.
# NOTE(review): not closed on exception — consider a `with` block.
wr = open(fileName, "w")
for epoch in range(100):
    stri = '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%Epoch is {}\n'.format(epoch)
    print(stri)
    wr.write(stri)
    wr.flush()
    for index in range(int(dg.trainNum/dg.BATCH_SIZE_Train)):
        # Checkpoint the weights every 10 batches.
        if index % 10 == 0:
            model.save_weights(saveModelName, True)
        # Evaluate on a validation batch every 5 batches.
        if index % 5 == 0:
            (X_valid, y_valid) = dg.batchCallableValidation()
            (X_valid_new, y_valid_new) = formatData(X_valid, y_valid, dg)
            loss = model.evaluate(X_valid_new, y_valid_new, batch_size=X_valid_new.shape[0], verbose=1)
            stri = "*****************batch %d valid_loss : %f\n" % (index, loss)
            print(stri)
            wr.write(stri)
            wr.flush()
        # One SGD step on a fresh training batch.
        (X_train, y_train) = dg.batchCallableTrain()
        (X_train_new,y_train_new) = formatData(X_train, y_train, dg)
        loss = model.train_on_batch(X_train_new, y_train_new)
        stri = "==================batch %d train_loss : %f\n" % (index, loss)
        print(stri)
        wr.write(stri)
        wr.flush()
wr.close()
|
991,560 | 3a1ae7ab514d4f038fb9546b6e2ae690f34e6f2c | # -*- coding: utf-8 -*-
'''
@Author : Arron
@email :hou.zg@foxmail.com
@software: python
@File : 平均值.py
@Time : 2018/2/12 22:16
'''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Small demo of tf.reduce_mean reduction axes (TF1 graph/session style).
# x = np.arange(1, 10, 1)
xt = tf.constant([[1., 2.],
                  [3., 4.]])
sess = tf.Session()
# y = sess.run(tf.reduce_mean(xt))
print(sess.run(tf.reduce_mean(xt)))                        # overall mean: 2.5
print(sess.run(tf.reduce_mean(xt,reduction_indices=0)))    # legacy alias for axis=0: [2. 3.]
print(sess.run(tf.reduce_mean(xt,0)))                      # column means: [2. 3.]
print(sess.run(tf.reduce_mean(xt,1)))                      # row means: [1.5 3.5]
|
991,561 | 6221b8fa1cad4a08078aad9843ff084a462e913d |
import faulthandler; faulthandler.enable()
import time
from tqdm import tqdm
import time
import json
from os import environ
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import Search
# Target indices: one for whole documents, one for 128-token chunks.
ES_INDEX_FULL_TEXT = "nycdocs-use"
FIRST = False
ES_INDEX_CHUNK = "nycdocs-use-chunk128"

vector_dims = 512
batch_size = 512
total_chunks = 37281  # get this with `wc nyc_docs_paragraphs.json`
total_docs = 4251

## Put ElasticSearch credentials here
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
if not es.ping():
    # BUG FIX: removed the unreachable `sys.exit(1)` that followed this
    # raise (`sys` was never imported, so it would have been a NameError
    # had it ever run). The raise alone aborts the script.
    raise ValueError("Connection to ElasticSearch failed")
else:
    print('Connection to ElasticSearch OK')

# Running ordinal and id<->ordinal maps shared with the indexing helper below.
doc_counter = 0
idx_name_chunk = {}
name_idx_chunk = {}
def es_index_batch_chunk(lbatch):
    """Bulk-index one DataFrame batch of text chunks into ES_INDEX_CHUNK.

    Side effects: advances the module-level ``doc_counter`` and fills the
    ``idx_name_chunk`` / ``name_idx_chunk`` lookup maps.
    """
    global doc_counter
    records = []
    for body in lbatch.to_dict(orient='records'):
        # Chunk id = parent doc id + "c" + chunk ordinal within the doc.
        id_ = body["_id"] + "c" + str(body["chonk"])
        idx_name_chunk[doc_counter] = id_
        name_idx_chunk[id_] = doc_counter
        body["page"] = doc_counter
        body["_index"] = ES_INDEX_CHUNK
        # Drop the bookkeeping column added by the caller.
        del body["smallenough"]
        body["doc_id"] = body["_id"]
        body["_id"] = id_
        records.append(body)
        doc_counter += 1
    res = helpers.bulk(es, records, chunk_size=len(records), request_timeout=200)
import pandas as pd
import numpy as np

# Stream the chunked paragraphs file and index it batch-by-batch.
with tqdm(total=total_chunks) as pbar:
    for j, batch in enumerate(pd.read_json('nyc_docs-sentences15.json', lines=True, chunksize=batch_size)):
        # Drop pathologically large texts (>= 100k chars) before indexing.
        batch["smallenough"] = batch["text"].apply(lambda x: len(x) < 100000)
        batch = batch[batch["smallenough"]]
        es_index_batch_chunk(batch)
        pbar.update(len(batch))

# Persist the id<->ordinal maps built during indexing.
with open(ES_INDEX_CHUNK + "_idx_name.json", 'w') as f:
    f.write(json.dumps(idx_name_chunk))
with open(ES_INDEX_CHUNK + "_name_idx.json", 'w') as f:
    f.write(json.dumps(name_idx_chunk))

# also put the full documents into ES
with open('nyc_docs.jsonl', 'r') as reader:
    for i, line_json in tqdm(enumerate(reader), total=total_docs):
        line = json.loads(line_json)
        body = {
            # Truncated to 1M chars — presumably to stay under an ES field
            # size limit; confirm the target mapping.
            "text": line["_source"]["content"][:1000000],
            "routing": line.get("_routing", None),
        }
        es.index(index=ES_INDEX_FULL_TEXT, id=line["_id"], body=body)
991,562 | 0436e56cfdf432ec1e6b9c00fa51713fd8f43c3e |
#calss header
class _STREAK():
def __init__(self,):
self.name = "STREAK"
self.definitions = [u'a long, thin mark that is easily noticed because it is very different from the area surrounding it: ', u'an often unpleasant characteristic that is very different from other characteristics: ', u'a short period of good or bad luck: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
991,563 | 98f1f7a2a4ad95daabd55b2f2c36196a05056a3c | import os, sys
import warnings
import numpy as np
class OneSigma:
    """One-sigma prediction-interval wrapper around a base forecasting model.

    Tracks the empirical miss rate of the [mu - sigma, mu + sigma] interval
    produced by the wrapped model as observations arrive.
    """
    def __init__(self, args, model_base):
        super().__init__()
        self.args = args
        self.base = model_base
        self.reset()

    def reset(self):
        """Clear error counters and prediction state."""
        self.n_err = 0           # observations that fell outside the interval
        self.n_obs = 0           # observations scored so far
        self.initialized = False
        self.ps = None           # last predicted interval [lo, hi]

    def error(self, label):
        """Return 0.0 if label falls inside the current interval, else 1.0."""
        itv = self.predict()
        if itv[0] <= label and label <= itv[1]:
            return 0.0
        else:
            return 1.0

    def predict(self):
        """Return [mu - sigma, mu + sigma] from the base model's forecast."""
        # assumes base.predict() returns {'mu': ..., 'cov': ...} with
        # squeezable scalar entries — TODO confirm against model_base.
        obs_pred = self.base.predict()
        mu, sig = np.squeeze(obs_pred['mu']), np.sqrt(np.squeeze(obs_pred['cov']))
        interval = [mu - sig, mu + sig]
        return interval

    def init_or_update(self, label):
        """Score label against the current interval, then update the base model."""
        if label is None:
            return
        if not self.initialized:
            # First observation only seeds the base model's state.
            self.base.init_state(label)
            self.initialized = True
        else:
            # check error before update
            err = self.error(label)
            self.n_err += err
            self.n_obs += 1
            # print(f'MVP: error = {self.n_err}, n = {self.n_obs}')
            # predict
            self.ps = self.predict()
            print(f'[OneSigma] size = {self.ps[1] - self.ps[0]:.4f}, '
                  f'interval = [{self.ps[0]:.4f}, {self.ps[1]:.4f}], obs = {label:.4f}, '
                  f'error_cur = {err}, '
                  f'error = {self.n_err / self.n_obs:.4f}'
                  )
            # update the base model
            self.base_out = self.base.update(label, update_max=False)
            self.label = label

    def summary(self):
        """Return the last observation, interval, and error counts."""
        return {
            'obs': self.label,
            'ps': self.ps,
            'n_err': self.n_err,
            'n_obs': self.n_obs,
        }
|
991,564 | 4a4d2149cd9577bd1064a12a514affcc8a8f8c81 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
# (value, Spanish label) choices for sponsorship tiers.
SPONSOR_LEVELS = (
    ('diamond', u'DIAMANTE'),
    ('platinum', u'PLATINO'),
    ('gold', u'ORO'),
    ('silver', u'PLATA'),
    ('bronce', u'BRONCE'),
)
class Sponsor(models.Model):
    """Confirmed event sponsor, displayed with its tier and logo."""
    name = models.CharField(max_length=100)
    url = models.URLField(max_length=255)
    level = models.CharField(max_length=100, choices=SPONSOR_LEVELS)
    image = models.ImageField(upload_to='.')  # logo, stored at MEDIA_ROOT

    class Meta:
        verbose_name = u'patrocinador'
        verbose_name_plural = u'patrocinadores'

    def __unicode__(self):
        # Python 2-era Django (__str__ would be used on Python 3).
        return self.name
# Outreach-pipeline status for a sponsorship prospect (stored value, Spanish label).
STATUS_LEVELS = (
    ('reject', u'Rechazo'),
    ('objection', u'Objeción'),
    ('waiting', u'A la espera'),
    ('accepted', u'Aceptado'),
)
# Sponsorship tiers plus a 'none' sentinel for prospects without a tier yet.
ACCEPTED_LEVEL = (('none', u'NONE'),) + SPONSOR_LEVELS
# Invoice lifecycle states (stored value, Spanish label).
INVOICE_STATUS = (
    ('none', u'None'),
    ('sent', u'Enviada'),
    ('aceptada', u'Aceptada'),
    ('recibida', u'Recibida'),
)
class Prospect(models.Model):
    """A potential sponsor being tracked through the outreach pipeline."""
    company = models.CharField(max_length=100)
    web = models.CharField(max_length=255, blank=True, null=True)
    contact_name = models.CharField(max_length=100, blank=True, null=True)
    email = models.CharField(max_length=100)
    previous_interest = models.BooleanField(default=False)
    already_contacted = models.BooleanField(default=False)
    status = models.CharField(max_length=100, choices=STATUS_LEVELS,
                              null=True, blank=True)
    user_in_charge = models.ForeignKey(User, blank=True, null=True)
    comments = models.TextField(blank=True, null=True)
    # Fix: the default must be the *stored value* ('none'), not the whole
    # (value, label) tuple — `default=ACCEPTED_LEVEL[0]` would persist the
    # tuple's string representation, which is not a valid choice key.
    accepted_level = models.CharField(max_length=100, choices=ACCEPTED_LEVEL, default=ACCEPTED_LEVEL[0][0])
    invoice_status = models.CharField(max_length=100, choices=INVOICE_STATUS, default=INVOICE_STATUS[0][0])

    class Meta:
        verbose_name = u'candidato'
        verbose_name_plural = u'candidatos'

    def __unicode__(self):
        # Python 2 / Django 1.x display name in the admin.
        return self.company
|
991,565 | c7e3e73c2c5b7c8574269eb2411acf867e4e7497 | #!/usr/bin/env python3.6
import argparse
import asyncio
import time
import sys
import string
import json
import random
import multiprocessing as mp
import aiohttp
async def post_message(session, url, message):
    """POST `message` to the service and return the response body text."""
    async with session.post(url, json={'message': message}) as resp:
        assert resp.status in (201, 200)
        return await resp.text()
async def get_shaurl(session, shaurl):
    """GET the message stored at `shaurl`; the lookup must succeed with 200."""
    async with session.get(shaurl) as resp:
        assert resp.status == 200
        return await resp.text()
async def delete_shaurl(session, shaurl):
    """DELETE the message at `shaurl` and return the response body (status unchecked)."""
    async with session.delete(shaurl) as resp:
        return await resp.text()
async def aio(messages, procnum, avg_dict, count_dict, hostname_and_port):
    """Drive one worker: POST, GET, then DELETE every message, measuring throughput.

    Results are written into the shared dicts `avg_dict` (requests/sec) and
    `count_dict` (request count), both keyed by `procnum`.

    Cleanup vs. the original: the unused `sha`/`shaurl` pre-initialisations and
    the dead `end` variable are gone, and the messages are iterated directly
    instead of by index.
    """
    url = 'https://{}:{}/messages'.format(*hostname_and_port)
    conn = aiohttp.TCPConnector(verify_ssl=False)  # NOTE: TLS verification disabled (benchmark against self-signed certs)
    count_dict[procnum] = 0
    async with aiohttp.ClientSession(connector=conn) as session:
        start = time.perf_counter()
        for message in messages:
            text = await post_message(session, url, message)
            sha = json.loads(text)["digest"]
            shaurl = "{}/{}".format(url, sha)
            count_dict[procnum] += 1
            await get_shaurl(session, shaurl)
            count_dict[procnum] += 1
            await delete_shaurl(session, shaurl)
            count_dict[procnum] += 1
        elapsed = time.perf_counter() - start
        avg = count_dict[procnum] / elapsed
        print('avg reqs/sec for thread {}: {}'.format(procnum, avg))
        avg_dict[procnum] = avg
def start_asyncio_processing(messages, procnum, avg_dict, count_dict, hostname_and_port):
    # Entry point for each worker process: run the async benchmark coroutine
    # to completion on this process's event loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(aio(messages, procnum, avg_dict, count_dict, hostname_and_port))
def get_n_messages_length_k(n, k):
    """Return `n` random strings of length `k`, drawn from uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return [''.join(random.choices(alphabet, k=k)) for _ in range(n)]
def main():
    """Spawn one worker process per "thread", run the benchmark, print aggregates.

    Fixes vs. the original: `jobs` and `processes` were two identical lists of
    the same Process objects — one list suffices; the invariant
    (hostname, port) tuple is built once instead of per iteration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--threads', type=int, default=1)
    parser.add_argument('-c', '--count', type=int, default=1)
    parser.add_argument('-N', '--length', type=int, default=10)
    parser.add_argument('-n', '--hostname', type=str, default='localhost')
    parser.add_argument('-p', '--port', type=int, default=5000)
    args = parser.parse_args()

    manager = mp.Manager()
    avg_dict = manager.dict()    # per-worker requests/sec
    count_dict = manager.dict()  # per-worker request counts

    # Pre-generate each worker's payloads so message creation is not timed.
    messages_by_thread = [get_n_messages_length_k(args.count, args.length)
                          for _ in range(args.threads)]

    hostname_and_port = (args.hostname, args.port)
    processes = [
        mp.Process(target=start_asyncio_processing,
                   args=(messages_by_thread[i], i, avg_dict, count_dict, hostname_and_port))
        for i in range(args.threads)
    ]

    start = time.perf_counter()
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    elapsed = time.perf_counter() - start

    reqs = sum(count_dict.values())
    avg = reqs / elapsed
    print('total secs: {}. total reqsts: {}. total reqs/sec: {}'.format(
        elapsed,
        reqs,
        avg,
    ))
    avg_by_thread = sum(avg_dict.values()) / len(avg_dict)
    print("averaging {} reqs/second/thread, or {} requests in {} secs".format(
        avg_by_thread,
        int(avg_by_thread * args.threads),
        elapsed))
|
991,566 | 26ab918d8cdbfe63c6e6430670f413d686f06586 | import numpy as np
from numpy import pi
from numpy.testing import assert_allclose
import brownian_ot
from brownian_ot.utils import rot_x, rot_y, rot_z, sphere_D, dimer_D
from brownian_ot.force_utils import calc_fx, calc_fy, calc_fz, calc_tx, calc_ty
def test_rotation_matrices():
    """Rotating by pi flips the two orthogonal axes; rotating by 2*pi is the identity."""
    identity = np.identity(3)
    cases = [
        (rot_x, np.diag([1, -1, -1])),   # pi about x inverts y and z
        (rot_y, np.diag([-1, 1, -1])),   # pi about y inverts x and z
        (rot_z, np.diag([-1, -1, 1])),   # pi about z inverts x and y
    ]
    for rot, half_turn in cases:
        assert_allclose(rot(pi), half_turn, atol=1e-15)
        assert_allclose(rot(2 * pi), identity, atol=1e-15)
def test_force_extraction():
    """Each calc_* helper must pick out the matching component of the 6-vector."""
    def constant_force(pos, orient):
        return np.arange(6)
    extractors = [calc_fx, calc_fy, calc_fz, calc_tx, calc_ty]
    for component, extract in enumerate(extractors):
        assert_allclose(extract(0, constant_force), component)
def test_dimer_D_ratios():
    '''
    Check that dimensionality in dimer diffusion tensors is correct.
    '''
    kT = 1/25 * 1e-19
    eta = 1e-3
    a = 5e-7
    D_sphere = sphere_D(a, kT, eta)
    D_dimer = dimer_D(a, kT, eta)
    translational_ratio = D_dimer[2, 2] / D_sphere[2, 2]
    rotational_ratio = D_dimer[5, 5] / D_sphere[5, 5]
    # Expected ratios — presumably tabulated dimer mobility coefficients;
    # confirm against the brownian_ot reference before changing.
    assert_allclose(np.array([translational_ratio, rotational_ratio]),
                    np.array([6/7.740, 8/14.42]))
|
991,567 | 4e1a943dc0e3c2cfbec842510e7c30143e574425 | age = input("Are you a cigarette addict older than 75 years old? (please answer yes or no) :")
if age == "yes" :
age = True
elif age == "no":
age = False
chronic = input("Do you have a severe chronic disease? (please answer yes or no) :")
if chronic == "yes" :
chronic = True
elif chronic == "no":
chronic = False
immune = input("Is your immune system too weak? (please answer yes or no) :")
if immune == "yes" :
immune = True
elif immune == "no":
immune = False
if age or chronic or immune == True :
print("You are in risky group")
elif age or chronic or immune == False :
print("You are not in risky group")
|
# Assemble README.md by concatenating the three source sections in order.
sections = []
for source in ('header.md', 'AUTHORS.md', 'footer.md'):
    with open(source) as fh:
        sections.append(fh.read())
with open('README.md', 'w') as fh:
    fh.write(''.join(sections))
|
991,569 | 60e854a50e91584e9a5a430810330944aa818f92 | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from collections import namedtuple, deque
import random
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from torch.optim import Adam
#-------hyperParameter-----------
num_agents = 10        # number of agents (one actor/critic pair each)
batch_size = 32        # replay-buffer sample size per learning step
dim_obs = 8            # per-agent observation feature size (IR readings) — see get_obs_ir
dim_act = 2            # per-agent continuous action dimension
capacity = 10000       # replay-buffer capacity (deque maxlen)
explore_step = 5000    # pure-exploration steps before learning starts
GAMMA = 0.99           # discount factor in the TD target
tau = 0.01             # soft-update interpolation rate for target networks
scale_reward = 0.01    # reward scaling applied when forming the TD target
use_cuda = torch.cuda.is_available()
Tensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
#----------Basic Function-----------
def ob_process(img):
    """Normalize an image array from [0, 255] to [0, 1] and return a float32 tensor.

    `img / 255` yields a float64 array; a single `.float()` cast replaces the
    original redundant `torch.FloatTensor(torch.from_numpy(...))` double
    conversion while producing the same float32 result.
    """
    return torch.from_numpy(img / 255).float()
def soft_update(target, source, t):
    """Polyak-average `source` into `target`: target = (1 - t) * target + t * source."""
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        blended = tgt_param.data * (1 - t) + src_param.data * t
        tgt_param.data.copy_(blended)
def get_obs_cnn(obs):
    """Stack every agent's image observation and return the batch as a float tensor."""
    per_agent = [np.r_[obs["image"][i]] for i in range(num_agents)]
    stacked = np.r_[per_agent]
    return ob_process(stacked)
def get_obs_ir(obs):
    """Collect every agent's IR sensor readings into one stacked numpy array."""
    per_agent = [np.r_[obs["ir"][i]] for i in range(num_agents)]
    return np.r_[per_agent]
def act2lst(act_array):
    """Map a pair of continuous actions in [0, 1] to a discrete action id in 0..8.

    Each component is quantized into three levels by thirds (matching the
    original thresholds 0.333 and 0.6666), and the pair is encoded base-3:
    id = first_level * 3 + second_level. This reproduces the original 9-way
    if/elif table exactly, and additionally guards against components outside
    [0, 1] (where the original left its temporary unbound).
    """
    levels = []
    for value in list(act_array):
        if value < 0.333:
            level = 0
        elif value < 0.6666:
            level = 1
        else:
            level = 2
        levels.append(level)
    return levels[0] * 3 + levels[1]
# -----------Net Structure---------------
class Critic(nn.Module):
    """Centralized critic: scores the joint observations and actions of all agents."""

    def __init__(self, num_agents, dim_o, dim_a):
        super(Critic, self).__init__()
        self.num_agents = num_agents
        self.dim_o = dim_o
        self.dim_a = dim_a
        joint_obs = dim_o * num_agents
        joint_act = dim_a * num_agents
        self.fc1 = nn.Linear(joint_obs, 1024)
        self.fc2 = nn.Linear(1024 + joint_act, 512)  # actions are injected after fc1
        self.fc3 = nn.Linear(512, 300)
        self.fc4 = nn.Linear(300, 1)

    def forward(self, input, acts):
        hidden = F.relu(self.fc1(input))
        hidden = torch.cat([hidden, acts], 1)
        hidden = F.relu(self.fc2(hidden))
        hidden = F.relu(self.fc3(hidden))
        return self.fc4(hidden)
class Actor(nn.Module):
    """Per-agent policy network: maps a 2-channel 84x84 frame stack to 2 action values."""

    def __init__(self):
        super(Actor, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=2, out_channels=32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(in_features=7*7*64, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=2)

    def forward(self, input):
        features = F.relu(self.conv1(input))
        features = F.relu(self.conv2(features))
        features = F.relu(self.conv3(features))
        flat = features.view(features.size(0), -1)  # (batch, 7*7*64) for 84x84 input
        return self.fc2(F.relu(self.fc1(flat)))
#--------------------Buffer------------------------
class replay_memory:
    # Ring-buffer replay memory. `sample` returns (previous, current, next)
    # transitions so that two consecutive frames can be stacked channel-wise
    # per state (what Actor's 2-channel conv1 expects).
    def __init__(self):
        self.memory = deque(maxlen = capacity)  # oldest entries drop automatically
        self.Transition = namedtuple('Transition',['state_cnn','state_ir','action','reward','done'])
    def __len__(self):
        return len(self.memory)
    def add(self,state_cnn,state_ir,action,reward,done):
        # Append one multi-agent transition.
        e = self.Transition(state_cnn,state_ir,action,reward,done)
        self.memory.append(e)
    def sample(self,batch_size):
        # Draw random indices away from both ends so that index-1 and index+1
        # are always valid neighbours.
        # NOTE(review): neighbouring indices may cross episode boundaries;
        # `done` is returned but not used here to mask that — confirm intended.
        rand_idx = np.random.randint(1,len(self.memory)-1,batch_size)
        next_rand_idx = rand_idx + 1
        prev_rand_idx = rand_idx - 1
        state = [self.memory[i] for i in rand_idx]
        next_state = [self.memory[i] for i in next_rand_idx]
        prev_state = [self.memory[i] for i in prev_rand_idx]
        # Reshape into (batch, agent, channel, H, W) / (batch, agent, features).
        prev_state_cnn = np.vstack([e.state_cnn for e in prev_state if e is not None]).reshape(batch_size,num_agents,1,84,84)
        pre_state_ir = np.vstack([e.state_ir for e in prev_state if e is not None])
        state_cnn = np.concatenate([e.state_cnn for e in state if e is not None]).reshape(batch_size,num_agents,1,84,84)
        state_ir = np.concatenate([e.state_ir for e in state if e is not None]).reshape(batch_size,num_agents,-1)
        next_state_cnn = np.concatenate([e.state_cnn for e in next_state if e is not None]).reshape(batch_size,num_agents,1,84,84)
        next_state_ir = np.concatenate([e.state_ir for e in next_state if e is not None]).reshape(batch_size,num_agents,-1)
        #action = torch.from_numpy(np.vstack([e.action for e in state if e is not None])).float()
        action = np.concatenate([e.action for e in state if e is not None]).reshape(batch_size,num_agents,2)
        reward = np.vstack([e.reward for e in state if e is not None])
        done = np.vstack([e.done for e in state if e is not None]).astype(np.uint8)
        # Stack consecutive frame pairs along the channel axis: the returned
        # "state" is (prev, current) and "next state" is (current, next).
        next_state_cnn = np.concatenate((state_cnn, next_state_cnn), 2)
        state_cnn = np.concatenate((prev_state_cnn, state_cnn), 2)
        return state_cnn, next_state_cnn,state_ir,next_state_ir, action, reward, done
#--------------------MADDPG------------------------
class MADDPG():
    # Multi-agent DDPG trainer: one CNN actor (over stacked frames) and one
    # centralized critic (over all agents' IR observations + actions) per agent.
    def __init__(self):
        self.actors = [Actor() for i in range(num_agents)]
        self.critics = [Critic(num_agents,dim_obs,dim_act) for i in range(num_agents)]
        self.actors_target = deepcopy(self.actors)
        self.critics_target = deepcopy(self.critics)
        # Previous frame per agent; concatenated with the current frame to
        # form the actors' 2-channel input (see get_new_cnn).
        self.last_state_cnn = np.zeros((num_agents,1,84,84))
        self.last_state_ir = np.zeros((num_agents,8))
        self.step = 0
        self.buffer = replay_memory()
        # Per-agent exploration-noise scale, annealed in select_action.
        self.var = [1 for i in range(num_agents)]
        #self.random_number = [random.uniform(-0.5,0.5) for i in range(num_agents)]
        # NOTE(review): both optimizers use lr=0, so as written no parameter
        # ever changes — confirm this placeholder is intended.
        self.critic_optimizer = [Adam(x.parameters(),lr=0) for x in self.critics]
        self.actor_optimizer = [Adam(x.parameters(),lr=0) for x in self.actors]
        if torch.cuda.is_available():
            for x in self.actors:
                x.cuda()
            for x in self.critics:
                x.cuda()
            for x in self.actors_target:
                x.cuda()
            for x in self.critics_target:
                x.cuda()
    def get_new_cnn(self,t):
        # Prepend the previous frame along the channel axis (axis=1).
        t = np.concatenate((self.last_state_cnn, t), axis=1)
        return t
    def learn(self):
        # One training round over all agents; skipped during pure exploration.
        if self.step <= explore_step:
            return
        ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
        FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        for agent in range(num_agents):
            state_cnn,next_state_cnn,state_ir,next_state_ir,action,reward,done = self.buffer.sample(batch_size)
            state_cnn = torch.from_numpy(state_cnn).type(Tensor)
            next_state_cnn = torch.from_numpy(next_state_cnn).type(Tensor)
            state_ir = torch.from_numpy(state_ir).type(Tensor)
            next_state_ir = torch.from_numpy(next_state_ir).type(Tensor)
            action = torch.from_numpy(action).type(Tensor)
            reward = torch.from_numpy(reward).type(Tensor)
            non_final_mask = ByteTensor(list(map(lambda s: s is not None, next_state_cnn)))
            non_final_next_states = torch.stack([s for s in next_state_cnn if s is not None]).type(FloatTensor)
            non_final_next_states_ir = torch.stack([s for s in next_state_ir if s is not None]).type(FloatTensor)
            #whole_state = state_cnn.view(batch_size, -1)
            # Critic update: TD target built from the target actors/critic.
            whole_state = state_ir.view(batch_size,-1)
            whole_action = action.view(batch_size,-1).type(Tensor)
            self.critic_optimizer[agent].zero_grad()
            current_Q = self.critics[agent](whole_state,whole_action)
            next_action = [self.actors_target[i](non_final_next_states[:,i,:,:,:]) for i in range(num_agents)]
            next_action = torch.stack(next_action)
            next_action = (next_action.transpose(0,1).contiguous())
            target_Q = torch.zeros(batch_size).type(Tensor)
            target_Q[non_final_mask] = self.critics_target[agent](non_final_next_states_ir.view(-1, num_agents * dim_obs),next_action.view(-1, num_agents * dim_act)).squeeze()
            target_Q = (target_Q.unsqueeze(1) * GAMMA) + (reward[:,agent].unsqueeze(1) * scale_reward)
            loss_Q = nn.MSELoss()(current_Q,target_Q.detach())
            #print(loss_Q)
            loss_Q.backward()
            self.critic_optimizer[agent].step()
            # Actor update: gradient ascent on the critic's score of this
            # agent's own action (other agents' actions held fixed).
            self.actor_optimizer[agent].zero_grad()
            state_i = state_cnn[:, agent, :,:,:]
            action_i = self.actors[agent](state_i)
            ac = action.clone()
            ac[:, agent, :] = action_i
            whole_action = ac.view(batch_size, -1).type(Tensor)
            actor_loss = -self.critics[agent](whole_state, whole_action)
            #print(actor_loss)
            actor_loss = actor_loss.mean()
            actor_loss.backward()
            self.actor_optimizer[agent].step()
            #c_loss.append(loss_Q)
            #a_loss.append(actor_loss)
        # Periodically blend the online networks into the targets.
        if self.step % 100 == 0:
            for i in range(num_agents):
                soft_update(self.critics_target[i],self.critics[i],tau)
                soft_update(self.actors_target[i],self.actors[i],tau)
    def select_action(self,obs,done):
        # Compute per-agent continuous actions with annealed Gaussian
        # exploration noise; also return the discretized action ids.
        actions = np.zeros((num_agents,dim_act))
        #try_l = np.zeros((num_agents*dim_act,))
        obs = get_obs_cnn(obs).unsqueeze(1)
        new_state_cnn = self.get_new_cnn(obs)
        act_lst = []
        for i in range(num_agents):
            sb = torch.from_numpy(new_state_cnn[i,:]).type(Tensor).unsqueeze(0)
            #print(sb)
            act = self.actors[i].forward(sb).squeeze()
            act += torch.from_numpy(np.random.randn(2) *self.var[i]).type(Tensor)
            # Decay exploration noise once the exploration phase is over.
            if self.step > explore_step and self.var[i] > 0.05:
                self.var[i] *= 0.99
            act = torch.clamp(act, 0,1)
            act_np = act.detach().cpu().numpy()
            #act_np = np.round(act_np,1)
            actions[i,:] = act_np
            action_number = act2lst(act_np)
            act_lst.append(action_number)
        # Remember the current frame for next step's stacking; reset on done.
        # NOTE(review): after the first step last_state_cnn holds a torch
        # tensor while its initial value is a numpy array — confirm the mixed
        # types are handled by np.concatenate as intended.
        if done.item(0) != True:
            self.last_state_cnn = obs
            self.last_action = actions
        elif done.item(0) == True:
            self.last_state_cnn = np.zeros((num_agents,1, 84, 84))
            self.last_action = np.zeros((num_agents, dim_act))
        self.step += 1
        print('step:',self.step)
        return actions,act_lst
    def store_experience(self,obs,action,reward,done):
        # Push one multi-agent transition into the replay buffer.
        state_cnn = get_obs_cnn(obs)
        state_ir = get_obs_ir(obs)
        self.buffer.add(state_cnn,state_ir,action,reward,done)
|
991,570 | 480f501fe2cd6e2a61b1f498b6732f3b24fa23e0 | #Create your own implementation of a built-in function enumerate, named `with_index`,
# which takes two parameters: `iterable` and `start`, default is 0.
# Tips: see the documentation for the enumerate function
iterable = ['one', 'two', 'three', 'four', 'five']
def with_index(iterable, start=1):
    """Yield (index, element) pairs counting from `start` — a hand-rolled enumerate.

    NOTE(review): the exercise statement above asks for a default start of 0,
    but this implementation (and the demo call that follows it) uses 1 —
    confirm the intent before changing the default.
    """
    counter = start
    for item in iterable:
        yield counter, item
        counter += 1
print(list(with_index(iterable)))
class With_Index:
    """Iterator class replicating enumerate(data, start).

    Bug fix: the original returned self.data[self.index - 1] and compared the
    running *index* against len(data), so with start=0 the first pair combined
    index 0 with the last element, and one extra pair was produced. The data
    position is now tracked separately from the reported index.

    (The original Russian comment asked why `yield` inside __next__ loops
    forever: a `yield` turns __next__ into a generator function, so each call
    returns a fresh generator object instead of the next value and
    StopIteration is never raised.)
    """

    def __init__(self, data, start=0):
        self.data = data
        self.index = start   # running index reported to the caller
        self.res = ''        # last yielded pair (kept for interface parity)
        self._pos = 0        # position within data, independent of `start`

    def __iter__(self):
        return self

    def __next__(self):
        if self._pos >= len(self.data):
            raise StopIteration
        self.res = (self.index, self.data[self._pos])
        self.index += 1
        self._pos += 1
        return self.res
print(list(With_Index(iterable))) |
991,571 | ef227424fe7b9bde8b8c3bb17015ac08b1ad1f97 | #!/usr/bin/python
# 2.12 Lab 3 tf examples
# Peter Yu Sept 2016
import rospy
import tf
import numpy as np
import threading
import serial
import tf.transformations as tfm
from helper import transformPose, pubFrame, cross2d, lookupTransform, pose2poselist, poselist2pose, invPoselist
def main():
    # Demo of the lab's pose-list helpers and tf frame lookups (Python 2 / ROS).
    rospy.init_node('apriltag_navi', anonymous=True)
    lr = tf.TransformListener()
    br = tf.TransformBroadcaster()
    rospy.sleep(0.1)  # give the listener a moment to buffer transforms
    # Pose-list layout (from the helper module): [x, y, z, qx, qy, qz, qw].
    poselist_tag_map = [0,0,0.44,0.5,0.5,0.5,0.5]
    print 'poselist_tag_map', poselist_tag_map, '\n'
    pose_tag_map = poselist2pose(poselist_tag_map)
    print 'poselist2pose(poselist_tag_map):\n', pose_tag_map, '\n'
    # Invert the transform: tag->map becomes map->tag.
    poselist_map_tag = invPoselist(poselist_tag_map)
    print 'invPoselist(poselist_tag_map):', poselist_map_tag, '\n'
    # Same transforms obtained through the tf tree instead of by hand.
    poselist_tag_map_bylookup = lookupTransform(lr, sourceFrame = '/apriltag', targetFrame = '/map')
    print "lookupTransform(poselist_tag_map, sourceFrame = '/tag', targetFrame = '/map'):", poselist_tag_map_bylookup, '\n'
    poselist_map_tag_bylookup = lookupTransform(lr, sourceFrame = '/map', targetFrame = '/apriltag')
    print "lookupTransform(poselist_tag_map, sourceFrame = '/map', targetFrame = '/apriltag'):", poselist_map_tag_bylookup, '\n'
    # Express a pose given in the apriltag frame in the map frame instead.
    poselist_base_tag = [0,0,1,0,0,0,1]
    poselist_base_map = transformPose(lr, poselist_base_tag, sourceFrame = '/apriltag', targetFrame = '/map')
    print "transformPose(poselist_tag_map, sourceFrame = '/apriltag', targetFrame = '/map'):", poselist_base_map, '\n'
    # Broadcast the result repeatedly (about 10 s at 10 Hz) so it is visible.
    for i in xrange(100):
        pubFrame(br, pose = poselist_base_map, frame_id = '/robot_base', parent_frame_id = '/map')
        rospy.sleep(0.1)
if __name__=='__main__':
main()
|
991,572 | ec842e2987a30a22fb0b71ee22a45dd9effaa317 | # encoding=utf-8
import logging
import os
import sys
import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import fire
def main(**kwargs):
    """Train a word2vec model from a pre-segmented corpus and save text-format vectors.

    Expected keyword arguments (all required):
      inSegFile     -- path to the segmented corpus (one sentence per line)
      outVectorFile -- where to write the text-format word vectors
      sizeNum       -- embedding dimensionality
      windowNum     -- context window size
      min_countNum  -- minimum token frequency to keep a word

    Fix: the original bound the settings inside a scan over kwargs, so a
    missing argument only surfaced later as a NameError; direct indexing now
    fails fast with a KeyError naming the missing option.
    """
    for k, v in kwargs.items():
        print(k)
        print(v)
    inSegFile = kwargs['inSegFile']
    outVectorFile = kwargs['outVectorFile']
    sizeNum = kwargs['sizeNum']
    windowNum = kwargs['windowNum']
    min_countNum = kwargs['min_countNum']
    model = Word2Vec(LineSentence(inSegFile), size=sizeNum, window=windowNum, min_count=min_countNum,
                     workers=multiprocessing.cpu_count())
    model.wv.save_word2vec_format(outVectorFile, binary=False)
if __name__ == '__main__':
    # Configure INFO-level logging for the script, then hand control to fire,
    # which builds a command-line interface from this module's callables.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format="%(asctime)s: %(levelname)s: %(message)s")
    logging.root.setLevel(level=logging.INFO)
    fire.Fire()
|
991,573 | d0c1376ab6d6fe9de17f628ab013016e66a29c0d | from __future__ import division
import numpy as np
import tensorflow as tf
np.random.seed(1234)
class PolicyAdaptive(object):
    """First-order parameter updaters (sgd / momentum / adam / ladam) with state.

    `lambda_` is the step size. Moving averages persist across calls until
    reset_moving_average() is invoked.
    """

    def __init__(self, step_size, method):
        self.lambda_ = step_size
        self.method = method
        self.alpha_ = 0.9    # momentum decay
        self.beta1_ = 0.9    # adam/ladam first-moment decay
        self.beta2_ = 0.5    # adam/ladam second-moment decay
        self.beta3_ = 0.5    # ladam loss moving-average decay
        self.degree_ = 2     # exponent on the loss rescale factor (ladam)
        self.eps_ = 1e-8
        self.momentum = None
        self.mean_square = None
        self.loss = None
        self.n_iter = 0

    def reset_moving_average(self):
        """Drop all accumulated state (moments, loss average, iteration count)."""
        self.momentum = None
        self.mean_square = None
        self.loss = None
        self.n_iter = 0

    def apply_gradient(self, theta, grad, loss):
        """Apply one update step to `theta` in place and return it.

        Bug fix: the original placed `self.n_iter += 1` after an unconditional
        raise/return at the end of the method, so the counter never advanced
        and the adam bias corrections always used n_iter == 0. The counter is
        now incremented once per successful update.

        Raises NotImplementedError for an unknown `method` (counter untouched).
        """
        if self.method == 'sgd':
            theta -= self.lambda_ * grad
        elif self.method == 'momentum':
            if self.momentum is not None:
                self.momentum = self.alpha_ * self.momentum + self.lambda_ * grad
            else:
                self.momentum = self.lambda_ * grad
            # batch_shape = theta.shape
            # rescale = tf.expand_dims(tf.expand_dims(tf.reshape(tf.clip_by_value((loss + 0.5), clip_value_min=0.0, clip_value_max=10000.0), (batch_shape[0],1)), axis=1), axis=1)
            # theta -= self.momentum * rescale
            theta -= self.momentum
        elif self.method == 'adam':
            if self.momentum is not None:
                self.momentum = self.beta1_ * self.momentum + (1. - self.beta1_) * grad
            else:
                self.momentum = grad
            if self.mean_square is not None:
                self.mean_square = self.beta2_ * self.mean_square + (1. - self.beta2_) * grad**2
            else:
                self.mean_square = grad**2
            # Bias-corrected moments (now actually advancing with n_iter).
            m_k_hat = self.momentum / (1. - self.beta1_**(self.n_iter+1))
            r_k_hat = self.mean_square / (1. - self.beta2_**(self.n_iter+1))
            theta -= self.lambda_ * m_k_hat / (tf.sqrt(r_k_hat) + self.eps_)
        elif self.method == 'ladam':
            # Adam moments plus a moving average of the loss used to rescale
            # the step per sample.
            if self.momentum is not None:
                self.momentum = self.beta1_ * self.momentum + (1. - self.beta1_) * grad
            else:
                self.momentum = grad
            if self.mean_square is not None:
                self.mean_square = self.beta2_ * self.mean_square + (1. - self.beta2_) * grad**2
            else:
                self.mean_square = grad**2
            if self.loss is not None:
                self.loss = self.beta3_ * self.loss + (1. - self.beta3_) * loss
            else:
                self.loss = loss
            if theta.shape[1] > 2:
                batch_shape = theta.shape
                dX = tf.reshape(self.lambda_ * self.momentum / (tf.sqrt(self.mean_square) + self.eps_), (batch_shape[0],-1))
                rescale = tf.reshape(tf.clip_by_value((self.loss + 0.5), clip_value_min=0.0, clip_value_max=10000.0), (batch_shape[0],1))
                theta = tf.reshape(theta, (batch_shape[0],-1))
                theta -= dX * (rescale ** self.degree_)
                theta = tf.reshape(theta, batch_shape)
            else:
                # NOTE(review): this narrow-theta path mixes numpy (np.sqrt,
                # .clip) with the tf path above — confirm inputs here really
                # are numpy arrays.
                theta -= self.lambda_ * self.momentum / (np.sqrt(self.mean_square) + self.eps_) * (np.expand_dims( (self.loss+0.5).clip(min=0.0), axis=1) ** self.degree_)
        else:
            raise NotImplementedError
        self.n_iter += 1
        return theta
991,574 | 7fbc27be5a79644de05bf5eab98e09adb0f4b95a | import pandas as pd
from datetime import datetime
# Build an NYC community-district recycling diversion-rate table from DSNY
# monthly tonnage data plus census population, and write it to CSV.
data = pd.read_csv('Data/DSNY_Monthly_Tonnage_Data.csv')
# Create accurate borocd numbers
# Zero-pad single-digit community-district numbers so concatenating with the
# borough id yields a fixed-width BoroCD code.
# NOTE(review): the padded branch appends a str while the other keeps the
# original value's type; the astype(str) below papers over the mix — confirm
# the resulting codes align.
district = []
for i in data['COMMUNITYDISTRICT']:
    if len(str(i)) == 1:
        i = "0" + str(i)
        district.append(i)
    else:
        district.append(i)
data['COMMUNITYDISTRICT'] = district
# join to create borocd
data['BoroCD'] = data['BOROUGH_ID'].astype(str) + data['COMMUNITYDISTRICT'].astype(str)
# Format Time in order to subset latter
data['Collection_Date'] = [datetime.strptime(x, '%Y / %m') for x in data['MONTH']]
# set time as index to subset later
data.set_index('Collection_Date', inplace=True)
# Read population
pop = pd.read_excel('Data/t_pl_p1_cd.xlsx', skiprows=5)
# Subset
pop.dropna(inplace=True)
pop.columns = ['BoroCD','Community_District', '1970', '1980','1990','2000','2010','Number','Percent']
pop = pop[['BoroCD', '2010']]
# Clean Data
district = []
for i in pop['BoroCD']:
    i = str(i).strip()
    if len(str(i)) == 1:
        i = "0" + str(i)
        district.append(i)
    else:
        district.append(str(i))
pop['BoroCD'] = district
# Create proper BoroCD column
# Prefix each positional row range with its borough digit.
# NOTE(review): chained indexing (pop['BoroCD'][a:b] = ...) triggers pandas
# SettingWithCopyWarning and may not write through on newer versions; .loc
# would be safer — left unchanged here. Also confirm the hard-coded row
# ranges still match the spreadsheet's borough ordering.
pop['BoroCD'][0:12] = ["2" + str(x) for x in pop['BoroCD'][0:12]]
pop['BoroCD'][12:30] = ["3" + str(x) for x in pop['BoroCD'][12:30]]
pop['BoroCD'][30:42] = ["1" + str(x) for x in pop['BoroCD'][30:42]]
pop['BoroCD'][42:56] = ["4" + str(x) for x in pop['BoroCD'][42:56]]
pop['BoroCD'][56:59] = ["5" + str(x) for x in pop['BoroCD'][56:59]]
# Subset 2014 data
tons_2014 = data['2014'].groupby(['BoroCD'])['REFUSETONSCOLLECTED', 'PAPERTONSCOLLECTED','MGPTONSCOLLECTED'].sum()
# Merge population data with tonnage data
tons = tons_2014.reset_index().merge(pop)
# Add Paper recycling and MGP recycling for total recycling
tons['Total_Recycling'] = tons['PAPERTONSCOLLECTED'] + tons['MGPTONSCOLLECTED']
# Calculate Diversion Rate
tons['Diversion_Rate'] = tons['Total_Recycling'] / (tons['REFUSETONSCOLLECTED'] + tons['Total_Recycling'])
# Calculate Per Capita Diversion Rate - multiple by 100000 to have reasonable numbers
tons['Per_Capita_Diversion_Rate'] = (tons['Diversion_Rate'] / tons['2010']) * 100000
tons.to_csv('Data/diversion_rate.csv', index=False)
991,575 | a6341e9c7295b1d51b51efb4e3baf42ca6878ffa | import torch
import torch.nn as nn
import torch.nn.functional as F
BN_EPS = 1e-4
class ConvBnRelu2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> ReLU, the basic U-Net building block."""

    def __init__(self, in_channels, out_channels, kernel_size=(3, 3), padding=1):
        super(ConvBnRelu2d, self).__init__()
        # bias=False because BatchNorm immediately re-centers the activations.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_channels, eps=BN_EPS)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class StackEncoder(nn.Module):
    """Two ConvBnRelu2d layers followed by 2x2 max-pooling.

    forward() returns both the pre-pool feature map (for the skip connection)
    and the pooled, half-resolution map.

    Bug fix: the default kernel_size used to be the tuple (3, 3), which
    crashed in `(kernel_size - 1) // 2` (tuple minus int). Every call site
    passes an int, so the default is now the equivalent int 3 and the
    no-argument form actually works.
    """

    def __init__(self, x_channels, y_channels, kernel_size=3):
        super(StackEncoder, self).__init__()
        padding = (kernel_size - 1) // 2  # "same" padding for odd kernels
        self.encode = nn.Sequential(
            ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=padding),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding),
        )

    def forward(self, x):
        x = self.encode(x)
        x_small = F.max_pool2d(x, kernel_size=2, stride=2)
        return x, x_small
class StackDecoder(nn.Module):
    """Upsample to the skip tensor's size, concatenate, then refine with three convs."""

    def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3):
        super(StackDecoder, self).__init__()
        padding = (kernel_size - 1) // 2
        self.decode = nn.Sequential(
            ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding),
        )

    def forward(self, x, down_tensor):
        _, channels, height, width = down_tensor.size()
        # F.upsample is deprecated in newer torch in favour of F.interpolate,
        # but is kept to preserve behaviour exactly.
        upsampled = F.upsample(x, size=(height, width), mode='bilinear')
        merged = torch.cat([upsampled, down_tensor], 1)
        return self.decode(merged)
# 32x32
# 32x32
class UNet270480(nn.Module):
    """Five-level U-Net over 3-channel images producing 3-channel output maps."""

    def __init__(self, in_shape):
        super(UNet270480, self).__init__()
        channels, height, width = in_shape
        # Encoder: channel count grows while resolution halves at each stage.
        # (Submodule creation order is kept identical to the original so that
        # seeded parameter initialisation matches.)
        self.down1 = StackEncoder(3, 24, kernel_size=3)     # 256
        self.down2 = StackEncoder(24, 64, kernel_size=3)    # 128
        self.down3 = StackEncoder(64, 128, kernel_size=3)   # 64
        self.down4 = StackEncoder(128, 256, kernel_size=3)  # 32
        self.down5 = StackEncoder(256, 512, kernel_size=3)  # 16
        # Decoder mirrors the encoder, consuming skip connections.
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)  # 32
        self.up4 = StackDecoder(256, 256, 128, kernel_size=3)  # 64
        self.up3 = StackDecoder(128, 128, 64, kernel_size=3)   # 128
        self.up2 = StackDecoder(64, 64, 24, kernel_size=3)     # 256
        self.up1 = StackDecoder(24, 24, 24, kernel_size=3)     # 512
        self.classify = nn.Conv2d(24, 3, kernel_size=1, bias=True)
        self.center = nn.Sequential(ConvBnRelu2d(512, 512, kernel_size=3, padding=1))

    def forward(self, x):
        skip1, out = self.down1(x)
        skip2, out = self.down2(out)
        skip3, out = self.down3(out)
        skip4, out = self.down4(out)
        skip5, out = self.down5(out)
        out = self.center(out)
        out = self.up5(out, skip5)
        out = self.up4(out, skip4)
        out = self.up3(out, skip3)
        out = self.up2(out, skip2)
        out = self.up1(out, skip1)
        out = self.classify(out)
        # squeeze(dim=1) is a no-op for the 3-channel output; kept for parity.
        return torch.squeeze(out, dim=1)
class UNet_small(nn.Module):
    """Single-level U-Net: one encoder stage, a center block, one decoder stage."""

    def __init__(self, in_shape):
        super(UNet_small, self).__init__()
        channels, height, width = in_shape
        self.down1 = StackEncoder(3, 24, kernel_size=3)      # 512
        self.up1 = StackDecoder(24, 24, 24, kernel_size=3)   # 512
        self.classify = nn.Conv2d(24, 3, kernel_size=1, bias=True)
        self.center = nn.Sequential(
            ConvBnRelu2d(24, 24, kernel_size=3, padding=1),
        )

    def forward(self, x):
        skip, out = self.down1(x)
        out = self.center(out)
        out = self.up1(out, skip)
        out = self.classify(out)
        # squeeze(dim=1) is a no-op for the 3-channel output; kept for parity.
        return torch.squeeze(out, dim=1)
|
991,576 | 9aa0b8b21d5e85bd22620efe5ce0cf53ddbb1a10 | from flask import Blueprint
from flask_restful import Api
public_api_bp = Blueprint('public_api', __name__)
public_api = Api(public_api_bp)
from . import public_main
from . import public_verify
from .. import errors
|
991,577 | 8944d32bf2f488db526606838e8fd0fa9646f210 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class students(models.Model):
    """Student records; value2 is a stored percentage derived from value."""
    _name = 'ai.table'

    name = fields.Char()
    value = fields.Integer()
    value2 = fields.Float(compute="_value_pc", store=True)
    description = fields.Text()
    value3 = fields.Text()
    group_id = fields.Many2one(comodel_name="mymod.groups")

    @api.depends('value')
    def _value_pc(self):
        # Fix: Odoo compute methods receive a recordset; assigning through
        # `self` directly raises "expected singleton" as soon as more than one
        # record is recomputed (likely here with store=True). Iterate instead.
        for record in self:
            record.value2 = float(record.value) / 100
class groups(models.Model):
    """Student groups; value2 is a stored percentage derived from value."""
    _name = 'mymod.groups'

    name = fields.Char()
    value = fields.Integer()
    value2 = fields.Float(compute="_value_pc", store=True)
    description = fields.Text()
    students_ids = fields.One2many(comodel_name="ai.table", inverse_name="group_id")

    @api.depends('value')
    def _value_pc(self):
        # Fix: compute methods must iterate the recordset — assigning through
        # `self` fails with "expected singleton" on multi-record recomputes.
        for record in self:
            record.value2 = float(record.value) / 100
991,578 | 2fcc88e4bbf4249f3f763341894d916a450b052e | ## busquedas
lista = [19, 5, 9, 5, 33, 87, 12]

# Sum and print every element >= 10.
# Fix applied throughout: the original hard-coded range(0, 7), which silently
# breaks (IndexError or missed elements) if the list's length changes; all
# loops now derive their bounds from len(lista).
suma = 0
for valor in lista:
    if valor >= 10:
        print(valor)
        suma = suma + valor
print("Total: ", suma)

# Linear search: position of the LAST occurrence of the requested value
# (-1 if absent), matching the original scan direction.
val = int(input("V: "))
pos = -1
for i in range(len(lista)):
    if lista[i] == val:
        pos = i
print("Posicion : ", pos)

# Position and value of the maximum (first occurrence wins ties).
pos = 0
for i in range(len(lista)):
    if lista[i] > lista[pos]:
        pos = i
print("Pos Mayor : ", pos)
print("Val Mayor : ", lista[pos])

# Position and value of the minimum (first occurrence wins ties).
pos = 0
for i in range(len(lista)):
    if lista[i] < lista[pos]:
        pos = i
print("Pos Menor : ", pos)
print("Val Menor : ", lista[pos])
|
991,579 | 99044ff123d4ce9c0b78760c1eaac1f14a8feec3 | # -*- coding: utf-8 -*-
"""
Created Sat Mar 17 10:50:37 2018
@author: DeepLearning
"""
import sys
import os
import mxnet as mx
import numpy as np
import pandas as pd
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
from sklearn.manifold import TSNE
from utilities import *
import matplotlib.pyplot as plt
import seaborn as sns
from utilities import *
import sklearn.neighbors
import matplotlib.patches as mpatches
from sklearn.utils.linear_assignment_ import linear_assignment
try:
import cPickle as pickle
except:
import pickle
import gzip
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from scipy import interp
def cluster_acc(Y_pred, Y):
    """Unsupervised clustering accuracy (ACC).

    Builds the contingency matrix between predicted cluster labels and
    ground-truth labels, then finds the label permutation maximizing agreement
    via optimal (Hungarian) assignment.

    Returns (accuracy in [0, 1], contingency matrix w), where w[i, j] counts
    samples predicted i whose ground truth is j.

    Fix: `sklearn.utils.linear_assignment_` was deprecated and removed from
    scikit-learn; `scipy.optimize.linear_sum_assignment` solves the same
    minimum-cost assignment problem and is the documented replacement.
    """
    from scipy.optimize import linear_sum_assignment
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(Y_pred.size):
        # rows are predictions, columns are ground truth
        w[Y_pred[i], Y[i]] += 1
    # Maximize matched counts == minimize (w.max() - w).
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / Y_pred.size, w
class DECModel(model.MXModel):
    class DECLoss(mx.operator.NumpyOp):
        # Custom MXNet numpy operator for the DEC clustering loss: computes
        # soft cluster assignments q via a Student's-t kernel and the
        # gradients w.r.t. both the embeddings and the cluster centroids.
        def __init__(self, num_centers, alpha):
            super(DECModel.DECLoss, self).__init__(need_top_grad=False)
            self.num_centers = num_centers
            self.alpha = alpha
        def forward(self, in_data, out_data):
            z = in_data[0]   # embedded points, 2-D per infer_shape
            mu = in_data[1]  # cluster centroids
            q = out_data[0]  # soft assignments, (N, num_centers)
            ## eq. 1 use the Students t-distribution as a kernel to measure the similarity between embedded point zi and centroid mu j
            self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
            q[:] = self.mask**((self.alpha+1.0)/2.0)
            q[:] = (q.T/q.sum(axis=1)).T  # normalize rows into distributions
        def backward(self, out_grad, in_data, out_data, in_grad):
            q = out_data[0]
            z = in_data[0]
            mu = in_data[1]
            p = in_data[2]   # target distribution (passed in as "label")
            dz = in_grad[0]
            dmu = in_grad[1]
            # Reuse the kernel values cached in forward(), scaled by (p - q).
            self.mask *= (self.alpha+1.0)/self.alpha*(p-q) #
            # The gradients of L with respect to feature space embedding of each data point zi and each cluster centroid mu j are computed as:
            dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu) # eq. 4
            dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z) # eq.5
        def infer_shape(self, in_shape):
            # Inputs: data (N, d), mu (K, d), label (N, K); output (N, K).
            assert len(in_shape) == 3
            assert len(in_shape[0]) == 2
            input_shape = in_shape[0]
            label_shape = (input_shape[0], self.num_centers)
            mu_shape = (self.num_centers, input_shape[1])
            out_shape = (input_shape[0], self.num_centers)
            return [input_shape, mu_shape, label_shape], [out_shape]
        def list_arguments(self):
            return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, znum, save_to='dec_model'):
# Read previously trained _SAE
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,znum], pt_dropout=0.2)
ae_model.load( os.path.join(save_to,'SAE_zsize{}_wimgfeatures_descStats_zeromean.arg'.format(str(znum))) ) #_Nbatch_wimgfeatures
logging.log(logging.INFO, "Reading Autoencoder from file..: %s"%(os.path.join(save_to,'SAE_zsize{}_wimgfeatures_descStats_zeromean.arg'.format(znum))) )
self.ae_model = ae_model
logging.log(logging.INFO, "finished reading Autoencoder from file..: ")
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
self.best_args = {}
self.best_args['num_centers'] = num_centers
self.best_args['znum'] = znum
def cluster(self, X_train, y_dec_train, y_train, classes, batch_size, save_to, labeltype, update_interval, logger):
N = X_train.shape[0]
self.best_args['update_interval'] = update_interval
self.best_args['y_dec'] = y_dec_train
self.best_args['roi_labels'] = y_train
self.best_args['classes'] = classes
self.best_args['batch_size'] = batch_size
self.logger = logger
# selecting batch size
# [42*t for t in range(42)] will produce 16 train epochs
# [0, 42, 84, 126, 168, 210, 252, 294, 336, 378, 420, 462, 504, 546, 588, 630]
test_iter = mx.io.NDArrayIter({'data': X_train},
batch_size=N, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
## embedded point zi
self.z = model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values()[0]
# For visualization we use t-SNE (van der Maaten & Hinton, 2008) applied to the embedded points zi. It
self.perplexity = 5
self.learning_rate = 125
# reconstruct wordy labels list(Y)==named_y
named_y = [classes[kc] for kc in y_dec_train]
self.best_args['named_y'] = named_y
# To initialize the cluster centers, we pass the data through
# the initialized DNN to get embedded data points and then
# perform standard k-means clustering in the feature space Z
# to obtain k initial centroids {mu j}
kmeans = KMeans(self.best_args['num_centers'], n_init=20)
kmeans.fit(self.z)
args['dec_mu'][:] = kmeans.cluster_centers_
figprogress = plt.figure(figsize=(20, 15))
print 'Batch_size = %f'% self.best_args['batch_size']
print 'update_interval = %f'% update_interval
self.best_args['plot_interval'] = int(8*update_interval)
print 'plot_interval = %f'% self.best_args['plot_interval']
self.best_args['y_pred'] = np.zeros((X_train.shape[0]))
self.best_args['meanAuc_cv'] = []
self.best_args['std_auc'] = []
self.best_args['auc_val'] = []
self.best_args['overall_metric'] = []
self.ploti = 0
self.maxAUC = 10000.0
### Define DEC training varialbes
label_buff = np.zeros((X_train.shape[0], self.best_args['num_centers']))
train_iter = mx.io.NDArrayIter({'data': X_train},
{'label': label_buff},
batch_size=self.best_args['batch_size'],
shuffle=True, last_batch_handle='roll_over')
### KL DIVERGENCE MINIMIZATION. eq(2)
# our model is trained by matching the soft assignment to the target distribution.
# To this end, we define our objective as a KL divergence loss between
# the soft assignments qi (pred) and the auxiliary distribution pi (label)
solver = Solver('sgd',learning_rate=0.1,lr_scheduler=mx.misc.FactorScheduler(100,0.1)) ### original: 0.01, try1: Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.000125, lr_scheduler=mx.misc.FactorScheduler(20*update_interval,0.5)) try 2: Solver('sgd', momentum=0.6, wd=0.05, learning_rate=0.00125, lr_scheduler=mx.misc.FactorScheduler(20*update_interval,0.5))
#solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
DECmetric = np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
print("DECmetric = {}".format(DECmetric))
#####################
# Z-space MLP fully coneected layer for classification
#####################
batch_size = 50
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=5, random_state=3)
# Evaluate a score by cross-validation
tprs = []; aucs = []
mean_fpr = np.linspace(0, 1, 100)
cvi = 0
for train, test in cv.split(self.Z_train, self.yZ_train):
# Multilayer Perceptron
MLP_train_iter = mx.io.NDArrayIter(self.Z_train[train], self.yZ_train[train], batch_size, shuffle=True)
MLP_val_iter = mx.io.NDArrayIter(self.Z_train[test], self.yZ_train[test], batch_size)
# We’ll define the MLP using MXNet’s symbolic interface
dataMLP = mx.sym.Variable('data')
#The following code declares two fully connected layers with 128 and 64 neurons each.
#Furthermore, these FC layers are sandwiched between ReLU activation layers each
#one responsible for performing an element-wise ReLU transformation on the FC layer output.
# The first fully-connected layer and the corresponding activation function
fc1 = mx.sym.FullyConnected(data=dataMLP, num_hidden = 128)
act1 = mx.sym.Activation(data=fc1, act_type="relu")
fc2 = mx.sym.FullyConnected(data=act1, num_hidden = 32)
act2 = mx.sym.Activation(data=fc2, act_type="relu")
# data has 2 classes
fc3 = mx.sym.FullyConnected(data=act2, num_hidden=2)
# Softmax with cross entropy loss
mlp = mx.sym.SoftmaxOutput(data=fc3, name='softmax')
# create a trainable module on CPU
#mon = mx.mon.Monitor(interval=100, pattern='.*', sort=True); # Defaults to mean absolute value |x|/size(x)
#checkpoint = mx.callback.do_checkpoint('mlp_model_params_z{}_mu{}.arg'.format(self.best_args['znum'],self.best_args['num_centers']))
self.mlp_model = mx.mod.Module(symbol=mlp, context=mx.cpu())
self.mlp_model.fit(MLP_train_iter, # train data
monitor=None,
optimizer='sgd', # use SGD to train
optimizer_params={'learning_rate':0.1}, # use fixed learning rate
eval_metric= 'acc', #MLPacc(yZ_val, Z_val), # report accuracy during trainin
num_epoch=100)
#epoch_end_callbackcheckpoint) # train for at most 10 dataset passes. extras: #monitor=mon,
#After the above training completes, we can evaluate the trained model by running predictions on validation data.
#The following source code computes the prediction probability scores for each validation data.
# prob[i][j] is the probability that the i-th validation contains the j-th output class.
prob_val = self.mlp_model.predict(MLP_val_iter)
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(self.yZ_train[test], prob_val.asnumpy()[:,1])
# to create an ROC with 100 pts
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
print roc_auc
aucs.append(roc_auc)
cvi += 1
# compute across all cvs
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
print r'cv meanROC (AUC = {0:.4f} $\pm$ {0:.4f})'.format(mean_auc, std_auc)
Z_test_iter = mx.io.NDArrayIter(self.Z_test, None, batch_size)
prob_test = self.mlp_model.predict(Z_test_iter)
# Compute ROC curve and area the curve
fpr_val, tpr_val, thresholds_val = roc_curve(self.yZ_test, prob_test.asnumpy()[:,1])
self.auc_val = auc(fpr_val, tpr_val)
print r'cv test (AUC = {0:.4f})'.format(self.auc_val)
# compute Z-space metric
overall_metric = -np.log(mean_auc) -np.log(1-DECmetric) #np.log(1-mean_auc) + np.log(DECmetric)
print("overall_metric: DEC+MLP = {}".format(overall_metric))
self.best_args['overall_metric'].append(overall_metric)
if(overall_metric <= self.maxAUC):
print '================== Improving auc_val = {}'.format(self.auc_val)
for key, v in args.items():
self.best_args[key] = args[key]
self.best_args['meanAuc_cv'].append(mean_auc)
self.best_args['std_auc'].append(std_auc)
self.best_args['auc_val'].append(self.auc_val)
self.best_args['pbestacci'] = self.p
self.best_args['zbestacci'] = self.z
self.best_args['dec_mu'][:] = args['dec_mu'].asnumpy()
#self.best_args['mlp_model'] = self.mlp_model
self.mlp_model.save_params(os.path.join(save_to,'mlp_model_params_z{}_mu{}.arg'.format(self.best_args['znum'],self.best_args['num_centers'])))
self.maxAUC = overall_metric
return overall_metric
def refresh(i): # i=3, a full epoch occurs every i=798/48
if i%self.best_args['update_interval'] == 0:
self.z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
self.p = np.zeros((self.z.shape[0], self.best_args['num_centers']))
self.dec_op.forward([self.z, args['dec_mu'].asnumpy()], [self.p])
self.best_args['dec_mu'] = args['dec_mu']
# the soft assignments qi (pred)
y_pred = self.p.argmax(axis=1)
print np.std(np.bincount(y_pred)), np.bincount(y_pred)
## COMPUTING target distributions P
## we compute pi by first raising qi to the second power and then normalizing by frequency per cluster:
print '\n... Updating i = %f' % i
weight = 1.0/self.p.sum(axis=0) # p.sum provides fj
weight *= self.best_args['num_centers']/weight.sum()
self.p = (self.p**2)*weight
train_iter.data_list[1][:] = (self.p.T/self.p.sum(axis=1)).T
#print np.sum(y_pred != self.best_args['y_pred']), 0.001*y_pred.shape[0]
#####################
# prep Z-space MLP fully coneected layer for classification
#####################
# compare soft assignments with known labels (only B or M)
print '\n... Updating MLP fully coneected layer i = %f' % i
sep = int(self.z.shape[0]*0.10)
print(self.z.shape)
datalabels = np.asarray(self.best_args['roi_labels'])
dataZspace = np.concatenate((self.z, self.p), axis=1) #zbestacci #dec_model['zbestacci']
Z = dataZspace[datalabels!='K',:]
y = datalabels[datalabels!='K']
print(Z)
# Do a 5 fold cross-validation
self.Z_test = Z[:sep]
self.yZ_test = np.asanyarray(y[:sep]=='M').astype(int)
self.Z_train = Z[sep:]
self.yZ_train = np.asanyarray(y[sep:]=='M').astype(int)
print(self.Z_test.shape)
print(self.Z_train.shape)
if(i==0):
self.tsne = TSNE(n_components=2, perplexity=self.perplexity, learning_rate=self.learning_rate,
init='pca', random_state=0, verbose=2, method='exact')
self.Z_tsne = self.tsne.fit_transform(dataZspace)
# plot initial z
figinint = plt.figure()
axinint = figinint.add_subplot(1,1,1)
plot_embedding_unsuper_NMEdist_intenh(self.Z_tsne, named_y, axinint, title='kmeans init tsne:\n', legend=True)
figinint.savefig('{}//tsne_init_z{}_mu{}_{}.pdf'.format(save_to,self.best_args['znum'],self.best_args['num_centers'],labeltype), bbox_inches='tight')
plt.close()
if(i>0 and i%self.best_args['plot_interval']==0 and self.ploti<=15):
# Visualize the progression of the embedded representation in a subsample of data
# For visualization we use t-SNE (van der Maaten & Hinton, 2008) applied to the embedded points zi. It
tsne = TSNE(n_components=2, perplexity=self.perplexity, learning_rate=self.learning_rate,
init='pca', random_state=0, verbose=2, method='exact')
Z_tsne = tsne.fit_transform(dataZspace)
axprogress = figprogress.add_subplot(4,4,1+self.ploti)
plot_embedding_unsuper_NMEdist_intenh(Z_tsne, named_y, axprogress, title="iter %d z_tsne" % (i), legend=False)
self.ploti = self.ploti+1
# For the purpose of discovering cluster assignments, we stop our procedure when less than tol% of points change cluster assignment between two consecutive iterations.
# tol% = 0.001
if i == self.best_args['update_interval']*120: # performs 1epoch = 615/3 = 205*1000epochs
return True
# Deeo learning metrics to minimize
solver.set_metric(mx.metric.CustomMetric(ce))
# start solver
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(self.best_args['update_interval']))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
self.best_args['end_args'] = args
# finish
figprogress = plt.gcf()
figprogress.savefig('{}\\tsne_progress_z{}_mu{}_{}.pdf'.format(save_to,self.best_args['znum'],self.best_args['num_centers'],labeltype), bbox_inches='tight')
plt.close()
# plot final z
figfinal = plt.figure()
axfinal = figfinal.add_subplot(1,1,1)
tsne = TSNE(n_components=2, perplexity=self.perplexity, learning_rate=self.learning_rate,
init='pca', random_state=0, verbose=2, method='exact')
Z_tsne = tsne.fit_transform(self.z)
plot_embedding_unsuper_NMEdist_intenh(Z_tsne, self.best_args['named_y'], axfinal, title='final tsne', legend=True)
figfinal.savefig('{}\\tsne_final_z{}_mu{}_{}.pdf'.format(save_to,self.best_args['znum'],self.best_args['num_centers'],labeltype), bbox_inches='tight')
plt.close()
outdict = {'meanAuc_cv':self.best_args['meanAuc_cv'],
'std_auc':self.best_args['std_auc'],
'auc_val':self.best_args['auc_val'],
'overall_metric':self.best_args['overall_metric'],
'dec_mu':self.best_args['dec_mu'],
'y_pred': self.best_args['y_pred'],
'named_y': self.best_args['named_y'],
'classes':self.best_args['classes'],
'num_centers': self.best_args['num_centers'],
'znum':self.best_args['znum'],
'update_interval':self.best_args['update_interval'],
'batch_size':self.best_args['batch_size']}
return outdict
if __name__ == '__main__':
    #####################################################
    # Driver script: loads NME feature datasets, then grid-searches DEC models
    # over latent sizes (znum) and cluster counts (num_centers), saving
    # per-configuration models, metrics and plots.
    # NOTE(review): Windows-specific absolute paths throughout; Python-2 print
    # statements and .as_matrix()/dict-ordering idioms date this code.
    #####################################################
    from decModel_wimgF_dualopt_descStats import *
    from utilities import *
    ## 1) read in the datasets both all NME (to do pretraining)
    NME_nxgraphs = r'Z:\Cristina\Section3\paper_notes_section3_MODIFIED\datasets'
    allNMEs_dynamic = pd.read_csv(os.path.join(NME_nxgraphs,'dyn_roi_records_allNMEs_descStats.csv'), index_col=0)
    allNMEs_morphology = pd.read_csv(os.path.join(NME_nxgraphs,'morpho_roi_records_allNMEs_descStats.csv'), index_col=0)
    allNMEs_texture = pd.read_csv(os.path.join(NME_nxgraphs,'text_roi_records_allNMEs_descStats.csv'), index_col=0)
    allNMEs_stage1 = pd.read_csv(os.path.join(NME_nxgraphs,'stage1_roi_records_allNMEs_descStats.csv'), index_col=0)
    # to load SERw matrices for all lesions
    with gzip.open(os.path.join(NME_nxgraphs,'nxGdatafeatures_allNMEs_descStats.pklz'), 'rb') as fin:
        nxGdatafeatures = pickle.load(fin)
    # to load discrall_dict dict for all lesions
    with gzip.open(os.path.join(NME_nxgraphs,'nxGnormfeatures_allNMEs_descStats.pklz'), 'rb') as fin:
        discrall_dict_allNMEs = pickle.load(fin)
    #########
    # shape input (798L, 427L)
    nxGdiscfeatures = discrall_dict_allNMEs
    print('Loading {} leasions with nxGdiscfeatures of size = {}'.format(nxGdiscfeatures.shape[0], nxGdiscfeatures.shape[1]) )
    # Z-score normalize each feature family independently before concatenation.
    print('Normalizing dynamic {} leasions with features of size = {}'.format(allNMEs_dynamic.shape[0], allNMEs_dynamic.shape[1]))
    normdynamic = (allNMEs_dynamic - allNMEs_dynamic.mean(axis=0)) / allNMEs_dynamic.std(axis=0)
    normdynamic.mean(axis=0)
    print(np.min(normdynamic, 0))
    print(np.max(normdynamic, 0))
    print('Normalizing morphology {} leasions with features of size = {}'.format(allNMEs_morphology.shape[0], allNMEs_morphology.shape[1]))
    normorpho = (allNMEs_morphology - allNMEs_morphology.mean(axis=0)) / allNMEs_morphology.std(axis=0)
    normorpho.mean(axis=0)
    print(np.min(normorpho, 0))
    print(np.max(normorpho, 0))
    print('Normalizing texture {} leasions with features of size = {}'.format(allNMEs_texture.shape[0], allNMEs_texture.shape[1]))
    normtext = (allNMEs_texture - allNMEs_texture.mean(axis=0)) / allNMEs_texture.std(axis=0)
    normtext.mean(axis=0)
    print(np.min(normtext, 0))
    print(np.max(normtext, 0))
    print('Normalizing stage1 {} leasions with features of size = {}'.format(allNMEs_stage1.shape[0], allNMEs_stage1.shape[1]))
    normstage1 = (allNMEs_stage1 - allNMEs_stage1.mean(axis=0)) / allNMEs_stage1.std(axis=0)
    normstage1.mean(axis=0)
    print(np.min(normstage1, 0))
    print(np.max(normstage1, 0))
    # shape input (798L, 427L)
    # NOTE(review): .as_matrix() was removed in pandas >= 1.0 (use .values / .to_numpy()).
    combX_allNME = np.concatenate((nxGdiscfeatures, normdynamic.as_matrix(), normorpho.as_matrix(), normtext.as_matrix(), normstage1.as_matrix()), axis=1)
    YnxG_allNME = np.asarray([nxGdatafeatures['roi_id'].values,
            nxGdatafeatures['classNME'].values,
            nxGdatafeatures['nme_dist'].values,
            nxGdatafeatures['nme_int'].values])
    print('Loading {} all NME of size = {}'.format(combX_allNME.shape[0], combX_allNME.shape[1]) )
    print('Loading all NME lables [label,BIRADS,dist,enh] of size = {}'.format(YnxG_allNME[0].shape[0]) )
    ######################
    ## 2) DEC using labeled cases
    ######################
    labeltype = 'wimgF_dualopt_descStats_saveparams'
    save_to = r'Z:\Cristina\Section3\paper_notes_section3_MODIFIED\save_to'
    #log
    logging.basicConfig(filename=os.path.join(save_to,'decModel_{}.txt'.format(labeltype)),
                        format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=logging.DEBUG)
    logger = logging.getLogger()
    # dfine num_centers according to clustering variable
    ## use y_dec to minimizing KL divergence for clustering with known classes
    # Build composite string labels "class_dist_int" (skipping missing dist values).
    ysup = ["{}_{}_{}".format(a, b, c) if b!='nan' else "{}_{}".format(a, c) for a, b, c in zip(YnxG_allNME[1],  YnxG_allNME[2],  YnxG_allNME[3])]
    #ysup[range(combX_filledbyBC.shape[0])] = YnxG_filledbyBC[1]+'_'+YnxG_filledbyBC[3]+'_'+YnxG_filledbyBC[4] # +['_'+str(yl) for yl in YnxG_filledbyBC[3]]
    #ysup[range(combX_filledbyBC.shape[0])] = YnxG_filledbyBC[1]+'_'+YnxG_filledbyBC[3]
    # 'U' (unknown) labels are remapped to 'K'.
    ysup = ['K'+rl[1::] if rl[0]=='U' else rl for rl in ysup]
    roi_labels = YnxG_allNME[1]
    roi_labels = ['K' if rl=='U' else rl for rl in roi_labels]
    try:
        y_dec = np.asarray([int(label) for label in ysup])
    except:
        # Labels are not numeric: encode each distinct string label as an integer id.
        classes = [str(c) for c in np.unique(ysup)]
        numclasses = [i for i in range(len(classes))]
        y_dec = []
        for k in range(len(ysup)):
            for j in range(len(classes)):
                if(str(ysup[k])==classes[j]):
                    y_dec.append(numclasses[j])
        y_dec = np.asarray(y_dec)
    ########################################################
    # DEC
    ########################################################
    input_size = combX_allNME.shape[1]
    # NOTE(review): Python-2 integer division here; under Python 3 these would be floats.
    latent_size = [input_size/rxf for rxf in [2,5,10,15,20]] # 25
    varying_mu = [int(np.round(var_mu)) for var_mu in np.linspace(3,12,10)]
    for znum in latent_size:
        valAUC = []
        cvRFZspaceAUC = []
        normalizedMI = []
        # to load a prevously DEC model
        for num_centers in varying_mu:
            # batch normalization
            X_train = combX_allNME
            y_dec_train = y_dec
            y_train = roi_labels
            batch_size = 125 #X_train.shape[0]
            update_interval = 20 # approx. 4 epochs per update
            # Impute NaNs with a small constant before training.
            X_train[np.isnan(X_train)] = 0.1
            #np.argwhere(np.isnan(X_train))
            # if(num_centers==3 and znum==30):
            #     continue
            #num_centers = len(classes)
            # Read autoencoder: note is not dependent on number of clusters just on z latent size
            print "Load autoencoder of znum = ",znum
            print "Training DEC num_centers = ",num_centers
            logger.info('Load autoencoder of znum = {}, mu = {} \n Training DEC'.format(znum,num_centers))
            epochs_update = float(batch_size*update_interval)/X_train.shape[0]
            logger.info('DEC batch_size = {}, update_interval = {} Training DEC, updating parameters every ~ {} Epochs \n '.format(batch_size,update_interval,epochs_update))
            dec_model = DECModel(mx.cpu(), X_train, num_centers, 1.0, znum, 'Z:\\Cristina\\Section3\\paper_notes_section3_MODIFIED\\save_to\\SAEmodels')
            logger.info('Tunning DEC batch_size ={}, alpha anheling={}'.format(batch_size,update_interval)) # orig paper 256*40 (10240) point for upgrade about 1/6 (N) of data
            outdict = dec_model.cluster(X_train, y_dec_train, y_train, classes, batch_size, save_to, labeltype, update_interval, logger) # 10 epochs#  ~ per 1/3 of data 798/48=16 update twice per epoch ~ N/(batch size)=iterations to reach a full epochg
            logger.info('Finised trainining DEC...')
            print 'dec_model meanAuc_cv = {}'.format( outdict['meanAuc_cv'] )
            logger.info('dec_model meanAuc_cv = {}'.format( outdict['meanAuc_cv'] ))
            cvRFZspaceAUC.append(outdict['meanAuc_cv'])
            print 'dec_model auc_val = {}'.format( outdict['auc_val'] )
            logger.info('dec_model auc_val = {}'.format( outdict['auc_val'] ))
            valAUC.append(outdict['auc_val'])
            # save output results
            # Split best_args into MXNet NDArray weights (converted to numpy) and plain values.
            dec_args_keys = ['encoder_1_bias', 'encoder_3_weight', 'encoder_0_weight',
            'encoder_0_bias', 'encoder_2_weight', 'encoder_1_weight',
            'encoder_3_bias', 'encoder_2_bias']
            dec_args = {key: v.asnumpy() for key, v in dec_model.best_args.items() if key in dec_args_keys}
            dec_args['dec_mubestacci'] = dec_model.best_args['dec_mu']
            args_save = {key: v for key, v in dec_model.best_args.items() if key not in dec_args_keys}
            # NOTE(review): dec_model is rebound here from a DECModel instance to a dict.
            dec_model = dec_args.copy()
            dec_model.update(args_save)
            #    mlp_model = dec_model['mlp_model']
            #
            #    # An example of saving module parameters.
            #    mlp_model.save_params(os.path.join(save_to,'mlp_model_params_z{}_mu{}.arg'.format(znum,num_centers)))
            #    # what is doing:
            #    #        from mxnet import ndarray
            #    #        arg_params, aux_params = dec_model.mlp_model.get_params()
            #    #        save_dict = {('arg:%s' % k) : v.as_in_context(mx.cpu()) for k, v in arg_params.items()}
            #    #        save_dict.update({('aux:%s' % k) : v.as_in_context(mx.cpu()) for k, v in aux_params.items()})
            #    #        ndarray.save(fname, save_dict)
            #
            #    del dec_model['mlp_model']
            #    del args_save['mlp_model']
            #    del outdict['mlp_model']
            args_save_save= gzip.open(os.path.join(save_to,'args_save_z{}_mu{}.arg'.format(znum,num_centers)), 'wb')
            pickle.dump(args_save, args_save_save, protocol=pickle.HIGHEST_PROTOCOL)
            args_save_save.close()
            # save model saving params into a numpy array
            dec_model_save= gzip.open(os.path.join(save_to,'dec_model_z{}_mu{}_{}.arg'.format(znum,num_centers,labeltype)), 'wb')
            pickle.dump(dec_model, dec_model_save, protocol=pickle.HIGHEST_PROTOCOL)
            dec_model_save.close()
            ## plot iterations
            df1 = pd.DataFrame({'overall_metric': pd.Series(outdict['overall_metric']),
                                'iterations':range(len(outdict['overall_metric']))})
            df2 = pd.DataFrame({'auc_val': pd.Series(outdict['auc_val']),
                                'meanAuc_cv': pd.Series(outdict['meanAuc_cv']),
                                'inc_iterations':range(len(outdict['meanAuc_cv']))})
            fig2 = plt.figure(figsize=(20,6))
            #ax2 = plt.axes()
            sns.set_context("notebook")
            sns.set_style("darkgrid", {"axes.facecolor": ".9"})
            ax1 = fig2.add_subplot(2,1,1)
            sns.pointplot(x="iterations", y="overall_metric", data=df1, ax=ax1, size=0.005)
            ax2 = fig2.add_subplot(2,1,2)
            sns.pointplot(x="inc_iterations", y="auc_val", data=df2, label='auc_val', color = "red", ax=ax2, size=0.0005)
            sns.pointplot(x="inc_iterations", y="meanAuc_cv", data=df2, label='meanAuc_cv', color = "green", ax=ax2, size=0.0005)
            fig2.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
            ax2.legend(loc="lower right",fontsize=18)
            fig2.savefig(save_to+os.sep+'DEC_z{}_mu{}_{}-unsuprv acc vs iteration.pdf'.format(znum,num_centers,labeltype), bbox_inches='tight')
            plt.close(fig2)
            #####################
            # Calculate normalized MI: find the relative frequency of points in Wk and Cj
            #####################
            N = X_train.shape[0]
            num_classes = len(np.unique(roi_labels)) # present but not needed during AE training
            roi_classes = np.unique(roi_labels)
            y_train_roi_labels = np.asarray(y_train)
            # extact embedding space
            all_iter = mx.io.NDArrayIter({'data': X_train}, batch_size=X_train.shape[0], shuffle=False,
                                                      last_batch_handle='pad')
            ## embedded point zi
            aDEC = DECModel(mx.cpu(), X_train, num_centers, 1.0, znum, 'Z:\\Cristina\\Section3\\NME_DEC\\SAEmodels')
            mxdec_args = {key: mx.nd.array(v) for key, v in dec_args.items() if key != 'dec_mubestacci'}
            zbestacci = model.extract_feature(aDEC.feature, mxdec_args, None, all_iter, X_train.shape[0], aDEC.xpu).values()[0]
            # orig paper 256*40 (10240) point for upgrade about 1/6 (N) of data
            #zbestacci = dec_model['zbestacci']
            pbestacci = np.zeros((zbestacci.shape[0], dec_model['num_centers']))
            aDEC.dec_op.forward([zbestacci, dec_args['dec_mubestacci'].asnumpy()], [pbestacci])
            # find max soft assignments dec_args
            W = pbestacci.argmax(axis=1)
            clusters = np.unique(W)
            num_clusters = len(np.unique(W))
            # Build the cluster-vs-class contingency counts for NMI.
            MLE_kj = np.zeros((num_clusters,num_classes))
            absWk = np.zeros((num_clusters))
            absCj = np.zeros((num_classes))
            for k in range(num_clusters):
                # find poinst in cluster k
                absWk[k] = np.sum(W==k)
                for j in range(num_classes):
                    # find points of class j
                    absCj[j] = np.sum(y_train_roi_labels==roi_classes[j])
                    # find intersection
                    ptsk = W==k
                    MLE_kj[k,j] = np.sum(ptsk[y_train_roi_labels==roi_classes[j]])
            # if not assignment incluster
            absWk[absWk==0]=0.00001
            # compute NMI
            numIwc = np.zeros((num_clusters,num_classes))
            for k in range(num_clusters):
                for j in range(num_classes):
                    if(MLE_kj[k,j]!=0):
                        numIwc[k,j] = MLE_kj[k,j]/N * np.log( N*MLE_kj[k,j]/(absWk[k]*absCj[j]) )
            Iwk = np.sum(np.sum(numIwc, axis=1), axis=0)
            Hc = -np.sum(absCj/N*np.log(absCj/N))
            # NOTE(review): Hw lacks the leading minus sign Hc has — verify whether
            # the |Hc+Hw| denominator below compensates intentionally.
            Hw = np.sum(absWk/N*np.log(absWk/N))
            NMI = Iwk/(np.abs(Hc+Hw))
            print "... DEC normalizedMI = ", NMI
            # to plot best acci
            normalizedMI.append( NMI )
            outdict['NMI'] = NMI
            logger.info('dec_model NMI={}'.format(NMI))
            # save model saving params into a numpy array
            outdict_save= gzip.open(os.path.join(save_to,'outdict_z{}_mu{}_{}.arg'.format(znum,num_centers,labeltype)), 'wb')
            pickle.dump(outdict, outdict_save, protocol=pickle.HIGHEST_PROTOCOL)
            outdict_save.close()
    # save to R
    #    pdzfinal = pd.DataFrame( np.append( y[...,None], zfinal, 1) )
    #    pdzfinal.to_csv('datasets//zfinal.csv', sep=',', encoding='utf-8', header=False, index=False)
    #    # to save to csv
    #    pdcombX = pd.DataFrame( np.append( y[...,None], combX, 1) )
    #    pdcombX.to_csv('datasets//combX.csv', sep=',', encoding='utf-8', header=False, index=False)
    #
|
991,580 | f244a700b08413e131a1577fcbee858d56867759 | import math
def isPrime(num):
    """Return True if num is a prime number, False otherwise.

    Fixes two bugs in the original: it returned False for 2 (the only even
    prime, rejected by the num % 2 test) and True for 0, 1 and negatives
    (the trial-division loop never ran for them). Also limits trial
    division to odd candidates up to sqrt(num) instead of all of [2, num).
    """
    if num < 2:
        # 0, 1 and negative numbers are not prime.
        return False
    if num == 2:
        # 2 is the only even prime.
        return True
    if num % 2 == 0:
        return False
    # Only odd divisors up to sqrt(num) need checking.
    divisor = 3
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 2
    return True
def runPrimeTest():
    """Print every prime in the range [2, 400), one per line.

    The original used the Python-2-only ``print y`` statement; the
    single-argument ``print(...)`` form behaves identically under both
    Python 2 and Python 3.
    """
    for candidate in range(2, 400):
        if isPrime(candidate):
            print(candidate)
def generateAllPrimes(num):
    """Return a list of every prime in the range [2, num)."""
    return [candidate for candidate in range(2, num) if isPrime(candidate)]
def generateAllNonEvenFactors(num):
    """Return the odd factors of num found in the range [2, num)."""
    return [factor for factor in range(2, num)
            if num % factor == 0 and factor % 2 == 1]
def calculatePrimeFactors(listOfFactors, num):
    """Return the prime entries of listOfFactors.

    ``num`` is unused but kept so existing callers keep working.
    """
    return [factor for factor in listOfFactors if isPrime(factor)]
'''
listOfPrimeFactors = []
for x in listOfPrimes:
if num % x == 0:
listOfPrimeFactors.append(x)
return listOfPrimeFactors
'''
def calculateMaxPrimeFactor(listOfPrimeFactors):
    """Return the largest value in listOfPrimeFactors, or 0 if it is empty.

    The original shadowed the builtin ``max`` with a local variable and
    scanned the list by index; the builtin does the same job directly.
    """
    if not listOfPrimeFactors:
        # Preserve the original empty-list sentinel.
        return 0
    return max(listOfPrimeFactors)
def main():
    """Find and report the prime factors of a hard-coded target number.

    The Python-2-only print statements are replaced by the parenthesized
    single-argument form, which behaves identically on Python 2 and 3.
    """
    #number = 600851475143
    number = 13195
    listOfFactors = generateAllNonEvenFactors(number)
    listOfPrimeFactors = calculatePrimeFactors(listOfFactors, number)
    for factor in listOfPrimeFactors:
        print(str(factor) + ' is a prime factor of ' + str(number))
    print(listOfPrimeFactors)
    print('The largest prime factor of ' + str(number) + ' is ' + str(calculateMaxPrimeFactor(listOfPrimeFactors)))
# Guard the entry point so importing this module no longer triggers the
# computation as a side effect; running the file as a script is unchanged.
if __name__ == '__main__':
    main()
|
991,581 | 70b8d2aa0c26c2cd1a18579f82b6a1c86ed75bb8 | # Generated by Django 3.2.6 on 2021-09-02 01:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds nullable ``email`` and ``username`` CharFields to both the
    ``parent`` and ``student`` models of the ``secondary`` app.
    """

    dependencies = [
        ('secondary', '0006_auto_20210830_2358'),
    ]
    operations = [
        migrations.AddField(
            model_name='parent',
            name='email',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='parent',
            name='username',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='student',
            name='email',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='student',
            name='username',
            field=models.CharField(max_length=200, null=True),
        ),
    ]
|
991,582 | dd22dd84e09e55206223bc9d0932a2a201c9848b | import torch
from torchvision.utils import save_image
def reconstruct(filename, input, encoder, decoder, image_size, num_chanel, device):
    """Save a grid of input images followed by their autoencoder reconstructions."""
    with torch.no_grad():
        originals = input.to(device)
        reconstructions = decoder(encoder(originals))
        stacked = torch.cat((originals, reconstructions), dim=0)
        grid = stacked.view(input.shape[0] * 2, num_chanel, image_size, image_size)
        save_image(grid, filename)
def sampling(filename, fixednoise, decoder, num_sample, image_size, num_chanel):
    """Decode a fixed noise batch and save the resulting samples as one image."""
    with torch.no_grad():
        generated = decoder(fixednoise)
        images = generated.view(num_sample, num_chanel, image_size, image_size)
        save_image(images, filename, scale_each=True, normalize=True)
def sampling_eps(filename, fixednoise, decoder, num_sample, image_size, num_chanel):
    """Decode a fixed noise batch and save 64-, 32- and 16-sample EPS grids."""
    with torch.no_grad():
        generated = decoder(fixednoise)
        images = generated.view(num_sample, num_chanel, image_size, image_size)
        # Same grid at three sizes: full batch, first 32, first 16.
        save_image(images, filename + '_64.eps', scale_each=True, normalize=True, nrow=8)
        save_image(images[:32], filename + '_32.eps', scale_each=True, normalize=True, nrow=8)
        save_image(images[:16], filename + '_16.eps', scale_each=True, normalize=True, nrow=8)
991,583 | b6e9f830759f3c3ca9fc548016ad16a67c1cd509 | """Test the `GlobManager` class.
"""
import os
from tests.helper_functions import TestCaseWithFakeFiles
from watch_do import GlobManager
class TestGlobManager(TestCaseWithFakeFiles):
    """Test the `GlobManager` class.

    Runs against the fake file tree created by ``TestCaseWithFakeFiles``;
    the expected file names below mirror that fixture layout.
    """
    def test_last_files(self):
        """Test that the `last_files` property is being correctly maintained.
        """
        glob_manager = GlobManager(['*'])
        # Before any globbing, no files have been seen.
        self.assertCountEqual(glob_manager.last_files, set())
        glob_manager.get_files()
        # After a glob, last_files reflects the most recent result set.
        self.assertCountEqual(
            glob_manager.last_files,
            {
                'bob.py', 'dave.txt', 'fred.txt.py', 'geoff.py', 'jim.py.txt',
                'rob.txt'
            })
    def test_get_files(self):
        """Check that globbing is working as we expect it to.
        Perform some generic globbing on the test items created and ensure
        we're getting what we expect to back.
        """
        glob_manager = GlobManager(['*'])
        self.assertCountEqual(
            glob_manager.get_files(),
            {
                'bob.py', 'dave.txt', 'fred.txt.py', 'geoff.py', 'jim.py.txt',
                'rob.txt'
            })
        glob_manager = GlobManager(['*.py'])
        self.assertCountEqual(
            glob_manager.get_files(), {'bob.py', 'fred.txt.py', 'geoff.py'})
        # Multiple patterns are unioned.
        glob_manager = GlobManager(['*.py', '*.txt'])
        self.assertCountEqual(
            glob_manager.get_files(),
            {
                'bob.py', 'dave.txt', 'fred.txt.py', 'geoff.py', 'jim.py.txt',
                'rob.txt'
            })
        # Recursive ** pattern descends into subdirectories.
        glob_manager = GlobManager(['**/*.py'])
        self.assertCountEqual(
            glob_manager.get_files(),
            {
                'bob.py',
                'fred.txt.py',
                'geoff.py',
                'animals/dog.py',
                'animals/mouse.txt.py',
                'animals/sheep.py',
                'animals/vehicles/bus.py',
                'animals/vehicles/aeroplane.txt.py',
                'animals/vehicles/tractor.py'
            })
        glob_manager = GlobManager(['bob.py'])
        self.assertCountEqual(glob_manager.get_files(), {'bob.py'})
        # Duplicate patterns must not produce duplicate results.
        glob_manager = GlobManager(['bob.py', 'bob.py'])
        self.assertCountEqual(glob_manager.get_files(), {'bob.py'})
        # Absolute paths are accepted too.
        glob_manager = GlobManager(
            [os.path.join(self.temp_dir.name, file_name) for file_name in [
                'bob.py', 'geoff.py']])
        self.assertCountEqual(glob_manager.get_files(), {'bob.py', 'geoff.py'})
|
991,584 | 74d98230d3aafb3b4f347917db4021e26aee89b0 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
#过滤器
import json
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http import Request
class MyImagesPipeline(ImagesPipeline):
    """Image pipeline that stores downloads under ``full/<url basename>``
    and drops any item for which no image could be fetched."""

    def file_path(self, request, response=None, info=None):
        """Derive the storage path from the last segment of the request URL."""
        basename = request.url.split('/')[-1]
        return 'full/%s' % basename

    def get_media_requests(self, item, info):
        """Schedule one download request per image URL on the item."""
        return (Request(url) for url in item['image_urls'])

    def item_completed(self, results, item, info):
        """Keep the item only if at least one image download succeeded."""
        downloaded = [data['path'] for ok, data in results if ok]
        if not downloaded:
            raise DropItem("Item contains no images")
        return item
class TutorialPipeline(object):
    """Pipeline that renders every collected image URL into an HTML file."""

    def __init__(self):
        # NOTE(review): hard-coded output path and no explicit encoding —
        # confirm this is intentional.
        self.mfile = open('test.html', 'w')

    def process_item(self, item, spider):
        """Write one <img> tag per image URL, then pass the item downstream.

        Bug fix: Scrapy pipelines must return the item (or raise DropItem);
        the original returned None, which fed None to any later pipeline.
        """
        for x in item['image_urls']:
            text = '<img src="' + x + '" alt = "" />'
            self.mfile.writelines(text)
        return item

    def close_spider(self, spider):
        """Close the output file when the spider finishes."""
        self.mfile.close()
|
991,585 | a787ec716807c85408bbf9138935155016cfc1b4 | #
# BuilderBot.py
#
# @author Alain Rinder
# @date 2017.06.07
# @version 0.1
#
import random
import time
from src.player.IBot import *
from src.action.IAction import *
class BuilderBot(IBot):
    """Bot that prefers placing fences, falling back to random pawn moves.

    Picks a random valid fence placing and retries up to 5 times to find
    one that does not block a path; if none is found, moves the pawn.
    """

    def play(self, board) -> IAction:
        if self.remainingFences() > 0 and len(board.storedValidFencePlacings) > 0:
            randomFencePlacing = random.choice(board.storedValidFencePlacings)
            attempts = 5
            while board.isFencePlacingBlocking(randomFencePlacing) and attempts > 0:
                randomFencePlacing = random.choice(board.storedValidFencePlacings)
                attempts -= 1
            # BUG FIX: the old code fell back to a pawn move whenever
            # attempts hit 0, discarding a non-blocking placing found on the
            # final retry. Decide on the placing's actual state instead.
            # (Assumes isFencePlacingBlocking is a pure predicate — confirm.)
            if board.isFencePlacingBlocking(randomFencePlacing):
                validPawnMoves = board.storedValidPawnMoves[self.pawn.coord]
                return random.choice(validPawnMoves)
            return randomFencePlacing
        validPawnMoves = board.storedValidPawnMoves[self.pawn.coord]
        return random.choice(validPawnMoves)
|
991,586 | f42d590602554951de13c3007a079bccf14a30d9 | from django.db import models
#from numpy.random import random_sample
# Create your models here.
class Tempvalt(models.Model):
    # A temperature reading recorded at a given altitude.
    # NOTE(review): `temperature` is declared as a FileField, which stores an
    # uploaded-file path rather than a numeric value — presumably FloatField
    # (or DecimalField) was intended; confirm before changing, since a field
    # swap requires a schema migration.
    temperature = models.FileField()
    altitude = models.IntegerField()
991,587 | d7a672fbcc8982ca0b82d02eeb7859200639f570 | import urllib
import requests
import time
from bs4 import BeautifulSoup
import csv
# Scrape Google results for `query` and save (title, link, snippet) rows
# to data.csv. Requires a desktop User-Agent or Google serves simplified HTML.
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36"
query = 'Python'
query = query.replace(' ', '+')
URL = f"https://google.com/search?q={query}&num=20"
headers = {"user-agent": USER_AGENT}
resp = requests.get(URL, headers=headers)
if resp.status_code == 200:
    soup = BeautifulSoup(resp.content, "html.parser")
    results = []
    # NOTE(review): the 'rc' class is tied to Google's result markup at the
    # time of writing and breaks silently when Google changes its HTML.
    for g in soup.find_all('div', class_='rc'):
        anchors = g.find_all('a')
        spans = g.find_all('span')
        if anchors:
            title = g.find('h3').text
            link = anchors[0]['href']
            # Only rows that also have a snippet span are recorded.
            if spans:
                span = spans[-1].text
                results.append([title, link, span])
    # BUG FIX: newline='' is required when handing a file to csv.writer —
    # without it every row is followed by a blank line on Windows.
    with open('data.csv', 'w', newline='', encoding='utf-8') as fp:
        wr = csv.writer(fp)
        wr.writerows(results)
|
991,588 | 598d4bd6dcaf55e3587cbb1df4fc0622260a2007 | from ast import arg
import boto3
from botocore.exceptions import ClientError
import argparse, os, glob, logging, json, sys
from subprocess import check_output
from datetime import datetime
#########################
# Utility Functions #
#########################
# Setup the logging
def setup_logging(log_file_path, additional_modules):
    """Route INFO-and-above messages to both a log file and the console,
    while silencing chatty third-party loggers (boto, docker, ...)."""
    sinks = [
        logging.FileHandler(log_file_path),
        logging.StreamHandler(),
    ]
    logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=sinks)
    # Third-party modules only surface CRITICAL messages.
    for module_name in additional_modules:
        logging.getLogger(module_name).setLevel(logging.CRITICAL)
# Define a generic logging function
def log(message, details_json=None, level=None):
    """Emit a structured JSON log line at the requested level.

    details_json is attached under "details" when it is JSON-serializable;
    for level "error" it is treated as a botocore ClientError and its
    response["Error"] payload is attached instead.
    """
    entry = {
        "message": message,
        # The script that produced this entry.
        "source": sys.argv[0],
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    if details_json is not None:
        if level == "error":
            try:
                # Probe serializability before attaching the payload.
                json.dumps(details_json.response.get("Error"), sort_keys=False)
                details = details_json.response.get("Error")
            except:
                details = "Could not parse error's response"
        else:
            try:
                json.dumps(details_json, sort_keys=False)
                details = details_json
            except:
                details = "Could not parse details JSON object"
        entry["details"] = details
    # Dispatch to the matching logging call; anything else logs at INFO.
    emit = {
        "warning": logging.warning,
        "critical": logging.critical,
        "error": logging.error,
    }.get(level, logging.info)
    emit(json.dumps(entry, sort_keys=False))
    return None
# Is this code running on windows?
def is_windows():
    """True when running under Windows (os.name reports "nt")."""
    return "nt" == os.name
# Define where the tmp directory is
def get_tmp_path():
    """Return the platform-appropriate scratch directory (trailing separator
    included, as callers os.path.join onto it)."""
    return "C:\\tabsetup\\" if is_windows() else "/tmp/"
# Get TSM's full path
def get_tsm_path():
    """Locate the tsm executable inside the Tableau Server install tree.

    Returns the first glob match. BUG FIX: the old code returned path[0]
    unconditionally, raising an opaque IndexError when Tableau Server is
    not installed; now a FileNotFoundError with a clear message is raised.
    """
    # Define the glob expression, based on the OS
    if is_windows():
        glob_exp = "C:\\tableau\\packages\\[b][i][n][.]*\\[t][s][m][.]*"
    else:
        glob_exp = "/opt/tableau/tableau_server/packages/[b][i][n][.]*/[t][s][m]*"
    # Evaluate the path
    matches = glob.glob(glob_exp)
    if not matches:
        raise FileNotFoundError(
            "Could not locate tsm; no path matches %s" % glob_exp
        )
    return matches[0]
# Execute a tsm command
def exec_tsm(*args):
    """Run a tsm command line, logging both the command and its output.

    Returns the command's stdout decoded as UTF-8.
    """
    command_line = ' '.join(args)
    log(level="Info", message=f"Running the following tsm command: {command_line}")
    output = check_output(args).decode('UTF-8')
    log(message=output, level="Info")
    return output
# Get the path for the backups, based on the OS
def backup_full_path(tsm):
    """Ask TSM for the configured backup/restore base file path.

    The CR/LF noise in the command output is stripped before returning.
    """
    raw = exec_tsm(tsm, "configuration", "get", "-k", "basefilepath.backuprestore")
    return raw.replace("\r", "").replace("\n", "")
#########################
# Business Logic #
#########################
# Backup Tableau Server, and save to S3
# Backup Tableau Server, and save to S3
def backup(tsm, bucket_name, s3_prefix):
    """Run a full TSM backup (settings export + tsbak) and upload both to S3.

    Returns True on success, False when an S3 upload fails. Local copies of
    the backup files are removed after a successful upload.
    """
    # Date stamp used in both generated file names.
    today = datetime.now().strftime('%Y-%m-%d')
    # cleanup the server first
    exec_tsm(tsm, "maintenance", "cleanup", "--all")
    # export the tsm settings
    settings_filename = f"{today}-settings.json"
    settings_fullpath = os.path.join(get_tmp_path(), settings_filename)
    exec_tsm(tsm, "settings", "export", "--output-config-file", settings_fullpath)
    # perform the backup; TSM writes it into its configured backup directory
    tsbak_filename = f"{today}-backup.tsbak"
    exec_tsm(tsm, "maintenance", "backup", "--file", tsbak_filename, "--ignore-prompt")
    tsbak_fullpath = os.path.join(backup_full_path(tsm=tsm), tsbak_filename)
    # Upload the backup files to the S3 bucket
    s3_client = boto3.client('s3')
    try:
        key = f"{s3_prefix}{settings_filename}"
        s3_client.upload_file(settings_fullpath, bucket_name, key)
        key = f"{s3_prefix}{tsbak_filename}"
        s3_client.upload_file(tsbak_fullpath, bucket_name, key)
    except ClientError as e:
        # BUG FIX: the message used "${key}" (a literal dollar sign left over
        # from shell-style interpolation) instead of the f-string {key}.
        log(message=f"Error uploading {key} to {bucket_name}", details_json=e, level="error")
        return False
    # Cleanup backup files locally
    os.remove(settings_fullpath)
    os.remove(tsbak_fullpath)
    return True
# Restore Tableau Server, from a backup in S3
# Restore Tableau Server, from a backup in S3
def restore(tsm, bucket_name, s3_prefix):
    """Restore Tableau Server from the most recent backup files in S3.

    Downloads the newest *.tsbak and *settings.json under s3_prefix,
    restores them via TSM, then ensures the server is started.
    Returns True on success, False on any S3 client error.
    """
    s3_client = boto3.client('s3')
    try:
        # Look for existing backup files
        response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=s3_prefix)
        tsbaks = []
        settings = []
        # Separate tsbak archives from settings exports
        for file in response.get("Contents", []):
            if file.get("Key", "").endswith('tsbak'):
                tsbaks.append(file)
            if file.get("Key", "").endswith('settings.json'):
                settings.append(file)
        # BUG FIX: sort newest-first. The old ascending sort made element [0]
        # the OLDEST object even though main() promises "the latest backup".
        def sort_key(obj):
            return obj.get("LastModified")
        tsbaks.sort(key=sort_key, reverse=True)
        settings.sort(key=sort_key, reverse=True)
        # Get an S3 resource reference
        s3_resource = boto3.resource('s3')
        # Download/Restore from tsbak
        if len(tsbaks) > 0:
            local_backup_path = os.path.join(backup_full_path(tsm=tsm), "backup.tsbak")
            s3_backup_path = tsbaks[0]['Key']
            s3_resource.Bucket(bucket_name).download_file(s3_backup_path, local_backup_path)
            # TSM resolves the bare file name against its backup directory
            exec_tsm(tsm, "maintenance", "restore", "--file", "backup.tsbak")
            os.remove(local_backup_path)
        # Download/Restore from settings.json
        if len(settings) > 0:
            local_backup_path = os.path.join(get_tmp_path(), "settings.json")
            s3_backup_path = settings[0]['Key']
            s3_resource.Bucket(bucket_name).download_file(s3_backup_path, local_backup_path)
            exec_tsm(tsm, "settings", "import", "--config-only", "--force-keys", "-f", local_backup_path)
            exec_tsm(tsm, "pending-changes", "apply", "--ignore-prompt")
            os.remove(local_backup_path)
        # Ensure Tableau Server has started
        exec_tsm(tsm, "start")
    except ClientError as e:
        log(message=f"Error restoring backups from {bucket_name}", details_json=e, level="error")
        return False
    return True
#########################
# Main Executable #
#########################
def main():
    """CLI entry point: back up to, or restore from, an S3 bucket.

    Driven by --command ('backup' or 'restore'); returns the operation's
    boolean status (False for an unknown command).
    """
    # Setup logging
    setup_logging(log_file_path="tableau-backup-restore-s3.log", additional_modules=["boto3", "botocore"])
    # Parse parameters
    parser = argparse.ArgumentParser()
    parser.add_argument("--command", help="Options are 'backup' or 'restore'", type=str, required=True)
    parser.add_argument("--region", help="The AWS region", type=str, required=True)
    parser.add_argument("--s3bucket", help="The S3 bucket, where the backup files live", type=str, required=True)
    parser.add_argument("--s3prefix", help="The prefix used to query for backup files in S3", type=str, required=True)
    args = parser.parse_args()
    command = args.command.lower()
    # NOTE(review): --region is required and parsed but never used below —
    # boto3 falls back to its default region resolution. Confirm intent.
    s3_bucket = args.s3bucket.lower()
    s3_prefix = args.s3prefix.lower()
    # Get a reference to tsm
    tsm = get_tsm_path()
    # Execute the command
    if command == 'backup':
        status = backup(tsm=tsm, bucket_name=s3_bucket, s3_prefix=s3_prefix)
    elif command == 'restore':
        status = restore(tsm=tsm, bucket_name=s3_bucket, s3_prefix=s3_prefix)
    else:
        # BUG FIX: an unknown command previously fell through silently.
        log(message=f"Unknown command: {command}", level="error")
        status = False
    return status


# BUG FIX: guard the entry point so importing this module has no side effects.
if __name__ == "__main__":
    main()
991,589 | 01819743e12acee57f97ba5e55ef4f72517b1e6d | from django.test import TestCase, SimpleTestCase
from django.urls import reverse, resolve
from accounts.views import profile, order_history
from accounts.forms import UserProfileForm
class TestURLs(SimpleTestCase):
    """URL-resolution tests for the accounts app's profile views."""

    def test_profile_URL(self):
        """
        Testing profile URL
        """
        url = reverse('profile')
        # Fixes: assertEquals is a deprecated alias of assertEqual;
        # leftover debug print(resolve(url)) removed.
        self.assertEqual(resolve(url).func, profile)

    def test_order_history_URL(self):
        """
        Testing order_history URL
        """
        url = reverse('order_history', args=[1])
        self.assertEqual(resolve(url).func, order_history)
class TestForms(TestCase):
    """Validation tests for UserProfileForm."""

    def test_user_profile_form_valid(self):
        """
        Testing user_profile form is valid
        """
        form = UserProfileForm(data={
            'default_phone_number': 'test_phone_number',
            'default_town_or_city': 'test_town_or_city',
            'default_street_address1': 'test_street1',
            'default_street_address2': 'test_street2',
            'default_county': 'test_county',
            'default_country': 'GB',
        })
        self.assertTrue(form.is_valid())

    def test_user_profile_form_invalid(self):
        """
        Testing user_profile form is invalid
        """
        # 'test_country' is not a valid ISO country code, so only the
        # country field should error.
        form = UserProfileForm(data={
            'default_phone_number': 'test_phone_number',
            'default_town_or_city': 'test_town_or_city',
            'default_street_address1': 'test_street1',
            'default_street_address2': 'test_street2',
            'default_county': 'test_county',
            'default_country': 'test_country',
        })
        self.assertFalse(form.is_valid())
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(len(form.errors), 1)
|
991,590 | 3fc1a8c6eaa54c1465f9b7067a4a20d01a91fdad | import pytest
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
from ..api.views import FoesViewSet
from ..models import Foe
pytestmark = pytest.mark.django_db
class TestFoeViewSet:
    """Smoke tests: FoesViewSet.get_queryset includes the fixture foe for an
    anonymous user, regardless of the request URL used."""

    def test_get_queryset(self, foe: Foe, rf: RequestFactory):
        # Plain list request.
        view = FoesViewSet()
        request = rf.get("/api/foes/")
        request.user = AnonymousUser()
        view.request = request
        assert foe in view.get_queryset()

    def test_get_active(self, foe: Foe, rf: RequestFactory):
        # NOTE(review): identical assertion to test_get_queryset — the
        # ?active=true filter itself is never verified here; confirm whether
        # the filtered queryset was meant to be asserted.
        view = FoesViewSet()
        request = rf.get("/api/foes/?active=true")
        request.user = AnonymousUser()
        view.request = request
        assert foe in view.get_queryset()

    def test_get_detail(self, foe: Foe, rf: RequestFactory):
        # Detail-style URL; get_queryset does not depend on the path, so this
        # only checks the queryset again.
        view = FoesViewSet()
        request = rf.get(f"/api/foes/{foe.uuid}/")
        request.user = AnonymousUser()
        view.request = request
        assert foe in view.get_queryset()
|
991,591 | ecf14b5d5b08c0eb33e6e25d8bb938b7646ac7c4 | from django.conf.urls import url
from basic_app import views
# URL namespace for the basic_app application.
app_name='basic_app'
urlpatterns=[
    # NOTE(review): the pattern has no trailing '$', so it matches any path
    # beginning with 'register/' — confirm whether r'^register/$' was intended.
    url(r'^register/',views.register,name='register')
]
|
991,592 | 1ebcc253daea235221a7fb38ac3bf30ce96e89ec | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 17:29:16 2019
@author: Tanya Joon
"""
#importing dependencies
from glob import glob
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten, Average
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import cv2
from tensorflow.python.keras.callbacks import TensorBoard
from time import time
#path to the training list
path='C:\\Users\\Tanya Joon\\Documents\\MM 803 Image\\split\\trainlist.txt'
f = open(path)
line = f.readline()
path1= 'C:/Users/Tanya Joon/Documents/MM 803 Image/UCF-101/'
# NOTE(review): `i` and `w` are initialized here but never used below.
i = 0
w = [];
# Per-video frame / optical-flow PNG paths, accumulated across the list file.
files_frames = []
files_flow = []
while line:
    #reading the training list line by line and appending the frames and optical flow names to the list
    line1=line[:-1]
    filename_frames= path1+line1+'_frames'
    filename_flow=path1+line1+'_flow'
    line = f.readline()
    imagePatches_frames = glob(filename_frames+'/*.png')
    imagePatches_flow = glob(filename_flow+'/*.png')
    # NOTE(review): these counters are reset on every iteration, so the
    # prints below show per-video counts, not running totals — confirm
    # whether cumulative totals were intended.
    count_frames=0;
    count_flow=0;
    files_frames += imagePatches_frames
    files_flow += imagePatches_flow
    count_frames += len(imagePatches_frames)
    count_flow += len(imagePatches_flow)
    print(count_frames)
    print(count_flow)
f.close()
#storing the frames and optical flow names to a numpy array
files_frames = np.array(files_frames)
files_flow = np.array(files_flow)
# Map class name -> numeric label, parsed from "index name" lines of the
# UCF-101 class index file.
dct = {
    name: int(index)
    for index, name in (
        row.strip().split(' ')
        for row in open("C:/Users/Tanya Joon/Documents/MM 803 Image/split/classInd.txt", "r")
    )
}
# Mini-batch size used by the data generator below.
BATCH_SIZE = 64
#defining a data generator to feed the frames and optical flow to the model
def datagen():
    """Yield endless random batches of ([frame_imgs, flow_imgs], one-hot labels).

    Frames are resized to 224x224 (3-channel); flow images are read with
    cv2 flag 0 (grayscale) and reshaped to (224, 224, 1). Labels are looked
    up in `dct` via the 7th path segment — NOTE(review): file.split('/')[6]
    only works for the exact absolute-path depth built above; confirm if
    the data location changes.
    """
    while True:
        samples = np.random.randint(0, len(files_frames), size = BATCH_SIZE)
        yield [np.array([cv2.resize(cv2.imread(file), (224, 224)) for file in files_frames[samples]]), np.array([np.reshape(cv2.resize(cv2.imread(file, 0), (224, 224)), (224, 224, 1)) for file in files_flow[samples]])], to_categorical([dct[file.split('/')[6]]-1 for file in files_frames[samples]], 101)
# Module-level generator instance (see NOTE in the training loop below).
gen = datagen()
#Model
# Two-stream architecture: a spatial stream over RGB frames and a temporal
# stream over optical-flow images; the two softmax outputs are averaged.
# NOTE(review): variables named batch_norm_* actually hold LOCAL RESPONSE
# normalization outputs (tf.nn.local_response_normalization), not batch norm.
#input layer: taking generated frames as input
inp_frames = Input(shape=(224,224,3))
#Layer1
conv_1_frames = Conv2D(96, (7,7), strides= 2, activation='relu')(inp_frames)
batch_norm_1_frames= tf.nn.local_response_normalization(conv_1_frames, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)
pool_1_frames = MaxPooling2D((2,2)) (batch_norm_1_frames)
#Layer2
conv_2_frames = Conv2D(256, (5,5), strides= 2, activation='relu')(pool_1_frames)
batch_norm_2_frames = tf.nn.local_response_normalization(conv_2_frames, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)
pool_2_frames = MaxPooling2D((2,2)) (batch_norm_2_frames)
#Layer3
conv_3_frames = Conv2D(512,(3,3),strides=1,activation='relu')(pool_2_frames)
#Layer4
conv_4_frames = Conv2D(512,(3,3),strides=1,activation='relu')(conv_3_frames)
#Layer5
conv_5_frames = Conv2D(512,(3,3),strides=1,activation='relu')(conv_4_frames)
pool_3_frames = MaxPooling2D((2,2))(conv_5_frames)
flat_frames = Flatten() (pool_3_frames)
#Layer6
fc_1_frames = Dense(4096,activation='relu')(flat_frames)
#Layer7
fc_2_frames = Dense(2048,activation='relu')(fc_1_frames)
#output layer
out_frames = Dense(101,activation='softmax')(fc_2_frames)
#input layer: taking generated optical flow as input
inp_flow = Input(shape=(224,224,1))
#Layer1
conv_1_flow = Conv2D(96, (7,7), strides= 2, activation='relu')(inp_flow)
batch_norm_1_flow= tf.nn.local_response_normalization(conv_1_flow, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)
pool_1_flow = MaxPooling2D((2,2)) (batch_norm_1_flow)
#Layer2
# NOTE(review): unlike the frames stream, this layer has no normalization
# between the convolution and the pooling — confirm whether intentional.
conv_2_flow = Conv2D(256, (5,5), strides= 2, activation='relu')(pool_1_flow)
pool_2_flow = MaxPooling2D((2,2)) (conv_2_flow)
#Layer3
conv_3_flow = Conv2D(512,(3,3),strides=1,activation='relu')(pool_2_flow)
#Layer4
conv_4_flow = Conv2D(512,(3,3),strides=1,activation='relu')(conv_3_flow)
#Layer5
conv_5_flow = Conv2D(512,(3,3),strides=1,activation='relu')(conv_4_flow)
pool_3_flow = MaxPooling2D((2,2))(conv_5_flow)
flat_flow = Flatten() (pool_3_flow)
#Layer6
fc_1_flow = Dense(4096,activation='relu')(flat_flow)
#Layer7
fc_2_flow = Dense(2048,activation='relu')(fc_1_flow)
#output layer
out_flow = Dense(101,activation='softmax')(fc_2_flow)
#Taking the output of both the streams and combining them
out = Average()([out_frames, out_flow])
model = Model(inputs=[inp_frames, inp_flow], outputs=out)
opti_flow = tf.keras.optimizers.Adam(learning_rate=1e-5)
#compiling the model by using categorical_crossentropy loss
model.compile(optimizer=opti_flow, loss = 'categorical_crossentropy', metrics=['mae','accuracy'])
model.summary()
#visualizing the model on tensorboard
# NOTE(review): the backslash in "logs\{}" makes this a Windows-style path.
tensorboard = TensorBoard(log_dir="logs\{}".format(time()),write_graph=True)
#calling the datagenerator and passing the inputs to our model for training
# NOTE(review): this creates a fresh generator; the module-level `gen`
# defined earlier is never used.
i=0
hist_frames=[]
for x, y in datagen():
    i=i+1
    print(i)
    # NOTE(review): the break fires BEFORE fitting on the 15000th batch,
    # so exactly 14999 training steps run — confirm intent.
    if(i == 15000): break
    history = model.fit(x,y, batch_size=64, epochs=1,callbacks=[tensorboard])
    hist_frames.append(history.history)
#saving training history
print("\nhistory dict:",hist_frames)
#saving the model after training
model.save('C:\\Users\\Tanya Joon\\Documents\\MM 803 Image\\model.h5')
#saving the training loss in an numpy array
# NOTE(review): `i` is reused below as the history-dict loop variable,
# shadowing the step counter above.
loss_array=[]
for i in hist_frames:
    for j in i['loss']:
        loss_array.append(j)
#saving the training accuracy in an numpy array
accuracy_array=[]
for i in hist_frames:
    for j in i['accuracy']:
        accuracy_array.append(j)
#printing the average training loss and accuracy
print("Average accuracy: ", np.average(accuracy_array))
print("Average test loss: ", np.average(loss_array))
#visualizing training accuracy (every 200th step to keep the plot readable)
plt.plot(accuracy_array[0::200])
plt.title('model accuracy')
plt.ylabel('')
plt.xlabel('')
plt.legend(['accuracy'], loc='upper left')
plt.savefig('accuracy.png')
#visualizing training loss
plt.clf()
plt.plot(loss_array[0::200])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('')
plt.savefig('loss.png')
|
991,593 | 142f8f96ba9f7d1fc12f8aba6acf9dbae7b0a954 | import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import pyomo.environ as po
import time
from datetime import datetime
import itertools
import sys
from optparse import OptionParser
import data
################ PLOTTING ################
class Timeslot():
    """A colored, titled time interval rendered on the day axis."""

    def __init__(self, start, end, color, title):
        # Plain value object: start/end hours, fill color, display title.
        self.start, self.end = start, end
        self.color, self.title = color, title
def applyTimeslots(offset, limit, timeslots, ax):
    """Draw each timeslot as a shaded, outlined box between x=offset and
    x=limit on axis `ax`, with its title centered inside."""
    # Horizontal midpoint is the same for every slot in this column.
    x_mid = float(limit - offset) / 2 + offset
    for slot in timeslots:
        ax.fill_between(
            [offset + 0.05, limit - 0.05],
            [slot.start + 0.05, slot.start + 0.05],
            [slot.end - 0.05, slot.end - 0.05],
            color=slot.color, edgecolor='k', linewidth=0.5, alpha=0.4
        )
        ax.text(x_mid, (slot.start + slot.end) / 2, slot.title,
                ha="center", va="center", fontsize=12)
def plotDay(timeslots):
    """Render a single day's timeslots in a standalone figure and show it."""
    fig_width, fig_height = 10, 8
    fig = plt.figure(figsize=(fig_width, fig_height))
    pad = 0.3
    ax = fig.add_subplot(111)
    ax.yaxis.grid()
    ax.set_ylabel("Time")
    # The x axis carries no information for a single day.
    ax.axes.get_xaxis().set_visible(False)
    # Earlier hours at the top.
    fig.gca().invert_yaxis()
    applyTimeslots(pad, fig_width - pad, timeslots, ax)
    ax.set_title("Day")
    plt.show()
def plotWeek(timeslotDays, dayStartAndEnd = None, title = None):
    """Render seven columns of timeslots, one per weekday (Monday first).

    Params:
        timeslotDays: iterable of per-day Timeslot lists.
        dayStartAndEnd: optional (start_hour, end_hour) y-axis bounds.
        title: optional figure title (defaults to "Week").
    """
    width = 15
    height = 8
    weekdays = 7
    fig = plt.figure(figsize=(width, height))
    margin = 0.3
    ax = fig.add_subplot(111)
    ax.yaxis.grid()
    if dayStartAndEnd is not None:  # idiom fix: was `!= None`
        (start, end) = dayStartAndEnd
        plt.yticks(range(start, end + 1))
        fig.gca().set_ylim([start, end])
    ax.set_ylabel("Time")
    #ax.axes.get_xaxis().set_visible(False)
    # Earlier hours at the top.
    fig.gca().invert_yaxis()
    dayWidth = width / weekdays
    def genDays():
        # BUG FIX: "Wensday" -> "Wednesday" (misspelled axis label).
        days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
        fig.gca().set_xlim([0, len(days)])
        for i, day in enumerate(days):
            # Tick positioned at the center of each day's column.
            yield (day, i * dayWidth + (dayWidth / 2))
    tickedDays = list(genDays())
    plt.xticks(list(map(lambda x: x[1], tickedDays)), list(map(lambda x: x[0], tickedDays)))
    for i, dayTimeslots in enumerate(timeslotDays):
        offset = i * dayWidth
        applyTimeslots(offset, offset + dayWidth, dayTimeslots, ax)
    if title is not None:
        ax.set_title(title)
    else:
        ax.set_title("Week")
    plt.show()
def plotFor(events, modelDataMapping, model, week, rooms, withBadSlots = False):
    """Collect the solved assignments x[e, r, h, d, w] for `week` into
    per-day Timeslot lists and render them with plotWeek.

    With withBadSlots=True, slots banned for a room (modelDataMapping.B[r])
    are additionally marked red.
    """
    timeslots = [ [] for i in range(7) ]
    color_map = {}
    # NOTE(review): TABLEAU_COLORS has 10 entries, so more than 10 events
    # raises IndexError here — confirm the event count is bounded.
    colorsAsList = list(mcolors.TABLEAU_COLORS.items())
    for i, k in enumerate(events):
        color_map[k] = colorsAsList[i]
    # Hour 0 in the model corresponds to 08:00 on the plot.
    dayStart = 8
    # plot data
    for e in modelDataMapping.E:
        for r in rooms:
            for (h, d, w) in modelDataMapping.P:
                if w != week:
                    continue
                s = set(modelDataMapping.B[r])
                namedEvent = modelDataMapping.namedEvents[e]
                value = po.value(model.x[e,r,h,d,w])
                if value != 0:
                    color = color_map[namedEvent[0]]
                    teachersThatCanTeachThis = [str(teacherIdx) for teacherIdx in modelDataMapping.courseIdToTeacherMapping[e]]
                    timeslots[d].append(Timeslot(h + dayStart, h + dayStart + namedEvent[1]["duration"], color, namedEvent[1]["id"] + "\n" + ", ".join(teachersThatCanTeachThis) + "\n" + str(r)))
                if withBadSlots:
                    if (h, d, w) in s:
                        timeslots[d].append(Timeslot(h + dayStart, h + dayStart + 1, "red", ""))
                # NOTE(review): this trailing continue is a no-op.
                continue
    roomStr = ", room ".join(list(map(lambda x: str(x), rooms)))
    plotWeek(timeslots, (7, 22), "Week " + str(week) + ", room " + roomStr)
def plotFile():
    """Load the pickled timetable model and plot its week-17 schedule for
    rooms 0-5 using the data/small dataset."""
    dataImporter = data.Data("data/small")
    import model
    slots = dataImporter.slots
    banned = dataImporter.banned
    events = dataImporter.events
    teachers = dataImporter.teachers
    students = dataImporter.students
    rooms = dataImporter.rooms
    # NOTE(review): this ConcreteModel is immediately replaced by the
    # unpickled model below — the fresh instance is never used.
    m = po.ConcreteModel("timetable")
    import cloudpickle
    with open('ttmilp_model.pkl', mode='rb') as file:
        m = cloudpickle.load(file)
    modelDataMapping = model.prepareModel(events, students, teachers, slots, banned, rooms)
    plotFor(events, modelDataMapping, m, week=17, rooms=[0,1,2,3,4,5])
# Script entry point: render the saved model's week-17 timetable.
if __name__ == "__main__":
    plotFile()
|
991,594 | 84051f672930f6737415535730f346c6e278e1f5 | #!/usr/bin/env python
# coding=utf-8
from handlers.index import IndexHandler
from handlers import wap
# Tornado URL route table: maps request paths to their handler classes.
app = [
    (r"/", IndexHandler),
    (r"/trade/wap/pay", wap.WapPayHandler),
    # Callback/notify paths are constants defined by the wap module
    # (payment-gateway contract), not hard-coded here.
    (wap.URL_WAP_PAY_CALLBACK, wap.WapPayCallbackHandler),
    (wap.URL_WAP_PAY_NOTIFY, wap.WapPayNotifyHandler),
    (r"/trade/wap/refund", wap.WapRefundHandler),
]
991,595 | c812871a0c63102522e99fa949cb2c8bf5beeba3 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cStringIO
import re
import ipaddr
from bob.forms import AutocompleteWidget
from django import forms
from django.conf import settings
from django.forms import formsets
from django.db.models import Q
from lck.django.common.models import MACAddressField
from powerdns.models import Record, Domain
from ralph.business.models import Venture, VentureRole
from ralph.deployment.models import Deployment, Preboot
from ralph.deployment.util import (
clean_hostname,
hostname_exists,
ip_address_exists,
is_mac_address_known,
network_exists,
preboot_exists,
rack_exists,
venture_and_role_exists,
)
from ralph.discovery.models import (
Device,
DeviceEnvironment,
DeviceType,
Network,
IPAddress,
ServiceCatalog,
)
from ralph.discovery.models_component import is_mac_valid
from ralph.dnsedit.models import DHCPEntry
from ralph.dnsedit.util import (
find_addresses_for_hostname,
get_domain,
get_revdns_records,
is_valid_hostname,
)
from ralph.ui.forms.devices import ServiceCatalogMixin
from ralph.ui.widgets import DeviceWidget
from ralph.util import Eth
from bob.csvutil import UnicodeReader
from ralph.ui.widgets import ReadOnlySelectWidget, ReadOnlyWidget
class DeploymentForm(ServiceCatalogMixin):
    """Form for deploying a single device.

    Seeds the MAC/IP autocomplete choices from the device's ethernet cards
    and their DHCP entries, and pre-fills venture, role, preboot and
    hostname from the device supplied in ``initial``.
    """
    class Meta:
        model = Deployment
        fields = [
            'device',
            'venture',
            'venture_role',
            'service',
            'device_environment',
            'mac',
            'ip',
            'hostname',
            'preboot',
        ]
        widgets = {
            'device': DeviceWidget,
            'mac': AutocompleteWidget,
            'ip': AutocompleteWidget,
        }
    def __init__(self, *args, **kwargs):
        super(DeploymentForm, self).__init__(*args, **kwargs)
        # The caller must supply the device in the form's initial data.
        device = self.initial['device']
        macs = [e.mac for e in device.ethernet_set.order_by('mac')]
        self.fields['mac'].widget.choices = [(mac, mac) for mac in macs]
        # all mac addresses have the same length - default sorting is enough
        dhcp_entries = DHCPEntry.objects.filter(mac__in=macs).order_by('mac')
        ips = [e.ip for e in dhcp_entries]
        self.fields['ip'].widget.choices = [(ip, ip) for ip in ips]
        # Fallback proposals when the device has no DHCP entries at all.
        proposed_mac = macs[0] if macs else ''
        proposed_ip = ips[0] if ips else ''
        # NOTE(review): dhcp_entries is already filtered by mac__in=macs, so
        # the condition always holds and the loop simply takes the first
        # DHCP entry — confirm that is the intended proposal.
        for dhcp_entry in dhcp_entries:
            if dhcp_entry.mac in macs:
                proposed_mac = dhcp_entry.mac
                proposed_ip = dhcp_entry.ip
                break
        self.initial.update({
            'mac': proposed_mac,
            'ip': proposed_ip,
            'venture': device.venture,
            'venture_role': device.venture_role,
            'preboot': (device.venture_role.get_preboot() if
                        device.venture_role else ''),
            'hostname': device.name,
        })
        self.fields['venture'].queryset = Venture.objects.order_by('name')
    def clean_hostname(self):
        """Normalize/validate the hostname via the shared deployment helper."""
        hostname = self.cleaned_data['hostname']
        return clean_hostname(hostname)
    def clean_ip(self):
        """Canonicalize the chosen IP address string."""
        ip = self.cleaned_data.get('ip')
        return str(ipaddr.IPAddress(ip))
def _validate_cols_count(
    expected_count, cols, row_number, allowed_oversize=0
):
    """Raise ValidationError unless the row has exactly the expected column
    count, or the expected count plus the allowed oversize."""
    acceptable = {expected_count, expected_count + allowed_oversize}
    if len(cols) in acceptable:
        return
    raise forms.ValidationError(
        "Incorrect number of columns (got %d, expected %d%s) at row %d" %
        (
            len(cols),
            expected_count,
            ' or %d' % (
                expected_count + allowed_oversize
            ) if allowed_oversize > 0 else '',
            row_number
        ),
    )
def _validate_cols_not_empty(cols, row_number):
    """Reject rows containing any blank (whitespace-only) column."""
    for col_number, col in enumerate(cols, start=1):
        if not col.strip():
            raise forms.ValidationError(
                "Empty value at row %d column %d" % (row_number, col_number)
            )
def _validate_mac(mac, parsed_macs, row_number):
    """Reject syntactically invalid or already-seen MAC addresses."""
    error = None
    if not is_mac_valid(Eth("", mac, "")):
        error = "Row %s: Invalid MAC address." % row_number
    elif mac in parsed_macs:
        error = (
            "Row %s: Duplicated MAC address. "
            "Please check previous rows..." % row_number
        )
    if error is not None:
        raise forms.ValidationError(error)
def _validate_management_ip(ip, row_number):
    """Ensure the management IP is a syntactically valid address."""
    try:
        ipaddr.IPAddress(ip)
    except ValueError:
        message = "Row %s: Incorrect management IP address." % row_number
        raise forms.ValidationError(message)
def _validate_network_name(network_name, row_number):
    """Ensure a network with this name is known to ralph."""
    if network_exists(network_name):
        return
    raise forms.ValidationError(
        "Row %s: Network doesn't exists." % row_number
    )
def _validate_venture_and_role(venture_symbol, venture_role, row_number):
    """Ensure the venture symbol / role combination exists."""
    if venture_and_role_exists(venture_symbol, venture_role):
        return
    raise forms.ValidationError(
        "Row %s: "
        "Couldn't find venture with symbol %s and role %s" % (
            row_number, venture_symbol, venture_role
        )
    )
def _validate_preboot(preboot, row_number):
    """Ensure the named preboot configuration exists."""
    if preboot_exists(preboot):
        return
    raise forms.ValidationError(
        "Row %s: Couldn't find preboot %s" % (row_number, preboot)
    )
def _validate_service(service_name, row_number):
    """Ensure a ServiceCatalog entry with this name exists."""
    if ServiceCatalog.objects.filter(name=service_name).exists():
        return
    raise forms.ValidationError(
        "Row %s: "
        "Couldn't find service with name %s" % (row_number, service_name)
    )
def _validate_environment(environment_name, row_number):
    """Ensure a DeviceEnvironment with this name exists."""
    if DeviceEnvironment.objects.filter(name=environment_name).exists():
        return
    raise forms.ValidationError(
        "Row %s: "
        "Couldn't find environment with name %s" % (row_number, environment_name)
    )
def _validate_deploy_children(mac, row_number):
    """Block deployment to a device that still has dependents.

    Raises ValidationError when the live device owning `mac` has undeleted
    child devices, exports server mounts to live devices, or exports disk
    shares still mounted by undeleted devices or servers.
    """
    mac = MACAddressField.normalize(mac)
    try:
        device = Device.admin_objects.get(ethernet__mac=mac)
    except Device.DoesNotExist:
        # Unknown MAC: nothing existing to conflict with.
        return
    if device.deleted:
        # Soft-deleted devices impose no constraints.
        return
    children = device.child_set.filter(deleted=False)
    if children.exists():
        raise forms.ValidationError(
            "Row %d: Device with MAC %s exists and has child devices "
            "[%s]. Delete the child devices first." % (
                row_number,
                mac,
                ', '.join(str(d) for d in children.all()),
            )
        )
    if device.servermount_set.filter(device__deleted=False).exists():
        raise forms.ValidationError(
            "Row %d: Device with MAC %s exists and exports shares." %
            (row_number, mac)
        )
    # A share counts as "in use" if mounted by any live device or server.
    for share in device.diskshare_set.all():
        if any((
            share.disksharemount_set.filter(device__deleted=False).exists(),
            share.disksharemount_set.filter(server__deleted=False).exists(),
        )):
            raise forms.ValidationError(
                "Row %d: Device with MAC %s exists and exports disks." %
                (row_number, mac)
            )
def _validate_asset_identity(identity, row_number, mac=None):
    """Check the asset sn/barcode exists and, when a MAC is given, that the
    asset is linked to the same device that owns the MAC.

    No-op when the ralph_assets app is not installed.
    """
    if 'ralph_assets' not in settings.INSTALLED_APPS:
        return  # nothing to do
    from ralph_assets.api_ralph import get_asset_by_sn_or_barcode
    asset = get_asset_by_sn_or_barcode(identity)
    if not asset:
        raise forms.ValidationError(
            "Row %d: Asset with sn or barcode `%s` does not exist." %
            (row_number, identity)
        )
    # Without a MAC, or with an asset not yet linked to a device, there is
    # no cross-check to perform.
    if not mac or not asset['device_id']:
        return
    mac = MACAddressField.normalize(mac)
    try:
        device = Device.admin_objects.get(ethernet__mac=mac)
    except Device.DoesNotExist:
        return
    if device.id != asset['device_id']:
        raise forms.ValidationError(
            "Row %d: Found asset by sn or barcode and found device by "
            "MAC address are not the same." % row_number
        )
class PrepareMassDeploymentForm(forms.Form):
    """First-stage mass-deployment form: validates a CSV with one device per
    row (mac ; management-ip ; network ; venture-symbol ; role ; service ;
    environment ; preboot [; asset sn/barcode])."""
    # Evaluated once at class-definition time.
    assets_enabled = 'ralph_assets' in settings.INSTALLED_APPS
    csv = forms.CharField(
        label="CSV",
        widget=forms.widgets.Textarea(attrs={'class': 'span12 csv-input'}),
        required=False,
        help_text=(
            "Template: mac ; management-ip ; network ; venture-symbol ; "
            "role ; service ; environment ; preboot{}".format(
                ' ; asset sn or barcode (not required)'
                if assets_enabled
                else ''
            )
        )
    )
    def clean_csv(self):
        """Validate every row of the pasted CSV; returns the lower-cased
        CSV string on success, raising ValidationError otherwise."""
        csv_string = self.cleaned_data['csv'].strip().lower()
        rows = UnicodeReader(cStringIO.StringIO(csv_string))
        parsed_macs = set()
        for row_number, cols in enumerate(rows, start=1):
            # The optional 9th column (asset) is only allowed with assets on.
            _validate_cols_count(
                8, cols, row_number, 1 if self.assets_enabled else 0
            )
            mac = cols[0].strip()
            _validate_mac(mac, parsed_macs, row_number)
            _validate_deploy_children(mac, row_number)
            parsed_macs.add(mac)
            management_ip = cols[1].strip()
            _validate_management_ip(management_ip, row_number)
            network_name = cols[2].strip()
            if not (is_mac_address_known(mac) and network_name == ''):
                # Allow empty network when the device already exists.
                _validate_network_name(network_name, row_number)
            venture_symbol = cols[3].strip()
            venture_role = cols[4].strip()
            _validate_venture_and_role(
                venture_symbol, venture_role, row_number,
            )
            service = cols[5].strip()
            _validate_service(service, row_number)
            environment = cols[6].strip()
            _validate_environment(environment, row_number)
            preboot = cols[7].strip()
            _validate_preboot(preboot, row_number)
            if self.assets_enabled and len(cols) == 9:
                asset_identity = cols[8].strip()
                _validate_asset_identity(asset_identity, row_number, mac)
        return csv_string
def _validate_hostname(hostname, mac, parsed_hostnames, row_number):
    """Validate a proposed hostname for the device owning `mac`.

    For an unknown MAC the hostname must be globally unused; for an existing
    device, every DNS A/PTR address found for the hostname must already be
    one of the device's own IPs. Also rejects hostnames with a running
    deployment or already seen earlier in this CSV.
    """
    mac = MACAddressField.normalize(mac)
    try:
        dev = Device.admin_objects.get(ethernet__mac=mac)
    except Device.DoesNotExist:
        # New device: the hostname must not exist anywhere yet.
        if hostname_exists(hostname):
            raise forms.ValidationError(
                "Row %s: Hostname already exists." % row_number
            )
    else:
        # Re-deploying an existing device: DNS must agree with its IPs.
        ip_addresses = list(
            dev.ipaddress_set.values_list('address', flat=True)
        )
        ip_addresses_in_dns = find_addresses_for_hostname(hostname)
        for ip in ip_addresses_in_dns:
            if ip not in ip_addresses:
                raise forms.ValidationError(
                    "Row %s: Using an old device %s failed. "
                    "Exists A or PTR records in DNS which are not assigned "
                    "to device IP addresses." % (row_number, dev)
                )
    if Deployment.objects.filter(hostname=hostname).exists():
        raise forms.ValidationError(
            "Row %s: Running deployment with hostname: %s already "
            "exists." % (row_number, hostname)
        )
    if hostname in parsed_hostnames:
        raise forms.ValidationError(
            "Row %s: Duplicated hostname. "
            "Please check previous rows..." % row_number
        )
def _validate_ip_address(ip, network, parsed_ip_addresses, row_number):
    """Ensure `ip` parses, lies inside `network`, and was not seen before."""
    try:
        ipaddr.IPAddress(ip)
    except ValueError:
        raise forms.ValidationError(
            "Row %s: Invalid IP address." % row_number
        )
    failure = None
    if ip not in network:
        failure = "Row %s: IP address is not valid for network %s." % (
            row_number, network.name
        )
    elif ip in parsed_ip_addresses:
        failure = (
            "Row %s: Duplicated IP address. "
            "Please check previous rows..." % row_number
        )
    if failure is not None:
        raise forms.ValidationError(failure)
def _validate_ip_owner(ip, mac, row_number):
    """If the MAC is unique, make sure the IP address is not used anywhere.

    If the MAC address belongs to an existing device, make sure the IP
    address also belongs to that device.
    """
    normalized_mac = MACAddressField.normalize(mac)
    try:
        owner = Device.admin_objects.get(ethernet__mac=normalized_mac)
    except Device.DoesNotExist:
        # Unknown MAC: the IP must be completely unused.
        if ip_address_exists(ip):
            raise forms.ValidationError(
                "Row %s: IP address already exists." % row_number
            )
    else:
        # Known MAC: no *other* device may hold this IP address.
        conflicting = Device.objects.filter(
            ipaddress__number=int(ipaddr.IPAddress(ip)),
        ).exclude(
            pk=owner.id,
        )
        if conflicting.exists():
            raise forms.ValidationError(
                "Row %s: IP address used by another device." % row_number
            )
class MassDeploymentForm(forms.Form):
    """Accepts a pasted CSV describing many machines to deploy at once.

    Each row describes one machine:
    hostname ; ip ; rack-sn ; mac ; management-ip ; network ;
    venture-symbol ; role ; service ; environment ; preboot
    plus an optional trailing asset sn/barcode column when the
    ``ralph_assets`` app is installed.
    """

    # Whether the optional 12th (asset sn/barcode) column is accepted.
    assets_enabled = 'ralph_assets' in settings.INSTALLED_APPS
    csv = forms.CharField(
        label="CSV",
        widget=forms.widgets.Textarea(attrs={'class': 'span12 csv-input'}),
        help_text=(
            "Template: hostname ; ip ; rack-sn ; mac ; management-ip ; "
            "network ; venture-symbol ; role ; service ; environment ; preboot{}".format(
                ' ; asset sn or barcode (not required)'
                if assets_enabled
                else ''
            )
        )
    )

    def clean_csv(self):
        """Parse and validate the pasted CSV.

        Returns a list of dicts (one per row) with resolved model
        instances, ready for the deployment machinery.

        Raises:
            forms.ValidationError: on the first invalid row.
        """
        csv_string = self.cleaned_data['csv'].strip().lower()
        rows = UnicodeReader(cStringIO.StringIO(csv_string))
        cleaned_csv = []
        # Values seen in earlier rows, used to reject duplicates.
        parsed_hostnames = set()
        parsed_ip_addresses = set()
        parsed_macs = set()
        for row_number, cols in enumerate(rows, start=1):
            _validate_cols_count(11, cols, row_number, 1)
            _validate_cols_not_empty(cols, row_number)
            mac = cols[3].strip()
            _validate_mac(mac, parsed_macs, row_number)
            parsed_macs.add(mac)
            hostname = cols[0].strip()
            _validate_hostname(hostname, mac, parsed_hostnames, row_number)
            if not clean_hostname(hostname):
                raise forms.ValidationError("Invalid hostname")
            parsed_hostnames.add(hostname)
            network_name = cols[5].strip()
            try:
                network = Network.objects.get(name=network_name)
            except Network.DoesNotExist:
                raise forms.ValidationError(
                    "Row %s: Network '%s' doesn't exists." %
                    (row_number, network_name)
                )
            rack_sn = cols[2].strip()
            # A purely numeric value is shorthand: expand it into the
            # full "Rack <number> <DATACENTER>" serial-number form.
            if re.match(r"^[0-9]+$", rack_sn):
                rack_sn = "Rack %s %s" % (
                    rack_sn,
                    network.data_center.name.upper(),
                )
            if not rack_exists(rack_sn):
                raise forms.ValidationError(
                    "Row %s: Rack with serial number '%s' doesn't exists." % (
                        row_number, rack_sn
                    )
                )
            # The rack must be attached to the requested network.
            try:
                network.racks.get(sn=rack_sn)
            except Device.DoesNotExist:
                raise forms.ValidationError(
                    "Row %s: Rack '%s' isn't connected to "
                    "network '%s'." % (row_number, rack_sn, network.name)
                )
            ip = cols[1].strip()
            _validate_ip_address(ip, network, parsed_ip_addresses, row_number)
            _validate_ip_owner(ip, mac, row_number)
            parsed_ip_addresses.add(ip)
            management_ip = cols[4].strip()
            _validate_management_ip(management_ip, row_number)
            # Resolve venture + role in one query; the role implies the
            # venture.
            try:
                venture_role = VentureRole.objects.get(
                    venture__symbol=cols[6].strip(),
                    name=cols[7].strip()
                )
                venture = venture_role.venture
            except VentureRole.DoesNotExist:
                raise forms.ValidationError(
                    "Row %s: "
                    "Couldn't find venture with symbol %s and role %s" % (
                        row_number, cols[6].strip(), cols[7].strip()
                    )
                )
            try:
                preboot = Preboot.objects.get(name=cols[10].strip())
            except Preboot.DoesNotExist:
                raise forms.ValidationError(
                    "Row %s: Couldn't find preboot %s" % (
                        row_number, cols[10].strip()
                    )
                )
            asset_identity = None
            # Optional trailing asset sn/barcode column.
            if self.assets_enabled and len(cols) == 12:
                asset_identity = cols[11].strip()
                _validate_asset_identity(asset_identity, row_number, mac)
            service = cols[8].strip()
            _validate_service(service, row_number)
            environment = cols[9].strip()
            _validate_environment(environment, row_number)
            cleaned_csv.append({
                'hostname': hostname,
                'ip': ip,
                'mac': mac,
                'rack_sn': rack_sn,
                'venture': venture,
                'venture_role': venture_role,
                'preboot': preboot,
                'management_ip': management_ip,
                'network': network,
                'asset_identity': asset_identity,
                'service': service,
                'device_environment': environment,
            })
        return cleaned_csv
class ServerMoveStep1Form(forms.Form):
    """First wizard step: collect a free-form list of servers to move."""

    addresses = forms.CharField(
        label="Server addresses",
        widget=forms.widgets.Textarea(attrs={'class': 'span12'}),
        help_text="Enter the IP addresses or hostnames to be moved, "
        "separated with spaces or newlines.",
    )

    @staticmethod
    def _get_address_candidates(address):
        # Classify the token: IP address, MAC address, or hostname.
        try:
            ip_address = str(ipaddr.IPAddress(address))
        except ValueError:
            ip_address = None
        try:
            mac = MACAddressField.normalize(address)
        except ValueError:
            mac = None
        if not mac:
            hostname = address
        if ip_address:
            candidates = IPAddress.objects.filter(address=ip_address)
        elif mac:
            dhcp_ips = {
                str(entry_ip)
                for entry_ip in DHCPEntry.objects.filter(
                    mac=mac,
                ).values_list('ip', flat=True)
            }
            candidates = IPAddress.objects.filter(address__in=dhcp_ips)
        else:
            candidates = IPAddress.objects.filter(
                Q(hostname=hostname) |
                Q(address__in=find_addresses_for_hostname(hostname))
            )
        # Only live (non-deleted) server-class devices qualify.
        server_types = {
            DeviceType.rack_server,
            DeviceType.blade_server,
            DeviceType.virtual_server,
            DeviceType.unknown,
        }
        return candidates.filter(
            device__deleted=False,
            device__model__type__in=server_types,
        )

    def clean_addresses(self):
        addresses = self.cleaned_data['addresses']
        for token in addresses.split():
            if not self._get_address_candidates(token).exists():
                raise forms.ValidationError(
                    "No server found for %s." % token,
                )
        return addresses
def _check_move_address(address):
    """Raise ValidationError unless *address* belongs to a live server."""
    server_types = {
        DeviceType.rack_server,
        DeviceType.blade_server,
        DeviceType.virtual_server,
        DeviceType.unknown,
    }
    is_server_address = IPAddress.objects.filter(
        device__deleted=False,
        device__model__type__in=server_types,
    ).filter(address=address).exists()
    if not is_server_address:
        raise forms.ValidationError(
            "No server found for %s." % address,
        )
class ServerMoveStep2Form(forms.Form):
    """Second wizard step: pick a target network for one address."""

    address = forms.ChoiceField()
    network = forms.ChoiceField()

    def clean_address(self):
        chosen_address = self.cleaned_data['address']
        _check_move_address(chosen_address)
        return chosen_address

    def clean_network(self):
        chosen_network_id = self.cleaned_data['network']
        if not Network.objects.filter(id=chosen_network_id).exists():
            raise forms.ValidationError("Invalid network.")
        return chosen_network_id
class ServerMoveStep2FormSetBase(formsets.BaseFormSet):
    """Populates per-form address/network choices for wizard step two."""

    def add_fields(self, form, index):
        network_field = form.fields['network']
        network_field.choices = [
            (net.id, net.name) for net in Network.objects.order_by('name')
        ]
        network_field.widget.attrs = {
            'class': 'span12',
        }
        if self.initial:
            candidates = self.initial[index]['candidates']
        else:
            # Fall back to the value submitted for this form.
            submitted_key = '%s-%d-address' % (self.prefix, index)
            candidates = {form.data[submitted_key]}
        address_field = form.fields['address']
        address_field.widget.attrs = {
            'class': 'span12',
        }
        if len(candidates) == 1:
            # A single candidate needs no real choice: show it read-only.
            address_field.widget = ReadOnlySelectWidget()
        address_field.choices = [(ip, ip) for ip in candidates]
        return super(ServerMoveStep2FormSetBase, self).add_fields(form, index)
# Formset for wizard step two; extra=0 so no blank forms are appended.
ServerMoveStep2FormSet = formsets.formset_factory(
    form=ServerMoveStep2Form,
    formset=ServerMoveStep2FormSetBase,
    extra=0,
)
class ServerMoveStep3Form(forms.Form):
    """Third wizard step: confirm the new IP and hostname for one server."""

    address = forms.CharField(widget=ReadOnlyWidget())
    new_ip = forms.CharField()
    new_hostname = forms.CharField()

    def clean_address(self):
        address = self.cleaned_data['address']
        _check_move_address(address)
        return address

    def clean_new_ip(self):
        """Validate the target IP: it must parse, have a reverse-DNS
        domain, and not already be taken in DNS/DHCP or by another device.
        """
        # May be None if the address field failed its own validation.
        old_ip = self.cleaned_data.get('address')
        new_ip = self.cleaned_data['new_ip']
        try:
            new_ip = str(ipaddr.IPAddress(new_ip))
        except ValueError:
            raise forms.ValidationError("Malformed IP address.")
        # Reverse-DNS zone for the /24, e.g. 10.1.2.3 -> 2.1.10.in-addr.arpa
        rdomain = '.'.join(
            list(reversed(new_ip.split('.')))[1:]
        ) + '.in-addr.arpa'
        if not Domain.objects.filter(name=rdomain).exists():
            raise forms.ValidationError("No RevDNS domain for address.")
        try:
            ipaddress = IPAddress.objects.get(address=new_ip)
        except IPAddress.DoesNotExist:
            # Not tracked here, but it may still exist in DNS or DHCP.
            if Record.objects.filter(content=new_ip).exists():
                raise forms.ValidationError("Address already in DNS.")
            if get_revdns_records(new_ip).exists():
                raise forms.ValidationError("Address already in DNS.")
            if DHCPEntry.objects.filter(ip=new_ip).exists():
                raise forms.ValidationError("Address already in DHCP.")
        else:
            if ipaddress.device and not ipaddress.device.deleted:
                if not old_ip:
                    raise forms.ValidationError("Address in use.")
                # The new IP may only be "in use" by the device we move.
                device = Device.objects.get(ipaddress__address=old_ip)
                if ipaddress.device.id != device.id:
                    raise forms.ValidationError(
                        "Address used by %s" % device,
                    )
        return new_ip

    def clean_new_hostname(self):
        """Validate the target hostname analogously to clean_new_ip."""
        old_ip = self.cleaned_data.get('address')
        new_hostname = self.cleaned_data['new_hostname']
        if not is_valid_hostname(new_hostname):
            raise forms.ValidationError("Invalid hostname")
        try:
            get_domain(new_hostname)
        except Domain.DoesNotExist:
            raise forms.ValidationError("Invalid domain")
        try:
            ipaddress = IPAddress.objects.get(hostname=new_hostname)
        except IPAddress.DoesNotExist:
            if find_addresses_for_hostname(new_hostname):
                raise forms.ValidationError("Hostname already in DNS.")
        else:
            if ipaddress.device and not ipaddress.device.deleted:
                if not old_ip:
                    raise forms.ValidationError("Hostname in use.")
                # Only the device being moved may already hold the name.
                device = Device.objects.get(ipaddress__address=old_ip)
                if ipaddress.device.id != device.id:
                    raise forms.ValidationError(
                        "Hostname used by %s" % device,
                    )
            elif Record.objects.filter(name=new_hostname).exists():
                raise forms.ValidationError("Hostname already in DNS.")
        return new_hostname
class ServerMoveStep3FormSetBase(formsets.BaseFormSet):
    """Formset-level check: new IPs and hostnames must be unique."""

    def clean(self):
        if any(self.errors):
            # Per-form errors already exist; skip cross-form checks.
            return
        seen_ips = set()
        seen_hostnames = set()
        for form in self.forms:
            new_ip = form.cleaned_data['new_ip']
            if new_ip in seen_ips:
                form._errors['new_ip'] = form.error_class([
                    "Duplicate IP"
                ])
            else:
                seen_ips.add(new_ip)
            new_hostname = form.cleaned_data['new_hostname']
            if new_hostname in seen_hostnames:
                form._errors['new_hostname'] = form.error_class([
                    "Duplicate hostname"
                ])
            else:
                seen_hostnames.add(new_hostname)
# Formset for wizard step three; extra=0 so no blank forms are appended.
ServerMoveStep3FormSet = formsets.formset_factory(
    form=ServerMoveStep3Form,
    formset=ServerMoveStep3FormSetBase,
    extra=0,
)
|
991,596 | f6f646fd2122f394ae9e034cbcdd413d953c4741 | from dataclasses import dataclass, field
from typing import Optional
from .derived_view_structure import DerivedViewStructure
from .destination_display_ref import DestinationDisplayRef
from .fare_scheduled_stop_point_ref import FareScheduledStopPointRef
from .multilingual_string import MultilingualString
from .scheduled_stop_point_ref import ScheduledStopPointRef
from .topographic_place_view import TopographicPlaceView
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class GroupOfServicesEndPointDerivedViewStructure(DerivedViewStructure):
    """Derived view of a GroupOfServices end point (NeTEx XML binding).

    Each field's ``metadata`` maps the attribute onto its XML element in
    the NeTEx namespace. Style is consistent with xsdata-generated
    bindings — presumably generated; do not hand-edit field metadata
    without regenerating.
    """

    class Meta:
        # XML type name differs from the Python class name.
        name = "GroupOfServicesEndPoint_DerivedViewStructure"

    # Display name of the end point.
    name: Optional[MultilingualString] = field(
        default=None,
        metadata={
            "name": "Name",
            "type": "Element",
            "namespace": "http://www.netex.org.uk/netex",
        }
    )
    # XSD choice: exactly one of the two stop-point reference elements.
    fare_scheduled_stop_point_ref_or_scheduled_stop_point_ref: Optional[object] = field(
        default=None,
        metadata={
            "type": "Elements",
            "choices": (
                {
                    "name": "FareScheduledStopPointRef",
                    "type": FareScheduledStopPointRef,
                    "namespace": "http://www.netex.org.uk/netex",
                },
                {
                    "name": "ScheduledStopPointRef",
                    "type": ScheduledStopPointRef,
                    "namespace": "http://www.netex.org.uk/netex",
                },
            ),
        }
    )
    # Optional reference to the destination display for this end point.
    destination_display_ref: Optional[DestinationDisplayRef] = field(
        default=None,
        metadata={
            "name": "DestinationDisplayRef",
            "type": "Element",
            "namespace": "http://www.netex.org.uk/netex",
        }
    )
    # Optional view of the topographic place the end point lies in.
    topographic_place_view: Optional[TopographicPlaceView] = field(
        default=None,
        metadata={
            "name": "TopographicPlaceView",
            "type": "Element",
            "namespace": "http://www.netex.org.uk/netex",
        }
    )
|
991,597 | 9a69d42331e666754254ee28ac069c14a256edb0 | from pymongo import MongoClient
import pymongo
def to_mongo(list_dic):
    """Insert each dict from *list_dic* into the ``menú`` collection of the
    ``proyecto`` database, skipping documents whose ``Nombre`` already exists.

    Params:
    -------
    list_dic: list of dicts, each containing at least a "Nombre" key.

    Returns:
    --------
    List of pymongo InsertOneResult objects for the documents inserted.
    """
    # Bug fix: the original `assert (type(list_dic == list))` evaluated
    # `type(bool)`, which is always truthy — it never validated anything.
    assert isinstance(list_dic, list)
    # SECURITY: credentials are hard-coded in the URI; move them to an
    # environment variable or config file before sharing/deploying.
    client = pymongo.MongoClient("mongodb+srv://m001-student:m001-mongodb-basics@sandbox.q0fpj.mongodb.net/<proyecto>?retryWrites=true&w=majority")
    database = client.proyecto  # target database
    colection = database.menú  # target collection
    inserted = []
    for dictionary in list_dic:
        # De-duplicate on "Nombre": insert only when no match exists.
        find = colection.find_one({"Nombre": dictionary['Nombre']})
        if find is None:
            insert = colection.insert_one(dictionary)
            inserted.append(insert)
            print(insert.inserted_id)
    return inserted
991,598 | 849e3f358a4e798b2f7ae3ea484962435a57144d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-07-16 01:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0030_auto_20200709_0223'),
]
operations = [
migrations.AlterField(
model_name='pdf_file',
name='gradd',
field=models.CharField(default='No Grade yet', max_length=100),
),
migrations.AlterField(
model_name='student',
name='matric_no',
field=models.CharField(default='empty', max_length=10),
),
]
|
# For each of n points, print the 1-based index of the nearest of m
# checkpoints by Manhattan distance (ties -> lowest checkpoint index).
n, m = map(int, input().split())
ab = [list(map(int, input().split())) for _ in range(n)]
cd = [list(map(int, input().split())) for _ in range(m)]
for x, y in ab:
    # min() returns the first minimum, matching the strict-< tie-break.
    nearest = min(
        range(m),
        key=lambda j: abs(x - cd[j][0]) + abs(y - cd[j][1]),
    )
    print(nearest + 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.