index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,900 | 09c9bb2d3f9f6ca33e240337992f37fe3d9ee088 | import sys
# Re-rank a TREC-style run file: multiply each neural score by the matching
# pre-ranking score and write out the combined run.
# argv: 1 = neural run file, 2 = output run file, 3 = pre-ranked run/qrel file
filepath = sys.argv[1]
filepath_out = sys.argv[2]
preranked_file = sys.argv[3]
preranked = [] # (topic, doc id, score) score is 0,1 if qrel
with open(preranked_file, 'r') as inputFile:
    for line in inputFile:
        parts = line.split()
        # columns: topic, _, docid, _, score (TREC run format)
        preranked.append((int(parts[0]), parts[2].strip(), parts[4].strip()))
# data maps topic id -> list of (neural score, doc id)
data = {}
with open(filepath, 'r') as inputFile:
    for line in inputFile:
        parts = line.split()
        topicId = int(parts[0])
        docId = parts[2]
        score = float(parts[3])
        if topicId not in data:
            data[topicId] = []
        data[topicId].append((score, docId))
# prerank * neural score
# i_rank walks `preranked` in lockstep with the neural run, assuming both
# files list the same docs in the same order per topic; on mismatch it
# resynchronises (skip-ahead) or falls back to a linear search.
# NOTE(review): `list` and (below) `tuple` shadow builtins -- works, but rename
# if this script is ever extended.
i_rank = 0
for topic in sorted(data):
    list = data[topic]
    # print(list)
    for i in range(len(list)):
        score, docId = list[i]
        preranked_item = preranked[i_rank]
        if preranked_item[0] != topic:
            print('fixing topic unequal', preranked_item[0], topic)
            if preranked_item[0] < topic:
                # skip pre-ranked entries for topics missing from the neural run
                while preranked[i_rank][0] < topic:
                    i_rank += 1
                preranked_item = preranked[i_rank]
        if preranked_item[0] != topic or preranked_item[1] != docId:
            # out-of-order doc: O(n) fallback lookup by (topic, docId)
            print('aaahh!!', topic, preranked_item[0], docId, preranked_item[1])
            preranked_score = next(item[2] for item in preranked if item[0] == topic and item[1] == docId)
            # print(preranked_score)
        else:
            preranked_score = preranked_item[2] #next(item[2] for item in preranked if item[0] == topic and item[1] == docId)
        #preranked_score = filter(lambda item: item[0] == topic and item[1] == docId, preranked)
        list[i] = (score*float(preranked_score), docId)
        i_rank += 1
# emit a TREC run file sorted by combined score, run tag "drmm"
with open(filepath_out, 'w') as outFile:
    for topic in sorted(data):
        # i = 0
        for tuple in sorted(data[topic], reverse=True):
            # if i == 100:
            #     break
            outFile.write(str(topic)+'\tQ0\t'+tuple[1]+'\t'+str(tuple[0])+'\tdrmm\n')
            # i += 1
|
989,901 | 73f10ac155c1717dfdf19e8e10c3ef14b00e3ccb | def bubble_sort(array):
    # In-place bubble sort, ascending; returns the same (mutated) list.
    n = len(array)
    for i in range(n - 1):
        # after pass i, the largest i+1 values are settled at the end
        for j in range(n - 1 - i):
            if array[j + 1] < array[j]:
                # swap adjacent out-of-order neighbours
                array[j + 1], array[j] = array[j], array[j + 1]
    return array
a = [1, 2, 1, 3]
print(bubble_sort(a))
def quicksort(array):
    """Return a new ascending-sorted list using recursive quicksort.

    The last element is used as the pivot.  The input list is NOT modified
    (the previous version popped the pivot off the caller's list, silently
    shortening it by one element per top-level call).

    :param array: list of mutually comparable values
    :return: new sorted list
    """
    if len(array) <= 1:
        return list(array)
    pivot = array[-1]
    lesser, greater = [], []
    for value in array[:-1]:
        if value > pivot:
            greater.append(value)
        else:
            lesser.append(value)
    return quicksort(lesser) + [pivot] + quicksort(greater)
a = [1, 2, 1, 3]
print(quicksort(a))
def binary_search_it(sorted_array, target):
    """Iterative binary search over a sorted list.

    :param sorted_array: list sorted in ascending order
    :param target: value to look for
    :return: an index where `target` occurs, or -1 if absent

    Bug fix: the previous version initialised `hi = n` (half-open bound)
    while updating with `hi = mid - 1` (closed bound) and looping on
    `lo < hi`, which could skip elements -- e.g. it failed to find 1 in
    [1, 2].  Closed-interval bounds are used consistently now.
    """
    lo = 0
    hi = len(sorted_array) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2  # midpoint without lo+hi overflow concerns
        if target == sorted_array[mid]:
            return mid
        elif target < sorted_array[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
sa = [1, 1, 3, 4, 4, 4, 4, 5, 6]
print(binary_search_it(sa, 4))
|
989,902 | abaad1060835d3edbf82ae8aeeefa94fd4541efe | import os.path
from pathlib import Path
import googlephotos_helper
import settings
def upload_to_google_photos(pics):
    # Upload the given pictures to Google Photos via googlephotos_helper.
    # `pics` is presumably a sequence of (data, file_name) pairs like the one
    # save_to_local receives -- TODO confirm against googlephotos_helper.upload_pics.
    print(f'Uploading to google photos...')
    if len(pics) > 0:
        session = googlephotos_helper.get_session()
        # Upload oldest pictures first (hence reversed) so the resulting
        # ordering in Google Photos looks right.
        googlephotos_helper.upload_pics(session, reversed(pics))
        print(f'Uploading to google photos completed.')
    else:
        print(f'No photos.')
def save_to_local(pics):
    """Write each picture to disk under settings.local_pict_dir_name.

    :param pics: iterable of (data, file_name) pairs, where `data` is bytes

    Improvements: the target directory is created once up front with
    exist_ok=True (the old code re-checked existence on every loop
    iteration, which was both wasteful and race-prone), and paths are
    joined with pathlib instead of string concatenation.
    """
    print(f'Saving to local files...')
    target_dir = Path(settings.local_pict_dir_name)
    target_dir.mkdir(parents=True, exist_ok=True)
    for data, file_name in pics:
        (target_dir / file_name).write_bytes(data)
    print(f'Saving completed.')
|
989,903 | dab0a91a19e2a4b42deea6b066724286f6112c4a | #선택정렬
# Selection sort demo: fill a list with 100 random ints in [1, 100],
# then sort it in place.
from random import randint
array =[]
for j in range(0, 100):
    array_num = randint(1, 100)
    array.append(array_num)
for i in range(len(array)):
    min_index = i  # index of the smallest element in array[i:]
    for j in range(i+1, len(array)):
        if array[min_index] > array[j]:
            min_index = j
    array[min_index], array[i] = array[i], array[min_index] # swap
print(array)
|
989,904 | 9d60c2ae1c08f144030d959d354735ef285fd61f | name = 'Da Woon CHAE'
age = 23 # not a lie
height_cm = 176 # centimeter
cm_to_inch = 1.0 / 2.54
height_inch = height_cm * cm_to_inch
weight_kg = 72 # kilogram
eyes = 'Black'
teeth = 'White'
hair = 'Black'
print "Let's talk about %s." % name
print "He's %g inches tall." % height_cm
print "He's %g inches tall." % height_inch
print "He's %d pounds heavy." % weight_kg
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %g, and %d I get %g." % (age, height_cm, weight_kg, age + height_cm + weight_kg) |
989,905 | c15c30a4fca05a2887beba2a4efa325bfce4d151 | # -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following lines in the
[options.entry_points] section in setup.cfg:
console_scripts =
fibonacci = ideuy.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
import argparse
import logging
import sys
from ideuy import __version__
from ideuy.download import download_images_from_grid_vector
__author__ = "Damián Silvani"
__copyright__ = "Damián Silvani"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def parse_args(args):
    """Turn a raw argv list into an :obj:`argparse.Namespace`.

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    p = argparse.ArgumentParser(
        description="Downloads image products from IDEuy",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    p.add_argument("grid_vector", help="path to grid vector")
    p.add_argument("-t", "--type",
                   default="national",
                   choices=["national", "urban"],
                   help="type of grid")
    p.add_argument("-p", "--product-type",
                   default="rgb_8bit",
                   choices=["rgbi_16bit", "rgbi_8bit", "rgb_8bit"],
                   help="product type")
    p.add_argument("-o", "--output-dir", default=".", help="output dir")
    p.add_argument("-j", "--num-jobs",
                   default=1,
                   type=int,
                   help="number of simultaneous download threads")
    p.add_argument("--version",
                   action="version",
                   version="ideuy {ver}".format(ver=__version__))
    p.add_argument("-v", "--verbose",
                   dest="loglevel",
                   help="set loglevel to INFO",
                   action="store_const",
                   const=logging.INFO)
    p.add_argument("-vv", "--very-verbose",
                   dest="loglevel",
                   help="set loglevel to DEBUG",
                   action="store_const",
                   const=logging.DEBUG)
    return p.parse_args(args)
def setup_logging(loglevel):
    """Configure root logging to stdout.

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    fmt = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format=fmt,
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls.

    Args:
        args ([str]): command line parameter list
    """
    parsed = parse_args(args)
    setup_logging(parsed.loglevel)
    download_images_from_grid_vector(
        grid_vector=parsed.grid_vector,
        output_dir=parsed.output_dir,
        type_id=parsed.type,
        product_type_id=parsed.product_type,
        num_jobs=parsed.num_jobs,
    )
def run():
    """Entry point for console_scripts
    """
    # Forward the real process argv (minus the program name) to main().
    main(sys.argv[1:])
if __name__ == "__main__":
    run()
|
989,906 | 598badfa716e489f2a6877b4c1450f7ca6b0a335 | with open("day-08.txt") as f:
notes = [line.split(" | ") for line in f.read().rstrip().splitlines()]
patterns = []
outputs = []
# Each note line is "ten signal patterns | four output digits"; canonicalise
# every token by sorting its letters so equal segment sets compare equal.
for p, o in notes:
    patterns.append([tuple(sorted(s)) for s in p.split(maxsplit=9)])
    outputs.append([tuple(sorted(s)) for s in o.split(maxsplit=3)])
def make_mask(digit):
    """Pack a digit's segment letters ('a'..'g') into a 7-bit integer mask."""
    # Distinct letters map to distinct bits, so summing equals OR-ing.
    return sum(1 << (ord(ch) - ord("a")) for ch in set(digit))
# AoC 2021 day 8 part 2: for each note, learn the masks of the uniquely-sized
# digits (1, 4, 7, 8), then classify each output digit by segment-count plus
# bitmask set relations against those known masks.
masks = {}
ans = 0
for pat, out in zip(patterns, outputs):
    # First pass: digits 1/7/4/8 are identified by their segment count alone.
    for digit in pat:
        mask = make_mask(digit)
        if len(digit) == 2:
            masks[1] = mask
        elif len(digit) == 3:
            masks[7] = mask
        elif len(digit) == 4:
            masks[4] = mask
        elif len(digit) == 7:
            masks[8] = mask
    # Second pass: decode the 4-digit output value.
    for i, digit in enumerate(out):
        num = 0
        if len(digit) == 2:
            num = 1
        elif len(digit) == 3:
            num = 7
        elif len(digit) == 4:
            num = 4
        elif len(digit) == 7:
            num = 8
        else:
            mask = make_mask(digit)
            if len(digit) == 5:
                # five segments: one of 2, 3, 5
                if mask & masks[7] == masks[7]:
                    num = 3
                elif mask | masks[7] | masks[4] == masks[8]:
                    num = 2
                else:
                    num = 5
            else:
                # six segments: one of 0, 6, 9 (0 is the fall-through num = 0)
                if mask & masks[4] == masks[4]:
                    num = 9
                elif mask & masks[4] & masks[7] != masks[1]:
                    num = 6
        ans += num * 10 ** (3 - i)
print(ans)
|
989,907 | c0d8fa17087bd12ee9bd1fb9cf58dd0cc3d5bf6c | from scipy.io.wavfile import read
import matplotlib.pyplot as plt
# read audio samples
# Interactive demo: ask the user which recording to inspect, then plot the
# first 1024 samples of the chosen WAV file.
print("Enter the digit for the different Sound wave")
i=input('Enter your choice')
# Default recording; overwritten below when the choice is 1-4.
input_data = read("Sound/leftByAnkur1.wav")
if i== '1':
    input_data = read("Sound/leftByAnkur1.wav")
elif i== '2':
    input_data = read("Sound/leftByAnkur2.wav")
elif i== '3':
    input_data = read("Sound/leftByAnkur3.wav")
elif i== '4':
    input_data = read("Sound/leftByAnkur4.wav")
else:
    print("Sorry you don't have option other than this")
# scipy's read() returns (sample_rate, data); keep only the samples.
audio = input_data[1]
# plot the first 1024 samples
plt.plot(audio[0:1024])
# label the axes
plt.ylabel("Amplitude")
plt.xlabel("Time (samples)")
# set the title
plt.title("Left Sample " + i)
# display the plot
plt.show() |
989,908 | 20eae8ab1bc6a1242d548efb8a9ed509ea8398ef | from math import pi
from math import sqrt
import sys
# Circle calculator: radius comes from argv, the user then picks which
# derived value to print.  NOTE(review): `sqrt` is imported but unused.
r = float(sys.argv[1])
d = 2 * r
p = 2 * pi * r
a = pi * r **2
soluciones = input ("Elige una de las siguientes soluciones (diámetro(d), perímetro(p), área(a) o salir(s): ")
# Independent ifs (not elif): each accepts the full word, the unaccented
# spelling, or the single-letter shortcut.
if soluciones == "diámetro" or soluciones == "diametro" or soluciones == "d":
    print(d)
if soluciones == "perímetro" or soluciones == "perimetro" or soluciones == "p":
    print(p)
if soluciones == "área" or soluciones == "area" or soluciones == "a":
    print(a)
if soluciones == "salir" or soluciones == "s":
    print("Saliendo del programa...")
|
989,909 | fc110134e48dcaa8bbc5d8284108f8d3e397c762 | import time
import beeper
# Countdown timer: repeatedly ask for a number of seconds, count down with a
# one-second tick, then beep; "q" quits.
seconds = input("how many seconds? [q for quit] ")
while seconds != "q":
    # NOTE(review): non-numeric input other than "q" raises ValueError here.
    seconds = int(seconds)
    while seconds > 0:
        print(seconds, "remaining")
        time.sleep(1)
        seconds -= 1
    print("beep beep beep")
    beeper.beep(3)  # beeper is a project-local module
    seconds = input("how many seconds? [q for quit] ")
|
989,910 | b79d749f3dcb893303f9148c6cec8540b49c0ce2 | from typing import Callable, List, Union
import random
import cv2
import numpy as np
from vcap.caching import cache
class DetectionPrediction:
    def __init__(self, class_name: str,
                 class_id: int,
                 rect: List[Union[int, float]],
                 confidence: float):
        """An object detection label. Basically a bounding box classifying
        some portion of an image.
        :param class_name: The class name that was predicted
          (Price_Tag, Shelf_Empty, Person)
        :param class_id: The class number used by the network
        :param rect: [xmin, ymin, xmax, ymax] format
        :param confidence: The confidence in percent from 0-1, representing
          how confident the network is with the prediction
        """
        self._color = None  # lazily generated per-class color, see `color`
        # The full rect, (x1, y1, x2, y2) format
        self.rect = tuple(rect)
        self.name = str(class_name)
        self.confidence = float(confidence)
        self.class_num = int(class_id)
        assert 0. <= self.confidence <= 1, "Confidence must be between 0 and 1!"
    def __str__(self):
        return (str(self.name) + " " +
                str(self.rect) + " " +
                str(round(self.confidence, 2)))
    def __repr__(self):
        return self.__str__()
    @property
    def p1(self):
        """Top left of the rect, (xmin, ymin)"""
        return tuple(self.rect[:2])
    @property
    def p2(self):
        """Bottom right of the rect, (xmax, ymax).

        (The previous docstring incorrectly called this the top right.)
        """
        return tuple(self.rect[2:])
    @property
    def color(self):
        if self._color is None:
            # Seed with the class name so every instance of the same class
            # gets the same (deterministic) color
            rand_seed = random.Random(self.name)
            self._color = tuple(rand_seed.randint(0, 255) for _ in range(3))
        return self._color
class ClassificationPrediction:
    def __init__(self,
                 class_scores: np.ndarray,  # floats
                 class_names: List[str]):
        """A classification of an entire image.
        :param class_scores: A list of scores for each class
        :param class_names: A list of class names, same length as class scores.
          Each index corresponds to an index in class_scores
        """
        self.class_scores = class_scores
        self.class_names = class_names
    def __iter__(self):
        """Iterate over class_name and class_score"""
        for name, score in zip(self.class_names, self.class_scores):
            # Ensure that no numpy floats get returned (causing bad errors)
            yield str(name), float(score)
    def __str__(self):
        return f"{self.name} {self.confidence:.2f}"
    def __repr__(self):
        return self.__str__()
    # NOTE(review): `cache` comes from vcap.caching; presumably it memoizes
    # the property per instance -- confirm it does not share state across
    # instances before relying on it.
    @property
    @cache
    def name(self) -> str:
        """The class name that was predicted with the highest confidence"""
        return self.class_names[self.class_num]
    @property
    @cache
    def class_num(self) -> int:
        """The class number used by the network for the class with the highest
        confidence"""
        return int(np.argmax(self.class_scores))
    @property
    @cache
    def confidence(self) -> float:
        """The confidence in percent from 0-1, representing how confident the
        network is with its highest confidence prediction"""
        return float(self.class_scores[self.class_num])
class EncodingPrediction:
    """An encoding (embedding) vector produced by a network.

    :param encoding_vector: The encoding prediction
    :param distance_func: A function with args (encoding, other_encodings)
      returning the distance from one encoding to all the other_encodings
    """

    def __init__(self, encoding_vector: np.ndarray,
                 distance_func: Callable):
        self.vector = encoding_vector
        self.distance = distance_func
class DensityPrediction:
    """A density map prediction; `count` holds the sum over the whole map."""

    def __init__(self, density_map):
        self.map = density_map
        self.count = np.sum(density_map)

    def __str__(self):
        return str(self.count)

    def __repr__(self):
        return str(self)

    def resized_map(self, new_size):
        """Return the density map resized to `new_size` (width, height),
        rescaled so that the resized map still sums to `self.count`.
        """
        resized = cv2.resize(self.map.copy(), new_size)
        total = np.sum(resized)
        # Avoid dividing by zero on an all-zero map
        if total == 0:
            return resized
        resized *= self.count / total
        return resized
class SegmentationPrediction:
    def __init__(self, segmentation, label_map):
        """
        :param segmentation: 2d segmented image where each value is a label num
        :param label_map: Formatted as {"1": {"label": "chair", "color": [12, 53, 100]}}
        """
        self.segmentation = segmentation
        self.label_map = label_map

    def colored(self):
        """Render the segmentation as an RGB image using label_map colors."""
        height, width = self.segmentation.shape[:2]
        color_img = np.zeros((height, width, 3), dtype=np.uint8)
        # Paint every pixel belonging to each label with that label's color
        for label_num, label in self.label_map.items():
            selected = self.segmentation == int(label_num)
            color_img[selected] = np.array(label["color"], dtype=np.uint8)
        return color_img
class DepthPrediction:
    def __init__(self, depth_prediction):
        """
        :param depth_prediction: 2d image where the value is the depth
        """
        self.depth_prediction = depth_prediction
    def normalized(self):
        """Return the depth map as a false-colored uint8 image for display.

        (The previous docstring was copy-pasted from the segmentation class.)
        """
        # Scale results so the farthest point maps to 255
        max_distance = np.max(self.depth_prediction)
        if max_distance == 0:
            # All-zero depth map: skip the scaling to avoid dividing by zero
            pred = np.zeros_like(self.depth_prediction)
        else:
            pred = 255 * self.depth_prediction // max_distance
        # Convert results to uint8
        pred = pred.astype(np.uint8, copy=True)
        # Do fancy coloring
        pred = cv2.applyColorMap(pred, cv2.COLORMAP_JET)
        return pred
|
989,911 | f813a1f39db7fc32ad0b33967f6bbaad5c163877 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import print_function
import os
import numpy as np
import cv2
import os
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
sys.stderr = stderr
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import model_from_json
from keras.models import Sequential
from keras import losses
from keras.layers import Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.optimizers import Adam
from collections import Counter
import matplotlib.pyplot as plt
import librosa
import librosa.display
# import os
from os.path import isfile
import shutil
import gc
import warnings
import numpy as np
# import sys
warnings.filterwarnings("ignore")
# In[2]:
def get_length(path):
    """Return the duration in seconds of the audio file at `path`."""
    samples, sample_rate = librosa.load(path)
    duration = librosa.get_duration(y=samples, sr=sample_rate)
    gc.collect()  # release the decoded audio buffer promptly
    return duration
# In[3]:
def save_wav(data_path, path_png):
    # Render the first 10 s of the audio file as a magnitude-spectrogram PNG
    # saved to `path_png` + "vocal" (axes hidden, fixed figure size matching
    # the 504x216 input expected by load_data).
    # length = get_length(data_path)
    # for offset in range(0,int(length),num):
    fig = plt.figure(figsize=(5.04, 2.16))
    ax = plt.gca()
    ax.axis('off')
    # load type
    y, sr = librosa.load(data_path,offset=0, duration=10)
    S_full, phase = librosa.magphase(librosa.stft(y))
    librosa.display.specshow(librosa.amplitude_to_db(S_full, ref=np.max),
                             y_axis='hz', x_axis='off', sr=11025, ax=ax)
    # S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=8000)
    # S_dB = librosa.power_to_db(S, ref=np.max)
    # librosa.display.specshow(S_dB, y_axis='off', x_axis='off', sr=11025, ax=ax)
    fig.savefig ( path_png + "vocal")
    plt.close(fig)  # free the figure; these are generated in batches
    gc.collect()
def save_wav_only_vocal_noise(data_path, path_png, noise):
    # Same as save_wav, but with Gaussian noise (factor `noise`) added to the
    # waveform first; output file is `path_png` + "only_vocal_noise".
    # length = get_length(data_path)
    # for offset in range(0,int(length),num):
    fig = plt.figure(figsize=(5.04, 2.16))
    ax = plt.gca()
    ax.axis('off')
    # load type
    y, sr = librosa.load(data_path,offset=0, duration=10)
    y = add_noise(y, noise)
    S_full, phase = librosa.magphase(librosa.stft(y))
    librosa.display.specshow(librosa.amplitude_to_db(S_full, ref=np.max),
                             y_axis='hz', x_axis='off', sr=11025, ax=ax)
    fig.savefig ( path_png + "only_vocal_noise" )
    plt.close(fig)
    gc.collect()
def save_wav_only_vocal_pitch(data_path, path_png):
    # Same as save_wav, but with a random pitch shift applied first (see
    # manipulate()); output file is `path_png` + "only_vocal_pitch".
    # length = get_length(data_path)
    # for offset in range(0,int(length),num):
    fig = plt.figure(figsize=(5.04, 2.16))
    ax = plt.gca()
    ax.axis('off')
    # load type
    y, sr = librosa.load(data_path,offset=0, duration=10)
    y = manipulate(y, sr)
    S_full, phase = librosa.magphase(librosa.stft(y))
    librosa.display.specshow(librosa.amplitude_to_db(S_full, ref=np.max),
                             y_axis='hz', x_axis='off', sr=sr, ax=ax)
    fig.savefig ( path_png +"only_vocal_pitch" )
    plt.close(fig)
    gc.collect()
# In[4]:
def load_model(path="./model/"):
    # Rebuild the trained Keras model from DeepMusic.json (architecture) and
    # DeepMusic.h5 (weights) in `path`, then compile it for inference.
    json_file = open(path+"DeepMusic.json", "r")
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(path+"DeepMusic.h5")
    # Hyperparameters mirror the training setup -- TODO confirm.
    adam = keras.optimizers.Adam(learning_rate=0.00005)
    loaded_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=adam, metrics=['accuracy'])
    return loaded_model
# In[9]:
def load_data(data_path):
    """Load every image in `data_path` as one float32 array of shape
    (n, 216, 504, 3), scaled to [0, 1].

    Bug fix: the previous version divided by 255 twice (once per image on
    append, then again on the whole array), yielding values in [0, 1/255]
    instead of [0, 1].  It also shadowed its `subdir` listing variable with
    the loop variable; the names are now distinct.
    """
    images = []
    for file_name in os.listdir(data_path):
        img = cv2.imread(data_path+file_name,1)
        images.append(img)
    x_data = np.array(images)
    height = 216
    width = 504
    x_data = x_data.reshape(x_data.shape[0], height, width, 3)
    x_data = x_data.astype('float32')/255
    return x_data
# In[10]:
def get_music_name(y):
    """Map each prediction vector in `y` to its label name and join the
    first three as "a/b/c".

    :param y: iterable of per-image class-probability vectors
    :return: str of the first three predicted labels joined with "/"

    Assumes `y` contains at least three predictions -- TODO confirm the
    caller's guarantee.  (Removed the unused `add` counter from the old
    version.)
    """
    label = get_label()
    predict = [label[np.argmax(row)] for row in y]
    return predict[0]+"/"+predict[1]+"/"+predict[2]
def get_label(label="./deepmusic/model/label.npy"):
    # Load the array of class label names saved during training.
    return np.load(label)
def remove_file(path):
    """Recursively delete the directory tree at `path`."""
    shutil.rmtree(path)
def add_noise(data, noise_factor):
    """Return `data` plus standard-normal noise scaled by `noise_factor`."""
    return data + noise_factor * np.random.randn(len(data))
def manipulate(data, sampling_rate):
    # Pitch-shift the waveform by a random amount in [0, 4) semitones
    # (pitch_pm * 2 * uniform[0,1)), for data augmentation.
    bins_per_octave = 12  # NOTE(review): computed but unused here
    pitch_pm = 2
    pitch_change = pitch_pm * 2*(np.random.uniform())
    # NOTE(review): positional args to pitch_shift were removed in librosa
    # 0.10 (y=, sr=, n_steps= are keyword-only there) -- confirm the pinned
    # librosa version.
    return librosa.effects.pitch_shift(data, sampling_rate, pitch_change)
if __name__ == "__main__":
    try :
        # Input audio path comes from argv
        fname=sys.argv[1]
        path=fname+"_png/"
        try :
            os.mkdir(path)
        except Exception as ex:
            # Folder already exists (or mkdir failed): recreate it empty
            remove_file(path)
            os.mkdir(path)
        # Generate and save the spectrogram images for the input file
        save_wav(fname, path)
        save_wav_only_vocal_noise(fname, path,0.005)
        save_wav_only_vocal_pitch(fname, path)
        # Load the generated images back as an array
        x = load_data(path)
        # Feed them to the model
        model = load_model(path="./deepmusic/model/")
        y = model.predict(x)
        # Combine the per-image predictions into one "a/b/c" string
        music_name = get_music_name(y)
        # Delete the temporary image folder
        remove_file(path)
        # Emit the prediction
        print(music_name)
    except Exception as ex:
        # NOTE(review): if sys.argv[1] is missing, `path` is undefined here
        # and this handler itself raises NameError -- confirm intended usage.
        remove_file(path)
        print("error")
|
989,912 | b9e3d8d0e066b309d91acfcb997ec093c8b6ed32 | dicci={"Alemania":"Berlín", "Francia":"París", "Colombia":"Bogotá", "España":"Madrid"}
print(dicci["Francia"])
#Agregar mas elemnetos
dicci["Italia"]="Lisboa"
print(dicci)
#MOdificar
dicci["Italia"]="Roma"
print(dicci)
#Eliminar
del dicci["Italia"]
print(dicci)
#Diccionario con distintos tipos de datos
dicci2={"Nombre":"Angel", "Fecha":291099, 5:True}
print(dicci2)
#Convertir tupla en dicc
tupla=("España", "Francia", "Alemania")
dicc3={tupla[0]:"Madrid", tupla[1]:"Paris", tupla[2]:"Berlín"}
print(dicc3)
#Imprimir el valor asociado a una clave
print(dicc3["España"])
#Almacenar una tupla en un dicc
dicc4={23:"Jordan", "Nombre":"Michael", "Activo": False, "Anillos":[1991, 1992, 1993, 1996, 1997]}
print(dicc4["Anillos"])
#Dicc dentro de otro dicc
dicc5={23:"Jordan", "Nombre":"Michael", "Activo": False, "Anillos":{"temporadas":[1991, 1992, 1993, 1996, 1997]}}
print(dicc5)
#llaves de un dicc
print(dicc5.keys())
#Valores
print(dicc5.values())
#length
print(len(dicc5)) |
989,913 | 82a820be663fa7fc7726b6f259aee4db2e7c727f | #!/usr/bin/env python3
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# (c) Andreas Doehler <andreas.doehler@bechtle.com/andreas.doehler@gmail.com>
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from typing import Dict, NamedTuple, List
from .agent_based_api.v1 import (
register,
Result,
Service,
SNMPTree,
State,
contains,
OIDEnd,
check_levels,
get_value_store,
)
from .agent_based_api.v1.type_defs import CheckResult, DiscoveryResult, StringTable
from .utils.temperature import check_temperature, TempParamDict
# SNMP code -> unit suffix (sunPlatNumericSensorBaseUnits); keys in
# oracle_ilom_unit_perf below must match the values used here.
oracle_ilom_map_unit = {
    "1": " Other",
    "2": " Unknown",
    "3": "c",
    "4": "f",
    "5": "k",
    "6": "v",
    "7": "a",
    "8": "w",
    "20": "rpm",
    "21": "frequency",
}
# unit -> perf-data metric name used by check_oracle_ilom
oracle_ilom_unit_perf = {
    "w": "power",
    "rpm": "fan",
    "v": "voltage",
}
# SNMP code -> sensor type (drives service discovery, see discover_oracle_ilom)
oracle_ilom_map_type = {
    "1": "other",
    "2": "unknown",
    "3": "temperature",
    "4": "voltage",
    "5": "current",
    "6": "tachometer",
    "7": "counter",
    "8": "switch",
    "9": "lock",
    "10": "humidity",
    "11": "smoke",
    "12": "presence",
    "13": "airflow",
}
# SNMP alarm code -> (Checkmk state, readable text)
oracle_ilom_map_state = {
    "1": (2, "Critical"),
    "2": (2, "Major"),
    "3": (1, "Minor"),
    "4": (3, "indeterminate"),
    "5": (1, "Warning"),
    "6": (1, "Pending"),
    "7": (0, "Cleared"),
}
class ILOMSens(NamedTuple):
    """One ILOM sensor row as assembled by parse_oracle_ilom.

    All fields hold the raw SNMP strings; the check functions convert them
    with int() on use.  The annotations now say `str` accordingly (the
    exponent/value/threshold fields were previously mis-annotated as int
    even though strings are stored).
    """
    sensor_type: str              # mapped via oracle_ilom_map_type, e.g. "temperature"
    sensor_state: str             # alarm status code, see oracle_ilom_map_state
    availability: str             # operational state; "2" means available
    sensor_unit: str              # mapped via oracle_ilom_map_unit, e.g. "c", "rpm"
    sensor_exponent: str          # decimal exponent applied to the raw reading
    sensor_value_str: str         # raw reading, scaled by 10**exponent on use
    sensor_lower_warn_value: str  # thresholds; "0" means "not set"
    sensor_upper_warn_value: str
    sensor_lower_crit_value: str
    sensor_upper_crit_value: str
    sensor_lower_fatal_value: str
    sensor_upper_fatal_value: str
Section = Dict[str, ILOMSens]
def parse_oracle_ilom(string_table: List[StringTable]) -> Section:
    """Join the three fetched SNMP tables into {service item: ILOMSens}."""
    parsed = {}
    entities = {}
    # Tables arrive in the order declared in register.snmp_section below.
    sensors, entity, types = string_table
    # Equipment table: per-entity operational state, alarm status and name.
    for entity_id, entity_state, entity_alarm, entity_name in entity:
        entities[entity_id] = {
            "name": entity_name,
            "state": entity_state,
            "alarm": entity_alarm,
        }
    # Type table: attach a readable sensor type to each known entity.
    for type_id, type_entry in types:
        if type_id in entities:
            entities[type_id].update(
                {"type": oracle_ilom_map_type.get(type_entry, "other")}
            )
    # Numeric sensor table: one ILOMSens per row; all values stay as strings.
    for (
        sensor_id,
        sensor_unit,
        sensor_exponent,
        sensor_value_str,
        sensor_lower_warn_value,
        sensor_upper_warn_value,
        sensor_lower_crit_value,
        sensor_upper_crit_value,
        sensor_lower_fatal_value,
        sensor_upper_fatal_value,
        sensor_bit_mask,
    ) in sensors:
        # NOTE(review): assumes every sensor_id also appeared in the entity
        # table (KeyError otherwise) and got a "type" entry -- confirm the
        # device always populates both tables consistently.
        sensor_name = "Sensor %s %s" % (sensor_id, entities[sensor_id]["name"].strip())
        parsed[sensor_name] = ILOMSens(
            sensor_type=entities[sensor_id]["type"],
            sensor_state=entities[sensor_id]["alarm"],
            availability=entities[sensor_id]["state"],
            sensor_unit=oracle_ilom_map_unit.get(sensor_unit, " Other"),
            sensor_exponent=sensor_exponent,
            sensor_value_str=sensor_value_str,
            sensor_lower_warn_value=sensor_lower_warn_value,
            sensor_upper_warn_value=sensor_upper_warn_value,
            sensor_lower_crit_value=sensor_lower_crit_value,
            sensor_upper_crit_value=sensor_upper_crit_value,
            sensor_lower_fatal_value=sensor_lower_fatal_value,
            sensor_upper_fatal_value=sensor_upper_fatal_value,
        )
    return parsed
# SNMP section: fetch the three Sun Platform MIB tables (numeric sensors,
# equipment state, sensor type) for devices whose sysObjectID contains the
# Sun/Oracle enterprise prefix.
register.snmp_section(
    name="oracle_ilom",
    parse_function=parse_oracle_ilom,
    detect=contains(".1.3.6.1.2.1.1.2.0", ".1.3.6.1.4.1.42.2.200"),
    fetch=[
        SNMPTree(
            base=".1.3.6.1.4.1.42.2.70.101.1.1.8.1",
            oids=[
                OIDEnd(),
                "1",  # sunPlatNumericSensorBaseUnits
                "2",  # sunPlatNumericSensorExponent
                "4",  # sunPlatNumericSensorCurrent
                "8",  # sunPlatNumericSensorLowerThresholdNonCritical
                "9",  # sunPlatNumericSensorUpperThresholdNonCritical
                "10",  # sunPlatNumericSensorLowerThresholdCritical
                "11",  # sunPlatNumericSensorUpperThresholdCritical
                "12",  # sunPlatNumericSensorLowerThresholdFatal
                "13",  # sunPlatNumericSensorUpperThresholdFatal
                "15",  # sunPlatNumericSensorEnabledThresholds
            ],
        ),
        SNMPTree(
            base=".1.3.6.1.4.1.42.2.70.101.1.1.2.1",
            oids=[
                OIDEnd(),
                "2",  # sunPlatEquipmentOperationalState
                "3",  # sunPlatEquipmentAlarmStatus
                "5",  # sunPlatEquipmentLocationName
            ],
        ),
        SNMPTree(
            base=".1.3.6.1.4.1.42.2.70.101.1.1.6.1",
            oids=[
                OIDEnd(),
                "2",  # sunPlatEquipmentLocationName
            ],
        ),
    ],
)
def discover_oracle_ilom(params, section: Dict[str, ILOMSens]) -> DiscoveryResult:
    """Discover one service per available sensor of the configured type."""
    wanted_type = params.get("value")
    for item, sensor in section.items():
        # availability "2" == sensor present/operational
        if sensor.availability == "2" and sensor.sensor_type == wanted_type:
            yield Service(item=item, parameters={})
def check_oracle_ilom(item: str, section: Dict[str, ILOMSens]) -> CheckResult:
    """Generic check for non-temperature sensors (fan, voltage, other)."""
    data = section.get(item)
    if data:
        state, state_readable = oracle_ilom_map_state.get(
            data.sensor_state, (3, "unknown")
        )
        unit = data.sensor_unit
        # All SNMP values are strings scaled by 10**exponent.
        precision = pow(10, int(data.sensor_exponent))
        reading = int(data.sensor_value_str) * precision
        # A raw threshold of 0 means "not configured" -> no level.
        crit_lower = (
            int(data.sensor_lower_crit_value) * precision
            if int(data.sensor_lower_crit_value) != 0
            else None
        )
        warn_lower = (
            int(data.sensor_lower_warn_value) * precision
            if int(data.sensor_lower_warn_value) != 0
            else None
        )
        crit = (
            int(data.sensor_upper_crit_value) * precision
            if int(data.sensor_upper_crit_value) != 0
            else None
        )
        warn = (
            int(data.sensor_upper_warn_value) * precision
            if int(data.sensor_upper_warn_value) != 0
            else None
        )
        infotext = "status: %s" % (state_readable)
        # Metric name is derived from the unit (power/fan/voltage/other).
        yield from check_levels(
            reading,
            metric_name=oracle_ilom_unit_perf.get(data.sensor_unit, "other"),
            levels_upper=(warn, crit),
            levels_lower=(warn_lower, crit_lower),
            render_func=lambda value: "%.2f %s" % (value, unit),
        )
        # Device-reported alarm status as a separate result.
        yield Result(state=State(state), summary=infotext)
def check_oracle_ilom_temp(
    item: str, params: TempParamDict, section: Dict[str, ILOMSens]
) -> CheckResult:
    """Temperature check: delegates levels/trending to check_temperature.

    Unlike check_oracle_ilom, the *fatal* thresholds are used as the
    device's critical levels here.
    """
    data = section.get(item)
    if data:
        state, state_readable = oracle_ilom_map_state.get(
            data.sensor_state, (3, "unknown")
        )
        # Raw values are strings scaled by 10**exponent; 0 == "not set".
        precision = pow(10, int(data.sensor_exponent))
        reading = int(data.sensor_value_str) * precision
        crit_lower = (
            int(data.sensor_lower_fatal_value) * precision
            if int(data.sensor_lower_fatal_value) != 0
            else None
        )
        warn_lower = (
            int(data.sensor_lower_warn_value) * precision
            if int(data.sensor_lower_warn_value) != 0
            else None
        )
        crit = (
            int(data.sensor_upper_fatal_value) * precision
            if int(data.sensor_upper_fatal_value) != 0
            else None
        )
        warn = (
            int(data.sensor_upper_warn_value) * precision
            if int(data.sensor_upper_warn_value) != 0
            else None
        )
        yield from check_temperature(
            reading,
            params,
            unique_name="oracle_ilom_%s" % item,
            value_store=get_value_store(),
            dev_unit=data.sensor_unit,
            dev_levels=(warn, crit),
            dev_levels_lower=(warn_lower, crit_lower),
            dev_status=state,
            dev_status_name=state_readable,
        )
# Four check plugins share the oracle_ilom section and discovery function;
# each discovers a different sensor_type via its discovery default "value".
register.check_plugin(
    name="oracle_ilom_temp",
    sections=["oracle_ilom"],
    service_name="Temperature %s",
    discovery_function=discover_oracle_ilom,
    discovery_default_parameters={"value": "temperature"},
    discovery_ruleset_name="inventory_oracle_ilom",
    check_function=check_oracle_ilom_temp,
    check_default_parameters={},
    check_ruleset_name="temperature",
)
register.check_plugin(
    name="oracle_ilom_fan",
    sections=["oracle_ilom"],
    service_name="Fan %s",
    discovery_function=discover_oracle_ilom,
    discovery_default_parameters={"value": "tachometer"},
    discovery_ruleset_name="inventory_oracle_ilom",
    check_function=check_oracle_ilom,
)
register.check_plugin(
    name="oracle_ilom_voltage",
    sections=["oracle_ilom"],
    service_name="Voltage %s",
    discovery_function=discover_oracle_ilom,
    discovery_default_parameters={"value": "voltage"},
    discovery_ruleset_name="inventory_oracle_ilom",
    check_function=check_oracle_ilom,
)
register.check_plugin(
    name="oracle_ilom_other",
    sections=["oracle_ilom"],
    service_name="Other %s",
    discovery_function=discover_oracle_ilom,
    discovery_default_parameters={"value": "other"},
    discovery_ruleset_name="inventory_oracle_ilom",
    check_function=check_oracle_ilom,
)
|
989,914 | 08d4ac34068d405f1edc0c39216cfb3db7e06cf7 | # -*- coding: utf-8 -*-
#PYTHON SCRIPT #X X SCRIPT NEEDED FOR VOLTAGE TEMPERATURE CALCULATION OF THE IPG LEAD
#THE MODEL SHOULD BE ACTIVE AND SIMULATION RESULTS OF THE MODEL SHOULD HAVE BEEN COMPLETED BEFORE THE SCRIPT IS RUN
# original code modified by cfawole
#owning U of H code
#extract human body electric fields along the lead paths specified in the text files in the pathway_xxx directory
#save the fields in a folder with the same name as the short name of simulation file
import s4l_v1
import numpy
import os
import re
sim_list = list(s4l_v1.document.AllSimulations);
project_name = sim_list[0].GetOutputFileName() #long name of simulation file
result_subfolder = project_name[0:project_name.find('.smash')].split('/')[-1] #short name of simulation file
pathway_folder = 'E:\\c_test\\Transalating Juliannas Code\\pathway_Fat\\' # directory of the different lead pathway files
# NOTE(review): the next literal uses a single backslash before "Transalating"
# ('\T' is not an escape so it survives, but it is inconsistent) -- verify path.
result_folder = 'E:\\c_test\Transalating Juliannas Code\\Etan2\\'; # top level directory of where the fields will be saved
if (not os.path.exists(result_folder)):
    os.mkdir(result_folder); # make top directory folder if it does not exist
result_folder = result_folder+result_subfolder; # actual folder where field files will be stored
file_list = os.listdir(pathway_folder);
if (not os.path.exists(result_folder)):
    os.mkdir(result_folder);
sim_index = -1
for sim in sim_list: #for each simulation in simulation list
    sim_index = sim_index+1
    # Simulation names encode "<body>_<x>_<y>_<z>" shifts in millimetres.
    sim_name = sim.Name;
    sim_info = re.split('_',sim_name);
    body_type = sim_info[0];
    x_shift = float(sim_info[1]);
    y_shift = float(sim_info[2]);
    z_shift = float(sim_info[3]);
    # Pull the overall E-field sensor results for this simulation.
    results = sim.Results()
    field_sensor = results[ 'Overall Field' ]
    efield_sensor = field_sensor[ 'EM E(x,y,z,f0)' ]
    efield_sensor.Update()
    efield_data = efield_sensor.Data;
    efield_grid = efield_data.Grid;
    efield_array = efield_data.Field(0) #the e-field
    for coo_file in file_list: #coo_file is each lead path file
        dot_index = coo_file.find('.');
        if coo_file[dot_index:] == '.txt':
            InputFileName = pathway_folder + '/'+coo_file;
            path_coo_file = open(InputFileName,'r');
            coo_list_str = path_coo_file.readlines();
            path_coo_file.close();
            result_file_name = result_folder + '/' + sim_name + '_' + coo_file;
            field_result_file = open(result_file_name,'w');
            str_out = str('%coord_x\tcoord_y\tcoord_z\tEtan_real\tEtan_imag\n');
            field_result_file.write(str_out);
            j = 0;
            coo_list = [];
            # Parse the lead-path coordinates, applying the body shift (mm -> m).
            for a_coordinat in coo_list_str:
                cord = re.split('\s+', a_coordinat);
                str_x = cord[0];
                coo_x = float(str_x); # x-cooridinate in lead path file
                str_y = cord[1];
                coo_y = float(str_y);
                str_z = cord[2];
                coo_z = float(str_z);
                coo_value = coo_x+x_shift/1000.0,coo_y+y_shift/1000.0,coo_z+z_shift/1000.0; #if the body model is shifted, shift the each lead path correspondingly
                temp_point = s4l_v1.Vec3(coo_value[0],coo_value[1],coo_value[2]);
                coo_list.append(temp_point);
            IndexNum = len(coo_list);
            Mid_Point = [];
            Point_Vec = [];
            for index1 in range(IndexNum-1):
                temp_point2 = coo_list[index1+1] + coo_list[index1]; # add the next point to the current point
                Mid_Point.append(s4l_v1.Vec3([0.5*temp_point2[i] for i in range(0,3)])); # find the mid point of the point obtained above
                Point_Vec.append(coo_list[index1+1] - coo_list[index1]); #subtract the next point from the current point. This will be used to calculate the length of a segment
            ETan = [];
            Mag_Etan = [];
            TotalLength = 0;
            # For each segment midpoint, sample the E-field and project it onto
            # the segment direction (tangential component Etan).
            for index2 in range(len(Mid_Point)):
                cell_index = efield_grid.FindCell(Mid_Point[index2][0],Mid_Point[index2][1],Mid_Point[index2][2]);
                point_index = efield_grid.ComputeCellIndex(cell_index[0],cell_index[1],cell_index[2]);
                efield = efield_array[point_index]
                e_x = efield[0]; # x-component of the e-field
                e_y = efield[1];
                e_z = efield[2];
                Segment_Length = numpy.sqrt((Point_Vec[index2][0])**2+(Point_Vec[index2][1])**2\
                    +(Point_Vec[index2][2])**2); #length of a segment of the lead path
                TotalLength = TotalLength + Segment_Length; # total length of lead path
                UnitVec = Point_Vec[index2]*(1/Segment_Length); #unit vector of a segment of the lead path
                ETanTmp = (UnitVec[0]*e_x+UnitVec[1]*e_y+UnitVec[2]*e_z); # dot product of E-field and unit vector
                ETan.append(ETanTmp);
                str_out = str(Mid_Point[index2][0])+'\t'+str(Mid_Point[index2][1]) +'\t'\
                    +str(Mid_Point[index2][2])+'\t'+ str(ETanTmp.real) + '\t' + str(ETanTmp.imag)+'\n'; #each output line is mid point and the real/imaginary path of E-field
                field_result_file.write(str_out);
field_result_file.close(); |
989,915 | b7acda9f0bf4d912e1e62f7f08a734d2320df779 | # -*- coding: utf-8 -*-
from formencode import validators
from tw2.forms import (
TableForm,
TextField,
TextArea,
LabelField,
)
from tw2.tinymce import TinyMCEWidget
from pylonsprojectjp.apps.admin import (
admin_config,
ListColumn,
)
from .models import BlogEntryModel
class BlogEntryForm(TableForm):
    """Admin edit form for a blog entry: read-only id, title, rich-text body."""
    # Primary key shown read-only (LabelField renders but does not edit).
    id = LabelField(
        label=u"ID",
        validator=validators.Int)
    # label u"タイトル" = "title"; capped at 255 chars, surrounding whitespace stripped.
    title = TextField(
        label=u"タイトル",
        validator=validators.UnicodeString(max=255, strip=True))
    # label u"本文" = "body"; edited through the TinyMCE rich-text widget.
    body = TinyMCEWidget(
        label=u"本文",
        validator=validators.UnicodeString)
@admin_config(name='blog')
class BlogAdmin(object):
    """Admin registration for blog entries (registered under the name 'blog')."""
    # u"ブログ管理" = "Blog administration" - page title in the admin UI.
    title = u"ブログ管理"
    # Columns of the list view; labels are Japanese
    # (タイトル=title, 本文=body, 作成日時=created at, 更新日時=modified at).
    list_columns = [
        ListColumn('id', label=u"ID"),
        ListColumn('title', label=u"タイトル"),
        ListColumn('body', label=u"本文"),
        ListColumn('created', label=u"作成日時"),
        ListColumn('modified', label=u"更新日時"),
    ]
    # Model class this admin screen manages.
    model_class = BlogEntryModel
    def __init__(self, request):
        # request: the current web request object (presumably Pyramid/Pylons -
        # TODO confirm against the admin framework).
        self.request = request
    def get_form(self, entity):
        # The same form is returned regardless of the entity being edited.
        return BlogEntryForm()
|
989,916 | ecc8050a55cc3113e34809d0d49e27a701c91c73 | import random
def random_move():
    """Return a random direction half the time, None the other half.

    Returns:
        str | None: one of "up", "down", "left", "right", or None.
    """
    directions = ("up", "down", "left", "right")
    # random.randint(0, 1) is a fair coin flip: 1 -> move, 0 -> stay put.
    if random.randint(0, 1):
        return random.choice(directions)
    # The original ended with a bare `None` expression statement (a no-op);
    # make the intended return value explicit.
    return None
989,917 | 1decbe4ef0566adaba5bd736f2c16c0a84a83ef0 | import datetime
import glob
import os
import time
from django.conf import settings
from django.template.loader import render_to_string
from ietf.message.models import Message, SendQueue
from ietf.message.utils import send_scheduled_message_from_send_queue
from ietf.doc.models import DocumentAuthor
from ietf.person.models import Person
def announcement_from_form(data, **kwargs):
    """Create a Message record (and its SendQueue entry) from EmailForm data.

    Args:
        data: EmailForm.data mapping with 'subject', 'to', 'cc', 'body'.
        **kwargs: optional overrides: by (Person), from_val (sender string),
            content_type.

    Returns:
        The newly created Message instance.
    """
    # Overridable fields, falling back to the legacy defaults.
    actor = kwargs.get('by', Person.objects.get(name='(System)'))
    sender = kwargs.get('from_val', 'ID Tracker <internet-drafts-reply@ietf.org>')
    ctype = kwargs.get('content_type', '')

    message = Message.objects.create(
        by=actor,
        subject=data['subject'],
        frm=sender,
        to=data['to'],
        cc=data['cc'],
        body=data['body'],
        content_type=ctype,
    )

    # Queue the message and dispatch it immediately.
    queue_entry = SendQueue.objects.create(by=actor, message=message)
    send_scheduled_message_from_send_queue(queue_entry)

    return message
def get_authors(draft):
"""
Takes a draft object and returns a list of authors suitable for a tombstone document
"""
authors = []
for a in draft.authors.all():
initial = ''
prefix, first, middle, last, suffix = a.person.name_parts()
if first:
initial = first + '. '
entry = '%s%s <%s>' % (initial,last,a.address)
authors.append(entry)
return authors
def get_abbr_authors(draft):
    """Return "F. Last" (plus ", et al" when several) for New Revision mail."""
    authors = DocumentAuthor.objects.filter(document=draft)
    if not authors:
        return ''
    prefix, first, middle, last, suffix = authors[0].author.person.name_parts()
    # Abbreviate the first author's first name to a single initial.
    abbrev = first[0] + '. ' if first else ''
    result = '%s%s' % (abbrev, last)
    if len(authors) > 1:
        result += ', et al'
    return result
def get_last_revision(filename):
    """Return the revision string of the newest archived txt for *filename*.

    *filename* is the draft name as stored in the InternetDraft record,
    without revision or extension (e.g. draft-ietf-alto-reqs).

    Raises:
        Exception: when no matching file exists in the archive directory.
    """
    pattern = os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR, filename) + '-??.txt'
    matches = glob.glob(pattern)
    if not matches:
        raise Exception('last revision not found in archive')
    # Lexicographic max == highest revision for the fixed-width -NN.txt scheme.
    return get_revision(max(matches))
def get_revision(name):
    """Return the two-character revision from a draft filename.

    e.g. 'draft-ietf-alto-reqs-05.txt' -> '05'.
    """
    stem, _ext = os.path.splitext(name)
    # Revision is always the last two characters of the stem (-NN suffix).
    return stem[-2:]
def get_revision_emails(draft):
    """
    Accumulate the To: addresses for a new-revision notification.

    Derived from the ColdFusion legacy app; the list is built from:
    1) draft.notify - largely custom mail lists for the document or group
    2) the responsible AD (draft.ad)
    3) any AD holding a "discuss" position on the active ballot
    4) the RFC Editor, when the draft is in the RFC Editor queue

    Returns a comma-separated address string; empty string when the draft
    has no IESG state.
    """
    # from legacy: drafts outside the IESG process get no notification
    if not draft.get_state('draft-iesg'):
        return ''
    emails = []
    if draft.notify:
        emails.append(draft.notify)
    if draft.ad:
        emails.append(draft.ad.role_email("ad").address)
    if draft.active_ballot():
        # NOTE: iteritems() - this module still targets Python 2.
        for ad, pos in draft.active_ballot().active_ad_positions().iteritems():
            if pos and pos.pos_id == "discuss":
                emails.append(ad.role_email("ad").address)
    if draft.get_state('draft-iesg').slug == "rfcqueue":
        emails.append('rfc-editor@rfc-editor.org')
    return ', '.join(emails)
def add_email(emails, person):
    """Add *person* to *emails* (address -> quoted full name), keeping any
    entry already recorded for that address."""
    address = person.email()
    if address not in emails:
        emails[address] = '"%s %s"' % (person.first_name, person.last_name)
def get_fullcc_list(draft):
    """Build the Cc: address string for a standard notification about *draft*.

    An intermediate dict keyed by address (value = quoted display name)
    prevents duplicate addresses; the output is sorted by address so the
    resulting list is consistently ordered.
    """
    emails = {}

    # Document authors.
    for auth in draft.authors.all():
        if auth.address not in emails:
            emails[auth.address] = '"%s"' % (auth.person.name)

    if draft.group.acronym != 'none':
        # Group chairs.
        for chair_role in draft.group.role_set.filter(name='chair'):
            if chair_role.email.address not in emails:
                emails[chair_role.email.address] = '"%s"' % (chair_role.person.name)
        # Area directors: the -ads alias for WGs, the parent chair for RGs.
        if draft.group.type.slug == 'wg':
            emails['%s-ads@ietf.org' % draft.group.acronym] = '"%s-ads"' % (draft.group.acronym)
        elif draft.group.type.slug == 'rg':
            email = draft.group.parent.role_set.filter(name='chair')[0].email
            emails[email.address] = '"%s"' % (email.person.name)

    # Document shepherd, when assigned.
    if draft.shepherd:
        emails[draft.shepherd.address] = '"%s"' % (draft.shepherd.person.name)

    # Sort by address for a deterministic result.
    parts = []
    for address in sorted(emails):
        name = emails[address]
        parts.append('%s <%s>' % (name, address) if name else '<%s>' % address)
    return ','.join(parts)
def get_email_initial(draft, type=None, input=None):
    """
    Return initial field values for an email notification about *draft*.

    Args:
        draft: the draft document object.
        type: one of 'extend', 'new', 'replace', 'resurrect', 'revision',
            'update', 'withdraw' - selects the notification template.
        input: dictionary of the action form input data (used by the
            extend, new-style replace, update and withdraw branches).

    Returns a dictionary with keys: to, cc, subject, body.

    NOTE: for type='new' we list all authors in the message body to match
    the legacy app; datatracker abbreviates the list with "et al".
    Datatracker scheduled_announcement entries have "Action" in the subject
    whereas this app uses "ACTION".
    """
    # Drafts expire 185 days out; rendered e.g. "January 01, 2020".
    expiration_date = (datetime.date.today() + datetime.timedelta(185)).strftime('%B %d, %Y')
    # Next revision number, zero-padded to two digits.
    new_revision = str(int(draft.rev)+1).zfill(2)
    new_filename = draft.name + '-' + new_revision + '.txt'
    curr_filename = draft.name + '-' + draft.rev + '.txt'
    data = {}
    # Defaults; individual branches below may override both.
    data['cc'] = get_fullcc_list(draft)
    data['to'] = ''
    if type == 'extend':
        context = {'doc':curr_filename,'expire_date':input['expiration_date']}
        data['subject'] = 'Extension of Expiration Date for %s' % (curr_filename)
        data['body'] = render_to_string('drafts/message_extend.txt', context)
    elif type == 'new':
        # Working-group items get an extra note in the message body.
        if draft.group.type.slug == 'wg':
            wg_message = 'This draft is a work item of the %s Working Group of the IETF.' % draft.group.name
        else:
            wg_message = ''
        context = {'wg_message':wg_message,
                   'draft':draft,
                   'authors':get_abbr_authors(draft),
                   'revision_date':draft.latest_event(type='new_revision').time.date(),
                   'timestamp':time.strftime("%Y-%m-%d%H%M%S", time.localtime())}
        data['to'] = 'i-d-announce@ietf.org'
        data['cc'] = draft.group.list_email
        data['subject'] = 'I-D ACTION:%s' % (curr_filename)
        data['body'] = render_to_string('drafts/message_new.txt', context)
    elif type == 'replace':
        '''
        input['replaced'] is a DocAlias
        input['replaced_by'] is a Document
        '''
        context = {'doc':input['replaced'].name,'replaced_by':input['replaced_by'].name}
        data['subject'] = 'Replacement of %s with %s' % (input['replaced'].name,input['replaced_by'].name)
        data['body'] = render_to_string('drafts/message_replace.txt', context)
    elif type == 'resurrect':
        # Announce the last archived revision coming back to life.
        last_revision = get_last_revision(draft.name)
        last_filename = draft.name + '-' + last_revision + '.txt'
        context = {'doc':last_filename,'expire_date':expiration_date}
        data['subject'] = 'Resurrection of %s' % (last_filename)
        data['body'] = render_to_string('drafts/message_resurrect.txt', context)
    elif type == 'revision':
        context = {'rev':new_revision,'doc':new_filename,'doc_base':new_filename[:-4]}
        data['to'] = get_revision_emails(draft)
        data['cc'] = ''
        data['subject'] = 'New Version Notification - %s' % (new_filename)
        data['body'] = render_to_string('drafts/message_revision.txt', context)
    elif type == 'update':
        context = {'doc':input['filename'],'expire_date':expiration_date}
        data['subject'] = 'Posting of %s' % (input['filename'])
        data['body'] = render_to_string('drafts/message_update.txt', context)
    elif type == 'withdraw':
        context = {'doc':curr_filename,'by':input['type']}
        data['subject'] = 'Withdrawl of %s' % (curr_filename)
        data['body'] = render_to_string('drafts/message_withdraw.txt', context)
    return data
|
989,918 | 823122629e28be046da527979da2b30fab51a378 | from .__core import *
__all__ = ["routine"] |
989,919 | 069933f4e265a70a118a9584e9b1daa79922d925 | from config import Config
from subprocess import Popen, PIPE
class Set():
    """Apply a named monitor layout from the stored config by invoking xrandr."""

    def __init__(self) -> None:
        # Load all layouts once; self.config maps label -> list of monitor dicts.
        self.config = Config().read()

    def command(self, label: str) -> None:
        """Build and run the xrandr invocation for the layout named *label*."""
        args = ['xrandr']
        for monitor in self.config[label]:
            args.extend(['--output', monitor['interface']])
            if monitor['state'] == 'off':
                # Disabled output: nothing further to configure.
                args.append('--off')
            else:
                offset = monitor['coordinates']['offset']
                # Position is "<x>x<y>" built from the stored string offsets.
                args.extend(['--pos', offset[0] + 'x' + offset[1], '--auto'])
                # Only an explicit True (or 1) marks the primary display.
                if monitor['primary'] == True:
                    args.append('--primary')
        proc = Popen(args, stdout=PIPE, stderr=PIPE)
        # Wait for xrandr to finish; its output is intentionally discarded.
        proc.communicate()
989,920 | 9246472296156ed6e754fc6d5ca540a47c30f439 | import random
# stolen from https://github.com/arizonatribe/word-generator
class Words:
words = {
"nouns": [
"aardvark",
"aardwolf",
"ability",
"abroad",
"abuse",
"accentor",
"access",
"accident",
"account",
"act",
"action",
"active",
"activity",
"actor",
"ad",
"addax",
"addition",
"address",
"administration",
"adult",
"advance",
"advantage",
"advertising",
"advice",
"affair",
"affect",
"african buffalo",
"african wild ass",
"african wild dog",
"afternoon",
"agama",
"age",
"agency",
"agent",
"agouti",
"agreement",
"air",
"airline",
"airport",
"alarm",
"albatross",
"alcohol",
"alligator",
"alpaca",
"alternative",
"ambition",
"american black bear",
"american sparrow",
"amount",
"amur leopard",
"anaconda",
"analysis",
"analyst",
"andean mountain cat",
"anemone",
"angelfish",
"anger",
"angle",
"anhinga",
"animal",
"annual",
"anoa",
"anole",
"answer",
"ant",
"anteater",
"antelope",
"anxiety",
"anybody",
"anything",
"anywhere",
"apartment",
"appeal",
"appearance",
"apple",
"application",
"appointment",
"archerfish",
"arctic fox",
"area",
"argument",
"arm",
"armadillo",
"army",
"arowana",
"arrival",
"art",
"article",
"asian black bear",
"aside",
"ask",
"aspect",
"assignment",
"assist",
"assistance",
"assistant",
"associate",
"association",
"assumption",
"atmosphere",
"attack",
"attempt",
"attention",
"attitude",
"audience",
"auk",
"author",
"average",
"avocet",
"award",
"awareness",
"axolotl",
"aye aye",
"babirusa",
"baboon",
"baby",
"back",
"background",
"bad",
"badger",
"bag",
"bake",
"balance",
"bald eagle",
"ball",
"band",
"bandicoot",
"bank",
"banteng",
"bar",
"barbet",
"base",
"baseball",
"basilisk",
"basis",
"basket",
"bat",
"bat",
"batfish",
"bath",
"bathroom",
"battle",
"beach",
"bear",
"bear",
"bearded dragon",
"beat",
"beautiful",
"beaver",
"bed",
"bed bug",
"bedroom",
"bee",
"bee eater",
"beer",
"beetle",
"beginning",
"being",
"bell",
"belt",
"bench",
"bend",
"benefit",
"bet",
"betta",
"bettong",
"beyond",
"bicycle",
"bid",
"big",
"bigeyes",
"bike",
"bilby",
"bill",
"binturong",
"bird",
"bird",
"bird of paradise",
"birth",
"birthday",
"bison",
"bit",
"bite",
"bitter",
"bitterling",
"bittern",
"black",
"black footed cat",
"black footed ferret",
"blackdevil",
"blame",
"blank",
"blind",
"block",
"blood",
"blow",
"blue",
"blue sheep",
"blue whale",
"bluebird",
"boa",
"board",
"boat",
"bobcat",
"body",
"bone",
"bongo",
"bonus",
"booby",
"book",
"boot",
"border",
"boss",
"bother",
"bottle",
"bottom",
"bowerbird",
"bowl",
"box",
"boy",
"boyfriend",
"brain",
"branch",
"brave",
"bread",
"break",
"breakfast",
"breast",
"breath",
"brick",
"bridge",
"brief",
"brilliant",
"broad",
"broadbill",
"brother",
"brown",
"brown bear",
"brush",
"buddy",
"budget",
"budgie",
"bug",
"building",
"bulbul",
"bull",
"bullfrog",
"bunch",
"bunny",
"bunting",
"burn",
"bus",
"bush dog",
"bushbaby",
"bushshrike",
"business",
"bustard",
"butterfly",
"button",
"buy",
"buyer",
"buzzard",
"cabinet",
"cable",
"caecilian",
"cake",
"calendar",
"call",
"calm",
"camel",
"camera",
"camp",
"campaign",
"can",
"cancel",
"cancer",
"candidate",
"candle",
"candy",
"cap",
"capital",
"capybara",
"car",
"caracal",
"caracara",
"card",
"cardinal",
"cardinalfish",
"care",
"career",
"caribou",
"carp",
"carpet",
"carry",
"case",
"cash",
"cassowary",
"cat",
"cat",
"catch",
"category",
"caterpillar",
"catfish",
"cattle",
"cause",
"cavy",
"celebration",
"cell",
"centipede",
"chain",
"chair",
"challenge",
"chameleon",
"chamois",
"champion",
"championship",
"chance",
"change",
"channel",
"chapter",
"character",
"charge",
"charity",
"chart",
"chat",
"check",
"cheek",
"cheetah",
"chemical",
"chemistry",
"chest",
"chicken",
"chicken",
"child",
"childhood",
"chimaera",
"chimpanzee",
"chinchilla",
"chip",
"chipmunk",
"chocolate",
"choice",
"chough",
"chuckwalla",
"church",
"cicada",
"cichlid",
"cigarette",
"city",
"civet",
"claim",
"clam",
"class",
"classic",
"classroom",
"clerk",
"click",
"client",
"climate",
"climbing mouse",
"climbing perch",
"clock",
"closet",
"clothes",
"cloud",
"clouded leopard",
"clownfish",
"club",
"clue",
"coach",
"coast",
"coat",
"coati",
"cobra",
"cockatiel",
"cockatoo",
"cockroach",
"code",
"coffee",
"cold",
"collar",
"collection",
"college",
"colugo",
"combination",
"combine",
"comfort",
"comfortable",
"command",
"comment",
"commercial",
"commission",
"committee",
"common",
"common genet",
"communication",
"community",
"company",
"comparison",
"competition",
"complaint",
"complex",
"computer",
"concentrate",
"concept",
"concern",
"concert",
"conch",
"conclusion",
"condition",
"conference",
"confidence",
"conflict",
"confusion",
"connection",
"consequence",
"consideration",
"consist",
"constant",
"construction",
"contest",
"context",
"contract",
"contribution",
"control",
"conversation",
"convert",
"cook",
"cookie",
"coot",
"copperhead",
"copy",
"cormorant",
"corner",
"cost",
"cotinga",
"cotton rat",
"cougar",
"count",
"counter",
"country",
"county",
"couple",
"courage",
"course",
"courser",
"court",
"cousin",
"cover",
"cow",
"cow",
"coyote",
"coypu",
"crab",
"crack",
"craft",
"crane",
"crane fly",
"crash",
"crayfish",
"crazy",
"cream",
"creative",
"credit",
"crew",
"cricket",
"criticism",
"crocodile",
"cross",
"crow",
"cry",
"cuckoo",
"culpeo",
"culture",
"cup",
"curlew",
"currency",
"current",
"curve",
"cuscus",
"customer",
"cut",
"cuttlefish",
"cycle",
"damage",
"dance",
"dare",
"dark",
"dartfish",
"dassie rat",
"data",
"database",
"date",
"daughter",
"day",
"dead",
"deal",
"dealer",
"dear",
"death",
"death adder",
"debate",
"debt",
"decision",
"deep",
"deer",
"deer mouse",
"definition",
"degree",
"degu",
"delay",
"delivery",
"demand",
"department",
"departure",
"dependent",
"deposit",
"depression",
"depth",
"description",
"design",
"designer",
"desire",
"desk",
"detail",
"development",
"device",
"devil",
"dhole",
"diamond",
"dibbler",
"diet",
"difference",
"difficulty",
"dig",
"dik dik",
"dikkop",
"dimension",
"dimetrodon",
"dingo",
"dinner",
"dinosaur",
"dipper",
"direction",
"director",
"dirt",
"disaster",
"discipline",
"discount",
"discus",
"discussion",
"disease",
"dish",
"disk",
"display",
"distance",
"distribution",
"district",
"divide",
"doctor",
"document",
"dodo",
"dog",
"dog",
"dolphin",
"donkey",
"door",
"dormouse",
"dot",
"double",
"doubt",
"dove",
"draft",
"drag",
"dragonfly",
"drama",
"draw",
"drawer",
"drawing",
"dream",
"dress",
"drink",
"drive",
"driver",
"drongo",
"drop",
"drunk",
"duck",
"due",
"dugong",
"duiker",
"dump",
"dunnart",
"dust",
"duty",
"eagle",
"eagle ray",
"ear",
"earth",
"ease",
"east",
"eat",
"echidna",
"economics",
"economy",
"edge",
"editor",
"education",
"eel",
"effect",
"effective",
"efficiency",
"effort",
"egg",
"egret",
"eider",
"election",
"electric eel",
"electric ray",
"elephant",
"elephant bird",
"elevator",
"elk",
"emergency",
"emotion",
"emphasis",
"employ",
"employee",
"employer",
"employment",
"emu",
"end",
"energy",
"engine",
"engineer",
"engineering",
"entertainment",
"enthusiasm",
"entrance",
"entry",
"environment",
"equal",
"equipment",
"equivalent",
"ermine",
"error",
"escape",
"essay",
"establishment",
"estate",
"estimate",
"evening",
"event",
"evidence",
"exam",
"examination",
"example",
"exchange",
"excitement",
"excuse",
"exercise",
"exit",
"experience",
"expert",
"explanation",
"expression",
"extension",
"extent",
"external",
"extreme",
"eye",
"face",
"fact",
"factor",
"fail",
"failure",
"falcon",
"fall",
"familiar",
"family",
"fan",
"farm",
"farmer",
"fat",
"father",
"fault",
"fear",
"feature",
"fee",
"feed",
"feedback",
"feel",
"feeling",
"female",
"fennec fox",
"ferret",
"few",
"field",
"fight",
"figure",
"file",
"fill",
"film",
"final",
"finance",
"finch",
"finding",
"finger",
"finish",
"fire",
"fish",
"fish",
"fisher",
"fishing",
"fishing cat",
"fix",
"flamingo",
"flat headed cat",
"flea",
"flight",
"floor",
"flow",
"flower",
"flowerpecker",
"fly",
"fly",
"flying fish",
"flying frog",
"focus",
"fold",
"following",
"food",
"foot",
"football",
"force",
"forever",
"form",
"formal",
"fortune",
"fossa",
"foundation",
"fox",
"frame",
"freedom",
"friend",
"friendship",
"frigatebird",
"frog",
"frogmouth",
"front",
"fruit",
"fuel",
"fulmar",
"fun",
"function",
"funeral",
"funny",
"future",
"gain",
"galago",
"gallinule",
"game",
"gannet",
"gap",
"gar",
"garage",
"garbage",
"garden",
"garter snake",
"gas",
"gate",
"gather",
"gaur",
"gazelle",
"gear",
"gecko",
"gene",
"general",
"geoffroy's cat",
"gerbil",
"gerenuk",
"giant panda",
"giant tortoise",
"gibbon",
"gift",
"gila monster",
"giraffe",
"girl",
"girlfriend",
"give",
"glad",
"glass",
"glove",
"gnu",
"go",
"goal",
"goat",
"goatfish",
"god",
"gold",
"goldfish",
"golf",
"good",
"goose",
"gopher",
"goral",
"gorilla",
"gourami",
"government",
"grab",
"grackle",
"grade",
"grand",
"grandfather",
"grandmother",
"grass",
"grasshopper",
"gray wolf",
"great",
"greater glider",
"grebe",
"green",
"green iguana",
"grison",
"grizzly bear",
"grocery",
"ground",
"groundhog",
"group",
"grouse",
"growth",
"guanaco",
"guarantee",
"guard",
"guess",
"guest",
"guidance",
"guide",
"guinea pig",
"guitar",
"gull",
"gundi",
"guy",
"habit",
"hair",
"half",
"hall",
"hamster",
"hand",
"handle",
"hang",
"harm",
"harrier",
"hartebeest",
"hat",
"hate",
"hawaiian honeycreeper",
"hawk",
"head",
"health",
"hearing",
"heart",
"heat",
"heavy",
"hedgehog",
"height",
"hell",
"hello",
"helmetshrike",
"help",
"hermit crab",
"heron",
"hide",
"high",
"highlight",
"highway",
"himalayan tahr",
"hippopotamus",
"hire",
"hissing cockroach",
"historian",
"history",
"hit",
"hold",
"hole",
"holiday",
"home",
"homework",
"honey",
"honeyeater",
"hook",
"hope",
"hornbill",
"hornet",
"horror",
"horse",
"horse",
"hospital",
"host",
"hotel",
"hour",
"house",
"housing",
"hoverfly",
"human",
"hummingbird",
"hunt",
"hurry",
"hurt",
"husband",
"hutia",
"hyena",
"hyrax",
"iberian lynx",
"ibex",
"ibis",
"ice",
"icterid",
"idea",
"ideal",
"if",
"iguana",
"illegal",
"image",
"imagination",
"impact",
"impala",
"implement",
"importance",
"impress",
"impression",
"improvement",
"incident",
"income",
"increase",
"independence",
"independent",
"indication",
"individual",
"industry",
"inevitable",
"inflation",
"influence",
"information",
"initial",
"initiative",
"injury",
"insect",
"insect",
"inside",
"inspection",
"inspector",
"instance",
"instruction",
"insurance",
"intention",
"interaction",
"interest",
"internal",
"international",
"internet",
"interview",
"introduction",
"investment",
"invite",
"iron",
"island",
"issue",
"it",
"item",
"jacana",
"jack",
"jackal",
"jacket",
"jaguar",
"jaguarundi",
"jay",
"jellyfish",
"jerboa",
"job",
"join",
"joint",
"joke",
"judge",
"judgment",
"juice",
"jump",
"jungle cat",
"junior",
"jury",
"kangaroo",
"kangaroo rat",
"keep",
"kerodon",
"kestrel",
"key",
"kick",
"kid",
"kill",
"kind",
"king",
"king cobra",
"kingbird",
"kingfisher",
"kinkajou",
"kiss",
"kitchen",
"kite",
"kitten",
"kiwi",
"klipspringer",
"knee",
"knife",
"knifefish",
"knowledge",
"koala",
"kodiak bear",
"kodkod",
"koi",
"komodo dragon",
"kookaburra",
"kowari",
"kudu",
"kultarr",
"lab",
"lack",
"ladder",
"lady",
"ladybug",
"lake",
"lamb",
"lamprey",
"land",
"landscape",
"language",
"lapwing",
"laugh",
"law",
"lawyer",
"lay",
"layer",
"lead",
"leader",
"leadership",
"leading",
"league",
"leather",
"leave",
"lecture",
"leech",
"leg",
"lemming",
"lemur",
"length",
"leopard",
"lesson",
"let",
"letter",
"level",
"library",
"lie",
"life",
"lift",
"liger",
"light",
"limit",
"line",
"link",
"lion",
"lionfish",
"lip",
"list",
"listen",
"literature",
"living",
"lizard",
"llama",
"loach",
"load",
"loan",
"lobster",
"local",
"location",
"lock",
"log",
"long",
"long tailed tit",
"longspur",
"look",
"loon",
"loris",
"lory",
"loss",
"love",
"lovebird",
"low",
"luck",
"lunch",
"lynx",
"lyrebird",
"macaw",
"machine",
"magazine",
"mail",
"main",
"maintenance",
"major",
"make",
"male",
"mall",
"mallard",
"mamba",
"mammoth",
"man",
"management",
"manager",
"manakin",
"manatee",
"mandrill",
"manner",
"manta ray",
"mantis shrimp",
"manufacturer",
"many",
"map",
"mara",
"march",
"margay",
"marine angelfish",
"marine hatchetfish",
"mark",
"market",
"marketing",
"markhor",
"marlin",
"marmot",
"marriage",
"marsupial mole",
"marten",
"master",
"mastodon",
"match",
"mate",
"material",
"math",
"matter",
"maximum",
"maybe",
"meadowlark",
"meal",
"meaning",
"measurement",
"meat",
"media",
"medicine",
"medium",
"meerkat",
"meet",
"meeting",
"megaloceros",
"megapode",
"member",
"membership",
"memory",
"mention",
"menu",
"mess",
"message",
"metal",
"method",
"middle",
"midnight",
"might",
"milk",
"millipede",
"mind",
"mine",
"miniature horse",
"minimum",
"mink",
"minnow",
"minor",
"minute",
"mirror",
"miss",
"mission",
"mistake",
"mix",
"mixture",
"mobile",
"mockingbird",
"mode",
"model",
"mole",
"mole rat",
"mom",
"moment",
"money",
"mongoose",
"monitor",
"monitor lizard",
"monkey",
"month",
"mood",
"moorhen",
"moose",
"moray eel",
"morning",
"mortgage",
"mosasaur",
"mosquito",
"most",
"moth",
"mother",
"motmot",
"motor",
"mountain",
"mountain goat",
"mountain lion",
"mouse",
"mouse",
"mouse deer",
"mousebird",
"mouth",
"move",
"movie",
"mud",
"mudpuppy",
"mudskipper",
"mullet",
"muntjac",
"muscle",
"music",
"muskox",
"muskrat",
"musky rat kangaroo",
"nail",
"naked mole rat",
"name",
"narwhal",
"nasty",
"nation",
"national",
"native",
"natural",
"nature",
"nautilus",
"neat",
"necessary",
"neck",
"needlefish",
"negative",
"negotiation",
"nerve",
"net",
"network",
"news",
"newspaper",
"newt",
"night",
"nighthawk",
"nightjar",
"nobody",
"noise",
"normal",
"north",
"nose",
"note",
"nothing",
"notice",
"novel",
"numbat",
"number",
"nurse",
"nuthatch",
"nutria",
"object",
"objective",
"obligation",
"occasion",
"ocelot",
"octopus",
"offer",
"office",
"officer",
"official",
"oil",
"okapi",
"old world babbler",
"old world flycatcher",
"olingo",
"onager",
"opening",
"operation",
"opinion",
"opossum",
"opportunity",
"opposite",
"option",
"orange",
"orangutan",
"orca",
"order",
"ordinary",
"organization",
"original",
"oriole",
"oryx",
"osprey",
"ostrich",
"other",
"otter",
"outcome",
"outside",
"oven",
"ovenbird",
"owl",
"owner",
"oyster",
"paca",
"pace",
"pack",
"package",
"paddlefish",
"pademelon",
"page",
"pain",
"paint",
"painting",
"pair",
"pallas's cat",
"panda",
"pangolin",
"panic",
"panther",
"paper",
"parakeet",
"parent",
"park",
"parking",
"parrot",
"parrotfish",
"part",
"particular",
"partner",
"party",
"pass",
"passage",
"passenger",
"passenger pigeon",
"passion",
"past",
"path",
"patience",
"patient",
"pattern",
"pause",
"pay",
"payment",
"peace",
"peacock",
"peafowl",
"peak",
"peccary",
"pelican",
"pen",
"penalty",
"penguin",
"pension",
"people",
"percentage",
"perception",
"performance",
"period",
"permission",
"permit",
"person",
"personal",
"personality",
"perspective",
"phase",
"pheasant",
"philosophy",
"phone",
"photo",
"phrase",
"physical",
"physics",
"piano",
"pick",
"picture",
"pie",
"piece",
"pig",
"pigeon",
"pika",
"pike",
"pin",
"pipe",
"piranha",
"pitch",
"pitohui",
"pizza",
"pizzly bear",
"place",
"plan",
"plane",
"plant",
"plastic",
"plate",
"platform",
"platypus",
"play",
"player",
"pleasure",
"plenty",
"plover",
"pocket gopher",
"poem",
"poet",
"poetry",
"pogona",
"point",
"poison dart frog",
"polar bear",
"police",
"policy",
"politics",
"pollution",
"pony",
"pool",
"pop",
"population",
"porcupine",
"porpoise",
"position",
"positive",
"possession",
"possibility",
"possible",
"possum",
"post",
"pot",
"potato",
"potential",
"potoo",
"potoroo",
"potto",
"pouched rat",
"pound",
"power",
"practice",
"prairie dog",
"prawn",
"praying mantis",
"preference",
"preparation",
"presence",
"present",
"presentation",
"president",
"press",
"pressure",
"price",
"pride",
"priest",
"primary",
"principle",
"print",
"prior",
"priority",
"private",
"prize",
"problem",
"procedure",
"process",
"produce",
"product",
"profession",
"professional",
"professor",
"profile",
"profit",
"program",
"progress",
"project",
"promise",
"promotion",
"prompt",
"pronghorn",
"proof",
"property",
"proposal",
"protection",
"przewalski's horse",
"psychology",
"ptarmigan",
"pterosaur",
"public",
"pudu",
"puff adder",
"puffer fish",
"puffin",
"pull",
"puma",
"punch",
"puppy",
"purchase",
"purple",
"purpose",
"push",
"put",
"python",
"qinling panda",
"quagga",
"quail",
"quality",
"quantity",
"quarter",
"queen",
"question",
"quetzal",
"quiet",
"quit",
"quokka",
"quoll",
"quote",
"rabbit",
"raccoon",
"raccoon dog",
"race",
"radio",
"rail",
"rain",
"rainbowfish",
"raise",
"range",
"rat",
"rate",
"ratio",
"rattlesnake",
"raven",
"raw",
"reach",
"reaction",
"read",
"reading",
"reality",
"reason",
"reception",
"recipe",
"recognition",
"recommendation",
"record",
"recording",
"recover",
"red",
"red panda",
"red river hog",
"reference",
"reflection",
"refrigerator",
"refuse",
"region",
"register",
"regret",
"regular",
"reindeer",
"relation",
"relationship",
"relative",
"release",
"relief",
"remote",
"remove",
"rent",
"repair",
"repeat",
"replacement",
"reply",
"report",
"representative",
"republic",
"reputation",
"request",
"requirement",
"research",
"reserve",
"resident",
"resist",
"resolution",
"resolve",
"resort",
"resource",
"respect",
"respond",
"response",
"responsibility",
"rest",
"restaurant",
"result",
"return",
"reveal",
"revenue",
"review",
"revolution",
"reward",
"rhea",
"rhinoceros",
"rice",
"rich",
"ride",
"ring",
"ringtail",
"rip",
"rise",
"risk",
"river",
"river dolphin",
"road",
"roadrunner",
"robin",
"rock",
"rock hyrax",
"rockfish",
"role",
"roll",
"roller",
"roof",
"rook",
"room",
"rope",
"rough",
"round",
"routine",
"row",
"royal",
"rub",
"rufous rat kangaroo",
"ruin",
"rule",
"run",
"rush",
"saber toothed cat",
"sad",
"safe",
"safety",
"sail",
"sailfish",
"salad",
"salamander",
"salary",
"sale",
"salmon",
"salt",
"sample",
"sand",
"sand cat",
"sandgrouse",
"sandwich",
"satisfaction",
"save",
"savings",
"sawfish",
"scale",
"scene",
"schedule",
"scheme",
"school",
"science",
"score",
"scorpion",
"scratch",
"screen",
"screw",
"script",
"sea",
"sea anemone",
"sea cucumber",
"sea duck",
"sea gull",
"sea lion",
"sea otter",
"sea snake",
"seadragon",
"seahorse",
"seal",
"search",
"season",
"seat",
"second",
"secret",
"secretary",
"section",
"sector",
"security",
"selection",
"self",
"sell",
"senior",
"sense",
"sensitive",
"sentence",
"series",
"serval",
"serve",
"service",
"session",
"set",
"setting",
"sex",
"shake",
"shame",
"shape",
"share",
"shark",
"she",
"shearwater",
"sheep",
"shelduck",
"shelter",
"shift",
"shine",
"ship",
"shirt",
"shock",
"shoe",
"shoebill",
"shoot",
"shop",
"shopping",
"shot",
"shoulder",
"show",
"shower",
"shrew",
"shrimp",
"sick",
"side",
"sign",
"signal",
"signature",
"significance",
"silly",
"silver",
"simple",
"sing",
"singer",
"single",
"sink",
"sir",
"sister",
"site",
"situation",
"size",
"skate",
"skill",
"skimmer",
"skin",
"skink",
"skirt",
"skua",
"skunk",
"sky",
"sleep",
"slice",
"slide",
"slip",
"sloth",
"sloth bear",
"slow loris",
"slug",
"smell",
"smile",
"smoke",
"snail",
"snake",
"snipe",
"snow",
"snow leopard",
"society",
"sock",
"soft",
"software",
"soil",
"solenodon",
"solid",
"solution",
"somewhere",
"son",
"song",
"songbird",
"sort",
"sound",
"soup",
"source",
"south",
"space",
"spare",
"sparrow",
"speaker",
"special",
"specialist",
"specific",
"spectacled bear",
"speech",
"speed",
"spell",
"spend",
"spider",
"spiny lobster",
"spiny mouse",
"spiny rat",
"spirit",
"spiritual",
"spite",
"split",
"sponge",
"spoonbill",
"sport",
"spot",
"spray",
"spread",
"spring",
"springhare",
"square",
"squid",
"squirrel",
"squirrel glider",
"stable",
"staff",
"stage",
"stand",
"standard",
"star",
"starfish",
"starling",
"start",
"state",
"statement",
"station",
"status",
"stay",
"steak",
"steal",
"step",
"steppe lemming",
"stick",
"stick bug",
"still",
"stingray",
"stoat",
"stock",
"stomach",
"stone curlew",
"stonefish",
"stop",
"storage",
"store",
"stork",
"storm",
"story",
"strain",
"stranger",
"strategy",
"street",
"strength",
"stress",
"stretch",
"strike",
"string",
"strip",
"stroke",
"structure",
"struggle",
"student",
"studio",
"study",
"stuff",
"stupid",
"sturgeon",
"style",
"subject",
"substance",
"success",
"suck",
"sugar",
"suggestion",
"suit",
"summer",
"sun",
"sun bear",
"sunbird",
"supermarket",
"support",
"surgeonfish",
"surgery",
"surprise",
"surround",
"survey",
"suspect",
"swallow",
"swamphen",
"swan",
"sweet",
"swift",
"swim",
"swimming",
"swing",
"switch",
"swordfish",
"sympathy",
"system",
"t rex",
"table",
"tackle",
"tadpole",
"takin",
"tale",
"talk",
"tamandua",
"tamarin",
"tanager",
"tank",
"tap",
"tapaculo",
"tapir",
"tarantula",
"target",
"tarpon",
"tarsier",
"task",
"tasmanian devil",
"tasmanian wolf",
"taste",
"tax",
"tayra",
"tea",
"teach",
"teacher",
"teaching",
"team",
"tear",
"technology",
"telephone",
"television",
"tell",
"temperature",
"temporary",
"tennis",
"tenrec",
"tension",
"term",
"termite",
"tern",
"test",
"tetra",
"text",
"thanks",
"theme",
"theory",
"thick knee",
"thing",
"thornbill",
"thought",
"thrasher",
"throat",
"thrush",
"ticket",
"tie",
"tiger",
"till",
"time",
"tip",
"tit",
"title",
"toad",
"toadfish",
"today",
"toe",
"tomorrow",
"tone",
"tongue",
"tonight",
"tool",
"tooth",
"top",
"topic",
"tortoise",
"total",
"toucan",
"touch",
"tough",
"tour",
"tourist",
"towel",
"tower",
"town",
"track",
"trade",
"tradition",
"traffic",
"train",
"trainer",
"training",
"transition",
"transportation",
"trash",
"travel",
"treat",
"tree",
"treeshrew",
"trick",
"trip",
"trogon",
"trouble",
"trout",
"truck",
"trumpeter",
"trust",
"truth",
"try",
"tuatara",
"tuna",
"tune",
"turaco",
"turkey",
"turn",
"turtle",
"twist",
"type",
"tyrant flycatcher",
"uncle",
"understanding",
"union",
"unique",
"unit",
"university",
"upper",
"upstairs",
"urchin",
"use",
"user",
"usual",
"vacation",
"valuable",
"value",
"vanga",
"vaquita",
"variation",
"variety",
"vast",
"vegetable",
"vehicle",
"version",
"vicuna",
"video",
"view",
"village",
"viper",
"virus",
"viscacha",
"visit",
"visual",
"voice",
"vole",
"volume",
"vulture",
"wader",
"wagtail",
"wait",
"wake",
"walk",
"wall",
"wallaby",
"walleye",
"walrus",
"wapiti",
"war",
"warbler",
"warning",
"warthog",
"wash",
"wasp",
"watch",
"water",
"water buffalo",
"wave",
"waxwing",
"way",
"weakness",
"wealth",
"wear",
"weasel",
"weather",
"weaver",
"weaver finch",
"web",
"wedding",
"week",
"weekend",
"weight",
"weird",
"welcome",
"west",
"western",
"whale",
"wheel",
"whereas",
"while",
"whistler",
"whistling duck",
"white",
"white eye",
"whole",
"whydah",
"widow spider",
"wife",
"wigeon",
"wildcat",
"wildebeest",
"will",
"win",
"wind",
"window",
"wine",
"wing",
"winner",
"winter",
"wish",
"witness",
"wolf",
"wolverine",
"woman",
"wombat",
"wonder",
"wongai ningaui",
"wood",
"woodchuck",
"woodcock",
"woodpecker",
"woodswallow",
"word",
"work",
"worker",
"working",
"world",
"worm",
"worry",
"worth",
"wrap",
"wren",
"writer",
"writing",
"x ray tetra",
"x-ray",
"xenophobe",
"xenopoecilus",
"xenops",
"xerus",
"xylophone",
"yak",
"yapok",
"yard",
"year",
"yellow",
"yellowjacket",
"yesterday",
"young",
"youth",
"zebra",
"zebra",
"zebu",
"zone",
"zzyzx"
],
"adverbs": [
"aboard",
"abnormally",
"about",
"abroad",
"absentmindedly",
"absolutely",
"abundantly",
"accidentally",
"accordingly",
"actively",
"actually",
"acutely",
"admiringly",
"affectionately",
"affirmatively",
"after",
"afterwards",
"agreeably",
"almost",
"already",
"always",
"amazingly",
"angrily",
"annoyingly",
"annually",
"anxiously",
"anyhow",
"anyplace",
"anyway",
"anywhere",
"appreciably",
"appropriately",
"around",
"arrogantly",
"aside",
"assuredly",
"astonishingly",
"away",
"awfully",
"awkwardly",
"badly",
"barely",
"bashfully",
"beautifully",
"before",
"begrudgingly",
"believably",
"bewilderedly",
"bewilderingly",
"bitterly",
"bleakly",
"blindly",
"blissfully",
"boldly",
"boastfully",
"boldly",
"boyishly",
"bravely",
"briefly",
"brightly",
"brilliantly",
"briskly",
"brutally",
"busily",
"calmly",
"candidly",
"carefully",
"carelessly",
"casually",
"cautiously",
"certainly",
"charmingly",
"cheerfully",
"chiefly",
"childishly",
"cleanly",
"clearly",
"cleverly",
"closely",
"cloudily",
"clumsily",
"coaxingly",
"coincidentally",
"coldly",
"colorfully",
"commonly",
"comfortably",
"compactly",
"compassionately",
"completely",
"confusedly",
"consequently",
"considerably",
"considerately",
"consistently",
"constantly",
"continually",
"continuously",
"coolly",
"correctly",
"courageously",
"covertly",
"cowardly",
"crazily",
"crossly",
"cruelly",
"cunningly",
"curiously",
"currently",
"customarily",
"cutely",
"daily",
"daintily",
"dangerously",
"daringly",
"darkly",
"dastardly",
"dearly",
"decently",
"deeply",
"defiantly",
"deftly",
"deliberately",
"delicately",
"delightfully",
"densely",
"diagonally",
"differently",
"diligently",
"dimly",
"directly",
"disorderly",
"divisively",
"docilely",
"dopily",
"doubtfully",
"down",
"dramatically",
"dreamily",
"during",
"eagerly",
"early",
"earnestly",
"easily",
"efficiently",
"effortlessly",
"elaborately",
"eloquently",
"elegantly",
"elsewhere",
"emotionally",
"endlessly",
"energetically",
"enjoyably",
"enormously",
"enough",
"enthusiastically",
"entirely",
"equally",
"especially",
"essentially",
"eternally",
"ethically",
"even",
"evenly",
"eventually",
"evermore",
"every",
"everywhere",
"evidently",
"evocatively",
"exactly",
"exceedingly",
"exceptionally",
"excitedly",
"exclusively",
"explicitly",
"expressly",
"extensively",
"externally",
"extra",
"extraordinarily",
"extremely",
"fairly",
"faithfully",
"famously",
"far",
"fashionably",
"fast",
"fatally",
"favorably",
"ferociously",
"fervently",
"fiercely",
"fiery",
"finally",
"financially",
"finitely",
"fluently",
"fondly",
"foolishly",
"forever",
"formally",
"formerly",
"fortunately",
"forward",
"frankly",
"frantically",
"freely",
"frequently",
"frenetically",
"fully",
"furiously",
"furthermore",
"generally",
"generously",
"genuinely",
"gently",
"genuinely",
"girlishly",
"gladly",
"gleefully",
"gracefully",
"graciously",
"gradually",
"gratefully",
"greatly",
"greedily",
"grimly",
"grudgingly",
"habitually",
"half-heartedly",
"handily",
"handsomely",
"haphazardly",
"happily",
"hastily",
"harmoniously",
"harshly",
"hastily",
"hatefully",
"hauntingly",
"healthily",
"heartily",
"heavily",
"helpfully",
"hence",
"highly",
"hitherto",
"honestly",
"hopelessly",
"horizontally",
"hourly",
"how",
"however",
"hugely",
"humorously",
"hungrily",
"hurriedly",
"hysterically",
"icily",
"identifiably",
"idiotically",
"imaginatively",
"immeasurably",
"immediately",
"immensely",
"impatiently",
"impressively",
"inappropriately",
"incessantly",
"incorrectly",
"indeed",
"independently",
"indoors",
"indubitably",
"inevitably",
"infinitely",
"informally",
"infrequently",
"innocently",
"inquisitively",
"instantly",
"intelligently",
"intensely",
"intently",
"interestingly",
"intermittently",
"internally",
"invariably",
"invisibly",
"inwardly",
"ironically",
"irrefutably",
"irritably",
"jaggedly",
"jauntily",
"jealously",
"jovially",
"joyfully",
"joylessly",
"joyously",
"jubilantly",
"judgmentally",
"just",
"justly",
"keenly",
"kiddingly",
"kindheartedly",
"kindly",
"knavishly",
"knottily",
"knowingly",
"knowledgeably",
"kookily",
"lastly",
"late",
"lately",
"later",
"lazily",
"less",
"lightly",
"likely",
"limply",
"lithely",
"lively",
"loftily",
"longingly",
"loosely",
"loudly",
"lovingly",
"loyally",
"luckily",
"luxuriously",
"madly",
"magically",
"mainly",
"majestically",
"markedly",
"materially",
"meaningfully",
"meanly",
"meantime",
"meanwhile",
"measurably",
"mechanically",
"medically",
"menacingly",
"merely",
"merrily",
"methodically",
"mightily",
"miserably",
"mockingly",
"monthly",
"morally",
"more",
"moreover",
"mortally",
"mostly",
"much",
"mysteriously",
"nastily",
"naturally",
"naughtily",
"nearby",
"nearly",
"neatly",
"needily",
"negatively",
"nervously",
"never",
"nevertheless",
"next",
"nicely",
"nightly",
"noisily",
"normally",
"nosily",
"not",
"now",
"nowadays",
"numbly",
"obediently",
"obligingly",
"obnoxiously",
"obviously",
"occasionally",
"oddly",
"offensively",
"officially",
"often",
"ominously",
"once",
"only",
"openly",
"optimistically",
"orderly",
"ordinarily",
"outdoors",
"outrageously",
"outwardly",
"outwards",
"overconfidently",
"overseas",
"painfully",
"painlessly",
"paradoxically",
"partially",
"particularly",
"passionately",
"patiently",
"perfectly",
"periodically",
"perpetually",
"persistently",
"personally",
"persuasively",
"physically",
"plainly",
"playfully",
"poetically",
"poignantly",
"politely",
"poorly",
"positively",
"possibly",
"potentially",
"powerfully",
"presently",
"presumably",
"prettily",
"previously",
"primly",
"principally",
"probably",
"promptly",
"properly",
"proudly",
"punctually",
"puzzlingly",
"quaintly",
"queasily",
"questionably",
"questioningly",
"quicker",
"quickly",
"quietly",
"quirkily",
"quite",
"quizzically",
"randomly",
"rapidly",
"rarely",
"readily",
"really",
"reasonably",
"reassuringly",
"recently",
"recklessly",
"regularly",
"reliably",
"reluctantly",
"remarkably",
"repeatedly",
"reproachfully",
"reponsibly",
"resentfully",
"respectably",
"respectfully",
"restfully",
"richly",
"ridiculously",
"righteously",
"rightfully",
"rightly",
"rigidly",
"roughly",
"routinely",
"rudely",
"ruthlessly",
"sadly",
"safely",
"scarcely",
"scarily",
"scientifically",
"searchingly",
"secretively",
"securely",
"sedately",
"seemingly",
"seldom",
"selfishly",
"selflessly",
"separately",
"seriously",
"shakily",
"shamelessly",
"sharply",
"sheepishly",
"shoddily",
"shortly",
"shrilly",
"significantly",
"silently",
"simply",
"sincerely",
"singularly",
"shyly",
"skillfully",
"sleepily",
"slightly",
"slowly",
"slyly",
"smoothly",
"so",
"softly",
"solely",
"solemnly",
"solidly",
"silicitiously",
"somehow",
"sometimes",
"somewhat",
"somewhere",
"soon",
"specially",
"specifically",
"spectacularly",
"speedily",
"spiritually",
"splendidly",
"sporadically",
"spasmodically",
"startlingly",
"steadily",
"stealthily",
"sternly",
"still",
"strenuously",
"stressfully",
"strictly",
"structurally",
"studiously",
"stupidly",
"stylishly",
"subsequently",
"substantially",
"subtly",
"successfully",
"suddenly",
"sufficiently",
"suitably",
"superficially",
"supremely",
"surely",
"surprisingly",
"suspiciously",
"sweetly",
"swiftly",
"sympathetically",
"systematically",
"temporarily",
"tenderly",
"tensely",
"tepidly",
"terribly",
"thankfully",
"then",
"there",
"thereby",
"thoroughly",
"thoughtfully",
"thus",
"tightly",
"today",
"together",
"tomorrow",
"too",
"totally",
"touchingly",
"tremendously",
"truly",
"truthfully",
"twice",
"ultimately",
"unabashedly",
"unanimously",
"unbearably",
"unbelievably",
"unemotionally",
"unethically",
"unexpectedly",
"unfailingly",
"unfavorably",
"unfortunately",
"uniformly",
"unilaterally",
"unimpressively",
"universally",
"unnaturally",
"unnecessarily",
"unquestionably",
"unwillingly",
"up",
"upbeat",
"unkindly",
"upliftingly",
"upright",
"unselfishly",
"upside-down",
"unskillfully",
"upward",
"upwardly",
"urgently",
"usefully",
"uselessly",
"usually",
"utterly",
"vacantly",
"vaguely",
"vainly",
"valiantly",
"vastly",
"verbally",
"vertically",
"very",
"viciously",
"victoriously",
"vigilantly",
"vigorously",
"violently",
"visibly",
"visually",
"vivaciously",
"voluntarily",
"warmly",
"weakly",
"wearily",
"weekly",
"well",
"wetly",
"when",
"where",
"while",
"whole-heartedly",
"wholly",
"why",
"wickedly",
"widely",
"wiggly",
"wildly",
"willfully",
"willingly",
"wisely",
"woefully",
"wonderfully",
"worriedly",
"worthily",
"wrongly",
"yearly",
"yearningly",
"yesterday",
"yet",
"youthfully",
"zanily",
"zealously",
"zestfully",
"zestily"
],
"adjectives": [
"abandoned",
"able",
"absolute",
"adorable",
"adventurous",
"academic",
"acceptable",
"acclaimed",
"accomplished",
"accurate",
"aching",
"acidic",
"acrobatic",
"active",
"actual",
"adept",
"admirable",
"admired",
"adolescent",
"adorable",
"adored",
"advanced",
"afraid",
"affectionate",
"aged",
"aggravating",
"aggressive",
"agile",
"agitated",
"agonizing",
"agreeable",
"ajar",
"alarmed",
"alarming",
"alert",
"alienated",
"alive",
"all",
"altruistic",
"amazing",
"ambitious",
"ample",
"amused",
"amusing",
"anchored",
"ancient",
"angelic",
"angry",
"anguished",
"animated",
"annual",
"another",
"antique",
"anxious",
"any",
"apprehensive",
"appropriate",
"apt",
"arctic",
"arid",
"aromatic",
"artistic",
"ashamed",
"assured",
"astonishing",
"athletic",
"attached",
"attentive",
"attractive",
"austere",
"authentic",
"authorized",
"automatic",
"avaricious",
"average",
"aware",
"awesome",
"awful",
"awkward",
"babyish",
"bad",
"back",
"baggy",
"bare",
"barren",
"basic",
"beautiful",
"belated",
"beloved",
"beneficial",
"better",
"best",
"bewitched",
"big",
"big-hearted",
"biodegradable",
"bite-sized",
"bitter",
"black",
"black-and-white",
"bland",
"blank",
"blaring",
"bleak",
"blind",
"blissful",
"blond",
"blue",
"blushing",
"bogus",
"boiling",
"bold",
"bony",
"boring",
"bossy",
"both",
"bouncy",
"bountiful",
"bowed",
"brave",
"breakable",
"brief",
"bright",
"brilliant",
"brisk",
"broken",
"bronze",
"brown",
"bruised",
"bubbly",
"bulky",
"bumpy",
"buoyant",
"burdensome",
"burly",
"bustling",
"busy",
"buttery",
"buzzing",
"calculating",
"calm",
"candid",
"canine",
"capital",
"carefree",
"careful",
"careless",
"caring",
"cautious",
"cavernous",
"celebrated",
"charming",
"cheap",
"cheerful",
"cheery",
"chief",
"chilly",
"chubby",
"circular",
"classic",
"clean",
"clear",
"clear-cut",
"clever",
"close",
"closed",
"cloudy",
"clueless",
"clumsy",
"cluttered",
"coarse",
"cold",
"colorful",
"colorless",
"colossal",
"comfortable",
"common",
"compassionate",
"competent",
"complete",
"complex",
"complicated",
"composed",
"concerned",
"concrete",
"confused",
"conscious",
"considerate",
"constant",
"content",
"conventional",
"cooked",
"cool",
"cooperative",
"coordinated",
"corny",
"corrupt",
"costly",
"courageous",
"courteous",
"crafty",
"crazy",
"creamy",
"creative",
"creepy",
"criminal",
"crisp",
"critical",
"crooked",
"crowded",
"cruel",
"crushing",
"cuddly",
"cultivated",
"cultured",
"cumbersome",
"curly",
"curvy",
"cute",
"cylindrical",
"damaged",
"damp",
"dangerous",
"dapper",
"daring",
"darling",
"dark",
"dazzling",
"dead",
"deadly",
"deafening",
"dear",
"dearest",
"decent",
"decimal",
"decisive",
"deep",
"defenseless",
"defensive",
"defiant",
"deficient",
"definite",
"definitive",
"delayed",
"delectable",
"delicious",
"delightful",
"delirious",
"demanding",
"dense",
"dental",
"dependable",
"dependent",
"descriptive",
"deserted",
"detailed",
"determined",
"devoted",
"different",
"difficult",
"digital",
"diligent",
"dim",
"dimpled",
"dimwitted",
"direct",
"disastrous",
"discrete",
"disfigured",
"disgusting",
"disloyal",
"dismal",
"distant",
"downright",
"dreary",
"dirty",
"disguised",
"dishonest",
"dismal",
"distant",
"distinct",
"distorted",
"dizzy",
"dopey",
"doting",
"double",
"downright",
"drab",
"drafty",
"dramatic",
"dreary",
"droopy",
"dry",
"dual",
"dull",
"dutiful",
"each",
"eager",
"earnest",
"early",
"easy",
"easy-going",
"ecstatic",
"edible",
"educated",
"elaborate",
"elastic",
"elated",
"elderly",
"electric",
"elegant",
"elementary",
"elliptical",
"embarrassed",
"embellished",
"eminent",
"emotional",
"empty",
"enchanted",
"enchanting",
"energetic",
"enlightened",
"enormous",
"enraged",
"entire",
"envious",
"equal",
"equatorial",
"essential",
"esteemed",
"ethical",
"euphoric",
"even",
"evergreen",
"everlasting",
"every",
"evil",
"exalted",
"excellent",
"exemplary",
"exhausted",
"excitable",
"excited",
"exciting",
"exotic",
"expensive",
"experienced",
"expert",
"extraneous",
"extroverted",
"extra-large",
"extra-small",
"fabulous",
"failing",
"faint",
"fair",
"faithful",
"fake",
"false",
"familiar",
"famous",
"fancy",
"fantastic",
"far",
"faraway",
"far-flung",
"far-off",
"fast",
"fat",
"fatal",
"fatherly",
"favorable",
"favorite",
"fearful",
"fearless",
"feisty",
"feline",
"female",
"feminine",
"few",
"fickle",
"filthy",
"fine",
"finished",
"firm",
"first",
"firsthand",
"fitting",
"fixed",
"flaky",
"flamboyant",
"flashy",
"flat",
"flawed",
"flawless",
"flickering",
"flimsy",
"flippant",
"flowery",
"fluffy",
"fluid",
"flustered",
"focused",
"fond",
"foolhardy",
"foolish",
"forceful",
"forked",
"formal",
"forsaken",
"forthright",
"fortunate",
"fragrant",
"frail",
"frank",
"frayed",
"free",
"French",
"fresh",
"frequent",
"friendly",
"frightened",
"frightening",
"frigid",
"frilly",
"frizzy",
"frivolous",
"front",
"frosty",
"frozen",
"frugal",
"fruitful",
"full",
"fumbling",
"functional",
"funny",
"fussy",
"fuzzy",
"gargantuan",
"gaseous",
"general",
"generous",
"gentle",
"genuine",
"giant",
"giddy",
"gigantic",
"gifted",
"giving",
"glamorous",
"glaring",
"glass",
"gleaming",
"gleeful",
"glistening",
"glittering",
"gloomy",
"glorious",
"glossy",
"glum",
"golden",
"good",
"good-natured",
"gorgeous",
"graceful",
"gracious",
"grand",
"grandiose",
"granular",
"grateful",
"grave",
"gray",
"great",
"greedy",
"green",
"gregarious",
"grim",
"grimy",
"gripping",
"grizzled",
"gross",
"grotesque",
"grouchy",
"grounded",
"growing",
"growling",
"grown",
"grubby",
"gruesome",
"grumpy",
"guilty",
"gullible",
"gummy",
"hairy",
"half",
"handmade",
"handsome",
"handy",
"happy",
"happy-go-lucky",
"hard",
"hard-to-find",
"harmful",
"harmless",
"harmonious",
"harsh",
"hasty",
"hateful",
"haunting",
"healthy",
"heartfelt",
"hearty",
"heavenly",
"heavy",
"hefty",
"helpful",
"helpless",
"hidden",
"hideous",
"high",
"high-level",
"hilarious",
"hoarse",
"hollow",
"homely",
"honest",
"honorable",
"honored",
"hopeful",
"horrible",
"hospitable",
"hot",
"huge",
"humble",
"humiliating",
"humming",
"humongous",
"hungry",
"hurtful",
"husky",
"icky",
"icy",
"ideal",
"idealistic",
"identical",
"idle",
"idiotic",
"idolized",
"ignorant",
"ill",
"illegal",
"ill-fated",
"ill-informed",
"illiterate",
"illustrious",
"imaginary",
"imaginative",
"immaculate",
"immaterial",
"immediate",
"immense",
"impassioned",
"impeccable",
"impartial",
"imperfect",
"imperturbable",
"impish",
"impolite",
"important",
"impossible",
"impractical",
"impressionable",
"impressive",
"improbable",
"impure",
"inborn",
"incomparable",
"incompatible",
"incomplete",
"inconsequential",
"incredible",
"indelible",
"inexperienced",
"indolent",
"infamous",
"infantile",
"infatuated",
"inferior",
"infinite",
"informal",
"innocent",
"insecure",
"insidious",
"insignificant",
"insistent",
"instructive",
"insubstantial",
"intelligent",
"intent",
"intentional",
"interesting",
"internal",
"international",
"intrepid",
"ironclad",
"irresponsible",
"irritating",
"itchy",
"jaded",
"jagged",
"jam-packed",
"jaunty",
"jealous",
"jittery",
"joint",
"jolly",
"jovial",
"joyful",
"joyous",
"jubilant",
"judicious",
"juicy",
"jumbo",
"junior",
"jumpy",
"juvenile",
"kaleidoscopic",
"keen",
"key",
"kind",
"kindhearted",
"kindly",
"klutzy",
"knobby",
"knotty",
"knowledgeable",
"knowing",
"known",
"kooky",
"kosher",
"lame",
"lanky",
"large",
"last",
"lasting",
"late",
"lavish",
"lawful",
"lazy",
"leading",
"lean",
"leafy",
"left",
"legal",
"legitimate",
"light",
"lighthearted",
"likable",
"likely",
"limited",
"limp",
"limping",
"linear",
"lined",
"liquid",
"little",
"live",
"lively",
"livid",
"loathsome",
"lone",
"lonely",
"long",
"long-term",
"loose",
"lopsided",
"lost",
"loud",
"lovable",
"lovely",
"loving",
"low",
"loyal",
"lucky",
"lumbering",
"luminous",
"lumpy",
"lustrous",
"luxurious",
"mad",
"made-up",
"magnificent",
"majestic",
"major",
"male",
"mammoth",
"married",
"marvelous",
"masculine",
"massive",
"mature",
"meager",
"mealy",
"mean",
"measly",
"meaty",
"medical",
"mediocre",
"medium",
"meek",
"mellow",
"melodic",
"memorable",
"menacing",
"merry",
"messy",
"metallic",
"mild",
"milky",
"mindless",
"miniature",
"minor",
"minty",
"miserable",
"miserly",
"misguided",
"misty",
"mixed",
"modern",
"modest",
"moist",
"monstrous",
"monthly",
"monumental",
"moral",
"mortified",
"motherly",
"motionless",
"mountainous",
"muddy",
"muffled",
"multicolored",
"mundane",
"murky",
"mushy",
"musty",
"muted",
"mysterious",
"naive",
"narrow",
"nasty",
"natural",
"naughty",
"nautical",
"near",
"neat",
"necessary",
"needy",
"negative",
"neglected",
"negligible",
"neighboring",
"nervous",
"new",
"next",
"nice",
"nifty",
"nimble",
"nippy",
"nocturnal",
"noisy",
"nonstop",
"normal",
"notable",
"noted",
"noteworthy",
"novel",
"noxious",
"numb",
"nutritious",
"nutty",
"obedient",
"obese",
"oblong",
"oily",
"oblong",
"obvious",
"occasional",
"odd",
"oddball",
"offbeat",
"offensive",
"official",
"old",
"old-fashioned",
"only",
"open",
"optimal",
"optimistic",
"opulent",
"orange",
"orderly",
"organic",
"ornate",
"ornery",
"ordinary",
"original",
"other",
"our",
"outlying",
"outgoing",
"outlandish",
"outrageous",
"outstanding",
"oval",
"overcooked",
"overdue",
"overjoyed",
"overlooked",
"palatable",
"pale",
"paltry",
"parallel",
"parched",
"partial",
"passionate",
"past",
"pastel",
"peaceful",
"peppery",
"perfect",
"perfumed",
"periodic",
"perky",
"personal",
"pertinent",
"pesky",
"pessimistic",
"petty",
"phony",
"physical",
"piercing",
"pink",
"pitiful",
"plain",
"plaintive",
"plastic",
"playful",
"pleasant",
"pleased",
"pleasing",
"plump",
"plush",
"polished",
"polite",
"political",
"pointed",
"pointless",
"poised",
"poor",
"popular",
"portly",
"posh",
"positive",
"possible",
"potable",
"powerful",
"powerless",
"practical",
"precious",
"present",
"prestigious",
"pretty",
"precious",
"previous",
"pricey",
"prickly",
"primary",
"prime",
"pristine",
"private",
"prize",
"probable",
"productive",
"profitable",
"profuse",
"proper",
"proud",
"prudent",
"punctual",
"pungent",
"puny",
"pure",
"purple",
"pushy",
"putrid",
"puzzled",
"puzzling",
"quaint",
"qualified",
"quarrelsome",
"quarterly",
"queasy",
"querulous",
"questionable",
"quick",
"quick-witted",
"quiet",
"quintessential",
"quirky",
"quixotic",
"quizzical",
"radiant",
"ragged",
"rapid",
"rare",
"rash",
"raw",
"recent",
"reckless",
"rectangular",
"ready",
"real",
"realistic",
"reasonable",
"red",
"reflecting",
"regal",
"regular",
"reliable",
"relieved",
"remarkable",
"remorseful",
"remote",
"repentant",
"required",
"respectful",
"responsible",
"repulsive",
"revolving",
"rewarding",
"rich",
"rigid",
"right",
"ringed",
"ripe",
"roasted",
"robust",
"rosy",
"rotating",
"rotten",
"rough",
"round",
"rowdy",
"royal",
"rubbery",
"rundown",
"ruddy",
"rude",
"runny",
"rural",
"rusty",
"sad",
"safe",
"salty",
"same",
"sandy",
"sane",
"sarcastic",
"sardonic",
"satisfied",
"scaly",
"scarce",
"scared",
"scary",
"scented",
"scholarly",
"scientific",
"scornful",
"scratchy",
"scrawny",
"second",
"secondary",
"second-hand",
"secret",
"self-assured",
"self-reliant",
"selfish",
"sentimental",
"separate",
"serene",
"serious",
"serpentine",
"several",
"severe",
"shabby",
"shadowy",
"shady",
"shallow",
"shameful",
"shameless",
"sharp",
"shimmering",
"shiny",
"shocked",
"shocking",
"shoddy",
"short",
"short-term",
"showy",
"shrill",
"shy",
"sick",
"silent",
"silky",
"silly",
"silver",
"similar",
"simple",
"simplistic",
"sinful",
"single",
"sizzling",
"skeletal",
"skinny",
"sleepy",
"slight",
"slim",
"slimy",
"slippery",
"slow",
"slushy",
"small",
"smart",
"smoggy",
"smooth",
"smug",
"snappy",
"snarling",
"sneaky",
"sniveling",
"snoopy",
"sociable",
"soft",
"soggy",
"solid",
"somber",
"some",
"spherical",
"sophisticated",
"sore",
"sorrowful",
"soulful",
"soupy",
"sour",
"Spanish",
"sparkling",
"sparse",
"specific",
"spectacular",
"speedy",
"spicy",
"spiffy",
"spirited",
"spiteful",
"splendid",
"spotless",
"spotted",
"spry",
"square",
"squeaky",
"squiggly",
"stable",
"staid",
"stained",
"stale",
"standard",
"starchy",
"stark",
"starry",
"steep",
"sticky",
"stiff",
"stimulating",
"stingy",
"stormy",
"straight",
"strange",
"steel",
"strict",
"strident",
"striking",
"striped",
"strong",
"studious",
"stunning",
"stupendous",
"stupid",
"sturdy",
"stylish",
"subdued",
"submissive",
"substantial",
"subtle",
"suburban",
"sudden",
"sugary",
"sunny",
"super",
"superb",
"superficial",
"superior",
"supportive",
"sure-footed",
"surprised",
"suspicious",
"svelte",
"sweaty",
"sweet",
"sweltering",
"swift",
"sympathetic",
"tall",
"talkative",
"tame",
"tan",
"tangible",
"tart",
"tasty",
"tattered",
"taut",
"tedious",
"teeming",
"tempting",
"tender",
"tense",
"tepid",
"terrible",
"terrific",
"testy",
"thankful",
"that",
"these",
"thick",
"thin",
"third",
"thirsty",
"this",
"thorough",
"thorny",
"those",
"thoughtful",
"threadbare",
"thrifty",
"thunderous",
"tidy",
"tight",
"timely",
"tinted",
"tiny",
"tired",
"torn",
"total",
"tough",
"traumatic",
"treasured",
"tremendous",
"tragic",
"trained",
"tremendous",
"triangular",
"tricky",
"trifling",
"trim",
"trivial",
"troubled",
"true",
"trusting",
"trustworthy",
"trusty",
"truthful",
"tubby",
"turbulent",
"twin",
"ugly",
"ultimate",
"unacceptable",
"unaware",
"uncomfortable",
"uncommon",
"unconscious",
"understated",
"unequaled",
"uneven",
"unfinished",
"unfit",
"unfolded",
"unfortunate",
"unhappy",
"unhealthy",
"uniform",
"unimportant",
"unique",
"united",
"unkempt",
"unknown",
"unlawful",
"unlined",
"unlucky",
"unnatural",
"unpleasant",
"unrealistic",
"unripe",
"unruly",
"unselfish",
"unsightly",
"unsteady",
"unsung",
"untidy",
"untimely",
"untried",
"untrue",
"unused",
"unusual",
"unwelcome",
"unwieldy",
"unwilling",
"unwitting",
"unwritten",
"upbeat",
"upright",
"upset",
"urban",
"usable",
"used",
"useful",
"useless",
"utilized",
"utter",
"vacant",
"vague",
"vain",
"valid",
"valuable",
"vapid",
"variable",
"vast",
"velvety",
"venerated",
"vengeful",
"verifiable",
"vibrant",
"vicious",
"victorious",
"vigilant",
"vigorous",
"villainous",
"violet",
"violent",
"virtual",
"virtuous",
"visible",
"vital",
"vivacious",
"vivid",
"voluminous",
"wan",
"warlike",
"warm",
"warmhearted",
"warped",
"wary",
"wasteful",
"watchful",
"waterlogged",
"watery",
"wavy",
"wealthy",
"weak",
"weary",
"webbed",
"wee",
"weekly",
"weepy",
"weighty",
"weird",
"welcome",
"well-documented",
"well-groomed",
"well-informed",
"well-lit",
"well-made",
"well-off",
"well-to-do",
"well-worn",
"wet",
"which",
"whimsical",
"whirlwind",
"whispered",
"white",
"whole",
"whopping",
"wicked",
"wide",
"wide-eyed",
"wiggly",
"wild",
"willing",
"wilted",
"winding",
"windy",
"winged",
"wiry",
"wise",
"witty",
"wobbly",
"woeful",
"wonderful",
"wooden",
"woozy",
"wordy",
"worldly",
"worn",
"worried",
"worrisome",
"worse",
"worst",
"worthless",
"worthwhile",
"worthy",
"wrathful",
"wretched",
"writhing",
"wrong",
"wry",
"yawning",
"yearly",
"yellow",
"yellowish",
"young",
"youthful",
"yummy",
"zany",
"zealous",
"zesty",
"zigzag"
],
"verbs": [
"accept",
"ache",
"acknowledge",
"act",
"add",
"admire",
"admit",
"admonish",
"advise",
"adopt",
"affirm",
"afford",
"agree",
"ail",
"alert",
"allege",
"allow",
"allude",
"amuse",
"analyze",
"announce",
"annoy",
"answer",
"apologize",
"appeal",
"appear",
"applaud",
"appreciate",
"approve",
"argue",
"arrange",
"arrest",
"arrive",
"articulate",
"ask",
"assert",
"assure",
"attach",
"attack",
"attempt",
"attend",
"attract",
"auction",
"avoid",
"avow",
"awake",
"babble",
"back",
"bake",
"balance",
"balk",
"ban",
"bang",
"bandage",
"bar",
"bare",
"bargain",
"bark",
"barrage",
"barter",
"baste",
"bat",
"bathe",
"battle",
"bawl",
"be",
"beam",
"bear",
"beat",
"become",
"befriend",
"beg",
"begin",
"behave",
"believe",
"bellow",
"belong",
"bend",
"berate",
"besiege",
"bestow",
"bet",
"bid",
"bite",
"bleach",
"bleed",
"bless",
"blind",
"blink",
"blot",
"blow",
"blurt",
"blush",
"boast",
"bob",
"boil",
"bolt",
"bomb",
"book",
"bore",
"borrow",
"bounce",
"bow",
"box",
"brag",
"brake",
"branch",
"brand",
"break",
"breathe",
"breed",
"bring",
"broadcast",
"broil",
"bruise",
"brush",
"bubble",
"build",
"bump",
"burn",
"burnish",
"bury",
"buy",
"buzz",
"cajole",
"calculate",
"call",
"camp",
"care",
"carry",
"carve",
"cause",
"caution",
"catch",
"challenge",
"change",
"chant",
"charge",
"chase",
"cheat",
"check",
"cheer",
"chew",
"chide",
"chip",
"choke",
"chomp",
"choose",
"chop",
"claim",
"clap",
"clean",
"clear",
"climb",
"clip",
"close",
"coach",
"coil",
"collect",
"color",
"comb",
"come",
"comfort",
"command",
"comment",
"communicate",
"compare",
"compete",
"complain",
"complete",
"concede",
"concentrate",
"concern",
"conclude",
"concur",
"confess",
"confide",
"confirm",
"connect",
"consent",
"consider",
"consist",
"contain",
"contend",
"continue",
"cook",
"copy",
"correct",
"cost",
"cough",
"count",
"counter",
"cover",
"covet",
"crack",
"crash",
"crave",
"crawl",
"crochet",
"cross",
"criticize",
"croak",
"cross-examine",
"crowd",
"crush",
"cry",
"cure",
"curl",
"curse",
"curve",
"cut",
"cycle",
"dam",
"damage",
"dance",
"dare",
"deal",
"debate",
"decay",
"deceive",
"decide",
"decipher",
"declare",
"decorate",
"delay",
"delight",
"deliver",
"demand",
"deny",
"depend",
"describe",
"desert",
"deserve",
"desire",
"deter",
"develop",
"dial",
"dictate",
"die",
"dig",
"digress",
"direct",
"disclose",
"dislike",
"dive",
"divide",
"divorce",
"divulge",
"do",
"dock",
"dole",
"dote",
"double",
"doubt",
"drag",
"drain",
"draw",
"dream",
"dress",
"drip",
"drill",
"drink",
"drive",
"drone",
"drop",
"drown",
"dry",
"dupe",
"dump",
"dust",
"dye",
"earn",
"eat",
"echo",
"edit",
"educate",
"elope",
"embarrass",
"emigrate",
"emit",
"emphasize",
"employ",
"empty",
"enchant",
"encode",
"encourage",
"end",
"enjoin",
"enjoy",
"enter",
"entertain",
"enunciate",
"envy",
"equivocate",
"escape",
"evacuate",
"evaporate",
"exaggerate",
"examine",
"excite",
"exclaim",
"excuse",
"exercise",
"exist",
"expand",
"expect",
"expel",
"exhort",
"explain",
"explode",
"explore",
"extend",
"extoll",
"face",
"fade",
"fail",
"fall",
"falter",
"fasten",
"favor",
"fax",
"fear",
"feed",
"feel",
"fence",
"fetch",
"fight",
"file",
"fill",
"film",
"find",
"fire",
"fish",
"fit",
"fix",
"flap",
"flash",
"flee",
"float",
"flood",
"floss",
"flow",
"flower",
"fly",
"fold",
"follow",
"fool",
"force",
"foretell",
"forget",
"forgive",
"form",
"found",
"frame",
"freeze",
"fret",
"frighten",
"fry",
"fume",
"garden",
"gasp",
"gather",
"gaze",
"gel",
"get",
"gild",
"give",
"glide",
"glue",
"gnaw",
"go",
"grab",
"grate",
"grease",
"greet",
"grill",
"grin",
"grip",
"groan",
"grow",
"growl",
"grumble",
"grunt",
"guarantee",
"guard",
"guess",
"guide",
"gurgle",
"gush",
"hail",
"hammer",
"hand",
"handle",
"hang",
"happen",
"harass",
"harm",
"harness",
"hate",
"haunt",
"have",
"head",
"heal",
"heap",
"hear",
"heat",
"help",
"hide",
"highlight",
"hijack",
"hinder",
"hint",
"hiss",
"hit",
"hold",
"hook",
"hoot",
"hop",
"hope",
"hover",
"howl",
"hug",
"hum",
"hunt",
"hurry",
"hurt",
"ice",
"identify",
"ignore",
"imagine",
"immigrate",
"imply",
"implore",
"impress",
"improve",
"include",
"increase",
"infect",
"inflate",
"influence",
"inform",
"infuse",
"inject",
"injure",
"inquire",
"insist",
"inspect",
"inspire",
"instruct",
"intend",
"interest",
"interfere",
"interject",
"interrupt",
"introduce",
"invent",
"invest",
"invite",
"irritate",
"iron",
"itch",
"jab",
"jabber",
"jail",
"jam",
"jeer",
"jest",
"jog",
"join",
"joke",
"jolt",
"judge",
"juggle",
"jump",
"keep",
"kick",
"kill",
"kiss",
"kneel",
"knit",
"knock",
"knot",
"know",
"label",
"lament",
"land",
"last",
"laugh",
"lay",
"lead",
"lean",
"learn",
"leave",
"lecture",
"lend",
"let",
"level",
"license",
"lick",
"lie",
"lift",
"light",
"lighten",
"like",
"list",
"listen",
"live",
"load",
"loan",
"lock",
"long",
"look",
"loosen",
"lose",
"love",
"lower",
"mail",
"maintain",
"make",
"man",
"manage",
"mar",
"march",
"mark",
"marry",
"marvel",
"mate",
"matter",
"mean",
"measure",
"meet",
"melt",
"memorize",
"mend",
"mention",
"merge",
"milk",
"mine",
"miss",
"mix",
"moan",
"moor",
"mourn",
"molt",
"move",
"mow",
"mug",
"multiply",
"mumble",
"murder",
"mutter",
"nag",
"nail",
"name",
"nap",
"need",
"nest",
"nod",
"note",
"notice",
"number",
"obey",
"object",
"observe",
"obtain",
"occur",
"offend",
"offer",
"ogle",
"oil",
"omit",
"open",
"operate",
"order",
"overflow",
"overrun",
"owe",
"own",
"pack",
"pad",
"paddle",
"paint",
"pant",
"park",
"part",
"pass",
"paste",
"pat",
"pause",
"pay",
"peck",
"pedal",
"peel",
"peep",
"peer",
"peg",
"pelt",
"perform",
"permit",
"pester",
"pet",
"phone",
"pick",
"pinch",
"pine",
"place",
"plan",
"plant",
"play",
"plead",
"please",
"pledge",
"plow",
"plug",
"point",
"poke",
"polish",
"ponder",
"pop",
"possess",
"post",
"postulate",
"pour",
"practice",
"pray",
"preach",
"precede",
"predict",
"prefer",
"prepare",
"present",
"preserve",
"press",
"pretend",
"prevent",
"prick",
"print",
"proceed",
"proclaim",
"produce",
"profess",
"program",
"promise",
"propose",
"protect",
"protest",
"provide",
"pry",
"pull",
"pump",
"punch",
"puncture",
"punish",
"push",
"put",
"question",
"quilt",
"quit",
"quiz",
"quote",
"race",
"radiate",
"rain",
"raise",
"rant",
"rain",
"rate",
"rave",
"reach",
"realize",
"read",
"rebuff",
"recall",
"receive",
"recite",
"recognize",
"recommend",
"record",
"reduce",
"reflect",
"refuse",
"regret",
"reign",
"reiterate",
"reject",
"rejoice",
"relate",
"relax",
"release",
"rely",
"remain",
"remember",
"remind",
"remove",
"repair",
"repeat",
"replace",
"reply",
"report",
"reprimand",
"reproduce",
"request",
"rescue",
"retire",
"retort",
"return",
"reveal",
"reverse",
"rhyme",
"ride",
"ring",
"rinse",
"rise",
"risk",
"roar",
"rob",
"rock",
"roll",
"rot",
"row",
"rub",
"ruin",
"rule",
"run",
"rush",
"sack",
"sail",
"satisfy",
"save",
"savor",
"saw",
"say",
"scare",
"scatter",
"scoff",
"scold",
"scoot",
"scorch",
"scrape",
"scratch",
"scream",
"screech",
"screw",
"scribble",
"seal",
"search",
"see",
"sell",
"send",
"sense",
"separate",
"serve",
"set",
"settle",
"sever",
"sew",
"shade",
"shampoo",
"share",
"shave",
"shelter",
"shift",
"shiver",
"shock",
"shoot",
"shop",
"shout",
"show",
"shriek",
"shrug",
"shut",
"sigh",
"sign",
"signal",
"sin",
"sing",
"singe",
"sip",
"sit",
"skate",
"skateboard",
"sketch",
"ski",
"skip",
"slap",
"sleep",
"slice",
"slide",
"slip",
"slow",
"smash",
"smell",
"smile",
"smoke",
"snap",
"snarl",
"snatch",
"sneak",
"sneer",
"sneeze",
"snicker",
"sniff",
"snore",
"snort",
"snoop",
"snooze",
"snow",
"soak",
"sob",
"soothe",
"sound",
"sow",
"span",
"spare",
"spark",
"sparkle",
"speak",
"speculate",
"spell",
"spend",
"spill",
"spin",
"spoil",
"spot",
"spray",
"sprout",
"sputter",
"squash",
"squeeze",
"stab",
"stain",
"stammer",
"stamp",
"stand",
"star",
"stare",
"start",
"stash",
"state",
"stay",
"steer",
"step",
"stipulate",
"stir",
"stitch",
"stop",
"store",
"strap",
"storm",
"stow",
"strengthen",
"stress",
"stretch",
"strip",
"stroke",
"stuff",
"stutter",
"stray",
"strum",
"strut",
"stun",
"stunt",
"submerge",
"succeed",
"suffer",
"suggest",
"suit",
"supply",
"support",
"suppose",
"surmise",
"surprise",
"surround",
"suspect",
"suspend",
"sway",
"swear",
"swim",
"swing",
"switch",
"swoop",
"sympathize",
"talk",
"take",
"tame",
"tap",
"taste",
"taunt",
"teach",
"tear",
"tease",
"telephone",
"tell",
"tempt",
"terrify",
"test",
"testify",
"thank",
"thaw",
"theorize",
"think",
"threaten",
"throw",
"thunder",
"tick",
"tickle",
"tie",
"time",
"tip",
"tire",
"toast",
"toss",
"touch",
"tour",
"tow",
"trace",
"track",
"trade",
"train",
"translate",
"transport",
"trap",
"travel",
"treat",
"tremble",
"trick",
"trickle",
"trim",
"trip",
"trot",
"trouble",
"trust",
"trounce",
"try",
"tug",
"tumble",
"turn",
"twist",
"type",
"understand",
"undress",
"unfasten",
"unite",
"unlock",
"unpack",
"uphold",
"upset",
"upstage",
"urge",
"untie",
"use",
"usurp",
"utter",
"vacuum",
"value",
"vanish",
"vanquish",
"venture",
"visit",
"voice",
"volunteer",
"vote",
"vouch",
"wail",
"wait",
"wake",
"walk",
"wallow",
"wander",
"want",
"warm",
"warn",
"wash",
"waste",
"watch",
"water",
"wave",
"waver",
"wear",
"weave",
"wed",
"weigh",
"welcome",
"whimper",
"whine",
"whip",
"whirl",
"whisper",
"whistle",
"win",
"wink",
"wipe",
"wish",
"wobble",
"wonder",
"work",
"worry",
"wrap",
"wreck",
"wrestle",
"wriggle",
"write",
"writhe",
"x-ray",
"yawn",
"yell",
"yelp",
"yield",
"yodel",
"zip",
"zoom"
]
}
@staticmethod
def sentence(rng=random.Random()):
    """Compose a random nonsense phrase: '<verb>ing a <adverb> <adjective> <noun>'.

    `rng` defaults to a module-lifetime shared Random instance; pass a seeded
    Random for reproducible output.
    """
    pick = lambda category: rng.choice(Words.words[category])
    # Same draw order as before: verbs, adverbs, adjectives, nouns.
    pieces = [pick('verbs') + 'ing a ', pick('adverbs'), ' ',
              pick('adjectives'), ' ', pick('nouns')]
    return ''.join(pieces)
|
989,921 | 4aab0f7ea5f48d28b25a456fb24a9830fa9a434f | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-03-27 09:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0019_auto_20180327_1614'),
]
operations = [
migrations.AlterField(
model_name='projectinformation',
name='deploy_option',
field=models.CharField(choices=[('Combo', 'Combo'), ('Individual', 'Individual')], default='Combo', max_length=16, verbose_name='NDB Deploy Option'),
),
]
|
989,922 | 7a940f3b1404b976dae70ab9c3bfb0ac4ab1147e | import numpy as np
import torch
from tqdm.auto import tqdm
import torch.optim as optim
from pathlib import Path
import utils
import os
from dataloader_cache_allitem import DataLoaderTrain, DataLoaderTest
from infer_embedding import news_feature, infer_news_embedding
from preprocess import read_news_bert, get_doc_input_bert
from train_preprocess import read_news_bert_nopadding
from ddp_model import ModelBert
from parameters import parse_args
from tnlrv3.modeling import TuringNLRv3ForSequenceClassification
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
from tnlrv3.tokenization_tnlrv3 import TuringNLRv3Tokenizer
from torch.multiprocessing import Barrier,Lock
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import time
import logging
import math
import random
# Registry mapping the --bert_model flag to (config, model, tokenizer) classes.
# NOTE(review): the 'bert' entry is (None,None,None), so load_bert() cannot
# actually build a plain BERT model — confirm whether this is intentional.
MODEL_CLASSES = {
    'tnlrv3': (TuringNLRv3Config, TuringNLRv3ForSequenceClassification, TuringNLRv3Tokenizer),
    'bert': (None,None,None)
}
def setup(rank, world_size):
    """Join the NCCL process group as `rank`, pin this process to GPU `rank`,
    and seed torch so every worker initializes identically."""
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)
    torch.manual_seed(42)  # explicit fixed seed
def cleanup():
    """Tear down the distributed process group created by setup()."""
    dist.destroy_process_group()
def load_bert(args):
    """Build (model, tokenizer) for the architecture named by args.bert_model.

    Falls back to args.model_name_or_path when --config_name / --tokenizer_name
    are unset.

    Raises:
        ValueError: if the requested architecture has no registered classes
            (MODEL_CLASSES['bert'] is (None, None, None)); previously this fell
            through to `None.from_pretrained(...)` and died with an opaque
            AttributeError.
    """
    assert args.bert_model in ("tnlrv3","bert")
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.bert_model]
    if config_class is None or model_class is None or tokenizer_class is None:
        raise ValueError(
            "No model/tokenizer classes registered for bert_model=%r" % args.bert_model)
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        output_hidden_states=True)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case)
    bert_model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool('.ckpt' in args.model_name_or_path),
        config=config)
    return bert_model, tokenizer
def warmup_linear(args, step):
    """Learning-rate multiplier: linear ramp 0->1 over args.warmup_step steps,
    then linear decay toward args.schedule_step, floored at 1e-4."""
    if step <= args.warmup_step:
        return step / args.warmup_step
    remaining = args.schedule_step - step
    decay_span = args.schedule_step - args.warmup_step
    return max(1e-4, remaining / decay_span)
def train(local_rank,args,cache,news_idx_incache,prefetch_step):
    """Per-process DDP training worker (spawned once per GPU).

    Shared-memory arguments (one copy shared by all workers):
        cache: list wrapping a (cache_num, news_dim) numpy array of cached
            news encodings (unwrapped to the array below).
        news_idx_incache: dict {news_id: [cache index, last-updated step]}.
        prefetch_step: list with one data-generation step counter per worker,
            used by the dataloader to stay in sync.
    """
    utils.setuplogging()
    os.environ["RANK"] = str(local_rank)
    setup(local_rank, args.world_size)
    device = torch.device("cuda", local_rank)
    cache = cache[0]  # unwrap the manager list -> the shared numpy array

    logging.info('loading model: {}'.format(args.bert_model))
    bert_model,tokenizer = load_bert(args)

    if args.freeze_bert:
        logging.info('!!! Freeze the parameters of {}'.format(args.bert_model))
        for param in bert_model.parameters():
            param.requires_grad = False
        # Re-enable gradients only for the transformer blocks selected
        # by --finetune_blocks.
        for index, layer in enumerate(bert_model.bert.encoder.layer):
            if index in args.finetune_blocks:
                for param in layer.parameters():
                    param.requires_grad = True
    else:
        logging.info('!!!Not freeze the parameters of {}'.format(args.bert_model))

    news_combined, news_length, category_dict, domain_dict, subcategory_dict = read_news_bert_nopadding(
        os.path.join(args.root_data_dir,f'docs.tsv'),
        args, tokenizer )
    logging.info('-----------news_num:{}-----------'.format(len(news_combined)))

    # Every news item must fit in the cache; rank 0 assigns cache slots once
    # (the second element is a "last refreshed" step, initialized stale).
    assert args.cache_num >= len(news_length)
    if local_rank == 0:
        idx = 0
        for news in news_length.keys():
            news_idx_incache[news] = [idx,-args.max_step_in_cache]
            idx += 1
    dist.barrier()

    model = ModelBert(args, bert_model, device, len(category_dict), len(domain_dict), len(subcategory_dict))
    model = model.to(device)
    if args.world_size > 1:
        ddp_model = DDP(model,device_ids=[local_rank],output_device=local_rank,find_unused_parameters=True)
    else:
        ddp_model = model

    # Two parameter groups: the pretrained encoder vs everything else,
    # each with its own learning rate (linearly scaled by world size).
    lr_scaler = args.world_size
    if args.warmup_lr:
        rest_param = filter(lambda x: id(x) not in list(map(id, bert_model.parameters())), ddp_model.parameters())
        optimizer = optim.Adam([
            {'params': bert_model.parameters(), 'lr': args.pretrain_lr*warmup_linear(args,1)},
            {'params': rest_param, 'lr': args.lr*warmup_linear(args,1)}])
    else:
        rest_param = filter(lambda x: id(x) not in list(map(id, bert_model.parameters())), ddp_model.parameters())
        optimizer = optim.Adam([
            {'params': bert_model.parameters(), 'lr': args.pretrain_lr * lr_scaler},
            {'params': rest_param, 'lr': args.lr * lr_scaler}])

    dataloader = DataLoaderTrain(
        data_dir=os.path.join(args.root_data_dir,
                              f'autoregressive'),
        args=args,
        local_rank=local_rank,
        cache=cache,
        news_idx_incache=news_idx_incache,
        prefetch_step=prefetch_step,
        world_size=args.world_size,
        worker_rank=local_rank,
        cuda_device_idx=local_rank,
        news_combined=news_combined,
        news_length=news_length,
        enable_prefetch=True,
        enable_shuffle=True,
        enable_stream_queue=True,
        enable_gpu=args.enable_gpu,
    )
    logging.info('Training...')

    for ep in range(args.epochs):
        # Cache hit bookkeeping for logging only.
        hit_ratio = 0
        ht_num = 0
        hit_num = 0
        all_num = 0
        loss = 0.0
        start_time = time.time()
        usernum = 0
        for cnt, batch in tqdm(enumerate(dataloader)):
            if cnt > args.max_steps_per_epoch:
                break
            title_length, address_cache, update_cache, batch = batch
            usernum += batch[3].shape[0]

            if args.enable_gpu:
                batch_news_feature, batch_hist, batch_mask, batch_negs = [
                    x.cuda(non_blocking=True) for x in batch]
            else:
                batch_news_feature, batch_hist, batch_mask, batch_negs = batch

            # address_cache lists news whose embeddings can be reused from the
            # shared cache instead of being re-encoded this step.
            if address_cache is not None:
                cache_vec = torch.FloatTensor(cache[address_cache]).cuda(non_blocking=True)
                hit_ratio += cache_vec.size(0) / (batch_news_feature.size(0) + cache_vec.size(0))
                ht_num += 1
                hit_num += cache_vec.size(0)
                all_num += (batch_news_feature.size(0) + cache_vec.size(0))
            else:
                cache_vec = None

            bz_loss,encode_vecs = ddp_model(batch_news_feature, cache_vec, batch_hist, batch_mask, batch_negs,title_length)
            loss += bz_loss.data.float()
            optimizer.zero_grad()
            bz_loss.backward()
            optimizer.step()

            # Write the freshly computed encodings back into the shared cache.
            if args.drop_encoder_ratio > 0:
                encode_vecs = encode_vecs.detach().cpu().numpy()
                cache[update_cache] = encode_vecs

            if args.warmup_lr:
                optimizer.param_groups[0]['lr'] = args.pretrain_lr*warmup_linear(args,cnt+1)
                optimizer.param_groups[1]['lr'] = args.lr*warmup_linear(args,cnt+1)
                if cnt % 500 == 0:
                    logging.info(
                        'learning_rate:{},{}'.format(args.pretrain_lr*warmup_linear(args,cnt+1), args.lr*warmup_linear(args,cnt+1)))
            else:
                # One-shot LR drop at a hard-coded step when warmup is off.
                if cnt == 25000:
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = 1e-5 * lr_scaler
                    logging.info(f"change lr rate {1e-5 * lr_scaler}")

            if cnt % 100 == 0:
                logging.info(
                    '[{}] cost_time:{} step:{}, usernum: {}, train_loss: {:.5f}'.format(
                        local_rank, time.time()-start_time, cnt, usernum, loss.data / (cnt+1)))
                if hit_num > 0:
                    logging.info(
                        '[{}] step:{}, avarage hit ratio:{}'.format(
                            local_rank, cnt, hit_ratio / ht_num))
                    logging.info(
                        '[{}] step:{}, all hit ratio:{}'.format(
                            local_rank, cnt, hit_num / all_num))

            # Mid-epoch checkpoint (rank 0 only), then sync all workers.
            if local_rank == 0 and (cnt+1) % args.save_steps == 0:
                ckpt_path = os.path.join(args.model_dir, f'{args.savename}-epoch-{ep + 1}-{cnt}.pt')
                torch.save(
                    {
                        'model_state_dict': model.state_dict(),
                        'category_dict': category_dict,
                        'domain_dict': domain_dict,
                        'subcategory_dict': subcategory_dict
                    }, ckpt_path)
                logging.info(f"Model saved to {ckpt_path}")
            dist.barrier()

        loss /= (cnt+1)
        logging.info('epoch:{}, loss:{},usernum:{}, time:{}'.format(ep+1, loss, usernum,time.time()-start_time))

        # End-of-epoch checkpoint (rank 0 only).
        if local_rank == 0:
            ckpt_path = os.path.join(args.model_dir, '{}-epoch-{}.pt'.format(args.savename,ep+1))
            torch.save(
                {
                    'model_state_dict': model.state_dict(),
                    'category_dict': category_dict,
                    'domain_dict': domain_dict,
                    'subcategory_dict': subcategory_dict
                }, ckpt_path)
            logging.info(f"Model saved to {ckpt_path}")
            logging.info("time:{}".format(time.time()-start_time))

    dataloader.join()
    cleanup()
def test(local_rank,args):
    """Per-process evaluation worker: load a checkpoint, embed all test news,
    then score each impression and log AUC/MRR/nDCG/CTR metrics (overall and
    for cold users with history length <= 5).

    Fixes vs the original:
      * `print_metrics(hvd_rank, ...)` raised NameError — `hvd_rank` was a
        leftover from a horovod version; replaced with `local_rank`.
      * `Module.load_state_dict` does not accept a `map_location` keyword
        (that parameter belongs to `torch.load`); the invalid kwarg caused a
        TypeError and has been moved to the `torch.load` call.
    """
    setup(local_rank, args.world_size)

    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)

    assert ckpt_path is not None, 'No ckpt found'
    # Remap tensors saved on cuda:0 onto this worker's GPU.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % local_rank}
    checkpoint = torch.load(ckpt_path, map_location=map_location)

    subcategory_dict = checkpoint['subcategory_dict']
    category_dict = checkpoint['category_dict']
    domain_dict = checkpoint['domain_dict']

    device = torch.device("cuda", local_rank)

    # Rebuild the model and restore the trained weights.
    bert_model,tokenizer = load_bert(args)
    model = ModelBert(args, bert_model, device, len(category_dict), len(domain_dict), len(subcategory_dict))
    model = model.to(device)
    if args.world_size > 1:
        ddp_model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    else:
        ddp_model = model
    ddp_model.load_state_dict(checkpoint['model_state_dict'])
    logging.info(f"Model loaded from {ckpt_path}")

    model.eval()
    torch.set_grad_enabled(False)

    news, news_index = read_news_bert(
        os.path.join(args.root_data_dir,
                     f'test_files/docs.tsv'),
        args,
        tokenizer,
        mode='test'
    )
    news_combined = news_feature(args, news, news_index, category_dict, domain_dict, subcategory_dict)

    # Precompute one embedding per news item.
    news_scoring = infer_news_embedding(args,news_combined,model)
    logging.info("news scoring num: {}".format(news_scoring.shape[0]))

    dataloader = DataLoaderTest(
        news_index=news_index,
        news_scoring=news_scoring,
        news_bias_scoring=None,
        data_dir=os.path.join(args.root_data_dir,
                              f'test_files'),
        filename_pat=args.filename_pat,
        args=args,
        world_size= args.world_size,
        worker_rank=local_rank,
        cuda_device_idx=local_rank,
        enable_prefetch=True,
        enable_shuffle=True,
        enable_gpu=args.enable_gpu,
    )

    from metrics import roc_auc_score, ndcg_score, mrr_score, ctr_score

    # Index 0: all users; index 1: cold users (history length <= 5).
    AUC = [[], []]
    MRR = [[], []]
    nDCG5 = [[], []]
    nDCG10 = [[], []]
    CTR1 = [[], []]
    CTR3 = [[], []]
    CTR5 = [[], []]
    CTR10 = [[], []]

    def print_metrics(rank_id, cnt, x):
        # One log line of percentage-formatted metric means.
        logging.info("[{}] Ed: {}: {}".format(rank_id, cnt, \
            '\t'.join(["{:0.2f}".format(i * 100) for i in x])))

    def get_mean(arr):
        return [np.array(i).mean() for i in arr]

    for cnt, (log_vecs, log_mask, news_vecs, labels) in tqdm(enumerate(dataloader)):
        his_lens = torch.sum(log_mask, dim=-1).to(torch.device("cpu")).detach().numpy()

        if args.enable_gpu:
            log_vecs = log_vecs.cuda(non_blocking=True)
            log_mask = log_mask.cuda(non_blocking=True)

        user_vecs = model.user_encoder.infer_user_vec(log_vecs, log_mask).to(torch.device("cpu")).detach().numpy()

        for index, user_vec, news_vec, label, his_len in zip(
                range(len(labels)), user_vecs, news_vecs, labels, his_lens):
            # Degenerate impressions (all clicked / none clicked) carry no
            # ranking signal and are skipped.
            if label.mean() == 0 or label.mean() == 1:
                continue

            score = np.dot(
                news_vec, user_vec
            )

            auc = roc_auc_score(label, score)
            mrr = mrr_score(label, score)
            ndcg5 = ndcg_score(label, score, k=5)
            ndcg10 = ndcg_score(label, score, k=10)
            ctr1 = ctr_score(label, score, k=1)
            ctr3 = ctr_score(label, score, k=3)
            ctr5 = ctr_score(label, score, k=5)
            ctr10 = ctr_score(label, score, k=10)

            AUC[0].append(auc)
            MRR[0].append(mrr)
            nDCG5[0].append(ndcg5)
            nDCG10[0].append(ndcg10)
            CTR1[0].append(ctr1)
            CTR3[0].append(ctr3)
            CTR5[0].append(ctr5)
            CTR10[0].append(ctr10)

            if his_len <= 5:
                AUC[1].append(auc)
                MRR[1].append(mrr)
                nDCG5[1].append(ndcg5)
                nDCG10[1].append(ndcg10)
                CTR1[1].append(ctr1)
                CTR3[1].append(ctr3)
                CTR5[1].append(ctr5)
                CTR10[1].append(ctr10)

        if cnt == 0:
            for i in range(2):
                print_metrics(local_rank, 0,
                              get_mean([AUC[i], MRR[i], nDCG5[i], nDCG10[i], CTR1[i], CTR3[i], CTR5[i], CTR10[i]]))
        if (cnt + 1) % args.log_steps == 0:
            for i in range(2):
                print_metrics(local_rank, (cnt + 1) * args.batch_size, get_mean([AUC[i], MRR[i], nDCG5[i], \
                                                                                 nDCG10[i], CTR1[i], CTR3[i], CTR5[i],
                                                                                 CTR10[i]]))

    dataloader.join()
    # Final metric dump after the loader is drained.
    for i in range(2):
        print_metrics(local_rank, (cnt + 1) * args.batch_size, get_mean([AUC[i], MRR[i], nDCG5[i], \
                                                                         nDCG10[i], CTR1[i], CTR3[i], CTR5[i],
                                                                         CTR10[i]]))
if __name__ == "__main__":
    # Rendezvous address for torch.distributed (single node).
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    args = parse_args()
    args.world_size = 4  # hard-coded GPU count; overrides any CLI value

    if 'train' in args.mode:
        # Fixed banner typo ('trian' -> 'train').
        print('-----------train------------')
        if args.world_size > 1:
            # Manager-backed structures so every spawned worker shares one
            # embedding cache, one slot index, and one prefetch counter list.
            cache = np.zeros((args.cache_num,args.news_dim))
            global_cache = mp.Manager().list([cache])
            news_idx_incache = mp.Manager().dict()
            global_prefetch_step = mp.Manager().list([0]*args.world_size)
            mp.spawn(train,
                     args = (args,global_cache,news_idx_incache,global_prefetch_step),
                     nprocs=args.world_size,
                     join=True)
        else:
            # Single-process fallback: plain containers mimic the shared ones.
            cache = [np.zeros((args.cache_num, args.news_dim))]
            news_idx_incache = {}
            prefetch_step = [0]
            train(0,args,cache,news_idx_incache,prefetch_step)

    if 'test' in args.mode:
        print('-----------test------------')
        if args.world_size > 1:
            mp.spawn(test,
                     args = (args,),
                     nprocs=args.world_size,
                     join=True)
        else:
            test(0,args)
|
989,923 | 2fff0ca5417b2c52037dac5abee7849a65d9469c | import turtle as t
import random
tort = t.Turtle()      # single pen used for all drawing
t.colormode(255)       # accept 0-255 integer RGB tuples in color()
tort.shape("turtle")
def random_color():
    """Return a random (r, g, b) tuple, each channel drawn uniformly from 0-255."""
    return tuple(random.randint(0, 255) for _ in range(3))
tort.speed('fastest')  # 'fastest' disables the per-segment animation delay
def draw_spirograph(size_of_graph):
    """Draw a full spirograph: one radius-100 circle every `size_of_graph`
    degrees of heading, each in a random color.

    Fix/generalization: the heading was always advanced by a hard-coded 10
    degrees even though the circle count is 360 / size_of_graph, so any
    argument other than 10 produced an incomplete or over-drawn rotation.
    The increment now matches the loop count; output is unchanged for the
    existing call with 10.
    """
    for _ in range(int(360 / size_of_graph)):
        tort.color(random_color())
        tort.circle(100)
        tort.setheading(tort.heading() + size_of_graph)
draw_spirograph(10)  # 36 circles, 10 degrees apart

# Keep the window open until the user clicks it.
screen = t.Screen()
screen.exitonclick()
|
989,924 | 7d81651e37330abf932263d3ba4f5f4544d3f355 | import random, time, sys
#import numpy as np
#from pygame.locals import *
import pyjsdl as pygame
#import pygame
from textdata import txtFile
#pyjsdl.display.setup(runGame)
WINDOWWIDTH = 500
WINDOWHEIGHT = 700
boardArrHeight = 200   # placeholder; overwritten with the real row count in initBoardArr()

boardArr = []          # 2-D grid of tile type codes (0 = empty)

posX = 5               # character position (column)
posY = 0               # character position (row)
direction = "down"     # last movement direction; drives facing and eating

score = 0
stomachSpace = 100     # remaining tiles the character may eat
numLives = 0

# Visited-marker scratch grid for getCollapseGroup().
# NOTE(review): [[0]*10]*height makes every row alias the SAME list, so
# marking one cell marks that column in all rows — confirm intent (collapse()
# rebuilds it the same way before each use).
collapseGroupHolder = [[0]*10]*boardArrHeight
def initBoardArr():
    # Parse the digit characters of the embedded level text (textdata.txtFile)
    # into boardArr as rows of 10 ints, then record the row count.
    # NOTE(review): when count reaches 10 the current digit is flushed-over,
    # not appended (every 11th digit is dropped), and the final partial row is
    # never appended — confirm the level text's layout compensates for this.
    global boardArr,txtFile,boardArrHeight
    count = 0
    innerList = []
    txtFile = txtFile.rstrip()
    for row in txtFile:          # iterates characters of the string
        if not row.isdigit():
            continue             # skip whitespace/newlines between digits
        elif count <10:
            innerList.append(int(row))
            count += 1
        else:
            boardArr.append(innerList)
            count = 0
            innerList=[]
    boardArrHeight = len(boardArr)
    # Debug output.
    print boardArr
    print "** ", boardArr[0]
    print "**** ", boardArr[0][0]
def main():
    # One-time pygame/pyjsdl setup: window, fonts, board parsing, first frame,
    # then the outer game loop.
    global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
    pygame.init()
    print "in main"
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    DISPLAYSURF.fill((255,255,255))
    BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
    BIGFONT = pygame.font.Font('freesansbold.ttf', 100)
    pygame.display.set_caption('Cotton Candy')
    initBoardArr()
    renderScreen()
    while True: # game loop
        runGame()
        # NOTE(review): showTextScreen is not defined anywhere in this file —
        # confirm it exists elsewhere or this line raises NameError.
        showTextScreen('Game Over')
def runGame():
    # Input loop: arrows/WASD move (pressing the current direction twice moves;
    # the first press only turns), space eats in the facing direction.
    # NOTE(review): KEYDOWN/K_* come from pygame.locals, whose import is
    # commented out at the top of the file — confirm pyjsdl exposes them.
    global posX, posY,direction
    while True:
        collapse() #may be superfluous
        for event in pygame.event.get(): # event handling loop
            if event.type == KEYDOWN:
                if (event.key == K_LEFT or event.key == K_a):
                    print "left pressed"
                    if(direction == "left"):
                        move(direction)
                    direction = "left"
                elif (event.key == K_RIGHT or event.key == K_d) :
                    print "right pressed"
                    if(direction == "right"):
                        move(direction)
                    direction = "right"
                elif (event.key == K_UP or event.key == K_w):
                    print "up pressed"
                    # NOTE(review): comparison, not assignment — pressing up
                    # never changes `direction`; likely a typo for `=`.
                    direction == "up"
                elif (event.key == K_DOWN or event.key == K_s):
                    print "down pressed"
                    if(direction == "down"):
                        move(direction)
                    direction = "down"
                elif event.key == K_SPACE: #eat
                    print "eating direction is ", direction
                    eat(direction)
        renderScreen()
def canEat(goal):
    # True iff goal=(row, col) is on the board horizontally and holds a
    # non-empty tile. (Negative rows are rejected; rows past the bottom are
    # not checked here — callers constrain the row.)
    global boardArr
    if goal[0] < 0 or goal[1] < 0 or goal[1] >=10:
        print "cant eat because off screen"
        return False
    row = goal[0]
    col = goal[1]
    if boardArr[row][col] == 0:
        print "cant eat because empty there: " ,direction
        return False
    else:
        return True
#return true or false based on if can eat
def eat(direction):
    # Eat the tile group adjacent to the character in `direction`
    # ("right"/"left"/"down"; any other value leaves goal=(0,0)).
    # A successful bite costs one unit of stomach space, clears the whole
    # connected group of that tile type, then lets the board settle.
    global posX,posY,boardArr,stomachSpace, score
    goal = (0,0)
    if direction == "right":
        goal = (posY,posX+1)
    elif direction == "left":
        goal = (posY,posX-1)
    elif direction == "down":
        goal = (posY+1,posX)
    if canEat(goal):
        stomachSpace -= 1
        row = goal[0]
        col = goal[1]
        getEatGroup(goal,boardArr[row][col])
    collapse()
    renderScreen()
def getEatGroup(goal,tileType):
global boardArr, score
row = goal[0]
col = goal[1]
print "tile type is: ",boardArr[row][col]
#if boardArr[row][col] == tileType:
boardArr[row][col] = 0
score += 1
if col+1 < 10 and boardArr[row][col+1] == tileType:
getEatGroup((row,col+1),tileType)
if col-1 >= 0 and boardArr[row][col-1] == tileType:
getEatGroup((row,col-1),tileType)
if boardArr[row+1][col] == tileType:
getEatGroup((row+1,col),tileType)
if row-1 >= 0 and boardArr[row-1][col] == tileType:
getEatGroup((row-1,col),tileType)
def collapsable(row,col,num):
    # Recursive check whether the tile at (row, col) — and, transitively, its
    # right/up neighbors once recursion has started (num > 0) — can fall.
    # NOTE(review): appears unused; collapse() goes through canCollapse()
    # instead. Possible dead code — confirm before removing.
    global boardArr
    sameType = boardArr[row][col]
    if sameType == 0:
        return False     # empty cells never collapse
    if col+1 <10 and (boardArr[row][col+1] == sameType or num>0):
        if not collapsable(row,col+1,num+1):
            return False
    if row-1>=0 and (boardArr[row-1][col] == sameType or num>0):
        if not collapsable(row-1,col,num+1):
            return False
    return True
def getCollapseGroup(row,col,tileType): #new group collapsing methods is ???
    # Collect the 4-connected group of `tileType` tiles containing (row, col),
    # using collapseGroupHolder as the visited marker. Returns a list of
    # (row, col) tuples; empty if the cell was already visited.
    # NOTE(review): collapseGroupHolder's rows alias one list (see its
    # definition), so a mark at (r, c) appears in every row — confirm.
    global boardArr, score, collapseGroupHolder
    group = []
    done = 0   # unused
    if collapseGroupHolder[row][col] == 0:
        group.append((row,col))
        collapseGroupHolder[row][col] = 1
        if col+1 < 10 and boardArr[row][col+1] == tileType and collapseGroupHolder[row][col+1] == 0:
            group.extend(getCollapseGroup(row,col+1,tileType))
        if col-1 >= 0 and boardArr[row][col-1] == tileType and collapseGroupHolder[row][col-1] == 0:
            group.extend(getCollapseGroup(row,col-1,tileType))
        if row+1 < boardArrHeight and boardArr[row+1][col] == tileType and collapseGroupHolder[row+1][col] == 0:
            group.extend(getCollapseGroup(row+1,col,tileType))
        if row-1 >= 0 and boardArr[row-1][col] == tileType and collapseGroupHolder[row-1][col] == 0:
            group.extend(getCollapseGroup(row-1,col,tileType))
    return group
def canCollapse(group):
    # A group may fall one row iff no member rests on a non-empty cell outside
    # the group. Members on the bottom row have no cell below, so they do not
    # block the fall here (collapseGroup leaves them in place).
    global boardArr
    can = True
    for i in group:
        if (i[0]+1,i[1]) not in group and i[0]+1 < boardArrHeight and boardArr[i[0]+1][i[1]] != 0:
            can = False
    return can
def collapseGroup(group):
    """Shift every (row, col) cell in `group` down one row in place;
    cells already on the bottom row are left untouched."""
    global boardArr
    for row, col in group:
        if row + 1 < boardArrHeight:
            boardArr[row + 1][col] = boardArr[row][col]
            boardArr[row][col] = 0
def collapse():
    # Apply one gravity pass: for every on-screen cell, find its connected
    # group and drop it one row if nothing blocks it, then let the character
    # fall too.
    global posX,posY,boardArr, collapseGroupHolder
    for i in xrange(0,boardArrHeight-1): #can make faster by starting dif pos
        for j in xrange(0,10):
            startPos = 300-posY*50        # screen y of board row 0
            if startPos+i*50 > 1000:
                break                     # row is far below the window; stop
            # Reset the visited grid before each flood fill (same row-aliased
            # construction as the global — see its definition).
            collapseGroupHolder = [[0]*10]*boardArrHeight
            group = getCollapseGroup(i,j,boardArr[i][j])
            if canCollapse(group):
                collapseGroup(group)
    collapseCharacter()
def collapseCharacter():
    # Drop the character straight down until it rests on a non-empty tile.
    # Fix: the loop indexed boardArr[posY+1] with no bounds check and raised
    # IndexError once the character fell to the last row; the fall now also
    # stops at the bottom of the board.
    global posX,posY,boardArr
    while posY + 1 < boardArrHeight and boardArr[posY+1][posX] == 0:
        posY += 1
def canMove(goal):
    # The character can step into goal=(row, col) iff it is on the board
    # horizontally and the cell is empty (i.e. NOT edible).
    if goal[0] < 0 or goal[1] < 0 or goal[1] >=10:
        print "cant move because off screen y: ",goal[0]," x: ",goal[1]
        return False
    return not(canEat(goal))
def move(direction):
    # Step one cell in `direction`. If the direct cell is blocked, try
    # stepping diagonally up-then-over (climb one tile) or down-then-over,
    # and afterwards let gravity settle the board and re-render.
    global posX, posY,boardArr
    goal = (0,0)
    if direction == "right":
        goal = (posY,posX+1)
    elif direction == "left":
        goal = (posY,posX-1)
    elif direction == "down":
        goal = (posY+1,posX)
    if canMove(goal):
        if direction == "right":
            posX += 1
        elif direction == "left":
            posX = posX -1
        elif direction == "down":
            posY += 1
    elif canMove((goal[0]-1,goal[1])):
        # climb: one row up plus the horizontal step
        if direction == "right":
            posX += 1
        elif direction == "left":
            posX -= 1
        posY -= 1
    elif canMove((goal[0]+1,goal[1])):
        # slide down: one row down plus the horizontal step
        if direction == "right":
            posX += 1
        elif direction == "left":
            posX -= 1
        posY += 1
    collapse()
    renderScreen()
def renderScoreAndStomach():
    # Draw the score / remaining belly space HUD centered at the top of the
    # window (skipped gracefully if the font module is unavailable).
    if pygame.font:
        font = pygame.font.Font(None, 36)
        scoreText = "Score: %d   Belly Space: %d" %(score,stomachSpace)
        text = font.render(scoreText, 1, (0, 0, 0))
        textpos = text.get_rect(centerx=DISPLAYSURF.get_width()/2)
        DISPLAYSURF.blit(text, textpos)
    else:
        print "fonts disabled"
def renderCharacter():
    # Draw the character as a 50px triangle pointing in the facing direction.
    # The character is always drawn at screen row y=300; vertical movement is
    # shown by scrolling the board instead (see renderScreen's startPos).
    global posX,posY,boardArr, DISPLAYSURF,direction
    vertices = [(posX*50,300),(posX*50+50,300),(posX*50+25,300+50)]
    if direction == "right":
        vertices = [(posX*50,300),(posX*50+50,300+25),(posX*50,300+50)]
    elif direction == "left":
        vertices = [(posX*50+50,300),(posX*50+50,300+50),(posX*50,300+25)]
    elif direction == "down":
        vertices = [(posX*50,300),(posX*50+50,300),(posX*50+25,300+50)]
    pygame.draw.polygon(DISPLAYSURF,(190,78,89), vertices, 0)
    print posY, ", ", posX
def renderScreen():
    # Redraw the whole frame: the visible slice of the board (scrolled so the
    # character's row lands at y=300), then the HUD and the character.
    global posX,posY,boardArr, boardArrHeight, DISPLAYSURF
    PINK = (255,0,128)
    PURPLE = (147,112,219)
    BLUE = (135,216,250)
    WHITE = (255,255,255)
    color = WHITE
    DISPLAYSURF.fill((255,255,255))
    for i in xrange(0,boardArrHeight):
        for j in xrange(0,10):
            tileType = boardArr[i][j]
            # Map tile code -> color (4/5 are drawn black; 0/unknown -> white).
            if tileType == 1 or tileType == "1":
                print "PINK"
                color = PINK
            elif tileType == 2:
                print "PURPLE"
                color = PURPLE
            elif tileType == 3:
                print "BLUE"
                color = BLUE
            elif tileType == 4 or tileType == 5:
                color = (0,0,0)
            else:
                color = WHITE
            startPos = 300-posY*50   # scroll offset: board row posY maps to y=300
            if startPos+i*50 > 1000:
                break                # rest of the row is far off-screen
            pygame.draw.rect(DISPLAYSURF,color,pygame.Rect(j*50,startPos+i*50,50,50),0)
            pygame.draw.rect(DISPLAYSURF,WHITE,pygame.Rect(j*50,startPos+i*50,50,50),5)
    renderScoreAndStomach()
    renderCharacter()
    pygame.display.flip()
def die():
    # Placeholder death handler: only logs for now — no lives/score logic yet.
    print "collapsed on character" #what happens when you die
# Script entry point.
if __name__ == '__main__':
    main()
|
989,925 | 28bdbc25d691bed614bf2d9e8256dd342de60c9e | from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader
from django.template import RequestContext
from django.contrib.sites.models import Site
from apps.captcha.fields import CaptchaField
from django.utils.translation import ugettext_lazy as _
# Widget attrs shared by every field below ('required' CSS class hook).
attrs_dict = { 'class': 'required' }
class ContactForm(forms.Form):
    """Site contact form (Spanish labels) with captcha.

    Besides field validation, the class knows how to render the email subject
    and body from templates and deliver the message to settings.MANAGERS via
    django.core.mail.send_mail (see save()).
    """
    name = forms.CharField(max_length=100, widget=forms.TextInput(attrs=attrs_dict), label=_(u'Nombre(s) y Apellido(s)'))
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=200)), label=_(u'Email'))
    telephone = forms.CharField(max_length=100, widget=forms.TextInput(attrs=attrs_dict), label=_(u'Teléfono'))
    body = forms.CharField(widget=forms.Textarea(attrs=attrs_dict), label=_(u'Mensaje'))
    captcha = CaptchaField(label=_(u'Seguridad'))

    # Delivery configuration consumed by get_message_dict().
    from_email = settings.DEFAULT_FROM_EMAIL
    recipient_list = [mail_tuple[1] for mail_tuple in settings.MANAGERS]
    subject_template_name = "djcontact/contact_form_subject.txt"
    template_name = 'djcontact/contact_form.txt'

    def __init__(self, data=None, files=None, request=None, *args, **kwargs):
        # `request` is mandatory: get_context() needs it to build a
        # RequestContext for template rendering.
        if request is None:
            raise TypeError("Keyword argument 'request' must be supplied")
        super(ContactForm, self).__init__(data=data, files=files, *args, **kwargs)
        self.request = request

    def message(self):
        """
        Render the body of the message to a string.
        """
        # template_name may be a callable returning the template path.
        if callable(self.template_name):
            template_name = self.template_name()
        else:
            template_name = self.template_name
        return loader.render_to_string(template_name,
                                       self.get_context())

    def subject(self):
        """
        Render the subject of the message to a string.
        """
        # Collapse to a single line: email subjects must not contain newlines.
        subject = loader.render_to_string(self.subject_template_name,
                                          self.get_context())
        return ''.join(subject.splitlines())

    def get_context(self):
        """
        Return the context used to render the templates for the email
        subject and body.

        By default, this context includes:

        * All of the validated values in the form, as variables of the
          same names as their fields.

        * The current ``Site`` object, as the variable ``site``.

        * Any additional variables added by context processors (this
          will be a ``RequestContext``).
        """
        if not self.is_valid():
            raise ValueError("Cannot generate Context from invalid contact form")
        return RequestContext(self.request,
                              dict(self.cleaned_data,
                                   site=Site.objects.get_current()))

    def get_message_dict(self):
        """
        Generate the various parts of the message and return them in a
        dictionary, suitable for passing directly as keyword arguments
        to ``django.core.mail.send_mail()``.

        By default, the following values are returned:

        * ``from_email``

        * ``message``

        * ``recipient_list``

        * ``subject``
        """
        if not self.is_valid():
            raise ValueError("Message cannot be sent from invalid contact form")
        message_dict = {}
        # Each part may be a plain attribute or a method; call it if callable.
        for message_part in ('from_email', 'message', 'recipient_list', 'subject'):
            attr = getattr(self, message_part)
            message_dict[message_part] = callable(attr) and attr() or attr
        return message_dict

    def save(self, fail_silently=False):
        """
        Build and send the email message.
        """
        send_mail(fail_silently=fail_silently, **self.get_message_dict())
|
989,926 | 5cd3d855f4b90bda54964365387e39186c00844a | """
Taken from and somewhat expanded upon:
https://towardsdatascience.com/streamlit-101-an-in-depth-introduction-fc8aad9492f2
The examples in the article don't jibe with the actual output in a number of cases.
I've added some of my own workarounds, but the original code is here
https://github.com/shaildeliwala/experiments/blob/master/streamlit.py
"""
import pandas as pd
import streamlit as st
import plotly.express as px
import pydeck as pdk
# Silence the st.pyplot() global-figure deprecation warning (this demo calls
# st.pyplot() with no figure argument further down).
st.set_option('deprecation.showPyplotGlobalUse', False)
@st.cache
def get_data():
    """Download the Inside Airbnb NYC listings snapshot (cached by Streamlit
    so the CSV is fetched only once per session)."""
    listings_url = "http://data.insideairbnb.com/united-states/ny/new-york-city/2019-09-12/visualisations/listings.csv"
    return pd.read_csv(listings_url)
df = get_data()
st.title("Streamlit 101 - st.title example")
st.markdown("updated with code where the examples had none. Some updates and workarounds also")
st.markdown("thx to https://towardsdatascience.com/streamlit-101-an-in-depth-introduction-fc8aad9492f2")
st.markdown("The examples in the article don't jibe with the actual output in a number of cases.I've added some of my own workarounds, but the original code is here.")
st.markdown("https://github.com/shaildeliwala/experiments/blob/master/streamlit.py")
st.header("This is a header")
st.markdown("## this is an h2")
st.markdown("> Markdown testing *italic* **bold test**")
st.subheader("Exploratory Data Analysis")
st.dataframe(df.head())
st.subheader("Specifying Code Blocks")
st.code("""
@st.cache
def get_data():
url = ""http://data.insideairbnb.com/united-states/ny/new-york-city/2019-09-12/visualisations/listings.csv""
return pd.read_csv(url)
""", language="python")
# st.map displays locations on a map without having to write a single line of boilerplate code to prepare a map object. The only requirement is that the dataframe must contain columns named lat/latitude or lon/longitude.
st.subheader("mapping")
st.markdown("showing the entire map with just 'data=df'")
st.map(data=df)
df['price'] = df['price'].astype(int)
st.markdown("show only prices above $800")
st.map(data=df[df['price'] > 800])
st.header('show 3d map with stacking')
# The deck_gl_chart widget is deprecated and will be removed on 2020-05-01.
# To render a map you should migrate to the st.pydeck_chart widget, which
# takes a pydeck Deck object instead of this dict format.
st.deck_gl_chart(
    viewport={
        'latitude': 40.7477,
        'longitude': -73.9750,
        'zoom': 11,
        'pitch': 50,
    },
    layers=[{
        # Extruded hexagon bins give the stacked "3D" density effect.
        'type': 'HexagonLayer',
        'data': df,
        'radius': 200,
        'elevationScale': 4,
        'elevationRange': [0, 1000],
        'pickable': True,
        'extruded': True,
    }, {
        'type': 'ScatterplotLayer',
        'data': df,
    }])
# the new way to do that
# st.pydeck_chart()
st.header('subselecting columns')
cols = ["name", "host_name", "neighbourhood", "room_type", "price"]
st_ms = st.multiselect("Columns", df.columns.tolist(), default=cols)
st.dataframe(df[st_ms].head(10))
st.subheader('average price by room type')
# Mean price per room type, formatted to 2 decimals in a display column.
st.table(df.groupby("room_type").price.mean().reset_index()\
    .round(2).sort_values("price", ascending=False)\
    .assign(avg_price=lambda x: x.pop("price").apply(lambda y: "%.2f" % y)))
st.subheader('which hosts have most properties listed - displaying json')
st.json([{"id":3647,"name":"THE VILLAGE OF HARLEM....NEW YORK !","host_id":4632,"host_name":"Elisabeth","neighbourhood_group":"Manhattan","neighbourhood":"Harlem","latitude":40.80902,"longitude":-73.9419,"room_type":"Private room","price":150,"minimum_nights":3,"number_of_reviews":0,"last_review":1,"reviews_per_month":1,"calculated_host_listings_count":1,"availability_365":365},{"id":3831,"name":"Cozy Entire Floor of Brownstone","host_id":4869,"host_name":"LisaRoxanne","neighbourhood_group":"Brooklyn","neighbourhood":"Clinton Hill","latitude":40.68514,"longitude":-73.95976,"room_type":"Entire home\\/apt","price":89,"minimum_nights":1,"number_of_reviews":279,"last_review":"2019-08-29","reviews_per_month":4.62,"calculated_host_listings_count":1,"availability_365":192}])
st.header("side bar and price range slider")
# `values` is a (low, high) tuple; the f-string turns it into
# "price.between(low, high)" for DataFrame.query.
values = st.sidebar.slider(label="Price Range", min_value=float(df.price.min()), max_value=1000., value=(50., 300.))
f = px.histogram(df.query(f"price.between{values}"), x="price", nbins=15, title="Price distribution")
f.update_xaxes(title="Price")
f.update_yaxes(title="No. of listings")
st.plotly_chart(f)
st.header('using radio buttons and checkboxes')
# this does not produce a df output showing the changes, as in the example
st.write("Using a radio button restricts selection to only one option at a time.")
neighborhood = st.radio("Neighborhood", df.neighbourhood_group.unique())
show_exp = st.checkbox("Include expensive listings")
# Repurpose show_exp as a query-string fragment: empty when the checkbox is
# ticked, otherwise an extra price filter appended inside get_availability.
show_exp = " and price<200" if not show_exp else ""
@st.cache
def get_availability(show_exp, neighborhood):
    # One-row summary (percentile describe) of availability_365 for listings
    # in `neighborhood`; `show_exp` is either "" or " and price<200" and is
    # interpolated straight into the pandas query string.
    return df.query(f"""neighbourhood_group==@neighborhood{show_exp}\
        and availability_365>0""").availability_365.describe(\
        percentiles=[.1, .25, .5, .75, .9, .99]).to_frame().T
# --- Streamlit demo, part B: availability plot, review filter, images ---
st.table(get_availability(show_exp, neighborhood))
st.header('pyplot - average availability')
# this has a warning -
# PyplotGlobalUseWarning: You are calling st.pyplot() without any arguments. After December 1st, 2020, we will remove the ability to do this as it requires the use of Matplotlib's global figure object, which is not thread-safe.
# To future-proof this code, you should pass in a figure as shown below:
# >>> fig, ax = plt.subplots()
# >>> ax.scatter([1, 2, 3], [1, 2, 3])
# >>> ... other plotting actions ...
# >>> st.pyplot(fig)
# Bar chart of mean availability per neighbourhood group (listings with at
# least one available day); drawn on matplotlib's implicit global figure.
df.query("availability_365>0")\
    .groupby("neighbourhood_group")\
    .availability_365.mean()\
    .plot.bar(rot=0)\
    .set(title="Average availability by neighborhood group",\
         xlabel="Neighborhood group", ylabel="Avg. availability (in no. of days)")
st.pyplot()
st.header('show number of reviews - sidebar number input')
# this is borked over value variables.
# Magics
# Notice how in the Number of Reviews section above, we wrote df.query("@minimum<=number_of_reviews<=@maximum")on its own line without wrapping it in a call to st.dataframe. This still rendered a dataframe because Streamlit detects a variable or literal on its own line and uses st.write to render it.
minimum = st.sidebar.number_input("Minimum", min_value=0, value=0)
maximum = st.sidebar.number_input("Maximum", min_value=0, value=5)
st.write('min and max are:', minimum, maximum)
if minimum > maximum:
    st.error("Please enter a valid range")
else:
    # df.query("@minimum<=number_of_reviews<=@maximum")
    # Bare expression rendered by Streamlit "magic" (implicit st.write).
    df.query("@minimum<=number_of_reviews<=@maximum").sort_values("number_of_reviews", ascending=False)\
        .head(50)[["name", "number_of_reviews", "neighbourhood", "host_name", "room_type", "price"]]
st.markdown('completed min and max section')
st.header('images and dropdowns')
# Picture name -> URL; the selectbox key doubles as the image caption lookup.
pics = {
    "Cat": "https://cdn.pixabay.com/photo/2016/09/24/22/20/cat-1692702_960_720.jpg",
    "Puppy": "https://cdn.pixabay.com/photo/2019/03/15/19/19/puppy-4057786_960_720.jpg",
    "Sci-fi city": "https://storage.needpix.com/rsynced_images/science-fiction-2971848_1280.jpg"
}
pic = st.selectbox("Picture choices", list(pics.keys()), 0)
st.image(pics[pic], use_column_width=True, caption=pics[pic])
st.markdown("## finished - showing balloons feature")
st.write("Click the button for balloons!")
btn = st.button("Celebrate!")
if btn:
    st.balloons()
989,927 | 374de701f733b4b76fe5df5b5e0aa090b93650bd | """Helper module to compute for the Shapley values of each feature
Authors: Leodegario Lorenzo II, Alva Presbitero
Date: 13 October 2021
"""
from math import factorial
def compute_shapley_value(model_outcomes, feature_name):
    """Return the Shapley value of the feature given model outcomes
    dictionary

    Parameters
    ----------
    model_outcomes : dict of float
        Dictionary containing the model outcomes/prediction for the given
        feature value. Keys of the dictionary are tuples containing the
        feature subset $S$, while the values are the model outcomes.
    feature_name : str
        Name of the feature whose Shapley value is to be computed

    Returns
    -------
    shapley_value : float
        The computed Shapley value of the feature
    """
    subsets = list(model_outcomes)
    # The total feature count n is the size of the largest subset key.
    n = max(len(subset) for subset in subsets)

    shapley_value = 0
    # Sum the weighted marginal contributions over every coalition S that
    # already contains feature i.
    for S in (subset for subset in subsets if feature_name in subset):
        s = len(S)
        # The same coalition with feature i removed.
        S_without_i = tuple(sorted(set(S) - {feature_name}))
        # P(S): probability of i joining to form exactly this coalition.
        weight = factorial(s - 1) * factorial(n - s) / factorial(n)
        # MC_{i,S}: marginal contribution of i as it joins S.
        marginal = model_outcomes[S] - model_outcomes[S_without_i]
        shapley_value += weight * marginal
    return shapley_value
def get_shapley_values(model_outcomes):
    r"""Return Shapley values of each feature given model outcomes

    Given the model outcomes for each possible feature subset $S$, this
    function computes for the expected marginal contribution $\phi$ of
    each feature $i$ using the Shapley value definition:

    $$
    \phi_i (v) = \sum_{S \subseteq N} \frac{(s-1)!(n-s)!}{n!}[v(S) - v(S \backslash \{ i \})]
    $$

    where $N$ is the set of all features, $|S|$ and $|N|$ is defined as
    $s$ and $n$ respectively, and $v$ refers to the corresponding model
    outcome

    Parameters
    ----------
    model_outcomes : dict of float
        Dictionary containing the model outcomes/prediction for the given
        feature value. Keys of the dictionary are tuples containing the
        feature subset $S$, while the values are the model outcomes.

    Returns
    -------
    shapley_values : dict of float
        Dictionary with the feature name as key and value as the
        feature's Shapley value
    """
    # FIX: the docstring is now a raw string.  The original plain string
    # contained \phi, \sum, \frac, \backslash, \{ — invalid escape
    # sequences that raise SyntaxWarning on Python 3.12+.
    # Superset $N$ is simply the longest subset key; max(key=len) finds it
    # in one pass instead of sorting every key.
    N = max(model_outcomes, key=len)

    # Compute each feature's Shapley value from the shared outcomes table.
    shapley_values = {}
    for feature_name in N:
        shapley_values[feature_name] = compute_shapley_value(
            model_outcomes, feature_name)
    return shapley_values
|
989,928 | d55347bf34909f2329b599c6795d15ce90c2ecfb | """
Author : Kusmakhar Pathak
Created: 30 July 2020
(c) Copyright by Kusmakhar Pathak.
"""
# Programme to get hostname and host IP address
import socket
# function to access host computer IP address
def get_ipAddress():
    """Return (hostname, ip_address) of the local machine.

    The hostname comes from socket.gethostname(); the address is whatever
    the resolver maps that hostname to (may be a loopback address on some
    systems).
    """
    host_pc = socket.gethostname()
    IPAddress = socket.gethostbyname(host_pc)
    return host_pc, IPAddress


# FIX: resolve once instead of calling get_ipAddress() twice — each call
# hits gethostname()/gethostbyname() again for no benefit.
host_pc, ip_address = get_ipAddress()
print(f"The host name of host computer is : {host_pc}")
print(f"The host IP address of host computer is : {ip_address}")
|
def webapp_add_wsgi_middleware(app):
    """Hook for wrapping the WSGI application in middleware.

    Appstats recording is currently disabled (see the commented lines);
    the application object is returned unchanged.
    """
    #from google.appengine.ext.appstats import recording
    #app = recording.appstats_wsgi_middleware(app)
    return app
# NOTE(review): this looks like App Engine remote_api custom-environment
# authentication — trust requests whose X-Appengine-Inbound-Appid header
# names the 'literumble' app.  Confirm against the appengine_config docs.
remoteapi_CUSTOM_ENVIRONMENT_AUTHENTICATION = (
    'HTTP_X_APPENGINE_INBOUND_APPID', ['literumble'])
|
##A palindromic number or numeral
##palindrome is a number that remains the same when its digits are reversed.
x = int(input("enter the first number. "))
y = x
# FIX: renamed the accumulator from `sum` to `reversed_num` so the builtin
# sum() is not shadowed.  It accumulates the digit-reversal of x.
reversed_num = 0
while y > 0:
    i = y % 10                       # least-significant digit of y
    reversed_num = reversed_num * 10 + i
    y = y // 10                      # drop that digit
if x == reversed_num:
    print(x, "is an palindrome number")
else:
    print(x, "is not palindromic number")
####Code for string palindromic
##num = input("Enter any string: ")
##rev_num = reversed(num)
## check if the string is equal to its reverse
##if list(num) == list(rev_num):
## print("Palindrome string")
##else:
## print("Not Palindrome string")
##
|
989,931 | e221114c95418f03dd2f426c3ec6746ecc763f0b | # Julita Osman 314323
# http://pep8online.com/
def is_palindrom(text):
    """Return True if *text* is a palindrome.

    Case is ignored and the separator characters used by the original
    checker (space . , : ; - " ? ! ') are skipped.

    Fix vs. the original in-place two-pointer version: an empty string,
    or a string consisting only of ignored characters, no longer raises
    IndexError — such inputs are (vacuously) palindromes.
    """
    ignored = {' ', '.', ',', ':', ';', '-', '"', '?', '!', "'"}
    # Keep only the significant characters, lower-cased.
    cleaned = [ch for ch in text.lower() if ch not in ignored]
    # A sequence is a palindrome iff it equals its own reverse.
    return cleaned == cleaned[::-1]
# Sample inputs: Polish and German phrase palindromes plus two plain words.
text1 = 'Kobyła ma mały bok'
text2 = 'Eine gulden, gute Tugend: Luge nie!'
text3 = 'oko'
text4 = 'rotor'
# print(is_palindrom(text1))
# print(is_palindrom(text2))
# print(is_palindrom(text3))
# print(is_palindrom(text4))
|
989,932 | 5615a7f29fb34b8bf40dc9c2f5afabe914313089 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-19 14:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``monthendbalances.checking_acct`` to ``checking_acct_ct``."""

    dependencies = [
        ('wf', '0002_auto_20170919_1343'),
    ]

    operations = [
        migrations.RenameField(
            model_name='monthendbalances',
            old_name='checking_acct',
            new_name='checking_acct_ct',
        ),
    ]
|
# Prune Supp-B.txt: keep only the lines that do NOT start with a digit and
# write them to Supp-B-prunned.txt.
# FIX: the original used bare open() calls that were never closed and the
# confusing aliasing `save = f = open(...)`; `with` blocks guarantee both
# handles are released.  Lines are also collected in a list and joined once
# instead of repeated string concatenation.
kept_lines = []
with open("C:/Users/escroc/Documents/projectBioInformatique/Supp-B.txt", 'r') as source:
    for line in source:
        # Every yielded line is non-empty (at least '\n'), so line[0] is safe.
        if not line[0].isdigit():
            kept_lines.append(line)
data = "".join(kept_lines)
print(data)
with open("C:/Users/escroc/Documents/projectBioInformatique/Supp-B-prunned.txt", 'w') as pruned:
    pruned.write(data)
|
989,934 | 1988c4a636e7ce828cb28320f569025f8270b98c | '''
Take in the following three values from the user:
- investment amount
- interest rate in percentage
- number of years to invest
Print the future values to the console.
'''
# Gather the three inputs and echo them back.
# NOTE(review): the module docstring says the *future values* are printed,
# but no future-value computation exists yet — only the raw inputs are
# echoed below.  Also, int() rejects fractional interest rates such as
# "4.5" — confirm whether that is intended.
investment_amount = int(input("Amount of investment: "))
interest_rate_in_percentage = int(input("interest rate (%): "))
number_of_years_to_invest = int(input("Number of years to invest: "))
print(investment_amount)
print(interest_rate_in_percentage)
print(number_of_years_to_invest)
|
989,935 | beb1be5397da229a4e90f891efa3c8dfefa53153 | from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
# --- Titanic survival: GradientBoosting learning-rate sweep vs. XGBoost ---
train_data = pd.read_csv("train.csv")
test_data = pd.read_csv("test.csv")

# Separate the label before joining train+test for shared preprocessing.
y_train = train_data["Survived"]
train_data.drop(labels="Survived", axis=1, inplace=True)

# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to stack the two frames.
full_data = pd.concat([train_data, test_data])

drop_columns = ["Name", "Age", "SibSp", "Ticket", "Cabin", "Parch", "Embarked"]
full_data.drop(labels=drop_columns, axis=1, inplace=True)
full_data = pd.get_dummies(full_data, columns=["Sex"])
full_data.fillna(value=0.0, inplace=True)

# The first 891 rows are the training set (Kaggle Titanic split).
X_train = full_data.values[0:891]
X_test = full_data.values[891:]

# Fit the scaler on train only; apply the same transform to test.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

state = 12
test_size = 0.25
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
    test_size=test_size, random_state=state)

# Sweep the learning rate and report train/validation accuracy for each.
lr_list = [0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]
for learning_rate in lr_list:
    gb_clf = GradientBoostingClassifier(n_estimators=20, learning_rate=learning_rate,
                                        max_features=2, max_depth=2, random_state=0)
    gb_clf.fit(X_train, y_train)
    print("Learning rate: ", learning_rate)
    print("Accuracy score (training): {0:.3f}".format(gb_clf.score(X_train, y_train)))
    print("Accuracy score (validation): {0:.3f}".format(gb_clf.score(X_val, y_val)))

# Refit at the chosen learning rate and show detailed validation metrics.
gb_clf2 = GradientBoostingClassifier(n_estimators=20, learning_rate=0.5,
                                     max_features=2, max_depth=2, random_state=0)
gb_clf2.fit(X_train, y_train)
predictions = gb_clf2.predict(X_val)
print("Confusion Matrix:")
print(confusion_matrix(y_val, predictions))
print("Classification Report")
print(classification_report(y_val, predictions))

# XGBoost baseline with default hyperparameters for comparison.
xgb_clf = XGBClassifier()
xgb_clf.fit(X_train, y_train)
score = xgb_clf.score(X_val, y_val)
print(score)
preds2 = xgb_clf.predict(X_val)
accuracy = accuracy_score(y_val, preds2)
print("Accuracy: " + str((accuracy * 100.0)))
|
989,936 | d14c054329c2ce1b80d367af715e98b61604c65e | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Codes\PyQt5Projects\blogui\RightSearchWidget.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_RightSearchWidget(object):
    """Generated UI scaffold for the right-hand "search" panel.

    Produced by the PyQt5 UI code generator from RightSearchWidget.ui —
    per the header warning, hand edits are lost on regeneration, so only
    documentation is added here.  Layout: a caption label, a thin accent
    line, then a keyword line-edit with a search button.
    """

    def setupUi(self, RightSearchWidget):
        """Build the widget tree and styling on *RightSearchWidget*."""
        RightSearchWidget.setObjectName("RightSearchWidget")
        RightSearchWidget.resize(313, 75)
        RightSearchWidget.setAutoFillBackground(False)
        RightSearchWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.verticalLayout = QtWidgets.QVBoxLayout(RightSearchWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        # Caption label ("搜索" is set in retranslateUi).
        self.lb_Search_Text = QtWidgets.QLabel(RightSearchWidget)
        self.lb_Search_Text.setStyleSheet("margin-top:10px;\n"
"margin-left:10px;\n"
"")
        self.lb_Search_Text.setObjectName("lb_Search_Text")
        self.verticalLayout.addWidget(self.lb_Search_Text)
        # Thin orange separator line drawn with a styled empty label.
        self.lb_Search_Line = QtWidgets.QLabel(RightSearchWidget)
        self.lb_Search_Line.setMinimumSize(QtCore.QSize(0, 3))
        self.lb_Search_Line.setMaximumSize(QtCore.QSize(16777215, 9))
        self.lb_Search_Line.setStyleSheet("border:2px solid rgb(247, 155, 106);\n"
"border-left:none;\n"
"border-right:none;\n"
"border-bottom:none;\n"
"margin-top:5px;\n"
"margin-left:10px;\n"
"margin-right:10px;\n"
"")
        self.lb_Search_Line.setText("")
        self.lb_Search_Line.setObjectName("lb_Search_Line")
        self.verticalLayout.addWidget(self.lb_Search_Line)
        # Container row holding the line-edit and the search button.
        self.widgetSearchWidget = QtWidgets.QWidget(RightSearchWidget)
        self.widgetSearchWidget.setAutoFillBackground(False)
        self.widgetSearchWidget.setObjectName("widgetSearchWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widgetSearchWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setSpacing(2)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.le_Keyword = QtWidgets.QLineEdit(self.widgetSearchWidget)
        self.le_Keyword.setMinimumSize(QtCore.QSize(0, 30))
        self.le_Keyword.setMaximumSize(QtCore.QSize(16777215, 30))
        self.le_Keyword.setStyleSheet("border:1px solid rgb(238,238,238);\n"
"margin-left:10px;\n"
"")
        self.le_Keyword.setObjectName("le_Keyword")
        self.horizontalLayout.addWidget(self.le_Keyword)
        self.pb_Keyword = QtWidgets.QPushButton(self.widgetSearchWidget)
        self.pb_Keyword.setMinimumSize(QtCore.QSize(50, 30))
        self.pb_Keyword.setStyleSheet("border:none;\n"
"background-color: rgb(247, 155, 106);\n"
"border-radius:3px;\n"
"margin-right:10px;\n"
"color: rgb(255, 255, 255);\n"
"\n"
"")
        self.pb_Keyword.setObjectName("pb_Keyword")
        self.horizontalLayout.addWidget(self.pb_Keyword)
        self.verticalLayout.addWidget(self.widgetSearchWidget)

        self.retranslateUi(RightSearchWidget)
        QtCore.QMetaObject.connectSlotsByName(RightSearchWidget)

    def retranslateUi(self, RightSearchWidget):
        """Apply the (translatable) user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        RightSearchWidget.setWindowTitle(_translate("RightSearchWidget", "Form"))
        self.lb_Search_Text.setText(_translate("RightSearchWidget", "搜索"))
        self.pb_Keyword.setText(_translate("RightSearchWidget", "搜索"))
if __name__ == "__main__":
    # Standalone preview: create a bare QWidget, install the generated UI
    # on it, and run the Qt event loop until the window is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    RightSearchWidget = QtWidgets.QWidget()
    ui = Ui_RightSearchWidget()
    ui.setupUi(RightSearchWidget)
    RightSearchWidget.show()
    sys.exit(app.exec_())
|
989,937 | 38b0dd6836f1e97476f1e8cb2950165cc39e4924 | # Generated by Django 4.1.7 on 2023-02-21 09:05
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Player`` and ``Staff`` models."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('slug', models.SlugField(unique=True)),
                ('squad_number', models.PositiveIntegerField(blank=True, null=True)),
                ('image', models.ImageField(upload_to='players')),
                ('position', models.CharField(blank=True, choices=[('GOALKEEPER', 'Goalkeeper'), ('DEFENDER', 'Defender'), ('MIDFIELDER', 'Midfielder'), ('STRIKER', 'Striker')], max_length=15)),
                ('status', models.CharField(blank=True, choices=[('ACTIVE', 'Active'), ('LOAN', 'On Loan'), ('SOLD', 'Sold')], max_length=15)),
                ('availability', models.CharField(blank=True, choices=[('INJURED', 'Injured'), ('SUSPENDED', 'Suspended'), ('LEAVE', 'On Leave')], max_length=15)),
                ('club_appearances', models.PositiveIntegerField(blank=True, help_text='Total club appearances since joining the club.', null=True)),
                ('goals', models.PositiveIntegerField(blank=True, help_text='Total goals since joining the club.', null=True)),
                ('nationality', models.CharField(blank=True, max_length=50)),
                ('yellow_cards', models.PositiveIntegerField(blank=True, help_text='Total yellow cards since joining the club.', null=True)),
                ('red_cards', models.PositiveIntegerField(blank=True, help_text='Total red cards since joining the club.', null=True)),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                ('height', models.CharField(blank=True, help_text='Height in cm.', max_length=10)),
                ('weight', models.CharField(blank=True, help_text='Weight in kg.', max_length=10)),
                ('previous_clubs', models.CharField(blank=True, max_length=255)),
                ('biography', ckeditor.fields.RichTextField(blank=True)),
            ],
            options={
                'ordering': ['squad_number'],
            },
        ),
        migrations.CreateModel(
            name='Staff',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('image', models.ImageField(upload_to='staff')),
                ('staff', models.CharField(choices=[('TECHNICAL', 'Technical Crew'), ('MANAGEMENT', 'Club Management'), ('BACKROOM', 'Backroom Staff')], max_length=20)),
                ('position', models.CharField(max_length=100)),
                ('active', models.BooleanField(default=True, help_text='Uncheck this if the Official is no longer with the club.')),
            ],
            options={
                'ordering': ['name', 'staff'],
            },
        ),
    ]
|
def gallery_creator(img_list):
    """Print one quoted <a><img> gallery entry per image.

    Each line is a single-quoted, comma-terminated string, ready to be
    pasted into a JavaScript array literal.
    """
    for picture in img_list:
        entry = ('\'<a href="http://plonsker.github.io/imgs/' + picture
                 + '" target="_blank"><img src="imgs/' + picture + '"/></a>\',')
        print(entry)
img_list = ['00(1).tiff','000008190022.jpg','000012590002.jpg','000012590035.jpg','000012700008.jpg','000012700019.jpg','000012700021.jpg','000012700026.jpg','000012700028.jpg','000014050008.jpeg','000014050019 (1).jpeg','000014050031.jpeg','000014050035.jpg','000030060028.jpg','000039150002.jpg','000039160027.jpg','000039160032.jpg','000039160033.jpg','000042750022.jpg','000042750031.jpg','000044220036.jpeg','000060590018.jpg','000060590032.jpg','02760023.JPG','02770020.JPG','02770035.JPG','07640019.JPG','11(1).tiff','11A(1).tiff','13(3).tiff','14070037.JPG','16.jpg','17(1).tif','17.jpg','17.tif','17A.tif','18(1).tif','18(2).tif','18(3).tif','18.tif','18A(1).tif','18A.tif','19(1).tif','19.tif','20(1).tif','20(2).tif','20.tif','20A.tif','21310006.JPG','21320021.JPG','21A.jpeg.sb-5e0f504b-GoArrW','21A.tiff','22.tif','23(1).tif','23.tif','23A.tif','24.jpg','24.tif','25(1).tif','25.tif','25A.tif','26(1).tif','26(2).tif','26(3).tif','26.tif','26A(1).tif','26A.tif','27(1).tiff','27.jpeg','27.tif','27A.tif','28(1).tif','28(2).tif','28(3).tiff','28.tif','29.tif','29A.tif','30.tif','31 (5).jpg','31.bmp','31.tif','31A.tif','32(1).tif','32.jpg','32.tif','33.tif','34(1).tif','34(2).tif','34(3).tif','34.tif','34A.tif','36(1).tif','36(2).tif','36.tif','36060016.JPG','37.tif','38.tif','42450009.JPG','46.tif','49.tif','50400006.JPG','52.tif','57660003.JPG','64570012.JPG','64570025.JPG','64590008.JPG','64590029.JPG','64600032.JPG','65720004.JPG','65730019.JPG','72530019.jpeg','72540004.JPG','72540024.jpeg','72540031.JPG','72540035.JPG','74310005.JPG','74310006.JPG','74330006.JPG','74340001.JPG','79350036.JPG','79360013.JPG','79370022.JPG','79400003.JPG','79410030.JPG','79420009.JPG','79870005.JPG','79880018.JPG','79880019.JPG','79880029.JPG','AA009.jpg','AA036.jpg','AAA004.jpg','AAA016.jpg','AAA021.jpg','AAA036(1).jpg','AAA036.jpg','AAA039.jpg','Copy of 
R0100072.JPG','DSC00226.JPG','DSC00230.JPG','DSC00231.JPG','DSC00236.JPG','DSC00290.JPG','DSC00440.JPG','DSC00498.JPG','DSC00912.JPG','DSC01090.JPG','DSC01817.JPG','DSC02445.JPG','DSC02450.JPG','DSC02498.JPG','DSC02523.JPG','DSC02524.JPG','DSC02534.JPG','DSC03252.JPG','DSC03373.JPG','DSC03394.JPG','DSC03445.JPG','DSC04482.JPG','DSC04614.JPG','DSC04711.JPG','DSC04743.JPG','DSC05566.JPG','DSC05654.JPG','DSC05715.JPG','DSC06128.JPG','DSC06230.JPG','DSC06234.JPG','DSC06956.JPG','DSC07029.JPG','DSC07295.JPG','DSC07460.JPG','DSC07810.JPG','DSC07854.JPG','DSC08251.JPG','DSC08368.JPG','DSC08635.JPG','DSC08810.JPG','DSC08811.JPG','DSC08900.JPG','DSC09992.JPG','FH000002.jpg','FH000007.jpg','FH000014.jpg','FH000019.jpg','G-17.tif','R0000108.JPG','R0041708-01.jpeg','R0041713-01.jpeg','R0043461.JPG','R0043895.JPG','R0044962.JPG','R0045773.JPG','R0045889.JPG','R0046113-01.jpeg','R0047314.JPG','R0048218.JPG','R0048516.JPG','R0048546.JPG','R0050091.JPG','R0050115.JPG','R0060115.JPG','R0060117.JPG','R0061200-01.jpeg','R0061436.JPG','R0061790.JPG','R0062523.JPG','R0064404.JPG','R0065326.JPG','R0066260.JPG','R0066334.JPG','R0066362.JPG','R0066892.JPG','R0067293.JPG','R0067950.JPG','R0068138.JPG','R0068235.JPG','R0095071.JPG','R0096242.JPG','bridge (1).JPG','bridge.JPG','wtc_plonsker.JPG',]
gallery_creator(img_list)
|
989,939 | e26e8276882ddb37695bdc81c7968fdb5346b7cb | import numpy as np
import pyautogui
import imutils
import cv2
# Locate every occurrence of the 'Alive.png' template in a screen region and
# record the centre of each match.
template = cv2.imread('Alive.png')
img_original = pyautogui.screenshot(region=(18,400, 772, 194))
img_original = cv2.cvtColor(np.array(img_original), cv2.COLOR_RGB2BGR)
img_rgb = cv2.cvtColor(np.array(img_original), cv2.COLOR_RGB2BGR)
# FIX: image shape is (rows, cols, channels) = (height, width, channels).
# The original unpacked it as `w, h`, swapping width and height — wrong
# rectangles and centres for any non-square template.
h, w = template.shape[:2]
res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
# threshold == 1 keeps only pixel-perfect matches.
threshold = 1
loc = np.where(res >= threshold)
targets_center_data_x = []
targets_center_data_y = []
for pt in zip(*loc[::-1]):  # Switch columns and rows -> pt is (x, y)
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 1)
    targets_center_data_y.append(int(pt[1] + (h/2)))
    targets_center_data_x.append(int(pt[0] + (w/2)))
target_count = len(targets_center_data_y)
print("Target count : " + str(target_count))
#for x in range(len(targets_bound_box_data_x)):
cv2.imwrite('targets.png', img_rgb)
cv2.imwrite('screenshot.png', img_original)
|
class DB_Abstract:
    """Abstract storage interface for users and tweets.

    Concrete backends (SQL file, key-value store, in-memory, ...) subclass
    this and implement every method; all bodies here are intentionally
    ``pass``.
    """

    def __init__(self):
        """Construct the db connection instance and prepare everything
        necessary to start actively working against the db.

        INPUT: database information per db implementation (connection
        info / file path / windows registry / whatever).  If the db
        structure is missing, the constructor should create it.
        """
        pass

    def insert_user(self, user):
        """Insert a new user into the database.

        INPUT: User struct
        OUTPUT: success status
        """
        pass

    def insert_tweet(self, tweet):
        """Insert a new tweet into the database.

        INPUT: tweet struct
        OUTPUT: success status
        """
        pass

    def get_user_by_id(self, user_id):
        """Build a user struct out of information pulled from the db.

        INPUT: user ID
        OUTPUT: user struct or null
        """
        pass

    def get_user_tweets(self, uid):
        """Get all tweets written by a specific user.

        INPUT: user ID
        OUTPUT: >=1 list of tweets by user, or None
        """
        pass

    def get_tweet_by_id(self, tweet_id):
        """Build a tweet struct out of information pulled from the db.

        INPUT: tweet ID
        OUTPUT: tweet struct or null
        """
        pass

    def get_replies_by_id(self, tweet_id):
        """Build a list of tweet structs containing replies for a
        specific tweet.

        INPUT: tweet ID
        OUTPUT: >=1 items list containing tweet replies for a specific
        tweet, or null.
        """
        pass
|
# Echo one line of user input back to stdout.
my_str=input("What do you want to get displayed:")
print(my_str)
|
989,942 | 0a4ca657a1fec0527e286fa3dbed80a67a04c3b8 | #!/usr/local/anaconda/bin/python
'''
A tool to calculate the fluxes from DSMACC
D.Ellis 2016
'''
#functions
# NOTE: this is a Python 2 script (print statements, xrange, iteritems).
# `global` at module scope is a no-op; kept as written.
global specs,reactants
xlen = lambda x: xrange(len(x))  # helper: index range over a sequence
import numpy as np
import pandas as pd
import sys,os,re,multiprocessing,netCDF4
from netCDF4 import Dataset
import matplotlib.pyplot as plt
#netcdf file path
ncfile = sys.argv[1]
ncores = 4
########### read dsmacc data
myfile = ncfile
nc = Dataset(myfile,'r')
print nc.date, '\n', nc.description,'\n'
print 'Select Simulation: \n\n'
# Interactive group selection: list every simulation group, read an index.
for i,g in enumerate(nc.groups): print i , ' - ', g
group = tuple(nc.groups)[int(input('Enter Number \n'))]
print group, 'took', nc.groups[group].WALL_time, 'seconds to compute.'
# Species concentrations and reaction rates; column names are stored as a
# comma-separated string in each variable's `head` attribute.
specs = pd.DataFrame(nc.groups[group].variables['Spec'][:])
specs.columns = nc.groups[group].variables['Spec'].head.split(',')
rates = pd.DataFrame(nc.groups[group].variables['Rate'][:])
rates.columns = nc.groups[group].variables['Rate'].head.split(',')
print 'Spec and Rate files loaded'
nc.close()
########################
#specs['TIME'] = pd.to_datetime(specs.TIME, unit='s')
rates['TIME'] = specs['TIME']
## need atleast two timesteps here
#specs = specs.ix[[99,100]]
#rates = rates.ix[[99,100]]
''' 1 remove dummy and non-reactions'''
specs = specs[[r for r in specs.columns if ('DUMMY' not in r) & ('EMISS' not in r)]]
rates = rates[[r for r in rates.columns[6:] if ('DUMMY' not in r) & ('EMISS' not in r)]]
rates = rates.loc[:, (rates > 0).any(axis=0)]
''' 2 remove species if not present (shrink data) '''
#rates = rates[rates.columns[rates.sum()>=0.]]
''' 3 get conversion factor from molcm-3 to mixing ratio'''
# NOTE(review): the trailing comma makes M a 1-tuple, not a scalar.  It is
# later used in `specs.TIME*M`, which only works via broadcasting — confirm
# whether the comma is intentional.
M = specs['M'].mean(),
''' 5 generate reactants and products list '''
#no nead to clear whitespace as begin.py should take care of that.
# Reaction names look like "A+B-->C+D"; split each side into species lists.
rate_head = '\n'+'\n'.join(rates.columns)+'\n'
products = [i.split('+') for i in re.findall(r'-->([A-z0-9+]*)',rate_head)]
reactants = [j.split('+') for j in re.findall(r'\n([A-z0-9+]{1,60})[-->]{0,1}',rate_head)]
if len(reactants) != len(products) : print 'reactants and poducts differing lengths'
''' 6 trip to only timesteps required '''
print 'getconc'
''' Fluxes '''
# Flux of reaction i = rate_i * product of reactant concentrations.
flux = []
for i in xlen(reactants):
    rcol = []
    for j in reactants[i]:
        # Strip any leading stoichiometric coefficient to get the species name.
        dummy = specs[re.sub(r'([\.\d\s]*)(\D[\d\D]*)', r'\2', j)]
        # NOTE(review): `float(re.sub(...) * dummy)` multiplies the *string*
        # coefficient by the column before float(), so any explicit
        # coefficient always falls through to the except branch (coeff 1).
        # Probably meant `float(re.sub(...)) * dummy` — confirm.
        try: rcol.append( float(re.sub(r'([\.\d]*)\s*\D[\d\D]*', r'\1', j) * dummy ))
        except: rcol.append(dummy) # coeff = 1 if not yet specified
    prod = 1
    for k in rcol: prod *= k
    print i
    flux.append(prod * rates[rates.columns[i]])
''' 4 convert concentrations to mixing ratio '''
#assign number for species, used later
#clean array if not making graph
specs = specs.loc[:, (specs > 0).any(axis=0)]
specs = specs / float(specs.M.mean())
flux = np.array(np.array(flux).tolist()).T
'''
force graphs
'''
''' Define spec locations '''
# locs2: index -> species name (skipping the 7 bookkeeping columns);
# locs: the inverse mapping, used as node ids in the graph output.
locs2 = dict(enumerate(specs.columns[7:]))
locs = {v: k for k, v in locs2.iteritems()}
''' 1 get all species interaction '''
# For reaction `ln`, emit one [ [reactant, product], ln ] edge per
# reactant/product pair (coefficients stripped from both names).
def combine(ln): return [[[re.sub(r'([\.\d\s]*)(\D[\d\D]*)', r'\2',r),re.sub(r'([\.\d\s]*)(\D[\d\D]*)', r'\2',p)],ln] for p in
    products[ln] for r in reactants[ln]]
dummy = np.vectorize(combine)(xlen(reactants))
edges = [] ; [edges.extend(i) for i in dummy] ; edges.sort() #because why not
''' 2 extract non duplicated list of reactions '''
individual = list(set(frozenset(i[0]) for i in edges))
''' 4 Make a combination of these '''
# For each unique species pair, gather the reactions flowing each way
# (fp: forward, fm: reverse) plus the two node indices.
flux_data = []
for i in individual:
    fp , fm =[],[]
    st = list(i)
    try:
    #if True:
        d0, d1 = locs[st[0]],locs[st[1]]
        dummy = [j for j in xlen(edges) if i == set(edges[j][0])]
        for k in dummy:
            edge = edges[k]
            if st[0] == edge[0][0]: fp.append(edge[1])
            else: fm.append(edge[1])
        flux_data.append([[fp,fm] ,d0,d1])
    except IndexError as e: print e, st # if self reaction
    except KeyError as e : print 'no concentration for', e #no specie concentration
flux_data = np.array(flux_data)
#ncdf info
combinations = str(list(flux_data[:,0]))
src = np.array(flux_data[:,1])
tar = np.array(flux_data[:,2])
times = np.array(specs.TIME*M).astype(int)
#rateheaders = [x.strip() for x in rate_head.split('\n') if x.strip()]
#rates = np.array([i.split('-->') for i in rateheaders])
rate_head = '[' + rate_head.replace('\n','","').replace('-->','>')[2:-2] +']'
from netCDF4 import Dataset
# Crude dict -> JSON conversion via repr string replacement (Py2 u'' prefix).
locs_json = str(locs).replace("u'",'"').replace("\'",'"')
conc = np.array(specs[specs.columns[7:]])
nrows = conc.shape[0]
# Write everything to ropa_<input>.nc for the downstream JS reader.
info_file = Dataset('ropa_'+ncfile, 'w', format='NETCDF3_CLASSIC')
info_file.createDimension('time', nrows)
info_file.createDimension('specs', conc.shape[1])
info_file.createDimension('fluxes', flux.shape[1])
info_file.createDimension('sourcetarget', len(src))
info_file.createDimension('dict', len(locs_json))
info_file.createDimension('comb', len(combinations))
info_file.createDimension('timestr', len(times))
info_file.createDimension('rateheader', len(rate_head))
cnc = info_file.createVariable('concentration', 'f8', ('time', 'specs'))
cnc[:,:] = conc
flx = info_file.createVariable('edge-length', 'f8', ('time', 'fluxes'))
flx[:,:] = flux
rt = info_file.createVariable('rate', 'c', 'rateheader')
rt[:] = rate_head
sources = info_file.createVariable('source', 'i4', 'sourcetarget')
sources[:] = src
targets = info_file.createVariable('target', 'i4', 'sourcetarget')
targets[:] = tar
dictn = info_file.createVariable('nodes', 'c', 'dict')
dictn[:] = locs_json
comb = info_file.createVariable('combinations', 'c', 'comb')
comb[:] = combinations
stime = info_file.createVariable('timeseconds', 'f8', 'time')
stime[:] = times
print 'PRIMARY SPECS'
print 'LOCAT~ION ARRAY'
print 'TIME ARRAY NOT HERE YET'
info_file.close()
print 'nc write'
#https://github.com/wolfiex/netcdfjs reader
|
989,943 | 1c230da8800c7a69127915ebd0c5d50afa4f8579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Array elements swap helper.
'''
import random
import logging
log = logging.getLogger(__name__)
def swap(a, b, ind):
    '''
    Exchange the elements of a and b at every index listed in ind.

    Args:
        a: list of elements
        b: list of elements
        ind: list indices
    '''
    for position in ind:
        a[position], b[position] = b[position], a[position]
def permutation_swap(a, b, ind):
    '''
    Swap elements of a and b at the indices in ind, then repair both
    lists so that each stays a permutation (no duplicated elements).

    Args:
        a: list of elements (permutation)
        b: list of elements (permutation)
        ind: list indices

    Fixes vs. the original:
      * xrange -> range (identical on Python 2, required on Python 3).
      * random.sample() on a set is deprecated since Python 3.9 and a
        TypeError on 3.11+; candidates are sorted into a list first,
        which also makes the repair deterministic for a fixed seed.
    '''
    used_a = set()  # values the swap introduced into a
    used_b = set()  # values the swap introduced into b
    for i in ind:
        used_a.add(b[i])
        used_b.add(a[i])
        a[i], b[i] = b[i], a[i]

    # Repair a: any occurrence of a swapped-in value elsewhere in a is a
    # duplicate; replace it with a value a gave away and has not reused.
    used = set()
    for i in range(len(a)):
        if a[i] in used_a:
            candidates_a = used_b.difference(used_a).difference({a[i]}) \
                .difference(used)
            if candidates_a:
                element = random.choice(sorted(candidates_a))
                a[i] = element
                used.add(element)

    # Repair b symmetrically.
    used = set()
    for i in range(len(b)):
        if b[i] in used_b:
            candidates_b = used_a.difference(used_b).difference({b[i]}) \
                .difference(used)
            if candidates_b:
                element = random.choice(sorted(candidates_b))
                b[i] = element
                used.add(element)
if __name__ == '__main__':
    # Demo (Python 2 print statements): show both swap variants on the
    # same index set [0, 1, 5].
    array1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    array2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    print "normal swap"
    print array1, array2
    swap(array1, array2, [0, 1, 5])
    print array1, array2
    # Two genuine permutations for permutation_swap.
    array1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    array2 = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
    print "permutation swap"
    print array1, array2
    permutation_swap(array1, array2, [0, 1, 5])
    print array1, array2
|
989,944 | 82c5f9f34f493044a6e06672935e87dabeae4aa1 | from flask import Flask, render_template
from flask import request as req
import requests
app = Flask(__name__)
# Root path (static)
@app.route("/")
@app.route("/index")
def index():
    """Serve the static landing page (mapped to both / and /index)."""
    return render_template('index.html')
# Render a table of chemicals reachable from a start chemical by querying the
# biochem4j.org graph DB. The final column contains a link to the /pathway link.
@app.route("/chemical")
def chemical():
    """List chemicals reachable from ?chem_id= within ?max_len= steps.

    Query-string args: chem_id (start chemical id), max_len (max path
    length in chemicals), min_mw (minimum monoisotopic mass).
    """
    # create the graph DB query
    max_length_chems = int(req.args.get("max_len", "0"))
    max_length_rels = (max_length_chems - 1) * 2
    # SECURITY FIX: chem_id and min_mw used to be %-spliced straight into
    # the Cypher text (query injection).  They are now sent as Cypher
    # parameters.  The variable-length bound must stay inline (Cypher does
    # not allow a parameter there), but it is derived via int() and safe.
    query = """
    MATCH shortestPath((s:Chemical)<-[r:has_reactant*1..%d]->(t:Chemical {id: $chem_id}))
    WITH s, r, t, range(0, size(r)-2) as idx
    WHERE ALL (i IN idx WHERE r[i]['stoichiometry'] * r[i+1]['stoichiometry'] < 0)
    AND s.monoisotopic_mass > $min_mw
    RETURN s.name, s.id, s.formula, size(r) as len""" % max_length_rels
    params = {"chem_id": req.args.get("chem_id", ""),
              "min_mw": float(req.args.get("min_mw", "0") or "0")}
    print(query)

    # send the graph DB query request
    r = requests.post('http://biochem4j.org/db/data/transaction/commit',
                      headers={'accept': 'application/json', 'content-type': 'application/json'},
                      json={"statements": [{"statement": query, "parameters": params}]})
    if r.status_code != 200:
        return "Error (%d): %s" % (r.status_code, r.text)

    # populate info needed for rendering the table
    fromChemical = {"id": req.args.get("chem_id", "")}
    chemicals = []
    for row_data in r.json()["results"][0]["data"]:
        row = row_data["row"]
        # size(r) counts relationships; /2 + 1 converts to chemical count.
        chemicals.append({"name": row[0], "id": row[1], "formula": row[2],
                          "path_length": int(row[3] / 2 + 1)})
    return render_template('chemicals.html', fromChemical=fromChemical, chemicals=chemicals)
# Render an interactive graph visualization with vis.js of the pathway between two chemicals.
@app.route("/pathway")
def pathway():
    """Visualise the shortest pathway between ?id1= and ?id2=.

    ?path_len= is the path length in chemicals (as reported by /chemical).
    """
    # create the graph DB query
    path_length_chems = int(req.args.get("path_len", "0"))
    max_length_rels = (path_length_chems - 1) * 2
    # SECURITY FIX: id1/id2 used to be %-spliced into the Cypher text
    # (query injection); they are now sent as Cypher parameters.  The
    # int-derived variable-length bound must remain inline.
    query = """
    MATCH p=shortestPath((s:Chemical {id: $id1})<-[r:has_reactant*1..%d]->(t:Chemical {id: $id2}))
    WITH p, s, r, t, range(0, size(r)-2) as idx
    WHERE ALL (i IN idx WHERE r[i]['stoichiometry'] * r[i+1]['stoichiometry'] < 0)
    RETURN p""" % max_length_rels
    params = {"id1": req.args.get("id1", ""), "id2": req.args.get("id2", "")}
    print(query)

    # send the graph DB query request
    r = requests.post('http://biochem4j.org/db/data/transaction/commit',
                      headers={'accept': 'application/json', 'content-type': 'application/json'},
                      json={"statements": [{"statement": query, "parameters": params}]})
    if r.status_code != 200:
        return "Error (%d): %s" % (r.status_code, r.text)

    meta = {"fromId": req.args.get("id1", ""), "toId": req.args.get("id2", ""),}
    # populate nodes and edges need for rendering the graph view
    i = 0
    nodes = []
    edges = []
    for row_data in r.json()["results"][0]["data"]:
        for elem in row_data["row"][0]:
            if "smiles" in elem:  # chemical node
                nodes.append({"index": i, "name": elem["name"], "color": "cyan"})
                if i > 0:
                    edges.append({"node1": i - 1, "node2": i})
                i = i + 1
            elif "balance" in elem:  # reaction node
                nodes.append({"index": i, "name": elem["id"], "color": "red"})
                if i > 0:
                    edges.append({"node1": i - 1, "node2": i})
                i = i + 1
            else:  # other node or reference
                pass
    return render_template('pathway.html', meta=meta, nodes=nodes, edges=edges)
# Path for the AWS load balancer to ping to check health status
@app.route("/ping")
def ping():
    """Health-check endpoint; always answers 200 'OK'."""
    return "OK"
if __name__ == '__main__':
    # bind to all interfaces so the container / load balancer can reach it
    app.run(debug=False, host='0.0.0.0')
|
989,945 | 70529800b7b1672ee90bbb4b45bc9e8e1b21b55b | # !/usr/bin/env python
# coding=utf-8
"""
Demo to integrate an ODE for ChE 344; A <--> 3B + C, membrane reactor with pressure drop, B diffusing out the membrane
references:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
https://scipy.github.io/old-wiki/pages/Cookbook/CoupledSpringMassSystem.html
"""
from __future__ import print_function
import sys
import numpy as np
from scipy.integrate import odeint
from common import make_fig, GOOD_RET
__author__ = 'hbmayes'
def sys_odes(y_vector, w, ka, keq, kc, alpha, cto, fto):
    """Right-hand sides dy/dW for the membrane reactor A <--> 3B + C.

    y_vector holds (F_A, F_B, F_C, p): the three molar flows and the
    pressure ratio.  w (catalyst mass) is required by odeint but unused.
    Returns [dF_A/dW, dF_B/dW, dF_C/dW, dp/dW].
    """
    flow_a, flow_b, flow_c, pressure = y_vector
    total_flow = flow_a + flow_b + flow_c
    # concentrations follow from each species' fraction of the total flow
    conc_a, conc_b, conc_c = (cto * f / total_flow
                              for f in (flow_a, flow_b, flow_c))
    # reversible reaction rate and trans-membrane loss of B
    rate_rxn = ka * (conc_a - conc_b ** 3.0 * conc_c / keq)
    rate_b_out = kc * conc_b
    return [
        -rate_rxn,
        3.0 * rate_rxn - rate_b_out,
        rate_rxn,
        -alpha * total_flow / (2.0 * fto * pressure),  # Ergun pressure drop
    ]
def solve_ode_sys():
    """Integrate the membrane-reactor ODE system and plot the results.

    Integrates sys_odes with scipy's odeint over a catalyst-mass grid and
    hands the trajectories to the project-local plotting helper make_fig.
    """
    # initial values: molar flows of A, B, C and the pressure ratio p
    fa0 = 5.0
    fb0 = 0.0
    c0 = 0.0
    p0 = 1.0
    # "y" is our system of equations (a vector). "y0" are the initial values.
    y0 = [fa0, fb0, c0, p0]
    # rate, equilibrium, membrane and pressure-drop constants
    ka = 2.0
    keq = 0.004
    kc = 8.0
    cto = 0.2
    fto = 5.0
    alpha = 0.015
    # Give an initial weight through a final weight. We don't know the final
    # weight needed yet; if we guess too small and we don't get the
    # conversion we want, we can always increase it and run the program again
    x_min = 0
    x_max = 30.0
    w_cat = np.linspace(x_min, x_max, 1001)
    sol = odeint(sys_odes, y0, w_cat, args=(ka, keq, kc, alpha, cto, fto))
    # unpack solution columns: F_A(W), F_B(W), F_C(W), p(W)
    a_w = sol[:, 0]
    b_w = sol[:, 1]
    c_w = sol[:, 2]
    p_w = sol[:, 3]
    name = 'lecture_9'
    make_fig(name+"flows", w_cat, a_w, y1_label="$F_A$(W)", y2_array=b_w, y2_label="$F_B$(W)",
             y3_array=c_w, y3_label="$F_C$(W)", x_label="catalyst mass (kg)",
             y_label="molar flow rates (mol/s)", x_lima=x_min, x_limb=x_max, y_lima=None, y_limb=None, loc=7)
    make_fig(name+"p", w_cat, p_w, x_label="catalyst mass (kg)",
             y_label="pressure ratio, p (unitless)", x_lima=x_min, x_limb=x_max, y_lima=0.0, y_limb=1.0, loc=7)
def main():
    """ Runs the main program.
    """
    solve_ode_sys()
    return GOOD_RET  # success (exit code constant from the project's common module)
if __name__ == '__main__':
    status = main()
    sys.exit(status)
|
989,946 | 6ab24ecb6bd5c51275ef2c2a8ca922bca82cc9e5 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
import pickle
# NOTE: this script is Python 2 (print statements, raw_input).
# Load the UCI wine data set; the first column is the class label.
wine = pd.read_csv('wine.data', names = ["Cultivator", "Alchol", "Malic_Acid", "Ash", "Alcalinity_of_Ash", "Magnesium", "Total_phenols", "Falvanoids", "Nonflavanoid_phenols", "Proanthocyanins", "Color_intensity", "Hue", "OD280", "Proline"])
# Set up data and labels
X = wine.drop('Cultivator',axis=1)
y = wine['Cultivator']
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Normalize data (scaler is fitted on the training split only)
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Generate list of mlps (identical architecture, different random inits)
models = [MLPClassifier(hidden_layer_sizes=(100,100,100),max_iter=1000) for i in range(10)]
# Generate Multiple Models and Choose the best
# NOTE(review): the best model is selected on the SAME test set that is
# then used for the final evaluation, so the reported accuracy is
# optimistically biased; a separate validation split would fix this.
[obj.fit(X_train,y_train) for obj in models]
model_predictions = [obj.predict(X_test) for obj in models]
scores = [accuracy_score(y_test,prediction) for prediction in model_predictions]
index_of_best = scores.index(max(scores))
print scores
print index_of_best
optimal_perceptron = models[index_of_best]
predictions = optimal_perceptron.predict(X_test)
# Evaluate: mark each test row PASS/FAIL
evaluations = []
for a,b in zip(y_test.values,predictions):
    if a==b:
        evaluations.append('PASS')
    else:
        evaluations.append('FAIL')
#print(confusion_matrix(y_test,predictions))
results = pd.DataFrame({'Actual':y_test.values,'Prediction':predictions, 'Result':evaluations})
print results
print '\n'
print classification_report(y_test,predictions)
print "Cumulative Accuracy: %.02f%%\n" % (float(accuracy_score(y_test,predictions))*100.0)
# Give the option to save the generated neural network
while True:
    choice = raw_input('Save network? (y/n) >> ')
    if choice == 'y':
        # pickle the winning classifier for later reuse
        with open('wine_detection_network.pickle', 'wb') as f:
            pickle.dump(optimal_perceptron, f)
        break
    elif choice == 'n':
        break
    else:
        print "invalid choice, please try again >> "
989,947 | 63f24182cf510d5b4f12d8f8dfa4eb46835fe096 | #%%
# 所有的数据分为一下几类:
# 诊断复杂,中有数字,形容描述 如 0013
#
import jieba
import matplotlib.pyplot as plt
import numpy as np
from gensim import corpora, models, similarities
from sklearn.cluster import KMeans
from sklearn.externals import joblib
import cla
def getsample(filename, featurestr, loopnumber):
    """Collect the distinct values of one feature from a '$'-separated dump.

    Reads the first ``loopnumber`` lines of ``filename``; each line looks
    like ``<id>$<feature_id>$<value>``.  Returns the de-duplicated values
    (insertion order preserved) whose feature id equals ``featurestr``.

    Raises IndexError if a line has fewer than three '$'-separated fields
    (including the empty lines read past end-of-file), matching the
    original behavior.
    """
    diagnose = list()
    # 'with' guarantees the handle is closed -- the original opened the
    # file and never closed it, leaking a file descriptor per call.
    with open(filename) as fo:
        for i in range(loopnumber):
            line = fo.readline()
            str1 = line.split('$')
            if str1[1] == featurestr:
                dia = str1[2].split('\n')  # strip the trailing newline
                if dia[0] not in diagnose:
                    diagnose.append(dia[0])
    return diagnose
def getsimma(da):
    """Build a size x size dissimilarity matrix for a list of strings.

    Each string is tokenized with jieba, projected into tf-idf space, and
    compared against every other via gensim cosine similarity; entries are
    1 / (similarity + 0.01), so similar texts get small values.
    """
    size = len(da)
    all_doc_list = [[word for word in jieba.cut(doc)] for doc in da]
    dictionary = corpora.Dictionary(all_doc_list)
    corpus = [dictionary.doc2bow(doc) for doc in all_doc_list]
    # The tf-idf model and the similarity index depend only on the corpus,
    # so build them once; the original rebuilt both inside the loop,
    # costing O(n) full-corpus passes per row for identical results.
    tfidf = models.TfidfModel(corpus)
    index = similarities.SparseMatrixSimilarity(
        tfidf[corpus], num_features=len(dictionary.keys()))
    sim = np.zeros((size, size))
    for i in range(size):
        doc_test_vec = dictionary.doc2bow(all_doc_list[i])
        # invert similarity into a distance-like score; +0.01 avoids
        # division by zero for identical documents with similarity ~0
        sim[i, :] = 1 / (np.array(index[tfidf[doc_test_vec]]) + 0.01)
    return sim
def main():
    """Cluster the distinct '0516' diagnosis strings into 5 groups and
    print the members of each group."""
    # name of the input file
    filename = "meinian_round1_data_part1_20180408.txt"
    # id of the feature/item to extract
    featurestr = '0516'
    da = getsample( filename , featurestr, 30000)  # collect the distinct strings
    numoftxt =len(da)  # how many distinct strings were found
    sim = getsimma(da)  # pairwise dissimilarity matrix
    out = cla.main(sim,5,numoftxt)  # fit the clustering model (project module 'cla')
    # build the corresponding dictionary (left unfinished)
    # dist = dist()
    # dist.append()
    # print every entry
    for i in range(numoftxt):
        # if out[i,0] == claser:
        print(da[i])
    # print each cluster's members
    for claser in range(5):
        print('--------------------------------------')
        for i in range(numoftxt):
            if out[i,0] == claser:
                print(da[i])
    # print(all_doc_list)
if __name__ == '__main__':
    main()
|
989,948 | af7dee274e05f33cb108ce4e042a4d656b2f12ba | # This is our super (parent) class. All other characers, will be subclasses of this class
class Character(object):
    # NOTE: Python 2 source (print statements).
    def __init__(self,name,health,power):
        # basic combat attributes shared by all character subclasses
        self.name = name
        self.health = health
        self.power = power
    def take_damage(self,amount_of_damage):
        # subtract damage; health may go negative (no floor applied here)
        self.health -= amount_of_damage
    def get_health(self):
        return self.health
    def is_alive(self):
        return self.health > 0
    # NOTE(review): duplicate definition -- this second is_alive shadows
    # the one above (they are identical, so behavior is unchanged, but one
    # of them should be deleted).
    def is_alive(self):
        # will return True if this statement is True, and False otherwise
        return self.health > 0
    def attack(self,enemy):
        # Perform 5 strikes; each has a 1-in-6 chance of a "critical".
        # NOTE(review): randint is never imported in this file, so calling
        # attack() raises NameError ("from random import randint" missing).
        for i in range(0,5):
            rand_num = randint(0,5)
            if rand_num == 1:
                # NOTE(review): the result of this expression is discarded,
                # so a critical strike never changes self.power -- probably
                # meant "self.power *= 2" or applying doubled damage.
                self.power * 2
                print "%s lands a critical strike on %s!" % (self.name,enemy.name)
            else:
                # self-assignment: a no-op kept from the original
                self.power = self.power
                print "%s attacks %s" % (self.name, enemy.name)
    def receive_damage(self, points):
        # like take_damage but with console feedback; announces death at <= 0
        self.health -= points
        print "%s received %d damage." % (self.name, points)
        if self.health <= 0:
            print "YOU HAVE DIED"
    def print_status(self):
        print "%s has %d health and %d power." % (self.name, self.health, self.power)
989,949 | 8b62c09ec372d608c27dc367cd8b8597af832581 | # Easy
#
# In a array A of size 2N, there are N+1 unique elements, and exactly one of
# these elements is repeated N times.
#
# Return the element repeated N times.
#
# Example 1:
#
# Input: [1,2,3,3]
# Output: 3
#
# Example 2:
#
# Input: [2,1,2,5,3,2]
# Output: 2
#
# Example 3:
#
# Input: [5,1,5,2,5,3,5,4]
# Output: 5
#
# Note:
#
# 4 <= A.length <= 10000
# 0 <= A[i] < 10000
# A.length is even
class Solution:
    def repeatedNTimes(self, A: List[int]) -> int:
        """Return the element that appears N times in the 2N-length array A.

        A contains N+1 distinct values, so every value except the answer
        appears exactly once -- therefore the first value ever seen twice
        is the repeated one.  This exits as soon as the duplicate shows up
        instead of counting occurrences all the way to N.
        O(len(A)) time, O(len(A)) space.
        """
        seen = set()
        for value in A:
            if value in seen:
                return value
            seen.add(value)
|
989,950 | 51bafe5ea779cf950cee6d8ba08399a8345ad1aa | import cv2
from ..utils.utils import intdetector, str2bool, stringdetector
def resize(img, ratio=100, size=None, interpolation=None):
    """
    Return ``img`` resized by ``ratio`` (percent) or to ``size``.

    The ratio has priority over the size whenever it differs from the
    default of 100.

    Args:
        img (:obj: array, mandatory):
            Image to resize.
        size (:obj: tuple, optional):
            New size of the image in (width, height) format.
        ratio (:obj: int, optional):
            Percentage of upscaling or downscaling to apply.  The default
            of 100 leaves the image untouched (unless ``size`` is given).
        interpolation (:obj: string, optional):
            Interpolation to use: 'nearest', 'linear', 'area', or anything
            else for cubic.  When omitted, OpenCV's recommendation is
            followed: INTER_AREA when shrinking, INTER_LINEAR when
            enlarging.
    Returns:
        :obj: array:
            The resized image, in the same format as inputted.
    Raises:
        ValueError: Raised if any of the values of the size is negative.
        ArgumentTypeError: Raised if any of the values of the size tuple is not an int or the ratio is not a positive int.
    """
    def _interpolation_choice(interpolation):
        # Map the caller's choice to an OpenCV flag.  Bug fixes vs. the
        # original: (1) the default rule was inverted (it chose INTER_AREA
        # for enlarging); (2) size, which is (width, height), was compared
        # axis-for-axis against img.shape, which is (height, width, ...).
        if interpolation is None:
            if ratio != 100:  # ratio takes priority, mirroring the body below
                enlarging = ratio > 100
            elif size is not None:
                enlarging = size[0] > img.shape[1] or size[1] > img.shape[0]
            else:
                enlarging = False
            return cv2.INTER_LINEAR if enlarging else cv2.INTER_AREA
        if interpolation in ["nearest", cv2.INTER_NEAREST]:
            return cv2.INTER_NEAREST
        if interpolation in ["linear", cv2.INTER_LINEAR]:
            return cv2.INTER_LINEAR
        if interpolation in ["area", cv2.INTER_AREA]:
            return cv2.INTER_AREA
        return cv2.INTER_CUBIC
    # Checks over ratio
    intdetector(ratio)
    if ratio <= 0:
        raise ValueError("Expected ratio parameter to be over zero")
    # Checks over size if necessary
    if size is not None:
        intdetector(size[0])
        intdetector(size[1])
        # size is (width, height): compare width to shape[1], height to shape[0]
        if size[0] == img.shape[1] and size[1] == img.shape[0]:
            return img
        if size[0] < 0 or size[1] < 0:
            raise ValueError("Expected size parameter to be a tuple with values over 0")
    interpolation = _interpolation_choice(interpolation)
    if ratio != 100:  # Ratio has the priority if inputted
        size = (int(img.shape[1] * ratio / 100), int(img.shape[0] * ratio / 100))
        return cv2.resize(img, size, interpolation=interpolation)
    if size is not None:  # If ratio has not been modified, use size if given
        return cv2.resize(img, size, interpolation=interpolation)
    return img
def addBorders(img, top=0, bottom=0, left=0, right=0, borderType='constant', color='black'):
    """Pad an image on any of its four sides.

    Args:
        img: image to pad.
        top/bottom/left/right: number of pixels to add on each side
            (non-negative ints, all default 0).
        borderType: one of 'constant' (solid color), 'reflect', 'default'
            (both mirror the edge pixels, with slightly different edge
            handling), or 'replicate' (repeats the outermost pixel).
        color: 'black' or 'white'; only consulted when borderType is
            'constant'.

    Returns:
        The padded image, in the same format as inputted; the original
        object is returned untouched when all four margins are zero.

    Raises:
        ValueError: negative margin, unknown borderType, or unknown color.
        ArgumentTypeError: a margin is not an int, or borderType/color is
            not a string (raised by the project validators).
    """
    # --- validation (same order and messages as before) -----------------
    for margin in (top, bottom, left, right):
        intdetector(margin)
    if top < 0 or bottom < 0 or left < 0 or right < 0:
        raise ValueError("Values must be over zero")
    stringdetector(borderType)
    borderType = borderType.lower()
    if borderType not in ['constant', 'reflect', 'default', 'replicate']:
        raise ValueError("Border types are 'constant', 'reflect', 'default' and 'replicate'")
    if borderType == 'constant':
        # color only matters (and is only validated) for constant borders
        stringdetector(color)
        color = color.lower()
        if color not in ['black', 'white']:
            raise ValueError("Supported colors are 'black' and 'white'")
    # --- dispatch ---------------------------------------------------------
    border_flags = {
        'constant': cv2.BORDER_CONSTANT,
        'reflect': cv2.BORDER_REFLECT,
        'default': cv2.BORDER_DEFAULT,
        'replicate': cv2.BORDER_REPLICATE,
    }
    border = border_flags[borderType]
    if not (top or bottom or left or right):
        return img
    if borderType == 'constant':
        bgr = [0, 0, 0] if color == 'black' else [255, 255, 255]
        return cv2.copyMakeBorder(img, top, bottom, left, right, border, value=bgr)
    return cv2.copyMakeBorder(img, top, bottom, left, right, border)
|
989,951 | 1f199b3c41c0a29e59a7207bb35ef9c1fff13830 | # Created by [Yuexiong Ding] at 2018/2/12
# 归一化数据
#
import pandas as pd
import numpy as np
# Path of the raw data set index file
INDEX_FILE_PATH = r'D:\MyProjects\TianChi\Astronomy\DataSet\RawData\train_index.csv'
# Directory holding the raw data set files
DATA_FILE_PATH = r'D:\MyProjects\TianChi\Astronomy\DataSet\RawData\TrainingData'
# Root directory for the normalized output
SAMPLE_ROOT_PATH = r'D:\MyProjects\TianChi\Astronomy\DataSet\NormalizedTrainingData'
# Path of the index file of the sampled data set
SAMPLE_INDEX_FILE_PATH = SAMPLE_ROOT_PATH + r'\Index'
# Directory for the normalized sample files
SAMPLE_DATA_FILE_PATH = SAMPLE_ROOT_PATH + r'\Samples\TrainingData'
# Running mean vector over all samples
MEAN = 0
# Running variance vector over all samples
VARIANCE = 0
# Read the index file listing every sample id
IndexData = pd.read_csv(INDEX_FILE_PATH)
IndexDataId = IndexData['id']
# First pass: incremental mean over all sample files
for i in range(IndexDataId.size):
    print('求均值:第' + str(i + 1) + '样本')
    # i starts at 0, so this is the standard running-mean update
    MEAN = (i * MEAN + np.loadtxt(DATA_FILE_PATH + '\\' + str(IndexDataId[i]) + '.txt', delimiter=',')) / (i + 1)
# Print the mean vector
print('均值向量为:' + str(MEAN))
# Persist the mean vector
MeanFrame = pd.DataFrame({'Mean': MEAN})
MeanFrame.to_csv(SAMPLE_ROOT_PATH + '\\' + 'Mean.csv', index=False, sep=',')
# Second pass: incremental (population) variance around MEAN
for i in range(IndexDataId.size):
    print('求方差:第' + str(i + 1) + '样本')
    VARIANCE = (i * VARIANCE + (
            np.loadtxt(DATA_FILE_PATH + '\\' + str(IndexDataId[i]) + '.txt', delimiter=',') - MEAN) ** 2) / (i + 1)
# Print the variance vector
print('方差向量为:' + str(VARIANCE))
# Persist the variance vector
VarianceFrame = pd.DataFrame({'Variance': VARIANCE})
VarianceFrame.to_csv(SAMPLE_ROOT_PATH + '\\' + 'Variance.csv', index=False, sep=',')
# Third pass: normalize every sample and write it out
# NOTE(review): z-score standardization divides by the standard deviation
# (sqrt(VARIANCE)), not by VARIANCE itself -- confirm whether dividing by
# the variance is intentional here.
for i in range(IndexDataId.size):
    print('归一化:第' + str(i + 1) + '样本')
    NormalData = (np.loadtxt(DATA_FILE_PATH + '\\' + str(IndexDataId[i]) + '.txt', delimiter=',') - MEAN) / VARIANCE
    np.savetxt(SAMPLE_DATA_FILE_PATH + '\\' + str(IndexDataId[i]) + '.txt', NormalData, delimiter=',')
# Print the (last) normalized vector
print('归一化向量为:' + str(NormalData))
|
989,952 | ee21f80715923909b6219b8681c73938587eb032 | """
python的参数转换器
"""
from flask import Flask
from werkzeug.routing import BaseConverter
app = Flask(__name__)
# 1. Define the custom converter
class RegexConverter(BaseConverter):
    """Custom URL converter: matches a route argument against an arbitrary
    regular expression written inside the route rule."""
    def __init__(self, url_map, regex):
        # url_map is supplied by Flask when it instantiates the converter;
        # regex is the pattern written in the route rule itself.
        # Call the parent initializer first.
        super(RegexConverter, self).__init__(url_map)
        # 'regex' is the attribute BaseConverter matches URL segments
        # against, so assigning it here swaps in the custom pattern.
        self.regex = regex
# 2. Register the custom converter with the Flask application under "re"
app.url_map.converters["re"] = RegexConverter
# 3. Use the custom converter to constrain the route argument
@app.route("/send/<re(r'1[34578]\d{9}'):mobile>")
def send_sms(mobile):
    """View function: echo the mobile number captured from the URL."""
    return "send sms number: %s" % mobile
if __name__ == '__main__':
    app.run(debug=True)
|
989,953 | cadb3f74103c4285ff20edfe4c210e47fc51d4d7 | # window parameters
icon_name = 'car.ico'
size = (1050, 600)  # main window (width, height)
title = 'Car Shop App'
path_img = 'images/'
# toolbar icons
file_img = 'file.png'
save_img = 'save.png'
home_img = 'home.png'
search_img = 'search.png'
print_img = 'print.png'
ID_BUTTON = 100
# database connection
# NOTE(review): credentials are hard-coded in plain text and committed with
# the source; consider loading them from environment variables or a config
# file kept out of version control.
host = 'localhost'
user ='nadya'
password = 'Myfriend_16'
db = 'cars_db'
# data for start page (Russian UI strings: table column headers)
cars_index = ['№ ', 'ID', "Коробка передач ", "Пробег ",
              "№РТС ", "Цена ", "Год выпуска ",
              "Объем двигателя ", "Цвет ", "Тип кузова ","Модель "]
cars = {}
orders_index = ['№: ','ID', "Год продажи: ", "Марка: ",
                "Фамилия клиента: ", "Фамилия продавца: ", "Форма оплаты: "]
clients_index = ['№','ID', 'Фамилия', 'Имя', 'Серия, номер паспорта', 'Телефон']
# data for aboutbox
program_name = 'CarSearching'
version = '1.0'
copyright = '(C) 2020 Nadezhda Shatalina'
web_site = 'https://github.com/etoruru'
developer = 'Nadezhda Shatalina'
doc_writer = 'Nadezhda Shatalina'
description = """CarSearching это автоматизированная информационная система,
предназначенная для упрощения работы с базой данных. Возможности включают добавление, изменение, удаление элементов,
расширенные возможности поиска и многое другое."""
|
989,954 | a4fbee525c647514a93c7cf53dfadf02693ee192 | from lxml import etree
import sys
import re
# NOTE: Python 2 script (print statement).
if(len(sys.argv) > 2):
    input_filename = sys.argv[1]
    output_name = sys.argv[2]
else:
    print "Usage: python remove_unwanted_spaces.py <input harem> <output file>"
    sys.exit()
# Parse the HAREM XML input file
tree = etree.parse(input_filename)
# Flatten each <P> element: serialize it, strip newlines/tabs and collapse
# spaces, then re-parse it wrapped in a temporary tag and swap it back in.
for ps in tree.iterfind('//P'):
    inside_ps = etree.tostring(ps).replace('\n','').replace('\t',' ').replace(' ',' ')
    inside_elem = etree.fromstring('<temp>'+inside_ps+'</temp>')
    ps.getparent().replace(ps,inside_elem)
# drop the temporary wrapper tags introduced above
etree.strip_tags(tree,'temp')
temp_str = etree.tostring(tree, encoding="ISO-8859-1")
# re-insert the spaces around <EM> tags that the flattening removed
temp_str = re.sub(r"(?<=\w)<EM", " <EM", temp_str)
temp_str = re.sub(r"</EM>(?=\w)", "</EM> ", temp_str)
# output to file
# NOTE(review): a 'with' block would guarantee the handle is closed on error
f = open("../outputs/"+output_name,'w')
f.write(temp_str)
f.close()
989,955 | e9ff762ce44e23422a102b4d4bbff9a6940b80e1 | from pathlib import Path
from mock import ANY, Mock, patch
from ruamel.yaml import YAML
from dwalk.cli import CLI
def test_args() -> None:
    # defaults: neither --include-meta nor --version is set
    assert not CLI().args.include_meta
    assert not CLI().args.version
def test_args__version() -> None:
    # the --version flag is parsed into args.version
    assert CLI(["--version"]).args.version
@patch("dwalk.cli.CLI.print_version", return_value=0)
def test_invoke__version(print_version: Mock) -> None:
    # invoke() with --version delegates to print_version()
    assert CLI(["--version"]).invoke() == 0
    print_version.assert_called_with()
def test_print_help() -> None:
    # help printing reports success (exit code 0)
    assert CLI().print_help() == 0
def test_print_version() -> None:
    # version printing reports success (exit code 0)
    assert CLI().print_version() == 0
@patch("dwalk.cli.YAML.dump")
def test_execute(dump: Mock) -> None:
    # End-to-end: walk testing/bottom with the three dwalk.*.yml filenames
    # and check that the merged mapping handed to YAML.dump equals the
    # expected fixture (testing/expect.yml).
    testing = Path(__file__).parent.parent.joinpath("testing").absolute()
    bottom = testing.joinpath("bottom")
    cli = CLI(
        [
            "--directory",
            str(bottom),
            "--filenames",
            "dwalk.3.yml",
            "dwalk.2.yml",
            "dwalk.1.yml",
        ],
    )
    assert cli.invoke() == 0
    with open(testing.joinpath("expect.yml"), "r") as stream:
        expect = YAML(typ="safe").load(stream)
    # ANY matches the output stream argument; only the payload matters here
    dump.assert_called_with(expect, ANY)
|
989,956 | 2d18fca1b2379676fd96c9f980d7287e90b5b3ec | #!/usr/bin/python
import numpy as np
import itertools as it
from typing import Iterable
from collections import Counter
from sage.arith.functions import LCM_list
import warnings
import re
import matplotlib.pyplot as plt
import inspect
# 9.11 (9.8)
# 9.15 (9.9)
PLOTS_DIR = "plots"
class CableSummand():
    """One iterated torus-knot (cable) summand T(2, 2k_1+1; ...; 2, 2k_n+1).

    NOTE: this file is SageMath source, not plain Python -- it relies on
    the Sage preparser ('^' is exponentiation, integer division yields
    exact rationals) and on globals not imported in this excerpt
    (sgn, SignatureFunction, os, get_summand_signture_function_docsting);
    confirm they are provided by the surrounding Sage environment.
    """
    def __init__(self, knot_as_k_values):
        self.knot_as_k_values = knot_as_k_values
        self._knot_description = self.get_summand_descrption(knot_as_k_values)
        # cached lazily by the signature_as_function_of_theta property
        self._signature_as_function_of_theta = None
    @staticmethod
    def get_summand_descrption(knot_as_k_values):
        """Return a human-readable name, e.g. 'T(2, 3; 2, 7)' ('-' prefix
        when the first k value is negative, i.e. the mirror knot)."""
        description = ""
        if knot_as_k_values[0] < 0:
            description += "-"
        description += "T("
        for k in knot_as_k_values:
            description += "2, " + str(2 * abs(k) + 1) + "; "
        return description[:-2] + ")"
    @property
    def knot_description(self):
        return self._knot_description
    @property
    def signature_as_function_of_theta(self):
        # build the theta -> signature function on first access, then cache
        if self._signature_as_function_of_theta is None:
            self._signature_as_function_of_theta = \
                self.get_summand_signature_as_theta_function()
        return self._signature_as_function_of_theta
    @classmethod
    def get_blanchfield_for_pattern(cls, k_n, theta=0):
        """Twisted signature data of the pattern torus knot T(2, 2k+1)
        for twisting parameter theta, returned as a SignatureFunction."""
        msg = "Theorem on which this function is based, assumes " +\
              "theta < k, where q = 2*k + 1 for pattern knot T(p, q)."
        if theta == 0:
            # untwisted case: symmetrized square roots of the torus-knot
            # signature function
            sf = cls.get_untwisted_signature_function(k_n)
            return sf.square_root() + sf.minus_square_root()
        k = abs(k_n)
        assert theta <= k, msg
        results = []
        ksi = 1/(2 * k + 1)
        # NOTE(review): 'counter' is filled in the first loop but never
        # used afterwards; only 'results' feeds the returned object.
        counter = Counter()
        # lambda_odd, i.e. (theta + e) % 2 != 0
        for e in range(1, k + 1):
            if (theta + e) % 2 != 0:
                counter[e * ksi] = 1 * sgn(k_n)
                counter[1 - e * ksi] = -1 * sgn(k_n)
                results.append((e * ksi, 1 * sgn(k_n)))
                results.append((1 - e * ksi, -1 * sgn(k_n)))
        # e.g. for k = 9 (q = 19) this loop yields jumps +1 at e*ksi and
        # -1 at 1 - e*ksi for the e of the parity opposite to theta
        # lambda_even, "normal" orientation: e below theta
        for e in range(1, theta):
            if (theta + e) % 2 == 0:
                results.append((e * ksi, 1 * sgn(k_n)))
                results.append((1 - e * ksi, -1 * sgn(k_n)))
        # "reversed" orientation: e above theta gets the opposite signs
        for e in range(theta + 1, k + 1):
            if (theta + e) % 2 == 0:
                results.append((e * ksi, -1 * sgn(k_n)))
                results.append((1 - e * ksi, 1 * sgn(k_n)))
        return SignatureFunction(values=results)
    @classmethod
    def get_satellite_part(cls, *knot_as_k_values, theta=0):
        """Accumulate the satellite contribution of all companion layers."""
        patt_k = knot_as_k_values[-1]
        ksi = 1/(2 * abs(patt_k) + 1)
        satellite_part = SignatureFunction()
        # For each knot summand consider k values in reversed order,
        # ommit k value for pattern.
        for layer_num, k in enumerate(knot_as_k_values[:-1][::-1]):
            sf = cls.get_untwisted_signature_function(k)
            # '2^layer_num' is Sage exponentiation (NOT Python xor):
            # each deeper layer doubles the twisting shift
            shift = theta * ksi * 2^layer_num
            right_shift = sf >> shift
            left__shift = sf << shift
            # each layer also doubles the cover of the shifted functions
            for _ in range(layer_num):
                right_shift = right_shift.double_cover()
                left__shift = left__shift.double_cover()
            satellite_part += right_shift + left__shift
        return satellite_part
    @staticmethod
    def get_untwisted_signature_function(j):
        # return the signature function of the T_{2, 2k+1} torus knot
        # (sign of j selects the knot or its mirror)
        k = abs(j)
        q = 2 * k + 1
        counter = Counter({(2 * a + 1)/(2 * q) : -sgn(j)
                           for a in range(k)})
        counter.update(Counter({(2 * a + 1)/(2 * q) : sgn(j)
                                for a in range(k + 1, q)}))
        return SignatureFunction(counter=counter)
    def get_summand_signature_as_theta_function(self):
        """Return a closure theta -> (pattern_part, satellite_part)."""
        knot_as_k_values = self.knot_as_k_values
        def get_summand_signture_function(theta):
            patt_k = knot_as_k_values[-1]
            # theta should not be larger than k for the pattern:
            # reduce mod q = 2k+1 and fold into [0, k]
            theta %= (2 * abs(patt_k) + 1)
            theta = min(theta, 2 * abs(patt_k) + 1 - theta)
            pattern_part = self.get_blanchfield_for_pattern(patt_k, theta)
            satellite_part = self.get_satellite_part(*knot_as_k_values,
                                                     theta=theta)
            return pattern_part, satellite_part
        # docstring template defined elsewhere in this file
        get_summand_signture_function.__doc__ = \
            get_summand_signture_function_docsting
        return get_summand_signture_function
    def get_file_name_for_summand_plot(self, theta=0):
        """Encode the summand and theta into a plot file name."""
        if self.knot_as_k_values[0] < 0:
            name = "inv_T_"
        else:
            name = "T_"
        for k in self.knot_as_k_values:
            name += str(abs(k)) + "_"
        name += "_theta_" + str(theta)
        return name
    def plot_summand_for_theta(self, theta, save_path=None):
        """Plot pattern + satellite parts for one theta (to file if asked)."""
        pp, sp = self.signature_as_function_of_theta(theta)
        title = self.knot_description + ", theta = " + str(theta)
        if save_path is not None:
            file_name = self.get_file_name_for_summand_plot(theta)
            # NOTE(review): 'os' is not imported in this excerpt
            save_path = os.path.join(save_path, file_name)
        pp.plot_sum_with_other(sp, title=title, save_path=save_path)
    def plot_summand(self):
        # plot at most the first three theta values for this summand
        range_limit = min(self.knot_as_k_values[-1] + 1, 3)
        for theta in range(range_limit):
            self.plot_summand_for_theta(theta)
class CableSum():
    """Connected sum of cable-knot summands.

    Evaluates the sum's signature function at a vector of twisting
    parameters (one theta per summand) and searches metabolizers for
    elements of large signature.  Same caveat as CableSummand: this is
    SageMath source ('^' is exponentiation; Primes, SignatureFunction, os,
    signature_as_function_of_theta_docstring come from the surrounding
    environment).
    """
    def __init__(self, knot_sum):
        # the setter below also validates q-values and derives q_order
        self.knot_sum_as_k_valus = knot_sum
        self.knot_summands = [CableSummand(k) for k in knot_sum]
        self.signature_as_function_of_theta = \
            self.get_signature_as_function_of_theta()
    def __call__(self, *thetas):
        # make the object directly callable: cable(t1, ..., tn)
        return self.signature_as_function_of_theta(*thetas)
    def get_dir_name_for_plots(self, dir=None):
        """Build (and create on disk, under cwd[/dir]) a directory name
        encoding the summands, e.g. 'T_3_7_inv_T_5_11'."""
        dir_name = ''
        for knot in self.knot_summands:
            if knot.knot_as_k_values[0] < 0:
                dir_name += "inv_"
            dir_name += "T_"
            for k in knot.knot_as_k_values:
                k = 2 * abs (k) + 1  # encode q = 2|k|+1, not k itself
                dir_name += str(k) + "_"
        dir_name = dir_name[:-1]
        print(dir_name)
        dir_path = os.getcwd()
        if dir is not None:
            dir_path = os.path.join(dir_path, dir)
        dir_path = os.path.join(dir_path, dir_name)
        if not os.path.isdir(dir_path):
            os.mkdir(dir_path)
        return dir_name
    def plot_sum_for_theta_vector(self, thetas, save_to_dir=False):
        """Plot each summand and the total for one theta vector.

        NOTE(review): 'dir_name' is assigned only when save_to_dir is True,
        so the final return raises UnboundLocalError otherwise.
        """
        if save_to_dir:
            if not os.path.isdir(PLOTS_DIR):
                os.mkdir(PLOTS_DIR)
            dir_name = self.get_dir_name_for_plots(dir=PLOTS_DIR)
            save_path = os.path.join(os.getcwd(), PLOTS_DIR)
            save_path = os.path.join(save_path, dir_name)
        else:
            save_path = None
        for i, knot in enumerate(self.knot_summands):
            knot.plot_summand_for_theta(thetas[i], save_path=save_path)
        pp, sp = self.signature_as_function_of_theta(*thetas)
        title = self.knot_description + ", thetas = " + str(thetas)
        if save_path is not None:
            # derive a file name from the theta vector, e.g. [1, 2] -> 1_2
            file_name = re.sub(r', ', '_', str(thetas))
            file_name = re.sub(r'[\[\]]', '', str(file_name))
            file_path = os.path.join(save_path, file_name)
            pp.plot_sum_with_other(sp, title=title, save_path=file_path)
        if save_path is not None:
            # combined per-summand plot: started but left unfinished
            file_path = os.path.join(save_path, "all_" + file_name)
            sf_list = [knot.signature_as_function_of_theta(thetas[i])
                       for i, knot in enumerate(self.knot_summands)]
            # pp, sp = knot.signature_as_function_of_theta(thetas[i])
            # (pp + sp) = sp.plot
            #
            # pp.plot_sum_with_other(sp, title=title, save_path=file_path)
        return dir_name
    def plot_all_summands(self):
        for knot in self.knot_summands:
            knot.plot_summand()
    @property
    def knot_description(self):
        return self._knot_description
    @property
    def patt_k_list(self):
        return self._patt_k_list
    @property
    def patt_q_list(self):
        return self._patt_q_list
    # q_order is LCM of all q values for pattern knots
    @property
    def q_order(self):
        return self._q_order
    @q_order.setter
    def q_order(self, val):
        self._q_order = val
    @property
    def knot_sum_as_k_valus(self):
        return self._knot_sum_as_k_valus
    @knot_sum_as_k_valus.setter
    def knot_sum_as_k_valus(self, knot_sum):
        # derive description, pattern k/q lists and q_order; every pattern
        # q must be prime for the metabolizer search below to apply
        self._knot_sum_as_k_valus = knot_sum
        self._knot_description = self.get_knot_descrption(knot_sum)
        self._patt_k_list = [abs(i[-1]) for i in knot_sum]
        self._patt_q_list = [2 * i + 1 for i in self._patt_k_list]
        if any(n not in Primes() for n in self._patt_q_list):
            msg = "Incorrect q-vector. This implementation assumes that" + \
                  " all last q values are prime numbers.\n" + \
                  str(self._patt_q_list)
            raise ValueError(msg)
        self.q_order = LCM_list(self._patt_q_list)
    def parse_thetas(self, *thetas):
        """Normalize the theta arguments into a tuple, one per summand:
        accepts nothing (all zeros), a single iterable, a single 0, or a
        full argument list."""
        summands_num = len(self.knot_sum_as_k_valus)
        if not thetas:
            thetas = summands_num * (0,)
        elif len(thetas) == 1 and summands_num > 1:
            if isinstance(thetas[0], Iterable):
                if len(thetas[0]) >= summands_num:
                    thetas = thetas[0]
                elif not thetas[0]:
                    thetas = summands_num * (0,)
            elif thetas[0] == 0:
                thetas = summands_num * (0,)
            else:
                msg = "This function takes at least " + str(summands_num) + \
                      " arguments or no argument at all (" + str(len(thetas)) \
                      + " given)."
                raise TypeError(msg)
        return tuple(thetas)
    @staticmethod
    def get_knot_descrption(knot_sum):
        """Return e.g. 'T(2, 3; 2, 7) # -T(2, 5)' for the whole sum."""
        description = ""
        for knot in knot_sum:
            if knot[0] < 0:
                description += "-"
            description += "T("
            for k in knot:
                description += "2, " + str(2 * abs(k) + 1) + "; "
            description = description[:-2] + ") # "
        return description[:-3]
    def get_signature_as_function_of_theta(self, **key_args):
        """Build a closure mapping a theta vector to the pair
        (pattern_part, satellite_part) of the total signature function."""
        if 'verbose' in key_args:
            verbose_default = key_args['verbose']
        else:
            verbose_default = False
        knot_desc = self.knot_description
        def signature_as_function_of_theta(*thetas, **kwargs):
            verbose = verbose_default
            if 'verbose' in kwargs:
                verbose = kwargs['verbose']
            thetas = self.parse_thetas(*thetas)
            satellite_part = SignatureFunction()
            pattern_part = SignatureFunction()
            # for each cable knot (summand) in cable sum apply theta
            for i, knot in enumerate(self.knot_summands):
                sfth = knot.signature_as_function_of_theta
                pp, sp = sfth(thetas[i])
                pattern_part += pp
                satellite_part += sp
            sf = pattern_part + satellite_part
            if verbose:
                print()
                print(str(thetas))
                print(sf)
            # sanity: a knot's signature-jump function must sum to zero
            assert sf.total_sign_jump() == 0
            return pattern_part, satellite_part
        # docstring template defined elsewhere in this file
        signature_as_function_of_theta.__doc__ =\
            signature_as_function_of_theta_docstring
        return signature_as_function_of_theta
    def is_metabolizer(self, theta):
        # Theta belongs to a metabolizer iff the alternating sum of
        # squares, each divided by its pattern q value, is an integer.
        # ('^' is Sage exponentiation; '/' is exact rational division.)
        result = sum(el^2 / self.patt_q_list[idx] * (-1)^idx
                     for idx, el in enumerate(theta))
        return result.is_integer()
    def is_signature_big_in_ranges(self, ranges_list):
        """Return False iff some non-zero metabolizer element in the given
        theta ranges keeps a small signature for every multiple of it."""
        for thetas in it.product(*ranges_list):
            # Check only non-zero metabolizers.
            if not self.is_metabolizer(thetas) or not any(thetas):
                continue
            signature_is_small = True
            # Check if any element generated by thetas vector
            # has a large signature.
            for shift in range(1, self.q_order):
                shifted_thetas = [shift * th for th in thetas]
                pp, sp = self.signature_as_function_of_theta(*shifted_thetas)
                sf = pp + sp
                # "big" threshold grows with the number of twisted summands
                limit = 5 + np.count_nonzero(shifted_thetas)
                extremum = abs(sf.extremum(limit=limit))
                if shift > 1:
                    print(shifted_thetas, end=" ")
                    print(extremum)
                if extremum > limit:
                    signature_is_small = False
                    break
                elif shift == 1:
                    print("*" * 10)
                    print(shifted_thetas, end=" ")
                    print(extremum)
            if signature_is_small:
                print("\n" * 10 + "!" * 1000)
                return False
        return True
    def is_signature_big_for_all_metabolizers(self):
        """Run the metabolizer search over each consecutive group of four
        summands; only defined for sums of 4n summands."""
        num_of_summands = len(self.knot_sum_as_k_valus)
        if num_of_summands % 4:
            f_name = self.is_signature_big_for_all_metabolizers.__name__
            msg = "Function {}".format(f_name) + " is implemented only for " +\
                  "knots that are direct sums of 4n direct summands."
            raise ValueError(msg)
        for shift in range(0, num_of_summands, 4):
            # all thetas fixed to 0 except the current group of four:
            # three ranging up to their k value, the fourth in {0, 1}
            ranges_list = num_of_summands * [range(0, 1)]
            ranges_list[shift : shift + 3] = \
                [range(0, i + 1) for i in self.patt_k_list[shift: shift + 3]]
            ranges_list[shift + 3] = range(0, 2)
            if not self.is_signature_big_in_ranges(ranges_list):
                return False
            else:
                print("\nOK")
        return True
class CableTemplate():
    """Builder for CableSum objects from a symbolic knot formula.

    `knot_formula` is a string such as "[[k[0], k[1]], [-k[2]]]" that,
    evaluated against a concrete k vector, yields the list of summand
    descriptions passed to CableSum.  The q and k parameters are
    related by q_i = 2 * k_i + 1.
    """

    def __init__(self, knot_formula, q_vector=None, k_vector=None,
                 generate_q_vector=True, slice_candidate=True):
        self._knot_formula = knot_formula
        # Bug fix: always create the attribute so the `cable` property
        # can detect "not yet filled" instead of raising AttributeError
        # when no vector was supplied and generation was disabled.
        self._cable = None
        # q_i = 2 * k_i + 1
        if k_vector is not None:
            self.k_vector = k_vector
        elif q_vector is not None:
            self.q_vector = q_vector
        elif generate_q_vector:
            self.q_vector = self.get_q_vector(knot_formula, slice_candidate)

    @property
    def cable(self):
        """The CableSum instance; lazily filled with a default q_vector."""
        if self._cable is None:
            msg = "q_vector for cable instance has not been set explicit. " + \
                  "The variable is assigned a default value."
            warnings.warn(msg)
            self.fill_q_vector()
        return self._cable

    def fill_q_vector(self, q_vector=None, slice=True):
        """Assign `q_vector`, generating a default one when absent.

        Bug fix: the `slice` flag is now forwarded to get_q_vector; it
        was previously accepted but silently ignored.
        """
        if q_vector is None:
            q_vector = self.get_q_vector(self.knot_formula, slice)
        self.q_vector = q_vector

    @property
    def knot_formula(self):
        return self._knot_formula

    @property
    def k_vector(self):
        return self._k_vector

    @k_vector.setter
    def k_vector(self, k):
        self._k_vector = k
        if self.extract_max(self.knot_formula) > len(k) - 1:
            msg = "The vector for knot_formula evaluation is to short!"
            msg += "\nk_vector " + str(k) + " \nknot_formula " \
                + str(self.knot_formula)
            raise IndexError(msg)
        # Evaluate the formula with the local name `k` in scope to get
        # the concrete summand description, then build the cable sum.
        self.knot_sum_as_k_valus = eval(self.knot_formula)
        self._cable = CableSum(self.knot_sum_as_k_valus)
        self._q_vector = [2 * k_val + 1 for k_val in k]

    @property
    def q_vector(self):
        return self._q_vector

    @q_vector.setter
    def q_vector(self, new_q_vector):
        # NOTE: Sage division — exact for odd q; do not change to //.
        self.k_vector = [(q - 1)/2 for q in new_q_vector]

    @staticmethod
    def extract_max(string):
        """Return the largest integer literal occurring in `string`."""
        numbers = re.findall(r'\d+', string)
        numbers = map(int, numbers)
        return max(numbers)

    @classmethod
    def get_q_vector(cls, knot_formula, slice=True):
        """Generate a prime q for each index, layer by layer.

        Layers are processed innermost-first; each layer's primes
        exceed 4x the largest prime of the previous layer.
        """
        lowest_number = 2
        q_vector = [0] * (cls.extract_max(knot_formula) + 1)
        P = Primes()
        for layer in cls.get_layers_from_formula(knot_formula)[::-1]:
            for el in layer:
                q_vector[el] = P.next(lowest_number)
                lowest_number = q_vector[el]
            lowest_number *= 4
        return q_vector

    @staticmethod
    def get_layers_from_formula(knot_formula):
        """Group the k indices of `knot_formula` by position from the end.

        Returns a list of sets: layers[0] holds the last (pattern)
        index of every summand, layers[1] the second-to-last, etc.
        """
        # Strip 'k' and '-' then unwrap "[n]" -> "n" so the formula
        # evaluates to a plain list of lists of ints.
        k_indices = re.sub(r'[k-]', '', knot_formula)
        k_indices = re.sub(r'\[\d+\]', lambda x: x.group()[1:-1], k_indices)
        k_indices = eval(k_indices)
        number_of_layers = max(len(lst) for lst in k_indices)
        layers = []
        for i in range(1, number_of_layers + 1):
            layer = set()
            for lst in k_indices:
                if len(lst) >= i:
                    layer.add(lst[-i])
            layers.append(layer)
        return layers

    def add_with_shift(self, other):
        """Concatenate templates, renumbering `other`'s indices upward."""
        shift = self.extract_max(self.knot_formula) + 1
        o_formula = re.sub(r'\d+', lambda x: str(int(x.group()) + shift),
                           other.knot_formula)
        return self + CableTemplate(o_formula)

    def __add__(self, other):
        """Concatenate two templates' formulas into a new template."""
        s_formula = self.knot_formula
        o_formula = other.knot_formula
        knot_formula = s_formula[:-1] + ",\n" + o_formula[1:]
        cable_template = CableTemplate(knot_formula)
        return cable_template
def mod_one(n):
    """Return the fractional part of `n`, reduced to the interval [0, 1)."""
    whole_part = floor(n)
    return n - whole_part
# CableSum.get_knot_descrption.__doc__ = \
# """
# Arguments:
# arbitrary number of lists of numbers, each list encodes a single cable.
# Examples:
# sage: get_knot_descrption([1, 3], [2], [-1, -2], [-3])
# 'T(2, 3; 2, 7) # T(2, 5) # -T(2, 3; 2, 5) # -T(2, 7)'
# """
CableSum.get_signature_as_function_of_theta.__doc__ = \
"""
Function intended to construct signature function for a connected
sum of multiple cables with varying theta parameter values.
Accept arbitrary number of arguments (depending on number of cables in
connected sum).
Each argument should be given as list of integer representing
k - parameters for a cable: parameters k_i (i=1,.., n-1) for satelit knots
T(2, 2k_i + 1) and - the last one - k_n for a pattern knot T(2, 2k_n + 1).
Returns a function that will take theta vector as an argument and return
an object SignatureFunction.
To calculate signature function for a cable sum and a theta values vector,
use as below.
sage: signature_function_generator = get_signature_as_function_of_theta(
[1, 3], [2], [-1, -2], [-3])
sage: sf = signature_function_generator(2, 1, 2, 2)
sage: print(sf)
0: 0
5/42: 1
1/7: 0
1/5: -1
7/30: -1
2/5: 1
3/7: 0
13/30: -1
19/42: -1
23/42: 1
17/30: 1
4/7: 0
3/5: -1
23/30: 1
4/5: 1
6/7: 0
37/42: -1
Or like below.
sage: print(get_signature_as_function_of_theta([1, 3], [2], [-1, -2], [-3]
)(2, 1, 2, 2))
0: 0
1/7: 0
1/6: 0
1/5: -1
2/5: 1
3/7: 0
1/2: 0
4/7: 0
3/5: -1
4/5: 1
5/6: 0
6/7: 0
"""
get_summand_signture_function_docsting = \
"""
This function returns SignatureFunction for previously defined single
cable T_(2, q) and a theta given as an argument.
The cable was defined by calling function
get_summand_signature_as_theta_function(*arg)
with the cable description as an argument.
It is an implementaion of the formula:
Bl_theta(K'_(2, d)) =
Bl_theta(T_2, d) + Bl(K')(ksi_l^(-theta) * t)
+ Bl(K')(ksi_l^theta * t)
"""
signature_as_function_of_theta_docstring = \
"""
Arguments:
Returns object of SignatureFunction class for a previously defined
connected sum of len(arg) cables.
Accept len(arg) arguments: for each cable one theta parameter.
If call with no arguments, all theta parameters are set to be 0.
"""
#
# CableSummand.get_blanchfield_for_pattern.__doc__ = \
# """
# Arguments:
# k_n: a number s.t. q_n = 2 * k_n + 1, where
# T(2, q_n) is a pattern knot for a single cable from a cable sum
# theta: twist/character for the cable (value form v vector)
# Return:
# SignatureFunction created for pattern signature function
# for a given cable and theta/character
# Based on:
# Proposition 9.8. in Twisted Blanchfield Pairing
# (https://arxiv.org/pdf/1809.08791.pdf)
# """
# CableSummand.get_summand_signature_as_theta_function.__doc__ = \
# """
# Argument:
# n integers that encode a single cable, i.e.
# values of q_i for T(2,q_0; 2,q_1; ... 2, q_n)
# Return:
# a function that returns SignatureFunction for this single cable
# and a theta given as an argument
# """
|
989,957 | fb5b69d6bd1699377d65ce0b0b8c241373abd612 | import os
from colander import SchemaNode, MappingSchema
from deform import FileData, Form, ValidationFailure
from deform.widget import FileUploadWidget
from pyramid.httpexceptions import HTTPFound
from pyramid.i18n import TranslationStringFactory
from pyramid.security import remember, forget
from pyramid.view import view_config
# @view_config(renderer='templates/form.pt', name='file')
# @demonstrate('File Upload Widget')
# def file(self):
# class Schema(colander.Schema):
# upload = colander.SchemaNode(
# deform.FileData(),
# widget=deform.widget.FileUploadWidget(tmpstore)
# )
#
# schema = Schema()
# form = deform.Form(schema, buttons=('submit',))
#
# return self.render_form(form, success=tmpstore.clear)
from money_map.security import USERS, check_password
_ = TranslationStringFactory('money_map')
# pages = {
# '100': dict(uid='100', title='Page 100', body='<em>100</em>'),
# '101': dict(uid='101', title='Page 101', body='<em>101</em>'),
# '102': dict(uid='102', title='Page 102', body='<em>102</em>')
# }
# class WikiPage(colander.MappingSchema):
# title = colander.SchemaNode(colander.String())
# body = colander.SchemaNode(
# colander.String(),
# widget=deform.widget.RichTextWidget()
# )
class AuthenticationViews:
    """Login/logout and basic page views for the money_map Pyramid app."""

    def __init__(self, request):
        self.request = request
        # Userid of the authenticated user, or None when anonymous.
        self.logged_in = request.authenticated_userid

    # Cannot remove templates/
    # https://docs.pylonsproject.org/projects/pyramid-cookbook/en/latest/pylons/templates.html#chameleon
    @view_config(route_name='home', renderer='templates/home.pt')
    def home(self):
        """Public landing page."""
        ts = _('log-in')
        return {'name': 'Home View', 'log_in': ts}

    @view_config(route_name='hello', permission='edit', renderer='templates/hello.pt')
    def hello(self):
        """Protected page; requires the 'edit' permission."""
        return {'name': 'Hello View'}

    @view_config(route_name='login', renderer='templates/login.pt')
    def login(self):
        """Render and process the login form.

        On successful authentication, remembers the user and redirects
        to `came_from`; otherwise re-renders the form with a message.
        """
        request = self.request
        login_url = request.route_url('login')
        referrer = request.url
        if referrer == login_url:
            referrer = '/'  # never use login form itself as came_from
        came_from = request.params.get('came_from', referrer)
        message = ''
        login = ''
        password = ''
        if 'form.submitted' in request.params:
            login = request.params['login']
            password = request.params['password']
            hashed_pw = USERS.get(login)
            # check_password compares against the stored hash.
            if hashed_pw and check_password(password, hashed_pw):
                headers = remember(request, login)
                return HTTPFound(location=came_from,
                                 headers=headers)
            message = 'Failed login'
        return dict(
            name='Login',
            message=message,
            url=request.application_url + '/login',
            came_from=came_from,
            login=login,
            password=password,
        )

    @view_config(route_name='logout')
    def logout(self):
        """Drop the auth cookie and redirect to the home page."""
        request = self.request
        headers = forget(request)
        url = request.route_url('home')
        return HTTPFound(location=url,
                         headers=headers)
class BankAccountStatement(MappingSchema):
    """Colander schema for the statement-upload form: one XML file field."""

    class Store(dict):
        # Minimal in-memory tmpstore for FileUploadWidget; no previews.
        def preview_url(self, name):
            return ''

    # Single upload field restricted (client-side) to XML files.
    file = SchemaNode(FileData(), widget=FileUploadWidget(Store(), accept='text/xml'))
class BankAccountStatementViews(object):
    """Deform-based views for uploading a bank account statement."""

    def __init__(self, request):
        self.request = request

    @property
    def bank_account_statement_form(self):
        # A fresh Form instance is built on every access.
        schema = BankAccountStatement()
        return Form(schema, buttons=('submit',))

    @property
    def reqts(self):
        # JS/CSS resources the form widgets need in the template.
        return self.bank_account_statement_form.get_widget_resources()

    @view_config(route_name='bank_account_statement_upload', renderer='templates/bank_account_statement_upload.pt')
    def bank_account_statement_upload(self):
        """Render the upload form; on submit, validate and accept the file."""
        form = self.bank_account_statement_form.render()
        if 'submit' in self.request.params:
            controls = self.request.POST.items()
            try:
                appstruct = self.bank_account_statement_form.validate(controls)
            except ValidationFailure as e:
                # Re-render the form with inline validation errors.
                return dict(form=e.render())
            # last_uid = int(sorted(pages.keys())[-1])
            # new_uid = str(last_uid + 1)
            # pages[new_uid] = dict(
            #     uid=new_uid, title=appstruct['title'],
            #     body=appstruct['body']
            # )
            f = appstruct['file']
            bank_account_statement_filename = f['filename']
            # NOTE(review): the extension is computed but never used —
            # presumably intended for validation/storage naming; confirm.
            bank_account_statement_extension = os.path.splitext(bank_account_statement_filename)[1]
            bank_account_statement_file = f['fp']
            print(bank_account_statement_file)
            # Now visit new page
            # url = self.request.route_url('wikipage_view', uid=new_uid)
            url = self.request.route_url('home')
            return HTTPFound(url)
        return dict(form=form)
# class WikiViews(object):
# def __init__(self, request):
# self.request = request
#
# @property
# def wiki_form(self):
# schema = WikiPage()
# return deform.Form(schema, buttons=('submit',))
#
# @property
# def reqts(self):
# return self.wiki_form.get_widget_resources()
#
# @view_config(route_name='wiki_view', renderer='templates/wiki_view.pt')
# def wiki_view(self):
# return dict(pages=pages.values())
#
# @view_config(route_name='wikipage_add',
# renderer='templates/wikipage_addedit.pt')
# def wikipage_add(self):
# form = self.wiki_form.render()
#
# if 'submit' in self.request.params:
# controls = self.request.POST.items()
# try:
# appstruct = self.wiki_form.validate(controls)
# except deform.ValidationFailure as e:
# # Form is NOT valid
# return dict(form=e.render())
#
# # Form is valid, make a new identifier and add to list
# last_uid = int(sorted(pages.keys())[-1])
# new_uid = str(last_uid + 1)
# pages[new_uid] = dict(
# uid=new_uid, title=appstruct['title'],
# body=appstruct['body']
# )
#
# # Now visit new page
# url = self.request.route_url('wikipage_view', uid=new_uid)
# return HTTPFound(url)
#
# return dict(form=form)
#
# @view_config(route_name='wikipage_view', renderer='templates/wikipage_view.pt')
# def wikipage_view(self):
# uid = self.request.matchdict['uid']
# page = pages[uid]
# return dict(page=page)
#
# @view_config(route_name='wikipage_edit',
# renderer='templates/wikipage_addedit.pt')
# def wikipage_edit(self):
# uid = self.request.matchdict['uid']
# page = pages[uid]
#
# wiki_form = self.wiki_form
#
# if 'submit' in self.request.params:
# controls = self.request.POST.items()
# try:
# appstruct = wiki_form.validate(controls)
# except deform.ValidationFailure as e:
# return dict(page=page, form=e.render())
#
# # Change the content and redirect to the view
# page['title'] = appstruct['title']
# page['body'] = appstruct['body']
#
# url = self.request.route_url('wikipage_view',
# uid=page['uid'])
# return HTTPFound(url)
#
# form = wiki_form.render(page)
#
# return dict(page=page, form=form)
|
989,958 | b8d9a96f3a3c15e81486e3d370ef16a746d964b3 | # we use break to stop a loop
# Guard-style loop: stop searching as soon as the sugar turns up.
for item in ["balloons", "flowers", "sugar", "watermelons"]:
    if item == "sugar":
        print("Found the sugar")
        break
    print("We want sugar not " + item)

# Output:
"""
We want sugar not balloons
We want sugar not flowers
Found the sugar
"""
989,959 | bbb5b3625278ee3020c439cf6ca49761a724b9e5 | import sqlite3
from sqlite3 import Error
import os
import pandas as pd
import json
from flask import jsonify
# Absolute path to the test database, relative to the script's CWD.
db_path = os.path.abspath('../data/testdb.db')

# Materialise the top-10 visitors (by number of site visits) into the
# frequent_browsers table.  Note: whitespace inside this string is part
# of the SQL text but insignificant to SQLite.
sql = " INSERT INTO frequent_browsers \
SELECT personID, COUNT(personID) AS num_sites_visited FROM visits \
LEFT JOIN sites ON sites.id=visits.siteId \
LEFT JOIN people ON people.id=visits.personID \
GROUP BY personID \
ORDER BY num_sites_visited DESC \
LIMIT 10 \
"

def frequent_browsers(conn, sql):
    # Executes the INSERT...SELECT; the caller's `with conn:` commits it.
    cur = conn.cursor()
    cur.execute(sql)
def create_connection(database):
    """Open a SQLite connection to `database`.

    Returns the connection on success, or None (after printing the
    error) when the connection cannot be established.

    Fix: the success message was placed *after* `return conn` and was
    therefore unreachable; it is now printed before returning.
    """
    try:
        conn = sqlite3.connect(database)
        print("Connection Successful")
        return conn
    except Error as e:
        print("Connection Error " + str(e))
        return None
def query_top_browsers():
    """Connect to the test DB and populate the frequent_browsers table."""
    database = db_path
    print(database)
    conn = create_connection(database)
    print(conn)
    # `with conn:` commits on success and rolls back on exception.
    with conn:
        frequent_browsers(conn, sql)


if __name__ == '__main__':
    query_top_browsers()
|
989,960 | 1e8ed3f02def00383d3aae174426823f35c5847f | #strftime method is used to convert datetime as string format
from datetime import datetime
# Fixed Unix timestamp used for all examples (local-time rendering).
timestamp=1528797322
date_time=datetime.fromtimestamp(timestamp)
print("date and time: ", date_time)

# e.g. "06/12/2018, 11:55:22"
d=date_time.strftime("%m/%d/%Y, %H:%M:%S")
print("Output 2:", d)

# %b = abbreviated month name, e.g. "12 Jun, 2018"
d=date_time.strftime("%d %b, %Y")
print("Output 3:", d)

# %B = full month name, e.g. "12 June, 2018"
d=date_time.strftime("%d %B, %Y")
print("Output 4:", d)

# 12-hour clock hour plus AM/PM, e.g. "11AM"
d=date_time.strftime("%I%p")
print("Output 5:", d)
|
989,961 | 2c94d2c2ed45b6180c1831397686be0b0a071ea0 | #!/usr/bin/python
import os
import requests
import bitly_api
import blower
import tweet
import twilio_sms
from log import log_job
from rq import Queue
from worker import conn
from datetime import datetime
from pytz import timezone
tz = timezone('US/Pacific')

# Notify when a task pays at least this much ($/hr).
price_target = 35
# Only poll between these local hours (exclusive bounds).
earliest = 8
latest = 23

# Notification / logging channel toggles.
_blower = False
_twilio = True
_twitter = False
_email = False
_log = True
def handle_message(item):
    """Inspect one task item and send notifications / queue a log entry.

    Python 2 module (print statements).  Channels are selected by the
    module-level feature flags.

    NOTE(review): indentation was reconstructed — the price/logging
    branch is assumed to be nested under `if price:`; confirm against
    the original source.
    """
    print "new task:" + str(item["id"])
    price = item["instant_price"]
    if price:
        # "$35/hr" -> 35
        price = int(price.strip().lstrip("$").replace("/hr",""))
        if _log or price >= price_target:
            url = shorten_url(item["url"])
            if price >= price_target:
                print "possible task, send notifications"
                msg = "$" + str(price) + ": " + item["truncated_title"] + " " + url
                if _blower:
                    blower.send_sms(msg)
                if _twilio:
                    twilio_sms.send_sms(msg)
                if _twitter:
                    tweet.send_dm(msg)
                if _email:
                    pass
            if _log:
                print "@@@ add log_job to queue"
                q = Queue(connection=conn)
                values = [item["id"],datetime.now(),price,item["truncated_title"],url]
                q.enqueue(log_job, values)
def shorten_url(url):
    """Shorten `url` with the bit.ly API (token from BITLY_TOKEN env var)."""
    c = bitly_api.Connection(access_token=os.environ['BITLY_TOKEN'])
    response = c.shorten(url)
    return response["url"]
def fetch_tasks():
    """Poll the TaskRabbit feed and dispatch any unseen tasks.

    Only polls between `earliest` and `latest` hours (exclusive) in the
    configured timezone.  Seen task ids persist in id.dat, trimmed to
    the most recent ~500 entries.

    Fix: the original called `f.close` (a bare attribute access, not a
    call) twice, so the file handles were never closed; `with` blocks
    now guarantee closing before the reopen in write mode.
    """
    now = datetime.now(tz)
    if now.hour > earliest and now.hour < latest:
        url = os.environ['TASKRABBIT_URL']
        headers = { 'Cookie' : os.environ['TASKRABBIT_COOKIE'],
                    'Accept-Encoding' : 'gzip,deflate,sdch',
                    'Accept-Language' : 'en-US,en;q=0.8',
                    'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36',
                    'Accept' : '*/*',
                    'Referer' : 'https://www.taskrabbit.com/opentasks',
                    'X-Requested-With' : 'XMLHttpRequest',
                    'If-None-Match' : '"c2b24f81440e5f6951bfd324f08c84e6"',
                    'Connection' : 'keep-alive',
                    'X-TR-App' : 'truman' }
        r = requests.get(url, headers=headers)
        data = r.json()
        # Previously-seen task ids (stored one per line as strings).
        ids = []
        with open('id.dat', 'r+') as f:
            for line in f:
                ids.append(line.strip())
        items = data["response"]["items"]
        for item in items:
            if str(item["id"]) not in ids:
                handle_message(item)
                ids.append(item["id"])
        ## keep the last 500 or so
        # NOTE(review): `ids` mixes strings (from file) and ints (newly
        # appended); this sort relies on Python 2 mixed-type ordering.
        ids.sort(reverse=True)
        max_len = 500
        if len(ids) > max_len:
            new_ids = ids[:max_len]
        else:
            new_ids = ids
        # write ids
        with open('id.dat', 'w+') as f:
            for line in new_ids:
                f.write(str(line) + "\n")
|
989,962 | 4c1ee3275d87bc28538e0c810e0aa2fe85cb3457 | print("Bitoperatoren")
a = 6
b = 7
print("a = {:04b}".format(a))
print("b = {:04b}".format(b))
print()
print("a & b = {:04b}".format(a & b))  # AND
print("a | b = {:04b}".format(a | b))  # OR
print("a ^ b = {:04b}".format(a ^ b))  # XOR
print()
# NOT (two's complement: ~a == -a - 1)
print("~a = {:04b} (Ausgabe mit Vorzeichen!)".format(~a))  # negative value prints with a sign, so the binary output looks odd
print()
# SHIFT
print("001101000 >> 2 = {:09b}".format(0b001101000 >> 2))  # shift right by 2 positions
print("001101000 << 2 = {:09b}".format(0b001101000 << 2))  # shift left by 2 positions
989,963 | 2b433b97e0f52db77b44bba37a1f10f3d57e72af | # Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measurement questions, e.g., "How many hours are there in a day?"."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.modules import train_test_split
from mathematics_dataset.sample import number
from mathematics_dataset.util import composition
from mathematics_dataset.util import display
import six
import sympy
def _make_modules(is_train):
    """Returns modules, with split based on the boolean `is_train`."""
    conversion_module = functools.partial(
        conversion, is_train=is_train, is_extrapolation=False)
    time_module = functools.partial(time, is_train=is_train)
    return {'conversion': conversion_module, 'time': time_module}
def train(entropy_fn):
    """Returns dict of training modules."""
    del entropy_fn  # unused
    return _make_modules(is_train=True)


def test():
    """Returns dict of testing modules."""
    return _make_modules(is_train=False)


def test_extra():
    """Returns dict of extrapolation testing modules."""
    # Only decimal conversion supports extrapolation (see conversion()).
    return {
        'conversion': functools.partial(
            conversion, is_train=False, is_extrapolation=True),
    }
# A measurement unit: Italian singular name plus an optional symbol
# (None where the unit is normally written out in question text).
Unit = collections.namedtuple('Unit', ('name', 'symbol'))

MICRO_SYMBOL = ' μ'

# Each dimension maps Unit -> scale relative to its base unit (the
# entry whose value is 1); exact sympy rationals avoid float error.
LENGTH = {
    Unit('metro', ' m'): 1,
    Unit('chilometro', ' km'): 1000,
    Unit('centimetro', ' cm'): sympy.Rational(1, 100),
    Unit('millimetro', ' mm'): sympy.Rational(1, 1000),
    Unit('micrometro', ' μm'): sympy.Rational(1, 1e6),
    Unit('nanometro', ' nm'): sympy.Rational(1, 1e9),
}

TIME = {
    Unit('secondo', ' s'): 1,
    Unit('minuto', None): 60,
    Unit('ora', None): 60*60,
    Unit('giorno', None): 24*60*60,
    Unit('settimana', None): 7*24*60*60,
    Unit('millisecondo', ' ms'): sympy.Rational(1, 1e3),
    Unit('microsecondo', MICRO_SYMBOL + 's'): sympy.Rational(1, 1e6),
    Unit('nanosecondo', ' ns'): sympy.Rational(1, 1e9),
}

# Year-based units kept separate from second-based TIME so conversions
# within each dimension stay exact.
TIME_YEARLY = {
    Unit('anno', None): 1,
    Unit('decade', None): 10,
    Unit('secolo', None): 100,
    Unit('millenio', None): 1000,
    Unit('mese', None): sympy.Rational(1, 12),
}

MASS = {
    Unit('chilogrammo', ' kg'): 1,  # Yes, the *kilo*gram is the SI base unit.
    Unit('tonnellata', ' t'): 1000,
    Unit('grammo', ' g'): sympy.Rational(1, 1e3),
    Unit('milligrammo', ' mg'): sympy.Rational(1, 1e6),
    Unit('microgrammo', MICRO_SYMBOL + 'g'): sympy.Rational(1, 1e9),
    Unit('nanogrammo', ' ng'): sympy.Rational(1, 1e12),
}

VOLUME = {
    Unit('litro', ' l'): 1,
    Unit('millilitro', ' ml'): sympy.Rational(1, 1000),
}

DIMENSIONS = [LENGTH, TIME, TIME_YEARLY, MASS, VOLUME]
def pluralize(name):
    """Return the Italian plural of a unit name.

    The regular rule swaps the final vowel for 'i' (metro -> metri);
    feminine -a nouns taking -e are special-cased.

    Fix: 'tonnellata' previously pluralized to the misspelled
    'tonnelate'; the correct Italian plural is 'tonnellate'.
    """
    if name == 'ora':
        return 'ore'
    if name == 'settimana':
        return 'settimane'
    if name == 'tonnellata':
        return 'tonnellate'
    return name[:-1] + 'i'
def _factor_non_decimal(value):
    """Extras x dividing value such that x is coprime to 2 and 5."""
    result = 1
    for factor, power in six.iteritems(sympy.factorint(value)):
        if factor in (2, 5):
            continue  # powers of 2 and 5 form the decimal part
        result *= factor ** power
    return result
def _sample_conversion_decimal(dimension, is_extrapolation):
    """Samples to and from units and values."""
    base_unit, target_unit = random.sample(list(dimension.keys()), 2)
    # Exact ratio between the two units.
    scale = sympy.Rational(dimension[base_unit]) / dimension[target_unit]
    # Multiply the sampled value by the non-(2,5) part of the scale's
    # denominator so that the converted value is a terminating decimal.
    scale_non_decimal = _factor_non_decimal(sympy.denom(scale))
    entropy = 9 if is_extrapolation else 7
    base_value = number.non_integer_decimal(entropy, signed=False)
    base_value = display.Decimal(base_value.value * scale_non_decimal)
    target_value = display.Decimal(base_value.value * scale)
    return base_value, base_unit, target_value, target_unit
def _conversion_decimal(context, is_train, is_extrapolation):
    """E.g., "How many grams are in 5kg?"."""
    dimension = random.choice(DIMENSIONS)
    # Resample until the value lands in the requested train/test split.
    while True:
        base_value, base_unit, target_value, target_unit = (
            _sample_conversion_decimal(dimension, is_extrapolation))
        if train_test_split.is_train(base_value) == is_train:
            break
    templates = [
        'Qual è il numero di {target_name} in {base_value} {base_name}?',
        'Si converta {base_value} {base_name} in {target_name}.',
    ]
    # Symbol-based phrasings only make sense for units that have one.
    if base_unit.symbol is not None:
        templates += [
            'Qual è il numero di {target_name} in {base_value}{base_symbol}?',
            'Si converta {base_value}{base_symbol} in {target_name}.',
        ]
    template = random.choice(templates)
    base_name = pluralize(base_unit.name)
    target_name = pluralize(target_unit.name)
    question = example.question(
        context,
        template,
        base_name=base_name,
        base_symbol=base_unit.symbol,
        base_value=base_value,
        target_name=target_name)
    return example.Problem(question=question, answer=target_value)
def _conversion_fraction(context, is_train):
    """E.g., "How many grams are in three quarters of a kg?"."""
    dimension = random.choice(DIMENSIONS)
    # Limit probability of giving zero answer.
    allow_zero = random.random() < 0.2
    # Repeat until we find a pair with an integral answer. (Avoids ambiguity with
    # decimals.)
    while True:
        base_unit, target_unit = random.sample(list(dimension.keys()), 2)
        base_value = number.non_integer_rational(2, signed=False)
        if train_test_split.is_train(base_value) != is_train:
            continue
        answer = (base_value * sympy.Rational(dimension[base_unit])
                  / sympy.Rational(dimension[target_unit]))
        if (abs(answer) <= 100000
                and sympy.denom(answer) == 1
                and (allow_zero or answer != 0)):
            break
    template = random.choice([
        'Qual è il numero di {target_name} in {base_value} di {base_name}?',
        'Si converta {base_value} di {base_name} in {target_name}.',
    ])
    #if sympy.denom(base_value) > 20 or random.choice([False, True]):
    base_value_string = base_value  # Will be represented as e.g., 2/3.
    #else:
    #    base_value_string = display.StringNumber(base_value)  # e.g., two thirds
    question = example.question(
        context, template,
        base_name=base_unit.name,
        base_value=base_value_string,
        target_name=pluralize(target_unit.name))
    return example.Problem(question=question, answer=answer)
def conversion(is_train, is_extrapolation):
    """Conversion question, in decimal or fraction."""
    context = composition.Context()
    # TODO(b/124038528): implement extrapolation for fraction conversions too
    use_decimal = is_extrapolation or random.choice([False, True])
    if not use_decimal:
        return _conversion_fraction(context, is_train=is_train)
    return _conversion_decimal(
        context, is_train=is_train, is_extrapolation=is_extrapolation)
def time(is_train):
    """Questions for calculating start, end, or time differences."""
    context = composition.Context()
    start_minutes = random.randint(1, 24*60 - 1)
    # Resample the duration until it falls in the requested split.
    while True:
        duration_minutes = random.randint(1, 12*60 - 1)
        if train_test_split.is_train(duration_minutes) == is_train:
            break
    end_minutes = start_minutes + duration_minutes

    def format_12hr(minutes):
        """Format minutes from midnight in 12 hr format."""
        # NOTE: this localisation actually renders 24h "H:MM"; the
        # AM/PM logic is deliberately kept commented out.
        hours = (minutes // 60) % 24
        minutes %= 60
        #am_pm = 'AM' if hours < 12 else 'PM'
        #hours = (hours - 1) % 12 + 1
        return '{}:{:02}'.format(hours, minutes)
        #return '{}:{:02} {}'.format(hours, minutes, am_pm)

    start = format_12hr(start_minutes)
    end = format_12hr(end_minutes)
    which_question = random.randint(0, 3)
    if which_question == 0:
        # Question: What is start = end - duration?
        template = random.choice([
            'Che ore sono se mancano {duration} minuti alle {end}?',
        ])
        return example.Problem(
            question=example.question(
                context, template, duration=duration_minutes, end=end),
            answer=start)
    elif which_question == 1:
        # Question: What is end = start + duration?
        template = random.choice([
            'Che ore sono se sono passati {duration} minuti dalle {start}?',
        ])
        return example.Problem(
            question=example.question(
                context, template, duration=duration_minutes, start=start),
            answer=end)
    else:
        # Question: What is duration = end - start?
        template = random.choice([
            'Quanti minuti ci sono tra le {start} e le {end}?',
        ])
        return example.Problem(
            question=example.question(context, template, start=start, end=end),
            answer=duration_minutes)
|
989,964 | ac8c6bb318ef8c37f3a5ccf6f3e150ae862237f3 | # Critter Caretaker
# Virtual pet to care for
class Critter(object):
    """A virtual pet"""

    def __init__(self, name, hunger=0, boredom=0):
        self.name = name
        self.hunger = hunger
        self.boredom = boredom

    def __tick(self):
        # Every interaction lets some time pass, raising both needs.
        self.hunger += 1
        self.boredom += 1

    @property
    def mood(self):
        """Mood string derived from combined hunger and boredom."""
        unhappiness = self.hunger + self.boredom
        if unhappiness < 5:
            return "happy"
        elif 5 <= unhappiness <= 10:
            return "okay"
        elif 11 <= unhappiness <= 15:
            return "frustrated"
        return "mad"

    def talk(self):
        """Report the critter's current mood."""
        print("I'm ", self.name, " and I feel ", self.mood, " now\n")
        self.__tick()

    def eat(self, food=4):
        """Reduce hunger by `food` (never below zero)."""
        print("Brrupp. Thank you.")
        self.hunger = max(self.hunger - food, 0)
        self.__tick()

    def play(self, fun=4):
        """Reduce boredom by `fun` (never below zero)."""
        print("Wheee!")
        self.boredom = max(self.boredom - fun, 0)
        self.__tick()
def main():
    """Interactive menu loop for naming and caring for one critter."""
    crit_name = input("What do you want to name your critter?: ")
    crit = Critter(crit_name)
    choice = None
    # "0" quits; any other recognised digit triggers an interaction.
    while choice != "0":
        print("""
Critter Caretaker
0- Quit
1- Listen to your critter
2- Feed your critter
3- Play with your critter
""")
        choice = input("Choice: ")
        if choice == "0":
            print("Good bye.")
        elif choice == "1":
            crit.talk()
        elif choice == "2":
            crit.eat()
        elif choice == "3":
            crit.play()
        else:
            print("\nInvalid choice.")


if __name__ == "__main__":
    main()
input("\n\nPress the Enter key to exit") |
989,965 | 3b10841758f98f911670fe11abf80d86ab204f12 | from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
random_state = 100

# Synthetic data: 500 samples, 10 features, 4 well-separated blobs.
blobs = datasets.make_blobs(
    n_samples = 500,
    n_features = 10,
    centers = 4,
    cluster_std = 1,
    center_box = (-15.0, 15.0),
    shuffle = True,
    random_state = random_state
)
features, target = blobs

# iris = datasets.load_iris()
# features = iris.data
# target = iris.target

# within cluster summ of squares
wcss = []
start, stop, step = 2, 6, 1
clusters_range = range(start, stop + step, step)

# Elbow method: fit KMeans for each k and record inertia (WCSS).
for n_clusters in clusters_range:
    kms = KMeans(
        n_clusters=n_clusters,
        init='k-means++',
        max_iter=300,
        n_init=10,
        random_state=random_state
    )
    kms.fit(features)
    wcss.append(kms.inertia_)

plt.plot(clusters_range, wcss)
plt.title('The elbow method')
plt.xlabel('number of clusters')
plt.ylabel('Within cluster summ of squares')
plt.yticks(wcss)
plt.xticks(clusters_range)
plt.show()
feature_dim = (1, 2)  # the two feature columns shown in the scatter panel

# One figure per candidate cluster count: silhouette plot + scatter.
for n_clusters in clusters_range:
    fig, (fst_plt, snd_plt) = plt.subplots(1, 2)

    # fst_plt is silhouette plot
    # silhouette coefficient ranges from [-1, 1]
    fst_plt.set_xlim([-1, 1])
    # (n_clusters + 1) * 10] for spaces
    fst_plt.set_ylim([0, len(features) + (n_clusters + 1) * 10])

    kms = KMeans(n_clusters = n_clusters, random_state = 10)
    cluster_labels = kms.fit_predict(features)
    silhouette_avg = silhouette_score(features, cluster_labels)
    print(f'For n_clusters = {n_clusters} average silhouette is {silhouette_avg}')
    sample_silhouette_values = silhouette_samples(features, cluster_labels)

    y_lower = 10
    for i in range(n_clusters):
        # Sorted silhouette values of the samples in cluster i, drawn
        # as one horizontal band.
        ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        color = cm.nipy_spectral(float(i) / n_clusters)
        fst_plt.fill_betweenx(
            np.arange(y_lower, y_upper),
            0,
            ith_cluster_silhouette_values,
            facecolor = color,
            edgecolor = color,
            alpha = 0.7
        )
        # Label each band with its cluster index.
        fst_plt.text(
            -0.05,
            y_lower + 0.5 * size_cluster_i, str(i)
        )
        y_lower = y_upper + 10  # 10-sample gap between bands

    fst_plt.set_title('The silhouette for various clusters')
    fst_plt.set_xlabel('The silhouette coefficient values')
    fst_plt.set_ylabel('Cluster label')
    fst_plt.set_yticks([])
    fst_plt.set_xticks(np.arange(-0.1, 1, 0.1))
    # Vertical line marks the average silhouette score.
    fst_plt.axvline(x = silhouette_avg, color = 'red', linestyle = '--')

    colors = cm.nipy_spectral(
        cluster_labels.astype(float) / n_clusters
    )
    snd_plt.scatter(
        features[:, feature_dim[0]],
        features[:, feature_dim[1]],
        marker = '.',
        s = 30, lw = 0, alpha = 0.7, c = colors, edgecolor = 'k'
    )
    # Cluster centers drawn as white circles labelled with the index.
    centers = kms.cluster_centers_
    snd_plt.scatter(
        centers[:, feature_dim[0]],
        centers[:, feature_dim[1]],
        marker = 'o', c = 'white', alpha = 1, s = 200, edgecolor = 'k'
    )
    for i, center in enumerate(centers):
        snd_plt.scatter(
            center[feature_dim[0]], center[feature_dim[1]],
            marker = f'${i}$', alpha = 1, s = 50, edgecolor = 'k'
        )
    snd_plt.set_title('The visualisation of the clustered data')
    snd_plt.set_xlabel(f'Feature space for the {feature_dim[0]} feature')
    snd_plt.set_ylabel(f'Feature space for the {feature_dim[1]} feature')
    plt.suptitle(
        ("Silhouette analysis for KMeans clustering on sample data "
         "with n_clusters = %d" % n_clusters),
        fontsize=14, fontweight='bold'
    )
plt.show() |
989,966 | 07ab8719677ee2597ba4a27977bdbcc72aafbef8 | """Extension of app JSON capabilities."""
import flask
import pytest
from flask_mongoengine import MongoEngine
from flask_mongoengine.json import use_json_provider
@pytest.fixture()
def extended_db(app):
    """Provider config fixture."""
    # Install the dummy encoder/provider *before* MongoEngine init so
    # the extension's JSON class is created as a subclass of it.
    if use_json_provider():
        app.json_provider_class = DummyProvider
    else:
        app.json_encoder = DummyEncoder
    app.config["MONGODB_SETTINGS"] = [
        {
            "db": "flask_mongoengine_test_db",
            "host": "localhost",
            "port": 27017,
            "alias": "default",
            "uuidRepresentation": "standard",
        }
    ]
    test_db = MongoEngine(app)
    db_name = (
        test_db.connection["default"].get_database("flask_mongoengine_test_db").name
    )
    # Safety net: refuse to run against anything but a *_test_db.
    if not db_name.endswith("_test_db"):
        raise RuntimeError(
            f"DATABASE_URL must point to testing db, not to master db ({db_name})"
        )
    # Clear database before tests, for cases when some test failed before.
    test_db.connection["default"].drop_database(db_name)
    yield test_db
    # Clear database after tests, for graceful exit.
    test_db.connection["default"].drop_database(db_name)
class DummyEncoder(flask.json.JSONEncoder):
    """
    An example encoder which a user may create and override
    the apps json_encoder with.

    This class is a NO-OP, but used to test proper inheritance.
    """


# Provider classes only exist in Flask >= 2.2; define the dummy lazily.
DummyProvider = None
if use_json_provider():

    class DummyProvider(flask.json.provider.DefaultJSONProvider):
        """Dummy Provider, to test correct MRO in new flask versions."""
@pytest.mark.skipif(condition=use_json_provider(), reason="New flask use other test")
@pytest.mark.usefixtures("extended_db")
def test_inheritance_old_flask(app):
    # Old Flask (<2.2): the extension must subclass the user's encoder.
    assert issubclass(app.json_encoder, DummyEncoder)
    json_encoder_name = app.json_encoder.__name__
    assert json_encoder_name == "MongoEngineJSONEncoder"
@pytest.mark.skipif(
    condition=not use_json_provider(), reason="Old flask use other test"
)
@pytest.mark.usefixtures("extended_db")
def test_inheritance(app):
    # New Flask (>=2.2): the extension must subclass the user's provider,
    # and the app's active json object must be an instance of it.
    assert issubclass(app.json_provider_class, DummyProvider)
    json_provider_class = app.json_provider_class.__name__
    assert json_provider_class == "MongoEngineJSONProvider"
    assert isinstance(app.json, DummyProvider)
|
989,967 | 72fedf1334ecd62c78b8f32b35a22421cf06b69a | from PyQt5.QtGui import QIntValidator, QDoubleValidator
from PyQt5.QtWidgets import QLineEdit
def set_validator(edit: QLineEdit, type: str):
    """Attach an input validator to *edit* based on *type*.

    'int' installs a QIntValidator, 'float' a QDoubleValidator; any other
    value clears the validator (``setValidator(None)``).
    """
    factories = {'int': QIntValidator, 'float': QDoubleValidator}
    factory = factories.get(type)
    edit.setValidator(factory() if factory is not None else None)
|
989,968 | 6447f591bed4cc660a8d12eaa32dfb4202b4e11a | import twitter
import json
from collections import Counter
from prettytable import PrettyTable
from bs4 import BeautifulSoup
import requests
seen = {}
### https://github.com/edsu/shortpipe/blob/master/shortpipe
def unshorten(url):
    """Resolve a (possibly shortened) URL to its final location.

    Results are memoised in the module-level ``seen`` cache; on any
    request failure the stripped input URL is returned unchanged.
    """
    url = url.strip()
    if url in seen:
        return seen[url]
    resolved = url
    try:
        response = requests.get(url)
        if response.status_code == 200:
            resolved = response.url
    except:
        # Best effort: fall back to the original URL on any failure.
        # TODO: maybe log something here?
        pass
    seen[url] = resolved
    return resolved
def get_title(url):
    """Return the content of the page's <meta name="title"> tag.

    Falls back to the string 'no title found' when the page cannot be
    fetched or carries no such tag. (Python 2 code.)
    """
    try:
        r = requests.get(url)
    except:
        print 'Could not get %s' % url
        return 'no title found'
    soup = BeautifulSoup(r.text)
    try:
        # Raises TypeError when find() returns None, caught below.
        title = soup.find("meta", {"name":"title"})['content']
    except:
        print 'no title found for %s' % url
        return "no title found"
    return title
def get_title_and_text(url):
    """Return (title, article_text) for a gva.be article URL.

    Supports the desktop (www.gva.be) and mobile (m.gva.be) layouts;
    returns ("no title found", "") for anything else or on any failure.
    (Python 2 code.)
    """
    try:
        r = requests.get(url)
    except:
        print 'Could not get %s' % url
        return ('no title found',"")
    soup = BeautifulSoup(r.text)
    if url.startswith('http://www.gva.be/'):
        try:
            title = soup.find("meta", {"name":"title"})['content']
            # Desktop layout: article paragraphs follow the date marker.
            art_text = ''.join([sibling.text for sibling in soup.find('span',attrs={"class":"date-marker"}).find_next_siblings()])
        except:
            print 'no title or article text found for %s' % url
            return ("no title found","")
    elif url.startswith('http://m.gva.be/'):
        try:
            # Mobile layout: OpenGraph title plus a single 'txt' div.
            title = soup.find("meta", {"property":"og:title"})['content']
            art_text = soup.find('div', attrs={'class':'txt'}).text
        except:
            print 'no title or article text found for %s' % url
            return ("no title found","")
    else:
        print 'no title or article text found for %s' % url
        return ("no title found","")
    return (title, art_text)
# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
# for these credentials, which you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
# Nothing to see by displaying twitter_api except that it's now a
# defined variable
q = 'gva.be'
count = 100
# See https://dev.twitter.com/docs/api/1.1/get/search/tweets
search_results = twitter_api.search.tweets(q=q, count=count)
statuses = search_results['statuses']
# Iterate through 5 more batches of results by following the cursor
for _ in range(5):
print "Length of statuses", len(statuses)
try:
next_results = search_results['search_metadata']['next_results']
except KeyError, e: # No more results when next_results doesn't exist
break
# Create a dictionary from next_results, which has the following form:
# ?max_id=313519052523986943&q=NCAA&include_entities=1
kwargs = dict([ kv.split('=') for kv in next_results[1:].split("&") ])
search_results = twitter_api.search.tweets(**kwargs)
statuses += search_results['statuses']
urls = [ url['expanded_url']
for status in statuses
for url in status['entities']['urls'] ]
#unshorten_urls = [unshorten_url(url) for url in urls]
unshortend_urls = []
titles = {}
for url in urls:
if (url.startswith('http://www.gva.be') or
url.startswith('http://m.gva.be/')) and ('.aspx' in url):
unshortend_url = url
else:
print 'Unshorten %s' % url
unshortend_url = unshorten(url)
(title, art_text) = get_title_and_text(unshortend_url)
if title <> 'no title found':
unshortend_urls.append(unshortend_url)
titles[unshortend_url] = title
art_texts[unshortend_url] = art_text
c = Counter(unshortend_urls)
pt = PrettyTable(field_names=['Title','Count'])
[ pt.add_row((titles[kv[0]], kv[1])) for kv in c.most_common() ]
pt.align['Title'], pt.align['Count'] = 'l','r'
print pt
|
989,969 | de9a6532c93f86a3a38b0411cb18e3c1e4b758ed | from django.apps import AppConfig
class Practica1Config(AppConfig):
    """Django application configuration for the 'Practica' app."""
    name = 'Practica'
989,970 | 5e35f7e3ce871d63864976b17b149d20f4afde78 | from flask import Blueprint,render_template,request,url_for,redirect
from app.trade_form import REGIONBForm
trade_bp = Blueprint('trade_bp',__name__, template_folder='templates', url_prefix='/trade')
from run import application
from app import trade_util
from app.trade_util import TradeUtil
logger=application.config['logger']
trade_util=TradeUtil.getinstance()
# from blinker import Namespace
# sig=Namespace()
# custom_signal=sig.signal('custom_signal')
# def log_template_renders(sender,template,context,**app):
# sender.logger.debug(template.name)
# @custom_signal.connect
# def process_received(app,**args):
# a=args.get('id')
# pass
#template_rendered.connect(log_template_renders,app)
# @mliosa_bp.before_request
# def check_access():
# if not trade_util.check_entitlement():
# render_template('error.html')
@application.route('/')
def healthcheck():
    """Liveness probe: report that the service is up."""
    # Fixed typo in response body: was 'runing'.
    return 'running'
@trade_bp.route('/sendtrade',methods=['POST','GET'])
def put_trade():
    """Render the trade-entry form."""
    form_data=REGIONBForm()
    return render_template('put_trade.html',form=form_data)
@trade_bp.route('/processtrade',methods=['POST'])
def processtrade():
    """Validate the submitted trade form, persist it and show the trade list.

    On validation failure the form is re-rendered with its errors.
    """
    form_data = REGIONBForm()
    if not form_data.validate_on_submit():
        return render_template('put_trade.html', form=form_data)
    # Both outcomes redirect to the listing page; the returned id is not
    # otherwise used here.
    trade_util.process_trade(request.form)
    return redirect(url_for('trade_bp.getmli'))
@trade_bp.route('/error')
def error():
    """Render the generic error page."""
    return render_template('error.html')
@trade_bp.app_template_filter()
def formattext(str1):
    """Jinja template filter: append the literal marker 'formatted!!'."""
    return "".join([str(str1), "formatted!!"])
@trade_bp.route('/gettrades',methods=['GET'])
def getmli():
    """Fetch all trades and render the listing page."""
    #custom_signal.send(current_app._get_current_object(),id=123)
    logger.info('fetching trades..')
    trades= trade_util.get_all_trades()
    return render_template('trade.html',data=trades)
@trade_bp.route('/get/<id>')
def getId(id):
    """Echo the URL path parameter back as the response body."""
    print(request.view_args)  # debug: show the parsed view arguments
    return str(id)
|
989,971 | 28dc1d1fb626cc1265e618e036c8ea34863a993b | # Generated by Django 3.1.7 on 2021-04-30 12:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares Case fields with Chinese
    verbose_name labels, blank/default options and updated date defaults."""

    dependencies = [
        ('case_app', '0004_auto_20210424_0508'),
    ]

    operations = [
        migrations.AlterField(
            model_name='case',
            name='contribute_context',
            field=models.TextField(max_length=256, verbose_name='資助項目'),
        ),
        migrations.AlterField(
            model_name='case',
            name='end_date',
            field=models.DateField(blank=True, default='2021-04-30', verbose_name='個案結束日期'),
        ),
        migrations.AlterField(
            model_name='case',
            name='is_scholorship',
            field=models.BooleanField(default=False, verbose_name='獎學金'),
        ),
        migrations.AlterField(
            model_name='case',
            name='name',
            field=models.CharField(max_length=256, verbose_name='姓名'),
        ),
        migrations.AlterField(
            model_name='case',
            name='phone',
            field=models.CharField(max_length=256, verbose_name='電話'),
        ),
        migrations.AlterField(
            model_name='case',
            name='scholorship_amount',
            field=models.IntegerField(blank=True, default=0, null=True, verbose_name='獎學金金額'),
        ),
        migrations.AlterField(
            model_name='case',
            name='school',
            field=models.CharField(max_length=256, verbose_name='學校'),
        ),
        migrations.AlterField(
            model_name='case',
            name='situation',
            field=models.TextField(default='無', max_length=512, verbose_name='個案情況'),
        ),
        migrations.AlterField(
            model_name='case',
            name='start_date',
            field=models.DateField(default='2021-04-30', verbose_name='個案開始日期'),
        ),
        migrations.AlterField(
            model_name='case',
            name='total_money',
            field=models.IntegerField(default=0, null=True, verbose_name='目前總資助金額'),
        ),
        migrations.AlterField(
            model_name='case',
            name='visited_form',
            field=models.CharField(blank=True, default='', max_length=256, verbose_name='訪視表'),
        ),
        migrations.AlterField(
            model_name='case',
            name='visited_photos',
            field=models.CharField(blank=True, default='', max_length=256, verbose_name='訪視照片'),
        ),
    ]
|
989,972 | e2beb5e069669024dc51cc4852d4c049fc3307a4 | # Generated by Django 3.1.2 on 2020-11-21 14:09
from django.db import migrations, models
import django_cryptography.fields
class Migration(migrations.Migration):
    """Auto-generated migration: wrap the Card model's cardnumber and
    password fields with django_cryptography field-level encryption."""

    dependencies = [
        ('shop', '0005_auto_20201121_1528'),
    ]

    operations = [
        migrations.AlterField(
            model_name='card',
            name='cardnumber',
            field=django_cryptography.fields.encrypt(models.CharField(max_length=100)),
        ),
        migrations.AlterField(
            model_name='card',
            name='password',
            field=django_cryptography.fields.encrypt(models.CharField(max_length=100)),
        ),
    ]
|
989,973 | a2135f2a95fc657cd8cde78ced3b321670746665 | import io #stdio
archivo = open("arch.txt","r")
archivo.seek(5)#se situa en el caracter de una linea
lineas = archivo.read()
print (lineas)
import pickle
lista = [1,2,"hola",["holanda"]]
fichero = open("archivo.pckl","wb")
pickle.dump(lista,fichero)
fichero.close()
fichero = open("archivo.pckl","rb")
data = pickle.load(fichero)
print(data) |
989,974 | 0a9f30f0b2c886e0fcec5b7117c06ff44b8c507d | import numpy as np
import cv2
image =cv2.imread("/../images/plant.jpg",-1)
image.shape
#getTickCount() returns number of clock count
#getTickFrequency() return number of clock-cycles per second.
start_time = cv2.getTickCount()
for i in range(1,50,2):
cv2.medianBlur(image,i)
print((cv2.getTickCount()-start_time)/cv2.getTickFrequency())
#set opencv as optimized mode
cv2.setUseOptimized(True)
cv2.useOptimized()
|
989,975 | ba6e7d5ba2a0182661dc26f06c51af5b4d2df1db | import pyeccodes.accessors as _
def load(h):
    """Auto-generated pyeccodes definition: register GRIB1
    local-definition accessors (MARS labeling, satellite band/ident,
    function code and padding) on handle *h*."""
    h.add(_.Constant('GRIBEXSection1Problem', (52 - _.Get('section1Length'))))
    _.Template('grib1/mars_labeling.def').load(h)
    h.add(_.Constant('operStream', "oper"))
    h.alias('mars.stream', 'operStream')
    h.add(_.Unsigned('band', 1))
    h.alias('mars.obstype', 'band')
    # MARS ident is the level-type indicator rendered as a decimal string.
    h.add(_.Sprintf('marsIdent', "%d", _.Get('indicatorOfTypeOfLevel')))
    h.alias('mars.ident', 'marsIdent')
    h.add(_.Unsigned('functionCode', 1))
    h.add(_.Pad('padding_loc3_1', 1))
989,976 | c9dcba07b5d79de8c5321375607714c08bf17f8d | import numpy as np
import unittest
from flare.common.error_handling import LastExpError
from flare.common.replay_buffer import Experience
from flare.common.replay_buffer import ReplayBuffer, NoReplacementQueue, Sample
class TestNoReplacementQueue(unittest.TestCase):
    """Behavioural tests for NoReplacementQueue sampling.

    Experience tuples are laid out as (obs, reward, action, episode_end),
    each scalar wrapped in a one-element list.
    """
    @classmethod
    def is_episode_end(cls, t):
        # The episode-end flag is the first element of the 4th field.
        return t[3][0]
    def test_sampling(self):
        """Sampling splits queued experiences into per-episode sequences
        and keeps the last experience behind for the next sample."""
        exp_q = NoReplacementQueue()
        #          obs          r    a     e
        exp_q.add((np.zeros(10), [1], [1], [0]))
        exp_q.add((np.zeros(10), [0], [-1], [1]))  # 1st episode end
        exp_q.add((np.zeros(10), [1], [2], [0]))
        exp_q.add((np.zeros(10), [1], [3], [0]))
        exp_q.add((np.zeros(10), [1], [4], [0]))
        exp_seqs = exp_q.sample(self.is_episode_end)
        # Last (open-episode) experience stays queued.
        self.assertEqual(len(exp_q), 1)
        self.assertEqual(len(exp_seqs), 2)
        self.assertEqual(len(exp_seqs[0]), 2)
        self.assertEqual(exp_seqs[0][0][2], [1])
        self.assertEqual(exp_seqs[0][1][2], [-1])
        self.assertEqual(len(exp_seqs[1]), 3)
        self.assertEqual(exp_seqs[1][0][2], [2])
        self.assertEqual(exp_seqs[1][1][2], [3])
        self.assertEqual(exp_seqs[1][2][2], [4])
        #          obs          r    a     e
        exp_q.add((np.zeros(10), [0], [-2], [1]))
        exp_seqs = exp_q.sample(self.is_episode_end)
        # Episode closed: the retained experience pairs with the new one.
        self.assertEqual(len(exp_q), 0)
        self.assertEqual(len(exp_seqs), 1)
        self.assertEqual(len(exp_seqs[0]), 2)
        self.assertEqual(exp_seqs[0][0][2], [4])
        self.assertEqual(exp_seqs[0][1][2], [-2])
        self.assertEqual(len(exp_q), 0)
class TestReplayBuffer(unittest.TestCase):
    """Behavioural tests for the circular ReplayBuffer."""

    @classmethod
    def is_episode_end(cls, t):
        # episode_end is stored as a plain bool in this test's tuples.
        return t[3]

    def test_single_instance_replay_buffer(self):
        """Fill well past capacity, checking circular eviction and that
        sampled pairs are consecutive and never start on a terminal step."""
        capacity = 30
        episode_len = 4
        buf = ReplayBuffer(capacity)
        # Fix: xrange is Python-2-only (NameError on Python 3); range
        # iterates identically on both interpreters.
        for i in range(10 * capacity):
            #        obs          r        a  e
            buf.add((np.zeros(10), i * 0.5, i, (i + 1) % episode_len == 0))
            # check the circular queue in the buffer
            self.assertTrue(len(buf) == min(i + 1, capacity))
            if (len(buf) < 2):  # need at least two elements
                continue
            # should raise error when trying to pick up the last element
            exp_seqs = buf.sample(capacity, self.is_episode_end, 0)
            for exp_seq in exp_seqs:
                self.assertEqual(len(exp_seq), 2)
                self.assertNotEqual(exp_seq[0][3], 1)
                self.assertEqual(exp_seq[1][2], exp_seq[0][2] + 1)

if __name__ == '__main__':
    unittest.main()
|
989,977 | cf4d8e3043fdc1040bbb3a3baa9c8fda7b829f2d | def main():
qtd_testes = int(input())
for i in range(qtd_testes):
#input só para atender o formato da entrada
input()
#Recebe a linha, faz split e converte cada itens do split em Int()
alunos = [int(i) for i in input().split(' ')]
#Atribue a alunos_desc os alunos ordenados descrecente.
alunos_desc = sorted(alunos, reverse=True)
# Soma 1 para item de alunos que seja igual em alunos_desc na mesma posicao
resultado = sum([1 for i in range(len(alunos)) if alunos[i] == alunos_desc[i]])
print(resultado)
if __name__ == '__main__':
main() |
989,978 | 4bc18daa0b696bbb46993aa65a4944c1f8b6df1b | from bs4 import BeautifulSoup
import urllib3
import socket, threading
from HtmlParse import *
from Server import *
# Listening TCP socket: accept forever and hand each client connection
# off to a Server thread.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow fast restart
s.bind(('', 1911))
s.listen(10)  # backlog of 10 pending connections
# NOTE(review): 'lock' is never used here; presumably intended to be
# shared with the Server threads — confirm.
lock = threading.Lock()
while True:
    (client_sock, client_addr) = s.accept()
    Server(client_sock, client_addr).start()
|
989,979 | 4a956c80d5a18918cd3d0564eeabfc71a6447074 | """
Neural networks
"""
from pathlib import Path
import pytorch_lightning as pl
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Subset
from torchvision.datasets import MNIST
from .nopeek_loss import NoPeekLoss
class SplitNN(pl.LightningModule):
    """Two-part split neural network for MNIST.

    ``part1`` (client half) ends in a bounded Tanh cut layer; Laplace
    noise from ``self._noise`` is added to the cut-layer activations
    before ``part2`` (server half) classifies them. All steps combine
    the task loss with a NoPeek penalty (see NoPeekLoss).
    """
    def __init__(self, hparams) -> None:
        super().__init__()
        self.hparams = hparams
        self.set_noise(self.hparams.noise_scale)
        self.part1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, 3, 1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(32, 64, 3, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
            torch.nn.Flatten(),
            torch.nn.Linear(9216, 500),
            torch.nn.Tanh(),  # tanh to bound outputs, otherwise cannot be D.P.
        )
        self.part2 = torch.nn.Sequential(
            torch.nn.Linear(500, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 10),
            torch.nn.Softmax(dim=1),
        )
    def set_noise(self, noise: float) -> None:
        """Set the scale of the Laplace noise added at the cut layer."""
        self._noise = torch.distributions.Laplace(0.0, noise)
    def forward(self, x):
        """Return (class probabilities, raw cut-layer activations).

        Noise is sampled fresh on every call (no train/eval branch here).
        """
        intermediate = self.part1(x)
        out = self.part2(
            intermediate
            + self._noise.sample(intermediate.size()).to(intermediate.device)
        )
        return out, intermediate
    def encode(self, x):
        """Client half: noised cut-layer activations for *x*."""
        out = self.part1(x)
        out += self._noise.sample(out.size()).to(out.device)
        return out
    def decode(self, x):
        """Server half: class probabilities from cut-layer activations."""
        return self.part2(x)
    def configure_optimizers(self):
        """Single Adam optimiser over both halves."""
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
    def training_step(self, batch, batch_idx: int):
        """One step: NoPeek-regularised loss plus batch accuracy for the bar."""
        data, targets = batch
        predictions, intermediates = self(data)
        loss = NoPeekLoss(self.hparams.nopeek_weight)(
            data, intermediates, predictions, targets
        )
        correct = predictions.max(1)[1].eq(targets.flatten())
        output = {
            "loss": loss,
            "progress_bar": {
                "accuracy": 100 * correct.sum().true_divide(correct.size(0)),
            },
        }
        return output
    def validation_step(self, batch, batch_idx: int):
        """Per-batch validation loss and per-sample correctness mask."""
        data, targets = batch
        predictions, intermediates = self(data)
        loss = NoPeekLoss(self.hparams.nopeek_weight)(
            data, intermediates, predictions, targets
        )
        correct = predictions.max(1)[1].eq(targets.flatten())
        return {"val_loss": loss, "val_correct": correct}
    def validation_epoch_end(self, outs):
        """Aggregate batch outputs into mean loss and overall accuracy."""
        preds = []
        total_loss = 0.0
        for out in outs:
            preds.append(out["val_correct"])
            total_loss += out["val_loss"]
        avg_loss = total_loss.true_divide(len(outs))
        preds = torch.cat(preds)
        acc = 100 * preds.sum().true_divide(preds.size(0))
        results = {"val_loss": avg_loss, "val_accuracy": acc}
        return {"progress_bar": results, "log": results}
    def test_step(self, batch, batch_idx):
        """Per-batch test loss and per-sample correctness mask."""
        data, targets = batch
        predictions, intermediates = self(data)
        loss = NoPeekLoss(self.hparams.nopeek_weight)(
            data, intermediates, predictions, targets
        )
        correct = predictions.max(1)[1].eq(targets.flatten())
        return {"test_loss": loss, "test_correct": correct}
    def test_epoch_end(self, outs):
        """Aggregate test batches into mean loss and overall accuracy."""
        preds = []
        total_loss = 0.0
        for out in outs:
            preds.append(out["test_correct"])
            total_loss += out["test_loss"]
        avg_loss = total_loss.true_divide(len(outs))
        preds = torch.cat(preds)
        acc = 100 * preds.sum().true_divide(preds.size(0))
        results = {"test_loss": avg_loss, "test_acc": acc}
        return {"avg_test_loss": avg_loss, "log": results}
    def prepare_data(self):
        """Download MNIST into ./data and build 40k train / 5k val / 5k
        test subsets (val and test split the official test set)."""
        data_transform = transforms.Compose(
            [
                transforms.ToTensor(),
                # PyTorch examples; https://github.com/pytorch/examples/blob/master/mnist/main.py
                transforms.Normalize((0.1307,), (0.3081,)),
            ]
        )
        data_dir = Path.cwd() / "data"
        self.train_data = Subset(
            MNIST(data_dir, download=True, train=True, transform=data_transform),
            range(40_000),
        )
        self.val_data = Subset(
            MNIST(data_dir, download=True, train=False, transform=data_transform),
            range(5000),
        )
        # Test data
        self.test_data = Subset(
            MNIST(data_dir, download=True, train=False, transform=data_transform),
            range(5000, 10_000),
        )
    def train_dataloader(self):
        return DataLoader(self.train_data, batch_size=self.hparams.batch_size)
    def val_dataloader(self):
        return DataLoader(self.val_data, batch_size=self.hparams.batch_size)
    def test_dataloader(self):
        return DataLoader(self.test_data, batch_size=self.hparams.batch_size)
class ReLUSplitNN(SplitNN):
    """SplitNN variant whose cut layer ends in ReLU instead of Tanh.

    NOTE(review): ReLU outputs are unbounded, so the boundedness the
    parent's Tanh provides (see its D.P. comment) does not hold here.
    """
    def __init__(self, hparams) -> None:
        super().__init__(hparams)
        # Override both halves; shapes match the parent, only the
        # cut-layer activation differs.
        self.part1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, 3, 1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(32, 64, 3, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
            torch.nn.Flatten(),
            torch.nn.Linear(9216, 500),
            torch.nn.ReLU(),
        )
        self.part2 = torch.nn.Sequential(
            torch.nn.Linear(500, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 10),
            torch.nn.Softmax(dim=1),
        )
989,980 | c75b47c5bb9a7717580a87b8d634824ba768abfb | from deuces.deuces import Card, Evaluator, Deck
evaluator = Evaluator()
def str2cards(s):
    """Parse a string like 'as8sqdtc3d3c' to a simple (flat) list of
    deuces card objects (in reality integers).
    """
    assert len(s) % 2 == 0
    str_list = []
    cards = []
    # Fix: use floor division so this also runs on Python 3, where '/'
    # on ints yields a float and range() would raise TypeError. On
    # Python 2 the result is unchanged.
    for cardnum in range(len(s) // 2):
        str_list.append(s[cardnum * 2 : cardnum * 2 + 2])
    for si in str_list:  # dropped unused enumerate index
        # deuces expects e.g. 'As': upper-case rank, lower-case suit.
        cstring = si[0].upper() + si[1].lower()
        cards.append(Card.new(cstring))
    return cards
def reduce_h(hand):
    """Reduce a hand like [As, Th] to a string like ATo.

    NOTE(review): sorts *hand* in place, mutating the caller's list.
    """
    assert(type(hand) == list)
    assert(len(hand) == 2)
    assert(type(hand[0]) == type(hand[1]) == int)
    # High card first so e.g. [Th, As] and [As, Th] reduce identically.
    hand.sort(reverse=True)
    hand_str = Card.int_to_str(hand[0])[0] + Card.int_to_str(hand[1])[0]
    # Suffix: 's' = suited, 'o' = offsuit.
    if Card.get_suit_int(hand[0]) == Card.get_suit_int(hand[1]):
        hand_str += 's'
    else:
        hand_str += 'o'
    return hand_str
def pr(x):
    """Simple abbreviation for pretty printing cards in deuces. Expects a
    list of deuces card integers."""
    Card.print_pretty_cards(x)
def ring_winners(b, players):
    """Given a board and a list-of-lists of hole cards, what is the list
    of indices of the winning players, and what string describes the
    winning hand?
    """
    winners = []
    winrank = ''
    # In deuces a LOWER evaluate() score is a stronger hand.
    s = [evaluator.evaluate(b, p) for p in players]
    for i, rank in enumerate(s):
        if rank == min(s):
            winners.append(i)
            # All winners share the minimal rank, so this is well defined.
            winrank = evaluator.class_to_string(evaluator.get_rank_class(rank))
    return [winners, winrank]
def _who_wins(b, p1, p2, printout = True):
    """Return 1 or 2 for the winning player, 0 on a push.

    In deuces a lower evaluate() score means a stronger hand.
    (Python 2 code.)
    """
    if printout:
        [pr(h) for h in [b, p1, p2]]
    s = [evaluator.evaluate(b, p) for p in [p1, p2]]
    r = [evaluator.class_to_string(evaluator.get_rank_class(x)) for x in s]
    if s[1] > s[0]:
        t = "P1 wins"
        winning_player = 1
    elif s[1] < s[0]:
        t = "P2 wins"
        winning_player = 2
    else:
        t = "push"
        winning_player = 0
    if printout:
        print ', '.join(map(str, s) + map(str, r) + [t])
    return winning_player
def draw_sure(deck, n, exclusions):
    """Draw n cards from a deck but skip any cards listed in
    exclusions. Please note this func always returns a list, unlike
    native deuces draw function.
    """
    drawn = []
    while len(drawn) < n:
        candidate = deck.draw()
        # Keep drawing until we have n cards that are neither excluded
        # nor already drawn.
        if candidate not in exclusions and candidate not in drawn:
            drawn.append(candidate)
    return drawn
def find_pcts(p1, p2, start_b = [], iter = 10000):
    """Given 2 players' hole cards and an optional board in any state,
    what is each player's chance of winning (equity)?

    Monte Carlo: completes the board *iter* times and tallies wins.
    NOTE(review): mutable default for start_b is only safe because it is
    never mutated here; 'iter' shadows the builtin.
    """
    win_record = []
    for i in range(iter):
        deck = Deck()
        # Fill the board out to 5 cards, avoiding all known cards.
        need = 5 - len(start_b)
        b2 = draw_sure(deck, need, p1+p2+start_b)
        win_record.append(_who_wins(start_b + b2, p1, p2, printout = False))
    # Pushes (0s) count in the denominator but in neither numerator.
    return [win_record.count(1) / float(len(win_record)),
            win_record.count(2) / float(len(win_record))
            ]
def find_pcts_multi(P, start_b = [], iter = 10000):
    """Given a list-of-lists of players' hole cards and an optional board
    in any state, what is each player's chance of winning (equity)?

    Monte Carlo over *iter* completed boards; ties award a win to every
    tied player. (Python 2 code: 'reduce' is the builtin.)
    """
    assert len(P) >= 2
    wins_per_player = [0] * len(P)
    # Flatten all hole cards so they are excluded from the runout draw.
    all_hole = reduce(lambda x,y: x+y, P)
    for i in range(iter):
        deck = Deck()
        need = 5 - len(start_b)
        b2 = draw_sure(deck, need, all_hole+start_b)
        # Lower deuces score wins; ties increment every minimal player.
        s = [evaluator.evaluate(start_b+b2, h) for h in P]
        for i, e in enumerate(s):
            if e == min(s):
                wins_per_player[i] += 1
    # Normalise by total wins (ties make the counts sum past iter).
    return [float(x) / sum(wins_per_player) for x in wins_per_player]
|
989,981 | c4b754d4d4f2c5bb83878c136b240072f2144989 | import pytest
class TransportPacket:
    """Minimal wrapper around one MPEG transport-stream packet's raw bytes."""

    def __init__(self, data):
        self.raw = data  # raw packet bytes

    def display(self):
        """Print the packet bytes as space-separated hex values."""
        # Fix: dropped the dead 'readable' accumulator that was built but
        # never used.
        for byte in self.raw:
            print(hex(byte), end=' ')
        print()

    def is_sync(self):
        """Return True when the first byte is the MPEG-TS sync byte 0x47."""
        return self.raw[0] == 0x47
|
989,982 | 7d30405de22dd3aef980b9252207216895ca209e | S = input()
N = len(S)
flag = [0,0,0]
if S=="".join(reversed(list(S))):
flag[0]=1
if S[:int((N-1)/2)]=="".join(reversed(list(S[:int((N-1)/2)]))):
flag[1]=1
if S[int((N+3)/2)-1:]=="".join(reversed(list(S[int((N+3)/2)-1:]))):
flag[2]=1
print("Yes" if flag==[1,1,1] else "No")
|
989,983 | 6692ad9a42422bcf12a9a85b932e58f48d55680f | import micropython
import xbee
import uio
import uos
from umqtt.simple import MQTTClient
from sys import stdin, stdout
import network
import machine
import json
import time
import utime
import sys
import gc
from sites.siteFile import pubTopic
# MQTT broker address and the topics this node uses, all taken from the
# per-site configuration module.
SERVER = pubTopic.server_Address
msg_Topic = pubTopic.MESSAGE_TOPIC
sub_Topic = pubTopic.SUB_TOPIC
temp_Topic = pubTopic.TEMP_PUB_TOPIC
precip_Topic = pubTopic.PRECIP_PUB_TOPIC
daily_Topic = pubTopic.DAILY_PUB_TOPIC
update_Topic = pubTopic.UPDATE_TOPIC
command_Topic = pubTopic.COMMAND_TOPIC
alert_Topic = pubTopic.ALERT_TOPIC
# Message currently being staged for publish.
msg_payload = ""
payload_Topic = ""
# Publish bookkeeping: in-memory backlog plus its length counter.
publish_Success = False
topic_ToSend = []
msg_ToSend = []
obs_publishReady = 0
# On-flash backlog files (survive resets) and read cursor/state flags.
topic_File = "topic.log"
msg_File = "msg.log"
file_lineRef = 0
tfile_readComplete = False
mfile_readComplete = False
files_Exist = False
CLIENT_ID = pubTopic.clientID # Should be unique for each device connected.
minute = 0
year = 0
TZ = pubTopic.clientTZ
# Watchdog/restart timing and once-per-interval flags for the main loop.
timeStart = 0
timeDiff = 0
rssiUpdated = False
timeUpdated = False
'''
AI_DESC = {
0x00: 'CONNECTED',
0x22: 'REGISTERING_TO_NETWORK',
0x23: 'CONNECTING_TO_INTERNET',
0x24: 'RECOVERY_NEEDED',
0x25: 'NETWORK_REG_FAILURE',
0x2A: 'AIRPLANE_MODE',
0x2B: 'USB_DIRECT',
0x2C: 'PSM_DORMANT',
0x2F: 'BYPASS_MODE_ACTIVE',
0xFF: 'MODEM_INITIALIZING',
}
'''
def publish_Backup():
    """Stage the current (payload_Topic, msg_payload) pair for publishing.

    Appends the pair to the in-memory backlog; daily and precipitation
    messages are additionally appended to the on-flash log files so they
    survive a reset. Clears the staging globals afterwards.
    """
    global msg_payload
    global payload_Topic
    global daily_Topic
    global precip_Topic
    global topic_ToSend
    global msg_ToSend
    global obs_publishReady
    global topic_File
    global msg_File
    topic_ToSend.append(payload_Topic)
    msg_ToSend.append(msg_payload)
    obs_publishReady = obs_publishReady + 1
    if (payload_Topic == daily_Topic or payload_Topic == precip_Topic):
        # Persist high-value observations to flash as well.
        log = uio.open(topic_File, mode="a")
        log.write(payload_Topic + "\n")
        time.sleep(0.5)
        log.close()
        log1 = uio.open(msg_File, mode="a")
        log1.write(msg_payload + "\n")
        time.sleep(0.5)
        log1.close()
    msg_payload = ""
    payload_Topic = ""
def publish_Obs(client_id=CLIENT_ID, hostname=SERVER, keepalive=60):
    """Flush the in-memory backlog to the MQTT broker.

    On a fully successful flush the backlog (and, once completely read,
    the on-flash log files) is cleared; on a connect error the error
    code is itself queued as a status message. Feeds the watchdog
    throughout so long flushes do not trigger a reset.
    """
    global topic_ToSend
    global msg_ToSend
    global obs_publishReady
    global publish_Success
    global wdt
    global tfile_readComplete
    global mfile_readComplete
    global timeStart
    publish_Success = False
    cnt = 0
    wdt.feed()
    client = MQTTClient(client_id, hostname)
    issue = client.connect()
    if issue == 0:
        while cnt < obs_publishReady:
            topic = topic_ToSend[cnt]
            msg = msg_ToSend[cnt]
            client.publish(topic, msg)
            wdt.feed()
            cnt = cnt + 1
        wdt.feed()
        time.sleep(0.5)
        client.disconnect()
        if cnt == obs_publishReady:
            # Everything went out: reset backlog and the restart timer.
            publish_Success = True
            timeStart = time.ticks_ms()
            obs_publishReady = 0
            topic_ToSend.clear()
            msg_ToSend.clear()
            if files_Exist == True and tfile_readComplete == True and mfile_readComplete == True:
                wdt.feed()
                # Flash backlog fully replayed: recreate empty log files.
                deleteFiles()
                time.sleep(1)
                createFiles()
            else:
                wdt.feed()
    else: ##client socket connection contained error to post to message que
        topic_ToSend.append(msg_Topic)
        msg_ToSend.append(issue)
        obs_publishReady = obs_publishReady + 1
def read_teensy():
    """Poll the Teensy UART for framed sensor lines and stage them for MQTT.

    Frames look like ``#<kind><payload>\\r`` with kind T = temperature,
    F = precipitation, A = alert. Temperature payloads with 7 commas are
    daily summaries; with 6 commas, regular updates. Each recognised
    frame is queued via publish_Backup().
    """
    global msg_payload
    global payload_Topic
    global temp_Topic
    global precip_Topic
    global alert_Topic
    global daily_Topic
    msg_read = False
    temp_Inbound = False
    precip_Inbound = False
    alert_Inbound = False
    dailyUpdate = False
    tempUpdate = False
    t_d = [""] * 8
    var = ""
    msg_payload = ""
    payload_Topic = ""
    trim_String = ""
    empty_Read = False
    time.sleep(3)
    c = stdin.buffer.read() # reads byte and returns first byte in buffer or None
    if c is None: #buffer is empty, move on
        empty_Read = True
    if empty_Read == False:
        temp_String = str(c.decode())
        tempBuff = temp_String.split('\n')
        pubCnt = len(tempBuff)
        i = 0
        while i < pubCnt:
            # Only lines carrying a '#' designator and trailing CR are frames.
            if (tempBuff[i].startswith('#') and tempBuff[i].endswith('\r')):
                if (tempBuff[i][1] == 'T'):
                    temp_Inbound = True
                elif (tempBuff[i][1] == 'F'):
                    precip_Inbound = True
                elif (tempBuff[i][1] == 'A'):
                    alert_Inbound = True
                else:
                    pass
                trim_String = tempBuff[i]
                # Strip the two-character designator and the trailing '\r'.
                trim_String = trim_String[2:len(trim_String)-1]
                if (temp_Inbound == True) or (precip_Inbound == True) or (alert_Inbound == True):
                    msg_payload = trim_String
                    msg_read = True
                    if precip_Inbound == True:
                        payload_Topic=precip_Topic
                        publish_Backup()
                    elif temp_Inbound == True:
                        if msg_payload.count(",") == 7: # daily has the pp value so there will be 7 "," delimiters
                            t_d = msg_payload.split(",")
                            dailyUpdate = True
                        elif msg_payload.count(",") == 6:
                            t_d = msg_payload.split(",")
                            tempUpdate = True
                        else:
                            # Unexpected field count: drop the frame.
                            msg_payload = ""
                            payload_Topic = ""
                    elif alert_Inbound == True:
                        # NOTE(review): alertUpdate is assigned but never read.
                        alertUpdate = True
                        payload_Topic=alert_Topic
                        publish_Backup()
                    else:
                        pass
                    if dailyUpdate == True:
                        # Re-frame the daily summary with the client id prefix.
                        msg_payload = pubTopic.clientID + ',' + t_d[0] + ',' + t_d[1] + ',' + t_d[2] + ',' + t_d[3] + ',' + t_d[4] + ',' + t_d[5] + ',' + t_d[6] + ',' + t_d[7]
                        payload_Topic=daily_Topic
                        publish_Backup()
                    if tempUpdate == True:
                        # Regular update: only timestamp plus last two fields.
                        msg_payload = pubTopic.clientID + ',' + t_d[0] + ",,,,," + t_d[5] + ',' + t_d[6]
                        payload_Topic=temp_Topic
                        publish_Backup()
                    else:
                        pass
            else:
                pass
            # Reset per-line state before processing the next buffered line.
            msg_read = False
            temp_Inbound = False
            precip_Inbound = False
            alert_Inbound = False
            dailyUpdate = False
            tempUpdate = False
            t_d = [""] * 8
            var = ""
            msg_payload = ""
            payload_Topic = ""
            trim_String = ""
            i = i + 1
def xbee_debug(desig = "", msgToTeensy = ""):
    """Send a CRLF-terminated frame to the Teensy over the UART.

    Only the '#T' designator carries a payload; any other designator is
    sent on its own.
    """
    if desig == "#T":
        frame = desig + msgToTeensy + "\r\n"
    else:
        frame = desig + "\r\n"
    stdout.buffer.write(frame)
def printLocalTime():
    """Send the current local time plus timezone to the Teensy as a '#T' frame.

    Format: YY,M,D,h,m,s,TZ (year sent as two digits).
    """
    global TZ
    global timeUpdated
    t1 = str(int(utime.localtime()[0]) - 2000) + "," + str(utime.localtime()[1]) + "," + str(utime.localtime()[2]) + "," + str(utime.localtime()[3]) + "," + str(utime.localtime()[4]) + "," + str(utime.localtime()[5])
    timeMsg = t1 + "," + TZ
    xbee_debug("#T",timeMsg + "\r\n")
    timeUpdated = True
def readFiles():
    """Replay up to (50 - queued) backlog entries from the on-flash logs
    into the in-memory publish queue.

    Reads topic and message files in lockstep starting at file_lineRef;
    sets the *_readComplete flags when a file is exhausted, otherwise
    advances file_lineRef for the next call.
    """
    global obs_publishReady
    global topic_ToSend
    global msg_ToSend
    global topic_File
    global msg_File
    global file_lineRef
    global tfile_readComplete
    global mfile_readComplete
    obs_ToAdd = 0
    temp_Line = file_lineRef
    start_Read = temp_Line
    currentNum = obs_publishReady
    with uio.open(topic_File, mode="r") as log:
        topics_pending = log.readlines()
        num = len(topics_pending)
        if num > 0:
            i = 0
            incr_Read = start_Read
            # Cap the queue at 50 pending entries per flush.
            while i < 50 - currentNum:
                t = topics_pending[incr_Read]
                topic_ToSend.append(t[:len(t)-1])
                incr_Read = incr_Read + 1
                if incr_Read == num:
                    # File exhausted: force loop exit and mark complete.
                    i = 50 - currentNum
                    tfile_readComplete = True
                    file_lineRef = 0
                else:
                    i = i + 1
                obs_ToAdd = obs_ToAdd + 1
            log.close()
            with uio.open(msg_File, mode="r") as log1:
                msg_pending = log1.readlines()
                num = len(msg_pending)
                i = 0
                incr_Read = start_Read
                while i < 50 - currentNum:
                    m = msg_pending[incr_Read]
                    msg_ToSend.append(m[:len(m)-1])
                    incr_Read = incr_Read + 1
                    if incr_Read == num:
                        i = 50 - currentNum
                        mfile_readComplete = True
                        file_lineRef = 0
                    else:
                        i = i + 1
                log1.close()
            obs_publishReady = obs_publishReady + obs_ToAdd
            if tfile_readComplete == False:
                # More entries remain: remember where to resume.
                file_lineRef = file_lineRef + obs_ToAdd
            else:
                pass
        else:
            # Empty backlog file: nothing to replay.
            log.close()
            tfile_readComplete = True
            mfile_readComplete = True
            file_lineRef = 0
def createFiles():
    """Create fresh, empty backlog files and reset the read state.

    mode="x" raises OSError if a file already exists, so this should only
    run after deleteFiles() or on first boot.
    """
    global topic_File
    global msg_File
    global file_lineRef
    global tfile_readComplete
    global mfile_readComplete
    log = uio.open(topic_File, mode="x")
    log.close()
    log1 = uio.open(msg_File, mode="x")
    log1.close()
    tfile_readComplete = True
    mfile_readComplete = True
    file_lineRef = 0
def deleteFiles():
    """Best-effort removal of the on-flash backlog files.

    Each file is opened first to confirm it exists; a missing file simply
    raises OSError and is skipped.
    """
    global topic_File
    global msg_File
    try:
        log = uio.open(topic_File)
        log.close()
        uos.remove(topic_File)
    except OSError:
        # File absent — nothing to delete.
        pass
    try:
        log1 = uio.open(msg_File)
        log1.close()
        uos.remove(msg_File)
    except OSError:
        pass
# --- Boot sequence -----------------------------------------------------
# Queue a startup banner and the reset cause as the first two messages.
topic_ToSend.append(msg_Topic)
msg_ToSend.append(pubTopic.clientID + " Cellular and MQTT Connected, v0.9")
bootMsg = str(machine.reset_cause())
topic_ToSend.append(msg_Topic)
msg_ToSend.append(bootMsg)
obs_publishReady = 2
#Pull pending topic/msg from log files
try:
    log = uio.open(topic_File)
    log.close()
    files_Exist = True
except OSError:
    files_Exist = False
    pass
if files_Exist == True:
    readFiles()
else:
    createFiles()
# Radio/modem configuration via AT commands (APN, power, timeouts,
# keepalives, operating mode, host FOTA, DNS, socket options).
xbee.atcmd("AN", "super")
xbee.atcmd("CP", 0)
xbee.atcmd("AM", 0)
xbee.atcmd("N#", 0)
xbee.atcmd("TM", 0x258)
xbee.atcmd("TS", 0x258)
xbee.atcmd("DO", 0x21)
xbee.atcmd("K1", 0x3C)
xbee.atcmd("K2", 0x3C)
xbee.atcmd("MO", 0x7)
xbee.atcmd("HM", 0x1)
xbee.atcmd("HF", 0x3C)
xbee.atcmd("DL", "0.0.0.0")
xbee.atcmd("DE", 0x0)
xbee.atcmd("C0", 0x0)
xbee.atcmd("DX", 0xA0000)
yearCheck = 2019
conn = network.Cellular()
timeStart = time.ticks_ms()
# Keep servicing the Teensy while waiting for cellular attach, a valid
# network-synced clock, and a 5-minute boundary to align the schedule.
while not conn.isconnected():
    read_teensy()
    time.sleep(2)
while (utime.localtime()[0]) < yearCheck:
    read_teensy()
    time.sleep(2)
while (utime.localtime()[4]%5) != 0:
    read_teensy()
    time.sleep(2)
time.sleep(1)
printLocalTime()
# 60 s watchdog; a missed feed soft-resets the board.
wdt = machine.WDT(timeout=60000, response=machine.SOFT_RESET)
# --- Main service loop --------------------------------------------------
while True:
    wdt.feed()
    read_teensy()
    wdt.feed()
    time.sleep(2)
    # Only publish when signal is adequate and the modem reports connected.
    xbeeRSSI = xbee.atcmd('DB')
    if xbeeRSSI != None:
        if xbeeRSSI < 105:
            if (obs_publishReady > 0):
                xbeeConn = xbee.atcmd('AI')
                if xbeeConn == 0:
                    gc.collect()
                    publish_Obs()
                else:
                    pass
            else:
                pass
        else:
            pass
    else:
        pass
    tCheck = time.ticks_ms()
    if tCheck < timeStart: ## cover wrap around timing
        timeStart = tCheck
    else:
        # Soft-reset if nothing has been published for 10 minutes.
        timeDiff = tCheck - timeStart
        if timeDiff > 600000:
            machine.soft_reset()
    wdt.feed()
    if publish_Success == True and file_lineRef > 0:
        # Continue replaying the flash backlog after a successful flush.
        readFiles()
    # Re-sync the Teensy clock every 15 minutes (once per boundary).
    if ((utime.localtime()[4]%15) == 0) and timeUpdated == False:
        printLocalTime()
        timeUpdated = True
        wdt.feed()
    if (timeUpdated == True) and ((utime.localtime()[4]%15) != 0):
        timeUpdated = False
    # Queue an RSSI status report every 30 minutes (once per boundary).
    if ((utime.localtime()[4]%30) == 0) and rssiUpdated == False:
        topic_ToSend.append(msg_Topic)
        msg_ToSend.append("RSSI: " + str(xbeeRSSI))
        obs_publishReady = obs_publishReady + 1
        rssiUpdated = True
        wdt.feed()
    if (rssiUpdated == True) and ((utime.localtime()[4]%30) != 0):
        rssiUpdated = False
    wdt.feed()
    time.sleep(0.200)
|
989,984 | 9299db17621431e56bd0a40cace8702a8bd4dfa8 | def remove_dups(lst):
result=[]
for x in lst:
if x not in result:
result.append(x)
return(result)
|
989,985 | b9bf8ea7a871d7380860f6f661c42ffe2f338303 | import random
import sys
from host import GO
def readInput(n, path="input.txt"):
    """Load a Go agent input file.

    Line 1 holds the piece type (1 or 2); the next n lines encode the
    previous board and the n after that the current board, one digit per
    cell. Returns (piece_type, previous_board, board).
    """
    with open(path, 'r') as f:
        lines = f.readlines()
    piece_type = int(lines[0])
    rows = [[int(ch) for ch in line.rstrip('\n')] for line in lines[1:2 * n + 1]]
    return piece_type, rows[:n], rows[n:]
def writeOutput(result, path="output.txt"):
    """Write a move to *path*: the literal 'PASS' or 'row,col'."""
    if result == "PASS":
        text = "PASS"
    else:
        text = str(result[0]) + ',' + str(result[1])
    with open(path, 'w') as f:
        f.write(text)
def writePass(path="output.txt"):
    """Record a PASS move in *path*."""
    with open(path, 'w') as out:
        out.write("PASS")
class RandomPlayer():
    """Go agent that plays a uniformly random legal move, or passes."""

    def __init__(self):
        self.type = 'random'

    def get_input(self, go, piece_type):
        """Return a random legal (row, col) for *piece_type* on board
        *go*, or the string 'PASS' when no legal placement exists."""
        candidates = [
            (row, col)
            for row in range(go.size)
            for col in range(go.size)
            if go.valid_place_check(row, col, piece_type, test_check=True)
        ]
        if not candidates:
            return "PASS"
        return random.choice(candidates)
if __name__ == "__main__":
    # Board size is fixed at 7x7 by the grading harness.
    N = 7
    piece_type, previous_board, board = readInput(N)
    go = GO(N)
    go.set_board(piece_type, previous_board, board)
    player = RandomPlayer()
    action = player.get_input(go, piece_type)
    writeOutput(action)
class Shout(object):
    """Wrap a piece of text and render it as an <h1> heading in notebooks."""

    def __init__(self, text):
        self.text = text

    def _repr_html_(self):
        # Hook used by IPython/Jupyter to obtain rich HTML output.
        return f"<h1>{self.text}</h1>"
|
# Read an (unused beyond format) length, the array, and the query value,
# then report whether the value occurs in the array.
N = int(input())
a = [int(i) for i in input().split()]
x = int(input())
ans = "YES" if x in a else "NO"
print(ans)
989,988 | 8338974335408fcd90d697841117c838d6a58fb2 | #!/usr/bin/env python
#3.6
import argparse
import random
import struct
import sys
import Queue
print(sys.version)
import rospy
import subprocess
import baxter_interface
from baxter_interface import CHECK_VERSION
import ikpy
import numpy as np
from ikpy import plot_utils
from ikpy.chain import Chain
from ikpy.link import OriginLink, URDFLink
import matplotlib.pyplot
from mpl_toolkits.mplot3d import Axes3D
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
from sensor_msgs.msg import JointState
#compares each element in order in arrays a and b and returns the largest numerical difference found
def getDiff(a, b):
    """Return the largest element-wise absolute difference between *a* and *b*.

    Vectorized with numpy instead of the original index loop; the leftover
    debug prints were removed.  *a* and *b* must have equal length.
    Returns -1 for empty inputs (matching the original's initial value).
    """
    diffs = np.absolute(np.asarray(a) - np.asarray(b))
    if diffs.size == 0:
        return -1
    return float(diffs.max())
#creates a dict out of a key array a and a value array b
def convertToDict(a, b):
    """Build a dict mapping each key in *a* to the value at the same index of *b*."""
    return {key: b[idx] for idx, key in enumerate(a)}
#rearange dictionary elements into a given order
def properOrder(angles, order):
    """Arrange *angles* (a name -> value dict) into the sequence given by *order*.

    Names missing from *angles* contribute 0.0.  Returns a numpy array.
    """
    values = [angles[name] if name in angles else 0. for name in order]
    return np.asarray(values)
def convertToStampedPose(hdr, sP):
    # Build a geometry_msgs PoseStamped from header *hdr* and a dict *sP*
    # holding "position" -> (x, y, z) and "orientation" -> (x, y, z, w).
    return PoseStamped(header=hdr,pose=
        Pose(
            position=Point(
                x=sP["position"][0],
                y=sP["position"][1],
                z=sP["position"][2]),
            orientation=Quaternion(
                x=sP["orientation"][0],
                y=sP["orientation"][1],
                z=sP["orientation"][2],
                w=sP["orientation"][3])
        ))
# LIFO buffer of the most recent joint-state messages; bounded at 8 so the
# subscriber callback below can discard an old entry once full.
jointStates = Queue.LifoQueue(maxsize=8)

def callback(data):
    # ROS subscriber callback: keep only the newest joint-state messages.
    if(jointStates.full()):
        jointStates.get()
    jointStates.put(data)
    #print(data)
class Wobbler(object):
    """Baxter demo: drive the left arm toward an IK target using ikpy.

    Builds an ikpy kinematic chain transcribed from Baxter's left-arm URDF
    and, in testJoint(), iterates joint-position commands toward the IK
    solution for a fixed Cartesian target frame.
    """

    def __init__(self):
        self._done = False
        self._head = baxter_interface.Head()
        self._left_arm = baxter_interface.limb.Limb('left')
        self._right_arm = baxter_interface.limb.Limb('right')
        # Debug dump of the left arm's current state.
        print(self._left_arm.joint_names())
        print(self._left_arm.joint_angles())
        print(self._left_arm.joint_velocities())
        print(self._left_arm.joint_efforts())
        print(self._left_arm.endpoint_pose())
        print(self._left_arm.endpoint_velocity())
        print(self._left_arm.endpoint_effort())
        # Chain transcribed from the Baxter URDF; the mask marks only the
        # seven arm joints as active (origin, mount and hand are fixed).
        self._left_arm_chain = Chain(name='left_arm',
            active_links_mask=[False,False,True,True,True,True,True,True,True,False],
            links=[
                OriginLink(),
                URDFLink(
                    name="left_torso_arm_mount",
                    translation_vector=[0.024645, 0.219645, 0.118588],
                    orientation=[0, 0, 0.7854],
                    rotation=[0, 0, 1],
                ),
                URDFLink(
                    name="left_s0",
                    translation_vector=[0.055695, 0, 0.011038],
                    orientation=[0, 0, 0],
                    rotation=[0, 0, 1],
                    bounds=(-1.70167993878, 1.70167993878)
                ),
                URDFLink(
                    name="left_s1",
                    translation_vector=[0.069, 0, 0.27035],
                    orientation=[-1.57079632679, 0, 0],
                    rotation=[0, 0, 1],
                    bounds=(-2.147, 1.047)
                ),
                URDFLink(
                    name="left_e0",
                    translation_vector=[0.102, 0, 0],
                    orientation=[1.57079632679, 0, 1.57079632679],
                    rotation=[0, 0, 1],
                    bounds=(-3.05417993878, 3.05417993878)
                ),
                URDFLink(
                    name="left_e1",
                    translation_vector=[0.069, 0, 0.26242],
                    orientation=[-1.57079632679, -1.57079632679, 0],
                    rotation=[0, 0, 1],
                    bounds=(-0.05, 2.618)
                ),
                URDFLink(
                    name="left_w0",
                    translation_vector=[0.10359, 0, 0],
                    orientation=[1.57079632679, 0, 1.57079632679],
                    rotation=[0, 0, 1],
                    bounds=(-3.059, 3.059)
                ),
                URDFLink(
                    name="left_w1",
                    translation_vector=[0.01, 0, 0.2707],
                    orientation=[-1.57079632679, -1.57079632679, 0],
                    rotation=[0, 0, 1],
                    bounds=(-1.57079632679, 2.094)
                ),
                URDFLink(
                    name="left_w2",
                    translation_vector=[0.115975, 0, 0],
                    orientation=[1.57079632679, 0, 1.57079632679],
                    rotation=[0, 0, 1],
                    bounds=(-3.059, 3.059)
                ),
                URDFLink(
                    name="left_hand",
                    translation_vector=[0, 0, 0.11355],
                    orientation=[0, 0, 0],
                    rotation=[0, 0, 1],
                )
            ])
        #self._left_arm_chain = ikpy.chain.Chain.from_urdf_file("/home/jbunker/ros_ws/src/baxter_common/baxter_description/urdf/baxter.urdf")
        # verify robot is enabled
        print("Getting robot state... ")
        self._rs = baxter_interface.RobotEnable(CHECK_VERSION)
        self._init_state = self._rs.state().enabled
        print("Enabling robot... ")
        self._rs.enable()
        print("Running. Ctrl-c to quit")

    def clean_shutdown(self):
        """
        Exits example cleanly by moving head to neutral position and
        maintaining start state
        """
        print("\nExiting example...")
        if self._done:
            self.set_neutral()
        if not self._init_state and self._rs.state().enabled:
            print("Disabling robot...")
            self._rs.disable()

    def set_neutral(self):
        """
        Sets the head back into a neutral pose
        """
        self._head.set_pan(0.0)

    def testJoint(self):
        """Zero both arms, solve IK for a fixed target frame, and iterate
        joint-position commands until the solution is reached (or 1000 steps).
        """
        self.set_neutral()
        print(self._left_arm.joint_names())
        target = (self._right_arm.joint_names())[0]
        print("targeting joint {0}".format(target))
        print("starting angle {0}".format(self._right_arm.joint_angle(target)))
        print("starting velocity {0}".format(self._right_arm.joint_velocity(target)))
        print("starting effort {0}".format(self._right_arm.joint_effort(target)))
        command_rate = rospy.Rate(1)
        control_rate = rospy.Rate(50)
        # Command all joints of both arms to 0.0 for ~0.1 s ("zeroing").
        commandDictLeft = {}
        for n in self._left_arm.joint_names():
            commandDictLeft[n] = 0.
        commandDictRight = {}
        for n in self._right_arm.joint_names():
            commandDictRight[n] = 0.
        start = rospy.get_time()
        while not rospy.is_shutdown() and (rospy.get_time() - start < 0.1):
            self._left_arm.set_joint_positions(commandDictLeft)
            self._right_arm.set_joint_positions(commandDictRight)
            control_rate.sleep()
        print("zeroing ended, recording starting waypoint...")
        # Drain a few buffered joint-state messages from the module queue.
        i = 0
        while(i < 3):
            state = jointStates.get()
            print(i)
            i += 1
        originP = [0.064027, 0.259027, 0.08]
        # Homogeneous 4x4 target frame: identity rotation, position (.7, 0, .5).
        target = np.asarray([[1, 0, 0, .7],
            [0, 1, 0, .0],
            [0, 0, 1, .5],
            [0, 0, 0, 1]])
        angles = self._left_arm.joint_angles()
        order = ["origin", "left_torso_arm_mount", "left_s0", "left_s1", "left_e0", "left_e1", "left_w0", "left_w1", "left_w2", "left_hand"]
        angles = properOrder(self._left_arm.joint_angles(),order)
        print(angles)
        print(target)
        print(self._left_arm_chain)
        solution = ikpy.inverse_kinematics.inverse_kinematic_optimization(chain=self._left_arm_chain, target_frame=target, starting_nodes_angles=angles)
        print(solution)
        inp = convertToDict(order,solution)
        # Step toward the IK solution until the largest joint error is small.
        i = 0
        while(getDiff(angles,solution) > 0.01 and i < 1000):
            print(getDiff(angles,solution))
            inp = convertToDict(order,solution)
            print(inp)
            self._left_arm.set_joint_positions(convertToDict(order,solution))
            #self._left_arm.set_joint_positions(commandDictLeft)
            control_rate.sleep()
            angles = properOrder(self._left_arm.joint_angles(),order)
            #print(angles)
            i += 1
        print(self._left_arm_chain)
        # Visualize the final chain configuration in 3D.
        ax = matplotlib.pyplot.figure().add_subplot(111, projection='3d')
        self._left_arm_chain.plot(ikpy.inverse_kinematics.inverse_kinematic_optimization(chain=self._left_arm_chain, target_frame=target, starting_nodes_angles=angles), ax)
        matplotlib.pyplot.show()
        #t_angles = self.left_arm_chain.inverse_kinematics(target=target_frame,initial_position=self._left_arm.joint_positions()))
        self._done = True
        rospy.signal_shutdown("Example finished.")
def main():
    """RSDK Head Example: Wobbler
    Nods the head and pans side-to-side towards random angles.
    Demonstrates the use of the baxter_interface.Head class.
    """
    # NOTE(review): docstring inherited from the head-wobbler example; this
    # script actually drives the left arm via IK in Wobbler.testJoint().
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    parser.parse_args(rospy.myargv()[1:])
    print("Initializing node... ")
    rospy.init_node("rsdk_dabber")#, log_level=rospy.DEBUG)
    # Feed incoming joint states to the module-level queue via callback().
    rospy.Subscriber("/robot/joint_states", JointState, callback)
    wobbler = Wobbler()
    rospy.on_shutdown(wobbler.clean_shutdown)
    print("Wobbling... ")
    wobbler.testJoint()
    print("Done.")


if __name__ == '__main__':
    main()
|
import sqlite3

conn = sqlite3.connect('meeting.db')
c = conn.cursor()


def _print_rows(result, empty_msg):
    """Render query rows (or *empty_msg* when there are none).

    Shared by both search modes; output format matches the original script.
    """
    if len(result) != 0:
        for row in result:
            print('\nID\tProject_name\tMeeting_time\tMeeting_date\tMeeting_topic')
            print(row[0],'\t',row[1],'\t',row[2],'\t',row[3],'\t\t',row[4])
    else:
        print(empty_msg)


#search by project_name or meeting_date
while 1:
    n = int(input('\n1.Search by project name\n2.Search by meeting date\n3.Quit searching\n\nYour choice : '))
    if n==1:
        name = input('\nEnter project name : ')
        # Parameterized query — safe against SQL injection.
        c.execute("select * from meeting where project_name=?",(name,))
        _print_rows(c.fetchall(), 'Project doesn\'t exist')
    elif n==2:
        time = input('\nEnter meeting date : ')
        c.execute("select * from meeting where meeting_date=?",(time,))
        _print_rows(c.fetchall(), 'No meeting found')
    else:
        print('Exitted')
        break

# Release the database handle once the user quits (was leaked before).
conn.close()
|
989,990 | efc31df5a8f55a7d2d6a67d47bde5326e500abba | """AD5272 - device access class for the AD5272 I2C potentiometer
Provides access to control the wiper positions for the AD5272 device with
helper methods to set potential differences and resistances in potential
divider and rheostat mode respectively.
Adam Davis, STFC Application Engineering Group.
"""
from i2c_device import I2CDevice, I2CException
class AD5272(I2CDevice):
    #AD5272 class.
    #
    #This class implements support to set the resistance across the digital potentiometer
    #
    def __init__(self, address=0x2F, **kwargs):
        #Initialise the AD5272 device.
        #:param address: The address of the AD5272 default: 0x2F when ADDR=0, 2E when ADD= FLOAT (see schematics)
        #
        I2CDevice.__init__(self, address, **kwargs)
        self.write8(0x1C, 0x02) # enable the update of wiper position by default
        #Read back current wiper settings
        self.write8(0x08, 0x00) # Have to write code 0x0800 to initiate a read of the wiper
        tmp=self.readU16(0) # read the result into tmp variable
        self.__wiper_pos = ((tmp&0x03) << 8) + ((tmp&0xFF00) >> 8) #mask off lower 8 bits and shift down 8, mask off upper 8 bits and bits 7-2 & shift up 8
        #read the contents of the control register
        #0x1 = 50-TP program enable 0 = default, dissable
        #0x2 = RDAC register write protect 0 = default, wiper position frozen, 1 = allow update via i2c
        #0x4 = Resistance performance enable 0 = default = enabled, 1 = dissbale
        #0x8 = 50-TP memory program success bit 0 = default = unsuccessful, 1 = successful
        #send the command to read the contents of the control register
        self.write8(0x20, 0x00) #send the command to read the contents of the control register
        # when read, byte swap to get register contents
        # NOTE(review): '>>' binds tighter than '&' in Python, so this computes
        # readU16(0) & (0xF00 >> 8) == readU16(0) & 0xF, not a byte swap as the
        # comment suggests — confirm intended bit layout against the datasheet.
        self.__control_reg = (self.readU16(0)&0xF00 >> 8)
        #Internal variable settings depending on device / voltage connections
        self.__num_wiper_pos = 1024      # 10-bit RDAC
        self.__tot_resistance = 100.0    # kiloohms end-to-end
        self.__low_pd = 0.0              # volts at the L terminal
        self.__high_pd = 3.3             # volts at the H terminal

    def set_total_resistance(self, resistance):
        #Sets the total resistance across the potentiometer for set_resistance()
        #:param resistance: Total resistance between H- and L- (Kiloohms)
        #
        self.__tot_resistance = float(resistance)

    def set_num_wiper_pos(self, positions):
        #Sets the number of write positions
        #:param resistance: Total resistance between H- and L- (Kiloohms)
        #
        self.__num_wiper_pos = int(positions)

    def set_resistance(self, resistance):
        #Sets the resistance of a given wiper in rheostat mode (see datasheet)
        #:param wiper: Wiper to set 0=A, 1=B
        #:param resistance: Desired resistance between H- and W- (Kiloohms)
        #
        if resistance < 0 or resistance > self.__tot_resistance:
            raise I2CException("Select a resistance between 0 and {:.2f}".format(self.__tot_resistance))
        self.__wiper_pos = int(resistance / self.__tot_resistance * self.__num_wiper_pos)
        # Command 0x4xx: write RDAC; split the 10-bit position across two bytes.
        self.write8(((self.__wiper_pos & 0xFF00) + 0x400)>>8, (self.__wiper_pos & 0xFF))

    def set_terminal_PDs(self, wiper, low, high):
        #Sets the potential difference for H- and L- on a given wiper for set_PD()
        #:param wiper: Wiper to set 0=A, 1=B
        #:param low: Low PD (Volts)
        #:param high: High PD (Volts)
        #
        # NOTE(review): __low_pd/__high_pd are initialised as floats, so
        # indexing them with [wiper] raises TypeError — this method appears
        # inherited from a dual-wiper driver and was never adapted.
        self.__low_pd[wiper] = float(low)
        self.__high_pd[wiper] = float(high)

    def set_PD(self, pd):
        #Sets the potential difference of a given wiper in potential divider mode (see datasheet)
        #:param wiper: Wiper to set 0=A, 1=B
        #:param pd: Target potential difference (Volts)
        #
        # NOTE(review): 'wiper' is not a parameter of this method, so this line
        # raises NameError if called; __wiper_pos is also an int and cannot be
        # indexed. Needs the same single-wiper rework as set_terminal_PDs.
        self.__wiper_pos[wiper] = int((pd - self.__low_pd) / (self.__high_pd - self.__low_pd) * self.__wiper_pos)
        self.write8(((self.__wiper_pos & 0xFF00) + 0x400)>>8, (self.__wiper_pos & 0xFF))

    def set_wiper(self, position):
        #Manually sets a wiper position
        #:param wiper: Wiper to set 0=A, 1=B
        #:param position: Target position [0-255]
        #
        self.__wiper_pos = int(position)
        self.write8(((self.__wiper_pos & 0xFF00) + 0x400)>>8, (self.__wiper_pos & 0xFF))

    def get_wiper(self, force=False):
        #Gets a wiper position
        #:param force: when True, re-read the wiper from the device instead of
        # returning the cached value
        #:returns: Current position [0-255]
        #
        if force:
            self.write8(0x08, 0x00) # Have to write code 0x8000 to initiate a read of the wiper
            tmp=self.readU16(0) # read the result into tmp variable
            self.__wiper_pos = ((tmp&0x03) << 8) + ((tmp&0xFF00) >> 8)
        return self.__wiper_pos

    def enable_50TP(self, enable):
        #Sets whether one can transfer the current RDAC setting to the memory
        if enable: self.write8(0x1C, self.__control_reg | 0x1)
        else: self.write8(0x1C, self.__control_reg & 0x6)

    def store_50TP(self, enable):
        #stores the current RDAC value in the 50TP memory locations
        #:returns: byte-swapped device response when enabled, otherwise None
        #
        if enable :
            self.write8(0x0C, 0x00) # move the contents of the RDAC register to the memory
            tmp=self.readU16(0) # read the result into tmp variable
            return (((tmp&0x03) << 8) + ((tmp&0xFF00) >> 8))

    def set_shutdown(self, enable):
        #Sets whether to use shutdown mode
        #:param enable: true - device enters shutdown mode, false - normal operation
        #
        if enable: self.write8(0x24, 0x1)
        else: self.write8(0x24, 0x0)
|
989,991 | a508362865a1a4268d17eda17be176ebf3dce5ad | from django import forms
from .models import GrievanceForm, ApplicationStatus
class StudentHomeViewForm(forms.ModelForm):
    """ModelForm exposing a student's grievance: uploaded documents, the ten
    station preferences, the allocation outcome and the query description."""
    class Meta:
        model = GrievanceForm
        fields = ('document1', 'document2', 'document3', 'document4', 'document5', 'preferedStation1',
                  'preferedStation2', 'preferedStation3', 'preferedStation4', 'preferedStation5',
                  'preferedStation6', 'preferedStation7', 'preferedStation8', 'preferedStation9',
                  'preferedStation10', 'allocatedStation', 'natureOfQuery', 'preferenceNumberOfAllocatedStation')
class ApplicationStatusForm(forms.ModelForm):
    """ModelForm for updating only the free-text status description."""
    class Meta:
        model = ApplicationStatus
        fields = ('description',)
|
989,992 | 387b0ed278b0fef3c0a5953227cf6d16534ddbf0 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""NowcastingPlus is a basic model for short-term forecasting.
This modules contains class NowcastingParams, which is the class parameter
and class NowcastingPlusModel, which is the model.
Typical usage example:
nr = NowcastingPlusModel(data = data, params = NowcastingParams(step = 10))
nr.feature_extraction()
nr.label_extraction()
nr.fit()
output = nr.predict()
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Any, List
import kats.models.model as m
import numpy as np
import pandas as pd
from kats.consts import Params, TimeSeriesData
from kats.models.nowcasting.feature_extraction import LAG, MA, MOM, ROC
from kats.models.nowcasting.model_io import deserialize_from_zippy, serialize_for_zippy
from sklearn import linear_model, preprocessing
from sklearn.linear_model import LinearRegression
def poly(df: pd.DataFrame, n: int) -> pd.DataFrame:
    """Return *df* with an appended column ``poly_<n>`` equal to ``df.x ** n``.

    *df* must already contain an ``x`` column; the input frame is not
    modified in place (``DataFrame.join`` returns a new frame).
    """
    # Renamed local (was `poly`, shadowing this function's own name); the
    # explicit annotations replace the previous pyre-fixme suppressions.
    power_col = pd.Series(df.x ** n, name="poly_" + str(n))
    return df.join(power_col)
class NowcastingParams(Params):
    """The class for Nowcasting Parameters.
    Takes parameters for class NowcastingModel.
    Attributes:
        step: An integer indicating how many steps ahead we are forecasting. Default is 1.
    """

    # pyre-fixme[2]: Parameter must be annotated.
    def __init__(self, step: int = 1, **kwargs) -> None:
        super().__init__()
        self.step = step
        # NOTE(review): message says "QuadraticModel" — looks copy/pasted from
        # another model class; confirm before relying on these logs.
        logging.debug(f"Initialized QuadraticModel with parameters: step:{step}")

    # pyre-fixme[3]: Return type must be annotated.
    def validate_params(self):
        """Raises: NotImplementedError("Subclasses should implement this!")."""
        logging.warning("Method validate_params() is not implemented.")
        raise NotImplementedError("Subclasses should implement this!")
# pyre-fixme[24]: Generic type `m.Model` expects 1 type parameter.
class NowcastingPlusModel(m.Model):
    """The class for NowcastingPlus Model.
    This class performs data processing and short term prediction, for time series
    based on machine learning methodology.
    Attributes:
        TimeSeriesData: Time Series Data Source.
        NowcastingParams: parameters for Nowcasting.
    """

    def __init__(
        self,
        data: TimeSeriesData,
        params: NowcastingParams,
        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
        model: Any = None,
        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
        poly_model: Any = None,
        # NOTE(review): mutable default arguments below are shared across
        # calls; safe only if callers never mutate them — verify.
        feature_names: List[str] = [],
        poly_feature_names: List[str] = [],
        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
        scaler: Any = None,
        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
        label_scaler: Any = None,
        # pyre-fixme[2]: Parameter annotation cannot be `Any`.
        y_train_season_obj: Any = None,
    ) -> None:
        super().__init__(data, params)
        # pyre-fixme[16]: Optional type has no attribute `value`.
        if not isinstance(self.data.value, pd.Series):
            msg = "Only support univariate time series, but get {type}.".format(
                type=type(self.data.value)
            )
            logging.error(msg)
            raise ValueError(msg)
        # pyre-fixme[4]: Attribute must be annotated.
        self.df = data.to_dataframe()
        # pyre-fixme[4]: Attribute must be annotated.
        self.step = params.step
        self.model = model
        self.feature_names = feature_names
        self.poly_model = poly_model
        # pyre-fixme[4]: Attribute must be annotated.
        self.df_poly = data.to_dataframe()
        self.poly_feature_names = poly_feature_names
        # pyre-fixme[4]: Attribute must be annotated.
        self.df_nowcasting = data.to_dataframe()
        self.scaler = scaler
        self.label_scaler = label_scaler
        self.y_train_season_obj = y_train_season_obj

    def feature_extraction(self) -> None:
        """
        Extracts features for time series data.

        Builds a polynomial (degree 0-5) seasonal fit over minute-of-day in
        df_poly, then technical-analysis features (MOM/ROC/LAG/MA) in df.
        """
        # Add the hour, minute, and x column to the data
        self.df_poly["hour"] = self.df_poly["time"].apply(lambda y: y.hour)
        self.df_poly["minute"] = self.df_poly["time"].apply(lambda y: y.minute)
        self.df_poly["x"] = self.df_poly["hour"] * 60 + self.df_poly["minute"]
        # Empty list to hold the feature names
        poly_feature_names = []
        # Add the poly columns to the df_poly
        for degree in [0, 1, 2, 3, 4, 5]:
            self.df_poly = poly(self.df_poly, degree)
            poly_feature_names.append("poly_" + str(degree))
        # filterout + - inf, nan
        self.df_poly = self.df_poly[
            ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)
        ]
        # Save the poly feature name
        self.poly_feature_names = poly_feature_names
        feature_names = []
        #########################################################################################
        train_index_poly = self.df_poly[
            ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)
        ].index
        X_train_poly, y_train_poly = (
            self.df_poly[self.poly_feature_names].loc[train_index_poly],
            self.df_poly["y"].loc[train_index_poly],
        )
        # Build the Polynomial Regression Model
        lin_reg = LinearRegression()
        lin_reg.fit(X_train_poly, y_train_poly)
        self.poly_model = lin_reg
        y_train_season = lin_reg.predict(X_train_poly)
        self.y_train_season_obj = y_train_season
        #########################################################################################
        for n in [10, 15, 20, 25, 30]:
            self.df = MOM(self.df, n)
            feature_names.append("MOM_" + str(n))
        for n in [10, 15, 20, 25, 30]:
            self.df = ROC(self.df, n)
            feature_names.append("ROC_" + str(n))
        for n in [1, 2, 3, 4, 5]:
            self.df = LAG(self.df, n)
            feature_names.append("LAG_" + str(n))
        for n in [10, 20, 30]:
            self.df = MA(self.df, n)
            feature_names.append("MA_" + str(n))
        self.df = self.df[
            ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)
        ]  # filterout + - inf, nan
        self.feature_names = feature_names

    def label_extraction(self) -> None:
        """Extracts labels from time series data."""
        self.df["label"] = self.df["y"]

    ###################### module 1: for offline training ######################
    def fit(self) -> None:
        """Fits model.

        Trains a LassoCV regressor on standardized features to predict the
        step-ahead change of the deseasonalized (season-subtracted) series.
        """
        logging.debug(
            "Call fit() with parameters: " "step:{step}".format(step=self.step)
        )
        n = 1
        train_index = self.df[~self.df.isin([np.nan, np.inf, -np.inf]).any(1)].index
        X_train = self.df[self.feature_names].loc[train_index]
        std_scaler = preprocessing.StandardScaler()
        X_train = std_scaler.fit_transform(X_train)
        self.scaler = std_scaler
        n = self.step
        y_train = (
            self.df["label"].loc[train_index] - self.y_train_season_obj[train_index]
        ).diff(-n)[:-n]
        X_train = X_train[:-n]
        reg = linear_model.LassoCV()
        reg.fit(X_train, y_train)
        self.model = reg

    def save_model(self) -> bytes:
        """Saves sklearn model as bytes."""
        return serialize_for_zippy(self.model)

    ###################### module 2: for online prediction ######################
    # pyre-fixme[14]: `predict` overrides method defined in `Model` inconsistently.
    # pyre-fixme[3]: Return type must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    def predict(self, **kwargs):
        """Predicts the time series in the future.
        Nowcasting forecasts at the time unit of step ahead.
        This is in order to keep precision and different from usual algorithms.
        Returns:
            A float variable, the forecast at future step.
        """
        logging.debug(
            "Call predict() with parameters. "
            "Forecast 1 step only, kwargs:{kwargs}".format(kwargs=kwargs)
        )
        X_test = self.df[-self.step :][self.feature_names]
        X_test = self.scaler.transform(X_test)
        y_predict = self.model.predict(X_test)
        # Recombine: deseasonalized level minus predicted change plus the
        # seasonal value at the next step of the fitted curve.
        poly_now = self.y_train_season_obj[-1]
        first_occ = np.where(self.y_train_season_obj == poly_now)
        polynext = self.y_train_season_obj[first_occ[0][0] + self.step]
        now = self.df["y"][-self.step :]
        return (now - poly_now) - y_predict + polynext

    # pyre-fixme[3]: Return type must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    def predict_polyfit(self, model=None, df=None, **kwargs):
        """Return only the seasonal (polynomial-fit) forecast at the future step."""
        poly_now = self.y_train_season_obj[-1]
        first_occ = np.where(self.y_train_season_obj == poly_now)
        polynext = self.y_train_season_obj[first_occ[0][0] + self.step]
        return polynext

    def load_model(self, model_as_bytes: bytes) -> None:
        """Loads model_as_str and decodes into the class NowcastingModel.
        Args:
            model_as_bytes: a binary variable, indicating whether to read as bytes.
        """
        self.model = deserialize_from_zippy(model_as_bytes)

    # pyre-fixme[14]: `plot` overrides method defined in `Model` inconsistently.
    # pyre-fixme[3]: Return type must be annotated.
    def plot(self):
        """Raises: NotImplementedError("Subclasses should implement this!")."""
        raise NotImplementedError("Subclasses should implement this!")

    # pyre-fixme[3]: Return type must be annotated.
    def __str__(self):
        """Returns the name as Nowcasting."""
        return "Nowcasting"
|
# Read the sizes line (unused), the array, and two sets; score each array
# element +1 if it is in the first set and -1 if it is in the second.
_ = input().split(' ')
my_list = [int(tok) for tok in input().split(' ')]
first_set = set(int(tok) for tok in input().split(' '))
second_set = set(int(tok) for tok in input().split(' '))
p = sum((item in first_set) - (item in second_set) for item in my_list)
print(p)
|
989,994 | 52d67cc8967c1e37ed4d4a5a12401e55132c04ce | # Generated by Django 3.1.1 on 2021-01-04 09:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: replaces the old HostNotmonitor model with
    # host_notmonitor (same field set, new model/table name).

    dependencies = [
        ('overview', '0004_auto_20210104_1707'),
    ]

    operations = [
        migrations.CreateModel(
            name='host_notmonitor',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('ip', models.GenericIPAddressField()),
                ('owner', models.CharField(max_length=30, null=True)),
                ('department', models.CharField(max_length=20)),
                ('state', models.IntegerField()),
                ('deleted', models.BooleanField()),
                ('joinTime', models.BigIntegerField()),
                ('updateTime', models.BigIntegerField()),
            ],
        ),
        migrations.DeleteModel(
            name='HostNotmonitor',
        ),
    ]
|
989,995 | 7308465ba85622d1a362a13ed986bade61705c17 | import reversion
from django.utils.timezone import now
from rest_framework import status
from rest_framework.reverse import reverse
from reversion.models import Version
from datahub.company.constants import BusinessTypeConstant
from datahub.company.models import Company
from datahub.company.test.factories import CompanyFactory
from datahub.core.constants import Country, UKRegion
from datahub.core.reversion import EXCLUDED_BASE_MODEL_FIELDS
from datahub.core.test_utils import (
APITestMixin,
format_date_or_datetime,
random_obj_for_model,
)
from datahub.metadata.models import Sector
class TestCompanyVersioning(APITestMixin):
    """
    Tests for versions created when interacting with the company endpoints.

    Each mutation endpoint (create/update/archive/unarchive) should create
    exactly one django-reversion Version; failed (400) requests must not.
    """

    def test_add_creates_a_new_version(self):
        """Test that creating a company creates a new version."""
        assert Version.objects.count() == 0

        response = self.api_client.post(
            reverse('api-v4:company:collection'),
            data={
                'name': 'Acme',
                'trading_names': ['Trading name'],
                'business_type': {'id': BusinessTypeConstant.company.value.id},
                'sector': {'id': random_obj_for_model(Sector).id},
                'address': {
                    'line_1': '75 Stramford Road',
                    'town': 'London',
                    'country': {
                        'id': Country.united_kingdom.value.id,
                    },
                },
                'uk_region': {'id': UKRegion.england.value.id},
            },
        )

        assert response.status_code == status.HTTP_201_CREATED
        response_data = response.json()
        assert response_data['name'] == 'Acme'
        assert response_data['trading_names'] == ['Trading name']

        company = Company.objects.get(pk=response_data['id'])

        # check version created
        assert Version.objects.get_for_object(company).count() == 1
        version = Version.objects.get_for_object(company).first()
        assert version.revision.user == self.user
        assert version.field_dict['name'] == 'Acme'
        assert version.field_dict['trading_names'] == ['Trading name']
        # audit-excluded base-model fields must not be captured in the version
        assert not any(set(version.field_dict) & set(EXCLUDED_BASE_MODEL_FIELDS))

    def test_add_400_doesnt_create_a_new_version(self):
        """Test that if the endpoint returns 400, no version is created."""
        assert Version.objects.count() == 0

        response = self.api_client.post(
            reverse('api-v4:company:collection'),
            data={'name': 'Acme'},
        )

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert Version.objects.count() == 0

    def test_update_creates_a_new_version(self):
        """Test that updating a company creates a new version."""
        company = CompanyFactory(name='Foo ltd.')

        assert Version.objects.get_for_object(company).count() == 0

        response = self.api_client.patch(
            reverse('api-v4:company:item', kwargs={'pk': company.pk}),
            data={'name': 'Acme'},
        )

        assert response.status_code == status.HTTP_200_OK
        assert response.json()['name'] == 'Acme'

        # check version created
        assert Version.objects.get_for_object(company).count() == 1
        version = Version.objects.get_for_object(company).first()
        assert version.revision.user == self.user
        assert version.field_dict['name'] == 'Acme'

    def test_update_400_doesnt_create_a_new_version(self):
        """Test that if the endpoint returns 400, no version is created."""
        company = CompanyFactory()

        response = self.api_client.patch(
            reverse('api-v4:company:item', kwargs={'pk': company.pk}),
            data={'trading_names': ['a' * 600]},
        )

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert Version.objects.get_for_object(company).count() == 0

    def test_archive_creates_a_new_version(self):
        """Test that archiving a company creates a new version."""
        company = CompanyFactory()
        assert Version.objects.get_for_object(company).count() == 0

        url = reverse('api-v4:company:archive', kwargs={'pk': company.id})
        response = self.api_client.post(url, data={'reason': 'foo'})

        assert response.status_code == status.HTTP_200_OK
        response_data = response.json()
        assert response_data['archived']
        assert response_data['archived_reason'] == 'foo'

        # check version created
        assert Version.objects.get_for_object(company).count() == 1
        version = Version.objects.get_for_object(company).first()
        assert version.revision.user == self.user
        assert version.field_dict['archived']
        assert version.field_dict['archived_reason'] == 'foo'

    def test_archive_400_doesnt_create_a_new_version(self):
        """Test that if the endpoint returns 400, no version is created."""
        company = CompanyFactory()
        assert Version.objects.get_for_object(company).count() == 0

        url = reverse('api-v4:company:archive', kwargs={'pk': company.id})
        # missing required 'reason' -> 400
        response = self.api_client.post(url)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert Version.objects.get_for_object(company).count() == 0

    def test_unarchive_creates_a_new_version(self):
        """Test that unarchiving a company creates a new version."""
        company = CompanyFactory(
            archived=True, archived_on=now(), archived_reason='foo',
        )
        assert Version.objects.get_for_object(company).count() == 0

        url = reverse('api-v4:company:unarchive', kwargs={'pk': company.id})
        response = self.api_client.post(url)

        assert response.status_code == status.HTTP_200_OK
        response_data = response.json()
        assert not response_data['archived']
        assert response_data['archived_reason'] == ''

        # check version created
        assert Version.objects.get_for_object(company).count() == 1
        version = Version.objects.get_for_object(company).first()
        assert version.revision.user == self.user
        assert not version.field_dict['archived']
class TestAuditLogView(APITestMixin):
    """Tests for the audit log view."""

    def test_audit_log_view(self):
        """Test retrieval of audit log."""
        initial_datetime = now()
        # First revision: creates the company (not expected in the diff list).
        with reversion.create_revision():
            company = CompanyFactory(
                description='Initial desc',
            )

            reversion.set_comment('Initial')
            reversion.set_date_created(initial_datetime)
            reversion.set_user(self.user)

        changed_datetime = now()
        # Second revision: the change whose diff the endpoint should report.
        with reversion.create_revision():
            company.description = 'New desc'
            company.save()

            reversion.set_comment('Changed')
            reversion.set_date_created(changed_datetime)
            reversion.set_user(self.user)

        versions = Version.objects.get_for_object(company)
        version_id = versions[0].id
        url = reverse('api-v4:company:audit-item', kwargs={'pk': company.pk})

        response = self.api_client.get(url)
        response_data = response.json()['results']

        # No need to test the whole response
        assert len(response_data) == 1
        entry = response_data[0]

        assert entry['id'] == version_id
        assert entry['user']['name'] == self.user.name
        assert entry['comment'] == 'Changed'
        assert entry['timestamp'] == format_date_or_datetime(changed_datetime)
        assert entry['changes']['description'] == ['Initial desc', 'New desc']
        assert not set(EXCLUDED_BASE_MODEL_FIELDS) & entry['changes'].keys()
|
989,996 | 0b910a2daaa7b40f477c2c00a0756ce18305e2b2 | import secrets
import string
import typing
import jwt
class InvalidToken(Exception):
    """Raised when a JWT fails validation (bad signature or malformed token)."""
    pass
def generate_jwt_token(secret: str) -> str:
    """Create an HS256-signed token with an {"usage": "authentication"} payload."""
    encoded_jwt = jwt.encode({"usage": "authentication"}, secret, algorithm="HS256")
    # NOTE(review): assumes PyJWT 1.x, where encode() returns bytes; on
    # PyJWT >= 2 it returns str and .decode() would raise — confirm the pin.
    return encoded_jwt.decode("utf-8")
def validate_jwt_token(token: str, secret: str) -> typing.Optional[typing.Dict]:
    """Decode *token* with *secret* and return the decoded payload.

    Raises:
        InvalidToken: if the token cannot be decoded (bad signature/format).
    """
    try:
        # Bug fix: the decoded payload was previously discarded, so the
        # function always returned None despite its Optional[Dict] annotation.
        return jwt.decode(token, secret, algorithms=["HS256"])
    except jwt.exceptions.DecodeError:
        raise InvalidToken
def make_temporary_password() -> str:
    """Return a 24-character random alphanumeric temporary password."""
    return make_random_token(24)
def make_jwt_secret() -> str:
    """Return a 60-character random alphanumeric secret for signing JWTs."""
    return make_random_token(60)
def make_random_token(char_length: int = 24) -> str:
    """Return a cryptographically random alphanumeric string of *char_length*."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    chars = [secrets.choice(alphabet) for _ in range(char_length)]
    return "".join(chars)
989,997 | 4d05fe08e21c2ce181eca2efd2ad5bec0fc2aaba | from django.conf.urls import include, url
from django.contrib import admin
import pollsapi.views
# Route table for the polls API endpoints.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^polls/$', pollsapi.views.PollList.as_view()),
    # NOTE(review): unlike the sibling routes this pattern has no '^' anchor,
    # so it matches any URL whose path contains "polls/<pk>/" — confirm intended.
    url(r'polls/(?P<pk>[0-9]+)/$', pollsapi.views.PollDetail.as_view()),
    url(r'^create_user/$', pollsapi.views.UserCreate.as_view()),
    url(r'^choices/(?P<pk>[0-9]+)/$', pollsapi.views.ChoiceDetail.as_view()),
    url(r'^create_vote/$', pollsapi.views.CreateVote.as_view()),
    url(r'^users/(?P<pk>[0-9]+)/$', pollsapi.views.UserDetail.as_view()),
]
|
# Prints a Korean greeting and a congratulation on starting programming.
print("안녕하세요?")
print("programming에 입문하신 것을 축하드립니다.")
989,999 | 39f8928e22c9ceb08719a3fdbb7353a776c2acac | import pytest
from plotboss.plotlog import PlotLogParser
from datetime import datetime
import pendulum
def test_log_parser():
    # Exercise PlotLogParser against a recorded chia plotter log and verify
    # every field it is expected to extract.
    p = PlotLogParser()
    # Start from zeroed fields so the assertions below prove feed() set them.
    p.buffer = 0
    p.size = 0
    p.buckets = 0
    p.num_threads = 0
    assert p.size == 0
    assert p.buffer == 0
    assert p.buckets == 0
    assert p.num_threads == 0
    with open('./tests/assets/plot.logfile') as f:
        lines = f.readlines()
        p.feed(lines)

    assert p.phase == 4
    assert p.total_time == 39945.08
    assert p.tmp_dir == '/farm/yards/901'
    assert p.tmp2_dir == '/farm/yards/901'
    assert p.pool_key == '0b6f2b9428744d5062a2073e14b3ca9896a71f7ca9850bdcb285f26108fb19f610c788d47e4830c4c7abfa7611e00168'
    assert p.farmer_key == '93222af1a0f7b2ff39f98eb87c1b609fea797798302a60d1f1d6e5152cfdce12c260325d78446e7b8758101b64f43bd5'
    assert p.size == 32
    assert p.buffer == 4000
    assert p.buckets == 128
    assert p.num_threads == 4
    assert p.start_time == pendulum.local(2021, 4, 4, 19, 0, 50)
    assert p.complete_time == pendulum.local(2021, 4, 5, 6, 6, 35)
    assert p.target_path == "/farm/wagons/801/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot"
    assert p.final_dir == '/farm/wagons/801'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.