hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2893612d9bb5f812e7e498a10ba625355b7d1dee | 1,794 | py | Python | clientV4.py | sekranmert/AWS-Arduino-SmartHomeSystem | 80f4b6a5871fccb3bfc065d3fac5ba09feec525a | [
"MIT"
] | 1 | 2021-06-24T14:24:39.000Z | 2021-06-24T14:24:39.000Z | clientV4.py | sekranmert/AWS-Arduino-SmartHomeSystem | 80f4b6a5871fccb3bfc065d3fac5ba09feec525a | [
"MIT"
] | null | null | null | clientV4.py | sekranmert/AWS-Arduino-SmartHomeSystem | 80f4b6a5871fccb3bfc065d3fac5ba09feec525a | [
"MIT"
] | null | null | null | import socket
import threading
# Help text shown at start-up and printed again in response to '-h'.
helpMessage = '-q -- close connection\n-l -- list of connected devices\n-t -- server time \n-s "arduino/client ""reciever name" "message" -- send message (messages can be max 100 character) \nif reciever is an arduino board it can be controlled by this messsage:\n -s arduino "arduino name" led "0/1/status" \n'
print("connecting...\n for command list write '-h' \n"+helpMessage)
host = '127.0.0.1' # 127.0.0.1 for local
port = 9999 # 9999 for local
# NOTE(review): this rebinds the name `socket`, shadowing the imported
# module; it works only because the module is never used again below.
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.connect((host, port))
def recvTh():
    """Receiver loop: read server messages until the connection drops.

    Protocol (server -> client):
      't'  -- keep-alive probe; reply with 'c'.
      'n'  -- server requests a client name; prompt the user and send it.
      else -- plain text, print it.

    Runs in its own thread and relies on the module-level ``socket``
    object (the connected socket, which shadows the stdlib module).
    """
    while True:
        try:
            message = socket.recv(100).decode('ascii')
            if message == 't':
                socket.send("c".encode('ascii'))
            elif message == 'n':
                name = input("Enter your client name: ")
                socket.send(name.encode('ascii'))
            else:
                print(message+"\n")
        except ConnectionAbortedError:
            # Normal shutdown path: sendTh() closed the socket after '-q'.
            break
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; catching Exception lets those propagate.
            print("connection error")
            socket.close()
            break
def sendTh():
    """Sender loop: read console input and forward commands to the server.

    '-h' prints the local help text, '-q' notifies the server and closes
    the connection; anything else is sent verbatim.  Runs in its own
    thread and relies on the module-level ``socket`` and ``helpMessage``.
    """
    while True:
        message = input()
        if (len(message)<= 1024):
            tokens = message.split()
            if not tokens:
                # A blank line used to crash this thread with IndexError
                # on tokens[0]; ignore empty input instead.
                continue
            if tokens[0] == '-h':
                print(helpMessage)
            elif tokens[0] == '-q':
                print("quiting")
                socket.send('-q'.encode('ascii'))
                socket.close()
                break
            else:
                socket.send(message.encode('ascii'))
        else:
            print("message must be under 1024 char")
# Run the receive and send loops concurrently; the process exits when the
# user quits ('-q') or the connection is lost.
recvThread = threading.Thread(target=recvTh)
sendThread = threading.Thread(target=sendTh)
recvThread.start()
sendThread.start()
| 32.618182 | 312 | 0.545151 |
289396e6e160ca17355478e692561082d33da8f2 | 3,885 | py | Python | data_loader/data_loaders.py | ChunpingQiu/Sen2LCZ_CNN | 5576567da658f945321280f37ff8d9bf46dd1818 | [
"MIT"
] | null | null | null | data_loader/data_loaders.py | ChunpingQiu/Sen2LCZ_CNN | 5576567da658f945321280f37ff8d9bf46dd1818 | [
"MIT"
] | null | null | null | data_loader/data_loaders.py | ChunpingQiu/Sen2LCZ_CNN | 5576567da658f945321280f37ff8d9bf46dd1818 | [
"MIT"
] | 1 | 2021-08-19T03:35:05.000Z | 2021-08-19T03:35:05.000Z | from torchvision import datasets, transforms
from base import BaseDataLoader
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import torch
from skimage import io#, transform
import numpy as np
class MnistDataLoader(BaseDataLoader):
    """DataLoader for torchvision MNIST built on BaseDataLoader.

    Downloads the dataset into ``data_dir`` if necessary and normalises
    images with the standard MNIST mean/std.
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
        normalize = transforms.Normalize((0.1307,), (0.3081,))
        pipeline = transforms.Compose([transforms.ToTensor(), normalize])
        self.data_dir = data_dir
        self.dataset = datasets.MNIST(
            self.data_dir, train=training, download=True, transform=pipeline)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class LCZDataLoader(BaseDataLoader):
    """LCZ patch data loading using BaseDataLoader.

    Wraps LCZdataset with a random 64x64 crop followed by tensor
    conversion.  ``training`` is accepted for interface parity with
    MnistDataLoader but is currently unused.  (Docstring previously said
    "MNIST" — a copy-paste leftover; dead commented code removed.)
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
        self.data_dir = data_dir
        self.dataset = LCZdataset(
            self.data_dir,
            transform=transforms.Compose([RandomCrop(64), ToTensor()]))
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class LCZdataset(Dataset):
    """Dataset of multispectral image patches with LCZ class labels.

    The CSV has one row per sample: column 0 is the image path, column 1
    the 1-based class label; __getitem__ returns the label shifted to
    0-based.
    """
    def __init__(self, csv_file, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        # Attribute name kept from the PyTorch tutorial this was adapted
        # from; it actually holds (image path, class label) rows.
        self.landmarks_frame = pd.read_csv(csv_file)
        self.transform = transform
    def __len__(self):
        return len(self.landmarks_frame)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.landmarks_frame.iloc[idx, 0]
        image = io.imread(img_name)
        classLabel = self.landmarks_frame.iloc[idx, 1]
        classLabel = np.array([classLabel])
        #landmarks = landmarks.astype('float').reshape(-1, 2)
        #print(image.shape)
        # Keep 10 of the spectral bands and rescale to [0, 1] by dividing
        # by 10000 (assumes Sentinel-2-style integer reflectance values —
        # TODO confirm against the data source).
        image = image[:,:,[1,2,3,4,5,6,7,10,11,12]]/10000.0
        sample = {'image': image, 'label': classLabel}
        if self.transform:
            sample = self.transform(sample)
        # CSV labels are 1-based; shift to 0-based for the loss function.
        return sample['image'], sample['label']-1#sample
class RandomCrop(object):
    """Randomly crop the image in a sample to a fixed size.

    Args:
        output_size (tuple or int): Desired output size (h, w). If int, a
            square crop is made.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size
    def __call__(self, sample):
        """Return a new sample dict with a randomly positioned crop."""
        image, label = sample['image'], sample['label']
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        # np.random.randint's upper bound is exclusive, so use +1: this
        # allows top == h - new_h and, crucially, no longer raises
        # ValueError when the image is exactly the crop size (h == new_h).
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[top: top + new_h,
                      left: left + new_w]
        return {'image': image, 'label': label}
class ToTensor(object):
    """Convert the ndarrays of a sample into torch tensors.

    Moves the channel axis from last (H x W x C, numpy convention) to
    first (C x H x W, torch convention) and squeezes the label.
    """
    def __call__(self, sample):
        img = sample['image'].transpose((2, 0, 1))
        tensor_image = torch.from_numpy(img.astype("float")).float()
        tensor_label = torch.squeeze(torch.from_numpy(sample['label']))
        return {'image': tensor_image, 'label': tensor_label}
| 32.647059 | 121 | 0.621107 |
28957d205b560942a43fe20af3ee47c7d8d34a15 | 591 | py | Python | eval_predictions.py | uporwal/sigir-2019-ecom-challenge | bffa7f99930321ad5d86e0cddd3c9ddfb98ba3d2 | [
"MIT"
] | 7 | 2019-06-05T01:42:54.000Z | 2020-07-31T04:31:47.000Z | eval_predictions.py | uporwal/sigir-2019-ecom-challenge | bffa7f99930321ad5d86e0cddd3c9ddfb98ba3d2 | [
"MIT"
] | 5 | 2019-06-09T15:11:47.000Z | 2019-06-28T18:35:48.000Z | eval_predictions.py | uporwal/sigir-2019-ecom-challenge | bffa7f99930321ad5d86e0cddd3c9ddfb98ba3d2 | [
"MIT"
] | 5 | 2019-06-04T17:06:33.000Z | 2021-01-15T11:14:43.000Z | import evaluation_script
import argparse
# Build the CLI: ground-truth and prediction files are mandatory; the
# document file is optional (evaluate_submission accepts None for it).
parser = argparse.ArgumentParser(description='Evaluation script used in the eBay SIGIR 2019 eCommerce Search Challenge.')
parser.add_argument('-g', '--ground-truth-file', required=True, help="Ground truth file")
parser.add_argument('-p', '--prediction-file', required=True, help="Prediction file")
parser.add_argument('-d', '--document-file', required=False, default=None, help="Document file")
args = parser.parse_args()
# Score the submission and print the result (blank line for readability).
# Stray ';' after print() removed.
r = evaluation_script.evaluate_submission(args.ground_truth_file, args.prediction_file, args.document_file)
print()
print(r)
| 45.461538 | 121 | 0.781726 |
2895a62d74a6cf74dd272cfa08d6a6029b8f3434 | 48 | py | Python | starfish/__main__.py | haoxusci/starfish | d7bd856024c75f2ce41504406f2a663566c3814b | [
"MIT"
] | 164 | 2018-03-21T21:52:56.000Z | 2022-03-23T17:14:39.000Z | starfish/__main__.py | lbgbox/starfish | 0e879d995d5c49b6f5a842e201e3be04c91afc7e | [
"MIT"
] | 1,728 | 2018-03-15T23:16:09.000Z | 2022-03-12T00:09:18.000Z | starfish/__main__.py | lbgbox/starfish | 0e879d995d5c49b6f5a842e201e3be04c91afc7e | [
"MIT"
] | 66 | 2018-03-25T17:21:15.000Z | 2022-01-16T09:17:11.000Z | from .core.starfish import starfish
starfish()
| 12 | 35 | 0.791667 |
2896d0048b215dc837ae66958ce2ac38e7c770f9 | 968 | py | Python | coreapp/migrations/0058_auto_20200426_1348.py | Quanscendence/braynai | ab828ca95571c6dffef2b2392522e6a4160a2304 | [
"MIT"
] | null | null | null | coreapp/migrations/0058_auto_20200426_1348.py | Quanscendence/braynai | ab828ca95571c6dffef2b2392522e6a4160a2304 | [
"MIT"
] | null | null | null | coreapp/migrations/0058_auto_20200426_1348.py | Quanscendence/braynai | ab828ca95571c6dffef2b2392522e6a4160a2304 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-04-26 08:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema changes for the EndpointAlgorithm model (Django-generated).

    Renames ``algorithm`` to ``model_id``, drops
    ``prediction_column_name`` and adds the nullable ``accuracy`` and
    ``type_of_prediction`` fields.
    """
    dependencies = [
        ('coreapp', '0057_projectpricing_custom_supprt'),
    ]
    operations = [
        migrations.RenameField(
            model_name='endpointalgorithm',
            old_name='algorithm',
            new_name='model_id',
        ),
        migrations.RemoveField(
            model_name='endpointalgorithm',
            name='prediction_column_name',
        ),
        migrations.AddField(
            model_name='endpointalgorithm',
            name='accuracy',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='endpointalgorithm',
            name='type_of_prediction',
            field=models.CharField(blank=True, choices=[('Classification', 'Classification'), ('Linear', 'Linear')], max_length=100, null=True),
        ),
    ]
| 29.333333 | 144 | 0.597107 |
2897d15751315f822719f939136b75871bf6ecab | 1,431 | py | Python | server/price_cache.py | pareeohnos/ktrade | 1eaed1ff16ded580d5649c667935357567e7b514 | [
"MIT"
] | 5 | 2021-09-08T11:04:15.000Z | 2021-11-27T08:42:23.000Z | server/price_cache.py | pareeohnos/ktrade | 1eaed1ff16ded580d5649c667935357567e7b514 | [
"MIT"
] | 36 | 2021-08-31T09:28:10.000Z | 2021-12-10T06:47:04.000Z | server/price_cache.py | pareeohnos/ktrade | 1eaed1ff16ded580d5649c667935357567e7b514 | [
"MIT"
] | 2 | 2021-08-29T02:53:54.000Z | 2021-08-29T06:21:36.000Z | import logging
from server.singleton_meta import SingletonMeta
log = logging.getLogger(__name__)
class PriceCache(metaclass=SingletonMeta):
def __init__(self):
log.debug("[PriceCache] Init new price cache")
self.price_cache = {}
def init_cache_for_ticker(self, watched_ticker_id):
log.info(f"[PriceCache] Init cache for watched ticker {watched_ticker_id}")
if self.price_cache.get(watched_ticker_id):
return
self.price_cache[watched_ticker_id] = {
"high": None,
"low": None,
"price": None
}
def cached_prices_for_ticker(self, watched_ticker_id):
return self.price_cache.get(watched_ticker_id)
def cached_price(self, watched_ticker_id, key):
cache = self.price_cache.get(watched_ticker_id)
if not cache:
return None
log.info(f"[PriceCache] Getting {key} for {watched_ticker_id}: {cache[key]}")
return cache[key]
def update_cached_price(self, watched_ticker_id, key, val):
log.info(f"[PriceCache] Updating {key} price for {watched_ticker_id}: {val}")
self.price_cache[watched_ticker_id][key] = val
def delete_watched_ticker(self, watched_ticker_id):
log.info(f"[PriceCache] Deleting cache for {watched_ticker_id}")
del self.price_cache[watched_ticker_id]
def reset_cached_values(self):
for prices in self.price_cache.values():
prices["low"] = None
prices["high"] = None
prices["price"] = None | 31.108696 | 81 | 0.714186 |
289918d2c57a6904734431ddd51bb10c97d644f6 | 499 | py | Python | series_loop.py | Akshara2820/Python_WhileLoop | d525b547bc8c8236cb2cd1881080ec4e6604fffc | [
"MIT"
] | 1 | 2021-09-15T03:42:15.000Z | 2021-09-15T03:42:15.000Z | series_loop.py | Akshara2820/Python_WhileLoop | d525b547bc8c8236cb2cd1881080ec4e6604fffc | [
"MIT"
] | null | null | null | series_loop.py | Akshara2820/Python_WhileLoop | d525b547bc8c8236cb2cd1881080ec4e6604fffc | [
"MIT"
] | null | null | null | # (10,2,20,4,30,6,40,8,50)
# Series 1: prints 10,2,20,4,30,6,... for n iterations -- alternates the
# running constant c (10, 20, 30, ...) with the even counters (2, 4, ...).
n=int(input("enter no--"))
i=1
c=10
while i<=n:
    if i%2==0:
        c+=10
        print(i,end=",")
        i+=1
    i+=1
    print(c,end=",")
# (1+10=11, 11+20=31, 31+30=61, 61+40=101)
# Series 2: cumulative sums with a step that grows by 10 each term:
# 1, 11, 31, 61, 101, ...
n=int(input("enter no,-"))
i=0
d=1
s=10
while i<n:
    print(d,end=",")
    d=d+s
    s+=10
    i+=1
# (1+10=11, 11+20=31, 31+30=61, 61+40=101)
# Series 3: the same series as above, derived as d += 10*i per iteration.
n=int(input("enter no.=="))
i=1
d=1
while i<=n:
    print(d,end=" ")
    d=d+10*i
    i+=1
| 12.794872 | 44 | 0.420842 |
289d03fd3a78072e9344f01958c2c279a5179efe | 9,092 | py | Python | modules/Manager.py | jurajkula/IBT | 7b09f6d331433bfbf3e7955754a36b69b332bb4e | [
"MIT"
] | 3 | 2019-05-16T18:54:49.000Z | 2019-10-21T11:12:50.000Z | modules/Manager.py | jurajkula/IBT | 7b09f6d331433bfbf3e7955754a36b69b332bb4e | [
"MIT"
] | null | null | null | modules/Manager.py | jurajkula/IBT | 7b09f6d331433bfbf3e7955754a36b69b332bb4e | [
"MIT"
] | null | null | null | import os
import time
from os import mkdir
from os.path import isdir
from threading import Lock
import cv2
import imutils
from modules.Camera import Detect
from modules.Camera.CameraHandler import CameraHandler
from modules.Config import Config
from modules.Fusion import Fusion
from modules.Logger.Logger import Logger
from modules.Radar.RadarHandler import RadarHandler
class Manager:
    """Coordinates the radar and camera pipelines (run / save / load modes).

    Owns the two handler threads, the shared radar data buffer and the
    locks that guard it, and drives the main capture/fusion/display loop.
    """
    def __init__(self, config: Config):
        self.radarHandler, self.cameraHandler = self.createHandlers()
        self.logger = Logger(config.debug)
        # Shared, lock-protected buffer filled by the radar thread.
        self.radarData = []
        self.config = config
        self.lockRadarData = Lock()
        self.lockRadarTimestamp = Lock()
        # Record directory for 'save'/'load' modes; set by setState().
        self.temp = None
        self.state = self.setState()
        # Single-element list so the radar thread can update it in place.
        self.radarTimestamp = [0]
    @staticmethod
    def createHandlers():
        """Create the radar/camera handler pair."""
        return RadarHandler(), CameraHandler()
    def setState(self):
        """Resolve the run mode and prepare the record directory.

        'save' creates the next free ./data/records/record-<n> tree;
        'load' requires the configured record id to exist (process exits
        with code -10 otherwise).  Returns the mode string.
        """
        self.state = self.config.mode
        path = './data/records/record-'
        if self.state == 'save':
            ids = 0
            while isdir(path + str(ids)):
                ids += 1
            self.temp = path + str(ids)
            mkdir(self.temp)
            mkdir(self.temp + '/radar')
            mkdir(self.temp + '/camera')
        if self.state == 'load':
            if isdir(path + str(self.config.loadId)):
                self.temp = path + str(self.config.loadId)
            else:
                exit(-10)
        return self.state
    def configureRadar(self):
        """Wire the radar handler: serial ports/config (live modes only),
        shared buffers, locks and the camera position."""
        self.radarHandler.setLogger(self.logger)
        if self.state != 'load':
            self.radarHandler.set_ports('/dev/ttyACM1', '/dev/ttyACM0') \
                .set_config_file(self.config.configRadar) \
                .send_config()
        if self.state != 'run':
            self.radarHandler.dataRadarPath = self.temp + '/radar'
        self.radarHandler.setState(self.state)
        self.radarHandler.setRadarData(self.radarData)
        self.radarHandler.lockRadarData = self.lockRadarData
        self.radarHandler.lockRadarTimestamp = self.lockRadarTimestamp
        self.radarHandler.timestamp = self.radarTimestamp
        self.radarHandler.cameraPos = self.config.CameraPosition
    def configureCamera(self):
        """Propagate the run mode and logger to the camera handler."""
        self.cameraHandler.setState(self.state)
        self.cameraHandler.setLogger(self.logger)
    def fpsFromCamera(self):
        """Estimate the camera frame rate by timing 120 captures."""
        frames = 120
        i = 0
        start = time.time()
        while i < 120:
            ret, frame = self.cameraHandler.captureFrame()
            i += 1
        seconds = time.time() - start
        return frames / seconds
    def runner(self):
        """Main loop; behaviour depends on the mode chosen in setState():

        'save' -- show the live camera and write roughly one frame per
                  second to disk, then exit.
        'load' -- replay a recorded session: for each saved frame, wait
                  until the radar replay timestamp lines up (+/-50 ms),
                  run pedestrian detection, fuse with radar and display.
        'run'  -- live capture with periodic detection/fusion/display.
        Pressing 'q' in any mode cancels the radar thread and stops.
        """
        fusion = Fusion.Fusion(self.config)
        if self.state != 'load':
            self.logger.log('Find out camera fps')
            fps = int(self.fpsFromCamera() + 0.5)
        self.radarHandler.start()
        if self.state == 'save':
            c = 0
            while self.cameraHandler.cap.isOpened():
                ret, frame = self.cameraHandler.captureFrame()
                if ret:
                    # Write every fps-th frame, i.e. about one per second.
                    if c % fps == 0:
                        timestamp = int(time.time())
                        filename = self.temp + '/camera/img-' + str(timestamp) + '.png'
                        cv2.imwrite(filename, frame)
                        c = 0
                    cv2.imshow('frame', frame)
                    c += 1
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.radarHandler.setState('cancel')
                        break
                else:
                    break
            exit(0)
        oldFusion = None
        if self.state == 'load':
            # Filenames are img-<unix seconds>.png; sort to replay in order.
            for file in sorted(os.listdir(self.temp + '/camera/')):
                fusedCount = 0
                pick = [0]
                img = cv2.imread(self.temp + '/camera/' + file)
                frame = imutils.resize(img, width=min(self.config.imageSize, img.shape[1]))
                timestamp = int(file.split('-')[1].split('.')[0]) * 1000
                # Poll until the radar replay reaches this frame's
                # timestamp (50 ms tolerance window).
                while True:
                    self.lockRadarTimestamp.acquire()
                    try:
                        timestampRadar = self.radarTimestamp[0]
                    finally:
                        self.lockRadarTimestamp.release()
                    if (timestampRadar - 50 < timestamp) & (timestamp < timestampRadar + 50):
                        pick = Detect.detectPedestrian(frame, self.config.winStride, self.config.scale)
                        self.lockRadarData.acquire()
                        try:
                            fused = fusion.fuse(pick,
                                                [frame.shape[0], frame.shape[1]],
                                                self.radarData)
                        finally:
                            self.lockRadarData.release()
                        if fused is not None:
                            oldFusion = fused[0]
                            fusedCount = fused[1]
                        break
                    if timestampRadar + 50 > timestamp:
                        # Radar is already past this frame; stop waiting.
                        break
                    time.sleep(0.1)
                self.lockRadarData.acquire()
                try:
                    self.cameraHandler.insertCountDataToImage(frame, [fusedCount, len(pick), len(self.radarData)])
                finally:
                    self.lockRadarData.release()
                if oldFusion is None:
                    cv2.imshow('Frame', frame)
                    if cv2.waitKey(25) == ord('q'):
                        self.radarHandler.setState('cancel')
                        break
                    continue
                # Draw every successfully fused detection onto the frame.
                for o in oldFusion:
                    for oo in o:
                        if oo.fused is not True:
                            continue
                        self.cameraHandler.insertDataToImage(frame, oo)
                cv2.imshow('Frame', frame)
                if cv2.waitKey(25) == ord('q'):
                    self.radarHandler.setState('cancel')
                    break
            # Replay finished: stop the radar thread, then shut down.
            self.radarHandler.setState('cancel')
            while self.radarHandler.is_alive():
                time.sleep(0.4)
                print('5')
            try:
                self.cameraHandler.releaseAndClose()
                exit(0)
            except RuntimeError:
                pass
        # 'run' mode: live capture loop.
        c = 0
        while self.cameraHandler.cap.isOpened():
            time.sleep(0.001)
            fusedCount = 0
            pick = [0]
            # Capture frame-by-frame
            ret, frame = self.cameraHandler.captureFrame()
            frame = imutils.resize(frame, width=min(600, frame.shape[1]))
            if ret:
                # Drop stale detections every oldDetection-th frame.
                if (c % self.config.oldDetection == 0) & (self.config.oldDetection > 0):
                    oldFusion = None
                # Run detection/fusion roughly four times per second, and
                # only when the radar timestamp is within 50 ms of "now".
                if c % int(fps / 4) == 0:
                    timestamp = time.time() * 1000
                    self.lockRadarTimestamp.acquire()
                    try:
                        timestampRadar = self.radarTimestamp[0]
                    finally:
                        self.lockRadarTimestamp.release()
                    if timestampRadar - 50 < timestamp < timestampRadar + 50:
                        pick = Detect.detectPedestrian(frame, self.config.winStride, self.config.scale)
                        self.lockRadarData.acquire()
                        try:
                            fused = fusion.fuse(pick,
                                                [self.cameraHandler.cap.get(3), self.cameraHandler.cap.get(4)],
                                                self.radarData)
                        finally:
                            self.lockRadarData.release()
                        if fused is not None:
                            oldFusion = fused[0]
                            fusedCount = fused[1]
                    c = 0
                self.lockRadarData.acquire()
                try:
                    self.cameraHandler.insertCountDataToImage(frame, [fusedCount, len(pick), len(self.radarData)])
                finally:
                    self.lockRadarData.release()
                if oldFusion is None:
                    cv2.imshow('Frame', frame)
                    if cv2.waitKey(25) == ord('q'):
                        self.radarHandler.setState('cancel')
                        break
                    c += 1
                    continue
                for o in oldFusion:
                    for oo in o:
                        if oo.fused is not True:
                            continue
                        self.cameraHandler.insertDataToImage(frame, oo)
                # Display the resulting frame
                cv2.imshow('Frame', frame)
                # Press Q on keyboard to exit
                if cv2.waitKey(25) == ord('q'):
                    self.radarHandler.setState('cancel')
                    break
                c += 1
            # Break the loop
            else:
                break
        self.radarHandler.join()
        self.cameraHandler.releaseAndClose()
| 34.439394 | 114 | 0.478883 |
289e8099349c64172c6b2bf0ba568b861c6f1152 | 5,809 | py | Python | train.py | okwrtdsh/3D-ResNets-PyTorch | f36a32ea8b283524d1d102937c49689b1f475b5f | [
"MIT"
] | null | null | null | train.py | okwrtdsh/3D-ResNets-PyTorch | f36a32ea8b283524d1d102937c49689b1f475b5f | [
"MIT"
] | null | null | null | train.py | okwrtdsh/3D-ResNets-PyTorch | f36a32ea8b283524d1d102937c49689b1f475b5f | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
import time
import os
import sys
import numpy as np
from utils import AverageMeter, calculate_accuracy, save_gif, accuracy
from models.binarized_modules import binarizef
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt,
                epoch_logger, batch_logger, device):
    """Run one training epoch and log per-batch / per-epoch metrics.

    Afterwards, for 'exp' models (unless resuming from opt.load_path)
    dumps the binarized first parameter as a temporal mask GIF, and
    checkpoints the model every opt.checkpoint epochs.
    """
    print('train at epoch {}'.format(epoch))
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # accuracies = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Per-sample input means, accumulated for the progress display below.
    input_mean = []
    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        # The comprehension variable shadows the loop index `i`, but does
        # not leak out of the comprehension (Python 3 scoping).
        input_mean.extend([i.mean() for i in inputs.detach().cpu().numpy()])
        data_time.update(time.time() - end_time)
        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # acc = calculate_accuracy(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        # accuracies.update(acc, inputs.size(0))
        prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))
        top1.update(prec1, inputs.size(0))
        top5.update(prec5, inputs.size(0))
        optimizer.zero_grad()
        loss.backward()
        # https://github.com/itayhubara/BinaryNet.pytorch/blob/master/main_mnist.py#L113
        # for p in list(model.parameters()):
        #     if hasattr(p, 'org'):
        #         p.data.copy_(p.org)
        optimizer.step()
        # for p in list(model.parameters()):
        #     if hasattr(p, 'org'):
        #         p.org.copy_(p.data.clamp_(-1, 1))
        batch_time.update(time.time() - end_time)
        end_time = time.time()
        batch_logger.log({
            'epoch': epoch,
            'batch': i + 1,
            'iter': (epoch - 1) * len(data_loader) + (i + 1),
            'loss': losses.val,
            'top1': top1.val,
            'top5': top5.val,
            'lr': optimizer.param_groups[0]['lr']
        })
        # In-place progress line ('\r' rewrites the same console line).
        sys.stdout.flush()
        sys.stdout.write('\rEpoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.sum:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Acc@5 {top5.val:.3f} ({top5.avg:.3f})\t\t'
                         'len {len_mean},'
                         'mean {mean:.4f},'
                         'std {std:.4f},'
                         'min {min:.4f},'
                         'max {max:.4f}'
                         '\t\t'.format(
                             epoch,
                             i + 1,
                             len(data_loader),
                             batch_time=batch_time,
                             data_time=data_time,
                             loss=losses,
                             top1=top1,
                             top5=top5,
                             len_mean=len(input_mean),
                             mean=np.mean(input_mean),
                             std=np.std(input_mean),
                             min=np.min(input_mean),
                             max=np.max(input_mean),
                         ))
        sys.stdout.flush()
    # Epoch summary line.
    print('\n[Train] Epoch{0}\t'
          'Time: {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
          'Data: {data_time.sum:.3f} ({data_time.avg:.3f})\t'
          'Loss: {loss.avg:.4f}\t'
          'Acc@1: {top1.avg:.3f}\t'
          'Acc@5: {top5.avg:.3f}'
          '\tlen {len_mean},'
          'mean {mean:.4f},'
          'std {std:.4f},'
          'min {min:.4f},'
          'max {max:.4f}'
          '\t\t'.format(
              epoch,
              batch_time=batch_time,
              data_time=data_time,
              loss=losses,
              top1=top1,
              top5=top5,
              len_mean=len(input_mean),
              mean=np.mean(input_mean),
              std=np.std(input_mean),
              min=np.min(input_mean),
              max=np.max(input_mean),
          ))
    print()
    epoch_logger.log({
        'epoch': epoch,
        'loss': losses.avg,
        'top1': top1.avg,
        'top5': top5.avg,
        'lr': optimizer.param_groups[0]['lr'],
        'batch_time': batch_time.sum,
        'data_time': data_time.sum,
    })
    # if hasattr(list(model.parameters())[0], 'org'):
    #     mask = binarize(
    #         list(model.parameters())[0].data,
    #         quant_mode='det'
    #     ).add_(1).div_(2).to('cpu').detach().numpy()
    # Visualise the binarized first parameter as a 0/1 mask GIF
    # (assumes the parameter has sample_duration*8*8 elements).
    if 'exp' in opt.model and not opt.load_path:
        mask = binarizef(
            list(model.parameters())[0]
        ).add_(1).div_(2).to('cpu').detach().numpy()
        print('max', mask.max())
        print('min', mask.min())
        mask = mask.reshape((opt.sample_duration, 8, 8, 1)).astype(np.uint8)
        assert mask.shape == (opt.sample_duration, 8, 8, 1)
        # save_file_path = os.path.join(opt.result_path,
        #                               'mask_{}.npy'.format(epoch))
        # np.save(save_file_path, mask)
        save_file_path = os.path.join(opt.result_path,
                                      'mask_{}.gif'.format(epoch))
        save_gif(mask, save_file_path, vmax=1, vmin=0)
    # Periodic checkpoint of model + optimizer state.
    if epoch % opt.checkpoint == 0:
        save_file_path = os.path.join(opt.result_path,
                                      'save_{}.pth'.format(epoch))
        states = {
            'epoch': epoch + 1,
            'arch': opt.arch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        torch.save(states, save_file_path)
| 36.30625 | 88 | 0.484249 |
289f6d1cb4c2dff400bd79a40abc1c0e080f2635 | 477 | py | Python | contact/views.py | ledomone/kurs_django | c24aaf8f8a22a695b41e2436bf9bf4d1ca665079 | [
"MIT"
] | null | null | null | contact/views.py | ledomone/kurs_django | c24aaf8f8a22a695b41e2436bf9bf4d1ca665079 | [
"MIT"
] | null | null | null | contact/views.py | ledomone/kurs_django | c24aaf8f8a22a695b41e2436bf9bf4d1ca665079 | [
"MIT"
] | null | null | null | from django.contrib.contenttypes import fields
from django.shortcuts import render
from .forms import MessageForm, ContactForm
from django.views.generic import DetailView, ListView, FormView
class MessageAddView(FormView):
    """Display the contact form and persist submitted messages."""
    form_class = ContactForm
    template_name = 'contact/message_form.html'
    success_url = '/'
    def form_valid(self, form):
        # Save the message, then let FormView handle the redirect.
        form.save()
        return super().form_valid(form)
289fb47f080457beca96ad6fa33ec1f46323cf2b | 6,506 | py | Python | commands/misc/settings.py | ii-Python/Prism | a404a61ddb16d045aa29d81908ce4ad80b24e24d | [
"MIT"
] | 6 | 2020-09-28T13:19:37.000Z | 2021-07-13T10:37:22.000Z | commands/misc/settings.py | BenjaminGotBanned/Prism | a404a61ddb16d045aa29d81908ce4ad80b24e24d | [
"MIT"
] | 2 | 2020-10-06T17:59:40.000Z | 2020-10-06T20:12:39.000Z | commands/misc/settings.py | BenjaminGotBanned/Prism | a404a61ddb16d045aa29d81908ce4ad80b24e24d | [
"MIT"
] | 3 | 2021-01-05T13:33:58.000Z | 2021-07-13T10:37:37.000Z | # Modules
import discord
from datetime import date
from discord import Embed
from json import loads, dumps
from assets.prism import Tools
from discord.ext import commands
# Main Command Class
class Settings(commands.Cog):
    """Cog providing the per-guild `settings` command for the Prism bot.

    All guild settings are stored as JSON in the flat file "db/guilds",
    keyed by guild id; the file is re-read on every invocation and
    rewritten after a change (not safe against concurrent invocations).
    """
    def __init__(self, bot):
        self.bot = bot
        # Metadata read by the bot's help command.
        self.desc = "Changes server settings for Prism"
        self.usage = "settings [key] [value]"
    @commands.command()
    @commands.has_permissions(manage_guild = True)
    async def settings(self, ctx, key: str = None, value = None):
        """With no arguments, show the current settings; otherwise set
        one of: prefix, nsfw, levels, joinleave, autorole."""
        db = loads(open("db/guilds", "r").read())
        _db = db[str(ctx.guild.id)]
        if not key:
            # Overview embed of the guild's current settings.
            prefix = _db["prefix"]
            nsfw = True if "nsfw-enabled" in _db["tags"] else False
            levels = True if "levels-enabled" in _db["tags"] else False
            joinleave = self.bot.get_channel(_db["data"]["joinleave_channel"]).name if _db["data"]["joinleave_channel"] else "Not setup"
            if _db["data"]["autorole"]:
                autorole = discord.utils.get(ctx.guild.roles, id = _db["data"]["autorole"])
                if not autorole:
                    # Stored role no longer exists: clear it on disk too.
                    autorole = "Not setup"
                    _db["data"]["autorole"] = None
                    open("db/guilds", "w").write(dumps(db, indent = 4))
                else:
                    autorole = "@" + autorole.name
            else:
                autorole = "Not setup"
            embed = Embed(title = "Server Settings", description = f"Last updated: {_db['data']['last_updated']}", color = 0x126bf1)
            embed.add_field(name = "Settings", value = f"Prefix: {prefix}\nNSFW Enabled: {nsfw}\nLevels Enabled: {levels}\nJoin/Leave Channel: #{joinleave}\nAutorole: {autorole}", inline = False)
            embed.add_field(name = "How to change these", value = f"To change a setting, use ``{prefix}settings [setting] [value]``.\nFor example: ``{prefix}settings nsfw off``.", inline = False)
            embed.set_author(name = " | Settings", icon_url = self.bot.user.avatar_url)
            embed.set_footer(text = f" | Requested by {ctx.author}.", icon_url = ctx.author.avatar_url)
            return await ctx.send(embed = embed)
        elif key and not value:
            return await ctx.send(embed = Tools.error("No value specified."))
        key = key.lower()
        # Validate the key, then normalise the value: on/off style words
        # become booleans, anything else stays as the original string.
        if not key in ["prefix", "nsfw", "levels", "joinleave", "autorole"]:
            return await ctx.send(embed = Tools.error("That isn't a valid setting."))
        elif not isinstance(value, str) and not isinstance(value, bool):
            return await ctx.send(embed = Tools.error("That isn't a valid value."))
        elif value.lower() in ["on", "enable", "true", "yes"]:
            value = True
        elif value.lower() in ["off", "disable", "false", "no"]:
            value = False
        else:
            if key != "prefix" and not isinstance(value, str) and not isinstance(value, bool):
                return await ctx.send(embed = Tools.error("That isn't a valid value."))
        if key == "prefix":
            # Backticks/backslashes would break the help-text markup.
            for char in ["`", "\\"]:
                if char in value:
                    return await ctx.send(embed = Tools.error("Prefix contains unsupported characters."))
            if len(value) > 10:
                return await ctx.send(embed = Tools.error("Prefixes cannot be longer than 10 characters."))
            _db["prefix"] = value
            text = f"The prefix in this server has been set to ``{value}``."
        elif key == "nsfw":
            # Feature flags are stored as tag strings on the guild record.
            if value:
                if not "nsfw-enabled" in _db["tags"]:
                    _db["tags"].append("nsfw-enabled")
            else:
                if "nsfw-enabled" in _db["tags"]:
                    _db["tags"].remove("nsfw-enabled")
            text = f"NSFW has been set to ``{value}``."
        elif key == "levels":
            if value:
                if not "levels-enabled" in _db["tags"]:
                    _db["tags"].append("levels-enabled")
            else:
                if "levels-enabled" in _db["tags"]:
                    _db["tags"].remove("levels-enabled")
            text = f"Leveling has been set to ``{value}``."
        elif key == "joinleave":
            if not isinstance(value, str):
                return await ctx.send(embed = Tools.error("That isn't a valid value."))
            # Accept either a raw channel id or a <#id> channel mention.
            # (Note: `id` shadows the builtin here.)
            try:
                id = int(value)
            except:
                try:
                    id = int(value.split("<#")[1].split(">")[0])
                except:
                    return await ctx.send(embed = Tools.error("That isn't a valid value."))
            channel = self.bot.get_channel(id)
            if not channel:
                return await ctx.send(embed = Tools.error("That isn't a valid channel ID."))
            _db["data"]["joinleave_channel"] = channel.id
            text = f"The join/leave channel has been set to #{channel.name}"
        elif key == "autorole":
            if value.lower() in ["remove", "reset"]:
                _db["data"]["autorole"] = None
                text = "The autorole for this server has been reset."
            else:
                # Accept a <@&id> role mention or a role name ("@Name" or
                # "Name"); names are matched case-sensitively.
                if value.startswith("<@&") and value.endswith(">"):
                    value = value.replace("<", "").replace(">", "").replace("@", "").replace("&", "")
                else:
                    if value.startswith("@"):
                        value = value[1:]
                    role = discord.utils.get(ctx.guild.roles, name = value)
                    if not role:
                        return await ctx.send(embed = Tools.error("Couldn't find that role; check your capitalization. You can't use IDs here."))
                    value = role.id
                role = discord.utils.get(ctx.guild.roles, id = int(value))
                _db["data"]["autorole"] = role.id
                text = "This server's autorole has been set to @" + role.name
        _db["data"]["last_updated"] = str(date.today())
        # Persist the change and confirm it to the invoking channel.
        open("db/guilds", "w").write(dumps(db, indent = 4))
        embed = Embed(title = text, color = 0x126bf1)
        embed.set_author(name = " | Settings", icon_url = self.bot.user.avatar_url)
        embed.set_footer(text = f" | Set by {ctx.author}.", icon_url = ctx.author.avatar_url)
        return await ctx.send(embed = embed)
# Link to bot
def setup(bot):
    """discord.py extension hook: register the Settings cog on the bot."""
    cog = Settings(bot)
    bot.add_cog(cog)
| 29.306306 | 196 | 0.527206 |
28a230764b88abf38e3cb6d2f0cf4df9e3778896 | 970 | py | Python | core/test.py | awesome-archive/muzero-pytorch | 2ff4ea145097050031d6026f0aa1a97de72d702d | [
"MIT"
] | null | null | null | core/test.py | awesome-archive/muzero-pytorch | 2ff4ea145097050031d6026f0aa1a97de72d702d | [
"MIT"
] | null | null | null | core/test.py | awesome-archive/muzero-pytorch | 2ff4ea145097050031d6026f0aa1a97de72d702d | [
"MIT"
] | null | null | null | import torch
from .mcts import MCTS, Node
from .utils import select_action
def test(config, model, episodes, device, render):
    """Play `episodes` evaluation games and return the mean total reward.

    Actions are chosen deterministically from MCTS visit counts
    (select_action with deterministic=True) with gradients disabled.
    """
    model.to(device)
    model.eval()
    test_reward = 0
    env = config.new_game()
    with torch.no_grad():
        for ep_i in range(episodes):
            done = False
            ep_reward = 0
            obs = env.reset()
            while not done:
                if render:
                    env.render()
                root = Node(0)
                # NOTE(review): tensors go to config.device even though a
                # `device` argument was passed in — confirm both agree.
                obs = torch.FloatTensor(obs).to(config.device).unsqueeze(0)
                root.expand(env.to_play(), env.legal_actions(), model.initial_inference(obs))
                MCTS(config).run(root, env.action_history(), model)
                action = select_action(root, temperature=1, deterministic=True)
                obs, reward, done, info = env.step(action.index)
                ep_reward += reward
            test_reward += ep_reward
    return test_reward / episodes
| 31.290323 | 93 | 0.565979 |
28a386192b68f112112b6e68f5293867934e803f | 167 | py | Python | demo/deep_learning/base/second_stage_bounding_box_prediction/dcn_feature_calibration.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | 1 | 2018-12-09T06:09:29.000Z | 2018-12-09T06:09:29.000Z | demo/deep_learning/base/second_stage_bounding_box_prediction/dcn_feature_calibration.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | demo/deep_learning/base/second_stage_bounding_box_prediction/dcn_feature_calibration.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import torch
class RotateRectangleDCNFeatureCalibration(torch.nn.Module):
    """Placeholder DCN feature-calibration module; defines no layers yet."""

    def __init__(self):
        super(RotateRectangleDCNFeatureCalibration, self).__init__()
953703edf77bdad68e1e40c1a564d92c76b7a5a5 | 1,824 | pyde | Python | listing_70/listing_70.pyde | tiranderel/2019-fall-polytech-cs | 67f0482a0f143381f9b494a4348d6436ce8f8c1e | [
"MIT"
] | null | null | null | listing_70/listing_70.pyde | tiranderel/2019-fall-polytech-cs | 67f0482a0f143381f9b494a4348d6436ce8f8c1e | [
"MIT"
] | null | null | null | listing_70/listing_70.pyde | tiranderel/2019-fall-polytech-cs | 67f0482a0f143381f9b494a4348d6436ce8f8c1e | [
"MIT"
] | null | null | null | import math
class MySuperBall:
    # Class-level defaults; each instance is configured in myInit().
    x=0
    y=0
    radius=0
    speed=0
    counter=0          # oscillation phase
    previousBall=None  # link to the ball created just before this one
    vector = 1         # oscillation direction, flipped between +1 and -1

    def render (self):
        """Draw the ball, a line to the previous ball, and a center dot."""
        noStroke ()
        fill (200 , 100)
        ellipse (self.x,self.y,self.radius , self.radius )
        stroke (10)
        strokeWeight (2)
        if self.previousBall!=None:
            # NOTE(review): debug print left in the per-frame draw path.
            print(self.previousBall)
            line (self.x,self.y, self.previousBall.x, self.previousBall.y)
        noStroke ()
        fill (0)
        ellipse (self.x,self.y ,6 ,6)

    def upDate (self):
        """Advance the phase and move the ball along a vertical sine path."""
        self.counter += self.speed * self.vector /500
        self.y = 250 + sin( self.counter ) *200
        # Reverse direction once a full period has been covered.
        if ( self.counter > TWO_PI ):
            self.vector = self.vector *( -1)
# Global ball collections shared by setup/draw/keyPressed.
ballArray_one=[]
ballArray_two=[]  # NOTE(review): myInit() rebinds this to the SAME list object as ballArray_one
def setup ():
    """Processing entry point: create the canvas and build the balls."""
    size (500 , 500)
    smooth ()
    myInit ()
def myInit ():
    """(Re)create 125 balls spread evenly across the canvas width."""
    global ballArray_one, ballArray_two
    number = 125
    step = float (width) / float( number )
    ballArray_one = list(range(number))  # placeholder slots, replaced below
    for i in range(len(ballArray_one)):
        tmp_obj = MySuperBall ()
        variable = random (0 ,5)  # shared jitter for both x offset and radius
        tmp_obj .x = variable + step *i
        tmp_obj .y = random ( -100 ,100) + 250
        tmp_obj . radius = variable *10 + 5
        tmp_obj . speed = random (0.2 , 10)
        if i > 0:
            tmp_obj . previousBall = ballArray_one [i -1]
        ballArray_one [i] = tmp_obj
    # NOTE(review): aliases the same list object (not a copy); use list(...) to copy.
    ballArray_two = ballArray_one
def draw ():
    """Processing frame callback: clear, then update and render every ball."""
    global ballArray_one
    background (50)
    for curentBall in ballArray_one:
        curentBall . upDate ()
        curentBall . render ()
def keyPressed ():
    """Keyboard controls: a = reinit, q = grow first ball, s = screenshot."""
    if key == 'a':
        myInit ()
    if key == 'q':
        # ballArray_two aliases ballArray_one, so this grows the first drawn ball.
        ballArray_two [0]. radius = 300
    if key == 's':
        saveFrame ("myProcessing.png")
| 26.057143 | 75 | 0.533991 |
953906e1815512ae7854e463509fe51bfa7374f8 | 1,340 | py | Python | src/carim_discord_bot/discord_client/member_count.py | schana/carim-discord-bot | c1f5e868404744667156af7ad6d244939998b5a2 | [
"Apache-2.0"
] | 14 | 2020-04-06T17:58:09.000Z | 2022-02-28T13:29:35.000Z | src/carim_discord_bot/discord_client/member_count.py | schana/carim-discord-bot | c1f5e868404744667156af7ad6d244939998b5a2 | [
"Apache-2.0"
] | 48 | 2020-04-05T11:24:10.000Z | 2021-03-10T08:12:19.000Z | src/carim_discord_bot/discord_client/member_count.py | schana/carim-discord-bot | c1f5e868404744667156af7ad6d244939998b5a2 | [
"Apache-2.0"
] | 12 | 2020-03-31T15:08:56.000Z | 2021-09-07T17:54:49.000Z | import asyncio
import logging
import discord
from carim_discord_bot import managed_service, config
from carim_discord_bot.discord_client import discord_service
log = logging.getLogger(__name__)
class MemberCountService(managed_service.ManagedService):
    """Periodically mirrors the guild's member count into a channel name."""

    async def handle_message(self, message: managed_service.Message):
        # This service does not react to inbound messages.
        pass

    async def service(self):
        # Refresh only every 10 minutes; channel renames are heavily
        # rate-limited by Discord.
        while True:
            await asyncio.sleep(10 * 60)
            await self.update_member_count()

    async def update_member_count(self):
        # Feature is opt-in: do nothing unless a channel id is configured.
        if config.get().discord_member_count_channel_id:
            client: discord.Client = discord_service.get_service_manager().client
            if not client or not client.is_ready():
                log.warning('client not ready')
                return
            channel: discord.VoiceChannel = client.get_channel(
                config.get().discord_member_count_channel_id)
            count = channel.guild.member_count
            discord_member_count_string = config.get().discord_member_count_format.format(count=count)
            await channel.edit(name=discord_member_count_string)
            log.info(f'Update member count: {discord_member_count_string}')
# Lazily created module-level singleton.
service = None


def get_service_manager():
    """Return the shared MemberCountService, creating it on first call."""
    global service
    if service is None:
        service = MemberCountService()
    return service
| 31.162791 | 102 | 0.698507 |
953a0c9e9780129db979506c7f1a9628f61e3e34 | 491 | py | Python | code/core/early_stopping.py | ashesh-0/MultiZoomGaze | 24494a1346d09e21e4b6d999a742b5d31bbbeff0 | [
"MIT"
] | 1 | 2022-01-24T04:53:00.000Z | 2022-01-24T04:53:00.000Z | code/core/early_stopping.py | ashesh-0/MultiZoomGaze | 24494a1346d09e21e4b6d999a742b5d31bbbeff0 | [
"MIT"
] | null | null | null | code/core/early_stopping.py | ashesh-0/MultiZoomGaze | 24494a1346d09e21e4b6d999a742b5d31bbbeff0 | [
"MIT"
] | null | null | null | class EarlyStop:
def __init__(self, patience=5):
self._patience = patience
self._min_loss = None
self._counter = -1
def __call__(self, loss):
if self._min_loss is None:
self._counter = 0
self._min_loss = loss
return False
if self._min_loss > loss:
self._min_loss = loss
self._counter = 0
else:
self._counter += 1
return self._counter >= self._patience
| 24.55 | 46 | 0.543788 |
953acd7416847beb9014f4f513c188884ed30577 | 567 | py | Python | GAPullTest.py | wrashi/GAReport | b58fb1ef8a8984761ba417879aa52c4100c61a0b | [
"Unlicense"
] | null | null | null | GAPullTest.py | wrashi/GAReport | b58fb1ef8a8984761ba417879aa52c4100c61a0b | [
"Unlicense"
] | null | null | null | GAPullTest.py | wrashi/GAReport | b58fb1ef8a8984761ba417879aa52c4100c61a0b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from GAReport import GAReport
VIEW_ID = 'PutViewIDHere'
DIMENSIONS = ["Page", ]
METRICS = ["Pageviews", "Unique Pageviews", "Avg. Time on Page", "Entrances", "Bounce Rate", "% Exit", "Page Value"]
# Use these instructions for creating single and multiple filters: https://developers.google.com/analytics/devguides/reporting/core/v3/reference#filters
FILTERS= "ga:pagePath=~news"
report = GAReport(startdate="yesterday", enddate="yesterday", viewID=VIEW_ID, dimensions=DIMENSIONS, metrics=METRICS, filters=FILTERS)
print(report.df.head(3)) | 43.615385 | 153 | 0.75485 |
953ca9b5b1450ae6da266d252be6ca5bb7c74e70 | 14,525 | py | Python | model.py | coolEphemeroptera/AESRC2020 | b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de | [
"Apache-2.0"
] | 35 | 2020-09-26T13:40:16.000Z | 2022-03-22T19:42:20.000Z | model.py | coolEphemeroptera/ARNet | b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de | [
"Apache-2.0"
] | 4 | 2021-04-10T13:05:52.000Z | 2022-03-14T03:22:32.000Z | model.py | coolEphemeroptera/ARNet | b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de | [
"Apache-2.0"
] | 7 | 2020-09-26T15:52:45.000Z | 2021-06-11T05:05:23.000Z | from resnet import resnet18_,resnet34_,resnet50_,resnet101_, resnet152_
from keras.layers import Input, Dense, Lambda,Dropout,Conv2D,Activation,Bidirectional,GlobalAveragePooling1D,\
BatchNormalization,Reshape
from keras_layer_normalization import LayerNormalization
from keras.layers.cudnn_recurrent import CuDNNGRU,CuDNNLSTM
from keras.models import Model
from keras import backend as K
from keras.regularizers import l2
from keras.constraints import unit_norm
from keras.utils import multi_gpu_model
from keras.optimizers import Adam
import losses as ls
import VLAD as vd
"""
=========================
Layers
=========================
"""
def SQUEEZE(axis=3, name=None):
    """Lambda layer that removes dimension `axis` from its input tensor."""
    return Lambda(lambda x: K.squeeze(x,axis=axis),name=name)
def EXPAND(axis=3,name=None):
    """Lambda layer that inserts a new dimension at `axis`."""
    return Lambda(lambda x: K.expand_dims(x, axis=axis),name=name)
def BN(name=None):
    """Batch-normalization layer factory."""
    return BatchNormalization(name=name)
def LN(name=None):
    """Layer-normalization layer factory."""
    return LayerNormalization(name=name)
def DS(hidden,activation,rgr=l2(1e-4),use_bias=True,name=None):
    """Dense layer factory: he-normal init, L2 on kernel and bias.

    NOTE(review): the default `rgr` is created once at import time, so all
    callers relying on the default share the same regularizer object.
    """
    return Dense(hidden,
                 activation=activation,
                 use_bias=use_bias,
                 kernel_initializer='he_normal',
                 kernel_regularizer=rgr,
                 bias_regularizer=rgr,
                 name=name)
def BIGRU(hidden,seq=True,rgr=l2(1e-4),name=None):
    """Bidirectional CuDNN GRU factory; `seq` keeps per-timestep outputs."""
    return Bidirectional(CuDNNGRU(hidden,
                                  return_sequences=seq,
                                  kernel_regularizer=rgr,
                                  bias_regularizer=rgr),
                         merge_mode='concat',
                         name=name)
def DP(rate,name=None):
    """Dropout layer factory."""
    return Dropout(rate,name=name)
"""
=========================
ctc constructors
=========================
"""
def ctc_lambda_func(args):
    """Lambda body computing the CTC batch cost from (pred, labels, lens)."""
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def ctc_module(ctc_pred,max_label_len):
    """Wrap `ctc_pred` with the placeholder inputs needed for CTC training.

    Returns the in-graph CTC loss tensor plus the three inputs (labels,
    input lengths, label lengths) that must be fed alongside the features.
    """
    ctc_input_len = Input(shape=[1], dtype='int32', name='x_ctc_in_len')
    ctc_label_len = Input(shape=[1], dtype='int32', name='x_ctc_out_len')
    ctc_labels = Input([max_label_len], dtype='float32', name='x_ctc_label')
    ctc_loss = Lambda(ctc_lambda_func, output_shape=(1,), name='y_ctc_loss')\
        ([ctc_pred, ctc_labels, ctc_input_len, ctc_label_len])
    return ctc_loss,ctc_labels, ctc_input_len, ctc_label_len
"""
=========================
NetVLAD
=========================
"""
def vlad(x,
         aggregation,
         vlad_clusters,
         ghost_clusters):
    """Aggregate frame-level features with (ghost-)VLAD pooling.

    `aggregation` selects 'vlad' or 'gvlad'; ghost clusters absorb noisy
    assignments and are dropped from the pooled output.
    """
    weight_decay = 1e-4
    if aggregation == 'vlad':
        # 1x1 conv produces soft cluster-assignment scores per frame.
        x_k_center = Conv2D(vlad_clusters, (1, 1),
                            strides=(1, 1),
                            kernel_initializer='orthogonal',
                            use_bias=True, trainable=True,
                            kernel_regularizer=l2(weight_decay),
                            bias_regularizer=l2(weight_decay),
                            name='vlad_center_assignment')(x)
        x = vd.VladPooling(k_centers=vlad_clusters, mode='vlad', name='vlad_pool')([x, x_k_center])
    elif aggregation == 'gvlad':
        x_k_center = Conv2D(vlad_clusters + ghost_clusters, (1, 1),
                            strides=(1, 1),
                            kernel_initializer='orthogonal',
                            use_bias=True, trainable=True,
                            kernel_regularizer=l2(weight_decay),
                            bias_regularizer=l2(weight_decay),
                            name='gvlad_center_assignment')(x)
        x = vd.VladPooling(k_centers=vlad_clusters, g_centers=ghost_clusters, mode='gvlad', name='gvlad_pool')(
            [x, x_k_center])
    # NOTE(review): any other `aggregation` value returns x unchanged.
    return x
"""
=========================
AR Module
=========================
"""
def integration(x,
                hidden_dim=256,
                mto='avg',
                vlad_clusters=8,
                ghost_clusters=2):
    """Collapse a variable-length sequence into one utterance-level vector.

    `mto` (many-to-one) selects mean pooling, a BiGRU final state, or
    (ghost-)VLAD pooling; any other value aborts the process.
    """
    if mto== 'avg':
        x = GlobalAveragePooling1D(name="AR_MERGE")(x)
    elif mto== 'bigru':
        x = BIGRU(hidden_dim, seq=False, name="AR_MERGE")(x)
    elif mto in ['vlad', 'gvlad']:
        x = EXPAND(axis=1)(x)  # VLAD pooling expects a 4-D input
        x = vlad(x,
                 aggregation=mto,
                 vlad_clusters=vlad_clusters,
                 ghost_clusters=ghost_clusters)
    else:
        print("Please specify avg/bigru/vlad/gvlad ..")
        exit(1)
    return x
def disc_loss(x,
              accent_label,
              accent_classes,
              loss,
              margin,
              name):
    """Attach a discriminative classification head to embedding `x`.

    Supported `loss` values: softmax, sphereface, cosface, arcface,
    circleloss. The margin-based heads also consume the label input.
    """
    if loss == "softmax":
        y = DS(accent_classes, activation='softmax', use_bias=False, name=name)(x)
    elif loss == "sphereface":
        y = ls.SphereFace(n_classes=accent_classes, m=margin, name=name)([x, accent_label])
    elif loss == "cosface":
        y = ls.CosFace(n_classes=accent_classes, m=margin, name=name)([x, accent_label])
    elif loss == "arcface":
        y = ls.ArcFace(n_classes=accent_classes, m=margin, name=name)([x, accent_label])
    elif loss == "circleloss":
        # Circle loss operates on L2-normalized features and unit-norm weights.
        y = Lambda(lambda x: K.l2_normalize(x, 1))(x)
        y = Dense(accent_classes, activation=None, use_bias=False, kernel_constraint=unit_norm(), name=name)(y)
    else:
        # NOTE(review): an unknown `loss` value silently returns None here.
        return
    return y
"""
=========================
Model
=========================
"""
def build( inputs,
           outputs,
           raw=None,
           name="model"):
    """Create a keras Model and optionally warm-start it from `raw` weights."""
    model = Model(inputs=inputs, outputs=outputs, name=name)
    model.summary()
    if raw:
        print("===== init weights from:%s =====" % raw)
        # by_name + skip_mismatch lets a differently shaped checkpoint load.
        model.load_weights(raw, by_name=True, skip_mismatch=True)
    return model
def compile(model,
            gpus,
            lr,
            loss,
            loss_weights,
            metrics):
    """Compile `model` (replicated across GPUs when gpus > 1) with Adam."""
    if gpus>1:
        model_ = multi_gpu_model(model, gpus=gpus)
    else:
        model_ = model
    model_.compile(optimizer=Adam(lr,decay=2e-4),
                   loss=loss,
                   loss_weights=loss_weights,
                   metrics=metrics)
    return model_
def SAR_Net(input_shape,
            ctc_enable = False,
            ar_enable = True,
            disc_enable = False,
            res_type="res18",
            res_filters=64,
            hidden_dim=256,
            bn_dim=0,
            bpe_classes=1000,
            accent_classes=8,
            max_ctc_len=72,
            mto=None,
            vlad_clusters=8,
            ghost_clusters=2,
            metric_loss='cosface',
            margin=0.3,
            raw_model=None,
            lr=0.01,
            gpus = 1,
            mode="train",
            name=None):
    """Build and compile the speech accent-recognition network.

    A shared ResNet + BiGRU encoder feeds:
      * an optional CTC/ASR branch (`ctc_enable`) over `bpe_classes` units,
      * an optional accent branch (`ar_enable`) with a softmax head and,
        when `disc_enable`, an extra discriminative head (`metric_loss`),
      * an optional low-dimensional bottleneck head when `bn_dim` > 0.

    Returns (model, train_model): the raw keras Model and its compiled
    (possibly multi-GPU) training wrapper.
    """
    # ---- Input: fixed-size 2-D spectrogram for training, variable length otherwise.
    if mode == "train":
        inputs = Input(shape=input_shape, name="x_data")
    else:
        inputs = Input(shape=[None, input_shape[1], input_shape[2]], name="x_data")
    if disc_enable:
        # Ground-truth accent one-hots are model inputs for the margin losses.
        disc_labels = Input(shape=(accent_classes,), name="x_accent")

    # ---- Shared encoder: ResNet front-end + linear projection + BiGRU.
    if res_type == "res18":
        cnn = resnet18_(inputs, filters=res_filters)
    elif res_type == "res34":
        cnn = resnet34_(inputs, filters=res_filters)
    elif res_type == "res50":
        cnn = resnet50_(inputs, filters=res_filters)
    elif res_type == "res101":
        cnn = resnet101_(inputs, filters=res_filters)
    elif res_type == "res152":
        cnn = resnet152_(inputs, filters=res_filters)
    else:
        print("======= ERROR: please specify cnn in res-[18,34,50,101,152] ======")
    cnn = Reshape([-1, K.int_shape(cnn)[-1]], name="CNN2SEQ")(cnn)
    cnn = DS(hidden_dim, activation='tanh', name="CNN_LIN")(cnn)
    cnn = LN(name="CNN_LIN_LN")(cnn)
    crnn = BIGRU(hidden_dim, name="CRNN")(cnn)
    crnn = LN(name="CRNN_LN")(crnn)

    # ---- ASR branch: extra BiGRU + dense stack ending in a CTC loss node.
    if ctc_enable:
        asr = BIGRU(hidden_dim, name="CTC_BIGRU")(crnn)
        asr = LN(name="CTC_BIGRU_LN")(asr)
        asr = DS(hidden_dim, activation='tanh', name='CTC_DS')(asr)
        asr = LN(name='CTC_DS_LN')(asr)
        ctc_pred = DS(bpe_classes, activation="softmax", name='ctc_pred')(asr)
        ctc_loss, ctc_labels, ctc_input_len, ctc_label_len = ctc_module(ctc_pred, max_ctc_len)

    # ---- AR branch: sequence integration -> utterance embedding -> heads.
    if ar_enable:
        ar = DS(hidden_dim, activation='tanh', name='AR_DS')(crnn)
        ar = LN(name='AR_DS_LN')(ar)
        ar = integration(ar,
                         hidden_dim=hidden_dim,
                         mto=mto,
                         vlad_clusters=vlad_clusters,
                         ghost_clusters=ghost_clusters)
        ar = BN(name='AR_BN1')(ar)
        ar = DS(hidden_dim, activation=None, name="AR_EMBEDDING")(ar)  # global utterance feature
        ar = BN(name='AR_BN2')(ar)

        # Plain softmax classification head.
        ar1 = DS(64, activation='relu', name="AR_CF_DS1")(ar)
        ar1 = DS(64, activation='relu', name="AR_CF_DS2")(ar1)
        ar1 = DS(accent_classes, activation='softmax', name='y_accent')(ar1)

        # Discriminative (margin-based) head on the embedding.
        if disc_enable:
            ar2 = disc_loss(ar,
                            accent_label=disc_labels,
                            accent_classes=accent_classes,
                            loss=metric_loss,
                            margin=margin,
                            name="y_disc")

        # Optional low-dimensional bottleneck head (for visualization).
        if disc_enable and bn_dim:
            bn = DS(64, activation='relu', name="AR_BN_DS")(ar)
            bn = BN(name='AR_BN3')(bn)
            bn = DS(bn_dim, activation=None, name="bottleneck")(bn)
            bn = BN(name='AR_BN4')(bn)
            bn = disc_loss(bn,
                           accent_label=disc_labels,
                           accent_classes=accent_classes,
                           loss=metric_loss,
                           margin=margin,
                           name="y_disc_bn")

    # ---- Assemble inputs/outputs according to the enabled branches.
    # FIX: the bottleneck head only exists when ar_enable and disc_enable are
    # also set; the original bare `if bn_dim:` raised NameError otherwise.
    has_bn_head = ar_enable and disc_enable and bn_dim
    input_set = [inputs]
    output_set = []
    if ar_enable:
        output_set += [ar1]
        if disc_enable:
            input_set += [disc_labels]
            output_set += [ar2]
    if ctc_enable:
        input_set += [ctc_labels, ctc_input_len, ctc_label_len]
        output_set += [ctc_loss]
    if has_bn_head:
        output_set += [bn]
    model = build(inputs=input_set, outputs=output_set, raw=raw_model, name=name)

    # ---- Compile: per-output losses and their mixing weights.
    loss = {}
    loss_weights = {}
    metrics = {}
    alpha = 0.4   # weight split between CTC and the discriminative loss
    beta = 0.01   # small weight for auxiliary objectives
    if ar_enable:
        loss["y_accent"] = 'categorical_crossentropy'
        loss_weights["y_accent"] = beta if disc_enable else 1.0
        metrics["y_accent"] = "accuracy"
    if disc_enable:
        loss["y_disc"] = 'categorical_crossentropy' if metric_loss != 'circleloss' \
            else lambda y, x: ls.circle_loss(y, x, gamma=256, margin=margin)
        loss_weights["y_disc"] = 1 - alpha if ctc_enable else 1.0
        metrics["y_disc"] = "accuracy"
    if ctc_enable:
        # CTC loss is computed inside the graph; the "loss" just passes it through.
        loss["y_ctc_loss"] = lambda y_true, y_pred: y_pred
        # FIX: the original assigned this weight twice in a row; only the
        # second assignment took effect, so the dead first one was removed.
        loss_weights["y_ctc_loss"] = 1 - alpha if not disc_enable else beta
    if has_bn_head:
        # FIX: the original compared the `metrics` dict (never equal to a
        # string) instead of `metric_loss`, so circleloss was never selected.
        loss["y_disc_bn"] = 'categorical_crossentropy' if metric_loss != 'circleloss' \
            else lambda y, x: ls.circle_loss(y, x, gamma=256, margin=margin)
        loss_weights["y_disc_bn"] = 0.1
        metrics['y_disc_bn'] = 'accuracy'
    train_model = compile(model, gpus, lr=lr, loss=loss, loss_weights=loss_weights, metrics=metrics)
    print(loss_weights)
    return model, train_model
"""
======================
OTHER
======================
"""
def sub_model(model, input_name, output_name):
    """Slice `model` into a new Model running between two named layers."""
    sub_in = model.get_layer(name=input_name).input
    sub_out = model.get_layer(name=output_name).output
    return Model(inputs=sub_in, outputs=sub_out)
def ctc_pred(model,x,batch_size,input_len,):
    """Run the model on `x` and greedily CTC-decode the per-step softmax."""
    pred = model.predict(x,batch_size=batch_size)
    input_len = K.constant([input_len]*len(pred),dtype="int32")
    decoded = K.ctc_decode(pred, input_len, greedy=True, beam_width=100, top_paths=1)
    return K.get_value(decoded[0][0])
if __name__=="__main__":
    # Smoke test: build the full network, slice out the accent branch, and
    # round-trip the weights through disk.
    model,train_model = SAR_Net(input_shape=(1200,80,1),
                                ctc_enable = True,
                                ar_enable = True,
                                disc_enable = True,
                                res_type="res18",
                                res_filters=32,
                                hidden_dim=256,
                                bn_dim=0,
                                bpe_classes=1000,
                                accent_classes=8,
                                max_ctc_len=72,
                                mto='vlad',
                                vlad_clusters=8,
                                ghost_clusters=2,
                                metric_loss='cosface',
                                margin=0.3,
                                raw_model=None,
                                lr=0.01,
                                gpus = 1,
                                name=None)
    sub_model(model,'x_data','y_accent')
    model.save_weights('exp/demo.h5')
    model.load_weights('exp/demo.h5')
| 34.748804 | 112 | 0.497969 |
953d34fa43582a04419407658a07c6d2cffc68aa | 187 | py | Python | tests/strategies/__init__.py | lycantropos/rsrc_web | 6702840befa4fa70114ce10543144410b453aa30 | [
"MIT"
] | null | null | null | tests/strategies/__init__.py | lycantropos/rsrc_web | 6702840befa4fa70114ce10543144410b453aa30 | [
"MIT"
] | 4 | 2019-06-18T18:36:50.000Z | 2019-07-10T13:14:48.000Z | tests/strategies/__init__.py | lycantropos/rsrc_web | 6702840befa4fa70114ce10543144410b453aa30 | [
"MIT"
] | null | null | null | from .literals import booleans
from .models import (readable_web_streams,
web_streams,
writeable_web_streams)
from .paths import web_url_strings
| 31.166667 | 43 | 0.663102 |
953d79768caec877a768ca7a6b3a2fc0176266ec | 27,309 | py | Python | main.py | ruotianluo/neural-summ-cnndm-pytorch | 027b63107b748bc56356bd119b243cfdda684aa2 | [
"MIT"
] | 3 | 2018-10-22T23:03:40.000Z | 2018-10-23T09:45:32.000Z | main.py | ruotianluo/neural-summ-cnndm-pytorch | 027b63107b748bc56356bd119b243cfdda684aa2 | [
"MIT"
] | null | null | null | main.py | ruotianluo/neural-summ-cnndm-pytorch | 027b63107b748bc56356bd119b243cfdda684aa2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
# Pin the process to a single GPU before torch initializes CUDA.
cudaid = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(cudaid)
import sys
import time
import numpy as np
import cPickle as pickle
import copy
import random
from random import shuffle
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import data as datar
from model import *
from utils_pg import *
from configs import *
# Corpus bindings: swap these three to train/test on a different dataset.
cfg = DeepmindConfigs()
TRAINING_DATASET_CLS = DeepmindTraining
TESTING_DATASET_CLS = DeepmindTesting
def print_basic_info(modules, consts, options):
    """Log the active option flags and every constant (Python 2 prints)."""
    if options["is_debugging"]:
        print "\nWARNING: IN DEBUGGING MODE\n"
    if options["has_learnable_w2v"]:
        print "USE LEARNABLE W2V EMBEDDING"
    if options["is_bidirectional"]:
        print "USE BI-DIRECTIONAL RNN"
    if options["has_lvt_trick"]:
        print "USE LVT TRICK"
    if options["omit_eos"]:
        print "<eos> IS OMITTED IN TESTING DATA"
    if options["prediction_bytes_limitation"]:
        print "MAXIMUM BYTES IN PREDICTION IS LIMITED"
    for k in consts:
        print k + ":", consts[k]
def init_modules():
    """Build the run-wide `modules`, `consts`, and `options` dictionaries."""
    init_seeds()

    options = {}

    options["is_debugging"] = False
    options["is_predicting"] = False

    options["cuda"] = cfg.CUDA and torch.cuda.is_available()
    options["device"] = torch.device("cuda" if options["cuda"] else "cpu")

    options["cell"] = cfg.CELL
    options["copy"] = cfg.COPY
    options["coverage"] = cfg.COVERAGE

    assert TRAINING_DATASET_CLS.IS_UNICODE == TESTING_DATASET_CLS.IS_UNICODE
    options["is_unicode"] = TRAINING_DATASET_CLS.IS_UNICODE
    options["has_y"] = TRAINING_DATASET_CLS.HAS_Y

    options["has_lvt_trick"] = False
    options["has_learnable_w2v"] = True
    options["is_bidirectional"] = True
    options["beam_decoding"] = True # False for greedy decoding
    options["omit_eos"] = False # omit <eos> and continuously decode until length of sentence reaches MAX_LEN_PREDICT (for DUC testing data)
    options["prediction_bytes_limitation"] = False if TESTING_DATASET_CLS.MAX_BYTE_PREDICT == None else True

    assert options["is_unicode"] == False

    consts = {}
    consts["idx_gpu"] = cudaid

    # Model dimensions come from the active config.
    consts["dim_x"] = cfg.DIM_X
    consts["dim_y"] = cfg.DIM_Y
    consts["len_x"] = cfg.MAX_LEN_X + 1 # plus 1 for eos
    consts["len_y"] = cfg.MAX_LEN_Y + 1
    consts["num_x"] = cfg.MAX_NUM_X
    consts["num_y"] = cfg.NUM_Y
    consts["hidden_size"] = cfg.HIDDEN_SIZE

    consts["lvt_dict_size"] = 200 if options["is_debugging"] else cfg.LVT_DICT_SIZE
    consts["batch_size"] = 5 if options["is_debugging"] else TRAINING_DATASET_CLS.BATCH_SIZE
    if options["is_debugging"]:
        consts["testing_batch_size"] = 1 if options["beam_decoding"] else 2
    else:
        #consts["testing_batch_size"] = 1 if options["beam_decoding"] else TESTING_DATASET_CLS.BATCH_SIZE
        consts["testing_batch_size"] = TESTING_DATASET_CLS.BATCH_SIZE

    consts["min_len_predict"] = TESTING_DATASET_CLS.MIN_LEN_PREDICT
    consts["max_len_predict"] = TESTING_DATASET_CLS.MAX_LEN_PREDICT
    consts["max_byte_predict"] = TESTING_DATASET_CLS.MAX_BYTE_PREDICT
    consts["testing_print_size"] = TESTING_DATASET_CLS.PRINT_SIZE

    consts["top_k"] = 1

    # Optimization / decoding hyper-parameters.
    consts["lr"] = 0.15
    consts["beam_size"] = 4

    consts["max_epoch"] = 300 if options["is_debugging"] else 30
    consts["num_model"] = 1
    consts["print_time"] = 5
    consts["save_epoch"] = 1

    assert consts["dim_x"] == consts["dim_y"]
    assert consts["top_k"] <= cfg.MIN_NUM_X
    assert consts["beam_size"] >= 1

    if options["has_lvt_trick"]:
        assert consts["lvt_dict_size"] != None
        assert consts["testing_batch_size"] <= consts["batch_size"]
        assert consts["lvt_dict_size"] <= cfg.NUM_FREQUENT_WORDS

    modules = {}
    # Load the vocabulary produced at preprocessing time.
    [_, dic, hfw, w2i, i2w, w2w] = pickle.load(open(cfg.cc.TRAINING_DATA_PATH + "dic.pkl", "r"))
    consts["dict_size"] = len(dic)
    modules["dic"] = dic
    modules["w2i"] = w2i
    modules["i2w"] = i2w

    if options["has_lvt_trick"]:
        modules["freq_words"] = hfw
    # lfw_emb/eos_emb are used unconditionally by the decoders below.
    modules["lfw_emb"] = modules["w2i"][cfg.W_UNK]
    modules["eos_emb"] = modules["w2i"][cfg.W_EOS]
    consts["pad_token_idx"] = modules["w2i"][cfg.W_PAD]

    return modules, consts, options
def greedy_decode(flist, batch, model, modules, consts, options):
    """Greedy (argmax) decoding for a batch; writes summaries to disk."""
    testing_batch_size = len(flist)

    dec_result = [[] for i in xrange(testing_batch_size)]
    existence = [True] * testing_batch_size   # per-doc: still decoding?
    num_left = testing_batch_size

    word_emb, dec_state, x_mask, y, len_y = batch

    next_y = torch.LongTensor(np.ones((1, testing_batch_size), dtype="int64")).cuda()

    for step in xrange(consts["max_len_predict"]):
        if num_left == 0:
            break
        y_pred, dec_state = model.decode_once(next_y, word_emb, dec_state, x_mask)
        dict_size = y_pred.shape[-1]
        y_pred = y_pred.view(testing_batch_size, dict_size)
        dec_state = dec_state.view(testing_batch_size, dec_state.shape[-1])

        next_y = torch.argmax(y_pred, 1).view((1, testing_batch_size))
        for idx_doc in xrange(testing_batch_size):
            if existence[idx_doc] == False:
                continue

            idx_max = next_y[0, idx_doc].item()
            if options["has_lvt_trick"]:
                # NOTE(review): lvt_i2i is not defined in this scope; this
                # branch raises NameError when has_lvt_trick is True.
                idx_max = lvt_i2i[idx_max]
                next_y[0, idx_doc] = idx_max
            if idx_max == modules["eos_emb"]:
                existence[idx_doc] = False
                num_left -= 1
            else:
                dec_result[idx_doc].append(str(idx_max))

    # Optionally truncate each prediction at the byte budget (DUC-style).
    if options["prediction_bytes_limitation"]:
        for i in xrange(len(dec_result)):
            sample = dec_result[i]
            b = 0
            for j in xrange(len(sample)):
                b += len(sample[j])
                if b > consts["max_byte_predict"]:
                    dec_result[i] = dec_result[i][0 : j]
                    break

    # Dump predictions (and references, when available) per document.
    for idx_doc in xrange(testing_batch_size):
        fname = str(flist[idx_doc])
        if len(dec_result[idx_doc]) >= consts["min_len_predict"]:
            write_summ("".join((cfg.cc.SUMM_PATH, fname)), dec_result[idx_doc], 1, options)
            write_summ("".join((cfg.cc.BEAM_SUMM_PATH, fname)), dec_result[idx_doc], 1, options, modules["i2w"])
            if options["has_y"]:
                ly = len_y[idx_doc]
                y_true = y[0 : ly, idx_doc].tolist()
                y_true = [str(i) for i in y_true[:-1]] # delete <eos>
                write_summ("".join((cfg.cc.GROUND_TRUTH_PATH, fname)), y_true, 1, options)
                write_summ("".join((cfg.cc.BEAM_GT_PATH, fname)), y_true, 1, options, modules["i2w"])
def beam_decode(fname, batch, model, modules, consts, options):
    """Beam-search decode one document; writes ROUGE files and beam history."""
    fname = str(fname)

    beam_size = consts["beam_size"]
    num_live = 1
    num_dead = 0
    samples = []                       # finished hypotheses (token-id strings)
    sample_scores = np.zeros(beam_size)

    last_traces = [[]]
    last_scores = torch.FloatTensor(np.zeros(1)).cuda()
    last_states = []

    x, word_emb, dec_state, x_mask, y, len_y, ref_sents, max_ext_len, oovs = batch

    next_y = torch.LongTensor(-np.ones((1, num_live, 1), dtype="int64")).cuda()
    x = x.unsqueeze(1)
    word_emb = word_emb.unsqueeze(1)
    x_mask = x_mask.unsqueeze(1)
    dec_state = dec_state.unsqueeze(0)
    if options["cell"] == "lstm":
        dec_state = (dec_state, dec_state)

    for step in xrange(consts["max_len_predict"]):
        # Tile the encoder outputs across the live beams.
        tile_word_emb = word_emb.repeat(1, num_live, 1)
        tile_x_mask = x_mask.repeat(1, num_live, 1)
        tile_x = x.repeat(1, num_live)

        y_pred, dec_state = model.decode_once(tile_x, next_y, tile_word_emb, dec_state, tile_x_mask, max_ext_len)
        dict_size = y_pred.shape[-1]
        y_pred = y_pred.view(num_live, dict_size)
        if options["cell"] == "lstm":
            dec_state = (dec_state[0].view(num_live, dec_state[0].shape[-1]), dec_state[1].view(num_live, dec_state[1].shape[-1]))
        else:
            dec_state = dec_state.view(num_live, dec_state.shape[-1])

        cand_scores = last_scores + torch.log(y_pred)  # larger scores are better
        cand_scores = cand_scores.flatten()
        idx_top_joint_scores = torch.topk(cand_scores, beam_size - num_dead)[1]

        # Decompose flat indices into (beam index, word index).
        # NOTE(review): integer-tensor `/` acts as floor division here;
        # newer torch versions require `//` or torch.div(..., rounding_mode).
        idx_last_traces = idx_top_joint_scores / dict_size
        idx_word_now = idx_top_joint_scores % dict_size
        top_joint_scores = cand_scores[idx_top_joint_scores]

        traces_now = []
        scores_now = np.zeros((beam_size - num_dead))
        states_now = []
        for i, [j, k] in enumerate(zip(idx_last_traces, idx_word_now)):
            if options["has_lvt_trick"]:
                traces_now.append(last_traces[j] + [batch.lvt_i2i[k]])
            else:
                traces_now.append(last_traces[j] + [k])
            scores_now[i] = copy.copy(top_joint_scores[i])
            if options["cell"] == "lstm":
                states_now.append((copy.copy(dec_state[0][j, :]), copy.copy(dec_state[1][j, :])))
            else:
                states_now.append(copy.copy(dec_state[j, :]))

        # Split expanded hypotheses into finished (eos) and live beams.
        num_live = 0
        last_traces = []
        last_scores = []
        last_states = []
        for i in xrange(len(traces_now)):
            if traces_now[i][-1] == modules["eos_emb"] and len(traces_now[i]) >= consts["min_len_predict"]:
                samples.append([str(e.item()) for e in traces_now[i][:-1]])
                sample_scores[num_dead] = scores_now[i]
                num_dead += 1
            else:
                last_traces.append(traces_now[i])
                last_scores.append(scores_now[i])
                last_states.append(states_now[i])
                num_live += 1
        if num_live == 0 or num_dead >= beam_size:
            break

        last_scores = torch.FloatTensor(np.array(last_scores).reshape((num_live, 1))).cuda()
        next_y = np.array([e[-1] for e in last_traces], dtype = "int64").reshape((1, num_live))
        next_y = torch.LongTensor(next_y).cuda()
        if options["cell"] == "lstm":
            h_states = []
            c_states = []
            for state in last_states:
                h_states.append(state[0])
                c_states.append(state[1])
            dec_state = (torch.stack(h_states).view((num_live, h_states[0].shape[-1])),\
                         torch.stack(c_states).view((num_live, c_states[0].shape[-1])))
        else:
            dec_state = torch.stack(last_states).view((num_live, dec_state.shape[-1]))

        assert num_live + num_dead == beam_size

    # Length limit reached: close out any beams still alive.
    if num_live > 0:
        for i in xrange(num_live):
            samples.append([str(e.item()) for e in last_traces[i]])
            sample_scores[num_dead] = last_scores[i]
            num_dead += 1

    #weight by length (length penalty currently disabled)
    for i in xrange(len(sample_scores)):
        sent_len = float(len(samples[i]))
        sample_scores[i] = sample_scores[i] #* math.exp(-sent_len / 10)

    idx_sorted_scores = np.argsort(sample_scores)  # ascending: lowest score first
    if options["has_y"]:
        ly = len_y[0]
        y_true = y[0 : ly].tolist()
        y_true = [str(i) for i in y_true[:-1]] # delete <eos>

    # Keep only hypotheses long enough; fall back to all if none qualify.
    sorted_samples = []
    sorted_scores = []
    filter_idx = []
    for e in idx_sorted_scores:
        if len(samples[e]) >= consts["min_len_predict"]:
            filter_idx.append(e)
    if len(filter_idx) == 0:
        filter_idx = idx_sorted_scores
    for e in filter_idx:
        sorted_samples.append(samples[e])
        sorted_scores.append(sample_scores[e])

    num_samples = len(sorted_samples)
    if len(sorted_samples) == 1:
        sorted_samples = sorted_samples[0]
        num_samples = 1

    # Optionally truncate at a byte budget (DUC-style evaluation).
    if options["prediction_bytes_limitation"]:
        for i in xrange(len(sorted_samples)):
            sample = sorted_samples[i]
            b = 0
            for j in xrange(len(sample)):
                b += len(sample[j])
                if b > consts["max_byte_predict"]:
                    sorted_samples[i] = sorted_samples[i][0 : j]
                    break

    # Best hypothesis is the last (highest-scored) after the ascending sort.
    dec_words = [modules["i2w"][int(e)] for e in sorted_samples[-1]]

    # for rouge
    write_for_rouge(fname, ref_sents, dec_words, cfg)

    # beam search history
    write_summ("".join((cfg.cc.BEAM_SUMM_PATH, fname)), sorted_samples, num_samples, options, modules["i2w"], sorted_scores)
    write_summ("".join((cfg.cc.BEAM_GT_PATH, fname)), y_true, 1, options, modules["i2w"])
    #print "================="
def beam_decode_copy(fname, batch, model, modules, consts, options):
    """Beam-search decoding with the copy mechanism (pointer-generator).

    Like beam_decode, but token ids may exceed the vocabulary size (copied
    OOV words); such ids are mapped back to <unk> before being fed to the
    decoder and resolved via `oovs` when writing the final words.
    """
    fname = str(fname)

    beam_size = consts["beam_size"]
    num_live = 1
    num_dead = 0
    samples = []
    sample_scores = np.zeros(beam_size)

    last_traces = [[]]
    last_scores = torch.FloatTensor(np.zeros(1)).cuda()
    last_states = []

    x, word_emb, dec_state, x_mask, y, len_y, ref_sents, max_ext_len, oovs = batch

    next_y = torch.LongTensor(-np.ones((1, num_live, 1), dtype="int64")).cuda()
    x = x.unsqueeze(1)
    word_emb = word_emb.unsqueeze(1)
    x_mask = x_mask.unsqueeze(1)
    dec_state = dec_state.unsqueeze(0)
    if options["cell"] == "lstm":
        dec_state = (dec_state, dec_state)

    for step in xrange(consts["max_len_predict"]):
        # Tile the encoder outputs across the live beams.
        tile_word_emb = word_emb.repeat(1, num_live, 1)
        tile_x_mask = x_mask.repeat(1, num_live, 1)
        tile_x = x.repeat(1, num_live)

        y_pred, dec_state = model.decode_once(tile_x, next_y, tile_word_emb, dec_state, tile_x_mask, max_ext_len)
        dict_size = y_pred.shape[-1]
        y_pred = y_pred.view(num_live, dict_size)
        if options["cell"] == "lstm":
            dec_state = (dec_state[0].view(num_live, dec_state[0].shape[-1]), dec_state[1].view(num_live, dec_state[1].shape[-1]))
        else:
            dec_state = dec_state.view(num_live, dec_state.shape[-1])

        cand_scores = last_scores + torch.log(y_pred)  # larger scores are better
        cand_scores = cand_scores.flatten()
        idx_top_joint_scores = torch.topk(cand_scores, beam_size - num_dead)[1]

        # Decompose flat indices into (beam index, word index).
        idx_last_traces = idx_top_joint_scores / dict_size
        idx_word_now = idx_top_joint_scores % dict_size
        top_joint_scores = cand_scores[idx_top_joint_scores]

        traces_now = []
        scores_now = np.zeros((beam_size - num_dead))
        states_now = []
        for i, [j, k] in enumerate(zip(idx_last_traces, idx_word_now)):
            traces_now.append(last_traces[j] + [k])
            scores_now[i] = copy.copy(top_joint_scores[i])
            if options["cell"] == "lstm":
                states_now.append((copy.copy(dec_state[0][j, :]), copy.copy(dec_state[1][j, :])))
            else:
                states_now.append(copy.copy(dec_state[j, :]))

        # Split expanded hypotheses into finished (eos) and live beams.
        num_live = 0
        last_traces = []
        last_scores = []
        last_states = []
        for i in xrange(len(traces_now)):
            if traces_now[i][-1] == modules["eos_emb"] and len(traces_now[i]) >= consts["min_len_predict"]:
                samples.append([str(e.item()) for e in traces_now[i][:-1]])
                sample_scores[num_dead] = scores_now[i]
                num_dead += 1
            else:
                last_traces.append(traces_now[i])
                last_scores.append(scores_now[i])
                last_states.append(states_now[i])
                num_live += 1
        if num_live == 0 or num_dead >= beam_size:
            break

        last_scores = torch.FloatTensor(np.array(last_scores).reshape((num_live, 1))).cuda()
        # Copied (extended-vocabulary) ids must be fed back as <unk>.
        next_y = []
        for e in last_traces:
            eid = e[-1].item()
            if eid in modules["i2w"]:
                next_y.append(eid)
            else:
                next_y.append(modules["lfw_emb"]) # unk
        next_y = np.array(next_y).reshape((1, num_live))
        next_y = torch.LongTensor(next_y).cuda()
        if options["cell"] == "lstm":
            h_states = []
            c_states = []
            for state in last_states:
                h_states.append(state[0])
                c_states.append(state[1])
            dec_state = (torch.stack(h_states).view((num_live, h_states[0].shape[-1])),\
                         torch.stack(c_states).view((num_live, c_states[0].shape[-1])))
        else:
            dec_state = torch.stack(last_states).view((num_live, dec_state.shape[-1]))

        assert num_live + num_dead == beam_size

    # Length limit reached: close out any beams still alive.
    if num_live > 0:
        for i in xrange(num_live):
            samples.append([str(e.item()) for e in last_traces[i]])
            sample_scores[num_dead] = last_scores[i]
            num_dead += 1

    #weight by length (length penalty currently disabled)
    for i in xrange(len(sample_scores)):
        sent_len = float(len(samples[i]))
        sample_scores[i] = sample_scores[i] #* math.exp(-sent_len / 10)

    idx_sorted_scores = np.argsort(sample_scores)  # ascending: lowest score first
    if options["has_y"]:
        ly = len_y[0]
        y_true = y[0 : ly].tolist()
        y_true = [str(i) for i in y_true[:-1]] # delete <eos>

    # Keep only hypotheses long enough; fall back to all if none qualify.
    sorted_samples = []
    sorted_scores = []
    filter_idx = []
    for e in idx_sorted_scores:
        if len(samples[e]) >= consts["min_len_predict"]:
            filter_idx.append(e)
    if len(filter_idx) == 0:
        filter_idx = idx_sorted_scores
    for e in filter_idx:
        sorted_samples.append(samples[e])
        sorted_scores.append(sample_scores[e])

    num_samples = len(sorted_samples)
    if len(sorted_samples) == 1:
        sorted_samples = sorted_samples[0]
        num_samples = 1

    # Optionally truncate at a byte budget (DUC-style evaluation).
    if options["prediction_bytes_limitation"]:
        for i in xrange(len(sorted_samples)):
            sample = sorted_samples[i]
            b = 0
            for j in xrange(len(sample)):
                b += len(sample[j])
                if b > consts["max_byte_predict"]:
                    sorted_samples[i] = sorted_samples[i][0 : j]
                    break

    # Resolve ids to words; ids past the vocabulary index into `oovs`.
    dec_words = []
    for e in sorted_samples[-1]:
        e = int(e)
        if e in modules["i2w"]:
            dec_words.append(modules["i2w"][e])
        else:
            dec_words.append(oovs[e - len(modules["i2w"])])

    # for rouge
    write_for_rouge(fname, ref_sents, dec_words, cfg)

    # beam search history
    write_summ_copy("".join((cfg.cc.BEAM_SUMM_PATH, fname)), sorted_samples, num_samples, options, modules["i2w"], oovs, sorted_scores)
    write_summ_copy("".join((cfg.cc.BEAM_GT_PATH, fname)), y_true, 1, options, modules["i2w"], oovs)
    #print "================="
def predict(model, modules, consts, options):
    """
    Decode the whole test set with the trained model and write the outputs.

    Clears the output directories, loads the pickled test set, encodes each
    batch, and decodes it either sample-by-sample with beam search or
    whole-batch with greedy search, printing progress along the way.

    :param model: trained model exposing ``encode``.
    :param modules: shared resources (vocabulary maps, dataset class, ...).
    :param consts: numeric constants (batch sizing, print frequency, ...).
    :param options: runtime flags; ``beam_decoding`` selects the decoder and
        ``has_y`` is set here from the dataset class.
    """
    print "start predicting,"
    options["has_y"] = TESTING_DATASET_CLS.HAS_Y
    if options["beam_decoding"]:
        print "using beam search"
    else:
        print "using greedy search"
    # Start from clean output directories so stale summaries never mix in.
    rebuild_dir(cfg.cc.BEAM_SUMM_PATH)
    rebuild_dir(cfg.cc.BEAM_GT_PATH)
    rebuild_dir(cfg.cc.GROUND_TRUTH_PATH)
    rebuild_dir(cfg.cc.SUMM_PATH)
    print "loading test set..."
    xy_list = pickle.load(open(cfg.cc.TESTING_DATA_PATH + "ibm.pkl", "r"))
    batch_list, num_files, num_batches = datar.batched(len(xy_list), options, consts)
    print "num_files = ", num_files, ", num_batches = ", num_batches
    running_start = time.time()
    partial_num = 0
    total_num = 0
    si = 0  # global sample index across all batches; names beam outputs
    for idx_batch in xrange(num_batches):
        test_idx = batch_list[idx_batch]
        batch_raw = [xy_list[xy_idx] for xy_idx in test_idx]
        batch = datar.get_data(batch_raw, modules, consts, options)
        x, len_x, x_mask, y, len_y, y_mask, oy, x_ext, y_ext, oovs = sort_samples(batch.x, batch.len_x, \
                batch.x_mask, batch.y, batch.len_y, batch.y_mask, \
                batch.original_summarys, batch.x_ext, batch.y_ext, batch.x_ext_words)
        word_emb, dec_state = model.encode(torch.LongTensor(x).cuda(), torch.LongTensor(len_x).cuda(), torch.FloatTensor(x_mask).cuda())
        if options["beam_decoding"]:
            # Beam search decodes one sample at a time, slicing out that
            # sample's encoder outputs, mask, reference and OOV list.
            for idx_s in xrange(word_emb.size(1)):
                inputx = (torch.LongTensor(x_ext[:, idx_s]).cuda(), word_emb[:, idx_s, :], dec_state[idx_s, :],\
                          torch.FloatTensor(x_mask[:, idx_s, :]).cuda(), y[:, idx_s], [len_y[idx_s]], oy[idx_s],\
                          batch.max_ext_len, oovs[idx_s])
                beam_decode_copy(si, inputx, model, modules, consts, options)
                si += 1
        else:
            # Greedy search decodes the whole batch in one call.
            inputx = (word_emb, dec_state, torch.FloatTensor(x_mask).cuda(), y, len_y)
            greedy_decode(test_idx, inputx, model, modules, consts, options)
        testing_batch_size = len(test_idx)
        partial_num += testing_batch_size
        total_num += testing_batch_size
        # Progress report every consts["testing_print_size"] samples.
        if partial_num >= consts["testing_print_size"]:
            print total_num, "summs are generated"
            partial_num = 0
    print si, total_num
def run(existing_model_name = None):
    """
    Entry point: build the model, then either train it or run prediction.

    ``options["is_predicting"]`` selects the mode. In prediction mode an
    existing checkpoint (``existing_model_name`` or a hard-coded default) is
    loaded and :func:`predict` is called. In training mode the model is
    trained for up to ``consts["max_epoch"]`` epochs, checkpointing
    periodically and stopping early once the total epoch cost stops
    decreasing.

    :param existing_model_name: optional checkpoint file name under
        ``cfg.cc.MODEL_PATH`` to load before predicting.
    """
    modules, consts, options = init_modules()
    #use_gpu(consts["idx_gpu"])
    if options["is_predicting"]:
        need_load_model = True
        training_model = False
        predict_model = True
    else:
        need_load_model = False
        training_model = True
        predict_model = False
    print_basic_info(modules, consts, options)
    if training_model:
        print "loading train set..."
        # Debug runs train on the small test pickle instead of the full set.
        if options["is_debugging"]:
            xy_list = pickle.load(open(cfg.cc.TESTING_DATA_PATH + "test.pkl", "r"))
        else:
            xy_list = pickle.load(open(cfg.cc.TRAINING_DATA_PATH + "train.pkl", "r"))
        batch_list, num_files, num_batches = datar.batched(len(xy_list), options, consts)
        print "num_files = ", num_files, ", num_batches = ", num_batches
    running_start = time.time()
    if True: #TODO: refactor
        print "compiling model ..."
        model = Model(modules, consts, options)
        #criterion = nn.NLLLoss(ignore_index=consts["pad_token_idx"])
        if options["cuda"]:
            model.cuda()
            #criterion.cuda()
            #model = nn.DataParallel(model)
        optimizer = torch.optim.Adagrad(model.parameters(), lr=consts["lr"], initial_accumulator_value=0.1)
        model_name = "cnndm.s2s"
        existing_epoch = 0
        if need_load_model:
            if existing_model_name == None:
                existing_model_name = "cnndm.s2s.gpu5.epoch5.5"
            print "loading existed model:", existing_model_name
            model, optimizer = load_model(cfg.cc.MODEL_PATH + existing_model_name, model, optimizer)
        if training_model:
            print "start training model "
            # How many samples between progress reports (Python 2 int division).
            print_size = num_files / consts["print_time"] if num_files >= consts["print_time"] else num_files
            last_total_error = float("inf")
            print "max epoch:", consts["max_epoch"]
            for epoch in xrange(0, consts["max_epoch"]):
                '''
                if not options["is_debugging"] and epoch == 5:
                    consts["lr"] *= 0.1
                    #adjust
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = consts["lr"]
                '''
                print "epoch: ", epoch + existing_epoch
                num_partial = 1
                total_error = 0.0
                partial_num_files = 0
                epoch_start = time.time()
                partial_start = time.time()
                # shuffle the trainset
                batch_list, num_files, num_batches = datar.batched(len(xy_list), options, consts)
                used_batch = 0.
                for idx_batch in xrange(num_batches):
                    train_idx = batch_list[idx_batch]
                    batch_raw = [xy_list[xy_idx] for xy_idx in train_idx]
                    # Skip ragged trailing batches so batch shapes stay fixed.
                    if len(batch_raw) != consts["batch_size"]:
                        continue
                    local_batch_size = len(batch_raw)
                    batch = datar.get_data(batch_raw, modules, consts, options)
                    x, len_x, x_mask, y, len_y, y_mask, oy, x_ext, y_ext, oovs = sort_samples(batch.x, batch.len_x, \
                            batch.x_mask, batch.y, batch.len_y, batch.y_mask, \
                            batch.original_summarys, batch.x_ext, batch.y_ext, batch.x_ext_words)
                    model.zero_grad()
                    y_pred, cost = model(torch.LongTensor(x).cuda(), torch.LongTensor(len_x).cuda(),\
                                   torch.LongTensor(y).cuda(), torch.FloatTensor(x_mask).cuda(), \
                                   torch.FloatTensor(y_mask).cuda(), torch.LongTensor(x_ext).cuda(), torch.LongTensor(y_ext).cuda(), \
                                   batch.max_ext_len, None)
                    cost.backward()
                    # Clip gradient norm at 5 to stabilise training.
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
                    optimizer.step()
                    cost = cost.item()
                    total_error += cost
                    used_batch += 1
                    partial_num_files += consts["batch_size"]
                    # Integer division: fires once every print_size samples,
                    # printing progress and saving an intra-epoch checkpoint.
                    if partial_num_files / print_size == 1 and idx_batch < num_batches:
                        print idx_batch + 1, "/" , num_batches, "batches have been processed,",
                        print "average cost until now:", "cost =", total_error / used_batch, ",",
                        print "time:", time.time() - partial_start
                        partial_num_files = 0
                        if not options["is_debugging"]:
                            print "save model... ",
                            save_model(cfg.cc.MODEL_PATH + model_name + ".gpu" + str(consts["idx_gpu"]) + ".epoch" + str(epoch / consts["save_epoch"] + existing_epoch) + "." + str(num_partial), model, optimizer)
                            print "finished"
                        num_partial += 1
                print "in this epoch, total average cost =", total_error / used_batch, ",",
                print "time:", time.time() - epoch_start
                # Show a decoded sample from the last processed batch.
                print_sent_dec(y_pred, y_ext, y_mask, oovs, modules, consts, options, local_batch_size)
                # Early stopping: continue only while total epoch cost improves.
                if last_total_error > total_error or options["is_debugging"]:
                    last_total_error = total_error
                    if not options["is_debugging"]:
                        print "save model... ",
                        save_model(cfg.cc.MODEL_PATH + model_name + ".gpu" + str(consts["idx_gpu"]) + ".epoch" + str(epoch / consts["save_epoch"] + existing_epoch) + "." + str(num_partial), model, optimizer)
                        print "finished"
                else:
                    print "optimization finished"
                    break
            print "save final model... ",
            save_model(cfg.cc.MODEL_PATH + model_name + "final.gpu" + str(consts["idx_gpu"]) + ".epoch" + str(epoch / consts["save_epoch"] + existing_epoch) + "." + str(num_partial), model, optimizer)
            print "finished"
        else:
            print "skip training model"
        if predict_model:
            predict(model, modules, consts, options)
        print "Finished, time:", time.time() - running_start
if __name__ == "__main__":
    # Print full numpy arrays (no truncation) in any debug output.
    np.set_printoptions(threshold = np.inf)
    # Optional first CLI argument: name of an existing checkpoint to load.
    existing_model_name = sys.argv[1] if len(sys.argv) > 1 else None
    run(existing_model_name)
| 40.397929 | 211 | 0.587462 |
953e614df603a782bc861b2188ed97f796d8d6d2 | 471 | py | Python | openbook_posts/migrations/0016_auto_20190214_1525.py | TamaraAbells/okuna-api | f87d8e80d2f182c01dbce68155ded0078ee707e4 | [
"MIT"
] | 164 | 2019-07-29T17:59:06.000Z | 2022-03-19T21:36:01.000Z | openbook_posts/migrations/0016_auto_20190214_1525.py | TamaraAbells/okuna-api | f87d8e80d2f182c01dbce68155ded0078ee707e4 | [
"MIT"
] | 188 | 2019-03-16T09:53:25.000Z | 2019-07-25T14:57:24.000Z | openbook_posts/migrations/0016_auto_20190214_1525.py | TamaraAbells/okuna-api | f87d8e80d2f182c01dbce68155ded0078ee707e4 | [
"MIT"
] | 80 | 2019-08-03T17:49:08.000Z | 2022-02-28T16:56:33.000Z | # Generated by Django 2.1.5 on 2019-02-14 14:25
from django.db import migrations
import imagekit.models.fields
class Migration(migrations.Migration):
dependencies = [
('openbook_posts', '0015_post_community'),
]
operations = [
migrations.AlterField(
model_name='postimage',
name='image',
field=imagekit.models.fields.ProcessedImageField(null=True, upload_to='', verbose_name='image'),
),
]
| 23.55 | 108 | 0.641189 |
953e8e8b09e196ff5d7362c6a2eeb02c08425111 | 15,517 | py | Python | blackjack.py | hackerboy9/blackjack | 1346642e353719ab68c0dc3573aa33b688431bf8 | [
"MIT"
] | null | null | null | blackjack.py | hackerboy9/blackjack | 1346642e353719ab68c0dc3573aa33b688431bf8 | [
"MIT"
] | 1 | 2020-10-25T10:16:37.000Z | 2020-10-25T10:16:37.000Z | blackjack.py | hackerboy9/blackjack | 1346642e353719ab68c0dc3573aa33b688431bf8 | [
"MIT"
] | 2 | 2017-07-16T08:00:29.000Z | 2020-10-06T14:48:18.000Z | from collections import MutableMapping, MutableSet, namedtuple
from operator import itemgetter
class Node(namedtuple("Node", "value, left, right, red")):
__slots__ = ()
def size(self):
"""
Recursively find size of a tree. Slow.
"""
if self is NULL:
return 0
return 1 + self.left.size() + self.right.size()
def find(self, value, key):
"""
Find a value in a node, using a key function.
"""
while self is not NULL:
direction = cmp(key(value), key(self.value))
if direction < 0:
self = self.left
elif direction > 0:
self = self.right
elif direction == 0:
return self.value
def find_prekeyed(self, value, key):
"""
Find a value in a node, using a key function. The value is already a
key.
"""
while self is not NULL:
direction = cmp(value, key(self.value))
if direction < 0:
self = self.left
elif direction > 0:
self = self.right
elif direction == 0:
return self.value
def rotate_left(self):
"""
Rotate the node to the left.
"""
right = self.right
new = self._replace(right=self.right.left, red=True)
top = right._replace(left=new, red=self.red)
return top
def rotate_right(self):
"""
Rotate the node to the right.
"""
left = self.left
new = self._replace(left=self.left.right, red=True)
top = left._replace(right=new, red=self.red)
return top
def flip(self):
"""
Flip colors of a node and its children.
"""
left = self.left._replace(red=not self.left.red)
right = self.right._replace(red=not self.right.red)
top = self._replace(left=left, right=right, red=not self.red)
return top
def balance(self):
"""
Balance a node.
The balance is inductive and relies on all subtrees being balanced
recursively or by construction. If the subtrees are not balanced, then
this will not fix them.
"""
# Always lean left with red nodes.
if self.right.red:
self = self.rotate_left()
# Never permit red nodes to have red children. Note that if the left-hand
# node is NULL, it will short-circuit and fail this test, so we don't have
# to worry about a dereference here.
if self.left.red and self.left.left.red:
self = self.rotate_right()
# Finally, move red children on both sides up to the next level, reducing
# the total redness.
if self.left.red and self.right.red:
self = self.flip()
return self
def insert(self, value, key):
"""
Insert a value into a tree rooted at the given node, and return
whether this was an insertion or update.
Balances the tree during insertion.
An update is performed instead of an insertion if a value in the tree
compares equal to the new value.
"""
# Base case: Insertion into the empty tree is just creating a new node
# with no children.
if self is NULL:
return Node(value, NULL, NULL, True), True
# Recursive case: Insertion into a non-empty tree is insertion into
# whichever of the two sides is correctly compared.
direction = cmp(key(value), key(self.value))
if direction < 0:
left, insertion = self.left.insert(value, key)
self = self._replace(left=left)
elif direction > 0:
right, insertion = self.right.insert(value, key)
self = self._replace(right=right)
elif direction == 0:
# Exact hit on an existing node (this node, in fact). In this
# case, perform an update.
self = self._replace(value=value)
insertion = False
# And balance on the way back up.
return self.balance(), insertion
def move_red_left(self):
"""
Shuffle red to the left of a tree.
"""
self = self.flip()
if self.right is not NULL and self.right.left.red:
self = self._replace(right=self.right.rotate_right())
self = self.rotate_left().flip()
return self
def move_red_right(self):
"""
Shuffle red to the right of a tree.
"""
self = self.flip()
if self.left is not NULL and self.left.left.red:
self = self.rotate_right().flip()
return self
def delete_min(self):
"""
Delete the left-most value from a tree.
"""
# Base case: If there are no nodes lesser than this node, then this is the
# node to delete.
if self.left is NULL:
return NULL, self.value
# Acquire more reds if necessary to continue the traversal. The
# double-deep check is fine because NULL is red.
if not self.left.red and not self.left.left.red:
self = self.move_red_left()
# Recursive case: Delete the minimum node of all nodes lesser than this
# node.
left, value = self.left.delete_min()
self = self._replace(left=left)
return self.balance(), value
def delete_max(self):
"""
Delete the right-most value from a tree.
"""
# Attempt to rotate left-leaning reds to the right.
if self.left.red:
self = self.rotate_right()
# Base case: If there are no selfs greater than this self, then this is
# the self to delete.
if self.right is NULL:
return NULL, self.value
# Acquire more reds if necessary to continue the traversal. NULL is
# red so this check doesn't need to check for NULL.
if not self.right.red and not self.right.left.red:
self = self.move_red_right()
# Recursive case: Delete the maximum self of all selfs greater than this
# self.
right, value = self.right.delete_max()
self = self._replace(right=right)
return self.balance(), value
def delete(self, value, key):
"""
Delete a value from a tree.
"""
# Base case: The empty tree cannot possibly have the desired value.
if self is NULL:
raise KeyError(value)
direction = cmp(key(value), key(self.value))
# Because we lean to the left, the left case stands alone.
if direction < 0:
if (not self.left.red and
self.left is not NULL and
not self.left.left.red):
self = self.move_red_left()
# Delete towards the left.
left = self.left.delete(value, key)
self = self._replace(left=left)
else:
# If we currently lean to the left, lean to the right for now.
if self.left.red:
self = self.rotate_right()
# Best case: The node on our right (which we just rotated there) is a
# red link and also we were just holding the node to delete. In that
# case, we just rotated NULL into our current node, and the node to
# the right is the lone matching node to delete.
if direction == 0 and self.right is NULL:
return NULL
# No? Okay. Move more reds to the right so that we can continue to
# traverse in that direction. At *this* spot, we do have to confirm
# that node.right is not NULL...
if (not self.right.red and
self.right is not NULL and
not self.right.left.red):
self = self.move_red_right()
if direction > 0:
# Delete towards the right.
right = self.right.delete(value, key)
self = self._replace(right=right)
else:
# Annoying case: The current node was the node to delete all
# along! Use a right-handed minimum deletion. First find the
# replacement value to rebuild the current node with, then delete
# the replacement value from the right-side tree. Finally, create
# the new node with the old value replaced and the replaced value
# deleted.
rnode = self.right
while rnode is not NULL:
rnode = rnode.left
right, replacement = self.right.delete_min()
self = self._replace(value=replacement, right=right)
return self.balance()
NULL = Node(None, None, None, False)
class BJ(MutableSet):
    """
    A red-black tree.
    Blackjacks are based on traditional self-balancing tree theory, and have
    logarithmic time and space bounds on all mutations in the worst case, and
    linear bounds on iteration.
    Blackjacks are mutable sets. See ``collections.MutableSet`` for a precise
    definition of what this class is capable of.
    Iteration on blackjacks is always ordered according to the key function
    used to create the blackjack.
    In addition to the standard methods, blackjacks can also pop their minimum
    and maximum values easily, and the ``find()`` method can retrieve the
    stored value for a key value.
    """
    # Class-level defaults: an empty tree and a cached element count so that
    # __len__ is O(1) instead of walking the tree.
    root = NULL
    _len = 0
    def __init__(self, iterable=None, key=None):
        # Default key is identity: elements order by their own comparison.
        if key is None:
            self._key = lambda v: v
        else:
            self._key = key
        if iterable is not None:
            for item in iterable:
                self.add(item)
    def __repr__(self):
        return "BJ([%s])" % ", ".join(repr(i) for i in self)
    def __contains__(self, value):
        return self.root.find(value, self._key) is not None
    def __len__(self):
        return self._len
    def __iter__(self):
        # Iterative in-order traversal: descend left pushing ancestors, then
        # yield and continue into the right subtree. Yields in key order.
        node = self.root
        stack = []
        while stack or node is not NULL:
            if node is not NULL:
                stack.append(node)
                node = node.left
            else:
                node = stack.pop()
                yield node.value
                node = node.right
    def add(self, value):
        # Node.insert reports whether a new node was created (True) or an
        # equal-keyed value was updated in place (False); bool adds as 0/1.
        self.root, insertion = self.root.insert(value, self._key)
        self._len += insertion
    def discard(self, value):
        # NOTE(review): Node.delete raises KeyError for a missing value, so
        # this diverges from the MutableSet contract (discard should be a
        # no-op on missing values). The tests below pin this behavior, so it
        # appears intentional; the decrement only runs on success.
        self.root = self.root.delete(value, self._key)
        self._len -= 1
    def find(self, value):
        """
        Find the actual stored value for a given key value.
        """
        return self.root.find(value, self._key)
    def pop_max(self):
        """
        Remove the maximum value and return it.
        """
        if self.root is NULL:
            raise KeyError("pop from an empty blackjack")
        self.root, value = self.root.delete_max()
        self._len -= 1
        return value
    def pop_min(self):
        """
        Remove the minimum value and return it.
        """
        if self.root is NULL:
            raise KeyError("pop from an empty blackjack")
        self.root, value = self.root.delete_min()
        self._len -= 1
        return value
class Deck(MutableMapping):
    """
    A mutable mapping based on a blackjack.
    Like blackjacks, decks are powered by red-black trees and have the same
    bounds on operations.
    """
    def __init__(self, mapping=None):
        # Store (key, value) pairs ordered by the key, i.e. the first item.
        self._bj = BJ(mapping, key=itemgetter(0))
    def __repr__(self):
        pairs = ("%r: %r" % pair for pair in self.iteritems())
        return "Deck({%s})" % ", ".join(pairs)
    def __len__(self):
        return len(self._bj)
    def __iter__(self):
        return self.iterkeys()
    def __getitem__(self, key):
        # Messy: search the tree with the bare key, then unwrap the pair.
        found = self._bj.root.find_prekeyed(key, self._bj._key)
        if found is None:
            raise KeyError(key)
        return found[1]
    def __setitem__(self, key, value):
        self._bj.add((key, value))
    def __delitem__(self, key):
        # Blah. Look the stored value up first so discard sees the full pair.
        self._bj.discard((key, self[key]))
    def iteritems(self):
        return iter(self._bj)
    def iterkeys(self):
        for pair in self.iteritems():
            yield pair[0]
    def itervalues(self):
        for pair in self.iteritems():
            yield pair[1]
from unittest import TestCase
class TestTrees(TestCase):
    """Unit tests for the low-level Node balancing rotations."""
    def test_balance_right(self):
        # A right-leaning red link must be rotated to the left.
        unbalanced = Node(1, NULL, Node(2, NULL, NULL, True), False)
        expected = Node(2, Node(1, NULL, NULL, True), NULL, False)
        self.assertEqual(unbalanced.balance(), expected)
    def test_balance_four(self):
        # Two red children form a 4-node and get their colors flipped.
        unbalanced = Node(2, Node(1, NULL, NULL, True),
                          Node(3, NULL, NULL, True), False)
        expected = Node(2, Node(1, NULL, NULL, False),
                        Node(3, NULL, NULL, False), True)
        self.assertEqual(unbalanced.balance(), expected)
    def test_balance_left_four(self):
        # Two consecutive left-leaning reds rotate right, then flip.
        unbalanced = Node(3, Node(2, Node(1, NULL, NULL, True), NULL, True),
                          NULL, False)
        expected = Node(2, Node(1, NULL, NULL, False),
                        Node(3, NULL, NULL, False), True)
        self.assertEqual(unbalanced.balance(), expected)
class TestBlackjack(TestCase):
    """
    Unit tests for the BJ mutable set.

    Note that the discard tests below pin the non-standard behavior of
    raising KeyError on a missing value (MutableSet.discard is normally a
    no-op); see the NOTE on ``BJ.discard``.
    """
    def test_len_single(self):
        bj = BJ([1])
        self.assertEqual(1, len(bj))
    def test_len_many(self):
        bj = BJ(range(10))
        self.assertEqual(10, len(bj))
    def test_len_many_duplicate(self):
        # Re-adding existing values updates in place; length is unchanged.
        bj = BJ(range(10))
        bj.add(0)
        bj.add(5)
        bj.add(9)
        self.assertEqual(10, len(bj))
    def test_len_after_discard(self):
        bj = BJ(range(10))
        bj.discard(0)
        self.assertEqual(9, len(bj))
    def test_contains_single(self):
        bj = BJ([1])
        self.assertTrue(1 in bj)
    def test_contains_several(self):
        bj = BJ([1, 2, 3])
        self.assertTrue(1 in bj)
        self.assertTrue(2 in bj)
        self.assertTrue(3 in bj)
    def test_iter_single(self):
        l = [1]
        bj = BJ(l)
        self.assertEqual(list(iter(bj)), l)
    def test_iter_several(self):
        # Iteration yields values in sorted (key) order.
        l = range(10)
        bj = BJ(l)
        self.assertEqual(list(iter(bj)), l)
    def test_discard(self):
        bj = BJ([1])
        bj.discard(1)
        self.assertTrue(1 not in bj)
    def test_discard_missing_empty(self):
        bj = BJ()
        self.assertRaises(KeyError, bj.discard, 2)
    def test_discard_missing(self):
        bj = BJ([1])
        self.assertRaises(KeyError, bj.discard, 2)
    def test_hashproof(self):
        """
        Generate around 32MiB of numeric data and insert it into a single
        tree.
        This is a time-sensitive test that should complete in a few seconds
        instead of taking hours.
        See http://bugs.python.org/issue13703#msg150620 for context.
        """
        g = ((x*(2**64 - 1), hash(x*(2**64 - 1))) for x in xrange(1, 10000))
        bj = BJ(g)
class TestDeck(TestCase):
    """Unit tests for the Deck mapping wrapper."""
    def test_get_set_single(self):
        deck = Deck()
        deck["test"] = "value"
        self.assertEqual(deck["test"], "value")
    def test_get_set_several(self):
        deck = Deck()
        pairs = [("first", "second"), ("third", "fourth"), ("fifth", "sixth")]
        for key, value in pairs:
            deck[key] = value
        for key, value in pairs:
            self.assertEqual(deck[key], value)
| 29.783109 | 82 | 0.566475 |
9540c7d295e0a61b349ecd6b4bc768783ff0138e | 45,052 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/linkOAM_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/linkOAM_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/linkOAM_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LinkOAM(Base):
__slots__ = ()
_SDM_NAME = 'linkOAM'
_SDM_ATT_MAP = {
'PacketSubtype': 'linkOAM.header.packet.subtype-1',
'PacketFlags': 'linkOAM.header.packet.flags-2',
'InformationOAMPDUCode': 'linkOAM.header.packet.pduType.informationOAMPDU.code-3',
'LocalInfoTLVType': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.type-4',
'LocalInfoTLVLength': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.length-5',
'LocalInfoTLVVersion': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.version-6',
'LocalInfoTLVRevision': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.revision-7',
'LocalInfoTLVState': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.state-8',
'LocalInfoTLVOamConfig': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.oamConfig-9',
'LocalInfoTLVOamPDUConfig': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.oamPDUConfig-10',
'LocalInfoTLVOui': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.oui-11',
'LocalInfoTLVVendorInfo': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.localInfoTLV.vendorInfo-12',
'RemoteInfoTLVType': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.type-13',
'RemoteInfoTLVLength': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.length-14',
'RemoteInfoTLVVersion': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.version-15',
'RemoteInfoTLVRevision': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.revision-16',
'RemoteInfoTLVState': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.state-17',
'RemoteInfoTLVOamConfig': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.oamConfig-18',
'RemoteInfoTLVOamPDUConfig': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.oamPDUConfig-19',
'RemoteInfoTLVOui': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.oui-20',
'RemoteInfoTLVVendorInfo': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.remoteInfoTLV.vendorInfo-21',
'OrganizationSpecificInfoTLVType': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.organizationSpecificInfoTLV.type-22',
'OrganizationSpecificInfoTLVLength': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.organizationSpecificInfoTLV.length-23',
'OrganizationSpecificInfoTLVOui': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.organizationSpecificInfoTLV.oui-24',
'OrganizationSpecificInfoTLVValueLength': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.organizationSpecificInfoTLV.valueLength-25',
'OrganizationSpecificInfoTLVValue': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.organizationSpecificInfoTLV.value-26',
'ReservedTLVType': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.reservedTLV.type-27',
'ReservedTLVLength': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.reservedTLV.length-28',
'ReservedTLVValueLength': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.reservedTLV.valueLength-29',
'ReservedTLVValue': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.reservedTLV.value-30',
'EndTLVType': 'linkOAM.header.packet.pduType.informationOAMPDU.informationTLV.tlvType.endTLV.type-31',
'LoopbackControlOAMPDUCode': 'linkOAM.header.packet.pduType.loopbackControlOAMPDU.code-32',
'LoopbackControlOAMPDULoopbackCommand': 'linkOAM.header.packet.pduType.loopbackControlOAMPDU.loopbackCommand-33',
'OrganizationSpecificOAMPDUCode': 'linkOAM.header.packet.pduType.organizationSpecificOAMPDU.code-34',
'OrganizationSpecificOAMPDUOui': 'linkOAM.header.packet.pduType.organizationSpecificOAMPDU.oui-35',
'OrganizationSpecificOAMPDUValueLength': 'linkOAM.header.packet.pduType.organizationSpecificOAMPDU.valueLength-36',
'OrganizationSpecificOAMPDUValue': 'linkOAM.header.packet.pduType.organizationSpecificOAMPDU.value-37',
'EventNotificationOAMPDUCode': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.code-38',
'EventNotificationOAMPDUSequenceNumber': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.sequenceNumber-39',
'TlvtypeEndTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.endTLV.type-40',
'ErroredSymbolPeriodEventTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.type-41',
'ErroredSymbolPeriodEventTLVLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.length-42',
'ErroredSymbolPeriodEventTLVTimestamp': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.timestamp-43',
'ErroredSymbolPeriodEventTLVSymbolWindow': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.symbolWindow-44',
'ErroredSymbolPeriodEventTLVSymbolThreshold': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.symbolThreshold-45',
'ErroredSymbolPeriodEventTLVSymbols': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.symbols-46',
'ErroredSymbolPeriodEventTLVErrorRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.errorRunningTotal-47',
'ErroredSymbolPeriodEventTLVEventRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredSymbolPeriodEventTLV.eventRunningTotal-48',
'ErroredFrameEventTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.type-49',
'ErroredFrameEventTLVLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.length-50',
'ErroredFrameEventTLVTimestamp': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.timestamp-51',
'ErroredFrameEventTLVFrameWindow': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.frameWindow-52',
'ErroredFrameEventTLVFrameThreshold': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.frameThreshold-53',
'ErroredFrameEventTLVFrames': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.frames-54',
'ErroredFrameEventTLVErrorRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.errorRunningTotal-55',
'ErroredFrameEventTLVEventRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFrameEventTLV.eventRunningTotal-56',
'ErroredFramesPeriodEventTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.type-57',
'ErroredFramesPeriodEventTLVLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.length-58',
'ErroredFramesPeriodEventTLVTimestamp': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.timestamp-59',
'ErroredFramesPeriodEventTLVFrameWindow': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.frameWindow-60',
'ErroredFramesPeriodEventTLVFrameThreshold': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.frameThreshold-61',
'ErroredFramesPeriodEventTLVFrames': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.frames-62',
'ErroredFramesPeriodEventTLVErrorRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.errorRunningTotal-63',
'ErroredFramesPeriodEventTLVEventRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesPeriodEventTLV.eventRunningTotal-64',
'ErroredFramesSecondsSummaryEventTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.type-65',
'ErroredFramesSecondsSummaryEventTLVLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.length-66',
'ErroredFramesSecondsSummaryEventTLVTimestamp': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.timestamp-67',
'ErroredFramesSecondsSummaryEventTLVFrameSecondsWindow': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.frameSecondsWindow-68',
'ErroredFramesSecondsSummaryEventTLVFrameSecondsThreshold': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.frameSecondsThreshold-69',
'ErroredFramesSecondsSummaryEventTLVFrameSeconds': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.frameSeconds-70',
'ErroredFramesSecondsSummaryEventTLVErrorRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.errorRunningTotal-71',
'ErroredFramesSecondsSummaryEventTLVEventRunningTotal': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.erroredFramesSecondsSummaryEventTLV.eventRunningTotal-72',
'OrganizationSpecificEventTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.organizationSpecificEventTLV.type-73',
'OrganizationSpecificEventTLVLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.organizationSpecificEventTLV.length-74',
'OrganizationSpecificEventTLVOui': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.organizationSpecificEventTLV.oui-75',
'OrganizationSpecificEventTLVValueLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.organizationSpecificEventTLV.valueLength-76',
'OrganizationSpecificEventTLVValue': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.organizationSpecificEventTLV.value-77',
'TlvtypeReservedTLVType': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.reservedTLV.type-78',
'TlvtypeReservedTLVLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.reservedTLV.length-79',
'TlvtypeReservedTLVValueLength': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.reservedTLV.valueLength-80',
'TlvtypeReservedTLVValue': 'linkOAM.header.packet.pduType.eventNotificationOAMPDU.eventTVL.tlvType.reservedTLV.value-81',
'VariableRequestOAMPDUCode': 'linkOAM.header.packet.pduType.variableRequestOAMPDU.code-82',
'VariableDescriptorBranch': 'linkOAM.header.packet.pduType.variableRequestOAMPDU.descriptors.tlvType.variableDescriptor.branch-83',
'VariableDescriptorLeaf': 'linkOAM.header.packet.pduType.variableRequestOAMPDU.descriptors.tlvType.variableDescriptor.leaf-84',
'EndDescriptorEndOfDescriptor': 'linkOAM.header.packet.pduType.variableRequestOAMPDU.descriptors.tlvType.endDescriptor.endOfDescriptor-85',
'VariableResponseOAMPDUCode': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.code-86',
'VariableContainerBranch': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.containers.tlvType.variableContainer.branch-87',
'VariableContainerLeaf': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.containers.tlvType.variableContainer.leaf-88',
'VariableContainerWidth': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.containers.tlvType.variableContainer.width-89',
'VariableContainerValueLength': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.containers.tlvType.variableContainer.valueLength-90',
'VariableContainerValue': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.containers.tlvType.variableContainer.value-91',
'EndContainerEndOfContainer': 'linkOAM.header.packet.pduType.variableResponseOAMPDU.containers.tlvType.endContainer.endOfContainer-92',
'HeaderFcs': 'linkOAM.header.fcs-93',
}
    def __init__(self, parent, list_op=False):
        """Forward construction to the generated-protocol base class.

        parent  -- owning node in the ixnetwork_restpy object hierarchy.
        list_op -- passed through unchanged to the base class; semantics are
                   defined there (presumably list-operation mode -- confirm in base).
        """
        super(LinkOAM, self).__init__(parent, list_op)
    @property
    def PacketSubtype(self):
        """
        Display Name: Sub Type
        Default Value: 0x03
        Value Format: hex
        """
        # Deferred function-level import -- presumably avoids a circular import
        # at module load time (pattern repeated in every generated accessor); confirm.
        from ixnetwork_restpy.multivalue import Multivalue
        # Resolve the REST attribute via the generated _SDM_ATT_MAP key and wrap
        # it in a Multivalue accessor object.
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PacketSubtype']))
@property
def PacketFlags(self):
"""
Display Name: Flags
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PacketFlags']))
@property
def InformationOAMPDUCode(self):
"""
Display Name: Code
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InformationOAMPDUCode']))
@property
def LocalInfoTLVType(self):
"""
Display Name: Type
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVType']))
@property
def LocalInfoTLVLength(self):
"""
Display Name: Length
Default Value: 0x10
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVLength']))
@property
def LocalInfoTLVVersion(self):
"""
Display Name: OAM Version
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVVersion']))
@property
def LocalInfoTLVRevision(self):
"""
Display Name: Revision
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVRevision']))
@property
def LocalInfoTLVState(self):
"""
Display Name: State
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVState']))
@property
def LocalInfoTLVOamConfig(self):
"""
Display Name: OAM Configuration
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVOamConfig']))
@property
def LocalInfoTLVOamPDUConfig(self):
"""
Display Name: OAMPDU Configuration
Default Value: 0x5DC
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVOamPDUConfig']))
@property
def LocalInfoTLVOui(self):
"""
Display Name: OUI
Default Value: 0x000100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVOui']))
@property
def LocalInfoTLVVendorInfo(self):
"""
Display Name: Vendor Specific Information
Default Value: 0x00000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInfoTLVVendorInfo']))
@property
def RemoteInfoTLVType(self):
"""
Display Name: Type
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVType']))
@property
def RemoteInfoTLVLength(self):
"""
Display Name: Length
Default Value: 0x10
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVLength']))
@property
def RemoteInfoTLVVersion(self):
"""
Display Name: OAM Version
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVVersion']))
@property
def RemoteInfoTLVRevision(self):
"""
Display Name: Revision
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVRevision']))
@property
def RemoteInfoTLVState(self):
"""
Display Name: State
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVState']))
@property
def RemoteInfoTLVOamConfig(self):
"""
Display Name: OAM Configuration
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVOamConfig']))
@property
def RemoteInfoTLVOamPDUConfig(self):
"""
Display Name: OAMPDU Configuration
Default Value: 0x5DC
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVOamPDUConfig']))
@property
def RemoteInfoTLVOui(self):
"""
Display Name: OUI
Default Value: 0x000100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVOui']))
@property
def RemoteInfoTLVVendorInfo(self):
"""
Display Name: Vendor Specific Information
Default Value: 0x00000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInfoTLVVendorInfo']))
@property
def OrganizationSpecificInfoTLVType(self):
"""
Display Name: Type
Default Value: 0xFE
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificInfoTLVType']))
@property
def OrganizationSpecificInfoTLVLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificInfoTLVLength']))
@property
def OrganizationSpecificInfoTLVOui(self):
"""
Display Name: Organizationally Unique Identifier
Default Value: 0x000100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificInfoTLVOui']))
@property
def OrganizationSpecificInfoTLVValueLength(self):
"""
Display Name: Organization Specific Value Len
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificInfoTLVValueLength']))
@property
def OrganizationSpecificInfoTLVValue(self):
"""
Display Name: Organization Specific Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificInfoTLVValue']))
@property
def ReservedTLVType(self):
"""
Display Name: Type
Default Value: 0xFF
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReservedTLVType']))
@property
def ReservedTLVLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReservedTLVLength']))
@property
def ReservedTLVValueLength(self):
"""
Display Name: Value Len
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReservedTLVValueLength']))
@property
def ReservedTLVValue(self):
"""
Display Name: Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReservedTLVValue']))
@property
def EndTLVType(self):
"""
Display Name: end_of_tlv
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndTLVType']))
@property
def LoopbackControlOAMPDUCode(self):
"""
Display Name: Code
Default Value: 0x04
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LoopbackControlOAMPDUCode']))
@property
def LoopbackControlOAMPDULoopbackCommand(self):
"""
Display Name: Loopback Command
Default Value: 1
Value Format: decimal
Available enum values: Enable OAM Remote Loopback, 1, Disable OAM Remote Loopback, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LoopbackControlOAMPDULoopbackCommand']))
@property
def OrganizationSpecificOAMPDUCode(self):
"""
Display Name: Code
Default Value: 0xFE
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificOAMPDUCode']))
@property
def OrganizationSpecificOAMPDUOui(self):
"""
Display Name: OUI
Default Value: 0x000100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificOAMPDUOui']))
@property
def OrganizationSpecificOAMPDUValueLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificOAMPDUValueLength']))
@property
def OrganizationSpecificOAMPDUValue(self):
"""
Display Name: Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificOAMPDUValue']))
@property
def EventNotificationOAMPDUCode(self):
"""
Display Name: Code
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EventNotificationOAMPDUCode']))
@property
def EventNotificationOAMPDUSequenceNumber(self):
"""
Display Name: Sequence Number
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EventNotificationOAMPDUSequenceNumber']))
@property
def TlvtypeEndTLVType(self):
"""
Display Name: end_of_tlv
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvtypeEndTLVType']))
@property
def ErroredSymbolPeriodEventTLVType(self):
"""
Display Name: Type
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVType']))
@property
def ErroredSymbolPeriodEventTLVLength(self):
"""
Display Name: Length
Default Value: 0x28
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVLength']))
@property
def ErroredSymbolPeriodEventTLVTimestamp(self):
"""
Display Name: Time Stamp
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVTimestamp']))
@property
def ErroredSymbolPeriodEventTLVSymbolWindow(self):
"""
Display Name: Symbol Window
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVSymbolWindow']))
@property
def ErroredSymbolPeriodEventTLVSymbolThreshold(self):
"""
Display Name: Symbol Threshold
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVSymbolThreshold']))
@property
def ErroredSymbolPeriodEventTLVSymbols(self):
"""
Display Name: Symbols
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVSymbols']))
@property
def ErroredSymbolPeriodEventTLVErrorRunningTotal(self):
"""
Display Name: Error Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVErrorRunningTotal']))
@property
def ErroredSymbolPeriodEventTLVEventRunningTotal(self):
"""
Display Name: Event Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredSymbolPeriodEventTLVEventRunningTotal']))
@property
def ErroredFrameEventTLVType(self):
"""
Display Name: Type
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVType']))
@property
def ErroredFrameEventTLVLength(self):
"""
Display Name: Length
Default Value: 0x1A
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVLength']))
@property
def ErroredFrameEventTLVTimestamp(self):
"""
Display Name: Time Stamp
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVTimestamp']))
@property
def ErroredFrameEventTLVFrameWindow(self):
"""
Display Name: Frame Window
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVFrameWindow']))
@property
def ErroredFrameEventTLVFrameThreshold(self):
"""
Display Name: Frame Threshold
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVFrameThreshold']))
@property
def ErroredFrameEventTLVFrames(self):
"""
Display Name: Frames
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVFrames']))
@property
def ErroredFrameEventTLVErrorRunningTotal(self):
"""
Display Name: Error Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVErrorRunningTotal']))
@property
def ErroredFrameEventTLVEventRunningTotal(self):
"""
Display Name: Event Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFrameEventTLVEventRunningTotal']))
@property
def ErroredFramesPeriodEventTLVType(self):
"""
Display Name: Type
Default Value: 0x03
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVType']))
@property
def ErroredFramesPeriodEventTLVLength(self):
"""
Display Name: Length
Default Value: 0x1C
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVLength']))
@property
def ErroredFramesPeriodEventTLVTimestamp(self):
"""
Display Name: Time Stamp
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVTimestamp']))
@property
def ErroredFramesPeriodEventTLVFrameWindow(self):
"""
Display Name: Frame Window
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVFrameWindow']))
@property
def ErroredFramesPeriodEventTLVFrameThreshold(self):
"""
Display Name: Frame Threshold
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVFrameThreshold']))
@property
def ErroredFramesPeriodEventTLVFrames(self):
"""
Display Name: Frames
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVFrames']))
@property
def ErroredFramesPeriodEventTLVErrorRunningTotal(self):
"""
Display Name: Error Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVErrorRunningTotal']))
@property
def ErroredFramesPeriodEventTLVEventRunningTotal(self):
"""
Display Name: Event Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesPeriodEventTLVEventRunningTotal']))
@property
def ErroredFramesSecondsSummaryEventTLVType(self):
"""
Display Name: Type
Default Value: 0x04
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVType']))
@property
def ErroredFramesSecondsSummaryEventTLVLength(self):
"""
Display Name: Length
Default Value: 0x12
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVLength']))
@property
def ErroredFramesSecondsSummaryEventTLVTimestamp(self):
"""
Display Name: Time Stamp
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVTimestamp']))
@property
def ErroredFramesSecondsSummaryEventTLVFrameSecondsWindow(self):
"""
Display Name: Frame Seconds Summary Window
Default Value: 60
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVFrameSecondsWindow']))
@property
def ErroredFramesSecondsSummaryEventTLVFrameSecondsThreshold(self):
"""
Display Name: Frame Seconds Summary Threshold
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVFrameSecondsThreshold']))
@property
def ErroredFramesSecondsSummaryEventTLVFrameSeconds(self):
"""
Display Name: Frame Seconds Summary
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVFrameSeconds']))
@property
def ErroredFramesSecondsSummaryEventTLVErrorRunningTotal(self):
"""
Display Name: Error Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVErrorRunningTotal']))
@property
def ErroredFramesSecondsSummaryEventTLVEventRunningTotal(self):
"""
Display Name: Event Running Total
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErroredFramesSecondsSummaryEventTLVEventRunningTotal']))
@property
def OrganizationSpecificEventTLVType(self):
"""
Display Name: Type
Default Value: 0xFE
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificEventTLVType']))
@property
def OrganizationSpecificEventTLVLength(self):
"""
Display Name: Length
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificEventTLVLength']))
@property
def OrganizationSpecificEventTLVOui(self):
"""
Display Name: Organizationally Unique Identifier
Default Value: 0x000100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificEventTLVOui']))
@property
def OrganizationSpecificEventTLVValueLength(self):
"""
Display Name: Organization Specific Value Len
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificEventTLVValueLength']))
@property
def OrganizationSpecificEventTLVValue(self):
"""
Display Name: Organization Specific Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OrganizationSpecificEventTLVValue']))
@property
def TlvtypeReservedTLVType(self):
"""
Display Name: Type
Default Value: 0xFF
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvtypeReservedTLVType']))
@property
def TlvtypeReservedTLVLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvtypeReservedTLVLength']))
@property
def TlvtypeReservedTLVValueLength(self):
"""
Display Name: Value Len
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvtypeReservedTLVValueLength']))
@property
def TlvtypeReservedTLVValue(self):
"""
Display Name: Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvtypeReservedTLVValue']))
@property
def VariableRequestOAMPDUCode(self):
"""
Display Name: Code
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableRequestOAMPDUCode']))
@property
def VariableDescriptorBranch(self):
"""
Display Name: Variable Branch
Default Value: 0x07
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableDescriptorBranch']))
@property
def VariableDescriptorLeaf(self):
"""
Display Name: Variable Leaf
Default Value: 0x0002
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableDescriptorLeaf']))
@property
def EndDescriptorEndOfDescriptor(self):
"""
Display Name: end_of_descriptor
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndDescriptorEndOfDescriptor']))
@property
def VariableResponseOAMPDUCode(self):
"""
Display Name: Code
Default Value: 0x03
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableResponseOAMPDUCode']))
@property
def VariableContainerBranch(self):
"""
Display Name: Variable Branch
Default Value: 0x07
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableContainerBranch']))
@property
def VariableContainerLeaf(self):
"""
Display Name: Variable Leaf
Default Value: 0x0002
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableContainerLeaf']))
@property
def VariableContainerWidth(self):
"""
Display Name: Variable Width
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableContainerWidth']))
@property
def VariableContainerValueLength(self):
"""
Display Name: Variable Value Len
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableContainerValueLength']))
@property
def VariableContainerValue(self):
"""
Display Name: Variable Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VariableContainerValue']))
@property
def EndContainerEndOfContainer(self):
"""
Display Name: end_of_container
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndContainerEndOfContainer']))
@property
def HeaderFcs(self):
"""
Display Name: Frame Check Sequence CRC-32
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFcs']))
    def add(self):
        """Create a new LinkOAM node on the server, using this object's current
        attribute values (gathered via locals() and translated through the
        generated _SDM_ATT_MAP), and return the result of the create call."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 43.319231 | 202 | 0.715551 |
954146cbdea7e57641fc5a1ec374f381deb7f479 | 3,967 | py | Python | memos/memos/users/forms.py | iotexpert/docmgr | 735c7bcbaeb73bc44efecffb175f268f2438ac3a | [
"MIT"
] | null | null | null | memos/memos/users/forms.py | iotexpert/docmgr | 735c7bcbaeb73bc44efecffb175f268f2438ac3a | [
"MIT"
] | null | null | null | memos/memos/users/forms.py | iotexpert/docmgr | 735c7bcbaeb73bc44efecffb175f268f2438ac3a | [
"MIT"
] | null | null | null | from flask import current_app
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from memos.models.User import User
from memos.models.Memo import Memo
from memos.models.MemoFile import MemoFile
from memos.models.MemoSignature import MemoSignature
class RegistrationForm(FlaskForm):
    """Sign-up form: a unique username and e-mail plus a confirmed password."""
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Sign Up')

    def validate_username(self, username):
        # WTForms invokes validate_<field> automatically; reject taken names.
        existing = User.query.filter_by(username=username.data).first()
        if existing:
            raise ValidationError('That username is taken. Please choose a different one.')

    def validate_email(self, email):
        # Same uniqueness rule, applied to the e-mail address.
        existing = User.query.filter_by(email=email.data).first()
        if existing:
            raise ValidationError('That email is taken. Please choose a different one.')
class LoginForm(FlaskForm):
    """Authentication form: e-mail + password with an optional remember-me flag."""
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    # presumably feeds Flask-Login's remember_me behavior -- confirm in the login view
    remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
    """Profile-editing form; uniqueness is re-checked only for changed values."""
    username = StringField('Username',render_kw={})
    email = StringField('Email',
                        validators=[DataRequired(), Email()],render_kw={})
    delegates = StringField('Delegates', validators=[],render_kw={})
    admin = BooleanField('Admin', default=False,
                         false_values=('False', 'false', ''),render_kw={})
    readAll = BooleanField('Read All', default=False,
                           false_values=('False', 'false', ''),render_kw={})
    subscriptions = StringField('Subscriptions',render_kw={})
    pagesize = StringField('Page Size',render_kw={})
    picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Update')

    def validate_username(self, username):
        # An unchanged username needs no uniqueness check.
        if username.data == current_user.username:
            return
        if User.query.filter_by(username=username.data).first():
            raise ValidationError('That username is taken. Please choose a different one.')

    def validate_email(self, email):
        # An unchanged e-mail needs no uniqueness check.
        if email.data == current_user.email:
            return
        if User.query.filter_by(email=email.data).first():
            raise ValidationError('That email is taken. Please choose a different one.')

    def validate_subscriptions(self, subscriptions):
        # User.valid_usernames buckets the entered names; any invalid entry fails the form.
        lookup = User.valid_usernames(subscriptions.data)
        invalid = lookup['invalid_usernames']
        if invalid:
            raise ValidationError(f'Invalid users {invalid}')
class RequestResetForm(FlaskForm):
    """Form asking for the e-mail address of an account whose password should
    be reset; validation rejects addresses with no matching account."""
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    submit = SubmitField('Request Password Reset')
    def validate_email(self, email):
        # WTForms calls validate_<field> automatically during form.validate().
        user = User.query.filter_by(email=email.data).first()
        if user is None:
            raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
    """Form for choosing a new password (with confirmation) after a reset request."""
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Reset Password')
| 40.896907 | 98 | 0.659188 |
95419b554583bc803a43eac8c57ec34a913022a2 | 626 | py | Python | app/models/encryption.py | janaSunrise/ZeroCOM | 7197684ce708f080fe215b0a6e57c12836e4c0ab | [
"Apache-2.0"
] | 6 | 2021-03-27T08:58:04.000Z | 2021-05-23T17:07:09.000Z | app/models/encryption.py | janaSunrise/ZeroCOM | 7197684ce708f080fe215b0a6e57c12836e4c0ab | [
"Apache-2.0"
] | 2 | 2021-05-30T08:06:53.000Z | 2021-06-02T17:02:06.000Z | app/models/encryption.py | janaSunrise/ZeroCOM | 7197684ce708f080fe215b0a6e57c12836e4c0ab | [
"Apache-2.0"
] | null | null | null | import rsa
class RSA:
    """Thin classmethod facade over the third-party `rsa` package."""

    @classmethod
    def generate_keys(cls, size: int = 512) -> tuple:
        """Return a freshly generated (PublicKey, PrivateKey) pair of `size` bits."""
        return rsa.newkeys(size)

    @classmethod
    def export_key_pkcs1(cls, public_key: rsa.PublicKey, format: str = "PEM") -> bytes:
        """Serialize `public_key` in PKCS#1 form ('PEM' or 'DER' per the rsa docs)."""
        exported = rsa.PublicKey.save_pkcs1(public_key, format=format)
        return exported

    @classmethod
    def load_key_pkcs1(cls, public_key_pem: bytes) -> rsa.PublicKey:
        """Parse a PKCS#1 blob back into a PublicKey object."""
        loaded = rsa.PublicKey.load_pkcs1(public_key_pem)
        return loaded

    @classmethod
    def sign_message(cls, message: bytes, private_key: rsa.PrivateKey, algorithm: str = "SHA-1") -> bytes:
        """Sign `message` with `private_key` using the named hash algorithm."""
        return rsa.sign(message, private_key, algorithm)
| 31.3 | 106 | 0.693291 |
954332209f4c21416110fe7a318ebf31b6100c2e | 3,566 | py | Python | Terminal_Bot.py | Data-Alchemy/Unix_Automation | 3d5ee450e1b19f79509960f6d117c16fb8aa5313 | [
"Apache-2.0"
] | null | null | null | Terminal_Bot.py | Data-Alchemy/Unix_Automation | 3d5ee450e1b19f79509960f6d117c16fb8aa5313 | [
"Apache-2.0"
] | null | null | null | Terminal_Bot.py | Data-Alchemy/Unix_Automation | 3d5ee450e1b19f79509960f6d117c16fb8aa5313 | [
"Apache-2.0"
] | null | null | null | import paramiko,time,sys,json,os,pandas
########################################################################################################################
################################################### parms #############################################################
# --- Connection / job parameters (placeholders; fill in before running) ---
proxy = None
Port = 22  # standard SSH port
# NOTE(review): open('') raises FileNotFoundError until real file paths are supplied.
Username = open('').read() #put username in txt file
Pwd = open('').read() #put password in txt file
Host = ''
keys= '' #file with ssh keys
sudo_user = '' #optional parameter fill in if using sudo option in function must be passed as full command ie: sudo su - user
path = ''
# presumably the source/destination pair for download_remote_file -- not referenced
# in the visible code; confirm before relying on them
download_from = ""
download_to = ""
## put commands one line at a time ##
listofcommands=f'''
'''
########################################################################################################################
def exec_remote_cmds(commands,waittime,sudo = None):
    """Run `commands` in an interactive remote shell and print the output.

    commands: newline-separated shell commands sent verbatim to the session.
    waittime: seconds to sleep after sending the optional sudo command.
    sudo: full privilege-escalation command (e.g. "sudo su - user") or None.
    """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=Host, username=Username, password=Pwd, port=Port, key_filename=keys) #instantiate connection
    shell = client.invoke_shell()
    if sudo != None :
        shell.send(sudo)
    time.sleep(waittime)
    # Drain the login banner (and any sudo output) before sending commands.
    receive_buffer = shell.recv(1024)
    receive_buffer = ""
    shell.send(commands)
    status = shell.recv_ready()
    cmple = []
    return_cursor_item = None
    page = 0
    time.sleep(1)
    # Read 1 KiB pages until the last character of the last output line is
    # '$' -- presumably the shell prompt, signalling the commands finished.
    while return_cursor_item != '$':
        #status ==False :
        time.sleep(1)
        output = shell.recv(1024).decode("utf-8")
        # Accumulate ';'-separated fragments (collected but unused below).
        for i in output.split(';',) :
            cmple.append(''.join(s for s in i ))
        print (output)
        #print("Page :", page)
        return_cursor = [s for s in output.splitlines()][-1].strip() ## needed for custom exit subroutine since paramiko hangs the session
        return_cursor_item = [l for l in return_cursor][len([l for l in return_cursor])-1] ## needed for custom exit subroutine since paramiko hangs the session
        # `status` is accumulated here but never consulted again.
        status+= shell.recv_ready()
        page +=1
    print("Pages Read:",page)
    #for i in cmple: print(i.replace('[01','').replace('\n',''))
def download_remote_file(remotepath:str,localpath:str,waittime:int,sudo:str = None):
    """Fetch `remotepath` from the remote host into `localpath` over SFTP.

    waittime: seconds to sleep after the optional sudo command and between
              polls for the downloaded file.
    sudo: optional full escalation command (e.g. "sudo su - user").
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Connection parameters come from the module-level config section.
    ssh.connect(hostname=Host, username=Username, password=Pwd, port=Port, key_filename=keys)
    session = ssh.invoke_shell()
    if sudo is not None:
        session.send(sudo)
    time.sleep(waittime)
    session.recv(1024)  # drain the banner / sudo output
    sftp = ssh.open_sftp()
    sftp.get(remotepath, localpath)
    # Block until the file is visible on the local filesystem.
    while not os.path.exists(localpath):
        time.sleep(waittime)
    sftp.close()
def write_file_to_remote(remotepath:str,localpath:str,waittime:int,sudo:str = None):
    """Upload `localpath` to `remotepath` on the remote host over SFTP.

    waittime: seconds to sleep after the optional sudo command and between
              polls while waiting for the remote file to appear.
    sudo: optional full escalation command (e.g. "sudo su - user").
    """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=Host, username=Username, password=Pwd, port=Port, key_filename=keys) #instantiate connection
    shell = client.invoke_shell()
    if sudo != None :
        shell.send(sudo)
    time.sleep(waittime)
    receive_buffer = shell.recv(1024)
    sftp = client.open_sftp()
    sftp.put(localpath, remotepath)
    # BUG FIX: the original polled os.path.exists(remotepath), which checks the
    # LOCAL filesystem for a remote path and therefore loops forever unless
    # that path coincidentally exists locally. Poll the remote side via SFTP.
    while True:
        try:
            sftp.stat(remotepath)
            break
        except IOError:
            time.sleep(waittime)
    sftp.close()
# Entry point: run the configured command list; the transfer helpers below are
# kept commented out as usage examples.
exec_remote_cmds(listofcommands,1,sudo_user)  # sudo_user is optional; must be the full command, e.g. "sudo su - user"
#write_file_to_remote(download_from,download_to,1,sudo_user)
#download_remote_file(download_to,download_from,1)
| 37.93617 | 160 | 0.615536 |
95433e555cbe86270b9f0c26744b230b46b56f5a | 825 | py | Python | pnnl/models/__init__.py | rkini-pnnl/volttron-GS | 60055438446a060176381468757ad0ec339f2371 | [
"BSD-3-Clause"
] | 1 | 2021-08-05T04:01:55.000Z | 2021-08-05T04:01:55.000Z | pnnl/models/__init__.py | kevinatkinson-pnnl/volttron-GS | 479c614a6f7cd779fcc208e8e35d27d0961a16f8 | [
"BSD-3-Clause"
] | null | null | null | pnnl/models/__init__.py | kevinatkinson-pnnl/volttron-GS | 479c614a6f7cd779fcc208e8e35d27d0961a16f8 | [
"BSD-3-Clause"
] | null | null | null | import importlib
import logging
from volttron.platform.agent import utils
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.1"
__all__ = ['Model']
class Model(object):
    """Facade that instantiates a concrete model class named in config.

    The ``model_type`` config entry has the form ``"<module>.<ClassName>"``
    and is resolved relative to the ``volttron.pnnl.models`` package.
    """
    def __init__(self, config, **kwargs):
        base_module = "volttron.pnnl.models."
        try:
            model_type = config["model_type"]
        except KeyError as e:
            _log.exception("Missing Model Type key: {}".format(e))
            raise e
        module_name, class_name = model_type.split(".")
        # Import the submodule dynamically and build the configured class,
        # handing it a back-reference to this wrapper.
        module = importlib.import_module(base_module + module_name)
        self.model = getattr(module, class_name)(config, self)
    def get_q(self, _set, sched_index, market_index, occupied):
        """Delegate a quantity prediction to the wrapped model."""
        return self.model.predict(_set, sched_index, market_index, occupied)
| 29.464286 | 73 | 0.65697 |
95446537feef632a16bbea1d71d8483703929711 | 826 | py | Python | src/cli.py | thisistrivial/cr-draft | 25defcf03466b044c28ad42661536e27b6df1222 | [
"MIT"
] | null | null | null | src/cli.py | thisistrivial/cr-draft | 25defcf03466b044c28ad42661536e27b6df1222 | [
"MIT"
] | null | null | null | src/cli.py | thisistrivial/cr-draft | 25defcf03466b044c28ad42661536e27b6df1222 | [
"MIT"
] | null | null | null |
import draft
import os
def run():
  """Run an interactive two-player draft: each player secretly picks one item
  from each of four pairs; the rejected item goes to the opponent."""
  picks = [[],[]]
  pairs = draft.pairs()
  for player in range(1, 3):
    os.system("clear")
    pre = "[Player %d]: " % player
    input(pre + "(Enter when ready) ")
    pair = 0
    while pair < 4:
      # Re-prompt until the player answers "1" or "2".
      pick = input(pre + "%s (1), %s (2) " %
        (draft.get_name(pairs[player - 1][pair][0]),
        draft.get_name(pairs[player - 1][pair][1])))
      if pick in ("1", "2"):
        npick = int(pick)
        # Chosen item goes to this player, the other item to the opponent.
        picks[player - 1].append(pairs[player - 1][pair][npick - 1])
        picks[2 - player].append(pairs[player - 1][pair][2 - npick])
        pair += 1
  # Reveal each player's final picks privately, clearing between screens.
  for player in range(1, 3):
    os.system("clear")
    pre = "[Player %d]: " % player
    input(pre + "(Enter to show) ")
    input(list(map(draft.get_name, picks[player - 1])))
  os.system("clear")
run()
| 22.944444 | 69 | 0.521792 |
9547e7b57fef282a81e3052edbdb2d34bb2cd61a | 222 | py | Python | src/honey.py | terror/golf | 9d38f8376c2ddbbb34360a3353ec6f4289736bd4 | [
"Unlicense"
] | null | null | null | src/honey.py | terror/golf | 9d38f8376c2ddbbb34360a3353ec6f4289736bd4 | [
"Unlicense"
] | null | null | null | src/honey.py | terror/golf | 9d38f8376c2ddbbb34360a3353ec6f4289736bd4 | [
"Unlicense"
] | null | null | null | # https://open.kattis.com/problems/honey
print(*(lambda x: [x[int(input())] for _ in range(int(input()))])([1, 0, 6, 12, 90, 360, 2040, 10080, 54810, 290640, 1588356, 8676360, 47977776, 266378112, 1488801600]), sep="\n")
| 55.5 | 179 | 0.657658 |
954b661a558c8d594bb41be0460c68998860e06c | 4,712 | py | Python | plaso/parsers/winreg_plugins/usbstor.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 2 | 2016-02-18T12:46:29.000Z | 2022-03-13T03:04:59.000Z | plaso/parsers/winreg_plugins/usbstor.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | null | null | null | plaso/parsers/winreg_plugins/usbstor.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 6 | 2016-12-18T08:05:36.000Z | 2021-04-06T14:19:11.000Z | # -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
import logging
from plaso.events import windows_events
from plaso.lib import eventdata
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides (david.nides@gmail.com)'
class USBStorPlugin(interface.KeyPlugin):
  """USBStor key plugin.

  Walks the USBSTOR Registry key and produces events for when USB storage
  devices were first and last connected.
  """
  NAME = u'windows_usbstor_devices'
  DESCRIPTION = u'Parser for USB Plug And Play Manager USBStor Registry Key.'
  REG_KEYS = [u'\\{current_control_set}\\Enum\\USBSTOR']
  REG_TYPE = u'SYSTEM'
  URLS = [u'http://www.forensicswiki.org/wiki/USB_History_Viewing']
  def GetEntries(
      self, parser_mediator, key=None, registry_file_type=None,
      codepage=u'cp1252', **kwargs):
    """Collect Values under USBStor and return an event object for each one.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      key: Optional Registry key (instance of winreg.WinRegKey).
        The default is None.
      registry_file_type: Optional string containing the Windows Registry file
        type, e.g. NTUSER, SOFTWARE. The default is None.
      codepage: Optional extended ASCII string codepage. The default is cp1252.
    """
    for subkey in key.GetSubkeys():
      text_dict = {}
      text_dict[u'subkey_name'] = subkey.name
      # Time last USB device of this class was first inserted.
      event_object = windows_events.WindowsRegistryEvent(
          subkey.last_written_timestamp, key.path, text_dict,
          usage=eventdata.EventTimestamp.FIRST_CONNECTED, offset=key.offset,
          registry_file_type=registry_file_type,
          source_append=u': USBStor Entries')
      parser_mediator.ProduceEvent(event_object)
      # Subkey names look like "Disk&Ven_X&Prod_Y&Rev_Z".
      name_values = subkey.name.split(u'&')
      number_of_name_values = len(name_values)
      # Normally we expect 4 fields here however that is not always the case.
      if number_of_name_values != 4:
        logging.warning(
            u'Expected 4 &-separated values in: {0:s}'.format(subkey.name))
      if number_of_name_values >= 1:
        text_dict[u'device_type'] = name_values[0]
      if number_of_name_values >= 2:
        text_dict[u'vendor'] = name_values[1]
      if number_of_name_values >= 3:
        text_dict[u'product'] = name_values[2]
      if number_of_name_values >= 4:
        text_dict[u'revision'] = name_values[3]
      # NOTE: text_dict is reused (mutated) across device subkeys, so optional
      # fields from the previous device are popped below when absent.
      for devicekey in subkey.GetSubkeys():
        text_dict[u'serial'] = devicekey.name
        friendly_name_value = devicekey.GetValue(u'FriendlyName')
        if friendly_name_value:
          text_dict[u'friendly_name'] = friendly_name_value.data
        else:
          text_dict.pop(u'friendly_name', None)
        # ParentIdPrefix applies to Windows XP Only.
        parent_id_prefix_value = devicekey.GetValue(u'ParentIdPrefix')
        if parent_id_prefix_value:
          text_dict[u'parent_id_prefix'] = parent_id_prefix_value.data
        else:
          text_dict.pop(u'parent_id_prefix', None)
        # Win7 - Last Connection.
        # Vista/XP - Time of an insert.
        event_object = windows_events.WindowsRegistryEvent(
            devicekey.last_written_timestamp, key.path, text_dict,
            usage=eventdata.EventTimestamp.LAST_CONNECTED, offset=key.offset,
            registry_file_type=registry_file_type,
            source_append=u': USBStor Entries')
        parser_mediator.ProduceEvent(event_object)
        # Build list of first Insertion times.
        first_insert = []
        device_parameter_key = devicekey.GetSubkey(u'Device Parameters')
        if device_parameter_key:
          first_insert.append(device_parameter_key.last_written_timestamp)
        log_configuration_key = devicekey.GetSubkey(u'LogConf')
        if (log_configuration_key and
            log_configuration_key.last_written_timestamp not in first_insert):
          first_insert.append(log_configuration_key.last_written_timestamp)
        properties_key = devicekey.GetSubkey(u'Properties')
        if (properties_key and
            properties_key.last_written_timestamp not in first_insert):
          first_insert.append(properties_key.last_written_timestamp)
        # Add first Insertion times.
        # NOTE(review): these "first insertion" events are tagged with
        # LAST_CONNECTED -- looks like FIRST_CONNECTED was intended; confirm
        # against plaso conventions before changing.
        for timestamp in first_insert:
          event_object = windows_events.WindowsRegistryEvent(
              timestamp, key.path, text_dict,
              usage=eventdata.EventTimestamp.LAST_CONNECTED, offset=key.offset,
              registry_file_type=registry_file_type,
              source_append=u': USBStor Entries')
          parser_mediator.ProduceEvent(event_object)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| 39.266667 | 79 | 0.695034 |
9550103d22b0d4fa16de9bf491fa914c8ddf64fb | 4,877 | py | Python | musicbot/modules/default/helpmsg.py | oKidd/PrendeMusic | b66d54d93ed36587193c20b71201c4447d80ad85 | [
"MIT"
] | 5 | 2018-09-07T12:17:27.000Z | 2019-12-06T02:35:26.000Z | musicbot/modules/default/helpmsg.py | oKidd/PrendeMusic | b66d54d93ed36587193c20b71201c4447d80ad85 | [
"MIT"
] | null | null | null | musicbot/modules/default/helpmsg.py | oKidd/PrendeMusic | b66d54d93ed36587193c20b71201c4447d80ad85 | [
"MIT"
] | 2 | 2020-04-25T00:35:17.000Z | 2021-05-13T22:20:19.000Z | from collections import defaultdict
from discord.ext.commands import Cog, command
from discord.utils import get
from ...utils import check_restricted
from ... import exceptions
from ... import messagemanager
class Help(Cog):
    # Cog implementing the bot's `help` command. NOTE: command docstrings in
    # this class are user-facing runtime data (shown by `help` itself), so
    # explanatory notes here use plain comments only.
    async def get_cmd(self, name, bot, user, list_all_cmds=False):
        # Return the named command if `user` may see it (or unconditionally
        # when list_all_cmds is set); implicitly returns None otherwise.
        cmd = bot.get_command(name)
        user_permissions = bot.permissions.for_user(user)
        if not check_restricted(cmd, user_permissions) or list_all_cmds:
            return cmd
    async def _gen_cog_cmd_dict(self, bot, user, list_all_cmds=False):
        # Build {cog_name: {command_qualified_name: command}} for every
        # command visible to `user`, skipping developer-only commands.
        user_permissions = bot.permissions.for_user(user)
        ret = defaultdict(dict)
        cmds = bot.commands if list_all_cmds else check_restricted(bot.commands, user_permissions)
        for cmd in cmds:
            # This will always return at least cmd_help, since they needed perms to run this command
            if not hasattr(cmd.callback, 'dev_cmd'):
                cog_name = cmd.cog.qualified_name if cmd.cog else 'unknown'
                ret[cog_name][cmd.qualified_name] = cmd
        return ret
    @command()
    async def help(self, ctx, *options):
        """
        Usage:
            {command_prefix}help [options...] [name]

        Options:
            (none)    prints a help message for the command with that
                          name.
            cog       prints a help message for the command in the
                          cog with that name. name argument is required.
            all       list all commands available. name argument will
                          be discarded if not used with cog option.

        Prints a help message. Supplying multiple names can leads to unexpected behavior.
        """
        prefix = ctx.bot.config.command_prefix
        # Pull the flag words out of the argument list; what remains is the
        # command/cog name being asked about.
        options = list(options)
        list_all = True if 'all' in options else False
        options.remove('all') if list_all else None
        list_cog = True if 'cog' in options else False
        options.remove('cog') if list_cog else None
        name = '' if not options else ' '.join(options)
        cogs = await self._gen_cog_cmd_dict(ctx.bot, ctx.author, list_all_cmds=list_all)
        desc = ''
        if list_cog:
            # Narrow the listing to a single cog; name[0] is the cog name.
            cogdesc = ''
            try:
                cogs = {name[0]: cogs[name[0]]}
                cogdesc = ctx.bot.cogs[name[0]].description
            except KeyError:
                raise exceptions.CommandError(ctx.bot.str.get('help?cmd?help?fail@cog', "No such cog"), expire_in=10)
            desc = '\N{WHITE SMALL SQUARE} {}:\n{}\n\n'.format(name[0], cogdesc) if cogdesc else '\N{WHITE SMALL SQUARE} {}:\n'.format(name[0])
        else:
            if name:
                # Detailed help for one command: send its docstring, any
                # subcommands, and its aliases, then stop.
                cmd = await self.get_cmd(name, ctx.bot, ctx.author, list_all_cmds=True)
                if not cmd:
                    raise exceptions.CommandError(ctx.bot.str.get('cmd-help-invalid', "No such command"), expire_in=10)
                if not hasattr(cmd.callback, 'dev_cmd'):
                    usage = cmd.help
                    if ctx.bot.config.help_display_sig and hasattr(cmd, 'commands'):
                        usage = '{}\n\nSignature: {} {}'.format(usage, cmd.qualified_name, cmd.signature)
                    await messagemanager.safe_send_normal(
                        ctx,
                        ctx,
                        "```\n{}\n\n{}Aliases (for this name): {}```".format(
                            usage,
                            '' if not hasattr(cmd, 'commands') else 'This is a command group with following subcommands:\n{}\n\n'.format(', '.join(c.name for c in cmd.commands) if cmd.commands else None),
                            ' '.join(cmd.aliases)
                        ).format(command_prefix=ctx.bot.config.command_prefix),
                        expire_in=60
                    )
                    return
            elif ctx.author.id in ctx.bot.config.owner_id:
                # Owners with no name argument see every command.
                cogs = await self._gen_cog_cmd_dict(ctx.bot, ctx.author, list_all_cmds=True)
        # Render each (non-empty) cog as a header plus a code-fenced list of
        # its command names.
        cmdlisto = ''
        for cog, cmdlist in cogs.items():
            if len(cmdlist) > 0:
                cmdlisto += ('\N{WHITE SMALL SQUARE} '+ cog + ' [' + str(len(cmdlist)) + ']:\n') if not list_cog else ''
                cmdlisto += '```' + ', '.join([cmd for cmd in cmdlist.keys()]) + '```\n'
        desc += cmdlisto + ctx.bot.str.get(
            'cmd-help-response', 'For information about a particular command, run `{}help [command]`\n'
            'For further help, see https://just-some-bots.github.io/MusicBot/'
        ).format(prefix)
        if not list_all:
            desc += ctx.bot.str.get('cmd-help-all', '\nOnly showing commands you can use, for a list of all commands, run `{}help all`').format(prefix)
        await messagemanager.safe_send_normal(ctx, ctx, desc, reply=True, expire_in=60)
# Cogs exported by this module.
cogs = [Help]
95515f6c6551928915064695c2fceeeba21d268c | 8,710 | py | Python | flatpak_update.py | willsALMANJ/flatpak_update | 84a8f59a11952a5daf57a18f0426b676f3a707c2 | [
"0BSD"
] | 1 | 2020-06-12T07:51:32.000Z | 2020-06-12T07:51:32.000Z | flatpak_update.py | willsALMANJ/flatpak_update | 84a8f59a11952a5daf57a18f0426b676f3a707c2 | [
"0BSD"
] | null | null | null | flatpak_update.py | willsALMANJ/flatpak_update | 84a8f59a11952a5daf57a18f0426b676f3a707c2 | [
"0BSD"
] | null | null | null | "Update a Flatpak repository for new versions of components"
import argparse
import asyncio
import datetime
from functools import total_ordering
import hashlib
from itertools import zip_longest
import json
from pathlib import Path
import re
import httpx
import jinja2
import yaml
GITHUB_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
@total_ordering
class Version:
    """Class embodying a software version

    Assumes that version is purely composed of integers and .'s -- no alpha,
    beta, prerelease, etc. or other strings allowed.

    Missing trailing components compare as zero, so Version("1.0") and
    Version("1") are equal (and hash alike). Comparison against a plain
    string still falls back to string equality for compatibility.
    """
    def __init__(self, version):
        """Accept a dotted string ("1.2.3") or a tuple of int-parsable parts."""
        if isinstance(version, str):
            version_tuple = tuple(version.split("."))
        elif isinstance(version, tuple):
            version_tuple = version
        else:
            raise ValueError(f"Invalid version: {version}")
        self.version_tuple = tuple(int(x) for x in version_tuple)
        # Optional release date string ("%Y-%m-%d"), filled in by lookups.
        self.date = None
    def _normalized(self):
        """Version tuple with trailing zeros stripped, for __eq__/__hash__."""
        parts = list(self.version_tuple)
        while parts and parts[-1] == 0:
            parts.pop()
        return tuple(parts)
    def __iter__(self):
        for part in self.version_tuple:
            yield part
    def __str__(self):
        return ".".join(str(p) for p in self)
    def __repr__(self):
        return f"{type(self).__name__}({str(self)})"
    def __len__(self):
        return len(self.version_tuple)
    def __hash__(self):
        # Hash the normalized tuple so equal versions ("1.0" vs "1") hash alike.
        return hash(self._normalized())
    def __getitem__(self, key):
        # Out-of-range components read as zero, matching padded comparison.
        if len(self) > key:
            return self.version_tuple[key]
        return 0
    def __eq__(self, other):
        # BUG FIX: the old implementation compared str(self) == str(other),
        # which made Version("1.0") and Version("1") unequal even though
        # __gt__ ordered neither before the other. Compare normalized tuples
        # for Versions; keep string comparison for non-Version operands.
        if isinstance(other, Version):
            return self._normalized() == other._normalized()
        return str(self) == str(other)
    def __gt__(self, other):
        for self_i, other_i in zip_longest(self, other, fillvalue=0):
            if self_i == other_i:
                continue
            return self_i > other_i
        return False
async def get_version_scrape(spec):
    "Scrape raw HTML for version string regex to find latest version"
    async with httpx.AsyncClient() as client:
        response = await client.get(spec["url"])
    # Every regex hit is parsed as a Version; the highest candidate wins.
    candidates = re.findall(spec["regex"], response.text)
    return max(Version(candidate) for candidate in candidates)
async def get_version_github_branches(spec):
    "Get latest version for project that uses separate git tag for each version"
    base_url = f"https://api.github.com/repos/{spec['project']}"
    headers = {"Accept": "application/vnd.github.v3+json"}
    async with httpx.AsyncClient() as client:
        response = await client.get(f"{base_url}/branches", headers=headers)
    branches = response.json()
    # Keep only branch names matching the configured regex; group 1 of the
    # match is the version string.
    candidates = []
    for branch in branches:
        match = re.match(spec["regex"], branch["name"])
        if match:
            candidates.append(Version(match.group(1)))
    return max(candidates)
async def get_version_github_releases(spec):
    "Find the latest version on GitHub releases / tags page"
    base_url = f"https://api.github.com/repos/{spec['project']}"
    headers = {"Accept": "application/vnd.github.v3+json"}
    # spec["tags"] switches between the /tags and /releases endpoints.
    if spec.get("tags"):
        endpt = "tags"
    else:
        endpt = "releases"
    async with httpx.AsyncClient() as client:
        response = await client.get(f"{base_url}/{endpt}", headers=headers)
    data = response.json()
    versions = []
    for item in data:
        version_str = item["name"]
        # Apply configured (old, new) text substitutions, e.g. stripping a
        # leading "v", before parsing.
        for sub in spec.get("substitutions", []):
            version_str = version_str.replace(sub[0], sub[1])
        try:
            version = Version(version_str)
        except ValueError:
            # Skip names that are not pure dotted-integer versions.
            continue
        versions.append((version, item))
    # Tuples compare by version first; metadata tags along for the date lookup.
    version, metadata = max(versions)
    if spec.get("set_date"):
        if endpt == "releases":
            version_dt = datetime.datetime.strptime(
                metadata["published_at"], GITHUB_DATE_FORMAT
            )
        elif endpt == "tags":
            # Tags carry no date; fetch the tagged commit for its committer date.
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    metadata["commit"]["url"], headers=headers
                )
            data = response.json()
            version_dt = datetime.datetime.strptime(
                data["commit"]["committer"]["date"], GITHUB_DATE_FORMAT
            )
        version.date = version_dt.strftime("%Y-%m-%d")
    return version
async def get_latest_version(spec):
    "Get latest version of a single component"
    spec_type = spec["type"]
    # Dispatch to the lookup matching the spec's declared type; anything
    # else is a configuration error.
    if spec_type == "scrape":
        return await get_version_scrape(spec)
    if spec_type == "github_branches":
        return await get_version_github_branches(spec)
    if spec_type == "github_releases":
        return await get_version_github_releases(spec)
    raise ValueError(f"Bad spec type: {spec_type}")
async def get_latest_versions(specs):
    "Look up latest versions of components in config file"
    # Fire off every lookup concurrently; gather preserves input order.
    lookups = [get_latest_version(spec["get_version"]) for spec in specs]
    versions = await asyncio.gather(*lookups)
    return dict(zip((spec["name"] for spec in specs), versions))
def load_manifest(path):
    "Load json or yaml file"
    text = path.read_text()
    if path.suffix == ".json":
        return json.loads(text)
    # Anything without a .json suffix is treated as YAML.
    return yaml.load(text, Loader=yaml.SafeLoader)
def get_current_versions(manifest):
    "Parse versions from current Flatpak manifest"
    versions = {"runtime": Version(manifest["runtime-version"])}
    for module in manifest["modules"]:
        source_url = module["sources"][0]["url"]
        # Tarball names end in "-<version>.tar.gz"; extract the version part.
        match = re.search(r"-([0-9\.]+)\.tar\.gz$", source_url)
        versions[module["name"]] = Version(match.group(1))
    return versions
async def get_sha256(download_dir: Path, url):
    """Return the sha256 hex digest of the file at `url`.

    The file is downloaded into `download_dir` (named after the URL's
    basename) only when not already cached there.
    """
    output_path = download_dir / Path(url).name
    if not output_path.exists():
        # BUG FIX: the original created an AsyncClient without ever closing
        # it, leaking the connection pool; use it as an async context manager.
        async with httpx.AsyncClient() as client:
            with output_path.open("wb") as file_:
                async with client.stream("GET", url) as response:
                    async for chunk in response.aiter_raw():
                        file_.write(chunk)
    sha256 = hashlib.sha256()
    with output_path.open("rb") as file_:
        # Hash in 64 KiB chunks to bound memory on large artifacts.
        while data := file_.read(2 ** 16):
            sha256.update(data)
    return sha256.hexdigest()
async def get_sha256_set(named_urls):
    """Get sha256 sums for name:url pairs

    Returns dictionary with {name}_sha256 keys for easy merging into j2
    variables dict
    """
    # Downloads are cached under ./.cache so reruns skip fetching.
    cache_dir = Path.cwd() / ".cache"
    cache_dir.mkdir(exist_ok=True)
    digests = await asyncio.gather(
        *(get_sha256(cache_dir, url) for url in named_urls.values())
    )
    return {f"{name}_sha256": digest for name, digest in zip(named_urls, digests)}
def get_template_vars(config, current_versions, new_versions, manifest):
    "Build up variables for jinja2 templates from version data"
    env = {}
    # name -> source URL, collected only for modules that actually changed
    # so their tarballs must be (down)loaded and hashed.
    remote_sha256 = {}
    env["runtime_version"] = new_versions["runtime"]
    for spec in config["modules"]:
        name = spec["name"]
        env[f"{name}_version"] = new_versions[name]
        env[f"{name}_source_url"] = spec["source_url"].format(
            version=new_versions[name]
        )
        env[f"{name}_version_date"] = new_versions[name].date
        if new_versions[name] > current_versions[name]:
            env[f"{name}_sha256"] = mod["sources"][0]["sha256"] if False else None  # placeholder never executes
            remote_sha256[name] = env[f"{name}_source_url"]
        else:
            # Unchanged module: reuse the sha256 already in the manifest.
            for mod in manifest["modules"]:
                if mod["name"] == name:
                    env[f"{name}_sha256"] = mod["sources"][0]["sha256"]
    # Fetch/hash only the changed modules' tarballs.
    new_sha256 = asyncio.run(get_sha256_set(remote_sha256))
    env.update(**new_sha256)
    return env
def render_templates(template_dir, env):
    "Render a .j2 templates using collected version information"
    for template_path in template_dir.glob("*.j2"):
        template = jinja2.Template(template_path.read_text())
        # Write the rendered output next to the template, dropping the
        # trailing .j2 suffix (foo.yml.j2 -> foo.yml).
        output_path = template_path.with_name(template_path.stem)
        output_path.write_text(template.render(**env))
def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:].
            Accepting an explicit list keeps the parser testable without
            touching the real command line (backward-compatible addition).

    Returns:
        argparse.Namespace with `config`, `manifest` and `template_dir`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", "-c", required=True, help="Configuration file")
    parser.add_argument(
        "--manifest", "-m", required=True, help="Current flatpak manifest"
    )
    parser.add_argument(
        "--template-dir", "-t", help="Directory with .j2 files to render"
    )
    return parser.parse_args(argv)
def main():
    "Main logic"
    args = parse_args()
    # The config file is always YAML (unlike the manifest, which may be JSON).
    with open(args.config) as file_:
        config = yaml.load(file_, Loader=yaml.SafeLoader)
    # Resolve the newest upstream versions of the runtime plus every module.
    new_versions = asyncio.run(
        get_latest_versions([config["runtime"]] + config["modules"])
    )
    manifest = load_manifest(Path(args.manifest))
    current_versions = get_current_versions(manifest)
    # Compute template variables (versions, URLs, sha256 sums) and render
    # every *.j2 file in the template directory.
    env = get_template_vars(config, current_versions, new_versions, manifest)
    render_templates(Path(args.template_dir), env)
if __name__ == "__main__":
    main()
| 30.138408 | 83 | 0.62721 |
955231e63fbff36ad8601f161f98440ad3a247cb | 1,746 | py | Python | base/env/test_processStrategy.py | stevenchen521/quant_ml | f7d5efc49c934724f97fcafacc560f4a35b24551 | [
"MIT"
] | 5 | 2019-02-14T03:12:22.000Z | 2022-01-24T18:43:07.000Z | base/env/test_processStrategy.py | stevenchen521/quant_ml | f7d5efc49c934724f97fcafacc560f4a35b24551 | [
"MIT"
] | null | null | null | base/env/test_processStrategy.py | stevenchen521/quant_ml | f7d5efc49c934724f97fcafacc560f4a35b24551 | [
"MIT"
] | 2 | 2019-11-13T18:56:13.000Z | 2021-12-31T01:25:22.000Z | from unittest import TestCase
import base.env.pre_process as pre_process
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from helper.util import get_attribute
from base.env.pre_process_conf import active_stragery, get_strategy_analyze
import base.env.pre_process
class TestProcessStrategy(TestCase):
    """Integration tests for base.env.pre_process strategy loading/processing."""
    def test_get_active_strategy(self):
        # Loads the default strategy pipeline; also used as setup by
        # test_process, which calls it directly.
        self.action_fetch, self.action_pre_analyze, self.indicators, self.action_post_analyze, self._label = \
            pre_process.get_active_strategy(strategy=None)
        self.assertIsNotNone(self.action_fetch)
        self.assertIsNotNone(self.action_pre_analyze)
        self.assertIsNotNone(self.indicators)
        self.assertIsNotNone(self.action_post_analyze)
    def test_process(self):
        self.test_get_active_strategy()
        # Runs the full pipeline on SH_index data over a fixed date range.
        dates, pre_frames, origin_frames, post_frames = pre_process.ProcessStrategy(
            # self.action_fetch, self.action_pre_analyze,self.indicators, self.action_post_analyze,
            ['SH_index'], "2008-01-01", "2019-02-01", MinMaxScaler(), active_stragery).process()
        result = post_frames['nasdaq'].dropna()
        self.assertIsInstance(result, pd.DataFrame)
        self.assertNotEqual(result.values.size, 0)
    def test_palyaround(self):
        # Scratch test exercising PreAnalyzeDefault.fire directly.
        # action_post_analyze = get_attribute('.'.join([active_stragery.get('module'), 'PreAnalyzeDefault']))
        # action_post_analyze.fire(None, None)
        pre_process.PreAnalyzeDefault.fire(None, None)
        # print(class_post_analyze.fire(None, None))
    def test_get_strategy_analyze(self):
        self.assertIsNotNone(get_strategy_analyze(get_attribute(active_stragery)))
| 38.8 | 143 | 0.708477 |
9552f2a3d627a440738e08b8175d69d9667e0003 | 12,194 | py | Python | resources/lib/themoviedb/tmdb.py | bopopescu/ServerStatus | a883598248ad6f5273eb3be498e3b04a1fab6510 | [
"MIT"
] | null | null | null | resources/lib/themoviedb/tmdb.py | bopopescu/ServerStatus | a883598248ad6f5273eb3be498e3b04a1fab6510 | [
"MIT"
] | 1 | 2015-04-21T22:05:02.000Z | 2015-04-22T22:27:15.000Z | resources/lib/themoviedb/tmdb.py | GetSomeBlocks/Score_Soccer | a883598248ad6f5273eb3be498e3b04a1fab6510 | [
"MIT"
] | 2 | 2015-09-29T16:31:43.000Z | 2020-07-26T03:41:10.000Z | #!/usr/bin/env python2.5
#encoding:utf-8
#author:dbr/Ben
#project:themoviedb
#forked by ccjensen/Chris
#http://github.com/ccjensen/themoviedb
"""An interface to the themoviedb.org API
"""
__author__ = "dbr/Ben"
__version__ = "0.2b"
# themoviedb.org API configuration: the shared API key plus URL templates for
# each API method (with a literal %s placeholder left for the query/id).
# NOTE(review): the API key is hard-coded in source -- consider moving it to
# configuration if this code is reused.
config = {}
config['apikey'] = "a8b9f96dde091408a03cb4c78477bd14"
config['urls'] = {}
config['urls']['movie.search'] = "http://api.themoviedb.org/2.1/Movie.search/en/xml/%(apikey)s/%%s" % (config)
config['urls']['movie.getInfo'] = "http://api.themoviedb.org/2.1/Movie.getInfo/en/xml/%(apikey)s/%%s" % (config)
import urllib
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import elementtree.ElementTree as ElementTree
# collections.defaultdict
# originally contributed by Yoav Goldberg <yoav.goldberg@gmail.com>
# new version by Jason Kirtland from Python cookbook.
# <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/523034>
try:
    from collections import defaultdict
except ImportError:
    # Pure-Python backport of collections.defaultdict for Python < 2.5.
    class defaultdict(dict):
        def __init__(self, default_factory=None, *a, **kw):
            if (default_factory is not None and not hasattr(default_factory, '__call__')):
                raise TypeError('first argument must be callable')
            dict.__init__(self, *a, **kw)
            self.default_factory = default_factory
        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                # Missing key: delegate to __missing__, like the real class.
                return self.__missing__(key)
        def __missing__(self, key):
            if self.default_factory is None:
                raise KeyError(key)
            self[key] = value = self.default_factory()
            return value
        def __reduce__(self):
            # Pickle support: reconstruct with the factory and the items.
            if self.default_factory is None:
                args = tuple()
            else:
                args = self.default_factory,
            return type(self), args, None, None, self.iteritems()
        def copy(self):
            return self.__copy__()
        def __copy__(self):
            return type(self)(self.default_factory, self)
        def __deepcopy__(self, memo):
            import copy
            return type(self)(self.default_factory, copy.deepcopy(self.items()))
        def __repr__(self):
            return 'defaultdict(%s, %s)' % (self.default_factory, dict.__repr__(self))
    # [XX] to make pickle happy in python 2.4:
    import collections
    collections.defaultdict = defaultdict
# Exception hierarchy: every library error derives from TmdBaseError.
class TmdBaseError(Exception): pass
# Raised when the HTTP request to the API fails.
class TmdHttpError(TmdBaseError): pass
# Raised when the API response is not well-formed XML.
class TmdXmlError(TmdBaseError): pass
class XmlHandler:
    """Deals with retrieval of XML files from API
    """
    # NOTE: Python 2 code ("except X, e" syntax, urllib.urlopen).
    def __init__(self, url):
        self.url = url
    def _grabUrl(self, url):
        # Fetch raw bytes, translating network failures into TmdHttpError.
        try:
            urlhandle = urllib.urlopen(url)
        except IOError, errormsg:
            raise TmdHttpError(errormsg)
        return urlhandle.read()
    def getEt(self):
        # Parse the fetched document; ElementTree raises SyntaxError on
        # malformed XML, which becomes TmdXmlError.
        xml = self._grabUrl(self.url)
        try:
            et = ElementTree.fromstring(xml)
        except SyntaxError, errormsg:
            raise TmdXmlError(errormsg)
        return et
class recursivedefaultdict(defaultdict):
    """defaultdict whose missing values are again recursivedefaultdicts,
    allowing arbitrarily deep d[a][b][c] = v assignment with no setup."""
    def __init__(self):
        self.default_factory = type(self)
class SearchResults(list):
    """List subclass holding the MovieResult entries that matched a search."""
    def __repr__(self):
        inner = list.__repr__(self)
        return "<Search results: %s>" % inner
class MovieResult(dict):
    """A dict containing the information about a specific search result
    """
    def __repr__(self):
        name = self.get("name")
        released = self.get("released")
        return "<MovieResult: %s (%s)>" % (name, released)
class Movie(dict):
    """A dict containing the information about the film
    """
    def __repr__(self):
        # BUG FIX: the original copy-pasted "<MovieResult: ...>" here, so a
        # Movie misreported its own type. Use the actual class name.
        return "<%s: %s (%s)>" % (
            self.__class__.__name__, self.get("name"), self.get("released"))
class Categories(recursivedefaultdict):
    """Nested mapping of category info: [type][name] -> encyclopedia url."""
    def set(self, category_et):
        """Takes an elementtree Element ('category') and stores the url,
        using the type and name as the dict key.

        For example:
        <category type="genre" url="http://themoviedb.org/encyclopedia/category/80" name="Crime"/>
        ..becomes:
        categories['genre']['Crime'] = 'http://themoviedb.org/encyclopedia/category/80'
        """
        self[category_et.get("type")][category_et.get("name")] = category_et.get("url")
class Studios(recursivedefaultdict):
    """Mapping of studio name -> encyclopedia url."""
    def set(self, studio_et):
        """Takes an elementtree Element ('studio') and stores the url,
        using the name as the dict key.

        For example:
        <studio url="http://www.themoviedb.org/encyclopedia/company/20" name="Miramax Films"/>
        ..becomes:
        studios['name'] = 'http://www.themoviedb.org/encyclopedia/company/20'
        """
        self[studio_et.get("name")] = studio_et.get("url")
class Countries(recursivedefaultdict):
    """Nested mapping of country info: [code][name] -> encyclopedia url."""
    def set(self, country_et):
        """Takes an elementtree Element ('country') and stores the url,
        using the name and code as the dict key.

        For example:
        <country url="http://www.themoviedb.org/encyclopedia/country/223" name="United States of America" code="US"/>
        ..becomes:
        countries['code']['name'] = 'http://www.themoviedb.org/encyclopedia/country/223'
        """
        self[country_et.get("code")][country_et.get("name")] = country_et.get("url")
class Images(recursivedefaultdict):
    """Stores image information
    """
    def set(self, image_et):
        """Takes an elementtree Element ('image') and stores the url,
        using the type, id and size as the dict key.
        For example:
        <image type="poster" size="original" url="http://images.themoviedb.org/posters/4181/67926_sin-city-02-color_122_207lo.jpg" id="4181"/>
        ..becomes:
        images['poster']['4181']['original'] = 'http://images.themoviedb.org/posters/4181/67926_sin-city-02-color_122_207lo.jpg'
        """
        _type = image_et.get("type")
        _id = image_et.get("id")
        size = image_et.get("size")
        url = image_et.get("url")
        self[_type][_id][size] = url
    def __repr__(self):
        return "<%s with %s posters and %s backdrops>" % (
            self.__class__.__name__,
            len(self['poster'].keys()),
            len(self['backdrop'].keys())
        )
    def largest(self, _type, _id):
        """Attempts to return largest image of a specific type and id
        """
        if(isinstance(_id, int)):
            _id = str(_id)
        # Walk sizes from largest to smallest; a stored size matches when it
        # merely CONTAINS the candidate name (substring check).
        # NOTE(review): the return indexes with cur_size rather than the
        # matched `size` key -- for a size like "original-big" this would
        # return an empty nested dict instead of the url; confirm intent.
        for cur_size in ["original", "mid", "cover", "thumb"]:
            for size in self[_type][_id]:
                if cur_size in size:
                    return self[_type][_id][cur_size]
class CrewRoleList(dict):
    """Dict keyed by crew role name ("director", "actor", "author", ...),
    each value being a CrewList of the people who held that role.
    """
    pass
class CrewList(list):
    """List of Person entries that all share a single crew role."""
    pass
class Person(dict):
    """Dict describing one cast/crew member: job, id, name, character, url."""
    def __init__(self, job, _id, name, character, url):
        dict.__init__(self, job=job, id=_id, name=name,
                      character=character, url=url)
    def __repr__(self):
        # Only mention the character when one was actually credited.
        if self['character'] is None or self['character'] == "":
            return "<%(job)s (id %(id)s): %(name)s>" % self
        return "<%(job)s (id %(id)s): %(name)s (as %(character)s)>" % self
class MovieDb:
    """Main interface to www.themoviedb.com
    The search() method searches for the film by title.
    The getMovieInfo() method retrieves information about a specific movie using themoviedb id.
    """
    def _parseSearchResults(self, movie_element):
        # Convert one <movie> element from a search response into a
        # MovieResult; only the nested <images> get special handling.
        cur_movie = MovieResult()
        cur_images = Images()
        for item in movie_element.getchildren():
            if item.tag.lower() == "images":
                for subitem in item.getchildren():
                    cur_images.set(subitem)
            else:
                # Everything else becomes a flat tag -> text entry.
                cur_movie[item.tag] = item.text
        cur_movie['images'] = cur_images
        return cur_movie
    def _parseMovie(self, movie_element):
        # Convert a full <movie> element (getInfo response) into a Movie,
        # dispatching the structured children to their container classes.
        cur_movie = Movie()
        cur_categories = Categories()
        cur_studios = Studios()
        cur_countries = Countries()
        cur_images = Images()
        cur_cast = CrewRoleList()
        for item in movie_element.getchildren():
            if item.tag.lower() == "categories":
                for subitem in item.getchildren():
                    cur_categories.set(subitem)
            elif item.tag.lower() == "studios":
                for subitem in item.getchildren():
                    cur_studios.set(subitem)
            elif item.tag.lower() == "countries":
                for subitem in item.getchildren():
                    cur_countries.set(subitem)
            elif item.tag.lower() == "images":
                for subitem in item.getchildren():
                    cur_images.set(subitem)
            elif item.tag.lower() == "cast":
                # Cast members are grouped by lower-cased job title.
                for subitem in item.getchildren():
                    job = subitem.get("job").lower()
                    p = Person(
                        job = job,
                        _id = subitem.get("id"),
                        name = subitem.get("name"),
                        character = subitem.get("character"),
                        url = subitem.get("url")
                    )
                    cur_cast.setdefault(job, CrewList()).append(p)
            else:
                cur_movie[item.tag] = item.text
        cur_movie['categories'] = cur_categories
        cur_movie['studios'] = cur_studios
        cur_movie['countries'] = cur_countries
        cur_movie['images'] = cur_images
        cur_movie['cast'] = cur_cast
        return cur_movie
    def search(self, title):
        """Searches for a film by its title.
        Returns SearchResults (a list) containing all matches (Movie instances)
        """
        # URL-encode the UTF-8 title before substituting into the template.
        title = urllib.quote(title.encode("utf-8"))
        url = config['urls']['movie.search'] % (title)
        etree = XmlHandler(url).getEt()
        search_results = SearchResults()
        for cur_result in etree.find("movies").findall("movie"):
            cur_movie = self._parseSearchResults(cur_result)
            search_results.append(cur_movie)
        return search_results
    def getMovieInfo(self, id):
        """Returns movie info by from its tmdb id.
        Returns a Movie instance
        """
        url = config['urls']['movie.getInfo'] % (id)
        etree = XmlHandler(url).getEt()
        # getInfo responses contain exactly one <movie> element.
        return self._parseMovie(etree.find("movies").findall("movie")[0])
def search(name = None):
    """Module-level shortcut for MovieDb.search, so callers can do:
    >>> import tmdb
    >>> tmdb.search("Fight Club")
    <Search results: [<MovieResult: Fight Club (1999-09-16)>]>
    """
    return MovieDb().search(name)
def getMovieInfo(id = None):
    """Module-level shortcut for MovieDb.getMovieInfo, so callers can do:
    >>> import tmdb
    >>> tmdb.getMovieInfo(187)
    <MovieResult: Sin City (2005-04-01)>
    """
    return MovieDb().getMovieInfo(id)
def main():
results = search("Fight Club")
searchResult = results[0]
movie = getMovieInfo(searchResult['id'])
print movie['name']
print "Producers:"
for prodr in movie['cast']['Producer']:
print " " * 4, prodr['name']
print movie['images']
for genreName in movie['categories']['genre']:
print "%s (%s)" % (genreName, movie['categories']['genre'][genreName])
if __name__ == '__main__':
main() | 33.31694 | 142 | 0.586354 |
955351f42a772eb848c0ae2b75d5d28ba1ff2a00 | 3,033 | py | Python | mir3/lib/knn.py | pymir3/pymir3 | c1bcca66a5ef1ff0ebd6373e3820e72dee6b0b70 | [
"MIT"
] | 12 | 2015-08-03T12:41:11.000Z | 2020-08-18T07:55:23.000Z | mir3/lib/knn.py | pymir3/pymir3 | c1bcca66a5ef1ff0ebd6373e3820e72dee6b0b70 | [
"MIT"
] | 1 | 2015-05-27T18:47:20.000Z | 2015-05-27T18:47:20.000Z | mir3/lib/knn.py | pymir3/pymir3 | c1bcca66a5ef1ff0ebd6373e3820e72dee6b0b70 | [
"MIT"
] | 3 | 2016-03-18T03:30:02.000Z | 2018-07-05T02:29:16.000Z | import numpy
import numpy.linalg
def distance_sum(inputs, references):
    """Sum of Euclidean distances from each input row to all reference rows.

    inputs     -- 2-D array, one point per row
    references -- 2-D array, one point per row, same number of columns
    Returns a 1-D array with one total distance per input row.
    """
    norms = numpy.zeros(inputs.shape[0])
    # `range` (not the Python-2-only `xrange`) keeps this portable.
    for i in range(references.shape[0]):
        # Vectorised row-wise norm; replaces the slower per-row
        # apply_along_axis call with identical results.
        norms += numpy.linalg.norm(inputs - references[i, :], axis=1)
    return norms
def distance_min(inputs, references):
    """Distance from each input row to its nearest reference row.

    Returns a 1-D array with one minimum distance per input row.
    """
    # numpy.inf replaces the old 99999999 sentinel, which silently gave
    # wrong answers whenever every true distance exceeded the constant.
    norms = numpy.full(inputs.shape[0], numpy.inf)
    for i in range(references.shape[0]):
        norms = numpy.minimum(
            norms, numpy.linalg.norm(inputs - references[i, :], axis=1))
    return norms
def distance_matrix(inputs):
    """Pairwise Euclidean distance matrix between the rows of `inputs`.

    The diagonal is deliberately left at a large sentinel (99999999) rather
    than zero so that callers taking a column-wise minimum skip the
    self-distance (see distance_mutual_min below).
    """
    D = numpy.ones( (inputs.shape[0], inputs.shape[0]) )*99999999
    for i in range(inputs.shape[0]):
        for j in range(i):
            # Distance is symmetric: compute once, assign to both halves
            # (the old code evaluated the norm twice).
            d = numpy.linalg.norm(inputs[i, :] - inputs[j, :])
            D[i, j] = d
            D[j, i] = d
    return D
def distance_mutual_min(inputs, references):
    """Mutual-proximity-corrected nearest-reference distance.

    Subtracts, from each input's nearest-reference distance, that input's
    distance to its nearest neighbour among the other inputs.
    Inspired in:
    USING MUTUAL PROXIMITY TO IMPROVE CONTENT-BASED AUDIO SIMILARITY
    Dominik Schnitzer, Arthur Flexer, Markus Sched, Gerhard Widmer
    """
    d = distance_matrix(inputs)
    a = distance_min(inputs, references)
    # Column-wise minimum of d is each input's nearest-neighbour distance;
    # the sentinel diagonal keeps the zero self-distance out of the min.
    return a - d.min(axis=0)
def range_distance(inputs, references):
    """Distance from each input row to the axis-aligned bounding box of the
    reference rows (0 for points inside or on the box).

    Returns a 1-D array with one distance per input row.
    """
    mi = numpy.amin(references, 0)
    ma = numpy.amax(references, 0)
    # Per-dimension distance to the box [mi, ma]: positive only when the
    # coordinate lies outside the range.  Replaces the old O(n*m) Python
    # double loop (which also used the Python-2-only xrange) with an
    # exact vectorised equivalent.
    outside = numpy.maximum(numpy.maximum(mi - inputs, inputs - ma), 0.0)
    return numpy.sqrt((outside ** 2).sum(axis=1))
def mutual_range_distance(inputs, references):
    """range_distance corrected by each input's nearest-neighbour distance.

    Subtracts from every bounding-box distance the distance to the nearest
    other input row (mutual-proximity correction, cf. distance_mutual_min).
    Returns a 1-D array with one corrected distance per input row.
    """
    mi = numpy.amin(references, 0)
    ma = numpy.amax(references, 0)
    # Vectorised box distance, identical in value to the old per-element
    # Python loop (which also used the Python-2-only xrange).
    outside = numpy.maximum(numpy.maximum(mi - inputs, inputs - ma), 0.0)
    norms = numpy.sqrt((outside ** 2).sum(axis=1))
    d = distance_matrix(inputs)
    # The sentinel diagonal of d keeps each point's zero self-distance
    # out of the column-wise minimum.
    return norms - d.min(axis=0)
#a = numpy.array([[2, 4, 6], [4, 3, 2], [5, -2, -1], [10, 11, 12], [15, 20, 31]])
#b = numpy.array([[10, 11, 12], [-1, -2, -3]])
#print distance_sum(a, b)
#print a
#print b
#print distance_min(a, b)
#print distance_mutual_min(a, b)
| 31.59375 | 81 | 0.567755 |
9553b680206d84c135ef2d6c3b9397a51e5c12a9 | 17,984 | py | Python | pysnmp/CABH-QOS2-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CABH-QOS2-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CABH-QOS2-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CABH-QOS2-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CABH-QOS2-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:26:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
clabProjCableHome, = mibBuilder.importSymbols("CLAB-DEF-MIB", "clabProjCableHome")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
InetAddress, InetAddressType, InetPortNumber = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType", "InetPortNumber")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
MibIdentifier, iso, Bits, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, Integer32, Counter32, Gauge32, Unsigned32, ObjectIdentity, ModuleIdentity, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "iso", "Bits", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "Integer32", "Counter32", "Gauge32", "Unsigned32", "ObjectIdentity", "ModuleIdentity", "IpAddress")
RowStatus, TextualConvention, DisplayString, TimeStamp, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString", "TimeStamp", "TruthValue")
# NOTE(review): this file is pysmi-generated (see header above); comments
# here will be lost if the MIB is regenerated from its ASN.1 source.
# Top-level identity of the CableLabs CABH-QOS2-MIB module under the
# CableLabs enterprise arc 1.3.6.1.4.1.4491.
cabhQos2Mib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8))
cabhQos2Mib.setRevisions(('2005-04-08 00:00',))
# loadTexts is only true when the MIB builder keeps human-readable texts.
if mibBuilder.loadTexts: cabhQos2Mib.setLastUpdated('200504080000Z')
if mibBuilder.loadTexts: cabhQos2Mib.setOrganization('CableLabs Broadband Access Department')
# Sub-identifier layout beneath cabhQos2Mib: notifications (.0) and the
# object subtree (.1) with its base, PS-interface, policy-holder and
# device branches.
cabhQos2Mib2Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 0))
cabhQos2MibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1))
cabhQos2Base = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 1))
cabhQos2PsIfAttributes = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 2))
cabhQos2PolicyHolderObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3))
cabhQos2DeviceObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4))
cabhQos2SetToFactory = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cabhQos2SetToFactory.setStatus('current')
cabhQos2LastSetToFactory = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 1, 2), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cabhQos2LastSetToFactory.setStatus('current')
cabhQos2PsIfAttribTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 2, 1), )
if mibBuilder.loadTexts: cabhQos2PsIfAttribTable.setStatus('current')
cabhQos2PsIfAttribEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cabhQos2PsIfAttribEntry.setStatus('current')
cabhQos2PsIfAttribNumPriorities = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 2, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cabhQos2PsIfAttribNumPriorities.setStatus('current')
cabhQos2PsIfAttribNumQueues = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 2, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cabhQos2PsIfAttribNumQueues.setStatus('current')
cabhQos2PolicyHolderEnabled = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cabhQos2PolicyHolderEnabled.setStatus('current')
cabhQos2PolicyAdmissionControl = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cabhQos2PolicyAdmissionControl.setStatus('current')
cabhQos2NumActivePolicyHolder = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cabhQos2NumActivePolicyHolder.setStatus('current')
cabhQos2PolicyTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4), )
if mibBuilder.loadTexts: cabhQos2PolicyTable.setStatus('current')
cabhQos2PolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1), ).setIndexNames((0, "CABH-QOS2-MIB", "cabhQos2PolicyOwner"), (0, "CABH-QOS2-MIB", "cabhQos2PolicyOwnerRuleId"))
if mibBuilder.loadTexts: cabhQos2PolicyEntry.setStatus('current')
cabhQos2PolicyOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("operatorOnly", 1), ("homeUser", 2), ("operatorForHomeUser", 3), ("upnp", 4))))
if mibBuilder.loadTexts: cabhQos2PolicyOwner.setStatus('current')
cabhQos2PolicyOwnerRuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cabhQos2PolicyOwnerRuleId.setStatus('current')
cabhQos2PolicyRuleOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyRuleOrder.setStatus('current')
cabhQos2PolicyAppDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyAppDomain.setStatus('current')
cabhQos2PolicyAppName = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 5), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyAppName.setStatus('current')
cabhQos2PolicyServiceProvDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyServiceProvDomain.setStatus('current')
cabhQos2PolicyServiceName = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyServiceName.setStatus('current')
cabhQos2PolicyPortDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 8), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyPortDomain.setStatus('current')
cabhQos2PolicyPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 9), InetPortNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyPortNumber.setStatus('current')
cabhQos2PolicyIpType = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 10), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyIpType.setStatus('current')
cabhQos2PolicyIpProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyIpProtocol.setStatus('current')
cabhQos2PolicySrcIp = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 12), InetAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicySrcIp.setStatus('current')
cabhQos2PolicyDestIp = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 13), InetAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyDestIp.setStatus('current')
cabhQos2PolicySrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 14), InetPortNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicySrcPort.setStatus('current')
cabhQos2PolicyDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 15), InetPortNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyDestPort.setStatus('current')
cabhQos2PolicyTraffImpNum = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyTraffImpNum.setStatus('current')
cabhQos2PolicyUserImportance = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyUserImportance.setStatus('current')
cabhQos2PolicyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 3, 4, 1, 18), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2PolicyRowStatus.setStatus('current')
cabhQos2TrafficClassTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1), )
if mibBuilder.loadTexts: cabhQos2TrafficClassTable.setStatus('current')
cabhQos2TrafficClassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1), ).setIndexNames((0, "CABH-QOS2-MIB", "cabhQos2TrafficClassMethod"), (0, "CABH-QOS2-MIB", "cabhQos2TrafficClassIdx"))
if mibBuilder.loadTexts: cabhQos2TrafficClassEntry.setStatus('current')
cabhQos2TrafficClassMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("static", 1), ("upnp", 2))))
if mibBuilder.loadTexts: cabhQos2TrafficClassMethod.setStatus('current')
cabhQos2TrafficClassIdx = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cabhQos2TrafficClassIdx.setStatus('current')
cabhQos2TrafficClassProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 256))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassProtocol.setStatus('current')
cabhQos2TrafficClassIpType = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 4), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassIpType.setStatus('current')
cabhQos2TrafficClassSrcIp = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 5), InetAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassSrcIp.setStatus('current')
cabhQos2TrafficClassDestIp = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 6), InetAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassDestIp.setStatus('current')
cabhQos2TrafficClassSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 7), InetPortNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassSrcPort.setStatus('current')
cabhQos2TrafficClassDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 8), InetPortNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassDestPort.setStatus('current')
cabhQos2TrafficClassImpNum = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassImpNum.setStatus('current')
cabhQos2TrafficClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 1, 4, 1, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cabhQos2TrafficClassRowStatus.setStatus('current')
cabhQos2Conformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 2))
cabhQos2Compliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 2, 1))
cabhQos2Groups = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 2, 2))
cabhQos2Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 2, 1, 1)).setObjects(("CABH-QOS2-MIB", "cabhQos2Group"), ("CABH-QOS2-MIB", "cabhQos2ClassifierGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cabhQos2Compliance = cabhQos2Compliance.setStatus('current')
cabhQos2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 2, 2, 1)).setObjects(("CABH-QOS2-MIB", "cabhQos2SetToFactory"), ("CABH-QOS2-MIB", "cabhQos2LastSetToFactory"), ("CABH-QOS2-MIB", "cabhQos2PsIfAttribNumPriorities"), ("CABH-QOS2-MIB", "cabhQos2PsIfAttribNumQueues"), ("CABH-QOS2-MIB", "cabhQos2PolicyHolderEnabled"), ("CABH-QOS2-MIB", "cabhQos2PolicyAdmissionControl"), ("CABH-QOS2-MIB", "cabhQos2NumActivePolicyHolder"), ("CABH-QOS2-MIB", "cabhQos2PolicyRuleOrder"), ("CABH-QOS2-MIB", "cabhQos2PolicyAppDomain"), ("CABH-QOS2-MIB", "cabhQos2PolicyAppName"), ("CABH-QOS2-MIB", "cabhQos2PolicyServiceProvDomain"), ("CABH-QOS2-MIB", "cabhQos2PolicyServiceName"), ("CABH-QOS2-MIB", "cabhQos2PolicyPortDomain"), ("CABH-QOS2-MIB", "cabhQos2PolicyPortNumber"), ("CABH-QOS2-MIB", "cabhQos2PolicyIpProtocol"), ("CABH-QOS2-MIB", "cabhQos2PolicyIpType"), ("CABH-QOS2-MIB", "cabhQos2PolicySrcIp"), ("CABH-QOS2-MIB", "cabhQos2PolicyDestIp"), ("CABH-QOS2-MIB", "cabhQos2PolicySrcPort"), ("CABH-QOS2-MIB", "cabhQos2PolicyDestPort"), ("CABH-QOS2-MIB", "cabhQos2PolicyTraffImpNum"), ("CABH-QOS2-MIB", "cabhQos2PolicyUserImportance"), ("CABH-QOS2-MIB", "cabhQos2PolicyRowStatus"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassProtocol"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassIpType"), ("CABH-QOS2-MIB", "cabhQos2PolicySrcIp"), ("CABH-QOS2-MIB", "cabhQos2PolicyDestIp"), ("CABH-QOS2-MIB", "cabhQos2PolicySrcPort"), ("CABH-QOS2-MIB", "cabhQos2PolicyDestPort"), ("CABH-QOS2-MIB", "cabhQos2PolicyTraffImpNum"), ("CABH-QOS2-MIB", "cabhQos2PolicyUserImportance"), ("CABH-QOS2-MIB", "cabhQos2PolicyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cabhQos2Group = cabhQos2Group.setStatus('current')
cabhQos2ClassifierGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 4, 8, 2, 2, 2)).setObjects(("CABH-QOS2-MIB", "cabhQos2TrafficClassProtocol"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassIpType"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassSrcIp"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassDestIp"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassSrcPort"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassDestPort"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassImpNum"), ("CABH-QOS2-MIB", "cabhQos2TrafficClassRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cabhQos2ClassifierGroup = cabhQos2ClassifierGroup.setStatus('current')
mibBuilder.exportSymbols("CABH-QOS2-MIB", cabhQos2PolicyAppName=cabhQos2PolicyAppName, cabhQos2PolicyEntry=cabhQos2PolicyEntry, cabhQos2TrafficClassProtocol=cabhQos2TrafficClassProtocol, cabhQos2TrafficClassRowStatus=cabhQos2TrafficClassRowStatus, cabhQos2SetToFactory=cabhQos2SetToFactory, cabhQos2Mib2Notifications=cabhQos2Mib2Notifications, cabhQos2PsIfAttribEntry=cabhQos2PsIfAttribEntry, cabhQos2PsIfAttribNumQueues=cabhQos2PsIfAttribNumQueues, cabhQos2PolicyHolderEnabled=cabhQos2PolicyHolderEnabled, cabhQos2PolicyIpProtocol=cabhQos2PolicyIpProtocol, PYSNMP_MODULE_ID=cabhQos2Mib, cabhQos2DeviceObjects=cabhQos2DeviceObjects, cabhQos2PolicyPortNumber=cabhQos2PolicyPortNumber, cabhQos2PolicyOwnerRuleId=cabhQos2PolicyOwnerRuleId, cabhQos2PsIfAttribTable=cabhQos2PsIfAttribTable, cabhQos2LastSetToFactory=cabhQos2LastSetToFactory, cabhQos2MibObjects=cabhQos2MibObjects, cabhQos2PsIfAttributes=cabhQos2PsIfAttributes, cabhQos2PolicyDestIp=cabhQos2PolicyDestIp, cabhQos2PolicyDestPort=cabhQos2PolicyDestPort, cabhQos2Compliances=cabhQos2Compliances, cabhQos2TrafficClassSrcPort=cabhQos2TrafficClassSrcPort, cabhQos2PolicyTraffImpNum=cabhQos2PolicyTraffImpNum, cabhQos2Conformance=cabhQos2Conformance, cabhQos2ClassifierGroup=cabhQos2ClassifierGroup, cabhQos2TrafficClassDestIp=cabhQos2TrafficClassDestIp, cabhQos2TrafficClassDestPort=cabhQos2TrafficClassDestPort, cabhQos2PolicyRowStatus=cabhQos2PolicyRowStatus, cabhQos2PolicySrcIp=cabhQos2PolicySrcIp, cabhQos2TrafficClassSrcIp=cabhQos2TrafficClassSrcIp, cabhQos2TrafficClassMethod=cabhQos2TrafficClassMethod, cabhQos2PolicySrcPort=cabhQos2PolicySrcPort, cabhQos2PolicyServiceName=cabhQos2PolicyServiceName, cabhQos2NumActivePolicyHolder=cabhQos2NumActivePolicyHolder, cabhQos2PolicyUserImportance=cabhQos2PolicyUserImportance, cabhQos2Compliance=cabhQos2Compliance, cabhQos2PsIfAttribNumPriorities=cabhQos2PsIfAttribNumPriorities, cabhQos2TrafficClassImpNum=cabhQos2TrafficClassImpNum, 
cabhQos2PolicyAdmissionControl=cabhQos2PolicyAdmissionControl, cabhQos2PolicyRuleOrder=cabhQos2PolicyRuleOrder, cabhQos2PolicyServiceProvDomain=cabhQos2PolicyServiceProvDomain, cabhQos2PolicyOwner=cabhQos2PolicyOwner, cabhQos2Groups=cabhQos2Groups, cabhQos2PolicyTable=cabhQos2PolicyTable, cabhQos2PolicyAppDomain=cabhQos2PolicyAppDomain, cabhQos2PolicyIpType=cabhQos2PolicyIpType, cabhQos2TrafficClassTable=cabhQos2TrafficClassTable, cabhQos2PolicyPortDomain=cabhQos2PolicyPortDomain, cabhQos2Mib=cabhQos2Mib, cabhQos2TrafficClassEntry=cabhQos2TrafficClassEntry, cabhQos2Group=cabhQos2Group, cabhQos2PolicyHolderObjects=cabhQos2PolicyHolderObjects, cabhQos2TrafficClassIpType=cabhQos2TrafficClassIpType, cabhQos2Base=cabhQos2Base, cabhQos2TrafficClassIdx=cabhQos2TrafficClassIdx)
| 145.032258 | 2,723 | 0.769462 |
9554b36b01a4be87039a97f47f4d8ef14a97ffe2 | 2,440 | py | Python | nasafree/views.py | luanmalaquias/projeto_api_nasa_django | 3441c404da821b4177571814014e89b0cff6a6b7 | [
"MIT"
] | null | null | null | nasafree/views.py | luanmalaquias/projeto_api_nasa_django | 3441c404da821b4177571814014e89b0cff6a6b7 | [
"MIT"
] | null | null | null | nasafree/views.py | luanmalaquias/projeto_api_nasa_django | 3441c404da821b4177571814014e89b0cff6a6b7 | [
"MIT"
] | null | null | null | from django.shortcuts import redirect, render
from .models import *
from datetime import date
from brain import ipfunc
def home(request):
dados = Apod.getObjectOrRequest()
buscaData = request.POST.get('data')
if request.method == 'POST':
dados = Apod.getObjectOrRequest(str(buscaData))
return render(request, 'views/apod.html', {'dados': dados[0]})
def marsWeather(request):
return render(request, 'views/mars-weather.html')
def mrp(request):
return render(request, 'views/mrp.html')
def mrpspirit(request):
dados = MRP.objects.filter(sol = 1, rover_name__icontains = "spirit").order_by('id')
buscaSol = request.POST.get('sol')
if request.method == 'POST' and buscaSol:
dados = MRP.getObjectOrRequest(buscaSol, "spirit")
context = {'dados': dados, 'dias': 609}
return render(request, 'views/mrpview.html', context)
def mrpopportunity(request):
dados = MRP.objects.filter(sol = 1, rover_name__icontains = "opportunity").order_by('id')
buscaSol = request.POST.get('sol')
if request.method == 'POST' and buscaSol:
dados = MRP.getObjectOrRequest(buscaSol, "opportunity")
context = {'dados': dados, 'dias': 5100}
return render(request, 'views/mrpview.html', context)
def mrpcuriosity(request):
dias = date.today() - date(2012, 8, 6)
dados = MRP.objects.filter(sol = 1, rover_name__icontains = "curiosity").order_by('id')
buscaSol = request.POST.get('sol')
if request.method == 'POST' and buscaSol:
dados = MRP.getObjectOrRequest(buscaSol, "curiosity")
context = {'dias': dias.days - 90, 'dados': dados}
return render(request, 'views/mrpview.html', context)
def trekImagery(request):
return render(request, 'views/trek-imagery.html')
def nasaSearch(request):
dados = None
pesquisa = request.GET.get('q')
if pesquisa:
dados = getRequestNasaSearch(pesquisa)
if request.method == 'POST':
index = int(request.POST.get('index'))
contextContent = getContextNasaSearch(dados, index)
return render(request, 'views/search-content.html', contextContent)
context = {'pesquisa': pesquisa, 'dados': dados}
return render(request, 'views/search.html', context)
def crewMembers(request):
context = getCrew()
return render(request, 'views/crewmembers.html', context)
def handlerErrorPage(request, exception):
return render(request, 'views/404.html') | 34.366197 | 93 | 0.685246 |
9554dabbb9a81e2fbde331f2e40edcaa0f221585 | 805 | py | Python | bslparloursite/videolibrary/models.py | natfarleydev/thebslparlour | ebb2588282cdb2a977ec6c5f8d82cec4e8fd1f99 | [
"CC0-1.0"
] | 1 | 2016-01-06T23:13:11.000Z | 2016-01-06T23:13:11.000Z | bslparloursite/videolibrary/models.py | natfarleydev/thebslparlour | ebb2588282cdb2a977ec6c5f8d82cec4e8fd1f99 | [
"CC0-1.0"
] | 4 | 2021-03-18T20:15:04.000Z | 2021-06-10T17:52:31.000Z | bslparloursite/videolibrary/models.py | natfarleydev/thebslparlour | ebb2588282cdb2a977ec6c5f8d82cec4e8fd1f99 | [
"CC0-1.0"
] | null | null | null | from django.db import models
from django.utils import timezone
from sizefield.models import FileSizeField
# Create your models here.
class Video(models.Model):
sha224 = models.CharField(max_length=56, unique=True)
filename = models.CharField(max_length=200)
dropbox_directory = models.CharField(max_length=200)
mime_type = models.CharField(max_length=200)
date_added = models.DateTimeField(default=timezone.now, editable=False)
size = FileSizeField()
class Meta:
abstract = True
def __str__(self):
return self.filename or self.sha224_id
class SourceVideo(Video):
vimeo_uri = models.IntegerField()
youtube_id = models.CharField(max_length=30, blank=True)
def __str__(self):
return self.filename+" ("+str(self.vimeo_uri)+")"
| 29.814815 | 75 | 0.720497 |
9554ef14a15f7437ba6f8f9a2cf1620b9d8dfb4c | 1,681 | py | Python | location.py | TED-996/Nightshift | 3cc76af96c8e85e913be8c2f8f70564ea9d9f95d | [
"MIT"
] | null | null | null | location.py | TED-996/Nightshift | 3cc76af96c8e85e913be8c2f8f70564ea9d9f95d | [
"MIT"
] | null | null | null | location.py | TED-996/Nightshift | 3cc76af96c8e85e913be8c2f8f70564ea9d9f95d | [
"MIT"
] | null | null | null | import os.path
import json
from astral import Astral
appdata_folder = os.path.join(os.environ["LOCALAPPDATA"], "Nightshift")
def set_location(latitude, longitude):
print "Setting location to {0}, {1}".format(latitude, longitude)
try:
if not os.path.exists(appdata_folder):
os.mkdir(appdata_folder)
file_obj = open(os.path.join(appdata_folder, "location.json"), "w")
json.dump({"longitude": longitude,
"latitude": latitude},
file_obj)
file_obj.close()
return True
except:
print "Could not save the location and sunrise/sunset."
raise
def set_location_city(city):
print "Trying to set location to", city
astral_obj = Astral()
try:
city_data = astral_obj[city]
except KeyError:
print "Sorry, but this city does not exist in the city database."
print "City names are capitalized and in English (e.g. Rome)"
return False
set_location(city_data.latitude, city_data.longitude)
return True
def get_location():
print "Getting saved location."
try:
file_obj = open(os.path.join(appdata_folder, "location.json"), "r")
result = json.load(file_obj)
file_obj.close()
return {"longitude": result["longitude"],
"latitude": result["latitude"]}
except IOError:
print "Could not read from location file."
print "Try setting your location with"
print "Nightshift.exe -s latitude longitude"
print "or"
print "Nightshift.exe -s city"
raise
except:
print "Could not get saved location."
raise
| 28.982759 | 75 | 0.625223 |
95583195ca817a2531ead6462fb4ef3915b9a847 | 12,140 | py | Python | src/awkward1/operations/reducers.py | martindurant/awkward-1.0 | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | [
"BSD-3-Clause"
] | null | null | null | src/awkward1/operations/reducers.py | martindurant/awkward-1.0 | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | [
"BSD-3-Clause"
] | null | null | null | src/awkward1/operations/reducers.py | martindurant/awkward-1.0 | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import numpy
import awkward1._util
import awkward1._connect._numpy
import awkward1.layout
import awkward1.operations.convert
def count(array, axis=None, keepdims=False, maskidentity=False):
layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
if axis is None:
def reduce(xs):
if len(xs) == 1:
return xs[0]
else:
return xs[0] + reduce(xs[1:])
return reduce([numpy.size(x) for x in awkward1._util.completely_flatten(layout)])
else:
behavior = awkward1._util.behaviorof(array)
return awkward1._util.wrap(layout.count(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.count_nonzero)
def count_nonzero(array, axis=None, keepdims=False, maskidentity=False):
layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
if axis is None:
def reduce(xs):
if len(xs) == 1:
return xs[0]
else:
return xs[0] + reduce(xs[1:])
return reduce([numpy.count_nonzero(x) for x in awkward1._util.completely_flatten(layout)])
else:
behavior = awkward1._util.behaviorof(array)
return awkward1._util.wrap(layout.count_nonzero(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.sum)
def sum(array, axis=None, keepdims=False, maskidentity=False):
    """Sum the elements of ``array``.

    With ``axis=None`` the layout is completely flattened, each leaf buffer
    is reduced with ``numpy.sum``, and the partial sums are combined;
    otherwise the reduction is delegated to the layout and re-wrapped with
    the array's behavior.
    """
    layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
    if axis is None:
        partials = [numpy.sum(x) for x in awkward1._util.completely_flatten(layout)]
        if len(partials) == 0:
            return 0  # identity of addition; the old recursive fold raised IndexError here
        # Iterative right-fold: preserves the old recursion's exact
        # floating-point addition order while avoiding the recursion limit.
        total = partials[-1]
        for value in reversed(partials[:-1]):
            total = value + total
        return total
    else:
        behavior = awkward1._util.behaviorof(array)
        return awkward1._util.wrap(layout.sum(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.prod)
def prod(array, axis=None, keepdims=False, maskidentity=False):
    """Multiply the elements of ``array`` together.

    With ``axis=None`` the layout is completely flattened, each leaf buffer
    is reduced with ``numpy.prod``, and the partial products are combined;
    otherwise the reduction is delegated to the layout and re-wrapped with
    the array's behavior.
    """
    layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
    if axis is None:
        partials = [numpy.prod(x) for x in awkward1._util.completely_flatten(layout)]
        if len(partials) == 0:
            return 1  # identity of multiplication; the old recursive fold raised IndexError here
        # Iterative right-fold with "*" (same order as the old recursion).
        total = partials[-1]
        for value in reversed(partials[:-1]):
            total = value * total
        return total
    else:
        behavior = awkward1._util.behaviorof(array)
        return awkward1._util.wrap(layout.prod(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.any)
def any(array, axis=None, keepdims=False, maskidentity=False):
    """Logical OR reduction: is any element of ``array`` truthy?

    With ``axis=None`` the layout is completely flattened and the per-buffer
    ``numpy.any`` results are combined with ``or``; otherwise the reduction
    is delegated to the layout and re-wrapped with the array's behavior.
    """
    layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
    if axis is None:
        flags = [numpy.any(x) for x in awkward1._util.completely_flatten(layout)]
        if len(flags) == 0:
            return False  # identity of "or"; the old recursive fold raised IndexError here
        # Iterative right-fold with "or" (same value as the old recursion).
        result = flags[-1]
        for flag in reversed(flags[:-1]):
            result = flag or result
        return result
    else:
        behavior = awkward1._util.behaviorof(array)
        return awkward1._util.wrap(layout.any(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.all)
def all(array, axis=None, keepdims=False, maskidentity=False):
    """Logical AND reduction: are all elements of ``array`` truthy?

    With ``axis=None`` the layout is completely flattened and the per-buffer
    ``numpy.all`` results are combined with ``and``; otherwise the reduction
    is delegated to the layout and re-wrapped with the array's behavior.
    """
    layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
    if axis is None:
        flags = [numpy.all(x) for x in awkward1._util.completely_flatten(layout)]
        if len(flags) == 0:
            return True  # identity of "and"; the old recursive fold raised IndexError here
        # Iterative right-fold with "and" (same value as the old recursion).
        result = flags[-1]
        for flag in reversed(flags[:-1]):
            result = flag and result
        return result
    else:
        behavior = awkward1._util.behaviorof(array)
        return awkward1._util.wrap(layout.all(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.min)
def min(array, axis=None, keepdims=False, maskidentity=True):
    """Minimum element of ``array``; ``None`` when there is no data at all.

    With ``axis=None`` every leaf buffer is flattened and reduced with
    ``numpy.min``; otherwise the reduction is delegated to the layout.
    ``maskidentity`` defaults to True because min has no natural identity.
    """
    layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
    if axis is None:
        def reduce(xs):
            # Recursive right-fold pairwise comparison; None means "empty".
            if len(xs) == 0:
                return None
            elif len(xs) == 1:
                return xs[0]
            else:
                x, y = xs[0], reduce(xs[1:])
                return x if x < y else y
        tmp = awkward1._util.completely_flatten(layout)
        # Zero-length buffers are skipped: numpy.min would raise on them.
        return reduce([numpy.min(x) for x in tmp if len(x) > 0])
    else:
        behavior = awkward1._util.behaviorof(array)
        return awkward1._util.wrap(layout.min(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.max)
def max(array, axis=None, keepdims=False, maskidentity=True):
    """Maximum element of ``array``; ``None`` when there is no data at all.

    With ``axis=None`` every leaf buffer is flattened and reduced with
    ``numpy.max``; otherwise the reduction is delegated to the layout.
    ``maskidentity`` defaults to True because max has no natural identity.
    """
    layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
    if axis is None:
        def reduce(xs):
            # Recursive right-fold pairwise comparison; None means "empty".
            if len(xs) == 0:
                return None
            elif len(xs) == 1:
                return xs[0]
            else:
                x, y = xs[0], reduce(xs[1:])
                return x if x > y else y
        tmp = awkward1._util.completely_flatten(layout)
        # Zero-length buffers are skipped: numpy.max would raise on them.
        return reduce([numpy.max(x) for x in tmp if len(x) > 0])
    else:
        behavior = awkward1._util.behaviorof(array)
        return awkward1._util.wrap(layout.max(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
### The following are not strictly reducers, but are defined in terms of reducers and ufuncs.
def moment(x, n, weight=None, axis=None, keepdims=False):
    """Raw n-th moment of ``x``: sum(weight * x**n) / sum(weight).

    With no ``weight`` every element counts once.  Division uses
    ``numpy.true_divide`` with invalid-value warnings suppressed.
    """
    with numpy.errstate(invalid="ignore"):
        if weight is None:
            sumw = count(x, axis=axis, keepdims=keepdims)
            sumwxn = sum(x**n, axis=axis, keepdims=keepdims)
        else:
            # x*0 + weight broadcasts the weights to x's structure.
            sumw = sum(x*0 + weight, axis=axis, keepdims=keepdims)
            # Bug fix: the weighted moment is sum(w * x**n); the old code
            # computed sum((w*x)**n) == sum(w**n * x**n), which over-weights
            # every element by w**(n-1) whenever w != 1.
            sumwxn = sum((x**n)*weight, axis=axis, keepdims=keepdims)
        return numpy.true_divide(sumwxn, sumw)
@awkward1._connect._numpy.implements(numpy.mean)
def mean(x, weight=None, axis=None, keepdims=False):
    """Arithmetic (optionally weighted) mean: sum(w*x) / sum(w)."""
    with numpy.errstate(invalid="ignore"):
        if weight is not None:
            # x*0 + weight broadcasts the weights to x's structure.
            denominator = sum(x*0 + weight, axis=axis, keepdims=keepdims)
            numerator = sum(x*weight, axis=axis, keepdims=keepdims)
        else:
            denominator = count(x, axis=axis, keepdims=keepdims)
            numerator = sum(x, axis=axis, keepdims=keepdims)
        return numpy.true_divide(numerator, denominator)
@awkward1._connect._numpy.implements(numpy.var)
def var(x, weight=None, ddof=0, axis=None, keepdims=False):
    """(Optionally weighted) variance with a ``ddof`` correction:
    sum(w*(x - mean)**2) / sum(w), rescaled by sum(w)/(sum(w) - ddof).
    """
    with numpy.errstate(invalid="ignore"):
        xmean = mean(x, weight=weight, axis=axis, keepdims=keepdims)
        if weight is None:
            sumw = count(x, axis=axis, keepdims=keepdims)
            sumwxx = sum((x - xmean)**2, axis=axis, keepdims=keepdims)
        else:
            # x*0 + weight broadcasts the weights to x's structure.
            sumw = sum(x*0 + weight, axis=axis, keepdims=keepdims)
            sumwxx = sum((x - xmean)**2 * weight, axis=axis, keepdims=keepdims)
        if ddof != 0:
            # Degrees-of-freedom correction applied as a separate factor.
            return numpy.true_divide(sumwxx, sumw) * numpy.true_divide(sumw, sumw - ddof)
        else:
            return numpy.true_divide(sumwxx, sumw)
@awkward1._connect._numpy.implements(numpy.std)
def std(x, weight=None, ddof=0, axis=None, keepdims=False):
    """Standard deviation: the square root of :func:`var` (same arguments)."""
    with numpy.errstate(invalid="ignore"):
        variance = var(x, weight=weight, ddof=ddof, axis=axis, keepdims=keepdims)
        return numpy.sqrt(variance)
def covar(x, y, weight=None, axis=None, keepdims=False):
    """(Optionally weighted) covariance of ``x`` and ``y``:
    sum(w*(x - xmean)*(y - ymean)) / sum(w).
    """
    with numpy.errstate(invalid="ignore"):
        xmean = mean(x, weight=weight, axis=axis, keepdims=keepdims)
        ymean = mean(y, weight=weight, axis=axis, keepdims=keepdims)
        if weight is None:
            sumw = count(x, axis=axis, keepdims=keepdims)
            sumwxy = sum((x - xmean)*(y - ymean), axis=axis, keepdims=keepdims)
        else:
            # x*0 + weight broadcasts the weights to x's structure.
            sumw = sum(x*0 + weight, axis=axis, keepdims=keepdims)
            sumwxy = sum((x - xmean)*(y - ymean)*weight, axis=axis, keepdims=keepdims)
        return numpy.true_divide(sumwxy, sumw)
def corr(x, y, weight=None, axis=None, keepdims=False):
    """(Optionally weighted) Pearson correlation of ``x`` and ``y``:
    sum(w*dx*dy) / sqrt(sum(w*dx**2) * sum(w*dy**2)) with dx = x - xmean.
    """
    with numpy.errstate(invalid="ignore"):
        xmean = mean(x, weight=weight, axis=axis, keepdims=keepdims)
        ymean = mean(y, weight=weight, axis=axis, keepdims=keepdims)
        xdiff = x - xmean
        ydiff = y - ymean
        if weight is None:
            sumwxx = sum(xdiff**2, axis=axis, keepdims=keepdims)
            sumwyy = sum(ydiff**2, axis=axis, keepdims=keepdims)
            sumwxy = sum(xdiff*ydiff, axis=axis, keepdims=keepdims)
        else:
            sumwxx = sum((xdiff**2)*weight, axis=axis, keepdims=keepdims)
            sumwyy = sum((ydiff**2)*weight, axis=axis, keepdims=keepdims)
            sumwxy = sum((xdiff*ydiff)*weight, axis=axis, keepdims=keepdims)
        return numpy.true_divide(sumwxy, numpy.sqrt(sumwxx * sumwyy))
def linearfit(x, y, weight=None, axis=None, keepdims=False):
    """Weighted least-squares straight-line fit of ``y`` against ``x``.

    Returns a "LinearFit" record (or record array when reducing along an
    axis) with fields ``intercept``, ``slope``, ``intercept_error`` and
    ``slope_error``, computed from the closed-form normal equations.
    """
    with numpy.errstate(invalid="ignore"):
        # Accumulate the five weighted sums the closed-form solution needs.
        if weight is None:
            sumw = count(x, axis=axis, keepdims=keepdims)
            sumwx = sum(x, axis=axis, keepdims=keepdims)
            sumwy = sum(y, axis=axis, keepdims=keepdims)
            sumwxx = sum(x**2, axis=axis, keepdims=keepdims)
            sumwxy = sum(x*y, axis=axis, keepdims=keepdims)
        else:
            sumw = sum(x*0 + weight, axis=axis, keepdims=keepdims)
            sumwx = sum(x*weight, axis=axis, keepdims=keepdims)
            sumwy = sum(y*weight, axis=axis, keepdims=keepdims)
            sumwxx = sum((x**2)*weight, axis=axis, keepdims=keepdims)
            sumwxy = sum(x*y*weight, axis=axis, keepdims=keepdims)
        # delta is the determinant of the 2x2 normal-equation matrix.
        delta = (sumw*sumwxx) - (sumwx*sumwx)
        intercept = numpy.true_divide(((sumwxx*sumwy) - (sumwx*sumwxy)), delta)
        slope = numpy.true_divide(((sumw*sumwxy) - (sumwx*sumwy)), delta)
        intercept_error = numpy.sqrt(numpy.true_divide(sumwxx, delta))
        slope_error = numpy.sqrt(numpy.true_divide(sumw, delta))
        # Convert each result to a layout so scalars and arrays can be
        # packaged uniformly into a RecordArray below.
        intercept = awkward1.operations.convert.tolayout(intercept, allowrecord=True, allowother=True)
        slope = awkward1.operations.convert.tolayout(slope, allowrecord=True, allowother=True)
        intercept_error = awkward1.operations.convert.tolayout(intercept_error, allowrecord=True, allowother=True)
        slope_error = awkward1.operations.convert.tolayout(slope_error, allowrecord=True, allowother=True)
        # If every piece is a scalar, remember to unwrap the length-1
        # record array back into a single record at the end.
        scalar = not isinstance(intercept, awkward1.layout.Content) and not isinstance(slope, awkward1.layout.Content) and not isinstance(intercept_error, awkward1.layout.Content) and not isinstance(slope_error, awkward1.layout.Content)
        if not isinstance(intercept, (awkward1.layout.Content, awkward1.layout.Record)):
            intercept = awkward1.layout.NumpyArray(numpy.array([intercept]))
        if not isinstance(slope, (awkward1.layout.Content, awkward1.layout.Record)):
            slope = awkward1.layout.NumpyArray(numpy.array([slope]))
        if not isinstance(intercept_error, (awkward1.layout.Content, awkward1.layout.Record)):
            intercept_error = awkward1.layout.NumpyArray(numpy.array([intercept_error]))
        if not isinstance(slope_error, (awkward1.layout.Content, awkward1.layout.Record)):
            slope_error = awkward1.layout.NumpyArray(numpy.array([slope_error]))
        out = awkward1.layout.RecordArray([intercept, slope, intercept_error, slope_error], ["intercept", "slope", "intercept_error", "slope_error"])
        out.setparameter("__record__", "LinearFit")
        if scalar:
            out = out[0]
        return awkward1._util.wrap(out, awkward1._util.behaviorof(x, y))
def softmax(x, axis=None, keepdims=False):
    """Softmax of ``x``: exp(x) normalized by the sum of exp(x) along axis."""
    with numpy.errstate(invalid="ignore"):
        numerator = numpy.exp(x)
        denominator = sum(numerator, axis=axis, keepdims=keepdims)
        return numpy.true_divide(numerator, denominator)
__all__ = [x for x in list(globals()) if not x.startswith("_") and x not in ("collections", "numpy", "awkward1")]
| 48.174603 | 236 | 0.647117 |
9558fc73a95bcd6653e042be3bc2a2e8ae6c004c | 4,758 | py | Python | dotenv.py | ross-urban/django-dotenv | 16cbf7bb78571174bc5376c95c85b213857cb9f9 | [
"MIT"
] | null | null | null | dotenv.py | ross-urban/django-dotenv | 16cbf7bb78571174bc5376c95c85b213857cb9f9 | [
"MIT"
] | null | null | null | dotenv.py | ross-urban/django-dotenv | 16cbf7bb78571174bc5376c95c85b213857cb9f9 | [
"MIT"
] | 1 | 2021-02-16T15:37:18.000Z | 2021-02-16T15:37:18.000Z | import os
import re
import sys
import warnings
__version__ = '1.4.3'
line_re = re.compile(r"""
^
(?:export\s+)? # optional export
([\w\.]+) # key
(?:\s*=\s*|:\s+?) # separator
( # optional value begin
'(?:\'|[^'])*' # single quoted value
| # or
"(?:\"|[^"])*" # double quoted value
| # or
[^#\n]+ # unquoted value
)? # value end
(?:\s*\#.*)? # optional comment
$
""", re.VERBOSE)
variable_re = re.compile(r"""
(\\)? # is it escaped with a backslash?
(\$) # literal $
( # collect braces with var for sub
\{? # allow brace wrapping
([A-Z0-9_]+) # match the variable
\}? # closing brace
) # braces end
""", re.IGNORECASE | re.VERBOSE)
# Directive keys recognized in .env files (direnv-style): their values are
# lists of additional dotenv files to load recursively, not env variables.
overrides = ('source_env', 'source_up')
def read_dotenv(dotenv=None, override=False):
    """
    Read a .env file into os.environ.

    If not given a path to a dotenv path, does filthy magic stack backtracking
    to find manage.py and then find the dotenv.

    If tests rely on .env files, setting the overwrite flag to True is a safe
    way to ensure tests run consistently across all environments.

    :param dotenv: path to a .env file, or to a directory containing one;
                   defaults to '.env' next to the *calling* module's file.
    :param override: True if values in .env should override system variables.
    """
    if dotenv is None:
        # sys._getframe().f_back is the caller's frame: look for '.env' in
        # the directory of whichever file called read_dotenv().
        frame_filename = sys._getframe().f_back.f_code.co_filename
        dotenv = os.path.join(os.path.dirname(frame_filename), '.env')
    if os.path.isdir(dotenv) and os.path.isfile(os.path.join(dotenv, '.env')):
        dotenv = os.path.join(dotenv, '.env')
    if os.path.exists(dotenv):
        with open(dotenv) as f:
            env = parse_dotenv(f.read())
        # First pass: export ordinary variables, skipping the special
        # 'source_env'/'source_up' directive keys.
        for k, v in env.items():
            if k in overrides:
                continue
            if override:
                os.environ[k] = v
            else:
                os.environ.setdefault(k, v)
        # Second pass: the directive keys hold *lists of filenames*; load
        # each referenced dotenv file recursively with the same override.
        for k, v in env.items():
            if k not in overrides:
                continue
            for fname in v:
                read_dotenv(fname, override)
    else:
        warnings.warn("Not reading {0} - it doesn't exist.".format(dotenv),
                      stacklevel=2)
def parse_dotenv(content):
    """Parse dotenv-formatted text into a plain dict.

    Regular ``KEY=value`` lines become string entries.  Lines using the
    'source_env'/'source_up' directives instead accumulate *lists* of file
    names under those keys, which read_dotenv() later loads recursively.
    Any other non-blank, non-comment line triggers a SyntaxWarning.
    """
    env = {}
    def replace(variable):
        """Substitute variables in a value either from `os.environ` or
        from previously declared variable that is still in our `env`"""
        for parts in variable_re.findall(variable):
            if parts[0] == '\\':
                # Variable is escaped, don't replace it
                replaced = ''.join(parts[1:-1])
            else:
                # Replace it with the value from the environment
                replacement = os.environ.get(parts[-1])
                if not replacement:
                    replacement = env.get(parts[-1], '')
                # Values declared earlier in this same file win over the
                # process environment.
                replaced = env.get(parts[-1], replacement)
            variable = variable.replace(''.join(parts[0:-1]), replaced)
        return variable
    for line in content.splitlines():
        m1 = line_re.search(line)
        if m1:
            key, value = m1.groups()
            if value is None:
                value = ''
            # Remove leading/trailing whitespace
            value = value.strip()
            # Remove surrounding quotes
            m2 = re.match(r'^([\'"])(.*)\1$', value)
            if m2:
                quotemark, value = m2.groups()
            else:
                quotemark = None
            # Unescape all chars except $ so variables can be escaped properly
            if quotemark == '"':
                value = re.sub(r'\\([^$])', r'\1', value)
            # Single-quoted values are taken literally: no interpolation.
            if quotemark != "'":
                value = replace(value)
            env[key] = value
        elif not re.search(r'^\s*(?:#.*)?$', line): # not comment or blank
            fname = None
            for prefix in overrides:
                if prefix not in line:
                    continue
                # Everything after the directive names a dotenv file; it may
                # itself contain variables and a leading '~'.
                fname = line.split(prefix)[-1].strip()
                fname = replace(fname)
                if fname.startswith('~'):
                    fname = os.path.expanduser(fname)
                exists = env.get(prefix)
                if not exists:
                    exists = [fname, ]
                else:
                    exists.append(fname)
                env[prefix] = exists
                break
            if not fname:
                warnings.warn(
                    "Line {0} doesn't match format".format(repr(line)),
                    SyntaxWarning
                )
    return env
| 31.72 | 78 | 0.481084 |
955b60084410969a08f97fe22aa1d69988088bf0 | 16,755 | py | Python | PsychoPy3 Experiments/ContrastDetection.py | mrhunsaker/ContrastDetection | bb058460c5f90119316d0637885cd47f7ca2a307 | [
"MIT"
] | null | null | null | PsychoPy3 Experiments/ContrastDetection.py | mrhunsaker/ContrastDetection | bb058460c5f90119316d0637885cd47f7ca2a307 | [
"MIT"
] | null | null | null | PsychoPy3 Experiments/ContrastDetection.py | mrhunsaker/ContrastDetection | bb058460c5f90119316d0637885cd47f7ca2a307 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2020.2.4post1),
on October 27, 2020, at 14:06
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '2020.2.4post1'
expName = 'ContrastDetection' # from the Builder filename that created this script
expInfo = {'participant': 's_001', 'ori': '10'}
dlg = gui.DlgFromDict(dictionary=expInfo, sort_keys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + 'data' + os.sep + '%s_%s' % (expInfo['participant'], expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='C:\\Users\\Ryan Hunsaker\\psychopy\\PsychoPy3 Experiments\\ContrastDetection.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=[2496, 1664], fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "instr"
instrClock = core.Clock()
instructions = visual.TextStim(win=win, name='instructions',
text="Press 'up' if you see the stimulus, 'down' if you didn't.\n\nAny key to start",
font='Atkinson Hyperlegible',
pos=[0, 0], height=0.1, wrapWidth=None, ori=0,
color=[1, 1, 1], colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
ready = keyboard.Keyboard()
# Initialize components for Routine "trial"
trialClock = core.Clock()
fixation = visual.GratingStim(
win=win, name='fixation',units='pix',
tex=None, mask=None,
ori=0, pos=[0, 0], size=[25, 25], sf=1, phase=0.0,
color=[1, 1, 1], colorSpace='rgb', opacity=1,blendmode='avg',
texRes=512, interpolate=True, depth=0.0)
gabor = visual.GratingStim(
win=win, name='gabor',units='pix',
tex='sin', mask='gauss',
ori=expInfo['ori'], pos=[0, 0], size=[1024,1024], sf=0.025, phase=1.0,
color='white', colorSpace='rgb', opacity=1,blendmode='avg',
texRes=512, interpolate=True, depth=-1.0)
resp = keyboard.Keyboard()
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "instr"-------
continueRoutine = True
# update component parameters for each repeat
ready.keys = []
ready.rt = []
_ready_allKeys = []
# keep track of which components have finished
instrComponents = [instructions, ready]
for thisComponent in instrComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
instrClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "instr"-------
while continueRoutine:
# get current time
t = instrClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=instrClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instructions* updates
if instructions.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
instructions.frameNStart = frameN # exact frame index
instructions.tStart = t # local t and not account for scr refresh
instructions.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(instructions, 'tStartRefresh') # time at next scr refresh
instructions.setAutoDraw(True)
# *ready* updates
waitOnFlip = False
if ready.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
ready.frameNStart = frameN # exact frame index
ready.tStart = t # local t and not account for scr refresh
ready.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ready, 'tStartRefresh') # time at next scr refresh
ready.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(ready.clock.reset) # t=0 on next screen flip
win.callOnFlip(ready.clearEvents, eventType='keyboard') # clear events on next screen flip
if ready.status == STARTED and not waitOnFlip:
theseKeys = ready.getKeys(keyList=None, waitRelease=False)
_ready_allKeys.extend(theseKeys)
if len(_ready_allKeys):
ready.keys = _ready_allKeys[-1].name # just the last key pressed
ready.rt = _ready_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instr"-------
for thisComponent in instrComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('instructions.started', instructions.tStartRefresh)
thisExp.addData('instructions.stopped', instructions.tStopRefresh)
# check responses
if ready.keys in ['', [], None]: # No response was made
ready.keys = None
thisExp.addData('ready.keys',ready.keys)
if ready.keys != None: # we had a response
thisExp.addData('ready.rt', ready.rt)
thisExp.addData('ready.started', ready.tStartRefresh)
thisExp.addData('ready.stopped', ready.tStopRefresh)
thisExp.nextEntry()
# the Routine "instr" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# --------Prepare to start Staircase "trials" --------
# set up handler to look after next chosen value etc
trials = data.StairHandler(startVal=0.9, extraInfo=expInfo,
stepSizes=asarray([1,1,0.9,0.9,0.8,0.8,0.6, 0.6, 0.4,0.4,0.2]), stepType='log',
nReversals=1, nTrials=30.0,
nUp=1, nDown=3,
minVal=0.0, maxVal=1.0,
originPath=-1, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
level = thisTrial = 0.9 # initialise some vals
for thisTrial in trials:
currentLoop = trials
level = thisTrial
# ------Prepare to start Routine "trial"-------
continueRoutine = True
routineTimer.add(2.500000)
# update component parameters for each repeat
gabor.setColor([level, level, level], colorSpace='rgb')
resp.keys = []
resp.rt = []
_resp_allKeys = []
# keep track of which components have finished
trialComponents = [fixation, gabor, resp]
for thisComponent in trialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation* updates
if fixation.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation.frameNStart = frameN # exact frame index
fixation.tStart = t # local t and not account for scr refresh
fixation.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation, 'tStartRefresh') # time at next scr refresh
fixation.setAutoDraw(True)
if fixation.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation.tStartRefresh + 0.5-frameTolerance:
# keep track of stop time/frame for later
fixation.tStop = t # not accounting for scr refresh
fixation.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation, 'tStopRefresh') # time at next scr refresh
fixation.setAutoDraw(False)
# *gabor* updates
if gabor.status == NOT_STARTED and tThisFlip >= 0.5-frameTolerance:
# keep track of start time/frame for later
gabor.frameNStart = frameN # exact frame index
gabor.tStart = t # local t and not account for scr refresh
gabor.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(gabor, 'tStartRefresh') # time at next scr refresh
gabor.setAutoDraw(True)
if gabor.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > gabor.tStartRefresh + 0.5-frameTolerance:
# keep track of stop time/frame for later
gabor.tStop = t # not accounting for scr refresh
gabor.frameNStop = frameN # exact frame index
win.timeOnFlip(gabor, 'tStopRefresh') # time at next scr refresh
gabor.setAutoDraw(False)
if gabor.status == STARTED: # only update if drawing
gabor.setPhase(trialClock.getTime()*2, log=False)
# *resp* updates
waitOnFlip = False
if resp.status == NOT_STARTED and tThisFlip >= 0.5-frameTolerance:
# keep track of start time/frame for later
resp.frameNStart = frameN # exact frame index
resp.tStart = t # local t and not account for scr refresh
resp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(resp, 'tStartRefresh') # time at next scr refresh
resp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(resp.clock.reset) # t=0 on next screen flip
win.callOnFlip(resp.clearEvents, eventType='keyboard') # clear events on next screen flip
if resp.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > resp.tStartRefresh + 2.0-frameTolerance:
# keep track of stop time/frame for later
resp.tStop = t # not accounting for scr refresh
resp.frameNStop = frameN # exact frame index
win.timeOnFlip(resp, 'tStopRefresh') # time at next scr refresh
resp.status = FINISHED
if resp.status == STARTED and not waitOnFlip:
theseKeys = resp.getKeys(keyList=['up', 'down'], waitRelease=False)
_resp_allKeys.extend(theseKeys)
if len(_resp_allKeys):
resp.keys = _resp_allKeys[-1].name # just the last key pressed
resp.rt = _resp_allKeys[-1].rt
# was this correct?
if (resp.keys == str('up')) or (resp.keys == 'up'):
resp.corr = 1
else:
resp.corr = 0
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addOtherData('fixation.started', fixation.tStartRefresh)
trials.addOtherData('fixation.stopped', fixation.tStopRefresh)
trials.addOtherData('gabor.started', gabor.tStartRefresh)
trials.addOtherData('gabor.stopped', gabor.tStopRefresh)
# check responses
if resp.keys in ['', [], None]: # No response was made
resp.keys = None
# was no response the correct answer?!
if str('up').lower() == 'none':
resp.corr = 1; # correct non-response
else:
resp.corr = 0; # failed to respond (incorrectly)
# store data for trials (StairHandler)
trials.addResponse(resp.corr)
trials.addOtherData('resp.rt', resp.rt)
trials.addOtherData('resp.started', resp.tStartRefresh)
trials.addOtherData('resp.stopped', resp.tStopRefresh)
thisExp.nextEntry()
# staircase completed
trials.saveAsExcel(filename + '.xlsx', sheetName='trials')
trials.saveAsText(filename + 'trials.csv', delim=',')
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv', delim='comma')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| 43.861257 | 102 | 0.66607 |
955b6290b3f098708424dbec65825578b92645dc | 1,945 | py | Python | self_driving_desktop/grammar.py | wasimakh2/self-driving-desktop | 309b9b6614f8d3f2b85ed40c8e3cd9d72cd069a6 | [
"MIT"
] | 536 | 2019-05-08T02:54:27.000Z | 2022-03-24T10:02:07.000Z | self_driving_desktop/grammar.py | JonTheNiceGuy/self-driving-desktop | 309b9b6614f8d3f2b85ed40c8e3cd9d72cd069a6 | [
"MIT"
] | 17 | 2019-05-08T03:08:14.000Z | 2021-03-02T12:52:53.000Z | self_driving_desktop/grammar.py | JonTheNiceGuy/self-driving-desktop | 309b9b6614f8d3f2b85ed40c8e3cd9d72cd069a6 | [
"MIT"
] | 33 | 2019-05-08T03:50:56.000Z | 2021-12-08T11:22:29.000Z | grammar = r"""
start: (item ";")+
item: import | coords | playlist | step
import : "import" string
coords : "coords" coords_body
coords_body : "{" coord_def ("," coord_def)* "}"
coord_def: string ":" "{" coord_body ("," coord_body)* "}"
coord_body: string ":" "[" int "," int "]"
playlist : "playlist" string playlist_body
playlist_body : "{" (step ";")* "}"
step : screen
| repeat
| play
| active
| focus
| delay
| sleep
| shell
| coord_off
| coord
| mouse
| drag
| click
| btnclick
| btndown
| btnup
| scroll
| hscroll
| keypress
| keydown
| keyup
| hotkeys
| write
| copy
| paste
| save_clipboard
| load_clipboard
| copy_clipboard
| paste_clipboard
screen: "screen" string
repeat: "play" string+ int | "play" string+ number
play: "play" string+
active: "active" string
focus: "focus" string
delay: "delay" number
sleep: "sleep" number
shell: ("shell"|"sh") string+
coord_off: ("coord"|"mc") string number number number
coord: ("coord"|"mc") string number
mouse: ("mouse"|"mv"|"mm") number number number
drag: ("drag"|"md") string number number number
click: "click"
btnclick: ("btnclick"|"bc") string
btndown: ("btndown"|"bd") string
btnup: ("btnup"|"bu") string
scroll: "scroll" number
hscroll: "hscroll" number
keypress: ("keypress"|"kp") string
keydown: ("keydown"|"kd") string
keyup: ("keyup"|"ku") string
hotkeys: ("hotkeys"|"hk") string+
write: ("write"|"w"|"type"|"t") string number?
copy: "copy"
paste: "paste"
save_clipboard: ("save_clipboard"|"scb") string
load_clipboard: ("load_clipboard"|"lcb") string
copy_clipboard: ("copy_clipboard"|"ccb") string
paste_clipboard: ("paste_clipboard"|"pcb") string
int: INT
number: SIGNED_NUMBER
string: ESCAPED_STRING
COMMENT: /#[^\n]*/
IDENT: (LETTER|"_") (LETTER|INT|"-"|"_")*
NAME: LETTER (LETTER|INT|"-"|"_")*
WORD: LETTER+
%import common.LETTER
%import common.ESCAPED_STRING
%import common.INT
%import common.SIGNED_NUMBER
%import common.WS
%ignore COMMENT
%ignore WS
"""
| 19.45 | 58 | 0.682776 |
955dce71570249e6e13d912ac4f316735729f8a7 | 1,765 | py | Python | BB/conf.py | poco0317/BarinadeBot-Rewrite | 4f34246178ab2ee0fd4c0a79fff5a43adbed134c | [
"MIT"
] | 2 | 2021-05-06T09:07:44.000Z | 2021-05-11T23:45:38.000Z | BB/conf.py | poco0317/BarinadeBot-Rewrite | 4f34246178ab2ee0fd4c0a79fff5a43adbed134c | [
"MIT"
] | null | null | null | BB/conf.py | poco0317/BarinadeBot-Rewrite | 4f34246178ab2ee0fd4c0a79fff5a43adbed134c | [
"MIT"
] | null | null | null | import configparser
import os
import shutil
import traceback
class Conf:
    """Bot configuration loaded from an INI file.

    Reads the login token, owner id, music download path and stop-words path
    from ``conf``.  If the file cannot be read it is recreated from
    ``example_config.ini`` in the same directory (exiting the process only if
    even the example is missing) and then read again so defaults take effect.
    """

    def __init__(self, conf):
        self.options = conf  # path to the config file, kept for reporting
        config = configparser.ConfigParser(interpolation=None)
        if not config.read(conf, encoding='utf-8'):
            print("I had to remake the config file from default. Please check the config and restart once the proper settings have been changed.")
            print("The config should exist here: " + self.options)
            try:
                shutil.copy(os.path.dirname(self.options) + "/example_config.ini", self.options)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # still propagate; anything else here is unrecoverable.
                traceback.print_exc()
                print("Well... Somehow the example I was copying from is also gone. You're in a bad spot.")
                os._exit(1)
            config.read(conf, encoding='utf-8')
        self.THE_TOKEN = config.get("Login", "Token", fallback=Fallbacks.token)
        self.owner_id = int(config.get("Permissions", "OwnerID", fallback=Fallbacks.ownerID))
        self.download_path = config.get("Music", "Path", fallback=Fallbacks.download_path)
        self.stopwords_path = config.get("BarTalk", "Stop_words_Path", fallback=Fallbacks.stopword_path)
        # Stop words are optional: failure to read the file just leaves the
        # set empty (as before), but the handle is now always closed.
        self.stopwords = set()
        if self.stopwords_path != "":
            try:
                with open(self.stopwords_path, "r", encoding="utf-8") as f:
                    for line in f:
                        self.stopwords.add(line.strip())
            except (OSError, UnicodeDecodeError):
                pass


class Fallbacks:
    """Placeholder defaults used only when a setting is missing from the
    config file; the bot cannot actually run with these values."""
    token = "0"          # login token placeholder
    ownerID = 0          # owner's user id
    download_path = ""   # music download directory
    stopword_path = ""   # BarTalk stop-words file path
955f1cf7d1a0592b32cf0c5abbfe0bb9060df419 | 2,805 | py | Python | geocamUtil/usng/convertUsngCsv.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 4 | 2017-03-03T16:24:24.000Z | 2018-06-24T05:50:40.000Z | geocamUtil/usng/convertUsngCsv.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 1 | 2021-09-29T17:17:30.000Z | 2021-09-29T17:17:30.000Z | geocamUtil/usng/convertUsngCsv.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 1 | 2017-12-19T20:45:53.000Z | 2017-12-19T20:45:53.000Z | #!/usr/bin/env python
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
"""
Takes as input a CSV file in the format:
37 46 29.2080,-122 25 08.1336,San Francisco
37 27 13.8132,-122 10 55.7184,Menlo Park
And outputs CSV in the format:
10S EG 51172 80985,San Francisco
10S EG 72335 45533,Menlo Park
Optionally outputs a KML file of placemarks as well, where the
placemark descriptions include USNG coordinates.
"""
import csv
from geocamUtil.usng import usng
from geocamUtil import KmlUtil
def parseDegMinSec(val):
valDeg, valMin, valSec = val.split(' ')
sgn = -1 if float(valDeg) < 0 else 1
return sgn * (abs(float(valDeg))
+ float(valMin) / 60.0
+ float(valSec) / 3600.0)
def convertUsngCsv(opts, inPath):
inFile = file(inPath, 'r')
inLines = csv.reader(inFile)
coords = []
for latDms, lonDms, name in inLines:
lat = parseDegMinSec(latDms)
lon = parseDegMinSec(lonDms)
easting, northing, zoneNumber, zoneLetter = usng.LLtoUTM(lat, lon)
easting += opts.eastOffset
northing += opts.northOffset
usngCoords = usng.UTMtoUSNG(easting, northing, zoneNumber, zoneLetter, precision=5)
print usngCoords, ' ', name
clat, clon = usng.UTMtoLL(easting, northing, zoneNumber, zoneLetter)
coords.append((clat, clon, name, usngCoords))
if opts.kml:
kbits = []
kbits.append('<Folder>\n')
for lat, lon, name, usngCoords in coords:
kbits.append("""
<Placemark>
<name>%(name)s</name>
<description>%(usngCoords)s</description>
<Point>
<coordinates>%(lon)s,%(lat)s</coordinates>
</Point>
</Placemark>
""" %
dict(lat=lat, lon=lon, name=name,
usngCoords=usngCoords))
kbits.append('</Folder>')
text = ''.join(kbits)
file(opts.kml, 'w').write(KmlUtil.wrapKml(text))
def main():
import optparse
parser = optparse.OptionParser('usage: %prog <in.csv>')
parser.add_option('--eastOffset',
type='float', default=0,
help='Offset to add to easting values for datum correction (meters)')
parser.add_option('--northOffset',
type='float', default=0,
help='Offset to add to northing values for datum correction (meters)')
parser.add_option('--kml',
help='Filename for KML output')
opts, args = parser.parse_args()
if len(args) != 1:
parser.error('expected exactly 1 arg')
inPath = args[0]
convertUsngCsv(opts, inPath)
if __name__ == '__main__':
main()
| 30.824176 | 92 | 0.624242 |
955fa29268924998ee2dd6306f368b16b34e2595 | 478 | py | Python | process_cifar10.py | IIGROUP/AttentionProbe | b2c88b064452741a7ccc6660a4b090743013cc73 | [
"MIT"
] | 11 | 2022-01-23T15:09:09.000Z | 2022-03-18T10:27:04.000Z | process_cifar10.py | Wang-jiahao/AttentionProbe | 41a3cc0d5454ec5bba78c3dace9cded00da8cff9 | [
"MIT"
] | null | null | null | process_cifar10.py | Wang-jiahao/AttentionProbe | 41a3cc0d5454ec5bba78c3dace9cded00da8cff9 | [
"MIT"
] | null | null | null | from torchvision.datasets import CIFAR10
from torchvision.datasets import CIFAR100
import os
root = '/database/cifar10/'
from PIL import Image
dataset_train = CIFAR10(root)
for k, (img, label) in enumerate(dataset_train):
print('processsing' + str(k))
if not os.path.exists(root + 'CIFAR10_image/' + str(label)+ '/'):
os.mkdir(root + 'CIFAR10_image/' + str(label)+ '/')
img.save(root + 'CIFAR10_image/' + str(label) + '/' + str(k) + '.png')
| 31.866667 | 75 | 0.650628 |
955fe4376191cdb0e3d9522af865a55375090411 | 246 | py | Python | examples/fullproject/items/user.py | cnkailyn/toapi | 03a49d02dd0a55f1f83270154144e1a08fae6b78 | [
"Apache-2.0"
] | null | null | null | examples/fullproject/items/user.py | cnkailyn/toapi | 03a49d02dd0a55f1f83270154144e1a08fae6b78 | [
"Apache-2.0"
] | null | null | null | examples/fullproject/items/user.py | cnkailyn/toapi | 03a49d02dd0a55f1f83270154144e1a08fae6b78 | [
"Apache-2.0"
] | 1 | 2019-11-12T20:15:50.000Z | 2019-11-12T20:15:50.000Z | from toapi import Item, XPath
class User(Item):
url = XPath('//a[@class="hnuser"][1]/@href')
name = XPath('//a[@class="hnuser"][1]/text()')
class Meta:
source = XPath('//tr[@class="athing"]')
route = '/news\?p=\d+'
| 22.363636 | 50 | 0.536585 |
95620b3f160fc0fb6d0c0896e86c4e7d56432d0b | 722 | py | Python | src/tests/test_fail.py | bspeagle/py_git_diff | 1674afc1dfac0408372e11945f4a36b297b77e66 | [
"MIT"
] | null | null | null | src/tests/test_fail.py | bspeagle/py_git_diff | 1674afc1dfac0408372e11945f4a36b297b77e66 | [
"MIT"
] | null | null | null | src/tests/test_fail.py | bspeagle/py_git_diff | 1674afc1dfac0408372e11945f4a36b297b77e66 | [
"MIT"
] | null | null | null | '''
Failure tests
'''
import os
from typing import Any
import pytest
from helpers.github import API
api = API()
pass_token = Any
fail_token = os.getenv('FAIL_TOKEN')
fail_org = os.getenv('FAIL_ORG')
fail_repo = os.getenv('FAIL_REPO')
def test_fail_auth():
'''
Fail 'auth' to Github
'''
with pytest.raises(SystemExit):
api.authenticate(fail_token)
def test_fail_org(token):
'''
Fail 'get organization'
'''
pass_token = token
with pytest.raises(SystemExit):
api.authenticate(pass_token)
api.get_organization(fail_org)
def test_fail_repo():
'''
Fail 'get repo'
'''
with pytest.raises(SystemExit):
api.get_repo("user", fail_repo)
| 16.044444 | 39 | 0.649584 |
9562440f3dc7a8e571a4195021e3f9febc5d8b84 | 3,042 | py | Python | tutorials/mechanisms/tutorial_convenience_inhibition.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 13 | 2020-11-05T10:59:13.000Z | 2022-03-21T01:38:31.000Z | tutorials/mechanisms/tutorial_convenience_inhibition.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 4 | 2022-01-27T10:23:40.000Z | 2022-03-10T18:16:06.000Z | tutorials/mechanisms/tutorial_convenience_inhibition.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 6 | 2020-08-04T17:01:33.000Z | 2022-03-21T01:38:32.000Z | # -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
# Test models
from skimpy.core import *
from skimpy.mechanisms import *
name = 'pfk'
SpecificConvenience = make_convenience_with_inhibition([-2, -1, 3], [1])
metabolites = SpecificConvenience.Reactants(substrate1 = 'A',
substrate2 = 'B',
product1 = 'C' )
inhibitors = SpecificConvenience.Inhibitors(inhibitor1 = 'I')
# thermo_data = {'S': 1e-2,
# 'P': 1e-2,
# 'sig_S': 0.1,
# 'sig_P': 0.1,
# 'gamma': 0.1,
# 'flux': 1.0,
# 'E_tot': 1e-5}
## QSSA Method
parameters = SpecificConvenience.Parameters(
vmax_forward = 1.0,
k_equilibrium=2.0,
km_substrate1 = 10.0,
km_substrate2 = 10.0,
km_product1 = 10.0,
ki_inhibitor1 = 1.0)
pfk = Reaction(name=name,
mechanism=SpecificConvenience,
reactants=metabolites,
inhibitors=inhibitors,
)
name = 'inhib'
metabolites = ReversibleMichaelisMenten.Reactants(substrate = 'C',
product = 'I')
## QSSA Method
parameters_inh = ReversibleMichaelisMenten.Parameters(
vmax_forward = 1.0,
k_equilibrium=2.0,
km_substrate = 10.0,
km_product = 10.0,
total_enzyme_concentration = 1.0,
)
inh = Reaction(name=name,
mechanism=ReversibleMichaelisMenten,
reactants=metabolites,
)
this_model = KineticModel()
this_model.add_reaction(pfk)
this_model.add_reaction(inh)
this_model.parametrize_by_reaction({inh.name:parameters_inh,
pfk.name: parameters})
this_model.compile_ode(sim_type = QSSA)
this_model.initial_conditions['A'] = 10.0
this_model.initial_conditions['B'] = 10.0
this_model.initial_conditions['C'] = 10.0
this_model.initial_conditions['I'] = 0.0
this_sol_qssa = this_model.solve_ode(np.linspace(0.0, 50.0, 500),solver_type = 'cvode')
this_sol_qssa.plot('output/base_out_qssa.html')
| 29.25 | 87 | 0.610782 |
9564a1cacc7687a8261fe339aaf329a5f5fa587d | 825 | py | Python | advanced-python/05_advanced_classes_and_objects/enums.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
] | null | null | null | advanced-python/05_advanced_classes_and_objects/enums.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
] | null | null | null | advanced-python/05_advanced_classes_and_objects/enums.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
] | null | null | null |
from enum import Enum, unique, auto
@unique
class Fruit(Enum):
APPLE = 1
BANANA = 2
ORANGE = 3
# if you don't care what value to put you can use auto => basically it will get last value used + 1 (if auto is for first item it will be 0 + 1 = 1)
PEAR = auto()
def main():
print(Fruit.APPLE) # Fruit.APPLE
print(type(Fruit.APPLE)) # <enum 'Fruit'>
print(repr(Fruit.APPLE)) # <Fruit.APPLE: 1>
print(Fruit.APPLE.name) # APPLE
print(Fruit.APPLE.value) # 1
# you cannot have duplicate keys in Fruits (you can have duplicate values if you are not having unique decorator!)
print(Fruit.PEAR.name) # PEAR
print(Fruit.PEAR.value) # 4
my_fruits = {}
my_fruits[Fruit.BANANA] = 'BANANA'
print(my_fruits[Fruit.BANANA])
if __name__ == '__main__':
main() | 27.5 | 152 | 0.641212 |
9566a5fe7dd2b6c92ef1c6e1f73143b039776af9 | 3,313 | py | Python | detector/detector.py | suhendaragung20/Object-Detection-Metrics | e756c9ba20ff1e89143c64e6d38288d2a8680f0e | [
"MIT"
] | null | null | null | detector/detector.py | suhendaragung20/Object-Detection-Metrics | e756c9ba20ff1e89143c64e6d38288d2a8680f0e | [
"MIT"
] | null | null | null | detector/detector.py | suhendaragung20/Object-Detection-Metrics | e756c9ba20ff1e89143c64e6d38288d2a8680f0e | [
"MIT"
] | null | null | null |
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
from imutils.object_detection import non_max_suppression
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
class detector:
def __init__(self):
folder_detector = 'inference_graph'
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = 'frozen_model/frozen_inference_graph.pb'
# Path to label map file
PATH_TO_LABELS = 'classes.pbtxt'
# Number of classes the object detector can identify
NUM_CLASSES = 37
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
self.category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
def detect_plate(self, frame):
image_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_expanded})
return (boxes, scores, classes, num, self.category_index)
| 37.224719 | 107 | 0.690915 |
9566c8358fb6d074b645539d82fffd8a430711c8 | 1,770 | py | Python | processDiscoveryNews4chat.py | data-henrik/watson-chatbot-discovery-news | d63d579718d4fc529af29bb413c73fdbd9b52361 | [
"Apache-2.0"
] | 3 | 2019-05-03T20:28:45.000Z | 2019-06-28T09:58:25.000Z | processDiscoveryNews4chat.py | psaupsau/watson-chatbot-discovery-news | d63d579718d4fc529af29bb413c73fdbd9b52361 | [
"Apache-2.0"
] | null | null | null | processDiscoveryNews4chat.py | psaupsau/watson-chatbot-discovery-news | d63d579718d4fc529af29bb413c73fdbd9b52361 | [
"Apache-2.0"
] | 4 | 2019-04-25T16:49:47.000Z | 2020-07-02T15:27:05.000Z | # Handle client-side action for an IBM Watson Assistant chatbot
#
# The code requires my Watson Conversation Tool. For details see
# https://github.com/data-henrik/watson-conversation-tool
#
#
# Setup: Configure your credentials
# - for Watson Assistant see instructions for the tool
# - for Discovery change username / password below
#
#
# Written by Henrik Loeser
import json
from watson_developer_cloud import DiscoveryV1
def handleClientActions(context, actions, watsonResponse):
print (">>> processing client actions...\n")
# Initialize the Discovery API
discovery = DiscoveryV1(
version='2018-08-01',
## url is optional, and defaults to the URL below. Use the correct URL for your region.
# url='https://gateway.watsonplatform.net/discovery/api',
username='your-username',
password='your-password')
# We are going to access a system collection with English news
# You could change the language to news-de or news-es...
news_environment_id = 'system'
collection_id = 'news-en'
# We could query the different collections here
# collections = discovery.list_collections(news_environment_id).get_result()
# news_collections = [x for x in collections['collections']]
# print(json.dumps(collections, indent=2))
# Now query Discovery, sort the result and only return certain fields
query_results = discovery.query(
news_environment_id,
collection_id,
natural_language_query=context['topic'],
deduplicate="true",
sort="-score,-publication_date",
return_fields='title,url,publication_date').get_result()
# Write the result to our defined variable and return
context.update({'myNews':query_results})
return context | 35.4 | 95 | 0.714124 |
9566fa933abad53d06086af6a6e451a81990671d | 2,977 | py | Python | test_scripts/test_residual_solver_dynamic.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T14:08:20.000Z | 2021-04-09T14:08:20.000Z | test_scripts/test_residual_solver_dynamic.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 2 | 2021-04-28T15:05:01.000Z | 2021-11-10T15:12:56.000Z | test_scripts/test_residual_solver_dynamic.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 2 | 2021-02-01T08:49:45.000Z | 2021-08-10T02:07:36.000Z | import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from source.solving_strategies.strategies.residual_based_newton_raphson_solver import ResidualBasedNewtonRaphsonSolver
from source.solving_strategies.strategies.residual_based_picard_solver import ResidualBasedPicardSolver
from source.model.structure_model import StraightBeam
np.set_printoptions(suppress=False, precision=2, linewidth=140)
params = {
"name": "CaarcBeamPrototypeOptimizable",
"domain_size": "3D",
"system_parameters": {
"element_params": {
"type": "CRBeam",
"is_nonlinear": True
},
"material": {
"density": 7850.0,
"youngs_modulus": 2069000000,
"poisson_ratio": 0.29,
"damping_ratio": 0.1
},
"geometry": {
"length_x": 1.2,
"number_of_elements": 1,
"defined_on_intervals": [{
"interval_bounds": [0.0, "End"],
"length_y": [1.0],
"length_z": [1.0],
"area": [0.0001],
"shear_area_y": [0.0],
"shear_area_z": [0.0],
"moment_of_inertia_y": [0.0001],
"moment_of_inertia_z": [0.0001],
"torsional_moment_of_inertia": [0.0001],
"outrigger_mass": [0.0],
"outrigger_stiffness": [0.0]}]
}
},
"boundary_conditions": "fixed-free"
}
dt = 0.1
tend = 10.
steps = int(tend / dt)
array_time = np.linspace(0.0, tend, steps + 1)
array_time_kratos = np.linspace(0.1, 10, 101)
def test_residual_based_solvers():
f_ext = np.array([np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 100.0 * np.sin(t), 0.0, 0.0, 0.0])
for t in np.sin(array_time)])
u0 = np.zeros(6)
v0 = np.zeros(6)
a0 = np.zeros(6)
scheme = "BackwardEuler1"
beam = StraightBeam(params)
f_ext = beam.apply_bc_by_reduction(f_ext, 'column').T
newton_solver = ResidualBasedNewtonRaphsonSolver(array_time, scheme, dt,
[beam.comp_m, beam.comp_b, beam.comp_k],
[u0, v0, a0], f_ext, beam)
picard_solver = ResidualBasedPicardSolver(array_time, scheme, dt,
[beam.comp_m, beam.comp_b, beam.comp_k],
[u0, v0, a0], f_ext, beam)
newton_solver.solve()
picard_solver.solve()
reference_file = "kratos_reference_results/dynamic_displacement_z.txt"
disp_z_soln = np.loadtxt(reference_file)[:, 1]
plt.plot(array_time, newton_solver.displacement[2, :], c='b', label='Newton Raphson')
plt.plot(array_time, picard_solver.displacement[2, :], c='g', label='Picard')
plt.plot(array_time_kratos, disp_z_soln, c='k', label='Kratos reference')
plt.grid()
plt.legend()
plt.show()
| 34.616279 | 118 | 0.566006 |
9567e6ba5dc9af36046c391fbc5d4e1144009cc8 | 378 | py | Python | objects/regex_deleter.py | Egor2005l/cho | c7cb165394089b277be5c306edde0b8fb42e466d | [
"MIT"
] | null | null | null | objects/regex_deleter.py | Egor2005l/cho | c7cb165394089b277be5c306edde0b8fb42e466d | [
"MIT"
] | null | null | null | objects/regex_deleter.py | Egor2005l/cho | c7cb165394089b277be5c306edde0b8fb42e466d | [
"MIT"
] | null | null | null | from typing import Dict
from objects.base import BaseModel
class RegexDeleter(BaseModel):
name: str
regex: str
chat_id: int
for_all: bool
def save(self) -> Dict[str, int]:
return {
'name': self.name,
'regex': self.regex,
'chat_id': self.chat_id,
'for_all': self.for_all
}
| 19.894737 | 38 | 0.531746 |
95697b5d755424cb46ff8e11e52fcff72a602bf4 | 639 | py | Python | cartridge_external_payment/admin.py | thomasWajs/cartridge-external-payment | 02c1c2b43504a17547a908622c3d54a331945c77 | [
"BSD-2-Clause"
] | 7 | 2015-02-14T20:25:27.000Z | 2021-04-10T16:05:00.000Z | cartridge_external_payment/admin.py | thomasWajs/cartridge-external-payment | 02c1c2b43504a17547a908622c3d54a331945c77 | [
"BSD-2-Clause"
] | 2 | 2015-11-30T17:54:19.000Z | 2016-09-09T21:21:01.000Z | cartridge_external_payment/admin.py | thomasWajs/cartridge-external-payment | 02c1c2b43504a17547a908622c3d54a331945c77 | [
"BSD-2-Clause"
] | 3 | 2015-10-19T15:22:18.000Z | 2017-11-13T23:22:17.000Z | from copy import deepcopy
from django.contrib import admin
from cartridge.shop.admin import OrderAdmin
from cartridge.shop.models import Order
order_fieldsets = deepcopy(admin.site._registry[Order].fieldsets)
order_fieldsets[2][1]["fields"] = list(order_fieldsets[2][1]["fields"])
order_fieldsets[2][1]["fields"].insert(0, 'payment_done')
class ExternalPaymentOrderAdmin(OrderAdmin):
fieldsets = order_fieldsets
list_display = ("id", "billing_name", "total", "time", "payment_done",
"status", "transaction_id", "invoice")
admin.site.unregister(Order)
admin.site.register(Order, ExternalPaymentOrderAdmin)
| 33.631579 | 74 | 0.748044 |
9569b1f581ecb174ee905898df207343952c8b6e | 1,483 | py | Python | webeditor/app/attrdict.py | lshen1120/web-editor | 170ac96b47bf957a3a42a99092e45e88e584c49a | [
"Apache-2.0"
] | 4 | 2018-07-25T03:57:08.000Z | 2018-07-25T06:34:59.000Z | webeditor/app/attrdict.py | lshen1120/web-editor | 170ac96b47bf957a3a42a99092e45e88e584c49a | [
"Apache-2.0"
] | null | null | null | webeditor/app/attrdict.py | lshen1120/web-editor | 170ac96b47bf957a3a42a99092e45e88e584c49a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import copy
class AttrDict(dict):
def __init__(self, seq=None, **kwargs):
dict.__init__(self, seq or {}, **kwargs)
def __getattr__(self, name):
return self.get(name, self.get("__default", None))
def __setattr__(self, name, value):
self[name] = value
def __getitem__(self, name):
return self.get(name, self.get("__default", None))
def __deepcopy__(self, memo):
y = {}
memo[id(self)] = y
for key, value in self.iteritems():
y[copy.deepcopy(key, memo)] = copy.deepcopy(value, memo)
return y
def test_attr_dict_exist_attr():
a = AttrDict()
a.id = 1
assert a.id == 1, 'a.id != 1'
assert a.get('id') == 1, 'a.get("id") != 1'
assert a['id'] == 1, 'a["id"] != 1'
b = AttrDict(id=2)
assert b.id == 2, 'b.id != 2'
c = AttrDict({"id": 3})
assert c.id == 3, 'c.id != 3'
d = AttrDict(None)
d.id = 4
assert d.id == 4, 'd.id != 4'
def test_attr_dict_not_exist_attr():
a = AttrDict()
assert a.notexist is None, 'a.notexist is None'
assert a['notexist'] is None, 'a["notexist"] not __default '
def test_attr_dict_not_exist_attr_default():
a = AttrDict()
a.__default = ''
assert a.notexist == '', 'a.notexist not __default '
assert a['notexist'] == '', 'a["notexist"] not __default '
b = AttrDict(__default=0)
assert b.notexist == 0, 'b.notexist not __default '
| 26.017544 | 68 | 0.574511 |
956a1d409e8bf27f1eb1d39023f7ad436c2f2c6c | 16,034 | py | Python | EVeP/EVeP.py | amandaortega/EVeP | 4bca562c814210b1d835e9be63ab80385c93320b | [
"MIT"
] | null | null | null | EVeP/EVeP.py | amandaortega/EVeP | 4bca562c814210b1d835e9be63ab80385c93320b | [
"MIT"
] | null | null | null | EVeP/EVeP.py | amandaortega/EVeP | 4bca562c814210b1d835e9be63ab80385c93320b | [
"MIT"
] | null | null | null | """
Author: Amanda Ortega de Castro Ayres
Created in: September 19, 2019
Python version: 3.6
"""
from Least_SRMTL import Least_SRMTL
import libmr
from matplotlib import pyplot, cm
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import Axes3D, art3d
import numpy as np
import numpy.matlib
import sklearn.metrics
class EVeP(object):
"""
evolving Extreme Value Machine
Ruled-based predictor with EVM at the definition of the antecedent of the rules.
1. Create a new instance and provide the model parameters;
2. Call the predict(x) method to make predictions based on the given input;
3. Call the train(x, y) method to evolve the model based on the new input-output pair.
"""
# Model initialization
def __init__(self, sigma=0.5, delta=50, N=np.Inf, rho=None, columns_ts=None):
# Setting EVM algorithm parameters
self.sigma = sigma
self.tau = 99999
self.delta = delta
self.N = N
self.rho = rho
self.columns_ts = columns_ts
if self.rho is not None:
self.init_theta = 2
self.srmtl = Least_SRMTL(rho)
self.R = None
self.mr_x = list()
self.mr_y = list()
self.x0 = list()
self.y0 = list()
self.X = list()
self.y = list()
self.step = list()
self.last_update = list()
self.theta = list()
self.c = 0
# Initialization of a new instance of EV.
def add_EV(self, x0, y0, step):
self.mr_x.append(libmr.MR())
self.mr_y.append(libmr.MR())
self.x0.append(x0)
self.y0.append(y0)
self.X.append(x0)
self.y.append(y0)
self.step.append(step)
self.last_update.append(np.max(step))
self.theta.append(np.zeros_like(x0))
self.c = self.c + 1
if self.rho is None:
# coefficients of the consequent part
self.theta[-1] = np.insert(self.theta[-1], 0, y0, axis=1).T
else:
self.init_theta = 2
# coefficients of the consequent part
self.theta[-1] = np.insert(self.theta[-1], 0, y0, axis=1)
# Add the sample(s) (X, y) as covered by the extreme vector. Remove repeated points.
def add_sample_to_EV(self, index, X, y, step):
self.X[index] = np.concatenate((self.X[index], X))
self.y[index] = np.concatenate((self.y[index], y))
self.step[index] = np.concatenate((self.step[index], step))
if self.X[index].shape[0] > self.N:
indexes = np.argsort(-self.step[index].reshape(-1))
self.X[index] = self.X[index][indexes[: self.N], :]
self.y[index] = self.y[index][indexes[: self.N]]
self.step[index] = self.step[index][indexes[: self.N]]
self.x0[index] = np.average(self.X[index], axis=0).reshape(1, -1)
self.y0[index] = np.average(self.y[index], axis=0).reshape(1, -1)
self.last_update[index] = np.max(self.step[index])
if self.rho is None:
self.theta[index] = np.linalg.lstsq(np.insert(self.X[index], 0, 1, axis=1), self.y[index], rcond=None)[0]
def delete_from_list(self, list_, indexes):
for i in sorted(indexes, reverse=True):
del list_[i]
return list_
# Calculate the firing degree of the sample to the psi curve
def firing_degree(self, index, x=None, y=None):
if y is None:
return self.mr_x[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.x0[index], x).reshape(-1))
elif x is None:
return self.mr_y[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.y0[index], y).reshape(-1))
else:
return np.minimum(self.mr_x[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.x0[index], x).reshape(-1)), self.mr_y[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.y0[index], y).reshape(-1)))
# Fit the psi curve of the EVs according to the external samples
def fit(self, index, X_ext, y_ext):
self.fit_x(index, sklearn.metrics.pairwise.pairwise_distances(self.x0[index], X_ext)[0])
self.fit_y(index, sklearn.metrics.pairwise.pairwise_distances(self.y0[index], y_ext)[0])
# Fit the psi curve to the extreme values with distance D to the center of the EV
def fit_x(self, index, D):
self.mr_x[index].fit_low(1/2 * D, min(D.shape[0], self.tau))
# Fit the psi curve to the extreme values with distance D to the center of the EV
def fit_y(self, index, D):
self.mr_y[index].fit_low(1/2 * D, min(D.shape[0], self.tau))
# Get the distance from the origin of the input EV which has the given probability to belong to the curve
def get_distance_input(self, percentage, index=None):
if index is None:
return [self.mr_x[i].inv(percentage) for i in range(self.c)]
else:
return self.mr_x[index].inv(percentage)
# Get the distance from the origin of the output EV which has the given probability to belong to the curve
def get_distance_output(self, percentage, index=None):
if index is None:
return [self.mr_y[i].inv(percentage) for i in range(self.c)]
else:
return self.mr_y[index].inv(percentage)
# Obtain the samples that do not belong to the given EV
def get_external_samples(self, index=None):
if index is None:
X = np.concatenate(self.X)
y = np.concatenate(self.y)
else:
if self.c > 1:
X = np.concatenate(self.X[:index] + self.X[index + 1 :])
y = np.concatenate(self.y[:index] + self.y[index + 1 :])
else:
X = np.array([])
y = np.array([])
return (X, y)
# Merge two EVs of different clusters whenever the origin of one is inside the sigma probability of inclusion of the psi curve of the other
def merge(self):
self.sort_EVs()
index = 0
while index < self.c:
if index + 1 < self.c:
x0 = np.concatenate(self.x0[index + 1 : ])
y0 = np.concatenate(self.y0[index + 1 : ])
S_index = self.firing_degree(index, x0, y0)
index_to_merge = np.where(S_index > self.sigma)[0] + index + 1
if index_to_merge.size > 0:
self.init_theta = 2
for i in reversed(range(len(index_to_merge))):
self.add_sample_to_EV(index, self.X[index_to_merge[i]], self.y[index_to_merge[i]], self.step[index_to_merge[i]])
self.remove_EV([index_to_merge[i]])
index = index + 1
# Plot the granules that form the antecedent part of the rules
def plot(self, name_figure_input, name_figure_output, step):
# Input fuzzy granules plot
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(left=-2, right=2)
ax.axes.set_ylim3d(bottom=-2, top=2)
z_bottom = -0.3
ax.set_zticklabels("")
colors = cm.get_cmap('Dark2', self.c)
for i in range(self.c):
self.plot_EV_input(i, ax, '.', colors(i), z_bottom)
legend.append('$\lambda$ = ' + str(round(self.mr_x[new_order[i]].get_params()[0], 1)) + ' $\kappa$ = ' + str(round(self.mr_x[new_order[i]].get_params()[1], 1)))
# Plot axis' labels
ax.set_xlabel('u(t)', fontsize=15)
ax.set_ylabel('y(t)', fontsize=15)
ax.set_zlabel('$\mu_x$', fontsize=15)
ax.legend(legend, fontsize=10, loc=2)
# Save figure
fig.savefig(name_figure_input)
# Close plot
pyplot.close(fig)
# Output fuzzy granules plot
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.axes.set_xlim(left=-2, right=2)
for i in range(self.c):
self.plot_EV_output(i, ax, '.', colors(i), z_bottom)
# Plot axis' labels
ax.set_xlabel('y(t + 1)', fontsize=15)
ax.set_ylabel('$\mu_y$', fontsize=15)
ax.legend(legend, fontsize=10, loc=2)
# Save figure
fig.savefig(name_figure_output)
# Close plot
pyplot.close(fig)
# Plot the probability of sample inclusion (psi-model) together with the samples associated with the EV for the input fuzzy granules
def plot_EV_input(self, index, ax, marker, color, z_bottom):
# Plot the input samples in the XY plan
ax.scatter(self.X[index][:, 0], self.X[index][:, 1], z_bottom * np.ones((self.X[index].shape[0], 1)), marker=marker, color=color)
# Plot the radius for which there is a probability sigma to belong to the EV
radius = self.get_distance_input(self.sigma, index)
p = Circle((self.x0[index][0, 0], self.x0[index][0, 1]), radius, fill=False, color=color)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=z_bottom, zdir="z")
# Plot the psi curve of the EV
r = np.linspace(0, self.get_distance_input(0.05, index), 100)
theta = np.linspace(0, 2 * np.pi, 145)
radius_matrix, theta_matrix = np.meshgrid(r,theta)
X = self.x0[index][0, 0] + radius_matrix * np.cos(theta_matrix)
Y = self.x0[index][0, 1] + radius_matrix * np.sin(theta_matrix)
points = np.array([np.array([X, Y])[0, :, :].reshape(-1), np.array([X, Y])[1, :, :].reshape(-1)]).T
Z = self.firing_degree(index, points)
ax.plot_surface(X, Y, Z.reshape((X.shape[0], X.shape[1])), antialiased=False, cmap=cm.coolwarm, alpha=0.1)
# Plot the probability of sample inclusion (psi-model) together with the samples associated with the EV for the output fuzzy granules
def plot_EV_output(self, index, ax, marker, color, z_bottom):
# Plot the output data points in the X axis
ax.scatter(self.y[index], np.zeros_like(self.y[index]), marker=marker, color=color)
# Plot the psi curve of the EV
r = np.linspace(0, self.get_distance_output(0.01, index), 100)
points = np.concatenate((np.flip((self.y0[index] - r).T, axis=0), (self.y0[index] + r).T), axis=0)
Z = self.firing_degree(index, y=points)
#ax.plot(points, Z, antialiased=False, cmap=cm.coolwarm, alpha=0.1)
ax.plot(points, Z, color=color)
# Predict the output given the input sample x
def predict(self, x):
num = 0
den = 0
for i in range(self.c):
p = self.predict_EV(i, x)
num = num + self.firing_degree(i, x, p) * p
den = den + self.firing_degree(i, x, p)
if den == 0:
if self.columns_ts is None:
return np.mean(x)
return np.mean(x[:, self.columns_ts])
return num / den
# Predict the local output of x based on the linear regression of the samples stored at the EV
def predict_EV(self, index, x):
if self.rho is None:
return np.insert(x, 0, 1).reshape(1, -1) @ self.theta[index]
return np.insert(x, 0, 1).reshape(1, -1) @ self.theta[index].T
    # Calculate the degree of relationship of all the rules to the rule of index informed as parameter
    def relationship_rules(self, index):
        """Return, for every rule, its degree of relationship to rule ``index``.

        Four similarity scores are combined element-wise: center-to-center
        distance and sigma-radius-shifted distance, each evaluated in both
        the input (x) and the output (y) space; the strongest score wins.
        """
        # Distances from this rule's midpoints to every rule's midpoint
        distance_x = sklearn.metrics.pairwise.pairwise_distances(self.x0[index], np.concatenate(self.x0)).reshape(-1)
        distance_y = sklearn.metrics.pairwise.pairwise_distances(self.y0[index], np.concatenate(self.y0)).reshape(-1)
        # Scores for the raw center distances
        relationship_x_center = self.mr_x[index].w_score_vector(distance_x)
        relationship_y_center = self.mr_y[index].w_score_vector(distance_y)
        # Scores with the sigma radius subtracted (rules whose borders touch)
        relationship_x_radius = self.mr_x[index].w_score_vector(distance_x - self.get_distance_input(self.sigma))
        relationship_y_radius = self.mr_y[index].w_score_vector(distance_y - self.get_distance_output(self.sigma))
        # Element-wise maximum over the four relationship criteria
        return np.maximum(np.maximum(relationship_x_center, relationship_x_radius), np.maximum(relationship_y_center, relationship_y_radius))
# Remove the EV whose index was informed by parameter
def remove_EV(self, index):
self.mr_x = self.delete_from_list(self.mr_x, index)
self.mr_y = self.delete_from_list(self.mr_y, index)
self.x0 = self.delete_from_list(self.x0, index)
self.y0 = self.delete_from_list(self.y0, index)
self.X = self.delete_from_list(self.X, index)
self.y = self.delete_from_list(self.y, index)
self.step = self.delete_from_list(self.step, index)
self.last_update = self.delete_from_list(self.last_update, index)
self.theta = self.delete_from_list(self.theta, index)
self.c = len(self.mr_x)
    # Remove the EVs that didn't have any update in the last threshold steps
    def remove_outdated_EVs(self, threshold):
        """Remove every EV whose ``last_update`` is at or before ``threshold``.

        When multitask regularization is active (``rho`` set), the rule
        relationship matrix R is rebuilt and ``init_theta`` is flagged so
        the regression coefficients are re-initialized at the next fit.
        """
        indexes_to_remove = list()
        for index in range(self.c):
            if self.last_update[index] <= threshold:
                indexes_to_remove.append(index)
        if len(indexes_to_remove) > 0:
            # remove_EV accepts the whole list of indexes at once
            self.remove_EV(indexes_to_remove)
            # NOTE(review): assuming the R refresh is only needed after an
            # actual removal -- confirm the intended nesting of this branch.
            if self.rho is not None:
                self.update_R()
                self.init_theta = 2
# Sort the EVs according to the last update
def sort_EVs(self):
new_order = (-np.array(self.last_update)).argsort()
self.mr_x = list(np.array(self.mr_x)[new_order])
self.mr_y = list(np.array(self.mr_y)[new_order])
self.x0 = list(np.array(self.x0)[new_order])
self.y0 = list(np.array(self.y0)[new_order])
self.X = list(np.array(self.X)[new_order])
self.y = list(np.array(self.y)[new_order])
self.step = list(np.array(self.step)[new_order])
self.last_update = list(np.array(self.last_update)[new_order])
    # Evolves the model (main method)
    def train(self, x, y, step):
        """Process one sample: add it to the best-matching EV or create a
        new one, run the periodic maintenance (pruning and merging) every
        ``delta`` steps, and refit the multitask coefficients when active.

        :param x: input sample
        :param y: target value
        :param step: current time step; indexed as ``step[0, 0]`` below,
            so presumably a 1x1 array -- TODO confirm the caller's type
        """
        best_EV = None
        best_EV_value = 0
        # check if it is possible to insert the sample in an existing model
        for index in range(self.c):
            tau = self.firing_degree(index, x, y)
            # keep the EV with the highest firing degree above the sigma cut
            if tau > best_EV_value and tau > self.sigma:
                best_EV = index
                best_EV_value = tau
        update = False
        # Add the sample to an existing EV
        if best_EV is not None:
            self.add_sample_to_EV(best_EV, x, y, step)
        # Create a new EV
        else:
            self.add_EV(x, y, step)
            update = True
        self.update_EVs()
        # Periodic maintenance: prune stale EVs and merge similar ones
        if step != 0 and (step % self.delta) == 0:
            self.remove_outdated_EVs(step[0, 0] - self.delta)
            self.merge()
            update = True
        if self.rho is not None:
            if update:
                # the EV structure changed: rebuild the relationship matrix
                self.update_R()
            self.theta = self.srmtl.train(self.X, self.y, self.init_theta)
            self.init_theta = 1
# Update the psi curve of the EVs
def update_EVs(self):
for i in range(self.c):
(X_ext, y_ext) = self.get_external_samples(i)
if X_ext.shape[0] > 0:
self.fit(i, X_ext, y_ext)
def update_R(self):
S = np.zeros((self.c, self.c))
for i in range(self.c):
S[i, :] = self.relationship_rules(i)
self.R = None
for i in range(self.c):
for j in range(i + 1, self.c):
if S[i, j] > 0 or S[j, i] > 0:
edge = np.zeros((self.c, 1))
edge[i] = max(S[i, j], S[j, i])
edge[j] = - max(S[i, j], S[j, i])
if self.R is None:
self.R = edge
else:
self.R = np.concatenate((self.R, edge), axis=1)
self.srmtl.set_RRt(self.R) | 40.489899 | 247 | 0.591493 |
956c1145058b098a2e217c047220f62e14dea6e3 | 5,375 | py | Python | bigml/modelfields.py | alanponce/python | 9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26 | [
"Apache-2.0"
] | 1 | 2021-06-20T11:51:22.000Z | 2021-06-20T11:51:22.000Z | bigml/modelfields.py | alanponce/python | 9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26 | [
"Apache-2.0"
] | null | null | null | bigml/modelfields.py | alanponce/python | 9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2013-2016 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A BasicModel resource.
This module defines a BasicModel to hold the main information of the model
resource in BigML. It becomes the starting point for the Model class, that
is used for local predictions.
"""
import logging
LOGGER = logging.getLogger('BigML')
from bigml.util import invert_dictionary, DEFAULT_LOCALE
from bigml.fields import DEFAULT_MISSING_TOKENS
def check_model_structure(model):
    """Checks the model structure to see if it contains all the needed keys

    A valid model is a dict with a non-None 'resource' entry and either a
    top-level 'model' key or a nested 'object' -> 'model' key.
    """
    if not isinstance(model, dict):
        return False
    if model.get('resource') is None:
        return False
    has_nested_model = 'object' in model and 'model' in model['object']
    return has_nested_model or 'model' in model
class ModelFields(object):
    """ A lightweight wrapper of the field information in the model, cluster
        or anomaly objects
    """

    def __init__(self, fields, objective_id=None, data_locale=None,
                 missing_tokens=None):
        # NOTE(review): when `fields` is not a dict the object is silently
        # left with no attributes set -- callers presumably always pass a
        # dict; confirm.
        if isinstance(fields, dict):
            try:
                self.objective_id = objective_id
                # make field names unique before inverting name -> id
                self.uniquify_varnames(fields)
                self.inverted_fields = invert_dictionary(fields)
                self.fields = {}
                self.fields.update(fields)
                self.data_locale = data_locale
                self.missing_tokens = missing_tokens
                if self.data_locale is None:
                    self.data_locale = DEFAULT_LOCALE
                if self.missing_tokens is None:
                    self.missing_tokens = DEFAULT_MISSING_TOKENS
            except KeyError:
                raise Exception("Wrong field structure.")

    def uniquify_varnames(self, fields):
        """Tests if the fields names are unique. If they aren't, a
        transformation is applied to ensure unicity.
        """
        unique_names = set([fields[key]['name'] for key in fields])
        if len(unique_names) < len(fields):
            self.transform_repeated_names(fields)

    def transform_repeated_names(self, fields):
        """If a field name is repeated, it will be transformed adding its
        column number. If that combination is also a field name, the
        field id will be added.
        """
        # The objective field treated first to avoid changing it.
        if self.objective_id:
            unique_names = [fields[self.objective_id]['name']]
        else:
            unique_names = []
        field_ids = sorted([field_id for field_id in fields
                            if field_id != self.objective_id])
        for field_id in field_ids:
            new_name = fields[field_id]['name']
            if new_name in unique_names:
                # first attempt: append the column number
                new_name = "{0}{1}".format(fields[field_id]['name'],
                                           fields[field_id]['column_number'])
                if new_name in unique_names:
                    # still clashing: append the (unique) field id
                    new_name = "{0}_{1}".format(new_name, field_id)
            fields[field_id]['name'] = new_name
            unique_names.append(new_name)

    def normalize(self, value):
        """Transforms to unicode and cleans missing tokens
        """
        # Python 2 only: decode byte strings to unicode before comparison
        if isinstance(value, basestring) and not isinstance(value, unicode):
            value = unicode(value, "utf-8")
        return None if value in self.missing_tokens else value

    def filter_input_data(self, input_data, by_name=True):
        """Filters the keys given in input_data checking against model fields

        :param input_data: dict of {field name or id: value}
        :param by_name: when True keys are field names and get translated
            to field ids via ``inverted_fields``; otherwise keys are ids
        :return: dict keyed by field id, without missing values and
            without the objective field; {} on malformed input
        """
        if isinstance(input_data, dict):
            # remove all missing values
            # (Python 2: items() returns a list, so deleting entries while
            # iterating is safe here)
            for key, value in input_data.items():
                value = self.normalize(value)
                if value is None:
                    del input_data[key]
            if by_name:
                # We no longer check that the input data keys match some of
                # the dataset fields. We only remove the keys that are not
                # used as predictors in the model
                input_data = dict(
                    [[self.inverted_fields[key], value]
                     for key, value in input_data.items()
                     if key in self.inverted_fields and
                     (self.objective_id is None or
                      self.inverted_fields[key] != self.objective_id)])
            else:
                input_data = dict(
                    [[key, value]
                     for key, value in input_data.items()
                     if key in self.fields and
                     (self.objective_id is None or
                      key != self.objective_id)])
            return input_data
        else:
            LOGGER.error("Failed to read input data in the expected"
                         " {field:value} format.")
            return {}
956d8de300f81dc8c0786d5a4d8868b85762da6f | 16,229 | py | Python | networks/infill/func_intersect_ub.py | LArbys/ublarcvserver | 02381c937f49a2eab2f754017ab431c3f6fa70d7 | [
"Apache-2.0"
] | 2 | 2020-07-09T19:34:03.000Z | 2021-06-21T23:09:23.000Z | networks/larflow/models/func_intersect_ub.py | LArbys/ublarcvserver | 02381c937f49a2eab2f754017ab431c3f6fa70d7 | [
"Apache-2.0"
] | null | null | null | networks/larflow/models/func_intersect_ub.py | LArbys/ublarcvserver | 02381c937f49a2eab2f754017ab431c3f6fa70d7 | [
"Apache-2.0"
] | null | null | null | import os,time
import torch
from larcv import larcv
import numpy as np
import ROOT as rt
from array import array
class IntersectUB( torch.autograd.Function ):
larcv_version = None
dataloaded = False
imgdimset = False
@classmethod
def load_intersection_data(cls,intersectiondatafile=None,larcv_version=None,nsource_wires=3456,ntarget_wires=2400):
if intersectiondatafile is None:
# set default
if os.environ["LARCV_VERSION"].strip()=="1":
intersectiondatafile = "../gen3dconsistdata/consistency3d_data_larcv1.root"
cls.larcv_version = 1
elif os.environ["LARCV_VERSION"].strip()=="2":
intersectiondatafile = "../gen3dconsistdata/consistency3d_data_larcv2.root"
cls.larcv_version = 2
else:
raise RuntimeError("Invalid LARCV_VERSION: {}".format(LARCV_VERSION))
else:
if larcv_version is None:
raise ValueError("When specifiying data, need to specify larcv version")
cls.larcv_version = larcv_version
if not os.path.exists(intersectiondatafile):
raise RuntimeError("could not find intersection data file: {}".format(intersectiondatafile))
cls.nsource_wires = nsource_wires
cls.ntarget_wires = ntarget_wires
# intersection location (y,z) for (source,target) intersections
cls.intersections_t = torch.zeros( (2, 2, cls.nsource_wires, cls.ntarget_wires ) ).float()
# fill intersection matrix (should make image2d instead of this loop fill
if os.environ["LARCV_VERSION"]=="1":
io = larcv.IOManager(larcv.IOManager.kREAD,"inersect3d",
larcv.IOManager.kTickBackward)
io.add_in_file(intersectiondata)
io.initialize()
ev_y2u = io.get_data(larcv.kProductImage2D,"y2u_intersect")
if ev_y2u.Image2DArray().size()!=2:
raise RuntimeError("Y2U intersection image2d vector should be len 2 (for detector y,z)")
cls.intersections_t[0,0,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2u.Image2DArray()[0] ).reshape(cls.ntarget_wires,cls.nsource_wires).transpose((1,0)) )
cls.intersections_t[0,1,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2u.Image2DArray()[1] ).reshape(cls.ntarget_wires,cls.nsource_wires).transpose((1,0)) )
ev_y2v = io.get_data(larcv.kProductImage2D,"y2v_intersect")
if ev_y2v.Image2DArray().size()!=2:
raise RuntimeError("Y2V intersection image2d vector should be len 2 (for detector y,z)")
cls.intersections_t[1,0,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2v.Image2DArray()[0] ).reshape(cls.ntarget_wires,cls.nsource_wires).transpose((1,0)) )
cls.intersections_t[1,1,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2v.Image2DArray()[1] ).reshape(cls.ntarget_wires,cls.nsource_wires).transpose((1,0)) )
elif os.environ["LARCV_VERSION"]=="2":
io = larcv.IOManager()
io.add_in_file(intersectiondatafile)
io.initialize()
ev_y2u = io.get_data("image2d","y2u_intersect")
ev_y2v = io.get_data("image2d","y2v_intersect")
cls.intersections_t[0,0,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2u.as_vector()[0] ).transpose((1,0)) )
cls.intersections_t[0,1,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2u.as_vector()[1] ).transpose((1,0)) )
cls.intersections_t[1,0,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2v.as_vector()[0] ).transpose((1,0)) )
cls.intersections_t[1,1,:,:] = torch.from_numpy( larcv.as_ndarray( ev_y2v.as_vector()[1] ).transpose((1,0)) )
cls.dataloaded = True
@classmethod
def set_img_dims(cls,nrows,ncols):
cls.nrows = nrows
cls.ncols = ncols
# index of source matrix: each column gets value same as index
src_index_np = np.tile( np.linspace( 0, float(ncols)-1, ncols ), nrows )
src_index_np = src_index_np.reshape( (nrows, ncols) ).transpose( (1,0) )
cls.src_index_t = torch.from_numpy( src_index_np ).float()
#print "src_index_np: ",self.src_index_np.shape#, self.src_index_np[3,:]
cls.imgdimset = True
@classmethod
def print_intersect_grad(cls):
print "Y2U: dy/du -------------- "
w = 500
for u in xrange(300,310):
print " (w=500,u={}) ".format(u),cls.intersections_t[0,0,500,u+1]-cls.intersections_t[0,0,500,u]
print "Y2U: dz/du -------------- "
for u in xrange(300,310):
print " (w=500,u={}) ".format(u),cls.intersections_t[0,1,500,u+1]-cls.intersections_t[0,1,500,u]
print "Y2V: dy/dv -------------- "
for v in xrange(300,310):
print " (w=500,v={}) ".format(v),cls.intersections_t[1,0,500,v+1]-cls.intersections_t[1,0,500,v]
print "Y2V: dz/dv -------------- "
for v in xrange(300,310):
print " (w=500,v={}) ".format(v),cls.intersections_t[1,1,500,v+1]-cls.intersections_t[1,1,500,v]
@staticmethod
def forward(ctx,pred_flowy2u, pred_flowy2v, source_originx, targetu_originx, targetv_originx ):
assert(IntersectUB.dataloaded and IntersectUB.imgdimset and IntersectUB.larcv_version is not None)
## our device
dev = pred_flowy2u.device
## switch tensors to device
IntersectUB.src_index_t = IntersectUB.src_index_t.to(device=dev)
IntersectUB.intersections_t = IntersectUB.intersections_t.to(device=dev)
#print pred_flowy2u.is_cuda
#print IntersectUB.src_index_t.is_cuda
#print IntersectUB.intersections_t.is_cuda
## img dims
ncols = IntersectUB.ncols
nrows = IntersectUB.nrows
ntarget_wires = IntersectUB.ntarget_wires
batchsize = pred_flowy2u.size()[0]
if type(source_originx) is float:
source_originx_t = torch.ones( (batchsize), dtype=torch.float ).to(device=dev)*source_originx
else:
source_originx_t = source_originx
if type(targetu_originx) is float:
targetu_originx_t = torch.ones( (batchsize), dtype=torch.float ).to(device=dev)*targetu_originx
else:
targetu_originx_t = targetu_originx
if type(targetv_originx) is float:
targetv_originx_t = torch.ones( (batchsize), dtype=torch.float ).to(device=dev)*targetv_originx
else:
targetv_originx_t = targetv_originx
#print "source origin: ",source_originx_t
#print "targetu origin: ",targetu_originx_t
#print "targetv origin: ",targetv_originx_t
## wire position calcs
source_fwire_t = torch.zeros( (batchsize,1,ncols,nrows), dtype=torch.float ).to( device=dev )
pred_target1_fwire_t = torch.zeros( (batchsize,1,ncols,nrows), dtype=torch.float ).to( device=dev )
pred_target2_fwire_t = torch.zeros( (batchsize,1,ncols,nrows), dtype=torch.float ).to( device=dev )
for b in xrange(batchsize):
## we need to get the source wire, add origin wire + relative position
source_fwire_t[b,:] = IntersectUB.src_index_t.add( source_originx_t[b] )
## calcualte the wires in the target planes
pred_target1_fwire_t[b,:] = (IntersectUB.src_index_t+pred_flowy2u[b,:]).add( targetu_originx_t[b] )
pred_target2_fwire_t[b,:] = (IntersectUB.src_index_t+pred_flowy2v[b,:]).add( targetv_originx_t[b] )
## clamp for those out of flow and round
pred_target1_fwire_t.clamp(0,ntarget_wires).round()
pred_target2_fwire_t.clamp(0,ntarget_wires).round()
#print "source fwire: ",source_fwire_t
#print "target1 fwire: ",pred_target1_fwire_t
#print "target2 fwire: ",pred_target2_fwire_t
## calculate the index for the lookup table
pred_target1_index_t = (source_fwire_t*ntarget_wires + pred_target1_fwire_t).long()
pred_target2_index_t = (source_fwire_t*ntarget_wires + pred_target2_fwire_t).long()
## get the (y,z) of the intersection we've flowed to
posyz_target1_t = torch.zeros( (batchsize,2,ncols,nrows) ).to( device=dev )
posyz_target2_t = torch.zeros( (batchsize,2,ncols,nrows) ).to( device=dev )
for b in xrange(batchsize):
posyz_target1_t[b,0,:,:] = torch.take( IntersectUB.intersections_t[0,0,:,:], pred_target1_index_t[b,0,:,:].reshape( ncols*nrows ) ).reshape( (ncols,nrows) ) # det-y
posyz_target1_t[b,1,:,:] = torch.take( IntersectUB.intersections_t[0,1,:,:], pred_target1_index_t[b,0,:,:].reshape( ncols*nrows ) ).reshape( (ncols,nrows) ) # det-y
posyz_target2_t[b,0,:,:] = torch.take( IntersectUB.intersections_t[1,0,:,:], pred_target2_index_t[b,0,:,:].reshape( ncols*nrows ) ).reshape( (ncols,nrows) ) # det-y
posyz_target2_t[b,1,:,:] = torch.take( IntersectUB.intersections_t[1,1,:,:], pred_target2_index_t[b,0,:,:].reshape( ncols*nrows ) ).reshape( (ncols,nrows) ) # det-y
#ctx.save_for_backward(posyz_target1_t,posyz_target2_t)
#print "posyz_target1: ",posyz_target1_t
#print "posyz_target2: ",posyz_target2_t
return (posyz_target1_t,posyz_target2_t)
@staticmethod
def backward(ctx,grad_output1,grad_output2):
#posyz_target1_t, posyz_target2_t, = ctx.saved_tensors
#diffy = posyz_target1_t[0,:] - posyz_target2_t[0,:] # ydiff
#diffz = posyz_target1_t[1,:] - posyz_target2_t[1,:] # zdiff
batchsize = grad_output1.size()[0]
grad_input_u = (-0.3464*grad_output1[:,0,:,:]).reshape( (batchsize,1,IntersectUB.ncols,IntersectUB.nrows) ) # only y-pos changes with respect to the intersection of Y-U wires
grad_input_v = ( 0.3464*grad_output2[:,0,:,:]).reshape( (batchsize,1,IntersectUB.ncols,IntersectUB.nrows) ) # only y-pos changes with respect to the intersection of Y-V wires
return grad_input_u,grad_input_v, None, None, None
if __name__=="__main__":
    # Standalone test (Python 2): run the 3D-consistency loss over a small
    # pre-cropped sample file and record per-event loss/time in a ROOT tree.
    device = torch.device("cuda:0")
    #device = torch.device("cpu")
    IntersectUB.load_intersection_data()
    IntersectUB.set_img_dims(512,832)
    IntersectUB.print_intersect_grad()
    # save a histogram
    rout = rt.TFile("testout_func_intersect_ub.root","recreate")
    ttest = rt.TTree("test","Consistency 3D Loss test data")
    dloss = array('d',[0])
    dtime = array('d',[0])
    ttest.Branch("loss",dloss,"loss/D")
    ttest.Branch("dtime",dtime,"dtime/D")
    # as test, we process some pre-cropped small samples
    io = larcv.IOManager()
    io.add_in_file( "../testdata/smallsample/larcv_dlcosmictag_5482426_95_smallsample082918.root" ) # create a unit test file (csv)
    io.initialize()
    nentries = io.get_n_entries()
    print "Number of Entries: ",nentries
    start = time.time()
    istart=0
    iend=nentries
    #istart=155
    #iend=156
    for ientry in xrange(istart,iend):
        tentry = time.time()
        io.read_entry( ientry )
        # Fetch ADC images, predicted flows and truth; the accessor API
        # differs between larcv versions 1 and 2.
        if os.environ["LARCV_VERSION"]=="1":
            ev_adc_test = io.get_data(larcv.kProductImage2D,"adc")
            ev_flowy2u_test = io.get_data(larcv.kProductImage2D,"larflow_y2u")
            ev_flowy2v_test = io.get_data(larcv.kProductImage2D,"larflow_y2v")
            ev_trueflow_test = io.get_data(larcv.kProductImage2D,"pixflow")
            ev_truevisi_test = io.get_data(larcv.kProductImage2D,"pixvisi")
            flowy2u = ev_flowy2u_test.Image2DArray()[0]
            flowy2v = ev_flowy2v_test.Image2DArray()[0]
            truey2u = ev_trueflow_test.Image2DArray()[0]
            truey2v = ev_trueflow_test.Image2DArray()[1]
            visiy2u = ev_truevisi_test.Image2DArray()[0]
            visiy2v = ev_truevisi_test.Image2DArray()[1]
            source_meta = ev_adc_test.Image2DArray()[2].meta()
            targetu_meta = ev_adc_test.Image2DArray()[0].meta()
            targetv_meta = ev_adc_test.Image2DArray()[1].meta()
        elif os.environ["LARCV_VERSION"]=="2":
            ev_adc_test = io.get_data("image2d","adc")
            ev_flowy2u_test = io.get_data("image2d","larflow_y2u")
            ev_flowy2v_test = io.get_data("image2d","larflow_y2v")
            ev_trueflow_test = io.get_data("image2d","pixflow")
            ev_truevisi_test = io.get_data("image2d","pixvisi")
            flowy2u = ev_flowy2u_test.as_vector()[0]
            flowy2v = ev_flowy2v_test.as_vector()[0]
            truey2u = ev_trueflow_test.as_vector()[0]
            truey2v = ev_trueflow_test.as_vector()[1]
            visiy2u = ev_truevisi_test.as_vector()[0]
            visiy2v = ev_truevisi_test.as_vector()[1]
            source_meta = ev_adc_test.as_vector()[2].meta()
            targetu_meta = ev_adc_test.as_vector()[0].meta()
            targetv_meta = ev_adc_test.as_vector()[1].meta()
        # numpy arrays
        # larcv2 stores images transposed relative to larcv1
        index = (0,1)
        if os.environ["LARCV_VERSION"]=="2":
            index = (1,0)
        np_flowy2u = larcv.as_ndarray(flowy2u).transpose(index).reshape((1,1,source_meta.cols(),source_meta.rows()))
        np_flowy2v = larcv.as_ndarray(flowy2v).transpose(index).reshape((1,1,source_meta.cols(),source_meta.rows()))
        np_visiy2u = larcv.as_ndarray(visiy2u).transpose(index).reshape((1,1,source_meta.cols(),source_meta.rows()))
        np_visiy2v = larcv.as_ndarray(visiy2v).transpose(index).reshape((1,1,source_meta.cols(),source_meta.rows()))
        np_trueflowy2u = larcv.as_ndarray(truey2u).transpose(index).reshape((1,1,source_meta.cols(),source_meta.rows()))
        np_trueflowy2v = larcv.as_ndarray(truey2v).transpose(index).reshape((1,1,source_meta.cols(),source_meta.rows()))
        #print "NAN indices (flow-Y2U): ",np.argwhere( np.isnan(np_flowy2u) )
        #print "NAN indices (flow-Y2V): ",np.argwhere( np.isnan(np_flowy2v) )
        #print "NAN indices (visi-Y2U): ",np.argwhere( np.isnan(np_visiy2u) )
        #print "NAN indices (visi-Y2V): ",np.argwhere( np.isnan(np_visiy2v) )
        # tensor conversion
        predflow_y2u_t = torch.from_numpy( np_flowy2u ).to(device=device).requires_grad_()
        predflow_y2v_t = torch.from_numpy( np_flowy2v ).to(device=device).requires_grad_()
        trueflow_y2u_t = torch.from_numpy( np_trueflowy2u ).to(device=device).requires_grad_()
        trueflow_y2v_t = torch.from_numpy( np_trueflowy2v ).to(device=device).requires_grad_()
        truevisi_y2u_t = torch.from_numpy( np_visiy2u ).to(device=device)
        truevisi_y2v_t = torch.from_numpy( np_visiy2v ).to(device=device)
        #print "requires grad: ",predflow_y2u_t.requires_grad,predflow_y2v_t.requires_grad
        # use the TRUE flow here so the consistency loss should be ~0
        #y2u_t = predflow_y2u_t
        #y2v_t = predflow_y2v_t
        y2u_t = trueflow_y2u_t
        y2v_t = trueflow_y2v_t
        source_origin = torch.zeros( (1) ).to(device=device)
        targetu_origin = torch.zeros( (1) ).to(device=device)
        targetv_origin = torch.zeros( (1) ).to(device=device)
        for b in xrange(1):
            source_origin[0] = source_meta.min_x()
            targetu_origin[0] = targetu_meta.min_x()
            targetv_origin[0] = targetv_meta.min_x()
        posyz_fromy2u,posyz_fromy2v = IntersectUB.apply( y2u_t, y2v_t, source_origin, targetu_origin, targetv_origin )
        # only pixels visible in BOTH planes contribute to the loss
        mask = truevisi_y2u_t*truevisi_y2v_t
        diff = (posyz_fromy2u-posyz_fromy2v)
        #print "diff.shape=",diff.shape
        #print "mask.shape=",mask.shape
        diff[:,0,:,:] *= mask[:,0,:,:]
        diff[:,1,:,:] *= mask[:,0,:,:]
        # squared (y,z) disagreement between the two flow paths
        l2 = diff[:,0,:,:]*diff[:,0,:,:] + diff[:,1,:,:]*diff[:,1,:,:]
        #print "l2 shape: ",l2.shape
        if mask.sum()>0:
            lossval = l2.sum()/mask.sum()
        else:
            lossval = l2.sum()
        # backward test
        tback = time.time()
        lossval.backward()
        print "  runbackward: ",time.time()-tback," secs"
        print "Loss (iter {}): {}".format(ientry,lossval.item())," iscuda",lossval.is_cuda
        dloss[0] = lossval.item()
        dtime[0] = time.time()-tentry
        ttest.Fill()
    end = time.time()
    tloss = end-start
    print "Time: ",tloss," secs / ",tloss/nentries," secs per event"
    rout.cd()
    ttest.Write()
    rout.Close()
| 48.735736 | 182 | 0.644217 |
956dfd3898d4db0373cc4caa6c858737d336c6e2 | 3,270 | py | Python | simple_api/object/permissions.py | ladal1/simple_api | 1b5d560476bccad9f68a7331d092dbdb68c48bf7 | [
"MIT"
] | 1 | 2021-02-24T22:14:59.000Z | 2021-02-24T22:14:59.000Z | simple_api/object/permissions.py | ladal1/simple_api | 1b5d560476bccad9f68a7331d092dbdb68c48bf7 | [
"MIT"
] | null | null | null | simple_api/object/permissions.py | ladal1/simple_api | 1b5d560476bccad9f68a7331d092dbdb68c48bf7 | [
"MIT"
] | null | null | null | from inspect import isclass
def build_permissions_fn(permissions):
    """Compose a list of permission classes into a single checker.

    The returned callable instantiates each class in ``permissions`` and
    raises PermissionError with that permission's error message on the
    first failing check; it returns None when every check passes.
    """
    def check_all(**kwargs):
        for permission_cls in permissions:
            if not permission_cls().has_permission(**kwargs):
                raise PermissionError(permission_cls().error_message(**kwargs))
    return check_all
class BasePermission:
    """Base class for permission checks.

    Subclasses override ``permission_statement``; ``has_permission`` walks
    the inheritance chain so a subclass's check only passes when every
    ancestor's check passes as well.
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def permission_statement(self, **kwargs):
        # Abstract: implemented by concrete permission classes.
        raise NotImplementedError

    def has_permission(self, **kwargs):
        """Evaluate permission statements from base class to subclass.

        Classes without a statement (e.g. ``object``) are skipped, as are
        abstract statements that raise NotImplementedError; the first
        statement returning a falsy value denies access.
        """
        for klass in reversed(type(self).__mro__):
            statement = getattr(klass, "permission_statement", None)
            if statement is None:
                continue
            try:
                allowed = statement(self, **kwargs)
            except NotImplementedError:
                continue
            if not allowed:
                return False
        return True

    def error_message(self, **kwargs):
        """Message used when this permission denies access."""
        return "You do not have permission to access this."
class LogicalConnector:
    """Abstract combinator of permission classes (base of Or/And/Not).

    Instantiated with permission classes (or nested connectors); calling
    the instance validates them and returns a ``LogicalResolver`` bound to
    this connector's ``resolve_fn``.
    """
    def __init__(self, *permissions):
        self.permissions = permissions

    def __call__(self, **kwargs):
        # kwargs are accepted for interface compatibility but unused here;
        # they are passed again when the resolver's has_permission runs.
        for perm in self.permissions:
            assert isclass(perm) or isinstance(perm, LogicalConnector), \
                "Permissions in logical connectors must be classes."
        return LogicalResolver(self.permissions, self.resolve_fn)

    def resolve_fn(self, permissions, **kwargs):
        # Abstract: concrete connectors define the combination logic.
        raise NotImplementedError
class LogicalResolver:
    """Runtime wrapper produced by a ``LogicalConnector``: exposes the same
    has_permission/error_message interface as a permission instance."""
    def __init__(self, permissions, resolve_fn):
        self.permissions = permissions
        self.resolve_fn = resolve_fn

    def has_permission(self, **kwargs):
        # Delegate the decision to the bound connector resolve function.
        return self.resolve_fn(self.permissions, **kwargs)

    def error_message(self, **kwargs):
        """Message used when the combined permission denies access."""
        return "You do not have permission to access this."
class Or(LogicalConnector):
    """Grants access when at least one wrapped permission passes."""
    def resolve_fn(self, permissions, **kwargs):
        return any(perm().has_permission(**kwargs) for perm in permissions)
class And(LogicalConnector):
    """Grants access only when every wrapped permission passes."""
    def resolve_fn(self, permissions, **kwargs):
        return all(perm().has_permission(**kwargs) for perm in permissions)
class Not(LogicalConnector):
    """Inverts the decision of exactly one wrapped permission."""
    def resolve_fn(self, permissions, **kwargs):
        assert len(permissions) == 1, "`Not` accepts only one permission class as parameter."
        (permission,) = permissions
        return not permission().has_permission(**kwargs)
class AllowAll(BasePermission):
    """Permission that always grants access."""
    def permission_statement(self, **kwargs):
        return True
class AllowNone(BasePermission):
    """Permission that always denies access."""
    def permission_statement(self, **kwargs):
        return False
| 34.0625 | 119 | 0.663609 |
95728ec4185daad9b5e30451603845ad35ca972b | 290 | py | Python | Desafios/MODULO 1/Desafio 12.py | deneyjunior/python-mundos-cev | 4bc82bf0630f65cf66e5442ae57b72fd4b0207fc | [
"MIT"
] | null | null | null | Desafios/MODULO 1/Desafio 12.py | deneyjunior/python-mundos-cev | 4bc82bf0630f65cf66e5442ae57b72fd4b0207fc | [
"MIT"
] | null | null | null | Desafios/MODULO 1/Desafio 12.py | deneyjunior/python-mundos-cev | 4bc82bf0630f65cf66e5442ae57b72fd4b0207fc | [
"MIT"
] | null | null | null | # Faça um algoritmo que leia o preço de um produto e mostre o novo preço com um desconto.
# Read the current price and a discount fraction (e.g. 0.05 = 5%), then
# show the new, discounted price.
preco = float(input('Digite o preço atual do produto: R$ '))
desconto = float(input('Digite o valor do desconto (0.X): '))
# BUGFIX: `preco * desconto` is the discount *amount*, not the price after
# the discount; the new price is the original minus that amount.
novopreco = preco * (1 - desconto)
print('O novo preço é R$ {}.'.format(novopreco))
957446e0b2daddda7b2cb6fdb76915dc45c186cf | 1,188 | py | Python | test.py | Ericqle/Senior-Design-Project | aa8e2134b26aef151d3736d306a4fbc9fe69790e | [
"MIT"
] | null | null | null | test.py | Ericqle/Senior-Design-Project | aa8e2134b26aef151d3736d306a4fbc9fe69790e | [
"MIT"
] | null | null | null | test.py | Ericqle/Senior-Design-Project | aa8e2134b26aef151d3736d306a4fbc9fe69790e | [
"MIT"
] | null | null | null | from draw_control import DrawControl
if __name__ == '__main__':
    # Interactive manual test harness for the plotter hardware: type a
    # command name, then the parameters it prompts for. An empty command
    # exits the loop and releases the board.
    zotter = DrawControl()
    test = input("track, rail, pen, hor, ver, diag: ")
    while(test):
        if test == "track":
            # move the track stepper: direction and number of steps
            # (NOTE: `dir` shadows the builtin; kept as-is)
            dir_in = input('dir step: ')
            dir = dir_in.split(" ")
            zotter.track.spin_fixed_step(int(dir[0]), int(dir[1]))
        elif test == "rail":
            # move the rail stepper: direction and number of steps
            dir_in = input('dir step: ')
            dir = dir_in.split(" ")
            zotter.rail.spin_fixed_step(int(dir[0]), int(dir[1]))
        elif test == "pen":
            # rotate the pen holder servo to an absolute angle
            angle = float(input("angle: "))
            zotter.pen_holder.turn_angle(angle)
        elif test == "hor":
            # draw a horizontal line of the given length in steps
            steps = input('steps: ')
            s = int(steps)
            zotter.draw_hor_line(0, s)
        elif test == "ver":
            # draw a vertical line of the given length in steps
            steps = input('steps: ')
            s = int(steps)
            zotter.draw_ver_line(0, s)
        elif test == "diag":
            # draw a diagonal: two directions and two step counts
            dir_in = input('dir1 dir2 steps1 steps2: ')
            dir = dir_in.split(" ")
            zotter.draw_diagonal(int(dir[0]), int(dir[1]), int(dir[2]), int(dir[3]))
        test = input("track, rail, pen, hor, ver, diag: ")
    zotter.close_board()
95770242fba26f6f07dd35c5f3e789a8b70b5318 | 2,961 | py | Python | fcos/datasets/cityscapes.py | rosshemsley/fcos | de30bb2c78df54cae6814282c7166beed333d34c | [
"MIT"
] | 5 | 2020-08-02T11:03:25.000Z | 2021-12-12T19:37:09.000Z | fcos/datasets/cityscapes.py | rosshemsley/fcos | de30bb2c78df54cae6814282c7166beed333d34c | [
"MIT"
] | null | null | null | fcos/datasets/cityscapes.py | rosshemsley/fcos | de30bb2c78df54cae6814282c7166beed333d34c | [
"MIT"
] | 1 | 2021-03-05T12:19:48.000Z | 2021-03-05T12:19:48.000Z | import pathlib
import logging
from torch import nn
import numpy as np
import torch
import torch.functional as F
import torchvision.transforms as T
from torch.utils.data import Dataset
from torchvision.datasets.cityscapes import Cityscapes
import cv2
from torchvision.transforms import ToPILImage
from torch.utils.data import DataLoader
from torchvision.transforms import (
RandomResizedCrop,
RandomHorizontalFlip,
Normalize,
RandomErasing,
Resize,
ToTensor,
RandomAffine,
Compose,
ColorJitter,
)
logger = logging.getLogger(__name__)
from enum import Enum
class Split(Enum):
    """Dataset partition selector; mapped to the Cityscapes split
    directory names ("test"/"train"/"val") by ``_get_split``."""
    TEST = 1
    TRAIN = 2
    VALIDATE = 3
class CityscapesData(Dataset):
    """Cityscapes wrapper yielding (image, class_labels, box_labels) with
    car bounding boxes derived from the 'fine' polygon annotations."""
    def __init__(self, split: Split, cityscapes_dir: pathlib.Path, image_transforms=None):
        """
        :param split: which dataset partition to load
        :param cityscapes_dir: root directory of the Cityscapes dataset
        :param image_transforms: optional list of transforms applied to
            the image before the final ToTensor conversion
        """
        v = _get_split(split)
        logger.info(f"Loading Cityscapes '{v}' dataset from '{cityscapes_dir}'")
        t = image_transforms if image_transforms is not None else []
        self.dataset = Cityscapes(
            # TODO(Ross): make this an argument
            cityscapes_dir,
            split=v,
            mode="fine",
            target_type=["polygon"],
            transform=Compose([*t, ToTensor()]),
        )
    def __len__(self) -> int:
        # return min(len(self.dataset), 10)   # debug cap, left disabled
        return len(self.dataset)
    def __getitem__(self, idx):
        img, poly = self.dataset[idx]
        # convert polygon annotations to (class, box) label tensors
        class_labels, box_labels = _poly_to_labels(img, poly)
        return img, class_labels, box_labels
def collate_fn(batch):
    """Collate (image, class_labels, box_labels) samples.

    Images are stacked into a single batch tensor; the label entries stay
    as per-sample lists because they have varying lengths.
    """
    images = torch.stack([sample[0] for sample in batch], dim=0)
    class_labels = [sample[1] for sample in batch]
    box_labels = [sample[2] for sample in batch]
    return images, class_labels, box_labels
def tensor_to_image(t) -> np.ndarray:
    """
    Return the tensor as an RGB numpy array (via a PIL intermediate).
    """
    to_pil = ToPILImage()
    return np.array(to_pil(t))
def _poly_to_labels(image_tensor, poly):
_, img_height, img_width = image_tensor.shape
# TODO(Ross): fix this.
h = poly["imgHeight"]
w = poly["imgWidth"]
scaling = img_height / h
box_labels = []
class_labels = []
for obj in poly["objects"]:
if obj["label"] == "car":
polygon = obj["polygon"]
min_x = min(x for x, _ in polygon) * scaling
max_x = max(x for x, _ in polygon) * scaling
max_y = max(y for _, y in polygon) * scaling
min_y = min(y for _, y in polygon) * scaling
box_labels.append(torch.FloatTensor([min_x, min_y, max_x, max_y]))
class_labels.append(torch.IntTensor([1]))
if len(class_labels) == 0:
return torch.zeros((0, 1)), torch.zeros(0, 4)
return torch.stack(class_labels), torch.stack(box_labels)
def _get_split(split_name: Split) -> str:
    """Map a Split member to the Cityscapes split directory name.

    BUGFIX(annotations): the signature previously declared
    ``(split_name: str) -> Split`` although the function has always taken
    a ``Split`` member and returned the string expected by torchvision's
    Cityscapes dataset.

    :raises ValueError: when ``split_name`` is not a Split member
    """
    if split_name is Split.TEST:
        return "test"
    if split_name is Split.VALIDATE:
        return "val"
    if split_name is Split.TRAIN:
        return "train"
    raise ValueError(f"unknown split kind {split_name}")
9577bc0bf7d0b84bf30768ffc4afd03a7523fd8b | 6,960 | py | Python | core/event/event_handler.py | ErlendHer/AlgoView | 946c2bb38e2ab3af011281c9672af4fcca84ae87 | [
"Apache-2.0"
] | 1 | 2020-11-26T09:37:28.000Z | 2020-11-26T09:37:28.000Z | core/event/event_handler.py | ErlendHer/AlgoView | 946c2bb38e2ab3af011281c9672af4fcca84ae87 | [
"Apache-2.0"
] | null | null | null | core/event/event_handler.py | ErlendHer/AlgoView | 946c2bb38e2ab3af011281c9672af4fcca84ae87 | [
"Apache-2.0"
] | 1 | 2020-12-09T17:18:12.000Z | 2020-12-09T17:18:12.000Z | class EventHandler:
def __init__(self, maze, maze_handler, maze_builder, bfs, a_star, indexes, text_table, screen):
"""
Initialize a new EventHandler instance.
:param maze: _maze list
:param maze_handler: MazeHandler instance
:param maze_builder: MazeBuilder instance
:param bfs: BFS instance
:param a_star: AStar instance
:param indexes: dictionary of algorithms and their respective text_table indexes
:param text_table: TextTable instance
:param screen pygame screen instance
"""
self._maze = maze
self._maze_handler = maze_handler
self._maze_builder = maze_builder
self._bfs = bfs
self._a_star = a_star
self.__indexes = indexes
self.__text_table = text_table
self.__screen = screen
self.__current_table_index = 0
self.__active = False
self._event_queue = lambda: None
self._generator = None
def is_active(self):
"""
Check weather there is an active event in the event queue.
:return: True if currently processing event, False otherwise.
"""
return self.__active
def __reset(self):
"""
Called after a event has terminated, reset the event handler, set active to false and empty the
event queue.
:return: None
"""
self.__active = False
self._generator = None
self._event_queue = lambda: None
self._maze_handler.unlock()
def next(self):
"""
Calls the next generator call from the current active event.
:return: None
"""
self._event_queue()
def __next_new_maze_event(self):
"""
This is the generator function for the new_maze_event. Update the next tile to color from the
maze generation.
:return: None
"""
# get the next tile to color, and number of increments
next_tile, increments = next(self._generator, (-1, 0))
if next_tile >= 0:
# increment the value of the text_table
self.__text_table.increment_value(self.__current_table_index, increments)
self.__text_table.draw_table_element(self.__screen, self.__current_table_index)
# update the maze
self._maze[next_tile][2] = 0
self._maze_handler.draw_box_by_idx(next_tile)
else:
# reset event handler
self._maze_handler.remove_grey_tiles()
self.__reset()
def __next_bfs_or_a_star_event(self):
"""
This is the generator function for the new_bfs_event or new_a_star_event. Update the next tile to color from
the bfs.
:return: None
"""
# get the next tile to color
next_tile = next(self._generator, [-1])
if next_tile[0] >= 0:
# 5 iterations per step to give similar speed to baseline random maze generation
for i in range(5):
# increment the value of the text_table
self.__text_table.increment_value(self.__current_table_index)
self.__text_table.draw_table_element(self.__screen, self.__current_table_index)
# update the maze
self._maze[next_tile[0]][2] = next_tile[1]
self._maze_handler.draw_box_by_idx(next_tile[0])
else:
# reset event handler
self._maze_handler.remove_grey_tiles()
self.__reset()
def __next_bi_bfs_event(self):
"""
This is the generator function for the new_bfs_event. Update the next tile to color from the
bfs.
:return: None
"""
# get the next tile to color
next_tile = next(self._generator, [-1])
if next_tile[0] >= 0:
# 5 iterations per step to give similar speed to baseline random _maze generation
for i in range(5):
# increment the value in the text_table
self.__text_table.increment_value(self.__current_table_index)
self.__text_table.draw_table_element(self.__screen, self.__current_table_index)
# update maze
self._maze[next_tile[0]][2] = next_tile[1]
self._maze_handler.draw_box_by_idx(next_tile[0])
else:
# reset event_handler
self._maze_handler.remove_grey_tiles()
self.__reset()
def new_maze_event(self):
"""
Create a new event for building a randomized maze.
:return: None
"""
if not self.__active:
self.__active = True
self.__current_table_index = self.__indexes['random_maze']
self.__text_table.reset_value(self.__current_table_index)
self._generator = self._maze_builder.generate_random_maze()
self._event_queue = self.__next_new_maze_event
self._maze_handler.reset_maze()
self._maze_handler.lock()
self._maze = self._maze_handler.maze
def new_bfs_event(self):
"""
Create a new event for finding the shortest path with bfs.
:return: None
"""
if not self.__active:
self.__active = True
self.__current_table_index = self.__indexes['bfs']
self.__text_table.reset_value(self.__current_table_index)
self._maze_handler.remove_all_colored_tiles()
self._maze = self._maze_handler.maze
self._generator = self._bfs.bfs_shortest_path(self._maze)
self._maze_handler.lock()
self._event_queue = self.__next_bfs_or_a_star_event
def new_bidirectional_bfs_event(self):
"""
Create a new event for finding the shortest path with bfs.
:return: None
"""
if not self.__active:
self.__active = True
self.__current_table_index = self.__indexes['bi_bfs']
self.__text_table.reset_value(self.__current_table_index)
self._maze_handler.remove_all_colored_tiles()
self._maze = self._maze_handler.maze
self._generator = self._bfs.bidirectional_bfs(self._maze)
self._maze_handler.lock()
self._event_queue = self.__next_bi_bfs_event
def new_a_star_event(self):
"""
Create a new event for finding the shortest path with bfs.
:return: None
"""
if not self.__active:
self.__active = True
self.__current_table_index = self.__indexes['a_star']
self.__text_table.reset_value(self.__current_table_index)
self._maze_handler.remove_all_colored_tiles()
self._maze = self._maze_handler.maze
self._generator = self._a_star.a_star(self._maze)
self._maze_handler.lock()
self._event_queue = self.__next_bfs_or_a_star_event
| 34.117647 | 116 | 0.616379 |
957893734aaf183904dce73b5e054520162c5d69 | 11,838 | py | Python | flask_jquery/app.py | lmj0328/SocialMediaReport | 555aa3551844b5ee67bcf9296d574fd99977982d | [
"MIT"
] | 1 | 2021-02-28T05:01:37.000Z | 2021-02-28T05:01:37.000Z | flask_jquery/app.py | lmj0328/SocialMediaReport | 555aa3551844b5ee67bcf9296d574fd99977982d | [
"MIT"
] | null | null | null | flask_jquery/app.py | lmj0328/SocialMediaReport | 555aa3551844b5ee67bcf9296d574fd99977982d | [
"MIT"
] | 1 | 2020-03-12T02:08:10.000Z | 2020-03-12T02:08:10.000Z | from flask import Flask, render_template, request, jsonify
from flask import request
from flask import Response
from flask import url_for
from flask import jsonify
import GetOldTweets3 as got
import pandas as pd
import datetime
import numpy as np
import requests
import json
from pyquery import PyQuery as pq
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("index.html")
@app.route('/report', methods=['POST'])
def report():
request_info = request.form.get("name")
data = {}
data["userInput"] = {}
data["userInput"]["name"] = request_info
data["numOfPost"] = {}
data["numOfPost"]["total"] = 0
data["numOfPost"]["instagram"] = 18
data["numOfPost"]["facebook"] = 0
### error handling
errorMessage = {
"noUserInput": "Oops, you did not enter any username ...",
"wrongTwitterInput": "Oops, the twitter account you enter either does not exist or has no content in it...",
"wrongInstagramInput": "Oops, the Instagram username you enter either does not exist or is set to private...",
"emptyInstagramContent": "Oops, your instagram account currently has no content..",
}
#TWITTER
if request.form.get("twitter-input"):
twitter_info = request.form.get("twitter-input")
data["userInput"]["twitterInput"] = twitter_info
username = twitter_info
tweetCriteria = got.manager.TweetCriteria().setUsername(username)\
.setSince("2019-01-01")\
.setUntil("2019-12-31")\
.setEmoji("unicode")
tweet_df = pd.DataFrame({'got_criteria':got.manager.TweetManager.getTweets(tweetCriteria)})
tweets_df = pd.DataFrame()
def get_twitter_info():
tweets_df["tweet_text"] = tweet_df["got_criteria"].apply(lambda x: x.text)
tweets_df["date"] = tweet_df["got_criteria"].apply(lambda x: x.date)
tweets_df["hashtags"] = tweet_df["got_criteria"].apply(lambda x: x.hashtags)
tweets_df["link"] = tweet_df["got_criteria"].apply(lambda x: x.permalink)
tweets_df["favorites"] = tweet_df["got_criteria"].apply(lambda x: x.favorites)
tweets_df["retweets"] = tweet_df["got_criteria"].apply(lambda x: x.retweets)
tweets_df["mentions"] = tweet_df["got_criteria"].apply(lambda x: x.mentions)
get_twitter_info()
tweets_df['mentions'] = tweets_df['mentions'].astype(str)
# Data Aggregation
# Number of posts
num_post = tweets_df.shape[0]
print(num_post)
if num_post == 0:
return render_template("error.html", data = errorMessage["wrongTwitterInput"])
else:
data["numOfPost"]["twitter"] = num_post
data["numOfPost"]["total"] = num_post
#Month with most post
tweets_df['month'] = pd.DatetimeIndex(tweets_df['date']).month
month_posts = tweets_df.groupby(['month']).size().reset_index(name='counts')
most_month_val = month_posts[month_posts.counts == month_posts.counts.max()]
most_month = most_month_val.month.values[0]
month_posts_count = most_month_val.counts.values[0]
most_month_verb = datetime.date(1900, most_month, 1).strftime('%B')
month_posts.index = month_posts.month
df2 = pd.DataFrame({'month':range(1, 13), 'counts':0})
df2.index = df2.month
df2.counts = month_posts.counts
df2= df2.fillna(0)
df2.drop('month',1).reset_index()
month_trend = df2.counts.tolist()
data["monthMostPost"] = {
"month": most_month_verb,
"total": month_posts_count,
"facebook": 0,
"twitter": month_posts_count,
"monthPost":month_trend
}
# Twitter Total Like
total_like = tweets_df.favorites.sum()
total_like = format(total_like, ',')
data["totalLikesTwitter"] = total_like
# Twitter most like posts
most_favorites_set = tweets_df[tweets_df.favorites == tweets_df.favorites.max()]
most_fav_text = most_favorites_set.tweet_text.values[0]
most_fav_date = most_favorites_set.date.values[0]
most_fav_date = pd.to_datetime(str(most_fav_date ))
most_fav_date = most_fav_date.strftime('%Y.%m.%d')
data["twitterPostWithMostLikes"] = {
"content":most_fav_text,
"date": most_fav_date,
"twitterAccount": "@" + twitter_info
}
# The latest post
tweets_df['hour'] = tweets_df.date.dt.hour
pos = tweets_df.hour.sub(3).abs().values.argmin()
df1 = tweets_df.iloc[[pos]]
latest_text = df1.tweet_text.values[0]
latest_date = df1.date.values[0]
latest_date = pd.to_datetime(str(latest_date))
latest_date = latest_date.strftime('%Y.%m.%d')
if df1.hour.values[0] < 5 or df1.hour.values[0] > 20:
latest_hour = (str(df1.hour.values[0]), ':00')
latest_hour = "".join(latest_hour)
data["twitterLatestPost"] = {
"latePost": bool(True),
"content":latest_text,
"date":latest_date,
"time":latest_hour,
"twitterAccount": "@" + twitter_info
}
else:
data["twitterLatestPost"] = {
"latePost": bool(False)
}
#Mention Most
tweets_df['mentions'].replace('', np.nan, inplace=True)
tweets_df.dropna(subset=['mentions'], inplace=True)
mention_set = tweets_df.groupby(['mentions']).size().reset_index(name='counts')
mention_set.sort_values(by=['counts'], inplace=True, ascending=False)
if mention_set.shape[0] > 3:
mention_set = mention_set.iloc[:3]
mention_name = mention_set.mentions.tolist()
mention_counts = mention_set.counts.tolist()
data["twitterPeopleMentionedMost"] = {
"names":mention_name
}
data["twitterPeopleMentioneTimes"] = {
"top_times":mention_counts
}
# Tweet first post
tweet_arrange = tweets_df.sort_values(by = ['date'])
first_tweet = tweet_arrange.iloc[[0]]
first_text = first_tweet.tweet_text.values[0]
first_date = first_tweet.date.values[0]
first_date = pd.to_datetime(str(first_date))
first_date = first_date.strftime('%Y.%m.%d')
data["twitterFirstPostYear"] = {
"content":first_text,
"date": first_date,
"twitterAccount": "@" + twitter_info
}
# Tweet last post
last_tweet = tweet_arrange.iloc[[-1]]
last_text = last_tweet.tweet_text.values[0]
last_date = last_tweet.date.values[0]
last_date = pd.to_datetime(str(last_date))
last_date = last_date.strftime('%Y.%m.%d')
data["twitterLastPostYear"] = {
"content":last_text,
"date": last_date,
"twitterAccount": "@" + twitter_info
}
# Hashtags
tweets_df['hashtags'].replace('', np.nan, inplace=True)
tweets_df.dropna(subset=['hashtags'], inplace=True)
hash_set = tweets_df.groupby(['hashtags']).size().reset_index(name='counts')
hash_set.sort_values(by=['counts'], inplace=True, ascending=False)
hash_name = hash_set.hashtags.tolist()
hash_counts = hash_set.counts.tolist()
if len(hash_name) == 0:
data["twitterHashtag"] = {
"hashtag": bool(False)
}
else:
hash_most = hash_name[0]
hash_most
data["twitterHashtag"] = {
"hashtag": bool(True),
"hashtags":hash_name,
"hashtagsCount": hash_counts,
"hashtagMost": hash_most
}
else:
twitter_info = bool(False)
data["userInput"]["twitterInput"] = bool(False)
## FACEBOOK
if request.form.get("facebook-input"):
facebook_info = request.form.get("facebook-input")
else:
facebook_info = bool(False)
data["userInput"]["facebookInput"] = bool(False)
## INSTAGRAM
if request.form.get("instagram-input"):
instagram_info = request.form.get("instagram-input")
ins_user = instagram_info
url = ("https://www.instagram.com/", ins_user, '/')
url = "".join(url)
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
# 'cookie': 'mid=W4VyZwALAAHeINz8GOIBiG_jFK5l; mcd=3; csrftoken=KFLY0ovWwChYoayK3OBZLvSuD1MUL04e; ds_user_id=8492674110; sessionid=IGSCee8a4ca969a6825088e207468e4cd6a8ca3941c48d10d4ac59713f257114e74b%3Acwt7nSRdUWOh00B4kIEo4ZVb4ddaZDgs%3A%7B%22_auth_user_id%22%3A8492674110%2C%22_auth_user_backend%22%3A%22accounts.backends.CaseInsensitiveModelBackend%22%2C%22_auth_user_hash%22%3A%22%22%2C%22_platform%22%3A4%2C%22_token_ver%22%3A2%2C%22_token%22%3A%228492674110%3Avsy7NZ3ZPcKWXfPz356F6eXuSUYAePW8%3Ae8135a385c423477f4cc8642107dec4ecf3211270bb63eec0a99da5b47d7a5b7%22%2C%22last_refreshed%22%3A1535472763.3352122307%7D; csrftoken=KFLY0ovWwChYoayK3OBZLvSuD1MUL04e; rur=FRC; urlgen="{\"103.102.7.202\": 57695}:1furLR:EZ6OcQaIegf5GSdIydkTdaml6QU"'
}
def get_urls(url):
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
else:
print('error code:', response.status_code)
return True
except Exception as e:
print(e)
return None
html = get_urls(url)
if (html == True):
return render_template("error.html", data = errorMessage["wrongInstagramInput"])
else:
urls = []
doc = pq(html)
items = doc('script[type="text/javascript"]').items()
for item in items:
if item.text().strip().startswith('window._sharedData'):
js_data = json.loads(item.text()[21:-1], encoding='utf-8')
edges = js_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["edges"]
for edge in edges:
url = edge['node']['display_url']
urls.append(url)
if urls == []:
return render_template("error.html", data = errorMessage["emptyInstagramContent"])
else:
data["userInput"]["instagramInput"] = instagram_info
data["insPostMostComments"] = {
"pictureLink": ["picture here"],
"comments": ["comment1", "comment2", "comment3", "comment4"],
"totalComments": 12
}
data["insNinephotos"] = urls
else:
instagram_info = bool(False)
data["userInput"]["instagramInput"] = bool(False)
# if no user input for social media
if (not twitter_info) and (not instagram_info) and (not facebook_info):
return render_template("error.html", data = errorMessage["noUserInput"])
else:
#Incoming data is processed here and converted into following format:
data["year"] = 2019
data["facebookNumOfFriends"] = 423
data["facebookRecentAcademic"] = {
"schoolName":"Texas Academy of Mathematics and Technology",
"year":2019
}
return render_template("samplereport.html", data = data)
if __name__ == '__main__':
app.run(debug=True, threaded=True)
| 41.391608 | 755 | 0.598496 |
9579e725a92b212adbfbee1f939f56455d5e30da | 22 | py | Python | nextfeed/settings/__init__.py | Nurdok/nextfeed | 197818310bbf7134badc2ef5ed11ab5ede7fdb35 | [
"MIT"
] | 1 | 2015-08-09T10:42:04.000Z | 2015-08-09T10:42:04.000Z | nextfeed/settings/__init__.py | Nurdok/nextfeed | 197818310bbf7134badc2ef5ed11ab5ede7fdb35 | [
"MIT"
] | null | null | null | nextfeed/settings/__init__.py | Nurdok/nextfeed | 197818310bbf7134badc2ef5ed11ab5ede7fdb35 | [
"MIT"
] | null | null | null | __author__ = 'Rachum'
| 11 | 21 | 0.727273 |
957ac7e6d29caaecedbbbd4e6c92497096862e51 | 10,072 | py | Python | croisee/croisee/models.py | fiee/croisee | 922a163b627855468aac84e0c56ea51082424732 | [
"BSD-3-Clause"
] | 6 | 2017-09-06T02:03:36.000Z | 2021-07-11T15:06:29.000Z | croisee/croisee/models.py | fiee/croisee | 922a163b627855468aac84e0c56ea51082424732 | [
"BSD-3-Clause"
] | null | null | null | croisee/croisee/models.py | fiee/croisee | 922a163b627855468aac84e0c56ea51082424732 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import unicodedata
import re, os
import logging
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.auth.models import User
logger = logging.getLogger(settings.PROJECT_NAME)
REPLACEMENTS = (
# international characters that need more than just stripping accents
('Ä', 'AE'),
('Ö', 'OE'),
('Ü', 'UE'),
('ß', 'SS'),
('Œ', 'OE'),
('Æ', 'AE'),
('Ø', 'OE'),
)
reASCIIonly = re.compile(r'[^A-Z]', re.I)
reCleanInput = re.compile(r'[^\w_%\?\*]', re.I)
def cleanword(word, strict=True):
word = word.upper()
for k,v in REPLACEMENTS:
word = word.replace(k,v)
word = unicodedata.normalize('NFD', word).encode('ASCII', errors='ignore').decode('ASCII') # decompose international chars
if strict:
word = reASCIIonly.sub('', word)
else:
word = reCleanInput.sub('', word)
return word
def splitwordline(line):
"""
a line from a wordlist may contain word, description and priority, separated by tabs
if description and priority are missing, default is the word and 0
"""
parts = line.replace('\n','').split('\t')
if len(parts)==1:
parts.extend([parts[0],0])
elif len(parts)==2:
parts.append(0)
elif len(parts)>3:
parts = parts[0:2]
if len(parts[1])<2:
parts[1] = parts[0]
try:
parts[2] = int(parts[2])
except ValueError as ex:
parts[2] = 0
parts[0] = cleanword(parts[0])
return parts
class Dictionary(models.Model):
"""
A dictionary
"""
class Meta:
verbose_name = _('Dictionary')
verbose_name_plural = _('Dictionaries')
ordering = ['language','name']
unique_together = (('name','language'),)
name = models.CharField(_('Name'), max_length=31, help_text=_('A short descriptive name'))
public = models.BooleanField(_('public?'), default=True, help_text=_('May everyone use this dictionary?'))
language = models.CharField(_('Language'), max_length=15,
default=settings.LANGUAGE_CODE, choices=settings.LANGUAGES,
help_text=_('Language of (most of) the words in this dictionary'))
description = models.CharField(_('Description'), max_length=255, blank=True)
owner = models.ForeignKey(User, verbose_name=_('Owner'))
def __str__(self):
return "%s (%s)" % (self.name, self.language)
def get_absolute_url(self):
return '/dictionary/%d/' % self.id
class Word(models.Model):
"""
A word with a description, according to a dictionary
"""
class Meta:
verbose_name = _('Word')
verbose_name_plural = _('Words')
ordering = ['word','priority']
unique_together = (('word','dictionary'),)
word = models.CharField(_('Word'), max_length=63, help_text=_('a word fitting a crossword puzzle; will become uppercased; no numbers, hyphens etc.'))
dictionary = models.ForeignKey(Dictionary, verbose_name=_('Dictionary')) #, related_name="%(class)s_related")
description = models.CharField(_('Description'), max_length=127, help_text=_('Meaning of the word within the context of the selected dictionary'))
priority = models.SmallIntegerField(_('Priority'), default=0, help_text=_('0 is neutral, you can increase or decrease the priority'))
def __str__(self):
return "%s\t%s" % (self.word, self.description)
def save(self, *args, **kwargs):
self.word = cleanword(self.word)
super(Word, self).save(*args, **kwargs)
def get_absolute_url(self):
return '/dictionary/%d/%s/' % (self.dictionary.id, self.word)
class WordlistUpload(models.Model):
"""
Wordlist importer
"""
wordlist_file = models.FileField(_('wordlist file (.txt)'), upload_to=os.path.relpath(os.path.join(settings.MEDIA_ROOT, 'temp')),
help_text=_('Select a .txt file containing a single word per line to upload as a new dictionary.'))
dictionary = models.ForeignKey(Dictionary, null=True, blank=True, help_text=_('Select a dictionary to add these words to. leave this empty to create a new dictionary from the supplied name.'))
name = models.CharField(_('Name'), max_length=31, blank=True, help_text=_('A short descriptive name'))
uniqueonly = models.BooleanField(_('only unique'), default=True, help_text=_('Import only words that are not contained in any other dictionary?'))
public = models.BooleanField(_('public?'), default=True, help_text=_('May everyone use this dictionary?'))
language = models.CharField(_('Language'), max_length=15,
default=settings.LANGUAGE_CODE, choices=settings.LANGUAGES,
help_text=_('Language of (most of) the words in this dictionary'))
description = models.CharField(_('Description'), blank=True, max_length=255)
owner = models.ForeignKey(User, verbose_name=_('Owner'))
class Meta:
verbose_name = _('wordlist upload')
verbose_name_plural = _('wordlist uploads')
def __str__(self):
return "%s (%s)" % (self.name, self.wordlist_file)
def save(self, *args, **kwargs):
super(WordlistUpload, self).save(*args, **kwargs)
dictionary = self.process_wordlist()
super(WordlistUpload, self).delete()
return dictionary
def process_wordlist(self):
if not os.path.isfile(self.wordlist_file.path):
# TODO: throw exception?
return None
wordfile = open(self.wordlist_file.path, 'rU', encoding='utf-8')
lines = wordfile.readlines()
wordfile.close()
if self.dictionary:
D = self.dictionary
else:
if not self.name:
# TODO: throw exception?
return false
D = Dictionary.objects.create(
name = self.name,
public = self.public,
language = self.language,
description = self.description,
owner = self.owner,
)
D.save()
for line in lines:
(newword, newdesc, newprio) = splitwordline(line)
newdesc = newdesc[:127] # max. length
# TODO: exception if decoding fails
if len(newword) < 2: continue
try:
if self.uniqueonly:
W = Word.objects.filter(word=newword, dictionary__language=D.language)
W = W[0]
else:
W = Word.objects.get(word=newword, dictionary=D)
except (Word.DoesNotExist, IndexError):
W = Word.objects.create(word=newword, dictionary=D)
if newdesc: W.description = newdesc
if newprio: W.priority = newprio
W.save()
try:
os.remove(self.wordlist_file.path)
except Exception as ex:
logger.exception(ex)
return D
PUZZLE_TYPES = (
('d', _('default crossword puzzle with black squares')), # numbers and black squares in grid. only possible type ATM
('b', _('crossword puzzle with bars (no squares)')),
('s', _('Swedish crossword puzzle (questions in squares)')), # default in most magazines
# other...
)
class Puzzle(models.Model):
"""
"""
title = models.CharField(verbose_name=_('title'), max_length=255, blank=True, help_text=_('title or short description of this puzzle'))
code = models.SlugField(verbose_name=_('code'), max_length=63, editable=False, unique=True, help_text=_('auto-generated URL code of this puzzle'))
public = models.BooleanField(verbose_name=_('public'), default=True, help_text=_('Is this puzzle publicly viewable?'))
language = models.CharField(verbose_name=_('language'), max_length=7, default=settings.LANGUAGE_CODE, help_text=_('main language of this puzzle'), choices=settings.LANGUAGES)
owner = models.ForeignKey(User, verbose_name=_('owner'), help_text=_('owner of the puzzle'))
createdby = models.ForeignKey(User, verbose_name=_('created by'), related_name='+', editable=False, help_text=_('user that saved the puzzle for the first time (may be anonymous)'))
lastchangedby = models.ForeignKey(User, verbose_name=_('last changed by'), related_name='+', editable=False, help_text=_('user that saved the puzzle the latest time'))
createdon = models.DateTimeField(verbose_name=_('created on'), auto_now_add=True, help_text=_('timestamp of creation (first save)'))
lastchangedon = models.DateTimeField(verbose_name=_('last changed on'), auto_now=True, help_text=_('timestamp of last change'))
type = models.CharField(verbose_name=_('type'), max_length=1, default='d', editable=False, help_text=_('type of this puzzle'), choices=PUZZLE_TYPES)
width = models.PositiveSmallIntegerField(verbose_name=_('width'), default=settings.CROISEE_GRIDDEF_X, help_text=_('width of the puzzle (number of characters)'))
height = models.PositiveSmallIntegerField(verbose_name=_('height'), default=settings.CROISEE_GRIDDEF_Y, help_text=_('height of the puzzle (number of characters)'))
text = models.TextField(verbose_name=_('text'), blank=True, help_text=_('characters of the puzzle (solution)'))
numbers = models.TextField(verbose_name=_('numbers'), blank=True, help_text=_('list of coordinates of word start numbers')) # x,y,num\n
questions = models.TextField(verbose_name=_('questions'), blank=True, help_text=_('list of questions')) # 1::h::Description\n
class Meta:
verbose_name = _('crossword puzzle')
verbose_name_plural = _('crossword puzzles')
def __str__(self):
return "%s (%s)" % (self.code, self.title)
def get_absolute_url(self):
return '/puzzle/%s/' % self.code
| 43.601732 | 196 | 0.642276 |
957b9b53b7b5837fb4e6e2e80f7b80d9f1347ef1 | 5,372 | py | Python | tests/views/userprofile/forms_test.py | BMeu/Aerarium | 119946cead727ef68b5ecea339990d982c006391 | [
"MIT"
] | null | null | null | tests/views/userprofile/forms_test.py | BMeu/Aerarium | 119946cead727ef68b5ecea339990d982c006391 | [
"MIT"
] | 139 | 2018-12-26T07:54:31.000Z | 2021-06-01T23:14:45.000Z | tests/views/userprofile/forms_test.py | BMeu/Aerarium | 119946cead727ef68b5ecea339990d982c006391 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from unittest import TestCase
from flask_login import login_user
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import ValidationError
from app import create_app
from app import db
from app.configuration import TestConfiguration
from app.localization import get_language_names
from app.userprofile import User
from app.views.userprofile.forms import UniqueEmail
from app.views.userprofile.forms import UserSettingsForm
class UniqueEmailTest(TestCase):
def setUp(self):
"""
Initialize the test cases.
"""
self.app = create_app(TestConfiguration)
self.app_context = self.app.app_context()
self.app_context.push()
self.request_context = self.app.test_request_context()
self.request_context.push()
db.create_all()
def tearDown(self):
"""
Reset the test cases.
"""
db.session.remove()
db.drop_all()
self.request_context.pop()
self.app_context.pop()
def test_init_default_message(self):
"""
Test initializing the UniqueEmail validator with the default error message.
Expected result: The default error message is used.
"""
validator = UniqueEmail()
self.assertEqual('The email address already is in use.', validator.message)
def test_init_custom_message(self):
"""
Test initializing the UniqueEmail validator with a custom error message.
Expected result: The custom error message is used.
"""
message = 'Another user already claims this email address.'
validator = UniqueEmail(message=message)
self.assertEqual(message, validator.message)
def test_call_no_data(self):
"""
Test the validator on an empty field.
Expected result: No error is raised.
"""
class UniqueEmailForm(FlaskForm):
email = StringField('Email')
form = UniqueEmailForm()
validator = UniqueEmail()
# noinspection PyNoneFunctionAssignment
validation = validator(form, form.email)
self.assertIsNone(validation)
def test_call_unused_email(self):
"""
Test the validator on a field with an unused email address.
Expected result: No error is raised.
"""
class UniqueEmailForm(FlaskForm):
email = StringField('Email')
form = UniqueEmailForm()
form.email.data = 'test@example.com'
validator = UniqueEmail()
# noinspection PyNoneFunctionAssignment
validation = validator(form, form.email)
self.assertIsNone(validation)
def test_call_email_of_current_user(self):
"""
Test the validator on a field with the current user's email address.
Expected result: No error is raised.
"""
class UniqueEmailForm(FlaskForm):
email = StringField('Email')
# Create a test user.
name = 'John Doe'
email = 'test@example.com'
user = User(email, name)
db.session.add(user)
db.session.commit()
# Log in the test user.
login_user(user)
form = UniqueEmailForm()
form.email.data = email
validator = UniqueEmail()
# noinspection PyNoneFunctionAssignment
validation = validator(form, form.email)
self.assertIsNone(validation)
def test_call_email_of_different_user(self):
"""
Test the validator on a field with a different user's email address.
Expected result: An error is raised.
"""
class UniqueEmailForm(FlaskForm):
email = StringField('Email')
# Create a test user.
name = 'John Doe'
email = 'test@example.com'
user = User(email, name)
db.session.add(user)
db.session.commit()
message = 'Another user already claims this email address.'
form = UniqueEmailForm()
form.email.data = email
validator = UniqueEmail()
with self.assertRaises(ValidationError) as thrown_message:
# noinspection PyNoneFunctionAssignment
validation = validator(form, form.email)
self.assertIsNone(validation)
self.assertEqual(message, thrown_message)
class UserSettingsFormTest(TestCase):
def setUp(self):
"""
Initialize the test cases.
"""
self.app = create_app(TestConfiguration)
self.app_context = self.app.app_context()
self.app_context.push()
self.request_context = self.app.test_request_context()
self.request_context.push()
db.create_all()
def tearDown(self):
"""
Reset the test cases.
"""
db.session.remove()
db.drop_all()
self.request_context.pop()
self.app_context.pop()
def test_init(self):
"""
Test that the form is correctly initialized.
Expected result: The language field is initialized with the available languages.
"""
languages = get_language_names(TestConfiguration.TRANSLATION_DIR)
form = UserSettingsForm()
self.assertListEqual(list(languages), form.language.choices)
| 27.690722 | 92 | 0.625838 |
957d235d1750094b4270c5454f14c28d2e8173f1 | 769 | py | Python | Post/migrations/0002_auto_20201110_0901.py | singh-sushil/minorproject | 02fe8c1dce41109447d5f394bb37e10cb34d9316 | [
"MIT"
] | 2 | 2020-12-27T11:28:02.000Z | 2021-01-04T07:52:38.000Z | Post/migrations/0002_auto_20201110_0901.py | singh-sushil/minorproject | 02fe8c1dce41109447d5f394bb37e10cb34d9316 | [
"MIT"
] | 1 | 2020-12-26T13:36:12.000Z | 2020-12-26T13:36:12.000Z | Post/migrations/0002_auto_20201110_0901.py | singh-sushil/minorproject | 02fe8c1dce41109447d5f394bb37e10cb34d9316 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-11-10 03:16
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Post', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(null=True, upload_to='images/'),
),
migrations.AlterField(
model_name='post',
name='phone_number',
field=models.CharField(blank=True, max_length=13, validators=[django.core.validators.RegexValidator(message="Phone number must be 10 digits and entered in the format: '98XXXXXXXX'.", regex='^\\+?1?\\d{10}$')]),
),
]
| 30.76 | 224 | 0.594278 |
957e510be8f3a2b81dab14d254545719454d7bb3 | 2,714 | py | Python | About.py | pm-str/CountDown-More | 90eed19b3d5e417d474f1d79e07c6740f5a9a53d | [
"MIT"
] | null | null | null | About.py | pm-str/CountDown-More | 90eed19b3d5e417d474f1d79e07c6740f5a9a53d | [
"MIT"
] | null | null | null | About.py | pm-str/CountDown-More | 90eed19b3d5e417d474f1d79e07c6740f5a9a53d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'About.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Auto-generated (pyuic5) "About" dialog: author info, links and an OK button.

    Do not hand-edit in the original project; regenerate from About.ui instead.
    """
    def setupUi(self, Dialog):
        # Build the widget tree: a vertical column of three labels plus a
        # free-standing label and an OK button.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 241)
        self.verticalLayoutWidget = QtWidgets.QWidget(Dialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 20, 381, 71))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
        # NOTE(review): this styleSheet value is HTML, not CSS — it has no
        # visual effect; the real link text is set in retranslateUi below.
        self.label_2.setStyleSheet("<a href=\\\"https://github.com/pm-str/CountDown-More\\\">CountDown & More</font> </a>")
        self.label_2.setOpenExternalLinks(True)
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label_3.setOpenExternalLinks(True)
        self.label_3.setObjectName("label_3")
        self.verticalLayout.addWidget(self.label_3)
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setGeometry(QtCore.QRect(10, 130, 361, 51))
        self.label_4.setObjectName("label_4")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(160, 200, 89, 25))
        self.pushButton.setObjectName("pushButton")
        self.retranslateUi(Dialog)
        # OK simply closes the dialog.
        self.pushButton.clicked.connect(Dialog.close)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Assign all user-visible strings (kept translatable via Qt's tr()).
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "Author: Mikhail Pavlov"))
        self.label_2.setText(_translate("Dialog", "GitHub: <a href=\"https://github.com/pm-str/CountDown-and-More\">CountDown & More</a>"))
        self.label_3.setText(_translate("Dialog", "Contacts: <a href=\"mailto:pavl.mikhail@gmail.com\">pavl.mikhail@gmail.com</a>"))
        self.label_4.setText(_translate("Dialog", "Thanks for using this program! \n"
"You\'re free to ask me any question in any time. \n"
"Recommendations and suggestions are welcome."))
        self.pushButton.setText(_translate("Dialog", "OK"))
| 49.345455 | 140 | 0.70339 |
957eae3da3f74babe3abba60f328832ad8f0ef04 | 948 | py | Python | userprofile/migrations/0001_initial.py | jmickela/stalkexchange | 2182fcdfb716dbe3c227c83ac52c567331cc9e73 | [
"Apache-2.0"
] | null | null | null | userprofile/migrations/0001_initial.py | jmickela/stalkexchange | 2182fcdfb716dbe3c227c83ac52c567331cc9e73 | [
"Apache-2.0"
] | 10 | 2020-06-05T17:05:48.000Z | 2022-03-11T23:13:08.000Z | userprofile/migrations/0001_initial.py | jmickela/stalkexchange | 2182fcdfb716dbe3c227c83ac52c567331cc9e73 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the user profile app: one UserProfile per user."""
    # Depends on whichever model is configured as AUTH_USER_MODEL.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('about', models.TextField(verbose_name='About')),
                ('zip', models.CharField(help_text='Your zip code is used to keep search results local', max_length=5, verbose_name='Zip Code', blank=True)),
                ('photo', models.ImageField(upload_to=b'', verbose_name='Photo', blank=True)),
                # One profile per user, reachable as user.profile.
                ('user', models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 36.461538 | 157 | 0.635021 |
957f17448a40b5f7a9697897e18e53b84546771d | 1,244 | py | Python | DML.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | DML.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | DML.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | import pymysql
# Module-level MySQL connection shared by all helper functions below.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before deploying.
conexao = pymysql.connect(
    host='localhost',
    user='root',
    password='admin1234',
    db='ex_01')
# DictCursor makes fetch* return rows as dicts keyed by column name.
cursor = conexao.cursor(pymysql.cursors.DictCursor)
def select(fields, tables, where=None):
    """Run a SELECT over *tables* and return every row.

    NOTE(review): the statement is assembled by string concatenation, so
    any untrusted content in the arguments enables SQL injection.
    """
    global cursor
    parts = ['SELECT ' + fields + ' FROM ' + tables]
    if where:
        parts.append('WHERE ' + where)
    statement = ' '.join(parts)
    cursor.execute(statement)
    return cursor.fetchall()
def insert(values, table, fields=None):
    """Insert one row into *table* and commit.

    values -- comma-separated SQL literals, e.g. "1,'bob'"
    table  -- target table name
    fields -- optional comma-separated column list

    Returns True on success, or the database error object on failure.

    NOTE(review): built by string concatenation — vulnerable to SQL
    injection if any argument ever carries untrusted input.
    """
    global cursor, conexao
    query = 'INSERT INTO ' + table
    if fields:
        query += ' (' + fields + ')'
    query += ' VALUES ' + '(' + values + ')'
    try:
        cursor.execute(query)
        conexao.commit()
        return True
    except pymysql.MySQLError as error:
        # Bug fix: the original caught ValueError, which pymysql does not
        # raise here, so real database failures propagated uncaught.
        return error
def update(table, field, values, where):
    """Set *field* to *values* (quoted) on rows matching *where*, then commit.

    NOTE(review): string-built SQL — injectable if arguments are untrusted.
    """
    global cursor, conexao
    statement = "UPDATE {0} SET {1} = '{2}' WHERE {3}".format(
        table, field, values, where)
    cursor.execute(statement)
    conexao.commit()
def delete(table, where):
    """Delete rows of *table* matching *where* and commit.

    Returns True on completion, matching insert()'s convention.

    NOTE(review): foreign-key checks are disabled for the session and
    never re-enabled afterwards — confirm this is intentional.
    """
    global cursor, conexao
    query = 'DELETE FROM ' + table + ' WHERE ' + where
    # Allow deleting rows that are still referenced elsewhere.
    cursor.execute('SET FOREIGN_KEY_CHECKS = 0;')
    cursor.execute(query)
    conexao.commit()
    # Bug fix: the original returned the *bound method* ``conexao.commit``
    # without calling it; return an explicit success flag instead.
    return True
| 18.848485 | 90 | 0.568328 |
9581ac185297ca50496beb710a3edddd006be6af | 6,792 | py | Python | misc_scripts/downsample.py | rajesh-ibm-power/MITObim | 5d617054975a0e30e0f6c6fb88d21862eaae238f | [
"MIT"
] | 81 | 2015-01-21T21:48:20.000Z | 2022-03-22T12:43:50.000Z | misc_scripts/downsample.py | rajesh-ibm-power/MITObim | 5d617054975a0e30e0f6c6fb88d21862eaae238f | [
"MIT"
] | 47 | 2015-02-16T22:53:00.000Z | 2021-12-16T20:38:17.000Z | misc_scripts/downsample.py | rajesh-ibm-power/MITObim | 5d617054975a0e30e0f6c6fb88d21862eaae238f | [
"MIT"
] | 37 | 2015-01-29T07:34:32.000Z | 2022-03-17T07:20:00.000Z | #!/usr/bin/python
"""downsample
Author: Christoph Hahn (christoph.hahn@uni-graz.at)
February 2017
Extract a random subsample of ~ x % reads from fastq data.
The choice is based on a random number generator. For each fastq read, a random number between 1-100 will be generated. If the random number is smaller than the desired proportion in percent, the read will be kept, otherwise it will be discarded. So to extract ~15 % of the reads any read that gets a random number of <=15 will be kept, which will result in roughly 15% of the reads.
Subsamples can be taken from several fastq files at the same time. We allow to input paired end data in two separate files. If so specified subsamples will be taken so that the pairs will remain intact and the ouptut will be given in interleaved format.
Input fastq files can be compressed with gzip. Mixed compressed / non-compressed input is possible except in the case of paired end data. In this case both read files need to be either compressed or non-compressed.
Examples:
# sample ~20 % of reads from three files
downsample.py -s 20 -r test.fastq.gz -r test2.fastq -r test3.fastq.gz > test.subsample_20.fastq
# sample ~30 % of reads from two files, and interleave reads from the two files on the fly
downsample.py -s 30 --interleave -r test_R1.fastq.gz -r test_R2.fastq.gz > test.interleaved.subsample_30.fastq
# sample ~40 % of reads from three files, defining a seed for the random number generator, to allow replication of the process.
downsample.py -s 20 --rand -421039 -r test.fastq.gz -r test2.fastq -r test3.fastq.gz > test.subsample_40.fastq
# sample ~20 % of reads from two files, compressing results on the fly.
downsample.py -s 20 -r test.fastq.gz -r test2.fastq | gzip > test.subsample_20.fastq.gz
"""
import sys
# import re
# import random
def parse_arguments():
	"""Parse command-line options; print usage and exit(1) when no read files were given."""
	import sys
	import argparse
	VERSION="0.1"
	DESCRIPTION='''
	downsample.py - version: v.%s
	''' %VERSION
	# NOTE(review): the third example below uses "--rand", but the actual
	# option defined further down is "--seed" — confirm and fix upstream.
	parser = argparse.ArgumentParser(description=DESCRIPTION, prog='downsample.py',
	formatter_class=argparse.RawDescriptionHelpFormatter,
	epilog=''' examples:
	# sample ~20 % of reads from three files
	downsample.py -s 20 -r test.fastq.gz -r test2.fastq -r test3.fastq.gz
	# sample ~30 % of reads from two files, and interleave reads in output
	downsample.py -s 30 --interleave -r test_R1.fastq.gz -r test_R2.fastq.gz
	# sample ~40 % of reads from three files, defining a seed for the random number generator, to allow replication of the process.
	downsample.py -s 20 --rand -421039 -r test.fastq.gz -r test2.fastq -r test3.fastq.gz > test.subsample_40.fastq
	# sample ~20 % of reads from two files, compressing results on the fly.
	downsample.py -s 20 -r test.fastq.gz -r test2.fastq | gzip > test.subsample_20.fastq.gz
	#sample ~5% of reads from a single file that contains interleaved read data
	downsample.py -s 5 --interleave -r test.interleaved.fastq.gz | gzip > test.interleaved.subsample_5.fastq.gz
	''')
	# -r may be repeated; args.reads accumulates a list of file names.
	# (The help text typo "repeatadly" lives in a runtime string, left as-is here.)
	parser.add_argument("-r", "--reads", help="Readfile (flag can be used repeatadly to process several files", metavar="<FILE>", action="append")
	parser.add_argument("-s", "--sample", help="Desired size of subsample in percent (1-100; default = 50)", type=int, metavar="<INT>", action="store", default=50)
	parser.add_argument("--interleave", help="Optional. In case of two input files, data will be interleaved from these in the output. Otherwise data will be treated as already interleaved.", action="store_true")
	parser.add_argument("--seed", help="Optional. Seed for random number generator", metavar="<INT>", type=int, action="store")
	parser.add_argument("--version", action="version", version=VERSION)
	# Bail out with usage when no -r/--reads was supplied.
	# (Bare ``print`` emits a blank line under Python 2, the script's target.)
	if not parser.parse_args().reads or len(sys.argv) == 1:
		print
		parser.print_usage()
		print
		sys.exit(1)
	# NOTE(review): parse_args() runs twice (above and here) — harmless but redundant.
	return parser.parse_args()
def check_args(args):
	"""Abort the program unless ``args.sample`` lies within 1-100 percent."""
	if not (1 <= args.sample <= 100):
		sys.exit("\n only sample size 1-100 is valid\n")
def set_seed(seed):
	"""Seed the RNG, drawing and reporting a random seed when none (or 0) was given."""
	import random
	chosen = seed or random.randint(-100000000, 100000000)
	sys.stderr.write("seed for random number generator is: %i\n" % chosen)
	random.seed(chosen)
def decide(string, percent):
	"""Print *string* with probability percent/100 (Python 2 print statement)."""
	import random
	# Keep the record when a 1-100 roll falls inside the requested percentage;
	# the trailing comma suppresses the extra newline (records end in '\n').
	if (random.randint(1,100) <= percent):
		print string,
def sample_interleave(file1, file2, percent):
	"""Randomly keep about *percent* % of read pairs, emitting them interleaved.

	Each iteration consumes one 4-line fastq record from *file1* and one
	from *file2*, rewrites Casava-style " 1:n..."/" 2:n..." header suffixes
	to "/1"/"/2", and hands the combined 8-line chunk to decide().
	"""
	import re
	while True:
		out = ''
		line = file1.readline()
		# A blank line signals end of the first input file.
		if line.strip() == "":
			break
		out+=re.sub(r" 1:n.*", "/1",line)
		# NOTE(review): the remaining three lines of the *first* mate are
		# rewritten with the " 2:n" pattern, not " 1:n" — confirm intended.
		for i in xrange(3):
			out+=re.sub(r" 2:n.*","/2",file1.readline())
		for i in xrange(4):
			out+=re.sub(r" 2:n.*","/2",file2.readline())
		decide(out, percent)
def sample(fi, percent, step):
	"""Randomly keep about *percent* % of *step*-line chunks read from *fi*."""
	import re
	# NOTE(review): ``re`` is unused in this function.
	while True:
		out = ''
		line = fi.readline()
		# A blank line marks end of input.
		if line.strip() == "":
			break
		out+=line
		# Accumulate the rest of the record (step lines total per chunk).
		for i in xrange(step-1):
			out+=fi.readline()
		decide(out, percent)
def main():
	"""Entry point: parse options, seed the RNG, and downsample each input file."""
	import sys
	args = parse_arguments()
	check_args(args)
	sys.stderr.write("\ndownsampling to %i percent\n" %args.sample)
	set_seed(args.seed)
	# Paired-end mode: exactly two files, sampled pairwise and written interleaved.
	if args.interleave and len(args.reads) == 2:
		sys.stderr.write("interleaving sample from input files %s and %s\n" %(args.reads[0], args.reads[1]))
		# Compression is detected from the file extension of the first file;
		# per the module docstring, both paired files must match.
		if args.reads[0][-2:] == "gz":
			import gzip
			with gzip.open(args.reads[0]) as f1:
				with gzip.open(args.reads[1]) as f2:
					sample_interleave(f1, f2, args.sample)
		else:
			with open(args.reads[0]) as f1:
				with open(args.reads[1]) as f2:
					sample_interleave(f1, f2, args.sample)
		# NOTE(review): both files were already closed by the with-blocks above.
		f1.close()
		f2.close()
	else: #that is all other cases
		# Single-end or pre-interleaved input: one chunk is 4 or 8 lines.
		if args.interleave:
			sys.stderr.write("you indicated interleaved input file(s) -> stepsize = 8 lines\n")
			step = 8
		else:
			sys.stderr.write("you indicated single end data -> stepsize = 4 lines\n")
			step = 4
		for readsfile in args.reads:
			if readsfile[-2:] == "gz":
				import gzip
				f = gzip.open(readsfile)
			else:
				f = open(readsfile)
			sample(f, args.sample, step)
			f.close()
	sys.stderr.write("Done!\n\n")
# Entry point: exit with main()'s return value (None -> status 0).
if __name__ == '__main__':
	sys.exit(main())
| 36.12766 | 383 | 0.6197 |
9581c71bce4ce0b38517044c9d5a2c496d783a78 | 585 | py | Python | find_nb.py | DemetriusStorm/100daysofcode | ce87a596b565c5740ae3c48adac91cba779b3833 | [
"MIT"
] | null | null | null | find_nb.py | DemetriusStorm/100daysofcode | ce87a596b565c5740ae3c48adac91cba779b3833 | [
"MIT"
] | null | null | null | find_nb.py | DemetriusStorm/100daysofcode | ce87a596b565c5740ae3c48adac91cba779b3833 | [
"MIT"
] | null | null | null | """
Your task is to construct a building which will be a pile of n cubes.
The cube at the bottom will have a volume of n^3, the cube above will have volume of (n-1)^3 and so on until the top
which will have a volume of 1^3.
You are given the total volume m of the building. Being given m can you find the number n of cubes you will have to
build?
The parameter of the function findNb (find_nb, find-nb, findNb) will be an integer m and you have to return the integer
n such as n^3 + (n-1)^3 + ... + 1^3 = m if such a n exists or -1 if there is no such n.
"""
def find_nb(n):
    """Return the number of cubes in a building of total volume *n*.

    The building stacks cubes of volume 1^3, 2^3, ..., k^3; return the k
    for which 1^3 + 2^3 + ... + k^3 equals ``n`` exactly, or -1 when no
    such k exists.  (Implements the spec from the module docstring; the
    original body was a ``pass`` stub.)
    """
    total = 0
    k = 0
    # The partial sums grow strictly, so stop at the first sum >= n.
    while total < n:
        k += 1
        total += k ** 3
    return k if total == n else -1
9582a4c6372ffccedd8c93f53707273fd3fe596d | 4,597 | py | Python | src/__main__.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | null | null | null | src/__main__.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | 16 | 2021-04-29T14:22:46.000Z | 2021-05-21T04:02:02.000Z | src/__main__.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | 2 | 2021-04-09T16:39:45.000Z | 2021-05-02T19:39:32.000Z | """Class to handle iterating through tweets in real time."""
import json
import os
import pandas as pd
# Said this was unused.
# from bluebird import BlueBird
from bluebird.scraper import BlueBird
from sentiment import PoliticalClassification
from train import TrainingML
col_names32 = "created_at,id,id_str,full_text,truncated,display_text_range,entities,source,in_reply_to_status_id,in_reply_to_status_id_str,in_reply_to_user_id,in_reply_to_user_id_str,in_reply_to_screen_name,user_id,user_id_str,geo,coordinates,place,contributors,is_quote_status,retweet_count,favorite_count,conversation_id,conversation_id_str,favorited,retweeted,possibly_sensitive,possibly_sensitive_editable,lang,supplemental_language,,self_thread"
# api = TwitterClient()
# trained_model = TrainingML()
# sentiment = PoliticalClassification()
user_results = "../data/results.csv"
def search_term():
    """Prompt for a keyword and append up to 50 matching tweets to the results CSV."""
    index = 0
    searching = input("Enter a term to search. \n")
    # BlueBird query: plain keyword items (no target field).
    query = {
        'fields': [
            {'items': [searching]},
        ]
    }
    for tweet in BlueBird().search(query):
        index += 1
        # Round-trip each tweet through a scratch JSON file so pandas can
        # flatten it into CSV columns.
        with open('../data/temp.json', 'w') as temp:
            json.dump(tweet, temp)
        df = pd.read_json('../data/temp.json', lines=True)
        with open(user_results, 'a') as f:
            df.to_csv(f, header=None, index=False)
        if index == 50:
            # Stop after 50 tweets: rewrite the results file with the
            # column-header line prepended.
            dummy_file = user_results + '.bak'
            with open(user_results, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
                write_obj.write(col_names32 + '\n')
                for line in read_obj:
                    write_obj.write(line)
            os.remove(user_results)
            os.rename(dummy_file, user_results)
            break
def search_hashtag():
    """Prompt for a hashtag and append up to 50 matching tweets to the results CSV."""
    index = 0
    searching = input("Enter a hashtag to search. \n")
    # BlueBird query restricted to hashtag matches.
    query = {
        'fields': [
            {'items': [searching], 'target':'hashtag'},
        ]
    }
    for tweet in BlueBird().search(query):
        index += 1
        # NOTE(review): this function uses 'data/temp.json' while
        # search_term() uses '../data/temp.json' — confirm which path is
        # correct relative to the working directory.
        with open('data/temp.json', 'w') as temp:
            json.dump(tweet, temp)
        df = pd.read_json('data/temp.json', lines=True)
        with open(user_results, 'a') as f:
            df.to_csv(f, header=None, index=False)
        if index == 50:
            # Stop after 50 tweets and prepend the header line.
            dummy_file = user_results + '.bak'
            with open(user_results, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
                write_obj.write(col_names32 + '\n')
                for line in read_obj:
                    write_obj.write(line)
            os.remove(user_results)
            os.rename(dummy_file, user_results)
            break
def search_user():
    """Prompt for a username and append up to 50 of their tweets to the results CSV."""
    index = 0
    searching = input("Enter a user to search. \n")
    # BlueBird query restricted to tweets *from* the given user.
    query = {
        'fields': [
            {'items': [searching], 'target':'from'},
        ]
    }
    for tweet in BlueBird().search(query):
        index += 1
        # Round-trip each tweet through a scratch JSON file for pandas.
        with open('data/temp.json', 'w') as temp:
            json.dump(tweet, temp)
        df = pd.read_json('data/temp.json', lines=True)
        with open(user_results, 'a') as f:
            df.to_csv(f, header=None, index=False)
        if index == 50:
            # Stop after 50 tweets and prepend the header line.
            dummy_file = user_results + '.bak'
            with open(user_results, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
                write_obj.write(col_names32 + '\n')
                for line in read_obj:
                    write_obj.write(line)
            os.remove(user_results)
            os.rename(dummy_file, user_results)
            break
def main():
    """Prompt for a search mode (keyword/hashtag/user) and run the chosen search.

    Removes stale result files before starting and the scratch JSON file
    afterwards; all interaction happens on stdin/stdout.
    """
    # Clear leftovers from a previous run; missing files are expected.
    # (Was a bare ``except:`` that also swallowed KeyboardInterrupt;
    # narrowed to the errors os.remove actually raises.)
    try:
        os.remove('../results.csv')
        os.remove('../temp.csv')
    except OSError:
        print()
    # Typo fix in the banner: "Dection" -> "Detection".
    print("Welcome to the Fake News Detection Program! \n")
    print("Would you like to search by:\nkeyword\nhashtag\nuser")
    done = False
    while not done:
        choice = input("keyword/hashtag/user: ")
        if choice == "keyword":
            search_term()
            done = True
        elif choice == "hashtag":
            search_hashtag()
            done = True
        elif choice == "user":
            search_user()
            done = True
        else:
            print("Sorry, Bad Input. Please Enter One of the Options Below")
    # Remove the scratch file produced by the search helpers, if any.
    try:
        os.remove('data/temp.json')
    except OSError:
        print()
if __name__ == '__main__':
    # Run the interactive CLI when executed as a script.
    main()
| 32.146853 | 450 | 0.591255 |
9583bb525f9a10680502ac52b441c849a250aefe | 2,107 | py | Python | cdk-cross-stack-references/app.py | MauriceBrg/aws-blog.de-projects | ce0e86ccdd845c68c41d9190239926756e09c998 | [
"MIT"
] | 36 | 2019-10-01T12:19:49.000Z | 2021-09-11T00:55:43.000Z | cdk-cross-stack-references/app.py | MauriceBrg/aws-blog.de-projects | ce0e86ccdd845c68c41d9190239926756e09c998 | [
"MIT"
] | 2 | 2021-06-02T00:19:43.000Z | 2021-06-02T00:51:48.000Z | cdk-cross-stack-references/app.py | MauriceBrg/aws-blog.de-projects | ce0e86ccdd845c68c41d9190239926756e09c998 | [
"MIT"
] | 29 | 2019-07-23T04:05:15.000Z | 2021-08-12T14:36:57.000Z | #!/usr/bin/env python3
import aws_cdk.aws_iam as iam
import aws_cdk.aws_s3 as s3
from aws_cdk import core
class ExportingStack(core.Stack):
    """CDK stack that creates two EC2-assumable IAM roles for other stacks to reference."""
    # Exposed so a sibling stack can consume them (creates CloudFormation
    # exports when referenced cross-stack).
    exported_role_a: iam.Role
    exported_role_b: iam.Role
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.exported_role_a = iam.Role(
            self,
            "exporting-role-a",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")
        )
        self.exported_role_b = iam.Role(
            self,
            "exporting-role-b",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")
        )
        # The commented block below shows how to hand-craft a CfnOutput that
        # overwrites the auto-generated cross-stack export (kept for reference).
        # compat_output = core.CfnOutput(
        #     self,
        #     id="will-be-overwritten",
        #     # TODO: Update the value according to your environment
        #     value=f"arn:aws:iam::{core.Aws.ACCOUNT_ID}:role/export-exportingroleb66286D65-CZGEAEVHHA32",
        #     export_name="export:ExportsOutputFnGetAttexportingroleb66286D65ArnE09A9A52"
        # )
        # compat_output.override_logical_id("ExportsOutputFnGetAttexportingroleb66286D65ArnE09A9A52")
class ImportingStack(core.Stack):
    """CDK stack with an S3 bucket whose policy grants GetObject to an imported role."""
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        role_a: iam.Role,
        role_b: iam.Role,
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)
        # DESTROY removal policy: the bucket is deleted with the stack.
        test_bucket = s3.Bucket(
            self,
            "some-bucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # NOTE(review): role_b is accepted but currently unused (commented
        # out in the principals list below) — confirm this is intentional.
        test_bucket.add_to_resource_policy(
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                principals=[
                    role_a,
                    # role_b
                ],
                resources=[
                    test_bucket.arn_for_objects("*"),
                    test_bucket.bucket_arn
                ]
            )
        )
# Wire the stacks together: the importing stack consumes the roles created
# by the exporting stack, which makes CDK emit cross-stack references.
app = core.App()
export = ExportingStack(app, "export")
ImportingStack(
    app,
    "import",
    role_a=export.exported_role_a,
    role_b=export.exported_role_b
)
app.synth()
| 25.385542 | 106 | 0.570954 |
9583cbd4d2fe5cf7c96a5c027ce0ed71ff87cf28 | 6,344 | py | Python | test_trustpaylib.py | beezz/trustpaylib | a56d12d6ff97ad02034d85940ec09abbfe9eba76 | [
"BSD-3-Clause"
] | null | null | null | test_trustpaylib.py | beezz/trustpaylib | a56d12d6ff97ad02034d85940ec09abbfe9eba76 | [
"BSD-3-Clause"
] | null | null | null | test_trustpaylib.py | beezz/trustpaylib | a56d12d6ff97ad02034d85940ec09abbfe9eba76 | [
"BSD-3-Clause"
] | 1 | 2016-05-27T07:12:47.000Z | 2016-05-27T07:12:47.000Z | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
import pytest
import trustpaylib
# Python 2/3 compatibility shim: under Python 3 there is no builtin
# ``unicode``, so provide an identity replacement and record the version.
try:
    unicode
except NameError:
    py3 = True
    def unicode(s):
        return s
else:
    py3 = False
class TestTrustPayCore:
    """Unit tests for trustpaylib: request validation, signing, environments and links."""
    # Shared fixture data: merchant credentials and a minimal valid pay request.
    secret_key = "abcd1234"
    aid = "9876543210"
    pay_request = trustpaylib.build_pay_request(
        AID=aid,
        AMT="123.45",
        CUR="EUR",
        REF="1234567890",
    )
    def test_validate_request(self):
        """validate_request rejects empty, pre-signed and malformed requests."""
        pr = trustpaylib.build_pay_request()
        with pytest.raises(ValueError):
            trustpaylib.TrustPay.validate_request(pr)
        trustpaylib.TrustPay.validate_request(self.pay_request)
        pr = trustpaylib.build_pay_request(SIG="SIG")
        with pytest.raises(ValueError):
            trustpaylib.TrustPay.validate_request(pr)
        env = trustpaylib.build_environment(
            aid=self.aid,
            secret_key=self.secret_key,
        )
        tp_client = trustpaylib.TrustPay(env)
        pr = tp_client.sign_request(self.pay_request)
        trustpaylib.TrustPay.validate_request(pr)
        # Invalid currency code.
        pr = trustpaylib.build_pay_request(
            AID=self.aid,
            AMT="123.45",
            REF="1234567890",
            CUR="GRG",
        )
        with pytest.raises(ValueError):
            trustpaylib.TrustPay.validate_request(pr)
        # Invalid language code.
        pr = trustpaylib.build_pay_request(
            AID=self.aid,
            AMT="123.45",
            REF="1234567890",
            CUR="EUR",
            LNG="prd",
        )
        with pytest.raises(ValueError):
            trustpaylib.TrustPay.validate_request(pr)
        # Invalid country code.
        pr = trustpaylib.build_pay_request(
            AID=self.aid,
            AMT="123.45",
            REF="1234567890",
            CUR="EUR",
            CNT="tra",
        )
        with pytest.raises(ValueError):
            trustpaylib.TrustPay.validate_request(pr)
        # Amount with too many decimal places.
        pr = trustpaylib.build_pay_request(
            AID=self.aid,
            AMT="123.4566",
            REF="1234567890",
            CUR="EUR",
            CNT="tra",
        )
        with pytest.raises(ValueError):
            trustpaylib.TrustPay.validate_request(pr)
    def test_cls_creation(self):
        """Builders reject unknown keys and normalize value types/defaults."""
        with pytest.raises(ValueError):
            trustpaylib.build_environment(lol='olo')
        assert trustpaylib.build_environment(aid=self.aid)
        pr = trustpaylib.build_pay_request(
            AMT=123.45,
            NURL=None,
        )
        if not py3:
            assert isinstance(pr.AMT, unicode)
        assert pr.NURL is None
        assert pr.RURL is None
    def test_sign_msg(self):
        """Signature of AID+AMT+CUR+REF matches the known-good digest."""
        sign = (
            "DF174E635DABBFF7897A82822521DD7"
            "39AE8CC2F83D65F6448DD2FF991481EA3"
        )
        msg = "".join((
            self.aid,
            self.pay_request.AMT,
            self.pay_request.CUR,
            self.pay_request.REF,
        ))
        sign_message = trustpaylib.sign_message
        assert sign_message(self.secret_key, msg) == sign
        env = trustpaylib.build_environment(
            aid=self.aid,
            secret_key=self.secret_key,
        )
        tp_client = trustpaylib.TrustPay(env)
        assert sign_message(
            self.secret_key,
            tp_client.create_signature_msg(self.pay_request),
        ) == sign
        assert tp_client.pay_request_signature(self.pay_request) == sign
    def test_environment(self):
        """Environment builders pick the right API URL and allow overrides."""
        env = trustpaylib.build_environment(
            aid=self.aid,
            secret_key=self.secret_key,
        )
        assert env.redirect_url is None
        assert env.aid and env.secret_key
        assert env.api_url == trustpaylib.API_URL
        env = trustpaylib.build_environment(
            aid=self.aid,
            secret_key=self.secret_key,
            api_url="grg prd"
        )
        assert env.api_url == "grg prd"
        env = trustpaylib.build_test_environment(
            aid=self.aid,
            secret_key=self.secret_key,
        )
        assert env.api_url == trustpaylib.TEST_API_URL
        env = trustpaylib.build_test_environment(
            aid=self.aid,
            secret_key=self.secret_key,
            api_url="grg prd"
        )
        assert env.api_url == "grg prd"
    def test_filter_nones(self):
        """_filter_dict_nones drops None-valued entries and keeps the rest."""
        assert not trustpaylib._filter_dict_nones({'none': None})
        filtered = trustpaylib._filter_dict_nones({
            "none": None,
            "value": "Value",
        })
        assert "none" not in filtered
        assert "value" in filtered
    def test_build_link(self):
        """Client link building matches the module-level helper when unsigned."""
        env = trustpaylib.build_environment(
            aid=self.aid,
            secret_key=self.secret_key,
        )
        tp_client = trustpaylib.TrustPay(env)
        assert tp_client.build_link(self.pay_request)
        assert trustpaylib.build_link_for_request(
            env.api_url, self.pay_request)
        client_link = tp_client.build_link(
            self.pay_request,
            sign=False,
        )
        link = trustpaylib.build_link_for_request(
            env.api_url, self.pay_request)
        assert client_link == link
    def test_result_codes(self):
        """Result-code descriptions exist for redirects and notifications."""
        redirect = trustpaylib.build_redirect(
            RES=1001,
            REF="12345",
            PID="1234",
        )
        assert trustpaylib.TrustPay.get_result_desc_from_redirect(redirect)
        notification = trustpaylib.build_notification(
            RES=1001,
            REF="12345",
        )
        assert len(trustpaylib.TrustPay.get_result_desc_from_notification(
            notification)) == 2
    def test_check_notif_signature(self):
        """A notification carrying a known-good SIG verifies successfully."""
        notification = trustpaylib.build_notification(
            AID=unicode("1234567890"),
            TYP=unicode("CRDT"),
            AMT=unicode("123.45"),
            CUR=unicode("EUR"),
            REF=unicode("9876543210"),
            RES=unicode("0"),
            TID=unicode("11111"),
            OID=unicode("1122334455"),
            TSS=unicode("Y"),
            SIG=unicode(
                "97C92D7A0C0AD99CE5DE55C3597D5ADA"
                "0D423991E2D01938BC0F684244814A37"
            )
        )
        env = trustpaylib.build_environment(
            aid=unicode("1234567890"),
            secret_key=self.secret_key,
        )
        tp_client = trustpaylib.TrustPay(env)
        assert tp_client.check_notification_signature(notification)
| 29.784038 | 75 | 0.576135 |
9584860203f1962d57d77ed27e2fa1c1d418bbe7 | 606 | py | Python | Day 01/AdventOfCode01.py | KelvinFurtado/Advent-of-Code-2020 | 7aab4d542507222ef6aaef699d16cc1e2936e1d5 | [
"MIT"
] | null | null | null | Day 01/AdventOfCode01.py | KelvinFurtado/Advent-of-Code-2020 | 7aab4d542507222ef6aaef699d16cc1e2936e1d5 | [
"MIT"
] | null | null | null | Day 01/AdventOfCode01.py | KelvinFurtado/Advent-of-Code-2020 | 7aab4d542507222ef6aaef699d16cc1e2936e1d5 | [
"MIT"
] | null | null | null | inputfile = open('inputDay01.txt', 'r')
# Parse the puzzle input: one integer per line.
values = [int(i) for i in inputfile.readlines()]
#PART1
def aoc01(numbers, value):
    """Return the product of two entries of *numbers* that sum to *value*.

    Returns None when no such pair exists.
    """
    # Set membership is O(1) versus the original O(n) list scan per entry.
    seen = set(numbers)
    for x in numbers:
        if value - x in seen:
            return x * (value - x)
#PART2
def aoc02(numbers, value):
    """Return the product of three entries of *numbers* that sum to *value*.

    Returns None when no such triple exists.  Bug fix: the original
    multiplied ``num1``/``num2`` that could still be None (TypeError)
    whenever an outer iteration found no partial pair.
    """
    seen = set(numbers)
    for i, x in enumerate(numbers):
        for y in numbers[i:]:
            z = value - x - y
            if z in seen:
                return x * y * z
# Print the answers for both puzzle parts.
print("Part1:",aoc01(values,2020))
print("Part2:",aoc02(values,2020))
inputfile.close() | 25.25 | 48 | 0.531353 |
9584cb7682c9b757f9f395cf4af9a536e43da394 | 1,686 | py | Python | src/backend/api/views/auth_views.py | zackramjan/motuz | 892252eb50acbd8135bf9df9872df5e4cfe6277b | [
"MIT"
] | 84 | 2019-05-10T14:56:48.000Z | 2022-03-19T17:07:24.000Z | src/backend/api/views/auth_views.py | zackramjan/motuz | 892252eb50acbd8135bf9df9872df5e4cfe6277b | [
"MIT"
] | 226 | 2019-05-28T21:59:22.000Z | 2022-03-09T10:58:24.000Z | src/backend/api/views/auth_views.py | zackramjan/motuz | 892252eb50acbd8135bf9df9872df5e4cfe6277b | [
"MIT"
] | 16 | 2019-09-27T01:35:49.000Z | 2022-03-08T16:18:50.000Z | import logging
from flask import request
from flask_restplus import Resource, Namespace, fields
from ..managers import auth_manager
from ..managers.auth_manager import token_required
from ..exceptions import HTTP_EXCEPTION
# Flask-RESTPlus namespace grouping the authentication endpoints.
api = Namespace('auth', description='Authentication related operations')
# Request payload schema validated on login.
dto = api.model('auth', {
    'username': fields.String(required=True, description='The (Linux) username'),
    'password': fields.String(required=True, description='The user password'),
})
@api.route('/login/')
class UserLogin(Resource):
    @api.expect(dto, validate=True)
    def post(self):
        """Login and retrieve JWT token"""
        try:
            return auth_manager.login_user(request.json), 200
        except HTTP_EXCEPTION as e:
            # Domain errors carry their own HTTP code and payload.
            api.abort(e.code, e.payload)
        except Exception as e:
            # API boundary: log unexpected failures and answer 500.
            logging.exception(e, exc_info=True)
            api.abort(500, str(e))
@api.route('/refresh/')
class TokenRefresh(Resource):
    def post(self):
        """Use JWT refresh token to retrienve a new JWT access token"""
        try:
            return auth_manager.refresh_token(), 200
        except HTTP_EXCEPTION as e:
            # Domain errors carry their own HTTP code and payload.
            api.abort(e.code, e.payload)
        except Exception as e:
            # API boundary: log unexpected failures and answer 500.
            logging.exception(e, exc_info=True)
            api.abort(500, str(e))
@api.route('/logout/')
class LogoutAPI(Resource):
    def post(self):
        """Logout and invalidate JWT token"""
        try:
            return auth_manager.logout_user(), 200
        except HTTP_EXCEPTION as e:
            # Domain errors carry their own HTTP code and payload.
            api.abort(e.code, e.payload)
        except Exception as e:
            # API boundary: log unexpected failures and answer 500.
            logging.exception(e, exc_info=True)
            api.abort(500, str(e))
| 29.068966 | 81 | 0.641163 |
95850f5ad82092788d3a213273d93bc24cd594e7 | 4,079 | py | Python | src/api/algorithm/abstract.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | [
"MIT"
] | null | null | null | src/api/algorithm/abstract.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | [
"MIT"
] | null | null | null | src/api/algorithm/abstract.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod, abstractproperty
from typing import Dict
__all__ = ["AbstractAlgorithm"]
class AbstractAlgorithm(ABC):
    """Base class for text-comparison algorithms.

    Docstrings below are English translations of the original Russian;
    the HTML produced by describe_result is intentionally left in Russian
    because it is user-facing runtime output.
    """
    @abstractmethod
    def preprocess(self, text: str) -> Dict:
        """Applied to every text fragment.

        :param text: the fragment's text
        :type text: str
        :rtype: Dict
        :return: a dict of the form {parameter: value}
        """
        pass
    @abstractmethod
    def compare(self, res1: Dict, res2: Dict) -> Dict:
        """Compare two dicts returned by preprocess.

        Applied to every unique pair of text fragments.

        :type res1: Dict
        :type res2: Dict
        :rtype: Dict
        :return: a dict of the form {
            intersection: numeric link strength between 0 and 1,
            data: other link characteristics
        }
        """
        pass
    @abstractproperty
    def name(self) -> str:
        pass
    @abstractproperty
    def preprocess_keys(self) -> list:
        """The set of dict keys the preprocess method returns.

        Results of preprocess for different algorithms may overlap;
        identical results are stored under identical keys, so when a
        fragment already has all required results preprocess need not run.

        :rtype: list
        """
        pass
    def analyze(self, res: Dict, acc=None):
        """Accumulate overall per-fragment statistics.

        Applied to every fragment; the resulting accumulator is then
        passed to AbstractAlgorithm.describe_result to obtain the overall
        summary.  Unlike the other methods this one has a default
        implementation, but subclasses may extend it for more specific
        results.

        :param res: result of AbstractAlgorithm.preprocess
        :type res: Dict
        :param acc: accumulator holding data from all previous fragments
        :return: the accumulator
        """
        if acc is None:
            acc = {
                'fragments': 0,
                'edges': 0,
                'sum_intersect': 0
            }
        acc['fragments'] += 1
        return acc
    def analyze_comparison(self, res1: Dict, res2: Dict,
                           comp_res: Dict, acc):
        """Accumulate statistics for one fragment comparison (applied to every link).

        :param res1: result of AbstractAlgorithm.preprocess
        :type res1: Dict
        :param res2: result of AbstractAlgorithm.preprocess
        :type res2: Dict
        :param comp_res: result of AbstractAlgorithm.compare(res1, res2)
        :type comp_res: Dict
        :param acc: the same accumulator as in AbstractAlgorithm.analyze
        """
        acc['edges'] += 1
        acc['sum_intersect'] += comp_res['intersection']
        return acc
    def describe_result(self, acc) -> str:
        """Describe the algorithm's overall results as an HTML string (Russian UI text).

        :param acc: result of applying AbstractAlgorithm.analyze to all fragments
        :rtype: str
        """
        # Guard against division by zero when no links were found.
        if acc['edges']:
            avg_inter = f"{acc['sum_intersect'] / acc['edges'] * 100:.2f}%"
        else:
            avg_inter = "0%"
        return f"""
            Проанализировано фрагментов: {acc['fragments']} <br>
            Найдено связей: {acc['edges']} <br>
            Среднее пересечение:
            {avg_inter}
        """
    @abstractmethod
    def describe_comparison(self, comp_dict) -> str:
        """Describe the result of comparing two fragments.

        :param comp_dict: dict from AbstractAlgorithm.compare
        :rtype: str
        """
        pass
    @abstractmethod
    def describe_preprocess(self, prep_dict) -> str:
        """Describe a fragment's preprocessing result as an HTML string.

        :param prep_dict: dict from AbstractAlgorithm.preprocess
        :rtype: str
        """
        pass
| 31.620155 | 79 | 0.61314 |
95851ced698edaf85c4890ce3e5ba9ddb348e00d | 304 | py | Python | buidl/libsec_build.py | jamesob/buidl-python | 84ef0284c2bff8bb09cb804c6a02f99e78e59dbe | [
"MIT"
] | 45 | 2020-10-23T13:03:41.000Z | 2022-03-27T17:32:43.000Z | buidl/libsec_build.py | jamesob/buidl-python | 84ef0284c2bff8bb09cb804c6a02f99e78e59dbe | [
"MIT"
] | 87 | 2020-10-23T19:59:36.000Z | 2022-03-03T18:05:58.000Z | buidl/libsec_build.py | jamesob/buidl-python | 84ef0284c2bff8bb09cb804c6a02f99e78e59dbe | [
"MIT"
] | 8 | 2020-11-26T14:29:32.000Z | 2022-03-01T23:00:44.000Z | #!/usr/bin/python3
from cffi import FFI
source = open("libsec.h", "r").read()
header = """
#include <secp256k1.h>
#include <secp256k1_extrakeys.h>
#include <secp256k1_schnorrsig.h>
"""
ffi = FFI()
ffi.cdef(source)
ffi.set_source("_libsec", header, libraries=["secp256k1"])
ffi.compile(verbose=True)
| 16.888889 | 58 | 0.703947 |
9587650c0783fa597913cbb4c287026be8eb0512 | 938 | py | Python | src/crud-redmine/client/kafka_client.py | LeoNog96/IntegradorRedmine | bb5477caa9088665b3d18e26530609ba831517d9 | [
"MIT"
] | null | null | null | src/crud-redmine/client/kafka_client.py | LeoNog96/IntegradorRedmine | bb5477caa9088665b3d18e26530609ba831517d9 | [
"MIT"
] | null | null | null | src/crud-redmine/client/kafka_client.py | LeoNog96/IntegradorRedmine | bb5477caa9088665b3d18e26530609ba831517d9 | [
"MIT"
] | null | null | null | from kafka import KafkaConsumer, KafkaProducer
import json
class Producer:
    """Thin wrapper around ``KafkaProducer`` bound to a single topic.

    Every message is JSON-serialized before being sent.
    """

    producer = None
    producer_topic = None

    def __init__(self, server, topic):
        """Connect to *server* and remember *topic* as the default destination."""
        self.producer = KafkaProducer (
            bootstrap_servers=[server],
            value_serializer=lambda x: json.dumps(x).encode('utf-8')
        )
        self.producer_topic = topic

    def send_message(self, msg):
        """Publish *msg* on the configured topic and flush immediately.

        Bug fix: the original referenced the bare names ``producer`` and
        ``producer_topic`` (NameError at call time); they are instance
        attributes and must be accessed through ``self``.
        """
        self.producer.send(self.producer_topic, value=msg)
        self.producer.flush()
class Consumer:
    """Thin wrapper around ``KafkaConsumer`` subscribed to a single topic.

    Every received message value is JSON-decoded.
    """

    __consumer = None
    consumer_topic = None

    def __init__(self, server, topic):
        """Connect to *server* and subscribe to *topic*.

        Bug fix: the original ignored *topic*, subscribing to the
        hard-coded 'kafka-python-topic' instead, and never stored
        ``consumer_topic``.
        """
        self.consumer_topic = topic
        self.__consumer = KafkaConsumer(
            topic,
            bootstrap_servers=[server],
            auto_offset_reset='latest',
            enable_auto_commit=True,
            value_deserializer=lambda x: json.loads(x.decode('utf-8'))
        )

    def get_consumer(self):
        """Return the underlying ``KafkaConsumer`` instance."""
        return self.__consumer
958794f84d8fee2575a58b5c2e83f3a77dc04ee4 | 2,038 | py | Python | remove_empty_csv's.py | asadrazaa1/emails-extraction | bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde | [
"Unlicense"
] | null | null | null | remove_empty_csv's.py | asadrazaa1/emails-extraction | bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde | [
"Unlicense"
] | null | null | null | remove_empty_csv's.py | asadrazaa1/emails-extraction | bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde | [
"Unlicense"
] | null | null | null | import psycopg2
import sys
from nltk.tokenize import sent_tokenize
import re
import csv
import os
# pmid {16300001 - 16400000}
# Sweep the PMID space in 100000-wide windows and delete any per-window
# CSV export that contains only its header row (i.e. holds no data).
try:
    # starting_pmid = 16300001
    # intermediate_pmid = 16400000
    starting_pmid = 100001
    intermediate_pmid = 200000
    ending_pmid = 32078260
    while 1:
        if intermediate_pmid<ending_pmid:
            #open existing csv files
            with open('pmid {%s - %s}.csv' % (starting_pmid, intermediate_pmid), mode='r') as csv_file:
                reader = csv.reader(csv_file)
                # exactly one row means header-only -> the export is empty
                if len(list(reader))==1:
                    #removing the file if there is only header in the file and there is no data
                    os.remove('pmid {%s - %s}.csv' % (starting_pmid, intermediate_pmid))
                    print ("File " + str(starting_pmid) + " - " + str(intermediate_pmid) + " has been removed.")
                else:
                    print ("File " + str(starting_pmid) + " - " + str(intermediate_pmid) + " is not empty.")
            # advance to the next 100000-wide PMID window
            starting_pmid = intermediate_pmid + 1
            intermediate_pmid = intermediate_pmid + 100000
        else:
            # final (partial) window: from starting_pmid up to the maximum PMID
            print("Entering base case ...")
            with open('pmid {%s - %s}.csv' % (starting_pmid, ending_pmid), mode='r') as csv_file:
                reader = csv.reader(csv_file)
                if len(list(reader))==1:
                    os.remove('pmid {%s - %s}.csv' % (starting_pmid, ending_pmid))
                    print ("File " + str(starting_pmid) + " - " + str(ending_pmid) + " has been removed.")
                else:
                    print ("File " + str(starting_pmid) + " - " + str(ending_pmid) + " is not empty.")
            break
    #94357012, total rows
    #51556076, null affiliation
    #42800936, not null affiliation
    #21, minimum pmid
    #32078260, maximum pmid
    # print(len(temp_row))
    sys.exit('Script completed')
# NOTE(review): Exception already subsumes psycopg2.Error, and the caught
# error object is discarded -- consider logging it before exiting.
except (Exception, psycopg2.Error) as error:
    sys.exit('Script failed')
| 33.966667 | 112 | 0.552993 |
9587f1170fd14bbc3fde52488cf4748e36a462f2 | 439 | py | Python | src/core.py | unior-nlp-research-group/Ghigliottina | d78cf54cb7412301dd35ef3f3d6419a0350fe3af | [
"Apache-2.0"
] | 2 | 2021-01-21T11:20:57.000Z | 2021-01-21T17:51:07.000Z | src/core.py | unior-nlp-research-group/Ghigliottina | d78cf54cb7412301dd35ef3f3d6419a0350fe3af | [
"Apache-2.0"
] | null | null | null | src/core.py | unior-nlp-research-group/Ghigliottina | d78cf54cb7412301dd35ef3f3d6419a0350fe3af | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import utility
###################
## main
###################
# Entry point: load a pickled association matrix and start the solver.
if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", help="the path to the model file")
    args = parser.parse_args()
    print('Loading association matrix')
    matrix = utility.loadObjFromPklFile(args.model)
    # NOTE(review): interactive_solver is neither defined nor imported in
    # this module -- this call raises NameError; confirm the intended import.
    interactive_solver(matrix)
| 23.105263 | 79 | 0.603645 |
958823c46f3203892c3a9a7227ee987c3b6cf53a | 3,412 | py | Python | volcengine_ml_platform/datasets/image_dataset.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | 11 | 2021-09-08T09:20:54.000Z | 2022-02-18T06:45:47.000Z | volcengine_ml_platform/datasets/image_dataset.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | 1 | 2021-09-24T03:21:07.000Z | 2021-09-24T06:32:26.000Z | volcengine_ml_platform/datasets/image_dataset.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | 4 | 2021-09-23T07:54:06.000Z | 2021-11-27T09:40:55.000Z | import json
from collections.abc import Callable
from typing import Optional
import numpy as np
from PIL import Image
from volcengine_ml_platform.datasets.dataset import _Dataset
from volcengine_ml_platform.io.tos_dataset import TorchTOSDataset
class ImageDataset(_Dataset):
    """Image dataset backed by the ml_engine platform.

    Wraps downloading, splitting and loading of an image dataset whose
    samples are described by a manifest file (one JSON object per line).

    Args:
        dataset_id(str): dataset id assigned by ml_engine at creation time
        annotation_id(str, None): annotation-set id assigned by ml_engine
        local_path(str): local directory the data is downloaded into
        tos_source(str, None): TOS url of the dataset manifest; usually unset
    """
    def download(self, local_path: str = "ImageDataset", limit=-1):
        """Download the dataset from TOS to a local directory.

        Args:
            local_path(str): target download directory
            limit (int, optional): maximum number of records to download.
                Defaults to -1 (no limit).
                NOTE(review): ``limit`` is currently ignored by the body;
                confirm whether it should be forwarded to
                ``_create_manifest_dataset``.
        """
        if local_path:
            self.local_path = local_path
        self._create_manifest_dataset(
            manifest_keyword="ImageURL",
        )
    def split(self, training_dir: str, testing_dir: str, ratio=0.8, random_state=0):
        # Delegate to the generic splitter, producing two ImageDataset halves.
        return super().split_dataset(
            ImageDataset, training_dir, testing_dir, ratio, random_state
        )
    def load_as_np(self, offset=0, limit=-1):
        """load images as numpy array
        Args:
            offset (int, optional): num of images to skip. Defaults to 0.
            limit (int, optional): num of images to load. Defaults to -1.
        Returns:
            np array of images
            list of annotations
        """
        images = []
        annotations = []
        with open(self._manifest_path(), encoding="utf-8") as f:
            for i, line in enumerate(f):
                manifest_line = json.loads(line)
                if i < offset:
                    continue
                if limit != -1 and i >= offset + limit:
                    break
                file_path = manifest_line["Data"]["FilePath"]
                image = Image.open(file_path)
                images.append(np.asarray(image))
                annotations.append(manifest_line["Annotation"])
        # NOTE(review): np.array() over differently-sized images yields an
        # object array; this assumes all images share one shape -- confirm.
        return np.array(images), annotations
    def parse_image_manifest(self, manifest_file_path):
        """Parse a manifest file into parallel lists of buckets/keys/annotations."""
        # parse manifest
        manifest_info = {"buckets": [], "keys": [], "annotations": []}
        with open(manifest_file_path, encoding="utf-8") as f:
            for _, line in enumerate(f):
                manifest_line = json.loads(line)
                url = manifest_line["Data"]["ImageURL"]
                # URL form assumed: <scheme>://<bucket>/<key...>
                bucket = url.split("//")[1].split("/")[0]
                key = url.split(f"{bucket}/")[1]
                manifest_info["buckets"].append(bucket)
                manifest_info["keys"].append(key)
                manifest_info["annotations"].append(
                    manifest_line["Annotation"],
                )
        return manifest_info
    def init_torch_dataset(
        self,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ):
        """Build a ``TorchTOSDataset`` over this dataset's manifest.

        Args:
            transform: optional callable applied to each image
            target_transform: optional callable applied to each annotation
        """
        manifest_info = self.get_manifest_info(self.parse_image_manifest)
        torch_dataset = TorchTOSDataset(
            manifest_info=manifest_info,
            transform=transform,
            target_transform=target_transform,
        )
        return torch_dataset
| 32.188679 | 84 | 0.586166 |
95887e566eb9b0860bede603c8c4d3bf2e059af1 | 5,634 | py | Python | main.py | TrueMLGPro/MultiDownloader | 8ef6cdccbe253fe79cf3cec9ed83fd40c3f834bc | [
"Apache-2.0"
] | 3 | 2021-02-05T09:33:39.000Z | 2021-07-25T18:39:43.000Z | main.py | TrueMLGPro/MultiDownloader | 8ef6cdccbe253fe79cf3cec9ed83fd40c3f834bc | [
"Apache-2.0"
] | null | null | null | main.py | TrueMLGPro/MultiDownloader | 8ef6cdccbe253fe79cf3cec9ed83fd40c3f834bc | [
"Apache-2.0"
] | 1 | 2022-02-28T21:41:12.000Z | 2022-02-28T21:41:12.000Z | # Copyright 2020 TrueMLGPro
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pyfiglet
import subprocess
import sys
# CLI definition; -h is suppressed here and re-added under the Misc group.
parser = argparse.ArgumentParser(add_help=False)
group_download = parser.add_argument_group('Download Tools')
group_download.add_argument('URL', metavar='url', help='a url to download', nargs='?')
group_download.add_argument('-c', '--curl', dest='curl', action='store_true', help='Uses curl for download')
group_download.add_argument('-w', '--wget', dest='wget', action='store_true', help='Uses wget for download')
group_download.add_argument('-H', '--httrack', dest='httrack', action='store_true', help='Uses httrack for mirroring')
group_download_args = parser.add_argument_group('Download Arguments')
group_download_args.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Makes output more detailed')
group_download_args.add_argument('-d', '--depth', dest='depth', help='Defines depth of mirror (httrack only)')
group_download_args.add_argument('-eD', '--ext-depth', dest='ext_depth', help='Defines depth of mirror for external links (httrack only)')
group_download_args.add_argument('-cN', '--conn-num', dest='conn_num', help='Defines a number of active connections during mirroring (httrack only)')
group_files = parser.add_argument_group('Files')
group_files.add_argument('-f', '--filename', dest='filename', help='Sets filename (or path) for file which is being downloaded')
group_misc = parser.add_argument_group('Misc')
group_misc.add_argument('-u', '--update', dest='update', action='store_true', help='Updates MultiDownloader')
group_misc.add_argument('-h', '--help', action='help', help='Shows this help message and exits')
# Parsed once at import time; consumed by main() and the dispatch below.
args = parser.parse_args()
def banner():
    """Print the ASCII-art program banner followed by the credit line."""
    rendered = pyfiglet.figlet_format("MultiDownloader", font="small")
    credit = "Made by TrueMLGPro | v1.0"
    print(rendered + credit)
def menu():
    """Print the numbered interactive menu."""
    options = (
        "1. Download using curl",
        "2. Download using wget",
        "3. Mirror website using httrack",
        "4. Update Multidownloader",
        "5. Exit",
        "6. Get args",
    )
    print("\n" + "\n".join(options))
def main():
    """Interactive fallback menu, shown when no CLI arguments were given."""
    if (len(sys.argv) <= 1):
        banner()
        menu()
        while True:
            choice = input("[>>] ")
            if (choice == "1"):
                print("[i] Using curl to download...")
                curl_download(input("[+] Enter URL: "),
                              input("[+] Enter filename: "),
                              input("[+] Verbose? (y/n): "))
                menu()
            elif (choice == "2"):
                print("[i] Using wget to download...")
                wget_download(input("[+] Enter URL: "),
                              input("[+] Enter filename: "),
                              input("[+] Verbose? (y/n): "))
                menu()
            elif (choice == "3"):
                print("[i] Using httrack to mirror...")
                httrack_download(input("[+] Enter URL: "),
                                 input("[+] Enter project path for mirror: "),
                                 input("[+] Enter depth level: "),
                                 input("[+] Enter external links depth level: "),
                                 input("[+] Enter number of connections: "),
                                 input("[+] Verbose? (y/n): "))
            elif (choice == "4"):
                print("[i] Getting latest updates for MultiDownloader..." + "\n")
                subprocess.call('sh scripts/update.sh', shell=True)
                menu()
            elif (choice == "5"):
                print("[!] Exiting...")
                sys.exit()
            elif (choice == "6"):
                print(args)
            elif type(choice) != int:
                # NOTE(review): input() always returns str, so this branch
                # matches every unrecognized choice and exits -- confirm intent.
                print("[!!!] Invalid choice. Exiting...")
                sys.exit()
def curl_download(url, filename, verbose=None):
    """Download *url* with curl, saving the result as *filename*.

    Args:
        url: the URL to download.
        filename: output file name, passed to curl's ``-o`` option.
        verbose: "y" enables curl's verbose output; any other value is quiet.

    Fixes: the original interpolated the literal string ``(unknown)`` into
    the command and ignored *filename*; it also built a shell string from
    user-supplied input (shell-injection risk). The command is now an argv
    list executed without a shell.
    """
    print("[i] Downloading using curl - " + url + " with filename: " + filename)
    cmd = ["curl", "-L", "-o", filename]
    if verbose == "y":
        cmd.append("-v")
    cmd.append(url)
    subprocess.call(cmd)
def wget_download(url, filename, verbose=None):
    """Download *url* with wget, saving the result as *filename* (``-O``).

    Args:
        url: the URL to download.
        filename: output file name, passed to wget's ``-O`` option.
        verbose: "y" enables wget's verbose output; any other value is quiet.

    Fixes: the original interpolated the literal string ``(unknown)`` into
    the command and ignored *filename*; it also built a shell string from
    user-supplied input (shell-injection risk). The command is now an argv
    list executed without a shell.
    """
    print("[i] Downloading using wget - " + url + " with filename: " + filename + "\n" + ("Verbose: ") + str(verbose))
    cmd = ["wget", "-O", filename]
    if verbose == "y":
        cmd.append("-v")
    cmd.append(url)
    subprocess.call(cmd)
def httrack_download(url, path, mirror_depth, ext_links_depth, conn_num, verbose=None):
    """Mirror *url* with httrack into project directory *path*.

    The arguments map to httrack flags: ``-r`` mirror depth, ``-%e`` external
    link depth, ``-c`` connection count. *verbose* is accepted for interface
    parity with the other downloaders but is not forwarded (as before).

    Fix: build an argv list instead of a shell format string so that
    user-supplied values cannot inject shell syntax.
    """
    print("[i] Cloning using httrack - " + url + " on path: " + path)
    cmd = ["httrack", url, "-O", path,
           f"-r{mirror_depth}", f"-%e{ext_links_depth}", f"-c{conn_num}"]
    subprocess.call(cmd)
def launch_updater():
    """Announce and run the shell update script."""
    message = "[i] Getting latest updates for MultiDownloader..." + "\n"
    print(message)
    subprocess.call('sh scripts/update.sh', shell=True)
# Non-interactive dispatch for the flags parsed above.
# NOTE(review): --verbose is store_true (bool), but the download helpers
# compare verbose == "y" -- verbose mode is therefore never enabled from
# the CLI; confirm the intended convention.
if (args.curl):
    if (args.verbose):
        curl_download(args.URL, args.filename, args.verbose)
    else:
        curl_download(args.URL, args.filename)
if (args.wget):
    if (args.verbose):
        wget_download(args.URL, args.filename, args.verbose)
    else:
        wget_download(args.URL, args.filename)
if (args.httrack):
    if (args.verbose):
        httrack_download(args.URL, args.filename, args.depth, args.ext_depth, args.conn_num, args.verbose)
    else:
        httrack_download(args.URL, args.filename, args.depth, args.ext_depth, args.conn_num)
if (args.update):
    launch_updater()
# Fall back to the interactive menu; Ctrl-C exits cleanly.
try:
    main()
except KeyboardInterrupt:
    print("[!] Exiting...")
    sys.exit()
958a38d4edf87c352270fdf92a3b1727c3d068e0 | 1,129 | py | Python | forge/kubernetes.py | Acidburn0zzz/forge | c53d99f49abe61a2657a1a41232211bb48ee182d | [
"Apache-2.0"
] | 1 | 2017-11-15T15:04:44.000Z | 2017-11-15T15:04:44.000Z | forge/kubernetes.py | Acidburn0zzz/forge | c53d99f49abe61a2657a1a41232211bb48ee182d | [
"Apache-2.0"
] | 2 | 2021-03-20T05:32:38.000Z | 2021-03-26T00:39:11.000Z | forge/kubernetes.py | Acidburn0zzz/forge | c53d99f49abe61a2657a1a41232211bb48ee182d | [
"Apache-2.0"
] | null | null | null | import os, glob
from tasks import task, TaskError, get, sh, SHResult
def is_yaml_empty(dir):
    """Return True when no .yaml file directly inside *dir* has non-whitespace content."""
    pattern = "%s/*.yaml" % dir
    for candidate in glob.glob(pattern):
        with open(candidate) as handle:
            content = handle.read()
        if content.strip():
            return False
    return True
class Kubernetes(object):
    """Thin kubectl wrapper scoped to an optional namespace/context."""

    def __init__(self, namespace=None, context=None, dry_run=False):
        # Fall back to the K8S_NAMESPACE environment variable when unset.
        self.namespace = namespace or os.environ.get("K8S_NAMESPACE", None)
        # NOTE(review): context is stored but never applied to the kubectl
        # invocations below -- confirm whether --context should be passed.
        self.context = context
        self.dry_run = dry_run

    @task()
    def resources(self, yaml_dir):
        """Return the resource names a `kubectl apply` of *yaml_dir* would touch."""
        if is_yaml_empty(yaml_dir):
            return []
        cmd = "kubectl", "apply", "--dry-run", "-f", yaml_dir, "-o", "name"
        if self.namespace:
            cmd += "--namespace", self.namespace
        return sh(*cmd).output.split()

    @task()
    def apply(self, yaml_dir):
        """Apply the manifests in *yaml_dir*; no-op on an empty directory."""
        if is_yaml_empty(yaml_dir):
            return SHResult("", 0, "")
        cmd = "kubectl", "apply", "-f", yaml_dir
        if self.namespace:
            cmd += "--namespace", self.namespace
        if self.dry_run:
            cmd += "--dry-run",
        result = sh(*cmd)
        return result
| 29.710526 | 75 | 0.558902 |
958ba96c16c5793bb5abfd2bf23b7c56685312b0 | 615 | py | Python | src/models.py | mchuck/tiny-ssg | 52998288daea9fe592b8e6ce769eca782db591cd | [
"MIT"
] | null | null | null | src/models.py | mchuck/tiny-ssg | 52998288daea9fe592b8e6ce769eca782db591cd | [
"MIT"
] | null | null | null | src/models.py | mchuck/tiny-ssg | 52998288daea9fe592b8e6ce769eca782db591cd | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import List, Dict, Any
@dataclass
class WebsitePage:
    """A single rendered page plus its metadata."""
    title: str
    body: str              # rendered page content
    tags: List[str]        # tag names attached to the page
    created_at: str
    url: str
    slug: str              # URL-safe identifier
    meta: Dict             # free-form page metadata
@dataclass
class WebsiteTag:
    """A tag together with every page that carries it."""
    name: str
    slug: str              # URL-safe identifier
    pages: List[WebsitePage]
@dataclass
class WebsiteCollection:
    """A named group of pages with its tag index."""
    name: str
    pages: List[WebsitePage]
    tags: List[WebsiteTag]
@dataclass
class Website:
    """The whole site: collections keyed by name, plus site-wide metadata."""
    collections: Dict[str, WebsiteCollection]
    meta: Dict
@dataclass
class Templates:
    """Bundle of template objects used to render the site."""
    # TODO: Remove Any
    index_template: Any
    page_template: Any
    tag_template: Any
| 16.184211 | 45 | 0.689431 |
958c59599470ad36c300e0c6dec5381bb27923b6 | 1,952 | py | Python | demucs/ema.py | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 | [
"MIT"
] | 1 | 2022-02-14T05:52:53.000Z | 2022-02-14T05:52:53.000Z | demucs/ema.py | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 | [
"MIT"
] | null | null | null | demucs/ema.py | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Inspired from https://github.com/rwightman/pytorch-image-models
from contextlib import contextmanager
import torch
from .states import swap_state
class ModelEMA:
    """Exponential moving average of a model's float32 state tensors.

    Temporarily evaluate with the averaged weights via `swap`:

        ema = ModelEMA(model)
        with ema.swap():
            # compute valid metrics with averaged model.
    """
    def __init__(self, model, decay=0.9999, unbias=True, device='cpu'):
        self.decay = decay
        self.model = model
        self.state = {}
        self.count = 0
        self.device = device
        self.unbias = unbias
        self._init()

    def _init(self):
        # Seed the EMA buffers with detached copies of the float32 tensors.
        for name, tensor in self.model.state_dict().items():
            if tensor.dtype != torch.float32:
                continue
            target = self.device or tensor.device
            if name not in self.state:
                self.state[name] = tensor.detach().to(target, copy=True)

    def update(self):
        # Interpolation weight for the newest snapshot; the unbiased variant
        # keeps a decayed sample count so early averages are not damped.
        if self.unbias:
            self.count = self.count * self.decay + 1
            fresh = 1 / self.count
        else:
            fresh = 1 - self.decay
        for name, tensor in self.model.state_dict().items():
            if tensor.dtype != torch.float32:
                continue
            target = self.device or tensor.device
            self.state[name].mul_(1 - fresh)
            self.state[name].add_(tensor.detach().to(target), alpha=fresh)

    @contextmanager
    def swap(self):
        # Temporarily install the averaged weights on the wrapped model.
        with swap_state(self.model, self.state):
            yield

    def state_dict(self):
        return {'state': self.state, 'count': self.count}

    def load_state_dict(self, state):
        self.count = state['count']
        for name, value in state['state'].items():
            self.state[name].copy_(value)
| 29.134328 | 73 | 0.585553 |
958d20eb83026863f5c7fe7f0d9e55731a14596b | 250 | py | Python | tests/test_all.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | tests/test_all.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | tests/test_all.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | import os
import fnmatch
import deep_learning
# NOTE: Python 2 script (print statement, execfile); run under python2.
# Collect every test_*.py in the working directory except this runner itself,
# then execute each one in sequence.
tests = [file for file in os.listdir(os.getcwd()) if fnmatch.fnmatch(file, 'test_*.py')]
tests.remove('test_all.py')
for test in tests:
    print '---------- '+test+' ----------'
    execfile(test)
| 22.727273 | 89 | 0.632 |
958e7f740b7a101b6adbafb3854a0ff8c7e6558c | 12,328 | py | Python | gws.py | intelligence-csd-auth-gr/greek-words-evolution | ab1ee717f7567ffa8171e64f835932af7502955d | [
"MIT"
] | 9 | 2020-07-12T13:45:24.000Z | 2021-12-05T16:08:58.000Z | word_embeddings/we.py | emiltj/NLP_exam_2021 | 9342e8dc9ad684927bbfa5eb6c125dd53c14cccb | [
"MIT"
] | 2 | 2021-03-30T14:35:26.000Z | 2022-03-12T00:40:17.000Z | word_embeddings/we.py | emiltj/NLP_exam_2021 | 9342e8dc9ad684927bbfa5eb6c125dd53c14cccb | [
"MIT"
] | 2 | 2021-04-23T13:07:55.000Z | 2021-12-16T14:06:51.000Z | import warnings
import argparse
import os
import logging
import lib.metadata as metadata
import lib.model as model
import lib.text as text
import lib.website as website
warnings.filterwarnings('ignore')  # silence noisy third-party warnings
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-wide logger used by all handlers
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
# Filesystem layout and tunables shared by all subcommands.
DATA_FOLDER = os.path.join(os.path.curdir, 'data')
MODELS_FOLDER = os.path.join(os.path.curdir, 'output', 'models')
SCRAPPED_PDF_FOLDER = os.path.join(os.path.curdir, 'data', 'scrap', 'pdf')
FASTTEXT_PATH = os.path.join(os.path.curdir, 'fastText', 'fasttext')
SCRAPPED_TEXT_FOLDER = os.path.join(os.path.curdir, 'data', 'scrap', 'text')
PRODUCED_TEXTS_FOLDER = os.path.join(os.path.curdir, 'output', 'texts')
LIB_FOLDER = os.path.join(os.path.curdir, 'lib')
MODEL_FILE_EXTENSION = '.model'
TEXT_FILE_EXTENSION = '.txt'
PDF_FILE_EXTENSION = '.pdf'
POST_URLS_FILENAME = 'post_urls.pickle'
METADATA_FILENAME = 'raw_metadata.csv'
# Each corpus pairs a folder of parsable text files with its metadata sheet.
CORPORA = [
    {
        'name': 'openbook',
        'textFilesFolder': os.path.join(DATA_FOLDER, 'corpora', 'openbook', 'text', 'parsable'),
        'metadataFilename': os.path.join(DATA_FOLDER, 'corpora', 'openbook', 'metadata.tsv')
    },
    {
        'name': 'project_gutenberg',
        'textFilesFolder': os.path.join(DATA_FOLDER, 'corpora', 'project_gutenberg', 'text', 'parsable'),
        'metadataFilename': os.path.join(DATA_FOLDER, 'corpora', 'project_gutenberg', 'metadata.tsv')
    },
]
COMBINED_TEXTS_FILENAME = 'corpus_combined.txt'
COMBINED_MODEL_FILENAME = os.path.join(MODELS_FOLDER, 'corpus_combined_model.bin')
NEIGHBORS_COUNT = 20  # nearest neighbours requested from fastText per query
#####################################
# Set up required folders and perform any other preliminary tasks
#####################################
if not os.path.exists(SCRAPPED_PDF_FOLDER):
    os.makedirs(SCRAPPED_PDF_FOLDER)
if not os.path.exists(SCRAPPED_TEXT_FOLDER):
    os.makedirs(SCRAPPED_TEXT_FOLDER)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
def websiteParser(args):
    """Dispatch a website-scraping subcommand selected by ``args.action``."""
    action = args.action
    if action == 'fetchLinks':
        logger.info('Selected action: Fetch website links')
        print(website.fetchLinks(args.target))
    elif action == 'fetchMetadata':
        logger.info('Selected action: Fetch website metadata')
        print(website.fetchMetadata(args.target, PDF_FILE_EXTENSION, METADATA_FILENAME))
    elif action == 'fetchFiles':
        logger.info('Selected action: Fetch website files')
        website.fetchFiles(args.target, PDF_FILE_EXTENSION, METADATA_FILENAME, SCRAPPED_PDF_FOLDER)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
def metadataParser(args):
    """Print or export corpus metadata depending on ``args.action``."""
    action = args.action
    if action == 'printStandard':
        print(metadata.getCombined(CORPORA, args.corpus, False))
    elif action in ('printEnhanced', 'exportEnhanced'):
        enhanced = metadata.getCombined(CORPORA, args.corpus, True)
        if action == 'printEnhanced':
            print(enhanced)
        else:
            text.exportMetadata(enhanced)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
def textParser(args):
    """Handle the 'text' subcommand: export combined text or extract from PDFs."""
    enhanced = metadata.getCombined(CORPORA, args.corpus, True)
    action = args.action
    if action == 'exportByPeriod':
        logger.info('Selected action: Export combined text by period')
        text.exportTextByPeriod(enhanced, args.fromYear, args.toYear, args.splitYearsInterval)
    elif action == 'extractFromPDF':
        logger.info('Selected action: Extract text from PDF')
        text.extractTextFromPdf(enhanced, SCRAPPED_PDF_FOLDER, PDF_FILE_EXTENSION,
                                SCRAPPED_TEXT_FOLDER, TEXT_FILE_EXTENSION)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
def modelParser(args):
    """Handle the 'model' subcommand: build models or query them."""
    action = args.action
    if action == 'create':
        logger.info('Selected action: Create models')
        model.createModelsFromTextFiles(args.textsFolder, TEXT_FILE_EXTENSION, MODELS_FOLDER, MODEL_FILE_EXTENSION)
    elif action == 'getNN':
        logger.info('Selected action: Retrieve Nearest Neighbours')
        model_filename = args.period + MODEL_FILE_EXTENSION
        cleaned_word = text.preProcessText(args.word)
        print(model.getNeighboursForWord(cleaned_word, model_filename, MODELS_FOLDER,
                                         FASTTEXT_PATH, NEIGHBORS_COUNT))
    elif action in ('getCD', 'getCS'):
        label = 'Get cosine distance' if action == 'getCD' else 'Get cosine similarity'
        logger.info('Selected action: ' + label)
        model.exportByDistance(action, MODEL_FILE_EXTENSION, MODELS_FOLDER, args.fromYear, args.toYear,
                               NEIGHBORS_COUNT, FASTTEXT_PATH)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
# Top-level CLI: one subparser per functional area; each registers its
# handler through set_defaults(func=...).
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='1.0.0')
subparsers = parser.add_subparsers()
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_website = subparsers.add_parser('website')
parser_website.add_argument('--target', default='openbook', choices=['openbook'], help='Target website to '
                            'scrap data from')
parser_website.add_argument('--action', default='fetchFiles', choices=['fetchLinks', 'fetchMetadata', 'fetchFiles'],
                            help='The action to execute on the selected website')
parser_website.set_defaults(func=websiteParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_metadata = subparsers.add_parser('metadata')
parser_metadata.add_argument('--corpus', default='all', choices=['all', 'openbook', 'project_gutenberg'],
                             help='The name of the target corpus to work with')
parser_metadata.add_argument('--action', default='printStandard', choices=['printStandard', 'printEnhanced',
                             'exportEnhanced'],
                             help='Action to perform against the metadata of the selected text corpus')
parser_metadata.add_argument('--fromYear', default=1800, type=int, help='The target starting year to extract data from')
parser_metadata.add_argument('--toYear', default=1900, type=int, help='The target ending year to extract data from')
parser_metadata.add_argument('--splitYearsInterval', default=10, type=int, help='The interval to split the years with '
                             'and export the extracted data')
parser_metadata.set_defaults(func=metadataParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_text = subparsers.add_parser('text')
parser_text.add_argument('--corpus', default='all', choices=['all', 'openbook', 'project_gutenberg'],
                         help='The name of the target corpus to work with')
parser_text.add_argument('--action', default='exportByPeriod', choices=['exportByPeriod', 'extractFromPDF'],
                         help='Action to perform against the selected text corpus')
parser_text.add_argument('--fromYear', default=1800, type=int, help='The target starting year to extract data from')
parser_text.add_argument('--toYear', default=1900, type=int, help='The target ending year to extract data from')
parser_text.add_argument('--splitYearsInterval', default=10, type=int, help='The interval to split the years with '
                         'and export the extracted data')
parser_text.set_defaults(func=textParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_model = subparsers.add_parser('model')
parser_model.add_argument('--action', default='getNN', choices=['create', 'getNN', 'getCS', 'getCD'],
                          help='Action to perform against the selected model')
parser_model.add_argument('--word', help='Target word to get nearest neighbours for')
parser_model.add_argument('--period', help='The target period to load the model from')
parser_model.add_argument('--textsFolder', default='./output/texts', help='The target folder that contains the '
                          'texts files')
parser_model.add_argument('--fromYear', default='1800', help='the target starting year to create the model for')
parser_model.add_argument('--toYear', default='1900', help='the target ending year to create the model for')
parser_model.set_defaults(func=modelParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
if __name__ == '__main__':
    args = parser.parse_args()
    # NOTE(review): running with no subcommand leaves 'func' unset and this
    # raises AttributeError -- confirm a print_help() fallback is desired.
    args.func(args)
| 56.036364 | 120 | 0.455224 |
958e9155b3239d72fa5b7b6e836c3597e9e664a8 | 3,887 | py | Python | OP3/op3/messages.py | gvx/op3 | 888ab5975a3f911fc9ed9afea983928de3110033 | [
"MIT"
] | null | null | null | OP3/op3/messages.py | gvx/op3 | 888ab5975a3f911fc9ed9afea983928de3110033 | [
"MIT"
] | null | null | null | OP3/op3/messages.py | gvx/op3 | 888ab5975a3f911fc9ed9afea983928de3110033 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from collections.abc import MutableSequence
from datetime import datetime
from typing import NamedTuple, Any, Optional, Iterator
from .encoders import ENCODERS, string_encode, default_encoder, datetime_encode, blob_encode
class Element(NamedTuple):
    """A single OSC argument: a value paired with its type tag."""
    value: Any
    tag: str

    @classmethod
    def from_pair(cls, value: Any, tag: Optional[str]=None) -> 'Element':
        """Coerce *value* (an Element, a (value, tag) tuple, or a bare value) into an Element."""
        if isinstance(value, Element):
            assert tag is None
            return value
        if isinstance(value, tuple):
            assert tag is None
            value, tag = value
        if not tag:
            tag = default_encoder(value)
        return cls(value, tag)

    def encode(self) -> bytes:
        """Serialize the value with the encoder registered for its tag."""
        encoder = ENCODERS[self.tag]
        return encoder(self.value)

    def __repr__(self):
        return 'Element({!r}, tag={!r})'.format(self.value, self.tag)
class AbstractMessage(ABC, MutableSequence):
    """Base of OSC messages and bundles: an addressed sequence of items."""
    address: str
    _items: list

    def clear(self):
        """Drop every stored item."""
        self._items.clear()

    def __len__(self):
        return len(self._items)

    def __getitem__(self, index):
        return self._items[index]

    def __delitem__(self, index):
        del self._items[index]

    @abstractmethod
    def _build_message(self) -> Iterator[bytes]:
        """Yield the encoded byte chunks making up the wire form."""
        assert NotImplementedError  # no coverage: abstract method

    def __bytes__(self):
        chunks = self._build_message()
        return b''.join(chunks)

    def __eq__(self, other):
        same_type = type(other) is type(self)
        return same_type and other.address == self.address and other._items == self._items
class Message(AbstractMessage):
    """An OSC message: an address pattern plus a list of typed arguments."""

    def __init__(self, address: str, *args: Any) -> None:
        self.address = address
        self._items = []
        self.extend(args)

    def append(self, value: Any, *, tag: Optional[str]=None) -> None:
        """Add an argument; *value* may be raw, a (value, tag) pair, or an Element."""
        element = Element.from_pair(value, tag)
        self._items.append(element)

    def insert(self, index: int, value: Any, *, tag: Optional[str]=None) -> None:
        element = Element.from_pair(value, tag)
        self._items.insert(index, element)

    def __setitem__(self, index, value):
        self._items[index] = Element.from_pair(value)

    def values(self) -> Iterator[Any]:
        """Yield the raw argument values in order."""
        for element in self._items:
            yield element.value

    def tags(self) -> Iterator[str]:
        """Yield the type tag of each argument in order."""
        for element in self._items:
            yield element.tag

    def _build_message(self) -> Iterator[bytes]:
        yield string_encode(self.address)
        typetag = ',' + ''.join(self.tags())
        yield string_encode(typetag)
        for element in self._items:
            yield element.encode()

    def __repr__(self):
        inner = repr(self._items)[1:-1]
        return f'Message({self.address!r}, {inner})'
class Bundle(AbstractMessage):
    """A bundle: a timetag plus nested messages/bundles.

    Nested bundles must not be scheduled earlier than their parent.
    """
    timetag: datetime

    def __init__(self, timetag: datetime, *args: Message) -> None:
        self.timetag = timetag
        self._items = []
        for packet in args:
            self.append(packet)

    @property
    def address(self):
        """Bundles always use the reserved '#bundle' address."""
        return '#bundle'

    def valid_to_insert(self, item: AbstractMessage):
        """Return True if *item* may be nested inside this bundle."""
        if not isinstance(item, AbstractMessage):
            return False
        # A nested bundle may not be scheduled before its parent.
        return item.timetag >= self.timetag if isinstance(item, Bundle) else True

    def append(self, item: AbstractMessage) -> None:
        assert self.valid_to_insert(item)
        self._items.append(item)

    def insert(self, index: int, value: AbstractMessage) -> None:
        assert self.valid_to_insert(value)
        self._items.insert(index, value)

    def __setitem__(self, index, value):
        assert self.valid_to_insert(value)
        self._items[index] = value

    def _build_message(self) -> Iterator[bytes]:
        # Wire layout: '#bundle', the encoded timetag, then each nested
        # packet wrapped as a length-prefixed blob.
        yield string_encode(self.address)
        yield datetime_encode(self.timetag)
        yield from (blob_encode(bytes(packet)) for packet in self._items)

    def __eq__(self, other):
        # Short-circuits on the base comparison before touching timetag, so
        # comparing against a non-Bundle never raises.
        return super().__eq__(other) and other.timetag == self.timetag

    def __repr__(self):
        return f'Bundle({self.timetag!r}, {repr(self._items)[1:-1]})'
| 30.849206 | 92 | 0.637252 |
958ef26cd63d83883ded41820724c2716c93e70b | 2,716 | py | Python | ssepaperless/Organizer/views.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | ssepaperless/Organizer/views.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | ssepaperless/Organizer/views.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from django.template import RequestContext, loader
from Organizer.models import Department
from Organizer.models import Advisor
from Organizer.models import Student
from Organizer.models import Course
from Organizer.models import Degree
from Organizer.models import Certificate
from Organizer.models import Degree_Core_Course_Structure
from Organizer.models import Degree_Elective_Course_Structure
from Organizer.models import Certificate_Course_Structure
def index(request):
    """Render the landing page listing every Department.

    Consistency fix: every other view in this module uses the ``render()``
    shortcut; the legacy loader/RequestContext pattern here produced the
    same HttpResponse with more boilerplate.
    """
    department_list = Department.objects.all()
    return render(request, 'Organizer/index.html',
                  {'department_list': department_list})
def index2(request, department_id):
    """Render the detail page for a single Department (404 if unknown)."""
    department = get_object_or_404(Department, pk=department_id)
    context = {'department': department}
    return render(request, 'Organizer/index2.html', context)
def advisorinfo(request, department_id, advisor_id):
    """Render the advisor-info page for one advisor within a department."""
    department = get_object_or_404(Department, pk=department_id)
    advisor = get_object_or_404(Advisor, pk=advisor_id)
    context = {'department': department, 'advisor': advisor}
    return render(request, 'Organizer/advisorinfo.html', context)
def detail(request, department_id, advisor_id):
    """Render the advisor detail page for one advisor within a department."""
    department = get_object_or_404(Department, pk=department_id)
    advisor = get_object_or_404(Advisor, pk=advisor_id)
    context = {'department': department, 'advisor': advisor}
    return render(request, 'Organizer/detail.html', context)
def advisordegree(request, department_id, advisor_id):
    """Render the degrees page for one advisor within a department."""
    department = get_object_or_404(Department, pk=department_id)
    advisor = get_object_or_404(Advisor, pk=advisor_id)
    context = {'department': department, 'advisor': advisor}
    return render(request, 'Organizer/advisordegree.html', context)
def degree(request, department_id, degree_id):
    """Render the page for one Degree within a department."""
    department = get_object_or_404(Department, pk=department_id)
    degree = get_object_or_404(Degree, pk=degree_id)
    context = {'department': department, 'degree': degree}
    return render(request, 'Organizer/degree.html', context)
def coursedegree(request, degree_id, degree_core_course_structure_id):
    """Render the page for one core-course structure entry.

    NOTE(review): ``degree_id`` is accepted but unused here — presumably kept
    to match the URLconf pattern; confirm against urls.py.
    """
    core_course_structure = get_object_or_404(
        Degree_Core_Course_Structure, pk=degree_core_course_structure_id)
    context = {'core_course_structure': core_course_structure}
    return render(request, 'Organizer/coursedegree.html', context)
def certificate(request, department_id, certificate_id):
    """Render the page for one Certificate within a department."""
    department = get_object_or_404(Department, pk=department_id)
    certificate = get_object_or_404(Certificate, pk=certificate_id)
    context = {'department': department, 'certificate': certificate}
    return render(request, 'Organizer/certificate.html', context)
# Create your views here.
| 48.5 | 111 | 0.796024 |
95908c4c021ce144e1c7f298836a5c4a2cc424d8 | 462 | py | Python | project/3/cal.py | Aries-Dawn/Cpp-Program-Design | 9d4fc9a902fff2f76e41314f5d6c52871d30a511 | [
"MIT"
] | null | null | null | project/3/cal.py | Aries-Dawn/Cpp-Program-Design | 9d4fc9a902fff2f76e41314f5d6c52871d30a511 | [
"MIT"
] | null | null | null | project/3/cal.py | Aries-Dawn/Cpp-Program-Design | 9d4fc9a902fff2f76e41314f5d6c52871d30a511 | [
"MIT"
] | null | null | null | import numpy as np
# Verify a matrix-multiplication result: compare A @ B against a reference
# output file and report/save the element-wise differences.
matrixA = np.loadtxt('./mat-A-32.txt')
matrixB = np.loadtxt('./mat-B-32.txt')
checking = np.loadtxt('./out32.txt')
result = np.dot(matrixA, matrixB)
diff = result - checking
print(checking)
print(result)
print(diff)
# Bug fix: the original called np.absolute(diff) and discarded the result,
# then printed np.max(diff) -- which misses the largest *negative* error.
# Report the maximum absolute error instead.
print(np.max(np.absolute(diff)))
# NOTE(review): the output filename says 2048 but the inputs are 32x32 --
# presumably a leftover from a larger run; kept as-is to preserve behavior.
rows, cols = diff.shape
with open('./out2048-diff.txt', 'w') as f:
    for i in range(rows):
        for j in range(cols):
            f.write("%.6f " % diff[i, j])
        f.write('\n')
| 23.1 | 42 | 0.623377 |
959230e7e9d9994cf553883c73d07ce0fe30741d | 16,749 | py | Python | 2020/src/day24.py | Sujatha-Nagarajan/AdventOfCode | afce23c74fd0a72caa29c1604a582b21806e794e | [
"CC0-1.0"
] | 1 | 2020-12-05T06:14:37.000Z | 2020-12-05T06:14:37.000Z | 2020/src/day24.py | Sujatha-Nagarajan/AdventOfCode | afce23c74fd0a72caa29c1604a582b21806e794e | [
"CC0-1.0"
] | null | null | null | 2020/src/day24.py | Sujatha-Nagarajan/AdventOfCode | afce23c74fd0a72caa29c1604a582b21806e794e | [
"CC0-1.0"
] | null | null | null | import re
from collections import defaultdict
from util import *
# Example puzzle input (AoC 2020 day 24): one tile per line, each line a
# run of hex-grid steps (e, se, sw, w, nw, ne) written with no separators.
input1="""sesenwnenenewseeswwswswwnenewsewsw
neeenesenwnwwswnenewnwwsewnenwseswesw
seswneswswsenwwnwse
nwnwneseeswswnenewneswwnewseswneseene
swweswneswnenwsewnwneneseenw
eesenwseswswnenwswnwnwsewwnwsene
sewnenenenesenwsewnenwwwse
wenwwweseeeweswwwnwwe
wsweesenenewnwwnwsenewsenwwsesesenwne
neeswseenwwswnwswswnw
nenwswwsewswnenenewsenwsenwnesesenew
enewnwewneswsewnwswenweswnenwsenwsw
sweneswneswneneenwnewenewwneswswnese
swwesenesewenwneswnwwneseswwne
enesenwswwswneneswsenwnewswseenwsese
wnwnesenesenenwwnenwsewesewsesesew
nenewswnwewswnenesenwnesewesw
eneswnwswnwsenenwnwnwwseeswneewsenese
neswnwewnwnwseenwseesewsenwsweewe
wseweeenwnesenwwwswnew"""
input2="""nwesesesesewswenwswsesesenesenwnesenwse
nwnenwnwnenenwnenwnenewnwenenwwnenesesenw
neneswnenwenwseeneweswsenesewnenenee
senwewnwnenenwnwnwwesenenwswnenwwnwnw
swseseeseswseseeswseneseswsesesenwsesew
weeneeneswsewnwnesweseneswenwneswne
swseseswswneswswsesewswswseswse
swswseeswswwswnweenewswswesenwswwse
swswswswsweswseeswseseseseeswwsewnw
eneeseenenweeneenenee
eesesenwsesweeseeese
neenenenewnenenenenenwnenenenwnwne
nenenwnwnwnenwnenwnwswnenesenenwnw
neneweweneneenenenenesewneeneenee
nwweswswewneenenwneneneeswneneneswne
eeseeneseesesesewneswseeeseese
swseswsenwswnewswseswswswseswswse
senenenwnwnenwnwnwewnwwnwswnenenwnwnwenw
senwnenenwnwnenwnwwnwswnwnwnenwnwenenwnw
neweseneswswnwswnwswseneseenwseeswee
esesweeneeneswsenwsweeeeseeseee
nenenwewseswseseswsewseneewwwnww
neeswswenwnewnwnwwswwwneswswnwwwnwnw
wwweswwwwwwswwwwww
eeseenweenwseneeeeeeweeenee
eeeeesenenenwesweeeswenwswseswee
neswenenesenenenewnwenesweneneeswne
swswswenwswwswswswswswwwswweswnwsww
seseswseseseeswneeeeesewesesenenw
swwswwwswwwswwswsweneswwwsesww
eneeswenweewenwseeeseeeseswwnw
swnenwswswswseseswswswwseswswswswswswsw
seeseseeseeeesesesenwsenwseweseese
swswswswnwnesweswewseseneswswwnenwsw
eewnenweneswwseeeeneneeeeeene
esenweswwnwnwnwnwnwnwnwnwnwnwnwnwwnw
seeeeeseeneeswweeeeeeneenw
weneswswenenenenwneswneswneneneesene
wnwsesesenwnwnenwnwnesweneenwseswwsw
sewsesesesesesesesewsenesesesesesenesese
swswswwnwswswwweswswswnewwseswsww
nwneneswnwnwnenenenwsenenwnwnenwwnene
neenwenenwsweseeswsesweeseseswneswene
eneeenewewneeneeneweneeesee
nwnwwwnwnewsenwsenw
sesesewswswwneneneeseeewswnwswnwsenw
sewwswwnwwewwwneswswswwwneew
nwsenwwnenenenesenwsenenenenenenenenenwne
sewsewnesenwsenesenwsesweswswsesenenw
eseeeeeeeenweeeeseesee
eseenwseesweswenweseenweeeeswee
neseseseswwneswsesesesewseseseswse
sesweewseseeesenwseeeseeswsweneenw
wnwneseeeseseeseenwwenwseseesese
enwneswnwneneneneneneneenenwnenwwnene
wnwneneneneneneewnwwnenweneesw
nwnenenenwnenenwenenwneneseswnewnenene
nwwsenwnwnenwnenwnenwneneneenwnwsenenww
wwwsewwnwwwnewwneswwewww
swseswwswseswswswswsenwseneeneewsenwsw
nwnesenenwenwnwnwnenwnwnwneswnwnwnenene
seeeweswnenenwsenewenenwewneseee
nwwnwneseswsesweenweswsese
seeseseswsesenesesesenesewseseseeese
swwswneswwnwswneswwewsesewswswsww
seswneswswseswswseseswnwswswswswseswew
wwwwwswwewseswwwwwswwnenesw
nwnwwsenwnenwnwnwnwneenwnwnwnwnenwnww
nwnwneswwswseswswnwnwenwnenesenenenwswenw
neneneneeneeseneneneeneneneswnwnenee
neeeswswnweenwsweseneeseswnwnewe
neswesenwneneneenenweeneene
swseeneewnwseeeenwesenweseseeswnw
eweneeneseeneneneneeeseeeneewene
eeneneewneneeeswneneeneeenwsenenew
nwnwswsweswswnenwswseswswswsweswswnesw
neeeeseeeswewenenwswnene
nwenwnwenwswnwnwwnwswnwnwneswnwneswswese
neswseweeneneeeseenwwnenesenenwnee
wswwseewwwewweewwswnewwwsww
swswswwswswwwswswswswswnweeswswswsw
enenenenenenenenenenwnenenwsenenenewnw
seseswseseseeswseneseseseseseneseesee
neewneeweeeeeneese
enewneseeweneneeneneewenesenene
enwswneeswnwswsewenwwnesewneswseswe
senwswnwnenwnwwnenwnwswnwnwnwnwnwnwnwnwe
sewswneswswswseseseseswswseneswseswswsw
nwnenwwsewneneswnweenwnwnenwnwnwsenene
eswwewswswnwswsw
wwwwwwwwswsenw
nwnwnwnwnwnwnwwwnwnwwnwnwwnwnenwsese
seswnewnenwnweswnwsesenwseeseesesewnw
neneseneeswneneneswwsenwnw
nwnwnwnewnwnesenwnwnenwnwswnwneseenwnw
wwneneeneswneneeewwnesesenenenese
eseeswswsesenwneeewswnenwnwnewnw
nwnesesenewwwswnwewsenwwsewnwwww
eneeenewneneeneneneneswnenwewnesee
neneenewenenenenew
nwsenweewnwwwwenwnwswnwnenwswnwnwse
seseeeeeeswwsenwseeseeseseeese
wwwnwwwwwwwewewwwwwww
swswseseneseswswsewseseseswenwneseseswsw
seswsesesenweseeseswwseseneswsesesesese
swswseswswswswswswswswswnwswswseswsw
nweeneewneeseseesenwsenwseweswnwnw
eeeeeseseeeeewsweenweeeese
nwesesesesenenewwwneeeeweeee
eenenwneneeswewneeeeneenee
seseseswseswseswsenwseeswsesenwseseswne
eseseseswseeneseeseseewnwswsesenese
nwnenwnwseewsenwenewsenwsweswswenenee
wsenwnwwnwwnwneeenwnwne
seswswswswseneneseseswswswswswswswwnesww
wswseswnwsweswseseswesesenwswseseseswsw
sewweseseneseneswsesesenesesesesesese
nwwnwneewwewwwwnwwwwwswwwswsw
nwsweswwneeeeeenwseeenwnwswswesw
wwwwwwsenwwnewwnwwwenwwwew
swneneneswneneswenenwnwnewnwnwsenenenwnene
eswnweseweseeseenwsene
esewewneneneneseneneneneneneneneewwne
eneeeenesenwnenwseneneenenenweesw
nwnwnwseenwnweswnwnwnwnenewnwnwnwswenw
neenenesewsewneeswseseenwweeeesw
eewneeeneeesweeeeeeenenee
nenenenenenwwsenenenenenenenwswneneneene
nwwwwswwwnewwwwwwnwwww
enwswseswenesenwenwseseeswesesenewse
swesweneeenwenenweeneneesweeee
wnwnwnesenwnwwsenewswnwwnwsenwseneswse
neseswseneenwsweneswwnwsenwnesewsenwsw
swswswseseneswweenwswswsesenewseswnesenw
weseseeseseswseseseneeeesesewnese
seeneswnewsesewnwwwwnw
sewseeseeeeesewneeese
seseseenwewsesewneseeeeesweenw
ewswwwswswwswwwswswswsw
nwnwwwsenewswnwwwenwsenwnenwnwnwnw
esenwseseweeneneneswwsewsesewneese
wnwnwswnenwnewwenenesewnenenwnesenesene
wnwnenwwnwnwnwnwnwnwswnwnwnwnwwnenwnwse
eewnesenwsesesweeneeewesweeesee
swenwesweseenwseeseseseenenwesee
nwnwneswnwenwwnwnwnenenwnwnweneswsenw
swwwewwnewseewswwswnewwwww
swwwswewwwsewwsewnwwswwwwnww
wsenwewnwwsewwwnwsewnwnwwsenwnwnw
neswswnenenwneneneenewneneneswnwsw
wwwswwwwnwewwwsewwwwwwwnw
seseseswseswswnweswwswswswnwsesesesesee
nwneneenesenenenwwnwneneswnwnwnenenenwnw
neeenwneeneesweenweeeesw
eweeeeeeeeeeeeneswneeeswe
wseewesesesesesesesesewseenesesesesee
eeeeeseeeeweeee
wsewneseeewseswnewnenenwnenewnesenw
wswenwnwnwnwwnwnwnwwewnwwnwwnww
wwwwwwnewwwsewwwswwneseww
wwwwewwwwwwnw
nwnwnenenwnwnwseenwwsenwenwnwwwnw
seeswswsewnewnwwsweswwswnwswswnwnw
eweseseneeseese
sweeeeeswenenesweseeeeseneee
wnwswewnenewsewwnewwwswwsww
nweenwwwneswnwsenwsewewnwwnwnwww
eswneeneneneneeeneeneeneneeeswne
eeseseneeeeeeeweeneeeswee
enweeseneswnenwnwnwswswswnw
swseseseseseeseeneeese
swswswswswswswswwseswswswswswseeneswnwsesw
senwseenwwsweswseseswse
wnwwnwwnwnwnwwsenwnwnwnewnwnwnwsenwnwse
seseenwesesenwseseseseseseseseseseswswsw
nesewnenenwnwneneswneneswsenwnenwnenw
eswnwweenweseeneeswneeeeeee
seesweneewenenweswseseweseneswsenwse
wsesesesenesesesesesesesesesesese
neneneneenenenenenewneneneneneneswne
eseseeseeseeenweeeswseesenwse
neneseeeeweseewwseseeenweseee
senewnwwwswwewnwwnwwsewenewse
seseseeweseseseneseeeseweseseseee
wswseswseenwwswneswswnwswsww
nwnwnwnwwswnwnwnwnwenenwenwnwnwnwne
sewnwsenweswswswneenwwsenewnwnewnwnw
swseeseseswseneswwsesewwwswnenesesese
eeeneneesweeeeeeneee
nwnweswnwnwenwnwnwnwnwswnwsenwnwnwsenw
swswswswnwenwswswswswweswswesw
nenwnwneswnwneswnenwnenenenwnwnenenenene
wneneneneneneneneseswneneesenewnenwe
enwsenenweneeswswsesesweseseseseswsese
swwseseseeseewnesewnewswseseswseswse
enwneeneneswneneneenenenene
eswweeeeseeneeeeeesesenweee
nenwsenwnwnenwneswnwnwnwnwenwnwnenwnenw
esenwswwnwnwenwsenwnwseseenwswnwwew
nwswnewwwnwswnwwnenwnenwswnwwwwnwnw
eeweseeseseeeeeeesesesewese
nwseeeeenwseeseeeeseeeseeeew
senenwswnweswnwwwwwnwnenwwseswwnwe
ewswnwnewewsenwswseneswswswswseswsw
nwseswnenwwenwwswsesenwnwneewwnwse
seeneseseneweseseseeseseswseseseese
wwsewwswswwswswneswweswswswswwsww
swswswsweswswswswswnwwswswsesesweswsw
seeeeweweeweeeseneenewene
nwseseesewseesewnwneewseesesenenwee
swewwnwnwswswwwweswswswswswneswe
eeneeeneneneneeweneneenesenenenew
swsewwwsewnewwwnwwwwwwnewww
seneswwweswswswwsweswswswswswwswswwnw
seneseseseswsesesewsesesesesw
seswswseswswwewswswswswseswswswswnenw
eseseseesesenenweseesweseeewseseese
swesenenwswnesesenwwwnwse
nenewswnenenenenenesenenenenenenenenesw
senwneseneeneenenw
wseseseeseseeseseseeseseseeenesewe
neeeneweenenee
nwsenenwnenwneeneeeneneneneeswnesene
nwswseeneseenwswnweseneswswnweesesese
nwseseseswsesesesewseeesesesese
eswenesewnenwnwwwnwnwnwneswesenwswsene
sewswwswwswswwswwswswwwneneswnwsww
nenwswenenenenesesesenwwneswnenenewew
senenwswseswsewwsewseseseneeswneswswsw
nwwwnwswswseswseswswwnwweswwwew
eswswswswseseseswswseswswnwswsweswwswse
nenesenenewnenenwnenenenesenenenenenenesw
wnwnwnwwwwwewwwswwwnewnwsenwsw
enwnewnwneswewnewwswwneeseswesew
nwnwnenenwsenwewnenwnenenenwnenwnwnwnwsw
nesenewneenwnwnwnwnwneneneswneswnewnee
ewenewswwsewenwwsenenwwswnwsenwnw
nesenwsenwseseeswswnwese
wnwsenwnwsenwnwswwnwwnenwnwseswnwnwne
newnenwneneenwesenesenenwseseweswswe
senwsesesenwsweseswswsenwnesesesww
sweswseswswwseseseswswsesesenwneseseswnw
nwwenwnwnwsenwnweswnwswnwwswnwnwnenw
enesenenwsewesewsweeneeeeweeee
nwnwnwnwwnwnwnwnwnwwwewnwenwnwnwnw
wseswseseswneeseeseenwseenwseswnwse
seesenwnwwwewseswswnwnwnwe
sewwwwwwwweswwswwwwnewww
neneeswnenwneneswsenweneswneseswseeww
nwnwswenwnwnwnwnwneseswnwsweneswenwnwsw
wwwswwneswwwwwnewswww
senwnwnenwenenenenwnwnewswnwnwnwesw
wswwwwnwswwswwewswnweswswwswew
swseseseswseseswswseseswsesesesenesenwse
nwwnenenenwswsweneenenenenesweneeene
wwnwswwswwswewwwsewwwswswswe
ewwnwwnenesweseenwswswseeswwneenww
eseswsesenwwnwseseseseseseseseswesesesw
wwwwwsewwwwnenwsewnwnenwwwww
nenenenwneneeneswnenenenenwwnenwneenw
seeeeeeeswwseeeeeweneeeenw
senweeneeneswwneeneesweeeswenenesw
nwwnenwnwnwenwnenwnwnwswswnwnwnwenwse
sesenwseseseeseseseseseseese
swswneswsewseseswseswseswseswseeswsewse
seseeseseseeeesesesesewee
seneneeswnenenenenenenenwsenwnenenwnenenew
eeeneeneneweeewneeneneneeseene
swneneneneeneneneenenenenwneenewnene
seswnwseswnwnewneswswnesenewswwwswswsw
enweseeweewewesweeenw
wwwswewneswwwsesewwwnwwswswww
nwwwswwneswwsewswse
swwneseswswnewwswnewwwwse
nwwnwwwnwewwnwswwwwnwwnwwewnw
seseseswneseswseeseswswseswseswwesesenwsw
nweseseseseseseswewseseswsesesesesesese
seseswesesenwseenwsenwseseseseseseseswse
swsenwwnwnwnwnwneswewnenwnweweeswne
eeeenweeeweeeeeeeeesee
nwwwwswnewswewwenwnwwwewswwnw
nwnwnwnwwnwswenwnwnwnwnwnwnwnwnwnwnw
nwwsewewnesewswewnwswwnwwneewse
wnwnwwnwwwnwwnwnewwswwswwwne
enenesweeeeeeneenweeneeneeesw
neeeenweneeneneneneeeeeswsweee
sweswwewseswwwseneneswsewnwwsww
neesesenweweseneseeesesewseeseenwe
sweeeneeeswnene
nwwwswwnwseeweswwwnw
ewswswswwwwneswswnwswswwswswswwww
neewseenwneeswseeneweneweenwesw
seneweseeseseseewseseswweeeese
eneswswswnwswwwswswswswswswswswneswsw
sewswswswswnwswseswswswswnewswwwsww
wwsesenenwnewwwsenw
swnewweswwenenwneseenenenenenenewne
nwwnwnwnwswnwnenwwwnwnwnw
sesesesenwseswnwseseseseseeseswswswswse
swnwswenenwswswneweswwsewsw
nwnwnwnwnenenewewnenenenesenwnwswnwnw
seseswsewswswsenwseseesesenenwesenesww
neneeneeseeeewwwneeenweeeswe
enenwewwswswsenewswsenwewseeneenee
nwwenwswwwnwwnwnwwnwwwwewenwww
sweswnwswesenwsweswseswswnwnwswsweswnwsw
seseseneseseweseewseseswsesewsesese
eweneeneeeeeeseeeeeeeeesw
ewwwnwwwwswwwswswwwwwswwnesw
swnwnenwnwnwwnwnewnwswnwenwnwnwsenwnw
swneeswseseneswwnesesenwsesesenwswnww
seswneseseesesewseseene
wnwwsewnenwnwwsesesenwnwsesesewwwne
eswswwwswseswewwswwswnwswswwwnw
enwnwnenenwnwnwswnwnenwnenwnwswnwnw
newsesenwnenenwnwenwnewnwwnwnwswnwnwnwnw
swneneenesenwwsenwnewnesesenenenwnenw
neneenwnwswswswweeeeeeeenenee
swswseswnwswswswenwswsesenwswseswnewswse
newnenwnenewsenewnesenewneesenwnene
neseseseseswsesewseswneseseswsesesesese
ewseeeeseeesesesesenwseeeswse
wseeeseeeseseesewseenwswseneeee
neseneseswswsesenesewswsesenewsesesenwse
swneneneneneenwneeswneneneeneneneswne
eenewnenenesweeenenenenene
nenenesenenenwneeneeneewseeeene
nwsenenwnenewnwnwnwnwnwnenwnenwnwsenenenw
swnwenwnwwnwnwnwswswewnwnwnwnenwnwnwnww
wwswnwwewwwwsewnwwwww
wseseswnenewwwwwswwwsw
swswwswswwnewswwwseeswwwswwswsw
enesesewewsesweeeseeseseseseesese
nwwnewenwnenwnwnweneswnwneswnenwneenw
eweeeeeseeeweweneeeesesese
wswswswswswswnwseswneswswswswwwswsww
swsenwnwwnwseseseswweeneenwnesenwnee
neeswneneneneewneneneneneneneneneene
nwnewnwnwnwwnwnenwwswnwnwnwwsenwsenwnenw
nwnwnwnwnwnwnwnwsenwnwnwnw
nwnewnwswwwswneewsewnewwswwwww
eeswwesesesenwseeeeeeseeenwe
nenwneswswenwsweneeswneneneneneeswnenw
neneseeneneesewneswnenenwnw
nwnwwwenwenwnwnwwnwnwnwnwwnwswnwnwnw
seseseswswseswsenwswnenwseswwweseswnese
wwewwwseswwswsenwwnwweswnwnee
neenwnwseeneewwneneenenenesewseenese
nwwswswenesewwwwswwswenwneswnewse
seswwnenwnwnenwwneeswsewewsewsesw
eseeseseeeesweneee
eenesesweeeeenwswnwneenenwswnenene
seneseseeeseeswswseswsenwsenwnwsesesese
seseseseseeeeseseeenw
wnwwwweewwwwwswnewwswwww
swswnewnwswseswswswswswwwswswswswwsw
nwnwenwnwnwnwewnwnwnwnwnwnwnwswnwnwnw
seswswswseseseswseswswnwneseneswswseswne
swseswseswswswsewseswsesenenwsesenwseseese
swswsweseswneneswwnewswswswswswswswnew
nwwnwnwnwswnwnwnwnwsenwnwneeeswnwnwnenw
wneneneneseneneew
weeeseweneenewseesesewesesesese
swnweswswseeseswswswseswswswswnwseswew
eneseenweeeswneeeenweswneeee
neneenewneweneneeneseneneswneenenwnene
nesenewneneneeseswneneeneenenwenewnw
neneneweeneneewneeneeneneneene
senenewnesewwwwswswneswwneswsenwse
eenweneseeswnenweswnwsee
nwnwnwsenwnwnwnenwneneneswnwew
sweswneseenwesweeswnwewseneneneeenw
swnwneswwswseswswswswseswseseswswsesw
nwnwnwnwnewnwnwnwsewewsenw
swseseseseswseswswswsenwse
nenwnwesenwnwsenwwnwsenwneswneeneswnw
ewwwewnwwsewnw
nwwnwwnwnwswwnwnwnwnwwnenw
wneeneneneenwswswwneneeneneenesene
nenwnenwnenwnwnenwnwneesenwnenenenwwnew
eenewnenwswwseeenwsenwweneneswne
nwnwnenenwnwwnwnwnwnwswnwswsenw
eeseneenenenenwwseswneneewneenenenee
nenwnenewnwswnenewnwseswneenwnenesene
wnwswwnwwenwenwnwwnwswnwnwnwnwnwnww
neeneneenesewneseenenewnwenwswenese
nwewneeswnwnwseseneswneneswnenwswnwnw
nwnesesewseswsewsewnenenesesenewsesese
seneseswswswswswsenwseseseswseswswswsee
nwwswnwsewwwnewsewnwewesewnwnwnw
eswwenwnenwnwnenwswnwnwnwnwsenenwwne
senweesenwwsewseeneeeenesewseee
nenwnenwnwnwswnwswnenwnenweeswnenwnenene
wnwnwwewsewnewwswwwnwnwwnwwwww
nwwnwswnwnwwnwnenwnwswseewnwnwnwnwe
nenwnenenenwnwnwnenwnenenweenenenwwnesw
wnwnenweseneswwswnwneeseswnenenwswwe
eeeweeeeeeweeeseeee
wwwswwnwwnweswweneswnenwwwnwww
swswwswswseswswswsweseswswseneswswse
seswnwewswwwwswwsewnwneswswewww
seswwwwnewwnwwewwnewwwwww
seneswnenwsweewnwnwenwswswswnesenew
eswnweeesweeseneeeeeeeeeee
wnwswswswswwswswswswwswwwewewsww
nwnwnenwnwnweeswsenweesewswswnwnwswnw
seswwnwsewwwwswsenenw
wwswwwwswswseswwwwewswwswnww
seswseseswseseswseseseswswseseswswnw
swnesewwnwwneswne
wswswsesweswswswseswwswswsweswswnwnwe
seenwsenweseseseesesewseseseseesese
esenwnwnwneswnwnenwwsenwnenwwsenenww
eeeeeneseswseseeseenwseeesw
swseseeneseneseeswwnwese
eeeenweswseeeesee
seseswweenwswnewwwwnew
wswswswnwswswwswswswseneneswseseseeswse
nwswwnwsewewswwswwwenenwwnwww
seneseweseseeneesesesesesenweseseswse
nwnenwnwnwnwsewwenwnenwsenesenwnwnenwne
senwenewsesesewnwwseeweseswsesesenwe
wenwewnwnwnwwnwewnwwwnwwwwnw
seeeeseseeseseesenwseenweesesese
swswswwnwswwwwswnewswswwwswswwew
nenwnenenwnwnwsenenwneneneswnwnwnwsesene
wnewswsenesewswwwswnwwswswnewwseew
wsesenwenwseswsenwwseeseenesenenwwnw
senewewswwswwewwwwnewswwwswsw
swneenwseweseeenwweseseeesenwnwse"""
# Parse the example input; `tiles` maps doubled-width hex coordinates (x, y)
# to a flip state: False = white (the default), True = black.
lines=input1.split('\n')
tiles=defaultdict(lambda:False)# false = white
def get_neighbours(x, y):
    """Yield the six hex-grid neighbours of (x, y).

    The grid uses a doubled-width coordinate scheme: diagonal steps move one
    unit on each axis, while pure east/west move two units along x.
    """
    offsets = ((1, 1), (1, -1), (-1, 1), (-1, -1), (2, 0), (-2, 0))
    for dx, dy in offsets:
        yield x + dx, y + dy
def day():
    """Advance the tile floor by one day and return the new state.

    Rules (AoC 2020 day 24 part 2): a black tile stays black with 1 or 2
    black neighbours; a white tile becomes black with exactly 2 black
    neighbours.  Only currently-tracked tiles and their neighbours can
    change, so only those are evaluated.
    """
    global tiles

    def next_is_black(x, y):
        # Count black neighbours.  Reading via tiles[...] may insert default
        # (white) entries into the defaultdict, which is harmless for counts.
        black_tiles = sum(tiles[nx, ny] for nx, ny in get_neighbours(x, y))
        if tiles[x, y]:  # currently black
            return black_tiles in (1, 2)
        return black_tiles == 2

    # Refactor: the original duplicated the rule logic for a tile and for its
    # neighbours (shadowing the loop variables in the process); the shared
    # helper above replaces both copies.
    tiles2 = defaultdict(lambda: False)
    # Snapshot the keys: the defaultdict grows while we read it.
    for x, y in list(tiles.keys()):
        if next_is_black(x, y):
            tiles2[x, y] = True
        for nx, ny in get_neighbours(x, y):
            if next_is_black(nx, ny):
                tiles2[nx, ny] = True
    return tiles2
#main
# Each step token moves in a doubled-width hex coordinate system.
STEP = {
    'se': (1, 1), 'sw': (-1, 1),
    'ne': (1, -1), 'nw': (-1, -1),
    'e': (2, 0), 'w': (-2, 0),
}
start_profiling()
for l in lines:
    # Tokenize the step sequence; two-letter tokens listed first so 'se'
    # matches before a bare 'e'.
    directions = re.findall('(se|sw|ne|nw|e|w)', l)
    x = y = 0
    for d in directions:
        dx, dy = STEP[d]
        x += dx
        y += dy
    # Each listed tile is flipped once per mention.
    tiles[x, y] = not tiles[x, y]
print('a)', sum(t for t in tiles.values()))
end_profiling()
start_profiling()
# Part 2: simulate 100 days of the cellular automaton.
for _ in range(100):
    tiles = day()
print('b)', sum(t for t in tiles.values()))
end_profiling()
| 31.661626 | 81 | 0.920294 |
9594993f4525fce4f5b648804a7994f70f4ed262 | 4,773 | py | Python | ci/check-documentation.py | FredrikBlomgren/aff3ct | fa616bd923b2dcf03a4cf119cceca51cf810d483 | [
"MIT"
] | 315 | 2016-06-21T13:32:14.000Z | 2022-03-28T09:33:59.000Z | ci/check-documentation.py | a-panella/aff3ct | 61509eb756ae3725b8a67c2d26a5af5ba95186fb | [
"MIT"
] | 153 | 2017-01-17T03:51:06.000Z | 2022-03-24T15:39:26.000Z | ci/check-documentation.py | a-panella/aff3ct | 61509eb756ae3725b8a67c2d26a5af5ba95186fb | [
"MIT"
] | 119 | 2017-01-04T14:31:58.000Z | 2022-03-21T08:34:16.000Z | #!/usr/bin/env python3
import argparse
import sys
import re
import subprocess
import os
import glob
import copy
import aff3ct_help_parser as ahp
def readFileInTable(filename):
    """Return the non-empty lines of *filename*, stripped of '\\n' and '\\r'.

    Lines that become empty after stripping are dropped.
    """
    # Improvements over the original: a context manager guarantees the file
    # is closed even on error, and str.replace does the '\r' removal without
    # a regex.
    with open(filename, "r") as a_file:
        stripped = [line.rstrip('\n').replace('\r', '') for line in a_file]
    return [line for line in stripped if line]
def get_keys(filename):
    """Extract the substitution keys (lines of the form ".. |key| ...")
    defined in the strings database file *filename*."""
    prefix = ".. |"
    found = []
    for line in readFileInTable(filename):
        if not line.startswith(prefix):
            continue
        # The key runs from just after the opening '|' to the closing '|'.
        end = line.find("|", len(prefix))
        found.append(line[len(prefix):end])
    return found
def run_aff3ct(args_list):
    """Run a command line and return (stdout lines, stderr text).

    *args_list* is the argv list (program path first).  stdout is decoded
    and split on newlines; stderr is returned as one decoded string.
    """
    import signal  # local import: only needed on the interrupt path

    try:
        processAFFECT = subprocess.Popen(args_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdoutAFFECT, stderrAFFECT) = processAFFECT.communicate()
    except KeyboardInterrupt:
        # Bug fix: `signal` was never imported at module level, so this
        # handler previously raised NameError instead of forwarding SIGINT
        # to the child and collecting its output.
        os.kill(processAFFECT.pid, signal.SIGINT)
        (stdoutAFFECT, stderrAFFECT) = processAFFECT.communicate()

    err = stderrAFFECT.decode(encoding='UTF-8')
    std = stdoutAFFECT.decode(encoding='UTF-8').split("\n")
    return std, err
def aff3ct_helpmap_to_keys_list(help_map, aff3ct_keys):
    """Collect every non-empty argument key from *help_map* into *aff3ct_keys*.

    *aff3ct_keys* is mutated in place; keys already present are not added
    again.  Non-dict entries (plain help text) are skipped.
    """
    for module_args in help_map.values():
        for arg_info in module_args.values():
            if type(arg_info) is not dict:
                continue
            key = arg_info["key"]
            if key != "" and key not in aff3ct_keys:
                aff3ct_keys.append(key)
def get_aff3ct_help_keys(aff3ct_path):
    """Run aff3ct's help for every code/sim-type combination and gather the
    full list of argument keys it advertises."""
    # Query the top-level help once to discover available codes and sim types.
    std, err = run_aff3ct([aff3ct_path, "-h"])
    helpMap = ahp.help_to_map(std)
    codesList = helpMap["Simulation"]["--sim-cde-type, -C"]["limits"][1:-1].split("|")
    simList = helpMap["Simulation"]["--sim-type"]["limits"][1:-1].split("|")

    aff3ct_keys = []
    # Request the advanced help (-H -k) for each combination and merge keys.
    for code in codesList:
        for sim in simList:
            args = [aff3ct_path, "-C", code, "-H", "-k", "--sim-type", sim, "-p", "8"]
            std, err = run_aff3ct(args)
            aff3ct_helpmap_to_keys_list(ahp.help_to_map(std), aff3ct_keys)
    return aff3ct_keys
def get_doc_keys(doc_path):
    """Scan every .rst file under *doc_path* for |factory::...| keys.

    Returns the de-duplicated list of keys found (order not guaranteed, as
    in the original, since it passes through a set).
    """
    # Hoist the compile out of the per-file loop, and use a raw string so
    # the '\|' escape is regex-only, not a Python string escape.
    pattern = re.compile(r"\|(factory::[^ ]*)\|")
    doc_keys = []
    for filename in glob.iglob(doc_path + '**/*.rst', recursive=True):
        # Bug fix: the original iterated `open(filename)` directly, leaking
        # one file handle per scanned file; a context manager closes each.
        with open(filename) as rst_file:
            for line in rst_file:
                doc_keys.extend(match.group(1) for match in pattern.finditer(line))
    # remove duplicates
    return list(set(doc_keys))
def display_keys(keys):
    """Print each key as a ' - [key]' bullet, or a placeholder when empty."""
    if not keys:
        print(" The keys list is empty.")
        return
    for key in keys:
        print(" - [" + key + "]")
def check_keys(keys_file, aff3ct_path, doc_path):
    """Cross-check argument keys between the strings database (*keys_file*),
    the aff3ct binary's help output (*aff3ct_path*), and the documentation
    sources (*doc_path*).

    Prints four report sections and returns the number of remaining
    discrepancies (undocumented, doc-only, and help-only keys).
    """
    # Gather the three key sources.
    list_keys = get_keys(keys_file)
    aff3ct_keys = get_aff3ct_help_keys(aff3ct_path)
    doc_keys = get_doc_keys(doc_path)
    list_keys.sort()
    aff3ct_keys.sort()
    doc_keys.sort()
    # aff3ct_keys is consumed destructively below; keep a copy for the
    # second comparison pass.
    aff3ct_keys_save = copy.deepcopy(aff3ct_keys)
    # Pass 1: remove every database key from aff3ct_keys.  Whatever remains
    # in aff3ct_keys is undocumented; keys that failed to remove (index
    # raised) exist only in the database.
    not_in_aff3ct_keys = []
    for k in list_keys:
        try:
            idx = aff3ct_keys.index(k)
            del aff3ct_keys[idx]
        except Exception as e:
            not_in_aff3ct_keys.append(k)
    # Pass 2: same scheme between the full help key list and the doc keys.
    not_in_doc_keys = []
    for k in aff3ct_keys_save:
        try:
            idx = doc_keys.index(k)
            del doc_keys[idx]
        except Exception as e:
            not_in_doc_keys.append(k)
    # manages special key exceptions
    # (known, deliberate mismatches that should not count as discrepancies)
    exceptions_not_in_doc_keys = ["factory::Frozenbits_generator::p+pb-path"]
    exceptions_doc_keys = ["factory::BFER::p+mpi-comm-freq", "factory::Launcher::except-a2l"]
    for e in exceptions_not_in_doc_keys:
        if e in not_in_doc_keys: not_in_doc_keys.remove(e)
    for e in exceptions_doc_keys:
        if e in doc_keys: doc_keys.remove(e)
    print("Keys used in the AFF3CT help but not defined in the strings database (undocumented keys):")
    display_keys(aff3ct_keys)
    print()
    print("Keys used in the AFF3CT doc but not used in the AFF3CT help:")
    display_keys(doc_keys)
    print()
    print("Keys used in the AFF3CT help but not used in the AFF3CT doc:")
    display_keys(not_in_doc_keys)
    print()
    print("Keys defined in the strings database but not used in the AFF3CT help or in the AFF3CT doc:")
    display_keys(not_in_aff3ct_keys)
    print()
    # Database-only keys are reported above but do not count as failures.
    nDiff = len(aff3ct_keys) + len(doc_keys) + len(not_in_doc_keys)
    return nDiff;
if __name__ == "__main__":
    # CLI entry point: paths to the strings database, the aff3ct binary,
    # and the documentation sources, each with an in-repo default.
    parser = argparse.ArgumentParser()
    parser.add_argument('--keys', action='store', dest='keys_file', type=str, default='doc/strings.rst')
    parser.add_argument('--aff3ct', action='store', dest='aff3ct_path', type=str, default='build/bin/aff3ct')
    parser.add_argument('--doc', action='store', dest='doc_path', type=str, default='doc/source/user/simulation/parameters/')
    args = parser.parse_args()
    # The discrepancy count doubles as the process exit code (0 == clean).
    nDiff = check_keys(args.keys_file, args.aff3ct_path, args.doc_path)
sys.exit(nDiff); | 27.431034 | 128 | 0.707521 |
9595a509a88acc24d2199e14d5a84b03b3fb5415 | 677 | py | Python | todoster/list_projects.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
] | 5 | 2020-08-05T21:02:35.000Z | 2021-11-11T14:31:35.000Z | todoster/list_projects.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
] | 1 | 2020-09-24T04:41:20.000Z | 2020-09-28T04:37:50.000Z | todoster/list_projects.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
] | 1 | 2021-08-09T19:23:24.000Z | 2021-08-09T19:23:24.000Z | from todoster.file_operations import load_projects
from todoster.output_formatter import format_string
def list_projects(arguments):
    """Print the project list, one line per project.

    Shows only active projects unless ``arguments.show_all_projects`` is
    set.  Each line carries a right-aligned 1-based counter, the title
    (dimmed when inactive) and the colored #shortcode.
    """
    projects = load_projects()
    if not arguments.show_all_projects:
        projects = [p for p in projects if p["active"]]

    print()
    # Idiom fix: enumerate() replaces the hand-maintained counter variable.
    for project_counter, project in enumerate(projects, start=1):
        counter = format_string(str(project_counter).rjust(3), dim=True)
        title = format_string(project["title"], dim=(not project["active"]))
        shortcode = format_string("#" + project["shortcode"], color=project["color"])
        print(counter + " " + title + " (" + shortcode + ")")
    print()
| 33.85 | 85 | 0.669129 |
95988a5a0c747ad5cc792f45a029f70fc328bc8e | 621 | py | Python | src/game_test.py | TomNo/tictactoe-mcts | 5d5db97f54fe5a3bf7c9afaaa4d74984fdb30ec4 | [
"MIT"
] | null | null | null | src/game_test.py | TomNo/tictactoe-mcts | 5d5db97f54fe5a3bf7c9afaaa4d74984fdb30ec4 | [
"MIT"
] | null | null | null | src/game_test.py | TomNo/tictactoe-mcts | 5d5db97f54fe5a3bf7c9afaaa4d74984fdb30ec4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Tomas Novacik'
import unittest2
from game import Game
from board import Board, PlayerType, Move
class GameTest(unittest2.TestCase):
    """Unit tests for the Game facade."""

    def test_winning_move(self):
        """A move that completes five in a row must finish the game."""
        game = Game()
        game.start()
        # set winning status to board
        board = Board()
        # Idiom fix: the original used a list comprehension purely for its
        # side effects; a plain loop states the intent.
        for i in range(4):
            board.place_move(Move(0, i, PlayerType.CIRCLE))
        winning_move = 0, 4
        game._board = board
        game.move(*winning_move)
        self.assertTrue(game.is_finished)

    def test_clone(self):
        """Cloning a started game must not raise."""
        game = Game()
        game.start()
        game.clone()
# eof
| 18.264706 | 75 | 0.613527 |
95993548b5a77661a71dcd96b3ee1f6f35d686ce | 1,911 | py | Python | skills_taxonomy_v2/pipeline/skills_extraction/get_sentence_embeddings_utils.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
] | 3 | 2021-11-21T17:21:12.000Z | 2021-12-10T21:19:57.000Z | skills_taxonomy_v2/pipeline/skills_extraction/get_sentence_embeddings_utils.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
] | 16 | 2021-10-06T11:20:35.000Z | 2022-02-02T11:44:28.000Z | skills_taxonomy_v2/pipeline/skills_extraction/get_sentence_embeddings_utils.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
] | 1 | 2021-10-04T12:27:20.000Z | 2021-10-04T12:27:20.000Z | """
Functions to mask sentences of undesirable words (stopwords, punctuation etc).
Used in get_sentence_embeddings.py to process sentences before finding embeddings.
"""
import re
from skills_taxonomy_v2.pipeline.skills_extraction.cleaning_sentences import (
separate_camel_case,
)
def is_token_word(token, token_len_threshold, stopwords, custom_stopwords):
"""
Returns true if the token:
- Doesn't contain 'www'
- Isn't too long (if it is it is usually garbage)
- Isn't a proper noun/number/quite a few other word types
- Isn't a word with numbers in (these are always garbage)
"""
return (
("www" not in token.text)
and (len(token) < token_len_threshold)
and (
token.pos_
not in [
"PROPN",
"NUM",
"SPACE",
"X",
"PUNCT",
"ADP",
"AUX",
"CONJ",
"DET",
"PART",
"PRON",
"SCONJ",
]
)
and (not re.search("\d", token.text))
and (not token.text.lower() in stopwords + custom_stopwords)
and (not token.lemma_.lower() in stopwords + custom_stopwords)
)
def process_sentence_mask(
sentence, nlp, bert_vectorizer, token_len_threshold, stopwords, custom_stopwords
):
"""
Mask sentence of stopwords etc, then get sentence embedding
"""
sentence = separate_camel_case(sentence)
doc = nlp(sentence)
masked_sentence = ""
for i, token in enumerate(doc):
if is_token_word(token, token_len_threshold, stopwords, custom_stopwords):
masked_sentence += " " + token.text
else:
masked_sentence += " [MASK]"
return masked_sentence
| 29.4 | 85 | 0.553114 |
959a854d76fcee93383a4561465ab39d08da02e1 | 1,000 | py | Python | migrations/versions/033809bcaf32_destinations.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 11 | 2017-08-23T17:41:43.000Z | 2018-10-24T03:00:38.000Z | migrations/versions/033809bcaf32_destinations.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 480 | 2017-07-14T00:29:11.000Z | 2020-01-06T19:04:51.000Z | migrations/versions/033809bcaf32_destinations.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 22 | 2017-07-07T00:07:32.000Z | 2020-02-27T19:43:14.000Z | """destinations
Revision ID: 033809bcaf32
Revises: 4a77b8fb792a
Create Date: 2017-08-24 05:56:45.166590
"""
from alembic import op
import sqlalchemy as sa
import geoalchemy2
# revision identifiers, used by Alembic.
revision = '033809bcaf32'
down_revision = '4a77b8fb792a'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``destinations`` table (id, timestamp, POINT geometry,
    name, address)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('destinations',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
    # Geospatial point for the destination's location (GeoAlchemy2/PostGIS).
    sa.Column('point', geoalchemy2.types.Geometry(geometry_type='POINT'), nullable=True),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('address', sa.String(length=300), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``destinations`` table (reverses :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('destinations')
    # ### end Alembic commands ###
| 27.027027 | 89 | 0.698 |