hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf93e015d6a43ea34aae7527d45de4ab3dbc6b2 | 5,867 | py | Python | mapclientplugins/geometricfitstep/step.py | s-fong/mapclientplugins.geometricfitstep | 2f9c5e5909a725ffe8b4a0dcda99c0d7d97f666d | [
"Apache-2.0"
] | null | null | null | mapclientplugins/geometricfitstep/step.py | s-fong/mapclientplugins.geometricfitstep | 2f9c5e5909a725ffe8b4a0dcda99c0d7d97f666d | [
"Apache-2.0"
] | null | null | null | mapclientplugins/geometricfitstep/step.py | s-fong/mapclientplugins.geometricfitstep | 2f9c5e5909a725ffe8b4a0dcda99c0d7d97f666d | [
"Apache-2.0"
] | null | null | null |
"""
MAP Client Plugin Step
"""
import json
from PySide import QtGui
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
from mapclientplugins.geometricfitstep.configuredialog import ConfigureDialog
from mapclientplugins.geometricfitstep.model.geometricfitmodel import GeometricFitModel
from mapclientplugins.geometricfitstep.view.geometricfitwidget import GeometricFitWidget
class GeometricFitStep(WorkflowStepMountPoint):
    """
    MAP Client workflow step for geometric fitting.  Consumes two
    file-location inputs (a Zinc model file and a Zinc data file) and
    provides one file-location output (the fitted Zinc model file).
    """

    def __init__(self, location):
        super(GeometricFitStep, self).__init__('Geometric Fit', location)
        self._configured = False  # A step cannot be executed until it has been configured.
        self._category = 'Fitting'
        # Add any other initialisation code here:
        self._icon = QtGui.QImage(':/geometricfitstep/images/fitting.png')
        # Ports:
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#file_location'))
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#file_location'))
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#file_location'))
        # Port data:
        self._port0_inputZincModelFile = None  # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location
        self._port1_inputZincDataFile = None  # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location
        self._port2_outputZincModelFile = None  # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location
        # Config:
        self._config = {}
        self._config['identifier'] = ''
        self._model = None
        self._view = None

    def execute(self):
        """
        Kick off the execution of the step: build the fitting model from the
        two input files and show the interactive fitting widget.  The widget
        calls _doneExecution() when the user finishes.
        """
        self._model = GeometricFitModel(self._port0_inputZincModelFile, self._port1_inputZincDataFile, self._location, self._config['identifier'])
        self._view = GeometricFitWidget(self._model)
        self._view.registerDoneExecution(self._doneExecution)
        self._setCurrentWidget(self._view)

    def setPortData(self, index, dataIn):
        """
        Set the data for one of the input ("uses") ports.

        :param index: Index of the port to set (0 = model file, 1 = data file).
        :param dataIn: The data to set for the port at the given index.
        """
        if index == 0:
            self._port0_inputZincModelFile = dataIn  # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location
        elif index == 1:
            self._port1_inputZincDataFile = dataIn  # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location

    def getPortData(self, index):
        """
        Return the data for the output ("provides") port: the fitted model
        file name obtained from the model.

        :param index: Index of the port to return.
        """
        self._port2_outputZincModelFile = self._model.getOutputModelFileName()
        # Fixed: previously returned the undefined attribute
        # '_model_port2_outputZincModelFile', which raised AttributeError.
        return self._port2_outputZincModelFile  # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location

    def configure(self):
        """
        Called when the configure icon on the step is clicked.  Shows the
        configuration dialog; sets self._configured = True once the
        configuration for this step is complete and valid.
        """
        dlg = ConfigureDialog()
        dlg.identifierOccursCount = self._identifierOccursCount
        dlg.setConfig(self._config)
        dlg.validate()
        dlg.setModal(True)
        if dlg.exec_():
            self._config = dlg.getConfig()
            self._configured = dlg.validate()
            self._configuredObserver()

    def getIdentifier(self):
        """
        The identifier is a string that must be unique within a workflow.
        """
        return self._config['identifier']

    def setIdentifier(self, identifier):
        """
        The framework will set the identifier for this step when it is loaded.
        """
        self._config['identifier'] = identifier

    def serialize(self):
        """
        Serialize this step's configuration to a JSON string.  Implements the
        opposite of 'deserialize'.
        """
        return json.dumps(self._config, default=lambda o: o.__dict__, sort_keys=True, indent=4)

    def deserialize(self, string):
        """
        Restore this step's configuration from a JSON string and re-validate
        it.  Implements the opposite of 'serialize'.

        :param string: JSON representation of the configuration in a string.
        """
        self._config.update(json.loads(string))
        d = ConfigureDialog()
        d.identifierOccursCount = self._identifierOccursCount
        d.setConfig(self._config)
        self._configured = d.validate()
| 42.514493 | 146 | 0.667292 |
acf93e306cd7346778619144579b1eda04e9226e | 1,741 | py | Python | trivia.py | catherinedevlin/holy-grail-run-away | ff9ae8a68dafdbe4ec70cf824452e862584cc139 | [
"MIT"
] | 1 | 2020-05-13T19:03:49.000Z | 2020-05-13T19:03:49.000Z | trivia.py | catherinedevlin/holy-grail-run-away | ff9ae8a68dafdbe4ec70cf824452e862584cc139 | [
"MIT"
] | null | null | null | trivia.py | catherinedevlin/holy-grail-run-away | ff9ae8a68dafdbe4ec70cf824452e862584cc139 | [
"MIT"
] | null | null | null | import random
import re
import subprocess
import enquiries
# A blank line (possibly containing only whitespace) separates one
# question block from the next in the source file.
blankline = re.compile(r'\n\s*\n', re.DOTALL)
# Questions file; each block's first line is the prompt, the remaining
# lines are the choices (correct one first — see Question.__init__).
SOURCE = 'trivia.txt'
# Read the whole file once at import time; parsed lazily by load().
with open(SOURCE) as infile:
    content = infile.read()
class Question:
    """A single trivia question parsed from a block of text.

    The block's first line is the question; the remaining lines are the
    answer choices, with the correct answer listed first.
    """

    def __init__(self, raw):
        self.raw = raw
        lines = self.raw.splitlines()
        self.question = lines.pop(0)  # first line is the prompt
        self.lines = lines            # remaining lines are the choices
        self.correct = lines[0]       # correct choice is always listed first

    def dict(self):
        """Return the question as a dict with the choices shuffled."""
        options = self.lines.copy()
        random.shuffle(options)
        return {
            'question': self.question,
            'choices': options,
            # Fixed: previously read the nonexistent attribute
            # ``self.answer``, which raised AttributeError.
            'answer': self.correct
        }

    def ask(self):
        """Prompt the player, play a feedback clip, return True if correct."""
        options = self.lines.copy()
        random.shuffle(options)
        answer = enquiries.choose(self.question, options)
        result = answer == self.correct
        try:
            if result:
                subprocess.run(('xplayer', 'video/correct.mp4'), timeout=6)
            else:
                subprocess.run(('xplayer', 'video/wrong.mp4'), timeout=6)
        except subprocess.TimeoutExpired:
            # Clip ran longer than 6 seconds; killing it is fine, carry on.
            pass
        return result
def load():
    """Split the module-level file contents into Question objects."""
    return [Question(chunk) for chunk in blankline.split(content)]
if __name__ == '__main__':
    questions = load()
    # Deck of questions drawn without replacement; reshuffled and
    # refilled once exhausted.
    question_deck = questions.copy()
    random.shuffle(question_deck)
    streak = 0
    while True:
        # Clear the terminal between questions (runs the `clear` binary).
        subprocess.run('clear')
        print(
            '\n\nYour winning streak is {} questions long.\n\n'.format(streak))
        if not question_deck:
            # Deck exhausted: start over with a fresh shuffle.
            question_deck = questions.copy()
            random.shuffle(question_deck)
        question = question_deck.pop()
        result = question.ask()
        if result:
            streak += 1
        else:
            # Any wrong answer resets the streak.
            streak = 0
| 24.521127 | 79 | 0.572659 |
acf93ee6dcb66e88d35fd836005237f815fd7165 | 1,098 | py | Python | all_domains/python/strings/string_validators.py | ejspeiro/HackerRank | 2e489588e8d7102acb676cc49fe07ee83e4f66e9 | [
"MIT"
] | null | null | null | all_domains/python/strings/string_validators.py | ejspeiro/HackerRank | 2e489588e8d7102acb676cc49fe07ee83e4f66e9 | [
"MIT"
] | null | null | null | all_domains/python/strings/string_validators.py | ejspeiro/HackerRank | 2e489588e8d7102acb676cc49fe07ee83e4f66e9 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
if __name__ == '__main__':
s = input()
hasAlNum = False
hasAlpha = False
hasDigit = False
hasLower = False
hasUpper = False
digitChecked = False
lowerChecked = False
upperChecked = False
# Check every character in the string. Linear complexity.
for cc in s:
if ~digitChecked and cc.isdigit():
hasDigit = True
digitChecked = True
if ~lowerChecked and cc.islower():
hasLower = True
lowerChecked = True
if ~upperChecked and cc.isupper():
hasUpper = True
upperChecked = True
if lowerChecked and upperChecked:
hasAlpha = True
if hasAlpha and hasDigit:
hasAlNum = True;
if hasAlNum:
print(True)
else:
print(False)
if hasAlpha:
print(True)
else:
print(False)
if hasDigit:
print(True)
else:
print(False)
if hasLower:
print(True)
else:
print(False)
if hasUpper:
print(True)
else:
print(False)
| 20.333333 | 61 | 0.551002 |
acf93f50534e6acb522c344f863a868bde741929 | 1,188 | py | Python | src/pretalx/common/management/commands/regenerate_css.py | Hydro2shine/sprout | 7dfa5e9fa0a7ef9157517ad0752e393599053873 | [
"Apache-2.0"
] | null | null | null | src/pretalx/common/management/commands/regenerate_css.py | Hydro2shine/sprout | 7dfa5e9fa0a7ef9157517ad0752e393599053873 | [
"Apache-2.0"
] | 1 | 2019-07-05T20:03:42.000Z | 2019-07-05T20:03:42.000Z | src/pretalx/common/management/commands/regenerate_css.py | Hydro2shine/sprout | 7dfa5e9fa0a7ef9157517ad0752e393599053873 | [
"Apache-2.0"
] | null | null | null | from django.core.management.base import BaseCommand
from pretalx.common.tasks import regenerate_css
from pretalx.event.models.event import Event, Event_SettingsStore
class Command(BaseCommand):
    """Queue asynchronous CSS regeneration for one event or for all
    events that carry a custom agenda CSS file."""

    help = 'Rebuild static files and language files'

    def add_arguments(self, parser):
        parser.add_argument('--event', type=str)

    def handle(self, *args, **options):
        slug = options.get('event')
        if not slug:
            # No event given: regenerate every event with a custom CSS
            # file, most recent events first.
            entries = Event_SettingsStore.objects.filter(
                key='agenda_css_file').order_by('-object__date_from')
            for entry in entries:
                self._regenerate(Event.objects.get(pk=entry.object_id))
            return
        try:
            event = Event.objects.get(slug__iexact=slug)
        except Event.DoesNotExist:
            self.stdout.write(self.style.ERROR('This event does not exist.'))
            return
        self._regenerate(event)

    def _regenerate(self, event):
        # Kick off the Celery task and report success for this event.
        regenerate_css.apply_async(args=(event.pk, ))
        self.stdout.write(self.style.SUCCESS(f'[{event.slug}] Event style was successfully regenerated.'))
| 40.965517 | 114 | 0.653199 |
acf940f5c1ff4df752546050e6a1e709e39a5eec | 6,317 | py | Python | main.py | jaswanthbjk/SoccerRobotPerception | 0528e51ba35abe3ef5ecd59ff187a959bc8f79bb | [
"MIT"
] | null | null | null | main.py | jaswanthbjk/SoccerRobotPerception | 0528e51ba35abe3ef5ecd59ff187a959bc8f79bb | [
"MIT"
] | null | null | null | main.py | jaswanthbjk/SoccerRobotPerception | 0528e51ba35abe3ef5ecd59ff187a959bc8f79bb | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import os as os
from PIL import Image
import numpy as np
import torch
import os
from glob import glob
import cv2
import random
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data import DataLoader as get_data_loader
from torch.utils.tensorboard import SummaryWriter
from torch import randperm
from torch._utils import _accumulate
import torchvision
from torchvision import datasets, transforms, models
import torchvision.transforms.functional as F
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import torch.nn as nn
import time
import sklearn.metrics as skm
from scipy.spatial import distance
from scipy.stats import multivariate_normal
from torch.optim import Adam
from dataloader import Blobdataset, SegDataset, split_dataset
from model import NimbRoNet2
from train import train_model
from metrics import evaluate_detection, evaluate_segmentation
import argparse
# Select the compute device: prefer CUDA when available, else fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device: ", device)
# Command-line interface for the NimbRoNet2 training/evaluation run.
# Fixed: the description previously read 'List the content of a folder',
# a copy-paste leftover unrelated to this script.
my_parser = argparse.ArgumentParser(
    description='Train and evaluate the NimbRoNet2 detection/segmentation model')
# Add the arguments
my_parser.add_argument('--batch_size',
                       type=int,
                       default=8,
                       help='batch size')
my_parser.add_argument('--num_epochs',
                       type=int,
                       default=100,
                       help='number of epochs')
my_parser.add_argument('--dataset_path',
                       type=str,
                       default="/scratch/smuthi2s/data/",
                       help='Give the path to dataset')
my_parser.add_argument('--save_path',
                       type=str,
                       default="./",
                       help='Give the path to save model and logs')
args = my_parser.parse_args()
# Unpack into the module-level names used throughout the script.
batch_size = args.batch_size
num_epochs = args.num_epochs
save_path = args.save_path
input_path = args.dataset_path
# ---- Datasets ----
# Blob (detection) data: per-class blob targets (ball / goal post /
# robot — see the evaluation printouts below).
print("======Loading detection dataset======")
detection_dataset = Blobdataset(input_path+"blob")
train_detection_dataset, val_detection_dataset, test_detection_dataset = split_dataset(detection_dataset)
print("Total samples: ",len(detection_dataset))
print("Training samples: ",len(train_detection_dataset))
print("Validation samples: ",len(val_detection_dataset))
print("Testing samples: ",len(test_detection_dataset))
print("=====================================")
# Field segmentation data (background / field / line classes).
print("======Loading segmentation dataset======")
segmentation_dataset = SegDataset(input_path+"segmentation")
train_segmentation_dataset, val_segmentation_dataset, test_segmentation_dataset = split_dataset(segmentation_dataset)
print("Total samples: ",len(segmentation_dataset))
print("Training samples: ",len(train_segmentation_dataset))
print("Validation samples: ",len(val_segmentation_dataset))
print("Testing samples: ",len(test_segmentation_dataset))
print("========================================")
# ---- Data loaders ----
# Training uses the CLI batch size; validation and testing iterate one
# sample at a time (batch_size=1).
train_detection_loader = get_data_loader(dataset=train_detection_dataset,
                                         batch_size=batch_size,
                                         shuffle=True)
train_seg_loader = get_data_loader(dataset=train_segmentation_dataset,
                                   batch_size=batch_size,
                                   shuffle=True)
val_detection_loader = get_data_loader(dataset=val_detection_dataset,
                                       batch_size=1,
                                       shuffle=True)
val_seg_loader = get_data_loader(dataset=val_segmentation_dataset,
                                 batch_size=1,
                                 shuffle=True)
test_detection_loader = get_data_loader(dataset=test_detection_dataset,
                                        batch_size=1,
                                        shuffle=True)
test_seg_loader = get_data_loader(dataset=test_segmentation_dataset,
                                  batch_size=1,
                                  shuffle=True)
# ---- Model & optimiser ----
# NOTE(review): "Hyperpatameters" is a typo inside a runtime string;
# left unchanged here to keep the program's output byte-identical.
print("======Hyperpatameters======")
print("Batch size: ",batch_size)
print("Number of epochs: ",num_epochs)
print("======Loading NimbRoNet2 model======")
model = NimbRoNet2()
# The encoder is fine-tuned with a learning rate 1000x smaller than the
# 1x1-conv / transposed-conv / location-dependent heads — presumably
# because the encoder is pretrained; TODO confirm in model.py.
optimizer = Adam([{"params":model.encoder.parameters(), "lr":0.000001},
                  {"params":model.conv1x1_1.parameters(), "lr":0.001},
                  {"params":model.conv1x1_2.parameters(), "lr":0.001},
                  {"params":model.conv1x1_3.parameters(), "lr":0.001},
                  {"params":model.transpose_conv1.parameters(), "lr":0.001},
                  {"params":model.transpose_conv2.parameters(), "lr":0.001},
                  {"params":model.transpose_conv3.parameters(), "lr":0.001},
                  {"params":model.loc_dep_conv.parameters(), "lr":0.001}],
                 lr=0.001)
# TensorBoard logging directory under the save path.
writer = SummaryWriter(save_path+'final_logs')
# ---- Training ----
print("======Training NimbRoNet2 model started with vl======")
model = train_model(model, num_epochs, train_detection_loader, train_seg_loader, val_detection_loader, val_seg_loader, optimizer, writer, save_path, device)
writer.flush()
writer.close()
print("Saving NimbRoNet2 model at: ", save_path)
torch.save(model, save_path+"nimbronet2_gpu_tvl.pth")
# ---- Evaluation on the held-out test split ----
print("======Testing NimbRoNet2 model======")
# Reload the just-saved checkpoint so evaluation runs on exactly what
# was written to disk.
model = torch.load(save_path+"nimbronet2_gpu_tvl.pth")
model.eval()
# Per-class detection metrics: index 0 = ball, 1 = goal post, 2 = robot.
f1_score, accuracy, recall, precision, fdr = evaluate_detection(model,test_detection_loader,device)
print("Ball detection metrics: \n F1 score: %.3f, Accuracy: %.3f, Recall: %.3f, Precision: %.3f, FDR: %.3f"%(f1_score[0],accuracy[0],recall[0],precision[0],fdr[0]))
print("Goal Post detection metrics: \n F1 score: %.3f, Accuracy: %.3f, Recall: %.3f, Precision: %.3f, FDR: %.3f"%(f1_score[1],accuracy[1],recall[1],precision[1],fdr[1]))
print("Robot detection metrics: \n F1 score: %.3f, Accuracy: %.3f, Recall: %.3f, Precision: %.3f, FDR: %.3f"%(f1_score[2],accuracy[2],recall[2],precision[2],fdr[2]))
# Per-class segmentation metrics: 0 = background, 1 = field, 2 = line.
acc, iou = evaluate_segmentation(model,test_seg_loader,device)
print("Background: Accuracy: %.3f, IoU: %.3f"%(acc[0],iou[0]))
print("Field: Accuracy: %.3f, IoU: %.3f"%(acc[1],iou[1]))
print("Line: Accuracy: %.3f, IoU: %.3f"%(acc[2],iou[2]))
| 41.019481 | 169 | 0.647301 |
acf9413cf6a33cc413ff53ae6abab985753d523c | 383 | py | Python | utils/pandaman.py | EniasCailliau/Kaggle-User-Authentication | 2da4c800af662bd0f93df75a51bf4e79fd44a902 | [
"MIT"
] | null | null | null | utils/pandaman.py | EniasCailliau/Kaggle-User-Authentication | 2da4c800af662bd0f93df75a51bf4e79fd44a902 | [
"MIT"
] | null | null | null | utils/pandaman.py | EniasCailliau/Kaggle-User-Authentication | 2da4c800af662bd0f93df75a51bf4e79fd44a902 | [
"MIT"
] | null | null | null | import numpy as np
import inspect
def translate_column_indices(indices, data_frame):
    """Map an array of integer column indices to the frame's column labels.

    `indices` is transposed and each 1-D slice is looked up against
    ``data_frame.columns.values``.
    """
    def to_labels(index_slice):
        return data_frame.columns.values[index_slice]
    return np.apply_along_axis(to_labels, 0, indices.T)
def print_stats(*args, **kwargs):
    """Print the ``.shape`` of every keyword argument.

    Positional arguments are not supported; passing any triggers a
    warning line but processing of the keyword arguments continues.
    """
    if args:
        print("only named arguments are accepted by this print_stats")
    for name, value in kwargs.items():
        print("Shape of {} : {}".format(name, value.shape))
| 25.533333 | 84 | 0.689295 |
acf942bc58781aa440235995d57cd44b199dccb8 | 8,308 | py | Python | fairmodels/plotnine/geoms/annotation_logticks.py | Locust2520/python-fairmodels | 0572f7c205b67c148bdc83b8dc4eaf70c06468a5 | [
"MIT"
] | null | null | null | fairmodels/plotnine/geoms/annotation_logticks.py | Locust2520/python-fairmodels | 0572f7c205b67c148bdc83b8dc4eaf70c06468a5 | [
"MIT"
] | 1 | 2020-10-02T21:43:06.000Z | 2020-10-15T22:52:39.000Z | fairmodels/plotnine/geoms/annotation_logticks.py | Locust2520/python-fairmodels | 0572f7c205b67c148bdc83b8dc4eaf70c06468a5 | [
"MIT"
] | null | null | null | import warnings
import numpy as np
import pandas as pd
from ..coords import coord_flip
from ..exceptions import PlotnineWarning
from ..utils import log
from .annotate import annotate
from .geom_rug import geom_rug
class _geom_logticks(geom_rug):
    """
    Internal geom implementing drawing of annotation_logticks
    """
    DEFAULT_AES = {}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
                      'na_rm': False, 'sides': 'bl', 'alpha': 1,
                      'color': 'black', 'size': 0.5, 'linetype': 'solid',
                      'lengths': (0.036, 0.0225, 0.012), 'base': 10}
    legend_geom = 'path'
    @staticmethod
    def _check_log_scale(base, sides, panel_params, coord):
        """
        Check the log transforms
        Parameters
        ----------
        base : float or None
            Base of the logarithm in which the ticks will be
            calculated. If ``None``, the base of the log transform
            the scale will be used.
        sides : str (default: bl)
            Sides onto which to draw the marks. Any combination
            chosen from the characters ``btlr``, for *bottom*, *top*,
            *left* or *right* side marks. If ``coord_flip()`` is used,
            these are the sides *after* the flip.
        panel_params : SimpleNamespace
            ``x`` and ``y`` view scale values.
        coord : coord
            Coordinate (e.g. coord_cartesian) system of the geom.
        Returns
        -------
        out : tuple
            The bases (base_x, base_y) to use when generating the ticks.
        """
        def is_log(scale):
            # A scale counts as logarithmic when its transform is named
            # log* and exposes the base it was built with.
            if not hasattr(scale, 'trans'):
                return False
            trans = scale.trans
            return (trans.__class__.__name__.startswith('log') and
                    hasattr(trans, 'base'))
        base_x, base_y = base, base
        x_scale = panel_params.x.scale
        y_scale = panel_params.y.scale
        x_is_log = is_log(x_scale)
        y_is_log = is_log(y_scale)
        if isinstance(coord, coord_flip):
            # Sides are specified after the flip, so swap the flags too.
            x_is_log, y_is_log = y_is_log, x_is_log
        if 't' in sides or 'b' in sides:
            if base_x is None:
                if x_is_log:
                    base_x = x_scale.trans.base
                else:  # no log, no defined base. See warning below.
                    base_x = 10
            if not x_is_log:
                warnings.warn(
                    "annotation_logticks for x-axis which does not have "
                    "a log scale. The logticks may not make sense.",
                    PlotnineWarning)
            elif x_is_log and base_x != x_scale.trans.base:
                warnings.warn(
                    "The x-axis is log transformed in base {} ,"
                    "but the annotation_logticks are computed in base {}"
                    "".format(base_x, x_scale.trans.base),
                    PlotnineWarning)
        if 'l' in sides or 'r' in sides:
            if base_y is None:
                if y_is_log:
                    base_y = y_scale.trans.base
                else:  # no log, no defined base. See warning below.
                    base_y = 10
            if not y_is_log:
                warnings.warn(
                    "annotation_logticks for y-axis which does not have "
                    "a log scale. The logticks may not make sense.",
                    PlotnineWarning)
            elif y_is_log and base_y != y_scale.trans.base:
                warnings.warn(
                    "The y-axis is log transformed in base {} ,"
                    "but the annotation_logticks are computed in base {}"
                    "".format(base_y, y_scale.trans.base),
                    PlotnineWarning)
        return base_x, base_y
    @staticmethod
    def _calc_ticks(value_range, base):
        """
        Calculate tick marks within a range
        Parameters
        ----------
        value_range: tuple
            Range for which to calculate ticks.
        Returns
        -------
        out: tuple
            (major, middle, minor) tick locations
        """
        def _minor(x, mid_idx):
            # Everything between the endpoints except the middle tick.
            return np.hstack([x[1:mid_idx], x[mid_idx+1:-1]])
        # * Calculate the low and high powers,
        # * Generate for all intervals in along the low-high power range
        # The intervals are in normal space
        # * Calculate evenly spaced breaks in normal space, then convert
        # them to log space.
        low = np.floor(value_range[0])
        high = np.ceil(value_range[1])
        arr = base ** np.arange(low, float(high+1))
        n_ticks = base - 1
        breaks = [log(np.linspace(b1, b2, n_ticks+1), base)
                  for (b1, b2) in list(zip(arr, arr[1:]))]
        # Partition the breaks in the 3 groups
        major = np.array([x[0] for x in breaks] + [breaks[-1][-1]])
        if n_ticks % 2:
            # Odd tick count per decade: a well-defined middle tick exists.
            mid_idx = n_ticks // 2
            middle = [x[mid_idx] for x in breaks]
            minor = np.hstack([_minor(x, mid_idx) for x in breaks])
        else:
            # Even tick count: no middle tick, everything inside is minor.
            middle = []
            minor = np.hstack([x[1:-1] for x in breaks])
        return major, middle, minor
    def draw_panel(self, data, panel_params, coord, ax, **params):
        # Any passed data is ignored, the relevant data is created
        sides = params['sides']
        lengths = params['lengths']
        _aesthetics = {
            'size': params['size'],
            'color': params['color'],
            'alpha': params['alpha'],
            'linetype': params['linetype']
        }
        base_x, base_y = self._check_log_scale(
            params['base'], sides, panel_params, coord)
        if 'b' in sides or 't' in sides:
            # Draw major/middle/minor ticks (one rug layer each) along x.
            tick_positions = self._calc_ticks(panel_params.x.range, base_x)
            for (positions, length) in zip(tick_positions, lengths):
                data = pd.DataFrame(dict(x=positions, **_aesthetics))
                super().draw_group(data, panel_params, coord, ax,
                                   length=length, **params)
        if 'l' in sides or 'r' in sides:
            # Same three layers along y.
            tick_positions = self._calc_ticks(panel_params.y.range, base_y)
            for (positions, length) in zip(tick_positions, lengths):
                data = pd.DataFrame(dict(y=positions, **_aesthetics))
                super().draw_group(data, panel_params, coord, ax,
                                   length=length, **params)
class annotation_logticks(annotate):
    """
    Marginal log ticks.

    If added to a plot that does not have a log10 axis on the
    respective side, a warning will be issued.

    Parameters
    ----------
    sides : str (default: bl)
        Sides onto which to draw the marks, any combination of the
        characters ``btlr`` (bottom, top, left, right).  With
        ``coord_flip()`` these are the sides *after* the flip.
    alpha : float (default: 1)
        Transparency of the ticks.
    color : str | tuple (default: 'black')
        Colour of the ticks.
    size : float
        Thickness of the ticks.
    linetype : 'solid' | 'dashed' | 'dashdot' | 'dotted' | tuple
        Type of line. Default is *solid*.
    lengths : tuple (default (0.036, 0.0225, 0.012))
        Length of the ticks drawn for full / half / tenth ticks,
        relative to panel size.
    base : float (default: None)
        Base of the logarithm in which the ticks will be calculated.
        If ``None``, the base used to log transform the scale is used.
    """

    def __init__(self, sides='bl', alpha=1, color='black', size=0.5,
                 linetype='solid', lengths=(0.036, 0.0225, 0.012),
                 base=None):
        if len(lengths) != 3:
            raise ValueError(
                "length for annotation_logticks must be a tuple of 3 floats")
        # Delegate the actual drawing to the internal rug-based geom.
        tick_params = dict(sides=sides, alpha=alpha, color=color,
                           size=size, linetype=linetype,
                           lengths=lengths, base=base)
        self._annotation_geom = _geom_logticks(**tick_params)
| 37.936073 | 77 | 0.532017 |
acf942daf67ec4e05a6bcb25ff48207f20c467ab | 274 | py | Python | Scripts/simulation/ui/__init__.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/ui/__init__.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/ui/__init__.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\ui\__init__.py
# Compiled at: 2011-07-18 20:22:55
pass | 45.666667 | 107 | 0.722628 |
acf9451eb051c7052071c48825ab4e332029e0e7 | 611 | py | Python | backend/base/urls/user_urls.py | your-code-is-my-property/proshop | 08bb9b749de00f2cca87b8ffb4e31615d84fdfe0 | [
"MIT"
] | 1 | 2021-08-06T16:24:30.000Z | 2021-08-06T16:24:30.000Z | backend/base/urls/user_urls.py | your-code-is-my-property/proshop | 08bb9b749de00f2cca87b8ffb4e31615d84fdfe0 | [
"MIT"
] | null | null | null | backend/base/urls/user_urls.py | your-code-is-my-property/proshop | 08bb9b749de00f2cca87b8ffb4e31615d84fdfe0 | [
"MIT"
] | null | null | null | from django.urls import path
from base.views import user_views as views
# User API routes.  NOTE(review): ordering matters — the '<str:pk>/'
# detail route matches any single path segment, so it must stay after
# the literal routes ('login/', 'register/', 'profile/').
urlpatterns = [
    path('', views.getUsers, name="users"),
    # Token-based login view (presumably a JWT token-obtain view — see user_views).
    path('login/', views.MyTokenObtainPairView.as_view(), name="login"),
    path('register/', views.registerUser, name="register"),
    path('profile/', views.getUserProfile, name="user-profile"),
    path('profile/update/', views.updateUserProfile, name="update-user-profile"),
    path('update/<str:pk>/', views.updateUser, name="user-update"),
    path('<str:pk>/', views.getUserById, name="user"),
    path('delete/<str:pk>/', views.deleteUser, name="user-delete"),
]
| 32.157895 | 81 | 0.679214 |
acf94538887e367759c2daf7e7357aefc391f63c | 1,110 | py | Python | electrum_onion/gui/kivy/i18n.py | nezero/electrum-onion | 1dbd3da67c6bb09d97686b291b7fb92920afe6f3 | [
"MIT"
] | 5 | 2021-02-09T00:38:21.000Z | 2021-08-20T13:28:56.000Z | electrum_onion/gui/kivy/i18n.py | nezero/electrum-onion | 1dbd3da67c6bb09d97686b291b7fb92920afe6f3 | [
"MIT"
] | 2 | 2021-03-19T16:24:17.000Z | 2021-03-19T16:25:10.000Z | electrum_onion/gui/kivy/i18n.py | nezero/electrum-onion | 1dbd3da67c6bb09d97686b291b7fb92920afe6f3 | [
"MIT"
] | 3 | 2021-03-06T21:02:53.000Z | 2022-03-21T00:18:15.000Z | import gettext
class _(str):
    """Translating string: ``_('text')`` returns the gettext translation
    of 'text' while remembering the original in ``source_text`` so that
    bound labels can be re-translated after a language switch.
    """
    # Widgets whose .text should be refreshed when the language changes.
    observers = set()
    # Current gettext translation callable; lazily initialised to 'en'.
    lang = None
    def __new__(cls, s):
        if _.lang is None:
            # First use: default to English.
            _.switch_lang('en')
        t = _.translate(s)
        o = super(_, cls).__new__(cls, t)
        # Keep the untranslated source so the text can be re-translated
        # later (see switch_lang).
        o.source_text = s
        return o
    @staticmethod
    def translate(s, *args, **kwargs):
        # Extra args are accepted for call-site compatibility but ignored.
        return _.lang(s)
    @staticmethod
    def bind(label):
        # Register a widget for re-translation on language switch.
        try:
            _.observers.add(label)
        except:
            pass
        # garbage collection
        # NOTE(review): the try/except here and below look like leftovers
        # from a weak-reference scheme; as written, set.add cannot fail,
        # so this copy is effectively a no-op — confirm intent.
        new = set()
        for label in _.observers:
            try:
                new.add(label)
            except:
                pass
        _.observers = new
    @staticmethod
    def switch_lang(lang):
        # get the right locales directory, and instantiate a gettext translation
        from electrum_onion.i18n import LOCALE_DIR
        locales = gettext.translation('electrum', LOCALE_DIR, languages=[lang], fallback=True)
        _.lang = locales.gettext
        # Re-translate every bound widget from its remembered source text.
        for label in _.observers:
            try:
                label.text = _(label.text.source_text)
            except:
                pass
| 23.617021 | 94 | 0.527027 |
acf945632e6c36a86b133ad1073819225b320ee8 | 371 | py | Python | backend/commitz/celery.py | marcosflp/commitz | 9402e96bad0ac66f054b22c020bf6fbac358b4ec | [
"MIT"
] | 1 | 2020-03-02T19:58:37.000Z | 2020-03-02T19:58:37.000Z | backend/commitz/celery.py | marcosflp/commitz | 9402e96bad0ac66f054b22c020bf6fbac358b4ec | [
"MIT"
] | 7 | 2020-06-06T01:12:25.000Z | 2022-02-10T11:48:09.000Z | backend/commitz/celery.py | marcosflp/commitz | 9402e96bad0ac66f054b22c020bf6fbac358b4ec | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
import os
from django.apps import apps
from celery import Celery
# Make sure a Django settings module is defined before Celery configures
# itself; an externally-set DJANGO_SETTINGS_MODULE takes precedence over
# this default.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "commitz.settings.local")
app = Celery('commitz_tasks')
# Read configuration from Django settings; per Celery convention the
# namespace means only keys prefixed with CELERY_ are picked up.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover task modules in every installed Django app.
app.autodiscover_tasks(lambda: [n.name for n in apps.get_app_configs()])
| 21.823529 | 73 | 0.789757 |
acf947a7a4c7c4f3d5f68a329309bc146250223f | 1,812 | py | Python | scripts/parametric_traction.py | kumiori/stability-bifurcation | 9a82bf40742a9b16122b7a476ad8aec65fe22539 | [
"MIT"
] | 1 | 2020-08-10T22:38:26.000Z | 2020-08-10T22:38:26.000Z | scripts/parametric_traction.py | kumiori/stability-bifurcation | 9a82bf40742a9b16122b7a476ad8aec65fe22539 | [
"MIT"
] | 2 | 2020-08-13T16:33:51.000Z | 2020-09-01T21:05:34.000Z | scripts/parametric_traction.py | kumiori/stability-bifurcation | 9a82bf40742a9b16122b7a476ad8aec65fe22539 | [
"MIT"
] | 1 | 2020-10-25T22:28:11.000Z | 2020-10-25T22:28:11.000Z | from traction_stabilitycheck import *
import numpy as np
from utils import ColorPrint
# ell_list = np.linspace(.1, .5, 20)
# ell_min = 0.1
# ell_max = 2.
# Ten values of the internal length, log-spaced between 0.1 and 1.5.
ell_list = np.logspace(np.log10(.1), np.log10(1.5), 10)
def t_stab(ell, q=2):
    """Stability-limit load for internal length `ell` (exponent `q`).

    Below the threshold length the limit load is 1; above it, the load
    grows linearly with `ell`.
    """
    threshold = 2.*2.*np.pi*q/(q+1)**(3./2.)*np.sqrt(2)
    if 1/ell > threshold:
        return 1.
    return threshold*ell/1.
def t_bif(ell, q=2):
    """Bifurcation (uniqueness) load: the stability coefficient scaled
    by (q+1)/(2q), clamped at 1 below the threshold length."""
    threshold = 2.*2.*np.pi*q/(q+1)**(3./2.)*np.sqrt(2)*(q+1)/(2.*q)
    if 1/ell > threshold:
        return 1.
    return threshold*ell/1.
# Report the analytic thresholds over the sweep before running anything.
print([t_stab(ell) for ell in ell_list])
print([t_bif(ell) for ell in ell_list])
print([3./4.*t_stab(ell) for ell in ell_list])
# Sweep the internal length ell and run one traction test per value.
for ell in ell_list:
    # Rescaled internal length used by the analytic threshold formulas.
    ell_r = ell*np.sqrt(2)
    tstab = t_stab(ell_r, 2)
    tbif = t_bif(ell_r, 2)
    print('tstab, tbif', tstab, tbif)
    # Centre a load window of width 2 on the stability threshold,
    # clamped below at 0.
    lmin = tstab - 1.
    load_min = lmin if lmin > 0. else 0.
    load_max = tstab + 1.
    ColorPrint.print_info('Solving ell {}'.format(ell))
    ColorPrint.print_info('Load: [{} {}]'.format(load_min, load_max))
    ColorPrint.print_info('stab limit: {} '.format(tstab))
    ColorPrint.print_info('uniq limit: {} '.format(tbif))
    try:
        traction_test(
            ell=ell,
            load_min=load_min,
            load_max=load_max,
            nsteps=30,
            nu=0.,
            n=10,
            Ly=.05,
            outdir='../output/parametric-traction-paper/ell-{:2f}'.format(ell),
            breakifunstable=True
        )
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the sweep uninterruptible.
        ColorPrint.print_warn("Something went somewhere, most likely an instability")
| 26.26087 | 79 | 0.64128 |
acf947bf43db4ef2db87ee0f20bef50ac7649383 | 2,599 | py | Python | experiments/imagenet/greedy_weighted_ensemble.py | Haijunlv/swa_gaussian | 412a1f0a18f8607c2493e48275abe5345cd3eb1e | [
"BSD-2-Clause"
] | null | null | null | experiments/imagenet/greedy_weighted_ensemble.py | Haijunlv/swa_gaussian | 412a1f0a18f8607c2493e48275abe5345cd3eb1e | [
"BSD-2-Clause"
] | null | null | null | experiments/imagenet/greedy_weighted_ensemble.py | Haijunlv/swa_gaussian | 412a1f0a18f8607c2493e48275abe5345cd3eb1e | [
"BSD-2-Clause"
] | null | null | null | # 贪婪算法求解不同权重组合方案
import argparse
import os
import random
import sys
import time
import data
import glob
import copy
import pickle
import numpy as np
from scipy import optimize
from sklearn.metrics import accuracy_score
# CLI: paths to the per-model prediction pickles and the label pickle.
parser = argparse.ArgumentParser(description="SGD/SWA training")
parser.add_argument(
    "--pred_path",
    type=str,
    default=None,
    required=True,
    # Glob pattern matching prediction pickle files (see main()).
    help="training directory (default: None)",
)
parser.add_argument(
    "--label_path",
    type=str,
    default=None,
    required=True,
    # NOTE(review): help text looks copy-pasted — this is the label pickle
    # path, not a training directory. Confirm before changing user-facing text.
    help="training directory (default: None)",
)
def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
    """Running-mean update: fold one more sample into the current average."""
    delta = model_parameter - averaged_model_parameter
    return averaged_model_parameter + delta / (num_averaged + 1)
def greedy_ensemble(metric_np_index, pred_list, label):
    """Greedy forward selection of models for a running-mean logit ensemble.

    Walks the models in the given (accuracy-sorted) index order and keeps a
    candidate only when averaging its logits into the current ensemble
    improves accuracy on ``label``.  Prints the selection trace and the
    final member list.
    """
    best_acc = 0
    running_logit = 0
    selected = []
    n_members = 0
    for step, model_idx in enumerate(metric_np_index):
        candidate_logit = avg_fn(running_logit, pred_list[model_idx], n_members)
        candidate_acc = get_metric(candidate_logit, label)
        print("i:{}, metric_np_index[i]:{} avg_acc:{}, bast_acc:{}, num_averaged:{}".format(
            step, model_idx, candidate_acc, best_acc, n_members))
        if candidate_acc > best_acc:
            selected.append(model_idx)
            best_acc = candidate_acc
            running_logit = candidate_logit
            n_members += 1
    print("best acc:{}, ensemble_list:{}".format(best_acc, selected))
def get_metric(logit, label):
    """Accuracy of argmax-class predictions derived from ``logit``."""
    predicted_classes = np.argmax(logit, axis=1)
    return accuracy_score(label, predicted_classes)
def main():
    """Load per-model logits and labels, rank models, run greedy ensembling."""
    args = parser.parse_args()
    print("args:{}".format(args))
    pred_path = args.pred_path
    label_path = args.label_path
    # pred_path is a glob pattern; each match is one model's prediction pickle.
    pred_pkl_paths = glob.glob(pred_path)
    pred_list = []
    for pred_pkl_path in pred_pkl_paths:
        with open(pred_pkl_path, 'rb') as f:
            pkl = pickle.load(f, encoding='iso-8859-1')
        pred_list.append(pkl["logits"])
    with open(label_path, 'rb') as f:
        pkl = pickle.load(f, encoding='iso-8859-1')
    label = pkl["label"]
    # Score every model individually so the greedy pass can try them
    # best-first.
    metric_list = []
    for i, logit in enumerate(pred_list):
        acc = get_metric(logit, label)
        metric_list.append(acc)
    print("metric_list:{}".format(metric_list))
    metric_np = np.array(metric_list)
    # Descending order: argsort of the negated accuracies.
    metric_np_index = np.argsort(-metric_np)
    print("sort metric_list index:{}".format(metric_np_index))
    # import pdb
    # pdb.set_trace()
    greedy_ensemble(metric_np_index, pred_list, label)
if __name__ == '__main__':
    main()
acf94820e5e3e52e1f26eb58bf956b6172af3e56 | 3,646 | py | Python | tests/test_RawDataC.py | schrma/garminmanager | 47b0f5847bb3b519696b845b1619520aba190919 | [
"MIT"
] | null | null | null | tests/test_RawDataC.py | schrma/garminmanager | 47b0f5847bb3b519696b845b1619520aba190919 | [
"MIT"
] | null | null | null | tests/test_RawDataC.py | schrma/garminmanager | 47b0f5847bb3b519696b845b1619520aba190919 | [
"MIT"
] | null | null | null | import garminmanager.RawDataC
import datetime
from garminmanager.enumerators.EnumHealthTypeC import EnumHealtTypeC
def test_compare_datetime():
    """Equality holds for identical datetime series and breaks after mutation."""
    samples = {
        datetime.datetime(2019, 4, 10, 23, 0): 100,
        datetime.datetime(2019, 4, 10, 23, 30): 101,
        datetime.datetime(2019, 4, 11, 1, 0): 102,
        datetime.datetime(2019, 4, 11, 2, 0): 103,
    }
    first = garminmanager.RawDataC.RawDataC()
    second = garminmanager.RawDataC.RawDataC()
    for stamp, reading in samples.items():
        for series in (first, second):
            series.add_x(stamp)
            series.add_y(reading)
    assert first == second
    # Mutating the list returned by get_x() must be visible to __eq__.
    first.get_x()[0] = datetime.datetime(2019, 4, 11, 23, 0)
    assert (first == second) == False
def test_compare():
    """Equality depends on x values, y values and the data type together."""
    raw_data1 = garminmanager.RawDataC.RawDataC()
    raw_data1.add_x([1, 2, 3, 4, 5, 6, 7])
    raw_data1.add_y([10, 20, 30, 40, 50, 60, 70])
    raw_data2 = garminmanager.RawDataC.RawDataC()
    raw_data2.add_x([1, 2, 3, 4, 5, 6, 7])
    raw_data2.add_y([10, 20, 30, 40, 50, 60, 70])
    raw_data1.set_data_type(EnumHealtTypeC.heartrate)
    raw_data2.set_data_type(EnumHealtTypeC.heartrate)
    # Same x, y and type -> equal.
    assert raw_data1 == raw_data2
    # Differing y value -> not equal.
    raw_data2 = garminmanager.RawDataC.RawDataC()
    raw_data2.add_x([1, 2, 3, 4, 5, 6, 7])
    raw_data2.add_y([11, 20, 30, 40, 50, 60, 70])
    assert (raw_data1 == raw_data2) == False
    # Differing x value -> not equal.
    raw_data2 = garminmanager.RawDataC.RawDataC()
    raw_data2.add_x([2, 2, 3, 4, 5, 6, 7])
    raw_data2.add_y([10, 20, 30, 40, 50, 60, 70])
    assert (raw_data1 == raw_data2) == False
    # Same data but differing data type -> not equal.
    raw_data2 = garminmanager.RawDataC.RawDataC()
    raw_data2.add_x([1, 2, 3, 4, 5, 6, 7])
    raw_data2.add_y([10, 20, 30, 40, 50, 60, 70])
    raw_data1.set_data_type(EnumHealtTypeC.intensity)
    raw_data2.set_data_type(EnumHealtTypeC.heartrate)
    assert (raw_data1 == raw_data2) == False
def test_add_class():
    """`+` concatenates two series; adding an empty series is a no-op."""
    raw_data1 = garminmanager.RawDataC.RawDataC()
    raw_data1.add_x([1, 2, 3, 4, 5, 6, 7])
    raw_data1.add_y([10, 20, 30, 40, 50, 60, 70])
    raw_data2 = garminmanager.RawDataC.RawDataC()
    raw_data2.add_x([8, 9, 10, 11, 12, 13, 14])
    raw_data2.add_y([80, 90, 100, 110, 120, 130, 140])
    raw_data1.set_data_type(EnumHealtTypeC.heartrate)
    raw_data2.set_data_type(EnumHealtTypeC.heartrate)
    raw_total = raw_data1 + raw_data2
    x = raw_total.get_x()
    y = raw_total.get_y()
    # The first operand's samples come first in the concatenation ...
    xorg1 = raw_data1.get_x()
    yorg1 = raw_data1.get_y()
    for i, item in enumerate(xorg1):
        assert item == x[i]
        assert yorg1[i] == y[i]
    # ... followed by the second operand's samples, offset by len(first).
    xorg2 = raw_data2.get_x()
    yorg2 = raw_data2.get_y()
    for i, item in enumerate(xorg2):
        my_offset = len(xorg1)
        assert item == x[i+my_offset]
        assert yorg2[i] == y[i+my_offset]
    # Adding an empty series (on either side) yields the non-empty one.
    raw_data1 = garminmanager.RawDataC.RawDataC()
    raw_total = raw_data1 + raw_data2
    assert raw_data2 == raw_total
    raw_total = raw_data2 + raw_data1
    assert raw_data2 == raw_total
def test_update():
    """get_xy_data() stays in sync with get_x()/get_y(), also after add_xy()."""
    raw_data1 = garminmanager.RawDataC.RawDataC()
    raw_data1.add_x([1, 2, 3, 4, 5, 6, 7])
    raw_data1.add_y([10, 20, 30, 40, 50, 60, 70])
    raw_data_class = raw_data1.get_xy_data()
    # Each paired item must mirror the separate x/y lists, index by index.
    i = 0
    for item in raw_data_class:
        x = raw_data1.get_x()
        y = raw_data1.get_y()
        assert item.x == x[i]
        assert item.y == y[i]
        i = i + 1
    # Append one more sample and re-check the pairing end to end.
    raw_data1.add_xy(8,10)
    raw_data_class = raw_data1.get_xy_data()
    i = 0
    for item in raw_data_class:
        x = raw_data1.get_x()
        y = raw_data1.get_y()
        assert item.x == x[i]
        assert item.y == y[i]
        i = i + 1
acf948293611736f53142021409c0f58c39007b9 | 3,265 | py | Python | parlai/crowdsourcing/tasks/acute_eval/fast_acute_blueprint.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 9,228 | 2017-05-03T03:40:34.000Z | 2022-03-31T14:03:29.000Z | parlai/crowdsourcing/tasks/acute_eval/fast_acute_blueprint.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 2,660 | 2017-05-03T23:06:02.000Z | 2022-03-31T21:24:29.000Z | parlai/crowdsourcing/tasks/acute_eval/fast_acute_blueprint.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 2,058 | 2017-05-04T12:19:48.000Z | 2022-03-31T10:28:11.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional
from mephisto.operations.registry import register_mephisto_abstraction
from omegaconf import MISSING
from parlai.crowdsourcing.tasks.acute_eval.acute_eval_blueprint import (
AcuteEvalBlueprintArgs,
AcuteEvalBlueprint,
)
FAST_ACUTE_BLUEPRINT_TYPE = "fast_acute"
@dataclass
class FastAcuteBlueprintArgs(AcuteEvalBlueprintArgs):
    """Arguments for a Fast ACUTE run (ACUTE-Eval end-to-end in one command).

    Extends :class:`AcuteEvalBlueprintArgs`; each ``field`` carries its
    user-facing description in ``metadata['help']``.
    """
    _blueprint_type: str = FAST_ACUTE_BLUEPRINT_TYPE
    _group: str = field(
        default="FastAcuteBlueprint",
        metadata={
            'help': """Run all the steps of ACUTE-Eval with one simple command"""
        },
    )
    # Required (MISSING): model configuration and output root.
    config_path: str = field(
        default=MISSING,
        metadata={'help': 'Path to JSON of model types and their parameters'},
    )
    root_dir: str = field(default=MISSING, metadata={'help': 'Root save folder'})
    onboarding_path: Optional[str] = field(
        default=None,
        metadata={'help': 'Path to JSON file of settings for running onboarding'},
    )
    # Either `models` (round-robin over all pairs) or explicit `model_pairs`
    # selects what gets compared.
    models: Optional[str] = field(
        default=None,
        metadata={
            "help": "Comma separated list of models for round robin evaluation (must be at least 2)"
        },
    )
    model_pairs: Optional[str] = field(
        default=None,
        metadata={
            "help": "Comma separated list of model pairs for evaluation, model1:model2,model1:model3"
        },
    )
    acute_eval_type: str = field(
        default='engaging', metadata={"help": "Which evaluation to run for ACUTEs"}
    )
    matchups_per_pair: int = field(
        default=60,
        metadata={"help": "How many matchups to generate for each pair of models"},
    )
    task: Optional[str] = field(
        default=None, metadata={'help': 'The ParlAI task used for self-chat'}
    )
    sufficient_matchups_multiplier: int = field(
        default=2,
        metadata={
            'help': "Multiplier on how many conversation pairs to build. Probably doesn't need to be changed"
        },
    )
    # Self-chat generation knobs.
    num_self_chats: int = field(
        default=100, metadata={'help': "Number of self-chats to run per model"}
    )
    num_task_data_episodes: int = field(
        default=500,
        metadata={
            'help': "Number of episodes to save if running ACUTEs on a ParlAI task"
        },
    )
    selfchat_max_turns: int = field(
        default=6,
        metadata={'help': "The number of dialogue turns before self chat ends"},
    )
    use_existing_self_chat_files: bool = field(
        default=False,
        metadata={'help': "Use any existing self-chat files without prompting"},
    )
    randomize_conversations: bool = field(
        default=True,
        metadata={
            'help': "Randomize conversations used for match-ups or take conversations in order"
        },
    )
@register_mephisto_abstraction()
class FastAcuteBlueprint(AcuteEvalBlueprint):
    """
    Subclass of AcuteEvalBlueprint with params for fast ACUTE runs.
    """
    # Argument dataclass Mephisto uses to parse this blueprint's config.
    ArgsClass = FastAcuteBlueprintArgs
    # Registry key under which this blueprint is looked up.
    BLUEPRINT_TYPE = FAST_ACUTE_BLUEPRINT_TYPE
| 32.65 | 109 | 0.658499 |
acf9485f896471ed7b71a33d80090785f0e7f7bc | 1,190 | py | Python | lab2/selenium_scenario2_chrome.py | michalmatt/pjwstk-tau | 2216b1256cc8811ba35631de64df2b787cc5f39b | [
"MIT"
] | null | null | null | lab2/selenium_scenario2_chrome.py | michalmatt/pjwstk-tau | 2216b1256cc8811ba35631de64df2b787cc5f39b | [
"MIT"
] | null | null | null | lab2/selenium_scenario2_chrome.py | michalmatt/pjwstk-tau | 2216b1256cc8811ba35631de64df2b787cc5f39b | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import logging
# Logger initiation & setup: INFO-level logger with a timestamped console
# handler (handler level DEBUG, so the logger level is the effective filter).
logger = logging.getLogger('TAU lab2')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Chrome webdriver loading.
# NOTE(review): the Windows path is not a raw string; it works here only
# because \S and \C are not escape sequences — prefer r'...' to be safe.
s = Service('D:\SeleniumWebDrivers\Chrome\chromedriver.exe')
browser = webdriver.Chrome(service=s)
# Chrome: Empik scenario #2 — open the site and click the mobile menu entry.
logger.info('Chrome | Empik | Scenariusz #2')
logger.info('Przechodzę na stronę Empik')
browser.get('https://www.empik.com/')
logger.info('Przechodzę w menu do "Pomoc"')
temp = browser.find_element(By.CLASS_NAME, 'empikNav__menu-mobile-2')
temp.click()
# Remainder of the scenario is disabled: the search-box element could not be
# located reliably (see the log message below), so the browser is left open.
# temp = browser.find_element(By.CLASS_NAME, 'css-1i919hk-input')
# temp.click()
# temp.send_keys('organek')
# temp.send_keys(Keys.ENTER)
# logger.info('Nie udało mi się zidentyfikować pierwszego produktu z listy wyszukiwania, aby kontynuować scenariusz')
# logger.info('Zamykam przeglądarkę')
# browser.close()
acf9487dbee03e658fa6ef75382dab9d64a32208 | 4,024 | py | Python | openfl/interface/director.py | ssg-research/openfl | b60cbfbdad595e653c94cee23fd35add993b94b0 | [
"Apache-2.0"
] | 297 | 2021-01-13T08:49:35.000Z | 2022-03-31T15:06:43.000Z | openfl/interface/director.py | ssg-research/openfl | b60cbfbdad595e653c94cee23fd35add993b94b0 | [
"Apache-2.0"
] | 265 | 2021-02-02T09:57:33.000Z | 2022-03-30T22:51:55.000Z | openfl/interface/director.py | ssg-research/openfl | b60cbfbdad595e653c94cee23fd35add993b94b0 | [
"Apache-2.0"
] | 81 | 2021-01-18T07:52:36.000Z | 2022-03-26T18:55:54.000Z | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Director CLI."""
import logging
import shutil
import sys
from pathlib import Path
import click
from click import group
from click import option
from click import pass_context
from click import Path as ClickPath
from yaml import safe_load
from openfl.component.director import Director
from openfl.interface.cli_helper import WORKSPACE
from openfl.transport import DirectorGRPCServer
from openfl.utilities.path_check import is_directory_traversal
logger = logging.getLogger(__name__)
@group()
@pass_context
def director(context):
    """Manage Federated Learning Director."""
    # Tag the click context so subcommands know which CLI group invoked them.
    context.obj['group'] = 'director'
@director.command(name='start')
@option('-c', '--director-config-path', default='director.yaml',
        help='The director config file path', type=ClickPath(exists=True))
@option('--tls/--disable-tls', default=True,
        is_flag=True, help='Use TLS or not (By default TLS is enabled)')
@option('-rc', '--root-cert-path', 'root_certificate', required=False,
        type=ClickPath(exists=True), default=None,
        help='Path to a root CA cert')
@option('-pk', '--private-key-path', 'private_key', required=False,
        type=ClickPath(exists=True), default=None,
        help='Path to a private key')
@option('-oc', '--public-cert-path', 'certificate', required=False,
        type=ClickPath(exists=True), default=None,
        help='Path to a signed certificate')
def start(director_config_path, tls, root_certificate, private_key, certificate):
    """Start the director service."""
    director_config_path = Path(director_config_path).absolute()
    logger.info('🧿 Starting the Director Service.')
    # Refuse config paths that escape the openfl workspace (path traversal).
    if is_directory_traversal(director_config_path):
        click.echo('The director config file path is out of the openfl workspace scope.')
        sys.exit(1)
    with open(director_config_path) as stream:
        director_config = safe_load(stream)
    settings = director_config.get('settings', {})
    sample_shape = settings.get('sample_shape', '')
    target_shape = settings.get('target_shape', '')
    logger.info(f'Sample shape: {sample_shape}, target shape: {target_shape}')
    listen_host = settings.get('listen_host')
    listen_port = settings.get('listen_port')
    # CLI flags win over config-file values; paths are absolutized when set.
    root_certificate = root_certificate or settings.get('root_certificate')
    if root_certificate:
        root_certificate = Path(root_certificate).absolute()
    private_key = private_key or settings.get('private_key')
    if private_key:
        private_key = Path(private_key).absolute()
    certificate = certificate or settings.get('certificate')
    if certificate:
        certificate = Path(certificate).absolute()
    # Pass listen host/port only when configured, so server defaults apply.
    kwargs = {}
    if listen_host:
        kwargs['listen_host'] = listen_host
    if listen_port:
        kwargs['listen_port'] = listen_port
    director_server = DirectorGRPCServer(
        director_cls=Director,
        tls=tls,
        sample_shape=sample_shape,
        target_shape=target_shape,
        root_certificate=root_certificate,
        private_key=private_key,
        certificate=certificate,
        settings=settings,
        **kwargs
    )
    # Blocking call: runs the gRPC server until shutdown.
    director_server.start()
@director.command(name='create-workspace')
@option('-p', '--director-path', required=True,
        help='The director path', type=ClickPath())
def create(director_path):
    """Create a director workspace."""
    # Refuse workspace paths that escape the openfl workspace (path traversal).
    if is_directory_traversal(director_path):
        click.echo('The director path is out of the openfl workspace scope.')
        sys.exit(1)
    director_path = Path(director_path).absolute()
    # Recreating an existing workspace wipes it — ask the user first.
    if director_path.exists():
        if not click.confirm('Director workspace already exists. Recreate?', default=True):
            sys.exit(1)
        shutil.rmtree(director_path)
    (director_path / 'cert').mkdir(parents=True, exist_ok=True)
    (director_path / 'logs').mkdir(parents=True, exist_ok=True)
    # Seed the workspace with the packaged default director config.
    shutil.copyfile(WORKSPACE / 'default/director.yaml', director_path / 'director.yaml')
| 36.917431 | 91 | 0.706511 |
acf949352a89f048639bb1f64e1b9e3533cf5c66 | 3,857 | py | Python | CycleGAN/model.py | lmyybh/pytorch-networks | 8da055f5042c3803b275734afc89d33d239d7585 | [
"MulanPSL-1.0"
] | null | null | null | CycleGAN/model.py | lmyybh/pytorch-networks | 8da055f5042c3803b275734afc89d33d239d7585 | [
"MulanPSL-1.0"
] | null | null | null | CycleGAN/model.py | lmyybh/pytorch-networks | 8da055f5042c3803b275734afc89d33d239d7585 | [
"MulanPSL-1.0"
] | null | null | null | import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
    """Residual block: two reflection-padded 3x3 convs with a skip connection.

    Channel count and spatial size are preserved, so the input can be added
    directly to the transformed output.
    """

    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        layers = [
            # Reflection padding keeps H x W unchanged through each 3x3 conv.
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_channels, in_channels, 3),
            nn.InstanceNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_channels, in_channels, 3),
            nn.InstanceNorm2d(in_channels),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        transformed = self.block(x)
        return transformed + x
class GeneratorResNet(nn.Module):
    """CycleGAN-style generator: conv stem, 2x downsampling, residual
    transformation, 2x upsampling, tanh output.

    Shape comments below assume in_channels=3 and 256x256 input; other sizes
    scale accordingly. Note the initial/final padding and kernel sizes are
    derived from in_channels (pad=in_channels, kernel=2*in_channels+1, which
    gives the usual pad-3/7x7 stem for RGB) — presumably intentional, but the
    coupling to channel count is unusual; verify for non-RGB inputs.
    """
    def __init__(self, in_channels, num_residual_blocks=9):
        super(GeneratorResNet, self).__init__()
        # Inital Convolution 3*256*256 -> 64*256*256
        out_channels=64
        self.conv = nn.Sequential(
            nn.ReflectionPad2d(in_channels), # padding, keep the image size constant after next conv2d
            nn.Conv2d(in_channels, out_channels, 2*in_channels+1),
            nn.InstanceNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )
        channels = out_channels
        # Downsampling 64*256*256 -> 128*128*128 -> 256*64*64
        # (two stride-2 convs; channel count doubles at each step)
        self.down = []
        for _ in range(2):
            out_channels = channels * 2
            self.down += [
                nn.Conv2d(channels, out_channels, 3, stride=2, padding=1),
                nn.InstanceNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
            channels = out_channels
        self.down = nn.Sequential(*self.down)
        # Transformation (ResNet) 256*64*64 — shape-preserving residual blocks.
        self.trans = [ResidualBlock(channels) for _ in range(num_residual_blocks)]
        self.trans = nn.Sequential(*self.trans)
        # Upsampling 256*64*64 -> 128*128*128 -> 64*256*256
        self.up = []
        for _ in range(2):
            out_channels = channels // 2
            self.up += [
                nn.Upsample(scale_factor=2), # nearest-neighbor interpolation (nn.Upsample default mode)
                nn.Conv2d(channels, out_channels, 3, stride=1, padding=1),
                nn.InstanceNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
            channels = out_channels
        self.up = nn.Sequential(*self.up)
        # Out layer 64*256*256 -> 3*256*256, tanh squashes to [-1, 1]
        self.out = nn.Sequential(
            nn.ReflectionPad2d(in_channels),
            nn.Conv2d(channels, in_channels, 2*in_channels+1),
            nn.Tanh()
        )
    def forward(self, x):
        # Stem -> downsample -> residual transform -> upsample -> output head.
        x = self.conv(x)
        x = self.down(x)
        x = self.trans(x)
        x = self.up(x)
        x = self.out(x)
        return x
class Discriminator(nn.Module):
    """PatchGAN-style discriminator: maps an image to a grid of scores.

    Four stride-2 stages each halve the spatial resolution, so the output
    grid is input_size / 16 per side (``scale_factor``).
    """

    def __init__(self, in_channels):
        super(Discriminator, self).__init__()
        stages = []
        # First stage skips InstanceNorm.
        stages.extend(self.block(in_channels, 64, normalize=False))
        stages.extend(self.block(64, 128))
        stages.extend(self.block(128, 256))
        stages.extend(self.block(256, 512))
        # Pad left/top by one before the final 4x4 conv so the score grid
        # comes out at exactly input_size / 16 per side.
        stages.append(nn.ZeroPad2d((1, 0, 1, 0)))
        stages.append(nn.Conv2d(512, 1, 4, padding=1))
        self.model = nn.Sequential(*stages)
        # Ratio between input resolution and output grid resolution.
        self.scale_factor = 16

    @staticmethod
    def block(in_channels, out_channels, normalize=True):
        """One downsampling stage: stride-2 conv, optional norm, LeakyReLU."""
        stage = [nn.Conv2d(in_channels, out_channels, 4, stride=2, padding=1)]
        if normalize:
            stage.append(nn.InstanceNorm2d(out_channels))
        stage.append(nn.LeakyReLU(0.2, inplace=True))
        return stage

    def forward(self, x):
        return self.model(x)
| 35.063636 | 102 | 0.554317 |
acf949e3c29e4b2e48cecd529ee9cbff46251fa4 | 1,609 | py | Python | expenses/models.py | ronyyosef/Xpense | 9b7bf5785072dee5c95863130a3ea0eb9c2832db | [
"MIT"
] | null | null | null | expenses/models.py | ronyyosef/Xpense | 9b7bf5785072dee5c95863130a3ea0eb9c2832db | [
"MIT"
] | null | null | null | expenses/models.py | ronyyosef/Xpense | 9b7bf5785072dee5c95863130a3ea0eb9c2832db | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models import Avg, IntegerField, QuerySet
from django.utils import timezone
from house.models import House
from django.core.validators import MinValueValidator
class Expenses(models.Model):
    """A single expense entry belonging to a house."""
    class Category(models.TextChoices):
        # Closed set of expense categories; the stored value equals the label.
        RENT = 'Rent'
        MORTGAGE = 'Mortgage'
        BILLS = 'Bills'
        TRANSPORTATION = 'Transportation'
        CLOTHING = 'Clothing'
        HEALTHCARE = 'Healthcare'
        FOOD = 'Food'
        INSURANCE = 'Insurance'
        KIDS = 'Kids'
        CULTURE = 'Culture'
        VACATIONS = 'Vacations'
        OTHER = 'Other'
    # Free-text description; optional (defaults to empty string).
    description = models.TextField(max_length=250, default='')
    category = models.CharField(max_length=32, choices=Category.choices, default=Category.OTHER)
    # Owning house; deleting the house deletes its expenses.
    house_name = models.ForeignKey(House, on_delete=models.CASCADE)
    # Non-negative amount; may be left unset.
    amount = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
    date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return f'Category:{self.category},Amount:{self.amount}'
    @staticmethod
    def create_expense(house_name, amount, date, category):
        """Create, save and return a new expense row."""
        expense = Expenses(house_name=house_name, amount=amount, date=date, category=category)
        expense.save()
        return expense
    @staticmethod
    def average_expenses_of_houses_by_categories(houses) -> QuerySet:
        """Per-category integer average of amounts across the given houses.

        ``houses`` is a House queryset; matching is done via the house name.
        """
        return (
            Expenses.objects.filter(house_name__name__in=houses.values_list('name')).order_by().values(
                'category').annotate(average=Avg("amount", output_field=IntegerField()))
        )
| 36.568182 | 103 | 0.685519 |
acf94c58b40f4df24e5d82da4c82b0c02512f8a9 | 1,461 | py | Python | examples/skeletona.py | Zrufy/telepot | a4d62a32c82e5799e9f7afc400275e9d487ddb76 | [
"MIT"
] | 2 | 2021-04-11T12:03:19.000Z | 2021-04-11T12:03:23.000Z | examples/skeletona.py | Zrufy/telepot | a4d62a32c82e5799e9f7afc400275e9d487ddb76 | [
"MIT"
] | null | null | null | examples/skeletona.py | Zrufy/telepot | a4d62a32c82e5799e9f7afc400275e9d487ddb76 | [
"MIT"
] | null | null | null | import sys
import asyncio
import telepot
import telepot.async
"""
$ python3.4 skeletona.py <token>
A skeleton for your **async** telepot programs.
"""
@asyncio.coroutine
def handle(msg):
flavor = telepot.flavor(msg)
# normal message
if flavor == 'normal':
content_type, chat_type, chat_id = telepot.glance(msg)
print('Normal Message:', content_type, chat_type, chat_id)
# Do your stuff according to `content_type` ...
# inline query - need `/setinline`
elif flavor == 'inline_query':
query_id, from_id, query_string = telepot.glance(msg, flavor=flavor)
print('Inline Query:', query_id, from_id, query_string)
# Compose your own answers
articles = [{'type': 'article',
'id': 'abc', 'title': 'ABC', 'message_text': 'Good morning'}]
yield from bot.answerInlineQuery(query_id, articles)
# chosen inline result - need `/setinlinefeedback`
elif flavor == 'chosen_inline_result':
result_id, from_id, query_string = telepot.glance(msg, flavor=flavor)
print('Chosen Inline Result:', result_id, from_id, query_string)
# Remember the chosen answer to do better next time
else:
raise telepot.BadFlavor(msg)
TOKEN = sys.argv[1]  # get token from command-line
bot = telepot.async.Bot(TOKEN)
# Run the bot's message loop on the asyncio event loop, forever.
loop = asyncio.get_event_loop()
loop.create_task(bot.messageLoop(handle))
print('Listening ...')
loop.run_forever()
| 27.055556 | 85 | 0.665298 |
acf94db02c0ee9579120370c6775d9473c16ac0b | 4,115 | py | Python | src/train.py | switchkiller/ml_imdb | 9a5c505023ab0e8fd74c0437e62a6e932621b471 | [
"MIT"
] | null | null | null | src/train.py | switchkiller/ml_imdb | 9a5c505023ab0e8fd74c0437e62a6e932621b471 | [
"MIT"
] | null | null | null | src/train.py | switchkiller/ml_imdb | 9a5c505023ab0e8fd74c0437e62a6e932621b471 | [
"MIT"
] | null | null | null | import pandas as pd
import nltk
# Read data from files
train = pd.read_csv( "../data/labeledTrainData.tsv", delimiter="\t", quoting=3 )
test = pd.read_csv( "../data/testData.tsv", delimiter="\t", quoting=3 )
unlabeled_train = pd.read_csv( "../data/unlabeledTrainData.tsv", delimiter="\t", quoting=3 )
# Verify the number of reviews that were read (100,000 in total)
# print "Read %d labeled train reviews, %d labeled test reviews, and %d unlabeled reviews\n" % (train["review"].size,
# test["review"].size, unlabeled_train["review"].size )
# Import various modules for string cleaning
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
def review_to_wordlist(review, remove_stopwords=False):
    """Convert a raw review document to a list of lowercase words.

    Strips HTML with BeautifulSoup, drops non-letter characters, lowercases
    and splits on whitespace.  When ``remove_stopwords`` is True, English
    stop words are filtered out.

    Args:
        review: Raw (possibly HTML) review text.
        remove_stopwords: Filter NLTK English stop words (default False).

    Returns:
        A list of word strings.
    """
    # 1. Remove HTML markup.
    review_text = BeautifulSoup(review).get_text()
    # 2. Keep letters only.
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    # 3. Lowercase and split on whitespace.
    words = review_text.lower().split()
    # 4. Optionally remove stop words.  The stop-word set is built once and
    #    cached on the function: this routine runs for every sentence of
    #    ~100k reviews, and rebuilding the set each call is wasted work.
    if remove_stopwords:
        stops = getattr(review_to_wordlist, "_stops", None)
        if stops is None:
            stops = set(stopwords.words("english"))
            review_to_wordlist._stops = stops
        words = [w for w in words if w not in stops]
    # 5. Return the list of words.
    return words
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Define a function to split a review into parsed sentences
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
    """Split a review into sentences, each tokenized to a list of words.

    Returns a list of lists: one word-list per non-empty sentence.
    """
    # 1. Use the NLTK tokenizer to split the paragraph into sentences
    raw_sentences = tokenizer.tokenize(review.strip())
    #
    # 2. Loop over each sentence
    sentences = []
    for raw_sentence in raw_sentences:
        # If a sentence is empty, skip it
        if len(raw_sentence) > 0:
            # Otherwise, call review_to_wordlist to get a list of words
            sentences.append( review_to_wordlist( raw_sentence, \
              remove_stopwords ))
    #
    # Return the list of sentences (each sentence is a list of words,
    # so this returns a list of lists
    return sentences
sentences = []  # Initialize an empty list of sentences
# Tokenize both the labeled and the unlabeled reviews; Word2Vec trains on
# the combined sentence list (stop words kept, per Word2Vec practice).
print ("Parsing sentences from training set")
for review in train["review"]:
    sentences += review_to_sentences(review, tokenizer)
print ("Parsing sentences from unlabeled set")
for review in unlabeled_train["review"]:
    sentences += review_to_sentences(review, tokenizer)
print (len(sentences))
print (sentences[0])
# Import the built-in logging module and configure it so that Word2Vec
# creates nice output messages
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
    level=logging.INFO)
# Set values for various parameters
num_features = 300    # Word vector dimensionality
min_word_count = 40   # Minimum word count
num_workers = 4       # Number of threads to run in parallel
context = 10          # Context window size
downsampling = 1e-3   # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
from gensim.models import word2vec
print ("Training model...")
model = word2vec.Word2Vec(sentences, workers=num_workers,
            size=num_features, min_count = min_word_count,
            window = context, sample = downsampling)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model_name = "300features_40minwords_10context"
model.save(model_name)
# Example usage after loading the saved model:
# >>> model = gensim.models.Word2Vec.load('300features_40minwords_10context')
# >>> model.doesnt_match("france england germany berlin".split())
#
# >>> model.most_similar("man")
#
# >>> model.most_similar("queen")
#
# >>> model.most_similar("awful")
| 36.096491 | 117 | 0.710571 |
acf94df9f274c79c757e4b4058dd95100c57ff5e | 5,310 | py | Python | google-cloud-sdk/lib/surface/compute/networks/subnets/create.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/networks/subnets/create.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/networks/subnets/create.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating subnetworks."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.networks import flags as network_flags
from googlecloudsdk.command_lib.compute.networks.subnets import flags
def _AddArgs(cls, parser):
  """Add subnetwork create arguments to parser."""
  # Resource args are stored on the command class so Run() can resolve them.
  cls.SUBNETWORK_ARG = flags.SubnetworkArgument()
  cls.NETWORK_ARG = network_flags.NetworkArgumentForOtherResource(
      'The network to which the subnetwork belongs.')
  cls.SUBNETWORK_ARG.AddArgument(parser, operation_type='create')
  cls.NETWORK_ARG.AddArgument(parser)
  parser.add_argument(
      '--description',
      help='An optional description of this subnetwork.')
  parser.add_argument(
      '--range',
      required=True,
      help='The IP space allocated to this subnetwork in CIDR format.')
  parser.add_argument(
      '--enable-private-ip-google-access',
      action='store_true',
      default=False,
      help=('Enable/disable access to Google Cloud APIs from this subnet for '
            'instances without a public ip address.'))
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
  """Define a subnet for a network in custom subnet mode."""
  # Populated by _AddArgs in Args(); resolved to resource refs in Run().
  NETWORK_ARG = None
  SUBNETWORK_ARG = None
  @classmethod
  def Args(cls, parser):
    parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT)
    _AddArgs(cls, parser)
  def Run(self, args):
    """Issues a list of requests necessary for adding a subnetwork."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    # Resolve the parent network and the subnetwork (region-scoped) refs.
    network_ref = self.NETWORK_ARG.ResolveAsResource(args, holder.resources)
    subnet_ref = self.SUBNETWORK_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(client))
    # Build the insert request straight from the parsed flags.
    request = client.messages.ComputeSubnetworksInsertRequest(
        subnetwork=client.messages.Subnetwork(
            name=subnet_ref.Name(),
            description=args.description,
            network=network_ref.SelfLink(),
            ipCidrRange=args.range,
            privateIpGoogleAccess=args.enable_private_ip_google_access,
        ),
        region=subnet_ref.region,
        project=subnet_ref.project)
    return client.MakeRequests([(client.apitools_client.subnetworks,
                                 'Insert', request)])
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class CreateBeta(Create):
  """Define a subnet for a network in custom subnet mode.

  Beta/alpha variant of Create that additionally supports secondary IP
  ranges (used for IP aliasing).
  """
  @classmethod
  def Args(cls, parser):
    parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT)
    _AddArgs(cls, parser)
    parser.add_argument(
        '--secondary-range',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        metavar='PROPERTY=VALUE',
        help="""\
        Adds a secondary IP range to the subnetwork for use in IP aliasing.
        For example, `--secondary-range range1=192.168.64.0/24` adds
        a secondary range 192.168.64.0/24 with name range1.
        * `RANGE_NAME` - Name of the secondary range.
        * `RANGE` - `IP range in CIDR format.`
        """)
  def Run(self, args):
    """Issues a list of requests for adding a subnetwork. Overrides."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    # Resolve the parent network and the subnetwork (region-scoped) refs.
    network_ref = self.NETWORK_ARG.ResolveAsResource(args, holder.resources)
    subnet_ref = self.SUBNETWORK_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(client))
    request = client.messages.ComputeSubnetworksInsertRequest(
        subnetwork=client.messages.Subnetwork(
            name=subnet_ref.Name(),
            description=args.description,
            network=network_ref.SelfLink(),
            ipCidrRange=args.range,
            privateIpGoogleAccess=args.enable_private_ip_google_access,
        ),
        region=subnet_ref.region,
        project=subnet_ref.project)
    # Each --secondary-range occurrence contributes one name->CIDR dict;
    # ranges are attached in sorted-by-name order for determinism.
    secondary_ranges = []
    if args.secondary_range:
      for secondary_range in args.secondary_range:
        # Fix: dict.iteritems() does not exist on Python 3; items() behaves
        # identically under sorted() on both Python 2 and 3.
        for range_name, ip_cidr_range in sorted(secondary_range.items()):
          secondary_ranges.append(
              client.messages.SubnetworkSecondaryRange(
                  rangeName=range_name,
                  ipCidrRange=ip_cidr_range))
    request.subnetwork.secondaryIpRanges = secondary_ranges
    return client.MakeRequests([(client.apitools_client.subnetworks,
                                 'Insert', request)])
| 36.62069 | 78 | 0.705838 |
acf94e5188c2b6594b2c44c62d95bb3105bb1ed1 | 23,715 | py | Python | huobi/platforms/huobi_usdt_swap_trade.py | yahgwai/huobi_futures_Python | 6f96379368b53848017df2f6a94f3a53083cec0a | [
"MIT"
] | null | null | null | huobi/platforms/huobi_usdt_swap_trade.py | yahgwai/huobi_futures_Python | 6f96379368b53848017df2f6a94f3a53083cec0a | [
"MIT"
] | null | null | null | huobi/platforms/huobi_usdt_swap_trade.py | yahgwai/huobi_futures_Python | 6f96379368b53848017df2f6a94f3a53083cec0a | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
Huobi Swap Api Module.
Author: QiaoXiaofeng
Date: 2020/09/02
Email: andyjoe318@gmail.com
"""
import gzip
import json
import copy
import datetime
import time
import urllib
import hmac
import base64
import urllib
import hashlib
import datetime
import time
from urllib.parse import urljoin
from huobi.asset import Asset
from huobi.order import Order
from huobi.position import Position
from huobi.error import Error
from huobi.utils import tools, logger
from huobi.tasks import SingleTask, LoopRunTask
from huobi.const import HUOBI_USDT_SWAP
from huobi.utils.websocket import Websocket
from huobi.utils.request import AsyncHttpRequests
from huobi.utils.decorator import async_method_locker
from huobi.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL
from huobi.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET, ORDER_TYPE_MAKER, ORDER_TYPE_FOK, ORDER_TYPE_IOC
from huobi.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \
ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED, TRADE_TYPE_BUY_OPEN, TRADE_TYPE_SELL_OPEN, TRADE_TYPE_BUY_CLOSE, \
TRADE_TYPE_SELL_CLOSE
from .huobi_usdt_swap_api import HuobiUsdtSwapRestAPI
__all__ = ("HuobiUsdtSwapTrade", )
class HuobiUsdtSwapTrade(Websocket):
""" Huobi Swap Trade module. You can initialize trade object with some attributes in kwargs.
Attributes:
account: Account name for this trade exchange.
strategy: What's name would you want to created for you strategy.
symbol: Symbol name for your trade.
host: HTTP request host. default `https://api.hbdm.com"`.
wss: Websocket address. default `wss://www.hbdm.com`.
access_key: Account's ACCESS KEY.
secret_key Account's SECRET KEY.
asset_update_callback: You can use this param to specific a async callback function when you initializing Trade
object. `asset_update_callback` is like `async def on_asset_update_callback(asset: Asset): pass` and this
callback function will be executed asynchronous when received AssetEvent.
order_update_callback: You can use this param to specific a async callback function when you initializing Trade
object. `order_update_callback` is like `async def on_order_update_callback(order: Order): pass` and this
callback function will be executed asynchronous when some order state updated.
position_update_callback: You can use this param to specific a async callback function when you initializing Trade
object. `position_update_callback` is like `async def on_position_update_callback(order: Position): pass` and
this callback function will be executed asynchronous when some position state updated.
init_success_callback: You can use this param to specific a async callback function when you initializing Trade
object. `init_success_callback` is like `async def on_init_success_callback(success: bool, error: Error, **kwargs): pass`
and this callback function will be executed asynchronous after Trade module object initialized successfully.
"""
    def __init__(self, **kwargs):
        """Initialize the USDT-margined swap trade module.

        Required kwargs: account, strategy, symbol, contract_type,
        access_key, secret_key. Optional kwargs: host/wss endpoints
        (default api.hbdm.com) and the *_callback coroutines documented
        on the class docstring.
        """
        # NOTE(review): each failed check overwrites `e`, so only the LAST
        # missing parameter is reported via init_success_callback.
        e = None
        if not kwargs.get("account"):
            e = Error("param account miss")
        if not kwargs.get("strategy"):
            e = Error("param strategy miss")
        if not kwargs.get("symbol"):
            e = Error("param symbol miss")
        if not kwargs.get("contract_type"):
            e = Error("param contract_type miss")
        if not kwargs.get("host"):
            kwargs["host"] = "https://api.hbdm.com"
        if not kwargs.get("wss"):
            kwargs["wss"] = "wss://api.hbdm.com"
        if not kwargs.get("access_key"):
            e = Error("param access_key miss")
        if not kwargs.get("secret_key"):
            e = Error("param secret_key miss")
        if e:
            # Report the validation failure and abort initialization early.
            logger.error(e, caller=self)
            if kwargs.get("init_success_callback"):
                SingleTask.run(kwargs["init_success_callback"], False, e)
            return
        self._account = kwargs["account"]
        self._strategy = kwargs["strategy"]
        self._platform = HUOBI_USDT_SWAP
        self._symbol = kwargs["symbol"]
        self._contract_type = kwargs["contract_type"]
        self._host = kwargs["host"]
        self._wss = kwargs["wss"]
        self._access_key = kwargs["access_key"]
        self._secret_key = kwargs["secret_key"]
        self._order_update_callback = kwargs.get("order_update_callback")
        self._position_update_callback = kwargs.get("position_update_callback")
        self._asset_update_callback = kwargs.get("asset_update_callback")
        self._init_success_callback = kwargs.get("init_success_callback")
        # Private websocket endpoint for USDT-margined swap notifications.
        url = self._wss + "/linear-swap-notification"
        super(HuobiUsdtSwapTrade, self).__init__(url, send_hb_interval=5)
        self._assets = {} # Asset detail, {"BTC": {"free": "1.1", "locked": "2.2", "total": "3.3"}, ... }.
        self._orders = {} # Order objects, {"order_id": order, ...}.
        self._position = Position(self._platform, self._account, self._strategy, self._symbol + '/' + self._contract_type)
        # Websocket topics this module subscribes to after authentication.
        self._order_channel = "orders.{symbol}".format(symbol=self._symbol)
        self._position_channel = "positions.{symbol}".format(symbol=self._symbol)
        self._asset_channel = "accounts.{symbol}".format(symbol=self._symbol)
        # Subscription-acknowledged flags, flipped by sub_callback().
        self._subscribe_order_ok = False
        self._subscribe_position_ok = False
        self._subscribe_asset_ok = False
        self._rest_api = HuobiUsdtSwapRestAPI(self._access_key, self._secret_key, self._host)
        # Kick off the websocket connection (provided by the Websocket base
        # class -- presumably schedules connect + heartbeat; confirm there).
        self.initialize()
    @property
    def assets(self):
        # Shallow copy so callers cannot mutate the internal asset state.
        return copy.copy(self._assets)
    @property
    def orders(self):
        # Shallow copy of the internal {order_id: Order} map.
        return copy.copy(self._orders)
    @property
    def position(self):
        # Shallow copy of the tracked Position object.
        return copy.copy(self._position)
    @property
    def rest_api(self):
        # The REST client itself is shared (not copied) with the caller.
        return self._rest_api
async def _send_heartbeat_msg(self, *args, **kwargs):
data = {"op": "pong", "ts": str(int(time.time()*1000))}
if not self.ws:
logger.error("Websocket connection not yeah!", caller=self)
return
await self.ws.send_json(data)
async def connected_callback(self):
"""After connect to Websocket server successfully, send a auth message to server."""
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
data = {
"AccessKeyId": self._access_key,
"SignatureMethod": "HmacSHA256",
"SignatureVersion": "2",
"Timestamp": timestamp
}
sign = self.generate_signature("GET", data, "/linear-swap-notification")
data["op"] = "auth"
data["type"] = "api"
data["Signature"] = sign
await self.ws.send_json(data)
def generate_signature(self, method, params, request_path):
host_url = urllib.parse.urlparse(self._wss).hostname.lower()
sorted_params = sorted(params.items(), key=lambda d: d[0], reverse=False)
encode_params = urllib.parse.urlencode(sorted_params)
payload = [method, host_url, request_path, encode_params]
payload = "\n".join(payload)
payload = payload.encode(encoding="UTF8")
secret_key = self._secret_key.encode(encoding="utf8")
digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
signature = signature.decode()
return signature
async def auth_callback(self, data):
if data["err-code"] != 0:
e = Error("Websocket connection authorized failed: {}".format(data))
logger.error(e, caller=self)
SingleTask.run(self._init_success_callback, False, e)
return
self._subscribe_order_ok = False
self._subscribe_position_ok = False
self._subscribe_asset_ok = False
# subscribe order
data = {
"op": "sub",
"cid": tools.get_uuid1(),
"topic": self._order_channel
}
await self.ws.send_json(data)
# subscribe position
data = {
"op": "sub",
"cid": tools.get_uuid1(),
"topic": self._position_channel
}
await self.ws.send_json(data)
# subscribe asset
data = {
"op": "sub",
"cid": tools.get_uuid1(),
"topic": self._asset_channel
}
await self.ws.send_json(data)
    async def sub_callback(self, data):
        """Handle a subscription acknowledgement.

        Once all three topics (orders/positions/accounts) are confirmed,
        pre-load currently open orders via REST and signal init success.
        """
        if data["err-code"] != 0:
            e = Error("subscribe {} failed!".format(data["topic"]))
            logger.error(e, caller=self)
            SingleTask.run(self._init_success_callback, False, e)
            return
        # Mark the acknowledged topic as subscribed.
        if data["topic"] == self._order_channel:
            self._subscribe_order_ok = True
        elif data["topic"] == self._position_channel:
            self._subscribe_position_ok = True
        elif data["topic"] == self._asset_channel:
            self._subscribe_asset_ok = True
        if self._subscribe_order_ok and self._subscribe_position_ok \
            and self._subscribe_asset_ok:
            # All subscriptions confirmed: seed local order state from REST.
            success, error = await self._rest_api.get_open_orders(self._symbol)
            if error:
                e = Error("get open orders failed!")
                SingleTask.run(self._init_success_callback, False, e)
            elif "data" in success and "orders" in success["data"]:
                for order_info in success["data"]["orders"]:
                    # REST payloads lack the websocket "ts" field; reuse
                    # created_at so _update_order can set utime.
                    order_info["ts"] = order_info["created_at"]
                    self._update_order(order_info)
                SingleTask.run(self._init_success_callback, True, None)
            else:
                # Unexpected response shape -- log it and fail init.
                logger.warn("get open orders:", success, caller=self)
                e = Error("Get Open Orders Unknown error")
                SingleTask.run(self._init_success_callback, False, e)
    # NOTE(review): the lock name still says "HuobiSwapTrade" (not
    # "HuobiUsdtSwapTrade") -- presumably copied from the coin-margined
    # module; confirm whether the lock should be shared across both.
    @async_method_locker("HuobiSwapTrade.process_binary.locker")
    async def process_binary(self, raw):
        """ Handle a message received on the websocket.
        @param raw the raw gzip-compressed message payload
        """
        data = json.loads(gzip.decompress(raw).decode())
        logger.debug("data:", data, caller=self)
        op = data.get("op")
        if op == "ping":
            # Server-initiated ping: echo the timestamp back as a pong.
            hb_msg = {"op": "pong", "ts": data.get("ts")}
            await self.ws.send_json(hb_msg)
        elif op == "auth":
            await self.auth_callback(data)
        elif op == "sub":
            await self.sub_callback(data)
        elif op == "notify":
            # Push notifications are dispatched by topic prefix.
            if data["topic"].startswith("orders"):
                self._update_order(data)
            elif data["topic"].startswith("positions"):
                self._update_position(data)
            elif data["topic"].startswith("accounts"):
                self._update_asset(data)
async def create_order(self, action, price, quantity, order_type=ORDER_TYPE_LIMIT, client_order_id=None, *args, **kwargs):
""" Create an order.
Args:
action: Trade direction, BUY or SELL.
price: Price of each contract.
quantity: The buying or selling quantity.
order_type: Order type, LIMIT or MARKET.
kwargs:
lever_rate: Leverage rate, 10 or 20.
Returns:
order_no: Order ID if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
if int(quantity) > 0:
if action == ORDER_ACTION_BUY:
direction = "buy"
offset = "open"
elif action == ORDER_ACTION_SELL:
direction = "sell"
offset = "close"
else:
return None, "action error"
else:
if action == ORDER_ACTION_BUY:
direction = "buy"
offset = "close"
elif action == ORDER_ACTION_SELL:
direction = "sell"
offset = "open"
else:
return None, "action error"
lever_rate = kwargs.get("lever_rate", 20)
if order_type == ORDER_TYPE_LIMIT:
order_price_type = "limit"
elif order_type == ORDER_TYPE_MARKET:
order_price_type = "optimal_20"
elif order_type == ORDER_TYPE_MAKER:
order_price_type = "post_only"
elif order_type == ORDER_TYPE_FOK:
order_price_type = "fok"
elif order_type == ORDER_TYPE_IOC:
order_price_type = "ioc"
else:
return None, "order type error"
quantity = abs(int(quantity))
result, error = await self._rest_api.create_order(self._symbol,
price, quantity, direction, offset, lever_rate,
order_price_type, client_order_id)
if error:
return None, error
return str(result["data"]["order_id"]), None
    async def create_orders(self, orders, *args, **kwargs):
        """ batch create orders
        Args:
            orders_data: []
            list item:
                action: Trade direction, BUY or SELL.
                price: Price of each contract.
                quantity: The buying or selling quantity.
                order_type: Order type, LIMIT or MARKET.
                lever_rate: leverage.
            kwargs:
        Returns:
            success: order info if created successfully.
            error: erros information.
        """
        orders_data = []
        for order in orders:
            # Positive quantity opens in the trade direction; negative closes
            # (same convention as create_order above).
            if int(order["quantity"]) > 0:
                if order["action"] == ORDER_ACTION_BUY:
                    direction = "buy"
                    offset = "open"
                elif order["action"] == ORDER_ACTION_SELL:
                    direction = "sell"
                    offset = "close"
                else:
                    return None, "action error"
            else:
                if order["action"] == ORDER_ACTION_BUY:
                    direction = "buy"
                    offset = "close"
                elif order["action"] == ORDER_ACTION_SELL:
                    direction = "sell"
                    offset = "open"
                else:
                    return None, "action error"
            # NOTE(review): unlike create_order (which defaults to 20), a
            # missing "lever_rate" key raises KeyError here -- confirm intended.
            lever_rate = order["lever_rate"]
            if order["order_type"] == ORDER_TYPE_LIMIT:
                order_price_type = "limit"
            elif order["order_type"] == ORDER_TYPE_MARKET:
                order_price_type = "optimal_20"
            elif order["order_type"] == ORDER_TYPE_MAKER:
                order_price_type = "post_only"
            elif order["order_type"] == ORDER_TYPE_FOK:
                order_price_type = "fok"
            elif order["order_type"] == ORDER_TYPE_IOC:
                order_price_type = "ioc"
            else:
                return None, "order type error"
            quantity = abs(int(order["quantity"]))
            client_order_id = order.get("client_order_id", "")
            orders_data.append({"contract_code": self._symbol, \
                "client_order_id": client_order_id, "price": order["price"], "volume": quantity, "direction": direction, "offset": offset, \
                "leverRate": lever_rate, "orderPriceType": order_price_type})
        result, error = await self._rest_api.create_orders({"orders_data": orders_data})
        if error:
            return None, error
        # Successful ids are returned; per-order failures come back alongside.
        order_nos = [ order["order_id"] for order in result.get("data").get("success")]
        return order_nos, result.get("data").get("errors")
    async def revoke_order(self, *order_nos):
        """ Revoke (an) order(s).
        Args:
            order_nos: Order id list, you can set this param to 0 or multiple items. If you set 0 param, you can cancel
                all orders for this symbol(initialized in Trade object). If you set 1 param, you can cancel an order.
                If you set multiple param, you can cancel multiple orders. Do not set param length more than 100.
        Returns:
            Success or error, see bellow.

        NOTE(review): the three branches return inconsistent shapes --
        (True, None) / (order_no, None) / (success, error) on success and
        (False, errors) / (order_no, error) on failure. Callers must
        inspect which arity they invoked with.
        """
        # If len(order_nos) == 0, you will cancel all orders for this symbol(initialized in Trade object).
        if len(order_nos) == 0:
            success, error = await self._rest_api.revoke_order_all(self._symbol)
            if error:
                return False, error
            if success.get("errors"):
                return False, success["errors"]
            return True, None
        # If len(order_nos) == 1, you will cancel an order.
        if len(order_nos) == 1:
            success, error = await self._rest_api.revoke_order(self._symbol, order_nos[0])
            if error:
                return order_nos[0], error
            if success.get("errors"):
                return False, success["errors"]
            else:
                return order_nos[0], None
        # If len(order_nos) > 1, you will cancel multiple orders.
        if len(order_nos) > 1:
            success, error = await self._rest_api.revoke_orders(self._symbol, order_nos)
            if error:
                # NOTE(review): only the first id is reported even though
                # several orders were submitted for cancellation.
                return order_nos[0], error
            if success.get("errors"):
                return False, success["errors"]
        return success, error
async def get_open_order_nos(self):
""" Get open order id list.
Args:
None.
Returns:
order_nos: Open order id list, otherwise it's None.
error: Error information, otherwise it's None.
"""
success, error = await self._rest_api.get_open_orders(self._symbol)
if error:
return None, error
else:
order_nos = []
for order_info in success["data"]["orders"]:
if order_info["contract_code"] != self._symbol:
continue
order_nos.append(str(order_info["order_id"]))
return order_nos, None
    def _update_order(self, order_info):
        """ Order update.

        Builds/updates the cached Order object from a websocket push (or a
        REST payload pre-seeded by sub_callback), computes fill aggregates,
        maps the exchange status code and publishes via the order callback.

        Args:
            order_info: Order information (dict from exchange).
        """
        if order_info["contract_code"] != self._symbol:
            return
        order_no = str(order_info["order_id"])
        status = order_info["status"]
        order = self._orders.get(order_no)
        if not order:
            # First sighting of this order: derive the trade type from
            # direction + offset and create the local Order object.
            if order_info["direction"] == "buy":
                if order_info["offset"] == "open":
                    trade_type = TRADE_TYPE_BUY_OPEN
                else:
                    trade_type = TRADE_TYPE_BUY_CLOSE
            else:
                if order_info["offset"] == "close":
                    trade_type = TRADE_TYPE_SELL_CLOSE
                else:
                    trade_type = TRADE_TYPE_SELL_OPEN
            info = {
                "platform": self._platform,
                "account": self._account,
                "strategy": self._strategy,
                "order_no": order_no,
                "client_order_id": order_info.get("client_order_id"),
                "order_price_type": order_info.get("order_price_type"),
                "order_type": order_info["order_type"],
                "action": ORDER_ACTION_BUY if order_info["direction"] == "buy" else ORDER_ACTION_SELL,
                "symbol": self._symbol + '/' + self._contract_type,
                "price": order_info["price"],
                "quantity": order_info["volume"],
                "trade_type": trade_type
            }
            order = Order(**info)
            self._orders[order_no] = order
        # Reset per-message fill aggregates before recomputing below.
        order.trade_quantity = None
        order.trade_price = None
        if order_info.get("trade"):
            # Aggregate the fills in this message into a total volume and a
            # volume-weighted average price.
            quantity = 0
            price = 0
            amount = 0
            count = len(order_info.get("trade"))  # NOTE(review): unused.
            for trade in order_info.get("trade"):
                order.role = trade.get("role")
                quantity += float(trade.get("trade_volume"))
                amount += float(trade.get("trade_volume")*trade.get("trade_price"))
                price = amount/quantity
            order.trade_quantity = int(quantity)
            order.trade_price = price
        # Status codes per the Huobi swap API -- presumably 1-3 = pending/
        # submitted, 4 = partially filled, 5/7 = cancelled (possibly after
        # partial fill), 6 = fully filled; confirm against API docs.
        if status in [1, 2, 3]:
            order.status = ORDER_STATUS_SUBMITTED
        elif status == 4:
            order.status = ORDER_STATUS_PARTIAL_FILLED
            order.remain = int(order.quantity) - int(order_info["trade_volume"])
        elif status == 6:
            order.status = ORDER_STATUS_FILLED
            order.remain = 0
        elif status in [5, 7]:
            order.status = ORDER_STATUS_CANCELED
            order.remain = int(order.quantity) - int(order_info["trade_volume"])
        else:
            # Unknown status code: leave the cached order untouched.
            return
        order.avg_price = order_info["trade_avg_price"]
        order.ctime = order_info["created_at"]
        order.utime = order_info["ts"]
        SingleTask.run(self._order_update_callback, copy.copy(order))
        # Delete order that already completed.
        if order.status in [ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:
            self._orders.pop(order_no)
        # publish order
        logger.info("symbol:", order.symbol, "order:", order, caller=self)
    def _update_position(self, data):
        """ Position update.
        Args:
            data: Position notification message; its "data" list holds one
                entry per direction ("buy" = long, "sell" = short).
        Returns:
            None.
        """
        for position_info in data["data"]:
            if position_info["contract_code"] != self._symbol:
                continue
            if position_info["direction"] == "buy":
                self._position.long_quantity = int(position_info["volume"])
                self._position.long_avg_price = position_info["cost_open"]
            else:
                self._position.short_quantity = int(position_info["volume"])
                self._position.short_avg_price = position_info["cost_open"]
            # self._position.liquid_price = None
            self._position.utime = data["ts"]
            SingleTask.run(self._position_update_callback, copy.copy(self._position))
    def _update_asset(self, data):
        """ Asset update.

        On the first notification `self._assets` (initialized as a plain
        dict in __init__) is replaced by an Asset object; later
        notifications merge into that object's `assets` mapping.

        Args:
            data: asset data.
        Returns:
            None.
        """
        assets = {}
        for item in data["data"]:
            symbol = item["symbol"].upper()
            total = float(item["margin_balance"])
            free = float(item["margin_available"])
            locked = float(item["margin_frozen"])
            # Zero balances are dropped from the snapshot.
            if total > 0:
                assets[symbol] = {
                    "total": "%.8f" % total,
                    "free": "%.8f" % free,
                    "locked": "%.8f" % locked
                }
        # NOTE(review): after the first message self._assets is an Asset, so
        # this dict comparison is effectively always "changed".
        if assets == self._assets:
            update = False
        else:
            update = True
        # A bare dict has no "assets" attribute -> first notification.
        if hasattr(self._assets, "assets") is False:
            info = {
                "platform": self._platform,
                "account": self._account,
                "assets": assets,
                "timestamp": tools.get_cur_timestamp_ms(),
                "update": update
            }
            asset = Asset(**info)
            self._assets = asset
            SingleTask.run(self._asset_update_callback, copy.copy(self._assets))
        else:
            # Merge the new snapshot into the existing Asset object.
            for symbol in assets:
                self._assets.assets.update({
                    symbol: assets[symbol]
                })
            self._assets.timestamp = tools.get_cur_timestamp_ms()
            SingleTask.run(self._asset_update_callback, copy.copy(self._assets))
| 39.590985 | 144 | 0.580181 |
acf94f0b3575be8c7c9a279236985878c0e4934f | 7,329 | py | Python | larval_gonad_ovary/bulk.py | jfear/larval_gonad_ovary | b0941dbdd450aae5efd6ff60632e6eec7574ab69 | [
"MIT"
] | null | null | null | larval_gonad_ovary/bulk.py | jfear/larval_gonad_ovary | b0941dbdd450aae5efd6ff60632e6eec7574ab69 | [
"MIT"
] | null | null | null | larval_gonad_ovary/bulk.py | jfear/larval_gonad_ovary | b0941dbdd450aae5efd6ff60632e6eec7574ab69 | [
"MIT"
] | null | null | null | """Helper functions for working with bulk data.
We performed bulk RNA-seq and this is a set of helpers for dealing with this
data.
"""
from pathlib import Path
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
import matplotlib.pyplot as plt
from lcdblib.plotting import maPlot, PairGrid, corrfunc
from .cell_selection import filter_gene_counts_by_barcode
BULK = [
'B9_OCP',
'B10_OCP',
'B11_OCP',
'B12_OCP',
]
def read_bulk(path, filter=None, pattern='*/*.featurecounts.txt'):
    """Read in a folder of feature count data.

    Using the lcdb-wf, featurecounts are organized in a set of sub-folders for
    each sample. Given a path will read in the data and return a dataframe
    (genes x samples). Optionally a list of sample names can be given to
    filter by; with ``filter=None`` every sample found is read (previously
    ``None`` read nothing and crashed on an empty concat).

    Parameters
    ----------
    path : str
        Directory path to output from the lcdb-wf.
    filter : None | list
        List of sample names to include; ``None`` includes all samples.
    pattern : str
        Glob pattern for finding the featurecounts files.

    Example
    -------
    >>> df = read_bulk('../bulk-rnaseq-wf/data/rnaseq_samples',
                       filter=['B5_TCP', 'B6_TCP'])
    """
    bulk = Path(path)
    dfs = []
    for fname in bulk.glob(pattern):
        sname = fname.parent.name
        # Logical `and` replaces the original bitwise `&` (which only worked
        # by accident on two bools and does not short-circuit); filter=None
        # now means "take everything".
        if filter is None or sname in filter:
            # Last column of a featureCounts table is the count column.
            dat = pd.read_csv(fname, sep='\t', comment='#',
                              index_col=[0]).iloc[:, -1]
            dat.name = sname
            dfs.append(dat)
    bulk_dat = pd.concat(dfs, axis=1)
    # Return with sample columns in sorted order.
    bulk_dat = bulk_dat[bulk_dat.columns.sort_values()]
    return bulk_dat
def read_bulk_for_lengths(path, filter=None, pattern='*/*.featurecounts.txt'):
    """Read a folder of feature count data and return per-gene lengths.

    Using the lcdb-wf, featurecounts are organized in a set of sub-folders for
    each sample. Returns a Series of gene lengths indexed by Geneid, with
    duplicate (gene, length) pairs collapsed. ``filter=None`` reads every
    sample (previously ``None`` read nothing and crashed on an empty concat).

    Parameters
    ----------
    path : str
        Directory path to output from the lcdb-wf.
    filter : None | list
        List of sample names to include; ``None`` includes all samples.
    pattern : str
        Glob pattern for finding the featurecounts files.
    """
    bulk = Path(path)
    dfs = []
    for fname in bulk.glob(pattern):
        sname = fname.parent.name
        # Logical `and`/`or` replaces the original bitwise `&`.
        if filter is None or sname in filter:
            # Second-to-last column of a featureCounts table is the Length.
            dat = pd.read_csv(fname, sep='\t', comment='#',
                              index_col=[0]).iloc[:, -2]
            dat.name = 'length'
            dfs.append(dat)
    bulk_dat = pd.concat(dfs, axis=0)
    # Lengths repeat across samples; keep one row per (gene, length).
    return bulk_dat.to_frame().reset_index().drop_duplicates().set_index('Geneid').length
def plot_bulk_pairwise_corr(bulk_dat, subplots_kws=None, scatter_kws=None,
                            corrfunc_kws=None):
    """Plot a pairgrid of RNA-seq data.

    The upper triangle shows scatter plots annotated with the Spearman
    correlation, the lower triangle shows MA-plots, and the diagonal shows
    the per-sample density.

    bulk_dat : pd.DataFrame
        DataFrame with RNA-seq data (genes, samples)
    """
    # Merge caller overrides on top of the defaults (None -> no overrides).
    subplots_opts = {'sharex': False, 'sharey': False, **(subplots_kws or {})}
    scatter_opts = {'s': 10, **(scatter_kws or {})}
    corrfunc_opts = dict(corrfunc_kws or {})
    grid = PairGrid(bulk_dat, subplots_kws=subplots_opts)
    grid.map_lower(maPlot, scatter_kws=scatter_opts)
    grid.map_upper(plt.scatter, **scatter_opts)
    grid.map_upper(corrfunc, **corrfunc_opts)
    grid.map_diag(sns.kdeplot)
    return grid
def scRNAseq_corr_distribution(umi, raw, bulk_dat, start=200,
                               interval=100, stop=10000):
    """Calculate the correlation distribution between scRNASeq and Bulk.

    Iterate over increasing numbers of top-UMI cells and compute the
    Spearman correlation between the summed scRNA-seq counts and each bulk
    RNA-seq sample.

    Parameters
    ----------
    umi : pd.DataFrame
        DataFrame of UMI counts by Cell (tidy)
    raw : CellRangerCounts
        A named tuple of CellRangerCounts.
    bulk_dat : pd.DataFrame
        DataFrame of bulk RNA-seq data (genes, samples)
    start : int
        Number of cells to start with [default 200]
    interval : int
        Number of cells to add each iteration [default 100]
    stop : int
        Number of cells to stop at [default 10,000]

    Returns
    -------
    pd.DataFrame
        Rows are the number of UMI sorted cells. Columns are Bulk RNASeq
        samples. Values are Spearman r coefficients.
    """
    # Rank cells by UMI count so the top-N prefix is the "best" N cells.
    ranked = umi.sort_values(by='umi_count', ascending=False)
    rows = []
    for n_cells in range(start, stop, interval):
        summed = filter_gene_counts_by_barcode(ranked.index[:n_cells], raw).sum(axis=1)
        corrs = [spearmanr(bulk_dat[col], summed).correlation
                 for col in bulk_dat.columns]
        rows.append([n_cells, *corrs])
    col_names = ['Cell Number', *bulk_dat.columns]
    return pd.DataFrame(rows, columns=col_names).set_index('Cell Number')
def plot_corr_distribution(corr):
    """Plot one Spearman-correlation trace per bulk sample.

    Generalized from a hard-coded 2x2 grid (which silently dropped any
    columns beyond the fourth): the grid is now two columns wide with as
    many rows as needed for ``len(corr.columns)`` samples, and unused
    panels are hidden.

    Parameters
    ----------
    corr : pd.DataFrame
        Index is cell number; one column of Spearman r values per sample.
    """
    n_samples = len(corr.columns)
    # Ceiling division without importing math; at least one row.
    nrows = max(1, (n_samples + 1) // 2)
    fig, axes = plt.subplots(nrows, 2, sharex=True)
    for col, ax in zip(corr.columns, axes.flatten()):
        ax.plot(corr[col])
        ax.set_title(col)
        ax.set_ylabel('Spearman r')
        ax.set_xlabel('Cells')
    # Hide any trailing empty panel (odd sample counts).
    for ax in axes.flatten()[n_samples:]:
        ax.set_visible(False)
    plt.tight_layout()
def scRNAseq_corr_distribution_random(umi, raw, bulk_dat, interval=100,
                                      stop=10000, random_state=42):
    """Calculate the correlation distribution between scRNASeq and Bulk.

    Like scRNAseq_corr_distribution, but each iteration draws a random
    sample of cells instead of the top-UMI prefix.

    Parameters
    ----------
    umi : pd.DataFrame
        DataFrame of UMI counts by Cell (tidy)
    raw : CellRangerCounts
        A named tuple of CellRangerCounts.
    bulk_dat : pd.DataFrame
        DataFrame of bulk RNA-seq data (genes, samples)
    interval : int
        Number of cells to add each iteration [default 100]
    stop : int
        Number of cells to stop at [default 10,000]
    random_state : None | int
        Random state to use for sampling. Set to None if you want full random
        with each iteration.

    Returns
    -------
    pd.DataFrame
        Rows are the number of UMI sorted cells. Columns are Bulk RNASeq
        samples. Values are Spearman r coefficients.
    """
    rows = []
    for n_cells in range(interval, stop, interval):
        sampled_idx = umi.sample(n=n_cells, random_state=random_state).index
        summed = filter_gene_counts_by_barcode(sampled_idx, raw).sum(axis=1)
        corrs = [spearmanr(bulk_dat[col], summed).correlation
                 for col in bulk_dat.columns]
        rows.append([n_cells, *corrs])
    col_names = ['Cell Number', *bulk_dat.columns]
    return pd.DataFrame(rows, columns=col_names).set_index('Cell Number')
| 28.51751 | 89 | 0.63392 |
acf94f5bfae8cc362c2e3dd37bd3b372fc77960e | 2,765 | py | Python | azure-mgmt-logic/azure/mgmt/logic/models/integration_account_partner.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | azure-mgmt-logic/azure/mgmt/logic/models/integration_account_partner.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | null | null | null | azure-mgmt-logic/azure/mgmt/logic/models/integration_account_partner.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .integration_account_resource import IntegrationAccountResource
# AutoRest-generated model class; edits here may be overwritten by the code
# generator, so only comments are added.
class IntegrationAccountPartner(IntegrationAccountResource):
    """IntegrationAccountPartner.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: The resource id.
    :type id: str
    :param name: The resource name.
    :type name: str
    :param type: The resource type.
    :type type: str
    :param location: The resource location.
    :type location: str
    :param tags: The resource tags.
    :type tags: dict
    :param partner_type: The partner type. Possible values include:
     'NotSpecified', 'B2B'
    :type partner_type: str or :class:`PartnerType
     <azure.mgmt.logic.models.PartnerType>`
    :ivar created_time: The created time.
    :vartype created_time: datetime
    :ivar changed_time: The changed time.
    :vartype changed_time: datetime
    :param metadata: The metadata.
    :type metadata: object
    :param content: The partner content.
    :type content: :class:`PartnerContent
     <azure.mgmt.logic.models.PartnerContent>`
    """

    # Server-populated fields: clients must never send these.
    _validation = {
        'created_time': {'readonly': True},
        'changed_time': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) paths and msrest types,
    # consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'partner_type': {'key': 'properties.partnerType', 'type': 'PartnerType'},
        'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
        'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
        'metadata': {'key': 'properties.metadata', 'type': 'object'},
        'content': {'key': 'properties.content', 'type': 'PartnerContent'},
    }

    def __init__(self, id=None, name=None, type=None, location=None, tags=None, partner_type=None, metadata=None, content=None):
        super(IntegrationAccountPartner, self).__init__(id=id, name=name, type=type, location=location, tags=tags)
        self.partner_type = partner_type
        # Read-only fields start as None and are filled in by the service.
        self.created_time = None
        self.changed_time = None
        self.metadata = metadata
        self.content = content
| 38.943662 | 128 | 0.619168 |
acf94fd1e96f8dcb618aadda56f23efa6caff8e7 | 2,526 | py | Python | feature_generation/ngram_vectorizer_training.py | tommartensen/fake-news-detector | d9d903a57d4c1e5c0acabe85018d7614ee59f194 | [
"MIT"
] | 4 | 2019-02-25T19:14:31.000Z | 2020-04-07T16:08:08.000Z | feature_generation/ngram_vectorizer_training.py | tommartensen/fake-news-detector | d9d903a57d4c1e5c0acabe85018d7614ee59f194 | [
"MIT"
] | null | null | null | feature_generation/ngram_vectorizer_training.py | tommartensen/fake-news-detector | d9d903a57d4c1e5c0acabe85018d7614ee59f194 | [
"MIT"
] | 1 | 2018-11-24T00:53:47.000Z | 2018-11-24T00:53:47.000Z | import getopt
import json
import os
import pickle
import sys
import math
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
def calc_df(upper_bound, size):
    """
    Helper function to constrain the length of the vector.
    :param upper_bound: ngram upper bound
    :param size: size of the data set
    :return: minimum document frequency for the CountVectorizer
    """
    # Same arithmetic as the original, just named for readability:
    # scale the per-ngram corpus size by the reference ratio 30/13000.
    per_ngram = size / upper_bound
    return math.floor(per_ngram * (30 / 13000))
def dump_vectorizer(vectorizer, filename):
    """Pickle *vectorizer* to vectorizers/<filename>.vec next to this package."""
    target = os.path.join(
        os.path.dirname(__file__),
        "../feature_generation/vectorizers/" + filename + ".vec")
    print("Dumping vectorizer...")
    with open(target, "wb") as f:
        pickle.dump(vectorizer, f)
def main(argv):
    """Parse CLI args, vectorize the training set and dump features + vectorizer.

    Options:
        -l / --lower_bound  lower ngram bound
        -u / --upper_bound  upper ngram bound
        -t                  additionally apply a tf-idf transformation
    """
    if len(argv) < 2:
        print('ngram_vectorizer.py -l <lower bound> -u <upper bound> [-t]')
        sys.exit(2)
    lower_bound = 1
    upper_bound = 1
    include_tfidf = 0
    try:
        opts, args = getopt.getopt(argv, "htl:u:", ["lower_bound=", "upper_bound="])
    except getopt.GetoptError:
        print('ngram_vectorizer.py -l <lower bound> -u <upper bound> [-t]')
        sys.exit(2)
    # The output filename encodes the chosen options, e.g. "ngram_l1_u2_t".
    filename = "ngram"
    for opt, arg in opts:
        if opt == '-h':
            print('ngram_vectorizer.py -l <lower bound> -u <upper bound> [-t]')
            sys.exit()
        elif opt in ("-l", "--lower_bound"):
            filename += "_l" + arg
            lower_bound = int(arg)
        elif opt in ("-u", "--upper_bound"):
            filename += "_u" + arg
            upper_bound = int(arg)
        elif opt == "-t":
            # BUG FIX: `opt in "-t"` was a substring test that also matched
            # the bare string "-"; use equality for an exact flag match.
            filename += "_t"
            include_tfidf = 1
    articles = []
    labels = []
    print("Preparing data...")
    with open(os.path.join(os.path.dirname(__file__), "../preprocessing/data/training_set.json"), "r") as f:
        data = json.load(f)
        for article in data:
            articles.append(article[0])
            labels.append(article[1])
    print("Vectorizing ngrams...")
    vectorizer = CountVectorizer(ngram_range=(lower_bound, upper_bound), stop_words="english", min_df=calc_df(
        upper_bound, len(labels)))
    features = vectorizer.fit_transform(articles).toarray()
    if include_tfidf:
        print("Performing term-frequency times inverse document-frequency transformation...")
        transformer = TfidfTransformer(smooth_idf=False)
        features = transformer.fit_transform(features).toarray()
    print("Dumping tokenized features...")
    with open(os.path.join(os.path.dirname(__file__), "../feature_generation/data/trained/" + filename + ".json"), "w") as f:
        json.dump(features.tolist(), f)
    with open(os.path.join(os.path.dirname(__file__), "../feature_generation/data/trained/labels_training.json"), "w") as f:
        json.dump(labels, f)
    dump_vectorizer(vectorizer, filename)
# Script entry point: forward everything after the program name to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 29.717647 | 122 | 0.698337 |
acf9508eca7a28e0f3919a53aa6e7cc12abf11fe | 3,739 | py | Python | __explorations__/2020_29/cnnnnn.py | tyoc213/blog | ebabd6f4d2fe2791f22c348e0a97184120acea28 | [
"Apache-2.0"
] | 1 | 2021-03-19T03:30:21.000Z | 2021-03-19T03:30:21.000Z | __explorations__/2020_29/cnnnnn.py | tyoc213/blog | ebabd6f4d2fe2791f22c348e0a97184120acea28 | [
"Apache-2.0"
] | 6 | 2020-05-15T22:11:50.000Z | 2022-01-12T17:29:55.000Z | __explorations__/2020_29/cnnnnn.py | tyoc213/blog | ebabd6f4d2fe2791f22c348e0a97184120acea28 | [
"Apache-2.0"
] | null | null | null | # %%
from fastai2.vision.all import *
# %%
# NOTE(review): the name says TPU but this actually selects a CUDA device --
# presumably left over from an XLA/TPU experiment; confirm which backend is
# intended before relying on the name.
tpu_device = torch.device('cuda:0')
torch.cuda.empty_cache()
class XLAOptimProxy:
    """Proxy around a fastai Optimizer that intercepts `step`.

    The intent (per the commented-out line below) is to route the update
    through `xm.optimizer_step` on XLA/TPU devices; currently it just
    forwards to the wrapped optimizer's step().
    """
    def __init__(self,opt:Optimizer):
        #print("XLAOptimProxy#inicializando __init__")
        self.opt = opt
    def xla_step(self):
        #print("------------- xla optimizer!!!!!!!! BARRIER TRYE")
        # The XLA call is deliberately disabled while exploring; re-enable to
        # sync on gradient update on TPU.
        #xm.optimizer_step(self.opt,barrier=True) # sync on gradient update
        self.opt.step()
    def __getattr__(self,name):
        # Redirect `step` to xla_step; transparently proxy everything else
        # to the wrapped optimizer.
        if name == 'step': # override proxying for step method
            #print_local("calling xla_step")
            return getattr(self,'xla_step')
        # proxy everything else
        #print_local(f"calling XLAOptimProxy#{name}")
        return getattr(self.opt,name)
@patch_to(Learner)
def create_opt(self):
    # Monkey-patched replacement for fastai's Learner.create_opt: builds the
    # optimizer as usual, then wraps it in XLAOptimProxy so that ``step``
    # can be intercepted (originally for TPU/XLA stepping).
    ooo = self.opt_func(self.splitter(self.model), lr=self.lr)
    prox = XLAOptimProxy(ooo)
    self.opt = prox
    # Mirror fastai's default bookkeeping for batch-norm/bias parameters.
    if not self.wd_bn_bias:
        for p in self._bn_bias_state(True ): p['do_wd'] = False
    if self.train_bn:
        for p in self._bn_bias_state(False): p['force_train'] = True
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
# Indexed by ``num_workers == 0`` below: False -> multi-process iterator,
# True -> single-process iterator.
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
import inspect
@patch_to(DataLoader)
def __iter__(self):
    """Monkey-patched fastai DataLoader.__iter__ with debug prints.

    Mirrors the stock implementation (randomize, before_iter, iterate the
    underlying torch loader, move each batch to ``self.device``, run
    ``after_batch``) with TRACE instrumentation left in comments.
    """
    print("__iter__")
    # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#__iter__ 0")
    self.randomize()
    self.before_iter()
    # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#__iter__ START FOR 1")
    # ``num_workers == 0`` selects the single-process iterator from _loaders.
    for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
        if self.device is not None:
            # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#iterator to device from {b[0].device} y {b[1].device} to {self.device}")
            b = to_device(b, self.device)
            # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#iterator to done!!!!")
        # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#yielding 3!!!! yield self.after_batch({b[0].device}) len of b is {len(b)}")
        #print(inspect.getsource(self.after_batch))
        print(type(self.after_batch))
        print(self.after_batch)
        yield self.after_batch(b)
        # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#yielding 4!!!!")
    # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#__iter__ END FOR 2")
    self.after_iter()
    # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#__iter__ after ITER")
    # Drop the transient iterator state fastai stashes on ``it``.
    if hasattr(self, 'it'): delattr(self, 'it')
    # TRACE: print(f"{datetime.now().strftime(' (%H:%M:%S.%f)')} DataLoader#DataLoader#DataLoader#END __iter__")
#%%
# Oxford-IIIT Pets: each image is labelled by the breed prefix of its
# file name, extracted with the regex below.
path = untar_data(URLs.PETS)/'images'
pat = r'(.+)_\d+.jpg$'
datablock = DataBlock(
    blocks=(ImageBlock,CategoryBlock),
    get_items=get_image_files,
    splitter=RandomSplitter(seed=42),
    get_y=using_attr(RegexLabeller(pat),'name'),
    # NOTE(review): aug_transforms is normally passed as batch_tfms, not
    # item_tfms — confirm this is intentional.
    item_tfms=aug_transforms(size=224,min_scale=0.75),
    )
datablock.summary(path)
dls = datablock.dataloaders(path,bs=64, device=tpu_device)
print("CNN Learner")
learner = cnn_learner(dls, resnet34, metrics=accuracy)
print("FINE TUNE")
# 2 frozen epochs, then 1 unfrozen epoch at base_lr=4e-3.
learner.fine_tune(1,base_lr=4e-3,freeze_epochs=2)
print("end FINE TUNE")
# %%
| 39.357895 | 201 | 0.629045 |
acf952079f6f7bd63647a9be2f3f3c88641ca1ad | 5,716 | py | Python | traffic/data/basic/runways.py | obbe79/traffic | 6e3e38a20c8745da23eb00259b9a6f399c4c5a11 | [
"MIT"
] | null | null | null | traffic/data/basic/runways.py | obbe79/traffic | 6e3e38a20c8745da23eb00259b9a6f399c4c5a11 | [
"MIT"
] | null | null | null | traffic/data/basic/runways.py | obbe79/traffic | 6e3e38a20c8745da23eb00259b9a6f399c4c5a11 | [
"MIT"
] | null | null | null | import pickle
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
from zipfile import ZipFile
import altair as alt
import pandas as pd
import requests
from shapely.geometry import base, shape
from shapely.ops import linemerge
from ...core.geodesy import bearing
from ...core.mixins import ShapelyMixin
# Raw-content base for BlueSky's bundled navdata files on GitHub.
__github_url = "https://raw.githubusercontent.com/"
base_url = __github_url + "ProfHoekstra/bluesky/master/data/navdata"
class Threshold(NamedTuple):
    """One runway threshold: position, runway heading (degrees) and the
    runway identifier (e.g. "09L")."""
    latitude: float
    longitude: float
    bearing: float
    name: str
# Mapping of airport ICAO code to its runways, each runway given as a pair
# of opposite thresholds.
RunwaysType = Dict[str, List[Tuple[Threshold, Threshold]]]
class RunwayAirport(ShapelyMixin):
    """Runways of a single airport, exposed as threshold records plus
    shapely geometry and altair encodings."""
    def __init__(self, runways: List[Tuple[Threshold, Threshold]]):
        self._runways = runways
    @property
    def data(self) -> pd.DataFrame:
        """One row per threshold: latitude, longitude, bearing, name."""
        return pd.DataFrame.from_records(
            self.list, columns=["latitude", "longitude", "bearing", "name"]
        )
    @property
    def list(self) -> List[Threshold]:
        # Flatten the (thr0, thr1) pairs into a single list of thresholds.
        return sum((list(runway) for runway in self._runways), [])
    def geojson(self) -> List[Dict[str, Any]]:
        """One GeoJSON LineString feature per runway, named "thr0/thr1"."""
        return [
            {
                "geometry": {
                    "type": "LineString",
                    "coordinates": tuple(
                        (thrs.longitude, thrs.latitude) for thrs in runway
                    ),
                },
                "properties": "/".join(thrs.name for thrs in runway),
                "type": "Feature",
            }
            for runway in self._runways
        ]
    @property
    def shape(self) -> base.BaseGeometry:
        # Merge the per-runway line strings into a single geometry.
        return linemerge(shape(x["geometry"]) for x in self.geojson())
    def geoencode(
        self, mode: str = "geometry"
    ) -> Optional[alt.Chart]:  # coverage: ignore
        """Altair encoding: runway outlines ("geometry") or identifier
        labels ("labels"); any other mode yields None."""
        if mode == "geometry":
            return (
                super().geoencode().mark_geoshape(strokeWidth=2, stroke="black")
            )
        elif mode == "labels":
            rwy_labels = alt.Chart(self.data).encode(
                longitude="longitude:Q", latitude="latitude:Q", text="name:N"
            )
            # One text layer per threshold, rotated to the runway bearing.
            rwy_layers = [
                rwy_labels.transform_filter(alt.datum.name == name).mark_text(
                    angle=bearing, baseline="middle", dy=10
                )
                for (name, bearing) in zip(self.data.name, self.data.bearing)
            ]
            return alt.layer(*rwy_layers)
        else:
            return None
class Runways(object):
    """Lazy, disk-cached database of runway thresholds parsed from the
    BlueSky project's X-Plane style ``apt.dat`` file."""
    # Must be set by the caller before instantiation (see assert below).
    cache_dir: Optional[Path] = None
    def __init__(self) -> None:
        self._runways: Optional[RunwaysType] = None
        assert self.cache_dir is not None
        self._cache = self.cache_dir / "runways_bluesky.pkl"
    @property
    def runways(self) -> RunwaysType:
        """Threshold data, loaded from the pickle cache or downloaded once."""
        if self._runways is not None:
            return self._runways
        if self._cache.exists():
            with self._cache.open("rb") as fh:
                self._runways = pickle.load(fh)
        else:
            self.download_bluesky()
            assert self._runways is not None
            with self._cache.open("wb") as fh:
                pickle.dump(self._runways, fh)
        return self._runways
    def __getitem__(self, airport) -> Optional[RunwayAirport]:
        # Accepts either an ICAO string or an airport object with ``.icao``.
        if isinstance(airport, str):
            from .. import airports
            airport = airports[airport]
        if airport is None:
            return None
        return RunwayAirport(self.runways[airport.icao])
    def download_bluesky(self) -> None:  # coverage: ignore
        """Fetch apt.zip from the BlueSky repo and fill ``_runways``."""
        self._runways = dict()
        c = requests.get(base_url + "/apt.zip")
        with ZipFile(BytesIO(c.content)).open("apt.dat", "r") as fh:
            for line in fh.readlines():
                elems = (
                    line.decode(encoding="ascii", errors="ignore")
                    .strip()
                    .split()
                )
                if len(elems) == 0:
                    continue
                # 1: AIRPORT
                if elems[0] == "1":
                    # Add airport to runway threshold database
                    cur: List[Tuple[Threshold, Threshold]] = list()
                    self.runways[elems[4]] = cur
                # NOTE(review): if a "100" row ever preceded its "1" row,
                # ``cur`` would be unbound here — assumed not to happen in
                # well-formed apt.dat files.
                if elems[0] == "100":
                    # Only asphalt and concrete runways
                    if int(elems[2]) > 2:
                        continue
                    lat0 = float(elems[9])
                    lon0 = float(elems[10])
                    # offset0 = float(elems[11])
                    lat1 = float(elems[18])
                    lon1 = float(elems[19])
                    # offset1 = float(elems[20])
                    # threshold information:
                    #    ICAO code airport,
                    #    Runway identifier,
                    #    latitude, longitude, bearing
                    # vertices: gives vertices of the box around the threshold
                    # opposite runways are on the same line.
                    #  RWY1: 8-11, RWY2: 17-20
                    # Hence, there are two thresholds per line
                    # thr0: First lat0 and lon0, then lat1 and lat1, offset=[11]
                    # thr1: First lat1 and lat1, then lat0 and lon0, offset=[20]
                    brng0 = bearing(lat0, lon0, lat1, lon1)
                    brng1 = bearing(lat1, lon1, lat0, lon0)
                    # Normalise bearings into (0, 360].
                    brng0 = brng0 if brng0 > 0 else 360 + brng0
                    brng1 = brng1 if brng1 > 0 else 360 + brng1
                    thr0 = Threshold(lat0, lon0, brng0, elems[8])
                    thr1 = Threshold(lat1, lon1, brng1, elems[17])
                    cur.append((thr0, thr1))
| 32.293785 | 80 | 0.525717 |
acf953498914e24772edc2f12b89a6168bd65a5a | 2,021 | py | Python | openstack/tests/functional/load_balancer/v2/test_load_balancer.py | teresa-ho/stx-openstacksdk | 7d723da3ffe9861e6e9abcaeadc1991689f782c5 | [
"Apache-2.0"
] | null | null | null | openstack/tests/functional/load_balancer/v2/test_load_balancer.py | teresa-ho/stx-openstacksdk | 7d723da3ffe9861e6e9abcaeadc1991689f782c5 | [
"Apache-2.0"
] | null | null | null | openstack/tests/functional/load_balancer/v2/test_load_balancer.py | teresa-ho/stx-openstacksdk | 7d723da3ffe9861e6e9abcaeadc1991689f782c5 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from openstack.load_balancer.v2 import load_balancer
from openstack.tests.functional import base
@unittest.skipUnless(base.service_exists(service_type='load_balancer'),
                     'Load-balancing service does not exist')
class TestLoadBalancer(base.BaseFunctionalTest):
    """Functional CRUD checks for v2 load balancers.

    One load balancer is created once for the whole class and deleted
    afterwards; the individual tests only read it back.
    """
    NAME = uuid.uuid4().hex
    ID = None
    VIP_SUBNET_ID = uuid.uuid4().hex
    @classmethod
    def setUpClass(cls):
        super(TestLoadBalancer, cls).setUpClass()
        test_lb = cls.conn.load_balancer.create_load_balancer(
            name=cls.NAME, vip_subnet_id=cls.VIP_SUBNET_ID)
        assert isinstance(test_lb, load_balancer.LoadBalancer)
        # Bug fix: unittest assertion helpers are *instance* methods, so
        # ``cls.assertIs(...)`` mis-bound its arguments (cls.NAME became
        # ``self``) and raised TypeError.  Use a plain assert, and check
        # equality (the intended semantics) rather than identity.
        assert test_lb.name == cls.NAME
        cls.ID = test_lb.id
    @classmethod
    def tearDownClass(cls):
        test_lb = cls.conn.load_balancer.delete_load_balancer(
            cls.ID, ignore_missing=False)
        # delete_load_balancer() returns None on success (same intent as the
        # broken ``cls.assertIs(None, test_lb)``).
        assert test_lb is None
    def test_find(self):
        test_lb = self.conn.load_balancer.find_load_balancer(self.NAME)
        self.assertEqual(self.ID, test_lb.id)
    def test_get(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.ID)
        self.assertEqual(self.NAME, test_lb.name)
        self.assertEqual(self.ID, test_lb.id)
        self.assertEqual(self.VIP_SUBNET_ID, test_lb.vip_subnet_id)
    def test_list(self):
        names = [lb.name for lb in self.conn.load_balancer.load_balancers()]
        self.assertIn(self.NAME, names)
| 36.089286 | 76 | 0.715487 |
acf95395c154c1aedfbf1a9fa3f9f3f4c5a16157 | 1,264 | py | Python | PyEMD/splines.py | LonelyBattle/PyEMD | c293e30d8d280db6103dffa864b67795c79d33c6 | [
"Apache-2.0"
] | 1 | 2021-07-26T05:44:56.000Z | 2021-07-26T05:44:56.000Z | PyEMD/splines.py | BisonLeo/PyEMD | 612474f616b5d68510a4a467b7fce7f2c7c12b48 | [
"Apache-2.0"
] | null | null | null | PyEMD/splines.py | BisonLeo/PyEMD | 612474f616b5d68510a4a467b7fce7f2c7c12b48 | [
"Apache-2.0"
] | 1 | 2021-07-26T05:44:57.000Z | 2021-07-26T05:44:57.000Z | from __future__ import division
import numpy as np
from scipy.interpolate import Akima1DInterpolator
def cubic_spline_3pts(x, y, T):
    """Cubic-spline interpolation through exactly three points.

    Works around scipy.interpolate.interp1d, which rejects cubic splines
    for fewer than 4 points.  Returns the subset of ``T`` lying inside
    [x0, x2] together with the interpolated values there.
    """
    x0, x1, x2 = x
    y0, y1, y2 = y
    h0, h1 = x1 - x0, x2 - x1          # interval widths
    d0, d1 = y1 - y0, y2 - y1          # value differences
    r0, r1 = 1. / h0, 1. / h1          # reciprocal widths
    # Tridiagonal system for the knot derivatives k.
    M = np.array([
        [2 * r0, r0, 0],
        [r0, 2. * (r0 + r1), r1],
        [0, r1, 2. * r1],
    ])
    rhs0 = 3 * d0 * r0 * r0
    rhs2 = 3 * d1 * r1 * r1
    v = np.array([rhs0, rhs0 + rhs2, rhs2]).T
    k = np.array(np.linalg.inv(M).dot(v))
    # Hermite-form coefficients on each of the two intervals.
    a1 = k[0] * h0 - d0
    b1 = -k[1] * h0 + d0
    a2 = k[1] * h1 - d1
    b2 = -k[2] * h1 + d1
    # Evaluate on the normalised parameter of each interval.
    t = T[np.r_[T >= x0] & np.r_[T <= x2]]
    t1 = (T[np.r_[T >= x0] & np.r_[T < x1]] - x0) / h0
    t2 = (T[np.r_[T >= x1] & np.r_[T <= x2]] - x1) / h1
    u1, u2 = 1. - t1, 1. - t2
    q1 = u1 * y0 + t1 * y1 + t1 * u1 * (a1 * u1 + b1 * t1)
    q2 = u2 * y1 + t2 * y2 + t2 * u2 * (a2 * u2 + b2 * t2)
    return t, np.append(q1, q2)
def akima(X, Y, x):
    """Evaluate an Akima spline built on the knots (X, Y) at ``x``."""
    return Akima1DInterpolator(X, Y)(x)
| 24.784314 | 62 | 0.536392 |
acf9548aaacba81bcdad477be8e92f1736f305a1 | 642 | py | Python | data_structure/challenges/tree-level-order-traversal.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | data_structure/challenges/tree-level-order-traversal.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | data_structure/challenges/tree-level-order-traversal.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | 1 | 2020-01-30T06:47:09.000Z | 2020-01-30T06:47:09.000Z | """
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
class Queue(object):
    """Minimal FIFO queue backed by a list.

    Note: ``pop(0)`` is O(n); fine for small trees (collections.deque
    would be the O(1) alternative).  The misspelled ``queque`` attribute
    and parameter names are kept for backward compatibility.
    """
    def __init__(self):
        self.queque = []
    def put(self, some_shit):
        self.queque.append(some_shit)
    def get(self):
        return self.queque.pop(0)
    def empty(self):
        return bool(self.queque == [])
def levelOrder(root, virgin=True):
    """Print node data in breadth-first (level) order.

    Fixes vs. the original: ``None`` children are no longer enqueued
    (they crashed on ``node.data``), and the per-node recursive call was
    removed — with ``virgin=False`` it built an empty queue and did
    nothing.  ``virgin`` is kept for signature compatibility; a
    ``virgin=False`` call is still a no-op, as before.
    """
    if not virgin or root is None:
        return
    q = Queue()
    q.put(root)
    while not q.empty():
        node = q.get()
        print(node.data)
        if node.left is not None:
            q.put(node.left)
        if node.right is not None:
            q.put(node.right)
| 22.928571 | 40 | 0.593458 |
acf954e36a4e4bf650b090e4a1e654467a7d6d3a | 662 | py | Python | examples/test_chinese_pdf.py | hyahiaoui/SeleniumBase | 8c7fc30cadf289731767953f22eeab3c92271760 | [
"MIT"
] | null | null | null | examples/test_chinese_pdf.py | hyahiaoui/SeleniumBase | 8c7fc30cadf289731767953f22eeab3c92271760 | [
"MIT"
] | null | null | null | examples/test_chinese_pdf.py | hyahiaoui/SeleniumBase | 8c7fc30cadf289731767953f22eeab3c92271760 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from seleniumbase import BaseCase
class ChinesePdfTestClass(BaseCase):
    """SeleniumBase test: extract and assert Chinese text in a remote PDF."""
    def test_chinese_pdf(self):
        # Implicit string concatenation: repo URL + attachment path.
        pdf = "https://github.com/seleniumbase/SeleniumBase/" "files/3895614/unittest.pdf"
        # Get and print PDF text
        pdf_text = self.get_pdf_text(pdf, page=2)
        print("\n" + pdf_text)
        # Assert PDF contains the expected text on Page 2
        self.assert_pdf_text(pdf, "个测试类", page=2)
        # Assert PDF contains the expected text on any of the pages
        self.assert_pdf_text(pdf, "运行单元测试")
        self.assert_pdf_text(pdf, "等待测试结束后显示所有结果")
        self.assert_pdf_text(pdf, "测试的执行跟方法的顺序没有关系")
| 31.52381 | 90 | 0.664653 |
acf955501cc991471d9b58a399c0d0ca612badbf | 10,044 | py | Python | openstackclient/tests/unit/api/test_object_store_v1.py | alvarosimon/python-openstackclient | 2ab3396f19796935ddcb281b865d37839a4f84f7 | [
"Apache-2.0"
] | 1 | 2018-04-23T20:59:31.000Z | 2018-04-23T20:59:31.000Z | openstackclient/tests/unit/api/test_object_store_v1.py | adgeese/python-openstackclient | 06263bd5852aad9cd03a76f50140fbbb2d0751ba | [
"Apache-2.0"
] | null | null | null | openstackclient/tests/unit/api/test_object_store_v1.py | adgeese/python-openstackclient | 06263bd5852aad9cd03a76f50140fbbb2d0751ba | [
"Apache-2.0"
] | 1 | 2020-07-21T02:18:23.000Z | 2020-07-21T02:18:23.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Object Store v1 API Library Tests"""
import mock
from keystoneauth1 import session
from requests_mock.contrib import fixture
from openstackclient.api import object_store_v1 as object_store
from openstackclient.tests.unit import utils
# Shared fixtures for the object-store unit tests below.
FAKE_ACCOUNT = 'q12we34r'
FAKE_AUTH = '11223344556677889900'
FAKE_URL = 'http://gopher.com/v1/' + FAKE_ACCOUNT
FAKE_CONTAINER = 'rainbarrel'
FAKE_OBJECT = 'spigot'
# Canned GET responses used by the list tests.
LIST_CONTAINER_RESP = [
    'qaz',
    'fred',
]
LIST_OBJECT_RESP = [
    {'name': 'fred', 'bytes': 1234, 'content_type': 'text'},
    {'name': 'wilma', 'bytes': 5678, 'content_type': 'text'},
]
class TestObjectAPIv1(utils.TestCase):
    """Base fixture: APIv1 client on FAKE_URL with requests mocked."""
    def setUp(self):
        super(TestObjectAPIv1, self).setUp()
        sess = session.Session()
        self.api = object_store.APIv1(session=sess, endpoint=FAKE_URL)
        self.requests_mock = self.useFixture(fixture.Fixture())
class TestContainer(TestObjectAPIv1):
    """Unit tests for APIv1 container calls, one mocked URI per test."""
    def setUp(self):
        super(TestContainer, self).setUp()
    def test_container_create(self):
        headers = {
            'x-trans-id': '1qaz2wsx',
        }
        self.requests_mock.register_uri(
            'PUT',
            FAKE_URL + '/qaz',
            headers=headers,
            status_code=201,
        )
        ret = self.api.container_create(container='qaz')
        # container_create() folds selected response headers into its result.
        data = {
            'account': FAKE_ACCOUNT,
            'container': 'qaz',
            'x-trans-id': '1qaz2wsx',
        }
        self.assertEqual(data, ret)
    def test_container_delete(self):
        self.requests_mock.register_uri(
            'DELETE',
            FAKE_URL + '/qaz',
            status_code=204,
        )
        ret = self.api.container_delete(container='qaz')
        self.assertIsNone(ret)
    def test_container_list_no_options(self):
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL,
            json=LIST_CONTAINER_RESP,
            status_code=200,
        )
        ret = self.api.container_list()
        self.assertEqual(LIST_CONTAINER_RESP, ret)
    def test_container_list_prefix(self):
        # The prefix must be url-encoded ('/' -> %2f) into the query string.
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '?prefix=foo%2f&format=json',
            json=LIST_CONTAINER_RESP,
            status_code=200,
        )
        ret = self.api.container_list(
            prefix='foo/',
        )
        self.assertEqual(LIST_CONTAINER_RESP, ret)
    def test_container_list_marker_limit_end(self):
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '?marker=next&limit=2&end_marker=stop&format=json',
            json=LIST_CONTAINER_RESP,
            status_code=200,
        )
        ret = self.api.container_list(
            marker='next',
            limit=2,
            end_marker='stop',
        )
        self.assertEqual(LIST_CONTAINER_RESP, ret)
    # def test_container_list_full_listing(self):
    #     sess = self.app.client_manager.session
    #
    #     def side_effect(*args, **kwargs):
    #         rv = sess.get().json.return_value
    #         sess.get().json.return_value = []
    #         sess.get().json.side_effect = None
    #         return rv
    #
    #     resp = [{'name': 'is-name'}]
    #     sess.get().json.return_value = resp
    #     sess.get().json.side_effect = side_effect
    #
    #     data = lib_container.list_containers(
    #         self.app.client_manager.session,
    #         fake_url,
    #         full_listing=True,
    #     )
    #
    #     # Check expected values
    #     sess.get.assert_called_with(
    #         fake_url,
    #         params={
    #             'format': 'json',
    #             'marker': 'is-name',
    #         }
    #     )
    #     self.assertEqual(resp, data)
    def test_container_show(self):
        # Metadata headers are normalized into the 'properties' sub-dict.
        headers = {
            'X-Container-Meta-Owner': FAKE_ACCOUNT,
            'x-container-object-count': '1',
            'x-container-bytes-used': '577',
        }
        resp = {
            'account': FAKE_ACCOUNT,
            'container': 'qaz',
            'object_count': '1',
            'bytes_used': '577',
            'properties': {'Owner': FAKE_ACCOUNT},
        }
        self.requests_mock.register_uri(
            'HEAD',
            FAKE_URL + '/qaz',
            headers=headers,
            status_code=204,
        )
        ret = self.api.container_show(container='qaz')
        self.assertEqual(resp, ret)
class TestObject(TestObjectAPIv1):
    """Unit tests for APIv1 object calls, one mocked URI per test."""
    def setUp(self):
        super(TestObject, self).setUp()
    # Helper, not a test: invoked by test_object_create with the payload to
    # upload; the patched io.open supplies the file contents.
    @mock.patch('openstackclient.api.object_store_v1.io.open')
    def base_object_create(self, file_contents, mock_open):
        mock_open.read.return_value = file_contents
        headers = {
            'etag': 'youreit',
            'x-trans-id': '1qaz2wsx',
        }
        # TODO(dtroyer): When requests_mock gains the ability to
        #                match against request.body add this check
        #                https://review.openstack.org/127316
        self.requests_mock.register_uri(
            'PUT',
            FAKE_URL + '/qaz/counter.txt',
            headers=headers,
            # body=file_contents,
            status_code=201,
        )
        ret = self.api.object_create(
            container='qaz',
            object='counter.txt',
        )
        data = {
            'account': FAKE_ACCOUNT,
            'container': 'qaz',
            'object': 'counter.txt',
            'etag': 'youreit',
            'x-trans-id': '1qaz2wsx',
        }
        self.assertEqual(data, ret)
    def test_object_create(self):
        # Exercise both a text payload and an arbitrary binary payload.
        self.base_object_create('111\n222\n333\n')
        self.base_object_create(bytes([0x31, 0x00, 0x0d, 0x0a, 0x7f, 0xff]))
    def test_object_delete(self):
        self.requests_mock.register_uri(
            'DELETE',
            FAKE_URL + '/qaz/wsx',
            status_code=204,
        )
        ret = self.api.object_delete(
            container='qaz',
            object='wsx',
        )
        self.assertIsNone(ret)
    def test_object_list_no_options(self):
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/qaz',
            json=LIST_OBJECT_RESP,
            status_code=200,
        )
        ret = self.api.object_list(container='qaz')
        self.assertEqual(LIST_OBJECT_RESP, ret)
    def test_object_list_delimiter(self):
        # '|' must be url-encoded as %7C in the query string.
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/qaz?delimiter=%7C',
            json=LIST_OBJECT_RESP,
            status_code=200,
        )
        ret = self.api.object_list(
            container='qaz',
            delimiter='|',
        )
        self.assertEqual(LIST_OBJECT_RESP, ret)
    def test_object_list_prefix(self):
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/qaz?prefix=foo%2f',
            json=LIST_OBJECT_RESP,
            status_code=200,
        )
        ret = self.api.object_list(
            container='qaz',
            prefix='foo/',
        )
        self.assertEqual(LIST_OBJECT_RESP, ret)
    def test_object_list_marker_limit_end(self):
        # NOTE(review): reuses LIST_CONTAINER_RESP — presumably copy/paste
        # from the container test; harmless since both sides of the
        # assertion use the same constant.
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/qaz?marker=next&limit=2&end_marker=stop',
            json=LIST_CONTAINER_RESP,
            status_code=200,
        )
        ret = self.api.object_list(
            container='qaz',
            marker='next',
            limit=2,
            end_marker='stop',
        )
        self.assertEqual(LIST_CONTAINER_RESP, ret)
    # def test_list_objects_full_listing(self):
    #     sess = self.app.client_manager.session
    #
    #     def side_effect(*args, **kwargs):
    #         rv = sess.get().json.return_value
    #         sess.get().json.return_value = []
    #         sess.get().json.side_effect = None
    #         return rv
    #
    #     resp = [{'name': 'is-name'}]
    #     sess.get().json.return_value = resp
    #     sess.get().json.side_effect = side_effect
    #
    #     data = lib_object.list_objects(
    #         sess,
    #         fake_url,
    #         fake_container,
    #         full_listing=True,
    #     )
    #
    #     # Check expected values
    #     sess.get.assert_called_with(
    #         fake_url + '/' + fake_container,
    #         params={
    #             'format': 'json',
    #             'marker': 'is-name',
    #         }
    #     )
    #     self.assertEqual(resp, data)
    def test_object_show(self):
        # x-object-meta-* headers are collected into 'properties' (metadata
        # key case preserved); the unrelated x-tra-header is dropped.
        headers = {
            'content-type': 'text/alpha',
            'content-length': '577',
            'last-modified': '20130101',
            'etag': 'qaz',
            'x-container-meta-owner': FAKE_ACCOUNT,
            'x-object-meta-wife': 'Wilma',
            'x-object-meta-Husband': 'fred',
            'x-tra-header': 'yabba-dabba-do',
        }
        resp = {
            'account': FAKE_ACCOUNT,
            'container': 'qaz',
            'object': FAKE_OBJECT,
            'content-type': 'text/alpha',
            'content-length': '577',
            'last-modified': '20130101',
            'etag': 'qaz',
            'properties': {'wife': 'Wilma',
                           'Husband': 'fred'},
        }
        self.requests_mock.register_uri(
            'HEAD',
            FAKE_URL + '/qaz/' + FAKE_OBJECT,
            headers=headers,
            status_code=204,
        )
        ret = self.api.object_show(
            container='qaz',
            object=FAKE_OBJECT,
        )
        self.assertEqual(resp, ret)
| 29.715976 | 77 | 0.544902 |
acf95558ea0ea714827980a0fd8986c3c9f50215 | 975 | py | Python | 07-closest-city/togeojson.py | tomwhite/datavision-code | f702cb19eebef857aca512bb096d2dc6cd3695af | [
"Apache-2.0"
] | 1 | 2020-04-04T17:22:02.000Z | 2020-04-04T17:22:02.000Z | 07-closest-city/togeojson.py | tomwhite/datavision-code | f702cb19eebef857aca512bb096d2dc6cd3695af | [
"Apache-2.0"
] | null | null | null | 07-closest-city/togeojson.py | tomwhite/datavision-code | f702cb19eebef857aca512bb096d2dc6cd3695af | [
"Apache-2.0"
] | 1 | 2021-01-24T19:47:26.000Z | 2021-01-24T19:47:26.000Z | import json
import re
def _point(lng, lat, name):
    """Build a GeoJSON Point geometry for one city.

    NOTE(review): lat/lng are kept as the parsed strings, exactly as the
    original script emitted them; strict GeoJSON (RFC 7946) expects
    numbers — confirm downstream consumers before changing.
    """
    return {
        "type": "Point",
        "coordinates": [lng, lat],
        "properties": {
            "name": name
        }
    }
geometries = []
# Wikitext infobox lines: ...|lat=51.48|long=-3.18|...|label=...[[Cardiff]]
pattern = r'.*lat=(-?\d+\.\d*)\|long=(-?\d+\.\d*)\|.*label=.*\[\[(.*)\]\]'
# ``with`` closes the input files (the bare open() calls leaked handles).
with open('data/wikipedia-list-of-cities-uk.txt', 'r') as uk_file:
    for line in uk_file:
        m = re.match(pattern, line)
        if not m:
            continue
        lat, lng, city = m.group(1), m.group(2), m.group(3)
        # "[[target|label]]" wiki links: keep only the display label.
        if '|' in city:
            city = city[city.find('|') + 1:]
        geometries.append(_point(lng, lat, city))
with open('data/wikipedia-list-of-cities-ireland.csv', 'r') as ie_file:
    for line in ie_file:
        city, lat, lng = line.strip().split(',')
        if city == 'city':
            continue  # header
        geometries.append(_point(lng, lat, city))
places = {
    "type": "GeometryCollection",
    "geometries": geometries
}
print(json.dumps(places, indent=2))
| 25 | 74 | 0.484103 |
acf95568382017f5084946a9f51ce8854b919fb7 | 1,643 | py | Python | instagram_clone/images/migrations/0002_auto_20171202_2048.py | hwshim0810/instaclone-Django | 79c77bfee39b69298c42ec0d154bb989c9e82730 | [
"MIT"
] | null | null | null | instagram_clone/images/migrations/0002_auto_20171202_2048.py | hwshim0810/instaclone-Django | 79c77bfee39b69298c42ec0d154bb989c9e82730 | [
"MIT"
] | 17 | 2020-06-05T16:52:12.000Z | 2022-03-11T23:22:59.000Z | instagram_clone/images/migrations/0002_auto_20171202_2048.py | hwshim0810/instaclone-Django | 79c77bfee39b69298c42ec0d154bb989c9e82730 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-02 11:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Like model and adds the
    # creator/image foreign keys to Comment and the creator key to Image.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('images', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='comment',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='comment',
            name='image',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='images.Image'),
        ),
        migrations.AddField(
            model_name='image',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 35.717391 | 132 | 0.612295 |
acf9559ec436226fb786a113bc5dda268b835051 | 33,440 | py | Python | tensorflow/contrib/learn/python/learn/estimators/head_test.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 65 | 2016-09-26T01:30:40.000Z | 2021-08-11T17:00:41.000Z | tensorflow/contrib/learn/python/learn/estimators/head_test.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 5 | 2017-02-21T08:37:52.000Z | 2017-03-29T05:46:05.000Z | tensorflow/contrib/learn/python/learn/estimators/head_test.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 10 | 2017-02-08T21:39:27.000Z | 2018-10-04T17:34:54.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
# NOTE(review): RTLD_GLOBAL presumably lets later-loaded extension modules
# share symbols (see issue #6568) — needed by the contrib build at the time.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
  import ctypes
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _assert_variables(test_case,
                      expected_global=None,
                      expected_model=None,
                      expected_trainable=None):
  """Compare global/model/trainable variable names against expectations.

  ``None`` for any expectation means "expect no variables of that kind".
  """
  checks = (
      (expected_global, variables.global_variables()),
      (expected_model, variables.model_variables()),
      (expected_trainable, variables.trainable_variables()),
  )
  for expected, actual in checks:
    test_case.assertItemsEqual(
        [] if expected is None else expected, [k.name for k in actual])
def _assert_no_variables(test_case):
  """Assert that no global, model, or trainable variables exist."""
  _assert_variables(test_case)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
                    model_fn_ops):
  """Evaluate the loss and every eval metric, comparing to expectations.

  For each metric both the ``update`` op result and the subsequent
  ``value`` read are checked to 4 decimal places.
  """
  test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
  for k in six.iterkeys(expected_eval_metrics):
    test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))
  # Local variables back the streaming metrics; initialize before updating.
  variables.initialize_local_variables().run()
  for key, expected_value in six.iteritems(expected_eval_metrics):
    value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]
    update = update_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        update,
        places=4,
        msg="%s: update, expected %s, got %s." % (key, expected_value, update))
    value = value_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        value,
        places=4,
        msg="%s: value, expected %s, got %s." % (key, expected_value, value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
  """Assert the graph's SUMMARIES collection carries exactly these tags."""
  actual_tags = []
  for summary_op in ops.get_collection(ops.GraphKeys.SUMMARIES):
    summ = summary_pb2.Summary()
    summ.ParseFromString(summary_op.eval())
    # Each summary op here is expected to hold a single value.
    actual_tags.append(summ.value[0].tag)
  test_case.assertItemsEqual(expected_tags or [], actual_tags)
def _sigmoid(x):
  """Logistic function: 1 / (1 + e^-x)."""
  return 1. / (1. + math.exp(-x))
class RegressionModelHeadTest(test.TestCase):
# TODO(zakaria): test multilabel regression.
def testRegression(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = constant_op.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(
{},
labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=prediction)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionEvalMode(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = constant_op.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(
{}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=prediction)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithLabelName(self):
label_name = "my_label"
head = head_lib._regression_head(label_name=label_name)
with ops.Graph().as_default(), session.Session():
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = {label_name: constant_op.constant([[0.], [1.], [1.]])}
model_fn_ops = head.head_ops(
{},
labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=prediction)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithWeights(self):
head = head_lib._regression_head(weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
weights = ((2.,), (5.,), (0.,))
features = {"label_weight": constant_op.constant(weights)}
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = constant_op.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(
features,
labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=prediction)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
model_fn_ops)
def testRegressionWithCenteredBias(self):
head = head_lib._regression_head(enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = constant_op.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(
{},
labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=prediction)
_assert_variables(
self,
expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",),
expected_trainable=("centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
  def testErrorInSparseTensorLabels(self):
    """SparseTensor labels are rejected with a descriptive ValueError."""
    head = head_lib._regression_head()
    with ops.Graph().as_default():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = sparse_tensor.SparseTensor(
          indices=constant_op.constant(
              [[0, 0], [1, 0], [2, 0]], dtype=dtypes.int64),
          values=constant_op.constant([0., 1., 1.]),
          dense_shape=[3, 1])
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported as labels."):
        head.head_ops(
            {},
            labels,
            model_fn.ModeKeys.TRAIN,
            _noop_train_op,
            logits=prediction)
class MultiLabelModelHeadTest(test.TestCase):
  """Tests for head_lib._multi_label_head (independent sigmoid per class)."""

  def setUp(self):
    # Single example: highest logit on class 0, but the true label is class 2.
    self._logits = ((1., 0., 0.),)
    self._labels = ((0, 0, 1),)

  def _expected_eval_metrics(self, expected_loss):
    """Returns the eval metrics implied by the single-example fixture."""
    return {
        "accuracy": 1. / 3,
        "auc": 1. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": self._labels[0][0],
        "labels/actual_label_mean/class1": self._labels[0][1],
        "labels/actual_label_mean/class2": self._labels[0][2],
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
        "labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
        "labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
    }

  def testMultiLabel(self):
    """TRAIN mode: loss and per-class metrics for a plain multi-label head."""
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelEvalMode(self):
    """EVAL mode: no train op, same loss and metrics as TRAIN."""
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant([[1., 0., 0.]])
      labels = constant_op.constant([[0, 0, 1]])
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithLabelName(self):
    """Labels supplied as a dict keyed by label_name are resolved correctly."""
    n_classes = 3
    label_name = "my_label"
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        label_name=label_name,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant([[1., 0., 0.]])
      labels = {label_name: constant_op.constant([[0, 0, 1]])}
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithWeight(self):
    """A scalar weight of .1 scales the loss but not the eval metrics."""
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      features = {"label_weight": constant_op.constant(.1)}
      logits = constant_op.constant([[1., 0., 0.]])
      labels = constant_op.constant([[0, 0, 1]])
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, .089985214,
                      self._expected_eval_metrics(2.69956), model_fn_ops)

  def testMultiLabelWithCenteredBias(self):
    """enable_centered_bias creates one bias variable (and summary) per class."""
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant([[1., 0., 0.]])
      labels = constant_op.constant([[0, 0, 1]])
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, [
          "loss", "centered_bias/bias_0", "centered_bias/bias_1",
          "centered_bias/bias_2"
      ])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class BinaryClassificationModelHeadTest(test.TestCase):
  """Tests for head_lib._multi_class_head with n_classes=2 (logistic head)."""

  def setUp(self):
    # Two examples with identical logits but opposite labels, so exactly one
    # of the two is classified correctly.
    self._logits = ((1.,), (1.,))
    self._labels = ((1.,), (0.,))

  def _expected_eval_metrics(self, expected_loss):
    """Returns the eval metrics implied by the two-example fixture."""
    return {
        "accuracy": 1. / 2,
        "accuracy/baseline_label_mean": np.mean(self._labels),
        "accuracy/threshold_0.500000_mean": 1. / 2,
        "auc": 1. / 2,
        "labels/actual_label_mean": np.mean(self._labels),
        "labels/prediction_mean": .731059,  # softmax
        "loss": expected_loss,
        "precision/positive_threshold_0.500000_mean": 1. / 2,
        "recall/positive_threshold_0.500000_mean": 1. / 1,
    }

  def testBinaryClassification(self):
    """TRAIN mode computes sigmoid cross-entropy loss and eval metrics."""
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationEvalMode(self):
    """EVAL mode: no train op, same loss and metrics as TRAIN."""
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationInferMode(self):
    """INFER mode exposes a single logistic-regression output alternative."""
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.INFER, _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      self.assertEqual(1, len(model_fn_ops.output_alternatives))
      self.assertEqual(constants.ProblemType.LOGISTIC_REGRESSION,
                       model_fn_ops.output_alternatives[None][0])

  def testErrorInSparseTensorLabels(self):
    """SparseTensor labels are rejected with a descriptive ValueError."""
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = sparse_tensor.SparseTensor(
          indices=constant_op.constant(
              [[0, 0], [1, 0], [2, 0]], dtype=dtypes.int64),
          values=constant_op.constant([0, 1, 1]),
          dense_shape=[3, 1])
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported as labels."):
        head.head_ops(
            {},
            labels,
            model_fn.ModeKeys.TRAIN,
            _noop_train_op,
            logits=prediction)

  def testBinaryClassificationWithLabelName(self):
    """Labels supplied as a dict keyed by label_name are resolved correctly."""
    label_name = "my_label"
    head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = {label_name: constant_op.constant(self._labels)}
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationWithWeights(self):
    """A zero weight on the misclassified example removes it from the loss."""
    n_classes = 2
    head = head_lib._multi_class_head(
        n_classes=n_classes, weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = ((1.,), (0.,))
      features = {"label_weight": constant_op.constant(weights)}
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_total_loss = .31326166
      _assert_metrics(
          self,
          expected_total_loss / len(weights),
          {
              "accuracy": 1. / 1,
              "accuracy/baseline_label_mean": 1. / 1,
              "accuracy/threshold_0.500000_mean": 1. / 1,
              "auc": 0. / 1,
              "labels/actual_label_mean": 1. / 1,
              "labels/prediction_mean": .731059,  # softmax
              # TODO(ptucker): Is this the correct eval loss, sum not average?
              "loss": expected_total_loss,
              "precision/positive_threshold_0.500000_mean": 1. / 1,
              "recall/positive_threshold_0.500000_mean": 1. / 1,
          },
          model_fn_ops)

  def testBinaryClassificationWithCenteredBias(self):
    """enable_centered_bias adds a trainable bias variable plus its summary."""
    head = head_lib._multi_class_head(n_classes=2, enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class MultiClassModelHeadTest(test.TestCase):
  """Tests for head_lib._multi_class_head with n_classes > 2 (softmax head)."""

  def setUp(self):
    # Single example: highest logit on class 0, true label is class 2.
    self._logits = ((1., 0., 0.),)
    self._labels = (2,)

  def _expected_eval_metrics(self, expected_loss):
    """Returns the eval metrics implied by the single-example fixture."""
    return {
        "accuracy": 0.,
        "auc": 1. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": 0. / 1,
        "labels/actual_label_mean/class1": 0. / 1,
        "labels/actual_label_mean/class2": 1. / 1,
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": 0.576117,  # softmax
        "labels/probability_mean/class1": 0.211942,  # softmax
        "labels/probability_mean/class2": 0.211942,  # softmax
    }

  def testMultiClass(self):
    """TRAIN mode: softmax cross-entropy loss and per-class metrics."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassEvalMode(self):
    """EVAL mode: no train op, same loss and metrics as TRAIN."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWithWeight(self):
    """A scalar weight of .1 scales the loss but not the eval metrics."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      features = {"label_weight": constant_op.constant(weight)}
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testInvalidNClasses(self):
    """n_classes must be at least 2; None/negative/0/1 are all rejected."""
    for n_classes in (None, -1, 0, 1):
      with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
        head_lib._multi_class_head(n_classes=n_classes)
class BinarySvmModelHeadTest(test.TestCase):
  """Tests for head_lib._binary_svm_head (hinge loss)."""

  def setUp(self):
    # Prediction for first example is in the right side of the hyperplane
    # (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
    # incurred by this example. The 2nd prediction is outside the margin so it
    # incurs no loss at all.
    self._predictions = ((-.5,), (1.2,))
    self._labels = (0, 1)
    self._expected_losses = (.5, 0.)

  def testBinarySVMDefaultWeights(self):
    """TRAIN mode: unweighted hinge loss averaged over the examples."""
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMEvalMode(self):
    """EVAL mode: no train op, same loss and metrics as TRAIN."""
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.EVAL,
          _noop_train_op,
          logits=predictions)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithLabelName(self):
    """Labels supplied as a dict keyed by label_name are resolved correctly."""
    label_name = "my_label"
    head = head_lib._binary_svm_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = {label_name: constant_op.constant(self._labels)}
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithWeights(self):
    """Per-example weights scale each example's hinge loss."""
    head = head_lib._binary_svm_head(weight_column_name="weights")
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      weights = (7., 11.)
      features = {"weights": constant_op.constant(weights)}
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Weighted sum: 7*.5 + 11*0 = 3.5. Training loss divides by the
      # example count, the eval "loss" metric by the weight sum.
      expected_weighted_sum = np.sum(
          np.multiply(weights, self._expected_losses))
      _assert_metrics(self, expected_weighted_sum / len(weights), {
          "accuracy": 1.,
          "loss": expected_weighted_sum / np.sum(weights),
      }, model_fn_ops)

  def testBinarySVMWithCenteredBias(self):
    """enable_centered_bias adds a trainable bias variable plus its summary."""
    head = head_lib._binary_svm_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)
class MultiHeadTest(test.TestCase):
  """Tests for head_lib._multi_head combining a 3-class and a 4-class head.

  The 7-wide logits tensor is split between the two heads; each head reads
  its own labels entry by label_name.
  """

  def testTrain_withNoHeadWeights(self):
    """TRAIN without head weights: per-head losses are combined unweighted."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.TRAIN,
        _noop_train_op,
        logits=logits)
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)

  def testTrain_withHeadWeights(self):
    """TRAIN with head weights [1, .5]: the combined loss is reweighted."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.TRAIN,
        _noop_train_op,
        logits=logits)
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)

  def testInfer(self):
    """INFER: per-head prediction keys and output alternatives, no loss."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.INFER,
        _noop_train_op,
        logits=logits)
    self.assertTrue(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEqual(2, len(model_fn_ops.output_alternatives))
    # Predictions are keyed by (head_name, prediction_key) tuples.
    pred_keys = model_fn_ops.predictions.keys()
    self.assertIn(
        ("head1", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
    self.assertIn(("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
    self.assertIn(
        ("head2", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
    self.assertIn(("head2", prediction_key.PredictionKey.CLASSES), pred_keys)
    # Each head contributes its own classification output alternative.
    out_alts = model_fn_ops.output_alternatives
    self.assertEqual(constants.ProblemType.CLASSIFICATION,
                     out_alts["head1"][0])
    self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                  out_alts["head1"][1])
    self.assertIn(prediction_key.PredictionKey.CLASSES, out_alts["head1"][1])
    self.assertEqual(constants.ProblemType.CLASSIFICATION,
                     out_alts["head2"][0])
    self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                  out_alts["head2"][1])
    self.assertIn(prediction_key.PredictionKey.CLASSES, out_alts["head2"][1])

  def testEval(self):
    """EVAL: loss and per-head metrics are present, no train op."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
    self.assertTrue(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertTrue(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    metric_ops = model_fn_ops.eval_metric_ops
    # Metric names are suffixed with the head name.
    self.assertIn("accuracy/head1", metric_ops)
    self.assertIn("accuracy/head2", metric_ops)
def _noop_train_op(unused_loss):
  """Train-op factory used by the head_ops tests; ignores the loss."""
  return control_flow_ops.no_op()
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| 39.480519 | 80 | 0.653379 |
acf955e510f3f08f675fe46512718387cc3cba23 | 2,235 | gyp | Python | sysapps/sysapps.gyp | gaurangkumar/crosswalk | 1b9b80835e83e77390bd6cdbc03beb63f2a6f550 | [
"BSD-3-Clause"
] | 2,211 | 2015-01-01T08:50:09.000Z | 2022-03-30T02:48:16.000Z | sysapps/sysapps.gyp | gaurangkumar/crosswalk | 1b9b80835e83e77390bd6cdbc03beb63f2a6f550 | [
"BSD-3-Clause"
] | 1,269 | 2015-01-02T10:43:16.000Z | 2020-01-17T00:58:09.000Z | sysapps/sysapps.gyp | gaurangkumar/crosswalk | 1b9b80835e83e77390bd6cdbc03beb63f2a6f550 | [
"BSD-3-Clause"
] | 585 | 2015-01-02T01:12:15.000Z | 2022-03-09T07:07:18.000Z | {
'targets': [
{
'target_name': 'sysapps',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base',
'../../net/net.gyp:net',
'../../ui/base/ui_base.gyp:ui_base',
'../../ui/gfx/gfx.gyp:gfx',
'../../ui/gfx/gfx.gyp:gfx_geometry',
'../extensions/extensions.gyp:xwalk_extensions',
'sysapps_resources.gyp:xwalk_sysapps_resources',
],
'variables': {
'jsapi_component': 'sysapps',
},
'includes': [
'../../build/filename_rules.gypi',
'../xwalk_jsapi.gypi',
],
'sources': [
'common/binding_object.h',
'common/binding_object_store.cc',
'common/binding_object_store.h',
'common/common.idl',
'common/event_target.cc',
'common/event_target.h',
'common/sysapps_manager.cc',
'common/sysapps_manager.h',
'raw_socket/raw_socket.idl',
'raw_socket/raw_socket_extension.cc',
'raw_socket/raw_socket_extension.h',
'raw_socket/raw_socket_object.cc',
'raw_socket/raw_socket_object.h',
'raw_socket/tcp_server_socket.idl',
'raw_socket/tcp_server_socket_object.cc',
'raw_socket/tcp_server_socket_object.h',
'raw_socket/tcp_socket.idl',
'raw_socket/tcp_socket_object.cc',
'raw_socket/tcp_socket_object.h',
'raw_socket/udp_socket.idl',
'raw_socket/udp_socket_object.cc',
'raw_socket/udp_socket_object.h',
],
'conditions': [
['OS!="android"', {
'dependencies': [
'../../components/components.gyp:storage_monitor',
'../../media/media.gyp:media',
'../../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
],
}],
['OS=="win"', {
'link_settings': {
'libraries': [
'-lPdh.lib',
],
},
}]
],
'direct_dependent_settings': {
'include_dirs': [
# Build units including this module should have this
# on theirs include path because of the code we generate
# from the IDL files.
'<(SHARED_INTERMEDIATE_DIR)',
]
},
},
],
}
| 30.616438 | 66 | 0.538255 |
acf95643b69bb0e073b915a0fbb2016a93ed5584 | 8,089 | py | Python | dataset.py | ZhangYu-zjut/CNN-GRU | d82bf17e95d481c46e0f1096f9459aace41a7d05 | [
"MIT"
] | null | null | null | dataset.py | ZhangYu-zjut/CNN-GRU | d82bf17e95d481c46e0f1096f9459aace41a7d05 | [
"MIT"
] | null | null | null | dataset.py | ZhangYu-zjut/CNN-GRU | d82bf17e95d481c46e0f1096f9459aace41a7d05 | [
"MIT"
] | null | null | null | # encoding:utf-8
# python=3.6+
import numpy as np
import os
import csv
import pandas as pd
import torch
import torch as t
from torch.utils.data import Dataset
import torchvision.transforms as T
from PIL import Image
from config import parsers
opt = parsers()
"""
Read the image data
1.read the data
2.split the data
3.do the transform
"""
class DataSet(Dataset):
    """Sky-image + irradiance time-series dataset.

    Image files in ``img_path`` are named ``<id>-<label>[-...]``, where
    ``id`` is the sample's position in the irradiance series read from
    ``irr_data.csv`` and ``label`` is the raw irradiance value.

    Split selection:
        train set: train=True,  test=False
        val set:   train=False, test=False
        test set:  test=True

    Each item is ``(image_tensor, series_window_x, series_target_y)``,
    plus the integer image id when ``opt.return_img_id`` is set.
    """

    def __init__(self, img_path, transform=None, train=True, test=False,
                 train_val_ratio=0.8, normalize_type='mean_std'):
        """Index the images, read/normalize the series, build transforms.

        Args:
            img_path: directory containing the ``<id>-<label>`` images.
            transform: optional torchvision transform. When omitted a
                default pipeline is built (augmenting for train,
                deterministic for val/test).
            train: pick the train (True) or validation (False) split when
                ``test`` is False.
            test: when True, use every image in ``img_path``.
            train_val_ratio: fraction of images assigned to the train split.
            normalize_type: 'mean_std' or 'max_min' series normalization.
        """
        self.root_path = img_path
        self.test = test

        # Collect the images and sort them numerically by their leading
        # <id> field (lexicographic order would interleave 1, 10, 2, ...).
        imgs = [os.path.join(img_path, name) for name in os.listdir(img_path)]
        imgs.sort(key=lambda p: int(self._name_fields(p)[0]))
        img_num = len(imgs)
        print("img_num", img_num)

        # Label statistics. NOTE: self.mean/self.std may be overwritten by
        # normalize() below, which records the *series* statistics instead
        # (this matches the original behavior for 'mean_std').
        self.get_mean_std(img_path)

        # Read the irradiance series, normalize it, and slice it into
        # (window, value) supervision pairs indexed by image id.
        time_series_data_all = self.get_time_series_data()
        time_series_data_all = self.normalize(time_series_data_all,
                                              normalize_type)
        self.time_series_data_all_x, self.time_series_data_all_y = \
            self.split_data(time_series_data_all)

        # Label min/max drive the target scaling in __getitem__. Computed
        # *after* normalize() so a 'max_min' series normalization (which
        # also assigns self.min/self.max) cannot clobber them.
        self.get_min_max(img_path)

        # Select the image subset for the requested split.
        if self.test:
            self.img = imgs
        elif train:
            self.img = imgs[:int(train_val_ratio * img_num)]
        else:
            self.img = imgs[int(train_val_ratio * img_num):]

        if transform is None:
            normalize = T.Normalize(
                mean=[.5, .5, .5],
                std=[.5, .5, .5]
            )
            if test or not train:
                # Deterministic pipeline for val/test data.
                self.transform = T.Compose([
                    T.Resize(224),
                    T.CenterCrop(224),
                    T.ToTensor(),
                    normalize
                ])
            else:
                # Light augmentation for training data.
                self.transform = T.Compose([
                    T.Resize(224),
                    T.RandomCrop(224),
                    T.RandomHorizontalFlip(),
                    T.ToTensor(),
                    normalize
                ])
        else:
            # Bug fix: a caller-supplied transform used to be silently
            # ignored, leaving self.transform unset and crashing later
            # in __getitem__.
            self.transform = transform

    @staticmethod
    def _name_fields(path):
        """Return the '-'-separated fields of the file name (id, label, ...)."""
        return os.path.basename(path).split('-')

    def normalize(self, data, normalize_type):
        """Return ``data`` normalized, recording the statistics used.

        'mean_std' stores self.mean/self.std; 'max_min' stores
        self.max/self.min. NOTE(review): assumes non-constant data — a
        zero std or zero range would divide by zero; confirm upstream.
        """
        if normalize_type == 'mean_std':
            self.mean = np.mean(data)
            self.std = np.std(data)
            data = (data - self.mean) / self.std
        if normalize_type == 'max_min':
            self.max = np.max(data)
            self.min = np.min(data)
            data = (data - self.min) / (self.max - self.min)
        return data

    def get_max_min(self):
        """Return the stored (max, min) pair of the image labels."""
        return self.max, self.min

    def get_time_series_data(self):
        """Read the raw irradiance series (one CSV column) as a 1-D array."""
        irr_data_all = pd.read_csv(r'irr_data.csv', header=None,
                                   names=['irr']).values.reshape(-1, )
        return irr_data_all

    def split_data(self, data):
        """Slice ``data`` into (window, current value) supervision pairs.

        For position i, x is the preceding ``opt.window`` values,
        zero-padded on the left while i < window; y is data[i].
        """
        x_data = []
        y_data = []
        window = int(opt.window)
        for i in range(len(data)):
            if i < window:
                tmp = np.zeros(window)
                tmp[(window - i):] = data[:i]
            else:
                tmp = data[(i - window):i]
            x_data.append(tmp)
            y_data.append(data[i])
        return np.array(x_data).astype(float), np.array(y_data).astype(float)

    def split_data_new(self, data, horizon=1):
        """Like split_data, but y is ``horizon`` steps ahead of position i.

        One step corresponds to the series sampling interval (per the
        original comment: 10 minutes).
        """
        x_data = []
        y_data = []
        window = int(opt.window)
        for i in range(len(data) - horizon):
            if i < window:
                tmp = np.zeros(window)
                tmp[(window - i):] = data[:i]
            else:
                tmp = data[(i - window):i]
            x_data.append(tmp)
            y_data.append(data[i + horizon])
        return np.array(x_data).astype(float), np.array(y_data).astype(float)

    def __getitem__(self, index):
        """Return (image, series window x, series target y[, img_id]).

        The file-name label is NOT returned; supervision comes from the
        irradiance series (y).
        """
        img_path = self.img[index]
        if opt.debug:
            print("img_path is", img_path)
        img_data = self.transform(Image.open(img_path))
        # The leading <id> field indexes into the precomputed series pairs.
        img_id = int(self._name_fields(img_path)[0])
        irr_time_series_x = torch.tensor(self.time_series_data_all_x[img_id])
        irr_time_series_y = torch.tensor(self.time_series_data_all_y[img_id])
        if opt.return_img_id:
            return img_data, irr_time_series_x, irr_time_series_y, img_id
        return img_data, irr_time_series_x, irr_time_series_y

    def __len__(self):
        """Number of images in the selected split."""
        return len(self.img)

    def get_mean_std(self, img_path):
        """Compute and store the mean/std of the image labels in img_path."""
        labels = np.array([int(name.split('-')[1])
                           for name in os.listdir(img_path)])
        self.mean, self.std = np.mean(labels), np.std(labels)
        return self.mean, self.std

    def get_min_max(self, img_path):
        """Compute and store the min/max of the image labels in img_path."""
        labels = np.array([int(name.split('-')[1])
                           for name in os.listdir(img_path)])
        self.min, self.max = np.min(labels), np.max(labels)
        return self.min, self.max
| 35.017316 | 115 | 0.559896 |
acf959c0f04ae4fe4a834ae603ea38ba5bad2e6c | 513 | py | Python | .venv/lib/python3.8/site-packages/cffi/__init__.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | [
"MIT"
] | 5 | 2020-10-01T00:43:32.000Z | 2021-10-30T16:15:27.000Z | venv/Lib/site-packages/cffi/__init__.py | toHarsh/sRecords | 3ae86ca234c9d97c50d32c5378c24a6e189dfbb1 | [
"MIT"
] | 6 | 2020-11-18T00:10:41.000Z | 2022-02-09T01:04:55.000Z | venv/Lib/site-packages/cffi/__init__.py | toHarsh/sRecords | 3ae86ca234c9d97c50d32c5378c24a6e189dfbb1 | [
"MIT"
] | 6 | 2020-10-05T00:53:35.000Z | 2022-02-23T20:18:47.000Z | __all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
'FFIError']
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
from .error import PkgConfigError
__version__ = "1.14.2"
__version_info__ = (1, 14, 2)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"
| 34.2 | 78 | 0.758285 |
acf95a3c081cb2b640ca05bae3e95a667adad821 | 15,780 | py | Python | homeassistant/components/light/__init__.py | FlorianLudwig/home-assistant | 29ad3961e581d3591ce0963a7fa01672abadedf7 | [
"Apache-2.0"
] | 2 | 2017-10-26T19:43:55.000Z | 2017-12-30T23:29:00.000Z | homeassistant/components/light/__init__.py | FlorianLudwig/home-assistant | 29ad3961e581d3591ce0963a7fa01672abadedf7 | [
"Apache-2.0"
] | 2 | 2019-04-15T02:43:04.000Z | 2019-04-15T02:49:10.000Z | homeassistant/components/light/__init__.py | FlorianLudwig/home-assistant | 29ad3961e581d3591ce0963a7fa01672abadedf7 | [
"Apache-2.0"
] | 1 | 2019-06-19T07:43:11.000Z | 2019-06-19T07:43:11.000Z | """
Provides functionality to interact with lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light/
"""
import asyncio
import csv
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_CONTROL
from homeassistant.components.group import \
ENTITY_ID_FORMAT as GROUP_ENTITY_ID_FORMAT
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON,
STATE_ON)
from homeassistant.exceptions import UnknownUser, Unauthorized
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers import intent
from homeassistant.loader import bind_hass
import homeassistant.util.color as color_util
DOMAIN = 'light'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=30)
GROUP_NAME_ALL_LIGHTS = 'all lights'
ENTITY_ID_ALL_LIGHTS = GROUP_ENTITY_ID_FORMAT.format('all_lights')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Bitfield of features supported by the light entity
SUPPORT_BRIGHTNESS = 1
SUPPORT_COLOR_TEMP = 2
SUPPORT_EFFECT = 4
SUPPORT_FLASH = 8
SUPPORT_COLOR = 16
SUPPORT_TRANSITION = 32
SUPPORT_WHITE_VALUE = 128
# Integer that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_XY_COLOR = "xy_color"
ATTR_HS_COLOR = "hs_color"
ATTR_COLOR_TEMP = "color_temp"
ATTR_KELVIN = "kelvin"
ATTR_MIN_MIREDS = "min_mireds"
ATTR_MAX_MIREDS = "max_mireds"
ATTR_COLOR_NAME = "color_name"
ATTR_WHITE_VALUE = "white_value"
# Brightness of the light, 0..255 or percentage
ATTR_BRIGHTNESS = "brightness"
ATTR_BRIGHTNESS_PCT = "brightness_pct"
# String representing a profile (built-in ones or external defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# List of possible effects
ATTR_EFFECT_LIST = "effect_list"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
COLOR_GROUP = "Color descriptors"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Service call validation schemas
VALID_TRANSITION = vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255))
VALID_BRIGHTNESS_PCT = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
LIGHT_TURN_ON_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.comp_entity_ids,
vol.Exclusive(ATTR_PROFILE, COLOR_GROUP): cv.string,
ATTR_TRANSITION: VALID_TRANSITION,
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP):
vol.All(vol.ExactSequence((cv.byte, cv.byte, cv.byte)),
vol.Coerce(tuple)),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP):
vol.All(vol.ExactSequence((cv.small_float, cv.small_float)),
vol.Coerce(tuple)),
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP):
vol.All(vol.ExactSequence(
(vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)))),
vol.Coerce(tuple)),
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP):
vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP):
vol.All(vol.Coerce(int), vol.Range(min=0)),
ATTR_WHITE_VALUE: vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
ATTR_FLASH: vol.In([FLASH_SHORT, FLASH_LONG]),
ATTR_EFFECT: cv.string,
})
LIGHT_TURN_OFF_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.comp_entity_ids,
ATTR_TRANSITION: VALID_TRANSITION,
ATTR_FLASH: vol.In([FLASH_SHORT, FLASH_LONG]),
})
LIGHT_TOGGLE_SCHEMA = LIGHT_TURN_ON_SCHEMA
PROFILE_SCHEMA = vol.Schema(
vol.ExactSequence((str, cv.small_float, cv.small_float, cv.byte))
)
INTENT_SET = 'HassLightSet'
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass, entity_id=None):
"""Return if the lights are on based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LIGHTS
return hass.states.is_state(entity_id, STATE_ON)
def preprocess_turn_on_alternatives(params):
"""Process extra data for turn light on request."""
profile = Profiles.get(params.pop(ATTR_PROFILE, None))
if profile is not None:
params.setdefault(ATTR_XY_COLOR, profile[:2])
params.setdefault(ATTR_BRIGHTNESS, profile[2])
color_name = params.pop(ATTR_COLOR_NAME, None)
if color_name is not None:
try:
params[ATTR_RGB_COLOR] = color_util.color_name_to_rgb(color_name)
except ValueError:
_LOGGER.warning('Got unknown color %s, falling back to white',
color_name)
params[ATTR_RGB_COLOR] = (255, 255, 255)
kelvin = params.pop(ATTR_KELVIN, None)
if kelvin is not None:
mired = color_util.color_temperature_kelvin_to_mired(kelvin)
params[ATTR_COLOR_TEMP] = int(mired)
brightness_pct = params.pop(ATTR_BRIGHTNESS_PCT, None)
if brightness_pct is not None:
params[ATTR_BRIGHTNESS] = int(255 * brightness_pct/100)
xy_color = params.pop(ATTR_XY_COLOR, None)
if xy_color is not None:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
rgb_color = params.pop(ATTR_RGB_COLOR, None)
if rgb_color is not None:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
class SetIntentHandler(intent.IntentHandler):
"""Handle set color intents."""
intent_type = INTENT_SET
slot_schema = {
vol.Required('name'): cv.string,
vol.Optional('color'): color_util.color_name_to_rgb,
vol.Optional('brightness'): vol.All(vol.Coerce(int), vol.Range(0, 100))
}
async def async_handle(self, intent_obj):
"""Handle the hass intent."""
hass = intent_obj.hass
slots = self.async_validate_slots(intent_obj.slots)
state = hass.helpers.intent.async_match_state(
slots['name']['value'],
[state for state in hass.states.async_all()
if state.domain == DOMAIN])
service_data = {
ATTR_ENTITY_ID: state.entity_id,
}
speech_parts = []
if 'color' in slots:
intent.async_test_feature(
state, SUPPORT_COLOR, 'changing colors')
service_data[ATTR_RGB_COLOR] = slots['color']['value']
# Use original passed in value of the color because we don't have
# human readable names for that internally.
speech_parts.append('the color {}'.format(
intent_obj.slots['color']['value']))
if 'brightness' in slots:
intent.async_test_feature(
state, SUPPORT_BRIGHTNESS, 'changing brightness')
service_data[ATTR_BRIGHTNESS_PCT] = slots['brightness']['value']
speech_parts.append('{}% brightness'.format(
slots['brightness']['value']))
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, service_data)
response = intent_obj.create_response()
if not speech_parts: # No attributes changed
speech = 'Turned on {}'.format(state.name)
else:
parts = ['Changed {} to'.format(state.name)]
for index, part in enumerate(speech_parts):
if index == 0:
parts.append(' {}'.format(part))
elif index != len(speech_parts) - 1:
parts.append(', {}'.format(part))
else:
parts.append(' and {}'.format(part))
speech = ''.join(parts)
response.async_set_speech(speech)
return response
async def async_setup(hass, config):
"""Expose light control via state machine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LIGHTS)
await component.async_setup(config)
# load profiles from files
profiles_valid = await Profiles.load_profiles(hass)
if not profiles_valid:
return False
async def async_handle_light_on_service(service):
"""Handle a turn light on service call."""
# Get the validated data
params = service.data.copy()
# Convert the entity ids to valid light ids
target_lights = await component.async_extract_from_service(service)
params.pop(ATTR_ENTITY_ID, None)
if service.context.user_id:
user = await hass.auth.async_get_user(service.context.user_id)
if user is None:
raise UnknownUser(context=service.context)
entity_perms = user.permissions.check_entity
for light in target_lights:
if not entity_perms(light, POLICY_CONTROL):
raise Unauthorized(
context=service.context,
entity_id=light,
permission=POLICY_CONTROL
)
preprocess_turn_on_alternatives(params)
update_tasks = []
for light in target_lights:
light.async_set_context(service.context)
pars = params
if not pars:
pars = params.copy()
pars[ATTR_PROFILE] = Profiles.get_default(light.entity_id)
preprocess_turn_on_alternatives(pars)
await light.async_turn_on(**pars)
if not light.should_poll:
continue
update_tasks.append(
light.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks, loop=hass.loop)
# Listen for light on and light off service calls.
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_light_on_service,
schema=LIGHT_TURN_ON_SCHEMA)
component.async_register_entity_service(
SERVICE_TURN_OFF, LIGHT_TURN_OFF_SCHEMA,
'async_turn_off'
)
component.async_register_entity_service(
SERVICE_TOGGLE, LIGHT_TOGGLE_SCHEMA,
'async_toggle'
)
hass.helpers.intent.async_register(SetIntentHandler())
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class Profiles:
"""Representation of available color profiles."""
_all = None
@classmethod
async def load_profiles(cls, hass):
"""Load and cache profiles."""
def load_profile_data(hass):
"""Load built-in profiles and custom profiles."""
profile_paths = [os.path.join(os.path.dirname(__file__),
LIGHT_PROFILES_FILE),
hass.config.path(LIGHT_PROFILES_FILE)]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path) as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
try:
for rec in reader:
profile, color_x, color_y, brightness = \
PROFILE_SCHEMA(rec)
profiles[profile] = (color_x, color_y, brightness)
except vol.MultipleInvalid as ex:
_LOGGER.error(
"Error parsing light profile from %s: %s",
profile_path, ex)
return None
return profiles
cls._all = await hass.async_add_job(load_profile_data, hass)
return cls._all is not None
@classmethod
def get(cls, name):
"""Return a named profile."""
return cls._all.get(name)
@classmethod
def get_default(cls, entity_id):
"""Return the default turn-on profile for the given light."""
# pylint: disable=unsupported-membership-test
name = entity_id + ".default"
if name in cls._all:
return name
name = ENTITY_ID_ALL_LIGHTS + ".default"
if name in cls._all:
return name
return None
class Light(ToggleEntity):
"""Representation of a light."""
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return None
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return None
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return None
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return 500
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return None
@property
def effect_list(self):
"""Return the list of supported effects."""
return None
@property
def effect(self):
"""Return the current effect."""
return None
@property
def state_attributes(self):
"""Return optional state attributes."""
data = {}
supported_features = self.supported_features
if supported_features & SUPPORT_COLOR_TEMP:
data[ATTR_MIN_MIREDS] = self.min_mireds
data[ATTR_MAX_MIREDS] = self.max_mireds
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT_LIST] = self.effect_list
if self.is_on:
if supported_features & SUPPORT_BRIGHTNESS:
data[ATTR_BRIGHTNESS] = self.brightness
if supported_features & SUPPORT_COLOR_TEMP:
data[ATTR_COLOR_TEMP] = self.color_temp
if self.supported_features & SUPPORT_COLOR and self.hs_color:
# pylint: disable=unsubscriptable-object,not-an-iterable
hs_color = self.hs_color
data[ATTR_HS_COLOR] = (
round(hs_color[0], 3),
round(hs_color[1], 3),
)
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
if supported_features & SUPPORT_WHITE_VALUE:
data[ATTR_WHITE_VALUE] = self.white_value
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT] = self.effect
return {key: val for key, val in data.items() if val is not None}
@property
def supported_features(self):
"""Flag supported features."""
return 0
| 33.432203 | 79 | 0.647592 |
acf95a3dfe57aa86b6bb477fa65cd2d2376377c8 | 1,499 | py | Python | seleniumbase/config/ad_block_list.py | adityasarin/SeleniumBase | 419e4c52a9cffd140e01070eaae0e8326cfd6d8e | [
"MIT"
] | null | null | null | seleniumbase/config/ad_block_list.py | adityasarin/SeleniumBase | 419e4c52a9cffd140e01070eaae0e8326cfd6d8e | [
"MIT"
] | 1 | 2021-06-01T22:58:57.000Z | 2021-06-01T22:58:57.000Z | seleniumbase/config/ad_block_list.py | adityasarin/SeleniumBase | 419e4c52a9cffd140e01070eaae0e8326cfd6d8e | [
"MIT"
] | null | null | null | """
For use with SeleniumBase ad_block functionality.
Usage:
On the command line:
"pytest SOME_TEST.py --ad_block"
From inside a test:
self.ad_block()
If using the command line version, the ad_block functionality gets
activated after "self.wait_for_ready_state_complete()" is called,
which is always run after page loads, unless changed in "settings.py".
Using ad_block will slow down test runs a little. (Use only if necessary.)
Format: A CSS Selector that's ready for JavaScript's querySelectorAll()
"""
AD_BLOCK_LIST = [
'[aria-label="Ad"]',
'[class^="sponsored-content"]',
'[data-ad-details*="Advertisement"]',
'[data-native_ad*="placement"]',
'[data-provider="dianomi"]',
'[data-type="ad"]',
'[data-track-event-label*="-taboola-"]',
'[href*="doubleclick.net/"]',
'[id*="-ad-"]',
'[id*="_ads_"]',
'[id*="AdFrame"]',
'[id*="carbonads"]',
'[id^="ad-"]',
'[id^="outbrain_widget"]',
'[id^="taboola-"]',
'[id="dianomiRightRail"]',
'[src*="smartads."]',
'[src*="ad_nexus"]',
'[src*="/ads/"]',
'[data-dcm-click-tracker*="/adclick."]',
'[data-google-query-id^="C"]',
'div.ad-container',
'div.ad_module',
'div.ad-subnav-container',
'div.ad-wrapper',
'div.data-ad-container',
'div.l-ad',
'div.right-ad',
'div.wx-adWrapper',
'img.img_ad',
'link[href*="/adservice."]',
'script[src*="/adservice."]',
'script[src*="/pagead/"]',
'section.dianomi-ad',
]
| 27.254545 | 74 | 0.597732 |
acf95a5a4e414a63c970f11f274355815025cc2b | 5,535 | py | Python | Examples/IPv4 Address/add_static_ip4_address_example/add_static_ip4_address_example_page.py | TMAers/gateway-workflows | 38f22f1b31d0a4d18db0ad7466ce72f518c17076 | [
"Apache-2.0"
] | null | null | null | Examples/IPv4 Address/add_static_ip4_address_example/add_static_ip4_address_example_page.py | TMAers/gateway-workflows | 38f22f1b31d0a4d18db0ad7466ce72f518c17076 | [
"Apache-2.0"
] | null | null | null | Examples/IPv4 Address/add_static_ip4_address_example/add_static_ip4_address_example_page.py | TMAers/gateway-workflows | 38f22f1b31d0a4d18db0ad7466ce72f518c17076 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2018-09-27
# Gateway Version: 18.10.1
# Description: Example Gateway workflows
"""
Add static IPv4 address page
"""
# Various Flask framework items.
import os
from flask import url_for, redirect, render_template, flash, g, request
from bluecat import route, util
import config.default_config as config
from main_app import app
from .add_static_ip4_address_example_form import GenericFormTemplate
def module_path():
"""
Get module path.
:return:
"""
return os.path.dirname(os.path.abspath(str(__file__)))
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, '/add_static_ip4_address_example/add_static_ip4_address_example_endpoint')
@util.workflow_permission_required('add_static_ip4_address_example_page')
@util.exception_catcher
def add_static_ip4_address_example_add_static_ip4_address_example_page():
"""
Renders the form the user would first see when selecting the workflow.
:return:
"""
form = GenericFormTemplate()
# Remove this line if your workflow does not need to select a configuration
form.configuration.choices = util.get_configurations(default_val=True)
return render_template(
'add_static_ip4_address_example_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options()
)
@route(app, '/add_static_ip4_address_example/form', methods=['POST'])
@util.workflow_permission_required('add_static_ip4_address_example_page')
@util.exception_catcher
def add_static_ip4_address_example_add_static_ip4_address_example_page_form():
"""
Processes the final form after the user has input all the required data.
:return:
"""
# pylint: disable=broad-except
form = GenericFormTemplate()
# Remove this line if your workflow does not need to select a configuration
form.configuration.choices = util.get_configurations(default_val=True)
if form.validate_on_submit():
try:
# Retrieve form attributes
configuration = g.user.get_api().get_entity_by_id(form.configuration.data)
selected_view = request.form.get('view', '')
selected_hostname = request.form.get('hostname', '')
hostinfo = ''
if selected_view != '' and selected_hostname != '':
view = configuration.get_view(selected_view)
hostinfo = util.safe_str(selected_hostname) \
+ '.' \
+ util.safe_str(request.form.get('zone', '')) \
+ ',' \
+ util.safe_str(view.get_id()) \
+ ',' \
+ 'true' \
+ ',' \
+ 'false'
properties = 'name=' + form.description.data
# Assign ip4 object
ip4_object = configuration.assign_ip4_address(request.form.get('ip4_address', ''),
form.mac_address.data,
hostinfo,
'MAKE_STATIC',
properties)
# Put form processing code here
g.user.logger.info('Success - Static IP4 Address '
+ ip4_object.get_property('address')
+ ' Added with Object ID: '
+ util.safe_str(ip4_object.get_id()))
flash('Success - Static IP4 Address '
+ ip4_object.get_property('address')
+ ' Added with Object ID: '
+ util.safe_str(ip4_object.get_id()),
'succeed')
page = 'add_static_ip4_address_exampleadd_static_ip4_address_example_add_static_ip4_address_example_page'
return redirect(url_for(page))
except Exception as e:
flash(util.safe_str(e))
# Log error and render workflow page
g.user.logger.warning('%s' % util.safe_str(e), msg_type=g.user.logger.EXCEPTION)
return render_template('add_static_ip4_address_example_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options())
else:
g.user.logger.info('Form data was not valid.')
return render_template('add_static_ip4_address_example_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options())
| 41.616541 | 117 | 0.608491 |
acf95ba64024d1df4e8638383b539599c636a62a | 728 | py | Python | templates/Web/_composition/Flask/Page.Flask.List.AddRoutes.WithCosmos.Mongo/backend/mongo/mongo_service.py | wbhsm/WebTemplateStudio | ace7e8779c26105d0ceaf492d5a504fca5a08991 | [
"MIT"
] | null | null | null | templates/Web/_composition/Flask/Page.Flask.List.AddRoutes.WithCosmos.Mongo/backend/mongo/mongo_service.py | wbhsm/WebTemplateStudio | ace7e8779c26105d0ceaf492d5a504fca5a08991 | [
"MIT"
] | null | null | null | templates/Web/_composition/Flask/Page.Flask.List.AddRoutes.WithCosmos.Mongo/backend/mongo/mongo_service.py | wbhsm/WebTemplateStudio | ace7e8779c26105d0ceaf492d5a504fca5a08991 | [
"MIT"
] | null | null | null | from flask import request
from bson import ObjectId
from .mongo_client import list_items
from .utils import serialize
def get():
items = list_items.find()
serialized_list_items = [serialize(item) for item in items]
return serialized_list_items
def create():
data = request.get_json()
list_item = {'text': data['text']}
created_item = list_items.insert_one(list_item)
return {'id': str(created_item.inserted_id), 'text': list_item['text']}
def delete(id):
query_str = {'_id': ObjectId(id)}
result = list_items.delete_one(query_str)
if result.deleted_count == 0:
raise Exception('Could not find an item with given id')
return {'id': id, 'text': 'This comment was deleted'}
| 29.12 | 75 | 0.699176 |
acf95c44d04e8e460590c1086c7decae4f851a88 | 2,894 | py | Python | test/hyperactive_cnn_in_keras_test.py | skn123/LDWPSO-CNN | 7f05eb1defee2e968e5b3bed53f2b444b2b48fdb | [
"MIT"
] | 6 | 2020-01-24T16:15:34.000Z | 2022-03-21T13:53:32.000Z | test/hyperactive_cnn_in_keras_test.py | skn123/LDWPSO-CNN | 7f05eb1defee2e968e5b3bed53f2b444b2b48fdb | [
"MIT"
] | 1 | 2020-06-15T04:19:32.000Z | 2020-06-15T04:19:32.000Z | test/hyperactive_cnn_in_keras_test.py | skn123/LDWPSO-CNN | 7f05eb1defee2e968e5b3bed53f2b444b2b48fdb | [
"MIT"
] | 3 | 2021-03-29T17:11:27.000Z | 2021-05-17T13:33:10.000Z | # coding: utf-8
# Example with a convolutional neural network in keras
import time
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical
from hyperactive import RandomSearchOptimizer, ParticleSwarmOptimizer
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# this defines the structure of the model and print("time: {}".format(t2-t1))the search space in each layer
search_config = {
"keras.compile.0": {"loss": ["categorical_crossentropy"], "optimizer": ["SGD"]},
"keras.fit.0": {"epochs": [5], "batch_size": [500], "verbose": [2]},
"keras.layers.Conv2D.1": {
"filters": [32, 64, 128],
#"kernel_size": range(3, 4),
"kernel_size": [(3, 3)],
"activation": ["relu"],
"input_shape": [(28, 28, 1)],
},
"keras.layers.MaxPooling2D.2": {"pool_size": [(2, 2)]},
"keras.layers.Conv2D.3": {
"filters": [16, 32, 64, 128],
"kernel_size": [(3, 3)],
"activation": ["relu"],
},
"keras.layers.MaxPooling2D.4": {"pool_size": [(2, 2)]},
"keras.layers.Flatten.5": {},
#"keras.layers.Dense.6": {"units": [30], "activation": ["relu"]},
"keras.layers.Dense.6": {"units": range(30, 100, 10), "activation": ["relu"]},
#"keras.layers.Dropout.7": {"rate": 0.4},
"keras.layers.Dropout.7": {"rate": list(np.arange(0.2, 0.8, 0.2))},
"keras.layers.Dense.8": {"units": [10], "activation": ["softmax"]},
}
start_point = {
"keras.compile.0": {"loss": ["categorical_crossentropy"], "optimizer": ["adam"]},
"keras.fit.0": {"epochs": [5], "batch_size": [500], "verbose": [0]},
"keras.layers.Conv2D.1": {
"filters": [64],
"kernel_size": [3],
"activation": ["relu"],
"input_shape": [(28, 28, 1)],
},
"keras.layers.MaxPooling2D.2": {"pool_size": [(2, 2)]},
"keras.layers.Conv2D.3": {
"filters": [32],
"kernel_size": [3],
"activation": ["relu"],
},
"keras.layers.MaxPooling2D.4": {"pool_size": [(2, 2)]},
"keras.layers.Flatten.5": {},
"keras.layers.Dense.6": {"units": [30], "activation": ["relu"]},
"keras.layers.Dropout.7": {"rate": [0.2]},
"keras.layers.Dense.8": {"units": [10], "activation": ["softmax"]},
}
Optimizer = RandomSearchOptimizer(search_config, metric='accuracy', warim_start=start_point, n_iter=5) # verbosity=1で最適パラメータ表示
#Optimizer = ParticleSwarmOptimizer(search_config, n_iter=20)
t1 = time.time()
# search best hyperparameter for given data
Optimizer.fit(X_train, y_train)
t2 = time.time()
print("time: {}".format(t2-t1))
# predict from test data
Optimizer.predict(X_test)
# calculate accuracy score
score = Optimizer.score(X_test, y_test)
print("test score: {}".format(score)) | 34.047059 | 127 | 0.612992 |
acf95d22b5264ecd7b06d991b5af722c9f770c5a | 2,548 | py | Python | tests/test_basis.py | stpotter16/PySplineFit | 9e0e4d7dd31d4b5fabc7f194e822d1655c047880 | [
"MIT"
] | 2 | 2020-06-21T19:09:34.000Z | 2021-04-18T01:10:37.000Z | tests/test_basis.py | stpotter16/PySplineFit | 9e0e4d7dd31d4b5fabc7f194e822d1655c047880 | [
"MIT"
] | 12 | 2019-12-07T22:23:37.000Z | 2020-05-03T21:02:04.000Z | tests/test_basis.py | stpotter16/PySplineFit | 9e0e4d7dd31d4b5fabc7f194e822d1655c047880 | [
"MIT"
] | 2 | 2019-04-26T20:29:31.000Z | 2020-08-19T01:19:04.000Z | """
Tests for basis module of the PySplineFit Module
Released under MIT License. See LICENSE file for details
Copyright (C) 2019 Sam Potter
Requires pytest
"""
from .context import pysplinefit
from pysplinefit import basis
import pytest
import numpy as np
def test_basis_functions():
degree = 2
knot_vector = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
# n = m - p - 1 -> n + 1 = m + 1 - p - 1
knot_span = 4
knot = 5.0/2.0
# The NURBS Book Ex. 2.3
basis_vals = basis.basis_functions(knot_span, knot, degree, knot_vector)
expected = np.array([0.125, 0.75, 0.125])
condition = np.allclose(basis_vals, expected)
assert condition
def test_basis_functions2():
degree = 2
knot_vector = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
# n = m - p - 1 -> n + 1 = m + 1 - p - 1
knot_span = 4
knot = 5.0/2.0
# The NURBS Book Ex. 2.3
basis_vals = basis.basis_functions(knot_span, knot, degree, knot_vector)
basis_sum = np.sum(basis_vals)
assert np.isclose(basis_sum, 1.0)
def test_basis_function_ders():
degree = 2
knot_vector = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
# n = m - p - 1 -> n + 1 = m + 1 - p - 1
knot_span = 4
knot = 5.0/2.0
deriv_order = 2
# The NURBS Book Ex. 2.4
ders_vals = basis.basis_function_ders(knot_span, knot, degree, knot_vector, deriv_order)
expected = np.array([[0.125, -0.5, 1.0],
[0.75, 0, -2.0],
[0.125, 0.5, 1.0]])
condition = np.allclose(ders_vals, expected)
assert condition
def test_one_basis_function():
degree = 2
knot_vector = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
# n = m - p - 1 -> n + 1 = m + 1 - p - 1
knot = 5.0/2.0
# The NURBS Book Ex. 2.5
basis_val1 = basis.one_basis_function(degree, knot_vector, 3, knot)
basis_val2 = basis.one_basis_function(degree, knot_vector, 4, knot)
basis_vals = np.array([basis_val1, basis_val2])
expected = np.array([0.75, 0.125])
condition = np.allclose(basis_vals, expected)
assert condition
def test_one_basis_function_ders():
degree = 2
knot_vector = [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5]
# n = m - p - 1 -> n + 1 = m + 1 - p - 1
knot_span = 4
knot = 5.0/2.0
deriv_order = 2
# The NURBS Book Ex. 2.4
basis_deriv_vals = basis.one_basis_function_ders(degree, knot_vector, knot_span, knot, deriv_order)
expected = np.array([0.125, 0.5, 1.0])
condition = np.allclose(basis_deriv_vals, expected)
assert condition
| 23.376147 | 103 | 0.593799 |
acf95db31d1ef086e0449c16c5a315b414ab3fdc | 729 | py | Python | tests/extract_compounds_etoxsys_to_jon.py | erikvanmulligen/etransafe-use-scenarios | 6ec26120315abd66efdbfee6f5acba71e698a2b4 | [
"MIT"
] | null | null | null | tests/extract_compounds_etoxsys_to_jon.py | erikvanmulligen/etransafe-use-scenarios | 6ec26120315abd66efdbfee6f5acba71e698a2b4 | [
"MIT"
] | 1 | 2021-02-11T14:59:37.000Z | 2021-02-11T14:59:37.000Z | tests/extract_compounds_etoxsys_to_jon.py | erikvanmulligen/etransafe-use-scenarios | 6ec26120315abd66efdbfee6f5acba71e698a2b4 | [
"MIT"
] | null | null | null | '''
This is a module to test what data comes back from eToxSys
'''
from src.knowledgehub.api import KnowledgeHubAPI
import argparse
def main():
parser = argparse.ArgumentParser(description='Process parameters for collecting findings from primitive adapter')
parser.add_argument('-username', required=True, help='username')
parser.add_argument('-password', required=True, help='password')
args = parser.parse_args()
api = KnowledgeHubAPI('TEST', )
api.login(args.username, args.password)
socs = {}
studies = api.eToxSys().getStudiesByCompoundNames(['omeprazole'])
f = open("../data/studies_etox.json", "w")
f.write(str(studies))
f.close()
if __name__ == "__main__":
main()
| 25.137931 | 117 | 0.696845 |
acf95db71830d027963b6f2c570ce168201cf5dc | 398 | py | Python | tools/create_zodb.py | liuyxpp/ngpy | 24f4c07e336d255302618ea113ba2e02f60e01b4 | [
"BSD-3-Clause"
] | 1 | 2021-09-06T10:19:55.000Z | 2021-09-06T10:19:55.000Z | tools/create_zodb.py | liuyxpp/ngpy | 24f4c07e336d255302618ea113ba2e02f60e01b4 | [
"BSD-3-Clause"
] | null | null | null | tools/create_zodb.py | liuyxpp/ngpy | 24f4c07e336d255302618ea113ba2e02f60e01b4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Create a zodb database storage file.
'''
import sys
import os
from ZODB import FileStorage, DB
if len(sys.argv) < 2:
zodbfs = 'test.fs'
else:
zodbfs = sys.argv[1]
if os.path.exists(zodbfs):
print "WARNING: file %s exists!" % zodbfs
exit()
storage = FileStorage.FileStorage(zodbfs)
db = DB(storage)
conn = db.open()
root = conn.root()
conn.close()
| 15.92 | 45 | 0.658291 |
acf95df15d9dc6c2d47ef4af8e3c342902d3c9d0 | 5,992 | py | Python | com_cheese_api/cop/rec/recommend/model/recommend_ai.py | soominok/cheese-api | 6188b114c89f5aa0f83d92d25e7a5ebda379805e | [
"MIT"
] | null | null | null | com_cheese_api/cop/rec/recommend/model/recommend_ai.py | soominok/cheese-api | 6188b114c89f5aa0f83d92d25e7a5ebda379805e | [
"MIT"
] | null | null | null | com_cheese_api/cop/rec/recommend/model/recommend_ai.py | soominok/cheese-api | 6188b114c89f5aa0f83d92d25e7a5ebda379805e | [
"MIT"
] | null | null | null | from com_cheese_api.cop.rec.recommend.model.recommend_dfo import RecommendDfo
import pandas as pd
import numpy as np
import os
import joblib
# import tensorflow as tf
# from tensorflow import keras
# from keras.models import load_model
from com_cheese_api.cmm.utl.file import FileReader
class RecommendAi(object):
    """Cheese recommender.

    Turns a user's survey rows (fetched via ``RecommendDfo``) into a one-hot
    food-preference vector and feeds it to a pre-trained KNN model.
    """

    # Survey answers imply related foods: whenever the key food was selected
    # by any surveyed user, every food in the value list is also switched on
    # in the one-hot frame.  Insertion order reproduces the original cascade
    # of assignments.
    RELATED_FOODS = {
        '피자': ['마르게리타 피자'],
        '베이컨': ['샤퀴테리'],
        '맥앤치즈': ['테이블치즈'],
        '볶음밥': ['김가루'],
        '과일': ['배', '토마토', '무화과'],
        '빵': ['토스트', '샌드위치', '팬케이크', '간식'],
        '샐러드': ['샐러리', '채소'],
        '카프레제': ['카프레제 샐러드'],
        '핑거푸드': ['타파스', '핑거 푸드', '크로스티니', '카나페', '크래커', '비스킷'],
        '타르트': ['키쉬'],
        '견과류': ['감자', '멤브리요'],
        '딥소스': ['스프레드'],
        '발사믹식초': ['발사믹 식초', '소금'],
        '올리브유': ['올리브오일'],
    }

    def __init__(self):
        # Last preprocessed survey DataFrame (set by preprocessing_data).
        self.data = None
        # Pre-trained scikit-learn KNN model.
        # NOTE(review): the path is relative to the working directory --
        # confirm the app is always started from the project root.
        self.cheese_model = joblib.load("com_cheese_api/cop/modeling/cheese_knn_model.pkl")

    @staticmethod
    def read_survey(query, param):
        """Fetch the raw survey rows via ``RecommendDfo``.

        ``query``/``param`` are forwarded untouched; presumably a SQL
        statement plus its bind parameter (e.g.
        ``SELECT * FROM recommends WHERE user_id = ...``) -- confirm against
        ``RecommendDfo.dump_to_csv``.
        """
        return RecommendDfo().dump_to_csv(query, param)

    def preprocessing_data(self, query, param):
        """Build one-hot feature rows for the model.

        Returns a list of per-user arrays with the leading ``user_id``
        column stripped.
        """
        survey_result = RecommendAi.read_survey(query, param)

        # Column layout of the one-hot frame: user id followed by every food
        # the model knows about.
        features = ['user_id', '간식', '감자', '견과류', '과일', '그라탕',
                    '김가루', '꿀', '딥소스', '라자냐', '리소토', '마르게리타 피자', '막걸리', '맥앤치즈', '맥주',
                    '멤브리요', '무화과', '바질', '발사믹 식초', '발사믹식초', '배', '베이컨', '볶음밥', '비스킷', '빵',
                    '사케', '사퀘테리', '샌드위치', '샐러드', '샐러리', '샤퀴테리', '소금', '스테이크', '스프', '스프레드',
                    '올리브오일', '올리브유', '와인', '위스키', '잼', '채소', '치즈케이크', '카나페', '카프레제',
                    '카프레제 샐러드', '크래커', '크로스티니', '키쉬', '타르트', '타파스', '테이블치즈', '토마토', '토스트',
                    '파스타', '팬케이크', '퐁듀', '피자', '핑거 푸드', '핑거푸드', '화이트와인']

        survey_df = pd.DataFrame(survey_result, columns=features)
        food_value = survey_result.values
        print(f'food_value: {food_value}')

        # Per-row answers with the user id stripped (kept for the debug
        # trace, matching the original output).
        select_food = []
        for food_item in food_value:
            select_food.append(food_item[1:])
        print(f'select_food: {select_food}')

        # Mark every selected food column with 1; everything still missing
        # is filled with 0.
        for selected in food_value:
            for column_item in survey_df.columns:
                if column_item in selected:
                    survey_df[column_item] = 1
                else:
                    survey_df.fillna(0, inplace=True)
        print(f'origin survey_df : {survey_df}')

        # BUGFIX: the original tested ``(survey_df[...] == 1).any`` without
        # calling it -- a bound method is always truthy, so *every* related
        # food was switched on regardless of the survey answers.  ``.any()``
        # now asks pandas whether any row actually selected the trigger.
        for trigger, related in RecommendAi.RELATED_FOODS.items():
            if (survey_df[trigger] == 1).any():
                for related_food in related:
                    survey_df[related_food] = 1
        print(f'final survey_df : {survey_df}')

        self.data = survey_df

        # Strip the user_id column again for the model input.
        survey_list = []
        for list_item in np.array(self.data):
            survey_list.append(list_item[1:])
        print(survey_list)
        print(type(survey_list))
        return survey_list

    def recommend_cheese(self, query, param):
        """Run the KNN model on the preprocessed survey and return the
        prediction list."""
        survey_list = self.preprocessing_data(query, param)
        recommend_pred = self.cheese_model.predict(np.array(survey_list).tolist()).tolist()
        print(recommend_pred)
        return recommend_pred
if __name__ == '__main__':
    # Smoke-test entry point.  The original referenced ``query`` and
    # ``param`` without defining them, which raised NameError whenever the
    # module was executed directly; define example values first.
    # NOTE(review): the exact SQL/parameter shape expected by
    # RecommendDfo.dump_to_csv must be confirmed -- this mirrors the query
    # shown in this module's comments.
    query = """SELECT * FROM recommends WHERE user_id = 'user_id'"""
    param = 'user_id'
    recommendAi = RecommendAi()
    recommendAi.recommend_cheese(query, param)
# from com_cheese_api.cop.rec.recommend.model.recommend_dfo import RecommendDfo
# import pandas as pd
# import numpy as np
# import os
# import joblib
# from com_cheese_api.cmm.utl.file import FileReader
# class RecommendAi(object):
# def __init__(self):
# self.data = RecommendDfo().dump_to_csv
# # self.cheese_model = joblib.load("com_cheese_api/cop/machine/cheese_knn_model.pkl")
# self.cheese_model = joblib.load("com_cheese_api/cop/machine/cheese_model.h5")
# def recommend_cheese(self, user_id, file_path):
# query = """SELECT * FROM recommends WHERE user_id = 'user_id'"""
# survey = self.data(query)
# # cheese_data = FileReader.csv_load(file_path, 'utf-8-sig')
# return self.predict_data(self.model, survey)
# def predict_data(self, model, data):
# recom_cheese = data['chooseFood_1', 'chooseFood_2']
# if __name__ == '__main__':
# recommendAi = RecommendAi()
# # recommendAi.dump_to_csv('com_cheese_api', 'com_cheese_api/cop/rec/recommend/data/recommend_data.csv') | 35.666667 | 109 | 0.570761 |
acf95e353b0674efcecce90c3e638ecf24f4a3a2 | 12,022 | py | Python | web/agent.py | nortxort/web | 9014575e2dc6e35f6fe4f815fb6845dd496f40e7 | [
"MIT"
] | 4 | 2020-05-20T20:12:37.000Z | 2021-06-02T23:47:11.000Z | web/agent.py | nortxort/web | 9014575e2dc6e35f6fe4f815fb6845dd496f40e7 | [
"MIT"
] | null | null | null | web/agent.py | nortxort/web | 9014575e2dc6e35f6fe4f815fb6845dd496f40e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2020 Nortxort
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import random
# Fallback user agent (Firefox 84 on Windows 10).  NOTE(review): newer than
# the snapshot below -- presumably updated independently of the list.
DEFAULT_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0'

# https://techblog.willshouse.com/2012/01/03/most-common-user-agents/
# Snapshot of the most common desktop user-agent strings, used as the pool
# for random_agent().  DEFAULT_AGENT is deliberately the first entry so it
# is always part of the pool.
COMMON_AGENTS = [
    DEFAULT_AGENT,
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.4 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:73.0) Gecko/20100101 Firefox/73.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:73.0) Gecko/20100101 Firefox/73.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36 OPR/67.0.3575.53',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 Edg/80.0.361.66',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:73.0) Gecko/20100101 Firefox/73.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 OPR/67.0.3575.97',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36 Edg/80.0.361.109',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0 Waterfox/56.2.14',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 YaBrowser/20.3.0.1223 Yowser/2.5 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
    'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 OPR/67.0.3575.115',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 YaBrowser/20.3.1.197 Yowser/2.5 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:68.0) Gecko/20100101 Firefox/68.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.136 YaBrowser/20.2.4.143 Yowser/2.5 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
]
def random_agent() -> str:
    """Return a randomly selected user agent.

    The agent is drawn uniformly from the ``COMMON_AGENTS`` pool.
    """
    index = random.randrange(len(COMMON_AGENTS))
    return COMMON_AGENTS[index]
| 88.397059 | 150 | 0.708202 |
acf9601eda20b2d8067f205f18dcb4dca7269155 | 10,741 | py | Python | discord/__main__.py | Ryomen-Sukuna/discord.py | 0bcb0d0e3ce395d42a5b1dae61b0090791ee018d | [
"MIT"
] | 1 | 2022-01-31T12:57:09.000Z | 2022-01-31T12:57:09.000Z | discord/__main__.py | Ryomen-Sukuna/discord.py | 0bcb0d0e3ce395d42a5b1dae61b0090791ee018d | [
"MIT"
] | null | null | null | discord/__main__.py | Ryomen-Sukuna/discord.py | 0bcb0d0e3ce395d42a5b1dae61b0090791ee018d | [
"MIT"
] | 1 | 2022-02-11T14:39:46.000Z | 2022-02-11T14:39:46.000Z | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, Tuple, Dict
import argparse
import sys
from pathlib import Path
import discord
import pkg_resources
import aiohttp
import platform
def show_version() -> None:
    """Print version information for Python, discord.py, aiohttp and the OS."""
    py = sys.version_info
    lib = discord.version_info
    entries = [
        f'- Python v{py.major}.{py.minor}.{py.micro}-{py.releaselevel}',
        f'- discord.py v{lib.major}.{lib.minor}.{lib.micro}-{lib.releaselevel}',
    ]
    if lib.releaselevel != 'final':
        # Development builds additionally report the installed distribution.
        pkg = pkg_resources.get_distribution('discord.py')
        if pkg:
            entries.append(f'    - discord.py pkg_resources: v{pkg.version}')

    entries.append(f'- aiohttp v{aiohttp.__version__}')
    uname = platform.uname()
    entries.append(f'- system info: {uname.system} {uname.release} {uname.version}')
    print('\n'.join(entries))
def core(parser: argparse.ArgumentParser, args: argparse.Namespace) -> None:
    """Handle a bare ``discord`` invocation (no subcommand selected)."""
    if not args.version:
        return
    show_version()
_bot_template = """#!/usr/bin/env python3
from discord.ext import commands
import discord
import config
class Bot(commands.{base}):
def __init__(self, **kwargs):
super().__init__(command_prefix=commands.when_mentioned_or('{prefix}'), **kwargs)
async def setup_hook(self):
for cog in config.cogs:
try:
await self.load_extension(cog)
except Exception as exc:
print(f'Could not load extension {{cog}} due to {{exc.__class__.__name__}}: {{exc}}')
async def on_ready(self):
print(f'Logged on as {{self.user}} (ID: {{self.user.id}})')
bot = Bot()
# write general commands here
bot.run(config.token)
"""
_gitignore_template = """# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Our configuration files
config.py
"""
_cog_template = '''from discord.ext import commands
import discord
class {name}(commands.Cog{attrs}):
"""The description for {name} goes here."""
def __init__(self, bot):
self.bot = bot
{extra}
async def setup(bot):
await bot.add_cog({name}(bot))
'''
_cog_extras = '''
async def cog_load(self):
# loading logic goes here
pass
async def cog_unload(self):
# clean up logic goes here
pass
async def cog_check(self, ctx):
# checks that apply to every command in here
return True
async def bot_check(self, ctx):
# checks that apply to every command to the bot
return True
async def bot_check_once(self, ctx):
# check that apply to every command but is guaranteed to be called only once
return True
async def cog_command_error(self, ctx, error):
# error handling to every command in here
pass
async def cog_before_invoke(self, ctx):
# called before a command is called here
pass
async def cog_after_invoke(self, ctx):
# called after a command is called here
pass
'''
# certain file names and directory names are forbidden
# see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247%28v=vs.85%29.aspx
# although some of this doesn't apply to Linux, we might as well be consistent
_base_table: Dict[str, Optional[str]] = {
'<': '-',
'>': '-',
':': '-',
'"': '-',
# '/': '-', these are fine
# '\\': '-',
'|': '-',
'?': '-',
'*': '-',
}
# NUL (0) and 1-31 are disallowed
_base_table.update((chr(i), None) for i in range(32))
_translation_table = str.maketrans(_base_table)
def to_path(parser: argparse.ArgumentParser, name: str, *, replace_spaces: bool = False) -> Path:
    """Sanitise *name* into a :class:`~pathlib.Path`.

    On Windows, reserved device names abort via ``parser.error`` (which does
    not return) and forbidden characters are translated away.  With
    *replace_spaces*, spaces become hyphens.  A ``Path`` argument is
    returned unchanged.
    """
    if isinstance(name, Path):
        return name

    if sys.platform == 'win32':
        forbidden = (
            'CON', 'PRN', 'AUX', 'NUL',
            'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
            'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9',
        )
        if len(name) <= 4 and name.upper() in forbidden:
            parser.error('invalid directory name given, use a different one')
        # Strip characters Windows does not allow in file names.
        name = name.translate(_translation_table)

    if replace_spaces:
        name = name.replace(' ', '-')
    return Path(name)
def newbot(parser: argparse.ArgumentParser, args: argparse.Namespace) -> None:
    """Scaffold a new bot project: bot.py, config.py, a cogs package and .gitignore."""
    project_dir = to_path(parser, args.directory) / to_path(parser, args.name)

    # as a note exist_ok for Path is a 3.5+ only feature
    # since we already checked above that we're >3.5
    try:
        project_dir.mkdir(exist_ok=True, parents=True)
    except OSError as exc:
        parser.error(f'could not create our bot directory ({exc})')

    cogs_dir = project_dir / 'cogs'
    try:
        cogs_dir.mkdir(exist_ok=True)
        (cogs_dir / '__init__.py').touch()
    except OSError as exc:
        # The cogs package is nice to have but not essential -- warn only.
        print(f'warning: could not create cogs directory ({exc})')

    try:
        with open(str(project_dir / 'config.py'), 'w', encoding='utf-8') as fp:
            fp.write('token = "place your token here"\ncogs = []\n')
    except OSError as exc:
        parser.error(f'could not create config file ({exc})')

    try:
        with open(str(project_dir / 'bot.py'), 'w', encoding='utf-8') as fp:
            base = 'AutoShardedBot' if args.sharded else 'Bot'
            fp.write(_bot_template.format(base=base, prefix=args.prefix))
    except OSError as exc:
        parser.error(f'could not create bot file ({exc})')

    if not args.no_git:
        try:
            with open(str(project_dir / '.gitignore'), 'w', encoding='utf-8') as fp:
                fp.write(_gitignore_template)
        except OSError as exc:
            print(f'warning: could not create .gitignore file ({exc})')

    print('successfully made bot at', project_dir)
def newcog(parser: argparse.ArgumentParser, args: argparse.Namespace) -> None:
    """Generate a cog template file inside the cogs directory."""
    cog_dir = to_path(parser, args.directory)
    try:
        cog_dir.mkdir(exist_ok=True)
    except OSError as exc:
        print(f'warning: could not create cogs directory ({exc})')

    directory = (cog_dir / to_path(parser, args.name)).with_suffix('.py')
    try:
        with open(str(directory), 'w', encoding='utf-8') as fp:
            attrs = ''
            extra = _cog_extras if args.full else ''
            if args.class_name:
                name = args.class_name
            else:
                # Derive a class name from the file stem; kebab/snake case
                # becomes CamelCase.
                name = str(directory.stem)
                if '-' in name or '_' in name:
                    translation = str.maketrans('-_', '  ')
                    name = name.translate(translation).title().replace(' ', '')
                else:
                    name = name.title()

            if args.display_name:
                attrs += f', name="{args.display_name}"'
            if args.hide_commands:
                attrs += ', command_attrs=dict(hidden=True)'
            fp.write(_cog_template.format(name=name, extra=extra, attrs=attrs))
    except OSError as exc:
        parser.error(f'could not create cog file ({exc})')
    else:
        print('successfully made cog at', directory)
def add_newbot_args(subparser: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
    """Register the ``newbot`` subcommand and its options."""
    cmd = subparser.add_parser('newbot', help='creates a command bot project quickly')
    cmd.set_defaults(func=newbot)

    cmd.add_argument('name', help='the bot project name')
    cmd.add_argument('directory', help='the directory to place it in (default: .)', nargs='?', default=Path.cwd())
    cmd.add_argument('--prefix', help='the bot prefix (default: $)', default='$', metavar='<prefix>')
    cmd.add_argument('--sharded', help='whether to use AutoShardedBot', action='store_true')
    cmd.add_argument('--no-git', help='do not create a .gitignore file', action='store_true', dest='no_git')
def add_newcog_args(subparser: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
    """Register the ``newcog`` subcommand and its options."""
    cmd = subparser.add_parser('newcog', help='creates a new cog template quickly')
    cmd.set_defaults(func=newcog)

    cmd.add_argument('name', help='the cog name')
    cmd.add_argument('directory', help='the directory to place it in (default: cogs)', nargs='?', default=Path('cogs'))
    cmd.add_argument('--class-name', help='the class name of the cog (default: <name>)', dest='class_name')
    cmd.add_argument('--display-name', help='the cog name (default: <name>)')
    cmd.add_argument('--hide-commands', help='whether to hide all commands in the cog', action='store_true')
    cmd.add_argument('--full', help='add all special methods as well', action='store_true')
def parse_args() -> Tuple[argparse.ArgumentParser, argparse.Namespace]:
    """Build the top-level CLI parser and parse ``sys.argv``.

    Returns both the parser (so handlers can call ``parser.error``) and the
    parsed namespace.
    """
    root = argparse.ArgumentParser(prog='discord', description='Tools for helping with discord.py')
    root.add_argument('-v', '--version', action='store_true', help='shows the library version')
    root.set_defaults(func=core)

    subcommands = root.add_subparsers(dest='subcommand', title='subcommands')
    for register in (add_newbot_args, add_newcog_args):
        register(subcommands)

    return root, root.parse_args()
def main() -> None:
    """Entry point for the ``discord`` console script."""
    parser, args = parse_args()
    # Dispatch to whichever handler argparse selected
    # (core for no subcommand, otherwise newbot/newcog).
    args.func(parser, args)


if __name__ == '__main__':
    main()
| 31.223837 | 122 | 0.639885 |
acf96053c91cd1d7e5c8f69dd342b10426656c79 | 3,260 | py | Python | salt/client/netapi.py | johnj/salt | b23656fa5ee24047c43ac702d6796a700570f749 | [
"Apache-2.0"
] | 5 | 2017-02-07T05:39:29.000Z | 2020-06-13T02:07:33.000Z | salt/client/netapi.py | johnj/salt | b23656fa5ee24047c43ac702d6796a700570f749 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/client/netapi.py | johnj/salt | b23656fa5ee24047c43ac702d6796a700570f749 | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # encoding: utf-8
'''
The main entry point for salt-api
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import signal
import logging

# Import salt-api libs
import salt.loader
import salt.log.setup
import salt.utils.platform
import salt.utils.process
log = logging.getLogger(__name__)
class RunNetapi(salt.utils.process.SignalHandlingProcess):
    '''
    Runner class that's pickable for netapi modules
    '''
    def __init__(self, opts, fname, **kwargs):
        # opts: presumably the master configuration dict (it is handed
        # straight to salt.loader.netapi) -- confirm at the call site.
        # fname: dotted loader name of the netapi function to run,
        # e.g. 'rest_cherrypy.start'.
        super(RunNetapi, self).__init__(**kwargs)
        self.opts = opts
        self.fname = fname

    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        # Re-run __init__ with the pickled state; the key names here must
        # stay in lockstep with __getstate__ below.
        self.__init__(
            state['opts'],
            state['fname'],
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )

    def __getstate__(self):
        # log_queue/log_queue_level come from the SignalHandlingProcess
        # base class and carry the multiprocessing logging setup across
        # the spawn boundary.
        return {
            'opts': self.opts,
            'fname': self.fname,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }

    def run(self):
        # Resolve the configured netapi module function via the loader and
        # invoke it; this call typically blocks for the life of the server.
        netapi = salt.loader.netapi(self.opts)
        netapi_func = netapi[self.fname]
        netapi_func()
class NetapiClient(object):
    '''
    Start each netapi module that is configured to run
    '''
    def __init__(self, opts):
        # opts: presumably the master configuration dict -- it is handed
        # straight to the loader and to every spawned RunNetapi process.
        self.opts = opts
        self.process_manager = salt.utils.process.ProcessManager(name='NetAPIProcessManager')
        # Mapping of loader function names -> callables for the configured
        # netapi modules.
        self.netapi = salt.loader.netapi(self.opts)

    def run(self):
        '''
        Load and start all available api modules
        '''
        if not self.netapi:
            # Idiomatic emptiness test (was ``not len(self.netapi)``).  We
            # deliberately continue afterwards, preserving the original
            # best-effort behaviour: nothing is started but the process
            # manager still runs.
            log.error("Did not find any netapi configurations, nothing to start")

        kwargs = {}
        # NOTE(review): ``salt.utils.platform`` and ``salt.log.setup`` are
        # not imported by name in this module's import block and resolve
        # only via transitive imports; make sure explicit imports exist at
        # module level to avoid a latent AttributeError.
        if salt.utils.platform.is_windows():
            # On Windows there is no fork(), so the multiprocessing logging
            # queue has to be handed to each child process explicitly.
            kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
            kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()

        # Only '<module>.start' entries are servers that should be spawned.
        for fun in self.netapi:
            if fun.endswith('.start'):
                log.info('Starting %s netapi module', fun)
                self.process_manager.add_process(
                    RunNetapi,
                    args=(self.opts, fun),
                    kwargs=kwargs,
                    name='RunNetapi'
                )

        # Install the SIGINT/SIGTERM handlers if not done so far
        if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGINT, self._handle_signals)
        if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGTERM, self._handle_signals)

        self.process_manager.run()

    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # escalate the signals to the process manager
        self.process_manager.stop_restarting()
        self.process_manager.send_signal_to_processes(signum)
        # kill any remaining processes
        self.process_manager.kill_children()
| 32.929293 | 93 | 0.631902 |
acf9607a7367bc0066281c09c97f60076c1cc2f4 | 65,874 | py | Python | src/transformers/models/mobilebert/modeling_mobilebert.py | yhavinga/transformers | 9932ee4b4bca9045d941af6687ef69eedcf68483 | [
"Apache-2.0"
] | 2 | 2022-02-19T07:02:52.000Z | 2022-02-19T07:02:55.000Z | src/transformers/models/mobilebert/modeling_mobilebert.py | yhavinga/transformers | 9932ee4b4bca9045d941af6687ef69eedcf68483 | [
"Apache-2.0"
] | 1 | 2022-02-17T12:40:59.000Z | 2022-02-17T12:40:59.000Z | src/transformers/models/mobilebert/modeling_mobilebert.py | yhavinga/transformers | 9932ee4b4bca9045d941af6687ef69eedcf68483 | [
"Apache-2.0"
] | 2 | 2022-03-21T04:32:39.000Z | 2022-03-22T01:02:49.000Z | # MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import logging
from .configuration_mobilebert import MobileBertConfig
logger = logging.get_logger(__name__)

# Identifiers consumed by the documentation decorators imported above
# (add_code_sample_docstrings / replace_return_docstrings) when generating
# usage examples for the model classes.
_CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
_CONFIG_FOR_DOC = "MobileBertConfig"
_TOKENIZER_FOR_DOC = "MobileBertTokenizer"

# Official MobileBERT checkpoints available on the Hugging Face Hub.
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["google/mobilebert-uncased"]
def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Reads every variable from the TensorFlow checkpoint at ``tf_checkpoint_path``,
    rewrites its TF scope name into the corresponding PyTorch attribute path on
    ``model``, and copies the weights into that parameter in place.

    Args:
        model: The target MobileBERT PyTorch model (mutated in place).
        config: Unused here; kept for signature parity with other TF loaders.
        tf_checkpoint_path: Path to the TensorFlow checkpoint.

    Returns:
        The same ``model`` instance with weights loaded.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        # Normalize TF-side naming differences to the PyTorch module names.
        name = name.replace("ffn_layer", "ffn")
        name = name.replace("FakeLayerNorm", "LayerNorm")
        name = name.replace("extra_output_weights", "dense/kernel")
        name = name.replace("bert", "mobilebert")
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # Walk the '/'-separated scope path component by component down to the
        # target parameter on the model.
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" -> ["layer", "3", ""]: name plus list index.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` only skips the remaining scope
                    # components of the current variable (inner loop), not the
                    # variable itself — matches upstream code; confirm intended.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                # Index into a ModuleList (e.g. encoder layer number).
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class NoNorm(nn.Module):
    """A "no-op" normalization layer: a learned per-feature affine transform
    (scale and shift) with no actual normalization applied.

    The unused ``eps`` argument keeps the constructor signature compatible with
    ``nn.LayerNorm`` so both can be selected interchangeably via ``NORM2FN``.
    """

    def __init__(self, feat_size, eps=None):
        super().__init__()
        # Initialized to the identity transform: scale 1, shift 0.
        self.bias = nn.Parameter(torch.zeros(feat_size))
        self.weight = nn.Parameter(torch.ones(feat_size))

    def forward(self, input_tensor):
        scaled = input_tensor * self.weight
        return scaled + self.bias
NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}
class MobileBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.trigram_input = config.trigram_input
        self.embedding_size = config.embedding_size
        self.hidden_size = config.hidden_size
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # With trigram input, three neighbouring token embeddings are concatenated
        # along the feature axis before projection, tripling the input width.
        embed_dim_multiplier = 3 if self.trigram_input else 1
        embedded_input_size = self.embedding_size * embed_dim_multiplier
        self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        # Derive (batch, seq) from whichever of input_ids / inputs_embeds was given.
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if self.trigram_input:
            # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
            # Devices (https://arxiv.org/abs/2004.02984)
            #
            # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
            # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
            # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
            # dimensional output.
            # Concatenate each token's embedding with its right and left neighbours
            # (zero-padded at the sequence edges) along the feature dimension.
            inputs_embeds = torch.cat(
                [
                    nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),
                    inputs_embeds,
                    nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),
                ],
                dim=2,
            )
        if self.trigram_input or self.embedding_size != self.hidden_size:
            inputs_embeds = self.embedding_transformation(inputs_embeds)
        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class MobileBertSelfAttention(nn.Module):
    """Multi-head self-attention over MobileBERT's narrow ``true_hidden_size``.

    Query and key always read ``true_hidden_size`` inputs; the value input
    width depends on ``config.use_bottleneck_attention`` (see ``Bottleneck``).
    """
    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
        self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
        self.value = nn.Linear(
            config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
        )
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        query_tensor,
        key_tensor,
        value_tensor,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
    ):
        mixed_query_layer = self.query(query_tensor)
        mixed_key_layer = self.key(key_tensor)
        mixed_value_layer = self.value(value_tensor)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, heads, seq, head) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class MobileBertSelfOutput(nn.Module):
    """Projects the self-attention output and adds the residual; dropout is
    applied only when the bottleneck variant is disabled."""

    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
        if not self.use_bottleneck:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, residual_tensor):
        projected = self.dense(hidden_states)
        if not self.use_bottleneck:
            projected = self.dropout(projected)
        return self.LayerNorm(projected + residual_tensor)
class MobileBertAttention(nn.Module):
    """Self-attention sub-block: attention plus its output projection/residual,
    with support for pruning attention heads."""
    def __init__(self, config):
        super().__init__()
        self.self = MobileBertSelfAttention(config)
        self.output = MobileBertSelfOutput(config)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        # Remove the given head indices from query/key/value/output projections.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        query_tensor,
        key_tensor,
        value_tensor,
        layer_input,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
    ):
        self_outputs = self.self(
            query_tensor,
            key_tensor,
            value_tensor,
            attention_mask,
            head_mask,
            output_attentions,
        )
        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        attention_output = self.output(self_outputs[0], layer_input)
        outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
        return outputs
class MobileBertIntermediate(nn.Module):
    """Feed-forward expansion: linear projection to the intermediate size
    followed by the configured activation function."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
        # config.hidden_act may be an activation name or a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class OutputBottleneck(nn.Module):
    """Re-expands a ``true_hidden_size`` output to ``hidden_size``, applies
    dropout, and layer-normalizes after adding the residual."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, residual_tensor):
        expanded = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(expanded + residual_tensor)
class MobileBertOutput(nn.Module):
    """Final FFN projection of a transformer layer.

    Without the bottleneck: dense -> dropout -> add-residual-1 & norm.
    With the bottleneck: dense -> add-residual-1 & norm, then OutputBottleneck
    re-expands to ``hidden_size`` and adds residual 2.
    """

    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        # NOTE(review): no eps passed here (unlike MobileBertSelfOutput) — matches
        # the original code; confirm intended.
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
        if self.use_bottleneck:
            self.bottleneck = OutputBottleneck(config)
        else:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):
        layer_output = self.dense(intermediate_states)
        if self.use_bottleneck:
            layer_output = self.LayerNorm(layer_output + residual_tensor_1)
            layer_output = self.bottleneck(layer_output, residual_tensor_2)
        else:
            layer_output = self.dropout(layer_output)
            layer_output = self.LayerNorm(layer_output + residual_tensor_1)
        return layer_output
class BottleneckLayer(nn.Module):
    """Compresses hidden states from ``hidden_size`` down to the
    intra-bottleneck width, then layer-normalizes the result."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.dense(hidden_states))
class Bottleneck(nn.Module):
    """Produces the (query, key, value, layer-input) tensors consumed by the
    attention block, applying bottleneck projections per the configuration.

    The bottlenecks are learned linear layers that project hidden states to a
    lower-dimensional vector, reducing memory usage. Depending on the config,
    the forward pass returns one of three tuple shapes (see inline comments).
    """

    def __init__(self, config):
        super().__init__()
        self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
        self.use_bottleneck_attention = config.use_bottleneck_attention
        self.input = BottleneckLayer(config)
        if self.key_query_shared_bottleneck:
            self.attention = BottleneckLayer(config)

    def forward(self, hidden_states):
        bottlenecked = self.input(hidden_states)
        if self.use_bottleneck_attention:
            # Query, key, value and the residual "layer input" all share the
            # bottlenecked projection; the layer input acts as the residual
            # tensor in the attention self-output after scores are computed.
            return (bottlenecked,) * 4
        if self.key_query_shared_bottleneck:
            # Query and key share one dedicated bottleneck; the value stays
            # un-projected and the residual uses the main bottleneck.
            shared = self.attention(hidden_states)
            return shared, shared, hidden_states, bottlenecked
        # No bottleneck on query/key/value; only the residual is bottlenecked.
        return hidden_states, hidden_states, hidden_states, bottlenecked
class FFNOutput(nn.Module):
    """Projects an FFN intermediate activation back to ``true_hidden_size``
    and layer-normalizes after adding the residual."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, residual_tensor):
        return self.LayerNorm(self.dense(hidden_states) + residual_tensor)
class FFNLayer(nn.Module):
    """A stand-alone feed-forward sub-layer (intermediate expansion plus output
    projection with residual), used for the extra FFNs in a MobileBERT layer."""

    def __init__(self, config):
        super().__init__()
        self.intermediate = MobileBertIntermediate(config)
        self.output = FFNOutput(config)

    def forward(self, hidden_states):
        expanded = self.intermediate(hidden_states)
        return self.output(expanded, hidden_states)
class MobileBertLayer(nn.Module):
    """One MobileBERT transformer layer: optional bottleneck -> attention ->
    optional extra FFN stack -> intermediate expansion + output projection."""
    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.num_feedforward_networks = config.num_feedforward_networks
        self.attention = MobileBertAttention(config)
        self.intermediate = MobileBertIntermediate(config)
        self.output = MobileBertOutput(config)
        if self.use_bottleneck:
            self.bottleneck = Bottleneck(config)
        if config.num_feedforward_networks > 1:
            # All FFNs beyond the first are stand-alone FFNLayer modules.
            self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
    ):
        if self.use_bottleneck:
            # Bottleneck decides which of q/k/v and the residual get projected.
            query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
        else:
            query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
        self_attention_outputs = self.attention(
            query_tensor,
            key_tensor,
            value_tensor,
            layer_input,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        s = (attention_output,)
        outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
        if self.num_feedforward_networks != 1:
            # Apply the extra FFN sub-layers sequentially, recording each
            # intermediate result in `s`.
            for i, ffn_module in enumerate(self.ffn):
                attention_output = ffn_module(attention_output)
                s += (attention_output,)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output, hidden_states)
        # NOTE(review): beyond the layer output and attention weights, the tuple
        # exposes internal tensors preceded by a constant torch.tensor(1000)
        # marker — matches upstream; the encoder below only consumes
        # outputs[0] (and outputs[1] when attentions are requested).
        outputs = (
            (layer_output,)
            + outputs
            + (
                torch.tensor(1000),
                query_tensor,
                key_tensor,
                value_tensor,
                layer_input,
                attention_output,
                intermediate_output,
            )
            + s
        )
        return outputs
class MobileBertEncoder(nn.Module):
    """Stack of MobileBertLayer modules, optionally collecting per-layer
    hidden states and attention weights."""
    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the input to this layer (the embeddings for i == 0).
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                head_mask[i],
                output_attentions,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            # Tuple form: drop the None entries.
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
class MobileBertPooler(nn.Module):
    """Pools the sequence by taking the first token's hidden state, optionally
    passing it through a dense layer with tanh activation (controlled by
    ``config.classifier_activation``)."""

    def __init__(self, config):
        super().__init__()
        self.do_activate = config.classifier_activation
        if self.do_activate:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        # "Pool" by selecting the hidden state of the first token.
        first_token_tensor = hidden_states[:, 0]
        if self.do_activate:
            return torch.tanh(self.dense(first_token_tensor))
        return first_token_tensor
class MobileBertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM head.

    Always uses real layer norm ("layer_norm") regardless of
    ``config.normalization_type``.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be an activation name or a callable.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class MobileBertLMPredictionHead(nn.Module):
    """Language-modeling head producing vocabulary logits from hidden states.

    The decoder weight is tied to the (embedding_size-wide) word embeddings;
    the extra `dense` weight of shape (vocab_size, hidden_size - embedding_size)
    covers the remaining hidden dimensions, so the full projection matrix is the
    row-wise concatenation [decoder.weight.T ; dense.weight].
    """
    def __init__(self, config):
        super().__init__()
        self.transform = MobileBertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        # Project through the concatenated (hidden_size, vocab_size) matrix.
        hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
        hidden_states += self.decoder.bias
        return hidden_states
class MobileBertOnlyMLMHead(nn.Module):
    """Wraps the masked-LM prediction head alone (no next-sentence head)."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class MobileBertPreTrainingHeads(nn.Module):
    """Pre-training heads: masked-LM scores over the vocabulary plus a binary
    next-sentence-prediction classifier on the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        lm_scores = self.predictions(sequence_output)
        nsp_scores = self.seq_relationship(pooled_output)
        return lm_scores, nsp_scores
class MobileBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = MobileBertConfig
    pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST
    load_tf_weights = load_tf_weights_in_mobilebert
    base_model_prefix = "mobilebert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding token embedding at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, (nn.LayerNorm, NoNorm)):
            # Both norm variants start as the identity affine transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
@dataclass
class MobileBertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`MobileBertForPreTraining`].
    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # All fields default to None so that partially populated outputs are valid.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
MOBILEBERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertModel(MobileBertPreTrainedModel):
    """
    https://arxiv.org/pdf/2004.02984.pdf
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = MobileBertEmbeddings(config)
        self.encoder = MobileBertEncoder(config)
        self.pooler = MobileBertPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        # Fall back to the config defaults for any unspecified output option.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default to attending to every position and a single token type.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, self.device
        )
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `next sentence prediction (classification)` head.
    """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Encoder backbone plus the combined MLM + NSP pre-training heads.
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        """Return the decoder layer that maps hidden states to vocabulary logits."""
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddigs):
        """Replace the output (decoder) embedding layer."""
        # NOTE(review): the parameter name carries a typo ("new_embeddigs"); renaming
        # it would change the keyword API for existing callers, so it is left as-is.
        self.cls.predictions.decoder = new_embeddigs
    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # Resize the prediction head's dense output projection first (transposed=True),
        # then delegate to the base implementation for the shared token embeddings.
        self.cls.predictions.dense = self._get_resized_lm_head(
            self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
        )
        return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring) Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Examples:
        ```python
        >>> from transformers import MobileBertTokenizer, MobileBertForPreTraining
        >>> import torch
        >>> tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
        >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
        ...     0
        ... )  # Batch size 1
        >>> outputs = model(input_ids)
        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        # MLM logits over the vocabulary and NSP logits over the two-way decision.
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        # The combined pre-training loss is computed only when BOTH label sets are given.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        if not return_dict:
            # Tuple output: (loss?, prediction_scores, seq_relationship_score, *extras)
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return MobileBertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
    # No pooler is instantiated (add_pooling_layer=False below), so pooler weights
    # found in a checkpoint are ignored rather than reported as unexpected.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        self.cls = MobileBertOnlyMLMHead(config)
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        """Return the decoder layer that maps hidden states to vocabulary logits."""
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddigs):
        """Replace the output (decoder) embedding layer."""
        # NOTE(review): parameter name typo ("new_embeddigs") kept to preserve the
        # keyword API for existing callers.
        self.cls.predictions.decoder = new_embeddigs
    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # Resize the prediction head's dense output projection first (transposed=True),
        # then delegate to the base implementation for the shared token embeddings.
        self.cls.predictions.dense = self._get_resized_lm_head(
            self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
        )
        return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Per-token vocabulary logits from the MLM head.
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class MobileBertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: one linear projection to two classes."""
    def __init__(self, config):
        super().__init__()
        # Binary is-next / not-next classifier over the pooled representation.
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, pooled_output):
        """Return `(batch, 2)` next-sentence-prediction logits."""
        return self.seq_relationship(pooled_output)
@add_start_docstrings(
    """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyNSPHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring) Indices should be in `[0, 1]`.
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Examples:
        ```python
        >>> from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction
        >>> import torch
        >>> tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
        >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        # Backward compatibility: accept the deprecated `next_sentence_label` kwarg.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classify from the pooled sequence representation (second model output).
        pooled_output = outputs[1]
        seq_relationship_score = self.cls(pooled_output)
        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
        if not return_dict:
            output = (seq_relationship_score,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
# NOTE(review): the "Copied from" marker above means repo tooling expects this class
# to mirror the BERT implementation; keep any edits in sync with the original.
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.mobilebert = MobileBertModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific rate is set.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classify from the pooled sequence representation (second model output).
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from the label count / dtype if not preset:
            # one label -> regression; integer labels -> single-label classification;
            # otherwise multi-label classification. The choice is cached on config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
# NOTE(review): the "Copied from" marker above means repo tooling expects this class
# to mirror the BERT implementation; keep any edits in sync with the original.
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
    # No pooler is instantiated (add_pooling_layer=False below), so pooler weights
    # found in a checkpoint are ignored rather than reported as unexpected.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        # One linear layer producing both span logits (start and end) per token.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Split the joint projection into separate start / end span logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            # Clamped out-of-range positions equal ignored_index and contribute no loss.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing
# NOTE(review): the "Copied from" marker above means repo tooling expects this class
# to mirror the BERT implementation; keep any edits in sync with the original.
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific rate is set.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        # One scalar score per choice; scores are compared across choices by the loss.
        self.classifier = nn.Linear(config.hidden_size, 1)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(
        MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, ...) inputs to (batch * num_choices, ...) so the
        # encoder processes each choice as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten back to one score per choice: (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
# NOTE(review): the "Copied from" marker above means repo tooling expects this class
# to mirror the BERT implementation; keep any edits in sync with the original.
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
    # No pooler is instantiated (add_pooling_layer=False below), so pooler weights
    # found in a checkpoint are ignored rather than reported as unexpected.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        # Fall back to the generic hidden dropout when no classifier-specific rate is set.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Per-token label logits from the dropout + linear classification head.
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 41.326223 | 159 | 0.67119 |
acf9608e5bcb24879ab6ab1725f93bcfd97a8cbb | 4,992 | py | Python | pythia/legacy/train_model/evaluate_with_ensemble.py | mandliya/pythia_updated | e986c4dff7cc3a9f6b85ffe8e7d45ea53ab36e95 | [
"BSD-3-Clause"
] | 44 | 2020-12-10T07:36:11.000Z | 2022-03-01T10:45:31.000Z | pythia/legacy/train_model/evaluate_with_ensemble.py | Bunlong/pythia | 1bed85e59a753bec73e6d3fcf1461651d45c791b | [
"BSD-3-Clause"
] | 11 | 2021-05-12T09:41:27.000Z | 2022-03-02T08:48:04.000Z | pythia/legacy/train_model/evaluate_with_ensemble.py | Bunlong/pythia | 1bed85e59a753bec73e6d3fcf1461651d45c791b | [
"BSD-3-Clause"
] | 11 | 2020-03-07T08:10:15.000Z | 2021-06-24T05:39:36.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import glob
import os
import numpy as np
import torch
import yaml
from torch.utils.data import DataLoader
import _pickle as pickle
from train_model.dataset_utils import prepare_eval_data_set
from train_model.Engineer import masked_unk_softmax, one_stage_run_model
from train_model.model_factory import is_one_stageModel
# Per-model temporary files holding each model's softmaxed predictions for the
# current batch; written by the __main__ loop below and combined (then deleted)
# by ensemble_model().
tmp_model_file_name = "tmp_result_%d.pkl"
tmp_model_file_name_pattern = "tmp_result*.pkl"
class answer_json:
    """Accumulates (question_id, answer) records in the submission JSON shape."""
    def __init__(self):
        # List of {"question_id": ..., "answer": ...} dicts, in insertion order.
        self.answers = []
    def add(self, ques_id, ans):
        """Append one answer record for the given question id."""
        self.answers.append({"question_id": ques_id, "answer": ans})
def compute_score_with_prob(prob, scores):
    """Sum the ground-truth scores at each row's maximum-probability position.

    If a row has tied maxima, every tied position's score is counted, matching
    the boolean-mask formulation.
    """
    is_row_max = prob == prob.max(axis=1, keepdims=True)
    return np.sum(scores * is_row_max)
def ensemble(results, ans_unk_idx):
    """Combine per-model logits by summing their UNK-masked softmax distributions.

    `results` must be non-empty; the return value is the element-wise sum of
    `masked_unk_softmax(r, dim=1, mask_idx=ans_unk_idx)` over every entry.
    """
    combined = masked_unk_softmax(results[0], dim=1, mask_idx=ans_unk_idx)
    for extra in results[1:]:
        combined += masked_unk_softmax(extra, dim=1, mask_idx=ans_unk_idx)
    return combined
def ensemble_model(model_dir, max_model=None, clear=True):
    """Sum the per-model prediction arrays dumped in `model_dir`.

    Reads every pickle matching `tmp_model_file_name_pattern` inside
    `model_dir`, element-wise sums their contents, and (by default) deletes
    each file right after it has been consumed.

    Args:
        model_dir: directory containing the temporary per-model result pickles.
        max_model: if given, stop after consuming at most this many files.
        clear: when True, remove each temporary file once it has been read.

    Returns:
        The element-wise sum of the loaded predictions, or None if no file matched.
    """
    count = 0
    final_result = None
    # glob already returns paths prefixed with model_dir; the previous version
    # re-joined model_dir onto them, producing "dir/dir/file" paths that fail
    # for any real (non-".") directory.
    for model_path in glob.glob(os.path.join(model_dir, tmp_model_file_name_pattern)):
        count += 1
        if max_model is not None and count > max_model:
            break
        with open(model_path, "rb") as f:
            pred_result = pickle.load(f)
        final_result = pred_result if final_result is None else final_result + pred_result
        # remove tmp file after ensembling
        if clear:
            os.remove(model_path)
    return final_result
if __name__ == "__main__":
    # CLI: evaluate an ensemble of trained VQA models on the validation set.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True, help="config yaml file")
    parser.add_argument("--out_dir", type=str, required=True, help="output dir")
    parser.add_argument(
        "--model_paths", nargs="+", help="paths for model", default=None
    )
    parser.add_argument(
        "--model_dirs", nargs="+", help="directories for models", default=None
    )
    args = parser.parse_args()
    config_file = args.config
    out_dir = args.out_dir
    model_files = args.model_paths
    model_dirs = args.model_dirs
    # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
    # files and requires a Loader argument on PyYAML >= 6; prefer yaml.safe_load.
    with open(config_file, "r") as f:
        config = yaml.load(f)
    # get the potential shared data_config info
    # NOTE(review): data_root_dir and batch_size are read here but never used
    # below — the DataLoader hard-codes batch_size=100. Confirm intent.
    data_root_dir = config["data"]["data_root_dir"]
    batch_size = config["data"]["batch_size"]
    data_set_val = prepare_eval_data_set(
        **config["data"], **config["model"], verbose=True
    )
    # NOTE(review): shuffle=True during evaluation looks unintentional; the
    # aggregate score is order-independent, but verify this is deliberate.
    data_reader_val = DataLoader(data_set_val, shuffle=True, batch_size=100)
    ans_dic = data_set_val.answer_dict
    # NOTE(review): ans_json_out is created but never populated or written out.
    ans_json_out = answer_json()
    # Load models either from explicit file paths or by searching directories
    # recursively for best_model.pth checkpoints.
    current_models = (
        []
        if model_files is None
        else [torch.load(model_file) for model_file in model_files]
    )
    if model_dirs is not None:
        for model_dir in model_dirs:
            for file in glob.glob(model_dir + "/**/best_model.pth", recursive=True):
                this_model = torch.load(file)
                current_models.append(this_model)
    if len(current_models) == 0:
        exit("no model provided")
    model_type = config["model"]["model_type"]
    total_score = 0
    # NOTE(review): total_max_score is initialized but never updated or reported.
    total_max_score = 0
    total_sample = 0
    num_of_model = len(current_models)
    os.makedirs(out_dir, exist_ok=True)
    if is_one_stageModel(model_type):
        for i, batch in enumerate(data_reader_val):
            if i % 100 == 0:
                print("process batch %d" % i)
            # NOTE(review): verbose_info is fetched but unused.
            verbose_info = batch["verbose_info"]
            answer_scores = batch["ans_scores"]
            answer_scores_np = answer_scores.numpy()
            # Run every model on the batch and dump its masked softmax to a tmp
            # pickle, so ensemble_model() can sum them afterwards.
            for imd, current_model in enumerate(current_models):
                # print("process model %d"%imd)
                logit_res = one_stage_run_model(batch, current_model)
                softmax_res = masked_unk_softmax(
                    logit_res, dim=1, mask_idx=ans_dic.UNK_idx
                )
                softmax_res_data = softmax_res.data.cpu().numpy()
                with open(os.path.join(out_dir, tmp_model_file_name % imd), "wb") as w:
                    pickle.dump(softmax_res_data, w)
            ensembled_soft_max_result = ensemble_model(out_dir, num_of_model)
            nsample, _ = answer_scores_np.shape
            total_sample += nsample
            # Accumulate the VQA score of the ensembled argmax predictions.
            scores = compute_score_with_prob(
                ensembled_soft_max_result, answer_scores_np
            )
            total_score += scores
        # NOTE(review): raises ZeroDivisionError if the loader yields no batches.
        print(
            "model: %d, sample= %d, score =%.6f"
            % (num_of_model, total_sample, total_score / total_sample)
        )
| 30.439024 | 87 | 0.648838 |
acf9630784f3eb9fa67764b0926ab759b5e96659 | 331 | py | Python | scripts/process_cls_real_data.py | edawson/rkmh | ea3d2e6791e8202ec0e487e648c0182f1766728b | [
"MIT"
] | 43 | 2016-06-29T15:55:36.000Z | 2022-03-07T03:18:45.000Z | scripts/process_cls_real_data.py | edawson/rkmh | ea3d2e6791e8202ec0e487e648c0182f1766728b | [
"MIT"
] | 12 | 2016-06-29T12:37:01.000Z | 2021-07-06T18:58:00.000Z | scripts/process_cls_real_data.py | edawson/rkmh | ea3d2e6791e8202ec0e487e648c0182f1766728b | [
"MIT"
] | 8 | 2016-09-01T17:10:53.000Z | 2021-02-26T10:55:31.000Z | import sys
if __name__ == "__main__":
    # Reformat a tab-separated classification-results file given as argv[1].
    # NOTE(review): this is Python 2 code (print-statement syntax) and will not
    # run under Python 3 without converting `print` to a function call.
    with open(sys.argv[1], "r") as ifi:
        for line in ifi:
            tokens = line.strip().split("\t")
            # Trim surrounding whitespace and trailing semicolons from each field.
            tokens = [i.strip().strip(";") for i in tokens]
            # Field 0 is '|'-delimited: keep the third part up to the first '_';
            # fields 3 and 4 keep only the text before the first ';'.
            print tokens[0].split("|")[2].split("_")[0], tokens[3].split(";")[0], tokens[4].split(";")[0]
| 27.583333 | 105 | 0.489426 |
acf96543ab47761f7d1d2d4822bbc7298a893e42 | 223 | py | Python | prometeo-dashboard/api-main/__init__.py | oscillator25/Prometeo-Dashboard | 975be901e1019951c5887674199247db2515de15 | [
"Apache-2.0"
] | null | null | null | prometeo-dashboard/api-main/__init__.py | oscillator25/Prometeo-Dashboard | 975be901e1019951c5887674199247db2515de15 | [
"Apache-2.0"
] | 2 | 2021-08-31T22:47:12.000Z | 2021-09-21T17:27:36.000Z | prometeo-dashboard/api-main/__init__.py | KOSASIH/Prometeo-Dashboard | 0b97c3f6afb4fe5736a6189e92d104c5e9801c63 | [
"Apache-2.0"
] | null | null | null | """Routes"""
# Auto-generate this package's public API: list every sibling .py module except
# __init__.py in __all__, so `from <package> import *` pulls in the route modules.
from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
# Strip the directory part and the ".py" suffix to get bare module names.
__all__ = [basename(f)[:-3] for f in modules if isfile(f)
           and not f.endswith('__init__.py')]
| 27.875 | 57 | 0.668161 |
acf965fd4ed73e4b2e196e12dadce850c8cd10be | 1,230 | py | Python | fnx/table.py | hoefkensj/ANSI | 5e0b42f7c4415c394dc78bd1c295e93b83019e1d | [
"Unlicense"
] | null | null | null | fnx/table.py | hoefkensj/ANSI | 5e0b42f7c4415c394dc78bd1c295e93b83019e1d | [
"Unlicense"
] | null | null | null | fnx/table.py | hoefkensj/ANSI | 5e0b42f7c4415c394dc78bd1c295e93b83019e1d | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# Scratch/demo driver for ANSI.units.table. The table definition below is kept
# commented out as a usage sketch of the dict-based table format
# (T=titles, H=headers, M=meta/format options, D=data rows, F=footers).
import ANSI.units.table
print('THERE ARE STILL SOME PROBLEMS ,... ONE OF THEM BEING WHEN THE [ENTER] LAUNCH IS BADLE DEBOUNCED THE WHOLE APP BREAKS')
# table1 = {
# 	'T': [['title'], ['subtitle']],   # T TITLES
# 	'H': ['idx', 'header1', 'header2'], # H HEADERS
# 	'M': {
# 		'fss': '\u2502',  # \u250B',
# 		'pdd': {'char':'-','min':[2,4,4]},
# 		'mrg': ' ',
# 		'al': ['l', 'c', 'r']
# 	},                                  # M META
# 	'D': [
# 			[1	, 'd2', '123'],
# 			[5	, 'data\tp2', '\033[1msomedata\033[0m',	],
# 			[3	, '\033[32mGreen\033[0m', 'data5299'],
# 		],
# 	'F': [['help'], ['footer']]
# }
# for r,row in enumerate(zip(table['C']['mtx_D_cfss_yxH'],table['C']['mtx_D_cfss'])):
# 	for c,(D_cfss_xyH,D_cfss) in enumerate(zip(*row)):
# 		ANSI.fnx.m.stdout_mwrite(txt=[D_cfss_xyH,D_cfss],style=['green']);sys.stdout.flush()
#
# org=ANSI.lib.lib_tty.pos_cursor()['y']
# crd=H(f"{ANSI.lib.lib_tty.pos_cursor()['y']};{ANSI.lib.lib_tty.pos_cursor()['x']}") #
# # [print(i) for i in range(18)]
# # crd=H(f"{ANSI.lib.lib_tty.pos_cursor()['y']};{ANSI.lib.lib_tty.pos_cursor()['x']}")
# # print(repr(table['C']['lst_H_cfss_yxH']),repr(table['C']['lst_css']))
# print('crd:',repr(crd))
# table = tbl_calc(table1)
acf96684a8c24f14bf2682a78c72fc408f3783cd | 2,068 | py | Python | ros/src/twist_controller/twist_controller.py | JB1984/CarND-Capstone | 5b2920a1c4727954eddcb77446a8a4346521864e | [
"MIT"
] | 2 | 2019-05-02T00:33:33.000Z | 2019-05-10T09:55:18.000Z | ros/src/twist_controller/twist_controller.py | JB1984/CarND-Capstone | 5b2920a1c4727954eddcb77446a8a4346521864e | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | JB1984/CarND-Capstone | 5b2920a1c4727954eddcb77446a8a4346521864e | [
"MIT"
] | 2 | 2019-05-07T00:34:11.000Z | 2019-05-21T10:07:26.000Z |
import rospy
from yaw_controller import YawController
from pid import PID
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass,fuel_capacity,brake_deadband,decel_limit,accel_limit,wheel_radius,wheel_base,steer_ratio,max_lat_accel,max_steer_angle):
self.yaw_controller = YawController(wheel_base,steer_ratio,0.1,max_lat_accel,max_steer_angle)
kp=0.3
kd=0.
ki=0.1
mn=0. # min throttle value
mx=0.2 # max throttle value
self.throttle_pid = PID(kp, ki, kd, mn, mx)
self.throttle_controller=PID(kp,ki,kd,mn,mx)
tau=0.5 # cut off frequency
ts=.02 # sample time
self.vel_lpf=LowPassFilter(tau,ts)
self.vehicle_mass=vehicle_mass
self.fuel_cap=fuel_capacity
self.brake_dea=brake_deadband
self.decel_limit=decel_limit
self.accel_limit=accel_limit
self.wheel_radius=wheel_radius
self.last_time=rospy.get_time()
def control(self, current_vel, dbw_enabled, target_linear_vel, target_angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
if not dbw_enabled:
self.throttle_pid.reset()
return 0., 0., 0.
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
current_vel=self.vel_lpf.filt(current_vel)
steer = self.yaw_controller.get_steering(target_linear_vel, target_angular_vel, current_vel)
velocity_error = target_linear_vel - current_vel
throttle = self.throttle_pid.step(velocity_error, sample_time)
brake = 0
if target_linear_vel==0. and current_vel<0.2:
throttle=0
brake=400
elif throttle <.1 and velocity_error<0:
throttle=0
decel=max(velocity_error,self.decel_limit)
brake=abs(decel)*self.vehicle_mass*self.wheel_radius
# return 1., 0., 0.
return throttle, brake, steer
| 33.354839 | 156 | 0.675048 |
acf967e204a2bbca819866e05ce1653807ed500f | 226 | py | Python | get_recent.py | kevingoldsmith/lastfm-extract | 0a7d15409fbd52628d9eeda3a5360dcfd13943c6 | [
"MIT"
] | null | null | null | get_recent.py | kevingoldsmith/lastfm-extract | 0a7d15409fbd52628d9eeda3a5360dcfd13943c6 | [
"MIT"
] | null | null | null | get_recent.py | kevingoldsmith/lastfm-extract | 0a7d15409fbd52628d9eeda3a5360dcfd13943c6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pylast
# NOTE(review): `api_key`, `api_secret`, `username` and `password_hash` are
# not defined in this fragment — presumably loaded from a config module or
# environment elsewhere; confirm before running.
network = pylast.LastFMNetwork(api_key=api_key, api_secret=api_secret, username=username, password_hash=password_hash)
# Fetch the authenticated user's most recently scrobbled tracks.
user = network.get_user(username)
recents = user.get_recent_tracks()
| 28.25 | 118 | 0.814159 |
acf968c5a9464e671368f2d8f409aca9b2540266 | 1,367 | py | Python | website/website/urls.py | jordanchou/curtinideas | 4420ef5853223c9e7745544b16f0b3264192e980 | [
"MIT"
] | null | null | null | website/website/urls.py | jordanchou/curtinideas | 4420ef5853223c9e7745544b16f0b3264192e980 | [
"MIT"
] | null | null | null | website/website/urls.py | jordanchou/curtinideas | 4420ef5853223c9e7745544b16f0b3264192e980 | [
"MIT"
] | null | null | null | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
from accounts import urls as accounts_urls
from submission import urls as submissions_urls
#-----------------------------------------------------------------------------
urlpatterns = [
    # Account management (login, registration, ...) lives in the `accounts`
    # app; its routes are reversed under the "accounts:" namespace.
    url(r'^accounts/', include(accounts_urls, namespace="accounts")),
    # NOTE(review): string view references ('website.views.index') are a
    # Django <= 1.9 idiom (removed in 1.10); confirm the project stays on
    # 1.9 or switch to importing the view callables.
    url(r'^$', 'website.views.index', name='index'),
    url(r'^about_us/', 'website.views.about_us'),
    url(r'^faq/', 'website.views.faq'),
    url(r'^admin/', admin.site.urls),
    # Idea-submission workflow, reversed under the "submission:" namespace.
    url(r'^submission/', include(submissions_urls, namespace="submission"))
]
#----------------------------------------------------------------------------- | 41.424242 | 79 | 0.624726 |
acf96937953dc64a1e1fc67cbce5c8b1d97363af | 547 | py | Python | Ch07_Code/MySQL_create_DB.py | arifmudi/Python-GUI-Programming-Cookbook-Third-Edition | 942c151a62ef422bb6fdd15b4b141a07c699cb9a | [
"MIT"
] | 68 | 2019-08-23T10:54:38.000Z | 2022-03-09T20:21:39.000Z | Ch07_Code/MySQL_create_DB.py | arifmudi/Python-GUI-Programming-Cookbook-Third-Edition | 942c151a62ef422bb6fdd15b4b141a07c699cb9a | [
"MIT"
] | null | null | null | Ch07_Code/MySQL_create_DB.py | arifmudi/Python-GUI-Programming-Cookbook-Third-Edition | 942c151a62ef422bb6fdd15b4b141a07c699cb9a | [
"MIT"
] | 46 | 2019-09-03T18:04:29.000Z | 2022-03-30T01:06:52.000Z | '''
Created on May 29, 2019
Ch07
@author: Burkhard A. Meier
'''
import mysql.connector
import Ch07_Code.GuiDBConfig as guiConf
GUIDB = 'GuiDB'
# unpack dictionary credentials
conn = mysql.connector.connect(**guiConf.dbConfig)
cursor = conn.cursor()
try:
    cursor.execute("CREATE DATABASE {} \
           DEFAULT CHARACTER SET 'utf8'".format(GUIDB))
except mysql.connector.Error as err:
    print("Failed to create DB: {}".format(err))
finally:
    # BUGFIX: the cursor was never closed and conn.close() did not run if an
    # unexpected exception escaped; release both unconditionally.
    cursor.close()
    conn.close()
acf969f835b9f29b298e7277006466007478918c | 18,318 | py | Python | tensorflow/python/summary/writer/writer.py | tftuner/tf_fast_recovery | 996544fb0a4fec34f83602c82489e436484d2732 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/summary/writer/writer.py | tftuner/tf_fast_recovery | 996544fb0a4fec34f83602c82489e436484d2732 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/summary/writer/writer.py | tftuner/tf_fast_recovery | 996544fb0a4fec34f83602c82489e436484d2732 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
import glob
import subprocess
from tensorflow.python.summary.summary_iterator import summary_iterator
from tensorflow.core.framework.summary_pb2 import Summary
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
  """Abstractly implements the SummaryWriter API.
  This API basically implements a number of endpoints (add_summary,
  add_session_log, etc). The endpoints all generate an event protobuf, which is
  passed to the contained event_writer.
  """

  def __init__(self, event_writer, graph=None, graph_def=None):
    """Creates a `SummaryWriter` and an event file.
    On construction the summary writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers constructed when you
    call one of the following functions: `add_summary()`, `add_session_log()`,
    `add_event()`, or `add_graph()`.
    If you pass a `Graph` to the constructor it is added to
    the event file. (This is equivalent to calling `add_graph()` later).
    TensorBoard will pick the graph from the file and display it graphically so
    you can interactively explore the graph you built. You will usually pass
    the graph from the session in which you launched it:
    ```python
    ...create a graph...
    # Launch the graph in a session.
    sess = tf.compat.v1.Session()
    # Create a summary writer, add the 'graph' to the event file.
    writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
    ```
    Args:
      event_writer: An EventWriter. Implements add_event and get_logdir.
      graph: A `Graph` object, such as `sess.graph`.
      graph_def: DEPRECATED: Use the `graph` argument instead.
    """
    self.event_writer = event_writer
    # For storing used tags for session.run() outputs.
    self._session_run_tags = {}
    if graph is not None or graph_def is not None:
      # Calling it with both graph and graph_def for backward compatibility.
      self.add_graph(graph=graph, graph_def=graph_def)
      # Also export the meta_graph_def in this case.
      # graph may itself be a graph_def due to positional arguments
      maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
                            if isinstance(graph, ops.Graph) else graph)
      self.add_meta_graph(
          meta_graph.create_meta_graph_def(graph_def=graph_def or
                                           maybe_graph_as_def))
    # This set contains tags of Summary Values that have been encountered
    # already. The motivation here is that the SummaryWriter only keeps the
    # metadata property (which is a SummaryMetadata proto) of the first Summary
    # Value encountered for each tag. The SummaryWriter strips away the
    # SummaryMetadata for all subsequent Summary Values with tags seen
    # previously. This saves space.
    self._seen_summary_tags = set()

  def add_summary(self, summary, global_step=None):
    """Adds a `Summary` protocol buffer to the event file.
    This method wraps the provided summary in an `Event` protocol buffer
    and adds it to the event file.
    You can pass the result of evaluating any summary op, using
    `tf.Session.run` or
    `tf.Tensor.eval`, to this
    function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
    buffer that you populate with your own data. The latter is
    commonly done to report evaluation results in event files.
    Args:
      summary: A `Summary` protocol buffer, optionally serialized as a string.
      global_step: Number. Optional global step value to record with the
        summary.
    """
    # Serialized summaries arrive as bytes; parse them into a proto first.
    if isinstance(summary, bytes):
      summ = summary_pb2.Summary()
      summ.ParseFromString(summary)
      summary = summ
    # We strip metadata from values with tags that we have seen before in order
    # to save space - we just store the metadata on the first value with a
    # specific tag.
    for value in summary.value:
      if not value.metadata:
        continue
      if value.tag in self._seen_summary_tags:
        # This tag has been encountered before. Strip the metadata.
        value.ClearField("metadata")
        continue
      # We encounter a value with a tag we have not encountered previously. And
      # it has metadata. Remember to strip metadata from future values with this
      # tag string.
      self._seen_summary_tags.add(value.tag)
    event = event_pb2.Event(summary=summary)
    self._add_event(event, global_step)

  def add_session_log(self, session_log, global_step=None):
    """Adds a `SessionLog` protocol buffer to the event file.
    This method wraps the provided session in an `Event` protocol buffer
    and adds it to the event file.
    Args:
      session_log: A `SessionLog` protocol buffer.
      global_step: Number. Optional global step value to record with the
        summary.
    """
    event = event_pb2.Event(session_log=session_log)
    self._add_event(event, global_step)

  def _add_graph_def(self, graph_def, global_step=None):
    # Graphs are stored serialized inside the Event proto.
    graph_bytes = graph_def.SerializeToString()
    event = event_pb2.Event(graph_def=graph_bytes)
    self._add_event(event, global_step)

  def add_graph(self, graph, global_step=None, graph_def=None):
    """Adds a `Graph` to the event file.
    The graph described by the protocol buffer will be displayed by
    TensorBoard. Most users pass a graph in the constructor instead.
    Args:
      graph: A `Graph` object, such as `sess.graph`.
      global_step: Number. Optional global step counter to record with the
        graph.
      graph_def: DEPRECATED. Use the `graph` parameter instead.
    Raises:
      ValueError: If both graph and graph_def are passed to the method.
    """
    if graph is not None and graph_def is not None:
      raise ValueError("Please pass only graph, or graph_def (deprecated), "
                       "but not both.")
    if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
      # The user passed a `Graph`.
      # Check if the user passed it via the graph or the graph_def argument and
      # correct for that.
      if not isinstance(graph, ops.Graph):
        logging.warning("When passing a `Graph` object, please use the `graph`"
                        " named argument instead of `graph_def`.")
        graph = graph_def
      # Serialize the graph with additional info.
      true_graph_def = graph.as_graph_def(add_shapes=True)
      self._write_plugin_assets(graph)
    elif (isinstance(graph, graph_pb2.GraphDef) or
          isinstance(graph_def, graph_pb2.GraphDef)):
      # The user passed a `GraphDef`.
      logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
                      " Pass a `Graph` object instead, such as `sess.graph`.")
      # Check if the user passed it via the graph or the graph_def argument and
      # correct for that.
      if isinstance(graph, graph_pb2.GraphDef):
        true_graph_def = graph
      else:
        true_graph_def = graph_def
    else:
      # The user passed neither `Graph`, nor `GraphDef`.
      raise TypeError("The passed graph must be an instance of `Graph` "
                      "or the deprecated `GraphDef`")
    # Finally, add the graph_def to the summary writer.
    self._add_graph_def(true_graph_def, global_step)

  def _write_plugin_assets(self, graph):
    # Persist every plugin asset attached to the graph under
    # <logdir>/plugins/<plugin_name>/<asset_name>.
    plugin_assets = plugin_asset.get_all_plugin_assets(graph)
    logdir = self.event_writer.get_logdir()
    for asset_container in plugin_assets:
      plugin_name = asset_container.plugin_name
      plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
      gfile.MakeDirs(plugin_dir)
      assets = asset_container.assets()
      for (asset_name, content) in assets.items():
        asset_path = os.path.join(plugin_dir, asset_name)
        with gfile.Open(asset_path, "w") as f:
          f.write(content)

  def add_meta_graph(self, meta_graph_def, global_step=None):
    """Adds a `MetaGraphDef` to the event file.
    The `MetaGraphDef` allows running the given graph via
    `saver.import_meta_graph()`.
    Args:
      meta_graph_def: A `MetaGraphDef` object, often as returned by
        `saver.export_meta_graph()`.
      global_step: Number. Optional global step counter to record with the
        graph.
    Raises:
      TypeError: If both `meta_graph_def` is not an instance of `MetaGraphDef`.
    """
    if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
      raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
                      type(meta_graph_def))
    meta_graph_bytes = meta_graph_def.SerializeToString()
    event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
    self._add_event(event, global_step)

  def add_run_metadata(self, run_metadata, tag, global_step=None):
    """Adds a metadata information for a single session.run() call.
    Args:
      run_metadata: A `RunMetadata` protobuf object.
      tag: The tag name for this metadata.
      global_step: Number. Optional global step counter to record with the
        StepStats.
    Raises:
      ValueError: If the provided tag was already used for this type of event.
    """
    if tag in self._session_run_tags:
      raise ValueError("The provided tag was already used for this event type")
    self._session_run_tags[tag] = True
    tagged_metadata = event_pb2.TaggedRunMetadata()
    tagged_metadata.tag = tag
    # Store the `RunMetadata` object as bytes in order to have postponed
    # (lazy) deserialization when used later.
    tagged_metadata.run_metadata = run_metadata.SerializeToString()
    event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
    self._add_event(event, global_step)

  def _add_event(self, event, step):
    # The wall time is stamped at enqueue time, not at flush time.
    event.wall_time = time.time()
    if step is not None:
      event.step = int(step)
    self.event_writer.add_event(event)
@tf_export(v1=["summary.FileWriter"])
class FileWriter(SummaryToEventTransformer):
  """Writes `Summary` protocol buffers to event files.
  The `FileWriter` class provides a mechanism to create an event file in a
  given directory and add summaries and events to it. The class updates the
  file contents asynchronously. This allows a training program to call methods
  to add data to the file directly from the training loop, without slowing down
  training.
  When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter`
  instead forms a compatibility layer over new graph-based summaries
  (`tf.contrib.summary`) to facilitate the use of new summary writing with
  pre-existing code that expects a `FileWriter` instance.
  """

  def __init__(self,
               logdir,
               graph=None,
               max_queue=10,
               flush_secs=120,
               graph_def=None,
               filename_suffix=None,
               session=None):
    """Creates a `FileWriter`, optionally shared within the given session.
    Typically, constructing a file writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers constructed when you
    call one of the following functions: `add_summary()`, `add_session_log()`,
    `add_event()`, or `add_graph()`.
    If you pass a `Graph` to the constructor it is added to
    the event file. (This is equivalent to calling `add_graph()` later).
    TensorBoard will pick the graph from the file and display it graphically so
    you can interactively explore the graph you built. You will usually pass
    the graph from the session in which you launched it:
    ```python
    ...create a graph...
    # Launch the graph in a session.
    sess = tf.compat.v1.Session()
    # Create a summary writer, add the 'graph' to the event file.
    writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
    ```
    The `session` argument to the constructor makes the returned `FileWriter` a
    compatibility layer over new graph-based summaries (`tf.contrib.summary`).
    Crucially, this means the underlying writer resource and events file will
    be shared with any other `FileWriter` using the same `session` and `logdir`,
    and with any `tf.contrib.summary.SummaryWriter` in this session using the
    the same shared resource name (which by default scoped to the logdir). If
    no such resource exists, one will be created using the remaining arguments
    to this constructor, but if one already exists those arguments are ignored.
    In either case, ops will be added to `session.graph` to control the
    underlying file writer resource. See `tf.contrib.summary` for more details.
    Args:
      logdir: A string. Directory where event file will be written.
      graph: A `Graph` object, such as `sess.graph`.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      graph_def: DEPRECATED: Use the `graph` argument instead.
      filename_suffix: A string. Every event file's name is suffixed with
        `suffix`.
      session: A `tf.compat.v1.Session` object. See details above.
    Raises:
      RuntimeError: If called with eager execution enabled.
    @compatibility(eager)
    `FileWriter` is not compatible with eager execution. To write TensorBoard
    summaries under eager execution, use `tf.contrib.summary` instead.
    @end_compatibility
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "tf.summary.FileWriter is not compatible with eager execution. "
          "Use tf.contrib.summary instead.")
    if session is not None:
      event_writer = EventFileWriterV2(
          session, logdir, max_queue, flush_secs, filename_suffix)
    else:
      event_writer = EventFileWriter(logdir, max_queue, flush_secs,
                                     filename_suffix)
    self._closed = False
    super(FileWriter, self).__init__(event_writer, graph, graph_def)

  def __enter__(self):
    """Make usable with "with" statement."""
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Make usable with "with" statement."""
    self.close()

  def get_logdir(self):
    """Returns the directory where event file will be written."""
    return self.event_writer.get_logdir()

  def _warn_if_event_writer_is_closed(self):
    if self._closed:
      warnings.warn("Attempting to use a closed FileWriter. "
                    "The operation will be a noop unless the FileWriter "
                    "is explicitly reopened.")

  def _add_event(self, event, step):
    self._warn_if_event_writer_is_closed()
    super(FileWriter, self)._add_event(event, step)

  def add_event(self, event):
    """Adds an event to the event file.
    Args:
      event: An `Event` protocol buffer.
    """
    self._warn_if_event_writer_is_closed()
    self.event_writer.add_event(event)

  def flush(self):
    """Flushes the event file to disk.
    Call this method to make sure that all pending events have been written to
    disk.
    """
    # Flushing a closed EventFileWriterV2 raises an exception. It is,
    # however, a noop for EventFileWriter.
    self._warn_if_event_writer_is_closed()
    self.event_writer.flush()

  def close(self):
    """Flushes the event file to disk and close the file.
    Call this method when you do not need the summary writer anymore.
    """
    self.event_writer.close()
    self._closed = True

  def reopen(self):
    """Reopens the EventFileWriter.
    Can be called after `close()` to add more events in the same directory.
    The events will go into a new events file.
    Does nothing if the EventFileWriter was not closed.
    """
    self.event_writer.reopen()
    self._closed = False

  def read_eventfile(self, logdir):
    """Reads "loss"-tagged events from the oldest event file in `logdir`.

    Local `event*` files are preferred; when none exist, the same pattern is
    listed on HDFS via the `hadoop fs -ls` CLI.

    Args:
      logdir: A string. Directory (local or HDFS) containing event files.
    Returns:
      A list of `Event` protos whose first summary value is tagged "loss";
      an empty list when no event files are found on HDFS.
    """
    # read the oldest eventfile from logdir
    event_paths = glob.glob(os.path.join(logdir, "event*"))
    if len(event_paths) == 0:
      # no eventfiles in local directory, try to read from hdfs
      hdfs_paths = os.path.join(logdir, "event*")
      proc = subprocess.Popen(['hadoop', 'fs', '-ls', hdfs_paths],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      # BUGFIX: use communicate() so the child is waited on and the stderr
      # pipe cannot fill up and deadlock the process.
      out, _ = proc.communicate()
      # BUGFIX: stdout lines are bytes on Python 3; decode them so the
      # " ".join(...) sort key below does not raise TypeError. Also skip the
      # "Found N items" header and other short lines, which do not have the
      # 8 columns of a file entry and crashed the original indexing.
      search_results = [
          line.decode("utf-8", "replace")
          for line in out.splitlines()
          if len(line.split()) >= 8
      ]
      if len(search_results) == 0:
        return []
      # `hadoop fs -ls` columns: perms, replicas, owner, group, size,
      # date (5), time (6), path (-1); sort entries by their timestamp.
      search_results = sorted(
          search_results,
          key=lambda x: " ".join([x.split()[5], x.split()[6]]))
      event_paths = [x.split()[-1] for x in search_results]
    else:
      event_paths = sorted(event_paths, key=os.path.getctime)
    events = summary_iterator(event_paths[0])
    valid_events = [
        e for e in events if e.summary.value and e.summary.value[0].tag == "loss"
    ]
    return valid_events
| 39.995633 | 113 | 0.702751 |
acf969fbeb25b9cb3d46227fcc5bb30d020efcf3 | 1,096 | py | Python | Python_Code/ch2/5_deliberate.py | xiaoandx/learningCode | 2c41bc7199ef21a70d1935f32296d520e18f719f | [
"MIT"
] | 13 | 2020-10-25T15:38:15.000Z | 2022-02-21T02:21:24.000Z | Python_Code/ch2/5_deliberate.py | xiaoandx/learningCode | 2c41bc7199ef21a70d1935f32296d520e18f719f | [
"MIT"
] | 4 | 2020-10-26T08:37:27.000Z | 2020-12-14T08:49:51.000Z | Python_Code/ch2/5_deliberate.py | xiaoandx/learningCode | 2c41bc7199ef21a70d1935f32296d520e18f719f | [
"MIT"
] | 10 | 2020-10-25T15:38:30.000Z | 2021-09-15T03:54:39.000Z | '''
Copyright (c) 2020 WEI.ZHOU. All rights reserved.
The following code snippets are only used for circulation and cannot be used for business.
If the code is used, no consent is required, but the author has nothing to do with any problems and consequences.
In case of code problems, feedback can be made through the following email address.
<xiaoandx@gmail.com>
a = 0.1
b = 0.2
c = 0.63
d = 0.6
print(a+b)
print(c-d)
0.1 + 0.2 = 0.30000000000000004
0.63 - 0.6 = 0.030000000000000027
出现计算机运算结果与实际不符,原因如下:
1.浮点数为什么缺乏精确性的问题,
是因为实数的无限精度跟计算机的有限内存之间是有矛盾的。
2.底层 CPU 和IEEE 754 标准通过自己的浮点单位去执行
算术时的特征,看似有穷的小数, 在计算机的二进制表示里却是无穷的
解决办法:
引入Decimal 模块,该模块的高精度数字用字符串来做展示和中转
'''
# Decimal arithmetic demo: unlike binary floats, decimal.Decimal stores the
# values in base 10, so 0.1 + 0.2 is exactly 0.3 and 0.63 - 0.6 is 0.03.
import decimal

num1, num2, num3, num4 = (
    decimal.Decimal(text) for text in ("0.1", "0.2", "0.63", "0.6")
)
print(num1 + num2)   # 0.3, not 0.30000000000000004
print(num3 - num4)   # 0.03, not 0.030000000000000027
| 26.095238 | 196 | 0.57573 |
acf96af0524730be86a9be4a1e7c0cba0972d4b9 | 2,365 | py | Python | app/core/models.py | josekang/recipe-app-api | 059e5b048d09943ccb11442d584d83a5f4e036df | [
"MIT"
] | null | null | null | app/core/models.py | josekang/recipe-app-api | 059e5b048d09943ccb11442d584d83a5f4e036df | [
"MIT"
] | null | null | null | app/core/models.py | josekang/recipe-app-api | 059e5b048d09943ccb11442d584d83a5f4e036df | [
"MIT"
] | null | null | null | import uuid
import os
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin
from django.conf import settings
# Create your models here.
def recipe_image_file_path(instance, filename):
    """Build a unique upload path for a recipe image.

    The original extension is kept, but the basename is replaced with a
    random UUID so concurrent uploads can never collide.
    """
    extension = filename.rsplit('.', 1)[-1]
    unique_name = '{}.{}'.format(uuid.uuid4(), extension)
    return os.path.join('uploads/recipe/', unique_name)
class UserManager(BaseUserManager):
    """Manager that creates users keyed by e-mail instead of a username."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and persist a regular user; raises ValueError without email."""
        if not email:
            raise ValueError("Users must have a valid email address")
        normalized = self.normalize_email(email)
        user = self.model(email=normalized, **extra_fields)
        # set_password hashes the raw password (or marks it unusable if None).
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create a user with the staff and superuser flags enabled."""
        superuser = self.create_user(email, password)
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save(using=self._db)
        return superuser
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates with an e-mail address."""
    email = models.EmailField(max_length=255, unique=True)  # login identifier
    name = models.CharField(max_length=128)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)  # grants admin-site access
    objects = UserManager()
    # Authenticate with the email field instead of Django's default username.
    USERNAME_FIELD = 'email'
    def __str__(self):
        return self.email
class Tag(models.Model):
    """A user-owned label that can be attached to recipes."""
    name = models.CharField(max_length=255)
    # Tags belong to their creator; deleting the user cascades to the tags.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """A user-owned ingredient that recipes can reference."""
    name = models.CharField(max_length=255)
    # Ingredients belong to their creator; deleting the user cascades.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Recipe(models.Model):
    """A recipe owned by a user, linking tags and ingredients."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=255)
    time_minutes = models.IntegerField()  # preparation time in minutes
    price = models.DecimalField(max_digits=5, decimal_places=2)
    link = models.CharField(max_length=255, blank=True)  # optional external URL
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Uploaded images are renamed to a UUID by recipe_image_file_path.
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)
    def __str__(self):
        return self.title
acf96b34d9283e07d29299bc51b8c595cc190d31 | 504 | py | Python | docs/006_Adding_a_Database/midway/models/race.py | kellanjacobs/midwaypython | 3fb311746a75fe832002e800dffc3b80cb819122 | [
"Apache-2.0"
] | null | null | null | docs/006_Adding_a_Database/midway/models/race.py | kellanjacobs/midwaypython | 3fb311746a75fe832002e800dffc3b80cb819122 | [
"Apache-2.0"
] | null | null | null | docs/006_Adding_a_Database/midway/models/race.py | kellanjacobs/midwaypython | 3fb311746a75fe832002e800dffc3b80cb819122 | [
"Apache-2.0"
] | 4 | 2019-02-04T14:03:53.000Z | 2019-03-18T18:30:06.000Z | from sqlalchemy import (
Column,
ForeignKey,
Integer,
)
from sqlalchemy.orm import relationship
from .meta import Base
class Race(Base):
""" The SQLAlchemy declarative model class for a Page object. """
__tablename__ = 'races'
id = Column(Integer, primary_key=True)
race_number = Column(Integer, nullable=False)
place = Column(Integer, default=99)
horse_id = Column(ForeignKey('horses.id'), nullable=False)
horse = relationship('Horse', back_populates="races")
| 26.526316 | 69 | 0.704365 |
acf96b5aa2382f7615c7ee0d8d16f98783ef97d0 | 16,625 | py | Python | patrickstar/core/chunk_list.py | zhuzilin/PatrickStar | 72daf8dbded07e03d911db6369b075c9bfcd5245 | [
"BSD-3-Clause"
] | null | null | null | patrickstar/core/chunk_list.py | zhuzilin/PatrickStar | 72daf8dbded07e03d911db6369b075c9bfcd5245 | [
"BSD-3-Clause"
] | 2 | 2022-03-15T06:57:11.000Z | 2022-03-15T07:55:31.000Z | patrickstar/core/chunk_list.py | zhuzilin/PatrickStar | 72daf8dbded07e03d911db6369b075c9bfcd5245 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import torch
from patrickstar.core.const import ChunkType
from patrickstar.core.memtracer import RuntimeMemTracer
from patrickstar.profiler import profiler
from patrickstar.utils import logger, get_rank, get_world_size
import patrickstar.utils.global_timer as global_timer
from .chunk_data import Chunk
from .comm import CommInfo
from .const import ChunkState
from patrickstar.core.eviction_policy import ChunkEvictionPolicyBase
from patrickstar.core.memory_cache import MemoryCache
class ChunkList(object):
r"""Manage the entities of all chunks.
There are 4 kinds of chunk list:
param fp16, param fp32, momentum, variance
All of them are managed by one instance of this class.
"""
generated_chunk_id = -1
def __init__(
self,
local_rank: int,
memory_tracer: RuntimeMemTracer,
chunk_eviction_policy: ChunkEvictionPolicyBase,
with_mem_cache: bool = False,
):
"""
Args:
local_rank: int.
"""
self.id_to_chunk_map: dict[int, Chunk] = {}
self.chunk_type_to_id_list_map: dict[ChunkType, int] = {}
for chunk_type in ChunkType:
self.chunk_type_to_id_list_map[chunk_type] = []
self._time_profile = True
self.moments_cnt_of_iteration = None
self.local_rank = local_rank
self.device = torch.device(f"cuda:{local_rank}")
self.chunk_eviction_policy = chunk_eviction_policy
self.memory_tracer = memory_tracer
self.with_mem_cache = with_mem_cache
if self.with_mem_cache:
self.memory_cache = MemoryCache(2, self.memory_tracer)
else:
self.memory_cache = None
def chunk_ids_generator(self, chunk_type: ChunkType):
r"""Return the chunk_id of all chunks with type `chunk_type`
Args:
chunk_type: :class:`ChunkType`.
"""
for chunk_id in self.chunk_type_to_id_list_map[chunk_type]:
yield chunk_id
def generate_chunk_id(self) -> int:
r"""Get the chunk id of next chunk."""
ChunkList.generated_chunk_id += 1
return ChunkList.generated_chunk_id
def __getitem__(self, chunk_id: int):
r"""Search a chunk by id."""
return self.id_to_chunk_map.get(chunk_id)
    def size(self) -> int:
        r"""Total number of chunks."""
        return len(self.id_to_chunk_map)
    def __len__(self) -> int:
        # Delegates to size() so len(chunk_list) works.
        return self.size()
def get_chunk_memory_used(self, device):
r"""The total memory of payload of all chunks on `device`.
Args:
device: :class:`torch.device`.
Returns:
float.
"""
mem_used = 0
for _, chunk in self.id_to_chunk_map.items():
if (
chunk.get_device() is not None
and chunk.get_device().type == device.type
):
mem_used += chunk.get_payload_space()
return mem_used
def max_chunk_size(self):
max_size = 0
for _, chunk in self.id_to_chunk_map.items():
max_size = max(chunk.capacity, max_size)
return max_size
def try_best_allocate_payload(self, chunk: Chunk, compute_device):
"""
Try our best to allocate payload for chunk.
First free up chunk size space on the target device.
If it dose not work, we second free up all chunks not in used on the target device.
"""
payload_space = chunk.get_chunk_space()
self.prepare_device(compute_device, payload_space)
if chunk.allocate_payload(compute_device):
return
else:
self.clear_useless_chunks(compute_device)
if chunk.allocate_payload(compute_device) is False:
raise RuntimeError(
f"Allocation chunk payload fails on {compute_device}, even if we try our best."
)
    def access_chunk(self, chunk_id: int, compute_device: torch.device):
        r"""Prepare the memory of chunk to `compute_device` with `chunk_id`.
        We need to move the chunk when it is on other devices.
        TODO(jiaruifang) Add async copy and record the lifecycle of chunks during
        the first iteration, so that we can prefetch the next chunk after sync
        the memcopy of the first chunk.
        Args:
            chunk_id: int.
            compute_device: :class:`torch.device`.
        """
        chunk = self.id_to_chunk_map[chunk_id]
        chunk_state = chunk.get_state()
        payload_space = chunk.get_chunk_space()
        # If chunk was released, we need to reallocate it.
        if chunk_state == ChunkState.RELEASED:
            logger.debug(
                f"rank {get_rank()} access_chunk chunk {chunk_id}, "
                f"need to allocate {payload_space} B memory on {compute_device}"
            )
            # Allocating a chunk on compute_device.
            self.try_best_allocate_payload(chunk, compute_device)
            return
        # NOTE: only the device *type* is compared here, so a chunk on any
        # GPU counts as being on `compute_device` when that is a GPU.
        elif chunk.get_device().type != compute_device.type:
            self.prepare_device(compute_device, payload_space)
            chunk.move(compute_device)
            assert (
                chunk.get_device().type == compute_device.type
            ), f"chunk device {chunk.get_device()} compute device {compute_device}"
            return
        else:
            # Already resident on the right device type: nothing to do.
            logger.debug(f"access_chunk chunk {chunk_id} already on {compute_device}")
def clear_useless_chunks(self, target_device: torch.device):
"""
Move out all chunks not incompute on target_device.
"""
print(f"Offloading all chunks not used on {target_device}")
new_device = (
torch.device("cpu") if target_device.type == "cuda" else self.device
)
for chunk_id, chunk in self.id_to_chunk_map.items():
if (
chunk.get_device() is not None
and chunk.get_device().type == target_device.type
and chunk.get_state() != ChunkState.COMPUTE
and not chunk.is_pin()
):
if not self.prepare_device(new_device, chunk.get_payload_space()):
break
self.chunk_move(chunk_id, new_device)
def prepare_device(self, target_device: torch.device, need_bytes: int):
    """
    Make `need_bytes` room on `target_device`. If there are not enough empty
    space, we need to release or move away some chunks.

    Args:
        target_device: :class:`torch.device`.
        need_bytes: int.

    Returns:
        bool: True when the requested space is (or has been made) available,
        False when `target_device` cannot hold `need_bytes` at all.
    """
    if self._time_profile:
        global_timer.my_timer.start_profile("CHUNK_LIST_prepare_device")
    ava_chunk_mem_size = self.memory_tracer.available_chunk_mem(target_device.type)
    remaining_chunk_mem_size = self.memory_tracer.remaining_chunk_mem(
        target_device.type
    )
    logger.debug(
        f"prepare_target: device {target_device} need_bytes {need_bytes / 1e6} MB, "
        f"ava_chunk_mem_size {ava_chunk_mem_size / 1e6} MB, "
        f"remaining_chunk_mem_size {remaining_chunk_mem_size / 1e6} MB."
    )
    # TODO(jiaruifang) Situation where there is no space.
    # This condition is not good enough, we need to check if both CPU and GPU
    # don't have enough space.
    if ava_chunk_mem_size < need_bytes:
        logger.error(
            f"{target_device} has not enough space for {need_bytes} elements"
        )
        logger.error(
            f"{target_device} has not enough space for {need_bytes / 1e6} MB. "
            f"Device used Chunk Memory is {self.get_chunk_memory_used(target_device) / 1e6} MB. "
            f"Avaibale Chunk Memory is {ava_chunk_mem_size / 1e6} MB"
        )
        # BUGFIX: close the timer opened above before bailing out, otherwise
        # the profiler is left with an unmatched start_profile.
        if self._time_profile:
            global_timer.my_timer.finish_profile("CHUNK_LIST_prepare_device")
        return False
        # TODO(jiaruifang) We can catch the error and the release or move the chunks here.
        # raise RuntimeError(
        #     f"{target_device} has not enough space for {need_bytes / 1e6} MB. "
        #     f"Device used Chunk Memory is {self.get_chunk_memory_used(target_device) / 1e6} MB. "
        #     f"Avaibale Chunk Memory is {ava_chunk_mem_size / 1e6} MB"
        # )
    extra_need_bytes = need_bytes - remaining_chunk_mem_size
    logger.debug(
        f"{target_device} (ava_chunk_mem_size {ava_chunk_mem_size / 1e6} MB) "
        f"now remaining_chunk_mem_size size {remaining_chunk_mem_size / 1e6} MB, "
        f"needs {need_bytes / 1e6} MB"
    )
    # No need for new allocation.
    if extra_need_bytes <= 0:
        if self._time_profile:
            global_timer.my_timer.finish_profile("CHUNK_LIST_prepare_device")
        # BUGFIX: was a bare `return` (i.e. None). Callers such as
        # clear_useless_chunks treat a falsy result as "no room", so the
        # successful fast path must report True explicitly.
        return True
    logger.debug(
        f"the device {target_device} has no enough free chunk memory, "
        f"required size is {extra_need_bytes} bytes"
    )
    # Make some room on `target_device`.
    moved_list = self._chunk_to_move_out_for_room_making(
        extra_need_bytes, target_device
    )
    # TODO(jiaruifang) Here we assume the new device has enough room and force the chunk
    # to new device. However, the size of the chunk may be smaller than the ava_chunk_mem
    # of the new device and trigger bugs.
    new_device = (
        torch.device("cpu") if target_device.type == "cuda" else self.device
    )
    # Move the chunk to new device. If there are not enough space on the new device, abort.
    for idx in moved_list:
        self.chunk_move(idx, new_device)
    if self._time_profile:
        global_timer.my_timer.finish_profile("CHUNK_LIST_prepare_device")
    return True
def make_room(self, offload_size_in_bytes, target_device):
    r"""Evict `offload_size_in_bytes` worth of chunks away from `target_device`.

    Chunks in state `COMPUTE` are never moved. Evicted chunks go to CPU when
    the target is CUDA, otherwise to `self.device`.

    Args:
        offload_size_in_bytes: int.
        target_device: :class:`torch.device`.
    """
    if self._time_profile:
        global_timer.my_timer.start_profile("CHUNK_LIST_make_room")
    # Destination for the evicted chunks.
    destination = (
        self.device if target_device.type != "cuda" else torch.device("cpu")
    )
    eviction_ids = self._chunk_to_move_out_for_room_making(
        offload_size_in_bytes, target_device
    )
    for evicted_id in eviction_ids:
        self.chunk_move(evicted_id, destination)
    if self._time_profile:
        global_timer.my_timer.finish_profile("CHUNK_LIST_make_room")
def chunk_move(self, chunk_id: int, device: torch.device):
    r"""Move the chunk identified by `chunk_id` onto `device`.

    NOTE(): Please make sure `device` has enough remaining_chunk_mem before.

    Args:
        chunk_id: int.
        device: :class:`torch.device`.

    Raises:
        RuntimeError: if `device` lacks the free chunk memory for the payload.
    """
    if self._time_profile:
        global_timer.my_timer.start_profile("CHUNK_LIST_chunk_move")
    target_chunk = self.id_to_chunk_map[chunk_id]
    free_bytes = self.memory_tracer.remaining_chunk_mem(device.type)
    payload_bytes = target_chunk.get_payload_space()
    if free_bytes < payload_bytes:
        raise RuntimeError(
            f"chunk move failed. {device} has not {payload_bytes / 1e6} MB memory space. "
            f"Free space is {free_bytes / 1e6} MB. "
            f"The reason may be that the overall memory of CPU and GPU is not enough for the model."
        )
    # Only touch the payload when it actually resides somewhere else.
    if target_chunk.get_device() != device:
        logger.debug(f"move chunk {chunk_id} from {target_chunk.get_device()} to {device}")
        target_chunk.move(device)
    if self._time_profile:
        global_timer.my_timer.finish_profile("CHUNK_LIST_chunk_move")
def new_chunk(
    self,
    chunk_id: int,
    chunk_size: int,
    data_type: torch.dtype,
    is_dummy: bool = False,
    chunk_type: ChunkType = ChunkType.UNDEF,
):
    r"""Create a chunk without initializing its memory.

    The chunk is registered in `id_to_chunk_map` and appended to the id list
    of its `chunk_type`. The returned :class:`CommInfo` places the chunk in a
    communication group: group id and in-group offset are derived from the
    chunk's position within its type list and the world size.

    Args:
        chunk_id: int. Must not already be registered (RuntimeError otherwise).
        chunk_size: int.
        data_type: :class:`torch.dtype`.
        is_dummy: bool.
        chunk_type: :class:ChunkType.

    Returns:
        :class:`CommInfo`
    """
    if chunk_id in self.id_to_chunk_map:
        raise RuntimeError(
            f"chunk list new chunk with chunk_id {chunk_id} already existed"
        )
    self.id_to_chunk_map[chunk_id] = Chunk(
        capacity=chunk_size,
        data_type=data_type,
        chunk_id=chunk_id,
        memory_tracer=self.memory_tracer,
        # The memory cache is only wired in when chunk-memory caching is on.
        memory_cache=self.memory_cache if self.with_mem_cache else None,
        local_rank=self.local_rank,
        is_dummy=is_dummy,
    )
    world_size = get_world_size()
    global_rank = get_rank()
    self.chunk_type_to_id_list_map[chunk_type].append(chunk_id)
    # Record the chunk for later life-cycle profiling, if profiling is active.
    if profiler.started():
        profiler.chunk_life_cycle[chunk_id] = {"type": chunk_type, "life_cycle": []}
    num_type_chunk = len(self.chunk_type_to_id_list_map[chunk_type])
    comm_info = CommInfo(
        chunk_type=chunk_type,
        group_id=(num_type_chunk - 1) // world_size,
        offset=(num_type_chunk - 1) % world_size,
    )
    logger.debug(
        f"global_rank {global_rank}, allocate with new chunk chunk_id {chunk_id} size {chunk_size} "
        f"data_type {data_type} comm group {comm_info}"
    )
    return comm_info
def is_empty(self, chunk_type: ChunkType):
    r"""Return True when no chunk of `chunk_type` has been registered yet."""
    registered_ids = self.chunk_type_to_id_list_map[chunk_type]
    return len(registered_ids) == 0
def last_chunk_id(self, chunk_type: ChunkType):
    r"""Return the id of the most recently added chunk of `chunk_type`.

    Raises:
        RuntimeError: if no chunk of `chunk_type` exists.
    """
    if self.is_empty(chunk_type):
        raise RuntimeError(
            f"Call last_chunk_id on an empty {chunk_type} chunk list"
        )
    id_list = self.chunk_type_to_id_list_map[chunk_type]
    return id_list[-1]
def generate_chunk(self):
    r"""Yield every registered chunk together with its id as (chunk_id, chunk)."""
    yield from self.id_to_chunk_map.items()
def _chunk_to_move_out_for_room_making(
self, size_in_bytes: int, target_device: torch.device
) -> List:
r"""Find the chunks to move for making `size_in_bytes` of room on `target_device`.
Args:
size_in_bytes: int.
target_device: :class:`torch.device`.
Returns:
A list of chunk_ids.
"""
moved_list = self.chunk_eviction_policy.derive_eviction_list(
self.id_to_chunk_map, size_in_bytes, target_device
)
return moved_list
def update_state(self, chunk_id, old_state, new_state):
    r"""Transition the chunk identified by `chunk_id` from `old_state` to `new_state`."""
    target_chunk = self.id_to_chunk_map[chunk_id]
    target_chunk.update_state(old_state, new_state)
| 39.489311 | 104 | 0.637113 |
acf96b6516b401ea8d3169deeb95ccc39c420f4f | 3,145 | py | Python | learning_log/learning_log/settings.py | luckelectricity/crash | e920d41f7b4e5567199cddd57f21edf7b7bd6bc7 | [
"MIT"
] | null | null | null | learning_log/learning_log/settings.py | luckelectricity/crash | e920d41f7b4e5567199cddd57f21edf7b7bd6bc7 | [
"MIT"
] | null | null | null | learning_log/learning_log/settings.py | luckelectricity/crash | e920d41f7b4e5567199cddd57f21edf7b7bd6bc7 | [
"MIT"
] | null | null | null | """
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 're^%)^48zbance4jx_#)zl9v9f2v@ldqauxcsqf8nb9+^w^=69'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'learning_logs',
'users'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.569106 | 91 | 0.697297 |
acf96c2e5f68cbb6f2960ad7f579c019d5f63d96 | 10,801 | py | Python | pm4pymdl/algo/mvp/gen_framework4/versions_discovery/classic.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | null | null | null | pm4pymdl/algo/mvp/gen_framework4/versions_discovery/classic.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 3 | 2021-07-07T15:32:55.000Z | 2021-07-07T16:15:36.000Z | pm4pymdl/algo/mvp/gen_framework4/versions_discovery/classic.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | null | null | null | import math
from statistics import mean
from pm4py.objects.conversion.log import converter
from pm4pymdl.algo.mvp.utils import succint_mdl_to_exploded_mdl
from collections import Counter
def apply(df, parameters=None):
if parameters is None:
parameters = {}
stream = get_stream_from_dataframe(df, parameters=parameters)
return apply_stream(stream, parameters=parameters)
def get_stream_from_dataframe(df, parameters=None):
if parameters is None:
parameters = {}
df_type = df.type
df = df.sort_values(["event_timestamp", "event_id"])
if df_type == "succint":
df = succint_mdl_to_exploded_mdl.apply(df)
columns = [x for x in df.columns if
not x.startswith("event") or x == "event_activity" or x == "event_id" or x == "event_timestamp"]
df = df[columns]
stream = converter.apply(df, variant=converter.Variants.TO_EVENT_STREAM)
return stream
def apply_stream(stream, parameters=None):
if parameters is None:
parameters = {}
support = parameters["support"] if "support" in parameters else 1
epsilon = parameters["epsilon"] if "epsilon" in parameters else 0.0
debug = parameters["debug"] if "debug" in parameters else False
noise_obj_number = parameters["noise_obj_number"] if "noise_obj_number" in parameters else 0.0
types_lifecycle = {}
eo = {}
eot = {}
eoe = {}
ee = {}
timestamps = {}
start_activities = dict()
end_activities = dict()
for ev in stream:
cl = [k for k in ev if not k.startswith("event_") and str(ev[k]) != "nan"][0]
if not cl in types_lifecycle:
types_lifecycle[cl] = {}
o = ev[cl]
if not o in types_lifecycle[cl]:
types_lifecycle[cl][o] = []
types_lifecycle[cl][o].append(ev)
for t in types_lifecycle:
eot[t] = dict()
start_activities[t] = Counter()
end_activities[t] = Counter()
objects_lifecycle = types_lifecycle[t]
for o in objects_lifecycle:
evs = objects_lifecycle[o]
i = 0
while i < len(evs):
i1 = evs[i]["event_id"]
a1 = evs[i]["event_activity"]
if i == 0:
start_activities[t][a1] += 1
t1 = evs[i]["event_timestamp"].timestamp()
if a1 not in eo:
eo[a1] = set()
if a1 not in eot[t]:
eot[t][a1] = set()
eo[a1].add((i1, o))
eot[t][a1].add((i1, o))
if i < len(evs) - 1:
i2 = evs[i + 1]["event_id"]
a2 = evs[i + 1]["event_activity"]
t2 = evs[i + 1]["event_timestamp"].timestamp()
if not (a1, t, a2) in eoe:
eoe[(a1, t, a2)] = set()
ee[(a1, t, a2)] = set()
eoe[(a1, t, a2)].add((i1, o, i2))
ee[(a1, t, a2)].add((i1, i2))
if not (i1, o, i2) in timestamps:
timestamps[(i1, o, i2)] = []
if not (i1, i2) in timestamps:
timestamps[(i1, i2)] = []
timestamps[(i1, o, i2)].append(t2 - t1)
timestamps[(i1, i2)].append(t2 - t1)
else:
end_activities[t][a1] += 1
i = i + 1
for el in timestamps:
timestamps[el] = mean(timestamps[el])
ret = {}
ret["activities"] = {}
for act in eo:
ret["activities"][act] = {}
ret["activities"][act]["events"] = len({x[0] for x in eo[act]})
ret["activities"][act]["objects"] = len({x[1] for x in eo[act]})
ret["activities"][act]["events_set"] = {x[0] for x in eo[act]}
ret["activities"][act]["objects_set"] = {x[1] for x in eo[act]}
ret["activities"][act]["eo_set"] = eo[act]
ret["activities"][act]["eo"] = len(eo[act])
ret["types_view"] = {}
activities_mapping = {}
activities_mapping_count = {}
for t in types_lifecycle:
ret["types_view"][t] = {"edges": {}, "activities": {}, "start_activities": dict(start_activities[t]),
"end_activities": dict(end_activities[t])}
for act in eot[t]:
values = eot[t][act]
val_group = {x[0]: set() for x in values}
for x in values:
val_group[x[0]].add(x[1])
val_group = list(len(y) for x, y in val_group.items())
ret["types_view"][t]["activities"][act] = {}
ret["types_view"][t]["activities"][act]["events"] = {x[0] for x in values}
ret["types_view"][t]["activities"][act]["objects"] = {x[1] for x in values}
ret["types_view"][t]["activities"][act]["eo"] = values
ret["types_view"][t]["activities"][act]["min_obj"] = min(val_group)
ret["types_view"][t]["activities"][act]["max_obj"] = max(val_group)
ret["types_view"][t]["activities"][act]["val_group"] = val_group
available_keys = {x for x in eoe.keys() if x[1] == t}
for k in available_keys:
a1 = k[0]
a2 = k[2]
values = eoe[k]
values_ee = ee[k]
values_timestamp_eoe = mean([timestamps[v] for v in values])
values_timestamp_ee = mean([timestamps[v] for v in values_ee])
g_1_2 = group_1_2(values)
g_1_3 = group_1_3(values)
g_1_2 = g_1_2[:math.ceil(len(g_1_2) * (1.0 - noise_obj_number))]
g_1_3 = g_1_3[:math.ceil(len(g_1_3) * (1.0 - noise_obj_number))]
g_1_2_min = min(g_1_2)
g_1_2_max = max(g_1_2)
g_1_3_min = min(g_1_3)
g_1_3_max = max(g_1_3)
ret["types_view"][t]["edges"][(a1, a2)] = {}
ret["types_view"][t]["edges"][(a1, a2)]["events"] = {(x[0], x[2]) for x in values}
ret["types_view"][t]["edges"][(a1, a2)]["objects"] = {x[1] for x in values}
ret["types_view"][t]["edges"][(a1, a2)]["eo"] = values
ret["types_view"][t]["edges"][(a1, a2)]["support_entry"] = ret["types_view"][t]["activities"][a2][
"objects"].intersection(ret["types_view"][t]["edges"][(a1, a2)]["objects"])
ret["types_view"][t]["edges"][(a1, a2)]["dev_entry"] = ret["types_view"][t]["activities"][a2][
"objects"].difference(ret["types_view"][t]["edges"][(a1, a2)]["objects"])
ret["types_view"][t]["edges"][(a1, a2)]["support_exit"] = ret["types_view"][t]["activities"][a1][
"objects"].intersection(ret["types_view"][t]["edges"][(a1, a2)]["objects"])
ret["types_view"][t]["edges"][(a1, a2)]["dev_exit"] = ret["types_view"][t]["activities"][a1][
"objects"].difference(ret["types_view"][t]["edges"][(a1, a2)]["objects"])
sen = len(ret["types_view"][t]["edges"][(a1, a2)]["support_entry"])
den = len(ret["types_view"][t]["edges"][(a1, a2)]["dev_entry"])
sex = len(ret["types_view"][t]["edges"][(a1, a2)]["support_exit"])
dex = len(ret["types_view"][t]["edges"][(a1, a2)]["dev_exit"])
ret["types_view"][t]["edges"][(a1, a2)]["perc_entry"] = sen / (sen + den)
ret["types_view"][t]["edges"][(a1, a2)]["perc_exit"] = sex / (sex + dex)
if sen >= support and den / sen <= epsilon:
ret["types_view"][t]["edges"][(a1, a2)]["must_entry"] = True
else:
ret["types_view"][t]["edges"][(a1, a2)]["must_entry"] = False
if sex >= support and dex / sex <= epsilon:
ret["types_view"][t]["edges"][(a1, a2)]["must_exit"] = True
else:
ret["types_view"][t]["edges"][(a1, a2)]["must_exit"] = False
ret["types_view"][t]["edges"][(a1, a2)]["min_exit_obj"] = g_1_2_min
ret["types_view"][t]["edges"][(a1, a2)]["max_exit_obj"] = g_1_2_max
ret["types_view"][t]["edges"][(a1, a2)]["min_entry_obj"] = g_1_3_min
ret["types_view"][t]["edges"][(a1, a2)]["max_entry_obj"] = g_1_3_max
ret["types_view"][t]["edges"][(a1, a2)]["semantics"] = "EXI=%d..%d\nENT=%d..%d" % (
g_1_2_min, g_1_2_max, g_1_3_min, g_1_3_max)
ret["types_view"][t]["edges"][(a1, a2)]["semantics_list"] = [[g_1_2_min, g_1_2_max], [g_1_3_min, g_1_3_max]]
ret["types_view"][t]["edges"][(a1, a2)]["performance_events"] = values_timestamp_ee
ret["types_view"][t]["edges"][(a1, a2)]["performance_eo"] = values_timestamp_eoe
for edge in ret["types_view"][t]["edges"]:
ret["types_view"][t]["edges"][edge]["events_set"] = ret["types_view"][t]["edges"][edge]["events"]
ret["types_view"][t]["edges"][edge]["objects_set"] = ret["types_view"][t]["edges"][edge]["objects"]
ret["types_view"][t]["edges"][edge]["eo_set"] = ret["types_view"][t]["edges"][edge]["eo"]
ret["types_view"][t]["edges"][edge]["events"] = len(ret["types_view"][t]["edges"][edge]["events"])
ret["types_view"][t]["edges"][edge]["objects"] = len(ret["types_view"][t]["edges"][edge]["objects"])
ret["types_view"][t]["edges"][edge]["eo"] = len(ret["types_view"][t]["edges"][edge]["eo"])
for act in ret["types_view"][t]["activities"]:
o = len(ret["types_view"][t]["activities"][act]["objects"])
if act not in activities_mapping or activities_mapping_count[act] < o:
activities_mapping[act] = t
activities_mapping_count[act] = o
ret["types_view"][t]["activities"][act]["events_set"] = ret["types_view"][t]["activities"][act]["events"]
ret["types_view"][t]["activities"][act]["objects_set"] = ret["types_view"][t]["activities"][act]["objects"]
ret["types_view"][t]["activities"][act]["eo_set"] = ret["types_view"][t]["activities"][act]["eo"]
ret["types_view"][t]["activities"][act]["events"] = len(ret["types_view"][t]["activities"][act]["events"])
ret["types_view"][t]["activities"][act]["objects"] = o
ret["types_view"][t]["activities"][act]["eo"] = len(ret["types_view"][t]["activities"][act]["eo"])
ret["activities_mapping"] = activities_mapping
return ret
def group_1_2(values):
ret = {}
for val in values:
e1 = val[0]
o = val[1]
if not e1 in ret:
ret[e1] = set()
ret[e1].add(o)
return list(len(x) for x in ret.values())
def group_1_3(values):
ret = {}
for val in values:
e1 = val[2]
o = val[1]
if not e1 in ret:
ret[e1] = set()
ret[e1].add(o)
return list(len(x) for x in ret.values())
| 46.96087 | 120 | 0.529673 |
acf96c36b6f5943c56bb8a8c72492d6a3cc81dfb | 841 | py | Python | Algorithm/Easy/1-500/156MergeIntervals.py | MartinYan623/Lint-Code | 57d2fa441d6496234615736e3f55d0b71aaa51dc | [
"MIT"
] | null | null | null | Algorithm/Easy/1-500/156MergeIntervals.py | MartinYan623/Lint-Code | 57d2fa441d6496234615736e3f55d0b71aaa51dc | [
"MIT"
] | 1 | 2020-08-08T10:14:53.000Z | 2020-08-08T10:18:37.000Z | Algorithm/Easy/1-500/156MergeIntervals.py | MartinYan623/Lint-Code | 57d2fa441d6496234615736e3f55d0b71aaa51dc | [
"MIT"
] | null | null | null | """
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
class Solution:
    """
    @param intervals: interval list.
    @return: A new interval list.
    """

    def merge(self, intervals):
        """Merge all overlapping intervals in a (possibly unsorted) list."""
        if len(intervals) < 2:  # zero or one interval: nothing to merge
            return intervals
        ordered = sorted(intervals, key=lambda iv: iv.start)  # sort by start
        merged = [ordered[0]]
        for current in ordered[1:]:
            last = merged[-1]
            if last.end >= current.start:
                # Overlap: extend the last merged interval to the larger end.
                merged[-1] = Interval(last.start, max(last.end, current.end))
            else:
                merged.append(current)
        return merged
acf96cf9820c89470363d136d3c30cbbe379df89 | 713 | py | Python | temperature/sample.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 3 | 2017-09-03T17:17:44.000Z | 2017-12-10T12:26:46.000Z | temperature/sample.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | null | null | null | temperature/sample.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 2 | 2017-10-01T01:10:55.000Z | 2018-07-15T19:49:29.000Z | # reads data from USB-serial to collect data from development board
# source: Python for Secret Agents, Volume II, 2015 (Safari online)
# usage:
# 1. run tmp36.py on development board
# 2. run this collector on computer
# 3. development board and computer are connected via USB
# pre-condition: pyserial installed on computer
import serial, sys
def sample(port, baudrate=9600, limit=128):
    """Read up to `limit` lines from the serial `port` and echo them to stdout.

    Opens the USB-serial connection to the development board with a 1 second
    read timeout; each received line is decoded as ASCII and printed.

    Args:
        port: serial device path, e.g. '/dev/tty.SLAB_USBtoUART'.
        baudrate: serial line speed (default 9600).
        limit: number of read attempts before returning (default 128).
            # NOTE(review): a timed-out (empty) read also decrements `limit`
            # here -- confirm that is the intended counting.
    """
    with serial.Serial(port, baudrate, timeout=1) as t_sensor:
        while limit != 0:
            line = t_sensor.readline()
            if line:
                print(line.decode("ascii").rstrip())
                sys.stdout.flush()
            limit -= 1
# Continuously restart sampling from the SLAB USB-to-UART bridge at 115200 baud.
while True:
    sample('/dev/tty.SLAB_USBtoUART', 115200)
| 33.952381 | 67 | 0.659187 |
acf96cfa6da8ba0af937bc9a0bb6fbeedb11c9c3 | 229 | py | Python | src/setup.py | abhi18av/drug-resistance-prediction-hackathon | 55821e5aad92dfa56da872898f095abf925a7184 | [
"MIT"
] | 2 | 2020-09-23T08:54:50.000Z | 2021-01-12T15:16:47.000Z | src/setup.py | abhi18av/drug-resistance-prediction-hackathon | 55821e5aad92dfa56da872898f095abf925a7184 | [
"MIT"
] | 8 | 2020-09-21T10:00:07.000Z | 2020-10-04T13:17:41.000Z | src/setup.py | abhi18av/drug-resistance-prediction-hackathon | 55821e5aad92dfa56da872898f095abf925a7184 | [
"MIT"
] | 4 | 2020-09-21T12:17:41.000Z | 2020-09-23T08:54:54.000Z | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Drug Resistance Prediction based on SNPs',
author='Abhinav Sharma',
license='MIT',
)
| 20.818182 | 59 | 0.681223 |
acf96def0329b3e8a96141403e73c6bf2d4630e3 | 4,705 | py | Python | test/functional/importprunedfunds.py | Jcing95/iop-core | fb9625ff8d2d61da01b566492f1ddd8ebc388ca2 | [
"MIT"
] | 1 | 2017-09-26T11:17:15.000Z | 2017-09-26T11:17:15.000Z | test/functional/importprunedfunds.py | Jcing95/iop-core | fb9625ff8d2d61da01b566492f1ddd8ebc388ca2 | [
"MIT"
] | 1 | 2017-10-05T08:58:47.000Z | 2017-10-05T08:58:47.000Z | test/functional/importprunedfunds.py | Jcing95/iop-core | fb9625ff8d2d61da01b566492f1ddd8ebc388ca2 | [
"MIT"
] | 1 | 2017-09-30T10:58:24.000Z | 2017-09-30T10:58:24.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from test_framework.test_framework import IoPTestFramework
from test_framework.util import *
class ImportPrunedFundsTest(IoPTestFramework):
    """Functional test for the importprunedfunds / removeprunedfunds RPCs.

    Node 0 mines and funds three address kinds (plain address, pubkey,
    privkey); node 1 imports the pruned funds via txout proofs and then
    removes them again, checking watch-only status and balances each step.
    """

    def set_test_params(self):
        # Fresh chain with two connected nodes.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        self.log.info("Mining blocks...")
        # 101 blocks so the first coinbase matures and is spendable.
        self.nodes[0].generate(101)

        self.sync_all()

        # address
        address1 = self.nodes[0].getnewaddress()
        # pubkey
        address2 = self.nodes[0].getnewaddress()
        address2_pubkey = self.nodes[0].validateaddress(address2)['pubkey']     # Using pubkey
        # privkey
        address3 = self.nodes[0].getnewaddress()
        address3_privkey = self.nodes[0].dumpprivkey(address3)                  # Using privkey

        # Check only one address
        address_info = self.nodes[0].validateaddress(address1)
        assert_equal(address_info['ismine'], True)

        self.sync_all()

        # Node 1 sync test
        assert_equal(self.nodes[1].getblockcount(), 101)

        # Address Test - before import: node 1 knows none of the addresses.
        address_info = self.nodes[1].validateaddress(address1)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        address_info = self.nodes[1].validateaddress(address2)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        address_info = self.nodes[1].validateaddress(address3)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        # Send funds to self and capture raw tx + merkle proof for each send.
        txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
        self.nodes[0].generate(1)
        rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
        proof1 = self.nodes[0].gettxoutproof([txnid1])

        txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
        self.nodes[0].generate(1)
        rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
        proof2 = self.nodes[0].gettxoutproof([txnid2])

        txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
        self.nodes[0].generate(1)
        rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
        proof3 = self.nodes[0].gettxoutproof([txnid3])

        self.sync_all()

        # Import with no affiliated address: must be rejected.
        assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)

        balance1 = self.nodes[1].getbalance("", 0, True)
        assert_equal(balance1, Decimal(0))

        # Import with affiliated address with no rescan
        self.nodes[1].importaddress(address2, "add2", False)
        result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2)
        balance2 = self.nodes[1].getbalance("add2", 0, True)
        assert_equal(balance2, Decimal('0.05'))

        # Import with private key with no rescan
        self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False)
        self.nodes[1].importprunedfunds(rawtxn3, proof3)
        balance3 = self.nodes[1].getbalance("add3", 0, False)
        assert_equal(balance3, Decimal('0.025'))
        balance3 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance3, Decimal('0.075'))

        # Addresses Test - after import: watch-only vs spendable as expected.
        address_info = self.nodes[1].validateaddress(address1)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].validateaddress(address2)
        assert_equal(address_info['iswatchonly'], True)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].validateaddress(address3)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], True)

        # Remove transactions; removing an unknown txid must fail.
        assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)

        balance1 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance1, Decimal('0.075'))

        self.nodes[1].removeprunedfunds(txnid2)
        balance2 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance2, Decimal('0.025'))

        self.nodes[1].removeprunedfunds(txnid3)
        balance3 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance3, Decimal('0.0'))
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    ImportPrunedFundsTest().main()
| 40.560345 | 117 | 0.657811 |
acf96fb6eb8481c25b513c230f4dd9ac15b17205 | 6,053 | py | Python | Pyrado/scripts/sandbox/sb_sbi.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | Pyrado/scripts/sandbox/sb_sbi.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | Pyrado/scripts/sandbox/sb_sbi.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 1 | 2020-11-24T15:25:26.000Z | 2020-11-24T15:25:26.000Z | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Testing the simulation-based inference (SBI) toolbox using a very basic example
"""
import functools
import numpy as np
import sbi.utils as utils
import torch as to
import torch.nn as nn
from matplotlib import pyplot as plt
from sbi.inference import simulate_for_sbi, SNPE_C
from sbi.user_input.user_input_checks import prepare_for_sbi
from sbi.utils import posterior_nn
import pyrado
from pyrado.sampling.sbi_embeddings import Embedding, LastStepEmbedding
from pyrado.environments.pysim.one_mass_oscillator import OneMassOscillatorSim
from pyrado.environments.sim_base import SimEnv
from pyrado.plotting.distribution import draw_posterior_distr_2d
from pyrado.plotting.utils import num_rows_cols_from_length
from pyrado.policies.base import Policy
from pyrado.policies.special.dummy import IdlePolicy
from pyrado.sampling.rollout import rollout
from pyrado.spaces.singular import SingularStateSpace
def simple_omo_sim(domain_params: to.Tensor, env: SimEnv, policy: Policy, embedding: Embedding) -> to.Tensor:
    """
    Minimal simulator interface for sbi: run one rollout per set of domain
    parameters and return the embedded observations.
    See `SimRolloutSamplerForSBI` for the full-featured counterpart.
    """
    batch = to.atleast_2d(domain_params)
    observations = []
    for params in batch:
        traj = rollout(
            env,
            policy,
            eval=True,
            stop_on_done=False,
            reset_kwargs=dict(domain_param=dict(k=params[0], d=params[1])),
        )
        observations.append(to.from_numpy(traj.observations).to(dtype=to.get_default_dtype()))
    # Batched domain params in the 1st dim, a single rollout per param set in the 2nd dim.
    stacked = to.stack(observations, dim=0).unsqueeze(1)
    return embedding(Embedding.pack(stacked))
if __name__ == "__main__":
    # Config
    plt.rcParams.update({"text.usetex": True})
    # NOTE(review): `basic_wrapper` appears unused below -- confirm it can be removed.
    basic_wrapper = False
    pyrado.set_seed(0)
    # Environment and policy (idle policy: the oscillator evolves freely)
    env = OneMassOscillatorSim(dt=1 / 200, max_steps=200)
    env.init_space = SingularStateSpace(np.array([-0.7, 0])) # no variance over the initial state
    policy = IdlePolicy(env.spec)
    # Domain parameter mapping and prior, only use 2 domain parameters here to simplify the plotting later
    dp_mapping = {0: "k", 1: "d"}
    prior = utils.BoxUniform(low=to.tensor([20.0, 0.0]), high=to.tensor([40.0, 0.3]))
    # Create time series embedding (keeps only the last observation of each rollout)
    embedding = LastStepEmbedding(env.spec, dim_data=env.spec.obs_space.flat_dim)
    # Wrap the simulator to abstract the env and the policy away from sbi
    w_simulator = functools.partial(simple_omo_sim, env=env, policy=policy, embedding=embedding)
    # Learn a likelihood from the simulator via SNPE-C with a masked autoregressive flow
    density_estimator = posterior_nn(model="maf", embedding_net=nn.Identity(), hidden_features=20, num_transforms=4)
    snpe = SNPE_C(prior, density_estimator)
    simulator, prior = prepare_for_sbi(w_simulator, prior)
    domain_param, data_sim = simulate_for_sbi(
        simulator=simulator,
        proposal=prior,
        num_simulations=300,
        num_workers=1,
    )
    snpe.append_simulations(domain_param, data_sim)
    density_estimator = snpe.train()
    posterior = snpe.build_posterior(density_estimator)
    # Create a fake (random) true distribution: ground-truth params perturbed by ~20% relative noise
    num_instances_real = 1
    dp_gt = {"k": 30, "d": 0.1}
    domain_param_gt = to.tensor([dp_gt[key] for _, key in dp_mapping.items()])
    domain_param_gt = domain_param_gt.repeat((num_instances_real, 1))
    domain_param_gt += domain_param_gt * to.randn(num_instances_real, 2) / 5
    data_real = to.cat([simulator(dp) for dp in domain_param_gt], dim=0)
    # data_real = Embedding.unpack(data_real, dim_data_orig=env.spec.obs_space.flat_dim)
    assert data_real.shape[0] == num_instances_real
    # Plot the posterior, one subplot per real-world instance
    _, axs = plt.subplots(*num_rows_cols_from_length(num_instances_real), figsize=(14, 14), tight_layout=True)
    axs = np.atleast_2d(axs)
    draw_posterior_distr_2d(
        axs,
        "separate",
        posterior,
        data_real,
        dp_mapping,
        dims=(0, 1),
        condition=Embedding.pack(domain_param_gt),
        prior=prior,
    )
    # Plot the ground truth domain parameters on top of each posterior
    for idx, dp_gt in enumerate(domain_param_gt):
        axs[idx // axs.shape[1], idx % axs.shape[1]].scatter(
            x=dp_gt[0], y=dp_gt[1], marker="o", s=30, zorder=3, color="white", edgecolors="black"
        )
    plt.show()
| 44.182482 | 116 | 0.735338 |
acf970183cb4a9f14b97e0e61ae1c1b5efbbb8a6 | 3,171 | py | Python | SpiMediaGallery/main/migrations/0021_creates_file_model_migrate_data.py | Swiss-Polar-Institute/spi-media-gallery | 2f66f938cbe1a7a25a5971d42abb1b0b5deca31e | [
"MIT"
] | 5 | 2020-02-21T20:38:50.000Z | 2022-02-19T11:00:46.000Z | SpiMediaGallery/main/migrations/0021_creates_file_model_migrate_data.py | Swiss-Polar-Institute/spi-media-gallery | 2f66f938cbe1a7a25a5971d42abb1b0b5deca31e | [
"MIT"
] | 23 | 2019-10-01T17:13:39.000Z | 2022-01-21T20:02:26.000Z | SpiMediaGallery/main/migrations/0021_creates_file_model_migrate_data.py | Swiss-Polar-Institute/spi-media-gallery | 2f66f938cbe1a7a25a5971d42abb1b0b5deca31e | [
"MIT"
] | 2 | 2022-02-03T08:52:51.000Z | 2022-02-03T08:58:00.000Z | # Generated by Django 2.2.2 on 2019-07-31 14:46
from django.db import migrations, models
import django.db.models.deletion
def copy_data_to_file_model(apps, queryset):
    """Create a ``File`` row for every record in ``queryset`` and link it back.

    Copies ``object_storage_key``, ``md5`` and ``file_size`` (stored as
    ``size`` on ``File``) from each record, saves the new ``File``, then
    points the record's ``file`` foreign key at it and saves the record.
    """
    file_model = apps.get_model('main', 'File')
    for record in queryset:
        stored = file_model(
            object_storage_key=record.object_storage_key,
            md5=record.md5,
            size=record.file_size,
        )
        stored.save()
        record.file = stored
        record.save()
def migrate_to_files_model(apps, schema_editor):
    """Backfill ``File`` rows for all existing ``Medium`` and ``MediumResized`` records."""
    for model_name in ('Medium', 'MediumResized'):
        model = apps.get_model('main', model_name)
        copy_data_to_file_model(apps, model.objects.all())
class Migration(migrations.Migration):
    """Extract file metadata into a dedicated ``File`` model.

    Creates ``main.File``, backfills one ``File`` row per existing ``Medium``
    and ``MediumResized`` record (see ``migrate_to_files_model``), links both
    models to ``File`` via a new ``file`` foreign key, and then removes the
    superseded per-model storage columns.

    NOTE(review): the ``RunPython`` step has no ``reverse_code``, so this
    migration cannot be unapplied -- confirm that is acceptable.
    """
    dependencies = [
        ('main', '0020_auto_20190727_0651'),
    ]
    operations = [
        # New model holding object-storage location, checksum and size.
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_storage_key', models.CharField(max_length=1024)),
                ('md5', models.CharField(null=True, blank=True, max_length=32)),
                ('size', models.BigIntegerField()),
            ],
        ),
        # Replace the MediumForPagination proxy with MediumForView.
        migrations.DeleteModel(
            name='MediumForPagination',
        ),
        migrations.CreateModel(
            name='MediumForView',
            fields=[
            ],
            options={
                'indexes': [],
                'proxy': True,
                'constraints': [],
            },
            bases=('main.medium',),
        ),
        migrations.AlterField(
            model_name='medium',
            name='copyright',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='main.Copyright'),
        ),
        migrations.AlterField(
            model_name='medium',
            name='license',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='main.License'),
        ),
        # Link both media models to File; nullable so the backfill can run next.
        migrations.AddField(
            model_name='medium',
            name='file',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='main.File'),
        ),
        migrations.AddField(
            model_name='mediumresized',
            name='file',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='main.File'),
        ),
        # Data migration: copy existing storage metadata into File rows.
        migrations.RunPython(migrate_to_files_model),
        # Drop the columns now duplicated on File.
        # NOTE(review): Medium.md5 is not removed here while MediumResized.md5
        # is -- confirm this asymmetry is intentional.
        migrations.RemoveField(
            model_name='medium',
            name='object_storage_key',
        ),
        migrations.RemoveField(
            model_name='medium',
            name='file_size',
        ),
        migrations.RemoveField(
            model_name='mediumresized',
            name='md5',
        ),
        migrations.RemoveField(
            model_name='mediumresized',
            name='object_storage_key',
        ),
        migrations.RemoveField(
            model_name='mediumresized',
            name='file_size',
        ),
    ]
| 31.71 | 125 | 0.568906 |
acf970b0a5322fdf3899ae5924bd7dc73a229e8e | 9,290 | py | Python | docs/source/conf.py | shivank-gupta/vyked | 98836b3230775c5ad52dfc72291b2958d3a244c9 | [
"MIT"
] | 57 | 2015-02-28T07:42:45.000Z | 2021-11-13T08:41:06.000Z | docs/source/conf.py | niks660097/async_framework | 57591d167bee365d5aa9bb5446b952095506e040 | [
"MIT"
] | 106 | 2015-05-27T05:34:06.000Z | 2021-04-21T04:34:42.000Z | docs/source/conf.py | nerandell/vyked | 7b2554454a50110e15928db7105e074a9e521517 | [
"MIT"
] | 22 | 2015-05-27T05:08:15.000Z | 2018-09-18T12:08:25.000Z | #!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
#
# vyked documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 17 13:49:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../vyked'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'vyked'
copyright = '2015, Vyked Project'
author = 'Ankit Chandawala'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version/release are hard-coded here -- presumably they must be
# kept in sync with the package version in setup.py; confirm.
# The short X.Y version.
version = '1.2.50'
# The full version, including alpha/beta/rc tags.
release = '1.2.50'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'vykeddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'vyked.tex', 'vyked Documentation',
     'Ankit Chandawala, Kashif Razzaqui, Anuvrat Parashar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'vyked', 'vyked Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'vyked', 'vyked Documentation',
     author, 'vyked', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.924399 | 79 | 0.716362 |
acf970ecd0afaf952553956c4a6c4fad31b8fc25 | 2,057 | py | Python | tests/test_core_ansible.py | iRomi14/accelpy | 997aec98e0e410c0f270db085625def8a0cb8f2c | [
"Apache-2.0"
] | null | null | null | tests/test_core_ansible.py | iRomi14/accelpy | 997aec98e0e410c0f270db085625def8a0cb8f2c | [
"Apache-2.0"
] | null | null | null | tests/test_core_ansible.py | iRomi14/accelpy | 997aec98e0e410c0f270db085625def8a0cb8f2c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""Ansible handler tests"""
def mock_ansible_local(config_dir):
    """
    Mock Ansible playbook performing a local do-nothing execution.

    Args:
        config_dir (py.path.local): Configuration directory.
    """
    from accelpy._common import yaml_write

    playbook = [dict(hosts="127.0.0.1", connection="local")]
    yaml_write(playbook, config_dir.join('playbook.yml'))
def test_ansible(tmpdir):
    """
    Test the Ansible handler end to end: configuration creation, linting
    and Galaxy role installation.

    Args:
        tmpdir (py.path.local): tmpdir pytest fixture
    """
    from accelpy._ansible import Ansible
    from accelpy._common import yaml_read, json_write
    source_dir = tmpdir.join('source').ensure(dir=True)
    config_dir = tmpdir.join('config').ensure(dir=True)
    variables = dict(key='value')
    # Ensure Accelize "cred.json" exists
    json_write(dict(client_secret='', client_id=''),
               source_dir.join('cred.json'))
    # Test: Create configuration (With not specific provider and application)
    ansible = Ansible(config_dir, variables=variables, user_config=source_dir)
    ansible.create_configuration()
    playbook = yaml_read(config_dir.join('playbook.yml'))[0]
    assert 'pre_tasks' in playbook
    assert playbook['vars'] == variables
    assert playbook['roles'] == ['common.init']
    assert config_dir.join('cred.json').isfile()
    # Test: Re-create should not raise
    ansible.create_configuration()
    # Test: lint should not raise on basic configuration
    ansible.lint()
    # Test: Galaxy install role
    # NOTE(review): presumably hits the Ansible Galaxy network API --
    # confirm this test is expected to have network access.
    ansible.galaxy_install(['dev-sec.os-hardening', 'dev-sec.ssh-hardening'])
    # Test: Galaxy install should do nothing if no roles
    ansible.galaxy_install([])
    # Test: Create configuration (with application that requires dependencies)
    ansible = Ansible(config_dir, application_type='container_service')
    ansible.create_configuration()
    playbook = yaml_read(config_dir.join('playbook.yml'))[0]
    assert 'pre_tasks' in playbook
    assert not playbook['vars']
    assert 'container_service' in playbook['roles']
acf97177782637c3ee02249418d807da69834028 | 893 | py | Python | app/framework/requests/__init__.py | polowis/Pandoru | 99cd3d29c65e10041fc630af20ab56a6da0035f2 | [
"MIT"
] | null | null | null | app/framework/requests/__init__.py | polowis/Pandoru | 99cd3d29c65e10041fc630af20ab56a6da0035f2 | [
"MIT"
] | null | null | null | app/framework/requests/__init__.py | polowis/Pandoru | 99cd3d29c65e10041fc630af20ab56a6da0035f2 | [
"MIT"
] | null | null | null | from flask import request
from app.framework.requests.validate_request import Validator
def requests(name):
    """Return the value of form field ``name`` from the current Flask request."""
    form_data = request.form
    return form_data.get(name)
class Request:
    """Validates form fields of the current Flask request against named rules."""

    # Validation rule names accepted by `validate`.
    TYPE = ['integer', 'alpha', 'alphanumeric', 'email']

    def register(self, validation: list):
        """Validate each ``(field_name, rule)`` pair against the current request.

        A field that cannot be read or validated is reported instead of
        raising, so one bad field does not abort the whole registration.
        """
        for index, value in validation:
            try:
                result = requests(index)
                # Fix: the first parameter was named `cls` while the body
                # referenced the undefined name `self`, raising NameError.
                self.validate(result, value)
            except Exception:
                print(f'Not found {index}')

    def validate(self, index: str, value: str):
        """Dispatch the submitted data ``index`` to the validator named ``value``."""
        # Fix: the original compared the rule against the whole TYPE list
        # (always False) and called `validate_with` without `self.`.
        if value in Request.TYPE:
            return self.validate_with(index, value)

    def validate_with(self, index: str, value: str):
        """Run the concrete validator selected by rule ``value`` on data ``index``."""
        if value == 'integer':
            # Fix: validate the submitted data, not the rule name itself.
            return Validator.validate_integer(index)
        if value == 'alphanumeric':
            # NOTE(review): no validator wired up for this rule yet.
            return

    def is_validate(self):
        return True
acf974114787302f9ac3ddc6fc38750de3706ca5 | 17,385 | py | Python | train.py | visiongo-kr/KWS | e45a1a3d509a880ae13c28d2d18a25eda789d560 | [
"Apache-2.0"
] | 1 | 2021-07-30T08:22:32.000Z | 2021-07-30T08:22:32.000Z | train.py | visiongo-kr/KWS | e45a1a3d509a880ae13c28d2d18a25eda789d560 | [
"Apache-2.0"
] | null | null | null | train.py | visiongo-kr/KWS | e45a1a3d509a880ae13c28d2d18a25eda789d560 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Modifications Copyright 2017 Arm Inc. All Rights Reserved.
# Added model dimensions as command line argument and changed to Adam optimizer
#
#
"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours even only using a CPU.
For more information, please see
https://www.tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
from tensorflow.contrib import slim as slim
FLAGS = None
def main(_):
  """Build the graph, train the keyword-spotting model and report test accuracy.

  All configuration (paths, audio/feature parameters, model architecture,
  training schedule) is read from the module-level FLAGS. Writes TensorBoard
  summaries, the graph definition, the label list and best-accuracy
  checkpoints under the configured directories.
  """
  # We want to see all the logging messages for this tutorial.
  tf.logging.set_verbosity(tf.logging.INFO)
  # Start a new TensorFlow session.
  sess = tf.InteractiveSession()
  # Begin by making sure we have the training data we need. If you already have
  # training data of your own, use `--data_url= ` on the command line to avoid
  # downloading.
  model_settings = models.prepare_model_settings(
      len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
      FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
      FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
  audio_processor = input_data.AudioProcessor(
      FLAGS.data_dir, FLAGS.silence_percentage,
      FLAGS.unknown_percentage,
      FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
      FLAGS.testing_percentage, model_settings)
  fingerprint_size = model_settings['fingerprint_size']
  label_count = model_settings['label_count']
  time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
  # Figure out the learning rates for each training phase. Since it's often
  # effective to have high learning rates at the start of training, followed by
  # lower levels towards the end, the number of steps and learning rates can be
  # specified as comma-separated lists to define the rate at each stage. For
  # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
  # will run 13,000 training loops in total, with a rate of 0.001 for the first
  # 10,000, and 0.0001 for the final 3,000.
  training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
  learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
  if len(training_steps_list) != len(learning_rates_list):
    raise Exception(
        '--how_many_training_steps and --learning_rate must be equal length '
        'lists, but are %d and %d long instead' % (len(training_steps_list),
                                                   len(learning_rates_list)))
  fingerprint_input = tf.placeholder(
      tf.float32, [None, fingerprint_size], name='fingerprint_input')
  logits, dropout_prob = models.create_model(
      fingerprint_input,
      model_settings,
      FLAGS.model_architecture,
      FLAGS.model_size_info,
      is_training=True)
  # Define loss and optimizer
  ground_truth_input = tf.placeholder(
      tf.float32, [None, label_count], name='groundtruth_input')
  # Optionally we can add runtime checks to spot when NaNs or other symptoms of
  # numerical errors start occurring during training.
  control_dependencies = []
  if FLAGS.check_nans:
    checks = tf.add_check_numerics_ops()
    control_dependencies = [checks]
  # Create the back propagation and training evaluation machinery in the graph.
  with tf.name_scope('cross_entropy'):
    cross_entropy_mean = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            labels=ground_truth_input, logits=logits))
  tf.summary.scalar('cross_entropy', cross_entropy_mean)
  # UPDATE_OPS dependency ensures batch-norm statistics are updated each step.
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.name_scope('train'), tf.control_dependencies(update_ops), tf.control_dependencies(control_dependencies):
    learning_rate_input = tf.placeholder(
        tf.float32, [], name='learning_rate_input')
    train_op = tf.train.AdamOptimizer(
        learning_rate_input)
    train_step = slim.learning.create_train_op(cross_entropy_mean, train_op)
    # train_step = tf.train.GradientDescentOptimizer(
    #     learning_rate_input).minimize(cross_entropy_mean)
  predicted_indices = tf.argmax(logits, 1)
  expected_indices = tf.argmax(ground_truth_input, 1)
  correct_prediction = tf.equal(predicted_indices, expected_indices)
  confusion_matrix = tf.confusion_matrix(
      expected_indices, predicted_indices, num_classes=label_count)
  evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  global_step = tf.train.get_or_create_global_step()
  increment_global_step = tf.assign(global_step, global_step + 1)
  saver = tf.train.Saver(tf.global_variables())
  # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
  merged_summaries = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                       sess.graph)
  validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
  tf.global_variables_initializer().run()
  # Parameter counts
  params = tf.trainable_variables()
  num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))
  print('Total number of Parameters: ', num_params)
  start_step = 1
  if FLAGS.start_checkpoint:
    models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
    start_step = global_step.eval(session=sess)
  tf.logging.info('Training from step: %d ', start_step)
  # Save graph.pbtxt.
  tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
                       FLAGS.model_architecture + '.pbtxt')
  # Save list of words.
  with gfile.GFile(
      os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
      'w') as f:
    f.write('\n'.join(audio_processor.words_list))
  # Training loop.
  best_accuracy = 0
  training_steps_max = np.sum(training_steps_list)
  for training_step in xrange(start_step, training_steps_max + 1):
    # Figure out what the current learning rate is.
    training_steps_sum = 0
    for i in range(len(training_steps_list)):
      training_steps_sum += training_steps_list[i]
      if training_step <= training_steps_sum:
        learning_rate_value = learning_rates_list[i]
        break
    # Pull the audio samples we'll use for training.
    train_fingerprints, train_ground_truth = audio_processor.get_data(
        FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
        FLAGS.background_volume, time_shift_samples, 'training', sess)
    # Run the graph with this batch of training data.
    # NOTE(review): feeding dropout_prob 1.0 here looks like dropout is
    # disabled during training -- confirm this is intentional.
    train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
        [
            merged_summaries, evaluation_step, cross_entropy_mean, train_step,
            increment_global_step
        ],
        feed_dict={
            fingerprint_input: train_fingerprints,
            ground_truth_input: train_ground_truth,
            learning_rate_input: learning_rate_value,
            dropout_prob: 1.0
        })
    train_writer.add_summary(train_summary, training_step)
    tf.logging.info('Step #%d: rate %f, accuracy %.2f%%, cross entropy %f' %
                    (training_step, learning_rate_value, train_accuracy * 100,
                     cross_entropy_value))
    is_last_step = (training_step == training_steps_max)
    if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
      set_size = audio_processor.set_size('validation')
      total_accuracy = 0
      total_conf_matrix = None
      for i in xrange(0, set_size, FLAGS.batch_size):
        validation_fingerprints, validation_ground_truth = (
            audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
                                     0.0, 0, 'validation', sess))
        # Run a validation step and capture training summaries for TensorBoard
        # with the `merged` op.
        validation_summary, validation_accuracy, conf_matrix = sess.run(
            [merged_summaries, evaluation_step, confusion_matrix],
            feed_dict={
                fingerprint_input: validation_fingerprints,
                ground_truth_input: validation_ground_truth,
                dropout_prob: 1.0
            })
        validation_writer.add_summary(validation_summary, training_step)
        # Weight the last (possibly smaller) batch by its actual size.
        batch_size = min(FLAGS.batch_size, set_size - i)
        total_accuracy += (validation_accuracy * batch_size) / set_size
        if total_conf_matrix is None:
          total_conf_matrix = conf_matrix
        else:
          total_conf_matrix += conf_matrix
      tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
      tf.logging.info('Step %d: Validation accuracy = %.2f%% (N=%d)' %
                      (training_step, total_accuracy * 100, set_size))
      # Save the model checkpoint when validation accuracy improves
      if total_accuracy > best_accuracy:
        best_accuracy = total_accuracy
        checkpoint_path = os.path.join(FLAGS.train_dir, 'best',
                                       FLAGS.model_architecture + '_'+ str(int(best_accuracy*10000)) + '.ckpt')
        tf.logging.info('Saving best model to "%s-%d"', checkpoint_path, training_step)
        saver.save(sess, checkpoint_path, global_step=training_step)
      tf.logging.info('So far the best validation accuracy is %.2f%%' % (best_accuracy*100))
  # Final evaluation on the held-out testing set.
  set_size = audio_processor.set_size('testing')
  tf.logging.info('set_size=%d', set_size)
  total_accuracy = 0
  total_conf_matrix = None
  for i in xrange(0, set_size, FLAGS.batch_size):
    test_fingerprints, test_ground_truth = audio_processor.get_data(
        FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
    test_accuracy, conf_matrix = sess.run(
        [evaluation_step, confusion_matrix],
        feed_dict={
            fingerprint_input: test_fingerprints,
            ground_truth_input: test_ground_truth,
            dropout_prob: 1.0
        })
    batch_size = min(FLAGS.batch_size, set_size - i)
    total_accuracy += (test_accuracy * batch_size) / set_size
    if total_conf_matrix is None:
      total_conf_matrix = conf_matrix
    else:
      total_conf_matrix += conf_matrix
  tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
  tf.logging.info('Final test accuracy = %.2f%% (N=%d)' % (total_accuracy * 100,
                                                           set_size))
if __name__ == '__main__':
  # Command-line flags; parsed into the module-level FLAGS consumed by main().
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_dir',
      type=str,
      default='my_wavs/',
      help="""\
      Where to download the speech training data to.
      """)
  parser.add_argument(
      '--background_volume',
      type=float,
      default=0.1,
      help="""\
      How loud the background noise should be, between 0 and 1.
      """)
  parser.add_argument(
      '--background_frequency',
      type=float,
      default=0.8,
      help="""\
      How many of the training samples have background noise mixed in.
      """)
  parser.add_argument(
      '--silence_percentage',
      type=float,
      default=10.0,
      help="""\
      How much of the training data should be silence.
      """)
  parser.add_argument(
      '--unknown_percentage',
      type=float,
      default=10.0,
      help="""\
      How much of the training data should be unknown words.
      """)
  parser.add_argument(
      '--time_shift_ms',
      type=float,
      default=100.0,
      help="""\
      Range to randomly shift the training audio by in time.
      """)
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a test set.')
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a validation set.')
  parser.add_argument(
      '--sample_rate',
      type=int,
      default=16000,
      help='Expected sample rate of the wavs',)
  parser.add_argument(
      '--clip_duration_ms',
      type=int,
      default=1000,
      help='Expected duration in milliseconds of the wavs',)
  parser.add_argument(
      '--window_size_ms',
      type=float,
      default=30.0,
      help='How long each spectrogram timeslice is',)
  # NOTE(review): help text below duplicates --window_size_ms's; it should
  # describe the stride between timeslices.
  parser.add_argument(
      '--window_stride_ms',
      type=float,
      default=10.0,
      help='How long each spectrogram timeslice is',)
  parser.add_argument(
      '--dct_coefficient_count',
      type=int,
      default=40,
      help='How many bins to use for the MFCC fingerprint',)
  parser.add_argument(
      '--how_many_training_steps',
      type=str,
      default='15000,3000',
      help='How many training loops to run',)
  parser.add_argument(
      '--eval_step_interval',
      type=int,
      default=400,
      help='How often to evaluate the training results.')
  parser.add_argument(
      '--learning_rate',
      type=str,
      default='0.001,0.0001',
      help='How large a learning rate to use when training.')
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='How many items to train with at once',)
  parser.add_argument(
      '--summaries_dir',
      type=str,
      default='/tmp/retrain_logs',
      help='Where to save summary logs for TensorBoard.')
  parser.add_argument(
      '--wanted_words',
      type=str,
      default='help,visiongo_help',
      help='Words to use (others will be added to an unknown label)',)
  parser.add_argument(
      '--train_dir',
      type=str,
      default='my_wavs',
      help='Directory to write event logs and checkpoint.')
  parser.add_argument(
      '--save_step_interval',
      type=int,
      default=100,
      help='Save model checkpoint every save_steps.')
  parser.add_argument(
      '--start_checkpoint',
      type=str,
      default='',
      help='If specified, restore this pretrained model before any training.')
  parser.add_argument(
      '--model_architecture',
      type=str,
      default='dnn',
      help='What model architecture to use')
  parser.add_argument(
      '--model_size_info',
      type=int,
      nargs="+",
      default=[128,128,128],
      help='Model dimensions - different for various models')
  # NOTE(review): argparse `type=bool` treats any non-empty string as True
  # (bool('False') is True) -- an explicit True/False flag value will not
  # behave as expected; confirm whether this should be a store_true action.
  parser.add_argument(
      '--check_nans',
      type=bool,
      default=False,
      help='Whether to check for invalid numbers during processing')
  # Forward any unrecognized arguments to tf.app.run / absl flags.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
acf97440e5275f2a5d70ff600aa00c2c6c816d79 | 2,905 | py | Python | nets/mobilenet025.py | noahzhy/retinaface-keras | a54a96426c3c2d7b97cf798f09bcf68a30859f9f | [
"MIT"
] | null | null | null | nets/mobilenet025.py | noahzhy/retinaface-keras | a54a96426c3c2d7b97cf798f09bcf68a30859f9f | [
"MIT"
] | null | null | null | nets/mobilenet025.py | noahzhy/retinaface-keras | a54a96426c3c2d7b97cf798f09bcf68a30859f9f | [
"MIT"
] | null | null | null | import warnings
import numpy as np
import keras
from keras.models import Model
from keras.layers import DepthwiseConv2D,Input,Activation,Dropout,Reshape,BatchNormalization,GlobalAveragePooling2D,GlobalMaxPooling2D,Conv2D
from keras import backend as K
import tensorflow as tf
def _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1)):
    """Entry convolution block: Conv2D -> BatchNorm -> ReLU6.

    Layer names are fixed ('conv1', 'conv1_bn', 'conv1_relu') because this
    block is only used once, at the start of the network.
    """
    conv = Conv2D(
        filters,
        kernel,
        padding='same',
        use_bias=False,
        strides=strides,
        name='conv1',
    )(inputs)
    normalized = BatchNormalization(name='conv1_bn')(conv)
    return Activation(tf.nn.relu6, name='conv1_relu')(normalized)
def _depthwise_conv_block(inputs, pointwise_conv_filters,
                          depth_multiplier=1, strides=(1, 1), block_id=1):
    """Depthwise-separable convolution block.

    DepthwiseConv2D -> BatchNorm -> ReLU6, then a pointwise 1x1 Conv2D ->
    BatchNorm -> ReLU6.  Layer names embed *block_id* so each block in the
    network stays uniquely addressable.
    """
    dw_name = 'conv_dw_%d' % block_id
    pw_name = 'conv_pw_%d' % block_id

    depthwise = DepthwiseConv2D((3, 3),
                                padding='same',
                                depth_multiplier=depth_multiplier,
                                strides=strides,
                                use_bias=False,
                                name=dw_name)(inputs)
    depthwise = BatchNormalization(name=dw_name + '_bn')(depthwise)
    depthwise = Activation(tf.nn.relu6, name=dw_name + '_relu')(depthwise)

    pointwise = Conv2D(pointwise_conv_filters, (1, 1),
                       padding='same',
                       use_bias=False,
                       strides=(1, 1),
                       name=pw_name)(depthwise)
    pointwise = BatchNormalization(name=pw_name + '_bn')(pointwise)
    return Activation(tf.nn.relu6, name=pw_name + '_relu')(pointwise)
# def relu6(x):
# return K.relu(x, max_value=6)
def MobileNet(img_input, depth_multiplier=1):
    """Build a quarter-width MobileNet v1 backbone.

    Returns the three intermediate feature maps taken after blocks 5, 11
    and 13 (spatial strides 8, 16 and 32 relative to the input), e.g. for
    640x640x3 input: 80x80x64, 40x40x128 and 20x20x256.
    """
    # 640,640,3 -> 320,320,8
    net = _conv_block(img_input, 8, strides=(2, 2))
    # 320,320,8 -> 320,320,16
    net = _depthwise_conv_block(net, 16, depth_multiplier, block_id=1)

    # 320,320,16 -> 160,160,32
    net = _depthwise_conv_block(net, 32, depth_multiplier, strides=(2, 2), block_id=2)
    net = _depthwise_conv_block(net, 32, depth_multiplier, block_id=3)

    # 160,160,32 -> 80,80,64
    net = _depthwise_conv_block(net, 64, depth_multiplier, strides=(2, 2), block_id=4)
    net = _depthwise_conv_block(net, 64, depth_multiplier, block_id=5)
    stride8 = net

    # 80,80,64 -> 40,40,128
    net = _depthwise_conv_block(net, 128, depth_multiplier, strides=(2, 2), block_id=6)
    for repeat_id in range(7, 12):
        net = _depthwise_conv_block(net, 128, depth_multiplier, block_id=repeat_id)
    stride16 = net

    # 40,40,128 -> 20,20,256
    net = _depthwise_conv_block(net, 256, depth_multiplier, strides=(2, 2), block_id=12)
    net = _depthwise_conv_block(net, 256, depth_multiplier, block_id=13)
    stride32 = net

    return stride8, stride16, stride32
| 38.733333 | 142 | 0.628916 |
acf9744fdb6c5b27a69529ed36529b786a1de1c9 | 19,842 | py | Python | Bio/AlignIO/__init__.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 3 | 2016-11-21T09:55:56.000Z | 2019-04-09T17:39:43.000Z | Bio/AlignIO/__init__.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 32 | 2016-11-21T07:38:21.000Z | 2017-08-16T13:00:03.000Z | Bio/AlignIO/__init__.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 8 | 2016-11-24T18:57:35.000Z | 2022-01-16T08:15:25.000Z | # Copyright 2008-2017 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Multiple sequence alignment input/output as alignment objects.
The Bio.AlignIO interface is deliberately very similar to Bio.SeqIO, and in
fact the two are connected internally. Both modules use the same set of file
format names (lower case strings). From the user's perspective, you can read
in a PHYLIP file containing one or more alignments using Bio.AlignIO, or you
can read in the sequences within these alignments using Bio.SeqIO.
Bio.AlignIO is also documented at http://biopython.org/wiki/AlignIO and by
a whole chapter in our tutorial:
* `HTML Tutorial`_
* `PDF Tutorial`_
.. _`HTML Tutorial`: http://biopython.org/DIST/docs/tutorial/Tutorial.html
.. _`PDF Tutorial`: http://biopython.org/DIST/docs/tutorial/Tutorial.pdf
Input
-----
For the typical special case when your file or handle contains one and only
one alignment, use the function Bio.AlignIO.read(). This takes an input file
handle (or in recent versions of Biopython a filename as a string), format
string and optional number of sequences per alignment. It will return a single
MultipleSeqAlignment object (or raise an exception if there isn't just one
alignment):
>>> from Bio import AlignIO
>>> align = AlignIO.read("Phylip/interlaced.phy", "phylip")
>>> print(align)
SingleLetterAlphabet() alignment with 3 rows and 384 columns
-----MKVILLFVLAVFTVFVSS---------------RGIPPE...I-- CYS1_DICDI
MAHARVLLLALAVLATAAVAVASSSSFADSNPIRPVTDRAASTL...VAA ALEU_HORVU
------MWATLPLLCAGAWLLGV--------PVCGAAELSVNSL...PLV CATH_HUMAN
For the general case, when the handle could contain any number of alignments,
use the function Bio.AlignIO.parse(...) which takes the same arguments, but
returns an iterator giving MultipleSeqAlignment objects (typically used in a
for loop). If you want random access to the alignments by number, turn this
into a list:
>>> from Bio import AlignIO
>>> alignments = list(AlignIO.parse("Emboss/needle.txt", "emboss"))
>>> print(alignments[2])
SingleLetterAlphabet() alignment with 2 rows and 120 columns
-KILIVDDQYGIRILLNEVFNKEGYQTFQAANGLQALDIVTKER...--- ref_rec
LHIVVVDDDPGTCVYIESVFAELGHTCKSFVRPEAAEEYILTHP...HKE gi|94967506|receiver
Most alignment file formats can be concatenated so as to hold as many
different multiple sequence alignments as possible. One common example
is the output of the tool seqboot in the PHYLIP suite. Sometimes there
can be a file header and footer, as seen in the EMBOSS alignment output.
Output
------
Use the function Bio.AlignIO.write(...), which takes a complete set of
Alignment objects (either as a list, or an iterator), an output file handle
(or filename in recent versions of Biopython) and of course the file format::
from Bio import AlignIO
alignments = ...
count = SeqIO.write(alignments, "example.faa", "fasta")
If using a handle make sure to close it to flush the data to the disk::
from Bio import AlignIO
alignments = ...
with open("example.faa", "w") as handle:
count = SeqIO.write(alignments, handle, "fasta")
In general, you are expected to call this function once (with all your
alignments) and then close the file handle. However, for file formats
like PHYLIP where multiple alignments are stored sequentially (with no file
header and footer), then multiple calls to the write function should work as
expected when using handles.
If you are using a filename, the repeated calls to the write functions will
overwrite the existing file each time.
Conversion
----------
The Bio.AlignIO.convert(...) function allows an easy interface for simple
alignment file format conversions. Additionally, it may use file format
specific optimisations so this should be the fastest way too.
In general however, you can combine the Bio.AlignIO.parse(...) function with
the Bio.AlignIO.write(...) function for sequence file conversion. Using
generator expressions provides a memory efficient way to perform filtering or
other extra operations as part of the process.
File Formats
------------
When specifying the file format, use lowercase strings. The same format
names are also used in Bio.SeqIO and include the following:
- clustal - Output from Clustal W or X, see also the module Bio.Clustalw
which can be used to run the command line tool from Biopython.
- emboss - EMBOSS tools' "pairs" and "simple" alignment formats.
- fasta - The generic sequence file format where each record starts with
    an identifier line starting with a ">" character, followed by
lines of sequence.
  - fasta-m10 - For the pairwise alignments output by Bill Pearson's FASTA
tools when used with the -m 10 command line option for machine
readable output.
- ig - The IntelliGenetics file format, apparently the same as the
MASE alignment format.
- nexus - Output from NEXUS, see also the module Bio.Nexus which can also
read any phylogenetic trees in these files.
  - phylip - Interlaced PHYLIP, as used by the PHYLIP tools.
- phylip-sequential - Sequential PHYLIP.
- phylip-relaxed - PHYLIP like format allowing longer names.
- stockholm - A richly annotated alignment file format used by PFAM.
- mauve - Output from progressiveMauve/Mauve
Note that while Bio.AlignIO can read all the above file formats, it cannot
write to all of them.
You can also use any file format supported by Bio.SeqIO, such as "fasta" or
"ig" (which are listed above), PROVIDED the sequences in your file are all the
same length.
"""
from __future__ import print_function
from Bio._py3k import basestring
# TODO
# - define policy on reading aligned sequences with gaps in
# (e.g. - and . characters) including how the alphabet interacts
#
# - Can we build the to_alignment(...) functionality
# into the generic Alignment class instead?
#
# - How best to handle unique/non unique record.id when writing.
# For most file formats reading such files is fine; The stockholm
# parser would fail.
#
# - MSF multiple alignment format, aka GCG, aka PileUp format (*.msf)
# http://www.bioperl.org/wiki/MSF_multiple_alignment_format
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import Alphabet, AlphabetEncoder, _get_base_alphabet
from Bio.File import as_handle
from . import StockholmIO
from . import ClustalIO
from . import NexusIO
from . import PhylipIO
from . import EmbossIO
from . import FastaIO
from . import MafIO
from . import MauveIO
# Convention for format names is "mainname-subtype" in lower case.
# Please use the same names as BioPerl and EMBOSS where possible.
_FormatToIterator = { # "fasta" is done via Bio.SeqIO
"clustal": ClustalIO.ClustalIterator,
"emboss": EmbossIO.EmbossIterator,
"fasta-m10": FastaIO.FastaM10Iterator,
"maf": MafIO.MafIterator,
"mauve": MauveIO.MauveIterator,
"nexus": NexusIO.NexusIterator,
"phylip": PhylipIO.PhylipIterator,
"phylip-sequential": PhylipIO.SequentialPhylipIterator,
"phylip-relaxed": PhylipIO.RelaxedPhylipIterator,
"stockholm": StockholmIO.StockholmIterator,
}
_FormatToWriter = { # "fasta" is done via Bio.SeqIO
# "emboss" : EmbossIO.EmbossWriter, (unfinished)
"clustal": ClustalIO.ClustalWriter,
"maf": MafIO.MafWriter,
"mauve": MauveIO.MauveWriter,
"nexus": NexusIO.NexusWriter,
"phylip": PhylipIO.PhylipWriter,
"phylip-sequential": PhylipIO.SequentialPhylipWriter,
"phylip-relaxed": PhylipIO.RelaxedPhylipWriter,
"stockholm": StockholmIO.StockholmWriter,
}
def write(alignments, handle, format):
    """Write a complete set of alignments to a file.

    Arguments:
     - alignments - A list (or iterator) of MultipleSeqAlignment objects,
       or a single alignment object.
     - handle - File handle object to write to, or filename as a string
       (note older versions of Biopython only took a handle).
     - format - Lower case string describing the file format to write.

    The caller is responsible for closing the handle afterwards.

    Returns the number of alignments written (as an integer).
    """
    from Bio import SeqIO

    # Validate the format argument up front so errors are easy to understand.
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)

    # Accept a bare alignment for convenience by wrapping it in a list.
    if isinstance(alignments, MultipleSeqAlignment):
        alignments = [alignments]

    with as_handle(handle, 'w') as fp:
        if format in _FormatToWriter:
            # A dedicated multiple-alignment writer exists for this format.
            count = _FormatToWriter[format](fp).write_file(alignments)
        elif format in SeqIO._FormatToWriter:
            # Fall back on the SeqIO writer, one alignment at a time.
            count = 0
            for alignment in alignments:
                if not isinstance(alignment, MultipleSeqAlignment):
                    raise TypeError("Expect a list or iterator of MultipleSeqAlignment "
                                    "objects, got: %r" % alignment)
                SeqIO.write(alignment, fp, format)
                count += 1
        elif format in _FormatToIterator or format in SeqIO._FormatToIterator:
            raise ValueError("Reading format '%s' is supported, but not writing"
                             % format)
        else:
            raise ValueError("Unknown format '%s'" % format)

        assert isinstance(count, int), "Internal error - the underlying %s " \
            "writer should have returned the alignment count, not %s" \
            % (format, repr(count))
    return count
# This is a generator function!
def _SeqIO_to_alignment_iterator(handle, format, alphabet=None, seq_count=None):
    """Use Bio.SeqIO to create a MultipleSeqAlignment iterator (PRIVATE).

    Arguments:
     - handle - handle to the file.
     - format - string describing the file format (must be a format
       Bio.SeqIO can parse).
     - alphabet - optional Alphabet object, useful when the sequence type
       cannot be automatically inferred from the file itself
       (e.g. fasta, phylip, clustal)
     - seq_count - Optional integer, number of sequences expected in each
       alignment. Recommended for fasta format files.

    If seq_count is omitted (default) then all the sequences in the file
    are combined into a single MultipleSeqAlignment; otherwise the records
    are yielded in consecutive batches of exactly seq_count sequences.
    """
    from Bio import SeqIO
    # Caller (parse) should only delegate formats SeqIO knows how to read.
    assert format in SeqIO._FormatToIterator
    if seq_count:
        # Use the count to split the records into batches.
        seq_record_iterator = SeqIO.parse(handle, format, alphabet)
        records = []
        for record in seq_record_iterator:
            records.append(record)
            if len(records) == seq_count:
                yield MultipleSeqAlignment(records, alphabet)
                records = []
        # Leftover records mean the file did not contain a whole number of
        # alignments of seq_count sequences each.
        if records:
            raise ValueError("Check seq_count argument, not enough sequences?")
    else:
        # Must assume that there is a single alignment using all
        # the SeqRecord objects:
        records = list(SeqIO.parse(handle, format, alphabet))
        if records:
            yield MultipleSeqAlignment(records, alphabet)
def _force_alphabet(alignment_iterator, alphabet):
    """Yield alignments with their alphabet forcibly replaced (PRIVATE).

    The *alphabet* argument is assumed to have been validated already by
    the caller.  Each alignment (and every record within it) must agree
    with the requested base alphabet class, otherwise a ValueError is
    raised.
    """
    expected_base = _get_base_alphabet(alphabet).__class__

    def _check(current_alphabet):
        # Reject alphabets whose base class clashes with the requested one.
        if not isinstance(_get_base_alphabet(current_alphabet), expected_base):
            raise ValueError("Specified alphabet %s clashes with "
                             "that determined from the file, %s"
                             % (repr(alphabet), repr(current_alphabet)))

    for alignment in alignment_iterator:
        _check(alignment._alphabet)
        for record in alignment:
            _check(record.seq.alphabet)
            record.seq.alphabet = alphabet
        alignment._alphabet = alphabet
        yield alignment
def parse(handle, format, seq_count=None, alphabet=None):
    """Iterate over an alignment file as MultipleSeqAlignment objects.

    Arguments:
     - handle - handle to the file, or the filename as a string
       (note older versions of Biopython only took a handle).
     - format - string describing the file format.
     - alphabet - optional Alphabet object, useful when the sequence type
       cannot be automatically inferred from the file itself
       (e.g. fasta, phylip, clustal)
     - seq_count - Optional integer, number of sequences expected in each
       alignment. Recommended for fasta format files.

    If you have the file name in a string 'filename', use:

    >>> from Bio import AlignIO
    >>> filename = "Emboss/needle.txt"
    >>> format = "emboss"
    >>> for alignment in AlignIO.parse(filename, format):
    ...     print("Alignment of length %i" % alignment.get_alignment_length())
    Alignment of length 124
    Alignment of length 119
    Alignment of length 120
    Alignment of length 118
    Alignment of length 125

    If you have a string 'data' containing the file contents, use::

        from Bio import AlignIO
        from StringIO import StringIO
        my_iterator = AlignIO.parse(StringIO(data), format)

    Use the Bio.AlignIO.read() function when you expect a single record only.
    """
    from Bio import SeqIO
    # Try and give helpful error messages:
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if alphabet is not None and not (isinstance(alphabet, Alphabet) or
                                     isinstance(alphabet, AlphabetEncoder)):
        raise ValueError("Invalid alphabet, %s" % repr(alphabet))
    if seq_count is not None and not isinstance(seq_count, int):
        raise TypeError("Need integer for seq_count (sequences per alignment)")
    with as_handle(handle, 'rU') as fp:
        # Map the file format to a sequence iterator:
        if format in _FormatToIterator:
            # Native AlignIO parser for this format.
            iterator_generator = _FormatToIterator[format]
            if alphabet is None:
                i = iterator_generator(fp, seq_count)
            else:
                try:
                    # Initially assume the optional alphabet argument is supported
                    i = iterator_generator(fp, seq_count, alphabet=alphabet)
                except TypeError:
                    # It isn't supported - wrap the iterator to enforce the
                    # requested alphabet on each alignment afterwards.
                    i = _force_alphabet(iterator_generator(fp, seq_count),
                                        alphabet)
        elif format in SeqIO._FormatToIterator:
            # Exploit the existing SeqIO parser to do the dirty work!
            i = _SeqIO_to_alignment_iterator(fp, format,
                                             alphabet=alphabet,
                                             seq_count=seq_count)
        else:
            raise ValueError("Unknown format '%s'" % format)
        # This imposes some overhead... wait until we drop Python 2.4 to fix it
        # (re-yielding here keeps the handle open until iteration finishes).
        for a in i:
            yield a
def read(handle, format, seq_count=None, alphabet=None):
    """Turn an alignment file into a single MultipleSeqAlignment object.

    Arguments:
     - handle - handle to the file, or the filename as a string
       (note older versions of Biopython only took a handle).
     - format - string describing the file format.
     - alphabet - optional Alphabet object, useful when the sequence type
       cannot be automatically inferred from the file itself
       (e.g. fasta, phylip, clustal)
     - seq_count - Optional integer, number of sequences expected in each
       alignment. Recommended for fasta format files.

    A ValueError is raised if the handle holds no alignments, or more than
    one.  For example, with a PFAM/Stockholm style file containing exactly
    one alignment:

    >>> from Bio import AlignIO
    >>> filename = "Clustalw/protein.aln"
    >>> format = "clustal"
    >>> alignment = AlignIO.read(filename, format)
    >>> print("Alignment of length %i" % alignment.get_alignment_length())
    Alignment of length 411

    If however you want the first alignment from a file containing
    multiple alignments this function would raise an exception.

    >>> from Bio import AlignIO
    >>> filename = "Emboss/needle.txt"
    >>> format = "emboss"
    >>> alignment = AlignIO.read(filename, format)
    Traceback (most recent call last):
    ...
    ValueError: More than one record found in handle

    Instead use:

    >>> from Bio import AlignIO
    >>> filename = "Emboss/needle.txt"
    >>> format = "emboss"
    >>> alignment = next(AlignIO.parse(filename, format))
    >>> print("First alignment has length %i" % alignment.get_alignment_length())
    First alignment has length 124

    You must use the Bio.AlignIO.parse() function if you want to read
    multiple records from the handle.
    """
    # Sentinel lets us distinguish "iterator exhausted" without try/except.
    _no_more = object()
    iterator = parse(handle, format, seq_count, alphabet)
    alignment = next(iterator, _no_more)
    if alignment is _no_more:
        raise ValueError("No records found in handle")
    if next(iterator, _no_more) is not _no_more:
        raise ValueError("More than one record found in handle")
    if seq_count:
        assert len(alignment) == seq_count
    return alignment
def convert(in_file, in_format, out_file, out_format, alphabet=None):
    """Convert between two alignment files, returns number of alignments.

    Arguments:
     - in_file - an input handle or filename
     - in_format - input file format, lower case string
     - out_file - an output handle or filename
     - out_format - output file format, lower case string
     - alphabet - optional alphabet to assume

    **NOTE** - If you provide an output filename, it will be opened which will
    overwrite any existing file without warning. This may happen if even the
    conversion is aborted (e.g. an invalid out_format name is given).
    """
    # TODO - Add optimised versions of important conversions.
    # For now, delegate to parse()/write(); the input is opened (and its
    # arguments checked) before the output file gets created.
    with as_handle(in_file, 'rU') as input_handle:
        alignment_iter = parse(input_handle, in_format, None, alphabet)
        with as_handle(out_file, 'w') as output_handle:
            return write(alignment_iter, output_handle, out_format)
if __name__ == "__main__":
    # Run this module's docstring examples (doctests) when executed directly.
    from Bio._utils import run_doctest
    run_doctest()
| 41.684874 | 88 | 0.676494 |
acf9751540ab26f60c6c3e69497c8418e67f6dd2 | 69 | py | Python | tests/__init__.py | dem4ply/chibi_requests | c182a6b2842ac46c21fa1d8959e36f9da0ed463e | [
"WTFPL"
] | null | null | null | tests/__init__.py | dem4ply/chibi_requests | c182a6b2842ac46c21fa1d8959e36f9da0ed463e | [
"WTFPL"
] | null | null | null | tests/__init__.py | dem4ply/chibi_requests | c182a6b2842ac46c21fa1d8959e36f9da0ed463e | [
"WTFPL"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit test package for chibi_requests."""
| 17.25 | 43 | 0.608696 |
acf9757f3baf4aee3a88da6188bf2a32f7b98aa4 | 1,466 | py | Python | zarr_cache/util.py | CCI-Tools/zarr-cache | 23c873966e06440b65406fd633b0c8fe63ca70a1 | [
"MIT"
] | null | null | null | zarr_cache/util.py | CCI-Tools/zarr-cache | 23c873966e06440b65406fd633b0c8fe63ca70a1 | [
"MIT"
] | null | null | null | zarr_cache/util.py | CCI-Tools/zarr-cache | 23c873966e06440b65406fd633b0c8fe63ca70a1 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2020 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import collections.abc
def close_store(store: collections.abc.MutableMapping):
    """
    Close *store*, if possible, that is, if *store* has a callable attribute ``close``, call it.

    Note: the annotation previously used ``collections.MutableMapping``,
    an alias that was removed in Python 3.10, which made importing this
    module fail there; ``collections.abc.MutableMapping`` is the correct
    home for the ABC.

    :param store: The store
    """
    # Fetch the attribute once; non-callable "close" attributes are ignored.
    close_method = getattr(store, 'close', None)
    if callable(close_method):
        close_method()
| 45.8125 | 96 | 0.753752 |
acf975c0ca21fd3d0ef6222d072797d2c12e30da | 876 | py | Python | jupygit/__init__.py | fferegrino/jupy | bf52b080e84d5b114ecf04f466788b05e0f18ed1 | [
"MIT"
] | null | null | null | jupygit/__init__.py | fferegrino/jupy | bf52b080e84d5b114ecf04f466788b05e0f18ed1 | [
"MIT"
] | 7 | 2018-08-17T06:27:54.000Z | 2021-02-02T21:57:09.000Z | jupygit/__init__.py | fferegrino/jupy | bf52b080e84d5b114ecf04f466788b05e0f18ed1 | [
"MIT"
] | 1 | 2018-11-21T05:48:26.000Z | 2018-11-21T05:48:26.000Z | from notebook.utils import url_path_join
from .GitHandlers import GitCleanHandler, GitRestoreHandler, GitCheckRecoveryHandler
def load_jupyter_server_extension(nb_server_app):
    """Register the jupygit HTTP endpoints on the notebook web application."""
    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    routes = [
        (url_path_join(base_url, r'/git/clean'), GitCleanHandler),
        (url_path_join(base_url, r'/git/restore'), GitRestoreHandler),
        (url_path_join(base_url, r'/git/check'), GitCheckRecoveryHandler),
    ]
    web_app.add_handlers('.*$', routes)
def _jupyter_nbextension_paths():
"""Required to load JS button"""
return [dict(
section="notebook",
src="static",
dest="jupygit",
require="jupygit/index")]
def _jupyter_server_extension_paths():
return [{
"module": "jupygit"
}]
| 27.375 | 84 | 0.670091 |
acf976da3b7134e6698c3d9d167cafacdddac793 | 1,706 | py | Python | pycashier/read_filter.py | DaylinMorgan/pycashier | 74391c4ec79241a299c0855f6cb095a2ea858dc9 | [
"MIT"
] | 1 | 2020-08-28T21:04:18.000Z | 2020-08-28T21:04:18.000Z | pycashier/read_filter.py | daylinmorgan/pycashier | 74391c4ec79241a299c0855f6cb095a2ea858dc9 | [
"MIT"
] | 1 | 2020-08-06T16:02:18.000Z | 2020-12-09T20:16:57.000Z | pycashier/read_filter.py | daylinmorgan/pycashier | 74391c4ec79241a299c0855f6cb095a2ea858dc9 | [
"MIT"
] | 1 | 2020-08-07T19:56:15.000Z | 2020-08-07T19:56:15.000Z | import csv
from pathlib import Path
from .console import console
def get_filter_count(file_in, filter_percent):
    """Translate a percentage of total reads into an absolute read count.

    Sums the read counts (second tab-separated column) of *file_in* and
    returns *filter_percent* percent of that total, rounded to the
    nearest whole read.
    """
    with open(file_in, newline="") as tsv:
        total_reads = sum(
            float(record[1]) for record in csv.reader(tsv, delimiter="\t")
        )
    return int(round(total_reads * filter_percent / 100, 0))
def filter_by_percent(file_in, filter_percent, outdir):
    """Filter *file_in*, dropping rows below *filter_percent* of total reads."""
    min_reads = get_filter_count(file_in, filter_percent)
    filter_by_count(file_in, min_reads, outdir)
def filter_by_count(file_in, filter_count, outdir):
    """Copy rows of a barcode count table meeting a minimum read count.

    Arguments:
     - file_in - pathlib.Path to a tab-separated file whose second column
       holds the read count for each sequence.
     - filter_count - minimum (inclusive) read count a row must have.
     - outdir - directory in which ``<name>.min<count><ext>`` is written.

    Fixes over the previous version: the output Path is no longer shadowed
    by the open file handle, and the entire qualifying line is echoed, so
    rows with extra columns keep all fields and their trailing newline
    (previously only the first two fields were written, without a newline).
    """
    out_path = Path(outdir) / f"{file_in.stem}.min{filter_count}{file_in.suffix}"
    with open(file_in, "r") as tsv_in, open(out_path, "w") as tsv_out:
        for line in tsv_in:
            fields = line.split("\t")
            # Keep the row only if its read count reaches the threshold.
            if int(fields[1]) >= filter_count:
                tsv_out.write(line)
def read_filter(
    sample, filter_count, filter_percent, quality, ratio, distance, **kwargs
):
    """Remove low-abundance barcode sequences for one sample.

    Uses count-based filtering when ``filter_count`` is given, otherwise
    percent-of-total-reads filtering, logging the applied threshold.
    Expects ``outdir`` and ``pipelinedir`` keys in ``kwargs``.
    """
    outdir = kwargs["outdir"]
    barcodes_tsv = Path(kwargs["pipelinedir"]) / (
        f"{sample}.barcodes.q{quality}.r{ratio}d{distance}.tsv"
    )
    if filter_count is not None:
        threshold_text = f"less than {filter_count} reads"
        apply_filter, threshold = filter_by_count, filter_count
    else:
        threshold_text = f"less than {filter_percent}% of total reads"
        apply_filter, threshold = filter_by_percent, filter_percent
    console.log(
        f"[green]{sample}[/green]: removing sequences with {threshold_text}"
    )
    apply_filter(barcodes_tsv, threshold, outdir)
| 27.079365 | 79 | 0.630129 |
acf97741c33bbc45faefc875860e0e1555753e8c | 1,622 | py | Python | release/scripts/addons/presets/operator/mesh.landscape_add/tech_effect.py | noorbeast/BlenderSource | 65ebecc5108388965678b04b43463b85f6c69c1d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2019-09-16T10:29:19.000Z | 2022-02-11T14:43:18.000Z | engine/2.80/scripts/addons/presets/operator/mesh.landscape_add/tech_effect.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | engine/2.80/scripts/addons/presets/operator/mesh.landscape_add/tech_effect.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | import bpy
op = bpy.context.active_operator
op.ant_terrain_name = 'Landscape'
op.land_material = ''
op.water_material = ''
op.texture_block = ''
op.at_cursor = True
op.smooth_mesh = True
op.tri_face = False
op.sphere_mesh = False
op.subdivision_x = 256
op.subdivision_y = 256
op.mesh_size = 2.0
op.mesh_size_x = 2.0
op.mesh_size_y = 2.0
op.random_seed = 0
op.noise_offset_x = 3.1415927410125732
op.noise_offset_y = 3.1415927410125732
op.noise_offset_z = 0.0
op.noise_size_x = 1.0
op.noise_size_y = 1.0
op.noise_size_z = 1.0
op.noise_size = 0.5
op.noise_type = 'marble_noise'
op.basis_type = 'CELLNOISE'
op.vl_basis_type = 'BLENDER'
op.distortion = 1.0
op.hard_noise = '1'
op.noise_depth = 4
op.amplitude = 0.5
op.frequency = 2.0
op.dimension = 1.0
op.lacunarity = 2.0
op.offset = 1.0
op.gain = 1.0
op.marble_bias = '2'
op.marble_sharp = '3'
op.marble_shape = '7'
op.height = 1.0
op.height_invert = False
op.height_offset = 0.0
op.fx_mixfactor = 0.75
op.fx_mix_mode = '7'
op.fx_type = '12'
op.fx_bias = '1'
op.fx_turb = 0.0
op.fx_depth = 1
op.fx_amplitude = 0.20000000298023224
op.fx_frequency = 1.25
op.fx_size = 1.0
op.fx_loc_x = 0.0
op.fx_loc_y = 0.0
op.fx_height = -0.5
op.fx_invert = False
op.fx_offset = 0.15000000596046448
op.edge_falloff = '0'
op.falloff_x = 20.0
op.falloff_y = 20.0
op.edge_level = 0.0
op.maximum = 1.0
op.minimum = -1.0
op.vert_group = ''
op.strata = 9.0
op.strata_type = '0'
op.water_plane = False
op.water_level = 0.009999999776482582
op.remove_double = False
op.show_main_settings = True
op.show_noise_settings = True
op.show_displace_settings = True
op.refresh = True
op.auto_refresh = True
| 21.918919 | 38 | 0.734895 |
acf9775881b2f3b443c911fca8e7b84bff37bbf9 | 441 | py | Python | sauna/commands/__init__.py | bubu11e/sauna | d3740d705bc1f44a7a04c0ae1f2391f834c9d0d6 | [
"BSD-2-Clause"
] | 21 | 2016-02-22T08:52:07.000Z | 2022-03-31T19:40:18.000Z | sauna/commands/__init__.py | bubu11e/sauna | d3740d705bc1f44a7a04c0ae1f2391f834c9d0d6 | [
"BSD-2-Clause"
] | 38 | 2016-02-25T16:02:14.000Z | 2021-08-16T18:42:12.000Z | sauna/commands/__init__.py | bubu11e/sauna | d3740d705bc1f44a7a04c0ae1f2391f834c9d0d6 | [
"BSD-2-Clause"
] | 14 | 2016-02-22T19:20:38.000Z | 2021-07-02T04:41:53.000Z | class CommandRegister:
all_commands = {}
def command(self, **options):
def decorator(func):
command_name = options.pop('name', func.__name__)
self.all_commands[command_name] = func
return func
return decorator
@classmethod
def get_command(cls, command_name):
try:
return cls.all_commands[command_name]
except KeyError:
return None
| 25.941176 | 61 | 0.598639 |
acf9776d9b8b433fabf1e312339e532d53e91e8f | 11,750 | py | Python | python/tvm/_ffi/_ctypes/packed_func.py | janifer112x/incubator-tvm | 98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6 | [
"Apache-2.0"
] | 22 | 2022-03-18T07:29:31.000Z | 2022-03-23T14:54:32.000Z | python/tvm/_ffi/_ctypes/packed_func.py | janifer112x/incubator-tvm | 98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6 | [
"Apache-2.0"
] | 3 | 2020-04-20T15:37:55.000Z | 2020-05-13T05:34:28.000Z | python/tvm/_ffi/_ctypes/packed_func.py | janifer112x/incubator-tvm | 98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6 | [
"Apache-2.0"
] | 2 | 2022-03-18T08:26:34.000Z | 2022-03-20T06:02:48.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-branches, global-statement, unused-import
"""Function configuration API."""
import ctypes
import traceback
from numbers import Number, Integral
from ..base import _LIB, get_last_ffi_error, py2cerror, check_call
from ..base import c_str, string_types
from ..runtime_ctypes import DataType, TVMByteArray, TVMContext, ObjectRValueRef
from . import ndarray as _nd
from .ndarray import NDArrayBase, _make_array
from .types import TVMValue, ArgTypeCode
from .types import TVMPackedCFunc, TVMCFuncFinalizer
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _ctx_to_int64
from .object import ObjectBase, PyNativeObject, _set_class_object
from . import object as _object
PackedFuncHandle = ctypes.c_void_p
ModuleHandle = ctypes.c_void_p
ObjectHandle = ctypes.c_void_p
TVMRetValueHandle = ctypes.c_void_p
def _ctypes_free_resource(rhandle):
    """Release a Python resource handle once it is no longer needed.

    Reinterprets the raw pointer as a Python object and drops the extra
    reference that was taken (via Py_IncRef) when the resource was handed
    to the C side.
    """
    pyobj = ctypes.cast(rhandle, ctypes.py_object)
    ctypes.pythonapi.Py_DecRef(pyobj)
# Global callback that is always alive: it is passed as the finalizer to
# TVMFuncCreateFromCFunc below, and the extra Py_IncRef pins the callback
# object so it can never be garbage collected while C code may invoke it.
TVM_FREE_PYOBJ = TVMCFuncFinalizer(_ctypes_free_resource)
ctypes.pythonapi.Py_IncRef(ctypes.py_object(TVM_FREE_PYOBJ))
def _make_packed_func(handle, is_global):
    """Wrap a raw C packed-function handle in the registered front-end
    PackedFunc class, bypassing its __init__."""
    func = _CLASS_PACKED_FUNC.__new__(_CLASS_PACKED_FUNC)
    func.is_global = is_global
    func.handle = handle
    return func
def convert_to_tvm_func(pyfunc):
    """Convert a python function to TVM function
    Parameters
    ----------
    pyfunc : python function
        The python function to be converted.
    Returns
    -------
    tvmfunc: tvm.nd.Function
        The converted tvm function.
    """
    local_pyfunc = pyfunc
    def cfun(args, type_codes, num_args, ret, _):
        """ ctypes function """
        num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args
        # Lazily convert each C argument to its Python counterpart via the
        # type-code dispatch table.
        pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args))
        # pylint: disable=broad-except
        try:
            rv = local_pyfunc(*pyargs)
        except Exception:
            # Exceptions must not propagate across the C boundary: stash the
            # traceback on the TVM side and signal failure with -1.
            msg = traceback.format_exc()
            msg = py2cerror(msg)
            _LIB.TVMAPISetLastError(c_str(msg))
            return -1
        if rv is not None:
            if isinstance(rv, tuple):
                raise ValueError("PackedFunction can only support one return value")
            temp_args = []
            values, tcodes, _ = _make_tvm_args((rv,), temp_args)
            if not isinstance(ret, TVMRetValueHandle):
                ret = TVMRetValueHandle(ret)
            if _LIB.TVMCFuncSetReturn(ret, values, tcodes, ctypes.c_int(1)) != 0:
                raise get_last_ffi_error()
            # Keep temp_args/rv referenced until after TVMCFuncSetReturn has
            # copied the packed values.
            _ = temp_args
            _ = rv
        return 0
    handle = PackedFuncHandle()
    f = TVMPackedCFunc(cfun)
    # NOTE: We will need to use python-api to increase ref count of the f
    # TVM_FREE_PYOBJ will be called after it is no longer needed.
    pyobj = ctypes.py_object(f)
    ctypes.pythonapi.Py_IncRef(pyobj)
    if _LIB.TVMFuncCreateFromCFunc(f, pyobj, TVM_FREE_PYOBJ, ctypes.byref(handle)) != 0:
        raise get_last_ffi_error()
    return _make_packed_func(handle, False)
def _make_tvm_args(args, temp_args):
    """Pack arguments into c args tvm call accept.

    Returns (values, type_codes, num_args) as ctypes arrays ready for
    TVMFuncCall. Objects created during conversion are appended to
    `temp_args` so the caller can keep them alive until the C call returns.
    """
    num_args = len(args)
    values = (TVMValue * num_args)()
    type_codes = (ctypes.c_int * num_args)()
    # The elif order matters: more specific wrappers (ObjectBase, NDArrayBase,
    # PyNativeObject) must be tested before the generic Number/callable cases.
    for i, arg in enumerate(args):
        if isinstance(arg, ObjectBase):
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.OBJECT_HANDLE
        elif arg is None:
            values[i].v_handle = None
            type_codes[i] = ArgTypeCode.NULL
        elif isinstance(arg, NDArrayBase):
            values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p)
            type_codes[i] = (
                ArgTypeCode.NDARRAY_HANDLE if not arg.is_view else ArgTypeCode.DLTENSOR_HANDLE
            )
        elif isinstance(arg, PyNativeObject):
            values[i].v_handle = arg.__tvm_object__.handle
            type_codes[i] = ArgTypeCode.OBJECT_HANDLE
        elif isinstance(arg, _nd._TVM_COMPATS):
            values[i].v_handle = ctypes.c_void_p(arg._tvm_handle)
            type_codes[i] = arg.__class__._tvm_tcode
        elif isinstance(arg, Integral):
            values[i].v_int64 = arg
            type_codes[i] = ArgTypeCode.INT
        elif isinstance(arg, Number):
            values[i].v_float64 = arg
            type_codes[i] = ArgTypeCode.FLOAT
        elif isinstance(arg, DataType):
            values[i].v_str = c_str(str(arg))
            type_codes[i] = ArgTypeCode.STR
        elif isinstance(arg, TVMContext):
            values[i].v_int64 = _ctx_to_int64(arg)
            type_codes[i] = ArgTypeCode.TVM_CONTEXT
        elif isinstance(arg, (bytearray, bytes)):
            # from_buffer only takes in bytearray.
            if isinstance(arg, bytes):
                byte_arr = bytearray(arg)
                temp_args.append(byte_arr)
                arg = byte_arr
            arr = TVMByteArray()
            arr.data = ctypes.cast(
                (ctypes.c_byte * len(arg)).from_buffer(arg), ctypes.POINTER(ctypes.c_byte)
            )
            arr.size = len(arg)
            values[i].v_handle = ctypes.c_void_p(ctypes.addressof(arr))
            temp_args.append(arr)
            type_codes[i] = ArgTypeCode.BYTES
        elif isinstance(arg, string_types):
            values[i].v_str = c_str(arg)
            type_codes[i] = ArgTypeCode.STR
        elif isinstance(arg, (list, tuple, dict, _CLASS_OBJECT_GENERIC)):
            arg = _FUNC_CONVERT_TO_OBJECT(arg)
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.OBJECT_HANDLE
            temp_args.append(arg)
        elif isinstance(arg, _CLASS_MODULE):
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.MODULE_HANDLE
        elif isinstance(arg, PackedFuncBase):
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
        elif isinstance(arg, ctypes.c_void_p):
            values[i].v_handle = arg
            type_codes[i] = ArgTypeCode.HANDLE
        elif isinstance(arg, ObjectRValueRef):
            values[i].v_handle = ctypes.cast(ctypes.byref(arg.obj.handle), ctypes.c_void_p)
            type_codes[i] = ArgTypeCode.OBJECT_RVALUE_REF_ARG
        elif callable(arg):
            # Plain Python callables are wrapped into TVM packed functions.
            arg = convert_to_tvm_func(arg)
            values[i].v_handle = arg.handle
            type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
            temp_args.append(arg)
        else:
            raise TypeError("Don't know how to handle type %s" % type(arg))
    return values, type_codes, num_args
class PackedFuncBase(object):
    """Function base."""
    __slots__ = ["handle", "is_global"]
    # pylint: disable=no-member
    def __init__(self, handle, is_global):
        """Initialize the function with handle
        Parameters
        ----------
        handle : PackedFuncHandle
            the handle to the underlying function.
        is_global : bool
            Whether this is a global function in python
        """
        self.handle = handle
        self.is_global = is_global
    def __del__(self):
        # Only non-global functions own their handle; global functions are
        # freed by the runtime.  The _LIB check guards interpreter shutdown.
        if not self.is_global and _LIB is not None:
            if _LIB.TVMFuncFree(self.handle) != 0:
                raise get_last_ffi_error()
    def __call__(self, *args):
        """Call the function with positional arguments
        args : list
           The positional arguments to the function call.
        """
        temp_args = []
        values, tcodes, num_args = _make_tvm_args(args, temp_args)
        ret_val = TVMValue()
        ret_tcode = ctypes.c_int()
        if (
            _LIB.TVMFuncCall(
                self.handle,
                values,
                tcodes,
                ctypes.c_int(num_args),
                ctypes.byref(ret_val),
                ctypes.byref(ret_tcode),
            )
            != 0
        ):
            raise get_last_ffi_error()
        # Keep converted argument wrappers alive until the C call returns.
        _ = temp_args
        _ = args
        # Dispatch the C return value to its Python type by return type code.
        return RETURN_SWITCH[ret_tcode.value](ret_val)
def __init_handle_by_constructor__(fconstructor, args):
    """Initialize handle by constructor.

    Calls the packed constructor function directly (bypassing the normal
    return-value dispatch) and returns the raw object handle; the caller is
    responsible for wrapping it.
    """
    temp_args = []
    values, tcodes, num_args = _make_tvm_args(args, temp_args)
    ret_val = TVMValue()
    ret_tcode = ctypes.c_int()
    if (
        _LIB.TVMFuncCall(
            fconstructor.handle,
            values,
            tcodes,
            ctypes.c_int(num_args),
            ctypes.byref(ret_val),
            ctypes.byref(ret_tcode),
        )
        != 0
    ):
        raise get_last_ffi_error()
    # Keep converted argument wrappers alive until the C call returns.
    _ = temp_args
    _ = args
    # A constructor must hand back an object handle.
    assert ret_tcode.value == ArgTypeCode.OBJECT_HANDLE
    handle = ret_val.v_handle
    return handle
def _return_module(x):
    """Wrap a TVMValue carrying a module handle into the registered Module class."""
    raw = x.v_handle
    handle = raw if isinstance(raw, ModuleHandle) else ModuleHandle(raw)
    return _CLASS_MODULE(handle)
def _handle_return_func(x):
    """Wrap a TVMValue carrying a packed-func handle into the registered PackedFunc class."""
    raw = x.v_handle
    handle = raw if isinstance(raw, PackedFuncHandle) else PackedFuncHandle(raw)
    return _CLASS_PACKED_FUNC(handle, False)
def _get_global_func(name, allow_missing=False):
    """Look up a global TVM function by name.

    Returns the wrapped packed function, or None when the function is
    missing and allow_missing is set; otherwise raises ValueError.
    """
    handle = PackedFuncHandle()
    check_call(_LIB.TVMFuncGetGlobal(c_str(name), ctypes.byref(handle)))
    if not handle.value:
        if allow_missing:
            return None
        raise ValueError("Cannot find global function %s" % name)
    return _make_packed_func(handle, False)
# setup return handle for function type
_object.__init_by_constructor__ = __init_handle_by_constructor__
# Register converters from C return values / C arguments to Python wrappers,
# keyed by argument type code.
RETURN_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _handle_return_func
RETURN_SWITCH[ArgTypeCode.MODULE_HANDLE] = _return_module
RETURN_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = lambda x: _make_array(x.v_handle, False, True)
C_TO_PY_ARG_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _wrap_arg_func(
    _handle_return_func, ArgTypeCode.PACKED_FUNC_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.MODULE_HANDLE] = _wrap_arg_func(
    _return_module, ArgTypeCode.MODULE_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.DLTENSOR_HANDLE] = lambda x: _make_array(x.v_handle, True, False)
C_TO_PY_ARG_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = lambda x: _make_array(x.v_handle, False, True)
# These are filled in late by the _set_class_* registration hooks below to
# avoid circular imports.
_CLASS_MODULE = None
_CLASS_PACKED_FUNC = None
_CLASS_OBJECT_GENERIC = None
_FUNC_CONVERT_TO_OBJECT = None
def _set_class_module(module_class):
    """Register the concrete Module class (late-bound to avoid a circular import)."""
    global _CLASS_MODULE
    _CLASS_MODULE = module_class
def _set_class_packed_func(packed_func_class):
    """Register the concrete PackedFunc class (late-bound to avoid a circular import)."""
    global _CLASS_PACKED_FUNC
    _CLASS_PACKED_FUNC = packed_func_class
def _set_class_object_generic(object_generic_class, func_convert_to_object):
    """Register the ObjectGeneric class and the python-to-object converter.

    Both are used by _make_tvm_args when packing list/tuple/dict arguments.
    """
    global _CLASS_OBJECT_GENERIC
    global _FUNC_CONVERT_TO_OBJECT
    _CLASS_OBJECT_GENERIC = object_generic_class
    _FUNC_CONVERT_TO_OBJECT = func_convert_to_object
| 35.285285 | 100 | 0.663574 |
acf9778a163f98a2ac1721ba1601d7fbd82a5a08 | 15,480 | py | Python | dynamic_profile/utils.py | ebsuku/wazimap-dynamic-profile | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | [
"MIT"
] | 1 | 2020-02-04T05:03:54.000Z | 2020-02-04T05:03:54.000Z | dynamic_profile/utils.py | ebsuku/wazimap-dynamic-profile | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | [
"MIT"
] | null | null | null | dynamic_profile/utils.py | ebsuku/wazimap-dynamic-profile | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | [
"MIT"
] | 1 | 2020-01-03T20:30:43.000Z | 2020-01-03T20:30:43.000Z | import logging
from collections import OrderedDict, defaultdict
from wazimap.models.data import DataNotFound
from wazimap.data.utils import (
collapse_categories,
calculate_median,
calculate_median_stat,
group_remainder,
get_stat_data,
percent,
current_context,
dataset_context,
merge_dicts,
)
from census.profile import find_dicts_with_key
from census.utils import get_ratio
from wazimap.geo import geo_data
from itertools import repeat
from dynamic_profile.models import Profile, IndicatorProfile
MERGE_KEYS = set(["values", "numerators", "error"])
log = logging.getLogger(__name__)
def enhance_api_data(api_data):
    """Augment every nested dict that has a "values" key with comparison stats.

    For each such dict, rebuilds values/error/numerators/numerator_errors as
    OrderedDicts covering this geo plus up to `num_comparatives` comparative
    geo levels, and adds "index" (ratio vs. this geo) and "error_ratio"
    entries.  Mutates `api_data` in place and returns it.
    """
    dict_list = find_dicts_with_key(api_data, "values")
    for d in dict_list:
        raw = {}
        enhanced = {}
        geo_value = d["values"]["this"]
        num_comparatives = 2
        # create our containers for transformation; missing source dicts are
        # zero-filled for every comparative level.
        for obj in ["values", "error", "numerators", "numerator_errors"]:
            if obj not in d:
                raw[obj] = dict(zip(geo_data.comparative_levels, repeat(0)))
            else:
                raw[obj] = d[obj]
            enhanced[obj] = OrderedDict()
        enhanced["index"] = OrderedDict()
        enhanced["error_ratio"] = OrderedDict()
        comparative_sumlevs = []
        for sumlevel in geo_data.comparative_levels:
            # add the index value for comparatives
            if sumlevel in raw["values"]:
                enhanced["values"][sumlevel] = raw["values"][sumlevel]
                enhanced["index"][sumlevel] = get_ratio(
                    geo_value, raw["values"][sumlevel]
                )
                # add to our list of comparatives for the template to use
                if sumlevel != "this":
                    comparative_sumlevs.append(sumlevel)
            # add the moe ratios
            if (sumlevel in raw["values"]) and (sumlevel in raw["error"]):
                enhanced["error"][sumlevel] = raw["error"][sumlevel]
                enhanced["error_ratio"][sumlevel] = get_ratio(
                    raw["error"][sumlevel], raw["values"][sumlevel], 3
                )
            # add the numerators and numerator_errors
            if sumlevel in raw["numerators"]:
                enhanced["numerators"][sumlevel] = raw["numerators"][sumlevel]
            if (sumlevel in raw["numerators"]) and (
                sumlevel in raw["numerator_errors"]
            ):
                enhanced["numerator_errors"][sumlevel] = raw["numerator_errors"][
                    sumlevel
                ]
            # stop once "this" plus num_comparatives levels are collected
            if len(enhanced["values"]) >= (num_comparatives + 1):
                break
        # replace data with enhanced version
        for obj in [
            "values",
            "index",
            "error",
            "error_ratio",
            "numerators",
            "numerator_errors",
        ]:
            d[obj] = enhanced[obj]
    return api_data
class BuildIndicator(object):
    """Build the data dictionary for one profile indicator of a geography.

    Combines chart config, metadata, statistical data and a header summary
    into a single dict via `create()`.
    """
    def __init__(self, geo, session, profile, *args, **kwargs):
        """
        Args:
            geo: Geography object
            session: sqlalchemy session object
            profile: Profile object.
        """
        self.geo = geo
        self.session = session
        self.profile = profile
    def context_comparative_geo(self, comp_geo):
        """
        Calculate the dataset_context stat_data for a comparative geo.
        Returns {} when the dataset has no data for that geo.
        """
        with dataset_context(year=str(self.profile.dataset_context)):
            try:
                distribution, total = get_stat_data(
                    [self.profile.field_name],
                    comp_geo,
                    self.session,
                    table_name=self.profile.table_name.name,
                    exclude_zero=self.profile.exclude_zero,
                    percent=self.profile.percent,
                    recode=self.profile.recode,
                    key_order=self.profile.key_order,
                    exclude=self.profile.exclude,
                    order_by=self.profile.order_by,
                )
                return distribution
            except DataNotFound:
                return {}
    def comparative_geo(self, geo, header=False):
        """
        Calculate the stat data for comparative geos.
        With header=True, return only the total instead of the distribution.
        """
        distribution, total = get_stat_data(
            [self.profile.field_name],
            geo,
            self.session,
            table_name=self.profile.table_name.name,
            exclude_zero=self.profile.exclude_zero,
            percent=self.profile.percent,
            recode=self.profile.recode,
            key_order=self.profile.key_order,
            exclude=self.profile.exclude,
            order_by=self.profile.order_by,
        )
        if header:
            return total
        return distribution
    def header_compare_geos(self):
        # Map each comparative geo level to its total, for the header stat.
        values = {}
        comparative_geos = geo_data.get_comparative_geos(self.geo)
        for comp_geo in comparative_geos:
            values.update(
                {
                    "{}".format(comp_geo.geo_level): self.comparative_geo(
                        comp_geo, header=True
                    )
                }
            )
        return values
    def compare_geos(self):
        """
        Get the values for the comparative geo and add it to the main geo.
        "Note": for land cover we dont have provincial or country, need to skip these for
        comparative geo.
        """
        comparative_geos = geo_data.get_comparative_geos(self.geo)
        for comp_geo in comparative_geos:
            try:
                if self.profile.dataset_context:
                    # Context datasets skip country/province levels (no data).
                    if comp_geo.geo_level not in ("country", "province"):
                        merge_dicts(
                            self.distribution,
                            self.context_comparative_geo(comp_geo),
                            comp_geo.geo_level,
                        )
                else:
                    merge_dicts(
                        self.distribution,
                        self.comparative_geo(comp_geo),
                        comp_geo.geo_level,
                    )
            except KeyError as error:
                # Best effort: a malformed comparative distribution must not
                # break the whole indicator.
                log.warn("Unable to merge dicts for profile %s", self.profile.title)
                log.error("Unable to merge dicts: %s", error)
                pass
    def header(self):
        """
        This will contain any information relating to noteworthy stats about the indicator.
        By default this will return the highest value within the indicator
        results = 'text|number|percent'
        """
        header = {
            "title": self.profile.title,
            "info": self.profile.info,
            "result": {
                "config": True,
                "type": "name",
                "name": "",
                "summary": self.profile.summary,
                "stat_data": {},
            },
            "extra_results": [],
        }
        try:
            # self.distribution only exists if calculation() ran successfully;
            # AttributeError/KeyError here falls through to the empty header.
            if self.distribution:
                if self.profile.header_result == "distribution_total":
                    stat_values = {"this": self.total}
                    stat_values.update(self.header_compare_geos())
                    header["result"]["stat_data"]["values"] = stat_values
                    header["result"]["stat_data"]["summary"] = self.profile.summary
                    header["result"]["stat_data"]["name"] = self.profile.summary
                    header["result"]["type"] = "number"
                elif self.profile.header_result == "highest_percent":
                    stat_data = self.distribution[self.profile.header_field]
                    header["result"]["type"] = "percentage"
                    header["result"]["stat_data"] = stat_data
                    header["result"]["stat_data"]["type"] = "percentage"
                    header["result"]["stat_data"]["summary"] = self.profile.summary
                elif self.profile.header_result == "highest_category":
                    # First key of the (ordered) distribution.
                    stat_data = self.distribution[list(self.distribution.keys())[0]]
                    header["result"]["stat_data"] = stat_data
                    header["result"]["stat_data"]["type"] = "name"
                    header["result"]["stat_data"]["summary"] = self.profile.summary
        except (AttributeError, DataNotFound, KeyError):
            pass
        header = enhance_api_data(header)
        return header
    def chart(self):
        """
        Details about the chart
        """
        return {
            "chart_title": self.profile.chart_title,
            "chart_type": self.profile.chart_type,
            "chart_design": self.profile.chart_design,
        }
    def dataset_context_stat_data(self):
        """
        Calculate statistical data with a particular dataset context (year).
        """
        with dataset_context(year=str(self.profile.dataset_context)):
            try:
                self.distribution, self.total = get_stat_data(
                    [self.profile.field_name],
                    self.geo,
                    self.session,
                    table_name=self.profile.table_name.name,
                    exclude_zero=self.profile.exclude_zero,
                    percent=self.profile.percent,
                    recode=self.profile.recode,
                    key_order=self.profile.key_order,
                    exclude=self.profile.exclude,
                    order_by=self.profile.order_by,
                )
                self.compare_geos()
                if self.profile.group_remainder:
                    group_remainder(self.distribution, self.profile.group_remainder)
                self.distribution = enhance_api_data(self.distribution)
                return {"stat_values": self.distribution}
            except DataNotFound:
                return {}
    def stat_data(self):
        """
        Calculate the statistical data
        """
        try:
            self.distribution, self.total = get_stat_data(
                [self.profile.field_name],
                self.geo,
                self.session,
                table_name=self.profile.table_name.name,
                exclude_zero=self.profile.exclude_zero,
                percent=self.profile.percent,
                recode=self.profile.recode,
                key_order=self.profile.key_order,
                exclude=self.profile.exclude,
                order_by=self.profile.order_by,
            )
            self.compare_geos()
            if self.profile.group_remainder:
                group_remainder(self.distribution, self.profile.group_remainder)
            self.distribution = enhance_api_data(self.distribution)
            return {"stat_values": self.distribution}
        except KeyError as error:
            log.warn(
                "Unable to calculate statistics for profile: %s", self.profile.title
            )
            log.warn("error: %s", error)
            return {}
        except DataNotFound as error:
            log.warn(
                "Unable to calculate statistics for profile: %s", self.profile.title
            )
            log.warn("Unable to find data for this indicator: %s", error)
            return {}
    def calculation(self):
        """
        Get the statistical data for this indicator
        """
        if self.profile.dataset_context:
            return self.dataset_context_stat_data()
        else:
            return self.stat_data()
    def meta(self):
        """
        Any other information about the indicator
        """
        if self.profile.parent_profile:
            parent_profile = self.profile.parent_profile.title
        else:
            parent_profile = None
        return {
            "display_order": self.profile.display_order,
            "children": [],
            "parent_profile": parent_profile,
        }
    def create(self):
        """
        Create the dictionary containing all the details about the indicator.
        NOTE: header() must run after calculation() since it reads
        self.distribution/self.total set there.
        """
        dicts = [self.chart(), self.meta(), self.calculation(), self.header()]
        indicator = {}
        for d in dicts:
            indicator.update(d)
        return indicator
class BuildProfile(object):
    """
    Configure how the profile with its indicators will be built.
    """
    def __init__(self, name, geo, session):
        """
        Args:
            name: Profile name
            geo: Geography Object
            session: sqlalchemy session object.
        """
        self.name = name
        self.geo = geo
        self.session = session
        # Flat list of indicator dicts; create() nests children under parents.
        self.indicators = []
    def create(self, cls_indicator):
        """
        Create all the indicators for this profile.
        Args:
            cls_indicator: class in which the indicator will be calculated
        Returns:
            The ordered, merged list of top-level indicator dicts.
        """
        for model_indicator in IndicatorProfile.objects.filter(
            profile__name=self.name
        ).filter(geo_level__contains=[self.geo.geo_level]):
            new_indicator = cls_indicator(self.geo, self.session, model_indicator)
            self.indicators.append(new_indicator.create())
        self.sort()
        self.merge()
        self.sort_children()
        return self.indicators
    def sort(self):
        """
        Sort indicators according to the display_order.
        """
        self.indicators = sorted(self.indicators, key=lambda i: i["display_order"])
    def merge(self):
        """
        Attach each indicator that names a parent indicator to that parent,
        then drop the merged children from the top level.
        """
        for indicator in self.indicators:
            title = indicator.get("parent_profile", None)
            # Bug fix: this previously read `if not None:` which is always
            # True, so the parent scan ran even for indicators with no
            # declared parent (and could spuriously match a None title).
            if title:
                for other in self.indicators:
                    if other["title"] == title:
                        other["children"].append(indicator)
                        other["extra_results"].append(indicator["result"])
                        break
        self.indicators = [
            indicator
            for indicator in self.indicators
            if not indicator["parent_profile"]
        ]
    def sort_children(self):
        """
        Sort each indicator's children by their display_order.
        """
        for indicator in self.indicators:
            indicator["children"] = sorted(
                indicator["children"], key=lambda i: i["display_order"]
            )
class Section(object):
    """
    Combine all the profiles and indicators
    """
    def __init__(self, geo, session):
        """
        Args:
            geo: A geography object
            session: sqlalchemy session
        """
        self.geo = geo
        self.session = session
        # Ordered mapping of profile name -> indicator list (filled by build),
        # keyed in display_order so templates render profiles in order.
        self.profiles = OrderedDict(
            (p.name, [])
            for p in Profile.objects.filter(
                geo_level__contains=[self.geo.geo_level]
            ).order_by("display_order")
        )
    def build(self, cls_profile, cls_indicator):
        """
        Build every profile for this geography.
        Args:
            cls_profile: class that will generate the profile
            cls_indicator: class that will generate the indicators
        Returns:
            The profiles mapping with indicator lists populated.
        """
        for profile_name in self.profiles.keys():
            profile = cls_profile(profile_name, self.geo, self.session)
            self.profiles[profile_name] = profile.create(cls_indicator)
        return self.profiles
| 35.261959 | 96 | 0.547997 |
acf977ed62b23fa0c704c1d21a12621ba77b2699 | 7,219 | py | Python | algoliasearch_django/registration.py | allenjseb/algoliasearch-django | 466b3920d20e6269b09e23a96b9c2f02bcea088d | [
"MIT"
] | 1 | 2017-10-31T14:54:10.000Z | 2017-10-31T14:54:10.000Z | algoliasearch_django/registration.py | allenjseb/algoliasearch-django | 466b3920d20e6269b09e23a96b9c2f02bcea088d | [
"MIT"
] | null | null | null | algoliasearch_django/registration.py | allenjseb/algoliasearch-django | 466b3920d20e6269b09e23a96b9c2f02bcea088d | [
"MIT"
] | 2 | 2019-10-15T15:10:28.000Z | 2020-02-27T11:48:41.000Z | from __future__ import unicode_literals
import logging
from django.db.models.signals import post_save
from django.db.models.signals import pre_delete
from algoliasearch import algoliasearch
from .models import AlgoliaIndex
from .settings import SETTINGS
from .version import VERSION
from algoliasearch.version import VERSION as CLIENT_VERSION
from platform import python_version
from django import get_version as django_version
logger = logging.getLogger(__name__)
class AlgoliaEngineError(Exception):
    """Base exception for failures raised by the Algolia engine."""
class RegistrationError(AlgoliaEngineError):
    """Raised when registering or unregistering a model with the engine fails."""
class AlgoliaEngine(object):
    """Registry that connects Django models to Algolia indices.

    Holds the Algolia client, tracks registered models/adapters, and (when
    auto-indexing is on) wires Django's post_save/pre_delete signals to keep
    the remote indices in sync.
    """
    def __init__(self, settings=SETTINGS):
        """Initializes the Algolia engine."""
        try:
            app_id = settings['APPLICATION_ID']
            api_key = settings['API_KEY']
        except KeyError:
            raise AlgoliaEngineError(
                'APPLICATION_ID and API_KEY must be defined.')
        self.__auto_indexing = settings.get('AUTO_INDEXING', True)
        self.__settings = settings
        # model class -> AlgoliaIndex adapter instance
        self.__registered_models = {}
        self.client = algoliasearch.Client(app_id, api_key)
        # Advertise client/python/django versions for Algolia-side telemetry.
        self.client.set_extra_header('User-Agent',
                                     'Algolia for Python (%s); Python (%s); Algolia for Django (%s); Django (%s)'
                                     % (CLIENT_VERSION, python_version(), VERSION, django_version))
    def is_registered(self, model):
        """Checks whether the given models is registered with Algolia engine"""
        return model in self.__registered_models
    def register(self, model, index_cls=AlgoliaIndex, auto_indexing=None):
        """
        Registers the given model with Algolia engine.
        If the given model is already registered with Algolia engine, a
        RegistrationError will be raised.
        """
        # Check for existing registration.
        if self.is_registered(model):
            raise RegistrationError(
                '{} is already registered with Algolia engine'.format(model))
        # Perform the registration.
        if not issubclass(index_cls, AlgoliaIndex):
            raise RegistrationError(
                '{} should be a subclass of AlgoliaIndex'.format(index_cls))
        index_obj = index_cls(model, self.client, self.__settings)
        self.__registered_models[model] = index_obj
        # Per-model auto_indexing overrides the engine-wide setting only
        # when it is an explicit boolean True.
        if (isinstance(auto_indexing, bool) and
                auto_indexing) or self.__auto_indexing:
            # Connect to the signalling framework.
            post_save.connect(self.__post_save_receiver, model)
            pre_delete.connect(self.__pre_delete_receiver, model)
            logger.info('REGISTER %s', model)
    def unregister(self, model):
        """
        Unregisters the given model with Algolia engine.
        If the given model is not registered with Algolia engine, a
        RegistrationError will be raised.
        """
        if not self.is_registered(model):
            raise RegistrationError(
                '{} is not registered with Algolia engine'.format(model))
        # Perform the unregistration.
        del self.__registered_models[model]
        # Disconnect from the signalling framework.
        post_save.disconnect(self.__post_save_receiver, model)
        pre_delete.disconnect(self.__pre_delete_receiver, model)
        logger.info('UNREGISTER %s', model)
    def get_registered_models(self):
        """
        Returns a list of models that have been registered with Algolia
        engine.
        """
        return list(self.__registered_models.keys())
    def get_adapter(self, model):
        """Returns the adapter associated with the given model."""
        if not self.is_registered(model):
            raise RegistrationError(
                '{} is not registered with Algolia engine'.format(model))
        return self.__registered_models[model]
    def get_adapter_from_instance(self, instance):
        """Returns the adapter associated with the given instance."""
        model = instance.__class__
        return self.get_adapter(model)
    # Proxies methods.
    def save_record(self, instance, **kwargs):
        """Saves the record.
        If `update_fields` is set, this method will use partial_update_object()
        and will update only the given fields (never `_geoloc` and `_tags`).
        For more information about partial_update_object:
        https://github.com/algolia/algoliasearch-client-python#update-an-existing-object-in-the-index
        """
        adapter = self.get_adapter_from_instance(instance)
        adapter.save_record(instance, **kwargs)
    def delete_record(self, instance):
        """Deletes the record."""
        adapter = self.get_adapter_from_instance(instance)
        adapter.delete_record(instance)
    def update_records(self, model, qs, batch_size=1000, **kwargs):
        """
        Updates multiple records.
        This method is optimized for speed. It takes a QuerySet and the same
        arguments as QuerySet.update(). Optionally, you can specify the size
        of the batch send to Algolia with batch_size (default to 1000).
        >>> from algoliasearch_django import update_records
        >>> qs = MyModel.objects.filter(myField=False)
        >>> update_records(MyModel, qs, myField=True)
        >>> qs.update(myField=True)
        """
        adapter = self.get_adapter(model)
        adapter.update_records(qs, batch_size=batch_size, **kwargs)
    def raw_search(self, model, query='', params=None):
        """Performs a search query and returns the parsed JSON."""
        if params is None:
            params = {}
        adapter = self.get_adapter(model)
        return adapter.raw_search(query, params)
    def clear_index(self, model):
        """Clears the index."""
        adapter = self.get_adapter(model)
        adapter.clear_index()
    def reindex_all(self, model, batch_size=1000):
        """
        Reindex all the records.
        By default, this method use Model.objects.all() but you can implement
        a method `get_queryset` in your subclass. This can be used to optimize
        the performance (for example with select_related or prefetch_related).
        """
        adapter = self.get_adapter(model)
        return adapter.reindex_all(batch_size)
    def reset(self, settings=None):
        """Reinitializes the Algolia engine and its client.
        :param settings: settings to use instead of the default django.conf.settings.algolia
        """
        # Re-running __init__ drops all registrations and rebuilds the client.
        self.__init__(settings=settings if settings is not None else SETTINGS)
    # Signalling hooks.
    def __post_save_receiver(self, instance, **kwargs):
        """Signal handler for when a registered model has been saved."""
        logger.debug('RECEIVE post_save FOR %s', instance.__class__)
        self.save_record(instance, **kwargs)
    def __pre_delete_receiver(self, instance, **kwargs):
        """Signal handler for when a registered model has been deleted."""
        logger.debug('RECEIVE pre_delete FOR %s', instance.__class__)
        self.delete_record(instance)
algolia_engine = AlgoliaEngine()
| 37.21134 | 113 | 0.664081 |
acf979ae886a25064d8240dbce8d04a02cd0df70 | 66,813 | py | Python | pymatgen/io/vasp/tests/test_sets.py | chunweizhu/pymatgen | acfe5899ee50add1e2a0dd6385ee4fba78122e0f | [
"MIT"
] | 1 | 2021-02-12T11:21:56.000Z | 2021-02-12T11:21:56.000Z | pymatgen/io/vasp/tests/test_sets.py | chunweizhu/pymatgen | acfe5899ee50add1e2a0dd6385ee4fba78122e0f | [
"MIT"
] | null | null | null | pymatgen/io/vasp/tests/test_sets.py | chunweizhu/pymatgen | acfe5899ee50add1e2a0dd6385ee4fba78122e0f | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import hashlib
import tempfile
import unittest
import pytest # type: ignore
from _pytest.monkeypatch import MonkeyPatch # type: ignore
from monty.json import MontyDecoder
from pymatgen.core import SETTINGS
from pymatgen.core import Lattice, Species, Structure
from pymatgen.core.surface import SlabGenerator
from pymatgen.io.vasp.inputs import Kpoints, Poscar
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.io.vasp.sets import *
from pymatgen.util.testing import PymatgenTest
MODULE_DIR = Path(__file__).resolve().parent
dec = MontyDecoder()
class SetChangeCheckTest(PymatgenTest):
    """Guard test: detects any change to the bundled VASP input-set YAML files."""
    def test_sets_changed(self):
        # WARNING!
        # These tests will fail when you change an input set.
        # They are included as a sanity check: if you want to change
        # an input set, please make sure to notify the users for that set.
        # For sets starting with "MVL" this is @shyuep, for sets starting
        # with "MP" this is @shyuep and @mkhorton.
        # NOTE(review): chdir is not restored afterwards; later tests in the
        # same process run from this directory.
        os.chdir(MODULE_DIR / "..")
        input_sets = glob.glob("*.yaml")
        hashes = {}
        # SHA-1 of each YAML file's text, compared against pinned values.
        for input_set in input_sets:
            with open(input_set, "r") as f:
                hashes[input_set] = hashlib.sha1(f.read().encode("utf-8")).hexdigest()
        known_hashes = {
            "MVLGWSet.yaml": "f4df9516cf7dd923b37281172c662a70fa32bebc",
            "MVLRelax52Set.yaml": "eb538ffb45c0cd13f13df48afc1e71c44d2e34b2",
            "MPHSERelaxSet.yaml": "2bb969e64b57ff049077c8ec10e64f94c9c97f42",
            "VASPIncarBase.yaml": "19762515f8deefb970f2968fca48a0d67f7964d4",
            "MPSCANRelaxSet.yaml": "2604952d387f6531bfc37641ac3b1ffcce9f1bc1",
            "MPRelaxSet.yaml": "4ea97d776fbdc7e168036f73e9176012a56c0a45",
            "MITRelaxSet.yaml": "1a0970f8cad9417ec810f7ab349dc854eaa67010",
            "vdW_parameters.yaml": "66541f58b221c8966109156f4f651b2ca8aa76da",
        }
        self.assertDictEqual(
            hashes,
            known_hashes,
            "These tests will fail when you change an input set. "
            "They are included as a sanity check: if you want to "
            "change an input set, please make sure to notify the "
            "users for that set. "
            'For sets starting with "MVL" this is @shyuep, '
            'for sets starting with "MP" this is @shyuep and @mkhorton.',
        )
class MITMPRelaxSetTest(PymatgenTest):
@classmethod
def setUpClass(cls):
cls.monkeypatch = MonkeyPatch()
filepath = cls.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(filepath)
cls.structure = poscar.structure
cls.coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
cls.lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
cls.mitset = MITRelaxSet(cls.structure)
cls.mitset_unsorted = MITRelaxSet(cls.structure, sort_structure=False)
cls.mpset = MPRelaxSet(cls.structure)
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_metal_check(self):
structure = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Cu"], [[0, 0, 0]])
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
vis = MITRelaxSet(structure)
incar = vis.incar
# Verify some things
self.assertIn("ISMEAR", str(w[-1].message))
def test_poscar(self):
structure = Structure(self.lattice, ["Fe", "Mn"], self.coords)
mitparamset = MITRelaxSet(structure, sort_structure=False)
s_unsorted = mitparamset.poscar.structure
mitparamset = MITRelaxSet(structure, sort_structure=True)
s_sorted = mitparamset.poscar.structure
self.assertEqual(s_unsorted[0].specie.symbol, "Fe")
self.assertEqual(s_sorted[0].specie.symbol, "Mn")
def test_potcar_symbols(self):
coords = []
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
structure = Structure(lattice, ["P", "Fe", "O"], coords)
mitparamset = MITRelaxSet(structure)
syms = mitparamset.potcar_symbols
self.assertEqual(syms, ["Fe", "P", "O"])
paramset = MPRelaxSet(structure, sort_structure=False)
syms = paramset.potcar_symbols
self.assertEqual(syms, ["P", "Fe_pv", "O"])
def test_potcar_validation(self):
structure = Structure(self.lattice, ["P", "Fe"], self.coords)
# Use pytest's monkeypatch to temporarily point pymatgen to a directory
# containing the wrong POTCARs (LDA potcars in a PBE directory)
with self.monkeypatch.context() as m:
m.setitem(SETTINGS, "PMG_VASP_PSP_DIR", str(self.TEST_FILES_DIR / "wrong_potcars"))
with pytest.warns(BadInputSetWarning, match="not known by pymatgen"):
MITRelaxSet(structure).potcar
def test_lda_potcar(self):
structure = Structure(self.lattice, ["P", "Fe"], self.coords)
p = MITRelaxSet(structure, user_potcar_functional="LDA").potcar
self.assertEqual(p.functional, "LDA")
def test_nelect(self):
coords = [[0] * 3, [0.5] * 3, [0.75] * 3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ["Si", "Si", "Fe"], coords)
self.assertAlmostEqual(MITRelaxSet(s).nelect, 16)
# Test estimate of number of bands (function of nelect) with nmag>0
self.assertAlmostEqual(MITRelaxSet(s).estimate_nbands(), 13)
self.assertAlmostEqual(MPRelaxSet(s).estimate_nbands(), 17)
# Test estimate of number of bands (function of nelect) with nmag==0
s = Structure(lattice, ["Si", "Si", "Si"], coords)
self.assertAlmostEqual(MITRelaxSet(s).estimate_nbands(), 11)
self.assertAlmostEqual(MPRelaxSet(s).estimate_nbands(), 11)
# Check that it works even when oxidation states are present. Was a bug
# previously.
s = Structure(lattice, ["Si4+", "Si4+", "Fe2+"], coords)
self.assertAlmostEqual(MITRelaxSet(s).nelect, 16)
self.assertAlmostEqual(MPRelaxSet(s).nelect, 22)
# Check that it works for disordered structure. Was a bug previously
s = Structure(lattice, ["Si4+", "Fe2+", "Si4+"], coords)
self.assertAlmostEqual(MITRelaxSet(s).nelect, 16)
self.assertAlmostEqual(MPRelaxSet(s).nelect, 22)
    def test_get_incar(self):
        """Exercise INCAR generation end to end: LDAU/LDAUU selection per
        anion type, MAGMOM defaults and user overrides, sulfide-vs-sulfate
        detection, van-der-Waals parameters, and NELECT from structure charge."""
        incar = self.mpset.incar
        self.assertEqual(incar["LDAUU"], [5.3, 0, 0])
        self.assertAlmostEqual(incar["EDIFF"], 0.0012)
        incar = self.mitset.incar
        self.assertEqual(incar["LDAUU"], [4.0, 0, 0])
        self.assertAlmostEqual(incar["EDIFF"], 1e-5)
        si = 14
        coords = []
        coords.append(np.array([0, 0, 0]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        # Silicon structure for testing.
        latt = Lattice(
            np.array(
                [
                    [3.8401979337, 0.00, 0.00],
                    [1.9200989668, 3.3257101909, 0.00],
                    [0.00, -2.2171384943, 3.1355090603],
                ]
            )
        )
        struct = Structure(latt, [si, si], coords)
        # Elemental Si: no Hubbard U expected.
        incar = MPRelaxSet(struct).incar
        self.assertNotIn("LDAU", incar)
        coords = []
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        lattice = Lattice(
            [
                [3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603],
            ]
        )
        # Intermetallic (no O/F anion): no Hubbard U expected.
        struct = Structure(lattice, ["Fe", "Mn"], coords)
        incar = MPRelaxSet(struct).incar
        self.assertNotIn("LDAU", incar)
        # check fluorides
        struct = Structure(lattice, ["Fe", "F"], coords)
        incar = MPRelaxSet(struct).incar
        self.assertEqual(incar["LDAUU"], [5.3, 0])
        self.assertEqual(incar["MAGMOM"], [5, 0.6])
        struct = Structure(lattice, ["Fe", "F"], coords)
        incar = MITRelaxSet(struct).incar
        self.assertEqual(incar["LDAUU"], [4.0, 0])
        # Make sure this works with species.
        struct = Structure(lattice, ["Fe2+", "O2-"], coords)
        incar = MPRelaxSet(struct).incar
        self.assertEqual(incar["LDAUU"], [5.3, 0])
        # Site-property magmoms follow the (sorted) structure order.
        struct = Structure(lattice, ["Fe", "Mn"], coords, site_properties={"magmom": (5.2, -4.5)})
        incar = MPRelaxSet(struct).incar
        self.assertEqual(incar["MAGMOM"], [-4.5, 5.2])
        incar = MITRelaxSet(struct, sort_structure=False).incar
        self.assertEqual(incar["MAGMOM"], [5.2, -4.5])
        # Spin carried on a Species object takes effect too.
        struct = Structure(lattice, [Species("Fe", 2, {"spin": 4.1}), "Mn"], coords)
        incar = MPRelaxSet(struct).incar
        self.assertEqual(incar["MAGMOM"], [5, 4.1])
        struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
        incar = MITRelaxSet(struct).incar
        self.assertEqual(incar["MAGMOM"], [4, 3])
        # User MAGMOM overrides win over the defaults.
        userset = MPRelaxSet(struct, user_incar_settings={"MAGMOM": {"Fe": 10, "S": -5, "Mn3+": 100}})
        self.assertEqual(userset.incar["MAGMOM"], [100, 0.6])
        # Setting a key to None removes it entirely from the INCAR.
        noencutset = MPRelaxSet(struct, user_incar_settings={"ENCUT": None})
        self.assertNotIn("ENCUT", noencutset.incar)
        # sulfide vs sulfate test
        coords = []
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        coords.append([0.25, 0.5, 0])
        struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
        incar = MITRelaxSet(struct).incar
        self.assertEqual(incar["LDAUU"], [1.9, 0])
        # Make sure Matproject sulfides are ok.
        self.assertNotIn("LDAUU", MPRelaxSet(struct).incar)
        struct = Structure(lattice, ["Fe", "S", "O"], coords)
        incar = MITRelaxSet(struct).incar
        self.assertEqual(incar["LDAUU"], [4.0, 0, 0])
        # Make sure Matproject sulfates are ok.
        self.assertEqual(MPRelaxSet(struct).incar["LDAUU"], [5.3, 0, 0])
        # test for default LDAUU value
        userset_ldauu_fallback = MPRelaxSet(struct, user_incar_settings={"LDAUU": {"Fe": 5.0, "S": 0}})
        self.assertEqual(userset_ldauu_fallback.incar["LDAUU"], [5.0, 0, 0])
        # Expected to be oxide (O is the most electronegative atom)
        s = Structure(lattice, ["Fe", "O", "S"], coords)
        incar = MITRelaxSet(s).incar
        self.assertEqual(incar["LDAUU"], [4.0, 0, 0])
        # Expected to be chloride (Cl is the most electronegative atom)
        s = Structure(lattice, ["Fe", "Cl", "S"], coords)
        incar = MITRelaxSet(s, user_incar_settings={"LDAU": True}).incar
        self.assertFalse("LDAUU" in incar)  # LDAU = False
        # User set a compound to be sulfide by specifing values of "LDAUL" etc.
        s = Structure(lattice, ["Fe", "Cl", "S"], coords)
        incar = MITRelaxSet(
            s,
            user_incar_settings={
                "LDAU": True,
                "LDAUL": {"Fe": 3},
                "LDAUU": {"Fe": 1.8},
            },
        ).incar
        self.assertEqual(incar["LDAUL"], [3.0, 0, 0])
        self.assertEqual(incar["LDAUU"], [1.8, 0, 0])
        # test that van-der-Waals parameters are parsed correctly
        incar = MITRelaxSet(struct, vdw="optB86b").incar
        self.assertEqual(incar["GGA"], "Mk")
        self.assertEqual(incar["LUSE_VDW"], True)
        self.assertEqual(incar["PARAM1"], 0.1234)
        # Test that NELECT is updated when a charge is present
        si = 14
        coords = []
        coords.append(np.array([0, 0, 0]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        # Silicon structure for testing.
        latt = Lattice(
            np.array(
                [
                    [3.8401979337, 0.00, 0.00],
                    [1.9200989668, 3.3257101909, 0.00],
                    [0.00, -2.2171384943, 3.1355090603],
                ]
            )
        )
        struct = Structure(latt, [si, si], coords, charge=1)
        mpr = MPRelaxSet(struct, use_structure_charge=True)
        self.assertEqual(mpr.incar["NELECT"], 7, "NELECT not properly set for nonzero charge")
        # test that NELECT does not get set when use_structure_charge = False
        mpr = MPRelaxSet(struct, use_structure_charge=False)
        self.assertFalse(
            "NELECT" in mpr.incar.keys(),
            "NELECT should not be set when " "use_structure_charge is False",
        )
        struct = Structure(latt, ["Co", "O"], coords)
        mpr = MPRelaxSet(struct)
        self.assertEqual(mpr.incar["MAGMOM"], [0.6, 0.6])
        struct = Structure(latt, ["Co4+", "O"], coords)
        mpr = MPRelaxSet(struct)
        self.assertEqual(mpr.incar["MAGMOM"], [1, 0.6])
        # test passing user_incar_settings and user_kpoint_settings of None
        sets = [
            MPRelaxSet(struct, user_incar_settings=None, user_kpoints_settings=None),
            MPStaticSet(struct, user_incar_settings=None, user_kpoints_settings=None),
            MPNonSCFSet(struct, user_incar_settings=None, user_kpoints_settings=None),
        ]
        for mp_set in sets:
            self.assertNotEqual(mp_set.kpoints, None)
            self.assertNotEqual(mp_set.incar, None)
    def test_get_kpoints(self):
        """KPOINTS generation: default grid, reciprocal_density overrides,
        user-supplied Kpoints objects, and automatic (MIT) mode."""
        kpoints = MPRelaxSet(self.structure).kpoints
        self.assertEqual(kpoints.kpts, [[2, 4, 5]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        kpoints = MPRelaxSet(self.structure, user_kpoints_settings={"reciprocal_density": 1000}).kpoints
        self.assertEqual(kpoints.kpts, [[6, 10, 13]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        # A fully-specified Kpoints object is passed through untouched.
        kpoints_obj = Kpoints(kpts=[[3, 3, 3]])
        kpoints_return = MPRelaxSet(self.structure, user_kpoints_settings=kpoints_obj).kpoints
        self.assertEqual(kpoints_return.kpts, [[3, 3, 3]])
        kpoints = self.mitset.kpoints
        self.assertEqual(kpoints.kpts, [[25]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
        # NOTE(review): kpoints_settings assigned after construction appears to
        # have no effect here - the asserted grid matches the default [[2, 4, 5]].
        recip_paramset = MPRelaxSet(self.structure, force_gamma=True)
        recip_paramset.kpoints_settings = {"reciprocal_density": 40}
        kpoints = recip_paramset.kpoints
        self.assertEqual(kpoints.kpts, [[2, 4, 5]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
    def test_get_vasp_input(self):
        """ISMEAR should switch from -5 to 0 when the cell becomes large
        (here via a 4x supercell)."""
        d = self.mitset.get_vasp_input()
        self.assertEqual(d["INCAR"]["ISMEAR"], -5)
        s = self.structure.copy()
        s.make_supercell(4)
        paramset = MPRelaxSet(s)
        d = paramset.get_vasp_input()
        self.assertEqual(d["INCAR"]["ISMEAR"], 0)
    def test_MPMetalRelaxSet(self):
        """Metal relax set: Methfessel-Paxton smearing (ISMEAR=1, SIGMA=0.2)
        and a denser k-point grid."""
        mpmetalset = MPMetalRelaxSet(self.get_structure("Sn"))
        incar = mpmetalset.incar
        self.assertEqual(incar["ISMEAR"], 1)
        self.assertEqual(incar["SIGMA"], 0.2)
        kpoints = mpmetalset.kpoints
        self.assertArrayAlmostEqual(kpoints.kpts[0], [5, 5, 5])
    def test_as_from_dict(self):
        """Serializing a relax set and decoding it back should preserve the
        config dict and any user INCAR overrides."""
        mitset = MITRelaxSet(self.structure)
        mpset = MPRelaxSet(self.structure)
        mpuserset = MPRelaxSet(
            self.structure,
            user_incar_settings={"MAGMOM": {"Fe": 10, "S": -5, "Mn3+": 100}},
        )
        d = mitset.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v._config_dict["INCAR"]["LDAUU"]["O"]["Fe"], 4)
        d = mpset.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v._config_dict["INCAR"]["LDAUU"]["O"]["Fe"], 5.3)
        d = mpuserset.as_dict()
        v = dec.process_decoded(d)
        # self.assertEqual(type(v), MPVaspInputSet)
        self.assertEqual(v.user_incar_settings["MAGMOM"], {"Fe": 10, "S": -5, "Mn3+": 100})
    def test_hubbard_off_and_ediff_override(self):
        """Disabling LDAU removes LDAUU, EDIFF override is honored, and
        LMAXMIX stays at 4 for d-block elements even without U."""
        p = MPRelaxSet(self.structure, user_incar_settings={"LDAU": False, "EDIFF": 1e-10})
        self.assertNotIn("LDAUU", p.incar)
        self.assertEqual(p.incar["EDIFF"], 1e-10)
        # after testing, we have determined LMAXMIX should still be 4 for d-block
        # even if U is turned off (thanks Andrew Rosen for reporting)
        self.assertEqual(p.incar["LMAXMIX"], 4)
    def test_write_input(self):
        """write_input emits the four VASP files, optionally a CIF, and with
        potcar_spec=True a POTCAR.spec instead of the real POTCAR.

        Writes to the current working directory and cleans up afterwards.
        """
        self.mitset.write_input(".", make_dir_if_not_present=True)
        for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR"]:
            self.assertTrue(os.path.exists(f))
        self.assertFalse(os.path.exists("Fe4P4O16.cif"))
        self.mitset.write_input(".", make_dir_if_not_present=True, include_cif=True)
        self.assertTrue(os.path.exists("Fe4P4O16.cif"))
        for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR", "Fe4P4O16.cif"]:
            os.remove(f)
        self.mitset.write_input(".", make_dir_if_not_present=True, potcar_spec=True)
        for f in ["INCAR", "KPOINTS", "POSCAR"]:
            self.assertTrue(os.path.exists(f))
        self.assertFalse(os.path.exists("POTCAR"))
        self.assertTrue(os.path.exists("POTCAR.spec"))
        for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR.spec"]:
            os.remove(f)
    def test_user_potcar_settings(self):
        """user_potcar_settings maps an element to a specific POTCAR symbol
        (here plain "Fe" instead of the default)."""
        vis = MPRelaxSet(self.structure, user_potcar_settings={"Fe": "Fe"})
        potcar = vis.potcar
        self.assertEqual(potcar.symbols, ["Fe", "P", "O"])
def test_valid_magmom_struct(self):
# First test the helper function
struct = self.structure.copy()
get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="v")
props = [isite.properties for isite in struct.sites]
self.assertEquals(props, [{"magmom": [1.0, 1.0, 1.0]}] * len(props))
struct = self.structure.copy()
get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="s")
props = [isite.properties for isite in struct.sites]
self.assertEquals(props, [{"magmom": 1.0}] * len(props))
struct.insert(0, "Li", [0, 0, 0])
get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="a")
props = [isite.properties for isite in struct.sites]
self.assertEquals(props, [{"magmom": 1.0}] * len(props))
struct = self.structure.copy()
get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="v")
struct.insert(0, "Li", [0, 0, 0], properties={"magmom": 10.0})
with self.assertRaises(TypeError) as context:
get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="a")
self.assertTrue("Magmom type conflict" in str(context.exception))
# Test the behavior of MPRelaxSet to atomacically fill in the missing magmom
struct = self.structure.copy()
get_valid_magmom_struct(structure=struct, inplace=True, spin_mode="s")
struct.insert(0, "Li", [0, 0, 0])
vis = MPRelaxSet(struct, user_potcar_settings={"Fe": "Fe"}, validate_magmom=False)
with self.assertRaises(TypeError) as context:
print(vis.get_vasp_input())
self.assertTrue("argument must be a string" in str(context.exception))
vis = MPRelaxSet(struct, user_potcar_settings={"Fe": "Fe"}, validate_magmom=True)
self.assertEqual(vis.get_vasp_input()["INCAR"]["MAGMOM"], [1.0] * len(struct))
class MPStaticSetTest(PymatgenTest):
    """Tests for MPStaticSet, including inheritance from previous calculations,
    KSPACING handling, zipped output and FFT-grid estimation."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        warnings.simplefilter("ignore")

    def test_init(self):
        """Static sets built from a previous relaxation inherit ENCUT and the
        k-point style; fresh sets do not."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["NSW"], 0)
        # Check that the ENCUT has been inherited.
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst)
        # Check as from dict.
        vis = MPStaticSet.from_dict(vis.as_dict())
        self.assertEqual(vis.incar["NSW"], 0)
        # Check that the ENCUT has been inherited.
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst)
        non_prev_vis = MPStaticSet(vis.structure, user_incar_settings={"LORBIT": 12, "LWAVE": True})
        self.assertEqual(non_prev_vis.incar["NSW"], 0)
        # Check that the ENCUT and Kpoints style has NOT been inherited.
        self.assertEqual(non_prev_vis.incar["ENCUT"], 520)
        # Check that user incar settings are applied.
        self.assertEqual(non_prev_vis.incar["LORBIT"], 12)
        self.assertTrue(non_prev_vis.incar["LWAVE"])
        self.assertEqual(non_prev_vis.kpoints.style, Kpoints.supported_modes.Gamma)
        v2 = MPStaticSet.from_dict(non_prev_vis.as_dict())
        self.assertEqual(v2.incar["ENCUT"], 520)
        # Check that user incar settings are applied.
        self.assertEqual(v2.incar["LORBIT"], 12)
        # lepsilon mode: DFPT dielectric settings.
        leps_vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run, lepsilon=True)
        self.assertTrue(leps_vis.incar["LEPSILON"])
        self.assertEqual(leps_vis.incar["IBRION"], 8)
        self.assertEqual(leps_vis.incar["EDIFF"], 1e-5)
        self.assertNotIn("NPAR", leps_vis.incar)
        self.assertNotIn("NSW", leps_vis.incar)
        self.assertEqual(non_prev_vis.kpoints.kpts, [[11, 10, 10]])
        non_prev_vis = MPStaticSet(vis.structure, reciprocal_density=200)
        self.assertEqual(non_prev_vis.kpoints.kpts, [[14, 12, 12]])
        # Check LCALCPOL flag
        lcalcpol_vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run, lcalcpol=True)
        self.assertTrue(lcalcpol_vis.incar["LCALCPOL"])

    def test_user_incar_kspacing(self):
        # Make sure user KSPACING settings properly overrides KPOINTS.
        si = self.get_structure("Si")
        vis = MPRelaxSet(si, user_incar_settings={"KSPACING": 0.22})
        self.assertEqual(vis.incar["KSPACING"], 0.22)
        self.assertEqual(vis.kpoints, None)

    def test_kspacing_override(self):
        # If KSPACING is set and user_kpoints_settings are given,
        # make sure the user_kpoints_settings override KSPACING
        si = self.get_structure("Si")
        vis = MPRelaxSet(
            si,
            user_incar_settings={"KSPACING": 0.22},
            user_kpoints_settings={"reciprocal_density": 1000},
        )
        self.assertEqual(vis.incar.get("KSPACING"), None)
        self.assertIsInstance(vis.kpoints, Kpoints)

    def test_override_from_prev_calc(self):
        """override_from_prev_calc on an existing set mirrors from_prev_calc."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        vis = MPStaticSet(_dummy_structure)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst)
        # Check LCALCPOL flag
        lcalcpol_vis = MPStaticSet(_dummy_structure, lcalcpol=True)
        lcalcpol_vis = lcalcpol_vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(lcalcpol_vis.incar["LCALCPOL"])

    def test_standardize_structure(self):
        """standardize=True changes the cell so it no longer matches the
        conventional input structure."""
        sga = SpacegroupAnalyzer(self.get_structure("Si"))
        original_structure = sga.get_conventional_standard_structure()
        sm = StructureMatcher(primitive_cell=False, scale=False)
        vis = MPStaticSet(original_structure)
        self.assertTrue(sm.fit(vis.structure, original_structure))
        vis = MPStaticSet(original_structure, standardize=True)
        self.assertFalse(sm.fit(vis.structure, original_structure))

    def test_write_input_zipped(self):
        """write_input(zip_output=True) bundles all input files into one zip."""
        vis = MPStaticSet(self.get_structure("Si"))
        vis.write_input(output_dir=".", potcar_spec=True, zip_output=True)
        self.assertTrue(os.path.exists("MPStaticSet.zip"))
        # Fix: previously the ZipFile was bound to "zip" (shadowing the
        # builtin) and the inner member file was never closed; also removed a
        # leftover debug print of the archive contents.
        with ZipFile("MPStaticSet.zip", "r") as zip_file:
            contents = zip_file.namelist()
            self.assertTrue(set(contents).issuperset({"INCAR", "POSCAR", "POTCAR.spec", "KPOINTS"}))
            with zip_file.open("POTCAR.spec", "r") as spec_file:
                spec = spec_file.read().decode()
            self.assertEqual(spec, "Si")
        os.remove("MPStaticSet.zip")

    def test_conflicting_arguments(self):
        """Passing both the deprecated potcar_functional and the new
        user_potcar_functional kwarg must raise a ValueError."""
        si = self.get_structure("Si")
        # Keep only the raising call inside pytest.raises; the result is not used.
        with pytest.raises(ValueError, match="deprecated"):
            MPStaticSet(si, potcar_functional="PBE", user_potcar_functional="PBE")

    def test_grid_size_from_struct(self):
        # TODO grab a bunch_of_calculations store as a list of tuples
        # (structure, ngx, ..., ngxf, ...) where all the grid size values are generated by vasp
        # check that the code produces the same grid sizes
        fname = self.TEST_FILES_DIR / "grid_data_files" / "vasp_inputs_for_grid_check.json"
        parsed_vasp_data = loadfn(fname)
        for tt in parsed_vasp_data:
            ng = [tt["input"]["parameters"][ik] for ik in ["NGX", "NGY", "NGZ"]]
            ngf = [tt["input"]["parameters"][ik] for ik in ["NGXF", "NGYF", "NGZF"]]
            struct = tt["input"]["structure"]
            static_set = MPStaticSet(struct)
            matched = static_set.calculate_ng() == (ng, ngf)
            self.assertTrue(matched)

    def tearDown(self):
        shutil.rmtree(self.tmp)
        warnings.simplefilter("default")
class MPNonSCFSetTest(PymatgenTest):
    """Tests for MPNonSCFSet in its Boltztrap, Uniform and Line modes."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        warnings.simplefilter("ignore")

    def test_init(self):
        """from_prev_calc: mode-specific ISMEAR/ISYM/NEDOS, ENCUT inheritance
        and CHGCAR copying behavior."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        # check boltztrap mode
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Boltztrap")
        self.assertEqual(vis.incar["ISMEAR"], 0)
        # check uniform mode
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform")
        self.assertEqual(vis.incar["ISMEAR"], -5)
        self.assertEqual(vis.incar["ISYM"], 2)
        # check uniform mode with automatic nedos
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform", nedos=0)
        self.assertEqual(vis.incar["NEDOS"], 12217)
        # test line mode
        vis = MPNonSCFSet.from_prev_calc(
            prev_calc_dir=prev_run,
            mode="Line",
            copy_chgcar=False,
            user_incar_settings={"SIGMA": 0.025},
        )
        self.assertEqual(vis.incar["NSW"], 0)
        # Check that the ENCUT has been inherited.
        self.assertEqual(vis.incar["ENCUT"], 600)
        # Check that the user_incar_settings works
        self.assertEqual(vis.incar["SIGMA"], 0.025)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)
        # Check as from dict.
        vis = MPNonSCFSet.from_dict(vis.as_dict())
        self.assertEqual(vis.incar["NSW"], 0)
        # Check that the ENCUT has been inherited.
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)
        # copy_chgcar=False: no CHGCAR should be written.
        vis.write_input(self.tmp)
        self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Line", copy_chgcar=True)
        # check ISMEAR set correctly for line mode
        self.assertEqual(vis.incar["ISMEAR"], 0)
        vis.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
        os.remove(os.path.join(self.tmp, "CHGCAR"))
        # With standardize=True the CHGCAR is not copied (cell changed).
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, standardize=True, mode="Line", copy_chgcar=True)
        vis.write_input(self.tmp)
        self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))

    def test_override_from_prev(self):
        """override_from_prev_calc on an existing set mirrors from_prev_calc
        for all three modes."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        # test override_from_prev
        vis = MPNonSCFSet(_dummy_structure, mode="Boltztrap")
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["ISMEAR"], 0)
        vis = MPNonSCFSet(_dummy_structure, mode="Uniform")
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["ISMEAR"], -5)
        self.assertEqual(vis.incar["ISYM"], 2)
        vis = MPNonSCFSet(_dummy_structure, mode="Uniform", nedos=0)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["NEDOS"], 12217)
        # test line mode
        vis = MPNonSCFSet(
            _dummy_structure,
            mode="Line",
            copy_chgcar=False,
            user_incar_settings={"SIGMA": 0.025},
        )
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ENCUT"], 600)
        self.assertEqual(vis.incar["SIGMA"], 0.025)
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)
        vis = MPNonSCFSet(_dummy_structure, mode="Line", copy_chgcar=True)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(vis.incar["ISMEAR"], 0)
        vis.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
        os.remove(os.path.join(self.tmp, "CHGCAR"))
        # With standardize=True the CHGCAR is not copied (cell changed).
        vis = MPNonSCFSet(_dummy_structure, standardize=True, mode="Line", copy_chgcar=True)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        vis.write_input(self.tmp)
        self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))

    def test_kpoints(self):
        # test k-points are generated in the correct format
        prev_run = self.TEST_FILES_DIR / "relaxation"
        # Uniform mode: a single explicit grid row (shape (1, 3)).
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform", copy_chgcar=False)
        self.assertEqual(np.array(vis.kpoints.kpts).shape, (1, 3))
        # Line/Boltztrap modes: explicit k-point lists, not a single grid row.
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Line", copy_chgcar=False)
        self.assertNotEqual(np.array(vis.kpoints.kpts).shape, (1, 3))
        vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Boltztrap", copy_chgcar=False)
        self.assertNotEqual(np.array(vis.kpoints.kpts).shape, (1, 3))

    def test_optics(self):
        """optics=True keeps tetrahedron smearing, sets NEDOS and LOPTICS."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        vis = MPNonSCFSet.from_prev_calc(
            prev_calc_dir=prev_run,
            copy_chgcar=False,
            optics=True,
            mode="Uniform",
            nedos=2001,
        )
        self.assertEqual(vis.incar["NSW"], 0)
        # Check that the ENCUT has been inherited.
        self.assertEqual(vis.incar["ENCUT"], 600)
        # check NEDOS and ISMEAR set correctly
        self.assertEqual(vis.incar["NEDOS"], 2001)
        self.assertEqual(vis.incar["ISMEAR"], -5)
        self.assertEqual(vis.incar["ISYM"], 2)
        self.assertTrue(vis.incar["LOPTICS"])
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Gamma)

    def test_user_kpoint_override(self):
        """A user-supplied Kpoints object overrides the mode's default style."""
        user_kpoints_override = Kpoints(
            style=Kpoints.supported_modes.Gamma, kpts=((1, 1, 1),)
        )  # the default kpoints style is reciprocal
        prev_run = self.TEST_FILES_DIR / "relaxation"
        vis = MPNonSCFSet.from_prev_calc(
            prev_calc_dir=prev_run,
            copy_chgcar=False,
            optics=True,
            mode="Uniform",
            nedos=2001,
            user_kpoints_settings=user_kpoints_override,
        )
        self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Gamma)

    def tearDown(self):
        shutil.rmtree(self.tmp)
        warnings.simplefilter("default")
class MagmomLdauTest(PymatgenTest):
    """Tests for MAGMOM/LDAU decoration carried over from previous runs."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_structure_from_prev_run(self):
        """Site-decorated LDAU/magmom values from a vasprun should map back to
        per-species INCAR lists in POSCAR symbol order."""
        vrun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.magmom_ldau")
        structure = vrun.final_structure
        poscar = Poscar(structure)
        structure_decorated = get_structure_from_prev_run(vrun)
        ldau_ans = {"LDAUU": [5.3, 0.0], "LDAUL": [2, 0], "LDAUJ": [0.0, 0.0]}
        magmom_ans = [5.0, 5.0, 5.0, 5.0, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6]
        ldau_dict = {}
        for key in ("LDAUU", "LDAUJ", "LDAUL"):
            if hasattr(structure_decorated[0], key.lower()):
                # dict comprehension (was dict([...])) - same mapping, no
                # intermediate list.
                m = {site.specie.symbol: getattr(site, key.lower()) for site in structure_decorated}
                ldau_dict[key] = [m[sym] for sym in poscar.site_symbols]
        magmom = [site.magmom for site in structure_decorated]
        self.assertEqual(ldau_dict, ldau_ans)
        self.assertEqual(magmom, magmom_ans)

    def test_ln_magmom(self):
        """Lanthanide substitutions should pick up their MAGMOM defaults from
        VASPIncarBase.yaml regardless of the resulting POSCAR species order."""
        YAML_PATH = os.path.join(os.path.dirname(__file__), "../VASPIncarBase.yaml")
        MAGMOM_SETTING = loadfn(YAML_PATH)["INCAR"]["MAGMOM"]
        structure = Structure.from_file(self.TEST_FILES_DIR / "La4Fe4O12.cif")
        structure.add_oxidation_state_by_element({"La": +3, "Fe": +3, "O": -2})
        for ion in MAGMOM_SETTING:
            s = structure.copy()
            s.replace_species({"La3+": ion})
            vis = MPRelaxSet(s)
            # Species ordering in the POSCAR depends on the substituted ion,
            # so the expected MAGMOM list depends on where Fe ends up.
            fe_pos = vis.poscar.comment.index("Fe")
            if fe_pos == 0:
                magmom_ans = [5] * 4 + [MAGMOM_SETTING[ion]] * 4 + [0.6] * 12
            else:
                magmom_ans = [MAGMOM_SETTING[ion]] * 4 + [5] * 4 + [0.6] * 12
            self.assertEqual(vis.incar["MAGMOM"], magmom_ans)
class MITMDSetTest(PymatgenTest):
    """Checks for the MIT molecular-dynamics input set."""

    def setUp(self):
        poscar_path = self.TEST_FILES_DIR / "POSCAR"
        self.struct = Poscar.from_file(poscar_path).structure
        self.mitmdparam = MITMDSet(self.struct, 300, 1200, 10000)
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_params(self):
        """MD runs: no +U, loose EDIFF, single Gamma-centered k-point."""
        md_set = self.mitmdparam
        self.assertEqual(md_set.potcar_symbols, ["Fe", "P", "O"])
        incar = md_set.incar
        self.assertNotIn("LDAUU", incar)
        self.assertAlmostEqual(incar["EDIFF"], 1e-5)
        kpts = md_set.kpoints
        self.assertEqual(kpts.kpts, [(1, 1, 1)])
        self.assertEqual(kpts.style, Kpoints.supported_modes.Gamma)

    def test_as_from_dict(self):
        """Round-trip through as_dict/process_decoded preserves MD settings."""
        decoded = dec.process_decoded(self.mitmdparam.as_dict())
        self.assertEqual(type(decoded), MITMDSet)
        self.assertEqual(decoded._config_dict["INCAR"]["TEBEG"], 300)
        self.assertEqual(decoded._config_dict["INCAR"]["PREC"], "Low")
class MVLNPTMDSetTest(PymatgenTest):
    """Checks for the MVL NPT molecular-dynamics input set."""

    def setUp(self):
        poscar = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR")
        self.struct = poscar.structure
        self.mvl_npt_set = MVLNPTMDSet(self.struct, start_temp=0, end_temp=300, nsteps=1000)
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        """NPT MD: Langevin settings, ENCUT = 1.5x max ENMAX, Gamma-only k-points."""
        npt_set = self.mvl_npt_set
        self.assertEqual(npt_set.potcar_symbols, ["Fe", "P", "O"])
        incar = npt_set.incar
        self.assertNotIn("LDAUU", incar)
        self.assertAlmostEqual(incar["EDIFF"], 1e-5)
        self.assertEqual(incar["LANGEVIN_GAMMA_L"], 1)
        self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10])
        # ENCUT is derived from the largest ENMAX among the POTCARs.
        enmax = max(npt_set.potcar[i].keywords["ENMAX"] for i in range(self.struct.ntypesp))
        self.assertAlmostEqual(incar["ENCUT"], 1.5 * enmax)
        for key, expected in (("IALGO", 48), ("ISIF", 3), ("MDALGO", 3), ("SMASS", 0), ("PREC", "Low")):
            self.assertEqual(incar[key], expected)
        kpts = npt_set.kpoints
        self.assertEqual(kpts.kpts, [(1, 1, 1)])
        self.assertEqual(kpts.style, Kpoints.supported_modes.Gamma)

    def test_as_from_dict(self):
        """Round-trip through as_dict/process_decoded preserves NSW."""
        decoded = dec.process_decoded(self.mvl_npt_set.as_dict())
        self.assertEqual(type(decoded), MVLNPTMDSet)
        self.assertEqual(decoded._config_dict["INCAR"]["NSW"], 1000)
class MITNEBSetTest(PymatgenTest):
    """Tests for the MIT nudged-elastic-band input set (interpolated images)."""

    def setUp(self):
        # Two endpoints differing by one site; interpolate 3 intermediate
        # frames to get a 4-image NEB path.
        c1 = [[0.5] * 3, [0.9] * 3]
        c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
        s1 = Structure(Lattice.cubic(5), ["Si", "Si"], c1)
        s2 = Structure(Lattice.cubic(5), ["Si", "Si"], c2)
        structs = []
        for s in s1.interpolate(s2, 3, pbc=True):
            structs.append(Structure.from_sites(s.sites, to_unit_cell=True))
        self.structures = structs
        self.vis = MITNEBSet(self.structures)
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_potcar_symbols(self):
        syms = self.vis.potcar_symbols
        self.assertEqual(syms, ["Si"])

    def test_incar(self):
        incar = self.vis.incar
        self.assertNotIn("LDAUU", incar)
        self.assertAlmostEqual(incar["EDIFF"], 0.00001)

    def test_kpoints(self):
        kpoints = self.vis.kpoints
        self.assertEqual(kpoints.kpts, [[25]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)

    def test_as_from_dict(self):
        """IMAGES = number of interior images (endpoints excluded)."""
        d = self.vis.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v._config_dict["INCAR"]["IMAGES"], 2)

    def test_write_input(self):
        """write_input emits shared files plus one numbered POSCAR directory
        per image (00-03), then cleans up the working directory."""
        self.vis.write_input(".", write_cif=True, write_endpoint_inputs=True, write_path_cif=True)
        self.assertTrue(os.path.exists("INCAR"))
        self.assertTrue(os.path.exists("KPOINTS"))
        self.assertTrue(os.path.exists("POTCAR"))
        self.assertTrue(os.path.exists("00/POSCAR"))
        self.assertTrue(os.path.exists("01/POSCAR"))
        self.assertTrue(os.path.exists("02/POSCAR"))
        self.assertTrue(os.path.exists("03/POSCAR"))
        self.assertFalse(os.path.exists("04/POSCAR"))
        self.assertTrue(os.path.exists("00/INCAR"))
        self.assertTrue(os.path.exists("path.cif"))
        for d in ["00", "01", "02", "03"]:
            shutil.rmtree(d)
        for f in ["INCAR", "KPOINTS", "POTCAR", "path.cif"]:
            os.remove(f)
class MPSOCSetTest(PymatgenTest):
    """Checks for the spin-orbit-coupling (SOC) input set."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def _check_soc_incar(self, incar):
        # Shared expectations for an SOC set built from the fe_monomer run.
        self.assertEqual(incar["ISYM"], -1)
        self.assertTrue(incar["LSORBIT"])
        self.assertEqual(incar["ICHARG"], 11)
        self.assertEqual(incar["SAXIS"], [1, 0, 0])
        self.assertEqual(incar["MAGMOM"], [[0, 0, 3]])
        self.assertEqual(incar["SIGMA"], 0.025)

    def test_from_prev_calc(self):
        """Building directly from a previous calc dir sets the SOC flags."""
        prev_run = self.TEST_FILES_DIR / "fe_monomer"
        soc_set = MPSOCSet.from_prev_calc(
            prev_calc_dir=prev_run,
            magmom=[3],
            saxis=(1, 0, 0),
            user_incar_settings={"SIGMA": 0.025},
        )
        self._check_soc_incar(soc_set.incar)

    def test_override_from_prev_calc(self):
        """override_from_prev_calc on an existing set yields the same flags."""
        prev_run = self.TEST_FILES_DIR / "fe_monomer"
        soc_set = MPSOCSet(
            _dummy_structure,
            magmom=[3],
            saxis=(1, 0, 0),
            user_incar_settings={"SIGMA": 0.025},
        )
        soc_set.override_from_prev_calc(prev_calc_dir=prev_run)
        self._check_soc_incar(soc_set.incar)
class MPNMRSetTest(PymatgenTest):
    """Checks for the NMR (chemical-shift / EFG) input sets."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        """cs mode sets LCHIMAG; efg mode sets QUAD_EFG per isotope choice."""
        structure = Structure.from_file(self.TEST_FILES_DIR / "Li.cif")
        # Default mode (chemical shift): LCHIMAG on, no quadrupole moments.
        cs_incar = MPNMRSet(structure).incar
        self.assertTrue(cs_incar.get("LCHIMAG", None))
        self.assertEqual(cs_incar.get("QUAD_EFG", None), None)
        # EFG mode with the default isotope.
        efg_incar = MPNMRSet(structure, mode="efg").incar
        self.assertFalse(efg_incar.get("LCHIMAG", None))
        self.assertEqual(efg_incar.get("QUAD_EFG", None), [-0.808])
        # EFG mode with an explicitly requested isotope.
        efg_li7_incar = MPNMRSet(structure, mode="efg", isotopes=["Li-7"]).incar
        self.assertFalse(efg_li7_incar.get("LCHIMAG", None))
        self.assertEqual(efg_li7_incar.get("QUAD_EFG", None), [-40.1])
class MVLSlabSetTest(PymatgenTest):
    """Tests for bulk, slab and auto-dipole variants of MVLSlabSet."""

    def setUp(self):
        s = self.get_structure("Li2O")
        gen = SlabGenerator(s, (1, 0, 0), 10, 10)
        self.slab = gen.get_slab()
        self.bulk = self.slab.oriented_unit_cell
        vis_bulk = MVLSlabSet(self.bulk, bulk=True)
        vis = MVLSlabSet(self.slab)
        vis_dipole = MVLSlabSet(self.slab, auto_dipole=True)
        self.d_bulk = vis_bulk.get_vasp_input()
        self.d_slab = vis.get_vasp_input()
        self.d_dipole = vis_dipole.get_vasp_input()
        self.vis = vis
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_user_incar_settings(self):
        # Make sure user incar settings properly override AMIX.
        si = self.get_structure("Si")
        vis = MVLSlabSet(si, user_incar_settings={"AMIX": 0.1})
        self.assertEqual(vis.incar["AMIX"], 0.1)

    def test_bulk(self):
        """Bulk variant allows full cell relaxation (ISIF=3)."""
        incar_bulk = self.d_bulk["INCAR"]
        poscar_bulk = self.d_bulk["POSCAR"]
        self.assertEqual(incar_bulk["ISIF"], 3)
        self.assertEqual(incar_bulk["EDIFF"], 1e-4)
        self.assertEqual(incar_bulk["EDIFFG"], -0.02)
        self.assertEqual(poscar_bulk.structure.formula, self.bulk.formula)

    def test_slab(self):
        """Slab variant: damped mixing, ISIF=2, and auto-dipole corrections."""
        incar_slab = self.d_slab["INCAR"]
        poscar_slab = self.d_slab["POSCAR"]
        potcar_slab = self.d_slab["POTCAR"]
        self.assertEqual(incar_slab["AMIN"], 0.01)
        self.assertEqual(incar_slab["AMIX"], 0.2)
        self.assertEqual(incar_slab["BMIX"], 0.001)
        self.assertEqual(incar_slab["NELMIN"], 8)
        # No volume relaxation during slab calculations
        self.assertEqual(incar_slab["ISIF"], 2)
        self.assertEqual(potcar_slab.functional, "PBE")
        self.assertEqual(potcar_slab.symbols[1], "O")
        self.assertEqual(potcar_slab.symbols[0], "Li_sv")
        self.assertEqual(poscar_slab.structure.formula, self.slab.formula)
        # Test auto-dipole
        dipole_incar = self.d_dipole["INCAR"]
        self.assertTrue(dipole_incar["LDIPOL"])
        self.assertArrayAlmostEqual(dipole_incar["DIPOL"], [0.2323, 0.2323, 0.2165], decimal=4)
        self.assertEqual(dipole_incar["IDIPOL"], 3)

    def test_kpoints(self):
        kpoints_slab = self.d_slab["KPOINTS"].kpts[0]
        kpoints_bulk = self.d_bulk["KPOINTS"].kpts[0]
        self.assertEqual(kpoints_bulk[0], kpoints_slab[0])
        self.assertEqual(kpoints_bulk[1], kpoints_slab[1])
        self.assertEqual(kpoints_bulk[0], 15)
        self.assertEqual(kpoints_bulk[1], 15)
        self.assertEqual(kpoints_bulk[2], 15)
        # The last kpoint in a slab should always be 1
        self.assertEqual(kpoints_slab[2], 1)

    def test_as_dict(self):
        """Round-trip serialization should reconstruct an MVLSlabSet."""
        vis_dict = self.vis.as_dict()
        new = MVLSlabSet.from_dict(vis_dict)
        # Fix: this test previously created `new` without asserting anything,
        # so it could never fail. At minimum the round trip must give back an
        # MVLSlabSet instance.
        self.assertIsInstance(new, MVLSlabSet)
class MVLElasticSetTest(PymatgenTest):
    """Checks for the MVL elastic-constants input set."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        """Finite-difference elastic settings: IBRION=6, NFREE=2, small POTIM,
        and no NPAR."""
        incar = MVLElasticSet(self.get_structure("Graphite")).incar
        for key, expected in (("IBRION", 6), ("NFREE", 2), ("POTIM", 0.015)):
            self.assertEqual(incar[key], expected)
        self.assertNotIn("NPAR", incar)
class MVLGWSetTest(PymatgenTest):
    """Tests for the GW workflow input sets (static, diag, GW, BSE)."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        self.s = PymatgenTest.get_structure("Li2O")
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")
        shutil.rmtree(self.tmp)

    def test_static(self):
        """Static step uses GW-flavored POTCARs and a Gamma k-point grid."""
        mvlgwsc = MVLGWSet(self.s)
        incar = mvlgwsc.incar
        self.assertEqual(incar["SIGMA"], 0.01)
        kpoints = mvlgwsc.kpoints
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        symbols = mvlgwsc.potcar.symbols
        self.assertEqual(symbols, ["Li_sv_GW", "O_GW"])

    def test_diag(self):
        """Diagonalization step: exact diagonalization with LOPTICS, and the
        WAVECAR copied from the previous run."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwdiag = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=True, mode="diag")
        mvlgwdiag.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertEqual(mvlgwdiag.incar["NBANDS"], 32)
        self.assertEqual(mvlgwdiag.incar["ALGO"], "Exact")
        self.assertTrue(mvlgwdiag.incar["LOPTICS"])
        # test override_from_prev_calc
        mvlgwdiag = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="diag")
        mvlgwdiag.override_from_prev_calc(prev_calc_dir=prev_run)
        mvlgwdiag.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertEqual(mvlgwdiag.incar["NBANDS"], 32)
        self.assertEqual(mvlgwdiag.incar["ALGO"], "Exact")
        self.assertTrue(mvlgwdiag.incar["LOPTICS"])

    def test_bse(self):
        """GW/BSE steps: WAVECAR/WAVEDER copying and mode-specific INCAR tags."""
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=True, mode="BSE")
        mvlgwgbse.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVEDER")))
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=False, mode="GW")
        self.assertEqual(mvlgwgbse.incar["NOMEGA"], 80)
        self.assertEqual(mvlgwgbse.incar["ENCUTGW"], 250)
        self.assertEqual(mvlgwgbse.incar["ALGO"], "GW0")
        mvlgwgbse1 = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=False, mode="BSE")
        self.assertEqual(mvlgwgbse1.incar["ANTIRES"], 0)
        self.assertEqual(mvlgwgbse1.incar["NBANDSO"], 20)
        self.assertEqual(mvlgwgbse1.incar["ALGO"], "BSE")
        # test override_from_prev_calc
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="BSE")
        mvlgwgbse.override_from_prev_calc(prev_calc_dir=prev_run)
        mvlgwgbse.write_input(self.tmp)
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
        self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVEDER")))
        prev_run = self.TEST_FILES_DIR / "relaxation"
        mvlgwgbse = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="GW")
        mvlgwgbse.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(mvlgwgbse.incar["NOMEGA"], 80)
        self.assertEqual(mvlgwgbse.incar["ENCUTGW"], 250)
        self.assertEqual(mvlgwgbse.incar["ALGO"], "GW0")
        mvlgwgbse1 = MVLGWSet(_dummy_structure, copy_wavecar=False, mode="BSE")
        mvlgwgbse1.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertEqual(mvlgwgbse1.incar["ANTIRES"], 0)
        self.assertEqual(mvlgwgbse1.incar["NBANDSO"], 20)
        self.assertEqual(mvlgwgbse1.incar["ALGO"], "BSE")
class MPHSEBSTest(PymatgenTest):
    """Tests the HSE band-structure input set in its three k-point modes."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")
        # Fix: the temporary directory created in setUp was never removed,
        # leaking one directory per test run (the other classes in this
        # module clean theirs up with shutil.rmtree).
        shutil.rmtree(self.tmp, ignore_errors=True)

    def test_init(self):
        prev_run = self.TEST_FILES_DIR / "static_silicon"

        vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="uniform")
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 16)

        vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="gap")
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 18)

        vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="line")
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(vis.incar["HFSCREEN"], 0.2)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ISYM"], 3)
        self.assertEqual(len(vis.kpoints.kpts), 180)

    def test_override_from_prev_calc(self):
        prev_run = self.TEST_FILES_DIR / "static_silicon"

        vis = MPHSEBSSet(_dummy_structure, mode="uniform")
        vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 16)

        vis = MPHSEBSSet(_dummy_structure, mode="gap")
        vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(len(vis.kpoints.kpts), 18)

        vis = MPHSEBSSet(_dummy_structure, mode="line")
        vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(vis.incar["LHFCALC"])
        self.assertEqual(vis.incar["HFSCREEN"], 0.2)
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["ISYM"], 3)
        self.assertEqual(len(vis.kpoints.kpts), 180)
class MVLScanRelaxSetTest(PymatgenTest):
    """Checks the MVL SCAN relaxation input set."""

    def setUp(self):
        poscar = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR")
        self.struct = poscar.structure
        self.mvl_scan_set = MVLScanRelaxSet(
            self.struct, user_potcar_functional="PBE_52", user_incar_settings={"NSW": 500}
        )
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        incar = self.mvl_scan_set.incar
        for key in ("METAGGA", "LASPH", "ADDGRID"):
            self.assertIn(key, incar)
        self.assertEqual(incar["NSW"], 500)
        # SCAN+rVV10 should set the rVV10 b parameter.
        scan_rvv10_set = MVLScanRelaxSet(self.struct, vdw="rVV10")
        self.assertEqual(scan_rvv10_set.incar["BPARAM"], 15.7)

    def test_potcar(self):
        self.assertEqual(self.mvl_scan_set.potcar.functional, "PBE_52")
        pbe54_set = MVLScanRelaxSet(self.struct, user_potcar_functional="PBE_54")
        self.assertEqual(pbe54_set.potcar.functional, "PBE_54")
        # Plain PBE potentials are rejected.
        self.assertRaises(ValueError, MVLScanRelaxSet, self.struct, user_potcar_functional="PBE")

    def test_as_from_dict(self):
        d = self.mvl_scan_set.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(type(v), MVLScanRelaxSet)
        self.assertEqual(v._config_dict["INCAR"]["METAGGA"], "SCAN")
        self.assertEqual(v.user_incar_settings["NSW"], 500)
class MPScanRelaxSetTest(PymatgenTest):
    """Checks the Materials Project r2SCAN relaxation input set."""

    def setUp(self):
        poscar = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR")
        self.struct = poscar.structure
        self.mp_scan_set = MPScanRelaxSet(
            self.struct, user_potcar_functional="PBE_52", user_incar_settings={"NSW": 500}
        )
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        incar = self.mp_scan_set.incar
        self.assertEqual(incar["METAGGA"], "R2scan")
        self.assertEqual(incar["LASPH"], True)
        self.assertEqual(incar["ENAUG"], 1360)
        self.assertEqual(incar["ENCUT"], 680)
        self.assertEqual(incar["NSW"], 500)
        # The default POTCAR contains metals, so metallic smearing applies.
        self.assertEqual(incar["KSPACING"], 0.22)
        self.assertEqual(incar["ISMEAR"], 2)
        self.assertEqual(incar["SIGMA"], 0.2)

    def test_scan_substitute(self):
        substituted = MPScanRelaxSet(
            self.struct,
            user_potcar_functional="PBE_52",
            user_incar_settings={"METAGGA": "SCAN"},
        )
        self.assertEqual(substituted.incar["METAGGA"], "Scan")

    def test_nonmetal(self):
        # KSPACING and ISMEAR change for a structure with a band gap.
        struct = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR.O2", check_for_POTCAR=False).structure
        incar = MPScanRelaxSet(struct, bandgap=1.1).incar
        self.assertAlmostEqual(incar["KSPACING"], 0.3064757, places=5)
        self.assertEqual(incar["ISMEAR"], -5)
        self.assertEqual(incar["SIGMA"], 0.05)

    def test_kspacing_cap(self):
        # KSPACING is capped at 0.44 for wide-gap insulators.
        struct = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR.O2", check_for_POTCAR=False).structure
        incar = MPScanRelaxSet(struct, bandgap=10).incar
        self.assertAlmostEqual(incar["KSPACING"], 0.44, places=5)
        self.assertEqual(incar["ISMEAR"], -5)
        self.assertEqual(incar["SIGMA"], 0.05)

    def test_incar_overrides(self):
        # user_incar_settings overrides the automatically determined
        # KSPACING / ISMEAR / SIGMA values.
        overridden = MPScanRelaxSet(
            self.struct,
            user_incar_settings={"KSPACING": 0.5, "ISMEAR": 0, "SIGMA": 0.05},
        )
        incar = overridden.incar
        self.assertEqual(incar["KSPACING"], 0.5)
        self.assertEqual(incar["ISMEAR"], 0)
        self.assertEqual(incar["SIGMA"], 0.05)

    def test_rvv10(self):
        # SCAN+rVV10 enables the vdW correction.
        scan_rvv10_set = MPScanRelaxSet(self.struct, vdw="rVV10")
        self.assertIn("LUSE_VDW", scan_rvv10_set.incar)
        self.assertEqual(scan_rvv10_set.incar["BPARAM"], 15.7)

    def test_other_vdw(self):
        # Unsupported vdW functionals warn and leave no vdW keys in the INCAR.
        with pytest.warns(UserWarning, match=r"not supported at this time"):
            scan_vdw_set = MPScanRelaxSet(self.struct, vdw="DFTD3")
            self.assertNotIn("LUSE_VDW", scan_vdw_set.incar)
            self.assertNotIn("IVDW", scan_vdw_set.incar)

    def test_potcar(self):
        self.assertEqual(self.mp_scan_set.potcar.functional, "PBE_52")
        # The default functional should be PBE_54.
        self.assertEqual(MPScanRelaxSet(self.struct).potcar.functional, "PBE_54")
        self.assertRaises(ValueError, MPScanRelaxSet, self.struct, user_potcar_functional="PBE")

    def test_as_from_dict(self):
        d = self.mp_scan_set.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(type(v), MPScanRelaxSet)
        self.assertEqual(v._config_dict["INCAR"]["METAGGA"], "R2SCAN")
        self.assertEqual(v.user_incar_settings["NSW"], 500)

    def test_write_input(self):
        self.mp_scan_set.write_input(".")
        self.assertTrue(os.path.exists("INCAR"))
        # KSPACING replaces an explicit KPOINTS file.
        self.assertFalse(os.path.exists("KPOINTS"))
        self.assertTrue(os.path.exists("POTCAR"))
        self.assertTrue(os.path.exists("POSCAR"))
        for f in ["INCAR", "POSCAR", "POTCAR"]:
            os.remove(f)
class MPScanStaticSetTest(PymatgenTest):
    """Checks the r2SCAN static set, including inheritance from a prior run."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        warnings.simplefilter("ignore")

    def tearDown(self):
        shutil.rmtree(self.tmp)
        warnings.simplefilter("default")

    def test_init(self):
        prev_run = self.TEST_FILES_DIR / "scan_relaxation"

        # Inherit from a previous SCAN relaxation.
        vis = MPScanStaticSet.from_prev_calc(prev_calc_dir=prev_run)
        # Static-set settings are applied...
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["LREAL"], False)
        self.assertEqual(vis.incar["LORBIT"], 11)
        self.assertEqual(vis.incar["LVHAR"], True)
        self.assertEqual(vis.incar["ISMEAR"], -5)
        # ...while ENCUT and related settings are inherited.
        self.assertEqual(vis.incar["ENCUT"], 680)
        self.assertEqual(vis.incar["METAGGA"], "R2scan")
        self.assertEqual(vis.incar["KSPACING"], 0.34292842)

        non_prev_vis = MPScanStaticSet(
            vis.structure,
            user_incar_settings={"ENCUT": 800, "LORBIT": 12, "LWAVE": True},
        )
        # Static-set settings are applied here too.
        self.assertEqual(non_prev_vis.incar["NSW"], 0)
        self.assertEqual(non_prev_vis.incar["LREAL"], False)
        self.assertEqual(non_prev_vis.incar["LVHAR"], True)
        self.assertEqual(vis.incar["ISMEAR"], -5)
        self.assertEqual(non_prev_vis.incar["METAGGA"], "R2scan")
        # Without a previous calculation, KSPACING keeps its default value.
        self.assertEqual(non_prev_vis.incar["KSPACING"], 0.22)
        # User INCAR settings are applied.
        self.assertEqual(non_prev_vis.incar["ENCUT"], 800)
        self.assertEqual(non_prev_vis.incar["LORBIT"], 12)
        self.assertTrue(non_prev_vis.incar["LWAVE"])

        # ...and survive a dict round trip.
        v2 = MPScanStaticSet.from_dict(non_prev_vis.as_dict())
        self.assertEqual(v2.incar["ENCUT"], 800)
        self.assertEqual(v2.incar["LORBIT"], 12)
        self.assertTrue(non_prev_vis.incar["LWAVE"])

        # LCALCPOL flag.
        lcalcpol_vis = MPScanStaticSet.from_prev_calc(prev_calc_dir=prev_run, lcalcpol=True)
        self.assertTrue(lcalcpol_vis.incar["LCALCPOL"])

        # LEPSILON flag.
        lepsilon_vis = MPScanStaticSet.from_prev_calc(prev_calc_dir=prev_run, lepsilon=True)
        self.assertTrue(lepsilon_vis.incar["LEPSILON"])
        self.assertTrue(lepsilon_vis.incar["LPEAD"])
        self.assertEqual(lepsilon_vis.incar["IBRION"], 8)
        self.assertIsNone(lepsilon_vis.incar.get("NSW"))
        self.assertIsNone(lepsilon_vis.incar.get("NPAR"))

    def test_override_from_prev_calc(self):
        prev_run = self.TEST_FILES_DIR / "scan_relaxation"

        vis = MPScanStaticSet(_dummy_structure)
        vis.override_from_prev_calc(prev_calc_dir=prev_run)
        # Static-set settings are applied...
        self.assertEqual(vis.incar["NSW"], 0)
        self.assertEqual(vis.incar["LREAL"], False)
        self.assertEqual(vis.incar["LORBIT"], 11)
        self.assertEqual(vis.incar["LVHAR"], True)
        self.assertEqual(vis.incar["ISMEAR"], -5)
        # ...while ENCUT and related settings are inherited.
        self.assertEqual(vis.incar["ENCUT"], 680)
        self.assertEqual(vis.incar["METAGGA"], "R2scan")
        self.assertEqual(vis.incar["KSPACING"], 0.34292842)

        # LCALCPOL flag.
        lcalcpol_vis = MPScanStaticSet(_dummy_structure, lcalcpol=True)
        lcalcpol_vis = lcalcpol_vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(lcalcpol_vis.incar["LCALCPOL"])

        # LEPSILON flag.
        lepsilon_vis = MPScanStaticSet(_dummy_structure, lepsilon=True)
        lepsilon_vis = lepsilon_vis.override_from_prev_calc(prev_calc_dir=prev_run)
        self.assertTrue(lepsilon_vis.incar["LEPSILON"])
        self.assertTrue(lepsilon_vis.incar["LPEAD"])
        self.assertEqual(lepsilon_vis.incar["IBRION"], 8)
        self.assertIsNone(lepsilon_vis.incar.get("NSW"))
        self.assertIsNone(lepsilon_vis.incar.get("NPAR"))

    def test_conflicting_arguments(self):
        # Passing both the deprecated and the new potcar argument must fail.
        with pytest.raises(ValueError, match="deprecated"):
            si = self.get_structure("Si")
            MPScanStaticSet(si, potcar_functional="PBE", user_potcar_functional="PBE")
class FuncTest(PymatgenTest):
    """Covers module-level helper functions."""

    def test_batch_write_input(self):
        structures = [
            PymatgenTest.get_structure("Li2O"),
            PymatgenTest.get_structure("LiFePO4"),
        ]
        batch_write_input(structures)
        expected_dirs = ["Li4Fe4P4O16_1", "Li2O1_0"]
        for directory in expected_dirs:
            for fname in ["INCAR", "KPOINTS", "POSCAR", "POTCAR"]:
                self.assertTrue(os.path.exists(os.path.join(directory, fname)))
        # Clean up the generated directories.
        for directory in expected_dirs:
            shutil.rmtree(directory)
class MVLGBSetTest(PymatgenTest):
    """Checks bulk vs slab variants of the grain-boundary input set."""

    def setUp(self):
        self.s = Structure.from_file(self.TEST_FILES_DIR / "Li.cif")
        self.bulk = MVLGBSet(self.s)
        self.slab = MVLGBSet(self.s, slab_mode=True)
        self.d_bulk = self.bulk.get_vasp_input()
        self.d_slab = self.slab.get_vasp_input()
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_bulk(self):
        # Bulk relaxations let the full cell relax.
        self.assertEqual(self.d_bulk["INCAR"]["ISIF"], 3)

    def test_slab(self):
        # Slab relaxations keep the cell shape fixed.
        self.assertEqual(self.d_slab["INCAR"]["ISIF"], 2)

    def test_kpoints(self):
        kpoints = self.d_slab["KPOINTS"]
        # In-plane density follows the 40/|a| rounding rule; a single
        # k-point is used normal to the slab.
        k_a = int(40 / self.s.lattice.abc[0] + 0.5)
        k_b = int(40 / self.s.lattice.abc[1] + 0.5)
        self.assertEqual(kpoints.kpts, [[k_a, k_b, 1]])
class MVLRelax52SetTest(PymatgenTest):
    """Checks the MVL PBE_52/PBE_54 relaxation set."""

    def setUp(self):
        poscar = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR")
        self.struct = poscar.structure
        self.mvl_rlx_set = MVLRelax52Set(
            self.struct, user_potcar_functional="PBE_54", user_incar_settings={"NSW": 500}
        )
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_incar(self):
        incar = self.mvl_rlx_set.incar
        self.assertIn("NSW", incar)
        self.assertEqual(incar["LREAL"], "Auto")

    def test_potcar(self):
        self.assertEqual(self.mvl_rlx_set.potcar.functional, "PBE_54")
        self.assertIn("Fe", self.mvl_rlx_set.potcar.symbols)
        self.struct.remove_species(["Fe"])
        pbe52_set = MVLRelax52Set(self.struct, user_potcar_functional="PBE_52")
        self.assertEqual(pbe52_set.potcar.functional, "PBE_52")
        # Plain PBE is rejected.
        self.assertRaises(ValueError, MVLRelax52Set, self.struct, user_potcar_functional="PBE")

    def test_potcar_functional_warning(self):
        # The positional potcar_functional argument is deprecated.
        with pytest.warns(FutureWarning, match="argument is deprecated"):
            MVLRelax52Set(self.struct, potcar_functional="PBE_52")

    def test_as_from_dict(self):
        d = self.mvl_rlx_set.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(type(v), MVLRelax52Set)
        self.assertEqual(v.incar["NSW"], 500)
class LobsterSetTest(PymatgenTest):
    """Checks the LOBSTER-compatible static input set.

    Fixes relative to the original:
    * ``test_kpoints`` / ``test_as_from_dict`` used ``assertTrue(x, y)``,
      whose second positional argument is the failure *message*; the value
      was never compared. They now use ``assertEqual``.
    * A duplicated NSW assertion in ``test_as_from_dict`` was removed.
    """

    def setUp(self):
        file_path = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(file_path)
        self.struct = poscar.structure
        # Allowed ISYM/ISMEAR combinations.
        self.lobsterset1 = LobsterSet(self.struct, isym=-1, ismear=-5)
        self.lobsterset2 = LobsterSet(self.struct, isym=0, ismear=0)
        # Only isym in {-1, 0} is allowed.
        with self.assertRaises(ValueError):
            self.lobsterset_new = LobsterSet(self.struct, isym=2, ismear=0)
        # Only ismear in {-5, 0} is allowed.
        with self.assertRaises(ValueError):
            self.lobsterset_new = LobsterSet(self.struct, isym=-1, ismear=2)
        # A user-supplied k-point grid density is still honoured.
        self.lobsterset3 = LobsterSet(self.struct, isym=0, ismear=0, user_kpoints_settings={"grid_density": 6000})
        # user_incar_settings can overwrite this class's defaults.
        self.lobsterset4 = LobsterSet(self.struct, user_incar_settings={"ALGO": "Fast"})
        # Basis functions supplied by the user.
        self.lobsterset5 = LobsterSet(
            self.struct,
            user_supplied_basis={"Fe": "3d 3p 4s", "P": "3p 3s", "O": "2p 2s"},
        )
        # A basis missing an element must raise.
        with self.assertRaises(ValueError):
            self.lobsterset6 = LobsterSet(self.struct, user_supplied_basis={"Fe": "3d 3p 4s", "P": "3p 3s"})
        self.lobsterset7 = LobsterSet(
            self.struct,
            address_basis_file=os.path.join(MODULE_DIR, "../../lobster/lobster_basis/BASIS_PBE_54_standard.yaml"),
        )
        with pytest.warns(BadInputSetWarning, match="Overriding the POTCAR"):
            self.lobsterset6 = LobsterSet(self.struct)

    def test_incar(self):
        incar1 = self.lobsterset1.incar
        self.assertIn("NBANDS", incar1)
        self.assertEqual(incar1["NBANDS"], 116)
        self.assertEqual(incar1["NSW"], 0)
        self.assertEqual(incar1["ISMEAR"], -5)
        self.assertEqual(incar1["ISYM"], -1)
        self.assertEqual(incar1["ALGO"], "Normal")
        self.assertEqual(incar1["EDIFF"], 1e-6)
        incar2 = self.lobsterset2.incar
        self.assertEqual(incar2["ISYM"], 0)
        self.assertEqual(incar2["ISMEAR"], 0)
        incar4 = self.lobsterset4.incar
        self.assertEqual(incar4["ALGO"], "Fast")

    def test_kpoints(self):
        # The grid density appears as the 7th whitespace-separated token of
        # the KPOINTS comment (assumed numeric — matches LobsterSet's
        # comment format; TODO confirm against the generated comment).
        for vis, density in [
            (self.lobsterset1, 6138),
            (self.lobsterset2, 6138),
            (self.lobsterset3, 6000),
        ]:
            self.assertEqual(int(vis.kpoints.comment.split(" ")[6]), density)

    def test_potcar(self):
        # PBE_54 is preferred at the moment.
        self.assertEqual(self.lobsterset1.potcar_functional, "PBE_54")

    def test_as_from_dict(self):
        lobsterset_new = LobsterSet.from_dict(self.lobsterset1.as_dict())
        # Re-check the relevant settings on the reconstructed object.
        incar1 = lobsterset_new.incar
        self.assertIn("NBANDS", incar1)
        self.assertEqual(incar1["NBANDS"], 116)
        self.assertEqual(incar1["NSW"], 0)
        self.assertEqual(incar1["ISMEAR"], -5)
        self.assertEqual(incar1["ISYM"], -1)
        self.assertEqual(incar1["ALGO"], "Normal")
        self.assertEqual(int(lobsterset_new.kpoints.comment.split(" ")[6]), 6138)
        self.assertEqual(lobsterset_new.potcar_functional, "PBE_54")
# Minimal single-site structure used by the override_from_prev_calc tests,
# where only a placeholder structure is required.
_dummy_structure = Structure(
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    ["I"],
    [[0, 0, 0]],
    site_properties={"magmom": [[0, 0, 1]]},
)
if __name__ == "__main__":
    unittest.main()
| 41.014733 | 120 | 0.637571 |
acf97a506f9b7d7bb25ae4bf4e2026dbf211f695 | 732 | py | Python | djconnectwise/migrations/0005_auto_20170225_0101.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | 10 | 2017-04-27T19:51:38.000Z | 2020-10-09T17:21:23.000Z | djconnectwise/migrations/0005_auto_20170225_0101.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | 45 | 2017-02-07T22:52:07.000Z | 2021-11-25T21:45:44.000Z | djconnectwise/migrations/0005_auto_20170225_0101.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | 9 | 2017-01-27T00:07:33.000Z | 2021-07-12T19:48:27.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename CallbackEntry.enabled to inactive_flag, drop entry_id, and add
    a required description field."""

    dependencies = [
        ('djconnectwise', '0004_auto_20170224_1853'),
    ]

    operations = [
        migrations.RenameField(
            model_name='callbackentry',
            old_name='enabled',
            new_name='inactive_flag',
        ),
        migrations.RemoveField(
            model_name='callbackentry',
            name='entry_id',
        ),
        migrations.AddField(
            model_name='callbackentry',
            name='description',
            field=models.CharField(max_length=100, default=''),
            preserve_default=False,
        ),
    ]
| 24.4 | 63 | 0.579235 |
acf97aaa2dc9f1bed5d7c65c9ad6b98755908ddf | 13,581 | py | Python | psi/controller/calibration/util.py | psiexperiment/psiexperiment | 2701558e1d0637b8a5d6762912dfb5c183f3be87 | [
"MIT"
] | 2 | 2020-07-10T07:49:52.000Z | 2020-11-15T13:20:52.000Z | psi/controller/calibration/util.py | psiexperiment/psiexperiment | 2701558e1d0637b8a5d6762912dfb5c183f3be87 | [
"MIT"
] | 1 | 2020-04-20T20:37:48.000Z | 2020-04-20T20:37:48.000Z | psi/controller/calibration/util.py | psiexperiment/psiexperiment | 2701558e1d0637b8a5d6762912dfb5c183f3be87 | [
"MIT"
] | 3 | 2020-04-17T15:03:36.000Z | 2022-01-14T23:19:29.000Z | import logging
log = logging.getLogger(__name__)
import json
# Fix: this module previously used `from fractions import math`, which only
# works because the fractions module happens to import math internally.
# Import math directly instead (same `math` binding, used by truncated_ifft).
import math
from pathlib import Path

import numpy as np
import pandas as pd
from scipy import signal

from psi.util import as_numeric, psi_json_decoder_hook, PSIJsonEncoder
def db(target, reference=1):
    """Convert the ratio ``target / reference`` to decibels (20*log10)."""
    ratio = as_numeric(target) / as_numeric(reference)
    return 20 * np.log10(ratio)
def dbi(db, reference=1):
    """Inverse of :func:`db`: convert a decibel value back to a linear amount."""
    return 10 ** (as_numeric(db) / 20) * reference
def dbtopa(db):
    """Convert dB SPL to Pascal.

    Uses the standard 20 µPa reference: ``10**(db/20) * 20e-6``. Accepts
    scalars or array-likes (e.g. dbtopa(94) is approximately 1 Pa, and
    dbtopa(120) is 20 Pa).
    """
    return dbi(db, 20e-6)
def patodb(pa):
    """Convert Pascal to dB SPL.

    Uses the standard 20 µPa reference: ``20*log10(pa/20e-6)``. Accepts
    scalars or array-likes (e.g. patodb(2) is 100 dB SPL, patodb(0.2) is 80).
    """
    return db(pa, 20e-6)
def normalize_rms(waveform, out=None):
    """Scale *waveform* so its RMS power equals 1.

    Typically used when generating a noise waveform that will later be
    scaled by a calibration factor.

    Parameters
    ----------
    waveform : array_like
        Input array.
    out : array_like, optional
        Array to store the output in; must have the same shape as *waveform*.
    """
    return np.divide(waveform, rms(waveform), out)
def csd(s, fs, window=None, waveform_averages=None):
    """Normalized complex spectrum of *s* along the last axis.

    The signal is linearly detrended first. When *waveform_averages* is
    given, the leading axis is folded into that many groups and averaged in
    the time domain before the FFT. When *window* is given, the window is
    applied with unity mean gain. Note *fs* is accepted for interface
    symmetry with the other spectral helpers but is not used here.
    """
    if waveform_averages is not None:
        folded = (waveform_averages, -1) + s.shape[1:]
        s = s.reshape(folded).mean(axis=0)
    s = signal.detrend(s, type='linear', axis=-1)
    n = s.shape[-1]
    if window is not None:
        w = signal.get_window(window, n)
        s = s * (w / w.mean())
    return np.fft.rfft(s, axis=-1) / n
def phase(s, fs, window=None, waveform_averages=None, unwrap=True):
    """Phase (radians) of the spectrum of *s*, unwrapped by default."""
    angles = np.angle(csd(s, fs, window, waveform_averages))
    if unwrap:
        angles = np.unwrap(angles)
    return angles
def psd(s, fs, window=None, waveform_averages=None):
    """Single-sided RMS amplitude spectrum of *s* (2*|X|/sqrt(2))."""
    c = csd(s, fs, window, waveform_averages)
    return 2 * np.abs(c) / np.sqrt(2.0)
def psd_freq(s, fs):
    """Frequency axis (Hz) matching :func:`psd` for signal *s*."""
    n = s.shape[-1]
    return np.fft.rfftfreq(n, 1.0 / fs)
def psd_df(s, fs, *args, **kw):
    """Like :func:`psd` but returns a pandas object indexed by frequency.

    NOTE(review): extra *args/**kw are accepted but not forwarded to
    :func:`psd`, mirroring the original behavior — confirm with callers
    before forwarding them.
    """
    p = psd(s, fs)
    freq_index = pd.Index(psd_freq(s, fs), name='frequency')
    if p.ndim == 1:
        series_name = s.name if isinstance(s, pd.Series) else 'psd'
        return pd.Series(p, index=freq_index, name=series_name)
    row_index = s.index if isinstance(s, pd.DataFrame) else None
    return pd.DataFrame(p, columns=freq_index, index=row_index)
def tone_conv(s, fs, frequency, window=None):
    """Complex amplitude of *s* at *frequency* via single-bin demodulation.

    The signal is linearly detrended, optionally windowed (unity mean gain),
    multiplied by a complex exponential at *frequency* and averaged over the
    time axis — i.e. a lock-in style estimate.
    """
    broadcast = (Ellipsis,) + (np.newaxis,) * s.ndim
    frequency = np.asarray(frequency)[broadcast]
    s = signal.detrend(s, type='linear', axis=-1)
    n = s.shape[-1]
    if window is not None:
        w = signal.get_window(window, n)
        s = s * (w / w.mean())
    t = np.arange(n) / fs
    demodulated = 2.0 * s * np.exp(-1.0j * (2.0 * np.pi * t * frequency))
    return demodulated.mean(axis=-1)
def tone_power_conv(s, fs, frequency, window=None):
    """RMS amplitude of *s* at *frequency* (lock-in magnitude / sqrt(2))."""
    return np.abs(tone_conv(s, fs, frequency, window)) / np.sqrt(2.0)
def tone_phase_conv(s, fs, frequency, window=None):
    """Phase (radians) of *s* at *frequency* from the lock-in estimate."""
    return np.angle(tone_conv(s, fs, frequency, window))
def tone_power_fft(s, fs, frequency, window=None):
    """Peak RMS power within ±10% of *frequency* in the FFT spectrum.

    Fix: the search-band edges were previously computed from the entire
    ``freqs`` array (``freqs*0.9``/``freqs*1.1``), so the mask was
    independent of *frequency* and effectively selected every non-DC bin.
    The band is now centred on the requested *frequency*.
    """
    power = psd(s, fs, window)
    freqs = psd_freq(s, fs)
    mask = (freqs >= frequency * 0.9) & (freqs < frequency * 1.1)
    return power[..., mask].max(axis=-1)
def tone_phase_fft(s, fs, frequency, window=None):
    """Peak phase within ±10% of *frequency* in the FFT spectrum.

    Fix: as in :func:`tone_power_fft`, the band edges were computed from the
    whole ``freqs`` array rather than the requested *frequency*, making the
    mask independent of *frequency*.
    """
    p = phase(s, fs, window, unwrap=False)
    freqs = psd_freq(s, fs)
    mask = (freqs >= frequency * 0.9) & (freqs < frequency * 1.1)
    return p[..., mask].max(axis=-1)
def tone_power_conv_nf(s, fs, frequency, window=None):
    """Return ``(noise_floor_rms, tone_rms)`` for a tone at *frequency*.

    The noise floor is the mean level of the two FFT-resolution bins on
    either side of the tone bin.
    """
    resolution = fs / s.shape[-1]
    probe_freqs = frequency + np.arange(-2, 3) * resolution
    magnitude = tone_power_conv(s, fs, probe_freqs, window)
    nf_rms = magnitude[(0, 1, 3, 4), ...].mean(axis=0)
    return nf_rms, magnitude[2]
def analyze_mic_sens(ref_waveforms, exp_waveforms, vrms, ref_mic_gain,
                     exp_mic_gain, output_gain, ref_mic_sens, **kwargs):
    """Derive output SPL and experimental-mic sensitivity from paired
    reference/experimental tone recordings.

    Extra keyword arguments are forwarded to :func:`analyze_tone`.
    """
    ref_data = analyze_tone(ref_waveforms, mic_gain=ref_mic_gain, **kwargs)
    exp_data = analyze_tone(exp_waveforms, mic_gain=exp_mic_gain, **kwargs)

    # Actual output SPL as measured by the calibrated reference microphone.
    output_spl = ref_data['mic_rms'] - ref_mic_sens - db(20e-6)
    # Output SPL normalized to 0 dB gain and 1 Vrms drive.
    norm_output_spl = output_spl - output_gain - db(vrms)
    # Experimental mic sensitivity in dB(V/Pa).
    exp_mic_sens = exp_data['mic_rms'] + ref_mic_sens - ref_data['mic_rms']

    result = {
        'output_spl': output_spl,
        'norm_output_spl': norm_output_spl,
        'exp_mic_sens': exp_mic_sens,
        'output_gain': output_gain,
    }
    shared = ('time', 'frequency')
    result.update({k: ref_data[k] for k in shared})
    result.update({'ref_' + k: v for k, v in ref_data.items() if k not in shared})
    result.update({'exp_' + k: v for k, v in exp_data.items() if k not in shared})
    return result
def thd(s, fs, frequency, harmonics=3, window=None):
    """Total harmonic distortion: RMS of harmonics 2..N over the fundamental."""
    levels = np.concatenate(
        [tone_power_conv(s, fs, frequency * (i + 1), window)[np.newaxis]
         for i in range(harmonics)],
        axis=0,
    )
    return np.sum(levels[1:] ** 2, axis=0) ** 0.5 / levels[0]
def analyze_tone(waveforms, frequency, fs, mic_gain, trim=0, thd_harmonics=3):
    """Summarize repeated tone recordings: level, THD and mean waveform.

    Parameters
    ----------
    waveforms : 2-D array
        Repetitions along axis 0, time along axis 1.
    frequency : float
        Tone frequency in Hz.
    fs : float
        Sampling rate in Hz.
    mic_gain : float
        Microphone gain (dB) to subtract so levels reflect 0 dB gain.
    trim : float
        Seconds to discard from each end of every repetition.
    thd_harmonics : int
        Number of harmonics (including the fundamental) in the THD estimate.
    """
    trim_n = int(trim * fs)
    if trim_n:
        # Fix: the original unconditional slice `[:, trim_n:-trim_n]`
        # evaluated to `[:, 0:0]` when trim was 0 (the default), silently
        # discarding every sample.
        waveforms = waveforms[:, trim_n:-trim_n]

    # Mean tone level across repetitions; subtract the mic gain so the
    # result reflects the actual level at 0 dB input gain.
    power = db(tone_power_conv(waveforms, fs, frequency, window='flattop'))
    power = power.mean(axis=0) - mic_gain

    average_waveform = waveforms.mean(axis=0)
    time = np.arange(len(average_waveform)) / fs

    harmonics = []
    for i in range(thd_harmonics):
        f_harmonic = frequency * (i + 1)
        p = tone_power_conv(waveforms, fs, f_harmonic, window='flattop')
        harmonics.append({
            'harmonic': i + 1,
            'frequency': f_harmonic,
            'mic_rms': db(p).mean(axis=0),
        })

    # THD from the linear-scale harmonic amplitudes.
    harmonic_v = np.asarray([dbi(h['mic_rms']) for h in harmonics])[:thd_harmonics]
    thd = np.sum(harmonic_v[1:] ** 2) ** 0.5 / harmonic_v[0]

    return {
        'frequency': frequency,
        'time': time,
        'mic_rms': power,
        'thd': thd,
        'mic_waveform': average_waveform,
        'harmonics': harmonics,
    }
def rms(s, detrend=False):
    """Root-mean-square of *s* along the last axis, optionally detrended."""
    if detrend:
        s = signal.detrend(s, axis=-1)
    return np.sqrt(np.mean(s ** 2, axis=-1))
def golay_pair(n=15):
    """Return a complementary Golay pair of length ``2**(n+1)`` as float32.

    Built by the standard doubling recursion: a -> [a, b], b -> [a, -b].
    """
    a = np.array([1, 1])
    b = np.array([1, -1])
    for _ in range(n):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a.astype(np.float32), b.astype(np.float32)
def transfer_function(stimulus, response, fs):
    """Cross-spectral transfer-function magnitude between stimulus and response.

    The response is truncated to the stimulus length first.
    """
    response = response[:len(stimulus)]
    resp_fft = np.fft.rfft(response, axis=-1)
    stim_fft = np.fft.rfft(stimulus, axis=-1)
    freq = psd_freq(response, fs)
    return freq, 2 * np.abs(resp_fft * np.conj(stim_fft))
def golay_tf(a, b, a_signal, b_signal, fs):
    """Estimate a system transfer function from responses to a Golay pair.

    Implements the algorithm described in Zhou et al. (1992). Returns
    ``(frequency, magnitude, unwrapped_phase)``.
    """
    a_signal = a_signal[..., :len(a)]
    b_signal = b_signal[..., :len(b)]
    h_omega = (
        np.fft.rfft(a_signal, axis=-1) * np.conj(np.fft.rfft(a))
        + np.fft.rfft(b_signal, axis=-1) * np.conj(np.fft.rfft(b))
    ) / (2 * len(a))
    freq = psd_freq(a, fs)
    return freq, np.abs(h_omega), np.unwrap(np.angle(h_omega))
def golay_ir(n, a, b, a_signal, b_signal):
    """Estimate a system impulse response from a Golay pair (Zhou et al. 1992).

    NOTE(review): after averaging over the leading repetition axis, the
    correlation is applied along axis 1 — so the signals are assumed to be
    at least 3-D (repetition, channel, time); confirm with callers.
    """
    mean_a = a_signal.mean(axis=0)
    mean_b = b_signal.mean(axis=0)
    corr_a = np.apply_along_axis(np.convolve, 1, mean_a, a[::-1], 'full')
    corr_b = np.apply_along_axis(np.convolve, 1, mean_b, b[::-1], 'full')
    return (corr_a + corr_b)[..., -len(a):] / (2.0 * n)
def summarize_golay(fs, a, b, a_response, b_response, waveform_averages=None):
    """Average repeated Golay responses and return the transfer function.

    When *waveform_averages* is given, the epochs are folded into that many
    groups and averaged in the time domain first. Returns a dict with the
    epoch-averaged 'psd', 'phase' and the 'frequency' axis.
    """
    if waveform_averages is not None:
        _, n_time = a_response.shape
        folded = (waveform_averages, -1, n_time)
        a_response = a_response.reshape(folded).mean(axis=0)
        b_response = b_response.reshape(folded).mean(axis=0)

    freq, tf_psd, tf_phase = golay_tf(a, b, a_response, b_response, fs)
    return {
        'psd': tf_psd.mean(axis=0),
        'phase': tf_phase.mean(axis=0),
        'frequency': freq,
    }
def freq_smooth(frequency, power, bandwidth=20):
    """Smooth *power* across *frequency* with the Konno & Ohmachi (1998) kernel."""
    smoothed = []
    old_err = np.seterr(all='ignore')
    for f in frequency:
        if f == 0:
            # log10(f/0) is undefined; start from an all-zero kernel.
            weights = np.zeros_like(frequency)
        else:
            r = bandwidth * np.log10(frequency / f)
            weights = (np.sin(r) / r) ** 4
        weights[0] = 0               # guard against log10(0/f) = -inf
        weights[frequency == f] = 1  # sinc ratio -> 1 in the 0/0 limit
        weights /= weights.sum(axis=0)
        smoothed.append(np.sum(power * weights))
    np.seterr(**old_err)
    return np.array(smoothed)
def ir_iir(impulse_response, fs, smooth=None, *args, **kwargs):
    """Compute an inverse filter from an impulse response.

    The magnitude (in dB) and phase may first be smoothed in the frequency
    domain with the Konno-Ohmachi kernel (*smooth* is the kernel bandwidth).
    Remaining arguments are forwarded to :func:`iir`.
    """
    spectrum = np.fft.rfft(impulse_response)
    magnitude = np.abs(spectrum) / len(impulse_response)
    phi = np.unwrap(np.angle(spectrum))
    frequency = np.fft.rfftfreq(len(impulse_response), fs ** -1)

    if smooth is not None:
        magnitude = dbi(freq_smooth(frequency, db(magnitude), smooth))
        phi = freq_smooth(frequency, phi, smooth)

    return iir(magnitude, phi, frequency, *args, **kwargs)
def iir(psd, phase, frequency, cutoff=None, phase_correction=None,
        truncate=None, truncate_spectrum=False, reference='mean', fs=None):
    """Compute an inverse impulse response (inverse filter).

    Parameters
    ----------
    psd, phase, frequency : array_like
        Magnitude, phase and frequency axis of the measured transfer function.
    cutoff : tuple of (lb, ub), optional
        Only equalize frequencies in [lb, ub); outside the band the gain is
        left at unity. Specifying this is highly recommended to get a
        well-behaved filter, and it is required when *truncate_spectrum* is
        set.
    phase_correction : {'linear', 'subtract', None}
        How to derive the inverse phase from the measured phase.
    truncate : float, optional
        If given, truncate the result to this many seconds (requires *fs*).
    truncate_spectrum : bool
        If True, discard the spectrum above the cutoff before inverting.
    reference : str
        Currently unused; retained for interface compatibility.
    fs : float, optional
        Sampling rate in Hz. Fix: the original implementation referenced an
        undefined ``fs`` when *truncate* was set, raising NameError; the
        rate is now an explicit (backward-compatible) keyword argument.
    """
    # Equalize only a subset of the calibrated frequencies.
    if cutoff is not None:
        lb, ub = cutoff
        m = (frequency >= lb) & (frequency < ub)
        inverse_psd = psd[m].mean() / psd
        inverse_psd[~m] = 1
    else:
        inverse_psd = psd.mean() / psd

    if phase_correction == 'linear':
        slope, intercept = np.polyfit(frequency, phase, 1)
        inverse_phase = 2 * np.pi * np.arange(len(frequency)) * slope + intercept
    elif phase_correction == 'subtract':
        inverse_phase = 2 * np.pi - phase
    else:
        inverse_phase = phase

    filtered_spectrum = inverse_psd * np.exp(inverse_phase * 1j)

    if truncate_spectrum:
        # Fix: the original used `ub` here even when cutoff was None,
        # which raised NameError; make the requirement explicit.
        if cutoff is None:
            raise ValueError('truncate_spectrum requires cutoff to be specified')
        orig_ub = np.round(frequency[-1])
        ub = np.round(ub)
        filtered_spectrum = filtered_spectrum[frequency <= ub]
        result = truncated_ifft(filtered_spectrum, orig_ub, ub)
    else:
        result = np.fft.irfft(filtered_spectrum)

    if truncate:
        if fs is None:
            raise ValueError('truncate requires fs to be specified')
        result = result[:int(truncate * fs)]

    return result
def truncated_ifft(spectrum, original_fs, truncated_fs):
    """Inverse-FFT a truncated spectrum and resample back to the original rate.

    Fix: the resampling ratio ``up``/``down`` was previously computed with
    true division, producing floats that ``scipy.signal.resample_poly``
    rejects (it requires integer factors); exact integer arithmetic is used
    now. The amplitude is rescaled to compensate for the rate change.
    """
    waveform = np.fft.irfft(spectrum)
    original_fs = int(round(original_fs))
    truncated_fs = int(round(truncated_fs))
    lcm = original_fs * truncated_fs // math.gcd(original_fs, truncated_fs)
    up = lcm // truncated_fs
    down = lcm // original_fs
    waveform = signal.resample_poly(waveform, up, down)
    waveform *= truncated_fs / original_fs
    return waveform
def save_calibration(channels, filename):
    """Serialize each channel's calibration metadata to *filename* as JSON.

    The calibration class name is stored under 'calibration_type' so the
    object can be reconstructed by :func:`load_calibration_data`.
    """
    from psi.util import get_tagged_values

    settings = {}
    for channel in channels:
        metadata = get_tagged_values(channel.calibration, 'metadata')
        metadata['calibration_type'] = channel.calibration.__class__.__name__
        settings[channel.name] = metadata
    with open(filename, 'w') as fh:
        json.dump(settings, fh, indent=4, cls=PSIJsonEncoder)
def load_calibration_data(filename):
    """Load calibration objects from a JSON file, keyed by channel name."""
    from psi.controller.calibration.api import calibration_registry

    settings = json.loads(Path(filename).read_text(),
                          object_hook=psi_json_decoder_hook)
    calibrations = {}
    for name, config in settings.items():
        # Legacy calibration configs stored 'source' as a top-level key
        # rather than as an attribute; fold it into attrs.
        if 'source' in config:
            config.setdefault('attrs', {})['source'] = config.pop('source')
        calibrations[name] = calibration_registry.from_dict(**config)
    return calibrations
def load_calibration(filename, channels):
    '''
    Load calibration configuration for hardware from json file

    Channels that have no entry in the file are left unchanged.
    '''
    calibrations = load_calibration_data(filename)
    for channel in channels:
        try:
            channel.calibration = calibrations[channel.name]
        except KeyError:
            # No saved calibration for this channel; skip it.
            continue
| 29.652838 | 80 | 0.630366 |
acf97c1cf34ba91c10ddab69f21ab60a9c82721a | 5,707 | py | Python | Algorithm.Python/TensorFlowNeuralNetworkAlgorithm.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 4 | 2020-03-30T06:00:05.000Z | 2020-06-29T02:51:25.000Z | Algorithm.Python/TensorFlowNeuralNetworkAlgorithm.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 30 | 2020-03-16T07:27:37.000Z | 2021-05-17T05:51:04.000Z | Algorithm.Python/TensorFlowNeuralNetworkAlgorithm.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 1 | 2021-01-04T18:03:14.000Z | 2021-01-04T18:03:14.000Z | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import clr
clr.AddReference("System")
clr.AddReference("QuantConnect.Algorithm")
clr.AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
import numpy as np
import tensorflow as tf
class TensorFlowNeuralNetworkAlgorithm(QCAlgorithm):
    """QuantConnect algorithm that trains a tiny TensorFlow (v1, graph-mode)
    neural network weekly on SPY open prices and trades on the prediction."""

    def Initialize(self):
        """Set backtest window, cash, universe and the weekly train/trade schedule."""
        self.SetStartDate(2013, 10, 7) # Set Start Date
        self.SetEndDate(2013, 10, 8) # Set End Date
        self.SetCash(100000) # Set Strategy Cash
        spy = self.AddEquity("SPY", Resolution.Minute) # Add Equity
        self.symbols = [spy.Symbol] # potential trading symbols pool (in this algorithm there is only 1).
        self.lookback = 30 # number of previous days for training
        self.Schedule.On(self.DateRules.Every(DayOfWeek.Monday), self.TimeRules.AfterMarketOpen("SPY", 28), self.NetTrain) # train the neural network 28 mins after market open
        self.Schedule.On(self.DateRules.Every(DayOfWeek.Monday), self.TimeRules.AfterMarketOpen("SPY", 30), self.Trade) # trade 30 mins after market open

    def add_layer(self, inputs, in_size, out_size, activation_function=None):
        """Add one fully-connected layer to the TF graph and return its output.

        Weights are drawn from a random normal; biases start at 0.1.  If
        activation_function is None the layer is linear.
        """
        # add one more layer and return the output of this layer
        # this is one NN with only one hidden layer
        Weights = tf.Variable(tf.random_normal([in_size, out_size]))
        biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
        Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs

    def NetTrain(self):
        """Fit the network on the last `lookback` daily opens and derive
        buy/sell trigger prices one standard deviation around the prediction.

        NOTE(review): a fresh graph/session is built on every call without
        resetting the default graph -- verify this does not leak graph nodes
        over long backtests.
        """
        # Daily historical data is used to train the machine learning model
        history = self.History(self.symbols, self.lookback + 1, Resolution.Daily)
        # model: use prices_x to fit prices_y; key: symbol; value: according price
        self.prices_x, self.prices_y = {}, {}
        # key: symbol; values: prices for sell or buy
        self.sell_prices, self.buy_prices = {}, {}
        for symbol in self.symbols:
            if not history.empty:
                # Daily historical data is used to train the machine learning model
                # use open prices to predict the next days'
                self.prices_x[symbol] = list(history.loc[symbol.Value]['open'][:-1])
                self.prices_y[symbol] = list(history.loc[symbol.Value]['open'][1:])
        for symbol in self.symbols:
            if symbol in self.prices_x:
                # create numpy array (column vectors for the 1-feature network)
                x_data = np.array(self.prices_x[symbol]).astype(np.float32).reshape((-1,1))
                y_data = np.array(self.prices_y[symbol]).astype(np.float32).reshape((-1,1))
                # define placeholder for inputs to network
                xs = tf.placeholder(tf.float32, [None, 1])
                ys = tf.placeholder(tf.float32, [None, 1])
                # add hidden layer
                l1 = self.add_layer(xs, 1, 10, activation_function=tf.nn.relu)
                # add output layer
                prediction = self.add_layer(l1, 10, 1, activation_function=None)
                # the error between prediction and real data (mean squared error)
                loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                             reduction_indices=[1]))
                # use gradient descent and square error
                train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
                # the following is the procedure for tensorflow graph execution
                sess = tf.Session()
                init = tf.global_variables_initializer()
                sess.run(init)
                for i in range(200):
                    # training
                    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
                # predict today's price
                y_pred_final = sess.run(prediction, feed_dict = {xs: y_data})[0][-1]
                # get sell prices and buy prices as trading signals
                self.sell_prices[symbol] = y_pred_final - np.std(y_data)
                self.buy_prices[symbol] = y_pred_final + np.std(y_data)

    def Trade(self):
        '''
        Enter or exit positions based on relationship of the open price of the current bar and the prices defined by the machine learning model.
        Liquidate if the open price is below the sell price and buy if the open price is above the buy price
        '''
        for holding in self.Portfolio.Values:
            if self.CurrentSlice[holding.Symbol].Open < self.sell_prices[holding.Symbol] and holding.Invested:
                self.Liquidate(holding.Symbol)
            if self.CurrentSlice[holding.Symbol].Open > self.buy_prices[holding.Symbol] and not holding.Invested:
                self.SetHoldings(holding.Symbol, 1 / len(self.symbols))
acf97c22578785ce0916bbf7ad2ed56bc37ce2d1 | 2,286 | py | Python | main.py | pvalls/startup-job-coding-challenge | 841e044d15a45f2a8930d854a9589c0d04db3ce6 | [
"MIT"
] | null | null | null | main.py | pvalls/startup-job-coding-challenge | 841e044d15a45f2a8930d854a9589c0d04db3ce6 | [
"MIT"
] | null | null | null | main.py | pvalls/startup-job-coding-challenge | 841e044d15a45f2a8930d854a9589c0d04db3ce6 | [
"MIT"
] | null | null | null | # main.py - Python 3.7.5
# Coding Challenge - Sunlight Hours
# Solution proposal submitted by Pol Valls Rué.
import json
import time as t
from functions.getBuilding import getBuilding
from functions.getNeighborhood import getNeighborhood
from functions.apartmentExists import apartmentExists
from functions.computeAngleTrigonometry import computeAngleTrigonometry
from functions.computeSunlightHours import computeSunlightHours
# Define the sunrise/sunset time to work with. Use format 'hh:mm:ss'
sunrise_time_str = '08:14:00'
sunset_time_str = '17:25:00'
# Convert to "struct_time" object
sunrise_time = t.strptime(sunrise_time_str, '%H:%M:%S')
sunset_time = t.strptime(sunset_time_str, '%H:%M:%S')
# Initialize city as a global list variable
city = list()
# API FUNCTIONS #
# init method that takes a String containing a JSON describing the city
def init(city_json):
    """Parse *city_json* (a JSON string describing the city) and store the
    result in the module-level ``city`` used by getSunlightHours."""
    global city
    city = json.loads(city_json)
# getSunlightHours. It returns the sunlight hours as a string like “hh:mm:ss - hh:mm:ss” in 24hr format.
def getSunlightHours(neighborhood_name, building_name, apartment_number):
    """Return the apartment's sunlight hours as "hh:mm:ss - hh:mm:ss" (24h).

    Raises Exception (chained to the underlying lookup error) when the
    neighborhood or building does not exist in the current city.
    """
    # Extract the indicated neighborhood and building from the city.
    # Catch Exception, not a bare except: a bare except also intercepts
    # KeyboardInterrupt/SystemExit, and chaining with `from exc` preserves
    # the original failure for debugging instead of masking it.
    try:
        neighborhood = getNeighborhood(city, neighborhood_name)
        building = getBuilding(neighborhood, building_name)
    except Exception as exc:
        raise Exception("Error: The given neighborhood or building does not exist in the current city.") from exc

    # Check if the apartment exists (raises otherwise)
    apartmentExists(building, apartment_number)

    # Place the indicated apartment in a 2D coordinate system.
    x = building.get("distance")
    y = apartment_number*neighborhood.get("apartments_height")

    # Compute the greatest angle during sunrise/sunset at which there is a higher apartment blocking sunlight
    [sunrise_angle, sunset_angle] = computeAngleTrigonometry(neighborhood, building_name, x, y)

    # Using the default sunrise and sunset time as well as the shadow angles found, compute resulting sunlight hours
    [final_sunrise_time_str, final_sunset__time_str] = computeSunlightHours(sunrise_time, sunset_time, sunrise_angle, sunset_angle)

    return (final_sunrise_time_str + " - " + final_sunset__time_str)
| 40.105263 | 131 | 0.772528 |
acf97da613b0c77b986b5e937276950a27fc809d | 231 | py | Python | store_project/products/models.py | DmitrySevostianov/simle_store_django | a8f78e008e28fc1821360d8339191b2a22aaa8c2 | [
"MIT"
] | null | null | null | store_project/products/models.py | DmitrySevostianov/simle_store_django | a8f78e008e28fc1821360d8339191b2a22aaa8c2 | [
"MIT"
] | null | null | null | store_project/products/models.py | DmitrySevostianov/simle_store_django | a8f78e008e28fc1821360d8339191b2a22aaa8c2 | [
"MIT"
] | null | null | null | from django.db import models
class Product(models.Model):
    """A store item with a name, an integer price and a free-form description."""
    name = models.CharField(max_length=512)
    # Integer price; currency/unit is not specified here -- confirm with callers.
    price = models.IntegerField()
    description = models.TextField()
    def __str__(self):
        """Readable label (the product name) used by the admin and shell."""
        return str(self.name)
| 23.1 | 43 | 0.688312 |
acf97e4b7a16163baf81825e544017cac17e20fe | 9,405 | py | Python | pandas/io/parquet.py | JeanRoca/pandas | c30029e2c7b90acdaf7fc479fa063694a6eb0e4a | [
"BSD-3-Clause"
] | null | null | null | pandas/io/parquet.py | JeanRoca/pandas | c30029e2c7b90acdaf7fc479fa063694a6eb0e4a | [
"BSD-3-Clause"
] | null | null | null | pandas/io/parquet.py | JeanRoca/pandas | c30029e2c7b90acdaf7fc479fa063694a6eb0e4a | [
"BSD-3-Clause"
] | null | null | null | """ parquet compat """
from warnings import catch_warnings
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas import DataFrame, get_option
from pandas.io.common import get_filepath_or_buffer, is_s3_url
def get_engine(engine):
    """Return the parquet implementation matching *engine*.

    'auto' first consults the ``io.parquet.engine`` option; if that is also
    'auto', pyarrow is tried and then fastparquet.  Raises ImportError when
    no engine can be loaded and ValueError for an unrecognized name.
    """
    if engine == "auto":
        engine = get_option("io.parquet.engine")

    if engine == "auto":
        # Probe the known engines in order of preference.
        for impl_cls in (PyArrowImpl, FastParquetImpl):
            try:
                return impl_cls()
            except ImportError:
                continue

        raise ImportError(
            "Unable to find a usable engine; "
            "tried using: 'pyarrow', 'fastparquet'.\n"
            "pyarrow or fastparquet is required for parquet "
            "support"
        )

    if engine == "pyarrow":
        return PyArrowImpl()
    if engine == "fastparquet":
        return FastParquetImpl()
    raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
class BaseImpl:
    """Common interface for parquet engine implementations."""

    api = None  # underlying engine module, assigned by subclasses

    @staticmethod
    def validate_dataframe(df):
        """Raise ValueError when *df* cannot be written to parquet."""
        if not isinstance(df, DataFrame):
            raise ValueError("to_parquet only supports IO with DataFrames")
        # Column names must be strings (an empty frame is also acceptable).
        if df.columns.inferred_type not in {"string", "unicode", "empty"}:
            raise ValueError("parquet must have string column names")
        # Any named index level must use a string name as well.
        for level_name in df.index.names:
            if level_name is not None and not isinstance(level_name, str):
                raise ValueError("Index level names must be strings")

    def write(self, df, path, compression, **kwargs):
        raise AbstractMethodError(self)

    def read(self, path, columns=None, **kwargs):
        raise AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
    """Parquet engine backed by pyarrow."""

    def __init__(self):
        pyarrow = import_optional_dependency(
            "pyarrow", extra="pyarrow is required for parquet support."
        )
        # Ensure the parquet submodule is importable/loaded.
        import pyarrow.parquet

        self.api = pyarrow

    def write(
        self,
        df,
        path,
        compression="snappy",
        coerce_timestamps="ms",
        index=None,
        partition_cols=None,
        **kwargs
    ):
        """Write *df* to *path*; with partition_cols, write a partitioned dataset."""
        self.validate_dataframe(df)
        path, _, _, _ = get_filepath_or_buffer(path, mode="wb")

        # Only forward preserve_index when the caller expressed a preference;
        # otherwise let pyarrow apply its own default.
        if index is None:
            from_pandas_kwargs = {}
        else:
            from_pandas_kwargs = {"preserve_index": index}

        table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
        if partition_cols is not None:
            self.api.parquet.write_to_dataset(
                table,
                path,
                compression=compression,
                coerce_timestamps=coerce_timestamps,
                partition_cols=partition_cols,
                **kwargs
            )
        else:
            self.api.parquet.write_table(
                table,
                path,
                compression=compression,
                coerce_timestamps=coerce_timestamps,
                **kwargs
            )

    def read(self, path, columns=None, **kwargs):
        """Read a parquet file into a DataFrame, honoring pandas metadata."""
        path, _, _, should_close = get_filepath_or_buffer(path)

        kwargs["use_pandas_metadata"] = True
        result = self.api.parquet.read_table(
            path, columns=columns, **kwargs
        ).to_pandas()
        if should_close:
            # Best-effort close of the handle we opened.  Catch Exception
            # rather than a bare ``except`` so KeyboardInterrupt/SystemExit
            # are not swallowed here.
            try:
                path.close()
            except Exception:
                pass

        return result
class FastParquetImpl(BaseImpl):
    """Parquet engine backed by fastparquet."""

    def __init__(self):
        # since pandas is a dependency of fastparquet
        # we need to import on first use
        fastparquet = import_optional_dependency(
            "fastparquet", extra="fastparquet is required for parquet support."
        )
        self.api = fastparquet

    def write(
        self, df, path, compression="snappy", index=None, partition_cols=None, **kwargs
    ):
        """Write *df* to *path* with fastparquet.

        Accepts the fastparquet-native ``partition_on`` kwarg as a legacy
        alias for ``partition_cols``, but not both at once.
        """
        self.validate_dataframe(df)
        # thriftpy/protocol/compact.py:339:
        # DeprecationWarning: tostring() is deprecated.
        # Use tobytes() instead.
        if "partition_on" in kwargs and partition_cols is not None:
            raise ValueError(
                "Cannot use both partition_on and "
                "partition_cols. Use partition_cols for "
                "partitioning data"
            )
        elif "partition_on" in kwargs:
            partition_cols = kwargs.pop("partition_on")

        # fastparquet only supports partitioning with the 'hive' file scheme.
        if partition_cols is not None:
            kwargs["file_scheme"] = "hive"

        if is_s3_url(path):
            # path is s3:// so we need to open the s3file in 'wb' mode.
            # TODO: Support 'ab'
            path, _, _, _ = get_filepath_or_buffer(path, mode="wb")
            # And pass the opened s3file to the fastparquet internal impl.
            kwargs["open_with"] = lambda path, _: path
        else:
            path, _, _, _ = get_filepath_or_buffer(path)

        # fastparquet emits DeprecationWarnings from thriftpy; suppress them.
        with catch_warnings(record=True):
            self.api.write(
                path,
                df,
                compression=compression,
                write_index=index,
                partition_on=partition_cols,
                **kwargs
            )

    def read(self, path, columns=None, **kwargs):
        """Read a parquet file (local or s3://) into a DataFrame."""
        if is_s3_url(path):
            from pandas.io.s3 import get_file_and_filesystem

            # When path is s3:// an S3File is returned.
            # We need to retain the original path(str) while also
            # pass the S3File().open function to the fastparquet impl.
            s3, filesystem = get_file_and_filesystem(path)
            try:
                parquet_file = self.api.ParquetFile(path, open_with=filesystem.open)
            finally:
                # Always release the S3 handle, even if ParquetFile fails.
                s3.close()
        else:
            path, _, _, _ = get_filepath_or_buffer(path)
            parquet_file = self.api.ParquetFile(path)

        return parquet_file.to_pandas(columns=columns, **kwargs)
def to_parquet(
    df,
    path,
    engine="auto",
    compression="snappy",
    index=None,
    partition_cols=None,
    **kwargs
):
    """
    Write a DataFrame to the parquet format.

    Parameters
    ----------
    path : str
        File path, or the root directory path when writing a partitioned
        dataset.

        .. versionchanged:: 0.24.0

    engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet library to use. If 'auto', the ``io.parquet.engine`` option
        is used; its default tries 'pyarrow', falling back to 'fastparquet'.
    compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
        Name of the compression to use. Use ``None`` for no compression.
    index : bool, default None
        Whether to include the dataframe's index(es) in the file output.
        ``None`` uses the engine's default behavior.

        .. versionadded:: 0.24.0

    partition_cols : list or string, optional, default None
        Column name(s) by which to partition the dataset, applied in the
        order given. A single string identifies one column.

        .. versionadded:: 0.24.0

    kwargs
        Additional keyword arguments passed to the engine

        .. versionchanged:: 1.0.0

        partition_cols
             Added ability to pass in a string for a single column name
    """
    # Normalize a bare column name into a one-element list.
    cols = [partition_cols] if isinstance(partition_cols, str) else partition_cols
    writer = get_engine(engine)
    return writer.write(
        df,
        path,
        compression=compression,
        index=index,
        partition_cols=cols,
        **kwargs
    )
def read_parquet(path, engine="auto", columns=None, **kwargs):
    """
    Load a parquet object from the file path, returning a DataFrame.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    path : str, path object or file-like object
        Any valid string path is acceptable, including URLs (valid schemes:
        http, ftp, s3, file -- file URLs expect a host, e.g.
        ``file://localhost/path/to/table.parquet``), any ``os.PathLike``,
        or an object with a ``read()`` method (such as an open file handle
        or ``StringIO``).
    engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet library to use. If 'auto', the ``io.parquet.engine`` option
        is used; its default tries 'pyarrow', falling back to 'fastparquet'.
    columns : list, default=None
        If not None, only these columns will be read from the file.

        .. versionadded:: 0.21.1
    **kwargs
        Any additional kwargs are passed to the engine.

    Returns
    -------
    DataFrame
    """
    return get_engine(engine).read(path, columns=columns, **kwargs)
| 30.836066 | 87 | 0.593939 |
acf97f8de23d553ec1756002ebb77dd4d3edfcaf | 6,558 | py | Python | nengo/ipynb.py | pedrombmachado/nengo | abc85e1a75ce2f980e19eef195d98081f95efd28 | [
"BSD-2-Clause"
] | null | null | null | nengo/ipynb.py | pedrombmachado/nengo | abc85e1a75ce2f980e19eef195d98081f95efd28 | [
"BSD-2-Clause"
] | null | null | null | nengo/ipynb.py | pedrombmachado/nengo | abc85e1a75ce2f980e19eef195d98081f95efd28 | [
"BSD-2-Clause"
] | null | null | null | """IPython extension that activates special Jupyter notebook features of Nengo.
At the moment this only activates the improved progress bar.
Use ``%load_ext nengo.ipynb`` in a Jupyter notebook to load the extension.
Note
----
This IPython extension cannot be unloaded.
"""
from html import escape
import warnings
import IPython
try:
import notebook
notebook_version = notebook.version_info
except ImportError:
notebook_version = IPython.version_info
from nengo.rc import rc
from nengo.utils.ipython import has_ipynb_widgets
from nengo.utils.progress import ProgressBar, timestamp2timedelta
if has_ipynb_widgets():
# pylint: disable=ungrouped-imports
from IPython.display import display
if IPython.version_info[0] <= 3:
from IPython.html.widgets import DOMWidget
import IPython.utils.traitlets as traitlets
else:
import ipywidgets
from ipywidgets import DOMWidget
import traitlets
else:
raise ImportError(
"Required dependency could not be loaded. Please install ipywidgets."
)
def load_ipython_extension(ipython):
    """Entry point for ``%load_ext nengo.ipynb``.

    On IPython >= 5 this only warns (progress bars are built in); on older
    versions it installs the widget-based progress bar frontend and points
    the rc ``progress_bar`` setting at it.
    """
    if IPython.version_info[0] >= 5:
        warnings.warn(
            "Loading the nengo.ipynb notebook extension is no longer "
            "required. Progress bars are automatically activated for IPython "
            "version 5 and later."
        )
        return

    if not (has_ipynb_widgets() and rc.get("progress", "progress_bar") == "auto"):
        return

    warnings.warn(
        "The nengo.ipynb notebook extension is deprecated. Please upgrade "
        "to IPython version 5 or later."
    )
    IPythonProgressWidget.load_frontend(ipython)
    # Register our widget-based progress bar as the one to use.
    rc.set(
        "progress",
        "progress_bar",
        ".".join((__name__, IPython2ProgressBar.__name__)),
    )
class IPythonProgressWidget(DOMWidget):
    """IPython widget for displaying a progress bar."""

    # pylint: disable=too-many-public-methods

    # JS view class name registered on the notebook frontend.
    _view_name = traitlets.Unicode("NengoProgressBar", sync=True)
    if notebook_version[0] >= 4:
        # notebook >= 4 also needs the JS module the view lives in.
        _view_module = traitlets.Unicode("nengo", sync=True)
    # Fraction complete in [0, 1], synced to the frontend.
    progress = traitlets.Float(0.0, sync=True)
    # Label rendered over the bar, synced to the frontend.
    text = traitlets.Unicode("", sync=True)

    # JavaScript view implementation shared by all frontend variants below.
    WIDGET = """
      var NengoProgressBar = widgets.DOMWidgetView.extend({
        render: function() {
          // Work-around for messed up CSS in IPython 4
          $('.widget-subarea').css({flex: '2 1 0%'});
          // $el is the DOM of the widget
          this.$el.css({width: '100%', marginBottom: '0.5em'});
          this.$el.html([
            '<div style="',
                'width: 100%;',
                'border: 1px solid #cfcfcf;',
                'border-radius: 4px;',
                'text-align: center;',
                'position: relative;">',
              '<div class="pb-text" style="',
                  'position: absolute;',
                  'width: 100%;">',
                '0%',
              '</div>',
              '<div class="pb-bar" style="',
                  'background-color: #bdd2e6;',
                  'width: 0%;',
                  'transition: width 0.1s linear;">',
                ' ',
              '</div>',
            '</div>'].join(''));
        },

        update: function() {
          this.$el.css({width: '100%', marginBottom: '0.5em'});
          var progress = 100 * this.model.get('progress');
          var text = this.model.get('text');
          this.$el.find('div.pb-bar').width(progress.toString() + '%');
          this.$el.find('div.pb-text').html(text);
        },
      });
    """

    # Frontend for notebook >= 4.2 with ipywidgets >= 5.
    FRONTEND = """
      define('nengo', ["jupyter-js-widgets"], function(widgets) {{
        {widget}

      return {{
        NengoProgressBar: NengoProgressBar
      }};
    }});""".format(
        widget=WIDGET
    )

    # Frontend for notebook < 4.
    LEGACY_FRONTEND = """
      require(["widgets/js/widget", "widgets/js/manager"],
          function(widgets, manager) {{
        if (typeof widgets.DOMWidgetView == 'undefined') {{
          widgets = IPython;
        }}
        if (typeof manager.WidgetManager == 'undefined') {{
          manager = IPython;
        }}

        {widget}

      manager.WidgetManager.register_widget_view(
        'NengoProgressBar', NengoProgressBar);
    }});""".format(
        widget=WIDGET
    )

    # Frontend for notebook 4.x with ipywidgets < 5.
    LEGACY_4_FRONTEND = """
      define('nengo', ["widgets/js/widget"], function(widgets) {{
        {widget}

      return {{
        NengoProgressBar: NengoProgressBar
      }};
    }});""".format(
        widget=WIDGET
    )

    @classmethod
    def load_frontend(cls, ipython):
        """Loads the JavaScript front-end code required by then widget."""
        warnings.warn(
            "IPythonProgressWidget is deprecated. Please upgrade to "
            "IPython version 5 or later.",
            DeprecationWarning,
        )
        # Pick the frontend variant matching the installed notebook/ipywidgets.
        if notebook_version[0] < 4:
            ipython.run_cell_magic("javascript", "", cls.LEGACY_FRONTEND)
        elif ipywidgets.version_info[0] < 5:
            nb_ver_4x = notebook_version[0] == 4 and notebook_version[1] > 1
            if notebook_version[0] > 4 or nb_ver_4x:
                warnings.warn(
                    "Incompatible versions of notebook and ipywidgets "
                    "detected. Please update your ipywidgets package to "
                    "version 5 or above."
                )
            ipython.run_cell_magic("javascript", "", cls.LEGACY_4_FRONTEND)
        else:
            ipython.run_cell_magic("javascript", "", cls.FRONTEND)
class IPython2ProgressBar(ProgressBar):
    """IPython progress bar based on widgets."""

    def __init__(self, task):
        """Create a widget-backed progress bar for *task* (a display label)."""
        warnings.warn(
            "IPython2ProgressBar is deprecated. Please upgrade to IPython "
            "version 5 or later.",
            DeprecationWarning,
        )
        super().__init__(task)
        # HTML-escape the task name once; it is interpolated into widget text.
        self._escaped_task = escape(task)
        self._widget = IPythonProgressWidget()
        self._initialized = False

    def update(self, progress):
        """Push *progress* (a Progress object) to the widget.

        The widget is displayed lazily on the first update so that merely
        constructing the bar has no visible side effect.
        """
        if not self._initialized:
            display(self._widget)
            self._initialized = True

        self._widget.progress = progress.progress
        if progress.finished:
            self._widget.text = "{} finished in {}.".format(
                self._escaped_task, timestamp2timedelta(progress.elapsed_seconds())
            )
        else:
            self._widget.text = "{task}… {progress:.0f}%, ETA: {eta}".format(
                task=self._escaped_task,
                progress=100 * progress.progress,
                eta=timestamp2timedelta(progress.eta()),
            )
| 31.681159 | 84 | 0.578987 |
acf98537acf0fb0acc22a7dd1c48ca1b5816b8b4 | 1,768 | py | Python | lib/galaxy/tools/search/__init__.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/search/__init__.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/search/__init__.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | from galaxy.eggs import require
from galaxy.web.framework.helpers import to_unicode
require( "Whoosh" )
from whoosh.filedb.filestore import RamStorage
from whoosh.fields import Schema, STORED, ID, KEYWORD, TEXT
from whoosh.index import Index
from whoosh.scoring import BM25F
from whoosh.qparser import MultifieldParser
schema = Schema( id = STORED, title = TEXT, description = TEXT, help = TEXT )
class ToolBoxSearch( object ):
    """
    Support searching tools in a toolbox. This implementation uses
    the "whoosh" search library.
    """

    def __init__( self, toolbox ):
        """
        Create a searcher for `toolbox`.
        """
        self.toolbox = toolbox
        self.build_index()

    def build_index( self ):
        """Build an in-memory whoosh index over all tools in the toolbox.

        The index is purely RAM-backed, so it is rebuilt on every startup.
        """
        self.storage = RamStorage()
        self.index = self.storage.create_index( schema )
        writer = self.index.writer()
        ## TODO: would also be nice to search section headers.
        # NOTE: iteritems() implies this module targets Python 2.
        for id, tool in self.toolbox.tools_by_id.iteritems():
            writer.add_document( id=id, title=to_unicode(tool.name), description=to_unicode(tool.description), help=to_unicode(tool.help) )
        writer.commit()

    def search( self, query, return_attribute='id' ):
        """Search the index and return the `return_attribute` field of each hit."""
        # Change field boosts for searcher to place more weight on title, description than help.
        # NOTE(review): whoosh's BM25F normally takes per-field weights as
        # keyword arguments like ``title_B=3`` rather than a ``field_B``
        # dict, and the keys here already carry the '_B' suffix -- confirm
        # this works with the whoosh version pinned by the egg above.
        searcher = self.index.searcher( \
                        weighting=BM25F( field_B={ 'title_B' : 3, 'description_B' : 2, 'help_B' : 1 } \
                                    ) )
        # Set query to search title, description, and help.
        parser = MultifieldParser( [ 'title', 'description', 'help' ], schema = schema )
        results = searcher.search( parser.parse( query ) )
        return [ result[ return_attribute ] for result in results ]
| 41.116279 | 139 | 0.651018 |
acf985cd624f07fdb157fc822504803b9a1f811b | 9,519 | py | Python | scripts/vkconventions.py | guchong1988/Vulkan-Docs | 7a2ce5b486a46f9b697ff1e65ff22772d0030813 | [
"CC-BY-4.0"
] | null | null | null | scripts/vkconventions.py | guchong1988/Vulkan-Docs | 7a2ce5b486a46f9b697ff1e65ff22772d0030813 | [
"CC-BY-4.0"
] | null | null | null | scripts/vkconventions.py | guchong1988/Vulkan-Docs | 7a2ce5b486a46f9b697ff1e65ff22772d0030813 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2013-2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Working-group-specific style conventions,
# used in generation.
import re
import os
from conventions import ConventionsBase
# Modified from default implementation - see category_requires_validation() below
CATEGORIES_REQUIRING_VALIDATION = set(('handle', 'enum', 'bitmask'))
# Tokenize into "words" for structure types, approximately per spec "Implicit Valid Usage" section 2.7.2
# This first set is for things we recognize explicitly as words,
# as exceptions to the general regex.
# Ideally these would be listed in the spec as exceptions, as OpenXR does.
SPECIAL_WORDS = set((
'16Bit', # VkPhysicalDevice16BitStorageFeatures
'8Bit', # VkPhysicalDevice8BitStorageFeaturesKHR
'AABB', # VkGeometryAABBNV
'ASTC', # VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT
'D3D12', # VkD3D12FenceSubmitInfoKHR
'Float16', # VkPhysicalDeviceShaderFloat16Int8FeaturesKHR
'ImagePipe', # VkImagePipeSurfaceCreateInfoFUCHSIA
'Int64', # VkPhysicalDeviceShaderAtomicInt64FeaturesKHR
'Int8', # VkPhysicalDeviceShaderFloat16Int8FeaturesKHR
'MacOS', # VkMacOSSurfaceCreateInfoMVK
'Uint8', # VkPhysicalDeviceIndexTypeUint8FeaturesEXT
'Win32', # VkWin32SurfaceCreateInfoKHR
))
# A regex to match any of the SPECIAL_WORDS
EXCEPTION_PATTERN = r'(?P<exception>{})'.format(
'|'.join('(%s)' % re.escape(w) for w in SPECIAL_WORDS))
MAIN_RE = re.compile(
# the negative lookahead is to prevent the all-caps pattern from being too greedy.
r'({}|([0-9]+)|([A-Z][a-z]+)|([A-Z][A-Z]*(?![a-z])))'.format(EXCEPTION_PATTERN))
class VulkanConventions(ConventionsBase):
@property
def null(self):
"""Preferred spelling of NULL."""
return '`NULL`'
@property
def struct_macro(self):
"""Get the appropriate format macro for a structure.
Primarily affects generated valid usage statements.
"""
return 'slink:'
@property
def constFlagBits(self):
"""Returns True if static const flag bits should be generated, False if an enumerated type should be generated."""
return False
@property
def structtype_member_name(self):
"""Return name of the structure type member"""
return 'sType'
@property
def nextpointer_member_name(self):
"""Return name of the structure pointer chain member"""
return 'pNext'
@property
def valid_pointer_prefix(self):
"""Return prefix to pointers which must themselves be valid"""
return 'valid'
def is_structure_type_member(self, paramtype, paramname):
"""Determine if member type and name match the structure type member."""
return paramtype == 'VkStructureType' and paramname == self.structtype_member_name
def is_nextpointer_member(self, paramtype, paramname):
"""Determine if member type and name match the next pointer chain member."""
return paramtype == 'void' and paramname == self.nextpointer_member_name
def generate_structure_type_from_name(self, structname):
"""Generate a structure type name, like VK_STRUCTURE_TYPE_CREATE_INSTANCE_INFO"""
structure_type_parts = []
# Tokenize into "words"
for elem in MAIN_RE.findall(structname):
word = elem[0]
if word == 'Vk':
structure_type_parts.append('VK_STRUCTURE_TYPE')
else:
structure_type_parts.append(word.upper())
return '_'.join(structure_type_parts)
@property
def warning_comment(self):
"""Return warning comment to be placed in header of generated Asciidoctor files"""
return '// WARNING: DO NOT MODIFY! This file is automatically generated from the vk.xml registry'
@property
def file_suffix(self):
"""Return suffix of generated Asciidoctor files"""
return '.txt'
def api_name(self, spectype='api'):
"""Return API or specification name for citations in ref pages.ref
pages should link to for
spectype is the spec this refpage is for: 'api' is the Vulkan API
Specification. Defaults to 'api'. If an unrecognized spectype is
given, returns None.
"""
if spectype == 'api' or spectype is None:
return 'Vulkan'
else:
return None
@property
def xml_supported_name_of_api(self):
"""Return the supported= attribute used in API XML"""
return 'vulkan'
@property
def api_prefix(self):
"""Return API token prefix"""
return 'VK_'
@property
def write_contacts(self):
"""Return whether contact list should be written to extension appendices"""
return True
@property
def write_refpage_include(self):
"""Return whether refpage include should be written to extension appendices"""
return True
@property
def member_used_for_unique_vuid(self):
"""Return the member name used in the VUID-...-...-unique ID."""
return self.structtype_member_name
def is_externsync_command(self, protoname):
"""Returns True if the protoname element is an API command requiring
external synchronization
"""
return protoname is not None and 'vkCmd' in protoname
def is_api_name(self, name):
"""Returns True if name is in the reserved API namespace.
For Vulkan, these are names with a case-insensitive 'vk' prefix, or
a 'PFN_vk' function pointer type prefix.
"""
return name[0:2].lower() == 'vk' or name[0:6] == 'PFN_vk'
def specURL(self, spectype='api'):
"""Return public registry URL which ref pages should link to for the
current all-extensions HTML specification, so xrefs in the
asciidoc source that aren't to ref pages can link into it
instead. N.b. this may need to change on a per-refpage basis if
there are multiple documents involved.
"""
return 'https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html'
@property
def xml_api_name(self):
"""Return the name used in the default API XML registry for the default API"""
return 'vulkan'
@property
def registry_path(self):
"""Return relpath to the default API XML registry in this project."""
return 'xml/vk.xml'
@property
def specification_path(self):
"""Return relpath to the Asciidoctor specification sources in this project."""
return '{generated}/meta'
@property
def extra_refpage_headers(self):
"""Return any extra text to add to refpage headers."""
return 'include::../config/attribs.txt[]'
@property
def extension_index_prefixes(self):
"""Return a list of extension prefixes used to group extension refpages."""
return ['VK_KHR', 'VK_EXT', 'VK']
@property
def unified_flag_refpages(self):
"""Return True if Flags/FlagBits refpages are unified, False if
they're separate.
"""
return False
@property
def spec_reflow_path(self):
"""Return the path to the spec source folder to reflow"""
return os.getcwd()
@property
def spec_no_reflow_dirs(self):
"""Return a set of directories not to automatically descend into
when reflowing spec text
"""
return ('scripts', 'style')
@property
def zero(self):
return '`0`'
def category_requires_validation(self, category):
    """Report whether the given type `category` always requires validation.

    Overridden because Vulkan does not currently require "valid" text
    for basetype in the spec; only the categories listed in
    CATEGORIES_REQUIRING_VALIDATION qualify.
    """
    return category in CATEGORIES_REQUIRING_VALIDATION
@property
def should_skip_checking_codes(self):
    """Whether to skip more than the basic validation of return codes
    for a command.

    Vulkan mostly relies on the validation layers rather than built-in
    API error checking, so the extra checks are not appropriate: for
    example, passing a VkFormat parameter will not potentially generate
    a VK_ERROR_FORMAT_NOT_SUPPORTED code.
    """
    return True
def extension_include_string(self, ext):
    """Return an asciidoc include:: line for an extension appendix file.

    `ext` is an object with the following members:
      - name: full extension string
      - vendor: vendor portion of the name
      - barename: remainder of the name
    Only `name` is used here; the file suffix comes from self.file_suffix.
    """
    # {{appendices}} is escaped so the literal attribute reference
    # '{appendices}' survives into the output.
    template = 'include::{{appendices}}/{name}{suffix}[]'
    return template.format(name=ext.name, suffix=self.file_suffix)
@property
def refpage_generated_include_path(self):
    """Path, relative to the generated reference pages, of the generated
    API include files.
    """
    return "{generated}"
| 36.332061 | 122 | 0.669608 |
acf985ebf5910b38295e922ea4fd7a4fcbd9453c | 12,885 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 2 | 2020-03-11T12:19:45.000Z | 2020-03-11T15:37:53.000Z | #
# -*- coding: utf-8 -*-
# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import (
Facts,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
to_request,
edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.interfaces_util import (
build_interfaces_create_request,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
get_diff,
update_states,
normalize_interface_name
)
from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
import traceback
# Record whether the optional third-party 'requests' library is importable;
# on failure, keep the error message and traceback so the module can report
# a useful diagnostic instead of crashing at import time.
LIB_IMP_ERR = None
ERR_MSG = None
try:
    import requests
    HAS_LIB = True
except Exception as e:
    HAS_LIB = False
    ERR_MSG = to_native(e)
    LIB_IMP_ERR = traceback.format_exc()

# REST methods used when building requests sent to the SONiC device.
PATCH = 'patch'
DELETE = 'delete'
class Interfaces(ConfigBase):
    """
    The sonic_interfaces class

    Ansible resource-module implementation for SONiC interface
    configuration: compares the requested config ('want') with the device
    facts ('have') and builds the REST requests needed to converge.
    """

    # Fact-gathering selectors passed to Facts().get_facts().
    gather_subset = [
        '!all',
        '!min',
    ]
    gather_network_resources = [
        'interfaces',
    ]

    # Per-interface attributes this module manages.
    params = ('description', 'mtu', 'enabled')
    # Set by get_delete_interface_requests()/get_modify_interface_requests()
    # to tell get_interface_requests() which kind of request to build.
    delete_flag = False

    def __init__(self, module):
        super(Interfaces, self).__init__(module)

    def get_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)
        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        interfaces_facts = facts['ansible_network_resources'].get('interfaces')
        if not interfaces_facts:
            return []
        return interfaces_facts

    def execute_module(self):
        """ Execute the module
        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        existing_interfaces_facts = self.get_interfaces_facts()
        commands, requests = self.set_config(existing_interfaces_facts)
        if commands and len(requests) > 0:
            # Only push to the device outside check mode; 'changed' is
            # reported either way since commands were generated.
            if not self._module.check_mode:
                try:
                    edit_config(self._module, to_request(self._module, requests))
                except ConnectionError as exc:
                    self._module.fail_json(msg=str(exc), code=exc.code)
            result['changed'] = True
        result['commands'] = commands
        changed_interfaces_facts = self.get_interfaces_facts()
        result['before'] = existing_interfaces_facts
        if result['changed']:
            result['after'] = changed_interfaces_facts
        result['warnings'] = warnings
        return result

    def set_config(self, existing_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        # Expand short interface names (e.g. 'Eth1/1') to canonical form.
        normalize_interface_name(want, self._module)
        have = existing_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided
        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        # diff method works on dict, so creating temp dict
        diff = get_diff(want, have)
        # removing the dict in case diff found
        if state == 'overridden':
            # For overridden, only physical (Ethernet) interfaces from the
            # device facts are considered for reset.
            have = [each_intf for each_intf in have if each_intf['name'].startswith('Ethernet')]
            commands, requests = self._state_overridden(want, have, diff)
        elif state == 'deleted':
            commands, requests = self._state_deleted(want, have, diff)
        elif state == 'merged':
            commands, requests = self._state_merged(want, have, diff)
        elif state == 'replaced':
            commands, requests = self._state_replaced(want, have, diff)
        return commands, requests

    def _state_replaced(self, want, have, diff):
        """ The command generator when state is replaced
        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :param interface_type: interface type
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        # Reset differing interfaces to defaults, then apply the new values.
        commands = self.filter_comands_to_change(diff, have)
        requests = self.get_delete_interface_requests(commands, have)
        requests.extend(self.get_modify_interface_requests(commands, have))
        if commands and len(requests) > 0:
            commands = update_states(commands, "replaced")
        else:
            commands = []
        return commands, requests

    def _state_overridden(self, want, have, diff):
        """ The command generator when state is overridden
        :param want: the desired configuration as a dictionary
        :param obj_in_have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        # First reset everything that differs, then apply the diff.
        commands_del = self.filter_comands_to_change(want, have)
        requests = self.get_delete_interface_requests(commands_del, have)
        del_req_count = len(requests)
        if commands_del and del_req_count > 0:
            commands_del = update_states(commands_del, "deleted")
            commands.extend(commands_del)
        commands_over = diff
        requests.extend(self.get_modify_interface_requests(commands_over, have))
        # Only report 'overridden' commands if modify requests were added
        # beyond the delete requests counted above.
        if commands_over and len(requests) > del_req_count:
            commands_over = update_states(commands_over, "overridden")
            commands.extend(commands_over)
        return commands, requests

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged
        :param want: the additive configuration as a dictionary
        :param obj_in_have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = diff
        requests = self.get_modify_interface_requests(commands, have)
        if commands and len(requests) > 0:
            commands = update_states(commands, "merged")
        else:
            commands = []
        return commands, requests

    def _state_deleted(self, want, have, diff):
        """ The command generator when state is deleted
        :param want: the objects from which the configuration should be removed
        :param obj_in_have: the current configuration as a dictionary
        :param interface_type: interface type
        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        # if want is none, then delete all the interfaces
        if not want:
            commands = have
        else:
            commands = want
        requests = self.get_delete_interface_requests(commands, have)
        if commands and len(requests) > 0:
            commands = update_states(commands, "deleted")
        else:
            commands = []
        return commands, requests

    def filter_comands_to_delete(self, configs, have):
        """Build default-reset commands for interfaces that need deleting.

        'Deleting' a physical interface means restoring its managed
        attributes to defaults (empty description, MTU 9100, enabled).
        """
        commands = []
        for conf in configs:
            if self.is_this_delete_required(conf, have):
                temp_conf = dict()
                temp_conf['name'] = conf['name']
                temp_conf['description'] = ''
                temp_conf['mtu'] = 9100
                temp_conf['enabled'] = True
                commands.append(temp_conf)
        return commands

    def filter_comands_to_change(self, configs, have):
        """Keep only the configs that actually differ from the device."""
        commands = []
        if configs:
            for conf in configs:
                if self.is_this_change_required(conf, have):
                    commands.append(conf)
        return commands

    def get_modify_interface_requests(self, configs, have):
        """Build PATCH requests for interfaces whose config must change."""
        self.delete_flag = False
        commands = self.filter_comands_to_change(configs, have)
        return self.get_interface_requests(commands, have)

    def get_delete_interface_requests(self, configs, have):
        """Build delete/reset requests for interfaces needing a default reset."""
        self.delete_flag = True
        commands = self.filter_comands_to_delete(configs, have)
        return self.get_interface_requests(commands, have)

    def get_interface_requests(self, configs, have):
        """Translate command dicts into OpenConfig REST requests.

        Loopback interfaces are truly deleted (DELETE) or created on demand;
        all other interfaces are updated in place with PATCH.
        """
        requests = []
        if not configs:
            return requests
        # Create URL and payload
        for conf in configs:
            name = conf["name"]
            if self.delete_flag and name.startswith('Loopback'):
                method = DELETE
                url = 'data/openconfig-interfaces:interfaces/interface=%s' % quote(name, safe='')
                request = {"path": url, "method": method}
            else:
                # Create Loopback in case not available in have
                if name.startswith('Loopback'):
                    have_conf = next((cfg for cfg in have if cfg['name'] == name), None)
                    if not have_conf:
                        loopback_create_request = build_interfaces_create_request(name)
                        requests.append(loopback_create_request)
                method = PATCH
                url = 'data/openconfig-interfaces:interfaces/interface=%s/config' % quote(name, safe='')
                payload = self.build_create_payload(conf)
                request = {"path": url, "method": method, "data": payload}
            requests.append(request)
        return requests

    def is_this_delete_required(self, conf, have):
        """Return True if the interface must be reset/deleted.

        Loopbacks always qualify; other interfaces qualify only when any
        managed attribute differs from its default (description '', enabled
        True, MTU 9100). The management interface eth0 is never touched.
        """
        if conf['name'] == "eth0":
            return False
        intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None)
        if intf:
            if (intf['name'].startswith('Loopback') or not ((intf.get('description') is None or intf.get('description') == '') and
                    (intf.get('enabled') is None or intf.get('enabled') is True) and (intf.get('mtu') is None or intf.get('mtu') == 9100))):
                return True
        return False

    def is_this_change_required(self, conf, have):
        """Return True if conf differs from the device state for any
        managed parameter, or the interface does not exist on the device.
        The management interface eth0 is never touched.
        """
        if conf['name'] == "eth0":
            return False
        ret_flag = False
        intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None)
        if intf:
            # Check all parameters; flag if any one is different from existing
            for param in self.params:
                if conf.get(param) is not None and conf.get(param) != intf.get(param):
                    ret_flag = True
                    break
        # if given interface is not present
        else:
            ret_flag = True
        return ret_flag

    def build_create_payload(self, conf):
        """Build the openconfig-interfaces config payload for one interface.

        Loopbacks only carry the name; other interfaces include whichever of
        enabled/description/mtu were supplied.
        """
        temp_conf = dict()
        temp_conf['name'] = conf['name']
        if not temp_conf['name'].startswith('Loopback'):
            if conf.get('enabled') is not None:
                if conf.get('enabled'):
                    temp_conf['enabled'] = True
                else:
                    temp_conf['enabled'] = False
            if conf.get('description') is not None:
                temp_conf['description'] = conf['description']
            if conf.get('mtu') is not None:
                temp_conf['mtu'] = conf['mtu']
        payload = {'openconfig-interfaces:config': temp_conf}
        return payload
| 36.295775 | 135 | 0.62988 |
acf987e1ebee1b7cc49a78621f159399e05d9c2b | 1,862 | py | Python | src/download_csv.py | UBC-MDS/seismophobia- | 7e23741a4ec226e3c4d9994999a2174023fd9d43 | [
"MIT"
] | null | null | null | src/download_csv.py | UBC-MDS/seismophobia- | 7e23741a4ec226e3c4d9994999a2174023fd9d43 | [
"MIT"
] | 24 | 2020-11-20T00:34:35.000Z | 2021-01-20T15:30:16.000Z | src/download_csv.py | UBC-MDS/seismophobia- | 7e23741a4ec226e3c4d9994999a2174023fd9d43 | [
"MIT"
] | 4 | 2020-11-19T23:23:23.000Z | 2020-11-19T23:25:40.000Z | # author: Trevor Kinsey (inspired by Tiffany Timbers)
# date: 2020-11-20
"""Downloads data from a url to a file specified by filepath.
Usage: download_data.py <url> <filepath>
Options:
<url> URL from where to download the data in csv format
<filepath> Path to desired local file location
"""
import os
import pandas as pd
from docopt import docopt
opt = docopt(__doc__)
def main(url, filepath):
    """Retrieve a CSV data set from url and save it to filepath.

    Parameters
    ----------
    url : str
        URL (or local path) of the earthquake data set in CSV format
    filepath : str
        Path to save the earthquake data set

    Returns
    -------
    None
    """
    data = pd.read_csv(url)
    # Create the target directory up front instead of a bare `except`
    # around to_csv, which swallowed every write error (permissions,
    # disk full, ...) and retried blindly. os.path.dirname() is '' when
    # filepath has no directory component, and makedirs('') would raise,
    # so only create the directory when there is one.
    directory = os.path.dirname(filepath)
    if directory:
        os.makedirs(directory, exist_ok=True)
    data.to_csv(filepath, index=False)
def test():
    """Smoke tests for the main() function.

    Downloads a known data set, saves it through main(), and checks the
    file exists with the expected shape. Requires network access.

    Raises
    -------
    AssertionError: If any test does not pass
    """
    url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/san-andreas/earthquake_data.csv'
    out_dir = 'unit_test'
    out_file = os.path.join(out_dir, 'unit_test.csv')

    main(url, out_file)
    assert os.path.isfile(out_file), "File is not created at the specified path"

    fetched = pd.read_csv(url)
    written = pd.read_csv(out_file)
    assert written.shape == (1013, 11), "Saved data does not match the shape of the original data"

    # Delete the downloaded file, and the directory if nothing else is in it.
    os.remove(out_file)
    if not os.listdir(out_dir):
        os.rmdir(out_dir)
test()
main(opt["<url>"], opt["<filepath>"])
| 27.382353 | 115 | 0.669173 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.