hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
513455e1e49a2cc7f2af7ad92dbc3662b0e294bf | 761 | py | Python | day-4.py | shadowfool/advent-of-code-2017 | 9f2312c2cef9891c3bdb7c970eccc4eb48f714df | [
"MIT"
] | null | null | null | day-4.py | shadowfool/advent-of-code-2017 | 9f2312c2cef9891c3bdb7c970eccc4eb48f714df | [
"MIT"
] | null | null | null | day-4.py | shadowfool/advent-of-code-2017 | 9f2312c2cef9891c3bdb7c970eccc4eb48f714df | [
"MIT"
] | null | null | null |
input = [line.rstrip() for line in open('./inputs/day4.txt')]
badWords = 0
for line in input:
    dictionary = {}
    words = line.split(' ')
    for word in words:
        print(words)
        if word in dictionary:
            badWords = badWords + 1
            break
        dictionary[word] = 1
print(len(input) - badWords)
# ---- CHALLENGE 2 ------
badCount = 0
def letterMap(word=''):
    mp = {}
    for i, letter in enumerate(''.join(sorted(word))):
        if letter not in mp: mp[letter] = 0
        mp[letter] = mp[letter] + 1
    return mp
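# e.g. letterMap('abba') -> {'a': 2, 'b': 2}; two words are anagrams
# exactly when their letter maps are equal.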
for line in input:
    words = line.split(' ')
    words = [letterMap(word) for word in words]
    if len(words) != len([dict(t) for t in set([tuple(d.items()) for d in words])]):
        badCount = badCount + 1
print(len(input) - badCount) | 21.742857 | 82 | 0.599212 | 110 | 761 | 4.145455 | 0.354545 | 0.046053 | 0.059211 | 0.061404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015625 | 0.243101 | 761 | 35 | 83 | 21.742857 | 0.776042 | 0.030223 | 0 | 0.16 | 0 | 0 | 0.025815 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0 | 0 | 0.08 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
513c933b5d0724d79a4413fb53c8512d831f68a7 | 7,464 | py | Python | prd_score_classifier.py | DrLSimon/precision-recall-distributions-icml19 | 364188eaa26ac1bf39ebf038136c79aeee97da3a | [
"Apache-2.0"
] | null | null | null | prd_score_classifier.py | DrLSimon/precision-recall-distributions-icml19 | 364188eaa26ac1bf39ebf038136c79aeee97da3a | [
"Apache-2.0"
] | null | null | null | prd_score_classifier.py | DrLSimon/precision-recall-distributions-icml19 | 364188eaa26ac1bf39ebf038136c79aeee97da3a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from prdataset import *
from torch.utils.data import DataLoader
from torchvision import transforms
import tqdm
from models import *
from inception_torch import InceptionV3
cuda = torch.cuda.is_available()
if cuda:
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    device = torch.device('cpu')


def progressbar(iterable, leave=False):
    return tqdm.tqdm(iterable, leave=leave)
def createTrainTestSets(source_folder, target_folder, noise=False):
    transform_test = [transforms.ToTensor()]
    if noise:
        addGaussianNoise = lambda tensor: tensor + torch.randn(tensor.shape) * 0.1
        transform_test.append(transforms.Lambda(addGaussianNoise))
    transform_train = transforms.Compose([
    ] + transform_test)
    return SourceTargetDataset(source_folder, target_folder,
                               transform_train=transform_train,
                               transform_test=transforms.ToTensor())
class ClassifierTrainer:
    def __init__(self, dataset, description):
        self.dataset = dataset
        self.totalLoss = np.inf
        self.description = description
        self.__load()

    def __load(self):
        if self.description == 'alex':
            self.features = AlexDiscriminator().eval().to(device)
            self.feat_size = 4096
        elif self.description == 'vgg':
            # bug fix: assign to self.features (the original bound a bare
            # local, leaving the attribute unset in the 'vgg' branch)
            self.features = VGGDiscriminator().eval().to(device)
            self.feat_size = 4096
        elif self.description == "inception":
            self.feat_size = dims = 2048
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
            features = InceptionV3([block_idx], normalize_input=True)
            self.features = features.eval().to(device)
        else:
            raise ValueError('Unknown classifier')
        self.batch_size = 64
        self.dataset.precomputeFeatures(self.features, self.batch_size, device)

    def initClassifier(self):
        nh = 128
        self.classifier = nn.Sequential(
            nn.Linear(self.feat_size, 1, bias=False),
        )
        self.classifier.to(device).train()
    def train(self):
        self.totalLoss = 0
        for batch_num, (samples, flips) in enumerate(progressbar(self.train_loader)):
            def closure():
                self.optimizer.zero_grad()
                predictions = self.classifier(samples.to(device))
                loss = self.log_loss(predictions.squeeze(), flips.to(device))
                loss.backward()
                self.totalLoss += float(loss)
                return loss
            self.optimizer.step(closure)
    def test(self):
        self.classifier.eval()
        self.dataset.eval()
        error_I = 0
        error_II = 0
        cnt_I = 0
        cnt_II = 0
        for batch_num, (samples, flips) in enumerate(progressbar(self.train_loader)):
            predictions = self.classifier(samples.to(device))
            predictions = (predictions > 0)
            flips = (flips > 0)
            cnt_I += int((flips.to(device) == 0).sum())
            cnt_II += int((flips.to(device) == 1).sum())
            typeI = (predictions.squeeze() == 1) & (flips.to(device) == 0)
            typeII = (predictions.squeeze() == 0) & (flips.to(device) == 1)
            error_I += int(typeI.sum())
            error_II += int(typeII.sum())
        error_I = float(error_I) / float(cnt_I)
        error_II = float(error_II) / float(cnt_II)
        self.classifier.train()
        self.dataset.train()
        error = 0.5 * (error_I + error_II)
        self.scheduler.step(error)
        self.pbar.set_postfix(loss=self.totalLoss, error=f'({error_I:.2}+{error_II:.2})/2={error:.2}', lr=self.optimizer.param_groups[0]['lr'])
        return self.stopper.step(error)
    def run(self, num_epochs, patience):
        early_stopping = (patience >= 1)
        if early_stopping:
            from early_stopping import EarlyStopping
            self.stopper = EarlyStopping(patience=patience)
        self.initClassifier()
        self.dataset.train()
        self.train_loader = DataLoader(self.dataset, self.batch_size, shuffle=True, num_workers=0)
        self.optimizer = optim.Adam(self.classifier.parameters(), lr=1e-3, weight_decay=1e-1, amsgrad=False)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min', patience=2, cooldown=3, factor=0.5)
        self.log_loss = torch.nn.BCEWithLogitsLoss()
        self.pbar = progressbar(range(num_epochs))
        for ep in self.pbar:
            if early_stopping:
                with torch.no_grad():
                    shouldStop = self.test()
                if shouldStop:
                    self.pbar.close()
                    break
            self.train()
        return self.classifier
def estimatePRD(classifier, dataset, num_angles, epsilon=1e-10):
    if not (num_angles >= 3 and num_angles <= 1e6):
        raise ValueError('num_angles must be in [3, 1e6] but is %d.' % num_angles)
    dataset.eval()
    classifier.eval()
    test_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    # Compute slopes for linearly spaced angles between [0, pi/2]
    angles = np.linspace(epsilon, np.pi/2 - epsilon, num=num_angles)
    slopes = np.tan(angles)
    toTorch = lambda z: torch.from_numpy(z).unsqueeze(0).to(device)
    with torch.no_grad():
        fValsAndUs = [(float(classifier(Z.to(device))), int(U)) for Z, U in progressbar(test_loader)]
    fVals = [val for val, U in fValsAndUs]
    fVals = [np.min(fVals) - 1] + fVals + [np.max(fVals) + 1]
    errorRates = []
    for t in fVals:
        fpr = sum([(fOfZ >= t) and U == 0 for fOfZ, U in fValsAndUs]) / float(sum([U == 0 for fOfZ, U in fValsAndUs]))
        fnr = sum([(fOfZ < t) and U == 1 for fOfZ, U in fValsAndUs]) / float(sum([U == 1 for fOfZ, U in fValsAndUs]))
        errorRates.append((float(fpr), float(fnr)))
    precision = []
    recall = []
    for slope in slopes:
        prec = min([slope * fnr + fpr for fpr, fnr in errorRates])
        precision.append(prec)
        rec = min([fnr + fpr / slope for fpr, fnr in errorRates])
        recall.append(rec)
    # handle numerical instabilities leading to precision/recall just above 1
    max_val = max(np.max(precision), np.max(recall))
    if max_val > 1.001:
        print(max_val)
        raise ValueError('Detected value > 1.001, this should not happen.')
    precision = np.clip(precision, 0, 1)
    recall = np.clip(recall, 0, 1)
    return precision, recall
class EnsembleClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.networks = []

    def append(self, net):
        self.networks.append(net)

    def forward(self, x):
        preds = []
        for net in self.networks:
            preds.append(net(x))
        return torch.median(torch.stack(preds), dim=0)[0]
def computePRD(source_folder, target_folder, num_angles=1001, num_runs=10, num_epochs=10, patience=0):
    precisions = []
    recalls = []
    ensemble = EnsembleClassifier()
    dataset = createTrainTestSets(source_folder, target_folder)
    trainer = ClassifierTrainer(dataset, 'inception')
    for k in progressbar(range(num_runs)):
        classifier = trainer.run(num_epochs, patience)
        ensemble.append(classifier)
    precision, recall = estimatePRD(ensemble, trainer.dataset, num_angles)
    return precision, recall
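# Hypothetical usage sketch (folder paths are illustrative):
# precision, recall = computePRD('data/real_images', 'data/generated_images',
#                                num_runs=3, num_epochs=5)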
| 36.950495 | 143 | 0.624732 | 913 | 7,464 | 4.992333 | 0.250821 | 0.022817 | 0.014261 | 0.021062 | 0.120667 | 0.087758 | 0.070206 | 0.060114 | 0.047389 | 0.047389 | 0 | 0.017867 | 0.257637 | 7,464 | 201 | 144 | 37.134328 | 0.804728 | 0.017417 | 0 | 0.095238 | 0 | 0 | 0.025372 | 0.005593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.071429 | 0.005952 | 0.214286 | 0.005952 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5141a9a0670c91a2b930b40b597fedf7c7054b49 | 8,906 | py | Python | lambdaproject/settings/base.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 6 | 2015-04-05T01:28:23.000Z | 2022-02-06T17:29:47.000Z | lambdaproject/settings/base.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 2 | 2022-01-05T23:07:10.000Z | 2022-03-30T17:52:45.000Z | lambdaproject/settings/base.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 2 | 2022-02-06T17:29:53.000Z | 2022-02-26T17:23:09.000Z | import os
# Path to your LambdaCast instance (no / behind the path)
try:
    from local import ABSOLUTE_PATH
except ImportError:
    ABSOLUTE_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../.."

# Domain your instance should use, for example: 'http://example.com' (no / behind the path)
try:
    from local import DOMAIN
except ImportError:
    DOMAIN = 'http://localhost:8000'
ALLOWED_HOSTS = ['*',]
# Domain of your website, for example: 'http://example.com' (no / behind the path)
WEBSITE_URL = 'http://example.com'
# Name of your website, will be displayed in title, header and opengraph
SITE_NAME = 'LambdaCast'
# Name of the author of the rss feed
AUTHOR_NAME = 'Author Name'
# E-mail adress for the contact link in the sidebar on index page
CONTACT_EMAIL = 'root@example.com'
# URL or path to your logo that will be displayed above the right sidebar
LOGO_URL = DOMAIN + '/static/logo.png'
# Django settings for lambdaproject.project
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# If you use an virtualenv (you schould) enter it here
VIRTUALENV = ABSOLUTE_PATH + '/.venv/lib/python2.7/site-packages'
# The guys who will get an email if something is wrong
ADMINS = (
    ('name', 'root@localhost'),
)

TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Your database settings, sqlite is good for development and testing, not for deployment
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'test.sql',  # Or path to database file if using sqlite3.
        'USER': '',  # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',  # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'de-de'
# Language code for the OpenGraph implementation.
OG_LANGUAGE_CODE = 'de_DE'
LOCALE_PATHS = (
    ABSOLUTE_PATH + '/locale',
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
try:
    from local import MEDIA_ROOT
except ImportError:
    MEDIA_ROOT = ABSOLUTE_PATH + '/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = DOMAIN + '/media/'
# Where do you want your upload cache to live (there should be some space left)
FILE_UPLOAD_TEMP_DIR = MEDIA_ROOT + '/upload'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ABSOLUTE_PATH + '/static_files/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ABSOLUTE_PATH + '/lambdaproject/static/',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ThisOneIsNotUniqeSoPleaseChange'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'lambdaproject.middleware.SettingsMiddleware',
    'pages.middleware.PagesMiddleware',
    'portal.middleware.SubmittalMiddleware',
)
ROOT_URLCONF = 'lambdaproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'lambdaproject.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ABSOLUTE_PATH + '/templates',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_admin_bootstrapped.bootstrap3',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    #'django.contrib.admindocs',
    'taggit',
    'portal',
    'livestream',
    'pages',
    'djangotasks',
    'taggit_templatetags',
    'simple_open_graph',
    'captcha',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = 'django.contrib.auth.views.login'
LOGOUT_URL = 'django.contrib.auth.views.logout'
# formerly "portal/appsettings.py"
ENCODING_OUTPUT_DIR = MEDIA_ROOT + '/encoded/'
# How can we reach this files (public access is needed)
ENCODED_BASE_URL = DOMAIN + '/media/encoded/'
THUMBNAILS_DIR = MEDIA_ROOT + '/thumbnails/'
THUMBNAILS_BASE_URL = DOMAIN + '/media/thumbnails/'
ENABLE_LIVESTREAMS = False
ENABLE_AUDIO = True
ENABLE_VIDEO = True
# Host and port for the mail server to send mails for new comments
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
USE_BITTORRENT = False
# example: "udp://tracker.example.com:80"
BITTORRENT_TRACKER_ANNOUNCE_URL = ''
# example: "udp://tracker.example1.com:80,udp://tracker.example2.com:80,udp://tracker.example3.com:80"
BITTORRENT_TRACKER_BACKUP = ''
BITTORRENT_FILES_DIR = MEDIA_ROOT + '/torrents/'
# Where does transmission expects the original files? (This directory must be writeable for both transmission and LambdaCast!)
BITTORRENT_DOWNLOADS_DIR = ''
# What is the URL of the BITTORRENT_FILES_DIR?
BITTORRENT_FILES_BASE_URL = DOMAIN + '/media/torrents/'
# Host and port Transmission is listening on (probably localhost)
TRANSMISSION_HOST = '127.0.0.1'
TRANSMISSION_PORT = 9091
# Base dir for hotfolders, example: "/opt/hotfolder/"
HOTFOLDER_BASE_DIR = ''
HOTFOLDER_MOVE_TO_DIR = MEDIA_ROOT + '/raw/'
# django-simple-captcha
CAPTCHA_LETTER_ROTATION = None
CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.math_challenge'
CAPTCHA_NOISE_FUNCTIONS = None
CAPTCHA_FILTER_FUNCTIONS = None
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
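# Hypothetical local.py override sketch (values are illustrative; the
# try/except imports above pick these up when the module exists):
# ABSOLUTE_PATH = '/srv/lambdacast'
# DOMAIN = 'https://cast.example.com'
# MEDIA_ROOT = '/srv/lambdacast/media'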
| 33.355805 | 126 | 0.717045 | 1,144 | 8,906 | 5.473776 | 0.357517 | 0.035292 | 0.009582 | 0.011498 | 0.123283 | 0.0939 | 0.075695 | 0.066113 | 0.053338 | 0.040882 | 0 | 0.006688 | 0.177296 | 8,906 | 266 | 127 | 33.481203 | 0.84796 | 0.484505 | 0 | 0.054054 | 0 | 0 | 0.40062 | 0.254598 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.006757 | 0.047297 | 0 | 0.047297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514411ea3d0032d20f78be6935784f8081b90d34 | 2,072 | py | Python | sentiment_classifier/process.py | dang-trung/stocktwits-sentiment-classifier | 5b6a75abce3a6b701da81f616a0e5b63e9c0dba6 | [
"MIT"
] | null | null | null | sentiment_classifier/process.py | dang-trung/stocktwits-sentiment-classifier | 5b6a75abce3a6b701da81f616a0e5b63e9c0dba6 | [
"MIT"
] | 1 | 2020-11-18T19:15:50.000Z | 2020-11-24T02:21:33.000Z | sentiment_classifier/process.py | dang-trung/stocktwits-sentiment-classifier | 5b6a75abce3a6b701da81f616a0e5b63e9c0dba6 | [
"MIT"
] | null | null | null | """Text Pre-processing.
This module processes text messages based on Chen et al. (2019).
Added some steps (such as escaping HTML symbols, or having a more detailed list
of stop and negative words).
"""
import html
import re
import string
import pandas as pd
# Repeated chars more than 3 times
repeat_regex = r'(\w)\1{2,}'
# Cashtag: $<word><opt .-><opt word>
cashtag_regex = r'\$\w+[.-]?\w?'
# Money: <$ or €><digits><opt word>
moneytag_regex = r'[\$€]\d+\w?'
# Numbers: <1 or more nums>
numbertag_regex = r'\d+[\.,]?\d?\w?'
# Hyperlinks: http<opt s>://<opt www.><words><. words><opt />
linktag_regex = r'https?://(www\.)?(\w+)(\.\w+)/?'
# Users: @<opt words>
usertag_regex = r'@\w+'
# Remove stopwords
stops = pd.read_csv('data/00_external/stopwords.csv', header=None)[0].to_list()
stop_set = '|'.join(stops)
stop_regex = rf"\b({stop_set})\s"
# Negative words
negs = pd.read_csv('data/00_external/negative.csv', header=None)[0].to_list()
neg_set = '|'.join(negs)
negtag_regex = rf"({neg_set})\s(\w?)"
# Remove punctuations
punctuation = string.punctuation
punctuation = punctuation.replace('!', '')
punctuation = punctuation.replace('?', '')
punctuation = punctuation.replace("'", '')
punc_regex = rf"[{punctuation}]"
def pre_process(text):
    """
    Text-process to remove all words unnecessary for classifying sentiment

    Parameters
    ----------
    text : str
        Text to be processed

    Returns
    -------
    str
        Processed text
    """
    text = text.lower()  # Lowercase
    text = html.unescape(text)  # Convert html codes to normal strings
    text = re.sub(repeat_regex, r'\1\1\1', text)
    text = re.sub(cashtag_regex, 'cashtag', text)
    text = re.sub(moneytag_regex, 'moneytag', text)
    text = re.sub(numbertag_regex, 'numbertag', text)
    text = re.sub(linktag_regex, 'linktag', text)
    text = re.sub(usertag_regex, 'usertag', text)
    text = re.sub(stop_regex, '', text)
    text = re.sub(punc_regex, '', text)
    text = re.sub(negtag_regex, r' negtag_\2', text)
    text = re.sub(r"'", '', text)
    return text
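# Hypothetical usage sketch (output assumes 'to' and 'the' appear in the
# external stopwords file; the message is illustrative):
# >>> pre_process("$AAPL to the moon!!! https://example.com @trader")
# roughly 'cashtag moon!!! linktag usertag'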
| 29.183099 | 79 | 0.642857 | 295 | 2,072 | 4.420339 | 0.372881 | 0.07362 | 0.069018 | 0.089724 | 0.160276 | 0.132669 | 0 | 0 | 0 | 0 | 0 | 0.010514 | 0.173745 | 2,072 | 70 | 80 | 29.6 | 0.75 | 0.326255 | 0 | 0 | 0 | 0 | 0.189189 | 0.067568 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.114286 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514491bebe24982f7e39bca4c4425c0e236edb60 | 2,062 | py | Python | fredo/editor/brush_dialog.py | yasiupl/FreDo | 73bdc380dd82df171fe63998f0affa092e30759a | [
"BSD-3-Clause"
] | 6 | 2015-08-21T08:43:25.000Z | 2021-12-29T16:16:59.000Z | fredo/editor/brush_dialog.py | yasiupl/FreDo | 73bdc380dd82df171fe63998f0affa092e30759a | [
"BSD-3-Clause"
] | 2 | 2019-03-25T10:16:18.000Z | 2022-01-11T19:14:01.000Z | fredo/editor/brush_dialog.py | yasiupl/FreDo | 73bdc380dd82df171fe63998f0affa092e30759a | [
"BSD-3-Clause"
] | 2 | 2020-10-29T06:15:03.000Z | 2021-12-29T16:42:28.000Z | from PySide.QtGui import QDialog
from ..gui.brush_dialog import Ui_BrushDialog
from PySide.QtGui import QPixmap
from PySide.QtCore import Qt
from ..brushes import SquareBrush
import math
class BrushDialog(QDialog):
    def __init__(self, parent=None, brush=None):
        super(BrushDialog, self).__init__(parent)
        self.ui = Ui_BrushDialog()
        self.ui.setupUi(self)
        self.ui.size_slider.valueChanged.connect(self.size_changed)
        self.ui.brush_combo_box.currentIndexChanged.connect(self.brush_changed)
        self.ui.brush_done_btn.clicked.connect(self.select_brush)
        self.ui.brush_combo_box.setCurrentIndex(0)
        self.brush_changed(0)
        self.selected_brush = brush
        self.ui.size_slider.setSliderPosition(10)
        if brush:
            self.ui.size_slider.setSliderPosition(brush.size)
            degrees = brush.angle * 180 / (math.pi)
            self.ui.angle_slider.setSliderPosition(degrees)
            self.ui.magnitude_box.setValue(brush.magnitude)

    def size_changed(self, value):
        "Handle the slider drag event."
        size = self.ui.brush_demo_label.size()
        pixmap = QPixmap(100, 100)
        pixmap.fill(Qt.white)
        cx, cy = int(size.width() / 2), int(size.height() / 2)
        self.current_brush.set_size(value)
        self.current_brush.draw_marker(cx, cy, pixmap, 1)
        self.ui.brush_demo_label.setPixmap(pixmap)

    def brush_changed(self, index):
        "Handle the brush type change"
        if index == 0:
            self.current_brush = SquareBrush(size=self.ui.size_slider.value())

    def get_brush(self):
        " Get the selected brush or `None` if dialog was closed. "
        return self.selected_brush

    def select_brush(self):
        " Select the currently configured brush params "
        self.selected_brush = self.current_brush
        self.selected_brush.set_magnitude(self.ui.magnitude_box.value())
        radians = self.ui.angle_slider.value() * math.pi / 180.0
        self.selected_brush.set_angle(radians)
        self.close()
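# Hypothetical usage sketch (inside a running QApplication; main_window is
# illustrative):
# dialog = BrushDialog(parent=main_window, brush=None)
# dialog.exec_()
# brush = dialog.get_brush()  # the configured brush, or None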
| 34.366667 | 79 | 0.679922 | 271 | 2,062 | 4.98893 | 0.309963 | 0.066568 | 0.04068 | 0.047337 | 0.113905 | 0.056213 | 0 | 0 | 0 | 0 | 0 | 0.013052 | 0.21969 | 2,062 | 59 | 80 | 34.949153 | 0.827222 | 0.079049 | 0 | 0 | 0 | 0 | 0.078565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.130435 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51457008bd685f3f5dea47108bc2573ac5535321 | 1,510 | py | Python | src/lib/osta.py | anroots/osta-exporter | 14b05bb905b9df59f9e62e72b33c64a890eb973b | [
"Apache-2.0"
] | null | null | null | src/lib/osta.py | anroots/osta-exporter | 14b05bb905b9df59f9e62e72b33c64a890eb973b | [
"Apache-2.0"
] | null | null | null | src/lib/osta.py | anroots/osta-exporter | 14b05bb905b9df59f9e62e72b33c64a890eb973b | [
"Apache-2.0"
] | null | null | null | from json import JSONDecodeError
import requests
import sys
class Osta:
    def __init__(self, logger, api_url):
        self.api_url = api_url
        self.logger = logger

    def get_user_items(self, user_id):
        self.logger.debug('Starting collection of osta.ee meters')
        query_params = {
            'userId': [user_id]
        }
        items = self.make_request('/items/active', query_params)
        self.logger.debug('Received {} items from osta.ee'.format(len(items)))
        return items

    @staticmethod
    def get_request_headers():
        return {
            'Accept': 'application/json',
            'Accept-Language': 'en',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
        }

    def make_request(self, uri, query_params):
        self.logger.debug('Sending request to Osta API')
        uri = self.api_url + uri
        try:
            r = requests.get(url=uri, params=query_params, headers=self.get_request_headers())
        except requests.exceptions.RequestException as e:
            self.logger.fatal(e)
            self.logger.fatal('Received error from HTTP request, exiting')
            sys.exit(1)
        try:
            response = r.json()
        except JSONDecodeError:
            self.logger.fatal('Osta HTTP endpoint returned invalid JSON, can not parse it')
            self.logger.fatal(r.text)
            sys.exit(1)
        return response
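# Hypothetical usage sketch (URL and user id are illustrative):
# osta = Osta(logging.getLogger('osta'), 'https://api.osta.ee')
# items = osta.get_user_items(user_id=12345)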
| 32.826087 | 141 | 0.609272 | 190 | 1,510 | 4.726316 | 0.447368 | 0.100223 | 0.066815 | 0.053452 | 0.097996 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026827 | 0.284106 | 1,510 | 45 | 142 | 33.555556 | 0.803885 | 0 | 0 | 0.105263 | 0 | 0.026316 | 0.247682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.078947 | 0.026316 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5146da738f04b9b9d8f97c34d071f17da9198bde | 794 | py | Python | configs/fdf/deep_privacy_v1.py | skoskjei/DP-ATT | eb7380099f5c7e533fd0d247456b4a418529d62b | [
"MIT"
] | 1,128 | 2019-09-11T01:38:09.000Z | 2022-03-31T17:06:56.000Z | configs/fdf/deep_privacy_v1.py | skoskjei/DP-ATT | eb7380099f5c7e533fd0d247456b4a418529d62b | [
"MIT"
] | 45 | 2019-09-11T05:39:53.000Z | 2021-12-05T17:52:07.000Z | configs/fdf/deep_privacy_v1.py | skoskjei/DP-ATT | eb7380099f5c7e533fd0d247456b4a418529d62b | [
"MIT"
] | 185 | 2019-09-11T02:15:56.000Z | 2022-03-23T16:12:41.000Z |
_base_config_ = "base.py"
model_size = 512
model_url = "http://folk.ntnu.no/haakohu/checkpoints/step_42000000.ckpt"
models = dict(
    scalar_pose_input=False,
    max_imsize=128,
    conv_size={
        4: model_size,
        8: model_size,
        16: model_size,
        32: model_size,
        64: model_size // 2,
        128: model_size // 4,
        256: model_size // 8,
        512: model_size // 16
    },
    generator=dict(
        conv2d_config=dict(
            conv=dict(
                gain=2 ** 0.5
            )
        ),
        type="DeepPrivacyV1"),
)
trainer = dict(
    progressive=dict(
        enabled=False,
        lazy_regularization=True
    ),
    batch_size_schedule={
        128: 32,
        256: 32
    },
    optimizer=dict(
        learning_rate=0.0015
    )
)
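# Note: with model_size = 512, conv_size halves the channel count each time
# the resolution doubles past 32x32 (512 -> 256 -> 128 -> 64 -> 32 channels
# for imsizes 64 -> 512), a common progressive-GAN capacity schedule.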
| 18.465116 | 72 | 0.540302 | 92 | 794 | 4.413043 | 0.565217 | 0.199507 | 0.049261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107692 | 0.345088 | 794 | 42 | 73 | 18.904762 | 0.673077 | 0 | 0 | 0.054054 | 0 | 0 | 0.098361 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514737538b6050cbe92637918e942f1823b10292 | 1,699 | py | Python | server/weather/RestWeatherProvider.py | EveryOtherUsernameWasAlreadyTaken/BIS | e132ce42dcc74e634231398dfecb08834d478cba | [
"MIT"
] | 3 | 2019-07-09T08:51:20.000Z | 2019-09-16T17:27:54.000Z | server/weather/RestWeatherProvider.py | thomasw-mitutoyo-ctl/BIS | 08525cc12164902dfe968ae41beb6de0cd5bc411 | [
"MIT"
] | 24 | 2019-06-17T12:33:35.000Z | 2020-03-27T08:17:35.000Z | server/weather/RestWeatherProvider.py | EveryOtherUsernameWasAlreadyTaken/BIS | e132ce42dcc74e634231398dfecb08834d478cba | [
"MIT"
] | 1 | 2020-03-24T17:54:07.000Z | 2020-03-24T17:54:07.000Z | import json
import logging
import threading
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
log = logging.getLogger(__name__)
class RestWeatherProvider(threading.Thread):
    """
    The RestWeatherProvider serves the collected weather data using a simple http server. The weather data can be
    obtained by doing a simple http GET request
    """

    def __init__(self, repository, address, port):
        super(RestWeatherProvider, self).__init__()
        self.repository = repository
        self.port = port
        self.address = address

    def run(self):
        try:
            log.info("Starting WeatherProvider")
            # Create and start the http server
            server = HTTPServer((self.address, self.port), self.request_handler)
            server.serve_forever()
        except Exception as e:
            log.exception("WeatherProvider threw an exception: " + str(e))

    def request_handler(self, *args):
        HTTPRequestHandler(self.repository, *args)
class HTTPRequestHandler(BaseHTTPRequestHandler):
    """
    HTTPRequestHandler for the RestWeatherProvider
    """

    def __init__(self, repository, *args):
        self.repository = repository
        BaseHTTPRequestHandler.__init__(self, *args)

    # noinspection PyPep8Naming
    def do_GET(self):
        """
        Handles the GET request and returns the weather in json format
        """
        self.send_response(200)
        self.send_header('Content-type', 'application/json;charset=utf-8')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        data = self.repository.get_all_data()
        self.wfile.write(str(json.dumps(data)))
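# Hypothetical usage sketch (repository is any object exposing
# get_all_data(); address and port are illustrative):
# provider = RestWeatherProvider(repository, '0.0.0.0', 8080)
# provider.start()  # a GET to http://host:8080/ then returns the weather as JSON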
| 30.339286 | 114 | 0.669806 | 183 | 1,699 | 6.054645 | 0.469945 | 0.075812 | 0.048736 | 0.037906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00387 | 0.239553 | 1,699 | 55 | 115 | 30.890909 | 0.853715 | 0.1907 | 0 | 0.064516 | 0 | 0 | 0.098784 | 0.043313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.129032 | 0 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514976db1636eff4f1c2e435911894cc18620e2c | 1,531 | py | Python | test/test_solver.py | akiFQC/pyqubo | 6a8033365562756328577eda42e255853e760488 | [
"Apache-2.0"
] | 1 | 2019-03-17T11:26:36.000Z | 2019-03-17T11:26:36.000Z | test/test_solver.py | akiFQC/pyqubo | 6a8033365562756328577eda42e255853e760488 | [
"Apache-2.0"
] | null | null | null | test/test_solver.py | akiFQC/pyqubo | 6a8033365562756328577eda42e255853e760488 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Recruit Communications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyqubo import solve_qubo, solve_ising, Spin
class TestSolver(unittest.TestCase):

    @staticmethod
    def create_number_partition_model():
        s1, s2, s3 = Spin("s1"), Spin("s2"), Spin("s3")
        H = (2 * s1 + 4 * s2 + 6 * s3) ** 2
        return H.compile()
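    # Ground states satisfy 2*s1 + 4*s2 + 6*s3 == 0, i.e. the spin
    # assignments (1, 1, -1) and (-1, -1, 1): partitioning {2, 4} vs {6}.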
    def test_solve_qubo(self):
        model = TestSolver.create_number_partition_model()
        qubo, offset = model.to_qubo()
        solution = solve_qubo(qubo, num_reads=1, sweeps=10)
        # bug fix: the original compared only the first dict, so the
        # expression was always truthy ("x == a or b" with a non-empty b)
        self.assertTrue(solution == {'s1': 0, 's2': 0, 's3': 1} or
                        solution == {'s1': 1, 's2': 1, 's3': 0})

    def test_solve_ising(self):
        model = TestSolver.create_number_partition_model()
        linear, quad, offset = model.to_ising()
        solution = solve_ising(linear, quad, num_reads=1, sweeps=10)
        self.assertTrue(solution == {'s1': -1, 's2': -1, 's3': 1} or
                        solution == {'s1': 1, 's2': 1, 's3': -1})
if __name__ == '__main__':
    unittest.main()
| 36.452381 | 98 | 0.665578 | 221 | 1,531 | 4.479638 | 0.475113 | 0.060606 | 0.063636 | 0.078788 | 0.205051 | 0.205051 | 0.2 | 0.109091 | 0.082828 | 0 | 0 | 0.042079 | 0.208361 | 1,531 | 41 | 99 | 37.341463 | 0.774752 | 0.371653 | 0 | 0.1 | 0 | 0 | 0.040084 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514b2ccf532fafc08943123f355c409988b89713 | 6,778 | py | Python | reporter.py | Danielto1404/ssat-msp-make-transfer | 36731ab79ba517d6c66516054ebd6179674a953e | [
"MIT"
] | null | null | null | reporter.py | Danielto1404/ssat-msp-make-transfer | 36731ab79ba517d6c66516054ebd6179674a953e | [
"MIT"
] | null | null | null | reporter.py | Danielto1404/ssat-msp-make-transfer | 36731ab79ba517d6c66516054ebd6179674a953e | [
"MIT"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Reporter class."""
import logging
import os
import time
from datetime import datetime
import mindspore.ops as ops
from mindspore.train.serialization import save_checkpoint
from tools import save_image
class Reporter(logging.Logger):
    """
    This class includes several functions that can save images/checkpoints and print/save logging information.

    Args:
        args (class): Option class.
    """

    def __init__(self, args):
        super(Reporter, self).__init__("SSAT")
        self.log_dir = os.path.join(args.outputs_dir, 'log')
        self.imgs_dir = os.path.join(args.outputs_dir, "imgs")
        self.ckpts_dir = os.path.join(args.outputs_dir, "ckpt")
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir, exist_ok=True)
        if not os.path.exists(self.imgs_dir):
            os.makedirs(self.imgs_dir, exist_ok=True)
        if not os.path.exists(self.ckpts_dir):
            os.makedirs(self.ckpts_dir, exist_ok=True)
        self.save_checkpoint_epochs = args.save_checkpoint_epochs
        self.save_imgs = args.save_imgs
        # console handler
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        self.addHandler(console)
        # file handler
        log_name = datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S') + '_.log'
        self.log_fn = os.path.join(self.log_dir, log_name)
        fh = logging.FileHandler(self.log_fn)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.addHandler(fh)
        self.save_args(args)
        self.step = 0
        self.epoch = 0
        self.dataset_size = args.dataset_size // args.device_num
        self.device_num = args.device_num
        self.print_iter = args.print_iter
        self.G_loss = []
        self.D_loss = []
    def info(self, msg, *args, **kwargs):
        if self.isEnabledFor(logging.INFO):
            self._log(logging.INFO, msg, args, **kwargs)

    def save_args(self, args):
        self.info('Args:')
        args_dict = vars(args)
        for key in args_dict.keys():
            self.info('--> %s: %s', key, args_dict[key])
        self.info('')

    def epoch_start(self):
        self.step_start_time = time.time()
        self.epoch_start_time = time.time()
        self.step = 0
        self.epoch += 1
        self.G_loss = []
        self.D_loss = []
    def step_end(self, res_G, res_D):
        """print log when step end."""
        self.step += 1
        loss_D = float(res_D.asnumpy())
        loss_G = float(res_G.asnumpy())
        self.G_loss.append(loss_G)
        self.D_loss.append(loss_D)
        if self.step % self.print_iter == 0:
            step_cost = (time.time() - self.step_start_time) * 1000 / self.print_iter
            losses = "G_loss: {:.2f}, D_loss:{:.2f}".format(loss_G, loss_D)
            self.info("Epoch[{}] [{}/{}] step cost: {:.2f} ms, {}".format(
                self.epoch, self.step, self.dataset_size, step_cost, losses))
            self.step_start_time = time.time()
    def epoch_end(self, net):
        """print log and save checkpoints when epoch end."""
        epoch_cost = (time.time() - self.epoch_start_time) * 1000
        per_step_time = epoch_cost / self.dataset_size
        mean_loss_G = sum(self.G_loss) / self.dataset_size
        mean_loss_D = sum(self.D_loss) / self.dataset_size
        self.info("Epoch [{}] total cost: {:.2f} ms, per step: {:.2f} ms, G_loss: {:.2f}, D_loss: {:.2f}".format(
            self.epoch, epoch_cost, per_step_time, mean_loss_G, mean_loss_D))
        if self.epoch % self.save_checkpoint_epochs == 0:
            save_checkpoint(net.G.gen, os.path.join(self.ckpts_dir, f"SSAT_G_{self.epoch}.ckpt"))
            # save_checkpoint(net.G.dis_non_makeup, os.path.join(self.ckpts_dir, f"SSAT_D_non_makeup_{self.epoch}.ckpt"))
            # save_checkpoint(net.G.dis_makeup, os.path.join(self.ckpts_dir, f"SSAT_D_makeup_{self.epoch}.ckpt"))
    def visualizer(self, non_makeup, makeup, mapX, mapY, z_transfer, z_removal, transfer_g, removal_g,
                   z_rec_non_makeup, z_rec_makeup, z_cycle_non_makeup, z_cycle_makeup):
        if self.save_imgs and self.step % self.dataset_size == 0:
            _, C, H, W = non_makeup.shape
            concat_2 = ops.Concat(axis=2)
            concat_3 = ops.Concat(axis=3)
            bmm = ops.BatchMatMul()
            nearest_256 = ops.ResizeNearestNeighbor((H, W))
            nearest_64 = ops.ResizeNearestNeighbor((H // 4, W // 4))
            non_makeup_down = nearest_64(non_makeup)
            n, c, h, w = non_makeup_down.shape
            non_makeup_down_warp = bmm(non_makeup_down.reshape(n, c, h * w), mapY)  # n*HW*1
            non_makeup_down_warp = non_makeup_down_warp.reshape(n, c, h, w)
            non_makeup_warp = nearest_256(non_makeup_down_warp)
            makeup_down = nearest_64(makeup)
            n, c, h, w = makeup_down.shape
            makeup_down_warp = bmm(makeup_down.reshape(n, c, h * w), mapX)  # n*HW*1
            makeup_down_warp = makeup_down_warp.reshape(n, c, h, w)
            makeup_warp = nearest_256(makeup_down_warp)
            row_1 = concat_3((non_makeup, makeup_warp, transfer_g, z_transfer, z_rec_non_makeup, z_cycle_non_makeup))
            row_2 = concat_3((makeup, non_makeup_warp, removal_g, z_removal, z_rec_makeup, z_cycle_makeup))
            result = concat_2((row_1, row_2))
            save_image(result, os.path.join(self.imgs_dir, f"{self.epoch}_result.jpg"))
    def start_predict(self, direction):
        self.predict_start_time = time.time()
        self.direction = direction
        self.info('==========start predict %s===============', self.direction)

    def end_predict(self):
        cost = (time.time() - self.predict_start_time) * 1000
        per_step_cost = cost / self.dataset_size
        self.info('total {} imgs cost {:.2f} ms, per img cost {:.2f}'.format(self.dataset_size, cost, per_step_cost))
        self.info('==========end predict %s===============\n', self.direction)
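# Hypothetical usage sketch (args is the option object described above, with
# outputs_dir, save_checkpoint_epochs, save_imgs, dataset_size, device_num
# and print_iter attributes; the training loop names are illustrative):
# reporter = Reporter(args)
# for epoch in range(num_epochs):
#     reporter.epoch_start()
#     for res_G, res_D in training_steps():
#         reporter.step_end(res_G, res_D)
#     reporter.epoch_end(net)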
| 43.729032 | 121 | 0.627176 | 961 | 6,778 | 4.182102 | 0.213319 | 0.042548 | 0.019905 | 0.005972 | 0.253546 | 0.155511 | 0.121423 | 0.070416 | 0.034337 | 0.034337 | 0 | 0.012932 | 0.235615 | 6,778 | 154 | 122 | 44.012987 | 0.762787 | 0.165683 | 0 | 0.073395 | 0 | 0.009174 | 0.071824 | 0.012328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082569 | false | 0 | 0.06422 | 0 | 0.155963 | 0.027523 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514d779997818ca67945865e73aa82b847c739ae | 3,302 | py | Python | docs/_build/html/_downloads/152c7b8f9bc6f2cd3750f0cb8ddc0be4/lesson_2_a.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 12 | 2018-06-28T13:40:53.000Z | 2022-01-07T12:46:15.000Z | docs/_build/html/_downloads/152c7b8f9bc6f2cd3750f0cb8ddc0be4/lesson_2_a.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 6 | 2019-04-29T16:55:38.000Z | 2022-03-04T17:00:15.000Z | docs/_build/html/_downloads/152c7b8f9bc6f2cd3750f0cb8ddc0be4/lesson_2_a.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 5 | 2019-04-21T15:42:55.000Z | 2021-08-16T10:53:30.000Z | #<hide>
"""
filtergraph:
Streaming part | Decoding part
|
(LiveThread:livethread) -->> (AVThread:avthread) --> {InfoFrameFilter:info_filter}
"""
#</hide>
#<hide>
import time
from valkka.core import *
#</hide>
"""<rtf>
Let's consider the following filtergraph:
::
Streaming part | Decoding part
|
(LiveThread:livethread) -->> (AVThread:avthread) --> {InfoFrameFilter:info_filter}
Like in the previous lessons, we are reading frames from an IP camera. Instead of churning them through a series of filters, we pass them to another, independently running thread that performs decoding (AVThread).
Let's list all the symbols used until now and the corresponding objects:
====== ============ ==================================
Symbol Base class Explanation
====== ============ ==================================
() Thread An independently running thread
>> Crossover between two threads
{} FrameFilter A framefilter
====== ============ ==================================
That's all you need to create complex filtergraphs with Valkka.
We start as usual, by constructing the filterchain from end-to-beginning:
<rtf>"""
# decoding part
info_filter = InfoFrameFilter("info_filter")
avthread = AVThread("avthread", info_filter)
"""<rtf>
We need a framefilter to feed the frames into AVThread. This framefilter is requested from the AVThread itself:
<rtf>"""
# streaming part
av_in_filter = avthread.getFrameFilter()
livethread = LiveThread("livethread")
"""<rtf>
Finally, proceed as before: pass *av_in_filter* as a parameter to the connection context, start threads, etc.
<rtf>"""
ctx = LiveConnectionContext(LiveConnectionType_rtsp, "rtsp://admin:nordic12345@192.168.1.41", 1, av_in_filter)
"""<rtf>
Start threads. Starting the threads should be done in end-to-beginning order (in the same order we constructed the filterchain).
<rtf>"""
avthread.startCall()
livethread.startCall()
# start decoding
avthread.decodingOnCall()
livethread.registerStreamCall(ctx)
livethread.playStreamCall(ctx)
time.sleep(5)
# stop decoding
# avthread.decodingOffCall()
"""<rtf>
Stop threads. Stop threads in beginning-to-end order (i.e., following the filtergraph from left to right).
<rtf>"""
livethread.stopCall()
avthread.stopCall()
print("bye")
"""<rtf>
You will see output like this:
::
InfoFrameFilter: info_filter start dump>>
InfoFrameFilter: FRAME : <AVBitmapFrame: timestamp=1525870759898 subsession_index=0 slot=1 / h=1080; w=1920; l=(1920,960,960); f=12>
InfoFrameFilter: PAYLOAD : [47 47 47 47 47 47 47 47 47 47 ]
InfoFrameFilter: timediff: -22
InfoFrameFilter: info_filter <<end dump
InfoFrameFilter: info_filter start dump>>
InfoFrameFilter: FRAME : <AVBitmapFrame: timestamp=1525870759938 subsession_index=0 slot=1 / h=1080; w=1920; l=(1920,960,960); f=12>
InfoFrameFilter: PAYLOAD : [47 47 47 47 47 47 47 47 47 47 ]
InfoFrameFilter: timediff: -11
InfoFrameFilter: info_filter <<end dump
...
...
So, instead of H264 packets, we have decoded bitmap frames here.
In the next lesson, we'll dump them on the screen.
<rtf>"""
| 31.447619 | 214 | 0.657783 | 394 | 3,302 | 5.467005 | 0.418782 | 0.033426 | 0.044568 | 0.051996 | 0.292479 | 0.262767 | 0.262767 | 0.262767 | 0.262767 | 0.192201 | 0 | 0.050208 | 0.197759 | 3,302 | 104 | 215 | 31.75 | 0.762929 | 0.086008 | 0 | 0 | 0 | 0 | 0.113487 | 0.060855 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514e969fdf154b0e8e5327483cdde2b37efd808d | 44,964 | py | Python | cheshire3/normalizer.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 3 | 2015-08-02T09:03:28.000Z | 2017-12-06T09:26:14.000Z | cheshire3/normalizer.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 5 | 2015-08-17T01:16:35.000Z | 2015-09-16T21:51:27.000Z | cheshire3/normalizer.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 6 | 2015-05-17T15:32:20.000Z | 2020-04-22T08:43:16.000Z | # -*- coding: utf-8 -*-
import os
import re
import types
try:
    from zopyx.txng3.ext import stemmer as Stemmer
except ImportError:
    Stemmer = None

from cheshire3.baseObjects import Normalizer
from cheshire3.exceptions import (
    ConfigFileException,
    MissingDependencyException
)
class SimpleNormalizer(Normalizer):
    """Abstract Base Class for Normalizer.

    Simply returns the data and should never be used as it will simply waste
    CPU time.
    """

    def __init__(self, session, config, parent):
        Normalizer.__init__(self, session, config, parent)

    def process_string(self, session, data):
        """Process a string into an alternative form."""
        return data
    def process_hash(self, session, data):
        """Process a hash of values into alternative forms."""
        kw = {}
        if not data:
            return kw
        process = self.process_string
        #items = data.items()
        #prox = items[0][1].has_key('positions')
        for (k, d) in data.iteritems():
            dv = d['text']
            if type(dv) == list:
                new = []
                # Process list backwards so as not to munge character offsets
                for x in range(len(dv) - 1, -1, -1):
                    dvi = dv[x]
                    ndvi = process(session, dvi)
                    if ndvi:
                        new.append(ndvi)
                    else:
                        try:
                            d['charOffsets'].pop(x)
                        except KeyError:
                            pass
                new.reverse()
                nd = d.copy()
                nd['text'] = new
                kw[k] = nd
                continue
            else:
                new = process(session, d['text'])
                if not new:
                    continue
            if isinstance(new, dict):
                # From string to hash
                for nv in new.itervalues():
                    txt = nv['text']
                    if txt in kw:
                        kw[txt]['occurences'] += nv['occurences']
                        try:
                            kw[txt]['positions'].extend(nv['positions'])
                        except:
                            pass
                        try:
                            kw[txt]['proxLoc'].extend(nv['proxLoc'])
                        except:
                            pass
                    else:
                        kw[txt] = nv
            else:
                if new is not None:
                    try:
                        kw[new]['occurences'] += d['occurences']
                        try:
                            kw[new]['positions'].extend(d['positions'])
                        except:
                            pass
                        try:
                            kw[new]['proxLoc'].extend(d['proxLoc'])
                        except:
                            pass
                    except KeyError:
                        d = d.copy()
                        try:
                            d['positions'] = d['positions'][:]
                        except:
                            pass
                        try:
                            d['proxLoc'] = d['proxLoc'][:]
                        except:
                            pass
                        d['text'] = new
                        kw[new] = d
        return kw
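    # Sketch of the hash format handled above (values are illustrative):
    #   {'The': {'text': 'The', 'occurences': 2, 'positions': [...]},
    #    'the': {'text': 'the', 'occurences': 5, 'positions': [...]}}
    # A CaseNormalizer, for example, would merge both entries under 'the',
    # summing 'occurences' and extending 'positions'/'proxLoc'.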
class DataExistsNormalizer(SimpleNormalizer):
    """ Return '1' if any data exists, otherwise '0' """

    def process_string(self, session, data):
        if data:
            return "1"
        else:
            return "0"
class TermExistsNormalizer(SimpleNormalizer):
    """ Un-stoplist anonymizing normalizer. Eg for use with data mining """

    _possibleSettings = {
        'termlist': {
            'docs': ("'splitChar' (defaulting to space) separated list of "
                     "terms. For each term, if it exists in this list, the "
                     "normalizer returns '1', otherwise '0'"),
            'required': True
        },
        'splitChar': {
            'docs': "Override for the character to split on"
        },
        'frequency': {
            'docs': ("if 1, accumulate total occurences, otherwise add one "
                     "per term"),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        tlstr = self.get_setting(session, 'termlist', '')
        splitter = self.get_setting(session, 'splitChar', ' ')
        self.termlist = tlstr.split(splitter)
        self.frequency = self.get_setting(session, 'frequency', 0)

    def process_string(self, session, data):
        if data in self.termlist:
            return "1"
        else:
            return "0"

    def process_hash(self, session, data):
        kw = {}
        vals = data.values()
        if not vals:
            return kw
        process = self.process_string
        total = 0
        for d in vals:
            new = process(session, d['text'])
            if new == "1":
                if self.frequency:
                    total += d['occurences']
                else:
                    total += 1
        return str(total)
class UndocumentNormalizer(SimpleNormalizer):
    """ Take a document as if it were a string and turn into a string """

    def process_string(self, session, data):
        return data.get_raw(session)


class CaseNormalizer(SimpleNormalizer):
    """ Reduce text to lower case """

    def process_string(self, session, data):
        return data.lower()


class ReverseNormalizer(SimpleNormalizer):
    """ Reverse string (eg for left truncation) """

    def process_string(self, session, data):
        return data[::-1]


class SpaceNormalizer(SimpleNormalizer):
    """ Reduce multiple whitespace to single space character """

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        # all strings should be treated as unicode internally
        # this is default for lxml - primary Record implementation
        self.whitespace = re.compile("\s+", re.UNICODE)

    def process_string(self, session, data):
        data = data.strip()
        data = self.whitespace.sub(' ', data)
        return data
class ArticleNormalizer(SimpleNormalizer):
    """Remove leading english articles (the, a, an)"""

    def process_string(self, session, data):
        d = data.lower()
        if (d[:4] == "the "):
            return data[4:]
        elif (d[:2] == "a "):
            return data[2:]
        elif (d[:3] == "an "):
            return data[3:]
        else:
            return data
class NumericEntityNormalizer(SimpleNormalizer):
    """Replaces named XML entities with numeric ones.

    Replace encoded XML entities matching a regular expression with the
    equivalent numeric character entity
    """

    _possibleSettings = {
        'regexp': {
            'docs': ("Regular expression that matches characters to turn "
                     "into XML numeric character entities")
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        regex = self.get_setting(session,
                                 'regexp',
                                 '([\x0e-\x1f]|[\x7b-\xff])')
        self.regexp = re.compile(regex)
        self.function = lambda x: "&#%s;" % ord(x.group(1))

    def process_string(self, session, data):
        return self.regexp.sub(self.function, data)
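    # e.g. with the default regexp, u'caf\xe9' -> 'caf&#233;'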
# Non printable characters (Printable)
# self.asciiRe = re.compile('([\x0e-\x1f]|[\x7b-\xff])')
# Non useful characters (Stripper)
# self.asciiRe = re.compile('["%#@~!*{}]')
class PointlessCharacterNormalizer(SimpleNormalizer):

    def process_string(self, session, data):
        t = data.replace(u'\ufb00', 'ff')
        t = t.replace(u'\ufb01', 'fi')
        t = t.replace(u'\xe6', 'fi')
        t = t.replace(u'\ufb02', 'fl')
        t = t.replace(u'\u201c', '"')
        t = t.replace(u'\u201d', '"')
        t = t.replace(u'\u2019', "'")
        t = t.replace(u'\u2026', " ")
        return t
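    # e.g. u'\ufb01le' (with the 'fi' ligature) -> 'file'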
class RegexpNormalizer(SimpleNormalizer):
    """Strip, replace or keep data matching a regular expression."""

    _possibleSettings = {
        'char': {
            'docs': ("Character(s) to replace matches in the regular "
                     "expression with. Defaults to empty string (eg strip "
                     "matches)")
        },
        'regexp': {
            'docs': "Regular expression to match in the data.",
            'required': True
        },
        'keep': {
            'docs': ("Should instead keep only the matches. Boolean, defaults "
                     "to False"),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        self.char = self.get_setting(session, 'char', '')
        self.keep = self.get_setting(session, 'keep', 0)
        regex = self.get_setting(session, 'regexp')
        if regex:
            self.regexp = re.compile(regex)
        else:
            raise ConfigFileException("Missing regexp setting for "
                                      "%s." % (self.id))

    def process_string(self, session, data):
        if self.keep:
            try:
                l = self.regexp.findall(data)
            except UnicodeDecodeError:
                data = data.decode('utf-8')
                l = self.regexp.findall(data)
            return self.char.join(l)
        else:
            try:
                return self.regexp.sub(self.char, data)
            except UnicodeDecodeError:
                data = data.decode('utf-8')
                try:
                    return self.regexp.sub(self.char, data)
                except:
                    raise
class NamedRegexpNormalizer(RegexpNormalizer):
"""A RegexpNormalizer with templating for named groups.
As RegexpNormalizer, but allow named groups and reconstruction of token
using a template and those groups.
"""
_possibleSettings = {
'template': {
'docs': ("Template using group names for replacement, as per % "
"substitution. Eg regexp = (?P<word>.+)/(?P<pos>.+) and "
"template = --%(pos)s--, cat/NN would generate --NN--")
}
}
def __init__(self, session, config, parent):
RegexpNormalizer.__init__(self, session, config, parent)
self.template = self.get_setting(session, 'template', '')
    def process_string(self, session, data):
        m = self.regexp.match(data)
        if m:
            try:
                return self.template % m.groupdict()
            except (KeyError, ValueError, TypeError):
                # template referenced a group that is not present
                return ""
        else:
            return ""
class RegexpFilterNormalizer(SimpleNormalizer):
"""Normalizer to filter data with a regular expression.
If 'keep' setting is True:
filters out data that DOES NOT match 'regexp' setting
If 'keep' setting is False:
filters out data that DOES match the 'regexp' setting
"""
_possibleSettings = {
'regexp': {
'docs': "Regular expression to match in the data."
},
'keep': {
'docs': ("Should keep only data matching the regexp. Boolean "
"setting, defaults to True"),
'type': int
}
}
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
regex = self.get_setting(session,
'regexp',
'^[a-zA-Z\'][a-zA-Z\'.-]+[?!,;:]?$')
self.re = re.compile(regex)
self.keep = self.get_setting(session, 'keep', 1)
def process_string(self, session, data):
if self.re.match(data):
return data if self.keep else None
else:
return None if self.keep else data
    def process_hash(self, session, data):
        data = SimpleNormalizer.process_hash(self, session, data)
        try:
            del data[None]
        except KeyError:
            # may not have filtered anything
            pass
        return data
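# Illustrative behaviour with the default word-like regexp (hypothetical
# instance):
#
#     norm.process_string(session, u'hello')  # keep=1 -> u'hello'
#     norm.process_string(session, u'@@!!')   # keep=1 -> None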
class PossessiveNormalizer(SimpleNormalizer):
    """Remove trailing 's or s' from words."""

    def process_string(self, session, data):
        # Not strictly correct: e.g. "it's" means "it is", not a possessive
        if (data[-2:] == "s'"):
            return data[:-1]
        elif (data[-2:] == "'s"):
            return data[:-2]
        else:
            return data
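# Illustrative behaviour (hypothetical instance):
#
#     norm.process_string(session, u"dogs'")  # -> u"dogs"
#     norm.process_string(session, u"dog's")  # -> u"dog"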
class IntNormalizer(SimpleNormalizer):
    """Turn a string into an integer."""

    def process_string(self, session, data):
        try:
            return long(data)
        except (TypeError, ValueError):
            return None
class StringIntNormalizer(SimpleNormalizer):
    """Turn an integer into a 0 padded string, 12 characters long."""

    def process_string(self, session, data):
        try:
            d = long(data)
            return "%012d" % (d)
        except (TypeError, ValueError):
            return None
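# Illustrative behaviour (hypothetical instance):
#
#     norm.process_string(session, u'42')   # -> '000000000042'
#     norm.process_string(session, u'n/a')  # -> None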
class FileAssistedNormalizer(SimpleNormalizer):
"""Base Class for Normalizers configured with an additional file.
Abstract class for Normalizers that can be configured using an additional
file e.g. for specifying a stoplist, or a list of acronyms and their
expansions.
"""
def _processPath(self, session, path):
fp = self.get_path(session, path)
if fp is None:
raise ConfigFileException("No {0} file specified for object with "
"id '{1}'.".format(path, self.id))
if (not os.path.isabs(fp)):
dfp = self.get_path(session, "defaultPath")
fp = os.path.join(dfp, fp)
try:
fh = open(fp, 'r')
except IOError as e:
raise ConfigFileException("{0} for object with id '{1}'."
"".format(str(e), self.id))
        lines = fh.readlines()
        fh.close()
        return lines
class StoplistNormalizer(FileAssistedNormalizer):
"""Normalizer to remove words that occur in a stopword list."""
stoplist = {}
_possiblePaths = {
'stoplist': {
'docs': ("Path to file containing set of stop terms, one term "
"per line."),
'required': True
}
}
def __init__(self, session, config, parent):
FileAssistedNormalizer.__init__(self, session, config, parent)
self.stoplist = {}
lines = self._processPath(session, 'stoplist')
for sw in lines:
self.stoplist[sw.strip()] = 1
def process_string(self, session, data):
if (data in self.stoplist):
return None
else:
return data
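# Illustrative behaviour, assuming a stoplist file containing "the" and "of"
# (hypothetical instance):
#
#     norm.process_string(session, u'the')    # -> None
#     norm.process_string(session, u'query')  # -> u'query'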
class TokenExpansionNormalizer(FileAssistedNormalizer):
""" Expand acronyms or compound words.
Only works with tokens NOT exact strings.
"""
expansions = {}
_possiblePaths = {
'expansions': {
'docs': ("Path to file containing set of expansions, one "
"expansion per line. First token in line is taken to be "
"the thing to be expanded, remaining tokens are what "
"occurences should be replaced with."),
'required': True
}
}
_possibleSettings = {
'keepOriginal': {
'docs': ("Should the original token be kept as well as its "
"expansion (e.g. potentialy useful when browsing). "
"Defaults to False."),
'type': int,
'options': "0|1"
}
}
def __init__(self, session, config, parent):
FileAssistedNormalizer.__init__(self, session, config, parent)
self.expansions = {}
self.keepOriginal = self.get_setting(session, 'keepOriginal', 0)
lines = self._processPath(session, 'expansions')
for exp in lines:
bits = unicode(exp).split()
self.expansions[bits[0]] = bits[1:]
def process_string(self, session, data):
try:
return ' '.join(self.expansions[data])
except KeyError:
return data
    def process_hash(self, session, data):
        kw = {}
        if not len(data):
            return kw
        keep = self.keepOriginal
        expansions = self.expansions
        for d in data.itervalues():
            if 'positions' in d or 'charOffsets' in d:
                raise NotImplementedError
            t = d['text']
            if (t in expansions):
                dlist = expansions[t]
                for new in dlist:
                    if (new in kw):
                        # NB: 'occurences' (sic) is the key used elsewhere
                        kw[new]['occurences'] += 1
                    else:
                        nd = d.copy()
                        nd['text'] = new
                        kw[new] = nd
                if keep:
                    kw[t] = d
            else:
                kw[t] = d
        return kw
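# Illustrative behaviour, assuming an expansions file with the line
# "UN United Nations" (hypothetical instance):
#
#     norm.process_string(session, u'UN')   # -> u'United Nations'
#     norm.process_string(session, u'cat')  # -> u'cat'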
class StemNormalizer(SimpleNormalizer):
"""Use a Snowball stemmer to stem the terms."""
stemmer = None
_possibleSettings = {
'language': {
'docs': ("Language to create a stemmer for, defaults to "
"english."),
'options': ("danish|dutch|english|finnish|french|german|"
"italian|norwegian|porter|portuguese|russian|"
"spanish|swedish")
}
}
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
if Stemmer is None:
raise MissingDependencyException(self.objectType,
"zopyx.txng3.ext"
)
lang = self.get_setting(session, 'language', 'english')
        try:
            self.stemmer = Stemmer.Stemmer(lang)
        except Exception:
            raise ConfigFileException("Unknown stemmer language: "
                                      "%s" % (lang))
    def process_string(self, session, data):
        if not isinstance(data, unicode):
            data = unicode(data, 'utf-8')
        return self.stemmer.stem([data])[0]
class PhraseStemNormalizer(SimpleNormalizer):
"""Use a Snowball stemmer to stem multiple words in a phrase.
Deprecated: Should instead use normalizer after tokenizer and before
tokenMerger.
"""
stemmer = None
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
if Stemmer is None:
raise MissingDependencyException(self.objectType,
"zopyx.txng3.ext"
)
lang = self.get_setting(session, 'language', 'english')
        self.punctuationRe = re.compile(
            r"((?<!s)'|[-.,]((?=\s)|$)|(^|(?<=\s))[-.,']|"
            r"[~`!@+=\#\&\^*()\[\]{}\\\|\":;<>?/])"
        )
        try:
            self.stemmer = Stemmer.Stemmer(lang)
        except Exception:
            raise ConfigFileException("Unknown stemmer language: %s" %
                                      (lang))
    def process_string(self, session, data):
        if not isinstance(data, unicode):
            data = unicode(data, 'utf-8')
        s = self.punctuationRe.sub(' ', data)
        # split the punctuation-stripped string, not the raw input
        wds = s.split()
        stemmed = self.stemmer.stem(wds)
        return ' '.join(stemmed)
class PhoneticNormalizer(SimpleNormalizer):
u"""Carries out phonetic normalization.
Currently fairly simple normalization after "Introduction to Information
Retrieval" by Christopher D. Manning, Prabhakar Raghavan & Hinrich Schütze
except that length of final term is configurable (not hard-coded to 4
characters.)"""
_possibleSettings = {
'termSize': {
'docs': ("Number of characters to reduce/pad the phonetically "
"normalized term to. If not a positive integer no "
"reduction/padding applied (default)."),
'type': int
}
}
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
self.nChars = self.get_setting(session, 'termSize', 0)
self.re0 = re.compile('[aeiouhwy]+', re.IGNORECASE | re.UNICODE)
self.re1 = re.compile('[bfpv]+', re.IGNORECASE | re.UNICODE)
self.re2 = re.compile('[cgjkqsxz]+', re.IGNORECASE | re.UNICODE)
self.re3 = re.compile('[dt]+', re.IGNORECASE | re.UNICODE)
self.re4 = re.compile('[l]+', re.IGNORECASE | re.UNICODE)
self.re5 = re.compile('[mn]+', re.IGNORECASE | re.UNICODE)
self.re6 = re.compile('[r]+', re.IGNORECASE | re.UNICODE)
def process_string(self, session, data):
# 0. Prepare by stripping leading/trailing whitespace
data = data.strip()
# 1. Retain the first letter of the term.
# 2. Change all occurrences of the following letters to '0' (zero):
# 'A', E', 'I', 'O', 'U', 'H', 'W', 'Y'.
        # 3. Change letters to digits as follows:
        #    B, F, P, V -> 1;  C, G, J, K, Q, S, X, Z -> 2;
        #    D, T -> 3;  L -> 4;  M, N -> 5;  R -> 6.
# 4. Repeatedly remove one out of each pair of consecutive identical
# digits.
tail = data[1:]
for i, regex in enumerate([self.re0, self.re1, self.re2, self.re3,
self.re4, self.re5, self.re6]):
tail = regex.sub(str(i), tail)
# 5. Remove all zeros from the resulting string.
tail = tail.replace('0', '')
result = data[0] + tail
if self.nChars:
# Pad the resulting string with trailing zeros and return the first
# self.nChars positions
result = '{0:0<{1}}'.format(result[:self.nChars], self.nChars)
        if isinstance(data, unicode):
            return unicode(result)
        else:
            return result
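# Illustrative behaviour with termSize=4 (hypothetical instance):
#
#     norm.process_string(session, u'Herman')  # -> u'H655'
#     norm.process_string(session, u'Lee')     # -> u'L000' (zero-padded)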
class DateStringNormalizer(SimpleNormalizer):
"""Turns a Date object into ISO8601 format."""
def process_string(self, session, data):
# str() defaults to iso8601 format
return str(data)
class DateYearNormalizer(SimpleNormalizer):
"""Normalizes a date in ISO8601 format to simply a year
Very crude implementation, simply returns first 4 characters.
"""
def process_string(self, session, data):
return data[:4]
class IdToFilenameNormalizer(SimpleNormalizer):
"""Turn an id into a filename with appropriate extension(s).
Extension to use is a configurable setting, defaults to .xml
"""
_possibleSettings = {
'extension': {
'docs': ("File extension (including leading period / stop) to "
"append to given id to produce and appropriate "
"filename."),
'type': str,
'default': '.xml'
}
}
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
self.ext = self.get_setting(session, 'extension', '.xml')
def process_string(self, session, data):
return str(data) + self.ext
class FilenameToIdNormalizer(SimpleNormalizer):
""" Turn a filename into an id by stripping off the filename extension.
Only strips off the final extension, including the period / stop.
"""
def process_string(self, session, data):
id, ext = os.path.splitext(data)
return id
class RangeNormalizer(SimpleNormalizer):
""" XXX: This is actually a job for a TokenMerger. Deprecated"""
def process_hash(self, session, data):
# Need to step through positions in order
kw = {}
vals = data.values()
if not vals:
return kw
prox = 'positions' in vals[0]
if not prox:
# Bad. Assume low -> high order
tmplist = [(d['text'], d) for d in vals]
else:
# Need to duplicate across occs, as all in same hash from record
tmplist = []
for d in vals:
for x in range(0, len(d['positions']), 2):
tmplist.append(("%s-%s" %
(d['positions'][x], d['positions'][x + 1]),
d))
tmplist.sort()
for t in range(0, len(tmplist), 2):
base = tmplist[t][1]
            try:
                text = base['text'] + " " + tmplist[t + 1][1]['text']
            except IndexError:
                # odd number of entries: pair the final term with itself
                text = base['text'] + " " + base['text']
            base['text'] = text
            try:
                del base['positions']
            except KeyError:
                pass
kw[text] = base
return kw
class UnicodeCollationNormalizer(SimpleNormalizer):
""" Use pyuca to create sort key for string
Only, but Very, useful for sorting
"""
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
keyPath = self.get_path(session, 'keyFile', 'allkeys.txt')
        # Import here so that pyuca is only required when this class is used
        from pyuca import Collator
self.collator = Collator(keyPath)
def process_string(self, session, data):
# fix eszett sorting
data = data.replace(u'\u00DF', 'ss')
ints = self.collator.sort_key(data)
exp = ["%04d" % i for i in ints]
return ''.join(exp)
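# Illustrative behaviour (hypothetical instance; exact key values depend on
# the configured allkeys.txt collation table):
#
#     key_a = norm.process_string(session, u'apple')
#     key_b = norm.process_string(session, u'Banana')
#     # key_a sorts before key_b (case-insensitive collation order)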
class DiacriticNormalizer(SimpleNormalizer):
    """Turn accented characters into their closest ASCII approximation.

    Slow implementation of Unicode 4.0 character decomposition,
    e.g. &eacute; (once decoded) -> e
    """
map = {}
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
# Decomposition as per Unicode 4.0 Data file
self.map = {
u"\u00A7": u"Section",
u"\u00A9": u"(c)",
            # Exhaustive accented alphabeticals, diacritics and ligatures
u"\u00C0": u"\u0041",
u"\u00C1": u"\u0041",
u"\u00C2": u"\u0041",
u"\u00C3": u"\u0041",
u"\u00C4": u"\u0041",
u"\u00C5": u"\u0041",
u"\u00C6": u"AE",
u"\u00C7": u"\u0043",
u"\u00C8": u"\u0045",
u"\u00C9": u"\u0045",
u"\u00CA": u"\u0045",
u"\u00CB": u"\u0045",
u"\u00CC": u"\u0049",
u"\u00CD": u"\u0049",
u"\u00CE": u"\u0049",
u"\u00CF": u"\u0049",
u"\u00D0": u"\u0044",
u"\u00D1": u"\u004E",
u"\u00D2": u"\u004F",
u"\u00D3": u"\u004F",
u"\u00D4": u"\u004F",
u"\u00D5": u"\u004F",
u"\u00D6": u"\u004F",
u"\u00D7": u"x",
u"\u00D8": u"O",
u"\u00D9": u"\u0055",
u"\u00DA": u"\u0055",
u"\u00DB": u"\u0055",
u"\u00DC": u"\u0055",
u"\u00DD": u"\u0059",
u"\u00DE": u"TH",
u"\u00DF": u"ss",
u"\u00E0": u"\u0061",
u"\u00E1": u"\u0061",
u"\u00E2": u"\u0061",
u"\u00E3": u"\u0061",
u"\u00E4": u"\u0061",
u"\u00E5": u"\u0061",
u"\u00E6": u"\u0061\u0065",
u"\u00E7": u"\u0063",
u"\u00E8": u"\u0065",
u"\u00E9": u"\u0065",
u"\u00EA": u"\u0065",
u"\u00EB": u"\u0065",
u"\u00EC": u"\u0069",
u"\u00ED": u"\u0069",
u"\u00EE": u"\u0069",
u"\u00EF": u"\u0069",
u"\u00F0": u"\u0064",
u"\u00F1": u"\u006E",
u"\u00F2": u"\u006F",
u"\u00F3": u"\u006F",
u"\u00F4": u"\u006F",
u"\u00F5": u"\u006F",
u"\u00F6": u"\u006F",
u"\u00F7": u"/",
u"\u00F8": u"\u006F",
u"\u00F9": u"\u0075",
u"\u00FA": u"\u0075",
u"\u00FB": u"\u0075",
u"\u00FC": u"\u0075",
u"\u00FD": u"\u0079",
u"\u00FE": u"th",
u"\u00FF": u"\u0079",
u"\u0100": u"\u0041",
u"\u0101": u"\u0061",
u"\u0102": u"\u0041",
u"\u0103": u"\u0061",
u"\u0104": u"\u0041",
u"\u0105": u"\u0061",
u"\u0106": u"\u0043",
u"\u0107": u"\u0063",
u"\u0108": u"\u0043",
u"\u0109": u"\u0063",
u"\u010A": u"\u0043",
u"\u010B": u"\u0063",
u"\u010C": u"\u0043",
u"\u010D": u"\u0063",
u"\u010E": u"\u0044",
u"\u010F": u"\u0064",
u"\u0110": u"D",
u"\u0111": u"d",
u"\u0112": u"\u0045",
u"\u0113": u"\u0065",
u"\u0114": u"\u0045",
u"\u0115": u"\u0065",
u"\u0116": u"\u0045",
u"\u0117": u"\u0065",
u"\u0118": u"\u0045",
u"\u0119": u"\u0065",
u"\u011A": u"\u0045",
u"\u011B": u"\u0065",
u"\u011C": u"\u0047",
u"\u011D": u"\u0067",
u"\u011E": u"\u0047",
u"\u011F": u"\u0067",
u"\u0120": u"\u0047",
u"\u0121": u"\u0067",
u"\u0122": u"\u0047",
u"\u0123": u"\u0067",
u"\u0124": u"\u0048",
u"\u0125": u"\u0068",
u"\u0126": u"H",
u"\u0127": u"h",
u"\u0128": u"\u0049",
u"\u0129": u"\u0069",
u"\u012A": u"\u0049",
u"\u012B": u"\u0069",
u"\u012C": u"\u0049",
u"\u012D": u"\u0069",
u"\u012E": u"\u0049",
u"\u012F": u"\u0069",
u"\u0130": u"\u0049",
u"\u0131": u"i",
u"\u0132": u"\u0049",
u"\u0133": u"\u0069",
u"\u0134": u"\u004A",
u"\u0135": u"\u006A",
u"\u0136": u"\u004B",
u"\u0137": u"\u006B",
u"\u0138": u"k",
u"\u0139": u"\u004C",
u"\u013A": u"\u006C",
u"\u013B": u"\u004C",
u"\u013C": u"\u006C",
u"\u013D": u"\u004C",
u"\u013E": u"\u006C",
u"\u013F": u"\u004C",
u"\u0140": u"\u006C",
u"\u0141": u"L",
u"\u0142": u"l",
u"\u0143": u"\u004E",
u"\u0144": u"\u006E",
u"\u0145": u"\u004E",
u"\u0146": u"\u006E",
u"\u0147": u"\u004E",
u"\u0148": u"\u006E",
u"\u0149": u"\u02BC",
u"\u014A": u"N",
u"\u014B": u"n",
u"\u014C": u"\u004F",
u"\u014D": u"\u006F",
u"\u014E": u"\u004F",
u"\u014F": u"\u006F",
u"\u0150": u"\u004F",
u"\u0151": u"\u006F",
u"\u0152": u"OE",
u"\u0153": u"oe",
u"\u0154": u"\u0052",
u"\u0155": u"\u0072",
u"\u0156": u"\u0052",
u"\u0157": u"\u0072",
u"\u0158": u"\u0052",
u"\u0159": u"\u0072",
u"\u015A": u"\u0053",
u"\u015B": u"\u0073",
u"\u015C": u"\u0053",
u"\u015D": u"\u0073",
u"\u015E": u"\u0053",
u"\u015F": u"\u0073",
u"\u0160": u"\u0053",
u"\u0161": u"\u0073",
u"\u0162": u"\u0054",
u"\u0163": u"\u0074",
u"\u0164": u"\u0054",
u"\u0165": u"\u0074",
u"\u0166": u"T",
u"\u0167": u"t",
u"\u0168": u"\u0055",
u"\u0169": u"\u0075",
u"\u016A": u"\u0055",
u"\u016B": u"\u0075",
u"\u016C": u"\u0055",
u"\u016D": u"\u0075",
u"\u016E": u"\u0055",
u"\u016F": u"\u0075",
u"\u0170": u"\u0055",
u"\u0171": u"\u0075",
u"\u0172": u"\u0055",
u"\u0173": u"\u0075",
u"\u0174": u"\u0057",
u"\u0175": u"\u0077",
u"\u0176": u"\u0059",
u"\u0177": u"\u0079",
u"\u0178": u"\u0059",
u"\u0179": u"\u005A",
u"\u017A": u"\u007A",
u"\u017B": u"\u005A",
u"\u017C": u"\u007A",
u"\u017D": u"\u005A",
u"\u017E": u"\u007A",
u"\u017F": u"s",
# Big Gap, and scattered from here
u"\u01A0": u"\u004F",
u"\u01A1": u"\u006F",
u"\u01AF": u"\u0055",
u"\u01B0": u"\u0075",
u"\u01C4": u"\u0044",
u"\u01C5": u"\u0044",
u"\u01C6": u"\u0064",
u"\u01C7": u"\u004C",
u"\u01C8": u"\u004C",
u"\u01C9": u"\u006C",
u"\u01CA": u"\u004E",
u"\u01CB": u"\u004E",
u"\u01CC": u"\u006E",
u"\u01CD": u"\u0041",
u"\u01CE": u"\u0061",
u"\u01CF": u"\u0049",
u"\u01D0": u"\u0069",
u"\u01D1": u"\u004F",
u"\u01D2": u"\u006F",
u"\u01D3": u"\u0055",
u"\u01D4": u"\u0075",
u"\u01D5": u"\u0055",
u"\u01D6": u"\u0075",
u"\u01D7": u"\u0055",
u"\u01D8": u"\u0075",
u"\u01D9": u"\u0055",
u"\u01DA": u"\u0075",
u"\u01DB": u"\u0055",
u"\u01DC": u"\u0075",
u"\u01DE": u"\u0041",
u"\u01DF": u"\u0061",
u"\u01E0": u"\u0226",
u"\u01E1": u"\u0227",
u"\u01E2": u"\u00C6",
u"\u01E3": u"\u00E6",
u"\u01E6": u"\u0047",
u"\u01E7": u"\u0067",
u"\u01E8": u"\u004B",
u"\u01E9": u"\u006B",
u"\u01EA": u"\u004F",
u"\u01EB": u"\u006F",
u"\u01EC": u"\u004F",
u"\u01ED": u"\u006F",
u"\u01EE": u"\u01B7",
u"\u01EF": u"\u0292",
u"\u01F0": u"\u006A",
u"\u01F1": u"\u0044",
u"\u01F2": u"\u0044",
u"\u01F3": u"\u0064",
u"\u01F4": u"\u0047",
u"\u01F5": u"\u0067",
u"\u01F8": u"\u004E",
u"\u01F9": u"\u006E",
u"\u01FA": u"\u0041",
u"\u01FB": u"\u0061",
u"\u01FC": u"\u00C6",
u"\u01FD": u"\u00E6",
u"\u01FE": u"\u00D8",
u"\u01FF": u"\u00F8",
u"\u0200": u"\u0041",
u"\u0201": u"\u0061",
u"\u0202": u"\u0041",
u"\u0203": u"\u0061",
u"\u0204": u"\u0045",
u"\u0205": u"\u0065",
u"\u0206": u"\u0045",
u"\u0207": u"\u0065",
u"\u0208": u"\u0049",
u"\u0209": u"\u0069",
u"\u020A": u"\u0049",
u"\u020B": u"\u0069",
u"\u020C": u"\u004F",
u"\u020D": u"\u006F",
u"\u020E": u"\u004F",
u"\u020F": u"\u006F",
u"\u0210": u"\u0052",
u"\u0211": u"\u0072",
u"\u0212": u"\u0052",
u"\u0213": u"\u0072",
u"\u0214": u"\u0055",
u"\u0215": u"\u0075",
u"\u0216": u"\u0055",
u"\u0217": u"\u0075",
u"\u0218": u"\u0053",
u"\u0219": u"\u0073",
u"\u021A": u"\u0054",
u"\u021B": u"\u0074",
u"\u021E": u"\u0048",
u"\u021F": u"\u0068",
u"\u0226": u"\u0041",
u"\u0227": u"\u0061",
u"\u0228": u"\u0045",
u"\u0229": u"\u0065",
u"\u022A": u"\u004F",
u"\u022B": u"\u006F",
u"\u022C": u"\u004F",
u"\u022D": u"\u006F",
u"\u022E": u"\u004F",
u"\u022F": u"\u006F",
u"\u0230": u"\u004F",
u"\u0231": u"\u006F",
u"\u0232": u"\u0059",
u"\u0233": u"\u0079",
u"\u1E00": u"\u0041",
u"\u1E01": u"\u0061",
u"\u1E02": u"\u0042",
u"\u1E03": u"\u0062",
u"\u1E04": u"\u0042",
u"\u1E05": u"\u0062",
u"\u1E06": u"\u0042",
u"\u1E07": u"\u0062",
u"\u1E08": u"\u0043",
u"\u1E09": u"\u0063",
u"\u1E0A": u"\u0044",
u"\u1E0B": u"\u0064",
u"\u1E0C": u"\u0044",
u"\u1E0D": u"\u0064",
u"\u1E0E": u"\u0044",
u"\u1E0F": u"\u0064",
u"\u1E10": u"\u0044",
u"\u1E11": u"\u0064",
u"\u1E12": u"\u0044",
u"\u1E13": u"\u0064",
u"\u1E14": u"\u0045",
u"\u1E15": u"\u0065",
u"\u1E16": u"\u0045",
u"\u1E17": u"\u0065",
u"\u1E18": u"\u0045",
u"\u1E19": u"\u0065",
u"\u1E1A": u"\u0045",
u"\u1E1B": u"\u0065",
u"\u1E1C": u"\u0045",
u"\u1E1D": u"\u0065",
u"\u1E1E": u"\u0046",
u"\u1E1F": u"\u0066",
u"\u1E20": u"\u0047",
u"\u1E21": u"\u0067",
u"\u1E22": u"\u0048",
u"\u1E23": u"\u0068",
u"\u1E24": u"\u0048",
u"\u1E25": u"\u0068",
u"\u1E26": u"\u0048",
u"\u1E27": u"\u0068",
u"\u1E28": u"\u0048",
u"\u1E29": u"\u0068",
u"\u1E2A": u"\u0048",
u"\u1E2B": u"\u0068",
u"\u1E2C": u"\u0049",
u"\u1E2D": u"\u0069",
u"\u1E2E": u"\u0049",
u"\u1E2F": u"\u0069",
u"\u1E30": u"\u004B",
u"\u1E31": u"\u006B",
u"\u1E32": u"\u004B",
u"\u1E33": u"\u006B",
u"\u1E34": u"\u004B",
u"\u1E35": u"\u006B",
u"\u1E36": u"\u004C",
u"\u1E37": u"\u006C",
u"\u1E38": u"\u004C",
u"\u1E39": u"\u006C",
u"\u1E3A": u"\u004C",
u"\u1E3B": u"\u006C",
u"\u1E3C": u"\u004C",
u"\u1E3D": u"\u006C",
u"\u1E3E": u"\u004D",
u"\u1E3F": u"\u006D",
u"\u1E40": u"\u004D",
u"\u1E41": u"\u006D",
u"\u1E42": u"\u004D",
u"\u1E43": u"\u006D",
u"\u1E44": u"\u004E",
u"\u1E45": u"\u006E",
u"\u1E46": u"\u004E",
u"\u1E47": u"\u006E",
u"\u1E48": u"\u004E",
u"\u1E49": u"\u006E",
u"\u1E4A": u"\u004E",
u"\u1E4B": u"\u006E",
u"\u1E4C": u"\u004F",
u"\u1E4D": u"\u006F",
u"\u1E4E": u"\u004F",
u"\u1E4F": u"\u006F",
u"\u1E50": u"\u004F",
u"\u1E51": u"\u006F",
u"\u1E52": u"\u004F",
u"\u1E53": u"\u006F",
u"\u1E54": u"\u0050",
u"\u1E55": u"\u0070",
u"\u1E56": u"\u0050",
u"\u1E57": u"\u0070",
u"\u1E58": u"\u0052",
u"\u1E59": u"\u0072",
u"\u1E5A": u"\u0052",
u"\u1E5B": u"\u0072",
u"\u1E5C": u"\u0052",
u"\u1E5D": u"\u0072",
u"\u1E5E": u"\u0052",
u"\u1E5F": u"\u0072",
u"\u1E60": u"\u0053",
u"\u1E61": u"\u0073",
u"\u1E62": u"\u0053",
u"\u1E63": u"\u0073",
u"\u1E64": u"\u0053",
u"\u1E65": u"\u0073",
u"\u1E66": u"\u0053",
u"\u1E67": u"\u0073",
u"\u1E68": u"\u0053",
u"\u1E69": u"\u0073",
u"\u1E6A": u"\u0054",
u"\u1E6B": u"\u0074",
u"\u1E6C": u"\u0054",
u"\u1E6D": u"\u0074",
u"\u1E6E": u"\u0054",
u"\u1E6F": u"\u0074",
u"\u1E70": u"\u0054",
u"\u1E71": u"\u0074",
u"\u1E72": u"\u0055",
u"\u1E73": u"\u0075",
u"\u1E74": u"\u0055",
u"\u1E75": u"\u0075",
u"\u1E76": u"\u0055",
u"\u1E77": u"\u0075",
u"\u1E78": u"\u0055",
u"\u1E79": u"\u0075",
u"\u1E7A": u"\u0055",
u"\u1E7B": u"\u0075",
u"\u1E7C": u"\u0056",
u"\u1E7D": u"\u0076",
u"\u1E7E": u"\u0056",
u"\u1E7F": u"\u0076",
u"\u1E80": u"\u0057",
u"\u1E81": u"\u0077",
u"\u1E82": u"\u0057",
u"\u1E83": u"\u0077",
u"\u1E84": u"\u0057",
u"\u1E85": u"\u0077",
u"\u1E86": u"\u0057",
u"\u1E87": u"\u0077",
u"\u1E88": u"\u0057",
u"\u1E89": u"\u0077",
u"\u1E8A": u"\u0058",
u"\u1E8B": u"\u0078",
u"\u1E8C": u"\u0058",
u"\u1E8D": u"\u0078",
u"\u1E8E": u"\u0059",
u"\u1E8F": u"\u0079",
u"\u1E90": u"\u005A",
u"\u1E91": u"\u007A",
u"\u1E92": u"\u005A",
u"\u1E93": u"\u007A",
u"\u1E94": u"\u005A",
u"\u1E95": u"\u007A",
u"\u1E96": u"\u0068",
u"\u1E97": u"\u0074",
u"\u1E98": u"\u0077",
u"\u1E99": u"\u0079",
u"\u1E9A": u"\u0061",
u"\u1E9B": u"\u017F",
u"\u1EA0": u"\u0041",
u"\u1EA1": u"\u0061",
u"\u1EA2": u"\u0041",
u"\u1EA3": u"\u0061",
u"\u1EA4": u"\u0041",
u"\u1EA5": u"\u0061",
u"\u1EA6": u"\u0041",
u"\u1EA7": u"\u0061",
u"\u1EA8": u"\u0041",
u"\u1EA9": u"\u0061",
u"\u1EAA": u"\u0041",
u"\u1EAB": u"\u0061",
u"\u1EAC": u"\u0041",
u"\u1EAD": u"\u0061",
u"\u1EAE": u"\u0041",
u"\u1EAF": u"\u0061",
u"\u1EB0": u"\u0041",
u"\u1EB1": u"\u0061",
u"\u1EB2": u"\u0041",
u"\u1EB3": u"\u0061",
u"\u1EB4": u"\u0041",
u"\u1EB5": u"\u0061",
u"\u1EB6": u"\u0041",
u"\u1EB7": u"\u0061",
u"\u1EB8": u"\u0045",
u"\u1EB9": u"\u0065",
u"\u1EBA": u"\u0045",
u"\u1EBB": u"\u0065",
u"\u1EBC": u"\u0045",
u"\u1EBD": u"\u0065",
u"\u1EBE": u"\u0045",
u"\u1EBF": u"\u0065",
u"\u1EC0": u"\u0045",
u"\u1EC1": u"\u0065",
u"\u1EC2": u"\u0045",
u"\u1EC3": u"\u0065",
u"\u1EC4": u"\u0045",
u"\u1EC5": u"\u0065",
u"\u1EC6": u"\u0045",
u"\u1EC7": u"\u0065",
u"\u1EC8": u"\u0049",
u"\u1EC9": u"\u0069",
u"\u1ECA": u"\u0049",
u"\u1ECB": u"\u0069",
u"\u1ECC": u"\u004F",
u"\u1ECD": u"\u006F",
u"\u1ECE": u"\u004F",
u"\u1ECF": u"\u006F",
u"\u1ED0": u"\u004F",
u"\u1ED1": u"\u006F",
u"\u1ED2": u"\u004F",
u"\u1ED3": u"\u006F",
u"\u1ED4": u"\u004F",
u"\u1ED5": u"\u006F",
u"\u1ED6": u"\u004F",
u"\u1ED7": u"\u006F",
u"\u1ED8": u"\u004F",
u"\u1ED9": u"\u006F",
u"\u1EDA": u"\u004F",
u"\u1EDB": u"\u006F",
u"\u1EDC": u"\u004F",
u"\u1EDD": u"\u006F",
u"\u1EDE": u"\u004F",
u"\u1EDF": u"\u006F",
u"\u1EE0": u"\u004F",
u"\u1EE1": u"\u006F",
u"\u1EE2": u"\u004F",
u"\u1EE3": u"\u006F",
u"\u1EE4": u"\u0055",
u"\u1EE5": u"\u0075",
u"\u1EE6": u"\u0055",
u"\u1EE7": u"\u0075",
u"\u1EE8": u"\u0055",
u"\u1EE9": u"\u0075",
u"\u1EEA": u"\u0055",
u"\u1EEB": u"\u0075",
u"\u1EEC": u"\u0055",
u"\u1EED": u"\u0075",
u"\u1EEE": u"\u0055",
u"\u1EEF": u"\u0075",
u"\u1EF0": u"\u0055",
u"\u1EF1": u"\u0075",
u"\u1EF2": u"\u0059",
u"\u1EF3": u"\u0079",
u"\u1EF4": u"\u0059",
u"\u1EF5": u"\u0079",
u"\u1EF6": u"\u0059",
u"\u1EF7": u"\u0079",
u"\u1EF8": u"\u0059",
u"\u1EF9": u"\u0079"
}
def process_string(self, session, data):
d = []
m = self.map
if not data:
return None
# With scarcity of diacritics, this is faster than try/except
for c in data:
if (c in m):
d.append(m[c])
else:
d.append(c)
return ''.join(d)
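# Illustrative behaviour (hypothetical instance):
#
#     norm.process_string(session, u'caf\xe9')     # -> u'cafe'
#     norm.process_string(session, u'\u0160koda')  # -> u'Skoda'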
| 33.134856 | 79 | 0.455231 | 4,943 | 44,964 | 4.102367 | 0.233461 | 0.034717 | 0.012082 | 0.031068 | 0.207318 | 0.176793 | 0.151297 | 0.127626 | 0.10001 | 0.086646 | 0 | 0.129748 | 0.382929 | 44,964 | 1,356 | 80 | 33.159292 | 0.601096 | 0.095121 | 0 | 0.212938 | 0 | 0.001797 | 0.229401 | 0.005109 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043127 | false | 0.008086 | 0.007188 | 0.006289 | 0.140162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514e96ff8378de7f403d3d5b87b39b9cacaa544f | 1,976 | py | Python | tests/tests_auth.py | wolfram74/flask_exploration | 6c83eee93830792969b8c6b4dbbbf6708c08ef9d | [
"MIT"
] | null | null | null | tests/tests_auth.py | wolfram74/flask_exploration | 6c83eee93830792969b8c6b4dbbbf6708c08ef9d | [
"MIT"
] | null | null | null | tests/tests_auth.py | wolfram74/flask_exploration | 6c83eee93830792969b8c6b4dbbbf6708c08ef9d | [
"MIT"
] | null | null | null | import unittest
from flask.ext.testing import TestCase
from project import app, db
from project.models import User, BlogPost
from base import BaseTestCase
class FlaskTestCase(BaseTestCase):
    def test_login_good_auth(self):
        response = self.client.post(
            '/login', data=self.good_cred,
            follow_redirects=True)
        self.assertIn(b'log in successful', response.data)
        self.assertEqual(response.status_code, 200)

    def test_login_bad_auth(self):
        response = self.client.post(
            '/login', data=self.bad_cred, follow_redirects=True)
        self.assertIn(b'Invalid credentials', response.data)
        self.assertEqual(response.status_code, 200)

    def test_logout_valid(self):
        self.client.post(
            '/login', data=self.good_cred,
            follow_redirects=True)
        response = self.client.get(
            '/logout', content_type='html/text', follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'successful', response.data)

    def test_logout_protected(self):
        response = self.client.get(
            '/logout', content_type='html/text', follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Please log in', response.data)

    def test_home_valid_if_authed(self):
        self.client.post(
            '/login', data=self.good_cred,
            follow_redirects=True)
        response = self.client.get('/', content_type='html/text')
        self.assertEqual(response.status_code, 200)

    def test_home_protected(self):
        response = self.client.get(
            '/', content_type='html/text', follow_redirects=True)
        self.assertIn(b'Please log in', response.data)
if __name__ == '__main__':
unittest.main()
| 37.283019 | 96 | 0.671053 | 243 | 1,976 | 5.263374 | 0.26749 | 0.062549 | 0.103987 | 0.089914 | 0.656763 | 0.656763 | 0.636435 | 0.555903 | 0.45817 | 0.412041 | 0 | 0.009603 | 0.209514 | 1,976 | 52 | 97 | 38 | 0.809219 | 0.091093 | 0 | 0.447368 | 0 | 0 | 0.087102 | 0 | 0 | 0 | 0 | 0 | 0.263158 | 1 | 0.157895 | false | 0 | 0.131579 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5150d680a6e92b183496e1677c8274249125d1ee | 3,794 | py | Python | magmap/settings/logs.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | magmap/settings/logs.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | magmap/settings/logs.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | # MagellanMapper logging
"""Logging utilities."""
import logging
from logging import handlers
import pathlib
class LogWriter:
"""File-like object to write standard output to logging functions.
Attributes:
        fn_logger (func): Logging function.
buffer (list[str]): String buffer.
"""
def __init__(self, fn_logging):
"""Create a writer for a logging function."""
self.fn_logger = fn_logging
self.buffer = []
def write(self, msg):
"""Write to logging function with buffering.
Args:
msg (str): Line to write, from which trailing newlines will be
removed.
"""
if msg.endswith("\n"):
# remove trailing newlines in buffer and pass to logging function
self.buffer.append(msg.rstrip("\n"))
self.fn_logger("".join(self.buffer))
self.buffer = []
else:
self.buffer.append(msg)
def flush(self):
"""Empty function, deferring to logging handler's flush."""
pass
def setup_logger():
"""Set up a basic root logger with a stream handler.
Returns:
:class:`logging.Logger`: Root logger for the application.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# set up handler for console
handler_stream = logging.StreamHandler()
handler_stream.setLevel(logging.INFO)
handler_stream.setFormatter(logging.Formatter(
"%(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler_stream)
return logger
def update_log_level(logger, level):
"""Update the logging level.
Args:
logger (:class:`logging.Logger`): Logger to update.
level (Union[str, int]): Level given either as a string corresponding
to ``Logger`` levels, or their corresponding integers, ranging
from 0 (``NOTSET``) to 50 (``CRITICAL``). For convenience,
values can be given from 0-5, which will be multiplied by 10.
Returns:
:class:`logging.Logger`: The logger for chained calls.
"""
if isinstance(level, str):
# specify level by level name
level = level.upper()
elif isinstance(level, int):
# specify by level integer (0-50)
if level < 10:
# for convenience, assume values under 10 are 10-fold
level *= 10
    else:
        # unsupported level type; return the logger unchanged
        return logger
try:
# set level for the logger and all its handlers
logger.setLevel(level)
for handler in logger.handlers:
handler.setLevel(level)
except (TypeError, ValueError) as e:
logger.error(e, exc_info=True)
return logger
def add_file_handler(logger, path, backups=5):
"""Add a rotating log file handler with a new log file.
Args:
logger (:class:`logging.Logger`): Logger to update.
path (str): Path to log.
backups (int): Number of backups to maintain; defaults to 5.
Returns:
:class:`logging.Logger`: The logger for chained calls.
"""
# check if log file already exists
pathl = pathlib.Path(path)
roll = pathl.is_file()
# create a rotations file handler to manage number of backups while
# manually managing rollover based on file presence rather than size
pathl.parent.mkdir(parents=True, exist_ok=True)
handler_file = handlers.RotatingFileHandler(path, backupCount=backups)
handler_file.setLevel(logger.level)
handler_file.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler_file)
if roll:
# create a new log file if exists, backing up the old one
handler_file.doRollover()
return logger
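# Illustrative wiring (hypothetical paths; not part of the module itself):
#
#     logger = setup_logger()
#     update_log_level(logger, "debug")
#     add_file_handler(logger, "out/app.log", backups=3)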
| 29.874016 | 77 | 0.624671 | 463 | 3,794 | 5.062635 | 0.358531 | 0.021331 | 0.038396 | 0.031997 | 0.116894 | 0.116894 | 0.116894 | 0.116894 | 0.081058 | 0 | 0 | 0.007305 | 0.278334 | 3,794 | 126 | 78 | 30.111111 | 0.848795 | 0.457828 | 0 | 0.134615 | 0 | 0 | 0.050976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0.019231 | 0.057692 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5151eb6104a2efda27edf90bc5d40eacc7c63499 | 3,322 | py | Python | src/training/har_train.py | sanglee/MC-ATON | 8393cdb20957bf2fe11633c062aa7979ca389cc4 | [
"Apache-2.0"
] | null | null | null | src/training/har_train.py | sanglee/MC-ATON | 8393cdb20957bf2fe11633c062aa7979ca389cc4 | [
"Apache-2.0"
] | null | null | null | src/training/har_train.py | sanglee/MC-ATON | 8393cdb20957bf2fe11633c062aa7979ca389cc4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Created : 2021/10/28 17:12
# @Author : Junhyung Kwon
# @Site :
# @File : har_train.py
# @Software : PyCharm
import os
import torch
import torch.nn.functional as F
from torch import nn, optim
from torch.optim.lr_scheduler import MultiStepLR
from tqdm.auto import tqdm
from attacker import LinfPGDAttack
from data import uci_har
from models import HARClassifier
from training import structured_har
from utils import AverageMeter
def train_iter(model, optimizer, criterion, data_loader, device, comp_ratio=0.):
    model.train()
    iteration_loss = AverageMeter()
    for i, (X, y) in enumerate(data_loader):
        X, y = X.to(device), y.to(device)
        output = model(X)
        loss = criterion(output, y.long())
        iteration_loss.update(loss.item(), X.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if comp_ratio > 0:
        # apply structured pruning once per epoch, after the weight updates
        idxs, lams = structured_har(model, comp_ratio)
    return iteration_loss.avg
def test_iter(model, data_loader, device, check_adv=False, epsilon=0.3, alpha=0.0073, k=7):
model.eval()
normal_acc = AverageMeter()
if check_adv:
adv = LinfPGDAttack(model, epsilon=epsilon, alpha=alpha, k=k)
off_acc = AverageMeter()
for i, (X, y) in enumerate(data_loader):
X, y = X.to(device), y.to(device)
output = model(X)
out = F.softmax(output, dim=1)
_, predicted = out.max(1)
idx = predicted.eq(y)
acc = idx.sum().item() / X.size(0)
normal_acc.update(acc)
if check_adv:
adv_x = adv.perturb(X, y.long())
out = model(adv_x)
out = F.softmax(out, dim=1)
_, predicted = out.max(1)
idx = predicted.eq(y)
acc = idx.sum().item() / X.size(0)
off_acc.update(acc)
if check_adv:
return normal_acc.avg, off_acc.avg
else:
return normal_acc.avg
def har_train(cuda_num=4, EPOCH=100, save_interval=5, resume=True, comp_ratio=0., model_dir='./simulation/HAR_UCI/'):
if not os.path.exists(model_dir):
os.mkdir(model_dir)
train_loader, test_loader = uci_har('/workspace/Dataset/TSData/uci_data/np/')
device = 'cuda:%d' % cuda_num
model = HARClassifier()
model.to(device)
if resume:
model.load_state_dict(torch.load(os.path.join(model_dir, 'comp0_0-model-epoch{}.pt'.format(99))))
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.91)
scheduler = MultiStepLR(optimizer, milestones=[55, 75, 90], gamma=0.1)
criterion = nn.CrossEntropyLoss()
for epoch in tqdm(range(EPOCH)):
        train_loss = train_iter(model, optimizer, criterion, train_loader,
                                device, comp_ratio=comp_ratio)
val_acc = test_iter(model, test_loader, device)
scheduler.step()
print('Epoch {}\tTrain loss: {:.4f}\tValidation accuracy: {:.4f}'.format(epoch + 1, train_loss, val_acc))
if (epoch + 1) % save_interval == 0:
torch.save(model.state_dict(),
os.path.join(model_dir,
'comp{}-model-epoch{}.pt'.format(str(comp_ratio * 100).replace('.', '_'), epoch)))
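# Illustrative entry point (hypothetical arguments and output directory):
#
#     if __name__ == '__main__':
#         har_train(cuda_num=0, EPOCH=100, resume=False, comp_ratio=0.5)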
| 30.477064 | 118 | 0.621915 | 460 | 3,322 | 4.345652 | 0.336957 | 0.036018 | 0.02001 | 0.015008 | 0.244122 | 0.194097 | 0.172086 | 0.172086 | 0.172086 | 0.172086 | 0 | 0.025549 | 0.245936 | 3,322 | 108 | 119 | 30.759259 | 0.772455 | 0.048164 | 0 | 0.256757 | 0 | 0 | 0.054517 | 0.033597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.148649 | 0 | 0.22973 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5153b4798f2410a6aa6709c5a6328570a85d91d6 | 3,052 | py | Python | src/blip_sdk/extensions/artificial_intelligence/ai_model/ai_model_extension.py | mirlarof/blip-sdk-python | f958149b2524d4340eeafad8739a33db71df45ed | [
"MIT"
] | 2 | 2021-07-02T20:10:48.000Z | 2021-07-13T20:51:18.000Z | src/blip_sdk/extensions/artificial_intelligence/ai_model/ai_model_extension.py | mirlarof/blip-sdk-python | f958149b2524d4340eeafad8739a33db71df45ed | [
"MIT"
] | 3 | 2021-06-24T13:27:21.000Z | 2021-07-30T15:37:43.000Z | src/blip_sdk/extensions/artificial_intelligence/ai_model/ai_model_extension.py | mirlarof/blip-sdk-python | f958149b2524d4340eeafad8739a33db71df45ed | [
"MIT"
] | 3 | 2021-06-23T19:53:20.000Z | 2022-01-04T17:50:44.000Z | from lime_python import Command
from ...extension_base import ExtensionBase
from .content_type import ContentType
from .uri_templates import UriTemplates
class AIModelExtension(ExtensionBase):
"""Extension to handle Blip Analytics Services."""
async def get_models_async(
self,
skip: int = 0,
take: int = 100,
ascending: bool = False,
**kwargs
) -> Command:
"""Search in all trained and/or published models.
Args:
skip (int): Number of models to be skipped.
take (int): Number of model to be take.
ascending (bool): Sets ascending alphabetical order.
kwargs: any other optional parameter not covered by the method
Returns:
Command: Command response
"""
models_resource_query = self.build_resource_query(
UriTemplates.MODELS,
{
'$skip': skip,
'$take': take,
'$ascending': ascending,
**kwargs
}
)
return await self.process_command_async(
self.create_get_command(
models_resource_query,
)
)
async def get_model_async(self, id: str) -> Command:
"""Get specific AI model.
Args:
id (str): Model id
Returns:
Command: Command response
"""
return await self.process_command_async(
self.create_get_command(
self.build_uri(UriTemplates.MODEL, id)
)
)
async def get_model_summary_async(self) -> Command:
"""Get model summary.
Returns:
Command: Command response
"""
return await self.process_command_async(
self.create_get_command(UriTemplates.MODELS_SUMMARY)
)
async def get_last_trained_or_published_model_async(self) -> Command:
"""Get last trained or published model.
Returns:
Command: Command response
"""
return await self.process_command_async(
self.create_get_command(UriTemplates.LAST_TRAINED_OR_PUBLISH_MODEL)
)
async def train_model_async(self) -> Command:
"""Train model.
Returns:
Command: Command response
"""
train_model_command = self.create_set_command(
UriTemplates.MODELS,
{},
ContentType.MODEL_TRAINING
)
return await self.process_command_async(train_model_command)
async def publish_model_async(self, id: str) -> Command:
"""Publish an existing artificial intelligence model.
Args:
id (str): model id
Returns:
Command: Command response
"""
return await self.process_command_async(
self.create_set_command(
UriTemplates.MODELS,
{
'id': id
},
ContentType.MODEL_PUBLISHING
)
)
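# Illustrative usage (hypothetical client setup; transport details omitted):
#
#     async def print_summary(extension: AIModelExtension) -> None:
#         command = await extension.get_model_summary_async()
#         print(command)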
| 27.495495 | 79 | 0.564548 | 298 | 3,052 | 5.57047 | 0.281879 | 0.059639 | 0.075904 | 0.104819 | 0.414458 | 0.393976 | 0.266265 | 0.266265 | 0.266265 | 0.266265 | 0 | 0.002056 | 0.362385 | 3,052 | 110 | 80 | 27.745455 | 0.850976 | 0.014417 | 0 | 0.210526 | 0 | 0 | 0.0108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.070175 | 0 | 0.192982 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5155a6295dbb373ee9af2abbcd7551a82f2a7146 | 1,683 | py | Python | src/datajunction/console.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | [
"MIT"
] | null | null | null | src/datajunction/console.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | [
"MIT"
] | null | null | null | src/datajunction/console.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | [
"MIT"
] | null | null | null | """
DataJunction (DJ) is a metric repository.
Usage:
dj compile [REPOSITORY] [-f] [--loglevel=INFO] [--reload]
Actions:
compile Compile repository
Options:
-f, --force Force indexing. [default: false]
--loglevel=LEVEL Level for logging. [default: INFO]
--reload Watch for changes. [default: false]
Released under the MIT license.
(c) 2018 Beto Dealmeida <roberto@dealmeida.net>
"""
import asyncio
import logging
from pathlib import Path
from docopt import docopt
from datajunction import __version__
from datajunction.cli import compile as compile_
from datajunction.errors import DJException
from datajunction.utils import get_settings, setup_logging
_logger = logging.getLogger(__name__)
async def main() -> None:
"""
Dispatch command.
"""
arguments = docopt(__doc__, version=__version__)
setup_logging(arguments["--loglevel"])
if arguments["REPOSITORY"] is None:
settings = get_settings()
repository = settings.repository
else:
repository = Path(arguments["REPOSITORY"])
try:
if arguments["compile"]:
try:
await compile_.run(
repository,
arguments["--force"],
arguments["--reload"],
)
except DJException as exc:
_logger.error(exc)
except asyncio.CancelledError:
_logger.info("Canceled")
def run() -> None:
"""
Run the DJ CLI.
"""
try:
asyncio.run(main())
except KeyboardInterrupt:
_logger.info("Stopping DJ")
if __name__ == "__main__":
run()
| 23.054795 | 63 | 0.608437 | 168 | 1,683 | 5.892857 | 0.428571 | 0.064646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003344 | 0.289364 | 1,683 | 72 | 64 | 23.375 | 0.824415 | 0.276887 | 0 | 0.083333 | 0 | 0 | 0.068045 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.222222 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5159b6cde26f7df203594b171f65dbf811705a8e | 1,286 | py | Python | phonon/registry.py | akellehe/phonon | 4b61fd6042af1bec7bc949bcc713a0dd0fcfcefb | [
"MIT"
] | 4 | 2015-03-30T22:46:35.000Z | 2020-09-08T02:03:53.000Z | phonon/registry.py | akellehe/phonon | 4b61fd6042af1bec7bc949bcc713a0dd0fcfcefb | [
"MIT"
] | 21 | 2015-02-03T23:12:36.000Z | 2017-09-15T21:03:24.000Z | phonon/registry.py | akellehe/phonon | 4b61fd6042af1bec7bc949bcc713a0dd0fcfcefb | [
"MIT"
] | 2 | 2016-08-14T20:18:52.000Z | 2019-09-30T16:02:22.000Z | import sys
import collections
import tornado
class Registry(object):
def __init__(self, max_entries=10000, ioloop=None):
self.models = collections.OrderedDict()
self.timeouts = {}
self.ioloop = ioloop or tornado.ioloop.IOLoop.current()
self.max_entries = max_entries
def register(self, model, *args, **kwargs):
if model.registry_key() in self.models:
self.models[model.registry_key()].merge(model)
self.ioloop.remove_timeout(self.timeouts[model.registry_key()])
else:
self.models[model.registry_key()] = model
self.timeouts[model.registry_key()] = self.ioloop.add_timeout(
model.TTL, self.on_expire, model, *args, **kwargs
)
def on_expire(self, model, *args, **kwargs):
del self.models[model.registry_key()]
del self.timeouts[model.registry_key()]
if not model.reference.dereference(callback=model.on_complete,
args=args,
kwargs=kwargs):
model.cache()
registry = Registry()
def configure(max_entries=10000):
global registry
registry = Registry(max_entries=max_entries)
def register(model):
registry.register(model)
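# Illustrative usage (hypothetical model object implementing registry_key(),
# merge(), TTL, reference, on_complete and cache()):
#
#     configure(max_entries=5000)
#     register(model)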
| 28.577778 | 75 | 0.618974 | 144 | 1,286 | 5.375 | 0.305556 | 0.134367 | 0.144703 | 0.089147 | 0.289406 | 0.080103 | 0 | 0 | 0 | 0 | 0 | 0.010672 | 0.271384 | 1,286 | 44 | 76 | 29.227273 | 0.815368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.096774 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515bc2e00165e4793cb2c05d11188ceed1d51545 | 1,672 | py | Python | project_template/account/permission.py | AdityaBhalsod/django-rest-api-template | ae530c9c246d074707e26d9c4d6c2f15177bd1f7 | [
"Apache-2.0"
] | 3 | 2020-11-04T19:34:47.000Z | 2021-06-30T04:13:55.000Z | project_template/account/permission.py | AdityaBhalsod/django-rest-api-template | ae530c9c246d074707e26d9c4d6c2f15177bd1f7 | [
"Apache-2.0"
] | null | null | null | project_template/account/permission.py | AdityaBhalsod/django-rest-api-template | ae530c9c246d074707e26d9c4d6c2f15177bd1f7 | [
"Apache-2.0"
] | 1 | 2021-01-31T19:30:59.000Z | 2021-01-31T19:30:59.000Z | # -*- coding: utf-8 -*-
from rest_framework import permissions
from account.models import BlackList
class BlacklistPermission(permissions.BasePermission):
"""
Global permission check for blacklisted IPs.
"""
def has_permission(self, request, view):
ip_address = request.META["REMOTE_ADDR"]
blacklisted = BlackList.objects.filter(ip_address=ip_address).exists()
return not blacklisted
class BaseModelPermissions(permissions.DjangoModelPermissions):
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def has_object_permission(self, request, view, obj):
has_permission = super().has_permission(request, view)
if has_permission and view.action == 'retrieve':
return self._queryset(view).viewable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'list':
return self._queryset(view).viewable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'update':
return self._queryset(view).editable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'partial_update':
return self._queryset(view).editable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'destroy':
return self._queryset(view).deletable().filter(pk=obj.pk).exists()
return False | 35.574468 | 78 | 0.641746 | 198 | 1,672 | 5.232323 | 0.353535 | 0.100386 | 0.043436 | 0.086873 | 0.40251 | 0.38417 | 0.357143 | 0.30888 | 0.30888 | 0.30888 | 0 | 0.000757 | 0.209928 | 1,672 | 47 | 79 | 35.574468 | 0.783497 | 0.040072 | 0 | 0.133333 | 0 | 0 | 0.158491 | 0.106918 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515c32b2748b2c0d803dfa8b97d5d1d27008566b | 1,498 | py | Python | 006_multiples.py | mkduer/code-nibbles | 3482b5159bc0fdc18079bf2de27a47a77ae4753a | [
"Apache-2.0"
] | null | null | null | 006_multiples.py | mkduer/code-nibbles | 3482b5159bc0fdc18079bf2de27a47a77ae4753a | [
"Apache-2.0"
] | null | null | null | 006_multiples.py | mkduer/code-nibbles | 3482b5159bc0fdc18079bf2de27a47a77ae4753a | [
"Apache-2.0"
] | null | null | null | from helpers import Helpers
import numpy as np
def multiples(numbers: [int]) -> [int]:
"""
Multiplies all of the values in the list excepting the value at the current index
e.g. original numbers = [4, 1, 6] returns the multiples = [6, 24, 4] where the
first value is the product of 1 * 6 and does not include 4.
:param numbers: the original list of numbers
:return: the list of multiplied values
"""
nonzero_indices = np.flatnonzero(numbers)
total_numbers = len(numbers)
total_non_zeros = nonzero_indices.size
# more than one zero
if total_numbers - total_non_zeros > 1:
return np.zeros(total_numbers, dtype=np.int16)
# if there are no zeros, divide each index from the total product
if total_numbers == total_non_zeros:
total_product = np.prod(numbers)
return (total_product / numbers).astype(int)
# one zero
total_product = np.zeros(total_numbers, dtype=np.int16)
np_numbers = np.asarray(numbers)
zero_index = np.where(np_numbers == 0)[0][0]
np_numbers[zero_index] = 1
multiplied_total = np.prod(np_numbers)
total_product[zero_index] = multiplied_total
return total_product
def main():
helper = Helpers()
randlist = helper.create_random_numbers_with_choice(6, 9)
print(f'original list:')
helper.multiline_print(randlist)
multiplied_values = multiples(randlist)
print(f'\nmultiplied values: {multiplied_values}')
if __name__ == '__main__':
main()
| 31.208333 | 85 | 0.695594 | 212 | 1,498 | 4.716981 | 0.367925 | 0.072 | 0.045 | 0.06 | 0.116 | 0.116 | 0.062 | 0 | 0 | 0 | 0 | 0.017812 | 0.212951 | 1,498 | 47 | 86 | 31.87234 | 0.830365 | 0.26502 | 0 | 0 | 0 | 0 | 0.058107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.259259 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515e110e923bd04c8ebed32a1acaa5cd9ac55ba1 | 943 | py | Python | 2016b/main.py | xdr940/iKaggle | cc0210e089e5f1af228f02bf67bb9a4459336722 | [
"MIT"
] | null | null | null | 2016b/main.py | xdr940/iKaggle | cc0210e089e5f1af228f02bf67bb9a4459336722 | [
"MIT"
] | null | null | null | 2016b/main.py | xdr940/iKaggle | cc0210e089e5f1af228f02bf67bb9a4459336722 | [
"MIT"
] | null | null | null | import pandas as pd
import pandas_profiling
from path import Path
import numpy as np
from scipy.stats import chi2_contingency
from collections import Counter
root = Path('/home/roit/datasets/kaggle/2016b')
dump_path = root/'dump'
ge_info = root/'gene_info'
exists_npy = False  # set to True once vecs.npy has been generated

if not exists_npy:
    all_snps = []   # flat list of every SNP id across all gene files
    genes = []      # per-gene lists of SNP ids
    snps_sorted = pd.read_csv(dump_path/'sorted_cols_series.csv')
    cnt = 0
    for file in ge_info.files():
        with open(file) as fh:
            gene = fh.read()  # str: rs1\n rs2\n...
        ls = gene.split('\n')
        ls.pop()
        cnt += len(ls)
        all_snps += ls
        genes.append(ls)
    print(cnt)
    vecs = np.zeros([len(genes), len(snps_sorted)])
    for i in range(len(genes)):
        for j in range(len(genes[i])):
            col = all_snps.index(genes[i][j])
            vecs[i][col] = 1
    np.save('vecs.npy', vecs)
else:
    vecs = np.load('vecs.npy')

sum_vec = vecs.sum(axis=0)  # sum over rows (per-column totals)
print(sum_vec.sum())
515f21eaa0b5ed2cf1b0e5658806e586107dfcd7 | 1,376 | py | Python | examples/mxnet/dis_kvstore/client.py | tqchen/dgl | d57ff78da11193fbbee7f37a69fcfe1c14da2ae4 | [
"Apache-2.0"
] | 2 | 2020-05-10T14:06:12.000Z | 2021-01-01T02:57:20.000Z | examples/mxnet/dis_kvstore/client.py | tqchen/dgl | d57ff78da11193fbbee7f37a69fcfe1c14da2ae4 | [
"Apache-2.0"
] | null | null | null | examples/mxnet/dis_kvstore/client.py | tqchen/dgl | d57ff78da11193fbbee7f37a69fcfe1c14da2ae4 | [
"Apache-2.0"
] | null | null | null | # This is a simple MXNet server demo shows how to use DGL distributed kvstore.
import dgl
import argparse
import mxnet as mx
ID = []
ID.append(mx.nd.array([0,1], dtype='int64'))
ID.append(mx.nd.array([2,3], dtype='int64'))
ID.append(mx.nd.array([4,5], dtype='int64'))
ID.append(mx.nd.array([6,7], dtype='int64'))
edata_partition_book = {'edata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
ndata_partition_book = {'ndata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
def start_client():
client = dgl.contrib.start_client(ip_config='ip_config.txt',
ndata_partition_book=ndata_partition_book,
edata_partition_book=edata_partition_book)
client.push(name='edata', id_tensor=ID[client.get_id()], data_tensor=mx.nd.array([[1.,1.,1.],[1.,1.,1.]]))
client.push(name='ndata', id_tensor=ID[client.get_id()], data_tensor=mx.nd.array([[2.,2.,2.],[2.,2.,2.]]))
client.barrier()
tensor_edata = client.pull(name='edata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
tensor_ndata = client.pull(name='ndata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
print(tensor_edata)
client.barrier()
print(tensor_ndata)
client.barrier()
if client.get_id() == 0:
client.shut_down()
if __name__ == '__main__':
start_client() | 32.761905 | 110 | 0.631541 | 227 | 1,376 | 3.656388 | 0.242291 | 0.048193 | 0.108434 | 0.060241 | 0.439759 | 0.343373 | 0.343373 | 0.245783 | 0.245783 | 0.245783 | 0 | 0.060262 | 0.167878 | 1,376 | 42 | 111 | 32.761905 | 0.664629 | 0.055233 | 0 | 0.111111 | 0 | 0 | 0.070054 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0 | 0.148148 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
516282162e672e92cb14c5b353f7f1dcb8f0e66a | 2,318 | py | Python | python-project/methods/KDE.py | ferjorosa/bayesian-latent-forests | 3d9e19f1d0be1e4cca0b390866589061a670cc20 | [
"Apache-2.0"
] | null | null | null | python-project/methods/KDE.py | ferjorosa/bayesian-latent-forests | 3d9e19f1d0be1e4cca0b390866589061a670cc20 | [
"Apache-2.0"
] | null | null | null | python-project/methods/KDE.py | ferjorosa/bayesian-latent-forests | 3d9e19f1d0be1e4cca0b390866589061a670cc20 | [
"Apache-2.0"
] | null | null | null | import statsmodels.api as sm
import numpy as np
import os
import time
import json
def apply(train_datasets, var_types_string, test_datasets, n_folds, result_path, filename, foldLog):
    """Fit a multivariate KDE per fold and report the test log-likelihood."""
print("\n========================")
print("KDE")
print("========================")
results = {}
folds = {}
avg_learning_time = 0
avg_test_ll = 0
for i in range(1, n_folds + 1):
index = i-1
init_time = time.time()*1000
model = sm.nonparametric.KDEMultivariate(data=train_datasets[index], var_type=var_types_string, bw='normal_reference')
test_ll = np.log(model.pdf(test_datasets[index]))
test_ll = np.sum(test_ll)
end_time = time.time()*1000
learning_time = end_time - init_time
fold_result = {"test_ll": test_ll, "learning_time": learning_time}
folds["fold_" + str(i)] = fold_result
avg_learning_time = avg_learning_time + learning_time
avg_test_ll = avg_test_ll + test_ll
if foldLog:
print("----------------------------------------")
print("Fold (" + str(i) + "): ")
print("Test LL: " + str(test_ll))
print("Learning time: " + str(learning_time))
# Generate the average results and store them in the dictionary, then store them in a JSON file
avg_test_ll = avg_test_ll / n_folds
avg_learning_time = avg_learning_time / n_folds / 1000 # in seconds
results["average_test_ll"] = avg_test_ll
results["average_learning_time"] = avg_learning_time
results["folds"] = folds
store_json(results, result_path, filename)
print("----------------------------------------")
print("----------------------------------------")
print("Average Test LL: " + str(avg_test_ll))
print("Average learning time: " + str(avg_learning_time))
def store_json(results, path, filename):
    if not os.path.exists(path):
        os.makedirs(path)
    json_path = path + filename + "_results_KDE.json"
    # overwrite any previous result file
    if os.path.isfile(json_path):
        os.remove(json_path)
    with open(json_path, 'w') as fp:
        json.dump(results, fp, sort_keys=True, indent=4)
| 36.793651 | 126 | 0.595772 | 301 | 2,318 | 4.325581 | 0.269103 | 0.078341 | 0.080645 | 0.067588 | 0.261137 | 0.18894 | 0.115207 | 0.115207 | 0.115207 | 0.115207 | 0 | 0.010538 | 0.222174 | 2,318 | 62 | 127 | 37.387097 | 0.711592 | 0.044866 | 0 | 0.14 | 0 | 0 | 0.180009 | 0.086386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.1 | 0 | 0.14 | 0.22 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5162cae16f1582e2cd15f8c44ff6385eae028502 | 3,471 | py | Python | pay-api/tests/unit/services/test_auth.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/services/test_auth.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/services/test_auth.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the auth Service.
Test-Suite to ensure that the auth Service is working as expected.
"""
import pytest
from werkzeug.exceptions import HTTPException
from pay_api.services.auth import check_auth
from pay_api.utils.constants import EDIT_ROLE, VIEW_ROLE
def test_auth_role_for_service_account(session, monkeypatch):
"""Assert the auth works for service account."""
def token_info(): # pylint: disable=unused-argument; mocks of library methods
return {
'username': 'service account',
'realm_access': {
'roles': [
'system',
'edit'
]
}
}
def mock_auth(): # pylint: disable=unused-argument; mocks of library methods
return 'test'
monkeypatch.setattr('pay_api.utils.user_context._get_token', mock_auth)
monkeypatch.setattr('pay_api.utils.user_context._get_token_info', token_info)
# Test one of roles
check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
def test_auth_role_for_service_account_with_no_edit_role(session, monkeypatch):
"""Assert the auth works for service account."""
def token_info(): # pylint: disable=unused-argument; mocks of library methods
return {
'username': 'service account',
'realm_access': {
'roles': [
'system'
]
}
}
def mock_auth(): # pylint: disable=unused-argument; mocks of library methods
return 'test'
monkeypatch.setattr('pay_api.utils.user_context._get_token', mock_auth)
monkeypatch.setattr('pay_api.utils.user_context._get_token_info', token_info)
with pytest.raises(HTTPException) as excinfo:
# Test one of roles
check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
    assert excinfo.value.code == 403  # pytest exposes the raised exception as excinfo.value
def test_auth_for_client_user_roles(session, public_user_mock):
"""Assert that the auth is working as expected."""
# token = jwt.create_jwt(get_claims(roles=[Role.EDITOR.value]), token_header)
# headers = {'Authorization': 'Bearer ' + token}
# def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
# return headers['Authorization']
# with app.test_request_context():
# monkeypatch.setattr('flask.request.headers.get', mock_auth)
# Test one of roles
check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
# Test contains roles
check_auth('CP0001234', contains_role=EDIT_ROLE)
# Test for exception
with pytest.raises(HTTPException) as excinfo:
check_auth('CP0000000', contains_role=VIEW_ROLE)
    assert excinfo.value.code == 403
with pytest.raises(HTTPException) as excinfo:
check_auth('CP0000000', one_of_roles=[EDIT_ROLE])
    assert excinfo.value.code == 403
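# A minimal sketch (not part of the original suite) of how the repeated token
# mocking above could be factored into one helper; only the role list varies:
def _patch_token(monkeypatch, roles):
    monkeypatch.setattr('pay_api.utils.user_context._get_token', lambda: 'test')
    monkeypatch.setattr('pay_api.utils.user_context._get_token_info',
                        lambda: {'username': 'service account',
                                 'realm_access': {'roles': roles}})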
| 35.060606 | 91 | 0.682224 | 444 | 3,471 | 5.148649 | 0.317568 | 0.027559 | 0.030621 | 0.059055 | 0.53762 | 0.53762 | 0.506562 | 0.506562 | 0.476815 | 0.404199 | 0 | 0.021892 | 0.223567 | 3,471 | 98 | 92 | 35.418367 | 0.825974 | 0.422645 | 0 | 0.586957 | 0 | 0 | 0.16172 | 0.08086 | 0 | 0 | 0 | 0 | 0.065217 | 1 | 0.152174 | false | 0 | 0.086957 | 0.086957 | 0.326087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5164ffb41b1068e6e8353350ba4bc5a194d1426f | 2,848 | py | Python | testNim.py | PauloHSNeto/Ciencia-de-Computa-o-CourseEra | 230281ba7227348ed2d27bb20039aed223244d94 | [
"bzip2-1.0.6"
] | null | null | null | testNim.py | PauloHSNeto/Ciencia-de-Computa-o-CourseEra | 230281ba7227348ed2d27bb20039aed223244d94 | [
"bzip2-1.0.6"
] | null | null | null | testNim.py | PauloHSNeto/Ciencia-de-Computa-o-CourseEra | 230281ba7227348ed2d27bb20039aed223244d94 | [
"bzip2-1.0.6"
] | null | null | null | computador = 0
usuario = 0
rodada = 0
# Computer's move: removes m pieces and reports the new board state
def computador_escolhe_jogada(n, m):
global computador
n = n - m
if (n == 1):
print(" ")
print("O computador tirou %s peça." % n)
print("Agora restam %s peças no tabuleiro." % n)
print(" ")
if (n == 0):
print ("Fim do jogo! O computador ganhou!")
partida()
else:
print(" ")
print("O computador tirou %s peça." % m)
print("Agora restam %s peças no tabuleiro." % n)
print(" ")
if (n == 0):
print ("Fim do jogo! O computador ganhou!")
partida()
return n
return m
# User's move: prompts for a number of pieces and validates it against the per-move limit m
def usuario_escolhe_jogada(n, m):
global usuario
print(" ")
n_user = int(input("Quantas peças você vai tirar? "))
print("Voce tirou %s peças." % n_user)
if (n_user <= m):
        n = n - n_user  # subtract what the user actually took, not the per-move limit
print(" ")
print("Agora restam apenas %s peças no tabuleiro." % n)
else:
while (n_user > m):
print("Oops! Jogada inválida! Tente de novo.")
print(" ")
n_user = int(input("Quantas peças você vai tirar? "))
if (n == 0):
print ("Vitoria do usuario")
return n_user
return n
return m
# Runs a match of up to three rounds; who starts depends on whether n is a multiple of m + 1
def partida():
global computador
global usuario
global rodada
while(rodada <= 3):
rodada = rodada + 1
if (rodada <= 3 ):
print(" ")
print("**** Rodada %s ****" % rodada)
print(" ")
n = int(input("Quantas peças? "))
m = int(input("Limite de peças por jogada? "))
            if ((n % (m + 1)) == 0):  # a multiple of m + 1 is a losing position for the first mover
while (n > 0):
print(" ")
print("Voce começa!")
usuario_escolhe_jogada(n,m)
if n > 0:
n = n - m
computador_escolhe_jogada(n,m)
n = n - m
computador = computador + 1
else:
print(" ")
print("Computador Começa!!")
while( n > 0):
computador_escolhe_jogada(n,m)
computador = computador + 1
n = n - m
if n > 0:
usuario_escolhe_jogada(n,m)
n = n - m
else:
print("**** Final do campeonato! ****")
print(" ")
print("Fim de Campeonato: Computador %s x 0 Usuario " % computador)
break
print("Bem-vindo ao jogo do NIM! Escolha:")
print(" ")
print("1 - para jogar uma partida isolada ")
tipo_jogo = int(input("2 - para jogar um campeonato "))
print(" ")
if ( tipo_jogo == 1 ):
print("Voce escolheu partida isolada!")
if ( tipo_jogo == 2):
print("Voce escolheu um campeonato!")
partida()
else:
pass
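# A sketch (not part of the homework) of the optimal move in this subtraction
# game: leaving the opponent a multiple of m + 1 pieces is a winning move, so
# take n % (m + 1) when that is legal, otherwise any legal amount (here 1).
def optimal_move(n, m):
    remainder = n % (m + 1)
    return remainder if remainder > 0 else 1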
| 29.061224 | 79 | 0.466643 | 323 | 2,848 | 4.049536 | 0.20743 | 0.019878 | 0.06422 | 0.068807 | 0.441896 | 0.262997 | 0.262997 | 0.188073 | 0.188073 | 0.188073 | 0 | 0.013642 | 0.408006 | 2,848 | 97 | 80 | 29.360825 | 0.762159 | 0 | 0 | 0.597826 | 0 | 0 | 0.247542 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0.01087 | 0 | 0 | 0.086957 | 0.358696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
516615c0dc774d664f76be40cc3b724ef7f05aa9 | 15,206 | py | Python | mbuild/formats/hoomd_simulation.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | mbuild/formats/hoomd_simulation.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | mbuild/formats/hoomd_simulation.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | import warnings
import itertools
import numpy as np
import operator
from collections import namedtuple
import parmed as pmd
import mbuild as mb
from mbuild.utils.sorting import natural_sort
from mbuild.utils.io import import_
from mbuild.utils.conversion import RB_to_OPLS
from .hoomd_snapshot import to_hoomdsnapshot
hoomd = import_("hoomd")
hoomd.md = import_("hoomd.md")
hoomd.md.pair = import_("hoomd.md.pair")
hoomd.md.special_pair = import_("hoomd.md.special_pair")
hoomd.md.charge = import_("hoomd.md.charge")
hoomd.md.bond = import_("hoomd.md.bond")
hoomd.md.angle = import_("hoomd.md.angle")
hoomd.md.dihedral = import_("hoomd.md.dihedral")
hoomd.group = import_("hoomd.group")
def create_hoomd_simulation(structure, ref_distance=1.0, ref_mass=1.0,
ref_energy=1.0, r_cut=1.2, auto_scale=False,
snapshot_kwargs={},
pppm_kwargs={'Nx':8, 'Ny':8, 'Nz':8, 'order':4}):
""" Convert a parametrized pmd.Structure to hoomd.SimulationContext
Parameters
----------
structure : parmed.Structure
ParmEd Structure object
ref_distance : float, optional, default=1.0
Reference distance for conversion to reduced units
ref_mass : float, optional, default=1.0
Reference mass for conversion to reduced units
ref_energy : float, optional, default=1.0
Reference energy for conversion to reduced units
r_cut : float, optional, default 1.2
Cutoff radius, in reduced units
auto_scale : bool, optional, default=False
Automatically use largest sigma value as ref_distance,
largest mass value as ref_mass
and largest epsilon value as ref_energy
snapshot_kwargs : dict
Kwargs to pass to to_hoomdsnapshot
pppm_kwargs : dict
Kwargs to pass to hoomd's pppm function
Returns
    -------
hoomd_objects : list
List of hoomd objects created during conversion
ReferenceValues : namedtuple
Values used in scaling
Notes
-----
While the hoomd objects are returned, the
hoomd.SimulationContext is accessible via `hoomd.context.current`.
If you pass a non-parametrized pmd.Structure, you will not have
angle, dihedral, or force field information. You may be better off
creating a hoomd.Snapshot
Reference units should be expected to convert parmed Structure units :
angstroms, kcal/mol, and daltons
"""
if isinstance(structure, mb.Compound):
raise ValueError("You passed mb.Compound to create_hoomd_simulation, " +
"there will be no angles, dihedrals, or force field parameters. " +
"Please use " +
"hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, " +
"then create your own hoomd context " +
"and pass your hoomd.Snapshot " +
"to hoomd.init.read_snapshot()")
elif not isinstance(structure, pmd.Structure):
raise ValueError("Please pass a parmed.Structure to " +
"create_hoomd_simulation")
_check_hoomd_version()
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) >= 3:
warnings.warn("Warning when using Hoomd 3, potential API change " +
"where the hoomd context is not updated upon " +
"creation of forces - utilize " +
"the returned `hoomd_objects`")
hoomd_objects = [] # Potential adaptation for Hoomd v3 API
if auto_scale:
ref_mass = max([atom.mass for atom in structure.atoms])
pair_coeffs = list(set((atom.type,
atom.epsilon,
atom.sigma) for atom in structure.atoms))
ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)
if not hoomd.context.current:
hoomd.context.initialize("")
snapshot,_ = to_hoomdsnapshot(structure, ref_distance=ref_distance,
ref_mass=ref_mass, ref_energy=ref_energy, **snapshot_kwargs)
hoomd_objects.append(snapshot)
hoomd.init.read_snapshot(snapshot)
nl = hoomd.md.nlist.cell()
nl.reset_exclusions(exclusions=['1-2', '1-3'])
hoomd_objects.append(nl)
if structure.atoms[0].type != '':
print("Processing LJ and QQ")
lj = _init_hoomd_lj(structure, nl, r_cut=r_cut,
ref_distance=ref_distance, ref_energy=ref_energy)
qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs)
hoomd_objects.append(lj)
hoomd_objects.append(qq)
if structure.adjusts:
print("Processing 1-4 interactions, adjusting neighborlist exclusions")
lj_14, qq_14 = _init_hoomd_14_pairs(structure, nl,
ref_distance=ref_distance, ref_energy=ref_energy)
hoomd_objects.append(lj_14)
hoomd_objects.append(qq_14)
if structure.bond_types:
print("Processing harmonic bonds")
harmonic_bond = _init_hoomd_bonds(structure,
ref_distance=ref_distance, ref_energy=ref_energy)
hoomd_objects.append(harmonic_bond)
if structure.angle_types:
print("Processing harmonic angles")
harmonic_angle = _init_hoomd_angles(structure,
ref_energy=ref_energy)
hoomd_objects.append(harmonic_angle)
if structure.dihedral_types:
print("Processing periodic torsions")
periodic_torsions = _init_hoomd_dihedrals(structure,
ref_energy=ref_energy)
hoomd_objects.append(periodic_torsions)
if structure.rb_torsion_types:
print("Processing RB torsions")
rb_torsions = _init_hoomd_rb_torsions(structure,
ref_energy=ref_energy)
hoomd_objects.append(rb_torsions)
print("HOOMD SimulationContext updated from ParmEd Structure")
return hoomd_objects, ref_values
def _init_hoomd_lj(structure, nl, r_cut=1.2,
ref_distance=1.0, ref_energy=1.0):
""" LJ parameters """
# Identify the unique atom types before setting
atom_type_params = {}
for atom in structure.atoms:
if atom.type not in atom_type_params:
atom_type_params[atom.type] = atom.atom_type
# Set the hoomd parameters for self-interactions
lj = hoomd.md.pair.lj(r_cut, nl)
for name, atom_type in atom_type_params.items():
lj.pair_coeff.set(name, name,
sigma=atom_type.sigma/ref_distance,
epsilon=atom_type.epsilon/ref_energy)
# Cross interactions, mixing rules, NBfixes
all_atomtypes = sorted(atom_type_params.keys())
for a1, a2 in itertools.combinations_with_replacement(all_atomtypes, 2):
nb_fix_info = atom_type_params[a1].nbfix.get(a2, None)
# nb_fix_info = (rmin, eps, rmin14, eps14)
if nb_fix_info is None:
# No nbfix means use mixing rule to find cross-interaction
if structure.combining_rule == 'lorentz':
sigma = ((atom_type_params[a1].sigma + atom_type_params[a2].sigma)
/ (2 * ref_distance))
epsilon = ((atom_type_params[a1].epsilon *
atom_type_params[a2].epsilon) /
ref_energy**2)**0.5
elif structure.combining_rule == 'geometric':
sigma = ((atom_type_params[a1].sigma *
atom_type_params[a2].sigma) /
ref_distance**2)**0.5
epsilon = ((atom_type_params[a1].epsilon *
atom_type_params[a2].epsilon) /
ref_energy**2)**0.5
else:
raise ValueError(
"Mixing rule {} ".format(structure.combining_rule) +
"not supported, use lorentz")
else:
# If we have nbfix info, use it
sigma = nb_fix_info[0] / (ref_distance*(2 ** (1/6)))
epsilon = nb_fix_info[1] / ref_energy
lj.pair_coeff.set(a1, a2, sigma=sigma, epsilon=epsilon)
return lj
def _init_hoomd_qq(structure, nl, Nx=1, Ny=1, Nz=1, order=4, r_cut=1.2):
""" Charge interactions """
charged = hoomd.group.charged()
if len(charged) == 0:
print("No charged groups found, ignoring electrostatics")
return None
else:
qq = hoomd.md.charge.pppm(charged, nl)
qq.set_params(Nx, Ny, Nz, order, r_cut)
return qq
def _init_hoomd_14_pairs(structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0):
"""Special_pairs to handle 14 scalings
See discussion: https://groups.google.com/forum/
#!topic/hoomd-users/iZ9WCpHczg0 """
# Update neighborlist to exclude 1-4 interactions,
# but impose a special_pair force to handle these pairs
nl.exclusions.append('1-4')
if hoomd.context.current.system_definition.getPairData().getN() == 0:
print("No 1,4 pairs found in hoomd snapshot")
return None, None
lj_14 = hoomd.md.special_pair.lj()
qq_14 = hoomd.md.special_pair.coulomb()
params_14 = {}
# Identify unique 14 scalings
for adjust in structure.adjusts:
t1 = adjust.atom1.type
t2 = adjust.atom2.type
ps = '-'.join(sorted([t1, t2]))
if ps not in params_14:
params_14[ps] = adjust.type
for name, adjust_type in params_14.items():
lj_14.pair_coeff.set(name,
sigma=adjust_type.sigma/ref_distance,
                # The adjust epsilon already carries the scaling
epsilon=adjust_type.epsilon/ref_energy,
# Do NOT use hoomd's alpha to modify any LJ terms
alpha=1,
r_cut=r_cut)
qq_14.pair_coeff.set(name,
alpha=adjust_type.chgscale,
r_cut=r_cut)
return lj_14, qq_14
def _init_hoomd_bonds(structure, ref_distance=1.0, ref_energy=1.0):
""" Harmonic bonds """
# Identify the unique bond types before setting
bond_type_params = {}
for bond in structure.bonds:
t1, t2 = bond.atom1.type, bond.atom2.type
t1, t2 = sorted([t1, t2], key=natural_sort)
if t1 != '' and t2 != '':
bond_type = ('-'.join((t1, t2)))
if bond_type not in bond_type_params:
bond_type_params[bond_type] = bond.type
# Set the hoomd parameters
harmonic_bond = hoomd.md.bond.harmonic()
for name, bond_type in bond_type_params.items():
# A (paramerized) parmed structure with no bondtype
# is because of constraints
if bond_type is None:
print("Bond with no bondtype detected, setting coefficients to 0")
harmonic_bond.bond_coeff.set(name,
k=0, r0=0)
else:
harmonic_bond.bond_coeff.set(name,
k=2 * bond_type.k * ref_distance**2 / ref_energy,
r0=bond_type.req / ref_distance)
return harmonic_bond
def _init_hoomd_angles(structure, ref_energy=1.0):
""" Harmonic angles """
# Identify the unique angle types before setting
angle_type_params = {}
for angle in structure.angles:
t1, t2, t3 = angle.atom1.type, angle.atom2.type, angle.atom3.type
t1, t3 = sorted([t1, t3], key=natural_sort)
angle_type = ('-'.join((t1, t2, t3)))
if angle_type not in angle_type_params:
angle_type_params[angle_type] = angle.type
# set the hoomd parameters
harmonic_angle = hoomd.md.angle.harmonic()
for name, angle_type in angle_type_params.items():
harmonic_angle.angle_coeff.set(name,
t0=np.deg2rad(angle_type.theteq),
k=2 * angle_type.k / ref_energy)
return harmonic_angle
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
""" Periodic dihedrals (dubbed harmonic dihedrals in HOOMD) """
# Identify the unique dihedral types before setting
    # need HOOMD 2.8.0 to use the proper dihedral implementation
# from this PR https://github.com/glotzerlab/hoomd-blue/pull/492
version_numbers = _check_hoomd_version()
    if float(version_numbers[0]) < 2 or (
            float(version_numbers[0]) == 2 and float(version_numbers[1]) < 8):
from mbuild.exceptions import MBuildError
raise MBuildError("Please upgrade Hoomd to at least 2.8.0")
dihedral_type_params = {}
for dihedral in structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
if dihedral_type not in dihedral_type_params:
if isinstance(dihedral.type, pmd.DihedralType):
dihedral_type_params[dihedral_type] = dihedral.type
elif isinstance(dihedral.type, pmd.DihedralTypeList):
if len(dihedral.type) > 1:
warnings.warn("Multiple dihedral types detected" +
" for single dihedral, will ignore all except " +
" first diheral type")
dihedral_type_params[dihedral_type] = dihedral.type[0]
# Set the hoomd parameters
periodic_torsion = hoomd.md.dihedral.harmonic() # These are periodic torsions
for name, dihedral_type in dihedral_type_params.items():
periodic_torsion.dihedral_coeff.set(name,
k=2*dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per,
phi_0=np.deg2rad(dihedral_type.phase))
return periodic_torsion
def _init_hoomd_rb_torsions(structure, ref_energy=1.0):
""" RB dihedrals (implemented as OPLS dihedrals in HOOMD) """
# Identify the unique dihedral types before setting
dihedral_type_params = {}
for dihedral in structure.rb_torsions:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
if dihedral_type not in dihedral_type_params:
dihedral_type_params[dihedral_type] = dihedral.type
# Set the hoomd parameter
rb_torsion = hoomd.md.dihedral.opls()
for name, dihedral_type in dihedral_type_params.items():
F_coeffs = RB_to_OPLS(dihedral_type.c0 / ref_energy,
dihedral_type.c1 / ref_energy,
dihedral_type.c2 / ref_energy,
dihedral_type.c3 / ref_energy,
dihedral_type.c4 / ref_energy,
dihedral_type.c5 / ref_energy)
rb_torsion.dihedral_coeff.set(name, k1=F_coeffs[0],
k2=F_coeffs[1], k3=F_coeffs[2], k4=F_coeffs[3])
return rb_torsion
def _check_hoomd_version():
version = hoomd.__version__
version_numbers = version.split('.')
return version_numbers
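# A minimal usage sketch (names below are illustrative, not part of this
# module; a parametrized parmed.Structure typically comes from mBuild + foyer):
#
#   structure = parametrized_compound.to_parmed()  # hypothetical source
#   objects, refs = create_hoomd_simulation(structure, r_cut=1.2, auto_scale=True)
#   # the active context is then available via hoomd.context.current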
| 40.657754 | 85 | 0.636262 | 1,963 | 15,206 | 4.723892 | 0.173714 | 0.037852 | 0.021137 | 0.008304 | 0.303677 | 0.249865 | 0.182897 | 0.144398 | 0.121212 | 0.111722 | 0 | 0.023191 | 0.268381 | 15,206 | 373 | 86 | 40.766756 | 0.810337 | 0.176378 | 0 | 0.164063 | 0 | 0 | 0.099699 | 0.010165 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035156 | false | 0.011719 | 0.082031 | 0 | 0.160156 | 0.039063 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51668e773b785fc937e32ac3a240021ec3f1a368 | 595 | py | Python | homework/jenya_s/homework12.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | homework/jenya_s/homework12.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | 4 | 2018-12-19T13:41:12.000Z | 2019-01-14T15:11:11.000Z | homework/jenya_s/homework12.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | import os
# Cpypl lists the files in a directory whose extensions mark them as C, Python, or Perl sources
class Cpypl:
def __init__(self, directory):
self.directory = directory
self.extension_dict = {"c": (".c", ".h"),
"py": (".py", ".pyc"),
"pl": (".pl", ".pm"),
}
def file_list(self):
folder_files = os.listdir(self.directory)
extension = self.extension_dict
return [i for i in folder_files for j in extension if i.endswith(extension[j])]
a = Cpypl(r"C:\Users\admin\Desktop\python test")
print(a.file_list())
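# An alternative sketch using pathlib (same idea with the extensions flattened;
# the class above is left untouched and this helper is not called anywhere):
from pathlib import Path
def source_files(directory):
    wanted = {".c", ".h", ".py", ".pyc", ".pl", ".pm"}
    return [p.name for p in Path(directory).iterdir() if p.suffix in wanted]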
| 29.75 | 88 | 0.480672 | 66 | 595 | 4.181818 | 0.545455 | 0.141304 | 0.123188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.381513 | 595 | 19 | 89 | 31.315789 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0.097222 | 0.050347 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.357143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a8c5dccd774b45cfea010980c9e6fb6227679df | 3,307 | py | Python | Python/data/preprocess.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T18:49:34.000Z | 2022-02-11T18:49:34.000Z | Python/data/preprocess.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | null | null | null | Python/data/preprocess.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import scipy.ndimage.measurements as scipy_measurements
import miapy.data.transformation as miapy_tfm
# Clips negative intensities to zero, warning whenever clipping actually occurs
class ClipNegativeTransform(miapy_tfm.Transform):
def __init__(self, entries=('images',)) -> None:
super().__init__()
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
m = np.min(img)
if m < 0:
print('Clipping... min: {}'.format(m))
img = np.clip(img, a_min=0, a_max=None)
sample[entry] = img
return sample
# Shifts the volume so its foreground centroid sits at the image center and records the applied shift
class CenterCentroidTransform(miapy_tfm.Transform):
def __init__(self, entries=('images',)) -> None:
super().__init__()
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
centroid_transform = []
# move centroid to center
com = scipy_measurements.center_of_mass(img > 0)
for axis in range(0, 3):
diff = com[axis] - int(img.shape[axis] / 2)
centroid_transform.append(-diff)
if abs(diff) > 1:
img = np.roll(img, int(-diff), axis=axis)
sample[entry] = img
# store the centroid transformation (will be written to metadata later)
sample['centroid_transform'] = np.array(centroid_transform)
return sample
# Augmentation: random circular shifts of up to shift_amount voxels plus random 90-degree rotations
class RandomRotateShiftTransform(miapy_tfm.Transform):
def __init__(self, do_rotate=True, shift_amount=0, entries=('images',)) -> None:
super().__init__()
self.entries = entries
self.do_rotate = do_rotate
self.shift_amount = shift_amount
print('Using RandomRotateShiftTransform({}, {})'.format(do_rotate, shift_amount))
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
# shift +/- shift_amount pixels
if self.shift_amount != 0:
# number of pixels to shift
n = np.random.randint(-self.shift_amount, self.shift_amount + 1)
# axis
k = np.random.randint(0, 3)
img = np.roll(img, n, axis=k)
# 3x rotate by 90 degree around a random axis
if self.do_rotate:
planes = [(0, 1), (0, 2), (1, 2)]
for i in range(0, 3):
k = np.random.randint(0, 3)
plane_idx = np.random.randint(0, 3)
img = np.rot90(img, k, planes[plane_idx])
sample[entry] = img
return sample
def get_bounding_box(img):
a = np.argwhere(img)
min0, min1, min2 = a.min(0)
max0, max1, max2 = a.max(0)
return [min0, max0, min1, max1, min2, max2]
# Apply reverse center centroid transform
def revert_centroid_transform(img, centroid_transform):
for axis in range(0, 3):
diff = -centroid_transform[axis]
if abs(diff) > 1:
img = np.roll(img, int(diff), axis=axis)
return img
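# A minimal usage sketch (illustrative only; whether miapy offers a compose
# helper named ComposeTransform is an assumption, and `volume` is a
# hypothetical 3-D numpy array):
#
#   transform = miapy_tfm.ComposeTransform([ClipNegativeTransform(),
#                                           CenterCentroidTransform(),
#                                           RandomRotateShiftTransform(shift_amount=2)])
#   sample = transform({'images': volume})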
| 30.063636 | 89 | 0.558512 | 398 | 3,307 | 4.462312 | 0.261307 | 0.04955 | 0.04223 | 0.033784 | 0.395833 | 0.366554 | 0.34009 | 0.292793 | 0.268018 | 0.268018 | 0 | 0.020947 | 0.335954 | 3,307 | 109 | 90 | 30.33945 | 0.787796 | 0.071969 | 0 | 0.486111 | 0 | 0 | 0.031036 | 0.009801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.041667 | 0 | 0.263889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a8e3a68bba6328be4a0daef3330c63f8527f035 | 1,828 | py | Python | conversion_service/config/settings/worker.py | das-g/osmaxx-postgis-conversion | c41aba1cb0fd112de12c8c0540584b7caa651150 | [
"MIT"
] | null | null | null | conversion_service/config/settings/worker.py | das-g/osmaxx-postgis-conversion | c41aba1cb0fd112de12c8c0540584b7caa651150 | [
"MIT"
] | null | null | null | conversion_service/config/settings/worker.py | das-g/osmaxx-postgis-conversion | c41aba1cb0fd112de12c8c0540584b7caa651150 | [
"MIT"
] | null | null | null | # pylint: skip-file
import random
import string
from .common import * # noqa
# we don't use user sessions, so it doesn't matter if we recreate the secret key on each startup
SECRET_KEY = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
# disable databases for the worker
DATABASES = {}
INSTALLED_APPS += (
# sentry
'raven.contrib.django.raven_compat',
)
# SENTRY
SENTRY_DSN = env.str('SENTRY_DSN', default=None)
if SENTRY_DSN:
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
RAVEN_CONFIG = {
'dsn': SENTRY_DSN,
'release': env.str('SENTRY_RELEASE', default=''),
}
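# A sketch of how records flow through the LOGGING dict above (the logger
# name is a placeholder): loggers that are not configured explicitly
# propagate to the root logger, whose only handler ships WARNING and above
# to Sentry.
#
#   import logging
#   logging.getLogger('conversion_service').error("conversion failed")  # -> Sentry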
| 26.114286 | 96 | 0.461707 | 154 | 1,828 | 5.38961 | 0.558442 | 0.043373 | 0.086747 | 0.104819 | 0.210843 | 0.093976 | 0 | 0 | 0 | 0 | 0 | 0.002695 | 0.391138 | 1,828 | 69 | 97 | 26.492754 | 0.743037 | 0.089716 | 0 | 0.196429 | 0 | 0 | 0.308389 | 0.080869 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.053571 | 0 | 0.053571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9137e47101ff21c41e130f1251b26b67a1b350 | 708 | py | Python | test/espnet2/layers/test_log_mel.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | test/espnet2/layers/test_log_mel.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | test/espnet2/layers/test_log_mel.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | import torch
from espnet2.layers.log_mel import LogMel
def test_repr():
print(LogMel())
def test_forward():
layer = LogMel(n_fft=16, n_mels=2)
x = torch.randn(2, 4, 9)
y, _ = layer(x)
assert y.shape == (2, 4, 2)
y, ylen = layer(x, torch.tensor([4, 2], dtype=torch.long))
assert (ylen == torch.tensor((4, 2), dtype=torch.long)).all()
def test_backward_leaf_in():
layer = LogMel(n_fft=16, n_mels=2)
x = torch.randn(2, 4, 9, requires_grad=True)
y, _ = layer(x)
y.sum().backward()
def test_backward_not_leaf_in():
layer = LogMel(n_fft=16, n_mels=2)
x = torch.randn(2, 4, 9, requires_grad=True)
x = x + 2
y, _ = layer(x)
y.sum().backward()
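def test_output_dim_matches_n_mels():
    """A sketch added for illustration: LogMel consumes STFT features of size
    n_fft // 2 + 1 (here 16 // 2 + 1 = 9) and projects them onto n_mels bins."""
    layer = LogMel(n_fft=16, n_mels=3)
    y, _ = layer(torch.randn(2, 4, 9))
    assert y.shape == (2, 4, 3)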
| 22.125 | 65 | 0.610169 | 121 | 708 | 3.396694 | 0.322314 | 0.068127 | 0.087591 | 0.109489 | 0.600973 | 0.600973 | 0.508516 | 0.377129 | 0.377129 | 0.377129 | 0 | 0.048825 | 0.218927 | 708 | 31 | 66 | 22.83871 | 0.694394 | 0 | 0 | 0.454545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.272727 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a97b4378066662b1ab8308caa3ca06c47283ec7 | 43,014 | py | Python | AESDataV3.py | JHerrmann01/AESDataManipulator | 836ee74326ee0c35435b5fe0bb9875d392b2cc7c | [
"Apache-2.0"
] | null | null | null | AESDataV3.py | JHerrmann01/AESDataManipulator | 836ee74326ee0c35435b5fe0bb9875d392b2cc7c | [
"Apache-2.0"
] | null | null | null | AESDataV3.py | JHerrmann01/AESDataManipulator | 836ee74326ee0c35435b5fe0bb9875d392b2cc7c | [
"Apache-2.0"
] | null | null | null | ###American Environmental Solutions Data Manipulation###
## Created by Jeremy Herrmann ##
##Import Libraries##
from __future__ import print_function
from os.path import join, dirname, abspath
import xlrd
from xlrd.sheet import ctype_text
import xlsxwriter
####################
def loadSpreadsheet():
fname = join(dirname(dirname(abspath(__file__))), 'AES/First Spreadsheet', 'GBZ65745 Excel SE855 GLENWOOD RD-1 (copy).xls')
xl_workbook = xlrd.open_workbook(fname)
xl_sheet = xl_workbook.sheet_by_name("Results")
return xl_workbook, xl_sheet
def grabSimpleInformation(xl_workbook, xl_sheet):
numSpaces = 0
generalAreas = {}
num_cols = xl_sheet.ncols
for row_idx in range(8, xl_sheet.nrows-7):
if(xl_sheet.cell(row_idx,0).value == "Mercury"):
Mercury_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "pH at 25C - Soil"):
Corrosivity_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Flash Point"):
Flashpoint_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Ignitability"):
Ignitability_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Reactivity Cyanide"):
Reactivity_Values_Cyanide_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Reactivity Sulfide"):
Reactivity_Values_Sulfide_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Total Cyanide (SW9010C Distill.)"):
Cyanide_Values_Raw = (xl_sheet.row(row_idx))
if(numSpaces%3 == 0):
generalAreas[int(row_idx)] = str(xl_sheet.cell(row_idx,0).value)
numSpaces +=1
if(xl_sheet.cell(row_idx,0).value == ""):
numSpaces += 1
return Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas
def sortGeneralAreas(generalAreas):
keys = generalAreas.keys()
sortedGenAreas = [[0 for i in range(2)]for i in range(len(keys))]
for x in range(0,len(keys)):
smallestKey = 100000
for key in generalAreas.keys():
if(key < smallestKey):
smallestKey = key
sortedGenAreas[x][0] = int(smallestKey)
sortedGenAreas[x][1] = str(generalAreas.pop(smallestKey))
return sortedGenAreas
def insertRowsIntoAreas(xl_sheet, sortedGenAreas):
rowsInArea = [[""]for i in range(len(sortedGenAreas))]
for x in range(0,len(sortedGenAreas)):
rowsInArea[x][0] = sortedGenAreas[x][1]
numAreas = len(sortedGenAreas)
for x in range(0 , numAreas):
if(x < numAreas-1):
for y in range(sortedGenAreas[x][0]+1, sortedGenAreas[x+1][0]-2):
rowsInArea[x].append(xl_sheet.row(y))
else:
for y in range(sortedGenAreas[x][0]+1, xl_sheet.nrows-7):
rowsInArea[x].append(xl_sheet.row(y))
return rowsInArea
print("Beginning program...")
#Loading the file to be parsed
xl_workbook, xl_sheet = loadSpreadsheet()
#Grabbing basic information
Company_Name = xl_sheet.cell(0, 0).value
Type_Samples_Collected_Raw = xl_sheet.row(4)
global firstIndex
firstIndex = 6
#Begin parsing to find simple useful information
Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas = grabSimpleInformation(xl_workbook, xl_sheet)
#Sort the general areas in increasing order(Row number)
sortedGenAreas = sortGeneralAreas(generalAreas)
#Insert the rows that belong to each respective area
rowsInArea = insertRowsIntoAreas(xl_sheet, sortedGenAreas)
print("Done Parsing")
print()
########################################################################################################################
def startWritingFinalFile():
workbook = xlsxwriter.Workbook('/home/jeremy/Desktop/AES/Excel_Reformatting.xlsx')
worksheet = workbook.add_worksheet()
return workbook, worksheet
#Refining a given row
def valueRefinerMetals(inputArrayRaw):
outputArray = []
pos = 0
units = str(inputArrayRaw[2].value)
divisor = 1
if(units[0:2] == "ug"):
divisor = 1000
for value in inputArrayRaw:
if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos == 0) or (pos == 2)):
if(pos == 0):
outputArray.append(str(value.value))
elif(pos == 2):
outputArray.append("ppm")
outputArray.append("")
elif(str(value.value).find("<") == -1):
outputArray.append(str(round((float(value.value)/divisor), 5)))
else:
outputArray.append("N.D.")
pos+=1
return(outputArray)
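# A helper sketch (not used by the script) mirroring the ug/kg -> ppm rule that
# valueRefinerMetals applies above: values reported in ug are divided by 1000
# so everything lands in mg/kg, i.e. ppm.
def to_ppm(value, units):
    divisor = 1000 if str(units).startswith("ug") else 1
    return round(float(value) / divisor, 5)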
def isDetected(compound):
hasFloat = False
for x in compound:
try:
val = float(x)
hasFloat = True
break
except Exception as e:
val = ""
return hasFloat
def isNumber(value):
    try:
        float(value)
        return True
    except Exception:
        return False
def removeUselessRows(rowsInArea, index):
y = 1
lenRow = (len(rowsInArea[index][1]))
while(y < len(rowsInArea[index])):
if not isDetected(rowsInArea[index][y]):
rowsInArea[index].remove(rowsInArea[index][y])
y -= 1
y += 1
if(len(rowsInArea[index]) == 1):
emptyArray = ["None Detected", "_", "_"]
for x in range(len(emptyArray), lenRow):
emptyArray.append("N.D.")
rowsInArea[index].append(emptyArray)
return rowsInArea[index]
def createBeginning(worksheet, currLine):
line = 1
x = len(Type_Samples_Collected)
offset = 4
finalLetter=""
if 64+x+offset > 90:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(64+((x+offset)%26))
finalLetter = firstLetter+secondLetter
else:
finalLetter = chr(64+x+offset)
for x in range(0, 5):
worksheet.merge_range("B"+str(line)+":"+finalLetter+str(line), "")
line += 1
return worksheet, currLine
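# The chr() arithmetic repeated throughout this script converts a 0-based
# column index into an Excel column name; xlsxwriter ships a utility for
# exactly this (a sketch, not wired into the functions below):
from xlsxwriter.utility import xl_col_to_name
def column_name(index):
    return xl_col_to_name(index)  # 0 -> 'A', 25 -> 'Z', 26 -> 'AA'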
def createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne):
formatOne.set_text_wrap(True)
Type_Samples_Collected = []
pos = 0
for value in Type_Samples_Collected_Raw:
if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos ==0)):
Type_Samples_Collected.append(value.value)
pos+=1
worksheet.write('B'+str(currLine), 'Parameter', formatOne)
worksheet.write('C'+str(currLine), 'Compounds Detected', formatOne)
worksheet.write('D'+str(currLine), 'Units', formatOne)
worksheet.write('E'+str(currLine), 'NYSDEC Part 375 Unrestricted Use Criteria', formatOne)
offset = 4
for x in range(1,len(Type_Samples_Collected)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne)
currLine += 1
return worksheet, currLine, Type_Samples_Collected
def addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo):
Mercury_Values = valueRefinerMetals(Mercury_Values_Raw)
offset = 2
worksheet.write('B'+str(currLine), 'Mercury 7471', formatOne)
for x in range(0, len(Mercury_Values)):
if(isNumber(Mercury_Values[x])):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatTwo)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatTwo)
else:
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Mercury_Values
def addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfPCBS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "PCBs By SW8082A":
indexOfPCBS = x
for x in range(1, len(rowsInArea[indexOfPCBS])):
rowsInArea[indexOfPCBS][x] = valueRefinerMetals(rowsInArea[indexOfPCBS][x])
rowsInArea[indexOfPCBS] = removeUselessRows(rowsInArea, indexOfPCBS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfPCBS])):
for y in range(0, len(rowsInArea[indexOfPCBS][x])):
if(isNumber(rowsInArea[indexOfPCBS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPCBS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'PCBS', formatOne)
else:
worksheet.write('B'+str(firstLine), 'PCBS',formatOne)
return worksheet, currLine
def addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfPesticides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Pesticides - Soil By SW8081B":
indexOfPesticides = x
for x in range(1, len(rowsInArea[indexOfPesticides])):
rowsInArea[indexOfPesticides][x] = valueRefinerMetals(rowsInArea[indexOfPesticides][x])
rowsInArea[indexOfPesticides] = removeUselessRows(rowsInArea, indexOfPesticides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfPesticides])):
for y in range(0, len(rowsInArea[indexOfPesticides][x])):
if(isNumber(rowsInArea[indexOfPesticides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPesticides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Pesticides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Pesticides', formatOne)
return worksheet, currLine
def addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfMetals = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Metals, Total":
indexOfMetals = x
for x in range(1, len(rowsInArea[indexOfMetals])):
rowsInArea[indexOfMetals][x] = valueRefinerMetals(rowsInArea[indexOfMetals][x])
rowsInArea[indexOfMetals] = removeUselessRows(rowsInArea, indexOfMetals)
firstLine = currLine
offset = 2
worksheet.write('B'+str(currLine), 'Metals, Total')
for x in range(1, len(rowsInArea[indexOfMetals])):
if(rowsInArea[indexOfMetals][x][0] != "Mercury"):
for y in range(0, len(rowsInArea[indexOfMetals][x])):
if(isNumber(rowsInArea[indexOfMetals][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+offset+y))+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfMetals][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Metals', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Metals', formatOne)
return worksheet, currLine
def addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo):
Cyanide_Values = valueRefinerMetals(Cyanide_Values_Raw)
worksheet.write('B'+str(currLine), 'Cyanide', formatOne)
offset = 2
for x in range(0, len(Cyanide_Values)):
if(isNumber(Cyanide_Values[x])):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatTwo)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatTwo)
else:
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Cyanide_Values
def addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfSemiVolatiles = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Semivolatiles By SW8270D":
indexOfSemiVolatiles = x
for x in range(1, len(rowsInArea[indexOfSemiVolatiles])):
rowsInArea[indexOfSemiVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfSemiVolatiles][x])
rowsInArea[indexOfSemiVolatiles] = removeUselessRows(rowsInArea, indexOfSemiVolatiles)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfSemiVolatiles])):
for y in range(0, len(rowsInArea[indexOfSemiVolatiles][x])):
if(isNumber(rowsInArea[indexOfSemiVolatiles][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSemiVolatiles][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'SemiVolatiles', formatOne)
else:
worksheet.write('B'+str(firstLine), 'SemiVolatiles', formatOne)
return worksheet, currLine
def addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfVolatiles = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Volatiles (TCL) By SW8260C":
indexOfVolatiles = x
for x in range(1, len(rowsInArea[indexOfVolatiles])):
rowsInArea[indexOfVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfVolatiles][x])
rowsInArea[indexOfVolatiles] = removeUselessRows(rowsInArea, indexOfVolatiles)
firstLine = currLine
offset = 2
worksheet.write('B'+str(currLine), 'Volatiles (TCL) By SW8260C')
for x in range(1, len(rowsInArea[indexOfVolatiles])):
for y in range(0, len(rowsInArea[indexOfVolatiles][x])):
if(isNumber(rowsInArea[indexOfVolatiles][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVolatiles][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Volatiles', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Volatiles', formatOne)
return worksheet, currLine
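# A generic sketch (not called anywhere) of the pattern the add*Values
# functions repeat: refine one section's rows, drop all-N.D. rows, then write
# numeric cells with formatTwo and text cells with formatOne. It leans on the
# column_name helper sketched earlier.
def addSectionValues(worksheet, currLine, sortedGenAreas, rowsInArea,
                     sectionName, label, formatOne, formatTwo):
    index = 0
    for x in range(0, len(sortedGenAreas)):
        if sortedGenAreas[x][1] == sectionName:
            index = x
    for x in range(1, len(rowsInArea[index])):
        rowsInArea[index][x] = valueRefinerMetals(rowsInArea[index][x])
    rowsInArea[index] = removeUselessRows(rowsInArea, index)
    firstLine = currLine
    for x in range(1, len(rowsInArea[index])):
        for y in range(0, len(rowsInArea[index][x])):
            cell = rowsInArea[index][x][y]
            fmt = formatTwo if isNumber(cell) else formatOne
            worksheet.write(column_name(y + 2) + str(currLine), str(cell), fmt)
        currLine += 1
    lastLine = currLine - 1
    if lastLine != firstLine:
        worksheet.merge_range('B' + str(firstLine) + ":B" + str(lastLine), label, formatOne)
    else:
        worksheet.write('B' + str(firstLine), label, formatOne)
    return worksheet, currLine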
def createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne):
worksheet.set_row(currLine-1,50)
worksheet.write('B'+str(currLine), 'RCRA Characteristics ', formatOne)
worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'Regulatory Criteria', formatOne)
offset = 4
for x in range(1,len(Type_Samples_Collected)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne)
currLine += 1
return worksheet, currLine
def addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne):
Corrosivity_Values = valueRefinerMetals(Corrosivity_Values_Raw)
worksheet.write('B'+str(currLine), 'Corrosivity', formatOne)
offset = 2
for x in range(0,len(Corrosivity_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Corrosivity_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Corrosivity_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Corrosivity_Values
def addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne):
Flashpoint_Values = []
pos = 0
for value in Flashpoint_Values_Raw:
if(pos == 0):
Flashpoint_Values.append(value.value)
Flashpoint_Values.append(" ")
Flashpoint_Values.append("Degree F")
Flashpoint_Values.append(">200 Degree F")
if((pos >= firstIndex and pos%2 == firstIndex%2)):
Flashpoint_Values.append(value.value)
pos+=1
offset = 1
for x in range(0,len(Flashpoint_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Flashpoint_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Flashpoint_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Flashpoint_Values
def addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne):
Ignitability_Values = []
pos = 0
for value in Ignitability_Values_Raw:
if(pos == 0):
Ignitability_Values.append(value.value)
Ignitability_Values.append(" ")
Ignitability_Values.append("Degree F")
Ignitability_Values.append("<140 Degree F")
if((pos >= firstIndex and pos%2 == firstIndex%2)):
Ignitability_Values.append(value.value)
pos+=1
offset = 1
for x in range(0,len(Ignitability_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Ignitability_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Ignitability_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Ignitability_Values
def addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne):
Reactivity_Values_Cyanide = valueRefinerMetals(Reactivity_Values_Cyanide_Raw)
worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Reactivity', formatOne)
worksheet.write('C'+str(currLine), 'Cyanide', formatOne)
offset = 2
for x in range(1,len(Reactivity_Values_Cyanide)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne)
currLine += 1
Reactivity_Values_Sulfide = valueRefinerMetals(Reactivity_Values_Sulfide_Raw)
worksheet.write('C'+str(currLine), 'Sulfide', formatOne)
for x in range(1,len(Reactivity_Values_Sulfide)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne)
currLine += 1
return worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide
def createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne):
worksheet.set_row(currLine-1,50)
worksheet.write('B'+str(currLine), 'Toxicity ', formatOne)
worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'TCLP Regulatory Criteria', formatOne)
x = len(Type_Samples_Collected)
offset = 4
finalLetter=""
if 64+x+offset > 90:
        firstLetter = chr(int(65+(((x+offset)-26)/26)))
        secondLetter = chr(64+((x+offset)%26))
        finalLetter = firstLetter+secondLetter
else:
finalLetter = chr(64+x+offset)
worksheet.merge_range("F"+str(currLine)+":"+finalLetter+str(currLine), "", formatOne)
currLine += 1
return worksheet, currLine
def addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfTCLPMetals = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Metals, TCLP":
indexOfTCLPMetals = x
for x in range(1, len(rowsInArea[indexOfTCLPMetals])):
rowsInArea[indexOfTCLPMetals][x] = valueRefinerMetals(rowsInArea[indexOfTCLPMetals][x])
rowsInArea[indexOfTCLPMetals] = removeUselessRows(rowsInArea, indexOfTCLPMetals)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfTCLPMetals])):
for y in range(0, len(rowsInArea[indexOfTCLPMetals][x])):
if(isNumber(rowsInArea[indexOfTCLPMetals][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPMetals][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Metals', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Metals', formatOne)
return worksheet, currLine
def addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfVOCS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Volatiles By SW8260C":
indexOfVOCS = x
for x in range(1, len(rowsInArea[indexOfVOCS])):
rowsInArea[indexOfVOCS][x] = valueRefinerMetals(rowsInArea[indexOfVOCS][x])
rowsInArea[indexOfVOCS] = removeUselessRows(rowsInArea, indexOfVOCS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfVOCS])):
for y in range(0, len(rowsInArea[indexOfVOCS][x])):
if(isNumber(rowsInArea[indexOfVOCS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVOCS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Vocs', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Vocs', formatOne)
return worksheet, currLine
def addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
indexOfSVOCS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Acid/Base-Neutral By SW8270D":
indexOfSVOCS = x
for x in range(1, len(rowsInArea[indexOfSVOCS])):
rowsInArea[indexOfSVOCS][x] = valueRefinerMetals(rowsInArea[indexOfSVOCS][x])
rowsInArea[indexOfSVOCS] = removeUselessRows(rowsInArea, indexOfSVOCS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfSVOCS])):
for y in range(0, len(rowsInArea[indexOfSVOCS][x])):
if(isNumber(rowsInArea[indexOfSVOCS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSVOCS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP SVocs', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP SVocs', formatOne)
return worksheet, currLine
def addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
indexOfTCLPPesticides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Pesticides By SW8081B":
indexOfTCLPPesticides = x
for x in range(1, len(rowsInArea[indexOfTCLPPesticides])):
rowsInArea[indexOfTCLPPesticides][x] = valueRefinerMetals(rowsInArea[indexOfTCLPPesticides][x])
rowsInArea[indexOfTCLPPesticides] = removeUselessRows(rowsInArea, indexOfTCLPPesticides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfTCLPPesticides])):
for y in range(0, len(rowsInArea[indexOfTCLPPesticides][x])):
if(isNumber(rowsInArea[indexOfTCLPPesticides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPPesticides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Pesticides', formatOne)
return worksheet, currLine
def addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
    indexOfHerbicides = 0
    for x in range(0, len(sortedGenAreas)):
        if sortedGenAreas[x][1] == "TCLP Herbicides By SW8151A":
            indexOfHerbicides = x
    for x in range(1, len(rowsInArea[indexOfHerbicides])):
        rowsInArea[indexOfHerbicides][x] = valueRefinerMetals(rowsInArea[indexOfHerbicides][x])
    rowsInArea[indexOfHerbicides] = removeUselessRows(rowsInArea, indexOfHerbicides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfHerbicides])):
for y in range(0, len(rowsInArea[indexOfHerbicides][x])):
if(isNumber(rowsInArea[indexOfHerbicides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfHerbicides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides / Herbicides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Pesticides / Herbicides', formatOne)
return worksheet, currLine
def addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
indexOfGasolineHydrocarbons = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Gasoline Range Hydrocarbons (C6-C10) By SW8015D":
indexOfGasolineHydrocarbons = x
for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])):
rowsInArea[indexOfGasolineHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfGasolineHydrocarbons][x])
indexOfDieselHydrocarbons = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TPH By SW8015D DRO":
indexOfDieselHydrocarbons = x
for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])):
rowsInArea[indexOfDieselHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfDieselHydrocarbons][x])
offset = 2
worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Total Petroleum Hydrocarbons', formatOne)
for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])):
for y in range(0, len(rowsInArea[indexOfGasolineHydrocarbons][x])):
if(isNumber(rowsInArea[indexOfGasolineHydrocarbons][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfGasolineHydrocarbons][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatOne)
currLine += 1
for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])):
for y in range(0, len(rowsInArea[indexOfDieselHydrocarbons][x])):
if(isNumber(rowsInArea[indexOfDieselHydrocarbons][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfDieselHydrocarbons][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatOne)
currLine += 1
return worksheet, currLine
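# A minimal refactoring sketch (added for illustration; nothing above calls it):
# each writer repeats the same index-to-Excel-column arithmetic inline, and the
# helper below reproduces that logic for one- and two-letter columns ('A'-'ZZ').
def columnLetters(index):
    """Convert a 0-based column index to an Excel column label, e.g. 1 -> 'B', 27 -> 'AB'."""
    if index < 26:
        return chr(65 + index)
    firstLetter = chr(65 + (index - 26) // 26)
    secondLetter = chr(65 + index % 26)
    return firstLetter + secondLetter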
print("Writing to Excel File...")
workbook, worksheet = startWritingFinalFile()
worksheet.set_column('B:B', 25)
worksheet.set_column('C:C', 30)
worksheet.set_column('E:E', 15)
worksheet.set_row(5,50)
#Important Information - Titles, etc..
formatOne = workbook.add_format()
formatOne.set_align('center')
formatOne.set_align('vcenter')
formatOne.set_font_name('Arial')
formatOne.set_font_size('12')
formatOne.set_border(6)
#Numbers within the text
formatTwo = workbook.add_format()
formatTwo.set_align('center')
formatTwo.set_align('vcenter')
formatTwo.set_font_name('Arial')
formatTwo.set_font_size('12')
formatTwo.set_border(6)
formatTwo.set_bg_color('#87CEFF')
formatTwo.set_bold()
#Current Line to overwrite each process
currLine = 6
#Heading for each column
worksheet, currLine, Type_Samples_Collected = createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne)
#Adding Mercury Values
worksheet, currLine, Mercury_Values = addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo)
#Adding PCB Values
worksheet, currLine = addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Pesticide Values
worksheet, currLine = addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Metal Values
worksheet, currLine = addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Cyanide Values
worksheet, currLine, Cyanide_Values = addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo)
#Adding Semi Volatile Organic Compounds
worksheet, currLine = addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Volatile Organic Compounds
worksheet, currLine = addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#RCRA Second Heading
worksheet, currLine = createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne)
#Adding Corrosivity(pH) Values
worksheet, currLine, Corrosivity_Values = addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne)
#Adding Flashpoint Values
worksheet, currLine, Flashpoint_Values = addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne)
#Adding Ignitability Values
worksheet, currLine, Ignitability_Values = addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne)
#Adding Reactivity Values
worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide = addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne)
#Toxicity Third Heading
worksheet, currLine = createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne)
#Adding TCLP Metals(Barium / Lead)
worksheet, currLine = addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP VOCS
worksheet, currLine = addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP SVOCS
worksheet, currLine = addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Adding TCLP Pesticides
worksheet, currLine = addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Adding TCLP Herbicides
worksheet, currLine = addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Adding Total Petroleum Hydrocarbons
worksheet, currLine = addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Beginning information(Company Name, Address, Dates Samples were collected)
worksheet, currLine = createBeginning(worksheet, currLine)
workbook.close()
print("Done Writing")
| 46.907306 | 241 | 0.632561 | 4,692 | 43,014 | 5.72954 | 0.070546 | 0.024737 | 0.02377 | 0.020459 | 0.698732 | 0.662947 | 0.628538 | 0.542499 | 0.462486 | 0.43786 | 0 | 0.028124 | 0.236179 | 43,014 | 916 | 242 | 46.958515 | 0.790108 | 0.023341 | 0 | 0.48072 | 0 | 0 | 0.03106 | 0.001148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03856 | false | 0 | 0.006427 | 0 | 0.083548 | 0.010283 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9823ed404374fd6e8abf22e09c47cf13d68464 | 6,282 | py | Python | lhrhost/robot/robot.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | null | null | null | lhrhost/robot/robot.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | null | null | null | lhrhost/robot/robot.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | 1 | 2018-08-03T17:17:31.000Z | 2018-08-03T17:17:31.000Z | """Abstractions for a liquid-handling robot."""
# Standard imports
import asyncio
import logging
# Local package imports
from lhrhost.robot.p_axis import Axis as PAxis
from lhrhost.robot.x_axis import Axis as XAxis
from lhrhost.robot.y_axis import Axis as YAxis
from lhrhost.robot.z_axis import Axis as ZAxis
from lhrhost.util.cli import Prompt
# Logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Robot(object):
"""High-level controller for 4-axis liquid-handling robot.
Currently the x-axis is moved manually by the user.
"""
def __init__(self):
"""Initialize member variables."""
self.p = PAxis()
self.z = ZAxis()
self.y = YAxis()
self.x = XAxis()
self.prompt = Prompt(end='', flush=True)
def register_messaging_stack(self, messaging_stack):
"""Associate a messaging stack with the robot.
The messaging stack is used for host-peripheral communication.
"""
messaging_stack.register_response_receivers(
self.p.protocol, self.z.protocol, self.y.protocol, self.x.protocol
)
messaging_stack.register_command_senders(
self.p.protocol, self.z.protocol, self.y.protocol, self.x.protocol
)
async def wait_until_initialized(self):
"""Wait until all axes are initialized."""
await asyncio.gather(
self.p.wait_until_initialized(),
self.z.wait_until_initialized(),
self.y.wait_until_initialized(),
self.x.wait_until_initialized()
)
async def synchronize_values(self):
"""Request the values of all protocol channels."""
await self.p.synchronize_values()
await self.z.synchronize_values()
await self.y.synchronize_values()
await self.x.synchronize_values()
async def load_calibrations(self):
"""Load calibration parameters from json files."""
self.p.load_calibration_json()
self.p.load_preset_json()
self.p.load_tunings_json()
self.z.load_calibration_json()
self.z.load_preset_json()
self.z.load_tunings_json()
self.y.load_calibration_json()
self.y.load_preset_json()
self.y.load_tunings_json()
self.x.load_calibration_json()
self.x.load_preset_json()
self.x.load_tunings_json()
async def ensure_sample_platform_configuration(self, configuration):
"""Ensure that the sample platform is configured as speified."""
await self.prompt(
'Please ensure that the sample platform modules are configured '
'following the "{}" configurationn:'.format(configuration)
)
self.x.configuration = configuration
async def go_to_alignment_hole(self):
"""Move the pipettor head to the alignment hole."""
await self.z.go_to_high_end_position()
await asyncio.gather(
self.y.go_to_alignment_hole(), self.x.go_to_alignment_hole()
)
await self.z.go_to_alignment_hole()
async def align_manually(self):
"""Do a manual alignment of x/y positioning."""
await self.go_to_alignment_hole()
await self.prompt(
'Please move the x-axis and the y-axis so that the pipette tip is '
'directly over the round alignment hole: '
)
await asyncio.gather(self.x.set_alignment(), self.y.set_alignment())
logger.info('Aligned to the zero position at the alignment hole.')
async def go_to_module_position(
self, module_name, x_position, y_position, z_position=None
):
"""Move the pipettor head to the specified x/y position of the module."""
module_type = self.x.get_module_type(module_name)
if (
self.x.current_preset_position is not None and
self.x.at_module(module_name)
):
await self.z.go_to_module_position(module_type, 'far above')
else:
await self.z.go_to_high_end_position()
await asyncio.gather(
self.x.go_to_module_position(module_name, x_position),
self.y.go_to_module_position(module_type, y_position)
)
        if z_position is not None:
            await self.z.go_to_module_position(module_type, z_position)
async def intake(self, module_name, volume, height=None):
"""Intake fluid at the specified height.
Height should be a preset z-axis position or a physical z-axis position.
"""
module_type = self.x.get_module_type(module_name)
if height is not None:
try:
await self.z.go_to_module_position(module_type, height)
except KeyError:
await self.z.go_to_physical_position(height)
await self.p.intake(volume)
async def intake_precise(self, module_name, volume, height=None):
"""Intake fluid at the specified height.
Height should be a preset z-axis position or a physical z-axis position.
Volume should be either 20, 30, 40, 50, or 100.
"""
module_type = self.x.get_module_type(module_name)
if height is None:
if self.z.current_preset_position is not None:
height = self.z.current_preset_position[1]
else:
height = await self.z.physical_position
await self.z.go_to_module_position(module_type, 'above')
await self.p.go_to_pre_intake(volume)
try:
await self.z.go_to_module_position(module_type, height)
except KeyError:
await self.z.go_to_physical_position(height)
await self.p.intake(volume)
async def dispense(self, module_name, volume=None, height=None):
"""Dispense fluid at the specified height.
If volume is none, dispenses all syringe contents.
Height should be a preset z-axis position or a physical z-axis position.
"""
module_type = self.x.get_module_type(module_name)
if height is not None:
try:
await self.z.go_to_module_position(module_type, height)
except KeyError:
await self.z.go_to_physical_position(height)
await self.p.dispense(volume)
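# Usage sketch (assumed, not part of this module): `stack` stands for an
# already-built lhrhost messaging stack, and the module names are placeholders
# for presets loaded from the calibration JSON files.
#
#     robot = Robot()
#     robot.register_messaging_stack(stack)
#     await robot.wait_until_initialized()
#     await robot.load_calibrations()
#     await robot.intake_precise('module A', volume=50)
#     await robot.dispense('module B')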
| 37.616766 | 81 | 0.650748 | 836 | 6,282 | 4.683014 | 0.19378 | 0.055172 | 0.03576 | 0.036782 | 0.407918 | 0.356833 | 0.314432 | 0.304215 | 0.304215 | 0.294508 | 0 | 0.002804 | 0.262018 | 6,282 | 166 | 82 | 37.843373 | 0.841674 | 0.053327 | 0 | 0.304348 | 0 | 0 | 0.054978 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017391 | false | 0 | 0.06087 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9d0c2e4e731186b891bcd4f534edfa2b33e353 | 1,599 | py | Python | references/stm32_parsing_sim/stm32parser.py | koson/OTA_update_STM32_using_ESP32 | 7fe7ae64d5290c0a453c29d787b5fe9456910e96 | [
"MIT"
] | 155 | 2020-02-15T06:54:15.000Z | 2021-09-16T07:19:19.000Z | references/stm32_parsing_sim/stm32parser.py | ksmola/OTA_update_STM32_using_ESP32 | dd616a1212da8f874e4826d63cfbd4c3b9ad2df2 | [
"MIT"
] | 8 | 2020-10-09T08:56:52.000Z | 2021-09-01T03:42:49.000Z | references/stm32_parsing_sim/stm32parser.py | ksmola/OTA_update_STM32_using_ESP32 | dd616a1212da8f874e4826d63cfbd4c3b9ad2df2 | [
"MIT"
] | 25 | 2020-03-16T04:41:12.000Z | 2021-08-19T11:49:40.000Z |
import time
import math as m
start = time.time()
def checksum(block):
data_chk = []
xor = '0'
for line in block:
for x in range(0, len(line) - 1, 2):
xor = hex(int(xor, 16) ^ int(line[x], 16) ^ int(line[x + 1], 16))
data_chk.append(xor)
xor = '0'
return data_chk
def stm_parser():
data, block, d_chk = [], [], []
addr = '0x08000000'
start, i, x = 0, 0, 0
with open("main.bin", "rb") as f:
byte = f.read(1)
while byte:
for b in byte:
data.append(hex(b))
byte = f.read(1)
while(start <= len(data)):
block.append(data[start : start + 256])
start = start + 256
count = int(len(block))
end = len(block) - 1
i_addr = int(addr, 16)
while(len(block[end]) % 256 != 0):
block[end].append(hex(255))
d_chk = checksum(block)
print("Start Flashing")
while(i < count):
l_addr = hex(int(i_addr % 65536))
h_addr = hex(int(i_addr / 65536))
i_addr += 256
a_chk = hex(int(l_addr, 16) ^ int(h_addr, 16))
print("Sending WRITE MEMORY Command...")
print("0x31", "0xCE")
time.sleep(0.1)
print("Sending Address...")
print(h_addr, l_addr, a_chk)
time.sleep(0.1)
print("Sending Data...")
# print(i + 1, block[i], d_chk[i])
print(i + 1, "BLOCK", d_chk[i])
time.sleep(0.1)
i+=1
print()
print("Done Flashing")
stm_parser()
print('Time (ms):', 1000*(time.time() - start))
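# Sanity check for checksum() (values assumed, not from any STM32 document):
# the pairwise XOR over a block reduces to the XOR of all its bytes, e.g.
# checksum([['0x01', '0x02', '0x04', '0x08']]) returns ['0xf'].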
| 20.766234 | 77 | 0.499062 | 230 | 1,599 | 3.378261 | 0.273913 | 0.030888 | 0.03861 | 0.042471 | 0.149292 | 0.110682 | 0 | 0 | 0 | 0 | 0 | 0.071698 | 0.337086 | 1,599 | 76 | 78 | 21.039474 | 0.661321 | 0.020013 | 0 | 0.137255 | 0 | 0 | 0.086957 | 0 | 0 | 0 | 0.011509 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.039216 | 0 | 0.098039 | 0.196078 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9d5971fc9f0f68b3b0383f6727b85c4065e172 | 3,034 | py | Python | sw/device/silicon_creator/lib/crypto/tests/testvectors/wycheproof/rsa_3072_verify_parse_testvectors.py | matutem/opentitan | a41c0a57568f1dc8263a4ecc3913f190750959f5 | [
"Apache-2.0"
] | null | null | null | sw/device/silicon_creator/lib/crypto/tests/testvectors/wycheproof/rsa_3072_verify_parse_testvectors.py | matutem/opentitan | a41c0a57568f1dc8263a4ecc3913f190750959f5 | [
"Apache-2.0"
] | null | null | null | sw/device/silicon_creator/lib/crypto/tests/testvectors/wycheproof/rsa_3072_verify_parse_testvectors.py | matutem/opentitan | a41c0a57568f1dc8263a4ecc3913f190750959f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import math
import sys
import hjson
def parse_hex_int(hex_str):
# int() throws an error message for empty string
if hex_str == '':
return 0
return int(hex_str, 16)
def parse_test(raw_data, n, e, t):
test = {'n': n, 'e': e}
test['msg'] = parse_hex_int(t['msg'])
# Message is expressed in hex notation, so the length in bytes is
# the number of characters / 2
test['msg_len'] = math.ceil(len(t['msg']) / 2)
test['signature'] = parse_hex_int(t['sig'])
notes = []
if t['comment']:
notes.append(t['comment'])
# Add notes from flags, if any
notes.extend([raw_data['notes'][flag] for flag in t['flags']])
# cases for expected result
if t['result'] == 'valid':
test['valid'] = True
elif t['result'] == 'invalid':
test['valid'] = False
elif t['result'] == 'acceptable':
if t['comment'] == 'short signature':
# We consider short signatures valid
test['valid'] = True
else:
# err on the side of caution and reject "acceptable" signatures otherwise
test['valid'] = False
notes.append('signature marked as acceptable by wycheproof')
else:
        raise RuntimeError('Unexpected result type {}'.format(t['result']))
test['comment'] = 'wycheproof test with tcId={:d}, notes={}'.format(
t["tcId"], ', '.join(notes))
return test
def parse_test_group(raw_data, group):
tests = []
n = parse_hex_int(group['n'])
e = parse_hex_int(group['e'])
for t in group['tests']:
tests.append(parse_test(raw_data, n, e, t))
return tests
def parse_test_vectors(raw_data):
if raw_data['algorithm'] != 'RSASSA-PKCS1-v1_5':
raise RuntimeError('Unexpected algorithm: {}, expected {}'.format(
raw_data['algorithm'], 'RSASSA-PKCS1-v1_5'))
tests = []
for group in raw_data['testGroups']:
if group['sha'] != 'SHA-256':
raise RuntimeError(
'Unexpected hash function: {}, expected {}'.format(
group['sha'], 'SHA-256'))
tests.extend(parse_test_group(raw_data, group))
return tests
def main():
parser = argparse.ArgumentParser()
parser.add_argument('src',
metavar='FILE',
type=argparse.FileType('r'),
help='Read test vectors from this JSON file.')
parser.add_argument('dst',
metavar='FILE',
type=argparse.FileType('w'),
help='Write output to this file.')
args = parser.parse_args()
testvecs = parse_test_vectors(json.load(args.src))
args.src.close()
hjson.dump(testvecs, args.dst)
args.dst.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| 28.622642 | 85 | 0.586684 | 385 | 3,034 | 4.503896 | 0.366234 | 0.036332 | 0.031719 | 0.018454 | 0.122261 | 0.086505 | 0.056517 | 0 | 0 | 0 | 0 | 0.010412 | 0.271918 | 3,034 | 105 | 86 | 28.895238 | 0.774559 | 0.152933 | 0 | 0.2 | 0 | 0 | 0.202814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9e608dad9679e93ef744bc1105dbf95d043cce | 2,479 | py | Python | Tools/boot_now.py | wms124/PX4_1.4.1_Back-up | 9d6d903a8f46346281ae11104c47f1904da05e37 | [
"BSD-3-Clause"
] | 4,224 | 2015-01-02T11:51:02.000Z | 2020-10-27T23:42:28.000Z | Tools/boot_now.py | wms124/PX4_1.4.1_Back-up | 9d6d903a8f46346281ae11104c47f1904da05e37 | [
"BSD-3-Clause"
] | 11,736 | 2015-01-01T11:59:16.000Z | 2020-10-28T17:13:38.000Z | Tools/boot_now.py | wms124/PX4_1.4.1_Back-up | 9d6d903a8f46346281ae11104c47f1904da05e37 | [
"BSD-3-Clause"
] | 11,850 | 2015-01-02T14:54:47.000Z | 2020-10-28T16:42:47.000Z | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2012-2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
# send BOOT command to a device
import argparse
import serial, sys
from sys import platform as _platform
# Parse commandline arguments
parser = argparse.ArgumentParser(description="Send boot command to a device")
parser.add_argument('--baud', action="store", type=int, default=115200, help="Baud rate of the serial port")
parser.add_argument('port', action="store", help="Serial port(s) to which the FMU may be attached")
args = parser.parse_args()
REBOOT = b'\x30'
EOC = b'\x20'
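# In the PX4 bootloader protocol, 0x30 is the boot/reboot command and 0x20 the
# end-of-command marker; sending both asks the bootloader to start the firmware.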
print("Sending reboot to %s" % args.port)
try:
port = serial.Serial(args.port, args.baud, timeout=0.5)
except Exception:
print("Unable to open %s" % args.port)
sys.exit(1)
port.write(REBOOT + EOC)
port.close()
sys.exit(0)
| 41.316667 | 108 | 0.702299 | 341 | 2,479 | 5.093842 | 0.533724 | 0.020725 | 0.019574 | 0.026482 | 0.133564 | 0.10593 | 0.078296 | 0.078296 | 0.078296 | 0.078296 | 0 | 0.013069 | 0.166599 | 2,479 | 59 | 109 | 42.016949 | 0.827686 | 0.632513 | 0 | 0 | 0 | 0 | 0.232782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aa4c977dc126a0fbb76c33235561df665c5a977 | 10,248 | py | Python | pyalp/stimulus/film.py | BaptisteLefebvre/pyalp | 05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a | [
"MIT"
] | 1 | 2020-11-09T09:23:11.000Z | 2020-11-09T09:23:11.000Z | pyalp/stimulus/film.py | BaptisteLefebvre/pyalp | 05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a | [
"MIT"
] | null | null | null | pyalp/stimulus/film.py | BaptisteLefebvre/pyalp | 05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a | [
"MIT"
] | 1 | 2020-11-09T09:23:19.000Z | 2020-11-09T09:23:19.000Z | import gc
import os
import pyalp.io
import pyalp.sequence
import pyalp.utils
from .base import Stimulus
class Film(Stimulus):
"""Film stimulus
Parameters
----------
bin_pathname: none | string, optional
Path name to the .bin file.
vec_pathname: none | string, optional
Path name to the .vec file.
rate: float, optional
Frame rate [Hz]. The default value is 30.0.
sequence_size: integer, optional
Number of frames each sequence. The default value is 200.
interactive: boolean, optional
Specify if it should prompt the input parameters. The default value is False.
verbose: boolean, optional
Verbose mode. The default value is False.
"""
dirname = os.path.join("E:", "BINVECS")
# dirname = os.path.expanduser(os.path.join("~", ".pyalp", "films")) # TODO remove.
def __init__(self, bin_pathname=None, vec_pathname=None, rate=30.0, sequence_size=200,
interactive=False, verbose=False):
Stimulus.__init__(self)
self.bin_pathname = bin_pathname
self.vec_pathname = vec_pathname
self.rate = rate
self.sequence_size = sequence_size
if interactive:
self.prompt_input_arguments()
# Read .vec file.
self.frame_ids = pyalp.io.load_vec(self.vec_pathname)
self.nb_frames = len(self.frame_ids)
self.nb_sequences = int(self.nb_frames / self.sequence_size)
self.nb_cycles = int(self.nb_sequences / 2)
# Read header of .bin file.
self.bin_header = pyalp.io.load_bin_header(self.bin_pathname)
if verbose:
self.print_settings()
def prompt_input_arguments(self, sep=""):
"""Prompt the input arguments.
Parameter
---------
sep: string, optional
            Prompt separator. The default value is \"\".
"""
print(sep)
# Print all the user directories.
user_dirnames = os.listdir(self.dirname)
for user_dirname_id, user_dirname in enumerate(user_dirnames):
print(" {}. {}".format(user_dirname_id, user_dirname))
# Prompt user identifier.
prompt = "Enter the user number (e.g. 0): "
user_id = pyalp.utils.input(prompt, int)
user_dirname = user_dirnames[user_id]
user_pathname = os.path.join(self.dirname, user_dirname)
print(sep)
# Print all the .bin files.
bin_pathname = os.path.join(user_pathname, "Bin")
bin_filenames = [name for name in os.listdir(bin_pathname) if os.path.isfile(os.path.join(bin_pathname, name))]
for bin_filename_id, bin_filename in enumerate(bin_filenames):
print(" {}. {}".format(bin_filename_id, bin_filename))
# Prompt .bin filename identifier.
prompt = "Enter the .bin file number (e.g. 0): "
bin_id = pyalp.utils.input(prompt, int)
bin_filename = bin_filenames[bin_id]
self.bin_pathname = os.path.join(bin_pathname, bin_filename)
print(sep)
# Print all the .vec files.
vec_pathname = os.path.join(user_pathname, "Vec")
vec_filenames = [name for name in os.listdir(vec_pathname) if os.path.isfile(os.path.join(vec_pathname, name))]
for vec_filename_id, vec_filename in enumerate(vec_filenames):
print(" {}. {}".format(vec_filename_id, vec_filename))
# Prompt .vec filename identifier.
prompt = "Enter the .vec file number (e.g. 0): "
vec_id = pyalp.utils.input(prompt, int)
vec_filename = vec_filenames[vec_id]
self.vec_pathname = os.path.join(vec_pathname, vec_filename)
print(sep)
# Prompt the frame rate.
prompt = "Enter the frame rate [Hz] (e.g. {}): ".format(self.rate)
self.rate = pyalp.utils.input(prompt, float)
print(sep)
# Prompt the advanced features.
prompt = "Advanced features (y/n): "
advanced = pyalp.utils.input(prompt, lambda arg: arg == "y")
if advanced:
# Prompt the number of frames in each sequence.
prompt = "Number of frames in each sequence (e.g. {}): ".format(self.sequence_size)
self.sequence_size = pyalp.utils.input(prompt, int)
print(sep)
return
def print_settings(self):
"""Print settings."""
print("----------------- Film stimulus ------------------")
print(".bin pathname: {}".format(self.bin_pathname))
print(".vec pathname: {}".format(self.vec_pathname))
print("frame rate: {} Hz".format(self.rate))
print("sequence size: {}".format(self.sequence_size))
print("number of frames: {}".format(self.nb_frames))
print("number of sequences: {}".format(self.nb_sequences))
print("number of cycles: {}".format(self.nb_cycles))
print(".bin header: {}".format(self.bin_header))
print("--------------------------------------------------")
print("")
return
def display(self, device):
"""Display stimulus.
Parameter
---------
device: Device
ALP device.
"""
sequence_1 = None
sequence_2 = None
if self.nb_frames > 0 * self.sequence_size: # i.e. enough frames
# 1. Allocate 1st sequence of frames.
# Define 1st sequence of frames.
sequence_id_1 = 0
nb_frames = min(self.sequence_size, self.nb_frames - 0 * self.sequence_size)
sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
# Allocate memory for 1st sequence of frames.
device.allocate(sequence_1)
# Control the timing properties of 1st sequence display.
sequence_1.control_timing()
if self.nb_frames > 1 * self.sequence_size: # i.e. enough frames
# 2. Allocate 2nd sequence of frames.
# Define 2nd sequence of frames.
sequence_id_2 = 1
nb_frames = min(self.sequence_size, self.nb_frames - 1 * self.sequence_size)
sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
# Allocate memory for 2nd sequence of frames.
device.allocate(sequence_2)
# Control the timing properties of 2nd sequence display.
sequence_2.control_timing()
# 3. Play on DMD.
# Set up queue mode.
device.control_projection(queue_mode=True)
# Transmit and start 1st sequence of frames into memory.
if self.nb_frames > 0 * self.sequence_size: # i.e. enough frames
sequence_1.load()
sequence_1.start()
# Transmit and start 2nd sequence of frames into memory.
if self.nb_frames > 1 * self.sequence_size: # i.e. enough frames
sequence_2.load()
sequence_2.start()
# Force garbage collection.
gc.collect()
# 4. Repeat.
for cycle_id in range(1, self.nb_cycles):
# a. Wait completion of 1st sequence.
device.synchronize()
# b. Free 1st sequence.
sequence_1.free()
# c. Reallocate 1st sequence.
sequence_id_1 = 2 * cycle_id + 0
nb_frames = self.sequence_size
sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_1)
sequence_1.control_timing()
sequence_1.load()
sequence_1.start()
gc.collect()
# d. Wait completion of 2nd sequence.
device.synchronize()
# e. Free 2nd sequence.
sequence_2.free()
# f. Reallocate 2nd sequence.
sequence_id_2 = 2 * cycle_id + 1
nb_frames = self.sequence_size
sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_2)
sequence_2.control_timing()
sequence_2.load()
sequence_2.start()
gc.collect()
if self.nb_cycles > 0 and self.nb_frames > (self.nb_cycles * 2 + 0) * self.sequence_size:
# i.e. remaining frames
# a. Wait completion of 1st sequence.
device.synchronize()
# b. Free 1st sequence.
sequence_1.free()
# c. Reallocate 1st sequence.
sequence_id_1 = 2 * self.nb_cycles + 0
nb_frames = min(self.sequence_size, self.nb_frames - sequence_id_1 * self.sequence_size)
sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_1)
sequence_1.control_timing()
sequence_1.load()
sequence_1.start()
gc.collect()
if self.nb_cycles > 0 and self.nb_frames > (self.nb_cycles * 2 + 1) * self.sequence_size:
# i.e. remaining frames
# a. Wait completion of 2nd sequence.
device.synchronize()
# b. Free 2nd sequence.
sequence_id_2 = 2 * self.nb_cycles + 1
nb_frames = min(self.sequence_size, self.nb_frames - sequence_id_2 * self.sequence_size)
sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_2)
sequence_2.control_timing()
sequence_2.load()
sequence_2.start()
gc.collect()
# 5. Clean up.
try:
device.wait()
sequence_1.free()
sequence_2.free()
except AttributeError:
pass
return
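# Usage sketch (assumed; `device` is an already-opened pyalp device wrapper):
#     film = Film(interactive=True, verbose=True)
#     film.display(device)
# display() double-buffers two ALP sequences: while one plays, the other is
# freed, reallocated, loaded and started, so transfers overlap with projection.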
| 37.265455 | 119 | 0.58509 | 1,241 | 10,248 | 4.638195 | 0.124899 | 0.064628 | 0.075052 | 0.041696 | 0.536484 | 0.437978 | 0.368485 | 0.344858 | 0.317408 | 0.308374 | 0 | 0.016391 | 0.309426 | 10,248 | 274 | 120 | 37.40146 | 0.796948 | 0.20404 | 0 | 0.424837 | 0 | 0 | 0.062815 | 0.006294 | 0 | 0 | 0 | 0.00365 | 0 | 1 | 0.026144 | false | 0.006536 | 0.039216 | 0 | 0.098039 | 0.143791 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aa5b513bf2a9038fbb780c73e2c734ede5749d9 | 1,328 | py | Python | python/example_code/s3/s3-python-example-download-file.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | [
"Apache-2.0"
] | null | null | null | python/example_code/s3/s3-python-example-download-file.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | [
"Apache-2.0"
] | null | null | null | python/example_code/s3/s3-python-example-download-file.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import botocore
BUCKET_NAME = 'my-bucket' # replace with your bucket name
KEY = 'my_image_in_s3.jpg' # replace with your object key
s3 = boto3.resource('s3')
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, 'my_local_image.jpg')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
#snippet-sourcedescription:[s3-python-example-download-file.py demonstrates how to ...]
#snippet-keyword:[Python]
#snippet-keyword:[AWS SDK for Python (Boto3)]
#snippet-keyword:[Code Sample]
#snippet-keyword:[Amazon S3]
#snippet-service:[s3]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2018-06-25]
#snippet-sourceauthor:[jschwarzwalder]
| 32.390244 | 88 | 0.718373 | 189 | 1,328 | 5.005291 | 0.57672 | 0.052854 | 0.021142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029973 | 0.170934 | 1,328 | 40 | 89 | 33.2 | 0.829246 | 0.673946 | 0 | 0 | 0 | 0 | 0.230352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aab652c059c797506557e8a980477db680cb80f | 6,829 | py | Python | DPythonS89/test.py | Synchronicity89/Lean | 564af47ea980cf0524874643c7190da82236bcfb | [
"Apache-2.0"
] | null | null | null | DPythonS89/test.py | Synchronicity89/Lean | 564af47ea980cf0524874643c7190da82236bcfb | [
"Apache-2.0"
] | 1 | 2020-08-25T03:02:47.000Z | 2020-08-25T03:02:47.000Z | DPythonS89/test.py | Synchronicity89/Lean | 564af47ea980cf0524874643c7190da82236bcfb | [
"Apache-2.0"
] | null | null | null | from clr import AddReference
import pandas
AddReference("System")
AddReference("QuantConnect.Research")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Logging")
#AddReference("QuantConnect.Data")
from System import *
from QuantConnect import *
from QuantConnect.Logging import *
#from Data import *
#from QuantConnect.Data import *
from QuantConnect.Research import *
from datetime import datetime, timedelta
from custom_data import QuandlFuture, Nifty
import pandas as pd
#from System import *
#from QuantConnect import *
#from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData, PythonQuandl
from datetime import datetime
import decimal
class QuandlFuture(PythonQuandl):
'''Custom quandl data type for setting customized value column name. Value column is used for the primary trading calculations and charting.'''
def __init__(self):
# Define ValueColumnName: cannot be None, Empty or non-existant column name
# If ValueColumnName is "Close", do not use PythonQuandl, use Quandl:
# self.AddData[QuandlFuture](self.crude, Resolution.Daily)
self.ValueColumnName = "Settle"
class Nifty(PythonData):
'''NIFTY Custom Data Class'''
def GetSource(self, config, date, isLiveMode):
return SubscriptionDataSource("https://www.dropbox.com/s/rsmg44jr6wexn2h/CNXNIFTY.csv?dl=1", SubscriptionTransportMedium.RemoteFile);
def Reader(self, config, line, date, isLiveMode):
if not (line.strip() and line[0].isdigit()): return None
# New Nifty object
index = Nifty();
index.Symbol = config.Symbol
try:
# Example File Format:
# Date, Open High Low Close Volume Turnover
# 2011-09-13 7792.9 7799.9 7722.65 7748.7 116534670 6107.78
data = line.split(',')
index.Time = datetime.strptime(data[0], "%Y-%m-%d")
index.Value = decimal.Decimal(data[4])
index["Open"] = float(data[1])
index["High"] = float(data[2])
index["Low"] = float(data[3])
index["Close"] = float(data[4])
except ValueError:
# Do nothing
return None
return index
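# Reader sanity check, using the sample row from the format comment above:
# "2011-09-13,7792.9,7799.9,7722.65,7748.7,116534670,6107.78" yields a Nifty
# bar with Time=2011-09-13 and Value=Close=7748.7 (taken from data[4]).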
class SecurityHistoryTest():
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
self.symbol = self.qb.AddSecurity(security_type, symbol).Symbol
self.column = 'close'
def __str__(self):
return "{} on {}".format(self.symbol.ID, self.qb.StartDate)
def test_period_overload(self, period):
history = self.qb.History([self.symbol], period)
return history[self.column].unstack(level=0)
def test_daterange_overload(self, end):
start = end - timedelta(1)
history = self.qb.History([self.symbol], start, end)
return history[self.column].unstack(level=0)
class OptionHistoryTest(SecurityHistoryTest):
def test_daterange_overload(self, end, start = None):
if start is None:
start = end - timedelta(1)
history = self.qb.GetOptionHistory(self.symbol, start, end)
return history.GetAllData()
class FutureHistoryTest(SecurityHistoryTest):
def test_daterange_overload(self, end, start = None, maxFilter = 182):
if start is None:
start = end - timedelta(1)
self.qb.Securities[self.symbol].SetFilter(0, maxFilter) # default is 35 days
history = self.qb.GetFutureHistory(self.symbol, start, end)
return history.GetAllData()
class FutureContractHistoryTest():
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
self.symbol = symbol
self.column = 'close'
def test_daterange_overload(self, end):
start = end - timedelta(1)
history = self.qb.GetFutureHistory(self.symbol, start, end)
return history.GetAllData()
class OptionContractHistoryTest(FutureContractHistoryTest):
def test_daterange_overload(self, end):
start = end - timedelta(1)
history = self.qb.GetOptionHistory(self.symbol, start, end)
return history.GetAllData()
class CustomDataHistoryTest(SecurityHistoryTest):
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
        if security_type == 'Nifty':
            data_type = Nifty
            self.column = 'close'
        elif security_type == 'QuandlFuture':
            data_type = QuandlFuture
            self.column = 'settle'
        else:
            raise ValueError("Unsupported custom data security_type: {}".format(security_type))
        self.symbol = self.qb.AddData(data_type, symbol, Resolution.Daily).Symbol
class MultipleSecuritiesHistoryTest(SecurityHistoryTest):
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
self.qb.AddEquity('SPY', Resolution.Daily)
self.qb.AddForex('EURUSD', Resolution.Daily)
self.qb.AddCrypto('BTCUSD', Resolution.Daily)
def test_period_overload(self, period):
history = self.qb.History(self.qb.Securities.Keys, period)
return history['close'].unstack(level=0)
class FundamentalHistoryTest():
def __init__(self):
self.qb = QuantBook()
def getFundamentals(self, ticker, selector, start, end):
return self.qb.GetFundamental(ticker, selector, start, end)
startDate = datetime(2014, 5, 9)
a = CompositeLogHandler()
securityTestHistory = MultipleSecuritiesHistoryTest(startDate, None, None)
#// Get the last 5 candles
periodHistory = securityTestHistory.test_period_overload(5)
#// Note there is no data for BTCUSD at 2014
#//symbol EURUSD SPY
#//time
#//2014-05-03 00:00:00 NaN 173.580655
#//2014-05-04 20:00:00 1.387185 NaN
#//2014-05-05 20:00:00 1.387480 NaN
#//2014-05-06 00:00:00 NaN 173.903690
#//2014-05-06 20:00:00 1.392925 NaN
#//2014-05-07 00:00:00 NaN 172.426958
#//2014-05-07 20:00:00 1.391070 NaN
#//2014-05-08 00:00:00 NaN 173.423752
#//2014-05-08 20:00:00 1.384265 NaN
#//2014-05-09 00:00:00 NaN 173.229931
Console.WriteLine(periodHistory)
count = periodHistory.shape[0]
Assert.AreEqual(10, count)
#// Get the one day of data
timedeltaHistory = securityTestHistory.test_period_overload(TimeSpan.FromDays(8));
firstIndex = timedeltaHistory.index.values[0]
#// EURUSD exchange time zone is NY but data is UTC so we have a 4 hour difference with algo TZ which is NY
Assert.AreEqual(datetime(startDate.year, startDate.month, startDate.day - 8, 20), firstIndex)
5aae6a12dc22ce122aa713fc2aeac3ad090fe5d0 | 2,827 | py | Python | map_swcf.py | torimcd/Goldblatt_etal_2020 | 0793b16ef2535db3482c31d84587d80b3578dd3b | [
"BSD-3-Clause"
] | 1 | 2021-12-03T15:11:31.000Z | 2021-12-03T15:11:31.000Z | map_swcf.py | torimcd/Goldblatt_etal_2021 | 0793b16ef2535db3482c31d84587d80b3578dd3b | [
"BSD-3-Clause"
] | null | null | null | map_swcf.py | torimcd/Goldblatt_etal_2021 | 0793b16ef2535db3482c31d84587d80b3578dd3b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Author: Victoria McDonald
email: vmcd@atmos.washington.edu
website: http://torimcd.github.com
license: BSD
"""
import matplotlib as mpl
mpl.use("Agg")
import os
import sys
import numpy as np
import netCDF4
import operator
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
download_path = '/home/vmcd/' # enter the path to the directory where you downloaded the archived data, eg '/home/user/Downloads'
filebase = download_path + 'FYSP_clouds_archive/CAM4/'
outfileloc = download_path + 'temp_data/' # this is the location to save the processed netcdf files to
current = '0.775'
cc = '0775'
# SWCF fraction variable
field = 'SWCF'
outfilebase = 'c4_swcf_'
casenames = {'07','0725','075','0775', '08','0825','085','0875','09','0925','095','0975','10', '1025', '105', '1075','11'}
# 1.0 case
outfile_10 = outfileloc + outfilebase + '10.nc'
if not os.path.isfile(outfile_10):
if os.path.isdir(outfileloc):
infile = filebase +'cam4_10.nc'
# calc cldlow global average per month
syscall = r"//usr//bin//cdo timmean -seltimestep,21/40 -select,name="+field+" "+infile+ " " +outfile_10
os.system(syscall)
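        # The command above expands to, e.g. (paths per the variables above):
        #   //usr//bin//cdo timmean -seltimestep,21/40 -select,name=SWCF cam4_10.nc c4_swcf_10.nc
        # i.e. keep only the SWCF field, select timesteps 21-40, average over time.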
for c in casenames:
# calc swcf
outfile_case = outfileloc+outfilebase+c+'.nc'
# check directly if the file exists
if not os.path.isfile(outfile_case):
if os.path.isdir(outfileloc):
infile = filebase +'cam4_' + c +'.nc'
# calc cldlow global average per month
syscall = r"//usr//bin//cdo timmean -seltimestep,21/40 -select,name="+field+" "+infile+ " " +outfile_case
os.system(syscall)
control = outfile_10
if os.path.isfile(control):
dsyear = netCDF4.Dataset(control)
control_swcf = dsyear.variables[field][:]
dsyear.close()
#plot the data
dsloc = outfileloc + outfilebase + cc +'.nc'
if os.path.isfile(dsloc):
# open the merged file and get out some variables
dsyear = netCDF4.Dataset(dsloc)
lons = dsyear.variables['lon'][:]
lats = dsyear.variables['lat'][:]
swcf = dsyear.variables[field][:]
swcf_units = dsyear.variables[field].units
dsyear.close() #close the file
swcf_diff = list(map(operator.sub, swcf, control_swcf))
#create plot
fig = plt.figure()
# setup the map
m = Basemap(lat_0=0,lon_0=0)
m.drawcoastlines()
m.drawcountries()
# Create 2D lat/lon arrays for Basemap
lon2d, lat2d = np.meshgrid(lons, lats)
# Plot
cs = m.pcolormesh(lon2d,lat2d,np.squeeze(swcf_diff), cmap='RdBu_r', latlon='True', vmin=-60, vmax=60, rasterized=True)
# This is the fix for the white lines between contour levels
cs.set_edgecolor("face")
# Add Colorbar
cbar = m.colorbar(cs, location='bottom', pad="10%")
cbar.set_label(swcf_units)
plt.title('Shortwave Cloud Forcing: ' + r'$\mathsf{S/S_0}$'+' = '+ current)
plt.show()
fig.savefig('swcf_map_diff_'+cc+'.pdf', bbox_inches='tight')
| 28.27 | 129 | 0.700389 | 416 | 2,827 | 4.677885 | 0.473558 | 0.018499 | 0.024666 | 0.011305 | 0.179342 | 0.169579 | 0.144913 | 0.144913 | 0.102775 | 0.102775 | 0 | 0.044981 | 0.15069 | 2,827 | 99 | 130 | 28.555556 | 0.765514 | 0.229926 | 0 | 0.107143 | 0 | 0 | 0.169846 | 0.011633 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ab017c82dd41b9fd3710bcfd371dbf19774599d | 6,706 | py | Python | src/mbic/mbic_full_model.py | davidanastasiu/antibiofilm | f50945d52bcfd97538a31d7627af6b3089fdd2cf | [
"MIT"
] | null | null | null | src/mbic/mbic_full_model.py | davidanastasiu/antibiofilm | f50945d52bcfd97538a31d7627af6b3089fdd2cf | [
"MIT"
] | null | null | null | src/mbic/mbic_full_model.py | davidanastasiu/antibiofilm | f50945d52bcfd97538a31d7627af6b3089fdd2cf | [
"MIT"
] | null | null | null | # AntiBiofilm Peptide Research
# Department of Computer Science and Engineering, Santa Clara University
# Author: Taylor Downey
# A python script that uses the optimized hyperparameters found for both
# the SVM and the SVR to create a prediction model
# Script prints the average RMSE of the full model when run with cross validation
#
# NOTE: Given the small number of training samples available, the average RMSE
# outputted will vary by about +- 5
# ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import json
import warnings
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.utils.validation import column_or_1d
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import RepeatedStratifiedKFold
warnings.filterwarnings("ignore")
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
def separatePeptides(peptides, threshold):
columns = ['MBIC']
filterMBIC = (peptides[columns] <= threshold).all(axis=1)
lower_peptides = peptides[filterMBIC]
filterMBIC = (peptides[columns] > threshold).all(axis=1)
upper_peptides = peptides[filterMBIC]
return lower_peptides, upper_peptides
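# e.g. separatePeptides(peptides, 64) returns (rows with MBIC <= 64,
# rows with MBIC > 64) -- the same 64 cutoff the SVM labels use below.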
# ------------------------------------------------------------------------------
# Variables
# ------------------------------------------------------------------------------
training_filename = '../../data/mbic_training_data.csv'
svm_features_filename = 'mbic_svm_forward_selection_features.json'
svr_features_filename = 'mbic_svr_forward_selection_features.json'
svr_svm_results = 'full_model_results.txt'
# Optimized Hyperparameters
svm_c = 10
svm_g = 1000
svm_pca_comp = 6
svm_num_feat = 9
svr_c = 45
svr_g = 40
svr_pca_comp = 8
svr_num_feat = 9
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
def main():
# Prepare peptides for SVM
with open(svm_features_filename) as f:
svm_feat_dict = json.load(f)
svm_feat_dict = svm_feat_dict[0:svm_num_feat]
peptides_svm = pd.read_csv(training_filename)
peptides_svm.loc[(peptides_svm['MBIC'] > 64), 'MBIC'] = 0
peptides_svm.loc[(peptides_svm['MBIC'] != 0), 'MBIC'] = 1
# Filter out columns based on feat list
labels = peptides_svm.columns.values.tolist()
for l in labels:
if l == 'MBIC':
continue
if l not in svm_feat_dict:
peptides_svm = peptides_svm.drop(columns=[l])
y_svm = peptides_svm['MBIC'].to_numpy()
peptides_svm = peptides_svm.drop(columns=['MBIC'])
min_max_scaler = preprocessing.MinMaxScaler()
X_norm_svm = min_max_scaler.fit_transform(peptides_svm)
pca_svm = PCA(n_components=svm_pca_comp)
X_trans_svm = pca_svm.fit_transform(X_norm_svm)
SVC_rbf = SVC(kernel='rbf', C=svm_c, gamma=svm_g)
# Prepare peptides for SVR
with open(svr_features_filename) as f:
svr_feat_dict = json.load(f)
svr_feat_dict = svr_feat_dict[0:svr_num_feat]
peptides_svr = pd.read_csv(training_filename)
    peptides_svr, _ = separatePeptides(peptides_svr, 64)
# Filter out columns based on feat list
labels = peptides_svr.columns.values.tolist()
for l in labels:
if l == 'MBIC':
continue
if l not in svr_feat_dict:
peptides_svr = peptides_svr.drop(columns=[l])
y_svr = peptides_svr['MBIC'].to_numpy()
peptides_svr = peptides_svr.drop(columns=['MBIC'])
min_max_scaler_svr = preprocessing.MinMaxScaler()
X_norm_svr = min_max_scaler_svr.fit_transform(peptides_svr)
pca_svr = PCA(n_components=svr_pca_comp)
X_trans_svr = pca_svr.fit_transform(X_norm_svr)
SVR_rbf = SVR(kernel='rbf', C=svr_c, gamma=svr_g)
# Prepare test set of petides used by svr after training
peptides_test_svr = pd.read_csv(training_filename)
# Filter out columns based on feat list
labels = peptides_test_svr.columns.values.tolist()
for l in labels:
if l == 'MBIC':
continue
if l not in svr_feat_dict:
peptides_test_svr = peptides_test_svr.drop(columns=[l])
y_svr2 = peptides_test_svr['MBIC'].to_numpy()
peptides_test_svr = peptides_test_svr.drop(columns=['MBIC'])
# Apply svr transformations on test set of peptides for svr
X_norm_test_svr = min_max_scaler_svr.transform(peptides_test_svr)
X_trans_test_svr = pca_svr.transform(X_norm_test_svr)
# Cross validation applied to full model
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats = 20)
RMSE = []
cnt = 1
for train_index, test_index in rskf.split(X_trans_svm, y_svm):
X_train, X_test = X_trans_svm[train_index], X_trans_svm[test_index]
y_train, y_test = y_svm[train_index], y_svm[test_index]
y_train = y_train.reshape(-1,1)
y_train = column_or_1d(y_train, warn=False)
svm_fit = SVC_rbf.fit(X_train, y_train)
y_pred = svm_fit.predict(X_test)
train_index_svr = []
test_index_svr = []
y_train_svr = []
y_test_svr = []
for i in range(0, len(y_train)):
if(y_train[i] == 0):
continue
else:
train_index_svr.append(train_index[i])
X_train_svr = X_trans_svr[train_index_svr]
y_train_svr = y_svr[train_index_svr]
svr_fit = SVR_rbf.fit(X_train_svr, y_train_svr)
y_train_svr = []
for i in range(0, len(y_pred)):
if(y_pred[i] == 0):
continue
else:
test_index_svr.append(test_index[i])
X_test_svr = X_trans_test_svr[test_index_svr]
y_test_svr = y_svr2[test_index_svr]
y_pred_svr = SVR_rbf.predict(X_test_svr)
rmse = np.sqrt(mean_squared_error(y_test_svr, y_pred_svr))
cnt = cnt + 1
with open (svr_svm_results, 'a', encoding="utf-8") as sfile:
sfile.write(str(rmse) + '\n')
RMSE.append(rmse)
rmse_avg = np.average(RMSE)
print('RMSE average: ' + str(rmse_avg))
if __name__ == "__main__":
main()
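# Pipeline recap (as implemented above): the SVM labels peptides with
# MBIC <= 64 as class 1 and the rest as class 0; the SVR is trained only on
# low-MBIC training peptides, predicts MBIC for test peptides the SVM marks
# as class 1, and the reported average RMSE covers 20 repeats of 5-fold CV.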
| 35.294737 | 81 | 0.596779 | 861 | 6,706 | 4.325203 | 0.220674 | 0.031955 | 0.032223 | 0.012889 | 0.311762 | 0.256176 | 0.1442 | 0.121643 | 0.091568 | 0.055317 | 0 | 0.008141 | 0.230689 | 6,706 | 189 | 82 | 35.481481 | 0.713704 | 0.233224 | 0 | 0.141667 | 0 | 0 | 0.045588 | 0.026414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.091667 | 0 | 0.116667 | 0.008333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5abddcac686664ae3a44fc39af000fbbf1daafbd | 874 | py | Python | ax/benchmark2/__init__.py | lyhyl/Ax | 44384a0cb1a622c9e395c95f683cfee25c7b61f6 | [
"MIT"
] | null | null | null | ax/benchmark2/__init__.py | lyhyl/Ax | 44384a0cb1a622c9e395c95f683cfee25c7b61f6 | [
"MIT"
] | null | null | null | ax/benchmark2/__init__.py | lyhyl/Ax | 44384a0cb1a622c9e395c95f683cfee25c7b61f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ax.benchmark2.benchmark import (
benchmark_full_run,
benchmark_replication,
benchmark_test,
)
from ax.benchmark2.benchmark_method import BenchmarkMethod
from ax.benchmark2.benchmark_problem import (
BenchmarkProblem,
SingleObjectiveBenchmarkProblem,
MultiObjectiveBenchmarkProblem,
)
from ax.benchmark2.benchmark_result import BenchmarkResult, AggregatedBenchmarkResult
__all__ = [
"BenchmarkMethod",
"BenchmarkProblem",
"SingleObjectiveBenchmarkProblem",
"MultiObjectiveBenchmarkProblem",
"BenchmarkResult",
"AggregatedBenchmarkResult",
"benchmark_replication",
"benchmark_test",
"benchmark_full_run",
]
| 28.193548 | 85 | 0.772311 | 83 | 874 | 7.951807 | 0.554217 | 0.036364 | 0.09697 | 0.151515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006766 | 0.154462 | 874 | 30 | 86 | 29.133333 | 0.886333 | 0.21968 | 0 | 0 | 0 | 0 | 0.273264 | 0.15805 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5abe5b6784894f2e606b06d1d7978dc1a255825c | 724 | py | Python | setup.py | nvaytet/metatoenv | 6d0b5f1093f4042d63f8acad435f0953633f6821 | [
"BSD-3-Clause"
] | null | null | null | setup.py | nvaytet/metatoenv | 6d0b5f1093f4042d63f8acad435f0953633f6821 | [
"BSD-3-Clause"
] | null | null | null | setup.py | nvaytet/metatoenv | 6d0b5f1093f4042d63f8acad435f0953633f6821 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, Command
from distutils.command.build_py import build_py
with open('README.md') as infile:
long_description = infile.read()
from metatoenv import __version__
setup(
name='metatoenv',
version=__version__,
description=
'Generate a conda environment file from a conda meta.yaml recipe',
long_description=long_description,
url='https://github.com/nvaytet/metatoenv',
license='BSD-3-Clause',
author='Neil Vaytet',
packages=['metatoenv'],
provides=['metatoenv'],
scripts=['scripts/metatoenv'],
cmdclass={'build_py': build_py},
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
],
)
| 25.857143 | 70 | 0.685083 | 82 | 724 | 5.865854 | 0.621951 | 0.058212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001706 | 0.190608 | 724 | 27 | 71 | 26.814815 | 0.819113 | 0 | 0 | 0 | 0 | 0 | 0.346685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5abf1554e4e83fbb167495e7bf4f154fc338e021 | 28,931 | py | Python | git_timestamp/timestamp.py | zeitgitter/git-timestamp | 44d68c13036ba706d1b2d1d25773427b474fa39e | [
"MIT"
] | 16 | 2020-02-16T03:21:22.000Z | 2021-12-17T19:22:56.000Z | git_timestamp/timestamp.py | zeitgitter/git-timestamp | 44d68c13036ba706d1b2d1d25773427b474fa39e | [
"MIT"
] | 1 | 2021-11-02T10:07:18.000Z | 2021-11-02T10:07:18.000Z | git_timestamp/timestamp.py | zeitgitter/git-timestamp | 44d68c13036ba706d1b2d1d25773427b474fa39e | [
"MIT"
] | 2 | 2020-02-16T03:21:26.000Z | 2021-04-05T17:19:05.000Z | #!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
# (keep hashbang line for `make install`)
#
# git timestamp — Zeitgitter GIT Timestamping client
#
# Copyright (C) 2019-2021 Marcel Waldvogel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# This has not been modularized for ease of installation
import configargparse
import distutils.util
import os
import re
import sys
import tempfile
import time
import traceback
# Provided e.g. by `pip install python-gnupg` (try with `pip3` if `pip` does not work)
import gnupg
import pygit2 as git
import requests
import deltat
VERSION = '1.1.0'
class GitArgumentParser(configargparse.ArgumentParser):
"""Insert git config options between command line and default.
WARNING: There is no way to handle custom actions correctly by default, so
your custom actions need to include a `convert_default(value)` method."""
def __init__(self, *args, **kwargs):
super(GitArgumentParser, self).__init__(*args, **kwargs)
def repo_config(self, key):
"""`repo_config(key)` is similar to `repo.config[key]`, but `key` can
be a comma-separated list of keys. It returns the value of the first
which exists or raises `KeyError` if none is set.
"""
for k in key.split(','):
if k in repo.config:
return repo.config[k]
raise KeyError("Key%s `%s` not in git config" % ('s' if ',' in key else "", key))
def add_argument(self, *args, **kwargs):
global repo
if repo is None and 'gitopt' in kwargs:
# Called outside a repo (maybe for --help or --version):
# Ignore repo options
del kwargs['gitopt']
elif 'gitopt' in kwargs:
if 'help' in kwargs:
kwargs['help'] += '. '
else:
kwargs['help'] = ''
gitopt = kwargs['gitopt']
try:
if 'action' in kwargs and issubclass(kwargs['action'],
configargparse.Action):
try:
val = kwargs['action'].convert_default(
self.repo_config(gitopt))
except AttributeError:
raise NotImplementedError("Custom action `%r' passed "
"to GitArgumentParser does not support "
"`convert_default()' method." % kwargs['action'])
else:
val = self.repo_config(gitopt)
kwargs['help'] += "Defaults to '%s' from `git config %s`" % (
val, gitopt.replace(',', ' or '))
if 'default' in kwargs:
kwargs['help'] += "; fallback default: '%s'" % kwargs['default']
kwargs['default'] = val
if 'required' in kwargs:
del kwargs['required']
except KeyError:
kwargs['help'] += "Can be set by `git config %s`" % gitopt
if 'default' in kwargs:
kwargs['help'] += "; fallback default: '%s'" % kwargs['default']
del kwargs['gitopt']
return super(GitArgumentParser, self).add_argument(*args, **kwargs)
add = add_argument
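# Illustrative sketch (an assumption, not in the original source): how a
# `gitopt`-aware option resolves its default. With
# `git config timestamp.server https://example.org` set, the option below
# would default to that value instead of the hard-coded fallback:
#
#   parser = GitArgumentParser()
#   parser.add('--server', default='https://gitta.zeitgitter.net',
#              gitopt='timestamp.server')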
def asciibytes(data):
"""For Python 2/3 compatibility:
If it is 'bytes' already, do nothing, otherwise convert to ASCII Bytes"""
if isinstance(data, bytes):
return data
else:
return data.encode('ASCII')
def timestamp_branch_name(fields):
"""Return the first field except 'www', 'igitt', '*stamp*', 'zeitgitter'
'localhost:8080' is returned as 'localhost-8080'"""
for f in fields:
i = f.replace(':', '-')
if (i != '' and i != 'www' and i != 'igitt' and i != 'zeitgitter'
and 'stamp' not in i and valid_name(i)):
return i + '-timestamps'
return 'zeitgitter-timestamps'
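# Example behavior (a hedged sketch, not in the original file):
#   timestamp_branch_name(['gitta', 'zeitgitter', 'net'])  -> 'gitta-timestamps'
#   timestamp_branch_name(['www', 'example', 'org'])       -> 'example-timestamps'
#   timestamp_branch_name(['www', 'zeitgitter'])           -> 'zeitgitter-timestamps'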
class DefaultTrueIfPresent(configargparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
values = True
else:
try:
values = self.convert_default(values)
except ValueError:
raise configargparse.ArgumentError(
self, "Requires boolean value")
setattr(namespace, self.dest, values)
@classmethod
def convert_default(cls, value):
return bool(distutils.util.strtobool(value))
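# Behavioral sketch (illustrative, not part of the original source): with
#   parser.add('--quiet', nargs='?', action=DefaultTrueIfPresent)
# a bare `--quiet` yields True, `--quiet no` yields False, and an
# unparsable value such as `--quiet maybe` raises an argument error.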
server_aliases = {
"gitta": "gitta.zeitgitter.net",
"diversity": "diversity.zeitgitter.net"
}
def expanded_aliases():
return ', '.join(map(lambda t: "%s → %s" % t, server_aliases.items()))
def get_args():
"""Parse command line and git config parameters"""
parser = GitArgumentParser(
auto_env_var_prefix='timestamp_',
add_help=False,
description="""Interface to Zeitgitter, the network of
independent GIT timestampers.""",
epilog="""`--tag` takes precedence over `--branch`.
When in doubt, use `--tag` for single/rare timestamping,
and `--branch` for frequent timestamping.
`bool` values can be specified as true/false/yes/no/0/1.
Arguments with optional `bool` options default to true if
the argument is present, false if absent.
Environment variable `ZEITGITTER_FAKE_TIME` can be used for
repeatable tests against a local Zeitgitter server under test,
see there.""")
parser.add('--help', '-h',
action='help',
help="""Show this help message and exit. When called as
'git timestamp' (space, not dash), use `-h`, as `--help` is
captured by `git` itself.""")
parser.add('--version',
action='version',
version="git timestamp v%s" % VERSION,
help="Show program's version number and exit")
parser.add('--tag',
help="Create a new timestamped tag named TAG")
parser.add('--branch',
gitopt='timestamp.branch',
help="""Create a timestamped commit in branch BRANCH,
with identical contents as the specified commit.
Default name derived from servername, appending
`-timestamps`, and, possibly, by the effects of
`--append-branch-name`.""")
parser.add('--server',
default='https://gitta.zeitgitter.net',
gitopt='timestamp.server',
help="""Comma-separated list of Zeitgitter servers to obtain timestamps from. 'https://'
is optional. The following aliases are supported: """
+ expanded_aliases())
parser.add('--interval',
default='0s',
gitopt='timestamp.interval',
help="""Delay between timestamping against the different
timestampers. For consistent ordering of timestamps,
set this to at least <maximum clock skew>+1s.""")
parser.add('--append-branch-name',
default=True,
action=DefaultTrueIfPresent,
metavar='bool',
gitopt='timestamp.append-branch-name',
help="""Whether to append the branch name of the current branch
to the timestamp branch name, i.e., create per-branch
timestamp branches. (Default branch name will never be
appended.)""")
parser.add('--default-branch',
gitopt='timestamp.defaultBranch',
default="main,master",
help="""Comma-separated list of default branch names, i.e.
those, where the branch name will not automatically be
appended to. `git config init.defaultBranch`, if it exists,
is always appended to this list.""")
parser.add('--gnupg-home',
gitopt='timestamp.gnupg-home',
help="Where to store timestamper public keys")
parser.add('--enable',
nargs='?',
action=DefaultTrueIfPresent,
metavar='bool',
gitopt='timestamp.enable',
help="""Forcibly enable/disable timestamping operations; mainly
for use in `git config`""")
parser.add('--require-enable',
action='store_true',
help="""Disable operation unless `git config timestamp.enable`
               has explicitly been set to true""")
parser.add('--quiet', '-q',
nargs='?',
action=DefaultTrueIfPresent,
metavar='bool',
gitopt='timestamp.quiet',
help="Suppress diagnostic messages, only print fatal errors")
parser.add('commit',
nargs='?',
default='HEAD',
metavar='COMMIT',
gitopt='timestamp.commit-branch',
help="""Which commit-ish to timestamp. Must be a branch name
for branch timestamps with `--append-branch-name`""")
arg = parser.parse_args()
arg.interval = deltat.parse_time(arg.interval)
arg.default_branch = arg.default_branch.split(',')
try:
arg.default_branch.append(repo.config['init.defaultBranch'])
except KeyError:
pass
if arg.enable == False:
sys.exit("Timestamping explicitely disabled")
if arg.require_enable and arg.enable != True:
sys.exit("Timestamping not explicitely enabled")
return arg
def ensure_gnupg_ready_for_scan_keys():
"""`scan_keys()` on older GnuPG installs returns an empty list when
`~/.gnupg/pubring.kbx` has not yet been created. `list_keys()` or most
    other commands will create it. The query below is chosen to match no key
    (for speed). Probing for the existence of `pubring.kbx` would be faster,
    but would require guessing the path of GnuPG-Home."""
gpg.list_keys(keys='arbitrary.query@creates.keybox')
def validate_key_and_import(text, args):
"""Is this a single key? Then import it"""
ensure_gnupg_ready_for_scan_keys()
f = tempfile.NamedTemporaryFile(mode='w', delete=False)
f.write(text)
f.close()
info = gpg.scan_keys(f.name)
os.unlink(f.name)
if len(info) != 1 or info[0]['type'] != 'pub' or len(info[0]['uids']) == 0:
sys.exit("Invalid key returned\n"
"Maybe not a Zeitgitter server or ~/.gnupg permission problem")
res = gpg.import_keys(text)
count = res.count # pylint: disable=maybe-no-member
if count == 1 and not args.quiet:
print("Imported new key %s: %s" %
(info[0]['keyid'], info[0]['uids'][0]))
return (info[0]['keyid'], info[0]['uids'][0])
def get_global_config_if_possible():
"""Try to return global git configuration, which normally lies in
`~/.gitconfig`.
However (https://github.com/libgit2/pygit2/issues/915),
`get_global_config()` fails, if the underlying file does not
exist yet. (The [paths may be
determined](https://github.com/libgit2/pygit2/issues/915#issuecomment-503300141)
by
`pygit2.option(pygit2.GIT_OPT_GET_SEARCH_PATH, pygit2.GIT_CONFIG_LEVEL_GLOBAL)`
and similar.)
Therefore, we do not simply `touch ~/.gitconfig` first, but
1. try `get_global_config()` (raises `IOError` in Python2, `OSError`
in Python3),
2. try `get_xdg_config()` (relying on the alternative global location
`$XDG_CONFIG_HOME/git/config`, typically aka `~/.config/git/config`
(this might fail due to the file not being there either (`OSError`,
`IOError`), or because the installed `libgit2`/`pygit2` is too old
       (`AttributeError`; function added in 2014 only)),
3. `touch ~/.gitconfig` and retry `get_global_config()`, and, as fallback
4. use the repo's `.git/config`, which should always be there."""
try:
return git.Config.get_global_config() # 1
except (IOError, OSError):
try:
return git.Config.get_xdg_config() # 2
except (IOError, OSError, AttributeError):
try:
sys.stderr.write("INFO: Creating global .gitconfig\n")
with open(os.path.join(
git.option( # pylint: disable=maybe-no-member
git.GIT_OPT_GET_SEARCH_PATH, # pylint: disable=maybe-no-member
git.GIT_CONFIG_LEVEL_GLOBAL), # pylint: disable=maybe-no-member
'.gitconfig'), 'a'):
pass
return git.Config.get_global_config() # 3
except (IOError, OSError):
sys.stderr.write("INFO: Cannot record key ID in global config,"
" falling back to repo config\n")
return repo.config # 4
# Not reached
def get_keyid(args):
"""Return keyid/fullname from git config, if known.
Otherwise, request it from server and remember TOFU-style"""
keyname = args.server
if keyname.startswith('http://'):
keyname = keyname[7:]
elif keyname.startswith('https://'):
keyname = keyname[8:]
while keyname.endswith('/'):
keyname = keyname[0:-1]
# Replace everything outside 0-9a-z with '-':
keyname = ''.join(map(lambda x:
x if (x >= '0' and x <= '9') or (x >= 'a' and x <= 'z') else '-', keyname))
try:
keyid = repo.config['timestamper.%s.keyid' % keyname]
keys = gpg.list_keys(keys=keyid)
if len(keys) == 0:
sys.stderr.write("WARNING: Key %s missing in keyring;"
" refetching timestamper key\n" % keyid)
raise KeyError("GPG Key not found") # Evil hack
return (keyid, repo.config['timestamper.%s.name' % keyname])
except KeyError:
# Obtain key in TOFU fashion and remember keyid
r = requests.get(args.server, params={'request': 'get-public-key-v1'},
timeout=30)
quit_if_http_error(args.server, r)
(keyid, name) = validate_key_and_import(r.text, args)
if not os.getenv('FORCE_GIT_REPO_CONFIG'):
gcfg = get_global_config_if_possible()
else:
gcfg = repo.config
gcfg['timestamper.%s.keyid' % keyname] = keyid
gcfg['timestamper.%s.name' % keyname] = name
return (keyid, name)
def sig_time():
"""Current time, unless in test mode"""
return int(os.getenv('ZEITGITTER_FAKE_TIME', time.time()))
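# Hedged note (the example value is an assumption): for repeatable tests,
#   ZEITGITTER_FAKE_TIME=1500000000 git timestamp ...
# pins sig_time() so the ±30 s checks below become deterministic.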
def validate_timestamp(stamp):
"""Is this timestamp within ± of now?"""
now = sig_time()
# Allow a ±30 s window
return stamp > now - 30 and stamp < now + 30
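# e.g. (illustrative): validate_timestamp(sig_time())        -> True
#                      validate_timestamp(sig_time() - 3600) -> False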
def time_str(seconds):
"""Format Unix timestamp in ISO format"""
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(seconds))
def validate_timestamp_zone_eol(header, text, offset):
"""Does this line end with a current timestamp and GMT?
Returns start of next line."""
stamp = text[offset:offset + 10]
try:
istamp = int(stamp)
sigtime = sig_time()
if not validate_timestamp(istamp):
sys.exit("Ignoring returned %s timestamp (%s) as possible falseticker\n"
"(off by %d seconds compared to this computer's time; check clock)"
% (header, time_str(istamp), istamp - sigtime))
except ValueError:
sys.exit("Returned %s timestamp '%s' is not a number" %
(header, stamp))
tz = text[offset + 10:offset + 17]
if tz != ' +0000\n':
sys.exit("Returned %s timezone is not GMT or not at end of line,\n"
"but '%s' instead of '%s'"
% (header, repr(tz), repr(' +0000\n')))
return offset + 17
def verify_signature_and_timestamp(keyid, signed, signature, args):
"""Is the signature valid
and the signature timestamp within range as well?"""
f = tempfile.NamedTemporaryFile(mode='w', delete=False)
f.write(signature)
f.close()
verified = gpg.verify_data(f.name, signed)
if not verified.valid:
sys.exit("Not a valid OpenPGP signature")
os.remove(f.name)
if not validate_timestamp(int(verified.sig_timestamp)):
sigtime = sig_time()
sys.exit("Signature timestamp (%d, %s) too far off now (%d, %s)" %
(verified.sig_timestamp, time_str(verified.sig_timestamp),
sigtime, time_str(sigtime)))
if keyid != verified.key_id and keyid != verified.pubkey_fingerprint:
sys.exit("Received signature with key ID %s; but expected %s -- refusing" %
(verified.key_id, keyid))
def validate_tag(text, commit, keyid, name, args):
"""Check this tag head to toe"""
if len(text) > 8000:
sys.exit("Returned tag too long (%d > 8000)" % len(text))
if not re.match('^[ -~\n]*$', text, re.MULTILINE):
sys.exit("Returned tag does not only contain ASCII chars")
lead = '''object %s
type commit
tag %s
tagger %s ''' % (commit.id, args.tag, name)
if not text.startswith(lead):
sys.exit("Expected signed tag to start with:\n"
"> %s\n\nInstead, it started with:\n> %s\n"
% (lead.replace('\n', '\n> '), text.replace('\n', '\n> ')))
pos = validate_timestamp_zone_eol('tagger', text, len(lead))
if text[pos] != '\n':
sys.exit("Signed tag has unexpected data after 'tagger' header")
pgpstart = text.find('\n-----BEGIN PGP SIGNATURE-----\n\n', len(lead))
if pgpstart >= 0:
signed = asciibytes(text[:pgpstart + 1])
signature = text[pgpstart + 1:]
verify_signature_and_timestamp(keyid, signed, signature, args)
else:
sys.exit("No OpenPGP signature found")
def quit_if_http_error(server, r):
if r.status_code == 301:
sys.exit("Timestamping server URL changed from %s to %s\n"
"Please change this on the command line(s) or run\n"
" git config [--global] timestamp.server %s"
% (server, r.headers['Location'], r.headers['Location']))
if r.status_code != 200:
sys.exit("Timestamping request failed; server responded with %d %s"
% (r.status_code, r.reason))
def timestamp_tag(repo, keyid, name, args):
"""Obtain and add a signed tag"""
try:
commit = repo.revparse_single(args.commit)
except KeyError as e:
sys.exit("No such revision: '%s'" % (e,))
if not valid_name(args.tag):
sys.exit("Tag name '%s' is not valid for timestamping" % args.tag)
try:
r = repo.lookup_reference('refs/tags/' + args.tag)
sys.exit("Tag '%s' already in use" % args.tag)
except KeyError:
pass
try:
r = requests.post(args.server,
data={
'request': 'stamp-tag-v1',
'commit': commit.id,
'tagname': args.tag
}, allow_redirects=False)
quit_if_http_error(args.server, r)
validate_tag(r.text, commit, keyid, name, args)
tagid = repo.write(
git.GIT_OBJ_TAG, # pylint: disable=maybe-no-member
r.text)
repo.create_reference('refs/tags/%s' % args.tag, tagid)
except requests.exceptions.ConnectionError as e:
sys.exit("Cannot connect to server: %s" % e)
def validate_branch(text, keyid, name, data, args):
"""Check this branch commit head to toe"""
if len(text) > 8000:
sys.exit("Returned branch commit too long (%d > 8000)" % len(text))
if not re.match('^[ -~\n]*$', text, re.MULTILINE):
sys.exit("Returned branch commit does not only contain ASCII chars")
lead = 'tree %s\n' % data['tree']
if 'parent' in data:
lead += 'parent %s\n' % data['parent']
lead += '''parent %s
author %s ''' % (data['commit'], name)
if not text.startswith(lead):
sys.exit("Expected signed branch commit to start with:\n"
"> %s\n\nInstead, it started with:\n> %s\n"
% (lead.replace('\n', '\n> '), text.replace('\n', '\n> ')))
pos = validate_timestamp_zone_eol('tagger', text, len(lead))
follow = 'committer %s ' % name
if not text[pos:].startswith(follow):
sys.exit("Committer in signed branch commit does not match")
pos = validate_timestamp_zone_eol('committer', text, pos + len(follow))
if not text[pos:].startswith('gpgsig '):
sys.exit("Signed branch commit missing 'gpgsig' after 'committer'")
sig = re.match('^-----BEGIN PGP SIGNATURE-----\n \n'
'[ -~\n]+\n -----END PGP SIGNATURE-----\n\n',
text[pos + 7:], re.MULTILINE)
if not sig:
sys.exit("Incorrect OpenPGP signature in signed branch commit")
signature = sig.group()
# Everything except the signature
signed = asciibytes(text[:pos] + text[pos + 7 + sig.end() - 1:])
signature = signature.replace('\n ', '\n')
verify_signature_and_timestamp(keyid, signed, signature, args)
def valid_name(name):
"""Can be sanely, universally stored as file name.
pygit2.reference_is_valid_name() would be better, but is too new
[(2018-10-17)](https://github.com/libgit2/pygit2/commit/1a389cc0ba360f1fd53f1352da41c6a2fae92a66)
to rely on being available."""
return (re.match('^[_a-z][-._a-z0-9]{,99}$', name, re.IGNORECASE)
and '..' not in name and not '\n' in name)
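# Illustrative examples (not part of the original source):
#   valid_name('feature-x')  -> truthy (matches the pattern)
#   valid_name('bad..name')  -> falsy  ('..' is rejected)
#   valid_name('9starts')    -> falsy  (must start with a letter or '_')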
def append_branch_name(repo, commit_name, branch_name, default_branches):
"""Appends current branch name if not the default branch"""
explanation = "for (implicit) options `--branch` and `--append-branch-name`"
if commit_name == 'HEAD':
try:
comref = repo.lookup_reference(commit_name)
comname = comref.target
except git.InvalidSpecError: # pylint: disable=maybe-no-member
            # 1. If HEAD or its target is invalid, we end up here
sys.exit("Invalid HEAD " + explanation)
# Two more options remain:
# 2. If HEAD points to a branch, then we now have its name (a `str`
# starting with 'refs/heads/') and can proceed;
# 3. if it is detached, it points to a commit (a `Oid`) and we fail;
# 4. there might be some other cases, which should fail as well.
# To be able to test for case 2, we convert `comname` to `str`.
if str(comname).startswith('refs/heads/'):
comname = comname[len('refs/heads/'):]
else:
sys.exit(("HEAD must point to branch, not %s\n" + explanation)
% comname)
else:
# 5. Explicit and non-HEAD commit given; check for branch name only: proceed;
try:
comref = repo.lookup_reference('refs/heads/' + commit_name)
comname = commit_name # Branch name itself
except (KeyError, git.InvalidSpecError): # pylint: disable=maybe-no-member
# 6. Explicit commit given, but it's neither HEAD nor tail^H^H^H^H
# a branch: fail
sys.exit(("%s must be a branch name " + explanation)
% commit_name)
# Now that we know which branch to timestamp (to), construct it.
if comname in default_branches:
return branch_name
else:
extended_name = "%s-%s" % (branch_name, comname)
if valid_name(extended_name):
return extended_name
else:
sys.exit(("Branch name %s is not valid for timestamping\n"
"(constructed from base timestamp branch %s and "
"source branch %s)\n" + explanation)
% (extended_name, branch_name, comname))
def timestamp_branch(repo, keyid, name, args, first):
"""Obtain and add branch commit; create/update branch head"""
# If the base name is already invalid, it cannot become valid by appending
if not valid_name(args.branch):
sys.exit("Branch name %s is not valid for timestamping" %
args.branch)
if args.append_branch_name:
args.branch = append_branch_name(repo, args.commit, args.branch, args.default_branch)
try:
commit = repo.revparse_single(args.commit)
except KeyError as e:
sys.exit("No such revision: '%s'" % (e,))
branch_head = None
data = {
'request': 'stamp-branch-v1',
'commit': commit.id,
'tree': commit.tree.id
}
try:
branch_head = repo.lookup_reference('refs/heads/' + args.branch)
if branch_head.target == commit.id:
# Would create a merge commit with the same parent twice
sys.exit("Cannot timestamp head of timestamp branch to itself")
data['parent'] = branch_head.target
try:
if (repo[branch_head.target].parent_ids[0] == commit.id or
repo[branch_head.target].parent_ids[1] == commit.id):
sys.exit("Already timestamped commit %s to branch %s" %
(commit.id.hex, args.branch))
except IndexError:
pass
except KeyError:
pass
if not first:
time.sleep(args.interval.total_seconds())
try:
r = requests.post(args.server, data=data, allow_redirects=False)
quit_if_http_error(args.server, r)
validate_branch(r.text, keyid, name, data, args)
commitid = repo.write(
git.GIT_OBJ_COMMIT, # pylint: disable=maybe-no-member
r.text)
repo.create_reference('refs/heads/' + args.branch,
commitid, force=True)
except requests.exceptions.ConnectionError as e:
sys.exit("Cannot connect to server: %s" % e)
def main():
global repo, gpg
requests.__title__ = 'git-timestamp/%s %s' % (VERSION, requests.__title__)
try:
# Depending on the version of pygit2, `git.discover_repository()`
# returns `None` or raises `KeyError`
path = git.discover_repository( # pylint: disable=maybe-no-member
os.getcwd())
except KeyError:
path = None
if path is not None:
repo = git.Repository(path)
else:
repo = None
args = get_args()
# Only check after parsing the arguments, so --version and --help work
if repo is None:
sys.exit("Not a git repository")
try:
gpg = gnupg.GPG(gnupghome=args.gnupg_home)
except TypeError:
traceback.print_exc()
sys.exit("*** `git timestamp` needs `python-gnupg`"
" module from PyPI, not `gnupg`\n"
" Possible remedy: `pip uninstall gnupg;"
" pip install python-gnupg`\n"
" (try `pip2`/`pip3` if it does not work with `pip`)")
if args.tag is not None or args.branch is not None:
# Single tag or branch against one timestamping server
if ',' in args.server:
(server, _) = args.server.split(',', 1)
args.server = server
print(f"WARNING: Cannot timestamp single tag/branch against"
" multiple servers;\nonly timestamping against {server}")
(keyid, name) = get_keyid(args)
if args.tag:
timestamp_tag(repo, keyid, name, args)
else:
timestamp_branch(repo, keyid, name, args, True)
else:
# Automatic branch, with support for multiple timestamping servers
success = True
first = True
for server in args.server.split(','):
if server in server_aliases:
server = server_aliases[server]
if ':' not in server:
server = 'https://' + server
fields = server.replace('/', '.').split('.')
args.branch = timestamp_branch_name(fields[1:])
args.server = server
try:
(keyid, name) = get_keyid(args)
timestamp_branch(repo, keyid, name, args, first)
first = False # Only on successful timestamp
except SystemExit as e:
                sys.stderr.write(str(e.code) + '\n')
success = False
if not success:
sys.exit(1)
if __name__ == "__main__":
main()
| 41.868307 | 103 | 0.587432 | 3,555 | 28,931 | 4.708861 | 0.198312 | 0.01589 | 0.009677 | 0.010753 | 0.201075 | 0.151314 | 0.130645 | 0.090502 | 0.079092 | 0.073238 | 0 | 0.009598 | 0.294148 | 28,931 | 690 | 104 | 41.928986 | 0.809951 | 0.186305 | 0 | 0.209213 | 0 | 0 | 0.283112 | 0.013133 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049904 | false | 0.011516 | 0.03071 | 0.003839 | 0.130518 | 0.009597 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac1bdcd4d5d7d445d4e50d14f8bb0137e1a3a22 | 7,335 | py | Python | src/environments/finite_diff_wave.py | jaberkow/Insight_Project | 5c24e39fa5ab949e5a99231758ac77d21f566905 | [
"MIT"
] | 6 | 2019-07-10T09:33:44.000Z | 2019-08-28T11:28:15.000Z | src/environments/finite_diff_wave.py | jaberkow/WaveRL | 5c24e39fa5ab949e5a99231758ac77d21f566905 | [
"MIT"
] | 4 | 2019-06-18T00:13:25.000Z | 2019-08-05T11:48:03.000Z | src/environments/finite_diff_wave.py | jaberkow/Insight_Project | 5c24e39fa5ab949e5a99231758ac77d21f566905 | [
"MIT"
] | 3 | 2019-08-15T06:43:31.000Z | 2020-09-03T05:05:17.000Z | """
Some elements of the finite difference routines were adapted from HP Langtangen's wonderful book on the FD method for python:
https://hplgit.github.io/fdm-book/doc/pub/book/html/._fdm-book-solarized001.html
"""
import numpy as np
from scipy.integrate import simps
class Wave1D:
"""
    A utility class for simulating the wave equation in 1 dimension using a finite-difference scheme.
"""
def __init__(self,config):
"""
        Constructor for the 1-dimensional wave system.
Inputs:
config: A dict containing parameters for the system, which must have the following keys:
time_interval: (float > 0) the temporal interval between time steps
wave_speed: (float > 0) the speed of standing waves on the bridge, related to material tension
            system_length: (float > 0) the length of the system
num_lattice_points: (int > 0) how many discrete points along the length of the system to use for
the finite difference scheme
num_force_points: (int > 0) how many pistons the system has
            force_width: (float > 0) how wide the Gaussian spread of each piston is
"""
self.dt = config['time_interval']
self.c_speed = config['wave_speed']
self.L = config['system_length']
self.Nx = config['num_lattice_points']
# How many points along the domain can impulse force be applied
self.num_force_points = config['num_force_points']
# Set the locations of the force application
self.force_locations = np.linspace(0.0,self.L,self.num_force_points+2)[1:self.num_force_points+1]
# How wide is the profile of each impulse force, must be > 0
self.force_width = config['force_width']
# Scale the force width by system length
self.force_width *= self.L
# The lattice spacing
self.dx = float(self.L)/float(self.Nx)
# Mesh points in space
self.x_mesh = np.linspace(0.0,self.L,self.Nx+1)
# The courant number
self.C = self.c_speed *self.dt/self.dx
self.C2 = self.C**2 #helper number
# Recalibrate the resolutions to account for rounding
self.dx = self.x_mesh[1] - self.x_mesh[0]
# We set up the conditions of the system before warmup period
# The system is always initially at rest
self.Velocity_0 = lambda x: 0
# We assume the system starts completely flat
self.Initial_Height = lambda x: 0
# Allocate memory for the recursive solution arrays
self.height = np.zeros(self.Nx + 1) # Solution array at new time level
self.height_n = np.zeros(self.Nx + 1) # Solution at 1 time level back
self.height_nm1 = np.zeros(self.Nx + 1) # Solution at 2 time levels back
self.height_traj=[]
self.action_traj=[]
self.reset()
def reset(self):
"""
Resets the state of the wave system
"""
# We reset the time and step index
self.t = 0
self.n = 0
# We set the force vals to zero
self.force_vals = np.zeros(self.num_force_points)
# We set the initial condition of the solution 1 time level back
for i in range(0,self.Nx+1):
self.height_n[i]=self.Initial_Height(self.x_mesh[i])
# We do a special first step for the finite difference scheme
for i in range(1,self.Nx):
self.height[i] =self.height_n[i] + self.dt*self.Velocity_0(self.x_mesh[i])
self.height[i]+=0.5*self.C2*(self.height_n[i-1] - 2*self.height_n[i] + self.height_n[i+1])
self.height[i]+=0.5*(self.dt**2)*self.impulse_term(self.x_mesh[i])
# Force boundary conditions
self.height[0]=0
self.height[self.Nx]=0
# Switch solution steps
self.height_nm1[:] = self.height_n
self.height_n[:] = self.height
def single_step(self):
"""
Run a single step of the wave equation finite difference dynamics
"""
self.t += self.dt
self.n += 1
for i in range(1,self.Nx):
self.height[i] = -self.height_nm1[i] + 2*self.height_n[i]
self.height[i] += self.C2*(self.height_n[i-1] - 2*self.height_n[i] + self.height_n[i+1])
self.height[i] += (self.dt**2)*self.impulse_term(self.x_mesh[i])
# Force boundary conditions
self.height[0] = 0
self.height[self.Nx] = 0
# Switch solution steps
self.height_nm1[:] = self.height_n
self.height_n[:] = self.height
def take_in_action(self,action):
"""
This method acts as the interface where the agent applies an action to environment.
For this simulator, it's simply a setter method for the force_vals attribute that
determine the profile of the impulse term.
"""
self.force_vals = np.copy(action)
def impulse_term(self,x):
"""
The function definition for the active damping terms
Inputs:
x - a scalar, position in the domain
force_vals - A vector of shape (self.num_force_points),
the (signed) values of the force at each piston point
"""
return np.sum(self.force_vals*np.exp(-0.5* ((x-self.force_locations)**2 )/self.force_width))
def get_impulse_profile(self):
"""
A utility function for returning an array representing the shape of the resulting impulse
force, this is used for rendering the history of actions taken by the agent.
Inputs:
force_vals - A vector of shape (self.num_force_points),
the (signed) values of the force at each piston point
"""
profile = []
for i in range(self.Nx+1):
profile.append(self.impulse_term(self.x_mesh[i]))
return np.array(profile)
def get_observation(self):
"""
This is an interface that returns the observation of the system, which is modeled
as the state of the wave system for the current timestep, previous timestep, and
twice previous timestep.
Outputs:
observation - An array of shape (1,self.Nx+1,3). observation[0,:,0]= self.height,
observation[0,:,1]=self.height_n, and observation[0,:,2]=self.height_nm1
"""
observation = np.zeros((1,self.Nx+1,3))
observation[0,:,0]= self.height
observation[0,:,1]=self.height_n
observation[0,:,2]=self.height_nm1
return observation
def energy(self):
"""
Computes the internal energy of the system based upon the integral functional for
the 1-D wave equation. Additionally we add an L2 norm regularizer
See http://web.math.ucsb.edu/~grigoryan/124A/lecs/lec7.pdf for details
"""
dudt = (self.height-self.height_nm1)/self.dt # Time derivative
dudx = np.gradient(self.height,self.x_mesh) # Space derivative
space_term = -self.height*np.gradient(dudx,self.x_mesh) # Alternative tension energy
energy_density = dudt**2 + (self.C**2)*(dudx**2)
energy_density += self.height**2 # Regularize with L2 norm
# Energy_density = dudt**2 + (self.c_speed**2)*space_term
return 0.5*simps(energy_density,self.x_mesh)
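# Hedged usage sketch (not part of the original module; the numeric values
# are illustrative assumptions). The config keys mirror those documented in
# Wave1D.__init__:
#
# config = {
#     'time_interval': 0.001,
#     'wave_speed': 1.0,
#     'system_length': 10.0,
#     'num_lattice_points': 100,
#     'num_force_points': 4,
#     'force_width': 0.05,
# }
# wave = Wave1D(config)
# wave.take_in_action(np.zeros(config['num_force_points']))
# for _ in range(100):
#     wave.single_step()
# print(wave.energy())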
| 38.809524 | 125 | 0.625767 | 1,071 | 7,335 | 4.189542 | 0.251167 | 0.095832 | 0.039224 | 0.02407 | 0.267662 | 0.242701 | 0.202362 | 0.171607 | 0.171607 | 0.171607 | 0 | 0.020189 | 0.277437 | 7,335 | 188 | 126 | 39.015957 | 0.826415 | 0.46394 | 0 | 0.138889 | 0 | 0 | 0.023222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.027778 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac23e6adffbb28a348a5b76f4d9393d8fb8087e | 1,977 | py | Python | ejercicios/ahorcado/ahorcado_01.py | carlosviveros/Soluciones | 115f4fa929c7854ca497e4c994352adc64565456 | [
"MIT"
] | 1 | 2022-02-02T04:44:56.000Z | 2022-02-02T04:44:56.000Z | ejercicios/ahorcado/ahorcado_01.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | ejercicios/ahorcado/ahorcado_01.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
Contributor: Carolina Morán
Source:
https://github.com/CarolinaMoran03/Juego-de-ahorcado-con-frase/blob/main/Juego%20de%20ahorcado%20con%20frase
"""
participante = input("Enter the participant's name: ")
print(participante.upper())


def run():
    frases = ["Live your moment",
              "Never underestimate the power of music",
              "Never forget how much your family loved you", "I love my madness",
              "Let no one tell you no", "Carlos Rivera",
              "Don't give me a zero, prof", "The force will be with you"]
    cantidad = len(frases)
    numero = 0
    while numero < 1 or numero > cantidad:
        numero = int(input("Enter the number of the phrase you want to reveal (1 to {c}): ".format(c=cantidad)))
    frase = frases[numero - 1]
    patron = ""
    for i in frase:
        if i == " ":
            patron += " "
        else:
            patron += "_"
    patron = list(patron)
    presentar(patron)
    vidas = 5
    cont = 0
    a = 10
    while vidas > 0:
        letra = input("Enter a letter: ")
        x = 0
        for i in frase:
            if letra.lower() == i.lower():
                patron[x] = letra
            x += 1
        if letra in patron:
            print("Congratulations, you won", a, "points")
            cont += a
            presentar(patron)
            if "_" not in patron:
                print("CONGRATULATIONS", participante.upper(), "you just guessed the phrase")
                print("You earned:", cont, "points")
                break
        if letra not in patron:
            vidas -= 1
            print("Wrong guess, you have", vidas, "attempts left")
            presentar(patron)
    else:
        print("Oof", participante.upper(), "you just lost, like when you lost her")
        print("You have:", cont, "points, thanks for playing")


def presentar(patron):
    p = ""
    for i in patron:
        p = p + i
    print(p)
if __name__ == "__main__":
run() | 29.507463 | 175 | 0.571067 | 239 | 1,977 | 4.682008 | 0.493724 | 0.053619 | 0.016086 | 0.01966 | 0.023235 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016047 | 0.306525 | 1,977 | 67 | 176 | 29.507463 | 0.800146 | 0.103187 | 0 | 0.137255 | 0 | 0 | 0.293718 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0 | 0 | 0.039216 | 0.156863 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac34357cfbf72b629548e23a5587b2da7dd9eb3 | 946 | py | Python | core/model/encoder/encoder_base.py | baophuc27/answer-generation | 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | [
"MIT"
] | 3 | 2021-03-25T12:29:49.000Z | 2021-06-14T13:15:49.000Z | core/model/encoder/encoder_base.py | baophuc27/answer-generation | 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | [
"MIT"
] | null | null | null | core/model/encoder/encoder_base.py | baophuc27/answer-generation | 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | [
"MIT"
] | null | null | null | import torch.nn as nn
from abc import ABC,abstractmethod
class EncoderBase(nn.Module):
@abstractmethod
def __init__(self,pretrained_emb,__C):
"""Constructor of encoder module should take pretrained embedding as
an argument because of later comparison of different types of embeddings.
Args:
pretrained_emb ([Tensor]): Extracted pretrained embedding.
__C (object): Config object
"""
super(EncoderBase,self).__init__()
self.pretrained_emb = pretrained_emb
self.__C = __C
@abstractmethod
def forward(self,question,answer):
"""Base encoder method in full answer generation
Args:
question ([Tensor]): Index of questions after tokenized and padded
answer ([Tensor]): Index of answers after tokenized and padded
Raises:
NotImplementedError
"""
raise NotImplementedError
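# Hedged sketch of a concrete subclass (illustrative; `MyEncoder`, the GRU
# choice, and its sizes are assumptions, not part of this module):
#
# class MyEncoder(EncoderBase):
#     def __init__(self, pretrained_emb, __C):
#         super().__init__(pretrained_emb, __C)
#         self.embedding = nn.Embedding.from_pretrained(pretrained_emb)
#         self.rnn = nn.GRU(pretrained_emb.size(1), 512, batch_first=True)
#
#     def forward(self, question, answer):
#         q, _ = self.rnn(self.embedding(question))
#         a, _ = self.rnn(self.embedding(answer))
#         return q, a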
| 30.516129 | 81 | 0.650106 | 100 | 946 | 5.95 | 0.54 | 0.087395 | 0.060504 | 0.070588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.283298 | 946 | 30 | 82 | 31.533333 | 0.877581 | 0.4926 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac34cb506d949289482e24d712032fea0a5bf81 | 5,912 | py | Python | python/pynamics/frame.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 2 | 2018-08-20T22:01:18.000Z | 2021-04-19T00:50:56.000Z | python/pynamics/frame.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 3 | 2017-10-24T03:10:17.000Z | 2017-10-24T03:15:27.000Z | python/pynamics/frame.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 2 | 2017-03-03T23:04:17.000Z | 2021-03-20T20:33:53.000Z | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.tree_node import TreeNode
from pynamics.vector import Vector
from pynamics.rotation import Rotation, RotationalVelocity
from pynamics.name_generator import NameGenerator
from pynamics.quaternion import Quaternion
import sympy
class Frame(NameGenerator):
def __init__(self,name,system):
super(Frame,self).__init__()
self.connections={}
self.connections['R'] = {}
self.connections['w'] = {}
self.precomputed={}
self.precomputed['R'] = {}
self.precomputed['w'] = {}
self.tree={}
self.tree['R'] = TreeNode(self)
self.tree['w'] = TreeNode(self)
self.reps = {}
self.name = name
self.x = Vector()
self.y = Vector()
self.z = Vector()
self.x_sym = sympy.Symbol(name+'.x')
self.y_sym = sympy.Symbol(name+'.y')
self.z_sym = sympy.Symbol(name+'.z')
self.syms = sympy.Matrix([self.x_sym,self.y_sym,self.z_sym])
self.x.add_component(self,[1,0,0])
self.y.add_component(self,[0,1,0])
self.z.add_component(self,[0,0,1])
r = Rotation(self,self,sympy.Matrix.eye(3),Quaternion(0,0,0,0))
w = RotationalVelocity(self,self,sympy.Number(0)*self.x,Quaternion(0,0,0,0))
self.add_generic(r,'R')
self.add_generic(w,'w')
self.system = system
self.system.add_frame(self)
def add_generic(self,rotation,my_type):
self.connections[my_type][rotation.other(self)] = rotation
def add_precomputed_generic(self,rotation,my_type):
self.precomputed[my_type][rotation.other(self)] = rotation
@property
def principal_axes(self):
return [self.x,self.y,self.z]
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def get_generic(self,other,my_type):
if other in self.connections[my_type]:
return self.connections[my_type][other]
elif other in self.precomputed[my_type]:
return self.precomputed[my_type][other]
else:
path = self.tree['R'].path_to(other.tree['R'])
path = [item.myclass for item in path]
from_frames = path[:-1]
to_frames = path[1:]
if my_type=='R':
items = [from_frame.connections[my_type][to_frame].get_r_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
q_items = [from_frame.connections[my_type][to_frame].get_rq_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
elif my_type=='w':
items = [from_frame.connections[my_type][to_frame].get_w_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
item_final= items.pop(0)
if my_type=='R':
q_item_final= q_items.pop(0)
for item,to_frame in zip(items,to_frames[1:]):
item_final = item*item_final
for q_item,to_frame in zip(q_items,to_frames[1:]):
q_item_final = q_item*q_item_final
result = Rotation(self,to_frame,item_final,q_item_final)
elif my_type=='w':
for item,to_frame in zip(items,to_frames[1:]):
item_final += item
result = RotationalVelocity(self,to_frame,item_final,Quaternion(0,0,0,0))
self.add_precomputed_generic(result,my_type)
to_frame.add_precomputed_generic(result,my_type)
return result
def get_r_to(self,other):
return self.get_generic(other,'R').get_r_to(other)
def get_r_from(self,other):
return self.get_generic(other,'R').get_r_from(other)
def get_rq_to(self,other):
return self.get_generic(other,'R').get_rq_to(other)
def get_rq_from(self,other):
return self.get_generic(other,'R').get_rq_from(other)
def get_w_from(self,other):
return self.get_generic(other,'w').get_w_from(other)
def get_w_to(self,other):
return self.get_generic(other,'w').get_w_to(other)
def set_generic(self,other,item,my_type):
if my_type=='R':
result = Rotation(self, other, item,Quaternion(0,0,0,0))
elif my_type=='w':
result = RotationalVelocity(self, other, item,Quaternion(0,0,0,0))
self.add_generic(result,my_type)
other.add_generic(result,my_type)
def set_parent_generic(self,parent,item,my_type):
self.set_generic(parent,item,my_type)
parent.tree[my_type].add_branch(self.tree[my_type])
def set_child_generic(self,child,item,my_type):
self.set_generic(child,item,my_type)
self.tree[my_type].add_branch(child.tree[my_type])
def set_w(self,other,w):
self.set_child_generic(other,w,'w')
def rotate_fixed_axis(self,fromframe,axis,q,system):
import pynamics.misc_tools
if not all([pynamics.misc_tools.is_literal(item) for item in axis]):
raise(Exception('not all axis variables are constant'))
rotation = Rotation.build_fixed_axis(fromframe,self,axis,q,system)
rotational_velocity = RotationalVelocity.build_fixed_axis(fromframe,self,axis,q,system)
self.set_parent_generic(fromframe,rotation,'R')
self.set_parent_generic(fromframe,rotational_velocity,'w')
self.add_generic(rotation,'R')
self.add_generic(rotational_velocity,'w')
fromframe.add_generic(rotation,'R')
fromframe.add_generic(rotational_velocity,'w')
fromframe.tree['R'].add_branch(self.tree['R'])
fromframe.tree['w'].add_branch(self.tree['w'])
| 36.493827 | 156 | 0.625169 | 826 | 5,912 | 4.238499 | 0.12954 | 0.053128 | 0.008569 | 0.020566 | 0.402742 | 0.32048 | 0.232791 | 0.226792 | 0.17595 | 0.140817 | 0 | 0.008792 | 0.249662 | 5,912 | 161 | 157 | 36.720497 | 0.780433 | 0.019452 | 0 | 0.067227 | 0 | 0 | 0.012785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151261 | false | 0 | 0.067227 | 0.07563 | 0.327731 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac3d5779a8d78c93d249f8739858eed7b56674a | 6,828 | py | Python | couchbase_core/mapper.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 189 | 2015-01-07T18:34:31.000Z | 2022-03-21T17:41:56.000Z | couchbase_core/mapper.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 24 | 2015-05-19T14:00:16.000Z | 2022-03-16T22:01:30.000Z | couchbase_core/mapper.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 60 | 2015-03-10T22:12:50.000Z | 2022-03-07T21:57:40.000Z | from typing import *
import enum
import datetime
import warnings
from couchbase.exceptions import InvalidArgumentException
Src = TypeVar('Src')
Dest = TypeVar('Dest')
Functor = TypeVar('Functor', bound=Callable[[Src], Dest])
SrcToDest = TypeVar('SrcToDest', bound=Callable[[Src], Dest])
DestToSrc = TypeVar('DestToSrc', bound=Callable[[Dest], Src])
class Bijection(Generic[Src, Dest, SrcToDest, DestToSrc]):
def __init__(
self,
src_to_dest, # type: SrcToDest
dest_to_src=None, # type: DestToSrc
parent=None # type: Bijection[Dest,Src]
):
# type: (...) -> None
"""
Bijective mapping for JSON serialisation/deserialisation
:param src_to_dest: callable to convert Src type to Dest
:param dest_to_src: callable to convert Dest type to Src
:param parent: interanl use only - used to construct the inverse
"""
self._src_to_dest = src_to_dest
if parent:
self._inverse = parent
else:
self._inverse = Bijection(dest_to_src, parent=self)
def __neg__(self):
# type: (...) -> Bijection[Dest,Src]
"""
Generate the inverse of this bijection (Dest to Src)
:return: the inverse of this bijection
"""
return self._inverse
def __call__(self,
src # type: Src
):
# type: (...) -> Dest
"""
Return the Src to Dest transform on src
:param src: source to be transformed
:return: transformed data as type Dest
"""
return self._src_to_dest(src)
def identity(input: Src) -> Src:
return input
class Identity(Bijection[Src, Src, identity, identity]):
def __init__(self, type: Type[Src]):
self._type = type
super(Identity, self).__init__(self, self)
def __call__(self, x: Src) -> Src:
if not isinstance(x, self._type):
raise InvalidArgumentException(
"Argument must be of type {} but got {}".format(
self._type, x))
return x
Enum_Type = TypeVar('Enum_Type', bound=enum.Enum)
class EnumToStr(Generic[Enum_Type]):
def __init__(self, type: Type[Enum_Type], enforce=True):
self._type = type
self._enforce = enforce
def __call__(self, src: Enum_Type) -> str:
if not self._enforce and isinstance(
src, str) and src in map(lambda x: x.value, self._type):
warnings.warn("Using deprecated string parameter {}".format(src))
return src
if not isinstance(src, self._type):
raise InvalidArgumentException(
"Argument must be of type {} but got {}".format(
self._type, src))
return src.value
class StrToEnum(Generic[Enum_Type]):
def __init__(self, type: Enum_Type):
self._type = type
def __call__(self, dest: str
) -> Enum_Type:
return self._type(dest)
class StringEnum(
Bijection[Enum_Type, str, EnumToStr[Enum_Type], StrToEnum[Enum_Type]]):
def __init__(self, type: Type[Enum_Type]):
super(StringEnum, self).__init__(EnumToStr(type), StrToEnum(type))
class StringEnumLoose(
Bijection[Enum_Type, str, EnumToStr[Enum_Type], StrToEnum[Enum_Type]]):
def __init__(self, type: Type[Enum_Type]):
"""
Like StringEnum bijection, but allows use of string constants as src (falling back to identity transform)
:param type: type of enum
"""
super(
StringEnumLoose,
self).__init__(
EnumToStr(
type,
False),
StrToEnum(type))
NumberType = TypeVar('NumberType', bound=Union[float, int])
class TimedeltaToSeconds(object):
def __init__(self, dest_type: Type[NumberType]):
self._numtype = dest_type
def __call__(self, td: datetime.timedelta) -> float:
if isinstance(td, (float, int)):
return self._numtype(td)
return self._numtype(td.total_seconds())
def _seconds_to_timedelta(seconds: NumberType) -> datetime.timedelta:
try:
return datetime.timedelta(seconds=seconds)
except (OverflowError, ValueError) as e:
raise InvalidArgumentException(
"Invalid duration arg: {} ".format(seconds)) from e
class Timedelta(Bijection[datetime.timedelta, NumberType,
TimedeltaToSeconds, _seconds_to_timedelta]):
def __init__(self, dest_type: Type[NumberType]):
super(
Timedelta,
self).__init__(
TimedeltaToSeconds(dest_type),
_seconds_to_timedelta)
class Division(Bijection[float, float, float.__mul__, float.__mul__]):
def __init__(self, divisor):
super(Division, self).__init__((1 / divisor).__mul__, divisor.__mul__)
Orig_Mapping = TypeVar(
'OrigMapping', bound=Mapping[str, Mapping[str, Bijection]])
class BijectiveMapping(object):
def __init__(self,
fwd_mapping: Orig_Mapping
):
"""
Bijective mapping for JSON serialisation/deserialisation.
Will calculate the reverse mapping of the given forward mapping.
:param fwd_mapping: the forward mapping from Src to Dest
"""
self.mapping = dict()
self.reverse_mapping = dict()
for src_key, transform_dict in fwd_mapping.items():
self.mapping[src_key] = {}
for dest_key, transform in transform_dict.items():
self.mapping[src_key][dest_key] = transform
self.reverse_mapping[dest_key] = {src_key: -transform}
@staticmethod
def convert(mapping: Orig_Mapping,
raw_info: Mapping[str, Any]) -> Mapping[str, Any]:
converted = {}
for k, v in raw_info.items():
entry = mapping.get(k, {k: Identity(object)})
for dest, transform in entry.items():
try:
converted[dest] = transform(v)
except InvalidArgumentException as e:
raise InvalidArgumentException(
"Problem processing argument {}: {}".format(
k, e.message))
return converted
def sanitize_src(self, src_data):
return src_data
def to_dest(self, src_data):
"""
Convert src data to destination format
:param src_data: source data
:return: the converted data
"""
return self.convert(self.mapping, src_data)
def to_src(self, dest_data):
"""
Convert dest_data to source format
:param dest_data: destination data
:return: the converted data
"""
return self.convert(self.reverse_mapping, dest_data)
| 30.756757 | 113 | 0.605448 | 760 | 6,828 | 5.180263 | 0.185526 | 0.032512 | 0.02794 | 0.01905 | 0.20193 | 0.1651 | 0.139192 | 0.11303 | 0.11303 | 0.080264 | 0 | 0.000208 | 0.295255 | 6,828 | 221 | 114 | 30.895928 | 0.817955 | 0.161248 | 0 | 0.180451 | 0 | 0 | 0.042713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.165414 | false | 0 | 0.037594 | 0.022556 | 0.383459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac41b3cce04df264e1419de46ced2afc4ce1d2c | 2,836 | py | Python | thedoorman/run.py | FocusedSupport/thedoorman | 4f53a921e1bd97d9ff193482e790fa5757f54e7d | [
"MIT"
] | null | null | null | thedoorman/run.py | FocusedSupport/thedoorman | 4f53a921e1bd97d9ff193482e790fa5757f54e7d | [
"MIT"
] | 29 | 2017-03-03T16:21:59.000Z | 2019-03-11T19:20:24.000Z | thedoorman/run.py | FocusedSupport/thedoorman | 4f53a921e1bd97d9ff193482e790fa5757f54e7d | [
"MIT"
] | null | null | null | import threading
import sys
import os
import signal
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "components/slack")))
from slackbot.bot import Bot
from pydispatch import dispatcher
from components.dispatcher.signals import Signals, Senders
import components.devices.doorbell_monitor as dm
import components.devices.camera as cam
import components.devices.lock as lock
import components.devices.gpio_cleanup as gpio
import components.devices.speakers as spkr
import components.devices.speech as speech
import components.slack.slack_sender as ss
import components.slack.slack_uploader as slackUpload
import components.slack.imagebin_uploader as imagebinUpload
import components.slack.imgur_uploader as imgurUpload
import components.slack.user_manager as um
def main():
start_device_processing()
start_slack_processing()
def start_device_processing():
monitor = threading.Thread(target=dm.DoorbellMonitor)
monitor.daemon = True
print("Starting doorbell monitor")
monitor.start()
audio = threading.Thread(target=spkr.Speakers)
audio.daemon = True
print("Starting audio")
audio.start()
tts = threading.Thread(target=speech.Speech)
tts.daemon = True
print("Starting Text to Speech")
tts.start()
camera = threading.Thread(target=cam.Camera)
camera.daemon = True
print("Starting camera")
camera.start()
lock_control = threading.Thread(target=lock.Lock)
lock_control.daemon = True
print("Starting lock control")
lock_control.start()
gpio_cleanup = threading.Thread(target=gpio.GPIOCleanup)
gpio_cleanup.daemon = True
print("Starting GPIO cleanup module")
gpio_cleanup.start()
def start_slack_processing():
sender = threading.Thread(target=ss.SlackSender)
sender.daemon = True
print("Starting Slack Sender")
sender.start()
#slack_uploader = threading.Thread(target=slackUpload.SlackUploader)
#slack_uploader.daemon = True
#print("Starting Slack file uploader")
#slack_uploader.start()
#imagebinUploader = threading.Thread(target=imagebinUpload.ImagebinUploader)
#imagebinUploader.daemon = True
#print("Starting Imagebin Uploader")
#imagebinUploader.start()
imgurUploader = threading.Thread(target=imgurUpload.ImgurUploader)
imgurUploader.daemon = True
print("Starting imgur Uploader")
imgurUploader.start()
bot = Bot()
print("Starting Slack bot")
user_manager = um.UserManager()
user_manager.set_users(bot._client.users)
bot.run()
def cleanup():
print("Caught interrupt...")
dispatcher.send(Signals.CLEANUP, sender=Senders.SLACKBOT)
dispatcher.send(Signals.EXIT, sender=Senders.SLACKBOT)
exit(0)
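# Hedged sketch (not part of the original module; the handler name is an
# assumption): how a component could subscribe to the signals that cleanup()
# emits, using the same pydispatch API:
#
# def on_cleanup(sender):
#     print("cleanup requested by", sender)
#
# dispatcher.connect(on_cleanup, signal=Signals.CLEANUP, sender=Senders.SLACKBOT)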
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
cleanup() | 28.646465 | 93 | 0.744358 | 336 | 2,836 | 6.16369 | 0.244048 | 0.084983 | 0.1014 | 0.111057 | 0.02704 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000419 | 0.159379 | 2,836 | 99 | 94 | 28.646465 | 0.868289 | 0.11213 | 0 | 0 | 0 | 0 | 0.091995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.253521 | 0 | 0.309859 | 0.140845 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac4b51a79d3af0cebbea2eb96498b7f916e244a | 605 | py | Python | python/utils/random-sample-with-probabilities.py | leakycup/misc | 5cce8cbd7057bf2598c8076ffc257606edb7141e | [
"Apache-2.0"
] | null | null | null | python/utils/random-sample-with-probabilities.py | leakycup/misc | 5cce8cbd7057bf2598c8076ffc257606edb7141e | [
"Apache-2.0"
] | null | null | null | python/utils/random-sample-with-probabilities.py | leakycup/misc | 5cce8cbd7057bf2598c8076ffc257606edb7141e | [
"Apache-2.0"
] | null | null | null | import sys
import codecs
import numpy as np
#UTF8Writer = codecs.getwriter('utf8')
#sys.stdout = UTF8Writer(sys.stdout)
input_file = sys.argv[1]
probabilities_file = sys.argv[2]
sample_size = int(sys.argv[3])
input_list = []
probabilities_list = []
with codecs.open(input_file, 'r', 'utf-8') as f:
for line in f:
input_list.append(line.strip())
with codecs.open(probabilities_file, 'r', 'utf-8') as f:
for line in f:
probabilities_list.append(float(line.strip()))
for line in np.random.choice(input_list, p=probabilities_list, size=sample_size, replace=False):
    print(line)
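# Hedged usage sketch (the file names are assumptions):
#   python random-sample-with-probabilities.py items.txt probs.txt 10
# probs.txt holds one probability per line, aligned with items.txt and
# summing to 1.0 (np.random.choice requires this when p= is given).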
| 24.2 | 96 | 0.710744 | 94 | 605 | 4.446809 | 0.414894 | 0.050239 | 0.064593 | 0.043062 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0 | 0.015564 | 0.150413 | 605 | 24 | 97 | 25.208333 | 0.797665 | 0.119008 | 0 | 0.125 | 0 | 0 | 0.022599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac695ccecbc9a0acac17b74afec8f55e9ba28d1 | 1,286 | py | Python | ivy/functional/backends/mxnet/old/linear_algebra.py | Neel-Renavikar/ivy | 644ab189a3a3fc52b1f3f86563226106e549eea3 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/mxnet/old/linear_algebra.py | Neel-Renavikar/ivy | 644ab189a3a3fc52b1f3f86563226106e549eea3 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/mxnet/old/linear_algebra.py | Neel-Renavikar/ivy | 644ab189a3a3fc52b1f3f86563226106e549eea3 | [
"Apache-2.0"
] | null | null | null | """
Collection of MXNet linear algebra functions, wrapped to fit Ivy syntax and signature.
"""
# global
import mxnet as _mx
import numpy as _np
# local
import ivy as _ivy
from typing import Union, Tuple
def matrix_norm(x, p=2, axes=None, keepdims=False):
axes = (-2, -1) if axes is None else axes
if isinstance(axes, int):
        raise Exception('if specified, axes must be a length-2 sequence of ints, '
                        'but found {} of type {}'.format(axes, type(axes)))
return _mx.nd.norm(x, p, axes, keepdims=keepdims)
cholesky = lambda x: _mx.np.linalg.cholesky(x.as_np_ndarray()).as_nd_ndarray()
def vector_to_skew_symmetric_matrix(vector):
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = _mx.nd.expand_dims(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = _mx.nd.zeros(batch_shape + [1, 1])
# BS x 1 x 3
row1 = _mx.nd.concat(*(zs, -a3s, a2s), dim=-1)
row2 = _mx.nd.concat(*(a3s, zs, -a1s), dim=-1)
row3 = _mx.nd.concat(*(-a2s, a1s, zs), dim=-1)
# BS x 3 x 3
return _mx.nd.concat(*(row1, row2, row3), dim=-2)
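# For a batch vector a = (a1, a2, a3), the function above assembles
#
#     [[  0, -a3,  a2],
#      [ a3,   0, -a1],
#      [-a2,  a1,   0]]
#
# i.e. the cross-product matrix [a]_x with [a]_x b = a x b.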
def qr(x, mode):
return _mx.np.linalg.qr(x, mode=mode)
| 27.361702 | 86 | 0.620529 | 216 | 1,286 | 3.564815 | 0.384259 | 0.036364 | 0.020779 | 0.019481 | 0.04026 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043216 | 0.226283 | 1,286 | 46 | 87 | 27.956522 | 0.730653 | 0.120529 | 0 | 0 | 0 | 0 | 0.06983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.166667 | 0.041667 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac93a900f8dd76c156f7ea7f46e47f6ba5ffc11 | 759 | py | Python | 01-introduction to python for data science/04-numpy/baseball-players-bmi.py | thelc127/Data-Scientist-Career-Track-Datacamp | 56d0ec0ece7fa9127e72b0da598c89f15f31b6b3 | [
"MIT"
] | 2 | 2021-05-21T04:59:19.000Z | 2021-05-21T08:32:41.000Z | 01-introduction to python for data science/04-numpy/baseball-players-bmi.py | thelc127/Data-Scientist-Career-Track-Datacamp | 56d0ec0ece7fa9127e72b0da598c89f15f31b6b3 | [
"MIT"
] | null | null | null | 01-introduction to python for data science/04-numpy/baseball-players-bmi.py | thelc127/Data-Scientist-Career-Track-Datacamp | 56d0ec0ece7fa9127e72b0da598c89f15f31b6b3 | [
"MIT"
] | null | null | null | # Create a numpy array from the weight_lb list with the correct units. Multiply by 0.453592 to go from pounds to kilograms.
# Store the resulting numpy array as np_weight_kg.
# Use np_height_m and np_weight_kg to calculate the BMI of each player.
# Use the following equation:
# BMI = weight(kg) / height (m3)
# save the resulting numpy array as bmi
# Print out bmi.
# height and weight are available as regular lists
# Import numpy
import numpy as np
# Create array from height_in with metric units: np_height_m
np_height_m = np.array(height_in) * 0.0254
# Create array from weight_lb with metric units: np_weight_kg
np_weight_kg = np.array(weight_lb) * 0.453592
# Calculate the BMI: bmi
bmi = np_weight_kg / np_height_m ** 2
# Print out bmi
print(bmi)
| 31.625 | 123 | 0.764163 | 137 | 759 | 4.065693 | 0.357664 | 0.086176 | 0.089767 | 0.064632 | 0.086176 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033493 | 0.173913 | 759 | 23 | 124 | 33 | 0.854864 | 0.750988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acb9e1e62ddcd1f2667eff52694832dd21f1914 | 411 | py | Python | editor/templatetags/stamp.py | andersshenholm/editor | 052844de68101c5cdc6d9343e3e095ba816cd34c | [
"Apache-2.0"
] | 51 | 2015-04-19T23:27:04.000Z | 2022-03-25T01:43:43.000Z | editor/templatetags/stamp.py | andersshenholm/editor | 052844de68101c5cdc6d9343e3e095ba816cd34c | [
"Apache-2.0"
] | 428 | 2015-01-05T10:56:32.000Z | 2022-03-29T14:33:23.000Z | editor/templatetags/stamp.py | andersshenholm/editor | 052844de68101c5cdc6d9343e3e095ba816cd34c | [
"Apache-2.0"
] | 71 | 2015-01-28T20:06:15.000Z | 2022-03-25T02:35:40.000Z | from django.template import Library
from editor.models import STAMP_STATUS_CHOICES
register = Library()
@register.inclusion_tag('stamp.html')
def stamp(status):
label = ''
if status=='draft':
return {'status': 'draft', 'label': 'Draft'}
for s_status, s_label in STAMP_STATUS_CHOICES:
if status == s_status:
label = s_label
return {'status': status, 'label': label}
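# Hedged usage sketch (the template variable is an assumption): in a Django
# template that has loaded this library,
#
#   {% load stamp %}
#   {% stamp item.status %}
#
# renders stamp.html with the matching {'status': ..., 'label': ...} context.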
| 27.4 | 52 | 0.664234 | 52 | 411 | 5.076923 | 0.423077 | 0.125 | 0.136364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.211679 | 411 | 14 | 53 | 29.357143 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0.114355 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acc8629a6ec5a5ef8fb0a2628a406996eb759d6 | 864 | py | Python | ubxlib/ubx_cfg_nmea.py | monocilindro/ubxlib | 378e86b7766f670b9a8966ee038275a2155bac54 | [
"MIT"
] | 3 | 2020-05-03T17:12:21.000Z | 2021-01-16T13:45:07.000Z | ubxlib/ubx_cfg_nmea.py | monocilindro/ubxlib | 378e86b7766f670b9a8966ee038275a2155bac54 | [
"MIT"
] | 35 | 2020-08-29T09:35:15.000Z | 2022-03-18T19:42:34.000Z | ubxlib/ubx_cfg_nmea.py | monocilindro/ubxlib | 378e86b7766f670b9a8966ee038275a2155bac54 | [
"MIT"
] | 4 | 2020-04-24T03:29:07.000Z | 2021-01-13T15:52:53.000Z | from .cid import UbxCID
from .frame import UbxFrame
from .types import CH, U1, X1, X4, Padding
class UbxCfgNmea_(UbxFrame):
CID = UbxCID(UbxCID.CLASS_CFG, 0x17)
NAME = 'UBX-CFG-NMEA'
class UbxCfgNmeaPoll(UbxCfgNmea_):
NAME = UbxCfgNmea_.NAME + '-POLL'
def __init__(self):
super().__init__()
def _cls_response(self):
return UbxCfgNmea
class UbxCfgNmea(UbxCfgNmea_):
def __init__(self):
super().__init__()
self.f.add(X1('filter'))
self.f.add(U1('nmeaVersion'))
self.f.add(U1('numSV'))
self.f.add(X1('flags'))
self.f.add(X4('gnssToFilter'))
self.f.add(U1('svNumbering'))
self.f.add(U1('mainTalkerId'))
self.f.add(U1('gsvTalkerId'))
self.f.add(U1('version'))
self.f.add(CH(2, 'bdsTalkerId'))
self.f.add(Padding(6, 'res1'))
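# Hypothetical poll/response sketch (the transport object and its methods
# are assumptions, not part of this module):
#
#   poll = UbxCfgNmeaPoll()
#   # a ubxlib transport would send the poll frame and decode the answer
#   # into the UbxCfgNmea frame declared above (see _cls_response()).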
| 24 | 42 | 0.603009 | 112 | 864 | 4.446429 | 0.383929 | 0.110442 | 0.176707 | 0.120482 | 0.080321 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02719 | 0.233796 | 864 | 35 | 43 | 24.685714 | 0.725076 | 0 | 0 | 0.153846 | 0 | 0 | 0.12963 | 0 | 0 | 0 | 0.00463 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.115385 | 0.038462 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ace70d75fc497eeb5ae372bf715cbea09eaaf13 | 390 | py | Python | Lesson_2/up.py | idel28102001/lessons | f88f5034d8c275175dacf66ba5d0342622c1aa50 | [
"Apache-2.0"
] | null | null | null | Lesson_2/up.py | idel28102001/lessons | f88f5034d8c275175dacf66ba5d0342622c1aa50 | [
"Apache-2.0"
] | null | null | null | Lesson_2/up.py | idel28102001/lessons | f88f5034d8c275175dacf66ba5d0342622c1aa50 | [
"Apache-2.0"
] | null | null | null | print('Think of a number')
num = 'yes'
l = 4
while num == 'yes':
    l -= 1
    num = input(f'Is the number of digits in your number less than {l}? : ')  # yes or no
num_2 = 'yes'
dig = ''
while l > 0:
    number = 10
    while num_2 == 'yes':
        number -= 1
        num_2 = input(f'Is your {l}-th digit less than {number}? : ')  # yes or no
    l -= 1
    dig = str(number) + dig
    num_2 = 'yes'
print(dig)
| 21.666667 | 78 | 0.525641 | 63 | 390 | 3.190476 | 0.428571 | 0.079602 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04059 | 0.305128 | 390 | 17 | 79 | 22.941176 | 0.701107 | 0.053846 | 0 | 0.235294 | 0 | 0 | 0.282192 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acfa73951bf5dd915adc32a7981cab8b5aacd86 | 4,247 | py | Python | twrap/metrics.py | itsnarsi/twrap | cc3128428e37fe0a363e5b18fd7fa0039a963365 | [
"MIT"
] | null | null | null | twrap/metrics.py | itsnarsi/twrap | cc3128428e37fe0a363e5b18fd7fa0039a963365 | [
"MIT"
] | null | null | null | twrap/metrics.py | itsnarsi/twrap | cc3128428e37fe0a363e5b18fd7fa0039a963365 | [
"MIT"
] | null | null | null | # @Author: Narsi Reddy <cibitaw1>
# @Date: 2018-09-22T17:38:05-05:00
# @Email: sainarsireddy@outlook.com
# @Last modified by: narsi
# @Last modified time: 2019-02-13T22:46:56-06:00
import torch
torch.manual_seed(29)
from torch import nn
import numpy as np
np.random.seed(29)
import torch.nn.functional as F
from torch.autograd.function import Function
from torch.nn.parameter import Parameter
from math import exp
"""
CLASSIFICATION METRICS
"""
def accuracy_topk(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape: the slice may be non-contiguous
res.append(correct_k.mul_(100.0 / batch_size))
return res
def accuracy(output, target):
batch_size = target.size(0)
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
    correct_k = correct[:1].reshape(-1).float().sum(0, keepdim=True)  # reshape: the slice may be non-contiguous
res = correct_k.mul_(100.0 / batch_size)
return res
def binary_accuracy(output, target):
res = torch.mean(target.eq(torch.round(output)).float()) * 100
return res
"""
SUPER RESOLUTION
"""
# https://github.com/Po-Hsun-Su/pytorch-ssim
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel, sigma = 1.5):
_1D_window = gaussian(window_size, sigma).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(nn.Module):
def __init__(self, window_size = 5, channel = 24, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = channel
self.window = create_window(window_size, self.channel, sigma = 5)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel, sigma = 5)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
class SSIM_LOSS(nn.Module):
def __init__(self, window_size = 5, channel = 1,size_average = True):
super(SSIM_LOSS, self).__init__()
self.SSIM = SSIM(window_size, channel, size_average)
def forward(self, img1, img2):
return 1-self.SSIM(img1, img2)
def psnr(output, target):
mse = F.mse_loss(output, target)
return -10. * logX(mse)
def logX(x, d = 10.0):
""" Log10: log base 10 for tensorflow
"""
numerator = torch.log(x)
denominator = np.log(d)
return numerator / denominator
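if __name__ == '__main__':
    # Smoke test (illustrative only; not part of the original module).
    img1 = torch.rand(1, 1, 64, 64)
    img2 = img1.clone()
    print(SSIM(window_size=5, channel=1)(img1, img2))  # ~1.0 for identical images
    print(psnr(img1, img1 + 1e-3).item())              # ~60 dB for a 1e-3 offset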
| 31.932331 | 104 | 0.645632 | 620 | 4,247 | 4.256452 | 0.254839 | 0.079576 | 0.025009 | 0.043577 | 0.305419 | 0.228875 | 0.228875 | 0.228875 | 0.147025 | 0.068966 | 0 | 0.053835 | 0.217094 | 4,247 | 132 | 105 | 32.174242 | 0.73985 | 0.07935 | 0 | 0.170455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.079545 | 0.011364 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acfefc74de42e7336d5b91fd88fe5402716e7ad | 4,428 | py | Python | configs/resnet/contrast_r18_config.py | alecpeltekian/ImgClassification | cf4eca33027ca423623ff965fac354dcfce396d3 | [
"Apache-2.0"
] | null | null | null | configs/resnet/contrast_r18_config.py | alecpeltekian/ImgClassification | cf4eca33027ca423623ff965fac354dcfce396d3 | [
"Apache-2.0"
] | null | null | null | configs/resnet/contrast_r18_config.py | alecpeltekian/ImgClassification | cf4eca33027ca423623ff965fac354dcfce396d3 | [
"Apache-2.0"
] | null | null | null | # ### ===============================================================
# ### ===============================================================
# ### Modify the dataset loading settings
# dataset settings
dataset_type = 'ContrastDataset'
data_root = '/mnt/cadlabnas/datasets/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
data = dict(
samples_per_gpu=8, # BATCH_SIZE
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file='train.txt',
data_prefix= data_root + 'RenalDonors/',
pipeline=train_pipeline),
pipeline=train_pipeline
),
val=dict(
type=dataset_type,
ann_file='val.txt',
data_prefix= data_root + 'RenalDonors/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file='test.txt',
data_prefix= data_root + 'RenalDonors/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='accuracy', metric_options=dict(topk=(1,)))
# Set up working dir to save files and logs.
work_dir = '/home/alec/Desktop/ImgClassification/working_dir'
### ===============================================================
### ===============================================================
### Modify the model settings
# model settings
model = dict(
type='ImageClassifier',
pretrained='torchvision://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3,),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=2,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1,),
))
### ===============================================================
### ===============================================================
### Modify the schedule settings
# The reference learning rate (LR) targets 8-GPU training; it was first
# scaled down for single-GPU use (0.01 / 4) and later lowered to 1e-4.
optimizer_lr = 0.0001  # previously 0.01 / 4
# optimizer
optimizer = dict(type='SGD', lr=optimizer_lr, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
# warmup='linear',
# warmup_iters=500,
# warmup_ratio=0.001,
step=[5, 10])
runner = dict(type='EpochBasedRunner', max_epochs=25)
### ===============================================================
### ===============================================================
### Modify the default runtime settings
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50, #50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
# Run one training epoch, then one validation epoch, per cycle. One epoch
# covers (num_images / batch_size) iterations, multiplied by the dataset
# repeat count. With workflow = [('train', 1)] alone, validation error/loss
# would never be computed on the network.
workflow = [('train', 1), ('val', 1)]
### ===============================================================
### ===============================================================
### Miscellaneous settings
# Set the seed so that results are more reproducible
seed = 0
#set_random_seed(0, deterministic=False)
gpu_ids = range(1)
### ===============================================================
### ===============================================================
### testing/prediction/evaluation phase - Model settings
# get the root path to the model checkpoints
ckp_root = work_dir #'/home/tsm/Code/mmdetection/demo/tutorial_exps/'
| 30.965035 | 137 | 0.539747 | 470 | 4,428 | 4.929787 | 0.468085 | 0.079413 | 0.012948 | 0.018127 | 0.197669 | 0.182132 | 0.11653 | 0.099266 | 0.099266 | 0.054381 | 0 | 0.02608 | 0.168699 | 4,428 | 142 | 138 | 31.183099 | 0.603369 | 0.393631 | 0 | 0.148148 | 0 | 0 | 0.179862 | 0.036126 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad4e2861aebece133d34e92b30760d0b61fc3a9 | 662 | py | Python | roots/FinalModifiedBisection.py | Seek/LaTechNumeric | dabef2040e84bf25cabab07fe20a6434ce52197b | [
"MIT"
] | null | null | null | roots/FinalModifiedBisection.py | Seek/LaTechNumeric | dabef2040e84bf25cabab07fe20a6434ce52197b | [
"MIT"
] | null | null | null | roots/FinalModifiedBisection.py | Seek/LaTechNumeric | dabef2040e84bf25cabab07fe20a6434ce52197b | [
"MIT"
] | null | null | null | import sys
EPS = sys.float_info.epsilon
# Define the function
def f(x):
return (x+1)**2 - 1
def bisect(f, x1, x2, eps, maxn):
    assert f(x1)*f(x2) < 0, \
        "We cannot find a root if the function does not change signs"
    xl = x1
    xu = x2
    xr = 0
    fl = f(xl)
    err = 1000
    for i in range(maxn):
        r = (xl + xu)/2
        print(r)
        fr = f(r)
        # Relative approximate error, skipped when r is (nearly) zero
        if not abs(r - 0) < EPS:
            err = abs((r - xr)/r) * 100
            if err < eps:
                print("Error = " + str(err))
                break
        v = fl * fr
        if v < 0:
            # Sign change in the lower half: the root lies in [xl, r]
            xu = r
        elif v > 0:
            # Sign change in the upper half: the root lies in [r, xu]
            xl = r
            fl = fr
        else:
            # f(r) == 0 exactly: r is a root, so stop
            err = 0
            break
        xr = r
    return r
print("Computing the roots of x**2 - 2")
r = bisect(f, -1.5, 10, 0.00001, 100)
print("Root = " + str(r))
| 16.146341 | 62 | 0.539275 | 131 | 662 | 2.717557 | 0.435115 | 0.061798 | 0.022472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08137 | 0.294562 | 662 | 40 | 63 | 16.55 | 0.680942 | 0.028701 | 0 | 0 | 0 | 0 | 0.162246 | 0 | 0 | 0 | 0 | 0 | 0.029412 | 1 | 0.058824 | false | 0 | 0.029412 | 0.029412 | 0.147059 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad61d1f671afea82e39013fc63a8845b6a3671b | 3,606 | py | Python | monique_worker_py/worker.py | biocad/monique-worker-py | 56b0ab2e218b80e3a83d7987cd8dd8993a3d66a7 | [
"BSD-3-Clause"
] | null | null | null | monique_worker_py/worker.py | biocad/monique-worker-py | 56b0ab2e218b80e3a83d7987cd8dd8993a3d66a7 | [
"BSD-3-Clause"
] | null | null | null | monique_worker_py/worker.py | biocad/monique-worker-py | 56b0ab2e218b80e3a83d7987cd8dd8993a3d66a7 | [
"BSD-3-Clause"
] | null | null | null | import zmq
import logging
import argparse
from monique_worker_py.config import read_worker_config
from monique_worker_py.qmessage import qmessage_from_json, create_qmessage
class Worker:
def __init__(self, worker_name, algo):
self.worker_name = worker_name
self.algo = algo
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True, help='Path to config file')
args = parser.parse_args()
self.worker_config = read_worker_config(args.config)
def run(self):
"""Runs application"""
logging.basicConfig(level=self.worker_config.log_level,
format='%(asctime)s %(name)-12s %(levelname)-8s {}: %(message)s'.format(self.worker_name),
datefmt='%Y-%m-%d %H:%M',
filename=self.worker_config.log_path,
filemode='a')
logging.info("connecting to queue...")
# setup connection
context = zmq.Context()
# Socket to receive messages from controller
from_controller = context.socket(zmq.PULL)
from_controller.connect(self.worker_config.controller_pull_address())
# Socket to send messages to controller
to_controller = context.socket(zmq.PUSH)
to_controller.connect(self.worker_config.controller_push_address())
logging.info("connected to queue.")
# waiting for the message...
while True:
in_message = from_controller.recv()
logging.info('message received.')
# parsing message to QMessage
qmessage = qmessage_from_json(in_message)
logging.debug('message tags: {}; message cnt: {}'.format(qmessage.tags, qmessage.cnt))
# get config from Task
task = qmessage.cnt.contents
config = task.get_config()
logging.info('config parsed')
logging.debug('config content: {}'.format(config))
try:
logging.info('start working...')
# that is the MAIN PLACE. We run given algorithm with config received.
wr = self.algo(config)
logging.debug('worker result: {}, worker version: {}'.format(wr.result, wr.version))
logging.info('finished working!')
# prepare result QMessage...
completed_task = task.task_completed(wr)
# prepare result QMessage...
completed_message = create_qmessage(completed_task)
# and sending it back to the queue.
logging.info('sending message with completed task...')
logging.debug('message: {}'.format(completed_message.to_json()))
to_controller.send(completed_message.to_json())
logging.info("message sent :)")
except Exception as e:
logging.error('failed with error: {}'.format(e))
# if exception happened then format result QMessage with another method...
failed_message = qmessage.qmessage_failed(self.worker_name, e)
# and sending it back.
logging.info("sending message with failed task...")
logging.debug('message: {}'.format(failed_message.to_json()))
to_controller.send(failed_message.to_json())
logging.info("message sent :(")
class WorkerResult:
"""Class to format worker result."""
def __init__(self, result, version):
self.result = result
self.version = version
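# Hypothetical usage sketch (the algorithm and its return values are
# illustrative; a real worker is launched with --config <path> and needs the
# queue controller to be running):
#
#   def my_algo(config):
#       return WorkerResult(result={'answer': 42}, version='1.0.0')
#
#   Worker('demo-worker', my_algo).run()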
| 37.5625 | 118 | 0.598724 | 388 | 3,606 | 5.399485 | 0.304124 | 0.052506 | 0.038186 | 0.018138 | 0.157518 | 0.102148 | 0.033413 | 0 | 0 | 0 | 0 | 0.001185 | 0.297837 | 3,606 | 95 | 119 | 37.957895 | 0.826224 | 0.13117 | 0 | 0 | 0 | 0 | 0.139826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.087719 | 0 | 0.175439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad6cbcddff8d2b547541b05fb14cdfa5518b9b3 | 1,081 | py | Python | helpers/sendSMS.py | cheikhmbackeseck37/insuris | 3362ca445d489e23d57a76bbd6d263f3a5f0b519 | [
"MIT"
] | 12 | 2019-08-02T07:58:16.000Z | 2022-01-31T23:45:08.000Z | helpers/sendSMS.py | domambia/csdigital-gs1kenya-internal-erp | 6736d0e9a3a51653689f8ae921cf811f378d9d8e | [
"MIT"
] | 8 | 2019-08-02T08:06:18.000Z | 2022-03-11T23:45:17.000Z | helpers/sendSMS.py | cheikhmbackeseck37/insuris | 3362ca445d489e23d57a76bbd6d263f3a5f0b519 | [
"MIT"
] | 11 | 2019-07-31T16:23:36.000Z | 2022-01-29T08:30:07.000Z | # works with both python 2 and 3
from __future__ import print_function
from datetime import datetime
import africastalking
class SMS:
def __init__(self):
self.username = "gs1kenya"
self.api_key = "0902d36a02514da9fa33a11586683f8d76e5207ea544363e7d41149e6c9a6718"
africastalking.initialize(self.username, self.api_key)
self.sms = africastalking.SMS
def send(self, phone, message):
try:
response = self.sms.send(str(message), ["+254"+str(phone)])
except Exception as e:
message = """
Dear, Omambia Mogaka.
Ref: Message Notification
------------------------
There was an error in sending message to your other employee.
The Error is: {}
Thank You,
Humble Developer, Most adored,
GS1 Kenya
Date: {} .
"""
            print(message.format(str(e), datetime.now()))  # now() must be called, otherwise the function object is printed
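# Hypothetical usage (the phone number and text are placeholders):
#
#   SMS().send(712345678, "Your GS1 Kenya registration is complete")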
| 38.607143 | 89 | 0.518964 | 97 | 1,081 | 5.670103 | 0.659794 | 0.050909 | 0.036364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081818 | 0.389454 | 1,081 | 27 | 90 | 40.037037 | 0.751515 | 0.027752 | 0 | 0 | 0 | 0 | 0.509056 | 0.083889 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.24 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5adaa94654c12d575666ad0a6b6cf47ac7a0cb0e | 1,120 | py | Python | examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
] | 17 | 2019-11-15T06:27:05.000Z | 2021-10-02T14:24:25.000Z | examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
] | null | null | null | examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
] | 4 | 2020-09-03T17:01:34.000Z | 2021-11-05T04:09:24.000Z | import os
import pandas as pd
import matplotlib.pyplot as plt
from examples.cartpole_example.cartpole_dynamics import RAD_TO_DEG, DEG_TO_RAD
if __name__ == '__main__':
#df_model = pd.read_csv(os.path.join("data", "pendulum_data_PID.csv"))
#df_nn = pd.read_csv(os.path.join("data", "pendulum_data_PID_NN_model.csv"))
df_meas = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val.csv"))
df_nn = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val_NN_model.csv"))
    fig, axes = plt.subplots(3, 1, figsize=(10, 10), sharex=True)
axes[0].plot(df_meas['time'], df_meas['p'], "k", label='p system')
axes[0].plot(df_nn['time'], df_nn['p'], "r", label='p NN')
axes[0].set_title("Position (m)")
axes[0].set_ylim(-10, 10.0)
axes[1].plot(df_meas['time'], df_meas['theta'] * RAD_TO_DEG, "k", label='theta system')
axes[1].plot(df_nn['time'], df_nn['theta']*RAD_TO_DEG, "r", label='theta NN')
axes[2].plot(df_meas['time'], df_meas['u'], label="u")
axes[2].plot(df_nn['time'], df_nn['u'], label="u")
for ax in axes:
ax.grid(True)
ax.legend()
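    # Render the figure (assumes an interactive Matplotlib backend; the
    # original script may have relied on an interactive session instead).
    plt.show()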
| 35 | 91 | 0.655357 | 200 | 1,120 | 3.39 | 0.315 | 0.047198 | 0.053097 | 0.064897 | 0.421829 | 0.421829 | 0.262537 | 0.262537 | 0.262537 | 0.262537 | 0 | 0.019792 | 0.142857 | 1,120 | 31 | 92 | 36.129032 | 0.686458 | 0.128571 | 0 | 0 | 0 | 0 | 0.175565 | 0.068789 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae33bc829b96d32b0f8a98265306f77e0baf4b1 | 3,327 | py | Python | tests/test_views.py | localmed/django-assetfiles | 34089780126989f49e6b890b85a90047704fde37 | [
"MIT"
] | null | null | null | tests/test_views.py | localmed/django-assetfiles | 34089780126989f49e6b890b85a90047704fde37 | [
"MIT"
] | 2 | 2017-02-11T20:10:46.000Z | 2017-02-11T20:10:56.000Z | tests/test_views.py | localmed/django-assetfiles | 34089780126989f49e6b890b85a90047704fde37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django_nose.tools import *
from tests.base import AssetfilesTestCase
class TestServe(AssetfilesTestCase):
def test_returns_not_found_without_an_asset(self):
response = self.client.get('/static/non/existent/file.css')
assert_equal(response.status_code, 404)
def test_returns_static_files(self):
self.mkfile('static/css/static.css', 'body { color: red; }')
response = self.client.get('/static/css/static.css')
assert_contains(response, 'body { color: red; }')
def test_returns_static_files_with_correct_content_type(self):
self.mkfile('static/css/static.css')
response = self.client.get('/static/css/static.css')
assert_equal(response.get('content-type'), 'text/css')
def test_returns_static_files_with_extra_extensions(self):
self.mkfile('app-1/static/js/jquery.plugin.js', '$.fn.plugin = {};')
response = self.client.get('/static/js/jquery.plugin.js')
assert_contains(response, '$.fn.plugin = {};')
def test_returns_app_static_files(self):
self.mkfile('app-1/static/css/app_static.css', 'body { color: blue; }')
response = self.client.get('/static/css/app_static.css')
assert_contains(response, 'body { color: blue; }')
def test_processes_scss_files(self):
self.mkfile('static/css/simple.scss',
'$c: red; body { color: $c; }')
response = self.client.get('/static/css/simple.css')
assert_contains(response, 'body {\n color: red; }')
def test_returns_processed_scss_files_with_correct_content_type(self):
self.mkfile('static/css/simple.scss',
'$c: red; body { color: $c; }')
response = self.client.get('/static/css/simple.css')
assert_equal(response.get('content-type'), 'text/css')
def test_processes_app_scss_files(self):
self.mkfile('app-1/static/css/app.scss',
'$c: yellow; body { color: $c; }')
response = self.client.get('/static/css/app.css')
assert_contains(response, 'body {\n color: yellow; }')
def test_processes_scss_files_with_deps(self):
self.mkfile('static/css/folder/_dep.scss', '$c: black;')
self.mkfile('static/css/with_deps.scss',
'@import "folder/dep"; body { color: $c; }')
response = self.client.get('/static/css/with_deps.css')
assert_contains(response, 'body {\n color: black; }')
def test_processes_scss_files_with_app_deps(self):
self.mkfile('app-1/static/css/folder/_dep.scss', '$c: white;')
self.mkfile('static/css/with_app_deps.scss',
'@import "folder/dep"; body { color: $c; }')
response = self.client.get('/static/css/with_app_deps.css')
assert_contains(response, 'body {\n color: white; }')
def test_processes_asset_files_with_unicode_chars(self):
self.mkfile('static/css/simple.scss',
'$c: "é"; a::before { content: $c; }')
self.mkfile('static/js/simple.coffee', 'a = foo: "é#{2}3"')
response = self.client.get('/static/css/simple.css')
assert_contains(response, 'a::before {\n content: "é"; }')
response = self.client.get('/static/js/simple.js')
assert_contains(response, 'foo: "é" + 2 + "3"')
| 44.36 | 79 | 0.643823 | 439 | 3,327 | 4.681093 | 0.177677 | 0.113869 | 0.105109 | 0.122628 | 0.73528 | 0.636983 | 0.525547 | 0.421411 | 0.36691 | 0.273966 | 0 | 0.004473 | 0.193568 | 3,327 | 74 | 80 | 44.959459 | 0.761461 | 0.006312 | 0 | 0.241379 | 0 | 0 | 0.351695 | 0.175242 | 0 | 0 | 0 | 0 | 0.206897 | 1 | 0.189655 | false | 0 | 0.086207 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae4b5cc0aeea03900ca797b02f8cd9bb0c7e4f9 | 8,083 | py | Python | CHT/cht_data.py | aryam7/WASP | 39f3ac2e8ad3b97124b52cc17e97902e3ec1fbc9 | [
"Apache-2.0"
] | 72 | 2015-03-01T20:59:06.000Z | 2022-03-28T08:48:39.000Z | CHT/cht_data.py | bmvdgeijn/WASP | d3b8447fd7719fffa00b856fd1f27c845554693e | [
"Apache-2.0"
] | 93 | 2015-01-14T23:49:12.000Z | 2022-03-26T16:31:52.000Z | CHT/cht_data.py | aryam7/WASP | 39f3ac2e8ad3b97124b52cc17e97902e3ec1fbc9 | [
"Apache-2.0"
] | 51 | 2015-02-19T23:49:17.000Z | 2021-12-16T01:40:37.000Z | import sys
import gzip
import os
import random  # needed by the shuffle option in parse_test_snp
import numpy as np
import util
class TestSNP:
def __init__(self, name, geno_hap1, geno_hap2, AS_target_ref, AS_target_alt,
hetps, totals, counts):
self.name = name
self.geno_hap1 = geno_hap1
self.geno_hap2 = geno_hap2
self.AS_target_ref = AS_target_ref
self.AS_target_alt = AS_target_alt
self.hetps = hetps
self.totals = totals
self.counts = counts
def is_het(self):
"""returns True if the test SNP is heterozygous"""
return self.geno_hap1 != self.geno_hap2
def is_homo_ref(self):
"""Returns True if test SNP is homozygous for reference allele"""
return self.geno_hap1 == 0 and self.geno_hap2 == 0
def is_homo_alt(self):
"""Returns True if test SNP is homozygous for non-reference allele"""
return self.geno_hap1 == 1 and self.geno_hap2 == 1
dup_snp_warn = True
def parse_test_snp(snpinfo, shuffle=False):
global dup_snp_warn
snp_id = snpinfo[2]
tot = 0 if snpinfo[16] == "NA" else float(snpinfo[16])
if snpinfo[6] == "NA":
geno_hap1 = 0
geno_hap2 = 0
else:
geno_hap1 = int(snpinfo[6].strip().split("|")[0])
geno_hap2 = int(snpinfo[6].strip().split("|")[1])
count = 0 if snpinfo[15] == "NA" else int(snpinfo[15])
if snpinfo[9].strip() == "NA" or geno_hap1 == geno_hap2:
# SNP is homozygous, so there is no AS info
return TestSNP(snp_id, geno_hap1, geno_hap2, [], [], [], tot, count)
else:
# positions of target SNPs
snp_locs = np.array([int(y.strip()) for y in snpinfo[9].split(';')])
# counts of reads that match reference overlapping linked 'target' SNPs
snp_as_ref = np.array([int(y) for y in snpinfo[12].split(';')])
# counts of reads that match alternate allele
snp_as_alt = np.array([int(y) for y in snpinfo[13].split(';')])
# heterozygote probabilities
snp_hetps = np.array([np.float64(y.strip())
for y in snpinfo[10].split(';')])
# linkage probabilities, not currently used
snp_linkageps = np.array([np.float64(y.strip())
for y in snpinfo[11].split(';')])
# same SNP should not be provided multiple times, this
# can create problems with combined test. Warn and filter
# duplicate SNPs
uniq_loc, uniq_idx = np.unique(snp_locs, return_index=True)
if dup_snp_warn and uniq_loc.shape[0] != snp_locs.shape[0]:
sys.stderr.write("WARNING: discarding SNPs that are repeated "
"multiple times in same line\n")
# only warn once
dup_snp_warn = False
snp_as_ref = snp_as_ref[uniq_idx]
snp_as_alt = snp_as_alt[uniq_idx]
snp_hetps = snp_hetps[uniq_idx]
# linkage probabilities currently not used
snp_linkageps = snp_linkageps[uniq_idx]
if shuffle:
# permute allele-specific read counts by flipping them randomly at
# each SNP
for y in range(len(snp_as_ref)):
if random.randint(0, 1) == 1:
temp = snp_as_ref[y]
snp_as_ref[y] = snp_as_alt[y]
snp_as_alt[y] = temp
return TestSNP(snp_id, geno_hap1, geno_hap2, snp_as_ref,
snp_as_alt, snp_hetps, tot, count)
def open_input_files(in_filename):
if not os.path.exists(in_filename) or not os.path.isfile(in_filename):
raise IOError("input file %s does not exist or is not a "
"regular file\n" % in_filename)
# read file that contains list of input files
in_file = open(in_filename, "rt")
infiles = []
for line in in_file:
# open each input file and read first line
filename = line.rstrip()
sys.stderr.write(" " + filename + "\n")
if (not filename) or (not os.path.exists(filename)) or \
(not os.path.isfile(filename)):
sys.stderr.write("input file '%s' does not exist or is not a "
"regular file\n" % in_file)
exit(2)
if util.is_gzipped(filename):
f = gzip.open(filename, "rt")
else:
f = open(filename, "rt")
# skip header
f.readline()
infiles.append(f)
in_file.close()
if len(infiles) == 0:
sys.stderr.write("no input files specified in file '%s'\n" % in_filename)
exit(2)
return infiles
def read_count_matrices(input_filename, shuffle=False, skip=0,
min_counts=0, min_as_counts=0, sample=0):
"""Given an input file that contains paths to input files for all individuals, and returns
matrix of observed read counts, and matrix of expected read counts
"""
infiles = open_input_files(input_filename)
is_finished = False
count_matrix = []
expected_matrix = []
line_num = 0
skip_num = 0
while not is_finished:
is_comment = False
line_num += 1
count_line = []
expected_line = []
num_as = 0
for i in range(len(infiles)):
# read next row from this input file
line = infiles[i].readline().strip()
if line.startswith("#") or line.startswith("CHROM"):
# skip comment lines and header line
is_comment = True
elif line:
if is_finished:
raise IOError("All input files should have same number of lines. "
"LINE %d is present in file %s, but not in all input files\n"
% (line_num, infiles[i].name))
if is_comment:
raise IOError("Comment and header lines should be consistent accross "
"all input files. LINE %d is comment or header line in some input files "
"but not in file %s" % (line_num, infiles[i].name))
# parse test SNP and associated info from input file row
new_snp = parse_test_snp(line.split(), shuffle=shuffle)
if new_snp.is_het():
num_as += np.sum(new_snp.AS_target_ref) + \
np.sum(new_snp.AS_target_alt)
count_line.append(new_snp.counts)
expected_line.append(new_snp.totals)
else:
# out of lines from at least one file, assume we are finished
is_finished = True
if not is_finished and not is_comment:
if skip_num < skip:
# skip this row
skip_num += 1
else:
if(sum(count_line) >= min_counts and num_as >= min_as_counts):
# this line exceeded minimum number of read counts and AS counts
count_matrix.append(count_line)
expected_matrix.append(expected_line)
skip_num = 0
count_matrix = np.array(count_matrix, dtype=int)
expected_matrix = np.array(expected_matrix, dtype=np.float64)
sys.stderr.write("count_matrix dimension: %s\n" % str(count_matrix.shape))
sys.stderr.write("expect_matrix dimension: %s\n" % str(expected_matrix.shape))
nrow = count_matrix.shape[0]
if (sample > 0) and (sample < count_matrix.shape[0]):
# randomly sample subset of rows without replacement
sys.stderr.write("randomly sampling %d target regions\n" % sample)
samp_index = np.arange(nrow)
np.random.shuffle(samp_index)
samp_index = samp_index[:sample]
count_matrix = count_matrix[samp_index,]
expected_matrix = expected_matrix[samp_index,]
sys.stderr.write("new count_matrix dimension: %s\n" % str(count_matrix.shape))
sys.stderr.write("new expect_matrix dimension: %s\n" % str(expected_matrix.shape))
return count_matrix, expected_matrix
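# Hypothetical usage (the file name is a placeholder; it should list one
# per-individual CHT input file per line):
#
#   count_matrix, expected_matrix = read_count_matrices(
#       "cht_input_files.txt", min_counts=50, min_as_counts=10)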
| 35.143478 | 107 | 0.584189 | 1,085 | 8,083 | 4.17235 | 0.204608 | 0.016567 | 0.027833 | 0.014358 | 0.22907 | 0.177159 | 0.121935 | 0.121935 | 0.096311 | 0.0592 | 0 | 0.014765 | 0.321292 | 8,083 | 229 | 108 | 35.296943 | 0.810427 | 0.153285 | 0 | 0.0625 | 0 | 0 | 0.097762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048611 | false | 0 | 0.034722 | 0 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae519116b2d3198ee0c6685afe6a91a67c62aa2 | 1,023 | py | Python | restaurant/admin.backup.py | syahnur197/restaurant-backend | a0f320b69f3fed293555634f6ac094eaa0574c45 | [
"MIT"
] | null | null | null | restaurant/admin.backup.py | syahnur197/restaurant-backend | a0f320b69f3fed293555634f6ac094eaa0574c45 | [
"MIT"
] | null | null | null | restaurant/admin.backup.py | syahnur197/restaurant-backend | a0f320b69f3fed293555634f6ac094eaa0574c45 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from .models import Image, Product
"""
Register models with generic relations in the Django admin.
"""
class ImageInline(GenericStackedInline):
model = Image
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = (
'id',
'created',
'modified',
'content_type',
'object_id',
'image',
)
list_filter = ('created', 'modified', 'content_type')
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = (
'id',
'created',
'modified',
'status',
'activate_date',
'deactivate_date',
'name',
'description',
'restaurant',
'unit_price',
'discount_price',
)
list_filter = (
'created',
'modified',
'activate_date',
'deactivate_date',
'restaurant',
)
search_fields = ('name',)
inlines = [
ImageInline,
]
| 19.673077 | 66 | 0.57087 | 86 | 1,023 | 6.627907 | 0.476744 | 0.105263 | 0.059649 | 0.091228 | 0.150877 | 0.150877 | 0.150877 | 0 | 0 | 0 | 0 | 0 | 0.304008 | 1,023 | 51 | 67 | 20.058824 | 0.800562 | 0 | 0 | 0.380952 | 0 | 0 | 0.228141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae5219951f2f425f340756b442acd6b639dbefb | 1,025 | py | Python | test/twistedutils/test_deferred_deque.py | Wizmann/STUP-Protocol | e06a3442082e5061d2be32be3ffd681675e7ffb5 | [
"MIT"
] | 14 | 2017-05-06T10:14:32.000Z | 2018-07-17T02:58:00.000Z | test/twistedutils/test_deferred_deque.py | Wizmann/STUP-Protocol | e06a3442082e5061d2be32be3ffd681675e7ffb5 | [
"MIT"
] | 2 | 2017-06-13T05:40:18.000Z | 2017-06-13T16:23:01.000Z | test/twistedutils/test_deferred_deque.py | Wizmann/STUP-Protocol | e06a3442082e5061d2be32be3ffd681675e7ffb5 | [
"MIT"
] | 4 | 2017-06-09T20:20:54.000Z | 2018-07-17T02:58:10.000Z | #coding=utf-8
from __future__ import absolute_import
import pytest
import twisted
from twisted.trial import unittest
from twisted.internet.defer import Deferred
from twisted.python import log
from stup.twistedutils.deferred_deque import *
class DeferredDequeueTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.buffer = []
super(DeferredDequeueTest, self).__init__(*args, **kwargs)
def test_all(self):
dd = DeferredDeque()
dd.append_left('a')
dd.append_right('b')
self.assertEqual(list(dd.pending), ['a', 'b'])
dd.pop_right().addCallback(lambda x: self.buffer.append(x))
self.assertEqual(self.buffer, ['b'])
dd.pop_left().addCallback(lambda x: self.buffer.append(x))
self.assertEqual(self.buffer, ['b', 'a'])
dd.pop_right().addCallback(lambda x: self.buffer.append(x))
self.assertEqual(self.buffer, ['b', 'a'])
dd.append_left('c')
self.assertEqual(self.buffer, ['b', 'a', 'c'])
| 27.702703 | 67 | 0.656585 | 130 | 1,025 | 5.015385 | 0.353846 | 0.122699 | 0.116564 | 0.153374 | 0.358896 | 0.358896 | 0.317485 | 0.317485 | 0.317485 | 0.317485 | 0 | 0.001212 | 0.195122 | 1,025 | 36 | 68 | 28.472222 | 0.789091 | 0.011707 | 0 | 0.166667 | 0 | 0 | 0.012871 | 0 | 0 | 0 | 0 | 0 | 0.208333 | 1 | 0.083333 | false | 0 | 0.291667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae7e92c23080d64d3b2328bffafe05bd7e29760 | 1,845 | py | Python | quickspy/net/netengine.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | 1 | 2020-07-11T13:41:40.000Z | 2020-07-11T13:41:40.000Z | quickspy/net/netengine.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | null | null | null | quickspy/net/netengine.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | null | null | null | from lxml import etree
import socket
import re
import aiohttp
from quickspy.color import *
class Response:
    def __init__(self, byte, encoding='utf-8'):
        # Keep the encoding on the instance rather than in a module-level
        # global, so responses with different encodings cannot clash.
        self.encoding = encoding
        self.url = None
        self.html = None
        self.byte = byte
        self.HTML = None
        self.status = None

    def get_html(self):
        if self.html is None:
            self.html = self.get_byte().decode(self.encoding)
        return self.html
def get_byte(self):
return self.byte
def xpath(self, exp):
temp = self.get_HTML()
return temp.xpath(exp)
    def findall(self, exp):
        # re.findall takes the pattern first, then the string to search
        return re.findall(exp, self.get_html())
def get_HTML(self):
if self.HTML is None:
self.HTML = etree.HTML(self.get_html())
return self.HTML
def get_url(self):
return self.url
def gettitle(self):
temp = self.get_HTML()
return temp.xpath('//title/text()')[0]
class NetEngine:
def __init__(self):
        # Open the aiohttp HTTP client session
self.session = aiohttp.ClientSession()
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('localhost', 2546))
except socket.error as msg:
print(RED(f'at Quickspy.init :{msg}'))
async def close(self):
await self.session.close()
self.s.close()
async def get(self, url, timeout=10):
async with self.session.get(url, timeout=timeout) as response:
            print(f'netengine: timeout = {timeout}')
temp = await response.read()
_response = Response(temp)
_response.url = response.url
_response.status = response.status
self.s.send("eval self.nemanager.reg('default').add()".encode())
return _response | 24.932432 | 77 | 0.58374 | 229 | 1,845 | 4.60262 | 0.318777 | 0.060721 | 0.041746 | 0.048387 | 0.166983 | 0.129032 | 0.129032 | 0.072106 | 0.072106 | 0.072106 | 0 | 0.00625 | 0.306233 | 1,845 | 74 | 78 | 24.932432 | 0.817188 | 0.009214 | 0 | 0.037037 | 0 | 0 | 0.065098 | 0.019147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.092593 | 0.055556 | 0.444444 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
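# Hypothetical usage sketch (requires the local Quickspy service listening on
# port 2546, so it is left commented out here):
#
#   import asyncio
#
#   async def main():
#       engine = NetEngine()
#       response = await engine.get('https://example.com')
#       print(response.gettitle())
#       await engine.close()
#
#   asyncio.run(main())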
5ae91679bd447b62dfc5e7a20c1d3f70d03392e4 | 1,577 | py | Python | yamlapi/demo/tool/read_write_json.py | Ironkubi/yamlapi | efd80cf15a182b0dde03e923f6b3d86c43e5a355 | [
"MIT"
] | 19 | 2020-05-29T09:28:42.000Z | 2022-02-21T06:09:42.000Z | yamlapi/demo/tool/read_write_json.py | Ironkubi/yamlapi | efd80cf15a182b0dde03e923f6b3d86c43e5a355 | [
"MIT"
] | 1 | 2020-03-05T05:45:19.000Z | 2020-07-12T03:08:40.000Z | yamlapi/demo/tool/read_write_json.py | Ironkubi/yamlapi | efd80cf15a182b0dde03e923f6b3d86c43e5a355 | [
"MIT"
] | 7 | 2020-10-21T02:24:44.000Z | 2022-02-21T06:09:22.000Z | import demjson
from setting.project_config import *
def read_json(json_absolute_path):
    """
    Read a JSON file
    :param json_absolute_path: the absolute path of the JSON file to read
    :return:
    """
    with open(json_absolute_path, "r", encoding="utf-8") as f:
        data_list = demjson.decode(f.read(), encoding="utf-8")
        return data_list
        # Returns a data list
def write_json(json_relative, data_list):
    """
    Write a JSON file
    :param json_relative: the relative path of the JSON file to write
    :param data_list: the data to convert and write
    :return:
    """
    with open(yaml_path + json_relative, "wb") as f:
        f.write(demjson.encode(data_list, encoding="utf-8"))
        return json_relative
        # Returns the relative path of the JSON file
def merge_json():
    """
    Merge all JSON files
    :return:
    """
    json_list = []
    for root, dirs, files in os.walk(yaml_path):
        # root is the current directory path
        # dirs is the list of subdirectories under the current path
        # files is the list of non-directory files under the current path
        for i in files:
            if os.path.splitext(i)[1] == '.json':
                # os.path.splitext() splits a path into (file name, extension)
                if i != first_test_case_file:
                    json_list.append(os.path.join(root, i))
                else:
                    the_first_json = os.path.join(root, first_test_case_file)
                    json_list.append(the_first_json)
                    # Add the first JSON file
    json_list.reverse()
    # Reverse the order
    temporary_list = []
    for i in json_list:
        if i:
            j = read_json(i)
            # Call the method that reads a JSON file
            if j:
                temporary_list.extend(j)
                # Gradually extend the list with each file's elements
    return temporary_list
    # Returns a temporary list
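# Hypothetical usage (yaml_path and first_test_case_file come from the
# project config):
#
#   all_cases = merge_json()
#   print(f"{len(all_cases)} merged test cases")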
| 23.537313 | 77 | 0.590996 | 182 | 1,577 | 4.901099 | 0.395604 | 0.044843 | 0.053812 | 0.040359 | 0.069507 | 0.069507 | 0.069507 | 0 | 0 | 0 | 0 | 0.003666 | 0.30818 | 1,577 | 66 | 78 | 23.893939 | 0.813932 | 0.223843 | 0 | 0 | 0 | 0 | 0.020105 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aed22439a90e2493b4099bcd8a18e6edf2db414 | 2,254 | py | Python | python/LPG/CRUD.py | andreluizdsantos/Curso_ADS | bdff1f96cfc22f91423bc14f383f3e69b93deb6f | [
"MIT"
] | 1 | 2020-08-31T16:53:18.000Z | 2020-08-31T16:53:18.000Z | python/LPG/CRUD.py | andreluizdsantos/Curso_ADS | bdff1f96cfc22f91423bc14f383f3e69b93deb6f | [
"MIT"
] | null | null | null | python/LPG/CRUD.py | andreluizdsantos/Curso_ADS | bdff1f96cfc22f91423bc14f383f3e69b93deb6f | [
"MIT"
] | null | null | null | import sqlite3  # import the sqlite3 library
desc = ["Code", "Name", "Phone"]
lista = [None, None]
menu = [' 1 - Register:', ' 2 - Query:', ' 3 - Drop/Create Table:', ' 9 - Save and Exit']
conector = sqlite3.connect('teste.db')  # connect to the database
cursor = conector.cursor()  # start the cursor
while True:
    print(f"*CRUD* Test\nDatabase 'teste.db'\n{menu[0]}\n{menu[1]}\n{menu[2]}\n{menu[3]}")
    op = int(input('Choose an option: '))
    if op == 1:
        lista[0] = str(input('Enter the name: '))
        lista[1] = str(input('Enter the phone number: '))
        print(lista)
        sql = "insert into cadastro (nome, tel) values (?, ?)"  # sql receives the insert command; each ? marks a slot filled from the list
        cursor.execute(sql, lista)  # the cursor executes sql, using the list to fill in the values
        print("...data inserted successfully!")
    elif op == 2:
        sql = "select * from cadastro"  # sql receives the query over the table
        cursor.execute(sql)  # the cursor executes sql
        dados = cursor.fetchall()  # dados receives the rows returned by the query
        print("Data from table 'cadastro'")
        print(f"{len(dados)} records found")
        print("-" * 37)
        print(f"{desc[0]:^7} {desc[1]:^20} {desc[2]:^8}")
        print("- " * 19)
        for d in dados:
            print(f"{d[0]:^7} {d[1]:20} {d[2]:^8}")
        print("-" * 37)
    elif op == 3:
        sql = "drop table if exists cadastro"  # sql receives the command to drop the table if it exists
        cursor.execute(sql)  # the cursor executes sql
        sql = "create table if not exists cadastro (id integer primary key autoincrement, nome varchar(30), tel varchar(10))"  # sql receives the command to create the table
        cursor.execute(sql)  # the cursor executes sql
        print('Table cadastro dropped and recreated')
    elif op == 9:
        conector.commit()  # commit, writing the data to the database
        break
    else:
        print('Invalid option!')
cursor.close()  # close the cursor
conector.close()  # disconnect from the database
print("\nEnd of program")
| 49 | 170 | 0.632209 | 327 | 2,254 | 4.357798 | 0.382263 | 0.044211 | 0.044912 | 0.042105 | 0.192281 | 0.137544 | 0.137544 | 0.137544 | 0.113684 | 0.113684 | 0 | 0.023865 | 0.237799 | 2,254 | 45 | 171 | 50.088889 | 0.805588 | 0.291482 | 0 | 0.119048 | 0 | 0.095238 | 0.424147 | 0.034134 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02381 | 0 | 0.02381 | 0.309524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5afabc1c92de296fe846af7a2b027458e2772a60 | 3,355 | py | Python | test/query_test.py | mochen1228/PyQuakes | 8e57c72d45e33812e9af5bd01fbce6c96bcd936d | [
"MIT"
] | 2 | 2021-06-07T21:23:30.000Z | 2021-06-08T17:07:52.000Z | test/query_test.py | mochen1228/PyQuakes | 8e57c72d45e33812e9af5bd01fbce6c96bcd936d | [
"MIT"
] | null | null | null | test/query_test.py | mochen1228/PyQuakes | 8e57c72d45e33812e9af5bd01fbce6c96bcd936d | [
"MIT"
] | 1 | 2021-06-07T21:30:09.000Z | 2021-06-07T21:30:09.000Z | import os
import sys
import unittest
from datetime import datetime
import requests
sys.path.append(os.path.abspath('..'))
from src.earthquake_query import EarthquakeQuery
from src.timeframe import TimeFrame
from src.location import Rectangle, Circle, RadiusUnit, GeoRectangle
from src.enum.contributor import Contributor
class TestEarthquakeQuery(unittest.TestCase):
def test_constructor_time_location(self):
# Test the EarthquakeQuery constructor to see if it can successfully set time and location
location = [Rectangle(), Circle(latitude=1, longitude=1, radius_unit=RadiusUnit.KM, radius=100)]
time = [TimeFrame(datetime(2010, 1, 1), datetime(2011, 1, 1))]
query = EarthquakeQuery(time=time, location=location)
self.assertEqual(location, query.get_location())
self.assertEqual(time, query.get_time())
def test_constructor_kwargs_no_key(self):
# Test the kwargs in the constructor to see if a ValueError
# is raised if the client is trying to set a non-existing parameter
self.assertRaises(ValueError, EarthquakeQuery, nokey="test")
def test_constructor_kwargs_set(self):
# Test if the client can set the other and extension parameters using kwargs in the constructor
minmagnitude = 5
contributor = Contributor.CONTRIBUTOR_AK
query = EarthquakeQuery(minmagnitude=minmagnitude, contributor=contributor)
self.assertEqual(minmagnitude, query.get_min_magnitude())
self.assertEqual(contributor, query.get_contributor())
def test_search_by_event_id(self):
# Test search by event id
event_id = "usc000lvb5"
url = "https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&eventid={}".format(event_id)
response = requests.get(url)
json1 = response.json()
detail = EarthquakeQuery.search_by_event_id(event_id=event_id)
self.assertEqual(json1, detail.get_raw_json())
def test_set_methods(self):
# Test set methods
query = EarthquakeQuery()
# set time
timeframe = TimeFrame(start_time=datetime(2010, 1, 1), end_time=datetime(2011, 1, 1))
time = [timeframe]
query.set_time(time)
self.assertEqual(query.get_time(), time)
# set location
EarthquakeQuery.set_geocode_key_path("../key.txt")
location = [GeoRectangle("Los Angeles")]
query.set_location(location)
self.assertEqual(query.get_location(), location)
# set min magnitude
min_magnitude = 5.0
query.set_min_magnitude(min_magnitude)
self.assertEqual(query.get_min_magnitude(), min_magnitude)
# set contributor
query.set_contributor(Contributor.CONTRIBUTOR_AK)
self.assertEqual(query.get_contributor(), Contributor.CONTRIBUTOR_AK)
def test_get_parameters(self):
# Test get query parameters method
start_time = datetime(2014, 1, 1)
end_time = datetime(2014, 1, 2)
query = EarthquakeQuery(time=[TimeFrame(start_time, end_time)])
parameter = {"time": [
{"starttime": start_time.isoformat().split(".")[0], "endtime": end_time.isoformat().split(".")[0]}],
"limit": 20000}
self.assertEqual(parameter, query.get_query_parameters())
if __name__ == '__main__':
unittest.main()
| 40.914634 | 112 | 0.692697 | 402 | 3,355 | 5.604478 | 0.276119 | 0.066578 | 0.035508 | 0.040834 | 0.068797 | 0.01953 | 0 | 0 | 0 | 0 | 0 | 0.021805 | 0.207154 | 3,355 | 81 | 113 | 41.419753 | 0.825188 | 0.129955 | 0 | 0 | 0 | 0.017544 | 0.050224 | 0 | 0 | 0 | 0 | 0 | 0.192982 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.280702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5afce0b13a04c2e681d1b3f1a01d8ccfeafdc806 | 2,256 | py | Python | 2021/day_10.py | tony-sappe/aoc-2021 | 526bec249467c5a28bbf68516c1918b8be9c8045 | [
"MIT"
] | 1 | 2022-02-19T10:13:54.000Z | 2022-02-19T10:13:54.000Z | 2021/day_10.py | tony-sappe/aoc-2021 | 526bec249467c5a28bbf68516c1918b8be9c8045 | [
"MIT"
] | null | null | null | 2021/day_10.py | tony-sappe/aoc-2021 | 526bec249467c5a28bbf68516c1918b8be9c8045 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Iterable, List, Tuple
Sample_Input = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]
"""
def parse_input(input: str) -> tuple:
return input.strip().split("\n")
def find_errors(lines: Iterable[str]) -> int:
illegal_chars = {")": 3, "]": 57, "}": 1197, ">": 25137}
_, _, errors = parse_lines(lines)
return sum([illegal_chars[e] for e in errors])
def complete_incomplete(lines: Iterable[str]) -> int:
closing_chars = {"(": ")", "[": "]", "{": "}", "<": ">"}
score_chars = {")": 1, "]": 2, "}": 3, ">": 4}
_, incomplete, _ = parse_lines(lines)
scores = []
for line in incomplete:
score = 0
for symbol in line[::-1]:
score *= 5
score += score_chars[closing_chars[symbol]]
scores.append(score)
scores.sort()
return scores[len(scores) // 2]
def parse_lines(lines: Iterable[str]) -> Tuple[List[int], List[int], List[int]]:
errors = []
incomplete = []
complete = []
for line in lines:
status, value = checker(line)
if status == "complete":
complete.append(line)
elif status == "open":
incomplete.append(value)
else:
errors.append(value)
return complete, incomplete, errors
def checker(line: str) -> Tuple[str, str]:
open_chars = {")": "(", "]": "[", "}": "{", ">": "<"}
stack = []
for l in line:
if l in "([{<":
stack.append(l)
else:
if len(stack) == 0:
return ("error", l)
last_char = stack.pop()
if open_chars[l] != last_char:
return ("error", l)
if len(stack) == 0:
return ("complete", "")
else:
return ("open", stack)
if __name__ == "__main__":
input_data = (Path.cwd() / "2021" / "data" / f"{Path(__file__).stem}_input.txt").read_text()
lines = parse_input(input_data)
print(f"Error Score is: {find_errors(lines)}")
print(f"Incomplete Score is: {complete_incomplete(lines)}")
| 26.541176 | 96 | 0.47828 | 231 | 2,256 | 4.497836 | 0.311688 | 0.023099 | 0.046198 | 0.036574 | 0.032724 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015504 | 0.256649 | 2,256 | 84 | 97 | 26.857143 | 0.604055 | 0 | 0 | 0.106061 | 0 | 0 | 0.192819 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.030303 | 0.015152 | 0.227273 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8502fa85f9bc5db59d062f1bedd1d8a262689de3 | 8,733 | py | Python | examples/6tisch/simple-node/simulations/simulation.py | lucasschnugger/contiki-ng | d61a7c60790382168f3ef4823e80a32e1c307f29 | [
"BSD-3-Clause"
] | null | null | null | examples/6tisch/simple-node/simulations/simulation.py | lucasschnugger/contiki-ng | d61a7c60790382168f3ef4823e80a32e1c307f29 | [
"BSD-3-Clause"
] | null | null | null | examples/6tisch/simple-node/simulations/simulation.py | lucasschnugger/contiki-ng | d61a7c60790382168f3ef4823e80a32e1c307f29 | [
"BSD-3-Clause"
] | null | null | null | import os, time, shutil, random, re
from xml.etree import cElementTree as ET
def run_test(cooja, dir, test, seed):
# run test simulation with seed
command = f"java -jar {cooja} -nogui={dir}{test} -random-seed={seed}"
os.system(command)
def remove_command_in_test(dir, test):
file = f"{dir}{test}"
root = ET.parse(file)
for element in root.iter():
for subelement in element:
if subelement.tag == "commands":
element.remove(subelement)
root.write(file)
def update_firmware_in_test(dir, test, firmware_network, firmware_joining):
file = f"{dir}{test}"
root = ET.parse(file)
for element in root.iter():
if element.tag == "motetype":
motetype = ""
for subelement in element:
if subelement.tag == "identifier":
motetype = subelement.text
for subelement in element:
if subelement.tag == "firmware":
firmware = firmware_joining if motetype == "z1-joining-node" else firmware_network
subelement.text = f"[CONTIKI_DIR]/examples/6tisch/simple-node/{firmware}"
root.write(file)
def add_scriptrunner_in_test(dir, test):
file = f"{dir}{test}"
scriptrunner = ""
if test.startswith("join-"):
scriptrunner = open(f"{scripts_dir}join.js").read()
elif test.startswith("create-"):
scriptrunner = open(f"{scripts_dir}create.js").read()
elif test.startswith("rejoin-"):
scriptrunner = open(f"{scripts_dir}rejoin.js").read()
root = ET.parse(file)
for element in root.iter():
if element.tag == "simconf":
plugin = ET.Element("plugin")
plugin.text = "org.contikios.cooja.plugins.ScriptRunner"
conf = ET.Element("plugin_config")
script = ET.Element("script")
script.text = scriptrunner
active = ET.Element("active")
active.text = "true"
conf.append(script)
conf.append(active)
plugin.append(conf)
element.append(plugin)
root.write(file)
def add_mobility_in_test(dir, test):
file = f"{dir}{test}"
root = ET.parse(file)
for element in root.iter():
if element.tag == "simconf":
plugin = ET.Element("plugin")
plugin.text = "Mobility"
conf = ET.Element("plugin_config")
positions = ET.Element("positions")
positions.set("EXPORT", "copy")
positions.text = f"[CONTIKI_DIR]/examples/6tisch/simple-node/simulations/positions/{test.replace('.csc', '.dat')}"
conf.append(positions)
plugin.append(conf)
element.append(plugin)
root.write(file)
def add_powertracker_in_test(dir, test):
file = f"{dir}{test}"
root = ET.parse(file)
for element in root.iter():
if element.tag == "simconf":
plugin = ET.Element("plugin")
plugin.text = "PowerTracker"
element.append(plugin)
root.write(file)
def check_if_test_successful(test_output_file_path, test):
f = open(test_output_file_path, "r")
test_output = f.read()
if("TEST FAILED" in test_output):
print("##### Test failed: 'TEST FAILED' found #####")
return False
if("TEST OK" not in test_output):
print("##### Test failed: 'TEST OK' not found #####")
return False
if(test.startswith("join") and "Network created with " not in test_output):
print("##### Test failed: network not established #####")
return False
# network_established_time = float(test_output.split("Network established time: ")[1].split(". First")[0])
# first_eb_time = float(test_output.split("First EB time: ")[1].split(". Join")[0])
# join_time = float(test_output.split("Join time:")[1].split(". Parents")[0])
# parents_considered = int(test_output.split("Parents considered: ")[1].split(".\n")[0])
#
# if join_time >= network_established_time: #if joining EB joins before network is completely established
# print("##### Test failed: Joining node finishes network #####")
# return False
return True
def add_testlog_parameters_csv(seed, test, firmware, test_output_file_path):
test_params = test.split(".")[0].split("-")
nodes = test_params[2]
topology = test_params[1]
firmware_params = firmware.split(".")[0].split("-")
tsch_version = firmware_params[1]
channels = re.sub("[^0-9]", "", firmware_params[2])
assoc_timeout = re.sub("[^0-9]", "", firmware_params[3])
csv_result = [seed, nodes, channels, topology, tsch_version, assoc_timeout]
csv_result_str = ",".join([str(elem) for elem in csv_result])
file = open(test_output_file_path, "r")
new_file_content = ""
for line in file:
if "," in line:
new_file_content += f"{csv_result_str},{line}"
else:
new_file_content += line
file.close()
file = open(test_output_file_path, "w")
file.write(new_file_content)
file.close()
def add_testlog_firmwares(firmware_network, firmware_joining, test_output_file_path):
    file = open(test_output_file_path, "r")
    content = file.read()
    file.close()
    new_file_content = f"z1-network-node firmware: {firmware_network}\n" + f"z1-joining-node firmware: {firmware_joining}\n" + content
    file = open(test_output_file_path, "w")
    file.write(new_file_content)
    file.close()
if os.path.isdir("/home/user/"):
cooja_jar = "/home/user/contiki-ng/tools/cooja/dist/cooja.jar"
run_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/"
sim_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/"
tests_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/tests/"
scripts_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/scriptrunners/"
pos_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/positions/"
log_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/logs/"
if not os.path.isdir(tests_dir):
os.mkdir(tests_dir)
if not os.path.isdir(scripts_dir):
os.mkdir(scripts_dir)
if not os.path.isdir(pos_dir):
os.mkdir(pos_dir)
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
# seeds = [15557,65890,237601,268521,537634,571714,881378,928542,963159,978437]
seeds = random.sample(range(0,999999), 15) # 15 random seeds
seeds.sort()
firmwares = [
{"joining": "node-custom-16c-16s-2eb.z1", "network": "node-network-16c.z1"}
# {"joining": "node-classic-16c-180s-2eb.z1", "network": "node-network-16c.z1"}
]
tests = [f for f in os.listdir(tests_dir) if os.path.isfile(f"{tests_dir}{f}")]
tests.sort()
print("")
print(f"Running tests {tests}.")
print(f"Running tests on firmwares {firmwares}.")
print(f"Running tests on seeds {seeds}.")
os.chdir(run_dir) # change working directory to run_dir
for test in tests: # run each test from tests_dir
for seed in seeds: # run test with each seed
for firmware in firmwares: # run test with every firmware
shutil.copy(f"{tests_dir}{test}", f"{run_dir}{test}") # copy test to run_dir
remove_command_in_test(run_dir, test) # remove commands in simulation test file
update_firmware_in_test(run_dir, test, firmware["network"], firmware["joining"]) # change firmware in file
add_mobility_in_test(run_dir, test) # add mobility plugin + file to test
add_powertracker_in_test(run_dir, test) # add powertracker plugin to test
add_scriptrunner_in_test(run_dir, test) # add scriptrunner to test for extraction of data and controlling test
print(f"\n\n ########### Now running test '{test}' with firmware '{firmware}' and seed '{seed}' ##############\n")
run_test(cooja_jar, run_dir, test, seed) # run simulation with seed
local_seed = seed
while not check_if_test_successful(f"{run_dir}COOJA.testlog", test): # evaluate if test is OK
local_seed = random.randint(0,999999)
run_test(cooja_jar, run_dir, test, local_seed)
add_testlog_parameters_csv(local_seed, test, firmware["joining"], f"{run_dir}COOJA.testlog") # add test parameters to csv line in file
add_testlog_firmwares(firmware["network"], firmware["joining"], f"{run_dir}COOJA.testlog") # add node firmwares used to top of file
os.rename(f"{run_dir}COOJA.testlog", f"{log_dir}{test.split('.')[0]}_{firmware['joining'].split('.')[0]}_{local_seed}.testlog") # move simulation result log
os.remove(f"{run_dir}{test}") # delete test from run_dir
time.sleep(1)
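
# For reference, the ScriptRunner block injected by add_scriptrunner_in_test()
# serializes to roughly this shape inside the .csc file's <simconf> element
# (a sketch reconstructed from the ElementTree calls above, not copied from an
# actual Cooja file):
#
#   <plugin>org.contikios.cooja.plugins.ScriptRunner
#     <plugin_config>
#       <script>...contents of join.js / create.js / rejoin.js...</script>
#       <active>true</active>
#     </plugin_config>
#   </plugin>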
| 42.6 | 169 | 0.642276 | 1,170 | 8,733 | 4.640171 | 0.17094 | 0.029656 | 0.016578 | 0.035366 | 0.441886 | 0.354025 | 0.302081 | 0.212931 | 0.180328 | 0.172223 | 0 | 0.018689 | 0.215733 | 8,733 | 204 | 170 | 42.808824 | 0.773982 | 0.142792 | 0 | 0.29697 | 0 | 0.042424 | 0.248187 | 0.119124 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.012121 | 0 | 0.090909 | 0.048485 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8506b84f31a4d3a7cbcf45361a19e0b5e9647f9e | 5,217 | py | Python | web2py-appliances-master/TinyWebsite/controllers/pages.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | web2py-appliances-master/TinyWebsite/controllers/pages.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | web2py-appliances-master/TinyWebsite/controllers/pages.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | #from gluon.debug import dbg
def show_page():
    """
    Show the requested page
    """
    from gluon.tools import prettydate
    manager_toolbar = ManagerToolbar('page')
    if request.args(0) and request.args(0).isdigit():
        page = db.page(request.args(0))
    else:
        page = db(db.page.url == request.args(0)).select().first()
    # if the page has no content, we select the first child (len < 8 to avoid having a page with just "<br />")
    if page and len(page.content) < 8:
        child = db(db.page.parent == page).select(orderby=db.page.rank | db.page.title).first()
        if child:
            page = child
    if not page:
        if request.args(0) and request.args(0).lower() == 'images':
            redirect(URL('images'))
        else:
            page = db(db.page.is_index == True).select().first()
    disqus_shortname = None
    if page.allow_disqus and WEBSITE_PARAMETERS.disqus_shortname:
        disqus_shortname = WEBSITE_PARAMETERS.disqus_shortname
    pretty_date = prettydate(page.modified_on, T)
    header_component = db.page_component(page.header_component)
    left_sidebar_component = db.page_component(page.left_sidebar_component)
    right_sidebar_component = db.page_component(page.right_sidebar_component)
    left_footer_component = db.page_component(page.left_footer_component)
    middle_footer_component = db.page_component(page.middle_footer_component)
    right_footer_component = db.page_component(page.right_footer_component)
    central_component = db.page_component(page.central_component)
    return dict(page=page,
                header_component=header_component,
                left_sidebar_enabled=page.left_sidebar_enabled,
                right_sidebar_enabled=page.right_sidebar_enabled,
                left_sidebar_component=left_sidebar_component,
                right_sidebar_component=right_sidebar_component,
                left_footer_component=left_footer_component,
                middle_footer_component=middle_footer_component,
                right_footer_component=right_footer_component,
                central_component=central_component,
                manager_toolbar=manager_toolbar,
                pretty_date=pretty_date,
                disqus_shortname=disqus_shortname)
@auth.requires_membership('manager')
def delete_page():
    if request.args(0) and request.args(0).isdigit():
        page = db.page(request.args(0))
    else:
        page = db(db.page.url == request.args(0)).select().first()
    form = None
    if len(request.args) and page:
        form = FORM.confirm(T('Yes, I really want to delete this page'), {T('Back'): URL('show_page', args=page.id)})
        if form.accepted:
            # remove images linked to the page (the folder is named after the page id)
            pathname = path.join(request.folder, 'static', 'images', 'pages_content', str(page.id))
            if path.exists(pathname):
                shutil.rmtree(pathname)
            # remove the page
            db(db.page.id == page.id).delete()
            session.flash = T('Page deleted')
            redirect(URL('default', 'index'))
    return dict(page=page, form=form)
@auth.requires_membership('manager')
def edit_page():
    """
    """
    advanced_fields = ["{}_{}__row".format(db.page, field) for field in
                       [db.page.rank.name,
                        db.page.url.name,
                        db.page.is_index.name,
                        db.page.is_enabled.name,
                        db.page.header_component.name,
                        db.page.left_sidebar_enabled.name,
                        db.page.right_sidebar_enabled.name,
                        db.page.left_footer_component.name,
                        db.page.middle_footer_component.name,
                        db.page.right_footer_component.name,
                        db.page.central_component.name,
                        db.page.allow_disqus.name,
                        db.page.max_content_height.name]
                       ]
    page = None
    page_id = request.args(0)
    if page_id:
        if page_id.isdigit():
            page = db.page(page_id)
        else:
            page = db(db.page.url == page_id).select().first()
    if len(request.args) and page:
        crud.settings.update_deletable = False
        form = crud.update(db.page, page, next=URL('show_page', args=page.id))
        my_extra_element = XML("""
            <div id="wysiwyg_management">
                <ul class="nav nav-pills">
                    <li id="activate_wysiwyg" class="active">
                        <a href="#">%s</a>
                    </li>
                    <li id="remove_wysiwyg" >
                        <a href="#">%s</a>
                    </li>
                </ul>
            </div>
            """ % (T('WYSIWYG view'), T('HTML view')))
        form[0][4].append(my_extra_element)
    else:
        # Hide the "content" of the page: the page has no title
        # and it is impossible to initialise the upload field with page.url
        db.page.content.readable = db.page.content.writable = False
        form = crud.create(db.page, next='edit_page/[id]')
    return dict(form=form, advanced_fields=advanced_fields)
| 44.589744 | 115 | 0.593253 | 627 | 5,217 | 4.736842 | 0.239234 | 0.072727 | 0.040404 | 0.056566 | 0.382492 | 0.269697 | 0.10101 | 0.10101 | 0.082492 | 0.07138 | 0 | 0.004087 | 0.296531 | 5,217 | 116 | 116 | 44.974138 | 0.805177 | 0.062105 | 0 | 0.191919 | 0 | 0 | 0.116327 | 0.009265 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.010101 | 0 | 0.070707 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
850800f055d743a3abfb7230c39b201f6bb9fe52 | 4,307 | py | Python | tests/unit/test_units.py | timothygebhard/hsr4hci | 0b38c26fac2fee9e564a9ab981fca715d5577e1e | [
"BSD-3-Clause"
] | 1 | 2022-03-24T04:33:06.000Z | 2022-03-24T04:33:06.000Z | tests/unit/test_units.py | timothygebhard/hsr4hci | 0b38c26fac2fee9e564a9ab981fca715d5577e1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_units.py | timothygebhard/hsr4hci | 0b38c26fac2fee9e564a9ab981fca715d5577e1e | [
"BSD-3-Clause"
] | null | null | null | """
Tests for units.py
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from astropy.units import Quantity, UnitsError, UnitConversionError
import numpy as np
import pytest
from hsr4hci.units import (
    flux_ratio_to_magnitudes,
    InstrumentUnitsContext,
    magnitude_to_flux_ratio,
)
# -----------------------------------------------------------------------------
# TEST CASES
# -----------------------------------------------------------------------------
def test__instrument_units_context() -> None:
    """
    Test `hsr4hci.units.InstrumentUnitsContext`.
    """

    # Case 1 (illegal constructor argument: pixscale)
    with pytest.raises(UnitsError) as units_error:
        InstrumentUnitsContext(
            pixscale=Quantity(0.0271, 'arcsec'),
            lambda_over_d=Quantity(0.096, 'arcsec'),
        )
    assert "Argument 'pixscale' to function" in str(units_error)

    # Case 2 (illegal constructor argument: lambda_over_d)
    with pytest.raises(UnitsError) as units_error:
        InstrumentUnitsContext(
            pixscale=Quantity(0.0271, 'arcsec / pixel'),
            lambda_over_d=Quantity(0.096, 'gram'),
        )
    assert "Argument 'lambda_over_d' to function" in str(units_error)

    instrument_units_context = InstrumentUnitsContext(
        pixscale=Quantity(0.0271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.096, 'arcsec'),
    )

    # Case 3 (conversion from pixel to arcsec / lambda_over_d)
    with instrument_units_context:
        quantity = Quantity(1.0, 'pixel')
        assert quantity.to('arcsec').value == 0.0271
        assert quantity.to('lambda_over_d').value == 0.28229166666666666

    # Case 4 (context is re-usable)
    with instrument_units_context:
        quantity = Quantity(1.0, 'pixel')
        assert quantity.to('arcsec').value == 0.0271
        assert quantity.to('lambda_over_d').value == 0.28229166666666666

    # Case 5 (context is local; conversions do not work outside the context)
    with pytest.raises(UnitConversionError) as unit_conversion_error:
        _ = quantity.to('arcsec').value
    assert "'pix' and 'arcsec' (angle) are not" in str(unit_conversion_error)

    # Case 6 (conversion from arcsec to pixel / lambda_over_d)
    with instrument_units_context:
        quantity = Quantity(1.0, 'arcsec')
        assert quantity.to('pixel').value == 36.90036900369004
        assert quantity.to('lambda_over_d').value == 10.416666666666666

    # Case 7 (conversion from lambda_over_d to arcsec / pixel)
    with instrument_units_context:
        quantity = Quantity(1.0, 'lambda_over_d')
        assert quantity.to('arcsec').value == 0.096
        assert quantity.to('pixel').value == 3.5424354243542435

    # Case 8 (contexts can be overwritten / re-defined)
    instrument_units_context = InstrumentUnitsContext(
        pixscale=Quantity(0.271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.96, 'arcsec'),
    )
    with instrument_units_context:
        quantity = Quantity(1.0, 'pixel')
        assert quantity.to('arcsec').value == 0.271
        assert quantity.to('lambda_over_d').value == 0.2822916666666667

    # Case 9 (different contexts can co-exist)
    context_a = InstrumentUnitsContext(
        pixscale=Quantity(0.0271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.096, 'arcsec'),
    )
    context_b = InstrumentUnitsContext(
        pixscale=Quantity(0.271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.96, 'arcsec'),
    )
    quantity = Quantity(1.0, 'pixel')
    with context_a:
        assert quantity.to('arcsec').value == 0.0271
    with context_b:
        assert quantity.to('arcsec').value == 0.271
def test__flux_ratio_to_magnitudes() -> None:
    """
    Test `hsr4hci.units.flux_ratio_to_magnitudes`.
    """

    assert flux_ratio_to_magnitudes(100) == -5
    assert np.allclose(
        flux_ratio_to_magnitudes(np.array([100, 0.01])), np.array([-5, 5])
    )


def test__magnitude_to_flux_ratio() -> None:
    """
    Test `hsr4hci.units.magnitude_to_flux_ratio`.
    """

    assert magnitude_to_flux_ratio(-5) == 100
    assert np.allclose(
        magnitude_to_flux_ratio(np.array([-5, 5])), np.array([100, 0.01])
    )
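
# For orientation, the values asserted above follow the standard astronomical
# magnitude convention (a sketch of the relation under test; the real
# implementation lives in hsr4hci.units):
#
#   magnitude  = -2.5 * log10(flux_ratio)
#   flux_ratio = 10 ** (-magnitude / 2.5)
#
# e.g. -2.5 * log10(100) == -5, and 10 ** (5 / 2.5) == 100.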
| 34.18254 | 79 | 0.618296 | 495 | 4,307 | 5.183838 | 0.19798 | 0.062354 | 0.068589 | 0.057288 | 0.533905 | 0.494544 | 0.464147 | 0.397506 | 0.367498 | 0.367498 | 0 | 0.066512 | 0.200604 | 4,307 | 125 | 80 | 34.456 | 0.678769 | 0.221036 | 0 | 0.453333 | 0 | 0 | 0.107664 | 0 | 0 | 0 | 0 | 0 | 0.253333 | 1 | 0.04 | false | 0 | 0.053333 | 0 | 0.093333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
850ce0650d166e24edaa3081144f34f40dfdab65 | 45,045 | py | Python | pynmrstar/entry.py | uwbmrb/PyNMRSTAR | c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171 | [
"MIT"
] | 16 | 2017-02-02T05:00:50.000Z | 2021-05-25T11:13:15.000Z | pynmrstar/entry.py | uwbmrb/PyNMRSTAR | c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171 | [
"MIT"
] | 29 | 2016-07-14T21:02:18.000Z | 2021-06-26T17:24:07.000Z | pynmrstar/entry.py | bmrb-io/PyNMRSTAR | 55df5bf7de192e7a6c95f37e0756f09e3f504170 | [
"MIT"
] | 4 | 2016-04-14T16:29:49.000Z | 2017-02-28T02:01:57.000Z | import hashlib
import json
import logging
import warnings
from io import StringIO
from typing import TextIO, BinaryIO, Union, List, Optional, Dict, Any, Tuple
from pynmrstar import definitions, utils, loop as loop_mod, parser as parser_mod, saveframe as saveframe_mod
from pynmrstar._internal import _json_serialize, _interpret_file, _get_entry_from_database, write_to_file
from pynmrstar.exceptions import InvalidStateError
from pynmrstar.schema import Schema
class Entry(object):
"""An object oriented representation of a BMRB entry. You can initialize this
object several ways; (e.g. from a file, from the official database,
from scratch) see the class methods below. """
def __contains__(self, item: Any):
""" Check if the given item is present in the entry. """
# Prepare for processing
if isinstance(item, (list, tuple)):
to_process: List[Union[str, saveframe_mod.Saveframe, loop_mod.Loop]] = list(item)
elif isinstance(item, (loop_mod.Loop, saveframe_mod.Saveframe, str)):
to_process = [item]
else:
return False
for item in to_process:
if isinstance(item, saveframe_mod.Saveframe):
if item not in self._frame_list:
return False
elif isinstance(item, (loop_mod.Loop, str)):
found = False
for saveframe in self._frame_list:
if item in saveframe:
found = True
break
if not found:
return False
else:
return False
return True
def __delitem__(self, item: Union['saveframe_mod.Saveframe', int, str]) -> None:
"""Remove the indicated saveframe."""
if isinstance(item, int):
try:
del self._frame_list[item]
except IndexError:
raise IndexError(f'Index out of range: no saveframe at index: {item}')
else:
self.remove_saveframe(item)
def __eq__(self, other) -> bool:
"""Returns True if this entry is equal to another entry, false
if it is not equal."""
if not isinstance(other, Entry):
return False
return (self.entry_id, self._frame_list) == (other.entry_id, other._frame_list)
def __getitem__(self, item: Union[int, str]) -> 'saveframe_mod.Saveframe':
"""Get the indicated saveframe."""
try:
return self._frame_list[item]
except TypeError:
return self.get_saveframe_by_name(item)
    def __init__(self, **kwargs) -> None:
        """ You should not directly instantiate an Entry using this method.
            Instead use the class methods:
              :py:meth:`Entry.from_database`, :py:meth:`Entry.from_file`,
              :py:meth:`Entry.from_string`, :py:meth:`Entry.from_scratch`,
              :py:meth:`Entry.from_json`, and :py:meth:`Entry.from_template`"""

        # Default initializations
        self._entry_id: Union[str, int] = 0
        self._frame_list: List[saveframe_mod.Saveframe] = []
        self.source: Optional[str] = None

        # They initialized us wrong
        if len(kwargs) == 0:
            raise ValueError("You should not directly instantiate an Entry using this method. Instead use the "
                             "class methods: Entry.from_database(), Entry.from_file(), Entry.from_string(), "
                             "Entry.from_scratch(), and Entry.from_json().")

        if 'the_string' in kwargs:
            # Parse from a string by wrapping it in StringIO
            star_buffer: StringIO = StringIO(kwargs['the_string'])
            self.source = "from_string()"
        elif 'file_name' in kwargs:
            star_buffer = _interpret_file(kwargs['file_name'])
            self.source = f"from_file('{kwargs['file_name']}')"
        # Creating from template (schema)
        elif 'all_tags' in kwargs:
            self._entry_id = kwargs['entry_id']

            saveframe_categories: dict = {}
            schema = utils.get_schema(kwargs['schema'])
            schema_obj = schema.schema
            for tag in [schema_obj[x.lower()] for x in schema.schema_order]:
                category = tag['SFCategory']
                if category not in saveframe_categories:
                    saveframe_categories[category] = True
                    templated_saveframe = saveframe_mod.Saveframe.from_template(category, category + "_1",
                                                                                entry_id=self._entry_id,
                                                                                all_tags=kwargs['all_tags'],
                                                                                default_values=kwargs['default_values'],
                                                                                schema=schema)
                    self._frame_list.append(templated_saveframe)
            entry_saveframe = self.get_saveframes_by_category('entry_information')[0]
            entry_saveframe['NMR_STAR_version'] = schema.version
            entry_saveframe['Original_NMR_STAR_version'] = schema.version
            return
        else:
            # Initialize a blank entry
            self._entry_id = kwargs['entry_id']
            self.source = "from_scratch()"
            return

        # Load the BMRB entry from the file
        parser: parser_mod.Parser = parser_mod.Parser(entry_to_parse_into=self)
        parser.parse(star_buffer.read(), source=self.source, convert_data_types=kwargs.get('convert_data_types', False))
    def __iter__(self) -> saveframe_mod.Saveframe:
        """ Yields each of the saveframes contained within the entry. """

        for saveframe in self._frame_list:
            yield saveframe

    def __len__(self) -> int:
        """ Returns the number of saveframes in the entry."""

        return len(self._frame_list)

    def __repr__(self) -> str:
        """Returns a description of the entry."""

        return f"<pynmrstar.Entry '{self._entry_id}' {self.source}>"

    def __setitem__(self, key: Union[int, str], item: 'saveframe_mod.Saveframe') -> None:
        """Set the indicated saveframe."""

        # It is a saveframe
        if isinstance(item, saveframe_mod.Saveframe):
            # Add by ordinal
            if isinstance(key, int):
                self._frame_list[key] = item
            # TODO: Consider stripping this behavior out - it isn't clear it is useful
            else:
                # Add by key
                contains_frame: bool = False
                for pos, frame in enumerate(self._frame_list):
                    if frame.name == key:
                        if contains_frame:
                            raise ValueError(f"Cannot replace the saveframe with the name '{frame.name}' "
                                             f"because multiple saveframes in the entry have the same name. "
                                             f'This library does not allow that normally, as it is '
                                             f'invalid NMR-STAR. Did you manually edit the Entry.frame_list '
                                             f'object? Please use the Entry.add_saveframe() method instead to '
                                             f'add new saveframes.')
                        self._frame_list[pos] = item
                        contains_frame = True
                if not contains_frame:
                    raise ValueError(f"Saveframe with name '{key}' does not exist and therefore cannot be "
                                     f"written to. Use the add_saveframe() method to add new saveframes.")
        else:
            raise ValueError("You can only assign a saveframe to an entry splice. You attempted to assign: "
                             f"'{repr(item)}'")

    def __str__(self, skip_empty_loops: bool = False, skip_empty_tags: bool = False, show_comments: bool = True) -> str:
        """Returns the entire entry in STAR format as a string."""

        sf_strings = []
        seen_saveframes = {}
        for saveframe_obj in self:
            if saveframe_obj.category in seen_saveframes:
                sf_strings.append(saveframe_obj.format(skip_empty_loops=skip_empty_loops,
                                                       skip_empty_tags=skip_empty_tags, show_comments=False))
            else:
                sf_strings.append(saveframe_obj.format(skip_empty_loops=skip_empty_loops,
                                                       skip_empty_tags=skip_empty_tags, show_comments=show_comments))
                seen_saveframes[saveframe_obj.category] = True
        return f"data_{self.entry_id}\n\n" + "\n".join(sf_strings)
    @property
    def category_list(self) -> List[str]:
        """ Returns a list of the unique categories present in the entry. """

        category_list = []
        for saveframe in self._frame_list:
            category = saveframe.category
            if category and category not in category_list:
                category_list.append(category)
        return list(category_list)

    @property
    def empty(self) -> bool:
        """ Check if the entry has no data. Ignore the structural tags."""

        for saveframe in self._frame_list:
            if not saveframe.empty:
                return False
        return True

    @property
    def entry_id(self) -> Union[str, int]:
        """ When read, fetches the entry ID.

        When set, updates the entry ID for the Entry, and updates all the tags which
        are foreign keys of the Entry_ID. (For example, Entry.ID and
        Citation.Entry_ID will be updated, if present.)
        """

        return self._entry_id

    @entry_id.setter
    def entry_id(self, value: Union[str, int]) -> None:
        self._entry_id = value

        schema = utils.get_schema()
        for saveframe in self._frame_list:
            for tag in saveframe.tags:
                fqtn = (saveframe.tag_prefix + "." + tag[0]).lower()
                try:
                    if schema.schema[fqtn]['entryIdFlg'] == 'Y':
                        tag[1] = self._entry_id
                except KeyError:
                    pass

            for loop in saveframe.loops:
                for tag in loop.tags:
                    fqtn = (loop.category + "." + tag).lower()
                    try:
                        if schema.schema[fqtn]['entryIdFlg'] == 'Y':
                            loop[tag] = [self._entry_id] * len(loop[tag])
                    except KeyError:
                        pass
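
    # A minimal usage sketch for the entry_id setter above (the entry ID shown
    # is illustrative; exactly which tags get rewritten depends on the loaded
    # schema's 'entryIdFlg' flags):
    #
    #   entry = Entry.from_scratch(15000)
    #   entry.entry_id = 15001   # e.g. Entry.ID and Citation.Entry_ID now read 15001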
    @property
    def frame_dict(self) -> Dict[str, 'saveframe_mod.Saveframe']:
        """Returns a dictionary of saveframe name -> saveframe object mappings."""

        fast_dict = dict((frame.name, frame) for frame in self._frame_list)

        # If there are no duplicates then continue
        if len(fast_dict) == len(self._frame_list):
            return fast_dict

        # Figure out where the duplicate is
        frame_dict = {}
        for frame in self._frame_list:
            if frame.name in frame_dict:
                raise InvalidStateError("The entry has multiple saveframes with the same name. That is not allowed in "
                                        "the NMR-STAR format. Please remove or rename one. Duplicate name: "
                                        f"'{frame.name}'. Furthermore, please use Entry.add_saveframe() and "
                                        f"Entry.remove_saveframe() rather than manually editing the Entry.frame_list "
                                        f"list, which will prevent this state from existing in the future.")
            frame_dict[frame.name] = frame
        return frame_dict

    @property
    def frame_list(self) -> List['saveframe_mod.Saveframe']:
        return self._frame_list
    @classmethod
    def from_database(cls, entry_num: Union[str, int], convert_data_types: bool = False):
        """Create an entry corresponding to the most up to date entry on
        the public BMRB server. (Requires ability to initiate outbound
        HTTP connections.)

        Setting convert_data_types to True will automatically convert
        the data loaded from the file into the corresponding python type as
        determined by loading the standard BMRB schema. This would mean that
        all floats will be represented as decimal.Decimal objects, all integers
        will be python int objects, strings and vars will remain strings, and
        dates will become datetime.date objects. When printing str() is called
        on all objects. Other than converting uppercase "E"s in scientific
        notation floats to lowercase "e"s this should not cause any change in
        the way re-printed NMR-STAR objects are displayed."""

        return _get_entry_from_database(entry_num, convert_data_types=convert_data_types)

    @classmethod
    def from_file(cls, the_file: Union[str, TextIO, BinaryIO], convert_data_types: bool = False):
        """Create an entry by loading in a file. If the_file starts with
        http://, https://, or ftp:// then we will use those protocols to
        attempt to open the file.

        Setting convert_data_types to True will automatically convert
        the data loaded from the file into the corresponding python type as
        determined by loading the standard BMRB schema. This would mean that
        all floats will be represented as decimal.Decimal objects, all integers
        will be python int objects, strings and vars will remain strings, and
        dates will become datetime.date objects. When printing str() is called
        on all objects. Other than converting uppercase "E"s in scientific
        notation floats to lowercase "e"s this should not cause any change in
        the way re-printed NMR-STAR objects are displayed."""

        return cls(file_name=the_file, convert_data_types=convert_data_types)

    @classmethod
    def from_json(cls, json_dict: Union[dict, str]):
        """Create an entry from JSON (serialized or unserialized JSON)."""

        # If they provided a string, try to load it using JSON
        if not isinstance(json_dict, dict):
            try:
                json_dict = json.loads(json_dict)
            except (TypeError, ValueError):
                raise ValueError("The JSON you provided was neither a Python dictionary nor a JSON string.")

        # Make sure it has the correct keys
        if "saveframes" not in json_dict:
            raise ValueError("The JSON you provide must be a hash and must contain the key 'saveframes' - even if the "
                             "key points to 'None'.")
        if "entry_id" not in json_dict and "bmrb_id" not in json_dict:
            raise ValueError("The JSON you provide must be a hash and must contain the key 'entry_id' - even if the"
                             " key points to 'None'.")

        # Until the migration is complete, 'bmrb_id' is a synonym for 'entry_id'
        if 'entry_id' not in json_dict:
            json_dict['entry_id'] = json_dict['bmrb_id']

        # Create an entry from scratch and populate it
        ret = Entry.from_scratch(json_dict['entry_id'])
        ret._frame_list = [saveframe_mod.Saveframe.from_json(x) for x in json_dict['saveframes']]
        ret.source = "from_json()"

        # Return the new entry
        return ret

    @classmethod
    def from_string(cls, the_string: str, convert_data_types: bool = False):
        """Create an entry by parsing a string.

        Setting convert_data_types to True will automatically convert
        the data loaded from the file into the corresponding python type as
        determined by loading the standard BMRB schema. This would mean that
        all floats will be represented as decimal.Decimal objects, all integers
        will be python int objects, strings and vars will remain strings, and
        dates will become datetime.date objects. When printing str() is called
        on all objects. Other than converting uppercase "E"s in scientific
        notation floats to lowercase "e"s this should not cause any change in
        the way re-printed NMR-STAR objects are displayed."""

        return cls(the_string=the_string, convert_data_types=convert_data_types)

    @classmethod
    def from_scratch(cls, entry_id: Union[str, int]):
        """Create an empty entry that you can programmatically add to.
        You must pass a value corresponding to the Entry ID.
        (The unique identifier "xxx" from "data_xxx".)"""

        return cls(entry_id=entry_id)

    @classmethod
    def from_template(cls, entry_id, all_tags=False, default_values=False, schema=None) -> 'Entry':
        """ Create an entry that has all of the saveframes and loops from the
        schema present. No values will be assigned. Specify the entry
        ID when calling this method.

        The optional argument 'all_tags' forces all tags to be included
        rather than just the mandatory tags.

        The optional argument 'default_values' will insert the default
        values from the schema.

        The optional argument 'schema' allows providing a custom schema."""

        schema = utils.get_schema(schema)
        entry = cls(entry_id=entry_id, all_tags=all_tags, default_values=default_values, schema=schema)
        entry.source = f"from_template({schema.version})"
        return entry
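
    # e.g. (a sketch; fetching the schema may require network access):
    #
    #   entry = Entry.from_template(entry_id="my_entry", default_values=True)
    #   print(entry.category_list)   # one templated saveframe per schema category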
    def add_saveframe(self, frame) -> None:
        """Add a saveframe to the entry."""

        if not isinstance(frame, saveframe_mod.Saveframe):
            raise ValueError("You can only add instances of saveframes using this method. You attempted to add "
                             f"the object: '{repr(frame)}'.")

        # Do not allow the addition of saveframes with the same name
        # as a saveframe which already exists in the entry
        if frame.name in self.frame_dict:
            raise ValueError(f"Cannot add a saveframe with name '{frame.name}' since a saveframe with that "
                             f"name already exists in the entry.")

        self._frame_list.append(frame)

    def compare(self, other) -> List[str]:
        """Returns the differences between two entries as a list.
        Non-equal entries will always be detected, but the specific differences
        detected depend on the order of the entries."""

        diffs = []
        if self is other:
            return []
        if isinstance(other, str):
            if str(self) == other:
                return []
            else:
                return ['String was not exactly equal to entry.']
        elif not isinstance(other, Entry):
            return ['Other object is not of class Entry.']
        try:
            if str(self.entry_id) != str(other.entry_id):
                diffs.append(f"Entry ID does not match between entries: '{self.entry_id}' vs '{other.entry_id}'.")
            if len(self._frame_list) != len(other.frame_list):
                diffs.append(f"The number of saveframes in the entries are not equal: '{len(self._frame_list)}' vs "
                             f"'{len(other.frame_list)}'.")
            for frame in self._frame_list:
                other_frame_dict = other.frame_dict
                if frame.name not in other_frame_dict:
                    diffs.append(f"No saveframe with name '{frame.name}' in other entry.")
                else:
                    comp = frame.compare(other_frame_dict[frame.name])
                    if len(comp) > 0:
                        diffs.append(f"Saveframes do not match: '{frame.name}'.")
                        diffs.extend(comp)
        except AttributeError as err:
            diffs.append(f"An exception occurred while comparing: '{err}'.")

        return diffs

    def add_missing_tags(self, schema: 'Schema' = None, all_tags: bool = False) -> None:
        """ Automatically adds any missing tags (according to the schema)
        to all saveframes and loops and sorts the tags. """

        for saveframe in self._frame_list:
            saveframe.add_missing_tags(schema=schema, all_tags=all_tags)

    def delete_empty_saveframes(self) -> None:
        """ Deprecated. Please use :py:meth:`pynmrstar.Entry.remove_empty_saveframes`. """

        warnings.warn('Deprecated. Please use remove_empty_saveframes() instead.', DeprecationWarning)
        return self.remove_empty_saveframes()

    def format(self, skip_empty_loops: bool = True, skip_empty_tags: bool = False, show_comments: bool = True) -> str:
        """ The same as calling str(Entry), except that you can pass options
        to customize how the entry is printed.

        skip_empty_loops will omit printing loops with no tags at all. (A loop with null tags is not "empty".)
        skip_empty_tags will omit tags in the saveframes and loops which have no non-null values.
        show_comments will show the standard comments before a saveframe."""

        return self.__str__(skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags,
                            show_comments=show_comments)
    def get_json(self, serialize: bool = True) -> Union[dict, str]:
        """ Returns the entry in JSON format. If serialize is set to
        False a dictionary representation of the entry that is
        serializeable is returned instead."""

        frames = [x.get_json(serialize=False) for x in self._frame_list]

        entry_dict = {
            "entry_id": self.entry_id,
            "saveframes": frames
        }

        if serialize:
            return json.dumps(entry_dict, default=_json_serialize)
        else:
            return entry_dict

    def get_loops_by_category(self, value: str) -> List['loop_mod.Loop']:
        """Allows fetching loops by category."""

        value = utils.format_category(value).lower()

        results = []
        for frame in self._frame_list:
            for one_loop in frame.loops:
                if one_loop.category.lower() == value:
                    results.append(one_loop)
        return results

    def get_saveframe_by_name(self, saveframe_name: str) -> 'saveframe_mod.Saveframe':
        """Allows fetching a saveframe by name."""

        frames = self.frame_dict
        if saveframe_name in frames:
            return frames[saveframe_name]
        else:
            raise KeyError(f"No saveframe with name '{saveframe_name}'")

    def get_saveframes_by_category(self, value: str) -> List['saveframe_mod.Saveframe']:
        """Allows fetching saveframes by category."""

        return self.get_saveframes_by_tag_and_value("sf_category", value)

    def get_saveframes_by_tag_and_value(self, tag_name: str, value: Any) -> List['saveframe_mod.Saveframe']:
        """Allows fetching saveframe(s) by tag and tag value."""

        ret_frames = []
        for frame in self._frame_list:
            results = frame.get_tag(tag_name)
            if results != [] and results[0] == value:
                ret_frames.append(frame)
        return ret_frames

    def get_tag(self, tag: str, whole_tag: bool = False) -> list:
        """ Given a tag (E.g. _Assigned_chem_shift_list.Data_file_name)
        return a list of all values for that tag. Specify whole_tag=True
        and the [tag_name, tag_value] pair will be returned."""

        if "." not in str(tag):
            raise ValueError("You must provide the tag category to call this method at the entry level. For "
                             "example, you must provide 'Entry.Title' rather than 'Title' as the tag if calling"
                             " this at the Entry level. You can call Saveframe.get_tag('Title') without issue.")

        results = []
        for frame in self._frame_list:
            results.extend(frame.get_tag(tag, whole_tag=whole_tag))
        return results

    def get_tags(self, tags: list) -> Dict[str, list]:
        """ Given a list of tags, get all of the tags and return the
        results in a dictionary."""

        # All tags
        if tags is None or not isinstance(tags, list):
            raise ValueError("Please provide a list of tags.")

        results = {}
        for tag in tags:
            results[tag] = self.get_tag(tag)
        return results
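
    # A minimal usage sketch of the two tag accessors above (tag names taken
    # from the docstrings; 'entry' is assumed to be an already-loaded Entry):
    #
    #   titles = entry.get_tag('Entry.Title')
    #   many = entry.get_tags(['Entry.Title', 'Citation.Entry_ID'])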
    def normalize(self, schema: Optional['Schema'] = None) -> None:
        """ Sorts saveframes, loops, and tags according to the schema
        provided (or BMRB default if none provided).

        Also re-assigns ID tag values and updates tag links to ID values."""

        # Assign all the ID tags, and update all links to ID tags
        my_schema = utils.get_schema(schema)

        # Sort the saveframes according to ID, if an ID exists. Otherwise, still sort by category
        ordering = my_schema.category_order

        def sf_key(_: saveframe_mod.Saveframe) -> Tuple[int, Union[int, float]]:
            """ Helper function to sort the saveframes.
            Returns (category order, saveframe order) """

            # If not a real category, generate an artificial but stable order > the real saveframes
            try:
                category_order = ordering.index(_.tag_prefix)
            except (ValueError, KeyError):
                if _.category is None:
                    category_order = float('infinity')
                else:
                    category_order = len(ordering) + abs(int(hashlib.sha1(str(_.category).encode()).hexdigest(), 16))

            # See if there is an ID tag, and it is a number
            saveframe_id = float('infinity')
            try:
                saveframe_id = int(_.get_tag("ID")[0])
            except (ValueError, KeyError, IndexError, TypeError):
                # Either there is no ID, or it is not a number. By default it will sort at the end of saveframes of its
                # category. Note that the entry_information ID tag has a different meaning, but since there should
                # only ever be one saveframe of that category, the sort order for it can be any value.
                pass

            return category_order, saveframe_id

        def loop_key(_) -> Union[int, float]:
            """ Helper function to sort the loops."""

            try:
                return ordering.index(_.category)
            except ValueError:
                # Generate an arbitrary sort order for loops that aren't in the schema but make sure that they
                # always come after loops in the schema
                return len(ordering) + abs(int(hashlib.sha1(str(_.category).encode()).hexdigest(), 16))

        # Go through all the saveframes
        for each_frame in self._frame_list:
            each_frame.sort_tags(schema=my_schema)
            # Iterate through the loops
            for each_loop in each_frame:
                each_loop.sort_tags(schema=my_schema)

                # See if we can sort the rows (in addition to tags)
                try:
                    each_loop.sort_rows("Ordinal")
                except ValueError:
                    pass
            each_frame.loops.sort(key=loop_key)
        self._frame_list.sort(key=sf_key)
        # Calculate all the categories present
        categories: set = set()
        for each_frame in self._frame_list:
            categories.add(each_frame.category)

        # tag_prefix -> tag -> original value -> mapped value
        mapping: dict = {}

        # Reassign the ID tags first
        for each_category in categories:
            # First in the saveframe tags
            id_counter: int = 1
            for each_frame in self.get_saveframes_by_category(each_category):
                for tag in each_frame.tags:
                    tag_schema = my_schema.schema.get(f"{each_frame.tag_prefix}.{tag[0]}".lower())
                    if not tag_schema:
                        continue
                    # Make sure the capitalization of the tag is correct
                    tag[0] = tag_schema['Tag field']

                    if tag_schema['lclSfIdFlg'] == 'Y':
                        # If it's an Entry_ID tag, set it that way
                        if tag_schema['entryIdFlg'] == 'Y':
                            mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{tag[1]}'] = self._entry_id
                            tag[1] = self._entry_id
                        # Must be an integer to avoid renumbering the chem_comp ID, for example
                        elif tag_schema['BMRB data type'] == "int":
                            prev_tag = tag[1]
                            if isinstance(tag[1], str):
                                tag[1] = str(id_counter)
                                mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{prev_tag}'] = str(id_counter)
                            else:
                                tag[1] = id_counter
                                mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{prev_tag}'] = id_counter
                        # We need to still store all the other tag values too
                        else:
                            mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{tag[1]}'] = tag[1]
                    else:
                        mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{tag[1]}'] = tag[1]

                # Then in the loop
                for loop in each_frame:
                    for x, tag in enumerate(loop.tags):
                        tag_schema = my_schema.schema.get(f"{loop.category}.{tag}".lower())
                        if not tag_schema:
                            continue
                        # Make sure the tags have the proper capitalization
                        loop.tags[x] = tag_schema['Tag field']

                        for row in loop.data:
                            # We don't re-map loop IDs, but we should still store them
                            mapping[f'{loop.category[1:]}.{tag}.{row[x]}'] = row[x]

                            if tag_schema['lclSfIdFlg'] == 'Y':
                                # If it's an Entry_ID tag, set it that way
                                if tag_schema['entryIdFlg'] == 'Y':
                                    row[x] = self._entry_id
                                # Must be an integer to avoid renumbering the chem_comp ID, for example
                                elif tag_schema['BMRB data type'] == "int":
                                    if row[x] in definitions.NULL_VALUES:
                                        if isinstance(row[x], str):
                                            row[x] = str(id_counter)
                                        else:
                                            row[x] = id_counter
                                # Handle chem_comp and its ilk
                                else:
                                    parent_id_tag = f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}"
                                    parent_id_value = each_frame.get_tag(parent_id_tag)[0]
                                    if isinstance(row[x], str):
                                        row[x] = str(parent_id_value)
                                    else:
                                        row[x] = parent_id_value

                id_counter += 1
        # Now fix any other references
        for saveframe in self:
            for tag in saveframe.tags:
                tag_schema = my_schema.schema.get(f"{saveframe.tag_prefix}.{tag[0]}".lower())
                if not tag_schema:
                    continue
                if tag_schema['Foreign Table'] and tag_schema['Sf pointer'] != 'Y':
                    if tag[1] in definitions.NULL_VALUES:
                        if tag_schema['Nullable']:
                            continue
                        else:
                            logging.warning("A foreign key tag that is not nullable was set to "
                                            f"a null value. Tag: {saveframe.tag_prefix}.{tag[1]} Primary key: "
                                            f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']} "
                                            f"Value: {tag[1]}")
                    try:
                        tag[1] = mapping[f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}.{tag[1]}"]
                    except KeyError:
                        logging.warning(f'The tag {saveframe.tag_prefix}.{tag[0]} has value {tag[1]} '
                                        f'but there is no valid primary key.')

            # Now apply the remapping to loops...
            for loop in saveframe:
                for x, tag in enumerate(loop.tags):
                    tag_schema = my_schema.schema.get(f"{loop.category}.{tag}".lower())
                    if not tag_schema:
                        continue
                    if tag_schema['Foreign Table'] and tag_schema['Sf pointer'] != 'Y':
                        for row in loop.data:
                            if row[x] in definitions.NULL_VALUES:
                                if tag_schema['Nullable']:
                                    continue
                                else:
                                    logging.warning("A foreign key reference tag that is not nullable was set to "
                                                    f"a null value. Tag: {loop.category}.{tag} Foreign key: "
                                                    f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']} "
                                                    f"Value: {row[x]}")
                            try:
                                row[x] = mapping[
                                    f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}.{row[x]}"]
                            except KeyError:
                                if (loop.category == '_Atom_chem_shift' or loop.category == '_Entity_comp_index') and \
                                        (tag == 'Atom_ID' or tag == 'Comp_ID'):
                                    continue
                                logging.warning(f'The tag {loop.category}.{tag} has value {row[x]} '
                                                f'but there is no valid primary key '
                                                f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']} "
                                                f"with the tag value.")

                    # If there is both a label tag and an ID tag, do the reassignment
                    # We found a framecode reference
                    if tag_schema['Foreign Table'] and tag_schema['Foreign Column'] == 'Sf_framecode':
                        # Check if there is a tag pointing to the 'ID' tag
                        for conditional_tag in loop.tags:
                            conditional_tag_schema = my_schema.schema.get(f"{loop.category}.{conditional_tag}".lower())
                            if not conditional_tag_schema:
                                continue
                            if conditional_tag_schema['Foreign Table'] == tag_schema['Foreign Table'] and \
                                    conditional_tag_schema['Foreign Column'] == 'ID' and \
                                    conditional_tag_schema['entryIdFlg'] != 'Y':
                                # We found the matching tag
                                tag_pos = loop.tag_index(conditional_tag)
                                for row in loop.data:
                                    # Check if the tag is null
                                    if row[x] in definitions.NULL_VALUES:
                                        if tag_schema['Nullable']:
                                            continue
                                        else:
                                            logging.info(f"A foreign saveframe reference tag that is not nullable was "
                                                         f"set to a null value. Tag: {loop.category}.{tag} "
                                                         "Foreign saveframe: "
                                                         f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}"
                                                         )
                                            continue
                                    try:
                                        row[tag_pos] = self.get_saveframe_by_name(row[x][1:]).get_tag('ID')[0]
                                    except IndexError:
                                        logging.info(f"Getting {self.get_saveframe_by_name(row[x][1:]).get_tag('ID')}")
                                    except KeyError:
                                        logging.warning(f"Missing frame of type {tag} pointed to by {conditional_tag}")

        # Renumber the 'ID' column in a loop
        for each_frame in self._frame_list:
            for loop in each_frame.loops:
                if loop.tag_index('ID') is not None and loop.category != '_Experiment':
                    loop.renumber_rows('ID')
    def print_tree(self) -> None:
        """Prints a summary, tree style, of the frames and loops in
        the entry."""

        print(repr(self))
        frame: saveframe_mod.Saveframe
        for pos, frame in enumerate(self):
            print(f"\t[{pos}] {repr(frame)}")
            for pos2, one_loop in enumerate(frame):
                print(f"\t\t[{pos2}] {repr(one_loop)}")

    def remove_empty_saveframes(self) -> None:
        """ This method will remove all empty saveframes in an entry
        (the loops in the saveframe must also be empty for the saveframe
        to be deleted). "Empty" means no values in tags, not no tags present."""

        # Rebuild the list rather than deleting while iterating, which would
        # skip the element immediately after each removed saveframe.
        self._frame_list = [saveframe for saveframe in self._frame_list if not saveframe.empty]

    def remove_saveframe(self, item: Union[str, List[str], Tuple[str], 'saveframe_mod.Saveframe',
                                           List['saveframe_mod.Saveframe'], Tuple['saveframe_mod.Saveframe']]) -> None:
        """ Removes one or more saveframes from the entry. You can remove saveframes either by passing the saveframe
        object itself, the saveframe name (as a string), or a list or tuple of either."""

        parsed_list: list
        if isinstance(item, tuple):
            parsed_list = list(item)
        elif isinstance(item, list):
            parsed_list = item
        elif isinstance(item, (str, saveframe_mod.Saveframe)):
            parsed_list = [item]
        else:
            raise ValueError('The item you provided was not one or more saveframe objects or saveframe names (strings).'
                             f' Item type: {type(item)}')

        frames_to_remove = []
        for saveframe in parsed_list:
            if isinstance(saveframe, str):
                try:
                    frames_to_remove.append(self.frame_dict[saveframe])
                except KeyError:
                    raise ValueError('At least one saveframe specified to remove was not found in this entry. '
                                     f'First missing saveframe: {saveframe}')
            elif isinstance(saveframe, saveframe_mod.Saveframe):
                if saveframe not in self._frame_list:
                    raise ValueError('At least one saveframe specified to remove was not found in this entry. First '
                                     f'missing saveframe: {saveframe}')
                frames_to_remove.append(saveframe)
            else:
                raise ValueError('One of the items you provided was not a saveframe object or saveframe name '
                                 f'(string). Item: {repr(saveframe)}')

        self._frame_list = [_ for _ in self._frame_list if _ not in frames_to_remove]
    def rename_saveframe(self, original_name: str, new_name: str) -> None:
        """ Renames a saveframe and updates all pointers to that
        saveframe in the entry with the new name."""

        # Strip off the starting $ in the names
        if original_name.startswith("$"):
            original_name = original_name[1:]
        if new_name.startswith("$"):
            new_name = new_name[1:]

        # Make sure there is no saveframe called what the new name is
        if [x.name for x in self._frame_list].count(new_name) > 0:
            raise ValueError(f"Cannot rename the saveframe '{original_name}' as '{new_name}' because a "
                             f"saveframe with that name already exists in the entry.")

        # This can raise a KeyError, but no point catching it since it really is an error if they provide a name
        # of a saveframe that doesn't exist in the entry.
        change_frame = self.get_saveframe_by_name(original_name)

        # Update the saveframe
        change_frame.name = new_name

        # What the new references should look like
        old_reference = "$" + original_name
        new_reference = "$" + new_name

        # Go through all the saveframes
        for each_frame in self:
            # Iterate through the tags
            for each_tag in each_frame.tags:
                if each_tag[1] == old_reference:
                    each_tag[1] = new_reference
            # Iterate through the loops
            for each_loop in each_frame:
                for each_row in each_loop:
                    for pos, val in enumerate(each_row):
                        if val == old_reference:
                            each_row[pos] = new_reference
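
    # e.g. (frame names here are illustrative):
    #
    #   entry.rename_saveframe('assigned_chem_shift_list_1', 'shifts_1')
    #   # every '$assigned_chem_shift_list_1' reference now reads '$shifts_1'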
    def validate(self, validate_schema: bool = True, schema: 'Schema' = None,
                 validate_star: bool = True) -> List[str]:
        """Validate an entry in a variety of ways. Returns a list of
        errors found. 0-length list indicates no errors found. By
        default all validation modes are enabled.

        validate_schema - Determines if the entry is validated against
        the NMR-STAR schema. You can pass your own custom schema if desired,
        otherwise the cached schema will be used.

        validate_star - Determines if the STAR syntax checks are run."""

        errors = []

        # They should validate for something...
        if not validate_star and not validate_schema:
            errors.append("Validate() should be called with at least one validation method enabled.")

        if validate_star:
            # Check for saveframes with same name (compare each adjacent pair in the sorted list)
            saveframe_names = sorted(x.name for x in self)
            for ordinal in range(0, len(saveframe_names) - 1):
                if saveframe_names[ordinal] == saveframe_names[ordinal + 1]:
                    errors.append(f"Multiple saveframes with same name: '{saveframe_names[ordinal]}'")

            # Check for dangling references
            fdict = self.frame_dict

            for each_frame in self:
                # Iterate through the tags
                for each_tag in each_frame.tags:
                    tag_copy = str(each_tag[1])
                    if (tag_copy.startswith("$")
                            and tag_copy[1:] not in fdict):
                        errors.append(f"Dangling saveframe reference '{each_tag[1]}' in "
                                      f"tag '{each_frame.tag_prefix}.{each_tag[0]}'")

                # Iterate through the loops
                for each_loop in each_frame:
                    for each_row in each_loop:
                        for pos, val in enumerate(each_row):
                            val = str(val)
                            if val.startswith("$") and val[1:] not in fdict:
                                errors.append(f"Dangling saveframe reference '{val}' in tag "
                                              f"'{each_loop.category}.{each_loop.tags[pos]}'")

        # Ask the saveframes to check themselves for errors
        for frame in self:
            errors.extend(frame.validate(validate_schema=validate_schema, schema=schema, validate_star=validate_star))

        return errors
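
    # e.g.:
    #
    #   problems = entry.validate()
    #   if problems:
    #       print("\n".join(problems))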
    def write_to_file(self, file_name: str, format_: str = "nmrstar", show_comments: bool = True,
                      skip_empty_loops: bool = False, skip_empty_tags: bool = False) -> None:
        """ Writes the entry to the specified file in NMR-STAR format.

        Optionally specify:
        show_comments=False to disable the comments that are by default inserted. Ignored when writing json.
        skip_empty_loops=False to force printing loops with no tags at all (loops with null tags are still printed)
        skip_empty_tags=True will omit tags in the saveframes and loops which have no non-null values.
        format_=json to write to the file in JSON format."""

        write_to_file(self, file_name=file_name, format_=format_, show_comments=show_comments,
                      skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags)
| 47.971246 | 120 | 0.561772 | 5,379 | 45,045 | 4.549359 | 0.109314 | 0.014017 | 0.020187 | 0.012259 | 0.341997 | 0.286339 | 0.248416 | 0.226799 | 0.207797 | 0.188795 | 0 | 0.00235 | 0.357598 | 45,045 | 938 | 121 | 48.022388 | 0.843315 | 0.224198 | 0 | 0.281961 | 0 | 0 | 0.176936 | 0.043336 | 0 | 0 | 0 | 0.001066 | 0 | 1 | 0.075306 | false | 0.007005 | 0.017513 | 0.001751 | 0.175131 | 0.007005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
850e1110baca1c14a7d48fb08c645b57e4e9158c | 2,862 | py | Python | tests/compilation/yaml/test_yaml_load_inclusion.py | lasta/preacher | 5e50f8eb930fac72a788e7614eb5a85903f7bde6 | [
"MIT"
] | null | null | null | tests/compilation/yaml/test_yaml_load_inclusion.py | lasta/preacher | 5e50f8eb930fac72a788e7614eb5a85903f7bde6 | [
"MIT"
] | null | null | null | tests/compilation/yaml/test_yaml_load_inclusion.py | lasta/preacher | 5e50f8eb930fac72a788e7614eb5a85903f7bde6 | [
"MIT"
] | null | null | null | import os
from io import StringIO
from pytest import mark, raises
from preacher.compilation.yaml import YamlError, load
@mark.parametrize(('content', 'expected_message'), [
    ('!include []', '", line 1, column 1'),
    ('!include {}', '", line 1, column 1'),
])
def test_given_invalid_inclusion(content, expected_message):
    stream = StringIO(content)
    with raises(YamlError) as error_info:
        load(stream)
    assert expected_message in str(error_info.value)
def test_given_recursive_inclusion_error(mocker):
    included_stream = StringIO('\n !foo')
    open_mock = mocker.patch('builtins.open')
    open_mock.return_value = included_stream

    stream = StringIO('!include foo.yml')
    with raises(YamlError) as error_info:
        load(stream)

    message = str(error_info.value)
    assert '!foo' in message
    assert '", line 1, column 1' in message
    assert '", line 2, column 2' in message
def test_given_recursive_inclusion(mocker):
    stream = StringIO('''
list:
  - !include item.yml
  - key: !include value.yml
recursive: !include recursive.yml
''')
    answer_map = {
        os.path.join('base', 'dir', 'item.yml'): 'item',
        os.path.join('base', 'dir', 'value.yml'): 'value',
        os.path.join('base', 'dir', 'recursive.yml'): '!include inner.yml',
        os.path.join('base', 'dir', 'inner.yml'): 'inner',
    }
    open_mock = mocker.patch('builtins.open')
    open_mock.side_effect = lambda path: StringIO(answer_map[path])

    actual = load(stream, origin=os.path.join('base', 'dir'))
    assert actual == {
        'list': [
            'item',
            {'key': 'value'},
        ],
        'recursive': 'inner',
    }
def test_given_wildcard_inclusion(mocker):
    iglob_mock = mocker.patch('glob.iglob')
    iglob_mock.side_effect = lambda path, recursive: iter([f'glob:{path}:{recursive}'])

    stream = StringIO(r'''
'asterisk': !include '*.yml'
'double-asterisk': !include '**.yml'
'question': !include '?.yml'
'parenthesis-only-opening': !include '[.yml'
'parenthesis-only-closing': !include '].yml'
'empty-parenthesis': !include '[].yml'
'filled-parenthesis': !include '[abc].yml'
''')
    open_mock = mocker.patch('builtins.open')
    open_mock.side_effect = lambda path: StringIO(path)

    actual = load(stream, origin='base/path/')
    assert isinstance(actual, dict)
    assert actual['asterisk'] == ['glob:base/path/*.yml:True']
    assert actual['double-asterisk'] == ['glob:base/path/**.yml:True']
    assert actual['question'] == ['glob:base/path/?.yml:True']
    assert actual['parenthesis-only-closing'] == 'base/path/].yml'
    assert actual['parenthesis-only-opening'] == 'base/path/[.yml'
    assert actual['empty-parenthesis'] == 'base/path/[].yml'
    assert actual['filled-parenthesis'] == ['glob:base/path/[abc].yml:True']
| 33.670588 | 87 | 0.636268 | 347 | 2,862 | 5.144092 | 0.233429 | 0.053782 | 0.036975 | 0.039216 | 0.386555 | 0.202801 | 0.202801 | 0.185434 | 0.07507 | 0.07507 | 0 | 0.003454 | 0.190776 | 2,862 | 84 | 88 | 34.071429 | 0.767271 | 0 | 0 | 0.128571 | 0 | 0 | 0.364081 | 0.087701 | 0 | 0 | 0 | 0 | 0.185714 | 1 | 0.057143 | false | 0 | 0.057143 | 0 | 0.114286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
850f175e41fd56a4797f57b9509f4632b0f87cf8 | 657 | py | Python | death_functions.py | Yamgrenade/Gou | fa4fea253ef1a7d6fdc4f59b51d27d7442cc3ded | [
"MIT"
] | null | null | null | death_functions.py | Yamgrenade/Gou | fa4fea253ef1a7d6fdc4f59b51d27d7442cc3ded | [
"MIT"
] | 9 | 2019-08-30T15:02:25.000Z | 2019-10-03T17:33:54.000Z | death_functions.py | Yamgrenade/Gou | fa4fea253ef1a7d6fdc4f59b51d27d7442cc3ded | [
"MIT"
] | 1 | 2020-07-13T16:29:19.000Z | 2020-07-13T16:29:19.000Z | import tcod as libtcod
from render_functions import RenderOrder
from game_states import GameStates
from game_messages import Message


def kill_player(player):
    player.char = '%'
    player.color = libtcod.dark_red

    return Message('YOU DIED', libtcod.red), GameStates.PLAYER_DEAD


def kill_monster(monster):
    death_message = Message('{0} has been slain.'.format(monster.name), libtcod.orange)

    monster.char = '%'
    monster.color = libtcod.dark_red
    monster.blocks = False
    monster.fighter = None
    monster.ai = None
    monster.name = monster.name + ' remains'
    monster.render_order = RenderOrder.CORPSE
    return death_message
 | 26.28 | 87 | 0.730594 | 84 | 657 | 5.583333 | 0.488095 | 0.070362 | 0.06823 | 0.081023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001855 | 0.179604 | 657 | 25 | 88 | 26.28 | 0.868275 | 0 | 0 | 0 | 0 | 0 | 0.056231 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8515a99b3eb2ef4eb36f62853a3bd0233a514cfc | 4,458 | py | Python | tests/cache_test.py | arnib/ssm-cache-python | 298bfc38a15cfd4b0de42412a335b61b9971ba22 | [
"MIT"
] | 1 | 2020-05-25T08:26:55.000Z | 2020-05-25T08:26:55.000Z | tests/cache_test.py | benkehoe/ssm-cache-python | c21c536b7ba38494bfccafea311a853f50360609 | [
"MIT"
] | null | null | null | tests/cache_test.py | benkehoe/ssm-cache-python | c21c536b7ba38494bfccafea311a853f50360609 | [
"MIT"
] | null | null | null | import os
import sys
from datetime import datetime, timedelta
import boto3
from moto import mock_ssm
from . import TestBase
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from ssm_cache import SSMParameter, InvalidParam
@mock_ssm
class TestSSMCache(TestBase):
    def setUp(self):
        names = ["my_param", "my_param_1", "my_param_2", "my_param_3"]
        self._create_params(names)

    def test_creation(self):
        # single string
        cache = SSMParameter("my_param")
        self.assertEqual(1, len(cache._names))
        self.assertTrue(cache._with_decryption)
        self.assertIsNone(cache._max_age)
        self.assertIsNone(cache._last_refresh_time)
        # list of params
        cache = SSMParameter(["my_param_1", "my_param_2"])
        self.assertEqual(2, len(cache._names))
        # invalid params
        with self.assertRaises(ValueError):
            SSMParameter()
        with self.assertRaises(ValueError):
            SSMParameter(None)
        with self.assertRaises(ValueError):
            SSMParameter([])

    def test_should_refresh(self):
        # without max age
        cache = SSMParameter("my_param")
        self.assertFalse(cache._should_refresh())
        # with max age and no data
        cache = SSMParameter("my_param", max_age=10)
        self.assertTrue(cache._should_refresh())
        # with max age and last refreshed date OK
        cache._last_refresh_time = datetime.utcnow()
        self.assertFalse(cache._should_refresh())
        # with max age and last refreshed date KO
        cache._last_refresh_time = datetime.utcnow() - timedelta(seconds=20)
        self.assertTrue(cache._should_refresh())

    def test_main(self):
        cache = SSMParameter("my_param")
        my_value = cache.value()
        self.assertEqual(my_value, self.PARAM_VALUE)

    def test_unexisting(self):
        cache = SSMParameter("my_param_invalid_name")
        with self.assertRaises(InvalidParam):
            cache.value()

    def test_not_configured(self):
        cache = SSMParameter(["param_1", "param_2"])
        with self.assertRaises(InvalidParam):
            cache.value("param_3")

    def test_main_with_expiration(self):
        cache = SSMParameter("my_param", max_age=300)  # 5 minutes expiration time
        my_value = cache.value()
        self.assertEqual(my_value, self.PARAM_VALUE)

    def test_main_without_encryption(self):
        cache = SSMParameter("my_param", with_decryption=False)
        my_value = cache.value()
        self.assertEqual(my_value, self.PARAM_VALUE)

    def test_main_with_multiple_params(self):
        cache = SSMParameter(["my_param_1", "my_param_2", "my_param_3"])
        # one by one
        my_value_1 = cache.value("my_param_1")
        my_value_2 = cache.value("my_param_2")
        my_value_3 = cache.value("my_param_3")
        self.assertEqual(my_value_1, self.PARAM_VALUE)
        self.assertEqual(my_value_2, self.PARAM_VALUE)
        self.assertEqual(my_value_3, self.PARAM_VALUE)
        with self.assertRaises(TypeError):
            cache.value()  # name is required
        # or all together
        my_value_1, my_value_2, my_value_3 = cache.values()
        self.assertEqual(my_value_1, self.PARAM_VALUE)
        self.assertEqual(my_value_2, self.PARAM_VALUE)
        self.assertEqual(my_value_3, self.PARAM_VALUE)
        # or a subset
        my_value_1, my_value_2 = cache.values(["my_param_1", "my_param_2"])
        self.assertEqual(my_value_1, self.PARAM_VALUE)
        self.assertEqual(my_value_2, self.PARAM_VALUE)

    def test_main_with_explicit_refresh(self):
        cache = SSMParameter("my_param")  # will not expire

        class InvalidCredentials(Exception):
            pass

        def do_something():
            my_value = cache.value()
            if my_value == self.PARAM_VALUE:
                raise InvalidCredentials()

        try:
            do_something()
        except InvalidCredentials:
            # manually update value
            self._create_params(["my_param"], "new_value")
            cache.refresh()  # force refresh
            do_something()  # won't fail anymore

    def test_main_lambda_handler(self):
        cache = SSMParameter("my_param")

        def lambda_handler(event, context):
            secret_value = cache.value()
            return 'Hello from Lambda with secret %s' % secret_value

        lambda_handler(None, None)
| 35.951613 | 82 | 0.651189 | 551 | 4,458 | 4.972777 | 0.219601 | 0.061314 | 0.061314 | 0.09635 | 0.537226 | 0.377007 | 0.287956 | 0.281022 | 0.25146 | 0.211679 | 0 | 0.013162 | 0.250112 | 4,458 | 123 | 83 | 36.243902 | 0.806461 | 0.071108 | 0 | 0.354839 | 0 | 0 | 0.071532 | 0.005092 | 0 | 0 | 0 | 0 | 0.27957 | 1 | 0.139785 | false | 0.010753 | 0.075269 | 0 | 0.247312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85184c796f1842707b6d4adf1b17e780221e13fb | 1,403 | py | Python | autograd/optim.py | brandontrabucco/autograd | 38687c67d253a1347c1bba6445169e43f1db63e4 | [
"MIT"
] | null | null | null | autograd/optim.py | brandontrabucco/autograd | 38687c67d253a1347c1bba6445169e43f1db63e4 | [
"MIT"
] | null | null | null | autograd/optim.py | brandontrabucco/autograd | 38687c67d253a1347c1bba6445169e43f1db63e4 | [
"MIT"
] | null | null | null | """Author: Brandon Trabucco, Copyright 2019
Implements dynamic computational graphs with an interface like pytorch.
Also uses the ADAM optimizer."""
import numpy as np
import autograd.nodes
####################
#### OPTIMIZERS ####
####################
class Adam(autograd.nodes.Optimizer):
def __init__(self, alpha=0.0001, beta_one=0.9, beta_two=0.999, epsilon=1e-8):
"""Creates an ADAM optimizer."""
super(Adam, self).__init__("adam")
self.t = 0
self.alpha = alpha
self.beta_one = beta_one
self.beta_two = beta_two
self.epsilon = epsilon
self.m = None
self.v = None
def forward(self, variable):
"""Computes the result of this operation."""
if self.m is None:
self.m = np.zeros(variable.shape)
if self.v is None:
self.v = np.zeros(variable.shape)
return variable.data
def backward(self, gradient, variable):
"""Computes the gradient with respect to *args."""
self.t += 1
self.m = self.beta_one * self.m + (1 - self.beta_one) * gradient
self.v = self.beta_two * self.v + (1 - self.beta_two) * gradient**2
        m_hat = self.m / (1 - self.beta_one**self.t)  # bias correction divides by (1 - beta^t)
        v_hat = self.v / (1 - self.beta_two**self.t)
        return [self.alpha * m_hat / (np.sqrt(v_hat) + self.epsilon)]
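# A minimal standalone usage sketch, assuming the optimizer can be driven
# directly on a plain numpy parameter: forward() allocates the moment buffers
# from the variable's shape, and backward() returns the update step to subtract.
if __name__ == "__main__":
    from types import SimpleNamespace
    w = np.array([5.0])                                   # toy parameter for the loss w**2
    opt = Adam(alpha=0.1)
    opt.forward(SimpleNamespace(shape=w.shape, data=w))   # initialize m and v
    for _ in range(200):
        grad = 2.0 * w                                    # d(w**2)/dw
        step = opt.backward(grad, None)[0]
        w = w - step                                      # w drifts toward 0
    print(w)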
| 31.886364 | 82 | 0.570919 | 189 | 1,403 | 4.111111 | 0.365079 | 0.082368 | 0.056628 | 0.05148 | 0.087516 | 0.087516 | 0 | 0 | 0 | 0 | 0 | 0.023833 | 0.282252 | 1,403 | 43 | 83 | 32.627907 | 0.747766 | 0.188881 | 0 | 0 | 0 | 0 | 0.003899 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
851ada71457a99d2701c22836c3b69a5e678b2e0 | 872 | py | Python | src/code_challenges/8_5_20.py | rupol/Hash-Tables-Lecture | 5b692ad08a0604e81c7d12d09e912925fa12c512 | [
"MIT"
] | null | null | null | src/code_challenges/8_5_20.py | rupol/Hash-Tables-Lecture | 5b692ad08a0604e81c7d12d09e912925fa12c512 | [
"MIT"
] | null | null | null | src/code_challenges/8_5_20.py | rupol/Hash-Tables-Lecture | 5b692ad08a0604e81c7d12d09e912925fa12c512 | [
"MIT"
] | null | null | null | '''
Print out all of the strings in the following array in alphabetical order, each on a separate line.
['Waltz', 'Tango', 'Viennese Waltz', 'Foxtrot', 'Cha Cha', 'Samba', 'Rumba', 'Paso Doble', 'Jive']
The expected output is:
'Cha Cha'
'Foxtrot'
'Jive'
'Paso Doble'
'Rumba'
'Samba'
'Tango'
'Viennese Waltz'
'Waltz'
You may use whatever programming language you'd like.
Verbalize your thought process as much as possible before writing any code. Run through the UPER problem solving framework while going through your thought process.
'''
def alpha_print(array):
# sort alphabetically
array.sort()
# loop through sorted array
for item in array:
# print each item
print(item)
array_1 = ['Waltz', 'Tango', 'Viennese Waltz', 'Foxtrot',
'Cha Cha', 'Samba', 'Rumba', 'Paso Doble', 'Jive']
array_1.sort()
print(*array_1, sep="\n")
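# A quick note on the two approaches above: list.sort() sorts in place and
# returns None, while the built-in sorted() returns a new list and leaves the
# original untouched -- useful when the input order must be preserved
# (the list below is purely illustrative):
dances = ['Waltz', 'Tango', 'Jive']
print(*sorted(dances), sep="\n")  # alphabetical output
print(dances)                     # original order intact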
| 27.25 | 164 | 0.683486 | 123 | 872 | 4.813008 | 0.544715 | 0.065878 | 0.091216 | 0.077703 | 0.199324 | 0.199324 | 0.199324 | 0.199324 | 0.199324 | 0.199324 | 0 | 0.004243 | 0.18922 | 872 | 31 | 165 | 28.129032 | 0.833098 | 0.680046 | 0 | 0 | 0 | 0 | 0.237918 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.125 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
851b02d0eb709b35d28a8b9da557a4b933cbb342 | 7,740 | py | Python | builders/layers/scenegenerators.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | 9 | 2021-08-18T17:49:42.000Z | 2022-02-22T02:15:07.000Z | builders/layers/scenegenerators.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | null | null | null | builders/layers/scenegenerators.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | 1 | 2021-10-02T19:53:03.000Z | 2021-10-02T19:53:03.000Z | import tensorflow as tf
import tensorflow_addons as tfa
import builders
from builders.layers.helpers import build_cnn, get_normalization_2d
from builders.layers import stylegan
from builders.layers.spectral import SNConv2D
from builders.layers.syncbn import SyncBatchNormalization
def build_mask_net(hidden_channel_dim, mask_size, norm='batch'):
output_dim = 1
cur_size = 1
model = tf.keras.models.Sequential()
while cur_size < mask_size:
model.add(tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='nearest'))
model.add(tf.keras.layers.Conv2D(hidden_channel_dim, kernel_size=3, padding='same'))
model.add(get_normalization_2d(norm))
model.add(tf.keras.layers.Activation('relu'))
cur_size *= 2
if cur_size != mask_size:
raise ValueError('Mask size must be a power of 2')
model.add(tf.keras.layers.Conv2D(output_dim, kernel_size=1, padding='same'))
return model
class AppearanceEncoder(tf.keras.layers.Layer):
def __init__(self, arch, normalization='none', activation='relu',
padding='same', vecs_size=1024, pooling='avg'):
super().__init__()
cnn, channels = build_cnn(arch=arch,
normalization=normalization,
activation=activation,
pooling=pooling,
padding=padding)
self.cnn = tf.keras.models.Sequential()
self.cnn.add(cnn)
self.cnn.add(tf.keras.layers.GlobalMaxPooling2D())
self.cnn.add(tf.keras.layers.Dense(vecs_size))
def call(self, crops):
return self.cnn(crops)
class LayoutToImageGenerator(tf.keras.layers.Layer):
def __init__(self, input_shape, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer='batch',
padding_type='same', final_activation='tanh', extra_mult=2):
super().__init__()
assert (n_blocks >= 0)
if norm_layer == 'batch':
norm_layer_builder = tf.keras.layers.BatchNormalization
elif norm_layer == 'instance':
norm_layer_builder = tfa.layers.InstanceNormalization
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Lambda(
lambda x: tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]]),
input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(ngf, kernel_size=7, padding='valid'))
model.add(norm_layer_builder())
model.add(tf.keras.layers.Activation('relu'))
# downsample
for i in range(n_downsampling):
n_filters = int(ngf * (2 ** i) * extra_mult)
model.add(tf.keras.layers.Conv2D(n_filters, kernel_size=3, strides=2, padding='same'))
model.add(norm_layer_builder())
model.add(tf.keras.layers.Activation('relu'))
# resnet blocks
n_filters = int(ngf * (2 ** (n_downsampling - 1)) * extra_mult)
for i in range(n_blocks):
model.add(builders.layers.resnet.ResidualBlock(n_filters, padding=padding_type, activation='relu', normalization=norm_layer))
# upsample
for i in range(n_downsampling):
n_filters = int(ngf * (2 ** (n_downsampling - i)) * extra_mult / 2)
model.add(tf.keras.layers.Conv2DTranspose(n_filters, kernel_size=3, strides=2, padding='same', output_padding=1))
model.add(norm_layer_builder())
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Lambda(
lambda x: tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]])))
model.add(tf.keras.layers.Conv2D(output_nc, kernel_size=7, padding='valid'))
model.add(tf.keras.layers.Activation(final_activation))
self.model = model
def call(self, input):
return self.model(input)
class LayoutToImageGeneratorSPADESArch(tf.keras.layers.Layer):
def __init__(self, nf=512, image_size=128, input_c=205,
random_input=False, use_sn=False, add_noise=True,
norm='instance'):
super().__init__()
self.nf = nf
self.image_size = image_size
self.Conv2D = tf.keras.layers.Conv2D if not use_sn else SNConv2D
if norm == 'instance':
self.Norm = tfa.layers.InstanceNormalization
elif norm == 'batch':
self.Norm = SyncBatchNormalization
self.use_sn = use_sn
self.add_noise = add_noise
self.random_input = random_input
layout_in = tf.keras.Input((image_size, image_size, input_c))
if not self.random_input:
self.initial_image = tf.Variable(tf.ones((1, 4, 4, self.nf)), trainable=True, name='initial_image', aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
x = tf.tile(self.initial_image, [tf.shape(layout_in)[0], 1, 1, 1])
else:
self.initial_image = tf.random.normal((tf.shape(layout_in)[0], 256), 0., 1.)
x = tf.keras.layers.Dense(self.nf * 4 * 4)(self.initial_image)
x = tf.reshape(x, [tf.shape(layout_in)[0], 4, 4, self.nf])
x = self.mescheder_resblock(x, layout_in, c=self.nf)
x = self.upsample(x, 2) # 8
x = self.adaptive_spacial_instance_norm(x, layout_in)
x = self.mescheder_resblock(x, layout_in, c=self.nf)
x = self.upsample(x, 2) # 16
x = self.adaptive_spacial_instance_norm(x, layout_in)
x = self.mescheder_resblock(x, layout_in, c=self.nf)
x = self.upsample(x, 2) # 32
cur_nf = self.nf // 2
x = self.adaptive_spacial_instance_norm(x, layout_in)
x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
x = self.upsample(x, 2) # 64
cur_nf = cur_nf // 2
x = self.adaptive_spacial_instance_norm(x, layout_in)
x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
x = self.upsample(x, 2) # 128
if self.image_size == 256:
cur_nf = cur_nf // 2
x = self.adaptive_spacial_instance_norm(x, layout_in)
x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
x = self.upsample(x, 2) # 256
cur_nf = cur_nf // 2
x = self.adaptive_spacial_instance_norm(x, layout_in)
x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
x = tf.keras.layers.LeakyReLU(0.2)(x)
x = self.Conv2D(3, kernel_size=3, padding='same')(x)
x = tf.keras.layers.Activation('tanh')(x)
self.model = tf.keras.Model(
name='stylegan_spades_mix_generator',
inputs=[layout_in],
outputs=x)
def adaptive_spacial_instance_norm(self, x, layout):
if self.add_noise:
x = stylegan.AddNoiseToEachChannel()(x)
x = tf.keras.layers.LeakyReLU(0.2)(x)
x = self.Norm()(x)
x = stylegan.LayoutBasedModulation(self.use_sn)([x, layout])
return x
def upsample(self, x, scale_factor=2):
h, w = x.shape[1], x.shape[2]
new_size = [h * scale_factor, w * scale_factor]
return tf.image.resize(x, size=new_size, method='bilinear')
def mescheder_resblock(self, x, layout, c=1024, learn_skip=False):
out = self.Conv2D(c, kernel_size=3, padding='same')(x)
out = self.adaptive_spacial_instance_norm(out, layout)
out = self.Conv2D(c, kernel_size=3, padding='same')(out)
out = self.adaptive_spacial_instance_norm(out, layout)
if learn_skip:
x = self.Conv2D(c, kernel_size=3, padding='same')(x)
x = self.adaptive_spacial_instance_norm(x, layout)
return x + out
def call(self, input):
return self.model(input)
| 40.736842 | 166 | 0.624548 | 1,053 | 7,740 | 4.392213 | 0.162393 | 0.045405 | 0.07027 | 0.055351 | 0.46227 | 0.424865 | 0.387459 | 0.321081 | 0.278486 | 0.238919 | 0 | 0.022903 | 0.249742 | 7,740 | 189 | 167 | 40.952381 | 0.773549 | 0.006718 | 0 | 0.310811 | 0 | 0 | 0.028653 | 0.003777 | 0 | 0 | 0 | 0 | 0.006757 | 1 | 0.067568 | false | 0 | 0.047297 | 0.02027 | 0.182432 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
851ccf02531e4e7ff2823ea6d2824dfc6a043bbd | 3,993 | py | Python | nbviewer/tests/base.py | AI-Collaboratory/nbviewer | 1a40e04cc8aad67aa96bb840603f8f568c08d44d | [
"BSD-3-Clause-Clear"
] | 1,840 | 2015-01-01T13:25:44.000Z | 2022-03-17T08:33:01.000Z | nbviewer/tests/base.py | AI-Collaboratory/nbviewer | 1a40e04cc8aad67aa96bb840603f8f568c08d44d | [
"BSD-3-Clause-Clear"
] | 605 | 2015-01-01T16:45:01.000Z | 2022-03-14T15:25:25.000Z | nbviewer/tests/base.py | AI-Collaboratory/nbviewer | 1a40e04cc8aad67aa96bb840603f8f568c08d44d | [
"BSD-3-Clause-Clear"
] | 513 | 2015-01-07T20:54:49.000Z | 2022-02-17T16:04:30.000Z | """Base class for nbviewer tests.
Derived from IPython.html notebook test case in 2.0
"""
# -----------------------------------------------------------------------------
# Copyright (C) Jupyter Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# -----------------------------------------------------------------------------
import os
import sys
import time
from contextlib import contextmanager
from subprocess import DEVNULL as devnull
from subprocess import Popen
from unittest import skipIf
from unittest import TestCase
import requests
from tornado.escape import to_unicode
from tornado.log import app_log
from nbviewer.providers.github.client import AsyncGitHubClient
from nbviewer.utils import url_path_join
class NBViewerTestCase(TestCase):
"""A base class for tests that need a running nbviewer server."""
port = 12341
environment_variables = {}
def assertIn(self, observed, expected, *args, **kwargs):
return super().assertIn(
to_unicode(observed), to_unicode(expected), *args, **kwargs
)
def assertNotIn(self, observed, expected, *args, **kwargs):
return super().assertNotIn(
to_unicode(observed), to_unicode(expected), *args, **kwargs
)
@classmethod
def wait_until_alive(cls):
"""Wait for the server to be alive"""
while True:
try:
requests.get(cls.url())
except Exception:
time.sleep(0.1)
else:
break
@classmethod
def wait_until_dead(cls):
"""Wait for the server to stop getting requests after shutdown"""
while True:
try:
requests.get(cls.url())
except Exception:
break
else:
time.sleep(0.1)
@classmethod
def get_server_cmd(cls):
return [sys.executable, "-m", "nbviewer", "--port=%d" % cls.port]
@classmethod
def setup_class(cls):
server_cmd = cls.get_server_cmd()
cls.server = Popen(
server_cmd,
stdout=devnull,
stderr=devnull,
# Set environment variables if any
env=dict(os.environ, **cls.environment_variables),
)
cls.wait_until_alive()
@classmethod
def teardown_class(cls):
cls.server.terminate()
cls.wait_until_dead()
@classmethod
def url(cls, *parts):
return url_path_join("http://localhost:%i" % cls.port, *parts)
class FormatMixin(object):
@classmethod
def url(cls, *parts):
return url_path_join(
"http://localhost:%i" % cls.port, "format", cls.key, *parts
)
class FormatHTMLMixin(object):
key = "html"
class FormatSlidesMixin(object):
key = "slides"
@contextmanager
def assert_http_error(status, msg=None):
try:
yield
except requests.HTTPError as e:
real_status = e.response.status_code
assert real_status == status, "Expected status %d, got %d" % (
real_status,
status,
)
if msg:
assert msg in str(e), e
else:
assert False, "Expected HTTP error status"
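# A hedged usage sketch for assert_http_error; requests.HTTPError is only
# raised once raise_for_status() is called, so a test would look like:
#
#   def test_missing_page(self):
#       with assert_http_error(404):
#           r = requests.get(self.url('no', 'such', 'page'))
#           r.raise_for_status()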
def skip_unless_github_auth(f):
"""Decorates a function to skip a test unless credentials are available for
AsyhncGitHubClient to authenticate.
Avoids noisy test failures on PRs due to GitHub API rate limiting with a
valid token that might obscure test failures that are actually meaningful.
Paraameters
-----------
f: callable
test function to decorate
Returns
-------
callable
unittest.skipIf decorated function
"""
cl = AsyncGitHubClient(log=app_log)
can_auth = "access_token" in cl.auth or (
"client_id" in cl.auth and "client_secret" in cl.auth
)
return skipIf(not can_auth, "github creds not available")(f)
| 27.163265 | 79 | 0.604308 | 461 | 3,993 | 5.136659 | 0.399132 | 0.041385 | 0.030405 | 0.02027 | 0.179899 | 0.179899 | 0.162162 | 0.127534 | 0.090372 | 0.053209 | 0 | 0.003775 | 0.270223 | 3,993 | 146 | 80 | 27.349315 | 0.808854 | 0.246682 | 0 | 0.293478 | 0 | 0 | 0.063226 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.119565 | false | 0 | 0.141304 | 0.054348 | 0.413043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8523860c41be0e36cd1d844b36558c0d7b16343b | 4,960 | py | Python | active_reward_learning/common/policy.py | david-lindner/idrl | 54cfad330b0598ad4f6621796f2411644e50a6ba | [
"MIT"
] | 9 | 2021-11-20T18:14:38.000Z | 2022-03-20T16:29:48.000Z | active_reward_learning/common/policy.py | david-lindner/idrl | 54cfad330b0598ad4f6621796f2411644e50a6ba | [
"MIT"
] | null | null | null | active_reward_learning/common/policy.py | david-lindner/idrl | 54cfad330b0598ad4f6621796f2411644e50a6ba | [
"MIT"
] | null | null | null | import datetime
import os
import pickle
from abc import ABC, abstractmethod
import gym
import numpy as np
class BasePolicy(ABC):
@abstractmethod
def get_action(self, obs, deterministic=True):
raise NotImplementedError()
def evaluate(self, env, N=10, rollout=True):
if not rollout:
print("Warning: Rolling out policy despite rollout=False")
res = 0
for _ in range(N):
obs = env.reset()
done = False
while not done:
a = self.get_action(obs)
obs, reward, done, _ = env.step(a)
res += reward
return res / N
class EpsGreedyPolicy(BasePolicy):
def __init__(self, greedy_policy: BasePolicy, eps: float, action_space: gym.Space):
self.greedy = greedy_policy
self.eps = eps
self.action_space = action_space
def get_action(self, obs, deterministic=False):
if deterministic or np.random.random() > self.eps:
return self.greedy.get_action(obs, deterministic=True)
else:
return self.action_space.sample()
class TabularPolicy(BasePolicy):
def __init__(self, policy: np.ndarray):
self.matrix = np.copy(policy)
def get_action(self, state, deterministic=True):
if deterministic:
return np.argmax(self.matrix[state, :])
else:
return np.random.choice(
range(self.matrix.shape[1]), p=self.matrix[state, :]
)
def evaluate(self, env, N=1, rollout=False):
assert env.observation_type == "state"
if rollout:
return super().evaluate(env, N)
else:
return env.evaluate_policy(self)
def __eq__(self, other):
return np.all(self.matrix == other.matrix)
class FixedPolicy(BasePolicy):
def __init__(self, policy: np.ndarray):
self.matrix = np.copy(policy)
def get_action(self, state, deterministic=True):
t = int(state[-1])
return self.matrix[t]
def __eq__(self, other):
return np.all(self.matrix == other.matrix)
class LinearPolicy(BasePolicy):
def __init__(self, w, obs_mean=None, obs_std=None, env=None):
self.w = w
self.obs_mean = obs_mean
self.obs_std = obs_std
if env is not None:
self.alow = env.action_space.low
self.ahigh = env.action_space.high
else:
self.alow = -np.inf
self.ahigh = np.inf
def normalize(self, obs):
if self.obs_mean is not None and self.obs_std is not None:
return (obs - self.obs_mean) / self.obs_std
else:
return obs
def get_action(self, obs, deterministic=True):
obs = self.normalize(obs)
a = np.dot(self.w, obs)
a = np.clip(a, self.alow, self.ahigh)
return a
def save(self, path):
policy_dict = {
"w": list(self.w),
"mean": list(self.obs_mean),
"std": list(self.obs_std),
}
with open(path, "wb") as f:
pickle.dump(policy_dict, f)
@classmethod
def load(cls, path, env=None):
with open(path, "rb") as f:
policy_dict = pickle.load(f)
policy = cls(
policy_dict["w"],
obs_mean=policy_dict["mean"],
obs_std=policy_dict["std"],
env=env,
)
return policy
class StableBaselinesPolicy(BasePolicy):
def __init__(self, model):
# save and load the model as a workaround for creating a copy of the policy
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
filename = f"tmp_model_{timestamp}.zip"
model.save(filename)
self.model = model.__class__.load(filename)
try:
os.remove(filename)
except FileNotFoundError:
pass
def get_action(self, obs, deterministic=True):
a, _ = self.model.predict(obs, deterministic=deterministic)
return a
class CombinedPolicy(BasePolicy):
def __init__(self, policies, p=None):
self.policies = policies
for policy in self.policies:
assert issubclass(policy.__class__, BasePolicy)
if p is None:
n = len(self.policies)
p = np.ones(n) / n
self.p = p
def get_action(self, obs, deterministic=True):
policy_idx = np.random.choice(np.arange(len(self.policies)), p=self.p)
policy = self.policies[policy_idx]
return policy.get_action(obs, deterministic=deterministic)
class GaussianNoisePolicy(BasePolicy):
def __init__(self, policy: BasePolicy, sigma: float):
self.policy = policy
self.sigma = sigma
def get_action(self, obs, deterministic=False):
action = self.policy.get_action(obs, deterministic=deterministic)
action += np.random.normal(loc=0, scale=self.sigma, size=action.shape)
return action
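# A hedged composition sketch, assuming a discrete gym action space:
# a tabular greedy policy wrapped with epsilon-greedy exploration.
if __name__ == "__main__":
    base = TabularPolicy(np.eye(4))  # 4 states, identity action table
    noisy = EpsGreedyPolicy(base, eps=0.1, action_space=gym.spaces.Discrete(4))
    print(noisy.get_action(0))       # mostly argmax, occasionally random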
| 30.060606 | 87 | 0.599194 | 620 | 4,960 | 4.640323 | 0.229032 | 0.036496 | 0.033368 | 0.044491 | 0.242961 | 0.208551 | 0.177963 | 0.10219 | 0.10219 | 0.10219 | 0 | 0.002002 | 0.29496 | 4,960 | 164 | 88 | 30.243902 | 0.820703 | 0.014718 | 0 | 0.174242 | 0 | 0 | 0.023541 | 0.005118 | 0 | 0 | 0 | 0 | 0.015152 | 1 | 0.166667 | false | 0.007576 | 0.045455 | 0.015152 | 0.401515 | 0.007576 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8524f725e1219424eff7106f2965054cd2eeb4a1 | 8,384 | py | Python | raft/servers/zre_server.py | adsharma/raft | 49c7bfd472af4c97cc69d7e7e4ffc26808ed88db | [
"MIT"
] | 4 | 2021-01-20T20:29:19.000Z | 2021-09-21T18:20:08.000Z | raft/servers/zre_server.py | adsharma/raft | 49c7bfd472af4c97cc69d7e7e4ffc26808ed88db | [
"MIT"
] | 11 | 2021-01-07T19:06:42.000Z | 2021-08-22T17:57:27.000Z | raft/servers/zre_server.py | adsharma/raft | 49c7bfd472af4c97cc69d7e7e4ffc26808ed88db | [
"MIT"
] | null | null | null | import asyncio
import logging
import threading
import uuid
from cachetools import TTLCache
from pyre import Pyre
from serde.msgpack import from_msgpack, to_msgpack
from typing import List, Union
from ..boards.memory_board import MemoryBoard
from ..messages.append_entries import AppendEntriesMessage, LogEntry, Command
from ..messages.base import BaseMessage, Peer
from ..states.state import State
from .server import HashedLog, Server
logger = logging.getLogger("raft")
class ZREServer(Server):
"This implementation is suitable for multi-process testing"
DIGEST_SIZE = 32
def __init__(
self,
group,
name,
state: State,
node: Pyre,
# DBM file that stores stable storage state for raft
stable_storage,
log=None,
messageBoard=None,
parent=None,
):
if log is None:
log = HashedLog()
log.append(LogEntry(term=0)) # According to the raft spec
if messageBoard is None:
messageBoard = MemoryBoard()
super().__init__(
node.uuid().hex,
state,
log,
messageBoard,
[],
set(),
set(),
_stable_storage=stable_storage,
)
self.group = group
self._node = node
self._human_name = name
self._outstanding_index = TTLCache(maxsize=128, ttl=10)
# Sometimes several instances of consensus are arranged in a
# hierarchy. In order to become a candidate in the child consensus,
# you have to be a leader in the parent. Note that in the presence
# of failures, the parent and the child consensus could have
# different leaders at the same time.
self._parent = parent
def add_neighbor(self, neighbor: Peer):
loop = asyncio.get_event_loop()
task = loop.create_task(self.quorum_set(str(neighbor), "add"))
self._neighbors.append(neighbor)
return task
def remove_neighbor(self, neighbor: Peer):
loop = asyncio.get_event_loop()
task = loop.create_task(self.quorum_set(str(neighbor), "remove"))
self._neighbors.remove(neighbor)
if neighbor in self._quorum:
self._quorum.remove(neighbor)
if neighbor in self._live_quorum:
self._live_quorum.remove(neighbor)
return task
def quorum_update(self, entries: List[LogEntry]) -> None:
for entry in entries:
assert entry.command == Command.QUORUM_PUT
if entry.value == "add":
self._quorum.add(entry.key)
elif entry.value == "remove":
if entry.key in self._quorum:
self._quorum.remove(entry.key)
if entry.key in self._live_quorum:
self._live_quorum.remove(entry.key)
# TODO: if the leader is removed, needs to step down
self._total_nodes = len(self._quorum)
async def send_message(self, message: Union[BaseMessage, bytes]):
logger.debug(f"sending: {self._state}: {message}")
if isinstance(message, AppendEntriesMessage):
self._outstanding_index[message.id] = message
if isinstance(message, bytes):
self._node.shout(self.group, b"/raft " + message)
else:
if message.receiver == self._name:
await self._receive_message(message)
return
elif message.receiver is not None:
# Disambiguate in cases where a peer is in multiple groups
message.group = self.group
message_bytes = to_msgpack(message, ext_dict=BaseMessage.EXT_DICT_REVERSED)
digest = message.hash().digest()
assert len(digest) == self.DIGEST_SIZE
message_bytes = digest + message_bytes
if message.receiver is None:
self._node.shout(self.group, b"/raft " + message_bytes)
else:
if type(message.receiver) != str:
raise Exception(
f"Expected node.uuid().hex here, got: {message.receiver}"
)
self._node.whisper(
uuid.UUID(message.receiver), # type: ignore
b"/raft " + message_bytes,
)
async def receive_message(self, message_bytes: bytes):
try:
message_hash, message_bytes = (
message_bytes[0 : self.DIGEST_SIZE],
message_bytes[self.DIGEST_SIZE :],
)
message = from_msgpack(
BaseMessage, message_bytes, ext_dict=BaseMessage.EXT_DICT
)
if message_hash != message.hash().digest():
raise Exception(f"message hash {message_hash} doesn't match {message}")
except Exception as e:
logger.info(f"Got exception: {e}")
return
if message.group is not None and message.group != self.group:
return
await self._receive_message(message)
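    # The wire format used above is: 32-byte digest || msgpack(message), where
    # the digest comes from BaseMessage.hash(). A hedged standalone sketch of
    # the same frame-and-verify pattern over raw bytes (blake2s also yields a
    # 32-byte digest; note the real code hashes the message object, not the
    # serialized payload):
    #
    #   import hashlib
    #   def frame(payload: bytes) -> bytes:
    #       return hashlib.blake2s(payload).digest() + payload
    #   def unframe(framed: bytes) -> bytes:
    #       digest, payload = framed[:32], framed[32:]
    #       if digest != hashlib.blake2s(payload).digest():
    #           raise ValueError("frame digest mismatch")
    #       return payload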
async def _receive_message(self, message: BaseMessage):
await self.on_message(message)
await self.post_message(message)
async def post_message(self, message):
await self._messageBoard.post_message(message)
async def on_message(self, message):
logger.debug(f"---------- on_message start -----------")
logger.debug(f"{self._state}: {message}")
state, response = await self._state.on_message(message)
logger.debug(f"{state}: {response}")
logger.debug(f"---------- on_message end -----------")
self._state = state
async def wait_for(self, expected_index, expected_id) -> None:
def check_condition():
return (
self._commitIndex >= expected_index
and self._log[expected_index].id == expected_id
)
async with self._condition:
await self._condition.wait_for(check_condition)
entries = [
e
                for e in self._log[expected_index : self._commitIndex + 1]
if e.command == Command.QUORUM_PUT
]
self.quorum_update(entries)
self._condition_event.set()
async def set(self, key: str, value: str):
leader = self._state.leader
if leader is not None:
append_entries = AppendEntriesMessage(
self._name,
leader,
self._currentTerm,
entries=[
LogEntry(
term=self._currentTerm,
index=self._commitIndex,
key=key,
value=value,
)
],
)
expected_index = self._commitIndex + 1
await self.send_message(append_entries)
self._condition_event = threading.Event()
return (self.wait_for, expected_index, append_entries.id)
else:
raise Exception("Leader not found")
async def get(self, key: str):
return await self._messageBoard.get(key)
async def quorum_set(self, neighbor: str, op: str):
leader = self._state.leader
if leader is not None:
if leader != self._name:
# Let the leader handle this
async def nop():
pass
return nop
append_entries = AppendEntriesMessage(
self._name,
leader,
self._currentTerm,
id="set", # Just so all nodes compute the same hash
entries=[
LogEntry(
command=Command.QUORUM_PUT,
term=self._currentTerm,
index=self._commitIndex,
key=neighbor,
value=op,
)
],
)
expected_index = self._commitIndex + 1
await self.send_message(append_entries)
self._condition_event = threading.Event()
return (self.wait_for, expected_index, append_entries.entries[0].id)
else:
if self._currentTerm > 0:
raise Exception("Leader not found")
| 35.982833 | 87 | 0.563216 | 892 | 8,384 | 5.115471 | 0.223094 | 0.028928 | 0.019724 | 0.015122 | 0.30835 | 0.229235 | 0.193294 | 0.174885 | 0.117028 | 0.117028 | 0 | 0.002574 | 0.351145 | 8,384 | 232 | 88 | 36.137931 | 0.836213 | 0.072638 | 0 | 0.233503 | 0 | 0 | 0.052039 | 0 | 0 | 0 | 0 | 0.00431 | 0.010152 | 1 | 0.025381 | false | 0.005076 | 0.06599 | 0.005076 | 0.152284 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8525422df5c9d907c2e3e52b44632726d4428acd | 6,828 | py | Python | loops/ornekUygulama.py | mrtyasar/PythonLearn | b8fa5d97b9c811365db8457f42f1e1d04e4dc8a4 | [
"Apache-2.0"
] | null | null | null | loops/ornekUygulama.py | mrtyasar/PythonLearn | b8fa5d97b9c811365db8457f42f1e1d04e4dc8a4 | [
"Apache-2.0"
] | null | null | null | loops/ornekUygulama.py | mrtyasar/PythonLearn | b8fa5d97b9c811365db8457f42f1e1d04e4dc8a4 | [
"Apache-2.0"
] | null | null | null | #----------------------------------------------#
# Karakter Dizilerinin İçeriğini Karşılaştırma
#----------------------------------------------#
#elimizde iki farklı metin var
ilkMetin = "asdasfddgdhfjfdgdşfkgjdfklgşjdfklgjdfkghdfjghjklsdhajlsdhjkjhkhjjh"
ikinciMetin = "sdfsuıdoryeuıfsjkdfhdjklghjdfklruseldhfjlkdshfljskeeuf"
#programımızda ilkMetin de olup ikinciMetin de yer almayan öğeleri ayırmak istiyoruz
for s in ilkMetin:#ilkMetin'deki s adını verdiğimiz her bir öğe için
if not s in ikinciMetin: #eğer s adlı bir öğe ikinciMetin de yoksa
print(s)#s adlı öğeyi ekrana yazdır
#a
#a
#ş
#ş
#a
#ikinciMetin de olan fakat ilkMetin de olmayan öğeleri bulalım
for m in ikinciMetin:
if not m in ilkMetin:
print(m)
#u ı o r y e u ı r u e e e u
#birden fazla aynı öğenin yazılmasını istemiyorsak
fark = ""
for p in ikinciMetin: #ikinciMetin de p dediğim bütün öğeler için
if not p in ilkMetin: # eğer p ilkMetin in içide yoksa
if not p in fark: # eğer p öğesi fark yoksa
fark += p#bu öğeyi fark değişkenine ekle
print(fark)#u ı o r y e
#eğer karakter dizi ile birleşirme gerçekleştiriyorsak bu işlem değişkenin önceki değerini değiştirmez
a = "istihza"
print(a + ".com")#istihza.com
print(a)#istihza
#bu işlemin kalıcı hale getirmek için ise yeni işlemi yeni bir değişkene atayarak yaparız
a = a +".com" # a += ".com" olarakda yazabilirdik
print(a)#istihza.com
#yukarıdaki işlemi şöyle kolayca da yazabilirdik:
firstString = "asadlaskdnlnceıfeşsdje9"
twoString = "asşdlmasejmşşvawldad"
cikarma = ""
for e in firstString:
if not e in twoString and not e in cikarma:
cikarma += e
print(cikarma)#k n c ı f 9
"""
#--------------------------------------#
# DOSYALARIN İÇERİĞİNİ KARŞILAŞTIRMA
#--------------------------------------#
#değişkenlerimizi karşılaştırmıştık
#şimdi ise dosyaları karşılaştıralım
#elimizde isimler1.txt ve isimler2.txt adlı iki dosya var
d1 = open("isimler1.txt")#dosyayı açıyoruz
d1Satirlar = d1.readlines() #satırları okuyoruz
d2 = open("isimler2.txt")#dosyayı açıyoruz
d2Satirlar = d2.readlines()#satırları okuyoruz
for i in d2Satirlar:
if not i in d1Satirlar:
print(i)
d1.close()
d2.close()
"""
#-----------------------------------------#
# COUNTING CHARACTERS IN A STRING
#-----------------------------------------#
# a program that shows how many times a given letter occurs in the text:
metin = """Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcı
tarafından 90’lı yılların başında geliştirilmeye başlanmıştır. Çoğu insan,
isminin Python olmasına aldanarak, bu programlama dilinin, adını piton
yılanından aldığını düşünür. Ancak zannedildiğinin aksine bu programlama
dilinin adı piton yılanından gelmez. Guido Van Rossum bu programlama dilini,
The Monty Python adlı bir İngiliz komedi grubunun, Monty Python’s Flying
Circus adlı gösterisinden esinlenerek adlandırmıştır. Ancak her ne kadar
gerçek böyle olsa da, Python programlama dilinin pek çok yerde bir yılan
figürü ile temsil edilmesi neredeyse bir gelenek halini almıştır."""
harf = input("The letter you want to look up: ")
number = ""
for l in metin:  # for each item in metin, which we name l
    if harf == l:  # if the letter from the user is the same as l
        number += harf  # append the user's letter to the number variable
print(len(number))
# if there are 5 a's, this writes aaaaa into the number variable
# the print function then tells us how many elements number holds
# instead, we could also write it like this
metinOne = """ Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcı
tarafından 90’lı yılların başında geliştirilmeye başlanmıştır. Çoğu insan,
isminin Python olmasına aldanarak, bu programlama dilinin, adını piton
yılanından aldığını düşünür. Ancak zannedildiğinin aksine bu programlama
dilinin adı piton yılanından gelmez. Guido Van Rossum bu programlama dilini,
The Monty Python adlı bir İngiliz komedi grubunun, Monty Python’s Flying
Circus adlı gösterisinden esinlenerek adlandırmıştır. Ancak her ne kadar
gerçek böyle olsa da, Python programlama dilinin pek çok yerde bir yılan
figürü ile temsil edilmesi neredeyse bir gelenek halini almıştır."""
harfOne = input("The letter you want to look up:")
sayi = 0
for i in metinOne:
    if harfOne == i:
        sayi += 1
print(sayi)
# every time the letter from the user occurs, we say "add one to it"
# so the value of sayi increases by 1 for each time that letter is used
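# Python also ships a shortcut for exactly this: str.count() returns the
# number of occurrences directly, so the whole loop above collapses to one call:
print(metinOne.count(harfOne))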
#-----------------------------------------------#
# COUNTING CHARACTERS INSIDE A FILE
#-----------------------------------------------#
# now suppose the previous text is not a variable but a text read from a file
hakkinda = open("hakkında.txt", encoding="utf-8")  # open our file
harfTwo = input("The letter you want to look up: ")  # ask the user for a letter
sayiOne = 0  # set its value to zero
for karakterDizisi in hakkinda:  # for each string, named karakterDizisi, in the file
    for karakter in karakterDizisi:  # for each item, named karakter, in karakterDizisi
        if harfTwo == karakter:  # if the letter given by the user equals karakter
            sayiOne += 1  # add 1 to sayiOne
print(sayiOne)  # print sayiOne to the screen
hakkinda.close()  # close the file so all operations are saved
# to see that each line really is a separate string, we can use repr()
"""
for karakterDizisi in hakkinda:
    print(repr(karakterDizisi))
Output:
'Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcı\n'
'tarafından 90’lı yılların başında geliştirilmeye başlanmıştır. Çoğu insan,\n'
"""
"""
Look carefully at this output. Thanks to the repr() function, we can clearly
see what Python is doing under the hood. Because the quote marks that mark the
start and end of each string, and the \n escape sequences, are all visible, we
can see much more clearly that every line is a separate string.
In the code we wrote, we ask the user to enter a letter. Our algorithm
requires us to compare this letter, one by one, against every character
occurring in the strings of the text. Via the input() method we get a single
character from the user. The for loop we use, however, gives us a whole
string per line rather than a single character. So if, say, the user queried
the letter 'a', the first for loop would put the string 'Bu programlama dili
Guido Van Rossum adlı Hollandalı bir programcı' up against this letter.
Therefore we need to go one level deeper and set up another for loop over the
variable obtained from the first for loop. That is why we write code like this:
"""
"""
for karakter_dizisi in hakkında:
for karakter in karakter_dizisi:
"""
| 35.936842 | 747 | 0.733304 | 896 | 6,828 | 5.603795 | 0.393973 | 0.025891 | 0.01673 | 0.017526 | 0.29038 | 0.280223 | 0.267875 | 0.25234 | 0.25234 | 0.230034 | 0 | 0.005268 | 0.165934 | 6,828 | 189 | 748 | 36.126984 | 0.873573 | 0.323228 | 0 | 0.285714 | 0 | 0 | 0.597217 | 0.055276 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85284fae5ffd5fddbef7db877d1222f08e75fa2f | 16,667 | py | Python | bubble/bubble_runner.py | cp105/ai611_project | 2d0bbd8052a6425eefc7301e18ddf9ad4404a8fb | [
"Apache-2.0"
] | 1 | 2020-05-18T03:18:11.000Z | 2020-05-18T03:18:11.000Z | bubble/bubble_runner.py | cp105/ai611_project | 2d0bbd8052a6425eefc7301e18ddf9ad4404a8fb | [
"Apache-2.0"
] | null | null | null | bubble/bubble_runner.py | cp105/ai611_project | 2d0bbd8052a6425eefc7301e18ddf9ad4404a8fb | [
"Apache-2.0"
] | 1 | 2020-11-02T08:46:32.000Z | 2020-11-02T08:46:32.000Z | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from dopamine.discrete_domains import run_experiment
from dopamine.utils.example_viz_lib import MyRunner
from dopamine.agents.implicit_quantile import implicit_quantile_agent
from dopamine.utils import agent_visualizer
from dopamine.utils import atari_plotter
from dopamine.utils import bar_plotter
from dopamine.utils import line_plotter
from dopamine.utils import plotter
import gin
import numpy as np
import tensorflow.compat.v1 as tf
import pygame
import gin.tf
@gin.configurable
def create_runner(base_dir, schedule='continuous_train_and_eval', level=0):
"""Creates an Bubble Runner.
- originally copied via run_experiment.create_runner
Args:
level: the initial stage level to start (reset condition)
"""
assert base_dir is not None
from dopamine.discrete_domains.run_experiment import TrainRunner
from dopamine.discrete_domains.run_experiment import create_agent
# Continuously runs training and evaluation until max num_iterations is hit.
if schedule == 'continuous_train_and_eval':
return BubbleRunner(base_dir, create_agent, game_level=level)
# Continuously runs training until max num_iterations is hit.
elif schedule == 'continuous_train':
return TrainRunner(base_dir, create_agent)
else:
raise ValueError('Unknown schedule: {}'.format(schedule))
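# A hedged gin sketch: since create_runner is @gin.configurable, its keyword
# arguments can be bound from the operative config file, e.g.
#
#   create_runner.schedule = 'continuous_train_and_eval'
#   create_runner.level = 3
#
# which makes episodes reset at stage level 3 (BubbleRunner then bumps the
# level by one after each iteration).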
@gin.configurable
class BubbleRunner(run_experiment.Runner):
"""BubbleRunner
- customized for bubble runner
Args:
proc_queue: instance of `multiprocessing.Queue`
"""
def __init__(self, base_dir, create_agent_fn, proc_queue=None, game_level=0):
'''initialize bubble-runner'''
print('! BubbleRunner(%s)' % (base_dir))
assert create_agent_fn is not None
BubbleRunner.init_logger(base_dir)
super(BubbleRunner, self).__init__(base_dir, create_agent_fn)
self.proc_queue = proc_queue
        self.game_level = game_level
        # assumed default: _run_one_episode consults this flag to optionally
        # end an episode as soon as the agent loses a life
        self.end_on_death = False
def post_message(self, data):
self.proc_queue.put(data) if self.proc_queue is not None else None
    def current(self):
        '''current time in milliseconds (time is imported at module level)'''
        return int(round(time.time() * 1000))
def _initialize_episode(self):
env = self._environment
obs = env.reset(self.game_level) if self.game_level > 0 else env.reset()
return self._agent.begin_episode(obs)
def _run_one_step(self, action):
observation, reward, is_terminal, info = self._environment.step(action)
return observation, reward, is_terminal, info
def _run_one_episode(self):
step_number = 0
total_reward = 0.
agent_lives = 0
action = self._initialize_episode()
is_terminal = False
is_death = False
# Keep interacting until we reach a terminal state.
while True:
observation, reward, is_terminal, info = self._run_one_step(action)
curr_lives = int(info['lives']) if 'lives' in info else 0
total_reward += reward
step_number += 1
#! end the episode if death.
is_death = True if curr_lives < agent_lives else is_death
agent_lives = curr_lives
#! determine terminal & EOE
if (self.end_on_death and is_death):
break
# TODO(steve) - need to clip reward really?!!
reward = np.clip(reward, -1, 1)
if (self._environment.game_over or step_number == self._max_steps_per_episode):
break
elif is_terminal:
self._agent.end_episode(reward)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation, info)
self._end_episode(reward)
#! report status and returns
self.post_message({'episode': {'length': step_number, 'return': total_reward}})
return step_number, total_reward, int(info['score']), int(info['level'])
def _run_one_phase(self, min_steps, statistics, run_mode_str):
step_count = 0
num_episodes = 0
sum_returns = 0.
time_started = self.current()
self.post_message({'phase': {'steps': min_steps, 'mode': run_mode_str, 'level':self.game_level }})
while step_count < min_steps:
episode_length, episode_return, episode_score, episode_level = self._run_one_episode()
statistics.append({
'{}_episode_lengths'.format(run_mode_str): episode_length,
'{}_episode_returns'.format(run_mode_str): episode_return
})
step_count += episode_length
sum_returns += episode_return
num_episodes += 1
sec_per_step = ((self.current() - time_started)/1000.0/step_count)
sec_remained = int((min_steps - step_count)*sec_per_step)
time_display = '{:1.1f}m'.format(sec_remained/60) if sec_remained > 60*5 else '{}s'.format(sec_remained)
sys.stdout.write('Steps: {:6.0f} {:2.0f}% '.format(step_count, step_count/min_steps*100.) +
'Remains: {} '.format(time_display) +
'Episode[{}].len: {} '.format(num_episodes, episode_length) +
'Return: {:.1f} S:{} L:{}'.format(episode_return, episode_score, episode_level)+
' \r')
sys.stdout.flush()
return step_count, sum_returns, num_episodes
def _run_one_iteration(self, iteration):
# print('! run_one_iteration({}) - L{}'.format(iteration, self.game_level))
ret = super(BubbleRunner, self)._run_one_iteration(iteration)
self.game_level = min(99, self.game_level + 1)
return ret
@staticmethod
def init_logger(base_dir):
'''initialize logger to save into file'''
import logging, os
# get TF logger
log = logging.getLogger('tensorflow')
log.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if os.path.exists(os.path.join(base_dir, 'tensorflow.log')):
fh = logging.FileHandler(os.path.join(base_dir, 'tensorflow.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
log.addHandler(fh)
# print log header..
tf.logging.info('---'*32)
tf.logging.info('BubbleRunner() starts!!')
tf.logging.info('---'*32)
class VizBubbleRunner(BubbleRunner):
"""VizBubbleRunner: runner to visualize playing w/ checkpoint"""
def __init__(self, base_dir, trained_agent_ckpt_path, create_agent_fn, use_legacy_checkpoint = False, game_level = 0):
print('! VizBubbleRunner({})'.format(base_dir))
self._trained_agent_ckpt_path = trained_agent_ckpt_path
self._use_legacy_checkpoint = use_legacy_checkpoint
super(VizBubbleRunner, self).__init__(base_dir, create_agent_fn, game_level=game_level)
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
self._agent.reload_checkpoint(self._trained_agent_ckpt_path, self._use_legacy_checkpoint)
self._start_iteration = 0
def _run_one_iteration(self, iteration):
from dopamine.discrete_domains import iteration_statistics
statistics = iteration_statistics.IterationStatistics()
tf.logging.info('Starting iteration %d', iteration)
_, _ = self._run_eval_phase(statistics)
return statistics.data_lists
def _run_one_episode(self):
step_number = 0
total_reward = 0.
action = self._initialize_episode()
is_terminal = False
# Keep interacting until we reach a terminal state.
while True:
observation, reward, is_terminal, info = self._run_one_step(action)
total_reward += reward
step_number += 1
reward = np.clip(reward, -1, 1)
if (self._environment.game_over or step_number == self._max_steps_per_episode):
break
elif is_terminal:
self._agent.end_episode(reward)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation, info)
self._end_episode(reward)
return step_number, total_reward
def visualize(self, record_path, num_global_steps=500):
        '''Customized visualization for bubble.
        - originally adapted from MyRunner.visualize()
        '''
print('RUN> visualize(%s, %d)'%(record_path, num_global_steps))
if not tf.gfile.Exists(record_path):
tf.gfile.MakeDirs(record_path)
self._agent.eval_mode = True
# Set up the game playback rendering.
atari_params = {'environment': self._environment,
'width': 240,
'height': 224 }
atari_plot = atari_plotter.AtariPlotter(parameter_dict=atari_params)
# Plot the rewards received next to it.
reward_params = {'x': atari_plot.parameters['width'],
'xlabel': 'Timestep',
'ylabel': 'Reward',
'title': 'Rewards',
'get_line_data_fn': self._agent.get_rewards}
#reward_plot = line_plotter.LinePlotter(parameter_dict=reward_params)
reward_plot = MyLinePlotter(parameter_dict=reward_params)
action_names = ['Action {}'.format(x) for x in range(self._agent.num_actions)]
# Plot Observation at left-bottom
obsrv_params = {
'x': atari_plot.parameters['x'],
'y': atari_plot.parameters['height'] - 10,
'width': atari_plot.parameters['width'],
'height': atari_plot.parameters['height'],
}
obsrv_plot = MyObservationPlotter(parameter_dict=obsrv_params)
# Plot Q-values (DQN) or Q-value distributions (Rainbow).
q_params = {'x': atari_plot.parameters['width'],
'y': atari_plot.parameters['height'],
'legend': action_names }
if 'DQN' in self._agent.__class__.__name__:
q_params['xlabel'] = 'Timestep'
q_params['ylabel'] = 'Q-Value'
q_params['title'] = 'Q-Values'
q_params['get_line_data_fn'] = self._agent.get_q_values
q_plot = MyLinePlotter(parameter_dict = q_params)
else:
q_params['xlabel'] = 'Return'
q_params['ylabel'] = 'Return probability'
q_params['title'] = 'Return distribution'
q_params['get_bar_data_fn'] = self._agent.get_probabilities
q_plot = MyBarPlotter(parameter_dict = q_params)
# Screen Size
screen_width = (atari_plot.parameters['width'] + reward_plot.parameters['width'])
screen_height = (atari_plot.parameters['height'] + q_plot.parameters['height'])
# Dimensions need to be divisible by 2:
screen_width += 1 if screen_width % 2 > 0 else 0
screen_height += 1 if screen_height % 2 > 0 else 0
# build visualizer.
visualizer = agent_visualizer.AgentVisualizer(
record_path=record_path, plotters=[
atari_plot, reward_plot, obsrv_plot, q_plot
],
screen_width=screen_width, screen_height=screen_height)
# run loop in global_step
global_step = 0
while global_step < num_global_steps:
initial_observation = self._environment.reset()
action = self._agent.begin_episode(initial_observation)
while True:
observation, reward, is_terminal, info = self._environment.step(action)
global_step += 1
obsrv_plot.setObservation(observation)
visualizer.visualize()
if self._environment.game_over or global_step >= num_global_steps:
break
elif is_terminal:
self._agent.end_episode(reward)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation, info)
self._end_episode(reward)
visualizer.generate_video()
class MyObservationPlotter(plotter.Plotter):
"""MyObservationPlotter: plot observation via step()"""
_defaults = { 'x': 0, 'y': 0 }
def __init__(self, parameter_dict = {}, screen_size = 84):
super(MyObservationPlotter, self).__init__(parameter_dict)
self.width = self.parameters['width'] if 'width' in self.parameters else screen_size
self.height = self.parameters['height'] if 'height' in self.parameters else screen_size
self.game_surface = pygame.Surface((screen_size, screen_size))
self.obs = None
def setObservation(self, obs):
self.obs = obs
def draw(self):
numpy_surface = np.frombuffer(self.game_surface.get_buffer(), dtype=np.int32)
if self.obs is not None:
obs = self.obs
# obs = np.transpose(obs)
# obs = np.swapaxes(obs, 1, 2)
# obs = obs[0] | (obs[0] << 8) | (obs[0] << 16) # must be grey-scale image (or single channel)
np.copyto(numpy_surface, obs.ravel())
return pygame.transform.scale(self.game_surface, (self.width, self.height))
class MyLinePlotter(line_plotter.LinePlotter):
"""MyLinePlotter: plot observation via step()"""
def __init__(self, parameter_dict):
myDef = {'font': {
'family': 'DejaVu Sans',
'weight': 'regular',
'size': 26 },
'figsize': (12, 9),
}
myDef.update(parameter_dict)
super(MyLinePlotter, self).__init__(parameter_dict = myDef)
        # use a secondary y-axis for the score line
        self.ax1 = self.plot.axes
        self.ax2 = self.ax1.twinx()
        self.ax2.set_ylabel('Score', color='b')
def draw(self):
"""Draw the line plot.
If `parameter_dict` contains a 'legend' key pointing to a list of labels,
this will be used as the legend labels in the plot.
Returns:
object to be rendered by AgentVisualizer.
"""
self._setup_plot() # draw
num_colors = len(self.parameters['colors'])
max_xlim = 0
line_data = self.parameters['get_line_data_fn']()
for i in range(len(line_data)):
plot_axes = self.ax2 if self.ax2 and i + 1 >= len(line_data) else self.ax1
plot_axes.plot(line_data[i],
linewidth=self.parameters['linewidth'],
color=self.parameters['colors'][i % num_colors])
max_xlim = max(max_xlim, len(line_data[i]))
min_xlim = max(0, max_xlim - self.parameters['max_width'])
self.plot.set_xlim(min_xlim, max_xlim)
if 'legend' in self.parameters:
self.plot.legend(self.parameters['legend'])
self.fig.canvas.draw()
# Now transfer to surface.
width, height = self.fig.canvas.get_width_height()
if self.plot_surface is None:
self.plot_surface = pygame.Surface((width, height))
plot_buffer = np.frombuffer(self.fig.canvas.buffer_rgba(), np.uint32)
surf_buffer = np.frombuffer(self.plot_surface.get_buffer(),
dtype=np.int32)
np.copyto(surf_buffer, plot_buffer)
return pygame.transform.smoothscale(
self.plot_surface,
(self.parameters['width'], self.parameters['height']))
class MyBarPlotter(bar_plotter.BarPlotter):
"""MyBarPlotter: plot observation via step()"""
def __init__(self, parameter_dict):
myDef = {'font': {
'family': 'DejaVu Sans',
'weight': 'regular',
'size': 26 },
}
myDef.update(parameter_dict)
super(MyBarPlotter, self).__init__(parameter_dict = myDef)
def draw(self):
return super(MyBarPlotter, self).draw()
| 42.845758 | 122 | 0.638927 | 2,016 | 16,667 | 5.022321 | 0.204365 | 0.016 | 0.016889 | 0.011358 | 0.298765 | 0.211358 | 0.167012 | 0.140741 | 0.117136 | 0.106074 | 0 | 0.009966 | 0.259495 | 16,667 | 388 | 123 | 42.956186 | 0.810404 | 0.130557 | 0 | 0.25784 | 0 | 0 | 0.068101 | 0.00354 | 0 | 0 | 0 | 0.002577 | 0.006969 | 1 | 0.076655 | false | 0 | 0.087108 | 0.003484 | 0.229965 | 0.013937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8528bb78af4c1e85b701d3605f8b9052440870fe | 10,830 | py | Python | Packs/HatchingTriage/Integrations/HatchingTriage/HatchingTriage.py | hatching/content | 5e00808969b9d56c3f5cbdcb9068b65ac1a6de84 | [
"MIT"
] | 1 | 2021-04-20T10:58:15.000Z | 2021-04-20T10:58:15.000Z | Packs/HatchingTriage/Integrations/HatchingTriage/HatchingTriage.py | hatching/content | 5e00808969b9d56c3f5cbdcb9068b65ac1a6de84 | [
"MIT"
] | null | null | null | Packs/HatchingTriage/Integrations/HatchingTriage/HatchingTriage.py | hatching/content | 5e00808969b9d56c3f5cbdcb9068b65ac1a6de84 | [
"MIT"
] | 1 | 2021-04-20T20:02:06.000Z | 2021-04-20T20:02:06.000Z | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
class Client(BaseClient):
def __init__(self, base_url, *args, **kwarg):
super().__init__(base_url, *args, **kwarg)
def test_module(client: Client) -> str:
r = client._http_request(
"GET", "users", resp_type="response", ok_codes=(200, 401, 404)
)
if r.status_code == 404:
return "Page not found, possibly wrong base_url"
if r.status_code == 401:
return "Bad API Key"
return "ok"
def query_samples(client, **args) -> CommandResults:
params = {"subset": args.get("subset")}
r = client._http_request("GET", "samples", params=params)
results = CommandResults(
outputs_prefix="Triage.samples", outputs_key_field="id", outputs=r["data"]
)
return results
def submit_sample(client: Client, **args) -> CommandResults:
data = {"kind": args.get("kind"), "interactive": False}
if args.get("profiles", []):
profiles_data = []
for i in args.get("profiles", "").split(","):
profiles_data.append({"profile": i, "pick": "sample"})
data["profiles"] = profiles_data
if data["kind"] == "url":
data.update({"url": args.get("data")})
r = client._http_request("POST", "samples", json_data=data)
elif data["kind"] == "file":
        file_path = demisto.getFilePath(args.get("data")).get("path")
with open(file_path, "rb") as f:
files = {"file": f}
r = client._http_request("POST", "samples", json_data=data, files=files)
else:
return_error(
f'Type of sample needs to be selected, either "file" or "url", the selected type was: {data["kind"]}'
)
results = CommandResults(
outputs_prefix="Triage.submissions", outputs_key_field="id", outputs=r
)
return results
def get_sample(client: Client, **args) -> CommandResults:
sample_id = args.get("sample_id")
r = client._http_request("GET", f"samples/{sample_id}")
results = CommandResults(
outputs_prefix="Triage.samples", outputs_key_field="id", outputs=r
)
return results
def get_sample_summary(client: Client, **args) -> CommandResults:
sample_id = args.get("sample_id")
r = client._http_request("GET", f"samples/{sample_id}/summary")
results = CommandResults(
outputs_prefix="Triage.sample-summaries", outputs_key_field="sample", outputs=r
)
return results
def delete_sample(client: Client, **args) -> str:
sample_id = args.get("sample_id")
client._http_request("DELETE", f"samples/{sample_id}")
return f"Sample {sample_id} successfully deleted"
def set_sample_profile(client: Client, **args) -> str:
"""
Used to move a submitted sample from static analysis to behavioural by giving it a profile to run under
"""
sample_id = args.get("sample_id")
data = {
"auto": argToBoolean(args.get("auto", True)),
"pick": argToList(args.get("pick", [])),
}
if args.get("profiles"):
data.update({"profiles": [{"profile": args.get("profiles", "")}]})
data = json.dumps(data)
client._http_request("POST", f"samples/{sample_id}/profile", data=data)
return f"Profile successfully set for sample {sample_id}"
def get_static_report(client: Client, **args) -> CommandResults:
"""
    Gets the static analysis report from a given sample
"""
sample_id = args.get("sample_id")
r = client._http_request("GET", f"samples/{sample_id}/reports/static")
results = CommandResults(
outputs_prefix="Triage.sample.reports.static",
outputs_key_field="sample.sample",
outputs=r,
)
return results
def get_report_triage(client: Client, **args) -> CommandResults:
"""
    Outputs a score, which should map to a DBot score
"""
sample_id = args.get("sample_id")
task_id = args.get("task_id")
r = client._http_request("GET", f"samples/{sample_id}/{task_id}/report_triage.json")
results = CommandResults(
outputs_prefix="Triage.sample.reports.triage",
outputs_key_field="sample.id",
outputs=r,
)
return results
def get_kernel_monitor(client: Client, **args) -> dict:
sample_id = args.get("sample_id")
task_id = args.get("task_id")
r = client._http_request(
"GET", f"samples/{sample_id}/{task_id}/logs/onemon.json", resp_type="text"
)
return_results("Kernel monitor results:")
results = fileResult(f"{sample_id}-{task_id}-kernel-monitor.json", r)
return results
def get_pcap(client: Client, **args) -> dict:
sample_id = args.get("sample_id")
task_id = args.get("task_id")
r = client._http_request(
"GET", f"samples/{sample_id}/{task_id}/dump.pcap", resp_type="response"
)
filename = f"{sample_id}.pcap"
file_content = r.content
return_results("PCAP results:")
return fileResult(filename, file_content)
def get_dumped_files(client: Client, **args) -> dict:
sample_id = args.get("sample_id")
task_id = args.get("task_id")
file_name = args.get("file_name")
r = client._http_request(
"GET", f"samples/{sample_id}/{task_id}/{file_name}", resp_type="content"
)
results = fileResult(f"{file_name}", r)
return results
def get_users(client: Client, **args) -> CommandResults:
if args.get("userID"):
url_suffix = f'users/{args.get("userID")}'
else:
url_suffix = "users"
r = client._http_request("GET", url_suffix)
# Depending on the api endpoint used, the results are either in the 'data' key or not
if r.get("data"):
r = r["data"]
results = CommandResults(
outputs_prefix="Triage.users", outputs_key_field="id", outputs=r
)
return results
def create_user(client: Client, **args) -> CommandResults:
data = {
"username": args.get("username"),
"first_name": args.get("firstName"),
"last_name": args.get("lastName"),
"password": args.get("password"),
"permissions": argToList(args.get("permissions")),
}
data = json.dumps(data)
r = client._http_request("POST", "users", data=data)
results = CommandResults(
outputs_prefix="Triage.users", outputs_key_field="id", outputs=r
)
return results
def delete_user(client: Client, **args) -> str:
userID = args.get("userID")
client._http_request("DELETE", f"users/{userID}")
results = "User successfully deleted"
return results
def create_apikey(client: Client, **args) -> CommandResults:
userID = args.get("userID")
name = args.get("name")
data = json.dumps({"name": name})
r = client._http_request("POST", f"users/{userID}/apikeys", data=data)
results = CommandResults(
outputs_prefix="Triage.apikey", outputs_key_field="key", outputs=r
)
return results
def get_apikey(client: Client, **args) -> CommandResults:
userID = args.get("userID")
r = client._http_request("GET", f"users/{userID}/apikeys")
results = CommandResults(
outputs_prefix="Triage.apikey", outputs_key_field="key", outputs=r.get("data")
)
return results
def delete_apikey(client: Client, **args) -> str:
userID = args.get("userID")
apiKeyName = args.get("name")
client._http_request("DELETE", f"users/{userID}/apikeys/{apiKeyName}")
results = f"API key {apiKeyName} was successfully deleted"
return results
def get_profile(client: Client, **args) -> CommandResults:
profileID = args.get("profileID")
if profileID:
url_suffix = f"profiles/{profileID}"
else:
url_suffix = "profiles"
r = client._http_request("GET", url_suffix)
if not profileID and r.get("data"):
r = r["data"]
results = CommandResults(
outputs_prefix="Triage.profiles", outputs_key_field="id", outputs=r
)
return results
def create_profile(client: Client, **args) -> CommandResults:
data = json.dumps(
{
"name": args.get("name"),
"tags": argToList(args.get("tags")),
"timeout": int(args.get("timeout", 120)),
"network": args.get("network"),
"browser": args.get("browser"),
}
)
r = client._http_request("POST", "profiles", data=data)
results = CommandResults(
outputs_prefix="Triage.profiles", outputs_key_field="id", outputs=r
)
return results
def update_profile(client: Client, **args) -> str:
profileID = args.get("profileID")
data = {}
for arg in args:
if arg == "timeout":
data[arg] = int(args.get(arg, 60))
if arg == "tags":
data[arg] = argToList(args.get(arg))
if arg == "timeout":
data[arg] = args.get(arg, None)
client._http_request("PUT", f"profiles/{profileID}", data=json.dumps(data))
results = "Profile updated successfully"
return results
def delete_profile(client: Client, **args) -> str:
profileID = args.get("profileID")
client._http_request("DELETE", f"profiles/{profileID}")
results = f"Profile {profileID} successfully deleted"
return results
def main():
params = demisto.params()
args = demisto.args()
client = Client(
params.get("base_url"),
verify=params.get("Verify SSL"),
headers={"Authorization": f'Bearer {params.get("API Key")}'},
)
commands = {
"test-module": test_module,
"triage-query-samples": query_samples,
"triage-submit-sample": submit_sample,
"triage-get-sample": get_sample,
"triage-get-sample-summary": get_sample_summary,
"triage-delete-sample": delete_sample,
"triage-set-sample-profile": set_sample_profile,
"triage-get-static-report": get_static_report,
"triage-get-report-triage": get_report_triage,
"triage-get-kernel-monitor": get_kernel_monitor,
"triage-get-pcap": get_pcap,
"triage-get-dumped-file": get_dumped_files,
"triage-get-users": get_users,
"triage-create-user": create_user,
"triage-delete-user": delete_user,
"triage-create-api-key": create_apikey,
"triage-get-api-key": get_apikey,
"triage-delete-api-key": delete_apikey,
"triage-get-profiles": get_profile,
"triage-create-profile": create_profile,
"triage-update-profile": update_profile,
"triage-delete-profile": delete_profile,
}
command = demisto.command()
if command in commands:
return_results(commands[command](client, **args)) # type: ignore
else:
return_error(f"Command {command} is not available in this integration")
if __name__ in ["__main__", "__builtin__", "builtins"]:
main()
| 27.912371 | 113 | 0.634257 | 1,329 | 10,830 | 4.987961 | 0.139955 | 0.050686 | 0.058983 | 0.046161 | 0.43717 | 0.345452 | 0.309549 | 0.267159 | 0.255695 | 0.211344 | 0 | 0.002361 | 0.217729 | 10,830 | 387 | 114 | 27.984496 | 0.780099 | 0.027516 | 0 | 0.284615 | 0 | 0.003846 | 0.237768 | 0.070291 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0.003846 | 0.015385 | 0 | 0.203846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
516b7b328aa661fb5cfc6cbe4557688406fd3782 | 488 | py | Python | doi_request/controller.py | joffilyfe/doi_request | 870c6b346d7b28789e45cfdae01dcc0f47dafd43 | [
"BSD-2-Clause"
] | null | null | null | doi_request/controller.py | joffilyfe/doi_request | 870c6b346d7b28789e45cfdae01dcc0f47dafd43 | [
"BSD-2-Clause"
] | null | null | null | doi_request/controller.py | joffilyfe/doi_request | 870c6b346d7b28789e45cfdae01dcc0f47dafd43 | [
"BSD-2-Clause"
] | null | null | null | import logging
from tasks.celery import registry_dispatcher_document
logger = logging.getLogger(__name__)
class Depositor(object):
def deposit_by_pids(self, pids_list):
"""
        Receive a list of pids, each prefixed with its collection acronym
        (e.g. 'scl'), and register their DOIs.
"""
for item in pids_list:
collection, code = item.split('_')
registry_dispatcher_document.delay(code, collection)
logger.info('enqueued deposit for "%s"', item)
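
# Usage sketch (illustrative; the pid below is made up):
#
#   depositor = Depositor()
#   depositor.deposit_by_pids(['scl_S0100-12342020000100001'])
#
# Each item is split on '_' into (collection, code) and one Celery task is
# enqueued per document.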
| 23.238095 | 69 | 0.651639 | 57 | 488 | 5.350877 | 0.666667 | 0.118033 | 0.170492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.266393 | 488 | 20 | 70 | 24.4 | 0.851955 | 0.133197 | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5173f3bf7eb06fb274030ee5d5c05a3722df00ba | 18,234 | py | Python | main.py | KlaudijaMedeksaite/GBUI-voice-project | bd2cd979483e3ac43de5009d148e2e0403f50eda | [
"MIT"
] | null | null | null | main.py | KlaudijaMedeksaite/GBUI-voice-project | bd2cd979483e3ac43de5009d148e2e0403f50eda | [
"MIT"
] | null | null | null | main.py | KlaudijaMedeksaite/GBUI-voice-project | bd2cd979483e3ac43de5009d148e2e0403f50eda | [
"MIT"
] | null | null | null | import time
import playsound
import os
import random
from gtts.lang import tts_langs
from deep_translator import (GoogleTranslator)
import pickle
import json
# my classes
import random_test
import mike
import lang_tests
import level_one
import level_two
import level_three
import extras
# game progress vars
parts = {"l1_colours": "0", "l1_numbers": "0", "l1_animals": "0", "l2_greetings": "0", "l2_transport": "0",
"l2_sports": "0", "l3_food": "0", "l3_clothes": "0", "l3_buildings": "0"}
progress = {"name": "", "language": "",
"level": "0", "lvlpts": "0", "partsDone": parts, "points": "0", "saveTime": "0"}
def set_l1(p1=False, p2=False, p3=False, all=False):
if all:
parts['l1_colours'] = '1'
parts['l1_animals'] = '1'
parts['l1_numbers'] = '1'
else:
if p1:
parts['l1_colours'] = p1
if p2:
parts['l1_animals'] = p2
if p3:
parts['l1_numbers'] = p3
def set_l2(p1=False, p2=False, p3=False, all=False):
if all:
parts['l2_greetings'] = '1'
parts['l2_transport'] = '1'
parts['l2_sports'] = '1'
else:
if p1:
parts['l2_greetings'] = p1
if p2:
parts['l2_transport'] = p2
if p3:
parts['l2_sports'] = p3
def set_l3(p1=False, p2=False, p3=False, all=False):
if all:
parts['l3_food'] = '1'
parts['l3_clothes'] = '1'
parts['l3_buildings'] = '1'
else:
if p1:
parts['l3_food'] = p1
if p2:
parts['l3_clothes'] = p2
if p3:
parts['l3_buildings'] = p3
# LANG METHODS
def get_name():
confirmed = False
mike.mike('Welcome to uno lingo. What is your name?')
name = mike.record_audio()
print(name)
while confirmed == False:
voice_confirm = mike.record_audio(
"Your name is " + name + ". Is that correct? Say yes to confirm")
if "yes" in voice_confirm:
confirmed = True
else:
name = mike.record_audio('What is your name?')
print(name)
mike.mike('Nice to meet you, ' + name+'.')
mike.mike("My name is Mike.")
return name
def choose_language():
valid = False
chosen = False
langs_dict = GoogleTranslator.get_supported_languages(as_dict=True)
print("Languages available: ")
for key in langs_dict:
if langs_dict[key] in tts_langs():
print(key)
while valid == False:
while chosen == False:
choice = mike.record_audio("Please select a new language to learn")
try:
languageToLearn = extras.get_language_short(choice)
if languageToLearn in tts_langs():
valid = True
if valid == True:
response = mike.record_audio(
"You have chosen " + choice+", say yes to start learning this language.")
if 'yes' in response:
chosen = True
return languageToLearn
except:
return 0
# SETUP - LEVELS
def get_level():
response = ''
name = progress['name']
mike.mike(
name + ", would you like to take a test to find out what language level you are on?")
while len(response) < 1:
response = mike.record_audio()
if 'yes' in response:
level = test_Level()
elif 'no' in response:
mike.mike('What level are you on?')
level = mike.record_audio()
level = int(level)
while level > 3 or level < 1:
            level = int(mike.record_audio(
                "I'm sorry, please choose a level between one and three."))
    set_parts(str(level))
progress['partsDone'] = parts
return str(level)
def test_Level():
percent = 0
percent = lang_tests.test_l1(progress['language'])
if percent >= 70:
percent = lang_tests.test_l2(progress['language'])
if percent >= 70:
level = 3
else:
level = 2
elif percent >= 40:
level = 2
else:
level = 1
mike.mike("The tests have determined you are at level " + str(level))
return level
# PLAY - LEVELS
def begin_level():
# "switch" for levels here
if progress['level'] == '1':
load_lvl_1()
elif progress['level'] == '2':
load_lvl_2()
elif progress['level'] == '3':
load_lvl_3()
return 0
def set_parts(lvl, p1=False, p2=False, p3=False):
if lvl == "1":
set_l1(p1, p2, p3)
elif lvl == "2":
set_l1(all="1")
set_l2(p1, p2, p3)
elif lvl == "3":
set_l1(all="1")
set_l2(all="1")
set_l3(p1, p2, p3)
def part_choice(lvl):
user_input = " "
top = []
topicStr = ""
validChoice = False
if lvl == '1':
top.append(progress['partsDone']['l1_colours'])
top.append(progress['partsDone']['l1_animals'])
top.append(progress['partsDone']['l1_numbers'])
topics = ["colours", "animals", "numbers"]
elif lvl == '2':
top.append(progress['partsDone']['l2_greetings'])
top.append(progress['partsDone']['l2_transport'])
top.append(progress['partsDone']['l2_sports'])
topics = ["greetings", "transport", "sports"]
elif lvl == '3':
print("level: ", lvl)
top.append(progress['partsDone']['l3_food'])
top.append(progress['partsDone']['l3_clothes'])
top.append(progress['partsDone']['l3_buildings'])
topics = ["food", "clothes", "buildings"]
    else:
        raise Exception("invalid level: " + str(lvl))
topic = []
j = 0
while j < 3:
if top[j] == "0":
topic.append(str(topics[j]))
topicStr = topicStr + str(topics[j]) + ", "
j = j+1
topicStr = topicStr[:-2]
if topicStr == "":
return 10
else:
user_input = mike.record_audio(
"Please select a topic. Your options include " + topicStr)
while validChoice == False:
print("You said: " + user_input)
if user_input == "":
user_input = " "
if user_input in topicStr:
validChoice = True
# return user_input
else:
if user_input == " ":
user_input = mike.record_audio(
"That is not a valid option. Please select one of the following: " + topicStr)
else:
user_input = mike.record_audio(
user_input + " is not a valid option. Please select one of the following: " + topicStr)
x = 0
while x < len(topics):
if user_input in str(topics[x]):
part_no = x
return part_no
x = x+1
return part_no
# Easiest level, teach colours, numbers and animals
def load_lvl_1():
save_progress()
mike.mike("\n\nLevel One")
print("---------------------------------------------------------")
load_this = part_choice('1')
if load_this == 0:
l1_colours()
load_lvl_1()
if load_this == 1:
l1_animals()
load_lvl_1()
if load_this == 2:
l1_numbers()
load_lvl_1()
if load_this == 10:
mike.mike("\n\nCongratulations, you have finished level one!")
print("Total points this level: ", progress['points'])
load_lvl_2()
def l1_numbers():
points = progress['points']
lan = progress['language']
newP = level_one.numbers(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l1_numbers'] = "1"
progress['points'] = points
def l1_animals():
points = progress['points']
lan = progress['language']
newP = level_one.animals(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l1_animals'] = "1"
progress['points'] = points
def l1_colours():
points = progress['points']
lan = progress['language']
newP = level_one.colours(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l1_colours'] = "1"
progress['points'] = points
# Slightly more difficult level, teach more complex words and some adjectives
def load_lvl_2():
save_progress()
mike.mike("\n\nLevel Two")
print("---------------------------------------------------------")
load_this = part_choice('2')
if load_this == 0:
l2_greetings()
load_lvl_2()
if load_this == 1:
l2_transport()
load_lvl_2()
if load_this == 2:
l2_sports()
load_lvl_2()
if load_this == 10:
mike.mike("\n\nCongratulations, you have finished level two!")
print("Total points this level: ", progress['points'])
load_lvl_3()
# Medium level, teach basic sentences
def l2_transport():
points = progress['points']
lan = progress['language']
newP = level_two.transport(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l2_transport'] = "1"
progress['points'] = points
def l2_sports():
points = progress['points']
lan = progress['language']
newP = level_two.sports(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l2_sports'] = "1"
progress['points'] = points
def l2_greetings():
points = progress['points']
lan = progress['language']
newP = level_two.greetings(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l2_greetings'] = "1"
progress['points'] = points
def load_lvl_3():
save_progress()
mike.mike("\n\nLevel Three")
print("---------------------------------------------------------")
load_this = part_choice('3')
if load_this == 0:
l3_food()
load_lvl_3()
if load_this == 1:
l3_clothes()
load_lvl_3()
if load_this == 2:
l3_buildings()
load_lvl_3()
if load_this == 10:
mike.mike("\n\nCongratulations, you have finished level three!")
print("Total points this level: ", progress['points'])
def l3_food():
points = progress['points']
lan = progress['language']
newP = level_three.food(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l3_food'] = "1"
progress['points'] = points
def l3_clothes():
points = progress['points']
lan = progress['language']
newP = level_three.clothes(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l3_clothes'] = "1"
progress['points'] = points
def l3_buildings():
points = progress['points']
lan = progress['language']
newP = level_three.buildings(lan)
print("---------------------------------------------------------")
print("Points this level: ", newP,
"\nPossible points this level: 11")
points = int(points)+newP
print("Total points: ", points)
points = str(points)
# set this part to done so it won't be available again
progress['partsDone']['l3_buildings'] = "1"
progress['points'] = points
def graduate():
mike.mike("Congratulations, you have completed the course")
print("---------------------------------------------------------")
user_answer = mike.record_audio("Say new game to learn a new language.")
return user_answer
# SAVES
def save_progress():
res = load_progress(progress['name'])
newPrint = res
check = "'language': " + "'"+str(progress['language']+"'")
progress['saveTime'] = extras.make_time()
    if check not in str(res):
try:
with open('saves/'+progress['name'] + '.txt', 'ab+') as f:
pickle.dump(progress, f)
except:
os.makedirs("saves")
with open('saves/'+progress['name'] + '.txt', 'ab+') as f:
pickle.dump(progress, f)
else:
with open('saves/'+progress['name'] + '.txt', 'wb') as f:
pickle.dump("", f)
x = 0
while x < len(newPrint):
if check in str(newPrint[x]):
                if progress['saveTime'] >= newPrint[x]['saveTime']:
if progress in newPrint:
del newPrint[x]
else:
newPrint[x] = progress
x = x + 1
for n in newPrint:
with open('saves/'+progress['name'] + '.txt', 'ab+') as f:
pickle.dump(n, f)
def load_progress(n):
results = []
try:
with open('saves/'+n + '.txt', 'rb') as f:
try:
while True:
results.append(pickle.load(f))
except EOFError:
pass
return results
except:
print("no saved file")
return 0
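# Note: load_progress relies on the standard pickle pattern for reading a file
# holding several consecutively dumped objects -- keep calling pickle.load(f)
# on the same handle until it raises EOFError.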
def check_saves(name, language=False):
langsSaved = []
savedP = load_progress(name)
if language:
try:
savedP = str(savedP).replace('\'', '\"')
ps = json.loads(str(savedP))
for p in ps:
if p['language'] == language:
level = p['level']
progress['partsDone'] = p['partsDone']
return level
except:
return 0
else:
try:
savedP = str(savedP).replace("'',", "")
savedP = str(savedP).replace('\'', '\"')
ps = json.loads(str(savedP))
for p in ps:
langsSaved.append(p['language'])
return langsSaved
except:
return langsSaved
def choose_save(saves):
valid = False
mike.mike("saved languages include ")
for s in saves:
mike.mike(extras.get_language_long(s))
language = mike.record_audio(
"Which language would you like to continue learning?")
print("You have chosen: " + language)
while valid == False:
try:
language = extras.get_language_short(language)
except:
return
if language in saves:
level = check_saves(progress['name'], language)
progress['language'] = language
progress['level'] = str(level)
valid = True
else:
language = mike.record_audio(
"I'm sorry, that is not a valid option. Choose a different language.")
print("You have chosen: " + language)
# PART METHODS
def startup():
# get users name
name = get_name()
progress['name'] = name
# check if user has saved file
saves = check_saves(name)
if saves:
answered = False
answer = ""
while answered == False:
answer = mike.record_audio(
"You have saved progress available. Would you like to continue progress?")
if "yes" in answer:
answered = True
progBool = True
if "no" in answer:
answered = True
progBool = False
else:
progBool = False
if progBool == True:
# if yes - ask which save to load
choose_save(saves)
return
else:
# ask language to learn
progress['language'] = choose_language()
progress['level'] = get_level()
save_progress()
return
# TEST METHODS
def print_prog():
print("Name: " + progress['name'])
print("Language: " + progress['language'])
print("Level: " + progress['level'])
print("Parts: ", progress['partsDone'])
def easy_set(name=False, lang=False, level=False):
if name and lang and level:
progress['name'] = name
progress['language'] = lang
progress['level'] = level
else:
progress['name'] = "John"
progress['language'] = 'es'
progress['level'] = '1'
set_parts(progress['level'])
if __name__ == "__main__":
answer = "new game"
while(answer == "new game"):
startup()
begin_level()
answer = graduate()
if answer == "quit game":
mike.mike("Goodbye, ", progress['name'])
exit()
| 27.585477 | 111 | 0.530109 | 2,083 | 18,234 | 4.530005 | 0.116659 | 0.034337 | 0.033383 | 0.024799 | 0.448177 | 0.362124 | 0.297266 | 0.293133 | 0.293133 | 0.234103 | 0 | 0.018863 | 0.305144 | 18,234 | 660 | 112 | 27.627273 | 0.725888 | 0.048426 | 0 | 0.437624 | 0 | 0 | 0.230143 | 0.042773 | 0.00198 | 0 | 0 | 0 | 0 | 1 | 0.059406 | false | 0.00198 | 0.029703 | 0 | 0.126733 | 0.09505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51754b6a4b3d3608f20c547765de1308a45663f9 | 2,135 | py | Python | changes/utils/shards.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
] | 443 | 2015-01-03T16:28:39.000Z | 2021-04-26T16:39:46.000Z | changes/utils/shards.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
] | 12 | 2015-07-30T19:07:16.000Z | 2016-11-07T23:11:21.000Z | changes/utils/shards.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
] | 47 | 2015-01-09T10:04:00.000Z | 2020-11-18T17:58:19.000Z | import heapq
from flask import current_app
from typing import Any, Callable, cast, Dict, List, Tuple, TypeVar # NOQA
Normalized = TypeVar('Normalized')
def shard(objects, max_shards, object_stats, avg_time, normalize_object_name=cast(Callable[[str], Normalized], lambda x: x)):
# type: (List[str], int, Dict[Normalized, int], int, Callable[[str], Normalized]) -> List[Tuple[int, List[str]]]
"""
Breaks a set of objects into shards.
Args:
objects (list): A list of object names.
max_shards (int): Maximum amount of shards over which to distribute the objects.
        object_stats (dict): A mapping from normalized object name to duration.
        avg_time (int): Average duration of a single object.
normalize_object_name (str -> Tuple[str, ...]): a function that normalizes object names.
This function can return anything, as long as it is consistent with `test_stats`.
Returns:
list: Shards. Each element is a pair containing the weight for that
shard and the object names assigned to that shard.
"""
def get_object_duration(test_name):
# type: (str) -> int
normalized = normalize_object_name(test_name)
result = object_stats.get(normalized)
if result is None:
if object_stats:
current_app.logger.info('No existing duration found for test %r', test_name)
result = avg_time
return result
# don't use more shards than there are objects
num_shards = min(len(objects), max_shards)
# Each element is a pair (weight, objects).
groups = [(0, []) for _ in range(num_shards)] # type: List[Tuple[int, List[str]]]
# Groups is already a proper heap, but we'll call this to guarantee it.
heapq.heapify(groups)
weighted_tests = [(get_object_duration(t), t) for t in objects]
for weight, test in sorted(weighted_tests, reverse=True):
group_weight, group_tests = heapq.heappop(groups)
group_weight += 1 + weight
group_tests.append(test)
heapq.heappush(groups, (group_weight, group_tests))
return groups
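
# Usage sketch (hypothetical object names and durations):
#
#   stats = {'test_a': 30, 'test_b': 10, 'test_c': 20}
#   shards = shard(['test_a', 'test_b', 'test_c', 'test_d'], 2, stats, 15)
#
# 'test_d' has no recorded duration, so it falls back to avg_time (15). The
# heap keeps the lightest shard on top, so each object (heaviest first) goes
# to the currently least-loaded shard -- greedy longest-processing-time
# scheduling.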
| 41.862745 | 125 | 0.671194 | 295 | 2,135 | 4.728814 | 0.39322 | 0.028674 | 0.04086 | 0.022939 | 0.061649 | 0.034409 | 0 | 0 | 0 | 0 | 0 | 0.001226 | 0.236066 | 2,135 | 50 | 126 | 42.7 | 0.854077 | 0.44918 | 0 | 0 | 0 | 0 | 0.043088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5177c744c2f039219154881a88b3bbbb875e7877 | 5,989 | py | Python | main.py | Joeization/pyGaen | 172db8a1609da7d6c698d552e5d915df912e9e2d | [
"MIT"
] | 1 | 2019-06-16T16:13:18.000Z | 2019-06-16T16:13:18.000Z | main.py | Joeization/pyGaen | 172db8a1609da7d6c698d552e5d915df912e9e2d | [
"MIT"
] | null | null | null | main.py | Joeization/pyGaen | 172db8a1609da7d6c698d552e5d915df912e9e2d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pygame
try:
import pygame._view
except ImportError:
pass
from choice import *
from bgm import *
from dialog import *
from settings import *
from text import *
from log import *
def main():
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode((800, 600), 0, 32)
pygame.display.set_caption('alpha')
imglib = {}
imglib['load'] = pygame.image.load(resource_path('img/load.png')).convert_alpha()
screen.blit(imglib['load'], (0, 0))
pygame.display.update()
imgres = open(resource_path('src/img.txt'), 'r')
for img in imgres:
tag, tar = map(str, img.strip().split(' '))
imglib[tag] = pygame.image.load(resource_path(tar)).convert_alpha()
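    # Each line of src/img.txt (and src/sfx.txt below) maps a tag to a file
    # path, e.g. "bk img/bk.png" (the path here is illustrative); tags such as
    # 'bk', 'chiy', 'chin' and 'lg' are looked up later when drawing.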
sfxlib = {}
sfxres = open(resource_path('src/sfx.txt'), 'r')
for sfx in sfxres:
tag, tar = map(str, sfx.strip().split(' '))
sfxlib[tag] = resource_path(tar)
sfplayer = Bgm('')
ft18 = pygame.font.SysFont('simhei', 18)
ft24 = pygame.font.SysFont('simhei', 24)
ftpk = (ft24, ft18)
setting = Settings(ft18)
cho = Text(resource_path('src/cho.ga'))
dia = Text(resource_path('src/dia.ga'))
dialoglib = {}
choicelib = {}
dpos = 'main'
cpos = '-1'
pick = -1
vmode = 0
'''
    0 = normal
    1 = image
    2 = log
    3 = image only (dialog hidden; toggled with the right mouse button)
'''
clock = pygame.time.Clock()
san = 0
ddone = False
if dia.has():
while True:
ne = dia.parse()
if ne[0] == -1:
break
elif ne[0] == 0:
dialoglib[ne[7]] = ne
ddone = True
del dia
if cho.has():
while True:
ne = cho.parse()
if ne[0] == -1:
break
elif ne[0] == 1:
choicelib[ne[2]] = ne
del cho
if not ddone:
pygame.quit()
sys.exit()
ddone = False
cdone = False
ce = []
log = Log()
while True:
if not ddone:
dg = Dialog(dialoglib[dpos][1], dialoglib[dpos][2], dialoglib[dpos][3],
dialoglib[dpos][4], dialoglib[dpos][5], dialoglib[dpos][6],
dialoglib[dpos][8], dialoglib[dpos][9])
log.add(dg.log())
ddone = True
cpos = dg.ask()
if not cdone:
if cpos != '-1':
ce = []
for chi in choicelib[cpos][1]:
ce.append(Choice(chi[0], ft18, chi[1], chi[2], chi[3]))
cdone = True
(x, y) = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 3:
if vmode == 0:
vmode = 3
elif vmode == 3:
vmode = 0
if event.button == 4:
if vmode == 0:
vmode = 2
if event.button == 5:
if vmode == 2:
vmode = 0
if event.button == 1:
scl = setting.click((x, y), dpos, cpos, san)
if scl[0] == 0:
#reverse show
pass
elif scl[0] == 1:
#save
pass
elif scl[0] == 2:
#load
dg.reset()
dpos = scl[1][0]
cpos = scl[1][1]
san = scl[1][2]
if vmode == 0 and scl[0] == -1:
if cpos != u'-1':
for c in ce:
(lx, ly) = cgetpos(c.id())
if (x >= lx and x <= lx + 350 and
y >= ly and y <= ly + 50):
pick = c.id()
if pick != -1:
pass
else:
if dg.check():
if dg.nxt() != '-1':
if dg.nxt() == '-2':
pygame.quit()
sys.exit()
dg.reset()
dpos = dg.next(san)
ce = []
ddone = False
cdone = False
screen.blit(imglib['bk'], (0, 0))
if vmode == 0:
dg.blit(screen, whe(dg.wh()), imglib,
sfxlib, sfplayer, pygame.time.get_ticks(), ftpk)
if len(ce) > 0:
for c in ce:
(lx, ly) = cgetpos(c.id())
if (x >= lx and x <= lx + 350 and
y >= ly and y <= ly + 50):
c.blit(screen, (lx, ly), imglib['chiy'])
else:
c.blit(screen, (lx, ly), imglib['chin'])
else:
dg.showimg(screen, whe(dg.wh()), imglib, False)
        if vmode == 2:
screen.blit(imglib['lg'], (200, 100))
log.blit(screen, ft24)
setting.blit(screen, imglib, (x, y))
pygame.display.update()
if pick != -1:
pygame.time.delay(300)
dg.reset()
log.add(ce[pick].log())
dpos = ce[pick].to()
san += ce[pick].w()
ddone = False
cdone = False
ce = []
            cpos = '-1'
pick = -1
clock.tick(60)
#if python says run, then we should run
if __name__ == '__main__':
main()
| 27.855814 | 85 | 0.393054 | 640 | 5,989 | 3.64375 | 0.254688 | 0.044597 | 0.025729 | 0.02187 | 0.175386 | 0.108919 | 0.090909 | 0.090909 | 0.062607 | 0.042882 | 0 | 0.03899 | 0.477542 | 5,989 | 214 | 86 | 27.985981 | 0.706296 | 0.016029 | 0 | 0.361446 | 0 | 0 | 0.020051 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006024 | false | 0.024096 | 0.054217 | 0 | 0.060241 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5178c24558ef71ed70c83cdfa68e492916a169e6 | 3,045 | py | Python | ModelDGI.py | shiqitao/AutoGraph | 41f5956c859ff0fb6f87109d5f8731276bdcc2ef | [
"MIT"
] | null | null | null | ModelDGI.py | shiqitao/AutoGraph | 41f5956c859ff0fb6f87109d5f8731276bdcc2ef | [
"MIT"
] | null | null | null | ModelDGI.py | shiqitao/AutoGraph | 41f5956c859ff0fb6f87109d5f8731276bdcc2ef | [
"MIT"
] | null | null | null | import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from torch.nn import PReLU
from torch_geometric.nn import GCNConv, DeepGraphInfomax
from Result import Result
class Encoder(torch.nn.Module):
def __init__(self, hidden, data):
super(Encoder, self).__init__()
self.conv = GCNConv(data.num_features, hidden, cached=True)
self.prelu = PReLU(hidden)
def forward(self, x, edge_index):
x = self.conv(x, edge_index)
return self.prelu(x)
def corruption(x, edge_index):
return x[torch.randperm(x.size(0))], edge_index
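
# In Deep Graph Infomax, "corruption" produces the negative samples: here it
# keeps the graph structure but randomly permutes the node-feature rows, so
# every node gets paired with another node's features.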
def main_model_dgi(data, hidden, if_all=False):
torch.backends.cudnn.deterministic = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = DeepGraphInfomax(
hidden_channels=hidden,
encoder=Encoder(hidden, data),
summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)),
corruption=corruption)
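    # The summary function collapses node embeddings into one graph-level
    # vector (sigmoid of the mean); DGI trains the encoder so real embeddings
    # (pos_z) score higher against this summary than corrupted ones (neg_z).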
data.split_train_valid()
model = model.to(device)
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
best_acc_valid = 0
for epoch in range(10):
model.train()
optimizer.zero_grad()
pos_z, neg_z, summary = model(data.x, data.edge_index)
lr = LogisticRegression().fit(pos_z[data.mask_train].detach().cpu().numpy().reshape(-1, hidden),
data.y[data.mask_train].cpu().numpy())
valid_pred = lr.predict(pos_z[data.mask_valid].detach().cpu().numpy().reshape(-1, hidden))
acc_valid = accuracy_score(data.y[data.mask_valid].cpu().numpy(),
valid_pred)
if acc_valid > best_acc_valid:
best_acc_valid = acc_valid
result = pos_z
loss = model.loss(pos_z.to(device), neg_z.to(device), summary.to(device))
loss.backward()
optimizer.step()
lr = LogisticRegression().fit(result[data.mask_train].detach().cpu().numpy().reshape(-1, hidden),
data.y[data.mask_train].cpu().numpy())
train_pred = lr.predict(result[data.mask_train].detach().cpu().numpy().reshape(-1, hidden))
all_pred = lr.predict(result.detach().cpu().numpy().reshape(-1, hidden))
if if_all:
return Result(
result=torch.tensor(np.eye(data.num_class)[all_pred]).float().cpu(),
loss_train=-1,
loss_valid=-1,
acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy(),
train_pred),
acc_valid=best_acc_valid,
epoch=10,
)
else:
return Result(
result=all_pred[data.mask_test],
loss_train=-1,
loss_valid=-1,
acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy(),
train_pred),
acc_valid=best_acc_valid,
epoch=10,
)
| 34.602273 | 104 | 0.60821 | 393 | 3,045 | 4.516539 | 0.267176 | 0.04507 | 0.051268 | 0.059155 | 0.290704 | 0.272676 | 0.226479 | 0.226479 | 0.226479 | 0.221408 | 0 | 0.010676 | 0.261741 | 3,045 | 87 | 105 | 35 | 0.778915 | 0 | 0 | 0.231884 | 0 | 0 | 0.002956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.101449 | 0.014493 | 0.231884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51792436ea974d474c1f1d84607900ce05ba5464 | 4,496 | py | Python | conf/views.py | OnlineJudgeNextGeneration/qduoj2 | c4889d70850bd91ae7f662c02524d0555b6a3ce7 | [
"MIT"
] | 1 | 2018-01-28T07:48:13.000Z | 2018-01-28T07:48:13.000Z | conf/views.py | OnlineJudgeNextGeneration/qduoj2 | c4889d70850bd91ae7f662c02524d0555b6a3ce7 | [
"MIT"
] | null | null | null | conf/views.py | OnlineJudgeNextGeneration/qduoj2 | c4889d70850bd91ae7f662c02524d0555b6a3ce7 | [
"MIT"
] | null | null | null | import hashlib
from django.utils import timezone
from account.decorators import super_admin_required
from judge.dispatcher import process_pending_task
from judge.languages import languages, spj_languages
from options.options import SysOptions
from utils.api import APIView, CSRFExemptAPIView, validate_serializer
from .models import JudgeServer
from .serializers import (CreateEditWebsiteConfigSerializer,
CreateSMTPConfigSerializer, EditSMTPConfigSerializer,
JudgeServerHeartbeatSerializer,
JudgeServerSerializer, TestSMTPConfigSerializer)
class SMTPAPI(APIView):
@super_admin_required
def get(self, request):
smtp = SysOptions.smtp_config
if not smtp:
return self.success(None)
smtp.pop("password")
return self.success(smtp)
@validate_serializer(CreateSMTPConfigSerializer)
@super_admin_required
def post(self, request):
SysOptions.smtp_config = request.data
return self.success()
@validate_serializer(EditSMTPConfigSerializer)
@super_admin_required
def put(self, request):
smtp = SysOptions.smtp_config
data = request.data
for item in ["server", "port", "email", "tls"]:
smtp[item] = data[item]
if "password" in data:
smtp["password"] = data["password"]
SysOptions.smtp_config = smtp
return self.success()
class SMTPTestAPI(APIView):
@super_admin_required
@validate_serializer(TestSMTPConfigSerializer)
def post(self, request):
return self.success({"result": True})
class WebsiteConfigAPI(APIView):
def get(self, request):
ret = {key: getattr(SysOptions, key) for key in
["website_base_url", "website_name", "website_name_shortcut",
"website_footer", "allow_register", "submission_list_show_all"]}
return self.success(ret)
@validate_serializer(CreateEditWebsiteConfigSerializer)
@super_admin_required
def post(self, request):
for k, v in request.data.items():
setattr(SysOptions, k, v)
return self.success()
class JudgeServerAPI(APIView):
@super_admin_required
def get(self, request):
servers = JudgeServer.objects.all().order_by("-last_heartbeat")
return self.success({"token": SysOptions.judge_server_token,
"servers": JudgeServerSerializer(servers, many=True).data})
@super_admin_required
def delete(self, request):
hostname = request.GET.get("hostname")
if hostname:
JudgeServer.objects.filter(hostname=hostname).delete()
return self.success()
class JudgeServerHeartbeatAPI(CSRFExemptAPIView):
@validate_serializer(JudgeServerHeartbeatSerializer)
def post(self, request):
data = request.data
client_token = request.META.get("HTTP_X_JUDGE_SERVER_TOKEN")
if hashlib.sha256(SysOptions.judge_server_token.encode("utf-8")).hexdigest() != client_token:
return self.error("Invalid token")
service_url = data.get("service_url")
try:
server = JudgeServer.objects.get(hostname=data["hostname"])
server.judger_version = data["judger_version"]
server.cpu_core = data["cpu_core"]
server.memory_usage = data["memory"]
server.cpu_usage = data["cpu"]
server.service_url = service_url
server.ip = request.META["HTTP_X_REAL_IP"]
server.last_heartbeat = timezone.now()
server.save()
except JudgeServer.DoesNotExist:
JudgeServer.objects.create(hostname=data["hostname"],
judger_version=data["judger_version"],
cpu_core=data["cpu_core"],
memory_usage=data["memory"],
cpu_usage=data["cpu"],
ip=request.META["REMOTE_ADDR"],
service_url=service_url,
last_heartbeat=timezone.now(),
)
            # A new judge server just came online; process the pending queue so tasks are not left waiting when no new submissions arrive
process_pending_task()
return self.success()
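
# Illustrative client-side sketch (not part of this codebase): a judge server
# authenticates its heartbeat by hashing the shared token, e.g.
#
#   token = hashlib.sha256(JUDGE_SERVER_TOKEN.encode("utf-8")).hexdigest()
#   requests.post(heartbeat_url, json=payload,
#                 headers={"X-Judge-Server-Token": token})
#
# where JUDGE_SERVER_TOKEN, heartbeat_url and payload are assumptions.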
class LanguagesAPI(APIView):
def get(self, request):
return self.success({"languages": languages, "spj_languages": spj_languages})
| 37.466667 | 101 | 0.627002 | 437 | 4,496 | 6.270023 | 0.290618 | 0.043796 | 0.068248 | 0.045985 | 0.147445 | 0.078467 | 0.056934 | 0.030657 | 0 | 0 | 0 | 0.001238 | 0.281139 | 4,496 | 119 | 102 | 37.781513 | 0.846535 | 0.00823 | 0 | 0.244898 | 0 | 0 | 0.08324 | 0.015706 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0.030612 | 0.091837 | 0.020408 | 0.377551 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
517946e1d2fb85e5f713dbfc6bcd270e2147f166 | 15,026 | py | Python | gamse/pipelines/__init__.py | wangleon/gamse | ed2a3730469a1eeef3def1beca990e9d2641a53b | [
"Apache-2.0"
] | 10 | 2019-04-10T15:05:50.000Z | 2021-11-28T15:31:38.000Z | gamse/pipelines/__init__.py | wangleon/gamse | ed2a3730469a1eeef3def1beca990e9d2641a53b | [
"Apache-2.0"
] | 15 | 2020-04-07T07:29:27.000Z | 2022-02-19T15:47:04.000Z | gamse/pipelines/__init__.py | wangleon/gamse | ed2a3730469a1eeef3def1beca990e9d2641a53b | [
"Apache-2.0"
] | 2 | 2020-04-02T09:04:27.000Z | 2020-10-14T15:29:10.000Z | import os
import re
import sys
import shutil
import logging
logger = logging.getLogger(__name__)
import configparser
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from ..utils.obslog import read_obslog
from ..utils.misc import write_system_info
from . import common
from . import (feros, foces, hds, hires, levy, lhrs, sarg, xinglong216hrs)
instrument_lst = [
('foces', 'Fraunhofer', 'FOCES'),
('xinglong216hrs', 'Xinglong216', 'HRS'),
('hires', 'Keck-I', 'HIRES'),
('levy', 'APF', 'Levy'),
('hds', 'Subaru', 'HDS'),
('lhrs', 'LAMOST', 'HRS'),
('feros', 'MPG/ESO-2.2m', 'FEROS'),
]
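
# Each entry of instrument_lst is (pipeline module name, telescope,
# instrument); reduce_echelle() and make_obslog() below match the
# telescope/instrument pair from the config file and dispatch to the named
# module via eval().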
def reduce_echelle():
"""Automatically select the instrument and reduce echelle spectra
accordingly.
Available instruments include:
* *FOCES*: FOCES on 2m Fraunhofer Telescope in Wendelstein Observatory,
Germany.
* *Xinglong216HRS*: HRS on 2.16m telescope in Xinglong Station, China.
"""
log_filename = 'gamse.log'
# initialize running log
log_fmt = ' '.join(['*',
'%(asctime)s.%(msecs)03d',
'[%(levelname)s]',
'%(name)s - %(lineno)d - %(funcName)s():'+os.linesep,
' %(message)s'+os.linesep+'-'*80,
])
# check if there's already an existing log file
if os.path.exists(log_filename):
# if logfile already exists, rename it with its creation time
time_str = None
file1 = open(log_filename)
for row in file1:
# find the first time string in the contents
mobj = re.search('(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})', row)
if mobj:
time_str = mobj.group()
break
file1.close()
if time_str is None:
# time string not found
            # rename it to gamse.N.log, where N is an increasing number
i = 1
while(True):
newfilename = 'gamse.{}.log'.format(i)
if os.path.exists(newfilename):
i += 1
continue
else:
break
else:
# time string is found, rename it to gamse.YYYY-MM-DDTHH-MM-SS.log
time_str = time_str.replace(':', '-')
newfilename = 'gamse.{}.log'.format(time_str)
# rename the existing gamse.log file
shutil.move(log_filename, newfilename)
# load config file in current directory
config_file_lst = [fname for fname in os.listdir(os.curdir)
if fname.endswith('.cfg')]
config = configparser.ConfigParser(
inline_comment_prefixes = (';','#'),
interpolation = configparser.ExtendedInterpolation(),
)
config.read(config_file_lst)
# the level of running log depends on the mode in the config
mode = config['reduce']['mode']
if mode == 'normal':
level = logging.INFO
elif mode == 'debug':
level = logging.DEBUG
else:
level = logging.INFO
# initialize running log
logging.basicConfig(
filename = log_filename,
level = level,
format = log_fmt,
datefmt = '%Y-%m-%dT%H:%M:%S',
)
logger = logging.getLogger(__name__)
# write some system info into the running log
write_system_info()
# find telescope and instrument from config file
section = config['data']
telescope = section['telescope']
instrument = section['instrument']
logger.info('Start reducing {}, {} data'.format(telescope, instrument))
for row in instrument_lst:
if telescope == row[1] and instrument == row[2]:
eval(row[0]).reduce_rawdata()
exit()
print('Unknown Instrument: {} - {}'.format(telescope, instrument))
def make_obslog():
"""Scan the path to the raw FITS files and generate an observing log.
Before generating the observing log file, this function will scan the local
directory and look for *all* files with their names ending with ".cfg", and
read them as config files.
The config files are used to find the name of the instrument that the data
was obtained with.
"""
config_file_lst = []
# find local config file
for fname in os.listdir(os.curdir):
if fname.endswith('.cfg'):
config_file_lst.append(fname)
# load ALL local config files
config = configparser.ConfigParser(
inline_comment_prefixes = (';','#'),
interpolation = configparser.ExtendedInterpolation(),
)
config.read(config_file_lst)
# find the telescope and instrument name
section = config['data']
telescope = section['telescope']
instrument = section['instrument']
for row in instrument_lst:
if telescope == row[1] and instrument == row[2]:
eval(row[0]).make_obslog()
exit()
print('Unknown Instrument: {} - {}'.format(telescope, instrument))
def make_config():
"""Print a list of supported instrument and generate a config file according
to user's selection.
"""
# display a list of supported instruments
print('List of supported instruments:')
for i, row in enumerate(instrument_lst):
telescope = row[1]
instrument = row[2]
print('[{}] {}/{}'.format(i+1, telescope, instrument))
# select instrument
while(True):
string = input('Select the instrument: ')
if string.isdigit():
select = int(string)
break
else:
print('Error: invalid input')
continue
# use individual functions in each pipeline
modulename = instrument_lst[select-1][0]
eval(modulename).make_config()
def show_onedspec():
"""Show 1-D spectra in a pop-up window.
    The filenames or frame IDs of the 1-D spectra to show are taken from the
    command-line arguments (sys.argv), not from function arguments.
"""
# load obslog
logname_lst = [fname for fname in os.listdir(os.curdir)
if fname.endswith('.obslog')]
if len(logname_lst)==0:
logtable = None
else:
logtable = read_obslog(logname_lst[0])
# load config files in the current directory
config_file_lst = [fname for fname in os.listdir(os.curdir)
if fname.endswith('.cfg')]
config = configparser.ConfigParser(
inline_comment_prefixes = (';','#'),
interpolation = configparser.ExtendedInterpolation(),
)
config.read(config_file_lst)
filename_lst = []
for arg in sys.argv[2:]:
# first, check if argument is a filename.
if os.path.exists(arg):
filename_lst.append(arg)
# if not a filename, try to find the corresponding items in obslog
else:
if config is None:
                config = common.load_config('\S*\.cfg$')
if logtable is None:
                logtable = common.load_obslog('\S*\.obslog$')
# if arg is a number, find the corresponding filename in obslog
if arg.isdigit():
arg = int(arg)
section = config['reduce']
for logitem in logtable:
if arg == logitem['frameid']:
# get the path to the 1d spectra
odspath = section.get('odspath', None)
if odspath is None:
odspath = section.get('oned_spec')
# get the filename suffix for 1d spectra
oned_suffix = config['reduce'].get('oned_suffix')
fname = '{}_{}.fits'.format(
logitem['fileid'], oned_suffix)
filename = os.path.join(odspath, fname)
if os.path.exists(filename):
filename_lst.append(filename)
break
if len(filename_lst)==0:
exit()
spec_lst = []
for filename in filename_lst:
data = fits.getdata(filename)
# determine the column name of flux that will be shown
if 'flux' in data.dtype.names:
flux_key = 'flux'
elif 'flux_sum' in data.dtype.names:
flux_key = 'flux_sum'
else:
flux_key = ''
pass
if 'fiber' in data.dtype.names:
# multi fiber
for fiber in np.unique(data['fiber']):
spec = {}
mask = data['fiber']==fiber
for row in data[mask]:
order = row['order']
wave = row['wavelength']
flux = row[flux_key]
spec[order] = (wave, flux)
label = os.path.basename(filename) + ' Fiber {}'.format(fiber)
spec_lst.append((spec, label))
else:
spec = {}
for row in data:
order = row['order']
wave = row['wavelength']
flux = row[flux_key]
spec[order] = (wave, flux)
label = os.path.basename(filename)
spec_lst.append((spec, label))
################################################
fig = plt.figure(figsize=(15, 8), dpi=150)
ax = fig.add_axes([0.07, 0.1, 0.88, 0.8])
def plot_order(order):
ax.cla()
ax.currentorder = order
wave_min, wave_max = 1e9, 0
flux_min = 1e9
for i, (spec, label) in enumerate(spec_lst):
if order in spec:
wave = spec[order][0]
flux = spec[order][1]
ax.plot(wave, flux, '-', alpha=0.8, lw=0.8, label=label)
wave_min = min(wave_min, wave.min())
wave_max = max(wave_max, wave.max())
flux_min = min(flux_min, flux.min())
leg = ax.legend(loc='upper right')
leg.get_frame().set_alpha(0.1)
ax.set_xlabel(u'Wavelength (\xc5)', fontsize=12)
ax.set_ylabel('Flux', fontsize=12)
ax.set_title('Order %d'%(order), fontsize=14)
ax.set_xlim(wave_min, wave_max)
ax.axhline(y=0, color='k', ls='--', lw=0.5)
if flux_min > 0:
ax.set_ylim(0,)
ax.xaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
fig.canvas.draw()
def on_key(event):
if event.key == 'up':
can_plot = False
for spec, label in spec_lst:
if ax.currentorder + 1 in spec:
can_plot=True
break
if can_plot:
plot_order(ax.currentorder + 1)
elif event.key == 'down':
can_plot = False
for spec, label in spec_lst:
if ax.currentorder - 1 in spec:
can_plot=True
break
if can_plot:
plot_order(ax.currentorder - 1)
else:
pass
order0 = list(spec_lst[0][0].keys())[0]
plot_order(order0)
fig.canvas.mpl_connect('key_press_event', on_key)
plt.show()
def plot_spectra1d():
# load config files in the current directory
config_file_lst = [fname for fname in os.listdir(os.curdir)
if fname.endswith('.cfg')]
config = configparser.ConfigParser(
inline_comment_prefixes = (';','#'),
interpolation = configparser.ExtendedInterpolation(),
)
config.read(config_file_lst)
# find telescope and instrument from config file
section = config['data']
telescope = section['telescope']
instrument = section['instrument']
for row in instrument_lst:
if telescope == row[1] and instrument == row[2]:
eval(row[0]).plot_spectra1d()
exit()
def convert_onedspec():
"""Convert one-dimensional spectra.
"""
config = common.load_config('\S*\.cfg$', verbose=False)
logtable = common.load_obslog('\S*\.obslog$', fmt='astropy', verbose=False)
section = config['reduce']
odspath = section.get('odspath', None)
oned_suffix = section.get('oned_suffix')
filename_lst = []
if len(sys.argv)==2:
        # no additional args: convert all of the one-dimensional spectra
for fname in sorted(os.listdir(odspath)):
if fname.endswith('.fits') or fname.endswith('.fit'):
filename = os.path.join(odspath, fname)
filename_lst.append(filename)
else:
for arg in sys.argv[2:]:
if os.path.exists(arg):
filename_lst.append(arg)
elif os.path.exists(os.path.join(odspath, arg)):
filename_lst.append(os.path.join(odspath, arg))
else:
if arg.isdigit():
arg = int(arg)
for logitem in logtable:
if arg == logitem['frameid'] or arg == logitem['fileid']:
pattern = str(logitem['fileid'])+'\S*'
for fname in sorted(os.listdir(odspath)):
filename = os.path.join(odspath, fname)
if os.path.isfile(filename) \
and re.match(pattern, fname):
filename_lst.append(filename)
for filename in filename_lst:
data = fits.getdata(filename)
if 'flux' in data.dtype.names:
flux_key = 'flux'
elif 'flux_sum' in data.dtype.names:
flux_key = 'flux_sum'
else:
pass
spec = {}
for row in data:
order = row['order']
wave = row['wavelength']
flux = row[flux_key]
if wave[0]> wave[-1]:
wave = wave[::-1]
flux = flux[::-1]
spec[order] = (wave, flux)
ascii_prefix = os.path.splitext(os.path.basename(filename))[0]
target_path = os.path.join(odspath, ascii_prefix)
target_fname = '{}_order_{:03d}.txt'.format(ascii_prefix, order)
target_filename = os.path.join(target_path, target_fname)
if not os.path.exists(target_path):
os.mkdir(target_path)
if os.path.exists(target_filename):
                print('Warning: {} will be overwritten'.format(target_filename))
outfile = open(target_filename, 'w')
for w, f in zip(wave, flux):
outfile.write('{:11.5f} {:+16.8e}'.format(w, f)+os.linesep)
outfile.close()
        print('Converted {} to {} ASCII files in {}'.format(
            filename, len(data), target_path))
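
# The ASCII files written above contain one "wavelength flux" pair per line,
# formatted as '{:11.5f} {:+16.8e}' -- e.g. " 6562.80123  +1.23456789e+03"
# (values illustrative), with one file per echelle order.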
| 34.542529 | 80 | 0.535605 | 1,704 | 15,026 | 4.623826 | 0.212441 | 0.01523 | 0.01485 | 0.010661 | 0.365402 | 0.335576 | 0.312476 | 0.304353 | 0.294454 | 0.248763 | 0 | 0.012464 | 0.348596 | 15,026 | 434 | 81 | 34.62212 | 0.792501 | 0.140423 | 0 | 0.436306 | 0 | 0.003185 | 0.082482 | 0.004713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025478 | false | 0.009554 | 0.044586 | 0 | 0.070064 | 0.022293 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
517c72830a031351a68c7d69fc4b0b87b4a8a950 | 975 | py | Python | src/django/api/management/commands/processfixtures.py | azavea/open-apparel-registry | 20f7a6d502d9152c85ee7f2696b25b6badf98924 | [
"MIT"
] | 32 | 2019-01-26T05:04:03.000Z | 2022-03-11T15:09:09.000Z | src/django/api/management/commands/processfixtures.py | azavea/open-apparel-registry | 20f7a6d502d9152c85ee7f2696b25b6badf98924 | [
"MIT"
] | 1,586 | 2019-01-15T21:54:42.000Z | 2022-03-31T17:38:14.000Z | src/django/api/management/commands/processfixtures.py | azavea/open-apparel-registry | 20f7a6d502d9152c85ee7f2696b25b6badf98924 | [
"MIT"
] | 7 | 2019-02-28T03:32:46.000Z | 2021-11-04T17:03:46.000Z | from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Run all processing steps on data loaded from fixtures'
def add_arguments(self, parser):
parser.add_argument(
'-s',
'--startid',
type=int,
help='The start of the list ID range to process',
default=2,
)
parser.add_argument(
'-e',
'--endid',
type=int,
            help='The end (exclusive) of the list ID range to process',
default=16,
)
def handle(self, *args, **options):
start_id = options['startid']
end_id = options['endid']
for list_id in range(start_id, end_id):
for action in ('parse', 'geocode', 'match'):
call_command('batch_process',
'--list-id', list_id,
'--action', action)
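
# Typical invocation (the IDs shown are just the defaults):
#
#   ./manage.py processfixtures --startid 2 --endid 16
#
# Note that range(start_id, end_id) excludes end_id, so lists 2 through 15
# are processed, each through the parse, geocode and match steps.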
| 29.545455 | 66 | 0.524103 | 108 | 975 | 4.62037 | 0.490741 | 0.06012 | 0.056112 | 0.096192 | 0.128257 | 0.128257 | 0.128257 | 0.128257 | 0 | 0 | 0 | 0.00487 | 0.368205 | 975 | 32 | 67 | 30.46875 | 0.805195 | 0 | 0 | 0.148148 | 0 | 0 | 0.217436 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
518478984b76dc79b0984af18a409fa92718c2e2 | 10,703 | py | Python | ngram/language_model.py | brightp-py/rnng-and-rts | c1251de9bd4c35531cb46dbfb8b2c989ab5a1f33 | [
"MIT"
] | null | null | null | ngram/language_model.py | brightp-py/rnng-and-rts | c1251de9bd4c35531cb46dbfb8b2c989ab5a1f33 | [
"MIT"
] | null | null | null | ngram/language_model.py | brightp-py/rnng-and-rts | c1251de9bd4c35531cb46dbfb8b2c989ab5a1f33 | [
"MIT"
] | null | null | null | #!/bin/env python
"""
language_model.py.
Written by joshualoehr.
https://github.com/joshualoehr/ngram-language-model
Edited by Brighton Pauli, 4/20/2022.
"""
import argparse
from itertools import product
from pathlib import Path
import numpy as np
import nltk
from preprocess import preprocess, EOS, UNK
def load_data(data_dir):
"""Load train and test corpora from a directory.
Directory must contain two files: train.txt and test.txt.
Newlines will be stripped out.
Args:
data_dir (Path) -- pathlib.Path of the directory to use.
Returns:
The train and test sets, as lists of sentences.
"""
train_path = data_dir.joinpath('train.txt').absolute().as_posix()
test_path = data_dir.joinpath('test.txt').absolute().as_posix()
with open(train_path, 'r') as file:
train_data = [line.strip() for line in file.readlines()]
with open(test_path, 'r') as file:
test_data = [line.strip() for line in file.readlines()]
return train_data, test_data
class LanguageModel:
"""An n-gram language model trained on a given corpus.
For a given n and given training corpus, constructs an n-gram language
model for the corpus by:
1. preprocessing the corpus (adding SOS/EOS/UNK tokens)
2. calculating (smoothed) probabilities for each n-gram
Also contains methods for calculating the perplexity of the model
against another corpus, and for generating sentences.
"""
def __init__(self, train_data, n_val, laplace=1):
"""Create a LanguageModel object.
Args:
train_data (list of str): list of sentences comprising the training
corpus.
n (int): the order of language model to build (i.e. 1 for unigram,
2 for bigram, etc.).
laplace (int): lambda multiplier to use for laplace smoothing
(default 1 for add-1 smoothing).
"""
self.n_val = n_val
self.laplace = laplace
self.tokens = preprocess(train_data, n_val)
self.vocab = nltk.FreqDist(self.tokens)
self.model = self._create_model()
self.masks = list(reversed(list(product((0, 1), repeat=n_val))))
def _smooth(self):
"""Apply Laplace smoothing to n-gram frequency distribution.
Here, n_grams refers to the n-grams of the tokens in the training
corpus, while m_grams refers to the first (n-1) tokens of each n-gram.
Returns:
dict: Mapping of each n-gram (tuple of str) to its Laplace-smoothed
probability (float).
"""
vocab_size = len(self.vocab)
n_grams = nltk.ngrams(self.tokens, self.n_val)
n_vocab = nltk.FreqDist(n_grams)
m_grams = nltk.ngrams(self.tokens, self.n_val-1)
m_vocab = nltk.FreqDist(m_grams)
def smoothed_count(n_gram, n_count):
m_gram = n_gram[:-1]
m_count = m_vocab[m_gram]
numer = (n_count + self.laplace)
denom = (m_count + self.laplace * vocab_size)
return numer / denom
return {n_gram: smoothed_count(n_gram, count)
for n_gram, count in n_vocab.items()}
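
    # Worked example of the smoothing above (assuming laplace=1): with a
    # vocabulary of 5 types, C(n_gram)=2 and C(m_gram)=4, the smoothed
    # probability is (2 + 1) / (4 + 1 * 5) = 1/3.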
def _create_model(self):
"""Create a probability distribution for vocab of the training corpus.
If building a unigram model, the probabilities are simple relative
frequencies of each token with the entire corpus.
Otherwise, the probabilities are Laplace-smoothed relative frequencies.
Returns:
A dict mapping each n-gram (tuple of str) to its probability
(float).
"""
if self.n_val == 1:
num_tokens = len(self.tokens)
return {(unigram,): count / num_tokens
for unigram, count in self.vocab.items()}
return self._smooth()
def _convert_oov(self, ngram):
"""Convert, if necessary, a given n-gram to one known by the model.
Starting with the unmodified ngram, check each possible permutation of
the n-gram with each index of the n-gram containing either the original
token or <UNK>. Stop when the model contains an entry for that
permutation.
This is achieved by creating a 'bitmask' for the n-gram tuple, and
swapping out each flagged token for <UNK>. Thus, in the worst case,
this function checks 2^n possible n-grams before returning.
Returns:
The n-gram with <UNK> tokens in certain positions such that the
model contains an entry for it.
"""
def mask(ngram, bitmask):
return tuple(
token if flag else UNK for token, flag in zip(ngram, bitmask)
)
ngram = (ngram,) if isinstance(ngram, str) else ngram
for possible_known in [mask(ngram, bitmask) for bitmask in self.masks]:
if possible_known in self.model:
return possible_known
raise LookupError(f"Model failed to find n-gram {str(ngram)}.")
def perplexity(self, test_data):
"""Calculate the perplexity of the model against a given test corpus.
Args:
test_data (list of str): sentences comprising the training corpus.
Returns:
The perplexity of the model as a float.
"""
test_tokens = preprocess(test_data, self.n_val)
test_ngrams = nltk.ngrams(test_tokens, self.n_val)
total = len(test_tokens)
known_ngrams = (self._convert_oov(ngram) for ngram in test_ngrams)
probabilities = [self.model[ngram] for ngram in known_ngrams]
return np.exp((-1/total) * sum(map(np.log, probabilities)))
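
    # Equivalently: perplexity = exp(-(1/N) * sum(log p_i)) over the N test
    # tokens, i.e. the exponentiated average negative log-likelihood.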
def sentence_surprisal(self, sent):
"""Return the surprisal for each token in the sentence.
Args:
sent (tuple OR str): sequence of words to get surprisals of.
Returns:
numpy array of the same length as sent, where each number
corresponds to the surprisal of the token at the same index.
"""
if isinstance(sent, str):
sent = sent.split()
probs = []
prev = ["<s>"] * (self.n_val - 1)
for word in sent:
prev.append(word)
key = self._convert_oov(prev)
print('\t', key)
probs.append(self.model[key])
del prev[0]
return -np.log(np.array(probs))
def _best_candidate(self, prev, i, without=None):
"""Choose the most likely next token given the previous (n-1) tokens.
If selecting the first word of the sentence (after the SOS tokens),
the i'th best candidate will be selected, to create variety.
If no candidates are found, the EOS token is returned with a
probability of 1.
Args:
prev (tuple of str): the previous n-1 tokens of the sentence.
i (int): which candidate to select if not the most probable one.
without (list of str): tokens to exclude from the candidates list.
Returns:
A tuple with the next most probable token and its corresponding
probability.
"""
        blacklist = [UNK]  # the UNK constant (i.e. "<UNK>"), not the literal string "UNK"
if without:
blacklist += without
candidates = ((ngram[-1], prob) for ngram, prob in self.model.items()
if ngram[:-1] == prev)
candidates = filter(
lambda candidate: candidate[0] not in blacklist, candidates)
candidates = sorted(
candidates, key=lambda candidate: candidate[1], reverse=True)
if len(candidates) == 0:
return (EOS, 1)
        # Clamp the index so asking for the i'th-best candidate never overruns the list
        first_word = prev == () or prev[-1] == "<s>"
        return candidates[min(i, len(candidates) - 1) if first_word else 0]
def generate_sentences(self, num, min_len=12, max_len=24):
"""Generate num random sentences using the language model.
Sentences always begin with the SOS token and end with the EOS token.
While unigram model sentences will only exclude the UNK token, n>1
models will also exclude all other words already in the sentence.
Args:
num (int): the number of sentences to generate.
min_len (int): minimum allowed sentence length.
max_len (int): maximum allowed sentence length.
        Yields:
            A tuple with the generated sentence and a score, -1/log(P), where
            P is the product of the probabilities of all of its n-grams.
"""
for i in range(num):
sent, total_prob = ["<s>"] * max(1, self.n_val-1), 1
while sent[-1] != EOS:
prev = () if self.n_val == 1 else tuple(sent[-(self.n_val-1):])
blacklist = sent + ([EOS] if len(sent) < min_len else [])
next_token, next_prob = self._best_candidate(
prev, i, without=blacklist)
sent.append(next_token)
total_prob *= next_prob
if len(sent) >= max_len:
sent.append(EOS)
yield ' '.join(sent), -1/np.log(total_prob)
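# Hedged usage sketch (assumes a trained LanguageModel instance `lm`):
#   for sentence, score in lm.generate_sentences(3, min_len=8, max_len=16):
#       print(sentence, score)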
if __name__ == '__main__':
parser = argparse.ArgumentParser("N-gram Language Model")
parser.add_argument('--data', type=str, required=True,
help='Location of the data directory containing '
'train.txt and test.txt')
parser.add_argument('--n', type=int, required=True,
help='Order of N-gram model to create (i.e. 1 for '
'unigram, 2 for bigram, etc.)')
parser.add_argument('--laplace', type=float, default=0.01,
help='Lambda parameter for Laplace smoothing (default '
'is 0.01 -- use 1 for add-1 smoothing)')
parser.add_argument('--num', type=int, default=10,
help='Number of sentences to generate (default 10)')
args = parser.parse_args()
# Load and prepare train/test data
data_path = Path(args.data)
train, test = load_data(data_path)
print("Loading {}-gram model...".format(args.n))
lm = LanguageModel(train, args.n, laplace=args.laplace)
print("Vocabulary size: {}".format(len(lm.vocab)))
# print("Generating sentences...")
# for sentence, prob in lm.generate_sentences(args.num):
# print("{} ({:.5f})".format(sentence, prob))
print("Generating surprisals...")
sent1 = "I brought salt and pepper ."
print(f"\t{sent1}")
print('\t', lm.sentence_surprisal(sent1))
sent2 = "I brought pepper and salt ."
print(f"\t{sent2}")
print('\t', lm.sentence_surprisal(sent2))
perplexity = lm.perplexity(test)
print("Model perplexity: {:.3f}".format(perplexity))
print("")
| 36.780069 | 79 | 0.608521 | 1,420 | 10,703 | 4.497183 | 0.226056 | 0.015659 | 0.012527 | 0.008456 | 0.112277 | 0.054494 | 0.036956 | 0.036956 | 0.008143 | 0 | 0 | 0.009165 | 0.296552 | 10,703 | 290 | 80 | 36.906897 | 0.839022 | 0.387181 | 0 | 0 | 0 | 0 | 0.094842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087302 | false | 0 | 0.047619 | 0.007937 | 0.230159 | 0.079365 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51854f42a0ad4da0db518cc4b31ddad382a8ad3b | 11,484 | py | Python | code/pyorg/globals/mt_seg.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 12 | 2020-01-08T01:33:02.000Z | 2022-03-16T00:25:34.000Z | code/pyorg/globals/mt_seg.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 8 | 2019-12-19T19:34:56.000Z | 2022-03-10T10:11:28.000Z | code/pyorg/globals/mt_seg.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 2 | 2022-03-30T13:12:22.000Z | 2022-03-30T18:12:10.000Z | """
Collection of functions to help segment microtubules in tomograms
# Author: Antonio Martinez-Sanchez (Max Planck Institute for Biochemistry)
# Date: 1.07.17
"""
import csv
import vtk
from .utils import *
from sklearn.cluster import MeanShift
__author__ = 'Antonio Martinez-Sanchez'
# Clean a directory's contents (the directory itself is preserved)
# dir: directory path
def clean_dir(dir):
for root, dirs, files in os.walk(dir):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def get_sub_copy(tomo, sub_pt, sub_shape):
'''
    Return a subvolume of a tomogram given a center and a shape
    :param tomo: input tomogram
    :param sub_pt: subtomogram center point
    :param sub_shape: output subtomogram shape (all dimensions must be even)
    :return: a copy of the subvolume, zero-padded where it falls outside the tomogram
'''
# Initialization
nx, ny, nz = int(sub_shape[0]), int(sub_shape[1]), int(sub_shape[2])
mx, my, mz = tomo.shape[0], tomo.shape[1], tomo.shape[2]
mx1, my1, mz1 = mx - 1, my - 1, mz - 1
hl_x, hl_y, hl_z = int(nx * .5), int(ny * .5), int(nz * .5)
x, y, z = int(round(sub_pt[0])), int(round(sub_pt[1])), int(round(sub_pt[2]))
# Compute bounding restriction
# off_l_x, off_l_y, off_l_z = x - hl_x + 1, y - hl_y + 1, z - hl_z + 1
off_l_x, off_l_y, off_l_z = x - hl_x, y - hl_y, z - hl_z
# off_h_x, off_h_y, off_h_z = x + hl_x + 1, y + hl_y + 1, z + hl_z + 1
off_h_x, off_h_y, off_h_z = x + hl_x, y + hl_y, z + hl_z
dif_l_x, dif_l_y, dif_l_z = 0, 0, 0
dif_h_x, dif_h_y, dif_h_z = nx, ny, nz
if off_l_x < 0:
# dif_l_x = abs(off_l_x) - 1
dif_l_x = abs(off_l_x)
off_l_x = 0
if off_l_y < 0:
# dif_l_y = abs(off_l_y) - 1
dif_l_y = abs(off_l_y)
off_l_y = 0
if off_l_z < 0:
# dif_l_z = abs(off_l_z) - 1
dif_l_z = abs(off_l_z)
off_l_z = 0
if off_h_x >= mx:
dif_h_x = nx - off_h_x + mx1
off_h_x = mx1
if off_h_y >= my:
dif_h_y = ny - off_h_y + my1
off_h_y = my1
if off_h_z >= mz:
dif_h_z = nz - off_h_z + mz1
off_h_z = mz1
# Make the subvolume copy
    hold_sv = np.zeros(shape=np.asarray(sub_shape, dtype=int), dtype=tomo.dtype)  # np.int alias removed in recent NumPy; plain int is equivalent
hold_sv[dif_l_x:dif_h_x, dif_l_y:dif_h_y, dif_l_z:dif_h_z] = tomo[off_l_x:off_h_x, off_l_y:off_h_y, off_l_z:off_h_z]
return hold_sv
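# Hedged usage sketch: extract a 64^3 voxel cube centred on a point, with
# out-of-bounds regions zero-padded (tomo is assumed to be a 3D numpy array):
#   sub = get_sub_copy(tomo, sub_pt=(120.0, 85.0, 40.0), sub_shape=(64, 64, 64))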
# Read microtubule centerline samples and group them by microtubule ID
# fname: CSV file name
# coords_cols: X, Y and Z column numbers in the CSV file
# id_col: microtubule ID column number in the CSV file
# Returns: a dictionary indexed by microtubule ID with centerline coordinates in a list
def read_csv_mts(fname, coords_cols, id_col):
# Initialization
mt_dict = None
# Open the file to read
with open(fname, 'r') as in_file:
reader = csv.reader(in_file)
# Reading loop
coords, ids = list(), list()
for row in reader:
x, y, z, idx = float(row[coords_cols[0]]), float(row[coords_cols[1]]), float(row[coords_cols[2]]),\
int(row[id_col])
            coords.append(np.asarray((x, y, z), dtype=float))
ids.append(idx)
# Dictionary creation
mt_dict = dict.fromkeys(set(ids))
for key in mt_dict.keys():
mt_dict[key] = list()
for key, coord in zip(ids, coords):
mt_dict[key].append(coord)
return mt_dict
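# Hedged example: for a CSV whose rows look like "12.1,33.0,5.2,...,7" with
# X, Y, Z in columns 0-2 and the microtubule ID in column 6, the call would be
#   mt_dict = read_csv_mts('centerlines.csv', coords_cols=(0, 1, 2), id_col=6)
# yielding {7: [array([12.1, 33.0, 5.2]), ...], ...} (file name hypothetical).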
# Converts a set of points into a binary mask
# points: iterable with the point coordinates
# mask_shape: shape of the output mask
# inv: if False (default) then True-fg and False-bg, otherwise these values are inverted
# Returns: a 3D numpy binary array
def points_to_mask(points, mask_shape, inv=False):
    mask = np.zeros(shape=mask_shape, dtype=bool)
for point in points:
i, j, k = int(round(point[0])), int(round(point[1])), int(round(point[2]))
if (i < 0) or (j < 0) or (k < 0) or \
(i >= mask_shape[0]) or (j >= mask_shape[1]) or (k >= mask_shape[2]):
continue
else:
mask[i, j, k] = True
if inv:
return np.invert(mask)
else:
return mask
# Mean shift clustering for points in 3D space
# coords: points 3D coordinates in numpy array with size [n_points, 3]
# bandwidth: bandwidth used in the RBF kernel
# cluster_all: if True, standard behaviour (every point is clustered); if False (default),
#              orphan points are assigned to their own trivial clusters
# Returns: cluster labels array [n_points]
def cluster_3d_mean_shift(coords, bandwidth, cluster_all=False):
# Input parsing
    if (not isinstance(coords, np.ndarray)) or (len(coords.shape) != 2) or (coords.shape[1] != 3):
        error_msg = 'Input coords must be numpy array of 3D coordinates (size=[n_points, 3]).'
        raise pexceptions.PySegInputError(expr='cluster_3d_mean_shift', msg=error_msg)
    if bandwidth <= 0:
        error_msg = 'Input bandwidth must be greater than zero.'
        raise pexceptions.PySegInputError(expr='cluster_3d_mean_shift', msg=error_msg)
bw_f = float(bandwidth)
    # Call to MeanShift; pass cluster_all through so that orphan points are
    # labelled -1 when cluster_all=False (with True hardcoded, the orphan
    # processing below would be dead code)
    mshift = MeanShift(bandwidth=bw_f, cluster_all=cluster_all, bin_seeding=True)
    mshift.fit(coords)
    labels = np.asarray(mshift.labels_)
    # Orphans processing: give every orphan point (-1) its own trivial cluster
    if not cluster_all:
labels_max = labels.max()
for i, lbl in enumerate(labels):
if lbl == -1:
labels_max += 1
labels[i] = labels_max
return labels
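# Hedged usage sketch (coords is an [n_points, 3] numpy array; a bandwidth of
# 20 voxels is an arbitrary illustration):
#   labels = cluster_3d_mean_shift(coords, bandwidth=20., cluster_all=False)
#   centers = clusters_cg(coords, labels)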
# Computes center of gravity for every cluster
# coords: coordinates array [n_points, 3]
# labels: cluster labels array [n_points]
def clusters_cg(coords, labels):
# Input parsing
if (not isinstance(coords, np.ndarray)) or (len(coords.shape) != 2) or (coords.shape[1] != 3):
error_msg = 'Input coords must be numpy array of 3D coordinates (size=[n_points, 3]).'
raise pexceptions.PySegInputError(expr='clusters_cg', msg=error_msg)
if (not isinstance(labels, np.ndarray)) or (len(labels.shape) != 1) or \
(labels.shape[0] != coords.shape[0]):
error_msg = 'Input labels must be array with size=[n_points].'
raise pexceptions.PySegInputError(expr='clusters_cg', msg=error_msg)
# Center of gravity loop computations
u_labels = np.unique(labels)
n_lbls = len(u_labels)
n_points_lut = dict.fromkeys(u_labels)
cgs = dict.fromkeys(u_labels)
for lbl in u_labels:
        cgs[lbl] = np.zeros(shape=3, dtype=float)
n_points_lut[lbl] = 0
for point, lbl in zip(coords, labels):
cgs[lbl] += point
n_points_lut[lbl] += 1
# Averaging loop
for lbl in u_labels:
cgs[lbl] *= (1./float(n_points_lut[lbl]))
    return np.asarray(list(cgs.values()), dtype=float)
# Converts clusters of points into a vtkPolyData
# points: array with 3D points coordinates [n_points, 3]
# labels: array with point labels [n_points] with cluster labels,
#         if None (default), every point corresponds to its own cluster
# centers: cluster centers array [n_unique_labels] (default None)
def clusters_to_poly(points, labels=None, centers=None):
# Input parsing
if (not isinstance(points, np.ndarray)) or (len(points.shape) != 2) or (points.shape[1] != 3):
error_msg = 'Input coords must be numpy array of 3D coordinates (size=[n_points, 3]).'
raise pexceptions.PySegInputError(expr='points_to_poly', msg=error_msg)
if labels is not None:
if (not isinstance(labels, np.ndarray)) or (len(labels.shape) != 1) or \
(labels.shape[0] != points.shape[0]):
error_msg = 'Input labels must be array with size=[n_points].'
raise pexceptions.PySegInputError(expr='points_to_poly', msg=error_msg)
if centers is not None:
if not isinstance(centers, np.ndarray):
error_msg = 'Input centers must be array with size=[n_unique_labels].'
raise pexceptions.PySegInputError(expr='points_to_poly', msg=error_msg)
# Initialization
poly = vtk.vtkPolyData()
p_points = vtk.vtkPoints()
p_cells = vtk.vtkCellArray()
plabels = vtk.vtkIntArray()
plabels.SetNumberOfComponents(1)
plabels.SetName('label')
pcenters = vtk.vtkIntArray()
pcenters.SetNumberOfComponents(1)
pcenters.SetName('center')
# Points loop
for i, point in enumerate(points):
p_points.InsertNextPoint(point)
p_cells.InsertNextCell(1)
p_cells.InsertCellPoint(i)
if labels is None:
plabels.InsertTuple1(i, i)
else:
plabels.InsertTuple1(i, labels[i])
if centers is None:
pcenters.InsertTuple1(i, 1)
else:
pcenters.InsertTuple1(i, -1)
# Inserting centers
if centers is not None:
for i in range(centers.shape[0]):
p_i = points.shape[0] + i
p_points.InsertNextPoint(centers[i])
p_cells.InsertNextCell(1)
p_cells.InsertCellPoint(p_i)
plabels.InsertTuple1(p_i, -1)
pcenters.InsertTuple1(p_i, 1)
# Building the polydata
poly.SetPoints(p_points)
poly.SetVerts(p_cells)
poly.GetCellData().AddArray(plabels)
poly.GetCellData().AddArray(pcenters)
return poly
# Computes the rotation angles that take an input vector onto the reference vector [0,0,1],
# leaving one Euler angle free, in Relion format
# First Euler angle (Rotation) is assumed 0
# v_in: input vector
# mode: either 'active' (default) or 'passive'
# Returns: a 3-tuple with the Euler angles (rot, tilt, psi) in Relion format
def vect_to_zrelion(v_in, mode='active'):
    # Normalization (numpy float division by zero raises no ZeroDivisionError,
    # so the vector module is checked explicitly)
    v_m = np.asarray((v_in[1], v_in[0], v_in[2]), dtype=np.float32)
    v_mod = math.sqrt((v_m * v_m).sum())
    if v_mod == 0:
        print('WARNING (vect_to_zrelion): vector with module 0 cannot be rotated!')
        return 0., 0., 0.
    n = v_m / v_mod
# Computing angles in Extrinsic ZYZ system
alpha = np.arccos(n[2])
beta = np.arctan2(n[1], n[0])
# Transform to Relion system (intrinsic ZY'Z'' where rho is free)
rot, tilt, psi = 0., unroll_angle(math.degrees(alpha), deg=True), \
unroll_angle(180.-math.degrees(beta), deg=True)
# By default is active, invert if passive
if mode == 'passive':
M = rot_mat_relion(rot, tilt, psi, deg=True)
rot, tilt, psi = rot_mat_eu_relion(M.T, deg=True)
return rot, tilt, psi
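# Worked example (a sketch of the convention above, assuming unroll_angle maps
# 180 degrees to itself): a vector already along +Z, v_in=(0, 0, 1), gives
# n=(0, 0, 1), alpha=0, beta=0, and therefore (rot, tilt, psi) = (0., 0., 180.).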
def randomize_voxel_mask(vol, mask, ref='fg'):
"""
Function to randomize voxel density value in masked volumes
:param vol: volume with the density map
:param mask: volume with the binary mask (fg: True, bg: False)
:param ref: 'fg' (default) indicates that (ref: fg, ref: bg)
:return: a copy of vol but with the pixel in region marked as 'fg' in 'ref'
"""
# Initialization
o_vol = np.copy(vol)
# Finding 'bg' and reference
bg_ids = np.where(mask == False)
if ref == 'fg':
ref_ids = np.where(mask)
else:
ref_ids = np.where(mask == False)
# Randomization
rnd_ids = np.random.randint(0, len(ref_ids[0]), size=len(bg_ids[0]))
for i in range(len(bg_ids[0])):
rnd_id = rnd_ids[i]
x, y, z = bg_ids[0][i], bg_ids[1][i], bg_ids[2][i]
rnd_x, rnd_y, rnd_z = ref_ids[0][rnd_id], ref_ids[1][rnd_id], ref_ids[2][rnd_id]
o_vol[x, y, z] = vol[rnd_x, rnd_y, rnd_z]
return o_vol
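# Hedged usage sketch: scramble the background of a density map using
# foreground intensities as the sampling pool (vol and mask are assumed to be
# equally shaped 3D numpy arrays, mask boolean):
#   rnd_vol = randomize_voxel_mask(vol, mask, ref='fg')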
| 36.113208 | 120 | 0.638628 | 1,818 | 11,484 | 3.858086 | 0.191969 | 0.011976 | 0.00499 | 0.03493 | 0.233105 | 0.204448 | 0.185486 | 0.156401 | 0.156401 | 0.150413 | 0 | 0.016786 | 0.247823 | 11,484 | 317 | 121 | 36.227129 | 0.795207 | 0.271334 | 0 | 0.145161 | 0 | 0 | 0.077213 | 0.007891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0.005376 | 0.021505 | 0 | 0.123656 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
518655f8a3ef06d9584a599c64e86a70b2d75a88 | 1,725 | py | Python | Medium/396.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 6 | 2017-09-25T18:05:50.000Z | 2019-03-27T00:23:15.000Z | Medium/396.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 1 | 2017-10-29T12:04:41.000Z | 2018-08-16T18:00:37.000Z | Medium/396.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | null | null | null | # ------------------------------
# 396. Rotate Function
#
# Description:
# Given an array of integers A, let n be its length.
#
# Assume Bk to be an array obtained by rotating the array A k positions clock-wise,
# we define a "rotation function" F on A as follow:
#
# F(k) = 0 * Bk[0] + 1 * Bk[1] + ... + (n-1) * Bk[n-1].
#
# Calculate the maximum value of F(0), F(1), ..., F(n-1).
#
# Note:
# n is guaranteed to be less than 10^5.
#
# Example:
# A = [4, 3, 2, 6]
#
# F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
# F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
# F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
# F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
#
# So the maximum value of F(0), F(1), F(2), F(3) is F(3) = 26.
#
# Version: 1.0
# 10/09/19 by Jianfa
# ------------------------------
from typing import List  # import needed for the List[int] annotation below

class Solution:
    def maxRotateFunction(self, A: List[int]) -> int:
if not A:
return 0
n = len(A)
lastValue = 0
for i in range(n):
lastValue += i * A[i]
maxValue = lastValue
summ = sum(A)
for i in range(1, n):
# F(k) - F(k-1) = summ - n * A[n-k]
# F(k) = F(k-1) + summ - n * A[n-k]
currentValue = lastValue + summ - n * A[n-i]
if currentValue > maxValue:
maxValue = currentValue
lastValue = currentValue
return maxValue
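# Derivation sketch for the recurrence used above: rotating one step clockwise
# increases every element's coefficient by 1 except the element that wraps
# around to index 0, whose coefficient drops from (n-1) to 0. Summing these
# changes gives F(k) - F(k-1) = sum(A) - n * A[n-k].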
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Math solution, O(n) time (three linear passes), O(1) extra space
# F(k) - F(k-1) = summ - n * A[n-k]
# F(k) = F(k-1) + summ - n * A[n-k] | 28.278689 | 84 | 0.441159 | 277 | 1,725 | 2.718412 | 0.33935 | 0.023904 | 0.023904 | 0.046481 | 0.12749 | 0.12749 | 0.12749 | 0.12749 | 0.12749 | 0.069057 | 0 | 0.090909 | 0.330435 | 1,725 | 61 | 85 | 28.278689 | 0.561039 | 0.613333 | 0 | 0 | 0 | 0 | 0.012698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51874c89c5717dcfb607658189272652ac7ef4e4 | 892 | py | Python | word2vec_train.py | kyucheolsim/gs-word2vec | 14a08f4320cd913694dab5980a5c4467c0ed9613 | [
"MIT"
] | null | null | null | word2vec_train.py | kyucheolsim/gs-word2vec | 14a08f4320cd913694dab5980a5c4467c0ed9613 | [
"MIT"
] | 2 | 2021-03-31T20:05:56.000Z | 2021-12-13T20:46:58.000Z | word2vec_train.py | kyucheolsim/gs-word2vec | 14a08f4320cd913694dab5980a5c4467c0ed9613 | [
"MIT"
] | null | null | null | from konlpy.tag import Mecab
from gensim.models.word2vec import Word2Vec
from W2VData import tokenize, W2VData
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
ADD_POS = True
LOWER = True
SG = 0
ITER = 10
MIN_COUNT = 3
EMBED_SIZE = 100
VOCAB_SIZE = 10000
tokenizer = Mecab()
sentences = W2VData('./data/nsmc/ratings_all.txt', tokenizer, tokenize, ADD_POS, LOWER)
# train
model = Word2Vec(sentences = sentences, size = EMBED_SIZE, window = 5, min_count = MIN_COUNT, max_vocab_size = VOCAB_SIZE, max_final_vocab = VOCAB_SIZE, workers = 4, sg = SG, iter = ITER)
# L2-normalize vectors in place to save memory (the model becomes non-trainable)
model.init_sims(replace = True)
# save KeyedVectors (small and fast to load, not trainable) instead of the full model
model.wv.save("./model/word2vec_sg%s_mc%s_es%s_vc%s_pos_lower.kv" % (SG, MIN_COUNT, EMBED_SIZE, len(model.wv.vocab)))
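# Hedged usage sketch: reload the saved KeyedVectors for querying (gensim 3.x
# API assumed; substitute the exact file name produced by the save above).
# from gensim.models import KeyedVectors
# kv = KeyedVectors.load("./model/word2vec_sg0_mc3_es100_vc10000_pos_lower.kv", mmap='r')
# print(kv.most_similar("word/NNG", topn=5))  # token/POS format assumed from ADD_POS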
| 30.758621 | 187 | 0.748879 | 140 | 892 | 4.6 | 0.514286 | 0.049689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027202 | 0.134529 | 892 | 28 | 188 | 31.857143 | 0.806995 | 0.132287 | 0 | 0 | 0 | 0 | 0.152146 | 0.09883 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
518a5d915b98f8e18ba518f211b0e46a72f71a18 | 3,338 | py | Python | india-covid19india-py/main.py | vpt101/covid-19_data_analysis | 1d02385ad75b650e584e119a8891433aa70e90d8 | [
"CC0-1.0"
] | null | null | null | india-covid19india-py/main.py | vpt101/covid-19_data_analysis | 1d02385ad75b650e584e119a8891433aa70e90d8 | [
"CC0-1.0"
] | null | null | null | india-covid19india-py/main.py | vpt101/covid-19_data_analysis | 1d02385ad75b650e584e119a8891433aa70e90d8 | [
"CC0-1.0"
] | null | null | null | # encoding: utf-8
import sys
sys.path.append(r'./Ind')
from IndStatePlotter import IndStatePlotter
from IndTypes import IndType
from ModellingMode import ModellingMode
import Meta as cc
from Ind import IndParser
from Ind import IndStateAnalyzer as Isa
# from IndStateAnalyzer import IndStateAnalyzer
DEFAULT_MODE = ModellingMode.FIRST_SECOND
drawChart = lambda pdf, stateCode: IndStatePlotter.basicChart(pdf, stateCode)
lorentz = lambda isa, df, stateCode: fitter(stateCode, isa.lorentzianModel)
poly = lambda isa, df, stateCode: fitter(stateCode, isa.polyModel)
exp = lambda isa, df, stateCode: fitter(stateCode, isa.expModel)
gauss = lambda isa, df, stateCode: fitter(stateCode, isa.gaussianModel)
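# Hedged example: each lambda above adapts one IndStateAnalyzer fitting method
# to the common fitter() signature, so swapping models is a one-argument change:
#   model('KL', fittingFunc=gauss)  # fit a Gaussian to Kerala's series
# (state codes are assumed to be keys of cc.IndStateAbbrMap)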
def model(stateCode, fittingFunc=poly, mode=DEFAULT_MODE):
[idf, sdf, isa] = ind(stateCode, mode=mode)
IndStatePlotter.chartSingleSeries(sdf, stateCode)
"""
IndStatePlotter.chartMultipleSeries(idf[idf['Status'].str.contains(IndType.CONFIRMED.value)],
[*cc.IndStateAbbrMap.keys()])
"""
fittingFunc(isa, sdf, stateCode)
# IndStatePlotter.drawAllCharts()
def ind(stateCode, routine=None, mode=DEFAULT_MODE):
indParser = IndParser.IndParser()
df = indParser.fetchStateWiseData()
"""
print(df.columns)
print(df.head(2))
print(Meta.StateAbbrMap['UP'])
"""
isa = Isa.IndStateAnalyzer(df, mode)
    print('Running for ' + stateCode)
if routine is None:
pdf = isa.singleStateMetric(stateCode,
IndType.CONFIRMED.value,
lambda series : Isa.movavg(5, series))
else:
pdf = isa.singleStateMetric(stateCode,
IndType.CONFIRMED.value,
lambda series : routine(series))
return [df, pdf, isa]
def csp(countryName, provinceName):
    global pcs, snl # Just to make it easy when running via IPython
from cov19sir import PlottingCs
pcs = PlottingCs.PlottingCs()
snl = pcs.loadOneCountry(countryName, provinceName)
pcs.trendPeltEbf(snl, 7)
pcs.defaultEstimate(snl, 10, '8th')
return snl
def indChart(smoothingFunc, stateCode):
[idf, sdf, isa] = ind(stateCode)
drawChart(smoothingFunc(sdf), stateCode)
return [idf, sdf, isa]
def fitter(stateCode, fitterFunction):
[params, model, result] = fitterFunction(stateCode)
IndStatePlotter.chartLmfitModel(result)
IndStatePlotter.predict(model, params)
if __name__ == '__main__':
province = None
# country = 'USA'
country = 'IN'
# country = 'UK'
# province = 'KL'
# csp(cc.longName(country), cc.inStateName(province))
model('UP')
    print('Done')
## To run it in an interactive Python Shell
## exec(open('main.py').read())
## OR
## In IPython
## import main as m
## [idf, sdf] = m.indChart(lambda x: x.tail(60), 'KL')
## OR for the lorentzianModel() :: [idf, sdf] = m.indChart(lambda x: x.tail(60), 'KL')
"""
https://stackoverflow.com/questions/3433486/how-to-do-exponential-and-logarithmic-curve-fitting-in-python-i-found-only-poly
https://lmfit.github.io/lmfit-py/builtin_models.html
"""
| 30.623853 | 123 | 0.639305 | 362 | 3,338 | 5.859116 | 0.411602 | 0.035361 | 0.020745 | 0.037718 | 0.18199 | 0.162188 | 0.162188 | 0.090523 | 0.090523 | 0.029231 | 0 | 0.007955 | 0.246854 | 3,338 | 108 | 124 | 30.907407 | 0.83572 | 0.146495 | 0 | 0.075472 | 0 | 0 | 0.015183 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0 | 0.150943 | 0 | 0.301887 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
518bffc883589adb5a823d3a5c5dcc651a157b64 | 4,203 | py | Python | test/test.py | mtdsousa/antlr4-verilog | c2238beb56a38ac098cd6e06e0ac8d7de7c1eaad | [
"MIT"
] | 3 | 2022-02-15T15:51:43.000Z | 2022-02-21T13:18:09.000Z | test/test.py | mtdsousa/antlr4-verilog | c2238beb56a38ac098cd6e06e0ac8d7de7c1eaad | [
"MIT"
] | 1 | 2022-02-21T12:35:10.000Z | 2022-02-21T16:45:56.000Z | test/test.py | mtdsousa/antlr4-verilog-python | c2238beb56a38ac098cd6e06e0ac8d7de7c1eaad | [
"MIT"
] | null | null | null | '''
Copyright (c) 2022 Marco Diniz Sousa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
'''
import unittest
from antlr4_verilog import InputStream, CommonTokenStream, ParseTreeWalker
from antlr4_verilog.verilog import VerilogLexer, VerilogParser, VerilogParserListener
from antlr4_verilog.systemverilog import SystemVerilogLexer, SystemVerilogParser, SystemVerilogParserListener
class TestVerilog(unittest.TestCase):
def setUp(self):
design = '''
module ha(a, b, sum, c);
input a, b;
output sum, c;
assign sum = a ^ b;
assign c = a & b;
endmodule
'''
lexer = VerilogLexer(InputStream(design))
stream = CommonTokenStream(lexer)
parser = VerilogParser(stream)
self.tree = parser.source_text()
self.walker = ParseTreeWalker()
def test_module_identifier(self):
class ModuleIdentifierListener(VerilogParserListener):
def exitModule_declaration(self, ctx):
self.identifier = ctx.module_identifier().getText()
listener = ModuleIdentifierListener()
self.walker.walk(listener, self.tree)
self.assertEqual(listener.identifier, 'ha')
def test_module_inputs(self):
class ModuleInputListener(VerilogParserListener):
def __init__(self):
self.declarations = []
def exitInput_declaration(self, ctx):
for child in ctx.list_of_port_identifiers().getChildren():
if isinstance(child, VerilogParser.Port_identifierContext):
self.declarations.append(child.identifier().getText())
listener = ModuleInputListener()
self.walker.walk(listener, self.tree)
self.assertEqual(listener.declarations, ['a', 'b'])
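    # A hedged sketch of extending the same pattern: any other construct can be
    # collected by overriding the matching exit* callback, e.g. a listener with
    # exitContinuous_assign(self, ctx) could gather the two 'assign' statements
    # in the setUp design above (rule name assumed from the Verilog grammar).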
class TestSystemVerilog(unittest.TestCase):
def setUp(self):
design = '''
module hello;
string s = "Hello";
initial begin
$display("%s", s);
end
endmodule
'''
lexer = SystemVerilogLexer(InputStream(design))
stream = CommonTokenStream(lexer)
parser = SystemVerilogParser(stream)
self.tree = parser.source_text()
self.walker = ParseTreeWalker()
def test_module_identifier(self):
class ModuleIdentifierListener(SystemVerilogParserListener):
def exitModule_declaration(self, ctx):
self.identifier = ctx.module_ansi_header().module_identifier().getText()
listener = ModuleIdentifierListener()
self.walker.walk(listener, self.tree)
self.assertEqual(listener.identifier, 'hello')
def test_variable_assignment(self):
class VariableAssignmentListener(SystemVerilogParserListener):
def exitVariable_decl_assignment(self, ctx):
self.identifier = ctx.variable_identifier().getText()
self.expression = ctx.expression().getText()
listener = VariableAssignmentListener()
self.walker.walk(listener, self.tree)
self.assertEqual(listener.identifier, 's')
self.assertEqual(listener.expression, '"Hello"')
def test_system_task(self):
class SystemTaskListener(SystemVerilogParserListener):
def exitSystem_tf_call(self, ctx):
self.identifier = ctx.system_tf_identifier().getText()
listener = SystemTaskListener()
self.walker.walk(listener, self.tree)
self.assertEqual(listener.identifier, '$display')
if __name__ == '__main__':
unittest.main()
| 38.916667 | 109 | 0.655722 | 405 | 4,203 | 6.693827 | 0.358025 | 0.020657 | 0.050904 | 0.040575 | 0.357433 | 0.339727 | 0.302103 | 0.272593 | 0.272593 | 0.215419 | 0 | 0.002248 | 0.259101 | 4,203 | 107 | 110 | 39.280374 | 0.868337 | 0.142279 | 0 | 0.324675 | 0 | 0 | 0.118187 | 0 | 0 | 0 | 0 | 0 | 0.077922 | 1 | 0.168831 | false | 0 | 0.051948 | 0 | 0.311688 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |