id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3298360 | #-*- coding: utf-8 -*-
"""
Twilio account config
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from configurations import values
class Twilio(object):
    """Settings mixin exposing Twilio configuration values.

    Secret values are read from the environment (no prefix) via
    django-configurations' ``values`` descriptors.
    """

    #: Account SID
    TWILIO_ACCOUNT_SID = values.SecretValue(environ_prefix=None)
    #: Auth token
    TWILIO_AUTH_TOKEN = values.SecretValue(environ_prefix=None)
    #: Default phone number
    TWILIO_PHONE_NUMBER = values.SecretValue(environ_prefix=None)
    #: Skip Twilio request-signature validation (presumably for tests -- confirm).
    TWILIO_SKIP_SIGNATURE_VALIDATION = values.BooleanValue(False, environ_prefix=None)
    #: Domain used when building callback URLs; empty string by default.
    TWILIO_CALLBACK_DOMAIN = values.Value("", environ_prefix=None)
    #: Whether callback URLs are built with the https scheme.
    TWILIO_CALLBACK_USE_HTTPS = values.BooleanValue(True, environ_prefix=None)
| StarcoderdataPython |
3394981 | import os
import sys
import warnings
import importlib
import inspect
import os.path as osp
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import Sequence
from tensorflow.python.keras import callbacks as callbacks_module
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.callbacks import History
from torch.utils.data import DataLoader, Dataset
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.data.io import makedirs_from_filepath
from graphgallery.gallery import Model
from graphgallery.utils import Progbar
# TensorFlow 2.1.x
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
# This is caused by `tf.gather` and it will be solved in future tensorflow version.
warnings.filterwarnings(
    'ignore',
    message='.*Converting sparse IndexedSlices to a dense Tensor of unknown shape.*')
# TensorFlow 2.4.0
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=...) to a dense Tensor of unknown shape.
# This may consume a large amount of memory.
# The broader pattern below also covers the 2.4.0 message wording.
warnings.filterwarnings(
    'ignore', message='.*to a dense Tensor of unknown shape.*')
def format_doc(d):
    """Render an argument/default mapping as a numbered, human-readable list.

    Entries whose value is the sentinel string ``"UNSPECIDIED"`` (spelling kept
    for compatibility with ``doc_dict``) are shown as required arguments.
    """
    lines = []
    for index, (name, default) in enumerate(d.items(), start=1):
        if default != "UNSPECIDIED":
            lines.append(f"({index}) `{name}`, Default is `{default}` \n")
        else:
            lines.append(f"({index}) `{name}`, UNSPECIDIED argument\n")
    return "".join(lines)
def doc_dict(func):
    """Map each positional parameter of *func* to its default value.

    A leading ``self`` parameter is dropped.  Parameters without a default are
    mapped to the sentinel string ``"UNSPECIDIED"`` (spelling kept for
    compatibility with ``format_doc``).

    Returns
    -------
    dict
        ``{argument_name: default_or_sentinel}``.
    """
    arg_spec = inspect.getfullargspec(func)
    args = arg_spec.args if arg_spec.args else []
    # Guard against zero-argument callables: the original unconditional
    # `args[0]` raised IndexError when `args` was empty.
    if args and args[0] == "self":
        args = args[1:]
    defaults = list(arg_spec.defaults) if arg_spec.defaults else []
    # Defaults align with the *tail* of the argument list; pad the head
    # with the sentinel.
    delta_l = len(args) - len(defaults)
    defaults = ["UNSPECIDIED"] * delta_l + defaults
    return dict(zip(args, defaults))
def make_docs(*func):
    """Merge the argument docs of every callable in *func* into one message.

    Later callables overwrite earlier entries with the same argument name.
    """
    merged = {}
    for fn in func:
        merged.update(doc_dict(fn))
    return format_doc(merged)
def unravel_batch(batch):
    """Split a batch into an ``(inputs, labels, out_index)`` triple.

    A list/tuple batch is read as ``(inputs, labels, ..., out_index)`` where
    ``out_index`` is only taken (from the last position) when the batch has
    more than two elements; any other object is treated as bare inputs.
    Singleton list/tuple labels and indices are unwrapped to their element.
    """
    inputs, labels, out_index = None, None, None
    if isinstance(batch, (list, tuple)):
        inputs, labels = batch[0], batch[1]
        if len(batch) > 2:
            out_index = batch[-1]
    else:
        inputs = batch

    def _unwrap(value):
        # Collapse one-element containers, e.g. `[y]` -> `y`.
        if isinstance(value, (list, tuple)) and len(value) == 1:
            return value[0]
        return value

    return inputs, _unwrap(labels), _unwrap(out_index)
class Trainer(Model):
def setup_cfg(self):
    """load the default config function `default_cfg_setup` for the corresponding task.

    Raises
    ------
    RuntimeError
        the default config function `default_cfg_setup` not found in the file
        `graphgallery.gallery.[task].default`
    """
    # Third dotted component of the subclass module path, e.g.
    # `graphgallery.gallery.nodeclas...` -> "nodeclas".
    # nodeclas/linkpred/...
    task_module = self.__module__.split('.')[2]
    # graphgallery.gallery
    gallery_module = '.'.join(__name__.split('.')[:-1])
    try:
        # Relative import of `<gallery_module>.<task_module>.default`.
        default_setup = importlib.import_module(f".{task_module}.default", gallery_module)
    except ModuleNotFoundError:
        raise RuntimeError(f"default setup function `{gallery_module}.{task_module}.default.default_cfg_setup` not found!")
    default_setup.default_cfg_setup(self.cfg)
@np.deprecate(old_name="make_data",
              message=("the method `trainer.make_data` is currently deprecated from 0.9.0,"
                       " please use `trainer.setup_graph` instead."))
def make_data(self, *args, **kwargs):
    """Deprecated alias of :meth:`setup_graph`.

    NOTE(review): ``np.deprecate`` was removed in NumPy 2.0 -- confirm the
    pinned NumPy version before upgrading.
    """
    return self.setup_graph(*args, **kwargs)
def setup_graph(self, graph, graph_transform=None, device=None, **kwargs):
    """This method is used for process your inputs, which accepts
    only keyword arguments in your defined method 'data_step'.
    This method will process the inputs, and transform them into tensors.

    Commonly used keyword arguments:
    --------------------------------
    graph: graphgallery graph classes.
    graph_transform: string, Callable function,
        or a tuple with function and dict arguments.
        transform for the entire graph, it is used first.
    device: device for preparing data, if None, it defaults to `self.device`
    adj_transform: string, Callable function,
        or a tuple with function and dict arguments.
        transform for adjacency matrix.
    attr_transform: string, Callable function,
        or a tuple with function and dict arguments.
        transform for attribute matrix.

    other arguments (if have) will be passed into method 'data_step'.
    """
    self.empty_cache()
    model = self.model
    # Also free any cache held by the current model, if it supports it.
    if model is not None and hasattr(model, 'empty_cache'):
        model.empty_cache()
    # The whole-graph transform is applied first, before any data_step work.
    self.graph = gf.get(graph_transform)(graph)
    cfg = self.cfg.data
    if device is not None:
        self.data_device = gf.device(device, self.backend)
    else:
        self.data_device = self.device
    cfg.device = device
    # `gf.wrapper` runs `data_step` and hands back the keyword arguments;
    # presumably with defaults filled in -- confirm against gf.wrapper.
    _, kwargs = gf.wrapper(self.data_step)(**kwargs)
    kwargs['graph_transform'] = graph_transform
    cfg.merge_from_dict(kwargs)
    # Remember every `*transform` argument on `self.transform` for reuse.
    for k, v in kwargs.items():
        if k.endswith("transform"):
            setattr(self.transform, k, gf.get(v))
    return self
def data_step(self, *args, **kwargs):
    """Implement your data processing function here (called by `setup_graph`)."""
    raise NotImplementedError
def build(self, **kwargs):
    """This method is used for build your model, which
    accepts only keyword arguments in your defined method 'model_step'.

    Note:
    -----
    This method should be called after `process`.

    Commonly used keyword arguments:
    --------------------------------
    hids: int or a list of them,
        hidden units for each hidden layer.
    acts: string or a list of them,
        activation functions for each layer.
    dropout: float scalar,
        dropout used in the model.
    lr: float scalar,
        learning rate used for the model.
    weight_decay: float scalar,
        weight decay used for the model weights.
    bias: bool,
        whether to use bias in each layer.
    use_tfn: bool,
        this argument is only used for TensorFlow backend, if `True`, it will decorate
        the model training and testing with `tf.function` (See `graphgallery.nn.modes.TFKeras`).
        By default, it was `True`, which can accelerate the training and inference, by it may cause
        several errors.

    other arguments (if have) will be passed into your method 'model_step'.
    """
    if self._graph is None:
        raise RuntimeError("Please call 'trainer.setup_graph(graph)' first.")
    use_tfn = kwargs.get("use_tfn", True)
    if self.backend == "tensorflow":
        # Build on the configured device so variables are placed there.
        with tf.device(self.device):
            self.model, kwargs = gf.wrapper(self.model_step)(**kwargs)
            if use_tfn:
                self.model.use_tfn()
    else:
        # `use_tfn` is TensorFlow-only; drop it before calling model_step.
        kwargs.pop("use_tfn", None)
        model, kwargs = gf.wrapper(self.model_step)(**kwargs)
        self.model = model.to(self.device)
    self.cfg.model.merge_from_dict(kwargs)
    return self
def model_step(self, *args, **kwargs):
    """Implement your model building function here (called by `build`)."""
    raise NotImplementedError
def fit(self, train_data, val_data=None, **kwargs):
    """Train the model on `train_data`, optionally validating on `val_data`.

    Extra keyword arguments are merged into `self.cfg.fit` and configure
    EarlyStopping / ModelCheckpoint / Progbar / Logger behavior.  Returns the
    Keras `History` callback collected during training.
    """
    cache = self.cache
    cfg = self.cfg.fit
    cfg.merge_from_dict(kwargs)
    ckpt_cfg = cfg.ModelCheckpoint
    es_cfg = cfg.EarlyStopping
    pb_cfg = cfg.Progbar
    log_cfg = cfg.Logger

    # `logger` is only bound when logging is enabled; all later uses are
    # guarded by `log_cfg.enabled`.
    if log_cfg.enabled:
        log_cfg.name = log_cfg.name or self.name
        logger = gg.utils.setup_logger(output=log_cfg.filepath, name=log_cfg.name)

    model = self.model
    if model is None:
        raise RuntimeError(
            'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
        )

    # Wrap raw inputs in a data loader unless one was passed in directly.
    if not isinstance(train_data, (Sequence, DataLoader, Dataset)):
        train_data = self.train_loader(train_data)
    if cfg.cache_train_data:
        cache.train_data = train_data

    validation = val_data is not None
    if validation:
        if not isinstance(val_data, (Sequence, DataLoader, Dataset)):
            val_data = self.test_loader(val_data)
        if cfg.cache_val_data:
            cache.val_data = val_data

    # Setup callbacks
    callbacks = callbacks_module.CallbackList()
    history = History()
    callbacks.append(history)
    cfg, callbacks = setup_callbacks(cfg, callbacks, validation)
    callbacks.set_model(model)
    self.callbacks = callbacks
    model.stop_training = False

    verbose = cfg.verbose
    assert not (verbose and log_cfg.enabled), "Progbar and Logger cannot be used together! You must set `verbose=0` when Logger is enabled."
    if verbose:
        # verbose 1-2: one progress bar across epochs; verbose > 2 rebuilds
        # a per-epoch bar inside the loop below.
        if verbose <= 2:
            progbar = Progbar(target=cfg.epochs,
                              width=pb_cfg.width,
                              verbose=verbose)
        print("Training...")
    elif log_cfg.enabled:
        logger.info("Training...")

    logs = gf.BunchDict()
    callbacks.on_train_begin()
    # for some initialization
    if hasattr(model, 'on_train_begin'):
        model.on_train_begin()
    try:
        for epoch in range(cfg.epochs):
            if verbose > 2:
                progbar = Progbar(target=len(train_data),
                                  width=pb_cfg.width,
                                  verbose=verbose - 2)
            callbacks.on_epoch_begin(epoch)
            train_logs = self.train_step(train_data)
            if hasattr(train_data, 'on_epoch_end'):
                train_data.on_epoch_end()
            logs.update({k: to_item(v) for k, v in train_logs.items()})
            if validation:
                valid_logs = self.test_step(val_data)
                # Validation metrics are namespaced with a `val_` prefix.
                logs.update({("val_" + k): to_item(v) for k, v in valid_logs.items()})
                if hasattr(val_data, 'on_epoch_end'):
                    val_data.on_epoch_end()
            callbacks.on_train_batch_end(len(train_data), logs)
            callbacks.on_epoch_end(epoch, logs)
            if verbose > 2:
                print(f"Epoch {epoch+1}/{cfg.epochs}")
                progbar.update(len(train_data), logs.items())
            elif verbose:
                progbar.update(epoch + 1, logs.items())
            elif log_cfg.enabled:
                logger.info(f"Epoch {epoch+1}/{cfg.epochs}\n{gg.utils.create_table(logs)}")
            # `stop_training` is flipped by the EarlyStopping callback.
            if model.stop_training:
                if log_cfg.enabled:
                    logger.info(f"Early Stopping at Epoch {epoch}")
                else:
                    print(f"Early Stopping at Epoch {epoch}", file=sys.stderr)
                break
        callbacks.on_train_end()
        # Restore the best checkpoint recorded by ModelCheckpoint.
        if ckpt_cfg.enabled:
            if ckpt_cfg.save_weights_only:
                model.load_weights(ckpt_cfg.path)
            else:
                self.model = model.load(ckpt_cfg.path)
    finally:
        # to avoid unexpected termination of the model
        if ckpt_cfg.enabled and ckpt_cfg.remove_weights:
            self.remove_weights()
    return history
def evaluate(self, test_data, **kwargs):
    """Evaluate the trained model on `test_data` and return the metric logs."""
    if not self.model:
        raise RuntimeError(
            'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
        )
    cache = self.cache
    cfg = self.cfg.evaluate
    cfg.merge_from_dict(kwargs)
    if not isinstance(test_data, (Sequence, DataLoader, Dataset)):
        test_data = self.test_loader(test_data)
    if cfg.cache_test_data:
        cache.test_data = test_data
    if cfg.verbose:
        print("Testing...")
    progbar = Progbar(target=len(test_data),
                      width=cfg.Progbar.width,
                      verbose=cfg.verbose)
    logs = gf.BunchDict(**self.test_step(test_data))
    # The comprehension is fully built before `update`, so rewriting values
    # while reading `logs.items()` here is safe.
    logs.update({k: to_item(v) for k, v in logs.items()})
    progbar.update(len(test_data), logs.items())
    return logs
def train_step(self, sequence):
    """Run one training pass over `sequence`; returns the last batch's logs."""
    model = self.model
    model.reset_metrics()
    results = None
    # NOTE: the loop variable counts *batches* despite being named `epoch`.
    for epoch, batch in enumerate(sequence):
        self.callbacks.on_train_batch_begin(epoch)
        inputs, labels, out_index = unravel_batch(batch)
        results = model.train_step_on_batch(x=inputs,
                                            y=labels,
                                            out_index=out_index,
                                            device=self.device)
    return results
def test_step(self, sequence):
    """Run one evaluation pass over `sequence`; returns the last batch's logs."""
    model = self.model
    model.reset_metrics()
    results = None
    for batch in sequence:
        inputs, labels, out_index = unravel_batch(batch)
        results = model.test_step_on_batch(x=inputs,
                                           y=labels,
                                           out_index=out_index,
                                           device=self.device)
    return results
def predict(self, predict_data=None, transform=None):
    """Predict logits for `predict_data`, applying the optional `transform`
    (e.g. a softmax) to the stacked outputs before returning them squeezed.
    """
    if not self.model:
        raise RuntimeError(
            'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
        )
    cache = self.cache
    cfg = self.cfg.predict
    cfg.transform = transform
    # When `predict_data` is None, the default loader is asked to handle it.
    if not isinstance(predict_data, (Sequence, DataLoader, Dataset)):
        predict_data = self.predict_loader(predict_data)
    if cfg.cache_predict_data:
        cache.predict_data = predict_data
    logits = self.predict_step(predict_data)
    # Resolve and remember the output transform, then apply it.
    self.transform.logit_transform = T = gf.get(transform)
    logits = T(logits)
    return logits.squeeze()
def predict_step(self, sequence):
    """Collect per-batch predictions over `sequence`, stacked row-wise."""
    logits = []
    model = self.model
    for batch in sequence:
        # `labels` is unpacked but intentionally unused: prediction
        # only needs inputs and the output index.
        inputs, labels, out_index = unravel_batch(batch)
        logit = model.predict_step_on_batch(x=inputs,
                                            out_index=out_index,
                                            device=self.device)
        logits.append(logit)
    return np.vstack(logits)
def train_loader(self, inputs, **kwargs):
    """Build the training data loader; subclasses must implement this."""
    raise NotImplementedError

def test_loader(self, inputs, **kwargs):
    """Build the evaluation data loader; defaults to the training loader."""
    return self.train_loader(inputs, **kwargs)

def predict_loader(self, inputs, **kwargs):
    """Build the prediction data loader; defaults to the test loader."""
    return self.test_loader(inputs, **kwargs)
def _test_predict(self, index):
    """Return the prediction accuracy over the nodes selected by `index`."""
    logit = self.predict(index)
    predict_class = logit.argmax(1)
    labels = self.graph.node_label[index]
    return (predict_class == labels).mean()
def reset_weights(self):
    # TODO: add pytorch support
    """reset the model to the first time.

    Restores every weight from `self.backup` (TensorFlow variables only).
    """
    model = self.model
    if self.backup is None:
        raise RuntimeError(
            "You must store the `backup` before `reset_weights`."
            "`backup` will be automatically stored when the model is built."
        )
    for w, wb in zip(model.weights, self.backup):
        w.assign(wb)
@property
def model(self):
    """The underlying model instance, or None when not yet built."""
    return self._model

@model.setter
def model(self, m):
    # Back up
    # if isinstance(m, tf.keras.Model) and m.weights:
    #     self.backup = tf.identity_n(m.weights)
    # TODO assert m is None or isinstance(m, tf.keras.Model) or torch.nn.Module
    self._model = m
def reset_optimizer(self):
    # TODO: add pytorch support
    """Zero every optimizer slot variable (TensorFlow optimizers only)."""
    model = self.model
    if not hasattr(model, 'optimizer'):
        raise RuntimeError("The model has not attribute `optimizer`!")
    for var in model.optimizer.variables():
        var.assign(tf.zeros_like(var))
def reset_lr(self, value):
    # TODO: add pytorch support
    """Set the optimizer learning rate to `value` (TensorFlow optimizers only)."""
    model = self.model
    if not hasattr(model, 'optimizer'):
        raise RuntimeError("The model has not attribute `optimizer`!")
    model.optimizer.learning_rate.assign(value)
def remove_weights(self):
    """Delete the checkpoint file (and TF sidecar files) written during fit."""
    filepath = self.cfg.fit.ModelCheckpoint.path
    if self.backend == "tensorflow":
        # Non-h5 TF checkpoints leave extra .index/.data/checkpoint files.
        remove_extra_tf_files(filepath)
    if osp.exists(filepath):
        os.remove(filepath)
def help(self, return_msg=False):
    """Return (or print) a help message describing the trainer workflow.

    Parameters
    ----------
    return_msg : bool
        If True, return the message string instead of printing it.
    """
    # Fixed typos in the user-facing text: "reqiured" -> "required",
    # "Finall" -> "Finally".
    msg = f"""
**************************************Help Message for {self.name}******************************************
|First, setup a graph, run `trainer.setup_graph`, the required arguments are: |
{make_docs(self.setup_graph, self.data_step)}

|Second, build your model, run `trainer.build`, the required arguments are: |
{make_docs(self.build, self.model_step)}

|Third, train your model, run `trainer.fit`, the required arguments are: |
{make_docs(self.fit)}

|Finally and optionally, evaluate your model, run `trainer.evaluate`, the required arguments are: |
{make_docs(self.evaluate)}
"""
    if return_msg:
        return msg
    else:
        print(msg)
# def __getattr__(self, attr):
# ##### FIXME: This may cause ERROR ######
# try:
# return self.__dict__[attr]
# except KeyError:
# if hasattr(self, "_model") and hasattr(self._model, attr):
# return getattr(self._model, attr)
# raise AttributeError(
# f"'{self.name}' and '{self.name}.model' objects have no attribute '{attr}'"
# )
def to_item(value):
    """Coerce a scalar-like tensor/array into a plain Python value.

    `None` passes through unchanged.  Otherwise a `.numpy()` method (eager
    TF tensors) is applied first, then `.item()` (NumPy scalars/0-d arrays).
    Objects with neither method are returned as-is.
    """
    if value is None:
        return None
    for attr in ("numpy", "item"):
        converter = getattr(value, attr, None)
        if converter is not None:
            value = converter()
    return value
def remove_extra_tf_files(filepath):
    """Delete TensorFlow sidecar files for a checkpoint saved without h5.

    Removes `<filepath>.data-00000-of-00001`, `<filepath>.index` and the
    `checkpoint` bookkeeping file living next to `filepath`, when present.
    """
    candidates = [filepath + suffix
                  for suffix in (".data-00000-of-00001", ".index")]
    checkpoint_dir = osp.split(osp.realpath(filepath))[0]
    candidates.append(osp.join(checkpoint_dir, "checkpoint"))
    for candidate in candidates:
        if osp.exists(candidate):
            os.remove(candidate)
def setup_callbacks(cfg, callbacks, validation):
    """Append EarlyStopping/ModelCheckpoint/TensorBoard callbacks per `cfg`.

    Returns the (possibly mutated) `cfg` and the populated callback list.
    """
    ckpt_cfg = cfg.ModelCheckpoint
    es_cfg = cfg.EarlyStopping
    tb_cfg = cfg.TensorBoard
    # Without a validation set a `val_*` monitor can never be computed:
    # the callback is disabled and the `val_` prefix stripped.
    if not validation:
        if ckpt_cfg.enabled and ckpt_cfg.monitor.startswith("val_"):
            ckpt_cfg.enabled = False
            ckpt_cfg.monitor = ckpt_cfg.monitor[4:]
        if es_cfg.enabled and es_cfg.monitor.startswith("val_"):
            es_cfg.enabled = False
            es_cfg.monitor = es_cfg.monitor[4:]
    if es_cfg.enabled:
        es_callback = EarlyStopping(monitor=es_cfg.monitor,
                                    patience=es_cfg.patience,
                                    mode=es_cfg.mode,
                                    verbose=es_cfg.verbose,
                                    baseline=es_cfg.baseline,
                                    restore_best_weights=es_cfg.restore_best_weights)
        callbacks.append(es_callback)
    if ckpt_cfg.enabled:
        # Normalize the checkpoint path extension and make sure its
        # directory exists before Keras writes to it.
        if not ckpt_cfg.path.endswith(gg.file_ext()):
            ckpt_cfg.path += gg.file_ext()
        makedirs_from_filepath(ckpt_cfg.path)
        mc_callback = ModelCheckpoint(ckpt_cfg.path,
                                      mode=ckpt_cfg.mode,
                                      monitor=ckpt_cfg.monitor,
                                      save_best_only=ckpt_cfg.save_best_only,
                                      save_weights_only=ckpt_cfg.save_weights_only,
                                      verbose=ckpt_cfg.verbose)
        callbacks.append(mc_callback)
    if tb_cfg.enabled:
        callbacks.append(tf.keras.callbacks.TensorBoard(tb_cfg.log_dir,
                                                        write_graph=tb_cfg.write_graph,
                                                        update_freq=tb_cfg.update_freq,
                                                        histogram_freq=tb_cfg.histogram_freq,
                                                        write_images=tb_cfg.write_images))
    return cfg, callbacks
| StarcoderdataPython |
1759769 | # Generated by Django 3.2.7 on 2021-09-10 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `PixelCount.black_or_white` (auto-generated by Django 3.2.7)."""

    dependencies = [
        ('backend', '0003_auto_20210910_0956'),
    ]

    operations = [
        migrations.AddField(
            model_name='pixelcount',
            name='black_or_white',
            # `preserve_default=False`: 'equal' only back-fills existing rows
            # during this migration and is not kept as the field default.
            field=models.CharField(default='equal', max_length=5),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
182290 | <gh_stars>0
import pulsar as psr
def load_ref_system():
""" Returns d-xylulose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 2.5413 -0.0840 0.1586
C 1.0785 0.2582 -0.2141
C 0.1112 -0.8211 0.3626
C -1.2253 -0.1569 0.7445
C -2.2588 0.0443 -0.3568
O 3.4244 0.9777 -0.0639
O 0.9169 0.4391 -1.6002
O -0.0484 -1.9341 -0.4765
O -1.4298 0.1858 1.8916
O -3.2317 1.0078 -0.0810
H 2.8853 -0.9935 -0.3744
H 2.6417 -0.2675 1.2428
H 0.8021 1.2642 0.1802
H 0.5702 -1.2943 1.2640
H -2.7365 -0.9295 -0.5869
H -1.7845 0.4260 -1.2801
H 3.3328 1.2462 -0.9702
H 1.2920 -0.3159 -2.0399
H -0.5096 -1.6545 -1.2597
H -3.5841 0.8358 0.7843
""")
| StarcoderdataPython |
29751 | <filename>models/tree.py
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy instance, bound to the Flask app in `configure`.
db = SQLAlchemy()


def configure(app):
    """Bind the shared SQLAlchemy instance to `app` and expose it as `app.db`."""
    db.init_app(app)
    app.db = db
class Tree(db.Model):
    """ORM model for a tree record."""

    __tablename__ = 'tree'

    id = db.Column(db.Integer, primary_key=True)
    # Short identification code for the tree.
    code = db.Column(db.String(50), nullable=False)
    description = db.Column(db.String(255), nullable=False)
    age = db.Column(db.Integer(), nullable=False)
    # specie_id = db.Column(db.Integer, db.ForeignKey('specie.id'), nullable=False)

    def __repr__(self):
        return '<Tree %r>' % self.description
1779604 | """
Accounts views.
"""
# Django
from django.urls import reverse_lazy
from django.contrib import messages
from django.shortcuts import redirect, render
from django.contrib.auth import (authenticate,
login,
logout,
update_session_auth_hash)
from django.contrib.auth.views import (PasswordResetView,
PasswordResetConfirmView,
PasswordResetCompleteView)
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView
# Forms
from .forms import (UpdateUserForm,
PasswordChangeForm,
UserRequestForm)
def login_view(request):
    """Login view.

    GET renders the login form; POST authenticates and redirects to `home`.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user:
            login(request, user)
            return redirect('home')
        else:
            # Deliberately generic error: does not reveal which field failed.
            return render(
                request,
                'accounts/login.html',
                {'error': _('Invalid username and password')}
            )
    return render(
        request=request,
        template_name='accounts/login.html',
        context={
            'page_title': 'Login',
        }
    )
class PasswordResetView(SuccessMessageMixin, PasswordResetView):
    """Password reset class view"""
    # NOTE(review): this subclass intentionally shadows the imported
    # `PasswordResetView` it inherits from.
    template_name = 'accounts/password_reset.html'
    email_template_name = 'accounts/password_reset_email.html'
    subject_template_name = 'accounts/password_reset_subject'
    # Neutral wording: does not confirm whether the email exists.
    success_message = _('Instructions have been sent via email to reset '
                        'your password, if an account exists with the email you entered, '
                        'you should receive them shortly.')
    success_url = reverse_lazy('accounts:login')
    extra_context = {'page_title': _('Password recovery')}
class PasswordResetConfirmView(PasswordResetConfirmView):
    """Password reset confirm class view (shadows the imported base)."""
    template_name = 'accounts/password_reset_confirm.html'
    success_url = reverse_lazy('accounts:password_reset_complete')
    extra_context = {'page_title': _('Password recovery')}
class PasswordResetCompleteView(PasswordResetCompleteView):
    """Password reset complete class view (shadows the imported base)."""
    template_name = 'accounts/password_reset_complete.html'
    extra_context = {'page_title': _('Password recovery')}
@login_required
def logout_view(request):
    """Logout a user view, then redirect to the login page."""
    logout(request)
    return redirect('accounts:login')
@login_required
def profile_view(request):
    """Profile view.

    GET shows the current user's data; POST validates and saves the form.
    """
    if request.method == 'POST':
        user_form = UpdateUserForm(request.POST, instance=request.user)
        if user_form.is_valid():
            user_form.save()
            messages.success(
                request, _('Profile updated successfully'))
            return redirect('accounts:profile')
        else:
            # Mark invalid widgets so the template highlights them.
            for field in user_form.errors:
                user_form[field].field.widget.attrs['class'] += ' is-invalid'
            messages.error(request, _(
                'It was not possible to update your profile'))
    else:
        user_form = UpdateUserForm(instance=request.user)
    return render(
        request=request,
        template_name='accounts/profile.html',
        context={
            'page_title': _('User profile'),
            'user_form': user_form,
        }
    )
@login_required
def change_password_view(request):
    """Change password view.

    On success the session auth hash is refreshed so the user stays logged in.
    """
    if request.method == 'POST':
        pass_form = PasswordChangeForm(request.user, request.POST)
        if pass_form.is_valid():
            user = pass_form.save()
            # Important! Without this the password change invalidates the
            # current session and logs the user out.
            update_session_auth_hash(request, user)
            messages.success(
                request, _('Your password was updated successfully'))
            return redirect('accounts:change_password')
        else:
            # Mark invalid widgets so the template highlights them.
            for field in pass_form.errors:
                pass_form[field].field.widget.attrs['class'] += ' is-invalid'
            messages.error(request, _(
                'It was not possible to update your password'))
    else:
        pass_form = PasswordChangeForm(request.user)
    return render(
        request=request,
        template_name='accounts/change_password.html',
        context={
            'page_title': _('Change password'),
            'pass_form': pass_form,
        }
    )
class CreateUserRequestView(SuccessMessageMixin, CreateView):
    """Create a new user request."""
    template_name = 'accounts/user_request.html'
    form_class = UserRequestForm
    success_url = reverse_lazy('accounts:login')
    success_message = _('You have requested a user. If it is approved, you will'
                        ' receive an email with your temporary password.')
    extra_context = {'page_title': _('User request')}
| StarcoderdataPython |
3295162 | from .common import * # noqa
# Hosts this deployment serves, extended by any hosts in SECRETS.
ALLOWED_HOSTS = [
    'maestromusicpros.com',
    'www.maestromusicpros.com',
    #'www.djangoproject.com',
    #'djangoproject.com',
    'www.djangoproject.localhost',
    #'polar-inlet-43860.herokuapp.com',
    #'pacific-lowlands-80447.herokuapp.com',
    #'docs.djangoproject.com',
    #'dashboard.djangoproject.com',
] + SECRETS.get('allowed_hosts', [])

LOCALE_MIDDLEWARE_EXCLUDED_HOSTS = ['docs.djangoproject.com']

# NOTE(review): DEBUG=True in what otherwise looks like a production settings
# module (real domains, secure cookies) exposes stack traces and settings to
# visitors -- confirm this is intentional.
DEBUG = True
THUMBNAIL_DEBUG = DEBUG

CACHES = {
    'default': {
        'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
        'LOCATION': SECRETS.get('memcached_host', '127.0.0.1:11211'),
        'BINARY': True,
        'OPTIONS': {
            'tcp_nodelay': True,
            'ketama': True
        }
    },
    # Separate Redis-backed cache for rendered documentation pages.
    'docs-pages': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': SECRETS.get('redis_host', 'localhost:6379'),
        'OPTIONS': {
            'DB': 2,
        },
    },
}

CSRF_COOKIE_SECURE = True

# Send django.request logs to the local syslog daemon as well.
LOGGING["handlers"]["syslog"] = {
    "formatter": "full",
    "level": "DEBUG",
    "class": "logging.handlers.SysLogHandler",
    "address": "/dev/log",
    "facility": "local4",
}
LOGGING["loggers"]["django.request"]["handlers"].append("syslog")

MEDIA_ROOT = str(DATA_DIR.joinpath('media'))

MEDIA_URL = 'https://media.djangoproject.com/'

# Wrap the base middleware stack with whole-site caching
# (Update must run first, Fetch last).
MIDDLEWARE = (
    ['django.middleware.cache.UpdateCacheMiddleware'] +
    MIDDLEWARE +
    ['django.middleware.cache.FetchFromCacheMiddleware']
)

SESSION_COOKIE_SECURE = True

STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'

STATIC_ROOT = str(DATA_DIR.joinpath('static'))

#STATIC_URL = 'https://static.djangoproject.com/'

# Docs settings
DOCS_BUILD_ROOT = DATA_DIR.joinpath('data', 'docbuilds')

# django-hosts settings
HOST_SCHEME = 'https'
PARENT_HOST = 'djangoproject.com'

# django-push settings
PUSH_SSL_CALLBACK = True

# Log errors to Sentry instead of email, if available.
if 'sentry_dsn' in SECRETS and not DEBUG:
    INSTALLED_APPS.append('raven.contrib.django.raven_compat')
    RAVEN_CONFIG = {'dsn': SECRETS['sentry_dsn']}

# RECAPTCHA KEYS
# Defaults will trigger 'captcha.recaptcha_test_key_error' system check
if 'recaptcha_public_key' in SECRETS:
    RECAPTCHA_PUBLIC_KEY = SECRETS.get('recaptcha_public_key')
    RECAPTCHA_PRIVATE_KEY = SECRETS.get('recaptcha_private_key')
42973 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def swahili(path):
    """Swahili

    Attitudes towards the Swahili language among Kenyan school children

    A dataset with 480 observations on the following 4 variables.

    `Province`
        `NAIROBI` or `PWANI`

    `Sex`
        `female` or `male`

    `Attitude.Score`
        Score (out a possible 200 points) on a survey of attitude towards the
        Swahili language

    `School`
        Code for the school: `A` through `L`

    Args:
      path: str.
        Path to directory which either stores file or otherwise file will
        be downloaded and extracted there.
        Filename is `swahili.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 480 rows and 4 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'swahili.csv'
    # Download the CSV on first use only; subsequent calls read the cached copy.
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/Stat2Data/Swahili.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='swahili.csv',
                                   resume=False)
    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
| StarcoderdataPython |
1783443 | <reponame>daniele21/financial_anomaly_detection<filename>core/network/encoder.py
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class LSTM_Encoder(nn.Module):
    """Stacked single-layer LSTM encoder.

    Each successive LSTM shrinks the feature dimension by ``reduce_factor``
    (integer division).  ``forward`` returns the final hidden state of the
    last LSTM layer.

    Parameters
    ----------
    in_features : int
        Size of each input feature vector.
    reduce_factor : int
        Divisor applied to the feature size at every layer.
    layers : int
        Number of stacked LSTM layers; must be >= 1.
    seed : int
        Seed passed to ``torch.manual_seed`` for reproducible init.
    """

    def __init__(self,
                 in_features,
                 reduce_factor: int,
                 layers: int,
                 seed: int = 2021):
        super(LSTM_Encoder, self).__init__()

        # Fix: the original loop left `hidden_size` undefined when
        # `layers` < 1, causing a NameError at `self.last_size`.
        if layers < 1:
            raise ValueError(f"`layers` must be >= 1, got {layers}")

        self.name = 'LSTM Encoder'
        self.in_feature = in_features

        torch.manual_seed(seed)

        self.lstm_layers = []
        for layer in range(layers):
            hidden_size = in_features // reduce_factor
            self.lstm_layers.append(nn.LSTM(input_size=in_features,
                                            hidden_size=hidden_size,
                                            num_layers=1,
                                            batch_first=True))
            # Next layer consumes this layer's hidden states.
            in_features = hidden_size

        # Output feature size of the final layer.
        self.last_size = hidden_size

        # Register each LSTM so parameters are tracked by nn.Module
        # (names kept identical for state_dict compatibility).
        for i, lstm_layer in enumerate(self.lstm_layers):
            self.add_module(f'LSTM Layer - {i}', lstm_layer)

    def forward(self, x):
        """Encode `x` of shape (batch, seq, in_features).

        Returns the last layer's final hidden state, shape
        (1, batch, last_size).
        """
        h_state = None
        y_lstm = x
        for lstm_layer in self.lstm_layers:
            y_lstm, (h_state, c_state) = lstm_layer(y_lstm)
        out = h_state
        return out
| StarcoderdataPython |
1615479 | <reponame>CYTMWIA/MyGarage<filename>GuaDao/Steam/SteamRequestsSession.py
import requests
import time
class SteamRequestsSession(requests.sessions.Session):
    """requests Session that rate-limits requests and raises on HTTP 429."""

    def __init__(self):
        # Timestamp of the previous request; 0 means "never requested".
        self.last_request_time = 0
        # Minimum number of seconds between two consecutive requests.
        self.request_interval = 10
        super().__init__()
        self.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0'
        })

    def request(self, *args, **kwargs):
        """Throttled `Session.request`; raises on a 429 response."""
        # Fix: sleep the exact remaining time once instead of busy-polling
        # in 1 ms steps until the interval has elapsed.
        remaining = self.last_request_time + self.request_interval - time.time()
        if remaining > 0:
            time.sleep(remaining)
        self.last_request_time = time.time()
        rsp = super().request(*args, **kwargs)
        if rsp.status_code == 429:
            raise Exception('Too Many Requests')
        return rsp
18739 | <reponame>krzysztoffiok/twitter_sentiment_to_usnavy<gh_stars>1-10
import pandas as pd
import numpy as np
import datatable as dt
import re
"""
Basic pre-processing of Twitter text from SemEval2017 data set.
"""
# replace repeating characters so that only 2 repeats remain
def repoo(x):
    """Collapse one level of character/group repetition in *x*.

    Replaces each run ``<unit><unit x k>`` (k >= 2) with just the repeats,
    i.e. one repetition fewer.  Callers apply this repeatedly (see the loop
    below) so runs eventually shrink to at most two repeats.
    """
    repeat_regexp = re.compile(r'(\S+)(\1{2,})')
    # Fix: removed the unused local `repl` that shadowed the keyword below.
    return repeat_regexp.sub(repl=r'\2', string=x)
# Clean the SemEval2017 train/test splits and write `*_filtered.csv` copies.
file_names = ["./semeval_data/source_data/semtrain.csv", "./semeval_data/source_data/semtest.csv"]

for file_name in file_names:
    df = dt.fread(file_name).to_pandas()
    df_sampled = df.copy()
    # NOTE(review): `sample_size` is computed but never used below.
    sample_size = len(df_sampled)

    # preprocess data
    # (re is already imported at module scope; this re-import is redundant.)
    import re

    # change all pic.twitter.com to "IMAGE"
    df_sampled["text"] = df_sampled["text"].str.replace(
        'pic.twitter.com/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _IMAGE ', regex=True)

    # # get rid of some instances of IMG
    df_sampled["text"] = df_sampled["text"].str.replace(
        'https://pbs.twimg.com/media/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', 'IMAGE ',
        regex=True)

    # get rid of some instances of https://twitter.com -> to RETWEET
    df_sampled["text"] = df_sampled["text"].str.replace(
        'https://twitter.com(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _RETWEET ',
        regex=True)

    # change all URLS to "URL"
    df_sampled["text"] = df_sampled["text"].str.replace(
        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _URL ', regex=True)

    # get rid of character repeats; `repoo` removes one repetition per pass,
    # so iterate until long runs shrink to at most two repeats.
    for i in range(10):
        df_sampled["text"] = df_sampled["text"].map(lambda x: repoo(str(x)))

    # get rid of endline signs
    df_sampled["text"] = df_sampled["text"].str.replace("\n", "")

    # save to file the sampled DF
    df_sampled[["sentiment", "text"]].to_csv(f"{file_name[:-4]}_filtered.csv")
| StarcoderdataPython |
1783308 | <gh_stars>1-10
# Generated by Django 2.1.3 on 2018-12-18 16:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional `Story.solution` text field (auto-generated)."""

    dependencies = [
        ('app_story', '0002_auto_20181218_1619'),
    ]

    operations = [
        migrations.AddField(
            model_name='story',
            name='solution',
            field=models.TextField(blank=True),
        ),
    ]
3258733 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace `GroupProfile` with an `ExportProfile` model keyed to a Group."""

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
        ('jobs', '0011_auto_20150908_1225'),
    ]

    operations = [
        migrations.CreateModel(
            name='ExportProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(default='', max_length=100)),
                ('max_extent', models.IntegerField()),
                ('group', models.OneToOneField(related_name='export_profile', to='auth.Group')),
            ],
            options={
                'db_table': 'export_profiles',
                'managed': True,
            },
        ),
        # Drop the old GroupProfile model (FK removed first).
        migrations.RemoveField(
            model_name='groupprofile',
            name='group',
        ),
        migrations.DeleteModel(
            name='GroupProfile',
        ),
    ]
| StarcoderdataPython |
118852 | <gh_stars>1-10
import cv2
# Load the image as single-channel grayscale, then save a copy.
# NOTE(review): `cv2.CV_LOAD_IMAGE_GRAYSCALE` exists only in OpenCV 2.x;
# OpenCV 3+ renamed it to `cv2.IMREAD_GRAYSCALE` -- confirm the pinned version.
grayImage = cv2.imread('pic2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
cv2.imwrite('pic2Gray.png', grayImage)
| StarcoderdataPython |
3327849 | <reponame>cyrusimap/CalDAVTester
##
# Copyright (c) 2006-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Verifier that checks a multistatus response to make sure that the specified hrefs
are returned with appropriate status codes.
"""
from src.utils import processHrefSubstitutions
from xml.etree.cElementTree import ElementTree
from StringIO import StringIO
import urllib
class Verifier(object):
    """Check a WebDAV 207 multistatus response against expected href sets.

    Python 2 code (uses StringIO and urllib.unquote).  ``args`` may contain:
    okhrefs / nohrefs / badhrefs, numeric-status keyed href lists,
    count / totalcount / responsecount, prefix, ignoremissing.
    """

    def verify(self, manager, uri, response, respdata, args):
        # If no hrefs requested, then assume none should come back
        okhrefs = args.get("okhrefs", [])
        nohrefs = args.get("nohrefs", [])
        badhrefs = args.get("badhrefs", [])
        # Any purely-numeric arg key is an expected HTTP status code mapped
        # to the hrefs that must come back with that code.
        statushrefs = {}
        for arg in args.keys():
            try:
                code = int(arg)
                statushrefs.setdefault(code, []).append(args[arg])
            except ValueError:
                pass
        count = args.get("count", [])
        totalcount = args.get("totalcount", [])
        responsecount = args.get("responsecount", [])
        prefix = args.get("prefix", [])
        ignoremissing = args.get("ignoremissing", None)
        # "-" as prefix means "no prefix"; no prefix arg means the request uri.
        if len(prefix):
            prefix = prefix[0] if prefix[0] != "-" else ""
        else:
            prefix = uri
        okhrefs = processHrefSubstitutions(okhrefs, prefix)
        nohrefs = processHrefSubstitutions(nohrefs, prefix)
        badhrefs = processHrefSubstitutions(badhrefs, prefix)
        # NOTE(review): count values are eval()'d so tests can pass simple
        # arithmetic expressions — trusted test input only.
        count = [int(eval(i)) for i in count]
        totalcount = [int(eval(i)) for i in totalcount]
        responsecount = [int(eval(i)) for i in responsecount]
        # doOKBad selects which comparison mode runs below:
        # True -> ok/no/bad href sets, False -> per-status-code sets,
        # None -> neither was requested.
        if "okhrefs" in args or "nohrefs" in args or "badhrefs" in args:
            doOKBad = True
        elif statushrefs:
            doOKBad = False
        else:
            doOKBad = None

        # Process the multistatus response, extracting all hrefs
        # and comparing with the set defined for this test. Report any
        # mismatches.

        # Must have MULTISTATUS response code
        if response.status != 207:
            return False, " HTTP Status for Request: %d\n" % (response.status,)

        try:
            tree = ElementTree(file=StringIO(respdata))
        except Exception:
            return False, " HTTP response is not valid XML: %s\n" % (respdata,)

        ok_status_hrefs = []
        bad_status_hrefs = []
        # Maps numeric status code -> set of hrefs seen with that code.
        status_code_hrefs = {}
        for response in tree.findall("{DAV:}response"):

            # Get href for this response
            href = response.findall("{DAV:}href")
            if href is None or len(href) != 1:
                return False, " Incorrect/missing DAV:Href element in response"
            # Normalise: URL-decode and strip any trailing slash.
            href = urllib.unquote(href[0].text).rstrip("/")

            # Verify status
            status = response.findall("{DAV:}status")
            if len(status) == 1:
                statustxt = status[0].text
                status = False
                if statustxt.startswith("HTTP/1.1 ") and (len(statustxt) >= 10):
                    # Any 2xx counts as success.
                    status = (statustxt[9] == "2")
                    try:
                        code = int(statustxt[9:12])
                    except ValueError:
                        code = 0
            else:
                # No DAV:status — a response with propstat children is
                # treated as a success.
                propstatus = response.findall("{DAV:}propstat")
                if len(propstatus) > 0:
                    statustxt = "OK"
                    status = True
                else:
                    status = False
                code = 0

            if status:
                ok_status_hrefs.append(href)
            else:
                bad_status_hrefs.append(href)
            # Record the numeric code for the per-status-code checks below.
            status_code_hrefs.setdefault(code, set()).add(href)

        ok_result_set = set(ok_status_hrefs)
        ok_test_set = set(okhrefs)
        no_test_set = set(nohrefs)
        bad_result_set = set(bad_status_hrefs)
        bad_test_set = set(badhrefs)

        result = True
        resulttxt = ""

        # Check for count
        if len(count) == 1:
            # count excludes the collection resource itself, hence the +1.
            if len(ok_result_set) != count[0] + 1:
                result = False
                resulttxt += " %d items returned, but %d items expected" % (len(ok_result_set) - 1, count[0],)
            return result, resulttxt

        # Check for total count
        if len(totalcount) > 0:
            # Add the 2nd value to the 1st if it exists
            if len(totalcount) == 2:
                totalcount[0] += totalcount[1]
            if len(ok_result_set) != totalcount[0]:
                result = False
                resulttxt += " %d items returned, but %d items expected" % (len(ok_result_set), totalcount[0],)
            return result, resulttxt

        # Check for response count
        if len(responsecount) == 1:
            responses = len(ok_result_set) + len(bad_result_set)
            if responses != responsecount[0]:
                result = False
                resulttxt += " %d responses returned, but %d responses expected" % (responses, responsecount[0],)
            return result, resulttxt

        if doOKBad:
            # Now do set difference
            # ignoremissing suppresses the "unexpected extras" checks.
            ok_missing = ok_test_set.difference(ok_result_set)
            ok_extras = ok_result_set.difference(ok_test_set) if ignoremissing is None else set()
            no_extras = ok_result_set.intersection(no_test_set)
            bad_missing = bad_test_set.difference(bad_result_set)
            bad_extras = bad_result_set.difference(bad_test_set) if ignoremissing is None else set()
            if len(ok_missing) + len(ok_extras) + len(no_extras) + len(bad_missing) + len(bad_extras) != 0:
                if len(ok_missing) != 0:
                    l = list(ok_missing)
                    resulttxt += " %d Items not returned in report (OK):" % (len(ok_missing),)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                if len(ok_extras) != 0:
                    l = list(ok_extras)
                    resulttxt += " %d Unexpected items returned in report (OK):" % (len(ok_extras),)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                if len(no_extras) != 0:
                    l = list(no_extras)
                    resulttxt += " %d Unwanted items returned in report (OK):" % (len(no_extras),)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                if len(bad_missing) != 0:
                    l = list(bad_missing)
                    resulttxt += " %d Items not returned in report (BAD):" % (len(bad_missing),)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                if len(bad_extras) != 0:
                    l = list(bad_extras)
                    resulttxt += " %d Unexpected items returned in report (BAD):" % (len(bad_extras),)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                result = False

        # Note: this branch also runs when doOKBad is None, so with no
        # expectations at all any returned status code is "unexpected" —
        # matching the comment at the top of verify().
        if not doOKBad:
            l = list(set(statushrefs.keys()) - set(status_code_hrefs.keys()))
            if l:
                resulttxt += " %d Status Codes not returned in report:" % (len(l),)
                for i in l:
                    resulttxt += " " + str(i)
                resulttxt += "\n"
                result = False

            l = list(set(status_code_hrefs.keys()) - set(statushrefs.keys()))
            if l:
                resulttxt += " %d Unexpected Status Codes returned in report:" % (len(l),)
                for i in l:
                    resulttxt += " " + str(i)
                resulttxt += "\n"
                result = False

            for code in set(statushrefs.keys()) & set(status_code_hrefs.keys()):
                # NOTE(review): set(*statushrefs[code]) assumes exactly one
                # href list was supplied per status code — confirm upstream.
                l = list(set(*statushrefs[code]) - status_code_hrefs[code])
                if l:
                    resulttxt += " %d Items not returned in report for %d:" % (len(l), code,)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                    result = False

                l = list(status_code_hrefs[code] - set(*statushrefs[code]))
                if l:
                    resulttxt += " %d Unexpected items returned in report for %d:" % (len(l), code,)
                    for i in l:
                        resulttxt += " " + str(i)
                    resulttxt += "\n"
                    result = False

        return result, resulttxt
| StarcoderdataPython |
82759 | from kivy.uix.screenmanager import ScreenManager
class ScreenManagement(ScreenManager):
    """Application screen manager; behaviour is inherited unchanged from ScreenManager."""
    pass
| StarcoderdataPython |
3342214 | import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import serializers, status
from rest_framework.test import APIClient
from api.models import Business, Tag, Task
from business.serializers import BusinessSerializer, BusinessDetailSerializer
# List/create endpoint of the business API.
BUSINESS_URL = reverse('business:business-list')
def image_upload_url(business_id):
    """Return the image-upload URL for the business with *business_id*."""
    endpoint = 'business:business-upload-image'
    return reverse(endpoint, args=[business_id])
def detail_url(business_id):
    """Return the detail URL for the business with *business_id*."""
    route = 'business:business-detail'
    return reverse(route, args=[business_id])
def sample_tag(user, name='sales team'):
    """Create and return a Tag named *name* owned by *user*."""
    tag = Tag.objects.create(user=user, name=name)
    return tag
def sample_task(user, name='sending sales info to the order management'):
    """Create and return a Task named *name* owned by *user*."""
    task = Task.objects.create(user=user, name=name)
    return task
def sample_business(user, **params):
    """Create and return a Business for *user*; *params* override the defaults."""
    fields = {'title': 'sales'}
    fields.update(params)
    return Business.objects.create(user=user, **fields)
class PublicBusinessApiTests(TestCase):
    """Tests for the business API endpoints without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """Requests without credentials must be rejected with 401."""
        res = self.client.get(BUSINESS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateBusinessApiTests(TestCase):
    """Tests for the business API endpoints as an authenticated user."""

    def setUp(self):
        # All requests in this class run as a single authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_business(self):
        """Listing returns all businesses, newest first."""
        sample_business(user=self.user)
        sample_business(user=self.user)

        res = self.client.get(BUSINESS_URL)

        business = Business.objects.all().order_by('-id')
        serializer = BusinessSerializer(business, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_business_limited_to_user(self):
        """Listing only includes the authenticated user's businesses."""
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        sample_business(user=user2)
        sample_business(user=self.user)

        res = self.client.get(BUSINESS_URL)

        business = Business.objects.filter(user=self.user)
        serializer = BusinessSerializer(business, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)

    def test_view_business_detail(self):
        """The detail endpoint serializes tags and tasks via the detail serializer."""
        business = sample_business(user=self.user)
        business.tag.add(sample_tag(user=self.user))
        business.task.add(sample_task(user=self.user))

        url = detail_url(business.id)
        res = self.client.get(url)

        serializer = BusinessDetailSerializer(business)
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_business(self):
        """Creating a business with only a title succeeds."""
        payload = {
            'title': 'sales',
        }
        res = self.client.post(BUSINESS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        business = Business.objects.get(id=res.data['id'])
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(business, key))

    def test_create_business_with_tags(self):
        """Tags passed by id are attached to the created business."""
        tag1 = sample_tag(user=self.user, name='sales team')
        tag2 = sample_tag(user=self.user, name='order management team')
        payload = {
            'title': 'sales',
            'tag': [tag1.id, tag2.id]
        }
        res = self.client.post(BUSINESS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        business = Business.objects.get(id=res.data['id'])
        tags = business.tag.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)

    def test_create_business_with_task(self):
        """Tasks passed by id are attached to the created business."""
        task1 = sample_task(user=self.user, name='finding new customers')
        task2 = sample_task(user=self.user, name='collecting feedback from customers')
        payload = {
            'title': 'sales',
            'task': [task1.id, task2.id]
        }
        res = self.client.post(BUSINESS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        business = Business.objects.get(id=res.data['id'])
        tasks = business.task.all()
        self.assertEqual(tasks.count(), 2)
        self.assertIn(task1, tasks)
        self.assertIn(task2, tasks)

    def test_partial_update_business(self):
        """PATCH replaces only the provided fields (here: title and tags)."""
        business = sample_business(user=self.user)
        business.tag.add(sample_tag(user=self.user))
        business.task.add(sample_task(user=self.user))
        new_tag = sample_tag(user=self.user, name='sending sample request to export admin team')

        payload = { 'title': 'sales', 'tag': [new_tag.id] }
        url = detail_url(business.id)
        self.client.patch(url, payload)

        business.refresh_from_db()
        self.assertEqual(business.title, payload['title'])
        tags = business.tag.all()
        self.assertEqual(tags.count(), 1)
        self.assertIn(new_tag, tags)

    def test_full_update_business(self):
        """PUT replaces the whole object; omitted tags are cleared."""
        business = sample_business(user=self.user)
        business.tag.add(sample_tag(user=self.user))
        payload = {
            'title': 'sales',
        }
        url = detail_url(business.id)
        self.client.put(url, payload)

        business.refresh_from_db()
        self.assertEqual(business.title, payload['title'])
        tags = business.tag.all()
        self.assertEqual(tags.count(), 0)
class BusinessImageUploadTests(TestCase):
    """Tests for the image-upload endpoint and list filtering by tag/task."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)
        self.business = sample_business(user=self.user)

    def tearDown(self):
        # Remove the uploaded file so test runs don't accumulate media files.
        self.business.image.delete()

    def test_upload_image_to_business(self):
        """Uploading a valid image stores the file and returns 200."""
        url = image_upload_url(self.business.id)
        # Write a minimal 10x10 JPEG to a temp file to use as the upload body.
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='jpeg')
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')

        self.business.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.business.image.path))

    def test_upload_image_bad_request(self):
        """Uploading a non-image payload is rejected with 400."""
        url = image_upload_url(self.business.id)
        res = self.client.post(url, {'image': 'notImage'}, format='multipart')

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_business_by_tags(self):
        """?tag=<id>,<id> returns only businesses carrying one of those tags."""
        business1 = sample_business(user=self.user, title='skim milk')
        business2 = sample_business(user=self.user, title='butter')
        tag1 = sample_tag(user=self.user, name='sales manager')
        tag2 = sample_tag(user=self.user, name='order management officer')
        business1.tag.add(tag1)
        business2.tag.add(tag2)
        business3 = sample_business(user=self.user, title='whole milk')

        res = self.client.get(
            BUSINESS_URL,
            {'tag': f'{tag1.id},{tag2.id}'}
        )

        serializer1 = BusinessSerializer(business1)
        serializer2 = BusinessSerializer(business2)
        serializer3 = BusinessSerializer(business3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_business_by_task(self):
        """?task=<id>,<id> returns only businesses carrying one of those tasks."""
        business1 = sample_business(user=self.user, title='butter')
        business2 = sample_business(user=self.user, title='skim milk')
        task1 = sample_task(user=self.user, name='finding a new customer')
        task2 = sample_task(user=self.user, name='analysing sales trends')
        business1.task.add(task1)
        business2.task.add(task2)
        business3 = sample_business(user=self.user, title='entering orders')

        res = self.client.get(
            BUSINESS_URL,
            {'task': f'{task1.id},{task2.id}'}
        )

        serializer1 = BusinessSerializer(business1)
        serializer2 = BusinessSerializer(business2)
        serializer3 = BusinessSerializer(business3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| StarcoderdataPython |
118444 | <filename>Bot/extensions/fun/level.py
import hikari
import lightbulb
import utils
from imports import *
from Bot.DataBase.levelsys import DBLevel
from easy_pil import Editor, Font, load_image_async, Canvas
# Maps a component custom_id to its two button labels:
# index 0 = label shown while the setting is off, index 1 = while it is on.
buttons = {
    "ison": ["Activate Level System", "Deactivate Level System"],
    "doubleexp": ["Activate Double Exp", "Deactivate Double Exp"],
}
async def build_setting_rows(bot, settings):
    """Build the component rows for the settings view.

    One row of toggle buttons (one per known key in ``buttons``) plus a
    select menu for the XP multiplier.  Button colour/emoji reflect the
    current value of each setting (0 = off, 1 = on).
    """
    rows: t.List[lightbulb.ActionRow] = []
    row = bot.rest.build_action_row()
    i = 0
    for button in settings:
        label = buttons.get(button)
        # Only settings with a registered button pair become components.
        if label:
            i += 1
            # Discord allows at most 5 buttons per row; start a new row
            # after every 5th button.
            if i % 5 == 0 and i != 0:
                rows.append(row)
                row = bot.rest.build_action_row()
            if settings[button] == 0:
                label = label[0]
            else:
                label = label[1]
            (
                row.add_button(
                    hikari.ButtonStyle.SUCCESS if settings[button] == 0 else hikari.ButtonStyle.DANGER,
                    button
                )
                .set_label(label)
                .set_emoji("✅" if settings[button] == 0 else "⛔")
                .add_to_container()
            )
    rows.append(row)
    # Second row: the XP-multiplier select menu.
    row = bot.rest.build_action_row()
    select = row.add_select_menu("change_exp_multiplier")
    select.set_placeholder("Select Exp Multiplier")
    select.add_option("0.25", "0.25").set_description("25% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("0.5", "0.5").set_description("50% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("0.75", "0.75").set_description("75% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("1", "1").set_description("Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("1.25", "1.25").set_description("125% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("1.5", "1.5").set_description("150% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("1.75", "1.75").set_description("175% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_option("2", "2").set_description("200% of the Normal Exp").set_emoji("➡").add_to_menu()
    select.add_to_container()
    rows.append(row)
    return rows
# Plugin object that owns every listener and command in this extension.
level_plugin = lightbulb.Plugin("fun.level")
def getmaxexp(lvl):
    """Return the total XP needed to finish level *lvl* (5*lvl^2 + 50*lvl + 100)."""
    return 5 * (lvl ** 2) + (50 * lvl) + 100
def getlevelprogress(lvl, exp):
    """Return the rounded percentage (0-100) of progress through level *lvl* at *exp* XP."""
    required = 5 * (lvl ** 2) + (50 * lvl) + 100
    percent = round((exp / required) * 100, 0)
    return int(percent)
def checknewlvl(authorid, guildid):
    # Delegate to the DB layer; returns the result of DBLevel.checkLVL
    # (used below as truthy = the member just levelled up).
    return (DBLevel(level_plugin.app.db).checkLVL(authorid, guildid))
@level_plugin.listener(hikari.GuildMessageCreateEvent)
async def on_message(event: hikari.GuildMessageCreateEvent):
    """Award XP for a human guild message, honouring the guild's level settings."""
    if not event.is_human: return
    if event.member.is_bot: return
    # Base award is a random 10-25 XP, scaled by the guild settings below.
    exp = random.randint(10, 25)
    # Ensure the guild has a row in the guilds table before reading settings.
    res = DBLevel(event.app.db).isindatabaseguilds(event.get_guild().id)
    if not res:
        DBLevel(event.app.db).addtoguilds(event.get_guild().id)
    settings = DBLevel(event.app.db).get_settings(event.get_guild().id)
    if not settings: return
    if settings["ison"] == 0: return
    exp = exp * settings["xpmult"]
    if settings["doubleexp"] == 1:
        exp = exp * 2
    res = DBLevel(event.app.db).isindatabase(event.member.id, event.guild_id)
    if not res:
        # First message from this member in this guild: create their row.
        DBLevel(event.app.db).add(event.member.id, exp, event.get_guild().id, event.member.username,
                                  str(event.member.make_avatar_url()))
        return
    # 60-second XP cooldown; res[3] is presumably the last-award timestamp
    # stored by the DB layer — confirm against DBLevel.
    if not (int(time.time()) - int(datetime.datetime.timestamp(res[3]))) > 60: return
    # NOTE(review): debug print left in place intentionally untouched.
    print('new_exp')
    DBLevel(event.app.db).addEXP(event.member.id, event.get_guild().id, exp)
    newlvl = checknewlvl(event.member.id, event.get_guild().id)
    if newlvl:
        # Announce the level-up in every configured channel.
        channels = DBLevel(event.app.db).getlvlupchannels(event.guild_id)
        for i in channels:
            channel = event.get_guild().get_channel(i)
            data = DBLevel(event.app.db).isindatabase(event.member.id, event.guild_id)
            await channel.send(
                settings["lvlmsg"].format(
                    user=event.member.username,
                    usermention=event.member.mention,
                    level=newlvl,
                    xp=data[2],
                    oldlevel=newlvl - 1,
                    nextlevelxp=getmaxexp(newlvl),
                )
            )
@level_plugin.command()
@lightbulb.check_exempt(utils.mod_check)
@lightbulb.command("level", "the level system")
@lightbulb.implements(lightbulb.PrefixCommandGroup, lightbulb.SlashCommandGroup)
async def level(ctx):
    """Parent command group for the level system; all work happens in subcommands.

    Bug fix: lightbulb invokes command callbacks as ``callback(ctx)``, so the
    original stray ``self`` parameter made any bare ``/level`` invocation fail
    with a TypeError.
    """
@level.child()
@lightbulb.check_exempt(utils.mod_check)
@lightbulb.command("channels", "see all the levelup channels")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def channels(ctx):
    """List the level-up announcement channels configured for this guild."""
    res = DBLevel(ctx.app.db).getlvlupchannels(ctx.guild_id)
    if not res:
        embed = hikari.Embed(title="❌ Error", description="No channels set", color=utils.Color.red().__str__(),
                             timestamp=utils.get_time())
        # Slash invocations get an ephemeral reply; prefix invocations a
        # short-lived normal message.
        if ctx.interaction:
            await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
        else:
            await ctx.respond(embed=embed, delete_after=5)
        return
    embed = hikari.Embed(title="✅ Levelup-Channels",
                         description="\n".join([ctx.get_guild().get_channel(i).mention for i in res]),
                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
    if ctx.interaction:
        await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
    else:
        await ctx.respond(embed=embed, delete_after=5)
@level.child()
@lightbulb.check_exempt(utils.mod_check)
@lightbulb.option("channel", "the channel to add", type=hikari.TextableGuildChannel, required=True)
@lightbulb.command("setchannel", "set a levelup channel")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def setchannel(ctx):
    """Add a channel to the guild's level-up announcement channels."""
    res = DBLevel(ctx.app.db).getlvlupchannels(ctx.guild_id)
    channel = ctx.options.channel
    # Reject duplicates so the same channel isn't announced to twice.
    if channel.id in res:
        embed = hikari.Embed(title="❌ Error",
                             description="This channel is already set",
                             color=utils.Color.red().__str__(), timestamp=utils.get_time())
        if ctx.interaction:
            await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
        else:
            await ctx.respond(embed=embed, delete_after=5)
        return
    DBLevel(ctx.app.db).add_lvlup_channel(ctx.guild_id, channel.id)
    embed = hikari.Embed(title="✅ Success",
                         description=f"{ctx.get_guild().get_channel(channel.id).mention} has been added to the levelup channels",
                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
    if ctx.interaction:
        await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
    else:
        await ctx.respond(embed=embed, delete_after=5)
    return
@level.child()
@lightbulb.check_exempt(utils.mod_check)
@lightbulb.option("channel", "the channel to remove", type=hikari.TextableGuildChannel, required=True)
@lightbulb.command("removechannel", "remove a levelup channel")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def removechannel(ctx):
    """Remove a channel from the guild's level-up announcement channels."""
    res = DBLevel(ctx.app.db).getlvlupchannels(ctx.guild_id)
    channel = ctx.options.channel
    if channel.id not in res:
        embed = hikari.Embed(title="❌ Error",
                             description="This channel is not set",
                             color=utils.Color.red().__str__(), timestamp=utils.get_time())
        if ctx.interaction:
            await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
        else:
            await ctx.respond(embed=embed, delete_after=5)
        return
    DBLevel(ctx.app.db).remove_lvlup_channels(ctx.guild_id, channel.id)
    embed = hikari.Embed(title="✅ Success",
                         description=f"{ctx.get_guild().get_channel(channel.id).mention} has been removed from the levelup channels",
                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
    if ctx.interaction:
        await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
    else:
        await ctx.respond(embed=embed, delete_after=5)
    return
@level.child()
@lightbulb.check_exempt(utils.mod_check)
@lightbulb.command("settings", "settings for levelsystem")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def settings(ctx):
    """Show the guild's level-system settings with interactive toggle components."""
    settings = DBLevel(ctx.app.db).get_settings(ctx.guild_id)
    if not settings:
        embed = hikari.Embed(title="❌ Error",
                             description="There are no settings for this server",
                             color=utils.Color.red().__str__(), timestamp=utils.get_time())
        if ctx.interaction:
            await ctx.respond(embed=embed, flags=hikari.MessageFlag.EPHEMERAL)
        else:
            await ctx.respond(embed=embed, delete_after=5)
        return
    embed = hikari.Embed(title="Level Settings",
                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
    levelup_channels = "\n".join([ctx.get_guild().get_channel(x).name for x in settings["channels"]])
    embed.add_field("Levelsysten Status", "```✅ - on```" if settings["ison"] == 1 else "```❌ - off```", inline=True)
    embed.add_field("XP Multiplier", "```{}```".format(settings["xpmult"]), inline=True)
    embed.add_field("Double XP", "```✅ - on```" if settings["doubleexp"] == 1 else "```❌ - off```", inline=True)
    embed.add_field("Levelup Channels", "```{}```".format(levelup_channels) if levelup_channels else "```❌ - empty```",
                    inline=True)
    # TODO: add levelup Message
    # embed.add_field("Levelup Message","```{}```".format(settings["levelup_message"]) if settings["levelup_message"] else "```❌ - empty```")
    # The buttons/select menu are handled in on_interaction_create below.
    rows = await build_setting_rows(ctx.app, settings)
    await ctx.respond(embed=embed, components=rows)
@level.child()
@lightbulb.command("leaderboard", "leaderboard for levelsystem")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def leaderboard(ctx):
    """Show the guild's top-10 level leaderboard.

    Each row shows position, username, level (column 5 of the DB row) and
    XP (column 2).  Members not cached locally are fetched via REST.
    """
    res = DBLevel(ctx.app.db).gettop10(ctx.guild_id)
    users = ""
    for i, usr in enumerate(res):
        # Bug fix: the original `if not i > 9: break` broke out on the very
        # first iteration (0 > 9 is False), so the board was always empty.
        # Stop only after the first 10 entries have been rendered.
        if i > 9:
            break
        member = ctx.get_guild().get_member(usr[1])
        if not member:
            member = await ctx.app.rest.fetch_user(usr[1])
        users += f"`{i + 1}.` {member.username} - Lvl: {usr[5]} - Exp: {usr[2]}\n"
    users += 'You can see the full leaderboard [here](http://172.16.58.3:13488/)'
    embed = hikari.Embed(title="Level Leaderboard",
                         description=users,
                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
    await ctx.respond(embed=embed)
@level.child()
@lightbulb.command("help", "help for levelsystem")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def help(ctx):
    """Send the multi-embed help text for the level system."""
    embed = hikari.Embed(title="✅ Level System Help",
                         description="This is the help for the level system",
                         color=utils.Color.green().__str__())
    embed.add_field("How does it work?",
                    "When a User sends a Message, he gets XP. When he reaches a certain amount of XP, he gets a message in the levelup channel and levels up.\n"
                    "By Default there is a 60 second Cooldown before you get XP again. This means that a User can spam as much as he wants, but only gets XP every 60 seconds."
                    "You can change this with the level Cooldown command.\n"
                    "```/level cooldown <seconds>```", inline=False)
    embed.add_field("How do I add a levelup channel?",
                    "```/level addchannel <channel>```", inline=False)
    embed.add_field("How do I remove a levelup channel?",
                    "```/level removechannel <channel>```", inline=False)
    embed.add_field("The Level Settings",
                    "```/level settings```\n"
                    "You can change the settings for the level system here.\n"
                    "By clicking on the Level System Button you can either turn it on or off.\n"
                    "By clicking on the Double XP Button you can either turn it on or off.\n"
                    "You can also Select a Custom Exp Multiplier by clicking on `Select Exp Multiplier` and choosing the wanted Multiplier\n"
                    , inline=False)
    # Second embed only carries a screenshot of the settings view.
    embed2 = hikari.Embed(color=utils.Color.green().__str__())
    embed2.set_image("https://cdn.discordapp.com/attachments/948961120662728724/960141932754001981/unknown.png")
    embed3 = hikari.Embed(color=utils.Color.green().__str__())
    embed3.add_field("How can i see my or others Level?",
                     "```/level <user>\n/rank <user>```", inline=False)
    embed3.add_field("How to edit the Levelup Message?",
                     "```/level editmessage```", inline=False)
    embed3.add_field("How to see current custom Levelup Message?",
                     "```/level message```", inline=False)
    embed3.add_field("Which Parameter can I use in the Levelup Message?",
                     "```{user} - The User who leveled up\n{usermention} - The Users Mention who leveled up\n{level} - The Level the User reached\n{xp} - The XP the User has\n{oldlevel} - The old Level of the User\n{nextlevelxp} - The XP the User needs to reach the next Level```",
                     inline=False)
    if ctx.interaction:
        await ctx.respond(embeds=[embed, embed2, embed3], flags=hikari.MessageFlag.EPHEMERAL)
    else:
        await ctx.respond(embeds=[embed, embed2, embed3], delete_after=5)
@level.child()
@lightbulb.option("member", "The Member you want to see the Level of", hikari.Member, required=False)
@lightbulb.command("rank", "Shows the Level of a Member")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def level(ctx):
    """Render a rank card image (name, level, XP, progress bar) for a member.

    NOTE(review): this callback reuses the name ``level``, shadowing the
    command-group function defined above at module level — confirm this is
    intentional (the command itself is registered as "rank").
    """
    member: hikari.Member = ctx.options.member if ctx.options.member else ctx.author
    data = DBLevel(ctx.app.db).isindatabase(member.id, ctx.guild_id)
    if not data:
        # Unknown member: create a zero-XP row, then re-read it.
        DBLevel(ctx.app.db).add(member.id, 0, ctx.guild_id, member.username, str(member.make_avatar_url()))
        data = DBLevel(ctx.app.db).isindatabase(member.id, ctx.guild_id)
    # DB row layout: [2]=xp, [5]=level, [6]=username (per the usages here).
    user_data = {
        "xp": data[2],
        "level": data[5],
        "next_level_exp": getmaxexp(data[5]),
        "percentage": getlevelprogress(data[5], data[2]),
        "name": data[6],
    }
    avatar = member.make_avatar_url()
    if not avatar:
        # Fall back to Discord's default avatar when the member has none.
        avatar = "https://cdn.discordapp.com/embed/avatars/0.png"
    avatar = str(avatar)
    background = await load_image_async(
        "https://cdn.discordapp.com/attachments/909399946669744178/934857056542933103/Levelcard.png")
    background = Editor(background).resize((900, 300))
    # background = Editor(Canvas((900,300),color="#141414"))
    profile_picture = await load_image_async(avatar)
    profile = Editor(profile_picture).resize((150, 150)).circle_image()
    poppins = Font.poppins(size=40)
    poppins_small = Font.poppins(size=20)
    # Dark polygon on the right side of the card, then avatar + progress bar.
    card_right_shape = [(600, 0), (750, 300), (900, 300), (900, 0)]
    background.polygon(card_right_shape, color="#282828")
    background.paste(profile, (30, 30))
    background.rectangle((30, 220), width=650, height=40, color="#FFFFFF", radius=20)
    background.bar((30, 220), max_width=650, height=40, percentage=user_data["percentage"], color="#282828", radius=20)
    background.text((200, 40), user_data["name"], font=poppins, color="#FFFFFF")
    background.rectangle((200, 100), width=350, height=2, fill="#FFFFFF")
    background.text(
        (200, 130),
        f"Level - {user_data['level']} | XP - {user_data['xp']}/{user_data['next_level_exp']}",
        font=poppins_small,
        color="#FFFFFF"
    )
    embed = hikari.Embed(color=utils.Color.green().__str__())
    embed.set_image(background.image_bytes)
    # NOTE(review): filename is currently unused (the save call below is
    # commented out); the image is attached directly from memory.
    filename = f"{ctx.author.id}-{ctx.guild_id}.png"
    # background.save(f"../data/Images/Cache/(unknown)")
    if ctx.interaction:
        await ctx.respond(flags=hikari.MessageFlag.EPHEMERAL, attachment=background.image_bytes)
    else:
        await ctx.respond(delete_after=5, attachment=background.image_bytes)
@level_plugin.listener(hikari.events.InteractionCreateEvent)
async def on_interaction_create(event: hikari.events.InteractionCreateEvent):
    """Handle the settings-view components: toggle buttons and the multiplier menu.

    Both branches require the member to pass the mod check AND hold
    ADMINISTRATOR; after updating the DB they send an ephemeral confirmation
    and re-render the original settings message in place.
    """
    e = event
    if isinstance(e.interaction, hikari.ComponentInteraction):
        i: hikari.ComponentInteraction = e.interaction
        if not i.guild_id: return
        if i.custom_id in buttons:
            res = utils.mod_check_without_ctx(e.app, i.guild_id, i.member)
            if not res or not i.member.permissions & hikari.Permissions.ADMINISTRATOR:
                embed = hikari.Embed(title="❌ Error",
                                     description="You are not allowed to use this",
                                     color=utils.Color.red().__str__(), timestamp=utils.get_time())
                await i.create_initial_response(hikari.ResponseType.MESSAGE_CREATE, embed=embed,
                                                flags=hikari.MessageFlag.EPHEMERAL)
                return
            else:
                settings = DBLevel(e.app.db).get_settings(i.guild_id)
                if i.custom_id in buttons:
                    # Flip the 0/1 toggle; the sentinel string guards against
                    # unexpected stored values (anything but 0/1 -> no-op).
                    old_value = settings[i.custom_id]
                    new_value = "abcdefghijklmnopqrstuvwxyz"
                    match old_value:
                        case 0:
                            new_value = 1
                        case 1:
                            new_value = 0
                    if new_value == "abcdefghijklmnopqrstuvwxyz": return
                    DBLevel(e.app.db).update_settings(i.guild_id, i.custom_id, new_value)
                    settings = DBLevel(e.app.db).get_settings(i.guild_id)
                    embed = hikari.Embed(title="✅ Success",
                                         description="The setting have been updated",
                                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
                    embed.add_field(f"Updated: {i.custom_id}",
                                    f"``` {'❌ - off' if new_value == 0 else '✅ - on'}```",
                                    inline=True)
                    await i.create_initial_response(hikari.ResponseType.MESSAGE_CREATE, embed=embed,
                                                    flags=hikari.MessageFlag.EPHEMERAL)
                    # Rebuild and edit the original settings message so it
                    # reflects the new values.
                    embed = hikari.Embed(title="Level Settings",
                                         color=utils.Color.green().__str__(), timestamp=utils.get_time())
                    levelup_channels = "\n".join([i.get_guild().get_channel(x).name for x in settings["channels"]])
                    embed.add_field("Levelsysten Status", "```✅ - on```" if settings["ison"] == 1 else "```❌ - off```",
                                    inline=True)
                    embed.add_field("XP Multiplier", "```{}```".format(settings["xpmult"]), inline=True)
                    embed.add_field("Double XP", "```✅ - on```" if settings["doubleexp"] == 1 else "```❌ - off```",
                                    inline=True)
                    embed.add_field("Levelup Channels",
                                    "```{}```".format(levelup_channels) if levelup_channels else "```❌ - empty```",
                                    inline=True)
                    rows = await build_setting_rows(e.app, settings)
                    await i.message.edit(embed=embed, components=rows)
        elif i.custom_id == "change_exp_multiplier":
            res = utils.mod_check_without_ctx(e.app, i.guild_id, i.member)
            if not res or not i.member.permissions & hikari.Permissions.ADMINISTRATOR:
                embed = hikari.Embed(title="❌ Error",
                                     description="You are not allowed to use this",
                                     color=utils.Color.red().__str__(), timestamp=utils.get_time())
                await i.create_initial_response(hikari.ResponseType.MESSAGE_CREATE, embed=embed,
                                                flags=hikari.MessageFlag.EPHEMERAL)
                return
            else:
                # Selected option values are the multiplier strings ("0.25".."2").
                new_mult = i.values[0]
                new_mult = float(new_mult)
                DBLevel(e.app.db).update_settings(i.guild_id, "xpmult", new_mult)
                settings = DBLevel(e.app.db).get_settings(i.guild_id)
                embed = hikari.Embed(title="✅ Success",
                                     description="The settings have been updated",
                                     color=utils.Color.green().__str__(), timestamp=utils.get_time())
                embed.add_field(f"Updated: XP Multiplier",
                                f"```{new_mult}```",
                                inline=True)
                await i.create_initial_response(hikari.ResponseType.MESSAGE_CREATE, embed=embed,
                                                flags=hikari.MessageFlag.EPHEMERAL)
                embed = hikari.Embed(title="Level Settings",
                                     color=utils.Color.green().__str__(), timestamp=utils.get_time())
                levelup_channels = "\n".join([i.get_guild().get_channel(x).name for x in settings["channels"]])
                embed.add_field("Levelsysten Status", "```✅ - on```" if settings["ison"] == 1 else "```❌ - off```",
                                inline=True)
                embed.add_field("XP Multiplier", "```{}```".format(settings["xpmult"]), inline=True)
                embed.add_field("Double XP", "```✅ - on```" if settings["doubleexp"] == 1 else "```❌ - off```",
                                inline=True)
                embed.add_field("Levelup Channels",
                                "```{}```".format(levelup_channels) if levelup_channels else "```❌ - empty```",
                                inline=True)
                rows = await build_setting_rows(e.app, settings)
                await i.message.edit(embed=embed, components=rows)
def load(bot):
    """lightbulb extension entry point: register this plugin on the bot."""
    bot.add_plugin(level_plugin)
def unload(bot):
    """Extension exit point: deregister the level plugin from the bot."""
    bot.remove_plugin(level_plugin)
| StarcoderdataPython |
1626469 | from jupyterhub.auth import Authenticator
from tornado import gen
from traitlets import (
Unicode,
Int
)
import pymysql
from passlib.hash import phpass
class WordPressAuthenticator(Authenticator):
    """JupyterHub authenticator backed by a WordPress user database.

    Fetches the phpass password hash stored in the WordPress
    ``<prefix>users`` table and verifies the submitted password with passlib.
    """

    # Connection settings for the WordPress MySQL database.
    dbhost = Unicode("localhost", config=True, help="URL or IP address of the database server")
    dbport = Int(3306, min=1, max=65535, config=True, help="port of the database server")
    dbuser = Unicode(config=True, help="user name to access your wordpress database")
    dbpassword = Unicode(config=True, help="password to access your wordpress database")
    dbname = Unicode("wordpress", config=True, help="database name that your wordpress uses")
    table_prefix = Unicode("wp_", config=True, help="table prefix for your wordpress")

    @gen.coroutine
    def authenticate(self, handler, data):
        """Return the username when the password matches, otherwise None.

        ``data`` is the login form dict with 'username' and 'password' keys.
        """
        args = {}
        args["host"] = self.dbhost
        # Bug fix: the dbport trait was configured but never passed to pymysql.
        args["port"] = self.dbport
        args["user"] = self.dbuser
        # Bug fix: restored the mangled self.dbpassword reference (the
        # original line was corrupted and did not parse).
        args["password"] = self.dbpassword
        args["db"] = self.dbname
        args["charset"] = "utf8mb4"
        args["cursorclass"] = pymysql.cursors.Cursor
        # NOTE(review): in pymysql < 1.0 `with connection` yields a cursor as
        # used here; newer pymysql yields the connection itself — confirm the
        # pinned pymysql version.
        with pymysql.connect(**args) as cursor:
            sql = "SELECT " \
                  "`user_pass` " \
                  "FROM " \
                  "`{0}users` " \
                  "WHERE " \
                  "`user_login` = %s" \
                  .format(self.table_prefix)
            if cursor.execute(sql, (data["username"], )) == 0:
                # Unknown user: no row matched the login name.
                return None
            if phpass.verify(data["password"], cursor.fetchone()[0]):
                return data["username"]
            return None
| StarcoderdataPython |
3237457 | <reponame>kushalmangtani/grouping-content
'''
https://coderpad.io/MGMYMWXZ
Problem defination:
This function would traverse dirPath and return a mapping of
(f -> a list of files that have the exact same content as f)
Examples:
/
/a.txt
/b.bin
/c.jpg
/dir1/j.whatever
/dir2/subdir1/q.whatever
/dir2/subdir1/j.whatever
If a.txt == b.bin (content-wise):
One possible:
a.txt -> [b.bin, c.jpg, /dir2/subdir1/j.whatever]
dir1/j.whatever -> []
dir2/subdir1/q.whatever -> []
Another:
b.bin -> [a.txt, c.jpg, /dir2/subdir1/j.whatever]
dir1/j.whatever -> []
dir2/subdir1/q.whatever -> []
You're allowed to use:
- listDir(dir: Path): List[Path] // list all files in dir
- isDir(path: Path): Boolean //
- you can read a file (byte-by-byte or content) // readFile(f)
'''
def list_all_files(path):
    """Recursively flatten a directory tree into a list of file paths.

    NOTE(review): iterates ``path`` directly, so the caller must already pass
    an iterable of entries — the problem statement's listDir(dirPath) helper
    is never called here; confirm against the intended driver.
    NOTE(review): relies on an ``is_dir`` helper that is not defined in this
    file (the statement names it isDir) — confirm the binding.
    """
    files = []
    for p in path:
        # is a dir: recurse and splice the nested files into the result
        if(is_dir(p)):
            files = files + list_all_files(p)
        # is a file: collect it as-is
        else:
            files.append(p)
    return files
def is_same(file1, file2):
    """Return True if the two files have identical contents.

    Reads both files fully and compares the raw text; a content hash (md5)
    would only be an optimization for repeated comparisons of large files.
    """
    with open(file1) as f:
        file1_content = f.read()
    # Bug fix: the original re-opened file1 here, so every file compared
    # equal to itself and the comparison of file2 never happened.  It also
    # evaluated the undefined names `file1_content, file2_content` up front,
    # raising NameError before any file was read.
    with open(file2) as f:
        file2_content = f.read()
    return file1_content == file2_content
# This method has Time Complexity : O(n^2). It can be improved to O(n) by using a auxillary data structure like priority queue.
def groupFilesByContent(dirPath):
    """Group the files under dirPath by identical content.

    Returns a dict mapping one representative file path to the list of the
    OTHER paths whose content equals it; files with unique content map to an
    empty list.  Empty and single-file trees are handled naturally.

    Time complexity is O(n^2) pairwise content comparisons; hashing each file
    once (e.g. md5) and grouping by digest would bring this down to O(n).
    """
    grouped_files = {}
    file_list = list_all_files(dirPath)
    for candidate in file_list:
        for representative in grouped_files:
            if is_same(representative, candidate):
                # Same content as an existing group's representative.
                grouped_files[representative].append(candidate)
                break
        else:
            # No group matched: candidate becomes a new representative.
            grouped_files[candidate] = []
    # Bug fixes vs. the original: list.size() does not exist (len() semantics
    # folded away), the invalid `return d[k] = []` statement is gone, paths
    # are no longer used to index file_list, and the grouping is returned.
    return grouped_files
def main():
    """Entry point: group the files under the sample directory by content."""
    groupFilesByContent("/Users/kmangtani/")
# Bug fix: the original compared against the bare name __main__ (NameError at
# runtime); the module-name sentinel must be the string "__main__".
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1666415 | # -*- coding: utf-8 -*-
import torch
# Addition, syntax 1: the overloaded + operator allocates a new result tensor.
x = torch.ones(5, 3)
y = torch.rand(5, 3)
print(x + y)
# Addition, syntax 2: torch.add with out= writes into preallocated storage.
x = torch.ones(5, 3)
y = torch.rand(5, 3)
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
# Addition, syntax 3: in-place — the trailing underscore mutates y itself.
x = torch.ones(5, 3)
y = torch.rand(5, 3)
y.add_(x)
print(y)
# Resizing: view() reshapes without copying; -1 lets torch infer that dim.
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)
print(x.size())
print(y.size())
print(z.size())
# One-element tensor: .item() extracts the value as a plain Python number.
x = torch.randn(1)
print(x)
print(x.item())
| StarcoderdataPython |
1675942 | <reponame>HelloHiGw/sim_v1.0
import point
def real2grid(rpoint_list, ratio):
    """Convert points from real-world coordinates to grid coordinates.

    :param rpoint_list: points in the real coordinate system
    :param ratio: real-world length covered by one grid cell
    :return: list of grid-coordinate points (1-based cell indices)
    """
    # Each real coordinate maps to the 1-based index of the cell containing it.
    return [point.Point(rp.x // ratio + 1, rp.y // ratio + 1)
            for rp in rpoint_list]
def grid2real(gpoint_list, ratio):
    """Convert grid-coordinate points (cell centers) to real coordinates.

    :param gpoint_list: points in the grid coordinate system
    :param ratio: real-world length covered by one grid cell
    :return: list of [x, y] pairs for the center of each cell
    """
    rpoint_list = []
    for gp in gpoint_list:
        # Cell k spans ((k-1)*ratio, k*ratio); its center is k*ratio - ratio//2.
        rp_x = gp.x * ratio - ratio // 2
        rp_y = gp.y * ratio - ratio // 2
        rpoint_list.append([rp_x, rp_y])
    # Bug fix: the original return line had stray text fused onto it, which
    # made the module unparsable.
    return rpoint_list
3309935 | import sys
import os
from pathlib import Path
from datetime import datetime
import logging
import time
from . import command_line_interface as cli
from . import maxquant as mq
from . import simsi_output
from . import thermo_raw as raw
from . import maracluster as cluster
from . import tmt_processing
from . import transfer
from . import evidence
from . import version
# Resolved once at import time from pyproject.toml.
__version__ = version.get_version_from_pyproject()
__copyright__ = '''Copyright (c) 2021-2022 <NAME> & <NAME>. All rights reserved. Written by <NAME> (<EMAIL>) and <NAME> (<EMAIL>) at the Chair of Proteomics and Bioanalytics at the Technical University of Munich.'''
# Module-level logger; main() attaches a file handler to the package logger.
logger = logging.getLogger(__name__)
def main(argv):
    """Run the full SIMSI-Transfer pipeline.

    Steps: parse CLI args, set up file logging, convert .raw files to mzML,
    cluster spectra with MaRaCluster, read the MaxQuant result tables, then
    per stringency p-value: merge, transfer identities across clusters, and
    export the SIMSI msmsscans/msms/evidence tables.

    Args:
        argv: command-line arguments (excluding the program name).
    """
    mq_txt_folder, raw_folder, pvals, output_folder, num_threads, tmt_correction_file, ms_level, tmt_requantify = cli.parse_args(
        argv)
    if not output_folder.is_dir():
        output_folder.mkdir(parents=True)
    # Attach a UTC-timestamped file handler to the parent package logger so
    # every submodule's log output also lands in SIMSI.log.
    module_name = ".".join(__name__.split(".")[:-1])
    file_logger = logging.FileHandler(output_folder / Path('SIMSI.log'))
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    formatter.converter = time.gmtime
    file_logger.setFormatter(formatter)
    logging.getLogger(module_name).addHandler(file_logger)
    starttime = datetime.now()
    # Echo version, copyright and all input parameters for reproducibility.
    logger.info(f'SIMSI-Transfer version {__version__}')
    logger.info(f'{__copyright__}')
    logger.info(f'Issued command: {os.path.basename(__file__)} {" ".join(map(str, argv))}')
    logger.info(f'Input parameters:')
    logger.info(f"MaxQuant txt folder = {mq_txt_folder}")
    logger.info(f"Raw file folder = {raw_folder}")
    logger.info(f"Stringencies = {','.join(map(str, pvals))}")
    logger.info(f"Output folder = {output_folder}")
    logger.info(f"Number of threads = {num_threads}")
    logger.info(f"TMT correction file = {tmt_correction_file}")
    logger.info(f"TMT MS level = {ms_level}")
    logger.info('')
    logger.info(f'Starting SIMSI-Transfer')
    logger.info('')
    # Stage 1: convert vendor .raw files to open-format mzML.
    logger.info(f'Converting .raw files')
    mzml_folder = output_folder / Path('mzML')
    mzml_files = raw.convert_raw_mzml_batch(raw_folder, mzml_folder, num_threads)
    # Stage 2: cluster the spectra at every requested stringency.
    logger.info(f'Clustering .mzML files')
    cluster_result_folder = output_folder / Path('maracluster_output')
    cluster.cluster_mzml_files(mzml_files, pvals, cluster_result_folder, num_threads)
    # Stage 3: load MaxQuant outputs; optionally re-extract TMT reporters.
    logger.info(f'Reading in MaxQuant msmsscans.txt file')
    msmsscans_mq, tmt = mq.read_msmsscans_txt(mq_txt_folder, tmt_requantify)
    if tmt_requantify:
        logger.info(f'Extracting correct reporter ion intensities from .mzML files')
        extracted_folder = output_folder / Path('extracted')
        tmt_processing.extract_tmt_reporters(mzml_files, extracted_folder, tmt_correction_file, num_threads)
        corrected_tmt = tmt_processing.assemble_corrected_tmt_table(extracted_folder)
        msmsscans_mq = tmt_processing.merge_with_corrected_tmt(msmsscans_mq, corrected_tmt)
    logger.info(f'Reading in MaxQuant msms.txt file and filtering out decoy hits')
    msms_mq = mq.read_msms_txt(mq_txt_folder)
    # TODO: check if we should also transfer decoys
    msms_mq = msms_mq[msms_mq['Reverse'] != '+']
    logger.info(f'Reading in MaxQuant evidence.txt file and filtering out decoy hits')
    evidence_mq = mq.read_evidence_txt(mq_txt_folder)
    evidence_mq = evidence_mq[evidence_mq['Reverse'] != '+']
    rawfile_metadata = mq.get_rawfile_metadata(evidence_mq)
    logger.info(f'Reading in MaxQuant allPeptides.txt file')
    allpeptides_mq = mq.read_allpeptides_txt(mq_txt_folder)
    statistics = dict()
    # Stage 4: one transfer pass per clustering stringency ("p<value>").
    for pval in ['p' + str(i) for i in pvals]:
        logger.info('')
        logger.info(f'Starting MaxQuant and MaRaCluster file merge for {pval}.')
        cluster_results = cluster.read_cluster_results(cluster_result_folder, pval)
        annotated_clusters = simsi_output.annotate_clusters(msmsscans_mq, msms_mq, rawfile_metadata, cluster_results)
        # Free the raw cluster table early; only the annotated view is needed.
        del cluster_results
        simsi_output.export_annotated_clusters(annotated_clusters, output_folder, pval)
        logger.info(f'Finished file merge.')
        logger.info(f'Starting cluster-based identity transfer for {pval}.')
        annotated_clusters = transfer.flag_ambiguous_clusters(annotated_clusters)
        msmsscans_simsi = transfer.transfer(annotated_clusters)
        simsi_output.export_msmsscans(msmsscans_simsi, output_folder, pval)
        logger.info(f'Finished identity transfer.')
        logger.info(f'Building SIMSI-Transfer msms.txt file for {pval}.')
        msms_simsi = simsi_output.remove_unidentified_scans(msmsscans_simsi)
        simsi_output.export_msms(msms_simsi, output_folder, pval)
        logger.info(f'Finished SIMSI-Transfer msms.txt assembly.')
        statistics[pval] = simsi_output.count_clustering_parameters(msms_simsi)
        logger.info(f'Starting SIMSI-Transfer evidence.txt building for {pval}.')
        evidence_simsi = evidence.build_evidence(msms_simsi, evidence_mq, allpeptides_mq, tmt)
        simsi_output.export_simsi_evidence_file(evidence_simsi, output_folder, pval)
        logger.info(f'Finished SIMSI-Transfer evidence.txt building.')
        logger.info('')
    endtime = datetime.now()
    logger.info(f'Successfully finished transfers for all stringencies.')
    logger.info('')
    logger.info(f"SIMSI-Transfer finished in {(endtime - starttime).total_seconds()} seconds (wall clock).")
if __name__ == '__main__':
    # Forward the CLI arguments (without the program name) to the pipeline.
    main(sys.argv[1:])
| StarcoderdataPython |
145780 | <gh_stars>10-100
import os
import re
import random
import torch
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
import pandas as pd
def pad_tensor(adj_nodes_list, mask=False):
    """Zero-pad variable-length neighbour lists into a rectangular tensor.

    Args:
        adj_nodes_list (list): per-node lists of neighbour ids (variable length).
        mask (bool, optional): if True, also return a 1/0 tensor marking real
            entries (1) versus padding (0). Defaults to False.

    Returns:
        torch.Tensor or tuple: the padded (num_nodes, max_len) tensor, plus
        the mask tensor when ``mask`` is True.
    """
    # Bug fix: default=0 makes an empty input return an empty tensor instead
    # of max() raising ValueError on an empty sequence.
    max_len = max((len(adj_nodes) for adj_nodes in adj_nodes_list), default=0)
    padded_nodes = []
    pad_mask = []
    for adj_nodes in adj_nodes_list:
        pad = max_len - len(adj_nodes)
        padded_nodes.append(list(adj_nodes) + [0] * pad)
        pad_mask.append([1] * len(adj_nodes) + [0] * pad)
    if not mask:
        return torch.tensor(padded_nodes)
    # returning the mask as well
    return torch.tensor(padded_nodes), torch.tensor(pad_mask)
def base_modified_neighbours(adj_nodes_list, idx_mapping):
    """Remap every node id in a list of neighbour lists.

    Args:
        adj_nodes_list (list): list of lists of node ids
        idx_mapping (dict): original node id -> remapped node id

    Returns:
        list: list of lists with each id replaced by its mapping
    """
    return [[idx_mapping[node] for node in neighbours]
            for neighbours in adj_nodes_list]
def get_rel_ids(adj_lists, neigh_sizes, node_ids):
    """Collect the relation ids reachable from the given seed nodes.

    Arguments:
        adj_lists {dict} -- node id -> list of (neighbour_id, rel_id, weight) triples
        neigh_sizes {list} -- per-hop sample sizes for the neighbourhood expansion
        node_ids {list} -- the initial train node ids (left unmodified)

    Returns:
        set -- the set of relation ids that take part in training
    """
    all_rels = []
    # Bug fix: copy instead of aliasing so the caller's node_ids list is not
    # mutated by the appends below.
    nodes = list(node_ids)
    for sample_size in neigh_sizes:
        to_neighs = [adj_lists[node] for node in nodes]
        # Keep the sample_size strongest neighbours (triples sorted by their
        # third field, descending); smaller neighbourhoods are taken whole.
        sampled = [sorted(to_neigh, key=lambda x: x[2], reverse=True)[:sample_size]
                   if len(to_neigh) >= sample_size else to_neigh for to_neigh in to_neighs]
        # NOTE: nodes accumulates across hops (seeds included) on purpose,
        # matching the original traversal; newly seen nodes feed the next hop.
        for neigh in sampled:
            for node, rel, hp in neigh:
                all_rels.append(rel)
                nodes.append(node)
    return set(all_rels)
def prune_graph(adj_lists, relations):
    """Drop every edge whose relation id is not in the training set.

    Arguments:
        adj_lists {dict} -- node id -> list of (neighbour, rel, weight) triples
        relations {set} -- relation ids to keep

    Returns:
        dict -- the same graph restricted to the given relations
    """
    return {
        node: [(neigh_node, rel, hp)
               for neigh_node, rel, hp in adj
               if rel in relations]
        for node, adj in adj_lists.items()
    }
def convert_index_to_int(adj_lists):
    """Return a copy of the adjacency dict whose node keys are ints."""
    return {int(node): neigh for node, neigh in adj_lists.items()}
def normt_spm(mx, method='in'):
    """Normalize a scipy sparse matrix.

    method='in'  : transpose, then scale each row by its inverse row sum
                   (in-degree normalization).
    method='sym' : symmetric normalization  D^-1/2 . A^T_form . D^-1/2.
    Any other method returns None (unchanged from the original contract).
    """
    if method == 'in':
        transposed = mx.transpose()
        degrees = np.array(transposed.sum(1)).flatten()
        inv_deg = np.power(degrees, -1)
        # Rows with zero degree would produce inf; zero them out instead.
        inv_deg[np.isinf(inv_deg)] = 0.
        return sp.diags(inv_deg).dot(transposed)
    if method == 'sym':
        degrees = np.array(mx.sum(1)).flatten()
        inv_sqrt = np.power(degrees, -0.5)
        inv_sqrt[np.isinf(inv_sqrt)] = 0.
        d_inv_sqrt = sp.diags(inv_sqrt)
        return mx.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt)
def spm_to_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse float32 tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # COO layout: a (2, nnz) index matrix paired with the value vector.
    coords = np.vstack((coo.row, coo.col))
    return torch.sparse.FloatTensor(
        torch.from_numpy(coords).long(),
        torch.from_numpy(coo.data),
        torch.Size(coo.shape))
def create_dirs(path):
    """Create the directory (and any missing parents) if it doesn't exist.

    Arguments:
        path {str} -- path of the directory
    """
    # exist_ok=True makes the call idempotent: no error if path already exists.
    os.makedirs(path, exist_ok=True)
def set_seed(seed):
    """Seed the numpy, random and torch RNGs for reproducible runs.

    Arguments:
        seed {int} -- the seed applied to every RNG
    """
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    # If you are using a GPU; these are safe no-ops on CPU-only builds.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# TODO: move to a better place
def mask_l2_loss(a, b, mask):
    """l2_loss restricted to the entries of a and b selected by mask."""
    return l2_loss(a[mask], b[mask])
def l2_loss(a, b):
    """Halved mean squared error: sum((a - b)^2) / (2 * len(a))."""
    diff = a - b
    return (diff ** 2).sum() / (2 * len(a))
def get_device():
    """Pick the compute device.

    Returns:
        tuple: (torch.device, cuda_device_index); the index is 0 for the
        first GPU and -1 when running on CPU.
    """
    if torch.cuda.is_available():
        return torch.device("cuda:0"), 0
    return torch.device('cpu'), -1
def save_model(model, save_path):
    """Serialize the model's parameters (state_dict) to save_path.

    Arguments:
        model -- any object exposing state_dict() (typically a torch module)
        save_path {str} -- destination file path
    """
    # TODO: test this module
    # Persists only the weights, not the full pickled module object.
    torch.save(model.state_dict(), save_path)
| StarcoderdataPython |
1627128 | # -*- coding: utf-8 -*-
"""
Mapping for Artifactory APIs to Python objects
"""
__copyright__ = "Copyright (C) 2016 Veritas Technologies LLC. All rights reserved."
| StarcoderdataPython |
1696766 | from django.apps import AppConfig
class BotAdminConfig(AppConfig):
    """Django application config for the ``bot_admin`` app."""
    # Use 64-bit auto-incrementing primary keys for this app's models.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'bot_admin'
4813221 | <filename>test_falsy.py
"""Test the falsy module."""
import pytest
import falsy
@pytest.mark.parametrize('value', [
    False,
    'false',
    'f',
    'no',
    'n',
    'none',
    'null',
    'nil',
])
def test_falsy(value):
    """Each parametrized value must be recognized as falsy by falsy.is_."""
    assert falsy.is_(value)
@pytest.mark.parametrize('value', [
    True,
])
def test_truthy(value):
    """Values outside the falsy set must not be reported as falsy.

    NOTE(review): only True is covered; consider adding more truthy samples
    (e.g. non-falsy strings, nonzero ints) — confirm falsy.is_ semantics first.
    """
    assert not falsy.is_(value)
| StarcoderdataPython |
3276714 | import requests, pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn import tree
from sklearn.model_selection import train_test_split
import numpy as np
from pandas import DataFrame
#load csv data
#fields = ['Name','Median Gross Rent(monthly)','Total Household Income(yearly)','Total population', 'Transportation To Work','Wage','Health Insurance Coverage', 'Employment Status', 'Bedrooms', 'Total Contract Rent', 'Total Rent Asked', 'Total Gross Rent', 'Aggregate Gross Rent', 'Median Value', 'Total Price Asked', 'Selected Monthly Owner Costs',
#'Monthly Housing Cost', 'Median Value 18']
# Build a pairwise location-similarity matrix from the census CSV.  For every
# ordered pair of rows, a score reflects the most specific shared location
# level (state=1 < metro=2 < county=3 < city=4); -1 marks a row paired with
# itself (same RegionID); 0 means no shared level.
df = pd.read_csv('data_census_location.csv')
df2 = pd.read_csv('data_census_location.csv')
count_zero = 0
count_one = 0
count_two = 0
indexlist = []
datalist = []
graph_index = 0
idx = 0
# NOTE(review): this is an O(n^2) full cross-product over the CSV rows; for
# large inputs a vectorized pandas merge would be far faster.
for index, row in df.iterrows():
    for index1, row1 in df2.iterrows():
        county1 = row['CountyName']
        county2 = row1['CountyName']
        metro1 = row['Metro']
        metro2 = row1['Metro']
        region1 = row['RegionID']
        region2 = row1['RegionID']
        state1 = row['State']
        state2 = row1['State']
        city1 = row['City']
        city2 = row1['City']
        # Score by the most specific shared location level; later checks
        # overwrite earlier ones, so the strongest match wins.
        graph_index = 0
        if state1 == state2:
            graph_index = 1
        if metro1 == metro2 and state1 == state2:
            graph_index = 2
        if county1 == county2 and metro1 == metro2 and state1 == state2:
            graph_index = 3
        if city1 == city2 and county1 == county2 and metro1 == metro2 and state1 == state2:
            graph_index = 4
        if region1 == region2:
            # Same region id: a row compared against itself.
            graph_index = -1
        idx = idx + 1
        datalist.append(graph_index)
        print(graph_index)
print(idx)
print(datalist)
print("The length is: ", len(datalist))
# Write one score per line; `with` guarantees the file handle is closed even
# on error (the original used a bare open()/close() pair).
with open('matrix.txt', 'w') as f:
    for i in datalist:
        f.write(str(i) + '\n')
3241966 | <reponame>qfoxic/grapher-aws
PRICING_DATABASE = {'ap-northeast-1': {'linux': {'c1.medium': 0.158,
'c1.xlarge': 0.632,
'c3.2xlarge': 0.511,
'c3.4xlarge': 1.021,
'c3.8xlarge': 2.043,
'c3.large': 0.128,
'c3.xlarge': 0.255,
'c4.2xlarge': 0.504,
'c4.4xlarge': 1.008,
'c4.8xlarge': 2.016,
'c4.large': 0.126,
'c4.xlarge': 0.252,
'c5.18xlarge': 3.852,
'c5.2xlarge': 0.428,
'c5.4xlarge': 0.856,
'c5.9xlarge': 1.926,
'c5.large': 0.107,
'c5.xlarge': 0.214,
'c5d.18xlarge': 4.392,
'c5d.2xlarge': 0.488,
'c5d.4xlarge': 0.976,
'c5d.9xlarge': 2.196,
'c5d.large': 0.122,
'c5d.xlarge': 0.244,
'cc2.8xlarge': 2.349,
'cr1.8xlarge': 4.105,
'd2.2xlarge': 1.688,
'd2.4xlarge': 3.376,
'd2.8xlarge': 6.752,
'd2.xlarge': 0.844,
'g2.2xlarge': 0.898,
'g2.8xlarge': 3.592,
'g3.16xlarge': 6.32,
'g3.4xlarge': 1.58,
'g3.8xlarge': 3.16,
'g3s.xlarge': 1.04,
'hs1.8xlarge': 5.4,
'i2.2xlarge': 2.001,
'i2.4xlarge': 4.002,
'i2.8xlarge': 8.004,
'i2.xlarge': 1.001,
'i3.16xlarge': 5.856,
'i3.2xlarge': 0.732,
'i3.4xlarge': 1.464,
'i3.8xlarge': 2.928,
'i3.large': 0.183,
'i3.metal': 5.856,
'i3.xlarge': 0.366,
'm1.large': 0.243,
'm1.medium': 0.122,
'm1.small': 0.061,
'm1.xlarge': 0.486,
'm2.2xlarge': 0.575,
'm2.4xlarge': 1.15,
'm2.xlarge': 0.287,
'm3.2xlarge': 0.77,
'm3.large': 0.193,
'm3.medium': 0.096,
'm3.xlarge': 0.385,
'm4.10xlarge': 2.58,
'm4.16xlarge': 4.128,
'm4.2xlarge': 0.516,
'm4.4xlarge': 1.032,
'm4.large': 0.129,
'm4.xlarge': 0.258,
'm5.12xlarge': 2.976,
'm5.24xlarge': 5.952,
'm5.2xlarge': 0.496,
'm5.4xlarge': 0.992,
'm5.large': 0.124,
'm5.xlarge': 0.248,
'm5d.12xlarge': 3.504,
'm5d.24xlarge': 7.008,
'm5d.2xlarge': 0.584,
'm5d.4xlarge': 1.168,
'm5d.large': 0.146,
'm5d.xlarge': 0.292,
'p2.16xlarge': 24.672,
'p2.8xlarge': 12.336,
'p2.xlarge': 1.542,
'p3.16xlarge': 33.552,
'p3.2xlarge': 4.194,
'p3.8xlarge': 16.776,
'r3.2xlarge': 0.798,
'r3.4xlarge': 1.596,
'r3.8xlarge': 3.192,
'r3.large': 0.2,
'r3.xlarge': 0.399,
'r4.16xlarge': 5.12,
'r4.2xlarge': 0.64,
'r4.4xlarge': 1.28,
'r4.8xlarge': 2.56,
'r4.large': 0.16,
'r4.xlarge': 0.32,
'r5.12xlarge': 3.648,
'r5.24xlarge': 7.296,
'r5.2xlarge': 0.608,
'r5.4xlarge': 1.216,
'r5.large': 0.152,
'r5.xlarge': 0.304,
'r5d.12xlarge': 4.176,
'r5d.24xlarge': 8.352,
'r5d.2xlarge': 0.696,
'r5d.4xlarge': 1.392,
'r5d.large': 0.174,
'r5d.xlarge': 0.348,
't1.micro': 0.026,
't2.2xlarge': 0.4864,
't2.large': 0.1216,
't2.medium': 0.0608,
't2.micro': 0.0152,
't2.nano': 0.0076,
't2.small': 0.0304,
't2.xlarge': 0.2432,
't3.2xlarge': 0.4352,
't3.large': 0.1088,
't3.medium': 0.0544,
't3.micro': 0.0136,
't3.nano': 0.0068,
't3.small': 0.0272,
't3.xlarge': 0.2176,
'x1.16xlarge': 9.671,
'x1.32xlarge': 19.341,
'x1e.16xlarge': 19.344,
'x1e.2xlarge': 2.418,
'x1e.32xlarge': 38.688,
'x1e.4xlarge': 4.836,
'x1e.8xlarge': 9.672,
'x1e.xlarge': 1.209,
'z1d.12xlarge': 5.448,
'z1d.2xlarge': 0.908,
'z1d.3xlarge': 1.362,
'z1d.6xlarge': 2.724,
'z1d.large': 0.227,
'z1d.xlarge': 0.454},
'windows': {'c1.medium': 0.258,
'c1.xlarge': 1.032,
'c3.2xlarge': 0.925,
'c3.4xlarge': 1.849,
'c3.8xlarge': 3.699,
'c3.large': 0.231,
'c3.xlarge': 0.462,
'c4.2xlarge': 0.872,
'c4.4xlarge': 1.744,
'c4.8xlarge': 3.672,
'c4.large': 0.218,
'c4.xlarge': 0.436,
'c5.18xlarge': 7.164,
'c5.2xlarge': 0.796,
'c5.4xlarge': 1.592,
'c5.9xlarge': 3.582,
'c5.large': 0.199,
'c5.xlarge': 0.398,
'c5d.18xlarge': 7.704,
'c5d.2xlarge': 0.856,
'c5d.4xlarge': 1.712,
'c5d.9xlarge': 3.852,
'c5d.large': 0.214,
'c5d.xlarge': 0.428,
'cc2.8xlarge': 2.919,
'cr1.8xlarge': 4.42,
'd2.2xlarge': 1.909,
'd2.4xlarge': 3.678,
'd2.8xlarge': 7.43,
'd2.xlarge': 0.975,
'g2.2xlarge': 1.01,
'g2.8xlarge': 3.87,
'g3.16xlarge': 9.264,
'g3.4xlarge': 2.316,
'g3.8xlarge': 4.632,
'g3s.xlarge': 1.224,
'hs1.8xlarge': 5.714,
'i2.2xlarge': 2.226,
'i2.4xlarge': 4.451,
'i2.8xlarge': 8.903,
'i2.xlarge': 1.113,
'i3.16xlarge': 8.8,
'i3.2xlarge': 1.1,
'i3.4xlarge': 2.2,
'i3.8xlarge': 4.4,
'i3.large': 0.275,
'i3.metal': 8.8,
'i3.xlarge': 0.55,
'm1.large': 0.353,
'm1.medium': 0.177,
'm1.small': 0.088,
'm1.xlarge': 0.706,
'm2.2xlarge': 0.705,
'm2.4xlarge': 1.41,
'm2.xlarge': 0.352,
'm3.2xlarge': 1.166,
'm3.large': 0.292,
'm3.medium': 0.146,
'm3.xlarge': 0.583,
'm4.10xlarge': 4.42,
'm4.16xlarge': 7.072,
'm4.2xlarge': 0.884,
'm4.4xlarge': 1.768,
'm4.large': 0.221,
'm4.xlarge': 0.442,
'm5.12xlarge': 5.184,
'm5.24xlarge': 10.368,
'm5.2xlarge': 0.864,
'm5.4xlarge': 1.728,
'm5.large': 0.216,
'm5.xlarge': 0.432,
'm5d.12xlarge': 5.712,
'm5d.24xlarge': 11.424,
'm5d.2xlarge': 0.952,
'm5d.4xlarge': 1.904,
'm5d.large': 0.238,
'm5d.xlarge': 0.476,
'p2.16xlarge': 27.616,
'p2.8xlarge': 13.808,
'p2.xlarge': 1.726,
'p3.16xlarge': 36.496,
'p3.2xlarge': 4.562,
'p3.8xlarge': 18.248,
'r3.2xlarge': 1.177,
'r3.4xlarge': 2.276,
'r3.8xlarge': 4.25,
'r3.large': 0.3,
'r3.xlarge': 0.599,
'r4.16xlarge': 8.064,
'r4.2xlarge': 1.008,
'r4.4xlarge': 2.016,
'r4.8xlarge': 4.032,
'r4.large': 0.252,
'r4.xlarge': 0.504,
'r5.12xlarge': 5.856,
'r5.24xlarge': 11.712,
'r5.2xlarge': 0.976,
'r5.4xlarge': 1.952,
'r5.large': 0.244,
'r5.xlarge': 0.488,
'r5d.12xlarge': 6.384,
'r5d.24xlarge': 12.768,
'r5d.2xlarge': 1.064,
'r5d.4xlarge': 2.128,
'r5d.large': 0.266,
'r5d.xlarge': 0.532,
't1.micro': 0.033,
't2.2xlarge': 0.5484,
't2.large': 0.1496,
't2.medium': 0.0788,
't2.micro': 0.0198,
't2.nano': 0.0099,
't2.small': 0.0396,
't2.xlarge': 0.2842,
't3.2xlarge': 0.5824,
't3.large': 0.1364,
't3.medium': 0.0728,
't3.micro': 0.0228,
't3.nano': 0.0114,
't3.small': 0.0456,
't3.xlarge': 0.2912,
'x1.16xlarge': 12.615,
'x1.32xlarge': 25.229,
'x1e.16xlarge': 22.288,
'x1e.2xlarge': 2.786,
'x1e.32xlarge': 44.576,
'x1e.4xlarge': 5.572,
'x1e.8xlarge': 11.144,
'x1e.xlarge': 1.393,
'z1d.12xlarge': 7.656,
'z1d.2xlarge': 1.276,
'z1d.3xlarge': 1.914,
'z1d.6xlarge': 3.828,
'z1d.large': 0.319,
'z1d.xlarge': 0.638}},
'ap-northeast-2': {'linux': {'c3.2xlarge': 0.46,
'c3.4xlarge': 0.919,
'c3.8xlarge': 1.839,
'c3.large': 0.115,
'c3.xlarge': 0.23,
'c4.2xlarge': 0.454,
'c4.4xlarge': 0.907,
'c4.8xlarge': 1.815,
'c4.large': 0.114,
'c4.xlarge': 0.227,
'c5.18xlarge': 3.456,
'c5.2xlarge': 0.384,
'c5.4xlarge': 0.768,
'c5.9xlarge': 1.728,
'c5.large': 0.096,
'c5.xlarge': 0.192,
'c5d.18xlarge': 3.96,
'c5d.2xlarge': 0.44,
'c5d.4xlarge': 0.88,
'c5d.9xlarge': 1.98,
'c5d.large': 0.11,
'c5d.xlarge': 0.22,
'd2.2xlarge': 1.688,
'd2.4xlarge': 3.376,
'd2.8xlarge': 6.752,
'd2.xlarge': 0.844,
'g2.2xlarge': 0.898,
'g2.8xlarge': 3.592,
'i2.2xlarge': 2.001,
'i2.4xlarge': 4.002,
'i2.8xlarge': 8.004,
'i2.xlarge': 1.001,
'i3.16xlarge': 5.856,
'i3.2xlarge': 0.732,
'i3.4xlarge': 1.464,
'i3.8xlarge': 2.928,
'i3.large': 0.183,
'i3.xlarge': 0.366,
'm3.2xlarge': 0.732,
'm3.large': 0.183,
'm3.medium': 0.091,
'm3.xlarge': 0.366,
'm4.10xlarge': 2.46,
'm4.16xlarge': 3.936,
'm4.2xlarge': 0.492,
'm4.4xlarge': 0.984,
'm4.large': 0.123,
'm4.xlarge': 0.246,
'm5.12xlarge': 2.832,
'm5.24xlarge': 5.664,
'm5.2xlarge': 0.472,
'm5.4xlarge': 0.944,
'm5.large': 0.118,
'm5.xlarge': 0.236,
'm5d.12xlarge': 3.336,
'm5d.24xlarge': 6.672,
'm5d.2xlarge': 0.556,
'm5d.4xlarge': 1.112,
'm5d.large': 0.139,
'm5d.xlarge': 0.278,
'p2.16xlarge': 23.44,
'p2.8xlarge': 11.72,
'p2.xlarge': 1.465,
'p3.16xlarge': 33.872,
'p3.2xlarge': 4.234,
'p3.8xlarge': 16.936,
'r3.2xlarge': 0.798,
'r3.4xlarge': 1.596,
'r3.8xlarge': 3.192,
'r3.large': 0.2,
'r3.xlarge': 0.399,
'r4.16xlarge': 5.12,
'r4.2xlarge': 0.64,
'r4.4xlarge': 1.28,
'r4.8xlarge': 2.56,
'r4.large': 0.16,
'r4.xlarge': 0.32,
'r5.12xlarge': 3.648,
'r5.24xlarge': 7.296,
'r5.2xlarge': 0.608,
'r5.4xlarge': 1.216,
'r5.large': 0.152,
'r5.xlarge': 0.304,
'r5d.12xlarge': 4.152,
'r5d.24xlarge': 8.304,
'r5d.2xlarge': 0.692,
'r5d.4xlarge': 1.384,
'r5d.large': 0.173,
'r5d.xlarge': 0.346,
't2.2xlarge': 0.4608,
't2.large': 0.1152,
't2.medium': 0.0576,
't2.micro': 0.0144,
't2.nano': 0.0072,
't2.small': 0.0288,
't2.xlarge': 0.2304,
't3.2xlarge': 0.416,
't3.large': 0.104,
't3.medium': 0.052,
't3.micro': 0.013,
't3.nano': 0.0065,
't3.small': 0.026,
't3.xlarge': 0.208,
'x1.16xlarge': 9.671,
'x1.32xlarge': 19.341,
'x1e.16xlarge': 19.344,
'x1e.2xlarge': 2.418,
'x1e.32xlarge': 38.688,
'x1e.4xlarge': 4.836,
'x1e.8xlarge': 9.672,
'x1e.xlarge': 1.209},
'windows': {'c3.2xlarge': 0.828,
'c3.4xlarge': 1.655,
'c3.8xlarge': 3.311,
'c3.large': 0.207,
'c3.xlarge': 0.414,
'c4.2xlarge': 0.822,
'c4.4xlarge': 1.643,
'c4.8xlarge': 3.471,
'c4.large': 0.206,
'c4.xlarge': 0.411,
'c5.18xlarge': 6.768,
'c5.2xlarge': 0.752,
'c5.4xlarge': 1.504,
'c5.9xlarge': 3.384,
'c5.large': 0.188,
'c5.xlarge': 0.376,
'c5d.18xlarge': 7.272,
'c5d.2xlarge': 0.808,
'c5d.4xlarge': 1.616,
'c5d.9xlarge': 3.636,
'c5d.large': 0.202,
'c5d.xlarge': 0.404,
'd2.2xlarge': 2.056,
'd2.4xlarge': 4.112,
'd2.8xlarge': 8.408,
'd2.xlarge': 1.028,
'g2.2xlarge': 1.266,
'g2.8xlarge': 5.064,
'i2.2xlarge': 2.369,
'i2.4xlarge': 4.738,
'i2.8xlarge': 9.476,
'i2.xlarge': 1.185,
'i3.16xlarge': 8.8,
'i3.2xlarge': 1.1,
'i3.4xlarge': 2.2,
'i3.8xlarge': 4.4,
'i3.large': 0.275,
'i3.xlarge': 0.55,
'm3.2xlarge': 1.1,
'm3.large': 0.275,
'm3.medium': 0.137,
'm3.xlarge': 0.55,
'm4.10xlarge': 4.3,
'm4.16xlarge': 6.88,
'm4.2xlarge': 0.86,
'm4.4xlarge': 1.72,
'm4.large': 0.215,
'm4.xlarge': 0.43,
'm5.12xlarge': 5.04,
'm5.24xlarge': 10.08,
'm5.2xlarge': 0.84,
'm5.4xlarge': 1.68,
'm5.large': 0.21,
'm5.xlarge': 0.42,
'm5d.12xlarge': 5.544,
'm5d.24xlarge': 11.088,
'm5d.2xlarge': 0.924,
'm5d.4xlarge': 1.848,
'm5d.large': 0.231,
'm5d.xlarge': 0.462,
'p2.16xlarge': 26.384,
'p2.8xlarge': 13.192,
'p2.xlarge': 1.649,
'p3.16xlarge': 36.816,
'p3.2xlarge': 4.602,
'p3.8xlarge': 18.408,
'r3.2xlarge': 1.166,
'r3.4xlarge': 2.332,
'r3.8xlarge': 4.664,
'r3.large': 0.292,
'r3.xlarge': 0.583,
'r4.16xlarge': 8.064,
'r4.2xlarge': 1.008,
'r4.4xlarge': 2.016,
'r4.8xlarge': 4.032,
'r4.large': 0.252,
'r4.xlarge': 0.504,
'r5.12xlarge': 5.856,
'r5.24xlarge': 11.712,
'r5.2xlarge': 0.976,
'r5.4xlarge': 1.952,
'r5.large': 0.244,
'r5.xlarge': 0.488,
'r5d.12xlarge': 6.36,
'r5d.24xlarge': 12.72,
'r5d.2xlarge': 1.06,
'r5d.4xlarge': 2.12,
'r5d.large': 0.265,
'r5d.xlarge': 0.53,
't2.2xlarge': 0.5228,
't2.large': 0.1432,
't2.medium': 0.0756,
't2.micro': 0.019,
't2.nano': 0.0095,
't2.small': 0.038,
't2.xlarge': 0.2714,
't3.2xlarge': 0.5632,
't3.large': 0.1316,
't3.medium': 0.0704,
't3.micro': 0.0222,
't3.nano': 0.0111,
't3.small': 0.0444,
't3.xlarge': 0.2816,
'x1.16xlarge': 12.615,
'x1.32xlarge': 25.229,
'x1e.16xlarge': 22.288,
'x1e.2xlarge': 2.786,
'x1e.32xlarge': 44.576,
'x1e.4xlarge': 5.572,
'x1e.8xlarge': 11.144,
'x1e.xlarge': 1.393}},
'ap-northeast-3': {'linux': {'c4.2xlarge': 0.504,
'c4.4xlarge': 1.008,
'c4.8xlarge': 2.016,
'c4.large': 0.126,
'c4.xlarge': 0.252,
'd2.2xlarge': 1.688,
'd2.4xlarge': 3.376,
'd2.8xlarge': 6.752,
'd2.xlarge': 0.844,
'i3.16xlarge': 5.856,
'i3.2xlarge': 0.732,
'i3.4xlarge': 1.464,
'i3.8xlarge': 2.928,
'i3.large': 0.183,
'i3.xlarge': 0.366,
'm4.10xlarge': 2.58,
'm4.16xlarge': 4.128,
'm4.2xlarge': 0.516,
'm4.4xlarge': 1.032,
'm4.large': 0.129,
'm4.xlarge': 0.258,
'r4.16xlarge': 5.12,
'r4.2xlarge': 0.64,
'r4.4xlarge': 1.28,
'r4.8xlarge': 2.56,
'r4.large': 0.16,
'r4.xlarge': 0.32,
't2.2xlarge': 0.4864,
't2.large': 0.1216,
't2.medium': 0.0608,
't2.micro': 0.0152,
't2.nano': 0.0076,
't2.small': 0.0304,
't2.xlarge': 0.2432},
'windows': {'c4.2xlarge': 0.872,
'c4.4xlarge': 1.744,
'c4.8xlarge': 3.672,
'c4.large': 0.218,
'c4.xlarge': 0.436,
'd2.2xlarge': 2.056,
'd2.4xlarge': 4.112,
'd2.8xlarge': 8.408,
'd2.xlarge': 1.028,
'i3.16xlarge': 8.8,
'i3.2xlarge': 1.1,
'i3.4xlarge': 2.2,
'i3.8xlarge': 4.4,
'i3.large': 0.275,
'i3.xlarge': 0.55,
'm4.10xlarge': 4.42,
'm4.16xlarge': 7.072,
'm4.2xlarge': 0.884,
'm4.4xlarge': 1.768,
'm4.large': 0.221,
'm4.xlarge': 0.442,
'r4.16xlarge': 8.064,
'r4.2xlarge': 1.008,
'r4.4xlarge': 2.016,
'r4.8xlarge': 4.032,
'r4.large': 0.252,
'r4.xlarge': 0.504,
't2.2xlarge': 0.5484,
't2.large': 0.1496,
't2.medium': 0.0788,
't2.micro': 0.0198,
't2.nano': 0.0099,
't2.small': 0.0396,
't2.xlarge': 0.2842}},
'ap-south-1': {'linux': {'c4.2xlarge': 0.4,
'c4.4xlarge': 0.8,
'c4.8xlarge': 1.6,
'c4.large': 0.1,
'c4.xlarge': 0.2,
'c5.18xlarge': 3.06,
'c5.2xlarge': 0.34,
'c5.4xlarge': 0.68,
'c5.9xlarge': 1.53,
'c5.large': 0.085,
'c5.xlarge': 0.17,
'c5d.18xlarge': 3.564,
'c5d.2xlarge': 0.396,
'c5d.4xlarge': 0.792,
'c5d.9xlarge': 1.782,
'c5d.large': 0.099,
'c5d.xlarge': 0.198,
'd2.2xlarge': 1.653,
'd2.4xlarge': 3.306,
'd2.8xlarge': 6.612,
'd2.xlarge': 0.827,
'i2.2xlarge': 1.933,
'i2.4xlarge': 3.867,
'i2.8xlarge': 7.733,
'i2.xlarge': 0.967,
'i3.16xlarge': 5.664,
'i3.2xlarge': 0.708,
'i3.4xlarge': 1.416,
'i3.8xlarge': 2.832,
'i3.large': 0.177,
'i3.xlarge': 0.354,
'm4.10xlarge': 2.1,
'm4.16xlarge': 3.36,
'm4.2xlarge': 0.42,
'm4.4xlarge': 0.84,
'm4.large': 0.105,
'm4.xlarge': 0.21,
'm5.12xlarge': 2.424,
'm5.24xlarge': 4.848,
'm5.2xlarge': 0.404,
'm5.4xlarge': 0.808,
'm5.large': 0.101,
'm5.xlarge': 0.202,
'm5d.12xlarge': 2.928,
'm5d.24xlarge': 5.856,
'm5d.2xlarge': 0.488,
'm5d.4xlarge': 0.976,
'm5d.large': 0.122,
'm5d.xlarge': 0.244,
'p2.16xlarge': 27.488,
'p2.8xlarge': 13.744,
'p2.xlarge': 1.718,
'r3.2xlarge': 0.758,
'r3.4xlarge': 1.516,
'r3.8xlarge': 3.032,
'r3.large': 0.19,
'r3.xlarge': 0.379,
'r4.16xlarge': 4.384,
'r4.2xlarge': 0.548,
'r4.4xlarge': 1.096,
'r4.8xlarge': 2.192,
'r4.large': 0.137,
'r4.xlarge': 0.274,
'r5.12xlarge': 3.12,
'r5.24xlarge': 6.24,
'r5.2xlarge': 0.52,
'r5.4xlarge': 1.04,
'r5.large': 0.13,
'r5.xlarge': 0.26,
'r5d.12xlarge': 3.624,
'r5d.24xlarge': 7.248,
'r5d.2xlarge': 0.604,
'r5d.4xlarge': 1.208,
'r5d.large': 0.151,
'r5d.xlarge': 0.302,
't2.2xlarge': 0.3968,
't2.large': 0.0992,
't2.medium': 0.0496,
't2.micro': 0.0124,
't2.nano': 0.0062,
't2.small': 0.0248,
't2.xlarge': 0.1984,
't3.2xlarge': 0.3584,
't3.large': 0.0896,
't3.medium': 0.0448,
't3.micro': 0.0112,
't3.nano': 0.0056,
't3.small': 0.0224,
't3.xlarge': 0.1792,
'x1.16xlarge': 6.881,
'x1.32xlarge': 13.762},
'windows': {'c4.2xlarge': 0.768,
'c4.4xlarge': 1.536,
'c4.8xlarge': 3.256,
'c4.large': 0.192,
'c4.xlarge': 0.384,
'c5.18xlarge': 6.372,
'c5.2xlarge': 0.708,
'c5.4xlarge': 1.416,
'c5.9xlarge': 3.186,
'c5.large': 0.177,
'c5.xlarge': 0.354,
'c5d.18xlarge': 6.876,
'c5d.2xlarge': 0.764,
'c5d.4xlarge': 1.528,
'c5d.9xlarge': 3.438,
'c5d.large': 0.191,
'c5d.xlarge': 0.382,
'd2.2xlarge': 2.021,
'd2.4xlarge': 4.042,
'd2.8xlarge': 8.268,
'd2.xlarge': 1.011,
'i2.2xlarge': 2.301,
'i2.4xlarge': 4.603,
'i2.8xlarge': 9.205,
'i2.xlarge': 1.151,
'i3.16xlarge': 8.608,
'i3.2xlarge': 1.076,
'i3.4xlarge': 2.152,
'i3.8xlarge': 4.304,
'i3.large': 0.269,
'i3.xlarge': 0.538,
'm4.10xlarge': 3.94,
'm4.16xlarge': 6.304,
'm4.2xlarge': 0.788,
'm4.4xlarge': 1.576,
'm4.large': 0.197,
'm4.xlarge': 0.394,
'm5.12xlarge': 4.632,
'm5.24xlarge': 9.264,
'm5.2xlarge': 0.772,
'm5.4xlarge': 1.544,
'm5.large': 0.193,
'm5.xlarge': 0.386,
'm5d.12xlarge': 5.136,
'm5d.24xlarge': 10.272,
'm5d.2xlarge': 0.856,
'm5d.4xlarge': 1.712,
'm5d.large': 0.214,
'm5d.xlarge': 0.428,
'p2.16xlarge': 30.432,
'p2.8xlarge': 15.216,
'p2.xlarge': 1.902,
'r3.2xlarge': 1.126,
'r3.4xlarge': 2.252,
'r3.8xlarge': 4.504,
'r3.large': 0.282,
'r3.xlarge': 0.563,
'r4.16xlarge': 7.328,
'r4.2xlarge': 0.916,
'r4.4xlarge': 1.832,
'r4.8xlarge': 3.664,
'r4.large': 0.229,
'r4.xlarge': 0.458,
'r5.12xlarge': 5.328,
'r5.24xlarge': 10.656,
'r5.2xlarge': 0.888,
'r5.4xlarge': 1.776,
'r5.large': 0.222,
'r5.xlarge': 0.444,
'r5d.12xlarge': 5.832,
'r5d.24xlarge': 11.664,
'r5d.2xlarge': 0.972,
'r5d.4xlarge': 1.944,
'r5d.large': 0.243,
'r5d.xlarge': 0.486,
't2.2xlarge': 0.4588,
't2.large': 0.1272,
't2.medium': 0.0676,
't2.micro': 0.017,
't2.nano': 0.0085,
't2.small': 0.034,
't2.xlarge': 0.2394,
't3.2xlarge': 0.5056,
't3.large': 0.1172,
't3.medium': 0.0632,
't3.micro': 0.0204,
't3.nano': 0.0102,
't3.small': 0.0408,
't3.xlarge': 0.2528,
'x1.16xlarge': 9.825,
'x1.32xlarge': 19.65}},
'ap-southeast-1': {'linux': {'c1.medium': 0.164,
'c1.xlarge': 0.655,
'c3.2xlarge': 0.529,
'c3.4xlarge': 1.058,
'c3.8xlarge': 2.117,
'c3.large': 0.132,
'c3.xlarge': 0.265,
'c4.2xlarge': 0.462,
'c4.4xlarge': 0.924,
'c4.8xlarge': 1.848,
'c4.large': 0.115,
'c4.xlarge': 0.231,
'c5.18xlarge': 3.528,
'c5.2xlarge': 0.392,
'c5.4xlarge': 0.784,
'c5.9xlarge': 1.764,
'c5.large': 0.098,
'c5.xlarge': 0.196,
'c5d.18xlarge': 4.032,
'c5d.2xlarge': 0.448,
'c5d.4xlarge': 0.896,
'c5d.9xlarge': 2.016,
'c5d.large': 0.112,
'c5d.xlarge': 0.224,
'd2.2xlarge': 1.74,
'd2.4xlarge': 3.48,
'd2.8xlarge': 6.96,
'd2.xlarge': 0.87,
'g2.2xlarge': 1.0,
'g2.8xlarge': 4.0,
'g3.16xlarge': 6.68,
'g3.4xlarge': 1.67,
'g3.8xlarge': 3.34,
'hs1.8xlarge': 5.57,
'i2.2xlarge': 2.035,
'i2.4xlarge': 4.07,
'i2.8xlarge': 8.14,
'i2.xlarge': 1.018,
'i3.16xlarge': 5.984,
'i3.2xlarge': 0.748,
'i3.4xlarge': 1.496,
'i3.8xlarge': 2.992,
'i3.large': 0.187,
'i3.xlarge': 0.374,
'm1.large': 0.233,
'm1.medium': 0.117,
'm1.small': 0.058,
'm1.xlarge': 0.467,
'm2.2xlarge': 0.592,
'm2.4xlarge': 1.183,
'm2.xlarge': 0.296,
'm3.2xlarge': 0.784,
'm3.large': 0.196,
'm3.medium': 0.098,
'm3.xlarge': 0.392,
'm4.10xlarge': 2.5,
'm4.16xlarge': 4.0,
'm4.2xlarge': 0.5,
'm4.4xlarge': 1.0,
'm4.large': 0.125,
'm4.xlarge': 0.25,
'm5.12xlarge': 2.88,
'm5.24xlarge': 5.76,
'm5.2xlarge': 0.48,
'm5.4xlarge': 0.96,
'm5.large': 0.12,
'm5.xlarge': 0.24,
'm5a.12xlarge': 2.592,
'm5a.24xlarge': 5.184,
'm5a.2xlarge': 0.432,
'm5a.4xlarge': 0.864,
'm5a.large': 0.108,
'm5a.xlarge': 0.216,
'm5d.12xlarge': 3.384,
'm5d.24xlarge': 6.768,
'm5d.2xlarge': 0.564,
'm5d.4xlarge': 1.128,
'm5d.large': 0.141,
'm5d.xlarge': 0.282,
'p2.16xlarge': 27.488,
'p2.8xlarge': 13.744,
'p2.xlarge': 1.718,
'p3.16xlarge': 33.872,
'p3.2xlarge': 4.234,
'p3.8xlarge': 16.936,
'r3.2xlarge': 0.798,
'r3.4xlarge': 1.596,
'r3.8xlarge': 3.192,
'r3.large': 0.2,
'r3.xlarge': 0.399,
'r4.16xlarge': 5.12,
'r4.2xlarge': 0.64,
'r4.4xlarge': 1.28,
'r4.8xlarge': 2.56,
'r4.large': 0.16,
'r4.xlarge': 0.32,
'r5.12xlarge': 3.648,
'r5.24xlarge': 7.296,
'r5.2xlarge': 0.608,
'r5.4xlarge': 1.216,
'r5.large': 0.152,
'r5.xlarge': 0.304,
'r5a.12xlarge': 3.264,
'r5a.24xlarge': 6.528,
'r5a.2xlarge': 0.544,
'r5a.4xlarge': 1.088,
'r5a.large': 0.136,
'r5a.xlarge': 0.272,
'r5d.12xlarge': 4.176,
'r5d.24xlarge': 8.352,
'r5d.2xlarge': 0.696,
'r5d.4xlarge': 1.392,
'r5d.large': 0.174,
'r5d.xlarge': 0.348,
't1.micro': 0.02,
't2.2xlarge': 0.4672,
't2.large': 0.1168,
't2.medium': 0.0584,
't2.micro': 0.0146,
't2.nano': 0.0073,
't2.small': 0.0292,
't2.xlarge': 0.2336,
't3.2xlarge': 0.4224,
't3.large': 0.1056,
't3.medium': 0.0528,
't3.micro': 0.0132,
't3.nano': 0.0066,
't3.small': 0.0264,
't3.xlarge': 0.2112,
'x1.16xlarge': 9.671,
'x1.32xlarge': 19.341,
'z1d.12xlarge': 5.424,
'z1d.2xlarge': 0.904,
'z1d.3xlarge': 1.356,
'z1d.6xlarge': 2.712,
'z1d.large': 0.226,
'z1d.xlarge': 0.452},
'windows': {'c1.medium': 0.266,
'c1.xlarge': 1.065,
'c3.2xlarge': 0.953,
'c3.4xlarge': 1.906,
'c3.8xlarge': 3.813,
'c3.large': 0.238,
'c3.xlarge': 0.477,
'c4.2xlarge': 0.83,
'c4.4xlarge': 1.66,
'c4.8xlarge': 3.504,
'c4.large': 0.207,
'c4.xlarge': 0.415,
'c5.18xlarge': 6.84,
'c5.2xlarge': 0.76,
'c5.4xlarge': 1.52,
'c5.9xlarge': 3.42,
'c5.large': 0.19,
'c5.xlarge': 0.38,
'c5d.18xlarge': 7.344,
'c5d.2xlarge': 0.816,
'c5d.4xlarge': 1.632,
'c5d.9xlarge': 3.672,
'c5d.large': 0.204,
'c5d.xlarge': 0.408,
'd2.2xlarge': 1.961,
'd2.4xlarge': 3.782,
'd2.8xlarge': 7.638,
'd2.xlarge': 1.001,
'g2.2xlarge': 1.16,
'g2.8xlarge': 4.278,
'g3.16xlarge': 9.624,
'g3.4xlarge': 2.406,
'g3.8xlarge': 4.812,
'hs1.8xlarge': 5.901,
'i2.2xlarge': 2.337,
'i2.4xlarge': 4.674,
'i2.8xlarge': 9.348,
'i2.xlarge': 1.169,
'i3.16xlarge': 8.928,
'i3.2xlarge': 1.116,
'i3.4xlarge': 2.232,
'i3.8xlarge': 4.464,
'i3.large': 0.279,
'i3.xlarge': 0.558,
'm1.large': 0.373,
'm1.medium': 0.187,
'm1.small': 0.093,
'm1.xlarge': 0.747,
'm2.2xlarge': 0.742,
'm2.4xlarge': 1.483,
'm2.xlarge': 0.371,
'm3.2xlarge': 1.288,
'm3.large': 0.322,
'm3.medium': 0.161,
'm3.xlarge': 0.644,
'm4.10xlarge': 4.34,
'm4.16xlarge': 6.944,
'm4.2xlarge': 0.868,
'm4.4xlarge': 1.736,
'm4.large': 0.217,
'm4.xlarge': 0.434,
'm5.12xlarge': 5.088,
'm5.24xlarge': 10.176,
'm5.2xlarge': 0.848,
'm5.4xlarge': 1.696,
'm5.large': 0.212,
'm5.xlarge': 0.424,
'm5a.12xlarge': 4.8,
'm5a.24xlarge': 9.6,
'm5a.2xlarge': 0.8,
'm5a.4xlarge': 1.6,
'm5a.large': 0.2,
'm5a.xlarge': 0.4,
'm5d.12xlarge': 5.592,
'm5d.24xlarge': 11.184,
'm5d.2xlarge': 0.932,
'm5d.4xlarge': 1.864,
'm5d.large': 0.233,
'm5d.xlarge': 0.466,
'p2.16xlarge': 30.432,
'p2.8xlarge': 15.216,
'p2.xlarge': 1.902,
'p3.16xlarge': 36.816,
'p3.2xlarge': 4.602,
'p3.8xlarge': 18.408,
'r3.2xlarge': 1.25,
'r3.4xlarge': 2.363,
'r3.8xlarge': 4.6,
'r3.large': 0.313,
'r3.xlarge': 0.625,
'r4.16xlarge': 8.064,
'r4.2xlarge': 1.008,
'r4.4xlarge': 2.016,
'r4.8xlarge': 4.032,
'r4.large': 0.252,
'r4.xlarge': 0.504,
'r5.12xlarge': 5.856,
'r5.24xlarge': 11.712,
'r5.2xlarge': 0.976,
'r5.4xlarge': 1.952,
'r5.large': 0.244,
'r5.xlarge': 0.488,
'r5a.12xlarge': 5.472,
'r5a.24xlarge': 10.944,
'r5a.2xlarge': 0.912,
'r5a.4xlarge': 1.824,
'r5a.large': 0.228,
'r5a.xlarge': 0.456,
'r5d.12xlarge': 6.384,
'r5d.24xlarge': 12.768,
'r5d.2xlarge': 1.064,
'r5d.4xlarge': 2.128,
'r5d.large': 0.266,
'r5d.xlarge': 0.532,
't1.micro': 0.02,
't2.2xlarge': 0.5292,
't2.large': 0.1448,
't2.medium': 0.0764,
't2.micro': 0.0192,
't2.nano': 0.0096,
't2.small': 0.0384,
't2.xlarge': 0.2746,
't3.2xlarge': 0.5696,
't3.large': 0.1332,
't3.medium': 0.0712,
't3.micro': 0.0224,
't3.nano': 0.0112,
't3.small': 0.0448,
't3.xlarge': 0.2848,
'x1.16xlarge': 12.615,
'x1.32xlarge': 25.229,
'z1d.12xlarge': 7.632,
'z1d.2xlarge': 1.272,
'z1d.3xlarge': 1.908,
'z1d.6xlarge': 3.816,
'z1d.large': 0.318,
'z1d.xlarge': 0.636}},
'ap-southeast-2': {'linux': {'c1.medium': 0.164,
'c1.xlarge': 0.655,
'c3.2xlarge': 0.529,
'c3.4xlarge': 1.058,
'c3.8xlarge': 2.117,
'c3.large': 0.132,
'c3.xlarge': 0.265,
'c4.2xlarge': 0.522,
'c4.4xlarge': 1.042,
'c4.8xlarge': 2.085,
'c4.large': 0.13,
'c4.xlarge': 0.261,
'c5.18xlarge': 3.996,
'c5.2xlarge': 0.444,
'c5.4xlarge': 0.888,
'c5.9xlarge': 1.998,
'c5.large': 0.111,
'c5.xlarge': 0.222,
'c5d.18xlarge': 4.536,
'c5d.2xlarge': 0.504,
'c5d.4xlarge': 1.008,
'c5d.9xlarge': 2.268,
'c5d.large': 0.126,
'c5d.xlarge': 0.252,
'd2.2xlarge': 1.74,
'd2.4xlarge': 3.48,
'd2.8xlarge': 6.96,
'd2.xlarge': 0.87,
'g2.2xlarge': 0.898,
'g2.8xlarge': 3.592,
'g3.16xlarge': 7.016,
'g3.4xlarge': 1.754,
'g3.8xlarge': 3.508,
'g3s.xlarge': 1.154,
'hs1.8xlarge': 5.57,
'i2.2xlarge': 2.035,
'i2.4xlarge': 4.07,
'i2.8xlarge': 8.14,
'i2.xlarge': 1.018,
'i3.16xlarge': 5.984,
'i3.2xlarge': 0.748,
'i3.4xlarge': 1.496,
'i3.8xlarge': 2.992,
'i3.large': 0.187,
'i3.xlarge': 0.374,
'm1.large': 0.233,
'm1.medium': 0.117,
'm1.small': 0.058,
'm1.xlarge': 0.467,
'm2.2xlarge': 0.592,
'm2.4xlarge': 1.183,
'm2.xlarge': 0.296,
'm3.2xlarge': 0.745,
'm3.large': 0.186,
'm3.medium': 0.093,
'm3.xlarge': 0.372,
'm4.10xlarge': 2.5,
'm4.16xlarge': 4.0,
'm4.2xlarge': 0.5,
'm4.4xlarge': 1.0,
'm4.large': 0.125,
'm4.xlarge': 0.25,
'm5.12xlarge': 2.88,
'm5.24xlarge': 5.76,
'm5.2xlarge': 0.48,
'm5.4xlarge': 0.96,
'm5.large': 0.12,
'm5.xlarge': 0.24,
'm5d.12xlarge': 3.408,
'm5d.24xlarge': 6.816,
'm5d.2xlarge': 0.568,
'm5d.4xlarge': 1.136,
'm5d.large': 0.142,
'm5d.xlarge': 0.284,
'p2.16xlarge': 24.672,
'p2.8xlarge': 12.336,
'p2.xlarge': 1.542,
'p3.16xlarge': 33.872,
'p3.2xlarge': 4.234,
'p3.8xlarge': 16.936,
'r3.2xlarge': 0.798,
'r3.4xlarge': 1.596,
'r3.8xlarge': 3.192,
'r3.large': 0.2,
'r3.xlarge': 0.399,
'r4.16xlarge': 5.1072,
'r4.2xlarge': 0.6384,
'r4.4xlarge': 1.2768,
'r4.8xlarge': 2.5536,
'r4.large': 0.1596,
'r4.xlarge': 0.3192,
'r5.12xlarge': 3.624,
'r5.24xlarge': 7.248,
'r5.2xlarge': 0.604,
'r5.4xlarge': 1.208,
'r5.large': 0.151,
'r5.xlarge': 0.302,
'r5d.12xlarge': 4.176,
'r5d.24xlarge': 8.352,
'r5d.2xlarge': 0.696,
'r5d.4xlarge': 1.392,
'r5d.large': 0.174,
'r5d.xlarge': 0.348,
't1.micro': 0.02,
't2.2xlarge': 0.4672,
't2.large': 0.1168,
't2.medium': 0.0584,
't2.micro': 0.0146,
't2.nano': 0.0073,
't2.small': 0.0292,
't2.xlarge': 0.2336,
't3.2xlarge': 0.4224,
't3.large': 0.1056,
't3.medium': 0.0528,
't3.micro': 0.0132,
't3.nano': 0.0066,
't3.small': 0.0264,
't3.xlarge': 0.2112,
'x1.16xlarge': 9.671,
'x1.32xlarge': 19.341,
'x1e.16xlarge': 19.344,
'x1e.2xlarge': 2.418,
'x1e.32xlarge': 38.688,
'x1e.4xlarge': 4.836,
'x1e.8xlarge': 9.672,
'x1e.xlarge': 1.209},
'windows': {'c1.medium': 0.266,
'c1.xlarge': 1.065,
'c3.2xlarge': 0.953,
'c3.4xlarge': 1.906,
'c3.8xlarge': 3.813,
'c3.large': 0.238,
'c3.xlarge': 0.477,
'c4.2xlarge': 0.89,
'c4.4xlarge': 1.778,
'c4.8xlarge': 3.741,
'c4.large': 0.222,
'c4.xlarge': 0.445,
'c5.18xlarge': 7.308,
'c5.2xlarge': 0.812,
'c5.4xlarge': 1.624,
'c5.9xlarge': 3.654,
'c5.large': 0.203,
'c5.xlarge': 0.406,
'c5d.18xlarge': 7.848,
'c5d.2xlarge': 0.872,
'c5d.4xlarge': 1.744,
'c5d.9xlarge': 3.924,
'c5d.large': 0.218,
'c5d.xlarge': 0.436,
'd2.2xlarge': 1.961,
'd2.4xlarge': 3.782,
'd2.8xlarge': 7.638,
'd2.xlarge': 1.001,
'g2.2xlarge': 1.058,
'g2.8xlarge': 3.87,
'g3.16xlarge': 9.96,
'g3.4xlarge': 2.49,
'g3.8xlarge': 4.98,
'g3s.xlarge': 1.338,
'hs1.8xlarge': 5.901,
'i2.2xlarge': 2.337,
'i2.4xlarge': 4.674,
'i2.8xlarge': 9.348,
'i2.xlarge': 1.169,
'i3.16xlarge': 8.928,
'i3.2xlarge': 1.116,
'i3.4xlarge': 2.232,
'i3.8xlarge': 4.464,
'i3.large': 0.279,
'i3.xlarge': 0.558,
'm1.large': 0.373,
'm1.medium': 0.187,
'm1.small': 0.093,
'm1.xlarge': 0.747,
'm2.2xlarge': 0.742,
'm2.4xlarge': 1.483,
'm2.xlarge': 0.371,
'm3.2xlarge': 1.249,
'm3.large': 0.312,
'm3.medium': 0.156,
'm3.xlarge': 0.624,
'm4.10xlarge': 4.34,
'm4.16xlarge': 6.944,
'm4.2xlarge': 0.868,
'm4.4xlarge': 1.736,
'm4.large': 0.217,
'm4.xlarge': 0.434,
'm5.12xlarge': 5.088,
'm5.24xlarge': 10.176,
'm5.2xlarge': 0.848,
'm5.4xlarge': 1.696,
'm5.large': 0.212,
'm5.xlarge': 0.424,
'm5d.12xlarge': 5.616,
'm5d.24xlarge': 11.232,
'm5d.2xlarge': 0.936,
'm5d.4xlarge': 1.872,
'm5d.large': 0.234,
'm5d.xlarge': 0.468,
'p2.16xlarge': 27.616,
'p2.8xlarge': 13.808,
'p2.xlarge': 1.726,
'p3.16xlarge': 36.816,
'p3.2xlarge': 4.602,
'p3.8xlarge': 18.408,
'r3.2xlarge': 1.25,
'r3.4xlarge': 2.363,
'r3.8xlarge': 4.6,
'r3.large': 0.313,
'r3.xlarge': 0.625,
'r4.16xlarge': 8.0512,
'r4.2xlarge': 1.0064,
'r4.4xlarge': 2.0128,
'r4.8xlarge': 4.0256,
'r4.large': 0.2516,
'r4.xlarge': 0.5032,
'r5.12xlarge': 5.832,
'r5.24xlarge': 11.664,
'r5.2xlarge': 0.972,
'r5.4xlarge': 1.944,
'r5.large': 0.243,
'r5.xlarge': 0.486,
'r5d.12xlarge': 6.384,
'r5d.24xlarge': 12.768,
'r5d.2xlarge': 1.064,
'r5d.4xlarge': 2.128,
'r5d.large': 0.266,
'r5d.xlarge': 0.532,
't1.micro': 0.02,
't2.2xlarge': 0.5292,
't2.large': 0.1448,
't2.medium': 0.0764,
't2.micro': 0.0192,
't2.nano': 0.0096,
't2.small': 0.0384,
't2.xlarge': 0.2746,
't3.2xlarge': 0.5696,
't3.large': 0.1332,
't3.medium': 0.0712,
't3.micro': 0.0224,
't3.nano': 0.0112,
't3.small': 0.0448,
't3.xlarge': 0.2848,
'x1.16xlarge': 12.615,
'x1.32xlarge': 25.229,
'x1e.16xlarge': 22.288,
'x1e.2xlarge': 2.786,
'x1e.32xlarge': 44.576,
'x1e.4xlarge': 5.572,
'x1e.8xlarge': 11.144,
'x1e.xlarge': 1.393}},
'ca-central-1': {'linux': {'c4.2xlarge': 0.438,
'c4.4xlarge': 0.876,
'c4.8xlarge': 1.75,
'c4.large': 0.11,
'c4.xlarge': 0.218,
'c5.18xlarge': 3.348,
'c5.2xlarge': 0.372,
'c5.4xlarge': 0.744,
'c5.9xlarge': 1.674,
'c5.large': 0.093,
'c5.xlarge': 0.186,
'c5d.18xlarge': 3.816,
'c5d.2xlarge': 0.424,
'c5d.4xlarge': 0.848,
'c5d.9xlarge': 1.908,
'c5d.large': 0.106,
'c5d.xlarge': 0.212,
'd2.2xlarge': 1.518,
'd2.4xlarge': 3.036,
'd2.8xlarge': 6.072,
'd2.xlarge': 0.759,
'g3.16xlarge': 5.664,
'g3.4xlarge': 1.416,
'g3.8xlarge': 2.832,
'i3.16xlarge': 5.504,
'i3.2xlarge': 0.688,
'i3.4xlarge': 1.376,
'i3.8xlarge': 2.752,
'i3.large': 0.172,
'i3.xlarge': 0.344,
'm4.10xlarge': 2.22,
'm4.16xlarge': 3.552,
'm4.2xlarge': 0.444,
'm4.4xlarge': 0.888,
'm4.large': 0.111,
'm4.xlarge': 0.222,
'm5.12xlarge': 2.568,
'm5.24xlarge': 5.136,
'm5.2xlarge': 0.428,
'm5.4xlarge': 0.856,
'm5.large': 0.107,
'm5.xlarge': 0.214,
'm5d.12xlarge': 3.024,
'm5d.24xlarge': 6.048,
'm5d.2xlarge': 0.504,
'm5d.4xlarge': 1.008,
'm5d.large': 0.126,
'm5d.xlarge': 0.252,
'p3.16xlarge': 26.928,
'p3.2xlarge': 3.366,
'p3.8xlarge': 13.464,
'r4.16xlarge': 4.672,
'r4.2xlarge': 0.584,
'r4.4xlarge': 1.168,
'r4.8xlarge': 2.336,
'r4.large': 0.146,
'r4.xlarge': 0.292,
'r5.12xlarge': 3.312,
'r5.24xlarge': 6.624,
'r5.2xlarge': 0.552,
'r5.4xlarge': 1.104,
'r5.large': 0.138,
'r5.xlarge': 0.276,
'r5d.12xlarge': 3.792,
'r5d.24xlarge': 7.584,
'r5d.2xlarge': 0.632,
'r5d.4xlarge': 1.264,
'r5d.large': 0.158,
'r5d.xlarge': 0.316,
't2.2xlarge': 0.4096,
't2.large': 0.1024,
't2.medium': 0.0512,
't2.micro': 0.0128,
't2.nano': 0.0064,
't2.small': 0.0256,
't2.xlarge': 0.2048,
't3.2xlarge': 0.3712,
't3.large': 0.0928,
't3.medium': 0.0464,
't3.micro': 0.0116,
't3.nano': 0.0058,
't3.small': 0.0232,
't3.xlarge': 0.1856,
'x1.16xlarge': 7.336,
'x1.32xlarge': 14.672},
'windows': {'c4.2xlarge': 0.806,
'c4.4xlarge': 1.612,
'c4.8xlarge': 3.406,
'c4.large': 0.202,
'c4.xlarge': 0.402,
'c5.18xlarge': 6.66,
'c5.2xlarge': 0.74,
'c5.4xlarge': 1.48,
'c5.9xlarge': 3.33,
'c5.large': 0.185,
'c5.xlarge': 0.37,
'c5d.18xlarge': 7.128,
'c5d.2xlarge': 0.792,
'c5d.4xlarge': 1.584,
'c5d.9xlarge': 3.564,
'c5d.large': 0.198,
'c5d.xlarge': 0.396,
'd2.2xlarge': 1.886,
'd2.4xlarge': 3.772,
'd2.8xlarge': 7.728,
'd2.xlarge': 0.943,
'g3.16xlarge': 8.608,
'g3.4xlarge': 2.152,
'g3.8xlarge': 4.304,
'i3.16xlarge': 8.448,
'i3.2xlarge': 1.056,
'i3.4xlarge': 2.112,
'i3.8xlarge': 4.224,
'i3.large': 0.264,
'i3.xlarge': 0.528,
'm4.10xlarge': 4.06,
'm4.16xlarge': 6.496,
'm4.2xlarge': 0.812,
'm4.4xlarge': 1.624,
'm4.large': 0.203,
'm4.xlarge': 0.406,
'm5.12xlarge': 4.776,
'm5.24xlarge': 9.552,
'm5.2xlarge': 0.796,
'm5.4xlarge': 1.592,
'm5.large': 0.199,
'm5.xlarge': 0.398,
'm5d.12xlarge': 5.232,
'm5d.24xlarge': 10.464,
'm5d.2xlarge': 0.872,
'm5d.4xlarge': 1.744,
'm5d.large': 0.218,
'm5d.xlarge': 0.436,
'p3.16xlarge': 29.872,
'p3.2xlarge': 3.734,
'p3.8xlarge': 14.936,
'r4.16xlarge': 7.616,
'r4.2xlarge': 0.952,
'r4.4xlarge': 1.904,
'r4.8xlarge': 3.808,
'r4.large': 0.238,
'r4.xlarge': 0.476,
'r5.12xlarge': 5.52,
'r5.24xlarge': 11.04,
'r5.2xlarge': 0.92,
'r5.4xlarge': 1.84,
'r5.large': 0.23,
'r5.xlarge': 0.46,
'r5d.12xlarge': 6.0,
'r5d.24xlarge': 12.0,
'r5d.2xlarge': 1.0,
'r5d.4xlarge': 2.0,
'r5d.large': 0.25,
'r5d.xlarge': 0.5,
't2.2xlarge': 0.4716,
't2.large': 0.1304,
't2.medium': 0.0692,
't2.micro': 0.0174,
't2.nano': 0.0087,
't2.small': 0.0348,
't2.xlarge': 0.2458,
't3.2xlarge': 0.5184,
't3.large': 0.1204,
't3.medium': 0.0648,
't3.micro': 0.0208,
't3.nano': 0.0104,
't3.small': 0.0416,
't3.xlarge': 0.2592,
'x1.16xlarge': 10.28,
'x1.32xlarge': 20.56}},
'eu-central-1': {'linux': {'c3.2xlarge': 0.516,
'c3.4xlarge': 1.032,
'c3.8xlarge': 2.064,
'c3.large': 0.129,
'c3.xlarge': 0.258,
'c4.2xlarge': 0.454,
'c4.4xlarge': 0.909,
'c4.8xlarge': 1.817,
'c4.large': 0.114,
'c4.xlarge': 0.227,
'c5.18xlarge': 3.492,
'c5.2xlarge': 0.388,
'c5.4xlarge': 0.776,
'c5.9xlarge': 1.746,
'c5.large': 0.097,
'c5.xlarge': 0.194,
'c5d.18xlarge': 3.996,
'c5d.2xlarge': 0.444,
'c5d.4xlarge': 0.888,
'c5d.9xlarge': 1.998,
'c5d.large': 0.111,
'c5d.xlarge': 0.222,
'd2.2xlarge': 1.588,
'd2.4xlarge': 3.176,
'd2.8xlarge': 6.352,
'd2.xlarge': 0.794,
'g2.2xlarge': 0.772,
'g2.8xlarge': 3.088,
'g3.16xlarge': 5.7,
'g3.4xlarge': 1.425,
'g3.8xlarge': 2.85,
'g3s.xlarge': 0.938,
'i2.2xlarge': 2.026,
'i2.4xlarge': 4.051,
'i2.8xlarge': 8.102,
'i2.xlarge': 1.013,
'i3.16xlarge': 5.952,
'i3.2xlarge': 0.744,
'i3.4xlarge': 1.488,
'i3.8xlarge': 2.976,
'i3.large': 0.186,
'i3.metal': 5.952,
'i3.xlarge': 0.372,
'm3.2xlarge': 0.632,
'm3.large': 0.158,
'm3.medium': 0.079,
'm3.xlarge': 0.315,
'm4.10xlarge': 2.4,
'm4.16xlarge': 3.84,
'm4.2xlarge': 0.48,
'm4.4xlarge': 0.96,
'm4.large': 0.12,
'm4.xlarge': 0.24,
'm5.12xlarge': 2.76,
'm5.24xlarge': 5.52,
'm5.2xlarge': 0.46,
'm5.4xlarge': 0.92,
'm5.large': 0.115,
'm5.xlarge': 0.23,
'm5d.12xlarge': 3.264,
'm5d.24xlarge': 6.528,
'm5d.2xlarge': 0.544,
'm5d.4xlarge': 1.088,
'm5d.large': 0.136,
'm5d.xlarge': 0.272,
'p2.16xlarge': 21.216,
'p2.8xlarge': 10.608,
'p2.xlarge': 1.326,
'p3.16xlarge': 30.584,
'p3.2xlarge': 3.823,
'p3.8xlarge': 15.292,
'r3.2xlarge': 0.8,
'r3.4xlarge': 1.6,
'r3.8xlarge': 3.201,
'r3.large': 0.2,
'r3.xlarge': 0.4,
'r4.16xlarge': 5.1216,
'r4.2xlarge': 0.6402,
'r4.4xlarge': 1.2804,
'r4.8xlarge': 2.5608,
'r4.large': 0.16005,
'r4.xlarge': 0.3201,
'r5.12xlarge': 3.648,
'r5.24xlarge': 7.296,
'r5.2xlarge': 0.608,
'r5.4xlarge': 1.216,
'r5.large': 0.152,
'r5.xlarge': 0.304,
'r5d.12xlarge': 4.152,
'r5d.24xlarge': 8.304,
'r5d.2xlarge': 0.692,
'r5d.4xlarge': 1.384,
'r5d.large': 0.173,
'r5d.xlarge': 0.346,
't2.2xlarge': 0.4288,
't2.large': 0.1072,
't2.medium': 0.0536,
't2.micro': 0.0134,
't2.nano': 0.0067,
't2.small': 0.0268,
't2.xlarge': 0.2144,
't3.2xlarge': 0.384,
't3.large': 0.096,
't3.medium': 0.048,
't3.micro': 0.012,
't3.nano': 0.006,
't3.small': 0.024,
't3.xlarge': 0.192,
'x1.16xlarge': 9.337,
'x1.32xlarge': 18.674,
'x1e.16xlarge': 18.672,
'x1e.2xlarge': 2.334,
'x1e.32xlarge': 37.344,
'x1e.4xlarge': 4.668,
'x1e.8xlarge': 9.336,
'x1e.xlarge': 1.167},
'windows': {'c3.2xlarge': 0.848,
'c3.4xlarge': 1.696,
'c3.8xlarge': 3.392,
'c3.large': 0.212,
'c3.xlarge': 0.424,
'c4.2xlarge': 0.822,
'c4.4xlarge': 1.645,
'c4.8xlarge': 3.473,
'c4.large': 0.206,
'c4.xlarge': 0.411,
'c5.18xlarge': 6.804,
'c5.2xlarge': 0.756,
'c5.4xlarge': 1.512,
'c5.9xlarge': 3.402,
'c5.large': 0.189,
'c5.xlarge': 0.378,
'c5d.18xlarge': 7.308,
'c5d.2xlarge': 0.812,
'c5d.4xlarge': 1.624,
'c5d.9xlarge': 3.654,
'c5d.large': 0.203,
'c5d.xlarge': 0.406,
'd2.2xlarge': 1.809,
'd2.4xlarge': 3.478,
'd2.8xlarge': 7.03,
'd2.xlarge': 0.925,
'g2.2xlarge': 0.906,
'g2.8xlarge': 3.366,
'g3.16xlarge': 8.644,
'g3.4xlarge': 2.161,
'g3.8xlarge': 4.322,
'g3s.xlarge': 1.122,
'i2.2xlarge': 2.267,
'i2.4xlarge': 4.532,
'i2.8xlarge': 9.064,
'i2.xlarge': 1.133,
'i3.16xlarge': 8.896,
'i3.2xlarge': 1.112,
'i3.4xlarge': 2.224,
'i3.8xlarge': 4.448,
'i3.large': 0.278,
'i3.metal': 8.896,
'i3.xlarge': 0.556,
'm3.2xlarge': 1.136,
'm3.large': 0.284,
'm3.medium': 0.142,
'm3.xlarge': 0.567,
'm4.10xlarge': 4.24,
'm4.16xlarge': 6.784,
'm4.2xlarge': 0.848,
'm4.4xlarge': 1.696,
'm4.large': 0.212,
'm4.xlarge': 0.424,
'm5.12xlarge': 4.968,
'm5.24xlarge': 9.936,
'm5.2xlarge': 0.828,
'm5.4xlarge': 1.656,
'm5.large': 0.207,
'm5.xlarge': 0.414,
'm5d.12xlarge': 5.472,
'm5d.24xlarge': 10.944,
'm5d.2xlarge': 0.912,
'm5d.4xlarge': 1.824,
'm5d.large': 0.228,
'm5d.xlarge': 0.456,
'p2.16xlarge': 24.16,
'p2.8xlarge': 12.08,
'p2.xlarge': 1.51,
'p3.16xlarge': 33.528,
'p3.2xlarge': 4.191,
'p3.8xlarge': 16.764,
'r3.2xlarge': 1.18,
'r3.4xlarge': 2.228,
'r3.8xlarge': 4.069,
'r3.large': 0.325,
'r3.xlarge': 0.65,
'r4.16xlarge': 8.0656,
'r4.2xlarge': 1.0082,
'r4.4xlarge': 2.0164,
'r4.8xlarge': 4.0328,
'r4.large': 0.25205,
'r4.xlarge': 0.5041,
'r5.12xlarge': 5.856,
'r5.24xlarge': 11.712,
'r5.2xlarge': 0.976,
'r5.4xlarge': 1.952,
'r5.large': 0.244,
'r5.xlarge': 0.488,
'r5d.12xlarge': 6.36,
'r5d.24xlarge': 12.72,
'r5d.2xlarge': 1.06,
'r5d.4xlarge': 2.12,
'r5d.large': 0.265,
'r5d.xlarge': 0.53,
't2.2xlarge': 0.4908,
't2.large': 0.1352,
't2.medium': 0.0716,
't2.micro': 0.018,
't2.nano': 0.009,
't2.small': 0.036,
't2.xlarge': 0.2554,
't3.2xlarge': 0.5312,
't3.large': 0.1236,
't3.medium': 0.0664,
't3.micro': 0.0212,
't3.nano': 0.0106,
't3.small': 0.0424,
't3.xlarge': 0.2656,
'x1.16xlarge': 12.281,
'x1.32xlarge': 24.562,
'x1e.16xlarge': 21.616,
'x1e.2xlarge': 2.702,
'x1e.32xlarge': 43.232,
'x1e.4xlarge': 5.404,
'x1e.8xlarge': 10.808,
'x1e.xlarge': 1.351}},
'eu-north-1': {'linux': {'c5.18xlarge': 3.276,
'c5.2xlarge': 0.364,
'c5.4xlarge': 0.728,
'c5.9xlarge': 1.638,
'c5.large': 0.091,
'c5.xlarge': 0.182,
'c5d.18xlarge': 3.744,
'c5d.2xlarge': 0.416,
'c5d.4xlarge': 0.832,
'c5d.9xlarge': 1.872,
'c5d.large': 0.104,
'c5d.xlarge': 0.208,
'd2.2xlarge': 1.396,
'd2.4xlarge': 2.792,
'd2.8xlarge': 5.584,
'd2.xlarge': 0.698,
'i3.16xlarge': 5.216,
'i3.2xlarge': 0.652,
'i3.4xlarge': 1.304,
'i3.8xlarge': 2.608,
'i3.large': 0.163,
'i3.xlarge': 0.326,
'm5.12xlarge': 2.448,
'm5.24xlarge': 4.896,
'm5.2xlarge': 0.408,
'm5.4xlarge': 0.816,
'm5.large': 0.102,
'm5.xlarge': 0.204,
'm5d.12xlarge': 2.88,
'm5d.24xlarge': 5.76,
'm5d.2xlarge': 0.48,
'm5d.4xlarge': 0.96,
'm5d.large': 0.12,
'm5d.xlarge': 0.24,
'r5.12xlarge': 3.216,
'r5.24xlarge': 6.432,
'r5.2xlarge': 0.536,
'r5.4xlarge': 1.072,
'r5.large': 0.134,
'r5.xlarge': 0.268,
'r5d.12xlarge': 3.648,
'r5d.24xlarge': 7.296,
'r5d.2xlarge': 0.608,
'r5d.4xlarge': 1.216,
'r5d.large': 0.152,
'r5d.xlarge': 0.304,
't3.2xlarge': 0.3456,
't3.large': 0.0864,
't3.medium': 0.0432,
't3.micro': 0.0108,
't3.nano': 0.0054,
't3.small': 0.0216,
't3.xlarge': 0.1728},
'windows': {'c5.18xlarge': 6.588,
'c5.2xlarge': 0.732,
'c5.4xlarge': 1.464,
'c5.9xlarge': 3.294,
'c5.large': 0.183,
'c5.xlarge': 0.366,
'c5d.18xlarge': 7.056,
'c5d.2xlarge': 0.784,
'c5d.4xlarge': 1.568,
'c5d.9xlarge': 3.528,
'c5d.large': 0.196,
'c5d.xlarge': 0.392,
'd2.2xlarge': 1.764,
'd2.4xlarge': 3.528,
'd2.8xlarge': 7.24,
'd2.xlarge': 0.882,
'i3.16xlarge': 8.16,
'i3.2xlarge': 1.02,
'i3.4xlarge': 2.04,
'i3.8xlarge': 4.08,
'i3.large': 0.255,
'i3.xlarge': 0.51,
'm5.12xlarge': 4.656,
'm5.24xlarge': 9.312,
'm5.2xlarge': 0.776,
'm5.4xlarge': 1.552,
'm5.large': 0.194,
'm5.xlarge': 0.388,
'm5d.12xlarge': 5.088,
'm5d.24xlarge': 10.176,
'm5d.2xlarge': 0.848,
'm5d.4xlarge': 1.696,
'm5d.large': 0.212,
'm5d.xlarge': 0.424,
'r5.12xlarge': 5.424,
'r5.24xlarge': 10.848,
'r5.2xlarge': 0.904,
'r5.4xlarge': 1.808,
'r5.large': 0.226,
'r5.xlarge': 0.452,
'r5d.12xlarge': 5.856,
'r5d.24xlarge': 11.712,
'r5d.2xlarge': 0.976,
'r5d.4xlarge': 1.952,
'r5d.large': 0.244,
'r5d.xlarge': 0.488,
't3.2xlarge': 0.4928,
't3.large': 0.114,
't3.medium': 0.0616,
't3.micro': 0.02,
't3.nano': 0.01,
't3.small': 0.04,
't3.xlarge': 0.2464}},
'eu-west-1': {'linux': {'a1.2xlarge': 0.2304,
'a1.4xlarge': 0.4608,
'a1.large': 0.0576,
'a1.medium': 0.0288,
'a1.xlarge': 0.1152,
'c1.medium': 0.148,
'c1.xlarge': 0.592,
'c3.2xlarge': 0.478,
'c3.4xlarge': 0.956,
'c3.8xlarge': 1.912,
'c3.large': 0.12,
'c3.xlarge': 0.239,
'c4.2xlarge': 0.453,
'c4.4xlarge': 0.905,
'c4.8xlarge': 1.811,
'c4.large': 0.113,
'c4.xlarge': 0.226,
'c5.18xlarge': 3.456,
'c5.2xlarge': 0.384,
'c5.4xlarge': 0.768,
'c5.9xlarge': 1.728,
'c5.large': 0.096,
'c5.xlarge': 0.192,
'c5d.18xlarge': 3.924,
'c5d.2xlarge': 0.436,
'c5d.4xlarge': 0.872,
'c5d.9xlarge': 1.962,
'c5d.large': 0.109,
'c5d.xlarge': 0.218,
'c5n.18xlarge': 4.392,
'c5n.2xlarge': 0.488,
'c5n.4xlarge': 0.976,
'c5n.9xlarge': 2.196,
'c5n.large': 0.122,
'c5n.xlarge': 0.244,
'cc2.8xlarge': 2.25,
'cr1.8xlarge': 3.75,
'd2.2xlarge': 1.47,
'd2.4xlarge': 2.94,
'd2.8xlarge': 5.88,
'd2.xlarge': 0.735,
'f1.16xlarge': 14.52,
'f1.2xlarge': 1.815,
'f1.4xlarge': 3.63,
'g2.2xlarge': 0.702,
'g2.8xlarge': 2.808,
'g3.16xlarge': 4.84,
'g3.4xlarge': 1.21,
'g3.8xlarge': 2.42,
'g3s.xlarge': 0.796,
'h1.16xlarge': 4.152,
'h1.2xlarge': 0.519,
'h1.4xlarge': 1.038,
'h1.8xlarge': 2.076,
'hs1.8xlarge': 4.9,
'i2.2xlarge': 1.876,
'i2.4xlarge': 3.751,
'i2.8xlarge': 7.502,
'i2.xlarge': 0.938,
'i3.16xlarge': 5.504,
'i3.2xlarge': 0.688,
'i3.4xlarge': 1.376,
'i3.8xlarge': 2.752,
'i3.large': 0.172,
'i3.metal': 5.504,
'i3.xlarge': 0.344,
'm1.large': 0.19,
'm1.medium': 0.095,
'm1.small': 0.047,
'm1.xlarge': 0.379,
'm2.2xlarge': 0.55,
'm2.4xlarge': 1.1,
'm2.xlarge': 0.275,
'm3.2xlarge': 0.585,
'm3.large': 0.146,
'm3.medium': 0.073,
'm3.xlarge': 0.293,
'm4.10xlarge': 2.22,
'm4.16xlarge': 3.552,
'm4.2xlarge': 0.444,
'm4.4xlarge': 0.888,
'm4.large': 0.111,
'm4.xlarge': 0.222,
'm5.12xlarge': 2.568,
'm5.24xlarge': 5.136,
'm5.2xlarge': 0.428,
'm5.4xlarge': 0.856,
'm5.large': 0.107,
'm5.xlarge': 0.214,
'm5a.12xlarge': 2.304,
'm5a.24xlarge': 4.608,
'm5a.2xlarge': 0.384,
'm5a.4xlarge': 0.768,
'm5a.large': 0.096,
'm5a.xlarge': 0.192,
'm5d.12xlarge': 3.024,
'm5d.24xlarge': 6.048,
'm5d.2xlarge': 0.504,
'm5d.4xlarge': 1.008,
'm5d.large': 0.126,
'm5d.xlarge': 0.252,
'p2.16xlarge': 15.552,
'p2.8xlarge': 7.776,
'p2.xlarge': 0.972,
'p3.16xlarge': 26.44,
'p3.2xlarge': 3.305,
'p3.8xlarge': 13.22,
'r3.2xlarge': 0.741,
'r3.4xlarge': 1.482,
'r3.8xlarge': 2.964,
'r3.large': 0.185,
'r3.xlarge': 0.371,
'r4.16xlarge': 4.7424,
'r4.2xlarge': 0.5928,
'r4.4xlarge': 1.1856,
'r4.8xlarge': 2.3712,
'r4.large': 0.1482,
'r4.xlarge': 0.2964,
'r5.12xlarge': 3.384,
'r5.24xlarge': 6.768,
'r5.2xlarge': 0.564,
'r5.4xlarge': 1.128,
'r5.large': 0.141,
'r5.xlarge': 0.282,
'r5a.12xlarge': 3.048,
'r5a.24xlarge': 6.096,
'r5a.2xlarge': 0.508,
'r5a.4xlarge': 1.016,
'r5a.large': 0.127,
'r5a.xlarge': 0.254,
'r5d.12xlarge': 3.84,
'r5d.24xlarge': 7.68,
'r5d.2xlarge': 0.64,
'r5d.4xlarge': 1.28,
'r5d.large': 0.16,
'r5d.xlarge': 0.32,
't1.micro': 0.02,
't2.2xlarge': 0.4032,
't2.large': 0.1008,
't2.medium': 0.05,
't2.micro': 0.0126,
't2.nano': 0.0063,
't2.small': 0.025,
't2.xlarge': 0.2016,
't3.2xlarge': 0.3648,
't3.large': 0.0912,
't3.medium': 0.0456,
't3.micro': 0.0114,
't3.nano': 0.0057,
't3.small': 0.0228,
't3.xlarge': 0.1824,
'x1.16xlarge': 8.003,
'x1.32xlarge': 16.006,
'x1e.16xlarge': 16.0,
'x1e.2xlarge': 2.0,
'x1e.32xlarge': 32.0,
'x1e.4xlarge': 4.0,
'x1e.8xlarge': 8.0,
'x1e.xlarge': 1.0,
'z1d.12xlarge': 4.992,
'z1d.2xlarge': 0.832,
'z1d.3xlarge': 1.248,
'z1d.6xlarge': 2.496,
'z1d.large': 0.208,
'z1d.xlarge': 0.416},
'windows': {'c1.medium': 0.21,
'c1.xlarge': 0.84,
'c3.2xlarge': 0.752,
'c3.4xlarge': 1.504,
'c3.8xlarge': 3.008,
'c3.large': 0.188,
'c3.xlarge': 0.376,
'c4.2xlarge': 0.821,
'c4.4xlarge': 1.641,
'c4.8xlarge': 3.334,
'c4.large': 0.205,
'c4.xlarge': 0.41,
'c5.18xlarge': 6.768,
'c5.2xlarge': 0.752,
'c5.4xlarge': 1.504,
'c5.9xlarge': 3.384,
'c5.large': 0.188,
'c5.xlarge': 0.376,
'c5d.18xlarge': 7.236,
'c5d.2xlarge': 0.804,
'c5d.4xlarge': 1.608,
'c5d.9xlarge': 3.618,
'c5d.large': 0.201,
'c5d.xlarge': 0.402,
'c5n.18xlarge': 7.704,
'c5n.2xlarge': 0.856,
'c5n.4xlarge': 1.712,
'c5n.9xlarge': 3.852,
'c5n.large': 0.214,
'c5n.xlarge': 0.428,
'cc2.8xlarge': 2.57,
'cr1.8xlarge': 3.831,
'd2.2xlarge': 1.691,
'd2.4xlarge': 3.242,
'd2.8xlarge': 6.558,
'd2.xlarge': 0.866,
'g2.2xlarge': 0.767,
'g2.8xlarge': 3.086,
'g3.16xlarge': 7.784,
'g3.4xlarge': 1.946,
'g3.8xlarge': 3.892,
'g3s.xlarge': 0.98,
'h1.16xlarge': 7.096,
'h1.2xlarge': 0.887,
'h1.4xlarge': 1.774,
'h1.8xlarge': 3.548,
'hs1.8xlarge': 4.931,
'i2.2xlarge': 1.946,
'i2.4xlarge': 3.891,
'i2.8xlarge': 7.782,
'i2.xlarge': 0.973,
'i3.16xlarge': 8.448,
'i3.2xlarge': 1.056,
'i3.4xlarge': 2.112,
'i3.8xlarge': 4.224,
'i3.large': 0.264,
'i3.metal': 8.448,
'i3.xlarge': 0.528,
'm1.large': 0.299,
'm1.medium': 0.149,
'm1.small': 0.075,
'm1.xlarge': 0.598,
'm2.2xlarge': 0.69,
'm2.4xlarge': 1.38,
'm2.xlarge': 0.345,
'm3.2xlarge': 1.033,
'm3.large': 0.258,
'm3.medium': 0.129,
'm3.xlarge': 0.517,
'm4.10xlarge': 4.06,
'm4.16xlarge': 6.496,
'm4.2xlarge': 0.812,
'm4.4xlarge': 1.624,
'm4.large': 0.203,
'm4.xlarge': 0.406,
'm5.12xlarge': 4.776,
'm5.24xlarge': 9.552,
'm5.2xlarge': 0.796,
'm5.4xlarge': 1.592,
'm5.large': 0.199,
'm5.xlarge': 0.398,
'm5a.12xlarge': 4.512,
'm5a.24xlarge': 9.024,
'm5a.2xlarge': 0.752,
'm5a.4xlarge': 1.504,
'm5a.large': 0.188,
'm5a.xlarge': 0.376,
'm5d.12xlarge': 5.232,
'm5d.24xlarge': 10.464,
'm5d.2xlarge': 0.872,
'm5d.4xlarge': 1.744,
'm5d.large': 0.218,
'm5d.xlarge': 0.436,
'p2.16xlarge': 18.496,
'p2.8xlarge': 9.248,
'p2.xlarge': 1.156,
'p3.16xlarge': 29.384,
'p3.2xlarge': 3.673,
'p3.8xlarge': 14.692,
'r3.2xlarge': 1.08,
'r3.4xlarge': 1.944,
'r3.8xlarge': 3.5,
'r3.large': 0.29,
'r3.xlarge': 0.581,
'r4.16xlarge': 7.6864,
'r4.2xlarge': 0.9608,
'r4.4xlarge': 1.9216,
'r4.8xlarge': 3.8432,
'r4.large': 0.2402,
'r4.xlarge': 0.4804,
'r5.12xlarge': 5.592,
'r5.24xlarge': 11.184,
'r5.2xlarge': 0.932,
'r5.4xlarge': 1.864,
'r5.large': 0.233,
'r5.xlarge': 0.466,
'r5a.12xlarge': 5.256,
'r5a.24xlarge': 10.512,
'r5a.2xlarge': 0.876,
'r5a.4xlarge': 1.752,
'r5a.large': 0.219,
'r5a.xlarge': 0.438,
'r5d.12xlarge': 6.048,
'r5d.24xlarge': 12.096,
'r5d.2xlarge': 1.008,
'r5d.4xlarge': 2.016,
'r5d.large': 0.252,
'r5d.xlarge': 0.504,
't1.micro': 0.02,
't2.2xlarge': 0.4652,
't2.large': 0.1288,
't2.medium': 0.068,
't2.micro': 0.0172,
't2.nano': 0.0086,
't2.small': 0.034,
't2.xlarge': 0.2426,
't3.2xlarge': 0.512,
't3.large': 0.1188,
't3.medium': 0.064,
't3.micro': 0.0206,
't3.nano': 0.0103,
't3.small': 0.0412,
't3.xlarge': 0.256,
'x1.16xlarge': 10.947,
'x1.32xlarge': 21.894,
'x1e.16xlarge': 18.944,
'x1e.2xlarge': 2.368,
'x1e.32xlarge': 37.888,
'x1e.4xlarge': 4.736,
'x1e.8xlarge': 9.472,
'x1e.xlarge': 1.184,
'z1d.12xlarge': 7.2,
'z1d.2xlarge': 1.2,
'z1d.3xlarge': 1.8,
'z1d.6xlarge': 3.6,
'z1d.large': 0.3,
'z1d.xlarge': 0.6}},
'eu-west-2': {'linux': {'c4.2xlarge': 0.476,
'c4.4xlarge': 0.95,
'c4.8xlarge': 1.902,
'c4.large': 0.119,
'c4.xlarge': 0.237,
'c5.18xlarge': 3.636,
'c5.2xlarge': 0.404,
'c5.4xlarge': 0.808,
'c5.9xlarge': 1.818,
'c5.large': 0.101,
'c5.xlarge': 0.202,
'c5d.18xlarge': 4.14,
'c5d.2xlarge': 0.46,
'c5d.4xlarge': 0.92,
'c5d.9xlarge': 2.07,
'c5d.large': 0.115,
'c5d.xlarge': 0.23,
'd2.2xlarge': 1.544,
'd2.4xlarge': 3.087,
'd2.8xlarge': 6.174,
'd2.xlarge': 0.772,
'i3.16xlarge': 5.792,
'i3.2xlarge': 0.724,
'i3.4xlarge': 1.448,
'i3.8xlarge': 2.896,
'i3.large': 0.181,
'i3.xlarge': 0.362,
'm4.10xlarge': 2.32,
'm4.16xlarge': 3.712,
'm4.2xlarge': 0.464,
'm4.4xlarge': 0.928,
'm4.large': 0.116,
'm4.xlarge': 0.232,
'm5.12xlarge': 2.664,
'm5.24xlarge': 5.328,
'm5.2xlarge': 0.444,
'm5.4xlarge': 0.888,
'm5.large': 0.111,
'm5.xlarge': 0.222,
'm5d.12xlarge': 3.144,
'm5d.24xlarge': 6.288,
'm5d.2xlarge': 0.524,
'm5d.4xlarge': 1.048,
'm5d.large': 0.131,
'm5d.xlarge': 0.262,
'p3.16xlarge': 28.712,
'p3.2xlarge': 3.589,
'p3.8xlarge': 14.356,
'r4.16xlarge': 4.992,
'r4.2xlarge': 0.624,
'r4.4xlarge': 1.248,
'r4.8xlarge': 2.496,
'r4.large': 0.156,
'r4.xlarge': 0.312,
'r5.12xlarge': 3.552,
'r5.24xlarge': 7.104,
'r5.2xlarge': 0.592,
'r5.4xlarge': 1.184,
'r5.large': 0.148,
'r5.xlarge': 0.296,
'r5d.12xlarge': 4.056,
'r5d.24xlarge': 8.112,
'r5d.2xlarge': 0.676,
'r5d.4xlarge': 1.352,
'r5d.large': 0.169,
'r5d.xlarge': 0.338,
't2.2xlarge': 0.4224,
't2.large': 0.1056,
't2.medium': 0.052,
't2.micro': 0.0132,
't2.nano': 0.0066,
't2.small': 0.026,
't2.xlarge': 0.2112,
't3.2xlarge': 0.3776,
't3.large': 0.0944,
't3.medium': 0.0472,
't3.micro': 0.0118,
't3.nano': 0.0059,
't3.small': 0.0236,
't3.xlarge': 0.1888,
'x1.16xlarge': 8.403,
'x1.32xlarge': 16.806},
'windows': {'c4.2xlarge': 0.844,
'c4.4xlarge': 1.686,
'c4.8xlarge': 3.558,
'c4.large': 0.211,
'c4.xlarge': 0.421,
'c5.18xlarge': 6.948,
'c5.2xlarge': 0.772,
'c5.4xlarge': 1.544,
'c5.9xlarge': 3.474,
'c5.large': 0.193,
'c5.xlarge': 0.386,
'c5d.18xlarge': 7.452,
'c5d.2xlarge': 0.828,
'c5d.4xlarge': 1.656,
'c5d.9xlarge': 3.726,
'c5d.large': 0.207,
'c5d.xlarge': 0.414,
'd2.2xlarge': 1.912,
'd2.4xlarge': 3.823,
'd2.8xlarge': 7.83,
'd2.xlarge': 0.956,
'i3.16xlarge': 8.736,
'i3.2xlarge': 1.092,
'i3.4xlarge': 2.184,
'i3.8xlarge': 4.368,
'i3.large': 0.273,
'i3.xlarge': 0.546,
'm4.10xlarge': 4.16,
'm4.16xlarge': 6.656,
'm4.2xlarge': 0.832,
'm4.4xlarge': 1.664,
'm4.large': 0.208,
'm4.xlarge': 0.416,
'm5.12xlarge': 4.872,
'm5.24xlarge': 9.744,
'm5.2xlarge': 0.812,
'm5.4xlarge': 1.624,
'm5.large': 0.203,
'm5.xlarge': 0.406,
'm5d.12xlarge': 5.352,
'm5d.24xlarge': 10.704,
'm5d.2xlarge': 0.892,
'm5d.4xlarge': 1.784,
'm5d.large': 0.223,
'm5d.xlarge': 0.446,
'p3.16xlarge': 31.656,
'p3.2xlarge': 3.957,
'p3.8xlarge': 15.828,
'r4.16xlarge': 7.936,
'r4.2xlarge': 0.992,
'r4.4xlarge': 1.984,
'r4.8xlarge': 3.968,
'r4.large': 0.248,
'r4.xlarge': 0.496,
'r5.12xlarge': 5.76,
'r5.24xlarge': 11.52,
'r5.2xlarge': 0.96,
'r5.4xlarge': 1.92,
'r5.large': 0.24,
'r5.xlarge': 0.48,
'r5d.12xlarge': 6.264,
'r5d.24xlarge': 12.528,
'r5d.2xlarge': 1.044,
'r5d.4xlarge': 2.088,
'r5d.large': 0.261,
'r5d.xlarge': 0.522,
't2.2xlarge': 0.4844,
't2.large': 0.1336,
't2.medium': 0.0704,
't2.micro': 0.0178,
't2.nano': 0.0089,
't2.small': 0.0352,
't2.xlarge': 0.2522,
't3.2xlarge': 0.5248,
't3.large': 0.122,
't3.medium': 0.0656,
't3.micro': 0.021,
't3.nano': 0.0105,
't3.small': 0.042,
't3.xlarge': 0.2624,
'x1.16xlarge': 11.347,
'x1.32xlarge': 22.694}},
'eu-west-3': {'linux': {'c5.18xlarge': 3.636,
'c5.2xlarge': 0.404,
'c5.4xlarge': 0.808,
'c5.9xlarge': 1.818,
'c5.large': 0.101,
'c5.xlarge': 0.202,
'c5d.18xlarge': 4.14,
'c5d.2xlarge': 0.46,
'c5d.4xlarge': 0.92,
'c5d.9xlarge': 2.07,
'c5d.large': 0.115,
'c5d.xlarge': 0.23,
'd2.2xlarge': 1.544,
'd2.4xlarge': 3.088,
'd2.8xlarge': 6.176,
'd2.xlarge': 0.772,
'i3.16xlarge': 5.792,
'i3.2xlarge': 0.724,
'i3.4xlarge': 1.448,
'i3.8xlarge': 2.896,
'i3.large': 0.181,
'i3.xlarge': 0.362,
'm5.12xlarge': 2.688,
'm5.24xlarge': 5.376,
'm5.2xlarge': 0.448,
'm5.4xlarge': 0.896,
'm5.large': 0.112,
'm5.xlarge': 0.224,
'm5d.12xlarge': 3.168,
'm5d.24xlarge': 6.336,
'm5d.2xlarge': 0.528,
'm5d.4xlarge': 1.056,
'm5d.large': 0.132,
'm5d.xlarge': 0.264,
'r4.16xlarge': 4.992,
'r4.2xlarge': 0.624,
'r4.4xlarge': 1.248,
'r4.8xlarge': 2.496,
'r4.large': 0.156,
'r4.xlarge': 0.312,
'r5.12xlarge': 3.552,
'r5.24xlarge': 7.104,
'r5.2xlarge': 0.592,
'r5.4xlarge': 1.184,
'r5.large': 0.148,
'r5.xlarge': 0.296,
'r5d.12xlarge': 4.056,
'r5d.24xlarge': 8.112,
'r5d.2xlarge': 0.676,
'r5d.4xlarge': 1.352,
'r5d.large': 0.169,
'r5d.xlarge': 0.338,
't2.2xlarge': 0.4224,
't2.large': 0.1056,
't2.medium': 0.0528,
't2.micro': 0.0132,
't2.nano': 0.0066,
't2.small': 0.0264,
't2.xlarge': 0.2112,
't3.2xlarge': 0.3776,
't3.large': 0.0944,
't3.medium': 0.0472,
't3.micro': 0.0118,
't3.nano': 0.0059,
't3.small': 0.0236,
't3.xlarge': 0.1888,
'x1.16xlarge': 8.403,
'x1.32xlarge': 16.806},
'windows': {'c5.18xlarge': 6.948,
'c5.2xlarge': 0.772,
'c5.4xlarge': 1.544,
'c5.9xlarge': 3.474,
'c5.large': 0.193,
'c5.xlarge': 0.386,
'c5d.18xlarge': 7.452,
'c5d.2xlarge': 0.828,
'c5d.4xlarge': 1.656,
'c5d.9xlarge': 3.726,
'c5d.large': 0.207,
'c5d.xlarge': 0.414,
'd2.2xlarge': 1.912,
'd2.4xlarge': 3.824,
'd2.8xlarge': 7.832,
'd2.xlarge': 0.956,
'i3.16xlarge': 8.736,
'i3.2xlarge': 1.092,
'i3.4xlarge': 2.184,
'i3.8xlarge': 4.368,
'i3.large': 0.273,
'i3.xlarge': 0.546,
'm5.12xlarge': 4.896,
'm5.24xlarge': 9.792,
'm5.2xlarge': 0.816,
'm5.4xlarge': 1.632,
'm5.large': 0.204,
'm5.xlarge': 0.408,
'm5d.12xlarge': 5.376,
'm5d.24xlarge': 10.752,
'm5d.2xlarge': 0.896,
'm5d.4xlarge': 1.792,
'm5d.large': 0.224,
'm5d.xlarge': 0.448,
'r4.16xlarge': 7.936,
'r4.2xlarge': 0.992,
'r4.4xlarge': 1.984,
'r4.8xlarge': 3.968,
'r4.large': 0.248,
'r4.xlarge': 0.496,
'r5.12xlarge': 5.76,
'r5.24xlarge': 11.52,
'r5.2xlarge': 0.96,
'r5.4xlarge': 1.92,
'r5.large': 0.24,
'r5.xlarge': 0.48,
'r5d.12xlarge': 6.264,
'r5d.24xlarge': 12.528,
'r5d.2xlarge': 1.044,
'r5d.4xlarge': 2.088,
'r5d.large': 0.261,
'r5d.xlarge': 0.522,
't2.2xlarge': 0.4844,
't2.large': 0.1336,
't2.medium': 0.0708,
't2.micro': 0.0178,
't2.nano': 0.0089,
't2.small': 0.0356,
't2.xlarge': 0.2522,
't3.2xlarge': 0.5248,
't3.large': 0.122,
't3.medium': 0.0656,
't3.micro': 0.021,
't3.nano': 0.0105,
't3.small': 0.042,
't3.xlarge': 0.2624,
'x1.16xlarge': 11.347,
'x1.32xlarge': 22.694}},
'sa-east-1': {'linux': {'c1.medium': 0.179,
'c1.xlarge': 0.718,
'c3.2xlarge': 0.65,
'c3.4xlarge': 1.3,
'c3.8xlarge': 2.6,
'c3.large': 0.163,
'c3.xlarge': 0.325,
'c4.2xlarge': 0.618,
'c4.4xlarge': 1.235,
'c4.8xlarge': 2.47,
'c4.large': 0.155,
'c4.xlarge': 0.309,
'c5.18xlarge': 4.716,
'c5.2xlarge': 0.524,
'c5.4xlarge': 1.048,
'c5.9xlarge': 2.358,
'c5.large': 0.131,
'c5.xlarge': 0.262,
'i3.16xlarge': 9.152,
'i3.2xlarge': 1.144,
'i3.4xlarge': 2.288,
'i3.8xlarge': 4.576,
'i3.large': 0.286,
'i3.xlarge': 0.572,
'm1.large': 0.233,
'm1.medium': 0.117,
'm1.small': 0.058,
'm1.xlarge': 0.467,
'm2.2xlarge': 0.645,
'm2.4xlarge': 1.291,
'm2.xlarge': 0.323,
'm3.2xlarge': 0.761,
'm3.large': 0.19,
'm3.medium': 0.095,
'm3.xlarge': 0.381,
'm4.10xlarge': 3.18,
'm4.16xlarge': 5.088,
'm4.2xlarge': 0.636,
'm4.4xlarge': 1.272,
'm4.large': 0.159,
'm4.xlarge': 0.318,
'm5.12xlarge': 3.672,
'm5.24xlarge': 7.344,
'm5.2xlarge': 0.612,
'm5.4xlarge': 1.224,
'm5.large': 0.153,
'm5.xlarge': 0.306,
'r3.2xlarge': 1.399,
'r3.4xlarge': 2.799,
'r3.8xlarge': 5.597,
'r3.large': 0.35,
'r3.xlarge': 0.7,
'r4.16xlarge': 8.96,
'r4.2xlarge': 1.12,
'r4.4xlarge': 2.24,
'r4.8xlarge': 4.48,
'r4.large': 0.28,
'r4.xlarge': 0.56,
't1.micro': 0.027,
't2.2xlarge': 0.5952,
't2.large': 0.1488,
't2.medium': 0.0744,
't2.micro': 0.0186,
't2.nano': 0.0093,
't2.small': 0.0372,
't2.xlarge': 0.2976,
't3.2xlarge': 0.5376,
't3.large': 0.1344,
't3.medium': 0.0672,
't3.micro': 0.0168,
't3.nano': 0.0084,
't3.small': 0.0336,
't3.xlarge': 0.2688,
'x1.16xlarge': 13.005,
'x1.32xlarge': 26.01},
'windows': {'c1.medium': 0.259,
'c1.xlarge': 1.038,
'c3.2xlarge': 0.982,
'c3.4xlarge': 1.964,
'c3.8xlarge': 3.928,
'c3.large': 0.246,
'c3.xlarge': 0.491,
'c4.2xlarge': 0.986,
'c4.4xlarge': 1.971,
'c4.8xlarge': 4.126,
'c4.large': 0.247,
'c4.xlarge': 0.493,
'c5.18xlarge': 8.028,
'c5.2xlarge': 0.892,
'c5.4xlarge': 1.784,
'c5.9xlarge': 4.014,
'c5.large': 0.223,
'c5.xlarge': 0.446,
'i3.16xlarge': 12.096,
'i3.2xlarge': 1.512,
'i3.4xlarge': 3.024,
'i3.8xlarge': 6.048,
'i3.large': 0.378,
'i3.xlarge': 0.756,
'm1.large': 0.357,
'm1.medium': 0.179,
'm1.small': 0.089,
'm1.xlarge': 0.715,
'm2.2xlarge': 0.845,
'm2.4xlarge': 1.691,
'm2.xlarge': 0.423,
'm3.2xlarge': 1.265,
'm3.large': 0.316,
'm3.medium': 0.158,
'm3.xlarge': 0.633,
'm4.10xlarge': 5.02,
'm4.16xlarge': 8.032,
'm4.2xlarge': 1.004,
'm4.4xlarge': 2.008,
'm4.large': 0.251,
'm4.xlarge': 0.502,
'm5.12xlarge': 5.88,
'm5.24xlarge': 11.76,
'm5.2xlarge': 0.98,
'm5.4xlarge': 1.96,
'm5.large': 0.245,
'm5.xlarge': 0.49,
'r3.2xlarge': 1.767,
'r3.4xlarge': 3.394,
'r3.8xlarge': 6.788,
'r3.large': 0.442,
'r3.xlarge': 0.884,
'r4.16xlarge': 11.904,
'r4.2xlarge': 1.488,
'r4.4xlarge': 2.976,
'r4.8xlarge': 5.952,
'r4.large': 0.372,
'r4.xlarge': 0.744,
't1.micro': 0.037,
't2.2xlarge': 0.6572,
't2.large': 0.1768,
't2.medium': 0.0924,
't2.micro': 0.0232,
't2.nano': 0.0116,
't2.small': 0.0464,
't2.xlarge': 0.3386,
't3.2xlarge': 0.6848,
't3.large': 0.162,
't3.medium': 0.0856,
't3.micro': 0.026,
't3.nano': 0.013,
't3.small': 0.052,
't3.xlarge': 0.3424,
'x1.16xlarge': 15.949,
'x1.32xlarge': 31.898}},
'us-east-1': {'linux': {'a1.2xlarge': 0.204,
'a1.4xlarge': 0.408,
'a1.large': 0.051,
'a1.medium': 0.0255,
'a1.xlarge': 0.102,
'c1.medium': 0.13,
'c1.xlarge': 0.52,
'c3.2xlarge': 0.42,
'c3.4xlarge': 0.84,
'c3.8xlarge': 1.68,
'c3.large': 0.105,
'c3.xlarge': 0.21,
'c4.2xlarge': 0.398,
'c4.4xlarge': 0.796,
'c4.8xlarge': 1.591,
'c4.large': 0.1,
'c4.xlarge': 0.199,
'c5.18xlarge': 3.06,
'c5.2xlarge': 0.34,
'c5.4xlarge': 0.68,
'c5.9xlarge': 1.53,
'c5.large': 0.085,
'c5.xlarge': 0.17,
'c5d.18xlarge': 3.456,
'c5d.2xlarge': 0.384,
'c5d.4xlarge': 0.768,
'c5d.9xlarge': 1.728,
'c5d.large': 0.096,
'c5d.xlarge': 0.192,
'c5n.18xlarge': 3.888,
'c5n.2xlarge': 0.432,
'c5n.4xlarge': 0.864,
'c5n.9xlarge': 1.944,
'c5n.large': 0.108,
'c5n.xlarge': 0.216,
'cc2.8xlarge': 2.0,
'cr1.8xlarge': 3.5,
'd2.2xlarge': 1.38,
'd2.4xlarge': 2.76,
'd2.8xlarge': 5.52,
'd2.xlarge': 0.69,
'f1.16xlarge': 13.2,
'f1.2xlarge': 1.65,
'f1.4xlarge': 3.3,
'g2.2xlarge': 0.65,
'g2.8xlarge': 2.6,
'g3.16xlarge': 4.56,
'g3.4xlarge': 1.14,
'g3.8xlarge': 2.28,
'g3s.xlarge': 0.75,
'h1.16xlarge': 3.744,
'h1.2xlarge': 0.468,
'h1.4xlarge': 0.936,
'h1.8xlarge': 1.872,
'hs1.8xlarge': 4.6,
'i2.2xlarge': 1.705,
'i2.4xlarge': 3.41,
'i2.8xlarge': 6.82,
'i2.xlarge': 0.853,
'i3.16xlarge': 4.992,
'i3.2xlarge': 0.624,
'i3.4xlarge': 1.248,
'i3.8xlarge': 2.496,
'i3.large': 0.156,
'i3.metal': 4.992,
'i3.xlarge': 0.312,
'm1.large': 0.175,
'm1.medium': 0.087,
'm1.small': 0.044,
'm1.xlarge': 0.35,
'm2.2xlarge': 0.49,
'm2.4xlarge': 0.98,
'm2.xlarge': 0.245,
'm3.2xlarge': 0.532,
'm3.large': 0.133,
'm3.medium': 0.067,
'm3.xlarge': 0.266,
'm4.10xlarge': 2.0,
'm4.16xlarge': 3.2,
'm4.2xlarge': 0.4,
'm4.4xlarge': 0.8,
'm4.large': 0.1,
'm4.xlarge': 0.2,
'm5.12xlarge': 2.304,
'm5.24xlarge': 4.608,
'm5.2xlarge': 0.384,
'm5.4xlarge': 0.768,
'm5.large': 0.096,
'm5.xlarge': 0.192,
'm5a.12xlarge': 2.064,
'm5a.24xlarge': 4.128,
'm5a.2xlarge': 0.344,
'm5a.4xlarge': 0.688,
'm5a.large': 0.086,
'm5a.xlarge': 0.172,
'm5d.12xlarge': 2.712,
'm5d.24xlarge': 5.424,
'm5d.2xlarge': 0.452,
'm5d.4xlarge': 0.904,
'm5d.large': 0.113,
'm5d.xlarge': 0.226,
'p2.16xlarge': 14.4,
'p2.8xlarge': 7.2,
'p2.xlarge': 0.9,
'p3.16xlarge': 24.48,
'p3.2xlarge': 3.06,
'p3.8xlarge': 12.24,
'p3dn.24xlarge': 31.212,
'r3.2xlarge': 0.665,
'r3.4xlarge': 1.33,
'r3.8xlarge': 2.66,
'r3.large': 0.166,
'r3.xlarge': 0.333,
'r4.16xlarge': 4.256,
'r4.2xlarge': 0.532,
'r4.4xlarge': 1.064,
'r4.8xlarge': 2.128,
'r4.large': 0.133,
'r4.xlarge': 0.266,
'r5.12xlarge': 3.024,
'r5.24xlarge': 6.048,
'r5.2xlarge': 0.504,
'r5.4xlarge': 1.008,
'r5.large': 0.126,
'r5.xlarge': 0.252,
'r5a.12xlarge': 2.712,
'r5a.24xlarge': 5.424,
'r5a.2xlarge': 0.452,
'r5a.4xlarge': 0.904,
'r5a.large': 0.113,
'r5a.xlarge': 0.226,
'r5d.12xlarge': 3.456,
'r5d.24xlarge': 6.912,
'r5d.2xlarge': 0.576,
'r5d.4xlarge': 1.152,
'r5d.large': 0.144,
'r5d.xlarge': 0.288,
't1.micro': 0.02,
't2.2xlarge': 0.3712,
't2.large': 0.0928,
't2.medium': 0.0464,
't2.micro': 0.0116,
't2.nano': 0.0058,
't2.small': 0.023,
't2.xlarge': 0.1856,
't3.2xlarge': 0.3328,
't3.large': 0.0832,
't3.medium': 0.0416,
't3.micro': 0.0104,
't3.nano': 0.0052,
't3.small': 0.0208,
't3.xlarge': 0.1664,
'x1.16xlarge': 6.669,
'x1.32xlarge': 13.338,
'x1e.16xlarge': 13.344,
'x1e.2xlarge': 1.668,
'x1e.32xlarge': 26.688,
'x1e.4xlarge': 3.336,
'x1e.8xlarge': 6.672,
'x1e.xlarge': 0.834,
'z1d.12xlarge': 4.464,
'z1d.2xlarge': 0.744,
'z1d.3xlarge': 1.116,
'z1d.6xlarge': 2.232,
'z1d.large': 0.186,
'z1d.xlarge': 0.372},
'windows': {'c1.medium': 0.21,
'c1.xlarge': 0.84,
'c3.2xlarge': 0.752,
'c3.4xlarge': 1.504,
'c3.8xlarge': 3.008,
'c3.large': 0.188,
'c3.xlarge': 0.376,
'c4.2xlarge': 0.766,
'c4.4xlarge': 1.532,
'c4.8xlarge': 3.091,
'c4.large': 0.192,
'c4.xlarge': 0.383,
'c5.18xlarge': 6.372,
'c5.2xlarge': 0.708,
'c5.4xlarge': 1.416,
'c5.9xlarge': 3.186,
'c5.large': 0.177,
'c5.xlarge': 0.354,
'c5d.18xlarge': 6.768,
'c5d.2xlarge': 0.752,
'c5d.4xlarge': 1.504,
'c5d.9xlarge': 3.384,
'c5d.large': 0.188,
'c5d.xlarge': 0.376,
'c5n.18xlarge': 7.2,
'c5n.2xlarge': 0.8,
'c5n.4xlarge': 1.6,
'c5n.9xlarge': 3.6,
'c5n.large': 0.2,
'c5n.xlarge': 0.4,
'cc2.8xlarge': 2.57,
'cr1.8xlarge': 3.831,
'd2.2xlarge': 1.601,
'd2.4xlarge': 3.062,
'd2.8xlarge': 6.198,
'd2.xlarge': 0.821,
'g2.2xlarge': 0.767,
'g2.8xlarge': 2.878,
'g3.16xlarge': 7.504,
'g3.4xlarge': 1.876,
'g3.8xlarge': 3.752,
'g3s.xlarge': 0.934,
'h1.16xlarge': 6.688,
'h1.2xlarge': 0.836,
'h1.4xlarge': 1.672,
'h1.8xlarge': 3.344,
'hs1.8xlarge': 4.931,
'i2.2xlarge': 1.946,
'i2.4xlarge': 3.891,
'i2.8xlarge': 7.782,
'i2.xlarge': 0.973,
'i3.16xlarge': 7.936,
'i3.2xlarge': 0.992,
'i3.4xlarge': 1.984,
'i3.8xlarge': 3.968,
'i3.large': 0.248,
'i3.metal': 7.936,
'i3.xlarge': 0.496,
'm1.large': 0.299,
'm1.medium': 0.149,
'm1.small': 0.075,
'm1.xlarge': 0.598,
'm2.2xlarge': 0.69,
'm2.4xlarge': 1.38,
'm2.xlarge': 0.345,
'm3.2xlarge': 1.036,
'm3.large': 0.259,
'm3.medium': 0.13,
'm3.xlarge': 0.518,
'm4.10xlarge': 3.84,
'm4.16xlarge': 6.144,
'm4.2xlarge': 0.768,
'm4.4xlarge': 1.536,
'm4.large': 0.192,
'm4.xlarge': 0.384,
'm5.12xlarge': 4.512,
'm5.24xlarge': 9.024,
'm5.2xlarge': 0.752,
'm5.4xlarge': 1.504,
'm5.large': 0.188,
'm5.xlarge': 0.376,
'm5a.12xlarge': 4.272,
'm5a.24xlarge': 8.544,
'm5a.2xlarge': 0.712,
'm5a.4xlarge': 1.424,
'm5a.large': 0.178,
'm5a.xlarge': 0.356,
'm5d.12xlarge': 4.92,
'm5d.24xlarge': 9.84,
'm5d.2xlarge': 0.82,
'm5d.4xlarge': 1.64,
'm5d.large': 0.205,
'm5d.xlarge': 0.41,
'p2.16xlarge': 17.344,
'p2.8xlarge': 8.672,
'p2.xlarge': 1.084,
'p3.16xlarge': 27.424,
'p3.2xlarge': 3.428,
'p3.8xlarge': 13.712,
'p3dn.24xlarge': 35.628,
'r3.2xlarge': 1.045,
'r3.4xlarge': 1.944,
'r3.8xlarge': 3.5,
'r3.large': 0.291,
'r3.xlarge': 0.583,
'r4.16xlarge': 7.2,
'r4.2xlarge': 0.9,
'r4.4xlarge': 1.8,
'r4.8xlarge': 3.6,
'r4.large': 0.225,
'r4.xlarge': 0.45,
'r5.12xlarge': 5.232,
'r5.24xlarge': 10.464,
'r5.2xlarge': 0.872,
'r5.4xlarge': 1.744,
'r5.large': 0.218,
'r5.xlarge': 0.436,
'r5a.12xlarge': 4.92,
'r5a.24xlarge': 9.84,
'r5a.2xlarge': 0.82,
'r5a.4xlarge': 1.64,
'r5a.large': 0.205,
'r5a.xlarge': 0.41,
'r5d.12xlarge': 5.664,
'r5d.24xlarge': 11.328,
'r5d.2xlarge': 0.944,
'r5d.4xlarge': 1.888,
'r5d.large': 0.236,
'r5d.xlarge': 0.472,
't1.micro': 0.02,
't2.2xlarge': 0.4332,
't2.large': 0.1208,
't2.medium': 0.0644,
't2.micro': 0.0162,
't2.nano': 0.0081,
't2.small': 0.032,
't2.xlarge': 0.2266,
't3.2xlarge': 0.48,
't3.large': 0.1108,
't3.medium': 0.06,
't3.micro': 0.0196,
't3.nano': 0.0098,
't3.small': 0.0392,
't3.xlarge': 0.24,
'x1.16xlarge': 9.613,
'x1.32xlarge': 19.226,
'x1e.16xlarge': 16.288,
'x1e.2xlarge': 2.036,
'x1e.32xlarge': 32.576,
'x1e.4xlarge': 4.072,
'x1e.8xlarge': 8.144,
'x1e.xlarge': 1.018,
'z1d.12xlarge': 6.672,
'z1d.2xlarge': 1.112,
'z1d.3xlarge': 1.668,
'z1d.6xlarge': 3.336,
'z1d.large': 0.278,
'z1d.xlarge': 0.556}},
'us-east-2': {'linux': {'a1.2xlarge': 0.204,
'a1.4xlarge': 0.408,
'a1.large': 0.051,
'a1.medium': 0.0255,
'a1.xlarge': 0.102,
'c4.2xlarge': 0.398,
'c4.4xlarge': 0.796,
'c4.8xlarge': 1.591,
'c4.large': 0.1,
'c4.xlarge': 0.199,
'c5.18xlarge': 3.06,
'c5.2xlarge': 0.34,
'c5.4xlarge': 0.68,
'c5.9xlarge': 1.53,
'c5.large': 0.085,
'c5.xlarge': 0.17,
'c5d.18xlarge': 3.456,
'c5d.2xlarge': 0.384,
'c5d.4xlarge': 0.768,
'c5d.9xlarge': 1.728,
'c5d.large': 0.096,
'c5d.xlarge': 0.192,
'c5n.18xlarge': 3.888,
'c5n.2xlarge': 0.432,
'c5n.4xlarge': 0.864,
'c5n.9xlarge': 1.944,
'c5n.large': 0.108,
'c5n.xlarge': 0.216,
'd2.2xlarge': 1.38,
'd2.4xlarge': 2.76,
'd2.8xlarge': 5.52,
'd2.xlarge': 0.69,
'g3.16xlarge': 4.56,
'g3.4xlarge': 1.14,
'g3.8xlarge': 2.28,
'g3s.xlarge': 0.75,
'h1.16xlarge': 3.744,
'h1.2xlarge': 0.468,
'h1.4xlarge': 0.936,
'h1.8xlarge': 1.872,
'i2.2xlarge': 1.705,
'i2.4xlarge': 3.41,
'i2.8xlarge': 6.82,
'i2.xlarge': 0.853,
'i3.16xlarge': 4.992,
'i3.2xlarge': 0.624,
'i3.4xlarge': 1.248,
'i3.8xlarge': 2.496,
'i3.large': 0.156,
'i3.metal': 4.992,
'i3.xlarge': 0.312,
'm4.10xlarge': 2.0,
'm4.16xlarge': 3.2,
'm4.2xlarge': 0.4,
'm4.4xlarge': 0.8,
'm4.large': 0.1,
'm4.xlarge': 0.2,
'm5.12xlarge': 2.304,
'm5.24xlarge': 4.608,
'm5.2xlarge': 0.384,
'm5.4xlarge': 0.768,
'm5.large': 0.096,
'm5.xlarge': 0.192,
'm5a.12xlarge': 2.064,
'm5a.24xlarge': 4.128,
'm5a.2xlarge': 0.344,
'm5a.4xlarge': 0.688,
'm5a.large': 0.086,
'm5a.xlarge': 0.172,
'm5d.12xlarge': 2.712,
'm5d.24xlarge': 5.424,
'm5d.2xlarge': 0.452,
'm5d.4xlarge': 0.904,
'm5d.large': 0.113,
'm5d.xlarge': 0.226,
'p2.16xlarge': 14.4,
'p2.8xlarge': 7.2,
'p2.xlarge': 0.9,
'p3.16xlarge': 24.48,
'p3.2xlarge': 3.06,
'p3.8xlarge': 12.24,
'r3.2xlarge': 0.664,
'r3.4xlarge': 1.328,
'r3.8xlarge': 2.656,
'r3.large': 0.166,
'r3.xlarge': 0.332,
'r4.16xlarge': 4.256,
'r4.2xlarge': 0.532,
'r4.4xlarge': 1.064,
'r4.8xlarge': 2.128,
'r4.large': 0.133,
'r4.xlarge': 0.266,
'r5.12xlarge': 3.024,
'r5.24xlarge': 6.048,
'r5.2xlarge': 0.504,
'r5.4xlarge': 1.008,
'r5.large': 0.126,
'r5.xlarge': 0.252,
'r5a.12xlarge': 2.712,
'r5a.24xlarge': 5.424,
'r5a.2xlarge': 0.452,
'r5a.4xlarge': 0.904,
'r5a.large': 0.113,
'r5a.xlarge': 0.226,
'r5d.12xlarge': 3.456,
'r5d.24xlarge': 6.912,
'r5d.2xlarge': 0.576,
'r5d.4xlarge': 1.152,
'r5d.large': 0.144,
'r5d.xlarge': 0.288,
't2.2xlarge': 0.3712,
't2.large': 0.0928,
't2.medium': 0.0464,
't2.micro': 0.0116,
't2.nano': 0.0058,
't2.small': 0.023,
't2.xlarge': 0.1856,
't3.2xlarge': 0.3328,
't3.large': 0.0832,
't3.medium': 0.0416,
't3.micro': 0.0104,
't3.nano': 0.0052,
't3.small': 0.0208,
't3.xlarge': 0.1664,
'x1.16xlarge': 6.669,
'x1.32xlarge': 13.338},
'windows': {'c4.2xlarge': 0.766,
'c4.4xlarge': 1.532,
'c4.8xlarge': 3.091,
'c4.large': 0.192,
'c4.xlarge': 0.383,
'c5.18xlarge': 6.372,
'c5.2xlarge': 0.708,
'c5.4xlarge': 1.416,
'c5.9xlarge': 3.186,
'c5.large': 0.177,
'c5.xlarge': 0.354,
'c5d.18xlarge': 6.768,
'c5d.2xlarge': 0.752,
'c5d.4xlarge': 1.504,
'c5d.9xlarge': 3.384,
'c5d.large': 0.188,
'c5d.xlarge': 0.376,
'c5n.18xlarge': 7.2,
'c5n.2xlarge': 0.8,
'c5n.4xlarge': 1.6,
'c5n.9xlarge': 3.6,
'c5n.large': 0.2,
'c5n.xlarge': 0.4,
'd2.2xlarge': 1.601,
'd2.4xlarge': 3.062,
'd2.8xlarge': 6.198,
'd2.xlarge': 0.821,
'g3.16xlarge': 7.504,
'g3.4xlarge': 1.876,
'g3.8xlarge': 3.752,
'g3s.xlarge': 0.934,
'h1.16xlarge': 6.688,
'h1.2xlarge': 0.836,
'h1.4xlarge': 1.672,
'h1.8xlarge': 3.344,
'i2.2xlarge': 1.946,
'i2.4xlarge': 3.891,
'i2.8xlarge': 7.782,
'i2.xlarge': 0.973,
'i3.16xlarge': 7.936,
'i3.2xlarge': 0.992,
'i3.4xlarge': 1.984,
'i3.8xlarge': 3.968,
'i3.large': 0.248,
'i3.metal': 7.936,
'i3.xlarge': 0.496,
'm4.10xlarge': 3.84,
'm4.16xlarge': 6.144,
'm4.2xlarge': 0.768,
'm4.4xlarge': 1.536,
'm4.large': 0.192,
'm4.xlarge': 0.384,
'm5.12xlarge': 4.512,
'm5.24xlarge': 9.024,
'm5.2xlarge': 0.752,
'm5.4xlarge': 1.504,
'm5.large': 0.188,
'm5.xlarge': 0.376,
'm5a.12xlarge': 4.272,
'm5a.24xlarge': 8.544,
'm5a.2xlarge': 0.712,
'm5a.4xlarge': 1.424,
'm5a.large': 0.178,
'm5a.xlarge': 0.356,
'm5d.12xlarge': 4.92,
'm5d.24xlarge': 9.84,
'm5d.2xlarge': 0.82,
'm5d.4xlarge': 1.64,
'm5d.large': 0.205,
'm5d.xlarge': 0.41,
'p2.16xlarge': 17.344,
'p2.8xlarge': 8.672,
'p2.xlarge': 1.084,
'p3.16xlarge': 27.424,
'p3.2xlarge': 3.428,
'p3.8xlarge': 13.712,
'r3.2xlarge': 1.045,
'r3.4xlarge': 1.944,
'r3.8xlarge': 3.5,
'r3.large': 0.291,
'r3.xlarge': 0.583,
'r4.16xlarge': 7.2,
'r4.2xlarge': 0.9,
'r4.4xlarge': 1.8,
'r4.8xlarge': 3.6,
'r4.large': 0.225,
'r4.xlarge': 0.45,
'r5.12xlarge': 5.232,
'r5.24xlarge': 10.464,
'r5.2xlarge': 0.872,
'r5.4xlarge': 1.744,
'r5.large': 0.218,
'r5.xlarge': 0.436,
'r5a.12xlarge': 4.92,
'r5a.24xlarge': 9.84,
'r5a.2xlarge': 0.82,
'r5a.4xlarge': 1.64,
'r5a.large': 0.205,
'r5a.xlarge': 0.41,
'r5d.12xlarge': 5.664,
'r5d.24xlarge': 11.328,
'r5d.2xlarge': 0.944,
'r5d.4xlarge': 1.888,
'r5d.large': 0.236,
'r5d.xlarge': 0.472,
't2.2xlarge': 0.4332,
't2.large': 0.1208,
't2.medium': 0.0644,
't2.micro': 0.0162,
't2.nano': 0.0081,
't2.small': 0.032,
't2.xlarge': 0.2266,
't3.2xlarge': 0.48,
't3.large': 0.1108,
't3.medium': 0.06,
't3.micro': 0.0196,
't3.nano': 0.0098,
't3.small': 0.0392,
't3.xlarge': 0.24,
'x1.16xlarge': 9.613,
'x1.32xlarge': 19.226}},
'us-west-1': {'linux': {'c1.medium': 0.148,
'c1.xlarge': 0.592,
'c3.2xlarge': 0.478,
'c3.4xlarge': 0.956,
'c3.8xlarge': 1.912,
'c3.large': 0.12,
'c3.xlarge': 0.239,
'c4.2xlarge': 0.498,
'c4.4xlarge': 0.997,
'c4.8xlarge': 1.993,
'c4.large': 0.124,
'c4.xlarge': 0.249,
'c5.18xlarge': 3.816,
'c5.2xlarge': 0.424,
'c5.4xlarge': 0.848,
'c5.9xlarge': 1.908,
'c5.large': 0.106,
'c5.xlarge': 0.212,
'c5d.18xlarge': 4.32,
'c5d.2xlarge': 0.48,
'c5d.4xlarge': 0.96,
'c5d.9xlarge': 2.16,
'c5d.large': 0.12,
'c5d.xlarge': 0.24,
'd2.2xlarge': 1.563,
'd2.4xlarge': 3.125,
'd2.8xlarge': 6.25,
'd2.xlarge': 0.781,
'f1.16xlarge': 15.304,
'f1.2xlarge': 1.913,
'f1.4xlarge': 3.826,
'g2.2xlarge': 0.702,
'g2.8xlarge': 2.808,
'g3.16xlarge': 6.136,
'g3.4xlarge': 1.534,
'g3.8xlarge': 3.068,
'g3s.xlarge': 1.009,
'i2.2xlarge': 1.876,
'i2.4xlarge': 3.751,
'i2.8xlarge': 7.502,
'i2.xlarge': 0.938,
'i3.16xlarge': 5.504,
'i3.2xlarge': 0.688,
'i3.4xlarge': 1.376,
'i3.8xlarge': 2.752,
'i3.large': 0.172,
'i3.metal': 5.504,
'i3.xlarge': 0.344,
'm1.large': 0.19,
'm1.medium': 0.095,
'm1.small': 0.047,
'm1.xlarge': 0.379,
'm2.2xlarge': 0.55,
'm2.4xlarge': 1.1,
'm2.xlarge': 0.275,
'm3.2xlarge': 0.616,
'm3.large': 0.154,
'm3.medium': 0.077,
'm3.xlarge': 0.308,
'm4.10xlarge': 2.34,
'm4.16xlarge': 3.744,
'm4.2xlarge': 0.468,
'm4.4xlarge': 0.936,
'm4.large': 0.117,
'm4.xlarge': 0.234,
'm5.12xlarge': 2.688,
'm5.24xlarge': 5.376,
'm5.2xlarge': 0.448,
'm5.4xlarge': 0.896,
'm5.large': 0.112,
'm5.xlarge': 0.224,
'm5d.12xlarge': 3.192,
'm5d.24xlarge': 6.384,
'm5d.2xlarge': 0.532,
'm5d.4xlarge': 1.064,
'm5d.large': 0.133,
'm5d.xlarge': 0.266,
'r3.2xlarge': 0.741,
'r3.4xlarge': 1.482,
'r3.8xlarge': 2.964,
'r3.large': 0.185,
'r3.xlarge': 0.371,
'r4.16xlarge': 4.7424,
'r4.2xlarge': 0.5928,
'r4.4xlarge': 1.1856,
'r4.8xlarge': 2.3712,
'r4.large': 0.1482,
'r4.xlarge': 0.2964,
'r5.12xlarge': 3.36,
'r5.24xlarge': 6.72,
'r5.2xlarge': 0.56,
'r5.4xlarge': 1.12,
'r5.large': 0.14,
'r5.xlarge': 0.28,
'r5d.12xlarge': 3.888,
'r5d.24xlarge': 7.776,
'r5d.2xlarge': 0.648,
'r5d.4xlarge': 1.296,
'r5d.large': 0.162,
'r5d.xlarge': 0.324,
't1.micro': 0.025,
't2.2xlarge': 0.4416,
't2.large': 0.1104,
't2.medium': 0.0552,
't2.micro': 0.0138,
't2.nano': 0.0069,
't2.small': 0.0276,
't2.xlarge': 0.2208,
't3.2xlarge': 0.3968,
't3.large': 0.0992,
't3.medium': 0.0496,
't3.micro': 0.0124,
't3.nano': 0.0062,
't3.small': 0.0248,
't3.xlarge': 0.1984,
'z1d.12xlarge': 5.064,
'z1d.2xlarge': 0.844,
'z1d.3xlarge': 1.266,
'z1d.6xlarge': 2.532,
'z1d.large': 0.211,
'z1d.xlarge': 0.422},
'windows': {'c1.medium': 0.228,
'c1.xlarge': 0.912,
'c3.2xlarge': 0.752,
'c3.4xlarge': 1.504,
'c3.8xlarge': 3.008,
'c3.large': 0.188,
'c3.xlarge': 0.376,
'c4.2xlarge': 0.866,
'c4.4xlarge': 1.733,
'c4.8xlarge': 3.536,
'c4.large': 0.216,
'c4.xlarge': 0.433,
'c5.18xlarge': 7.128,
'c5.2xlarge': 0.792,
'c5.4xlarge': 1.584,
'c5.9xlarge': 3.564,
'c5.large': 0.198,
'c5.xlarge': 0.396,
'c5d.18xlarge': 7.632,
'c5d.2xlarge': 0.848,
'c5d.4xlarge': 1.696,
'c5d.9xlarge': 3.816,
'c5d.large': 0.212,
'c5d.xlarge': 0.424,
'd2.2xlarge': 1.931,
'd2.4xlarge': 3.861,
'd2.8xlarge': 7.906,
'd2.xlarge': 0.965,
'g2.2xlarge': 0.819,
'g2.8xlarge': 3.086,
'g3.16xlarge': 9.08,
'g3.4xlarge': 2.27,
'g3.8xlarge': 4.54,
'g3s.xlarge': 1.193,
'i2.2xlarge': 2.116,
'i2.4xlarge': 4.232,
'i2.8xlarge': 8.464,
'i2.xlarge': 1.058,
'i3.16xlarge': 8.448,
'i3.2xlarge': 1.056,
'i3.4xlarge': 2.112,
'i3.8xlarge': 4.224,
'i3.large': 0.264,
'i3.metal': 8.448,
'i3.xlarge': 0.528,
'm1.large': 0.314,
'm1.medium': 0.157,
'm1.small': 0.078,
'm1.xlarge': 0.627,
'm2.2xlarge': 0.75,
'm2.4xlarge': 1.5,
'm2.xlarge': 0.375,
'm3.2xlarge': 1.12,
'm3.large': 0.28,
'm3.medium': 0.14,
'm3.xlarge': 0.56,
'm4.10xlarge': 4.18,
'm4.16xlarge': 6.688,
'm4.2xlarge': 0.836,
'm4.4xlarge': 1.672,
'm4.large': 0.209,
'm4.xlarge': 0.418,
'm5.12xlarge': 4.896,
'm5.24xlarge': 9.792,
'm5.2xlarge': 0.816,
'm5.4xlarge': 1.632,
'm5.large': 0.204,
'm5.xlarge': 0.408,
'm5d.12xlarge': 5.4,
'm5d.24xlarge': 10.8,
'm5d.2xlarge': 0.9,
'm5d.4xlarge': 1.8,
'm5d.large': 0.225,
'm5d.xlarge': 0.45,
'r3.2xlarge': 1.134,
'r3.4xlarge': 2.111,
'r3.8xlarge': 3.8,
'r3.large': 0.32,
'r3.xlarge': 0.633,
'r4.16xlarge': 7.6864,
'r4.2xlarge': 0.9608,
'r4.4xlarge': 1.9216,
'r4.8xlarge': 3.8432,
'r4.large': 0.2402,
'r4.xlarge': 0.4804,
'r5.12xlarge': 5.568,
'r5.24xlarge': 11.136,
'r5.2xlarge': 0.928,
'r5.4xlarge': 1.856,
'r5.large': 0.232,
'r5.xlarge': 0.464,
'r5d.12xlarge': 6.096,
'r5d.24xlarge': 12.192,
'r5d.2xlarge': 1.016,
'r5d.4xlarge': 2.032,
'r5d.large': 0.254,
'r5d.xlarge': 0.508,
't1.micro': 0.035,
't2.2xlarge': 0.5036,
't2.large': 0.1384,
't2.medium': 0.0732,
't2.micro': 0.0184,
't2.nano': 0.0092,
't2.small': 0.0368,
't2.xlarge': 0.2618,
't3.2xlarge': 0.544,
't3.large': 0.1268,
't3.medium': 0.068,
't3.micro': 0.0216,
't3.nano': 0.0108,
't3.small': 0.0432,
't3.xlarge': 0.272,
'z1d.12xlarge': 7.272,
'z1d.2xlarge': 1.212,
'z1d.3xlarge': 1.818,
'z1d.6xlarge': 3.636,
'z1d.large': 0.303,
'z1d.xlarge': 0.606}},
'us-west-2': {'linux': {'a1.2xlarge': 0.204,
'a1.4xlarge': 0.408,
'a1.large': 0.051,
'a1.medium': 0.0255,
'a1.xlarge': 0.102,
'c1.medium': 0.13,
'c1.xlarge': 0.52,
'c3.2xlarge': 0.42,
'c3.4xlarge': 0.84,
'c3.8xlarge': 1.68,
'c3.large': 0.105,
'c3.xlarge': 0.21,
'c4.2xlarge': 0.398,
'c4.4xlarge': 0.796,
'c4.8xlarge': 1.591,
'c4.large': 0.1,
'c4.xlarge': 0.199,
'c5.18xlarge': 3.06,
'c5.2xlarge': 0.34,
'c5.4xlarge': 0.68,
'c5.9xlarge': 1.53,
'c5.large': 0.085,
'c5.xlarge': 0.17,
'c5d.18xlarge': 3.456,
'c5d.2xlarge': 0.384,
'c5d.4xlarge': 0.768,
'c5d.9xlarge': 1.728,
'c5d.large': 0.096,
'c5d.xlarge': 0.192,
'c5n.18xlarge': 3.888,
'c5n.2xlarge': 0.432,
'c5n.4xlarge': 0.864,
'c5n.9xlarge': 1.944,
'c5n.large': 0.108,
'c5n.xlarge': 0.216,
'cc2.8xlarge': 2.0,
'cr1.8xlarge': 3.5,
'd2.2xlarge': 1.38,
'd2.4xlarge': 2.76,
'd2.8xlarge': 5.52,
'd2.xlarge': 0.69,
'f1.16xlarge': 13.2,
'f1.2xlarge': 1.65,
'f1.4xlarge': 3.3,
'g2.2xlarge': 0.65,
'g2.8xlarge': 2.6,
'g3.16xlarge': 4.56,
'g3.4xlarge': 1.14,
'g3.8xlarge': 2.28,
'g3s.xlarge': 0.75,
'h1.16xlarge': 3.744,
'h1.2xlarge': 0.468,
'h1.4xlarge': 0.936,
'h1.8xlarge': 1.872,
'hs1.8xlarge': 4.6,
'i2.2xlarge': 1.705,
'i2.4xlarge': 3.41,
'i2.8xlarge': 6.82,
'i2.xlarge': 0.853,
'i3.16xlarge': 4.992,
'i3.2xlarge': 0.624,
'i3.4xlarge': 1.248,
'i3.8xlarge': 2.496,
'i3.large': 0.156,
'i3.metal': 4.992,
'i3.xlarge': 0.312,
'm1.large': 0.175,
'm1.medium': 0.087,
'm1.small': 0.044,
'm1.xlarge': 0.35,
'm2.2xlarge': 0.49,
'm2.4xlarge': 0.98,
'm2.xlarge': 0.245,
'm3.2xlarge': 0.532,
'm3.large': 0.133,
'm3.medium': 0.067,
'm3.xlarge': 0.266,
'm4.10xlarge': 2.0,
'm4.16xlarge': 3.2,
'm4.2xlarge': 0.4,
'm4.4xlarge': 0.8,
'm4.large': 0.1,
'm4.xlarge': 0.2,
'm5.12xlarge': 2.304,
'm5.24xlarge': 4.608,
'm5.2xlarge': 0.384,
'm5.4xlarge': 0.768,
'm5.large': 0.096,
'm5.xlarge': 0.192,
'm5a.12xlarge': 2.064,
'm5a.24xlarge': 4.128,
'm5a.2xlarge': 0.344,
'm5a.4xlarge': 0.688,
'm5a.large': 0.086,
'm5a.xlarge': 0.172,
'm5d.12xlarge': 2.712,
'm5d.24xlarge': 5.424,
'm5d.2xlarge': 0.452,
'm5d.4xlarge': 0.904,
'm5d.large': 0.113,
'm5d.xlarge': 0.226,
'p2.16xlarge': 14.4,
'p2.8xlarge': 7.2,
'p2.xlarge': 0.9,
'p3.16xlarge': 24.48,
'p3.2xlarge': 3.06,
'p3.8xlarge': 12.24,
'p3dn.24xlarge': 31.212,
'r3.2xlarge': 0.665,
'r3.4xlarge': 1.33,
'r3.8xlarge': 2.66,
'r3.large': 0.166,
'r3.xlarge': 0.333,
'r4.16xlarge': 4.256,
'r4.2xlarge': 0.532,
'r4.4xlarge': 1.064,
'r4.8xlarge': 2.128,
'r4.large': 0.133,
'r4.xlarge': 0.266,
'r5.12xlarge': 3.024,
'r5.24xlarge': 6.048,
'r5.2xlarge': 0.504,
'r5.4xlarge': 1.008,
'r5.large': 0.126,
'r5.xlarge': 0.252,
'r5a.12xlarge': 2.712,
'r5a.24xlarge': 5.424,
'r5a.2xlarge': 0.452,
'r5a.4xlarge': 0.904,
'r5a.large': 0.113,
'r5a.xlarge': 0.226,
'r5d.12xlarge': 3.456,
'r5d.24xlarge': 6.912,
'r5d.2xlarge': 0.576,
'r5d.4xlarge': 1.152,
'r5d.large': 0.144,
'r5d.xlarge': 0.288,
't1.micro': 0.02,
't2.2xlarge': 0.3712,
't2.large': 0.0928,
't2.medium': 0.0464,
't2.micro': 0.0116,
't2.nano': 0.0058,
't2.small': 0.023,
't2.xlarge': 0.1856,
't3.2xlarge': 0.3328,
't3.large': 0.0832,
't3.medium': 0.0416,
't3.micro': 0.0104,
't3.nano': 0.0052,
't3.small': 0.0208,
't3.xlarge': 0.1664,
'x1.16xlarge': 6.669,
'x1.32xlarge': 13.338,
'x1e.16xlarge': 13.344,
'x1e.2xlarge': 1.668,
'x1e.32xlarge': 26.688,
'x1e.4xlarge': 3.336,
'x1e.8xlarge': 6.672,
'x1e.xlarge': 0.834,
'z1d.12xlarge': 4.464,
'z1d.2xlarge': 0.744,
'z1d.3xlarge': 1.116,
'z1d.6xlarge': 2.232,
'z1d.large': 0.186,
'z1d.xlarge': 0.372},
'windows': {'c1.medium': 0.21,
'c1.xlarge': 0.84,
'c3.2xlarge': 0.752,
'c3.4xlarge': 1.504,
'c3.8xlarge': 3.008,
'c3.large': 0.188,
'c3.xlarge': 0.376,
'c4.2xlarge': 0.766,
'c4.4xlarge': 1.532,
'c4.8xlarge': 3.091,
'c4.large': 0.192,
'c4.xlarge': 0.383,
'c5.18xlarge': 6.372,
'c5.2xlarge': 0.708,
'c5.4xlarge': 1.416,
'c5.9xlarge': 3.186,
'c5.large': 0.177,
'c5.xlarge': 0.354,
'c5d.18xlarge': 6.768,
'c5d.2xlarge': 0.752,
'c5d.4xlarge': 1.504,
'c5d.9xlarge': 3.384,
'c5d.large': 0.188,
'c5d.xlarge': 0.376,
'c5n.18xlarge': 7.2,
'c5n.2xlarge': 0.8,
'c5n.4xlarge': 1.6,
'c5n.9xlarge': 3.6,
'c5n.large': 0.2,
'c5n.xlarge': 0.4,
'cc2.8xlarge': 2.57,
'cr1.8xlarge': 3.831,
'd2.2xlarge': 1.601,
'd2.4xlarge': 3.062,
'd2.8xlarge': 6.198,
'd2.xlarge': 0.821,
'g2.2xlarge': 0.767,
'g2.8xlarge': 2.878,
'g3.16xlarge': 7.504,
'g3.4xlarge': 1.876,
'g3.8xlarge': 3.752,
'g3s.xlarge': 0.934,
'h1.16xlarge': 6.688,
'h1.2xlarge': 0.836,
'h1.4xlarge': 1.672,
'h1.8xlarge': 3.344,
'hs1.8xlarge': 4.931,
'i2.2xlarge': 1.946,
'i2.4xlarge': 3.891,
'i2.8xlarge': 7.782,
'i2.xlarge': 0.973,
'i3.16xlarge': 7.936,
'i3.2xlarge': 0.992,
'i3.4xlarge': 1.984,
'i3.8xlarge': 3.968,
'i3.large': 0.248,
'i3.metal': 7.936,
'i3.xlarge': 0.496,
'm1.large': 0.299,
'm1.medium': 0.149,
'm1.small': 0.075,
'm1.xlarge': 0.598,
'm2.2xlarge': 0.69,
'm2.4xlarge': 1.38,
'm2.xlarge': 0.345,
'm3.2xlarge': 1.036,
'm3.large': 0.259,
'm3.medium': 0.13,
'm3.xlarge': 0.518,
'm4.10xlarge': 3.84,
'm4.16xlarge': 6.144,
'm4.2xlarge': 0.768,
'm4.4xlarge': 1.536,
'm4.large': 0.192,
'm4.xlarge': 0.384,
'm5.12xlarge': 4.512,
'm5.24xlarge': 9.024,
'm5.2xlarge': 0.752,
'm5.4xlarge': 1.504,
'm5.large': 0.188,
'm5.xlarge': 0.376,
'm5a.12xlarge': 4.272,
'm5a.24xlarge': 8.544,
'm5a.2xlarge': 0.712,
'm5a.4xlarge': 1.424,
'm5a.large': 0.178,
'm5a.xlarge': 0.356,
'm5d.12xlarge': 4.92,
'm5d.24xlarge': 9.84,
'm5d.2xlarge': 0.82,
'm5d.4xlarge': 1.64,
'm5d.large': 0.205,
'm5d.xlarge': 0.41,
'p2.16xlarge': 17.344,
'p2.8xlarge': 8.672,
'p2.xlarge': 1.084,
'p3.16xlarge': 27.424,
'p3.2xlarge': 3.428,
'p3.8xlarge': 13.712,
'p3dn.24xlarge': 35.628,
'r3.2xlarge': 1.045,
'r3.4xlarge': 1.944,
'r3.8xlarge': 3.5,
'r3.large': 0.291,
'r3.xlarge': 0.583,
'r4.16xlarge': 7.2,
'r4.2xlarge': 0.9,
'r4.4xlarge': 1.8,
'r4.8xlarge': 3.6,
'r4.large': 0.225,
'r4.xlarge': 0.45,
'r5.12xlarge': 5.232,
'r5.24xlarge': 10.464,
'r5.2xlarge': 0.872,
'r5.4xlarge': 1.744,
'r5.large': 0.218,
'r5.xlarge': 0.436,
'r5a.12xlarge': 4.92,
'r5a.24xlarge': 9.84,
'r5a.2xlarge': 0.82,
'r5a.4xlarge': 1.64,
'r5a.large': 0.205,
'r5a.xlarge': 0.41,
'r5d.12xlarge': 5.664,
'r5d.24xlarge': 11.328,
'r5d.2xlarge': 0.944,
'r5d.4xlarge': 1.888,
'r5d.large': 0.236,
'r5d.xlarge': 0.472,
't1.micro': 0.02,
't2.2xlarge': 0.4332,
't2.large': 0.1208,
't2.medium': 0.0644,
't2.micro': 0.0162,
't2.nano': 0.0081,
't2.small': 0.032,
't2.xlarge': 0.2266,
't3.2xlarge': 0.48,
't3.large': 0.1108,
't3.medium': 0.06,
't3.micro': 0.0196,
't3.nano': 0.0098,
't3.small': 0.0392,
't3.xlarge': 0.24,
'x1.16xlarge': 9.613,
'x1.32xlarge': 19.226,
'x1e.16xlarge': 16.288,
'x1e.2xlarge': 2.036,
'x1e.32xlarge': 32.576,
'x1e.4xlarge': 4.072,
'x1e.8xlarge': 8.144,
'x1e.xlarge': 1.018,
'z1d.12xlarge': 6.672,
'z1d.2xlarge': 1.112,
'z1d.3xlarge': 1.668,
'z1d.6xlarge': 3.336,
'z1d.large': 0.278,
'z1d.xlarge': 0.556}}}
| StarcoderdataPython |
3266463 | #!/usr/bin/env python3
import sys
import argparse
from Bio import SeqIO
from gffpal.gff import GFFRecord, Strand
from gffpal.attributes import GFFAttributes
def cli(prog, args):
    """Parse command line arguments for the MiteFinderII-to-GFF3 converter.

    Parameters
    ----------
    prog : str
        Program name shown in help output (usually ``sys.argv[0]``).
    args : list of str
        Argument strings to parse (usually ``sys.argv[1:]``).

    Returns
    -------
    argparse.Namespace
        Parsed arguments with open file handles for ``genome``, ``infile``
        and ``outfile``.
    """
    parser = argparse.ArgumentParser(
        prog=prog,
        # NOTE(review): the original description ("tab-separated blast-like
        # file ... mmseqs labels") was copy-pasted from a different tool and
        # did not describe this script; corrected to match main() below.
        description="""Converts MiteFinderII fasta output to a GFF3 file.

        Coordinates and scores are decoded from the "|"-separated fasta
        headers produced by MiteFinderII.
        """
    )

    parser.add_argument(
        "genome",
        type=argparse.FileType('r'),
        # Help text was "The source." in the original — a copy/paste error;
        # main() reads this as the genome fasta to recover sequence ids.
        help="The genome fasta file that the MITEs were predicted from.",
    )

    parser.add_argument(
        "infile",
        type=argparse.FileType('r'),
        help="Input MiteFinderII fasta file.",
    )

    parser.add_argument(
        "-s", "--source",
        type=str,
        default="mitefinder",
        help="The GFF3 'source' column value.",
    )

    parser.add_argument(
        "-o", "--outfile",
        default=sys.stdout,
        type=argparse.FileType('w'),
        help="Output GFF3 file. Default stdout.",
    )

    return parser.parse_args(args)
def split_desc(seq, seqids):
    """Parse a MiteFinderII fasta record id.

    The id is pipe-delimited: name|seq_index|lstart|lend|rstart|rend|...:score.
    The 1-based seq_index is resolved against *seqids* (genome sequence ids
    in file order).

    Returns a tuple (seqid, lborder_start, lborder_end,
    rborder_start, rborder_end, score).
    """
    fields = seq.id.split("|")
    seqid = seqids[int(fields[1]) - 1]
    lstart, lend, rstart, rend = (int(f) for f in fields[2:6])
    # Score is encoded after the first ":" of the last field.
    score = float(fields[-1].split(":", maxsplit=1)[1])
    return seqid, lstart, lend, rstart, rend, score
def get_region_feature(i, seqid, left, right, score):
    """Build the parent repeat_region GFF record spanning both TIR borders."""
    coords = left + right
    return GFFRecord(
        seqid=seqid,
        source="MiteFinderII",
        type="repeat_region",
        start=min(coords),
        end=max(coords),
        score=score,
        strand=Strand.UNKNOWN,
        attributes=GFFAttributes(
            id=f"repeat_region{i}",
            ontology_term=["SO:0000657", "SO:repeat_region"],
            custom={"mitefinder_score": score},
        ),
    )
def get_tir_feature(i, seqid, pos):
    """Build a terminal_inverted_repeat child record of repeat_region{i}."""
    attrs = GFFAttributes(
        parent=[f"repeat_region{i}"],
        ontology_term=["SO:0000481", "SO:terminal_inverted_repeat"]
    )
    return GFFRecord(
        seqid=seqid,
        source="MiteFinderII",
        type="terminal_inverted_repeat",
        start=min(pos),
        end=max(pos),
        score=None,
        strand=Strand.UNKNOWN,
        attributes=attrs,
    )
def get_mite_feature(i, seqid, left, right):
    """Build the MITE child record covering the span between the two TIRs."""
    start = max(left)
    end = min(right)
    # The inner span (between the TIR borders) must be non-empty.
    assert start < end
    return GFFRecord(
        seqid=seqid,
        source="MiteFinderII",
        type="MITE",
        start=start,
        end=end,
        score=None,
        strand=Strand.UNKNOWN,
        attributes=GFFAttributes(
            parent=[f"repeat_region{i}"],
            ontology_term=["SO:0000338", "SO:MITE"]
        ),
    )
def main():
    """Convert MiteFinderII fasta output into GFF3 on the chosen outfile.

    For each predicted MITE, emits a repeat_region parent record plus three
    children: left TIR, the MITE body, and right TIR.
    """
    args = cli(sys.argv[0], sys.argv[1:])
    seqs = SeqIO.parse(args.infile, format="fasta")
    genome = SeqIO.parse(args.genome, format="fasta")
    # Only sequence ids are needed; MiteFinderII references them by 1-based index.
    seqids = [s.id for s in genome]
    i = 1
    for seq in seqs:
        (seqid, lborder_start, lborder_end,
         rborder_start, rborder_end, score) = split_desc(seq, seqids)
        region = get_region_feature(
            i,
            seqid,
            [lborder_start, lborder_end],
            [rborder_start, rborder_end],
            score
        )
        ltir = get_tir_feature(i, seqid, [lborder_start, lborder_end])
        rtir = get_tir_feature(i, seqid, [rborder_start, rborder_end])
        mid = get_mite_feature(
            i,
            seqid,
            [lborder_start, lborder_end],
            [rborder_start, rborder_end],
        )
        # Parent first so consumers see it before its children.
        print(region, file=args.outfile)
        print(ltir, file=args.outfile)
        print(mid, file=args.outfile)
        print(rtir, file=args.outfile)
        i += 1
    return
if __name__ == "__main__":
main()
| StarcoderdataPython |
1796968 | <reponame>sbanwart/data-science
import math, random
from matplotlib import pyplot as plt
from collections import Counter
def uniform_pdf(x):
    """Probability density of Uniform(0, 1): 1 on [0, 1), else 0."""
    # Chained comparison is the idiomatic form of `x >= 0 and x < 1`.
    return 1 if 0 <= x < 1 else 0
def uniform_cdf(x):
    """Return the probability that a Uniform(0, 1) variable is <= x.

    Equivalent to clamping x into [0, 1]: 0 below the support, x inside
    it, and 1 above it.
    """
    return max(0, min(x, 1))
def normal_pdf(x, mu=0, sigma=1):
    """Density of the Normal(mu, sigma) distribution at x."""
    z = (x - mu) / sigma
    return math.exp(-0.5 * z * z) / (math.sqrt(2 * math.pi) * sigma)
def normal_cdf(x, mu=0, sigma=1):
    """P(X <= x) for X ~ Normal(mu, sigma), via the error function."""
    z = (x - mu) / (sigma * math.sqrt(2))
    return 0.5 * (1 + math.erf(z))
def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001):
    """Find an approximate inverse of the normal CDF using binary search.

    Non-standard (mu, sigma) are handled by inverting the standard normal
    and rescaling. Searches z in [-10, 10], which covers the normal CDF to
    well beyond machine precision at the endpoints.

    Fix: removed the unused `low_p`/`hi_p` locals the original carried
    alongside the search bounds; only the z bounds drive the search.
    """
    # if not standard, compute standard and rescale
    if mu != 0 or sigma != 1:
        return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
    low_z = -10.0  # normal_cdf(-10) is (very close to) 0
    hi_z = 10.0    # normal_cdf(10) is (very close to) 1
    while hi_z - low_z > tolerance:
        mid_z = (low_z + hi_z) / 2   # consider the midpoint
        mid_p = normal_cdf(mid_z)    # and the cdf's value there
        if mid_p < p:
            # midpoint is still too low, search above it
            low_z = mid_z
        elif mid_p > p:
            # midpoint is still too high, search below it
            hi_z = mid_z
        else:
            break
    return mid_z
def bernoulli_trial(p):
    """Return 1 with probability p, else 0 (a single Bernoulli draw)."""
    success = random.random() < p
    return int(success)

def binomial(n, p):
    """Return the number of successes in n Bernoulli(p) trials."""
    successes = 0
    for _ in range(n):
        successes += bernoulli_trial(p)
    return successes
def make_hist(p, n, num_points):
    """Plot num_points Binomial(n, p) samples against the normal approximation.

    Draws a bar chart of the empirical sample frequencies and overlays the
    normal approximation (CDF differences approximate the binomial pmf at
    each integer). Blocks on plt.show().
    """
    data = [binomial(n, p) for _ in range(num_points)]
    # use a bar chart to show the actual binomial samples
    histogram = Counter(data)
    plt.bar([x - 0.4 for x in histogram.keys()],
            [v / num_points for v in histogram.values()],
            0.8, color = '0.75')
    # Mean and standard deviation of Binomial(n, p).
    mu = p * n
    sigma = math.sqrt(n * p * (1 - p))
    # use a line chart to show the normal approximation
    xs = range(min(data), max(data) + 1)
    # P(i - 0.5 < X <= i + 0.5) under the normal — continuity correction.
    ys = [normal_cdf(i + 0.5, mu, sigma) - normal_cdf(i - 0.5, mu, sigma) for i in xs]
    plt.plot(xs, ys)
    plt.title("Binomial Distribution vs. Normal Approximation")
    plt.show()
#xs = [x / 10.0 for x in range(-50, 50)]
#plt.plot(xs, [normal_pdf(x, sigma = 1) for x in xs], '-', label = 'mu=0,sigma=1')
#plt.plot(xs, [normal_pdf(x, sigma = 2) for x in xs], '--', label = 'mu=0,sigma=2')
#plt.plot(xs, [normal_pdf(x, sigma = 0.5) for x in xs], ':', label = 'mu=0,sigma=0.5')
#plt.plot(xs, [normal_pdf(x, mu = -1) for x in xs], '-', label = 'mu=-1,sigma=1')
#plt.legend()
#plt.title("Various Normal pdfs")
#plt.show()
#xs = [x / 10.0 for x in range(-50, 50)]
#plt.plot(xs, [normal_cdf(x, sigma = 1) for x in xs], '-', label = 'mu=0,sigma=1')
#plt.plot(xs, [normal_cdf(x, sigma = 2) for x in xs], '--', label = 'mu=0,sigma=2')
#plt.plot(xs, [normal_cdf(x, sigma = 0.5) for x in xs], ':', label = 'mu=0,sigma=0.5')
#plt.plot(xs, [normal_cdf(x, mu = -1) for x in xs], '-', label = 'mu=-1,sigma=1')
#plt.legend()
#plt.title("Various Normal cdfs")
#plt.show()
| StarcoderdataPython |
3200087 | from typing import Iterable
Trend = Iterable[float] | StarcoderdataPython |
1630236 | import logging
from pyferm import pyferm
# Configure root logging before starting so pyferm's own messages are shown.
logging.basicConfig(
    format="%(asctime)s %(levelname)-10s %(message)s", level=logging.DEBUG
)
# Instantiate and start the fermentation controller.
# NOTE(review): start() semantics (blocking vs background) are defined in the
# pyferm package — confirm there.
p = pyferm()
p.start()
| StarcoderdataPython |
3277052 | from submission_code.nlp_tools import tokenizer
from onmt.translate.translator import build_translator
from argparse import Namespace
import math
import os
def tokenize_eng(text):
    """Tokenize English text with the project's NER tokenizer.

    Returns the token list (first element of the tokenizer's result tuple).
    """
    return tokenizer.ner_tokenizer(text)[0]
def predict(invocations, model_dir, model_file, result_cnt=5):
    """
    Function called by the evaluation script to interface the participants submission_code
    `predict` function accepts the natural language invocations as input, and returns
    the predicted commands along with confidences as output. For each invocation,
    `result_cnt` number of predicted commands are expected to be returned.
    Args:
        1. invocations : `list (str)` : list of `n_batch` (default 16) natural language invocations
        2. result_cnt : `int` : number of predicted commands to return for each invocation
    Returns:
        1. commands : `list [ list (str) ]` : a list of list of strings of shape (n_batch, result_cnt)
        2. confidences: `list[ list (float) ]` : confidences corresponding to the predicted commands
            confidence values should be between 0.0 and 1.0.
            Shape: (n_batch, result_cnt)
    """
    # OpenNMT translator options; models may be an ensemble (one path per file).
    # NOTE(review): n_best/beam_size are hard-coded to 5, so this implicitly
    # assumes result_cnt <= 5 — confirm before raising result_cnt.
    opt = Namespace(models=[
        os.path.join(model_dir, file) for file in model_file
    ], n_best=5,
        avg_raw_probs=False,
        alpha=0.0, batch_type='sents', beam_size=5,
        beta=-0.0, block_ngram_repeat=0, coverage_penalty='none', data_type='text', dump_beam='', fp32=True,
        gpu=-1, ignore_when_blocking=[], length_penalty='none', max_length=100, max_sent_length=None,
        min_length=0, output='/dev/null', phrase_table='', random_sampling_temp=1.0, random_sampling_topk=1,
        ratio=-0.0, replace_unk=True, report_align=False, report_time=False, seed=829, stepwise_penalty=False,
        tgt=None, verbose=False, tgt_prefix=None)
    translator = build_translator(opt, report_score=False)
    n_batch = len(invocations)
    commands = [
        [''] * result_cnt
        for _ in range(n_batch)
    ]
    # NOTE(review): confidence rows are hard-coded to length 5 regardless of
    # result_cnt; inconsistent with the commands shape when result_cnt != 5.
    confidences = [
        [1, 0, 0, 0, 0]
        for _ in range(n_batch)
    ]
    ################################################################################################
    #     Participants should add their codes to fill predict `commands` and `confidences` here    #
    ################################################################################################
    for idx, inv in enumerate(invocations):
        new_inv = tokenize_eng(inv)
        new_inv = ' '.join(new_inv)
        translated = translator.translate([new_inv], batch_size=1)
        for i in range(result_cnt):
            # translated = (scores, predictions); scores are log-probs.
            commands[idx][i] = translated[1][0][i]
            confidences[idx][i] = math.exp(translated[0][0][i].item()) / 2
        # Top prediction is always reported with full confidence.
        confidences[idx][0] = 1.0
    ################################################################################################
    #                               Participant code block ends                                    #
    ################################################################################################
    return commands, confidences
| StarcoderdataPython |
3275990 | <filename>0108-Convert-Sorted-Array-to-Binary-Search-Tree.py
class Solution:
    # LeetCode 108: Convert Sorted Array to Binary Search Tree.
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        """Build a height-balanced BST from an ascending-sorted list."""
        def devide(nums):
            # Use the middle element as the subtree root so the left and
            # right halves differ in size by at most one element.
            if len(nums) == 0:
                return None
            midPoint = len(nums) // 2
            left_list = nums[:midPoint]
            right_list = nums[midPoint+1:]
            return TreeNode(nums[midPoint], devide(left_list), devide(right_list))
        return devide(nums)
| StarcoderdataPython |
1638898 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.ipynb (unless otherwise specified).
__all__ = ['first_not_na']
# Internal Cell
from math import sqrt
from typing import Optional, Tuple
import numpy as np
from numba import njit # type: ignore
# Internal Cell
@njit
def _validate_rolling_sizes(window_size: int,
min_samples: Optional[int] = None) -> Tuple[int,int]:
# have to split the following if because of numba
if min_samples is None:
min_samples = window_size
if min_samples > window_size:
min_samples = window_size
return window_size, min_samples
# Signed-difference comparators: positive result means the relation holds.
@njit
def _gt(x: float, y: float) -> float:
    # > 0 iff x > y
    return x - y
@njit
def _lt(x: float, y: float) -> float:
    # > 0 iff x < y (negated _gt keeps a single source of truth)
    return -_gt(x, y)
# Cell
@njit
def first_not_na(input_array: np.ndarray) -> int:
    """Returns the index of the first non-na value in the array."""
    for index, element in enumerate(input_array):
        if not np.isnan(element):
            return index
    # All values are NaN: return the array length as a "not found" sentinel.
    return input_array.size | StarcoderdataPython |
1646457 | <reponame>mosiac1/OpenMetadata<gh_stars>1-10
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mixin class containing Pipeline specific methods
To be used by OpenMetadata class
"""
from typing import List
from metadata.generated.schema.api.data.createPipeline import CreatePipelineRequest
from metadata.generated.schema.entity.data.pipeline import (
Pipeline,
PipelineStatus,
Task,
)
from metadata.ingestion.ometa.client import REST
from metadata.ingestion.ometa.utils import ometa_logger
logger = ometa_logger()
class OMetaPipelineMixin:
    """
    OpenMetadata API methods related to the Pipeline Entity
    To be inherited by OpenMetadata
    """
    # REST client supplied by the inheriting OpenMetadata class.
    client: REST
    def add_pipeline_status(
        self, pipeline: Pipeline, status: PipelineStatus
    ) -> Pipeline:
        """
        Given a pipeline and a PipelineStatus, send it
        to the Pipeline Entity
        """
        # PUT against .../pipelines/{id}/status; server returns the updated entity.
        resp = self.client.put(
            f"{self.get_suffix(Pipeline)}/{pipeline.id.__root__}/status",
            data=status.json(),
        )
        return Pipeline(**resp)
    def add_task_to_pipeline(self, pipeline: Pipeline, *tasks: Task) -> Pipeline:
        """
        The background logic for this method is that during
        Airflow backend lineage, we compute one task at
        a time.
        Let's generalise a bit the approach by preparing
        a method capable of updating a tuple of tasks
        from the client.
        Latest changes leave all the task management
        to the client. Therefore, a Pipeline will only contain
        the tasks sent in each PUT from the client.
        """
        # Get the names of all incoming tasks
        updated_tasks_names = {task.name for task in tasks}
        # Check which tasks are currently in the pipeline but not being updated
        not_updated_tasks = []
        if pipeline.tasks:
            not_updated_tasks = [
                task for task in pipeline.tasks if task.name not in updated_tasks_names
            ]
        # All tasks are the union of the incoming tasks & the not updated tasks
        all_tasks = [*tasks, *not_updated_tasks]
        # Re-issue a full create-or-update with the merged task list; every
        # other field is copied unchanged from the current entity.
        updated_pipeline = CreatePipelineRequest(
            name=pipeline.name,
            displayName=pipeline.displayName,
            description=pipeline.description,
            pipelineUrl=pipeline.pipelineUrl,
            concurrency=pipeline.concurrency,
            pipelineLocation=pipeline.pipelineLocation,
            startDate=pipeline.startDate,
            service=pipeline.service,
            tasks=all_tasks,
            owner=pipeline.owner,
            tags=pipeline.tags,
        )
        return self.create_or_update(updated_pipeline)
    def clean_pipeline_tasks(self, pipeline: Pipeline, task_ids: List[str]) -> Pipeline:
        """
        Given a list of tasks, remove from the
        Pipeline Entity those that are not received
        as an input.
        e.g., if a Pipeline has tasks A, B, C,
        but we only receive A & C, we will
        remove the task B from the entity
        """
        # Keep only tasks whose name appears in task_ids; other fields unchanged.
        updated_pipeline = CreatePipelineRequest(
            name=pipeline.name,
            displayName=pipeline.displayName,
            description=pipeline.description,
            pipelineUrl=pipeline.pipelineUrl,
            concurrency=pipeline.concurrency,
            pipelineLocation=pipeline.pipelineLocation,
            startDate=pipeline.startDate,
            service=pipeline.service,
            tasks=[task for task in pipeline.tasks if task.name in task_ids],
            owner=pipeline.owner,
            tags=pipeline.tags,
        )
        return self.create_or_update(updated_pipeline)
| StarcoderdataPython |
3373333 | <gh_stars>10-100
import unittest
from disklist import DiskList
class TestDiskList(unittest.TestCase):
    """Exercise DiskList's list-API parity (operators, mutators, iteration)
    and its behavior under various cache sizes."""
    def test_add(self):
        """
        Test the + operator
        """
        dlist1 = DiskList()
        dlist2 = DiskList()
        dlist1.append('1')
        dlist1.append('2')
        dlist2.append('3')
        dlist2.append('4')
        dlist = dlist1 + dlist2
        self.assertTrue(len(dlist) == 4)
    def test_getitem(self):
        """
        Test the [] operator for getting items
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        self.assertTrue(dlist[0] == '0' and dlist[1] == '1' and dlist[2] == '2' and dlist[-1] == '3')
    def test_setitem(self):
        """
        Test the [] operator for setting items
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        dlist[0] = '10'
        self.assertTrue(dlist[0] == '10' and dlist[1] == '1' and dlist[2] == '2' and dlist[-1] == '3')
    def test_delitem(self):
        """
        Test the [] operator for deleting items
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        del dlist[0]
        self.assertTrue(dlist[0] == '1' and dlist[1] == '2' and dlist[-1] == '3')
    def test_remove(self):
        """
        Test the remove method
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        dlist.append('0')
        # remove() must only drop the first matching occurrence.
        dlist.remove('0')
        self.assertTrue(dlist[0] == '1' and dlist[1] == '2' and dlist[2] == '3' and dlist[3] == '0')
    def test_pop_without_index(self):
        """
        Test the pop method without index
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        result = dlist.pop()
        self.assertTrue(dlist[0] == '0' and dlist[1] == '1' and dlist[2] == '2' and result == '3' and len(dlist) == 3)
    def test_pop_with_index(self):
        """
        Test the pop method with an index
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        result = dlist.pop(2)
        self.assertTrue(dlist[0] == '0' and dlist[1] == '1' and dlist[2] == '3' and result == '2' and len(dlist) == 3)
    def test_index(self):
        """
        Test the index method
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        self.assertTrue(dlist.index('2') == 2)
    def test_index_with_bounds(self):
        """
        Test the index method with bounds
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        # '2' lives outside the [0, 1) search window, so index must raise.
        self.assertRaises(ValueError, lambda: dlist.index('2', 0, 1))
    def test_clear(self):
        """
        Test the clear method
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.clear()
        self.assertTrue(len(dlist) == 0)
    def test_len(self):
        """
        Test the len() function
        """
        dlist = DiskList()
        self.assertTrue(len(dlist) == 0)
    def test_append(self):
        """
        Test appending new items
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        self.assertTrue(len(dlist) == 4)
    def test_extend(self):
        """
        Test the extend method
        """
        dlist = DiskList()
        dlist.extend(['0', '1', '2', '3'])
        self.assertTrue(len(dlist) == 4)
    def test_insert(self):
        """
        Test inserting new items
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        dlist.insert(1, '10')
        self.assertTrue(len(dlist) == 5 and dlist[1] == '10')
    def test_iteration(self):
        """
        Test iterating through the DiskList
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        result_list = []
        for item in dlist:
            result_list.append(item)
        self.assertTrue(
            result_list[0] == '0' and
            result_list[1] == '1' and
            result_list[2] == '2' and
            result_list[3] == '3'
        )
    def test_count(self):
        """
        Test count method
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('3')
        dlist.append('3')
        self.assertTrue(dlist.count('1') == 1 and dlist.count('3') == 2 and dlist.count('1337') == 0)
    def test_clear_not_empty(self):
        """
        Test clear method
        """
        dlist = DiskList()
        dlist.append('0')
        dlist.append('1')
        dlist.append('2')
        dlist.append('3')
        dlist.clear()
        self.assertTrue(len(dlist) == 0)
    def test_clear_empty(self):
        """
        Test clear method when list is empty
        """
        dlist = DiskList()
        dlist.clear()
        self.assertTrue(len(dlist) == 0)
    def test_use_cache(self):
        """
        Test a cache smaller than the list: repeated full iterations and
        slicing must still yield the original items in order.
        """
        dlist = DiskList(cache_size=10)
        l1 = []
        l2 = []
        l3 = []
        l4 = []
        for i in range(100):
            dlist.append(i)
            l1.append(i)
        for i in dlist:
            l2.append(i)
        for i in dlist:
            l3.append(i)
        l4 = dlist[0:50]
        for i in dlist[50:]:
            l4.append(i)
        self.assertTrue(all([i == j and j == k and k == l for i, j, k, l in zip(l1, l2, l3, l4)]))
    def test_use_cache_bigger_than_list(self):
        """
        Test a cache larger than the list (everything fits in memory).
        """
        dlist = DiskList(cache_size=1000)
        l1 = []
        l2 = []
        l3 = []
        l4 = []
        for i in range(100):
            dlist.append(i)
            l1.append(i)
        for i in dlist:
            l2.append(i)
        for i in dlist:
            l3.append(i)
        l4 = dlist[0:50]
        for i in dlist[50:]:
            l4.append(i)
        self.assertTrue(all([i == j and j == k and k == l for i, j, k, l in zip(l1, l2, l3, l4)]))
    def test_use_cache_of_1(self):
        """
        Test the degenerate single-entry cache.
        """
        dlist = DiskList(cache_size=1)
        l1 = []
        l2 = []
        l3 = []
        l4 = []
        for i in range(100):
            dlist.append(i)
            l1.append(i)
        for i in dlist:
            l2.append(i)
        for i in dlist:
            l3.append(i)
        l4 = dlist[0:50]
        for i in dlist[50:]:
            l4.append(i)
        self.assertTrue(all([i == j and j == k and k == l for i, j, k, l in zip(l1, l2, l3, l4)]))
    def test_use_cache_of_0(self):
        """
        Test with caching disabled entirely (cache_size=0).
        """
        dlist = DiskList(cache_size=0)
        l1 = []
        l2 = []
        l3 = []
        l4 = []
        for i in range(100):
            dlist.append(i)
            l1.append(i)
        for i in dlist:
            l2.append(i)
        for i in dlist:
            l3.append(i)
        l4 = dlist[0:50]
        for i in dlist[50:]:
            l4.append(i)
        self.assertTrue(all([i == j and j == k and k == l for i, j, k, l in zip(l1, l2, l3, l4)]))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3256776 | <filename>test/config/redfish1_0_config.py<gh_stars>100-1000
from settings import *
from on_http_redfish_1_0 import Configuration, ApiClient
config = Configuration()
config.host = 'http://{0}:{1}'.format(HOST_IP,HOST_PORT)
config.host_authed = 'https://{0}:{1}'.format(HOST_IP, HOST_PORT_AUTH)
config.verify_ssl = False
config.debug = False
config.logger_format = LOGFORMAT
config.api_root = '/redfish/v1'
config.api_client = ApiClient(host=config.host + config.api_root)
config.auth_enabled = False
for key,elem in config.logger.iteritems():
elem.setLevel(LOGLEVELS[LOGGER_LVL])
| StarcoderdataPython |
def proteins(strand):
    """Translate an RNA strand into the list of proteins it codes for.

    Reads *strand* three bases (one codon) at a time, mapping each codon to
    its protein name via the standard RNA codon table. Translation stops at
    the first STOP codon; the STOP itself is not included in the result.

    Args:
        strand: RNA sequence such as "AUGUUUUCU".

    Returns:
        list[str]: protein names in translation order (empty for an
        empty strand).

    Raises:
        KeyError: if the strand contains an unknown codon.
    """
    codon_to_protein = {
        "AUG": "Methionine",
        "UUU": "Phenylalanine", "UUC": "Phenylalanine",
        "UUA": "Leucine", "UUG": "Leucine",
        "UCU": "Serine", "UCC": "Serine", "UCA": "Serine", "UCG": "Serine",
        "UAU": "Tyrosine", "UAC": "Tyrosine",
        "UGU": "Cysteine", "UGC": "Cysteine",
        "UGG": "Tryptophan",
        "UAA": "STOP", "UAG": "STOP", "UGA": "STOP",
    }
    result = []
    for i in range(0, len(strand), 3):
        protein = codon_to_protein[strand[i:i + 3]]
        if protein == "STOP":
            break
        result.append(protein)
    return result
| StarcoderdataPython |
3223407 | version = "0.11.33"
| StarcoderdataPython |
69928 | from flask import Flask, url_for, redirect, request, Markup, render_template, session, flash
import json, datetime
import config
# Create the WSGI application and load settings from config.py.
app = Flask(__name__)
app.config.from_object('config')
# DISABLE DEBUG FOR PRODUCTION!
app.debug = False
def clear_session():
    """Reset per-user session state without destroying the session itself."""
    session['last_action'] = None
    # using session.clear() nulls everything, including the session itself, so you have to check for session AND session['key'] or pop(None) individual session keys
    # session.clear()
# Check credentials, modify session, etc.
@app.before_request
def before_request():
    """Runs before every request: track session start and last activity."""
    if 'session_start' not in session:
        session['session_start'] = datetime.datetime.now()
    # Refreshed on every request regardless of session age.
    session['last_action'] = datetime.datetime.now()
@app.route('/index')
@app.route('/')
def index():
    """Render the home page (served at both / and /index)."""
    return render_template('home.html')
@app.route('/search', methods=['GET'])
def search():
    """Render the search page; flashes a warning when ?query= is missing/empty."""
    searchword = request.args.get('query', '')
    if searchword == '':
        flash('No query value was provided.')
    return render_template('search.html', query_return=searchword)
@app.route('/logout')
def logout():
    """Reset the session state and send the user back to the home page."""
    clear_session()
    return redirect(url_for('index'))
@app.errorhandler(404)
def not_found(error):
    """Render the shared error page for HTTP 404."""
    return render_template('error.html', error_info='404: Page not found')
@app.errorhandler(413)
def request_entity_too_large(error):
    """Render the shared error page for HTTP 413 (upload too large).

    Fix: renamed from ``not_found``, which redefined (shadowed) the 404
    handler's function name at module level. Flask registers handlers via
    the decorator so routing was unaffected, but the redefinition hid the
    404 function and trips linters (F811).
    """
    return render_template('error.html', error_info='413: Upload size exceeded')
@app.errorhandler(500)
def internal_server_error(error):
    """Render the shared error page for HTTP 500."""
    # This may pass system errors you do not wish users to see
    return render_template('error.html', error_info=error.args)
# What version of python is active?
# import sys
# @app.route('/pyversion')
# def pyversion():
# return sys.version
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
3376218 | #!/usr/bin/env python3
import unittest, os
from util.securechannel import SecureChannel, SecureError
from util import secp256k1
AID = "B00B5111CE01"  # applet identifier sent in the SELECT APDU
APPLET = "toys.BlindOracleApplet"  # fully qualified applet class (simulator only)
CLASSDIR = "BlindOracle"  # simulator build/class directory
# Choose the transport backend at import time: an in-process simulator by
# default, or real hardware when TEST_MODE=card is exported.
mode = os.environ.get('TEST_MODE', "simulator")
if mode=="simulator":
    from util.simulator import Simulator, ISOException
    sim = Simulator(AID, APPLET, CLASSDIR)
elif mode=="card":
    from util.card import Card, ISOException
    sim = Card(AID)
else:
    raise RuntimeError("Not supported")
def setUpModule():
    """unittest hook: open the simulator/card connection once per module."""
    sim.connect()
def tearDownModule():
    """unittest hook: close the connection after all tests in the module."""
    sim.disconnect()
# APDU command headers (CLA INS P1 P2).
SELECT = b"\x00\xA4\x04\x00"  # ISO 7816-4 SELECT by AID (P1=04)
GET_RANDOM = b"\xB0\xB1\x00\x00"  # applet-specific instruction (CLA=B0, INS=B1)
# NOTE(review): this literal was mangled by dataset anonymization ("<KEY>");
# restore the real GET_PUBKEY header from upstream before running.
GET_PUBKEY = b"\<KEY>"
def encode(data):
    """Prefix *data* with its one-byte length (APDU Lc field framing)."""
    return len(data).to_bytes(1, "big") + data
class SecureAppletTest(unittest.TestCase):
    """End-to-end tests for the BlindOracle applet over a secure channel:
    applet selection, BIP32-style root loading, key derivation and signing."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): this __init__ only forwards to the base class and
        # could be removed.
        super().__init__(*args, **kwargs)
    def get_secure_channel(self, open=True):
        # NOTE(review): the `open` parameter is never used — the channel is
        # always opened.
        sc = SecureChannel(sim)
        sc.open()
        self.assertEqual(sc.is_open, True)
        return sc
    def test_select(self):
        # selecting applet
        data = SELECT+encode(bytes.fromhex(AID))
        res = sim.request(data)
        # Successful SELECT returns an empty response body.
        self.assertEqual(res, b"")
    def test_root(self):
        sc = self.get_secure_channel()
        # check derivation from seed
        seed = bytes.fromhex("ae361e712e3fe66c8f1d57192d80abe076137c917d37cee7da8ed152e993226df0ced36f35c0967f96a5291f35035e87be9b3df626e6eb96ad2b59fbd9c503f4")
        expect = bytes.fromhex("5d85539e0995941e1dafd9fc27df3efea381461c13cfd245137b43bb37c29c39025a94ecdc430e6508ea7a432d1ae30e1d656194a028848f652a08bc43439b8561")
        res = sc.request(b"\x10\x00"+seed)
        self.assertEqual(res, expect)
        # check loading of xprv
        # chain code + 00 + prvkey
        root = bytes.fromhex("5d85539e0995941e1dafd9fc27df3efea381461c13cfd245137b43bb37c29c39004cfa6a4f047f2c3fcad170a3a5f0ef254f0bbe2b2bec7554043c145dcc779428")
        res = sc.request(b"\x10\x01"+root)
        self.assertEqual(res, expect)
        # check random xprv
        res = sc.request(b"\x10\x7d"+root)
        # 32-byte chain code + 33-byte compressed pubkey.
        self.assertEqual(len(res), 65)
        # Compressed-point prefix must be 0x02 or 0x03.
        self.assertTrue(res[32] >= 2 and res[32] <= 3)
        sc.close()
    def test_derive(self):
        sc = self.get_secure_channel()
        # load seed
        seed = bytes.fromhex("ae361e712e3fe66c8f1d57192d80abe076137c917d37cee7da8ed152e993226df0ced36f35c0967f96a5291f35035e87be9b3df626e6eb96ad2b59fbd9c503f4")
        res = sc.request(b"\x10\x00"+seed)
        # m/44h/0h/1h/0/55
        path = [44+0x80000000, 0x80000000, 0x80000001, 0, 55]
        bpath = b"".join(p.to_bytes(4,'big') for p in path)
        res = sc.request(b"\x11\x01"+b"\x00"+bpath)
        expect = bytes.fromhex("3902805bec66b8546bae3984ee186dd9d9620cead3d242bf8893e984aa472912033156b64844e8ce5f3d1d52092c9809a75bcbac93bfc9fc5b3a543842fb4d3558")
        self.assertEqual(res, expect)
        # derive first two indexes
        res = sc.request(b"\x11\x01"+b"\x00"+bpath[:8])
        # derive the rest
        res = sc.request(b"\x11\x01"+b"\x01"+bpath[8:])
        self.assertEqual(res, expect)
        # check it's stored as child
        res = sc.request(b"\x11\x02")
        self.assertEqual(res, expect)
        sc.close()
    def test_sign(self):
        sc = self.get_secure_channel()
        # load seed
        seed = bytes.fromhex("ae361e712e3fe66c8f1d57192d80abe076137c917d37cee7da8ed152e993226df0ced36f35c0967f96a5291f35035e87be9b3df626e6eb96ad2b59fbd9c503f4")
        res = sc.request(b"\x10\x00"+seed)
        # message to sign
        msg = b"5"*32
        # sign with root
        sec = bytes.fromhex("025a94ecdc430e6508ea7a432d1ae30e1d656194a028848f652a08bc43439b8561")
        pub = secp256k1.ec_pubkey_parse(sec)
        res = sc.request(b"\x11\x03"+msg+b"\x00")
        # Applet returns a DER-encoded ECDSA signature; verify against pubkey.
        sig = secp256k1.ecdsa_signature_parse_der(res)
        self.assertTrue(secp256k1.ecdsa_verify(sig, msg, pub))
        # sign with current
        # m/44h/0h/1h/0/55
        path = [44+0x80000000, 0x80000000, 0x80000001, 0, 55]
        bpath = b"".join(p.to_bytes(4,'big') for p in path)
        res = sc.request(b"\x11\x01"+b"\x00"+bpath)
        sec = bytes.fromhex("033156b64844e8ce5f3d1d52092c9809a75bcbac93bfc9fc5b3a543842fb4d3558")
        pub = secp256k1.ec_pubkey_parse(sec)
        res = sc.request(b"\x11\x03"+msg+b"\x01")
        sig = secp256k1.ecdsa_signature_parse_der(res)
        self.assertTrue(secp256k1.ecdsa_verify(sig, msg, pub))
        # derive and sign
        # derive first two indexes
        current = sc.request(b"\x11\x01"+b"\x00"+bpath[:8])
        # derive the rest and sign
        res = sc.request(b"\x11\x04"+msg+b"\x01"+bpath[8:])
        sig = secp256k1.ecdsa_signature_parse_der(res)
        self.assertTrue(secp256k1.ecdsa_verify(sig, msg, pub))
        # check that current did not change
        res = sc.request(b"\x11\x02")
        self.assertEqual(res, current)
        sc.close()
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
4828019 | #!/usr/bin/python
##
# This module defines the ProMP class, which is the user-facing class for deploying Probabilistic Movement Primitives.
# The code for conditioning the ProMP is taken from the code by <NAME> at https://github.com/sebasutp/promp
# TODO: Implement the EM based learning with NIW prior
#
# @author <NAME> <<EMAIL>>, TU Darmstadt
import intprim.constants
import scipy.linalg
import numpy as np
import pickle
import sklearn.preprocessing
##
# The ProMP class is responsible for training an Probabilistic Movement Primitive model from demonstrations as well as performing run-time inference.
# Support for importing and exporting a trained model as well
#
class ProMP(object):
##
# The initialization method for ProMP.
#
# @param basis_model The basis model corresponding to this state space.
# @param scaling_groups If provided, used to indicate which degrees of freedom should be scaled as a group.
#
    def __init__(self, basis_model, scaling_groups = None):
        self.basis_model = basis_model
        self.scaling_groups = scaling_groups
        # One row per trained demonstration once add_demonstration runs.
        self.basis_weights = np.array([], dtype = intprim.constants.DTYPE)
        self.prior_fitted = False
        self.scalers = []
        # Scaler setup is deliberately deferred; init_scalers() must be
        # called explicitly if scaling is used.
        # self.init_scalers()
##
# Exports the internal state information from this model.
# Allows one to export a trained model and import it again without requiring training.
#
# @param file_name The name of the export file.
#
    def export_data(self, file_name):
        print("Exporting data to: " + str(file_name))
        # Only the learned weights and scaling state are persisted; the basis
        # model itself is supplied again at construction time on import.
        data_struct = {
            "basis_weights" : self.basis_weights,
            "scaling_groups" : self.scaling_groups,
            "scalers" : self.scalers
        }
        with open(file_name, 'wb') as out_file:
            pickle.dump(data_struct, out_file, pickle.HIGHEST_PROTOCOL)
##
# Imports the internal state information from an export file.
# Allows one to import a trained model without requiring training.
#
# @param file_name The name of the import file.
#
    def import_data(self, file_name):
        print("Importing data from: " + str(file_name))
        # SECURITY: pickle.load executes arbitrary code from the file; only
        # import model files from trusted sources.
        with open(file_name, 'rb') as in_file:
            data_struct = pickle.load(in_file)
            self.basis_weights = np.array(data_struct["basis_weights"])
            # Older exports predate the scaler fields; tolerate their absence.
            try:
                self.scaling_groups = data_struct["scaling_groups"]
                self.scalers = data_struct["scalers"]
            except KeyError:
                print("No scalers found during import!")
##
# Internal method which initializes data scalers.
#
    def init_scalers(self):
        # One MinMaxScaler per scaling group; no-op when scaling is disabled.
        if(self.scaling_groups is not None):
            for group in self.scaling_groups:
                self.scalers.append(sklearn.preprocessing.MinMaxScaler())
##
# Iteratively fits data scalers.
# This must be called on all training demonstrations before fitting, if scaling is used.
#
# @param trajectory Matrix of dimension D x T containing a demonstration, where T is the number of timesteps and D is the dimension of the measurement space.
#
    def compute_standardization(self, trajectory):
        if(len(self.scalers) > 0):
            # N x M matrix, N is degrees of freedom, M is number of time steps
            if(type(trajectory) != np.ndarray):
                raise TypeError("Trajectory must be a numpy array.")
            if(len(trajectory) != self.basis_model.num_observed_dof):
                raise ValueError("Trajectory contains an invalid number of degrees of freedom.")
            if(self.scaling_groups is not None):
                for group, scaler in zip(self.scaling_groups, self.scalers):
                    # partial_fit accumulates min/max across demonstrations;
                    # all DoFs in a group share one scaler, hence the flatten.
                    scaler.partial_fit(trajectory[group, :].reshape(-1, 1))
        else:
            print("Skipping basis standardization...")
##
# Iteratively adds a demonstration to the model.
# The demonstration is decomposed into the latent space and the weights are stored internally.
#
# @param trajectory Matrix of dimension D x T containing a demonstration, where T is the number of timesteps and D is the dimension of the measurement space.
#
    def add_demonstration(self, trajectory):
        # N x M matrix, N is degrees of freedom, M is number of time steps
        if(type(trajectory) != np.ndarray):
            raise TypeError("Trajectory must be a numpy array.")
        if(len(trajectory) != self.basis_model.num_observed_dof):
            raise ValueError("Trajectory contains an invalid number of degrees of freedom. Got " + str(len(trajectory)) + " but expected " + str(self.basis_model.num_observed_dof))
        demonstration_weights = self.basis_transform(trajectory)
        # First demonstration: hstack onto the empty 1-D array yields a 1-D
        # row; subsequent demonstrations vstack into a 2-D (dem x weights) matrix.
        if(self.basis_weights.shape[0] == 0):
            self.basis_weights = np.hstack([self.basis_weights, demonstration_weights])
        else:
            self.basis_weights = np.vstack([self.basis_weights, demonstration_weights])
##
# Gets the mean trajectory of all trained demonstrations.
#
# @param num_samples The length of the generated mean trajectory
#
# @return mean_trajectory Matrix of dimension D x num_samples containing the mean trajectory.
#
    def get_mean_trajectory(self, num_samples = intprim.constants.DEFAULT_NUM_SAMPLES):
        # Project the mean latent weights back to measurement space over a
        # normalized phase domain [0, 1]; the variance is unused here.
        mean, var = self.get_basis_weight_parameters()
        domain = np.linspace(0, 1, num_samples, dtype = intprim.constants.DTYPE)
        return self.basis_inverse_transform(domain, mean)
##
# Gets the approximated trajectory for the given demonstration.
# This is obtained by transforming the demonstration to the latent space and then projecting it back to measurement space.
#
# @param trajectory Matrix of dimension D x T containing a demonstration, where T is the number of timesteps and D is the dimension of the measurement space.
# @param num_samples The length of the generated approximate trajectory
#
# @return approximate_trajectory Matrix of dimension D x num_samples containing the approximate trajectory.
#
    def get_approximate_trajectory(self, trajectory, num_samples = intprim.constants.DEFAULT_NUM_SAMPLES, deriv = False):
        # N x M matrix, N is degrees of freedom, M is number of time steps
        if(type(trajectory) != np.ndarray):
            raise TypeError("Trajectory must be a numpy array.")
        if(len(trajectory) != self.basis_model.num_observed_dof):
            raise ValueError("Trajectory contains an invalid number of degrees of freedom.")
        # Round-trip: decompose into basis weights, then project back onto a
        # normalized phase domain (optionally via the basis derivatives).
        basis_weights = self.basis_transform(trajectory)
        domain = np.linspace(0, 1, num_samples, dtype = intprim.constants.DTYPE)
        return self.basis_inverse_transform(domain, basis_weights, deriv)
##
# Gets the approximated trajectory derivative for the given demonstration.
# This is obtained by transforming the demonstration to a latent space composed of the basis function derivatives and then projecting it back to measurement space.
#
# @param trajectory Matrix of dimension D x T containing a demonstration, where T is the number of timesteps and D is the dimension of the measurement space.
# @param num_samples The length of the generated approximate trajectory
#
# @return approximate_trajectory Matrix of dimension D x num_samples containing the approximate trajectory.
#
    def get_approximate_trajectory_derivative(self, trajectory, num_samples = intprim.constants.DEFAULT_NUM_SAMPLES):
        """Reconstruct the demonstration using basis-function derivatives.

        Delegates to get_approximate_trajectory with deriv = True, yielding the
        time derivative of the approximated trajectory.
        """
        return self.get_approximate_trajectory(trajectory, num_samples, deriv = True)
##
# Gets the probability distribution of the trained demonstrations.
#
# @param trajectory Matrix of dimension D x T containing a demonstration, where T is the number of timesteps and D is the dimension of the measurement space.
# @param num_samples The length of the generated distribution.
#
# @return mean Matrix of dimension D x num_samples containing the mean of the distribution for every degree of freedom.
# @return upper_bound Matrix of dimension D x num_samples containing the mean + std of the distribution for every degree of freedom.
# @return lower_bound Matrix of dimension D x num_samples containing the mean - std of the distribution for every degree of freedom.
#
    def get_probability_distribution(self, num_samples = intprim.constants.DEFAULT_NUM_SAMPLES):
        """Compute the per-phase mean and +/- one-standard-deviation envelope of
        the trained demonstrations in measurement space.

        Returns:
            (trajectory, upper_bound, lower_bound), each D x num_samples, where
            D is the number of observed degrees of freedom.
        """
        trajectory = np.zeros((self.basis_model.num_observed_dof, num_samples))
        upper_bound = np.zeros((self.basis_model.num_observed_dof, num_samples))
        lower_bound = np.zeros((self.basis_model.num_observed_dof, num_samples))
        domain = np.linspace(0, 1, num_samples, dtype = intprim.constants.DTYPE)
        for idx in range(num_samples):
            # In rare instances, projecting the covariance matrix can produce negative variance values in the diagonals of the projected matrix.
            # Therefore, we instead project each demonstration and manually calculate the empirical mean/covariance.
            projected_states = []
            for dem_idx in range(self.basis_weights.shape[0]):
                projected_states.append(self.basis_model.apply_coefficients(domain[idx], self.basis_weights[dem_idx, :]))
            projected_states = np.array(projected_states)
            dist_mean = np.mean(projected_states, axis = 0)
            dist_var = np.cov(projected_states.T)
            if(self.scaling_groups is not None):
                # Undo feature scaling: pre/post-multiply the covariance by
                # diag(1/scale) and inverse-transform the mean back to raw units.
                var_scale = np.ones(dist_mean.shape)
                for group, scaler in zip(self.scaling_groups, self.scalers):
                    var_scale[group] = 1.0 / scaler.scale_
                    dist_mean[group] = scaler.inverse_transform(dist_mean[group].reshape(-1, 1)).flatten()
                var_scale = np.diag(var_scale)
                dist_var = np.dot(var_scale, dist_var).dot(var_scale.T)
            trajectory[:, idx] = dist_mean
            # Per-DOF standard deviation comes from the covariance diagonal.
            for dof_index in range(0, self.basis_model.num_observed_dof):
                std_dev = dist_var[dof_index][dof_index] ** 0.5
                upper_bound[dof_index, idx] = dist_mean[dof_index] + std_dev
                lower_bound[dof_index, idx] = dist_mean[dof_index] - std_dev
        return trajectory, upper_bound, lower_bound
##
# Transforms the given trajectory from measurement space into the latent basis space.
#
# @param trajectory Matrix of dimension D x T containing a demonstration, where T is the number of timesteps and D is the dimension of the measurement space.
#
# @return transformed_state Vector of dimension B containing the transformed trajectory.
#
    def basis_transform(self, trajectory):
        """Fit basis weights to a D x T demonstration via linear least squares.

        Applies per-group feature scaling first (if configured), then regresses
        the scaled trajectory onto the basis functions over a uniform phase
        domain [0, 1]. Returns a vector of dimension B of basis weights.
        """
        if(self.scaling_groups is not None):
            scaled_trajectory = np.zeros(trajectory.shape)
            # Scalers expect a column vector, hence the reshape round-trip.
            for group, scaler in zip(self.scaling_groups, self.scalers):
                scaled_trajectory[group, :] = scaler.transform(trajectory[group, :].reshape(-1, 1)).reshape(trajectory[group, :].shape)
            trajectory = scaled_trajectory
        # Phase is assumed to progress uniformly from 0 to 1 over the demonstration.
        domain = np.linspace(0, 1, len(trajectory[0]), dtype = intprim.constants.DTYPE)
        return self.basis_model.fit_basis_functions_linear_closed_form(domain, trajectory.T)
##
# Transforms the given basis space weights to measurement space for the given phase values.
#
# @param x Vector of dimension T containing the phase values that the basis space weights should be projected at.
# @param weights Vector of dimension B containing the basis space weights.
# @param deriv True if the basis weights should be transformed with basis function derivatives, False for normal basis functions.
#
# @return transformed_trajectory Matrix of dimension D x T containing the transformed trajectory.
#
def basis_inverse_transform(self, x, weights, deriv = False):
trajectory = np.zeros((self.basis_model.num_observed_dof, x.shape[0]), dtype = intprim.constants.DTYPE)
for idx in range(x.shape[0]):
trajectory[:, idx] = self.basis_model.apply_coefficients(x[idx], weights, deriv)
if(self.scaling_groups is not None):
for group, scaler in zip(self.scaling_groups, self.scalers):
trajectory[group, :] = scaler.inverse_transform(trajectory[group, :].reshape(-1, 1)).reshape(trajectory[group, :].shape)
return trajectory
##
# Gets the mean and covariance for the trained demonstrations.
#
# @return mean Vector of dimension B containing the sample mean of the trained basis weights.
# @return var Matrix of dimension B x B containing the sample covariance of the trained basis weights.
#
def get_basis_weight_parameters(self):
mean = np.mean(self.basis_weights, axis = 0)
if(self.basis_weights.shape[0] > 1):
var = np.cov(self.basis_weights, rowvar = False)
else:
var = None
return mean, var
##
# Performs inference over the given time duration returns a probable trajectory.
#
# @param times Vector of dimension num_samples containing times at which to generate the trajectory.
    # @param mean Vector of dimension B containing the sample mean of the basis weights.
# @param var Matrix of dimension B x B containing the sample covariance of the basis weights.
#
# @return new_trajectory Matrix of dimension D x num_samples containing the inferred trajectory.
# @return weights Vector of dimension B containing the weights used to infer the trajectory.
#
def generate_probable_trajectory(self, times, mean=None, var=None):
_mean, _var = self.get_basis_weight_parameters()
if mean is None:
mean = _mean
if var is None:
var = _var
weights = np.random.multivariate_normal(mean, var)
new_trajectory = self.basis_inverse_transform(times, weights)
return new_trajectory, weights
##
# Get the PromP weights after conditioning to reach a particular joint configuration.
#
# @param t Scalar or Vector of length num_samples containing the time phase(s) at which the required joint configuration should be reached.
    # @param mean_q Vector of dimension D containing the mean of the required joint configuration to be reached.
    # @param var_q Matrix of dimension B x B containing the sample covariance of the required joint configuration to be reached.
    # @param mean_w Vector of dimension B containing the sample mean of the basis weights.
# @param var_w Matrix of dimension B x B containing the sample covariance of the basis weights.
#
# @return mean_w Vector of dimension B containing the sample mean of the basis weights after conditioning.
# @return var_w Matrix of dimension B x B containing the sample covariance of the basis weights after conditioning.
#
def get_conditioned_weights(self, t, mean_q, var_q=None, mean_w=None, var_w=None):
if isinstance(t, (list, tuple, np.ndarray)):
t = np.array(t)
else: # t is scalar
t = np.array([float(t)])
if var_q is None:
var_q = np.eye(len(mean_q))*1e-7
basis_funcs = self.basis_model.get_block_diagonal_basis_matrix(t)
d,lw = basis_funcs.shape
_mean_w, _var_w = self.get_basis_weight_parameters()
if mean_w is None:
mean_w = _mean_w
if var_w is None:
var_w = _var_w
tmp1 = np.dot(var_w, basis_funcs)
K = tmp1.dot(np.linalg.inv(var_q + basis_funcs.T.dot(tmp1)))
mean_w += K.dot(mean_q - basis_funcs.T.dot(mean_w))
var_w -= K.dot(basis_funcs.T.dot(var_w))
return mean_w, var_w
##
# Get the marginal distribution of the learnt trajectory at a given time.
#
# @param t Scalar or Vector of length num_samples containing the time phase(s) at which the required joint configuration should be reached.
    # @param mean_w Vector of dimension B containing the sample mean of the basis weights.
# @param var_w Matrix of dimension B x B containing the sample covariance of the basis weights.
#
# @return mean_q Vector of dimension D containing the mean of the marginal distribution at the given time.
# @return var_q Matrix of dimension B x B containing the sample covariance of the marginal distribution at the given time.
#
def get_marginal(self, t, mean_w=None, var_w=None):
if isinstance(t, (list, tuple, np.ndarray)):
t = np.array(t)
else: # t is scalar
t = np.array([float(t)])
basis_funcs = self.basis_model.get_block_diagonal_basis_matrix(t)
d,lw = basis_funcs.shape
_mean_w, _var_w = self.get_basis_weight_parameters()
if mean_w is None:
mean_w = _mean_w
if var_w is None:
var_w = _var_w
var_q = np.dot(basis_funcs.T, np.dot(var_w, basis_funcs))
mean_q = np.dot(basis_funcs.T, mean_w)
return mean_q, var_q
| StarcoderdataPython |
1705936 | <reponame>ssin122/test-h
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from h.groups.util import WorldGroup
from h.services.groupfinder import groupfinder_service_factory
from h.services.groupfinder import GroupfinderService
class TestGroupfinderService(object):
    """Unit tests for GroupfinderService.find and its request-scoped cache."""
    def test_returns_correct_group(self, svc, factories):
        """find() returns the persisted group matching the given pubid."""
        group = factories.Group()
        assert svc.find(group.pubid) == group
    def test_returns_correct_group_for_world(self, svc):
        """The special '__world__' pubid resolves to the WorldGroup singleton."""
        group = svc.find('__world__')
        assert isinstance(group, WorldGroup)
    def test_sets_auth_domain_on_world_group(self, svc):
        """The world group is constructed with the service's auth domain."""
        group = svc.find('__world__')
        assert group.auth_domain == 'example.com'
    def test_returns_none_when_not_found(self, svc, factories):
        """An unknown pubid yields None rather than raising."""
        factories.Group()
        assert svc.find('bogus') is None
    def test_caches_groups(self, svc, factories, db_session):
        """A found group is cached: deleting it without commit does not evict it."""
        group = factories.Group()
        pubid = group.pubid
        svc.find(group.pubid)
        db_session.delete(group)
        db_session.flush()
        group = svc.find(pubid)
        assert group is not None
        assert group.pubid == pubid
    def test_flushes_cache_on_session_commit(self, svc, factories, db_session):
        """Committing the session invalidates the cache, so deletions are seen."""
        group = factories.Group()
        pubid = group.pubid
        svc.find(pubid)
        db_session.delete(group)
        db_session.commit()
        group = svc.find(pubid)
        assert group is None
    @pytest.fixture
    def svc(self, db_session):
        # Service under test, bound to the test DB session and a fixed domain.
        return GroupfinderService(db_session, 'example.com')
class TestGroupfinderServiceFactory(object):
    """Tests for the groupfinder service factory wiring."""

    def test_returns_groupfinder_service(self, pyramid_request):
        service = groupfinder_service_factory(None, pyramid_request)
        assert isinstance(service, GroupfinderService)

    def test_provides_database_session(self, pyramid_request):
        service = groupfinder_service_factory(None, pyramid_request)
        assert service.session == pyramid_request.db

    def test_provides_auth_domain(self, pyramid_request):
        service = groupfinder_service_factory(None, pyramid_request)
        assert service.auth_domain == pyramid_request.auth_domain
| StarcoderdataPython |
1711061 | <filename>urllink2.py
import urllib.request, urllib.parse, urllib.error
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl

# Build an SSL context that skips certificate verification so the scraper
# also works against sites with self-signed or expired certificates.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Fetch the page named by the user and parse it with BeautifulSoup.
url = input("Enter- ")
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')

# Report every anchor tag along with its href, text content and attributes.
for anchor in soup('a'):
    print("Tag: ", anchor)
    print("URL: ", anchor.get('href', None))
    print('Contents: ', anchor.contents[0])
    print('Attrs: ', anchor.attrs)
3342093 | # Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import importlib.metadata
from typing import List
import cpo
def get_distribution_package_name() -> str:
    """Returns the name of the distribution package providing the "cpo" top-level package

    Returns
    -------
    str
        name of the distribution package providing the "cpo" top-level package
    """
    # Map each top-level package name to the distribution package(s) that
    # declare it in their top_level.txt metadata.
    packages_to_distribution_packages_dict: collections.defaultdict[str, List[str]] = collections.defaultdict(list)
    for distribution in importlib.metadata.distributions():
        file_contents = distribution.read_text("top_level.txt")
        # Some distributions ship no top_level.txt; skip them.
        if file_contents is None:
            continue
        for package in file_contents.split():
            distribution_package_name = distribution.metadata["Name"]
            # The second check is required as importlib.metadata.distributions()
            # returns the name of this distribution package twice for unknown
            # reasons when running unit tests.
            if (package not in packages_to_distribution_packages_dict) or (
                distribution_package_name not in packages_to_distribution_packages_dict[package]
            ):
                packages_to_distribution_packages_dict[package].append(distribution_package_name)
    package_name = cpo.__package__
    # Fail loudly if the mapping is missing or ambiguous for our package.
    if package_name not in packages_to_distribution_packages_dict:
        raise Exception(
            f"Distribution package name could not be identified (no distribution package provides a top-level package "
            f"named '{package_name}')"
        )
    if len(packages_to_distribution_packages_dict[package_name]) != 1:
        raise Exception(
            f"Distribution package name could not be identified (more than one distribution package provides a "
            f"top-level package named '{package_name}')"
        )
    return packages_to_distribution_packages_dict[package_name][0]
| StarcoderdataPython |
4835507 | <reponame>suetAndTie/face-generator
'''
train.py
Modified from https://github.com/cs230-stanford/cs230-code-examples/blob/master/pytorch/vision/train.py
'''
import argparse
import logging
import os
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import util
import model.began as began
import data.data_loader as data_loader
from evaluate import evaluate
import torchvision.utils as torch_utils
from test import test
import inception
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/CelebA/', help="Directory containing the dataset")
parser.add_argument('--model_dir', default='experiments/began_base', help="Directory containing params.json")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training") # 'best' or 'train'
def train(step, g, d, g_optimizer, d_optimizer, dataloader, metrics, params):
    """Train the BEGAN generator and discriminator for one epoch.

    Args:
        step: (int) global step counter, used for learning-rate decay
        g: (torch.nn.Module) generator network
        d: (torch.nn.Module) discriminator (autoencoder) network
        g_optimizer: (torch.optim) optimizer for the generator
        d_optimizer: (torch.optim) optimizer for the discriminator
        dataloader: (DataLoader) fetches batches of real training images
        metrics: (dict) metric name -> function of (g_img, g_img, r_img_passed, g_img_passed)
        params: (Params) hyperparameters

    Returns:
        (curr_step, avg_convergence): updated global step and the average
        BEGAN convergence measure over the epoch.
    """
    # set models to training mode
    g.train()
    d.train()
    # summary for current training loop and running averages for losses
    summ = []
    g_loss_avg = util.RunningAverage()
    d_loss_avg = util.RunningAverage()
    b_converge_avg = util.RunningAverage()
    curr_step = step
    z_G = torch.FloatTensor(params.batch_size, params.h).to(params.device)
    # Use tqdm for progress bar
    with tqdm(total=len(dataloader)) as t:
        for i, train_batch in enumerate(dataloader):
            r_img = train_batch[0]
            # move to GPU if available.
            # Bug fix: `async` became a reserved keyword in Python 3.7+, so
            # `r_img.cuda(async=True)` is a SyntaxError; PyTorch renamed the
            # argument to non_blocking.
            if params.cuda: r_img = r_img.cuda(non_blocking=True)
            # Reset the noise vectors
            z_G.data.uniform_(-1, 1)
            ########## Train Discriminator ##########
            g_img = g(z_G)
            g_img_passed = d(g_img.detach())
            r_img_passed = d(r_img)
            d_loss = d.loss_fn(r_img, g_img.detach(), r_img_passed, g_img_passed)
            d_optimizer.zero_grad()
            d_loss.backward()
            d_optimizer.step()
            ########## Train Generator ##########
            g_img_passed = d(g_img)
            g_loss = g.loss_fn(g_img, g_img_passed)
            # clear previous gradients, compute gradients of all variables wrt loss
            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()  # performs updates using calculated gradients
            # calculate BEGAN convergence measure
            b_converge = began.began_convergence(r_img, g_img, r_img_passed, g_img_passed, params.began_gamma)
            # Evaluate summaries only once in a while
            if i % params.save_summary_steps == 0:
                # NOTE(review): g_img is passed for both image arguments below;
                # if a metric compares real vs. generated images this should
                # likely be (r_img, g_img) -- confirm against metric definitions.
                summary_batch = {metric: metrics[metric](g_img, g_img, r_img_passed, g_img_passed) for metric in metrics}
                summary_batch['g_loss'] = g_loss.data
                summary_batch['d_loss'] = d_loss.data
                summary_batch['b_converge'] = b_converge.data
                summ.append(summary_batch)
            # update the running averages
            g_loss_avg.update(g_loss.data)
            d_loss_avg.update(d_loss.data)
            b_converge_avg.update(b_converge.data)
            t.set_postfix(g_loss='{:05.3f}'.format(g_loss_avg()),
                          d_loss='{:05.3f}'.format(d_loss_avg()),
                          converge='{:05.3f}'.format(b_converge_avg()))
            t.update()
            # Apply exponential learning rate decay as a function of the global step
            g_lr = params.g_learning_rate * params.lr_update ** (curr_step // params.lr_update_step)
            d_lr = params.d_learning_rate * params.lr_update ** (curr_step // params.lr_update_step)
            for pg in g_optimizer.param_groups:
                pg['lr'] = g_lr
            for pg in d_optimizer.param_groups:
                pg['lr'] = d_lr
            curr_step += 1
    # compute mean of all metrics in summary
    metrics_mean = {metric: np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    logging.info("- Train metrics: " + metrics_string)
    return curr_step, b_converge_avg()
def train_and_evaluate(g, d, train_dataloader, val_dataloader, g_optimizer, d_optimizer, metrics, params, model_dir,
                       restore_file=None):
    """Train the model for params.num_epochs epochs, checkpointing as it goes.

    Args:
        g: (torch.nn.Module) generator network
        d: (torch.nn.Module) discriminator network
        train_dataloader: (DataLoader) fetches training data
        val_dataloader: (DataLoader) fetches validation data
        g_optimizer: (torch.optim) optimizer for the generator
        d_optimizer: (torch.optim) optimizer for the discriminator
        metrics: (dict) metric name -> metric function
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        restore_file: (string) optional - name of the checkpoint to restore
            (without its .pth.tar extension)
    """
    # reload weights from restore_file if specified
    if restore_file is not None:
        restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        checkpoint = util.load_checkpoint(restore_path, g, d, g_optimizer, d_optimizer)
        d.began_k = checkpoint['began_k']
        g.z_fixed = checkpoint['z_fixed']
        start = checkpoint['epoch']
        step = checkpoint['step']
    else:
        start = 0
        step = 0
    best_b_converge = float('inf')
    # save the untrained generator's output once at the very start
    if start == 0:
        torch_utils.save_image(test(g.z_fixed, g, d), os.path.join(args.model_dir, 'start.jpg'))
    for epoch in range(start, params.num_epochs):
        # Run one epoch
        logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
        step, b_converge = train(step, g, d, g_optimizer, d_optimizer, train_dataloader, metrics, params)
        is_best = b_converge <= best_b_converge
        # Bug fix: track the best convergence seen so far. Previously
        # best_b_converge was never updated, so is_best was always True and
        # every epoch overwrote the "best" checkpoint.
        if is_best:
            logging.info("- Found new best convergence")
            best_b_converge = b_converge
        # Save weights periodically, at the end of training, and on improvement.
        # Bug fix: '%' binds tighter than '+', so the original condition
        # 'epoch + 1 % params.save_epochs == 0' never triggered periodic saves;
        # the intended test is (epoch + 1) % params.save_epochs == 0.
        if ((epoch + 1) % params.save_epochs == 0 or epoch + 1 == params.num_epochs or is_best):
            util.save_checkpoint({'epoch': epoch + 1,
                                  'g_state_dict': g.state_dict(),
                                  'd_state_dict': d.state_dict(),
                                  'g_optim_dict': g_optimizer.state_dict(),
                                  'd_optim_dict': d_optimizer.state_dict(),
                                  'began_k': d.began_k,
                                  'z_fixed': g.z_fixed,
                                  'step': step
                                  },
                                 is_best=is_best,
                                 checkpoint=model_dir,
                                 epoch=epoch,
                                 params=params)
        # Save generated images from this epoch for visual inspection
        torch_utils.save_image(test(g.z_fixed, g, d), os.path.join(args.model_dir, 'epoch{}.jpg'.format(epoch + 1)))
if __name__ == '__main__':
    # Load the parameters from json file
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = util.Params(json_path)
    # use GPU if available
    params.cuda = torch.cuda.is_available()
    if params.ngpu > 0 and params.cuda: params.device = torch.device('cuda')
    else: params.device = torch.device('cpu')
    print(params.device)
    # Set the random seed for reproducible experiments
    torch.manual_seed(42)
    if params.cuda: torch.cuda.manual_seed(42)
    # Set the logger
    util.set_logger(os.path.join(args.model_dir, 'train.log'))
    # Create the input data pipeline
    logging.info("Loading the datasets...")
    # fetch dataloaders (training set shuffled, validation set not)
    train_dl = data_loader.fetch_dataloader(args.data_dir, 'train', params, shuffle=True)
    val_dl = data_loader.fetch_dataloader(args.data_dir, 'valid', params, shuffle=False)
    logging.info("- done.")
    # Define the generator/discriminator pair and their Adam optimizers
    g = began.BeganGenerator(params).to(device=params.device)
    d = began.BeganDiscriminator(params).to(device=params.device)
    g_optimizer = optim.Adam(g.parameters(), lr=params.g_learning_rate,
                             betas=(params.beta1,params.beta2))
    d_optimizer = optim.Adam(d.parameters(), lr=params.d_learning_rate,
                             betas=(params.beta1,params.beta2))
    # Apply weight initialization
    g.apply(began.weights_init)
    d.apply(began.weights_init)
    # fetch loss function and metrics
    metrics = began.metrics
    # Train the model
    logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
    train_and_evaluate(g, d, train_dl, val_dl, g_optimizer, d_optimizer,
                       metrics, params, args.model_dir, args.restore_file)
| StarcoderdataPython |
1762657 | from . import _musicplayer
async def on_ready():
    """Startup hook (presumably a discord.py on_ready event -- confirm with the
    registering code): clears the music player's cache directory once connected."""
    _musicplayer.clear_cache_root()
| StarcoderdataPython |
163146 | <reponame>Nuullll/llvm-test-suite<filename>litsupport/modules/codesize.py
"""Test module to collect code size metrics of the benchmark executable."""
from litsupport import testplan
import logging
import os.path
def _getCodeSize(context):
    """Collect code-size metrics for context.executable.

    Always reports the executable's file size under 'size'; when an llvm-size
    tool is configured, additionally reports one 'size.<section>' metric per
    section parsed from its sysv-format output.
    """
    # First get the filesize: This should always work.
    metrics = {}
    metrics['size'] = os.path.getsize(context.executable)
    # If we have the llvm-size tool available get the size per segment.
    llvm_size = context.config.llvm_size
    if llvm_size:
        # -format=sysv is easier to parse than darwin/berkeley.
        cmdline = [llvm_size, '-format=sysv', context.executable]
        out = testplan.check_output(cmdline).decode('utf-8', errors='ignore')
        lines = out.splitlines()
        # First line contains executable name, second line should be a
        # "section size addr" header, numbers start after that.
        # Robustness fix: guard against output shorter than two lines, which
        # previously raised IndexError instead of logging a warning.
        if len(lines) < 2 or "section" not in lines[1] or "size" not in lines[1]:
            logging.warning("Unexpected output from llvm-size on '%s'",
                            context.executable)
        else:
            for line in lines[2:]:
                line = line.strip()
                if line == "":
                    continue
                values = line.split()
                if len(values) < 2:
                    logging.info("Ignoring malformed output line: %s", line)
                    continue
                # Skip the summary row emitted at the end of the listing.
                if values[0] == 'Total':
                    continue
                try:
                    name = values[0]
                    val = int(values[1])
                    metrics['size.%s' % name] = val
                except ValueError:
                    logging.info("Ignoring malformed output line: %s", line)
    return metrics
def mutatePlan(context, plan):
    """Register the code-size collector so it runs for each benchmark in the plan."""
    plan.metric_collectors.append(_getCodeSize)
| StarcoderdataPython |
3246492 | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: The root of binary tree.
    @return: True if this Binary tree is Balanced, or false.
    """
    def isBalanced(self, root):
        # Only the balanced flag matters at the top level; height is internal.
        balanced, _height = self.helper(root)
        return balanced

    def helper(self, node):
        """Return (is_balanced, height) for the subtree rooted at node."""
        if not node:
            return True, 0
        left_ok, left_height = self.helper(node.left)
        right_ok, right_height = self.helper(node.right)
        height = max(left_height, right_height) + 1
        is_balanced = left_ok and right_ok and abs(left_height - right_height) < 2
        return is_balanced, height
3343272 | <filename>monstro/views/tests/test_pagination.py
from monstro.db import Model, String
import monstro.testing
from monstro.views.paginators import (
Paginator, PageNumberPaginator, LimitOffsetPaginator
)
class User(Model):
    """Minimal model bound to the 'users' collection, used only as a fixture."""
    value = String()
    class Meta:
        collection = 'users'
class PaginatorTest(monstro.testing.AsyncTestCase):
    """The abstract Paginator base must refuse direct use of its hook methods."""

    class TestModel(Model):
        value = String()

        class Meta:
            collection = 'test'

    def test_bind__not_implemented(self):
        paginator = Paginator()
        with self.assertRaises(NotImplementedError):
            paginator.bind()

    def test_get_offset__not_implemented(self):
        paginator = Paginator()
        with self.assertRaises(NotImplementedError):
            paginator.get_offset()

    def test_get_limit__not_implemented(self):
        paginator = Paginator()
        with self.assertRaises(NotImplementedError):
            paginator.get_limit()
class PageNumberPaginatorTest(monstro.testing.AsyncTestCase):
    """Tests for page/count based pagination."""
    class TestModel(Model):
        # Minimal model persisted to the 'test' collection for fixtures.
        value = String()
        class Meta:
            collection = 'test'
    def test_bind(self):
        """bind() stores the requested page number and per-page count."""
        pagination = PageNumberPaginator()
        pagination.bind(page=1, count=1)
        self.assertEqual(1, pagination.page)
        self.assertEqual(1, pagination.count)
    def test_get_offset(self):
        """Offset is zero-based: page 1 starts at offset 0."""
        pagination = PageNumberPaginator()
        pagination.bind(page=1, count=1)
        self.assertEqual(0, pagination.get_offset())
    def test_get_limit(self):
        """Limit equals the per-page count for page-number pagination."""
        pagination = PageNumberPaginator()
        pagination.bind(page=1, count=1)
        self.assertEqual(1, pagination.get_limit())
    async def test_paginate(self):
        """paginate() returns the first page plus current/next/total metadata."""
        pagination = PageNumberPaginator()
        pagination.bind(page=1, count=1)
        for i in range(5):
            await self.TestModel.objects.create(value=str(i))
        data = await pagination.paginate(self.TestModel.objects.filter())
        self.assertEqual(1, data['pages']['current'])
        self.assertEqual(2, data['pages']['next'])
        self.assertEqual(5, data['pages']['total'])
        self.assertEqual(1, len(data['items']))
        self.assertEqual('0', data['items'][0].value)
class LimitOffsetPaginatorTest(monstro.testing.AsyncTestCase):
    """Tests for limit/offset based pagination."""
    class TestModel(Model):
        # Minimal model persisted to the 'test' collection for fixtures.
        value = String()
        class Meta:
            collection = 'test'
    def test_bind(self):
        """bind() stores the requested limit and offset."""
        pagination = LimitOffsetPaginator()
        pagination.bind(limit=1, offset=2)
        self.assertEqual(1, pagination.limit)
        self.assertEqual(2, pagination.offset)
    def test_get_offset(self):
        """The offset is returned as bound."""
        pagination = LimitOffsetPaginator()
        pagination.bind(limit=1, offset=2)
        self.assertEqual(2, pagination.get_offset())
    def test_get_limit(self):
        """get_limit() returns the exclusive end index (offset + limit)."""
        pagination = LimitOffsetPaginator()
        pagination.bind(limit=1, offset=2)
        self.assertEqual(3, pagination.get_limit())
    async def test_paginate(self):
        """paginate() slices the queryset and reports surrounding page numbers."""
        pagination = LimitOffsetPaginator()
        pagination.bind(limit=1, offset=2)
        for i in range(5):
            await self.TestModel.objects.create(value=str(i))
        data = await pagination.paginate(self.TestModel.objects.filter())
        self.assertEqual(3, data['pages']['current'])
        self.assertEqual(2, data['pages']['previous'])
        self.assertEqual(4, data['pages']['next'])
        self.assertEqual(5, data['pages']['total'])
        self.assertEqual(1, len(data['items']))
        self.assertEqual('2', data['items'][0].value)
| StarcoderdataPython |
1668697 | from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import numpy as np
import random
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from lightgbm import LGBMRegressor
class LayerSizeGenerator:
    """Random sampler for MLP hidden_layer_sizes, exposing the scipy-style
    rvs() API used by sklearn's random hyperparameter search.

    Draws a depth of 1 or 2 layers, then a width of 1-15 neurons per layer.
    """

    def __init__(self):
        self.num_layers = [1, 2]
        self.num_neurons = np.arange(1, 15+1, 1)

    def rvs(self, random_state=42):
        """Sample one hidden-layer size configuration.

        Bug fix: the original called random.seed(random_state), clobbering the
        *global* random module state on every invocation. A private Random
        instance yields the identical sequence for a given seed (same Mersenne
        Twister) without that side effect.
        """
        rng = random.Random(random_state)
        # first randomly define num of layers, then pick the neuron size for each of them
        num_layers = rng.choice(self.num_layers)
        layer_sizes = rng.choices(self.num_neurons, k=num_layers)
        return layer_sizes
class NNCombWrapper():
    """Random-search configuration bundle for an sklearn MLPRegressor."""

    def __init__(self, model_params=None):
        self.model_name = "nncomb"
        self.search_type = 'random'
        # Hyperparameter distributions sampled during the random search.
        self.param_grid = {
            "early_stopping": [True],
            "learning_rate": ["invscaling"],
            "learning_rate_init": np.linspace(0.001, 0.999, 100),
            'alpha': np.linspace(0.001, 0.999, 100),
            'solver': ["adam"],
            'activation': ["relu"],
            "hidden_layer_sizes": LayerSizeGenerator(),
        }
        # Instantiate the estimator with any caller-supplied constructor args.
        self.ModelClass = MLPRegressor() if model_params is None else MLPRegressor(**model_params)
class RandomForestWrapper():
    """Random-search configuration bundle for an sklearn RandomForestRegressor."""

    def __init__(self, model_params=None):
        self.model_name = "random_forest"
        self.search_type = 'random'
        # Hyperparameter distributions sampled during the random search.
        self.param_grid = {
            "max_features": ['auto', 'sqrt', 'log2'],
            "min_samples_split": sp_randint(2, 31),
            "n_estimators": sp_randint(2, 301),
            "max_depth": sp_randint(2, 20),
        }
        # Instantiate the estimator with any caller-supplied constructor args.
        self.ModelClass = RandomForestRegressor() if model_params is None else RandomForestRegressor(**model_params)
class LGBWrapper():
    """Random-search configuration bundle for a LightGBM LGBMRegressor."""

    def __init__(self, model_params=None):
        self.model_name = "lgb_regression"
        self.search_type = 'random'
        # Hyperparameter distributions sampled during the random search.
        self.param_grid = {
            'num_leaves': sp_randint(6, 50),
            'min_child_samples': sp_randint(100, 500),
            'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
            'subsample': sp_uniform(loc=0.2, scale=0.8),
            "n_estimators": sp_randint(500, 1000),
            "max_depth": sp_randint(3, 100),
            "learning_rate": np.linspace(0.001, 0.99, 100),
            'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),
            'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
            'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100],
            "objective": ["huber"],
        }
        # Instantiate the estimator with any caller-supplied constructor args.
        self.ModelClass = LGBMRegressor() if model_params is None else LGBMRegressor(**model_params)
1669925 | <filename>algotrader/utils/trade_data.py
from algotrader.model.trade_data_pb2 import *
def is_buy(new_order_req: NewOrderRequest) -> bool:
    """Return True if the order request's action is Buy."""
    return new_order_req.action == Buy
def is_sell(new_order_req: NewOrderRequest) -> bool:
    """Return True if the order request's action is Sell."""
    return new_order_req.action == Sell
| StarcoderdataPython |
9856 | import pytest
from aiospamc.client import Client
from aiospamc.exceptions import (
BadResponse,
UsageException,
DataErrorException,
NoInputException,
NoUserException,
NoHostException,
UnavailableException,
InternalSoftwareException,
OSErrorException,
OSFileException,
CantCreateException,
IOErrorException,
TemporaryFailureException,
ProtocolException,
NoPermissionException,
ConfigException,
ServerTimeoutException,
ResponseException,
)
from aiospamc.responses import Response
async def test_request_sent_to_connection(mock_client_dependency, mocker, hostname):
    """The serialized request bytes are what gets passed to the connection's request()."""
    mock_req = mocker.MagicMock()
    await mock_client_dependency.request(mock_req, host=hostname)
    assert (
        bytes(mock_req)
        == mock_client_dependency.connection_factory().request.await_args[0][0]
    )
async def test_request_response_sent_to_parser(
    mock_client_dependency, mocker, hostname
):
    """The raw bytes returned by the connection are handed to the parser."""
    mock_req = mocker.MagicMock()
    connection = mock_client_dependency.connection_factory()
    parser = mock_client_dependency.parser_factory()
    mocker.spy(parser, "parse")
    await mock_client_dependency.request(mock_req, host=hostname)
    response = connection.request.return_value
    assert response == parser.parse.call_args[0][0]
async def test_request_returns_response(mock_client_dependency, mocker, hostname):
    """request() wraps the parser's output dict in a Response object."""
    mock_req = mocker.MagicMock()
    connection = mock_client_dependency.connection_factory()
    parser = mock_client_dependency.parser_factory()
    parse_spy = mocker.spy(parser, "parse")
    result = await mock_client_dependency.request(mock_req, host=hostname)
    expected = Response(**parse_spy.spy_return)
    assert expected == result
# Each test below follows the same pattern: build a client whose mocked
# response carries a specific SPAMD status-code fixture (ex_*) and assert
# that Client.request translates it into the matching aiospamc exception.
async def test_request_raises_usage(mock_client_response, mocker, ex_usage, hostname):
    mock_client = mock_client_response(ex_usage)
    with pytest.raises(UsageException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_data_err(
    mock_client_response, mocker, ex_data_err, hostname
):
    mock_client = mock_client_response(ex_data_err)
    with pytest.raises(DataErrorException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_input(
    mock_client_response, mocker, ex_no_input, hostname
):
    mock_client = mock_client_response(ex_no_input)
    with pytest.raises(NoInputException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_user(
    mock_client_response, mocker, ex_no_user, hostname
):
    mock_client = mock_client_response(ex_no_user)
    with pytest.raises(NoUserException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_host(
    mock_client_response, mocker, ex_no_host, hostname
):
    mock_client = mock_client_response(ex_no_host)
    with pytest.raises(NoHostException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_unavailable(
    mock_client_response, mocker, ex_unavailable, hostname
):
    mock_client = mock_client_response(ex_unavailable)
    with pytest.raises(UnavailableException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_software(
    mock_client_response, mocker, ex_software, hostname
):
    mock_client = mock_client_response(ex_software)
    with pytest.raises(InternalSoftwareException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_os_error(
    mock_client_response, mocker, ex_os_err, hostname
):
    mock_client = mock_client_response(ex_os_err)
    with pytest.raises(OSErrorException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_os_file(
    mock_client_response, mocker, ex_os_file, hostname
):
    mock_client = mock_client_response(ex_os_file)
    with pytest.raises(OSFileException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_cant_create(
    mock_client_response, mocker, ex_cant_create, hostname
):
    mock_client = mock_client_response(ex_cant_create)
    with pytest.raises(CantCreateException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_io_error(
    mock_client_response, mocker, ex_io_err, hostname
):
    mock_client = mock_client_response(ex_io_err)
    with pytest.raises(IOErrorException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_temporary_failure(
    mock_client_response, mocker, ex_temp_fail, hostname
):
    mock_client = mock_client_response(ex_temp_fail)
    with pytest.raises(TemporaryFailureException):
        await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_protocol(
mock_client_response, mocker, ex_protocol, hostname
):
mock_client = mock_client_response(ex_protocol)
with pytest.raises(ProtocolException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_permission(
mock_client_response, mocker, ex_no_perm, hostname
):
mock_client = mock_client_response(ex_no_perm)
with pytest.raises(NoPermissionException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_config(mock_client_response, mocker, ex_config, hostname):
mock_client = mock_client_response(ex_config)
with pytest.raises(ConfigException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_timeout(
mock_client_response, mocker, ex_timeout, hostname
):
mock_client = mock_client_response(ex_timeout)
with pytest.raises(ServerTimeoutException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_undefined(
mock_client_response, mocker, ex_undefined, hostname
):
mock_client = mock_client_response(ex_undefined)
with pytest.raises(ResponseException):
await mock_client.request(mocker.MagicMock(), host=hostname)
| StarcoderdataPython |
3258231 | import sys
import os
import csv
import numpy as np
import utils
from utils import error
import describe
import histogram
import file
import logreg_train
housenames = ["Ravenclaw", "Slytherin", "Gryffindor", "Hufflepuff"]
def usage():
    """Report the expected command line via error() (which aborts the program —
    presumably, see utils.error; TODO confirm)."""
    error('%s [dataset] [theta dataset]' % sys.argv[0])
def logreg_predict(features, theta_features):
    """One-vs-all prediction.

    For every student (a column of *features*) evaluate the hypothesis of each
    house's trained weights and keep the house with the highest value.

    features: (n_features, n_students) matrix, scaled and bias-augmented.
    theta_features: mapping of house name -> trained weight column vector.
    Returns a list of house names, one per student ("" only if every
    hypothesis value is <= 0, which cannot happen for a sigmoid).
    """
    n_students = features.shape[1]
    predictions = []
    for col in range(n_students):
        sample = features[:, col].reshape(-1, 1)
        best_house = ""
        best_p = 0.0
        for house in housenames:
            p = logreg_train.h(theta_features[house], sample)
            if p > best_p:
                best_p = p
                best_house = house
        predictions.append(best_house)
    return predictions
def read_data(filename, feature_number, mean_features):
    """Parse the prediction dataset into a (feature_number, student_number) array.

    Feature columns start at CSV column 6; empty cells are imputed with the
    per-feature training mean taken from *mean_features*.  Aborts through
    error() when the file is missing or the dataset cannot be parsed.
    """
    # checks
    if not os.path.isfile(filename):
        error('no such file: %s' % filename)
    # parser: csv to feature lists
    try:
        with open(filename, 'r') as fs:
            reader = csv.reader(fs)
            # First pass only counts the data rows (total minus header).
            student_number = sum(1 for row in reader) - 1
            fs.seek(0)
            next(reader)  # skip the header row (was reader.__next__())
            data = np.zeros([feature_number, student_number])
            for i_line, line in enumerate(reader):
                for i, field in enumerate(line):
                    if i >= 6:
                        data[i - 6][i_line] = float(field) if field != "" else mean_features[i - 6]
    except (ValueError, IndexError, csv.Error, OSError):
        # Narrowed from a bare `except:` so that KeyboardInterrupt / SystemExit
        # (e.g. raised by error() itself) are no longer swallowed and
        # relabelled as "invalid dataset".
        error("invalid dataset")
    return data
if __name__ == '__main__':
    # CLI entry point: logreg_predict.py <dataset> <theta dataset>
    if len(sys.argv) != 3:
        usage()
    if not os.path.isfile(sys.argv[1]):
        error('no such file: %s' % sys.argv[1])
    if not os.path.isfile(sys.argv[2]):
        error('no such file: %s' % sys.argv[2])
    train_file = "resources/dataset_train.csv"
    # Recompute the per-feature means of the *training* set so missing values
    # in the prediction set are imputed exactly as they were during training.
    header_histo, features_histo = histogram.read_data(train_file)
    feature_number = len(header_histo)
    mean_features = logreg_train.calc_mean_features(features_histo, feature_number)
    data = read_data(sys.argv[1], feature_number, mean_features)
    train_data = logreg_train.read_data(train_file, feature_number, mean_features)
    # Scale with the training min/max so both sets live in the same feature space.
    min_matrix = np.min(train_data["Features"], axis = 1).reshape(-1, 1)
    max_matrix = np.max(train_data["Features"], axis = 1).reshape(-1, 1)
    data = logreg_train.scale(data, min_matrix, max_matrix)
    # Prepend the bias row of ones expected by the hypothesis.
    data = np.vstack((np.matrix(np.ones(len(data[0]))), data))
    tn = feature_number + 1
    theta_data = file.read_theta(sys.argv[2], tn)
    houses = logreg_predict(data, theta_data)
    file.write_houses(houses)
| StarcoderdataPython |
195441 | from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from nornir_netmiko import netmiko_send_command
def send_command(task):
    """Nornir task: disable CLI completion-on-space, then dump IP interfaces."""
    task.run(task=netmiko_send_command, command_string="set cli complete-on-space off")
    task.run(task=netmiko_send_command, command_string="show ip interface")
def main():
    """Run send_command against the single inventory host "srx2" and print the
    aggregated result."""
    nr = InitNornir(config_file="config.yaml")
    nr = nr.filter(name="srx2")
    agg_result = nr.run(task=send_command)
    print_result(agg_result)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1743311 | from taurex.core import Singleton
from taurex.log import Logger
import inspect
import pkg_resources
class ClassFactory(Singleton):
"""
A factory the discovers new
classes from plugins
"""
    def init(self):
        # NOTE: named ``init`` (not __init__) — presumably invoked once by the
        # Singleton base class; confirm against taurex.core.Singleton.
        self.log = Logger('ClassFactory')
        self.extension_paths = []
        self.reload_plugins()
    def set_extension_paths(self, paths=None, reload=True):
        """Register extra directories to scan for ``*.py`` extension files."""
        self.extension_paths = paths
        if reload:
            self.reload_plugins()
    def reload_plugins(self):
        """Rebuild every class registry: built-ins, mixins, entry-point
        plugins, then user extension paths (in that order)."""
        self.log.info('Reloading all modules and plugins')
        self.setup_batteries_included()
        self.setup_batteries_included_mixin()
        self.load_plugins()
        self.load_extension_paths()
    def setup_batteries_included_mixin(self):
        """
        Collect all the mixin classes that are built into
        TauREx 3 (taurex.mixin.mixins), resetting every mixin registry first.
        """
        from taurex.mixin import mixins
        # Reset so reloads do not accumulate stale classes.
        self._temp_mixin_klasses = set()
        self._chem_mixin_klasses = set()
        self._gas_mixin_klasses = set()
        self._press_mixin_klasses = set()
        self._planet_mixin_klasses = set()
        self._star_mixin_klasses = set()
        self._inst_mixin_klasses = set()
        self._model_mixin_klasses = set()
        self._contrib_mixin_klasses = set()
        self._opt_mixin_klasses = set()
        self._obs_mixin_klasses = set()
        self._temp_mixin_klasses.update(
            self._collect_temperatures_mixin(mixins))
        self._chem_mixin_klasses.update(self._collect_chemistry_mixin(mixins))
        self._gas_mixin_klasses.update(self._collect_gas_mixin(mixins))
        self._press_mixin_klasses.update(self._collect_pressure_mixin(mixins))
        self._planet_mixin_klasses.update(self._collect_planets_mixin(mixins))
        self._star_mixin_klasses.update(self._collect_star_mixin(mixins))
        self._inst_mixin_klasses.update(self._collect_instrument_mixin(mixins))
        self._model_mixin_klasses.update(self._collect_model_mixin(mixins))
        self._obs_mixin_klasses.update(self._collect_observation_mixin(mixins))
        self._contrib_mixin_klasses.update(
            self._collect_contributions_mixin(mixins))
        self._opt_mixin_klasses.update(self._collect_optimizer_mixin(mixins))
    def setup_batteries_included(self):
        """
        Collect all the classes that are built into
        TauREx 3
        """
        from taurex import temperature, chemistry, pressure, planet, \
            stellar, instruments, model, contributions, optimizer, opacity, \
            spectrum
        from taurex.opacity import ktables
        from taurex.core import priors
        # Reset every registry so reloads do not accumulate stale classes.
        self._temp_klasses = set()
        self._chem_klasses = set()
        self._gas_klasses = set()
        self._press_klasses = set()
        self._planet_klasses = set()
        self._star_klasses = set()
        self._inst_klasses = set()
        self._model_klasses = set()
        self._contrib_klasses = set()
        self._opt_klasses = set()
        self._opac_klasses = set()
        self._ktab_klasses = set()
        self._obs_klasses = set()
        self._prior_klasses = set()
        # Populate each registry from the corresponding built-in subpackage.
        self._temp_klasses.update(self._collect_temperatures(temperature))
        self._chem_klasses.update(self._collect_chemistry(chemistry))
        self._gas_klasses.update(self._collect_gas(chemistry))
        self._press_klasses.update(self._collect_pressure(pressure))
        self._planet_klasses.update(self._collect_planets(planet))
        self._star_klasses.update(self._collect_star(stellar))
        self._inst_klasses.update(self._collect_instrument(instruments))
        self._model_klasses.update(self._collect_model(model))
        self._obs_klasses.update(self._collect_observation(spectrum))
        self._contrib_klasses.update(
            self._collect_contributions(contributions))
        self._opt_klasses.update(self._collect_optimizer(optimizer))
        self._opac_klasses.update(self._collect_opacity(opacity))
        self._ktab_klasses.update(self._collect_ktables(ktables))
        self._prior_klasses.update(self._collect_priors(priors))
    def load_plugin(self, plugin_module):
        """Scan *plugin_module* and merge every recognised class (and mixin)
        into the corresponding registry.  Unlike the setup_* methods this does
        not reset the registries, so plugins accumulate on top of built-ins."""
        self._temp_klasses.update(self._collect_temperatures(plugin_module))
        self._chem_klasses.update(self._collect_chemistry(plugin_module))
        self._gas_klasses.update(self._collect_gas(plugin_module))
        self._press_klasses.update(self._collect_pressure(plugin_module))
        self._planet_klasses.update(self._collect_planets(plugin_module))
        self._star_klasses.update(self._collect_star(plugin_module))
        self._inst_klasses.update(self._collect_instrument(plugin_module))
        self._model_klasses.update(self._collect_model(plugin_module))
        self._obs_klasses.update(self._collect_observation(plugin_module))
        self._contrib_klasses.update(
            self._collect_contributions(plugin_module))
        self._opt_klasses.update(self._collect_optimizer(plugin_module))
        self._opac_klasses.update(self._collect_opacity(plugin_module))
        self._prior_klasses.update(self._collect_priors(plugin_module))
        self._ktab_klasses.update(self._collect_ktables(plugin_module))
        # Load any mixins
        self._temp_mixin_klasses.update(
            self._collect_temperatures_mixin(plugin_module))
        self._chem_mixin_klasses.update(
            self._collect_chemistry_mixin(plugin_module))
        self._gas_mixin_klasses.update(
            self._collect_gas_mixin(plugin_module))
        self._press_mixin_klasses.update(
            self._collect_pressure_mixin(plugin_module))
        self._planet_mixin_klasses.update(
            self._collect_planets_mixin(plugin_module))
        self._star_mixin_klasses.update(
            self._collect_star_mixin(plugin_module))
        self._inst_mixin_klasses.update(
            self._collect_instrument_mixin(plugin_module))
        self._model_mixin_klasses.update(
            self._collect_model_mixin(plugin_module))
        self._obs_mixin_klasses.update(
            self._collect_observation_mixin(plugin_module))
        self._contrib_mixin_klasses.update(
            self._collect_contributions_mixin(plugin_module))
        self._opt_mixin_klasses.update(
            self._collect_optimizer_mixin(plugin_module))
def discover_plugins(self):
plugins = {}
failed_plugins = {}
for entry_point in pkg_resources.iter_entry_points('taurex.plugins'):
entry_point_name = entry_point.name
try:
module = entry_point.load()
except Exception as e:
# For whatever reason do not attempt to load the plugin
self.log.warning('Could not load plugin %s', entry_point_name)
self.log.warning('Reason: %s', str(e))
failed_plugins[entry_point_name] = str(e)
continue
plugins[entry_point_name] = module
return plugins, failed_plugins
    def load_plugins(self):
        """Discover entry-point plugins and register each one that imported."""
        plugins, failed_plugins = self.discover_plugins()
        self.log.info('----------Plugin loading---------')
        self.log.debug('Discovered plugins %s', plugins.values())
        for k, v in plugins.items():
            self.log.info('Loading %s', k)
            self.load_plugin(v)
def load_extension_paths(self):
import glob
import os
import pathlib
import importlib
paths = self.extension_paths
if paths:
# Make sure they're unique
all_files = set(sum([glob.glob(
os.path.join(os.path.abspath(p), '*.py'))
for p in paths], []))
for f in all_files:
self.info('Loading extensions from %s', f)
module_name = pathlib.Path(f).stem
spec = importlib.util.spec_from_file_location(module_name, f)
foo = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(foo)
self.load_plugin(foo)
except Exception as e:
self.log.warning('Could not load extension from file %s',
f)
self.log.warning('Reason: %s', str(e))
def _collect_classes(self, module, base_klass):
"""
Collects all classes that are a subclass of base
"""
klasses = []
clsmembers = inspect.getmembers(module, inspect.isclass)
for _, c in clsmembers:
if issubclass(c, base_klass) and (c is not base_klass):
self.log.debug(f' Found class {c.__name__}')
klasses.append(c)
return klasses
    # Per-component collectors: each one delegates to _collect_classes with
    # the appropriate TauREx base class, importing that base lazily to avoid
    # circular imports at module load time.
    def _collect_temperatures(self, module):
        from taurex.temperature import TemperatureProfile
        return self._collect_classes(module, TemperatureProfile)
    def _collect_chemistry(self, module):
        from taurex.chemistry import Chemistry
        return self._collect_classes(module, Chemistry)
    def _collect_gas(self, module):
        from taurex.chemistry import Gas
        return self._collect_classes(module, Gas)
    def _collect_pressure(self, module):
        from taurex.pressure import PressureProfile
        return self._collect_classes(module, PressureProfile)
    def _collect_planets(self, module):
        from taurex.planet import BasePlanet
        return self._collect_classes(module, BasePlanet)
    def _collect_star(self, module):
        from taurex.stellar import Star
        return self._collect_classes(module, Star)
    def _collect_instrument(self, module):
        from taurex.instruments import Instrument
        return self._collect_classes(module, Instrument)
    def _collect_model(self, module):
        # SimpleForwardModel is an intermediate base, not a usable model.
        from taurex.model import ForwardModel, SimpleForwardModel
        return [c for c in self._collect_classes(module, ForwardModel)
                if c is not SimpleForwardModel]
    def _collect_contributions(self, module):
        from taurex.contributions import Contribution
        return self._collect_classes(module, Contribution)
    def _collect_optimizer(self, module):
        from taurex.optimizer import Optimizer
        return self._collect_classes(module, Optimizer)
    def _collect_opacity(self, module):
        # Excludes the InterpolatingOpacity base and all KTable subclasses;
        # the latter are collected separately by _collect_ktables.
        from taurex.opacity import Opacity, InterpolatingOpacity
        from taurex.opacity.ktables import KTable
        return [c for c in self._collect_classes(module, Opacity)
                if c is not InterpolatingOpacity and not issubclass(c, KTable)]
    def _collect_ktables(self, module):
        from taurex.opacity.ktables import KTable
        return [c for c in self._collect_classes(module, KTable)]
    def _collect_observation(self, module):
        from taurex.spectrum import BaseSpectrum
        return [c for c in self._collect_classes(module, BaseSpectrum)]
    def _collect_priors(self, module):
        from taurex.core.priors import Prior
        return [c for c in self._collect_classes(module, Prior)]
    # Mixins
    def _collect_temperatures_mixin(self, module):
        from taurex.mixin import TemperatureMixin
        return self._collect_classes(module, TemperatureMixin)
    def _collect_chemistry_mixin(self, module):
        from taurex.mixin import ChemistryMixin
        return self._collect_classes(module, ChemistryMixin)
    def _collect_gas_mixin(self, module):
        from taurex.mixin import GasMixin
        return self._collect_classes(module, GasMixin)
    def _collect_pressure_mixin(self, module):
        from taurex.mixin import PressureMixin
        return self._collect_classes(module, PressureMixin)
    def _collect_planets_mixin(self, module):
        from taurex.mixin import PlanetMixin
        return self._collect_classes(module, PlanetMixin)
    def _collect_star_mixin(self, module):
        from taurex.mixin import StarMixin
        return self._collect_classes(module, StarMixin)
    def _collect_instrument_mixin(self, module):
        from taurex.mixin import InstrumentMixin
        return self._collect_classes(module, InstrumentMixin)
    def _collect_model_mixin(self, module):
        from taurex.mixin import ForwardModelMixin
        return self._collect_classes(module, ForwardModelMixin)
    def _collect_contributions_mixin(self, module):
        from taurex.mixin import ContributionMixin
        return self._collect_classes(module, ContributionMixin)
    def _collect_optimizer_mixin(self, module):
        from taurex.mixin import OptimizerMixin
        return self._collect_classes(module, OptimizerMixin)
    def _collect_observation_mixin(self, module):
        from taurex.mixin import ObservationMixin
        return self._collect_classes(module, ObservationMixin)
    def list_from_base(self, klass_type):
        """Return the registry (set/list) of discovered classes for the given
        TauREx base class *klass_type*.  Raises KeyError for unknown bases."""
        from taurex.temperature import TemperatureProfile
        from taurex.chemistry import Chemistry
        from taurex.chemistry import Gas
        from taurex.pressure import PressureProfile
        from taurex.planet import BasePlanet
        from taurex.stellar import Star
        from taurex.instruments import Instrument
        from taurex.model import ForwardModel
        from taurex.contributions import Contribution
        from taurex.optimizer import Optimizer
        from taurex.opacity import Opacity
        from taurex.opacity.ktables import KTable
        from taurex.spectrum import BaseSpectrum
        from taurex.core.priors import Prior
        klass_dict = {
            TemperatureProfile: self.temperatureKlasses,
            Chemistry: self.chemistryKlasses,
            Gas: self.gasKlasses,
            PressureProfile: self.pressureKlasses,
            BasePlanet: self.planetKlasses,
            Star: self.starKlasses,
            Instrument: self.instrumentKlasses,
            ForwardModel: self.modelKlasses,
            Contribution: self.contributionKlasses,
            Optimizer: self.optimizerKlasses,
            Opacity: self.opacityKlasses,
            KTable: self.ktableKlasses,
            BaseSpectrum: self.observationKlasses,
            Prior: self.priorKlasses,
        }
        return klass_dict[klass_type]
    # --- Read-only accessors over the discovered-class registries ---
    @property
    def temperatureKlasses(self):
        return self._temp_klasses
    @property
    def chemistryKlasses(self):
        return self._chem_klasses
    @property
    def gasKlasses(self):
        return self._gas_klasses
    @property
    def pressureKlasses(self):
        return self._press_klasses
    @property
    def planetKlasses(self):
        return self._planet_klasses
    @property
    def starKlasses(self):
        return self._star_klasses
    @property
    def instrumentKlasses(self):
        return self._inst_klasses
    @property
    def modelKlasses(self):
        return self._model_klasses
    @property
    def contributionKlasses(self):
        return self._contrib_klasses
    @property
    def optimizerKlasses(self):
        return self._opt_klasses
    @property
    def opacityKlasses(self):
        return self._opac_klasses
    @property
    def ktableKlasses(self):
        return self._ktab_klasses
    @property
    def observationKlasses(self):
        return self._obs_klasses
    @property
    def priorKlasses(self):
        return self._prior_klasses
    # Mixins
    @property
    def temperatureMixinKlasses(self):
        return self._temp_mixin_klasses
    @property
    def chemistryMixinKlasses(self):
        return self._chem_mixin_klasses
    @property
    def gasMixinKlasses(self):
        return self._gas_mixin_klasses
    @property
    def pressureMixinKlasses(self):
        return self._press_mixin_klasses
    @property
    def planetMixinKlasses(self):
        return self._planet_mixin_klasses
    @property
    def starMixinKlasses(self):
        return self._star_mixin_klasses
    @property
    def instrumentMixinKlasses(self):
        return self._inst_mixin_klasses
    @property
    def modelMixinKlasses(self):
        return self._model_mixin_klasses
    @property
    def contributionMixinKlasses(self):
        return self._contrib_mixin_klasses
    @property
    def optimizerMixinKlasses(self):
        return self._opt_mixin_klasses
    @property
    def observationMixinKlasses(self):
        return self._obs_mixin_klasses
| StarcoderdataPython |
1630072 |
# vim: set ts=4 sw=4 expandtab:
from libtimesheet.ApplicationConstants import Notification
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.RootPanel import RootPanelCls
from pyjamas.ui.MenuBar import MenuBar
from pyjamas.ui.MenuItem import MenuItem
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.Button import Button
from Menu import Menu
from DatePicker import DatePicker
from TimeGrid import TimeGrid
from Summary import Summary
class AppFrame(RootPanelCls):
    """Top-level panel stacking the menu bar, date picker, time grid and
    summary vertically inside the root panel."""
    # Class-level defaults; replaced with widget instances in __init__.
    menuBar = None
    datePicker = None
    timeGrid = None
    summary = None
    def __init__(self):
        # The original wrapped this body in ``try: ... except: raise`` — a
        # no-op handler (it re-raised everything unchanged) — now removed.
        RootPanelCls.__init__(self)
        vpanel = VerticalPanel()
        self.menuBar = Menu()
        vpanel.add(self.menuBar)
        self.datePicker = DatePicker()
        vpanel.add(self.datePicker)
        self.timeGrid = TimeGrid()
        vpanel.add(self.timeGrid)
        self.summary = Summary()
        vpanel.add(self.summary)
        self.add(vpanel)
| StarcoderdataPython |
123445 | <reponame>imranq2/SparkAutoMapper.FHIR
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
from spark_auto_mapper_fhir.fhir_types.date_time import FhirDateTime
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# collector (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for collector
from spark_auto_mapper_fhir.resources.practitioner import Practitioner
from spark_auto_mapper_fhir.resources.practitioner_role import PractitionerRole
# collectedDateTime (dateTime)
# collectedPeriod (Period)
from spark_auto_mapper_fhir.complex_types.period import Period
# duration (Duration)
from spark_auto_mapper_fhir.complex_types.duration import Duration
# quantity (Quantity)
from spark_auto_mapper_fhir.complex_types.quantity import Quantity
# method (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# End Import for References for method
# Import for CodeableConcept for method
from spark_auto_mapper_fhir.value_sets.fhir_specimen_collection_method import (
FHIRSpecimenCollectionMethodCode,
)
# End Import for CodeableConcept for method
# bodySite (CodeableConcept)
# End Import for References for bodySite
# Import for CodeableConcept for bodySite
from spark_auto_mapper_fhir.value_sets.snomedct_body_structures import (
SNOMEDCTBodyStructuresCode,
)
# End Import for CodeableConcept for bodySite
# fastingStatusCodeableConcept (CodeableConcept)
# End Import for References for fastingStatusCodeableConcept
# Import for CodeableConcept for fastingStatusCodeableConcept
from spark_auto_mapper_fhir.value_sets.v2_0916 import V2_0916
# End Import for CodeableConcept for fastingStatusCodeableConcept
# fastingStatusDuration (Duration)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class SpecimenCollection(FhirBackboneElementBase):
"""
Specimen.Collection
A sample to be used for analysis.
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
collector: Optional[Reference[Union[Practitioner, PractitionerRole]]] = None,
collectedDateTime: Optional[FhirDateTime] = None,
collectedPeriod: Optional[Period] = None,
duration: Optional[Duration] = None,
quantity: Optional[Quantity] = None,
method: Optional[CodeableConcept[FHIRSpecimenCollectionMethodCode]] = None,
bodySite: Optional[CodeableConcept[SNOMEDCTBodyStructuresCode]] = None,
fastingStatusCodeableConcept: Optional[CodeableConcept[V2_0916]] = None,
fastingStatusDuration: Optional[Duration] = None,
) -> None:
"""
A sample to be used for analysis.
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param collector: Person who collected the specimen.
:param collectedDateTime: None
:param collectedPeriod: None
:param duration: The span of time over which the collection of a specimen occurred.
:param quantity: The quantity of specimen collected; for instance the volume of a blood sample,
or the physical measurement of an anatomic pathology sample.
:param method: A coded value specifying the technique that is used to perform the procedure.
:param bodySite: Anatomical location from which the specimen was collected (if subject is a
patient). This is the target site. This element is not used for environmental
specimens.
:param fastingStatusCodeableConcept: None
:param fastingStatusDuration: None
"""
super().__init__(
id_=id_,
extension=extension,
modifierExtension=modifierExtension,
collector=collector,
collectedDateTime=collectedDateTime,
collectedPeriod=collectedPeriod,
duration=duration,
quantity=quantity,
method=method,
bodySite=bodySite,
fastingStatusCodeableConcept=fastingStatusCodeableConcept,
fastingStatusDuration=fastingStatusDuration,
)
| StarcoderdataPython |
3204394 | # -*- coding: utf-8 -*-
"""ORCID Blueprint Module
Module that contains the full blueprint for ORCID OAuth
"""
from flask import flash, redirect, session, url_for, current_app, Markup
from flask_user import current_user
from flask_login import login_user
from app.oauth.orcid_flask_dance import make_orcid_blueprint
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from sqlalchemy.orm.exc import NoResultFound
from app.models import db, User, OAuth
from datetime import datetime
from pprint import pprint
# Module-level OAuth blueprint; per-user tokens are persisted in the OAuth
# table via Flask-Dance's SQLAlchemy storage backend.
orcid_blueprint = make_orcid_blueprint(
    storage=SQLAlchemyStorage(OAuth, db.session, user=current_user)
)
@oauth_authorized.connect_via(orcid_blueprint)
def orcid_logged_in(orcid_blueprint, token):
    """
    Handles the oauth dance for ORCID logins
    Args:
        orchid_blueprint: The instantiated orcid blueprint
        token: the ouath token
    Result:
        Will do one of four things:
        1. If user is not logged in, but there is an oauth, will login
        2. If user is not logged in, will create a new user using information from orchid, and login
        3. If a user is logged in, and oauth is associated already, will pass through
        4. If a user is logged in, but no oauth associated, will associate the oauth
    """
    # Check if I have an API token
    if not token:
        flash("Failed to log in.", category="error")
        return False
    # get the orcid id information
    # ORCID API calls require that the orcid id be in the request, so that needs
    # to be extracted from the token prior to making any requests
    orcid_user_id = token['orcid']
    response = orcid_blueprint.session.get("{}/record".format(orcid_user_id))
    if not response.ok:
        flash("Failed to get ORCID User Data", category="error")
        return False
    orcid_record = response.json()
    # NOTE(review): leftover debug output (pprint and the print() calls
    # below) — consider replacing with proper logging.
    pprint(orcid_record)
    # Find this OAuth token in the database by provider + provider user id.
    query = OAuth.query.filter_by(
        provider=orcid_blueprint.name, provider_user_id=orcid_user_id)
    try:
        oauth = query.one()
    except NoResultFound:
        # First time we see this ORCID id: stage a new (unsaved) OAuth row.
        oauth = OAuth(
            provider=orcid_blueprint.name,
            provider_user_id=orcid_user_id,
            provider_user_login=orcid_user_id,
            token=token)
    if current_user.is_anonymous:
        print("Current user is anonymous")
        if oauth.user:
            # Case 1 (above)
            return current_app.user_manager._do_login_user(oauth.user, url_for("main.public"))
        else:
            # Case 2 (above)
            print("!!! No Oauth")
            orcid_person = orcid_record['person']
            # check if there is a user with this email address
            # Check to see if the ORCID user has an email exposed, otherwise, we cannot use it
            if len(orcid_person['emails']['email']) == 0:
                flash(Markup(
                    "Failed to create new user, must have at least one ORCID "
                    "email address accessible to restricted. Please login to your "
                    "ORCID account at http://orcid.org and update your permissions."
                    " Please see <a href='https://support.orcid.org/hc/en-us/articles/360006897614'>"
                    " Visibitility in ORCID</a> "
                    "for more information."))
                return redirect(url_for("user.login"))
                # NOTE(review): unreachable — the ``return redirect`` above
                # always exits first.
                return False
            orcid_email = orcid_person['emails']['email'][0]['email']
            query = User.query.filter_by(email=orcid_email)
            try:
                # An account with this email already exists: link and log in.
                nrc_u = query.one()
                oauth.user = nrc_u
                db.session.add(oauth)
                db.session.commit()
                login_user(oauth.user)
            except NoResultFound:
                print("!!!! we need to make an account")
                # Case 3
                # NOTE(review): this label does not match the docstring's
                # numbering (creating a user is case 2 there).
                try:
                    user = User(email=orcid_person['emails']['email'][0]['email'],
                                full_name="{} {}".format(orcid_person['name']['given-names']['value'],
                                                         orcid_person['name']['family-name']['value']),
                                active=True,
                                email_confirmed_at=datetime.utcnow(),
                                )
                    user.add_role("member")
                    user.add_role("registered-orcid", add_to_roles=True)
                    oauth.user = user
                    db.session.add_all([user, oauth])
                    db.session.commit()
                    # Need to use private method to bypass in this case
                    flash("Please update your Profile affiliation and affiliation type")
                    return current_app.user_manager._do_login_user(user, url_for('profile.current_user_profile_page'))
                except Exception as e:
                    flash("There was an error creating a user from the ORCID credentials: {}".format(e))
                    return redirect(url_for("user.login"))
    else:
        print("!!! Authenticated User")
        if oauth.user:
            # Case 3 (above): OAuth already linked to some account.
            flash("Account already associated with another user, cannot be associated")
            return redirect(url_for('profile.current_user_profile_page'))
        else:
            # Case 4 (above)
            print("!!! SHOULD BE HERE")
            oauth.user = current_user
            db.session.add(oauth)
            db.session.commit()
            flash("Successfully linked ORCID account")
    # Returning False tells Flask-Dance not to store the token itself; the
    # SQLAlchemy storage above already handled persistence.
    return False
@oauth_authorized.connect
def redirect_to_next_url(orcid_blueprint, token):
    """
    After a successful OAuth dance, honour a ``login_next_url`` previously
    stored in the session by redirecting there; otherwise return None and let
    the default Flask-Dance redirect apply.
    """
    next_url = session.get("login_next_url")
    if next_url is not None:
        return redirect(next_url)
@oauth_error.connect_via(orcid_blueprint)
def orcid_error(orcid_blueprint, **kwargs):
    """
    Handles passing back oauth errors elegantly
    Args:
        orcid_blueprint: Orcid Blueprint
        **kwargs: provider-supplied error details (error, description, uri, ...)
    Result:
        Flashes error messages if they exist
    """
    msg = "OAuth error from {name}! ".format(name=orcid_blueprint.name)
    for k, v in kwargs.items():
        msg += "{} = {} ".format(k, str(v))
    # Removed stray debug ``print("msg= ...")`` left over from development.
    flash(msg, category="error")
| StarcoderdataPython |
1672029 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.internet import reactor
def one(port, user, pw, service, perspective, number):
    """Connect to a PB server on localhost:*port*, request the named
    perspective with the given credentials, and schedule connected() to run
    with *number* once the perspective reference arrives."""
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", port, factory)
    def1 = factory.getPerspective(user, pw, service, perspective)
    def1.addCallback(connected, number)
def connected(perspective, number):
    """Report the obtained perspective reference, then invoke its remote
    foo() method with *number*."""
    print("got perspective ref:", perspective)
    message = "asking it to foo(%d)" % number
    print(message)
    perspective.callRemote("foo", number)
def main():
    """Exercise four user/service/perspective combinations on port 8800 plus a
    fifth on 8801, then start the reactor.  (The <PASSWORD> placeholders are
    credentials redacted from the published example.)"""
    one(8800, "user1", "<PASSWORD>", "service1", "perspective1.1", 10)
    one(8800, "user1", "<PASSWORD>", "service2", "perspective2.1", 11)
    one(8800, "user2", "<PASSWORD>", "service1", "perspective1.2", 12)
    one(8800, "user2", "<PASSWORD>", "service2", "perspective2.2", 13)
    one(8801, "user3", "<PASSWORD>", "service3", "perspective3.3", 14)
    reactor.run()
main()
| StarcoderdataPython |
170543 | from flask_api import FlaskAPI
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config
from flask import request, jsonify, abort
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build the Flask-API app for *config_name* (a key
    of instance.config.app_config) and register the Entry CRUD routes."""
    app = FlaskAPI(__name__, instance_relative_config=True)
    app.config.from_object(app_config[config_name])
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config.from_pyfile('config.py')
    db.init_app(app)
    # Imported here so the model is registered against the initialised db.
    from mycroblog.models import Entry
    def _entry_payload(entry):
        # Single place defining the public JSON shape of an Entry
        # (previously duplicated across create/find_one/browse).
        return {
            'id': entry.id,
            'text': entry.text,
            'date_created': entry.date_created
        }
    # Was methods=['post']; Flask uppercases method names so this is
    # behaviour-neutral, but now consistent with 'GET'/'DELETE' below.
    @app.route('/entry', methods=['POST'])
    def create():
        """Create an entry from the posted 'text' field."""
        text = str(request.data.get('text', ''))
        if text:
            entry = Entry(text=text)
            entry.save()
            response = jsonify(_entry_payload(entry))
            response.status_code = 201
            return response
        # NOTE(review): 405 (Method Not Allowed) is an odd status for a
        # missing body — 400 would fit better; kept for client compatibility.
        response = jsonify({
            'message': 'We cannot save the entry'
        })
        response.status_code = 405
        return response
    @app.route('/entry/<int:id>', methods=['GET'])
    def find_one(id):
        """Return the entry with the given id, or 404."""
        entry = Entry.query.filter_by(id=id).first()
        if not entry:
            abort(404)
        response = jsonify(_entry_payload(entry))
        response.status_code = 200
        return response
    @app.route('/entry/<int:id>', methods=['DELETE'])
    def delete(id):
        """Delete the entry with the given id, or 404."""
        entry = Entry.query.filter_by(id=id).first()
        if not entry:
            abort(404)
        entry.delete()
        response = jsonify({
            'message': 'Successfully deleted.'
        })
        response.status_code = 200
        return response
    @app.route('/entries', methods=['GET'])
    def browse():
        """Return every entry as a JSON list."""
        entries = Entry.get_all()
        results = [_entry_payload(entry) for entry in entries]
        response = jsonify(results)
        response.status_code = 200
        return response
    return app
| StarcoderdataPython |
1727628 | import os
import io
import glob
import json
import shutil
import datetime
import time
import zipfile
import requests
from importlib import import_module
from jinja2 import Template
from django.conf import settings
from celery.decorators import task
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
import google
from google.cloud import storage
from helpers import utils
from helpers.utils import get_jinja_template
from helpers.email_utils import notify_admins, send_email
import analysis.models
from analysis.models import Workflow, \
AnalysisProject, \
AnalysisProjectResource, \
SubmittedJob, \
Warning, \
CompletedJob, \
JobClientError, \
ProjectConstraint, \
WorkflowContainer
from base.models import Resource, Issue, CurrentZone
# Directory containing this module; used to locate wdl_job_config.cfg
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Name of the zip archive bundling secondary WDL files ("dependencies").
# NOTE(review): "depenencies" is a typo, but the value is kept as-is because
# it only needs to be internally consistent within this module.
ZIPNAME = 'depenencies.zip'
# File name of the rendered WDL inputs JSON written into each staging dir
WDL_INPUTS = 'inputs.json'
# Keys expected in the task payload dict handed to prep_workflow:
WORKFLOW_LOCATION = 'location'
WORKFLOW_PK = 'workflow_primary_key'
USER_PK = 'user_pk'
# Provenance file (git repo/commit + docker digests) copied to job outputs
VERSIONING_FILE = 'workflow_details.txt'
# Retry policy for copying job outputs to the user's bucket
MAX_COPY_ATTEMPTS = 5
SLEEP_PERIOD = 60 # in seconds. how long to sleep between copy attempts.
class InputMappingException(Exception):
    """Raised when front-end payload data cannot be mapped onto WDL inputs."""
    pass

class MissingMappingHandlerException(Exception):
    """Raised when a workflow's declared input-mapping handler file is absent."""
    pass

class MissingDataException(Exception):
    """Raised when the front-end payload lacks a value a WDL input requires."""
    pass

class JobOutputCopyException(Exception):
    """Raised when copying a job output to the user's bucket repeatedly fails."""
    pass

class JobOutputsException(Exception):
    """Raised when querying or collecting final outputs from Cromwell fails."""
    pass

class MockAnalysisProject(object):
    '''
    A mock class for use when testing.
    '''
    pass
def handle_exception(ex, message = ''):
    '''
    Record and report a problem encountered while submitting to / executing
    on the Cromwell server: persist an Issue row and email the admins.

    `ex` is accepted for signature compatibility but is not used here;
    callers fold exception details into `message` themselves.
    '''
    subject = 'Error encountered with asynchronous task.'
    # persist the problem so it is visible in the database
    Issue(message = message).save()
    notify_admins(message, subject)
def create_module_dot_path(filepath):
    '''
    Convert a directory path living under settings.BASE_DIR into a dotted
    module path suitable for import_module (e.g. "a/b/c" -> "a.b.c").
    '''
    rel = os.path.relpath(filepath, start=settings.BASE_DIR)
    return rel.replace('/', '.')
def fill_wdl_input(data):
    '''
    Constructs the inputs to the WDL. Returns a dict

    `data` is the payload assembled from the front-end submission.  It must
    contain WORKFLOW_LOCATION (absolute workflow directory) and USER_PK
    (submitting user's primary key) plus one entry per GUI input element.

    Raises MissingDataException, MissingMappingHandlerException,
    InputMappingException, or a plain Exception when the GUI spec, the
    payload, and the WDL inputs template cannot be reconciled.
    '''
    absolute_workflow_dir = data[WORKFLOW_LOCATION]
    user_pk = data[USER_PK]
    user = get_user_model().objects.get(pk=user_pk)

    # load the wdl input into a dict
    wdl_input_path = os.path.join(absolute_workflow_dir,
        settings.WDL_INPUTS_TEMPLATE_NAME)
    wdl_input_dict = json.load(open(wdl_input_path))
    required_inputs = list(wdl_input_dict.keys())

    # load the gui spec and create a dictionary:
    gui_spec_path = os.path.join(absolute_workflow_dir,
        settings.USER_GUI_SPEC_NAME)
    gui_spec_json = json.load(open(gui_spec_path))

    # for tracking which inputs were found. We can then see that all the required
    # inputs were indeed specified
    found_inputs = []

    # iterate through the input elements that were specified for the GUI
    for element in gui_spec_json[settings.INPUT_ELEMENTS]:
        target = element[settings.TARGET]
        if type(target)==str and target in wdl_input_dict:
            # if the GUI specified a string input, it is supposed to directly
            # map to a WDL input. If not, something has been corrupted.
            try:
                value = data[target] # get the value of the input from the frontend
                wdl_input_dict[target] = value # set the value in the dict of WDL inputs
                found_inputs.append(target)
            except KeyError:
                # if either of those key lookups failed, this exception will be raised
                raise MissingDataException('The key "%s" was not in either the data payload (%s) '
                    'or the WDL input (%s)' % (target, data, wdl_input_dict))
        elif type(target)==dict:
            # if the "type" of target is a dict, it needs to have a name attribute that is
            # present in the data payload. Otherwise, we cannot know where to map it
            if target[settings.NAME] in data:
                unmapped_data = data[target[settings.NAME]]
                # The data referenced by data[target[settings.NAME]] could effectively be anything. Its format
                # is dictated by some javascript code. For example, a file chooser
                # could send data to the backend in a variety of formats, and that format
                # is determined solely by the author of the workflow. We need to have custom
                # code which takes that payload and properly maps it to the WDL inputs

                # Get the handler code:
                handler_path = os.path.join(absolute_workflow_dir, target[settings.HANDLER])
                if os.path.isfile(handler_path):
                    # we have a proper file. Call that to map our unmapped_data
                    # to the WDL inputs
                    print('Using handler code in %s to map GUI inputs to WDL inputs' % handler_path)
                    module_name = target[settings.HANDLER][:-len(settings.PY_SUFFIX)]
                    module_location = create_module_dot_path(absolute_workflow_dir)
                    module_name = module_location + '.' + module_name
                    mod = import_module(module_name)
                    print('Imported module %s' % module_name)
                    try:
                        map_dict = mod.map_inputs(user, data, target[settings.NAME], target[settings.TARGET_IDS])
                    except Exception as ex:
                        raise InputMappingException('An exception as raised when attempting to map the frontend data. '
                            'Module name was %s, inputs were: %s\n%s\n%s' % (module_name, data, target[settings.NAME], target[settings.TARGET_IDS]))
                    print('Result of input mapping: %s' % map_dict)
                    # fold every mapped key into the WDL inputs; any key the
                    # handler produced that is NOT a WDL input is an error
                    for key, val in map_dict.items():
                        if key in wdl_input_dict:
                            wdl_input_dict[key] = val
                            found_inputs.append(key)
                        else:
                            raise InputMappingException('Problem! After mapping the front-'
                                'end to WDL inputs using the map \n%s\n'
                                'the key "%s" was not one of the WDL inputs' \
                                % (map_dict, key)
                            )
                else:
                    raise MissingMappingHandlerException('Could not find handler for mapping at %s' % handler_path)
            else:
                raise MissingDataException('If the type of the WDL target is a dictionary, then it MUST '
                    'specify a "name" attribute. The value of that attribute must be in the '
                    'payload sent by the frontend.')
        else:
            raise Exception('Unexpected object encountered when trying to map front-end '
                'to WDL inputs.')

    # verify that every required WDL input was filled in by some GUI element
    if len(set(required_inputs).difference(set(found_inputs))) > 0:
        raise Exception('The set of required inputs was %s, and the set of found '
            'inputs was %s' % (required_inputs, found_inputs)
        )
    else:
        return wdl_input_dict
def check_constraints(project, absolute_workflow_dir, inputs_json):
    '''
    Loads the module containing the code to check constraints
    and calls it. Returns a 3-ple. The first item of the tuple indicates
    if any of the constraints are violated; True is all pass, False if any fail.
    The second bool indicates if there was a problem with the handler module (i.e. which is outside
    the user's control). Finally the third is a list of messages that can indicate how the client
    violated the constraints (e.g. "you submitted too many samples")

    If there was a problem with the handler module (i.e. missing file or some bug in the code)
    then inform the admin and let the client know that it is being worked on.
    '''
    # Using the project, we can get any constraints applied to this project:
    project_constraints = ProjectConstraint.objects.filter(project=project)
    if len(project_constraints) == 0:
        return (True, False, []) # no constraints, of course it passes

    constraint_passes = []
    messages = []
    for project_constraint in project_constraints:
        # the constraint member is of type ImplementedConstraint, which has as one of its members
        # an attribute referencing the WorkflowConstraint
        implemented_constraint = project_constraint.constraint
        handler_filename = implemented_constraint.workflow_constraint.handler
        handler_path = os.path.join(absolute_workflow_dir, handler_filename)
        if os.path.isfile(handler_path):
            # the handler file exists. Load the module and call the function
            module_name = handler_filename[:-len(settings.PY_SUFFIX)]
            module_location = create_module_dot_path(absolute_workflow_dir)
            module_name = module_location + '.' + module_name
            mod = import_module(module_name)
            try:
                constraint_satisifed, message = mod.check_constraints(implemented_constraint, inputs_json)
                constraint_passes.append(constraint_satisifed)
                messages.append(message)
            except Exception as ex:
                print(ex) # so we can see the exception in the logs
                handle_exception(ex, message = str(ex))
                return (False, True, messages)
        else:
            # the handler file is not there. Something is wrong. Let an admin know
            handle_exception(None, message = 'Constraint handler module was not found at %s for project %s' % (handler_path, str(project.analysis_uuid)))
            # BUGFIX: was `return (False, True), messages`, which produced a
            # nested 2-tuple instead of the documented 3-tuple and would break
            # the 3-way unpacking done by callers (see prep_workflow).
            return (False, True, messages)
    return (all(constraint_passes), False, messages)
def get_docker_digests(workflow_obj):
    '''
    Using a Workflow instance, get the docker images that execute this workflow
    and query dockerhub to get the digest. This way, we know JUST PRIOR to the run
    what the exact image was. This is more robust than using the image tag, which is mutable

    Returns a dictionary where the keys are the images ('docker.io/userA/imageB:tagC') and
    the values are the digests
    '''
    containers = WorkflowContainer.objects.filter(workflow = workflow_obj)
    d = {}
    for c in containers:
        image_tag = c.image_tag
        current_digest = utils.query_for_digest(image_tag)
        original_digest = c.hash_string
        if current_digest != original_digest:
            # this means the docker container has a different hash since
            # the ingestion was performed. This is not necessarily an error, but
            # one might choose to issue a warning here.
            # BUGFIX: the format string had a %s placeholder but no argument,
            # which would raise nothing (no % operator) and print the literal
            # '%s'; interpolate the image tag so the log is meaningful.
            print('The digest hash for docker image %s has changed since '
                ' the workflow was first ingested.' % image_tag)
            print('Original digest: %s' % original_digest)
            print('Current digest: %s' % current_digest)
        d[image_tag] = current_digest
    return d
@task(name='prep_workflow')
def prep_workflow(data):
    '''
    Celery task that stages a WDL workflow for submission: copies the WDL
    files into a timestamped staging directory, renders the inputs JSON,
    writes a provenance file, checks project constraints, and (for real
    submissions) kicks off execution on Cromwell.

    `data` carries the workflow primary key, the workflow location, the user
    primary key, an 'analysis_uuid' (None for dry-run tests), and the
    GUI-supplied input values.
    '''
    # if the 'analysis_uuid' key evaluates to something, then
    # we have a "real" request to run analysis. If it evaluates
    # to None, then we are simply testing that the correct files/variables
    # are created
    print('Workflow submitted with data: %s' % data)
    date_str = datetime.datetime.now().strftime('%H%M%S_%m%d%Y')
    workflow_obj = Workflow.objects.get(pk=data[WORKFLOW_PK])
    if data['analysis_uuid']:
        staging_dir = os.path.join(settings.JOB_STAGING_DIR,
            str(data['analysis_uuid']),
            date_str
        )
        analysis_project = AnalysisProject.objects.get(
            analysis_uuid = data['analysis_uuid']
        )
    else:
        # dry-run/test mode: stage under a test folder and fake the project
        staging_dir = os.path.join(settings.JOB_STAGING_DIR,
            'test_%s' % workflow_obj.workflow_name,
            date_str
        )
        analysis_project = MockAnalysisProject()
        analysis_project.analysis_bucketname = 'some-mock-bucket'

    # make the temporary staging dir:
    try:
        os.makedirs(staging_dir)
    except OSError as ex:
        if ex.errno == 17: # existed already
            raise Exception('Staging directory already existed. This should not happen.')
        else:
            raise Exception('Something else went wrong when attempting to create a staging'
                ' directory at %s' % staging_dir)

    # copy WDL files over to staging:
    wdl_files = glob.glob(os.path.join(data[WORKFLOW_LOCATION], '*.' + settings.WDL))
    for w in wdl_files:
        shutil.copy(w, staging_dir)

    # if there are WDL files in addition to the main one, they need to be zipped
    # and submitted as 'dependencies'
    additional_wdl_files = [x for x in glob.glob(os.path.join(staging_dir, '*.' + settings.WDL))
        if os.path.basename(x) != settings.MAIN_WDL]
    zip_archive = None
    if len(additional_wdl_files) > 0:
        zip_archive = os.path.join(staging_dir, ZIPNAME)
        with zipfile.ZipFile(zip_archive, 'w') as zipout:
            for f in additional_wdl_files:
                zipout.write(f, os.path.basename(f))

    # create/write the input JSON to a file in the staging location
    wdl_input_dict = fill_wdl_input(data)
    wdl_input_path = os.path.join(staging_dir, WDL_INPUTS)
    with open(wdl_input_path, 'w') as fout:
        json.dump(wdl_input_dict, fout)

    # create a versioning file, which will provide details like git commit
    # and docker digests
    version_file = os.path.join(staging_dir, VERSIONING_FILE)
    git_url = workflow_obj.git_url
    git_commit = workflow_obj.git_commit_hash
    docker_digest_dict = get_docker_digests(workflow_obj)
    d = {
        'git_repository': git_url,
        'git_commit': git_commit,
        'docker': docker_digest_dict
    }
    with open(version_file, 'w') as fout:
        fout.write(json.dumps(d))

    # check that any applied constraints are not violated:
    if data['analysis_uuid']:
        print('check constraints')
        constraints_satisfied, problem, constraint_violation_messages = check_constraints(analysis_project, data[WORKFLOW_LOCATION], wdl_input_path)
        print('done checking constraints')
        if problem:
            # something went wrong in OUR constraint-handler code; tell the
            # client to be patient (admins were already notified)
            print('Was problem with constraints!')
            analysis_project.status = '''
                An unexpected error occurred on job submission.  An administrator has been automatically notified of this error.
                Thank you for your patience.
                '''
            analysis_project.error = True
            analysis_project.save()
            return
        elif not constraints_satisfied:
            # the CLIENT violated a constraint; record each violation message
            print('constraints violated')
            analysis_project.status = 'The constraints imposed on this project were violated.'
            analysis_project.error = True
            analysis_project.completed = False
            analysis_project.success = False
            analysis_project.save()
            for m in constraint_violation_messages:
                jc = JobClientError(project=analysis_project, error_text=m)
                jc.save()
            return

    # Go start the workflow:
    if data['analysis_uuid']:
        print('had UUID')
        # we are going to start the workflow-- check if we should run a pre-check
        # to examine user input:
        run_precheck = False
        if os.path.exists(os.path.join(staging_dir, settings.PRECHECK_WDL)):
            print('should run precheck')
            run_precheck = True
        execute_wdl(analysis_project, staging_dir, run_precheck)
    else:
        # test mode: report what WOULD have been submitted
        print('View final staging dir at %s' % staging_dir)
        print('Would post the following:\n')
        print('Data: %s\n' % data)
        return wdl_input_dict
def get_zone_as_string():
    '''
    Returns the current zone as a string

    Reads the single CurrentZone row configured in the database and returns
    its zone name (e.g. 'us-east4-c').  Returns None -- after notifying the
    admins -- if no current zone has been selected.
    '''
    try:
        # there should be exactly one CurrentZone row; indexing an empty
        # queryset raises IndexError, which we treat as "not configured"
        current_zone = CurrentZone.objects.all()[0]
        return current_zone.zone.zone
    except IndexError:
        message = 'A current zone has not set. Please check that a single zone has been selected in the database'
        handle_exception(None, message=message)
        return None
def execute_wdl(analysis_project, staging_dir, run_precheck=False):
    '''
    This function performs the actual work of submitting the job.

    Posts the WDL source, inputs, options, and (optional) dependency zip in
    `staging_dir` to the Cromwell server.  On a 201/Submitted response a
    SubmittedJob row is created and the AnalysisProject status is updated;
    on any failure the project is flagged and the admins are notified.
    '''
    # read config to get the names/locations/parameters for job submission
    config_path = os.path.join(THIS_DIR, 'wdl_job_config.cfg')
    config_dict = utils.load_config(config_path)

    # the path of the input json file:
    wdl_input_path = os.path.join(staging_dir, WDL_INPUTS)

    # pull together the components of the POST request to the Cromwell server
    submission_endpoint = config_dict['submit_endpoint']
    submission_url = settings.CROMWELL_SERVER_URL + submission_endpoint
    # (removed a dead `payload = {}` that was immediately overwritten)
    payload = {'workflowType': config_dict['workflow_type'], \
        'workflowTypeVersion': config_dict['workflow_type_version']
    }

    # load the options file so we can fill-in the zones:
    options_json = {}
    current_zone = get_zone_as_string()
    if current_zone:
        options_json['default_runtime_attributes'] = {'zones': current_zone}
    options_json_str = json.dumps(options_json)
    options_io = io.BytesIO(options_json_str.encode('utf-8'))

    files = {
        'workflowOptions': options_io,
        'workflowInputs': open(wdl_input_path,'rb')
    }
    if run_precheck:
        files['workflowSource'] = open(os.path.join(staging_dir, settings.PRECHECK_WDL), 'rb')
    else:
        files['workflowSource'] = open(os.path.join(staging_dir, settings.MAIN_WDL), 'rb')

    zip_archive = os.path.join(staging_dir, ZIPNAME)
    if os.path.exists(zip_archive):
        files['workflowDependencies'] = open(zip_archive, 'rb')

    # start the job:
    try:
        response = requests.post(submission_url, data=payload, files=files)
    except Exception as ex:
        print('An exception was raised when requesting cromwell server:')
        print(ex)
        message = 'An exception occurred when trying to submit a job to Cromwell. \n'
        message += 'Project ID was: %s' % str(analysis_project.analysis_uuid)
        message += str(ex)
        analysis_project.status = '''
            Error on job submission. An administrator has been automatically notified of this error.
            Thank you for your patience.
            '''
        analysis_project.error = True
        analysis_project.save()
        handle_exception(ex, message=message)
        raise ex
    finally:
        # FIX: the file handles opened for the multipart upload were never
        # closed (resource leak); release them whether or not the POST worked
        for handle in files.values():
            handle.close()

    response_json = json.loads(response.text)
    if response.status_code == 201:
        if response_json['status'] == 'Submitted':
            job_id = response_json['id']
            if run_precheck:
                job_status = 'Checking input data...'
            else:
                job_status = 'Job submitted...'
            job = SubmittedJob(project=analysis_project,
                job_id=job_id,
                job_status=job_status,
                job_staging_dir=staging_dir,
                is_precheck = run_precheck
            )
            job.save()

            # update the project also:
            analysis_project.started = True # should already be set
            analysis_project.start_time = datetime.datetime.now()
            analysis_project.status = job_status
            analysis_project.save()
        else:
            # In case we get other types of responses, inform the admins:
            message = 'Job was submitted, but received an unexpected response from Cromwell:\n'
            message += response.text
            handle_exception(None, message=message)
    else:
        message = 'Did not submit job-- status code was %d, and response text was: %s' % (response.status_code, response.text)
        analysis_project.status = '''
            Error on job submission. An administrator has been automatically notified of this error.
            Thank you for your patience.
            '''
        analysis_project.error = True
        analysis_project.save()
        handle_exception(None, message=message)
def parse_outputs(obj):
    '''
    Flatten a Cromwell outputs object into a flat list of output strings.

    Depending on how the workflow was created, the outputs object can be
    relatively complex -- e.g. a scattered job can nest lists under each
    key; other times it is a simple list or a bare string value.

    `obj` must be a dict OR a list (anything else raises).  Booleans are
    skipped, and None values are tolerated (optional outputs).
    Returns a list of strings.
    '''
    if type(obj) == dict:
        children = obj.values()
    elif type(obj) == list:
        children = obj
    else:
        raise Exception('Unexpected type')
    collected = []
    for child in children:
        if type(child) == str:
            # a simple string output (a single file path)
            collected.append(child)
        elif type(child) == bool:
            continue
        elif child is not None:
            # nested dict or list -- recurse; None is fine (optional output)
            collected.extend(parse_outputs(child))
    return collected
def get_resource_size(path):
    '''
    This is used to query for the size of a file located at `path`.
    Depending on the environment, this is different
    TODO: abstract this for different cloud providers!!

    Returns the blob size in bytes (0 when the size is unset).
    '''
    if settings.CONFIG_PARAMS['cloud_environment'] != settings.GOOGLE:
        raise Exception('Have not implemented for this cloud provider yet')
    storage_client = storage.Client()
    bucket_prefix = settings.CONFIG_PARAMS['google_storage_gs_prefix']
    # strip the "gs://" style prefix, then split bucket from object path
    bucketname, _, objectname = path[len(bucket_prefix):].partition('/')
    blob = storage_client.get_bucket(bucketname).get_blob(objectname)
    # size can be None on a freshly constructed blob; report 0 in that case
    return blob.size or 0
def bucket_to_bucket_rewrite_in_google(source_blob, destination_bucket, destination_object_name):
    '''
    If two files are NOT in the same region, we cannot safely attempt to copy
    using the copy_blob method. We have to use the rewrite

    Copies `source_blob` chunk-by-chunk into `destination_bucket` under
    `destination_object_name`, tolerating up to a few consecutive 500s from
    the Google backend before giving up.
    '''
    # first create a Blob which we will "fill"
    destination_blob = storage.Blob(destination_object_name, destination_bucket)

    i = 0
    consecutive_failures = 0
    max_consecutive_failures = 3
    finished = False
    print('Starting transfer...')
    while not finished:
        print('Transfer chunk %d' % i)
        try:
            if i == 0:
                token, bytes_written, total_bytes = destination_blob.rewrite(source_blob)
            else:
                token, bytes_written, total_bytes = destination_blob.rewrite(source_blob, token=token)
            i += 1
            if bytes_written == total_bytes:
                finished = True
                return
        except google.api_core.exceptions.InternalServerError as ex:
            # if we catch this type of error, we can attempt to recover
            consecutive_failures += 1
            if consecutive_failures <= max_consecutive_failures:
                print('A 500 error was raised by the Google backend. Re-try')
            else:
                print('Experienced %d consecutive errors from the Google backend. Aborting the copy.' % max_consecutive_failures)
                # BUGFIX: the message had a %d placeholder but no interpolated
                # value, which would raise a TypeError-free literal '%d'; also
                # chain the cause so the original error is not lost.
                raise Exception('Experienced %d consecutive errors from the Google backend.' % max_consecutive_failures) from ex
        except Exception as ex:
            # preserve the original exception as the cause for debugging
            raise Exception('Blob rewrite failed for an unexpected reason.') from ex
def move_resource_to_user_bucket(job, resource_path):
    '''
    Copies the final job output from the cromwell bucket to the user's bucket/folder

    Returns the full destination path (including the "gs://" style prefix).
    Raises JobOutputCopyException if the copy still fails after
    MAX_COPY_ATTEMPTS tries.
    '''
    storage_client = storage.Client()

    # Creates the necessary items for the destination of this file
    # strip the prefix (e.g. "gs://")
    destination_bucket_prefix = settings.CONFIG_PARAMS[ \
        'storage_bucket_prefix' \
        ][len(settings.CONFIG_PARAMS['google_storage_gs_prefix']):]
    destination_bucket_name = '%s-%s' % (destination_bucket_prefix, str(job.project.owner.user_uuid)) # <prefix>-<uuid>
    destination_object_name = os.path.join(str(job.project.analysis_uuid), \
        job.job_id, \
        os.path.basename(resource_path)
    )
    full_destination_with_prefix = '%s%s/%s' % (settings.CONFIG_PARAMS['google_storage_gs_prefix'],
        destination_bucket_name, \
        destination_object_name \
    )

    # now that we have the paths/names, create the class objects representing these things:
    # typically this bucket would already exist due to a previous upload, but
    # we create the bucket if it does not exist
    try:
        destination_bucket = storage_client.get_bucket(destination_bucket_name)
        print('Destination bucket at %s existed.' % destination_bucket_name)
    except google.api_core.exceptions.NotFound:
        b = storage.Bucket(destination_bucket_name)
        # BUGFIX: this previously assigned `b.name = bucket_name`, but
        # `bucket_name` is undefined in this scope and would raise a
        # NameError whenever the bucket had to be created.
        b.name = destination_bucket_name
        zone_str = get_zone_as_string() # if the zone is (somehow) not set, this will be None
        # if zone_str was None, b.location=None, which is the default (and the created bucket is multi-regional)
        if zone_str:
            b.location = '-'.join(zone_str.split('-')[:-1]) # e.g. makes 'us-east4-c' into 'us-east4'
        destination_bucket = storage_client.create_bucket(b)

    # now handle the source side of things:
    full_source_location_without_prefix = resource_path[len(settings.CONFIG_PARAMS['google_storage_gs_prefix']):]
    source_bucket_name = full_source_location_without_prefix.split('/')[0]
    source_object_name = '/'.join(full_source_location_without_prefix.split('/')[1:])
    source_bucket = storage_client.get_bucket(source_bucket_name)
    source_blob = storage.Blob(source_object_name, source_bucket)

    # if somehow the destination bucket is in another region, larger transfers can fail
    location_match = destination_bucket.location == source_bucket.location

    # Give the copy a few chances to succeed.
    copied = False
    attempts = 0
    while ((not copied) and (attempts < MAX_COPY_ATTEMPTS)):
        try:
            print('Copy %s to %s' % (source_blob,destination_object_name))
            if location_match:
                print('Buckets were both in the same region.')
                source_bucket.copy_blob(source_blob, \
                    destination_bucket, \
                    new_name=destination_object_name \
                )
            else:
                print('Buckets were in different regions. Doing bucket rewrite method...')
                bucket_to_bucket_rewrite_in_google(source_blob, destination_bucket, destination_object_name)
            copied = True
        except Exception as ex:
            # log the actual failure so retries are diagnosable
            print(ex)
            print('Copy failed. Sleep and try again.')
            time.sleep(SLEEP_PERIOD)
            attempts += 1

    # if still not copied, raise an issue:
    if not copied:
        raise JobOutputCopyException('Still could not copy after %d attempts.' % MAX_COPY_ATTEMPTS)
    else:
        return full_destination_with_prefix
def register_outputs(job):
    '''
    This adds outputs from the workflow to the list of Resources owned by the client
    This way they are able to download files produced by the workflow

    Queries the Cromwell /outputs endpoint for `job`, copies each output file
    into the user's bucket, and records a Resource + AnalysisProjectResource
    row per file.  Raises JobOutputsException on any failure.
    '''
    config_path = os.path.join(THIS_DIR, 'wdl_job_config.cfg')
    config_dict = utils.load_config(config_path)

    # pull together the components of the request to the Cromwell server
    outputs_endpoint = config_dict['outputs_endpoint']
    outputs_url_template = Template(settings.CROMWELL_SERVER_URL + outputs_endpoint)
    outputs_url = outputs_url_template.render({'job_id': job.job_id})

    try:
        response = requests.get(outputs_url)
        response_json = json.loads(response.text)
        if (response.status_code == 404) or (response.status_code == 400) or (response.status_code == 500):
            # Cromwell could not answer the query; flag the project and tell admins
            job.project.status = 'Analysis completed. Error encountered when collecting final outputs.'
            job.project.error = True
            job.project.save()
            handle_exception(None, 'Query for job failed with message: %s' % response_json['message'])
        else: # the request itself was OK
            outputs = response_json['outputs']
            # flatten Cromwell's (possibly nested) outputs object to file paths
            output_filepath_list = parse_outputs(outputs)
            environment = settings.CONFIG_PARAMS['cloud_environment']
            for p in output_filepath_list:
                size_in_bytes = get_resource_size(p)
                full_destination_with_prefix = move_resource_to_user_bucket(
                    job,
                    p
                )
                # add the Resource to the database:
                r = Resource(
                    source = environment,
                    path = full_destination_with_prefix,
                    name = os.path.basename(p),
                    owner = job.project.owner,
                    size = size_in_bytes
                )
                r.save()
                # add a ProjectResource to the database, so we can tie the Resource created above with the analysis project:
                apr = AnalysisProjectResource(analysis_project=job.project, resource=r)
                apr.save()
    except Exception as ex:
        print('An exception was raised when requesting job outputs from cromwell server')
        print(ex)
        message = 'An exception occurred when trying to query outputs from Cromwell. \n'
        message += 'Job ID was: %s' % job.job_id
        message += 'Project ID was: %s' % job.project.analysis_uuid
        message += str(ex)
        raise JobOutputsException(message)
def copy_pipeline_components(job):
    '''
    This copies the inputs.json to the output directory. Together with the WDL files, that can be used
    to recreate everything
    Also creates a file that indicates the repository and commit ID for the workflow version

    For each file uploaded, a Resource and AnalysisProjectResource row is
    created so the client can download it alongside the job outputs.
    '''
    additional_files = []

    # where the submitted files were placed:
    staging_dir = job.job_staging_dir # something like /www/tmp_staging/<UUID>/<timestamp>

    # the inputs.json that was ultimately submitted to cromwell
    wdl_input_path = os.path.join(staging_dir, WDL_INPUTS)
    additional_files.append(wdl_input_path)

    # the versioning file, which has the git info and the docker container
    # information. Inside the staging folder, it's simply VERSIONING_FILE
    version_file = os.path.join(staging_dir, VERSIONING_FILE)
    additional_files.append(version_file)

    environment = settings.CONFIG_PARAMS['cloud_environment']
    storage_client = storage.Client()
    for p in additional_files:
        stat_info = os.stat(p)
        size_in_bytes = stat_info.st_size
        destination_bucket = '%s-%s' % (settings.CONFIG_PARAMS['storage_bucket_prefix'], str(job.project.owner.user_uuid))
        object_name = os.path.join( str(job.project.analysis_uuid), \
            job.job_id, \
            os.path.basename(p)
        )
        # perform the upload to the bucket:
        # (destination_bucket carries the "gs://"-style prefix; strip it
        # to get the bare bucket name the client API expects)
        bucket_name = destination_bucket[len(settings.CONFIG_PARAMS['google_storage_gs_prefix']):]
        bucket = storage_client.get_bucket(bucket_name)
        blob = bucket.blob(object_name)
        blob.upload_from_filename(p)

        # add the Resource to the database:
        full_destination_with_prefix = '%s/%s' % (
            destination_bucket, \
            object_name \
        )
        r = Resource(
            source = environment,
            path = full_destination_with_prefix,
            name = os.path.basename(p),
            owner = job.project.owner,
            size = size_in_bytes
        )
        r.save()

        # add a ProjectResource to the database, so we can tie the Resource created above with the analysis project:
        apr = AnalysisProjectResource(analysis_project=job.project, resource=r)
        apr.save()
def handle_success(job):
    '''
    This is executed when a WDL job has completed and Cromwell has indicated success
    `job` is an instance of SubmittedJob

    Registers outputs, copies provenance files, marks the project complete,
    emails the client, and finally replaces the SubmittedJob with a
    CompletedJob row.
    '''
    try:
        # if everything goes well, we set the AnalysisProject to a completed state,
        # notify the client, and delete the SubmittedJob. Since there is a 1:1
        # between AnalysisProject and a complete job, that's enough to track history
        register_outputs(job)
        copy_pipeline_components(job)

        # update the AnalysisProject instance to reflect the success:
        project = job.project
        project.completed = True
        project.success = True
        project.error = False
        project.status = 'Successful completion'
        project.finish_time = datetime.datetime.now()
        project.save()

        # inform client:
        email_address = project.owner.email
        current_site = Site.objects.get_current()
        domain = current_site.domain
        url = 'https://%s' % domain
        context = {'site': url, 'user_email': email_address}
        email_template = get_jinja_template('email_templates/analysis_success.html')
        email_html = email_template.render(context)
        email_plaintxt_template = get_jinja_template('email_templates/analysis_success.txt')
        email_plaintxt = email_plaintxt_template.render(context)
        email_subject = open('email_templates/analysis_success_subject.txt').readline().strip()
        send_email(email_plaintxt, email_html, email_address, email_subject)

        # delete the staging dir where the files were:
        staging_dir = job.job_staging_dir
        shutil.rmtree(staging_dir)
    except Exception as ex:
        # wrap-up failed even though Cromwell succeeded: flag the project
        # Set the project parameters so that clients will know what is going on:
        project = job.project
        project.status = 'Analysis completed. Error encountered when preparing final output. An administrator has been notified'
        project.error = True
        project.success = False
        project.completed = False
        project.save()

        if type(ex) == JobOutputsException:
            message = str(ex)
        else:
            message = 'Some other exception was raised following wrap-up from a completed job.'
        handle_exception(ex, message=message)
    finally:
        # regardless of what happened, save a CompletedJob instance
        # NOTE(review): success=True is recorded here even when the wrap-up
        # above failed -- presumably because the Cromwell run itself
        # succeeded; confirm this is intentional.
        project = job.project
        cj = CompletedJob(project=project,
            job_id = job.job_id,
            job_status=job.job_status,
            success = True,
            job_staging_dir=job.job_staging_dir)
        cj.save()
        job.delete()
def handle_failure(job):
    '''
    This is executed when a WDL job has completed and Cromwell has indicated a failure has occurred
    `job` is an instance of SubmittedJob

    Records a failed CompletedJob, flags the AnalysisProject, optionally
    emails the client, and always notifies the admins.
    '''
    project = job.project
    cj = CompletedJob(project=project,
        job_id = job.job_id,
        job_status=job.job_status,
        success = False,
        job_staging_dir=job.job_staging_dir)
    cj.save()
    job.delete()

    # update the AnalysisProject instance to reflect the failure:
    project.completed = False
    project.success = False
    project.error = True
    project.status = 'The job submission has failed. An administrator has been notified.'
    project.finish_time = datetime.datetime.now()
    project.restart_allowed = False # do not allow restarts for runtime failures
    project.save()

    # inform client (if desired):
    if not settings.SILENT_CLIENTSIDE_FAILURE:
        recipient = project.owner.email
        # FIX: the template files were opened without ever being closed
        # (resource leak); use context managers so the handles are released
        with open('email_templates/analysis_fail.html') as fin:
            email_html = fin.read()
        with open('email_templates/analysis_fail.txt') as fin:
            email_plaintext = fin.read()
        with open('email_templates/analysis_fail_subject.txt') as fin:
            email_subject = fin.readline().strip()
        send_email(email_plaintext, email_html, recipient, email_subject)

    # notify admins:
    message = 'Job (%s) experienced failure.' % cj.job_id
    subject = 'Cromwell job failure'
    notify_admins(message, subject)
def walk_response(key, val, target):
    '''
    Recursively collect every primitive stored under a key equal to `target`
    anywhere inside the JSON-like object `val`.  `key` is the key that
    referenced `val` in its parent container ('' at the top level and for
    list elements).  Returns a list of the matching primitive values.
    '''
    if type(val) == list:
        # list elements have no key of their own
        return [found for element in val for found in walk_response('', element, target)]
    if type(val) == dict:
        return [found for k, v in val.items() for found in walk_response(k, v, target)]
    # primitive: keep it only when its key matches the target
    return [val] if key == target else []
def log_client_errors(job, stderr_file_list):
    '''
    This handles pulling the stderr files (which indicate what went wrong)
    from the cloud-based storage and extracting their contents

    Downloads each stderr blob to a temp folder under the job's staging dir,
    records one JobClientError row per stderr section, and returns the list
    of created JobClientError instances.
    '''
    errors = []

    # make a folder where we can dump these stderr files temporarily:
    foldername = 'tmp_stderr_%s' % datetime.datetime.now().strftime('%H%M%S_%m%d%Y')
    stderr_folder = os.path.join(job.job_staging_dir, foldername)
    os.mkdir(stderr_folder)

    storage_client = storage.Client()
    bucket_prefix = settings.CONFIG_PARAMS['google_storage_gs_prefix']
    local_file_list = []
    for i, stderr_path in enumerate(stderr_file_list):
        path_without_prefix = stderr_path[len(bucket_prefix):]
        bucket_name = path_without_prefix.split('/')[0]
        object_name = '/'.join(path_without_prefix.split('/')[1:])
        bucket = storage_client.get_bucket(bucket_name)
        blob = bucket.blob(object_name)
        file_location = os.path.join(stderr_folder, 'stderr_%d' % i)
        # BUGFIX: file_location was appended to local_file_list here AND
        # again after a successful download, duplicating every entry; worse,
        # when the blob was NotFound the pre-appended path pointed at a file
        # that was never created, so the read loop below would crash with
        # FileNotFoundError. Only record the path once the download succeeds.
        try:
            blob.download_to_filename(file_location)
            local_file_list.append(file_location)
        except google.api_core.exceptions.NotFound as ex:
            # if the stderr file was not found, it means something other issue
            # occurred that prevented Cromwell from creating it.
            error_text = 'An unexpected error has occurred. Please contact the administrator.'
            jc = JobClientError(project=job.project, error_text=error_text)
            jc.save()
            errors.append(jc)
            message = '''Job (%s) for project %s experienced failure on Cromwell.
                The expected stderr file (%s) was not found, however.
                Staging dir was %s.
                ''' % (job.job_id, job.project, stderr_path, job.job_staging_dir)
            subject = 'Cromwell runtime job failure'
            notify_admins(message, subject)

    # now have all files-- read content and create database objects to track:
    for f in local_file_list:
        file_contents = open(f).read()
        if len(file_contents) > 0:
            stderr_sections = file_contents.split(settings.CROMWELL_STDERR_DELIM)
            for section in stderr_sections:
                jc = JobClientError(project=job.project, error_text=section)
                jc.save()
                errors.append(jc)
    shutil.rmtree(stderr_folder)
    return errors
def handle_precheck_failure(job):
    '''
    If a pre-check job failed, something was wrong with the inputs.
    We query the cromwell metadata to get the error so the user can correct it
    '''
    config_path = os.path.join(THIS_DIR, 'wdl_job_config.cfg')
    config_dict = utils.load_config(config_path)
    # pull together the components of the request to the Cromwell server
    metadata_endpoint = config_dict['metadata_endpoint']
    metadata_url_template = Template(settings.CROMWELL_SERVER_URL + metadata_endpoint)
    metadata_url = metadata_url_template.render({'job_id': job.job_id})
    try:
        response = requests.get(metadata_url)
        response_json = response.json()
        # collect every 'stderr' path referenced anywhere in the metadata:
        stderr_file_list = walk_response('',response_json, 'stderr')
        error_obj_list = log_client_errors(job, stderr_file_list)
        # update the AnalysisProject instance:
        project = job.project
        project.completed = False
        project.success = False
        project.error = True
        project.status = 'Issue encountered with inputs.'
        project.message = ''
        project.finish_time = datetime.datetime.now()
        project.save()
        # inform the client of this problem so they can fix it (if allowed):
        email_address = project.owner.email
        current_site = Site.objects.get_current()
        domain = current_site.domain
        project_url = reverse('analysis-project-execute', args=[project.analysis_uuid,])
        url = 'https://%s%s' % (domain, project_url)
        context = {'site': url, 'user_email': email_address}
        # choose the template set depending on whether the user may retry:
        if project.restart_allowed:
            email_template_path = 'email_templates/analysis_fail_with_recovery.html'
            email_plaintxt_path = 'email_templates/analysis_fail_with_recovery.txt'
            email_subject = 'email_templates/analysis_fail_subject.txt'
        else:
            email_template_path = 'email_templates/analysis_fail.html'
            email_plaintxt_path = 'email_templates/analysis_fail.txt'
            email_subject = 'email_templates/analysis_fail_subject.txt'
        email_template = get_jinja_template(email_template_path)
        email_html = email_template.render(context)
        email_plaintxt_template = get_jinja_template(email_plaintxt_path)
        email_plaintxt = email_plaintxt_template.render(context)
        email_subject = open(email_subject).readline().strip()
        send_email(email_plaintxt, email_html, email_address, email_subject)
        if not project.restart_allowed:
            # a project that had a pre-check failed, but a restart was NOT allowed.
            # need to inform admins:
            message = 'Job (%s) experienced failure during pre-check. No restart was allowed. Staging dir was %s' % (job.job_id, job.job_staging_dir)
            subject = 'Cromwell job failure on pre-check'
            notify_admins(message, subject)
        # delete the failed job:
        job.delete()
    except Exception as ex:
        print('An exception was raised when requesting metadata '
            'from cromwell server following a pre-check failure')
        print(ex)
        message = 'An exception occurred when trying to query metadata. \n'
        message += 'Job ID was: %s' % job.job_id
        message += 'Project ID was: %s' % job.project.analysis_uuid
        message += str(ex)
        try:
            # a Warning row acts as a "notification already sent" marker:
            warnings_sent = Warning.objects.get(job=job)
            print('Error when querying cromwell for metadata. Notification suppressed')
        except analysis.models.Warning.DoesNotExist:
            handle_exception(ex, message=message)
            # add a 'Warning' object in the database so that we don't
            # overwhelm the admin email boxes.
            warn = Warning(message=message, job=job)
            warn.save()
        raise ex
def handle_precheck_success(job):
    '''
    Invoked when the pre-check job passed and we are OK to launch the full job.
    '''
    # Capture what we need from the job record before removing it, since
    # the SubmittedJob row is deleted prior to launching the real run.
    analysis_project, staging_directory = job.project, job.job_staging_dir
    job.delete()
    # Kick off the main WDL workflow (False => this is not a pre-check run):
    execute_wdl(analysis_project, staging_directory, False)
@task(name='check_job')
def check_job():
    '''
    Used for pinging the cromwell server to check job status
    '''
    # Dispatch tables mapping Cromwell terminal states to handlers; a
    # pre-check job and a full job get different terminal behavior.
    terminal_actions = {
        'Succeeded': handle_success,
        'Failed': handle_failure
    }
    precheck_terminal_actions = {
        'Succeeded': handle_precheck_success,
        'Failed': handle_precheck_failure
    }
    other_states = ['Submitted','Running']
    config_path = os.path.join(THIS_DIR, 'wdl_job_config.cfg')
    config_dict = utils.load_config(config_path)
    # pull together the components of the request to the Cromwell server
    query_endpoint = config_dict['query_status_endpoint']
    query_url_template = Template(settings.CROMWELL_SERVER_URL + query_endpoint)
    # get the job IDs for active jobs:
    active_job_set = SubmittedJob.objects.all()
    print('%d active jobs found.' % len(active_job_set))
    for job in active_job_set:
        query_url = query_url_template.render({'job_id': job.job_id})
        try:
            response = requests.get(query_url)
            response_json = json.loads(response.text)
            if (response.status_code == 404) or (response.status_code == 400) or (response.status_code == 500):
                handle_exception(None, 'Query for job failed with message: %s' % response_json['message'])
            else: # the request itself was OK
                status = response_json['status']
                # if the job was in one of the finished states, execute some specific logic
                if status in terminal_actions.keys():
                    if job.is_precheck:
                        precheck_terminal_actions[status](job) # call the function to execute the logic for this end-state
                    else:
                        terminal_actions[status](job) # call the function to execute the logic for this end-state
                elif status in other_states:
                    # any custom behavior for unfinished tasks
                    # can be handled here if desired
                    # update the job status in the database
                    job.job_status = status
                    job.save()
                    project = job.project
                    project.status = status
                    project.save()
                else:
                    # has some status we do not recognize
                    message = 'When querying for status of job ID: %s, ' % job.job_id
                    message += 'received an unrecognized response: %s' % response.text
                    job.job_status = 'Unknown'
                    job.save()
                    try:
                        # a Warning row marks that admins were already notified:
                        warnings_sent = Warning.objects.get(job=job)
                        print('When querying cromwell for job status, received unrecognized status. Notification suppressed')
                    except analysis.models.Warning.DoesNotExist:
                        handle_exception(None, message=message)
                        # add a 'Warning' object in the database so that we don't
                        # overwhelm the admin email boxes.
                        warn = Warning(message=message, job=job)
                        warn.save()
        except Exception as ex:
            print('An exception was raised when requesting job status from cromwell server')
            print(ex)
            message = 'An exception occurred when trying to query a job. \n'
            message += 'Job ID was: %s' % job.job_id
            message += 'Project ID was: %s' % job.project.analysis_uuid
            message += str(ex)
            try:
                warnings_sent = Warning.objects.get(job=job)
                print('Error when querying cromwell for job status. Notification suppressed')
            except analysis.models.Warning.DoesNotExist:
                handle_exception(ex, message=message)
                # add a 'Warning' object in the database so that we don't
                # overwhelm the admin email boxes.
                warn = Warning(message=message, job=job)
                warn.save()
            raise ex
| StarcoderdataPython |
1720805 | <filename>notebook/python_sandbox/py_ufo/drops/test3.py
def run_me():
    """Print a fixed confirmation message."""
    message = 'this is test copied'
    print(message)
if __name__ == '__main__':
run_me()
x = 2
| StarcoderdataPython |
1631385 | <filename>sdk/python/pulumi_vault/azure/outputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'BackendRoleAzureGroup',
'BackendRoleAzureRole',
]
@pulumi.output_type
class BackendRoleAzureGroup(dict):
    """Output type for an Azure AD group attached to a Vault backend role.

    Generated provider code: behaves like a dict keyed by the provider's
    camelCase schema names; snake_case access goes through the property
    getters below.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn callers who subscript with camelCase keys instead of using
        # the snake_case property getters.
        suggest = None
        if key == "groupName":
            suggest = "group_name"
        elif key == "objectId":
            suggest = "object_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BackendRoleAzureGroup. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BackendRoleAzureGroup.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BackendRoleAzureGroup.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 group_name: str,
                 object_id: Optional[str] = None):
        pulumi.set(__self__, "group_name", group_name)
        # object_id is optional; only store it when provided.
        if object_id is not None:
            pulumi.set(__self__, "object_id", object_id)
    @property
    @pulumi.getter(name="groupName")
    def group_name(self) -> str:
        return pulumi.get(self, "group_name")
    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[str]:
        return pulumi.get(self, "object_id")
@pulumi.output_type
class BackendRoleAzureRole(dict):
    """Output type for an Azure role assignment attached to a Vault backend role.

    Generated provider code: behaves like a dict keyed by the provider's
    camelCase schema names; snake_case access goes through the property
    getters below.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn callers who subscript with camelCase keys instead of using
        # the snake_case property getters.
        suggest = None
        if key == "roleName":
            suggest = "role_name"
        elif key == "roleId":
            suggest = "role_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BackendRoleAzureRole. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BackendRoleAzureRole.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BackendRoleAzureRole.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 role_name: str,
                 scope: str,
                 role_id: Optional[str] = None):
        pulumi.set(__self__, "role_name", role_name)
        pulumi.set(__self__, "scope", scope)
        # role_id is optional; only store it when provided.
        if role_id is not None:
            pulumi.set(__self__, "role_id", role_id)
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> str:
        return pulumi.get(self, "role_name")
    @property
    @pulumi.getter
    def scope(self) -> str:
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="roleId")
    def role_id(self) -> Optional[str]:
        return pulumi.get(self, "role_id")
| StarcoderdataPython |
3287027 | <filename>barbeque/cms/toolbar.py
from django.utils.encoding import force_text
from cms.cms_toolbars import ADMIN_MENU_IDENTIFIER, PAGE_MENU_IDENTIFIER
from cms.extensions.toolbar import ExtensionToolbar
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.toolbar.items import SideframeItem, ModalItem, SubMenu
@toolbar_pool.register
class ForceModalDialogToolbar(CMSToolbar):
    """CMS toolbar plugin that replaces sideframe items with modal items
    in the admin and page menus, so all dialogs open as modals."""
    def rebuild_menu(self, menu):
        """Recursively replace every SideframeItem in *menu* with an
        equivalent ModalItem; other items pass through unchanged."""
        items = []
        for item in menu.items:
            if isinstance(item, SideframeItem):
                # SideframeItem appends elipsis, we need to remove them before
                # we reuse the name to avoid doubled elipsis.
                cleaned_name = item.name.rstrip(' .')
                real_item = ModalItem(
                    cleaned_name,
                    item.url,
                    active=item.active,
                    disabled=item.disabled,
                    extra_classes=item.extra_classes,
                    on_close=item.on_close,
                    side=item.side)
            elif isinstance(item, SubMenu):
                # Recurse into nested menus.
                real_item = self.rebuild_menu(item)
            else:
                real_item = item
            items.append(real_item)
        menu.items = items
        return menu
    def populate(self):
        # Rewrite the admin and page menus, skipping any that are absent.
        menus = [
            self.toolbar.get_menu(identifier)
            for identifier in (ADMIN_MENU_IDENTIFIER, PAGE_MENU_IDENTIFIER)
        ]
        for menu in [menu for menu in menus if menu]:
            self.rebuild_menu(menu)
class TitleExtensionToolbar(ExtensionToolbar):
    """Base toolbar for title extensions: inserts a modal menu entry for
    the configured extension `model` right after the `insert_after` item
    in the current page menu. Subclasses set both class attributes."""
    model = None
    insert_after = None
    def get_item_position(self, menu):
        """Return the menu position directly after the item named
        `insert_after` (with or without trailing ellipsis), or None."""
        position = None
        for items in menu._memo.values():
            for item in items:
                if force_text(getattr(item, 'name', None)) in (
                    force_text(self.insert_after),
                    '{0}...'.format(self.insert_after)
                ):
                    position = menu._item_position(item) + 1
                    break
        return position
    def populate(self):
        current_page_menu = self._setup_extension_toolbar()
        # Nothing to do without a page menu or a current page.
        if not current_page_menu or not self.page:
            return
        position = self.get_item_position(current_page_menu)
        urls = self.get_title_extension_admin()
        # One modal entry per title extension; editing only in edit mode.
        for title_extension, url in urls:
            current_page_menu.add_modal_item(
                self.model._meta.verbose_name,
                url=url, position=position,
                disabled=not self.toolbar.edit_mode
            )
| StarcoderdataPython |
136341 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Console script for daskerator."""
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import datetime as dt
from functools import partial
from hashlib import md5
import inspect
from operator import methodcaller
import os
from pathlib import Path
from pprint import pprint as pp
from queue import Queue
import sys
from threading import Thread
import time
from time import sleep
import click
from distributed import Client, LocalCluster
import pandas as pd
from bripy.bllb.logging import setup_logging
from bripy.bllb.str import hash_utf8
LOG_ON = False
LOG_LEVEL = "WARNING"
def start_log(enable=True, lvl='WARNING'):
    """Initialise application logging and return the configured logger."""
    logger = setup_logging(enable, lvl, loguru_enqueue=True)  # , std_lib=True)
    logger.info('examinator logging started')
    return logger
def md5_blocks(path, blocksize=1024 * 2048) -> str:
    """Return the MD5 hex digest of the file at *path*, reading it in
    blocks of *blocksize* bytes.

    Directories are skipped and read errors are logged; both of those
    cases return None instead of a digest.
    """
    path = Path(path)
    if path.is_dir():
        log.debug(f'Item is a directory and will not be hashed. {str(path)}')
        return
    try:
        hasher = md5()
        with path.open('rb') as fh:
            # iter() with a b'' sentinel yields blocks until EOF.
            for chunk in iter(lambda: fh.read(blocksize), b''):
                hasher.update(chunk)
        return hasher.hexdigest()
    except Exception as error:
        log.warning(
            f'Error trying to hash item: {str(path)}\nError:\n{error}')
        return
def get_stat(path, opt_md5=True, opt_pid=False) -> dict:
    """Collect a dict of filesystem metadata for *path*.

    Combines lstat fields, stringified Path attributes, the is_* predicate
    results, derived timestamps, and (optionally) an MD5 digest. On any
    top-level failure a minimal {'path': ...} dict is returned.
    """
    log.debug(path)
    try:
        path = Path(path)
        # All public, non-builtin members of the lstat result (st_size, ...):
        info = dict([
            _ for _ in inspect.getmembers(path.lstat())
            if not _[0].startswith('_') and not inspect.isbuiltin(_[1])
        ])
        # Stringified simple Path attributes (name, suffix, parent, ...):
        info.update(
            dict([(_[0], str(_[1])) for _ in inspect.getmembers(path)
                  if '__' not in _[0] and '<' not in str(_[1])]))
        # Results of the is_* predicates (is_file, is_dir, ...), excluding
        # is_mount which is not available on all platforms.
        info.update(
            dict([(str(_[0]), methodcaller(_[0])(path))
                  for _ in inspect.getmembers(path)
                  if _[0].startswith('is_') and _[0] != 'is_mount']))
        info['path'] = str(path)
        info['path_hash'] = hash_utf8(str(path))
        # Human-usable datetime versions of the raw stat timestamps:
        info['f_atime'] = dt.datetime.fromtimestamp(info['st_atime'])
        info['f_ctime'] = dt.datetime.fromtimestamp(info['st_ctime'])
        info['f_mtime'] = dt.datetime.fromtimestamp(info['st_mtime'])
        if opt_md5:
            if not path.is_dir():
                try:
                    md5_hash = md5_blocks(path)
                    info['md5'] = md5_hash
                except:
                    log.warning(f'Could not hash item: {str(path)}')
            else:
                log.debug(
                    f'Item is a directory and will not be hashed. {str(path)}'
                )
        if opt_pid:
            # Useful when run under multiprocessing/dask workers.
            log.debug(
                f"working using OS pid: {os.getpid()}, opt_pid: {opt_pid}")
        return info
    except Exception as error:
        log.warning(error)
        return {'path': str(path)}
def glob_paths(path):
    """Return a recursive iterator over a directory's contents, or the
    Path object itself when *path* is not a directory. Errors are logged
    and result in None."""
    try:
        target = Path(path)
        return target.rglob('*') if target.is_dir() else target
    except Exception as error:
        log.warning(error)
def load_dir(from_q, to_q, stop):
    """Worker loop: move directory-listing results from *from_q* onto *to_q*.

    Items that are lists are unpacked element-by-element onto *to_q*;
    anything else is discarded. A patience counter starts at `limit`,
    is refilled on activity and decremented while idle, so the loop
    exits after a sustained quiet period once *stop()* reports True
    and *from_q* has drained.
    """
    limit = 300
    i = limit
    while True and ((i and not stop()) or from_q.qsize()):
        if from_q.qsize():
            l = from_q.get()
            if isinstance(l, list):
                for item in l:
                    to_q.put(item)
            # Activity: restore patience (capped at the original limit).
            i = min(i + 1, limit)
        else:
            # Idle: burn patience and back off briefly.
            i -= 1
            sleep(.1)
def unloadq(q, stop, limit=2000, rest=.1, check=100):
    """Drain queue *q* into a list.

    A patience counter starts at *limit*, refills on every item received
    and decays while idle (sleeping *rest* seconds per idle pass), so the
    function returns once the queue stays empty and either patience runs
    out or *stop()* reports True. Progress is logged every *check* loops.
    """
    patience = limit
    loops = 0
    results = []
    while (patience and not stop()) or q.qsize():
        loops += 1
        if loops % check == 0:
            log.debug(patience, loops, len(results))
        if q.qsize():
            item = q.get()
            log.debug(item)
            results.append(item)
            patience = min(patience + 1, limit)
        else:
            patience -= 1
            if patience % check == 0:
                log.debug(patience)
            sleep(rest)
    return results
def multiplex(n, q, **kwargs):
    """Fan one input queue out into *n* equivalent queues.

    A daemon thread copies every item taken from *q* onto each of the
    output queues.

    >>> q1, q2, q3 = multiplex(3, in_q)
    """
    outputs = [Queue(**kwargs) for _ in range(n)]

    def relay():
        while True:
            item = q.get()
            for out in outputs:
                out.put(item)

    worker = Thread(target=relay)
    worker.daemon = True
    worker.start()
    return outputs
def push(in_q, out_q):
    """Forward every item from *in_q* to *out_q*, forever (blocking loop;
    intended to run in a daemon thread)."""
    while True:
        out_q.put(in_q.get())
def merge(*in_qs, **kwargs):
    """Merge multiple input queues into a single output queue.

    One daemon thread per input queue forwards its items (via push()).

    >>> out_q = merge(q1, q2, q3)
    """
    merged = Queue(**kwargs)
    for source in in_qs:
        forwarder = Thread(target=push, args=(source, merged))
        forwarder.daemon = True
        forwarder.start()
    return merged
def get_dir(d):
    """Return the immediate children of directory *d* as path strings,
    or None when *d* is not a directory."""
    target = Path(d)
    if not target.is_dir():
        return None
    return [str(entry) for entry in target.iterdir()]
def iterq(q):
    """Yield items from *q* until it is observed empty (non-blocking drain)."""
    while True:
        if not q.qsize():
            return
        yield q.get()
def proc_paths(basepaths, opt_md5=True):
    """Walk *basepaths* with a dask pipeline and return a DataFrame of
    per-file stat metadata (optionally with MD5 hashes).

    Paths flow through a queue that is split two ways: one branch lists
    directories (feeding newly found paths back in via load_dir), the
    other stats each path. Uses the module-level dask `client`.
    """
    q = Queue()
    remote_q = client.scatter(q)
    # Duplicate the stream: one copy for directory expansion, one for stat.
    q1, q2 = multiplex(2, remote_q)
    list_q = client.map(get_dir, q1)
    l_q = client.gather(list_q)
    pstat = partial(get_stat, opt_md5=opt_md5, opt_pid=True)
    q3 = client.map(pstat, q2)
    result_q = client.gather(q3)
    qs = [q, remote_q, q1, q2, list_q, l_q, q3, result_q]
    with ThreadPoolExecutor() as t:
        stop_threads = False
        stop = lambda: stop_threads
        # Feed directory listings back into the input queue (recursion).
        t.submit(load_dir, l_q, q, stop)
        [q.put(str(Path(path).resolve())) for path in basepaths]
        results_future = t.submit(unloadq, result_q, stop, limit=300)
        # Spin until every queue in the pipeline has stayed empty for a
        # sustained number of checks, then signal the workers to stop.
        ilimit = 10
        i = ilimit
        while i:
            alive = sum([_q.qsize() for _q in qs])
            if alive:
                i = min(i + 1, ilimit)
                log.debug(f'{alive}, {i}')
            else:
                i -= 1
                log.debug(f'i: {i}')
            sleep(.1)
        stop_threads = True
        # results_list = unloadq(result_q, limit=300)
        results_list = results_future.result()
    results = pd.DataFrame(results_list)
    return results
@click.command()
@click.option('-b',
              '--basepaths',
              default='.',
              help='Base path.',
              multiple=True,
              type=click.Path(exists=True))
@click.option('-f',
              '--file',
              help='File path or - for stdin',
              type=click.File('r'))
@click.option('-o',
              '--output',
              default='.',
              help='Output path.',
              multiple=False,
              type=click.Path(exists=True,
                              file_okay=False,
                              dir_okay=True,
                              writable=True,
                              resolve_path=True))
@click.option('--md5/--no-md5', default=True)
@click.option('-v', '--verbose', count=True)
@click.argument('args', nargs=-1)
def main(basepaths, output, file, md5, verbose, args):
    """Console script for examinator.

    Collects file metadata (and optionally MD5 hashes) for everything
    under the given base paths using a local Dask cluster, then pretty-
    prints a summary. Returns a process exit code (0 success, 1 failure).
    """
    # click documentation at http://click.pocoo.org/
    s = time.perf_counter()
    # Each -v lowers the log threshold by 10, bottoming out at DEBUG (10).
    log_on = LOG_ON
    log_level = LOG_LEVEL
    if verbose:
        log_on = True
        log_level = max(4 - verbose, 1) * 10
    global log
    log = start_log(log_on, log_level)
    log.info(f'verbose: {verbose}')
    log.warning(f"\nlogs enabled: {log_on}\nlog_level: {log_level}")
    log.debug("basepaths: {}".format(basepaths))
    log.debug("output: {}".format(output))
    log.debug('{}'.format(str(type(file))))
    log.debug(f'Optional md5 hash: {md5}')
    log.debug('{}'.format(args))
    time.sleep(0.05)  # Sleep to let logging initialize
    # Spin up the local Dask cluster used by proc_paths() via the
    # module-level client/cluster globals.
    global cluster, client
    cluster = LocalCluster(processes=True)
    client = Client(cluster)
    if not basepaths:
        basepaths = []
    else:
        basepaths = list(basepaths)
    log.debug(f"{str(type(basepaths))}")
    # Additional paths may arrive one-per-line from a file or stdin (-f -).
    if file:
        basepaths += file.read().split('\n')
    log.debug(f"\n{str(type(basepaths))}\n{basepaths}\n")
    log.debug(client)
    try:
        result = proc_paths(basepaths, opt_md5=md5)
        log.debug(f'{str(type(result))}, {str(len(result))}')
        fields = ['path', 'st_size']
        if md5:
            fields.append('md5')
        pp(result.loc[:, fields])
    except Exception:
        log.exception("FAIL")
        # BUG FIX: the original `return error` referenced an undefined
        # name and raised NameError; return a non-zero exit code instead.
        return 1
    elapsed = time.perf_counter() - s
    # (Removed a no-op .format() that was chained onto this f-string.)
    log.info(f"{__file__} executed in {elapsed:0.2f} seconds.")
    log.debug('\n\nFIN\n\n')
    return 0
# Script entry point: exit status comes from the click command's return value.
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| StarcoderdataPython |
14543 | <gh_stars>0
"""
Load tests from :class:`unittest.TestCase` subclasses.
This plugin implements :func:`loadTestsFromName` and
:func:`loadTestsFromModule` to load tests from
:class:`unittest.TestCase` subclasses found in modules or named on the
command line.
"""
# Adapted from unittest2/loader.py from the unittest2 plugins branch.
# This module contains some code copied from unittest2/loader.py and other
# code developed in reference to that module and others within unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
import logging
import unittest
from nose2 import events, util
__unittest = True
log = logging.getLogger(__name__)
class TestCaseLoader(events.Plugin):
    """Loader plugin that loads from test cases"""
    alwaysOn = True
    configSection = 'testcases'
    def registerInSubprocess(self, event):
        # Ensure this plugin is also active in worker subprocesses.
        event.pluginClasses.append(self.__class__)
    def loadTestsFromModule(self, event):
        """Load tests in :class:`unittest.TestCase` subclasses"""
        # Track object ids so the same class bound under several module
        # attributes is only loaded once.
        seen = set()
        module = event.module
        for name in dir(module):
            obj = getattr(module, name)
            if id(obj) in seen:
                continue
            seen.add(id(obj))
            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
                event.extraTests.append(
                    self._loadTestsFromTestCase(event, obj))
    def loadTestsFromName(self, event):
        """Load tests from event.name if it names a test case/method"""
        name = event.name
        module = event.module
        log.debug("load %s from %s", name, module)
        try:
            result = util.test_from_name(name, module)
        except (AttributeError, ImportError) as e:
            # The name could not be resolved; report a failed-load test.
            event.handled = True
            return event.loader.failedLoadTests(name, e)
        if result is None:
            # Name does not refer to anything this plugin can load.
            return
        parent, obj, name, index = result
        if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            # name is a test case class
            event.extraTests.append(self._loadTestsFromTestCase(event, obj))
        elif (isinstance(parent, type) and
                issubclass(parent, unittest.TestCase) and not
                util.isgenerator(obj) and not
                hasattr(obj, 'paramList')):
            # name is a single test method
            event.extraTests.append(parent(obj.__name__))
    def _loadTestsFromTestCase(self, event, testCaseClass):
        """Build a suite for one TestCase class, letting hook plugins
        override the default name-discovery behavior."""
        evt = events.LoadFromTestCaseEvent(event.loader, testCaseClass)
        result = self.session.hooks.loadTestsFromTestCase(evt)
        if evt.handled:
            loaded_suite = result or event.loader.suiteClass()
        else:
            names = self._getTestCaseNames(event, testCaseClass)
            if not names and hasattr(testCaseClass, 'runTest'):
                # Fall back to the legacy single-test 'runTest' method.
                names = ['runTest']
            # FIXME return failure test case if name not in testcase class
            loaded_suite = event.loader.suiteClass(map(testCaseClass, names))
        if evt.extraTests:
            loaded_suite.addTests(evt.extraTests)
        return loaded_suite
    def _getTestCaseNames(self, event, testCaseClass):
        """Return the (optionally sorted) test method names for a class,
        honoring hook overrides, exclusions, and extra names."""
        excluded = set()
        # NOTE: isTestMethod closes over `evt`, which is assigned below;
        # that works because the predicate is only invoked afterwards.
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         excluded=excluded):
            prefix = evt.testMethodPrefix or self.session.testMethodPrefix
            return (
                attrname.startswith(prefix) and
                hasattr(getattr(testCaseClass, attrname), '__call__') and
                attrname not in excluded
            )
        evt = events.GetTestCaseNamesEvent(
            event.loader, testCaseClass, isTestMethod)
        result = self.session.hooks.getTestCaseNames(evt)
        if evt.handled:
            test_names = result or []
        else:
            excluded.update(evt.excludedNames)
            test_names = [entry for entry in dir(testCaseClass)
                          if isTestMethod(entry)]
        if evt.extraNames:
            test_names.extend(evt.extraNames)
        # Class-level sortTestMethodsUsing wins over the loader's default.
        sortkey = getattr(
            testCaseClass, 'sortTestMethodsUsing', event.loader.sortTestMethodsUsing)
        if sortkey:
            test_names.sort(
                key=sortkey)
        return test_names
| StarcoderdataPython |
3219983 | <reponame>SheikyHaz/tiny_python_projects
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""A collection of typing utilities."""
import sys
from typing import TYPE_CHECKING, Dict, List, NamedTuple, Union
if TYPE_CHECKING:
from typing import Counter # typing.Counter added in Python 3.6.1
if sys.version_info >= (3, 8):
from typing import Literal, TypedDict
else:
from typing_extensions import Literal, TypedDict
class FileItem(NamedTuple):
    """Represents data about a file handled by pylint

    Each file item has:
    - name: full name of the module
    - filepath: path of the file
    - modpath: path of the module
    """
    name: str
    filepath: str
    modpath: str
class ModuleDescriptionDict(TypedDict):
    """Represents data about a checked module"""
    # Filesystem path of the module file.
    path: str
    # Dotted module name.
    name: str
    # Whether the module was passed explicitly as a command-line argument.
    isarg: bool
    # Base path / base name of the package the module belongs to.
    basepath: str
    basename: str
class ErrorDescriptionDict(TypedDict):
    """Represents data about errors collected during checking of a module"""
    # Message category; always "fatal" for load errors.
    key: Literal["fatal"]
    # Name of the module that failed to load.
    mod: str
    # The exception raised while importing/parsing the module.
    ex: Union[ImportError, SyntaxError]
# The base type of the "stats" attribute of a checker: maps a stat name to
# a plain count, a per-message Counter, a list, or a nested mapping.
CheckerStats = Dict[
    str, Union[int, "Counter[str]", List, Dict[str, Union[int, str, Dict[str, int]]]]
]
| StarcoderdataPython |
3319490 | import operator
# Demonstrate sorting a dict's items by value, in both directions.
dic = {'a': 12, 'b': 15, 'c': 3}
value_of = operator.itemgetter(1)
ascending = sorted(dic.items(), key=value_of)
print(ascending)
descending = sorted(dic.items(), key=value_of, reverse=True)
print(descending)
| StarcoderdataPython |
39160 | <gh_stars>0
from __future__ import print_function
import sys
import json
def convert(path):
    """Read a GeoJSON file and write the first feature's outer ring to
    stdout as a WKT POLYGON string.

    Only the exterior ring is used; interior rings (holes) are skipped.
    Missing files and malformed documents are reported via print().
    """
    try:
        with open(path, 'r') as f:
            geojson = json.load(f)
        ring = geojson['features'][0]['geometry']['coordinates'][0]
        pairs = ", ".join("{} {}".format(x, y) for x, y in ring)
        sys.stdout.write(u"POLYGON(({}))\n".format(pairs))
    except IOError:
        print("No such file")
    except KeyError:
        print("File is not properly formatted")
| StarcoderdataPython |
4834543 | <reponame>kryptn/Authda<filename>Authda/tests/test_tests.py
import unittest
class TestTestCase(unittest.TestCase):
    """Sanity-check test case proving the test harness itself runs."""
    def test_test(self):
        # Trivial tautology: passes as long as the runner executes tests.
        self.assertTrue(True)
| StarcoderdataPython |
1683017 | from dotenv import load_dotenv
from flask import make_response
import jwt
from models import User
from helpers import is_login
load_dotenv()
def register_controller(session, request):
    """Register a new user from the submitted form data.

    Expects `email`, `name`, `password` and `password_confirmation` in
    `request.form`. Returns a (status, message) tuple: ('error', reason)
    on validation failure, ('success', 'user created') otherwise.
    """
    try:
        data = request.form
        email = data['email']
        name = data['name']
        password = data['password']
        password_confirmation = data['password_confirmation']
    except KeyError:
        # A required form field is missing (narrowed from a bare except).
        return 'error', 'form invalid'
    user = session.query(User).filter_by(email=email).first()
    if password != password_confirmation:
        return 'error', 'passwords not match'
    if user:
        return 'error', 'email already exist'
    user = User(name=name, email=email, password=password)
    session.add(user)
    # BUG FIX: the original referenced `session.commit` without calling
    # it, so the new user was never persisted to the database.
    session.commit()
    return 'success', 'user created'
114231 | <gh_stars>0
class ResponseKeys(object):
    """Constant response keys and user-facing messages for the posts API."""
    POST = 'post'
    POSTS = 'posts'
    POST_SAVED = 'The post was saved successfully'
    POST_UPDATED = 'The post was updated successfully'
    POST_DELETED = 'The post was deleted successfully'
    POST_NOT_FOUND = 'The post could not be found'
125625 | from anthill.platform.services import PlainService, ControllerRole
from anthill.platform.api.internal import as_internal
from psutil import virtual_memory, cpu_percent
class Service(ControllerRole, PlainService):
    """Anthill default service."""
    # Name of the master service this controller reports to.
    master = 'game_master'
    @staticmethod
    def setup_internal_api():
        @as_internal()
        async def heartbeat_report(api, **options):
            # Report current system load (psutil RAM/CPU usage percentages)
            # for the master's heartbeat monitoring.
            return {
                'memory': virtual_memory().percent,
                'cpu': cpu_percent()
            }
| StarcoderdataPython |
106658 | <filename>ryu/app/load_tt_schedule_tb.py<gh_stars>1-10
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def load_tt_flowtable(dirpath):
    """Load the time-triggered (TT) flow tables for every switch.

    Expects <dirpath>/tables/<switch_dir>/ to contain files named like
    ``port<N>_<send|recv>.<ext>`` whose first line is an entry count and
    whose remaining lines hold ``schd_time period flow_id buffer_id
    flow_size`` records.

    Returns a dict mapping switch number (last digit of the directory
    name, plus one) to a list of rows:
    [port, etype, flow_id, schd_time, period, buffer_id, flow_size].
    """
    schedule_path = os.path.join(dirpath, "tables")
    all_tables = {}
    for _, switch_dirs, _ in os.walk(schedule_path):
        for switch in switch_dirs:
            switch_path = os.path.join(schedule_path, switch)
            table = []
            for _, _, filenames in os.walk(switch_path):
                for tbfile in filenames:
                    # "port3_send.txt" -> port index 3, direction "send"
                    port_str, etype_str = os.path.splitext(tbfile)[0].split('_')
                    port_num = int(port_str[4:]) + 1  # ports are 1-based
                    etype_num = 0 if etype_str == "send" else 1
                    with open(os.path.join(switch_path, tbfile), 'r') as tb:
                        entry_count = int(tb.readline())
                        for _ in range(entry_count):
                            (schd_time, period, flow_id,
                             buffer_id, flow_size) = tb.readline().split()
                            table.append([port_num, etype_num, int(flow_id),
                                          int(schd_time), int(period),
                                          int(buffer_id), int(flow_size)])
            all_tables[int(switch[-1]) + 1] = table
    return all_tables
def tt_table_generator(dirpath):
    """Yield one TT flow table (list of rows) per switch directory found
    under <dirpath>/tables, in directory-listing order.

    Row layout matches load_tt_flowtable:
    [port, etype, flow_id, schd_time, period, buffer_id, flow_size].
    """
    schedule_path = os.path.join(dirpath, "tables")
    for _, switch_dirs, _ in os.walk(schedule_path):
        for switch in switch_dirs:
            switch_path = os.path.join(schedule_path, switch)
            table = []
            for _, _, filenames in os.walk(switch_path):
                for tbfile in filenames:
                    # "port3_send.txt" -> port index 3, direction "send"
                    port_str, etype_str = os.path.splitext(tbfile)[0].split('_')
                    port_num = int(port_str[4:]) + 1
                    etype_num = 0 if etype_str == "send" else 1
                    with open(os.path.join(switch_path, tbfile), 'r') as tb:
                        entry_count = int(tb.readline())
                        for _ in range(entry_count):
                            (schd_time, period, flow_id,
                             buffer_id, flow_size) = tb.readline().split()
                            table.append([port_num, etype_num, int(flow_id),
                                          int(schd_time), int(period),
                                          int(buffer_id), int(flow_size)])
            yield table
if __name__ == '__main__':
    # Smoke test: load and dump the flow tables from a sample data dir.
    table_path = "/home/chenwh/Workspace/data/linear"
    all_tables = load_tt_flowtable(table_path)
    # all_tables = tt_table_generator(table_path)
    # BUG FIX: `print all_tables` was Python-2-only statement syntax and is
    # a SyntaxError on Python 3; the parenthesized call works on both.
    print(all_tables)
| StarcoderdataPython |
3332252 | <filename>coordpy/decorators.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import functools
class CoordinateValueError(ValueError):
    """ValueError subclass raised while cleaning coordinate values.

    The distinct type makes these failures easy to spot in tracebacks
    and to catch separately from other ValueErrors.
    """
    def __init__(self, value):
        template = 'Coordinate values must be convertible to floats, not {}'
        super(CoordinateValueError, self).__init__(template.format(value))
def clean_coordinate_value(value):
    """Coerce one coordinate component to a float.

    Args:
        value: A str, float, or int representing a coordinate value;
            anything convertible to float is accepted.

    Returns:
        float: The value converted to a float.

    Raises:
        CoordinateValueError: If the value cannot be converted.
    """
    try:
        cleaned = float(value)
    except ValueError:
        raise CoordinateValueError(value)
    return cleaned
def clean_coordinates(function):
    """Decorator that cleans the two coordinate-pair arguments.

    The first two positional arguments of the decorated function are
    coerced into tuples of two floats; all other arguments are passed
    through untouched.
    """
    @functools.wraps(function)
    def wrapper(a, b, *args, **kwargs):
        point_a = (clean_coordinate_value(a[0]), clean_coordinate_value(a[1]))
        point_b = (clean_coordinate_value(b[0]), clean_coordinate_value(b[1]))
        return function(point_a, point_b, *args, **kwargs)
    return wrapper
| StarcoderdataPython |
4824893 | import socket
def gateway():
    """Guest's gateway address: host .1 on the guest's /24 network."""
    return '{}.1'.format(slash24prefix())
def guestip():
    """Not platform independent, but works on my setup with Windows."""
    hostname = socket.gethostname()
    return socket.gethostbyname(hostname)
def vmnumber():
    """VM number is denoted by the third octet."""
    return guestip().split('.')[2]
def slash24prefix():
    """If guestip() is '192.168.1.41', return '192.168.1'."""
    return '.'.join(guestip().split('.')[:3])
| StarcoderdataPython |
3276018 |
import click
# Read the fixture SQS config once at import time; inject_config_file()
# writes this content into an isolated filesystem for each decorated test.
with open('resources/cs-cl.sqs.yaml', 'r') as fh:
    config_contents = fh.read()
def inject_config_file(filename='sqs.yaml'):
    """Decorator factory for test methods.

    Runs the wrapped test method inside a Click isolated filesystem that
    contains a copy of the fixture config written to *filename*. The
    decorated method's class must expose a click.testing.CliRunner as
    ``self.cli_runner``.
    """
    def _decorator(f):
        def _injected(self, *args, **kwargs):
            assert hasattr(self, 'cli_runner')
            assert isinstance(self.cli_runner, click.testing.CliRunner)
            with self.cli_runner.isolated_filesystem():
                # Materialise the fixture config, then run the real test.
                with open(filename, 'w') as config_file:
                    config_file.write(config_contents)
                return f(self, *args, **kwargs)
        return _injected
    return _decorator
1658767 | <reponame>jpphooper/ds-art
import librosa
from librosa.display import waveshow, specshow
import streamlit as st
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from voxelfuse.voxel_model import VoxelModel
from voxelfuse.mesh import Mesh
from voxelfuse.primitives import generateMaterials
class ExtractMusicFeatures:
    """Load an audio file with librosa and precompute a bundle of features.

    On construction the audio is loaded and beats, harmonic/percussive
    components, MFCCs, chromagram and beat-synchronous aggregates are
    computed and stored in ``self.music_features`` (a flat dict). The
    ``visualise_*`` methods build matplotlib figures from those features.
    """
    def __init__(self,
                 filepath,
                 duration,
                 offset,
                 sampling_rate,
                 hop_length,
                 n_mfcc):
        self.filepath = filepath
        self.duration = duration
        self.offset = offset
        self.sampling_rate = sampling_rate
        self.hop_length = hop_length
        self.n_mfcc = n_mfcc
        y, sr = librosa.load(
            self.filepath, sr=self.sampling_rate, duration=self.duration, offset=self.offset)
        harm_perc_dict = self._extract_harmonic_percussive(y)
        tempo_beat_frame_dict = self._extract_beats(y, sr)
        mfcc_dict = self._extract_mfcc(y)
        beat_mfcc_delta_dict = self._extract_beat_mfcc_delta(
            mfcc_dict['mfcc'], tempo_beat_frame_dict['beat_frames'])
        chromagram_dict = self._extract_chromagram(harm_perc_dict['y_harm'])
        beat_chroma_dict = self._extract_beat_chroma(chromagram_dict['chromagram'],
                                                     tempo_beat_frame_dict['beat_frames'])
        music_features = {'y': y,
                          'sr': sr,
                          # Reuse the beat dict computed above instead of
                          # running the (expensive) beat tracker a second time.
                          **tempo_beat_frame_dict,
                          **harm_perc_dict,
                          **mfcc_dict,
                          **beat_mfcc_delta_dict,
                          **chromagram_dict,
                          **beat_chroma_dict,
                          **self._extract_beat_features(beat_chroma_dict['beat_chroma'],
                                                        beat_mfcc_delta_dict['beat_mfcc_delta'])
                          }
        self.music_features = music_features
    def _extract_beats(self, y, sr) -> dict:
        """Track tempo and beat frame indices."""
        tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
        return {'tempo': tempo, 'beat_frames': beat_frames}
    def _extract_harmonic_percussive(self, y) -> dict:
        """Split the signal into harmonic and percussive components."""
        y_harm, y_perc = librosa.effects.hpss(y)
        return {'y_harm': y_harm, 'y_perc': y_perc}
    def _extract_mfcc(self, y):
        """Mel-frequency cepstral coefficients for the full signal."""
        mfcc = librosa.feature.mfcc(
            y=y,
            sr=self.sampling_rate,
            hop_length=self.hop_length,
            n_mfcc=self.n_mfcc)
        return {'mfcc': mfcc}
    def _extract_beat_mfcc_delta(self, mfcc, beat_frames) -> dict:
        """Beat-synchronous aggregation of the stacked MFCC + delta-MFCC."""
        beat_mfcc_delta = librosa.util.sync(
            np.vstack([mfcc, librosa.feature.delta(mfcc)]), beat_frames)
        return {'beat_mfcc_delta': beat_mfcc_delta}
    def _extract_chromagram(self, y_harm) -> dict:
        """Constant-Q chromagram of the harmonic component."""
        chromagram = librosa.feature.chroma_cqt(
            y=y_harm, sr=self.sampling_rate)
        return {'chromagram': chromagram}
    def _extract_beat_chroma(self, chromagram, beat_frames) -> dict:
        """Beat-synchronous chroma, aggregated with the median."""
        beat_chroma = librosa.util.sync(
            chromagram, beat_frames, aggregate=np.median)
        return {'beat_chroma': beat_chroma}
    def _extract_beat_features(self, beat_chroma, beat_mfcc_delta):
        """Stack beat-synchronous chroma and MFCC-delta into one matrix."""
        beat_features = np.vstack([beat_chroma, beat_mfcc_delta])
        return {'beat_features': beat_features}
    def visualise_waveshow(self, waveshow_list):
        """Overlay waveform plots for any of 'Mono'/'Percussive'/'Harmonic'."""
        fig, ax = plt.subplots(nrows=1, figsize=(30, 7))
        if 'Mono' in waveshow_list:
            waveshow(
                self.music_features['y'], sr=self.music_features['sr'], alpha=0.5, ax=ax, color='b', label='Mono')
        if 'Percussive' in waveshow_list:
            waveshow(
                self.music_features['y_perc'], sr=self.music_features['sr'], alpha=0.5, ax=ax, color='r', label='Percussive')
        if 'Harmonic' in waveshow_list:
            waveshow(
                self.music_features['y_harm'], sr=self.music_features['sr'], alpha=0.5, ax=ax, color='g', label='Harmonic')
        ax.set(title='Wave Show')
        ax.label_outer()
        ax.legend()
        return fig
    def visualise_specshow(self, spec_option):
        """Spectrogram-style plot of the selected precomputed feature.

        The original if/elif ladder repeated the identical plotting code for
        every option; it is collapsed into a single lookup here. Unknown
        options return the empty figure, exactly as before.
        """
        feature_keys = {
            'Chromagram': 'chromagram',
            'MFCC': 'mfcc',
            'Beat MFCC Delta': 'beat_mfcc_delta',
            'Beat Chroma': 'beat_chroma',
            'Beat Features': 'beat_features',
        }
        fig, ax = plt.subplots(nrows=1, figsize=(30, 7))
        if spec_option in feature_keys:
            specshow(self.music_features[feature_keys[spec_option]],
                     sr=self.music_features['sr'],
                     hop_length=self.hop_length, cmap='YlOrBr')
            ax.set(title=f'Spec Show - {spec_option}')
            ax.label_outer()
            ax.legend()
        return fig
    def visualise_tile(self, final_tile_option, size_of_tile):
        """Build a square "tile" from the selected features and plot it in
        2D (imshow) and 3D (surface); returns (figure, tile).

        NOTE(review): this method mutates the caller's ``final_tile_option``
        list (``remove('Chromagram')``) — confirm callers tolerate that.
        """
        fig, ax = plt.subplots(nrows=2, figsize=(30, 7))
        music_feature_options = {
            'Harmonic': self.music_features['y_harm'],
            'Percussive': self.music_features['y_perc'],
            'Mono': self.music_features['y'],
            'Chromagram': self.music_features['chromagram']
        }
        first_arr = music_feature_options[final_tile_option[0]]
        # NOTE(review): 'Chromogram' below looks like a typo for 'Chromagram'.
        # As written the first branch always runs and the elif is dead code.
        # Left unchanged because activating the elif would also activate the
        # (possibly broken) row-extension loops below — needs a real fix + test.
        if 'Chromogram' not in final_tile_option:
            if len(final_tile_option) == 2:
                second_arr = music_feature_options[final_tile_option[1]]
            first_matrix, second_matrix = [], []
            # Replicate the first size_of_tile samples into a square matrix.
            for _ in range(len(first_arr[:size_of_tile])):
                first_matrix.append(first_arr[:size_of_tile])
                if len(final_tile_option) == 2:
                    second_matrix.append(second_arr[:size_of_tile])
            tile = np.array(first_matrix)
            if len(final_tile_option) == 2:
                second_tile = np.array(second_matrix)
                tile = np.multiply(100 * tile, 200 * np.transpose(second_tile))
        elif 'Chromagram' in final_tile_option:
            first_arr = music_feature_options['Chromagram'][0]
            final_tile_option.remove('Chromagram')
            first_matrix = []
            for arr in first_arr:
                loop = True
                row = []
                # Tile each chroma row horizontally until it is wide enough.
                while loop:
                    row.extend(arr)
                    if len(row) > size_of_tile:
                        first_matrix.append(row[:size_of_tile])
                        loop = False
                loop = True
                # NOTE(review): appending to first_matrix while iterating it —
                # the loop flag makes it terminate, but only the first row is
                # replicated; confirm this is the intended fill behaviour.
                for row in first_matrix:
                    while loop:
                        first_matrix.append(row)
                        if len(first_matrix) > size_of_tile:
                            first_matrix = first_matrix[:size_of_tile]
                            loop = False
                tile = first_matrix
                if len(final_tile_option) == 1:
                    second_arr = music_feature_options[final_tile_option[0]]
                    second_matrix = []
                    for _ in range(len(second_arr[:size_of_tile])):
                        second_matrix.append(second_arr[:size_of_tile])
                    second_tile = np.array(second_matrix)
                    tile = np.add(tile, 0.5 * np.transpose(second_tile))
        # Set up a figure twice as tall as it is wide
        fig = plt.figure(figsize=plt.figaspect(2.))
        # First subplot
        ax = fig.add_subplot(2, 1, 1)
        ax.set(title='Tile 2D')
        ax.imshow(tile, interpolation='bilinear',
                  norm=colors.Normalize(), cmap='YlOrBr')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Second subplot
        ax = fig.add_subplot(2, 1, 2, projection='3d')
        ax.set(title='Tile 3D')
        x = np.arange(0, size_of_tile, 1)
        y = np.arange(0, size_of_tile, 1)
        # Shift so the surface starts at zero height.
        tile = tile - tile.min()
        xs, ys = np.meshgrid(x, y)
        ax.plot_surface(xs, ys, tile)
        return fig, tile
def create_3d_tile():
    """Build a 3x3x3 Menger-sponge-style voxel tile and return it as a mesh."""
    sponge = [
        [[1, 1, 1], [1, 0, 1], [1, 1, 1]],
        [[1, 0, 1], [0, 0, 0], [1, 0, 1]],
        [[1, 1, 1], [1, 0, 1], [1, 1, 1]],
    ]
    voxel_model = VoxelModel(sponge, generateMaterials(4))  # material 4 is aluminium
    return Mesh.fromVoxelModel(voxel_model)
| StarcoderdataPython |
1678436 | import sys
def progressify(seq, message="", offset: int = 0, length: int = 0):
    """
    Display a progress bar in the terminal while iterating over a sequence.

    This function can be used whereever we iterate over a sequence (i.e.
    something iterable with a length) and we want to display progress being
    made to the user. It works by passing on the values from the sequence (by
    yielding them), while printing an updated progress bar to the terminal.
    The progress is estimated based on how far we are through the sequence.
    The bar is as long as the sequence length, capped at 30 characters so a
    message still fits on an 80-column terminal.

    Printing works through block-drawing characters and carriage returns; the
    cursor is hidden (via terminal escape codes) for the duration of the
    generator and always restored in the ``finally`` block.

    Args:
        seq: The sequence of elements to iterate over (often a list).
        message: An optional message printed along with the bar. If it is a
            string, the sequences ``%i`` (index) and ``%e`` (current element)
            are substituted. If it is a callable, it is called with
            (index, current_element) and its return value is printed.
        offset: Value added to the index counter; usually combined with
            ``length`` to "join" progress bars across nested loops.
        length: Override the computed maximum length; also allows
            progressifying iterables that aren't sequences.

    Yields:
        The elements from seq.

    Raises:
        Whatever the iteration (or the consumer) raises is propagated after
        the cursor has been restored. (Previously exceptions were printed
        and swallowed, silently truncating the iteration.)

    Example:
        for element in progressify([0.01, 0.1, 0.25, 0.5, 0.9, 0.99]):
            do_something_with(element)  # progress bar updates at each step
    """
    longest = 0
    if isinstance(message, str):
        format_str = message

        def message(i: int, element) -> str:
            # Map special sequences to special values; work on a copy so the
            # substitution is re-evaluated at every step.
            replacements = {
                "%i": i,
                "%e": element,
            }
            s = format_str
            for key in replacements:
                s = s.replace(key, str(replacements[key]))
            return s
    assert not isinstance(message, str)
    # Pre-set i so the finally block is well-defined even for an empty seq
    # (previously this raised NameError).
    i = offset - 1
    try:
        print("\033[?25l", end="", flush=True)  # hide the cursor
        length = length or len(seq)  # allow overriding maximum length
        maxlen = 30
        for i, element in enumerate(seq):
            i += offset
            msg = message(i, element)
            if len(msg) > longest:
                longest = len(msg)
            # Scale the drawn index when the bar is capped at maxlen; %i in
            # the message still refers to the real (unscaled) index.
            if length > maxlen:
                i_chars = int(i * maxlen/length)
            else:
                i_chars = i
            print(
                "\r[{}{}] {}".format(
                    "▓"*(i_chars+1),
                    "░"*((length if length <= maxlen else maxlen)-i_chars-1),
                    msg.ljust(longest),  # make sure we override previous values
                ),
                file=sys.stderr,
                flush=True,  # to see the updated bar immediately
                end=""
            )
            yield element
    finally:
        # Restore the cursor no matter how the generator ends (exhaustion,
        # exception, or close()); exceptions now propagate to the caller.
        print("\033[?25h", end="", flush=True)  # show the cursor again
        if i + 1 == length:
            print()
def gcd(a, b):
    """Greatest common divisor of a and b via Euclid's algorithm."""
    while b:
        a, b = b, a % b
    return a
def run_test(limit):
    """Print gcd(x, y) for every pair (x, y) in [1, limit]^2, descending."""
    print('computing GCD of all pairs of integers in [1, ' + repr(limit) + ']^2')
    for x in range(limit, 0, -1):
        for y in range(limit, 0, -1):
            r = gcd(x, y)
            print('gcd of ' + repr(x) + ' and ' + repr(y) + ' is ' + repr(r))
# Script entry: exhaustively exercise gcd over all pairs in [1, 100]^2.
run_test(100)
print('done')
| StarcoderdataPython |
4836310 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ, wildcard-import
"Vision transforms."
import warnings
import random
from ....block import Block, HybridBlock
from ....nn import Sequential, HybridSequential
from .....util import is_np_array
from . image import *
from .image import _append_return
class Compose(Sequential):
    """Sequentially composes multiple transforms.

    Consecutive HybridBlocks are grouped into a single hybridized
    HybridSequential for speed; non-hybrid Blocks break the grouping.

    Parameters
    ----------
    transforms : list of transform Blocks.
        The list of transforms to be composed. (The list is not modified;
        the previous implementation appended a sentinel to the caller's
        list as a side effect.)

    Inputs:
        - **data**: input tensor with shape of the first transform Block requires.

    Outputs:
        - **out**: output tensor with shape of the last transform Block produces.

    Examples
    --------
    >>> transformer = transforms.Compose([transforms.Resize(300),
    ...                                   transforms.CenterCrop(256),
    ...                                   transforms.ToTensor()])
    >>> image = mx.nd.random.uniform(0, 255, (224, 224, 3)).astype(dtype=np.uint8)
    >>> transformer(image)
    <NDArray 3x256x256 @cpu(0)>
    """
    def __init__(self, transforms):
        super(Compose, self).__init__()
        hybrid = []

        def _flush():
            # Collapse any pending run of HybridBlocks: a single block is
            # added directly, a run of 2+ becomes one hybridized unit.
            if len(hybrid) == 1:
                self.add(hybrid[0])
            elif len(hybrid) > 1:
                hblock = HybridSequential()
                for j in hybrid:
                    hblock.add(j)
                hblock.hybridize()
                self.add(hblock)
            del hybrid[:]

        for i in transforms:
            if isinstance(i, HybridBlock):
                hybrid.append(i)
            else:
                _flush()
                self.add(i)
        _flush()
class HybridCompose(HybridSequential):
    """Sequentially composes multiple transforms. Hybrid version of Compose:
    every element must be a HybridBlock, and the whole pipeline is
    hybridized on construction.

    Parameters
    ----------
    transforms : list of transform HybridBlocks to be composed.

    Inputs:
        - **data**: input tensor with shape of the first transform Block requires.

    Outputs:
        - **out**: output tensor with shape of the last transform Block produces.
    """
    def __init__(self, transforms):
        super(HybridCompose, self).__init__()
        for block in transforms:
            if not isinstance(block, HybridBlock):
                raise ValueError("{} is not a HybridBlock, try use `Compose` instead".format(block))
            self.add(block)
        self.hybridize()
class Cast(HybridBlock):
    """Cast all inputs to a specific data type.

    Parameters
    ----------
    dtype : str, default 'float32'
        The target data type, in string or `numpy.dtype`.

    Inputs:
        - **data**: input tensor(s) with arbitrary shape and dtype.

    Outputs:
        - **out**: tuple of tensors with the same shapes and the requested dtype.
    """
    def __init__(self, dtype='float32'):
        super(Cast, self).__init__()
        self._dtype = dtype

    def hybrid_forward(self, F, *args):
        # Dispatch to the numpy-compatible namespace when deferred-compute
        # numpy semantics are active.
        op = F.npx if is_np_array() else F
        return tuple(op.cast(x, self._dtype) for x in args)
class RandomApply(Sequential):
    """Apply a transformation randomly with probability ``p``; otherwise
    pass the input through unchanged.

    Parameters
    ----------
    transforms
        The transformation (Block) to apply.
    p : float
        Probability of applying the transformation.

    Inputs:
        - **data**: input tensor.

    Outputs:
        - **out**: transformed image.
    """
    def __init__(self, transforms, p=0.5):
        super(RandomApply, self).__init__()
        self.transforms = transforms
        self.p = p

    def forward(self, x, *args):
        # Draw once per call; skip the transform when the draw exceeds p.
        # NOTE(review): the skip path returns x alone while the apply path
        # routes through _append_return(x, *args) — looks asymmetric.
        if random.random() > self.p:
            return x
        return _append_return(self.transforms(x), *args)
class HybridRandomApply(HybridSequential):
    """Apply a list of transformations randomly given probability
    Parameters
    ----------
    transforms
        List of transformations which must be HybridBlocks.
    p : float
        Probability of applying the transformations.
    Inputs:
        - **data**: input tensor.
    Outputs:
        - **out**: transformed image.
    """
    def __init__(self, transforms, p=0.5):
        super(HybridRandomApply, self).__init__()
        # Unlike RandomApply, hybridization requires a HybridBlock.
        assert isinstance(transforms, HybridBlock)
        self.transforms = transforms
        self.p = p
    def hybrid_forward(self, F, x, *args):
        # The condition is evaluated symbolically: cond selects x (skip) vs
        # the transformed input inside the computation graph.
        if is_np_array():
            # NOTE(review): this branch drops *args and skips _append_return,
            # unlike the symbol branch below — confirm that is intentional.
            cond = self.p < F.random.uniform(low=0, high=1, size=1)
            return F.npx.cond(cond, x, self.transforms(x))
        cond = self.p < F.random.uniform(low=0, high=1, shape=1)
        return _append_return(F.contrib.cond(cond, x, self.transforms(x)), *args)
173821 | #!/usr/bin/env python3
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from contextlib import closing as ctx_closing
from argparse import ArgumentParser
def read_daily_stats(sqc):
    """Aggregate per-day keystroke delay statistics from an open SQLite
    connection.

    Only correct keystrokes (pressed == expected) with delay <= 5000 ms
    are considered. Returns a DataFrame with columns: day (datetime64),
    avg, var, min, max, count.
    """
    query = (
        "SELECT day, avg, (xx/n - avg*avg) AS var, min, max, n AS count FROM ("
        "SELECT strftime('%Y-%m-%d',timestamp,'unixepoch') AS day,"
        " AVG(delay_ms) AS avg,"
        " MIN(delay_ms) AS min,"
        " MAX(delay_ms) AS max,"
        " SUM(delay_ms*delay_ms) AS xx,"
        " COUNT(*) AS n"
        " FROM keystrokes WHERE (delay_ms <= 5000 AND pressed == expected)"
        " GROUP BY day ORDER BY day)"
    )
    daily = pd.read_sql_query(query, sqc)
    daily['day'] = pd.to_datetime(daily['day'])
    return daily
def main():
    """Aggregate per-day typing statistics from one or more SQLite databases
    and plot the daily mean delay with a +/- 1 std band."""
    p = ArgumentParser()
    p.add_argument('-d', '--db', nargs='+', help='sqlite database paths', default=['keystrokes.db'])
    args = p.parse_args()
    # Read each day from each database.
    frames = []
    for path in args.db:
        with ctx_closing(sqlite3.connect(path)) as sqc:
            frames.append(read_daily_stats(sqc))
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    df = pd.concat(frames)
    # Combine duplicate day rows (the same day can appear in several DBs).
    g = df.groupby('day')
    df = pd.DataFrame({
        'avg': g['avg'].mean(),
        # Variance of the mean of the per-DB means (original formula kept).
        'var': g['var'].sum() / (g['var'].count()**2),
        'count': g['count'].sum(),
        'min': g['min'].min(),
        'max': g['max'].max(),
    }, index=g.groups)
    df = df.sort_index()
    df['std'] = np.sqrt(df['var'])
    print(df)
    print("Total keystrokes sampled:", sum(df['count']))
    x = df.index.strftime('%d/%m')
    y = df['avg']
    ye = df['std']
    # Clamp the +/- std band to the observed min/max.
    y0 = np.maximum(df['min'], y - ye)
    y1 = np.minimum(df['max'], y + ye)
    with plt.style.context('Solarize_Light2'):
        fig, ax = plt.subplots()
        ax.fill_between(x, y0, y1, alpha=0.2)
        ax.plot(x, y, label='ms/keystroke', lw=5)
        plt.ylim((0, max(y1)*1.1))
        plt.legend(loc='upper left')
        plt.show()
# Run as a script; importing the module has no side effects.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1622856 | """
Verifies that embedding UAC information into the manifest works.
"""
import TestGyp
from xml.dom.minidom import parseString
# Windows-only gyp functional test; pywin32 modules are imported after the
# TestGyp harness decides whether the test is enabled.
test = TestGyp.TestGyp(formats=['msvs', 'ninja'], platforms=['win32'], disable='Need to solve win32api.LoadLibrary problems')
import pywintypes
import win32api
import winerror
# Resource type id for embedded manifests (winuser.h RT_MANIFEST).
RT_MANIFEST = 24
class LoadLibrary(object):
    """Context manager that loads a Windows binary and frees it on exit.

    Entering yields the module handle of the loaded binary.
    """
    def __init__(self, path):
        self._path = path
        self._handle = None

    def __enter__(self):
        self._handle = win32api.LoadLibrary(self._path)
        return self._handle

    def __exit__(self, type, value, traceback):
        # Always release the module handle, even on error.
        win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
    """Read the embedded manifest resource from the binary at *path*.

    Returns the manifest bytes, or None when the binary has no such
    manifest resource. Any other resource error is re-raised.
    """
    with LoadLibrary(path) as handle:
        try:
            return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
        except pywintypes.error as error:
            if error.args[0] != winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
                raise
            return None
CHDIR = 'linker-flags'
# Generate and build the three UAC test executables.
test.run_gyp('enable-uac.gyp', chdir=CHDIR)
test.build('enable-uac.gyp', test.ALL, chdir=CHDIR)
# The following binaries must contain a manifest embedded.
test.fail_test(not extract_manifest(test.built_file_path('enable_uac.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path('enable_uac_no.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path('enable_uac_admin.exe', chdir=CHDIR), 1))
# Verify that <requestedExecutionLevel level="asInvoker" uiAccess="false" />
# is present.
manifest = parseString(extract_manifest(
    test.built_file_path('enable_uac.exe', chdir=CHDIR), 1))
execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
test.fail_test(len(execution_level) != 1)
execution_level = execution_level[0].attributes
def _has_key(node, key):
# 'in' doesn't work with the NamedNodeMap interface in Python2,
# but 'has_key' was removed from it in Python3, so we need to
# shim things :(.
if hasattr(node, 'has_key'):
return node.has_key(key)
return key in node
# enable_uac.exe: default UAC -> asInvoker, no UI access.
test.fail_test(not (
    _has_key(execution_level, 'level') and
    _has_key(execution_level, 'uiAccess') and
    execution_level['level'].nodeValue == 'asInvoker' and
    execution_level['uiAccess'].nodeValue == 'false'))
# Verify that <requestedExecutionLevel> is not in the manifest.
manifest = parseString(extract_manifest(
    test.built_file_path('enable_uac_no.exe', chdir=CHDIR), 1))
execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
test.fail_test(len(execution_level) != 0)
# Verify that <requestedExecutionLevel level="requireAdministrator"
# uiAccess="true" /> is present.
manifest = parseString(extract_manifest(
    test.built_file_path('enable_uac_admin.exe', chdir=CHDIR), 1))
execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
test.fail_test(len(execution_level) != 1)
execution_level = execution_level[0].attributes
test.fail_test(not (
    _has_key(execution_level, 'level') and
    _has_key(execution_level, 'uiAccess') and
    execution_level['level'].nodeValue == 'requireAdministrator' and
    execution_level['uiAccess'].nodeValue == 'true'))
test.pass_test()
3222982 | import torch
import dgl.function as fn
import torch.nn as nn
import numpy as np
# from models.networks import *
# Registry of candidate node-aggregation operations, keyed by name; each
# entry is a factory taking the experiment args.
OPS = {
    'V_None' : lambda args: V_None(args),
    'V_I' : lambda args: V_I(args),
    'V_Max' : lambda args: V_Max(args),
    'V_Mean' : lambda args: V_Mean(args),
    'V_Min' : lambda args: V_Min(args),
    'V_Sum' : lambda args: V_Sum(args),
    'V_Sparse': lambda args: V_Sparse(args),
    'V_Dense' : lambda args: V_Dense(args),
}
# Candidate op names allowed at each stage of the searched architecture.
First_Stage = ['V_None', 'V_I', 'V_Sparse', 'V_Dense']
Second_Stage = ['V_I', 'V_Mean', 'V_Sum', 'V_Max']
Third_Stage = ['V_None', 'V_I', 'V_Sparse', 'V_Dense']
class V_Package(nn.Module):
    """Wrap an aggregation op with an optional post-projection.

    Identity-like ops (V_None, V_I) are used as-is; every other op is
    followed by Linear (+ optional BatchNorm) + ReLU.
    """
    def __init__(self, args, operation):
        super().__init__()
        self.args = args
        self.operation = operation
        # No learned post-processing for the parameter-free pass-through ops.
        if type(operation) in [V_None, V_I]:
            self.seq = None
        else:
            self.seq = nn.Sequential()
            self.seq.add_module('fc_bn', nn.Linear(args.node_dim, args.node_dim, bias = True))
            if args.batchnorm_op:
                self.seq.add_module('bn', nn.BatchNorm1d(self.args.node_dim))
            self.seq.add_module('act', nn.ReLU())
    def forward(self, input):
        """Apply the wrapped op, then the post-projection when present."""
        V = self.operation(input)
        if self.seq:
            V = self.seq(V)
        return V
class NodePooling(nn.Module):
    """Small learned transform (Linear + ReLU) applied to node features
    before neighbourhood aggregation."""
    def __init__(self, args):
        super().__init__()
        self.A = nn.Linear(args.node_dim, args.node_dim)
        self.activate = nn.ReLU()

    def forward(self, V):
        return self.activate(self.A(V))
class V_None(nn.Module):
    """Null aggregation: zeros shaped like the node features.

    Implemented as a multiplication by zero (rather than ``zeros_like``)
    so the output stays attached to the autograd graph as before.
    """
    def __init__(self, args):
        super().__init__()

    def forward(self, input):
        return input['V'] * 0.
class V_I(nn.Module):
    """Identity aggregation: pass node features through untouched."""
    def __init__(self, args):
        super().__init__()

    def forward(self, input):
        return input['V']
class V_Max(nn.Module):
    """Max-aggregation over incoming neighbour messages, applied to
    features pre-processed by a learned NodePooling transform."""
    def __init__(self, args):
        super().__init__()
        self.pooling = NodePooling(args)

    def forward(self, input):
        graph, feats = input['G'], input['V']
        graph.ndata['V'] = self.pooling(feats)
        graph.update_all(fn.copy_u('V', 'M'), fn.max('M', 'V'))
        return graph.ndata['V']
class V_Mean(nn.Module):
    """Mean-aggregation over incoming neighbour messages, applied to
    features pre-processed by a learned NodePooling transform."""
    def __init__(self, args):
        super().__init__()
        self.pooling = NodePooling(args)

    def forward(self, input):
        graph, feats = input['G'], input['V']
        graph.ndata['V'] = self.pooling(feats)
        graph.update_all(fn.copy_u('V', 'M'), fn.mean('M', 'V'))
        return graph.ndata['V']
class V_Sum(nn.Module):
    """Sum-aggregation over incoming neighbour messages.

    NOTE(review): unlike V_Max/V_Mean, the raw features are written to the
    graph here — the learned ``self.pooling`` is constructed but bypassed.
    This mirrors the original code; confirm it is intentional.
    """
    def __init__(self, args):
        super().__init__()
        self.pooling = NodePooling(args)

    def forward(self, input):
        graph, feats = input['G'], input['V']
        graph.ndata['V'] = feats
        graph.update_all(fn.copy_u('V', 'M'), fn.sum('M', 'V'))
        return graph.ndata['V']
class V_Min(nn.Module):
    """Min-aggregation over incoming neighbour messages, applied to
    features pre-processed by a learned NodePooling transform."""
    def __init__(self, args):
        super().__init__()
        self.pooling = NodePooling(args)

    def forward(self, input):
        graph, feats = input['G'], input['V']
        graph.ndata['V'] = self.pooling(feats)
        graph.update_all(fn.copy_u('V', 'M'), fn.min('M', 'V'))
        return graph.ndata['V']
class V_Dense(nn.Module):
    """Dense (per-feature) gating: scale node features by a sigmoid gate
    computed from the concatenation of current and input features."""
    def __init__(self, args):
        super().__init__()
        self.W = nn.Linear(args.node_dim*2, args.node_dim, bias = True)

    def forward(self, input):
        V, V_in = input['V'], input['V_in']
        gate_logits = self.W(torch.cat([V, V_in], dim = 1))
        return torch.sigmoid(gate_logits) * V
class V_Sparse(nn.Module):
    """Sparse (scalar) gating: one sigmoid gate per node, computed by a
    small two-layer network over the concatenated [V, V_in] features."""
    def __init__(self, args):
        super().__init__()
        self.W = nn.Linear(args.node_dim*2, args.node_dim, bias = True)
        self.a = nn.Linear(args.node_dim, 1, bias = False)

    def forward(self, input):
        V, V_in = input['V'], input['V_in']
        hidden = torch.relu(self.W(torch.cat([V, V_in], dim = 1)))
        gate_logits = self.a(hidden)
        return torch.sigmoid(gate_logits) * V
# Smoke-test entry point when run directly.
if __name__ == '__main__':
    print("test")
183837 | <gh_stars>1-10
from flask import Flask,render_template
app=Flask(__name__)
# Home page, rendered from a template that demonstrates inheritance.
@app.route('/')
def home():
    return render_template('templateinherictence(home).html')
# Dynamic route: the puppy's name comes from the URL segment.
@app.route('/puppy/<name>')
def puppy(name):
    return render_template('puppy.html',name=name)
# Demonstrates Jinja control-flow (loops) over a list.
@app.route('/flow')
def controloverflow():
    mylist=[1,2,3,4,5]
    return render_template('templatecontrolflow.html',mylist=mylist)
# Demonstrates passing scalars and dicts into a template.
@app.route('/basic')
def index():
    name="hermoine"
    magic={'black spell':'harry',
    'blue spell':'hermoine'}
    return render_template('basic.html',my_variable=name,magic=magic)
# NOTE(review): debug=True must not be used in production deployments.
if __name__=='__main__':
    app.run(debug=True)
1733964 | from IPython import embed
import random
def read_words(path):
    """Return the words in *path*, one per line, without trailing newlines."""
    with open(path, "r") as f:
        # split("\n")[0] drops everything from the first newline on,
        # matching the original per-line cleanup.
        return [line.split("\n")[0] for line in f]


if __name__ == "__main__":
    # The three word files are read with a shared helper; the original
    # duplicated the read loop three times and leaked the file handles.
    adjectives = read_words("adjectives.txt")
    colors = read_words("colors.txt")
    nouns = read_words("nouns.txt")
    # Enumerate every adjective+color+noun combination, one id per line.
    uid = [(adj + col + n + "\n").lower()
           for adj in adjectives
           for col in colors
           for n in nouns]
    random.shuffle(uid)
    with open("./uuids.txt", "w+") as fp:
        fp.writelines(uid)
    with open("./uuid_count.txt", "w+") as fp:
        fp.write("0")
199589 | # coding: utf-8
from flask import render_template, Blueprint
# Blueprint bundling the public "site" pages.
bp = Blueprint('site', __name__)
@bp.route('/')
def index():
    """Index page."""
    return render_template('site/index/index.html')
@bp.route('/about')
def about():
    """About page."""
    return render_template('site/about/about.html')
59921 | <gh_stars>0
import numpy as np
from keras import objectives
from keras import backend as K
import tensorflow as tf
from ipdb import set_trace as stop
import scipy.stats as st
import scipy.misc as mi
# Small constant used to clip predictions away from exact 0/1 before log().
_EPSILON = K.epsilon()
def _loss_tensor(y_true, y_pred):
    """Keras-backend elementwise binary cross-entropy, averaged over the
    last axis. Predictions are clipped for numerical stability."""
    y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
    out = -(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))
    return K.mean(out, axis=-1)
def _loss_np(y_true, y_pred):
    """NumPy reference for _loss_tensor: elementwise binary cross-entropy
    averaged over the last axis, with predictions clipped away from 0/1."""
    clipped = np.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    ce = y_true * np.log(clipped) + (1.0 - y_true) * np.log(1.0 - clipped)
    return np.mean(-ce, axis=-1)
def _loss_tensor2(y_true, y_pred):
    """Log-sum-exp (over the last axis) of standard-normal log-densities of
    y_pred; y_true is only used for its shape.

    NOTE(review): tf.contrib was removed in TensorFlow 2.x — this only runs
    on TF 1.x. The modern equivalent lives in tensorflow_probability.
    """
    dist = tf.contrib.distributions.Normal(mu=K.zeros_like(y_true), sigma=K.ones_like(y_true))
    n_dim = len(y_pred.get_shape().as_list())
    return tf.reduce_logsumexp(dist.log_pdf(y_pred), reduction_indices=[n_dim-1])
def _loss_np2(y_true, y_pred):
log_pdf = st.norm.logpdf(y_pred, np.zeros_like(y_true), np.ones_like(y_true))
return mi.logsumexp(log_pdf, axis=-1)
def check_loss(_shape):
    """Compare the tensor and NumPy log-sum-exp losses on random data of the
    requested rank ('2d'..'5d') and print the norms of both results."""
    if _shape == '2d':
        shape = (6, 7)
    elif _shape == '3d':
        shape = (5, 6, 7)
    elif _shape == '4d':
        shape = (8, 5, 6, 7)
    elif _shape == '5d':
        shape = (9, 8, 5, 6, 7)
    y_a = np.random.random(shape)
    y_b = np.random.random(shape)
    # Evaluate the symbolic version eagerly and compare against NumPy.
    out1 = K.eval(_loss_tensor2(K.variable(y_a), K.variable(y_b)))
    out2 = _loss_np2(y_a, y_b)
    # Both reduce over the last axis, so the result shape drops it.
    assert out1.shape == out2.shape
    assert out1.shape == shape[:-1]
    print(np.linalg.norm(out1))
    print(np.linalg.norm(out2))
    print(np.linalg.norm(out1-out2))
def test_loss():
    """Run check_loss for every supported tensor rank."""
    shape_list = ['2d', '3d', '4d', '5d']
    for _shape in shape_list:
        check_loss(_shape)
        print('======================')
# Run the loss comparison when executed directly.
if __name__ == '__main__':
    test_loss()
1734978 | from datetime import datetime
from math import sqrt, log, ceil
from os.path import join, dirname, abspath
from helper.helper import std
from two_thinning.full_knowledge.RL.DQN.neural_network import *
# Balls-into-bins experiment size: N bins, M balls.
N = 1000
M = 1000
def EXPONENTIAL_POTENTIAL(loads, alpha=0.5):
    """Negative exponential potential: -sum_i exp(alpha * (load_i - mean)).

    Penalises deviation from the mean load exponentially; larger (less
    negative) values indicate a more balanced allocation.
    """
    n = len(loads)
    mean_load = sum(loads) / n
    return -sum(exp(alpha * (load - mean_load)) for load in loads)
def STD_POTENTIAL(loads):
    """Negative standard deviation of the loads (uses the project's helper.std)."""
    return -std(loads)
def MAX_LOAD_POTENTIAL(loads):
    """Negative of the maximum load (higher is better / more balanced)."""
    heaviest = max(loads)
    return -heaviest
def MAX_LOAD_REWARD(loads):
    """Reward equal to minus the maximum load (higher is better)."""
    heaviest = max(loads)
    return -heaviest
def CORRECTED_MAX_LOAD_REWARD(loads, error_ratio=1.5):
    """Binary reward: 1 iff the maximum load stays strictly below
    error_ratio times the mean load, else 0."""
    mean_load = sum(loads) / len(loads)
    return 1 if max(loads) < error_ratio * mean_load else 0
# Prefer GPU when available.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Epsilon-greedy exploration schedule (start/end values and decay constant).
EPS_START = 0.2
EPS_END = 0.04
EPS_DECAY = 4200
TRAIN_EPISODES = 1000
# Evaluation roll-outs during training vs. final evaluation.
EVAL_RUNS_TRAIN = 32
EVAL_RUNS_EVAL = 100
EVAL_PARALLEL_BATCH_SIZE = 32
# Early-stopping patience (episodes without improvement).
PATIENCE = 400
PRINT_BEHAVIOUR = False
PRINT_PROGRESS = True
# Upper bound for the thinning threshold (heuristic from max-load theory).
MAX_THRESHOLD = max(3, M // N + ceil(sqrt(log(N))))
NN_MODEL = GeneralNet
NN_TYPE = "general_net"
# NOTE(review): "FUCNTION" is a typo in the constant's name; renaming would
# require touching all importers, so it is only flagged here.
LOSS_FUCNTION = nn.SmoothL1Loss()
LR = 0.0004
NN_HIDDEN_SIZE = 64
NN_RNN_NUM_LAYERS = 1
NN_NUM_LIN_LAYERS = 1
# Timestamped output path for training-progression artifacts.
SAVE_PATH = join(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))), "evaluation", "training_progression",
                 f'{str(datetime.now().strftime("%Y_%m_%d %H_%M_%S_%f"))}_{N}_{M}')
REWARD_FUN = MAX_LOAD_REWARD
POTENTIAL_FUN = MAX_LOAD_POTENTIAL
146103 | <filename>Lab 2/FNN/mnist_FNN.py
from __future__ import print_function
import argparse
import torch
# torch.cuda.set_device(0)
import nni
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
# Shared loss used by both train() and test().
criterion = nn.CrossEntropyLoss()
import numpy as np
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
from nni.utils import merge_parameter
# TensorBoard writer and NNI logger shared by the training helpers.
writer = SummaryWriter('logs/mnist_experiment_1')
logger = logging.getLogger('mnist_AutoML')
class Net(nn.Module):
    """Four-layer fully connected classifier for 28x28 MNIST digits.

    Architecture: 784 -> 512 -> 256 -> 128 -> 10 with LeakyReLU
    activations and a log-softmax output (pairs with NLL-style losses).
    """
    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are kept stable so saved state_dicts still load.
        self.in_hid_1 = nn.Linear(784, 512)
        self.hid1 = nn.LeakyReLU()
        self.in_hid_2 = nn.Linear(512, 256)
        self.hid2 = nn.LeakyReLU()
        self.in_hid_3 = nn.Linear(256, 128)
        self.hid3 = nn.LeakyReLU()
        self.hid_out = nn.Linear(128, 10)

    def forward(self, data):
        """Flatten the image batch and apply the MLP; returns log-probabilities."""
        flat = data.view(-1, 784)
        hidden = self.hid1(self.in_hid_1(flat))
        hidden = self.hid2(self.in_hid_2(hidden))
        hidden = self.hid3(self.in_hid_3(hidden))
        return F.log_softmax(self.hid_out(hidden), dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
    """One training epoch: optimise, and periodically log loss/accuracy to
    stdout and TensorBoard.

    Note: ``args`` is a dict here (NNI-merged parameters), hence the
    subscript access (args['log_interval'], args['batch_size']).
    """
    model.train()
    running_loss = 0.0
    correct = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        if batch_idx % args['log_interval'] == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            # Skip the very first batch so the window covers a full interval.
            if batch_idx != 0:
                global_step = (epoch - 1) * len(train_loader) + batch_idx
                writer.add_scalar('Loss/train', running_loss / (args['batch_size'] * args['log_interval']), global_step)
                writer.add_scalar('Accuracy/train', 100. * correct / (args['batch_size'] * args['log_interval']), global_step)
                # Reset the window accumulators after each log point.
                running_loss = 0.0
                correct = 0.0
def test(model, device, test_loader):
    """Evaluate the model on the test set and print average loss/accuracy.

    Runs under torch.no_grad() in eval mode; accumulates the (per-batch
    mean) criterion and divides by the dataset size, as in the original.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def profile(model, device, train_loader):
    """Profile a single-sample forward pass on CPU and print the report.

    Takes one batch from ``train_loader``, moves it to ``device`` and runs
    the model on the first sample (reshaped to a 1x1x28x28 batch) under
    the autograd profiler.
    """
    # ``dataiter.next()`` is Python-2-only; the builtin next() works on any
    # iterator and on every PyTorch version.
    data, target = next(iter(train_loader))
    data, target = data.to(device), target.to(device)
    with torch.autograd.profiler.profile(use_cuda=False) as prof:
        model(data[0].reshape(1, 1, 28, 28))
    print(prof)
def main():
    """Parse command-line hyper-parameters and return the argparse namespace.

    Only argument parsing lives here; the actual training/evaluation is
    driven by :func:`NNI`, which receives these defaults merged with the
    tuner-suggested parameters.  (A large block of superseded, commented-out
    training code was removed -- see :func:`NNI` for the live version.)
    """
    # cuDNN autotuning disabled so runs/profiling behave deterministically.
    torch.backends.cudnn.enabled = False
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    return args
def NNI(args):
    """Train and evaluate the MNIST model with the given hyper-parameters.

    ``args`` is a plain dict (argparse defaults merged with the NNI tuner's
    suggestions).  Relies on module-level globals ``writer`` (TensorBoard
    SummaryWriter), ``logger`` and the imported ``nni`` module for metric
    reporting.  Side effects: downloads MNIST into ./data, writes TensorBoard
    events, and optionally saves/reloads ``mnist.pt``.
    """
    # use_cuda = not args.no_cuda and torch.cuda.is_available()
    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
    # torch.manual_seed(args.seed)
    torch.manual_seed(args['seed'])
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args['batch_size'], shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args['test_batch_size'], shuffle=True, **kwargs)
    # Log one sample batch (as an image grid) and the model graph.
    dataiter = iter(train_loader)
    images, labels = dataiter.next()
    grid = torchvision.utils.make_grid(images)
    writer.add_image('images', grid, 0)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args['lr'])
    images = images.to(device)
    writer.add_graph(model, images)
    # StepLR decays the learning rate to lr * gamma every step_size epochs.
    scheduler = StepLR(optimizer, step_size=1, gamma=args['gamma'])
    print("Start profiling...")
    profile(model, device, train_loader)
    print("Finished profiling.")
    for epoch in range(1, args['epochs'] + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_acc = test(model, device, test_loader)
        scheduler.step()
        # Intermediate metric for the NNI tuner after each epoch.
        nni.report_intermediate_result(test_acc)
        logger.debug('test accuracy %g', test_acc)
        logger.debug('Pipe send intermediate result done.')
    # Final metric: accuracy after the last epoch.
    nni.report_final_result(test_acc)
    if args['save_model']:
        print("Our model: \n\n", model, '\n')
        print("The state dict keys: \n\n", model.state_dict().keys())
        torch.save(model.state_dict(), "mnist.pt")
        state_dict = torch.load('mnist.pt')
        print(state_dict.keys())
    writer.close()
if __name__ == '__main__':
    # Fetch the next hyper-parameter set suggested by the NNI tuner and
    # merge it over the command-line defaults before launching training.
    tuner_params = nni.get_next_parameter()
    logger.debug(tuner_params)
    params = vars(merge_parameter(main(), tuner_params))
    print(params)
    NNI(params)
| StarcoderdataPython |
21291 | import json
import urllib2
import traceback
import cgi
from flask import render_template, request
import web.util.tools as tools
import lib.http as http
import lib.es as es
from web import app
from lib.read import readfile
def get(p):
    """Handle a search request and render results as HTML, JSON or JSONP.

    ``p`` is a per-request context dict that already carries the site
    configuration under ``p['c']`` (Elasticsearch host, index, defaults).
    The function fills ``p`` with the parsed request parameters, the raw
    Elasticsearch response, the processed post list, application buckets
    and spelling suggestions, then renders the search template -- or
    returns raw JSON/JSONP when the ``json`` query parameter is set.
    Raises a generic Exception carrying url/query/body on search failure.
    """
    host = p['c']['host']; index = p['c']['index'];
    # debug
    p['debug'] = tools.get('debug', '')
    # search keyword
    p["q"] = tools.get('q', p['c']['query'])
    # pagination
    p["from"] = int(tools.get('from', 0))
    p["size"] = int(tools.get('size', p['c']['page_size']))
    # sort
    p['sort_field'] = tools.get('sort_field', p['c']['sort_field'])
    p['sort_dir'] = tools.get('sort_dir', p['c']['sort_dir'])
    # selected app
    p['selected_app'] = tools.get('app')
    # search query: the template consumes the escaped form of q
    p["q"] = p["q"].replace('"', '\\"') # escape some special chars
    p['search_query'] = render_template("search/search_query.html", p=p)
    p["q"] = tools.get('q', p['c']['query']) # restore to what was entered originally
    # send search request
    try:
        search_url = "{}/{}/post/_search".format(host, index)
        p['response'] = http.http_req_json(search_url, "POST", p['search_query'])
    except urllib2.HTTPError, e:
        # NOTE(review): "\{}" renders a literal backslash before the error
        # body; "\n{}" was probably intended -- confirm before changing.
        raise Exception("url: {}\nquery: {}\{}".format(
            search_url, p['search_query'], e.read()))
    # process the search result
    p['post_list'] = []
    for r in p['response']["hits"]["hits"]:
        item = {}
        # first take items from the fields
        for k, v in r["_source"].items():
            item[k] = v
        # fetch highlight: override plain fields with highlighted fragments
        if r.get('highlight'):
            for k, v in r["highlight"].items():
                if k == "url" or k == "_index" or k == "app":
                    continue
                # HTML-escape the fragment, then swap the highlight markers
                # for real tags so only they survive as markup.
                value = cgi.escape(v[0])
                value = value.replace("::highlight::", "<font color=red>")
                value = value.replace("::highlight_end::", "</font>")
                item[k] = value
        # produce standard fields: fall back to index name / redirect URL
        if r.get('_index') and not item.get('app'):
            item['app'] = r.get('_index')
        if not item.get('url'):
            item['url'] = '{}/redirect?index={}&id={}'.format(
                p.get('url'),
                r.get('_index'),
                r.get('_id'))
        # Save to SearchResult
        p['post_list'].append(item)
    # Application Lists (aggregation buckets, minus the "search" app itself)
    p['applications'] = []
    if p['response'].get('aggregations'):
        internal = p['response']['aggregations']['internal']['buckets']
        p['applications'].extend(
            [item for item in internal if item.get('key') != 'search']
        )
        external = p['response']['aggregations']['external']['buckets']
        p['applications'].extend(external)
    # sort based on the count
    p['applications'] = sorted(p['applications'],
        key=lambda x: x['doc_count'], reverse=True)
    # Feed Pagination
    p["total"] = int(p['response']["hits"]["total"])
    # Suggestion
    p["suggestion"] = []; AnySuggestion = False;
    # suggest.didyoumean[].options[].text
    if p['response']["suggest"].get("didyoumean"):
        for idx, term in enumerate(p['response']["suggest"].get("didyoumean")):
            # default to the original term; replaced below if an option exists
            p["suggestion"].append(term["text"])
            for o in term["options"]:
                AnySuggestion = True
                p["suggestion"][idx] = o["text"]
                break # just take the first option
    # if there are no suggestions then don't display
    if not AnySuggestion: p["suggestion"] = []
    # return json format
    if tools.get("json"):
        callback = tools.get("callback")
        if not callback:
            return json.dumps(p['response'])
        else:
            # JSONP: wrap the payload in the requested callback function.
            return "{}({})".format(callback, json.dumps(p['response']))
    return render_template("search/default.html", p=p)
| StarcoderdataPython |
2644 | from datetime import datetime
from typing import Any, Dict, Union
__all__ = 'AnyDict'
AnyDict = Dict[str, Any] # pragma: no mutate
datetime_or_str = Union[datetime, str] # pragma: no mutate
| StarcoderdataPython |
1632483 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief: match based features
"""
import re
import string
import numpy as np
import pandas as pd
import config
from utils import dist_utils, ngram_utils, nlp_utils, np_utils
from utils import logging_utils, time_utils, pkl_utils
from feature_base import BaseEstimator, PairwiseFeatureWrapper
class MatchQueryCount(BaseEstimator):
    """Count how many times the whole observation string occurs in the target."""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "MatchQueryCount"

    def _str_whole_word(self, str1, str2, i_):
        # Scan str2 from offset i_, counting non-overlapping occurrences
        # of str1 (like str.count, but with an explicit start offset).
        cnt = 0
        if len(str1) > 0 and len(str2) > 0:
            try:
                pos = i_
                while pos < len(str2):
                    pos = str2.find(str1, pos)
                    if pos == -1:
                        break
                    cnt += 1
                    pos += len(str1)
            except:
                pass
        return cnt

    def transform_one(self, obs, target, id):
        return self._str_whole_word(obs, target, 0)
class MatchQueryRatio(MatchQueryCount):
    """MatchQueryCount normalized by the number of words in the target."""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "MatchQueryRatio"

    def transform_one(self, obs, target, id):
        n_target_words = len(target.split(" "))
        return np_utils._try_divide(super().transform_one(obs, target, id), n_target_words)
#------------- Longest match features -------------------------------
class LongestMatchSize(BaseEstimator):
    """Length of the longest contiguous matching block between obs and target.

    Thin wrapper around ``dist_utils._longest_match_size``.
    """

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "LongestMatchSize"

    def transform_one(self, obs, target, id):
        return dist_utils._longest_match_size(obs, target)
class LongestMatchRatio(BaseEstimator):
    """Longest contiguous matching block between obs and target, as a ratio.

    Thin wrapper around ``dist_utils._longest_match_ratio``.
    """

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "LongestMatchRatio"

    def transform_one(self, obs, target, id):
        return dist_utils._longest_match_ratio(obs, target)
# --------------------------- Attribute based features -------------------------
class MatchAttrCount(BaseEstimator):
    """Count occurrences of the observation string in non-bullet attributes.

    ``target`` is a product_attribute_list: an iterable of
    ``(attribute_name, attribute_value)`` pairs (see main() below, which
    feeds the "product_attribute_list" field to this generator).
    """
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "MatchAttrCount"
    def _str_whole_word(self, str1, str2, i_):
        # Count non-overlapping occurrences of str1 in str2 from offset i_.
        cnt = 0
        if len(str1) > 0 and len(str2) > 0:
            try:
                while i_ < len(str2):
                    i_ = str2.find(str1, i_)
                    if i_ == -1:
                        return cnt
                    else:
                        cnt += 1
                        i_ += len(str1)
            except:
                pass
        return cnt
    def transform_one(self, obs, target, id):
        cnt = 0
        for o in obs.split(" "):
            for t in target:
                if not t[0].startswith("bullet"):
                    # NOTE(review): tests the whole ``obs`` string rather than
                    # the word ``o`` from the outer loop, so each attribute
                    # match is counted once per obs word.  Possibly a bug
                    # (``o`` is unused) -- confirm against MatchAttrRatio,
                    # whose divisor (#obs words * #attrs) assumes this shape.
                    if self._str_whole_word(obs, t[0], 0):
                        cnt += 1
        return cnt
class MatchAttrRatio(MatchQueryCount):
    """MatchQueryCount normalized by (#obs words x #non-bullet attributes)."""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "MatchAttrRatio"

    def transform_one(self, obs, target, id):
        n_obs_words = len(obs.split(" "))
        n_attrs = len([t[0] for t in target if not t[0].startswith("bullet")])
        return np_utils._try_divide(super().transform_one(obs, target, id), n_obs_words * n_attrs)
class IsIndoorOutdoorMatch(BaseEstimator):
    """Tri-state match between indoor/outdoor mentions in query and attributes.

    transform_one returns:
        1  -- an "indoor outdoor" attribute shares a term with the query,
        0  -- the target has no "indoor outdoor" attribute at all,
        -1 -- such an attribute exists but does not match the query's terms.
    """
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "IsIndoorOutdoorMatch"
    def transform_one(self, obs, target, id):
        # Collect which of the two terms the query mentions.
        os = []
        if obs.find("indoor") != -1:
            os.append("indoor")
        if obs.find("outdoor") != -1:
            os.append("outdoor")
        cnt = 0
        for t in target:
            # t is an (attribute_name, attribute_value) pair.
            if t[0].find("indoor outdoor") != -1:
                cnt = 1  # remember the attribute exists, even if no term matches
                ts = t[1].split(" ")
                for i in ts:
                    if i in os:
                        return 1
        if cnt == 0:
            return 0
        else:
            return -1
# ---------------------------- Main --------------------------------------
def main():
    """Generate all match-based features for the Home Depot dataset.

    Loads the lemmatized+stemmed corpus, then runs two feature passes:
    query-vs-text generators over the title/description/etc. fields, and
    attribute-list generators over product_attribute_list.  Features are
    written to ``config.FEAT_DIR`` by PairwiseFeatureWrapper.
    """
    logname = "generate_feature_match_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    generators = [
        MatchQueryCount,
        MatchQueryRatio,
        LongestMatchSize,
        LongestMatchRatio,
    ]
    obs_fields_list = []
    target_fields_list = []
    # [:2] keeps only the first two query variants.
    obs_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    target_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            param_list = []
            pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
            pf.go()
    # product_attribute_list
    generators = [
        MatchAttrCount,
        MatchAttrRatio,
        IsIndoorOutdoorMatch,
    ]
    obs_fields_list = []
    target_fields_list = []
    # [:1] keeps only the plain search term for the attribute-list pass.
    obs_fields_list.append( ["search_term", "search_term_alt", "search_term_auto_corrected"][:1] )
    target_fields_list.append( ["product_attribute_list"] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            param_list = []
            pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
            pf.go()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1725763 | """ Module to update passwords to database """
import re
import sys
import traceback

from colr import color
from mysql.connector import connect, Error
def update():
    """Prompt for a column, row id and new value, then update the pwd table.

    Security: the previous version concatenated raw ``input()`` strings
    straight into the SQL statement (classic injection).  The new value and
    row id are now bound as query parameters; the column name cannot be a
    parameter in SQL, so it is validated against an identifier whitelist
    before interpolation.
    """
    coluna = input(color(' Column? » ', fore='#fe7243'))
    ident = input(color(' ID? » ', fore='#fe7243'))
    update = input(color(' Write your update: ', fore='#fe7243'))
    # Identifiers can't be sent as bound parameters -- restrict to safe names.
    if not re.fullmatch(r'[A-Za-z_][A-Za-z0-9_]*', coluna):
        raise ValueError('invalid column name: %r' % coluna)
    conn = None  # defined before try so `finally` can't hit an unbound name
    try:
        conn = connect(
            host="localhost",
            user="mic",
            password="<PASSWORD>",
            database="pwd")
        cur = conn.cursor()
        # Value and id are bound server-side; only the vetted column name
        # is interpolated.
        query = "UPDATE pwd SET " + coluna + " = %s WHERE pwdid = %s"
        cur.execute(query, (update, ident))
        conn.commit()
    except Error as e:
        print("Error while connecting to db", e)
        print('MySQL error: %s' % (' '.join(e.args)))
        print("Exception class is: ", e.__class__)
        print('MySQL traceback: ')
        exc_type, exc_value, exc_tb = sys.exc_info()
        print(traceback.format_exception(exc_type, exc_value, exc_tb))
    finally:
        if conn:
            conn.close()
if __name__ == '__main__':
    # Run the interactive update prompt when executed as a script.
    update()
| StarcoderdataPython |
183373 | import json
import sys
import io
from util import read_json, flatten, get_json_files, get_loc_dirs
if sys.version_info.major < 3:
raise Exception("must use python 3")
def write_json(filename, data):  # TODO: replace with util.write_json once sorting loc files is common.
    """Dump ``data`` to ``filename`` as pretty-printed UTF-8 JSON, keys sorted."""
    with io.open(filename, 'w', encoding="utf-8") as fh:
        json.dump(data, fh, indent=2, ensure_ascii=False, sort_keys=True)
def main():
    """Rewrite every localization JSON file in place with sorted keys."""
    packs = [pack for pack in get_loc_dirs()]
    for path in flatten(map(get_json_files, packs)):
        write_json(path, read_json(path))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1713606 | """
This module serves to provide a variety of constants
for the Fantasy Football Fun package.
"""
POSITIONS = ["qb", "rb", "wr", "te", "e",
"t", "g", "c", "ol", "dt", "de",
"dl", "ilb", "olb", "lb", "cb", "s",
"db", "k", "p"]
# all of the terms users can order by
ORDER_BY_TERMS = ["player", "year_id", "age",
"height_in", "weight", "bmi",
"g", "gs", "pass_cmp", "pass_att",
"pass_inc", "pass_cmp_perc", "pass_yds",
"pass_td", "pass_int", "pass_int_td",
"pass_td_perc", "pass_int_perc", "pass_rating",
"pass_sacked", "pass_sacked_yds", "pass_sacked_perc",
"pass_yds_per_att", "pass_adj_yds_per_att", "rush_yds",
"rec_yds", "scoring", "fantasy_points", "sacks",
"seasons"]
MAX_FF_POINTS_VAL = 35850
BASE_URL = 'https://www.pro-football-reference.com'
| StarcoderdataPython |
3280465 | # -*- coding: utf-8 -*-
import math
def is_inside_cone(x, y, d, alpha):
    """Return True if point (x, y), in Feder coordinates, lies inside the
    conical waveguide tip.

    The cone has base diameter ``d`` at x = 0, full opening angle ``alpha``,
    and its apex on the x axis at x = (d / 2) / tan(alpha / 2).
    """
    if alpha >= 0.99 * math.pi:
        # Nearly flat opening angle: degenerate cone, nothing is inside.
        return False
    apex_x = 0.5 * d / math.tan(alpha / 2)
    slope_lower = math.tan(0.5 * (math.pi - alpha))  # slant line bounding y < 0
    slope_upper = math.tan(0.5 * (math.pi + alpha))  # slant line bounding y > 0
    # Inside means: in front of the base plane and below both slant lines.
    return 0 <= x <= min(slope_lower * y + apex_x, slope_upper * y + apex_x)
def check_point(x, y, d, length, alpha):
    """Return a truthy value if point (x, y), in Feder coordinates, lies
    inside the waveguide: a cylinder of diameter ``d`` and length ``length``
    capped by a conical tip with opening angle ``alpha``.
    """
    if -d / 2 <= y <= d / 2:
        # Either in the cylindrical section, or inside the cone beyond it
        # (cone coordinates are shifted by ``length`` along x).
        return 0 <= x <= length or is_inside_cone(x - length, y, d, alpha)
    # Implicitly returns None when |y| > d/2 -- falsy, as callers expect.
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.