index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,200 | 6baf2478703ad51e0d998483d520e5f0e3821869 |
class Proposal:
    """A prioritized message with a record of where it came from."""

    def __init__(self, priority, message, origin):
        # Normalize the priority to a float rounded to 4 decimal places.
        self.priority = round(float(priority), 4)
        self.message = str(message)
        self.origin = origin

    def __repr__(self):
        return f"Proposal({self.priority}, {self.message})"

    def __lt__(self, other):
        # Proposals are ordered by priority alone.
        return self.priority < other.priority
def main():
    """Demo: build a LinkedQueue of proposals and insert one more in sorted order."""
    # Local import of the project-specific containers.
    # NOTE(review): LinkedList is imported but unused here - confirm before removing.
    from linked_lists import LinkedList, LinkedQueue
    prop_1 = Proposal(5, 'close airtports', 'chile')
    prop_2 = Proposal(3, 'open airtports', 'chile')
    prop_3 = Proposal(20, 'give masks', 'chile')
    prop_4 = Proposal(6, 'give masks', 'chile')
    linked_queue = LinkedQueue(prop_1, prop_2, prop_3)
    print(linked_queue)
    # Insert the new proposal keeping the queue's ordering.
    linked_queue.sort_append(prop_4)
    print(linked_queue)


if __name__ == "__main__":
    main()
|
998,201 | a240389ee3310fe1ba6740b199ecb5da1d9a305f | #!/cvmfs/sft.cern.ch/lcg/views/LCG_98python3/x86_64-centos7-gcc9-opt/bin/python3
# -*- coding: utf-8 -*-
"""
Example:
python HiggsAnalysis/friend-tree-producer/scripts/add_ML_models_prediction_in_root_file.py \
--input <path to root file> \
--XGBs <path to XGB json files> \
--output-dir <out dir> \
--dry
"""
import sys, os
import argparse
import logging

# Prefer the LCG software stack's site-packages when importing numpy.
sys.path = [
    '/cvmfs/sft.cern.ch/lcg/views/LCG_98python3/x86_64-centos7-gcc9-opt/lib/python3.7/site-packages',
] + sys.path
import numpy as np

# User-local installs take precedence for xgboost / uproot / pandas.
sys.path = [
    '{}/.local/lib/python3.7/site-packages/'.format(os.environ["HOME"]),
    '/home/ltortero/.local/lib/python3.7/site-packages/',
] + sys.path
from xgboost import XGBRegressor
import uproot
import pandas
import array

try:
    from ROOT import TFile, TDirectoryFile, TTree
    from ROOT.TObject import kOverwrite
except:
    # NOTE(review): deliberately broad best-effort fallback - a failed ROOT
    # import retries against an older LCG view; consider `except Exception:`.
    print("Try to load ROOT from elsewhere...")
    sys.path = [
        '/cvmfs/sft.cern.ch/lcg/views/LCG_93python3/x86_64-centos7-gcc62-opt/lib',
    ] + sys.path
    from ROOT import TFile, TDirectoryFile, TTree
    from ROOT.TObject import kOverwrite

# Root logger; handlers are attached lazily by setup_logging().
logger = logging.getLogger()

# Propagate the LCG python path to subprocesses too.
# NOTE(review): 'site-package' (no trailing s) looks like a typo - confirm.
os.environ['PYTHONPATH'] = '/cvmfs/sft.cern.ch/lcg/views/LCG_98python3/x86_64-centos7-gcc9-opt/lib/python3.7/site-package:' + os.environ['PYTHONPATH']
sys.path = [
    '/cvmfs/sft.cern.ch/lcg/views/LCG_96bpython3/x86_64-ubuntu1804-gcc8-opt/lib',
    '/cvmfs/sft.cern.ch/lcg/views/LCG_96bpython3/x86_64-ubuntu1804-gcc8-opt/lib/python3.6/site-packages',
] + sys.path
def setup_logging(output_file, level=logging.DEBUG):
    """Attach a console handler and a (truncating) file handler to the module logger."""
    logger.setLevel(level)
    fmt = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
    # Stream handler first, then the file handler, matching the original order.
    for h in (logging.StreamHandler(), logging.FileHandler(output_file, "w")):
        h.setFormatter(fmt)
        logger.addHandler(h)
def parse_arguments():
    """Define and parse the command-line options of this script."""
    parser = argparse.ArgumentParser(
        description="create friend trees for electron scale factors form a given RooWorkspace"
    )
    add = parser.add_argument
    add("--input", required=True, type=str, help="Input root file.")
    add("--XGBs", required=False, type=str, help="Input XGB json files.", default="")
    add("--tree", default="ntuple", type=str, help="Name of the root tree.")
    add("--channels", default="all", type=str,
        help="Channels to process, comma separated.")
    add("--categories", default="all", type=str,
        help="Categories to process, comma separated OR 'all'.")
    add("--enable-logging", action="store_true",
        help="Enable loggging for debug purposes.")
    # The entry range accepts several aliases; argparse stores them under the
    # dest derived from the first long option (first_entry / last_entry).
    add("--first-entry", "--first_entry", "--start", default=0, type=int,
        help="Index of first event to process.")
    add("--last-entry", "--last_entry", "--end", default=-1, type=int,
        help="Index of last event to process.")
    add("--pipeline", "--pipelines", "--folder", nargs="?", default=None,
        type=str, help="Directory within rootfile.")
    add("--output-dir", type=str, default=".", help="Tag of output files.")
    add("--config", nargs="?", type=str, default=None, help="Config")
    add("--dry", action="store_true", default=False, help="dry run")
    add("--recreate", action="store_true", default=False,
        help="Whether to just update or fully-recreate the friend tree.")
    add("--pandas", action="store_true", default=False,
        help="Whether to use arrays or pandas dataframe with uproot")
    return parser.parse_args()
# Map from training-time variable names (keys) to the branch names used in
# the KIT ntuples (values); applied to each model's declared input list.
var_names_at_KIT = {
    "tau1_pt_reco": "pt_1",
    "tau1_eta_reco": "eta_1",
    "tau1_phi_reco": "phi_1",
    "tau2_pt_reco": "pt_2",
    "tau2_eta_reco": "eta_2",
    "tau2_phi_reco": "phi_2",
    "jet1_pt_reco": "jpt_1",
    "jet1_eta_reco": "jeta_1",
    "jet1_phi_reco": "jphi_1",
    "jet2_pt_reco": "jpt_2",
    "jet2_eta_reco": "jeta_2",
    "jet2_phi_reco": "jphi_2",
    "remaining_jets_pt_reco": "jpt_r",
    "remaining_jets_eta_reco": "jeta_r",
    "remaining_jets_phi_reco": "jphi_r",
    "remaining_jets_N_reco": "Njet_r",
    "MET_pt_reco": "met",
    "MET_phi_reco": "metphi",
    "MET_covXX_reco": "metcov00",
    "MET_covXY_reco": "metcov01",
    "MET_covYY_reco": "metcov11",
    "mT1_reco": "mt_1",
    "mT2_reco": "mt_2",
    "mTtt_reco": "mt_tt",
    "mTtot_reco": "mt_tot",
    "PuppiMET_pt_reco": "puppimet",
    "PuppiMET_phi_reco": "puppimetphi",
    "PuppimT1_reco": "mt_1_puppi",
    "PuppimT2_reco": "mt_2_puppi",
    # NOTE(review): maps to "mt_tt" like mTtt_reco above (not mt_tt_puppi) - confirm.
    "PuppimTtt_reco": "mt_tt",
    "PuppimTtot_reco": "mt_tot_puppi",
    "PU_npvsGood_reco": "npv",
}
# Number of neutrinos in the final state for each di-tau decay channel;
# injected into the dataframe as the "N_neutrinos_reco" model input.
N_neutrinos_in_channel = {
    "tt": 2,
    "mt": 3,
    "et": 3,
    "mm": 4,
    "em": 4,
    "ee": 4,
}
class XGB_model_from_json(object):
    """An XGBRegressor loaded from a json file, plus its list of input branches.

    Attributes:
        name:   unique, identifier-safe name derived from the json path.
        model:  the loaded XGBRegressor.
        inputs: branch names (KIT naming) the model expects, in order.
    """

    def __init__(self, json_file):
        # Build a branch-name-safe identifier from the last 4 path components.
        name = "_".join(json_file.split("/")[-4:])
        name = name.replace("/", "_")
        name = name.replace('.json', '')
        name = "XGB_" + name
        name = name.replace("-", "_")
        self.name = name
        # load json and create model
        loaded_model = XGBRegressor()
        loaded_model.load_model(json_file)
        print("Loaded XGBRegressor model from disk:")
        print("\t{}".format(json_file))
        self.model = loaded_model
        # Load the list of inputs for the model: the model's directory is
        # expected to ship a module 'inputs_for_models_in_this_dir'.
        # FIX: the original used json_file.rstrip(<basename>), but rstrip
        # strips a *character set* and can eat trailing directory characters;
        # os.path.dirname is the correct way to get the containing directory.
        sys.path.insert(0, os.path.dirname(json_file))
        import importlib
        import inputs_for_models_in_this_dir
        # Re-import in case another model directory was loaded earlier.
        # FIX: bare reload() does not exist in Python 3; use importlib.reload.
        importlib.reload(inputs_for_models_in_this_dir)
        this_model_inputs = inputs_for_models_in_this_dir.inputs
        # Translate training-time variable names to the KIT branch names.
        this_model_inputs = [var_names_at_KIT.get(i, i) for i in this_model_inputs]
        self.inputs = this_model_inputs

    def predict(self, filtered_df):
        """Return model predictions for the input columns of *filtered_df*."""
        return self.model.predict(np.r_[filtered_df[self.inputs]])
def main(args):
    """Attach XGB model predictions as friend-tree branches to a ROOT file.

    For every channel/category directory of the input file, builds an input
    dataframe, evaluates each model, and (unless --dry) writes the predictions
    as float branches of a new tree, re-using branches from a previous output
    file when --recreate is not given.
    """
    print(args)
    channels = args.channels.split(',')
    categories = args.categories.split(',')
    nickname = os.path.basename(args.input).replace(".root", "")

    # Load every requested model; collect the union of their input variables.
    XGB_jsons = args.XGBs.split(',')
    XGB_jsons = [f for f in XGB_jsons if f != ""]
    models = {}
    inputs = []
    for XGB_json in XGB_jsons:
        XGB_object = XGB_model_from_json(XGB_json)
        models[XGB_object.name] = XGB_object
        inputs += XGB_object.inputs

    # load root file and create friend tree
    root_file_input = args.input
    output_path = os.path.join(args.output_dir, nickname)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # Output name encodes nickname, pipeline and processed entry range.
    root_file_output = os.path.join(
        output_path,
        "_".join(
            filter(
                None,
                [
                    nickname,
                    args.pipeline,
                    str(args.first_entry),
                    str(args.last_entry),
                ],
            )
        )
        + ".root",
    )

    root_file_in = uproot.open(root_file_input)
    # 'all' expands to every channel/category found among the file keys.
    if 'all' in channels:
        channels = set([k.split('_')[0] for k in root_file_in.keys()])
    if 'all' in categories:
        categories = set([k.split('_')[-1].split(';')[0] for k in root_file_in.keys() if any([c in k for c in channels])])

    if not args.dry:
        root_file_old = None
        if not args.recreate:
            # Move any previous output aside so its branches can be copied
            # over instead of recomputed.
            os.system(
                "if [[ -e {root_file_output} ]] ; then mv {root_file_output} {root_file_output}_to_update ; fi".format(
                    root_file_output = root_file_output
                )
            )
            root_file_old = TFile("{}_to_update".format(root_file_output), 'read')
        root_file_out = TFile(root_file_output, 'recreate')
        print("Opened new file")

    first_pass = True
    for channel in channels:
        for cat in categories:
            rootdirname = '{}_{}'.format(channel, cat)
            # Skip pipelines absent from the input file (with/without cycle suffix).
            if rootdirname not in root_file_in.keys() and "{};1".format(rootdirname) not in root_file_in.keys():
                continue
            # Honour an explicit --pipeline selection.
            if rootdirname != args.pipeline and args.pipeline != None:
                continue
            print('process pipeline: %s_%s' % (channel, cat))
            # After the first directory, re-open the output in update mode.
            if not first_pass and not args.dry:
                root_file_out = TFile(root_file_output, 'update')
            first_pass = False
            if not args.dry:
                rootdir_old = False
                if root_file_old:
                    rootdir_old = root_file_old.GetDirectory(rootdirname)
                if not rootdir_old:
                    already_rootdir = False
                else:
                    already_rootdir = True
                rootdir = TDirectoryFile(rootdirname, rootdirname)
                rootdir.cd()
                tree_old = False
                if already_rootdir:
                    if not args.recreate:
                        tree_old = rootdir_old.Get(args.tree)
                tree = TTree(args.tree, args.tree)
            # Branches already present in the previous tree are copied over,
            # not recomputed.
            # NOTE(review): tree_old is only assigned when not args.dry; a
            # --dry run appears to depend on that - confirm.
            old_models = []
            if tree_old:
                old_models = [model.GetName() for model in [tree_old.GetListOfBranches()][0]]
            if len(old_models) > 0:
                root_file_out_old = uproot.open("{}_to_update".format(root_file_output))
            models = {i:models[i] for i in models if i not in old_models}
            all_models = old_models + [k for k in models]
            # One single-float buffer per branch, shared with TTree.Branch.
            leafValues = {}
            for model in all_models:
                leafValues[model] = array.array("f", [0])
            # Build the input dataframe either via uproot's pandas helper or
            # column-by-column from raw arrays.
            if args.pandas:
                df = root_file_in[rootdirname][args.tree].pandas.df()
                if tree_old:
                    df_old = root_file_out_old[rootdirname][args.tree].pandas.df()
            else:
                _df = root_file_in[rootdirname][args.tree].arrays()
                df = pandas.DataFrame()
                keys_to_export = set(inputs+["pt_1", "pt_2", "phi_1", "phi_2"])
                # These two columns are derived below, not read from the tree.
                for key in ["N_neutrinos_reco", "mt_tt"]:
                    if key in keys_to_export:
                        keys_to_export.remove(key)
                for k in keys_to_export:
                    df[k] = _df[str.encode(k)]
                if tree_old:
                    _df_old = root_file_out_old[rootdirname][args.tree].arrays()
                    df_old = pandas.DataFrame()
                    keys_to_export = old_models
                    for k in keys_to_export:
                        df_old[k] = _df_old[str.encode(k)]
            # Transverse mass of the di-tau system from the two leading legs.
            df["mt_tt"] = (2*df["pt_1"]*df["pt_2"]*(1-np.cos(df["phi_1"]-df["phi_2"])))**.5
            df["N_neutrinos_reco"] = N_neutrinos_in_channel[channel] * np.ones(len(df[inputs[0]]), dtype='int')
            # remove values set at -10 by default to match training settings
            for variable in ["jpt_r", "jeta_r", "jphi_r", "Njet_r"]:
                if variable in inputs:
                    df[variable].values[df[variable].values < 0] = 0
            for model in models:
                print("Predicting with {}...".format(model))
                df[model] = models[model].predict(df)
            if not args.dry:
                print("Filling new branch in tree...")
                for model in all_models:
                    newBranch = tree.Branch(
                        model,
                        leafValues[model],
                        "prediction/F"
                    )
                first_entry = args.first_entry
                last_entry = len(df[model].values)
                if args.last_entry > first_entry and args.last_entry < len(df[model].values):
                    last_entry = args.last_entry
                # Fill entry by entry: old branches come from the previous
                # tree, new ones from the freshly predicted dataframe.
                for k in range(first_entry, last_entry +1):
                    for model in all_models:
                        if model in old_models:
                            leafValues[model][0] = df_old[model].values[k]
                        else:
                            leafValues[model][0] = df[model].values[k]
                    tree.Fill()
                print("Filled.")
                # Replace any pre-existing cycle of the tree in this directory.
                rootdir.Remove(rootdir.Get(args.tree))
                tree.Write(args.tree, kOverwrite)
                root_file_out.Close()
    os.system("rm -rf {}_to_update".format(root_file_output))
if __name__ == "__main__":
    args = parse_arguments()
    if args.enable_logging:
        setup_logging(
            "add_XGB_model_prediction_in_root_file_%s_%s_%s_%s.log"
            % (
                os.path.basename(args.input).replace(".root", ""),
                # FIX: argparse stores "--pipeline/--pipelines/--folder" under
                # the dest of the first long option, i.e. args.pipeline; the
                # original args.folder raised AttributeError.
                args.pipeline,
                args.first_entry,
                args.last_entry,
            ),
            logging.WARNING,
        )
    main(args)
|
998,202 | 6d5365252c3aa1290b03461fee9dc9442f1ba300 | from django.contrib import admin
from landloard.models import HostelHomePage, HostelService, HostelGallary, HostelContact, HostelPlan
from Profile.models import LandlordProfile
# Register your models here.
class HostelHomePageModelAdmin(admin.ModelAdmin):
    """Admin list configuration for HostelHomePage."""
    list_display = ["hostel_name", "welcoming_message", "background_image"]
    list_display_links = ["hostel_name"]
    list_filter = ["hostel_name"]
    search_fields = ["hostel_name", "welcoming_message"]
    # FIX: removed the original inner `class Meta: model = ...` - ModelAdmin
    # does not read an inner Meta; the model is bound at register() time.


admin.site.register(HostelHomePage, HostelHomePageModelAdmin)
class LandlordProfileModelAdmin(admin.ModelAdmin):
    """Admin list configuration for LandlordProfile."""
    list_display = ["hostel_name", "user", "your_phone_number"]
    list_display_links = ["hostel_name"]
    list_filter = ["hostel_name"]
    search_fields = ["hostel_name", "user"]
    # FIX: removed the original inner `class Meta: model = ...` - ModelAdmin
    # does not read an inner Meta; the model is bound at register() time.


admin.site.register(LandlordProfile, LandlordProfileModelAdmin)

# Models registered with the default ModelAdmin.
admin.site.register(HostelService)
admin.site.register(HostelGallary)
admin.site.register(HostelContact)
admin.site.register(HostelPlan)
998,203 | 20be6d3931c2ea56d726104d4a345907dd9aa728 | from datetime import datetime
def write_log(logs, from_file):
    """Print every log entry, then a footer naming the source file and timestamp."""
    for entry in logs:
        print(f"{str(entry)} \n")
    # Footer: source file plus the current local time (minute precision).
    timestamp = datetime.now().strftime("%Y-%m-%d %H-%M \n")
    footer = "\nFrom file: " + from_file + "\n" + "Date-time: " + timestamp
    print(footer)
    print("-" * 25 + "\n\n\n")
# with open("log.txt", "a", encoding="utf-8") as f:
# for log in logs:
# f.write(f"{str(log)} \n")
# f.write("\nFrom file: " + from_file + "\n" + "Date-time: " + datetime.now().strftime("%Y-%m-%d %H-%M \n"))
# f.write("-" * 25 + '\n\n\n') |
def solver(line):
    """Solve one 'Bot Trust' case and return the minimum total time as a string.

    *line* is "N b1 p1 b2 p2 ..." where each (bot, position) pair says which
    robot ('O' = Orange, else Blue) must press the button at *position* next.
    Both robots start at position 1; each move or press takes 1 second, and
    presses must happen in the given order.
    """
    tokens, sequence = line.split()[1:], []
    # FIX: range() instead of the Python-2-only xrange(); works on both.
    for i in range(0, len(tokens), 2):
        sequence.append((tokens[i], int(tokens[i + 1])))
    prevB, prevO = 1, 1
    timeB, timeO, time = 0, 0, 0
    # 'action' renamed from 'next', which shadowed the builtin.
    for action in sequence:
        if action[0] == 'O':
            # Walk from O's last position, plus one second to press.
            step = abs(prevO - action[1]) + 1
            prevO = action[1]
            # O may press no earlier than one second after B's last press.
            timeO = max(timeO + step, timeB + 1)
            time = timeO
        else:
            step = abs(prevB - action[1]) + 1
            prevB = action[1]
            timeB = max(timeB + step, timeO + 1)
            time = timeB
    return str(time)
# Driver: read the cases from test.in (skipping the case count on the first
# line), solve each and write "Case #i: <answer>" to test.out.
# NOTE: Python 2 syntax (print statement); run with a Python 2 interpreter.
f = open("test.out", 'w')
cases = open("test.in", 'r').readlines()[1:]
for i in range(0, len(cases)):
    line = "Case #" + str(i+1) + ": " + solver(cases[i])
    print line
    f.write(line + "\n")
f.close()
998,205 | 570f04b6a563f9ae3597e0eec25977222a3ef97f | import json
import jsonschema
import os

# Validate the example EnergyPlus input file against the epJSON schema that
# ships alongside this script; jsonschema.validate raises on any mismatch.
with open(os.path.join(os.path.dirname(__file__), "1ZoneUncontrolled.epJSON")) as f2:
    input_file = json.load(f2)
with open(os.path.join(os.path.dirname(__file__), "Energy+.schema.epJSON")) as f:
    schema = json.load(f)
jsonschema.validate(input_file, schema)
|
998,206 | 08bf95d49d7fb7806daf01ea953f2bdf35ec3950 | import numpy as np
from keras.layers import Dense, Embedding, LSTM
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.datasets import imdb

# Fix the seed for reproducible results.
np.random.seed(42)
# Maximum vocabulary size (most frequent words kept).
max_features = 5000
# save np.load
np_load_old = np.load
# Temporarily force allow_pickle=True: newer numpy defaults to False, which
# breaks imdb.load_data on some keras versions.
np.load = lambda *a, **k: np_load_old(*a, **k, allow_pickle=True)
# Load the IMDB sentiment dataset.
# NOTE(review): nb_words is the legacy keras-1 keyword (num_words later) - confirm version.
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
# restore np.load for future normal usage
np.load = np_load_old
# Maximum review length in words.
maxlen = 80
# Pad or truncate the reviews to a fixed length.
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
# Build the network.
model = Sequential()
# Embedding layer producing 32-dimensional word vectors.
model.add(Embedding(max_features, 32, dropout=0.2))
# Long short-term memory layer (legacy dropout_W/dropout_U keywords).
model.add(LSTM(100, dropout_W=0.2, dropout_U=0.2))
# Fully connected output layer for binary classification.
model.add(Dense(1, activation="sigmoid"))
# Compile the model.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Train the network (nb_epoch is the legacy keras-1 keyword).
model.fit(X_train, y_train,
          batch_size=64,
          nb_epoch=7,
          validation_data=(X_test, y_test),
          verbose=1)
# Evaluate on the held-out test data.
scores = model.evaluate(X_test, y_test, batch_size=64)
print("Итоговая точность на тестовых данных: %.2f%%" % (scores[1]*100))
# Serialize the model architecture to json.
model_json = model.to_json()
# Write the architecture and the weights to files.
json_file = open("rnn_model_test.json", "w")
json_file.write(model_json)
json_file.close()
model.save_weights("rnn_model_test.h5")
print("Модель нейронной сети успешно сохранена!")
998,207 | 876aedb27be373148a772c68bbde098f7b911169 |
from flask import Blueprint, jsonify, current_app, abort
bp = Blueprint('routes', __name__, url_prefix="/routes") # pylint: disable=invalid-name
@bp.route('/', methods=['GET'])
def routes():  # pragma: no cover
    """Print available functions."""
    # Only expose the route map on a debug server.
    if not current_app.debug:
        abort(404)
    func_list = []
    for rule in current_app.url_map.iter_rules():
        entry = {
            "endpoint": rule.rule,
            "methods": ", ".join(list(rule.methods)),
        }
        # Attach the view's docstring when it has one.
        doc = current_app.view_functions[rule.endpoint].__doc__
        if doc:
            entry["doc"] = doc
        func_list.append(entry)
    return jsonify(sorted(func_list, key=lambda k: k['endpoint']))
|
998,208 | c8352748395e998eea6db98477a20efd73b65cba | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import bs4
import json
import time

if __name__ == '__main__':
    # Parse a locally saved article page and print its body paragraphs.
    # NOTE(review): requests/json/time are imported but unused here - confirm.
    with open("29027415.html", "r") as fw:
        content = fw.read()
    htmlSoup = bs4.BeautifulSoup(content, features="html.parser")
    # txt = htmlSoup.find_all('div', class_='d2txt')
    d2txt_con = htmlSoup.find('div', class_='d2txt_con')
    # print(d2txt_con)
    # Skip the first <p>, presumably a title/lead paragraph - TODO confirm.
    para = d2txt_con.findAll('p')[1:]
    for p in para:
        print(p.text)
|
998,209 | 4e352de3ff4d215d2364400e2e822644e00b3cd4 | # tkinter — Python interface to Tcl/Tk
# The tkinter package (“Tk interface”) is the standard Python interface to the
# Tk GUI toolkit.
# Tk itself is not part of Python; it is maintained at ActiveState.
# lookup(style, option, state=None, default=None).
# Returns the value specified for option in style.
# If state is specified, it is expected to be a sequence of one or more states.
# If the default argument is set, it is used as a fallback value in case no specification
# for option is found.
# To check what font a Button uses by default:
from tkinter import ttk
print(ttk.Style().lookup("TButton", "font"))
|
998,210 | 3b74fe012607e69360fc3a5a4b5cfca823353105 | # Busqueda primero en anchura - Breadth First Search
from Nodos import Nodo
def busqueda_BPA_solucion(conecciones, estado_inicial, solucion):
    """Breadth-first search over the *conecciones* adjacency mapping.

    Returns the Nodo whose state equals *solucion*, with parent/child links
    set along the way, or None when the goal is unreachable from
    *estado_inicial*.
    """
    nodos_visitados = []
    nodos_frontera = []
    nodo_inicial = Nodo(estado_inicial)
    nodos_frontera.append(nodo_inicial)
    # FIX: the original kept a 'resuelto' flag that was never set to True;
    # the early return below makes it unnecessary.
    while nodos_frontera:
        # FIFO pop: take the oldest frontier node and mark it visited.
        nodo_actual = nodos_frontera.pop(0)
        nodos_visitados.append(nodo_actual)
        if nodo_actual.get_estado() == solucion:
            return nodo_actual
        # Expand the child nodes of the current state.
        estado_nodo = nodo_actual.get_estado()
        lista_hijos = []
        for chld in conecciones[estado_nodo]:
            hijo = Nodo(chld)
            lista_hijos.append(hijo)
            # Only enqueue states that are neither visited nor already queued.
            if not hijo.en_lista(nodos_visitados) and not hijo.en_lista(nodos_frontera):
                nodos_frontera.append(hijo)
        nodo_actual.set_hijo(lista_hijos)
    # Frontier exhausted without reaching the goal.
    return None
if __name__ == "__main__":
    # Adjacency map: city -> set of directly connected cities.
    conecciones = {
        'Malaga': {'Salamanca', 'Madrid', 'Barcelona'},
        'Sevilla': {'Santiago', 'Madrid'},
        'Granada': {'Valencia'},
        'Valencia': {'Barcelona'},
        'Madrid': {'Salamanca', 'Sevilla', 'Malaga', 'Barcelona', 'Santander'},
        'Salamanca': {'Malaga', 'Madrid'},
        'Santiago': {'Sevilla', 'Santander', 'Barcelona'},
        'Santander': {'Santiago', 'Madrid'},
        'Zaragoza': {'Barcelona'},
        'Barcelona': {'Zaragoza', 'Santiago', 'Madrid', 'Malaga', 'Valencia'}
    }
    estado_inicial = 'Barcelona'
    solucion = 'Sevilla'
    nodo_solucion = busqueda_BPA_solucion(conecciones, estado_inicial, solucion)
    # Show the result: follow parent links back to the start, then reverse.
    resultado = []
    nodo = nodo_solucion
    while nodo.get_padre() is not None:
        resultado.append(nodo.get_estado())
        nodo = nodo.get_padre()
    resultado.append(estado_inicial)
    resultado.reverse()
    print(resultado)
|
998,211 | f632909afe6014a50ffda534c6947a382df9bd72 | # Generated by Django 2.1.3 on 2018-11-11 20:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the UserFavouriteShop join table linking
    # a user to a favourited shop. Do not edit applied migrations by hand.

    dependencies = [
        ('etsy_core', '0014_productimage'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserFavouriteShop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='etsy_core.Shop')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
998,212 | 843db326b32965fd1687202cfa4307a31b095c6e | # Django settings for instamedia project.
import os, logging

# Verbose logging while settings are imported (development configuration).
logging.basicConfig(
    level = logging.DEBUG,
    format = '%(asctime)s %(levelname)s %(message)s',
)

DEBUG = True
TEMPLATE_DEBUG = DEBUG

logging.debug("Reading settings...")

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

# Absolute path of the directory containing this settings module.
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))

MANAGERS = ADMINS

# Engine/credentials left blank here; expected to be filled by
# settings_local.py (see the execfile at the bottom).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_PATH + os.sep + 'static' + os.sep

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/static/"

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): a real SECRET_KEY is committed here - it should live in
# settings_local.py / an environment variable and be rotated.
SECRET_KEY = 'z@&gewj-htg(9k4(zx)mwtjo(4i9!zn)8l$z4d7ygr6q63^npn'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth', #for user template var
    #'django.core.context_processors.debug',
    #'django.core.context_processors.i18n',
    'django.core.context_processors.media', #for MEDIA_URL template var
    'django.core.context_processors.request', #includes request in RequestContext
    'explainthis.questions.ctxproc.setting', #includes request in RequestContext
)

AUTHENTICATION_BACKENDS = (
    'django_rpx_plus.backends.RpxBackend',
    'django.contrib.auth.backends.ModelBackend', #default django auth
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'explainthis.urls'

BASE_DOMAIN = "http://dev.explainthis.org:8000"

# NOTE(review): RPX/Janrain credentials are committed here - move to
# settings_local.py and rotate if this repository is shared.
RPXNOW_API_KEY = "ab03bbe55691bf7ea5535a176848c99c39548c1e"
RPXNOW_APPLICATION_ID = "bgfakdbceknakngdnhgj"

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    PROJECT_PATH + os.sep + 'templates',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.comments',
    'ajaxcomments',
    'django.contrib.admin',
    'explainthis.questions',
    'django_rpx_plus',
    'compress',
    'voting',
    'uni_form',
    'taggit'
)

REGISTER_URL = '/accounts/register/'
RPXNOW_REALM = 'dev-explainthis'
AUTH_PROFILE_MODULE = "questions.UserProfile"

# django-compress: CSS bundles served as single compressed files.
COMPRESS_CSS = {
    'main': {
        'source_filenames': (
            'css/lib/yui/reset-min.css',
            'css/lib/oocss/core/grid/grids.css',
            'css/lib/oocss/core/template/template.css',
            'css/main.css'
        ),
        'output_filename': 'css/main_compressed.css',
        'extra_context': {
            'media': 'screen,projection',
        },
    },
    'forms': {
        'source_filenames': (
            'css/uni-form-generic.css',
            'css/uni-form.css'
        ),
        'output_filename': 'css/form_compressed.css',
        'extra_context': {
            'media': 'screen,projection',
        },
    },
    # other CSS groups goes here
}

# django-compress: JS bundles, grouped by page/feature.
COMPRESS_JS = {
    'base': {
        'source_filenames': (
            'js/lib/jquery-1.4.2.min.js',
            'js/main.js',
        ),
        'output_filename': 'js/base_compressed.js',
    },
    'comments': {
        'source_filenames': (
            'js/post-comment.js',
            'js/comment.js',
        ),
        'output_filename': 'js/comments_compressed.js',
    },
    'site_frontpage': {
        'source_filenames': (
            'js/lib/jquery-1.4.2.min.js',
            'js/lib/NobleCount/js/jquery.NobleCount.js',
            'js/site_frontpage.js'
        ),
        'output_filename': 'js/site_frontpage_compressed.js',
    }
}

COMPRESS = True
COMPRESS_AUTO = True
COMPRESS_CSS_FILTERS = None

# Optional machine-specific overrides (DB credentials, keys, ...).
# NOTE: execfile is Python-2-only; this settings module targets a legacy stack.
LOCAL_SETTINGS = PROJECT_PATH + os.sep+ 'settings_local.py'
if os.path.exists(LOCAL_SETTINGS): execfile(LOCAL_SETTINGS)
998,213 | cfa6671c6b08c695953c3413785d39864036dc86 | from django.db import models
# Create your models here.
class Transaction(models.Model):
    """A payment transaction as reported back by the PayU gateway."""
    # Transaction id sent by merchant.
    transaction_id = models.CharField(max_length=100)
    # Payment gateway type used in transaction
    payment_gateway_type = models.CharField(max_length=20, null=True, blank=True) # Map to PG_TYPE
    # Map to addedon
    transaction_date_time = models.DateTimeField(null=True, blank=True)
    # mode (credit card/ CD - Cheque / Net Banking)
    mode = models.CharField(max_length=10, null=True, blank=True)
    status = models.CharField(max_length=15, null=True, blank=True)
    amount = models.DecimalField(max_digits=19, decimal_places=6)
    # Unique id from PayU.in
    mihpayid = models.CharField(max_length=100, null=True, blank=True)
    bankcode = models.CharField(max_length=10, null=True, blank=True)
    # Reference number for the payment gateway (received in PG_TYPE)
    bank_ref_num = models.CharField(max_length=100, null=True, blank=True)
    discount = models.DecimalField(max_digits=19, decimal_places=6, default=0)
    additional_charges = models.DecimalField(max_digits=19, decimal_places=6, default=0) # Charged by Payu
    # Status of transaction in PayU system
    # Map to unmappedstatus(initiated/ in progress /dropped / bounced / captured / auth/ failed / usercancelled/ pending)
    txn_status_on_payu = models.CharField(max_length=20, null=True, blank=True)
    hash_status = models.CharField(max_length=100, null=True, blank=True)
class CancelRefundCaptureRequests(models.Model):
    """A cancel/refund/capture webservice request made against a Transaction."""
    # NOTE(review): positional ForeignKey without on_delete implies Django < 2.0.
    transaction = models.ForeignKey(Transaction)
    # PayU Request ID for a request in a Transaction.
    request_id = models.CharField(max_length=100)
    # Cancel or Refund or Capture Request
    request_type = models.CharField(max_length=20)
    # Status of webservice call
    status = models.CharField(max_length=15)
    message = models.CharField(max_length=100)
    # PayU ID
    mihpayid = models.CharField(max_length=100)
    # Bank Reference Number
    bank_ref_num = models.CharField(max_length=100, null=True, blank=True)
    amount = models.DecimalField(max_digits=19, decimal_places=6, default=0)
    error_code = models.CharField(max_length=10)
998,214 | 315207ccb422fcf3d82db7beca509059265a583f | import RPi.GPIO as GPIO
import time
#import requests
#import os
#os.system('fswebcam image.jpg -save /home/pi/Desktop/image.jpg')
#url="https://smart-sort.herokuapp.com/process"
#filess = {"img":open("/home/pi/Desktop/image.jpg","rb")}
#result=requests.post(url,files=filess)
#print(result.text)

# 100 Hz software PWM on board pin 12 drives the servo.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.OUT)
pwm = GPIO.PWM(12, 100)
pwm.start(5)
# Convert target angles to duty cycles: duty = angle/10 + 2.5 at 100 Hz.
# NOTE(review): angle2=200 exceeds a typical 0-180 servo range - confirm
# the intended sweep endpoints.
angle1 = 10
duty1 = float(angle1)/10 + 2.5
angle2 = 200
duty2 = float(angle2)/10 + 2.5
ck = 0
"""GPIO.setmode(GPIO.BOARD)
GPIO.setup(33, GPIO.OUT)
pwm=GPIO.PWM(33,100)
pwm.start(5)
angle1=10
duty1= float(angle1)/10 + 2.5
angle2=200
duty2= float(angle2)/10 + 2.5
ck=0
"""
#if result.text=='Biodegradable':
# Sweep the servo back and forth six times.
while ck <= 5:
    pwm.ChangeDutyCycle(duty1)
    time.sleep(0.8)
    pwm.ChangeDutyCycle(duty2)
    time.sleep(0.8)
    ck = ck+1
    time.sleep(1)
"""while ck<=5:
pwm.ChangeDutyCycle(duty1)
time.sleep(0.8)
pwm.ChangeDutyCycle(duty2)
time.sleep(0.8)
ck=ck+1
time.sleep(1)"""
# Release the GPIO pins on exit.
GPIO.cleanup()
|
998,215 | dde7f523d311f53df89cb0d0b39ef97e27bce783 | from flask import request, jsonify
from flask.views import MethodView
from werkzeug.exceptions import abort
from weathertracker.stats import get_stats
from weathertracker.utils.conversion import (
convert_to_datetime,
convert_to_string,
DatetimeConversionException,
)
class StatsAPI(MethodView):
    """GET endpoint returning aggregate statistics over a metric time window."""

    # features/02-stats/01-get-stats.feature
    def get(self):
        stats = request.args.getlist("stat")
        metrics = request.args.getlist("metric")
        from_datetime = request.args.get("fromDateTime")
        to_datetime = request.args.get("toDateTime")

        # Validate that all required query params are provided.
        if any(
            [
                len(stats) == 0,
                len(metrics) == 0,
                from_datetime is None,
                to_datetime is None,
            ]
        ):
            return abort(400)

        # Reject unparseable datetimes with a 400 as well.
        try:
            from_datetime = convert_to_datetime(from_datetime)
            to_datetime = convert_to_datetime(to_datetime)
        except DatetimeConversionException:
            return abort(400)

        # FIX: removed leftover debug print() calls and stopped shadowing
        # the 'stats' query parameter with the computed result.
        result = get_stats(stats, metrics, from_datetime, to_datetime)
        response = jsonify(result)
        # NOTE(review): 201 Created is unusual for a GET; kept as-is for
        # compatibility with the existing feature tests - confirm.
        response.status_code = 201
        return response
|
998,216 | a466335b2ec63302b973af3576067000d8c62715 | import operator
# Count occurrences of a fixed set of common stop-words in intro.txt,
# splitting each line on commas, then periods, then whitespace.
# NOTE: Python 2 syntax (trailing `print i`); `dict` shadows the builtin.
f=open("intro.txt")
words=[]
dict={'the':0, 'of':0, 'for':0, 'is':0, 'as':0, 'an':0}
for line in f:
    a=line.split(",")
    for i in a:
        b=i.split(".")
        for j in b:
            c=j.split()
            for k in c:
                if k in dict:
                    dict[k]+=1
# Sort alphabetically by word, then print the (word, count) pairs.
t=tuple(dict.items())
t = sorted(t, key=operator.itemgetter(0))
for i in t:
    print i
998,217 | dc89143753fdebe7f51f516971091a89f947e8e9 | #!/usr/bin/env python
def div(a, y):
    ''' Divide two numbers'''
    # Guard against division by zero by returning 0 instead of raising.
    return 0 if y == 0 else a / y
if __name__ == '__main__':
    # Demo: a normal division and the guarded divide-by-zero case.
    print (div(2,4))
    print (div(2,0))
|
998,218 | d7415ceb2834a83efb4ecacd78a85a5954a2c4d6 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Instrument `django`_ to trace Django applications.
.. _django: https://pypi.org/project/django/
SQLCOMMENTER
*****************************************
You can optionally configure Django instrumentation to enable sqlcommenter which enriches
the query with contextual information.
Usage
-----
.. code:: python
from opentelemetry.instrumentation.django import DjangoInstrumentor
DjangoInstrumentor().instrument(is_sql_commentor_enabled=True)
For example,
::
Invoking Users().objects.all() will lead to sql query "select * from auth_users" but when SQLCommenter is enabled
the query will get appended with some configurable tags like "select * from auth_users /*metrics=value*/;"
SQLCommenter Configurations
***************************
We can configure the tags to be appended to the sqlquery log by adding below variables to the settings.py
SQLCOMMENTER_WITH_FRAMEWORK = True(Default) or False
For example,
::
Enabling this flag will add the django framework and its version, e.g. /*framework='django%3A2.2.3'*/
SQLCOMMENTER_WITH_CONTROLLER = True(Default) or False
For example,
::
Enabling this flag will add controller name that handles the request /*controller='index'*/
SQLCOMMENTER_WITH_ROUTE = True(Default) or False
For example,
::
Enabling this flag will add url path that handles the request /*route='polls/'*/
SQLCOMMENTER_WITH_APP_NAME = True(Default) or False
For example,
::
Enabling this flag will add app name that handles the request /*app_name='polls'*/
SQLCOMMENTER_WITH_OPENTELEMETRY = True(Default) or False
For example,
::
Enabling this flag will add opentelemetry traceparent /*traceparent='00-fd720cffceba94bbf75940ff3caaf3cc-4fd1a2bdacf56388-01'*/
SQLCOMMENTER_WITH_DB_DRIVER = True(Default) or False
For example,
::
Enabling this flag will add name of the db driver /*db_driver='django.db.backends.postgresql'*/
Usage
-----
.. code:: python
from opentelemetry.instrumentation.django import DjangoInstrumentor
DjangoInstrumentor().instrument()
Configuration
-------------
Exclude lists
*************
To exclude certain URLs from tracking, set the environment variable ``OTEL_PYTHON_DJANGO_EXCLUDED_URLS``
(or ``OTEL_PYTHON_EXCLUDED_URLS`` to cover all instrumentations) to a string of comma delimited regexes that match the
URLs.
For example,
::
export OTEL_PYTHON_DJANGO_EXCLUDED_URLS="client/.*/info,healthcheck"
will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.
Request attributes
********************
To extract attributes from Django's request object and use them as span attributes, set the environment variable
``OTEL_PYTHON_DJANGO_TRACED_REQUEST_ATTRS`` to a comma delimited list of request attribute names.
For example,
::
export OTEL_PYTHON_DJANGO_TRACED_REQUEST_ATTRS='path_info,content_type'
will extract the ``path_info`` and ``content_type`` attributes from every traced request and add them as span attributes.
Django Request object reference: https://docs.djangoproject.com/en/3.1/ref/request-response/#attributes
Request and Response hooks
***************************
This instrumentation supports request and response hooks. These are functions that get called
right after a span is created for a request and right before the span is finished for the response.
The hooks can be configured as follows:
.. code:: python
def request_hook(span, request):
pass
def response_hook(span, request, response):
pass
DjangoInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
Django Request object: https://docs.djangoproject.com/en/3.1/ref/request-response/#httprequest-objects
Django Response object: https://docs.djangoproject.com/en/3.1/ref/request-response/#httpresponse-objects
Capture HTTP request and response headers
*****************************************
You can configure the agent to capture specified HTTP headers as span attributes, according to the
`semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_.
Request headers
***************
To capture HTTP request headers as span attributes, set the environment variable
``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to a comma delimited list of HTTP header names.
For example,
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header"
will extract ``content-type`` and ``custom_request_header`` from the request headers and add them as span attributes.
Request header names in Django are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment
variable will capture the header named ``custom-header``.
Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example:
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="Accept.*,X-.*"
Would match all request headers that start with ``Accept`` and ``X-``.
To capture all request headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to ``".*"``.
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=".*"
The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>``
is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a
single item list containing all the header values.
For example:
``http.request.header.custom_request_header = ["<value1>,<value2>"]``
Response headers
****************
To capture HTTP response headers as span attributes, set the environment variable
``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to a comma delimited list of HTTP header names.
For example,
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header"
will extract ``content-type`` and ``custom_response_header`` from the response headers and add them as span attributes.
Response header names in Django are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment
variable will capture the header named ``custom-header``.
Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example:
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="Content.*,X-.*"
Would match all response headers that start with ``Content`` and ``X-``.
To capture all response headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to ``".*"``.
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=".*"
The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>``
is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a
single item list containing all the header values.
For example:
``http.response.header.custom_response_header = ["<value1>,<value2>"]``
Sanitizing headers
******************
In order to prevent storing sensitive data such as personally identifiable information (PII), session keys, passwords,
etc, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS``
to a comma delimited list of HTTP header names to be sanitized. Regexes may be used, and all header names will be
matched in a case-insensitive manner.
For example,
::
export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=".*session.*,set-cookie"
will replace the value of headers such as ``session-id`` and ``set-cookie`` with ``[REDACTED]`` in the span.
Note:
The environment variable names used to capture HTTP headers are still experimental, and thus are subject to change.
API
---
"""
from logging import getLogger
from os import environ
from typing import Collection
from django import VERSION as django_version
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from opentelemetry.instrumentation.django.environment_variables import (
OTEL_PYTHON_DJANGO_INSTRUMENT,
)
from opentelemetry.instrumentation.django.middleware.otel_middleware import (
_DjangoMiddleware,
)
from opentelemetry.instrumentation.django.package import _instruments
from opentelemetry.instrumentation.django.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.metrics import get_meter
from opentelemetry.semconv.metrics import MetricInstruments
from opentelemetry.trace import get_tracer
from opentelemetry.util.http import get_excluded_urls, parse_excluded_urls
# Django 2.0 removed the legacy MIDDLEWARE_CLASSES setting; this flag picks
# which settings attribute the instrumentor manipulates.
DJANGO_2_0 = django_version >= (2, 0)
# Default URL-exclusion list, read once from the OTEL_PYTHON_DJANGO_* env vars.
_excluded_urls_from_env = get_excluded_urls("DJANGO")
_logger = getLogger(__name__)
def _get_django_middleware_setting() -> str:
    """Return the name of the Django setting that holds the middleware list.

    Django 1.x projects may still use the legacy MIDDLEWARE_CLASSES setting;
    that applies when `settings.MIDDLEWARE` is left at its default (`None`).
    """
    use_legacy = not DJANGO_2_0 and getattr(settings, "MIDDLEWARE", None) is None
    return "MIDDLEWARE_CLASSES" if use_legacy else "MIDDLEWARE"
class DjangoInstrumentor(BaseInstrumentor):
    """An instrumentor for Django
    See `BaseInstrumentor`
    """
    # Dotted path of the tracing middleware; inserted at position 0 so it
    # wraps every request before the application's own middleware runs.
    _opentelemetry_middleware = ".".join(
        [_DjangoMiddleware.__module__, _DjangoMiddleware.__qualname__]
    )
    # Dotted path of the optional sqlcommenter middleware.
    _sql_commenter_middleware = "opentelemetry.instrumentation.django.middleware.sqlcommenter_middleware.SqlCommenter"
    def instrumentation_dependencies(self) -> Collection[str]:
        # Package requirements checked by BaseInstrumentor before instrumenting.
        return _instruments
    def _instrument(self, **kwargs):
        """Configure _DjangoMiddleware and insert it into Django's settings.

        Recognized kwargs: ``tracer_provider``, ``meter_provider``,
        ``excluded_urls``, ``request_hook``, ``response_hook``,
        ``is_sql_commentor_enabled``.  Setting the
        OTEL_PYTHON_DJANGO_INSTRUMENT env var to "False" disables everything.
        """
        # FIXME this is probably a pattern that will show up in the rest of the
        # ext. Find a better way of implementing this.
        if environ.get(OTEL_PYTHON_DJANGO_INSTRUMENT) == "False":
            return
        tracer_provider = kwargs.get("tracer_provider")
        meter_provider = kwargs.get("meter_provider")
        _excluded_urls = kwargs.get("excluded_urls")
        tracer = get_tracer(
            __name__,
            __version__,
            tracer_provider=tracer_provider,
        )
        meter = get_meter(__name__, __version__, meter_provider=meter_provider)
        # The middleware is configured through class attributes because Django
        # instantiates middleware itself; these act as process-wide state.
        _DjangoMiddleware._tracer = tracer
        _DjangoMiddleware._meter = meter
        _DjangoMiddleware._excluded_urls = (
            _excluded_urls_from_env
            if _excluded_urls is None
            else parse_excluded_urls(_excluded_urls)
        )
        _DjangoMiddleware._otel_request_hook = kwargs.pop("request_hook", None)
        _DjangoMiddleware._otel_response_hook = kwargs.pop(
            "response_hook", None
        )
        _DjangoMiddleware._duration_histogram = meter.create_histogram(
            name=MetricInstruments.HTTP_SERVER_DURATION,
            unit="ms",
            description="measures the duration of the inbound http request",
        )
        _DjangoMiddleware._active_request_counter = meter.create_up_down_counter(
            name=MetricInstruments.HTTP_SERVER_ACTIVE_REQUESTS,
            unit="requests",
            description="measures the number of concurrent HTTP requests those are currently in flight",
        )
        # This can not be solved, but is an inherent problem of this approach:
        # the order of middleware entries matters, and here you have no control
        # on that:
        # https://docs.djangoproject.com/en/3.0/topics/http/middleware/#activating-middleware
        # https://docs.djangoproject.com/en/3.0/ref/middleware/#middleware-ordering
        _middleware_setting = _get_django_middleware_setting()
        settings_middleware = []
        try:
            settings_middleware = getattr(settings, _middleware_setting, [])
        except ImproperlyConfigured as exception:
            _logger.debug(
                "DJANGO_SETTINGS_MODULE environment variable not configured. Defaulting to empty settings: %s",
                exception,
            )
            settings.configure()
            settings_middleware = getattr(settings, _middleware_setting, [])
        except ModuleNotFoundError as exception:
            _logger.debug(
                "DJANGO_SETTINGS_MODULE points to a non-existent module. Defaulting to empty settings: %s",
                exception,
            )
            settings.configure()
            settings_middleware = getattr(settings, _middleware_setting, [])
        # Django allows to specify middlewares as a tuple, so we convert this tuple to a
        # list, otherwise we wouldn't be able to call append/remove
        if isinstance(settings_middleware, tuple):
            settings_middleware = list(settings_middleware)
        is_sql_commentor_enabled = kwargs.pop("is_sql_commentor_enabled", None)
        if is_sql_commentor_enabled:
            settings_middleware.insert(0, self._sql_commenter_middleware)
        settings_middleware.insert(0, self._opentelemetry_middleware)
        setattr(settings, _middleware_setting, settings_middleware)
    def _uninstrument(self, **kwargs):
        """Remove the OpenTelemetry middleware from Django's settings, if present."""
        _middleware_setting = _get_django_middleware_setting()
        settings_middleware = getattr(settings, _middleware_setting, None)
        # FIXME This is starting to smell like trouble. We have 2 mechanisms
        # that may make this condition be True, one implemented in
        # BaseInstrumentor and another one implemented in _instrument. Both
        # stop _instrument from running and thus, settings_middleware not being
        # set.
        if settings_middleware is None or (
            self._opentelemetry_middleware not in settings_middleware
        ):
            return
        settings_middleware.remove(self._opentelemetry_middleware)
        setattr(settings, _middleware_setting, settings_middleware)
|
998,219 | 5a1a87af9ac3f0eb6db671af6162c1ace7b0f069 | import os
import numpy as np
import pandas as pd
import cPickle as pickle
from natsort import natsorted
from random import randint
from skimage import exposure
from matplotlib import pyplot
from skimage.io import imread
from PIL import Image
from skimage.io import imshow
from skimage.filters import sobel
from skimage import feature
from skimage.color import gray2rgb
from sklearn.preprocessing import StandardScaler
# Folder with the Kaggle Dogs-vs-Cats test images.
PATH = '/Volumes/Mildred/Kaggle/DogsvsCats/data/test1'
# Every image is treated as maxPixel x maxPixel RGB -- assumes the inputs were
# pre-resized; TODO confirm.
maxPixel = 192
imageSize = maxPixel * maxPixel  # pixels per channel
num_features = imageSize * 3     # flattened RGB feature length per image
def plot_sample(x):
    """Display one flattened sample as a maxPixel x maxPixel RGB image."""
    sample = x.reshape(maxPixel, maxPixel, 3)
    imshow(sample)
    pyplot.show()
def load_images(path):
    """Load every .jpg under *path* into a flat feature matrix.

    Grayscale images are expanded to RGB; all images are flattened to
    channels-first order.  The filename prefix before the first '.' becomes
    the integer label (assumes filenames look like "<number>.jpg" -- TODO
    confirm against the dataset).

    Returns:
        (test_image, label): float array of shape (n, num_features) and an
        int column vector of shape (n, 1).
    """
    print('reading file names ... ')
    names = natsorted([d for d in os.listdir(path) if d.endswith('.jpg')])
    num_rows = len(names)
    print(names)
    print('making dataset ... ')
    test_image = np.zeros((num_rows, num_features), dtype=float)
    label = np.zeros((num_rows, 1), dtype=int)
    for i, n in enumerate(names):
        print(n.split('.')[0])
        image = imread(os.path.join(path, n))
        # Normalize to 3-channel RGB first, then transpose to channels-first;
        # this collapses the original duplicated if/else branch bodies.
        if not (len(image.shape) == 3 and image.shape[2] == 3):
            image = gray2rgb(image)
        image = image.transpose(2, 0, 1)
        test_image[i, 0:num_features] = np.reshape(image, (1, num_features))
        label[i] = n.split('.')[0]
    return test_image, label
# Build the test matrix, save it with the labels appended as the last column,
# then spot-check a few random samples visually.
test, label = load_images(PATH)
print(test[0])
print(test.shape)
np.save('data/test_color.npy', np.hstack((test, label)))
for _ in range(5):
    # random.randint is inclusive on BOTH ends; the original
    # randint(0, test.shape[0]) could index one past the last row.
    j = randint(0, test.shape[0] - 1)
    plot_sample(test[j])
    print(label[j])
print(np.amax(test[0]))
print(np.amin(test[0]))
|
998,220 | de1cd4efe72560e1159d82541af09a8350005630 | r = sum(range(1,101))
# Difference between the square of the sum and the sum of squares of 1..100.
squares = [n * n for n in range(1, 101)]
print((r * r) - sum(squares))
998,221 | 828b96ed2451e67cdfe07ed28150a91accd84ba2 | # -*- coding: utf-8 -*-
# @Time : 2019/12/5 0005 13:09
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: xadmin.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
# @WebSite : labixiaoxin.me
import xadmin
from .models import Goods, GoodsCategory, GoodsImage, GoodsCategoryBrand, Banner, HotSearchWords
from .models import IndexAd
class GoodsAdmin(object):
    """xadmin options for the Goods model."""
    # Columns shown on the change-list page.
    list_display = ["name", "click_num", "sold_num", "fav_num", "goods_num", "market_price",
                    "shop_price", "goods_brief", "goods_desc", "is_new", "is_hot", "add_time"]
    # Fields searchable from the list page.
    search_fields = ['name', ]
    # Fields editable directly on the list page.
    list_editable = ["is_hot", ]
    # List-page filters.
    # NOTE(review): "category_name" looks like it should be the related lookup
    # "category__name" -- confirm against the Goods model definition.
    list_filter = ["name", "click_num", "sold_num", "fav_num", "goods_num", "market_price",
                   "shop_price", "is_new", "is_hot", "add_time", "category_name"]
    # Rich-text (ueditor) widget for the long description field.
    style_fields = {"goods_desc": "ueditor"}
    # Allow attaching product images inline while adding/editing a product.
    class GoodsImageInline(object):
        model = GoodsImage
        exclude = ['add_time']
        extra = 1
        style = 'tab'
    inlines = [GoodsImageInline]
class GoodCategoryAdmin(object):
    """xadmin options for GoodsCategory."""
    list_display = ['name', 'category_type', 'parent_category', 'add_time']
    list_filter = ['category_type', 'parent_category', 'name']
    search_fields = ['name', ]
class GoodsBrandAdmin(object):
    """xadmin options for GoodsCategoryBrand."""
    list_display = ['category', 'image', 'name', 'desc']
    def get_context(self):
        # Restrict the category dropdown to categories with category_type == 1
        # (presumably the top-level categories -- confirm against the model).
        context = super(GoodsBrandAdmin, self).get_context()
        if 'form' in context:
            context['form'].fields['category'].queryset = GoodsCategory.objects.filter(category_type=1)
        return context
class BannerGoodsAdmin(object):
    """xadmin options for the carousel Banner model."""
    list_display = ['goods', 'image', 'index']
class HotSearchAdmin(object):
    """xadmin options for HotSearchWords."""
    list_display = ['keywords', 'index', 'add_time']
class IndexAdAdmin(object):
    """xadmin options for the front-page IndexAd model."""
    list_display = ['category', 'goods']
# Register each model with its xadmin options class.
xadmin.site.register(Goods, GoodsAdmin)
xadmin.site.register(GoodsCategory, GoodCategoryAdmin)
xadmin.site.register(Banner, BannerGoodsAdmin)
xadmin.site.register(GoodsCategoryBrand, GoodsBrandAdmin)
xadmin.site.register(HotSearchWords, HotSearchAdmin)
xadmin.site.register(IndexAd, IndexAdAdmin)
|
998,222 | 352c50c827e64f13ad6ae53c7e942322e032819e | from datetime import datetime
import logging
import json
logging.basicConfig(level=logging.INFO)
# JSON file that stores the list of user record dicts.
USERFILE = 'user.json'
# Build one user record and return it as a dict.
def create_user(user, passwd, label):
    """Create a user record with a creation timestamp.

    Args:
        user: user name.
        passwd: password string (stored as-is -- plaintext).
        label: free-form tag for the user.

    Returns:
        dict with keys 'time', 'user', 'passwd', 'label'.
    """
    logging.info("create user record")  # fixed typo ("recorde") in log message
    return {
        'time': datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
        'user': user,
        'passwd': passwd,
        'label': label,
    }
# Return every stored record as a list.
def get_user():
    """Load and return all user records (a list of dicts) from USERFILE."""
    # The original leaked the file handle via json.load(open(...)); use a
    # context manager so it is always closed.
    with open(USERFILE, 'r') as fp:
        return json.load(fp)
# Look up a record by user name.
def get_dictTarget(name):
    """Return the first stored record whose 'user' equals *name*, else None."""
    for record in get_user():
        if record['user'] == name:
            # Original had an unreachable `break` after this return; removed.
            return record
    logging.info("no such id in get_dictTarget")
    return None
# Persist the full record list, overwriting USERFILE.
def save_user(content):
    """Write *content* (a list of record dicts) to USERFILE.

    Returns None without writing when content is not a list.
    """
    if not isinstance(content, list):
        # Fixed the log message: it misspelled "content" and named the wrong
        # function ("save_message").
        logging.info("wrong type content in save_user")
        return None
    # Close the handle deterministically (original leaked it).
    with open(USERFILE, 'w') as fp:
        json.dump(content, fp)
# Append one record to the store.
def add_file(user):
    """Append the record dict *user* to USERFILE.

    Returns None without writing when user is not a dict.
    """
    logging.info("add user to file")
    if not isinstance(user, dict):
        logging.info("wrong parameters: {0} type {1}".format(user, type(user)))
        return None
    content = get_user()
    content.append(user)
    # Delegate to save_user instead of duplicating the json.dump call
    # (the original also leaked the file handle here).
    save_user(content)
    logging.info("success add message to file")
# Delete one stored record.
def del_user(target):
    """Remove the record equal to *target* from the store.

    Raises ValueError (from list.remove) when no such record exists.
    """
    records = get_user()
    records.remove(target)
    save_user(records)
if __name__ == "__main__":
    # Manual smoke test: dump every record, then look up user 'lfy'.
    # One-time seeding example (creates user.json):
    #   add_file(create_user('lfy', '123456', 'diligent'))
    #   add_file(create_user('hym', '123123', 'lazy'))
    print(get_user())
    get_dictTarget('lfy')
998,223 | 13658f1ccccf02770e12ddaa13364976a4d79dd7 | # -*- coding: utf-8 -*-
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from howToReachUsGUI import ROOT_DIR
sys.path.append(ROOT_DIR + '\\YOUniversity\\BayesianNetwork')
from bayesianNetwork import gradeBayesianInference
class Ui_Dialog_predMark(object):
    def setupUi(self, Dialog):
        """Build the grade-prediction dialog (generated-style PyQt5 code).

        Page 1 holds the five question labels with their radio-button groups;
        page 2 shows the prediction results.  Geometry values are hand-tuned
        for the fixed 701x620 window -- keep them as-is.
        """
        Dialog.setObjectName("Dialog")
        Dialog.setWindowModality(QtCore.Qt.WindowModal)
        Dialog.resize(701, 620)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
        Dialog.setSizePolicy(sizePolicy)
        Dialog.setMinimumSize(QtCore.QSize(701, 620))
        Dialog.setMaximumSize(QtCore.QSize(701, 620))
        Dialog.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        font = QtGui.QFont()
        font.setPointSize(15)
        font.setKerning(True)
        font.setBold(True)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        spacerItem2 = QtWidgets.QSpacerItem(13, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayoutWidget_7 = QtWidgets.QWidget(Dialog)
        self.horizontalLayoutWidget_7.setGeometry(QtCore.QRect(10, 69, 681, 441))
        self.horizontalLayoutWidget_7.setObjectName("horizontalLayoutWidget_7")
        font.setPointSize(12)
        # Create the stacked widget (lets the dialog switch between pages,
        # moving to the one that shows the requested content).
        self.stackedWidget = QtWidgets.QStackedWidget(self.horizontalLayoutWidget_7)
        self.stackedWidget.setObjectName("stackedWidget")
        # Page 1 - the question/computation page.
        self.page_1 = QtWidgets.QWidget()
        self.page_1.setObjectName("page_1")
        self.stackedWidget.addWidget(self.page_1)
        self.pushButtonCalculate = QtWidgets.QPushButton(Dialog)
        self.pushButtonCalculate.setGeometry(QtCore.QRect(140, 530, 421, 71))
        self.pushButtonCalculate.setFont(font)
        self.pushButtonCalculate.setAutoDefault(False)
        self.pushButtonCalculate.setObjectName("pushButtonCalculate")
        self.horizontalLayoutWidget_6 = QtWidgets.QWidget(Dialog)
        self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(0, 10, 701, 51))
        self.horizontalLayoutWidget_6.setObjectName("horizontalLayoutWidget_6")
        self.horizontalLayoutTitle = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_6)
        self.horizontalLayoutTitle.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayoutTitle.setObjectName("horizontalLayoutTitle")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.labelTitlePredVoto = QtWidgets.QLabel(self.horizontalLayoutWidget_6)
        self.labelTitlePredVoto.setFont(font)
        self.labelTitlePredVoto.setObjectName("labelTitlePredVoto")
        self.horizontalLayoutTitle.addItem(spacerItem)
        self.horizontalLayoutTitle.addWidget(self.labelTitlePredVoto)
        self.horizontalLayoutTitle.addItem(spacerItem1)
        font.setBold(False)
        self.groupBoxQuestions = QtWidgets.QGroupBox(self.page_1)
        self.groupBoxQuestions.setGeometry(QtCore.QRect(0, 10, 601, 430))
        self.groupBoxQuestions.setFont(font)
        self.groupBoxQuestions.setTitle("")
        self.groupBoxQuestions.setObjectName("groupBoxQuestions")
        self.horizontalLayoutWidget_5 = QtWidgets.QWidget(self.groupBoxQuestions)
        self.horizontalLayoutWidget_5.setGeometry(QtCore.QRect(30, 390, 541, 31))
        self.horizontalLayoutWidget_5.setFont(font)
        self.horizontalLayoutWidget_5.setObjectName("horizontalLayoutWidget_5")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_7)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.horizontalLayout.addItem(spacerItem2)
        # Labels for the question texts.
        font.setPointSize(14)
        self.labelFreeTime = QtWidgets.QLabel(self.groupBoxQuestions)
        self.labelFreeTime.setGeometry(QtCore.QRect(10, 5, 561, 31))
        self.labelFreeTime.setFont(font)
        self.labelFreeTime.setObjectName("labelFreeTime")
        self.labelStudyHard = QtWidgets.QLabel(self.groupBoxQuestions)
        self.labelStudyHard.setGeometry(QtCore.QRect(10, 90, 561, 41))
        self.labelStudyHard.setFont(font)
        self.labelStudyHard.setObjectName("labelStudyHard")
        self.labelAptitude = QtWidgets.QLabel(self.groupBoxQuestions)
        self.labelAptitude.setGeometry(QtCore.QRect(10, 190, 561, 21))
        self.labelAptitude.setFont(font)
        self.labelAptitude.setObjectName("labelAptitude")
        self.labelDifficulty = QtWidgets.QLabel(self.groupBoxQuestions)
        self.labelDifficulty.setGeometry(QtCore.QRect(10, 280, 561, 20))
        self.labelDifficulty.setFont(font)
        self.labelDifficulty.setObjectName("labelDifficulty")
        self.labelEmoFactor = QtWidgets.QLabel(self.groupBoxQuestions)
        self.labelEmoFactor.setGeometry(QtCore.QRect(10, 360, 561, 20))
        self.labelEmoFactor.setFont(font)
        self.labelEmoFactor.setObjectName("labelEmoFactor")
        # Radio buttons for each question.
        font.setPointSize(12)
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.groupBoxQuestions)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(30, 40, 541, 31))
        self.horizontalLayoutWidget.setFont(font)
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.rbFreeTime0 = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.rbFreeTime0.setFont(font)
        self.rbFreeTime0.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbFreeTime0.setObjectName("rbFreeTime0")
        self.rbFreeTime1 = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.rbFreeTime1.setFont(font)
        self.rbFreeTime1.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbFreeTime1.setObjectName("rbFreeTime1")
        self.rbFreeTime2 = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.rbFreeTime2.setFont(font)
        self.rbFreeTime2.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbFreeTime2.setObjectName("rbFreeTime2")
        self.rbFreeTime3 = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.rbFreeTime3.setFont(font)
        self.rbFreeTime3.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbFreeTime3.setObjectName("rbFreeTime3")
        self.rbFreeTime4 = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.rbFreeTime4.setFont(font)
        self.rbFreeTime4.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbFreeTime4.setObjectName("rbFreeTime4")
        self.rbFreeTime5 = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.rbFreeTime5.setFont(font)
        self.rbFreeTime5.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbFreeTime5.setObjectName("rbFreeTime5")
        # Group the free-time radio buttons.
        self.groupRBfreeTime = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.groupRBfreeTime.setContentsMargins(0, 0, 0, 0)
        self.groupRBfreeTime.setObjectName("groupRBfreeTime")
        self.groupRBfreeTime.addWidget(self.rbFreeTime0)
        self.groupRBfreeTime.addWidget(self.rbFreeTime1)
        self.groupRBfreeTime.addWidget(self.rbFreeTime2)
        self.groupRBfreeTime.addWidget(self.rbFreeTime3)
        self.groupRBfreeTime.addWidget(self.rbFreeTime4)
        self.groupRBfreeTime.addWidget(self.rbFreeTime5)
        self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.groupBoxQuestions)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(30, 130, 541, 31))
        self.horizontalLayoutWidget_2.setFont(font)
        self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
        self.rbStudyHard0 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
        self.rbStudyHard0.setFont(font)
        self.rbStudyHard0.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbStudyHard0.setObjectName("rbStudyHard0")
        self.rbStudyHard1 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
        self.rbStudyHard1.setFont(font)
        self.rbStudyHard1.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbStudyHard1.setObjectName("rbStudyHard1")
        self.rbStudyHard2 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
        self.rbStudyHard2.setFont(font)
        self.rbStudyHard2.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbStudyHard2.setObjectName("rbStudyHard2")
        self.rbStudyHard3 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
        self.rbStudyHard3.setFont(font)
        self.rbStudyHard3.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbStudyHard3.setObjectName("rbStudyHard3")
        # Group the study-hours radio buttons.
        self.groupRBstudyHard = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.groupRBstudyHard.setContentsMargins(0, 0, 0, 0)
        self.groupRBstudyHard.setObjectName("groupRBstudyHard")
        self.groupRBstudyHard.addWidget(self.rbStudyHard0)
        self.groupRBstudyHard.addWidget(self.rbStudyHard1)
        self.groupRBstudyHard.addWidget(self.rbStudyHard2)
        self.groupRBstudyHard.addWidget(self.rbStudyHard3)
        self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.groupBoxQuestions)
        self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(30, 220, 541, 31))
        self.horizontalLayoutWidget_3.setFont(font)
        self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
        self.rbAptitude0 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_3)
        self.rbAptitude0.setFont(font)
        self.rbAptitude0.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbAptitude0.setObjectName("rbAptitude0")
        self.rbAptitude1 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_3)
        self.rbAptitude1.setFont(font)
        self.rbAptitude1.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbAptitude1.setObjectName("rbAptitude1")
        self.rbAptitude2 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_3)
        self.rbAptitude2.setFont(font)
        self.rbAptitude2.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbAptitude2.setObjectName("rbAptitude2")
        self.horizontalLayoutWidget_4 = QtWidgets.QWidget(self.groupBoxQuestions)
        self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(30, 310, 541, 31))
        self.horizontalLayoutWidget_4.setFont(font)
        self.horizontalLayoutWidget_4.setObjectName("horizontalLayoutWidget_4")
        # Group the aptitude radio buttons.
        self.groupRBAptitude = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)
        self.groupRBAptitude.setContentsMargins(0, 0, 0, 0)
        self.groupRBAptitude.setObjectName("groupRBAptitude")
        self.groupRBAptitude.addWidget(self.rbAptitude0)
        self.groupRBAptitude.addWidget(self.rbAptitude1)
        self.groupRBAptitude.addWidget(self.rbAptitude2)
        self.rbDifficulty0 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_4)
        self.rbDifficulty0.setFont(font)
        self.rbDifficulty0.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbDifficulty0.setObjectName("rbDifficulty0")
        self.rbDifficulty1 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_4)
        self.rbDifficulty1.setFont(font)
        self.rbDifficulty1.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbDifficulty1.setObjectName("rbDifficulty1")
        self.rbDifficulty2 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_4)
        self.rbDifficulty2.setFont(font)
        self.rbDifficulty2.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbDifficulty2.setObjectName("rbDifficulty2")
        self.rbDifficulty3 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_4)
        self.rbDifficulty3.setFont(font)
        self.rbDifficulty3.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbDifficulty3.setObjectName("rbDifficulty3")
        # Group the difficulty radio buttons.
        self.groupRBDifficulty = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_4)
        self.groupRBDifficulty.setContentsMargins(0, 0, 0, 0)
        self.groupRBDifficulty.setObjectName("groupRBDifficulty")
        self.groupRBDifficulty.addWidget(self.rbDifficulty0)
        self.groupRBDifficulty.addWidget(self.rbDifficulty1)
        self.groupRBDifficulty.addWidget(self.rbDifficulty2)
        self.groupRBDifficulty.addWidget(self.rbDifficulty3)
        self.rbEmoFactor0 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_5)
        self.rbEmoFactor0.setFont(font)
        self.rbEmoFactor0.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbEmoFactor0.setObjectName("rbEmoFactor0")
        self.rbEmoFactor1 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_5)
        self.rbEmoFactor1.setFont(font)
        self.rbEmoFactor1.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbEmoFactor1.setObjectName("rbEmoFactor1")
        self.rbEmoFactor2 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_5)
        self.rbEmoFactor2.setFont(font)
        self.rbEmoFactor2.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        self.rbEmoFactor2.setObjectName("rbEmoFactor2")
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
        # Group the emotional-factor radio buttons.
        self.groupRBEmoFactor = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_5)
        self.groupRBEmoFactor.setContentsMargins(0, 0, 0, 0)
        self.groupRBEmoFactor.setObjectName("groupRBEmoFactor")
        self.groupRBEmoFactor.addWidget(self.rbEmoFactor0)
        self.groupRBEmoFactor.addWidget(self.rbEmoFactor1)
        self.groupRBEmoFactor.addWidget(self.rbEmoFactor2)
        # Default choices set to "not specified".
        self.rbFreeTime0.setChecked(True)
        self.rbStudyHard0.setChecked(True)
        self.rbAptitude0.setChecked(True)
        self.rbEmoFactor0.setChecked(True)
        self.rbDifficulty0.setChecked(True)
        # Page 2 - the results page.
        self.page_2 = QtWidgets.QWidget()
        self.page_2.setObjectName("page_2")
        self.showResults = QtWidgets.QPlainTextEdit(self.page_2)
        self.showResults.setGeometry(QtCore.QRect(20, 10, 547, 321))
        font.setFamily('Consolas')
        font.setPointSize(16)
        self.showResults.setFont(font)
        self.showResults.setReadOnly(True)
        self.showResults.setObjectName("showResults")
        self.labelLegendTitle = QtWidgets.QLabel(self.page_2)
        self.labelLegendTitle.setGeometry(QtCore.QRect(30, 340, 191, 21))
        font.setFamily('MS Shell Dlg 2')
        font.setPointSize(13)
        self.labelLegendTitle.setFont(font)
        self.labelLegendTitle.setObjectName("labelLegendTitle")
        self.labelLegend = QtWidgets.QLabel(self.page_2)
        self.labelLegend.setGeometry(QtCore.QRect(30, 360, 500, 80))
        font.setPointSize(11)
        self.labelLegend.setFont(font)
        self.labelLegend.setObjectName("labelLegend")
        # Button to close the window.
        self.pushButtonGoBack = QtWidgets.QPushButton(Dialog)
        self.pushButtonGoBack.setGeometry(QtCore.QRect(15, 15, 40, 40))
        self.pushButtonGoBack.setMaximumSize(QtCore.QSize(49, 49))
        self.pushButtonGoBack.setObjectName("pushButtonGoBack")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(ROOT_DIR + "\\resources\\images\\menu\\goBack.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButtonGoBack.setStyleSheet("background-color: rgb(55,117,169);")
        self.pushButtonGoBack.setIcon(icon)
        self.pushButtonGoBack.setIconSize(QtCore.QSize(49,49))
        self.pushButtonGoBack.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
        # Button to return to the grade-computation page.
        font.setFamily('MS Shell Dlg 2')
        font.setPointSize(14)
        font.setBold(True)
        self.pushButtonBackToPage1 = QtWidgets.QPushButton(Dialog)
        self.pushButtonBackToPage1.hide()
        self.pushButtonBackToPage1.setGeometry(QtCore.QRect(140, 530, 421, 71))
        self.pushButtonBackToPage1.setFont(font)
        self.pushButtonBackToPage1.setAutoDefault(False)
        self.pushButtonBackToPage1.setObjectName("pushButtonGoBack")
        self.stackedWidget.addWidget(self.page_2)
        self.horizontalLayout.addWidget(self.stackedWidget)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        # Signal/slot wiring.
        self.stackedWidget.setCurrentIndex(0) # select the first page to show
        self.pushButtonGoBack.clicked.connect(Dialog.close) # comment out if running directly from this file
        self.pushButtonCalculate.clicked.connect(self.predVoto)
        self.pushButtonBackToPage1.clicked.connect(self.goBack)
def retranslateUi(self, Dialog):
    """Install all user-visible (Italian) strings on the widgets.

    Generated-UI convention: called once at the end of setupUi; every
    string goes through QCoreApplication.translate for i18n.
    """
    _translate = QtCore.QCoreApplication.translate
    Dialog.setWindowTitle(_translate("Dialog", "Predizione del voto"))
    self.pushButtonCalculate.setText(_translate("Dialog", "Quale potrebbe essere l\'esito dell\'esame?"))
    self.labelTitlePredVoto.setText(_translate("Dialog", "Predizione del voto - Calcolo "))
    # Question 1: daily free time available for studying
    self.labelFreeTime.setText(_translate("Dialog", "- Quanto tempo libero hai al giorno da dedicare allo studio?"))
    self.rbFreeTime0.setText(_translate("Dialog", "non specificato"))
    self.rbFreeTime1.setText(_translate("Dialog", "un\'ora"))
    self.rbFreeTime2.setText(_translate("Dialog", "2 ore"))
    self.rbFreeTime3.setText(_translate("Dialog", "3 ore"))
    self.rbFreeTime4.setText(_translate("Dialog", "4 ore"))
    self.rbFreeTime5.setText(_translate("Dialog", "5 ore o più"))
    # Question 2: actual daily study hours
    self.labelStudyHard.setText(_translate("Dialog", "- Quante ore al giorno dedichi effettivamente allo studio?"))
    self.rbStudyHard0.setText(_translate("Dialog", "non specificato"))
    self.rbStudyHard1.setText(_translate("Dialog", "meno di 2 ore"))
    self.rbStudyHard2.setText(_translate("Dialog", "tra 2 e 4 ore"))
    self.rbStudyHard3.setText(_translate("Dialog", "più di 4 ore"))
    # Question 3: aptitude for the subject
    self.labelAptitude.setText(_translate("Dialog", "- Ti senti portato per questa materia?"))
    self.rbAptitude0.setText(_translate("Dialog", "non specificato"))
    self.rbAptitude1.setText(_translate("Dialog", "no"))
    self.rbAptitude2.setText(_translate("Dialog", "sì"))
    # Question 4: exam difficulty
    self.rbDifficulty0.setText(_translate("Dialog", "non specificato"))
    self.rbDifficulty1.setText(_translate("Dialog", "facile"))
    self.rbDifficulty2.setText(_translate("Dialog", "medio"))
    self.rbDifficulty3.setText(_translate("Dialog", "difficile"))
    self.labelDifficulty.setText(_translate("Dialog", "- Seleziona la difficoltà dell\'esame:"))
    # Question 5: exam anxiety (emotional factor)
    self.rbEmoFactor0.setText(_translate("Dialog", "non specificato"))
    self.rbEmoFactor1.setText(_translate("Dialog", "no"))
    self.rbEmoFactor2.setText(_translate("Dialog", "sì"))
    self.labelEmoFactor.setText(_translate("Dialog", "- Sei una persona ansiosa durante gli esami?"))
    # NOTE(review): pushButtonCalculate's text is set a second time here with
    # the same string — a harmless duplicate worth removing.
    self.pushButtonCalculate.setText(_translate("Dialog", "Quale potrebbe essere l\'esito dell\'esame?"))
    self.showResults.setPlainText(_translate("Dialog", ""))
    self.labelLegendTitle.setText(_translate("Dialog", "Legenda:"))
    self.pushButtonBackToPage1.setText(_translate("Dialog", "Torna indietro "))
    # Legend explaining the grade buckets shown in the results text box
    self.labelLegend.setText(_translate("Dialog",
        "- Voto(0): Voto compreso tra 18 e 23\n"
        "- Voto(1): Voto compreso tra 24 e 27\n"
        "- Voto(2): Voto compreso tra 28 e 30L\n"
        "- phi(Voto): probabilità che abbia preso un voto appartenente a quel range"))
# Compute the grade prediction from the answers selected on page 1.
def predVoto(self):
    """Collect checked answers as Bayesian evidence, run the inference and show it.

    Fix: the original called gradeBayesianInference twice (once to fill
    self.res, once again for display); the result is now computed once
    and reused.
    """
    self.labelTitlePredVoto.setText("Predizione del voto - Risultati ")
    self.stackedWidget.setCurrentIndex(1)  # switch to the results page
    self.pushButtonCalculate.hide()
    self.pushButtonGoBack.hide()
    self.pushButtonBackToPage1.show()
    # Radio button -> evidence value for each question; the
    # "non specificato" (index 0) buttons are absent on purpose, so an
    # unspecified question contributes no evidence.
    d_freeTime = {self.rbFreeTime1: 0,
                  self.rbFreeTime2: 1,
                  self.rbFreeTime3: 2,
                  self.rbFreeTime4: 3,
                  self.rbFreeTime5: 4}
    d_studiedHard = {self.rbStudyHard1: 0,
                     self.rbStudyHard2: 1,
                     self.rbStudyHard3: 2}
    d_aptitude = {self.rbAptitude1: 0,
                  self.rbAptitude2: 1}
    d_emoFactor = {self.rbEmoFactor1: 0,
                   self.rbEmoFactor2: 1}
    d_difficulty = {self.rbDifficulty1: 0,
                    self.rbDifficulty2: 1,
                    self.rbDifficulty3: 2}
    # Evidence dict consumed by the Bayesian network (keys are network
    # variable names and must not be translated/renamed).
    evidences = {}
    groups = (('Tempo libero', d_freeTime),
              ('Studiato molto', d_studiedHard),
              ('Attitudine', d_aptitude),
              ('Fattore Emotivo', d_emoFactor),
              ('Difficolta', d_difficulty))
    for evidence_key, mapping in groups:
        for button, value in mapping.items():
            if button.isChecked():
                evidences[evidence_key] = value
    if not evidences:
        self.showResults.setPlainText('Errore.\nPer poter ottenere una predizione, tornare indietro e selezionare almeno uno dei parametri.')
    else:
        # Run the (presumably deterministic) inference once; display the
        # same object that is stored for later use.
        self.res = gradeBayesianInference(evidences)
        self.showResults.setPlainText(str(self.res))
def goBack(self):
    """Leave the results page and restore the questionnaire page."""
    # Flip back to the questions page (index 0) and restore the buttons
    # hidden by predVoto.
    self.stackedWidget.setCurrentIndex(0)
    self.labelTitlePredVoto.setText("Predizione del voto - Calcolo ")
    self.pushButtonBackToPage1.hide()
    self.pushButtonCalculate.show()
    self.pushButtonGoBack.show()
if __name__ == "__main__":
    # Standalone launch for manually testing this dialog.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Dialog shows only the system menu and the close button in its title bar.
    Dialog = QtWidgets.QDialog(None, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowCloseButtonHint)
    ui = Ui_Dialog_predMark()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
998,224 | 4d72fd0d0cf186ab9f2ea9edeb7a92821d0f6edd | """
Backtracking(Time limit exceeds)
Illustration of the backtracking tree:
ex. nums = [ 1,1,1], S = 1. The node is the possible sum in nums[0:level]
root
/ \
1 -1
/ \ / \
2 0 0 -2
/ \ / \ / \ / \
3 1 1 -1 1 -1 -1 -3
"""
class Solution2(object):
    """Brute-force backtracking (exponential; exceeds the time limit on large inputs)."""

    def findTargetSumWays(self, nums, S):
        """
        :type nums: List[int]
        :type S: int
        :rtype: int
        """
        return self.bt(nums, 0, 0, S)

    def bt(self, nums, i, cur, target):
        """Count sign assignments of nums[i:] that extend `cur` to `target`.

        :param nums: the full input array
        :param i: index of the next element to sign (+/-)
        :param cur: running sum of nums[0:i] under the signs chosen so far
        :param target: the desired total
        :rtype: int
        """
        if i == len(nums):
            # Base case: True/False coerces to the required 1/0 count.
            return int(cur == target)
        plus = self.bt(nums, i + 1, cur + nums[i], target)
        minus = self.bt(nums, i + 1, cur - nums[i], target)
        return plus + minus
"""
Backtracking with memoization.
From the above backtracking tree, we can see that there are a lot of redundant subproblems (i, s),
where i is index, s is current sum in nums[0:i].
Thus, we can use a map to store the intermediate result (i,s) to avoid redundant subproblems.
"""
class Solution(object):
    """Memoized backtracking: cache results keyed on (index, remaining sum)."""

    def findTargetSumWays(self, nums, S):
        """
        :type nums : List[int]
        :type S : int
        :rtype : int
        """
        memo = {}

        def count(idx, remaining):
            # remaining: the sum that nums[idx:] still has to produce.
            if idx == len(nums):
                return 1 if remaining == 0 else 0
            key = (idx, remaining)
            if key not in memo:
                memo[key] = (count(idx + 1, remaining - nums[idx])
                             + count(idx + 1, remaining + nums[idx]))
            return memo[key]

        return count(0, S)
"""
Dynamic programming:
dp[i, j] means: the total number of ways to find sum j by assigning symbols for number in num[0:i]
dp[i, j] = dp[i-1, j-nums[i]] + dp[i-1, j+nums[i]]
ex. nums = [1,1,1], S = 1
range of sums of nums is -3 - 3
index: 0 1 2 3 4 5 6
mapped index: -3 -2 -1 0 1 2 3
1 0 0 1 0 1 0 0
1 0 1 0 2 0 1 0
1 1 0 3 0 3 0 0
"""
class Solution1(object):
    """Bottom-up DP over all reachable sums, shifted by xmax so indices are >= 0.

    Fix: replaced the Python-2-only `xrange` with `range` (works on both 2 and 3).
    """

    def findTargetSumWays(self, nums, S):
        """
        dp[i][j] = number of ways to reach sum (j - xmax) using nums[0:i+1].

        :type nums : List[int]
        :type S : int
        :rtype : int
        """
        # edge case: an empty array forms only the sum 0 (one way)
        if len(nums) == 0:
            return 1 if S == 0 else 0
        xmax = sum(nums)
        # every reachable sum lies in [-xmax, xmax]
        if S > xmax or S < -xmax:
            return 0
        # rows = prefix length, cols = shifted sums 0 .. 2*xmax
        dp = [[0] * (2 * xmax + 1) for _ in range(len(nums))]
        for i in range(len(nums)):
            for j in range(2 * xmax + 1):
                if i == 0:
                    # Seed +nums[0] and -nums[0]; when nums[0] == 0 both
                    # branches land on j == xmax, correctly counting 2 ways.
                    dp[0][j] += 1 if nums[0] == j - xmax else 0
                    dp[0][j] += 1 if nums[0] == -(j - xmax) else 0
                else:
                    # Come from j-nums[i] (sign +) or j+nums[i] (sign -),
                    # guarding the table bounds.
                    dp[i][j] += dp[i-1][j-nums[i]] if j-nums[i] >= 0 else 0
                    dp[i][j] += dp[i-1][j+nums[i]] if j+nums[i] < 2*xmax + 1 else 0
        return dp[-1][S + xmax]
"""
test
"""
myTest = Solution()
print myTest.findTargetSumWays([1,1,1,1,1], 3)
print myTest.findTargetSumWays([1,1,1,1,1], -3)
print myTest.findTargetSumWays([], 0)
print myTest.findTargetSumWays([], 1)
print myTest.findTargetSumWays([0,0], 1)
print myTest.findTargetSumWays([0,0], 0)
|
998,225 | 4f2f3b4c025a02ef8e42b702286c8f7165bd8e31 | #!/usr/bin/env python3
# This script is a all-in-one script.
# It includes the the create_blob, create_tree and create_commit functionality.
#
# Author: Tim Silhan
import os
import zlib
import time
from hashlib import sha1
# Create the blob object
# Git object layout: "<type> <size>\x00<content>"; the SHA-1 of that store
# names the object, and its first two hex digits pick the directory under
# .git/objects/.
print("Blob")
print("-----------------------")
blob_content = 'Hello World\n'
print('Input: ', blob_content)
# NOTE(review): git sizes are *byte* counts; len() on a str counts characters,
# which only coincides with the byte length for ASCII content — confirm
# before feeding non-ASCII data through this script.
blob_header = f'blob {len(blob_content)}\x00'
print('Header:', blob_header)
blob_store = blob_header + blob_content
print('Store:', blob_store)
blob_digest = sha1(blob_store.encode('utf-8')).hexdigest()
print('Digest:', blob_digest)
blob_dir = blob_digest[:2]    # directory: first two hex digits
print('Dir:', blob_dir)
blob_file = blob_digest[2:]   # file name: remaining 38 hex digits
print('File:', blob_file)
blob_compressed = zlib.compress(blob_store.encode('utf-8'))
print('\nCompressed:', blob_compressed)
# NOTE(review): os.makedirs raises FileExistsError if the directory already
# exists (exist_ok is not passed) — the script only works in a fresh repo.
os.makedirs(os.path.dirname(f'.git/objects/{blob_dir}/'))
with open(f'.git/objects/{blob_dir}/{blob_file}', 'wb') as blob:
    blob.write(blob_compressed)
# Create the tree object
# Tree entry layout: "<mode> <name>\x00" followed by the 20 raw SHA-1 bytes.
print("Tree")
print("-----------------------")
tree_filename = "hello.txt"
print('Ref Hash: ', blob_digest)
tree_content = b"100644 " + tree_filename.encode('utf-8') + b"\x00" + bytes.fromhex(blob_digest)
# Create a directory with a tree object
# content = b"40000 " + filename.encode('utf-8') + b"\x00" + bytes.fromhex(ref_hash)
tree_header = f'tree {len(tree_content)}\x00'
print('Header:', tree_header)
tree_store = tree_header.encode('utf-8') + tree_content
print('Store:', tree_store)
tree_digest = sha1(tree_store).hexdigest()
print('Digest:', tree_digest)
tree_dir = tree_digest[:2]
print('Dir:', tree_dir)
tree_file = tree_digest[2:]
print('File:', tree_file)
tree_compressed = zlib.compress(tree_store)
print('Compressed:', tree_compressed)
os.makedirs(os.path.dirname(f'.git/objects/{tree_dir}/'))
with open(f'.git/objects/{tree_dir}/{tree_file}', 'wb') as tree:
    tree.write(tree_compressed)
# Create commit object
# Commit body: tree line, optional parent line, author/committer lines with
# epoch timestamp + timezone, blank line, then the commit message.
print("Commit")
print("-----------------------")
print('Tree Hash: ', tree_digest)
parent_hash = ''   # empty: this is a root commit (no parent line emitted)
if parent_hash:
    print('Parent Hash: ', parent_hash)
author_name = 'John Doe'
author_email = 'jd@someplace.com'
seconds_since_epoch = int(time.time())
time_zone = '+0000'
commit_message = 'This is it! We made it!\n'
commit_content = f'tree {tree_digest}'
if parent_hash:
    commit_content += f'\nparent {parent_hash}'
commit_content += f'\nauthor {author_name} <{author_email}> {seconds_since_epoch} {time_zone}'
commit_content += f'\ncommitter {author_name} <{author_email}> {seconds_since_epoch} {time_zone}'
commit_content += f'\n\n{commit_message}'
print('Content:\n', commit_content)
commit_header = f'commit {len(commit_content)}\x00'
print('Header:', commit_header)
commit_store = commit_header.encode('utf-8') + commit_content.encode('utf-8')
print('Store:', commit_store)
commit_digest = sha1(commit_store).hexdigest()
print('Digest:', commit_digest)
commit_dir = commit_digest[:2]
print('Dir:', commit_dir)
commit_file = commit_digest[2:]
print('File:', commit_file)
commit_compressed = zlib.compress(commit_store)
print('Compressed:', commit_compressed)
os.makedirs(os.path.dirname(f'.git/objects/{commit_dir}/'))
with open(f'.git/objects/{commit_dir}/{commit_file}', 'wb') as commit:
    commit.write(commit_compressed)
|
998,226 | 9a07d3fb295afd6c9f316382a6eb2a1d08374934 | import os.path, sys
from sage.all import SQLDatabase
import base
padic_db = base.getDBConnection().ellcurves.padic_db
padic_db.ensure_index("label")
padic_db.ensure_index("prime")
def lookup_or_create(label, p):
    """Return the stored (label, p) record from padic_db, or a fresh skeleton dict."""
    found = padic_db.find_one({'label': label, 'p': p})
    # A missing record yields a minimal dict carrying just the lookup keys.
    return {'label': label, 'p': p} if found is None else found
#for path in sys.argv[1:]:
# print path
# D = SQLDatabase(filename=path)
# query_dict = {'table_name': 'regulators', 'display_cols': ['p', 'val', 'zz', 'label'], 'expression': ['p','>','0']}
# Q = D.query(query_dict)
# for p, val, zz, label in Q.run_query():
# p = int(p)
# info =lookup_or_create(label,p)
# info['val'] = val
# info['prec'] = 20
# info['unit'] = zz
# padic_db.save(info)
|
998,227 | 972ad02c94aecba3f4259f1197fd1b6c38a76ac3 | import numpy as np
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
import read_data
import pca
from imputation import *
USE_IMPUTED_DATA = True
def train_basicSVM():
    """Fit a plain linear SVM on the training split and report val/test accuracy.

    Uses the module-level X_train/y_train/X_val/y_val/X_test/y_test globals.
    """
    model = LinearSVC()
    model.fit(X_train, y_train)
    print("Basic SVM Score: ", model.score(X_val, y_val))
    print("Basic SVM Score Test: ", model.score(X_test, y_test))
def train_L1SVM():
    """Fit an L1-regularized linear SVM (C=0.1) and report val/test accuracy."""
    # dual=False is required by liblinear when penalty='l1'.
    model = LinearSVC(penalty="l1", dual=False, C=0.1)
    model.fit(X_train, y_train)
    print("SVM with L1 regularization Score: ", model.score(X_val, y_val))
    print("Best L1 regularization Test Score: ", model.score(X_test, y_test))
def tune_SVM():
    """Grid-search LinearSVC hyperparameters on the combined train+val split."""
    # Some (penalty, loss, dual) combinations are invalid for liblinear;
    # error_score=0.0 scores those fits as 0 instead of aborting the search.
    param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
                  'penalty': ['l1', 'l2'],
                  'loss': ['hinge', 'squared_hinge'],
                  'dual':[False, True]}
    basic_svm_2 = LinearSVC()
    svm_tune = GridSearchCV(basic_svm_2, param_grid, error_score = 0.0)
    svm_tune.fit(X_train_and_val, y_train_and_val)
    # Report the winning estimator and its parameters.
    print(svm_tune.best_estimator_)
    print("Best score: ", svm_tune.best_score_)
    print("Best C: ", svm_tune.best_estimator_.C)
    print("Best penalty: ", svm_tune.best_estimator_.penalty)
    print("Best loss: ", svm_tune.best_estimator_.loss)
def train_RBFSVM():
    """Fit an RBF-kernel SVM (sklearn defaults) and report validation accuracy."""
    model = SVC()
    model.fit(X_train, y_train)
    print("RBF Kernel SVM Score: ", model.score(X_val, y_val))
def train_pca_SVM(num_components):
    """Train a linear SVM on PCA-reduced features and report validation accuracy."""
    # NOTE(review): apply_pca is called independently on the train and val
    # sets; if it fits a fresh PCA per call the two projections need not
    # share a basis — verify pca.apply_pca's semantics.
    X_train_transform = pca.apply_pca(X_train, num_components)
    X_val_transform = pca.apply_pca(X_val, num_components)
    basic_svm = LinearSVC()
    basic_svm.fit(X_train_transform, y_train)
    print("SVM Score with PCA with ", num_components, "components: ", basic_svm.score(X_val_transform, y_val))
def train_pca_L1SVM(num_components):
    """Train an L1-regularized linear SVM on PCA-reduced features; report val accuracy."""
    # NOTE(review): same caveat as train_pca_SVM — train and val are
    # PCA-transformed by separate calls; verify they share one basis.
    X_train_transform = pca.apply_pca(X_train, num_components)
    X_val_transform = pca.apply_pca(X_val, num_components)
    svm = LinearSVC(penalty= "l1", dual=False)
    svm.fit(X_train_transform, y_train)
    print("L1 SVM Score with PCA with ", num_components, "components: ", svm.score(X_val_transform, y_val))
def train_SparsePCA_SVM(num_components):
    """Train a linear SVM on SparsePCA-reduced features; report validation accuracy.

    Fix: the model is fit on the SparsePCA-transformed training features, so
    the validation set must be transformed the same way before scoring; the
    original scored on raw X_val, whose feature count differs from what the
    model was trained on (mirrors the sibling train_pca_SVM).
    """
    X_train_transform = pca.apply_SparsePCA(X_train, num_components)
    X_val_transform = pca.apply_SparsePCA(X_val, num_components)
    basic_svm = LinearSVC()
    basic_svm.fit(X_train_transform, y_train)
    print("SVM Score with SparsePCA with ", num_components, "components: ", basic_svm.score(X_val_transform, y_val))
if __name__ == "__main__":
if USE_IMPUTED_DATA:
X_train, y_train = get_imputed_traindata()
X_val, y_val = get_imputed_valdata()
X_test, y_test = get_imputed_testdata()
X_train_and_val = np.concatenate((X_train, X_val))
y_train_and_val = np.concatenate((y_train, y_val))
train_basicSVM()
train_L1SVM()
train_RBFSVM()
tune_SVM()
train_pca_SVM(num_components = 120)
train_pca_L1SVM(num_components = 120)
# else:
# print ("--------------- LOADING DATA -------------------")
# X_train, y_train = read_data.get_traindata()
# X_val, y_val = read_data.get_valdata()
# X_train_and_val = np.concatenate((X_train, X_val))
# y_train_and_val = np.concatenate((y_train, y_val))
# print ("--------------- DATA IS LOADED -------------------")
# train_basicSVM()
# train_L1SVM()
# train_RBFSVM()
# tune_SVM()
# train_pca_SVM(num_components = 20)
'''
Basic SVM Score: 0.510101010101
SVM with L1 regularization Score: 0.765151515152
----tuning hyperparameters
Best score: 0.835265700483
Best C: 0.01
Best penalty: l1
Best loss: squared_hinge
Best dual: False
With Imputation:
Best score: 0.662662662663
Best C: 1
Best penalty: l1
Best loss: squared_hinge
'''
|
998,228 | 15d618678d58deb911ba641dddfd995fdf00ed14 | #!/bin/python
class Questionair():
    """One group's customs-declaration answers (Advent of Code 2020, day 6 style)."""

    def __init__(self, data: str):
        self._raw = data
        # Flattened answers (all newlines removed) and the per-person lines.
        cleaned = data.replace('\r', '')
        self.answers = cleaned.replace('\n', '')
        self.individual_answers = cleaned.split('\n')

    def count_answers(self) -> int:
        """Number of distinct questions answered by anyone in the group."""
        return len(set(self.answers))

    def count_individual_answers_that_match(self) -> int:
        """Number of questions answered by every person in the group."""
        # Intersect each person's answer set, starting from the first one.
        common = set(self.individual_answers[0])
        for person in self.individual_answers[1:]:
            common &= set(person)
        return len(common)
def read_questionair(filename: str) -> list:
    """Read the input file and return its blank-line-separated groups."""
    with open(filename, 'r') as handle:
        contents = handle.read().strip()
    # Groups are separated by one empty line, i.e. two consecutive newlines.
    return contents.split('\n\n')
if __name__ == "__main__":
q = read_questionair( 'input' );
print( "First Star : {}".format( sum( [ Questionair( c ).count_answers() for c in q ] ) ) )
print( "Second Star : {}".format( sum( [ Questionair( c ).count_individual_answers_that_match() for c in q ] ) ) )
|
998,229 | d6591d8e29319e1a5f7ea8618d1d4863c494507e | import configparser
import os
import pymongo
class MongoTools:
    """Thin accessor for the MongoDB connection configured in config.ini."""

    def __init__(self):
        # Read the [MONGO] section from config.ini located next to this module.
        parser = configparser.ConfigParser()
        base_dir = os.path.dirname(os.path.abspath(__file__))
        parser.read(os.path.join(base_dir, 'config.ini'))
        self.mongo_database = parser['MONGO']['DATABASE']
        self.mongo_host = parser['MONGO']['HOST']
        self.mongo_port = parser['MONGO']['PORT']
        # mongodb collections
        self.mongo_collection_slack_message = "slack_messages"

    def connect_mongo(self):
        """Return a MongoClient for the configured host and port."""
        return pymongo.MongoClient("mongodb://%s:%s" % (self.mongo_host, self.mongo_port))

    def get_database(self):
        """Return the configured database from a fresh client."""
        return self.connect_mongo().get_database(self.mongo_database)

    def get_collection(self):
        """Return the slack-messages collection."""
        return self.get_database().get_collection(self.mongo_collection_slack_message)
|
998,230 | 8e5187a9dea988e0958d4d9b9f3a5b117ea710ca | #! /usr/bin/python
from distutils.core import setup
import os
version = "3.0.0"
versionfile_path = os.path.join("globus","connect","server", "version")
oldversion = None
if os.path.exists(versionfile_path):
oldversionfile = file(versionfile_path, "r")
try:
oldversion = oldversionfile.read().strip()
finally:
oldversionfile.close()
if version != oldversion:
versionfile = file(versionfile_path, "w")
try:
versionfile.write(version + "\n")
finally:
versionfile.close()
setup(name = 'globus_connect_server',
version = version,
description = 'Globus Connect Server',
author = 'Globus Toolkit',
author_email = 'support@globus.org',
url = 'https://www.globusonline.org/gcmu',
packages = [
'globus',
'globus.connect',
'globus.connect.server',
'globus.connect.server.io',
'globus.connect.server.id',
'globus.connect.server.web',
'globus.connect.security'],
package_data = {
'globus.connect.security': [
'*.pem',
'*.signing_policy',
'cilogon-crl-fetch'],
'globus.connect.server': [
'mapapp-template',
'version'
]
},
data_files = [( '/etc', [ 'globus-connect-server.conf' ]),
( '/usr/share/man/man8', [
'man/man8/globus-connect-server-setup.8',
'man/man8/globus-connect-server-cleanup.8',
'man/man8/globus-connect-server-id-setup.8',
'man/man8/globus-connect-server-id-cleanup.8',
'man/man8/globus-connect-server-io-setup.8',
'man/man8/globus-connect-server-io-cleanup.8',
'man/man8/globus-connect-server-web-setup.8',
'man/man8/globus-connect-server-web-cleanup.8'
])],
scripts = ['globus-connect-server-setup',
'globus-connect-server-cleanup',
'globus-connect-server-id-cleanup',
'globus-connect-server-id-setup',
'globus-connect-server-io-cleanup',
'globus-connect-server-io-setup',
'globus-connect-server-web-cleanup',
'globus-connect-server-web-setup'
],
)
|
998,231 | 0d8cb2a16de9d41a38a4670555c669dc79211a19 | import folium
from itertools import cycle
def draw_paths(center, paths, file_name):
    """
    Draw each path as a colored polyline with per-point markers and save the map.

    Fixes:
    - ``colors.next()`` is Python-2-only iterator syntax; use ``next(colors)``.
    - Coordinates were built as ``[lat, [lng]]`` (longitude nested in a list),
      which folium cannot plot; they are now flat ``[lat, lng]`` pairs.
    - The inner helper's parameter no longer shadows the ``map`` builtin.

    :param center: list[lat, lng]
    :param paths: list of Paths, where path is a list like this:
     [(52.529, 13.407, 'TItle 1'), (52.529, 13.407, 'TItle 2')]
    :param file_name: where to save the map, e.g. 'map.html'
    :return:
    """
    # Cycle so every path gets a color even when there are more paths than colors.
    colors = cycle(['red', 'blue', 'green', 'purple', 'orange', 'darkred',
                    'lightred', 'beige', 'darkblue', 'darkgreen', 'cadetblue',
                    'darkpurple', 'white', 'pink', 'lightblue', 'lightgreen',
                    'gray', 'black', 'lightgray'])

    def _draw_path(fmap, path):
        # One color per path, shared by its polyline and markers.
        color = next(colors)
        coords = [[p[0], p[1]] for p in path]
        folium.PolyLine(
            locations=coords,
            color=color
        ).add_to(fmap)
        for point in path:
            folium.Marker(
                [point[0], point[1]],
                popup=point[2],
                icon=folium.Icon(color=color),
            ).add_to(fmap)

    fmap = folium.Map(
        location=center,
        zoom_start=15,
        tiles='Stamen Toner',
    )
    for path in paths:
        _draw_path(fmap, path)
    fmap.save(file_name)
998,232 | a21637baac43301b9a2317b384b34bc771f04683 |
from argparse import Namespace
from commands.command import Command
from datetime import datetime
from util import tint_yellow, tint_red, tint_green, tint_blue
from util import tri_state_value, tri_state_device
import csv
class Profile(Command):
    """This class represents the 'profile' subcommand."""

    def __init__(self):
        """Constructs a new instance."""
        super(Profile, self).__init__()

    def execute(self, args: Namespace):
        """Execute the 'profile' command. It gives a nice overview of data captured
        with the sniff command

        :param args: The arguments to the command
        :type args: Namespace
        """
        # device name -> { date -> [(HH:MM, on/off value), ...] }
        data = {}
        with open(args.data, 'r') as file:
            reader = csv.reader(file, delimiter=';')
            for row in reader:
                if reader.line_num == 1:
                    # Header validation. NOTE(review): rows are split on ';'
                    # and re-joined with ','; the expected header line in the
                    # file is therefore "Timestamp; Decimal; TriState; State"
                    # — confirm against the sniff command's output format.
                    if 'Timestamp, Decimal, TriState, State' != ','.join(row):
                        print('Not a valid CSV file!')
                        return
                else:
                    dt = datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')
                    # row[2] is the tri-state code; it encodes both the device
                    # identity and the on/off value (see util helpers).
                    value = tri_state_value(row[2])
                    device = data.setdefault(tri_state_device(row[2].strip()), {})
                    device.setdefault(dt.date(),[]).append((dt.strftime('%H:%M'), value))
        # Print a per-device, per-day, chronologically sorted on/off report.
        for k in sorted(data.keys()):
            device = data[k]
            print('Device {}:'.format(tint_yellow(k)))
            for d in sorted(device.keys()):
                # sort events of the day by their HH:MM string
                device[d].sort(key=lambda x: x[0])
                print('\t{}:'.format(tint_blue(d)))
                for (t, v) in device[d]:
                    if v:
                        print('\t\tAt {} the device was turned {}.'.format(tint_blue(t),
                                                                           tint_green('ON')))
                    else:
                        print('\t\tAt {} the device was turned {}.'.format(tint_blue(t),
                                                                           tint_red('OFF')))
|
998,233 | 3a13fbc2f4c6154474d162f30e6ccea766a288a3 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Distribution metadata for the gibberish '.strings' generator.
# NOTE(review): the distribution name is spelled 'gibbrish' while the URL and
# console script say 'gibberish' — presumably a typo, but renaming would
# change the published package identity; confirm with the maintainer first.
setup(name='gibbrish',
      version='1.0.0',
      description="Generate '.strings' file with long gibberish strings in order to test if your app is ready for translation.",
      author='Matan Lachmish',
      author_email='matan.lachmish@gmail.com',
      url='https://github.com/mlachmish/gibberish',
      packages=find_packages(),
      # console script: `gibberish` invokes generator/__main__.py:main
      entry_points={
          'console_scripts': [
              'gibberish = generator.__main__:main',
          ],
      },
      )
|
998,234 | 1ebcb5bdb61985d808247f4ef66b24c5df9e9f2b |
#Üç basamaklı bir tamsayının basamakları toplamını bulan bir program yazınız.
sayi = int(input("3 basamaklı sayı:"))
yuzler = int()
onlar = int()
birler = int()
fark = int()
yuzler = sayi // 100
fark = sayi - (yuzler*100)
onlar = fark // 10
birler = fark % 10
print("basamak toplamları:{}".format(yuzler + onlar + birler)) |
998,235 | 1132c5161ca7554f70a878af401d3b1e81eba2d3 | import pickle
def _load_history(tag):
    # Read one training-history pickle; `with` closes the handle (the
    # original's pickle.load(open(...)) leaked ten open file objects).
    with open("train_history_unet_comb3_mse_" + tag + "_10lvels.pkl", "rb") as fh:
        return pickle.load(fh)

# Training histories for each model variant, named by its loss weighting.
h_00 = _load_history("00")
h_20 = _load_history("20")
h_40 = _load_history("40")
h_80 = _load_history("80")
h_02 = _load_history("02")
h_04 = _load_history("04")
h_08 = _load_history("08")
h_22 = _load_history("22")
h_44 = _load_history("44")
h_88 = _load_history("88")
# CSV summary of the final-epoch validation metrics per model
# (POD is reported as 1 - probability of miss).
print("Model,MSE,POD,POFD")
print("M00,{:.4f},{:.4f},{:.4f}".format(h_00['val_mse'][-1], 1 - h_00['val_pom'][-1], h_00['val_pofd'][-1]))
print("M20,{:.4f},{:.4f},{:.4f}".format(h_20['val_mse'][-1], 1 - h_20['val_pom'][-1], h_20['val_pofd'][-1]))
print("M40,{:.4f},{:.4f},{:.4f}".format(h_40['val_mse'][-1], 1 - h_40['val_pom'][-1], h_40['val_pofd'][-1]))
print("M80,{:.4f},{:.4f},{:.4f}".format(h_80['val_mse'][-1], 1 - h_80['val_pom'][-1], h_80['val_pofd'][-1]))
print("M02,{:.4f},{:.4f},{:.4f}".format(h_02['val_mse'][-1], 1 - h_02['val_pom'][-1], h_02['val_pofd'][-1]))
print("M04,{:.4f},{:.4f},{:.4f}".format(h_04['val_mse'][-1], 1 - h_04['val_pom'][-1], h_04['val_pofd'][-1]))
print("M08,{:.4f},{:.4f},{:.4f}".format(h_08['val_mse'][-1], 1 - h_08['val_pom'][-1], h_08['val_pofd'][-1]))
print("M22,{:.4f},{:.4f},{:.4f}".format(h_22['val_mse'][-1], 1 - h_22['val_pom'][-1], h_22['val_pofd'][-1]))
print("M44,{:.4f},{:.4f},{:.4f}".format(h_44['val_mse'][-1], 1 - h_44['val_pom'][-1], h_44['val_pofd'][-1]))
print("M88,{:.4f},{:.4f},{:.4f}".format(h_88['val_mse'][-1], 1 - h_88['val_pom'][-1], h_88['val_pofd'][-1]))
# Everything below this exit() call is intentionally skipped.
exit()
print("----------------POD Opt-----------------------")
print("------------------MSE-------------------------")
print(['%.4f' % elem for elem in h_00['val_mse'][-1]])
print(['%.4f' % elem for elem in h_20['val_mse'][-1]])
print(['%.4f' % elem for elem in h_40['val_mse'][-1]])
print(['%.4f' % elem for elem in h_80['val_mse'][-1]])
print()
print("------------------POM------------------------")
print(['%.4f' % elem for elem in h_00['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_20['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_40['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_80['val_pom'][-10:]])
print()
print("------------------POFD-----------------------")
print(['%.4f' % elem for elem in h_00['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_20['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_40['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_80['val_pofd'][-10:]])
print()
print("----------------POFD Opt----------------------")
print("------------------MSE-------------------------")
print(['%.4f' % elem for elem in h_00['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_02['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_04['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_08['val_mse'][-10:]])
print()
print("------------------POM------------------------")
print(['%.4f' % elem for elem in h_00['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_02['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_04['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_08['val_pom'][-10:]])
print()
print("------------------POFD-----------------------")
print(['%.4f' % elem for elem in h_00['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_02['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_04['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_08['val_pofd'][-10:]])
print()
print("----------------POFD Opt----------------------")
print("------------------MSE-------------------------")
print(['%.4f' % elem for elem in h_00['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_02['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_04['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_08['val_mse'][-10:]])
print()
print("------------------POM------------------------")
print(['%.4f' % elem for elem in h_00['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_02['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_04['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_08['val_pom'][-10:]])
print()
print("------------------POFD-----------------------")
print(['%.4f' % elem for elem in h_00['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_02['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_04['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_08['val_pofd'][-10:]])
print()
print("----------------Comb Opt---------------------")
print("------------------MSE-------------------------")
print(['%.4f' % elem for elem in h_00['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_22['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_44['val_mse'][-10:]])
print(['%.4f' % elem for elem in h_88['val_mse'][-10:]])
print()
print("------------------POM------------------------")
print(['%.4f' % elem for elem in h_00['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_22['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_44['val_pom'][-10:]])
print(['%.4f' % elem for elem in h_88['val_pom'][-10:]])
print()
print("------------------POFD-----------------------")
print(['%.4f' % elem for elem in h_00['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_22['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_44['val_pofd'][-10:]])
print(['%.4f' % elem for elem in h_88['val_pofd'][-10:]])
|
998,236 | 1b423b19fd324018998b27eee5bbc1d3f3d2f990 | import os
import datetime
from passlib.hash import sha256_crypt
from flask import Flask, render_template, request, redirect, url_for, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine("postgres://tncdzhysojhnjn:e07995eb170d7115f2b7c503afa00117cb3319b7980df6654f0583751aebd960@ec2-3-216-129-140.compute-1.amazonaws.com:5432/dcc0kbu8eseuk2")
db = scoped_session(sessionmaker(bind=engine))
email = 'quangkhoi@gmail.com'
password = '123456'
#password = sha256_crypt.encrypt(password)
emails = db.execute("SELECT email , password FROM customers WHERE email= :email ",{"email":email}).fetchone()
print(sha256_crypt.encrypt(password))
#print(sha256_crypt.verify(password,emails.password)) |
998,237 | 1f25a24b32915a2c3d982f7087dc776eee072ccf | #!/usr/bin/env python
# Jim Mainprice, ARC
# September 2013
# Worcester Polytechnic Institute
#
# http://openrave.org/docs/latest_stable/command_line_tools/
# openrave-robot.py /your/path/to/your.robot.xml --info=joints
# On that page you can find more examples on how to use openrave-robot.py.
from openravepy import *
import roslib
import sys
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
import numpy
import time
from wpi_planning_utilities.rodrigues import *
from wpi_planning_utilities.TransformMatrix import *
from wpi_planning_utilities.str2num import *
from wpi_planning_utilities.TSR import *
from math import *
from copy import *
import os # for file operations
from base_wheel_turning import *
import rave2realhubo
class TrajectoryReader( BaseWheelTurning ):
    """Loads an ach-format whole-body trajectory from disk and plays it back in OpenRAVE."""

    # NOTE(review): `joint_mapping = {}` is a mutable default argument; it is
    # only assigned (never mutated) here, but replacing it with None + an
    # in-body default would be safer.
    def __init__(self,
                 frequency = 25,
                 joint_mapping = {},
                 HuboModelPath = '../../../../drchubo/drchubo_v2/robots/drchubo_v2.robot.xml',
                 WheelModelPath = '../../models/driving_wheel_tiny.robot.xml' ):
        BaseWheelTurning.__init__( self, HuboModelPath, WheelModelPath )
        # trajectory-file joint name -> OpenRAVE DOF index
        self.joint_mapping = joint_mapping
        # RaveTrajectory filled by LoadAchfile
        self.hubo_traj = None
        # playback period in seconds (frequency is in Hz)
        self.dt = 1 / float(frequency) # 20 Hz (0.05)
        self.execute_in_loop = True
        print "self.dt : " + str( self.dt )
        # Ach trajectory mapping for DRCHubo!!! It differs from the internal ros mapping
        # which is defined as a global parameter (joints) in the parameter server
        self.hubo_ach_traj_joint_names = { 0 : 'RHY' , 1 : 'RHR' , 2 : 'RHP' , 3 : 'RKP' , 4 : 'RAP' ,
                                           5 : 'RAR' , 6 : 'LHY' , 7 : 'LHR' , 8 : 'LHP' , 9 : 'LKP' ,
                                           10 : 'LAP' , 11 : 'LAR' , 12 : 'RSP' , 13 : 'RSR' , 14 : 'RSY' ,
                                           15 : 'REP' , 16 : 'RWY' , 17 : 'RWR' , 18 : 'RWP' , 19 : 'LSP' ,
                                           20 : 'LSR' , 21 : 'LSY' , 22 : 'LEP' , 23 : 'LWY' , 24 : 'LWR' ,
                                           25 : 'LWP' , 26 : 'NKY' , 27 : 'NK1' , 28 : 'NK2' , 29 : 'TSY' ,
                                           30 : 'RF1' , 31 : 'RF2' , 32 : 'RF3' , 33 : 'RF4' , 34 : 'RF5' ,
                                           35 : 'LF1' , 36 : 'LF2' , 37 : 'LF3' , 38 : 'LF4' , 39 : 'LF5' }
        return

    # Gets the joint mapping supposing each joint is one dof
    def SetJointMapping(self):
        """Rebuild joint_mapping from the robot model (joint name -> DOF index)."""
        self.joint_mapping = {}
        for j in self.robotid.GetJoints():
            self.joint_mapping[ j.GetName() ] = j.GetDOFIndex()
        return

    # Loads trajectory from file
    def LoadAchfile(self,fname):
        """Parse a whitespace-separated ach trajectory file into hubo_traj and play it.

        Each file line is one waypoint; column idx is the joint named by
        hubo_ach_traj_joint_names[idx]. Columns whose joint is absent from
        joint_mapping are ignored. Returns False for an empty file, True
        after playback.
        """
        print "parsing file"
        # open the file and reads the array
        f = open(fname,'r')
        array = []
        for line in f:
            array.append([float(x) for x in line.split()])
        f.close()
        if( len(array) == 0 ):
            print "Warning : empty trajectory"
            return False
        print "filing message"
        self.hubo_traj = RaveCreateTrajectory(self.robotid.GetEnv(),'')
        self.hubo_traj.Init( self.robotid.GetActiveConfigurationSpecification() )
        for line in array: # reads all lines in the file
            # ---------------------------------------
            # Fills position buffer
            q = [0.0] * len(self.joint_mapping)
            for idx in range( len(line) ):
                joint_name = self.hubo_ach_traj_joint_names[idx]
                try:
                    i = self.joint_mapping[ joint_name ]
                    #print i
                except KeyError:
                    # joint present in the file but not in the robot mapping
                    i = None
                if joint_name == "RF1" or joint_name == "RF2" or joint_name == "LF1" :
                    print joint_name + " , value : " + str( line[idx] )
                if i is not None:
                    q[i] = float(line[idx])
                else:
                    continue
            #print q
            self.hubo_traj.Insert(self.hubo_traj.GetNumWaypoints(),q)
        self.execute()
        return True

    # Plays the trajectory in openrave
    def execute(self):
        """Replay hubo_traj in the viewer at self.dt per waypoint; loops while execute_in_loop."""
        print "Play Back Trajectory!!!!"
        while True :
            for i in range(self.hubo_traj.GetNumWaypoints()):
                # get the waypoint values, this holds velocites, time stamps, etc
                data = self.hubo_traj.GetWaypoint(i)
                # extract the robot joint values only
                with self.env: # have to lock environment since accessing robot
                    q = self.hubo_traj.GetConfigurationSpecification().ExtractJointValues(data,self.robotid,self.robotid.GetActiveDOFIndices())
                    self.robotid.SetDOFValues(q)
                time.sleep( self.dt )
            if not self.execute_in_loop :
                break
        return
def main():
    """Parse command-line flags (-f FILE, -or, -ach) and play a trajectory.

    NOTE(review): the ortraj/achtraj flags are only used for the presence
    check below; robot_name is assigned but never used.
    """
    ortraj = False
    achtraj = False
    filename = None
    if(len(sys.argv) >= 2):
        for index in range(1,len(sys.argv)):
            if(sys.argv[index] == "-f" and index+1<len(sys.argv)):
                filename = str(sys.argv[index+1])
            elif sys.argv[index] == "-or" :
                ortraj = True
            elif sys.argv[index] == "-ach":
                achtraj = True
    if not ortraj and not achtraj :
        print "specify the format!!!"
        return
    handles = []
    #files = ["movetraj0.txt","movetraj1.txt","movetraj7.txt","movetraj8.txt"]
    robot_name = "drchubo"
    player = TrajectoryReader()
    player.SetViewer(True)
    player.SetStopKeyStrokes(True)
    player.SetProblems()
    player.StartViewerAndSetValvePos( handles )
    #player.Playback()
    #player.PlaybackFiles(files)
    player.SetJointMapping()
    # Fall back to a default trajectory when no -f argument was given.
    if filename is None :
        player.LoadAchfile("../../trajectories/open_hands.traj")
    else :
        player.LoadAchfile( filename )
    player.KillOpenrave()
    return

if __name__ == "__main__":
    main()
|
998,238 | 815f621c38e3b56a1edb8c6fc6f8f2ea8526cbf1 | if PRESENT( UKDPP_Picture_Ratio ):
CHECK( UKDPP_Picture_Ratio in [{"Numerator":4, "Denominator":3},
{"Numerator":14, "Denominator":9},
{"Numerator":15, "Denominator":9},
{"Numerator":16, "Denominator":9},
{"Numerator":37, "Denominator":20},
{"Numerator":21, "Denominator":9},
{"Numerator":12, "Denominator":5}] )
|
998,239 | c28e82d86d1b63e0c52b0884b486e04015b627de | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
import json
import requests
from urllib.request import urlopen
# Configure a headless Chrome session with a 1920x1080 virtual screen.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
# NOTE(review): "chrome_dirver" is a typo for "chrome_driver" (local name only).
chrome_dirver = os.getcwd() + "/chromedriver"
# NOTE(review): the driver is started but never used below -- the JSON
# endpoint is fetched with urlopen instead; presumably leftover scaffolding.
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_dirver)
siteName = "http://american-driver.herokuapp.com/magazines.json"
#driver.get(siteName)
# Fetch the JSON payload and print every key/value pair of every record.
site = urlopen(siteName)
data = json.load(site)
for element in data:
    for iterate in element:
        print("Key: {}, Value: {}".format(iterate, element[iterate]))
|
998,240 | f987e34ff0cf4e1c4ced1f42da9d78c9dac8253b | #-*- coding:utf-8 -*-
import os,sys
# Name of the directory that holds the modules to load dynamically.
path = 'load'
# Make the plug-in directory importable.
sys.path.append(path)
files = os.listdir(path)  # candidate plug-in file names
# Dynamically load the plug-in .py modules.
def on_load(file_list=None):
    """Import every ``load_*`` module and collect their ``Regist`` handlers.

    Parameters
    ----------
    file_list : list of str, optional
        File names to scan.  Defaults to the module-level ``files`` listing
        (the contents of the ``load`` directory).

    Returns
    -------
    dict
        Maps each handler's ``onRegist()`` key to its ``Regist`` instance.
    """
    if file_list is None:
        file_list = files
    loaded_modules = []
    for load_file in file_list:
        # Strip the extension; only modules named load_*.py count as plug-ins.
        module_name = str(load_file).split('.')[0]
        if module_name.startswith('load_'):
            loaded_modules.append(__import__(module_name))
    classes = {}
    for module in loaded_modules:
        # Each plug-in module must expose a Regist class.
        _class = getattr(module, 'Regist')()
        classes[_class.onRegist()] = _class
    return classes
|
998,241 | 1544103293a710ed68b7dd94b411796bae7d970d | import numpy as np
from collections import deque
## Day 1
# This datastructure keeps track of the current top-3 elfs with most calories.
# Invariant: kept in ascending order, so top_three[2] is the overall maximum.
top_three = deque([0, 0, 0], maxlen=3)
def evaluate_elf(calories):
    """Fold one elf's calorie total into the ascending top-3 deque."""
    smallest, middle, largest = top_three
    if calories > largest:
        # Bigger than everything seen so far: appending drops the old
        # minimum automatically (maxlen=3) and keeps the deque ascending.
        top_three.append(calories)
    elif calories > middle:
        # New second place: evict the minimum, slot the value in the middle.
        top_three.popleft()
        top_three.insert(1, calories)
    elif calories > smallest:
        # Only beats the current minimum: overwrite it in place.
        top_three[0] = calories
# This keeps track of the calories carried by the elf we are currently
# counting.
calories = 0
# Go through the puzzle input line-by-line
with open('input_day1.txt') as f:
    for line in f:
        # If the line is empty, we start counting for the next elf.
        # Evaluate the calories carried by the current elf before
        # starting counting for the next elf.
        if line == '\n':
            evaluate_elf(calories)
            calories = 0
        else:
            # We are still counting calories for the current elf.
            calories += int(line)
# Don't forget the final elf!
evaluate_elf(calories)
# top_three is ascending, so index 2 holds the single largest total.
print('Day 1, part 1:', top_three[2])
print('Day 1, part 2:', sum(top_three))
## Day 2
# points[opponent][me] is the round-outcome score (0 loss / 3 draw / 6 win).
points = np.array([[3, 6, 0], [0, 3, 6], [6, 0, 3]])
# Decode the 'A'-'C' / 'X'-'Z' columns to 0-2 with byte arithmetic.
strat = np.loadtxt('input_day2.txt', dtype='c').view('uint8') - [ord('A'), ord('X')]
print('Day 2, part 1:', (points[(strat[:, 0], strat[:, 1])] + strat[:, 1] + 1).sum())
# Part 2: column 1 is the desired outcome; choose[opponent][outcome] is the
# shape score (1 rock / 2 paper / 3 scissors) we must play.
choose = np.array([[3, 1, 2], [1, 2, 3], [2, 3, 1]])
print('Day 2, part 2:', (choose[(strat[:, 0], strat[:, 1])] + strat[:, 1] * 3).sum())
## Day 3
def bitmask(s):
    """Encode an item string as a 52-bit set: bits 0-25 = a-z, 26-51 = A-Z."""
    mask = 0
    for char in s:
        code = ord(char)
        # ASCII bit 0x20 distinguishes lower case from upper case letters.
        if code & 0b100000:
            offset = code - ord('a')
        else:
            offset = code - ord('A') + 26
        mask |= 1 << offset
    return mask
answer = 0
with open('input_day3.txt') as f:
    for line in f:
        line = line.strip()
        # Split the rucksack into its two equally-sized compartments.
        compartment1 = bitmask(line[:len(line)//2])
        compartment2 = bitmask(line[len(line)//2:])
        # Exactly one bit survives the AND: the shared item.
        common_item = compartment1 & compartment2
        # The position of that single bit is the item's 1-based priority.
        answer += common_item.bit_length()
print('Day 3, part 1:', answer)
answer = 0
with open('input_day3.txt') as f:
    # Walk the file three lines (one elf group) at a time.
    for group in zip(*[iter(f)] * 3):
        # 52 set bits: initially every item type is a badge candidate.
        badge_options = 0b1111111111111111111111111111111111111111111111111111
        for rucksack in group:
            badge_options &= bitmask(rucksack.strip())
        # A single bit remains; its position is the badge's priority.
        answer += badge_options.bit_length()
# BUG FIX: this is the part-2 answer; the original printed it twice with a
# copy-pasted "part 1" label.
print('Day 3, part 2:', answer)
## Day 4
answer1 = 0
answer2 = 0
with open('input_day4.txt') as f:
    for line in f:
        # Each line is "a-b,c-d": two section-ID ranges.
        [(from1, to1), (from2, to2)] = [allotment.split('-') for allotment in line.strip().split(',')]
        from1 = int(from1)
        to1 = int(to1)
        from2 = int(from2)
        to2 = int(to2)
        # Part 1: one range fully contains the other.
        if (from1 >= from2 and to1 <= to2) or (from2 >= from1 and to2 <= to1):
            answer1 += 1
        # Part 2: the ranges overlap at all.
        # BUG FIX: the original tested "from1 <= 2" -- a typo for "from1 <= to2".
        if (from1 <= to2 and to1 >= from2) or (from2 <= to1 and to2 >= from1):
            answer2 += 1
print('Day 4, part 1:', answer1)
# BUG FIX: the label said "part 1" for the part-2 answer.
print('Day 4, part 2:', answer2)
## Day 5
def init_stacks(f, n_stacks=9, n_rows=8):
    """Parse the crate drawing at the top of the puzzle input.

    Reads ``n_rows`` drawing lines plus the stack-number line and the blank
    separator from ``f``.  Defaults match this puzzle input (9 stacks,
    8 rows of crates), but are now parameters so the parser generalizes.

    Returns a list of stacks, each a list of crate letters with the bottom
    crate first.
    """
    stacks = [[] for _ in range(n_stacks)]
    for _ in range(n_rows):
        # Crate letters sit at columns 1, 5, 9, ... of each drawing line.
        for stack, crate in zip(stacks, f.readline()[1::4]):
            if crate != ' ':
                # Rows are read top-down, so insert at the bottom.
                stack.insert(0, crate)
    f.readline()  # stack-number line
    f.readline()  # blank separator line
    return stacks
with open('input_day5.txt') as f:
    stacks = init_stacks(f)
    # Part 1: the CrateMover 9000 moves crates one at a time.
    while line := f.readline():
        _, amount, _, stack_from, _, stack_to = line.split()
        for _ in range(int(amount)):
            stacks[int(stack_to) - 1].append(stacks[int(stack_from) - 1].pop())
print('Day 5, part 1:', ''.join([stack[-1] for stack in stacks]))
with open('input_day5.txt') as f:
    stacks = init_stacks(f)
    # Part 2: the CrateMover 9001 moves the whole slice at once, keeping order.
    while line := f.readline():
        _, amount, _, stack_from, _, stack_to = line.split()
        stacks[int(stack_to) - 1].extend(stacks[int(stack_from) - 1][-int(amount):])
        stacks[int(stack_from) - 1] = stacks[int(stack_from) - 1][:-int(amount)]
print('Day 5, part 2:', ''.join([stack[-1] for stack in stacks]))
## Day 6
# The datastream is a single line of characters.
with open('input_day6.txt') as f:
    buffer = f.readline().strip()
def find_marker(marker_len):
marker_start = 0
for marker_end in range(len(buffer)):
i = marker_end - 1
while i >= marker_start and buffer[i] != buffer[marker_end]:
i -= 1
marker_start = i + 1
if marker_end - marker_start == marker_len:
return marker_end
# Positions just past the first run of 4 (packet) / 14 (message) distinct chars.
print('Day 6, part 1:', find_marker(4))
print('Day 6, part 2:', find_marker(14))
## Day 7
# Total size of the files (directly or indirectly) under each directory path.
dir_sizes = dict()
dir_stack = list()
total_size = 0
with open('input_day7.txt') as f:
    for line in f:
        # Replay the shell session, tracking the current directory stack.
        match line.strip().split():
            case ['$', 'cd', '/']:
                dir_stack = list()
            case ['$', 'cd', '..']:
                dir_stack.pop()
            case ['$', 'cd', dir_name]:
                dir_stack.append(dir_name)
            case ['$', 'ls']:
                pass
            case ['dir', dir_name]:
                pass
            case [size, file_name]:
                # A file adds its size to every ancestor directory.
                dir_path = ''
                for dir_name in dir_stack:
                    dir_path += '/' + dir_name
                    dir_sizes[dir_path] = dir_sizes.get(dir_path, 0) + int(size)
                total_size += int(size)
# We must free enough space for the 30M update on a 70M disk.
size_needed = 30_000_000 - (70_000_000 - total_size)
print('Day 7, part 1:', sum([size for size in dir_sizes.values() if size <= 100_000]))
print('Day 7, part 2:', min([size for size in dir_sizes.values() if size >= size_needed]))
## Day 8
import numpy as np
with open('input_day8.txt') as f:
    forest = np.array([[int(x) for x in list(line.strip())] for line in f])
def look_along(x):
    # A tree is visible from this side if it is taller than the running
    # maximum of everything in front of it (-1 so the edge is always visible).
    return x > np.hstack((-1, np.maximum.accumulate(x)[:-1]))
# Look from all four sides by flipping/transposing the grid.
is_visible = np.apply_along_axis(look_along, 0, forest)
is_visible |= np.apply_along_axis(look_along, 1, forest)
is_visible |= np.apply_along_axis(look_along, 0, forest[::-1, :])[::-1, :]
is_visible |= np.apply_along_axis(look_along, 1, forest[:, ::-1])[:, ::-1]
print('Day 8, part 1:', is_visible.sum())
def compute_scenic_score(candidate_tree):
    """Product of the four viewing distances from candidate_tree."""
    height = forest[candidate_tree]
    row, col = candidate_tree
    # Trees on the edge always score 0 (one viewing distance is zero).
    if row == 0 or col == 0 or row == forest.shape[0] - 1 or col == forest.shape[1] - 1:
        return 0
    score = (np.maximum.accumulate(forest[row, col + 1:-1]) < height).sum() + 1
    score *= (np.maximum.accumulate(forest[row + 1:-1, col]) < height).sum() + 1
    score *= (np.maximum.accumulate(forest[row, col - 1:0:-1]) < height).sum() + 1
    score *= (np.maximum.accumulate(forest[row - 1:0:-1, col]) < height).sum() + 1
    return score
# Only visible trees are scored -- the best tree is presumably among them.
scenic_scores = [compute_scenic_score(tree) for tree in zip(*np.nonzero(is_visible))]
print('Day 8, part 2:', np.max(scenic_scores))
## Day 9
# Simulate a 10-knot rope; tracking all knots answers both parts at once
# (part 1 is knot 1, part 2 is knot 9).
rope_len = 10
pos = [[0, 0] for _ in range(rope_len)]
pos_hist = [[tuple(p) for p in pos]]
with open('input_day9.txt') as f:
    for stepno, line in enumerate(f):
        direction, amount = line.split()
        amount = int(amount)
        for _ in range(amount):
            # Update the position of the head
            match direction:
                case 'U':
                    pos[0][1] += 1
                case 'D':
                    pos[0][1] -= 1
                case 'L':
                    pos[0][0] -= 1
                case 'R':
                    pos[0][0] += 1
            # Each knot follows the previous knot
            for knot in range(1, rope_len):
                # Only update position of a knot if the distance to predecessor is 2
                if (abs(pos[knot - 1][1] - pos[knot][1]) == 2) or (abs(pos[knot - 1][0] - pos[knot][0]) == 2):
                    # Update postition of a know: step at most one cell per axis
                    # towards the predecessor.
                    pos[knot][0] += min(max(pos[knot - 1][0] - pos[knot][0], -1), 1)
                    pos[knot][1] += min(max(pos[knot - 1][1] - pos[knot][1], -1), 1)
            # Keep track of the history of the positions of the knots
            pos_hist.append([tuple(p) for p in pos])
# Compute number of unique positions
print('Day 9, part 1:', len(set([p[1] for p in pos_hist])))
print('Day 9, part 2:', len(set([p[-1] for p in pos_hist])))
def print_pos(pos):
    """Debug helper: draw the knots on a small 6x5 grid (head as 'H')."""
    for y in range(4, -1, -1):
        for x in range(6):
            for i, p in enumerate(pos):
                if p[0] == x and p[1] == y:
                    if i == 0:
                        print('H', end='')
                    else:
                        print(str(i), end='')
                    break
            else:
                print('.', end='')
        print()
    print()
## Day 10
def cpu():
    """Generates the value of the x register for each cycle."""
    x = 1
    with open('input_day10.txt') as f:
        for line in f:
            instruction, *param = line.strip().split()
            match instruction, param:
                case 'noop', []:
                    yield x
                case 'addx', [val]:
                    # addx takes two cycles; x changes only after both.
                    yield x
                    yield x
                    x += int(val)
# Part 1: sum of cycle * x at the six sampled cycles.
print('Day 10, part 1:', sum([cycle * x for cycle, x in enumerate(cpu(), 1)
                              if cycle in {20, 60, 100, 140, 180, 220}]))
print('Day 10, part 2:')
# Part 2: draw the 40x6 CRT; a pixel lights when the 3-wide sprite overlaps it.
output = iter(cpu())
for y in range(6):
    for x in range(40):
        if (x - 1) <= next(output) <= (x + 1):
            print('#', end='')
        else:
            print('.', end='')
    print()
## Day 11
from collections import deque
import numpy as np
# Parallel lists indexed by monkey number.
items = []
operations = []
divisible_by = []
if_divisible_throw_to_monkey = []
if_not_divisible_throw_to_monkey = []
with open('input_day11.txt') as f:
    try:
        while True:
            assert next(f).startswith('Monkey')
            items.append(deque([int(x) for x in next(f).split('  Starting items: ')[1].split(', ')]))
            operation_str = next(f).split('  Operation: new = ')[1].strip()
            # eval on trusted puzzle text; would be unsafe for external input.
            operations.append(np.frompyfunc(eval(f'lambda old: {operation_str}'), 1, 1))
            divisible_by.append(int(next(f).split('  Test: divisible by ')[1]))
            if_divisible_throw_to_monkey.append(int(next(f).split('    If true: throw to monkey ')[1]))
            if_not_divisible_throw_to_monkey.append(int(next(f).split('    If false: throw to monkey ')[1]))
            next(f)
    except StopIteration:
        pass
# At this point, each item is represented by its worry level. However, this
# representation will not work for part 2. We convert the worry value into a
# tuple that contains the remainder for each value we want to test the worry
# level against.
for monkey_items in items:
    for i in range(len(monkey_items)):
        monkey_items[i] = np.array([monkey_items[i] % val for val in divisible_by])
n_monkeys = len(items)
n_items_inspected = np.zeros(n_monkeys, dtype='int64')
# NOTE(review): 10_000 rounds is the part-2 setting; only part 2 is printed.
n_rounds = 10_000
for _ in range(n_rounds):
    for monkey in range(n_monkeys):
        while len(items[monkey]) > 0:
            # Monkey inspects an item
            item = items[monkey].popleft()
            # This causes the worry level of the item to be modified
            item = operations[monkey](item)
            # After the operation performed by the monkey, we can reduce the
            # item representation down again to the remainder for each value we
            # want to test against.
            item %= divisible_by
            # Performing the test is now a lookup
            if item[monkey] == 0:
                items[if_divisible_throw_to_monkey[monkey]].append(item)
            else:
                items[if_not_divisible_throw_to_monkey[monkey]].append(item)
            # Computing the answer to the puzzle along the way
            n_items_inspected[monkey] += 1
print('Day 11, part 2:', np.multiply.reduce(np.sort(n_items_inspected)[-2:]))
## Day 12
import numpy as np
from collections import deque
# Read the puzzle input and make a matrix containing the height for each
# location.
height = []
with open('input_day12.txt') as f:
    for y, line in enumerate(f):
        height.append([ord(x) - ord('a') for x in line.strip()])
        if 'S' in line:
            x = line.index('S')
            start_loc = (y, x)
        if 'E' in line:
            x = line.index('E')
            end_loc = (y, x)
height = np.array(height)
# Start and end locations have pre-defined heights
height[start_loc] = 0
height[end_loc] = 25
def find_route(start_loc, slope_criterion, stop_criterion):
    """Find a route that fulfills the given criteria.

    Breadth-first search over the height grid, so the first location that
    satisfies stop_criterion is reached via a shortest route.

    Parameters
    ----------
    start_loc : (int, int)
        The y, x locations of the start of the route.
    slope_criterion : function (int -> bool)
        Function that checks whether the slope of the land is acceptable.
    stop_criterion : function (int, int -> bool)
        Function that checks whether we've reached a suitable destination.

    Returns
    -------
    dist : int
        The distance travelled.
    """
    # These are the locations we need to evaluate next, along with the
    # distances from the starting location.
    to_eval = deque([(start_loc, 0)])
    # These are the locations we have already evaluated
    seen = set([start_loc])
    # These are the potential neighbours of a location
    dirs = [(-1, 0), (+1, 0), (0, -1), (0, +1)]
    curr_loc = start_loc
    while not stop_criterion(curr_loc):
        # Grab the next location to evaluate
        curr_loc, dist = to_eval.popleft()
        # Check all the neighbours
        for d in dirs:
            new_loc = (curr_loc[0] + d[0], curr_loc[1] + d[1])
            # Check bounds
            if new_loc[0] < 0 or new_loc[0] >= height.shape[0]:
                continue
            if new_loc[1] < 0 or new_loc[1] >= height.shape[1]:
                continue
            # Don't re-visit locations we've already seen.
            if new_loc in seen:
                continue
            # Check whether the slope is ok
            if not slope_criterion(height[new_loc] - height[curr_loc]):
                continue
            # Ok, let's evaluate this location
            to_eval.append((new_loc, dist + 1))
            # Mark this location so we never re-visit it.
            seen.add(new_loc)
    return dist
# Part 2 searches backwards from the end, so the slope test is mirrored.
print('Day 12, part 1:', find_route(start_loc, lambda slope: slope <= 1, lambda loc: loc == end_loc))
print('Day 12, part 2:', find_route(end_loc, lambda slope: slope >= -1, lambda loc: height[loc] == 0))
## Day 13
from functools import cmp_to_key
packets = []
with open('input_day13.txt') as f:
    for line in f:
        if len(line.strip()) > 0:
            # Packet literals are valid Python lists; eval on trusted puzzle
            # input only (would be a risk for untrusted data).
            packets.append(eval(line))
def cmp(a, b):
    """Three-way compare of two packets (nested lists/ints), day-13 rules.

    Negative when a orders before b, positive when after, zero when equal.
    """
    for left, right in zip(a, b):
        left_is_int = isinstance(left, int)
        right_is_int = isinstance(right, int)
        if left_is_int and right_is_int:
            verdict = left - right
        elif left_is_int:
            # Mixed types: wrap the integer in a list and retry.
            verdict = cmp([left], right)
        elif right_is_int:
            verdict = cmp(left, [right])
        else:
            verdict = cmp(left, right)
        if verdict:
            return verdict
    # All shared elements equal: the shorter list orders first.
    return len(a) - len(b)
# Part 1: packets come in pairs; sum the 1-based indices of ordered pairs.
as_pairs = zip(packets[:-1:2], packets[1::2])
print('Day 13, part 1:', sum([i for i, (a, b) in enumerate(as_pairs, 1)
                              if cmp(a, b) < 0]))
# Part 2: sort everything together with the two divider packets and multiply
# their 1-based positions.
dividers = ([[2]], [[6]])
packets.extend(dividers)
packets = sorted(packets, key=cmp_to_key(cmp))
print('Day 13, part 2:', (packets.index(dividers[0]) + 1) * (packets.index(dividers[1]) + 1))
## Day 14
import numpy as np
# This is the map of where walls are and sand is. Walls will be marked with a 1
# and sand with a 2, so we can make pretty plots later.
grid = np.zeros((1000, 1000), dtype='uint8')
# Parse the puzzle input, draw the walls inside the grid
with open('input_day14.txt') as f:
    for line in f:
        coords = [tuple([int(x) for x in coord.split(',')]) for coord in line.strip().split(' -> ')]
        start = coords[0]
        for end in coords[1:]:
            # Axis-aligned wall segment between consecutive coordinates.
            grid[min(start[1], end[1]):max(start[1], end[1]) + 1, min(start[0], end[0]):max(start[0], end[0]) + 1] = 1
            start = end
# Compute where the abyss starts
abyss_start = np.nonzero(grid)[0].max() + 1
# Keep track of the number of grains of sand that have come to rest
n_rest = 0
# Start dropping grains of sand.
# We'll work on a copy of the grid so we can re-use it for part 2.
grid_with_sand = grid.copy()
# Instead of dropping grains from the very top all of the time, we drop the
# next grain from the last position the previous grain was still
# falling/rolling. Hence, we keep track of the route the current grain of sand
# is taking.
route = [(500, 0)]
for _ in range(10_000):
    x, y = route.pop()
    while y < abyss_start:
        if grid_with_sand[y + 1, x] == 0:
            # Falling down
            y += 1
        elif grid_with_sand[y + 1, x - 1] == 0:
            # Rolling down and left
            y += 1
            x -= 1
        elif grid_with_sand[y + 1, x + 1] == 0:
            # Rolling down and right
            y += 1
            x += 1
        else:
            # Sand has come to rest
            grid_with_sand[y, x] = 2
            n_rest += 1
            route.pop()
            break
        route.append((x, y))
    else:
        # Sand is falling into the abyss
        break
print('Day 14, part 1:', n_rest)
# For part 2, lay down a floor
floor = abyss_start + 1
grid[floor, :] = 1
# The initial pile of sand is a big triangle
sand = np.zeros_like(grid)
for y in range(floor):
    sand[y, 500-y:500+y+1] = 2
# Go line by line, chipping away at the big pile of sand
no_sand = np.zeros_like(grid[0])
for y in range(floor):
    # Shrink the ranges of no sand by one from each end,
    # so:
    # #####   #####
    # becomes:
    #  ###     ###
    for start, end in np.flatnonzero(np.diff(no_sand)).reshape(-1, 2):
        no_sand[start + 1] = 0
        no_sand[end] = 0
    # Add any walls on top of that
    no_sand[grid[y] > 0] = 1
    # Carve the no_sand out of the big heap of sand
    sand[y, no_sand > 0] = 0
# sand cells are marked with value 2, hence the division by 2.
print('Day 14, part 2:', int(sand.sum() / 2))
## Day 15
import re
from tqdm import tqdm
from numba import jit
def range_union(ranges):
    """Collapse a list of ranges into a sorted list of disjoint ranges."""
    merged = []
    for current in sorted(ranges, key=lambda rng: rng.start):
        if merged and merged[-1].stop >= current.start:
            # Overlapping or touching: grow the previous range in place.
            previous = merged[-1]
            merged[-1] = range(previous.start, max(previous.stop, current.stop))
        else:
            merged.append(current)
    return merged
def positions(sensor_loc, beacon_loc, y):
    """Range of x coordinates the sensor covers on row ``y``.

    The sensor's reach is the Manhattan distance to its nearest beacon; the
    covered slice shrinks by one per row away from the sensor and becomes an
    empty range once the row is out of reach.
    """
    sensor_x, sensor_y = sensor_loc
    beacon_x, beacon_y = beacon_loc
    reach = abs(beacon_x - sensor_x) + abs(beacon_y - sensor_y)
    reach -= abs(y - sensor_y)
    if reach < 0:
        return range(sensor_x, sensor_x)  # empty range
    return range(sensor_x - reach, sensor_x + reach + 1)
sensor_beacon_pairs = []
with open('input_day15.txt') as f:
    for line in f:
        sensor_x, sensor_y, beacon_x, beacon_y = re.match(r'Sensor at x=(-?\d+), y=(-?\d+): closest beacon is at x=(-?\d+), y=(-?\d+)', line).groups()
        sensor_beacon_pairs.append(((int(sensor_x), int(sensor_y)), (int(beacon_x), int(beacon_y))))
# Part 1: count positions seen by any sensor on row y = 2,000,000.
y = 2_000_000
ranges_seen = range_union([positions(sensor_loc, beacon_loc, y=y)
                           for sensor_loc, beacon_loc in sensor_beacon_pairs])
# Apparently, sensors and beacons along the y-coordinate don't count?
other_stuff = []
other_stuff = set([beacon_loc[0] for _, beacon_loc in sensor_beacon_pairs if beacon_loc[1] == y])
other_stuff |= set([sensor_loc[0] for _, sensor_loc in sensor_beacon_pairs if sensor_loc[1] == y])
print('Day 15, part 1:', sum([r.stop - r.start for r in ranges_seen]) - len(other_stuff))
# Part 2: scan every row; the hidden beacon is the single gap in coverage.
# Wait for a loooong time
for y in tqdm(range(4_000_000), ncols=80):
    ranges_seen = [positions(sensor_loc, beacon_loc, y=y)
                   for sensor_loc, beacon_loc in sensor_beacon_pairs]
    ranges_seen = [r for r in ranges_seen if r.start <= 4_000_000 and r.stop >= 0]
    ranges_seen = range_union(ranges_seen)
    # A gap splits the union; the first range then stops before x=4,000,000.
    if len(ranges_seen) > 0 and ranges_seen[0].stop <= 4_000_000:
        print('Day 15, part 2:', ranges_seen[0].stop * 4_000_000 + y)
        break
## Day 16
import re
from math import inf
flow_rates = dict()
tunnels = dict()
with open('input_day16.txt') as f:
    for line in f:
        room, flow_rate, connections = re.match(r'Valve ([A-Z][A-Z]) has flow rate=(\d+); tunnels? leads? to valves? ([A-Z, ]+)', line).groups()
        flow_rate = int(flow_rate)
        # Valves with a flow rate of 0 don't count
        if flow_rate > 0:
            flow_rates[room] = flow_rate
        tunnels[room] = connections.split(', ')
caves = tunnels.keys()
valves = set(flow_rates.keys())
# Compute all-to-all distances between the valves (Floyd-Warshall).
D = {start_cave: {to_cave: 0 if to_cave == start_cave else inf for to_cave in caves} for start_cave in caves}
for start, destinations in tunnels.items():
    for d in destinations:
        D[start][d] = 1
for cave1 in caves:
    for cave2 in caves:
        for cave3 in caves:
            D[cave2][cave3] = min(D[cave2][cave3], D[cave2][cave1] + D[cave1][cave3])
# Memoizes best_route results keyed on (position, valves left, time, elephant).
cache = dict()
def best_route(my_pos, valves_left, time_left=30, elephant_present=False):
    """Best total pressure released from my_pos with the given valves/time."""
    if len(valves_left) == 0 or time_left <= 0:
        return 0
    situation = (my_pos, tuple(valves_left), time_left, elephant_present)
    if situation in cache:
        return cache[situation]
    scores = []
    # Either we go and turn on another valve...
    for next_valve in valves_left:
        time_spent = D[my_pos][next_valve] + 1
        score = max(0, flow_rates[next_valve] * (time_left - time_spent))
        score += best_route(next_valve, valves_left - set([next_valve]), time_left - time_spent, elephant_present)
        scores.append(score)
    # ...or we let the elephant take it from here
    if elephant_present:
        scores.append(best_route('AA', valves_left, time_left=26, elephant_present=False))
    cache[situation] = max(scores)
    return max(scores)
print('Day 16, part 1:', best_route('AA', set(valves), time_left=30))
print('Day 16, part 2:', best_route('AA', set(valves), time_left=26, elephant_present=True))
## Day 17
import numpy as np
from itertools import cycle
from time import sleep
def print_field(field):
    """Debug helper: draw the tetris field (1 = rock, 2 = sand/marker)."""
    for y in range(np.nonzero(field)[0].min(), len(field)):
        for x in range(7):
            if field[y, x] == 1:
                print('█', end='')
            elif field[y, x] == 2:
                print('▒', end='')
            else:
                print(' ', end='')
        print()
    print()
    print()
    print()
    print()
# The five falling rock shapes, in puzzle order.
rock_types = [
    np.array([[1, 1, 1, 1]]),
    np.array([[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]),
    np.array([[0, 0, 1],
              [0, 0, 1],
              [1, 1, 1]]),
    np.array([[1],
              [1],
              [1],
              [1]]),
    np.array([[1, 1],
              [1, 1]]),
]
# The rock types and the jet directions cycle
rock_types = cycle([t.astype('bool') for t in rock_types])
with open('input_day17.txt') as f:
    jet_directions = cycle(list(f.readline().strip()))
# This is the tetris field we'll be filling up.
# Whenever we run out of space, we will double its size.
field = np.zeros((100, 7), dtype='bool')
# Lay down a floor so the rocks have something to fall onto.
field[-1, :] = True
def hits(rock, y, x):
    '''Does the given rock shape at the given coordinates overlap with another
    rock (or the floor)?'''
    return np.any(field[y:y + rock.shape[0], x:x + rock.shape[1]] & rock)
# Lots of bookkeeping to be done during the main loop
n_rocks_thrown = 0
field_height = 0
height_after_each_rock = list()
n_rocks_thrown_to_reach_height = dict()
# Start throwing down rocks
while True:
    rock = next(rock_types)
    # The rock starts 3 lines above the current field. The y-coordinates are a
    # bit wonky in this simulation, as they also serve as indexes in the field
    # array, hence grow larger downwards.
    y = len(field) - 1 - field_height - 3 - rock.shape[0]
    # The rock starts 2 spots to the right of the left wall
    x = 2
    # Check if we have run out of space in the field.
    if y < 0:
        # Double the field size
        y += len(field)
        field = np.vstack((np.zeros_like(field), field))
    # The rock is falling down
    while True:
        # Move the rock sideways if it doesn't hit anything
        match next(jet_directions):
            case '<':
                next_x = max(0, x - 1)
            case '>':
                next_x = min(7 - rock.shape[1], x + 1)
        if not hits(rock, y, next_x):
            x = next_x
        # Move the rock downwards if it doesn't hit anything
        if not hits(rock, y + 1, x):
            y += 1
        else:
            # Rock has come to rest
            break
    # Fill in the shape of the rock in the field
    field[y:y + rock.shape[0], x:x + rock.shape[1]] |= rock
    # Update bookkeeping stuff
    n_rocks_thrown += 1
    field_height = len(field) - np.nonzero(field)[0].min() - 1
    height_after_each_rock.append(field_height)
    # A rock can add multiple rows at once. First repeat the last value until
    # we reach the current height...
    if len(n_rocks_thrown_to_reach_height) > 0:
        for j in range(list(n_rocks_thrown_to_reach_height.keys())[-1], field_height):
            n_rocks_thrown_to_reach_height[j] = list(n_rocks_thrown_to_reach_height.values())[-1]
    # ...then add the value for the current height
    n_rocks_thrown_to_reach_height[field_height] = n_rocks_thrown
    if n_rocks_thrown == 2022:
        print('Day 17, part 1:', field_height)
    # Cycle detection using Floyd's hare and tortoise algorithm.
    # Only kicks in after we've collected enough rows.
    if n_rocks_thrown > 1000:
        # The "hare" position is the row directly under the rock that was just
        # placed.
        hare = y + rock.shape[0]
        hare_height = len(field) - 1 - hare
        # The "tortoise" position at half the height of the "hare"
        tortoise_height = hare_height // 2
        tortoise = len(field) - 1 - tortoise_height
        # This is the magic of the algorithm: if the area surrounding the hare
        # position matches the area surrounding the tortoise position, we have
        # detected a cycle! Here, we use an area of size 20 (probably overfill).
        if np.array_equal(field[hare:hare+20], field[tortoise:tortoise+20]):
            break  # We found the cycle. That's all we need from the simulation.
# Number of rocks the elephants asked us to simulate
target_n_rocks = 1_000_000_000_000
# Length of the cycle in terms of number of rocks
cycle_n_rocks = n_rocks_thrown_to_reach_height[hare_height] - n_rocks_thrown_to_reach_height[tortoise_height]
# Length of the cycle in terms of rows in the playing field
cycle_n_rows = tortoise - hare
# To compute the answer to part 2, we first see how many complete cycles could
# be completed
part2_ans = target_n_rocks // cycle_n_rocks * cycle_n_rows
# The number of rocks needed before the cycling part and after the cycling part
# can be conveniently computed using a single modulo. See how many rows they
# produce.
part2_ans += height_after_each_rock[target_n_rocks % cycle_n_rocks - 1]
print('Day 17, part 2:', part2_ans)
## Day 18
from collections import deque
import numpy as np
# The six axis-aligned neighbour offsets of a unit cube.
deltas = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]
def count_open_sides(voxels):
    """Count cube faces (voxels == 1) that have no neighbouring cube."""
    cubes = set(zip(*np.where(voxels == 1)))
    exposed = 0
    for x, y, z in cubes:
        for dx, dy, dz in deltas:
            if (x + dx, y + dy, z + dz) not in cubes:
                exposed += 1
    return exposed
# 22^3 grid with a one-voxel margin so flood fill can wrap around the blob.
voxels = np.zeros((22, 22, 22), dtype='int')
with open('input_day18.txt') as f:
    for line in f:
        x, y, z = line.strip().split(',')
        voxels[int(x) + 1, int(y) + 1, int(z) + 1] = 1
print('Day 18, part 1:', count_open_sides(voxels))
# Part 2: flood-fill the outside air (mark 2), then treat remaining air
# pockets (still 0) as solid so only the exterior surface is counted.
to_flood = deque([(0, 0, 0)])
voxels[(0, 0, 0)] = 2
while len(to_flood) > 0:
    current_voxel = to_flood.popleft()
    for delta in deltas:
        # Clip keeps the flood inside the grid bounds.
        neighbour_voxel = tuple(np.clip(np.add(current_voxel, delta), 0, 21))
        if voxels[neighbour_voxel] == 0:
            voxels[neighbour_voxel] = 2
            to_flood.append(neighbour_voxel)
voxels[voxels == 0] = 1
print('Day 18, part 2:', count_open_sides(voxels))
## Day 19
# NOTE(review): this whole section is an abandoned greedy attempt (it reads
# the *_test input and never prints an answer); the beam-search solution that
# follows supersedes it.
import re
# Resource/robot indices shared by all recipe tables.
ORE = 0
CLAY = 1
OBSIDIAN = 2
GEODE = 3
def progress_state(resources, robots, recipe, time=1):
    """Advance the simulation one minute with a fixed greedy build policy."""
    factory_making = None
    # If we can make a geode robot, do so
    if all([r >= c for r, c in zip(resources, recipe[GEODE])]):
        factory_making = GEODE
        resources = [r - c for r, c in zip(resources, recipe[GEODE])]
    # If we need an obsidian robot, make one if we can
    elif all([r >= c for r, c in zip(resources, recipe[OBSIDIAN])]) and recipe[GEODE][OBSIDIAN] / (recipe[GEODE][ORE] + recipe[OBSIDIAN][ORE] + recipe[CLAY][ORE]) > robots[OBSIDIAN] / robots[ORE]:
        factory_making = OBSIDIAN
        resources = [r - c for r, c in zip(resources, recipe[OBSIDIAN])]
    # If we need a clay robot, make one if we can
    elif all([r >= c for r, c in zip(resources, recipe[CLAY])]) and recipe[OBSIDIAN][CLAY] / (recipe[OBSIDIAN][ORE] + recipe[CLAY][ORE]) > robots[CLAY] / robots[ORE]:
        factory_making = CLAY
        resources = [r - c for r, c in zip(resources, recipe[CLAY])]
    elif all([r >= c for r, c in zip(resources, recipe[ORE])]):
        # Make an ore robot
        factory_making = ORE
        resources = [r - c for r, c in zip(resources, recipe[ORE])]
    # Robots collect resources
    resources = [r + i for r, i in zip(resources, robots)]
    # Factory finishes producing the robot
    if factory_making is not None:
        robots = list(robots)
        robots[factory_making] += 1
    return resources, robots, recipe, time + 1
recipes = list()
with open('input_day19_test.txt') as f:
    for line in f:
        (ore_robot_ore_cost, clay_robot_ore_cost,
         obsidian_robot_ore_cost, obsidian_robot_clay_cost,
         geode_robot_ore_cost, geode_robot_obsidian_cost) = re.match(
            r'Blueprint \d+: Each ore robot costs (\d+) ore. '
            r'Each clay robot costs (\d+) ore. Each obsidian robot '
            r'costs (\d+) ore and (\d+) clay. Each geode robot costs '
            r'(\d+) ore and (\d+) obsidian.', line).groups()
        recipes.append([
            [int(ore_robot_ore_cost), 0, 0, 0],
            [int(clay_robot_ore_cost), 0, 0, 0],
            [int(obsidian_robot_ore_cost), int(obsidian_robot_clay_cost), 0, 0],
            [int(geode_robot_ore_cost), 0, int(geode_robot_obsidian_cost), 0],
        ])
# Trace the greedy policy on the first blueprint for 24 minutes.
resources = [0, 0, 0, 0]
robots = [1, 0, 0, 0]
recipe = recipes[0]
time = 0
for i in range(1, 25):
    resources, robots, recipe, time = progress_state(resources, robots, recipe, time)
    print(time, resources, robots)
## Day 19
import numpy as np
import re
from dataclasses import dataclass
# Parse all blueprints into 4x4 cost matrices (robot x resource).
recipes = list()
with open('input_day19.txt') as f:
    for line in f:
        (a, b, c, d, e, f) = re.match(
            r'Blueprint \d+: Each ore robot costs (\d+) ore. '
            r'Each clay robot costs (\d+) ore. Each obsidian robot '
            r'costs (\d+) ore and (\d+) clay. Each geode robot costs '
            r'(\d+) ore and (\d+) obsidian.', line).groups()
        recipes.append([
            [int(a), 0, 0, 0],
            [int(b), 0, 0, 0],
            [int(c), int(d), 0, 0],
            [int(e), 0, int(f), 0],
        ])
recipes = np.array(recipes)
@dataclass
class State:
    # One beam-search state: stockpiles and robot counts per resource.
    resources: np.ndarray
    robots: np.ndarray
    def score(self):
        # Heuristic ranking used to prune the beam; geodes dominate.
        return (self.robots @ np.array([1, 2, 500, 10000]) +
                self.resources @ np.array([1, 10, 50, 10000]))
    def __lt__(self, other):
        return self.score() < other.score()
    def __eq__(self, other):
        return self.score() == other.score()
# Branch out the possibilities based on the robot we decide to build.
def max_geodes(recipe, max_time):
    """Beam search (width 1000) for the most geodes within max_time minutes."""
    states_to_explore = [State(resources=np.array([0, 0, 0, 0]),
                               robots=np.array([1, 0, 0, 0]))]
    for time in range(max_time):
        next_states = list()
        for state in list(states_to_explore):
            new_resources = state.resources + state.robots
            # Branch: build each affordable robot, or build nothing.
            for i, cost in enumerate(recipe):
                if np.all(state.resources >= cost):
                    new_robots = state.robots.copy()
                    new_robots[i] += 1
                    next_states.append(State(new_resources - cost, new_robots))
            next_states.append(State(new_resources, state.robots))
        # Keep only the 1000 highest-scoring states (the beam).
        states_to_explore = sorted(next_states)[-1000:]
    return max(states_to_explore, key=lambda state: state.resources[3]).resources[3]
print('Day 19, part 1:', sum([i * max_geodes(r, max_time=24) for i, r in enumerate(recipes, 1)]))
print('Day 19, part 2:', np.prod([max_geodes(r, max_time=32) for r in recipes[:3]]))
## Day 20
import numpy as np
numbers = np.loadtxt('input_day20.txt', dtype='int64')
def mix(numbers, n_times):
    """Perform the "grove circle" mixing `n_times` and return the resulting
    permutation.

    Rather than moving the numbers themselves we shuffle a list of their
    original positions: entry k of the returned list is the original index of
    the number that ends up at position k.
    """
    size = len(numbers)
    positions = list(range(size))
    for _ in range(n_times):
        for original_index in range(size):
            current = positions.index(original_index)
            positions.pop(current)
            # The circle has size-1 gaps while the moving element is out.
            destination = (current + numbers[original_index]) % (size - 1)
            positions.insert(destination, original_index)
    return positions
# Part 1: single mix, then read the 1000th/2000th/3000th values after the 0.
indices = mix(numbers, n_times=1)
zero_location = indices.index(np.flatnonzero(numbers == 0)[0])
print('Day 20, part 1:', sum(numbers[indices[(zero_location + x) % len(numbers)]] for x in [1000, 2000, 3000]))
# Part 2: apply the decryption key first, then mix ten times.
numbers *= 811589153
indices = mix(numbers, n_times=10)
zero_location = indices.index(np.flatnonzero(numbers == 0)[0])
print('Day 20, part 2:', sum(numbers[indices[(zero_location + x) % len(numbers)]] for x in [1000, 2000, 3000]))
## Day 21
import operator
# '/' maps to floordiv on purpose: the puzzle guarantees integer arithmetic.
operators = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.floordiv}
# For each monkey, the operation they are performing
operations = dict()
# For each monkey not yelling a number, the left and right arguments for their
# operation
arguments = dict()
# For each monkey not yelling a number, whether the human is in their left (0) or
# right (1) argument.
human_in_argument = dict()
# Parse the input
with open('input_day21.txt') as f:
    for line in f:
        monkey, operation = line.strip().split(': ')
        # EAFP: a line is either "name: 5" (a literal) or "name: a OP b".
        try:
            number = int(operation)
            operations[monkey] = number
        except ValueError:
            left, symbol, right = operation.split()
            arguments[monkey] = (left, right)
            operations[monkey] = operators[symbol]
def resolve(monkey='root'):
    '''Compute what the given monkey is yelling.

    Leaf monkeys store a plain int in `operations`; all other monkeys store a
    binary operator that is applied to the recursively resolved arguments.
    '''
    operation = operations[monkey]
    # isinstance is the idiomatic type check (was: type(operation) == int).
    if isinstance(operation, int):
        return operation
    left, right = arguments[monkey]
    return operation(resolve(left), resolve(right))
def find_the_human(monkey='root'):
    '''Determine whether the human ('humn') sits somewhere below the given
    monkey.  While searching, record for every monkey on the path which of
    its arguments (0=left, 1=right) leads to the human, in the module-level
    human_in_argument dict.'''
    if monkey == 'humn':
        return True
    if monkey not in arguments:
        # A leaf monkey that only yells a number.
        return False
    for side, child in enumerate(arguments[monkey]):
        if find_the_human(child):
            human_in_argument[monkey] = side
            return True
    return False
def human_needs_to_yell(monkey='root', target=None):
    '''Compute the value the human needs to yell. The `target` parameter is set
    to what the monkey one level up wants to hear.'''
    if monkey == 'humn':
        return target
    operation = operations[monkey]
    args = arguments[monkey]
    human_arg = human_in_argument[monkey]
    monkey_arg = 1 - human_arg
    # The trick here is to first resolve the argument for which there is no
    # human in the loop. Then you known enough to pass along the desired target
    # to argument that has the human in the loop.
    if operation is operator.eq:
        # root: the human side must simply equal the other side.
        target = resolve(args[monkey_arg])
    elif operation is operator.add:
        # h + m = target  =>  h = target - m   (addition is commutative)
        target = target - resolve(args[monkey_arg])
    elif operation is operator.mul:
        # h * m = target  =>  h = target // m  (multiplication is commutative)
        target = target // resolve(args[monkey_arg])
    elif operation is operator.sub:
        if human_arg == 0:
            # h - m = target  =>  h = target + m
            target = target + resolve(args[monkey_arg])
        else:
            # m - h = target  =>  h = m - target
            target = resolve(args[monkey_arg]) - target
    elif operation is operator.floordiv:
        if human_arg == 0:
            # h / m = target  =>  h = target * m
            target = target * resolve(args[monkey_arg])
        else:
            # m / h = target  =>  h = m / target
            # NOTE(review): this branch uses true division (`/`) while every
            # other branch stays in integer arithmetic -- looks like it should
            # be `//`; confirm against the puzzle input.
            target = resolve(args[monkey_arg]) / target
    return human_needs_to_yell(args[human_arg], target)
print('Day 21, part 1:', resolve())
# Make the requested modifications for part 2: root becomes an equality check
# and the human's own number is unknown.
operations['root'] = operator.eq
del operations['humn']
# Play hide and seek
find_the_human()
# BUG FIX: this is still the Day 21 puzzle (the label used to say "Day 22").
print('Day 21, part 2:', human_needs_to_yell())
## Day 22
import numpy as np
import re
# Test
def wrap_test_part1(y, x, facing):
    """Part-1 edge wrapping for the example map: walking off an edge re-enters
    on the opposite edge of the same row/column (flat wrap-around).

    Coordinates are 1-based board positions; facing follows facing_d:
    0=right, 1=down, 2=left, 3=up.  Returns the re-entry (y, x, facing);
    part 1 never changes the facing.  The row/column ranges are hard-coded
    for the 16x12 example layout.
    """
    if facing == 0:
        if y in range(1, 5):
            return y, 9, facing
        if y in range(5, 9):
            return y, 1, facing
        if y in range(9, 16):
            return y, 9, facing
    elif facing == 1:
        if x in range(1, 9):
            return 5, x, facing
        elif x in range(9, 13):
            return 1, x, facing
        elif x in range(13, 17):
            return 9, x, facing
    elif facing == 2:
        if y in range(1, 5):
            return y, 12, facing
        if y in range(5, 9):
            return y, 12, facing
        if y in range(9, 16):
            return y, 16, facing
    elif facing == 3:
        if x in range(1, 9):
            return 8, x, facing
        elif x in range(9, 13):
            return 12, x, facing
        elif x in range(13, 17):
            return 12, x, facing
def wrap_test_part2(y, x, facing):
    """Part-2 edge wrapping for the example map: the map folds into a cube,
    so leaving one face re-enters on the adjacent cube face, usually with a
    new facing and mirrored coordinates.

    Same conventions as wrap_test_part1 (1-based coords; facing 0=right,
    1=down, 2=left, 3=up).  Each branch hard-codes one edge-to-edge gluing
    of the example cube net.
    """
    if facing == 0:
        if y in range(1, 5):
            return 12 - (y - 1), 16, 2
        if y in range(5, 9):
            return 9, 16 - (y - 5), 1
        if y in range(9, 16):
            return 4 - (y - 9), 12, 2
    elif facing == 1:
        if x in range(1, 5):
            return 11, 8 - (x - 1), 3
        elif x in range(5, 9):
            return 12 - (x - 5), 9, 0
        elif x in range(9, 13):
            return 8, 4 - (x - 9), 3
        elif x in range(13, 17):
            return 8 - (x - 13), 1, 0
    elif facing == 2:
        if y in range(1, 5):
            return 5, 5 + (y - 1), 1
        if y in range(5, 9):
            return 12, 16 - (y - 5), 3
        if y in range(9, 16):
            return 8, 8 - (y - 9), 3
    elif facing == 3:
        if x in range(1, 5):
            return 1, 12 - (x - 1), 1
        elif x in range(5, 9):
            return 1 + (x - 5), 9, 0
        elif x in range(9, 13):
            return 5, 4 - (x - 9), 1
        elif x in range(13, 17):
            return 8 - (x - 13), 12, 2
# Real
def wrap_real_part1(y, x, facing):
    """Part-1 edge wrapping for the real 150x200 input map: flat wrap-around
    to the opposite edge of the same row/column, facing unchanged.

    1-based coordinates; facing follows facing_d (0=right, 1=down, 2=left,
    3=up).  The ranges are hard-coded for the real input's layout.
    """
    if facing == 0:
        if y in range(1, 51):
            return y, 51, facing
        elif y in range(51, 101):
            return y, 51, facing
        elif y in range(101, 151):
            return y, 1, facing
        elif y in range(151, 201):
            return y, 1, facing
    elif facing == 1:
        if x in range(1, 51):
            return 101, x, facing
        elif x in range(51, 101):
            return 1, x, facing
        elif x in range(101, 151):
            return 1, x, facing
    elif facing == 2:
        if y in range(1, 51):
            return y, 150, facing
        elif y in range(51, 101):
            return y, 100, facing
        elif y in range(101, 151):
            return y, 100, facing
        elif y in range(151, 201):
            return y, 50, facing
    elif facing == 3:
        if x in range(1, 51):
            return 200, x, facing
        elif x in range(51, 101):
            return 150, x, facing
        elif x in range(101, 151):
            return 50, x, facing
def wrap_real_part2(y, x, facing):
    """Part-2 cube wrapping for the real input: each branch hard-codes one
    edge gluing of the real map's cube net, returning the re-entry
    (y, x, facing) with mirrored/rotated coordinates where the fold demands.

    1-based coordinates; facing follows facing_d (0=right, 1=down, 2=left,
    3=up).
    """
    if facing == 0:
        if y in range(1, 51):
            return 150 - (y - 1), 100, 2
        elif y in range(51, 101):
            return 50, 101 + (y - 51), 3
        elif y in range(101, 151):
            return 50 - (y - 101), 150, 2
        elif y in range(151, 201):
            return 150, 51 + (y - 151), 3
    elif facing == 1:
        if x in range(1, 51):
            return 1, 101 + (x - 1), 1
        elif x in range(51, 101):
            return 151 + (x - 51), 50, 2
        elif x in range(101, 151):
            return 51 + (x - 101), 100, 2
    elif facing == 2:
        if y in range(1, 51):
            return 150 - (y - 1), 1, 0
        elif y in range(51, 101):
            return 101, 1 + (y - 51), 1
        elif y in range(101, 151):
            return 50 - (y - 101), 51, 0
        elif y in range(151, 201):
            return 1, 51 + (y - 151), 1
    elif facing == 3:
        if x in range(1, 51):
            return 51 + (x - 1), 51, 0
        elif x in range(51, 101):
            return 151 + (x - 51), 1, 0
        elif x in range(101, 151):
            return 200, 1 + (x - 101), 3
def print_board(board):
    """Pretty-print the board for debugging, one character per cell.

    Cell encoding: 0=void(' '), 1=open('.'), 2=wall('#'), and 3..6 are the
    breadcrumb trail left by the walker (facing right/down/left/up ->
    '>', 'v', '<', '^').  Raises ValueError for any other value.
    """
    # Index-by-value lookup replaces the original if/elif chain.
    glyphs = ' .#>v<^'
    for row in board:
        for val in row:
            if 0 <= val < len(glyphs):
                print(glyphs[val], end='')
            else:
                raise ValueError(f'Invalid value: {val}')
        print()
# Cell encoding for the board: void, open floor, wall.
to_num = {' ': 0, '.': 1, '#': 2}
# (dy, dx) per facing: 0=right, 1=down, 2=left, 3=up.
facing_d = [(0, 1), (1, 0), (0, -1), (-1, 0)]
rows = []
# NOTE(review): this global is never read -- walk_route is called below with
# explicit wrap arguments; confirm it is a leftover.
wrap = wrap_real_part2
with open('input_day22.txt') as f:
    for line in f:
        # Blank line separates the map from the route instructions.
        if line.strip() == '':
            break
        rows.append([to_num[x] for x in list(line.rstrip())])
    # Route tokens: run lengths and 'L'/'R' turns, e.g. ['10','R','5','L',...].
    route = re.findall(r'\d+|[LR]', next(f).strip())
def walk_route(route, wrap):
    '''Walk the given route along the board, using the given wrapping function
    for when we fall off edges.

    Reads the module-level `rows` and `facing_d`.  Returns the puzzle
    "password": 1000*row + 4*column + facing (1-based coordinates).
    '''
    # Build the board as a numpy array
    board_height = len(rows)
    board_width = max([len(row) for row in rows])
    # One-cell border of zeros (void) so stepping off the map is detected by
    # a plain array read instead of bounds checks.
    board = np.zeros((board_height + 2, board_width + 2), dtype='int')
    for y, row in enumerate(rows, 1):
        board[y, 1:len(row)+1] = row
    # Starting position
    y = 1
    # First non-void cell of the top row.
    x = np.flatnonzero(board[y])[0]
    facing = 0
    # Start walking
    for instruction in route:
        if instruction == 'L':
            facing = (facing - 1) % 4
        elif instruction == 'R':
            facing = (facing + 1) % 4
        else:
            num = int(instruction)
            for _ in range(num):
                # Leave a breadcrumb (3+facing) for print_board debugging.
                board[y, x] = 3 + facing
                dy, dx = facing_d[facing]
                new_y = y + dy
                new_x = x + dx
                new_facing = facing
                # Stepped into the void border: ask the wrap rule where we
                # re-enter (possibly with a new facing, for the cube fold).
                if board[new_y, new_x] == 0:
                    new_y, new_x, new_facing = wrap(y, x, facing)
                # A wall stops the remainder of this movement instruction.
                if board[new_y, new_x] == 2:
                    break
                y = new_y
                x = new_x
                facing = new_facing
    board[y, x] = 3 + facing
    return y * 1000 + 4 * x + facing
# Part 1 wraps flat around edges; part 2 folds the map into a cube.
print('Day 22, part 1:', walk_route(route, wrap_real_part1))
print('Day 22, part 2:', walk_route(route, wrap_real_part2))
## Day 23
from collections import deque, defaultdict
# The eight neighbour offsets as complex numbers (x + y*i), clockwise from N.
dirs = [0-1j, 1-1j, 1+0j, 1+1j, 0+1j, -1+1j, -1+0j, -1-1j]
# Elf positions as a set of complex coordinates.
loc = set(complex(x, y)
          for y, line in enumerate(open('input_day23.txt'))
          for x, ch in enumerate(line) if ch == '#')
n_elves = len(loc)
# For each of N/S/W/E (dirs indices 0,4,6,2): the step to propose and the
# three cells that must be empty for that proposal.
to_check = deque((dirs[i], (dirs[i - 1], dirs[i], dirs[i + 1]))
                 for i in [0, 4, 6, 2])
# NOTE: `round` shadows the builtin for the remainder of the module.
for round in range(1, 1_000):
    proposals = defaultdict(list)
    for j, l in enumerate(loc):
        possibilities = [l + d for d, check in to_check
                         if not any(l + c in loc for c in check)]
        # No neighbours at all (4 free sides) or fully boxed in (0): stay put.
        if len(possibilities) in [0, 4]:
            proposal = l
        else:
            proposal = possibilities[0]
        proposals[proposal] += [l]
    # A proposal is executed only when exactly one elf made it; contested
    # cells leave all contenders at their old positions.
    new_loc = set(sum(([k] if len(v) == 1 else v
                       for k, v in proposals.items()), []))
    assert len(new_loc) == n_elves
    # Part 2: first round in which nobody moves.
    if len(new_loc - loc) == 0:
        print('Day 23, part 2:', round)
        break
    loc = new_loc
    # The direction considered first rotates every round.
    to_check.rotate(-1)
    if round == 10:
        # Part 1: empty cells inside the elves' bounding box after 10 rounds.
        # (np was imported earlier in this file.)
        n_empty = np.prod(np.ptp([[l.real, l.imag] for l in loc], axis=0)) - len(loc)
        print('Day 23, part 1:', int(n_empty))
|
998,242 | e9c46d379a9a8a76ebdededc659e716a11cbd605 | # Calcular e apresentar o valor do volume de uma lata de óleo, utilizando a fórmula VOLUME <- 3.14159 *R^2 * ALTURA.
# Read the can's height and radius from the user (prompts are in Portuguese).
Alt = float(input('Informe a altura da lata: '))
R = float(input('informe o raio da lata: '))
# Cylinder volume: V = pi * r^2 * h, with pi approximated as 3.14159.
volume = 3.14159 * R**2 * Alt
print('O volume da lata corresponde a: {}'.format(volume))
998,243 | 9a36762e7c4dab9fde228e80df8fc0cfe86c05eb | # coding=utf-8
import unittest
from utils import PackageTypes
from app.product import Product
class ProductTestCase(unittest.TestCase):
    """Unit tests for the Product catalogue: adding, fetching and removing
    products keyed by name."""
    def setUp(self):
        # Fresh, empty catalogue for every test.
        self.product = Product()
    def tearDown(self):
        # NOTE(review): resets the attribute to a plain list rather than None
        # or a Product; harmless as cleanup but the type does not match setUp
        # -- confirm intent.
        self.product = list()
    def test_add_product_complete(self):
        # Catalogue starts empty and stores each product under its name.
        self.assertEqual(self.product.products, dict())
        self.product.add_product(name="Soup", price=0.65, package_type=PackageTypes.TIN.value)
        self.product.add_product(name="Bread", price=0.8, package_type=PackageTypes.LOAF.value)
        self.assertDictEqual(self.product.products, {'Soup': {'price': 0.65, 'package_type': 'tin'},
                                                     'Bread': {'price': 0.8, 'package_type': 'loaf'}})
    def test_get_list_items(self):
        # A single added product is the only entry in the mapping.
        self.assertEqual(self.product.products, dict())
        self.product.add_product(name="Bread", price=0.8, package_type=PackageTypes.LOAF.value)
        self.assertDictEqual(self.product.products, {'Bread': {'price': 0.8, 'package_type': 'loaf'}})
    def test_get_product_existent(self):
        # get_product returns the entry enriched with its own name.
        self.product.add_product(name="Soup", price=0.65, package_type=PackageTypes.TIN.value)
        self.product.add_product(name="Bread", price=0.8, package_type=PackageTypes.LOAF.value)
        self.product.add_product(name="Milk", price=0.8, package_type=PackageTypes.BOTTLE.value)
        self.assertDictEqual(self.product.get_product('Milk'), {'name': 'Milk', 'price': 0.8, 'package_type': 'bottle'})
    def test_get_product_not_existent(self):
        # Looking up an unknown name yields a falsy result.
        self.product.add_product(name="Soup", price=0.65, package_type=PackageTypes.TIN.value)
        self.product.add_product(name="Bread", price=0.8, package_type=PackageTypes.LOAF.value)
        self.product.add_product(name="Milk", price=0.8, package_type=PackageTypes.BOTTLE.value)
        self.assertFalse(self.product.get_product('Banana'))
    def test_remove_product(self):
        # Removing by name deletes exactly that entry.
        self.product.add_product(name="Soup", price=0.65, package_type=PackageTypes.TIN.value)
        self.product.add_product(name="Bread", price=0.8, package_type=PackageTypes.LOAF.value)
        self.product.add_product(name="Milk", price=1.8, package_type=PackageTypes.BOTTLE.value)
        self.product.remove_product('Milk')
        self.assertDictEqual(self.product.products, {'Soup': {'price': 0.65, 'package_type': 'tin'},
                                                     'Bread': {'price': 0.8, 'package_type': 'loaf'}})
|
998,244 | d8ddca55163f3a8301b4cf1c85abb175c07c26c9 | # SECURITY WARNING: keep the secret key used in production secret!
# SECURITY: this key is committed to source control and marked
# 'django-insecure-'; it must be rotated and loaded from the environment
# before any production use.
SECRET_KEY = 'django-insecure-1emj5*2t%yxisb!&bjw#3v21ca-a89xnn3%y1qsyx9!m_llivq'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# SECURITY: plaintext credentials and a hard-coded host are committed here;
# prefer environment variables / a secrets manager.
DATABASES = {
    #'default': {
    #    'ENGINE': 'django.db.backends.sqlite3',
    #    'NAME': BASE_DIR / 'db.sqlite3',
    #}
    # MySQL backend replacing the default sqlite configuration above.
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'team5_db',
        'USER': 'team5',
        'PASSWORD': 'team5',
        'HOST': '163.152.216.248',
        'PORT': '3306',
    }
}
998,245 | 57f2e6aeb51634315a3054ea712bf3a5b3adf9ad | from auth import *
from burnt_toast import *
from deps import *
import argparse
import os
import json
import time
from datetime import datetime
# Time to wait between upload checks if we haven't detected any in-progress
# videos yet.
DEFAULT_POLLING_FREQUENCY_SECONDS = 200
# Turn on for debugging
ENABLE_LOGS = False
def _log(string):
    """Print *string* only when the module-level ENABLE_LOGS flag is set."""
    if ENABLE_LOGS:
        print(string)
def _collect_processing_videos(yt, playlist_id):
    """Partition the newest uploads of `playlist_id` into completed and
    still-processing videos.

    Fetches up to 20 most recent playlist items, then queries the videos
    endpoint for their processing status.  Returns
    (completed_videos, processing_videos) as lists of video resources.
    """
    request = yt.playlistItems().list(part='contentDetails', playlistId=playlist_id, maxResults=20)
    result = request.execute()
    video_ids = [item['contentDetails']['videoId'] for item in result['items']]
    request = yt.videos().list(
        part='status,snippet,processingDetails,suggestions',
        id=','.join(video_ids),
        maxResults=len(video_ids))
    result = request.execute()
    completed_videos = []
    processing_videos = []
    for item in result['items']:
        # (Removed unused locals `snippet`/`status`; use the bound variable
        # instead of re-indexing the item.)
        processing_details = item['processingDetails']
        if processing_details['processingStatus'] == 'processing':
            processing_videos.append(item)
        else:
            completed_videos.append(item)
    return completed_videos, processing_videos
def _snippet_published_datetime(video):
# e.g. 2021-01-28T20:55:14Z
return datetime.strptime(video['snippet']['publishedAt'], '%Y-%m-%dT%H:%M:%SZ')
def main(args):
    """Poll the authenticated user's uploads and fire a desktop toast when a
    processing video succeeds or fails.

    args.secrets_file must point to an existing OAuth client-secrets file.
    Runs until interrupted (CTRL+C).
    """
    assert args.secrets_file
    assert os.path.exists(args.secrets_file)
    yt = get_authenticated_service(args.secrets_file)
    print('Running UploadWatch. Press CTRL+C to stop.')
    # Find the channel's special "uploads" playlist.
    request = yt.channels().list(mine=True, part='contentDetails')
    result = request.execute()
    assert 'items' in result
    assert 'contentDetails' in result['items'][0]
    playlist_id = result['items'][0]['contentDetails']['relatedPlaylists']['uploads']
    wait_time_seconds = DEFAULT_POLLING_FREQUENCY_SECONDS
    completed_videos, processing_videos = _collect_processing_videos(yt, playlist_id)
    while True:
        _log(f'Found {len(processing_videos)} processing videos after waiting {wait_time_seconds} seconds.')
        if len(processing_videos) > 0:
            _log(json.dumps(processing_videos[0], indent=4))
            if 'processingProgress' in processing_videos[0]['processingDetails']:
                next_to_complete = \
                    min(
                        processing_videos,
                        key = lambda x : int(x['processingDetails']['processingProgress']['timeLeftMs']))
                # BUG FIX: timeLeftMs is in milliseconds; convert to seconds
                # by dividing (the old code multiplied by 1000, sleeping about
                # a million times too long).
                next_wait_time_seconds = \
                    int(next_to_complete['processingDetails']['processingProgress']['timeLeftMs']) / 1000
            else:
                next_wait_time_seconds = DEFAULT_POLLING_FREQUENCY_SECONDS
        else:
            next_wait_time_seconds = DEFAULT_POLLING_FREQUENCY_SECONDS
        time.sleep(next_wait_time_seconds)
        # Keep the wait reported by the log message above accurate.
        wait_time_seconds = next_wait_time_seconds
        last_completed_item = max(completed_videos, key=_snippet_published_datetime)
        completed_videos, processing_videos = _collect_processing_videos(yt, playlist_id)
        # Check if we've published anything new since the last time we checked
        next_completed_item = max(completed_videos, key=_snippet_published_datetime)
        if next_completed_item != last_completed_item:
            newly_completed = \
                [c for c in completed_videos \
                 if _snippet_published_datetime(c) > _snippet_published_datetime(last_completed_item)]
            # BUG FIX: the loop variable used to shadow the `newly_completed`
            # list itself.
            for video in newly_completed:
                print(json.dumps(video, indent=4))
                processing_details = video['processingDetails']
                video_title = video['snippet']['title']
                if processing_details['processingStatus'] == 'failed':
                    # BUG FIX: `indent=4` was being passed to _log() instead
                    # of json.dumps().
                    _log(json.dumps(video, indent=4))
                    signal_toast('Upload failed', video_title)
                elif processing_details['processingStatus'] == 'succeeded':
                    signal_toast('Upload succeeded', video_title)
                    _log(f'Succeeded: {video_title}')
        else:
            _log('Found no successfully processed videos since we last checked.')
if __name__ == '__main__':
    # Install/verify OS-level dependencies before parsing arguments; this may
    # trigger an elevation prompt (get_dependencies comes from deps).
    print('Checking installation dependencies. You may get a prompt for elevated privileges.')
    get_dependencies()
    print('Done!')
    parser = argparse.ArgumentParser()
    parser.add_argument('--secrets-file', type=str, required=True)
    main(parser.parse_args())
|
998,246 | 39dc31fbbc678ee1bcb6be2bc3dc63377cc9c25d | print("enter marks for 5 subjects:")
# NOTE(review): `sum` shadows the builtin for the rest of the script.
sum = 0.0
avg = 0.0
# Accumulate the whitespace-separated marks entered on one line.
for marks in input().split():
    sum += float(marks)
    # Average is recomputed every iteration; only the final value is printed.
    # Assumes exactly 5 marks are entered (divisor is hard-coded).
    avg = sum/5.0
print(f"average: {avg}")
|
998,247 | d37eb0ea1126d6d012ae6ba90b83e4107dfd26dd | from django_restql.mixins import DynamicFieldsMixin
from rest_framework import serializers
from core.serializers import CustomWritableNestedModelSerializer
from employees.models import (
Employee,
EmergencyContact,
WorkHistory,
Education,
)
from users.api.v1.serializers import UserSerializer
class EmergencyContactSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer for EmergencyContact; the `employee` FK is excluded from
    the payload (it is nested under EmployeeSerializer below)."""
    class Meta:
        model = EmergencyContact
        exclude = ("employee",)
class WorkHistorySerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer for WorkHistory; the `employee` FK is excluded from the
    payload (it is nested under EmployeeSerializer below)."""
    class Meta:
        model = WorkHistory
        exclude = ("employee",)
class EducationSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer for Education; the `employee` FK is excluded from the
    payload (it is nested under EmployeeSerializer below)."""
    class Meta:
        model = Education
        exclude = ("employee",)
class EmployeeSerializer(DynamicFieldsMixin, CustomWritableNestedModelSerializer):
    """Writable Employee serializer with nested user, work history, emergency
    contacts and education records."""
    user = UserSerializer(partial=True)
    work_histories = WorkHistorySerializer(many=True, required=False)
    emergency_contacts = EmergencyContactSerializer(many=True, required=False)
    educations = EducationSerializer(many=True, required=False)
    class Meta:
        model = Employee
        fields = "__all__"
|
998,248 | 2c282e1f076c086814bd1872eebfcd06e13fc41b | import os
import collections
import torch
import torch.utils.data as data
import gzip
from gqnshapenet import *
from tqdm import tqdm
import argparse
# Command-line options: input locations of the sketch/ground-truth datasets
# and where to write the converted tensors.
parser = argparse.ArgumentParser(description='Convert and save data to PyTorch tensor format')
parser.add_argument('--SKETCH_TRAIN_DATASET_PATH', type=str, help='Path to sketches in training dataset', default='.')
parser.add_argument('--SKETCH_TEST_DATASET_PATH', type=str, help='Path to sketches in test dataset', default='.')
# BUG FIX: both GROUND_TRUTH help strings were copy-pasted from the sketch
# arguments and described the wrong datasets.
parser.add_argument('--GROUND_TRUTH_TRAIN_DATASET_PATH', type=str, help='Path to ground truth in training dataset', default='.')
parser.add_argument('--GROUND_TRUTH_TEST_DATASET_PATH', type=str, help='Path to ground truth in test dataset', default='.')
parser.add_argument('--TRAIN_OUTPUT_DIR', type=str, help='Path to output directory for training data', default='.')
parser.add_argument('--TEST_OUTPUT_DIR', type=str, help='Path to output directory for test data', default='.')
opt = parser.parse_args()
def arrange_data(data_tmp, M, gen_rand_indx, rand_idx):
    """Stack per-scene (image, viewpoint) pairs into batched tensors and split
    off one random query view.

    Returns (x_real, v_real, v_real_query, rand_idx, critic_img): the first M
    context images/viewpoints, the query viewpoint, the index used, and the
    image at the query index.  Viewpoints are re-encoded as
    (x, y, z, cos/sin of the angles, classID).

    Assumes each element of data_tmp is a sequence of pairs where item[0][0]
    is the image tensor and item[1][0] the 6-dim viewpoint -- TODO confirm
    against the dataset class.
    """
    x_tmp = []
    v_tmp = []
    for data in data_tmp:
        x_tmp.append(torch.stack([x[0][0] for x in data]))
        v_tmp.append(torch.stack([v[1][0] for v in data]))
    # Optionally draw a fresh random view index in [0, 15).
    if gen_rand_indx:
        rand_idx = torch.LongTensor(1).random_(0, 15)
    x_tmp = torch.stack(x_tmp)
    v_tmp = torch.stack(v_tmp)
    # Viewpoint layout: [:3]=xyz, [3:5]=two angles ("jaw"/pitch), [5]=classID.
    v_tmp_xyz = v_tmp.narrow(dim=2, start=0, length=3)
    v_tmp_jawpitch = v_tmp.narrow(dim=2, start=3, length=2)
    v_tmp_classID = v_tmp.narrow(dim=2, start=5, length=1)
    v_tmp_jawpitch_cosed = torch.cos(v_tmp_jawpitch)
    v_tmp_jawpitch_sined = torch.sin(v_tmp_jawpitch)
    # NOTE(review): all four slices below narrow start=0, so the "pitch" and
    # "jaw" components are identical and the second angle (index 1) is never
    # used -- one pair presumably should narrow start=1; confirm intent.
    v_pitch_cosed = v_tmp_jawpitch_cosed.narrow(dim=2, start=0, length=1)
    v_pitch_sined = v_tmp_jawpitch_sined.narrow(dim=2, start=0, length=1)
    v_jaw_cosed = v_tmp_jawpitch_cosed.narrow(dim=2, start=0, length=1)
    v_jaw_sined = v_tmp_jawpitch_sined.narrow(dim=2, start=0, length=1)
    # Re-assemble: xyz, cos/sin(jaw), cos/sin(pitch), classID -> 8 dims.
    v_tmp = torch.cat([torch.cat([v_tmp_xyz, v_jaw_cosed], dim=2), v_jaw_sined], dim=2)
    v_tmp = torch.cat([torch.cat([v_tmp, v_pitch_cosed], dim=2), v_pitch_sined], dim=2)
    v_tmp = torch.cat([v_tmp, v_tmp_classID], dim=2)
    q_tmp = v_tmp[:,rand_idx, :]
    v_real_query = q_tmp
    critic_img = x_tmp[:,rand_idx, :, :, :]
    # Context: the first M views per scene.
    x_real = x_tmp.narrow(dim= 1, start=0, length=M)
    v_real = v_tmp.narrow(dim= 1, start=0, length=M)
    critic_img = critic_img.squeeze(dim=1)
    return x_real, v_real, v_real_query, rand_idx, critic_img
if __name__ == '__main__':
    import sys
    # Upper bound on scenes to convert; iteration stops early on StopIteration.
    S_max = 2000000
    path_sketch_train = opt.SKETCH_TRAIN_DATASET_PATH
    path_real_train = opt.GROUND_TRUTH_TRAIN_DATASET_PATH
    path_sketch_test = opt.SKETCH_TEST_DATASET_PATH
    path_real_test = opt.GROUND_TRUTH_TEST_DATASET_PATH
    train_data = Shapenet_dataset(path_sketch_train, path_real_train, batch_size=1)
    # Data loader. Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.
    train_loader = data.DataLoader(train_data, batch_size = 1, shuffle=False, drop_last = True)
    test_data = Shapenet_dataset(path_sketch_test, path_real_test, batch_size=1)
    # Data loader. Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.
    test_loader = data.DataLoader(test_data, batch_size = 1, shuffle=False, drop_last = True)
    train_iter = iter(train_loader)
    test_iter = iter(test_loader)
    # NOTE(review): output paths are built by string concatenation, so these
    # must end with a path separator to land inside the directory -- confirm.
    save_train = opt.TRAIN_OUTPUT_DIR
    save_test = opt.TEST_OUTPUT_DIR
    # NOTE(review): `tot` is never used.
    tot = 0
    w = 0
    for t in tqdm(range(S_max)):
        # NOTE: `data` rebinds the torch.utils.data module alias imported above.
        try:
            data = next(train_iter)
        except StopIteration:
            print("DONE!")
            break
        # Each scene is saved gzip-compressed as <index>.pt.gz.
        save_path = save_train + str(w) + '.pt.gz'
        w += 1
        with gzip.open(save_path, 'wb') as f:
            torch.save(data, f)
    print(f' [-] {w} scenes in the train dataset')
    ## test
    d = 0
    for t in tqdm(range(S_max)):
        try:
            data = next(test_iter)
        except StopIteration:
            print("DONE!")
            break
        save_path = save_test + str(d) + '.pt.gz'
        d += 1
        with gzip.open(save_path, 'wb') as f:
            torch.save(data, f)
    print(f' [-] {d} scenes in the test dataset')
|
998,249 | a6b68d04109c9c90e2f11a52347efd5af92ef72b | from django.http import HttpResponse, HttpResponseRedirect
from texts.models import Language, SourceText, Exam, Grader
from records.models import Record
from django.core import serializers
from django.template import RequestContext
from django.core.context_processors import csrf
from django.shortcuts import get_object_or_404, render, render_to_response
from django import forms
from django.template import RequestContext
from django.forms import ModelForm
from rpy2.robjects import r
from rpy2 import robjects as ro
from django.db.models import Q
from analysis.models import RFunctions, RParamaters, RFunctionForm
import imp, os, re
import json
from itertools import chain
#KeywordManager = imp.load_source('KeywordManager', os.getcwd() + '/keyword/KeywordManager.py')
#k = KeywordManager.KeywordManager()
# This function can generate random sample data
# When the page is reflashed, the data will be replaced by new ones
r("""totalError <- function( fileName)
{
library(RMySQL)
connectionObject <- dbConnect(MySQL(), user="root", password="", dbname= "translation", host="localhost")
#Query to be run on connectionObject
# *Currently utilizes tables texts_exam and texts_language
# *Currently utilizes fields texts_exam.sourceLanguage_id, texts_exam.targetLanguage_id, texts_language.id, and texts_language.language
queryCommand <- paste("
SELECT
source_Language.displayName AS Language_Source,
target_Language.displayName AS Language_Target,
COUNT(*) AS Count
FROM texts_exam AS Exams
INNER JOIN texts_language AS source_Language
INNER JOIN texts_language AS target_Language
INNER JOIN texts_error AS errors
WHERE
Exams.sourceLanguage_id = source_Language.id
AND Exams.targetLanguage_id = target_Language.id
AND Exams.id = errors.exam_id
GROUP BY Language_Source, Language_Target
ORDER BY Language_Source DESC, Language_Target DESC
;")
#Run and store result of query into queryResult variable
queryResult <- dbSendQuery(connectionObject,queryCommand)
#Change resulting query from query result to data.frame through fetch()
# *"n = #" specifies number of rows to store in the frame, "n = -1" will retrieve all rows
dataFetched <- fetch(queryResult, n = -1)
#Combine columns Language_Source and Language_Target into a single column named Language
dataFetched$Language <- paste(dataFetched$Language_Source, dataFetched$Language_Target, sep = " to ")
#Remove columns Language_Source and Language_Target
dataFetched$Language_Source <- NULL
dataFetched$Language_Target <- NULL
#Begin recording of graphing
png(fileName)
#Specifies margins and graphing specifications
# *Increased left margin size to accommodate y-axis names (the languages) [mar=c(5,10,4,2)+0.1]
# *Default margins for labels [mgp=c(3,1,0)]
# *All text horizontal [las=1]
par(mar=c(5.1,10.1,4.1,2.1),mgp=c(3,1,0),las=1)
#Color palette for graph
graphColors = c("firebrick3", "firebrick4")
#Make the barplot (horizontal specified)
barplot(xlab = "Number of Errors", main = "Errors to Translation Set", horiz=TRUE, col=graphColors)
#Close connection to file (prevent further writing to same file)
dev.off()
}""")
r(""" Sample_Data <- function(n)
sample(1:100,n,replace=F)
""")
r("""totalTest <- function(fileName)
{
library(RMySQL)
connectionObject <- dbConnect(MySQL(), user="root", password="geranium2", dbname= "translation", host="localhost")
queryCommand <- paste("
SELECT
source_Language.displayName AS Language_Source,
target_Language.displayName AS Language_Target,
COUNT(*) AS Count
FROM texts_exam AS Exams
INNER JOIN texts_language AS source_Language
INNER JOIN texts_language AS target_Language
WHERE
Exams.sourceLanguage_id = source_Language.id
AND Exams.targetLanguage_id = target_Language.id
GROUP BY Language_Source, Language_Target
ORDER BY Language_Source DESC, Language_Target DESC
;")
#Run and store result of query into queryResult variable
queryResult <- dbSendQuery(connectionObject,queryCommand)
#Change resulting query from query result to data.frame through fetch()
# *"n = #" specifies number of rows to store in the frame, "n = -1" will retrieve all rows
dataFetched <- fetch(queryResult, n = -1)
#Combine columns Language_Source and Language_Target into a single column named Language
dataFetched$Language <- paste(dataFetched$Language_Source, dataFetched$Language_Target, sep = " to ")
#Remove columns Language_Source and Language_Target
dataFetched$Language_Source <- NULL
dataFetched$Language_Target <- NULL
#Begin recording of graphing
png(fileName)
#Specifies margins and graphing specifications
# *Increased left margin size to accommodate y-axis names (the languages) [mar=c(5,10,4,2)+0.1]
# *Default margins for labels [mgp=c(3,1,0)]
# *All text horizontal [las=1]
par(mar=c(5.1,10.1,4.1,2.1),mgp=c(3,1,0),las=1)
#Color palette for graph
graphColors = c("steelblue3", "steelblue4")
#Make the barplot (horizontal specified)
barplot(5, names = "dataFetched$Language", xlab = "Number of Exams", main = "Exams to Translation Set", horiz=TRUE, col=graphColors)
#Close connection to file (prevent further writing to same file)
dev.off()
}""")
# This is a line-graph generation function for average score
r("""averageScore <- function(filename, title, scores, years, exams, passages) {
if (length(years) > 1){
All_Exams <- vector()
for (i in 1:length(years)){
All_Exams <- append(All_Exams,c(mean(Sample_Data(1 + i))))
}
g_range <- range(0,100)
png(file=filename, width=800, height=480)
plot(All_Exams,type="o",col="blue",ylim=g_range,axes=FALSE,ann=FALSE)
axis(1,at=1:length(years),lab=c(years))
axis(2,las=1,at=10*0:g_range[2])
box()
title(main="Avg. Score",col.main="red",font.main=4)
title(xlab="Years",col.lab=rgb(0,0.5,0))
title(ylab="Score",col.lab=rgb(0,0.5,0))
cl <- rainbow(length(unique(c(title))))
legend(1, g_range[2], unique(title), cex=0.8,
col=c(cl), pch=21:22, lty=1:2);
for (i in 1:length(years)){
text(i, All_Exams[i]+5, labels=round(All_Exams[i],1))
}
dev.off()
}else{
All_Exams <- rep(c(mean(Sample_Data(length(exams)))), times = length(exams))
g_range <- range(0,100)
png(file=filename, width=800, height=480)
plot(All_Exams,type="o",col="blue",ylim=g_range,axes=FALSE,ann=FALSE)
axis(1,at=1:length(exams),lab=c(exams))
axis(2,las=1,at=10*0:g_range[2])
box()
title(main="Avg. Score",col.main="red",font.main=4)
title(xlab="Exams",col.lab=rgb(0,0.5,0))
title(ylab="Score",col.lab=rgb(0,0.5,0))
legend(1, g_range[2], unique(c(paste(title, years, sep=" "))), cex=0.8,
col=c("blue"), pch=21:22, lty=1:2);
for (i in 1:length(All_Exams)){
text(i, All_Exams[i]+5, labels=round(All_Exams[i],1))
}
dev.off()
}
}""")
# This is a line-graph generation function for pass fail rates
r("""passFail <- function(filename, title, scores, years, exams, passages) {
if (length(years) > 1){
All_Exams <- vector()
for (i in 1:length(years)){
All_Exams <- append(All_Exams,c(mean(Sample_Data(1 + i))))
}
#All_Exams <- rep(c(mean(Sample_Data(1)), mean(Sample_Data(5)), mean(Sample_Data(6))))
g_range <- range(0,100)
png(file=filename, width=800, height=480)
plot(All_Exams,type="o",col="blue",ylim=g_range,axes=FALSE,ann=FALSE)
axis(1,at=1:length(years),lab=c(years))
axis(2,las=1,at=5*0:g_range[2])
title(main="Pass Rate %",col.main="red",font.main=4)
title(xlab="Years",col.lab=rgb(0,0.5,0))
title(ylab="% of Passing Tests",col.lab=rgb(0,0.5,0))
box()
cl <- rainbow(length(unique(c(title))))
legend(1, g_range[2], unique(c(title)), cex=0.8,
col=c(cl), pch=21:22, lty=1:2);
for (i in 1:length(years)){
text(i, All_Exams[i]+5, labels=round(All_Exams[i],1))
}
dev.off()
} else{
All_Exams <- rep(c(mean(Sample_Data(length(exams)))), times = length(exams))
g_range <- range(0,100)
png(file=filename, width=800, height=480)
plot(All_Exams,type="o",col="blue",ylim=g_range,axes=FALSE,ann=FALSE)
axis(1,at=1:length(exams),lab=c(exams))
axis(2,las=1,at=5*0:g_range[2])
title(main="Pass Rate %",col.main="red",font.main=4)
title(xlab="Exams",col.lab=rgb(0,0.5,0))
title(ylab="% of Passing Tests",col.lab=rgb(0,0.5,0))
box()
legend(1, g_range[2], unique(c(paste(title, years, sep=" "))), cex=0.8,
col=c("blue"), pch=21:22, lty=1:2);
for (i in 1:length(All_Exams)){
text(i, All_Exams[i]+5, labels=round(All_Exams[i],1))
}
dev.off()
}
}""")
# This is a bar-graph generation function for error breakdown
r("""errorBreakdown <- function(filename, title, format, exams) {
if (format == "byHalf"){
A <- matrix(c(3,5,7,1,9,4,6,5,2,12,2,1,7,6,8),nrow=2,ncol=length(exams),byrow=TRUE)
png(file=filename, width=800, height=480)
barplot(A,width=0.8,main=paste(title,"Errors By Half",sep=" "),col.main="red",font.main=4,names.arg=c(exams),
xlab="Exams",ylab="Errors",col.lab=rgb(0,0.5,0),col=c("tan2","blue"), xlim=c(0,length(exams)))
legend("topleft",c("1st half","2nd half"),cex=.8,
col=c("tan2","blue"),pch=c(22,0,0))
box()
dev.off()
}else if (format == "byQuarter"){
A <- matrix(c(3,5,7,1,9,4,6,5,2,12,2,1,7,6,8),nrow=4,ncol=length(exams),byrow=TRUE)
png(file=filename, width=800, height=480)
barplot(A,width=0.8,main=paste(title,"Errors By Quarter",sep=" "),col.main="red",font.main=4,names.arg=c(exams),
xlab="Exams",ylab="Errors",col.lab=rgb(0,0.5,0),col=c("tan2","blue","darkslategray3", "red"), xlim=c(0,length(exams)))
legend("topleft",c("1st qtr","2nd qtr","3rd qtr", "4th qtr"),cex=.8,
col=c("tan2","blue","darkslategray3", "red"),pch=c(22,0,0))
box()
dev.off()
}else if (format =="HalfwithScore"){
a <- c(3,5,7,1,9,4,6,5,2,12,2,1,7,6,8)
A <- matrix(a,nrow=2,ncol=length(exams),byrow=TRUE)
png(file=filename, width=800, height=480)
bp <-barplot(A, beside = TRUE,width=0.8,main=paste(title,"Errors By Half",sep=" "),col.main="red",font.main=4,names.arg=c(exams),
xlab="Exams",ylab="Errors",col.lab=rgb(0,0.5,0),col=c("tan2","blue"), ylim=c(0,20))
legend("topleft",c("1st half","2nd half"),cex=.8,
col=c("tan2","blue"),pch=c(22,0,0))
text(bp, 0, round(A, 1),cex=2,pos=3)
box()
dev.off()
}else if (format == "QuarterwithScore"){
a <- c(3,5,7,1,9,4,6,5,2,12,2,1,7,6,8)
A <- matrix(a,nrow=4,ncol=length(exams),byrow=TRUE)
png(file=filename, width=800, height=480)
bp <- barplot(A, beside = TRUE, width=0.8,main=paste(title,"Errors By Quarter",sep=" "),col.main="red",font.main=4,names.arg=c(exams),
xlab="Exams",ylab="Errors",col.lab=rgb(0,0.5,0),col=c("tan2","blue","darkslategray3", "red"), ylim=c(0,20))
legend("topleft",c("1st qtr","2nd qtr","3rd qtr", "4th qtr"),cex=.8,
col=c("tan2","blue","darkslategray3", "red"),pch=c(22,0,0))
text(bp, 0, round(A, 1),cex=2,pos=3)
box()
dev.off()
}
}""")
def index(request):
    """Render the analysis landing page listing all languages and graders."""
    langs = Language.objects.all()
    graders = Grader.objects.all()
    c = {}
    c.update(csrf(request))  # NOTE(review): c is built but never passed to render below;
    # csrf(request) may still matter for CSRF-cookie setup -- confirm before removing.
    # ... view code here
    return render(request,'analysis/index.html', {'langs': langs, 'graders': graders})
#Pass objects to rDesigner.html for individual processing
def rDesigner(request):
    """Entry point for the R designer page with no pre-generated file list."""
    return rDesignerWithArgs(request, None)
def rDesignerWithArgs(request, fileList):
    """Render the R designer page, optionally listing generated output files."""
    functions = RFunctions.objects.all()
    parameters = RParamaters.objects.all().order_by('RFunction', 'SequenceNumber')
    context = {
        'rfunctions': functions,
        'rparameters': parameters,
        'rform': RFunctionForm,
        'fileList': fileList,
    }
    return render(request, 'analysis/rDesigner.html', context)
#Form POST R function deletion
def rDeletion(request):
    """Delete every RFunctions row whose id was checked in the POST form."""
    ids_to_delete = request.POST.getlist("todelete")
    RFunctions.objects.filter(id__in=ids_to_delete).delete()
    return HttpResponseRedirect('..')
def relParms(functionId):
    """Return the RParamaters rows that belong to the R function *functionId*.

    Bug fix: the original filtered on the parameter's own primary key
    (``id=functionId``); every other lookup in this module keys RParamaters
    by the owning function (see saveParms / getFunctionParms), so filter on
    ``RFunction`` instead.
    """
    return RParamaters.objects.filter(RFunction=functionId)
def rEdit(request):
    """Update an existing RFunctions row from the submitted edit form."""
    function = RFunctions.objects.get(id=request.POST["id"])
    function.Name = request.POST["Name"]
    function.Description = request.POST["Description"]
    function.ReturnType = request.POST.get('ReturnType', 'IMG')
    function.Body = request.POST["Body"]
    function.save()
    # Re-save the parameter list submitted with the same form.
    saveParms(function, request)
    return HttpResponseRedirect('..')
def saveParms(rFunction, request):
    """Replace all stored parameters of *rFunction* with the POSTed 'parm' list.

    Existing rows are deleted first, then one RParamaters row is created per
    submitted value, numbered in form order.
    """
    RParamaters.objects.filter(RFunction=rFunction.id).delete()
    # enumerate() replaces the manual i counter of the original.
    for i, parm in enumerate(request.POST.getlist('parm')):
        RParamaters.objects.get_or_create(RFunction=rFunction, SequenceNumber=i, parameter=parm)
    # Debug trace kept from the original; parenthesized (single argument) so it
    # parses under both Python 2 and Python 3, matching rCreate's print style.
    print(request.POST)
def rCreate(request):
    """Create an RFunctions row from the submitted form, then store its parameters."""
    function, _created = RFunctions.objects.get_or_create(
        Name=request.POST["Name"],
        Description=request.POST["Description"],
        Body=request.POST["Body"])
    print("saving parms pre")
    saveParms(function, request)
    return HttpResponseRedirect('..')
def bound_form():
    """Build an RFunctionForm pre-filled from the RFunctions row with id 1.

    Bug fix: ``objects.get()`` raises DoesNotExist instead of returning None,
    so the original ``if item == None`` branch was unreachable and a missing
    row crashed the view.  ``filter().first()`` makes the fallback reachable.
    """
    item = RFunctions.objects.filter(id=1).first()
    if item is None:
        item = RFunctions()
    form = RFunctionForm(initial={'id': item.id,
                                  'Name': item.Name,
                                  'Description': item.Description,
                                  'Body': item.Body,
                                  'ReturnType': item.ReturnType})
    return form
def getFunctionParms(request):
    """AJAX endpoint: return the parameters of the requested R function as JSON."""
    json_data = serializers.serialize('json', RParamaters.objects.filter(RFunction=request.GET["value"]))
    # Debug trace; parenthesized (single argument) so the module parses on
    # both Python 2 and Python 3 -- the original py2-only `print json_data`
    # was a syntax error under py3.
    print(json_data)
    return HttpResponse(json_data, content_type="application/json")
def getFileList(dir):
    """Return the entries of directory *dir* joined as 'name1:name2:...:'.

    Every entry -- including the last -- is followed by a ':' separator,
    matching what the front-end splitter expects.  Rewritten with str.join
    (the original grew a string in a loop) and without shadowing the
    builtin ``file``.  The parameter name ``dir`` is kept for interface
    compatibility even though it shadows a builtin.
    """
    return "".join(name + ":" for name in os.listdir(dir))
def executeRFunction(request):
    """Run the selected stored R function and re-render the designer page.

    The R script's ``outputDir`` variable is pointed at a per-session image
    directory, which is emptied first so only files from this run remain.

    SECURITY NOTE(review): item.Body is user-edited R source executed via
    ro.r() -- arbitrary code execution by design; confirm access to this
    view is restricted to trusted users.
    """
    # One scratch image directory per browser session.
    outputDirectory = os.getcwd() + "/translation_mockup/static/img/" + request.session.session_key;
    if not os.path.exists(outputDirectory):
        os.makedirs(outputDirectory);
    # Clear leftovers from previous runs; best-effort per file.
    for existingFile in os.listdir(outputDirectory):
        filePath = os.path.join(outputDirectory, existingFile)
        try:
            if os.path.isfile(filePath):
                os.unlink(filePath)
        except Exception, e:  # Python 2 syntax -- this module predates py3
            print e
    item = RFunctions.objects.get(id=request.POST["FunctionID"])
    rawR = item.Body
    rawR = rawR.replace("\r", "");  # normalize CRLF from the browser textarea
    # Prepend the output-directory binding the stored script relies on.
    rawR = "outputDir = '" + outputDirectory + "/'\n" + rawR
    rToExecute = """{0}"""
    ro.r(rToExecute.format(rawR))
    # Ship the generated file names to the template as a JSON string.
    fileList = json.dumps(getFileList(outputDirectory));
    return rDesignerWithArgs(request, fileList);
def getFunctionData(request):
    """AJAX endpoint: return the requested RFunctions row serialized as JSON."""
    json_data = serializers.serialize('json', RFunctions.objects.filter(id=request.GET["value"]))
    # Debug trace; parenthesized (single argument) so the module parses on
    # both Python 2 and Python 3 -- the original py2-only `print json_data`
    # was a syntax error under py3.
    print(json_data)
    return HttpResponse(json_data, content_type="application/json")
def getParams(request):
    """AJAX helper: build the option list for a dependent filter dropdown.

    request.GET["type"] selects which list to build (Years, Exams or
    Passages); the other GET values are comma-separated id lists already
    chosen upstream in the UI.

    NOTE(review): the raw SQL relies on the DB driver expanding python lists
    for ``in %s`` placeholders -- confirm the backend supports that.
    """
    if request.GET["type"] == "Years":
        source = list(request.GET["source"].split(","))
        target = list(request.GET["target"].split(","))
        years = []
        for y in Record.objects.raw('SELECT year, id FROM records_record WHERE source_language in %s and target_language in %s group by year ' ,[source, target] ):
            years.insert(y.id, (y.year, y.year))
        response = render_to_response("analysis/params_list.html", {'params': years})
        return response
    elif request.GET["type"] == "Exams":
        years = list(request.GET["year"].split(","))
        source = list(request.GET["source"].split(","))
        target = list(request.GET["target"].split(","))
        # NOTE(review): grader is parsed but never used in the query below.
        grader = list(request.GET["grader"].split(","))
        exams = []
        for e in Record.objects.raw('SELECT exam_number, id FROM records_record where year in %s and source_language in %s and target_language in %s group by exam_number', [years, source, target]):
            exams.insert(e.id, (e.exam_number , e.exam_number))
        response = render_to_response("analysis/params_list.html", {'params': exams })
        return response
    elif request.GET["type"] == "Passages":
        exams = list(request.GET["value"].split(","))
        passages = []
        # 'A' is always offered as the first passage choice.
        passages.insert(0,('A','A'))
        for p in Record.objects.raw('select second_passage, id from records_record where exam_number in %s group by second_passage', [exams]):
            passages.insert(p.id, (p.second_passage, p.second_passage))
        response = render_to_response("analysis/params_list.html", {'params': passages })
        return response
def generateGraph(request):
    """AJAX endpoint: render the requested analysis graph to a PNG via R and
    return an ``<img>`` tag pointing at the generated static file.

    request.GET["type"] selects the graph; source/target/year/exam/passage
    are comma-separated filter id lists.  A per-exam score is computed as
    100 - SUM(points deducted) + global quality points.
    """
    source = list(request.GET["source"].split(","))
    target = list(request.GET["target"].split(","))
    years = list(request.GET["year"].split(","))
    exams = list(request.GET["exam"].split(","))
    passages = list(request.GET["passage"].split(","))
    languagePair = ''
    if request.GET["type"] == "userAnalysis":
        r.totalError("translation_mockup/static/img/totalError.png")
        return HttpResponse('<img src="/static/img/totalError.png" />')
    if request.GET["type"] == "mainAnalysis":
        r.totalTest("translation_mockup/static/img/totalTest.png")
        return HttpResponse('<img src="/static/img/totalTest.png" />')
    if request.GET["type"] == "passFail":
        langs = []
        scores = []
        # Label each score with the varying half of the language pair.
        if len(source) > 1:
            for l in Language.objects.raw('select id, displayName from texts_language where id in %s', [target]):
                languagePair = "_" + l.displayName
            for s in Record.objects.raw('select 100 - SUM(texts_error.pointsDeducted) + records_record.global_quality_pts as Score, texts_error.id, displayName from texts_error left outer join records_record on texts_error.exam_id = records_record.id left outer join texts_language on records_record.source_language = texts_language.id where records_record.source_language in %s and records_record.target_language in %s and records_record.year in %s and records_record.exam_number in %s and texts_error.passageLetter in %s group by texts_error.exam_id',[source, target, years, exams, passages]):
                scores.append(s.Score)
                langs.append(s.displayName + languagePair)
        elif len(target) > 1:
            for l in Language.objects.raw('select id, displayName from texts_language where id in %s', [source]):
                languagePair = l.displayName + "_"
            for s in Record.objects.raw('select 100 - SUM(texts_error.pointsDeducted) + records_record.global_quality_pts as Score, texts_error.id, displayName from texts_error left outer join records_record on texts_error.exam_id = records_record.id left outer join texts_language on records_record.target_language = texts_language.id where records_record.source_language in %s and records_record.target_language in %s and records_record.year in %s and records_record.exam_number in %s and texts_error.passageLetter in %s group by texts_error.exam_id',[source, target, years, exams, passages]):
                scores.append(s.Score)
                langs.append(languagePair + s.displayName)
        r.passFail("translation_mockup/static/img/passFail.png",langs, ro.IntVector(scores), years, exams, passages)
        return HttpResponse('<img src="/static/img/passFail.png" />')
    elif request.GET["type"] == "avgScore":
        langs = []
        scores = []
        if len(source) > 1:
            for l in Language.objects.raw('select id, displayName from texts_language where id in %s', [target]):
                languagePair = "_" + l.displayName
            for s in Record.objects.raw('select 100 - SUM(texts_error.pointsDeducted) + records_record.global_quality_pts as Score, texts_error.id, displayName from texts_error left outer join records_record on texts_error.exam_id = records_record.id left outer join texts_language on records_record.source_language = texts_language.id where records_record.source_language in %s and records_record.target_language in %s and records_record.year in %s and records_record.exam_number in %s and texts_error.passageLetter in %s group by texts_error.exam_id',[source, target, years, exams, passages]):
                scores.append(s.Score)
                langs.append(s.displayName + languagePair)
        elif len(target) > 1:
            for l in Language.objects.raw('select id, displayName from texts_language where id in %s', [source]):
                languagePair = l.displayName + "_"
            for s in Record.objects.raw('select 100 - SUM(texts_error.pointsDeducted) + records_record.global_quality_pts as Score, texts_error.id, displayName from texts_error left outer join records_record on texts_error.exam_id = records_record.id left outer join texts_language on records_record.target_language = texts_language.id where records_record.source_language in %s and records_record.target_language in %s and records_record.year in %s and records_record.exam_number in %s and texts_error.passageLetter in %s group by texts_error.exam_id',[source, target, years, exams, passages]):
                scores.append(s.Score)
                langs.append(languagePair + s.displayName)
        r.averageScore("translation_mockup/static/img/avgScore.png", langs, ro.IntVector(scores), years, exams, passages)
        return HttpResponse('<img src="/static/img/avgScore.png" />')
    elif request.GET["type"] == "errorBreakdown":
        # NOTE(review): languagePair is always '' on this path (it is only set
        # inside the passFail/avgScore branches) -- confirm that is intended.
        r.errorBreakdown("translation_mockup/static/img/errorBreakdown.png", languagePair, request.GET["format"], exams)
        return HttpResponse('<img src="/static/img/errorBreakdown.png" />')
#return the marked up html for the keywords in their original context
def getKeywords(request, query):
    """Return JSON: the keyword highlighted in source-text context plus the
    errors whose source phrase contains it."""
    context_chars = 30
    match = k.getDocumentsForKeyword(query)[0]
    pos = match.text.find(query)
    highlighted = '<span style="background-color:LightBlue;">' + query + '</span>'
    before = match.text[pos - context_chars : pos]
    # NOTE(review): the trailing slice ends at pos + context_chars (not at
    # pos + len(query) + context_chars), so long queries get little or no
    # trailing context -- kept as-is to preserve behavior.
    after = match.text[pos + len(query) : pos + context_chars]
    finalHTML = before + highlighted + after
    # Collect errors from any exam whose source text is the matched document.
    exams_with_text = Exam.objects.filter(Q(sourceText1__pk=match.pk) | Q(sourceText2__pk=match.pk))
    errors_with_query = []
    for exam in exams_with_text:
        for err in exam.error_set.filter(sourcePhrase__contains=query):
            errors_with_query.append(err.as_json())
    result = {'sourceTextHTML': finalHTML, 'errors': errors_with_query}
    return HttpResponse(json.dumps(result), mimetype="application/json")
#line chart
#def generateLineGraph(request):
# r.generate_line("translation_mockup/static/img/Avg_Line.png", "Random data ", "mm")
# return HttpResponse('<img src="/static/img/Avg_Line.png" />')
|
998,250 | 4087ab6bf9ae2049f186764265eddb4da83b3aa4 | import unittest
import zserio
from testutils import getZserioApi
class ExtendedFieldInTemplateTest(unittest.TestCase):
    """Tests zserio 'extended' fields instantiated through a template.

    Covers both template instantiations -- ExtendedSimple (extended uint32)
    and ExtendedCompound (extended Compound) -- checking construction,
    equality/hashing, bit sizing, and round-trips between the Original
    (non-extended) layout and the extended layouts in both directions.
    """
    @classmethod
    def setUpClass(cls):
        # Generated API for the extended_field_in_template package.
        cls.api = getZserioApi(__file__, "extended_members.zs").extended_field_in_template
    def testConstructorSimple(self):
        extended = self.api.ExtendedSimple()
        # always present when not read from stream
        self.assertTrue(extended.is_extended_value_present())
        # default initialized
        self.assertEqual(0, extended.value)
        self.assertEqual(0, extended.extended_value)
        extended = self.api.ExtendedSimple(42, zserio.limits.UINT32_MAX)
        self.assertTrue(extended.is_extended_value_present())
        self.assertEqual(42, extended.value)
        self.assertEqual(zserio.limits.UINT32_MAX, extended.extended_value)
    def testConstructorCompound(self):
        extended = self.api.ExtendedCompound()
        # always present when not read from stream
        self.assertTrue(extended.is_extended_value_present())
        # default initialized
        self.assertEqual(0, extended.value)
        self.assertIsNone(extended.extended_value)
        extended = self.api.ExtendedCompound(42, self.api.Compound(zserio.limits.UINT32_MAX))
        self.assertTrue(extended.is_extended_value_present())
        self.assertEqual(42, extended.value)
        self.assertEqual(zserio.limits.UINT32_MAX, extended.extended_value.field)
    def testEqSimple(self):
        # Equality must account for both the base and the extended field.
        extended1 = self.api.ExtendedSimple()
        extended2 = self.api.ExtendedSimple()
        self.assertEqual(extended1, extended2)
        extended1.value = 13
        self.assertNotEqual(extended1, extended2)
        extended2.value = 13
        self.assertEqual(extended1, extended2)
        extended2.extended_value = zserio.limits.UINT32_MAX
        self.assertNotEqual(extended1, extended2)
        extended1.extended_value = zserio.limits.UINT32_MAX
        self.assertEqual(extended1, extended2)
    def testEqCompound(self):
        extended1 = self.api.ExtendedCompound()
        extended2 = self.api.ExtendedCompound()
        self.assertEqual(extended1, extended2)
        extended1.value = 13
        self.assertNotEqual(extended1, extended2)
        extended2.value = 13
        self.assertEqual(extended1, extended2)
        extended2.extended_value = self.api.Compound(zserio.limits.UINT32_MAX)
        self.assertNotEqual(extended1, extended2)
        extended1.extended_value = self.api.Compound(zserio.limits.UINT32_MAX)
        self.assertEqual(extended1, extended2)
    def testHashSimple(self):
        # Hash must follow equality for both fields.
        extended1 = self.api.ExtendedSimple()
        extended2 = self.api.ExtendedSimple()
        self.assertEqual(hash(extended1), hash(extended2))
        extended1.value = 13
        self.assertNotEqual(hash(extended1), hash(extended2))
        extended2.value = 13
        self.assertEqual(hash(extended1), hash(extended2))
        extended2.extended_value = 42
        self.assertNotEqual(hash(extended1), hash(extended2))
        extended1.extended_value = 42
        self.assertEqual(hash(extended1), hash(extended2))
    def testHashCompound(self):
        extended1 = self.api.ExtendedCompound()
        extended2 = self.api.ExtendedCompound()
        self.assertEqual(hash(extended1), hash(extended2))
        extended1.value = 13
        self.assertNotEqual(hash(extended1), hash(extended2))
        extended2.value = 13
        self.assertEqual(hash(extended1), hash(extended2))
        extended2.extended_value = self.api.Compound(42)
        self.assertNotEqual(hash(extended1), hash(extended2))
        extended1.extended_value = self.api.Compound(42)
        self.assertEqual(hash(extended1), hash(extended2))
    def testBitSizeOfSimple(self):
        extended = self.api.ExtendedSimple()
        self.assertEqual(EXTENDED_BIT_SIZE, extended.bitsizeof())
    def testBitSizeOfCompound(self):
        extended = self.api.ExtendedCompound(42, self.api.Compound())
        self.assertEqual(EXTENDED_BIT_SIZE, extended.bitsizeof())
    def testInitializeOffsetsSimple(self):
        extended = self.api.ExtendedSimple()
        self.assertEqual(EXTENDED_BIT_SIZE, extended.initialize_offsets(0))
    def testInitializeOffsetsCompound(self):
        extended = self.api.ExtendedCompound(42, self.api.Compound())
        self.assertEqual(EXTENDED_BIT_SIZE, extended.initialize_offsets(0))
    def testWriteReadExtendedSimple(self):
        # Extended -> extended round trip keeps the presence flag set.
        extended = self.api.ExtendedSimple(42, zserio.limits.UINT32_MAX)
        bitBuffer = zserio.serialize(extended)
        self.assertEqual(EXTENDED_BIT_SIZE, bitBuffer.bitsize)
        readExtended = zserio.deserialize(self.api.ExtendedSimple, bitBuffer)
        self.assertTrue(readExtended.is_extended_value_present())
        self.assertEqual(extended, readExtended)
    def testWriteReadExtendedCompound(self):
        extended = self.api.ExtendedCompound(42, self.api.Compound(zserio.limits.UINT32_MAX))
        bitBuffer = zserio.serialize(extended)
        self.assertEqual(EXTENDED_BIT_SIZE, bitBuffer.bitsize)
        readExtended = zserio.deserialize(self.api.ExtendedCompound, bitBuffer)
        self.assertTrue(readExtended.is_extended_value_present())
        self.assertEqual(extended, readExtended)
    def testWriteOriginalReadExtendedSimple(self):
        # Reading old (non-extended) data: the extended field is absent and
        # the object keeps behaving like the Original until the setter is used.
        original = self.api.Original(42)
        bitBuffer = zserio.serialize(original)
        readExtended = zserio.deserialize(self.api.ExtendedSimple, bitBuffer)
        self.assertFalse(readExtended.is_extended_value_present())
        # extended value is default constructed
        self.assertEqual(0, readExtended.extended_value)
        # bit size as original
        self.assertEqual(ORIGINAL_BIT_SIZE, readExtended.bitsizeof())
        # initialize offsets as original
        self.assertEqual(ORIGINAL_BIT_SIZE, readExtended.initialize_offsets(0))
        # write as original
        bitBuffer = zserio.serialize(readExtended)
        self.assertEqual(ORIGINAL_BIT_SIZE, bitBuffer.bitsize)
        # read original again
        readOriginal = zserio.deserialize(self.api.Original, bitBuffer)
        self.assertEqual(original, readOriginal)
        # setter makes the value present!
        readExtended.extended_value = zserio.limits.UINT32_MAX
        self.assertTrue(readExtended.is_extended_value_present())
        # bit size as extended
        self.assertEqual(EXTENDED_BIT_SIZE, readExtended.bitsizeof())
        # initialize offsets as extended
        self.assertEqual(EXTENDED_BIT_SIZE, readExtended.initialize_offsets(0))
        # writes as extended
        bitBuffer = zserio.serialize(readExtended)
        self.assertEqual(EXTENDED_BIT_SIZE, bitBuffer.bitsize)
    def testWriteOriginalReadExtendedCompound(self):
        original = self.api.Original(42)
        bitBuffer = zserio.serialize(original)
        readExtended = zserio.deserialize(self.api.ExtendedCompound, bitBuffer)
        self.assertFalse(readExtended.is_extended_value_present())
        # extended value is None
        self.assertIsNone(readExtended.extended_value)
        # bit size as original
        self.assertEqual(ORIGINAL_BIT_SIZE, readExtended.bitsizeof())
        # initialize offsets as original
        self.assertEqual(ORIGINAL_BIT_SIZE, readExtended.initialize_offsets(0))
        # write as original
        bitBuffer = zserio.serialize(readExtended)
        self.assertEqual(ORIGINAL_BIT_SIZE, bitBuffer.bitsize)
        # read original again
        readOriginal = zserio.deserialize(self.api.Original, bitBuffer)
        self.assertEqual(original, readOriginal)
        # setter makes the value present!
        readExtended.extended_value = self.api.Compound(zserio.limits.UINT32_MAX)
        self.assertTrue(readExtended.is_extended_value_present())
        # bit size as extended
        self.assertEqual(EXTENDED_BIT_SIZE, readExtended.bitsizeof())
        # initialize offsets as extended
        self.assertEqual(EXTENDED_BIT_SIZE, readExtended.initialize_offsets(0))
        # writes as extended
        bitBuffer = zserio.serialize(readExtended)
        self.assertEqual(EXTENDED_BIT_SIZE, bitBuffer.bitsize)
    def testWriteExtendedSimpleReadOriginal(self):
        # Old readers consume only the original prefix of extended data.
        extended = self.api.ExtendedSimple(42, zserio.limits.UINT32_MAX)
        bitBuffer = zserio.serialize(extended)
        self.assertEqual(EXTENDED_BIT_SIZE, bitBuffer.bitsize)
        reader = zserio.BitStreamReader.from_bitbuffer(bitBuffer)
        readOriginal = self.api.Original.from_reader(reader)
        self.assertEqual(extended.value, readOriginal.value)
        self.assertEqual(ORIGINAL_BIT_SIZE, reader.bitposition)
    def testWriteExtendedCompoundReadOriginal(self):
        extended = self.api.ExtendedCompound(42, self.api.Compound(zserio.limits.UINT32_MAX))
        bitBuffer = zserio.serialize(extended)
        self.assertEqual(EXTENDED_BIT_SIZE, bitBuffer.bitsize)
        reader = zserio.BitStreamReader.from_bitbuffer(bitBuffer)
        readOriginal = self.api.Original.from_reader(reader)
        self.assertEqual(extended.value, readOriginal.value)
        self.assertEqual(ORIGINAL_BIT_SIZE, reader.bitposition)
# Original layout: a single 4-byte (uint32) field.
ORIGINAL_BIT_SIZE = 4 * 8
# Extended layout appends one more 4-byte field (uint32 or a Compound wrapping one).
EXTENDED_BIT_SIZE = ORIGINAL_BIT_SIZE + 4 * 8
|
998,251 | ba69a64e216f240793a9ac67eab6568ba4c0681b | import os
class Config:
    """Flask app configuration: core settings plus per-company scraper URLs.

    For every supported company there is a careers-page URL (*_URL) and a
    Greenhouse-style API endpoint (*_EMP_URL) used to pull job/office data.
    """

    # Security fix: read the secret from the environment.  The previously
    # hard-coded value is kept only as a fallback so existing deployments
    # keep working until SECRET_KEY is set -- rotate it in production.
    SECRET_KEY = os.environ.get(
        'SECRET_KEY',
        '0917b13a9091915d54b6336f45909539cce452b3661b21f386418a257883b30a')
    SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    TWILIO_URL = 'https://www.twilio.com/company/jobs'
    TWILIO_EMP_URL = 'https://api.greenhouse.io/v1/boards/twilio/offices'
    AIRBNB_URL = 'https://careers.airbnb.com/positions/'
    AIRBNB_EMP_URL = 'https://careers.airbnb.com/wp-admin/admin-ajax.php?action=fetch_greenhouse_jobs&which-board=airbnb&strip-empty=true'
    YEXT_URL = 'https://www.yext.com/careers/open-positions/'
    YEXT_EMP_URL = 'https://api.greenhouse.io/v1/boards/yext/embed/departments'
    SQUARESPACE_URL = 'https://www.squarespace.com/about/careers'
    SQUARESPACE_EMP_URL = 'https://api.greenhouse.io/v1/boards/squarespace/offices/'
    HUBSPOT_URL = 'https://www.hubspot.com/jobs/search'
    HUBSPOT_EMP_URL = 'https://hs-greenhouse.herokuapp.com/search'
    PINTEREST_URL = 'https://careers.pinterest.com/careers'
    PINTEREST_EMP_URL = 'https://api.greenhouse.io/v1/boards/pinterest/offices/'
    JDPOWER_URL = 'https://www.jdpower.com/business/careers/open-positions'
    JDPOWER_EMP_URL = 'https://api.greenhouse.io/v1/boards/jdpower/offices/'
    THETRADEDESK_URL = 'https://www.thetradedesk.com/'
    THETRADEDESK_EMP_URL = 'https://api.greenhouse.io/v1/boards/thetradedesk/offices/'
    HARRYS_URL = 'https://www.harrys.com/en/us/careers'
    HARRYS_EMP_URL = 'https://api.greenhouse.io/v1/boards/harrys/offices/'

    # Display names of every company the scraper supports.
    SUPPORTED = ['Twilio', 'Airbnb', 'Yext', 'SquareSpace', 'HubSpot', 'Pinterest', 'JDPower', 'theTradeDesk', 'Harrys']
|
998,252 | 307e3386e608010e4839b99d3b9b103aa8d42c68 | #!/usr/bin/env python
# encoding: utf-8
# @Time : 2019/12/18 11:36
# @Author : lxx
# @File : process.py
# @Software: PyCharm
print("开始。。。。。")
# Performance fix: the original re-opened and re-read template.txt for every
# input line (and never closed those handles); read it once up front.
# (Side effect: template.txt is now opened even when the input file is empty.)
with open("template.txt", encoding="utf-8") as templateFile:
    templateLines = [tline.strip() for tline in templateFile]
with open("result.txt", "w", encoding="utf-8") as resultFile:
    for line in open("replaceAttribute_Value_Concept", encoding="utf-8"):
        # Each input row: attribute \t attribute-value \t concept
        line = line.strip()
        splitWords = line.split("\t")
        newAttribute = splitWords[0]
        newAttributeValue = splitWords[1]
        newConcept = splitWords[2]
        # Tag every character of the value with a positional marker:
        # Bval (begin), Mval (middle), Eval (end) -- built via join instead
        # of the original quadratic string concatenation.
        parts = []
        last = len(newAttributeValue) - 1
        for index, singleStr in enumerate(newAttributeValue):
            if index == 0:
                parts.append(singleStr + " Bval|")
            elif index == last:
                parts.append(singleStr + " Eval|")
            else:
                parts.append(singleStr + " Mval|")
        tmpAttributeValue = "".join(parts)
        # Splice the tagged triple into every template row.
        for line1 in templateLines:
            split = line1.split("|")
            newLine = newAttribute + " NN|" + split[1] + "|" + tmpAttributeValue + split[3] + "|" + newConcept + " NN|" + split[5]
            resultFile.write(newLine)
            resultFile.write("\n")
print("结束。。。。")
|
998,253 | 0ae108a0ee33e68800936bc3daa423c673978623 | import sys
#
#TE = {Pseudo0:[(x,y),(z,l)...],Pseudo1:[(m,x)...]...}
# Maps chromosome name -> list of (start, stop) TE ranges; filled by loadTEranges().
TE_ranges = {}
# Column indices of the whitespace-separated TE range file.
CHROM,START,STOP = 0,1,2
def isDataLine(line):
    """Return True when *line* is a data row: longer than one character and
    not a '#' header/comment line."""
    return len(line) > 1 and line[0] != "#"
def loadTEranges(TE_file_loc):
    """
    Populate the module-level TE_ranges dict from a text file whose rows are
    CHROM START STOP (whitespace separated).  Ranges are appended as
    (start, stop) string pairs keyed by chromosome.

    Fix: removed the redundant TE_file.close() -- the with-block already
    closes the file on exit.
    """
    with open(TE_file_loc) as TE_file:
        for line in TE_file:
            line_col = line.split()
            # Append this (start, stop) range under its chromosome.
            TE_ranges.setdefault(line_col[CHROM], []).append((line_col[START], line_col[STOP]))
    return
def validRange(line):
    """
    Return True when the site described by *line* (CHROM POS ...) falls
    outside every TE range recorded for its chromosome, i.e. the site
    should be kept.
    """
    fields = line.split()
    chrom = fields[0]
    pos = float(fields[1])
    for low, high in TE_ranges[chrom]:
        if float(low) <= pos <= float(high):
            return False
    return True
if __name__ == "__main__":
    # Usage: script.py <TE_range_file> <vcf_file>
    TE_file_loc, vcf_file_loc = sys.argv[1], sys.argv[2]
    loadTEranges(TE_file_loc)
    base = vcf_file_loc[:-4]  # strip the ".vcf" suffix
    # Robustness fix: context managers guarantee all three files are
    # flushed/closed even if a malformed line raises part-way through
    # (the original closed them manually, and not on the error path).
    with open(base + "_TEremoved.vcf", "w") as trimmed_vcf, \
         open(base + "_TE_Sites.vcf", "w") as removed_sites, \
         open(vcf_file_loc) as vcf_file:
        for line in vcf_file:
            if isDataLine(line):
                # Sites inside a TE range go to the removed-sites file.
                if validRange(line):
                    trimmed_vcf.write(line)
                else:
                    removed_sites.write(line)
            else:
                # Header lines are kept only in the trimmed output,
                # matching the original behavior.
                trimmed_vcf.write(line)
|
998,254 | 353bdb4ef52be349151d50fa7383f6bdf443f6d6 | from datetime import datetime
from django.db import models
# Create your models here.
class Projeto(models.Model):
    """
    Portfolio project entry (translated from Portuguese: "class modeling the
    database for my app").
    """
    nome_projeto = models.CharField(max_length=200)        # project name
    descricao_projeto = models.TextField()                 # project description
    cliente = models.CharField(max_length=100)             # client name
    # NOTE(review): datetime.now is naive; Django projects with USE_TZ=True
    # normally want django.utils.timezone.now -- confirm before changing.
    data_conclusao = models.DateTimeField(default=datetime.now, blank=True)
    website = models.CharField(max_length=200)
    linguagem = models.CharField(max_length=50)            # implementation language
    tags_projeto = models.TextField()
    detalhes_projeto = models.TextField()
|
998,255 | b294eb7569d6e44860ff9056b9534505699a94ae | /Users/pranavsankhe/anaconda/lib/python3.6/ntpath.py |
998,256 | 7d2e22caa0d8d0397f63ed9b7b426e7a00f99f1d | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import login, authenticate, logout
from django.urls import reverse
from assistant.models import *
from assistant.forms import *
from assistant.unit_conv import *
import json
# Create your views here.
#categories of lists
# Grocery-list categories rendered as sections in the grocery templates.
lists=['produce','alcohol','pantry','dairy','spices','misc']
# Recipe categories shown on the cookbook page.
recipe_cats=['breakfast','dinner','cocktails','ingredients']
def grocery(request):
    """Render the grocery page with the list categories (and the username
    when the visitor is logged in)."""
    context = {'categories': lists}
    if request.user.is_authenticated:
        context['username'] = request.user.username
    # Fix: the original branched on request.method == 'GET' but both branches
    # rendered the same template with the same context; the dead check is gone.
    return render(request, "assistant/grocery.html", context)
def menu(request):
    """Menu view -- not implemented yet."""
    # TODO: implement weekly-menu page.
    pass
def toDo(request):
    """To-do list view -- not implemented yet."""
    # TODO: implement to-do page.
    pass
def extractNum(s):
    """Return the integer formed by concatenating every numeric character
    of *s* in order.

    Raises ValueError (via int('')) when *s* contains no numeric characters,
    exactly like the original implementation.
    """
    return int("".join(ch for ch in s if ch.isnumeric()))
def cookbook(request):
    """Render the cookbook page: every recipe per category, annotated with
    whether it can be made from current stock.

    For each recipe ingredient: quantity == 0 means out of stock, quantity
    in (0, 1) means it is on the shopping list ("planned").  An ingredient
    that is itself a recipe (name minus its leading character matches a
    RecipeItem) counts as available when that sub-recipe is makeable.
    ' water' is always considered available.
    """
    context={}
    if(request.user.is_authenticated):
        context['username'] = request.user.username
    categories={}
    for cat in recipe_cats:
        recipes_obj=[]
        recipes = RecipeItem.objects.filter(category=cat)
        for recipe in recipes:
            recipe_obj = {
                'name': recipe.name,
                'shortname': recipe.shortname,
                'thumbnail': recipe.shortname}
            makeable = True
            gluten = False
            dairy = False
            missing_ingredients = []
            planned_ingredients = []
            ingredients = []
            for ingr in recipe.ingredients.all():
                ingr_obj = {'name':ingr.name}
                # Use the recipe-specific quantity/units when present,
                # otherwise default to a single unit with no unit label.
                if(len(recipe.ingr_quants.filter(name=ingr.name))>0):
                    ingrItem = recipe.ingr_quants.get(name=ingr.name)
                    ingr_obj['quantity'] = ingrItem.quantity
                    ingr_obj['unit'] = ingrItem.units
                else:
                    ingr_obj['quantity'] = 1
                    ingr_obj['unit'] = ''
                ingredients.append(ingr_obj)
                # Out of stock entirely.
                if(ingr.quantity == 0 and ingr.name!=' water'):
                    # NOTE(review): ingr.name[1:] suggests names carry a
                    # leading marker character -- confirm against the model.
                    if(len(RecipeItem.objects.filter(name=(ingr.name[1:].lower())))>0):
                        home_ingr = RecipeItem.objects.get(name=(ingr.name[1:].lower()))
                        for home_ingr_ingr in home_ingr.ingredients.all():
                            if(home_ingr_ingr.quantity == 0 and home_ingr_ingr.name!=' water'):
                                missing_ingredients.append(ingr_obj)
                                makeable = False
                                break;
                            elif(home_ingr_ingr.quantity < 1 and home_ingr_ingr.name!=' water'):
                                planned_ingredients.append(ingr_obj)
                                makeable = False
                                break;
                    else:
                        missing_ingredients.append(ingr_obj)
                        makeable = False
                # Partially stocked (on the shopping list).
                elif(ingr.quantity < 1 and ingr.name!=' water'):
                    if(len(RecipeItem.objects.filter(name=(ingr.name[1:].lower())))>0):
                        home_ingr = RecipeItem.objects.get(name=(ingr.name[1:].lower()))
                        for home_ingr_ingr in home_ingr.ingredients.all():
                            if(home_ingr_ingr.quantity == 0 and home_ingr_ingr.name!=' water'):
                                missing_ingredients.append(ingr_obj)
                                makeable = False
                                break;
                            elif(home_ingr_ingr.quantity < 1 and home_ingr_ingr.name!=' water'):
                                planned_ingredients.append(ingr_obj)
                                makeable = False
                                break;
                    else:
                        planned_ingredients.append(ingr_obj)
                        makeable = False
                # Dietary flags: any flagged ingredient flags the recipe.
                if(ingr.gluten):
                    gluten = True
                if(ingr.dairy):
                    dairy = True
            recipe_obj['makeable'] = makeable
            recipe_obj['gluten'] = gluten
            recipe_obj['dairy'] = dairy
            recipe_obj['ingredients'] = ingredients
            recipe_obj['missing_ingredients'] = missing_ingredients
            recipe_obj['planned_ingredients'] = planned_ingredients
            optional_ingredients = []
            for ingr in recipe.optional_ingredients.all():
                optional_ingredients.append(ingr.name)
            recipe_obj['optional_ingredients'] = optional_ingredients
            recipes_obj.append(recipe_obj)
        categories[cat] = recipes_obj
    context['categories'] = categories
    # NOTE(review): both branches below render identically; the GET check is dead.
    if(request.method == 'GET'):
        return render(request,"assistant/cookbook.html", context)
    return render(request,"assistant/cookbook.html", context)
def edit(request):
    """Render the recipe/ingredient editor with all rows sorted by name."""
    context = {}
    if request.user.is_authenticated:
        context['username'] = request.user.username
    context['recipes'] = RecipeItem.objects.all().order_by('name')
    context['ingredients'] = GroceryItem.objects.all().order_by('name')
    # Fix: the original's request.method == 'GET' branch rendered the exact
    # same response as the fall-through, so the dead branch was removed.
    return render(request, "assistant/edit.html", context)
@csrf_exempt
def swap_list(request):
    """Toggle a grocery between the stock list (positive quantity) and the
    shopping list (negative quantity) by flipping the sign of its quantity."""
    if request.method == 'GET':
        return render(request, "assistant/grocery.html")
    if ('quantity' in request.POST) and ('grocery' in request.POST):
        # Fix: the original computed an unused grocery_name local; the lookup
        # has always used the raw POST value.
        grocery = GroceryItem.objects.get(name=request.POST['grocery'])
        grocery.quantity = grocery.quantity * -1
        grocery.save()
    return update_lists(request)
@csrf_exempt
def add_stock(request):
    """Add a quantity of a grocery to the stock list, creating the item in
    the 'misc' category when it is unknown.  Expects a JSON body with
    'grocery' and 'quantity'."""
    if request.method == 'GET':
        return render(request, "assistant/grocery.html")
    body = json.loads(request.body.decode('utf-8'))
    # Fix: removed the original's no-op loop that re-bound each body key with
    # str.replace and discarded the result (strings are immutable).
    if ('quantity' in body) and ('grocery' in body):
        grocery_name = body['grocery'].replace(' - ', ' ')
        try:
            # Known grocery: bump the stocked amount, or reset it if the item
            # was on the shopping list (non-positive quantity).
            grocery = GroceryItem.objects.get(name=grocery_name)
            if grocery.quantity > 0:
                grocery.quantity += int(body['quantity'])
            else:
                grocery.quantity = int(body['quantity'])
        except:
            # Unknown grocery -> create it under 'misc'.
            # NOTE(review): bare except doubles as get-or-create; narrowing it
            # to GroceryItem.DoesNotExist would change error behavior.
            grocery = GroceryItem(name=grocery_name,
                                  category='misc',
                                  quantity=extractNum(body['quantity']))
        grocery.save()
    return update_lists(request)
@csrf_exempt
def add_single_stock(request):
    """Add one default-quantity unit of a grocery to the stock list,
    creating a single unit under 'misc' when the item is unknown."""
    if request.method == 'GET':
        return render(request, "assistant/grocery.html")
    body = json.loads(request.body.decode('utf-8'))
    # Fix: removed the original's no-op key-rewriting loop (see add_stock).
    if 'grocery' in body:
        grocery_name = body['grocery'].replace(' - ', ' ')
        try:
            grocery = GroceryItem.objects.get(name=grocery_name)
            if grocery.quantity > 0:  # already stocked: add the default amount
                grocery.quantity += grocery.default_quant
            else:
                grocery.quantity = grocery.default_quant
        except:
            # Unknown grocery -> create a single unit in 'misc'.
            grocery = GroceryItem(name=grocery_name,
                                  category='misc',
                                  quantity=1)
        grocery.save()
    return update_lists(request)
@csrf_exempt
def remove_stock(request):
    """Subtract a quantity from a stocked grocery, clamping at zero.
    Unknown groceries are silently ignored."""
    if request.method == 'GET':
        return render(request, "assistant/grocery.html")
    body = json.loads(request.body.decode('utf-8'))
    # Fix: removed the original's no-op key-rewriting loop (see add_stock).
    if ('quantity' in body) and ('grocery' in body):
        try:
            grocery_name = body['grocery'].replace(' - ', ' ')
            grocery = GroceryItem.objects.get(name=grocery_name)
            quant = int(body['quantity'])
            # Only stock-list items (positive quantity) can be decremented.
            if grocery.quantity > 0:
                if quant <= grocery.quantity:
                    grocery.quantity -= quant
                else:
                    grocery.quantity = 0
            grocery.save()
        except:
            # Grocery not found: nothing to remove.
            pass
    return update_lists(request)
@csrf_exempt
def add_grocery(request):
    """Put a quantity of a grocery on the shopping list (membership is
    encoded as a negative quantity), creating the item when unknown."""
    if request.method == 'GET':
        return render(request, "assistant/grocery.html")
    body = json.loads(request.body.decode('utf-8'))
    print(body)
    if ('quantity' in body) and ('grocery' in body):
        cleaned_name = body['grocery'].replace(' - ', ' ')
        try:
            # Known grocery: grow the (negative) shopping amount, or pull it
            # off the stock list by overwriting with a negative quantity.
            grocery = GroceryItem.objects.get(name=cleaned_name)
            if grocery.quantity < 0:
                grocery.quantity -= int(body['quantity'])
            else:
                grocery.quantity = -1 * int(body['quantity'])
        except:
            # New grocery: file it under 'misc' with a negative quantity.
            grocery = GroceryItem(name=cleaned_name,
                                  category='misc',
                                  quantity=-1 * extractNum(body['quantity']))
        grocery.save()
    return update_lists(request)
@csrf_exempt
def add_single_grocery(request):
    """Put one default-quantity unit of a grocery on the shopping list
    (shopping-list membership is encoded as a negative quantity)."""
    if(request.method == 'GET'):
        return render(request,"assistant/grocery.html")
    body = json.loads(request.body.decode('utf-8'))
    # NOTE(review): this loop is a no-op -- it re-binds the loop variable and
    # discards the result; kept for byte-identical behavior.
    for word in body:
        word = word.replace(' ','')
    if('grocery' in body):
        try:
            #known grocery
            grocery_name = body['grocery'].replace(' - ',' ')
            grocery = GroceryItem.objects.get(name=grocery_name)
            if(grocery.quantity < 0):#in grocery list
                grocery.quantity -= grocery.default_quant
            else:#remove from stock and add to grocery list
                grocery.quantity = -1*grocery.default_quant
        except:
            #new grocery
            grocery_name = body['grocery'].replace(' - ',' ')
            grocery = GroceryItem( name=grocery_name,
                                   category='misc',
                                   quantity=-1)
        grocery.save()
    return update_lists(request)
@csrf_exempt
def remove_grocery(request):
    """Take quantity off a shopping-list grocery (negative qty), clamping
    at zero.

    Accepts either form-encoded POST data (handles only the qty==0 'clear'
    case) or a JSON body with 'grocery' and 'quantity'.
    """
    if(request.method == 'GET'):
        return render(request,"assistant/grocery.html")
    # Form-encoded path: only the explicit clear-to-zero action.
    if(('quantity' in request.POST) and ('grocery' in request.POST)):
        if(int(request.POST['quantity']) == 0):
            grocery = GroceryItem.objects.get(name=request.POST['grocery'])
            grocery.quantity = 0
            grocery.save()
        return update_lists(request)
    # JSON path: decrement the (negative) shopping-list quantity.
    body = json.loads(request.body.decode('utf-8'))
    if(('quantity' in body) and ('grocery' in body)):
        try:
            #known grocery
            grocery_name = body['grocery'].replace(' - ',' ')
            grocery = GroceryItem.objects.get(name=grocery_name)
            quant = int(body['quantity'])
            if(grocery.quantity < 0):#in grocery list
                if(quant <= abs(grocery.quantity)):
                    grocery.quantity += quant
                else:
                    grocery.quantity = 0
            grocery.save()
        except:
            #grocery not found
            pass
    return update_lists(request)
def update_lists(request):
    """Return every non-empty grocery entry grouped by category as JSON.

    Response shape: ``[{category: {name: {name, quantity, unit}}}]`` --
    a single grouped dict wrapped in a one-element list. Categories with
    no non-zero items are omitted.
    """
    grouped = {}
    for category in lists:
        entries = {
            item.name: {
                'name': item.name,
                'quantity': item.quantity,
                'unit': item.units,
            }
            for item in GroceryItem.objects.filter(category=category)
            if item.quantity != 0
        }
        if entries:
            grouped[category] = entries
    payload = json.dumps([grouped])
    return HttpResponse(payload, content_type='application/json')
def admin_login(request):
    """Render the admin login page; a POST just redisplays the bound form."""
    context = {}
    if request.user.is_authenticated:
        context['username'] = request.user.username
    if request.method == 'GET':
        context['form'] = LoginForm()
    else:
        context['form'] = LoginForm(request.POST)
    return render(request, 'assistant/login.html', context)
def login_action(request):
    """Validate the login form, authenticate, and redirect to the cookbook.

    Bug fix: ``authenticate`` returns None for bad credentials, and the
    original passed that straight into ``login()``, which raises; invalid
    credentials now redisplay the login form instead.
    """
    context = {}
    if request.user.is_authenticated:
        context['username'] = request.user.username
    if request.method == 'GET':
        context['form'] = LoginForm()
        return render(request, 'assistant/login.html', context)
    form = LoginForm(request.POST)
    context['form'] = form
    if not form.is_valid():
        return render(request, 'assistant/login.html', context)
    new_user = authenticate(username=form.cleaned_data['username'],
                            password=form.cleaned_data['password'])
    if new_user is None:
        # Bad credentials: show the form again rather than crashing.
        return render(request, 'assistant/login.html', context)
    login(request, new_user)
    return redirect(reverse('cookbook'))
def select_recipe(request):
    """Return JSON details for one recipe: name, category, ingredients
    (with quantities/units, defaulting to 1 / ''), and optional items.

    Responds with ``[]`` when no 'recipe' key is POSTed.
    """
    payload = []
    if 'recipe' in request.POST:
        recipe = RecipeItem.objects.get(shortname=request.POST['recipe'])
        detail = {
            'name': recipe.name,
            'shortname': recipe.shortname,
            'category': recipe.category,
        }
        ingredient_entries = []
        for ingredient in recipe.ingredients.all():
            entry = {'name': ingredient.name}
            if len(recipe.ingr_quants.filter(name=ingredient.name)) > 0:
                quant_item = recipe.ingr_quants.get(name=ingredient.name)
                entry['quantity'] = quant_item.quantity
                entry['unit'] = quant_item.units
            else:
                # No explicit quantity recorded: assume a single unit.
                entry['quantity'] = 1
                entry['unit'] = ''
            ingredient_entries.append(entry)
        detail['ingredients'] = ingredient_entries
        detail['options'] = [opt.name for opt in recipe.optional_ingredients.all()]
        payload.append(detail)
    return HttpResponse(json.dumps(payload), content_type='application/json')
def select_ingredient(request):
    """Return the JSON attributes of one GroceryItem named in the POST.

    Responds with ``[]`` when no 'ingredient' key is POSTed.
    """
    payload = []
    if 'ingredient' in request.POST:
        item = GroceryItem.objects.get(name=request.POST['ingredient'])
        payload.append({
            'name': item.name,
            'category': item.category,
            'gluten': item.gluten,
            'dairy': item.dairy,
            'default_quant': item.default_quant,
            'units': item.units,
        })
    return HttpResponse(json.dumps(payload), content_type='application/json')
def edit_recipe(request):
    """Create or update a RecipeItem from the recipe-edit form POST,
    reconciling its ingredient and optional-ingredient sets with the
    submitted form; GET just renders the edit page.

    NOTE(review): ``name`` and ``category`` are only bound when their POST
    keys are present; a partial POST raises NameError below -- confirm the
    form always submits both fields.
    """
    context = {}
    if(request.method == 'GET'):
        return render(request,"assistant/edit.html", context)
    if('recipe_shortname' in request.POST):
        shortname = request.POST['recipe_shortname']
    else:
        # The shortname is the lookup key; without it there is nothing to edit.
        return render(request,"assistant/edit.html", context)
    if('recipe_name' in request.POST):
        name = request.POST['recipe_name']
    if('recipe_cat' in request.POST):
        category = request.POST['recipe_cat'].lower()
    if(len(RecipeItem.objects.filter(shortname=shortname)) > 0):
        recipe = RecipeItem.objects.get(shortname=shortname)
        if(recipe.name != name):
            recipe.name = name
        if(recipe.category != category):
            recipe.category = category
    else:
        recipe = RecipeItem(name=name, shortname=shortname, category=category)
    recipe.save()
    # Mark every currently-linked (optional) ingredient False; anything
    # still False after processing the POST gets unlinked at the end.
    curIngr={}
    curOpt={}
    for ingredient in recipe.ingredients.all():
        curIngr[ingredient.name] = False
    for option in recipe.optional_ingredients.all():
        curOpt[option.name] = False
    #add ingredients found in post
    if('num_ingredients' in request.POST):
        num_ingredients = int(request.POST['num_ingredients'])
        for i in range(num_ingredients):
            print('ingredient_input_'+str(i))
            if('ingredient_input_'+str(i) in request.POST):
                ingredient_name = request.POST['ingredient_input_'+str(i)]
                # Ingredient names are stored with a leading space; '+' is
                # the form's stand-in for a space.
                if(ingredient_name[0] != ' '):
                    ingredient_name = ' '+ingredient_name
                ingredient_name = ingredient_name.replace('+',' ').lower()
                if(ingredient_name in curIngr):
                    curIngr[ingredient_name] = True
                if(len(GroceryItem.objects.filter(name=ingredient_name)) > 0):
                    ingredient = GroceryItem.objects.get(name=ingredient_name)
                else:
                    ingredient = GroceryItem(name=ingredient_name, quantity=0, category='misc')
                ingredient.save()
                if(ingredient not in recipe.ingredients.all()):
                    recipe.ingredients.add(ingredient)
                # Per-recipe quantity/units live in a separate IngredientItem.
                if('ingredient_quant_input_'+str(i) in request.POST):
                    quant = request.POST['ingredient_quant_input_'+str(i)]
                    if('ingredient_unit_input_'+str(i) in request.POST):
                        unit = request.POST['ingredient_unit_input_'+str(i)]
                        if(len(recipe.ingr_quants.filter(name=ingredient_name))>0):
                            ingritem = recipe.ingr_quants.get(name=ingredient_name)
                            if(ingritem.quantity != quant):
                                ingritem.quantity = quant
                            if(ingritem.units != unit):
                                ingritem.units = unit
                            ingritem.save()
                        else:
                            ingritem = IngredientItem(name=ingredient_name,quantity=quant,units=unit)
                            ingritem.save()
                            recipe.ingr_quants.add(ingritem)
    if('num_options' in request.POST):
        num_options = int(request.POST['num_options'])
        for i in range(num_options):
            if('option_input_'+str(i) in request.POST):
                option_name = request.POST['option_input_'+str(i)]
                if(option_name[0] != ' '):
                    option_name = ' '+option_name
                option_name = option_name.replace('+',' ').lower()
                if(option_name in curOpt):
                    curOpt[option_name] = True
                if(len(GroceryItem.objects.filter(name=option_name)) > 0):
                    option = GroceryItem.objects.get(name=option_name)
                else:
                    option = GroceryItem(name=option_name, quantity=0, category='misc')
                option.save()
                if(option not in recipe.optional_ingredients.all()):
                    recipe.optional_ingredients.add(option)
    #remove ingredients not found in post
    for ingrName,included in curIngr.items():
        ingredient = GroceryItem.objects.get(name=ingrName)
        if(not included):
            recipe.ingredients.remove(ingredient)
    for ingrName,included in curOpt.items():
        ingredient = GroceryItem.objects.get(name=ingrName)
        if(not included):
            recipe.optional_ingredients.remove(ingredient)
    recipe.save()
    return edit(request)
def edit_ingredient(request):
    """Create or update a GroceryItem from the ingredient-edit form POST.

    Bug fix: the original only bound ``name``/``category``/``default_quant``
    /``units`` when their POST keys were present, so a partial POST raised
    NameError further down; missing required fields now bail out early.
    """
    context = {}
    if request.method == 'GET':
        return render(request, "assistant/edit.html", context)
    required = ('ingredient_name', 'ingr_cat',
                'ingredient_default_quant', 'ingredient_units')
    if not all(key in request.POST for key in required):
        # Incomplete form: nothing sensible to save.
        return edit(request)
    # Ingredient names are stored with a leading space.
    name = ' ' + request.POST['ingredient_name'].lower()
    category = request.POST['ingr_cat'].lower()
    # Checkboxes send 'on' when ticked and are absent otherwise.
    gluten = request.POST.get('ingredient_gluten') == 'on'
    dairy = request.POST.get('ingredient_dairy') == 'on'
    default_quant = request.POST['ingredient_default_quant']
    units = request.POST['ingredient_units']
    if len(GroceryItem.objects.filter(name=name)) > 0:
        # Update only the fields that actually changed.
        ingr = GroceryItem.objects.get(name=name)
        if ingr.category != category:
            ingr.category = category
        if ingr.gluten != gluten:
            ingr.gluten = gluten
        if ingr.dairy != dairy:
            ingr.dairy = dairy
        if ingr.default_quant != default_quant:
            ingr.default_quant = default_quant
        if ingr.units != units:
            ingr.units = units
    else:
        ingr = GroceryItem(name=name,
                           category=category,
                           gluten=gluten,
                           dairy=dairy,
                           default_quant=default_quant,
                           units=units)
    ingr.save()
    return edit(request)
@csrf_exempt
def buy_recipe(request):
    """Put a recipe's missing ingredients (and any selected optional
    ingredients) onto the grocery list.

    An ingredient whose name matches another recipe is treated as
    home-made: it is only bought if that sub-recipe is itself missing an
    ingredient.

    Bug fixes:
      * the optional-ingredient loop iterated ``ingr.optional_ingredients``
        -- ``ingr`` being a GroceryItem left over from the previous loop --
        instead of ``recipe.optional_ingredients``;
      * the water check compared against ``'water'`` while names are stored
        with a leading space (``' water'``, as the required-ingredient loop
        does), so water was never actually skipped.
    """
    if request.method == 'GET':
        return render(request, "assistant/cookbook.html", {})
    if 'recipe' in request.POST:
        recipe = RecipeItem.objects.get(shortname=request.POST['recipe'])
        for ingr in recipe.ingredients.all():
            if ingr.quantity < 1 and ingr.name != ' water':
                delta_quant = 1
                delta_unit = ''
                if len(recipe.ingr_quants.filter(name=ingr.name)) > 0:
                    ingr_item = recipe.ingr_quants.get(name=ingr.name)
                    delta_quant = ingr_item.quantity
                    delta_unit = ingr_item.units
                home_name = ingr.name[1:].lower()
                if len(RecipeItem.objects.filter(name=home_name)) > 0:
                    # Home-made ingredient: buy only if its own recipe is
                    # missing something.
                    home_recipe = RecipeItem.objects.get(name=home_name)
                    for sub in home_recipe.ingredients.all():
                        if sub.quantity == 0 and sub.name != ' water':
                            ingr.quantity = ingr.quantity - convert(delta_quant, delta_unit, ingr.units)
                            ingr.save()
                            break
                else:
                    ingr.quantity = ingr.quantity - convert(delta_quant, delta_unit, ingr.units)
                    ingr.save()
        for i in range(len(recipe.optional_ingredients.all())):
            key = 'optional' + str(i)
            if key in request.POST:
                opt = recipe.optional_ingredients.get(
                    name=request.POST[key].replace('_', ' '))
                if opt.quantity == 0 and opt.name != ' water':
                    home_name = opt.name[1:].lower()
                    if len(RecipeItem.objects.filter(name=home_name)) > 0:
                        home_recipe = RecipeItem.objects.get(name=home_name)
                        for sub in home_recipe.ingredients.all():
                            if sub.quantity == 0 and sub.name != ' water':
                                opt.quantity = opt.default_quant * -1
                                opt.save()
                                break
                    else:
                        opt.quantity = opt.default_quant * -1
                        opt.save()
    return HttpResponse(json.dumps([]), content_type='application/json')
|
998,257 | cf3de74403ee08a3171d84d1616f59b3ec313622 | import math
class Vector3:
    """A mutable 3-component vector.

    Arithmetic methods mutate the instance in place and return ``self`` so
    calls can be chained, e.g. ``v.add(a).multiply_scalar(2)``.
    """

    def __init__(self, x=0, y=0, z=0):
        self.x = x
        self.y = y
        self.z = z

    def to_string(self):
        """Return a human-readable 'x: .., y:.., z:..' string."""
        return "x: {0}, y:{1}, z:{2}".format(self.x, self.y, self.z)

    def clone(self):
        """Return a new, independent Vector3 with the same components."""
        return Vector3(self.x, self.y, self.z)

    def set_x(self, x):
        self.x = x

    def get_x(self):
        return self.x

    def set_y(self, y):
        self.y = y

    def get_y(self):
        return self.y

    def set_z(self, z):
        self.z = z

    def get_z(self):
        return self.z

    def set_components_from_array(self, pointList):
        """Set (x, y, z) from the first three entries of *pointList*."""
        self.x = pointList[0]
        self.y = pointList[1]
        self.z = pointList[2]

    def get_components(self):
        """Return the components as an (x, y, z) tuple."""
        return self.x, self.y, self.z

    def add(self, v):
        """In-place component-wise addition of vector *v*."""
        self.x += v.x
        self.y += v.y
        self.z += v.z
        return self

    def sub(self, v):
        """In-place component-wise subtraction of vector *v*."""
        self.x -= v.x
        self.y -= v.y
        self.z -= v.z
        return self

    def multiply(self, v):
        """In-place component-wise (Hadamard) multiplication by *v*."""
        self.x *= v.x
        self.y *= v.y
        self.z *= v.z
        return self

    def divide(self, v):
        """In-place component-wise division by *v* (raises on zero components)."""
        self.x /= v.x
        self.y /= v.y
        self.z /= v.z
        return self

    def add_scaler(self, s):
        """Add scalar *s* to every component."""
        self.x += s
        self.y += s
        self.z += s
        return self

    def sub_scaler(self, s):
        """Subtract scalar *s* from every component."""
        self.x -= s
        self.y -= s
        self.z -= s
        return self

    def add_vectors(self, a, b):
        """Set this vector to a + b."""
        self.x = a.x + b.x
        self.y = a.y + b.y
        self.z = a.z + b.z
        return self

    def sub_vectors(self, a, b):
        """Set this vector to a - b."""
        self.x = a.x - b.x
        self.y = a.y - b.y
        self.z = a.z - b.z
        return self

    def add_scaled_vector(self, v, s):
        """Add s * v to this vector."""
        self.x += v.x * s
        self.y += v.y * s
        self.z += v.z * s
        return self

    def multiply_scalar(self, scalar):
        """Scale by *scalar*; a non-finite scalar collapses to the zero vector."""
        if math.isfinite(scalar):
            self.x *= scalar
            self.y *= scalar
            self.z *= scalar
        else:
            self.x = 0
            self.y = 0
            self.z = 0
        return self

    def divide_scalar(self, scalar):
        """Scale by 1/scalar.

        Bug fix: a zero divisor previously raised ZeroDivisionError before
        multiply_scalar's non-finite guard could run; it is now routed
        through that guard, so e.g. normalize() of a zero vector yields the
        zero vector instead of crashing.
        """
        if scalar == 0:
            return self.multiply_scalar(math.inf)
        return self.multiply_scalar(1 / scalar)

    def dot(self, v):
        """Return the dot product with *v*."""
        return self.x * v.x + self.y * v.y + self.z * v.z

    def cross(self, v):
        """In-place cross product: self = self x v."""
        x = self.x
        y = self.y
        z = self.z
        self.x = y * v.z - z * v.y
        self.y = z * v.x - x * v.z
        self.z = x * v.y - y * v.x
        return self

    def length_sq(self):
        """Return the squared Euclidean length (cheaper than length())."""
        return self.x * self.x + self.y * self.y + self.z * self.z

    def length(self):
        """Return the Euclidean length."""
        return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)

    def normalize(self):
        """Scale to unit length (zero vectors stay zero; see divide_scalar)."""
        return self.divide_scalar(self.length())

    def set_length(self, length):
        """Rescale so the vector has the given length."""
        return self.multiply_scalar(length / self.length())

    def lerp(self, v, alpha):
        """Linearly interpolate toward *v* by fraction *alpha* in [0, 1]."""
        self.x += (v.x - self.x) * alpha
        self.y += (v.y - self.y) * alpha
        self.z += (v.z - self.z) * alpha
        return self

    def angle_to(self, v):
        """Return the angle to *v* in radians.

        Bug fix: the original called ``math.clamp``, which does not exist
        in the standard library; the cosine is clamped manually to [-1, 1]
        to absorb floating-point drift.
        """
        theta = self.dot(v) / math.sqrt(self.length_sq() * v.length_sq())
        return math.acos(max(-1.0, min(1.0, theta)))

    def distance_to(self, v):
        """Return the Euclidean distance to *v*."""
        return math.sqrt(self.distance_to_squared(v))

    def distance_to_squared(self, v):
        """Return the squared Euclidean distance to *v*."""
        dx = self.x - v.x
        dy = self.y - v.y
        dz = self.z - v.z
        return dx * dx + dy * dy + dz * dz

    def to_serializable(self):
        """Return a plain ``{'x', 'y', 'z'}`` dict (e.g. for JSON dumps).

        Bug fix: the original body was the bare expression ``self`` with no
        return, so the method always returned None; it now mirrors
        from_deserialized().
        """
        return {'x': self.x, 'y': self.y, 'z': self.z}

    @staticmethod
    def from_deserialized(deserialized_vector_3):
        """Rebuild a Vector3 from a dict with 'x', 'y', 'z' keys."""
        return Vector3(
            deserialized_vector_3['x'],
            deserialized_vector_3['y'],
            deserialized_vector_3['z'],
        )
|
998,258 | 7a430fe7a8985653def8a3481429c06cd89b8488 | import itertools
# Read the three box dimensions and the three per-face material costs,
# then print the cheapest total edge length over all face assignments.
dims = [int(tok) for tok in input().split()]
costs = [int(tok) for tok in input().split()]
dims.sort()


def frame_cost(a, b, c):
    # Each face pair contributes twice the sum of its two edge lengths,
    # weighted by that face's cost.
    return (2 * (a + b) * costs[0]
            + 2 * (b + c) * costs[1]
            + 2 * (c + a) * costs[2])


print(min(frame_cost(*perm) for perm in itertools.permutations(dims)))
|
998,259 | c07552b19e1a8889a535948b5ed6c0dcb5967621 | from problem00 import print1DList
def find_greatest_student(students_information_list):
    """Return (name, gpa) for the student with the highest GPA.

    Ties keep the student that appears first in the list.

    :param students_information_list: list of (name, s1..s5) tuples
    :return: tuple (greatest_student_name, highest_gpa)
    """
    best_name = ''
    best_gpa = -1
    for record in students_information_list:
        candidate_gpa = get_gpa(get_student_scores(record))
        if candidate_gpa > best_gpa:
            best_name = get_student_name(record)
            best_gpa = candidate_gpa
    return best_name, best_gpa
def get_student_name(student_information):
    """Return the student's name (element 0 of the record tuple)."""
    name = student_information[0]
    return name
def get_student_scores(student_information):
    """Return the five subject scores (record elements 1 through 5) as a list."""
    return [student_information[i] for i in range(1, 6)]
def get_gpa(scores):
    """Average the per-subject GPA values for a list of raw scores.

    :param scores: list of 0-100 subject scores
    :return: mean of the per-subject GPA bands
    """
    return get_average([calculate_gpa(score) for score in scores])
def calculate_gpa(score):
    """Map a 0-100 score onto the 0-4 GPA scale (10-point bands from 60).

    :param score: number between 0 and 100
    :return: 0, 1, 2, 3, or 4
    """
    if score >= 90:
        return 4
    if score >= 80:
        return 3
    if score >= 70:
        return 2
    if score >= 60:
        return 1
    return 0
def get_average(array):
    """Return the arithmetic mean of the values in *array*.

    :param array: non-empty sequence of numbers
    :return: the mean as a float
    """
    return sum(array) / len(array)
def main():
    """Run find_greatest_student over three demo rosters and print results."""
    # Expected: ('Ali', 4.0) -- perfect scores in every subject.
    roster = [
        ('Marcus', 95, 90, 85, 80, 75),
        ('Ali', 100, 100, 100, 100, 100),
        ('Monkey', 40, 40, 40, 40, 40),
    ]
    print1DList(find_greatest_student(roster))
    print()
    # Expected: ('Marcus', 3.0) -- Marcus and Ali tie, first one wins.
    roster = [
        ('Marcus', 80, 80, 80, 80, 80),
        ('Ali', 90, 70, 90, 70, 80),
        ('Monkey', 59, 60, 69, 70, 79),
        ('Mike', 80, 89, 90, 50, 50),
    ]
    print1DList(find_greatest_student(roster))
    print()
    # Expected: ('Marcus', 0.0) -- everyone is below the lowest GPA band.
    roster = [
        ('Marcus', 50, 50, 40, 30, 40),
        ('Ali', 40, 50, 59, 50, 50),
    ]
    print1DList(find_greatest_student(roster))


main()
|
998,260 | 7edb4be9fe6031fe5e73e1652ed97f55581199a0 | """
Good morning! Here's your coding interview problem for today.
This problem was recently asked by Google.
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?
"""
def add_up_to(array, k):
    """Return whether two DISTINCT elements of *array* sum to *k*.

    Bug fix: the inner loop previously started at ``i`` instead of
    ``i + 1``, so a single element equal to ``k / 2`` was counted as a
    pair with itself (e.g. ``[5]`` with ``k=10`` wrongly returned True).
    This also makes the behavior consistent with add_up_to_bonus, which
    already excluded same-index pairs.

    Args:
        array (list): The array to be processed
        k (int): The integer that can be the sum of two numbers of the array

    Returns:
        bool: True when some pair array[i] + array[j] with i < j equals k
    """
    for i in range(len(array)):
        for j in range(i + 1, len(array)):
            if array[i] + array[j] == k:
                return True
    return False
def add_up_to_bonus(array, k):
    """Return whether two distinct elements of *array* sum to *k*, one pass.

    Improvement: the original tested ``(k - array[i]) in array[(i+1):]``,
    which copies a slice and scans it for every element -- O(n^2) overall,
    defeating the "one pass" goal. This version keeps a set of previously
    seen values, giving a true single pass with O(1) membership tests.

    Args:
        array (list): The array to be processed
        k (int): The integer that can be the sum of two numbers of the array

    Returns:
        bool: True when two distinct elements of the array add up to k
    """
    seen = set()
    for value in array:
        if k - value in seen:
            return True
        seen.add(value)
    return False
if __name__ == '__main__':
    # Demo run: 25 = 10 + 15, so both implementations print True.
    print(add_up_to([10, 15, 3, 7], 25))
    print(add_up_to_bonus([10, 15, 3, 7], 25))
|
998,261 | 4c93e51ad4488e6c54632bf7f2628c5bb2765c26 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
import pdb
from ...utils import visdom_render
from ...utils import transformations
from ...utils import visutil
from ...utils import mesh
from ...utils import cub_parse
from ...utils.visualizer import Visualizer
from ...nnutils import geom_utils
import pymesh
from ...utils import bird_vis
from ...nnutils.nmr import NeuralRenderer
from ...utils import render_utils
from ...nnutils import icn_net, geom_utils
from ...nnutils import loss_utils as loss_utils
from ...data import cub as cub_data
from ...data import p3d as p3d_data
from ...nnutils import test_utils
"""
Script for testing on CUB.
Sample usage: python -m cmr.benchmark.csp_keypoint --split val --name
<model_name> --num_train_epoch <model_epoch>
"""
from . import pck_eval
from absl import app
from absl import flags
import os
import os.path as osp
import numpy as np
import torch
import torchvision
import scipy.io as sio
# Colormap used for keypoint visualisation throughout this module.
cm = plt.get_cmap('jet')
# from matplotlib import set_cmap
# Command-line flags controlling evaluation behaviour and prediction dumps.
flags.DEFINE_boolean('visualize', False, 'if true visualizes things')
flags.DEFINE_integer('seed', 0, 'seed for randomness')
flags.DEFINE_boolean('pose_dump', True, 'scale_trans_predictions dumped to a file')
flags.DEFINE_boolean('mask_dump', True, 'dump seg mask to file')
flags.DEFINE_string('quat_predictions_path', None, 'Load pose annotations')
flags.DEFINE_string('mask_predictions_path', None, 'Load mask annotations')
flags.DEFINE_boolean('robust', False, 'evaluate using a roboust measure')
flags.DEFINE_string('dataset', 'cub', 'Evaulate on birds')
opts = flags.FLAGS
# color_map = cm.jet(0)
# PCK thresholds (fraction of image size) used for keypoint evaluation.
kp_eval_thresholds = [0.05, 0.1, 0.2]
class CSPTester(test_utils.Tester):
    def define_model(self,):
        """Build the ICPNet predictor, load its checkpoint, and set up the
        renderers and bookkeeping used during evaluation.

        Side effects: populates self.model (eval mode, on GPU), the neural
        renderers, the UV->3D projection helper, and -- depending on the
        pose_dump/mask_dump flags -- the dicts that accumulate predictions
        for later dumping.
        """
        opts = self.opts
        self.img_size = opts.img_size
        self.model = icn_net.ICPNet(opts)
        self.load_network(self.model, 'pred', self.opts.num_train_epoch)
        self.mask_preds = None
        if opts.mask_predictions_path is not None:
            # Precomputed segmentation masks loaded from a .mat dump.
            print('populating mask for birds')
            self.mask_preds = sio.loadmat(opts.mask_predictions_path)
        self.model.cuda()
        self.model.eval()
        # Round the working resolution down to a multiple of 64.
        self.upsample_img_size = (
            (opts.img_size // 64) * (2**6), (opts.img_size // 64) * (2**6))
        self.camera_solver = geom_utils.CameraSolver(self.Tensor, self.device)
        self.offset_z = 5.0
        self.uv2points = cub_parse.UVTo3D(self.mean_shape)
        self.model_obj = pymesh.form_mesh(self.mean_shape['verts'].data.cpu(
        ).numpy(), self.mean_shape['faces'].data.cpu().numpy())
        self.model_obj_path = osp.join(
            self.opts.cachedir, 'cub', 'model', 'mean_bird.obj')
        # One sampling grid per image; batches are pairs, hence * 2.
        self.grid = cub_parse.get_sample_grid(self.upsample_img_size).repeat(
            opts.batch_size * 2, 1, 1, 1).to(self.device)
        self.init_render()
        self.kp_names = self.dl_img1.dataset.sdset.kp_names
        self.renderer_mask = NeuralRenderer(opts.img_size)
        self.hypo_mask_renderers = [NeuralRenderer(opts.img_size) for _ in range(opts.num_hypo_cams)]
        self.renderer_depth = NeuralRenderer(opts.img_size)
        self.hypo_depth_renderers = [NeuralRenderer(opts.img_size) for _ in range(opts.num_hypo_cams)]
        # self.render_mean_bird_with_uv()
        if opts.pose_dump:
            self.scale_trans_preds = {}  # iter_index -> scale/trans for both images
            self.quat_preds = {}  # iter_index -> quaternions for both images
        if opts.mask_dump:
            self.mask_preds = {}
        return
    def init_render(self, ):
        """Set up the Visdom renderer, texture UV sampler, and keypoint
        colormap used for qualitative visualisations."""
        opts = self.opts
        faces_np = self.mean_shape['faces'].data.cpu().numpy()
        verts_np = self.mean_shape['sphere_verts'].data.cpu().numpy()
        # One distinct jet-colormap color per keypoint (15 keypoints here).
        self.keypoint_cmap = [cm(i * 17) for i in range(15)]
        vis_rend = bird_vis.VisRenderer(opts.img_size, faces_np)
        uv_sampler = mesh.compute_uvsampler(
            verts_np, faces_np, tex_size=opts.tex_size)
        uv_sampler = torch.from_numpy(uv_sampler).float().cuda()
        # Reshape to (1, n_faces, tex_size^2, 2) for per-face texture sampling.
        self.uv_sampler = uv_sampler.view(-1, len(faces_np),
                                          opts.tex_size * opts.tex_size, 2)
        self.verts_obj = self.mean_shape['verts']
        self.visdom_renderer = visdom_render.VisdomRenderer(
            vis_rend, self.verts_obj, self.uv_sampler, self.offset_z,
            self.mean_shape_np, self.model_obj_path, self.keypoint_cmap, self.opts)
        return
    def init_dataset(self,):
        """Create the paired test dataloaders and load the category mean
        shape for the selected dataset (CUB birds or PASCAL3D)."""
        opts = self.opts
        if opts.dataset == 'cub':
            print('Loading the Birds dataset')
            dataloader_fn = cub_data.cub_test_pair_dataloader
        elif opts.dataset == 'p3d':
            print('Loading the p3d dataset {}'.format(opts.p3d_class))
            dataloader_fn = p3d_data.p3d_test_pair_dataloader
        else:
            assert False, 'Incorrect dataset type, {}'.format(opts.dataset)
        # Two loaders over the same pairs: one yields the first image of
        # each pair, the other the second.
        self.dl_img1 = dataloader_fn(opts, 1)
        self.dl_img2 = dataloader_fn(opts, 2)
        # Standard ImageNet normalisation for the ResNet backbone.
        self.resnet_transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])
        if opts.dataset == 'p3d':
            # NOTE(review): both branches build the same path; the 'car'
            # special case looks vestigial.
            if opts.p3d_class == 'car':
                mpath = osp.join(opts.p3d_cache_dir, '../shapenet/', 'car', 'shape.mat')
            else:
                mpath = osp.join(opts.p3d_cache_dir, '../shapenet/', opts.p3d_class, 'shape.mat')
        elif opts.dataset == 'cub':
            mpath = osp.join(opts.cub_cache_dir, '../shapenet/', 'bird', 'shape.mat')
        print('Loading Mean shape from {}'.format(mpath))
        self.mean_shape = cub_parse.load_mean_shape(mpath, self.device)
        self.mean_shape_np = sio.loadmat(mpath)
    def set_input(self, batch):
        """Unpack a pair batch onto the GPU and derive ground-truth
        keypoint visibility and pixel-index tensors.

        Populates self.codes_gt with 'kp_uv', 'kp', 'cam', 'kps_vis' and
        'kps_ind', plus self.inds / self.input_img_tensor / self.mask.
        """
        opts = self.opts
        batch = cub_parse.collate_pair_batch(batch)
        input_imgs = batch['img'].type(self.Tensor)
        mask = batch['mask'].type(self.Tensor)
        for b in range(input_imgs.size(0)):
            input_imgs[b] = self.resnet_transform(input_imgs[b])
        self.inds = [k.item() for k in batch['inds']]
        self.input_img_tensor = input_imgs.to(self.device)
        self.mask = mask.to(self.device)
        self.codes_gt = {}
        self.kp_uv = batch['kp_uv'].type(self.Tensor).to(self.device)
        self.codes_gt['kp_uv'] = self.kp_uv
        self.codes_gt['kp'] = batch['kp'].type(self.Tensor).to(self.device)
        cam_pose = batch['sfm_pose'].type(self.Tensor)
        self.cam_pose = cam_pose.to(self.device)
        self.codes_gt['cam'] = self.cam_pose
        # kp[..., 2] is a visibility flag; kp[..., :2] are coords in
        # [-1, 1], mapped below to pixel indices in [0, img_size].
        kps_vis = self.codes_gt['kp'][..., 2] > 0
        kps_ind = (self.codes_gt['kp'] * 0.5 + 0.5) * \
            self.input_img_tensor.size(-1)
        self.codes_gt['kps_vis'] = kps_vis
        self.codes_gt['kps_ind'] = kps_ind
        return
    def predict(self,):
        """Run the network on the current pair and cache predictions in
        self.codes_pred.

        If masks were preloaded from file (and we are not dumping), the
        predicted segmentation mask is replaced by the loaded one;
        otherwise the current predictions are recorded for dumping.
        """
        opts = self.opts
        feed_dict = {}
        feed_dict['img'] = self.input_img_tensor
        feed_dict['mask'] = self.mask
        codes_pred = self.model.forward(feed_dict)
        b_size = len(self.mask)
        ratio = self.upsample_img_size[1] * 1.0 / self.upsample_img_size[0]
        # Resample mask and image onto the upsampled prediction grid.
        mask = torch.nn.functional.grid_sample(
            self.mask.unsqueeze(1), self.grid[0:b_size])
        img = torch.nn.functional.grid_sample(
            self.input_img_tensor, self.grid[0:b_size])
        kps_vis = self.codes_gt['kps_vis']
        kps_uv = 0 * self.codes_gt['kp'][:, :, 0:2]
        kps_ind = self.codes_gt['kps_ind'].long()
        kps_ind_modified = 0 * kps_ind
        uv_maps = codes_pred['uv_map']
        # Read off the predicted UV coordinate under every GT keypoint.
        for bx in range(len(kps_vis)):
            for kx in range(len(kps_vis[bx])):
                rx = kps_ind[bx][kx][1]
                cx = kps_ind[bx][kx][0]
                kps_uv[bx, kx] = uv_maps[bx, rx, cx]
        self.codes_pred = codes_pred
        if self.mask_preds is not None and not opts.mask_dump:
            self.codes_pred['seg_mask'] = self.populate_mask_from_file().squeeze()
        else:
            self.dump_predictions()
        return
    def dump_predictions(self,):
        """Record the current pair's predicted cameras and/or masks into
        the in-memory dicts that test() later writes out as .mat files.
        Keys are the zero-padded iteration index."""
        opts = self.opts
        iter_index = "{:05}".format(self.iter_index)
        if opts.pose_dump:
            codes_pred = self.codes_pred
            # cam rows: [scale, tx, ty, quat(4)]; row 0 / 1 = image 1 / 2.
            camera = codes_pred['cam'].data.cpu().numpy()
            pose1 = {'scale_p1': camera[0, 0], 'trans_p1': camera[0, 1:3]}
            pose2 = {'scale_p2': camera[1, 0], 'trans_p2': camera[1, 1:3]}
            pose = pose1
            pose.update(pose2)
            pose['ind1'] = self.inds[0]
            pose['ind2'] = self.inds[1]
            self.scale_trans_preds[iter_index] = pose
            pose1 = {'quat_p1': camera[0, 3:7]}
            pose2 = {'quat_p2': camera[1, 3:7]}
            pose = pose1
            pose.update(pose2)
            self.quat_preds[iter_index] = pose
        if opts.mask_dump:
            mask_np = self.codes_pred['seg_mask'].data.cpu().numpy()
            mask = {}
            mask['mask_1'] = mask_np[0]
            mask['mask_2'] = mask_np[1]
            self.mask_preds[iter_index] = mask
    def populate_pose_from_file(self,):
        """Reassemble the (2, 7) camera tensor [scale, tx, ty, quat] for
        the current pair from previously-dumped predictions.

        NOTE(review): the ``[0, 0][0]`` indexing on the quaternions matches
        the nesting scipy.io.loadmat produces for dict-of-dict dumps --
        confirm against the dump format written in test().
        """
        iter_index = "{:05}".format(self.iter_index)
        st = self.scale_trans_preds[iter_index]
        quat = self.quat_preds[iter_index]
        p1_s = np.array([st['scale_p1']])
        p2_s = np.array([st['scale_p2']])
        p1_t = st['trans_p1']
        p2_t = st['trans_p2']
        p1_q = quat['quat_p1'][0, 0][0]
        p2_q = quat['quat_p2'][0, 0][0]
        camera1 = np.concatenate([p1_s, p1_t, p1_q], axis=0)
        camera2 = np.concatenate([p2_s, p2_t, p2_q], axis=0)
        camera = np.stack([camera1, camera2], axis=0)
        return torch.from_numpy(camera.copy()).float().type(self.Tensor)
def populate_mask_from_file(self,):
iter_index = "{:05}".format(self.iter_index)
masks = self.mask_preds[iter_index]
mask1 = masks['mask_1'][0, 0]
mask2 = masks['mask_2'][0, 0]
mask = np.stack([mask1, mask2])
return torch.from_numpy(mask).float().type(self.Tensor)
def find_nearest_point_on_mask(self, mask, x, y):
img_H = mask.size(0)
img_W = mask.size(1)
non_zero_inds = torch.nonzero(mask)
distances = (non_zero_inds[:, 0] - y)**2 + (non_zero_inds[:, 1] - x) ** 2
min_dist, min_index = torch.min(distances, dim=0)
min_index = min_index.item()
return non_zero_inds[min_index][1].item(), non_zero_inds[min_index][0].item()
    def map_kp_img1_to_img2(self, vis_inds, kps1, kps2, uv_map1, uv_map2, mask1, mask2):
        """Transfer keypoints from image 1 to image 2 via the predicted UV maps.

        Each keypoint of image 1 is lifted to 3D through uv_map1, then
        matched to the nearest 3D point among image 2's (masked) pixels.

        Args:
            vis_inds: indices of keypoints visible in both images.
            kps1, kps2: per-image keypoint pixel coordinates (with a
                scaled visibility value in column 2).
            uv_map1, uv_map2: dense per-pixel UV predictions (H x W x 2).
            mask1, mask2: foreground masks (mask1 is unused here).

        Returns:
            (transfer_kps, stats): predicted pixel locations in image 2,
            and per-keypoint [transfer error, common-kp mask, min 3D dist].
        """
        kp_mask = torch.zeros([len(kps1)]).cuda()
        kp_mask[vis_inds] = 1
        kps1 = kps1.long()
        # Column 2 is the visibility flag scaled by img_size upstream, so
        # visible -> img_size (> 200) and hidden -> img_size/2 (<= 200);
        # assumes 200 < img_size <= 400 -- TODO confirm.
        kps1_vis = kps1[:, 2] > 200
        img_H = uv_map2.size(0)
        img_W = uv_map2.size(1)
        kps1_uv = uv_map1[kps1[:, 1], kps1[:, 0], :]
        kps1_3d = geom_utils.project_uv_to_3d(self.uv2points, kps1_uv[None, None, :, :])
        uv_points3d = geom_utils.project_uv_to_3d(self.uv2points, uv_map2[None, :, :, :])
        # Pairwise 3D distances (keypoints x pixels); background pixels and
        # invisible keypoints are pushed far away with a large additive
        # penalty instead of being excluded.
        distances3d = torch.sum((kps1_3d.view(-1, 1, 3) - uv_points3d.view(1, -1, 3))**2, -1).sqrt()
        distances3d = distances3d + (1 - mask2.view(1, -1)) * 1000
        distances = distances3d
        min_dist, min_indices = torch.min(distances.view(len(kps1), -1), dim=1)
        min_dist = min_dist + (1 - kps1_vis).float() * 1000
        # Flat pixel index -> (x, y) coordinates in image 2.
        transfer_kps = torch.stack(
            [min_indices % img_W, min_indices // img_W], dim=1)
        kp_transfer_error = torch.norm((transfer_kps.float() - kps2[:, 0:2]), dim=1)
        return transfer_kps, torch.stack([kp_transfer_error, kp_mask, min_dist], dim=1)
    def evaluate(self,):
        """Compute bidirectional keypoint transfer (1->2 and 2->1) for the
        current pair.

        Returns:
            Numpy arrays: transfer_kps12, error_kps12, transfer_kps21,
            error_kps21, kps1, kps2.
        """
        # Keypoints visible in both images (visibility flag product > 0.5).
        common_kp_indices = torch.nonzero(
            self.codes_gt['kp'][0, :, 2] * self.codes_gt['kp'][1, :, 2] > 0.5)
        kps_ind = self.codes_gt['kps_ind']
        kps = self.codes_gt['kp']  # coords in [-1, 1]
        uv_map = self.codes_pred['uv_map']
        self.codes_pred['common_kps'] = common_kp_indices
        # Binarise the predicted segmentation mask.
        mask = (self.codes_pred['seg_mask'] > 0.5).float()
        transfer_kps12, error_kps12 = self.map_kp_img1_to_img2(
            common_kp_indices, kps_ind[0], kps_ind[1], uv_map[0], uv_map[1], mask[0], mask[1])
        transfer_kps21, error_kps21 = self.map_kp_img1_to_img2(
            common_kp_indices, kps_ind[1], kps_ind[0], uv_map[1], uv_map[0], mask[1], mask[0])
        kps1 = visutil.torch2numpy(kps_ind[0])
        kps2 = visutil.torch2numpy(kps_ind[1])
        self.codes_pred['tfs_12'] = transfer_kps12
        self.codes_pred['tfs_21'] = transfer_kps21
        return visutil.torch2numpy(transfer_kps12), visutil.torch2numpy(error_kps12), visutil.torch2numpy(transfer_kps21), visutil.torch2numpy(error_kps21), kps1, kps2
def get_current_visuals(self,):
visuals = self.visuals_to_save(self.total_steps, count=1)[0]
visuals.pop('ind')
return visuals
    def visuals_to_save(self, total_steps):
        """Assemble the per-image visualisation dict for the current pair
        (rendered textures, keypoint overlays, UV maps, keypoint-transfer
        images) and save textured meshes under the results directory.

        NOTE(review): reads self.codes_gt['mask'] / ['img'], which are not
        populated by set_input() in this file -- presumably a subclass or
        earlier pipeline stage fills them; verify before reuse. Same for
        self.loss_factors and self.steal_colors().

        Returns:
            List with one visuals dict per image in the pair.
        """
        visdom_renderer = self.visdom_renderer
        opts = self.opts
        batch_visuals = []
        mask = self.codes_gt['mask']
        img = self.codes_gt['img']
        uv_map = self.codes_pred['uv_map']
        results_dir = osp.join(opts.result_dir, "{}".format(
            opts.split), "{}".format(total_steps))
        if not osp.exists(results_dir):
            os.makedirs(results_dir)
        if opts.use_gt_cam:
            camera = self.codes_gt['cam']
        else:
            camera = self.codes_pred['cam']
        for b in range(len(img)):
            visuals = {}
            visuals['z_img'] = visutil.tensor2im(visutil.undo_resnet_preprocess(
                img.data[b, None, :, :, :]))
            # pdb.set_trace()
            visuals['img_kp'] = bird_vis.draw_keypoint_on_image(visuals['z_img'], self.codes_gt['kps_ind'][
                b], self.codes_gt['kps_vis'][b], self.keypoint_cmap)
            visuals['z_mask'] = visutil.tensor2im(
                mask.data.repeat(1, 3, 1, 1)[b, None, :, :, :])
            visuals['uv_x'], visuals['uv_y'] = render_utils.render_uvmap(
                mask[b], uv_map[b].data.cpu())
            # visuals['model'] =
            # (self.render_model_using_cam(self.codes_pred['cam'][b])*255).astype(np.uint8)
            visuals['texture_copy'] = bird_vis.copy_texture_from_img(
                mask[b], img[b], self.codes_pred['xy_map'][b])
            texture_vps = visdom_renderer.render_model_using_nmr(uv_map.data[b], img.data[b], mask.data[b],
                                                                 camera[b], upsample_texture=True)
            visuals.update(texture_vps)
            # texture_uv = visdom_renderer.render_model_uv_using_nmr(
            #     uv_map.data[b], mask.data[b], camera[b])
            # visuals.update(texture_uv)
            visuals['ind'] = "{:04}".format(self.inds[b])
            texture_kp = visdom_renderer.render_kps_heatmap(uv_map.data[b], self.codes_gt['kps_ind'][b], self.codes_gt[
                'kps_vis'][b], camera[b])
            visuals.update(texture_kp)
            # Rendered but not added to the visuals dict (kept for parity
            # with the commented update below).
            texture_gt_kp = visdom_renderer.render_gt_kps_heatmap(
                self.codes_gt['kp_uv'][b], camera[b])
            # visuals.update(texture_gt_kp)
            uv_contour = visdom_render.render_UV_contour(visuals['z_img'], uv_map.data[b].cpu(), mask.data[b].cpu())
            visuals['contour'] = uv_contour
            if opts.pred_xy_cycle:
                texture_cycle = visdom_renderer.render_cycle_images(
                    self.codes_pred['cycle_xy_map_mask'][b],
                    self.codes_pred['cycle_xy_map'][b], img.data[b],
                    mask.data[b], camera[b])
                visuals.update(texture_cycle)
            if opts.multiple_cam_hypo:
                vis_cam_hypotheses = visdom_renderer.render_all_hypotheses(self.codes_pred['cam_hypotheses'][b],
                                                                           self.codes_pred['cam_probs'][b],
                                                                           self.codes_pred['cam_sample_inds'][b].item(),
                                                                           self.codes_gt['cam'][b],
                                                                           [self.loss_factors['per_hypo_loss'][b]])
                visuals.update(vis_cam_hypotheses)
            steal_visuals = self.steal_colors()
            visuals.update(steal_visuals)
            # steal_visuals_cyc = self.steal_colors_cyc()
            # visuals.update(steal_visuals_cyc)
            batch_visuals.append(visuals)
            bird_vis.save_obj_with_texture('{:04}'.format(self.inds[b]), results_dir, visuals[
                'texture_img'], self.mean_shape_np)
        ## transfer key point results:
        mask = self.codes_gt['mask']
        img = self.codes_gt['img']
        kps_ind = self.codes_gt['kps_ind']
        codes_pred = self.codes_pred
        codes_gt = self.codes_gt
        # Transfer visualisations are shared: added to both images' dicts.
        visuals_tfs = bird_vis.render_transfer_kps_imgs(self.keypoint_cmap, batch_visuals[0]['z_img'], batch_visuals[1]['z_img'], kps_ind[0], kps_ind[1],
                                                        self.codes_pred['tfs_12'], self.codes_pred['tfs_21'], self.codes_pred['common_kps'])
        batch_visuals[0].update(visuals_tfs)
        batch_visuals[1].update(visuals_tfs)
        return batch_visuals
def test(self,):
    """Run paired-image keypoint-transfer evaluation and dump results.

    Iterates the two image loaders in lockstep, accumulates transferred
    keypoints and errors in both directions (1->2 and 2->1), saves them
    to ``results_<n>.mat`` (or reloads an existing file unless
    ``opts.force_run``), then runs PCK evaluation over a sweep of
    distance thresholds.
    """
    opts = self.opts
    bench_stats_m1 = {'kps1': [], 'kps2': [], 'transfer': [], 'kps_err': [], 'pair': [], }
    # NOTE(review): bench_stats_m2 is never filled or saved -- looks like
    # dead state kept for symmetry; confirm before removing.
    bench_stats_m2 = {'transfer': [], 'kps_err': [], 'pair': [], }
    n_iter = opts.max_eval_iter if opts.max_eval_iter > 0 else len(
        self.dl_img1)
    result_path = osp.join(
        opts.results_dir, 'results_{}.mat'.format(n_iter))
    print('Writing to %s' % result_path)
    self.visualizer = Visualizer(opts)
    visualizer = self.visualizer
    bench_stats = {}
    self.iter_index = None
    if not osp.exists(result_path) or opts.force_run:
        # NOTE(review): itertools.izip exists only on Python 2; this module
        # appears to target py2 -- switch to zip() when porting to py3.
        from itertools import izip
        for i, batch in enumerate(izip(self.dl_img1, self.dl_img2)):
            self.iter_index = i
            if i % 100 == 0:
                print('{}/{} evaluation iterations.'.format(i, n_iter))
            if opts.max_eval_iter > 0 and (i >= opts.max_eval_iter):
                break
            self.set_input(batch)
            self.predict()
            transfer_kps12, error_kps12, transfer_kps21, error_kps21, kps1, kps2 = self.evaluate()
            if opts.visualize and (i % opts.visuals_freq == 0):
                visualizer.save_current_results(i, self.visuals_to_save(i))
            # Each image pair contributes two entries: the 1->2 transfer and
            # the 2->1 transfer (with kps1/kps2 swapped accordingly).
            bench_stats_m1['transfer'].append(transfer_kps12)
            bench_stats_m1['kps_err'].append(error_kps12)
            bench_stats_m1['kps1'].append(kps1)
            bench_stats_m1['kps2'].append(kps2)
            bench_stats_m1['pair'].append(
                (self.inds[0], self.inds[1]))
            bench_stats_m1['transfer'].append(transfer_kps21)
            bench_stats_m1['kps_err'].append(error_kps21)
            bench_stats_m1['kps1'].append(kps2)
            bench_stats_m1['kps2'].append(kps1)
            bench_stats_m1['pair'].append(
                (self.inds[1], self.inds[0]))
        # Stack the per-pair lists into arrays so they round-trip via .mat.
        bench_stats_m1['kps1'] = np.stack(bench_stats_m1['kps1'])
        bench_stats_m1['kps2'] = np.stack(bench_stats_m1['kps2'])
        bench_stats_m1['transfer'] = np.stack(bench_stats_m1['transfer'])
        bench_stats_m1['kps_err'] = np.stack(bench_stats_m1['kps_err'])
        bench_stats_m1['pair'] = np.stack(bench_stats_m1['pair'])
        bench_stats['m1'] = bench_stats_m1
        if opts.pose_dump:
            pose_file = osp.join(opts.results_dir, 'scale_trans_dump_{}.mat'.format(n_iter))
            sio.savemat(pose_file, self.scale_trans_preds)
            pose_file = osp.join(opts.results_dir, 'quat_dump_{}.mat'.format(n_iter))
            sio.savemat(pose_file, self.quat_preds)
        if opts.mask_dump:
            mask_file = osp.join(opts.results_dir, 'mask_dump_{}.mat'.format(n_iter))
            sio.savemat(mask_file, self.mask_preds)
        sio.savemat(result_path, bench_stats)
    else:
        # Cached run: scipy's .mat round-trip wraps arrays in extra object
        # layers, hence the [0][0] unpacking.
        bench_stats = sio.loadmat(result_path)
        bench_stats_m1 = {}
        bench_stats_m1['pair'] = bench_stats['m1']['pair'][0][0]
        bench_stats_m1['kps_err'] = bench_stats['m1']['kps_err'][0][0]
        bench_stats_m1['transfer'] = bench_stats['m1']['transfer'][0][0]
        bench_stats_m1['kps1'] = bench_stats['m1']['kps1'][0][0]
        bench_stats_m1['kps2'] = bench_stats['m1']['kps2'][0][0]
    dist_thresholds = [1e-4, 1e-3,0.25*1e-2, 0.5*1e-2, 0.75*1e-2, 1E-2, 1E-1, 0.2, 0.3, 0.4, 0.5, 0.6, 10]
    pck_eval.run_evaluation(bench_stats_m1, n_iter, opts.results_dir, opts.img_size, self.kp_names, dist_thresholds)
    return
def main(_):
    """Entry point: configure the results directory, seed all RNG sources
    for reproducibility, then build and run the tester."""
    opts.batch_size = 1
    results_dir = osp.join(
        opts.results_dir_base, opts.name,
        '%s' % (opts.split), 'epoch_%d' % opts.num_train_epoch)
    opts.results_dir = results_dir
    opts.result_dir = results_dir
    if not osp.exists(results_dir):
        print('writing to %s' % results_dir)
        os.makedirs(results_dir)
    # Seed numpy and both torch RNG streams with the same value.
    for seed_rng in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seed_rng(opts.seed)
    tester = CSPTester(opts)
    tester.init_testing()
    tester.test()
# Hand flag parsing and invocation to the absl/tf app runner.
if __name__ == '__main__':
    app.run(main)
|
998,262 | 3e1a498d50235b023a03e24a1106fb8e012e5aa7 | import pandas as pd
from datetime import datetime, timedelta, date
import time
# Season 2020-21 merged gameweek data from the vaastav FPL dataset.
PLAYER_URL = "https://raw.githubusercontent.com/vaastav/Fantasy-Premier-League/master/data/2020-21/gws/merged_gw.csv"
columns = []
df = pd.read_csv(PLAYER_URL)
difficult_teams = [5, 9, 11, 12, 13, 17] # big six
# Flag fixtures against a "big six" opponent as difficult (1) vs not (0).
df["difficult"] = df["opponent_team"].apply(lambda x: 1 if x in difficult_teams else 0)
# Keep only the calendar date of kickoff; the form() window below compares dates.
df["kickoff_time"] = pd.to_datetime(df["kickoff_time"]).dt.date
# Encode home fixtures as 1, away as 0.
df["was_home"] = df["was_home"].apply(lambda x: 1 if x else 0)
def form(player, kickoff):
    """Average points per appearance for ``player`` over the 30 days up to
    ``kickoff``.

    :param player: player id matched against ``df["element"]``.
    :param kickoff: ``datetime.date`` of the fixture being scored.
    :returns: total points divided by games with minutes played, or 0 when
        the player made no appearance in the window.
    """
    is_player = df["element"] == player
    # NOTE(review): the window has no upper bound, so rows on/after
    # `kickoff` are included too -- confirm that is intended.
    last_thirty_days = df["kickoff_time"] >= kickoff - timedelta(days=30)
    df_player = df[is_player & last_thirty_days]
    points = df_player["total_points"].sum()
    # Count appearances directly. The previous `.count()[0]` took the
    # non-null count of whichever column happened to be first, which breaks
    # on column reordering or NaNs in that column.
    games = int((df_player["minutes"] > 0).sum())
    return points / games if games > 0 else 0
# Time the per-row form computation; each row re-filters the whole frame,
# so this is the slow part of the script.
start = time.time()
df["form"] = df.apply(lambda x: form(x["element"], x["kickoff_time"]), axis=1)
end = time.time()
print(end - start)
df.to_csv("fpl.csv", index=False)
|
998,263 | 1ba5d6fb2393b57b0079df42b73e0b4c38f24b8c | some = list()
print(some) |
998,264 | 54f9cddeeaab38608e0b8ec99b2b26f62a4e9d34 | rows, col = map(int, input().split(', '))
# Read the matrix one row per input line, then print the sum of each column.
matrix = [[int(token) for token in input().split(' ')] for _ in range(rows)]
for col_idx in range(col):
    print(sum(matrix[row_idx][col_idx] for row_idx in range(rows)))
998,265 | 9074f45c52bed42e513731008574ead6e047fa5d | class QueryStore():
def __init__(self, storage):
    # Handle to the storage object whose netCDF variables the queries below
    # read directly (presumably an OpenPathSampling storage -- verify).
    self.storage = storage
def trajectory_collectivevariable(self, collectivevariable, ensemble=None, replica=None, step=None):
    """
    Return list of collectivevariables fast for specific sets of samples

    samples can be all samples found in specific or all sample_set and filter
    these by ensemble and/or replica.

    Parameters
    ----------
    collectivevariable : paths.CollectiveVariable()
        the collectivevariable from which the values should be extracted
    ensemble : paths.Ensemble or None
        if not None only samples from the specific ensemble are used.
        For `None` (default) all ensembles are considered
    replica : int or None
        if not None only samples from the specific replica ID are used.
        For `None` (default) all replica IDs are considered.
    step : int or None
        if not None only samples from the specific step are used.
        For `None` (default) all sample_set steps are considered.

    Returns
    -------
    list of list of float
        Returns for each sample a list of floats which represent the
        collectivevariable values of the trajectory of the samples
    """
    storage = self.storage
    # Resolve the ensemble to its storage index once; only needed when
    # filtering by ensemble.
    if ensemble is not None:
        ens_idx = ensemble.idx[storage]
    output = []
    # Fetch every CV value up front so the per-snapshot lookups below are
    # plain list indexing instead of individual storage reads.
    op_dict = storage.cvs.get_list_value(collectivevariable, slice(None,None))
    for sset_id in range(len(storage.sample_set)):
        # `step` selects a single sample_set by its position.
        if step is not None and sset_id != step:
            continue
        # Bulk-read the index arrays for all samples of this sample_set.
        sample_idxs = storage.variables['sampleset_sample_idx'][sset_id].tolist()
        ensemble_idxs = storage.variables['sample_ensemble_idx'][sample_idxs].tolist()
        replica_idxs = storage.variables['sample_replica'][sample_idxs].tolist()
        traj_idx = storage.variables['sample_trajectory_idx'][sample_idxs].tolist()
        for no, sample_idx in enumerate(sample_idxs):
            if ensemble is not None and ens_idx != ensemble_idxs[no]:
                continue
            if replica is not None and replica != replica_idxs[no]:
                continue
            # Map the sample's trajectory to its snapshot indices and look
            # up the pre-fetched CV value for each snapshot.
            snap_idxs = storage.variables['trajectory_snapshot_idx'][traj_idx[no]]
            output.append([ op_dict[idx] for idx in snap_idxs ])
    return output
def trajectory_length(self, ensemble=None, replica=None, step=None):
    """
    Return list of trajectory lengths fast for specific sets of samples

    samples can be all samples found in specific or all sample_set and filter
    these by ensemble and/or replica.

    Parameters
    ----------
    ensemble : paths.Ensemble or None
        if not None only samples from the specific ensemble are used.
        For `None` (default) all ensembles are considered
    replica : int or None
        if not None only samples from the specific replica ID are used.
        For `None` (default) all replica IDs are considered.
    step : int or None
        if not None only samples from the specific step are used.
        For `None` (default) all sample_set steps are considered.

    Returns
    -------
    list of int
        Returns for each sample the number of snapshots in it
    """
    storage = self.storage
    # Resolve the ensemble to its storage index once, when filtering.
    if ensemble is not None:
        ens_idx = ensemble.idx[storage]
    output = []
    for sset_id in range(len(storage.sample_set)):
        # `step` selects a single sample_set by its position.
        if step is not None and sset_id != step:
            continue
        # Bulk-read the index arrays for all samples of this sample_set.
        sample_idxs = storage.variables['sampleset_sample_idx'][sset_id].tolist()
        ensemble_idxs = storage.variables['sample_ensemble_idx'][sample_idxs].tolist()
        replica_idxs = storage.variables['sample_replica'][sample_idxs].tolist()
        traj_idx = storage.variables['sample_trajectory_idx'][sample_idxs].tolist()
        for no, sample_idx in enumerate(sample_idxs):
            if ensemble is not None and ens_idx != ensemble_idxs[no]:
                continue
            if replica is not None and replica != replica_idxs[no]:
                continue
            # Length of the trajectory == number of snapshot indices.
            snap_idxs = storage.variables['trajectory_snapshot_idx'][traj_idx[no]]
            output.append(len(snap_idxs))
    return output
998,266 | cbbbdca3b8e261f3ae2c9f7abd8d2dd8e361908c | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2014 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import copy
import datetime
from functools import wraps
import ipaddress
import sys
import warnings
# NOTE(uglide): Required to override default oslo_db Query class
import manila.db.sqlalchemy.query # noqa
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import exception as db_exception
from oslo_db import options as db_options
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import and_
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import subqueryload
from sqlalchemy.sql.expression import false
from sqlalchemy.sql.expression import literal
from sqlalchemy.sql.expression import true
from sqlalchemy.sql import func
from manila.common import constants
from manila.db.sqlalchemy import models
from manila.db.sqlalchemy import utils
from manila import exception
from manila.i18n import _
from manila import quota
# osprofiler is optional; None when the package is not installed.
osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')

CONF = cfg.CONF
CONF.import_group("profiler", "manila.service")
LOG = log.getLogger(__name__)
QUOTAS = quota.QUOTAS

_DEFAULT_QUOTA_NAME = 'default'
# Quota resources that are tracked per-project only (never per-user).
PER_PROJECT_QUOTAS = []

# In-memory SQLite is only the fallback when no connection is configured.
_DEFAULT_SQL_CONNECTION = 'sqlite://'
db_options.set_defaults(cfg.CONF,
                        connection=_DEFAULT_SQL_CONNECTION)

context_manager = enginefacade.transaction_context()

# FIXME(stephenfin): we need to remove reliance on autocommit semantics ASAP
# since it's not compatible with SQLAlchemy 2.0
context_manager.configure(__autocommit=True)
def get_engine():
    """Return the SQLAlchemy engine behind the module's enginefacade."""
    legacy_facade = context_manager._factory.get_legacy_facade()
    return legacy_facade.get_engine()
def get_session(**kwargs):
    """Create a session from the legacy facade, forwarding all options."""
    legacy_facade = context_manager._factory.get_legacy_facade()
    return legacy_facade.get_session(**kwargs)
def get_backend():
    """The backend is this module itself."""
    # oslo.db dispatches DB API calls to this module's top-level functions.
    return sys.modules[__name__]
def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        warnings.warn(_('Use of empty request context is deprecated'),
                      DeprecationWarning)
        # NOTE(review): deliberately fatal for empty contexts; the generic
        # Exception and terse message are historical -- callers do not catch
        # this, so confirm before changing the exception type.
        raise Exception('die')
    return context.is_admin
def is_user_context(context):
    """Indicates if the request context is a normal (non-admin) user.

    A user context requires a context object that is not admin and that
    carries both a user_id and a project_id.
    """
    if not context or context.is_admin:
        return False
    return bool(context.user_id and context.project_id)
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project.

    Admin contexts pass unconditionally; user contexts must carry a
    project_id matching ``project_id`` or NotAuthorized is raised.
    """
    if not is_user_context(context):
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user.

    Admin contexts pass unconditionally; user contexts must carry a
    user_id matching ``user_id`` or NotAuthorized is raised.
    """
    if not is_user_context(context):
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class.

    Admin contexts pass unconditionally; user contexts must carry a
    quota_class matching ``class_name`` or NotAuthorized is raised.
    """
    if not is_user_context(context):
        return
    if not context.quota_class or context.quota_class != class_name:
        raise exception.NotAuthorized()
def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        context = args[0]
        if is_admin_context(context):
            return f(*args, **kwargs)
        raise exception.AdminRequired()
    return wrapper
def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        context = args[0]
        if is_admin_context(context) or is_user_context(context):
            return f(*args, **kwargs)
        raise exception.NotAuthorized()
    return wrapper
def require_share_exists(f):
    """Decorator to require the specified share to exist.

    Requires the wrapped function to use context and share_id as
    their first two arguments.
    """
    @wraps(f)
    def wrapper(context, share_id, *args, **kwargs):
        share_get(context, share_id)
        return f(context, share_id, *args, **kwargs)
    # functools.wraps already copies __name__ (and more); the manual
    # `wrapper.__name__ = f.__name__` assignment was redundant and dropped.
    return wrapper
def require_share_snapshot_exists(f):
    """Decorator to require the specified share snapshot to exist.

    Requires the wrapped function to use context and share_snapshot_id as
    their first two arguments.
    """
    @wraps(f)
    def wrapper(context, share_snapshot_id, *args, **kwargs):
        share_snapshot_get(context, share_snapshot_id)
        return f(context, share_snapshot_id, *args, **kwargs)
    # functools.wraps already copies __name__; the manual assignment that
    # used to follow was redundant and has been dropped.
    return wrapper
def require_share_network_subnet_exists(f):
    """Decorator to require the specified share network subnet to exist.

    Requires the wrapped function to use context and share_network_subnet_id
    as their first two arguments.
    """
    @wraps(f)
    def wrapper(context, share_network_subnet_id, *args, **kwargs):
        share_network_subnet_get(context, share_network_subnet_id)
        return f(context, share_network_subnet_id, *args, **kwargs)
    # functools.wraps already copies __name__; the manual assignment that
    # used to follow was redundant and has been dropped.
    return wrapper
def require_share_instance_exists(f):
    """Decorator to require the specified share instance to exist.

    Requires the wrapped function to use context and share_instance_id as
    their first two arguments.
    """
    @wraps(f)
    def wrapper(context, share_instance_id, *args, **kwargs):
        share_instance_get(context, share_instance_id)
        return f(context, share_instance_id, *args, **kwargs)
    # functools.wraps already copies __name__; the manual assignment that
    # used to follow was redundant and has been dropped.
    return wrapper
def apply_sorting(model, query, sort_key, sort_dir):
    """Order ``query`` by ``sort_key`` in direction ``sort_dir``.

    Raises InvalidInput unless ``sort_dir`` is 'asc' or 'desc' (any case).
    """
    direction = sort_dir.lower()
    if direction not in ('desc', 'asc'):
        msg = _("Wrong sorting data provided: sort key is '%(sort_key)s' "
                "and sort direction is '%(sort_dir)s'.") % {
            "sort_key": sort_key, "sort_dir": sort_dir}
        raise exception.InvalidInput(reason=msg)

    # NOTE(maaoyu): the additional sort by ID makes result ordering
    # deterministic; without it, paginated listings could flap between calls.
    keys = [sort_key] if sort_key == 'id' else [sort_key, 'id']
    for key in keys:
        order_criterion = getattr(getattr(model, key), direction)
        query = query.order_by(order_criterion())
    return query
def handle_db_data_error(f):
    """Decorator translating oslo.db DBDataError into exception.Invalid.

    Low-level data errors (e.g. a value too large for its column) surface
    as a manila Invalid error instead of leaking the oslo.db exception.
    """
    # Bug fix: @wraps was missing, unlike every other decorator in this
    # module, so wrapped functions lost their name/docstring.
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except db_exc.DBDataError:
            msg = _('Error writing field to database.')
            LOG.exception(msg)
            raise exception.Invalid(msg)
    return wrapper
def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param model: model to query. Must be a subclass of ModelBase.
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
    """
    session = kwargs.get('session')
    # A session already attached to the context wins over an explicit one.
    if hasattr(context, 'session') and context.session:
        session = context.session
    if not session:
        session = get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only')
    # Rebuild kwargs from scratch: only the filters computed below are
    # forwarded to oslo.db; the inputs above have been consumed here.
    kwargs = dict()

    if project_only and not context.is_admin:
        kwargs['project_id'] = context.project_id
    if read_deleted in ('no', 'n', False):
        kwargs['deleted'] = False
    elif read_deleted == 'only':
        kwargs['deleted'] = True
    elif read_deleted in ('yes', 'y', True):
        # No 'deleted' filter at all: return both live and deleted rows.
        pass

    return db_utils.model_query(
        model=model,
        session=session,
        args=args,
        **kwargs,
    )
def _process_model_like_filter(model, query, filters):
"""Applies regex expression filtering to a query.
:param model: model to apply filters to
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
:returns: the updated query.
"""
if query is None:
return query
if filters:
for key in sorted(filters):
column_attr = getattr(model, key)
if 'property' == type(column_attr).__name__:
continue
value = filters[key]
if not (isinstance(value, (str, int))):
continue
query = query.filter(
column_attr.op('LIKE')(u'%%%s%%' % value))
return query
def apply_like_filters(process_exact_filters):
    """Decorator that routes inexact filters to LIKE matching.

    Filter keys of the form 'key~' are pulled out of ``filters`` before the
    wrapped exact-filter function runs, then applied as SQL LIKE filters via
    :func:`_process_model_like_filter`.
    """
    def _decorator(query, model, filters, legal_keys):
        exact_filters = dict(filters)
        regex_filters = {}
        for key, value in filters.items():
            # Only legal keys are considered; NOTE(haixin): inexact match
            # filter keys arrive in the format 'key~=value'.
            if key not in legal_keys or not key.endswith('~'):
                continue
            del exact_filters[key]
            regex_filters[key.rstrip('~')] = value
        query = process_exact_filters(query, model, exact_filters,
                                      legal_keys)
        return _process_model_like_filter(model, query, regex_filters)
    return _decorator
@apply_like_filters
def exact_filter(query, model, filters, legal_keys,
                 created_at_key='created_at'):
    """Applies exact match filtering to a query.

    Returns the updated query.  Modifies filters argument to remove
    filters consumed.  Decorated with ``apply_like_filters``, so any
    'key~' entries are stripped and handled as LIKE filters first.

    :param query: query to apply filters to
    :param model: model object the query applies to, for IN-style
                  filtering
    :param filters: dictionary of filters; values that are lists,
                    tuples, sets, or frozensets cause an 'IN' test to
                    be performed, while exact matching ('==' operator)
                    is used for other values
    :param legal_keys: list of keys to apply exact filtering to
    :param created_at_key: model attribute used for the reserved
                           created_since/created_before range filters
    """
    filter_dict = {}
    created_at_attr = getattr(model, created_at_key, None)
    # Walk through all the keys
    for key in legal_keys:
        # Skip ones we're not filtering on
        if key not in filters:
            continue

        # OK, filtering on this key; what value do we search for?
        value = filters.pop(key)

        if key == 'created_since' and created_at_attr:
            # This is a reserved query parameter to indicate resources created
            # after a particular datetime
            value = timeutils.normalize_time(value)
            query = query.filter(created_at_attr.op('>=')(value))
        elif key == 'created_before' and created_at_attr:
            # This is a reserved query parameter to indicate resources created
            # before a particular datetime
            value = timeutils.normalize_time(value)
            query = query.filter(created_at_attr.op('<=')(value))
        elif isinstance(value, (list, tuple, set, frozenset)):
            # Looking for values in a list; apply to query directly
            column_attr = getattr(model, key)
            query = query.filter(column_attr.in_(value))
        else:
            # OK, simple exact match; save for later
            filter_dict[key] = value

    # Apply simple exact matches
    if filter_dict:
        query = query.filter_by(**filter_dict)
    return query
def ensure_model_dict_has_id(model_dict):
    """Populate ``model_dict['id']`` with a fresh UUID when absent or falsy.

    Returns the same dict for chaining.
    """
    current_id = model_dict.get('id')
    if not current_id:
        model_dict['id'] = uuidutils.generate_uuid()
    return model_dict
# Quota re-sync helpers: each recounts one resource's real usage and returns
# a single-entry {resource_name: value} dict for the quota engine.
# NOTE(review): the throwaway `_` in the tuple unpacking locally shadows the
# gettext `_` import; harmless here since none of these helpers translate.
def _sync_shares(context, project_id, user_id, share_type_id=None):
    """Recount share usage; returns {'shares': count}."""
    shares, _ = _share_data_get_for_project(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'shares': shares}


def _sync_snapshots(context, project_id, user_id, share_type_id=None):
    """Recount snapshot usage; returns {'snapshots': count}."""
    snapshots, _ = _snapshot_data_get_for_project(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'snapshots': snapshots}


def _sync_gigabytes(context, project_id, user_id, share_type_id=None):
    """Recount share capacity usage; returns {'gigabytes': size}."""
    _, share_gigs = _share_data_get_for_project(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'gigabytes': share_gigs}


def _sync_snapshot_gigabytes(context, project_id, user_id, share_type_id=None):
    """Recount snapshot capacity usage; returns {'snapshot_gigabytes': size}."""
    _, snapshot_gigs = _snapshot_data_get_for_project(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'snapshot_gigabytes': snapshot_gigs}


def _sync_share_networks(context, project_id, user_id, share_type_id=None):
    """Recount share networks; returns {'share_networks': count}."""
    share_networks_count = _count_share_networks(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'share_networks': share_networks_count}


def _sync_share_groups(context, project_id, user_id, share_type_id=None):
    """Recount share groups; returns {'share_groups': count}."""
    share_groups_count = _count_share_groups(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'share_groups': share_groups_count}


def _sync_backups(context, project_id, user_id, share_type_id=None):
    """Recount backups; returns {'backups': count}."""
    backups, _ = _backup_data_get_for_project(context, project_id, user_id)
    return {'backups': backups}


def _sync_backup_gigabytes(context, project_id, user_id, share_type_id=None):
    """Recount backup capacity usage; returns {'backup_gigabytes': size}."""
    _, backup_gigs = _backup_data_get_for_project(context, project_id, user_id)
    return {'backup_gigabytes': backup_gigs}


def _sync_share_group_snapshots(
    context, project_id, user_id, share_type_id=None,
):
    """Recount share group snapshots; returns {'share_group_snapshots': n}."""
    share_group_snapshots_count = _count_share_group_snapshots(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'share_group_snapshots': share_group_snapshots_count}


def _sync_share_replicas(context, project_id, user_id, share_type_id=None):
    """Recount share replicas; returns {'share_replicas': count}."""
    share_replicas_count, _ = _share_replica_data_get_for_project(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'share_replicas': share_replicas_count}


def _sync_replica_gigabytes(context, project_id, user_id, share_type_id=None):
    """Recount replica capacity usage; returns {'replica_gigabytes': size}."""
    _, replica_gigs = _share_replica_data_get_for_project(
        context, project_id, user_id, share_type_id=share_type_id,
    )
    return {'replica_gigabytes': replica_gigs}


# Maps the sync-function names recorded with quota resources to callables.
QUOTA_SYNC_FUNCTIONS = {
    '_sync_shares': _sync_shares,
    '_sync_snapshots': _sync_snapshots,
    '_sync_gigabytes': _sync_gigabytes,
    '_sync_snapshot_gigabytes': _sync_snapshot_gigabytes,
    '_sync_share_networks': _sync_share_networks,
    '_sync_share_groups': _sync_share_groups,
    '_sync_share_group_snapshots': _sync_share_group_snapshots,
    '_sync_share_replicas': _sync_share_replicas,
    '_sync_replica_gigabytes': _sync_replica_gigabytes,
    '_sync_backups': _sync_backups,
    '_sync_backup_gigabytes': _sync_backup_gigabytes,
}
###################
@require_admin_context
@context_manager.writer
def share_resources_host_update(context, current_host, new_host):
    """Updates the 'host' attribute of resources"""
    # Used on host rename: rewrites the host prefix on every non-deleted
    # share instance, share server and share group with one UPDATE each.
    resources = {
        'instances': models.ShareInstance,
        'servers': models.ShareServer,
        'groups': models.ShareGroup,
    }
    result = {}
    for res_name, res_model in resources.items():
        host_field = res_model.host
        query = model_query(
            context, res_model, read_deleted="no",
        ).filter(host_field.like('{}%'.format(current_host)))
        # func.replace swaps the prefix inside the DB (no row fetch);
        # synchronize_session=False is fine since rows aren't reused here.
        count = query.update(
            {host_field: func.replace(host_field, current_host, new_host)},
            synchronize_session=False,
        )
        # Report how many rows of each resource kind were rewritten.
        result.update({res_name: count})
    return result
###################
@require_admin_context
@context_manager.writer
def service_destroy(context, service_id):
    """Soft-delete a service row; raises ServiceNotFound if missing."""
    service_ref = _service_get(context, service_id)
    service_ref.soft_delete(context.session)
@require_admin_context
def _service_get(context, service_id):
    """Return the service with ``service_id`` or raise ServiceNotFound."""
    service = model_query(context, models.Service).filter_by(
        id=service_id).first()
    if not service:
        raise exception.ServiceNotFound(service_id=service_id)
    return service
@require_admin_context
@context_manager.reader
def service_get(context, service_id):
    """Public reader wrapper around ``_service_get``."""
    return _service_get(context, service_id)
@require_admin_context
@context_manager.reader
def service_get_all(context, disabled=None):
    """List all services, optionally filtered on their 'disabled' flag."""
    query = model_query(context, models.Service)
    if disabled is None:
        return query.all()
    return query.filter_by(disabled=disabled).all()
@require_admin_context
@context_manager.reader
def service_get_all_by_topic(context, topic):
    """List enabled, non-deleted services registered under ``topic``."""
    query = model_query(context, models.Service, read_deleted="no")
    return query.filter_by(disabled=False).filter_by(topic=topic).all()
@require_admin_context
@context_manager.reader
def service_get_by_host_and_topic(context, host, topic):
    """Return the enabled service on ``host`` for ``topic``.

    Raises ServiceNotFound (keyed by host) when no match exists.
    """
    service = model_query(
        context, models.Service, read_deleted="no",
    ).filter_by(disabled=False).filter_by(host=host).filter_by(
        topic=topic).first()
    if not service:
        raise exception.ServiceNotFound(service_id=host)
    return service
@require_admin_context
def _service_get_all_topic_subquery(context, topic, subq, label):
    """Join services on a per-host aggregate subquery and sort by it.

    ``subq`` must expose a ``host`` column and an aggregate column named
    ``label``.  Services with no matching aggregate row sort as 0 via
    coalesce (outer join keeps them in the result).
    """
    sort_value = getattr(subq.c, label)
    return (
        model_query(
            context, models.Service,
            func.coalesce(sort_value, 0),
            read_deleted="no",
        ).filter_by(
            topic=topic,
        ).filter_by(
            disabled=False,
        ).outerjoin(
            (subq, models.Service.host == subq.c.host)
        ).order_by(
            sort_value
        ).all()
    )
@require_admin_context
@context_manager.reader
def service_get_all_share_sorted(context):
    """List share-topic services ordered by provisioned gigabytes per host."""
    topic = CONF.share_topic
    label = 'share_gigabytes'
    # Aggregate total share size per host, reaching the host through each
    # share's instance row.
    subq = (
        model_query(
            context,
            models.Share,
            func.sum(models.Share.size).label(label),
            read_deleted="no",
        ).join(
            models.ShareInstance,
            models.ShareInstance.share_id == models.Share.id,
        ).group_by(
            models.ShareInstance.host
        ).subquery()
    )
    return _service_get_all_topic_subquery(
        context,
        topic,
        subq,
        label,
    )
@require_admin_context
@context_manager.reader
def service_get_by_args(context, host, binary):
    """Return the service matching ``host`` and ``binary``.

    Raises HostBinaryNotFound when no such service exists.
    """
    service = model_query(context, models.Service).filter_by(
        host=host).filter_by(binary=binary).first()
    if not service:
        raise exception.HostBinaryNotFound(host=host, binary=binary)
    return service
@require_admin_context
@context_manager.writer
def service_create(context, values):
    """Create and return a service record.

    New services start disabled unless CONF.enable_new_services is set.
    """
    _ensure_availability_zone_exists(context, values)
    service = models.Service()
    service.update(values)
    if not CONF.enable_new_services:
        service.disabled = True
    service.save(context.session)
    return service
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def service_update(context, service_id, values):
    """Update a service row; retried on DB deadlock.

    The availability zone in ``values`` is validated non-strictly (it may
    be absent).  Returns None, unlike ``service_create``.
    """
    _ensure_availability_zone_exists(context, values, strict=False)
    service_ref = _service_get(context, service_id)
    service_ref.update(values)
    service_ref.save(context.session)
###################
@require_context
@context_manager.reader
def quota_get_all_by_project_and_user(context, project_id, user_id):
    """Return {resource: hard_limit} for a project's per-user quotas.

    The result also carries 'project_id' and 'user_id' keys.
    """
    authorize_project_context(context, project_id)
    rows = model_query(
        context, models.ProjectUserQuota,
        models.ProjectUserQuota.resource,
        models.ProjectUserQuota.hard_limit,
    ).filter_by(project_id=project_id).filter_by(user_id=user_id).all()
    result = {'project_id': project_id, 'user_id': user_id}
    result.update({row.resource: row.hard_limit for row in rows})
    return result
@require_context
@context_manager.reader
def quota_get_all_by_project_and_share_type(
    context, project_id, share_type_id,
):
    """Return {resource: hard_limit} quotas scoped to one share type.

    The result also carries 'project_id' and 'share_type_id' keys.
    """
    authorize_project_context(context, project_id)
    rows = model_query(
        context, models.ProjectShareTypeQuota,
        models.ProjectShareTypeQuota.resource,
        models.ProjectShareTypeQuota.hard_limit,
    ).filter_by(project_id=project_id).filter_by(
        share_type_id=share_type_id).all()
    result = {'project_id': project_id, 'share_type_id': share_type_id}
    result.update({row.resource: row.hard_limit for row in rows})
    return result
@require_context
@context_manager.reader
def quota_get_all_by_project(context, project_id):
    """Return {resource: hard_limit} for the project-wide quotas.

    The result also carries a 'project_id' key.
    """
    authorize_project_context(context, project_id)
    rows = model_query(
        context, models.Quota, read_deleted="no",
    ).filter_by(project_id=project_id).all()
    result = {'project_id': project_id}
    result.update({row.resource: row.hard_limit for row in rows})
    return result
@require_context
@context_manager.reader
def quota_get_all(context, project_id):
    """Return the raw per-user quota rows for ``project_id``."""
    authorize_project_context(context, project_id)
    return model_query(context, models.ProjectUserQuota).filter_by(
        project_id=project_id).all()
@require_admin_context
@context_manager.writer
def quota_create(
    context,
    project_id,
    resource,
    limit,
    user_id=None,
    share_type_id=None,
):
    """Create a quota limit at project, per-user or per-share-type scope.

    Raises QuotaExists when a row already exists at the same scope, and
    InvalidInput when the limit overflows the DB column.
    """
    # Per-user scope applies only to resources that aren't project-only.
    per_user = user_id and resource not in PER_PROJECT_QUOTAS

    # Pick the scope: probe for an existing row and build the matching
    # model instance.
    if per_user:
        check = model_query(context, models.ProjectUserQuota).filter(
            models.ProjectUserQuota.project_id == project_id,
            models.ProjectUserQuota.user_id == user_id,
            models.ProjectUserQuota.resource == resource,
        ).all()
        quota_ref = models.ProjectUserQuota()
        quota_ref.user_id = user_id
    elif share_type_id:
        check = model_query(context, models.ProjectShareTypeQuota).filter(
            models.ProjectShareTypeQuota.project_id == project_id,
            models.ProjectShareTypeQuota.share_type_id == share_type_id,
            models.ProjectShareTypeQuota.resource == resource,
        ).all()
        quota_ref = models.ProjectShareTypeQuota()
        quota_ref.share_type_id = share_type_id
    else:
        check = model_query(context, models.Quota).filter(
            models.Quota.project_id == project_id,
            models.Quota.resource == resource,
        ).all()
        quota_ref = models.Quota()
    if check:
        raise exception.QuotaExists(project_id=project_id, resource=resource)

    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    try:
        quota_ref.save(context.session)
    except Exception as e:
        # Translate column-overflow DB errors into a user-facing error;
        # anything else is re-raised untouched.
        if "out of range" in str(e).lower():
            msg = _("Quota limit should not exceed 2147483647")
            raise exception.InvalidInput(reason=msg)
        raise
    return quota_ref
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def quota_update(
    context,
    project_id,
    resource,
    limit,
    user_id=None,
    share_type_id=None,
):
    """Update an existing quota limit at the appropriate scope.

    Scope resolution mirrors ``quota_create``: per-user (when user_id is
    given and the resource is not project-only), per-share-type, else
    project-wide.  Raises the matching *QuotaNotFound when no row matched.
    """
    per_user = user_id and resource not in PER_PROJECT_QUOTAS
    if per_user:
        query = model_query(context, models.ProjectUserQuota).filter(
            models.ProjectUserQuota.project_id == project_id,
            models.ProjectUserQuota.user_id == user_id,
            models.ProjectUserQuota.resource == resource,
        )
    elif share_type_id:
        query = model_query(context, models.ProjectShareTypeQuota).filter(
            models.ProjectShareTypeQuota.project_id == project_id,
            models.ProjectShareTypeQuota.share_type_id == share_type_id,
            models.ProjectShareTypeQuota.resource == resource,
        )
    else:
        query = model_query(context, models.Quota).filter(
            models.Quota.project_id == project_id,
            models.Quota.resource == resource,
        )

    # query.update returns the number of matched rows; zero means the quota
    # was never created at this scope.
    result = query.update({'hard_limit': limit})
    if not result:
        if per_user:
            raise exception.ProjectUserQuotaNotFound(
                project_id=project_id, user_id=user_id)
        elif share_type_id:
            raise exception.ProjectShareTypeQuotaNotFound(
                project_id=project_id, share_type=share_type_id)
        raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
@context_manager.reader
def quota_class_get(context, class_name, resource):
    """Return the quota-class row for (class_name, resource).

    Raises QuotaClassNotFound when no such row exists.
    """
    row = model_query(
        context, models.QuotaClass, read_deleted="no",
    ).filter_by(class_name=class_name).filter_by(resource=resource).first()
    if not row:
        raise exception.QuotaClassNotFound(class_name=class_name)
    return row
@require_context
@context_manager.reader
def quota_class_get_default(context):
    """Return {resource: hard_limit} for the 'default' quota class."""
    rows = model_query(
        context, models.QuotaClass, read_deleted="no",
    ).filter_by(class_name=_DEFAULT_QUOTA_NAME).all()
    result = {'class_name': _DEFAULT_QUOTA_NAME}
    result.update({row.resource: row.hard_limit for row in rows})
    return result
@require_context
@context_manager.reader
def quota_class_get_all_by_name(context, class_name):
    """Return {resource: hard_limit} for the named quota class."""
    authorize_quota_class_context(context, class_name)
    rows = model_query(
        context, models.QuotaClass, read_deleted="no",
    ).filter_by(class_name=class_name).all()
    result = {'class_name': class_name}
    result.update({row.resource: row.hard_limit for row in rows})
    return result
@require_admin_context
@context_manager.writer
def quota_class_create(context, class_name, resource, limit):
    """Insert and return a new quota-class limit row."""
    quota_class = models.QuotaClass()
    quota_class.class_name = class_name
    quota_class.resource = resource
    quota_class.hard_limit = limit
    quota_class.save(context.session)
    return quota_class
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def quota_class_update(context, class_name, resource, limit):
    """Set the hard limit for (class_name, resource).

    Raises QuotaClassNotFound when no row was updated.
    """
    updated = model_query(
        context, models.QuotaClass, read_deleted="no",
    ).filter_by(class_name=class_name).filter_by(resource=resource).update(
        {'hard_limit': limit})
    if not updated:
        raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
@context_manager.reader
def quota_usage_get(context, project_id, resource, user_id=None,
                    share_type_id=None):
    """Return the quota usage row for one resource of a project.

    Scoped to *user_id* (or the NULL user for per-project resources) when
    given, otherwise to *share_type_id* when given, otherwise unscoped.

    :raises exception.QuotaUsageNotFound: if no usage row matched.
    """
    query = (model_query(context, models.QuotaUsage, read_deleted="no").
             filter_by(project_id=project_id).
             filter_by(resource=resource))
    if user_id:
        if resource not in PER_PROJECT_QUOTAS:
            result = query.filter_by(user_id=user_id).first()
        else:
            # Per-project resources are tracked with a NULL user_id.
            result = query.filter_by(user_id=None).first()
    elif share_type_id:
        # BUG FIX: was filter_by(queryshare_type_id=...), a typo'd attribute
        # name that raises InvalidRequestError at runtime because the model
        # has no such column.
        result = query.filter_by(share_type_id=share_type_id).first()
    else:
        result = query.first()
    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)
    return result
def _quota_usage_get_all(context, project_id, user_id=None,
                         share_type_id=None):
    """Aggregate quota usages for a project into a resource-keyed dict.

    Each entry maps resource -> {'in_use': int, 'reserved': int}; the dict
    also carries 'project_id' and, when scoped, 'user_id'/'share_type_id'.
    """
    authorize_project_context(context, project_id)
    query = (model_query(context, models.QuotaUsage, read_deleted="no").
             filter_by(project_id=project_id))
    result = {'project_id': project_id}
    if user_id:
        # BUG FIX: was 'models.QuotaUsage.user_id is None' — a Python
        # identity test on the Column object that always evaluates to
        # False, silently excluding per-project (NULL user_id) rows.
        # Use the SQL NULL test instead.
        query = query.filter(or_(models.QuotaUsage.user_id == user_id,
                                 models.QuotaUsage.user_id.is_(None)))
        result['user_id'] = user_id
    elif share_type_id:
        query = query.filter_by(share_type_id=share_type_id)
        result['share_type_id'] = share_type_id
    else:
        query = query.filter_by(share_type_id=None)
    rows = query.all()
    for row in rows:
        # The same resource can appear on several rows (e.g. multiple
        # users); accumulate the counters per resource.
        if row.resource in result:
            result[row.resource]['in_use'] += row.in_use
            result[row.resource]['reserved'] += row.reserved
        else:
            result[row.resource] = dict(in_use=row.in_use,
                                        reserved=row.reserved)
    return result
@require_context
@context_manager.reader
def quota_usage_get_all_by_project(context, project_id):
    """Return all project-level quota usages for *project_id*."""
    return _quota_usage_get_all(context, project_id)
@require_context
@context_manager.reader
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
    """Return quota usages for *project_id* scoped to *user_id*."""
    return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
@context_manager.reader
def quota_usage_get_all_by_project_and_share_type(context, project_id,
                                                  share_type_id):
    """Return quota usages for *project_id* scoped to *share_type_id*."""
    return _quota_usage_get_all(
        context, project_id, share_type_id=share_type_id)
def _quota_usage_create(context, project_id, user_id, resource, in_use,
                        reserved, until_refresh, share_type_id=None):
    """Insert a QuotaUsage row (per-user or per-share-type) and return it."""
    usage = models.QuotaUsage()
    # A usage row is owned either by a share type or by a user, never both.
    if share_type_id:
        usage.share_type_id = share_type_id
    else:
        usage.user_id = user_id
    usage.update({
        'project_id': project_id,
        'resource': resource,
        'in_use': in_use,
        'reserved': reserved,
        'until_refresh': until_refresh,
        # updated_at is needed for judgement of max_age
        'updated_at': timeutils.utcnow(),
    })
    usage.save(session=context.session)
    return usage
@require_admin_context
@context_manager.writer
def quota_usage_create(context, project_id, user_id, resource, in_use,
                       reserved, until_refresh, share_type_id=None):
    """Create a quota usage record inside a writer transaction.

    Thin public wrapper around :func:`_quota_usage_create`.
    """
    return _quota_usage_create(
        context,
        project_id,
        user_id,
        resource,
        in_use,
        reserved,
        until_refresh,
        share_type_id=share_type_id,
    )
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def quota_usage_update(context, project_id, user_id, resource,
                       share_type_id=None, **kwargs):
    """Update selected fields of a quota usage record.

    Only 'in_use', 'reserved' and 'until_refresh' keyword values are
    applied; any other key in **kwargs is ignored.

    :raises exception.QuotaUsageNotFound: if no row matched.
    """
    updates = {}
    for key in ('in_use', 'reserved', 'until_refresh'):
        if key in kwargs:
            updates[key] = kwargs[key]
    query = model_query(
        context, models.QuotaUsage, read_deleted="no",
    ).filter_by(project_id=project_id).filter_by(resource=resource)
    if share_type_id:
        query = query.filter_by(share_type_id=share_type_id)
    else:
        # BUG FIX: was 'models.QuotaUsage.user_id is None' — a Python
        # identity test that always evaluates to False, so per-project
        # (NULL user_id) rows could never be updated. Use an SQL NULL test.
        query = query.filter(or_(models.QuotaUsage.user_id == user_id,
                                 models.QuotaUsage.user_id.is_(None)))
    result = query.update(updates)
    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(context, uuid, usage, project_id, user_id, resource,
                        delta, expire, share_type_id=None):
    """Insert a single Reservation row tied to *usage* and return it."""
    reservation = models.Reservation()
    reservation.update({
        'uuid': uuid,
        'usage_id': usage['id'],
        'project_id': project_id,
        'resource': resource,
        'delta': delta,
        'expire': expire,
    })
    # A reservation is owned either by a share type or by a user.
    if share_type_id:
        reservation.share_type_id = share_type_id
    else:
        reservation.user_id = user_id
    reservation.save(session=context.session)
    return reservation
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_share_type_quota_usages(context, project_id, share_type_id):
    """Return {resource: QuotaUsage} for a share type, locked FOR UPDATE."""
    query = model_query(
        context, models.QuotaUsage, read_deleted="no",
    ).filter(
        models.QuotaUsage.project_id == project_id,
        models.QuotaUsage.share_type_id == share_type_id,
    )
    return {usage.resource: usage
            for usage in query.with_for_update().all()}
def _get_user_quota_usages(context, project_id, user_id):
    """Return {resource: QuotaUsage} for a user, locked FOR UPDATE.

    Rows with a NULL user_id (per-project resources) are included too.
    Broken out for testability.
    """
    rows = model_query(
        context, models.QuotaUsage, read_deleted="no",
    ).filter_by(
        project_id=project_id,
    ).filter(
        or_(
            models.QuotaUsage.user_id == user_id,
            # BUG FIX: was 'user_id is None' — a Python identity check on
            # the Column object that always evaluated to False, so the
            # per-project (NULL user_id) rows were never returned. Use an
            # SQL NULL test instead.
            models.QuotaUsage.user_id.is_(None),
        )
    ).with_for_update().all()
    return {row.resource: row for row in rows}
def _get_project_quota_usages(context, project_id):
    """Aggregate per-project usage totals, locking the rows FOR UPDATE.

    :returns: {resource: {'in_use': int, 'reserved': int, 'total': int}}
    """
    rows = model_query(
        context, models.QuotaUsage, read_deleted="no",
    ).filter_by(
        project_id=project_id,
    ).filter(
        # BUG FIX: was 'share_type_id is None' — a Python identity check on
        # the Column object (always False), which made the filter match no
        # rows at all. The intent is an SQL 'share_type_id IS NULL' test so
        # that only non-share-type usages are aggregated.
        models.QuotaUsage.share_type_id.is_(None),
    ).with_for_update().all()
    result = dict()
    # Get the total count of in_use,reserved
    for row in rows:
        if row.resource in result:
            result[row.resource]['in_use'] += row.in_use
            result[row.resource]['reserved'] += row.reserved
            result[row.resource]['total'] += (row.in_use + row.reserved)
        else:
            result[row.resource] = dict(
                in_use=row.in_use,
                reserved=row.reserved,
                total=row.in_use + row.reserved,
            )
    return result
# NOTE(stephenfin): We intentionally don't wrap the outer function here since
# we call the innter function multiple times and want each call to be in a
# separate transaction
@require_context
def quota_reserve(context, resources, project_quotas, user_quotas,
                  share_type_quotas, deltas, expire, until_refresh,
                  max_age, project_id=None, user_id=None, share_type_id=None,
                  overquota_allowed=False):
    """Reserve quota deltas at the user scope and, optionally, the share
    type scope.

    The per-user reservation is made first; when ``share_type_id`` is
    given a second reservation is attempted against the share-type quotas
    and, if that one overflows, the already-made user reservations are
    rolled back before the OverQuota is re-raised.

    :returns: list of reservation UUIDs (user reservations, plus share
        type reservations when ``share_type_id`` is set).
    :raises exception.OverQuota: when a delta would exceed quota and
        ``overquota_allowed`` is False.
    """
    user_reservations = _quota_reserve(
        context, resources, project_quotas, user_quotas,
        deltas, expire, until_refresh, max_age, project_id, user_id=user_id,
        overquota_allowed=overquota_allowed)
    if share_type_id:
        try:
            st_reservations = _quota_reserve(
                context, resources, project_quotas, share_type_quotas,
                deltas, expire, until_refresh, max_age, project_id,
                share_type_id=share_type_id,
                overquota_allowed=overquota_allowed)
        except exception.OverQuota:
            # rollback previous reservations
            with excutils.save_and_reraise_exception():
                # We call a public method since we haven't wrapped this, the
                # caller, and we want to run in a different transaction
                reservation_rollback(
                    context, user_reservations,
                    project_id=project_id, user_id=user_id)
        return user_reservations + st_reservations
    return user_reservations
# NOTE(stephenfin): Per above, we wrap the inner method here
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def _quota_reserve(context, resources, project_quotas, user_or_st_quotas,
                   deltas, expire, until_refresh,
                   max_age, project_id=None, user_id=None, share_type_id=None,
                   overquota_allowed=False):
    """Reserve ``deltas`` against per-user or per-share-type quota usages.

    Ensures QuotaUsage rows exist for every requested resource (creating
    or re-syncing stale ones via the registered sync functions), validates
    positive deltas against both project-level and user/share-type-level
    quotas, and records one Reservation row per resource.

    :returns: list of reservation UUIDs that were created.
    :raises exception.OverQuota: if any delta would exceed a quota and
        ``overquota_allowed`` is False.
    """
    elevated = context.elevated()
    if project_id is None:
        project_id = context.project_id
    # Pick the usage scope: share type vs. user (locks rows FOR UPDATE).
    if share_type_id:
        user_or_st_usages = _get_share_type_quota_usages(
            context, project_id, share_type_id,
        )
    else:
        user_id = user_id if user_id else context.user_id
        user_or_st_usages = _get_user_quota_usages(
            context, project_id, user_id,
        )
    # Get the current usages
    project_usages = _get_project_quota_usages(context, project_id)
    # Handle usage refresh
    work = set(deltas.keys())
    while work:
        resource = work.pop()
        # Do we need to refresh the usage?
        refresh = False
        if ((resource not in PER_PROJECT_QUOTAS) and
            (resource not in user_or_st_usages)):
            user_or_st_usages[resource] = _quota_usage_create(
                elevated,
                project_id,
                user_id,
                resource,
                0, 0,
                until_refresh or None,
                share_type_id=share_type_id,
            )
            refresh = True
        elif ((resource in PER_PROJECT_QUOTAS) and
            (resource not in user_or_st_usages)):
            # Per-project resources are tracked with a NULL user_id.
            user_or_st_usages[resource] = _quota_usage_create(
                elevated,
                project_id,
                None,
                resource,
                0, 0,
                until_refresh or None,
                share_type_id=share_type_id,
            )
            refresh = True
        elif user_or_st_usages[resource].in_use < 0:
            # Negative in_use count indicates a desync, so try to
            # heal from that...
            refresh = True
        elif user_or_st_usages[resource].until_refresh is not None:
            user_or_st_usages[resource].until_refresh -= 1
            if user_or_st_usages[resource].until_refresh <= 0:
                refresh = True
        # NOTE(review): updated_at - utcnow() is a *negative* timedelta, so
        # '.seconds' here is the wrapped positive remainder rather than the
        # elapsed age — looks suspicious; confirm intent before changing.
        elif max_age and (user_or_st_usages[resource].updated_at -
                          timeutils.utcnow()).seconds >= max_age:
            refresh = True
        # OK, refresh the usage
        if refresh:
            # Grab the sync routine
            sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
            updates = sync(
                elevated,
                project_id,
                user_id,
                share_type_id=share_type_id,
            )
            for res, in_use in updates.items():
                # Make sure we have a destination for the usage!
                if ((res not in PER_PROJECT_QUOTAS) and
                    (res not in user_or_st_usages)):
                    user_or_st_usages[res] = _quota_usage_create(
                        elevated,
                        project_id,
                        user_id,
                        res,
                        0, 0,
                        until_refresh or None,
                        share_type_id=share_type_id,
                    )
                if ((res in PER_PROJECT_QUOTAS) and
                    (res not in user_or_st_usages)):
                    user_or_st_usages[res] = _quota_usage_create(
                        elevated,
                        project_id,
                        None,
                        res,
                        0, 0,
                        until_refresh or None,
                        share_type_id=share_type_id,
                    )
                if user_or_st_usages[res].in_use != in_use:
                    LOG.debug(
                        'quota_usages out of sync, updating. '
                        'project_id: %(project_id)s, '
                        'user_id: %(user_id)s, '
                        'share_type_id: %(share_type_id)s, '
                        'resource: %(res)s, '
                        'tracked usage: %(tracked_use)s, '
                        'actual usage: %(in_use)s',
                        {'project_id': project_id,
                         'user_id': user_id,
                         'share_type_id': share_type_id,
                         'res': res,
                         'tracked_use': user_or_st_usages[res].in_use,
                         'in_use': in_use})
                    # Update the usage
                    user_or_st_usages[res].in_use = in_use
                    user_or_st_usages[res].until_refresh = (
                        until_refresh or None)
                # Because more than one resource may be refreshed
                # by the call to the sync routine, and we don't
                # want to double-sync, we make sure all refreshed
                # resources are dropped from the work set.
                work.discard(res)
                # NOTE(Vek): We make the assumption that the sync
                # routine actually refreshes the
                # resources that it is the sync routine
                # for. We don't check, because this is
                # a best-effort mechanism.
    # Check for deltas that would go negative
    unders = [res for res, delta in deltas.items()
              if delta < 0 and
              delta + user_or_st_usages[res].in_use < 0]
    # Now, let's check the quotas
    # NOTE(Vek): We're only concerned about positive increments.
    # If a project has gone over quota, we want them to
    # be able to reduce their usage without any
    # problems.
    for key, value in user_or_st_usages.items():
        if key not in project_usages:
            project_usages[key] = value
    overs = [res for res, delta in deltas.items()
             if user_or_st_quotas[res] >= 0 and delta >= 0 and
             (0 <= project_quotas[res] < delta +
              project_usages[res]['total'] or
              user_or_st_quotas[res] < delta +
              user_or_st_usages[res].total)]
    # NOTE(carloss): If OverQuota is allowed, there is no problem to exceed
    # the quotas, so we reset the overs list and LOG it.
    if overs and overquota_allowed:
        msg = _("The service has identified one or more exceeded "
                "quotas. Please check the quotas for project "
                "%(project_id)s, user %(user_id)s and share type "
                "%(share_type_id)s, and adjust them if "
                "necessary.") % {
            "project_id": project_id,
            "user_id": user_id,
            "share_type_id": share_type_id
        }
        LOG.warning(msg)
        overs = []
    # NOTE(Vek): The quota check needs to be in the transaction,
    # but the transaction doesn't fail just because
    # we're over quota, so the OverQuota raise is
    # outside the transaction. If we did the raise
    # here, our usage updates would be discarded, but
    # they're not invalidated by being over-quota.
    # Create the reservations
    if not overs:
        reservations = []
        for res, delta in deltas.items():
            reservation = _reservation_create(
                elevated,
                uuidutils.generate_uuid(),
                user_or_st_usages[res],
                project_id,
                user_id,
                res, delta, expire,
                share_type_id=share_type_id,
            )
            reservations.append(reservation.uuid)
            # Also update the reserved quantity
            # NOTE(Vek): Again, we are only concerned here about
            # positive increments. Here, though, we're
            # worried about the following scenario:
            #
            # 1) User initiates resize down.
            # 2) User allocates a new instance.
            # 3) Resize down fails or is reverted.
            # 4) User is now over quota.
            #
            # To prevent this, we only update the
            # reserved value if the delta is positive.
            if delta > 0:
                user_or_st_usages[res].reserved += delta
    # Apply updates to the usages table
    for usage_ref in user_or_st_usages.values():
        context.session.add(usage_ref)
    # NOTE(stephenfin): commit changes before we raise any exceptions
    context.session.commit()
    context.session.begin()
    if unders:
        LOG.warning("Change will make usage less than 0 for the following "
                    "resources: %s", unders)
    if overs:
        if project_quotas == user_or_st_quotas:
            usages = project_usages
        else:
            usages = user_or_st_usages
        usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
                  for k, v in usages.items()}
        raise exception.OverQuota(
            overs=sorted(overs), quotas=user_or_st_quotas, usages=usages)
    return reservations
def _quota_reservations_query(context, reservations):
    """Return the relevant reservations, locked FOR UPDATE."""
    query = model_query(context, models.Reservation, read_deleted="no")
    query = query.filter(models.Reservation.uuid.in_(reservations))
    return query.with_for_update()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def reservation_commit(context, reservations, project_id=None, user_id=None,
                       share_type_id=None):
    """Apply the given reservations to their usage rows, then delete them.

    Positive deltas move quota from 'reserved' into 'in_use'.
    """
    if share_type_id:
        st_usages = _get_share_type_quota_usages(
            context, project_id, share_type_id,
        )
    else:
        st_usages = {}
    user_usages = _get_user_quota_usages(context, project_id, user_id)
    query = _quota_reservations_query(context, reservations)
    for reservation in query.all():
        # Route the reservation to the usage map of its owner scope.
        usage_map = st_usages if reservation['share_type_id'] else user_usages
        usage = usage_map[reservation.resource]
        if reservation.delta >= 0:
            usage.reserved -= reservation.delta
            usage.in_use += reservation.delta
    query.soft_delete(synchronize_session=False)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def reservation_rollback(context, reservations, project_id=None, user_id=None,
                         share_type_id=None):
    """Undo the given reservations and delete them.

    Positive deltas are released from 'reserved' without touching 'in_use'.
    """
    if share_type_id:
        st_usages = _get_share_type_quota_usages(
            context, project_id, share_type_id,
        )
    else:
        st_usages = {}
    user_usages = _get_user_quota_usages(context, project_id, user_id)
    query = _quota_reservations_query(context, reservations)
    for reservation in query.all():
        # Route the reservation to the usage map of its owner scope.
        usage_map = st_usages if reservation['share_type_id'] else user_usages
        usage = usage_map[reservation.resource]
        if reservation.delta >= 0:
            usage.reserved -= reservation.delta
    query.soft_delete(synchronize_session=False)
@require_admin_context
@context_manager.writer
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
    """Soft-delete all per-user quotas, usages and reservations."""
    for model in (models.ProjectUserQuota, models.QuotaUsage,
                  models.Reservation):
        model_query(
            context, model, read_deleted="no",
        ).filter_by(
            project_id=project_id,
        ).filter_by(user_id=user_id).soft_delete(synchronize_session=False)
@require_admin_context
@context_manager.writer
def quota_destroy_all_by_share_type(context, share_type_id, project_id=None):
    """Soft-delete all quotas, usages and reservations of a share type.

    Thin transactional wrapper around
    :func:`_quota_destroy_all_by_share_type`.
    """
    return _quota_destroy_all_by_share_type(
        context, share_type_id, project_id=project_id,
    )
@require_admin_context
def _quota_destroy_all_by_share_type(context, share_type_id, project_id=None):
    """Soft deletes all quotas, usages and reservations.

    :param context: request context for queries, updates and logging
    :param share_type_id: ID of the share type to filter the quotas, usages
        and reservations under.
    :param project_id: ID of the project to filter the quotas, usages and
        reservations under. If not provided, share type quotas for all
        projects will be acted upon.
    """
    quotas = model_query(
        context, models.ProjectShareTypeQuota,
        read_deleted="no",
    ).filter_by(share_type_id=share_type_id)
    usages = model_query(
        context, models.QuotaUsage, read_deleted="no",
    ).filter_by(share_type_id=share_type_id)
    reservations = model_query(
        context, models.Reservation, read_deleted="no",
    ).filter_by(share_type_id=share_type_id)
    # Narrow all three queries to a single project when requested.
    if project_id is not None:
        quotas = quotas.filter_by(project_id=project_id)
        usages = usages.filter_by(project_id=project_id)
        reservations = reservations.filter_by(project_id=project_id)
    for query in (quotas, usages, reservations):
        query.soft_delete(synchronize_session=False)
@require_admin_context
@context_manager.writer
def quota_destroy_all_by_project(context, project_id):
    """Soft-delete every quota, usage and reservation row of a project."""
    for model in (models.Quota, models.ProjectUserQuota, models.QuotaUsage,
                  models.Reservation):
        model_query(
            context, model, read_deleted="no",
        ).filter_by(
            project_id=project_id,
        ).soft_delete(synchronize_session=False)
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def reservation_expire(context):
    """Release and delete every reservation past its expiry time."""
    now = timeutils.utcnow()
    expired = model_query(
        context, models.Reservation,
        read_deleted="no"
    ).filter(models.Reservation.expire < now)
    for reservation in expired.all():
        if reservation.delta >= 0:
            # Give the reserved quantity back to the matching usage row.
            usage = model_query(
                context, models.QuotaUsage, read_deleted="no",
            ).filter(
                models.QuotaUsage.id == reservation.usage_id,
            ).first()
            usage.reserved -= reservation.delta
            context.session.add(usage)
    expired.soft_delete(synchronize_session=False)
################
def _extract_subdict_by_fields(source_dict, fields):
dict_to_extract_from = copy.deepcopy(source_dict)
sub_dict = {}
for field in fields:
field_value = dict_to_extract_from.pop(field, None)
if field_value:
sub_dict.update({field: field_value})
return sub_dict, dict_to_extract_from
def _extract_share_instance_values(values):
    """Split *values* into (share-instance values, share values)."""
    instance_fields = [
        'status', 'host', 'scheduled_at', 'launched_at', 'terminated_at',
        'share_server_id', 'share_network_id', 'availability_zone',
        'replica_state', 'share_type_id', 'share_type', 'access_rules_status',
    ]
    return _extract_subdict_by_fields(values, instance_fields)
def _change_size_to_instance_size(snap_instance_values):
if 'size' in snap_instance_values:
snap_instance_values['instance_size'] = snap_instance_values['size']
snap_instance_values.pop('size')
def _extract_snapshot_instance_values(values):
    """Split *values* into (snapshot-instance values, snapshot values)."""
    instance_fields = ['status', 'progress', 'provider_location']
    return _extract_subdict_by_fields(values, instance_fields)
################
@require_context
def share_instance_create(context, share_id, values):
    """Create a share instance for *share_id* inside a new transaction."""
    session = get_session()
    with session.begin():
        return _share_instance_create(context, share_id, values, session)
def _share_instance_create(context, share_id, values, session):
    """Insert a ShareInstance row and return it freshly loaded.

    Generates a new UUID when the caller did not supply a truthy 'id'.
    NOTE: *values* is mutated (id/share_id are written into it).
    """
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    values.update({'share_id': share_id})
    instance = models.ShareInstance()
    instance.update(values)
    instance.save(session=session)
    return share_instance_get(context, instance['id'], session=session)
@require_context
def share_instance_update(context, share_instance_id, values,
                          with_share_data=False):
    """Update a share instance; optionally hydrate it with share data."""
    session = get_session()
    # Validate the AZ (if any) before opening the transaction.
    _ensure_availability_zone_exists(context, values, session, strict=False)
    with session.begin():
        instance = _share_instance_update(
            context, share_instance_id, values, session
        )
        if with_share_data:
            parent = share_get(context, instance['share_id'],
                               session=session)
            instance.set_share_data(parent)
        return instance
def share_and_snapshot_instances_status_update(
        context, values, share_instance_ids=None, snapshot_instance_ids=None,
        current_expected_status=None):
    """Update status values on share and snapshot instances in one
    transaction.

    When ``current_expected_status`` is given, every targeted share and
    snapshot instance must already be in that status; otherwise
    InvalidShareInstance / InvalidShareSnapshotInstance is raised and no
    update happens.

    :returns: tuple (updated_share_instances, updated_snapshot_instances);
        either element is None when the corresponding ids were not given.
    """
    updated_share_instances = None
    updated_snapshot_instances = None
    session = get_session()
    with session.begin():
        # Validate share instance statuses before touching anything.
        if current_expected_status and share_instance_ids:
            filters = {'instance_ids': share_instance_ids}
            share_instances = share_instances_get_all(
                context, filters=filters, session=session)
            all_instances_are_compliant = all(
                instance['status'] == current_expected_status
                for instance in share_instances)
            if not all_instances_are_compliant:
                msg = _('At least one of the shares is not in the %(status)s '
                        'status.') % {
                    'status': current_expected_status
                }
                raise exception.InvalidShareInstance(reason=msg)
        # Validate snapshot instance statuses as well.
        if current_expected_status and snapshot_instance_ids:
            filters = {'instance_ids': snapshot_instance_ids}
            snapshot_instances = share_snapshot_instance_get_all_with_filters(
                context, filters, session=session)
            all_snap_instances_are_compliant = all(
                snap_instance['status'] == current_expected_status
                for snap_instance in snapshot_instances)
            if not all_snap_instances_are_compliant:
                msg = _('At least one of the snapshots is not in the '
                        '%(status)s status.') % {
                    'status': current_expected_status
                }
                raise exception.InvalidShareSnapshotInstance(reason=msg)
        if share_instance_ids:
            updated_share_instances = share_instances_status_update(
                context, share_instance_ids, values, session=session)
        if snapshot_instance_ids:
            updated_snapshot_instances = (
                share_snapshot_instances_status_update(
                    context, snapshot_instance_ids, values, session=session))
    return updated_share_instances, updated_snapshot_instances
@require_context
def share_instances_status_update(
        context, share_instance_ids, values, session=None):
    """Bulk-update the given share instances; return the matched row count."""
    session = session or get_session()
    query = model_query(
        context, models.ShareInstance, read_deleted="no", session=session,
    ).filter(models.ShareInstance.id.in_(share_instance_ids))
    return query.update(values, synchronize_session=False)
def _share_instance_update(context, share_instance_id, values, session):
    """Apply *values* to an existing share instance and persist it."""
    instance = share_instance_get(context, share_instance_id,
                                  session=session)
    instance.update(values)
    instance.save(session=session)
    return instance
@require_context
def share_instance_get(context, share_instance_id, session=None,
                       with_share_data=False):
    """Fetch one share instance by id.

    Export locations (with metadata) and the share type are eagerly
    loaded. When *with_share_data* is set, the parent share's data is
    attached to the returned instance.

    :raises exception.NotFound: when no such instance exists.
    """
    session = session or get_session()
    instance = model_query(
        context, models.ShareInstance, session=session,
    ).filter_by(
        id=share_instance_id,
    ).options(
        joinedload('export_locations').joinedload('_el_metadata_bare'),
        joinedload('share_type'),
    ).first()
    if instance is None:
        raise exception.NotFound()
    if with_share_data:
        parent = share_get(context, instance['share_id'], session=session)
        instance.set_share_data(parent)
    return instance
@require_admin_context
def share_instances_get_all(context, filters=None, session=None):
    """List share instances, with optional filtering (admin only).

    Supported filter keys: 'export_location_id', 'export_location_path',
    'is_soft_deleted', 'instance_ids', 'host' (exact or pool-prefixed
    "host#pool" match), 'share_server_id'.

    :returns: list of matching ShareInstance rows with export locations
        eagerly loaded.
    """
    session = session or get_session()
    query = model_query(
        context, models.ShareInstance, session=session, read_deleted="no",
    ).options(
        joinedload('export_locations'),
    )
    filters = filters or {}
    export_location_id = filters.get('export_location_id')
    export_location_path = filters.get('export_location_path')
    # Only join the export-locations table when one of its filters is used.
    if export_location_id or export_location_path:
        query = query.join(
            models.ShareInstanceExportLocations,
            models.ShareInstanceExportLocations.share_instance_id ==
            models.ShareInstance.id)
        if export_location_path:
            query = query.filter(
                models.ShareInstanceExportLocations.path ==
                export_location_path)
        if export_location_id:
            query = query.filter(
                models.ShareInstanceExportLocations.uuid ==
                export_location_id)
    # Join the parent share so instances can be filtered by its
    # soft-deletion state.
    query = query.join(
        models.Share,
        models.Share.id ==
        models.ShareInstance.share_id)
    is_soft_deleted = filters.get('is_soft_deleted')
    if is_soft_deleted:
        query = query.filter(models.Share.is_soft_deleted == true())
    else:
        query = query.filter(models.Share.is_soft_deleted == false())
    instance_ids = filters.get('instance_ids')
    if instance_ids:
        query = query.filter(models.ShareInstance.id.in_(instance_ids))
    # TODO(gouthamr): This DB API method needs to be generalized for all
    # share instance fields.
    host = filters.get('host')
    if host:
        query = query.filter(
            or_(models.ShareInstance.host == host,
                models.ShareInstance.host.like("{0}#%".format(host)))
        )
    share_server_id = filters.get('share_server_id')
    if share_server_id:
        query = query.filter(
            models.ShareInstance.share_server_id == share_server_id)
    # Returns list of share instances that satisfy filters.
    query = query.all()
    return query
@require_context
def _update_share_instance_usages(context, share, instance_ref,
                                  is_replica=False):
    """Release quota usage held by a share (or share replica) being deleted.

    Builds the negative usage deltas, then reserves and commits them under
    the share owner's project/user and the instance's share type. On any
    failure the reservation is rolled back and the error is logged but not
    re-raised (best-effort accounting).
    """
    deltas = {}
    no_instances_remain = len(share.instances) == 0
    share_usages_to_release = {"shares": -1, "gigabytes": -share['size']}
    replica_usages_to_release = {"share_replicas": -1,
                                 "replica_gigabytes": -share['size']}
    if is_replica and no_instances_remain:
        # A share that had a replication_type is being deleted, so there's
        # a need to update both the share replica quotas and the share quotas
        deltas.update(replica_usages_to_release)
        deltas.update(share_usages_to_release)
    elif is_replica:
        # The user is deleting a share replica
        deltas.update(replica_usages_to_release)
    else:
        # A share with no replication_type is being deleted
        deltas.update(share_usages_to_release)
    reservations = None
    try:
        # we give the user_id of the share, to update
        # the quota usage for the user, who created the share
        reservations = QUOTAS.reserve(
            context,
            project_id=share['project_id'],
            user_id=share['user_id'],
            share_type_id=instance_ref['share_type_id'],
            **deltas)
        QUOTAS.commit(
            context, reservations, project_id=share['project_id'],
            user_id=share['user_id'],
            share_type_id=instance_ref['share_type_id'])
    except Exception:
        resource_name = (
            'share replica' if is_replica else 'share')
        resource_id = instance_ref['id'] if is_replica else share['id']
        msg = (_("Failed to update usages deleting %(resource_name)s "
                 "'%(id)s'.") % {'id': resource_id,
                                 "resource_name": resource_name})
        LOG.exception(msg)
        # Undo the reservation if it got as far as being created.
        if reservations:
            QUOTAS.rollback(
                context, reservations,
                share_type_id=instance_ref['share_type_id'])
@require_context
def share_instance_delete(context, instance_id, session=None,
                          need_to_update_usages=False):
    """Soft-delete a share instance, and its share if it was the last one.

    Export locations of the instance are removed first. When the parent
    share is left with no instances, its access rules and metadata are
    soft-deleted along with the share itself, and (optionally) the quota
    usages are released.
    """
    if session is None:
        session = get_session()
    with session.begin():
        share_export_locations_update(context, instance_id, [], delete=True)
        instance_ref = share_instance_get(context, instance_id,
                                          session=session)
        # Replicas are recognized by having a (non-NULL) replica_state.
        is_replica = instance_ref['replica_state'] is not None
        instance_ref.soft_delete(session=session, update_status=True)
        share = share_get(context, instance_ref['share_id'], session=session)
        if len(share.instances) == 0:
            share_access_delete_all_by_share(context, share['id'])
            session.query(models.ShareMetadata).filter_by(
                share_id=share['id']).soft_delete()
            share.soft_delete(session=session)
            if need_to_update_usages:
                _update_share_instance_usages(context, share, instance_ref,
                                              is_replica=is_replica)
def _set_instances_share_data(context, instances, session):
    """Attach parent-share data to each instance; drop orphans.

    Accepts a single instance or a list. Instances whose parent share no
    longer exists are silently omitted from the returned list.
    """
    if instances and not isinstance(instances, list):
        instances = [instances]
    hydrated = []
    for instance in instances:
        try:
            share = share_get(context, instance['share_id'],
                              session=session)
        except exception.NotFound:
            continue
        instance.set_share_data(share)
        hydrated.append(instance)
    return hydrated
@require_admin_context
def share_instances_get_all_by_host(context, host, with_share_data=False,
                                    status=None, session=None):
    """Retrieves all share instances hosted on a host."""
    session = session or get_session()
    # Match either the bare host or any of its pools ("host#pool").
    host_pattern = "{0}#%".format(host)
    query = model_query(context, models.ShareInstance).filter(
        or_(
            models.ShareInstance.host == host,
            models.ShareInstance.host.like(host_pattern)
        )
    )
    if status is not None:
        query = query.filter(models.ShareInstance.status == status)
    # Returns list of all instances that satisfy filters.
    instances = query.all()
    if with_share_data:
        instances = _set_instances_share_data(context, instances, session)
    return instances
@require_context
def share_instances_get_all_by_share_network(context, share_network_id):
    """Returns list of share instances that belong to given share network."""
    return model_query(context, models.ShareInstance).filter(
        models.ShareInstance.share_network_id == share_network_id,
    ).all()
@require_context
def share_instances_get_all_by_share_server(context, share_server_id,
                                            with_share_data=False):
    """Returns list of share instance with given share server."""
    session = get_session()
    instances = model_query(context, models.ShareInstance).filter(
        models.ShareInstance.share_server_id == share_server_id,
    ).all()
    if with_share_data:
        instances = _set_instances_share_data(context, instances, session)
    return instances
@require_context
def share_instances_get_all_by_share(context, share_id):
    """Returns list of share instances that belong to given share."""
    return model_query(context, models.ShareInstance).filter(
        models.ShareInstance.share_id == share_id,
    ).all()
@require_context
def share_instances_get_all_by_share_group_id(context, share_group_id):
    """Returns list of share instances that belong to given share group."""
    shares = model_query(context, models.Share).filter(
        models.Share.share_group_id == share_group_id,
    ).all()
    instances = []
    for share in shares:
        # Hydrate each share's primary instance with its share data.
        instance = share.instance
        instance.set_share_data(share)
        instances.append(instance)
    return instances
################
def _share_replica_get_with_filters(context, share_id=None, replica_id=None,
                                    replica_state=None, status=None,
                                    with_share_server=True, session=None):
    """Build a ShareInstance query restricted to share replicas.

    Non-admin contexts are additionally limited to their own project's
    shares. When *replica_state* is not given, any instance with a
    non-NULL replica_state matches.
    """
    query = model_query(context, models.ShareInstance, session=session,
                        read_deleted="no")
    if not context.is_admin:
        query = query.join(
            models.Share,
            models.ShareInstance.share_id == models.Share.id).filter(
            models.Share.project_id == context.project_id)
    if share_id is not None:
        query = query.filter(models.ShareInstance.share_id == share_id)
    if replica_id is not None:
        query = query.filter(models.ShareInstance.id == replica_id)
    if replica_state is None:
        query = query.filter(models.ShareInstance.replica_state.isnot(None))
    else:
        query = query.filter(
            models.ShareInstance.replica_state == replica_state)
    if status is not None:
        query = query.filter(models.ShareInstance.status == status)
    if with_share_server:
        query = query.options(joinedload('share_server'))
    return query
@require_context
def share_replicas_get_all(context, with_share_data=False,
                           with_share_server=True, session=None):
    """Returns replica instances for all available replicated shares."""
    session = session or get_session()
    replicas = _share_replica_get_with_filters(
        context, with_share_server=with_share_server, session=session).all()
    if with_share_data:
        replicas = _set_instances_share_data(context, replicas, session)
    return replicas
@require_context
def share_replicas_get_all_by_share(context, share_id,
                                    with_share_data=False,
                                    with_share_server=False, session=None):
    """Returns replica instances for a given share."""
    session = session or get_session()
    replicas = _share_replica_get_with_filters(
        context, with_share_server=with_share_server,
        share_id=share_id, session=session).all()
    if with_share_data:
        replicas = _set_instances_share_data(context, replicas, session)
    return replicas
@require_context
def share_replicas_get_available_active_replica(context, share_id,
                                                with_share_data=False,
                                                with_share_server=False,
                                                session=None):
    """Return the share's 'active' replica that is in 'available' status."""
    if session is None:
        session = get_session()
    replica = _share_replica_get_with_filters(
        context, with_share_server=with_share_server, share_id=share_id,
        replica_state=constants.REPLICA_STATE_ACTIVE,
        status=constants.STATUS_AVAILABLE, session=session).first()
    if with_share_data and replica:
        replica = _set_instances_share_data(context, replica, session)[0]
    return replica
@require_context
def share_replica_get(context, replica_id, with_share_data=False,
                      with_share_server=False, session=None):
    """Return the requested replica or raise ShareReplicaNotFound."""
    if session is None:
        session = get_session()
    replica = _share_replica_get_with_filters(
        context, with_share_server=with_share_server,
        replica_id=replica_id, session=session).first()
    if replica is None:
        raise exception.ShareReplicaNotFound(replica_id=replica_id)
    if with_share_data:
        replica = _set_instances_share_data(context, replica, session)[0]
    return replica
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_replica_update(context, share_replica_id, values,
                         with_share_data=False, session=None):
    """Update a share replica with the given values; retried on deadlock."""
    if session is None:
        session = get_session()
    with session.begin():
        _ensure_availability_zone_exists(context, values, session,
                                         strict=False)
        replica = _share_instance_update(
            context, share_replica_id, values, session=session)
        if with_share_data:
            replica = _set_instances_share_data(context, replica, session)[0]
    return replica
@require_context
def share_replica_delete(context, share_replica_id, session=None,
                         need_to_update_usages=True):
    """Delete a share replica (delegates to instance deletion)."""
    if session is None:
        session = get_session()
    share_instance_delete(context, share_replica_id, session=session,
                          need_to_update_usages=need_to_update_usages)
################
@require_context
def _share_get_query(context, session=None, **kwargs):
    """Base Share query with share_metadata eagerly loaded."""
    session = session or get_session()
    query = model_query(context, models.Share, session=session, **kwargs)
    return query.options(joinedload('share_metadata'))
def _process_share_filters(query, filters, project_id=None, is_public=False):
    """Apply user-supplied filters to a share/share-instance query.

    :param query: a query already joined across Share and ShareInstance.
    :param filters: filter dict; 'export_location_id' and
        'export_location_path' are popped from it (the caller's dict is
        mutated for those two keys, preserving historical behavior).
    :param project_id: when set, restrict results to this project.
    :param is_public: also include other projects' public shares.
    :returns: the query with all recognized filters applied.
    """
    if filters is None:
        filters = {}
    # Split the recognized keys between the Share and ShareInstance models.
    share_filter_keys = ['share_group_id', 'snapshot_id',
                         'is_soft_deleted', 'source_backup_id']
    instance_filter_keys = ['share_server_id', 'status', 'share_type_id',
                            'host', 'share_network_id']
    share_filters = {}
    instance_filters = {}
    for key, value in filters.items():
        if key in share_filter_keys:
            share_filters[key] = value
        elif key in instance_filter_keys:
            instance_filters[key] = value

    no_key = 'key_is_absent'

    def _filter_data(query, model, desired_filters):
        # Apply an equality filter per key, skipping keys the model does
        # not actually have.
        for key, value in desired_filters.items():
            filter_attr = getattr(model, key, no_key)
            if filter_attr is no_key:
                # Fix: this used to be 'pass', which fell through and
                # applied a bogus string-comparison filter (always false)
                # for unknown keys. Also use identity against the
                # sentinel: '==' on a mapped attribute builds a SQL
                # expression rather than a plain boolean.
                continue
            query = query.filter(filter_attr == value)
        return query

    if share_filters:
        query = _filter_data(query, models.Share, share_filters)
    if instance_filters:
        query = _filter_data(query, models.ShareInstance, instance_filters)

    if project_id:
        if is_public:
            query = query.filter(or_(models.Share.project_id == project_id,
                                     models.Share.is_public))
        else:
            query = query.filter(models.Share.project_id == project_id)

    # Exact display_name match wins; otherwise 'display_name~' does a
    # substring (LIKE) match. Same pattern for the description.
    display_name = filters.get('display_name')
    if display_name:
        query = query.filter(
            models.Share.display_name == display_name)
    else:
        display_name = filters.get('display_name~')
        if display_name:
            query = query.filter(models.Share.display_name.op('LIKE')(
                u'%' + display_name + u'%'))

    display_description = filters.get('display_description')
    if display_description:
        query = query.filter(
            models.Share.display_description == display_description)
    else:
        display_description = filters.get('display_description~')
        if display_description:
            query = query.filter(models.Share.display_description.op('LIKE')(
                u'%' + display_description + u'%'))

    export_location_id = filters.pop('export_location_id', None)
    export_location_path = filters.pop('export_location_path', None)
    if export_location_id or export_location_path:
        query = query.join(
            models.ShareInstanceExportLocations,
            models.ShareInstanceExportLocations.share_instance_id ==
            models.ShareInstance.id)
        if export_location_path:
            query = query.filter(
                models.ShareInstanceExportLocations.path ==
                export_location_path)
        if export_location_id:
            query = query.filter(
                models.ShareInstanceExportLocations.uuid ==
                export_location_id)

    if 'metadata' in filters:
        # Every requested metadata key/value pair must be present.
        for k, v in filters['metadata'].items():
            # pylint: disable=no-member
            query = query.filter(
                or_(models.Share.share_metadata.any(
                    key=k, value=v)))

    if 'extra_specs' in filters:
        query = query.join(
            models.ShareTypeExtraSpecs,
            models.ShareTypeExtraSpecs.share_type_id ==
            models.ShareInstance.share_type_id)
        for k, v in filters['extra_specs'].items():
            query = query.filter(and_(models.ShareTypeExtraSpecs.key == k,
                                      models.ShareTypeExtraSpecs.value == v))

    return query
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
value = str(v) if isinstance(v, bool) else v
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = value
metadata_refs.append(metadata_ref)
return metadata_refs
@require_context
def share_create(context, share_values, create_share_instance=True):
    """Create a share and, optionally, its first share instance.

    ``share_values`` is deep-copied so the caller's dict is not mutated;
    a 'metadata' sub-dict is converted into ShareMetadata rows.

    :returns: the newly created share, re-fetched so relationships are
        populated.
    """
    values = copy.deepcopy(share_values)
    values = ensure_model_dict_has_id(values)
    values['share_metadata'] = _metadata_refs(values.get('metadata'),
                                              models.ShareMetadata)
    session = get_session()
    share_ref = models.Share()
    # Instance-level values are split out and stored on the ShareInstance
    # instead of the Share row itself.
    share_instance_values, share_values = _extract_share_instance_values(
        values)
    _ensure_availability_zone_exists(context, share_instance_values, session,
                                     strict=False)
    share_ref.update(share_values)
    with session.begin():
        share_ref.save(session=session)
        if create_share_instance:
            _share_instance_create(context, share_ref['id'],
                                   share_instance_values, session=session)
        # NOTE(u_glide): Do so to prevent errors with relationships
        return share_get(context, share_ref['id'], session=session)
@require_admin_context
def _share_data_get_for_project(
    context, project_id, user_id, share_type_id=None,
):
    """Return ``(share_count, total_size)`` for a project's shares."""
    base = model_query(
        context, models.Share,
        func.count(models.Share.id),
        func.sum(models.Share.size),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    # share_type scoping takes precedence over per-user scoping.
    if share_type_id:
        base = base.join("instances").filter_by(share_type_id=share_type_id)
    elif user_id:
        base = base.filter_by(user_id=user_id)
    num_shares, total_size = base.first()
    return (num_shares or 0, total_size or 0)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_update(context, share_id, update_values):
    """Update a share and its current instance with the given values.

    ``update_values`` is deep-copied; instance-level keys are applied to
    the share's current instance, the remainder to the share row itself.
    Retried up to 5 times on DB deadlocks.
    """
    session = get_session()
    values = copy.deepcopy(update_values)
    share_instance_values, share_values = _extract_share_instance_values(
        values)
    _ensure_availability_zone_exists(context, share_instance_values, session,
                                     strict=False)
    with session.begin():
        share_ref = share_get(context, share_id, session=session)
        _share_instance_update(context, share_ref.instance['id'],
                               share_instance_values, session=session)
        share_ref.update(share_values)
        share_ref.save(session=session)
        return share_ref
@require_context
def share_get(context, share_id, session=None, **kwargs):
    """Return the share with the given id, or raise NotFound."""
    share = (_share_get_query(context, session, **kwargs)
             .filter_by(id=share_id).first())
    if share is None:
        raise exception.NotFound()
    return share
def _share_get_all_with_filters(context, project_id=None, share_server_id=None,
                                share_group_id=None, filters=None,
                                is_public=False, sort_key=None,
                                sort_dir=None, show_count=False):
    """Returns sorted list of shares that satisfies filters.

    :param context: context to query under
    :param project_id: project id that owns shares
    :param share_server_id: share server that hosts shares
    :param share_group_id: share group the shares belong to
    :param filters: dict of filters to specify share selection
    :param is_public: public shares from other projects will be added
        to result if True
    :param sort_key: key of models.Share to be used for sorting
    :param sort_dir: desired direction of sorting, can be 'asc' and 'desc'
    :param show_count: when True, return ``(count, shares)`` instead of
        just the list
    :returns: list -- models.Share (or a ``(count, list)`` tuple)
    :raises: exception.InvalidInput
    """
    if filters is None:
        filters = {}
    if not sort_key:
        sort_key = 'created_at'
    if not sort_dir:
        sort_dir = 'desc'
    query = (
        _share_get_query(context).join(
            models.ShareInstance,
            models.ShareInstance.share_id == models.Share.id
        )
    )
    if share_group_id:
        filters['share_group_id'] = share_group_id
    if share_server_id:
        filters['share_server_id'] = share_server_id
    # if not specified is_soft_deleted filter, default is False, to get
    # shares not in recycle bin.
    if 'is_soft_deleted' not in filters:
        filters['is_soft_deleted'] = False
    query = _process_share_filters(
        query, filters, project_id, is_public=is_public)
    # The sort key may live on either the Share or the ShareInstance model;
    # try the Share first and fall back to the instance.
    try:
        query = apply_sorting(models.Share, query, sort_key, sort_dir)
    except AttributeError:
        try:
            query = apply_sorting(
                models.ShareInstance, query, sort_key, sort_dir)
        except AttributeError:
            msg = _("Wrong sorting key provided - '%s'.") % sort_key
            raise exception.InvalidInput(reason=msg)
    count = None
    # NOTE(carloss): Count must be calculated before limit and offset are
    # applied into the query.
    if show_count:
        count = query.count()
    if 'limit' in filters:
        offset = filters.get('offset', 0)
        query = query.limit(filters['limit']).offset(offset)
    # Returns list of shares that satisfy filters.
    query = query.all()
    if show_count:
        return count, query
    return query
@require_admin_context
def get_all_expired_shares(context):
    """Return soft-deleted shares whose scheduled deletion time passed."""
    query = _share_get_query(context).join(
        models.ShareInstance,
        models.ShareInstance.share_id == models.Share.id)
    query = _process_share_filters(query, filters={"is_soft_deleted": True})
    scheduled_deleted_attr = getattr(models.Share,
                                     'scheduled_to_be_deleted_at', None)
    query = query.filter(
        scheduled_deleted_attr.op('<=')(timeutils.utcnow()))
    return query.all()
@require_admin_context
def share_get_all(context, filters=None, sort_key=None, sort_dir=None):
    """Return all shares matching the filters, sorted as requested."""
    if filters:
        project_id = filters.pop('project_id', None)
    else:
        project_id = None
    return _share_get_all_with_filters(
        context, project_id=project_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_admin_context
def share_get_all_with_count(context, filters=None, sort_key=None,
                             sort_dir=None):
    """Return ``(count, shares)`` for all shares matching the filters."""
    return _share_get_all_with_filters(
        context, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir, show_count=True)
@require_context
def share_get_all_by_project(context, project_id, filters=None,
                             is_public=False, sort_key=None, sort_dir=None):
    """Return shares owned by ``project_id`` (plus public ones if asked)."""
    return _share_get_all_with_filters(
        context, project_id=project_id, filters=filters,
        is_public=is_public, sort_key=sort_key, sort_dir=sort_dir)
@require_context
def share_get_all_by_project_with_count(
        context, project_id, filters=None, is_public=False, sort_key=None,
        sort_dir=None):
    """Return ``(count, shares)`` for shares owned by ``project_id``."""
    return _share_get_all_with_filters(
        context, project_id=project_id, filters=filters,
        is_public=is_public, sort_key=sort_key, sort_dir=sort_dir,
        show_count=True)
@require_context
def share_get_all_by_share_group_id(context, share_group_id,
                                    filters=None, sort_key=None,
                                    sort_dir=None):
    """Return shares that belong to the given share group."""
    return _share_get_all_with_filters(
        context, share_group_id=share_group_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
def share_get_all_by_share_group_id_with_count(context, share_group_id,
                                               filters=None, sort_key=None,
                                               sort_dir=None):
    """Return ``(count, shares)`` for shares in the given share group."""
    return _share_get_all_with_filters(
        context, share_group_id=share_group_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir, show_count=True)
@require_context
def share_get_all_by_share_server(context, share_server_id, filters=None,
                                  sort_key=None, sort_dir=None):
    """Return shares hosted by the given share server."""
    return _share_get_all_with_filters(
        context, share_server_id=share_server_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
def get_shares_in_recycle_bin_by_share_server(
        context, share_server_id, filters=None, sort_key=None, sort_dir=None):
    """Return soft-deleted shares hosted by the given share server."""
    filters = dict(filters) if filters else {}
    filters["is_soft_deleted"] = True
    return _share_get_all_with_filters(
        context, share_server_id=share_server_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
def share_get_all_by_share_server_with_count(
        context, share_server_id, filters=None, sort_key=None, sort_dir=None):
    """Return ``(count, shares)`` for shares on the given share server."""
    return _share_get_all_with_filters(
        context, share_server_id=share_server_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir, show_count=True)
@require_context
def get_shares_in_recycle_bin_by_network(
        context, share_network_id, filters=None, sort_key=None, sort_dir=None):
    """Return soft-deleted shares attached to the given share network."""
    filters = dict(filters) if filters else {}
    filters["share_network_id"] = share_network_id
    filters["is_soft_deleted"] = True
    return _share_get_all_with_filters(
        context, filters=filters, sort_key=sort_key, sort_dir=sort_dir)
@require_context
def share_delete(context, share_id):
    """Soft-delete a share that has no remaining instances."""
    session = get_session()
    with session.begin():
        share_ref = share_get(context, share_id, session)
        instance_count = len(share_ref.instances)
        if instance_count > 0:
            msg = _("Share %(id)s has %(count)s share instances.") % {
                'id': share_id, 'count': instance_count}
            raise exception.InvalidShare(msg)
        share_ref.soft_delete(session=session)
        # The share's metadata rows go with it.
        (session.query(models.ShareMetadata).
         filter_by(share_id=share_id).soft_delete())
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_soft_delete(context, share_id):
    """Move a share to the recycle bin and schedule its final deletion."""
    session = get_session()
    retention = datetime.timedelta(
        seconds=CONF.soft_deleted_share_retention_time)
    update_values = {
        'is_soft_deleted': True,
        'scheduled_to_be_deleted_at': timeutils.utcnow() + retention,
    }
    with session.begin():
        share_ref = share_get(context, share_id, session=session)
        share_ref.update(update_values)
        share_ref.save(session=session)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_restore(context, share_id):
    """Take a share out of the recycle bin, cancelling its deletion."""
    session = get_session()
    with session.begin():
        share_ref = share_get(context, share_id, session=session)
        share_ref.update({
            'is_soft_deleted': False,
            'scheduled_to_be_deleted_at': None,
        })
        share_ref.save(session=session)
###################
@context_manager.reader
def _transfer_get(context, transfer_id, resource_type='share',
                  session=None, read_deleted=False):
    """Fetch one transfer record by id or raise TransferNotFound.

    ``resource_type`` may be 'share' (network transfer is a TODO).
    Non-admins only see transfers for shares in their own project.
    """
    query = model_query(context, models.Transfer,
                        session=session,
                        read_deleted=read_deleted).filter_by(id=transfer_id)
    if not is_admin_context(context):
        if resource_type == 'share':
            share = models.Share
            query = query.filter(models.Transfer.resource_id == share.id,
                                 share.project_id == context.project_id)
    transfer = query.first()
    if not transfer:
        raise exception.TransferNotFound(transfer_id=transfer_id)
    return transfer
@context_manager.reader
def share_transfer_get(context, transfer_id, read_deleted=False):
    """Public wrapper: fetch a share transfer by id."""
    return _transfer_get(context, transfer_id, read_deleted=read_deleted)
def _transfer_get_all(context, limit=None, sort_key=None,
                      sort_dir=None, filters=None, offset=None):
    """Return transfers matching ``filters``, paginated and sorted.

    Defaults to newest-first ('created_at' desc).  Filtering is delegated
    to ``exact_filter`` with the legal keys listed below.
    """
    session = get_session()
    sort_key = sort_key or 'created_at'
    sort_dir = sort_dir or 'desc'
    with session.begin():
        query = model_query(context, models.Transfer, session=session)
        if filters:
            legal_filter_keys = ('display_name', 'display_name~',
                                 'id', 'resource_type', 'resource_id',
                                 'source_project_id', 'destination_project_id')
            query = exact_filter(query, models.Transfer,
                                 filters, legal_filter_keys)
        query = utils.paginate_query(query, models.Transfer, limit,
                                     sort_key=sort_key,
                                     sort_dir=sort_dir,
                                     offset=offset)
        return query.all()
@require_admin_context
def transfer_get_all(context, limit=None, sort_key=None,
                     sort_dir=None, filters=None, offset=None):
    """Admin-only: list all transfers matching the filters."""
    return _transfer_get_all(context, limit=limit, sort_key=sort_key,
                             sort_dir=sort_dir, filters=filters,
                             offset=offset)
@require_context
def transfer_get_all_by_project(context, project_id,
                                limit=None, sort_key=None,
                                sort_dir=None, filters=None, offset=None):
    """List transfers initiated by the given project."""
    effective_filters = dict(filters) if filters else {}
    effective_filters['source_project_id'] = project_id
    return _transfer_get_all(context, limit=limit, sort_key=sort_key,
                             sort_dir=sort_dir, filters=effective_filters,
                             offset=offset)
@require_context
@handle_db_data_error
def transfer_create(context, values):
    """Create a transfer record and flag the resource as awaiting transfer.

    An id is generated when absent, and 'expires_at' is set to now plus
    ``CONF.transfer_retention_time`` seconds.  For share transfers the
    share's status becomes STATUS_AWAITING_TRANSFER.

    :returns: the new Transfer model object.
    """
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    resource_id = values['resource_id']
    now_time = timeutils.utcnow()
    time_delta = datetime.timedelta(
        seconds=CONF.transfer_retention_time)
    transfer_timeout = now_time + time_delta
    values['expires_at'] = transfer_timeout
    session = get_session()
    with session.begin():
        transfer = models.Transfer()
        transfer.update(values)
        transfer.save(session=session)
        update = {'status': constants.STATUS_AWAITING_TRANSFER}
        if values['resource_type'] == 'share':
            share_update(context, resource_id, update)
        return transfer
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def transfer_destroy(context, transfer_id,
                     update_share_status=True):
    """Soft-delete a transfer, optionally restoring the share's status.

    :param update_share_status: when True and the transfer concerns a
        share, reset the share status to STATUS_AVAILABLE.
    """
    session = get_session()
    with session.begin():
        update = {'status': constants.STATUS_AVAILABLE}
        transfer = share_transfer_get(context, transfer_id)
        if transfer['resource_type'] == 'share':
            if update_share_status:
                share_update(context, transfer['resource_id'], update)
        transfer_query = model_query(context, models.Transfer,
                                     session=session).filter_by(id=transfer_id)
        transfer_query.soft_delete()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def transfer_accept(context, transfer_id, user_id, project_id,
                    accept_snapshots=False):
    """Complete a transfer: reassign the share (and optionally snapshots).

    Ownership of the share moves to the accepting user/project, the
    transfer row is marked deleted/accepted, and, when requested, all of
    the share's snapshots are reassigned too.
    """
    session = get_session()
    with session.begin():
        share_id = share_transfer_get(context, transfer_id)['resource_id']
        update = {'status': constants.STATUS_AVAILABLE,
                  'user_id': user_id,
                  'project_id': project_id,
                  'updated_at': timeutils.utcnow()}
        share_update(context, share_id, update)
        # Update snapshots for transfer snapshots with share.
        if accept_snapshots:
            snapshots = share_snapshot_get_all_for_share(context, share_id)
            for snapshot in snapshots:
                LOG.debug('Begin to transfer snapshot: %s', snapshot['id'])
                update = {'user_id': user_id,
                          'project_id': project_id,
                          'updated_at': timeutils.utcnow()}
                share_snapshot_update(context, snapshot['id'], update)
        # Mark the transfer itself consumed (soft-deleted + accepted).
        query = session.query(models.Transfer).filter_by(id=transfer_id)
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': timeutils.utcnow(),
                      'destination_project_id': project_id,
                      'accepted': True})
@require_context
def transfer_accept_rollback(context, transfer_id, user_id,
                             project_id, rollback_snap=False):
    """Undo a transfer acceptance, restoring the previous ownership.

    The share (and optionally its snapshots) is handed back to the given
    user/project and the transfer row is un-deleted.
    """
    session = get_session()
    with session.begin():
        share_id = share_transfer_get(
            context, transfer_id, read_deleted=True)['resource_id']
        update = {'status': constants.STATUS_AWAITING_TRANSFER,
                  'user_id': user_id,
                  'project_id': project_id,
                  'updated_at': timeutils.utcnow()}
        share_update(context, share_id, update)
        # rollback snapshots for transfer snapshots with share.
        if rollback_snap:
            snapshots = share_snapshot_get_all_for_share(context, share_id)
            for snapshot in snapshots:
                LOG.debug('Begin to rollback snapshot: %s', snapshot['id'])
                update = {'user_id': user_id,
                          'project_id': project_id,
                          'updated_at': timeutils.utcnow()}
                share_snapshot_update(context, snapshot['id'], update)
        # NOTE(review): 'deleted' is reset with the string 'False' and
        # 'accepted' with 0, while transfer_accept writes booleans —
        # presumably matching the column's soft-delete representation;
        # confirm consistency with the Transfer model.
        query = session.query(models.Transfer).filter_by(id=transfer_id)
        query.update({'deleted': 'False',
                      'deleted_at': None,
                      'updated_at': timeutils.utcnow(),
                      'destination_project_id': None,
                      'accepted': 0})
@require_admin_context
def get_all_expired_transfers(context):
    """Return all transfers whose expiry time has passed."""
    session = get_session()
    now = timeutils.utcnow()
    with session.begin():
        query = model_query(context, models.Transfer, session=session)
        expires_at_attr = getattr(models.Transfer, 'expires_at', None)
        return query.filter(expires_at_attr.op('<=')(now)).all()
###################
def _share_access_get_query(context, session, values, read_deleted='no'):
    """Query ShareAccessMapping rows matching ``values`` exactly."""
    base = model_query(
        context, models.ShareAccessMapping, session=session,
        read_deleted=read_deleted)
    base = base.options(joinedload('share_access_rules_metadata'))
    return base.filter_by(**values)
def _share_instance_access_query(context, session, access_id=None,
                                 instance_id=None):
    """Query non-deleted instance/access mappings, optionally narrowed."""
    criteria = {'deleted': 'False'}
    if access_id is not None:
        criteria['access_id'] = access_id
    if instance_id is not None:
        criteria['share_instance_id'] = instance_id
    return model_query(context, models.ShareInstanceAccessMapping,
                       session=session).filter_by(**criteria)
def _share_access_metadata_get_item(context, access_id, key, session=None):
    """Fetch one access-rule metadata item or raise if it is missing."""
    item = (_share_access_metadata_get_query(
        context, access_id, session=session).filter_by(key=key).first())
    if item is None:
        raise exception.ShareAccessMetadataNotFound(
            metadata_key=key, access_id=access_id)
    return item
def _share_access_metadata_get_query(context, access_id, session=None):
    """Base query for metadata rows belonging to one access rule."""
    query = model_query(
        context, models.ShareAccessRulesMetadata, session=session,
        read_deleted="no")
    return (query.filter_by(access_id=access_id)
            .options(joinedload('access')))
@require_context
def share_access_metadata_update(context, access_id, metadata):
    """Create or update metadata items on an access rule.

    Existing keys get their values replaced; missing keys are created.

    :returns: the ``metadata`` dict that was applied.
    """
    session = get_session()
    with session.begin():
        # Now update all existing items with new values, or create new meta
        # objects
        for meta_key, meta_value in metadata.items():
            # update the value whether it exists or not
            item = {"value": meta_value}
            try:
                meta_ref = _share_access_metadata_get_item(
                    context, access_id, meta_key, session=session)
            except exception.ShareAccessMetadataNotFound:
                meta_ref = models.ShareAccessRulesMetadata()
                item.update({"key": meta_key, "access_id": access_id})
            meta_ref.update(item)
            meta_ref.save(session=session)
        return metadata
@require_context
def share_access_metadata_delete(context, access_id, key):
    """Soft-delete one metadata item of an access rule."""
    session = get_session()
    with session.begin():
        item = _share_access_metadata_get_item(
            context, access_id, key, session=session)
        item.soft_delete(session)
@require_context
def share_access_create(context, values):
    """Create an access rule and map it onto every instance of the share.

    A 'metadata' sub-dict in ``values`` becomes ShareAccessRulesMetadata
    rows.

    :returns: the created ShareAccessMapping, re-fetched.
    """
    values = ensure_model_dict_has_id(values)
    session = get_session()
    with session.begin():
        values['share_access_rules_metadata'] = (
            _metadata_refs(values.get('metadata'),
                           models.ShareAccessRulesMetadata))
        access_ref = models.ShareAccessMapping()
        access_ref.update(values)
        access_ref.save(session=session)
        parent_share = share_get(context, values['share_id'], session=session)
        # Every existing share instance gets its own mapping to this rule.
        for instance in parent_share.instances:
            vals = {
                'share_instance_id': instance['id'],
                'access_id': access_ref['id'],
            }
            _share_instance_access_create(vals, session)
    return share_access_get(context, access_ref['id'])
@require_context
def share_instance_access_create(context, values, share_instance_id):
    """Map an access rule onto one share instance.

    If a rule with the same share/type/target already exists it is reused
    (and updated); otherwise a new ShareAccessMapping is created.

    :returns: the (re-fetched) access rule that was mapped.
    """
    values = ensure_model_dict_has_id(values)
    session = get_session()
    with session.begin():
        access_list = _share_access_get_query(
            context, session, {
                'share_id': values['share_id'],
                'access_type': values['access_type'],
                'access_to': values['access_to'],
            }).all()
        if len(access_list) > 0:
            access_ref = access_list[0]
        else:
            access_ref = models.ShareAccessMapping()
        access_ref.update(values)
        access_ref.save(session=session)
        vals = {
            'share_instance_id': share_instance_id,
            'access_id': access_ref['id'],
        }
        _share_instance_access_create(vals, session)
    return share_access_get(context, access_ref['id'])
@require_context
def share_instance_access_copy(context, share_id, instance_id, session=None):
    """Copy all of a share's access rules onto one share instance."""
    if session is None:
        session = get_session()
    rules = _share_access_get_query(
        context, session, {'share_id': share_id}).all()
    for rule in rules:
        _share_instance_access_create(
            {'share_instance_id': instance_id, 'access_id': rule['id']},
            session)
    return rules
def _share_instance_access_create(values, session):
    """Persist a new ShareInstanceAccessMapping row and return it."""
    mapping = models.ShareInstanceAccessMapping()
    mapping.update(ensure_model_dict_has_id(values))
    mapping.save(session=session)
    return mapping
@require_context
def share_access_get(context, access_id, session=None):
    """Return the access rule with the given id, or raise NotFound."""
    if session is None:
        session = get_session()
    access = _share_access_get_query(
        context, session, {'id': access_id}).first()
    if access is None:
        raise exception.NotFound()
    return access
@require_context
def share_instance_access_get(context, access_id, instance_id,
                              with_share_access_data=True):
    """Return the instance/access mapping, optionally with rule data."""
    session = get_session()
    mapping = _share_instance_access_query(context, session, access_id,
                                           instance_id).first()
    if mapping is None:
        raise exception.NotFound()
    if not with_share_access_data:
        return mapping
    return _set_instances_share_access_data(context, mapping, session)[0]
@require_context
def share_access_get_all_for_share(context, share_id, filters=None,
                                   session=None):
    """Return a share's access rules that still have instance mappings.

    Optional 'metadata' filters restrict the result to rules carrying the
    given key/value metadata items.
    """
    filters = filters or {}
    session = session or get_session()
    query = (_share_access_get_query(
        context, session, {'share_id': share_id}).filter(
        models.ShareAccessMapping.instance_mappings.any()))
    if 'metadata' in filters:
        for k, v in filters['metadata'].items():
            query = query.filter(
                or_(models.ShareAccessMapping.
                    share_access_rules_metadata.any(key=k, value=v)))
    return query.all()
@require_context
def share_access_get_all_for_instance(context, instance_id, filters=None,
                                      with_share_access_data=True,
                                      session=None):
    """Get all access rules related to a certain share instance.

    ``filters`` may use the legal keys below; unknown keys are ignored by
    ``exact_filter``.  With ``with_share_access_data`` each mapping is
    augmented with its parent access rule's data.
    """
    session = session or get_session()
    # Deep-copy so the caller's filter dict is not mutated.
    filters = copy.deepcopy(filters) if filters else {}
    filters.update({'share_instance_id': instance_id})
    legal_filter_keys = ('id', 'share_instance_id', 'access_id', 'state')
    query = _share_instance_access_query(context, session)
    query = exact_filter(
        query, models.ShareInstanceAccessMapping, filters, legal_filter_keys)
    instance_accesses = query.all()
    if with_share_access_data:
        instance_accesses = _set_instances_share_access_data(
            context, instance_accesses, session)
    return instance_accesses
def _set_instances_share_access_data(context, instance_accesses, session):
    """Attach the parent share-access record to each instance mapping.

    Accepts a single mapping or a list; always returns a list.
    """
    if instance_accesses and not isinstance(instance_accesses, list):
        instance_accesses = [instance_accesses]
    for mapping in instance_accesses:
        parent_rule = share_access_get(
            context, mapping['access_id'], session=session)
        mapping.set_share_access_data(parent_rule)
    return instance_accesses
def _set_instances_snapshot_access_data(context, instance_accesses, session):
    """Attach the parent snapshot-access record to each instance mapping.

    Accepts a single mapping or a list; always returns a list.
    """
    if instance_accesses and not isinstance(instance_accesses, list):
        instance_accesses = [instance_accesses]
    for mapping in instance_accesses:
        parent_rule = share_snapshot_access_get(
            context, mapping['access_id'], session=session)
        mapping.set_snapshot_access_data(parent_rule)
    return instance_accesses
@require_context
def share_access_get_all_by_type_and_access(context, share_id, access_type,
                                            access):
    """Return all of a share's access rules matching type and target."""
    criteria = {'share_id': share_id,
                'access_type': access_type,
                'access_to': access}
    return _share_access_get_query(context, get_session(), criteria).all()
@require_context
def share_access_check_for_existing_access(context, share_id, access_type,
                                           access_to):
    """Tell whether an equivalent access rule already exists for a share."""
    return _check_for_existing_access(
        context, 'share', share_id, access_type, access_to)
def _check_for_existing_access(context, resource, resource_id, access_type,
                               access_to):
    """Return True if an equivalent access rule already exists.

    For 'ip' rules, equality means network equality (e.g. 10.0.0.1 and
    10.0.0.1/32 describe the same network); for other types an exact
    ``access_to`` string match is used.  ``resource`` selects between
    share and snapshot access mappings.
    """
    session = get_session()
    if resource == 'share':
        query_method = _share_access_get_query
        access_to_field = models.ShareAccessMapping.access_to
    else:
        query_method = _share_snapshot_access_get_query
        access_to_field = models.ShareSnapshotAccessMapping.access_to
    with session.begin():
        if access_type == 'ip':
            # Pre-filter in SQL on the address part (before any '/'), then
            # compare parsed networks in Python for exact equivalence.
            rules = query_method(
                context, session, {'%s_id' % resource: resource_id,
                                   'access_type': access_type}).filter(
                access_to_field.startswith(access_to.split('/')[0])).all()
            matching_rules = [
                rule for rule in rules if
                ipaddress.ip_network(str(access_to)) ==
                ipaddress.ip_network(str(rule['access_to']))
            ]
            return len(matching_rules) > 0
        else:
            return query_method(
                context, session, {'%s_id' % resource: resource_id,
                                   'access_type': access_type,
                                   'access_to': access_to}).count() > 0
@require_context
def share_access_delete_all_by_share(context, share_id):
    """Soft-delete every access rule attached to a share."""
    session = get_session()
    with session.begin():
        query = session.query(models.ShareAccessMapping)
        query.filter_by(share_id=share_id).soft_delete()
@require_context
def share_instance_access_delete(context, mapping_id):
    """Delete a share-instance access mapping.

    When the last instance mapping for an access rule goes away, the rule
    itself and its metadata are soft-deleted too.

    :raises: exception.NotFound if the mapping does not exist.
    """
    session = get_session()
    with session.begin():
        mapping = (session.query(models.ShareInstanceAccessMapping).
                   filter_by(id=mapping_id).first())
        if not mapping:
            # Fix: the exception was previously instantiated but never
            # raised, so execution continued with mapping=None and
            # crashed below with an AttributeError.
            raise exception.NotFound()
        mapping.soft_delete(session, update_status=True,
                            status_field_name='state')
        other_mappings = _share_instance_access_query(
            context, session, mapping['access_id']).all()
        # NOTE(u_glide): Remove access rule if all mappings were removed.
        if len(other_mappings) == 0:
            (session.query(models.ShareAccessRulesMetadata).filter_by(
                access_id=mapping['access_id']).soft_delete())
            (session.query(models.ShareAccessMapping).filter_by(
                id=mapping['access_id']).soft_delete())
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_instance_access_update(context, access_id, instance_id, updates):
    """Update an access rule and its per-instance mapping in one step.

    Keys in ``updates`` that belong to the parent rule (type, target, key,
    level) go to the ShareAccessMapping; the rest go to the instance-level
    mapping.

    :returns: the updated instance-level mapping.
    """
    session = get_session()
    share_access_fields = ('access_type', 'access_to', 'access_key',
                           'access_level')
    share_access_map_updates, share_instance_access_map_updates = (
        _extract_subdict_by_fields(updates, share_access_fields)
    )
    with session.begin():
        share_access = _share_access_get_query(
            context, session, {'id': access_id}).first()
        share_access.update(share_access_map_updates)
        share_access.save(session=session)
        access = _share_instance_access_query(
            context, session, access_id, instance_id).first()
        access.update(share_instance_access_map_updates)
        access.save(session=session)
        return access
###################
@require_context
def share_snapshot_instance_create(context, snapshot_id, values, session=None):
    """Create a snapshot instance for the given snapshot.

    ``values`` is deep-copied; a 'metadata' sub-dict becomes
    ShareSnapshotMetadata rows, and a missing id is generated.

    :returns: the created instance, re-fetched.
    """
    session = session or get_session()
    values = copy.deepcopy(values)
    values['share_snapshot_metadata'] = _metadata_refs(
        values.get('metadata'), models.ShareSnapshotMetadata)
    # Remap the size value onto the instance-level field (see helper).
    _change_size_to_instance_size(values)
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    values.update({'snapshot_id': snapshot_id})
    instance_ref = models.ShareSnapshotInstance()
    instance_ref.update(values)
    instance_ref.save(session=session)
    return share_snapshot_instance_get(context, instance_ref['id'],
                                       session=session)
@require_context
def share_snapshot_instance_update(context, instance_id, values):
    """Update a snapshot instance, ignoring '_extra_keys' pseudo-attrs."""
    session = get_session()
    instance_ref = share_snapshot_instance_get(context, instance_id,
                                               session=session)
    _change_size_to_instance_size(values)
    # NOTE(u_glide): Ignore updates to custom properties
    for extra_key in models.ShareSnapshotInstance._extra_keys:
        values.pop(extra_key, None)
    instance_ref.update(values)
    instance_ref.save(session=session)
    return instance_ref
@require_context
def share_snapshot_instance_delete(context, snapshot_instance_id,
                                   session=None):
    """Delete a snapshot instance plus its access rules/export locations.

    When the last instance of a snapshot is removed, the snapshot itself
    and its metadata are soft-deleted too.
    """
    session = session or get_session()
    with session.begin():
        snapshot_instance_ref = share_snapshot_instance_get(
            context, snapshot_instance_id, session=session)
        # Drop dependent records first: access rules, then export locations.
        access_rules = share_snapshot_access_get_all_for_snapshot_instance(
            context, snapshot_instance_id, session=session)
        for rule in access_rules:
            share_snapshot_instance_access_delete(
                context, rule['access_id'], snapshot_instance_id)
        for el in snapshot_instance_ref.export_locations:
            share_snapshot_instance_export_location_delete(context, el['id'])
        snapshot_instance_ref.soft_delete(
            session=session, update_status=True)
        snapshot = share_snapshot_get(
            context, snapshot_instance_ref['snapshot_id'], session=session)
        # Last instance gone -> remove the snapshot and its metadata too.
        if len(snapshot.instances) == 0:
            session.query(models.ShareSnapshotMetadata).filter_by(
                share_snapshot_id=snapshot['id']).soft_delete()
            snapshot.soft_delete(session=session)
@require_context
def share_snapshot_instance_get(context, snapshot_instance_id, session=None,
                                with_share_data=False):
    """Return one snapshot instance, optionally with its share attached."""
    if session is None:
        session = get_session()
    instance = _share_snapshot_instance_get_with_filters(
        context, instance_ids=[snapshot_instance_id],
        session=session).first()
    if instance is None:
        raise exception.ShareSnapshotInstanceNotFound(
            instance_id=snapshot_instance_id)
    if with_share_data:
        instance = _set_share_snapshot_instance_data(
            context, instance, session)[0]
    return instance
@require_context
def share_snapshot_instance_get_all_with_filters(context, search_filters,
                                                 with_share_data=False,
                                                 session=None):
    """Get snapshot instances filtered by known attrs, ignore unknown attrs.

    All filters accept list/tuples to filter on, along with simple values.
    """
    def listify(values):
        # Falsy values (None, empty) mean "no filter": return None so the
        # query builder skips them entirely.
        if not values:
            return None
        if isinstance(values, (list, tuple, set)):
            return values
        return (values,)

    session = session or get_session()
    known = ('instance_ids', 'snapshot_ids', 'share_instance_ids', 'statuses')
    filters = {name: listify(search_filters.get(name)) for name in known}

    instances = _share_snapshot_instance_get_with_filters(
        context, session=session, **filters).all()
    if with_share_data:
        instances = _set_share_snapshot_instance_data(
            context, instances, session)
    return instances
def _share_snapshot_instance_get_with_filters(context, instance_ids=None,
                                              snapshot_ids=None, statuses=None,
                                              share_instance_ids=None,
                                              session=None):
    """Build a ShareSnapshotInstance query constrained by the given lists.

    Each argument that is not None adds an IN(...) filter on the matching
    column; the result eagerly loads 'share_group_snapshot'.
    """
    model = models.ShareSnapshotInstance
    query = model_query(context, model, session=session, read_deleted="no")

    column_filters = (
        (model.id, instance_ids),
        (model.snapshot_id, snapshot_ids),
        (model.share_instance_id, share_instance_ids),
        (model.status, statuses),
    )
    for column, values in column_filters:
        if values is not None:
            query = query.filter(column.in_(values))

    return query.options(joinedload('share_group_snapshot'))
def _set_share_snapshot_instance_data(context, snapshot_instances, session):
    """Attach the fully-loaded share instance to each snapshot instance.

    Accepts a single instance or a list; always returns a list-like of
    instances with their 'share' key populated.
    """
    if snapshot_instances and not isinstance(snapshot_instances, list):
        snapshot_instances = [snapshot_instances]
    for item in snapshot_instances:
        item['share'] = share_instance_get(
            context, item['share_instance_id'],
            session=session, with_share_data=True)
    return snapshot_instances
###################
@require_context
def share_snapshot_create(context, create_values,
                          create_snapshot_instance=True):
    """Create a share snapshot and, by default, its first instance.

    *create_values* is deep-copied so the caller's dict is untouched;
    instance-specific keys are split off into the snapshot instance,
    which is bound to the share's active instance.
    """
    values = copy.deepcopy(create_values)
    values = ensure_model_dict_has_id(values)
    values['share_snapshot_metadata'] = _metadata_refs(
        values.pop('metadata', {}), models.ShareSnapshotMetadata)
    snapshot_ref = models.ShareSnapshot()
    # Split instance-level keys (e.g. status/size) from snapshot-level ones.
    snapshot_instance_values, snapshot_values = (
        _extract_snapshot_instance_values(values)
    )
    share_ref = share_get(context, snapshot_values.get('share_id'))
    snapshot_instance_values.update(
        {'share_instance_id': share_ref.instance.id}
    )
    snapshot_ref.update(snapshot_values)
    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)
        if create_snapshot_instance:
            share_snapshot_instance_create(
                context,
                snapshot_ref['id'],
                snapshot_instance_values,
                session=session
            )
        # Re-read through the getter to return joined-loaded relations.
        return share_snapshot_get(
            context, snapshot_values['id'], session=session)
@require_admin_context
def _snapshot_data_get_for_project(
    context, project_id, user_id, share_type_id=None,
):
    """Return (snapshot count, total snapshot size) for one project.

    When share_type_id is given, only snapshots of shares with that
    share type are counted; otherwise, when user_id is given, results
    are further restricted to that user.
    """
    query = model_query(
        context, models.ShareSnapshot,
        func.count(models.ShareSnapshot.id),
        func.sum(models.ShareSnapshot.size),
        read_deleted="no",
    ).filter_by(project_id=project_id)

    if share_type_id:
        join_condition = (
            models.ShareInstance.share_id == models.ShareSnapshot.share_id)
        query = query.join(
            models.ShareInstance, join_condition,
        ).filter_by(share_type_id=share_type_id)
    elif user_id:
        query = query.filter_by(user_id=user_id)

    row = query.first()
    return row[0] or 0, row[1] or 0
@require_context
def share_snapshot_get(context, snapshot_id, project_only=True, session=None):
    """Fetch one share snapshot by id or raise ShareSnapshotNotFound."""
    query = model_query(context, models.ShareSnapshot, session=session,
                        project_only=project_only)
    query = query.filter_by(id=snapshot_id)
    for relation in ('share', 'instances', 'share_snapshot_metadata'):
        query = query.options(joinedload(relation))
    snapshot = query.first()
    if not snapshot:
        raise exception.ShareSnapshotNotFound(snapshot_id=snapshot_id)
    return snapshot
def _share_snapshot_get_all_with_filters(context, project_id=None,
                                         share_id=None, filters=None,
                                         limit=None, offset=None,
                                         sort_key=None, sort_dir=None,
                                         show_count=False):
    """Retrieves all snapshots.

    If no sorting parameters are specified then returned snapshots are sorted
    by the 'created_at' key and desc order.

    :param context: context to query under
    :param project_id: optionally restrict results to one project
    :param share_id: optionally restrict results to one share
    :param filters: dictionary of filters
    :param limit: maximum number of items to return
    :param offset: number of items to skip before collecting results
    :param sort_key: attribute by which results should be sorted,default is
        created_at
    :param sort_dir: direction in which results should be sorted
    :param show_count: when True, return (total count, snapshots)
    :returns: list of matching snapshots
    """
    # Init data
    sort_key = sort_key or 'created_at'
    sort_dir = sort_dir or 'desc'
    # Deep-copy: filter keys are popped below, so the caller's dict must
    # stay intact.
    filters = copy.deepcopy(filters) if filters else {}
    query = model_query(context, models.ShareSnapshot)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if share_id:
        query = query.filter_by(share_id=share_id)
    query = (query.options(joinedload('share'))
             .options(joinedload('instances'))
             .options(joinedload('share_snapshot_metadata'))
             )
    # Snapshots with no instances are filtered out.
    query = query.filter(
        models.ShareSnapshot.id == models.ShareSnapshotInstance.snapshot_id)
    # Apply filters
    if 'usage' in filters:
        usage_filter_keys = ['any', 'used', 'unused']
        if filters['usage'] == 'any':
            pass
        elif filters['usage'] == 'used':
            # 'used': at least one share was created from this snapshot.
            query = query.filter(models.Share.snapshot_id == (
                models.ShareSnapshot.id))
        elif filters['usage'] == 'unused':
            query = query.filter(models.Share.snapshot_id != (
                models.ShareSnapshot.id))
        else:
            msg = _("Wrong 'usage' key provided - '%(key)s'. "
                    "Expected keys are '%(ek)s'.") % {
                'key': filters['usage'],
                'ek': usage_filter_keys}
            raise exception.InvalidInput(reason=msg)
        filters.pop('usage')
    if 'status' in filters:
        # Status is stored on the snapshot *instance*, not the snapshot row.
        query = query.filter(models.ShareSnapshotInstance.status == (
            filters['status']))
        filters.pop('status')
    if 'metadata' in filters:
        for k, v in filters['metadata'].items():
            # pylint: disable=no-member
            query = query.filter(
                or_(models.ShareSnapshot.share_snapshot_metadata.any(
                    key=k, value=v)))
        filters.pop('metadata')
    legal_filter_keys = ('display_name', 'display_name~',
                         'display_description', 'display_description~',
                         'id', 'user_id', 'project_id', 'share_id',
                         'share_proto', 'size', 'share_size')
    query = exact_filter(query, models.ShareSnapshot,
                         filters, legal_filter_keys)
    query = apply_sorting(models.ShareSnapshot, query, sort_key, sort_dir)
    count = None
    if show_count:
        # Counted before limit/offset so it reflects the full match set.
        count = query.count()
    if limit is not None:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    # Returns list of share snapshots that satisfy filters
    query = query.all()
    if show_count:
        return count, query
    return query
@require_admin_context
def share_snapshot_get_all(context, filters=None, limit=None, offset=None,
                           sort_key=None, sort_dir=None):
    """List every share snapshot (admin) honoring filters/paging/sorting."""
    return _share_snapshot_get_all_with_filters(
        context, filters=filters, limit=limit, offset=offset,
        sort_key=sort_key, sort_dir=sort_dir)
@require_admin_context
def share_snapshot_get_all_with_count(context, filters=None, limit=None,
                                      offset=None, sort_key=None,
                                      sort_dir=None):
    """Like share_snapshot_get_all, but also return the total match count."""
    # The helper already returns a (count, snapshots) tuple with show_count.
    return _share_snapshot_get_all_with_filters(
        context, filters=filters, limit=limit, offset=offset,
        sort_key=sort_key, sort_dir=sort_dir, show_count=True)
@require_context
def share_snapshot_get_all_by_project(context, project_id, filters=None,
                                      limit=None, offset=None,
                                      sort_key=None, sort_dir=None):
    """List one project's snapshots after verifying project access."""
    authorize_project_context(context, project_id)
    return _share_snapshot_get_all_with_filters(
        context, project_id=project_id, filters=filters,
        limit=limit, offset=offset,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
def share_snapshot_get_all_by_project_with_count(context, project_id,
                                                 filters=None, limit=None,
                                                 offset=None, sort_key=None,
                                                 sort_dir=None):
    """Like share_snapshot_get_all_by_project, plus the total match count."""
    authorize_project_context(context, project_id)
    # The helper returns (count, snapshots) when show_count is True.
    return _share_snapshot_get_all_with_filters(
        context, project_id=project_id, filters=filters,
        limit=limit, offset=offset,
        sort_key=sort_key, sort_dir=sort_dir, show_count=True)
@require_context
def share_snapshot_get_all_for_share(context, share_id, filters=None,
                                     sort_key=None, sort_dir=None):
    """List all snapshots that belong to one share."""
    return _share_snapshot_get_all_with_filters(
        context, share_id=share_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
def share_snapshot_get_latest_for_share(context, share_id):
    """Return the most recently created snapshot of a share, or None."""
    candidates = _share_snapshot_get_all_with_filters(
        context, share_id=share_id, sort_key='created_at', sort_dir='desc')
    if not candidates:
        return None
    return candidates[0]
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_snapshot_update(context, snapshot_id, values):
    """Update a snapshot, routing instance-level keys to its instance.

    Retried on DB deadlock (up to 5 times). Returns the snapshot ref.
    """
    session = get_session()
    with session.begin():
        snapshot_ref = share_snapshot_get(context, snapshot_id,
                                          session=session)
        # Split *values* into instance-level and snapshot-level updates.
        instance_values, snapshot_values = (
            _extract_snapshot_instance_values(values)
        )
        if snapshot_values:
            snapshot_ref.update(snapshot_values)
            snapshot_ref.save(session=session)
        if instance_values:
            snapshot_ref.instance.update(instance_values)
            snapshot_ref.instance.save(session=session)
        return snapshot_ref
@require_context
def share_snapshot_instances_status_update(
        context, snapshot_instance_ids, values, session=None):
    """Bulk-update the given snapshot instances; return affected row count."""
    session = session or get_session()
    query = model_query(
        context, models.ShareSnapshotInstance,
        read_deleted="no", session=session,
    ).filter(models.ShareSnapshotInstance.id.in_(snapshot_instance_ids))
    # synchronize_session=False: bulk UPDATE without refreshing any
    # in-session objects, matching the original behavior.
    return query.update(values, synchronize_session=False)
###################################
# Share Snapshot Metadata functions
###################################
@require_context
@require_share_snapshot_exists
def share_snapshot_metadata_get(context, share_snapshot_id):
    """Return all metadata of a share snapshot as a {key: value} dict."""
    return _share_snapshot_metadata_get(
        context, share_snapshot_id, session=get_session())
@require_context
@require_share_snapshot_exists
def share_snapshot_metadata_delete(context, share_snapshot_id, key):
    """Soft-delete a single metadata item of a share snapshot."""
    session = get_session()
    item = _share_snapshot_metadata_get_item(
        context, share_snapshot_id, key, session=session)
    item.soft_delete(session=session)
@require_context
@require_share_snapshot_exists
def share_snapshot_metadata_update(context, share_snapshot_id,
                                   metadata, delete):
    """Update snapshot metadata; optionally drop keys not in *metadata*."""
    return _share_snapshot_metadata_update(
        context, share_snapshot_id, metadata, delete,
        session=get_session())
def share_snapshot_metadata_update_item(context, share_snapshot_id,
                                        item):
    """Create or update a single snapshot metadata item."""
    return _share_snapshot_metadata_update(
        context, share_snapshot_id, item, delete=False,
        session=get_session())
def share_snapshot_metadata_get_item(context, share_snapshot_id,
                                     key):
    """Return {key: value} for one metadata item of a share snapshot."""
    row = _share_snapshot_metadata_get_item(
        context, share_snapshot_id, key, session=get_session())
    return {row['key']: row['value']}
def _share_snapshot_metadata_get_query(context, share_snapshot_id,
                                       session=None):
    """Base query for non-deleted metadata rows of one share snapshot."""
    if session is None:
        session = get_session()
    query = model_query(context, models.ShareSnapshotMetadata,
                        session=session, read_deleted="no")
    return (query.filter_by(share_snapshot_id=share_snapshot_id)
            .options(joinedload('share_snapshot')))
def _share_snapshot_metadata_get(context, share_snapshot_id, session=None):
    """Return all metadata of a share snapshot as a {key: value} dict."""
    session = session or get_session()
    rows = _share_snapshot_metadata_get_query(
        context, share_snapshot_id, session=session).all()
    return {row['key']: row['value'] for row in rows}
def _share_snapshot_metadata_get_item(context, share_snapshot_id,
                                      key, session=None):
    """Return the metadata model row for (share_snapshot_id, key).

    :raises: exception.MetadataItemNotFound if no such item exists.
    """
    session = session or get_session()
    result = (_share_snapshot_metadata_get_query(
        context, share_snapshot_id, session=session).filter_by(
        key=key).first())
    if not result:
        # Instantiate the exception explicitly for consistency with the
        # other metadata helpers in this module (see _share_metadata_get_item);
        # raising the bare class relied on Python's implicit instantiation.
        raise exception.MetadataItemNotFound()
    return result
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _share_snapshot_metadata_update(context, share_snapshot_id,
                                    metadata, delete, session=None):
    """Create/update snapshot metadata items; optionally prune the rest.

    When *delete* is truthy, existing keys absent from *metadata* are
    soft-deleted first. Retried on DB deadlock. Returns the *metadata*
    dict that was passed in.
    """
    session = session or get_session()
    # *delete* may arrive as a string (e.g. from an API layer).
    delete = strutils.bool_from_string(delete)
    with session.begin():
        if delete:
            original_metadata = _share_snapshot_metadata_get(
                context, share_snapshot_id, session=session)
            for meta_key, meta_value in original_metadata.items():
                if meta_key not in metadata:
                    meta_ref = _share_snapshot_metadata_get_item(
                        context, share_snapshot_id, meta_key,
                        session=session)
                    meta_ref.soft_delete(session=session)
        meta_ref = None
        # Now update all existing items with new values, or create new meta
        # objects
        for meta_key, meta_value in metadata.items():
            # update the value whether it exists or not
            item = {"value": meta_value}
            meta_ref = _share_snapshot_metadata_get_query(
                context, share_snapshot_id,
                session=session).filter_by(
                key=meta_key).first()
            if not meta_ref:
                meta_ref = models.ShareSnapshotMetadata()
                item.update({"key": meta_key,
                             "share_snapshot_id": share_snapshot_id})
            meta_ref.update(item)
            meta_ref.save(session=session)
    return metadata
#################################
@require_context
def share_snapshot_access_create(context, values):
    """Create a snapshot access rule and mirror it on every instance.

    One ShareSnapshotInstanceAccessMapping row is created per existing
    snapshot instance so the rule can be applied/tracked per instance.
    """
    values = ensure_model_dict_has_id(values)
    session = get_session()
    with session.begin():
        access_ref = models.ShareSnapshotAccessMapping()
        access_ref.update(values)
        access_ref.save(session=session)
        snapshot = share_snapshot_get(context, values['share_snapshot_id'],
                                      session=session)
        for instance in snapshot.instances:
            vals = {
                'share_snapshot_instance_id': instance['id'],
                'access_id': access_ref['id'],
            }
            _share_snapshot_instance_access_create(vals, session)
    return share_snapshot_access_get(context, access_ref['id'])
def _share_snapshot_access_get_query(context, session, filters,
                                     read_deleted='no'):
    """Base ShareSnapshotAccessMapping query filtered by *filters*."""
    return model_query(
        context, models.ShareSnapshotAccessMapping,
        session=session, read_deleted=read_deleted,
    ).filter_by(**filters)
def _share_snapshot_instance_access_get_query(context, session,
                                              access_id=None,
                                              share_snapshot_instance_id=None):
    """Query non-deleted instance access mappings, optionally narrowed."""
    filters = {'deleted': 'False'}
    if access_id is not None:
        filters['access_id'] = access_id
    if share_snapshot_instance_id is not None:
        filters['share_snapshot_instance_id'] = share_snapshot_instance_id
    return model_query(context, models.ShareSnapshotInstanceAccessMapping,
                       session=session).filter_by(**filters)
@require_context
def share_snapshot_instance_access_get_all(context, access_id, session):
    """Return every instance mapping that belongs to one access rule."""
    query = _share_snapshot_instance_access_get_query(
        context, session, access_id=access_id)
    return query.all()
@require_context
def share_snapshot_access_get(context, access_id, session=None):
    """Fetch a snapshot access rule by id or raise NotFound."""
    session = session or get_session()
    access = _share_snapshot_access_get_query(
        context, session, {'id': access_id}).first()
    if not access:
        raise exception.NotFound()
    return access
def _share_snapshot_instance_access_create(values, session):
    """Persist a new snapshot-instance access mapping and return it."""
    mapping = models.ShareSnapshotInstanceAccessMapping()
    mapping.update(ensure_model_dict_has_id(values))
    mapping.save(session=session)
    return mapping
@require_context
def share_snapshot_access_get_all_for_share_snapshot(context,
                                                     share_snapshot_id,
                                                     filters):
    """List access rules of one snapshot matching *filters*.

    NOTE: the caller-supplied *filters* dict is updated in place with the
    snapshot id (preserved from the original implementation).
    """
    filters['share_snapshot_id'] = share_snapshot_id
    return _share_snapshot_access_get_query(
        context, get_session(), filters).all()
@require_context
def share_snapshot_check_for_existing_access(context, share_snapshot_id,
                                             access_type, access_to):
    """Tell whether an equivalent access rule already exists for a snapshot."""
    resource_type = 'share_snapshot'
    return _check_for_existing_access(
        context, resource_type, share_snapshot_id, access_type, access_to)
@require_context
def share_snapshot_access_get_all_for_snapshot_instance(
        context, snapshot_instance_id, filters=None,
        with_snapshot_access_data=True, session=None):
    """Get all access rules related to a certain snapshot instance."""
    session = session or get_session()
    # Deep-copy so the caller's filter dict is never mutated.
    search = copy.deepcopy(filters) if filters else {}
    search['share_snapshot_instance_id'] = snapshot_instance_id

    legal_keys = ('id', 'share_snapshot_instance_id', 'access_id', 'state')
    query = exact_filter(
        _share_snapshot_instance_access_get_query(context, session),
        models.ShareSnapshotInstanceAccessMapping, search, legal_keys)

    accesses = query.all()
    if with_snapshot_access_data:
        accesses = _set_instances_snapshot_access_data(
            context, accesses, session)
    return accesses
@require_context
def share_snapshot_instance_access_update(
        context, access_id, instance_id, updates):
    """Update an access rule and its per-instance mapping atomically.

    *updates* is split: 'access_type'/'access_to' go to the rule
    (ShareSnapshotAccessMapping); everything else goes to the
    instance-level mapping. Returns the updated instance mapping.

    :raises: exception.NotFound when either record is missing.
    """
    snapshot_access_fields = ('access_type', 'access_to')
    snapshot_access_map_updates, share_instance_access_map_updates = (
        _extract_subdict_by_fields(updates, snapshot_access_fields)
    )
    session = get_session()
    with session.begin():
        snapshot_access = _share_snapshot_access_get_query(
            context, session, {'id': access_id}).first()
        if not snapshot_access:
            raise exception.NotFound()
        snapshot_access.update(snapshot_access_map_updates)
        snapshot_access.save(session=session)
        access = _share_snapshot_instance_access_get_query(
            context, session, access_id=access_id,
            share_snapshot_instance_id=instance_id).first()
        if not access:
            raise exception.NotFound()
        access.update(share_instance_access_map_updates)
        access.save(session=session)
        return access
@require_context
def share_snapshot_instance_access_get(
        context, access_id, share_snapshot_instance_id,
        with_snapshot_access_data=True):
    """Fetch one instance access mapping, optionally with rule data."""
    session = get_session()
    with session.begin():
        mapping = _share_snapshot_instance_access_get_query(
            context, session, access_id=access_id,
            share_snapshot_instance_id=share_snapshot_instance_id).first()
        if mapping is None:
            raise exception.NotFound()
        if not with_snapshot_access_data:
            return mapping
        return _set_instances_snapshot_access_data(
            context, mapping, session)[0]
@require_context
def share_snapshot_instance_access_delete(
        context, access_id, snapshot_instance_id):
    """Soft-delete one instance access mapping.

    When the deleted mapping was the last one referencing its access
    rule, the parent ShareSnapshotAccessMapping is soft-deleted too.

    :raises: exception.NotFound if the mapping does not exist.
    """
    session = get_session()
    with session.begin():
        rule = _share_snapshot_instance_access_get_query(
            context, session, access_id=access_id,
            share_snapshot_instance_id=snapshot_instance_id).first()
        if not rule:
            # BUG FIX: the exception was instantiated but never raised,
            # so a missing row previously fell through to an
            # AttributeError on 'rule.soft_delete' instead of NotFound.
            raise exception.NotFound()
        rule.soft_delete(session, update_status=True,
                         status_field_name='state')
        other_mappings = share_snapshot_instance_access_get_all(
            context, rule['access_id'], session)
        # If no other mappings exist we can delete the access rule entry.
        if len(other_mappings) == 0:
            (
                session.query(models.ShareSnapshotAccessMapping)
                .filter_by(id=rule['access_id'])
                .soft_delete(update_status=True, status_field_name='state')
            )
@require_context
def share_snapshot_instance_export_location_create(context, values):
    """Create and return a snapshot-instance export location record."""
    values = ensure_model_dict_has_id(values)
    session = get_session()
    with session.begin():
        location = models.ShareSnapshotInstanceExportLocation()
        location.update(values)
        location.save(session=session)
        return location
def _share_snapshot_instance_export_locations_get_query(context, session,
                                                        values):
    """Base export-location query filtered by the given column values."""
    return model_query(
        context, models.ShareSnapshotInstanceExportLocation,
        session=session).filter_by(**values)
@require_context
def share_snapshot_export_locations_get(context, snapshot_id):
    """Return export locations of every instance of one snapshot."""
    session = get_session()
    snapshot = share_snapshot_get(context, snapshot_id, session=session)
    instance_ids = [instance['id'] for instance in snapshot.instances]
    query = _share_snapshot_instance_export_locations_get_query(
        context, session, {})
    return query.filter(
        models.ShareSnapshotInstanceExportLocation
        .share_snapshot_instance_id.in_(instance_ids)).all()
@require_context
def share_snapshot_instance_export_locations_get_all(
        context, share_snapshot_instance_id, session=None):
    """List all export locations of one snapshot instance."""
    session = session or get_session()
    return _share_snapshot_instance_export_locations_get_query(
        context, session,
        {'share_snapshot_instance_id': share_snapshot_instance_id}).all()
@require_context
def share_snapshot_instance_export_location_get(context, el_id):
    """Fetch one export location by id or raise NotFound."""
    location = _share_snapshot_instance_export_locations_get_query(
        context, get_session(), {'id': el_id}).first()
    if not location:
        raise exception.NotFound()
    return location
@require_context
def share_snapshot_instance_export_location_delete(context, el_id):
    """Soft-delete one snapshot-instance export location.

    :raises: exception.NotFound if no export location with *el_id* exists.
    """
    session = get_session()
    with session.begin():
        el = _share_snapshot_instance_export_locations_get_query(
            context, session, {'id': el_id}).first()
        if not el:
            # BUG FIX: the exception was instantiated but never raised,
            # so a missing row previously caused an AttributeError on
            # 'el.soft_delete' below instead of a proper NotFound error.
            raise exception.NotFound()
        el.soft_delete(session=session)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_snapshot_instance_export_locations_update(
        context, share_snapshot_instance_id, export_locations, delete):
    """Reconcile the stored export locations of a snapshot instance.

    Rows whose path is still listed get a refreshed 'updated_at'; new
    paths are inserted; when *delete* is true, rows whose path is no
    longer listed are soft-deleted. Returns the resulting set of paths.
    """
    # NOTE(dviroel): Lets keep this backward compatibility for driver that
    # may still return export_locations as string
    if not isinstance(export_locations, (list, tuple, set)):
        export_locations = (export_locations, )
    export_locations_as_dicts = []
    for el in export_locations:
        export_location = el
        if isinstance(el, str):
            export_location = {
                "path": el,
                "is_admin_only": False,
            }
        elif not isinstance(export_location, dict):
            raise exception.ManilaException(
                _("Wrong export location type '%s'.") % type(export_location))
        export_locations_as_dicts.append(export_location)
    export_locations = export_locations_as_dicts
    export_locations_paths = [el['path'] for el in export_locations]
    session = get_session()
    current_el_rows = share_snapshot_instance_export_locations_get_all(
        context, share_snapshot_instance_id, session=session)

    def get_path_list_from_rows(rows):
        # Paths are compared as a set: order is irrelevant here.
        return set([row['path'] for row in rows])

    current_el_paths = get_path_list_from_rows(current_el_rows)

    def create_indexed_time_dict(key_list):
        base = timeutils.utcnow()
        return {
            # NOTE(u_glide): Incrementing timestamp by microseconds to make
            # timestamp order match index order.
            key: base + datetime.timedelta(microseconds=index)
            for index, key in enumerate(key_list)
        }

    indexed_update_time = create_indexed_time_dict(export_locations_paths)
    for el in current_el_rows:
        if delete and el['path'] not in export_locations_paths:
            el.soft_delete(session)
        else:
            # Path still valid: refresh its timestamp only.
            updated_at = indexed_update_time[el['path']]
            el.update({
                'updated_at': updated_at,
            })
            el.save(session=session)
    # Now add new export locations
    for el in export_locations:
        if el['path'] in current_el_paths:
            # Already updated
            continue
        location_ref = models.ShareSnapshotInstanceExportLocation()
        location_ref.update({
            'id': uuidutils.generate_uuid(),
            'path': el['path'],
            'share_snapshot_instance_id': share_snapshot_instance_id,
            'updated_at': indexed_update_time[el['path']],
            'is_admin_only': el.get('is_admin_only', False),
        })
        location_ref.save(session=session)
    return get_path_list_from_rows(
        share_snapshot_instance_export_locations_get_all(
            context, share_snapshot_instance_id, session=session))
#################################
def _share_metadata_get_query(context, share_id):
    """Base query for non-deleted metadata rows of one share."""
    query = model_query(context, models.ShareMetadata, read_deleted="no")
    return query.filter_by(share_id=share_id).options(joinedload('share'))
@require_context
@require_share_exists
@context_manager.reader
def share_metadata_get(context, share_id):
    """Return all metadata for a share as a {key: value} dict."""
    return _share_metadata_get(context, share_id)
def _share_metadata_get(context, share_id):
    """Return all metadata of a share as a {key: value} dict."""
    rows = _share_metadata_get_query(context, share_id).all()
    return {row['key']: row['value'] for row in rows}
@require_context
@require_share_exists
@context_manager.reader
def share_metadata_get_item(context, share_id, key):
    """Return {key: value} for a single metadata item of a share.

    :raises: exception.MetadataItemNotFound if the key does not exist.
    """
    # The previous catch-and-re-raise of MetadataItemNotFound was redundant
    # (it re-raised the very same exception type with identical args) and
    # obscured the original traceback; let the helper's exception propagate.
    row = _share_metadata_get_item(context, share_id, key)
    return {row['key']: row['value']}
@require_context
@require_share_exists
@context_manager.writer
def share_metadata_delete(context, share_id, key):
    """Soft-delete one metadata item of a share."""
    query = _share_metadata_get_query(context, share_id).filter_by(key=key)
    query.soft_delete()
@require_context
@require_share_exists
@context_manager.writer
def share_metadata_update(context, share_id, metadata, delete):
    """Update share metadata; optionally drop keys absent from *metadata*."""
    return _share_metadata_update(context, share_id, metadata, delete)
@require_context
@require_share_exists
@context_manager.writer
def share_metadata_update_item(context, share_id, item):
    """Create or update a single share metadata item."""
    return _share_metadata_update(context, share_id, item, delete=False)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _share_metadata_update(context, share_id, metadata, delete):
    """Create/update share metadata items; optionally prune the rest.

    When *delete* is truthy, existing keys absent from *metadata* are
    soft-deleted first. Uses the session carried on *context*. Returns
    the *metadata* dict passed in.
    """
    # Set existing metadata to deleted if delete argument is True
    delete = strutils.bool_from_string(delete)
    if delete:
        original_metadata = _share_metadata_get(context, share_id)
        for meta_key, meta_value in original_metadata.items():
            if meta_key not in metadata:
                meta_ref = _share_metadata_get_item(
                    context, share_id, meta_key,
                )
                meta_ref.soft_delete(session=context.session)
    meta_ref = None
    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.items():
        # update the value whether it exists or not
        item = {"value": meta_value}
        try:
            meta_ref = _share_metadata_get_item(
                context, share_id, meta_key,
            )
        except exception.MetadataItemNotFound:
            # Key does not exist yet: create a fresh row.
            meta_ref = models.ShareMetadata()
            item.update({"key": meta_key, "share_id": share_id})
        meta_ref.update(item)
        meta_ref.save(session=context.session)
    return metadata
def _share_metadata_get_item(context, share_id, key):
    """Return the metadata model row for (share_id, key) or raise."""
    query = _share_metadata_get_query(context, share_id).filter_by(key=key)
    row = query.first()
    if row is None:
        raise exception.MetadataItemNotFound()
    return row
############################
# Export locations functions
############################
def _share_export_locations_get(context, share_instance_ids,
                                include_admin_only=True,
                                ignore_secondary_replicas=False, session=None):
    """Return export location rows for the given share instance id(s).

    Results are ordered by 'updated_at' and eagerly load their metadata.
    Admin-only locations can be excluded, as can locations belonging to
    non-active replicas.
    """
    session = session or get_session()
    # Accept a single id as well as a collection of ids.
    if not isinstance(share_instance_ids, (set, list, tuple)):
        share_instance_ids = (share_instance_ids, )
    query = model_query(
        context,
        models.ShareInstanceExportLocations,
        session=session,
        read_deleted="no",
    ).filter(
        models.ShareInstanceExportLocations.share_instance_id.in_(
            share_instance_ids),
    ).order_by(
        "updated_at",
    ).options(
        joinedload("_el_metadata_bare"),
    )
    if not include_admin_only:
        query = query.filter_by(is_admin_only=False)
    if ignore_secondary_replicas:
        # Keep locations whose instance is not a replica at all, or is
        # the active replica.
        replica_state_attr = models.ShareInstance.replica_state
        query = query.join("share_instance").filter(
            or_(replica_state_attr == None,  # noqa
                replica_state_attr == constants.REPLICA_STATE_ACTIVE))
    return query.all()
@require_context
@require_share_exists
def share_export_locations_get_by_share_id(context, share_id,
                                           include_admin_only=True,
                                           ignore_migration_destination=False,
                                           ignore_secondary_replicas=False):
    """Return export location rows for all (relevant) instances of a share."""
    share = share_get(context, share_id)
    instances = share.instances
    if ignore_migration_destination:
        # Skip instances that are migration targets.
        instances = [i for i in instances
                     if i['status'] != constants.STATUS_MIGRATING_TO]
    instance_ids = [i.id for i in instances]
    return _share_export_locations_get(
        context, instance_ids, include_admin_only=include_admin_only,
        ignore_secondary_replicas=ignore_secondary_replicas)
@require_context
@require_share_instance_exists
def share_export_locations_get_by_share_instance_id(context,
                                                    share_instance_id,
                                                    include_admin_only=True):
    """Return export location rows for a single share instance."""
    return _share_export_locations_get(
        context, [share_instance_id],
        include_admin_only=include_admin_only)
@require_context
@require_share_exists
def share_export_locations_get(context, share_id):
    """Return export location paths (strings) of a share's active instance.

    Kept for compatibility with the old approach; the newer
    'share_export_locations_get_by_share_id' returns dicts instead of
    plain path strings.
    """
    share = share_get(context, share_id)
    rows = _share_export_locations_get(
        context, share.instance.id, context.is_admin)
    return [row['path'] for row in rows]
@require_context
def share_export_location_get_by_uuid(context, export_location_uuid,
                                      ignore_secondary_replicas=False,
                                      session=None):
    """Fetch one export location row by its uuid.

    :raises: exception.ExportLocationNotFound when no match exists (or
        the only match belongs to a non-active replica and those are
        being ignored).
    """
    session = session or get_session()
    query = model_query(
        context,
        models.ShareInstanceExportLocations,
        session=session,
        read_deleted="no",
    ).filter_by(
        uuid=export_location_uuid,
    ).options(
        joinedload("_el_metadata_bare"),
    )
    if ignore_secondary_replicas:
        # Keep rows whose instance is not a replica or is the active one.
        replica_state_attr = models.ShareInstance.replica_state
        query = query.join("share_instance").filter(
            or_(replica_state_attr == None,  # noqa
                replica_state_attr == constants.REPLICA_STATE_ACTIVE))
    result = query.first()
    if not result:
        raise exception.ExportLocationNotFound(uuid=export_location_uuid)
    return result
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def share_export_locations_update(context, share_instance_id, export_locations,
                                  delete):
    """Reconcile the stored export locations of a share instance.

    Existing rows matching a listed path get 'updated_at' refreshed (and
    are undeleted); new paths are inserted; when *delete* is true, rows
    whose path is no longer listed are soft-deleted along with their
    metadata. Returns the resulting set of paths.
    """
    # NOTE(u_glide):
    # Backward compatibility code for drivers,
    # which return single export_location as string
    if not isinstance(export_locations, (list, tuple, set)):
        export_locations = (export_locations, )
    export_locations_as_dicts = []
    for el in export_locations:
        # NOTE(vponomaryov): transform old export locations view to new one
        export_location = el
        if isinstance(el, str):
            export_location = {
                "path": el,
                "is_admin_only": False,
                "metadata": {},
            }
        elif isinstance(export_location, dict):
            if 'metadata' not in export_location:
                export_location['metadata'] = {}
        else:
            raise exception.ManilaException(
                _("Wrong export location type '%s'.") % type(export_location))
        export_locations_as_dicts.append(export_location)
    export_locations = export_locations_as_dicts
    export_locations_paths = [el['path'] for el in export_locations]
    session = get_session()
    current_el_rows = _share_export_locations_get(
        context, share_instance_id, session=session)

    def get_path_list_from_rows(rows):
        # Paths are compared as a set: order is irrelevant here.
        return set([row['path'] for row in rows])

    current_el_paths = get_path_list_from_rows(current_el_rows)

    def create_indexed_time_dict(key_list):
        base = timeutils.utcnow()
        return {
            # NOTE(u_glide): Incrementing timestamp by microseconds to make
            # timestamp order match index order.
            key: base + datetime.timedelta(microseconds=index)
            for index, key in enumerate(key_list)
        }

    indexed_update_time = create_indexed_time_dict(export_locations_paths)
    for el in current_el_rows:
        if delete and el['path'] not in export_locations_paths:
            export_location_metadata_delete(context, el['uuid'])
            el.soft_delete(session)
        else:
            # Path still valid: refresh timestamp and clear soft-delete.
            updated_at = indexed_update_time[el['path']]
            el.update({
                'updated_at': updated_at,
                'deleted': 0,
            })
            el.save(session=session)
            if el['el_metadata']:
                export_location_metadata_update(
                    context, el['uuid'], el['el_metadata'], session=session)
    # Now add new export locations
    for el in export_locations:
        if el['path'] in current_el_paths:
            # Already updated
            continue
        location_ref = models.ShareInstanceExportLocations()
        location_ref.update({
            'uuid': uuidutils.generate_uuid(),
            'path': el['path'],
            'share_instance_id': share_instance_id,
            'updated_at': indexed_update_time[el['path']],
            'deleted': 0,
            'is_admin_only': el.get('is_admin_only', False),
        })
        location_ref.save(session=session)
        if not el.get('metadata'):
            continue
        export_location_metadata_update(
            context, location_ref['uuid'], el.get('metadata'), session=session)
    return get_path_list_from_rows(_share_export_locations_get(
        context, share_instance_id, session=session))
#####################################
# Export locations metadata functions
#####################################
def _export_location_metadata_get_query(context, export_location_uuid,
                                        session=None):
    """Base query for metadata rows of the export location with this uuid."""
    session = session or get_session()
    # Resolve the public uuid to the internal integer primary key first.
    el_id = share_export_location_get_by_uuid(
        context, export_location_uuid).id
    return model_query(
        context, models.ShareInstanceExportLocationsMetadata,
        session=session, read_deleted="no",
    ).filter_by(export_location_id=el_id)
@require_context
def export_location_metadata_get(context, export_location_uuid, session=None):
    """Return export-location metadata as a {key: value} dict."""
    rows = _export_location_metadata_get_query(
        context, export_location_uuid, session=session).all()
    return {row["key"]: row["value"] for row in rows}
@require_context
def export_location_metadata_delete(context, export_location_uuid, keys=None):
    """Soft-delete export-location metadata; all of it when *keys* is None."""
    session = get_session()
    query = _export_location_metadata_get_query(
        context, export_location_uuid, session=session)
    # NOTE(vponomaryov): if keys is None then we delete all metadata.
    if keys is not None:
        if not isinstance(keys, (list, set, tuple)):
            keys = (keys, )
        query = query.filter(
            models.ShareInstanceExportLocationsMetadata.key.in_(keys))
    for row in query.all():
        row.soft_delete(session=session)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def export_location_metadata_update(context, export_location_uuid, metadata,
                                    delete=False, session=None):
    """Create or update metadata key/value pairs on an export location.

    :param metadata: dict of keys/values to store.
    :param delete: when True, keys currently stored but absent from
        *metadata* are removed first.
    :return: the *metadata* dict that was passed in.
    """
    session = session or get_session()
    if delete:
        original_metadata = export_location_metadata_get(
            context, export_location_uuid, session=session)
        keys_for_deletion = set(original_metadata).difference(metadata)
        if keys_for_deletion:
            export_location_metadata_delete(
                context, export_location_uuid, keys=keys_for_deletion)
    el = share_export_location_get_by_uuid(context, export_location_uuid)
    for meta_key, meta_value in metadata.items():
        # NOTE(vponomaryov): we should use separate session
        # for each meta_ref because of autoincrement of integer primary key
        # that will not take effect using one session and we will rewrite,
        # in that case, single record - first one added with this call.
        session = get_session()
        if meta_value is None:
            # Stored anyway; the warning flags a driver that returned None.
            LOG.warning("%s should be properly defined in the driver.",
                        meta_key)
        item = {"value": meta_value, "updated_at": timeutils.utcnow()}
        meta_ref = _export_location_metadata_get_query(
            context, export_location_uuid, session=session,
        ).filter_by(
            key=meta_key,
        ).first()
        if not meta_ref:
            meta_ref = models.ShareInstanceExportLocationsMetadata()
            item.update({
                "key": meta_key,
                "export_location_id": el.id,
            })
        meta_ref.update(item)
        meta_ref.save(session=session)
    return metadata
###################################
def _security_service_get_query(context, project_only=False):
    """Build the base query for SecurityService records."""
    query = model_query(
        context, models.SecurityService, project_only=project_only,
    )
    return query
@require_context
@context_manager.writer
def security_service_create(context, values):
    """Create a security service record; an 'id' is generated if absent."""
    values = ensure_model_dict_has_id(values)
    security_service_ref = models.SecurityService()
    security_service_ref.update(values)
    security_service_ref.save(session=context.session)
    return security_service_ref
@require_context
@context_manager.writer
def security_service_delete(context, id):
    """Soft-delete the security service with the given id."""
    security_service_ref = _security_service_get(context, id)
    security_service_ref.soft_delete(session=context.session)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def security_service_update(context, id, values):
    """Update a security service with *values* and return the row."""
    security_service_ref = _security_service_get(context, id)
    security_service_ref.update(values)
    security_service_ref.save(session=context.session)
    return security_service_ref
@require_context
@context_manager.reader
def security_service_get(context, id, **kwargs):
    """Public read wrapper around :func:`_security_service_get`."""
    return _security_service_get(context, id, **kwargs)
@require_context
def _security_service_get(context, id, session=None, **kwargs):
    """Fetch one security service by id.

    :raises: SecurityServiceNotFound when no row matches.
    """
    result = _security_service_get_query(
        context,
        **kwargs,
    ).filter_by(id=id).first()
    if result is None:
        raise exception.SecurityServiceNotFound(security_service_id=id)
    return result
@require_context
@context_manager.reader
def security_service_get_all(context):
    """Return every security service visible to the context."""
    return _security_service_get_query(context).all()
@require_context
@context_manager.reader
def security_service_get_all_by_project(context, project_id):
    """Return all security services owned by *project_id*."""
    return _security_service_get_query(context).filter_by(
        project_id=project_id,
    ).all()
@require_context
@context_manager.reader
def security_service_get_all_by_share_network(context, share_network_id):
    """Return security services associated with the given share network.

    Joins through the association table and only follows live
    (deleted == 0) association rows.
    """
    return model_query(
        context, models.SecurityService,
    ).join(
        models.ShareNetworkSecurityServiceAssociation,
        models.SecurityService.id ==
        models.ShareNetworkSecurityServiceAssociation.security_service_id,
    ).filter_by(
        share_network_id=share_network_id, deleted=0,
    ).all()
###################
def _share_network_get_query(context):
    """Build the base ShareNetwork query with related collections eagerly
    loaded (instances, security services, subnets)."""
    return model_query(
        context, models.ShareNetwork, project_only=True,
    ).options(
        joinedload('share_instances'),
        joinedload('security_services'),
        subqueryload('share_network_subnets'),
    )
@require_context
@context_manager.writer
def share_network_create(context, values):
    """Create a share network and return it re-read with eager relations."""
    values = ensure_model_dict_has_id(values)
    network_ref = models.ShareNetwork()
    network_ref.update(values)
    network_ref.save(session=context.session)
    # Re-read so the returned object carries the eagerly-loaded relations.
    return _share_network_get(context, values['id'])
@require_context
@context_manager.writer
def share_network_delete(context, id):
    """Soft-delete the share network with the given id."""
    network_ref = _share_network_get(context, id)
    network_ref.soft_delete(session=context.session)
@require_context
@context_manager.writer
def share_network_update(context, id, values):
    """Update a share network with *values* and return the row."""
    network_ref = _share_network_get(context, id)
    network_ref.update(values)
    network_ref.save(session=context.session)
    return network_ref
@require_context
@context_manager.reader
def share_network_get(context, id):
    """Public read wrapper around :func:`_share_network_get`."""
    return _share_network_get(context, id)
@require_context
def _share_network_get(context, id):
    """Fetch one share network by id.

    :raises: ShareNetworkNotFound when no row matches.
    """
    result = _share_network_get_query(context).filter_by(id=id).first()
    if result is None:
        raise exception.ShareNetworkNotFound(share_network_id=id)
    return result
@require_context
@context_manager.reader
def share_network_get_all_by_filter(context, filters=None):
    """List share networks matching the given filters.

    Recognized keys: 'project_id', 'created_since', 'created_before'
    (passed through exact_filter) and 'security_service_id' (matched
    via the association table, live rows only).
    """
    query = _share_network_get_query(context)
    legal_filter_keys = ('project_id', 'created_since', 'created_before')
    if not filters:
        filters = {}
    query = exact_filter(
        query, models.ShareNetwork, filters, legal_filter_keys,
    )
    if 'security_service_id' in filters:
        security_service_id = filters.get('security_service_id')
        query = query.join(
            models.ShareNetworkSecurityServiceAssociation,
            models.ShareNetwork.id == models.ShareNetworkSecurityServiceAssociation.share_network_id,  # noqa: E501
        ).filter_by(
            security_service_id=security_service_id,
            deleted=0,
        )
    return query.all()
@require_context
@context_manager.reader
def share_network_get_all(context):
    """Return every share network visible to the context."""
    return _share_network_get_query(context).all()
@require_context
@context_manager.reader
def share_network_get_all_by_project(context, project_id):
    """Return all share networks owned by *project_id*."""
    return _share_network_get_query(
        context,
    ).filter_by(project_id=project_id).all()
@require_context
@context_manager.reader
def share_network_get_all_by_security_service(context, security_service_id):
    """Return share networks associated with the given security service.

    Joins through the association table, live (deleted == 0) rows only.
    """
    return model_query(
        context, models.ShareNetwork,
    ).join(
        models.ShareNetworkSecurityServiceAssociation,
        models.ShareNetwork.id ==
        models.ShareNetworkSecurityServiceAssociation.share_network_id,
    ).filter_by(security_service_id=security_service_id, deleted=0).all()
@require_context
@context_manager.writer
def share_network_add_security_service(context, id, security_service_id):
    """Associate a security service with a share network.

    :raises: ShareNetworkSecurityServiceAssociationError if the pair is
        already associated.
    :return: the share network row with the association added.
    """
    assoc_ref = model_query(
        context,
        models.ShareNetworkSecurityServiceAssociation,
    ).filter_by(
        share_network_id=id,
    ).filter_by(security_service_id=security_service_id).first()
    if assoc_ref:
        msg = "Already associated"
        raise exception.ShareNetworkSecurityServiceAssociationError(
            share_network_id=id,
            security_service_id=security_service_id,
            reason=msg,
        )
    share_nw_ref = _share_network_get(context, id)
    security_service_ref = _security_service_get(context, security_service_id)
    # Appending to the relationship creates the association row on save.
    share_nw_ref.security_services += [security_service_ref]
    share_nw_ref.save(session=context.session)
    return share_nw_ref
@require_context
@context_manager.reader
def share_network_security_service_association_get(
    context, share_network_id, security_service_id,
):
    """Return the association row for the network/service pair, or None."""
    query = model_query(
        context,
        models.ShareNetworkSecurityServiceAssociation,
    )
    return query.filter_by(
        share_network_id=share_network_id,
        security_service_id=security_service_id,
    ).first()
@require_context
@context_manager.writer
def share_network_remove_security_service(context, id, security_service_id):
    """Dissociate a security service from a share network.

    :raises: ShareNetworkSecurityServiceDissociationError if no
        association exists.
    :return: the share network row.
    """
    share_nw_ref = _share_network_get(context, id)
    # Raises if the security service itself does not exist.
    _security_service_get(context, security_service_id)
    assoc_ref = model_query(
        context,
        models.ShareNetworkSecurityServiceAssociation,
    ).filter_by(
        share_network_id=id,
    ).filter_by(security_service_id=security_service_id).first()
    if assoc_ref:
        assoc_ref.soft_delete(session=context.session)
    else:
        msg = "No association defined"
        raise exception.ShareNetworkSecurityServiceDissociationError(
            share_network_id=id,
            security_service_id=security_service_id,
            reason=msg,
        )
    return share_nw_ref
@require_context
@context_manager.writer
def share_network_update_security_service(
    context, id, current_security_service_id, new_security_service_id,
):
    """Replace one security service association with another.

    Soft-deletes the current association and adds the new service.

    :raises: ShareNetworkSecurityServiceDissociationError if the current
        association does not exist; SecurityServiceNotFound if either
        security service id is unknown.
    :return: the share network row.
    """
    share_nw_ref = _share_network_get(context, id)
    # Check if the old security service exists
    _security_service_get(context, current_security_service_id)
    new_security_service_ref = _security_service_get(
        context, new_security_service_id,
    )
    assoc_ref = model_query(
        context,
        models.ShareNetworkSecurityServiceAssociation,
    ).filter_by(
        share_network_id=id,
    ).filter_by(
        security_service_id=current_security_service_id,
    ).first()
    if assoc_ref:
        assoc_ref.soft_delete(session=context.session)
    else:
        msg = "No association defined"
        raise exception.ShareNetworkSecurityServiceDissociationError(
            share_network_id=id,
            security_service_id=current_security_service_id,
            reason=msg)
    # Add new association
    share_nw_ref.security_services += [new_security_service_ref]
    share_nw_ref.save(session=context.session)
    return share_nw_ref
@require_context
def _count_share_networks(
    context, project_id, user_id=None, share_type_id=None,
):
    """Count non-deleted share networks for a project.

    When *share_type_id* is given, counts networks having instances of
    that share type; otherwise *user_id* (if given) narrows the count.
    """
    query = model_query(
        context, models.ShareNetwork,
        func.count(models.ShareNetwork.id),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    if share_type_id:
        query = query.join("share_instances").filter_by(
            share_type_id=share_type_id)
    elif user_id is not None:
        query = query.filter_by(user_id=user_id)
    return query.first()[0]
###################
@require_context
def _share_network_subnet_get_query(context):
    """Build the base ShareNetworkSubnet query with servers, parent
    network and metadata eagerly loaded."""
    return model_query(
        context, models.ShareNetworkSubnet,
    ).options(
        joinedload('share_servers'),
        joinedload('share_network'),
        joinedload('share_network_subnet_metadata'),
    )
@require_context
@context_manager.writer
def share_network_subnet_create(context, values):
    """Create a share network subnet; 'metadata' in *values* becomes
    metadata rows. Returns the subnet re-read with eager relations."""
    values = ensure_model_dict_has_id(values)
    values['share_network_subnet_metadata'] = _metadata_refs(
        values.pop('metadata', {}), models.ShareNetworkSubnetMetadata)
    network_subnet_ref = models.ShareNetworkSubnet()
    network_subnet_ref.update(values)
    network_subnet_ref.save(session=context.session)
    return _share_network_subnet_get(
        context, network_subnet_ref['id'],
    )
@require_context
@context_manager.writer
def share_network_subnet_delete(context, network_subnet_id):
    """Soft-delete a subnet and all of its metadata rows."""
    network_subnet_ref = _share_network_subnet_get(context, network_subnet_id)
    context.session.query(models.ShareNetworkSubnetMetadata).filter_by(
        share_network_subnet_id=network_subnet_id,
    ).soft_delete()
    network_subnet_ref.soft_delete(session=context.session, update_status=True)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def share_network_subnet_update(context, network_subnet_id, values):
    """Update a share network subnet with *values* and return the row."""
    network_subnet_ref = _share_network_subnet_get(context, network_subnet_id)
    network_subnet_ref.update(values)
    network_subnet_ref.save(session=context.session)
    return network_subnet_ref
@require_context
@context_manager.reader
def share_network_subnet_get(context, network_subnet_id, parent_id=None):
    """Public read wrapper around :func:`_share_network_subnet_get`."""
    return _share_network_subnet_get(
        context, network_subnet_id, parent_id=parent_id,
    )
@require_context
def _share_network_subnet_get(context, network_subnet_id, parent_id=None):
    """Fetch one subnet by id, optionally scoped to a parent network.

    :raises: ShareNetworkSubnetNotFound when no row matches.
    """
    lookup = {'id': network_subnet_id}
    if parent_id:
        lookup['share_network_id'] = parent_id
    subnet = _share_network_subnet_get_query(
        context,
    ).filter_by(**lookup).first()
    if subnet is None:
        raise exception.ShareNetworkSubnetNotFound(
            share_network_subnet_id=network_subnet_id,
        )
    return subnet
@require_context
@context_manager.reader
def share_network_subnet_get_all_with_same_az(context, network_subnet_id):
    """Return all subnets of the same share network and availability zone
    as the given subnet.

    A NULL availability_zone_id is coalesced to '0' on both sides so
    default (AZ-less) subnets match each other.

    :raises: ShareNetworkSubnetNotFound when nothing matches.
    """
    subnet = _share_network_subnet_get_query(
        context,
    ).filter_by(id=network_subnet_id).subquery()
    result = _share_network_subnet_get_query(
        context,
    ).join(
        subnet,
        subnet.c.share_network_id ==
        models.ShareNetworkSubnet.share_network_id,
    ).filter(
        func.coalesce(subnet.c.availability_zone_id, '0') ==
        func.coalesce(models.ShareNetworkSubnet.availability_zone_id, '0')
    ).all()
    if not result:
        raise exception.ShareNetworkSubnetNotFound(
            share_network_subnet_id=network_subnet_id,
        )
    return result
@require_context
@context_manager.reader
def share_network_subnet_get_all(context):
    """Return every share network subnet visible to the context."""
    return _share_network_subnet_get_query(context).all()
@require_context
@context_manager.reader
def share_network_subnet_get_all_by_share_network(context, network_id):
    """Return all subnets belonging to the given share network."""
    return _share_network_subnet_get_query(context).filter_by(
        share_network_id=network_id,
    ).all()
@require_context
@context_manager.reader
def share_network_subnets_get_all_by_availability_zone_id(
    context, share_network_id, availability_zone_id,
    fallback_to_default=True,
):
    """Get the share network subnets DB records in a given AZ.
    This method returns list of subnets DB record for a given share network id
    and an availability zone. If the 'availability_zone_id' is 'None', a
    record may be returned and it will represent the default share network
    subnets. If there is no subnet for a specific availability zone id and
    "fallback_to_default" is True, this method will return the default share
    network subnets, if it exists.
    :param context: operation context.
    :param share_network_id: the share network id to be the subnets.
    :param availability_zone_id: the availability zone id to be the subnets.
    :param fallback_to_default: determines in case no subnets found in the
        given AZ, it will return the "default" subnets.
    :return: the list of share network subnets in the AZ and share network.
    """
    # Thin session-scoped wrapper; the lookup (and the default-subnet
    # fallback) is implemented in the private helper.
    return _share_network_subnets_get_all_by_availability_zone_id(
        context, share_network_id, availability_zone_id,
        fallback_to_default=fallback_to_default,
    )
@require_context
def _share_network_subnets_get_all_by_availability_zone_id(
    context, share_network_id, availability_zone_id,
    fallback_to_default=True,
):
    """Fetch subnets of a share network in one AZ, optionally falling back
    to the default (AZ-less) subnets when the AZ has none."""
    result = _share_network_subnet_get_query(context).filter_by(
        share_network_id=share_network_id,
        availability_zone_id=availability_zone_id,
    ).all()
    # If a specific subnet wasn't found, try get the default one
    if availability_zone_id and not result and fallback_to_default:
        return _share_network_subnet_get_query(context).filter_by(
            share_network_id=share_network_id,
            availability_zone_id=None,
        ).all()
    return result
@require_context
@context_manager.reader
def share_network_subnet_get_default_subnets(context, share_network_id):
    """Return the default (availability-zone-less) subnets of a network."""
    return _share_network_subnets_get_all_by_availability_zone_id(
        context, share_network_id, availability_zone_id=None,
    )
@require_context
@context_manager.reader
def share_network_subnet_get_all_by_share_server_id(context, share_server_id):
    """Return subnets attached to the given share server.

    :raises: ShareNetworkSubnetNotFoundByShareServer when none exist.
    """
    result = _share_network_subnet_get_query(context).filter(
        models.ShareNetworkSubnet.share_servers.any(
            id=share_server_id,
        )
    ).all()
    if not result:
        raise exception.ShareNetworkSubnetNotFoundByShareServer(
            share_server_id=share_server_id,
        )
    return result
###################
def _share_network_subnet_metadata_get_query(context, share_network_subnet_id):
    """Build a query over non-deleted metadata rows of a subnet."""
    return model_query(
        context, models.ShareNetworkSubnetMetadata,
        read_deleted="no",
    ).filter_by(
        share_network_subnet_id=share_network_subnet_id,
    ).options(joinedload('share_network_subnet'))
@require_context
@require_share_network_subnet_exists
@context_manager.reader
def share_network_subnet_metadata_get(context, share_network_subnet_id):
    """Return a subnet's metadata as a ``{key: value}`` dict."""
    return _share_network_subnet_metadata_get(context, share_network_subnet_id)
@require_context
def _share_network_subnet_metadata_get(context, share_network_subnet_id):
    """Collapse the subnet's metadata rows into a ``{key: value}`` dict."""
    rows = _share_network_subnet_metadata_get_query(
        context, share_network_subnet_id,
    ).all()
    return {row['key']: row['value'] for row in rows}
@require_context
@require_share_network_subnet_exists
@context_manager.writer
def share_network_subnet_metadata_delete(
    context, share_network_subnet_id, key,
):
    """Soft-delete one metadata item of a subnet by key."""
    meta_ref = _share_network_subnet_metadata_get_item(
        context, share_network_subnet_id, key,
    )
    meta_ref.soft_delete(session=context.session)
@require_context
@require_share_network_subnet_exists
@context_manager.writer
def share_network_subnet_metadata_update(
    context, share_network_subnet_id, metadata, delete,
):
    """Replace/merge a subnet's metadata; see the private helper for
    the *delete* semantics."""
    return _share_network_subnet_metadata_update(
        context, share_network_subnet_id, metadata, delete,
    )
@require_context
@context_manager.writer
def share_network_subnet_metadata_update_item(
    context, share_network_subnet_id, item,
):
    """Create or update a single metadata item without touching others."""
    return _share_network_subnet_metadata_update(
        context, share_network_subnet_id, item, delete=False,
    )
@require_context
@context_manager.reader
def share_network_subnet_metadata_get_item(
    context, share_network_subnet_id, key,
):
    """Return one metadata item as a single-entry ``{key: value}`` dict."""
    row = _share_network_subnet_metadata_get_item(
        context, share_network_subnet_id, key,
    )
    result = {row['key']: row['value']}
    return result
def _share_network_subnet_metadata_get_item(
    context, share_network_subnet_id, key,
):
    """Fetch one metadata row by key.

    :raises: MetadataItemNotFound when the key does not exist.
    """
    result = _share_network_subnet_metadata_get_query(
        context, share_network_subnet_id,
    ).filter_by(key=key).first()
    if not result:
        raise exception.MetadataItemNotFound
    return result
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _share_network_subnet_metadata_update(
    context, share_network_subnet_id, metadata, delete,
):
    """Create or update metadata items of a share network subnet.

    :param context: operation context carrying an open session.
    :param metadata: dict of the desired key/value pairs.
    :param delete: truthy (string forms accepted) to soft-delete keys
        currently stored but absent from *metadata* before updating.
    :return: the *metadata* dict that was passed in.
    """
    delete = strutils.bool_from_string(delete)
    if delete:
        original_metadata = _share_network_subnet_metadata_get(
            context, share_network_subnet_id,
        )
        # Soft-delete every stored key the caller did not resend.
        # (Only the keys matter here, so iterate them directly.)
        for meta_key in original_metadata:
            if meta_key not in metadata:
                meta_ref = _share_network_subnet_metadata_get_item(
                    context, share_network_subnet_id, meta_key,
                )
                meta_ref.soft_delete(session=context.session)
    # Now update all existing items with new values, or create new meta
    # objects.
    for meta_key, meta_value in metadata.items():
        # update the value whether it exists or not.
        item = {"value": meta_value}
        meta_ref = _share_network_subnet_metadata_get_query(
            context, share_network_subnet_id,
        ).filter_by(key=meta_key).first()
        if not meta_ref:
            meta_ref = models.ShareNetworkSubnetMetadata()
            item.update(
                {
                    "key": meta_key,
                    "share_network_subnet_id": share_network_subnet_id,
                }
            )
        meta_ref.update(item)
        meta_ref.save(session=context.session)
    return metadata
#################################
def _share_server_get_query(context):
    """Build the base ShareServer query with instances, network
    allocations and subnets eagerly loaded."""
    return model_query(
        context, models.ShareServer,
    ).options(
        joinedload('share_instances'),
        joinedload('network_allocations'),
        joinedload('share_network_subnets'),
    )
@require_context
@context_manager.writer
def share_server_create(context, values):
    """Create a share server and return it re-read with eager relations."""
    values = ensure_model_dict_has_id(values)
    server_ref = models.ShareServer()
    # updated_at is needed for judgement of automatic cleanup
    server_ref.updated_at = timeutils.utcnow()
    server_ref.update(values)
    server_ref.save(session=context.session)
    # NOTE(u_glide): Do so to prevent errors with relationships
    return _share_server_get(context, server_ref['id'])
@require_context
@context_manager.writer
def share_server_delete(context, id):
    """Soft-delete a share server, its subnet mappings and backend
    details."""
    server_ref = _share_server_get(context, id)
    model_query(
        context, models.ShareServerShareNetworkSubnetMapping,
    ).filter_by(
        share_server_id=id,
    ).soft_delete()
    _share_server_backend_details_delete(context, id)
    server_ref.soft_delete(session=context.session, update_status=True)
@require_context
@context_manager.writer
def share_server_update(context, id, values):
    """Update a share server with *values* and return the row."""
    server_ref = _share_server_get(context, id)
    server_ref.update(values)
    server_ref.save(session=context.session)
    return server_ref
@require_context
@context_manager.reader
def share_server_get(context, server_id):
    """Public read wrapper around :func:`_share_server_get`."""
    return _share_server_get(context, server_id)
@require_context
def _share_server_get(context, server_id):
    """Fetch one share server by id.

    :raises: ShareServerNotFound when no row matches.
    """
    result = _share_server_get_query(context).filter_by(id=server_id).first()
    if result is None:
        raise exception.ShareServerNotFound(share_server_id=server_id)
    return result
@require_context
@context_manager.reader
def share_server_search_by_identifier(context, identifier):
    """Find share servers whose identifier fuzzily matches *identifier*.

    Tries progressively looser criteria: the given identifier (and its
    hyphen/underscore variants) as a substring of stored identifiers,
    then stored identifiers as substrings of the given one (and its
    variants).  The first criterion yielding rows wins.

    :raises: ShareServerNotFound when nothing matches.
    """
    identifier_field = models.ShareServer.identifier
    underscored = identifier.replace("-", "_")
    hyphenated = identifier.replace("_", "-")
    criteria = (
        # given identifier inside stored identifiers
        identifier_field.like('%{}%'.format(identifier)),
        # same, with separators swapped both ways
        identifier_field.like('%{}%'.format(underscored)),
        identifier_field.like('%{}%'.format(hyphenated)),
        # stored identifiers inside the given identifier
        literal(identifier).contains(identifier_field),
        literal(underscored).contains(identifier_field),
        literal(hyphenated).contains(identifier_field),
    )
    for criterion in criteria:
        matches = _share_server_get_query(context).filter(criterion).all()
        if matches:
            return matches
    raise exception.ShareServerNotFound(share_server_id=identifier)
@require_context
@context_manager.reader
def share_server_get_all_by_host_and_share_subnet_valid(
    context, host, share_subnet_id,
):
    """Return CREATING/ACTIVE share servers on *host* attached to the
    given subnet.

    :raises: ShareServerNotFoundByFilters when none match.
    """
    result = _share_server_get_query(
        context,
    ).filter_by(
        host=host,
    ).filter(
        models.ShareServer.share_network_subnets.any(id=share_subnet_id)
    ).filter(
        models.ShareServer.status.in_(
            (constants.STATUS_CREATING, constants.STATUS_ACTIVE),
        )
    ).all()
    if not result:
        filters_description = ('share_network_subnet_id is '
                               '"%(share_subnet_id)s", host is "%(host)s" and '
                               'status in "%(status_cr)s" or '
                               '"%(status_act)s"') % {
            'share_subnet_id': share_subnet_id,
            'host': host,
            'status_cr': constants.STATUS_CREATING,
            'status_act': constants.STATUS_ACTIVE,
        }
        raise exception.ShareServerNotFoundByFilters(
            filters_description=filters_description,
        )
    return result
@require_context
@context_manager.reader
def share_server_get_all_by_host_and_share_subnet(
    context, host, share_subnet_id,
):
    """Return share servers on *host* attached to the given subnet,
    regardless of status.

    :raises: ShareServerNotFoundByFilters when none match.
    """
    result = _share_server_get_query(
        context,
    ).filter_by(
        host=host,
    ).filter(
        models.ShareServer.share_network_subnets.any(id=share_subnet_id)
    ).all()
    if not result:
        filters_description = (
            'share_network_subnet_id is "%(share_subnet_id)s" and host is '
            '"%(host)s".'
        ) % {
            'share_subnet_id': share_subnet_id,
            'host': host,
        }
        raise exception.ShareServerNotFoundByFilters(
            filters_description=filters_description,
        )
    return result
@require_context
@context_manager.reader
def share_server_get_all(context):
    """Return every share server visible to the context."""
    return _share_server_get_query(context).all()
@require_context
@context_manager.reader
def share_server_get_all_with_filters(context, filters):
    """Public read wrapper around :func:`_share_server_get_all_with_filters`."""
    return _share_server_get_all_with_filters(context, filters)
@require_context
def _share_server_get_all_with_filters(context, filters):
    """Query share servers narrowed by the given filters.

    Recognized keys: 'host', 'status', 'source_share_server_id', and
    'share_network_id' (matched through the subnet mapping tables).
    """
    query = _share_server_get_query(context)
    if filters.get('host'):
        query = query.filter_by(host=filters.get('host'))
    if filters.get('status'):
        query = query.filter_by(status=filters.get('status'))
    if filters.get('source_share_server_id'):
        query = query.filter_by(
            source_share_server_id=filters.get('source_share_server_id'))
    if filters.get('share_network_id'):
        query = query.join(
            models.ShareServerShareNetworkSubnetMapping,
            models.ShareServerShareNetworkSubnetMapping.share_server_id ==
            models.ShareServer.id
        ).join(
            models.ShareNetworkSubnet,
            models.ShareNetworkSubnet.id ==
            models.ShareServerShareNetworkSubnetMapping.share_network_subnet_id
        ).filter(
            models.ShareNetworkSubnet.share_network_id ==
            filters.get('share_network_id'))
    return query.all()
@require_context
@context_manager.reader
def share_server_get_all_by_host(context, host, filters=None):
    """Return all share servers on *host*, optionally narrowed further.

    :param host: host name the share servers must belong to.
    :param filters: optional dict of extra filters understood by
        ``_share_server_get_all_with_filters`` (e.g. 'status').
    :return: list of share server rows.
    """
    # Copy before injecting 'host' so the caller's dict is not mutated
    # as a side effect (the previous code updated it in place).
    filters = dict(filters or {})
    filters['host'] = host
    return _share_server_get_all_with_filters(context, filters=filters)
@require_context
@context_manager.reader
def share_server_get_all_unused_deletable(context, host, updated_before):
    """Return auto-deletable share servers on *host* that carry no share
    groups or instances, are INACTIVE/ACTIVE/ERROR, and were last
    updated before *updated_before*."""
    valid_server_status = (
        constants.STATUS_INACTIVE,
        constants.STATUS_ACTIVE,
        constants.STATUS_ERROR,
    )
    result = (_share_server_get_query(context)
              .filter_by(is_auto_deletable=True)
              .filter_by(host=host)
              .filter(~models.ShareServer.share_groups.any())
              .filter(~models.ShareServer.share_instances.any())
              .filter(models.ShareServer.status.in_(valid_server_status))
              .filter(models.ShareServer.updated_at < updated_before).all())
    return result
def _share_server_backend_details_get_item(context,
                                           share_server_id,
                                           key, session=None):
    """Fetch one backend-details row of a share server by key.

    :raises: ShareServerBackendDetailsNotFound when the key is absent.
    """
    result = (_share_server_backend_details_get_query(
        context, share_server_id, session=session).filter_by(key=key).first())
    if not result:
        raise exception.ShareServerBackendDetailsNotFound()
    return result
def _share_server_backend_details_get_query(context,
                                            share_server_id,
                                            session=None):
    """Build a query over non-deleted backend-details rows of a server."""
    return (model_query(
        context, models.ShareServerBackendDetails, session=session,
        read_deleted="no").
        filter_by(share_server_id=share_server_id))
@require_context
@context_manager.writer
def share_server_backend_details_set(context, share_server_id, server_details):
    """Upsert key/value backend details for a share server.

    :return: the *server_details* dict that was passed in.
    """
    # Raises ShareServerNotFound if the server does not exist.
    _share_server_get(context, share_server_id)
    for meta_key, meta_value in server_details.items():
        # update the value whether it exists or not
        item = {"value": meta_value}
        try:
            meta_ref = _share_server_backend_details_get_item(
                context, share_server_id, meta_key, session=context.session)
        except exception.ShareServerBackendDetailsNotFound:
            meta_ref = models.ShareServerBackendDetails()
            item.update({"key": meta_key, "share_server_id": share_server_id})
        meta_ref.update(item)
        meta_ref.save(session=context.session)
    return server_details
@require_context
@context_manager.writer
def share_server_backend_details_delete(context, share_server_id):
    """Soft-delete every backend-details row of a share server."""
    return _share_server_backend_details_delete(context, share_server_id)
@require_context
def _share_server_backend_details_delete(context, share_server_id):
    """Soft-delete all backend-details rows; expects an open session on
    the context (caller must hold a writer transaction)."""
    share_server_details = model_query(
        context,
        models.ShareServerBackendDetails,
    ).filter_by(share_server_id=share_server_id).all()
    for item in share_server_details:
        item.soft_delete(session=context.session)
@require_context
@context_manager.writer
def share_servers_update(context, share_server_ids, values):
    """Bulk-update *values* on all given share server ids.

    :return: the number of rows matched by the update.
    """
    result = model_query(
        context, models.ShareServer, read_deleted="no",
    ).filter(
        models.ShareServer.id.in_(share_server_ids),
    ).update(values, synchronize_session=False)
    return result
###################
def _driver_private_data_query(
    context, entity_id, key=None, read_deleted=False,
):
    """Build a query over driver-private data rows of an entity.

    :param key: None for all keys, a list for an IN-match, or a single
        key for an exact match.
    """
    query = model_query(
        context, models.DriverPrivateData,
        read_deleted=read_deleted,
    ).filter_by(
        entity_uuid=entity_id,
    )
    if isinstance(key, list):
        return query.filter(models.DriverPrivateData.key.in_(key))
    elif key is not None:
        return query.filter_by(key=key)
    return query
@require_context
@context_manager.reader
def driver_private_data_get(context, entity_id, key=None, default=None):
    """Read driver-private data of an entity.

    Returns a ``{key: value}`` dict when *key* is None or a list;
    otherwise the single value, or *default* when the key is absent.
    """
    query = _driver_private_data_query(context, entity_id, key)
    if key is None or isinstance(key, list):
        return {item.key: item.value for item in query.all()}
    else:
        result = query.first()
        return result["value"] if result is not None else default
@require_context
@context_manager.writer
def driver_private_data_update(
    context, entity_id, details, delete_existing=False,
):
    """Store driver-private key/value data for an entity.

    Keys present in *details* are updated (and un-deleted if needed);
    keys missing from *details* are soft-deleted only when
    *delete_existing* is True; any remaining new keys are inserted.

    :return: the original *details* dict.
    """
    # NOTE(u_glide): following code modifies details dict, that's why we should
    # copy it
    new_details = copy.deepcopy(details)
    # Process existing data
    original_data = context.session.query(models.DriverPrivateData).filter_by(
        entity_uuid=entity_id,
    ).all()
    for data_ref in original_data:
        in_new_details = data_ref['key'] in new_details
        if in_new_details:
            # Popping here leaves only brand-new keys for the insert loop.
            new_value = str(new_details.pop(data_ref['key']))
            data_ref.update({
                "value": new_value,
                "deleted": 0,
                "deleted_at": None
            })
            data_ref.save(session=context.session)
        elif delete_existing and data_ref['deleted'] != 1:
            data_ref.update({
                "deleted": 1, "deleted_at": timeutils.utcnow()
            })
            data_ref.save(session=context.session)
    # Add new data
    for key, value in new_details.items():
        data_ref = models.DriverPrivateData()
        data_ref.update({
            "entity_uuid": entity_id,
            "key": key,
            "value": str(value)
        })
        data_ref.save(session=context.session)
    return details
@require_context
@context_manager.writer
def driver_private_data_delete(context, entity_id, key=None):
    """Soft-delete driver-private data of an entity (all keys, a list of
    keys, or one key, per the query helper's semantics)."""
    query = _driver_private_data_query(context, entity_id, key)
    query.update({"deleted": 1, "deleted_at": timeutils.utcnow()})
###################
@require_context
@context_manager.writer
def network_allocation_create(context, values):
    """Create a network allocation record; an 'id' is generated if absent."""
    values = ensure_model_dict_has_id(values)
    alloc_ref = models.NetworkAllocation()
    alloc_ref.update(values)
    alloc_ref.save(session=context.session)
    return alloc_ref
@require_context
@context_manager.writer
def network_allocation_delete(context, id):
    """Soft-delete the network allocation with the given id."""
    alloc_ref = _network_allocation_get(context, id)
    alloc_ref.soft_delete(session=context.session)
@require_context
@context_manager.reader
def network_allocation_get(context, id, read_deleted="no"):
    """Public read wrapper around :func:`_network_allocation_get`."""
    return _network_allocation_get(context, id, read_deleted=read_deleted)
@require_context
def _network_allocation_get(context, id, read_deleted="no"):
    """Fetch one network allocation by id.

    :raises: NotFound when no row matches.
    """
    result = model_query(
        context, models.NetworkAllocation,
        read_deleted=read_deleted,
    ).filter_by(id=id).first()
    if result is None:
        raise exception.NotFound()
    return result
@require_context
@context_manager.reader
def network_allocations_get_by_ip_address(context, ip_address):
    """Return network allocations holding the given IP address (possibly
    an empty list)."""
    result = model_query(
        context, models.NetworkAllocation,
    ).filter_by(ip_address=ip_address).all()
    return result or []
@require_context
@context_manager.reader
def network_allocations_get_for_share_server(
    context, share_server_id, label=None, subnet_id=None,
):
    """Return network allocations of a share server.

    :param label: optional allocation label; a non-'admin' label also
        matches rows whose label is NULL (treated as 'user').
    :param subnet_id: optional share network subnet to narrow by.
    """
    query = model_query(
        context, models.NetworkAllocation,
    ).filter_by(
        share_server_id=share_server_id,
    )
    if label:
        if label != 'admin':
            query = query.filter(or_(
                # NOTE(vponomaryov): we treat None as alias for 'user'.
                models.NetworkAllocation.label == None,  # noqa
                models.NetworkAllocation.label == label,
            ))
        else:
            query = query.filter(models.NetworkAllocation.label == label)
    if subnet_id:
        query = query.filter(
            models.NetworkAllocation.share_network_subnet_id == subnet_id)
    result = query.all()
    return result
@require_context
@context_manager.writer
def network_allocation_update(context, id, values, read_deleted=None):
    """Update a network allocation with *values* and return the row."""
    alloc_ref = _network_allocation_get(context, id, read_deleted=read_deleted)
    alloc_ref.update(values)
    alloc_ref.save(session=context.session)
    return alloc_ref
###################
def _dict_with_specs(inst_type_query, specs_key='extra_specs'):
"""Convert type query result to dict with extra_spec and rate_limit.
Takes a share [group] type query returned by sqlalchemy and returns it
as a dictionary, converting the extra/group specs entry from a list
of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
'group_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
'group_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
specs = {x['key']: x['value'] for x in inst_type_query[specs_key]}
inst_type_dict[specs_key] = specs
return inst_type_dict
@require_admin_context
@context_manager.writer
def share_type_create(context, values, projects=None):
    """Create a new share type.
    In order to pass in extra specs, the values dict should contain a
    'extra_specs' key/value pair:
    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    :param projects: optional iterable of project ids granted access to
        the type.
    :raises: ShareTypeExists on a duplicate name.
    """
    values = ensure_model_dict_has_id(values)
    projects = projects or []
    try:
        values['extra_specs'] = _metadata_refs(
            values.get('extra_specs'),
            models.ShareTypeExtraSpecs,
        )
        share_type_ref = models.ShareTypes()
        share_type_ref.update(values)
        share_type_ref.save(session=context.session)
    except db_exception.DBDuplicateEntry:
        raise exception.ShareTypeExists(id=values['name'])
    except Exception as e:
        raise db_exception.DBError(e)
    # set() de-duplicates the project list before creating access rows.
    for project in set(projects):
        access_ref = models.ShareTypeProjects()
        access_ref.update(
            {"share_type_id": share_type_ref.id, "project_id": project},
        )
        access_ref.save(session=context.session)
    return share_type_ref
def _share_type_get_query(context, read_deleted=None, expected_fields=None):
    """Build the base ShareTypes query.

    Non-admin contexts only see public types plus private types shared
    with their project. Include 'projects' in *expected_fields* to eager
    load the project-access relation.
    """
    expected_fields = expected_fields or []
    query = model_query(
        context,
        models.ShareTypes,
        read_deleted=read_deleted,
    ).options(joinedload('extra_specs'))
    if 'projects' in expected_fields:
        query = query.options(joinedload('projects'))
    if not context.is_admin:
        the_filter = [models.ShareTypes.is_public == true()]
        projects_attr = getattr(models.ShareTypes, 'projects')
        the_filter.extend([
            projects_attr.any(project_id=context.project_id)
        ])
        query = query.filter(or_(*the_filter))
    return query
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _share_type_update(context, type_id, values, is_group):
    """Apply ``values`` to a share type or a share group type row.

    :param is_group: selects ShareGroupTypes over ShareTypes.
    :raises: Share[Group]TypeExists when the new name collides with an
        existing non-deleted type; Share[Group]TypeNotFound when
        ``type_id`` matches no row.
    """
    # A None name means "leave the name alone", not "rename to NULL".
    if values.get('name') is None:
        values.pop('name', None)
    if is_group:
        model = models.ShareGroupTypes
        exists_exc = exception.ShareGroupTypeExists
        exists_args = {'type_id': values.get('name')}
    else:
        model = models.ShareTypes
        exists_exc = exception.ShareTypeExists
        exists_args = {'id': values.get('name')}
    query = model_query(context, model)
    try:
        result = query.filter_by(id=type_id).update(values)
    except db_exception.DBDuplicateEntry:
        # This exception only occurs if there's a non-deleted
        # share/group type which has the same name as the name being
        # updated.
        raise exists_exc(**exists_args)
    if not result:
        # UPDATE matched zero rows - the type does not exist.
        if is_group:
            raise exception.ShareGroupTypeNotFound(type_id=type_id)
        else:
            raise exception.ShareTypeNotFound(share_type_id=type_id)
@context_manager.writer
def share_type_update(context, share_type_id, values):
    """Update a share type inside a writer transaction."""
    _share_type_update(context, share_type_id, values, is_group=False)
@require_context
@context_manager.reader
def share_type_get_all(context, inactive=False, filters=None):
    """Returns a dict describing all share_types with name as key.

    :param inactive: when True, include soft-deleted types.
    :param filters: optional dict; only the 'is_public' key is honored.
    """
    filters = filters or {}
    read_deleted = "yes" if inactive else "no"
    query = _share_type_get_query(context, read_deleted=read_deleted)
    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.ShareTypes.is_public == filters['is_public']]
        if filters['is_public'] and context.project_id is not None:
            # Also include private types this project was granted
            # access to, so the listing matches what the caller can use.
            projects_attr = getattr(models.ShareTypes, 'projects')
            the_filter.extend([
                projects_attr.any(
                    project_id=context.project_id, deleted=0)
            ])
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
    rows = query.order_by("name").all()
    result = {}
    for row in rows:
        result[row['name']] = _dict_with_specs(row)
    return result
def _share_type_get_id_from_share_type(context, id):
    """Return the id of an existing, non-deleted share type.

    :raises: ShareTypeNotFound when no matching row exists.
    """
    result = model_query(
        context, models.ShareTypes, read_deleted="no",
    ).filter_by(id=id).first()
    if not result:
        raise exception.ShareTypeNotFound(share_type_id=id)
    return result['id']
def _share_type_get(context, id, inactive=False, expected_fields=None):
    """Fetch one share type as a dict with flattened extra specs."""
    expected_fields = expected_fields or []
    read_deleted = "yes" if inactive else "no"
    result = _share_type_get_query(
        context, read_deleted, expected_fields,
    ).filter_by(id=id).first()
    if not result:
        # The only way that id could be None is if the default share type is
        # not configured and no other share type was specified.
        if id is None:
            raise exception.DefaultShareTypeNotConfigured()
        raise exception.ShareTypeNotFound(share_type_id=id)
    share_type = _dict_with_specs(result)
    if 'projects' in expected_fields:
        # Flatten the joined project rows into a plain list of ids.
        share_type['projects'] = [p['project_id'] for p in result['projects']]
    return share_type
@require_context
@context_manager.reader
def share_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing specific share_type."""
    return _share_type_get(context, id,
                           inactive=inactive,
                           expected_fields=expected_fields)
def _share_type_get_by_name(context, name):
    """Fetch a share type by its unique name as a spec-flattened dict."""
    result = _share_type_get_query(context).filter_by(name=name).first()
    if not result:
        raise exception.ShareTypeNotFoundByName(share_type_name=name)
    return _dict_with_specs(result)
@require_context
@context_manager.reader
def share_type_get_by_name(context, name):
    """Return a dict describing specific share_type."""
    return _share_type_get_by_name(context, name)
@require_context
@context_manager.reader
def share_type_get_by_name_or_id(context, name_or_id):
    """Return a dict describing specific share_type using its name or ID.
    :returns: ShareType object or None if not found
    """
    # Try the id lookup first, then fall back to a name lookup;
    # both "not found" errors are converted into a None return.
    try:
        return _share_type_get(context, name_or_id)
    except exception.ShareTypeNotFound:
        try:
            return _share_type_get_by_name(context, name_or_id)
        except exception.ShareTypeNotFoundByName:
            return None
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def share_type_destroy(context, id):
    """Soft-delete a share type plus its specs, project ACLs and quotas.

    :raises: ShareTypeNotFound when the type does not exist;
        ShareTypeInUse when any share instance or share group type
        still references the type.
    """
    # Raises ShareTypeNotFound if the type is absent.
    _share_type_get(context, id)
    shares_count = model_query(
        context,
        models.ShareInstance,
        read_deleted="no",
    ).filter_by(share_type_id=id).count()
    share_group_types_count = model_query(
        context,
        models.ShareGroupTypeShareTypeMapping,
        read_deleted="no",
    ).filter_by(share_type_id=id).count()
    if shares_count or share_group_types_count:
        # Fixed message grammar: "it is in use by".
        msg = ("Deletion of share type %(stype)s failed; it is in use by "
               "%(shares)d shares and %(gtypes)d share group types")
        msg_args = {'stype': id,
                    'shares': shares_count,
                    'gtypes': share_group_types_count}
        LOG.error(msg, msg_args)
        raise exception.ShareTypeInUse(share_type_id=id)
    model_query(
        context, models.ShareTypeExtraSpecs,
    ).filter_by(
        share_type_id=id
    ).soft_delete()
    model_query(
        context, models.ShareTypeProjects,
    ).filter_by(
        share_type_id=id,
    ).soft_delete()
    model_query(
        context, models.ShareTypes,
    ).filter_by(
        id=id
    ).soft_delete()
    # NOTE(stephenfin): commit changes before we do anything with quotas
    context.session.commit()
    context.session.begin()
    # Destroy any quotas, usages and reservations for the share type:
    _quota_destroy_all_by_share_type(context, id)
def _share_type_access_query(context):
    """Return the base query over non-deleted share-type project ACLs."""
    return model_query(context, models.ShareTypeProjects, read_deleted="no")
@require_admin_context
@context_manager.reader
def share_type_access_get_all(context, type_id):
    """List the project access entries for a share type."""
    share_type_id = _share_type_get_id_from_share_type(context, type_id)
    return _share_type_access_query(
        context,
    ).filter_by(share_type_id=share_type_id).all()
@require_admin_context
@context_manager.writer
def share_type_access_add(context, type_id, project_id):
    """Add given tenant to the share type access list."""
    share_type_id = _share_type_get_id_from_share_type(context, type_id)
    access_ref = models.ShareTypeProjects()
    access_ref.update(
        {"share_type_id": share_type_id, "project_id": project_id},
    )
    try:
        access_ref.save(session=context.session)
    except db_exception.DBDuplicateEntry:
        # A duplicate insert means access was already granted.
        raise exception.ShareTypeAccessExists(
            share_type_id=type_id, project_id=project_id,
        )
    return access_ref
@require_admin_context
@context_manager.writer
def share_type_access_remove(context, type_id, project_id):
    """Remove given tenant from the share type access list."""
    share_type_id = _share_type_get_id_from_share_type(context, type_id)
    count = _share_type_access_query(
        context,
    ).filter_by(
        share_type_id=share_type_id,
    ).filter_by(
        project_id=project_id,
    ).soft_delete(synchronize_session=False)
    if count == 0:
        # Nothing was deleted, so no such grant existed.
        raise exception.ShareTypeAccessNotFound(
            share_type_id=type_id, project_id=project_id,
        )
####################
def _share_type_extra_specs_query(context, share_type_id):
    """Base query over non-deleted extra specs of one share type."""
    return model_query(
        context, models.ShareTypeExtraSpecs, read_deleted="no",
    ).filter_by(
        share_type_id=share_type_id,
    ).options(joinedload('share_type'))
@require_context
@context_manager.reader
def share_type_extra_specs_get(context, share_type_id):
    """Return a share type's extra specs as a plain {key: value} dict."""
    rows = _share_type_extra_specs_query(context, share_type_id).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']
    return result
@require_context
@context_manager.writer
def share_type_extra_specs_delete(context, share_type_id, key):
    """Soft-delete one extra spec; raises when the key does not exist."""
    # Look the item up first so a missing key raises
    # ShareTypeExtraSpecsNotFound instead of silently deleting nothing.
    _share_type_extra_specs_get_item(context, share_type_id, key)
    _share_type_extra_specs_query(
        context, share_type_id,
    ).filter_by(key=key).soft_delete()
def _share_type_extra_specs_get_item(context, share_type_id, key):
    """Return a single extra-spec row, raising when absent."""
    result = _share_type_extra_specs_query(
        context, share_type_id,
    ).filter_by(key=key).options(joinedload('share_type')).first()
    if not result:
        raise exception.ShareTypeExtraSpecsNotFound(
            extra_specs_key=key,
            share_type_id=share_type_id,
        )
    return result
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def share_type_extra_specs_update_or_create(context, share_type_id, specs):
    """Upsert each key/value pair in ``specs`` for the given share type."""
    spec_ref = None
    for key, value in specs.items():
        try:
            spec_ref = _share_type_extra_specs_get_item(
                context, share_type_id, key,
            )
        except exception.ShareTypeExtraSpecsNotFound:
            # No existing row for this key - create a fresh one.
            spec_ref = models.ShareTypeExtraSpecs()
        spec_ref.update(
            {
                "key": key,
                "value": value,
                "share_type_id": share_type_id,
                "deleted": 0,
            }
        )
        spec_ref.save(session=context.session)
    return specs
####################
def _ensure_availability_zone_exists(
    context, values, session=None, *, strict=True,
):
    """Resolve 'availability_zone' in ``values`` to an AZ id.

    Pops the 'availability_zone' entry (name or UUID) and replaces it
    with 'availability_zone_id'. With strict=True a missing entry is an
    error; otherwise the function is a no-op.
    """
    az_name = values.pop('availability_zone', None)
    if strict and not az_name:
        msg = _("Values dict should have 'availability_zone' field.")
        raise ValueError(msg)
    elif not az_name:
        return
    if uuidutils.is_uuid_like(az_name):
        # A UUID must reference an existing AZ.
        az_ref = _availability_zone_get(context, az_name, session=session)
    else:
        # A plain name is created on demand.
        az_ref = _availability_zone_create_if_not_exist(
            context, az_name, session=session)
    values.update({'availability_zone_id': az_ref['id']})
@require_context
@context_manager.reader
def availability_zone_get(context, id_or_name):
    """Return the availability zone matching the given id or name."""
    return _availability_zone_get(context, id_or_name)
# TODO(stephenfin): Remove the 'session' argument once all callers have been
# converted
@require_context
def _availability_zone_get(context, id_or_name, session=None):
    """Look up an AZ by UUID or by name, raising when absent."""
    query = model_query(context, models.AvailabilityZone, session=session)
    if uuidutils.is_uuid_like(id_or_name):
        query = query.filter_by(id=id_or_name)
    else:
        query = query.filter_by(name=id_or_name)
    result = query.first()
    if not result:
        raise exception.AvailabilityZoneNotFound(id=id_or_name)
    return result
# TODO(stephenfin): Remove the 'session' argument once all callers have been
# converted
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _availability_zone_create_if_not_exist(context, name, session=None):
    """Return the AZ named ``name``, creating it when it does not exist."""
    try:
        return _availability_zone_get(context, name, session=session)
    except exception.AvailabilityZoneNotFound:
        az = models.AvailabilityZone()
        az.update({'id': uuidutils.generate_uuid(), 'name': name})
        # TODO(stephenfin): Remove this branch once all callers have been
        # updated not to pass 'session'
        if session is not None:
            with session.begin():
                az.save(session)
        else:
            az.save(context.session)
        return az
@require_context
@context_manager.reader
def availability_zone_get_all(context):
    """Return AZs that have at least one enabled, non-deleted service."""
    # Subquery of AZ ids referenced by enabled services.
    enabled_services = model_query(
        context, models.Service,
        models.Service.availability_zone_id,
        read_deleted="no"
    ).filter_by(disabled=False).distinct()
    return model_query(
        context, models.AvailabilityZone, read_deleted="no",
    ).filter(
        models.AvailabilityZone.id.in_(enabled_services)
    ).all()
####################
@require_admin_context
@context_manager.writer
def purge_deleted_records(context, age_in_days):
    """Purge soft-deleted records older than(and equal) age from tables.

    :param age_in_days: non-negative cutoff; rows with deleted_at at or
        before now - age_in_days are removed.
    :raises: InvalidParameterValue for a negative age.
    """
    if age_in_days < 0:
        msg = _('Must supply a non-negative value for "age_in_days".')
        LOG.error(msg)
        raise exception.InvalidParameterValue(msg)
    metadata = MetaData()
    metadata.reflect(get_engine())
    deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days)
    # Walk tables child-first (reverse topological order) so foreign-key
    # constraints are less likely to block the deletes.
    for table in reversed(metadata.sorted_tables):
        if 'deleted' not in table.columns.keys():
            # Table does not use soft deletion - nothing to purge.
            continue
        try:
            # Map the reflected table back to its ORM model class.
            mds = [m for m in models.__dict__.values() if
                   (hasattr(m, '__tablename__') and
                    m.__tablename__ == str(table))]
            if len(mds) > 0:
                # collect all soft-deleted records
                with context.session.begin_nested():
                    model = mds[0]
                    s_deleted_records = context.session.query(
                        model,
                    ).filter(model.deleted_at <= deleted_age)
                    deleted_count = 0
                    # delete records one by one,
                    # skip the records which has FK constraints
                    for record in s_deleted_records:
                        try:
                            # Per-record savepoint so one FK failure does
                            # not roll back the whole table's purge.
                            with context.session.begin_nested():
                                context.session.delete(record)
                                deleted_count += 1
                        except db_exc.DBError:
                            LOG.warning(
                                ("Deleting soft-deleted resource %s "
                                 "failed, skipping."), record)
                    if deleted_count != 0:
                        LOG.info("Deleted %(count)s records in "
                                 "table %(table)s.",
                                 {'count': deleted_count, 'table': table})
        except db_exc.DBError:
            LOG.warning("Querying table %s's soft-deleted records "
                        "failed, skipping.", table)
####################
def _share_group_get(context, share_group_id):
    """Return one non-deleted share group with share types eagerly loaded."""
    result = (model_query(context, models.ShareGroup,
                          project_only=True,
                          read_deleted='no').
              filter_by(id=share_group_id).
              options(joinedload('share_types')).
              first())
    if not result:
        raise exception.ShareGroupNotFound(share_group_id=share_group_id)
    return result
@require_context
@context_manager.reader
def share_group_get(context, share_group_id):
    """Return the share group matching ``share_group_id``."""
    return _share_group_get(context, share_group_id)
def _share_group_get_all(context, project_id=None, share_server_id=None,
                         host=None, detailed=True, filters=None,
                         sort_key=None, sort_dir=None):
    """Query share groups with optional filtering and sorting.

    When ``detailed`` is False only {'id': ..., 'name': ...} dicts are
    returned instead of full rows.
    """
    sort_key = sort_key or 'created_at'
    sort_dir = sort_dir or 'desc'
    query = model_query(
        context, models.ShareGroup, read_deleted='no')
    # Apply filters
    if not filters:
        filters = {}
    no_key = 'key_is_absent'
    for k, v in filters.items():
        # Keys listed in LIKE_FILTER end in '~' and request a
        # substring (LIKE) match on the trimmed attribute name.
        temp_k = k.rstrip('~') if k in constants.LIKE_FILTER else k
        filter_attr = getattr(models.ShareGroup, temp_k, no_key)
        if filter_attr == no_key:
            msg = _("Share groups cannot be filtered using '%s' key.")
            raise exception.InvalidInput(reason=msg % k)
        if k in constants.LIKE_FILTER:
            query = query.filter(filter_attr.op('LIKE')(u'%' + v + u'%'))
        else:
            query = query.filter(filter_attr == v)
    if project_id:
        query = query.filter(
            models.ShareGroup.project_id == project_id)
    if host:
        query = query.filter(
            models.ShareGroup.host == host)
    if share_server_id:
        query = query.filter(
            models.ShareGroup.share_server_id == share_server_id)
    try:
        query = apply_sorting(models.ShareGroup, query, sort_key, sort_dir)
    except AttributeError:
        msg = _("Wrong sorting key provided - '%s'.") % sort_key
        raise exception.InvalidInput(reason=msg)
    if detailed:
        return query.options(joinedload('share_types')).all()
    else:
        query = query.with_entities(
            models.ShareGroup.id, models.ShareGroup.name)
        values = []
        for sg_id, sg_name in query.all():
            values.append({"id": sg_id, "name": sg_name})
        return values
@require_admin_context
@context_manager.reader
def share_group_get_all(context, detailed=True, filters=None, sort_key=None,
                        sort_dir=None):
    """Admin-only: list every share group."""
    return _share_group_get_all(
        context, detailed=detailed, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_admin_context
@context_manager.reader
def share_group_get_all_by_host(context, host, detailed=True):
    """Admin-only: list share groups placed on ``host``."""
    return _share_group_get_all(context, host=host, detailed=detailed)
@require_context
@context_manager.reader
def share_group_get_all_by_project(context, project_id, detailed=True,
                                   filters=None, sort_key=None, sort_dir=None):
    """List share groups owned by ``project_id``."""
    authorize_project_context(context, project_id)
    return _share_group_get_all(
        context, project_id=project_id, detailed=detailed, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
@context_manager.reader
def share_group_get_all_by_share_server(context, share_server_id, filters=None,
                                        sort_key=None, sort_dir=None):
    """List share groups hosted on the given share server."""
    return _share_group_get_all(
        context, share_server_id=share_server_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
@context_manager.writer
def share_group_create(context, values):
    """Create a share group, including its share-type mapping rows."""
    share_group = models.ShareGroup()
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    # Convert the list of share type ids into mapping rows.
    mappings = []
    for item in values.get('share_types') or []:
        mapping = models.ShareGroupShareTypeMapping()
        mapping['id'] = uuidutils.generate_uuid()
        mapping['share_type_id'] = item
        mapping['share_group_id'] = values['id']
        mappings.append(mapping)
    values['share_types'] = mappings
    share_group.update(values)
    context.session.add(share_group)
    # Re-read so the caller gets the row with share_types joined in.
    return _share_group_get(context, values['id'])
@require_context
@context_manager.writer
def share_group_update(context, share_group_id, values):
    """Apply ``values`` to an existing share group and return it."""
    share_group_ref = _share_group_get(
        context, share_group_id)
    share_group_ref.update(values)
    share_group_ref.save(session=context.session)
    return share_group_ref
@require_admin_context
@context_manager.writer
def share_group_destroy(context, share_group_id):
    """Soft-delete a share group and its share-type mappings."""
    share_group_ref = _share_group_get(context, share_group_id)
    share_group_ref.soft_delete(context.session)
    context.session.query(models.ShareGroupShareTypeMapping).filter_by(
        share_group_id=share_group_ref['id']).soft_delete()
@require_context
@context_manager.reader
def count_shares_in_share_group(context, share_group_id):
    """Return how many non-deleted shares belong to the given group."""
    query = model_query(
        context,
        models.Share,
        project_only=True,
        read_deleted="no",
    )
    return query.filter_by(share_group_id=share_group_id).count()
@require_context
@context_manager.reader
def get_all_shares_by_share_group(context, share_group_id):
    """Return every non-deleted share belonging to the given group."""
    query = model_query(
        context,
        models.Share,
        project_only=True,
        read_deleted="no",
    )
    return query.filter_by(share_group_id=share_group_id).all()
@require_context
def _count_share_groups(context, project_id, user_id=None, share_type_id=None):
    """Count a project's share groups, optionally per type or per user."""
    query = model_query(
        context, models.ShareGroup,
        func.count(models.ShareGroup.id),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    if share_type_id:
        query = query.join("share_group_share_type_mappings").filter_by(
            share_type_id=share_type_id)
    elif user_id is not None:
        query = query.filter_by(user_id=user_id)
    return query.first()[0]
@require_context
def _count_share_group_snapshots(
    context, project_id, user_id=None, share_type_id=None,
):
    """Count a project's group snapshots, optionally per type or user."""
    query = model_query(
        context, models.ShareGroupSnapshot,
        func.count(models.ShareGroupSnapshot.id),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    if share_type_id:
        # Snapshot -> group -> type-mapping join to filter by type.
        query = query.join(
            "share_group"
        ).join(
            "share_group_share_type_mappings"
        ).filter_by(share_type_id=share_type_id)
    elif user_id is not None:
        query = query.filter_by(user_id=user_id)
    return query.first()[0]
@require_context
def _share_replica_data_get_for_project(
    context, project_id, user_id=None, share_type_id=None,
):
    """Return (replica_count, total_size) for a project's replicas."""
    query = model_query(
        context, models.ShareInstance,
        func.count(models.ShareInstance.id),
        func.sum(models.Share.size),
        read_deleted="no",
    ).join(
        models.Share,
        models.ShareInstance.share_id == models.Share.id
    ).filter(
        models.Share.project_id == project_id
    ).filter(
        # Only instances acting as replicas have a replica_state.
        models.ShareInstance.replica_state.isnot(None)
    )
    if share_type_id:
        query = query.filter(
            models.ShareInstance.share_type_id == share_type_id)
    elif user_id:
        query = query.filter(models.Share.user_id == user_id)
    result = query.first()
    # SUM over zero rows yields NULL; normalize both values to 0.
    return result[0] or 0, result[1] or 0
@require_context
@context_manager.reader
def count_share_group_snapshots_in_share_group(context, share_group_id):
    """Count non-deleted snapshots belonging to one share group."""
    return model_query(
        context, models.ShareGroupSnapshot,
        project_only=True, read_deleted="no",
    ).filter_by(
        share_group_id=share_group_id,
    ).count()
@require_context
@context_manager.reader
def count_share_groups_in_share_network(context, share_network_id):
    """Count non-deleted share groups attached to a share network."""
    return (model_query(
        context, models.ShareGroup,
        project_only=True, read_deleted="no").
        filter_by(share_network_id=share_network_id).
        count())
@require_context
@context_manager.reader
def count_share_group_snapshot_members_in_share(context, share_id):
    """Count snapshot instances whose share instance belongs to a share."""
    return model_query(
        context, models.ShareSnapshotInstance,
        project_only=True, read_deleted="no",
    ).join(
        models.ShareInstance,
        models.ShareInstance.id == (
            models.ShareSnapshotInstance.share_instance_id),
    ).filter(
        models.ShareInstance.share_id == share_id,
    ).count()
####################
@require_context
def _share_group_snapshot_get(context, share_group_snapshot_id):
    """Return one group snapshot with group and members eagerly loaded."""
    result = model_query(
        context,
        models.ShareGroupSnapshot,
        project_only=True,
        read_deleted='no',
    ).options(
        joinedload('share_group'),
        joinedload('share_group_snapshot_members'),
    ).filter_by(
        id=share_group_snapshot_id,
    ).first()
    if not result:
        raise exception.ShareGroupSnapshotNotFound(
            share_group_snapshot_id=share_group_snapshot_id)
    return result
def _share_group_snapshot_get_all(
    context,
    project_id=None,
    detailed=True,
    filters=None,
    sort_key=None,
    sort_dir=None,
):
    """Query share group snapshots with optional filtering and sorting.

    When ``detailed`` is False only {'id': ..., 'name': ...} dicts are
    returned instead of full rows.
    """
    if not sort_key:
        sort_key = 'created_at'
    if not sort_dir:
        sort_dir = 'desc'
    query = model_query(context, models.ShareGroupSnapshot, read_deleted='no')
    # Apply filters
    if not filters:
        filters = {}
    no_key = 'key_is_absent'
    for k, v in filters.items():
        filter_attr = getattr(models.ShareGroupSnapshot, k, no_key)
        if filter_attr == no_key:
            msg = _("Share group snapshots cannot be filtered using '%s' key.")
            raise exception.InvalidInput(reason=msg % k)
        query = query.filter(filter_attr == v)
    if project_id:
        query = query.filter(
            models.ShareGroupSnapshot.project_id == project_id)
    try:
        query = apply_sorting(
            models.ShareGroupSnapshot, query, sort_key, sort_dir)
    except AttributeError:
        msg = _("Wrong sorting key provided - '%s'.") % sort_key
        raise exception.InvalidInput(reason=msg)
    if detailed:
        return query.options(
            joinedload('share_group'),
            joinedload('share_group_snapshot_members')
        ).all()
    else:
        query = query.with_entities(models.ShareGroupSnapshot.id,
                                    models.ShareGroupSnapshot.name)
        values = []
        for sgs_id, sgs_name in query.all():
            values.append({"id": sgs_id, "name": sgs_name})
        return values
@require_context
@context_manager.reader
def share_group_snapshot_get(context, share_group_snapshot_id):
    """Return the share group snapshot matching the given id."""
    return _share_group_snapshot_get(context, share_group_snapshot_id)
@require_admin_context
@context_manager.reader
def share_group_snapshot_get_all(
        context, detailed=True, filters=None, sort_key=None, sort_dir=None):
    """Admin-only: list every share group snapshot."""
    return _share_group_snapshot_get_all(
        context, filters=filters, detailed=detailed,
        sort_key=sort_key, sort_dir=sort_dir)
@require_context
@context_manager.reader
def share_group_snapshot_get_all_by_project(
        context, project_id, detailed=True, filters=None,
        sort_key=None, sort_dir=None):
    """List share group snapshots owned by ``project_id``."""
    authorize_project_context(context, project_id)
    return _share_group_snapshot_get_all(
        context, project_id=project_id, filters=filters, detailed=detailed,
        sort_key=sort_key, sort_dir=sort_dir,
    )
@require_context
@context_manager.writer
def share_group_snapshot_create(context, values):
    """Create a share group snapshot and return it fully loaded."""
    share_group_snapshot = models.ShareGroupSnapshot()
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    share_group_snapshot.update(values)
    context.session.add(share_group_snapshot)
    # Re-read so the caller gets the joined group and member rows.
    return _share_group_snapshot_get(context, values['id'])
@require_context
@context_manager.writer
def share_group_snapshot_update(context, share_group_snapshot_id, values):
    """Apply ``values`` to an existing group snapshot and return it."""
    share_group_ref = _share_group_snapshot_get(
        context, share_group_snapshot_id,
    )
    share_group_ref.update(values)
    share_group_ref.save(session=context.session)
    return share_group_ref
@require_admin_context
@context_manager.writer
def share_group_snapshot_destroy(context, share_group_snapshot_id):
    """Soft-delete a group snapshot and all of its member instances."""
    share_group_snap_ref = _share_group_snapshot_get(
        context,
        share_group_snapshot_id,
    )
    share_group_snap_ref.soft_delete(context.session)
    context.session.query(
        models.ShareSnapshotInstance
    ).filter_by(
        share_group_snapshot_id=share_group_snapshot_id
    ).soft_delete()
####################
@require_context
@context_manager.reader
def share_group_snapshot_members_get_all(context, share_group_snapshot_id):
    """Return all snapshot instances belonging to one group snapshot."""
    query = model_query(
        context,
        models.ShareSnapshotInstance,
        read_deleted='no',
    ).filter_by(share_group_snapshot_id=share_group_snapshot_id)
    return query.all()
@require_context
@context_manager.reader
def share_group_snapshot_member_get(context, member_id):
    """Return one group snapshot member, raising when absent."""
    return _share_group_snapshot_member_get(context, member_id)
def _share_group_snapshot_member_get(context, member_id):
    """Fetch one snapshot-instance row by id."""
    result = model_query(
        context,
        models.ShareSnapshotInstance,
        project_only=True,
        read_deleted='no',
    ).filter_by(id=member_id).first()
    if not result:
        raise exception.ShareGroupSnapshotMemberNotFound(member_id=member_id)
    return result
@require_context
@context_manager.writer
def share_group_snapshot_member_create(context, values):
    """Create a group snapshot member (a share snapshot instance)."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    _change_size_to_instance_size(values)
    member = models.ShareSnapshotInstance()
    member.update(values)
    context.session.add(member)
    return _share_group_snapshot_member_get(context, values['id'])
@require_context
@context_manager.writer
def share_group_snapshot_member_update(context, member_id, values):
    """Apply ``values`` to an existing member and return the fresh row."""
    _change_size_to_instance_size(values)
    member = _share_group_snapshot_member_get(context, member_id)
    member.update(values)
    context.session.add(member)
    return _share_group_snapshot_member_get(context, member_id)
####################
@require_admin_context
@context_manager.writer
def share_group_type_create(context, values, projects=None):
    """Create a new share group type.
    In order to pass in group specs, the values dict should contain a
    'group_specs' key/value pair:
    {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    :param projects: optional list of project ids to grant access to.
    :raises: ShareGroupTypeExists on duplicate name;
        ShareTypeDoesNotExist when a referenced share type is missing.
    """
    values = ensure_model_dict_has_id(values)
    projects = projects or []
    try:
        # Expand the plain spec dict into ShareGroupTypeSpecs rows.
        values['group_specs'] = _metadata_refs(
            values.get('group_specs'), models.ShareGroupTypeSpecs)
        # Resolve each referenced share type and build mapping rows.
        mappings = []
        for item in values.get('share_types', []):
            share_type = share_type_get_by_name_or_id(context, item)
            if not share_type:
                raise exception.ShareTypeDoesNotExist(share_type=item)
            mapping = models.ShareGroupTypeShareTypeMapping()
            mapping['id'] = uuidutils.generate_uuid()
            mapping['share_type_id'] = share_type['id']
            mapping['share_group_type_id'] = values['id']
            mappings.append(mapping)
        values['share_types'] = mappings
        share_group_type_ref = models.ShareGroupTypes()
        share_group_type_ref.update(values)
        share_group_type_ref.save(session=context.session)
    except db_exception.DBDuplicateEntry:
        raise exception.ShareGroupTypeExists(type_id=values['name'])
    except exception.ShareTypeDoesNotExist:
        # Re-raise as-is rather than wrapping it in DBError below.
        raise
    except Exception as e:
        raise db_exception.DBError(e)
    # Grant access to each project exactly once (set() dedupes).
    for project in set(projects):
        access_ref = models.ShareGroupTypeProjects()
        access_ref.update({"share_group_type_id": share_group_type_ref.id,
                           "project_id": project})
        access_ref.save(session=context.session)
    return share_group_type_ref
def _share_group_type_get_query(
    context,
    read_deleted=None,
    expected_fields=None,
):
    """Base group-type query with specs and share types eagerly loaded.

    Non-admin callers only see public group types plus those their
    project has been granted access to.
    """
    expected_fields = expected_fields or []
    query = model_query(
        context,
        models.ShareGroupTypes,
        read_deleted=read_deleted
    ).options(
        joinedload('group_specs'),
        joinedload('share_types'),
    )
    if 'projects' in expected_fields:
        query = query.options(joinedload('projects'))
    if not context.is_admin:
        # Public group types OR types shared with this project.
        the_filter = [models.ShareGroupTypes.is_public == true()]
        projects_attr = getattr(models.ShareGroupTypes, 'projects')
        the_filter.extend([
            projects_attr.any(project_id=context.project_id)
        ])
        query = query.filter(or_(*the_filter))
    return query
@require_context
@context_manager.reader
def share_group_type_get_all(context, inactive=False, filters=None):
    """Returns a dict describing all share group types with name as key.

    :param inactive: when True, include soft-deleted group types.
    :param filters: optional dict; only the 'is_public' key is honored.
    """
    filters = filters or {}
    read_deleted = "yes" if inactive else "no"
    query = _share_group_type_get_query(context, read_deleted=read_deleted)
    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.ShareGroupTypes.is_public == filters['is_public']]
        if filters['is_public'] and context.project_id is not None:
            # Also include private group types this project was granted
            # access to.
            projects_attr = getattr(models.ShareGroupTypes, 'projects')
            the_filter.extend([
                projects_attr.any(
                    project_id=context.project_id, deleted=0)
            ])
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
    rows = query.order_by("name").all()
    result = {}
    for row in rows:
        result[row['name']] = _dict_with_specs(row, 'group_specs')
    return result
def _share_group_type_get_id_from_share_group_type_query(context, type_id):
    """Return a query matching the non-deleted group type with ``type_id``."""
    query = model_query(
        context,
        models.ShareGroupTypes,
        read_deleted="no",
    )
    return query.filter_by(id=type_id)
def _share_group_type_get_id_from_share_group_type(context, type_id):
    """Return the id of the group type, raising when it does not exist."""
    row = _share_group_type_get_id_from_share_group_type_query(
        context, type_id).first()
    if row is None:
        raise exception.ShareGroupTypeNotFound(type_id=type_id)
    return row['id']
@require_context
def _share_group_type_get(
    context,
    type_id,
    inactive=False,
    expected_fields=None,
):
    """Fetch one share group type as a dict with flattened group specs."""
    expected_fields = expected_fields or []
    read_deleted = "yes" if inactive else "no"
    result = _share_group_type_get_query(
        context,
        read_deleted,
        expected_fields,
    ).filter_by(id=type_id).first()
    if not result:
        raise exception.ShareGroupTypeNotFound(type_id=type_id)
    share_group_type = _dict_with_specs(result, 'group_specs')
    if 'projects' in expected_fields:
        # Flatten the joined project rows into a plain list of ids.
        share_group_type['projects'] = [
            p['project_id'] for p in result['projects']]
    return share_group_type
@require_context
@context_manager.reader
def share_group_type_get(context, type_id, inactive=False,
                         expected_fields=None):
    """Return a dict describing specific share group type."""
    return _share_group_type_get(
        context,
        type_id,
        inactive=inactive,
        expected_fields=expected_fields,
    )
@require_context
def _share_group_type_get_by_name(context, name):
    """Fetch a share group type by its unique name as a flattened dict."""
    result = model_query(
        context,
        models.ShareGroupTypes,
    ).options(
        joinedload('group_specs'),
        joinedload('share_types'),
    ).filter_by(
        name=name,
    ).first()
    if not result:
        raise exception.ShareGroupTypeNotFoundByName(type_name=name)
    return _dict_with_specs(result, 'group_specs')
@require_context
@context_manager.reader
def share_group_type_get_by_name(context, name):
    """Return a dict describing specific share group type."""
    return _share_group_type_get_by_name(context, name)
@require_admin_context
@context_manager.writer
def share_group_type_destroy(context, type_id):
    """Soft-delete a share group type plus its specs, mappings and ACLs.

    :raises: ShareGroupTypeNotFound when the type does not exist;
        ShareGroupTypeInUse when any share group still references it.
    """
    # Raises ShareGroupTypeNotFound if the type is absent.
    _share_group_type_get(context, type_id)
    results = model_query(
        context,
        models.ShareGroup,
        read_deleted="no",
    ).filter_by(
        share_group_type_id=type_id,
    ).count()
    if results:
        # Fixed message grammar: "it is in use".
        LOG.error('Share group type %s deletion failed; it is in use.',
                  type_id)
        raise exception.ShareGroupTypeInUse(type_id=type_id)
    model_query(
        context,
        models.ShareGroupTypeSpecs,
    ).filter_by(
        share_group_type_id=type_id,
    ).soft_delete()
    model_query(
        context,
        models.ShareGroupTypeShareTypeMapping,
    ).filter_by(
        share_group_type_id=type_id,
    ).soft_delete()
    model_query(
        context,
        models.ShareGroupTypeProjects,
    ).filter_by(
        share_group_type_id=type_id,
    ).soft_delete()
    model_query(
        context,
        models.ShareGroupTypes,
    ).filter_by(
        id=type_id,
    ).soft_delete()
###############################
def _share_group_type_access_query(context,):
    """Base query over non-deleted group-type project ACL rows."""
    return model_query(
        context,
        models.ShareGroupTypeProjects,
        read_deleted="no",
    )
@require_admin_context
@context_manager.reader
def share_group_type_access_get_all(context, type_id):
    """List the project access entries for a share group type."""
    share_group_type_id = _share_group_type_get_id_from_share_group_type(
        context, type_id)
    return _share_group_type_access_query(context).filter_by(
        share_group_type_id=share_group_type_id,
    ).all()
@require_admin_context
@context_manager.writer
def share_group_type_access_add(context, type_id, project_id):
    """Add given tenant to the share group type access list."""
    share_group_type_id = _share_group_type_get_id_from_share_group_type(
        context, type_id)
    access_ref = models.ShareGroupTypeProjects()
    access_ref.update({"share_group_type_id": share_group_type_id,
                       "project_id": project_id})
    try:
        access_ref.save(session=context.session)
    except db_exception.DBDuplicateEntry:
        # A duplicate insert means access was already granted.
        raise exception.ShareGroupTypeAccessExists(
            type_id=share_group_type_id, project_id=project_id)
    return access_ref
@require_admin_context
@context_manager.writer
def share_group_type_access_remove(context, type_id, project_id):
    """Remove given tenant from the share group type access list."""
    share_group_type_id = _share_group_type_get_id_from_share_group_type(
        context, type_id)
    count = _share_group_type_access_query(context).filter_by(
        share_group_type_id=share_group_type_id,
    ).filter_by(
        project_id=project_id,
    ).soft_delete(
        synchronize_session=False,
    )
    if count == 0:
        # Nothing was deleted, so no such grant existed.
        raise exception.ShareGroupTypeAccessNotFound(
            type_id=share_group_type_id, project_id=project_id)
###############################
def _share_group_type_specs_query(context, type_id):
    """Base query over non-deleted group specs of one group type."""
    return model_query(
        context,
        models.ShareGroupTypeSpecs,
        read_deleted="no"
    ).filter_by(
        share_group_type_id=type_id,
    ).options(
        joinedload('share_group_type'),
    )
@require_context
@context_manager.reader
def share_group_type_specs_get(context, type_id):
    """Return a group type's specs as a plain {key: value} dict."""
    rows = _share_group_type_specs_query(context, type_id).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']
    return result
@require_context
@context_manager.writer
def share_group_type_specs_delete(context, type_id, key):
    """Soft-delete one group spec; raises when the key does not exist."""
    # Look the item up first so a missing key raises
    # ShareGroupTypeSpecsNotFound instead of silently deleting nothing.
    _share_group_type_specs_get_item(context, type_id, key)
    _share_group_type_specs_query(
        context,
        type_id,
    ).filter_by(
        key=key,
    ).soft_delete()
@require_context
def _share_group_type_specs_get_item(context, type_id, key):
    """Return a single group-spec row, raising when absent."""
    result = _share_group_type_specs_query(
        context,
        type_id,
    ).filter_by(
        key=key,
    ).options(
        joinedload('share_group_type'),
    ).first()
    if not result:
        raise exception.ShareGroupTypeSpecsNotFound(
            specs_key=key, type_id=type_id)
    return result
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def share_group_type_specs_update_or_create(context, type_id, specs):
    """Upsert each key/value pair in ``specs`` for the given group type."""
    spec_ref = None
    for key, value in specs.items():
        try:
            spec_ref = _share_group_type_specs_get_item(
                context,
                type_id,
                key,
            )
        except exception.ShareGroupTypeSpecsNotFound:
            # No existing row for this key - create a fresh one.
            spec_ref = models.ShareGroupTypeSpecs()
        spec_ref.update({"key": key, "value": value,
                         "share_group_type_id": type_id, "deleted": 0})
        spec_ref.save(session=context.session)
    return specs
###############################
@require_context
def _message_get(context, message_id):
    """Fetch one non-deleted, project-scoped message row or raise."""
    query = model_query(context,
                        models.Message,
                        read_deleted="no",
                        project_only="yes").filter_by(id=message_id)
    row = query.first()
    if row is None:
        raise exception.MessageNotFound(message_id=message_id)
    return row
@require_context
@context_manager.reader
def message_get(context, message_id):
    """Public reader-transaction wrapper around _message_get."""
    message = _message_get(context, message_id)
    return message
@require_context
@context_manager.reader
def message_get_all(context, filters=None, limit=None, offset=None,
                    sort_key='created_at', sort_dir='desc'):
    """Retrieves all messages.

    If no sort parameters are specified then the returned messages are
    sorted by the 'created_at' key in descending order.

    :param context: context to query under
    :param limit: maximum number of items to return
    :param offset: the number of items to skip from the marker or from the
                   first element.
    :param sort_key: attributes by which results should be sorted.
    :param sort_dir: directions in which results should be sorted.
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see exact_filter function for
                    more information
    :returns: list of matching messages
    """
    messages = models.Message

    query = model_query(context,
                        messages,
                        read_deleted="no",
                        project_only="yes")

    # Whitelist of user-controllable filter columns; anything else in
    # `filters` is ignored by exact_filter.
    legal_filter_keys = ('request_id', 'resource_type', 'resource_id',
                         'action_id', 'detail_id', 'message_level',
                         'created_since', 'created_before')
    if not filters:
        filters = {}

    query = exact_filter(query, messages, filters, legal_filter_keys)

    # paginate_query applies the sort as well as limit/offset.
    query = utils.paginate_query(query, messages, limit,
                                 sort_key=sort_key,
                                 sort_dir=sort_dir,
                                 offset=offset)

    return query.all()
@require_context
@context_manager.writer
def message_create(context, message_values):
    """Insert a new user-facing message row and return the stored record."""
    # Work on a copy so the caller's dict is never mutated.
    values = copy.deepcopy(message_values)
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    message_ref = models.Message()
    message_ref.update(values)
    context.session.add(message_ref)
    # Re-read through the standard accessor so the usual filtering
    # (non-deleted, project scoping) applies to the returned row.
    return _message_get(context, message_ref['id'])
@require_context
@context_manager.writer
def message_destroy(context, message):
    """Soft-delete the message identified by message['id']."""
    query = model_query(context, models.Message)
    query.filter_by(id=message.get('id')).soft_delete()
@require_admin_context
@context_manager.writer
def cleanup_expired_messages(context):
    """Hard-delete every message whose expires_at is already in the past.

    :returns: the number of rows removed.
    """
    cutoff = timeutils.utcnow()
    query = context.session.query(models.Message)
    return query.filter(models.Message.expires_at < cutoff).delete()
###############################
@require_context
@context_manager.reader
def backend_info_get(context, host):
    """Get hash info for given host (None when no row exists)."""
    return _backend_info_query(context, host)
@require_context
@context_manager.writer
def backend_info_create(context, host, value):
    """Create a backend-info row mapping `host` to the info hash `value`."""
    info_ref = models.BackendInfo()
    info_ref.update({"info_hash": value, "host": host})
    info_ref.save(context.session)
    return info_ref
@require_context
@context_manager.writer
def backend_info_update(context, host, value=None, delete_existing=False):
    """Update, create, or soft-delete the backend info row for a host.

    - With a truthy `value`: replace the stored hash (creating the row
      when none exists).
    - With no value and delete_existing=True: flag a live row deleted.
    - Otherwise: no-op on an existing row.

    NOTE(review): the in-place update branches do not call save();
    presumably the writer transaction flushes them at commit — confirm.

    :returns: the affected BackendInfo row (or None when no row existed
        and no value was given).
    """
    info_ref = _backend_info_query(context, host)
    if info_ref:
        if value:
            info_ref.update({"info_hash": value})
        elif delete_existing and info_ref['deleted'] != 1:
            info_ref.update({"deleted": 1, "deleted_at": timeutils.utcnow()})
    else:
        info_ref = models.BackendInfo()
        info_ref.update({"host": host, "info_hash": value})
    info_ref.save(context.session)
    return info_ref
def _backend_info_query(context, host, read_deleted=False):
    """Return the single BackendInfo row for `host`, or None."""
    query = model_query(
        context,
        models.BackendInfo,
        read_deleted=read_deleted,
    )
    return query.filter_by(host=host).first()
###################
def _async_operation_data_query(
    context, entity_id, key=None, read_deleted=False,
):
    """Build a query over one entity's async-operation data rows.

    `key` may be None (no key filter), a single key, or a list of keys
    (matched with an SQL IN clause).
    """
    query = model_query(
        context, models.AsynchronousOperationData,
        read_deleted=read_deleted,
    ).filter_by(
        entity_uuid=entity_id,
    )
    if key is None:
        return query
    if isinstance(key, list):
        return query.filter(models.AsynchronousOperationData.key.in_(key))
    return query.filter_by(key=key)
@require_context
@context_manager.reader
def async_operation_data_get(context, entity_id, key=None, default=None):
    """Fetch stored async-operation data for an entity.

    With key=None or a list of keys, returns a {key: value} dict; with a
    single key, returns that key's value or `default` when absent.
    """
    query = _async_operation_data_query(context, entity_id, key)
    if key is not None and not isinstance(key, list):
        row = query.first()
        return default if row is None else row["value"]
    return {row.key: row.value for row in query.all()}
@require_context
@context_manager.writer
def async_operation_data_update(
    context, entity_id, details, delete_existing=False,
):
    """Upsert an entity's async-operation key/value data.

    Keys present in `details` overwrite (and resurrect) existing rows;
    with delete_existing=True, existing rows whose keys are absent from
    `details` are soft-deleted.  Values are stringified on store.

    :returns: the original `details` mapping (not mutated; a deep copy
        is consumed internally).
    """
    new_details = copy.deepcopy(details)

    # Process existing data
    original_data = context.session.query(
        models.AsynchronousOperationData).filter_by(
        entity_uuid=entity_id,
    ).all()
    for data_ref in original_data:
        in_new_details = data_ref['key'] in new_details

        if in_new_details:
            # Overwrite and un-delete; pop so only truly new keys remain
            # for the insert loop below.
            new_value = str(new_details.pop(data_ref['key']))
            data_ref.update({
                "value": new_value,
                "deleted": 0,
                "deleted_at": None
            })
            data_ref.save(session=context.session)
        elif delete_existing and data_ref['deleted'] != 1:
            data_ref.update({
                "deleted": 1, "deleted_at": timeutils.utcnow()
            })
            data_ref.save(session=context.session)

    # Add new data
    for key, value in new_details.items():
        data_ref = models.AsynchronousOperationData()
        data_ref.update({
            "entity_uuid": entity_id,
            "key": key,
            "value": str(value)
        })
        data_ref.save(session=context.session)

    return details
@require_context
@context_manager.writer
def async_operation_data_delete(context, entity_id, key=None):
    """Soft-delete an entity's async-operation rows (all when key is None)."""
    _async_operation_data_query(context, entity_id, key).update(
        {"deleted": 1, "deleted_at": timeutils.utcnow()})
@require_context
def share_backup_create(context, share_id, values):
    """Create a share backup record; see _share_backup_create."""
    return _share_backup_create(context, share_id=share_id, values=values)
@require_context
@context_manager.writer
def _share_backup_create(context, share_id, values):
    """Insert a ShareBackup row and return it via share_backup_get.

    :param share_id: share the backup belongs to (stored on the row).
    :param values: column values; 'id' is generated when absent.
    """
    # Copy first so the caller's dict is not mutated (the original code
    # wrote 'id' and 'share_id' into the caller's mapping; this also
    # matches message_create's behavior).
    values = copy.deepcopy(values)
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    values.update({'share_id': share_id})

    share_backup_ref = models.ShareBackup()
    share_backup_ref.update(values)
    share_backup_ref.save(session=context.session)
    return share_backup_get(context, share_backup_ref['id'])
@require_context
@context_manager.reader
def share_backup_get(context, share_backup_id):
    """Return the non-deleted, project-scoped backup row or raise."""
    backup = model_query(
        context, models.ShareBackup, project_only=True, read_deleted="no"
    ).filter_by(id=share_backup_id).first()
    if backup is None:
        raise exception.ShareBackupNotFound(backup_id=share_backup_id)
    return backup
@require_context
@context_manager.reader
def share_backups_get_all(context, filters=None,
                          limit=None, offset=None,
                          sort_key=None, sort_dir=None):
    """List share backups matching the given filters.

    A 'project_id' entry inside `filters` is honored as a dedicated
    project-scope filter rather than a column filter.
    """
    # Copy before popping: the original popped 'project_id' straight out
    # of the caller's dict, mutating it as a side effect.
    filters = copy.deepcopy(filters) if filters else {}
    project_id = filters.pop('project_id', None)
    return _share_backups_get_with_filters(
        context,
        project_id=project_id,
        filters=filters, limit=limit, offset=offset,
        sort_key=sort_key, sort_dir=sort_dir)
def _share_backups_get_with_filters(context, project_id=None, filters=None,
                                    limit=None, offset=None,
                                    sort_key=None, sort_dir=None):
    """Retrieves all backups.

    If no sorting parameters are specified then returned backups are sorted
    by the 'created_at' key and desc order.

    :param context: context to query under
    :param project_id: when given, restricts results to this project.
    :param filters: dictionary of filters
    :param limit: maximum number of items to return
    :param offset: number of items to skip before returning results.
    :param sort_key: attribute by which results should be sorted,default is
                     created_at
    :param sort_dir: direction in which results should be sorted
    :returns: list of matching backups
    """
    # Init data
    sort_key = sort_key or 'created_at'
    sort_dir = sort_dir or 'desc'
    # Deep-copy so exact_filter may consume entries without touching the
    # caller's dict.
    filters = copy.deepcopy(filters) if filters else {}
    query = model_query(context, models.ShareBackup)

    if project_id:
        query = query.filter_by(project_id=project_id)

    # Keys suffixed with '~' are fuzzy (substring) matches handled by
    # exact_filter.
    legal_filter_keys = ('display_name', 'display_name~',
                         'display_description', 'display_description~',
                         'id', 'share_id', 'host', 'topic', 'status')
    query = exact_filter(query, models.ShareBackup,
                         filters, legal_filter_keys)

    query = apply_sorting(models.ShareBackup, query, sort_key, sort_dir)

    if limit is not None:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)

    return query.all()
@require_admin_context
@context_manager.reader
def _backup_data_get_for_project(context, project_id, user_id):
    """Return (backup_count, total_size) for a project, optionally per user.

    Sums only non-deleted backups; both values fall back to 0 when the
    project (or user) has no backups.
    """
    query = model_query(context, models.ShareBackup,
                        func.count(models.ShareBackup.id),
                        func.sum(models.ShareBackup.size),
                        read_deleted="no").\
        filter_by(project_id=project_id)
    if user_id:
        result = query.filter_by(user_id=user_id).first()
    else:
        result = query.first()

    return (result[0] or 0, result[1] or 0)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def share_backup_update(context, backup_id, values):
    """Apply `values` to an existing backup row and return the row."""
    backup = share_backup_get(context, backup_id)
    backup.update(values)
    backup.save(session=context.session)
    return backup
@require_context
@context_manager.writer
def share_backup_delete(context, backup_id):
    """Soft-delete a backup row, updating its status column as well."""
    share_backup_get(context, backup_id).soft_delete(
        session=context.session, update_status=True)
|
998,267 | c8985cfbdcb06ac2d0716ce4c5d60415173b856d | import asyncio
import base64
import websockets
import json
from sortedcontainers import SortedDict
from RestAgent import RestAgent
class Subscription:
    """Maintains a local order book for one symbol from a websocket feed.

    A REST snapshot seeds the book; websocket deltas received before the
    snapshot arrives are buffered and replayed (filtered by sequence id)
    once it does.
    """

    def __init__(self, instance_name, uri, symbol, subs_type, rest_agent):
        # Top-of-book cache; only partially maintained by this class.
        self.__topBook = {
            'bid': 0.0,
            'ask': 0.0,
            'last': 0.0,
            'bidSize': 0.0,
            'askSize': 0.0,
            'lastSize': 0,
            'taker': '',
            'timestamp': 0,
            'ts': 0
        }
        self.__instance_name = instance_name
        self.__symbol = symbol
        self.__type = subs_type
        self.__uri = uri
        # price -> volume maps kept sorted by price.
        self.__bidList = SortedDict()
        self.__askList = SortedDict()
        # Deltas that arrived before the REST snapshot was loaded.
        self.__updateBuffer = []
        self.__isStarted = False
        self.__rest_agent = rest_agent
        # Sequence id of the snapshot; 0 means "snapshot not loaded yet".
        self.init_seqNum = 0

    async def socket_consumer(self):
        """Consume order-book messages forever, acking each one."""
        my_subscription = self.__instance_name
        topic = 'orderbook_' + self.__symbol
        full_topic = self.__uri + topic + "/" + my_subscription
        async with websockets.connect(full_topic) as websocket:
            while True:
                msg = await websocket.recv()
                data = json.loads(msg)
                # Payload is base64-encoded JSON inside the envelope.
                resp = base64.b64decode(data['payload'])
                print(resp)
                ack = json.dumps({'messageId': data['messageId']})
                await websocket.send(ack)
                msg = json.loads(resp)
                self.update(msg)

    def start(self):
        """Load the snapshot, then block on the websocket consumer."""
        # print(self.__rest_agent.send_request(request='/orderbook/btc_usdc/', param=None, method='GET'))
        self.initSnapshot()
        asyncio.get_event_loop().run_until_complete(self.socket_consumer())

    def initSnapshot(self):
        """Seed both sides of the book from the REST order-book endpoint."""
        topic = '/orderbook/' + self.__symbol + '/'
        message = self.__rest_agent.send_request(request=topic, param=None, method='GET')
        self.init_seqNum = int(message['updated_id'])
        for i in message['buy']:
            price = float(i['price'])
            vol = float(i['amount'])
            self.__bidList[price] = vol
        for i in message['sell']:
            price = float(i['price'])
            vol = float(i['amount'])
            self.__askList[price] = vol

    def update(self, message):
        """Apply (or buffer) one delta, then print the top ask levels."""
        if self.init_seqNum == 0:
            # Snapshot not loaded yet: hold the delta for later replay.
            self.__updateBuffer.append(message)
        elif len(self.__updateBuffer) > 0:
            # First delta after the snapshot: replay buffered deltas that
            # are newer than the snapshot, then apply the current one.
            for i in self.__updateBuffer:
                if int(i['updated_id']) > self.init_seqNum:
                    self.updateProceed(i)
            self.__updateBuffer.clear()
            self.updateProceed(message)

        # just output first 5 levels for testing purposes
        slice = self.__askList
        n = 0
        for i in slice:
            n = n + 1
            print('Vol:{} Px:{}'.format(slice[i], i))
            if n > 5:
                break
        print('***********************')

    def updateProceed(self, msg):
        """Merge one delta into the book; zero volume removes the level."""
        for i in msg['buy']:
            price = float(i['price'])
            vol = float(i['amount'])
            if price in self.__bidList and vol == 0:
                del self.__bidList[price]
            if vol != 0:
                self.__bidList[price] = vol
        for i in msg['sell']:
            price = float(i['price'])
            vol = float(i['amount'])
            if price in self.__askList and vol == 0:
                del self.__askList[price]
            if vol != 0:
                self.__askList[price] = vol
# Manual run: subscribe to the btc_usdc order book on Ripio.
# NOTE(review): API key hard-coded in source — rotate it and read it
# from the environment instead.
if __name__ == "__main__":
    rest = RestAgent('https://api.exchange.ripio.com/api/v1',
                     'a963ae2fccf59bbaae607b1a65b3ca2d3305378b2dc59a0659a02b3b675a6513')
    trade_wrapper = Subscription('Ripio', 'wss://api.exchange.ripio.com/ws/v2/consumer/non-persistent/public/default/',
                                 'btc_usdc', 'top', rest)
    trade_wrapper.start()
    input()
|
998,268 | 735840bc8b38aa3a43110a02357028927384627f | from typing import List
from slack_entities.client.client import get_client
from slack_entities.entities import Channel
class FileUpload:
    """Payload builder and sender for Slack's files.upload API call."""

    def __init__(
        self,
        channels: List[Channel],
        token: str,
        file_path: str,
        filetype: str,
        filename: str,
        initial_comment: str,
    ):
        self.channels = channels
        self.token = token
        self.filename = filename
        self.filetype = filetype
        self.initial_comment = initial_comment
        self.file_path = file_path

    @property
    def message_params(self):
        """Request parameters derived from this upload's attributes."""
        channel_ids = ','.join(channel.id for channel in self.channels)
        return {
            'channels': channel_ids,
            'filename': self.filename,
            'filetype': self.filetype,
            'initial_comment': self.initial_comment,
        }

    def send(self):
        """Perform the files.upload API call and return its response."""
        client = get_client(token=self.token)
        return client.api_call(
            'files.upload',
            body_encoding='data',
            files={'file': self.file_path},
            **self.message_params,
        )
|
998,269 | e8544b95acde8e77b6155d340ea00e434b7aa9c1 | #!/usr/bin/env python
def count(num):
    """Solve the 'Counting Sheep' problem for starting value num.

    Watches the digit sequence of num, 2*num, 3*num, ... and returns (as
    a string) the first multiple after which every digit 0-9 has been
    seen.  Returns "INSOMNIA" for 0, which never yields new digits.
    """
    if num == 0:
        return "INSOMNIA"
    remaining = set(range(10))
    multiplier = 1
    current = str(num)
    while True:
        remaining -= {int(ch) for ch in current}
        if not remaining:
            return current
        current = str(num * multiplier)
        multiplier += 1
# Script driver (Python 2: print statement; input() eval's stdin).
# First line is the number of cases, then one starting value per case.
for idx in range(1, input()+1):
    print "Case #%d:"%idx, count(input())
|
998,270 | b3e988276f9bec682ec2b1642e19b2feefff6cdb | import sys
def factorial(n):
    """Return n! (1 when n <= 0)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def tbs_numbers(n):
    """Return the n-th Catalan number C(n) = (2n)! / ((n+1)! * n!).

    Uses math.factorial with integer floor division: the original's true
    float division loses precision once the factorials exceed 2**53
    (around n >= 15), producing wrong counts for larger n.
    """
    from math import factorial as fact
    return fact(2 * n) // (fact(n + 1) * fact(n))
def main():
    """Read one integer from stdin and print its tree/sequence count."""
    raw = sys.stdin.readline()
    print(tbs_numbers(int(raw.strip())))
# Standard script entry point.
if __name__ == '__main__':
    main()
|
998,271 | 97124bfa298a7edf88517275706d803518b86f94 |
from __future__ import print_function
import ConfigParser
class TGICConfigDB:
    """Loads TGIC settings from the .tgic.ini file in the working dir.

    Python 2 code (uses the old ConfigParser module name).
    """

    def __init__(self):
        # Reads the [Core] section; requires 'tag_token' and
        # 'comment_token' keys to be present.
        core_dict = TGICConfigDB.parseTGICConfig()
        self.tag_token = core_dict['tag_token']
        self.comment_token = core_dict['comment_token']

    def __repr__(self):
        return "TGIC Config:\n\ttag_token = %s" % (self.tag_token)

    @staticmethod
    def getConfigDict(section):
        '''
        Return a dict of option -> value for one ini section.

        Borrowed from:
        https://wiki.python.org/moin/ConfigParserExamples
        '''
        Config = ConfigParser.ConfigParser()
        Config.read(".tgic.ini")
        dict1 = {}
        options = Config.options(section)
        for option in options:
            try:
                dict1[option] = Config.get(section, option)
                # NOTE(review): Config.get returns a string, so this -1
                # comparison can never be true; DebugPrint is also
                # undefined here — dead code from the original snippet.
                if dict1[option] == -1:
                    DebugPrint("skip: %s" % option)
            # NOTE(review): bare except maps any failure to None.
            except:
                print("exception on %s!" % option)
                dict1[option] = None
        return dict1

    @staticmethod
    def parseTGICConfig():
        # Convenience wrapper for the [Core] section.
        core_dict = TGICConfigDB.getConfigDict('Core')
        return core_dict
# Manual smoke test: load the config and echo the tag token.
if __name__ == '__main__':
    configDB = TGICConfigDB()
    print("read Token - ", configDB.tag_token)
|
998,272 | e9262c1a13a92fe410e452a968b6f2b9dfb7d5d9 | x = "C\Users\bernard.castello\Documents\classMates.txt"
y = raw_input ("Type in a name")
def open(Path, x):
with open(Path) as file:
for line in file:
if y in file:
print (line)
break:
else:
print("Searching...")
print("Name not found")
'''def splitFile(Path, x):
first = []:
last = []:
z = :
with open(Path, y):''' |
998,273 | 9886cf21aa0c5ab30d28ce8bd8526555f3797461 | #!/usr/bin/python
from colprint import colprint
def download_file(url, webuser = None, webpass = None):
    """Fetch `url` and return its body, with optional HTTP Basic auth.

    NOTE(review): `__urllib2` and `__base64` are undefined in this module
    — they look like name-mangled aliases copied out of a class; this
    function raises NameError as written.  Python 2 API (urllib2,
    base64.encodestring).
    """
    request = __urllib2.Request(url)
    if webuser:
        # encodestring appends a newline; [:-1] strips it.
        base64string = __base64.encodestring('%s:%s' % (webuser, webpass))[:-1]
        request.add_header("Authorization", "Basic %s" % base64string)
    htmlFile = __urllib2.urlopen(request)
    htmlData = htmlFile.read()
    htmlFile.close()
    return htmlData
def edits1(word):
    """Return the set of all strings at edit distance 1 from `word`.

    Covers single-character deletions, adjacent transpositions,
    alterations and insertions over the lowercase ASCII alphabet
    (Norvig's spelling-corrector helper).
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [a + b[1:] for a, b in splits if b]
    transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
    replaces = [a + c + b[1:] for a, b in splits if b for c in letters]
    inserts = [a + c + b for a, b in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)
# Demo: print all edit-distance-1 variants in 10 columns.
if __name__ == "__main__":
    colprint(edits1('shawncresante'),10)
998,274 | 52b50192266887fcdff0fbb57fce84478dbfa7b6 | from __future__ import print_function
import multiprocessing
import signal
import random
import datetime as dt
import my_subprocess
import my_subprocess2
import my_subprocess3
import logging
from common import *
import process_logger as pl
def call_subprocess(params):
    """Pool-worker entry point: set up process logging, run both workers.

    Runs inside a multiprocessing pool process, so it performs its own
    process-local logger initialization before doing any work.
    """
    pl.initProcess()
    print("Params are ", params)
    my_subprocess.run_worker(params)
    my_subprocess2.run_worker(params)
def initLogging():
    """Create a timestamped log filename and wire it into all modules.

    NOTE(review): my_subprocess3's logging is initialized here but
    call_subprocess never invokes it — confirm whether that is intended.
    """
    loggerfilename = "log/jsa_run_" + dt.datetime.now().strftime("%y%m%d_%H%M%S")
    pl.initMain(loggerfilename)
    my_subprocess.initLogging(loggerfilename)
    my_subprocess2.initLogging(loggerfilename)
    my_subprocess3.initLogging(loggerfilename)
def main():
    """Fan three short jobs out over a 4-worker pool, Ctrl-C friendly.

    The SIGINT handler is temporarily ignored while the pool forks so
    worker processes inherit SIG_IGN and don't each raise
    KeyboardInterrupt; the parent's handler is restored right after.
    """
    initLogging()
    pl.log("Starting...")
    pl.writeOutput("hello!")
    print("Initializing 4 workers")
    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = multiprocessing.Pool(4)
    signal.signal(signal.SIGINT, original_sigint_handler)
    try:
        print("Starting 10 jobs of 0-5 seconds each")
#        res = pool.map_async(call_subprocess, [random.randint(0,5) for i in range(10)])
        res = [pool.apply_async(call_subprocess, (random.randint(0,5),)) for i in range(3)]
        print("Waiting for results")
        for r in res:
            r.get(60) # Without the timeout this blocking call ignores all signals.
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
    else:
        print("Normal termination")
        pool.close()
    pool.join()
    pl.log("Finished!")
    pl.mergeOutputs()
# Standard script entry point.
if __name__ == "__main__":
    main()
998,275 | 2ebe26c049e34001960246d7d06673f519ec81cf | #!/usr/bin/python3
'''
Write a function that returns an object (Python data structure)
represented by a JSON string
'''
import json
def from_json_string(my_str):
    """Deserialize a JSON document and return the Python object it encodes."""
    obj = json.loads(my_str)
    return obj
998,276 | bf83eada4e0e09ef46e15cc7aaf2b220a9e12c46 |
# Manual smoke test for Hub/FileTracker: apply an ADD and a REMOVE edit
# to test.txt through the hub, then close the file.
if __name__ == '__main__':
    from FileTracker import *
    from Hub import *
    hub = Hub()
    appendline = ['234','567']
    msg = {'action':'ADD','pre':1, 'lines':appendline}
    hub.LoadFile('test.txt')
    hub.ChangeFile('test.txt', msg);
    hub.ChangeFile('test.txt',{'action':'REMOVE','lines':[3,4]})
    hub.CloseFile('test.txt')
    #testfile = FileTracker('test.txt')
    #testfile.Load()
    #msg = {'action':'ADD','pre':1, 'lines':appendline}
    #testfile.Add(1,appendline)
    #testfile.Execute(msg)
    #testfile.Execute({'action':'REMOVE','lines':[3,4]})
    #testfile.Write("test.log",'LOG')
|
998,277 | d7489dd6d8f5b0b7cd449329483a22aca2f11c6f | # -*- coding: utf-8 -*-
from linepy import * #
from akad.ttypes import * #
from multiprocessing import Pool, Process #
from akad.ttypes import ContentType as Type #
from thrift import transport, protocol, server
from LINEPY import *
#from akad.ttypes import *
#from multiprocessing import Pool, Process
from datetime import datetime
from bs4 import BeautifulSoup
from gtts import gTTS
from googletrans import Translator
import time,random,sys,json,codecs,threading,glob,re,os,subprocess
from datetime import datetime, timedelta
from humanfriendly import format_timespan, format_size, format_number, format_length
import requests
import datetime
import requests,urllib,json
#Khusus Login Qr
# NOTE(review): LINE auth token hard-coded in source — rotate it and load
# from the environment instead.  All four clients (cl/ki/kk/kc) log in
# with the SAME token, so they are presumably the same account.
#Khusus Login Qr
cl = LINE('EzkL5U08jR7wgDazdld0.9q2iIcfJltjghJCvoSD1Wa.sJhW3B7mv6oi0NxWAY971JrxCBjfaet+heNlSPWP3To=')
cl.log("Auth Token : " + str(cl.authToken))
cl.log("Timeline Token : " + str(cl.tl.channelAccessToken))
#Khusus Lpgin Token
#cl = LINE(
#cl.log("Auth Token : " + str(cl.authToken))
#cl.log("Timeline Token : " + str(cl.tl.channelAccessToken))
ki = LINE('EzkL5U08jR7wgDazdld0.9q2iIcfJltjghJCvoSD1Wa.sJhW3B7mv6oi0NxWAY971JrxCBjfaet+heNlSPWP3To=')
ki.log("Auth Token : " + str(cl.authToken))
ki.log("Timeline Token : " + str(cl.tl.channelAccessToken))
kk = LINE('EzkL5U08jR7wgDazdld0.9q2iIcfJltjghJCvoSD1Wa.sJhW3B7mv6oi0NxWAY971JrxCBjfaet+heNlSPWP3To=')
kk.log("Auth Token : " + str(cl.authToken))
kk.log("Timeline Token : " + str(cl.tl.channelAccessToken))
kc = LINE('EzkL5U08jR7wgDazdld0.9q2iIcfJltjghJCvoSD1Wa.sJhW3B7mv6oi0NxWAY971JrxCBjfaet+heNlSPWP3To=')
kc.log("Auth Token : " + str(cl.authToken))
kc.log("Timeline Token : " + str(cl.tl.channelAccessToken))
# Startup timestamp used for the bot's runtime/speed commands.
startBot = time.time()
elapsed_time = format_timespan(time.time()-startBot)
helpMessage ="""
► Command Public Bot ►
► Id
► Mid
► Command in the groups By Teguh ►
► Tutup qr
► Buka qr
► Invite 「Mid」
► Invite 「Mid」
► Ginfo
► Cancel
► White 「target」
► Restart
► Guest On/Off
► QrOn/Off
► closeqr on
► Lurking
► Bot Out
► Kill
► Group bc 「Kata」
► Contact bc「Kata」
► List grup
► Speed
► Runtime
◇ Command kicker only Owner ◇
◇ Salken Ya
◇ Bunuh「@」
◇ Kick「Mid」
◇ All join
◇ Bye all
◇ Kill ban
◇ Ban 「@」
◇ Unban「@」
◇ Ban 「Contact」
◇ Unban 「Contact」
◇ Banlist
◇ Clear ban
◇ Tes
◇ Clear
"""
# Long-poll helper bound to the primary client.
oepoll = OEPoll(cl)
# All clients, used throughout the bot for random kicker selection.
KAC=[cl,ki,kk,kc]
# NOTE(review): all four mids are read from `cl`, so mid == Amid == Bmid
# == Cmid; presumably Amid/Bmid/Cmid should come from ki/kk/kc — the
# ticket-relay logic in bot() depends on them being distinct.  Confirm.
mid = cl.getProfile().mid
Amid = cl.getProfile().mid
Bmid = cl.getProfile().mid
Cmid = cl.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid]
admin=["uf50d888821632d32461e37153ac775c0"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':False,
'message':"Thanks for add me \n Creator line.me/ti/p/~gerhanaselatan",
"lang":"JP",
"comment":"Thanks for add me \n Creator line.me/ti/p/~gerhanaselatan",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":True,
"cName":"D͆̒̄ͧ́ͨ̀͠҉͈͓̳̞̬͓͈̖̲̰͚ ̡͉̜͓͓͉̖̺̌ͤ̅̈́̉͌ͅͅͅW̵̧̺̗̝ͥͧ̐ͨ͆͆ͯ͆͂̓ͣ̃ͣ͒ͬͬ̎ͮ͢ ̴̶̶̧̳̖̤͕͖͈̞͖͎͉̔̃̔͑ͧ̌̂ͫͬ̄͒̋̈́ͭ͐̽̆͘Ĩ̶̏͑ͤͯͦ̊̓̒́ͬͥ̽̀ͮ͑̓ͬ́҉̢̖̻̮͇̭͍",
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"ProtectQR":True,
"acck":False,
"spamer":{},
"CloseQR":True,
"Protectguest":True,
"Protectcancel":True,
"protectionOn":True,
"atjointicket":True,
}
# Per-chat read-receipt bookkeeping consumed by NOTIFIED_READ_MESSAGE.
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = {}
# Rebound immediately so setTime aliases the dict inside wait2.
setTime = wait2['setTime']
def NOTIFIED_READ_MESSAGE(op):
    """Record which members have read messages in watched chats.

    When a read receipt (op) arrives for a chat tracked in
    wait2['readPoint'], the reader's display name is appended to the
    chat's bookkeeping strings (once per name).
    """
    try:
        if op.param1 in wait2['readPoint']:
            Name = cl.getContact(op.param2).displayName
            if Name in wait2['readMember'][op.param1]:
                pass
            else:
                wait2['readMember'][op.param1] += "\n・" + Name
                wait2['ROM'][op.param1][op.param2] = "・" + Name
        else:
            pass
    # NOTE(review): bare except silently swallows all errors here.
    except:
        pass
def restart_program():
    """Replace the current process with a fresh run of this script."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def sendMessageWithMention(to, mid):
    """Send an '@x' message to chat `to` that mentions the user `mid`.

    The MENTION metadata marks characters 0-3 of the text ("@x ") as the
    mention span for user `mid`.
    """
    try:
        aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
        text_ = '@x '
        cl.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
    except Exception as error:
        logError(error)
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#--------Open Qr Kick Start--------------#
if op.type == 11:
if wait["ProtectQR"] == True:
if op.param2 not in Bots:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.updateGroup(G)
#--------Open Qr Kick Finish--------------#
#--------Open Qr Auto Close---------------#
if op.type == 11:
if not op.param2 in Bots:
if wait["CloseQR"] == True:
try:
klist=[cl,ki,kk,kc] #Teguh tambah
puck=random.choice(kpist)
G = puck.getGroup(op.param1)
G.preventJoinByTicket = True
puck.updateGroup(G)
except Exception as e:
print(e)
#--------Open Qr Auto Close---------------#
if op.type == 13:
if mid in op.param2:
if wait["autoJoin"] == True:
if op.param2 in Bots:
G = cl.getGroup(op.param1)
cl.acceptGroupInvitation(op.param1)
#--------Invite User Kick Start-----------#
if op.type == 13:
if wait["Protectguest"] == True:
if op.param2 not in Bots:
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Invite User Kick Finish------------#
#------Join Kicked start------------------#
if op.type == 17:
if wait["acck"] == True:
if op.param2 not in admin:
try:
contact = cl.getContact(op.param2)
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except Exception as e:
print(e)
#-------Join Kicked Finish----------------#
#-------Blacklist Join Kick Start---------#
if op.type == 17:
if op.param2 in wait["blacklist"]:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Kick Auto BL------------------------#
if op.type == 19:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 not in wait["whitelist"]:
wait["blacklist"][op.param2] = True
print("kicker kicked")
else:
pass
#--------------------Kick Auto Bl-------#
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.type == 13:
print(op.param1)
print(op.param2)
print(op.param3)
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if op.param2 not in Bots:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("bot Aktif")
if op.type == 19:
if op.param3 in admin:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,admin)
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,admin)
except:
print ("bot bekerja")
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,Bots)
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
#------Cancel User Kick start------#
if op.type == 32:
if op.param2 not in Bots:
cl.kickoutFromGroup(op.param1,[op.param2])
#-------------------------------------#
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg._from
if msg._from == profile.mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
#============================================================#
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Keyword","help","Help"]:
if msg._from in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif ("Gn " in msg.text):
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Bunuh " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Semoga dapat anu Di Luar Sana")
pass
elif "Kick " in msg.text:
if msg._from in admin:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "Kick2 " in msg.text:
if msg._from in admin:
midd = msg.text.replace("Kick2 ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Kick3 " in msg.text:
if msg._from in admin:
midd = msg.text.replace("Kick3 ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Kick4 " in msg.text:
if msg._from in admin:
midd = msg.text.replace("Kick4 ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg._from in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["List grup","List group","list grup","list group"]:
if msg._from in admin:
ki.sendText(msg.to,"「Group」\n\nWaiting for : Group List")
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "║○%s\n" % (cl.getGroup(i).name+"\n║Members: "+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"╔═════¤|{ List Grup }|¤═════\n" + h + "╠═══════[ Total ]════════\n║" + str(len(gid)) + "\n╚════════════════════")
elif msg.text in ["Reset"]:
if msg._from in admin:
cl.sendText(msg.to, "Bot Have Been Restart")
restart_program()
print ("Reset")
elif msg.text in ["cancel","Cancel"]:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Ourl","ourl"]:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr"]:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
cl.sendText(msg.to,"Sudah Tertutup")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya bang teguh Yang bisa Gunain Perintah ini.")
elif msg.text == "Ginfo":
if msg._from in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Id" == msg.text:
cl.sendText(msg.to,msg.to)
elif msg.text in ["Mid ku","mid ku","My mid","Mid saya"]:
cl.sendText(msg.to,msg._from)
elif msg.text in ["TL:"]:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Cn " in msg.text:
if msg._from in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Names " + string)
elif "Mybio " in msg.text:
if msg._from in admin:
string = msg.text.replace("Mybio ","")
if len(string.decode('utf-8')) <= 100000000000000:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"()Update Bio→" + string + "←")
elif msg.text in ["Pc On","pc on"]:
if msg._from in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"protect cancel On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Pc Off","pc off"]:
if msg._from in admin:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"protect cancel Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Guest On","guest on"]:
if msg._from in admin:
if wait["Protectguest"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Guest Off","guest off"]:
if msg._from in admin:
if wait["Protectguest"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["CloseQR On","closeqr on"]:
if msg._from in admin:
if wait["CloseQR"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Closed QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["CloseQR"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Closed QR ON")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["CloseQR Off","closeqr off"]:
if msg._from in admin:
if wait["CloseQR"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Closed QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["CloseQR"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Closed QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Acc on","acc on","A on","a on"]:
if msg._from in admin:
if wait["acck"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Udah aktif kak")
else:
cl.sendText(msg.to,"Done")
else:
wait["acck"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Anti Join Mode on")
else:
cl.sendText(msg.to,"Done")
elif msg.text in ["Acc off","acc off","A off","a off"]:
if msg._from in admin:
if wait["acck"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off kak")
else:
cl.sendText(msg.to,"Done")
else:
wait["acck"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Anti Join Mode off")
else:
cl.sendText(msg.to,"Done")
elif msg.text in ["Qr On","qr on"]:
if msg._from in admin:
if wait["ProtectQR"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["ProtectQR"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR ON")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr Off","qr off"]:
if msg._from in admin:
if wait["ProtectQR"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["ProtectQR"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if msg._from in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]:
if msg._from in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オン","Join on","Auto join:on","自動åƒåŠ ï¼šé–‹"]:
if msg._from in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オフ","Join off","Auto join:off","自動åƒåŠ ï¼šé—œ"]:
if msg._from in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
if msg._from in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒ç»ã€‚è¦æ—¶å¼€è¯·æŒ‡å®šäººæ•°å‘é€")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°ç»„用自动邀请拒ç»")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg._from in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg._from in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg._from in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg._from in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Set"]:
if msg._from in admin:
md = ""
if wait["Protectcancel"] == True: md+=" Protect Cancel : on\n"
else: md+=" Protect Cancel : off\n"
if wait["ProtectQR"] == True: md+=" Protect QR : on\n"
else: md+=" Protect QR : off\n"
if wait["CloseQR"] == True: md+=" Closed QR : on\n"
else: md+=" CloseQR : off\n"
if wait["Protectguest"] == True: md+=" Block Invite : on\n"
else: md+=" Block Invite : off\n"
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
cl.sendText(msg.to,md)
elif msg.text in ["Group id","群組全id"]:
if msg._from in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg._from in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"Semua grup sudah dibatalkan")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg._from in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg._from in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Buka qr","Open qr"]:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Sudah Dibuka")
else:
cl.sendText(msg.to,"Sudah Terbuka")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Bang teguh Yang bisa Gunain Perintah ini.")
#-----------------------------------------------------------------#
elif msg.text in ["Lurking","lurking"]:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
chiya += rom[1] + "\n"
cl.sendText(msg.to, "========HAI DUL NYIMAK========%s\n\nKamu tercyduk muehehehe👻👻👻👻\n[%s]" %(wait2['readMember'][msg.to],setTime[msg.to]))
print("ReadPoint Set...")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
cl.sendText(msg.to, "Kami telah memperbarui poin baca secara otomatis.")
else:
# cl.sendText(msg.to, "Kami telah memperbarui poin baca secara otomatis.")
print("ReadPoint Set...")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
cl.sendText(msg.to, "Kami telah memperbarui poin baca secara otomatis.")
#-----------------------------------------------
#-----------------------------------------------
#-----------------------------------------------
elif msg.text in ["All join"]: #Panggil Semua Bot
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = True
cl.updateGroup(G)
#----------------Bot Out All Group Start------------------------------#
elif msg.text in ["Bot Out"]:
if msg._from in admin:
if msg.toType == 2:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(msg.to)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara")
else:
cl.sendText(msg.to,"He declined all invitations")
#--------------------------Bot Out All Group Start----------------------------#
elif msg.text in ["Bye all"]:
if msg._from in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Dwi Bye"]:
if msg._from in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
#-----------------------Leave Group Bot---------------------------------------#
elif msg.text in ["Kill"]:
if msg._from in admin:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc] #Teguh tambah
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#--------------------------Group Bc Start-------------------------------------#
elif "Group bc " in msg.text:
if msg._from in admin:
bctxt = msg.text.replace("Group bc ", "")
a = cl.getGroupIdsJoined()
for manusia in a:
cl.sendText(manusia, (bctxt))
#--------------------------Group Bc Finish------------------------------------#
elif "Salken Ya" in msg.text:
if msg._from in admin:
if msg.toType == 2:
print("ok")
_name = msg.text.replace("Salken Ya","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"⚠DENG DENG DENG DENG !⚠")
cl.sendText(msg.to,"JANGAN PANIK SEMUA PINTU KELUAR ADA DI POJOK KANAN🔫")
cl.sendText(msg.to,"CEPET TANGKIS GOBLOK JANGAN DILIATIN NTAR GRUP LU RATA GOBLOK")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.")
else:
for target in targets:
if not target in admin and Bots:
try:
klist=[cl,ki,kk,kc] #Teguh takbahin
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Grup Bersih")
elif "Nk " in msg.text:
if msg._from in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,kk,kc] #Teguh tambahin
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Bye")
elif "Blacklist @ " in msg.text:
if msg._from in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Cv")
except:
cl.sendText(msg.to,"error")
elif "Ban @" in msg.text:
if msg._from in admin:
if msg.toType == 2:
print("[Ban]ok")
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Target Tidak Djtemukan")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Siap")
except:
cl.sendText(msg.to,"Berhasil")
elif "Unban @" in msg.text:
if msg._from in admin:
if msg.toType == 2:
print("[Unban]ok")
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found Cv")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Cv")
except:
cl.sendText(msg.to,"Succes Cv")
#----------------------------------------------------------------------------#
elif msg.text in ["Tes"]:
if msg._from in admin:
cl.sendText(msg.to,"Teguh Here Mask")
#-----------------------------------------------------------------------------
elif msg.text in ["Sp","sp"]:
cl.sendText(msg.to, "Process Waiting...")
sp = int(round(time.time() *1000))
cl.sendText(msg.to,"my speed : %sms" % (sp - op.createdTime))
#---------------------------------------------------------------------
elif msg.text in ["Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "Proccess...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "Kecepatan mengirim pesan: %sms" % (elapsed_time))
#------------------------------------------------------------------
elif msg.text in ["Runtime"]:
if msg._from in admin:
runtime = time.time()-startBot
elapsed_time = format_timespan(time.time()-startBot)
cl.sendText(msg.to,"Running in %s" % (elapsed_time))
#------------------------------------------------------------------
elif msg.text in ["Ban"]:
if msg._from in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim Kontak")
elif msg.text in ["Unban"]:
if msg._from in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim Kontak")
elif msg.text in ["Banlist"]:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Cek ban"]:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Clear ban"]:
if msg._from in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"Done")
elif msg.text in ["Kill ban"]:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
#----------------Fungsi Cek Sider-------------------#
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n - " + Name
wait2['ROM'][op.param1][op.param2] = " - " + Name
else:
ki.sendText
except:
pass
#----------------Fungsi Cek Sider-------------------#
if op.type == 59:
print(op)
except Exception as error:
print(error)
def a2():
    """Return False during minutes that are multiples of ten, True otherwise.

    Acts as a coarse gate so periodic work proceeds only on the
    :00/:10/:20/:30/:40/:50 marks.
    """
    now2 = datetime.now()
    # "%M" yields a two-character minute string such as "07" or "30".
    nowT = datetime.strftime(now2, "%M")
    # BUG FIX: the original compared nowT[14:], which is always "" for a
    # two-character string, so the membership test never matched and the
    # function unconditionally returned True.
    if nowT in ["10", "20", "30", "40", "50", "00"]:
        return False
    else:
        return True
def nameUpdate():
    """Background worker: refresh the bot's display name every 10 minutes.

    Acts only while wait["clock"] is truthy; every exception (network,
    auth, missing keys) is swallowed so the loop never dies.
    """
    while True:
        try:
            #while a2():
            #pass
            if wait["clock"] == True:
                now2 = datetime.now()
                # NOTE(review): nowT is computed but never used below --
                # presumably the "(HH:MM)" clock string was meant to be
                # appended to the display name; confirm before changing.
                nowT = datetime.strftime(now2,"(%H:%M)")
                profile = cl.getProfile()
                profile.displayName = wait["cName"]
                cl.updateProfile(profile)
            time.sleep(600)
        except:
            # Deliberate best-effort: ignore any failure and keep looping.
            pass
# Run the display-name updater on a daemon thread so it terminates with the
# main process instead of blocking interpreter shutdown.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main long-polling loop: fetch up to 50 operations per trace and hand each
# one to the bot() handler on its own thread.
while True:
    try:
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                # The revision is advanced before the handler runs, so an
                # operation is consumed even if its handler thread fails.
                oepoll.setRevision(op.revision)
                thread = threading.Thread(target=bot, args=(op,))
                thread.start()
    except Exception as e:
        print(e)
|
998,278 | 994ba3f9d1c9e6e597985d3ea2aff49ac1a3635d | import re
import sys
import csv
import time
def main():
    """Aggregate per-department sale counts from a CSV-like file.

    Reads the file named by ``sys.argv[1]`` (default ``test_file.csv``),
    strips a leading ``YYYY-MM-DD`` date from each line, sums the numeric
    sales figure per department, and writes ``dep,num`` rows to
    ``target_file.csv`` (no header row, matching the original output).
    """
    res = {}
    test_file = sys.argv[1] if len(sys.argv) > 1 else 'test_file.csv'
    with open(test_file, 'r') as fd:
        # read file line by line
        for line in fd:
            # Drop the date column (e.g. "2020-01-02") from the line.
            strip_line = re.sub(r'([0-9]{1,4}\-){2}[0-9]{1,2}', '', line).strip()
            # Department name is what remains once all digits are removed.
            dep = re.sub(r'[0-9]+', '', strip_line).strip(',')
            # Sales figure is the first run of digits left in the line.
            num = re.search(r'[0-9]+', strip_line)
            if not dep or not num:
                continue
            # Accumulate as int from the start (the original stored the first
            # occurrence as a string and only converted on repeats, which
            # produced mixed str/int values in the dict).
            res[dep] = res.get(dep, 0) + int(num.group(0))
    # prepare rows for dumping to the target file
    res_to_csv = [{'dep': key, 'num': value} for key, value in res.items()]
    # write the data to the file
    with open('target_file.csv', 'w', newline='') as target:
        # Fixed field names avoid an IndexError when the input yields no rows.
        writer = csv.DictWriter(target, ['dep', 'num'])
        writer.writerows(res_to_csv)
if __name__ == '__main__':
    # Time the whole transformation with CPU process time (user + system
    # time of this process only, excludes sleep/wait).
    t = time.process_time()
    main()
    elapsed_time = time.process_time() - t
    print(f"Data transforming finished. Resulting 'target_file.csv' was created in {elapsed_time}s.")
|
998,279 | b49026fe0b0e06b404391f86b5936309fcec9e2e | # Note that this relies on pytest (instead of unittest) to also redirect
# stdout to a non-tty, which prevents man from calling a pager.
import subprocess
import sys
import pytest
def speedoc(*args):
    """Invoke ``python -m_speedoc`` with *args*, raising on nonzero exit."""
    cmd = [sys.executable, "-m_speedoc"]
    cmd.extend(args)
    subprocess.check_call(cmd)
def test_main():
    """End-to-end check of the speedoc CLI wrapper via subprocess."""
    # A module target and an attribute target should both succeed.
    speedoc("sphinx")
    speedoc("sphinx.__version__")
    # Passing more than one target at once must exit nonzero.
    with pytest.raises(subprocess.CalledProcessError):
        speedoc("sphinx", "sphinx.__version__")
|
998,280 | 580ebdb86440d7f0693ea651ed1fd191bb9e4471 | import face_recognition
import numpy as np
# import socket programming library
import socket
# import thread module
from _thread import *
import threading
import json
from dotenv import load_dotenv, find_dotenv
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os
# Guards the accept loop: Main() acquires this before spawning a worker and
# the worker releases it when its client disconnects.
print_lock = threading.Lock()
# Load MYSQL_* settings from the nearest .env file into os.environ.
load_dotenv(find_dotenv())
DB_CONNECT_STRING = 'mysql+pymysql://%s:%s@%s/%s' % (os.environ.get('MYSQL_USER'),
                                                     os.environ.get('MYSQL_PASSWORD'),
                                                     os.environ.get('MYSQL_SERVER'),
                                                     os.environ.get('MYSQL_DATABASE'))
engine = create_engine(DB_CONNECT_STRING)
DB_Session = sessionmaker(bind=engine)
# NOTE(review): a DB session is opened at import time but is not used by any
# code visible in this module -- confirm it is actually needed.
session = DB_Session()
# thread function: serves one connected client
def threaded(c):
    """Serve one client socket: receive JSON jobs, reply with face encodings.

    Each request is a JSON object with a ``path`` key naming an image file.
    The reply is JSON: on success ``data`` holds the face encoding as a
    list; otherwise ``success`` is False and ``message`` explains why.
    """
    while True:
        # data received from client
        data = c.recv(1024)
        if not data:
            # Peer closed the connection: release the lock Main() acquired
            # before spawning this thread, then stop serving.
            # NOTE(review): releasing a Lock acquired by another thread is
            # legal for threading.Lock but fragile -- confirm intent.
            print_lock.release()
            break
        data = json.loads(data)
        print(data)
        image = face_recognition.load_image_file(data['path'])
        face_encodings = face_recognition.face_encodings(image)
        if len(face_encodings) == 0:
            c.send(json.dumps({
                'success': False,
                'message': '未找到人脸'
            }).encode('utf-8'))
        elif len(face_encodings) > 1:
            # BUG FIX: the original tested ``== 2``, so an image with three
            # or more faces fell through to the success branch and silently
            # returned the first encoding instead of the "multiple faces"
            # error.
            c.send(json.dumps({
                'success': False,
                'message': '找到多张人脸'
            }).encode('utf-8'))
        else:
            print('ok')
            c.send(json.dumps({
                'success': True,
                'data': face_encodings[0].tolist()
            }).encode('utf-8'))
    # connection closed
    c.close()
def Main():
    """Accept TCP clients on port 28691 and serve each on its own thread."""
    # Empty host binds all local interfaces.
    host = ""
    # reserve a port on your computer
    # in our case it is 28691 but it
    # can be anything
    port = 28691
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    print("socket binded to post", port)
    # put the socket into listening mode with a backlog of 10 connections
    s.listen(10)
    print("socket is listening")
    # a forever loop until client wants to exit
    while True:
        # establish connection with client
        c, addr = s.accept()
        # lock acquired by client
        # NOTE(review): the lock is acquired here and released inside the
        # worker thread when that client disconnects, which effectively
        # serialises accepts -- confirm that is intentional.
        print_lock.acquire()
        # Start a new thread and return its identifier
        start_new_thread(threaded, (c,))
    # unreachable: the while loop above never exits
    s.close()
if __name__ == '__main__':
    # Run the face-encoding socket server when executed as a script.
    Main()
|
998,281 | 9d99880883062ae209c02a79362434728f7100b8 | from math import log
def binary_flip(n):
    """Flip every bit of a non-negative integer within its own bit-length.

    Example: 5 (0b101) -> 2 (0b010); 6 (0b110) -> 1 (0b001).

    The original built the mask from floating-point logarithms,
    ``int(pow(2, log(n)/log(2)) - 1)``, so for non-powers-of-two the mask
    depended on rounding and could lose the top bit (wrong results for
    inputs such as 5 or 9).  ``int.bit_length`` builds the mask exactly and
    agrees with the original wherever the original was correct; it also
    handles n == 0 (returns 0) instead of raising in ``log``.
    """
    return (~n) & ((1 << n.bit_length()) - 1)
# Demonstrate the flip on a few small sample values.
for sample in (5, 4, 6):
    print(binary_flip(sample))
|
998,282 | d9c1924216579a2732376d6599614e83e93970e8 | # coding: utf-8
import os
from StringIO import StringIO
from email.utils import COMMASPACE
from girlfriend.testing import GirlFriendTestCase
from girlfriend.plugin.mail import (
SMTPManager,
Attachment,
SendMailPlugin,
Mail,
)
from girlfriend.util.config import Config
class SMTPManagerTestCase(GirlFriendTestCase):
    """Tests for SMTPManager.validate_config normalization of SMTP settings."""

    def test_validate_config(self):
        """A missing port defaults to 25; a string port is coerced to int."""
        smtp_manager = SMTPManager()
        config = Config({
            "smtp_test": {
                "host": "smtp.163.com",
                "account": "17600817832@163.com",
                "password": "gf123456"
            }
        })
        # No "port" key: validate_config should fill in the default (25)
        smtp_manager.validate_config(config)
        self.assertEquals(config["smtp_test"]["port"], 25)
        # Port supplied as the string "25": should be normalized to int 25
        config["smtp_test"]["port"] = "25"
        smtp_manager = SMTPManager()
        smtp_manager.validate_config(config)
        self.assertEquals(config["smtp_test"]["port"], 25)
class AttachmentTestCase(GirlFriendTestCase):
    """Tests that Attachment builds identical MIME objects from a file path,
    an open file object, and an in-memory StringIO source."""

    def test_build_mime_object(self):
        # Fixture: a small file on disk to attach by path and by handle
        with open("test_attachment.txt", "w") as f:
            f.write("Hello! Hawaii!")
        # test file path
        attachment = Attachment("test_attachment.txt",
                                "text/plain", u"测试文本.txt".encode("utf-8"))
        mime_object = attachment.build_mime_object()
        # test file object
        attachment = Attachment(
            open("test_attachment.txt", "r"), "text/plain",
            u"测试文本.txt".encode("utf-8"))
        mime_object_2 = attachment.build_mime_object()
        # Same content, different source kinds -> identical serialization
        self.assertEquals(mime_object.as_string(), mime_object_2.as_string())
        # test StringIO
        attachment = Attachment(
            StringIO("Hello! Hawaii!"), "text/plain",
            u"测试文本.txt".encode("utf-8"))
        mime_object_3 = attachment.build_mime_object()
        self.assertEquals(mime_object.as_string(), mime_object_3.as_string())
        # Clean up the fixture file
        os.remove("test_attachment.txt")
class SendMailPluginTestCase(GirlFriendTestCase):
def setUp(self):
self.config = Config({
"smtp_test": {
"host": "smtp.163.com",
"port": 465,
"account": "17600817832@163.com",
"password": "gf123456",
"ssl": "true"
}
})
self.send_mail_plugin = SendMailPlugin()
self.send_mail_plugin.config_validator(self.config)
def test_execute(self):
ctx = {}
self.send_mail_plugin.execute(
ctx,
"test",
receivers="17600817832@163.com",
sender="17600817832@163.com",
subject=u"新年快乐",
content=u"新年快乐!"
)
def test_multi_receiver(self):
ctx = {}
self.send_mail_plugin.execute(
ctx,
"test",
receivers=COMMASPACE.join([
"17600817832@163.com",
"chihz3800@163.com",
"hongze.chi@gmail.com"
]),
sender="17600817832@163.com",
subject=lambda ctx, receiver: u"你好, " + receiver,
content=lambda ctx, receiver: u"你好, " + receiver
)
self.send_mail_plugin.execute(
ctx,
"test",
receivers=[
"hongze.chi@gmail.com",
"chihz3800@163.com",
],
sender="17600817832@163.com",
subject=lambda ctx, receiver: u"Hahaha, " + receiver,
content=lambda ctx, receiver: u"你好, content " + receiver,
attachments=[
Attachment(StringIO("simple!"),
"text/plain", u"文本.txt".encode("gb2312")),
Attachment(StringIO("naive!"),
"text/plain", u"文本2.txt".encode("gb2312"))
]
)
def test_customize(self):
class MyMail(Mail):
def __init__(self, context, receiver):
super(MyMail, self).__init__(context, receiver)
@property
def sender(self):
return "17600817832@163.com"
@property
def receiver_email(self):
return self._receiver.email
@property
def subject(self):
return u"新年快乐"
@property
def content(self):
return u"新年快乐,发大财!{}".format(self._receiver.name)
@property
def attachments(self):
return [
Attachment(StringIO("simple!"),
"text/plain", u"文本.txt".encode("gb2312")),
Attachment(StringIO("naive!"),
"text/plain", u"文本2.txt".encode("gb2312"))
]
class User(object):
def __init__(self, name, email):
self.name = name
self.email = email
ctx = {}
self.send_mail_plugin.execute(
ctx,
"test",
receivers=[
User(u"迟宏泽", "h.ongzechi@gmail.com"),
User(u"小白", "hongze.chi@gmail.com")
],
mail=MyMail
)
|
998,283 | 0784a7a8f97b6c3ee213ee2bb864e07f485d7159 | #
#
#
import os
import shutil
#
# domain
#
from _domain.utils import FileWriter, JsonDumper
#
#
#
from utility.generator import BaseGenerator
from antivirus.models import SafeBrowsing
#
#
#
class Generator(BaseGenerator):
    """Writes the antivirus (Safe Browsing) settings out as JSON config files."""

    def __init__(self, root_dir):
        # call the base class
        super(Generator, self).__init__(root_dir)

    def generate_config(self, target_dir):
        """Generate every config file this generator owns into *target_dir*."""
        # create writer and dumper
        w = FileWriter(target_dir)
        d = JsonDumper()
        self.generate_safebrowsing(w, d)

    def generate_safebrowsing(self, writer, dumper):
        """Serialize the first SafeBrowsing model row to safe_browsing.json.

        NOTE(review): assumes at least one SafeBrowsing row exists --
        ``.first()`` returns None on an empty table and would raise here.
        """
        o = SafeBrowsing.objects.first()
        d = {
            "enable"              : o.enable,
            "bypass_to_localnet"  : o.bypass_to_localnet,
            "api_key"             : o.api_key,
            "deny_url"            : o.deny_url,
            "check_malware"       : o.check_malware,
            "check_social"        : o.check_social,
            "check_unwanted_soft" : o.check_unwanted_soft,
            "cache_clean"         : o.cache_clean,
            "daemon_port"         : o.daemon_port,
            "helper_verbose"      : o.helper_verbose,
            "helper_total"        : o.helper_total,
            "helper_idle"         : o.helper_idle,
            "helper_startup"      : o.helper_startup
        }
        writer.write('safe_browsing.json', dumper.dumps(d))
|
998,284 | a45d9c732f97b35a2ac827ff90ab786096ae9a6a | from . import ImageObject, BlockGroup, Mothership
def hp_up(screen, player):
    """Grant the player a single extra life.

    The *screen* argument is unused; it is present so every power-up
    effect shares the same (screen, player) call signature.
    """
    player.add_lifes(1)
def bullet_up(screen, player):
    """Speed up the player's firing by shortening the reload cooldown.

    The cooldown is reduced in steps of 2 frames but never drops to 6 or
    below, keeping the fire rate bounded.
    """
    fastest_allowed = 6
    if player.maxcooldown > fastest_allowed:
        player.maxcooldown -= 2
def bullet_down(screen, player):
    """Slow the player's firing by lengthening the reload cooldown.

    The cooldown grows in steps of 2 frames but never reaches or exceeds
    one full second's worth of frames (screen.fps).
    """
    slowest_allowed = screen.fps
    below_limit = player.maxcooldown < slowest_allowed
    if below_limit:
        player.maxcooldown += 2
def shield_up(screen, player):
    """Create a protective shield (a fresh group of blocks) for the player.

    Replaces any existing ``screen.blocks`` outright; the shield is placed
    at roughly 5/6 of the screen height, offset above the player sprite.
    """
    #Spawn the blocks (3 block clusters, positioned just above the player)
    screen.blocks = BlockGroup(screen.screen_width, screen.screen_height//1.2, screen.screen, 3, screen.player1.get_height() + 10)
def emp_bomb(screen, player):
    """Damage every normal enemy at once, killing those it reduces to zero.

    Damage scales with progress: 1 point before wave 8, then wave // 4.
    """
    #Damage dealt per enemy; scales up every 4 waves once wave // 4 exceeds 1
    if screen.wave // 4 > 1:
        health_dec = screen.wave // 4
    else:
        health_dec = 1
    #Iterate through all the sprites
    for sprite in screen.enemies:
        #If the enemy would not survive the hit
        if sprite.get_lives() <= health_dec:
            #Kill the sprite
            sprite.kill()
        #Otherwise
        else:
            #Apply health_dec points of damage
            sprite.destroy(health_dec)
def deflector(screen, player):
    """Push every enemy on screen 10 pixels back (upwards), except the
    Mothership, which is immune."""
    #Iterate through the regular enemies
    for sprite in screen.enemies:
        #Move the enemy back
        sprite.move(0, -10)
    #Iterate through the other (special) sprites
    for sprite in screen.other_enemies:
        #If it is the mothership ignore it
        #NOTE(review): exact type check -- subclasses of Mothership would NOT
        #be skipped; confirm whether isinstance() was intended.
        if type(sprite) == Mothership:
            continue
        #Otherwise move the sprite back
        sprite.move(0, -10)
def extra_bullet_power(screen, player):
    """Raise the player's bullet damage by one, capped relative to the wave.

    The buff only applies while the current bullet power is below
    ``screen.wave + 2``, so the damage ceiling grows as waves progress.
    """
    damage_cap = screen.wave + 2
    if player.get_bullet_power() < damage_cap:
        player.increase_bullet_power(1)
def decrease_bullet_power(screen, player):
    """Lower the player's bullet damage by one, never dropping below 1.

    The *screen* argument is unused; it keeps the shared power-up effect
    signature of (screen, player).
    """
    current = player.get_bullet_power()
    if current > 1:
        player.increase_bullet_power(-1)
class PowerUp(ImageObject):
    """An on-screen collectible that applies an effect when picked up and
    despawns after a fixed number of update ticks."""

    #To store the images of the sprites (populated externally, keyed by power type)
    sprites = {}

    #Maps power type name -> module-level effect function f(screen, player)
    powers = {'bullet_up' : bullet_up,
              'bullet_attack_up' : extra_bullet_power,
              "debuff_bullet" : decrease_bullet_power,
              "deflector" : deflector,
              "emp" : emp_bomb,
              "hp_up" : hp_up,
              "shield_up" : shield_up}

    def __init__(self, initial_x:int, initial_y:int, width:int, height:int, power_type:str, time_to_live:int, debug:bool = False):
        """Constructor for the powerup class.

        power_type must be a key of PowerUp.sprites / PowerUp.powers;
        time_to_live is the number of update() calls before despawning.
        """
        #Call the superclass constructor with the sprite for this power type
        super().__init__(initial_x, initial_y, width, height, PowerUp.sprites[power_type], debug)
        #Store variables
        self.power_type = power_type
        self.ttl = time_to_live
        #Scale the image to the fixed 30x30 pickup size
        self.scale(30,30)

    @staticmethod
    def get_powerups() -> tuple:
        """Return the names of all registered power-up types."""
        return tuple(PowerUp.powers.keys())

    def get_ability(self):
        """Return the effect function f(screen, player) for this power-up."""
        #Play the powerup sound
        # self.sound.play('powerup')
        #Return the powerup function
        return PowerUp.powers[self.power_type]

    def get_power_type(self) -> str:
        """Get the power type name of this power-up."""
        return self.power_type

    def update(self) -> None:
        """Tick down the time-to-live; despawn when it reaches zero."""
        #If time to live is 0
        if self.ttl == 0:
            #Remove this sprite from all groups
            self.kill()
            return
        #Otherwise
        else:
            #Reduce time to live
            self.ttl -= 1
        #Call superclass update for the usual sprite bookkeeping
        return super().update()
|
998,286 | 6745e5eb295f1a1064d22096f910df3089c5fa24 | import sys
from collections import defaultdict
import numpy as np
infile = sys.argv[1]
input = open(infile, "r")
data = input.read().split("\n")[1:-1]
input.close()
SYS_MOS = defaultdict(list)
SYS_SPK = defaultdict(list)
SPK_MOS = defaultdict(list)
SPK_SYS = defaultdict(list)
UTT_MOS = defaultdict(list)
#spoofed,A07,LA_0028,LA_E_7151962.wav,5
for line in data:
la_system = line.split(",")[1]
speaker = line.split(",")[2]
mos = int(line.split(",")[-1])
utt = line.split(",")[3]
SYS_MOS[la_system].append(mos)
SYS_SPK[la_system].append(speaker)
SPK_MOS[speaker].append(mos)
SPK_SYS[speaker].append(la_system)
UTT_MOS[utt].append(mos)
for k,v in SYS_MOS.items():
la_system = k
avg_mos = np.average(np.array(v))
std_mos = np.std(np.array(v))
print(la_system, avg_mos, std_mos, len(v))
for k,v in SYS_SPK.items():
la_system = k
unique_speakers = len(set(v))
print(la_system, unique_speakers)
for k,v in SPK_MOS.items():
speaker = k
avg_mos = sum(v) / float(len(v))
std_mos = np.std(np.array(v))
print(speaker, avg_mos, std_mos, len(v))
for k,v in SPK_SYS.items():
speaker = k
systems = sorted(set(v))
print(speaker, systems)
|
998,286 | f7cce749cbd77b5b3f1350eb5762e1b7966c2324 | from cards.exceptions import ErrorCreatureOwner
from cards.exceptions import InvalidParameter
from cards.exceptions import CreatureNotFound
from mapchart.models import HexMap as Map
def play_card(hexmap, player, card, targets):
    """Resolve a (movement) card played by *player* on *hexmap*.

    targets[0] locates the creature being moved (its coordinates are
    unpacked into hexmap.get_piece); targets[1] holds the subsequent path.
    NOTE(review): exact shape of targets[0]/targets[1] is defined by the
    caller -- confirm they are coordinate sequences that concatenate into
    a valid move list for movement().

    Returns a dict with the played 'card' plus the keys movement() adds
    ('map', 'creature').
    """
    result = {'card': card}
    piece = hexmap.get_piece(*targets[0])
    # Delegate validation and application of the path to movement()
    result.update(movement(hexmap, player, piece, targets[0] + targets[1]))
    return result
def movement(hexmap, player, target_creature, moves):
    """Validate a movement path for *target_creature* and describe the result.

    moves[0] must be the creature's current location; each consecutive pair
    of entries must be adjacent hexes.

    Raises:
        ErrorCreatureOwner: the player does not own the creature.
        CreatureNotFound: moves[0] does not actually hold the creature.
        InvalidParameter: two consecutive steps are not adjacent.
    """
    # Ownership check: a player may only move creatures on their own side
    if player.side != target_creature.side:
        error = 'at movement player {} != {}'
        raise ErrorCreatureOwner(
            error.format(player.side, target_creature.side)
        )
    creature_location = moves[0]
    # Identity check: the declared start hex must contain this very creature
    if hexmap.get_piece(*creature_location) is not target_creature:
        piece = hexmap.get_piece(*creature_location)
        error = 'at movement {}[{}] != {}'
        raise CreatureNotFound(
            error.format(piece, creature_location, target_creature)
        )
    # Adjacency check over every consecutive pair of path steps
    for previous, current in zip(moves[:-1], moves[1:]):
        if not Map.are_close(previous, current):
            raise InvalidParameter('moves {}'.format(moves))
    # 'map' is the path walked (excluding the start); 'creature' is the start
    result = {
        'map': moves[1:],
        'creature': moves[0]
    }
    return result
|
998,287 | 7378adf176b14b2586cafdd0761655cf3bd3087b | from __future__ import print_function
import io
from setuptools import setup
import pylibrespot_java
def read(*filenames, **kwargs):
    """Read the given files and join their contents into one string.

    Keyword arguments:
        encoding: text encoding used to open each file (default "utf-8").
        sep: separator placed between file contents (default "\n").
    """
    encoding = kwargs.get("encoding", "utf-8")
    separator = kwargs.get("sep", "\n")

    def _slurp(name):
        with io.open(name, encoding=encoding) as handle:
            return handle.read()

    return separator.join(_slurp(name) for name in filenames)
LONG_DESCRIPTION = read("README.md")
setup(
name="pylibrespot-java",
version=pylibrespot_java.__version__,
url="http://github.com/uvjustin/pylibrespot-java/",
author="Justin Wong",
install_requires=['aiohttp'],
author_email="46082645+uvjustin@users.noreply.github.com",
description="Python Interface for librespot-java",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
package_data={"pyforked_daapd": ["py.typed"]},
zip_safe=False,
packages=["pylibrespot_java"],
include_package_data=True,
platforms="any",
classifiers=[
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Natural Language :: English",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
998,288 | 50826de6667c5b96f1bc10d9578083097a233e02 | # coding:utf-8
import time
import numpy as np
import model.cluster as cluster
import os
import collections
import time
c = collections.Counter()
_cur_dir = os.path.dirname(os.path.realpath(__file__))
class config():
def __init__(self):
self.result_dir = os.path.join(_cur_dir)
self.input_file = os.path.join(_cur_dir, 'cluster_test_data30.txt')
self.vector_len = 32
def train(config, X):
print(X.shape)
clst = cluster.AdaptiveAgglomerativeClustering(linkage='complete', affinity='cosine', threhold=0.8)
print("Training...")
labels = clst.fit_predict(X)
n_clusters = clst.n_clusters
return labels, n_clusters
if __name__ == '__main__':
#load data
config = config()
with open(config.input_file, 'r') as fr:
train_data = [r.strip().split('\t') for r in fr]
train_data = [r[:-1]+[[float(num) for num in r[-1][1:-1].replace(' ','').split(',')]] for r in train_data if len(r) == 20]
print("Done with data length:", len(train_data))
# train
feature = [r[-1] for r in train_data]
feature = np.array(feature)
begin = time.time()
if len(feature) > 1:
labels, n_clusters = train(config, feature)
else:
labels, n_clusters = [0], 1
print("Time cost:", time.time() - begin)
# write cluster result
write_data = []
for i, item in enumerate(train_data):
item.append(labels[i])
write_data.append(item)
write_data.sort(key=lambda a: a[-1])
write_data = [[str(r) for r in l] for l in write_data]
write_data = ['\t'.join(l[-1:] + l[13:14] + l[:-1]) for l in write_data]
fw = open(os.path.join(config.result_dir, 'cluster_test_result.txt'), 'w')
fw.write('\n'.join(write_data))
fw.close()
|
998,289 | 7794e69611a769fe2c4e15ad92a7621f3a35b327 | import math
class AllPrimeNumbers:
    """Prime counting (sieve of Eratosthenes) and integer square root."""

    def countPrimes(self, n):
        """Return the number of primes strictly less than ``n``.

        Uses a sieve of Eratosthenes: O(n log log n) time, O(n) space.
        """
        if n < 3:
            return 0
        # composite[i] == 1 once i is known non-prime; 0 and 1 by definition
        composite = [0] * n
        composite[0] = composite[1] = 1
        # Only factors up to sqrt(n) need to be sieved; math.isqrt avoids the
        # float rounding the original int(math.sqrt(n)) could suffer for big n.
        for i in range(2, math.isqrt(n) + 1):
            # Mark 2i, 3i, ... below n as composite (same work as the
            # original inner while loop, expressed as a stepped range).
            for multiple in range(2 * i, n, i):
                composite[multiple] = 1
        return composite.count(0)

    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) exactly.

        BUGFIX: the original ``floor(x ** 0.5)`` goes through a 53-bit float
        and can be off by one for large integers; math.isqrt is exact.
        """
        return math.isqrt(x)
998,290 | d31f85476331bf8ce2ebafbb1f8cb066d4640cae | # This file is part of scorevideo_lib: A library for working with scorevideo
# Use of this file is governed by the license in LICENSE.txt.
"""Test operations needed to run other tests.
"""
from tests.src.test_rawlog import get_actual_expected
from tests.src import TEST_RES
def test_file_read():
    """Test that file reading works.

    Tests that lines with and without trailing newlines are extracted verbatim
    from files as performed by other tests.

    Returns: None
    """
    # The fixture's first line ends with "\n"; its last line does not --
    # readlines() must preserve both verbatim.
    expected = ["scorevideo LOG\n", "File: log.mat"]
    with open(TEST_RES + "/file_read.txt", 'r') as file:
        actual = file.readlines()
    assert expected == actual
def return_file_read(_):
    """Return the lines expected to be found in the file_read test file.

    Args:
        _: Ignored; present only so this function matches the call signature
           of the RawLog.get_section_* functions.

    Returns: A list of the lines in file_read.txt with trailing whitespace
    removed
    """
    expected_lines = ("scorevideo LOG", "File: log.mat")
    return list(expected_lines)
def test_get_actual_expected():
    """Test that the get_actual_expected function works.

    This is important because other functions use get_actual_expected

    Returns: None
    """
    # Feed the same fixture to both sides: the "expected" extractor
    # (return_file_read) and the "actual" file read must agree.
    exp, act = get_actual_expected(TEST_RES + "/file_read.txt",
                                   return_file_read,
                                   TEST_RES + "/file_read.txt")
    assert exp == act
|
998,291 | 6eaa96cb8f3a0528d0510af4315c539b6c024c87 | import os
import math
import json
out = {"Main Power Up":{}}
for file in os.listdir("."):
if ".txt" in file:
with open(file, 'r') as f:
d = f.readlines()
name = d[0].strip()
desc = d[1].strip()
d = d[2:]
if "Damage Up" in desc:
_min_min = float(d[0].split(' ')[3].split('-')[0].strip())
_min_max = float(d[-1].split(' ')[3].split('-')[0].strip())
_min_mid = float((_min_min + _min_max) / 2)
min_params = [_min_max, _min_mid, _min_min]
_max_min = float(d[0].split(' ')[3].split('-')[1].strip())
_max_max = float(d[-1].split(' ')[3].split('-')[1].strip())
_max_mid = float((_max_min + _max_max) / 2)
max_params = [_max_max, _max_mid, _max_min]
out["Main Power Up"][name] = {"desc": desc, "min_params":min_params, "max_params":max_params}
else:
_max = float(d[-1].split(' ')[3].strip())
_min = float(d[0].split(' ')[3].strip())
_mid = float((_max + _min) / 2)
params = [_max,_mid,_min]
out["Main Power Up"][name] = {"desc": desc, "params":params}
with open("mpu_params.json", 'w') as f:
f.write(json.dumps(out, indent=2))
|
998,292 | 74f766c126907cb61ccd06c83c3966bc075c5af9 | """ Compute statistical significance with spatial autoregressive modeling. """
from scipy.optimize import curve_fit
import numpy as np
import pysal as ps
def fit_exp_decay(x, y):
    """Fit an exponentially decaying function of the form y = exp(-x/x0) for
    dependent variable `y` evaluated at positions `x`.

    Parameters
    ----------
    x : np.ndarray
        independent variable
    y : np.ndarray
        dependent variable

    Returns
    -------
    x0 : float
        OLS estimate of parameter x0
    """
    decay = lambda z, z0: np.exp(-z / z0)
    (x0_hat,), _covariance = curve_fit(decay, x, y)
    return x0_hat
def fit_empirical_autocorr_scale(corrmat, distmat, dmax=None):
    """Fit the spatial autocorrelation length scale d0 to empirical data.

    Flattens the upper triangles of `corrmat` (pairwise correlations) and
    `distmat` (pairwise geodesic distances), optionally discards pairs at
    distance >= `dmax`, and estimates d0 in ``r = exp(-d/d0)`` via OLS
    (see fit_exp_decay).

    Parameters
    ----------
    corrmat : np.ndarray
        correlation matrix; element [i, j] is the correlation between
        parcels i and j
    distmat : np.ndarray
        matrix whose i,j-th element is the geodesic distance btwn parcels i & j
    dmax : float, optional
        cutoff distance; when given, only pairs closer than dmax are used

    Returns
    -------
    d0 : float
        characteristic scale of spatial autocorrelation
    """
    n_rows, n_cols = corrmat.shape
    assert n_rows == n_cols and (n_rows, n_cols) == distmat.shape

    # Each unordered pair appears once in the strict upper triangle
    upper = np.triu_indices(n_rows, k=1)
    dists = distmat[upper]
    corrs = corrmat[upper]

    if dmax is not None:
        total = dists.size
        keep = np.less(dists, dmax)
        dists = dists[keep]
        corrs = corrs[keep]
        print("# Fitting d0 on %i of %i elements for which d < (dmax = %f)" % (
            sum(keep), total, dmax))

    return fit_exp_decay(dists, corrs)
def pysal_weight_matrix(d, d0, normalize=True):
    """ Construct a weight matrix as a PySAL W weights object.

    Parameters
    ----------
    d : np.ndarray
        matrix whose i,j-th element is the geodesic distance btwn parcels i & j
    d0 : float
        characteristic scale of spatial autocorrelation (e.g., returned from
        fit_empirical_autocorr_scale)
    normalize : bool, optional
        if True, normalize each row of the weight matrix to sum to 1
        (PySAL's row-standardized convention)

    Returns
    -------
    ps.W
        PySAL W weights object

    Notes
    -----
    This function returns a PySAL W weights object, for use with the spatial
    autoregressive modeling routine pysal.spreg.ml_lag.ML_Lag().
    """
    nr, nc = d.shape
    assert nr == nc

    # Create a weight matrix using the input spatial autocorrelation scale
    weights = np.exp(-d / d0)

    # Mask diagonal elements to remove self-coupling; optionally normalize
    diagmask = np.eye(nr, dtype=bool)
    weights[diagmask] = 0
    if normalize:
        # BUGFIX: without keepdims the (n,) row-sum vector broadcasts along
        # the LAST axis, dividing entry [i, j] by the sum of row j -- i.e.
        # column normalization, not the intended row normalization. With
        # keepdims the sums have shape (n, 1) and each row i is divided by
        # its own sum, so rows sum to 1.
        weights /= weights.sum(axis=1, keepdims=True)

    # Construct pysal weight object directly from the np.ndarray
    return ps.weights.full2W(weights)
def slm_ml_beta_pval(x, y, w):
    """ Compute p-value associated with the maximum likelihood (ML) estimate of
    spatial lag model (SLM) parameter \beta.

    Parameters
    ----------
    x : np.ndarray
        independent variable (1-d)
    y : np.ndarray
        dependent variable (1-d, same length as x)
    w : ps.W
        PySAL W weights object

    Returns
    -------
    beta : float
        NOTE(review): despite the local name, PySAL's ``ML_Lag.z_stat`` is a
        list of (z-score, p-value) pairs per coefficient, so ``z_stat[1][1]``
        is the p-value associated with \beta (consistent with this
        function's name), not the \beta coefficient itself -- confirm
        against the PySAL spreg documentation.

    Notes
    -----
    The SLM has the functional form ``y = \rho W y + x \beta + \nu``, where \rho
    scales the strength of spatial autocorrelation; W is a user-defined weight
    matrix that implicitly specifies the form of spatial structure in the data;
    and \nu is normally distributed. This function fits an SLM using the input
    x, y and w, and returns the statistical significance for parameter \beta.
    """
    assert x.size == y.size
    assert x.ndim == y.ndim == 1

    # Transform 1d numpy arrays to column vectors (shape (n, 1)) as required
    # by the spreg API
    x = np.expand_dims(x, 1)
    y = np.expand_dims(y, 1)

    # Compute maximum likelihood estimation of spatial lag model
    res = ps.spreg.ml_lag.ML_Lag(y, x, w)

    # Element [1] corresponds to the slope coefficient (element [0] is the
    # intercept); see the Returns note about what [1][1] actually holds.
    beta = res.z_stat[1][1]
    return beta
|
998,293 | 707da3ccddd492f20b490a0e7b2798638b7f11ab | from clove.network.bitcoin import Bitcoin
class Chaincoin(Bitcoin):
    """
    Class with all the necessary CHC network information based on
    https://github.com/chaincoin/chaincoin/blob/master/src/chainparams.cpp
    (date of access: 01/18/2018)
    """
    name = 'chaincoin'
    symbols = ('CHC', )
    # DNS seed nodes used for initial peer discovery
    seeds = (
        'seed1.chaincoin.org',
        'seed2.chaincoin.org',
        'seed3.chaincoin.org',
        'seed4.chaincoin.org',
        'seed5.chaincoin.org',
        'seed6.chaincoin.org',
        'seed7.chaincoin.org',
        'seed8.chaincoin.org',
        'chc1.ignorelist.com',
        'chc2.ignorelist.com',
        'chc3.ignorelist.com',
        'chc4.ignorelist.com',
    )
    # Default P2P network port
    port = 11994
    # Network magic bytes prefixing every P2P message
    message_start = b'\xf9\xbe\xb4\xd9'
    # Version bytes for base58-encoded addresses and WIF private keys
    base58_prefixes = {
        'PUBKEY_ADDR': 0,
        'SCRIPT_ADDR': 5,
        'SECRET_KEY': 128
    }

    # Chaincoin does not have a TESTNET
    # https://github.com/chaincoin/chaincoin/blob/master/src/chainparams.cpp#L147
998,294 | 965e3fba2c13451f4b6f434cf8f0260eb704e1f6 | import multiprocessing
from multiprocessing import Process
from capture import main as capture
from owl import main as logger
import tkinter
WIDTH = 200
HEIGHT = 100
if __name__ == '__main__':
multiprocessing.freeze_support()
logger_process = Process(target=logger)
logger_process.daemon = True
logger_process.start()
capture_process = Process(target=capture, args=(2,))
capture_process.daemon = True
capture_process.start()
window = tkinter.Tk()
window.title("Logger")
window.config(height=HEIGHT, width=WIDTH)
button_widget = tkinter.Button(window, text="Stop logging", command=window.destroy)\
.place(relx=0.5, rely=0.5, anchor=tkinter.CENTER)
tkinter.mainloop()
|
998,295 | 9c576683f641adaed037ad92aad5b078f723400e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import subprocess
from scipy.stats import chi2
TESTFILE_TEMPLATE = """#include <iostream>
#include "Chi2PLookup.h"
int main() {{
Chi2PLookup Chi2PLookupTable;
double x = {0};
int df = {1};
double outvalue;
outvalue = Chi2PLookupTable.getPValue(x, df);
std::cout << outvalue << "\\n";
return 0;
}}
"""
def test_headerfile(template=TESTFILE_TEMPLATE, testvalue=1.1,
                    df=1, precision=10000, start_chi=25, headerfile="tests/Chi2PLookup.h",
                    srcfpath="tests/test.cpp", binfpath="tests/test.out"):
    """Test generated header file within cpp source file.

    End-to-end check: generate the lookup-table header via the chi2plookup
    CLI, compile a small C++ program against it, run it, and compare its
    p-value against scipy's chi2 survival function to 6 decimal places.
    Requires g++ and a POSIX shell on PATH.

    :param str template: Template file that contains main() function and imports header file.
    :param testvalue: Chi value.
    :param int df: Degree of freedom.
    :param int precision: Table precision passed to the generator.
    :param int start_chi: Starting chi value passed to the generator.
    :param str headerfile: Path where the generated header is written.
    :param str srcfpath: Path where source file will be saved.
    :param str binfpath: Path where binary file will be saved.
    :return: None
    :rtype: None
    """
    # Generate the C++ lookup-table header with the module's CLI
    command = "python -m chi2plookup generate --headerfile={} --df={} --precision={} --start_chi={}".format(headerfile, df, precision, start_chi)
    subprocess.call(command, shell=True)

    # Reference value: survival function 1 - CDF of the chi-squared dist.
    p_value = 1 - chi2.cdf(testvalue, df)

    # Render the C++ driver with the test inputs and compile it
    template = template.format(testvalue, df)
    with open(srcfpath, "w") as outfile:
        outfile.write(template)
    subprocess.call("g++ -std=c++11 {} -o {}".format(srcfpath, binfpath), shell=True)

    # Run the compiled binary and compare to scipy at 6-decimal precision
    generated_p_value = subprocess.check_output("./{}".format(binfpath))
    assert round(float(p_value), 6) == round(float(generated_p_value.strip()), 6)
|
998,296 | fcbeb31ae46b3320ce6c870ba769a524c55a98f0 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 14 14:49:22 2015
@author: hoseung
PURPOSE:
halo mass evolution catalog for Jinsu's phase space diagram study.
OUTPUT:
Consists of 10 columns, for each galaxy in each snapshot.
# ID x y z vx vy vz Rvir Mvir Mass
((nnouts*ngal) x ncolumns) number of values.
+
array of snapshot redshifts at the end of file.
SAMPLING:
First, halos that end up within 'rvir' * the cluster virial radius are chosen.
Among them, only those with complete main prg tree are finally selected.
Additionally, halo mass cut (Mcut) is available.
"""
import numpy as np
def filter_halo_mass(data, Mcut=None):
    """Return indices of halos more massive than ``Mcut``.

    ``data`` is halo-finder output; ``data['m'][0]`` holds the mass array.
    """
    masses = np.asarray(data['m'][0])
    heavy = np.flatnonzero(masses > Mcut)
    print("# of halos:", len(heavy))
    return heavy
def n_most_massive(data, massive_count=1000):
    """Return indices of the top ``massive_count`` most massive halos,
    most massive first (1000 by default)."""
    ascending = np.argsort(np.asarray(data['m'][0]))
    # Reverse to descending mass order and keep the leading slice
    return ascending[::-1][:massive_count]
def filter_halo_pnum(data, Ncut=1000):
    """Return indices of halos with more than ``Ncut`` particles.

    ``data`` is halo-finder output; ``data['np'][0]`` holds particle counts.
    """
    particle_counts = np.asarray(data['np'][0])
    resolved = np.flatnonzero(particle_counts > Ncut)
    print("# of halos:", len(resolved))
    return resolved
def extract_halos_within(halos, ind_center, scale=1.0, Mcut=1e4):
    '''
    Returns a boolean mask over halos selecting those that lie within
    SCALE * Rvir of the central halo AND are more massive than Mcut.

    def extract_halos_within(halos, ind_center, scale=1.0)
    halos : halo finder output (single snapshot)
    ind_center : index of central halo
    scale : multiplying factor to the Rvir of the central halo
    Mcut : minimum halo mass for inclusion
    '''
    import numpy as np
    import utils.sampling as smp

    # BUGFIX: the body previously referenced an undefined name ``i_center``
    # (NameError on every call); it now uses the ``ind_center`` parameter.
    xc = halos['p'][0][0][ind_center]
    yc = halos['p'][0][1][ind_center]
    zc = halos['p'][0][2][ind_center]
    rvir = halos['rvir'][0][ind_center]

    xx = halos['p'][0][0]
    yy = halos['p'][0][1]
    zz = halos['p'][0][2]
    m = np.array(halos['m'][0])

    # Distance of every halo to the chosen center
    dd = smp.distance_to([xc, yc, zc], [xx, yy, zz])

    # Combine the proximity and mass criteria
    i_m = m > Mcut
    i_ok = np.logical_and(dd < (rvir * scale), i_m)
    return i_ok
#%%
''' Cluster 05101, cluster subhaloes (at the final snapshot)
'''
import tree.hmutil as hmu
options = ['N most massive', '>1e13', 'nDM']
option = options[1]
n_massive = 500
fixed_position = True
Ncut = 120
Mcut = 1e7
#work_dir = '/home/hoseung/Work/data/AGN2/'
work_dir = './'
nout_ini = 30
nout_fi = 132
nout_ini_hal = 10
rvir=3.0
# nout_halo = 122 == nout 10
# nout_halo = 0 == nout 132
nouts = range(nout_fi, nout_ini -1, -1)
Nnouts = len(nouts)
try:
f = open(work_dir + 'satellite_halos.txt', 'w')
# f_properties = open(work_dir + 'satellite_halos_prop.txt', 'w')
except:
print("No filename is given.\n Try write_halo_xyz(x,y,z,r,filename = fn)")
from tree import tmtree
tree = tmtree.load(work_dir=work_dir, filename="halo/TMtree.fits")
tfin = tree[np.where(tree['NOUT'] == 0)]
tini = tree[np.where(tree['NOUT'] == nout_fi - nout_ini)]
halo = hmo.Halo(nout=nout_fi, base=work_dir, halofinder="HM", load=True)
#%%
i_center = np.argmax(halo.data['np'])
import utils.sampling as smp
import tree.halomodule as hmo
i_satellites = smp.extract_halos_within(halo.data, i_center, scale=rvir, Mcut=Mcut)
print("Total {0} halos \n{1} halos are selected".format(
len(i_satellites),sum(i_satellites)))
# halos found inside the cluster and has tree back to nout_ini
halo_list = halo.data['id'][i_satellites]
#print(halo_list)
h_ind_ok, halo_ok = tmtree.check_tree_complete(tree, 0, nout_fi - nout_ini, halo_list)
print(len(halo_ok))
from utils import match
import load
f.write(" # ID x y z[Mpc] vx vy vz[km/s]")
f.write(" Rvir(Mpc) Mvir Mass[Msun]\n")
zred=[]
for inout, nout in enumerate(nouts):
info = load.info.Info()
info.setup(nout=nout, base=work_dir)
info.read_info()
# data = hmu.load_data(nout, work_dir=work_dir, normalize=True) # load .sav halo file and normalize it to code unit.
halo = hmo.Halo(nout=nout_fi, base=work_dir, halofinder="HM", load=True, info=info)
# fname = work_dir + 'halos_py/halos_' + '031' + '.pickle'
# data = load_halo_py(fname)
ind = match.match_list_ind(data['id'], halo_ok[:,inout])
x = halo.data['x'][ind] * info.pboxsize
y = halo.data['y'][ind] * info.pboxsize
z = halo.data['z'][ind] * info.pboxsize
vx = halo.data['vx'][ind]# * info.kms
vy = halo.data['vy'][ind]# * info.kms
vz = halo.data['vz'][ind]# * info.kms
r = halo.data['rvir'][ind] * info.pboxsize
m = halo.data['mvir'][ind] * 1e11
m2 = halo.data['m'][ind]
ids = [int(i) for i in halo.data['id'][ind]]
for i in range(len(ids)):
f.write("{:<4} {:<4} {:.5f} {:.5f} {:.5f}".format(i,ids[i],x[i],y[i],z[i]))
f.write(" {:.3f} {:.3f} {:.3f}".format(vx[i],vy[i],vz[i]))
f.write(" {:.6f} {:.0f} {:.0f} \n".format(r[i],m[i],m2[i]))
zred.append(str(info.zred))
f.write(" Redshifts \n")
for i, nout in enumerate(nouts):
f.write("{0} ".format(zred[i]))
f.close() |
998,297 | 1d5dd1940a53bc8e0ae64ab76aa68f9cecc89598 |
__author__ = "TROY Development Team"
__copyright__ = "Copyright 2013, RADICAL"
__license__ = "MIT"
from compute_unit_description import ComputeUnitDescription
from compute_unit import ComputeUnit
from relation_description import RelationDescription
from relation import Relation
from task_description import TaskDescription
from task import Task
from workload import Workload
from workload_manager import WorkloadManager
from data_stager import DataStager
|
998,298 | 883d233124d9b9e4557201d1146eee45e63e5d4d | #!/usr/bin/env python
from random import randint, choice
from math import floor
def public_good():
    """Simulate a 10-round public-goods game among four players.

    Each round every player contributes a random integer share of their
    holdings to a common pool; the pool is tripled and split evenly, and
    the indivisible remainder carries over in the pool. State is printed
    each round; nothing is returned.
    """
    players = [5,5,5,5]   # starting endowment of each player
    pool = 0
    multiplier = 3        # public-good multiplier applied to contributions
    for _ in range(10):   # 10 rounds
        for n, p in enumerate(players):
            # each player contributes a uniform random amount in 0..floor(p)
            val = randint(0,floor(p))
            players[n] -= val
            pool += val
        print("players", players, "pool", pool)
        pool *= multiplier
        # split the pool evenly; the remainder stays in the pool
        share = floor(pool / len(players))
        pool -= share * len(players)
        players = [p+share for p in players]
        print("players", players, "pool", pool)
        print()
print()
print("Public Good")
public_good()
print()
|
998,299 | 8a4b5e8701edc030ad278f015e6790346a2294b2 | '''
czytanie znakow
'''
imie = raw_input("jak masz na imie?\n")
print imie |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.